Diffstat (limited to 'include/linux')
1833 files changed, 324723 insertions, 0 deletions
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h new file mode 100644 index 00000000..b24ff086 --- /dev/null +++ b/include/linux/8250_pci.h @@ -0,0 +1,37 @@ +/* + * Definitions for PCI support. + */ +#define FL_BASE_MASK 0x0007 +#define FL_BASE0 0x0000 +#define FL_BASE1 0x0001 +#define FL_BASE2 0x0002 +#define FL_BASE3 0x0003 +#define FL_BASE4 0x0004 +#define FL_GET_BASE(x) (x & FL_BASE_MASK) + +/* Use successive BARs (PCI base address registers), + else use offset into some specified BAR */ +#define FL_BASE_BARS 0x0008 + +/* do not assign an irq */ +#define FL_NOIRQ 0x0080 + +/* Use the Base address register size to cap number of ports */ +#define FL_REGION_SZ_CAP 0x0100 + +struct pciserial_board { + unsigned int flags; + unsigned int num_ports; + unsigned int base_baud; + unsigned int uart_offset; + unsigned int reg_shift; + unsigned int first_offset; +}; + +struct serial_private; + +struct serial_private * +pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board); +void pciserial_remove_ports(struct serial_private *priv); +void pciserial_suspend_ports(struct serial_private *priv); +void pciserial_resume_ports(struct serial_private *priv); diff --git a/include/linux/Kbuild b/include/linux/Kbuild new file mode 100644 index 00000000..fb53cda1 --- /dev/null +++ b/include/linux/Kbuild @@ -0,0 +1,412 @@ +header-y += byteorder/ +header-y += can/ +header-y += caif/ +header-y += dvb/ +header-y += hdlc/ +header-y += hsi/ +header-y += isdn/ +header-y += mmc/ +header-y += nfsd/ +header-y += raid/ +header-y += spi/ +header-y += sunrpc/ +header-y += tc_act/ +header-y += tc_ematch/ +header-y += netfilter/ +header-y += netfilter_arp/ +header-y += netfilter_bridge/ +header-y += netfilter_ipv4/ +header-y += netfilter_ipv6/ +header-y += usb/ +header-y += wimax/ + +objhdr-y += version.h + +ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \ + $(srctree)/include/asm-$(SRCARCH)/a.out.h \ + $(INSTALL_HDR_PATH)/include/asm-*/a.out.h),) +header-y += a.out.h +endif +ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \ + $(srctree)/include/asm-$(SRCARCH)/kvm.h \ + $(INSTALL_HDR_PATH)/include/asm-*/kvm.h),) +header-y += kvm.h +endif +ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \ + $(srctree)/include/asm-$(SRCARCH)/kvm_para.h \ + $(INSTALL_HDR_PATH)/include/asm-*/kvm_para.h),) +header-y += kvm_para.h +endif + +header-y += acct.h +header-y += adb.h +header-y += adfs_fs.h +header-y += affs_hardblocks.h +header-y += agpgart.h +header-y += aio_abi.h +header-y += apm_bios.h +header-y += arcfb.h +header-y += atalk.h +header-y += atm.h +header-y += atm_eni.h +header-y += atm_he.h +header-y += atm_idt77105.h +header-y += atm_nicstar.h +header-y += atm_tcp.h +header-y += atm_zatm.h +header-y += atmapi.h +header-y += atmarp.h +header-y += atmbr2684.h +header-y += atmclip.h +header-y += atmdev.h +header-y += atmioc.h +header-y += atmlec.h +header-y += atmmpc.h +header-y += atmppp.h +header-y += atmsap.h +header-y += atmsvc.h +header-y += audit.h +header-y += auto_fs.h +header-y += auto_fs4.h +header-y += auxvec.h +header-y += ax25.h +header-y += b1lli.h +header-y += baycom.h +header-y += bfs_fs.h +header-y += binfmts.h +header-y += blk_types.h +header-y += blkpg.h +header-y += blktrace_api.h +header-y += bpqether.h +header-y += bsg.h +header-y += can.h +header-y += capability.h +header-y += capi.h +header-y += cciss_defs.h +header-y += cciss_ioctl.h +header-y += cdk.h +header-y += cdrom.h +header-y += cgroupstats.h +header-y += chio.h 
+header-y += cm4000_cs.h +header-y += cn_proc.h +header-y += coda.h +header-y += coda_psdev.h +header-y += coff.h +header-y += comstats.h +header-y += connector.h +header-y += const.h +header-y += cramfs_fs.h +header-y += cuda.h +header-y += cyclades.h +header-y += cycx_cfm.h +header-y += dcbnl.h +header-y += dccp.h +header-y += dlm.h +header-y += dlm_device.h +header-y += dlm_netlink.h +header-y += dlm_plock.h +header-y += dlmconstants.h +header-y += dm-ioctl.h +header-y += dm-log-userspace.h +header-y += dn.h +header-y += dqblk_xfs.h +header-y += edd.h +header-y += efs_fs_sb.h +header-y += elf-em.h +header-y += elf-fdpic.h +header-y += elf.h +header-y += elfcore.h +header-y += errno.h +header-y += errqueue.h +header-y += ethtool.h +header-y += eventpoll.h +header-y += fadvise.h +header-y += falloc.h +header-y += fanotify.h +header-y += fb.h +header-y += fcntl.h +header-y += fd.h +header-y += fdreg.h +header-y += fib_rules.h +header-y += fiemap.h +header-y += filter.h +header-y += firewire-cdev.h +header-y += firewire-constants.h +header-y += flat.h +header-y += fs.h +header-y += fsl_hypervisor.h +header-y += fuse.h +header-y += futex.h +header-y += gameport.h +header-y += gen_stats.h +header-y += generic_serial.h +header-y += genetlink.h +header-y += gfs2_ondisk.h +header-y += gigaset_dev.h +header-y += hdlc.h +header-y += hdlcdrv.h +header-y += hdreg.h +header-y += hid.h +header-y += hiddev.h +header-y += hidraw.h +header-y += hpet.h +header-y += hysdn_if.h +header-y += i2c-dev.h +header-y += i2c.h +header-y += i2o-dev.h +header-y += i8k.h +header-y += icmp.h +header-y += icmpv6.h +header-y += if.h +header-y += if_addr.h +header-y += if_addrlabel.h +header-y += if_alg.h +header-y += if_arcnet.h +header-y += if_arp.h +header-y += if_bonding.h +header-y += if_bridge.h +header-y += if_cablemodem.h +header-y += if_ec.h +header-y += if_eql.h +header-y += if_ether.h +header-y += if_fc.h +header-y += if_fddi.h +header-y += if_frad.h +header-y += if_hippi.h +header-y += if_infiniband.h +header-y += if_link.h +header-y += if_ltalk.h +header-y += if_packet.h +header-y += if_phonet.h +header-y += if_plip.h +header-y += if_ppp.h +header-y += if_pppol2tp.h +header-y += if_pppox.h +header-y += if_pppolac.h +header-y += if_pppopns.h +header-y += if_slip.h +header-y += if_strip.h +header-y += if_team.h +header-y += if_tr.h +header-y += if_tun.h +header-y += if_tunnel.h +header-y += if_vlan.h +header-y += if_x25.h +header-y += igmp.h +header-y += in.h +header-y += in6.h +header-y += in_route.h +header-y += sock_diag.h +header-y += inet_diag.h +header-y += unix_diag.h +header-y += inotify.h +header-y += input.h +header-y += ioctl.h +header-y += ip.h +header-y += ip6_tunnel.h +header-y += ip_vs.h +header-y += ipc.h +header-y += ipmi.h +header-y += ipmi_msgdefs.h +header-y += ipsec.h +header-y += ipv6.h +header-y += ipv6_route.h +header-y += ipx.h +header-y += irda.h +header-y += irqnr.h +header-y += isdn.h +header-y += isdn_divertif.h +header-y += isdn_ppp.h +header-y += isdnif.h +header-y += iso_fs.h +header-y += ivtv.h +header-y += ivtvfb.h +header-y += ixjuser.h +header-y += jffs2.h +header-y += joystick.h +header-y += kd.h +header-y += kdev_t.h +header-y += kernel.h +header-y += kernelcapi.h +header-y += kernel-page-flags.h +header-y += keyboard.h +header-y += keyctl.h +header-y += l2tp.h +header-y += limits.h +header-y += llc.h +header-y += loop.h +header-y += lp.h +header-y += magic.h +header-y += major.h +header-y += map_to_7segment.h +header-y += matroxfb.h +header-y += mdio.h +header-y += media.h 
+header-y += mempolicy.h +header-y += meye.h +header-y += mii.h +header-y += minix_fs.h +header-y += mman.h +header-y += mmtimer.h +header-y += mqueue.h +header-y += mroute.h +header-y += mroute6.h +header-y += msdos_fs.h +header-y += msg.h +header-y += mtio.h +header-y += n_r3964.h +header-y += nbd.h +header-y += ncp.h +header-y += ncp_fs.h +header-y += ncp_mount.h +header-y += ncp_no.h +header-y += neighbour.h +header-y += net.h +header-y += net_dropmon.h +header-y += net_tstamp.h +header-y += netdevice.h +header-y += netfilter.h +header-y += netfilter_arp.h +header-y += netfilter_bridge.h +header-y += netfilter_decnet.h +header-y += netfilter_ipv4.h +header-y += netfilter_ipv6.h +header-y += netlink.h +header-y += netrom.h +header-y += nfs.h +header-y += nfs2.h +header-y += nfs3.h +header-y += nfs4.h +header-y += nfs4_mount.h +header-y += nfs_fs.h +header-y += nfs_idmap.h +header-y += nfs_mount.h +header-y += nfsacl.h +header-y += nl80211.h +header-y += nubus.h +header-y += nvram.h +header-y += omap3isp.h +header-y += omapfb.h +header-y += oom.h +header-y += param.h +header-y += parport.h +header-y += patchkey.h +header-y += pci.h +header-y += pci_regs.h +header-y += perf_event.h +header-y += personality.h +header-y += pfkeyv2.h +header-y += pg.h +header-y += phantom.h +header-y += phonet.h +header-y += pkt_cls.h +header-y += pkt_sched.h +header-y += pktcdvd.h +header-y += pmu.h +header-y += poll.h +header-y += posix_types.h +header-y += ppdev.h +header-y += ppp-comp.h +header-y += ppp-ioctl.h +header-y += ppp_defs.h +header-y += pps.h +header-y += prctl.h +header-y += ptp_clock.h +header-y += ptrace.h +header-y += qnx4_fs.h +header-y += qnxtypes.h +header-y += quota.h +header-y += radeonfb.h +header-y += random.h +header-y += raw.h +header-y += rds.h +header-y += reboot.h +header-y += reiserfs_fs.h +header-y += reiserfs_xattr.h +header-y += resource.h +header-y += rfkill.h +header-y += romfs_fs.h +header-y += rose.h +header-y += route.h +header-y += rtc.h +header-y += rtnetlink.h +header-y += scc.h +header-y += sched.h +header-y += screen_info.h +header-y += sdla.h +header-y += securebits.h +header-y += selinux_netlink.h +header-y += sem.h +header-y += serial.h +header-y += serial_core.h +header-y += serial_reg.h +header-y += serio.h +header-y += shm.h +header-y += signal.h +header-y += signalfd.h +header-y += snmp.h +header-y += socket.h +header-y += sockios.h +header-y += som.h +header-y += sonet.h +header-y += sonypi.h +header-y += sound.h +header-y += soundcard.h +header-y += stat.h +header-y += stddef.h +header-y += string.h +header-y += suspend_ioctls.h +header-y += swab.h +header-y += synclink.h +header-y += sysctl.h +header-y += sysinfo.h +header-y += taskstats.h +header-y += tcp.h +header-y += telephony.h +header-y += termios.h +header-y += time.h +header-y += times.h +header-y += timex.h +header-y += tiocl.h +header-y += tipc.h +header-y += tipc_config.h +header-y += toshiba.h +header-y += tty.h +header-y += types.h +header-y += udf_fs_i.h +header-y += udp.h +header-y += uhid.h +header-y += uinput.h +header-y += uio.h +header-y += ultrasound.h +header-y += un.h +header-y += unistd.h +header-y += usbdevice_fs.h +header-y += utime.h +header-y += utsname.h +header-y += uvcvideo.h +header-y += v4l2-mediabus.h +header-y += v4l2-subdev.h +header-y += veth.h +header-y += vhost.h +header-y += videodev2.h +header-y += virtio_9p.h +header-y += virtio_balloon.h +header-y += virtio_blk.h +header-y += virtio_config.h +header-y += virtio_console.h +header-y += virtio_ids.h +header-y += 
virtio_net.h +header-y += virtio_pci.h +header-y += virtio_ring.h +header-y += virtio_rng.h +header-y += vt.h +header-y += wait.h +header-y += wanrouter.h +header-y += watchdog.h +header-y += wimax.h +header-y += wireless.h +header-y += x25.h +header-y += xattr.h +header-y += xfrm.h diff --git a/include/linux/a.out.h b/include/linux/a.out.h new file mode 100644 index 00000000..e86dfca4 --- /dev/null +++ b/include/linux/a.out.h @@ -0,0 +1,278 @@ +#ifndef __A_OUT_GNU_H__ +#define __A_OUT_GNU_H__ + +#define __GNU_EXEC_MACROS__ + +#ifndef __STRUCT_EXEC_OVERRIDE__ + +#include <asm/a.out.h> + +#endif /* __STRUCT_EXEC_OVERRIDE__ */ + +#ifndef __ASSEMBLY__ + +/* these go in the N_MACHTYPE field */ +enum machine_type { +#if defined (M_OLDSUN2) + M__OLDSUN2 = M_OLDSUN2, +#else + M_OLDSUN2 = 0, +#endif +#if defined (M_68010) + M__68010 = M_68010, +#else + M_68010 = 1, +#endif +#if defined (M_68020) + M__68020 = M_68020, +#else + M_68020 = 2, +#endif +#if defined (M_SPARC) + M__SPARC = M_SPARC, +#else + M_SPARC = 3, +#endif + /* skip a bunch so we don't run into any of sun's numbers */ + M_386 = 100, + M_MIPS1 = 151, /* MIPS R3000/R3000 binary */ + M_MIPS2 = 152 /* MIPS R6000/R4000 binary */ +}; + +#if !defined (N_MAGIC) +#define N_MAGIC(exec) ((exec).a_info & 0xffff) +#endif +#define N_MACHTYPE(exec) ((enum machine_type)(((exec).a_info >> 16) & 0xff)) +#define N_FLAGS(exec) (((exec).a_info >> 24) & 0xff) +#define N_SET_INFO(exec, magic, type, flags) \ + ((exec).a_info = ((magic) & 0xffff) \ + | (((int)(type) & 0xff) << 16) \ + | (((flags) & 0xff) << 24)) +#define N_SET_MAGIC(exec, magic) \ + ((exec).a_info = (((exec).a_info & 0xffff0000) | ((magic) & 0xffff))) + +#define N_SET_MACHTYPE(exec, machtype) \ + ((exec).a_info = \ + ((exec).a_info&0xff00ffff) | ((((int)(machtype))&0xff) << 16)) + +#define N_SET_FLAGS(exec, flags) \ + ((exec).a_info = \ + ((exec).a_info&0x00ffffff) | (((flags) & 0xff) << 24)) + +/* Code indicating object file or impure executable. */ +#define OMAGIC 0407 +/* Code indicating pure executable. */ +#define NMAGIC 0410 +/* Code indicating demand-paged executable. */ +#define ZMAGIC 0413 +/* This indicates a demand-paged executable with the header in the text. + The first page is unmapped to help trap NULL pointer references */ +#define QMAGIC 0314 + +/* Code indicating core file. */ +#define CMAGIC 0421 + +#if !defined (N_BADMAG) +#define N_BADMAG(x) (N_MAGIC(x) != OMAGIC \ + && N_MAGIC(x) != NMAGIC \ + && N_MAGIC(x) != ZMAGIC \ + && N_MAGIC(x) != QMAGIC) +#endif + +#define _N_HDROFF(x) (1024 - sizeof (struct exec)) + +#if !defined (N_TXTOFF) +#define N_TXTOFF(x) \ + (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : \ + (N_MAGIC(x) == QMAGIC ? 0 : sizeof (struct exec))) +#endif + +#if !defined (N_DATOFF) +#define N_DATOFF(x) (N_TXTOFF(x) + (x).a_text) +#endif + +#if !defined (N_TRELOFF) +#define N_TRELOFF(x) (N_DATOFF(x) + (x).a_data) +#endif + +#if !defined (N_DRELOFF) +#define N_DRELOFF(x) (N_TRELOFF(x) + N_TRSIZE(x)) +#endif + +#if !defined (N_SYMOFF) +#define N_SYMOFF(x) (N_DRELOFF(x) + N_DRSIZE(x)) +#endif + +#if !defined (N_STROFF) +#define N_STROFF(x) (N_SYMOFF(x) + N_SYMSIZE(x)) +#endif + +/* Address of text segment in memory after it is loaded. */ +#if !defined (N_TXTADDR) +#define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0) +#endif + +/* Address of data segment in memory after it is loaded. + Note that it is up to you to define SEGMENT_SIZE + on machines not listed here. 
*/ +#if defined(vax) || defined(hp300) || defined(pyr) +#define SEGMENT_SIZE page_size +#endif +#ifdef sony +#define SEGMENT_SIZE 0x2000 +#endif /* Sony. */ +#ifdef is68k +#define SEGMENT_SIZE 0x20000 +#endif +#if defined(m68k) && defined(PORTAR) +#define PAGE_SIZE 0x400 +#define SEGMENT_SIZE PAGE_SIZE +#endif + +#ifdef linux +#ifdef __KERNEL__ +#include <asm/page.h> +#else +#include <unistd.h> +#endif +#if defined(__i386__) || defined(__mc68000__) +#define SEGMENT_SIZE 1024 +#else +#ifndef SEGMENT_SIZE +#ifdef __KERNEL__ +#define SEGMENT_SIZE PAGE_SIZE +#else +#define SEGMENT_SIZE getpagesize() +#endif +#endif +#endif +#endif + +#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE) + +#define _N_TXTENDADDR(x) (N_TXTADDR(x)+(x).a_text) + +#ifndef N_DATADDR +#define N_DATADDR(x) \ + (N_MAGIC(x)==OMAGIC? (_N_TXTENDADDR(x)) \ + : (_N_SEGMENT_ROUND (_N_TXTENDADDR(x)))) +#endif + +/* Address of bss segment in memory after it is loaded. */ +#if !defined (N_BSSADDR) +#define N_BSSADDR(x) (N_DATADDR(x) + (x).a_data) +#endif + +#if !defined (N_NLIST_DECLARED) +struct nlist { + union { + char *n_name; + struct nlist *n_next; + long n_strx; + } n_un; + unsigned char n_type; + char n_other; + short n_desc; + unsigned long n_value; +}; +#endif /* no N_NLIST_DECLARED. */ + +#if !defined (N_UNDF) +#define N_UNDF 0 +#endif +#if !defined (N_ABS) +#define N_ABS 2 +#endif +#if !defined (N_TEXT) +#define N_TEXT 4 +#endif +#if !defined (N_DATA) +#define N_DATA 6 +#endif +#if !defined (N_BSS) +#define N_BSS 8 +#endif +#if !defined (N_FN) +#define N_FN 15 +#endif + +#if !defined (N_EXT) +#define N_EXT 1 +#endif +#if !defined (N_TYPE) +#define N_TYPE 036 +#endif +#if !defined (N_STAB) +#define N_STAB 0340 +#endif + +/* The following type indicates the definition of a symbol as being + an indirect reference to another symbol. The other symbol + appears as an undefined reference, immediately following this symbol. + + Indirection is asymmetrical. The other symbol's value will be used + to satisfy requests for the indirect symbol, but not vice versa. + If the other symbol does not have a definition, libraries will + be searched to find a definition. */ +#define N_INDR 0xa + +/* The following symbols refer to set elements. + All the N_SET[ATDB] symbols with the same name form one set. + Space is allocated for the set in the text section, and each set + element's value is stored into one word of the space. + The first word of the space is the length of the set (number of elements). + + The address of the set is made into an N_SETV symbol + whose name is the same as the name of the set. + This symbol acts like a N_DATA global symbol + in that it can satisfy undefined external references. */ + +/* These appear as input to LD, in a .o file. */ +#define N_SETA 0x14 /* Absolute set element symbol */ +#define N_SETT 0x16 /* Text set element symbol */ +#define N_SETD 0x18 /* Data set element symbol */ +#define N_SETB 0x1A /* Bss set element symbol */ + +/* This is output from LD. */ +#define N_SETV 0x1C /* Pointer to set vector in data area. */ + +#if !defined (N_RELOCATION_INFO_DECLARED) +/* This structure describes a single relocation to be performed. + The text-relocation section of the file is a vector of these structures, + all of which apply to the text section. + Likewise, the data-relocation section applies to the data section. */ + +struct relocation_info +{ + /* Address (within segment) to be relocated. */ + int r_address; + /* The meaning of r_symbolnum depends on r_extern. 
*/ + unsigned int r_symbolnum:24; + /* Nonzero means value is a pc-relative offset + and it should be relocated for changes in its own address + as well as for changes in the symbol or section specified. */ + unsigned int r_pcrel:1; + /* Length (as exponent of 2) of the field to be relocated. + Thus, a value of 2 indicates 1<<2 bytes. */ + unsigned int r_length:2; + /* 1 => relocate with value of symbol. + r_symbolnum is the index of the symbol + in file's the symbol table. + 0 => relocate with the address of a segment. + r_symbolnum is N_TEXT, N_DATA, N_BSS or N_ABS + (the N_EXT bit may be set also, but signifies nothing). */ + unsigned int r_extern:1; + /* Four bits that aren't used, but when writing an object file + it is desirable to clear them. */ +#ifdef NS32K + unsigned r_bsr:1; + unsigned r_disp:1; + unsigned r_pad:2; +#else + unsigned int r_pad:4; +#endif +}; +#endif /* no N_RELOCATION_INFO_DECLARED. */ + +#endif /*__ASSEMBLY__ */ +#endif /* __A_OUT_GNU_H__ */ diff --git a/include/linux/ac97_codec.h b/include/linux/ac97_codec.h new file mode 100644 index 00000000..0260c3e7 --- /dev/null +++ b/include/linux/ac97_codec.h @@ -0,0 +1,362 @@ +#ifndef _AC97_CODEC_H_ +#define _AC97_CODEC_H_ + +#include <linux/types.h> +#include <linux/soundcard.h> + +/* AC97 1.0 */ +#define AC97_RESET 0x0000 // +#define AC97_MASTER_VOL_STEREO 0x0002 // Line Out +#define AC97_HEADPHONE_VOL 0x0004 // +#define AC97_MASTER_VOL_MONO 0x0006 // TAD Output +#define AC97_MASTER_TONE 0x0008 // +#define AC97_PCBEEP_VOL 0x000a // none +#define AC97_PHONE_VOL 0x000c // TAD Input (mono) +#define AC97_MIC_VOL 0x000e // MIC Input (mono) +#define AC97_LINEIN_VOL 0x0010 // Line Input (stereo) +#define AC97_CD_VOL 0x0012 // CD Input (stereo) +#define AC97_VIDEO_VOL 0x0014 // none +#define AC97_AUX_VOL 0x0016 // Aux Input (stereo) +#define AC97_PCMOUT_VOL 0x0018 // Wave Output (stereo) +#define AC97_RECORD_SELECT 0x001a // +#define AC97_RECORD_GAIN 0x001c +#define AC97_RECORD_GAIN_MIC 0x001e +#define AC97_GENERAL_PURPOSE 0x0020 +#define AC97_3D_CONTROL 0x0022 +#define AC97_MODEM_RATE 0x0024 +#define AC97_POWER_CONTROL 0x0026 + +/* AC'97 2.0 */ +#define AC97_EXTENDED_ID 0x0028 /* Extended Audio ID */ +#define AC97_EXTENDED_STATUS 0x002A /* Extended Audio Status */ +#define AC97_PCM_FRONT_DAC_RATE 0x002C /* PCM Front DAC Rate */ +#define AC97_PCM_SURR_DAC_RATE 0x002E /* PCM Surround DAC Rate */ +#define AC97_PCM_LFE_DAC_RATE 0x0030 /* PCM LFE DAC Rate */ +#define AC97_PCM_LR_ADC_RATE 0x0032 /* PCM LR ADC Rate */ +#define AC97_PCM_MIC_ADC_RATE 0x0034 /* PCM MIC ADC Rate */ +#define AC97_CENTER_LFE_MASTER 0x0036 /* Center + LFE Master Volume */ +#define AC97_SURROUND_MASTER 0x0038 /* Surround (Rear) Master Volume */ +#define AC97_RESERVED_3A 0x003A /* Reserved in AC '97 < 2.2 */ + +/* AC'97 2.2 */ +#define AC97_SPDIF_CONTROL 0x003A /* S/PDIF Control */ + +/* range 0x3c-0x58 - MODEM */ +#define AC97_EXTENDED_MODEM_ID 0x003C +#define AC97_EXTEND_MODEM_STAT 0x003E +#define AC97_LINE1_RATE 0x0040 +#define AC97_LINE2_RATE 0x0042 +#define AC97_HANDSET_RATE 0x0044 +#define AC97_LINE1_LEVEL 0x0046 +#define AC97_LINE2_LEVEL 0x0048 +#define AC97_HANDSET_LEVEL 0x004A +#define AC97_GPIO_CONFIG 0x004C +#define AC97_GPIO_POLARITY 0x004E +#define AC97_GPIO_STICKY 0x0050 +#define AC97_GPIO_WAKE_UP 0x0052 +#define AC97_GPIO_STATUS 0x0054 +#define AC97_MISC_MODEM_STAT 0x0056 +#define AC97_RESERVED_58 0x0058 + +/* registers 0x005a - 0x007a are vendor reserved */ + +#define AC97_VENDOR_ID1 0x007c +#define AC97_VENDOR_ID2 0x007e + +/* volume 
control bit defines */ +#define AC97_MUTE 0x8000 +#define AC97_MICBOOST 0x0040 +#define AC97_LEFTVOL 0x3f00 +#define AC97_RIGHTVOL 0x003f + +/* record mux defines */ +#define AC97_RECMUX_MIC 0x0000 +#define AC97_RECMUX_CD 0x0101 +#define AC97_RECMUX_VIDEO 0x0202 +#define AC97_RECMUX_AUX 0x0303 +#define AC97_RECMUX_LINE 0x0404 +#define AC97_RECMUX_STEREO_MIX 0x0505 +#define AC97_RECMUX_MONO_MIX 0x0606 +#define AC97_RECMUX_PHONE 0x0707 + +/* general purpose register bit defines */ +#define AC97_GP_LPBK 0x0080 /* Loopback mode */ +#define AC97_GP_MS 0x0100 /* Mic Select 0=Mic1, 1=Mic2 */ +#define AC97_GP_MIX 0x0200 /* Mono output select 0=Mix, 1=Mic */ +#define AC97_GP_RLBK 0x0400 /* Remote Loopback - Modem line codec */ +#define AC97_GP_LLBK 0x0800 /* Local Loopback - Modem Line codec */ +#define AC97_GP_LD 0x1000 /* Loudness 1=on */ +#define AC97_GP_3D 0x2000 /* 3D Enhancement 1=on */ +#define AC97_GP_ST 0x4000 /* Stereo Enhancement 1=on */ +#define AC97_GP_POP 0x8000 /* Pcm Out Path, 0=pre 3D, 1=post 3D */ + +/* extended audio status and control bit defines */ +#define AC97_EA_VRA 0x0001 /* Variable bit rate enable bit */ +#define AC97_EA_DRA 0x0002 /* Double-rate audio enable bit */ +#define AC97_EA_SPDIF 0x0004 /* S/PDIF Enable bit */ +#define AC97_EA_VRM 0x0008 /* Variable bit rate for MIC enable bit */ +#define AC97_EA_CDAC 0x0040 /* PCM Center DAC is ready (Read only) */ +#define AC97_EA_SDAC 0x0040 /* PCM Surround DACs are ready (Read only) */ +#define AC97_EA_LDAC 0x0080 /* PCM LFE DAC is ready (Read only) */ +#define AC97_EA_MDAC 0x0100 /* MIC ADC is ready (Read only) */ +#define AC97_EA_SPCV 0x0400 /* S/PDIF configuration valid (Read only) */ +#define AC97_EA_PRI 0x0800 /* Turns the PCM Center DAC off */ +#define AC97_EA_PRJ 0x1000 /* Turns the PCM Surround DACs off */ +#define AC97_EA_PRK 0x2000 /* Turns the PCM LFE DAC off */ +#define AC97_EA_PRL 0x4000 /* Turns the MIC ADC off */ +#define AC97_EA_SLOT_MASK 0xffcf /* Mask for slot assignment bits */ +#define AC97_EA_SPSA_3_4 0x0000 /* Slot assigned to 3 & 4 */ +#define AC97_EA_SPSA_7_8 0x0010 /* Slot assigned to 7 & 8 */ +#define AC97_EA_SPSA_6_9 0x0020 /* Slot assigned to 6 & 9 */ +#define AC97_EA_SPSA_10_11 0x0030 /* Slot assigned to 10 & 11 */ + +/* S/PDIF control bit defines */ +#define AC97_SC_PRO 0x0001 /* Professional status */ +#define AC97_SC_NAUDIO 0x0002 /* Non audio stream */ +#define AC97_SC_COPY 0x0004 /* Copyright status */ +#define AC97_SC_PRE 0x0008 /* Preemphasis status */ +#define AC97_SC_CC_MASK 0x07f0 /* Category Code mask */ +#define AC97_SC_L 0x0800 /* Generation Level status */ +#define AC97_SC_SPSR_MASK 0xcfff /* S/PDIF Sample Rate bits */ +#define AC97_SC_SPSR_44K 0x0000 /* Use 44.1kHz Sample rate */ +#define AC97_SC_SPSR_48K 0x2000 /* Use 48kHz Sample rate */ +#define AC97_SC_SPSR_32K 0x3000 /* Use 32kHz Sample rate */ +#define AC97_SC_DRS 0x4000 /* Double Rate S/PDIF */ +#define AC97_SC_V 0x8000 /* Validity status */ + +/* powerdown control and status bit defines */ + +/* status */ +#define AC97_PWR_MDM 0x0010 /* Modem section ready */ +#define AC97_PWR_REF 0x0008 /* Vref nominal */ +#define AC97_PWR_ANL 0x0004 /* Analog section ready */ +#define AC97_PWR_DAC 0x0002 /* DAC section ready */ +#define AC97_PWR_ADC 0x0001 /* ADC section ready */ + +/* control */ +#define AC97_PWR_PR0 0x0100 /* ADC and Mux powerdown */ +#define AC97_PWR_PR1 0x0200 /* DAC powerdown */ +#define AC97_PWR_PR2 0x0400 /* Output mixer powerdown (Vref on) */ +#define AC97_PWR_PR3 0x0800 /* Output mixer powerdown (Vref off) */ 
+#define AC97_PWR_PR4 0x1000 /* AC-link powerdown */ +#define AC97_PWR_PR5 0x2000 /* Internal Clk disable */ +#define AC97_PWR_PR6 0x4000 /* HP amp powerdown */ +#define AC97_PWR_PR7 0x8000 /* Modem off - if supported */ + +/* extended audio ID register bit defines */ +#define AC97_EXTID_VRA 0x0001 +#define AC97_EXTID_DRA 0x0002 +#define AC97_EXTID_SPDIF 0x0004 +#define AC97_EXTID_VRM 0x0008 +#define AC97_EXTID_DSA0 0x0010 +#define AC97_EXTID_DSA1 0x0020 +#define AC97_EXTID_CDAC 0x0040 +#define AC97_EXTID_SDAC 0x0080 +#define AC97_EXTID_LDAC 0x0100 +#define AC97_EXTID_AMAP 0x0200 +#define AC97_EXTID_REV0 0x0400 +#define AC97_EXTID_REV1 0x0800 +#define AC97_EXTID_ID0 0x4000 +#define AC97_EXTID_ID1 0x8000 + +/* extended status register bit defines */ +#define AC97_EXTSTAT_VRA 0x0001 +#define AC97_EXTSTAT_DRA 0x0002 +#define AC97_EXTSTAT_SPDIF 0x0004 +#define AC97_EXTSTAT_VRM 0x0008 +#define AC97_EXTSTAT_SPSA0 0x0010 +#define AC97_EXTSTAT_SPSA1 0x0020 +#define AC97_EXTSTAT_CDAC 0x0040 +#define AC97_EXTSTAT_SDAC 0x0080 +#define AC97_EXTSTAT_LDAC 0x0100 +#define AC97_EXTSTAT_MADC 0x0200 +#define AC97_EXTSTAT_SPCV 0x0400 +#define AC97_EXTSTAT_PRI 0x0800 +#define AC97_EXTSTAT_PRJ 0x1000 +#define AC97_EXTSTAT_PRK 0x2000 +#define AC97_EXTSTAT_PRL 0x4000 + +/* extended audio ID register bit defines */ +#define AC97_EXTID_VRA 0x0001 +#define AC97_EXTID_DRA 0x0002 +#define AC97_EXTID_SPDIF 0x0004 +#define AC97_EXTID_VRM 0x0008 +#define AC97_EXTID_DSA0 0x0010 +#define AC97_EXTID_DSA1 0x0020 +#define AC97_EXTID_CDAC 0x0040 +#define AC97_EXTID_SDAC 0x0080 +#define AC97_EXTID_LDAC 0x0100 +#define AC97_EXTID_AMAP 0x0200 +#define AC97_EXTID_REV0 0x0400 +#define AC97_EXTID_REV1 0x0800 +#define AC97_EXTID_ID0 0x4000 +#define AC97_EXTID_ID1 0x8000 + +/* extended status register bit defines */ +#define AC97_EXTSTAT_VRA 0x0001 +#define AC97_EXTSTAT_DRA 0x0002 +#define AC97_EXTSTAT_SPDIF 0x0004 +#define AC97_EXTSTAT_VRM 0x0008 +#define AC97_EXTSTAT_SPSA0 0x0010 +#define AC97_EXTSTAT_SPSA1 0x0020 +#define AC97_EXTSTAT_CDAC 0x0040 +#define AC97_EXTSTAT_SDAC 0x0080 +#define AC97_EXTSTAT_LDAC 0x0100 +#define AC97_EXTSTAT_MADC 0x0200 +#define AC97_EXTSTAT_SPCV 0x0400 +#define AC97_EXTSTAT_PRI 0x0800 +#define AC97_EXTSTAT_PRJ 0x1000 +#define AC97_EXTSTAT_PRK 0x2000 +#define AC97_EXTSTAT_PRL 0x4000 + +/* useful power states */ +#define AC97_PWR_D0 0x0000 /* everything on */ +#define AC97_PWR_D1 AC97_PWR_PR0|AC97_PWR_PR1|AC97_PWR_PR4 +#define AC97_PWR_D2 AC97_PWR_PR0|AC97_PWR_PR1|AC97_PWR_PR2|AC97_PWR_PR3|AC97_PWR_PR4 +#define AC97_PWR_D3 AC97_PWR_PR0|AC97_PWR_PR1|AC97_PWR_PR2|AC97_PWR_PR3|AC97_PWR_PR4 +#define AC97_PWR_ANLOFF AC97_PWR_PR2|AC97_PWR_PR3 /* analog section off */ + +/* Total number of defined registers. */ +#define AC97_REG_CNT 64 + + +/* OSS interface to the ac97s.. */ +#define AC97_STEREO_MASK (SOUND_MASK_VOLUME|SOUND_MASK_PCM|\ + SOUND_MASK_LINE|SOUND_MASK_CD|\ + SOUND_MASK_ALTPCM|SOUND_MASK_IGAIN|\ + SOUND_MASK_LINE1|SOUND_MASK_VIDEO) + +#define AC97_SUPPORTED_MASK (AC97_STEREO_MASK | \ + SOUND_MASK_BASS|SOUND_MASK_TREBLE|\ + SOUND_MASK_SPEAKER|SOUND_MASK_MIC|\ + SOUND_MASK_PHONEIN|SOUND_MASK_PHONEOUT) + +#define AC97_RECORD_MASK (SOUND_MASK_MIC|\ + SOUND_MASK_CD|SOUND_MASK_IGAIN|SOUND_MASK_VIDEO|\ + SOUND_MASK_LINE1| SOUND_MASK_LINE|\ + SOUND_MASK_PHONEIN) + +/* original check is not good enough in case FOO is greater than + * SOUND_MIXER_NRDEVICES because the supported_mixers has exactly + * SOUND_MIXER_NRDEVICES elements. 
+ * before matching the given mixer against the bitmask in supported_mixers we + * check if mixer number exceeds maximum allowed size which is as mentioned + * above SOUND_MIXER_NRDEVICES */ +#define supported_mixer(CODEC,FOO) ((FOO >= 0) && \ + (FOO < SOUND_MIXER_NRDEVICES) && \ + (CODEC)->supported_mixers & (1<<FOO) ) + +struct ac97_codec { + /* Linked list of codecs */ + struct list_head list; + + /* AC97 controller connected with */ + void *private_data; + + char *name; + int id; + int dev_mixer; + int type; + u32 model; + + unsigned int modem:1; + + struct ac97_ops *codec_ops; + + /* controller specific lower leverl ac97 accessing routines. + must be re-entrant safe */ + u16 (*codec_read) (struct ac97_codec *codec, u8 reg); + void (*codec_write) (struct ac97_codec *codec, u8 reg, u16 val); + + /* Wait for codec-ready. Ok to sleep here. */ + void (*codec_wait) (struct ac97_codec *codec); + + /* callback used by helper drivers for interesting ac97 setups */ + void (*codec_unregister) (struct ac97_codec *codec); + + struct ac97_driver *driver; + void *driver_private; /* Private data for the driver */ + + spinlock_t lock; + + /* OSS mixer masks */ + int modcnt; + int supported_mixers; + int stereo_mixers; + int record_sources; + + /* Property flags */ + int flags; + + int bit_resolution; + + /* OSS mixer interface */ + int (*read_mixer) (struct ac97_codec *codec, int oss_channel); + void (*write_mixer)(struct ac97_codec *codec, int oss_channel, + unsigned int left, unsigned int right); + int (*recmask_io) (struct ac97_codec *codec, int rw, int mask); + int (*mixer_ioctl)(struct ac97_codec *codec, unsigned int cmd, unsigned long arg); + + /* saved OSS mixer states */ + unsigned int mixer_state[SOUND_MIXER_NRDEVICES]; + + /* Software Modem interface */ + int (*modem_ioctl)(struct ac97_codec *codec, unsigned int cmd, unsigned long arg); +}; + +/* + * Operation structures for each known AC97 chip + */ + +struct ac97_ops +{ + /* Initialise */ + int (*init)(struct ac97_codec *c); + /* Amplifier control */ + int (*amplifier)(struct ac97_codec *codec, int on); + /* Digital mode control */ + int (*digital)(struct ac97_codec *codec, int slots, int rate, int mode); +#define AUDIO_DIGITAL 0x8000 +#define AUDIO_PRO 0x4000 +#define AUDIO_DRS 0x2000 +#define AUDIO_CCMASK 0x003F + +#define AC97_DELUDED_MODEM 1 /* Audio codec reports its a modem */ +#define AC97_NO_PCM_VOLUME 2 /* Volume control is missing */ +#define AC97_DEFAULT_POWER_OFF 4 /* Needs warm reset to power up */ +}; + +extern int ac97_probe_codec(struct ac97_codec *); + +extern struct ac97_codec *ac97_alloc_codec(void); +extern void ac97_release_codec(struct ac97_codec *codec); + +struct ac97_driver { + struct list_head list; + char *name; + u32 codec_id; + u32 codec_mask; + int (*probe) (struct ac97_codec *codec, struct ac97_driver *driver); + void (*remove) (struct ac97_codec *codec, struct ac97_driver *driver); +}; + +/* quirk types */ +enum { + AC97_TUNE_DEFAULT = -1, /* use default from quirk list (not valid in list) */ + AC97_TUNE_NONE = 0, /* nothing extra to do */ + AC97_TUNE_HP_ONLY, /* headphone (true line-out) control as master only */ + AC97_TUNE_SWAP_HP, /* swap headphone and master controls */ + AC97_TUNE_SWAP_SURROUND, /* swap master and surround controls */ + AC97_TUNE_AD_SHARING, /* for AD1985, turn on OMS bit and use headphone */ + AC97_TUNE_ALC_JACK, /* for Realtek, enable JACK detection */ +}; + +struct ac97_quirk { + unsigned short vendor; /* PCI vendor id */ + unsigned short device; /* PCI device id */ + unsigned short 
mask; /* device id bit mask, 0 = accept all */ + const char *name; /* name shown as info */ + int type; /* quirk type above */ +}; + +#endif /* _AC97_CODEC_H_ */ diff --git a/include/linux/acct.h b/include/linux/acct.h new file mode 100644 index 00000000..d537aa0e --- /dev/null +++ b/include/linux/acct.h @@ -0,0 +1,215 @@ +/* + * BSD Process Accounting for Linux - Definitions + * + * Author: Marco van Wieringen (mvw@planets.elm.net) + * + * This header file contains the definitions needed to implement + * BSD-style process accounting. The kernel accounting code and all + * user-level programs that try to do something useful with the + * process accounting log must include this file. + * + * Copyright (C) 1995 - 1997 Marco van Wieringen - ELM Consultancy B.V. + * + */ + +#ifndef _LINUX_ACCT_H +#define _LINUX_ACCT_H + +#include <linux/types.h> + +#include <asm/param.h> +#include <asm/byteorder.h> + +/* + * comp_t is a 16-bit "floating" point number with a 3-bit base 8 + * exponent and a 13-bit fraction. + * comp2_t is 24-bit with 5-bit base 2 exponent and 20 bit fraction + * (leading 1 not stored). + * See linux/kernel/acct.c for the specific encoding systems used. + */ + +typedef __u16 comp_t; +typedef __u32 comp2_t; + +/* + * accounting file record + * + * This structure contains all of the information written out to the + * process accounting file whenever a process exits. + */ + +#define ACCT_COMM 16 + +struct acct +{ + char ac_flag; /* Flags */ + char ac_version; /* Always set to ACCT_VERSION */ + /* for binary compatibility back until 2.0 */ + __u16 ac_uid16; /* LSB of Real User ID */ + __u16 ac_gid16; /* LSB of Real Group ID */ + __u16 ac_tty; /* Control Terminal */ + __u32 ac_btime; /* Process Creation Time */ + comp_t ac_utime; /* User Time */ + comp_t ac_stime; /* System Time */ + comp_t ac_etime; /* Elapsed Time */ + comp_t ac_mem; /* Average Memory Usage */ + comp_t ac_io; /* Chars Transferred */ + comp_t ac_rw; /* Blocks Read or Written */ + comp_t ac_minflt; /* Minor Pagefaults */ + comp_t ac_majflt; /* Major Pagefaults */ + comp_t ac_swaps; /* Number of Swaps */ +/* m68k had no padding here. */ +#if !defined(CONFIG_M68K) || !defined(__KERNEL__) + __u16 ac_ahz; /* AHZ */ +#endif + __u32 ac_exitcode; /* Exitcode */ + char ac_comm[ACCT_COMM + 1]; /* Command Name */ + __u8 ac_etime_hi; /* Elapsed Time MSB */ + __u16 ac_etime_lo; /* Elapsed Time LSB */ + __u32 ac_uid; /* Real User ID */ + __u32 ac_gid; /* Real Group ID */ +}; + +struct acct_v3 +{ + char ac_flag; /* Flags */ + char ac_version; /* Always set to ACCT_VERSION */ + __u16 ac_tty; /* Control Terminal */ + __u32 ac_exitcode; /* Exitcode */ + __u32 ac_uid; /* Real User ID */ + __u32 ac_gid; /* Real Group ID */ + __u32 ac_pid; /* Process ID */ + __u32 ac_ppid; /* Parent Process ID */ + __u32 ac_btime; /* Process Creation Time */ +#ifdef __KERNEL__ + __u32 ac_etime; /* Elapsed Time */ +#else + float ac_etime; /* Elapsed Time */ +#endif + comp_t ac_utime; /* User Time */ + comp_t ac_stime; /* System Time */ + comp_t ac_mem; /* Average Memory Usage */ + comp_t ac_io; /* Chars Transferred */ + comp_t ac_rw; /* Blocks Read or Written */ + comp_t ac_minflt; /* Minor Pagefaults */ + comp_t ac_majflt; /* Major Pagefaults */ + comp_t ac_swaps; /* Number of Swaps */ + char ac_comm[ACCT_COMM]; /* Command Name */ +}; + +/* + * accounting flags + */ + /* bit set when the process ... */ +#define AFORK 0x01 /* ... executed fork, but did not exec */ +#define ASU 0x02 /* ... used super-user privileges */ +#define ACOMPAT 0x04 /* ... 
used compatibility mode (VAX only not used) */ +#define ACORE 0x08 /* ... dumped core */ +#define AXSIG 0x10 /* ... was killed by a signal */ + +#ifdef __BIG_ENDIAN +#define ACCT_BYTEORDER 0x80 /* accounting file is big endian */ +#else +#define ACCT_BYTEORDER 0x00 /* accounting file is little endian */ +#endif + +#ifdef __KERNEL__ + + +#ifdef CONFIG_BSD_PROCESS_ACCT +struct vfsmount; +struct super_block; +struct pacct_struct; +struct pid_namespace; +extern int acct_parm[]; /* for sysctl */ +extern void acct_auto_close_mnt(struct vfsmount *m); +extern void acct_auto_close(struct super_block *sb); +extern void acct_collect(long exitcode, int group_dead); +extern void acct_process(void); +extern void acct_exit_ns(struct pid_namespace *); +#else +#define acct_auto_close_mnt(x) do { } while (0) +#define acct_auto_close(x) do { } while (0) +#define acct_collect(x,y) do { } while (0) +#define acct_process() do { } while (0) +#define acct_exit_ns(ns) do { } while (0) +#endif + +/* + * ACCT_VERSION numbers as yet defined: + * 0: old format (until 2.6.7) with 16 bit uid/gid + * 1: extended variant (binary compatible on M68K) + * 2: extended variant (binary compatible on everything except M68K) + * 3: new binary incompatible format (64 bytes) + * 4: new binary incompatible format (128 bytes) + * 5: new binary incompatible format (128 bytes, second half) + * + */ + +#undef ACCT_VERSION +#undef AHZ + +#ifdef CONFIG_BSD_PROCESS_ACCT_V3 +#define ACCT_VERSION 3 +#define AHZ 100 +typedef struct acct_v3 acct_t; +#else +#ifdef CONFIG_M68K +#define ACCT_VERSION 1 +#else +#define ACCT_VERSION 2 +#endif +#define AHZ (USER_HZ) +typedef struct acct acct_t; +#endif + +#else +#define ACCT_VERSION 2 +#define AHZ (HZ) +#endif /* __KERNEL */ + +#ifdef __KERNEL__ +#include <linux/jiffies.h> +/* + * Yet another set of HZ to *HZ helper functions. + * See <linux/jiffies.h> for the original. + */ + +static inline u32 jiffies_to_AHZ(unsigned long x) +{ +#if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0 +# if HZ < AHZ + return x * (AHZ / HZ); +# else + return x / (HZ / AHZ); +# endif +#else + u64 tmp = (u64)x * TICK_NSEC; + do_div(tmp, (NSEC_PER_SEC / AHZ)); + return (long)tmp; +#endif +} + +static inline u64 nsec_to_AHZ(u64 x) +{ +#if (NSEC_PER_SEC % AHZ) == 0 + do_div(x, (NSEC_PER_SEC / AHZ)); +#elif (AHZ % 512) == 0 + x *= AHZ/512; + do_div(x, (NSEC_PER_SEC / 512)); +#else + /* + * max relative error 5.7e-8 (1.8s per year) for AHZ <= 1024, + * overflow after 64.99 years. + * exact for AHZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ... + */ + x *= 9; + do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (AHZ/2)) + / AHZ)); +#endif + return x; +} + +#endif /* __KERNEL */ + +#endif /* _LINUX_ACCT_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h new file mode 100644 index 00000000..f421dd84 --- /dev/null +++ b/include/linux/acpi.h @@ -0,0 +1,385 @@ +/* + * acpi.h - ACPI Interface + * + * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef _LINUX_ACPI_H +#define _LINUX_ACPI_H + +#include <linux/ioport.h> /* for struct resource */ + +#ifdef CONFIG_ACPI + +#ifndef _LINUX +#define _LINUX +#endif + +#include <linux/list.h> +#include <linux/mod_devicetable.h> + +#include <acpi/acpi.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> +#include <acpi/acpi_numa.h> +#include <asm/acpi.h> + +enum acpi_irq_model_id { + ACPI_IRQ_MODEL_PIC = 0, + ACPI_IRQ_MODEL_IOAPIC, + ACPI_IRQ_MODEL_IOSAPIC, + ACPI_IRQ_MODEL_PLATFORM, + ACPI_IRQ_MODEL_COUNT +}; + +extern enum acpi_irq_model_id acpi_irq_model; + +enum acpi_interrupt_id { + ACPI_INTERRUPT_PMI = 1, + ACPI_INTERRUPT_INIT, + ACPI_INTERRUPT_CPEI, + ACPI_INTERRUPT_COUNT +}; + +#define ACPI_SPACE_MEM 0 + +enum acpi_address_range_id { + ACPI_ADDRESS_RANGE_MEMORY = 1, + ACPI_ADDRESS_RANGE_RESERVED = 2, + ACPI_ADDRESS_RANGE_ACPI = 3, + ACPI_ADDRESS_RANGE_NVS = 4, + ACPI_ADDRESS_RANGE_COUNT +}; + + +/* Table Handlers */ + +typedef int (*acpi_table_handler) (struct acpi_table_header *table); + +typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end); + +char * __acpi_map_table (unsigned long phys_addr, unsigned long size); +void __acpi_unmap_table(char *map, unsigned long size); +int early_acpi_boot_init(void); +int acpi_boot_init (void); +void acpi_boot_table_init (void); +int acpi_mps_check (void); +int acpi_numa_init (void); + +int acpi_table_init (void); +int acpi_table_parse (char *id, acpi_table_handler handler); +int __init acpi_table_parse_entries(char *id, unsigned long table_size, + int entry_id, acpi_table_entry_handler handler, unsigned int max_entries); +int acpi_table_parse_madt (enum acpi_madt_type id, acpi_table_entry_handler handler, unsigned int max_entries); +int acpi_parse_mcfg (struct acpi_table_header *header); +void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); + +/* the following four functions are architecture-dependent */ +void acpi_numa_slit_init (struct acpi_table_slit *slit); +void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); +void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); +void acpi_numa_arch_fixup(void); + +#ifdef CONFIG_ACPI_HOTPLUG_CPU +/* Arch dependent functions for cpu hotplug support */ +int acpi_map_lsapic(acpi_handle handle, int *pcpu); +int acpi_unmap_lsapic(int cpu); +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ + +int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); +int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); +void acpi_irq_stats_init(void); +extern u32 acpi_irq_handled; +extern u32 acpi_irq_not_handled; + +extern int sbf_port; +extern unsigned long acpi_realmode_flags; + +int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); +int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); +int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); + +#ifdef CONFIG_X86_IO_APIC +extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); +#else +#define acpi_get_override_irq(gsi, trigger, polarity) (-1) +#endif +/* + * This function 
undoes the effect of one call to acpi_register_gsi(). + * If this matches the last registration, any IRQ resources for gsi + * are freed. + */ +void acpi_unregister_gsi (u32 gsi); + +struct pci_dev; + +int acpi_pci_irq_enable (struct pci_dev *dev); +void acpi_penalize_isa_irq(int irq, int active); + +void acpi_pci_irq_disable (struct pci_dev *dev); + +struct acpi_pci_driver { + struct acpi_pci_driver *next; + int (*add)(acpi_handle handle); + void (*remove)(acpi_handle handle); +}; + +int acpi_pci_register_driver(struct acpi_pci_driver *driver); +void acpi_pci_unregister_driver(struct acpi_pci_driver *driver); + +extern int ec_read(u8 addr, u8 *val); +extern int ec_write(u8 addr, u8 val); +extern int ec_transaction(u8 command, + const u8 *wdata, unsigned wdata_len, + u8 *rdata, unsigned rdata_len); +extern acpi_handle ec_get_handle(void); + +#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) + +typedef void (*wmi_notify_handler) (u32 value, void *context); + +extern acpi_status wmi_evaluate_method(const char *guid, u8 instance, + u32 method_id, + const struct acpi_buffer *in, + struct acpi_buffer *out); +extern acpi_status wmi_query_block(const char *guid, u8 instance, + struct acpi_buffer *out); +extern acpi_status wmi_set_block(const char *guid, u8 instance, + const struct acpi_buffer *in); +extern acpi_status wmi_install_notify_handler(const char *guid, + wmi_notify_handler handler, void *data); +extern acpi_status wmi_remove_notify_handler(const char *guid); +extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); +extern bool wmi_has_guid(const char *guid); + +#endif /* CONFIG_ACPI_WMI */ + +#define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001 +#define ACPI_VIDEO_DEVICE_POSTING 0x0002 +#define ACPI_VIDEO_ROM_AVAILABLE 0x0004 +#define ACPI_VIDEO_BACKLIGHT 0x0008 +#define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010 +#define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020 +#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040 +#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080 +#define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100 +#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 +#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 +#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 + +#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) + +extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle); +extern long acpi_is_video_device(struct acpi_device *device); +extern int acpi_video_backlight_support(void); +extern int acpi_video_display_switch_support(void); + +#else + +static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle) +{ + return 0; +} + +static inline long acpi_is_video_device(struct acpi_device *device) +{ + return 0; +} + +static inline int acpi_video_backlight_support(void) +{ + return 0; +} + +static inline int acpi_video_display_switch_support(void) +{ + return 0; +} + +#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */ + +extern int acpi_blacklisted(void); +extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); +extern void acpi_osi_setup(char *str); + +#ifdef CONFIG_ACPI_NUMA +int acpi_get_pxm(acpi_handle handle); +int acpi_get_node(acpi_handle *handle); +#else +static inline int acpi_get_pxm(acpi_handle handle) +{ + return 0; +} +static inline int acpi_get_node(acpi_handle *handle) +{ + return 0; +} +#endif +extern int acpi_paddr_to_node(u64 start_addr, u64 size); + +extern int pnpacpi_disabled; + +#define PXM_INVAL (-1) + +int acpi_check_resource_conflict(const 
struct resource *res); + +int acpi_check_region(resource_size_t start, resource_size_t n, + const char *name); + +int acpi_resources_are_enforced(void); + +#ifdef CONFIG_PM_SLEEP +void __init acpi_no_s4_hw_signature(void); +void __init acpi_old_suspend_ordering(void); +void __init acpi_nvs_nosave(void); +#endif /* CONFIG_PM_SLEEP */ + +struct acpi_osc_context { + char *uuid_str; /* uuid string */ + int rev; + struct acpi_buffer cap; /* arg2/arg3 */ + struct acpi_buffer ret; /* free by caller if success */ +}; + +#define OSC_QUERY_TYPE 0 +#define OSC_SUPPORT_TYPE 1 +#define OSC_CONTROL_TYPE 2 + +/* _OSC DW0 Definition */ +#define OSC_QUERY_ENABLE 1 +#define OSC_REQUEST_ERROR 2 +#define OSC_INVALID_UUID_ERROR 4 +#define OSC_INVALID_REVISION_ERROR 8 +#define OSC_CAPABILITIES_MASK_ERROR 16 + +acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); + +/* platform-wide _OSC bits */ +#define OSC_SB_PAD_SUPPORT 1 +#define OSC_SB_PPC_OST_SUPPORT 2 +#define OSC_SB_PR3_SUPPORT 4 +#define OSC_SB_CPUHP_OST_SUPPORT 8 +#define OSC_SB_APEI_SUPPORT 16 + +extern bool osc_sb_apei_support_acked; + +/* PCI defined _OSC bits */ +/* _OSC DW1 Definition (OS Support Fields) */ +#define OSC_EXT_PCI_CONFIG_SUPPORT 1 +#define OSC_ACTIVE_STATE_PWR_SUPPORT 2 +#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4 +#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8 +#define OSC_MSI_SUPPORT 16 +#define OSC_PCI_SUPPORT_MASKS 0x1f + +/* _OSC DW1 Definition (OS Control Fields) */ +#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1 +#define OSC_SHPC_NATIVE_HP_CONTROL 2 +#define OSC_PCI_EXPRESS_PME_CONTROL 4 +#define OSC_PCI_EXPRESS_AER_CONTROL 8 +#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16 + +#define OSC_PCI_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \ + OSC_SHPC_NATIVE_HP_CONTROL | \ + OSC_PCI_EXPRESS_PME_CONTROL | \ + OSC_PCI_EXPRESS_AER_CONTROL | \ + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) + +#define OSC_PCI_NATIVE_HOTPLUG (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \ + OSC_SHPC_NATIVE_HP_CONTROL) + +extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, + u32 *mask, u32 req); +extern void acpi_early_init(void); + +extern int acpi_nvs_register(__u64 start, __u64 size); + +extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), + void *data); + +#else /* !CONFIG_ACPI */ + +#define acpi_disabled 1 + +static inline void acpi_early_init(void) { } + +static inline int early_acpi_boot_init(void) +{ + return 0; +} +static inline int acpi_boot_init(void) +{ + return 0; +} + +static inline void acpi_boot_table_init(void) +{ + return; +} + +static inline int acpi_mps_check(void) +{ + return 0; +} + +static inline int acpi_check_resource_conflict(struct resource *res) +{ + return 0; +} + +static inline int acpi_check_region(resource_size_t start, resource_size_t n, + const char *name) +{ + return 0; +} + +struct acpi_table_header; +static inline int acpi_table_parse(char *id, + int (*handler)(struct acpi_table_header *)) +{ + return -1; +} + +static inline int acpi_nvs_register(__u64 start, __u64 size) +{ + return 0; +} + +static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), + void *data) +{ + return 0; +} + +#endif /* !CONFIG_ACPI */ + +#ifdef CONFIG_ACPI +void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, + u32 pm1a_ctrl, u32 pm1b_ctrl)); + +acpi_status acpi_os_prepare_sleep(u8 sleep_state, + u32 pm1a_control, u32 pm1b_control); +#else +#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) +#endif + +#endif /*_LINUX_ACPI_H*/ diff --git 
a/include/linux/acpi_io.h b/include/linux/acpi_io.h new file mode 100644 index 00000000..b0ffa219 --- /dev/null +++ b/include/linux/acpi_io.h @@ -0,0 +1,18 @@ +#ifndef _ACPI_IO_H_ +#define _ACPI_IO_H_ + +#include <linux/io.h> +#include <acpi/acpi.h> + +static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, + acpi_size size) +{ + return ioremap_cache(phys, size); +} + +void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size); + +int acpi_os_map_generic_address(struct acpi_generic_address *addr); +void acpi_os_unmap_generic_address(struct acpi_generic_address *addr); + +#endif diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h new file mode 100644 index 00000000..1d0ef1ae --- /dev/null +++ b/include/linux/acpi_pmtmr.h @@ -0,0 +1,38 @@ +#ifndef _ACPI_PMTMR_H_ +#define _ACPI_PMTMR_H_ + +#include <linux/clocksource.h> + +/* Number of PMTMR ticks expected during calibration run */ +#define PMTMR_TICKS_PER_SEC 3579545 + +/* limit it to 24 bits */ +#define ACPI_PM_MASK CLOCKSOURCE_MASK(24) + +/* Overrun value */ +#define ACPI_PM_OVRRUN (1<<24) + +#ifdef CONFIG_X86_PM_TIMER + +extern u32 acpi_pm_read_verified(void); +extern u32 pmtmr_ioport; + +static inline u32 acpi_pm_read_early(void) +{ + if (!pmtmr_ioport) + return 0; + /* mask the output to 24 bits */ + return acpi_pm_read_verified() & ACPI_PM_MASK; +} + +#else + +static inline u32 acpi_pm_read_early(void) +{ + return 0; +} + +#endif + +#endif + diff --git a/include/linux/adb.h b/include/linux/adb.h new file mode 100644 index 00000000..63bca502 --- /dev/null +++ b/include/linux/adb.h @@ -0,0 +1,103 @@ +/* + * Definitions for ADB (Apple Desktop Bus) support. + */ +#ifndef __ADB_H +#define __ADB_H + +/* ADB commands */ +#define ADB_BUSRESET 0 +#define ADB_FLUSH(id) (0x01 | ((id) << 4)) +#define ADB_WRITEREG(id, reg) (0x08 | (reg) | ((id) << 4)) +#define ADB_READREG(id, reg) (0x0C | (reg) | ((id) << 4)) + +/* ADB default device IDs (upper 4 bits of ADB command byte) */ +#define ADB_DONGLE 1 /* "software execution control" devices */ +#define ADB_KEYBOARD 2 +#define ADB_MOUSE 3 +#define ADB_TABLET 4 +#define ADB_MODEM 5 +#define ADB_MISC 7 /* maybe a monitor */ + +#define ADB_RET_OK 0 +#define ADB_RET_TIMEOUT 3 + +/* The kind of ADB request. 
The controller may emulate some + or all of those CUDA/PMU packet kinds */ +#define ADB_PACKET 0 +#define CUDA_PACKET 1 +#define ERROR_PACKET 2 +#define TIMER_PACKET 3 +#define POWER_PACKET 4 +#define MACIIC_PACKET 5 +#define PMU_PACKET 6 +#define ADB_QUERY 7 + +/* ADB queries */ + +/* ADB_QUERY_GETDEVINFO + * Query ADB slot for device presence + * data[2] = id, rep[0] = orig addr, rep[1] = handler_id + */ +#define ADB_QUERY_GETDEVINFO 1 + +#ifdef __KERNEL__ + +struct adb_request { + unsigned char data[32]; + int nbytes; + unsigned char reply[32]; + int reply_len; + unsigned char reply_expected; + unsigned char sent; + unsigned char complete; + void (*done)(struct adb_request *); + void *arg; + struct adb_request *next; +}; + +struct adb_ids { + int nids; + unsigned char id[16]; +}; + +/* Structure which encapsulates a low-level ADB driver */ + +struct adb_driver { + char name[16]; + int (*probe)(void); + int (*init)(void); + int (*send_request)(struct adb_request *req, int sync); + int (*autopoll)(int devs); + void (*poll)(void); + int (*reset_bus)(void); +}; + +/* Values for adb_request flags */ +#define ADBREQ_REPLY 1 /* expect reply */ +#define ADBREQ_SYNC 2 /* poll until done */ +#define ADBREQ_NOSEND 4 /* build the request, but don't send it */ + +/* Messages sent thru the client_list notifier. You should NOT stop + the operation, at least not with this version */ +enum adb_message { + ADB_MSG_POWERDOWN, /* Currently called before sleep only */ + ADB_MSG_PRE_RESET, /* Called before resetting the bus */ + ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */ +}; +extern struct blocking_notifier_head adb_client_list; + +int adb_request(struct adb_request *req, void (*done)(struct adb_request *), + int flags, int nbytes, ...); +int adb_register(int default_id,int handler_id,struct adb_ids *ids, + void (*handler)(unsigned char *, int, int)); +int adb_unregister(int index); +void adb_poll(void); +void adb_input(unsigned char *, int, int); +int adb_reset_bus(void); + +int adb_try_handler_change(int address, int new_id); +int adb_get_infos(int address, int *original_address, int *handler_id); + +#endif /* __KERNEL__ */ + +#endif /* __ADB_H */ diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h new file mode 100644 index 00000000..b19801f7 --- /dev/null +++ b/include/linux/adfs_fs.h @@ -0,0 +1,63 @@ +#ifndef _ADFS_FS_H +#define _ADFS_FS_H + +#include <linux/types.h> +#include <linux/magic.h> + +/* + * Disc Record at disc address 0xc00 + */ +struct adfs_discrecord { + __u8 log2secsize; + __u8 secspertrack; + __u8 heads; + __u8 density; + __u8 idlen; + __u8 log2bpmb; + __u8 skew; + __u8 bootoption; + __u8 lowsector; + __u8 nzones; + __le16 zone_spare; + __le32 root; + __le32 disc_size; + __le16 disc_id; + __u8 disc_name[10]; + __le32 disc_type; + __le32 disc_size_high; + __u8 log2sharesize:4; + __u8 unused40:4; + __u8 big_flag:1; + __u8 unused41:1; + __u8 nzones_high; + __le32 format_version; + __le32 root_size; + __u8 unused52[60 - 52]; +}; + +#define ADFS_DISCRECORD (0xc00) +#define ADFS_DR_OFFSET (0x1c0) +#define ADFS_DR_SIZE 60 +#define ADFS_DR_SIZE_BITS (ADFS_DR_SIZE << 3) + +#ifdef __KERNEL__ +/* + * Calculate the boot block checksum on an ADFS drive. Note that this will + * appear to be correct if the sector contains all zeros, so also check that + * the disk size is non-zero!!! 
+ */ +static inline int adfs_checkbblk(unsigned char *ptr) +{ + unsigned int result = 0; + unsigned char *p = ptr + 511; + + do { + result = (result & 0xff) + (result >> 8); + result = result + *--p; + } while (p != ptr); + + return (result & 0xff) != ptr[511]; +} +#endif + +#endif diff --git a/include/linux/aer.h b/include/linux/aer.h new file mode 100644 index 00000000..544abdb2 --- /dev/null +++ b/include/linux/aer.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2006 Intel Corp. + * Tom Long Nguyen (tom.l.nguyen@intel.com) + * Zhang Yanmin (yanmin.zhang@intel.com) + */ + +#ifndef _AER_H_ +#define _AER_H_ + +struct aer_header_log_regs { + unsigned int dw0; + unsigned int dw1; + unsigned int dw2; + unsigned int dw3; +}; + +struct aer_capability_regs { + u32 header; + u32 uncor_status; + u32 uncor_mask; + u32 uncor_severity; + u32 cor_status; + u32 cor_mask; + u32 cap_control; + struct aer_header_log_regs header_log; + u32 root_command; + u32 root_status; + u16 cor_err_source; + u16 uncor_err_source; +}; + +#if defined(CONFIG_PCIEAER) +/* pci-e port driver needs this function to enable aer */ +extern int pci_enable_pcie_error_reporting(struct pci_dev *dev); +extern int pci_disable_pcie_error_reporting(struct pci_dev *dev); +extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); +#else +static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +{ + return -EINVAL; +} +#endif + +extern void cper_print_aer(const char *prefix, int cper_severity, + struct aer_capability_regs *aer); +extern int cper_severity_to_aer(int cper_severity); +extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, + int severity); +#endif //_AER_H_ + diff --git a/include/linux/affs_hardblocks.h b/include/linux/affs_hardblocks.h new file mode 100644 index 00000000..f1b948c1 --- /dev/null +++ b/include/linux/affs_hardblocks.h @@ -0,0 +1,68 @@ +#ifndef AFFS_HARDBLOCKS_H +#define AFFS_HARDBLOCKS_H + +#include <linux/types.h> + +/* Just the needed definitions for the RDB of an Amiga HD. 
*/ + +struct RigidDiskBlock { + __u32 rdb_ID; + __be32 rdb_SummedLongs; + __s32 rdb_ChkSum; + __u32 rdb_HostID; + __be32 rdb_BlockBytes; + __u32 rdb_Flags; + __u32 rdb_BadBlockList; + __be32 rdb_PartitionList; + __u32 rdb_FileSysHeaderList; + __u32 rdb_DriveInit; + __u32 rdb_Reserved1[6]; + __u32 rdb_Cylinders; + __u32 rdb_Sectors; + __u32 rdb_Heads; + __u32 rdb_Interleave; + __u32 rdb_Park; + __u32 rdb_Reserved2[3]; + __u32 rdb_WritePreComp; + __u32 rdb_ReducedWrite; + __u32 rdb_StepRate; + __u32 rdb_Reserved3[5]; + __u32 rdb_RDBBlocksLo; + __u32 rdb_RDBBlocksHi; + __u32 rdb_LoCylinder; + __u32 rdb_HiCylinder; + __u32 rdb_CylBlocks; + __u32 rdb_AutoParkSeconds; + __u32 rdb_HighRDSKBlock; + __u32 rdb_Reserved4; + char rdb_DiskVendor[8]; + char rdb_DiskProduct[16]; + char rdb_DiskRevision[4]; + char rdb_ControllerVendor[8]; + char rdb_ControllerProduct[16]; + char rdb_ControllerRevision[4]; + __u32 rdb_Reserved5[10]; +}; + +#define IDNAME_RIGIDDISK 0x5244534B /* "RDSK" */ + +struct PartitionBlock { + __be32 pb_ID; + __be32 pb_SummedLongs; + __s32 pb_ChkSum; + __u32 pb_HostID; + __be32 pb_Next; + __u32 pb_Flags; + __u32 pb_Reserved1[2]; + __u32 pb_DevFlags; + __u8 pb_DriveName[32]; + __u32 pb_Reserved2[15]; + __be32 pb_Environment[17]; + __u32 pb_EReserved[15]; +}; + +#define IDNAME_PARTITION 0x50415254 /* "PART" */ + +#define RDB_ALLOCATION_LIMIT 16 + +#endif /* AFFS_HARDBLOCKS_H */ diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h new file mode 100644 index 00000000..eaf6cd75 --- /dev/null +++ b/include/linux/agp_backend.h @@ -0,0 +1,109 @@ +/* + * AGPGART backend specific includes. Not for userspace consumption. + * + * Copyright (C) 2004 Silicon Graphics, Inc. + * Copyright (C) 2002-2003 Dave Jones + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
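/*
 * A minimal sketch (hypothetical, not part of this header) using the
 * RigidDiskBlock, IDNAME_RIGIDDISK and RDB_ALLOCATION_LIMIT definitions
 * above: the RDSK block may live in any of the first RDB_ALLOCATION_LIMIT
 * sectors, and the RDB fields are stored big-endian on disk, so a partition
 * scanner validates a candidate sector roughly like this before trusting
 * the rest of the block.
 */
static bool example_is_rdb(const void *sector)
{
        const struct RigidDiskBlock *rdb = sector;

        /* rdb_ID is declared __u32 here; the on-disk value is big-endian */
        return rdb->rdb_ID == cpu_to_be32(IDNAME_RIGIDDISK);
}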
+ * + */ + +#ifndef _AGP_BACKEND_H +#define _AGP_BACKEND_H 1 + +#include <linux/list.h> + +enum chipset_type { + NOT_SUPPORTED, + SUPPORTED, +}; + +struct agp_version { + u16 major; + u16 minor; +}; + +struct agp_kern_info { + struct agp_version version; + struct pci_dev *device; + enum chipset_type chipset; + unsigned long mode; + unsigned long aper_base; + size_t aper_size; + int max_memory; /* In pages */ + int current_memory; + bool cant_use_aperture; + unsigned long page_mask; + const struct vm_operations_struct *vm_ops; +}; + +/* + * The agp_memory structure has information about the block of agp memory + * allocated. A caller may manipulate the next and prev pointers to link + * each allocated item into a list. These pointers are ignored by the backend. + * Everything else should never be written to, but the caller may read any of + * the items to determine the status of this block of agp memory. + */ + +struct agp_bridge_data; + +struct agp_memory { + struct agp_memory *next; + struct agp_memory *prev; + struct agp_bridge_data *bridge; + struct page **pages; + size_t page_count; + int key; + int num_scratch_pages; + off_t pg_start; + u32 type; + u32 physical; + bool is_bound; + bool is_flushed; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; + /* DMA-mapped addresses */ + struct scatterlist *sg_list; + int num_sg; +}; + +#define AGP_NORMAL_MEMORY 0 + +#define AGP_USER_TYPES (1 << 16) +#define AGP_USER_MEMORY (AGP_USER_TYPES) +#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) + +extern struct agp_bridge_data *agp_bridge; +extern struct list_head agp_bridges; + +extern struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *); + +extern void agp_free_memory(struct agp_memory *); +extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, u32); +extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); +extern int agp_bind_memory(struct agp_memory *, off_t); +extern int agp_unbind_memory(struct agp_memory *); +extern void agp_enable(struct agp_bridge_data *, u32); +extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); +extern void agp_backend_release(struct agp_bridge_data *); + +#endif /* _AGP_BACKEND_H */ diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h new file mode 100644 index 00000000..f6778ece --- /dev/null +++ b/include/linux/agpgart.h @@ -0,0 +1,214 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
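/*
 * A sketch of the usual in-kernel calling sequence for the AGP backend API
 * declared above (hypothetical helper, loosely modelled on how DRM-style
 * users drive it): acquire the bridge, query it, then allocate and bind a
 * block at a page offset inside the aperture.  The caller is expected to
 * agp_unbind_memory()/agp_free_memory() and agp_backend_release() later.
 */
static struct agp_memory *example_agp_map(struct pci_dev *pdev,
                                          struct agp_bridge_data **bridgep,
                                          size_t pages, off_t pg_start)
{
        struct agp_bridge_data *bridge;
        struct agp_kern_info info;
        struct agp_memory *mem;

        bridge = agp_backend_acquire(pdev);
        if (!bridge)
                return NULL;

        agp_copy_info(bridge, &info);           /* aperture base/size, chipset */

        mem = agp_allocate_memory(bridge, pages, AGP_NORMAL_MEMORY);
        if (!mem || agp_bind_memory(mem, pg_start)) {
                if (mem)
                        agp_free_memory(mem);
                agp_backend_release(bridge);
                return NULL;
        }

        *bridgep = bridge;
        return mem;
}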
IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _AGP_H +#define _AGP_H 1 + +#define AGPIOC_BASE 'A' +#define AGPIOC_INFO _IOR (AGPIOC_BASE, 0, struct agp_info*) +#define AGPIOC_ACQUIRE _IO (AGPIOC_BASE, 1) +#define AGPIOC_RELEASE _IO (AGPIOC_BASE, 2) +#define AGPIOC_SETUP _IOW (AGPIOC_BASE, 3, struct agp_setup*) +#define AGPIOC_RESERVE _IOW (AGPIOC_BASE, 4, struct agp_region*) +#define AGPIOC_PROTECT _IOW (AGPIOC_BASE, 5, struct agp_region*) +#define AGPIOC_ALLOCATE _IOWR(AGPIOC_BASE, 6, struct agp_allocate*) +#define AGPIOC_DEALLOCATE _IOW (AGPIOC_BASE, 7, int) +#define AGPIOC_BIND _IOW (AGPIOC_BASE, 8, struct agp_bind*) +#define AGPIOC_UNBIND _IOW (AGPIOC_BASE, 9, struct agp_unbind*) +#define AGPIOC_CHIPSET_FLUSH _IO (AGPIOC_BASE, 10) + +#define AGP_DEVICE "/dev/agpgart" + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef __KERNEL__ +#include <linux/types.h> + +struct agp_version { + __u16 major; + __u16 minor; +}; + +typedef struct _agp_info { + struct agp_version version; /* version of the driver */ + __u32 bridge_id; /* bridge vendor/device */ + __u32 agp_mode; /* mode info of bridge */ + unsigned long aper_base;/* base of aperture */ + size_t aper_size; /* size of aperture */ + size_t pg_total; /* max pages (swap + system) */ + size_t pg_system; /* max pages (system) */ + size_t pg_used; /* current pages used */ +} agp_info; + +typedef struct _agp_setup { + __u32 agp_mode; /* mode info of bridge */ +} agp_setup; + +/* + * The "prot" down below needs still a "sleep" flag somehow ... + */ +typedef struct _agp_segment { + __kernel_off_t pg_start; /* starting page to populate */ + __kernel_size_t pg_count; /* number of pages */ + int prot; /* prot flags for mmap */ +} agp_segment; + +typedef struct _agp_region { + __kernel_pid_t pid; /* pid of process */ + __kernel_size_t seg_count; /* number of segments */ + struct _agp_segment *seg_list; +} agp_region; + +typedef struct _agp_allocate { + int key; /* tag of allocation */ + __kernel_size_t pg_count;/* number of pages */ + __u32 type; /* 0 == normal, other devspec */ + __u32 physical; /* device specific (some devices + * need a phys address of the + * actual page behind the gatt + * table) */ +} agp_allocate; + +typedef struct _agp_bind { + int key; /* tag of allocation */ + __kernel_off_t pg_start;/* starting page to populate */ +} agp_bind; + +typedef struct _agp_unbind { + int key; /* tag of allocation */ + __u32 priority; /* priority for paging out */ +} agp_unbind; + +#else /* __KERNEL__ */ +#include <linux/mutex.h> +#include <linux/agp_backend.h> + +#define AGPGART_MINOR 175 + +struct agp_info { + struct agp_version version; /* version of the driver */ + u32 bridge_id; /* bridge vendor/device */ + u32 agp_mode; /* mode info of bridge */ + unsigned long aper_base;/* base of aperture */ + size_t aper_size; /* size of aperture */ + size_t pg_total; /* max pages (swap + system) */ + size_t pg_system; /* max pages (system) */ + size_t pg_used; /* current pages used */ +}; + +struct agp_setup { + u32 agp_mode; /* mode info of bridge */ +}; + +/* + * The "prot" down below needs still a "sleep" flag somehow ... 
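/*
 * A user-space sketch (hypothetical program, error handling omitted) of the
 * ioctl protocol declared above: the controlling process opens AGP_DEVICE,
 * acquires the bridge, sets the mode, then allocates pages and binds them
 * at a page offset inside the aperture.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <linux/agpgart.h>

static int example_agp_user(size_t pages, off_t pg_start)
{
        agp_info info;
        agp_setup setup;
        agp_allocate alloc = { .pg_count = pages, .type = 0 };
        agp_bind bind;
        int fd = open(AGP_DEVICE, O_RDWR);

        ioctl(fd, AGPIOC_ACQUIRE);              /* become the controller */
        ioctl(fd, AGPIOC_INFO, &info);

        setup.agp_mode = info.agp_mode;
        ioctl(fd, AGPIOC_SETUP, &setup);

        ioctl(fd, AGPIOC_ALLOCATE, &alloc);     /* kernel fills alloc.key */

        bind.key = alloc.key;
        bind.pg_start = pg_start;
        ioctl(fd, AGPIOC_BIND, &bind);

        return fd;
}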
+ */ +struct agp_segment { + off_t pg_start; /* starting page to populate */ + size_t pg_count; /* number of pages */ + int prot; /* prot flags for mmap */ +}; + +struct agp_segment_priv { + off_t pg_start; + size_t pg_count; + pgprot_t prot; +}; + +struct agp_region { + pid_t pid; /* pid of process */ + size_t seg_count; /* number of segments */ + struct agp_segment *seg_list; +}; + +struct agp_allocate { + int key; /* tag of allocation */ + size_t pg_count; /* number of pages */ + u32 type; /* 0 == normal, other devspec */ + u32 physical; /* device specific (some devices + * need a phys address of the + * actual page behind the gatt + * table) */ +}; + +struct agp_bind { + int key; /* tag of allocation */ + off_t pg_start; /* starting page to populate */ +}; + +struct agp_unbind { + int key; /* tag of allocation */ + u32 priority; /* priority for paging out */ +}; + +struct agp_client { + struct agp_client *next; + struct agp_client *prev; + pid_t pid; + int num_segments; + struct agp_segment_priv **segments; +}; + +struct agp_controller { + struct agp_controller *next; + struct agp_controller *prev; + pid_t pid; + int num_clients; + struct agp_memory *pool; + struct agp_client *clients; +}; + +#define AGP_FF_ALLOW_CLIENT 0 +#define AGP_FF_ALLOW_CONTROLLER 1 +#define AGP_FF_IS_CLIENT 2 +#define AGP_FF_IS_CONTROLLER 3 +#define AGP_FF_IS_VALID 4 + +struct agp_file_private { + struct agp_file_private *next; + struct agp_file_private *prev; + pid_t my_pid; + unsigned long access_flags; /* long req'd for set_bit --RR */ +}; + +struct agp_front_data { + struct mutex agp_mutex; + struct agp_controller *current_controller; + struct agp_controller *controllers; + struct agp_file_private *file_priv_list; + bool used_by_controller; + bool backend_acquired; +}; + +#endif /* __KERNEL__ */ + +#endif /* _AGP_H */ diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h new file mode 100644 index 00000000..73a25005 --- /dev/null +++ b/include/linux/ahci_platform.h @@ -0,0 +1,33 @@ +/* + * AHCI SATA platform driver + * + * Copyright 2004-2005 Red Hat, Inc. + * Jeff Garzik <jgarzik@pobox.com> + * Copyright 2010 MontaVista Software, LLC. + * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + */ + +#ifndef _AHCI_PLATFORM_H +#define _AHCI_PLATFORM_H + +#include <linux/compiler.h> + +struct device; +struct ata_port_info; + +struct ahci_platform_data { + int (*init)(struct device *dev, void __iomem *addr); + void (*exit)(struct device *dev); + int (*suspend)(struct device *dev); + int (*resume)(struct device *dev); + const struct ata_port_info *ata_port_info; + unsigned int force_port_map; + unsigned int mask_port_map; +}; + +#endif /* _AHCI_PLATFORM_H */ diff --git a/include/linux/aio.h b/include/linux/aio.h new file mode 100644 index 00000000..b1a520ec --- /dev/null +++ b/include/linux/aio.h @@ -0,0 +1,240 @@ +#ifndef __LINUX__AIO_H +#define __LINUX__AIO_H + +#include <linux/list.h> +#include <linux/workqueue.h> +#include <linux/aio_abi.h> +#include <linux/uio.h> +#include <linux/rcupdate.h> + +#include <linux/atomic.h> + +#define AIO_MAXSEGS 4 +#define AIO_KIOGRP_NR_ATOMIC 8 + +struct kioctx; + +/* Notes on cancelling a kiocb: + * If a kiocb is cancelled, aio_complete may return 0 to indicate + * that cancel has not yet disposed of the kiocb. 
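/*
 * A minimal sketch (hypothetical board code) of how struct ahci_platform_data
 * above is filled in: the hooks let a platform do controller-specific setup
 * around the generic AHCI driver.  The device registration itself lives
 * elsewhere and is not shown.
 */
static int example_ahci_init(struct device *dev, void __iomem *mmio)
{
        /* enable clocks/PHY, poke platform-specific registers at 'mmio' */
        return 0;
}

static struct ahci_platform_data example_ahci_pdata = {
        .init   = example_ahci_init,
};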
All cancel + * operations *must* call aio_put_req to dispose of the kiocb + * to guard against races with the completion code. + */ +#define KIOCB_C_CANCELLED 0x01 +#define KIOCB_C_COMPLETE 0x02 + +#define KIOCB_SYNC_KEY (~0U) + +/* ki_flags bits */ +/* + * This may be used for cancel/retry serialization in the future, but + * for now it's unused and we probably don't want modules to even + * think they can use it. + */ +/* #define KIF_LOCKED 0 */ +#define KIF_KICKED 1 +#define KIF_CANCELLED 2 + +#define kiocbTryLock(iocb) test_and_set_bit(KIF_LOCKED, &(iocb)->ki_flags) +#define kiocbTryKick(iocb) test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags) + +#define kiocbSetLocked(iocb) set_bit(KIF_LOCKED, &(iocb)->ki_flags) +#define kiocbSetKicked(iocb) set_bit(KIF_KICKED, &(iocb)->ki_flags) +#define kiocbSetCancelled(iocb) set_bit(KIF_CANCELLED, &(iocb)->ki_flags) + +#define kiocbClearLocked(iocb) clear_bit(KIF_LOCKED, &(iocb)->ki_flags) +#define kiocbClearKicked(iocb) clear_bit(KIF_KICKED, &(iocb)->ki_flags) +#define kiocbClearCancelled(iocb) clear_bit(KIF_CANCELLED, &(iocb)->ki_flags) + +#define kiocbIsLocked(iocb) test_bit(KIF_LOCKED, &(iocb)->ki_flags) +#define kiocbIsKicked(iocb) test_bit(KIF_KICKED, &(iocb)->ki_flags) +#define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags) + +/* is there a better place to document function pointer methods? */ +/** + * ki_retry - iocb forward progress callback + * @kiocb: The kiocb struct to advance by performing an operation. + * + * This callback is called when the AIO core wants a given AIO operation + * to make forward progress. The kiocb argument describes the operation + * that is to be performed. As the operation proceeds, perhaps partially, + * ki_retry is expected to update the kiocb with progress made. Typically + * ki_retry is set in the AIO core and it itself calls file_operations + * helpers. + * + * ki_retry's return value determines when the AIO operation is completed + * and an event is generated in the AIO event ring. Except the special + * return values described below, the value that is returned from ki_retry + * is transferred directly into the completion ring as the operation's + * resulting status. Once this has happened ki_retry *MUST NOT* reference + * the kiocb pointer again. + * + * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete() + * will be called on the kiocb pointer in the future. The AIO core will + * not ask the method again -- ki_retry must ensure forward progress. + * aio_complete() must be called once and only once in the future, multiple + * calls may result in undefined behaviour. + * + * If ki_retry returns -EIOCBRETRY it has made a promise that kick_iocb() + * will be called on the kiocb pointer in the future. This may happen + * through generic helpers that associate kiocb->ki_wait with a wait + * queue head that ki_retry uses via current->io_wait. It can also happen + * with custom tracking and manual calls to kick_iocb(), though that is + * discouraged. In either case, kick_iocb() must be called once and only + * once. ki_retry must ensure forward progress, the AIO core will wait + * indefinitely for kick_iocb() to be called. 
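/*
 * A sketch (hypothetical method, not from any real driver) of the ki_retry
 * contract described above; example_start_io() is an assumed helper that
 * either finishes the I/O, queues it to hardware, or reports "not ready".
 */
static ssize_t example_start_io(struct kiocb *iocb);    /* hypothetical */

static ssize_t example_ki_retry(struct kiocb *iocb)
{
        ssize_t ret = example_start_io(iocb);

        if (ret == -EAGAIN)
                /* not ready: kick_iocb(iocb) has been arranged to run
                 * exactly once later (e.g. from a wait-queue wakeup) */
                return -EIOCBRETRY;

        if (ret == -EINPROGRESS)
                /* queued: the completion path will call aio_complete(),
                 * so the kiocb must not be touched here again */
                return -EIOCBQUEUED;

        return ret;     /* final status, goes straight into the event ring */
}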
+ */ +struct kiocb { + struct list_head ki_run_list; + unsigned long ki_flags; + int ki_users; + unsigned ki_key; /* id of this request */ + + struct file *ki_filp; + struct kioctx *ki_ctx; /* may be NULL for sync ops */ + int (*ki_cancel)(struct kiocb *, struct io_event *); + ssize_t (*ki_retry)(struct kiocb *); + void (*ki_dtor)(struct kiocb *); + + union { + void __user *user; + struct task_struct *tsk; + } ki_obj; + + __u64 ki_user_data; /* user's data for completion */ + loff_t ki_pos; + + void *private; + /* State that we remember to be able to restart/retry */ + unsigned short ki_opcode; + size_t ki_nbytes; /* copy of iocb->aio_nbytes */ + char __user *ki_buf; /* remaining iocb->aio_buf */ + size_t ki_left; /* remaining bytes */ + struct iovec ki_inline_vec; /* inline vector */ + struct iovec *ki_iovec; + unsigned long ki_nr_segs; + unsigned long ki_cur_seg; + + struct list_head ki_list; /* the aio core uses this + * for cancellation */ + struct list_head ki_batch; /* batch allocation */ + + /* + * If the aio_resfd field of the userspace iocb is not zero, + * this is the underlying eventfd context to deliver events to. + */ + struct eventfd_ctx *ki_eventfd; +}; + +#define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY) +#define init_sync_kiocb(x, filp) \ + do { \ + struct task_struct *tsk = current; \ + (x)->ki_flags = 0; \ + (x)->ki_users = 1; \ + (x)->ki_key = KIOCB_SYNC_KEY; \ + (x)->ki_filp = (filp); \ + (x)->ki_ctx = NULL; \ + (x)->ki_cancel = NULL; \ + (x)->ki_retry = NULL; \ + (x)->ki_dtor = NULL; \ + (x)->ki_obj.tsk = tsk; \ + (x)->ki_user_data = 0; \ + (x)->private = NULL; \ + } while (0) + +#define AIO_RING_MAGIC 0xa10a10a1 +#define AIO_RING_COMPAT_FEATURES 1 +#define AIO_RING_INCOMPAT_FEATURES 0 +struct aio_ring { + unsigned id; /* kernel internal index number */ + unsigned nr; /* number of io_events */ + unsigned head; + unsigned tail; + + unsigned magic; + unsigned compat_features; + unsigned incompat_features; + unsigned header_length; /* size of aio_ring */ + + + struct io_event io_events[0]; +}; /* 128 bytes + ring size */ + +#define aio_ring_avail(info, ring) (((ring)->head + (info)->nr - 1 - (ring)->tail) % (info)->nr) + +#define AIO_RING_PAGES 8 +struct aio_ring_info { + unsigned long mmap_base; + unsigned long mmap_size; + + struct page **ring_pages; + spinlock_t ring_lock; + long nr_pages; + + unsigned nr, tail; + + struct page *internal_pages[AIO_RING_PAGES]; +}; + +struct kioctx { + atomic_t users; + int dead; + struct mm_struct *mm; + + /* This needs improving */ + unsigned long user_id; + struct hlist_node list; + + wait_queue_head_t wait; + + spinlock_t ctx_lock; + + int reqs_active; + struct list_head active_reqs; /* used for cancellation */ + struct list_head run_list; /* used for kicked reqs */ + + /* sys_io_setup currently limits this to an unsigned int */ + unsigned max_reqs; + + struct aio_ring_info ring_info; + + struct delayed_work wq; + + struct rcu_head rcu_head; +}; + +/* prototypes */ +extern unsigned aio_max_size; + +#ifdef CONFIG_AIO +extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb); +extern int aio_put_req(struct kiocb *iocb); +extern void kick_iocb(struct kiocb *iocb); +extern int aio_complete(struct kiocb *iocb, long res, long res2); +struct mm_struct; +extern void exit_aio(struct mm_struct *mm); +extern long do_io_submit(aio_context_t ctx_id, long nr, + struct iocb __user *__user *iocbpp, bool compat); +#else +static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } +static inline int aio_put_req(struct kiocb 
*iocb) { return 0; } +static inline void kick_iocb(struct kiocb *iocb) { } +static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; } +struct mm_struct; +static inline void exit_aio(struct mm_struct *mm) { } +static inline long do_io_submit(aio_context_t ctx_id, long nr, + struct iocb __user * __user *iocbpp, + bool compat) { return 0; } +#endif /* CONFIG_AIO */ + +static inline struct kiocb *list_kiocb(struct list_head *h) +{ + return list_entry(h, struct kiocb, ki_list); +} + +/* for sysctl: */ +extern unsigned long aio_nr; +extern unsigned long aio_max_nr; + +#endif /* __LINUX__AIO_H */ diff --git a/include/linux/aio_abi.h b/include/linux/aio_abi.h new file mode 100644 index 00000000..86fa7a71 --- /dev/null +++ b/include/linux/aio_abi.h @@ -0,0 +1,111 @@ +/* include/linux/aio_abi.h + * + * Copyright 2000,2001,2002 Red Hat. + * + * Written by Benjamin LaHaise <bcrl@kvack.org> + * + * Distribute under the terms of the GPLv2 (see ../../COPYING) or under + * the following terms. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. Red Hat makes no representations about + * the suitability of this software for any purpose. + * + * IN NO EVENT SHALL RED HAT BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, + * SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF + * THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF RED HAT HAS BEEN ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * + * RED HAT DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND + * RED HAT HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, + * ENHANCEMENTS, OR MODIFICATIONS. + */ +#ifndef __LINUX__AIO_ABI_H +#define __LINUX__AIO_ABI_H + +#include <linux/types.h> +#include <asm/byteorder.h> + +typedef __kernel_ulong_t aio_context_t; + +enum { + IOCB_CMD_PREAD = 0, + IOCB_CMD_PWRITE = 1, + IOCB_CMD_FSYNC = 2, + IOCB_CMD_FDSYNC = 3, + /* These two are experimental. + * IOCB_CMD_PREADX = 4, + * IOCB_CMD_POLL = 5, + */ + IOCB_CMD_NOOP = 6, + IOCB_CMD_PREADV = 7, + IOCB_CMD_PWRITEV = 8, +}; + +/* + * Valid flags for the "aio_flags" member of the "struct iocb". + * + * IOCB_FLAG_RESFD - Set if the "aio_resfd" member of the "struct iocb" + * is valid. + */ +#define IOCB_FLAG_RESFD (1 << 0) + +/* read() from /dev/aio returns these structures. */ +struct io_event { + __u64 data; /* the data field from the iocb */ + __u64 obj; /* what iocb this event came from */ + __s64 res; /* result code for this event */ + __s64 res2; /* secondary result */ +}; + +#if defined(__LITTLE_ENDIAN) +#define PADDED(x,y) x, y +#elif defined(__BIG_ENDIAN) +#define PADDED(x,y) y, x +#else +#error edit for your odd byteorder. +#endif + +/* + * we always use a 64bit off_t when communicating + * with userland. its up to libraries to do the + * proper padding and aio_error abstraction + */ + +struct iocb { + /* these are internal to the kernel/libc. 
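/*
 * A sketch (hypothetical, closely mirroring the VFS's synchronous read path)
 * of how init_sync_kiocb()/wait_on_sync_kiocb() above are meant to be used:
 * build an on-stack kiocb, hand it to an aio-aware method, and wait if the
 * method queued the I/O.
 */
static ssize_t example_sync_read(struct file *filp, char __user *buf,
                                 size_t len, loff_t *ppos)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
        kiocb.ki_left = len;
        kiocb.ki_nbytes = len;

        ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
        if (ret == -EIOCBQUEUED)
                ret = wait_on_sync_kiocb(&kiocb);
        *ppos = kiocb.ki_pos;
        return ret;
}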
*/ + __u64 aio_data; /* data to be returned in event's data */ + __u32 PADDED(aio_key, aio_reserved1); + /* the kernel sets aio_key to the req # */ + + /* common fields */ + __u16 aio_lio_opcode; /* see IOCB_CMD_ above */ + __s16 aio_reqprio; + __u32 aio_fildes; + + __u64 aio_buf; + __u64 aio_nbytes; + __s64 aio_offset; + + /* extra parameters */ + __u64 aio_reserved2; /* TODO: use this for a (struct sigevent *) */ + + /* flags for the "struct iocb" */ + __u32 aio_flags; + + /* + * if the IOCB_FLAG_RESFD flag of "aio_flags" is set, this is an + * eventfd to signal AIO readiness to + */ + __u32 aio_resfd; +}; /* 64 bytes */ + +#undef IFBIG +#undef IFLITTLE + +#endif /* __LINUX__AIO_ABI_H */ + diff --git a/include/linux/akm8975.h b/include/linux/akm8975.h new file mode 100644 index 00000000..6a7c4326 --- /dev/null +++ b/include/linux/akm8975.h @@ -0,0 +1,87 @@ +/* + * Definitions for akm8975 compass chip. + */ +#ifndef AKM8975_H +#define AKM8975_H + +#include <linux/ioctl.h> + +/*! \name AK8975 operation mode + \anchor AK8975_Mode + Defines an operation mode of the AK8975.*/ +/*! @{*/ +#define AK8975_MODE_SNG_MEASURE 0x01 +#define AK8975_MODE_SELF_TEST 0x08 +#define AK8975_MODE_FUSE_ACCESS 0x0F +#define AK8975_MODE_POWER_DOWN 0x00 +/*! @}*/ + +#define RBUFF_SIZE 8 /* Rx buffer size */ + +/*! \name AK8975 register address +\anchor AK8975_REG +Defines a register address of the AK8975.*/ +/*! @{*/ +#define AK8975_REG_WIA 0x00 +#define AK8975_REG_INFO 0x01 +#define AK8975_REG_ST1 0x02 +#define AK8975_REG_HXL 0x03 +#define AK8975_REG_HXH 0x04 +#define AK8975_REG_HYL 0x05 +#define AK8975_REG_HYH 0x06 +#define AK8975_REG_HZL 0x07 +#define AK8975_REG_HZH 0x08 +#define AK8975_REG_ST2 0x09 +#define AK8975_REG_CNTL 0x0A +#define AK8975_REG_RSV 0x0B +#define AK8975_REG_ASTC 0x0C +#define AK8975_REG_TS1 0x0D +#define AK8975_REG_TS2 0x0E +#define AK8975_REG_I2CDIS 0x0F +/*! @}*/ + +/*! \name AK8975 fuse-rom address +\anchor AK8975_FUSE +Defines a read-only address of the fuse ROM of the AK8975.*/ +/*! @{*/ +#define AK8975_FUSE_ASAX 0x10 +#define AK8975_FUSE_ASAY 0x11 +#define AK8975_FUSE_ASAZ 0x12 +/*! 
@}*/ + +#define AKMIO 0xA1 + +/* IOCTLs for AKM library */ +#define ECS_IOCTL_WRITE _IOW(AKMIO, 0x02, char[5]) +#define ECS_IOCTL_READ _IOWR(AKMIO, 0x03, char[5]) +#define ECS_IOCTL_GETDATA _IOR(AKMIO, 0x08, char[RBUFF_SIZE]) +#define ECS_IOCTL_SET_YPR _IOW(AKMIO, 0x0C, short[12]) +#define ECS_IOCTL_GET_OPEN_STATUS _IOR(AKMIO, 0x0D, int) +#define ECS_IOCTL_GET_CLOSE_STATUS _IOR(AKMIO, 0x0E, int) +#define ECS_IOCTL_GET_DELAY _IOR(AKMIO, 0x30, short) + +/* IOCTLs for APPs */ +#define ECS_IOCTL_APP_SET_MFLAG _IOW(AKMIO, 0x11, short) +#define ECS_IOCTL_APP_GET_MFLAG _IOW(AKMIO, 0x12, short) +#define ECS_IOCTL_APP_SET_AFLAG _IOW(AKMIO, 0x13, short) +#define ECS_IOCTL_APP_GET_AFLAG _IOR(AKMIO, 0x14, short) +#define ECS_IOCTL_APP_SET_DELAY _IOW(AKMIO, 0x18, short) +#define ECS_IOCTL_APP_GET_DELAY ECS_IOCTL_GET_DELAY +/* Set raw magnetic vector flag */ +#define ECS_IOCTL_APP_SET_MVFLAG _IOW(AKMIO, 0x19, short) +/* Get raw magnetic vector flag */ +#define ECS_IOCTL_APP_GET_MVFLAG _IOR(AKMIO, 0x1A, short) +#define ECS_IOCTL_APP_SET_TFLAG _IOR(AKMIO, 0x15, short) + + +struct akm8975_platform_data { + int intr; + + int (*init)(void); + void (*exit)(void); + int (*power_on)(void); + int (*power_off)(void); +}; + +#endif + diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h new file mode 100644 index 00000000..96c5c249 --- /dev/null +++ b/include/linux/alarmtimer.h @@ -0,0 +1,82 @@ +#ifndef _LINUX_ALARMTIMER_H +#define _LINUX_ALARMTIMER_H + +#include <linux/time.h> +#include <linux/hrtimer.h> +#include <linux/timerqueue.h> +#include <linux/rtc.h> + +enum alarmtimer_type { + ALARM_REALTIME, + ALARM_BOOTTIME, + + ALARM_NUMTYPE, +}; + +enum alarmtimer_restart { + ALARMTIMER_NORESTART, + ALARMTIMER_RESTART, +}; + + +#define ALARMTIMER_STATE_INACTIVE 0x00 +#define ALARMTIMER_STATE_ENQUEUED 0x01 +#define ALARMTIMER_STATE_CALLBACK 0x02 + +/** + * struct alarm - Alarm timer structure + * @node: timerqueue node for adding to the event list this value + * also includes the expiration time. + * @period: Period for recuring alarms + * @function: Function pointer to be executed when the timer fires. + * @type: Alarm type (BOOTTIME/REALTIME) + * @enabled: Flag that represents if the alarm is set to fire or not + * @data: Internal data value. + */ +struct alarm { + struct timerqueue_node node; + enum alarmtimer_restart (*function)(struct alarm *, ktime_t now); + enum alarmtimer_type type; + int state; + void *data; +}; + +void alarm_init(struct alarm *alarm, enum alarmtimer_type type, + enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); +void alarm_start(struct alarm *alarm, ktime_t start); +int alarm_try_to_cancel(struct alarm *alarm); +int alarm_cancel(struct alarm *alarm); + +u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); + +/* + * A alarmtimer is active, when it is enqueued into timerqueue or the + * callback function is running. 
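/*
 * A minimal sketch (hypothetical user of the alarm API above): arm a
 * one-shot wall-clock alarm 'secs' seconds from now.  Returning
 * ALARMTIMER_NORESTART from the callback keeps the alarm from being
 * re-queued.
 */
static enum alarmtimer_restart example_alarm_fn(struct alarm *alarm, ktime_t now)
{
        /* react to the expiry here */
        return ALARMTIMER_NORESTART;
}

static void example_arm_alarm(struct alarm *alarm, long secs)
{
        alarm_init(alarm, ALARM_REALTIME, example_alarm_fn);
        alarm_start(alarm, ktime_add(ktime_get_real(), ktime_set(secs, 0)));
}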
+ */ +static inline int alarmtimer_active(const struct alarm *timer) +{ + return timer->state != ALARMTIMER_STATE_INACTIVE; +} + +/* + * Helper function to check, whether the timer is on one of the queues + */ +static inline int alarmtimer_is_queued(struct alarm *timer) +{ + return timer->state & ALARMTIMER_STATE_ENQUEUED; +} + +/* + * Helper function to check, whether the timer is running the callback + * function + */ +static inline int alarmtimer_callback_running(struct alarm *timer) +{ + return timer->state & ALARMTIMER_STATE_CALLBACK; +} + + +/* Provide way to access the rtc device being used by alarmtimers */ +struct rtc_device *alarmtimer_get_rtcdev(void); + +#endif diff --git a/include/linux/altera_jtaguart.h b/include/linux/altera_jtaguart.h new file mode 100644 index 00000000..953b178a --- /dev/null +++ b/include/linux/altera_jtaguart.h @@ -0,0 +1,16 @@ +/* + * altera_jtaguart.h -- Altera JTAG UART driver defines. + */ + +#ifndef __ALTJUART_H +#define __ALTJUART_H + +#define ALTERA_JTAGUART_MAJOR 204 +#define ALTERA_JTAGUART_MINOR 186 + +struct altera_jtaguart_platform_uart { + unsigned long mapbase; /* Physical address base */ + unsigned int irq; /* Interrupt vector */ +}; + +#endif /* __ALTJUART_H */ diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h new file mode 100644 index 00000000..c022c82d --- /dev/null +++ b/include/linux/altera_uart.h @@ -0,0 +1,15 @@ +/* + * altera_uart.h -- Altera UART driver defines. + */ + +#ifndef __ALTUART_H +#define __ALTUART_H + +struct altera_uart_platform_uart { + unsigned long mapbase; /* Physical address base */ + unsigned int irq; /* Interrupt vector */ + unsigned int uartclk; /* UART clock rate */ + unsigned int bus_shift; /* Bus shift (address stride) */ +}; + +#endif /* __ALTUART_H */ diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h new file mode 100644 index 00000000..8d54f794 --- /dev/null +++ b/include/linux/amba/bus.h @@ -0,0 +1,130 @@ +/* + * linux/include/amba/bus.h + * + * This device type deals with ARM PrimeCells and anything else that + * presents a proper CID (0xB105F00D) at the end of the I/O register + * region or that is derived from a PrimeCell. + * + * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ASMARM_AMBA_H +#define ASMARM_AMBA_H + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/err.h> +#include <linux/resource.h> +#include <linux/regulator/consumer.h> + +#define AMBA_NR_IRQS 2 +#define AMBA_CID 0xb105f00d + +struct clk; + +struct amba_device { + struct device dev; + struct resource res; + struct clk *pclk; + u64 dma_mask; + unsigned int periphid; + unsigned int irq[AMBA_NR_IRQS]; +}; + +struct amba_driver { + struct device_driver drv; + int (*probe)(struct amba_device *, const struct amba_id *); + int (*remove)(struct amba_device *); + void (*shutdown)(struct amba_device *); + int (*suspend)(struct amba_device *, pm_message_t); + int (*resume)(struct amba_device *); + const struct amba_id *id_table; +}; + +enum amba_vendor { + AMBA_VENDOR_ARM = 0x41, + AMBA_VENDOR_ST = 0x80, +}; + +extern struct bus_type amba_bustype; + +#define to_amba_device(d) container_of(d, struct amba_device, dev) + +#define amba_get_drvdata(d) dev_get_drvdata(&d->dev) +#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p) + +int amba_driver_register(struct amba_driver *); +void amba_driver_unregister(struct amba_driver *); +struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); +void amba_device_put(struct amba_device *); +int amba_device_add(struct amba_device *, struct resource *); +int amba_device_register(struct amba_device *, struct resource *); +void amba_device_unregister(struct amba_device *); +struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int); +int amba_request_regions(struct amba_device *, const char *); +void amba_release_regions(struct amba_device *); + +#define amba_pclk_enable(d) \ + (IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk)) + +#define amba_pclk_disable(d) \ + do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) + +/* Some drivers don't use the struct amba_device */ +#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) +#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) +#define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff) +#define AMBA_PART_BITS(a) ((a) & 0xfff) + +#define amba_config(d) AMBA_CONFIG_BITS((d)->periphid) +#define amba_rev(d) AMBA_REV_BITS((d)->periphid) +#define amba_manf(d) AMBA_MANF_BITS((d)->periphid) +#define amba_part(d) AMBA_PART_BITS((d)->periphid) + +#define __AMBA_DEV(busid, data, mask) \ + { \ + .coherent_dma_mask = mask, \ + .init_name = busid, \ + .platform_data = data, \ + } + +/* + * APB devices do not themselves have the ability to address memory, + * so DMA masks should be zero (much like USB peripheral devices.) + * The DMA controller DMA masks should be used instead (much like + * USB host controllers in conventional PCs.) + */ +#define AMBA_APB_DEVICE(name, busid, id, base, irqs, data) \ +struct amba_device name##_device = { \ + .dev = __AMBA_DEV(busid, data, 0), \ + .res = DEFINE_RES_MEM(base, SZ_4K), \ + .irq = irqs, \ + .periphid = id, \ +} + +/* + * AHB devices are DMA capable, so set their DMA masks + */ +#define AMBA_AHB_DEVICE(name, busid, id, base, irqs, data) \ +struct amba_device name##_device = { \ + .dev = __AMBA_DEV(busid, data, ~0ULL), \ + .res = DEFINE_RES_MEM(base, SZ_4K), \ + .dma_mask = ~0ULL, \ + .irq = irqs, \ + .periphid = id, \ +} + +/* + * module_amba_driver() - Helper macro for drivers that don't do anything + * special in module init/exit. This eliminates a lot of boilerplate. 
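/*
 * A skeleton sketch (hypothetical driver) showing how the declarations above
 * fit together: an amba_id table to match the PrimeCell ID, probe/remove
 * operating on the amba_device, and module_amba_driver() to cut the
 * boilerplate.  The id/mask values are made up for illustration.
 */
static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
        int ret = amba_request_regions(adev, NULL);

        if (ret)
                return ret;
        /* ioremap adev->res, request adev->irq[0], enable adev->pclk ... */
        return 0;
}

static int example_remove(struct amba_device *adev)
{
        amba_release_regions(adev);
        return 0;
}

static struct amba_id example_ids[] = {
        { .id = 0x00041abc, .mask = 0x000fffff },       /* hypothetical part */
        { 0, 0 },
};

static struct amba_driver example_driver = {
        .drv            = { .name = "example-amba" },
        .id_table       = example_ids,
        .probe          = example_probe,
        .remove         = example_remove,
};

module_amba_driver(example_driver);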
Each + * module may only use this macro once, and calling it replaces module_init() + * and module_exit() + */ +#define module_amba_driver(__amba_drv) \ + module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) + +#endif diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h new file mode 100644 index 00000000..e82e3ee2 --- /dev/null +++ b/include/linux/amba/clcd.h @@ -0,0 +1,330 @@ +/* + * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel. + * + * David A Rusling + * + * Copyright (C) 2001 ARM Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ +#include <linux/fb.h> + +/* + * CLCD Controller Internal Register addresses + */ +#define CLCD_TIM0 0x00000000 +#define CLCD_TIM1 0x00000004 +#define CLCD_TIM2 0x00000008 +#define CLCD_TIM3 0x0000000c +#define CLCD_UBAS 0x00000010 +#define CLCD_LBAS 0x00000014 + +#define CLCD_PL110_IENB 0x00000018 +#define CLCD_PL110_CNTL 0x0000001c +#define CLCD_PL110_STAT 0x00000020 +#define CLCD_PL110_INTR 0x00000024 +#define CLCD_PL110_UCUR 0x00000028 +#define CLCD_PL110_LCUR 0x0000002C + +#define CLCD_PL111_CNTL 0x00000018 +#define CLCD_PL111_IENB 0x0000001c +#define CLCD_PL111_RIS 0x00000020 +#define CLCD_PL111_MIS 0x00000024 +#define CLCD_PL111_ICR 0x00000028 +#define CLCD_PL111_UCUR 0x0000002c +#define CLCD_PL111_LCUR 0x00000030 + +#define CLCD_PALL 0x00000200 +#define CLCD_PALETTE 0x00000200 + +#define TIM2_CLKSEL (1 << 5) +#define TIM2_IVS (1 << 11) +#define TIM2_IHS (1 << 12) +#define TIM2_IPC (1 << 13) +#define TIM2_IOE (1 << 14) +#define TIM2_BCD (1 << 26) + +#define CNTL_LCDEN (1 << 0) +#define CNTL_LCDBPP1 (0 << 1) +#define CNTL_LCDBPP2 (1 << 1) +#define CNTL_LCDBPP4 (2 << 1) +#define CNTL_LCDBPP8 (3 << 1) +#define CNTL_LCDBPP16 (4 << 1) +#define CNTL_LCDBPP16_565 (6 << 1) +#define CNTL_LCDBPP16_444 (7 << 1) +#define CNTL_LCDBPP24 (5 << 1) +#define CNTL_LCDBW (1 << 4) +#define CNTL_LCDTFT (1 << 5) +#define CNTL_LCDMONO8 (1 << 6) +#define CNTL_LCDDUAL (1 << 7) +#define CNTL_BGR (1 << 8) +#define CNTL_BEBO (1 << 9) +#define CNTL_BEPO (1 << 10) +#define CNTL_LCDPWR (1 << 11) +#define CNTL_LCDVCOMP(x) ((x) << 12) +#define CNTL_LDMAFIFOTIME (1 << 15) +#define CNTL_WATERMARK (1 << 16) + +enum { + /* individual formats */ + CLCD_CAP_RGB444 = (1 << 0), + CLCD_CAP_RGB5551 = (1 << 1), + CLCD_CAP_RGB565 = (1 << 2), + CLCD_CAP_RGB888 = (1 << 3), + CLCD_CAP_BGR444 = (1 << 4), + CLCD_CAP_BGR5551 = (1 << 5), + CLCD_CAP_BGR565 = (1 << 6), + CLCD_CAP_BGR888 = (1 << 7), + + /* connection layouts */ + CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444, + CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551, + CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565, + CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888, + + /* red/blue ordering */ + CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 | + CLCD_CAP_RGB565 | CLCD_CAP_RGB888, + CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 | + CLCD_CAP_BGR565 | CLCD_CAP_BGR888, + + CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB, +}; + +struct clcd_panel { + struct fb_videomode mode; + signed short width; /* width in mm */ + signed short height; /* height in mm */ + u32 tim2; + u32 tim3; + u32 cntl; + u32 caps; + unsigned int bpp:8, + fixedtimings:1, + grayscale:1; + unsigned int connector; +}; + +struct clcd_regs { + u32 tim0; + u32 tim1; + u32 tim2; + u32 tim3; + u32 cntl; + unsigned long pixclock; +}; + +struct clcd_fb; + +/* + * the board-type specific 
routines + */ +struct clcd_board { + const char *name; + + /* + * Optional. Hardware capability flags. + */ + u32 caps; + + /* + * Optional. Check whether the var structure is acceptable + * for this display. + */ + int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var); + + /* + * Compulsory. Decode fb->fb.var into regs->*. In the case of + * fixed timing, set regs->* to the register values required. + */ + void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs); + + /* + * Optional. Disable any extra display hardware. + */ + void (*disable)(struct clcd_fb *); + + /* + * Optional. Enable any extra display hardware. + */ + void (*enable)(struct clcd_fb *); + + /* + * Setup platform specific parts of CLCD driver + */ + int (*setup)(struct clcd_fb *); + + /* + * mmap the framebuffer memory + */ + int (*mmap)(struct clcd_fb *, struct vm_area_struct *); + + /* + * Remove platform specific parts of CLCD driver + */ + void (*remove)(struct clcd_fb *); +}; + +struct amba_device; +struct clk; + +/* this data structure describes each frame buffer device we find */ +struct clcd_fb { + struct fb_info fb; + struct amba_device *dev; + struct clk *clk; + struct clcd_panel *panel; + struct clcd_board *board; + void *board_data; + void __iomem *regs; + u16 off_ienb; + u16 off_cntl; + u32 clcd_cntl; + u32 cmap[16]; + bool clk_enabled; +}; + +static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs) +{ + struct fb_var_screeninfo *var = &fb->fb.var; + u32 val, cpl; + + /* + * Program the CLCD controller registers and start the CLCD + */ + val = ((var->xres / 16) - 1) << 2; + val |= (var->hsync_len - 1) << 8; + val |= (var->right_margin - 1) << 16; + val |= (var->left_margin - 1) << 24; + regs->tim0 = val; + + val = var->yres; + if (fb->panel->cntl & CNTL_LCDDUAL) + val /= 2; + val -= 1; + val |= (var->vsync_len - 1) << 10; + val |= var->lower_margin << 16; + val |= var->upper_margin << 24; + regs->tim1 = val; + + val = fb->panel->tim2; + val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS; + val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS; + + cpl = var->xres_virtual; + if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */ + /* / 1 */; + else if (!var->grayscale) /* STN color */ + cpl = cpl * 8 / 3; + else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */ + cpl /= 8; + else /* STN monochrome, 4bit */ + cpl /= 4; + + regs->tim2 = val | ((cpl - 1) << 16); + + regs->tim3 = fb->panel->tim3; + + val = fb->panel->cntl; + if (var->grayscale) + val |= CNTL_LCDBW; + + if (fb->panel->caps && fb->board->caps && + var->bits_per_pixel >= 16) { + /* + * if board and panel supply capabilities, we can support + * changing BGR/RGB depending on supplied parameters + */ + if (var->red.offset == 0) + val &= ~CNTL_BGR; + else + val |= CNTL_BGR; + } + + switch (var->bits_per_pixel) { + case 1: + val |= CNTL_LCDBPP1; + break; + case 2: + val |= CNTL_LCDBPP2; + break; + case 4: + val |= CNTL_LCDBPP4; + break; + case 8: + val |= CNTL_LCDBPP8; + break; + case 16: + /* + * PL110 cannot choose between 5551 and 565 modes in its + * control register. It is possible to use 565 with + * custom external wiring. 
+ */ + if (amba_part(fb->dev) == 0x110 || + var->green.length == 5) + val |= CNTL_LCDBPP16; + else if (var->green.length == 6) + val |= CNTL_LCDBPP16_565; + else + val |= CNTL_LCDBPP16_444; + break; + case 32: + val |= CNTL_LCDBPP24; + break; + } + + regs->cntl = val; + regs->pixclock = var->pixclock; +} + +static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var) +{ + var->xres_virtual = var->xres = (var->xres + 15) & ~15; + var->yres_virtual = var->yres = (var->yres + 1) & ~1; + +#define CHECK(e,l,h) (var->e < l || var->e > h) + if (CHECK(right_margin, (5+1), 256) || /* back porch */ + CHECK(left_margin, (5+1), 256) || /* front porch */ + CHECK(hsync_len, (5+1), 256) || + var->xres > 4096 || + var->lower_margin > 255 || /* back porch */ + var->upper_margin > 255 || /* front porch */ + var->vsync_len > 32 || + var->yres > 1024) + return -EINVAL; +#undef CHECK + + /* single panel mode: PCD = max(PCD, 1) */ + /* dual panel mode: PCD = max(PCD, 5) */ + + /* + * You can't change the grayscale setting, and + * we can only do non-interlaced video. + */ + if (var->grayscale != fb->fb.var.grayscale || + (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED) + return -EINVAL; + +#define CHECK(e) (var->e != fb->fb.var.e) + if (fb->panel->fixedtimings && + (CHECK(xres) || + CHECK(yres) || + CHECK(bits_per_pixel) || + CHECK(pixclock) || + CHECK(left_margin) || + CHECK(right_margin) || + CHECK(upper_margin) || + CHECK(lower_margin) || + CHECK(hsync_len) || + CHECK(vsync_len) || + CHECK(sync))) + return -EINVAL; +#undef CHECK + + var->nonstd = 0; + var->accel_flags = 0; + + return 0; +} diff --git a/include/linux/amba/kmi.h b/include/linux/amba/kmi.h new file mode 100644 index 00000000..a39e5be7 --- /dev/null +++ b/include/linux/amba/kmi.h @@ -0,0 +1,92 @@ +/* + * linux/include/asm-arm/hardware/amba_kmi.h + * + * Internal header file for AMBA KMI ports + * + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
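/*
 * A sketch (hypothetical board file) of a clcd_board definition: the generic
 * clcdfb_check()/clcdfb_decode() helpers above can be plugged in directly,
 * while the setup/mmap/remove hooks (assumed helpers, declared but not
 * shown) handle framebuffer memory for the platform.
 */
static int example_clcd_setup(struct clcd_fb *fb);                      /* hypothetical */
static int example_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma); /* hypothetical */
static void example_clcd_remove(struct clcd_fb *fb);                    /* hypothetical */

static struct clcd_board example_clcd_board = {
        .name   = "example",
        .caps   = CLCD_CAP_565,
        .check  = clcdfb_check,
        .decode = clcdfb_decode,
        .setup  = example_clcd_setup,
        .mmap   = example_clcd_mmap,
        .remove = example_clcd_remove,
};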
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * --------------------------------------------------------------------------- + * From ARM PrimeCell(tm) PS2 Keyboard/Mouse Interface (PL050) Technical + * Reference Manual - ARM DDI 0143B - see http://www.arm.com/ + * --------------------------------------------------------------------------- + */ +#ifndef ASM_ARM_HARDWARE_AMBA_KMI_H +#define ASM_ARM_HARDWARE_AMBA_KMI_H + +/* + * KMI control register: + * KMICR_TYPE 0 = PS2/AT mode, 1 = No line control bit mode + * KMICR_RXINTREN 1 = enable RX interrupts + * KMICR_TXINTREN 1 = enable TX interrupts + * KMICR_EN 1 = enable KMI + * KMICR_FD 1 = force KMI data low + * KMICR_FC 1 = force KMI clock low + */ +#define KMICR (KMI_BASE + 0x00) +#define KMICR_TYPE (1 << 5) +#define KMICR_RXINTREN (1 << 4) +#define KMICR_TXINTREN (1 << 3) +#define KMICR_EN (1 << 2) +#define KMICR_FD (1 << 1) +#define KMICR_FC (1 << 0) + +/* + * KMI status register: + * KMISTAT_TXEMPTY 1 = transmitter register empty + * KMISTAT_TXBUSY 1 = currently sending data + * KMISTAT_RXFULL 1 = receiver register ready to be read + * KMISTAT_RXBUSY 1 = currently receiving data + * KMISTAT_RXPARITY parity of last databyte received + * KMISTAT_IC current level of KMI clock input + * KMISTAT_ID current level of KMI data input + */ +#define KMISTAT (KMI_BASE + 0x04) +#define KMISTAT_TXEMPTY (1 << 6) +#define KMISTAT_TXBUSY (1 << 5) +#define KMISTAT_RXFULL (1 << 4) +#define KMISTAT_RXBUSY (1 << 3) +#define KMISTAT_RXPARITY (1 << 2) +#define KMISTAT_IC (1 << 1) +#define KMISTAT_ID (1 << 0) + +/* + * KMI data register + */ +#define KMIDATA (KMI_BASE + 0x08) + +/* + * KMI clock divisor: to generate 8MHz internal clock + * div = (ref / 8MHz) - 1; 0 <= div <= 15 + */ +#define KMICLKDIV (KMI_BASE + 0x0c) + +/* + * KMI interrupt register: + * KMIIR_TXINTR 1 = transmit interrupt asserted + * KMIIR_RXINTR 1 = receive interrupt asserted + */ +#define KMIIR (KMI_BASE + 0x10) +#define KMIIR_TXINTR (1 << 1) +#define KMIIR_RXINTR (1 << 0) + +/* + * The size of the KMI primecell + */ +#define KMI_SIZE (0x100) + +#endif diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h new file mode 100644 index 00000000..9d1d9caf --- /dev/null +++ b/include/linux/amba/mmci.h @@ -0,0 +1,90 @@ +/* + * include/linux/amba/mmci.h + */ +#ifndef AMBA_MMCI_H +#define AMBA_MMCI_H + +#include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio_func.h> + +struct embedded_sdio_data { + struct sdio_cis cis; + struct sdio_cccr cccr; + struct sdio_embedded_func *funcs; + int num_funcs; +}; + + +/* + * These defines is places here due to access is needed from machine + * configuration files. The ST Micro version does not have ROD and + * reuse the voltage registers for direction settings. + */ +#define MCI_ST_DATA2DIREN (1 << 2) +#define MCI_ST_CMDDIREN (1 << 3) +#define MCI_ST_DATA0DIREN (1 << 4) +#define MCI_ST_DATA31DIREN (1 << 5) +#define MCI_ST_FBCLKEN (1 << 7) +#define MCI_ST_DATA74DIREN (1 << 8) + +/* Just some dummy forwarding */ +struct dma_chan; + +/** + * struct mmci_platform_data - platform configuration for the MMCI + * (also known as PL180) block. + * @f_max: the maximum operational frequency for this host in this + * platform configuration. When this is specified it takes precedence + * over the module parameter for the same frequency. 
+ * @ocr_mask: available voltages on the 4 pins from the block, this + * is ignored if a regulator is used, see the MMC_VDD_* masks in + * mmc/host.h + * @ios_handler: a callback function to act on specfic ios changes, + * used for example to control a levelshifter + * mask into a value to be binary (or set some other custom bits + * in MMCIPWR) or:ed and written into the MMCIPWR register of the + * block. May also control external power based on the power_mode. + * @status: if no GPIO read function was given to the block in + * gpio_wp (below) this function will be called to determine + * whether a card is present in the MMC slot or not + * @gpio_wp: read this GPIO pin to see if the card is write protected + * @gpio_cd: read this GPIO pin to detect card insertion + * @cd_invert: true if the gpio_cd pin value is active low + * @capabilities: the capabilities of the block as implemented in + * this platform, signify anything MMC_CAP_* from mmc/host.h + * @capabilities2: more capabilities, MMC_CAP2_* from mmc/host.h + * @sigdir: a bit field indicating for what bits in the MMC bus the host + * should enable signal direction indication. + * @dma_filter: function used to select an appropriate RX and TX + * DMA channel to be used for DMA, if and only if you're deploying the + * generic DMA engine + * @dma_rx_param: parameter passed to the DMA allocation + * filter in order to select an appropriate RX channel. If + * there is a bidirectional RX+TX channel, then just specify + * this and leave dma_tx_param set to NULL + * @dma_tx_param: parameter passed to the DMA allocation + * filter in order to select an appropriate TX channel. If this + * is NULL the driver will attempt to use the RX channel as a + * bidirectional channel + */ +struct mmci_platform_data { + unsigned int f_max; + unsigned int ocr_mask; + int (*ios_handler)(struct device *, struct mmc_ios *); + unsigned int (*status)(struct device *); + int gpio_wp; + int gpio_cd; + bool cd_invert; + unsigned long capabilities; + unsigned long capabilities2; + u32 sigdir; + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); + void *dma_rx_param; + void *dma_tx_param; + unsigned int status_irq; + struct embedded_sdio_data *embedded_sdio; + int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id); +}; + +#endif diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h new file mode 100644 index 00000000..76dd1b19 --- /dev/null +++ b/include/linux/amba/pl022.h @@ -0,0 +1,300 @@ +/* + * include/linux/amba/pl022.h + * + * Copyright (C) 2008-2009 ST-Ericsson AB + * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. + * + * Author: Linus Walleij <linus.walleij@stericsson.com> + * + * Initial version inspired by: + * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c + * Initial adoption to PL022 by: + * Sachin Verma <sachin.verma@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
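/*
 * A sketch (hypothetical board file) of the mmci_platform_data documented
 * above: a fixed OCR mask, card-detect/write-protect GPIOs and a 4-bit bus.
 * The GPIO numbers and maximum frequency are made up for illustration.
 */
static struct mmci_platform_data example_mmci_pdata = {
        .f_max          = 25000000,
        .ocr_mask       = MMC_VDD_32_33 | MMC_VDD_33_34,
        .gpio_cd        = 112,
        .gpio_wp        = 113,
        .cd_invert      = true,
        .capabilities   = MMC_CAP_4_BIT_DATA,
};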
+ */ + +#ifndef _SSP_PL022_H +#define _SSP_PL022_H + +#include <linux/types.h> + +/** + * whether SSP is in loopback mode or not + */ +enum ssp_loopback { + LOOPBACK_DISABLED, + LOOPBACK_ENABLED +}; + +/** + * enum ssp_interface - interfaces allowed for this SSP Controller + * @SSP_INTERFACE_MOTOROLA_SPI: Motorola Interface + * @SSP_INTERFACE_TI_SYNC_SERIAL: Texas Instrument Synchronous Serial + * interface + * @SSP_INTERFACE_NATIONAL_MICROWIRE: National Semiconductor Microwire + * interface + * @SSP_INTERFACE_UNIDIRECTIONAL: Unidirectional interface (STn8810 + * &STn8815 only) + */ +enum ssp_interface { + SSP_INTERFACE_MOTOROLA_SPI, + SSP_INTERFACE_TI_SYNC_SERIAL, + SSP_INTERFACE_NATIONAL_MICROWIRE, + SSP_INTERFACE_UNIDIRECTIONAL +}; + +/** + * enum ssp_hierarchy - whether SSP is configured as Master or Slave + */ +enum ssp_hierarchy { + SSP_MASTER, + SSP_SLAVE +}; + +/** + * enum ssp_clock_params - clock parameters, to set SSP clock at a + * desired freq + */ +struct ssp_clock_params { + u8 cpsdvsr; /* value from 2 to 254 (even only!) */ + u8 scr; /* value from 0 to 255 */ +}; + +/** + * enum ssp_rx_endian - endianess of Rx FIFO Data + * this feature is only available in ST versionf of PL022 + */ +enum ssp_rx_endian { + SSP_RX_MSB, + SSP_RX_LSB +}; + +/** + * enum ssp_tx_endian - endianess of Tx FIFO Data + */ +enum ssp_tx_endian { + SSP_TX_MSB, + SSP_TX_LSB +}; + +/** + * enum ssp_data_size - number of bits in one data element + */ +enum ssp_data_size { + SSP_DATA_BITS_4 = 0x03, SSP_DATA_BITS_5, SSP_DATA_BITS_6, + SSP_DATA_BITS_7, SSP_DATA_BITS_8, SSP_DATA_BITS_9, + SSP_DATA_BITS_10, SSP_DATA_BITS_11, SSP_DATA_BITS_12, + SSP_DATA_BITS_13, SSP_DATA_BITS_14, SSP_DATA_BITS_15, + SSP_DATA_BITS_16, SSP_DATA_BITS_17, SSP_DATA_BITS_18, + SSP_DATA_BITS_19, SSP_DATA_BITS_20, SSP_DATA_BITS_21, + SSP_DATA_BITS_22, SSP_DATA_BITS_23, SSP_DATA_BITS_24, + SSP_DATA_BITS_25, SSP_DATA_BITS_26, SSP_DATA_BITS_27, + SSP_DATA_BITS_28, SSP_DATA_BITS_29, SSP_DATA_BITS_30, + SSP_DATA_BITS_31, SSP_DATA_BITS_32 +}; + +/** + * enum ssp_mode - SSP mode of operation (Communication modes) + */ +enum ssp_mode { + INTERRUPT_TRANSFER, + POLLING_TRANSFER, + DMA_TRANSFER +}; + +/** + * enum ssp_rx_level_trig - receive FIFO watermark level which triggers + * IT: Interrupt fires when _N_ or more elements in RX FIFO. 
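/*
 * The two fields of struct ssp_clock_params above set the SSP bit rate; per
 * the PL022 TRM (not this header) the relationship is
 * rate = sspclk / (cpsdvsr * (1 + scr)), with cpsdvsr even and >= 2.
 * A small sketch of that arithmetic:
 */
static inline unsigned long example_ssp_rate(unsigned long sspclk,
                                             const struct ssp_clock_params *p)
{
        return sspclk / (p->cpsdvsr * (1 + p->scr));
}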
+ */ +enum ssp_rx_level_trig { + SSP_RX_1_OR_MORE_ELEM, + SSP_RX_4_OR_MORE_ELEM, + SSP_RX_8_OR_MORE_ELEM, + SSP_RX_16_OR_MORE_ELEM, + SSP_RX_32_OR_MORE_ELEM +}; + +/** + * Transmit FIFO watermark level which triggers (IT Interrupt fires + * when _N_ or more empty locations in TX FIFO) + */ +enum ssp_tx_level_trig { + SSP_TX_1_OR_MORE_EMPTY_LOC, + SSP_TX_4_OR_MORE_EMPTY_LOC, + SSP_TX_8_OR_MORE_EMPTY_LOC, + SSP_TX_16_OR_MORE_EMPTY_LOC, + SSP_TX_32_OR_MORE_EMPTY_LOC +}; + +/** + * enum SPI Clock Phase - clock phase (Motorola SPI interface only) + * @SSP_CLK_FIRST_EDGE: Receive data on first edge transition (actual direction depends on polarity) + * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity) + */ +enum ssp_spi_clk_phase { + SSP_CLK_FIRST_EDGE, + SSP_CLK_SECOND_EDGE +}; + +/** + * enum SPI Clock Polarity - clock polarity (Motorola SPI interface only) + * @SSP_CLK_POL_IDLE_LOW: Low inactive level + * @SSP_CLK_POL_IDLE_HIGH: High inactive level + */ +enum ssp_spi_clk_pol { + SSP_CLK_POL_IDLE_LOW, + SSP_CLK_POL_IDLE_HIGH +}; + +/** + * Microwire Conrol Lengths Command size in microwire format + */ +enum ssp_microwire_ctrl_len { + SSP_BITS_4 = 0x03, SSP_BITS_5, SSP_BITS_6, + SSP_BITS_7, SSP_BITS_8, SSP_BITS_9, + SSP_BITS_10, SSP_BITS_11, SSP_BITS_12, + SSP_BITS_13, SSP_BITS_14, SSP_BITS_15, + SSP_BITS_16, SSP_BITS_17, SSP_BITS_18, + SSP_BITS_19, SSP_BITS_20, SSP_BITS_21, + SSP_BITS_22, SSP_BITS_23, SSP_BITS_24, + SSP_BITS_25, SSP_BITS_26, SSP_BITS_27, + SSP_BITS_28, SSP_BITS_29, SSP_BITS_30, + SSP_BITS_31, SSP_BITS_32 +}; + +/** + * enum Microwire Wait State + * @SSP_MWIRE_WAIT_ZERO: No wait state inserted after last command bit + * @SSP_MWIRE_WAIT_ONE: One wait state inserted after last command bit + */ +enum ssp_microwire_wait_state { + SSP_MWIRE_WAIT_ZERO, + SSP_MWIRE_WAIT_ONE +}; + +/** + * enum ssp_duplex - whether Full/Half Duplex on microwire, only + * available in the ST Micro variant. + * @SSP_MICROWIRE_CHANNEL_FULL_DUPLEX: SSPTXD becomes bi-directional, + * SSPRXD not used + * @SSP_MICROWIRE_CHANNEL_HALF_DUPLEX: SSPTXD is an output, SSPRXD is + * an input. + */ +enum ssp_duplex { + SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX +}; + +/** + * enum ssp_clkdelay - an optional clock delay on the feedback clock + * only available in the ST Micro PL023 variant. + * @SSP_FEEDBACK_CLK_DELAY_NONE: no delay, the data coming in from the + * slave is sampled directly + * @SSP_FEEDBACK_CLK_DELAY_1T: the incoming slave data is sampled with + * a delay of T-dt + * @SSP_FEEDBACK_CLK_DELAY_2T: dito with a delay if 2T-dt + * @SSP_FEEDBACK_CLK_DELAY_3T: dito with a delay if 3T-dt + * @SSP_FEEDBACK_CLK_DELAY_4T: dito with a delay if 4T-dt + * @SSP_FEEDBACK_CLK_DELAY_5T: dito with a delay if 5T-dt + * @SSP_FEEDBACK_CLK_DELAY_6T: dito with a delay if 6T-dt + * @SSP_FEEDBACK_CLK_DELAY_7T: dito with a delay if 7T-dt + */ +enum ssp_clkdelay { + SSP_FEEDBACK_CLK_DELAY_NONE, + SSP_FEEDBACK_CLK_DELAY_1T, + SSP_FEEDBACK_CLK_DELAY_2T, + SSP_FEEDBACK_CLK_DELAY_3T, + SSP_FEEDBACK_CLK_DELAY_4T, + SSP_FEEDBACK_CLK_DELAY_5T, + SSP_FEEDBACK_CLK_DELAY_6T, + SSP_FEEDBACK_CLK_DELAY_7T +}; + +/** + * CHIP select/deselect commands + */ +enum ssp_chip_select { + SSP_CHIP_SELECT, + SSP_CHIP_DESELECT +}; + + +struct dma_chan; +/** + * struct pl022_ssp_master - device.platform_data for SPI controller devices. + * @num_chipselect: chipselects are used to distinguish individual + * SPI slaves, and are numbered from zero to num_chipselects - 1. 
+ * each slave has a chipselect signal, but it's common that not + * every chipselect is connected to a slave. + * @enable_dma: if true enables DMA driven transfers. + * @dma_rx_param: parameter to locate an RX DMA channel. + * @dma_tx_param: parameter to locate a TX DMA channel. + * @autosuspend_delay: delay in ms following transfer completion before the + * runtime power management system suspends the device. A setting of 0 + * indicates no delay and the device will be suspended immediately. + * @rt: indicates the controller should run the message pump with realtime + * priority to minimise the transfer latency on the bus. + */ +struct pl022_ssp_controller { + u16 bus_id; + u8 num_chipselect; + u8 enable_dma:1; + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); + void *dma_rx_param; + void *dma_tx_param; + int autosuspend_delay; + bool rt; +}; + +/** + * struct ssp_config_chip - spi_board_info.controller_data for SPI + * slave devices, copied to spi_device.controller_data. + * + * @lbm: used for test purpose to internally connect RX and TX + * @iface: Interface type(Motorola, TI, Microwire, Universal) + * @hierarchy: sets whether interface is master or slave + * @slave_tx_disable: SSPTXD is disconnected (in slave mode only) + * @clk_freq: Tune freq parameters of SSP(when in master mode) + * @endian_rx: Endianess of Data in Rx FIFO + * @endian_tx: Endianess of Data in Tx FIFO + * @data_size: Width of data element(4 to 32 bits) + * @com_mode: communication mode: polling, Interrupt or DMA + * @rx_lev_trig: Rx FIFO watermark level (for IT & DMA mode) + * @tx_lev_trig: Tx FIFO watermark level (for IT & DMA mode) + * @clk_phase: Motorola SPI interface Clock phase + * @clk_pol: Motorola SPI interface Clock polarity + * @ctrl_len: Microwire interface: Control length + * @wait_state: Microwire interface: Wait state + * @duplex: Microwire interface: Full/Half duplex + * @clkdelay: on the PL023 variant, the delay in feeback clock cycles + * before sampling the incoming line + * @cs_control: function pointer to board-specific function to + * assert/deassert I/O port to control HW generation of devices chip-select. + * @dma_xfer_type: Type of DMA xfer (Mem-to-periph or Periph-to-Periph) + * @dma_config: DMA configuration for SSP controller and peripheral + */ +struct pl022_config_chip { + enum ssp_interface iface; + enum ssp_hierarchy hierarchy; + bool slave_tx_disable; + struct ssp_clock_params clk_freq; + enum ssp_mode com_mode; + enum ssp_rx_level_trig rx_lev_trig; + enum ssp_tx_level_trig tx_lev_trig; + enum ssp_microwire_ctrl_len ctrl_len; + enum ssp_microwire_wait_state wait_state; + enum ssp_duplex duplex; + enum ssp_clkdelay clkdelay; + void (*cs_control) (u32 control); +}; + +#endif /* _SSP_PL022_H */ diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h new file mode 100644 index 00000000..fb83c045 --- /dev/null +++ b/include/linux/amba/pl061.h @@ -0,0 +1,16 @@ +#include <linux/types.h> + +/* platform data for the PL061 GPIO driver */ + +struct pl061_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + /* number of the first IRQ. + * If the IRQ functionality in not desired this must be set to 0. 
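/*
 * A sketch (hypothetical board file) tying the two structures above
 * together: pl022_ssp_controller is handed to the PL022 AMBA device as
 * platform data, while pl022_config_chip is referenced from
 * spi_board_info.controller_data for each slave.  All values here are
 * illustrative.
 */
static struct pl022_ssp_controller example_ssp_plat = {
        .bus_id         = 0,
        .num_chipselect = 1,
        .enable_dma     = 0,
};

static void example_cs_control(u32 command)
{
        /* drive a GPIO chip select; command is SSP_CHIP_SELECT/DESELECT */
}

static struct pl022_config_chip example_chip_info = {
        .iface          = SSP_INTERFACE_MOTOROLA_SPI,
        .hierarchy      = SSP_MASTER,
        .clk_freq       = { .cpsdvsr = 2, .scr = 8 },
        .com_mode       = INTERRUPT_TRANSFER,
        .rx_lev_trig    = SSP_RX_1_OR_MORE_ELEM,
        .tx_lev_trig    = SSP_TX_1_OR_MORE_EMPTY_LOC,
        .cs_control     = example_cs_control,
};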
+ */ + unsigned irq_base; + + u8 directions; /* startup directions, 1: out, 0: in */ + u8 values; /* startup values */ +}; diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h new file mode 100644 index 00000000..e64ce2cf --- /dev/null +++ b/include/linux/amba/pl08x.h @@ -0,0 +1,244 @@ +/* + * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver + * + * Copyright (C) 2005 ARM Ltd + * Copyright (C) 2010 ST-Ericsson SA + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * pl08x information required by platform code + * + * Please credit ARM.com + * Documentation: ARM DDI 0196D + */ + +#ifndef AMBA_PL08X_H +#define AMBA_PL08X_H + +/* We need sizes of structs from this header */ +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +struct pl08x_lli; +struct pl08x_driver_data; + +/* Bitmasks for selecting AHB ports for DMA transfers */ +enum { + PL08X_AHB1 = (1 << 0), + PL08X_AHB2 = (1 << 1) +}; + +/** + * struct pl08x_channel_data - data structure to pass info between + * platform and PL08x driver regarding channel configuration + * @bus_id: name of this device channel, not just a device name since + * devices may have more than one channel e.g. "foo_tx" + * @min_signal: the minimum DMA signal number to be muxed in for this + * channel (for platforms supporting muxed signals). If you have + * static assignments, make sure this is set to the assigned signal + * number, PL08x have 16 possible signals in number 0 thru 15 so + * when these are not enough they often get muxed (in hardware) + * disabling simultaneous use of the same channel for two devices. + * @max_signal: the maximum DMA signal number to be muxed in for + * the channel. Set to the same as min_signal for + * devices with static assignments + * @muxval: a number usually used to poke into some mux regiser to + * mux in the signal to this channel + * @cctl_opt: default options for the channel control register + * @addr: source/target address in physical memory for this DMA channel, + * can be the address of a FIFO register for burst requests for example. + * This can be left undefined if the PrimeCell API is used for configuring + * this. + * @circular_buffer: whether the buffer passed in is circular and + * shall simply be looped round round (like a record baby round + * round round round) + * @single: the device connected to this channel will request single DMA + * transfers, not bursts. (Bursts are default.) + * @periph_buses: the device connected to this channel is accessible via + * these buses (use PL08X_AHB1 | PL08X_AHB2). 
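+ *
+ * A sketch of one slave channel description, for illustration only (the
+ * bus_id string, signal numbers and FIFO address below are hypothetical,
+ * not taken from any real platform):
+ *
+ *	static const struct pl08x_channel_data board_slave_channels[] = {
+ *		{
+ *			.bus_id		= "uart0_tx",
+ *			.min_signal	= 0,
+ *			.max_signal	= 0,
+ *			.addr		= 0x80120000,
+ *			.single		= true,
+ *			.periph_buses	= PL08X_AHB2,
+ *		},
+ *	};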
+ */ +struct pl08x_channel_data { + char *bus_id; + int min_signal; + int max_signal; + u32 muxval; + u32 cctl; + dma_addr_t addr; + bool circular_buffer; + bool single; + u8 periph_buses; +}; + +/** + * Struct pl08x_bus_data - information of source or destination + * busses for a transfer + * @addr: current address + * @maxwidth: the maximum width of a transfer on this bus + * @buswidth: the width of this bus in bytes: 1, 2 or 4 + */ +struct pl08x_bus_data { + dma_addr_t addr; + u8 maxwidth; + u8 buswidth; +}; + +/** + * struct pl08x_phy_chan - holder for the physical channels + * @id: physical index to this channel + * @lock: a lock to use when altering an instance of this struct + * @signal: the physical signal (aka channel) serving this physical channel + * right now + * @serving: the virtual channel currently being served by this physical + * channel + */ +struct pl08x_phy_chan { + unsigned int id; + void __iomem *base; + spinlock_t lock; + int signal; + struct pl08x_dma_chan *serving; +}; + +/** + * struct pl08x_sg - structure containing data per sg + * @src_addr: src address of sg + * @dst_addr: dst address of sg + * @len: transfer len in bytes + * @node: node for txd's dsg_list + */ +struct pl08x_sg { + dma_addr_t src_addr; + dma_addr_t dst_addr; + size_t len; + struct list_head node; +}; + +/** + * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor + * @tx: async tx descriptor + * @node: node for txd list for channels + * @dsg_list: list of children sg's + * @direction: direction of transfer + * @llis_bus: DMA memory address (physical) start for the LLIs + * @llis_va: virtual memory address start for the LLIs + * @cctl: control reg values for current txd + * @ccfg: config reg values for current txd + */ +struct pl08x_txd { + struct dma_async_tx_descriptor tx; + struct list_head node; + struct list_head dsg_list; + enum dma_transfer_direction direction; + dma_addr_t llis_bus; + struct pl08x_lli *llis_va; + /* Default cctl value for LLIs */ + u32 cctl; + /* + * Settings to be put into the physical channel when we + * trigger this txd. Other registers are in llis_va[0]. 
+ */ + u32 ccfg; +}; + +/** + * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel + * states + * @PL08X_CHAN_IDLE: the channel is idle + * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport + * channel and is running a transfer on it + * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport + * channel, but the transfer is currently paused + * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport + * channel to become available (only pertains to memcpy channels) + */ +enum pl08x_dma_chan_state { + PL08X_CHAN_IDLE, + PL08X_CHAN_RUNNING, + PL08X_CHAN_PAUSED, + PL08X_CHAN_WAITING, +}; + +/** + * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel + * @chan: wrappped abstract channel + * @phychan: the physical channel utilized by this channel, if there is one + * @phychan_hold: if non-zero, hold on to the physical channel even if we + * have no pending entries + * @tasklet: tasklet scheduled by the IRQ to handle actual work etc + * @name: name of channel + * @cd: channel platform data + * @runtime_addr: address for RX/TX according to the runtime config + * @runtime_direction: current direction of this channel according to + * runtime config + * @pend_list: queued transactions pending on this channel + * @at: active transaction on this channel + * @lock: a lock for this channel data + * @host: a pointer to the host (internal use) + * @state: whether the channel is idle, paused, running etc + * @slave: whether this channel is a device (slave) or for memcpy + * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave + * channels. Fill with 'true' if peripheral should be flow controller. Direction + * will be selected at Runtime. + * @waiting: a TX descriptor on this channel which is waiting for a physical + * channel to become available + */ +struct pl08x_dma_chan { + struct dma_chan chan; + struct pl08x_phy_chan *phychan; + int phychan_hold; + struct tasklet_struct tasklet; + char *name; + const struct pl08x_channel_data *cd; + dma_addr_t src_addr; + dma_addr_t dst_addr; + u32 src_cctl; + u32 dst_cctl; + enum dma_transfer_direction runtime_direction; + struct list_head pend_list; + struct pl08x_txd *at; + spinlock_t lock; + struct pl08x_driver_data *host; + enum pl08x_dma_chan_state state; + bool slave; + bool device_fc; + struct pl08x_txd *waiting; +}; + +/** + * struct pl08x_platform_data - the platform configuration for the PL08x + * PrimeCells. + * @slave_channels: the channels defined for the different devices on the + * platform, all inclusive, including multiplexed channels. The available + * physical channels will be multiplexed around these signals as they are + * requested, just enumerate all possible channels. 
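+ *
+ * A board might tie everything together roughly like this (illustrative
+ * only; board_slave_channels[] is the hypothetical array sketched in the
+ * pl08x_channel_data comment above, and the two signal callbacks are
+ * board-specific stubs):
+ *
+ *	static struct pl08x_platform_data board_pl08x_pdata = {
+ *		.slave_channels		= board_slave_channels,
+ *		.num_slave_channels	= ARRAY_SIZE(board_slave_channels),
+ *		.get_signal		= board_get_signal,
+ *		.put_signal		= board_put_signal,
+ *		.lli_buses		= PL08X_AHB1,
+ *		.mem_buses		= PL08X_AHB1,
+ *	};
+ *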
+ * @get_signal: request a physical signal to be used for a DMA transfer + * immediately: if there is some multiplexing or similar blocking the use + * of the channel the transfer can be denied by returning less than zero, + * else it returns the allocated signal number + * @put_signal: indicate to the platform that this physical signal is not + * running any DMA transfer and multiplexing can be recycled + * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 + * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 + */ +struct pl08x_platform_data { + const struct pl08x_channel_data *slave_channels; + unsigned int num_slave_channels; + struct pl08x_channel_data memcpy_channel; + int (*get_signal)(struct pl08x_dma_chan *); + void (*put_signal)(struct pl08x_dma_chan *); + u8 lli_buses; + u8 mem_buses; +}; + +#ifdef CONFIG_AMBA_PL08X +bool pl08x_filter_id(struct dma_chan *chan, void *chan_id); +#else +static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) +{ + return false; +} +#endif + +#endif /* AMBA_PL08X_H */ diff --git a/include/linux/amba/pl093.h b/include/linux/amba/pl093.h new file mode 100644 index 00000000..2983e367 --- /dev/null +++ b/include/linux/amba/pl093.h @@ -0,0 +1,80 @@ +/* linux/amba/pl093.h + * + * Copyright (c) 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * AMBA PL093 SSMC (synchronous static memory controller) + * See DDI0236.pdf (r0p4) for more details + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */ + +/* Offsets for SMBxxxxRy registers */ + +#define SMBIDCYR (0x00) +#define SMBWSTRDR (0x04) +#define SMBWSTWRR (0x08) +#define SMBWSTOENR (0x0C) +#define SMBWSTWENR (0x10) +#define SMBCR (0x14) +#define SMBSR (0x18) +#define SMBWSTBRDR (0x1C) + +/* Masks for SMB registers */ +#define IDCY_MASK (0xf) +#define WSTRD_MASK (0xf) +#define WSTWR_MASK (0xf) +#define WSTOEN_MASK (0xf) +#define WSTWEN_MASK (0xf) + +/* Notes from datasheet: + * WSTOEN <= WSTRD + * WSTWEN <= WSTWR + * + * WSTOEN is not used with nWAIT + */ + +/* SMBCR bit definitions */ +#define SMBCR_BIWRITEEN (1 << 21) +#define SMBCR_ADDRVALIDWRITEEN (1 << 20) +#define SMBCR_SYNCWRITE (1 << 17) +#define SMBCR_BMWRITE (1 << 16) +#define SMBCR_WRAPREAD (1 << 14) +#define SMBCR_BIREADEN (1 << 13) +#define SMBCR_ADDRVALIDREADEN (1 << 12) +#define SMBCR_SYNCREAD (1 << 9) +#define SMBCR_BMREAD (1 << 8) +#define SMBCR_SMBLSPOL (1 << 6) +#define SMBCR_WP (1 << 3) +#define SMBCR_WAITEN (1 << 2) +#define SMBCR_WAITPOL (1 << 1) +#define SMBCR_RBLE (1 << 0) + +#define SMBCR_BURSTLENWRITE_MASK (3 << 18) +#define SMBCR_BURSTLENWRITE_4 (0 << 18) +#define SMBCR_BURSTLENWRITE_8 (1 << 18) +#define SMBCR_BURSTLENWRITE_RESERVED (2 << 18) +#define SMBCR_BURSTLENWRITE_CONTINUOUS (3 << 18) + +#define SMBCR_BURSTLENREAD_MASK (3 << 10) +#define SMBCR_BURSTLENREAD_4 (0 << 10) +#define SMBCR_BURSTLENREAD_8 (1 << 10) +#define SMBCR_BURSTLENREAD_16 (2 << 10) +#define SMBCR_BURSTLENREAD_CONTINUOUS (3 << 10) + +#define SMBCR_MW_MASK (3 << 4) +#define SMBCR_MW_8BIT (0 << 4) +#define SMBCR_MW_16BIT (1 << 4) +#define SMBCR_MW_M32BIT (2 << 4) + +/* SSMC status registers */ +#define SSMCCSR (0x200) +#define SSMCCR (0x204) +#define SSMCITCR (0x208) +#define SSMCITIP (0x20C) +#define SSMCITIOP (0x210) diff --git 
a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h new file mode 100644 index 00000000..fe93758e --- /dev/null +++ b/include/linux/amba/pl330.h @@ -0,0 +1,35 @@ +/* linux/include/linux/amba/pl330.h + * + * Copyright (C) 2010 Samsung Electronics Co. Ltd. + * Jaswinder Singh <jassi.brar@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __AMBA_PL330_H_ +#define __AMBA_PL330_H_ + +#include <linux/dmaengine.h> + +struct dma_pl330_platdata { + /* + * Number of valid peripherals connected to DMAC. + * This may be different from the value read from + * CR0, as the PL330 implementation might have 'holes' + * in the peri list or the peri could also be reached + * from another DMAC which the platform prefers. + */ + u8 nr_valid_peri; + /* Array of valid peripherals */ + u8 *peri_id; + /* Operational capabilities */ + dma_cap_mask_t cap_mask; + /* Bytes to allocate for MC buffer */ + unsigned mcbuf_sz; +}; + +extern bool pl330_filter(struct dma_chan *chan, void *param); +#endif /* __AMBA_PL330_H_ */ diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h new file mode 100644 index 00000000..d117b29d --- /dev/null +++ b/include/linux/amba/serial.h @@ -0,0 +1,212 @@ +/* + * linux/include/asm-arm/hardware/serial_amba.h + * + * Internal header file for AMBA serial ports + * + * Copyright (C) ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H +#define ASM_ARM_HARDWARE_SERIAL_AMBA_H + +#include <linux/types.h> + +/* ------------------------------------------------------------------------------- + * From AMBA UART (PL010) Block Specification + * ------------------------------------------------------------------------------- + * UART Register Offsets. + */ +#define UART01x_DR 0x00 /* Data read or written from the interface. */ +#define UART01x_RSR 0x04 /* Receive status register (Read). */ +#define UART01x_ECR 0x04 /* Error clear register (Write). */ +#define UART010_LCRH 0x08 /* Line control register, high byte. */ +#define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */ +#define UART010_LCRM 0x0C /* Line control register, middle byte. */ +#define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */ +#define UART010_LCRL 0x10 /* Line control register, low byte. */ +#define UART010_CR 0x14 /* Control register. */ +#define UART01x_FR 0x18 /* Flag register (Read only). */ +#define UART010_IIR 0x1C /* Interrupt indentification register (Read). */ +#define UART010_ICR 0x1C /* Interrupt clear register (Write). */ +#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. 
*/ +#define UART01x_ILPR 0x20 /* IrDA low power counter register. */ +#define UART011_IBRD 0x24 /* Integer baud rate divisor register. */ +#define UART011_FBRD 0x28 /* Fractional baud rate divisor register. */ +#define UART011_LCRH 0x2c /* Line control register. */ +#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */ +#define UART011_CR 0x30 /* Control register. */ +#define UART011_IFLS 0x34 /* Interrupt fifo level select. */ +#define UART011_IMSC 0x38 /* Interrupt mask. */ +#define UART011_RIS 0x3c /* Raw interrupt status. */ +#define UART011_MIS 0x40 /* Masked interrupt status. */ +#define UART011_ICR 0x44 /* Interrupt clear register. */ +#define UART011_DMACR 0x48 /* DMA control register. */ +#define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */ +#define ST_UART011_XON1 0x54 /* XON1 register. */ +#define ST_UART011_XON2 0x58 /* XON2 register. */ +#define ST_UART011_XOFF1 0x5C /* XON1 register. */ +#define ST_UART011_XOFF2 0x60 /* XON2 register. */ +#define ST_UART011_ITCR 0x80 /* Integration test control register. */ +#define ST_UART011_ITIP 0x84 /* Integration test input register. */ +#define ST_UART011_ABCR 0x100 /* Autobaud control register. */ +#define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */ + +#define UART011_DR_OE (1 << 11) +#define UART011_DR_BE (1 << 10) +#define UART011_DR_PE (1 << 9) +#define UART011_DR_FE (1 << 8) + +#define UART01x_RSR_OE 0x08 +#define UART01x_RSR_BE 0x04 +#define UART01x_RSR_PE 0x02 +#define UART01x_RSR_FE 0x01 + +#define UART011_FR_RI 0x100 +#define UART011_FR_TXFE 0x080 +#define UART011_FR_RXFF 0x040 +#define UART01x_FR_TXFF 0x020 +#define UART01x_FR_RXFE 0x010 +#define UART01x_FR_BUSY 0x008 +#define UART01x_FR_DCD 0x004 +#define UART01x_FR_DSR 0x002 +#define UART01x_FR_CTS 0x001 +#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY) + +#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */ +#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */ +#define UART011_CR_OUT2 0x2000 /* OUT2 */ +#define UART011_CR_OUT1 0x1000 /* OUT1 */ +#define UART011_CR_RTS 0x0800 /* RTS */ +#define UART011_CR_DTR 0x0400 /* DTR */ +#define UART011_CR_RXE 0x0200 /* receive enable */ +#define UART011_CR_TXE 0x0100 /* transmit enable */ +#define UART011_CR_LBE 0x0080 /* loopback enable */ +#define UART010_CR_RTIE 0x0040 +#define UART010_CR_TIE 0x0020 +#define UART010_CR_RIE 0x0010 +#define UART010_CR_MSIE 0x0008 +#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */ +#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */ +#define UART01x_CR_SIREN 0x0002 /* SIR enable */ +#define UART01x_CR_UARTEN 0x0001 /* UART enable */ + +#define UART011_LCRH_SPS 0x80 +#define UART01x_LCRH_WLEN_8 0x60 +#define UART01x_LCRH_WLEN_7 0x40 +#define UART01x_LCRH_WLEN_6 0x20 +#define UART01x_LCRH_WLEN_5 0x00 +#define UART01x_LCRH_FEN 0x10 +#define UART01x_LCRH_STP2 0x08 +#define UART01x_LCRH_EPS 0x04 +#define UART01x_LCRH_PEN 0x02 +#define UART01x_LCRH_BRK 0x01 + +#define ST_UART011_DMAWM_RX_1 (0 << 3) +#define ST_UART011_DMAWM_RX_2 (1 << 3) +#define ST_UART011_DMAWM_RX_4 (2 << 3) +#define ST_UART011_DMAWM_RX_8 (3 << 3) +#define ST_UART011_DMAWM_RX_16 (4 << 3) +#define ST_UART011_DMAWM_RX_32 (5 << 3) +#define ST_UART011_DMAWM_RX_48 (6 << 3) +#define ST_UART011_DMAWM_TX_1 0 +#define ST_UART011_DMAWM_TX_2 1 +#define ST_UART011_DMAWM_TX_4 2 +#define ST_UART011_DMAWM_TX_8 3 +#define ST_UART011_DMAWM_TX_16 4 +#define ST_UART011_DMAWM_TX_32 5 +#define ST_UART011_DMAWM_TX_48 6 + +#define UART010_IIR_RTIS 0x08 +#define 
UART010_IIR_TIS 0x04 +#define UART010_IIR_RIS 0x02 +#define UART010_IIR_MIS 0x01 + +#define UART011_IFLS_RX1_8 (0 << 3) +#define UART011_IFLS_RX2_8 (1 << 3) +#define UART011_IFLS_RX4_8 (2 << 3) +#define UART011_IFLS_RX6_8 (3 << 3) +#define UART011_IFLS_RX7_8 (4 << 3) +#define UART011_IFLS_TX1_8 (0 << 0) +#define UART011_IFLS_TX2_8 (1 << 0) +#define UART011_IFLS_TX4_8 (2 << 0) +#define UART011_IFLS_TX6_8 (3 << 0) +#define UART011_IFLS_TX7_8 (4 << 0) +/* special values for ST vendor with deeper fifo */ +#define UART011_IFLS_RX_HALF (5 << 3) +#define UART011_IFLS_TX_HALF (5 << 0) + +#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */ +#define UART011_BEIM (1 << 9) /* break error interrupt mask */ +#define UART011_PEIM (1 << 8) /* parity error interrupt mask */ +#define UART011_FEIM (1 << 7) /* framing error interrupt mask */ +#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */ +#define UART011_TXIM (1 << 5) /* transmit interrupt mask */ +#define UART011_RXIM (1 << 4) /* receive interrupt mask */ +#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */ +#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */ +#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */ +#define UART011_RIMIM (1 << 0) /* RI interrupt mask */ + +#define UART011_OEIS (1 << 10) /* overrun error interrupt status */ +#define UART011_BEIS (1 << 9) /* break error interrupt status */ +#define UART011_PEIS (1 << 8) /* parity error interrupt status */ +#define UART011_FEIS (1 << 7) /* framing error interrupt status */ +#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */ +#define UART011_TXIS (1 << 5) /* transmit interrupt status */ +#define UART011_RXIS (1 << 4) /* receive interrupt status */ +#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */ +#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */ +#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */ +#define UART011_RIMIS (1 << 0) /* RI interrupt status */ + +#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */ +#define UART011_BEIC (1 << 9) /* break error interrupt clear */ +#define UART011_PEIC (1 << 8) /* parity error interrupt clear */ +#define UART011_FEIC (1 << 7) /* framing error interrupt clear */ +#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */ +#define UART011_TXIC (1 << 5) /* transmit interrupt clear */ +#define UART011_RXIC (1 << 4) /* receive interrupt clear */ +#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */ +#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */ +#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */ +#define UART011_RIMIC (1 << 0) /* RI interrupt clear */ + +#define UART011_DMAONERR (1 << 2) /* disable dma on error */ +#define UART011_TXDMAE (1 << 1) /* enable transmit dma */ +#define UART011_RXDMAE (1 << 0) /* enable receive dma */ + +#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE) +#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) + +#ifndef __ASSEMBLY__ +struct amba_device; /* in uncompress this is included but amba/bus.h is not */ +struct amba_pl010_data { + void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); +}; + +struct dma_chan; +struct amba_pl011_data { + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); + void *dma_rx_param; + void *dma_tx_param; + void (*init) (void); + void (*exit) (void); + void (*reset) (void); +}; +#endif + +#endif diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h new 
file mode 100644 index 00000000..15f6b9ed --- /dev/null +++ b/include/linux/amd-iommu.h @@ -0,0 +1,171 @@ +/* + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. + * Author: Joerg Roedel <joerg.roedel@amd.com> + * Leo Duran <leo.duran@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_X86_AMD_IOMMU_H +#define _ASM_X86_AMD_IOMMU_H + +#include <linux/types.h> + +#ifdef CONFIG_AMD_IOMMU + +struct task_struct; +struct pci_dev; + +extern int amd_iommu_detect(void); +extern int amd_iommu_init_hardware(void); + +/** + * amd_iommu_enable_device_erratum() - Enable erratum workaround for device + * in the IOMMUv2 driver + * @pdev: The PCI device the workaround is necessary for + * @erratum: The erratum workaround to enable + * + * The function needs to be called before amd_iommu_init_device(). + * Possible values for the erratum number are for now: + * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI + * is enabled + * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI + * requests to one + */ +#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0 +#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1 + +extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum); + +/** + * amd_iommu_init_device() - Init device for use with IOMMUv2 driver + * @pdev: The PCI device to initialize + * @pasids: Number of PASIDs to support for this device + * + * This function does all setup for the device pdev so that it can be + * used with IOMMUv2. + * Returns 0 on success or negative value on error. + */ +extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids); + +/** + * amd_iommu_free_device() - Free all IOMMUv2 related device resources + * and disable IOMMUv2 usage for this device + * @pdev: The PCI device to disable IOMMUv2 usage for' + */ +extern void amd_iommu_free_device(struct pci_dev *pdev); + +/** + * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device + * @pdev: The PCI device to bind the task to + * @pasid: The PASID on the device the task should be bound to + * @task: the task to bind + * + * The function returns 0 on success or a negative value on error. + */ +extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, + struct task_struct *task); + +/** + * amd_iommu_unbind_pasid() - Unbind a PASID from its task on + * a device + * @pdev: The device of the PASID + * @pasid: The PASID to unbind + * + * When this function returns the device is no longer using the PASID + * and the PASID is no longer bound to its task. + */ +extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid); + +/** + * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed + * PRI requests + * @pdev: The PCI device the call-back should be registered for + * @cb: The call-back function + * + * The IOMMUv2 driver invokes this call-back when it is unable to + * successfully handle a PRI request. 
The device driver can then decide + * which PRI response the device should see. Possible return values for + * the call-back are: + * + * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device + * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device + * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device, + * the device is required to disable + * PRI when it receives this response + * + * The function returns 0 on success or negative value on error. + */ +#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0 +#define AMD_IOMMU_INV_PRI_RSP_INVALID 1 +#define AMD_IOMMU_INV_PRI_RSP_FAIL 2 + +typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev, + int pasid, + unsigned long address, + u16); + +extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, + amd_iommu_invalid_ppr_cb cb); + +/** + * amd_iommu_device_info() - Get information about IOMMUv2 support of a + * PCI device + * @pdev: PCI device to query information from + * @info: A pointer to an amd_iommu_device_info structure which will contain + * the information about the PCI device + * + * Returns 0 on success, negative value on error + */ + +#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */ +#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */ +#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */ +#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution + on memory pages */ +#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request + super-user privileges */ + +struct amd_iommu_device_info { + int max_pasids; + u32 flags; +}; + +extern int amd_iommu_device_info(struct pci_dev *pdev, + struct amd_iommu_device_info *info); + +/** + * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating + * a pasid context. This call-back is + * invoked when the IOMMUv2 driver needs to + * invalidate a PASID context, for example + * because the task that is bound to that + * context is about to exit. + * + * @pdev: The PCI device the call-back should be registered for + * @cb: The call-back function + */ + +typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid); + +extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, + amd_iommu_invalidate_ctx cb); + +#else + +static inline int amd_iommu_detect(void) { return -ENODEV; } + +#endif + +#endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/amifd.h b/include/linux/amifd.h new file mode 100644 index 00000000..34699326 --- /dev/null +++ b/include/linux/amifd.h @@ -0,0 +1,62 @@ +#ifndef _AMIFD_H +#define _AMIFD_H + +/* Definitions for the Amiga floppy driver */ + +#include <linux/fd.h> + +#define FD_MAX_UNITS 4 /* Max. Number of drives */ +#define FLOPPY_MAX_SECTORS 22 /* Max. 
Number of sectors per track */ + +#ifndef ASSEMBLER + +struct fd_data_type { + char *name; /* description of data type */ + int sects; /* sectors per track */ +#ifdef __STDC__ + int (*read_fkt)(int); + void (*write_fkt)(int); +#else + int (*read_fkt)(); /* read whole track */ + void (*write_fkt)(); /* write whole track */ +#endif +}; + +/* +** Floppy type descriptions +*/ + +struct fd_drive_type { + unsigned long code; /* code returned from drive */ + char *name; /* description of drive */ + unsigned int tracks; /* number of tracks */ + unsigned int heads; /* number of heads */ + unsigned int read_size; /* raw read size for one track */ + unsigned int write_size; /* raw write size for one track */ + unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */ + unsigned int precomp1; /* start track for precomp 1 */ + unsigned int precomp2; /* start track for precomp 2 */ + unsigned int step_delay; /* time (in ms) for delay after step */ + unsigned int settle_time; /* time to settle after dir change */ + unsigned int side_time; /* time needed to change sides */ +}; + +struct amiga_floppy_struct { + struct fd_drive_type *type; /* type of floppy for this unit */ + struct fd_data_type *dtype; /* type of floppy for this unit */ + int track; /* current track (-1 == unknown) */ + unsigned char *trackbuf; /* current track (kmaloc()'d */ + + int blocks; /* total # blocks on disk */ + + int changed; /* true when not known */ + int disk; /* disk in drive (-1 == unknown) */ + int motor; /* true when motor is at speed */ + int busy; /* true when drive is active */ + int dirty; /* true when trackbuf is not on disk */ + int status; /* current error code for unit */ + struct gendisk *gendisk; +}; +#endif + +#endif diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h new file mode 100644 index 00000000..76188bf4 --- /dev/null +++ b/include/linux/amifdreg.h @@ -0,0 +1,81 @@ +#ifndef _LINUX_AMIFDREG_H +#define _LINUX_AMIFDREG_H + +/* +** CIAAPRA bits (read only) +*/ + +#define DSKRDY (0x1<<5) /* disk ready when low */ +#define DSKTRACK0 (0x1<<4) /* head at track zero when low */ +#define DSKPROT (0x1<<3) /* disk protected when low */ +#define DSKCHANGE (0x1<<2) /* low when disk removed */ + +/* +** CIAAPRB bits (read/write) +*/ + +#define DSKMOTOR (0x1<<7) /* motor on when low */ +#define DSKSEL3 (0x1<<6) /* select drive 3 when low */ +#define DSKSEL2 (0x1<<5) /* select drive 2 when low */ +#define DSKSEL1 (0x1<<4) /* select drive 1 when low */ +#define DSKSEL0 (0x1<<3) /* select drive 0 when low */ +#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */ +#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */ +#define DSKSTEP (0x1) /* pulse low to step head 1 track */ + +/* +** DSKBYTR bits (read only) +*/ + +#define DSKBYT (1<<15) /* register contains valid byte when set */ +#define DMAON (1<<14) /* disk DMA enabled */ +#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */ +#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */ +/* bits 7-0 are data */ + +/* +** ADKCON/ADKCONR bits +*/ + +#ifndef SETCLR +#define ADK_SETCLR (1<<15) /* control bit */ +#endif +#define ADK_PRECOMP1 (1<<14) /* precompensation selection */ +#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */ +#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. 
*/ +#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */ +#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */ +#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */ + +/* +** DSKLEN bits +*/ + +#define DSKLEN_DMAEN (1<<15) +#define DSKLEN_WRITE (1<<14) + +/* +** INTENA/INTREQ bits +*/ + +#define DSKINDEX (0x1<<4) /* DSKINDEX bit */ + +/* +** Misc +*/ + +#define MFM_SYNC 0x4489 /* standard MFM sync value */ + +/* Values for FD_COMMAND */ +#define FD_RECALIBRATE 0x07 /* move to track 0 */ +#define FD_SEEK 0x0F /* seek track */ +#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */ +#define FD_WRITE 0xC5 /* write with MT, MFM */ +#define FD_SENSEI 0x08 /* Sense Interrupt Status */ +#define FD_SPECIFY 0x03 /* specify HUT etc */ +#define FD_FORMAT 0x4D /* format one track */ +#define FD_VERSION 0x10 /* get version code */ +#define FD_CONFIGURE 0x13 /* configure FIFO operation */ +#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */ + +#endif /* _LINUX_AMIFDREG_H */ diff --git a/include/linux/amigaffs.h b/include/linux/amigaffs.h new file mode 100644 index 00000000..43b41c06 --- /dev/null +++ b/include/linux/amigaffs.h @@ -0,0 +1,144 @@ +#ifndef AMIGAFFS_H +#define AMIGAFFS_H + +#include <linux/types.h> +#include <asm/byteorder.h> + +#define FS_OFS 0x444F5300 +#define FS_FFS 0x444F5301 +#define FS_INTLOFS 0x444F5302 +#define FS_INTLFFS 0x444F5303 +#define FS_DCOFS 0x444F5304 +#define FS_DCFFS 0x444F5305 +#define MUFS_FS 0x6d754653 /* 'muFS' */ +#define MUFS_OFS 0x6d754600 /* 'muF\0' */ +#define MUFS_FFS 0x6d754601 /* 'muF\1' */ +#define MUFS_INTLOFS 0x6d754602 /* 'muF\2' */ +#define MUFS_INTLFFS 0x6d754603 /* 'muF\3' */ +#define MUFS_DCOFS 0x6d754604 /* 'muF\4' */ +#define MUFS_DCFFS 0x6d754605 /* 'muF\5' */ + +#define T_SHORT 2 +#define T_LIST 16 +#define T_DATA 8 + +#define ST_LINKFILE -4 +#define ST_FILE -3 +#define ST_ROOT 1 +#define ST_USERDIR 2 +#define ST_SOFTLINK 3 +#define ST_LINKDIR 4 + +#define AFFS_ROOT_BMAPS 25 + +struct affs_date { + __be32 days; + __be32 mins; + __be32 ticks; +}; + +struct affs_short_date { + __be16 days; + __be16 mins; + __be16 ticks; +}; + +struct affs_root_head { + __be32 ptype; + __be32 spare1; + __be32 spare2; + __be32 hash_size; + __be32 spare3; + __be32 checksum; + __be32 hashtable[1]; +}; + +struct affs_root_tail { + __be32 bm_flag; + __be32 bm_blk[AFFS_ROOT_BMAPS]; + __be32 bm_ext; + struct affs_date root_change; + u8 disk_name[32]; + __be32 spare1; + __be32 spare2; + struct affs_date disk_change; + struct affs_date disk_create; + __be32 spare3; + __be32 spare4; + __be32 dcache; + __be32 stype; +}; + +struct affs_head { + __be32 ptype; + __be32 key; + __be32 block_count; + __be32 spare1; + __be32 first_data; + __be32 checksum; + __be32 table[1]; +}; + +struct affs_tail { + __be32 spare1; + __be16 uid; + __be16 gid; + __be32 protect; + __be32 size; + u8 comment[92]; + struct affs_date change; + u8 name[32]; + __be32 spare2; + __be32 original; + __be32 link_chain; + __be32 spare[5]; + __be32 hash_chain; + __be32 parent; + __be32 extension; + __be32 stype; +}; + +struct slink_front +{ + __be32 ptype; + __be32 key; + __be32 spare1[3]; + __be32 checksum; + u8 symname[1]; /* depends on block size */ +}; + +struct affs_data_head +{ + __be32 ptype; + __be32 key; + __be32 sequence; + __be32 size; + __be32 next; + __be32 checksum; + u8 data[1]; /* depends on block size */ +}; + +/* Permission bits */ + +#define FIBF_OTR_READ 0x8000 +#define FIBF_OTR_WRITE 0x4000 +#define FIBF_OTR_EXECUTE 0x2000 +#define 
FIBF_OTR_DELETE 0x1000 +#define FIBF_GRP_READ 0x0800 +#define FIBF_GRP_WRITE 0x0400 +#define FIBF_GRP_EXECUTE 0x0200 +#define FIBF_GRP_DELETE 0x0100 + +#define FIBF_HIDDEN 0x0080 +#define FIBF_SCRIPT 0x0040 +#define FIBF_PURE 0x0020 /* no use under linux */ +#define FIBF_ARCHIVED 0x0010 /* never set, always cleared on write */ +#define FIBF_NOREAD 0x0008 /* 0 means allowed */ +#define FIBF_NOWRITE 0x0004 /* 0 means allowed */ +#define FIBF_NOEXECUTE 0x0002 /* 0 means allowed, ignored under linux */ +#define FIBF_NODELETE 0x0001 /* 0 means allowed */ + +#define FIBF_OWNER 0x000F /* Bits pertaining to owner */ +#define FIBF_MASK 0xEE0E /* Bits modified by Linux */ + +#endif diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h new file mode 100644 index 00000000..0f904b3b --- /dev/null +++ b/include/linux/android_aid.h @@ -0,0 +1,28 @@ +/* include/linux/android_aid.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ANDROID_AID_H +#define _LINUX_ANDROID_AID_H + +/* AIDs that the kernel treats differently */ +#define AID_NET_BT_ADMIN 3001 +#define AID_NET_BT 3002 +#define AID_INET 3003 +#define AID_NET_RAW 3004 +#define AID_NET_ADMIN 3005 +#define AID_NET_BW_STATS 3006 /* read bandwidth statistics */ +#define AID_NET_BW_ACCT 3007 /* change bandwidth statistics accounting */ + +#endif diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h new file mode 100644 index 00000000..8013a452 --- /dev/null +++ b/include/linux/anon_inodes.h @@ -0,0 +1,20 @@ +/* + * include/linux/anon_inodes.h + * + * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> + * + */ + +#ifndef _LINUX_ANON_INODES_H +#define _LINUX_ANON_INODES_H + +struct file_operations; + +struct file *anon_inode_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags); +int anon_inode_getfd(const char *name, const struct file_operations *fops, + void *priv, int flags); + +#endif /* _LINUX_ANON_INODES_H */ + diff --git a/include/linux/apm-emulation.h b/include/linux/apm-emulation.h new file mode 100644 index 00000000..e6d80035 --- /dev/null +++ b/include/linux/apm-emulation.h @@ -0,0 +1,62 @@ +/* -*- linux-c -*- + * + * (C) 2003 zecke@handhelds.org + * + * GPL version 2 + * + * based on arch/arm/kernel/apm.c + * factor out the information needed by architectures to provide + * apm status + */ +#ifndef __LINUX_APM_EMULATION_H +#define __LINUX_APM_EMULATION_H + +#include <linux/apm_bios.h> + +/* + * This structure gets filled in by the machine specific 'get_power_status' + * implementation. Any fields which are not set default to a safe value. 
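+ *
+ * A minimal sketch of such an implementation (hypothetical function name
+ * and values, for illustration only):
+ *
+ *	static void board_apm_get_power_status(struct apm_power_info *info)
+ *	{
+ *		info->ac_line_status = APM_AC_ONLINE;
+ *		info->battery_status = APM_BATTERY_STATUS_CHARGING;
+ *		info->battery_flag   = APM_BATTERY_FLAG_CHARGING;
+ *		info->battery_life   = 80;
+ *		info->time           = 120;
+ *		info->units          = APM_UNITS_MINS;
+ *	}
+ *
+ * The machine init code would then install it with
+ *
+ *	apm_get_power_status = board_apm_get_power_status;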
+ */ +struct apm_power_info { + unsigned char ac_line_status; +#define APM_AC_OFFLINE 0 +#define APM_AC_ONLINE 1 +#define APM_AC_BACKUP 2 +#define APM_AC_UNKNOWN 0xff + + unsigned char battery_status; +#define APM_BATTERY_STATUS_HIGH 0 +#define APM_BATTERY_STATUS_LOW 1 +#define APM_BATTERY_STATUS_CRITICAL 2 +#define APM_BATTERY_STATUS_CHARGING 3 +#define APM_BATTERY_STATUS_NOT_PRESENT 4 +#define APM_BATTERY_STATUS_UNKNOWN 0xff + + unsigned char battery_flag; +#define APM_BATTERY_FLAG_HIGH (1 << 0) +#define APM_BATTERY_FLAG_LOW (1 << 1) +#define APM_BATTERY_FLAG_CRITICAL (1 << 2) +#define APM_BATTERY_FLAG_CHARGING (1 << 3) +#define APM_BATTERY_FLAG_NOT_PRESENT (1 << 7) +#define APM_BATTERY_FLAG_UNKNOWN 0xff + + int battery_life; + int time; + int units; +#define APM_UNITS_MINS 0 +#define APM_UNITS_SECS 1 +#define APM_UNITS_UNKNOWN -1 + +}; + +/* + * This allows machines to provide their own "apm get power status" function. + */ +extern void (*apm_get_power_status)(struct apm_power_info *); + +/* + * Queue an event (APM_SYS_SUSPEND or APM_CRITICAL_SUSPEND) + */ +void apm_queue_event(apm_event_t event); + +#endif /* __LINUX_APM_EMULATION_H */ diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h new file mode 100644 index 00000000..01a6244c --- /dev/null +++ b/include/linux/apm_bios.h @@ -0,0 +1,220 @@ +#ifndef _LINUX_APM_H +#define _LINUX_APM_H + +/* + * Include file for the interface to an APM BIOS + * Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include <linux/types.h> + +typedef unsigned short apm_event_t; +typedef unsigned short apm_eventinfo_t; + +struct apm_bios_info { + __u16 version; + __u16 cseg; + __u32 offset; + __u16 cseg_16; + __u16 dseg; + __u16 flags; + __u16 cseg_len; + __u16 cseg_16_len; + __u16 dseg_len; +}; + +#ifdef __KERNEL__ + +#define APM_CS (GDT_ENTRY_APMBIOS_BASE * 8) +#define APM_CS_16 (APM_CS + 8) +#define APM_DS (APM_CS_16 + 8) + +/* Results of APM Installation Check */ +#define APM_16_BIT_SUPPORT 0x0001 +#define APM_32_BIT_SUPPORT 0x0002 +#define APM_IDLE_SLOWS_CLOCK 0x0004 +#define APM_BIOS_DISABLED 0x0008 +#define APM_BIOS_DISENGAGED 0x0010 + +/* + * Data for APM that is persistent across module unload/load + */ +struct apm_info { + struct apm_bios_info bios; + unsigned short connection_version; + int get_power_status_broken; + int get_power_status_swabinminutes; + int allow_ints; + int forbid_idle; + int realmode_power_off; + int disabled; +}; + +/* + * The APM function codes + */ +#define APM_FUNC_INST_CHECK 0x5300 +#define APM_FUNC_REAL_CONN 0x5301 +#define APM_FUNC_16BIT_CONN 0x5302 +#define APM_FUNC_32BIT_CONN 0x5303 +#define APM_FUNC_DISCONN 0x5304 +#define APM_FUNC_IDLE 0x5305 +#define APM_FUNC_BUSY 0x5306 +#define APM_FUNC_SET_STATE 0x5307 +#define APM_FUNC_ENABLE_PM 0x5308 +#define APM_FUNC_RESTORE_BIOS 0x5309 +#define APM_FUNC_GET_STATUS 0x530a +#define APM_FUNC_GET_EVENT 0x530b +#define APM_FUNC_GET_STATE 0x530c +#define APM_FUNC_ENABLE_DEV_PM 0x530d +#define APM_FUNC_VERSION 0x530e +#define APM_FUNC_ENGAGE_PM 0x530f +#define APM_FUNC_GET_CAP 0x5310 +#define APM_FUNC_RESUME_TIMER 0x5311 +#define APM_FUNC_RESUME_ON_RING 0x5312 +#define APM_FUNC_TIMER 0x5313 + +/* + * Function code for APM_FUNC_RESUME_TIMER + */ +#define APM_FUNC_DISABLE_TIMER 0 +#define APM_FUNC_GET_TIMER 1 +#define APM_FUNC_SET_TIMER 2 + +/* + * Function code for APM_FUNC_RESUME_ON_RING + */ +#define APM_FUNC_DISABLE_RING 0 +#define APM_FUNC_ENABLE_RING 1 +#define APM_FUNC_GET_RING 2 + +/* + * Function code for APM_FUNC_TIMER_STATUS + */ +#define APM_FUNC_TIMER_DISABLE 0 +#define APM_FUNC_TIMER_ENABLE 1 +#define APM_FUNC_TIMER_GET 2 + +/* + * in arch/i386/kernel/setup.c + */ +extern struct apm_info apm_info; + +#endif /* __KERNEL__ */ + +/* + * Power states + */ +#define APM_STATE_READY 0x0000 +#define APM_STATE_STANDBY 0x0001 +#define APM_STATE_SUSPEND 0x0002 +#define APM_STATE_OFF 0x0003 +#define APM_STATE_BUSY 0x0004 +#define APM_STATE_REJECT 0x0005 +#define APM_STATE_OEM_SYS 0x0020 +#define APM_STATE_OEM_DEV 0x0040 + +#define APM_STATE_DISABLE 0x0000 +#define APM_STATE_ENABLE 0x0001 + +#define APM_STATE_DISENGAGE 0x0000 +#define APM_STATE_ENGAGE 0x0001 + +/* + * Events (results of Get PM Event) + */ +#define APM_SYS_STANDBY 0x0001 +#define APM_SYS_SUSPEND 0x0002 +#define APM_NORMAL_RESUME 0x0003 +#define APM_CRITICAL_RESUME 0x0004 +#define APM_LOW_BATTERY 0x0005 +#define APM_POWER_STATUS_CHANGE 0x0006 +#define APM_UPDATE_TIME 0x0007 +#define APM_CRITICAL_SUSPEND 0x0008 +#define APM_USER_STANDBY 0x0009 +#define APM_USER_SUSPEND 0x000a +#define APM_STANDBY_RESUME 0x000b +#define APM_CAPABILITY_CHANGE 0x000c + +/* + * Error codes + */ +#define APM_SUCCESS 0x00 +#define APM_DISABLED 0x01 +#define APM_CONNECTED 0x02 +#define APM_NOT_CONNECTED 0x03 +#define APM_16_CONNECTED 0x05 +#define APM_16_UNSUPPORTED 0x06 +#define APM_32_CONNECTED 0x07 +#define APM_32_UNSUPPORTED 0x08 +#define APM_BAD_DEVICE 0x09 +#define APM_BAD_PARAM 0x0a +#define APM_NOT_ENGAGED 0x0b +#define APM_BAD_FUNCTION 0x0c +#define APM_RESUME_DISABLED 
0x0d +#define APM_NO_ERROR 0x53 +#define APM_BAD_STATE 0x60 +#define APM_NO_EVENTS 0x80 +#define APM_NOT_PRESENT 0x86 + +/* + * APM Device IDs + */ +#define APM_DEVICE_BIOS 0x0000 +#define APM_DEVICE_ALL 0x0001 +#define APM_DEVICE_DISPLAY 0x0100 +#define APM_DEVICE_STORAGE 0x0200 +#define APM_DEVICE_PARALLEL 0x0300 +#define APM_DEVICE_SERIAL 0x0400 +#define APM_DEVICE_NETWORK 0x0500 +#define APM_DEVICE_PCMCIA 0x0600 +#define APM_DEVICE_BATTERY 0x8000 +#define APM_DEVICE_OEM 0xe000 +#define APM_DEVICE_OLD_ALL 0xffff +#define APM_DEVICE_CLASS 0x00ff +#define APM_DEVICE_MASK 0xff00 + +#ifdef __KERNEL__ +/* + * This is the "All Devices" ID communicated to the BIOS + */ +#define APM_DEVICE_BALL ((apm_info.connection_version > 0x0100) ? \ + APM_DEVICE_ALL : APM_DEVICE_OLD_ALL) +#endif + +/* + * Battery status + */ +#define APM_MAX_BATTERIES 2 + +/* + * APM defined capability bit flags + */ +#define APM_CAP_GLOBAL_STANDBY 0x0001 +#define APM_CAP_GLOBAL_SUSPEND 0x0002 +#define APM_CAP_RESUME_STANDBY_TIMER 0x0004 /* Timer resume from standby */ +#define APM_CAP_RESUME_SUSPEND_TIMER 0x0008 /* Timer resume from suspend */ +#define APM_CAP_RESUME_STANDBY_RING 0x0010 /* Resume on Ring fr standby */ +#define APM_CAP_RESUME_SUSPEND_RING 0x0020 /* Resume on Ring fr suspend */ +#define APM_CAP_RESUME_STANDBY_PCMCIA 0x0040 /* Resume on PCMCIA Ring */ +#define APM_CAP_RESUME_SUSPEND_PCMCIA 0x0080 /* Resume on PCMCIA Ring */ + +/* + * ioctl operations + */ +#include <linux/ioctl.h> + +#define APM_IOC_STANDBY _IO('A', 1) +#define APM_IOC_SUSPEND _IO('A', 2) + +#endif /* LINUX_APM_H */ diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h new file mode 100644 index 00000000..47bedc0e --- /dev/null +++ b/include/linux/apple_bl.h @@ -0,0 +1,26 @@ +/* + * apple_bl exported symbols + */ + +#ifndef _LINUX_APPLE_BL_H +#define _LINUX_APPLE_BL_H + +#ifdef CONFIG_BACKLIGHT_APPLE + +extern int apple_bl_register(void); +extern void apple_bl_unregister(void); + +#else /* !CONFIG_BACKLIGHT_APPLE */ + +static inline int apple_bl_register(void) +{ + return 0; +} + +static inline void apple_bl_unregister(void) +{ +} + +#endif /* !CONFIG_BACKLIGHT_APPLE */ + +#endif /* _LINUX_APPLE_BL_H */ diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h new file mode 100644 index 00000000..7216b0da --- /dev/null +++ b/include/linux/arcdevice.h @@ -0,0 +1,346 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. NET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions used by the ARCnet driver. + * + * Authors: Avery Pennarun and David Woodhouse + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ +#ifndef _LINUX_ARCDEVICE_H +#define _LINUX_ARCDEVICE_H + +#include <asm/timex.h> +#include <linux/if_arcnet.h> + +#ifdef __KERNEL__ +#include <linux/irqreturn.h> + +#ifndef bool +#define bool int +#endif + +/* + * RECON_THRESHOLD is the maximum number of RECON messages to receive + * within one minute before printing a "cabling problem" warning. The + * default value should be fine. + * + * After that, a "cabling restored" message will be printed on the next IRQ + * if no RECON messages have been received for 10 seconds. 
+ * + * Do not define RECON_THRESHOLD at all if you want to disable this feature. + */ +#define RECON_THRESHOLD 30 + + +/* + * Define this to the minimum "timeout" value. If a transmit takes longer + * than TX_TIMEOUT jiffies, Linux will abort the TX and retry. On a large + * network, or one with heavy network traffic, this timeout may need to be + * increased. The larger it is, though, the longer it will be between + * necessary transmits - don't set this too high. + */ +#define TX_TIMEOUT (HZ * 200 / 1000) + + +/* Display warnings about the driver being an ALPHA version. */ +#undef ALPHA_WARNING + + +/* + * Debugging bitflags: each option can be enabled individually. + * + * Note: only debug flags included in the ARCNET_DEBUG_MAX define will + * actually be available. GCC will (at least, GCC 2.7.0 will) notice + * lines using a BUGLVL not in ARCNET_DEBUG_MAX and automatically optimize + * them out. + */ +#define D_NORMAL 1 /* important operational info */ +#define D_EXTRA 2 /* useful, but non-vital information */ +#define D_INIT 4 /* show init/probe messages */ +#define D_INIT_REASONS 8 /* show reasons for discarding probes */ +#define D_RECON 32 /* print a message whenever token is lost */ +#define D_PROTO 64 /* debug auto-protocol support */ +/* debug levels below give LOTS of output during normal operation! */ +#define D_DURING 128 /* trace operations (including irq's) */ +#define D_TX 256 /* show tx packets */ +#define D_RX 512 /* show rx packets */ +#define D_SKB 1024 /* show skb's */ +#define D_SKB_SIZE 2048 /* show skb sizes */ +#define D_TIMING 4096 /* show time needed to copy buffers to card */ +#define D_DEBUG 8192 /* Very detailed debug line for line */ + +#ifndef ARCNET_DEBUG_MAX +#define ARCNET_DEBUG_MAX (127) /* change to ~0 if you want detailed debugging */ +#endif + +#ifndef ARCNET_DEBUG +#define ARCNET_DEBUG (D_NORMAL|D_EXTRA) +#endif +extern int arcnet_debug; + +/* macros to simplify debug checking */ +#define BUGLVL(x) if ((ARCNET_DEBUG_MAX)&arcnet_debug&(x)) +#define BUGMSG2(x,msg,args...) do { BUGLVL(x) printk(msg, ## args); } while (0) +#define BUGMSG(x,msg,args...) \ + BUGMSG2(x, "%s%6s: " msg, \ + x==D_NORMAL ? KERN_WARNING \ + : x < D_DURING ? KERN_INFO : KERN_DEBUG, \ + dev->name , ## args) + +/* see how long a function call takes to run, expressed in CPU cycles */ +#define TIME(name, bytes, call) BUGLVL(D_TIMING) { \ + unsigned long _x, _y; \ + _x = get_cycles(); \ + call; \ + _y = get_cycles(); \ + BUGMSG(D_TIMING, \ + "%s: %d bytes in %lu cycles == " \ + "%lu Kbytes/100Mcycle\n",\ + name, bytes, _y - _x, \ + 100000000 / 1024 * bytes / (_y - _x + 1));\ + } \ + else { \ + call;\ + } + + +/* + * Time needed to reset the card - in ms (milliseconds). This works on my + * SMC PC100. I can't find a reference that tells me just how long I + * should wait. + */ +#define RESETtime (300) + +/* + * These are the max/min lengths of packet payload, not including the + * arc_hardware header, but definitely including the soft header. + * + * Note: packet sizes 254, 255, 256 are impossible because of the way + * ARCnet registers work That's why RFC1201 defines "exception" packets. + * In non-RFC1201 protocols, we have to just tack some extra bytes on the + * end. + */ +#define MTU 253 /* normal packet max size */ +#define MinTU 257 /* extended packet min size */ +#define XMTU 508 /* extended packet max size */ + +/* status/interrupt mask bit fields */ +#define TXFREEflag 0x01 /* transmitter available */ +#define TXACKflag 0x02 /* transmitted msg. 
ackd */ +#define RECONflag 0x04 /* network reconfigured */ +#define TESTflag 0x08 /* test flag */ +#define EXCNAKflag 0x08 /* excesive nak flag */ +#define RESETflag 0x10 /* power-on-reset */ +#define RES1flag 0x20 /* reserved - usually set by jumper */ +#define RES2flag 0x40 /* reserved - usually set by jumper */ +#define NORXflag 0x80 /* receiver inhibited */ + +/* Flags used for IO-mapped memory operations */ +#define AUTOINCflag 0x40 /* Increase location with each access */ +#define IOMAPflag 0x02 /* (for 90xx) Use IO mapped memory, not mmap */ +#define ENABLE16flag 0x80 /* (for 90xx) Enable 16-bit mode */ + +/* in the command register, the following bits have these meanings: + * 0-2 command + * 3-4 page number (for enable rcv/xmt command) + * 7 receive broadcasts + */ +#define NOTXcmd 0x01 /* disable transmitter */ +#define NORXcmd 0x02 /* disable receiver */ +#define TXcmd 0x03 /* enable transmitter */ +#define RXcmd 0x04 /* enable receiver */ +#define CONFIGcmd 0x05 /* define configuration */ +#define CFLAGScmd 0x06 /* clear flags */ +#define TESTcmd 0x07 /* load test flags */ + +/* flags for "clear flags" command */ +#define RESETclear 0x08 /* power-on-reset */ +#define CONFIGclear 0x10 /* system reconfigured */ + +#define EXCNAKclear 0x0E /* Clear and acknowledge the excive nak bit */ + +/* flags for "load test flags" command */ +#define TESTload 0x08 /* test flag (diagnostic) */ + +/* byte deposited into first address of buffers on reset */ +#define TESTvalue 0321 /* that's octal for 0xD1 :) */ + +/* for "enable receiver" command */ +#define RXbcasts 0x80 /* receive broadcasts */ + +/* flags for "define configuration" command */ +#define NORMALconf 0x00 /* 1-249 byte packets */ +#define EXTconf 0x08 /* 250-504 byte packets */ + +/* card feature flags, set during auto-detection. + * (currently only used by com20020pci) + */ +#define ARC_IS_5MBIT 1 /* card default speed is 5MBit */ +#define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit, + but default is 2.5MBit. */ + + +/* information needed to define an encapsulation driver */ +struct ArcProto { + char suffix; /* a for RFC1201, e for ether-encap, etc. */ + int mtu; /* largest possible packet */ + int is_ip; /* This is a ip plugin - not a raw thing */ + + void (*rx) (struct net_device * dev, int bufnum, + struct archdr * pkthdr, int length); + int (*build_header) (struct sk_buff * skb, struct net_device *dev, + unsigned short ethproto, uint8_t daddr); + + /* these functions return '1' if the skb can now be freed */ + int (*prepare_tx) (struct net_device * dev, struct archdr * pkt, int length, + int bufnum); + int (*continue_tx) (struct net_device * dev, int bufnum); + int (*ack_tx) (struct net_device * dev, int acked); +}; + +extern struct ArcProto *arc_proto_map[256], *arc_proto_default, + *arc_bcast_proto, *arc_raw_proto; + + +/* + * "Incoming" is information needed for each address that could be sending + * to us. Mostly for partially-received split packets. + */ +struct Incoming { + struct sk_buff *skb; /* packet data buffer */ + __be16 sequence; /* sequence number of assembly */ + uint8_t lastpacket, /* number of last packet (from 1) */ + numpackets; /* number of packets in split */ +}; + + +/* only needed for RFC1201 */ +struct Outgoing { + struct ArcProto *proto; /* protocol driver that owns this: + * if NULL, no packet is pending. 
+ */ + struct sk_buff *skb; /* buffer from upper levels */ + struct archdr *pkt; /* a pointer into the skb */ + uint16_t length, /* bytes total */ + dataleft, /* bytes left */ + segnum, /* segment being sent */ + numsegs; /* number of segments */ +}; + + +struct arcnet_local { + uint8_t config, /* current value of CONFIG register */ + timeout, /* Extended timeout for COM20020 */ + backplane, /* Backplane flag for COM20020 */ + clockp, /* COM20020 clock divider */ + clockm, /* COM20020 clock multiplier flag */ + setup, /* Contents of setup1 register */ + setup2, /* Contents of setup2 register */ + intmask; /* current value of INTMASK register */ + uint8_t default_proto[256]; /* default encap to use for each host */ + int cur_tx, /* buffer used by current transmit, or -1 */ + next_tx, /* buffer where a packet is ready to send */ + cur_rx; /* current receive buffer */ + int lastload_dest, /* can last loaded packet be acked? */ + lasttrans_dest; /* can last TX'd packet be acked? */ + int timed_out; /* need to process TX timeout and drop packet */ + unsigned long last_timeout; /* time of last reported timeout */ + char *card_name; /* card ident string */ + int card_flags; /* special card features */ + + + /* On preemtive and SMB a lock is needed */ + spinlock_t lock; + + /* + * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of + * which can be used for either sending or receiving. The new dynamic + * buffer management routines use a simple circular queue of available + * buffers, and take them as they're needed. This way, we simplify + * situations in which we (for example) want to pre-load a transmit + * buffer, or start receiving while we copy a received packet to + * memory. + * + * The rules: only the interrupt handler is allowed to _add_ buffers to + * the queue; thus, this doesn't require a lock. Both the interrupt + * handler and the transmit function will want to _remove_ buffers, so + * we need to handle the situation where they try to do it at the same + * time. + * + * If next_buf == first_free_buf, the queue is empty. Since there are + * only four possible buffers, the queue should never be full. + */ + atomic_t buf_lock; + int buf_queue[5]; + int next_buf, first_free_buf; + + /* network "reconfiguration" handling */ + unsigned long first_recon; /* time of "first" RECON message to count */ + unsigned long last_recon; /* time of most recent RECON */ + int num_recons; /* number of RECONs between first and last. */ + bool network_down; /* do we think the network is down? 
*/ + + bool excnak_pending; /* We just got an excesive nak interrupt */ + + struct { + uint16_t sequence; /* sequence number (incs with each packet) */ + __be16 aborted_seq; + + struct Incoming incoming[256]; /* one from each address */ + } rfc1201; + + /* really only used by rfc1201, but we'll pretend it's not */ + struct Outgoing outgoing; /* packet currently being sent */ + + /* hardware-specific functions */ + struct { + struct module *owner; + void (*command) (struct net_device * dev, int cmd); + int (*status) (struct net_device * dev); + void (*intmask) (struct net_device * dev, int mask); + bool (*reset) (struct net_device * dev, bool really_reset); + void (*open) (struct net_device * dev); + void (*close) (struct net_device * dev); + + void (*copy_to_card) (struct net_device * dev, int bufnum, int offset, + void *buf, int count); + void (*copy_from_card) (struct net_device * dev, int bufnum, int offset, + void *buf, int count); + } hw; + + void __iomem *mem_start; /* pointer to ioremap'ed MMIO */ +}; + + +#define ARCRESET(x) (lp->hw.reset(dev, (x))) +#define ACOMMAND(x) (lp->hw.command(dev, (x))) +#define ASTATUS() (lp->hw.status(dev)) +#define AINTMASK(x) (lp->hw.intmask(dev, (x))) + + + +#if ARCNET_DEBUG_MAX & D_SKB +void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); +#else +#define arcnet_dump_skb(dev,skb,desc) ; +#endif + +void arcnet_unregister_proto(struct ArcProto *proto); +irqreturn_t arcnet_interrupt(int irq, void *dev_id); +struct net_device *alloc_arcdev(const char *name); + +int arcnet_open(struct net_device *dev); +int arcnet_close(struct net_device *dev); +netdev_tx_t arcnet_send_packet(struct sk_buff *skb, + struct net_device *dev); +void arcnet_timeout(struct net_device *dev); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_ARCDEVICE_H */ diff --git a/include/linux/arcfb.h b/include/linux/arcfb.h new file mode 100644 index 00000000..721e7654 --- /dev/null +++ b/include/linux/arcfb.h @@ -0,0 +1,8 @@ +#ifndef __LINUX_ARCFB_H__ +#define __LINUX_ARCFB_H__ + +#define FBIO_WAITEVENT _IO('F', 0x88) +#define FBIO_GETCONTROL2 _IOR('F', 0x89, size_t) + +#endif + diff --git a/include/linux/async.h b/include/linux/async.h new file mode 100644 index 00000000..68a95301 --- /dev/null +++ b/include/linux/async.h @@ -0,0 +1,27 @@ +/* + * async.h: Asynchronous function calls for boot performance + * + * (C) Copyright 2009 Intel Corporation + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include <linux/types.h> +#include <linux/list.h> + +typedef u64 async_cookie_t; +typedef void (async_func_ptr) (void *data, async_cookie_t cookie); + +extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data); +extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data, + struct list_head *list); +extern void async_synchronize_full(void); +extern void async_synchronize_full_domain(struct list_head *list); +extern void async_synchronize_cookie(async_cookie_t cookie); +extern void async_synchronize_cookie_domain(async_cookie_t cookie, + struct list_head *list); + diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h new file mode 100644 index 00000000..a1c486a8 --- /dev/null +++ b/include/linux/async_tx.h @@ -0,0 +1,209 @@ +/* + * Copyright © 2006, Intel Corporation. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _ASYNC_TX_H_
+#define _ASYNC_TX_H_
+#include <linux/dmaengine.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
+/**
+ * dma_chan_ref - object used to manage dma channels received from the
+ * dmaengine core.
+ * @chan - the channel being tracked
+ * @node - node for the channel to be placed on async_tx_master_list
+ * @rcu - for list_del_rcu
+ * @count - number of times this channel is listed in the pool
+ * (for channels with multiple capabilities)
+ */
+struct dma_chan_ref {
+ struct dma_chan *chan;
+ struct list_head node;
+ struct rcu_head rcu;
+ atomic_t count;
+};
+
+/**
+ * async_tx_flags - modifiers for the async_* calls
+ * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where
+ * the destination address is not a source. The asynchronous case handles this
+ * implicitly; the synchronous case needs to zero the destination block.
+ * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
+ * also one of the source addresses. In the synchronous case the destination
+ * address is an implied source, whereas in the asynchronous case it must be
+ * listed as a source. The destination address must be the first address in
+ * the source array.
+ * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
+ * dependency chain
+ * @ASYNC_TX_FENCE: specify that the next operation in the dependency
+ * chain uses this operation's result as an input
+ */
+enum async_tx_flags {
+ ASYNC_TX_XOR_ZERO_DST = (1 << 0),
+ ASYNC_TX_XOR_DROP_DST = (1 << 1),
+ ASYNC_TX_ACK = (1 << 2),
+ ASYNC_TX_FENCE = (1 << 3),
+};
+
+/**
+ * struct async_submit_ctl - async_tx submission/completion modifiers
+ * @flags: submission modifiers
+ * @depend_tx: parent dependency of the current operation being submitted
+ * @cb_fn: callback routine to run at operation completion
+ * @cb_param: parameter for the callback routine
+ * @scribble: caller provided space for dma/page address conversions
+ */
+struct async_submit_ctl {
+ enum async_tx_flags flags;
+ struct dma_async_tx_descriptor *depend_tx;
+ dma_async_tx_callback cb_fn;
+ void *cb_param;
+ void *scribble;
+};
+
+#ifdef CONFIG_DMA_ENGINE
+#define async_tx_issue_pending_all dma_issue_pending_all
+
+/**
+ * async_tx_issue_pending - send pending descriptor to the hardware channel
+ * @tx: descriptor handle to retrieve hardware context
+ *
+ * Note: any dependent operations will have already been issued by
+ * async_tx_channel_switch, or (in the case of no channel switch) will
+ * already be pending on this channel.
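+ *
+ * Typical use (an illustrative sketch, not taken from an in-tree driver;
+ * "dest", "srcs", "src_cnt" and "scribble" are caller-supplied): build a
+ * submit context, queue an operation such as async_xor(), then kick the
+ * engine with the returned descriptor:
+ *
+ *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
+ *			  NULL, NULL, NULL, scribble);
+ *	tx = async_xor(dest, srcs, 0, src_cnt, PAGE_SIZE, &submit);
+ *	async_tx_issue_pending(tx);
+ *
+ * where scribble points at src_cnt addr_conv_t slots that remain valid
+ * until the operation completes.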
+ */ +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + if (likely(tx)) { + struct dma_chan *chan = tx->chan; + struct dma_device *dma = chan->device; + + dma->device_issue_pending(chan); + } +} +#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL +#include <asm/async_tx.h> +#else +#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \ + __async_tx_find_channel(dep, type) +struct dma_chan * +__async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type); +#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */ +#else +static inline void async_tx_issue_pending_all(void) +{ + do { } while (0); +} + +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + do { } while (0); +} + +static inline struct dma_chan * +async_tx_find_channel(struct async_submit_ctl *submit, + enum dma_transaction_type tx_type, struct page **dst, + int dst_count, struct page **src, int src_count, + size_t len) +{ + return NULL; +} +#endif + +/** + * async_tx_sync_epilog - actions to take if an operation is run synchronously + * @cb_fn: function to call when the transaction completes + * @cb_fn_param: parameter to pass to the callback routine + */ +static inline void +async_tx_sync_epilog(struct async_submit_ctl *submit) +{ + if (submit->cb_fn) + submit->cb_fn(submit->cb_param); +} + +typedef union { + unsigned long addr; + struct page *page; + dma_addr_t dma; +} addr_conv_t; + +static inline void +init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags, + struct dma_async_tx_descriptor *tx, + dma_async_tx_callback cb_fn, void *cb_param, + addr_conv_t *scribble) +{ + args->flags = flags; + args->depend_tx = tx; + args->cb_fn = cb_fn; + args->cb_param = cb_param; + args->scribble = scribble; +} + +void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_xor(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, enum sum_check_flags *result, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, + unsigned int src_offset, size_t len, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_memset(struct page *dest, int val, unsigned int offset, + size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, enum sum_check_flags *pqres, struct page *spare, + struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb, + struct page **ptrs, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_raid6_datap_recov(int src_num, size_t bytes, int faila, + struct page **ptrs, struct async_submit_ctl *submit); + +void async_tx_quiesce(struct dma_async_tx_descriptor **tx); +#endif /* _ASYNC_TX_H_ */ diff --git 
a/include/linux/ata.h b/include/linux/ata.h new file mode 100644 index 00000000..32df2b6e --- /dev/null +++ b/include/linux/ata.h @@ -0,0 +1,1078 @@ + +/* + * Copyright 2003-2004 Red Hat, Inc. All rights reserved. + * Copyright 2003-2004 Jeff Garzik + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/DocBook/libata.* + * + * Hardware documentation available from http://www.t13.org/ + * + */ + +#ifndef __LINUX_ATA_H__ +#define __LINUX_ATA_H__ + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/types.h> +#include <asm/byteorder.h> + +/* defines only for the constants which don't work well as enums */ +#define ATA_DMA_BOUNDARY 0xffffUL +#define ATA_DMA_MASK 0xffffffffULL + +enum { + /* various global constants */ + ATA_MAX_DEVICES = 2, /* per bus/port */ + ATA_MAX_PRD = 256, /* we could make these 256/256 */ + ATA_SECT_SIZE = 512, + ATA_MAX_SECTORS_128 = 128, + ATA_MAX_SECTORS = 256, + ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ + ATA_MAX_SECTORS_TAPE = 65535, + + ATA_ID_WORDS = 256, + ATA_ID_CONFIG = 0, + ATA_ID_CYLS = 1, + ATA_ID_HEADS = 3, + ATA_ID_SECTORS = 6, + ATA_ID_SERNO = 10, + ATA_ID_BUF_SIZE = 21, + ATA_ID_FW_REV = 23, + ATA_ID_PROD = 27, + ATA_ID_MAX_MULTSECT = 47, + ATA_ID_DWORD_IO = 48, + ATA_ID_CAPABILITY = 49, + ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_OLD_DMA_MODES = 52, + ATA_ID_FIELD_VALID = 53, + ATA_ID_CUR_CYLS = 54, + ATA_ID_CUR_HEADS = 55, + ATA_ID_CUR_SECTORS = 56, + ATA_ID_MULTSECT = 59, + ATA_ID_LBA_CAPACITY = 60, + ATA_ID_SWDMA_MODES = 62, + ATA_ID_MWDMA_MODES = 63, + ATA_ID_PIO_MODES = 64, + ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_DMA_TIME = 66, + ATA_ID_EIDE_PIO = 67, + ATA_ID_EIDE_PIO_IORDY = 68, + ATA_ID_ADDITIONAL_SUPP = 69, + ATA_ID_QUEUE_DEPTH = 75, + ATA_ID_MAJOR_VER = 80, + ATA_ID_COMMAND_SET_1 = 82, + ATA_ID_COMMAND_SET_2 = 83, + ATA_ID_CFSSE = 84, + ATA_ID_CFS_ENABLE_1 = 85, + ATA_ID_CFS_ENABLE_2 = 86, + ATA_ID_CSF_DEFAULT = 87, + ATA_ID_UDMA_MODES = 88, + ATA_ID_HW_CONFIG = 93, + ATA_ID_SPG = 98, + ATA_ID_LBA_CAPACITY_2 = 100, + ATA_ID_SECTOR_SIZE = 106, + ATA_ID_WWN = 108, + ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */ + ATA_ID_LAST_LUN = 126, + ATA_ID_DLF = 128, + ATA_ID_CSFO = 129, + ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, + ATA_ID_DATA_SET_MGMT = 169, + ATA_ID_ROT_SPEED = 217, + ATA_ID_PIO4 = (1 << 1), + + ATA_ID_SERNO_LEN = 20, + ATA_ID_FW_REV_LEN = 8, + ATA_ID_PROD_LEN = 40, + ATA_ID_WWN_LEN = 8, + + ATA_PCI_CTL_OFS = 2, + + ATA_PIO0 = (1 << 0), + ATA_PIO1 = ATA_PIO0 | (1 << 1), + ATA_PIO2 = ATA_PIO1 | (1 << 2), + ATA_PIO3 = ATA_PIO2 | (1 << 3), + ATA_PIO4 = ATA_PIO3 | (1 << 4), + ATA_PIO5 = ATA_PIO4 | (1 << 5), + ATA_PIO6 = ATA_PIO5 | (1 << 6), + + ATA_PIO4_ONLY = (1 << 4), + + ATA_SWDMA0 = (1 << 0), + ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1), + 
ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2), + + ATA_SWDMA2_ONLY = (1 << 2), + + ATA_MWDMA0 = (1 << 0), + ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1), + ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2), + ATA_MWDMA3 = ATA_MWDMA2 | (1 << 3), + ATA_MWDMA4 = ATA_MWDMA3 | (1 << 4), + + ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2), + ATA_MWDMA2_ONLY = (1 << 2), + + ATA_UDMA0 = (1 << 0), + ATA_UDMA1 = ATA_UDMA0 | (1 << 1), + ATA_UDMA2 = ATA_UDMA1 | (1 << 2), + ATA_UDMA3 = ATA_UDMA2 | (1 << 3), + ATA_UDMA4 = ATA_UDMA3 | (1 << 4), + ATA_UDMA5 = ATA_UDMA4 | (1 << 5), + ATA_UDMA6 = ATA_UDMA5 | (1 << 6), + ATA_UDMA7 = ATA_UDMA6 | (1 << 7), + /* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */ + + ATA_UDMA24_ONLY = (1 << 2) | (1 << 4), + + ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */ + + /* DMA-related */ + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = (ATA_MAX_PRD * ATA_PRD_SZ), + ATA_PRD_EOT = (1 << 31), /* end-of-table flag */ + + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = (1 << 3), + ATA_DMA_START = (1 << 0), + ATA_DMA_INTR = (1 << 2), + ATA_DMA_ERR = (1 << 1), + ATA_DMA_ACTIVE = (1 << 0), + + /* bits in ATA command block registers */ + ATA_HOB = (1 << 7), /* LBA48 selector */ + ATA_NIEN = (1 << 1), /* disable-irq flag */ + ATA_LBA = (1 << 6), /* LBA28 selector */ + ATA_DEV1 = (1 << 4), /* Select Device 1 (slave) */ + ATA_DEVICE_OBS = (1 << 7) | (1 << 5), /* obs bits in dev reg */ + ATA_DEVCTL_OBS = (1 << 3), /* obsolete bit in devctl reg */ + ATA_BUSY = (1 << 7), /* BSY status bit */ + ATA_DRDY = (1 << 6), /* device ready */ + ATA_DF = (1 << 5), /* device fault */ + ATA_DSC = (1 << 4), /* drive seek complete */ + ATA_DRQ = (1 << 3), /* data request i/o */ + ATA_CORR = (1 << 2), /* corrected data error */ + ATA_IDX = (1 << 1), /* index */ + ATA_ERR = (1 << 0), /* have an error */ + ATA_SRST = (1 << 2), /* software reset */ + ATA_ICRC = (1 << 7), /* interface CRC error */ + ATA_BBK = ATA_ICRC, /* pre-EIDE: block marked bad */ + ATA_UNC = (1 << 6), /* uncorrectable media error */ + ATA_MC = (1 << 5), /* media changed */ + ATA_IDNF = (1 << 4), /* ID not found */ + ATA_MCR = (1 << 3), /* media change requested */ + ATA_ABORTED = (1 << 2), /* command aborted */ + ATA_TRK0NF = (1 << 1), /* track 0 not found */ + ATA_AMNF = (1 << 0), /* address mark not found */ + ATAPI_LFS = 0xF0, /* last failed sense */ + ATAPI_EOM = ATA_TRK0NF, /* end of media */ + ATAPI_ILI = ATA_AMNF, /* illegal length indication */ + ATAPI_IO = (1 << 1), + ATAPI_COD = (1 << 0), + + /* ATA command block registers */ + ATA_REG_DATA = 0x00, + ATA_REG_ERR = 0x01, + ATA_REG_NSECT = 0x02, + ATA_REG_LBAL = 0x03, + ATA_REG_LBAM = 0x04, + ATA_REG_LBAH = 0x05, + ATA_REG_DEVICE = 0x06, + ATA_REG_STATUS = 0x07, + + ATA_REG_FEATURE = ATA_REG_ERR, /* and their aliases */ + ATA_REG_CMD = ATA_REG_STATUS, + ATA_REG_BYTEL = ATA_REG_LBAM, + ATA_REG_BYTEH = ATA_REG_LBAH, + ATA_REG_DEVSEL = ATA_REG_DEVICE, + ATA_REG_IRQ = ATA_REG_NSECT, + + /* ATA device commands */ + ATA_CMD_DEV_RESET = 0x08, /* ATAPI device reset */ + ATA_CMD_CHK_POWER = 0xE5, /* check power mode */ + ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */ + ATA_CMD_IDLE = 0xE3, /* place in idle power mode */ + ATA_CMD_EDD = 0x90, /* execute device diagnostic */ + ATA_CMD_DOWNLOAD_MICRO = 0x92, + ATA_CMD_NOP = 0x00, + ATA_CMD_FLUSH = 0xE7, + ATA_CMD_FLUSH_EXT = 0xEA, + ATA_CMD_ID_ATA = 0xEC, + ATA_CMD_ID_ATAPI = 0xA1, + ATA_CMD_SERVICE = 0xA2, + ATA_CMD_READ = 0xC8, + ATA_CMD_READ_EXT = 0x25, + ATA_CMD_READ_QUEUED = 0x26, + ATA_CMD_READ_STREAM_EXT = 0x2B, + 
ATA_CMD_READ_STREAM_DMA_EXT = 0x2A, + ATA_CMD_WRITE = 0xCA, + ATA_CMD_WRITE_EXT = 0x35, + ATA_CMD_WRITE_QUEUED = 0x36, + ATA_CMD_WRITE_STREAM_EXT = 0x3B, + ATA_CMD_WRITE_STREAM_DMA_EXT = 0x3A, + ATA_CMD_WRITE_FUA_EXT = 0x3D, + ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E, + ATA_CMD_FPDMA_READ = 0x60, + ATA_CMD_FPDMA_WRITE = 0x61, + ATA_CMD_PIO_READ = 0x20, + ATA_CMD_PIO_READ_EXT = 0x24, + ATA_CMD_PIO_WRITE = 0x30, + ATA_CMD_PIO_WRITE_EXT = 0x34, + ATA_CMD_READ_MULTI = 0xC4, + ATA_CMD_READ_MULTI_EXT = 0x29, + ATA_CMD_WRITE_MULTI = 0xC5, + ATA_CMD_WRITE_MULTI_EXT = 0x39, + ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE, + ATA_CMD_SET_FEATURES = 0xEF, + ATA_CMD_SET_MULTI = 0xC6, + ATA_CMD_PACKET = 0xA0, + ATA_CMD_VERIFY = 0x40, + ATA_CMD_VERIFY_EXT = 0x42, + ATA_CMD_WRITE_UNCORR_EXT = 0x45, + ATA_CMD_STANDBYNOW1 = 0xE0, + ATA_CMD_IDLEIMMEDIATE = 0xE1, + ATA_CMD_SLEEP = 0xE6, + ATA_CMD_INIT_DEV_PARAMS = 0x91, + ATA_CMD_READ_NATIVE_MAX = 0xF8, + ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, + ATA_CMD_SET_MAX = 0xF9, + ATA_CMD_SET_MAX_EXT = 0x37, + ATA_CMD_READ_LOG_EXT = 0x2F, + ATA_CMD_WRITE_LOG_EXT = 0x3F, + ATA_CMD_READ_LOG_DMA_EXT = 0x47, + ATA_CMD_WRITE_LOG_DMA_EXT = 0x57, + ATA_CMD_TRUSTED_RCV = 0x5C, + ATA_CMD_TRUSTED_RCV_DMA = 0x5D, + ATA_CMD_TRUSTED_SND = 0x5E, + ATA_CMD_TRUSTED_SND_DMA = 0x5F, + ATA_CMD_PMP_READ = 0xE4, + ATA_CMD_PMP_WRITE = 0xE8, + ATA_CMD_CONF_OVERLAY = 0xB1, + ATA_CMD_SEC_SET_PASS = 0xF1, + ATA_CMD_SEC_UNLOCK = 0xF2, + ATA_CMD_SEC_ERASE_PREP = 0xF3, + ATA_CMD_SEC_ERASE_UNIT = 0xF4, + ATA_CMD_SEC_FREEZE_LOCK = 0xF5, + ATA_CMD_SEC_DISABLE_PASS = 0xF6, + ATA_CMD_CONFIG_STREAM = 0x51, + ATA_CMD_SMART = 0xB0, + ATA_CMD_MEDIA_LOCK = 0xDE, + ATA_CMD_MEDIA_UNLOCK = 0xDF, + ATA_CMD_DSM = 0x06, + ATA_CMD_CHK_MED_CRD_TYP = 0xD1, + ATA_CMD_CFA_REQ_EXT_ERR = 0x03, + ATA_CMD_CFA_WRITE_NE = 0x38, + ATA_CMD_CFA_TRANS_SECT = 0x87, + ATA_CMD_CFA_ERASE = 0xC0, + ATA_CMD_CFA_WRITE_MULT_NE = 0xCD, + /* marked obsolete in the ATA/ATAPI-7 spec */ + ATA_CMD_RESTORE = 0x10, + + /* READ_LOG_EXT pages */ + ATA_LOG_SATA_NCQ = 0x10, + + /* READ/WRITE LONG (obsolete) */ + ATA_CMD_READ_LONG = 0x22, + ATA_CMD_READ_LONG_ONCE = 0x23, + ATA_CMD_WRITE_LONG = 0x32, + ATA_CMD_WRITE_LONG_ONCE = 0x33, + + /* SETFEATURES stuff */ + SETFEATURES_XFER = 0x03, + XFER_UDMA_7 = 0x47, + XFER_UDMA_6 = 0x46, + XFER_UDMA_5 = 0x45, + XFER_UDMA_4 = 0x44, + XFER_UDMA_3 = 0x43, + XFER_UDMA_2 = 0x42, + XFER_UDMA_1 = 0x41, + XFER_UDMA_0 = 0x40, + XFER_MW_DMA_4 = 0x24, /* CFA only */ + XFER_MW_DMA_3 = 0x23, /* CFA only */ + XFER_MW_DMA_2 = 0x22, + XFER_MW_DMA_1 = 0x21, + XFER_MW_DMA_0 = 0x20, + XFER_SW_DMA_2 = 0x12, + XFER_SW_DMA_1 = 0x11, + XFER_SW_DMA_0 = 0x10, + XFER_PIO_6 = 0x0E, /* CFA only */ + XFER_PIO_5 = 0x0D, /* CFA only */ + XFER_PIO_4 = 0x0C, + XFER_PIO_3 = 0x0B, + XFER_PIO_2 = 0x0A, + XFER_PIO_1 = 0x09, + XFER_PIO_0 = 0x08, + XFER_PIO_SLOW = 0x00, + + SETFEATURES_WC_ON = 0x02, /* Enable write cache */ + SETFEATURES_WC_OFF = 0x82, /* Disable write cache */ + + /* Enable/Disable Automatic Acoustic Management */ + SETFEATURES_AAM_ON = 0x42, + SETFEATURES_AAM_OFF = 0xC2, + + SETFEATURES_SPINUP = 0x07, /* Spin-up drive */ + + SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */ + SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */ + + /* SETFEATURE Sector counts for SATA features */ + SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */ + SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */ + SATA_DIPM = 0x03, /* Device Initiated Power Management */ + SATA_FPDMA_IN_ORDER = 0x04, /* FPDMA in-order data 
delivery */ + SATA_AN = 0x05, /* Asynchronous Notification */ + SATA_SSP = 0x06, /* Software Settings Preservation */ + + /* feature values for SET_MAX */ + ATA_SET_MAX_ADDR = 0x00, + ATA_SET_MAX_PASSWD = 0x01, + ATA_SET_MAX_LOCK = 0x02, + ATA_SET_MAX_UNLOCK = 0x03, + ATA_SET_MAX_FREEZE_LOCK = 0x04, + + /* feature values for DEVICE CONFIGURATION OVERLAY */ + ATA_DCO_RESTORE = 0xC0, + ATA_DCO_FREEZE_LOCK = 0xC1, + ATA_DCO_IDENTIFY = 0xC2, + ATA_DCO_SET = 0xC3, + + /* feature values for SMART */ + ATA_SMART_ENABLE = 0xD8, + ATA_SMART_READ_VALUES = 0xD0, + ATA_SMART_READ_THRESHOLDS = 0xD1, + + /* feature values for Data Set Management */ + ATA_DSM_TRIM = 0x01, + + /* password used in LBA Mid / LBA High for executing SMART commands */ + ATA_SMART_LBAM_PASS = 0x4F, + ATA_SMART_LBAH_PASS = 0xC2, + + /* ATAPI stuff */ + ATAPI_PKT_DMA = (1 << 0), + ATAPI_DMADIR = (1 << 2), /* ATAPI data dir: + 0=to device, 1=to host */ + ATAPI_CDB_LEN = 16, + + /* PMP stuff */ + SATA_PMP_MAX_PORTS = 15, + SATA_PMP_CTRL_PORT = 15, + + SATA_PMP_GSCR_DWORDS = 128, + SATA_PMP_GSCR_PROD_ID = 0, + SATA_PMP_GSCR_REV = 1, + SATA_PMP_GSCR_PORT_INFO = 2, + SATA_PMP_GSCR_ERROR = 32, + SATA_PMP_GSCR_ERROR_EN = 33, + SATA_PMP_GSCR_FEAT = 64, + SATA_PMP_GSCR_FEAT_EN = 96, + + SATA_PMP_PSCR_STATUS = 0, + SATA_PMP_PSCR_ERROR = 1, + SATA_PMP_PSCR_CONTROL = 2, + + SATA_PMP_FEAT_BIST = (1 << 0), + SATA_PMP_FEAT_PMREQ = (1 << 1), + SATA_PMP_FEAT_DYNSSC = (1 << 2), + SATA_PMP_FEAT_NOTIFY = (1 << 3), + + /* cable types */ + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA40_SHORT = 3, /* 40 wire cable to high UDMA spec */ + ATA_CBL_PATA_UNK = 4, /* don't know, maybe 80c? */ + ATA_CBL_PATA_IGN = 5, /* don't know, ignore cable handling */ + ATA_CBL_SATA = 6, + + /* SATA Status and Control Registers */ + SCR_STATUS = 0, + SCR_ERROR = 1, + SCR_CONTROL = 2, + SCR_ACTIVE = 3, + SCR_NOTIFICATION = 4, + + /* SError bits */ + SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */ + SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */ + SERR_DATA = (1 << 8), /* unrecovered data error */ + SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */ + SERR_PROTOCOL = (1 << 10), /* protocol violation */ + SERR_INTERNAL = (1 << 11), /* host internal error */ + SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */ + SERR_PHY_INT_ERR = (1 << 17), /* PHY internal error */ + SERR_COMM_WAKE = (1 << 18), /* Comm wake */ + SERR_10B_8B_ERR = (1 << 19), /* 10b to 8b decode error */ + SERR_DISPARITY = (1 << 20), /* Disparity */ + SERR_CRC = (1 << 21), /* CRC error */ + SERR_HANDSHAKE = (1 << 22), /* Handshake error */ + SERR_LINK_SEQ_ERR = (1 << 23), /* Link sequence error */ + SERR_TRANS_ST_ERROR = (1 << 24), /* Transport state trans. 
error */ + SERR_UNRECOG_FIS = (1 << 25), /* Unrecognized FIS */ + SERR_DEV_XCHG = (1 << 26), /* device exchanged */ + + /* struct ata_taskfile flags */ + ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ + ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ + ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ + ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ + ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ + ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ + ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */ + + /* protocol flags */ + ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ + ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ + ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA, + ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ + ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ +}; + +enum ata_tf_protocols { + /* ATA taskfile protocols */ + ATA_PROT_UNKNOWN, /* unknown/invalid */ + ATA_PROT_NODATA, /* no data */ + ATA_PROT_PIO, /* PIO data xfer */ + ATA_PROT_DMA, /* DMA */ + ATA_PROT_NCQ, /* NCQ */ + ATAPI_PROT_NODATA, /* packet command, no data */ + ATAPI_PROT_PIO, /* packet command, PIO data xfer*/ + ATAPI_PROT_DMA, /* packet command with special DMA sauce */ +}; + +enum ata_ioctls { + ATA_IOC_GET_IO32 = 0x309, + ATA_IOC_SET_IO32 = 0x324, +}; + +/* core structures */ + +struct ata_bmdma_prd { + __le32 addr; + __le32 flags_len; +}; + +struct ata_taskfile { + unsigned long flags; /* ATA_TFLAG_xxx */ + u8 protocol; /* ATA_PROT_xxx */ + + u8 ctl; /* control reg */ + + u8 hob_feature; /* additional data */ + u8 hob_nsect; /* to support LBA48 */ + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + + u8 feature; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + + u8 device; + + u8 command; /* IO operation */ +}; + +/* + * protocol tests + */ +static inline unsigned int ata_prot_flags(u8 prot) +{ + switch (prot) { + case ATA_PROT_NODATA: + return 0; + case ATA_PROT_PIO: + return ATA_PROT_FLAG_PIO; + case ATA_PROT_DMA: + return ATA_PROT_FLAG_DMA; + case ATA_PROT_NCQ: + return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ; + case ATAPI_PROT_NODATA: + return ATA_PROT_FLAG_ATAPI; + case ATAPI_PROT_PIO: + return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO; + case ATAPI_PROT_DMA: + return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA; + } + return 0; +} + +static inline int ata_is_atapi(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI; +} + +static inline int ata_is_nodata(u8 prot) +{ + return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA); +} + +static inline int ata_is_pio(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO; +} + +static inline int ata_is_dma(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA; +} + +static inline int ata_is_ncq(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ; +} + +static inline int ata_is_data(u8 prot) +{ + return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA; +} + +/* + * id tests + */ +#define ata_id_is_ata(id) (((id)[ATA_ID_CONFIG] & (1 << 15)) == 0) +#define ata_id_has_lba(id) ((id)[ATA_ID_CAPABILITY] & (1 << 9)) +#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8)) +#define ata_id_has_ncq(id) ((id)[76] & (1 << 8)) +#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) +#define ata_id_removeable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) +#define ata_id_has_atapi_AN(id) \ + ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \ + ((id)[78] & (1 << 5)) ) +#define ata_id_has_fpdma_aa(id) \ + ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \ + ((id)[78] & (1 << 2)) ) 
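+
+/*
+ * Usage sketch (illustrative only, not taken from a driver): with "id"
+ * pointing at the 256-word IDENTIFY DEVICE buffer in CPU byte order, the
+ * tests above combine naturally, e.g.
+ *
+ *	int depth = 1;
+ *
+ *	if (ata_id_is_ata(id) && ata_id_has_lba(id) && ata_id_has_ncq(id))
+ *		depth = ata_id_queue_depth(id);
+ *
+ * which leaves the depth at 1 unless the device is an NCQ-capable ATA disk.
+ */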
+#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) +#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) +#define ata_id_u32(id,n) \ + (((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)])) +#define ata_id_u64(id,n) \ + ( ((u64) (id)[(n) + 3] << 48) | \ + ((u64) (id)[(n) + 2] << 32) | \ + ((u64) (id)[(n) + 1] << 16) | \ + ((u64) (id)[(n) + 0]) ) + +#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) + +static inline bool ata_id_has_hipm(const u16 *id) +{ + u16 val = id[76]; + + if (val == 0 || val == 0xffff) + return false; + + return val & (1 << 9); +} + +static inline bool ata_id_has_dipm(const u16 *id) +{ + u16 val = id[78]; + + if (val == 0 || val == 0xffff) + return false; + + return val & (1 << 3); +} + + +static inline bool ata_id_has_fua(const u16 *id) +{ + if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFSSE] & (1 << 6); +} + +static inline bool ata_id_has_flush(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_2] & (1 << 12); +} + +static inline bool ata_id_flush_enabled(const u16 *id) +{ + if (ata_id_has_flush(id) == 0) + return false; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_2] & (1 << 12); +} + +static inline bool ata_id_has_flush_ext(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_2] & (1 << 13); +} + +static inline bool ata_id_flush_ext_enabled(const u16 *id) +{ + if (ata_id_has_flush_ext(id) == 0) + return false; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + /* + * some Maxtor disks have bit 13 defined incorrectly + * so check bit 10 too + */ + return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400; +} + +static inline u32 ata_id_logical_sector_size(const u16 *id) +{ + /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. + * IDENTIFY DEVICE data, word 117-118. + * 0xd000 ignores bit 13 (logical:physical > 1) + */ + if ((id[ATA_ID_SECTOR_SIZE] & 0xd000) == 0x5000) + return (((id[ATA_ID_LOGICAL_SECTOR_SIZE+1] << 16) + + id[ATA_ID_LOGICAL_SECTOR_SIZE]) * sizeof(u16)) ; + return ATA_SECT_SIZE; +} + +static inline u8 ata_id_log2_per_physical_sector(const u16 *id) +{ + /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. + * IDENTIFY DEVICE data, word 106. + * 0xe000 ignores bit 12 (logical sector > 512 bytes) + */ + if ((id[ATA_ID_SECTOR_SIZE] & 0xe000) == 0x6000) + return (id[ATA_ID_SECTOR_SIZE] & 0xf); + return 0; +} + +/* Offset of logical sectors relative to physical sectors. + * + * If device has more than one logical sector per physical sector + * (aka 512 byte emulation), vendors might offset the "sector 0" address + * so sector 63 is "naturally aligned" - e.g. FAT partition table. + * This avoids Read/Mod/Write penalties when using FAT partition table + * and updating "well aligned" (FS perspective) physical sectors on every + * transaction. 
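+ *
+ * Worked example (values chosen for illustration): a drive with 512-byte
+ * logical and 4096-byte physical sectors (log2_per_phys == 3) that reports
+ * word 209 == 0x4001 places logical sector 0 at offset 1 inside the first
+ * physical sector, so the helper below returns 8 - 1 = 7: LBAs 7, 15, ...,
+ * 63 start on physical sector boundaries.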
+ */ +static inline u16 ata_id_logical_sector_offset(const u16 *id, + u8 log2_per_phys) +{ + u16 word_209 = id[209]; + + if ((log2_per_phys > 1) && (word_209 & 0xc000) == 0x4000) { + u16 first = word_209 & 0x3fff; + if (first > 0) + return (1 << log2_per_phys) - first; + } + return 0; +} + +static inline bool ata_id_has_lba48(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + if (!ata_id_u64(id, ATA_ID_LBA_CAPACITY_2)) + return false; + return id[ATA_ID_COMMAND_SET_2] & (1 << 10); +} + +static inline bool ata_id_lba48_enabled(const u16 *id) +{ + if (ata_id_has_lba48(id) == 0) + return false; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_2] & (1 << 10); +} + +static inline bool ata_id_hpa_enabled(const u16 *id) +{ + /* Yes children, word 83 valid bits cover word 82 data */ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + /* And 87 covers 85-87 */ + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + /* Check command sets enabled as well as supported */ + if ((id[ATA_ID_CFS_ENABLE_1] & (1 << 10)) == 0) + return false; + return id[ATA_ID_COMMAND_SET_1] & (1 << 10); +} + +static inline bool ata_id_has_wcache(const u16 *id) +{ + /* Yes children, word 83 valid bits cover word 82 data */ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_1] & (1 << 5); +} + +static inline bool ata_id_has_pm(const u16 *id) +{ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_COMMAND_SET_1] & (1 << 3); +} + +static inline bool ata_id_rahead_enabled(const u16 *id) +{ + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_1] & (1 << 6); +} + +static inline bool ata_id_wcache_enabled(const u16 *id) +{ + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return false; + return id[ATA_ID_CFS_ENABLE_1] & (1 << 5); +} + +/** + * ata_id_major_version - get ATA level of drive + * @id: Identify data + * + * Caveats: + * ATA-1 considers identify optional + * ATA-2 introduces mandatory identify + * ATA-3 introduces word 80 and accurate reporting + * + * The practical impact of this is that ata_id_major_version cannot + * reliably report on drives below ATA3. + */ + +static inline unsigned int ata_id_major_version(const u16 *id) +{ + unsigned int mver; + + if (id[ATA_ID_MAJOR_VER] == 0xFFFF) + return 0; + + for (mver = 14; mver >= 1; mver--) + if (id[ATA_ID_MAJOR_VER] & (1 << mver)) + break; + return mver; +} + +static inline bool ata_id_is_sata(const u16 *id) +{ + /* + * See if word 93 is 0 AND drive is at least ATA-5 compatible + * verifying that word 80 by casting it to a signed type -- + * this trick allows us to filter out the reserved values of + * 0x0000 and 0xffff along with the earlier ATA revisions... 
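+ *
+ * For example, a drive reporting 0x01f0 in word 80 (ATA/ATAPI-4 through
+ * ATA-8 supported) passes the >= 0x0020 test, while 0xffff is rejected
+ * because it becomes -1 once cast to a signed short.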
+ */ + if (id[ATA_ID_HW_CONFIG] == 0 && (short)id[ATA_ID_MAJOR_VER] >= 0x0020) + return true; + return false; +} + +static inline bool ata_id_has_tpm(const u16 *id) +{ + /* The TPM bits are only valid on ATA8 */ + if (ata_id_major_version(id) < 8) + return false; + if ((id[48] & 0xC000) != 0x4000) + return false; + return id[48] & (1 << 0); +} + +static inline bool ata_id_has_dword_io(const u16 *id) +{ + /* ATA 8 reuses this flag for "trusted" computing */ + if (ata_id_major_version(id) > 7) + return false; + return id[ATA_ID_DWORD_IO] & (1 << 0); +} + +static inline bool ata_id_has_unload(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 && + id[ATA_ID_CFSSE] & (1 << 13)) + return true; + return false; +} + +static inline bool ata_id_has_wwn(const u16 *id) +{ + return (id[ATA_ID_CSF_DEFAULT] & 0xC100) == 0x4100; +} + +static inline int ata_id_form_factor(const u16 *id) +{ + u16 val = id[168]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + val &= 0xf; + + if (val > 5) + return 0; + + return val; +} + +static inline int ata_id_rotation_rate(const u16 *id) +{ + u16 val = id[217]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + if (val > 1 && val < 0x401) + return 0; + + return val; +} + +static inline bool ata_id_has_trim(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_DATA_SET_MGMT] & 1)) + return true; + return false; +} + +static inline bool ata_id_has_zero_after_trim(const u16 *id) +{ + /* DSM supported, deterministic read, and read zero after trim set */ + if (ata_id_has_trim(id) && + (id[ATA_ID_ADDITIONAL_SUPP] & 0x4020) == 0x4020) + return true; + + return false; +} + +static inline bool ata_id_current_chs_valid(const u16 *id) +{ + /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command + has not been issued to the device then the values of + id[ATA_ID_CUR_CYLS] to id[ATA_ID_CUR_SECTORS] are vendor specific. */ + return (id[ATA_ID_FIELD_VALID] & 1) && /* Current translation valid */ + id[ATA_ID_CUR_CYLS] && /* cylinders in current translation */ + id[ATA_ID_CUR_HEADS] && /* heads in current translation */ + id[ATA_ID_CUR_HEADS] <= 16 && + id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */ +} + +static inline bool ata_id_is_cfa(const u16 *id) +{ + if ((id[ATA_ID_CONFIG] == 0x848A) || /* Traditional CF */ + (id[ATA_ID_CONFIG] == 0x844A)) /* Delkin Devices CF */ + return true; + /* + * CF specs don't require specific value in the word 0 anymore and yet + * they forbid to report the ATA version in the word 80 and require the + * CFA feature set support to be indicated in the word 83 in this case. + * Unfortunately, some cards only follow either of this requirements, + * and while those that don't indicate CFA feature support need some + * sort of quirk list, it seems impractical for the ones that do... + */ + return (id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004; +} + +static inline bool ata_id_is_ssd(const u16 *id) +{ + return id[ATA_ID_ROT_SPEED] == 0x01; +} + +static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio) +{ + /* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */ + if (pio > 4 && ata_id_is_cfa(id)) + return false; + /* For PIO3 and higher it is mandatory. */ + if (pio > 2) + return true; + /* Turn it on when possible. 
*/ + return ata_id_has_iordy(id); +} + +static inline bool ata_drive_40wire(const u16 *dev_id) +{ + if (ata_id_is_sata(dev_id)) + return false; /* SATA */ + if ((dev_id[ATA_ID_HW_CONFIG] & 0xE000) == 0x6000) + return false; /* 80 wire */ + return true; +} + +static inline bool ata_drive_40wire_relaxed(const u16 *dev_id) +{ + if ((dev_id[ATA_ID_HW_CONFIG] & 0x2000) == 0x2000) + return false; /* 80 wire */ + return true; +} + +static inline int atapi_cdb_len(const u16 *dev_id) +{ + u16 tmp = dev_id[ATA_ID_CONFIG] & 0x3; + switch (tmp) { + case 0: return 12; + case 1: return 16; + default: return -1; + } +} + +static inline bool atapi_command_packet_set(const u16 *dev_id) +{ + return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; +} + +static inline bool atapi_id_dmadir(const u16 *dev_id) +{ + return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000); +} + +/* + * ata_id_is_lba_capacity_ok() performs a sanity check on + * the claimed LBA capacity value for the device. + * + * Returns 1 if LBA capacity looks sensible, 0 otherwise. + * + * It is called only once for each device. + */ +static inline bool ata_id_is_lba_capacity_ok(u16 *id) +{ + unsigned long lba_sects, chs_sects, head, tail; + + /* No non-LBA info .. so valid! */ + if (id[ATA_ID_CYLS] == 0) + return true; + + lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY); + + /* + * The ATA spec tells large drives to return + * C/H/S = 16383/16/63 independent of their size. + * Some drives can be jumpered to use 15 heads instead of 16. + * Some drives can be jumpered to use 4092 cyls instead of 16383. + */ + if ((id[ATA_ID_CYLS] == 16383 || + (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) && + id[ATA_ID_SECTORS] == 63 && + (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) && + (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS])) + return true; + + chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS]; + + /* perform a rough sanity check on lba_sects: within 10% is OK */ + if (lba_sects - chs_sects < chs_sects/10) + return true; + + /* some drives have the word order reversed */ + head = (lba_sects >> 16) & 0xffff; + tail = lba_sects & 0xffff; + lba_sects = head | (tail << 16); + + if (lba_sects - chs_sects < chs_sects/10) { + *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects); + return true; /* LBA capacity is (now) good */ + } + + return false; /* LBA capacity value may be bad */ +} + +static inline void ata_id_to_hd_driveid(u16 *id) +{ +#ifdef __BIG_ENDIAN + /* accessed in struct hd_driveid as 8-bit values */ + id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]); + id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]); + id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]); + id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]); + id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]); + + /* as 32-bit values */ + *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY); + *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG); + + /* as 64-bit value */ + *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] = + ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); +#endif +} + +/* + * Write LBA Range Entries to the buffer that will cover the extent from + * sector to sector + count. This is used for TRIM and for ADD LBA(S) + * TO NV CACHE PINNED SET. 
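+ *
+ * Each 8-byte entry carries a 48-bit starting LBA in its low bits and a
+ * 16-bit sector count in its upper 16 bits.  Worked example (illustrative
+ * values): sector == 0x1000 and count == 0x10000 produce two entries,
+ * (0x1000, 0xffff) and (0x10fff, 0x0001), and the remainder of the buffer
+ * is zeroed out to the next 512-byte boundary.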
+ */ +static inline unsigned ata_set_lba_range_entries(void *_buffer, + unsigned buf_size, u64 sector, unsigned long count) +{ + __le64 *buffer = _buffer; + unsigned i = 0, used_bytes; + + while (i < buf_size / 8 ) { /* 6-byte LBA + 2-byte range per entry */ + u64 entry = sector | + ((u64)(count > 0xffff ? 0xffff : count) << 48); + buffer[i++] = __cpu_to_le64(entry); + if (count <= 0xffff) + break; + count -= 0xffff; + sector += 0xffff; + } + + used_bytes = ALIGN(i * 8, 512); + memset(buffer + i, 0, used_bytes - i * 8); + return used_bytes; +} + +static inline int is_multi_taskfile(struct ata_taskfile *tf) +{ + return (tf->command == ATA_CMD_READ_MULTI) || + (tf->command == ATA_CMD_WRITE_MULTI) || + (tf->command == ATA_CMD_READ_MULTI_EXT) || + (tf->command == ATA_CMD_WRITE_MULTI_EXT) || + (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT); +} + +static inline bool ata_ok(u8 status) +{ + return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) + == ATA_DRDY); +} + +static inline bool lba_28_ok(u64 block, u32 n_block) +{ + /* check the ending block number: must be LESS THAN 0x0fffffff */ + return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256); +} + +static inline bool lba_48_ok(u64 block, u32 n_block) +{ + /* check the ending block number */ + return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536); +} + +#define sata_pmp_gscr_vendor(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff) +#define sata_pmp_gscr_devid(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] >> 16) +#define sata_pmp_gscr_rev(gscr) (((gscr)[SATA_PMP_GSCR_REV] >> 8) & 0xff) +#define sata_pmp_gscr_ports(gscr) ((gscr)[SATA_PMP_GSCR_PORT_INFO] & 0xf) + +#endif /* __LINUX_ATA_H__ */ diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h new file mode 100644 index 00000000..b856a2a5 --- /dev/null +++ b/include/linux/ata_platform.h @@ -0,0 +1,34 @@ +#ifndef __LINUX_ATA_PLATFORM_H +#define __LINUX_ATA_PLATFORM_H + +struct pata_platform_info { + /* + * I/O port shift, for platforms with ports that are + * constantly spaced and need larger than the 1-byte + * spacing used by ata_std_ports(). + */ + unsigned int ioport_shift; + /* + * Indicate platform specific irq types and initial + * IRQ flags when call request_irq() + */ + unsigned int irq_flags; +}; + +extern int __devinit __pata_platform_probe(struct device *dev, + struct resource *io_res, + struct resource *ctl_res, + struct resource *irq_res, + unsigned int ioport_shift, + int __pio_mask); + +extern int __devexit __pata_platform_remove(struct device *dev); + +/* + * Marvell SATA private data + */ +struct mv_sata_platform_data { + int n_ports; /* number of sata ports */ +}; + +#endif /* __LINUX_ATA_PLATFORM_H */ diff --git a/include/linux/atalk.h b/include/linux/atalk.h new file mode 100644 index 00000000..f57c3688 --- /dev/null +++ b/include/linux/atalk.h @@ -0,0 +1,209 @@ +#ifndef __LINUX_ATALK_H__ +#define __LINUX_ATALK_H__ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/socket.h> + +/* + * AppleTalk networking structures + * + * The following are directly referenced from the University Of Michigan + * netatalk for compatibility reasons. 
+ */ +#define ATPORT_FIRST 1 +#define ATPORT_RESERVED 128 +#define ATPORT_LAST 254 /* 254 is only legal on localtalk */ +#define ATADDR_ANYNET (__u16)0 +#define ATADDR_ANYNODE (__u8)0 +#define ATADDR_ANYPORT (__u8)0 +#define ATADDR_BCAST (__u8)255 +#define DDP_MAXSZ 587 +#define DDP_MAXHOPS 15 /* 4 bits of hop counter */ + +#define SIOCATALKDIFADDR (SIOCPROTOPRIVATE + 0) + +struct atalk_addr { + __be16 s_net; + __u8 s_node; +}; + +struct sockaddr_at { + __kernel_sa_family_t sat_family; + __u8 sat_port; + struct atalk_addr sat_addr; + char sat_zero[8]; +}; + +struct atalk_netrange { + __u8 nr_phase; + __be16 nr_firstnet; + __be16 nr_lastnet; +}; + +#ifdef __KERNEL__ + +#include <net/sock.h> + +struct atalk_route { + struct net_device *dev; + struct atalk_addr target; + struct atalk_addr gateway; + int flags; + struct atalk_route *next; +}; + +/** + * struct atalk_iface - AppleTalk Interface + * @dev - Network device associated with this interface + * @address - Our address + * @status - What are we doing? + * @nets - Associated direct netrange + * @next - next element in the list of interfaces + */ +struct atalk_iface { + struct net_device *dev; + struct atalk_addr address; + int status; +#define ATIF_PROBE 1 /* Probing for an address */ +#define ATIF_PROBE_FAIL 2 /* Probe collided */ + struct atalk_netrange nets; + struct atalk_iface *next; +}; + +struct atalk_sock { + /* struct sock has to be the first member of atalk_sock */ + struct sock sk; + __be16 dest_net; + __be16 src_net; + unsigned char dest_node; + unsigned char src_node; + unsigned char dest_port; + unsigned char src_port; +}; + +static inline struct atalk_sock *at_sk(struct sock *sk) +{ + return (struct atalk_sock *)sk; +} + +struct ddpehdr { + __be16 deh_len_hops; /* lower 10 bits are length, next 4 - hops */ + __be16 deh_sum; + __be16 deh_dnet; + __be16 deh_snet; + __u8 deh_dnode; + __u8 deh_snode; + __u8 deh_dport; + __u8 deh_sport; + /* And netatalk apps expect to stick the type in themselves */ +}; + +static __inline__ struct ddpehdr *ddp_hdr(struct sk_buff *skb) +{ + return (struct ddpehdr *)skb_transport_header(skb); +} + +/* AppleTalk AARP headers */ +struct elapaarp { + __be16 hw_type; +#define AARP_HW_TYPE_ETHERNET 1 +#define AARP_HW_TYPE_TOKENRING 2 + __be16 pa_type; + __u8 hw_len; + __u8 pa_len; +#define AARP_PA_ALEN 4 + __be16 function; +#define AARP_REQUEST 1 +#define AARP_REPLY 2 +#define AARP_PROBE 3 + __u8 hw_src[ETH_ALEN]; + __u8 pa_src_zero; + __be16 pa_src_net; + __u8 pa_src_node; + __u8 hw_dst[ETH_ALEN]; + __u8 pa_dst_zero; + __be16 pa_dst_net; + __u8 pa_dst_node; +} __attribute__ ((packed)); + +static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb) +{ + return (struct elapaarp *)skb_transport_header(skb); +} + +/* Not specified - how long till we drop a resolved entry */ +#define AARP_EXPIRY_TIME (5 * 60 * HZ) +/* Size of hash table */ +#define AARP_HASH_SIZE 16 +/* Fast retransmission timer when resolving */ +#define AARP_TICK_TIME (HZ / 5) +/* Send 10 requests then give up (2 seconds) */ +#define AARP_RETRANSMIT_LIMIT 10 +/* + * Some value bigger than total retransmit time + a bit for last reply to + * appear and to stop continual requests + */ +#define AARP_RESOLVE_TIME (10 * HZ) + +extern struct datalink_proto *ddp_dl, *aarp_dl; +extern void aarp_proto_init(void); + +/* Inter module exports */ + +/* Give a device find its atif control structure */ +static inline struct atalk_iface *atalk_find_dev(struct net_device *dev) +{ + return dev->atalk_ptr; +} + +extern struct atalk_addr 
*atalk_find_dev_addr(struct net_device *dev); +extern struct net_device *atrtr_get_dev(struct atalk_addr *sa); +extern int aarp_send_ddp(struct net_device *dev, + struct sk_buff *skb, + struct atalk_addr *sa, void *hwaddr); +extern void aarp_device_down(struct net_device *dev); +extern void aarp_probe_network(struct atalk_iface *atif); +extern int aarp_proxy_probe_network(struct atalk_iface *atif, + struct atalk_addr *sa); +extern void aarp_proxy_remove(struct net_device *dev, + struct atalk_addr *sa); + +extern void aarp_cleanup_module(void); + +extern struct hlist_head atalk_sockets; +extern rwlock_t atalk_sockets_lock; + +extern struct atalk_route *atalk_routes; +extern rwlock_t atalk_routes_lock; + +extern struct atalk_iface *atalk_interfaces; +extern rwlock_t atalk_interfaces_lock; + +extern struct atalk_route atrtr_default; + +extern const struct file_operations atalk_seq_arp_fops; + +extern int sysctl_aarp_expiry_time; +extern int sysctl_aarp_tick_time; +extern int sysctl_aarp_retransmit_limit; +extern int sysctl_aarp_resolve_time; + +#ifdef CONFIG_SYSCTL +extern void atalk_register_sysctl(void); +extern void atalk_unregister_sysctl(void); +#else +#define atalk_register_sysctl() do { } while(0) +#define atalk_unregister_sysctl() do { } while(0) +#endif + +#ifdef CONFIG_PROC_FS +extern int atalk_proc_init(void); +extern void atalk_proc_exit(void); +#else +#define atalk_proc_init() ({ 0; }) +#define atalk_proc_exit() do { } while(0) +#endif /* CONFIG_PROC_FS */ + +#endif /* __KERNEL__ */ +#endif /* __LINUX_ATALK_H__ */ diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h new file mode 100644 index 00000000..6e3f54f3 --- /dev/null +++ b/include/linux/ath9k_platform.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> + * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_ATH9K_PLATFORM_H +#define _LINUX_ATH9K_PLATFORM_H + +#define ATH9K_PLAT_EEP_MAX_WORDS 2048 + +struct ath9k_platform_data { + u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS]; + u8 *macaddr; + + int led_pin; + u32 gpio_mask; + u32 gpio_val; + + bool is_clk_25mhz; + int (*get_mac_revision)(void); + int (*external_reset)(void); +}; + +#endif /* _LINUX_ATH9K_PLATFORM_H */ diff --git a/include/linux/atm.h b/include/linux/atm.h new file mode 100644 index 00000000..d3b29217 --- /dev/null +++ b/include/linux/atm.h @@ -0,0 +1,251 @@ +/* atm.h - general ATM declarations */ + +/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ + + +/* + * WARNING: User-space programs should not #include <linux/atm.h> directly. 
+ * Instead, #include <atm.h> + */ + +#ifndef _LINUX_ATM_H +#define _LINUX_ATM_H + +/* + * BEGIN_xx and END_xx markers are used for automatic generation of + * documentation. Do not change them. + */ + +#include <linux/compiler.h> +#include <linux/atmapi.h> +#include <linux/atmsap.h> +#include <linux/atmioc.h> +#include <linux/types.h> + + +/* general ATM constants */ +#define ATM_CELL_SIZE 53 /* ATM cell size incl. header */ +#define ATM_CELL_PAYLOAD 48 /* ATM payload size */ +#define ATM_AAL0_SDU 52 /* AAL0 SDU size */ +#define ATM_MAX_AAL34_PDU 65535 /* maximum AAL3/4 PDU payload */ +#define ATM_AAL5_TRAILER 8 /* AAL5 trailer size */ +#define ATM_MAX_AAL5_PDU 65535 /* maximum AAL5 PDU payload */ +#define ATM_MAX_CDV 9999 /* maximum (default) CDV */ +#define ATM_NOT_RSV_VCI 32 /* first non-reserved VCI value */ + +#define ATM_MAX_VPI 255 /* maximum VPI at the UNI */ +#define ATM_MAX_VPI_NNI 4096 /* maximum VPI at the NNI */ +#define ATM_MAX_VCI 65535 /* maximum VCI */ + + +/* "protcol" values for the socket system call */ +#define ATM_NO_AAL 0 /* AAL not specified */ +#define ATM_AAL0 13 /* "raw" ATM cells */ +#define ATM_AAL1 1 /* AAL1 (CBR) */ +#define ATM_AAL2 2 /* AAL2 (VBR) */ +#define ATM_AAL34 3 /* AAL3/4 (data) */ +#define ATM_AAL5 5 /* AAL5 (data) */ + +/* + * socket option name coding functions + * + * Note that __SO_ENCODE and __SO_LEVEL are somewhat a hack since the + * << 22 only reserves 9 bits for the level. On some architectures + * SOL_SOCKET is 0xFFFF, so that's a bit of a problem + */ + +#define __SO_ENCODE(l,n,t) ((((l) & 0x1FF) << 22) | ((n) << 16) | \ + sizeof(t)) +#define __SO_LEVEL_MATCH(c,m) (((c) >> 22) == ((m) & 0x1FF)) +#define __SO_NUMBER(c) (((c) >> 16) & 0x3f) +#define __SO_SIZE(c) ((c) & 0x3fff) + +/* + * ATM layer + */ + +#define SO_SETCLP __SO_ENCODE(SOL_ATM,0,int) + /* set CLP bit value - TODO */ +#define SO_CIRANGE __SO_ENCODE(SOL_ATM,1,struct atm_cirange) + /* connection identifier range; socket must be + bound or connected */ +#define SO_ATMQOS __SO_ENCODE(SOL_ATM,2,struct atm_qos) + /* Quality of Service setting */ +#define SO_ATMSAP __SO_ENCODE(SOL_ATM,3,struct atm_sap) + /* Service Access Point */ +#define SO_ATMPVC __SO_ENCODE(SOL_ATM,4,struct sockaddr_atmpvc) + /* "PVC" address (also for SVCs); get only */ +#define SO_MULTIPOINT __SO_ENCODE(SOL_ATM, 5, int) + /* make this vc a p2mp */ + + +/* + * Note @@@: since the socket layers don't really distinguish the control and + * the data plane but generally seems to be data plane-centric, any layer is + * about equally wrong for the SAP. If you have a better idea about this, + * please speak up ... + */ + + +/* ATM cell header (for AAL0) */ + +/* BEGIN_CH */ +#define ATM_HDR_GFC_MASK 0xf0000000 +#define ATM_HDR_GFC_SHIFT 28 +#define ATM_HDR_VPI_MASK 0x0ff00000 +#define ATM_HDR_VPI_SHIFT 20 +#define ATM_HDR_VCI_MASK 0x000ffff0 +#define ATM_HDR_VCI_SHIFT 4 +#define ATM_HDR_PTI_MASK 0x0000000e +#define ATM_HDR_PTI_SHIFT 1 +#define ATM_HDR_CLP 0x00000001 +/* END_CH */ + + +/* PTI codings */ + +/* BEGIN_PTI */ +#define ATM_PTI_US0 0 /* user data cell, congestion not exp, SDU-type 0 */ +#define ATM_PTI_US1 1 /* user data cell, congestion not exp, SDU-type 1 */ +#define ATM_PTI_UCES0 2 /* user data cell, cong. experienced, SDU-type 0 */ +#define ATM_PTI_UCES1 3 /* user data cell, cong. 
experienced, SDU-type 1 */ +#define ATM_PTI_SEGF5 4 /* segment OAM F5 flow related cell */ +#define ATM_PTI_E2EF5 5 /* end-to-end OAM F5 flow related cell */ +#define ATM_PTI_RSV_RM 6 /* reserved for traffic control/resource mgmt */ +#define ATM_PTI_RSV 7 /* reserved */ +/* END_PTI */ + + +/* + * The following items should stay in linux/atm.h, which should be linked to + * netatm/atm.h + */ + +/* Traffic description */ + +#define ATM_NONE 0 /* no traffic */ +#define ATM_UBR 1 +#define ATM_CBR 2 +#define ATM_VBR 3 +#define ATM_ABR 4 +#define ATM_ANYCLASS 5 /* compatible with everything */ + +#define ATM_MAX_PCR -1 /* maximum available PCR */ + +struct atm_trafprm { + unsigned char traffic_class; /* traffic class (ATM_UBR, ...) */ + int max_pcr; /* maximum PCR in cells per second */ + int pcr; /* desired PCR in cells per second */ + int min_pcr; /* minimum PCR in cells per second */ + int max_cdv; /* maximum CDV in microseconds */ + int max_sdu; /* maximum SDU in bytes */ + /* extra params for ABR */ + unsigned int icr; /* Initial Cell Rate (24-bit) */ + unsigned int tbe; /* Transient Buffer Exposure (24-bit) */ + unsigned int frtt : 24; /* Fixed Round Trip Time (24-bit) */ + unsigned int rif : 4; /* Rate Increment Factor (4-bit) */ + unsigned int rdf : 4; /* Rate Decrease Factor (4-bit) */ + unsigned int nrm_pres :1; /* nrm present bit */ + unsigned int trm_pres :1; /* rm present bit */ + unsigned int adtf_pres :1; /* adtf present bit */ + unsigned int cdf_pres :1; /* cdf present bit*/ + unsigned int nrm :3; /* Max # of Cells for each forward RM cell (3-bit) */ + unsigned int trm :3; /* Time between forward RM cells (3-bit) */ + unsigned int adtf :10; /* ACR Decrease Time Factor (10-bit) */ + unsigned int cdf :3; /* Cutoff Decrease Factor (3-bit) */ + unsigned int spare :9; /* spare bits */ +}; + +struct atm_qos { + struct atm_trafprm txtp; /* parameters in TX direction */ + struct atm_trafprm rxtp __ATM_API_ALIGN; + /* parameters in RX direction */ + unsigned char aal __ATM_API_ALIGN; +}; + +/* PVC addressing */ + +#define ATM_ITF_ANY -1 /* "magic" PVC address values */ +#define ATM_VPI_ANY -1 +#define ATM_VCI_ANY -1 +#define ATM_VPI_UNSPEC -2 +#define ATM_VCI_UNSPEC -2 + + +struct sockaddr_atmpvc { + unsigned short sap_family; /* address family, AF_ATMPVC */ + struct { /* PVC address */ + short itf; /* ATM interface */ + short vpi; /* VPI (only 8 bits at UNI) */ + int vci; /* VCI (only 16 bits at UNI) */ + } sap_addr __ATM_API_ALIGN; /* PVC address */ +}; + +/* SVC addressing */ + +#define ATM_ESA_LEN 20 /* ATM End System Address length */ +#define ATM_E164_LEN 12 /* maximum E.164 number length */ + +#define ATM_AFI_DCC 0x39 /* DCC ATM Format */ +#define ATM_AFI_ICD 0x47 /* ICD ATM Format */ +#define ATM_AFI_E164 0x45 /* E.164 ATM Format */ +#define ATM_AFI_LOCAL 0x49 /* Local ATM Format */ + +#define ATM_AFI_DCC_GROUP 0xBD /* DCC ATM Group Format */ +#define ATM_AFI_ICD_GROUP 0xC5 /* ICD ATM Group Format */ +#define ATM_AFI_E164_GROUP 0xC3 /* E.164 ATM Group Format */ +#define ATM_AFI_LOCAL_GROUP 0xC7 /* Local ATM Group Format */ + +#define ATM_LIJ_NONE 0 /* no leaf-initiated join */ +#define ATM_LIJ 1 /* request joining */ +#define ATM_LIJ_RPJ 2 /* set to root-prompted join */ +#define ATM_LIJ_NJ 3 /* set to network join */ + + +struct sockaddr_atmsvc { + unsigned short sas_family; /* address family, AF_ATMSVC */ + struct { /* SVC address */ + unsigned char prv[ATM_ESA_LEN];/* private ATM address */ + char pub[ATM_E164_LEN+1]; /* public address (E.164) */ + /* unused addresses must be 
bzero'ed */ + char lij_type; /* role in LIJ call; one of ATM_LIJ* */ + __u32 lij_id; /* LIJ call identifier */ + } sas_addr __ATM_API_ALIGN; /* SVC address */ +}; + + +static __inline__ int atmsvc_addr_in_use(struct sockaddr_atmsvc addr) +{ + return *addr.sas_addr.prv || *addr.sas_addr.pub; +} + + +static __inline__ int atmpvc_addr_in_use(struct sockaddr_atmpvc addr) +{ + return addr.sap_addr.itf || addr.sap_addr.vpi || addr.sap_addr.vci; +} + + +/* + * Some stuff for linux/sockios.h + */ + +struct atmif_sioc { + int number; + int length; + void __user *arg; +}; + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT +#include <linux/compat.h> +struct compat_atmif_sioc { + int number; + int length; + compat_uptr_t arg; +}; +#endif +#endif + +typedef unsigned short atm_backend_t; +#endif diff --git a/include/linux/atm_eni.h b/include/linux/atm_eni.h new file mode 100644 index 00000000..34f31797 --- /dev/null +++ b/include/linux/atm_eni.h @@ -0,0 +1,23 @@ +/* atm_eni.h - Driver-specific declarations of the ENI driver (for use by + driver-specific utilities) */ + +/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef LINUX_ATM_ENI_H +#define LINUX_ATM_ENI_H + +#include <linux/atmioc.h> + + +struct eni_multipliers { + int tx,rx; /* values are in percent and must be > 100 */ +}; + + +#define ENI_MEMDUMP _IOW('a',ATMIOC_SARPRV,struct atmif_sioc) + /* printk memory map */ +#define ENI_SETMULT _IOW('a',ATMIOC_SARPRV+7,struct atmif_sioc) + /* set buffer multipliers */ + +#endif diff --git a/include/linux/atm_he.h b/include/linux/atm_he.h new file mode 100644 index 00000000..2a7713b5 --- /dev/null +++ b/include/linux/atm_he.h @@ -0,0 +1,20 @@ +/* atm_he.h */ + +#ifndef LINUX_ATM_HE_H +#define LINUX_ATM_HE_H + +#include <linux/atmioc.h> + +#define HE_GET_REG _IOW('a', ATMIOC_SARPRV, struct atmif_sioc) + +#define HE_REGTYPE_PCI 1 +#define HE_REGTYPE_RCM 2 +#define HE_REGTYPE_TCM 3 +#define HE_REGTYPE_MBOX 4 + +struct he_ioctl_reg { + unsigned addr, val; + char type; +}; + +#endif /* LINUX_ATM_HE_H */ diff --git a/include/linux/atm_idt77105.h b/include/linux/atm_idt77105.h new file mode 100644 index 00000000..8b724000 --- /dev/null +++ b/include/linux/atm_idt77105.h @@ -0,0 +1,28 @@ +/* atm_idt77105.h - Driver-specific declarations of the IDT77105 driver (for + * use by driver-specific utilities) */ + +/* Written 1999 by Greg Banks <gnb@linuxfan.com>. Copied from atm_suni.h. */ + + +#ifndef LINUX_ATM_IDT77105_H +#define LINUX_ATM_IDT77105_H + +#include <linux/types.h> +#include <linux/atmioc.h> +#include <linux/atmdev.h> + +/* + * Structure for IDT77105_GETSTAT and IDT77105_GETSTATZ ioctls. + * Pointed to by `arg' in atmif_sioc. + */ +struct idt77105_stats { + __u32 symbol_errors; /* wire symbol errors */ + __u32 tx_cells; /* cells transmitted */ + __u32 rx_cells; /* cells received */ + __u32 rx_hec_errors; /* Header Error Check errors on receive */ +}; + +#define IDT77105_GETSTAT _IOW('a',ATMIOC_PHYPRV+2,struct atmif_sioc) /* get stats */ +#define IDT77105_GETSTATZ _IOW('a',ATMIOC_PHYPRV+3,struct atmif_sioc) /* get stats and zero */ + +#endif diff --git a/include/linux/atm_nicstar.h b/include/linux/atm_nicstar.h new file mode 100644 index 00000000..577b79f3 --- /dev/null +++ b/include/linux/atm_nicstar.h @@ -0,0 +1,53 @@ +/****************************************************************************** + * + * atm_nicstar.h + * + * Driver-specific declarations for use by NICSTAR driver specific utils. 
+ * + * Author: Rui Prior + * + * (C) INESC 1998 + * + ******************************************************************************/ + + +#ifndef LINUX_ATM_NICSTAR_H +#define LINUX_ATM_NICSTAR_H + +/* Note: non-kernel programs including this file must also include + * sys/types.h for struct timeval + */ + +#include <linux/atmapi.h> +#include <linux/atmioc.h> + +#define NS_GETPSTAT _IOWR('a',ATMIOC_SARPRV+1,struct atmif_sioc) + /* get pool statistics */ +#define NS_SETBUFLEV _IOW('a',ATMIOC_SARPRV+2,struct atmif_sioc) + /* set buffer level markers */ +#define NS_ADJBUFLEV _IO('a',ATMIOC_SARPRV+3) + /* adjust buffer level */ + +typedef struct buf_nr +{ + unsigned min; + unsigned init; + unsigned max; +}buf_nr; + + +typedef struct pool_levels +{ + int buftype; + int count; /* (At least for now) only used in NS_GETPSTAT */ + buf_nr level; +} pool_levels; + +/* type must be one of the following: */ +#define NS_BUFTYPE_SMALL 1 +#define NS_BUFTYPE_LARGE 2 +#define NS_BUFTYPE_HUGE 3 +#define NS_BUFTYPE_IOVEC 4 + + +#endif /* LINUX_ATM_NICSTAR_H */ diff --git a/include/linux/atm_suni.h b/include/linux/atm_suni.h new file mode 100644 index 00000000..84f3aab5 --- /dev/null +++ b/include/linux/atm_suni.h @@ -0,0 +1,12 @@ +/* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by + driver-specific utilities) */ + +/* Written 1998,2000 by Werner Almesberger, EPFL ICA */ + + +#ifndef LINUX_ATM_SUNI_H +#define LINUX_ATM_SUNI_H + +/* everything obsoleted */ + +#endif diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h new file mode 100644 index 00000000..375638f8 --- /dev/null +++ b/include/linux/atm_tcp.h @@ -0,0 +1,73 @@ +/* atm_tcp.h - Driver-specific declarations of the ATMTCP driver (for use by + driver-specific utilities) */ + +/* Written 1997-2000 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef LINUX_ATM_TCP_H +#define LINUX_ATM_TCP_H + +#include <linux/atmapi.h> +#include <linux/atm.h> +#include <linux/atmioc.h> +#include <linux/types.h> + + +/* + * All values in struct atmtcp_hdr are in network byte order + */ + +struct atmtcp_hdr { + __u16 vpi; + __u16 vci; + __u32 length; /* ... of data part */ +}; + +/* + * All values in struct atmtcp_command are in host byte order + */ + +#define ATMTCP_HDR_MAGIC (~0) /* this length indicates a command */ +#define ATMTCP_CTRL_OPEN 1 /* request/reply */ +#define ATMTCP_CTRL_CLOSE 2 /* request/reply */ + +struct atmtcp_control { + struct atmtcp_hdr hdr; /* must be first */ + int type; /* message type; both directions */ + atm_kptr_t vcc; /* both directions */ + struct sockaddr_atmpvc addr; /* suggested value from kernel */ + struct atm_qos qos; /* both directions */ + int result; /* to kernel only */ +} __ATM_API_ALIGN; + +/* + * Field usage: + * Messge type dir. 
hdr.v?i type addr qos vcc result + * ----------- ---- ------- ---- ---- --- --- ------ + * OPEN K->D Y Y Y Y Y 0 + * OPEN D->K - Y Y Y Y Y + * CLOSE K->D - - Y - Y 0 + * CLOSE D->K - - - - Y Y + */ + +#define SIOCSIFATMTCP _IO('a',ATMIOC_ITF) /* set ATMTCP mode */ +#define ATMTCP_CREATE _IO('a',ATMIOC_ITF+14) /* create persistent ATMTCP + interface */ +#define ATMTCP_REMOVE _IO('a',ATMIOC_ITF+15) /* destroy persistent ATMTCP + interface */ + + +#ifdef __KERNEL__ + +struct atm_tcp_ops { + int (*attach)(struct atm_vcc *vcc,int itf); + int (*create_persistent)(int itf); + int (*remove_persistent)(int itf); + struct module *owner; +}; + +extern struct atm_tcp_ops atm_tcp_ops; + +#endif + +#endif diff --git a/include/linux/atm_zatm.h b/include/linux/atm_zatm.h new file mode 100644 index 00000000..10f0fa29 --- /dev/null +++ b/include/linux/atm_zatm.h @@ -0,0 +1,52 @@ +/* atm_zatm.h - Driver-specific declarations of the ZATM driver (for use by + driver-specific utilities) */ + +/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef LINUX_ATM_ZATM_H +#define LINUX_ATM_ZATM_H + +/* + * Note: non-kernel programs including this file must also include + * sys/types.h for struct timeval + */ + +#include <linux/atmapi.h> +#include <linux/atmioc.h> + +#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) + /* get pool statistics */ +#define ZATM_GETPOOLZ _IOW('a',ATMIOC_SARPRV+2,struct atmif_sioc) + /* get statistics and zero */ +#define ZATM_SETPOOL _IOW('a',ATMIOC_SARPRV+3,struct atmif_sioc) + /* set pool parameters */ + +struct zatm_pool_info { + int ref_count; /* free buffer pool usage counters */ + int low_water,high_water; /* refill parameters */ + int rqa_count,rqu_count; /* queue condition counters */ + int offset,next_off; /* alignment optimizations: offset */ + int next_cnt,next_thres; /* repetition counter and threshold */ +}; + +struct zatm_pool_req { + int pool_num; /* pool number */ + struct zatm_pool_info info; /* actual information */ +}; + +struct zatm_t_hist { + struct timeval real; /* real (wall-clock) time */ + struct timeval expected; /* expected real time */ +}; + + +#define ZATM_OAM_POOL 0 /* free buffer pool for OAM cells */ +#define ZATM_AAL0_POOL 1 /* free buffer pool for AAL0 cells */ +#define ZATM_AAL5_POOL_BASE 2 /* first AAL5 free buffer pool */ +#define ZATM_LAST_POOL ZATM_AAL5_POOL_BASE+10 /* max. 64 kB */ + +#define ZATM_TIMER_HISTORY_SIZE 16 /* number of timer adjustments to + record; must be 2^n */ + +#endif diff --git a/include/linux/atmapi.h b/include/linux/atmapi.h new file mode 100644 index 00000000..8fe54d90 --- /dev/null +++ b/include/linux/atmapi.h @@ -0,0 +1,29 @@ +/* atmapi.h - ATM API user space/kernel compatibility */ + +/* Written 1999,2000 by Werner Almesberger, EPFL ICA */ + + +#ifndef _LINUX_ATMAPI_H +#define _LINUX_ATMAPI_H + +#if defined(__sparc__) || defined(__ia64__) +/* such alignment is not required on 32 bit sparcs, but we can't + figure that we are on a sparc64 while compiling user-space programs. */ +#define __ATM_API_ALIGN __attribute__((aligned(8))) +#else +#define __ATM_API_ALIGN +#endif + + +/* + * Opaque type for kernel pointers. Note that _ is never accessed. We need + * the struct in order hide the array, so that we can make simple assignments + * instead of being forced to use memcpy. It also improves error reporting for + * code that still assumes that we're passing unsigned longs. + * + * Convention: NULL pointers are passed as a field of all zeroes. 
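Because atm_kptr_t (the typedef follows just below) is an opaque byte array rather than a usable pointer, user-space code such as an ATMTCP helper can only copy it around or compare it against the all-zeroes NULL encoding described here. A small sketch; the helper name is mine:

#include <string.h>
#include <linux/atmapi.h>

/* Nonzero if the opaque kernel pointer carries a real (non-NULL) value;
 * by the convention above, NULL is encoded as all-zero bytes. */
static int atm_kptr_is_set(const atm_kptr_t *p)
{
        static const atm_kptr_t zero;   /* zero-initialized == "NULL" */

        return memcmp(p, &zero, sizeof(*p)) != 0;
}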
+ */ + +typedef struct { unsigned char _[8]; } __ATM_API_ALIGN atm_kptr_t; + +#endif diff --git a/include/linux/atmarp.h b/include/linux/atmarp.h new file mode 100644 index 00000000..231f4bde --- /dev/null +++ b/include/linux/atmarp.h @@ -0,0 +1,41 @@ +/* atmarp.h - ATM ARP protocol and kernel-demon interface definitions */ + +/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef _LINUX_ATMARP_H +#define _LINUX_ATMARP_H + +#include <linux/types.h> +#include <linux/atmapi.h> +#include <linux/atmioc.h> + + +#define ATMARP_RETRY_DELAY 30 /* request next resolution or forget + NAK after 30 sec - should go into + atmclip.h */ +#define ATMARP_MAX_UNRES_PACKETS 5 /* queue that many packets while + waiting for the resolver */ + + +#define ATMARPD_CTRL _IO('a',ATMIOC_CLIP+1) /* become atmarpd ctrl sock */ +#define ATMARP_MKIP _IO('a',ATMIOC_CLIP+2) /* attach socket to IP */ +#define ATMARP_SETENTRY _IO('a',ATMIOC_CLIP+3) /* fill or hide ARP entry */ +#define ATMARP_ENCAP _IO('a',ATMIOC_CLIP+5) /* change encapsulation */ + + +enum atmarp_ctrl_type { + act_invalid, /* catch uninitialized structures */ + act_need, /* need address resolution */ + act_up, /* interface is coming up */ + act_down, /* interface is going down */ + act_change /* interface configuration has changed */ +}; + +struct atmarp_ctrl { + enum atmarp_ctrl_type type; /* message type */ + int itf_num;/* interface number (if present) */ + __be32 ip; /* IP address (act_need only) */ +}; + +#endif diff --git a/include/linux/atmbr2684.h b/include/linux/atmbr2684.h new file mode 100644 index 00000000..fdb2629b --- /dev/null +++ b/include/linux/atmbr2684.h @@ -0,0 +1,117 @@ +#ifndef _LINUX_ATMBR2684_H +#define _LINUX_ATMBR2684_H + +#include <linux/types.h> +#include <linux/atm.h> +#include <linux/if.h> /* For IFNAMSIZ */ + +/* + * Type of media we're bridging (ethernet, token ring, etc) Currently only + * ethernet is supported + */ +#define BR2684_MEDIA_ETHERNET (0) /* 802.3 */ +#define BR2684_MEDIA_802_4 (1) /* 802.4 */ +#define BR2684_MEDIA_TR (2) /* 802.5 - token ring */ +#define BR2684_MEDIA_FDDI (3) +#define BR2684_MEDIA_802_6 (4) /* 802.6 */ + + /* used only at device creation: */ +#define BR2684_FLAG_ROUTED (1<<16) /* payload is routed, not bridged */ + +/* + * Is there FCS inbound on this VC? This currently isn't supported. + */ +#define BR2684_FCSIN_NO (0) +#define BR2684_FCSIN_IGNORE (1) +#define BR2684_FCSIN_VERIFY (2) + +/* + * Is there FCS outbound on this VC? This currently isn't supported. + */ +#define BR2684_FCSOUT_NO (0) +#define BR2684_FCSOUT_SENDZERO (1) +#define BR2684_FCSOUT_GENERATE (2) + +/* + * Does this VC include LLC encapsulation? + */ +#define BR2684_ENCAPS_VC (0) /* VC-mux */ +#define BR2684_ENCAPS_LLC (1) +#define BR2684_ENCAPS_AUTODETECT (2) /* Unsuported */ + +/* + * Is this VC bridged or routed? 
+ */ + +#define BR2684_PAYLOAD_ROUTED (0) +#define BR2684_PAYLOAD_BRIDGED (1) + +/* + * This is for the ATM_NEWBACKENDIF call - these are like socket families: + * the first element of the structure is the backend number and the rest + * is per-backend specific + */ +struct atm_newif_br2684 { + atm_backend_t backend_num; /* ATM_BACKEND_BR2684 */ + int media; /* BR2684_MEDIA_*, flags in upper bits */ + char ifname[IFNAMSIZ]; + int mtu; +}; + +/* + * This structure is used to specify a br2684 interface - either by a + * positive integer (returned by ATM_NEWBACKENDIF) or the interfaces name + */ +#define BR2684_FIND_BYNOTHING (0) +#define BR2684_FIND_BYNUM (1) +#define BR2684_FIND_BYIFNAME (2) +struct br2684_if_spec { + int method; /* BR2684_FIND_* */ + union { + char ifname[IFNAMSIZ]; + int devnum; + } spec; +}; + +/* + * This is for the ATM_SETBACKEND call - these are like socket families: + * the first element of the structure is the backend number and the rest + * is per-backend specific + */ +struct atm_backend_br2684 { + atm_backend_t backend_num; /* ATM_BACKEND_BR2684 */ + struct br2684_if_spec ifspec; + int fcs_in; /* BR2684_FCSIN_* */ + int fcs_out; /* BR2684_FCSOUT_* */ + int fcs_auto; /* 1: fcs_{in,out} disabled if no FCS rx'ed */ + int encaps; /* BR2684_ENCAPS_* */ + int has_vpiid; /* 1: use vpn_id - Unsupported */ + __u8 vpn_id[7]; + int send_padding; /* unsupported */ + int min_size; /* we will pad smaller packets than this */ +}; + +/* + * The BR2684_SETFILT ioctl is an experimental mechanism for folks + * terminating a large number of IP-only vcc's. When netfilter allows + * efficient per-if in/out filters, this support will be removed + */ +struct br2684_filter { + __be32 prefix; /* network byte order */ + __be32 netmask; /* 0 = disable filter */ +}; + +struct br2684_filter_set { + struct br2684_if_spec ifspec; + struct br2684_filter filter; +}; + +enum br2684_payload { + p_routed = BR2684_PAYLOAD_ROUTED, + p_bridged = BR2684_PAYLOAD_BRIDGED, +}; + +#define BR2684_SETFILT _IOW( 'a', ATMIOC_BACKEND + 0, \ + struct br2684_filter_set) + +#endif /* _LINUX_ATMBR2684_H */ diff --git a/include/linux/atmclip.h b/include/linux/atmclip.h new file mode 100644 index 00000000..02c94c44 --- /dev/null +++ b/include/linux/atmclip.h @@ -0,0 +1,21 @@ +/* atmclip.h - Classical IP over ATM */ + +/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef LINUX_ATMCLIP_H +#define LINUX_ATMCLIP_H + +#include <linux/sockios.h> +#include <linux/atmioc.h> + + +#define RFC1483LLC_LEN 8 /* LLC+OUI+PID = 8 */ +#define RFC1626_MTU 9180 /* RFC1626 default MTU */ + +#define CLIP_DEFAULT_IDLETIMER 1200 /* 20 minutes, see RFC1755 */ +#define CLIP_CHECK_INTERVAL 10 /* check every ten seconds */ + +#define SIOCMKCLIP _IO('a',ATMIOC_CLIP) /* create IP interface */ + +#endif diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h new file mode 100644 index 00000000..06fd4bbc --- /dev/null +++ b/include/linux/atmdev.h @@ -0,0 +1,526 @@ +/* atmdev.h - ATM device driver declarations and various related items */ + +/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef LINUX_ATMDEV_H +#define LINUX_ATMDEV_H + + +#include <linux/atmapi.h> +#include <linux/atm.h> +#include <linux/atmioc.h> + + +#define ESI_LEN 6 + +#define ATM_OC3_PCR (155520000/270*260/8/53) + /* OC3 link rate: 155520000 bps + SONET overhead: /270*260 (9 section, 1 path) + bits per cell: /8/53 + max cell rate: 353207.547 cells/sec */ +#define ATM_25_PCR ((25600000/8-8000)/54) + /* 25 Mbps ATM cell rate (59111) */ 
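The link-rate macros above and below are plain integer constant expressions, so the fractional cell rates quoted in the comments simply truncate. A short user-space sketch, assuming installed headers, that prints them and derives the usable AAL payload rate (48 payload bytes per 53-byte cell):

#include <stdio.h>
#include <linux/atmdev.h>

int main(void)
{
        printf("OC3 PCR:   %d cells/s\n", ATM_OC3_PCR);         /* 353207 */
        printf("25Mb PCR:  %d cells/s\n", ATM_25_PCR);          /* 59111 */
        /* 48 payload bytes per cell -> roughly 135.6 Mbit/s on OC3 */
        printf("OC3 AAL payload: %ld bit/s\n", (long)ATM_OC3_PCR * 48 * 8);
        return 0;
}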
+#define ATM_OC12_PCR (622080000/1080*1040/8/53) + /* OC12 link rate: 622080000 bps + SONET overhead: /1080*1040 + bits per cell: /8/53 + max cell rate: 1412830.188 cells/sec */ +#define ATM_DS3_PCR (8000*12) + /* DS3: 12 cells in a 125 usec time slot */ + + +#define __AAL_STAT_ITEMS \ + __HANDLE_ITEM(tx); /* TX okay */ \ + __HANDLE_ITEM(tx_err); /* TX errors */ \ + __HANDLE_ITEM(rx); /* RX okay */ \ + __HANDLE_ITEM(rx_err); /* RX errors */ \ + __HANDLE_ITEM(rx_drop); /* RX out of memory */ + +struct atm_aal_stats { +#define __HANDLE_ITEM(i) int i + __AAL_STAT_ITEMS +#undef __HANDLE_ITEM +}; + + +struct atm_dev_stats { + struct atm_aal_stats aal0; + struct atm_aal_stats aal34; + struct atm_aal_stats aal5; +} __ATM_API_ALIGN; + + +#define ATM_GETLINKRATE _IOW('a',ATMIOC_ITF+1,struct atmif_sioc) + /* get link rate */ +#define ATM_GETNAMES _IOW('a',ATMIOC_ITF+3,struct atm_iobuf) + /* get interface names (numbers) */ +#define ATM_GETTYPE _IOW('a',ATMIOC_ITF+4,struct atmif_sioc) + /* get interface type name */ +#define ATM_GETESI _IOW('a',ATMIOC_ITF+5,struct atmif_sioc) + /* get interface ESI */ +#define ATM_GETADDR _IOW('a',ATMIOC_ITF+6,struct atmif_sioc) + /* get itf's local ATM addr. list */ +#define ATM_RSTADDR _IOW('a',ATMIOC_ITF+7,struct atmif_sioc) + /* reset itf's ATM address list */ +#define ATM_ADDADDR _IOW('a',ATMIOC_ITF+8,struct atmif_sioc) + /* add a local ATM address */ +#define ATM_DELADDR _IOW('a',ATMIOC_ITF+9,struct atmif_sioc) + /* remove a local ATM address */ +#define ATM_GETCIRANGE _IOW('a',ATMIOC_ITF+10,struct atmif_sioc) + /* get connection identifier range */ +#define ATM_SETCIRANGE _IOW('a',ATMIOC_ITF+11,struct atmif_sioc) + /* set connection identifier range */ +#define ATM_SETESI _IOW('a',ATMIOC_ITF+12,struct atmif_sioc) + /* set interface ESI */ +#define ATM_SETESIF _IOW('a',ATMIOC_ITF+13,struct atmif_sioc) + /* force interface ESI */ +#define ATM_ADDLECSADDR _IOW('a', ATMIOC_ITF+14, struct atmif_sioc) + /* register a LECS address */ +#define ATM_DELLECSADDR _IOW('a', ATMIOC_ITF+15, struct atmif_sioc) + /* unregister a LECS address */ +#define ATM_GETLECSADDR _IOW('a', ATMIOC_ITF+16, struct atmif_sioc) + /* retrieve LECS address(es) */ + +#define ATM_GETSTAT _IOW('a',ATMIOC_SARCOM+0,struct atmif_sioc) + /* get AAL layer statistics */ +#define ATM_GETSTATZ _IOW('a',ATMIOC_SARCOM+1,struct atmif_sioc) + /* get AAL layer statistics and zero */ +#define ATM_GETLOOP _IOW('a',ATMIOC_SARCOM+2,struct atmif_sioc) + /* get loopback mode */ +#define ATM_SETLOOP _IOW('a',ATMIOC_SARCOM+3,struct atmif_sioc) + /* set loopback mode */ +#define ATM_QUERYLOOP _IOW('a',ATMIOC_SARCOM+4,struct atmif_sioc) + /* query supported loopback modes */ +#define ATM_SETSC _IOW('a',ATMIOC_SPECIAL+1,int) + /* enable or disable single-copy */ +#define ATM_SETBACKEND _IOW('a',ATMIOC_SPECIAL+2,atm_backend_t) + /* set backend handler */ +#define ATM_NEWBACKENDIF _IOW('a',ATMIOC_SPECIAL+3,atm_backend_t) + /* use backend to make new if */ +#define ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct atm_iobuf) + /* add party to p2mp call */ +#ifdef CONFIG_COMPAT +/* It actually takes struct sockaddr_atmsvc, not struct atm_iobuf */ +#define COMPAT_ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct compat_atm_iobuf) +#endif +#define ATM_DROPPARTY _IOW('a', ATMIOC_SPECIAL+5,int) + /* drop party from p2mp call */ + +/* + * These are backend handkers that can be set via the ATM_SETBACKEND call + * above. 
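Attaching a VCC to one of these backends takes two steps, roughly what a helper such as br2684ctl does: create the virtual interface with ATM_NEWBACKENDIF, then open an AAL5 PVC and hand it over with ATM_SETBACKEND. A hedged user-space sketch; the interface name nas0, the 0.0.35 PVC and the QoS values are examples only, and the ATM_BACKEND_BR2684 constant is defined just below:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/atmbr2684.h>

int main(void)
{
        struct atm_newif_br2684 ni;
        struct atm_backend_br2684 be;
        struct sockaddr_atmpvc pvc;
        struct atm_qos qos;
        int fd = socket(PF_ATMPVC, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket(PF_ATMPVC)");
                return 1;
        }

        /* 1. Create the bridged Ethernet-style interface "nas0". */
        memset(&ni, 0, sizeof(ni));
        ni.backend_num = ATM_BACKEND_BR2684;
        ni.media = BR2684_MEDIA_ETHERNET;
        strcpy(ni.ifname, "nas0");
        ni.mtu = 1500;
        if (ioctl(fd, ATM_NEWBACKENDIF, &ni) < 0)
                perror("ATM_NEWBACKENDIF");     /* may already exist */

        /* 2. Open the PVC: QoS must be set before connect(). */
        memset(&qos, 0, sizeof(qos));
        qos.aal = ATM_AAL5;
        qos.txtp.traffic_class = qos.rxtp.traffic_class = ATM_UBR;
        qos.txtp.max_sdu = qos.rxtp.max_sdu = 1524;
        if (setsockopt(fd, SOL_ATM, SO_ATMQOS, &qos, sizeof(qos)) < 0)
                perror("SO_ATMQOS");

        memset(&pvc, 0, sizeof(pvc));
        pvc.sap_family = AF_ATMPVC;
        pvc.sap_addr.itf = 0;
        pvc.sap_addr.vpi = 0;
        pvc.sap_addr.vci = 35;
        if (connect(fd, (struct sockaddr *)&pvc, sizeof(pvc)) < 0) {
                perror("connect");
                return 1;
        }

        /* 3. Hand the connected VCC to the RFC 2684 backend. */
        memset(&be, 0, sizeof(be));
        be.backend_num = ATM_BACKEND_BR2684;
        be.ifspec.method = BR2684_FIND_BYIFNAME;
        strcpy(be.ifspec.spec.ifname, "nas0");
        be.encaps = BR2684_ENCAPS_LLC;
        if (ioctl(fd, ATM_SETBACKEND, &be) < 0) {
                perror("ATM_SETBACKEND");
                return 1;
        }

        pause();        /* the interface stays usable while the VCC is open */
        return 0;
}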
In the future we may support dynamic loading of these - for now, + * they're just being used to share the ATMIOC_BACKEND ioctls + */ +#define ATM_BACKEND_RAW 0 +#define ATM_BACKEND_PPP 1 /* PPPoATM - RFC2364 */ +#define ATM_BACKEND_BR2684 2 /* Bridged RFC1483/2684 */ + +/* for ATM_GETTYPE */ +#define ATM_ITFTYP_LEN 8 /* maximum length of interface type name */ + +/* + * Loopback modes for ATM_{PHY,SAR}_{GET,SET}LOOP + */ + +/* Point of loopback CPU-->SAR-->PHY-->line--> ... */ +#define __ATM_LM_NONE 0 /* no loop back ^ ^ ^ ^ */ +#define __ATM_LM_AAL 1 /* loop back PDUs --' | | | */ +#define __ATM_LM_ATM 2 /* loop back ATM cells ---' | | */ +/* RESERVED 4 loop back on PHY side ---' */ +#define __ATM_LM_PHY 8 /* loop back bits (digital) ----' | */ +#define __ATM_LM_ANALOG 16 /* loop back the analog signal --------' */ + +/* Direction of loopback */ +#define __ATM_LM_MKLOC(n) ((n)) /* Local (i.e. loop TX to RX) */ +#define __ATM_LM_MKRMT(n) ((n) << 8) /* Remote (i.e. loop RX to TX) */ + +#define __ATM_LM_XTLOC(n) ((n) & 0xff) +#define __ATM_LM_XTRMT(n) (((n) >> 8) & 0xff) + +#define ATM_LM_NONE 0 /* no loopback */ + +#define ATM_LM_LOC_AAL __ATM_LM_MKLOC(__ATM_LM_AAL) +#define ATM_LM_LOC_ATM __ATM_LM_MKLOC(__ATM_LM_ATM) +#define ATM_LM_LOC_PHY __ATM_LM_MKLOC(__ATM_LM_PHY) +#define ATM_LM_LOC_ANALOG __ATM_LM_MKLOC(__ATM_LM_ANALOG) + +#define ATM_LM_RMT_AAL __ATM_LM_MKRMT(__ATM_LM_AAL) +#define ATM_LM_RMT_ATM __ATM_LM_MKRMT(__ATM_LM_ATM) +#define ATM_LM_RMT_PHY __ATM_LM_MKRMT(__ATM_LM_PHY) +#define ATM_LM_RMT_ANALOG __ATM_LM_MKRMT(__ATM_LM_ANALOG) + +/* + * Note: ATM_LM_LOC_* and ATM_LM_RMT_* can be combined, provided that + * __ATM_LM_XTLOC(x) <= __ATM_LM_XTRMT(x) + */ + + +struct atm_iobuf { + int length; + void __user *buffer; +}; + +/* for ATM_GETCIRANGE / ATM_SETCIRANGE */ + +#define ATM_CI_MAX -1 /* use maximum range of VPI/VCI */ + +struct atm_cirange { + signed char vpi_bits; /* 1..8, ATM_CI_MAX (-1) for maximum */ + signed char vci_bits; /* 1..16, ATM_CI_MAX (-1) for maximum */ +}; + +/* for ATM_SETSC; actually taken from the ATM_VF number space */ + +#define ATM_SC_RX 1024 /* enable RX single-copy */ +#define ATM_SC_TX 2048 /* enable TX single-copy */ + +#define ATM_BACKLOG_DEFAULT 32 /* if we get more, we're likely to time out + anyway */ + +/* MF: change_qos (Modify) flags */ + +#define ATM_MF_IMMED 1 /* Block until change is effective */ +#define ATM_MF_INC_RSV 2 /* Change reservation on increase */ +#define ATM_MF_INC_SHP 4 /* Change shaping on increase */ +#define ATM_MF_DEC_RSV 8 /* Change reservation on decrease */ +#define ATM_MF_DEC_SHP 16 /* Change shaping on decrease */ +#define ATM_MF_BWD 32 /* Set the backward direction parameters */ + +#define ATM_MF_SET (ATM_MF_INC_RSV | ATM_MF_INC_SHP | ATM_MF_DEC_RSV | \ + ATM_MF_DEC_SHP | ATM_MF_BWD) + +/* + * ATM_VS_* are used to express VC state in a human-friendly way. 
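The loopback helpers above keep the local selection in the low byte and the remote selection in the next byte, which is what the __ATM_LM_XTLOC/__ATM_LM_XTRMT extraction macros and the combination rule above rely on. A small sketch:

#include <stdio.h>
#include <linux/atmdev.h>

int main(void)
{
        /* Loop ATM cells locally and the PHY remotely, then check the
         * documented constraint __ATM_LM_XTLOC(x) <= __ATM_LM_XTRMT(x). */
        int mode = ATM_LM_LOC_ATM | ATM_LM_RMT_PHY;

        printf("local  = %d\n", __ATM_LM_XTLOC(mode));  /* 2, __ATM_LM_ATM */
        printf("remote = %d\n", __ATM_LM_XTRMT(mode));  /* 8, __ATM_LM_PHY */
        printf("valid  = %s\n",
               __ATM_LM_XTLOC(mode) <= __ATM_LM_XTRMT(mode) ? "yes" : "no");
        return 0;
}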
+ */ + +#define ATM_VS_IDLE 0 /* VC is not used */ +#define ATM_VS_CONNECTED 1 /* VC is connected */ +#define ATM_VS_CLOSING 2 /* VC is closing */ +#define ATM_VS_LISTEN 3 /* VC is listening for incoming setups */ +#define ATM_VS_INUSE 4 /* VC is in use (registered with atmsigd) */ +#define ATM_VS_BOUND 5 /* VC is bound */ + +#define ATM_VS2TXT_MAP \ + "IDLE", "CONNECTED", "CLOSING", "LISTEN", "INUSE", "BOUND" + +#define ATM_VF2TXT_MAP \ + "ADDR", "READY", "PARTIAL", "REGIS", \ + "RELEASED", "HASQOS", "LISTEN", "META", \ + "256", "512", "1024", "2048", \ + "SESSION", "HASSAP", "BOUND", "CLOSE" + + +#ifdef __KERNEL__ + +#include <linux/wait.h> /* wait_queue_head_t */ +#include <linux/time.h> /* struct timeval */ +#include <linux/net.h> +#include <linux/bug.h> +#include <linux/skbuff.h> /* struct sk_buff */ +#include <linux/uio.h> +#include <net/sock.h> +#include <linux/atomic.h> + +#ifdef CONFIG_PROC_FS +#include <linux/proc_fs.h> + +extern struct proc_dir_entry *atm_proc_root; +#endif + +#ifdef CONFIG_COMPAT +#include <linux/compat.h> +struct compat_atm_iobuf { + int length; + compat_uptr_t buffer; +}; +#endif + +struct k_atm_aal_stats { +#define __HANDLE_ITEM(i) atomic_t i + __AAL_STAT_ITEMS +#undef __HANDLE_ITEM +}; + + +struct k_atm_dev_stats { + struct k_atm_aal_stats aal0; + struct k_atm_aal_stats aal34; + struct k_atm_aal_stats aal5; +}; + +struct device; + +enum { + ATM_VF_ADDR, /* Address is in use. Set by anybody, cleared + by device driver. */ + ATM_VF_READY, /* VC is ready to transfer data. Set by device + driver, cleared by anybody. */ + ATM_VF_PARTIAL, /* resources are bound to PVC (partial PVC + setup), controlled by socket layer */ + ATM_VF_REGIS, /* registered with demon, controlled by SVC + socket layer */ + ATM_VF_BOUND, /* local SAP is set, controlled by SVC socket + layer */ + ATM_VF_RELEASED, /* demon has indicated/requested release, + controlled by SVC socket layer */ + ATM_VF_HASQOS, /* QOS parameters have been set */ + ATM_VF_LISTEN, /* socket is used for listening */ + ATM_VF_META, /* SVC socket isn't used for normal data + traffic and doesn't depend on signaling + to be available */ + ATM_VF_SESSION, /* VCC is p2mp session control descriptor */ + ATM_VF_HASSAP, /* SAP has been set */ + ATM_VF_CLOSE, /* asynchronous close - treat like VF_RELEASED*/ + ATM_VF_WAITING, /* waiting for reply from sigd */ + ATM_VF_IS_CLIP, /* in use by CLIP protocol */ +}; + + +#define ATM_VF2VS(flags) \ + (test_bit(ATM_VF_READY,&(flags)) ? ATM_VS_CONNECTED : \ + test_bit(ATM_VF_RELEASED,&(flags)) ? ATM_VS_CLOSING : \ + test_bit(ATM_VF_LISTEN,&(flags)) ? ATM_VS_LISTEN : \ + test_bit(ATM_VF_REGIS,&(flags)) ? ATM_VS_INUSE : \ + test_bit(ATM_VF_BOUND,&(flags)) ? 
ATM_VS_BOUND : ATM_VS_IDLE) + + +enum { + ATM_DF_REMOVED, /* device was removed from atm_devs list */ +}; + + +#define ATM_PHY_SIG_LOST 0 /* no carrier/light */ +#define ATM_PHY_SIG_UNKNOWN 1 /* carrier/light status is unknown */ +#define ATM_PHY_SIG_FOUND 2 /* carrier/light okay */ + +#define ATM_ATMOPT_CLP 1 /* set CLP bit */ + +struct atm_vcc { + /* struct sock has to be the first member of atm_vcc */ + struct sock sk; + unsigned long flags; /* VCC flags (ATM_VF_*) */ + short vpi; /* VPI and VCI (types must be equal */ + /* with sockaddr) */ + int vci; + unsigned long aal_options; /* AAL layer options */ + unsigned long atm_options; /* ATM layer options */ + struct atm_dev *dev; /* device back pointer */ + struct atm_qos qos; /* QOS */ + struct atm_sap sap; /* SAP */ + void (*push)(struct atm_vcc *vcc,struct sk_buff *skb); + void (*pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* optional */ + int (*push_oam)(struct atm_vcc *vcc,void *cell); + int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); + void *dev_data; /* per-device data */ + void *proto_data; /* per-protocol data */ + struct k_atm_aal_stats *stats; /* pointer to AAL stats group */ + /* SVC part --- may move later ------------------------------------- */ + short itf; /* interface number */ + struct sockaddr_atmsvc local; + struct sockaddr_atmsvc remote; + /* Multipoint part ------------------------------------------------- */ + struct atm_vcc *session; /* session VCC descriptor */ + /* Other stuff ----------------------------------------------------- */ + void *user_back; /* user backlink - not touched by */ + /* native ATM stack. Currently used */ + /* by CLIP and sch_atm. */ +}; + +static inline struct atm_vcc *atm_sk(struct sock *sk) +{ + return (struct atm_vcc *)sk; +} + +static inline struct atm_vcc *ATM_SD(struct socket *sock) +{ + return atm_sk(sock->sk); +} + +static inline struct sock *sk_atm(struct atm_vcc *vcc) +{ + return (struct sock *)vcc; +} + +struct atm_dev_addr { + struct sockaddr_atmsvc addr; /* ATM address */ + struct list_head entry; /* next address */ +}; + +enum atm_addr_type_t { ATM_ADDR_LOCAL, ATM_ADDR_LECS }; + +struct atm_dev { + const struct atmdev_ops *ops; /* device operations; NULL if unused */ + const struct atmphy_ops *phy; /* PHY operations, may be undefined */ + /* (NULL) */ + const char *type; /* device type name */ + int number; /* device index */ + void *dev_data; /* per-device data */ + void *phy_data; /* private PHY date */ + unsigned long flags; /* device flags (ATM_DF_*) */ + struct list_head local; /* local ATM addresses */ + struct list_head lecs; /* LECS ATM addresses learned via ILMI */ + unsigned char esi[ESI_LEN]; /* ESI ("MAC" addr) */ + struct atm_cirange ci_range; /* VPI/VCI range */ + struct k_atm_dev_stats stats; /* statistics */ + char signal; /* signal status (ATM_PHY_SIG_*) */ + int link_rate; /* link rate (default: OC3) */ + atomic_t refcnt; /* reference count */ + spinlock_t lock; /* protect internal members */ +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_entry; /* proc entry */ + char *proc_name; /* proc entry name */ +#endif + struct device class_dev; /* sysfs device */ + struct list_head dev_list; /* linkage */ +}; + + +/* OF: send_Oam Flags */ + +#define ATM_OF_IMMED 1 /* Attempt immediate delivery */ +#define ATM_OF_INRATE 2 /* Attempt in-rate delivery */ + + +/* + * ioctl, getsockopt, and setsockopt are optional and can be set to NULL. 
+ */ + +struct atmdev_ops { /* only send is required */ + void (*dev_close)(struct atm_dev *dev); + int (*open)(struct atm_vcc *vcc); + void (*close)(struct atm_vcc *vcc); + int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg); +#ifdef CONFIG_COMPAT + int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd, + void __user *arg); +#endif + int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, + void __user *optval,int optlen); + int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, + void __user *optval,unsigned int optlen); + int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); + int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); + void (*phy_put)(struct atm_dev *dev,unsigned char value, + unsigned long addr); + unsigned char (*phy_get)(struct atm_dev *dev,unsigned long addr); + int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags); + int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page); + struct module *owner; +}; + +struct atmphy_ops { + int (*start)(struct atm_dev *dev); + int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg); + void (*interrupt)(struct atm_dev *dev); + int (*stop)(struct atm_dev *dev); +}; + +struct atm_skb_data { + struct atm_vcc *vcc; /* ATM VCC */ + unsigned long atm_options; /* ATM layer options */ +}; + +#define VCC_HTABLE_SIZE 32 + +extern struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; +extern rwlock_t vcc_sklist_lock; + +#define ATM_SKB(skb) (((struct atm_skb_data *) (skb)->cb)) + +struct atm_dev *atm_dev_register(const char *type, struct device *parent, + const struct atmdev_ops *ops, + int number, /* -1 == pick first available */ + unsigned long *flags); +struct atm_dev *atm_dev_lookup(int number); +void atm_dev_deregister(struct atm_dev *dev); + +/* atm_dev_signal_change + * + * Propagate lower layer signal change in atm_dev->signal to netdevice. + * The event will be sent via a notifier call chain. + */ +void atm_dev_signal_change(struct atm_dev *dev, char signal); + +void vcc_insert_socket(struct sock *sk); + +void atm_dev_release_vccs(struct atm_dev *dev); + + +static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) +{ + atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc); +} + + +static inline void atm_return(struct atm_vcc *vcc,int truesize) +{ + atomic_sub(truesize, &sk_atm(vcc)->sk_rmem_alloc); +} + + +static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size) +{ + return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) < + sk_atm(vcc)->sk_sndbuf; +} + + +static inline void atm_dev_hold(struct atm_dev *dev) +{ + atomic_inc(&dev->refcnt); +} + + +static inline void atm_dev_put(struct atm_dev *dev) +{ + if (atomic_dec_and_test(&dev->refcnt)) { + BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); + if (dev->ops->dev_close) + dev->ops->dev_close(dev); + put_device(&dev->class_dev); + } +} + + +int atm_charge(struct atm_vcc *vcc,int truesize); +struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, + gfp_t gfp_flags); +int atm_pcr_goal(const struct atm_trafprm *tp); + +void vcc_release_async(struct atm_vcc *vcc, int reply); + +struct atm_ioctl { + struct module *owner; + /* A module reference is kept if appropriate over this call. + * Return -ENOIOCTLCMD if you don't handle it. */ + int (*ioctl)(struct socket *, unsigned int cmd, unsigned long arg); + struct list_head list; +}; + +/** + * register_atm_ioctl - register handler for ioctl operations + * + * Special (non-device) handlers of ioctl's should + * register here. 
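A minimal sketch of the driver side of this interface: a fictitious fooatm module registers one device with only the mandatory .send hook, frees transmitted skbs through vcc->pop as drivers conventionally do, and unregisters on exit. Real drivers also provide .open/.close and talk to hardware; this is only an outline:

#include <linux/module.h>
#include <linux/atmdev.h>

static int fooatm_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
        /* A real driver would queue skb to hardware here; on completion it
         * returns the buffer and accounts the transmission. */
        atomic_inc(&vcc->stats->tx);
        if (vcc->pop)
                vcc->pop(vcc, skb);
        else
                dev_kfree_skb_any(skb);
        return 0;
}

static const struct atmdev_ops fooatm_ops = {
        .send  = fooatm_send,   /* the only required operation */
        .owner = THIS_MODULE,
};

static struct atm_dev *fooatm_dev;

static int __init fooatm_init(void)
{
        fooatm_dev = atm_dev_register("fooatm", NULL, &fooatm_ops, -1, NULL);
        if (!fooatm_dev)
                return -ENODEV;
        fooatm_dev->ci_range.vpi_bits = 8;
        fooatm_dev->ci_range.vci_bits = 16;
        return 0;
}

static void __exit fooatm_exit(void)
{
        atm_dev_deregister(fooatm_dev);
}

module_init(fooatm_init);
module_exit(fooatm_exit);
MODULE_LICENSE("GPL");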
If you're a normal device, you should + * set .ioctl in your atmdev_ops instead. + */ +void register_atm_ioctl(struct atm_ioctl *); + +/** + * deregister_atm_ioctl - remove the ioctl handler + */ +void deregister_atm_ioctl(struct atm_ioctl *); + + +/* register_atmdevice_notifier - register atm_dev notify events + * + * Clients like br2684 will register notify events + * Currently we notify of signal found/lost + */ +int register_atmdevice_notifier(struct notifier_block *nb); +void unregister_atmdevice_notifier(struct notifier_block *nb); + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h new file mode 100644 index 00000000..4c7a4b21 --- /dev/null +++ b/include/linux/atmel-mci.h @@ -0,0 +1,39 @@ +#ifndef __LINUX_ATMEL_MCI_H +#define __LINUX_ATMEL_MCI_H + +#define ATMCI_MAX_NR_SLOTS 2 + +/** + * struct mci_slot_pdata - board-specific per-slot configuration + * @bus_width: Number of data lines wired up the slot + * @detect_pin: GPIO pin wired to the card detect switch + * @wp_pin: GPIO pin wired to the write protect sensor + * @detect_is_active_high: The state of the detect pin when it is active + * + * If a given slot is not present on the board, @bus_width should be + * set to 0. The other fields are ignored in this case. + * + * Any pins that aren't available should be set to a negative value. + * + * Note that support for multiple slots is experimental -- some cards + * might get upset if we don't get the clock management exactly right. + * But in most cases, it should work just fine. + */ +struct mci_slot_pdata { + unsigned int bus_width; + int detect_pin; + int wp_pin; + bool detect_is_active_high; +}; + +/** + * struct mci_platform_data - board-specific MMC/SDcard configuration + * @dma_slave: DMA slave interface to use in data transfers. + * @slot: Per-slot configuration data. + */ +struct mci_platform_data { + struct mci_dma_data *dma_slave; + struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS]; +}; + +#endif /* __LINUX_ATMEL_MCI_H */ diff --git a/include/linux/atmel-pwm-bl.h b/include/linux/atmel-pwm-bl.h new file mode 100644 index 00000000..0153a478 --- /dev/null +++ b/include/linux/atmel-pwm-bl.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2007 Atmel Corporation + * + * Driver for the AT32AP700X PS/2 controller (PSIF). + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef __INCLUDE_ATMEL_PWM_BL_H +#define __INCLUDE_ATMEL_PWM_BL_H + +/** + * struct atmel_pwm_bl_platform_data + * @pwm_channel: which PWM channel in the PWM module to use. + * @pwm_frequency: PWM frequency to generate, the driver will try to be as + * close as the prescaler allows. + * @pwm_compare_max: value to use in the PWM channel compare register. + * @pwm_duty_max: maximum duty cycle value, must be less than or equal to + * pwm_compare_max. + * @pwm_duty_min: minimum duty cycle value, must be less than pwm_duty_max. + * @pwm_active_low: set to one if the low part of the PWM signal increases the + * brightness of the backlight. + * @gpio_on: GPIO line to control the backlight on/off, set to -1 if not used. + * @on_active_low: set to one if the on/off signal is on when GPIO is low. + * + * This struct must be added to the platform device in the board code. It is + * used by the atmel-pwm-bl driver to setup the GPIO to control on/off and the + * PWM device. 
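Board code normally just provides a static instance of the backlight platform data documented above (the struct itself is defined just below) and hangs it off the platform device. A sketch with made-up example values:

#include <linux/atmel-pwm-bl.h>

static struct atmel_pwm_bl_platform_data pwm_bl_data = {
        .pwm_channel     = 2,           /* PWM channel wired to the backlight */
        .pwm_frequency   = 200000,
        .pwm_compare_max = 345,
        .pwm_duty_max    = 345,         /* full brightness */
        .pwm_duty_min    = 90,          /* dimmest usable level */
        .pwm_active_low  = 1,
        .gpio_on         = 51,          /* example GPIO number; -1 if unused */
        .on_active_low   = 0,
};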
+ */ +struct atmel_pwm_bl_platform_data { + unsigned int pwm_channel; + unsigned int pwm_frequency; + unsigned int pwm_compare_max; + unsigned int pwm_duty_max; + unsigned int pwm_duty_min; + unsigned int pwm_active_low; + int gpio_on; + unsigned int on_active_low; +}; + +#endif /* __INCLUDE_ATMEL_PWM_BL_H */ diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h new file mode 100644 index 00000000..06023393 --- /dev/null +++ b/include/linux/atmel-ssc.h @@ -0,0 +1,312 @@ +#ifndef __INCLUDE_ATMEL_SSC_H +#define __INCLUDE_ATMEL_SSC_H + +#include <linux/platform_device.h> +#include <linux/list.h> + +struct ssc_device { + struct list_head list; + void __iomem *regs; + struct platform_device *pdev; + struct clk *clk; + int user; + int irq; +}; + +struct ssc_device * __must_check ssc_request(unsigned int ssc_num); +void ssc_free(struct ssc_device *ssc); + +/* SSC register offsets */ + +/* SSC Control Register */ +#define SSC_CR 0x00000000 +#define SSC_CR_RXDIS_SIZE 1 +#define SSC_CR_RXDIS_OFFSET 1 +#define SSC_CR_RXEN_SIZE 1 +#define SSC_CR_RXEN_OFFSET 0 +#define SSC_CR_SWRST_SIZE 1 +#define SSC_CR_SWRST_OFFSET 15 +#define SSC_CR_TXDIS_SIZE 1 +#define SSC_CR_TXDIS_OFFSET 9 +#define SSC_CR_TXEN_SIZE 1 +#define SSC_CR_TXEN_OFFSET 8 + +/* SSC Clock Mode Register */ +#define SSC_CMR 0x00000004 +#define SSC_CMR_DIV_SIZE 12 +#define SSC_CMR_DIV_OFFSET 0 + +/* SSC Receive Clock Mode Register */ +#define SSC_RCMR 0x00000010 +#define SSC_RCMR_CKG_SIZE 2 +#define SSC_RCMR_CKG_OFFSET 6 +#define SSC_RCMR_CKI_SIZE 1 +#define SSC_RCMR_CKI_OFFSET 5 +#define SSC_RCMR_CKO_SIZE 3 +#define SSC_RCMR_CKO_OFFSET 2 +#define SSC_RCMR_CKS_SIZE 2 +#define SSC_RCMR_CKS_OFFSET 0 +#define SSC_RCMR_PERIOD_SIZE 8 +#define SSC_RCMR_PERIOD_OFFSET 24 +#define SSC_RCMR_START_SIZE 4 +#define SSC_RCMR_START_OFFSET 8 +#define SSC_RCMR_STOP_SIZE 1 +#define SSC_RCMR_STOP_OFFSET 12 +#define SSC_RCMR_STTDLY_SIZE 8 +#define SSC_RCMR_STTDLY_OFFSET 16 + +/* SSC Receive Frame Mode Register */ +#define SSC_RFMR 0x00000014 +#define SSC_RFMR_DATLEN_SIZE 5 +#define SSC_RFMR_DATLEN_OFFSET 0 +#define SSC_RFMR_DATNB_SIZE 4 +#define SSC_RFMR_DATNB_OFFSET 8 +#define SSC_RFMR_FSEDGE_SIZE 1 +#define SSC_RFMR_FSEDGE_OFFSET 24 +#define SSC_RFMR_FSLEN_SIZE 4 +#define SSC_RFMR_FSLEN_OFFSET 16 +#define SSC_RFMR_FSOS_SIZE 4 +#define SSC_RFMR_FSOS_OFFSET 20 +#define SSC_RFMR_LOOP_SIZE 1 +#define SSC_RFMR_LOOP_OFFSET 5 +#define SSC_RFMR_MSBF_SIZE 1 +#define SSC_RFMR_MSBF_OFFSET 7 + +/* SSC Transmit Clock Mode Register */ +#define SSC_TCMR 0x00000018 +#define SSC_TCMR_CKG_SIZE 2 +#define SSC_TCMR_CKG_OFFSET 6 +#define SSC_TCMR_CKI_SIZE 1 +#define SSC_TCMR_CKI_OFFSET 5 +#define SSC_TCMR_CKO_SIZE 3 +#define SSC_TCMR_CKO_OFFSET 2 +#define SSC_TCMR_CKS_SIZE 2 +#define SSC_TCMR_CKS_OFFSET 0 +#define SSC_TCMR_PERIOD_SIZE 8 +#define SSC_TCMR_PERIOD_OFFSET 24 +#define SSC_TCMR_START_SIZE 4 +#define SSC_TCMR_START_OFFSET 8 +#define SSC_TCMR_STTDLY_SIZE 8 +#define SSC_TCMR_STTDLY_OFFSET 16 + +/* SSC Transmit Frame Mode Register */ +#define SSC_TFMR 0x0000001c +#define SSC_TFMR_DATDEF_SIZE 1 +#define SSC_TFMR_DATDEF_OFFSET 5 +#define SSC_TFMR_DATLEN_SIZE 5 +#define SSC_TFMR_DATLEN_OFFSET 0 +#define SSC_TFMR_DATNB_SIZE 4 +#define SSC_TFMR_DATNB_OFFSET 8 +#define SSC_TFMR_FSDEN_SIZE 1 +#define SSC_TFMR_FSDEN_OFFSET 23 +#define SSC_TFMR_FSEDGE_SIZE 1 +#define SSC_TFMR_FSEDGE_OFFSET 24 +#define SSC_TFMR_FSLEN_SIZE 4 +#define SSC_TFMR_FSLEN_OFFSET 16 +#define SSC_TFMR_FSOS_SIZE 3 +#define SSC_TFMR_FSOS_OFFSET 20 +#define SSC_TFMR_MSBF_SIZE 1 +#define 
SSC_TFMR_MSBF_OFFSET 7 + +/* SSC Receive Hold Register */ +#define SSC_RHR 0x00000020 +#define SSC_RHR_RDAT_SIZE 32 +#define SSC_RHR_RDAT_OFFSET 0 + +/* SSC Transmit Hold Register */ +#define SSC_THR 0x00000024 +#define SSC_THR_TDAT_SIZE 32 +#define SSC_THR_TDAT_OFFSET 0 + +/* SSC Receive Sync. Holding Register */ +#define SSC_RSHR 0x00000030 +#define SSC_RSHR_RSDAT_SIZE 16 +#define SSC_RSHR_RSDAT_OFFSET 0 + +/* SSC Transmit Sync. Holding Register */ +#define SSC_TSHR 0x00000034 +#define SSC_TSHR_TSDAT_SIZE 16 +#define SSC_TSHR_RSDAT_OFFSET 0 + +/* SSC Receive Compare 0 Register */ +#define SSC_RC0R 0x00000038 +#define SSC_RC0R_CP0_SIZE 16 +#define SSC_RC0R_CP0_OFFSET 0 + +/* SSC Receive Compare 1 Register */ +#define SSC_RC1R 0x0000003c +#define SSC_RC1R_CP1_SIZE 16 +#define SSC_RC1R_CP1_OFFSET 0 + +/* SSC Status Register */ +#define SSC_SR 0x00000040 +#define SSC_SR_CP0_SIZE 1 +#define SSC_SR_CP0_OFFSET 8 +#define SSC_SR_CP1_SIZE 1 +#define SSC_SR_CP1_OFFSET 9 +#define SSC_SR_ENDRX_SIZE 1 +#define SSC_SR_ENDRX_OFFSET 6 +#define SSC_SR_ENDTX_SIZE 1 +#define SSC_SR_ENDTX_OFFSET 2 +#define SSC_SR_OVRUN_SIZE 1 +#define SSC_SR_OVRUN_OFFSET 5 +#define SSC_SR_RXBUFF_SIZE 1 +#define SSC_SR_RXBUFF_OFFSET 7 +#define SSC_SR_RXEN_SIZE 1 +#define SSC_SR_RXEN_OFFSET 17 +#define SSC_SR_RXRDY_SIZE 1 +#define SSC_SR_RXRDY_OFFSET 4 +#define SSC_SR_RXSYN_SIZE 1 +#define SSC_SR_RXSYN_OFFSET 11 +#define SSC_SR_TXBUFE_SIZE 1 +#define SSC_SR_TXBUFE_OFFSET 3 +#define SSC_SR_TXEMPTY_SIZE 1 +#define SSC_SR_TXEMPTY_OFFSET 1 +#define SSC_SR_TXEN_SIZE 1 +#define SSC_SR_TXEN_OFFSET 16 +#define SSC_SR_TXRDY_SIZE 1 +#define SSC_SR_TXRDY_OFFSET 0 +#define SSC_SR_TXSYN_SIZE 1 +#define SSC_SR_TXSYN_OFFSET 10 + +/* SSC Interrupt Enable Register */ +#define SSC_IER 0x00000044 +#define SSC_IER_CP0_SIZE 1 +#define SSC_IER_CP0_OFFSET 8 +#define SSC_IER_CP1_SIZE 1 +#define SSC_IER_CP1_OFFSET 9 +#define SSC_IER_ENDRX_SIZE 1 +#define SSC_IER_ENDRX_OFFSET 6 +#define SSC_IER_ENDTX_SIZE 1 +#define SSC_IER_ENDTX_OFFSET 2 +#define SSC_IER_OVRUN_SIZE 1 +#define SSC_IER_OVRUN_OFFSET 5 +#define SSC_IER_RXBUFF_SIZE 1 +#define SSC_IER_RXBUFF_OFFSET 7 +#define SSC_IER_RXRDY_SIZE 1 +#define SSC_IER_RXRDY_OFFSET 4 +#define SSC_IER_RXSYN_SIZE 1 +#define SSC_IER_RXSYN_OFFSET 11 +#define SSC_IER_TXBUFE_SIZE 1 +#define SSC_IER_TXBUFE_OFFSET 3 +#define SSC_IER_TXEMPTY_SIZE 1 +#define SSC_IER_TXEMPTY_OFFSET 1 +#define SSC_IER_TXRDY_SIZE 1 +#define SSC_IER_TXRDY_OFFSET 0 +#define SSC_IER_TXSYN_SIZE 1 +#define SSC_IER_TXSYN_OFFSET 10 + +/* SSC Interrupt Disable Register */ +#define SSC_IDR 0x00000048 +#define SSC_IDR_CP0_SIZE 1 +#define SSC_IDR_CP0_OFFSET 8 +#define SSC_IDR_CP1_SIZE 1 +#define SSC_IDR_CP1_OFFSET 9 +#define SSC_IDR_ENDRX_SIZE 1 +#define SSC_IDR_ENDRX_OFFSET 6 +#define SSC_IDR_ENDTX_SIZE 1 +#define SSC_IDR_ENDTX_OFFSET 2 +#define SSC_IDR_OVRUN_SIZE 1 +#define SSC_IDR_OVRUN_OFFSET 5 +#define SSC_IDR_RXBUFF_SIZE 1 +#define SSC_IDR_RXBUFF_OFFSET 7 +#define SSC_IDR_RXRDY_SIZE 1 +#define SSC_IDR_RXRDY_OFFSET 4 +#define SSC_IDR_RXSYN_SIZE 1 +#define SSC_IDR_RXSYN_OFFSET 11 +#define SSC_IDR_TXBUFE_SIZE 1 +#define SSC_IDR_TXBUFE_OFFSET 3 +#define SSC_IDR_TXEMPTY_SIZE 1 +#define SSC_IDR_TXEMPTY_OFFSET 1 +#define SSC_IDR_TXRDY_SIZE 1 +#define SSC_IDR_TXRDY_OFFSET 0 +#define SSC_IDR_TXSYN_SIZE 1 +#define SSC_IDR_TXSYN_OFFSET 10 + +/* SSC Interrupt Mask Register */ +#define SSC_IMR 0x0000004c +#define SSC_IMR_CP0_SIZE 1 +#define SSC_IMR_CP0_OFFSET 8 +#define SSC_IMR_CP1_SIZE 1 +#define SSC_IMR_CP1_OFFSET 9 +#define SSC_IMR_ENDRX_SIZE 1 +#define 
SSC_IMR_ENDRX_OFFSET 6 +#define SSC_IMR_ENDTX_SIZE 1 +#define SSC_IMR_ENDTX_OFFSET 2 +#define SSC_IMR_OVRUN_SIZE 1 +#define SSC_IMR_OVRUN_OFFSET 5 +#define SSC_IMR_RXBUFF_SIZE 1 +#define SSC_IMR_RXBUFF_OFFSET 7 +#define SSC_IMR_RXRDY_SIZE 1 +#define SSC_IMR_RXRDY_OFFSET 4 +#define SSC_IMR_RXSYN_SIZE 1 +#define SSC_IMR_RXSYN_OFFSET 11 +#define SSC_IMR_TXBUFE_SIZE 1 +#define SSC_IMR_TXBUFE_OFFSET 3 +#define SSC_IMR_TXEMPTY_SIZE 1 +#define SSC_IMR_TXEMPTY_OFFSET 1 +#define SSC_IMR_TXRDY_SIZE 1 +#define SSC_IMR_TXRDY_OFFSET 0 +#define SSC_IMR_TXSYN_SIZE 1 +#define SSC_IMR_TXSYN_OFFSET 10 + +/* SSC PDC Receive Pointer Register */ +#define SSC_PDC_RPR 0x00000100 + +/* SSC PDC Receive Counter Register */ +#define SSC_PDC_RCR 0x00000104 + +/* SSC PDC Transmit Pointer Register */ +#define SSC_PDC_TPR 0x00000108 + +/* SSC PDC Receive Next Pointer Register */ +#define SSC_PDC_RNPR 0x00000110 + +/* SSC PDC Receive Next Counter Register */ +#define SSC_PDC_RNCR 0x00000114 + +/* SSC PDC Transmit Counter Register */ +#define SSC_PDC_TCR 0x0000010c + +/* SSC PDC Transmit Next Pointer Register */ +#define SSC_PDC_TNPR 0x00000118 + +/* SSC PDC Transmit Next Counter Register */ +#define SSC_PDC_TNCR 0x0000011c + +/* SSC PDC Transfer Control Register */ +#define SSC_PDC_PTCR 0x00000120 +#define SSC_PDC_PTCR_RXTDIS_SIZE 1 +#define SSC_PDC_PTCR_RXTDIS_OFFSET 1 +#define SSC_PDC_PTCR_RXTEN_SIZE 1 +#define SSC_PDC_PTCR_RXTEN_OFFSET 0 +#define SSC_PDC_PTCR_TXTDIS_SIZE 1 +#define SSC_PDC_PTCR_TXTDIS_OFFSET 9 +#define SSC_PDC_PTCR_TXTEN_SIZE 1 +#define SSC_PDC_PTCR_TXTEN_OFFSET 8 + +/* SSC PDC Transfer Status Register */ +#define SSC_PDC_PTSR 0x00000124 +#define SSC_PDC_PTSR_RXTEN_SIZE 1 +#define SSC_PDC_PTSR_RXTEN_OFFSET 0 +#define SSC_PDC_PTSR_TXTEN_SIZE 1 +#define SSC_PDC_PTSR_TXTEN_OFFSET 8 + +/* Bit manipulation macros */ +#define SSC_BIT(name) \ + (1 << SSC_##name##_OFFSET) +#define SSC_BF(name, value) \ + (((value) & ((1 << SSC_##name##_SIZE) - 1)) \ + << SSC_##name##_OFFSET) +#define SSC_BFEXT(name, value) \ + (((value) >> SSC_##name##_OFFSET) \ + & ((1 << SSC_##name##_SIZE) - 1)) +#define SSC_BFINS(name, value, old) \ + (((old) & ~(((1 << SSC_##name##_SIZE) - 1) \ + << SSC_##name##_OFFSET)) | SSC_BF(name, value)) + +/* Register access macros */ +#define ssc_readl(base, reg) __raw_readl(base + SSC_##reg) +#define ssc_writel(base, reg, value) __raw_writel((value), base + SSC_##reg) + +#endif /* __INCLUDE_ATMEL_SSC_H */ diff --git a/include/linux/atmel_pdc.h b/include/linux/atmel_pdc.h new file mode 100644 index 00000000..63499ce8 --- /dev/null +++ b/include/linux/atmel_pdc.h @@ -0,0 +1,38 @@ +/* + * include/linux/atmel_pdc.h + * + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * Peripheral Data Controller (PDC) registers. + * Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
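The SSC_BIT/SSC_BF/SSC_BFEXT/SSC_BFINS helpers and the ssc_readl/ssc_writel accessors at the end of atmel-ssc.h above let a driver manipulate register fields by name. A driver-side sketch, assuming an ssc_device already obtained through ssc_request() and the usual MCK/(2*DIV) divider semantics:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/atmel-ssc.h>

static void ssc_example_setup(struct ssc_device *ssc)
{
        u32 cmr;

        /* Update only the DIV field of the clock mode register:
         * serial clock = MCK / (2 * DIV), so DIV = 8 gives MCK/16. */
        cmr = ssc_readl(ssc->regs, CMR);
        cmr = SSC_BFINS(CMR_DIV, 8, cmr);
        ssc_writel(ssc->regs, CMR, cmr);

        /* CR is a command register, so just write the enable bit. */
        ssc_writel(ssc->regs, CR, SSC_BIT(CR_TXEN));

        pr_debug("SSC divider now %lu\n",
                 (unsigned long)SSC_BFEXT(CMR_DIV, ssc_readl(ssc->regs, CMR)));
}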
+ */ + +#ifndef ATMEL_PDC_H +#define ATMEL_PDC_H + +#define ATMEL_PDC_RPR 0x100 /* Receive Pointer Register */ +#define ATMEL_PDC_RCR 0x104 /* Receive Counter Register */ +#define ATMEL_PDC_TPR 0x108 /* Transmit Pointer Register */ +#define ATMEL_PDC_TCR 0x10c /* Transmit Counter Register */ +#define ATMEL_PDC_RNPR 0x110 /* Receive Next Pointer Register */ +#define ATMEL_PDC_RNCR 0x114 /* Receive Next Counter Register */ +#define ATMEL_PDC_TNPR 0x118 /* Transmit Next Pointer Register */ +#define ATMEL_PDC_TNCR 0x11c /* Transmit Next Counter Register */ + +#define ATMEL_PDC_PTCR 0x120 /* Transfer Control Register */ +#define ATMEL_PDC_RXTEN (1 << 0) /* Receiver Transfer Enable */ +#define ATMEL_PDC_RXTDIS (1 << 1) /* Receiver Transfer Disable */ +#define ATMEL_PDC_TXTEN (1 << 8) /* Transmitter Transfer Enable */ +#define ATMEL_PDC_TXTDIS (1 << 9) /* Transmitter Transfer Disable */ + +#define ATMEL_PDC_PTSR 0x124 /* Transfer Status Register */ + +#define ATMEL_PDC_SCND_BUF_OFF 0x10 /* Offset between first and second buffer registers */ + +#endif diff --git a/include/linux/atmel_pwm.h b/include/linux/atmel_pwm.h new file mode 100644 index 00000000..ea04abb3 --- /dev/null +++ b/include/linux/atmel_pwm.h @@ -0,0 +1,70 @@ +#ifndef __LINUX_ATMEL_PWM_H +#define __LINUX_ATMEL_PWM_H + +/** + * struct pwm_channel - driver handle to a PWM channel + * @regs: base of this channel's registers + * @index: number of this channel (0..31) + * @mck: base clock rate, which can be prescaled and maybe subdivided + * + * Drivers initialize a pwm_channel structure using pwm_channel_alloc(). + * Then they configure its clock rate (derived from MCK), alignment, + * polarity, and duty cycle by writing directly to the channel registers, + * before enabling the channel by calling pwm_channel_enable(). + * + * After emitting a PWM signal for the desired length of time, drivers + * may then pwm_channel_disable() or pwm_channel_free(). Both of these + * disable the channel, but when it's freed the IRQ is deconfigured and + * the channel must later be re-allocated and reconfigured. + * + * Note that if the period or duty cycle need to be changed while the + * PWM channel is operating, drivers must use the PWM_CUPD double buffer + * mechanism, either polling until they change or getting implicitly + * notified through a once-per-period interrupt handler. 
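A sketch of the flow this comment describes, using the channel helpers and register offsets declared just below; the channel number and timing values are examples, and CUPD/interrupt handling is omitted:

#include <linux/atmel_pwm.h>

static struct pwm_channel example_ch;

static int example_pwm_start(void)
{
        int ret = pwm_channel_alloc(2, &example_ch);

        if (ret)
                return ret;

        /* CPRE = 5 selects MCK/32; left-aligned, normal polarity. */
        pwm_channel_writel(&example_ch, PWM_CMR, 5);
        pwm_channel_writel(&example_ch, PWM_CPRD, 1000);        /* period */
        pwm_channel_writel(&example_ch, PWM_CDTY, 500);         /* 50% duty */

        return pwm_channel_enable(&example_ch);
}

static void example_pwm_stop(void)
{
        pwm_channel_disable(&example_ch);
        pwm_channel_free(&example_ch);
}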
+ */ +struct pwm_channel { + void __iomem *regs; + unsigned index; + unsigned long mck; +}; + +extern int pwm_channel_alloc(int index, struct pwm_channel *ch); +extern int pwm_channel_free(struct pwm_channel *ch); + +extern int pwm_clk_alloc(unsigned prescale, unsigned div); +extern void pwm_clk_free(unsigned clk); + +extern int __pwm_channel_onoff(struct pwm_channel *ch, int enabled); + +#define pwm_channel_enable(ch) __pwm_channel_onoff((ch), 1) +#define pwm_channel_disable(ch) __pwm_channel_onoff((ch), 0) + +/* periodic interrupts, mostly for CUPD changes to period or cycle */ +extern int pwm_channel_handler(struct pwm_channel *ch, + void (*handler)(struct pwm_channel *ch)); + +/* per-channel registers (banked at pwm_channel->regs) */ +#define PWM_CMR 0x00 /* mode register */ +#define PWM_CPR_CPD (1 << 10) /* set: CUPD modifies period */ +#define PWM_CPR_CPOL (1 << 9) /* set: idle high */ +#define PWM_CPR_CALG (1 << 8) /* set: center align */ +#define PWM_CPR_CPRE (0xf << 0) /* mask: rate is mck/(2^pre) */ +#define PWM_CPR_CLKA (0xb << 0) /* rate CLKA */ +#define PWM_CPR_CLKB (0xc << 0) /* rate CLKB */ +#define PWM_CDTY 0x04 /* duty cycle (max of CPRD) */ +#define PWM_CPRD 0x08 /* period (count up from zero) */ +#define PWM_CCNT 0x0c /* counter (20 bits?) */ +#define PWM_CUPD 0x10 /* update CPRD (or CDTY) next period */ + +static inline void +pwm_channel_writel(struct pwm_channel *pwmc, unsigned offset, u32 val) +{ + __raw_writel(val, pwmc->regs + offset); +} + +static inline u32 pwm_channel_readl(struct pwm_channel *pwmc, unsigned offset) +{ + return __raw_readl(pwmc->regs + offset); +} + +#endif /* __LINUX_ATMEL_PWM_H */ diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h new file mode 100644 index 00000000..fd683376 --- /dev/null +++ b/include/linux/atmel_serial.h @@ -0,0 +1,127 @@ +/* + * include/linux/atmel_serial.h + * + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * USART registers. + * Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef ATMEL_SERIAL_H +#define ATMEL_SERIAL_H + +#define ATMEL_US_CR 0x00 /* Control Register */ +#define ATMEL_US_RSTRX (1 << 2) /* Reset Receiver */ +#define ATMEL_US_RSTTX (1 << 3) /* Reset Transmitter */ +#define ATMEL_US_RXEN (1 << 4) /* Receiver Enable */ +#define ATMEL_US_RXDIS (1 << 5) /* Receiver Disable */ +#define ATMEL_US_TXEN (1 << 6) /* Transmitter Enable */ +#define ATMEL_US_TXDIS (1 << 7) /* Transmitter Disable */ +#define ATMEL_US_RSTSTA (1 << 8) /* Reset Status Bits */ +#define ATMEL_US_STTBRK (1 << 9) /* Start Break */ +#define ATMEL_US_STPBRK (1 << 10) /* Stop Break */ +#define ATMEL_US_STTTO (1 << 11) /* Start Time-out */ +#define ATMEL_US_SENDA (1 << 12) /* Send Address */ +#define ATMEL_US_RSTIT (1 << 13) /* Reset Iterations */ +#define ATMEL_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */ +#define ATMEL_US_RETTO (1 << 15) /* Rearm Time-out */ +#define ATMEL_US_DTREN (1 << 16) /* Data Terminal Ready Enable [AT91RM9200 only] */ +#define ATMEL_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable [AT91RM9200 only] */ +#define ATMEL_US_RTSEN (1 << 18) /* Request To Send Enable */ +#define ATMEL_US_RTSDIS (1 << 19) /* Request To Send Disable */ + +#define ATMEL_US_MR 0x04 /* Mode Register */ +#define ATMEL_US_USMODE (0xf << 0) /* Mode of the USART */ +#define ATMEL_US_USMODE_NORMAL 0 +#define ATMEL_US_USMODE_RS485 1 +#define ATMEL_US_USMODE_HWHS 2 +#define ATMEL_US_USMODE_MODEM 3 +#define ATMEL_US_USMODE_ISO7816_T0 4 +#define ATMEL_US_USMODE_ISO7816_T1 6 +#define ATMEL_US_USMODE_IRDA 8 +#define ATMEL_US_USCLKS (3 << 4) /* Clock Selection */ +#define ATMEL_US_USCLKS_MCK (0 << 4) +#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4) +#define ATMEL_US_USCLKS_SCK (3 << 4) +#define ATMEL_US_CHRL (3 << 6) /* Character Length */ +#define ATMEL_US_CHRL_5 (0 << 6) +#define ATMEL_US_CHRL_6 (1 << 6) +#define ATMEL_US_CHRL_7 (2 << 6) +#define ATMEL_US_CHRL_8 (3 << 6) +#define ATMEL_US_SYNC (1 << 8) /* Synchronous Mode Select */ +#define ATMEL_US_PAR (7 << 9) /* Parity Type */ +#define ATMEL_US_PAR_EVEN (0 << 9) +#define ATMEL_US_PAR_ODD (1 << 9) +#define ATMEL_US_PAR_SPACE (2 << 9) +#define ATMEL_US_PAR_MARK (3 << 9) +#define ATMEL_US_PAR_NONE (4 << 9) +#define ATMEL_US_PAR_MULTI_DROP (6 << 9) +#define ATMEL_US_NBSTOP (3 << 12) /* Number of Stop Bits */ +#define ATMEL_US_NBSTOP_1 (0 << 12) +#define ATMEL_US_NBSTOP_1_5 (1 << 12) +#define ATMEL_US_NBSTOP_2 (2 << 12) +#define ATMEL_US_CHMODE (3 << 14) /* Channel Mode */ +#define ATMEL_US_CHMODE_NORMAL (0 << 14) +#define ATMEL_US_CHMODE_ECHO (1 << 14) +#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14) +#define ATMEL_US_CHMODE_REM_LOOP (3 << 14) +#define ATMEL_US_MSBF (1 << 16) /* Bit Order */ +#define ATMEL_US_MODE9 (1 << 17) /* 9-bit Character Length */ +#define ATMEL_US_CLKO (1 << 18) /* Clock Output Select */ +#define ATMEL_US_OVER (1 << 19) /* Oversampling Mode */ +#define ATMEL_US_INACK (1 << 20) /* Inhibit Non Acknowledge */ +#define ATMEL_US_DSNACK (1 << 21) /* Disable Successive NACK */ +#define ATMEL_US_MAX_ITER (7 << 24) /* Max Iterations */ +#define ATMEL_US_FILTER (1 << 28) /* Infrared Receive Line Filter */ + +#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */ +#define ATMEL_US_RXRDY (1 << 0) /* Receiver Ready */ +#define ATMEL_US_TXRDY (1 << 1) /* Transmitter Ready */ +#define ATMEL_US_RXBRK (1 << 2) /* Break Received / End of Break */ +#define ATMEL_US_ENDRX (1 << 3) /* End of Receiver Transfer */ +#define ATMEL_US_ENDTX (1 << 4) /* End of Transmitter Transfer */ +#define ATMEL_US_OVRE (1 << 5) /* Overrun Error */ +#define 
ATMEL_US_FRAME (1 << 6) /* Framing Error */ +#define ATMEL_US_PARE (1 << 7) /* Parity Error */ +#define ATMEL_US_TIMEOUT (1 << 8) /* Receiver Time-out */ +#define ATMEL_US_TXEMPTY (1 << 9) /* Transmitter Empty */ +#define ATMEL_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */ +#define ATMEL_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */ +#define ATMEL_US_RXBUFF (1 << 12) /* Reception Buffer Full */ +#define ATMEL_US_NACK (1 << 13) /* Non Acknowledge */ +#define ATMEL_US_RIIC (1 << 16) /* Ring Indicator Input Change [AT91RM9200 only] */ +#define ATMEL_US_DSRIC (1 << 17) /* Data Set Ready Input Change [AT91RM9200 only] */ +#define ATMEL_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change [AT91RM9200 only] */ +#define ATMEL_US_CTSIC (1 << 19) /* Clear to Send Input Change */ +#define ATMEL_US_RI (1 << 20) /* RI */ +#define ATMEL_US_DSR (1 << 21) /* DSR */ +#define ATMEL_US_DCD (1 << 22) /* DCD */ +#define ATMEL_US_CTS (1 << 23) /* CTS */ + +#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */ +#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */ +#define ATMEL_US_CSR 0x14 /* Channel Status Register */ +#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */ +#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */ +#define ATMEL_US_SYNH (1 << 15) /* Transmit/Receive Sync [AT91SAM9261 only] */ + +#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */ +#define ATMEL_US_CD (0xffff << 0) /* Clock Divider */ + +#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */ +#define ATMEL_US_TO (0xffff << 0) /* Time-out Value */ + +#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */ +#define ATMEL_US_TG (0xff << 0) /* Timeguard Value */ + +#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */ +#define ATMEL_US_NER 0x44 /* Number of Errors Register */ +#define ATMEL_US_IF 0x4c /* IrDA Filter Register */ + +#endif diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h new file mode 100644 index 00000000..1d14b1dc --- /dev/null +++ b/include/linux/atmel_tc.h @@ -0,0 +1,262 @@ +/* + * Timer/Counter Unit (TC) registers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef ATMEL_TC_H +#define ATMEL_TC_H + +#include <linux/compiler.h> +#include <linux/list.h> + +/* + * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds + * three general-purpose 16-bit timers. These timers share one register bank. + * Depending on the SOC, each timer may have its own clock and IRQ, or those + * may be shared by the whole TC block. + * + * These TC blocks may have up to nine external pins: TCLK0..2 signals for + * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM + * or triggering. Those pins need to be set up for use with the TC block, + * else they will be used as GPIOs or for a different controller. + * + * Although we expect each TC block to have a platform_device node, those + * nodes are not what drivers bind to. Instead, they ask for a specific + * TC block, by number ... which is a common approach on systems with many + * timers. Then they use clk_get() and platform_get_irq() to get clock and + * IRQ resources. 
+ */ + +struct clk; + +/** + * struct atmel_tcb_config - SoC data for a Timer/Counter Block + * @counter_width: size in bits of a timer counter register + */ +struct atmel_tcb_config { + size_t counter_width; +}; + +/** + * struct atmel_tc - information about a Timer/Counter Block + * @pdev: physical device + * @iomem: resource associated with the I/O register + * @regs: mapping through which the I/O registers can be accessed + * @tcb_config: configuration data from SoC + * @irq: irq for each of the three channels + * @clk: internal clock source for each of the three channels + * @node: list node, for tclib internal use + * + * On some platforms, each TC channel has its own clocks and IRQs, + * while on others, all TC channels share the same clock and IRQ. + * Drivers should clk_enable() all the clocks they need even though + * all the entries in @clk may point to the same physical clock. + * Likewise, drivers should request irqs independently for each + * channel, but they must use IRQF_SHARED in case some of the entries + * in @irq are actually the same IRQ. + */ +struct atmel_tc { + struct platform_device *pdev; + struct resource *iomem; + void __iomem *regs; + struct atmel_tcb_config *tcb_config; + int irq[3]; + struct clk *clk[3]; + struct list_head node; +}; + +extern struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name); +extern void atmel_tc_free(struct atmel_tc *tc); + +/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */ +extern const u8 atmel_tc_divisors[5]; + + +/* + * Two registers have block-wide controls. These are: configuring the three + * "external" clocks (or event sources) used by the timer channels; and + * synchronizing the timers by resetting them all at once. + * + * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2 + * signals. Or, it can mean "external to timer", using the TIOA output from + * one of the other two timers that's being run in waveform mode. + */ + +#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */ +#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */ + +#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */ +#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */ +#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0) +#define ATMEL_TC_TC0XC0S_NONE (1 << 0) +#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0) +#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0) +#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */ +#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2) +#define ATMEL_TC_TC1XC1S_NONE (1 << 2) +#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2) +#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2) +#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */ +#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4) +#define ATMEL_TC_TC2XC2S_NONE (1 << 4) +#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4) +#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4) + + +/* + * Each TC block has three "channels", each with one counter and controls. + * + * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection + * when it's not "external") is silicon-specific. AT91 platforms use one + * set of definitions; AVR32 platforms use a different set. Don't hard-wire + * such knowledge into your code, use the global "atmel_tc_divisors" ... + * where index N is the divisor for clock N+1, else zero to indicate it uses + * the 32 KiHz clock. + * + * The timers can be chained in various ways, and operated in "waveform" + * generation mode (including PWM) or "capture" mode (to time events). In + * both modes, behavior can be configured in many ways. 
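A sketch of the allocation pattern described above: claim TC block 0, enable the channel 0 clock, and start channel 0 free-running in waveform up-count mode with automatic reset on RC compare. The CCR/CMR bit names used here are defined just below; the foo names and values are illustrative:

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/atmel_tc.h>

static struct atmel_tc *foo_tc;

static int foo_tc_start(void)
{
        foo_tc = atmel_tc_alloc(0, "foo-timer");
        if (!foo_tc)
                return -EBUSY;

        clk_enable(foo_tc->clk[0]);

        /* Channel 0: internal clock 1, up-counting, reset on RC match. */
        __raw_writel(ATMEL_TC_TIMER_CLOCK1 | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
                     foo_tc->regs + ATMEL_TC_REG(0, CMR));
        __raw_writel(0xffff, foo_tc->regs + ATMEL_TC_REG(0, RC));
        __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
                     foo_tc->regs + ATMEL_TC_REG(0, CCR));
        return 0;
}

static void foo_tc_stop(void)
{
        __raw_writel(ATMEL_TC_CLKDIS, foo_tc->regs + ATMEL_TC_REG(0, CCR));
        clk_disable(foo_tc->clk[0]);
        atmel_tc_free(foo_tc);
}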
+ * + * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a + * PWM output, and TIOB as either another PWM or as a trigger. Capture mode + * uses them only as inputs. + */ +#define ATMEL_TC_CHAN(idx) ((idx)*0x40) +#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg) + +#define ATMEL_TC_CCR 0x00 /* Channel Control Register */ +#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */ +#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */ +#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */ + +#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */ + +/* Both modes share some CMR bits */ +#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */ +#define ATMEL_TC_TIMER_CLOCK1 (0 << 0) +#define ATMEL_TC_TIMER_CLOCK2 (1 << 0) +#define ATMEL_TC_TIMER_CLOCK3 (2 << 0) +#define ATMEL_TC_TIMER_CLOCK4 (3 << 0) +#define ATMEL_TC_TIMER_CLOCK5 (4 << 0) +#define ATMEL_TC_XC0 (5 << 0) +#define ATMEL_TC_XC1 (6 << 0) +#define ATMEL_TC_XC2 (7 << 0) +#define ATMEL_TC_CLKI (1 << 3) /* clock invert */ +#define ATMEL_TC_BURST (3 << 4) /* clock gating */ +#define ATMEL_TC_GATE_NONE (0 << 4) +#define ATMEL_TC_GATE_XC0 (1 << 4) +#define ATMEL_TC_GATE_XC1 (2 << 4) +#define ATMEL_TC_GATE_XC2 (3 << 4) +#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */ + +/* CAPTURE mode CMR bits */ +#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */ +#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */ +#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */ +#define ATMEL_TC_ETRGEDG_NONE (0 << 8) +#define ATMEL_TC_ETRGEDG_RISING (1 << 8) +#define ATMEL_TC_ETRGEDG_FALLING (2 << 8) +#define ATMEL_TC_ETRGEDG_BOTH (3 << 8) +#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? */ +#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */ +#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */ +#define ATMEL_TC_LDRA_NONE (0 << 16) +#define ATMEL_TC_LDRA_RISING (1 << 16) +#define ATMEL_TC_LDRA_FALLING (2 << 16) +#define ATMEL_TC_LDRA_BOTH (3 << 16) +#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */ +#define ATMEL_TC_LDRB_NONE (0 << 18) +#define ATMEL_TC_LDRB_RISING (1 << 18) +#define ATMEL_TC_LDRB_FALLING (2 << 18) +#define ATMEL_TC_LDRB_BOTH (3 << 18) + +/* WAVEFORM mode CMR bits */ +#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */ +#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */ +#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */ +#define ATMEL_TC_EEVTEDG_NONE (0 << 8) +#define ATMEL_TC_EEVTEDG_RISING (1 << 8) +#define ATMEL_TC_EEVTEDG_FALLING (2 << 8) +#define ATMEL_TC_EEVTEDG_BOTH (3 << 8) +#define ATMEL_TC_EEVT (3 << 10) /* external event source */ +#define ATMEL_TC_EEVT_TIOB (0 << 10) +#define ATMEL_TC_EEVT_XC0 (1 << 10) +#define ATMEL_TC_EEVT_XC1 (2 << 10) +#define ATMEL_TC_EEVT_XC2 (3 << 10) +#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */ +#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */ +#define ATMEL_TC_WAVESEL_UP (0 << 13) +#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13) +#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13) +#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13) +#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */ +#define ATMEL_TC_ACPA_NONE (0 << 16) +#define ATMEL_TC_ACPA_SET (1 << 16) +#define ATMEL_TC_ACPA_CLEAR (2 << 16) +#define ATMEL_TC_ACPA_TOGGLE (3 << 16) +#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */ +#define ATMEL_TC_ACPC_NONE (0 << 18) +#define ATMEL_TC_ACPC_SET (1 << 18) +#define ATMEL_TC_ACPC_CLEAR (2 << 18) +#define 
ATMEL_TC_ACPC_TOGGLE (3 << 18) +#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */ +#define ATMEL_TC_AEEVT_NONE (0 << 20) +#define ATMEL_TC_AEEVT_SET (1 << 20) +#define ATMEL_TC_AEEVT_CLEAR (2 << 20) +#define ATMEL_TC_AEEVT_TOGGLE (3 << 20) +#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */ +#define ATMEL_TC_ASWTRG_NONE (0 << 22) +#define ATMEL_TC_ASWTRG_SET (1 << 22) +#define ATMEL_TC_ASWTRG_CLEAR (2 << 22) +#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22) +#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */ +#define ATMEL_TC_BCPB_NONE (0 << 24) +#define ATMEL_TC_BCPB_SET (1 << 24) +#define ATMEL_TC_BCPB_CLEAR (2 << 24) +#define ATMEL_TC_BCPB_TOGGLE (3 << 24) +#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */ +#define ATMEL_TC_BCPC_NONE (0 << 26) +#define ATMEL_TC_BCPC_SET (1 << 26) +#define ATMEL_TC_BCPC_CLEAR (2 << 26) +#define ATMEL_TC_BCPC_TOGGLE (3 << 26) +#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */ +#define ATMEL_TC_BEEVT_NONE (0 << 28) +#define ATMEL_TC_BEEVT_SET (1 << 28) +#define ATMEL_TC_BEEVT_CLEAR (2 << 28) +#define ATMEL_TC_BEEVT_TOGGLE (3 << 28) +#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */ +#define ATMEL_TC_BSWTRG_NONE (0 << 30) +#define ATMEL_TC_BSWTRG_SET (1 << 30) +#define ATMEL_TC_BSWTRG_CLEAR (2 << 30) +#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30) + +#define ATMEL_TC_CV 0x10 /* counter Value */ +#define ATMEL_TC_RA 0x14 /* register A */ +#define ATMEL_TC_RB 0x18 /* register B */ +#define ATMEL_TC_RC 0x1c /* register C */ + +#define ATMEL_TC_SR 0x20 /* status (read-only) */ +/* Status-only flags */ +#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */ +#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */ +#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */ + +#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */ +#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */ +#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */ + +/* Status and IRQ flags */ +#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */ +#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */ +#define ATMEL_TC_CPAS (1 << 2) /* RA compare */ +#define ATMEL_TC_CPBS (1 << 3) /* RB compare */ +#define ATMEL_TC_CPCS (1 << 4) /* RC compare */ +#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */ +#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */ +#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */ + +#endif diff --git a/include/linux/atmioc.h b/include/linux/atmioc.h new file mode 100644 index 00000000..37f67aa8 --- /dev/null +++ b/include/linux/atmioc.h @@ -0,0 +1,41 @@ +/* atmioc.h - ranges for ATM-related ioctl numbers */ + +/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */ + + +/* + * See http://icawww1.epfl.ch/linux-atm/magic.html for the complete list of + * "magic" ioctl numbers. 
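+ *
+ * Subsystem headers combine these bases with the _IO() macros; for
+ * example, <linux/atmlec.h> below defines its control ioctl as
+ *
+ *	#define ATMLEC_CTRL _IO('a', ATMIOC_LANE)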
+ */ + + +#ifndef _LINUX_ATMIOC_H +#define _LINUX_ATMIOC_H + +#include <asm/ioctl.h> + /* everybody including atmioc.h will also need _IO{,R,W,WR} */ + +#define ATMIOC_PHYCOM 0x00 /* PHY device common ioctls, globally unique */ +#define ATMIOC_PHYCOM_END 0x0f +#define ATMIOC_PHYTYP 0x10 /* PHY dev type ioctls, unique per PHY type */ +#define ATMIOC_PHYTYP_END 0x2f +#define ATMIOC_PHYPRV 0x30 /* PHY dev private ioctls, unique per driver */ +#define ATMIOC_PHYPRV_END 0x4f +#define ATMIOC_SARCOM 0x50 /* SAR device common ioctls, globally unique */ +#define ATMIOC_SARCOM_END 0x50 +#define ATMIOC_SARPRV 0x60 /* SAR dev private ioctls, unique per driver */ +#define ATMIOC_SARPRV_END 0x7f +#define ATMIOC_ITF 0x80 /* Interface ioctls, globally unique */ +#define ATMIOC_ITF_END 0x8f +#define ATMIOC_BACKEND 0x90 /* ATM generic backend ioctls, u. per backend */ +#define ATMIOC_BACKEND_END 0xaf +/* 0xb0-0xbf: Reserved for future use */ +#define ATMIOC_AREQUIPA 0xc0 /* Application requested IP over ATM, glob. u. */ +#define ATMIOC_LANE 0xd0 /* LAN Emulation, globally unique */ +#define ATMIOC_MPOA 0xd8 /* MPOA, globally unique */ +#define ATMIOC_CLIP 0xe0 /* Classical IP over ATM control, globally u. */ +#define ATMIOC_CLIP_END 0xef +#define ATMIOC_SPECIAL 0xf0 /* Special-purpose controls, globally unique */ +#define ATMIOC_SPECIAL_END 0xff + +#endif diff --git a/include/linux/atmlec.h b/include/linux/atmlec.h new file mode 100644 index 00000000..39c917fd --- /dev/null +++ b/include/linux/atmlec.h @@ -0,0 +1,98 @@ +/* + * ATM Lan Emulation Daemon driver interface + * + * Marko Kiiskila <mkiiskila@yahoo.com> + */ + +#ifndef _ATMLEC_H_ +#define _ATMLEC_H_ + +#include <linux/atmapi.h> +#include <linux/atmioc.h> +#include <linux/atm.h> +#include <linux/if_ether.h> +#include <linux/types.h> + +/* ATM lec daemon control socket */ +#define ATMLEC_CTRL _IO('a', ATMIOC_LANE) +#define ATMLEC_DATA _IO('a', ATMIOC_LANE+1) +#define ATMLEC_MCAST _IO('a', ATMIOC_LANE+2) + +/* Maximum number of LEC interfaces (tweakable) */ +#define MAX_LEC_ITF 48 + +/* + * From the total of MAX_LEC_ITF, last NUM_TR_DEVS are reserved for Token Ring. + * E.g. if MAX_LEC_ITF = 48 and NUM_TR_DEVS = 8, then lec0-lec39 are for + * Ethernet ELANs and lec40-lec47 are for Token Ring ELANS. + */ +#define NUM_TR_DEVS 8 + +typedef enum { + l_set_mac_addr, + l_del_mac_addr, + l_svc_setup, + l_addr_delete, + l_topology_change, + l_flush_complete, + l_arp_update, + l_narp_req, /* LANE2 mandates the use of this */ + l_config, + l_flush_tran_id, + l_set_lecid, + l_arp_xmt, + l_rdesc_arp_xmt, + l_associate_req, + l_should_bridge /* should we bridge this MAC? 
*/ +} atmlec_msg_type; + +#define ATMLEC_MSG_TYPE_MAX l_should_bridge + +struct atmlec_config_msg { + unsigned int maximum_unknown_frame_count; + unsigned int max_unknown_frame_time; + unsigned short max_retry_count; + unsigned int aging_time; + unsigned int forward_delay_time; + unsigned int arp_response_time; + unsigned int flush_timeout; + unsigned int path_switching_delay; + unsigned int lane_version; /* LANE2: 1 for LANEv1, 2 for LANEv2 */ + int mtu; + int is_proxy; +}; + +struct atmlec_msg { + atmlec_msg_type type; + int sizeoftlvs; /* LANE2: if != 0, tlvs follow */ + union { + struct { + unsigned char mac_addr[ETH_ALEN]; + unsigned char atm_addr[ATM_ESA_LEN]; + unsigned int flag; /* + * Topology_change flag, + * remoteflag, permanent flag, + * lecid, transaction id + */ + unsigned int targetless_le_arp; /* LANE2 */ + unsigned int no_source_le_narp; /* LANE2 */ + } normal; + struct atmlec_config_msg config; + struct { + __u16 lec_id; /* requestor lec_id */ + __u32 tran_id; /* transaction id */ + unsigned char mac_addr[ETH_ALEN]; /* dst mac addr */ + unsigned char atm_addr[ATM_ESA_LEN]; /* reqestor ATM addr */ + } proxy; /* + * For mapping LE_ARP requests to responses. Filled by + * zeppelin, returned by kernel. Used only when proxying + */ + } content; +} __ATM_API_ALIGN; + +struct atmlec_ioc { + int dev_num; + unsigned char atm_addr[ATM_ESA_LEN]; + unsigned char receive; /* 1= receive vcc, 0 = send vcc */ +}; +#endif /* _ATMLEC_H_ */ diff --git a/include/linux/atmmpc.h b/include/linux/atmmpc.h new file mode 100644 index 00000000..2aba5787 --- /dev/null +++ b/include/linux/atmmpc.h @@ -0,0 +1,126 @@ +#ifndef _ATMMPC_H_ +#define _ATMMPC_H_ + +#include <linux/atmapi.h> +#include <linux/atmioc.h> +#include <linux/atm.h> +#include <linux/types.h> + +#define ATMMPC_CTRL _IO('a', ATMIOC_MPOA) +#define ATMMPC_DATA _IO('a', ATMIOC_MPOA+1) + +#define MPC_SOCKET_INGRESS 1 +#define MPC_SOCKET_EGRESS 2 + +struct atmmpc_ioc { + int dev_num; + __be32 ipaddr; /* the IP address of the shortcut */ + int type; /* ingress or egress */ +}; + +typedef struct in_ctrl_info { + __u8 Last_NHRP_CIE_code; + __u8 Last_Q2931_cause_value; + __u8 eg_MPC_ATM_addr[ATM_ESA_LEN]; + __be32 tag; + __be32 in_dst_ip; /* IP address this ingress MPC sends packets to */ + __u16 holding_time; + __u32 request_id; +} in_ctrl_info; + +typedef struct eg_ctrl_info { + __u8 DLL_header[256]; + __u8 DH_length; + __be32 cache_id; + __be32 tag; + __be32 mps_ip; + __be32 eg_dst_ip; /* IP address to which ingress MPC sends packets */ + __u8 in_MPC_data_ATM_addr[ATM_ESA_LEN]; + __u16 holding_time; +} eg_ctrl_info; + +struct mpc_parameters { + __u16 mpc_p1; /* Shortcut-Setup Frame Count */ + __u16 mpc_p2; /* Shortcut-Setup Frame Time */ + __u8 mpc_p3[8]; /* Flow-detection Protocols */ + __u16 mpc_p4; /* MPC Initial Retry Time */ + __u16 mpc_p5; /* MPC Retry Time Maximum */ + __u16 mpc_p6; /* Hold Down Time */ +} ; + +struct k_message { + __u16 type; + __be32 ip_mask; + __u8 MPS_ctrl[ATM_ESA_LEN]; + union { + in_ctrl_info in_info; + eg_ctrl_info eg_info; + struct mpc_parameters params; + } content; + struct atm_qos qos; +} __ATM_API_ALIGN; + +struct llc_snap_hdr { + /* RFC 1483 LLC/SNAP encapsulation for routed IP PDUs */ + __u8 dsap; /* Destination Service Access Point (0xAA) */ + __u8 ssap; /* Source Service Access Point (0xAA) */ + __u8 ui; /* Unnumbered Information (0x03) */ + __u8 org[3]; /* Organizational identification (0x000000) */ + __u8 type[2]; /* Ether type (for IP) (0x0800) */ +}; + +/* TLVs this MPC recognizes */ +#define 
TLV_MPOA_DEVICE_TYPE 0x00a03e2a + +/* MPOA device types in MPOA Device Type TLV */ +#define NON_MPOA 0 +#define MPS 1 +#define MPC 2 +#define MPS_AND_MPC 3 + + +/* MPC parameter defaults */ + +#define MPC_P1 10 /* Shortcut-Setup Frame Count */ +#define MPC_P2 1 /* Shortcut-Setup Frame Time */ +#define MPC_P3 0 /* Flow-detection Protocols */ +#define MPC_P4 5 /* MPC Initial Retry Time */ +#define MPC_P5 40 /* MPC Retry Time Maximum */ +#define MPC_P6 160 /* Hold Down Time */ +#define HOLDING_TIME_DEFAULT 1200 /* same as MPS-p7 */ + +/* MPC constants */ + +#define MPC_C1 2 /* Retry Time Multiplier */ +#define MPC_C2 60 /* Initial Keep-Alive Lifetime */ + +/* Message types - to MPOA daemon */ + +#define SND_MPOA_RES_RQST 201 +#define SET_MPS_CTRL_ADDR 202 +#define SND_MPOA_RES_RTRY 203 /* Different type in a retry due to req id */ +#define STOP_KEEP_ALIVE_SM 204 +#define EGRESS_ENTRY_REMOVED 205 +#define SND_EGRESS_PURGE 206 +#define DIE 207 /* tell the daemon to exit() */ +#define DATA_PLANE_PURGE 208 /* Data plane purge because of egress cache hit miss or dead MPS */ +#define OPEN_INGRESS_SVC 209 + +/* Message types - from MPOA daemon */ + +#define MPOA_TRIGGER_RCVD 101 +#define MPOA_RES_REPLY_RCVD 102 +#define INGRESS_PURGE_RCVD 103 +#define EGRESS_PURGE_RCVD 104 +#define MPS_DEATH 105 +#define CACHE_IMPOS_RCVD 106 +#define SET_MPC_CTRL_ADDR 107 /* Our MPC's control ATM address */ +#define SET_MPS_MAC_ADDR 108 +#define CLEAN_UP_AND_EXIT 109 +#define SET_MPC_PARAMS 110 /* MPC configuration parameters */ + +/* Message types - bidirectional */ + +#define RELOAD 301 /* kill -HUP the daemon for reload */ + +#endif /* _ATMMPC_H_ */ diff --git a/include/linux/atmppp.h b/include/linux/atmppp.h new file mode 100644 index 00000000..300dcce0 --- /dev/null +++ b/include/linux/atmppp.h @@ -0,0 +1,24 @@ +/* atmppp.h - RFC2364 PPPoATM */ + +/* Written 2000 by Mitchell Blank Jr */ + +#ifndef _LINUX_ATMPPP_H +#define _LINUX_ATMPPP_H + +#include <linux/atm.h> + +#define PPPOATM_ENCAPS_AUTODETECT (0) +#define PPPOATM_ENCAPS_VC (1) +#define PPPOATM_ENCAPS_LLC (2) + +/* + * This is for the ATM_SETBACKEND call - these are like socket families: + * the first element of the structure is the backend number and the rest + * is per-backend specific + */ +struct atm_backend_ppp { + atm_backend_t backend_num; /* ATM_BACKEND_PPP */ + int encaps; /* PPPOATM_ENCAPS_* */ +}; + +#endif /* _LINUX_ATMPPP_H */ diff --git a/include/linux/atmsap.h b/include/linux/atmsap.h new file mode 100644 index 00000000..799b1045 --- /dev/null +++ b/include/linux/atmsap.h @@ -0,0 +1,162 @@ +/* atmsap.h - ATM Service Access Point addressing definitions */ + +/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef _LINUX_ATMSAP_H +#define _LINUX_ATMSAP_H + +#include <linux/atmapi.h> + +/* + * BEGIN_xx and END_xx markers are used for automatic generation of + * documentation. Do not change them. + */ + + +/* + * Layer 2 protocol identifiers + */ + +/* BEGIN_L2 */ +#define ATM_L2_NONE 0 /* L2 not specified */ +#define ATM_L2_ISO1745 0x01 /* Basic mode ISO 1745 */ +#define ATM_L2_Q291 0x02 /* ITU-T Q.291 (Rec. I.441) */ +#define ATM_L2_X25_LL 0x06 /* ITU-T X.25, link layer */ +#define ATM_L2_X25_ML 0x07 /* ITU-T X.25, multilink */ +#define ATM_L2_LAPB 0x08 /* Extended LAPB, half-duplex (Rec. 
T.71) */ +#define ATM_L2_HDLC_ARM 0x09 /* HDLC ARM (ISO/IEC 4335) */ +#define ATM_L2_HDLC_NRM 0x0a /* HDLC NRM (ISO/IEC 4335) */ +#define ATM_L2_HDLC_ABM 0x0b /* HDLC ABM (ISO/IEC 4335) */ +#define ATM_L2_ISO8802 0x0c /* LAN LLC (ISO/IEC 8802/2) */ +#define ATM_L2_X75 0x0d /* ITU-T X.75, SLP */ +#define ATM_L2_Q922 0x0e /* ITU-T Q.922 */ +#define ATM_L2_USER 0x10 /* user-specified */ +#define ATM_L2_ISO7776 0x11 /* ISO 7776 DTE-DTE */ +/* END_L2 */ + + +/* + * Layer 3 protocol identifiers + */ + +/* BEGIN_L3 */ +#define ATM_L3_NONE 0 /* L3 not specified */ +#define ATM_L3_X25 0x06 /* ITU-T X.25, packet layer */ +#define ATM_L3_ISO8208 0x07 /* ISO/IEC 8208 */ +#define ATM_L3_X223 0x08 /* ITU-T X.223 | ISO/IEC 8878 */ +#define ATM_L3_ISO8473 0x09 /* ITU-T X.233 | ISO/IEC 8473 */ +#define ATM_L3_T70 0x0a /* ITU-T T.70 minimum network layer */ +#define ATM_L3_TR9577 0x0b /* ISO/IEC TR 9577 */ +#define ATM_L3_H310 0x0c /* ITU-T Recommendation H.310 */ +#define ATM_L3_H321 0x0d /* ITU-T Recommendation H.321 */ +#define ATM_L3_USER 0x10 /* user-specified */ +/* END_L3 */ + + +/* + * High layer identifiers + */ + +/* BEGIN_HL */ +#define ATM_HL_NONE 0 /* HL not specified */ +#define ATM_HL_ISO 0x01 /* ISO */ +#define ATM_HL_USER 0x02 /* user-specific */ +#define ATM_HL_HLP 0x03 /* high layer profile - UNI 3.0 only */ +#define ATM_HL_VENDOR 0x04 /* vendor-specific application identifier */ +/* END_HL */ + + +/* + * ITU-T coded mode of operation + */ + +/* BEGIN_IMD */ +#define ATM_IMD_NONE 0 /* mode not specified */ +#define ATM_IMD_NORMAL 1 /* normal mode of operation */ +#define ATM_IMD_EXTENDED 2 /* extended mode of operation */ +/* END_IMD */ + +/* + * H.310 code points + */ + +#define ATM_TT_NONE 0 /* terminal type not specified */ +#define ATM_TT_RX 1 /* receive only */ +#define ATM_TT_TX 2 /* send only */ +#define ATM_TT_RXTX 3 /* receive and send */ + +#define ATM_MC_NONE 0 /* no multiplexing */ +#define ATM_MC_TS 1 /* transport stream (TS) */ +#define ATM_MC_TS_FEC 2 /* transport stream with forward error corr. */ +#define ATM_MC_PS 3 /* program stream (PS) */ +#define ATM_MC_PS_FEC 4 /* program stream with forward error corr. */ +#define ATM_MC_H221 5 /* ITU-T Rec. 
H.221 */ + +/* + * SAP structures + */ + +#define ATM_MAX_HLI 8 /* maximum high-layer information length */ + + +struct atm_blli { + unsigned char l2_proto; /* layer 2 protocol */ + union { + struct { + unsigned char mode; /* mode of operation (ATM_IMD_xxx), 0 if */ + /* absent */ + unsigned char window; /* window size (k), 1-127 (0 to omit) */ + } itu; /* ITU-T encoding */ + unsigned char user; /* user-specified l2 information */ + } l2; + unsigned char l3_proto; /* layer 3 protocol */ + union { + struct { + unsigned char mode; /* mode of operation (ATM_IMD_xxx), 0 if */ + /* absent */ + unsigned char def_size; /* default packet size (log2), 4-12 (0 to */ + /* omit) */ + unsigned char window;/* packet window size, 1-127 (0 to omit) */ + } itu; /* ITU-T encoding */ + unsigned char user; /* user specified l3 information */ + struct { /* if l3_proto = ATM_L3_H310 */ + unsigned char term_type; /* terminal type */ + unsigned char fw_mpx_cap; /* forward multiplexing capability */ + /* only if term_type != ATM_TT_NONE */ + unsigned char bw_mpx_cap; /* backward multiplexing capability */ + /* only if term_type != ATM_TT_NONE */ + } h310; + struct { /* if l3_proto = ATM_L3_TR9577 */ + unsigned char ipi; /* initial protocol id */ + unsigned char snap[5];/* IEEE 802.1 SNAP identifier */ + /* (only if ipi == NLPID_IEEE802_1_SNAP) */ + } tr9577; + } l3; +} __ATM_API_ALIGN; + + +struct atm_bhli { + unsigned char hl_type; /* high layer information type */ + unsigned char hl_length; /* length (only if hl_type == ATM_HL_USER || */ + /* hl_type == ATM_HL_ISO) */ + unsigned char hl_info[ATM_MAX_HLI];/* high layer information */ +}; + + +#define ATM_MAX_BLLI 3 /* maximum number of BLLI elements */ + + +struct atm_sap { + struct atm_bhli bhli; /* local SAP, high-layer information */ + struct atm_blli blli[ATM_MAX_BLLI] __ATM_API_ALIGN; + /* local SAP, low-layer info */ +}; + + +static __inline__ int blli_in_use(struct atm_blli blli) +{ + return blli.l2_proto || blli.l3_proto; +} + +#endif diff --git a/include/linux/atmsvc.h b/include/linux/atmsvc.h new file mode 100644 index 00000000..aa71583b --- /dev/null +++ b/include/linux/atmsvc.h @@ -0,0 +1,55 @@ +/* atmsvc.h - ATM signaling kernel-demon interface definitions */ + +/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef _LINUX_ATMSVC_H +#define _LINUX_ATMSVC_H + +#include <linux/atmapi.h> +#include <linux/atm.h> +#include <linux/atmioc.h> + + +#define ATMSIGD_CTRL _IO('a',ATMIOC_SPECIAL) + /* become ATM signaling demon control socket */ + +enum atmsvc_msg_type { as_catch_null, as_bind, as_connect, as_accept, as_reject, + as_listen, as_okay, as_error, as_indicate, as_close, + as_itf_notify, as_modify, as_identify, as_terminate, + as_addparty, as_dropparty }; + +struct atmsvc_msg { + enum atmsvc_msg_type type; + atm_kptr_t vcc; + atm_kptr_t listen_vcc; /* indicate */ + int reply; /* for okay and close: */ + /* < 0: error before active */ + /* (sigd has discarded ctx) */ + /* ==0: success */ + /* > 0: error when active (still */ + /* need to close) */ + struct sockaddr_atmpvc pvc; /* indicate, okay (connect) */ + struct sockaddr_atmsvc local; /* local SVC address */ + struct atm_qos qos; /* QOS parameters */ + struct atm_sap sap; /* SAP */ + unsigned int session; /* for p2pm */ + struct sockaddr_atmsvc svc; /* SVC address */ +} __ATM_API_ALIGN; + +/* + * Message contents: see ftp://icaftp.epfl.ch/pub/linux/atm/docs/isp-*.tar.gz + */ + +/* + * Some policy stuff for atmsigd and for net/atm/svc.c. 
Both have to agree on + * what PCR is used to request bandwidth from the device driver. net/atm/svc.c + * tries to do better than that, but only if there's no routing decision (i.e. + * if signaling only uses one ATM interface). + */ + +#define SELECT_TOP_PCR(tp) ((tp).pcr ? (tp).pcr : \ + (tp).max_pcr && (tp).max_pcr != ATM_MAX_PCR ? (tp).max_pcr : \ + (tp).min_pcr ? (tp).min_pcr : ATM_MAX_PCR) + +#endif diff --git a/include/linux/atomic.h b/include/linux/atomic.h new file mode 100644 index 00000000..70cfcb2d --- /dev/null +++ b/include/linux/atomic.h @@ -0,0 +1,106 @@ +/* Atomic operations usable in machine independent code */ +#ifndef _LINUX_ATOMIC_H +#define _LINUX_ATOMIC_H +#include <asm/atomic.h> + +/** + * atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + return __atomic_add_unless(v, a, u) != u; +} + +/** + * atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1, so long as @v is non-zero. + * Returns non-zero if @v was non-zero, and zero otherwise. + */ +#ifndef atomic_inc_not_zero +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +#endif + +/** + * atomic_inc_not_zero_hint - increment if not null + * @v: pointer of type atomic_t + * @hint: probable value of the atomic before the increment + * + * This version of atomic_inc_not_zero() gives a hint of probable + * value of the atomic. This helps processor to not read the memory + * before doing the atomic read/modify/write cycle, lowering + * number of bus transactions on some arches. + * + * Returns: 0 if increment was not done, 1 otherwise. 
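+ *
+ * Illustrative use only (hypothetical object whose refcount is almost
+ * always 1 at this point); the caller bails out if the object is
+ * already on its way to being freed:
+ *
+ *	if (!atomic_inc_not_zero_hint(&obj->refcnt, 1))
+ *		return NULL;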
+ */ +#ifndef atomic_inc_not_zero_hint +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) +{ + int val, c = hint; + + /* sanity test, should be removed by compiler if hint is a constant */ + if (!hint) + return atomic_inc_not_zero(v); + + do { + val = atomic_cmpxchg(v, c, c + 1); + if (val == c) + return 1; + c = val; + } while (c); + + return 0; +} +#endif + +#ifndef atomic_inc_unless_negative +static inline int atomic_inc_unless_negative(atomic_t *p) +{ + int v, v1; + for (v = 0; v >= 0; v = v1) { + v1 = atomic_cmpxchg(p, v, v + 1); + if (likely(v1 == v)) + return 1; + } + return 0; +} +#endif + +#ifndef atomic_dec_unless_positive +static inline int atomic_dec_unless_positive(atomic_t *p) +{ + int v, v1; + for (v = 0; v <= 0; v = v1) { + v1 = atomic_cmpxchg(p, v, v - 1); + if (likely(v1 == v)) + return 1; + } + return 0; +} +#endif + +#ifndef CONFIG_ARCH_HAS_ATOMIC_OR +static inline void atomic_or(int i, atomic_t *v) +{ + int old; + int new; + + do { + old = atomic_read(v); + new = old | i; + } while (atomic_cmpxchg(v, old, new) != old); +} +#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */ + +#include <asm-generic/atomic-long.h> +#ifdef CONFIG_GENERIC_ATOMIC64 +#include <asm-generic/atomic64.h> +#endif +#endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h new file mode 100644 index 00000000..896c6892 --- /dev/null +++ b/include/linux/attribute_container.h @@ -0,0 +1,72 @@ +/* + * attribute_container.h - a generic container for all classes + * + * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> + * + * This file is licensed under GPLv2 + */ + +#ifndef _ATTRIBUTE_CONTAINER_H_ +#define _ATTRIBUTE_CONTAINER_H_ + +#include <linux/list.h> +#include <linux/klist.h> + +struct device; + +struct attribute_container { + struct list_head node; + struct klist containers; + struct class *class; + const struct attribute_group *grp; + struct device_attribute **attrs; + int (*match)(struct attribute_container *, struct device *); +#define ATTRIBUTE_CONTAINER_NO_CLASSDEVS 0x01 + unsigned long flags; +}; + +static inline int +attribute_container_no_classdevs(struct attribute_container *atc) +{ + return atc->flags & ATTRIBUTE_CONTAINER_NO_CLASSDEVS; +} + +static inline void +attribute_container_set_no_classdevs(struct attribute_container *atc) +{ + atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS; +} + +int attribute_container_register(struct attribute_container *cont); +int __must_check attribute_container_unregister(struct attribute_container *cont); +void attribute_container_create_device(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *)); +void attribute_container_add_device(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *)); +void attribute_container_remove_device(struct device *dev, + void (*fn)(struct attribute_container *, + struct device *, + struct device *)); +void attribute_container_device_trigger(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *)); +void attribute_container_trigger(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *)); +int attribute_container_add_attrs(struct device *classdev); +int attribute_container_add_class_device(struct device *classdev); +int attribute_container_add_class_device_adapter(struct attribute_container *cont, + struct device *dev, + struct device *classdev); +void 
attribute_container_remove_attrs(struct device *classdev); +void attribute_container_class_device_del(struct device *classdev); +struct attribute_container *attribute_container_classdev_to_container(struct device *); +struct device *attribute_container_find_class_device(struct attribute_container *, struct device *); +struct device_attribute **attribute_container_classdev_to_attrs(const struct device *classdev); + +#endif diff --git a/include/linux/audit.h b/include/linux/audit.h new file mode 100644 index 00000000..ed3ef197 --- /dev/null +++ b/include/linux/audit.h @@ -0,0 +1,723 @@ +/* audit.h -- Auditing support + * + * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Written by Rickard E. (Rik) Faith <faith@redhat.com> + * + */ + +#ifndef _LINUX_AUDIT_H_ +#define _LINUX_AUDIT_H_ + +#include <linux/types.h> +#include <linux/elf-em.h> +#include <linux/ptrace.h> + +/* The netlink messages for the audit system is divided into blocks: + * 1000 - 1099 are for commanding the audit system + * 1100 - 1199 user space trusted application messages + * 1200 - 1299 messages internal to the audit daemon + * 1300 - 1399 audit event messages + * 1400 - 1499 SE Linux use + * 1500 - 1599 kernel LSPP events + * 1600 - 1699 kernel crypto events + * 1700 - 1799 kernel anomaly records + * 1800 - 1899 kernel integrity events + * 1900 - 1999 future kernel use + * 2000 is for otherwise unclassified kernel audit messages (legacy) + * 2001 - 2099 unused (kernel) + * 2100 - 2199 user space anomaly records + * 2200 - 2299 user space actions taken in response to anomalies + * 2300 - 2399 user space generated LSPP events + * 2400 - 2499 user space crypto events + * 2500 - 2999 future user space (maybe integrity labels and related events) + * + * Messages from 1000-1199 are bi-directional. 1200-1299 & 2100 - 2999 are + * exclusively user space. 1300-2099 is kernel --> user space + * communication. 
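+ *
+ * For example, AUDIT_ADD_RULE (1011, defined below) is a command sent to
+ * the kernel, while AUDIT_SYSCALL (1300) is an event record emitted by
+ * the kernel towards user space.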
+ */ +#define AUDIT_GET 1000 /* Get status */ +#define AUDIT_SET 1001 /* Set status (enable/disable/auditd) */ +#define AUDIT_LIST 1002 /* List syscall rules -- deprecated */ +#define AUDIT_ADD 1003 /* Add syscall rule -- deprecated */ +#define AUDIT_DEL 1004 /* Delete syscall rule -- deprecated */ +#define AUDIT_USER 1005 /* Message from userspace -- deprecated */ +#define AUDIT_LOGIN 1006 /* Define the login id and information */ +#define AUDIT_WATCH_INS 1007 /* Insert file/dir watch entry */ +#define AUDIT_WATCH_REM 1008 /* Remove file/dir watch entry */ +#define AUDIT_WATCH_LIST 1009 /* List all file/dir watches */ +#define AUDIT_SIGNAL_INFO 1010 /* Get info about sender of signal to auditd */ +#define AUDIT_ADD_RULE 1011 /* Add syscall filtering rule */ +#define AUDIT_DEL_RULE 1012 /* Delete syscall filtering rule */ +#define AUDIT_LIST_RULES 1013 /* List syscall filtering rules */ +#define AUDIT_TRIM 1014 /* Trim junk from watched tree */ +#define AUDIT_MAKE_EQUIV 1015 /* Append to watched tree */ +#define AUDIT_TTY_GET 1016 /* Get TTY auditing status */ +#define AUDIT_TTY_SET 1017 /* Set TTY auditing status */ + +#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */ +#define AUDIT_USER_AVC 1107 /* We filter this differently */ +#define AUDIT_USER_TTY 1124 /* Non-ICANON TTY input meaning */ +#define AUDIT_LAST_USER_MSG 1199 +#define AUDIT_FIRST_USER_MSG2 2100 /* More user space messages */ +#define AUDIT_LAST_USER_MSG2 2999 + +#define AUDIT_DAEMON_START 1200 /* Daemon startup record */ +#define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */ +#define AUDIT_DAEMON_ABORT 1202 /* Daemon error stop record */ +#define AUDIT_DAEMON_CONFIG 1203 /* Daemon config change */ + +#define AUDIT_SYSCALL 1300 /* Syscall event */ +/* #define AUDIT_FS_WATCH 1301 * Deprecated */ +#define AUDIT_PATH 1302 /* Filename path information */ +#define AUDIT_IPC 1303 /* IPC record */ +#define AUDIT_SOCKETCALL 1304 /* sys_socketcall arguments */ +#define AUDIT_CONFIG_CHANGE 1305 /* Audit system configuration change */ +#define AUDIT_SOCKADDR 1306 /* sockaddr copied as syscall arg */ +#define AUDIT_CWD 1307 /* Current working directory */ +#define AUDIT_EXECVE 1309 /* execve arguments */ +#define AUDIT_IPC_SET_PERM 1311 /* IPC new permissions record type */ +#define AUDIT_MQ_OPEN 1312 /* POSIX MQ open record type */ +#define AUDIT_MQ_SENDRECV 1313 /* POSIX MQ send/receive record type */ +#define AUDIT_MQ_NOTIFY 1314 /* POSIX MQ notify record type */ +#define AUDIT_MQ_GETSETATTR 1315 /* POSIX MQ get/set attribute record type */ +#define AUDIT_KERNEL_OTHER 1316 /* For use by 3rd party modules */ +#define AUDIT_FD_PAIR 1317 /* audit record for pipe/socketpair */ +#define AUDIT_OBJ_PID 1318 /* ptrace target */ +#define AUDIT_TTY 1319 /* Input on an administrative TTY */ +#define AUDIT_EOE 1320 /* End of multi-record event */ +#define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */ +#define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */ +#define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */ +#define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */ +#define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */ + +#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ +#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ +#define AUDIT_AVC_PATH 1402 /* dentry, vfsmount pair from avc */ +#define AUDIT_MAC_POLICY_LOAD 1403 /* Policy file load */ +#define AUDIT_MAC_STATUS 1404 /* Changed 
enforcing,permissive,off */ +#define AUDIT_MAC_CONFIG_CHANGE 1405 /* Changes to booleans */ +#define AUDIT_MAC_UNLBL_ALLOW 1406 /* NetLabel: allow unlabeled traffic */ +#define AUDIT_MAC_CIPSOV4_ADD 1407 /* NetLabel: add CIPSOv4 DOI entry */ +#define AUDIT_MAC_CIPSOV4_DEL 1408 /* NetLabel: del CIPSOv4 DOI entry */ +#define AUDIT_MAC_MAP_ADD 1409 /* NetLabel: add LSM domain mapping */ +#define AUDIT_MAC_MAP_DEL 1410 /* NetLabel: del LSM domain mapping */ +#define AUDIT_MAC_IPSEC_ADDSA 1411 /* Not used */ +#define AUDIT_MAC_IPSEC_DELSA 1412 /* Not used */ +#define AUDIT_MAC_IPSEC_ADDSPD 1413 /* Not used */ +#define AUDIT_MAC_IPSEC_DELSPD 1414 /* Not used */ +#define AUDIT_MAC_IPSEC_EVENT 1415 /* Audit an IPSec event */ +#define AUDIT_MAC_UNLBL_STCADD 1416 /* NetLabel: add a static label */ +#define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */ + +#define AUDIT_FIRST_KERN_ANOM_MSG 1700 +#define AUDIT_LAST_KERN_ANOM_MSG 1799 +#define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */ +#define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */ +#define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */ +#define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */ +#define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */ +#define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */ +#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */ +#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */ + +#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */ + +/* Rule flags */ +#define AUDIT_FILTER_USER 0x00 /* Apply rule to user-generated messages */ +#define AUDIT_FILTER_TASK 0x01 /* Apply rule at task creation (not syscall) */ +#define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */ +#define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */ +#define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */ +#define AUDIT_FILTER_TYPE 0x05 /* Apply rule at audit_log_start */ + +#define AUDIT_NR_FILTERS 6 + +#define AUDIT_FILTER_PREPEND 0x10 /* Prepend to front of list */ + +/* Rule actions */ +#define AUDIT_NEVER 0 /* Do not build context if rule matches */ +#define AUDIT_POSSIBLE 1 /* Build context if rule matches */ +#define AUDIT_ALWAYS 2 /* Generate audit record if rule matches */ + +/* Rule structure sizes -- if these change, different AUDIT_ADD and + * AUDIT_LIST commands must be implemented. */ +#define AUDIT_MAX_FIELDS 64 +#define AUDIT_MAX_KEY_LEN 256 +#define AUDIT_BITMASK_SIZE 64 +#define AUDIT_WORD(nr) ((__u32)((nr)/32)) +#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32)) + +#define AUDIT_SYSCALL_CLASSES 16 +#define AUDIT_CLASS_DIR_WRITE 0 +#define AUDIT_CLASS_DIR_WRITE_32 1 +#define AUDIT_CLASS_CHATTR 2 +#define AUDIT_CLASS_CHATTR_32 3 +#define AUDIT_CLASS_READ 4 +#define AUDIT_CLASS_READ_32 5 +#define AUDIT_CLASS_WRITE 6 +#define AUDIT_CLASS_WRITE_32 7 +#define AUDIT_CLASS_SIGNAL 8 +#define AUDIT_CLASS_SIGNAL_32 9 + +/* This bitmask is used to validate user input. It represents all bits that + * are currently used in an audit field constant understood by the kernel. + * If you are adding a new #define AUDIT_<whatever>, please ensure that + * AUDIT_UNUSED_BITS is updated if need be. 
*/ +#define AUDIT_UNUSED_BITS 0x07FFFC00 + +/* AUDIT_FIELD_COMPARE rule list */ +#define AUDIT_COMPARE_UID_TO_OBJ_UID 1 +#define AUDIT_COMPARE_GID_TO_OBJ_GID 2 +#define AUDIT_COMPARE_EUID_TO_OBJ_UID 3 +#define AUDIT_COMPARE_EGID_TO_OBJ_GID 4 +#define AUDIT_COMPARE_AUID_TO_OBJ_UID 5 +#define AUDIT_COMPARE_SUID_TO_OBJ_UID 6 +#define AUDIT_COMPARE_SGID_TO_OBJ_GID 7 +#define AUDIT_COMPARE_FSUID_TO_OBJ_UID 8 +#define AUDIT_COMPARE_FSGID_TO_OBJ_GID 9 + +#define AUDIT_COMPARE_UID_TO_AUID 10 +#define AUDIT_COMPARE_UID_TO_EUID 11 +#define AUDIT_COMPARE_UID_TO_FSUID 12 +#define AUDIT_COMPARE_UID_TO_SUID 13 + +#define AUDIT_COMPARE_AUID_TO_FSUID 14 +#define AUDIT_COMPARE_AUID_TO_SUID 15 +#define AUDIT_COMPARE_AUID_TO_EUID 16 + +#define AUDIT_COMPARE_EUID_TO_SUID 17 +#define AUDIT_COMPARE_EUID_TO_FSUID 18 + +#define AUDIT_COMPARE_SUID_TO_FSUID 19 + +#define AUDIT_COMPARE_GID_TO_EGID 20 +#define AUDIT_COMPARE_GID_TO_FSGID 21 +#define AUDIT_COMPARE_GID_TO_SGID 22 + +#define AUDIT_COMPARE_EGID_TO_FSGID 23 +#define AUDIT_COMPARE_EGID_TO_SGID 24 +#define AUDIT_COMPARE_SGID_TO_FSGID 25 + +#define AUDIT_MAX_FIELD_COMPARE AUDIT_COMPARE_SGID_TO_FSGID + +/* Rule fields */ + /* These are useful when checking the + * task structure at task creation time + * (AUDIT_PER_TASK). */ +#define AUDIT_PID 0 +#define AUDIT_UID 1 +#define AUDIT_EUID 2 +#define AUDIT_SUID 3 +#define AUDIT_FSUID 4 +#define AUDIT_GID 5 +#define AUDIT_EGID 6 +#define AUDIT_SGID 7 +#define AUDIT_FSGID 8 +#define AUDIT_LOGINUID 9 +#define AUDIT_PERS 10 +#define AUDIT_ARCH 11 +#define AUDIT_MSGTYPE 12 +#define AUDIT_SUBJ_USER 13 /* security label user */ +#define AUDIT_SUBJ_ROLE 14 /* security label role */ +#define AUDIT_SUBJ_TYPE 15 /* security label type */ +#define AUDIT_SUBJ_SEN 16 /* security label sensitivity label */ +#define AUDIT_SUBJ_CLR 17 /* security label clearance label */ +#define AUDIT_PPID 18 +#define AUDIT_OBJ_USER 19 +#define AUDIT_OBJ_ROLE 20 +#define AUDIT_OBJ_TYPE 21 +#define AUDIT_OBJ_LEV_LOW 22 +#define AUDIT_OBJ_LEV_HIGH 23 + + /* These are ONLY useful when checking + * at syscall exit time (AUDIT_AT_EXIT). */ +#define AUDIT_DEVMAJOR 100 +#define AUDIT_DEVMINOR 101 +#define AUDIT_INODE 102 +#define AUDIT_EXIT 103 +#define AUDIT_SUCCESS 104 /* exit >= 0; value ignored */ +#define AUDIT_WATCH 105 +#define AUDIT_PERM 106 +#define AUDIT_DIR 107 +#define AUDIT_FILETYPE 108 +#define AUDIT_OBJ_UID 109 +#define AUDIT_OBJ_GID 110 +#define AUDIT_FIELD_COMPARE 111 + +#define AUDIT_ARG0 200 +#define AUDIT_ARG1 (AUDIT_ARG0+1) +#define AUDIT_ARG2 (AUDIT_ARG0+2) +#define AUDIT_ARG3 (AUDIT_ARG0+3) + +#define AUDIT_FILTERKEY 210 + +#define AUDIT_NEGATE 0x80000000 + +/* These are the supported operators. + * 4 2 1 8 + * = > < ? 
+ * ---------- + * 0 0 0 0 00 nonsense + * 0 0 0 1 08 & bit mask + * 0 0 1 0 10 < + * 0 1 0 0 20 > + * 0 1 1 0 30 != + * 1 0 0 0 40 = + * 1 0 0 1 48 &= bit test + * 1 0 1 0 50 <= + * 1 1 0 0 60 >= + * 1 1 1 1 78 all operators + */ +#define AUDIT_BIT_MASK 0x08000000 +#define AUDIT_LESS_THAN 0x10000000 +#define AUDIT_GREATER_THAN 0x20000000 +#define AUDIT_NOT_EQUAL 0x30000000 +#define AUDIT_EQUAL 0x40000000 +#define AUDIT_BIT_TEST (AUDIT_BIT_MASK|AUDIT_EQUAL) +#define AUDIT_LESS_THAN_OR_EQUAL (AUDIT_LESS_THAN|AUDIT_EQUAL) +#define AUDIT_GREATER_THAN_OR_EQUAL (AUDIT_GREATER_THAN|AUDIT_EQUAL) +#define AUDIT_OPERATORS (AUDIT_EQUAL|AUDIT_NOT_EQUAL|AUDIT_BIT_MASK) + +enum { + Audit_equal, + Audit_not_equal, + Audit_bitmask, + Audit_bittest, + Audit_lt, + Audit_gt, + Audit_le, + Audit_ge, + Audit_bad +}; + +/* Status symbols */ + /* Mask values */ +#define AUDIT_STATUS_ENABLED 0x0001 +#define AUDIT_STATUS_FAILURE 0x0002 +#define AUDIT_STATUS_PID 0x0004 +#define AUDIT_STATUS_RATE_LIMIT 0x0008 +#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010 + /* Failure-to-log actions */ +#define AUDIT_FAIL_SILENT 0 +#define AUDIT_FAIL_PRINTK 1 +#define AUDIT_FAIL_PANIC 2 + +/* distinguish syscall tables */ +#define __AUDIT_ARCH_64BIT 0x80000000 +#define __AUDIT_ARCH_LE 0x40000000 +#define AUDIT_ARCH_ALPHA (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_ARMEB (EM_ARM) +#define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_FRV (EM_FRV) +#define AUDIT_ARCH_H8300 (EM_H8_300) +#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_M32R (EM_M32R) +#define AUDIT_ARCH_M68K (EM_68K) +#define AUDIT_ARCH_MIPS (EM_MIPS) +#define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_MIPS64 (EM_MIPS|__AUDIT_ARCH_64BIT) +#define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_PARISC (EM_PARISC) +#define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT) +#define AUDIT_ARCH_PPC (EM_PPC) +#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT) +#define AUDIT_ARCH_S390 (EM_S390) +#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT) +#define AUDIT_ARCH_SH (EM_SH) +#define AUDIT_ARCH_SHEL (EM_SH|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_SH64 (EM_SH|__AUDIT_ARCH_64BIT) +#define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_SPARC (EM_SPARC) +#define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT) +#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) + +#define AUDIT_PERM_EXEC 1 +#define AUDIT_PERM_WRITE 2 +#define AUDIT_PERM_READ 4 +#define AUDIT_PERM_ATTR 8 + +struct audit_status { + __u32 mask; /* Bit mask for valid entries */ + __u32 enabled; /* 1 = enabled, 0 = disabled */ + __u32 failure; /* Failure-to-log action */ + __u32 pid; /* pid of auditd process */ + __u32 rate_limit; /* messages rate limit (per second) */ + __u32 backlog_limit; /* waiting messages limit */ + __u32 lost; /* messages lost */ + __u32 backlog; /* messages waiting in queue */ +}; + +struct audit_tty_status { + __u32 enabled; /* 1 = enabled, 0 = disabled */ +}; + +/* audit_rule_data supports filter rules with both integer and string + * fields. It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and + * AUDIT_LIST_RULES requests. 
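+ *
+ * (Informally: string field values are packed back to back into buf[],
+ * each one's length carried in the matching values[] slot, with buflen
+ * giving their combined length.)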
+ */ +struct audit_rule_data { + __u32 flags; /* AUDIT_PER_{TASK,CALL}, AUDIT_PREPEND */ + __u32 action; /* AUDIT_NEVER, AUDIT_POSSIBLE, AUDIT_ALWAYS */ + __u32 field_count; + __u32 mask[AUDIT_BITMASK_SIZE]; /* syscall(s) affected */ + __u32 fields[AUDIT_MAX_FIELDS]; + __u32 values[AUDIT_MAX_FIELDS]; + __u32 fieldflags[AUDIT_MAX_FIELDS]; + __u32 buflen; /* total length of string fields */ + char buf[0]; /* string fields buffer */ +}; + +/* audit_rule is supported to maintain backward compatibility with + * userspace. It supports integer fields only and corresponds to + * AUDIT_ADD, AUDIT_DEL and AUDIT_LIST requests. + */ +struct audit_rule { /* for AUDIT_LIST, AUDIT_ADD, and AUDIT_DEL */ + __u32 flags; /* AUDIT_PER_{TASK,CALL}, AUDIT_PREPEND */ + __u32 action; /* AUDIT_NEVER, AUDIT_POSSIBLE, AUDIT_ALWAYS */ + __u32 field_count; + __u32 mask[AUDIT_BITMASK_SIZE]; + __u32 fields[AUDIT_MAX_FIELDS]; + __u32 values[AUDIT_MAX_FIELDS]; +}; + +#ifdef __KERNEL__ +#include <linux/sched.h> + +struct audit_sig_info { + uid_t uid; + pid_t pid; + char ctx[0]; +}; + +struct audit_buffer; +struct audit_context; +struct inode; +struct netlink_skb_parms; +struct path; +struct linux_binprm; +struct mq_attr; +struct mqstat; +struct audit_watch; +struct audit_tree; + +struct audit_krule { + int vers_ops; + u32 flags; + u32 listnr; + u32 action; + u32 mask[AUDIT_BITMASK_SIZE]; + u32 buflen; /* for data alloc on list rules */ + u32 field_count; + char *filterkey; /* ties events to rules */ + struct audit_field *fields; + struct audit_field *arch_f; /* quick access to arch field */ + struct audit_field *inode_f; /* quick access to an inode field */ + struct audit_watch *watch; /* associated watch */ + struct audit_tree *tree; /* associated watched tree */ + struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ + struct list_head list; /* for AUDIT_LIST* purposes only */ + u64 prio; +}; + +struct audit_field { + u32 type; + u32 val; + u32 op; + char *lsm_str; + void *lsm_rule; +}; + +extern int __init audit_register_class(int class, unsigned *list); +extern int audit_classify_syscall(int abi, unsigned syscall); +extern int audit_classify_arch(int arch); +#ifdef CONFIG_AUDITSYSCALL +/* These are defined in auditsc.c */ + /* Public API */ +extern int audit_alloc(struct task_struct *task); +extern void __audit_free(struct task_struct *task); +extern void __audit_syscall_entry(int arch, + int major, unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3); +extern void __audit_syscall_exit(int ret_success, long ret_value); +extern void __audit_getname(const char *name); +extern void audit_putname(const char *name); +extern void __audit_inode(const char *name, const struct dentry *dentry); +extern void __audit_inode_child(const struct dentry *dentry, + const struct inode *parent); +extern void __audit_seccomp(unsigned long syscall); +extern void __audit_ptrace(struct task_struct *t); + +static inline int audit_dummy_context(void) +{ + void *p = current->audit_context; + return !p || *(int *)p; +} +static inline void audit_free(struct task_struct *task) +{ + if (unlikely(task->audit_context)) + __audit_free(task); +} +static inline void audit_syscall_entry(int arch, int major, unsigned long a0, + unsigned long a1, unsigned long a2, + unsigned long a3) +{ + if (unlikely(!audit_dummy_context())) + __audit_syscall_entry(arch, major, a0, a1, a2, a3); +} +static inline void audit_syscall_exit(void *pt_regs) +{ + if (unlikely(current->audit_context)) { + int success = 
is_syscall_success(pt_regs); + int return_code = regs_return_value(pt_regs); + + __audit_syscall_exit(success, return_code); + } +} +static inline void audit_getname(const char *name) +{ + if (unlikely(!audit_dummy_context())) + __audit_getname(name); +} +static inline void audit_inode(const char *name, const struct dentry *dentry) { + if (unlikely(!audit_dummy_context())) + __audit_inode(name, dentry); +} +static inline void audit_inode_child(const struct dentry *dentry, + const struct inode *parent) { + if (unlikely(!audit_dummy_context())) + __audit_inode_child(dentry, parent); +} +void audit_core_dumps(long signr); + +static inline void audit_seccomp(unsigned long syscall) +{ + if (unlikely(!audit_dummy_context())) + __audit_seccomp(syscall); +} + +static inline void audit_ptrace(struct task_struct *t) +{ + if (unlikely(!audit_dummy_context())) + __audit_ptrace(t); +} + + /* Private API (for audit.c only) */ +extern unsigned int audit_serial(void); +extern int auditsc_get_stamp(struct audit_context *ctx, + struct timespec *t, unsigned int *serial); +extern int audit_set_loginuid(uid_t loginuid); +#define audit_get_loginuid(t) ((t)->loginuid) +#define audit_get_sessionid(t) ((t)->sessionid) +extern void audit_log_task_context(struct audit_buffer *ab); +extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); +extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); +extern int __audit_bprm(struct linux_binprm *bprm); +extern void __audit_socketcall(int nargs, unsigned long *args); +extern int __audit_sockaddr(int len, void *addr); +extern void __audit_fd_pair(int fd1, int fd2); +extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); +extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout); +extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); +extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); +extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old); +extern void __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old); +extern void __audit_mmap_fd(int fd, int flags); + +static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) +{ + if (unlikely(!audit_dummy_context())) + __audit_ipc_obj(ipcp); +} +static inline void audit_fd_pair(int fd1, int fd2) +{ + if (unlikely(!audit_dummy_context())) + __audit_fd_pair(fd1, fd2); +} +static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) +{ + if (unlikely(!audit_dummy_context())) + __audit_ipc_set_perm(qbytes, uid, gid, mode); +} +static inline int audit_bprm(struct linux_binprm *bprm) +{ + if (unlikely(!audit_dummy_context())) + return __audit_bprm(bprm); + return 0; +} +static inline void audit_socketcall(int nargs, unsigned long *args) +{ + if (unlikely(!audit_dummy_context())) + __audit_socketcall(nargs, args); +} +static inline int audit_sockaddr(int len, void *addr) +{ + if (unlikely(!audit_dummy_context())) + return __audit_sockaddr(len, addr); + return 0; +} +static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_open(oflag, mode, attr); +} +static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); +} 
+static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_notify(mqdes, notification); +} +static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) +{ + if (unlikely(!audit_dummy_context())) + __audit_mq_getsetattr(mqdes, mqstat); +} + +static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old) +{ + if (unlikely(!audit_dummy_context())) + return __audit_log_bprm_fcaps(bprm, new, old); + return 0; +} + +static inline void audit_log_capset(pid_t pid, const struct cred *new, + const struct cred *old) +{ + if (unlikely(!audit_dummy_context())) + __audit_log_capset(pid, new, old); +} + +static inline void audit_mmap_fd(int fd, int flags) +{ + if (unlikely(!audit_dummy_context())) + __audit_mmap_fd(fd, flags); +} + +extern int audit_n_rules; +extern int audit_signals; +#else /* CONFIG_AUDITSYSCALL */ +#define audit_alloc(t) ({ 0; }) +#define audit_free(t) do { ; } while (0) +#define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0) +#define audit_syscall_exit(r) do { ; } while (0) +#define audit_dummy_context() 1 +#define audit_getname(n) do { ; } while (0) +#define audit_putname(n) do { ; } while (0) +#define __audit_inode(n,d) do { ; } while (0) +#define __audit_inode_child(i,p) do { ; } while (0) +#define audit_inode(n,d) do { (void)(d); } while (0) +#define audit_inode_child(i,p) do { ; } while (0) +#define audit_core_dumps(i) do { ; } while (0) +#define audit_seccomp(i) do { ; } while (0) +#define auditsc_get_stamp(c,t,s) (0) +#define audit_get_loginuid(t) (-1) +#define audit_get_sessionid(t) (-1) +#define audit_log_task_context(b) do { ; } while (0) +#define audit_ipc_obj(i) ((void)0) +#define audit_ipc_set_perm(q,u,g,m) ((void)0) +#define audit_bprm(p) ({ 0; }) +#define audit_socketcall(n,a) ((void)0) +#define audit_fd_pair(n,a) ((void)0) +#define audit_sockaddr(len, addr) ({ 0; }) +#define audit_mq_open(o,m,a) ((void)0) +#define audit_mq_sendrecv(d,l,p,t) ((void)0) +#define audit_mq_notify(d,n) ((void)0) +#define audit_mq_getsetattr(d,s) ((void)0) +#define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; }) +#define audit_log_capset(pid, ncr, ocr) ((void)0) +#define audit_mmap_fd(fd, flags) ((void)0) +#define audit_ptrace(t) ((void)0) +#define audit_n_rules 0 +#define audit_signals 0 +#endif /* CONFIG_AUDITSYSCALL */ + +#ifdef CONFIG_AUDIT +/* These are defined in audit.c */ + /* Public API */ +extern __printf(4, 5) +void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, + const char *fmt, ...); + +extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); +extern __printf(2, 3) +void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); +extern void audit_log_end(struct audit_buffer *ab); +extern int audit_string_contains_control(const char *string, + size_t len); +extern void audit_log_n_hex(struct audit_buffer *ab, + const unsigned char *buf, + size_t len); +extern void audit_log_n_string(struct audit_buffer *ab, + const char *buf, + size_t n); +#define audit_log_string(a,b) audit_log_n_string(a, b, strlen(b)); +extern void audit_log_n_untrustedstring(struct audit_buffer *ab, + const char *string, + size_t n); +extern void audit_log_untrustedstring(struct audit_buffer *ab, + const char *string); +extern void audit_log_d_path(struct audit_buffer *ab, + const char *prefix, + const struct path *path); +extern void audit_log_key(struct audit_buffer *ab, + char *key); 
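+
+/*
+ * Typical record emission (an illustrative sketch only, not part of the
+ * original header; "op" and "result" are hypothetical locals):
+ *
+ *	struct audit_buffer *ab;
+ *
+ *	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_KERNEL_OTHER);
+ *	if (ab) {
+ *		audit_log_format(ab, "op=%s res=%u", op, result);
+ *		audit_log_end(ab);
+ *	}
+ *
+ * or, equivalently, a single call to the audit_log() helper declared above.
+ */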
+extern void audit_log_lost(const char *message);
+#ifdef CONFIG_SECURITY
+extern void audit_log_secctx(struct audit_buffer *ab, u32 secid);
+#else
+#define audit_log_secctx(b,s) do { ; } while (0)
+#endif
+
+extern int audit_update_lsm_rules(void);
+
+ /* Private API (for audit.c only) */
+extern int audit_filter_user(struct netlink_skb_parms *cb);
+extern int audit_filter_type(int type);
+extern int audit_receive_filter(int type, int pid, int uid, int seq,
+ void *data, size_t datasz, uid_t loginuid,
+ u32 sessionid, u32 sid);
+extern int audit_enabled;
+#else
+#define audit_log(c,g,t,f,...) do { ; } while (0)
+#define audit_log_start(c,g,t) ({ NULL; })
+#define audit_log_vformat(b,f,a) do { ; } while (0)
+#define audit_log_format(b,f,...) do { ; } while (0)
+#define audit_log_end(b) do { ; } while (0)
+#define audit_log_n_hex(a,b,l) do { ; } while (0)
+#define audit_log_n_string(a,c,l) do { ; } while (0)
+#define audit_log_string(a,c) do { ; } while (0)
+#define audit_log_n_untrustedstring(a,n,s) do { ; } while (0)
+#define audit_log_untrustedstring(a,s) do { ; } while (0)
+#define audit_log_d_path(b, p, d) do { ; } while (0)
+#define audit_log_key(b, k) do { ; } while (0)
+#define audit_log_secctx(b,s) do { ; } while (0)
+#define audit_enabled 0
+#endif
+#endif
+#endif
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
new file mode 100644
index 00000000..850f39b3
--- /dev/null
+++ b/include/linux/auto_dev-ioctl.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2008 Red Hat, Inc. All rights reserved.
+ * Copyright 2008 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ */
+
+#ifndef _LINUX_AUTO_DEV_IOCTL_H
+#define _LINUX_AUTO_DEV_IOCTL_H
+
+#include <linux/auto_fs.h>
+
+#ifdef __KERNEL__
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif /* __KERNEL__ */
+
+#define AUTOFS_DEVICE_NAME "autofs"
+
+#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
+#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
+
+#define AUTOFS_DEVID_LEN 16
+
+#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl)
+
+/*
+ * An ioctl interface for autofs mount point control.
+ */
+
+struct args_protover {
+ __u32 version;
+};
+
+struct args_protosubver {
+ __u32 sub_version;
+};
+
+struct args_openmount {
+ __u32 devid;
+};
+
+struct args_ready {
+ __u32 token;
+};
+
+struct args_fail {
+ __u32 token;
+ __s32 status;
+};
+
+struct args_setpipefd {
+ __s32 pipefd;
+};
+
+struct args_timeout {
+ __u64 timeout;
+};
+
+struct args_requester {
+ __u32 uid;
+ __u32 gid;
+};
+
+struct args_expire {
+ __u32 how;
+};
+
+struct args_askumount {
+ __u32 may_umount;
+};
+
+struct args_ismountpoint {
+ union {
+ struct args_in {
+ __u32 type;
+ } in;
+ struct args_out {
+ __u32 devid;
+ __u32 magic;
+ } out;
+ };
+};
+
+/*
+ * All the ioctls use this structure.
+ * When sending a path, size must account for the total length
+ * of the chunk of memory; otherwise it is the size of the
+ * structure.
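+ *
+ * For instance (illustrative only), a caller that passes a mount-point
+ * path would set
+ *
+ *	param->size = AUTOFS_DEV_IOCTL_SIZE + strlen(path) + 1;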
+ */ + +struct autofs_dev_ioctl { + __u32 ver_major; + __u32 ver_minor; + __u32 size; /* total size of data passed in + * including this struct */ + __s32 ioctlfd; /* automount command fd */ + + /* Command parameters */ + + union { + struct args_protover protover; + struct args_protosubver protosubver; + struct args_openmount openmount; + struct args_ready ready; + struct args_fail fail; + struct args_setpipefd setpipefd; + struct args_timeout timeout; + struct args_requester requester; + struct args_expire expire; + struct args_askumount askumount; + struct args_ismountpoint ismountpoint; + }; + + char path[0]; +}; + +static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in) +{ + memset(in, 0, sizeof(struct autofs_dev_ioctl)); + in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; + in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; + in->size = sizeof(struct autofs_dev_ioctl); + in->ioctlfd = -1; + return; +} + +/* + * If you change this make sure you make the corresponding change + * to autofs-dev-ioctl.c:lookup_ioctl() + */ +enum { + /* Get various version info */ + AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71, + AUTOFS_DEV_IOCTL_PROTOVER_CMD, + AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, + + /* Open mount ioctl fd */ + AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, + + /* Close mount ioctl fd */ + AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, + + /* Mount/expire status returns */ + AUTOFS_DEV_IOCTL_READY_CMD, + AUTOFS_DEV_IOCTL_FAIL_CMD, + + /* Activate/deactivate autofs mount */ + AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, + AUTOFS_DEV_IOCTL_CATATONIC_CMD, + + /* Expiry timeout */ + AUTOFS_DEV_IOCTL_TIMEOUT_CMD, + + /* Get mount last requesting uid and gid */ + AUTOFS_DEV_IOCTL_REQUESTER_CMD, + + /* Check for eligible expire candidates */ + AUTOFS_DEV_IOCTL_EXPIRE_CMD, + + /* Request busy status */ + AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, + + /* Check if path is a mountpoint */ + AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, +}; + +#define AUTOFS_IOCTL 0x93 + +#define AUTOFS_DEV_IOCTL_VERSION \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_PROTOVER \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_PROTOSUBVER \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_OPENMOUNT \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_READY \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_FAIL \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_SETPIPEFD \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_CATATONIC \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_TIMEOUT \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_REQUESTER \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_EXPIRE \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_ASKUMOUNT \ + _IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl) + +#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \ + 
_IOWR(AUTOFS_IOCTL, \ + AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl) + +#endif /* _LINUX_AUTO_DEV_IOCTL_H */ diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h new file mode 100644 index 00000000..da64e150 --- /dev/null +++ b/include/linux/auto_fs.h @@ -0,0 +1,86 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * linux/include/linux/auto_fs.h + * + * Copyright 1997 Transmeta Corporation - All Rights Reserved + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + + +#ifndef _LINUX_AUTO_FS_H +#define _LINUX_AUTO_FS_H + +#include <linux/types.h> +#ifdef __KERNEL__ +#include <linux/fs.h> +#include <linux/limits.h> +#include <linux/ioctl.h> +#else +#include <sys/ioctl.h> +#endif /* __KERNEL__ */ + +/* This file describes autofs v3 */ +#define AUTOFS_PROTO_VERSION 3 + +/* Range of protocol versions defined */ +#define AUTOFS_MAX_PROTO_VERSION AUTOFS_PROTO_VERSION +#define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION + +/* + * Architectures where both 32- and 64-bit binaries can be executed + * on 64-bit kernels need this. This keeps the structure format + * uniform, and makes sure the wait_queue_token isn't too big to be + * passed back down to the kernel. + * + * This assumes that on these architectures: + * mode 32 bit 64 bit + * ------------------------- + * int 32 bit 32 bit + * long 32 bit 64 bit + * + * If so, 32-bit user-space code should be backwards compatible. + */ + +#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \ + || defined(__powerpc__) || defined(__s390__) +typedef unsigned int autofs_wqt_t; +#else +typedef unsigned long autofs_wqt_t; +#endif + +/* Packet types */ +#define autofs_ptype_missing 0 /* Missing entry (mount request) */ +#define autofs_ptype_expire 1 /* Expire entry (umount request) */ + +struct autofs_packet_hdr { + int proto_version; /* Protocol version */ + int type; /* Type of packet */ +}; + +struct autofs_packet_missing { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[NAME_MAX+1]; +}; + +/* v3 expire (via ioctl) */ +struct autofs_packet_expire { + struct autofs_packet_hdr hdr; + int len; + char name[NAME_MAX+1]; +}; + +#define AUTOFS_IOC_READY _IO(0x93,0x60) +#define AUTOFS_IOC_FAIL _IO(0x93,0x61) +#define AUTOFS_IOC_CATATONIC _IO(0x93,0x62) +#define AUTOFS_IOC_PROTOVER _IOR(0x93,0x63,int) +#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,compat_ulong_t) +#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93,0x64,unsigned long) +#define AUTOFS_IOC_EXPIRE _IOR(0x93,0x65,struct autofs_packet_expire) + +#endif /* _LINUX_AUTO_FS_H */ diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h new file mode 100644 index 00000000..e02982fa --- /dev/null +++ b/include/linux/auto_fs4.h @@ -0,0 +1,164 @@ +/* -*- c -*- + * linux/include/linux/auto_fs4.h + * + * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org> + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. 
+ */ + +#ifndef _LINUX_AUTO_FS4_H +#define _LINUX_AUTO_FS4_H + +/* Include common v3 definitions */ +#include <linux/types.h> +#include <linux/auto_fs.h> + +/* autofs v4 definitions */ +#undef AUTOFS_PROTO_VERSION +#undef AUTOFS_MIN_PROTO_VERSION +#undef AUTOFS_MAX_PROTO_VERSION + +#define AUTOFS_PROTO_VERSION 5 +#define AUTOFS_MIN_PROTO_VERSION 3 +#define AUTOFS_MAX_PROTO_VERSION 5 + +#define AUTOFS_PROTO_SUBVERSION 2 + +/* Mask for expire behaviour */ +#define AUTOFS_EXP_IMMEDIATE 1 +#define AUTOFS_EXP_LEAVES 2 + +#define AUTOFS_TYPE_ANY 0U +#define AUTOFS_TYPE_INDIRECT 1U +#define AUTOFS_TYPE_DIRECT 2U +#define AUTOFS_TYPE_OFFSET 4U + +static inline void set_autofs_type_indirect(unsigned int *type) +{ + *type = AUTOFS_TYPE_INDIRECT; + return; +} + +static inline unsigned int autofs_type_indirect(unsigned int type) +{ + return (type == AUTOFS_TYPE_INDIRECT); +} + +static inline void set_autofs_type_direct(unsigned int *type) +{ + *type = AUTOFS_TYPE_DIRECT; + return; +} + +static inline unsigned int autofs_type_direct(unsigned int type) +{ + return (type == AUTOFS_TYPE_DIRECT); +} + +static inline void set_autofs_type_offset(unsigned int *type) +{ + *type = AUTOFS_TYPE_OFFSET; + return; +} + +static inline unsigned int autofs_type_offset(unsigned int type) +{ + return (type == AUTOFS_TYPE_OFFSET); +} + +static inline unsigned int autofs_type_trigger(unsigned int type) +{ + return (type == AUTOFS_TYPE_DIRECT || type == AUTOFS_TYPE_OFFSET); +} + +/* + * This isn't really a type as we use it to say "no type set" to + * indicate we want to search for "any" mount in the + * autofs_dev_ioctl_ismountpoint() device ioctl function. + */ +static inline void set_autofs_type_any(unsigned int *type) +{ + *type = AUTOFS_TYPE_ANY; + return; +} + +static inline unsigned int autofs_type_any(unsigned int type) +{ + return (type == AUTOFS_TYPE_ANY); +} + +/* Daemon notification packet types */ +enum autofs_notify { + NFY_NONE, + NFY_MOUNT, + NFY_EXPIRE +}; + +/* Kernel protocol version 4 packet types */ + +/* Expire entry (umount request) */ +#define autofs_ptype_expire_multi 2 + +/* Kernel protocol version 5 packet types */ + +/* Indirect mount missing and expire requests. 
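+ *
+ * For illustration only (a userspace sketch; pipefd and mount_missing are
+ * placeholders): an automount daemon receives these as struct
+ * autofs_v5_packet read from the kernel pipe, e.g.:
+ *
+ *	union autofs_v5_packet_union pkt;
+ *
+ *	if (read(pipefd, &pkt, sizeof(pkt)) > 0 &&
+ *	    pkt.hdr.type == autofs_ptype_missing_indirect)
+ *		mount_missing(pkt.missing_indirect.name,
+ *			      pkt.missing_indirect.wait_queue_token);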
*/ +#define autofs_ptype_missing_indirect 3 +#define autofs_ptype_expire_indirect 4 + +/* Direct mount missing and expire requests */ +#define autofs_ptype_missing_direct 5 +#define autofs_ptype_expire_direct 6 + +/* v4 multi expire (via pipe) */ +struct autofs_packet_expire_multi { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + int len; + char name[NAME_MAX+1]; +}; + +union autofs_packet_union { + struct autofs_packet_hdr hdr; + struct autofs_packet_missing missing; + struct autofs_packet_expire expire; + struct autofs_packet_expire_multi expire_multi; +}; + +/* autofs v5 common packet struct */ +struct autofs_v5_packet { + struct autofs_packet_hdr hdr; + autofs_wqt_t wait_queue_token; + __u32 dev; + __u64 ino; + __u32 uid; + __u32 gid; + __u32 pid; + __u32 tgid; + __u32 len; + char name[NAME_MAX+1]; +}; + +typedef struct autofs_v5_packet autofs_packet_missing_indirect_t; +typedef struct autofs_v5_packet autofs_packet_expire_indirect_t; +typedef struct autofs_v5_packet autofs_packet_missing_direct_t; +typedef struct autofs_v5_packet autofs_packet_expire_direct_t; + +union autofs_v5_packet_union { + struct autofs_packet_hdr hdr; + struct autofs_v5_packet v5_packet; + autofs_packet_missing_indirect_t missing_indirect; + autofs_packet_expire_indirect_t expire_indirect; + autofs_packet_missing_direct_t missing_direct; + autofs_packet_expire_direct_t expire_direct; +}; + +#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int) +#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int) +#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93,0x70,int) + + +#endif /* _LINUX_AUTO_FS4_H */ diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h new file mode 100644 index 00000000..f3b5d4e3 --- /dev/null +++ b/include/linux/auxvec.h @@ -0,0 +1,39 @@ +#ifndef _LINUX_AUXVEC_H +#define _LINUX_AUXVEC_H + +#include <asm/auxvec.h> + +/* Symbolic values for the entries in the auxiliary table + put on the initial stack */ +#define AT_NULL 0 /* end of vector */ +#define AT_IGNORE 1 /* entry should be ignored */ +#define AT_EXECFD 2 /* file descriptor of program */ +#define AT_PHDR 3 /* program headers for program */ +#define AT_PHENT 4 /* size of program header entry */ +#define AT_PHNUM 5 /* number of program headers */ +#define AT_PAGESZ 6 /* system page size */ +#define AT_BASE 7 /* base address of interpreter */ +#define AT_FLAGS 8 /* flags */ +#define AT_ENTRY 9 /* entry point of program */ +#define AT_NOTELF 10 /* program is not ELF */ +#define AT_UID 11 /* real uid */ +#define AT_EUID 12 /* effective uid */ +#define AT_GID 13 /* real gid */ +#define AT_EGID 14 /* effective gid */ +#define AT_PLATFORM 15 /* string identifying CPU for optimizations */ +#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ +#define AT_CLKTCK 17 /* frequency at which times() increments */ +/* AT_* values 18 through 22 are reserved */ +#define AT_SECURE 23 /* secure mode boolean */ +#define AT_BASE_PLATFORM 24 /* string identifying real platform, may + * differ from AT_PLATFORM. 
*/ +#define AT_RANDOM 25 /* address of 16 random bytes */ + +#define AT_EXECFN 31 /* filename of program */ + +#ifdef __KERNEL__ +#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ + /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ +#endif + +#endif /* _LINUX_AUXVEC_H */ diff --git a/include/linux/average.h b/include/linux/average.h new file mode 100644 index 00000000..c6028fd7 --- /dev/null +++ b/include/linux/average.h @@ -0,0 +1,30 @@ +#ifndef _LINUX_AVERAGE_H +#define _LINUX_AVERAGE_H + +/* Exponentially weighted moving average (EWMA) */ + +/* For more documentation see lib/average.c */ + +struct ewma { + unsigned long internal; + unsigned long factor; + unsigned long weight; +}; + +extern void ewma_init(struct ewma *avg, unsigned long factor, + unsigned long weight); + +extern struct ewma *ewma_add(struct ewma *avg, unsigned long val); + +/** + * ewma_read() - Get average value + * @avg: Average structure + * + * Returns the average value held in @avg. + */ +static inline unsigned long ewma_read(const struct ewma *avg) +{ + return avg->internal >> avg->factor; +} + +#endif /* _LINUX_AVERAGE_H */ diff --git a/include/linux/ax25.h b/include/linux/ax25.h new file mode 100644 index 00000000..74c89a41 --- /dev/null +++ b/include/linux/ax25.h @@ -0,0 +1,116 @@ +/* + * These are the public elements of the Linux kernel AX.25 code. A similar + * file netrom.h exists for the NET/ROM protocol. + */ + +#ifndef AX25_KERNEL_H +#define AX25_KERNEL_H + +#include <linux/socket.h> + +#define AX25_MTU 256 +#define AX25_MAX_DIGIS 8 + +#define AX25_WINDOW 1 +#define AX25_T1 2 +#define AX25_N2 3 +#define AX25_T3 4 +#define AX25_T2 5 +#define AX25_BACKOFF 6 +#define AX25_EXTSEQ 7 +#define AX25_PIDINCL 8 +#define AX25_IDLE 9 +#define AX25_PACLEN 10 +#define AX25_IAMDIGI 12 + +#define AX25_KILL 99 + +#define SIOCAX25GETUID (SIOCPROTOPRIVATE+0) +#define SIOCAX25ADDUID (SIOCPROTOPRIVATE+1) +#define SIOCAX25DELUID (SIOCPROTOPRIVATE+2) +#define SIOCAX25NOUID (SIOCPROTOPRIVATE+3) +#define SIOCAX25OPTRT (SIOCPROTOPRIVATE+7) +#define SIOCAX25CTLCON (SIOCPROTOPRIVATE+8) +#define SIOCAX25GETINFOOLD (SIOCPROTOPRIVATE+9) +#define SIOCAX25ADDFWD (SIOCPROTOPRIVATE+10) +#define SIOCAX25DELFWD (SIOCPROTOPRIVATE+11) +#define SIOCAX25DEVCTL (SIOCPROTOPRIVATE+12) +#define SIOCAX25GETINFO (SIOCPROTOPRIVATE+13) + +#define AX25_SET_RT_IPMODE 2 + +#define AX25_NOUID_DEFAULT 0 +#define AX25_NOUID_BLOCK 1 + +typedef struct { + char ax25_call[7]; /* 6 call + SSID (shifted ascii!) */ +} ax25_address; + +struct sockaddr_ax25 { + __kernel_sa_family_t sax25_family; + ax25_address sax25_call; + int sax25_ndigis; + /* Digipeater ax25_address sets follow */ +}; + +#define sax25_uid sax25_ndigis + +struct full_sockaddr_ax25 { + struct sockaddr_ax25 fsa_ax25; + ax25_address fsa_digipeater[AX25_MAX_DIGIS]; +}; + +struct ax25_routes_struct { + ax25_address port_addr; + ax25_address dest_addr; + unsigned char digi_count; + ax25_address digi_addr[AX25_MAX_DIGIS]; +}; + +struct ax25_route_opt_struct { + ax25_address port_addr; + ax25_address dest_addr; + int cmd; + int arg; +}; + +struct ax25_ctl_struct { + ax25_address port_addr; + ax25_address source_addr; + ax25_address dest_addr; + unsigned int cmd; + unsigned long arg; + unsigned char digi_count; + ax25_address digi_addr[AX25_MAX_DIGIS]; +}; + +/* this will go away. 
Please do not export to user land */ +struct ax25_info_struct_deprecated { + unsigned int n2, n2count; + unsigned int t1, t1timer; + unsigned int t2, t2timer; + unsigned int t3, t3timer; + unsigned int idle, idletimer; + unsigned int state; + unsigned int rcv_q, snd_q; +}; + +struct ax25_info_struct { + unsigned int n2, n2count; + unsigned int t1, t1timer; + unsigned int t2, t2timer; + unsigned int t3, t3timer; + unsigned int idle, idletimer; + unsigned int state; + unsigned int rcv_q, snd_q; + unsigned int vs, vr, va, vs_max; + unsigned int paclen; + unsigned int window; +}; + +struct ax25_fwd_struct { + ax25_address port_from; + ax25_address port_to; +}; + +#endif diff --git a/include/linux/b1lli.h b/include/linux/b1lli.h new file mode 100644 index 00000000..713f7126 --- /dev/null +++ b/include/linux/b1lli.h @@ -0,0 +1,73 @@ +/* $Id: b1lli.h,v 1.8.8.3 2001/09/23 22:25:05 kai Exp $ + * + * ISDN lowlevel-module for AVM B1-card. + * + * Copyright 1996 by Carsten Paeth (calle@calle.in-berlin.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ + +#ifndef _B1LLI_H_ +#define _B1LLI_H_ +/* + * struct for loading t4 file + */ +typedef struct avmb1_t4file { + int len; + unsigned char *data; +} avmb1_t4file; + +typedef struct avmb1_loaddef { + int contr; + avmb1_t4file t4file; +} avmb1_loaddef; + +typedef struct avmb1_loadandconfigdef { + int contr; + avmb1_t4file t4file; + avmb1_t4file t4config; +} avmb1_loadandconfigdef; + +typedef struct avmb1_resetdef { + int contr; +} avmb1_resetdef; + +typedef struct avmb1_getdef { + int contr; + int cardtype; + int cardstate; +} avmb1_getdef; + +/* + * struct for adding new cards + */ +typedef struct avmb1_carddef { + int port; + int irq; +} avmb1_carddef; + +#define AVM_CARDTYPE_B1 0 +#define AVM_CARDTYPE_T1 1 +#define AVM_CARDTYPE_M1 2 +#define AVM_CARDTYPE_M2 3 + +typedef struct avmb1_extcarddef { + int port; + int irq; + int cardtype; + int cardnr; /* for HEMA/T1 */ +} avmb1_extcarddef; + +#define AVMB1_LOAD 0 /* load image to card */ +#define AVMB1_ADDCARD 1 /* add a new card - OBSOLETE */ +#define AVMB1_RESETCARD 2 /* reset a card */ +#define AVMB1_LOAD_AND_CONFIG 3 /* load image and config to card */ +#define AVMB1_ADDCARD_WITH_TYPE 4 /* add a new card, with cardtype */ +#define AVMB1_GET_CARDINFO 5 /* get cardtype */ +#define AVMB1_REMOVECARD 6 /* remove a card - OBSOLETE */ + +#define AVMB1_REGISTERCARD_IS_OBSOLETE + +#endif /* _B1LLI_H_ */ diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h new file mode 100644 index 00000000..12a867c6 --- /dev/null +++ b/include/linux/b1pcmcia.h @@ -0,0 +1,21 @@ +/* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $ + * + * Exported functions of module b1pcmcia to be called by + * avm_cs card services module. + * + * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + */ + +#ifndef _B1PCMCIA_H_ +#define _B1PCMCIA_H_ + +int b1pcmcia_addcard_b1(unsigned int port, unsigned irq); +int b1pcmcia_addcard_m1(unsigned int port, unsigned irq); +int b1pcmcia_addcard_m2(unsigned int port, unsigned irq); +int b1pcmcia_delcard(unsigned int port, unsigned irq); + +#endif /* _B1PCMCIA_H_ */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h new file mode 100644 index 00000000..b1038bd6 --- /dev/null +++ b/include/linux/backing-dev.h @@ -0,0 +1,356 @@ +/* + * include/linux/backing-dev.h + * + * low-level device information and state which is propagated up through + * to high-level code. + */ + +#ifndef _LINUX_BACKING_DEV_H +#define _LINUX_BACKING_DEV_H + +#include <linux/percpu_counter.h> +#include <linux/log2.h> +#include <linux/proportions.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/writeback.h> +#include <linux/atomic.h> + +struct page; +struct device; +struct dentry; + +/* + * Bits in backing_dev_info.state + */ +enum bdi_state { + BDI_pending, /* On its way to being activated */ + BDI_wb_alloc, /* Default embedded wb allocated */ + BDI_async_congested, /* The async (write) queue is getting full */ + BDI_sync_congested, /* The sync queue is getting full */ + BDI_registered, /* bdi_register() was done */ + BDI_writeback_running, /* Writeback is in progress */ + BDI_unused, /* Available bits start here */ +}; + +typedef int (congested_fn)(void *, int); + +enum bdi_stat_item { + BDI_RECLAIMABLE, + BDI_WRITEBACK, + BDI_DIRTIED, + BDI_WRITTEN, + NR_BDI_STAT_ITEMS +}; + +#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) + +struct bdi_writeback { + struct backing_dev_info *bdi; /* our parent bdi */ + unsigned int nr; + + unsigned long last_old_flush; /* last old data flush */ + unsigned long last_active; /* last time bdi thread was active */ + + struct task_struct *task; /* writeback thread */ + struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */ + struct list_head b_dirty; /* dirty inodes */ + struct list_head b_io; /* parked for writeback */ + struct list_head b_more_io; /* parked for more writeback */ + spinlock_t list_lock; /* protects the b_* lists */ +}; + +struct backing_dev_info { + struct list_head bdi_list; + unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ + unsigned long state; /* Always use atomic bitops on this */ + unsigned int capabilities; /* Device capabilities */ + congested_fn *congested_fn; /* Function pointer if device is md/dm */ + void *congested_data; /* Pointer to aux data for congested func */ + + char *name; + + struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS]; + + unsigned long bw_time_stamp; /* last time write bw is updated */ + unsigned long dirtied_stamp; + unsigned long written_stamp; /* pages written at bw_time_stamp */ + unsigned long write_bandwidth; /* the estimated write bandwidth */ + unsigned long avg_write_bandwidth; /* further smoothed write bw */ + + /* + * The base dirty throttle rate, re-calculated on every 200ms. + * All the bdi tasks' dirty rate will be curbed under it. + * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit + * in small steps and is much more smooth/stable than the latter. 
+ */ + unsigned long dirty_ratelimit; + unsigned long balanced_dirty_ratelimit; + + struct prop_local_percpu completions; + int dirty_exceeded; + + unsigned int min_ratio; + unsigned int max_ratio, max_prop_frac; + + struct bdi_writeback wb; /* default writeback info for this bdi */ + spinlock_t wb_lock; /* protects work_list */ + + struct list_head work_list; + + struct device *dev; + + struct timer_list laptop_mode_wb_timer; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_dir; + struct dentry *debug_stats; +#endif +}; + +int bdi_init(struct backing_dev_info *bdi); +void bdi_destroy(struct backing_dev_info *bdi); + +int bdi_register(struct backing_dev_info *bdi, struct device *parent, + const char *fmt, ...); +int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); +void bdi_unregister(struct backing_dev_info *bdi); +int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); +void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, + enum wb_reason reason); +void bdi_start_background_writeback(struct backing_dev_info *bdi); +int bdi_writeback_thread(void *data); +int bdi_has_dirty_io(struct backing_dev_info *bdi); +void bdi_arm_supers_timer(void); +void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); +void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2); + +extern spinlock_t bdi_lock; +extern struct list_head bdi_list; +extern struct list_head bdi_pending_list; + +static inline int wb_has_dirty_io(struct bdi_writeback *wb) +{ + return !list_empty(&wb->b_dirty) || + !list_empty(&wb->b_io) || + !list_empty(&wb->b_more_io); +} + +static inline void __add_bdi_stat(struct backing_dev_info *bdi, + enum bdi_stat_item item, s64 amount) +{ + __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH); +} + +static inline void __inc_bdi_stat(struct backing_dev_info *bdi, + enum bdi_stat_item item) +{ + __add_bdi_stat(bdi, item, 1); +} + +static inline void inc_bdi_stat(struct backing_dev_info *bdi, + enum bdi_stat_item item) +{ + unsigned long flags; + + local_irq_save(flags); + __inc_bdi_stat(bdi, item); + local_irq_restore(flags); +} + +static inline void __dec_bdi_stat(struct backing_dev_info *bdi, + enum bdi_stat_item item) +{ + __add_bdi_stat(bdi, item, -1); +} + +static inline void dec_bdi_stat(struct backing_dev_info *bdi, + enum bdi_stat_item item) +{ + unsigned long flags; + + local_irq_save(flags); + __dec_bdi_stat(bdi, item); + local_irq_restore(flags); +} + +static inline s64 bdi_stat(struct backing_dev_info *bdi, + enum bdi_stat_item item) +{ + return percpu_counter_read_positive(&bdi->bdi_stat[item]); +} + +static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi, + enum bdi_stat_item item) +{ + return percpu_counter_sum_positive(&bdi->bdi_stat[item]); +} + +static inline s64 bdi_stat_sum(struct backing_dev_info *bdi, + enum bdi_stat_item item) +{ + s64 sum; + unsigned long flags; + + local_irq_save(flags); + sum = __bdi_stat_sum(bdi, item); + local_irq_restore(flags); + + return sum; +} + +extern void bdi_writeout_inc(struct backing_dev_info *bdi); + +/* + * maximal error of a stat counter. 
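+ *
+ * Illustrative pattern only (a sketch, not an API guarantee; thresh and
+ * the surrounding function are hypothetical): callers comparing the cheap
+ * approximate counter with a threshold may fall back to the exact but
+ * costly sum when the reading lies within this error:
+ *
+ *	if (bdi_stat(bdi, BDI_RECLAIMABLE) < thresh - bdi_stat_error(bdi))
+ *		return true;
+ *	return bdi_stat_sum(bdi, BDI_RECLAIMABLE) < thresh;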
+ */ +static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi) +{ +#ifdef CONFIG_SMP + return nr_cpu_ids * BDI_STAT_BATCH; +#else + return 1; +#endif +} + +int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio); +int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); + +/* + * Flags in backing_dev_info::capability + * + * The first three flags control whether dirty pages will contribute to the + * VM's accounting and whether writepages() should be called for dirty pages + * (something that would not, for example, be appropriate for ramfs) + * + * WARNING: these flags are closely related and should not normally be + * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these + * three flags into a single convenience macro. + * + * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting + * BDI_CAP_NO_WRITEBACK: Don't write pages back + * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages + * + * These flags let !MMU mmap() govern direct device mapping vs immediate + * copying more easily for MAP_PRIVATE, especially for ROM filesystems. + * + * BDI_CAP_MAP_COPY: Copy can be mapped (MAP_PRIVATE) + * BDI_CAP_MAP_DIRECT: Can be mapped directly (MAP_SHARED) + * BDI_CAP_READ_MAP: Can be mapped for reading + * BDI_CAP_WRITE_MAP: Can be mapped for writing + * BDI_CAP_EXEC_MAP: Can be mapped for execution + * + * BDI_CAP_SWAP_BACKED: Count shmem/tmpfs objects as swap-backed. + */ +#define BDI_CAP_NO_ACCT_DIRTY 0x00000001 +#define BDI_CAP_NO_WRITEBACK 0x00000002 +#define BDI_CAP_MAP_COPY 0x00000004 +#define BDI_CAP_MAP_DIRECT 0x00000008 +#define BDI_CAP_READ_MAP 0x00000010 +#define BDI_CAP_WRITE_MAP 0x00000020 +#define BDI_CAP_EXEC_MAP 0x00000040 +#define BDI_CAP_NO_ACCT_WB 0x00000080 +#define BDI_CAP_SWAP_BACKED 0x00000100 + +#define BDI_CAP_VMFLAGS \ + (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP) + +#define BDI_CAP_NO_ACCT_AND_WRITEBACK \ + (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) + +#if defined(VM_MAYREAD) && \ + (BDI_CAP_READ_MAP != VM_MAYREAD || \ + BDI_CAP_WRITE_MAP != VM_MAYWRITE || \ + BDI_CAP_EXEC_MAP != VM_MAYEXEC) +#error please change backing_dev_info::capabilities flags +#endif + +extern struct backing_dev_info default_backing_dev_info; +extern struct backing_dev_info noop_backing_dev_info; + +int writeback_in_progress(struct backing_dev_info *bdi); + +static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits) +{ + if (bdi->congested_fn) + return bdi->congested_fn(bdi->congested_data, bdi_bits); + return (bdi->state & bdi_bits); +} + +static inline int bdi_read_congested(struct backing_dev_info *bdi) +{ + return bdi_congested(bdi, 1 << BDI_sync_congested); +} + +static inline int bdi_write_congested(struct backing_dev_info *bdi) +{ + return bdi_congested(bdi, 1 << BDI_async_congested); +} + +static inline int bdi_rw_congested(struct backing_dev_info *bdi) +{ + return bdi_congested(bdi, (1 << BDI_sync_congested) | + (1 << BDI_async_congested)); +} + +enum { + BLK_RW_ASYNC = 0, + BLK_RW_SYNC = 1, +}; + +void clear_bdi_congested(struct backing_dev_info *bdi, int sync); +void set_bdi_congested(struct backing_dev_info *bdi, int sync); +long congestion_wait(int sync, long timeout); +long wait_iff_congested(struct zone *zone, int sync, long timeout); + +static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi) +{ + return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK); +} + +static inline bool bdi_cap_account_dirty(struct 
backing_dev_info *bdi) +{ + return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY); +} + +static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi) +{ + /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */ + return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB | + BDI_CAP_NO_WRITEBACK)); +} + +static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi) +{ + return bdi->capabilities & BDI_CAP_SWAP_BACKED; +} + +static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi) +{ + return bdi == &default_backing_dev_info; +} + +static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) +{ + return bdi_cap_writeback_dirty(mapping->backing_dev_info); +} + +static inline bool mapping_cap_account_dirty(struct address_space *mapping) +{ + return bdi_cap_account_dirty(mapping->backing_dev_info); +} + +static inline bool mapping_cap_swap_backed(struct address_space *mapping) +{ + return bdi_cap_swap_backed(mapping->backing_dev_info); +} + +static inline int bdi_sched_wait(void *word) +{ + schedule(); + return 0; +} + +#endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/backlight.h b/include/linux/backlight.h new file mode 100644 index 00000000..5ffc6dda --- /dev/null +++ b/include/linux/backlight.h @@ -0,0 +1,137 @@ +/* + * Backlight Lowlevel Control Abstraction + * + * Copyright (C) 2003,2004 Hewlett-Packard Company + * + */ + +#ifndef _LINUX_BACKLIGHT_H +#define _LINUX_BACKLIGHT_H + +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/notifier.h> + +/* Notes on locking: + * + * backlight_device->ops_lock is an internal backlight lock protecting the + * ops pointer and no code outside the core should need to touch it. + * + * Access to update_status() is serialised by the update_lock mutex since + * most drivers seem to need this and historically get it wrong. + * + * Most drivers don't need locking on their get_brightness() method. + * If yours does, you need to implement it in the driver. You can use the + * update_lock mutex if appropriate. + * + * Any other use of the locks below is probably wrong. + */ + +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY, + BACKLIGHT_UPDATE_SYSFS, +}; + +enum backlight_type { + BACKLIGHT_RAW = 1, + BACKLIGHT_PLATFORM, + BACKLIGHT_FIRMWARE, + BACKLIGHT_TYPE_MAX, +}; + +struct backlight_device; +struct fb_info; + +struct backlight_ops { + unsigned int options; + +#define BL_CORE_SUSPENDRESUME (1 << 0) + + /* Notify the backlight driver some property has changed */ + int (*update_status)(struct backlight_device *); + /* Return the current backlight brightness (accounting for power, + fb_blank etc.) */ + int (*get_brightness)(struct backlight_device *); + /* Check if given framebuffer device is the one bound to this backlight; + return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ + int (*check_fb)(struct backlight_device *, struct fb_info *); +}; + +/* This structure defines all the properties of a backlight */ +struct backlight_properties { + /* Current User requested brightness (0 - max_brightness) */ + int brightness; + /* Maximal value for brightness (read-only) */ + int max_brightness; + /* Current FB Power mode (0: full on, 1..3: power saving + modes; 4: full off), see FB_BLANK_XXX */ + int power; + /* FB Blanking active? 
(values as for power) */ + /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ + int fb_blank; + /* Backlight type */ + enum backlight_type type; + /* Flags used to signal drivers of state changes */ + /* Upper 4 bits are reserved for driver internal use */ + unsigned int state; + +#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ +#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ +#define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */ +#define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */ +#define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */ +#define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */ + +}; + +struct backlight_device { + /* Backlight properties */ + struct backlight_properties props; + + /* Serialise access to update_status method */ + struct mutex update_lock; + + /* This protects the 'ops' field. If 'ops' is NULL, the driver that + registered this device has been unloaded, and if class_get_devdata() + points to something in the body of that driver, it is also invalid. */ + struct mutex ops_lock; + const struct backlight_ops *ops; + + /* The framebuffer notifier block */ + struct notifier_block fb_notif; + + struct device dev; +}; + +static inline void backlight_update_status(struct backlight_device *bd) +{ + mutex_lock(&bd->update_lock); + if (bd->ops && bd->ops->update_status) + bd->ops->update_status(bd); + mutex_unlock(&bd->update_lock); +} + +extern struct backlight_device *backlight_device_register(const char *name, + struct device *dev, void *devdata, const struct backlight_ops *ops, + const struct backlight_properties *props); +extern void backlight_device_unregister(struct backlight_device *bd); +extern void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); + +#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) + +static inline void * bl_get_data(struct backlight_device *bl_dev) +{ + return dev_get_drvdata(&bl_dev->dev); +} + +struct generic_bl_info { + const char *name; + int max_intensity; + int default_intensity; + int limit_mask; + void (*set_bl_intensity)(int intensity); + void (*kick_battery)(void); +}; + +#endif diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h new file mode 100644 index 00000000..feb91219 --- /dev/null +++ b/include/linux/basic_mmio_gpio.h @@ -0,0 +1,72 @@ +/* + * Basic memory-mapped GPIO controllers. + * + * Copyright 2008 MontaVista Software, Inc. + * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __BASIC_MMIO_GPIO_H +#define __BASIC_MMIO_GPIO_H + +#include <linux/gpio.h> +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/spinlock_types.h> + +struct bgpio_pdata { + int base; + int ngpio; +}; + +struct device; + +struct bgpio_chip { + struct gpio_chip gc; + + unsigned long (*read_reg)(void __iomem *reg); + void (*write_reg)(void __iomem *reg, unsigned long data); + + void __iomem *reg_dat; + void __iomem *reg_set; + void __iomem *reg_clr; + void __iomem *reg_dir; + + /* Number of bits (GPIOs): <register width> * 8. */ + int bits; + + /* + * Some GPIO controllers work with the big-endian bits notation, + * e.g. 
in a 8-bits register, GPIO7 is the least significant bit. + */ + unsigned long (*pin2mask)(struct bgpio_chip *bgc, unsigned int pin); + + /* + * Used to lock bgpio_chip->data. Also, this is needed to keep + * shadowed and real data registers writes together. + */ + spinlock_t lock; + + /* Shadowed data register to clear/set bits safely. */ + unsigned long data; + + /* Shadowed direction registers to clear/set direction safely. */ + unsigned long dir; +}; + +static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc) +{ + return container_of(gc, struct bgpio_chip, gc); +} + +int bgpio_remove(struct bgpio_chip *bgc); +int bgpio_init(struct bgpio_chip *bgc, struct device *dev, + unsigned long sz, void __iomem *dat, void __iomem *set, + void __iomem *clr, void __iomem *dirout, void __iomem *dirin, + bool big_endian); + +#endif /* __BASIC_MMIO_GPIO_H */ diff --git a/include/linux/baycom.h b/include/linux/baycom.h new file mode 100644 index 00000000..81249e02 --- /dev/null +++ b/include/linux/baycom.h @@ -0,0 +1,39 @@ +/* + * The Linux BAYCOM driver for the Baycom serial 1200 baud modem + * and the parallel 9600 baud modem + * (C) 1997-1998 by Thomas Sailer, HB9JNX/AE4WA + */ + +#ifndef _BAYCOM_H +#define _BAYCOM_H + +/* -------------------------------------------------------------------- */ +/* + * structs for the IOCTL commands + */ + +struct baycom_debug_data { + unsigned long debug1; + unsigned long debug2; + long debug3; +}; + +struct baycom_ioctl { + int cmd; + union { + struct baycom_debug_data dbg; + } data; +}; + +/* -------------------------------------------------------------------- */ + +/* + * ioctl values change for baycom + */ +#define BAYCOMCTL_GETDEBUG 0x92 + +/* -------------------------------------------------------------------- */ + +#endif /* _BAYCOM_H */ + +/* --------------------------------------------------------------------- */ diff --git a/include/linux/bcd.h b/include/linux/bcd.h new file mode 100644 index 00000000..22ea563b --- /dev/null +++ b/include/linux/bcd.h @@ -0,0 +1,9 @@ +#ifndef _BCD_H +#define _BCD_H + +#include <linux/compiler.h> + +unsigned bcd2bin(unsigned char val) __attribute_const__; +unsigned char bin2bcd(unsigned val) __attribute_const__; + +#endif /* _BCD_H */ diff --git a/include/linux/bch.h b/include/linux/bch.h new file mode 100644 index 00000000..295b4ef1 --- /dev/null +++ b/include/linux/bch.h @@ -0,0 +1,79 @@ +/* + * Generic binary BCH encoding/decoding library + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Copyright © 2011 Parrot S.A. + * + * Author: Ivan Djelic <ivan.djelic@parrot.com> + * + * Description: + * + * This library provides runtime configurable encoding/decoding of binary + * Bose-Chaudhuri-Hocquenghem (BCH) codes. 
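+ *
+ * A minimal usage sketch (m=13/t=4 and the buffer sizes are illustrative;
+ * real callers must size the ecc buffer from bch->ecc_bytes and check
+ * return values):
+ *
+ *	struct bch_control *bch = init_bch(13, 4, 0);
+ *	uint8_t ecc[8] = {0};
+ *	unsigned int errloc[4];
+ *	int nerr;
+ *
+ *	encode_bch(bch, data, len, ecc);
+ *	... store or transmit data together with ecc ...
+ *	nerr = decode_bch(bch, data, len, ecc, NULL, NULL, errloc);
+ *	free_bch(bch);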
+*/ +#ifndef _BCH_H +#define _BCH_H + +#include <linux/types.h> + +/** + * struct bch_control - BCH control structure + * @m: Galois field order + * @n: maximum codeword size in bits (= 2^m-1) + * @t: error correction capability in bits + * @ecc_bits: ecc exact size in bits, i.e. generator polynomial degree (<=m*t) + * @ecc_bytes: ecc max size (m*t bits) in bytes + * @a_pow_tab: Galois field GF(2^m) exponentiation lookup table + * @a_log_tab: Galois field GF(2^m) log lookup table + * @mod8_tab: remainder generator polynomial lookup tables + * @ecc_buf: ecc parity words buffer + * @ecc_buf2: ecc parity words buffer + * @xi_tab: GF(2^m) base for solving degree 2 polynomial roots + * @syn: syndrome buffer + * @cache: log-based polynomial representation buffer + * @elp: error locator polynomial + * @poly_2t: temporary polynomials of degree 2t + */ +struct bch_control { + unsigned int m; + unsigned int n; + unsigned int t; + unsigned int ecc_bits; + unsigned int ecc_bytes; +/* private: */ + uint16_t *a_pow_tab; + uint16_t *a_log_tab; + uint32_t *mod8_tab; + uint32_t *ecc_buf; + uint32_t *ecc_buf2; + unsigned int *xi_tab; + unsigned int *syn; + int *cache; + struct gf_poly *elp; + struct gf_poly *poly_2t[4]; +}; + +struct bch_control *init_bch(int m, int t, unsigned int prim_poly); + +void free_bch(struct bch_control *bch); + +void encode_bch(struct bch_control *bch, const uint8_t *data, + unsigned int len, uint8_t *ecc); + +int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, + const uint8_t *recv_ecc, const uint8_t *calc_ecc, + const unsigned int *syn, unsigned int *errloc); + +#endif /* _BCH_H */ diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h new file mode 100644 index 00000000..5af9a075 --- /dev/null +++ b/include/linux/bcma/bcma.h @@ -0,0 +1,307 @@ +#ifndef LINUX_BCMA_H_ +#define LINUX_BCMA_H_ + +#include <linux/pci.h> +#include <linux/mod_devicetable.h> + +#include <linux/bcma/bcma_driver_chipcommon.h> +#include <linux/bcma/bcma_driver_pci.h> +#include <linux/bcma/bcma_driver_mips.h> +#include <linux/ssb/ssb.h> /* SPROM sharing */ + +#include "bcma_regs.h" + +struct bcma_device; +struct bcma_bus; + +enum bcma_hosttype { + BCMA_HOSTTYPE_PCI, + BCMA_HOSTTYPE_SDIO, + BCMA_HOSTTYPE_SOC, +}; + +struct bcma_chipinfo { + u16 id; + u8 rev; + u8 pkg; +}; + +enum bcma_clkmode { + BCMA_CLKMODE_FAST, + BCMA_CLKMODE_DYNAMIC, +}; + +struct bcma_host_ops { + u8 (*read8)(struct bcma_device *core, u16 offset); + u16 (*read16)(struct bcma_device *core, u16 offset); + u32 (*read32)(struct bcma_device *core, u16 offset); + void (*write8)(struct bcma_device *core, u16 offset, u8 value); + void (*write16)(struct bcma_device *core, u16 offset, u16 value); + void (*write32)(struct bcma_device *core, u16 offset, u32 value); +#ifdef CONFIG_BCMA_BLOCKIO + void (*block_read)(struct bcma_device *core, void *buffer, + size_t count, u16 offset, u8 reg_width); + void (*block_write)(struct bcma_device *core, const void *buffer, + size_t count, u16 offset, u8 reg_width); +#endif + /* Agent ops */ + u32 (*aread32)(struct bcma_device *core, u16 offset); + void (*awrite32)(struct bcma_device *core, u16 offset, u32 value); +}; + +/* Core manufacturers */ +#define BCMA_MANUF_ARM 0x43B +#define BCMA_MANUF_MIPS 0x4A7 +#define BCMA_MANUF_BCM 0x4BF + +/* Core class values. */ +#define BCMA_CL_SIM 0x0 +#define BCMA_CL_EROM 0x1 +#define BCMA_CL_CORESIGHT 0x9 +#define BCMA_CL_VERIF 0xB +#define BCMA_CL_OPTIMO 0xD +#define BCMA_CL_GEN 0xE +#define BCMA_CL_PRIMECELL 0xF + +/* Core-ID values. 
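+ * Drivers normally look a core up by one of these IDs; an illustrative
+ * sketch only (bus is whatever struct bcma_bus the caller already holds):
+ *
+ *	struct bcma_device *core = bcma_find_core(bus, BCMA_CORE_80211);
+ *
+ *	if (core && !bcma_core_is_enabled(core))
+ *		bcma_core_enable(core, 0);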
*/ +#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */ +#define BCMA_CORE_INVALID 0x700 +#define BCMA_CORE_CHIPCOMMON 0x800 +#define BCMA_CORE_ILINE20 0x801 +#define BCMA_CORE_SRAM 0x802 +#define BCMA_CORE_SDRAM 0x803 +#define BCMA_CORE_PCI 0x804 +#define BCMA_CORE_MIPS 0x805 +#define BCMA_CORE_ETHERNET 0x806 +#define BCMA_CORE_V90 0x807 +#define BCMA_CORE_USB11_HOSTDEV 0x808 +#define BCMA_CORE_ADSL 0x809 +#define BCMA_CORE_ILINE100 0x80A +#define BCMA_CORE_IPSEC 0x80B +#define BCMA_CORE_UTOPIA 0x80C +#define BCMA_CORE_PCMCIA 0x80D +#define BCMA_CORE_INTERNAL_MEM 0x80E +#define BCMA_CORE_MEMC_SDRAM 0x80F +#define BCMA_CORE_OFDM 0x810 +#define BCMA_CORE_EXTIF 0x811 +#define BCMA_CORE_80211 0x812 +#define BCMA_CORE_PHY_A 0x813 +#define BCMA_CORE_PHY_B 0x814 +#define BCMA_CORE_PHY_G 0x815 +#define BCMA_CORE_MIPS_3302 0x816 +#define BCMA_CORE_USB11_HOST 0x817 +#define BCMA_CORE_USB11_DEV 0x818 +#define BCMA_CORE_USB20_HOST 0x819 +#define BCMA_CORE_USB20_DEV 0x81A +#define BCMA_CORE_SDIO_HOST 0x81B +#define BCMA_CORE_ROBOSWITCH 0x81C +#define BCMA_CORE_PARA_ATA 0x81D +#define BCMA_CORE_SATA_XORDMA 0x81E +#define BCMA_CORE_ETHERNET_GBIT 0x81F +#define BCMA_CORE_PCIE 0x820 +#define BCMA_CORE_PHY_N 0x821 +#define BCMA_CORE_SRAM_CTL 0x822 +#define BCMA_CORE_MINI_MACPHY 0x823 +#define BCMA_CORE_ARM_1176 0x824 +#define BCMA_CORE_ARM_7TDMI 0x825 +#define BCMA_CORE_PHY_LP 0x826 +#define BCMA_CORE_PMU 0x827 +#define BCMA_CORE_PHY_SSN 0x828 +#define BCMA_CORE_SDIO_DEV 0x829 +#define BCMA_CORE_ARM_CM3 0x82A +#define BCMA_CORE_PHY_HT 0x82B +#define BCMA_CORE_MIPS_74K 0x82C +#define BCMA_CORE_MAC_GBIT 0x82D +#define BCMA_CORE_DDR12_MEM_CTL 0x82E +#define BCMA_CORE_PCIE_RC 0x82F /* PCIe Root Complex */ +#define BCMA_CORE_OCP_OCP_BRIDGE 0x830 +#define BCMA_CORE_SHARED_COMMON 0x831 +#define BCMA_CORE_OCP_AHB_BRIDGE 0x832 +#define BCMA_CORE_SPI_HOST 0x833 +#define BCMA_CORE_I2S 0x834 +#define BCMA_CORE_SDR_DDR1_MEM_CTL 0x835 /* SDR/DDR1 memory controller core */ +#define BCMA_CORE_SHIM 0x837 /* SHIM component in ubus/6362 */ +#define BCMA_CORE_DEFAULT 0xFFF + +#define BCMA_MAX_NR_CORES 16 + +struct bcma_device { + struct bcma_bus *bus; + struct bcma_device_id id; + + struct device dev; + struct device *dma_dev; + + unsigned int irq; + bool dev_registered; + + u8 core_index; + u8 core_unit; + + u32 addr; + u32 wrap; + + void __iomem *io_addr; + void __iomem *io_wrap; + + void *drvdata; + struct list_head list; +}; + +static inline void *bcma_get_drvdata(struct bcma_device *core) +{ + return core->drvdata; +} +static inline void bcma_set_drvdata(struct bcma_device *core, void *drvdata) +{ + core->drvdata = drvdata; +} + +struct bcma_driver { + const char *name; + const struct bcma_device_id *id_table; + + int (*probe)(struct bcma_device *dev); + void (*remove)(struct bcma_device *dev); + int (*suspend)(struct bcma_device *dev); + int (*resume)(struct bcma_device *dev); + void (*shutdown)(struct bcma_device *dev); + + struct device_driver drv; +}; +extern +int __bcma_driver_register(struct bcma_driver *drv, struct module *owner); +#define bcma_driver_register(drv) \ + __bcma_driver_register(drv, THIS_MODULE) + +extern void bcma_driver_unregister(struct bcma_driver *drv); + +/* Set a fallback SPROM. + * See kdoc at the function definition for complete documentation. */ +extern int bcma_arch_register_fallback_sprom( + int (*sprom_callback)(struct bcma_bus *bus, + struct ssb_sprom *out)); + +struct bcma_bus { + /* The MMIO area. 
*/ + void __iomem *mmio; + + const struct bcma_host_ops *ops; + + enum bcma_hosttype hosttype; + union { + /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */ + struct pci_dev *host_pci; + /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */ + struct sdio_func *host_sdio; + }; + + struct bcma_chipinfo chipinfo; + + struct bcma_device *mapped_core; + struct list_head cores; + u8 nr_cores; + u8 init_done:1; + u8 num; + + struct bcma_drv_cc drv_cc; + struct bcma_drv_pci drv_pci; + struct bcma_drv_mips drv_mips; + + /* We decided to share SPROM struct with SSB as long as we do not need + * any hacks for BCMA. This simplifies drivers code. */ + struct ssb_sprom sprom; +}; + +static inline u32 bcma_read8(struct bcma_device *core, u16 offset) +{ + return core->bus->ops->read8(core, offset); +} +static inline u32 bcma_read16(struct bcma_device *core, u16 offset) +{ + return core->bus->ops->read16(core, offset); +} +static inline u32 bcma_read32(struct bcma_device *core, u16 offset) +{ + return core->bus->ops->read32(core, offset); +} +static inline +void bcma_write8(struct bcma_device *core, u16 offset, u32 value) +{ + core->bus->ops->write8(core, offset, value); +} +static inline +void bcma_write16(struct bcma_device *core, u16 offset, u32 value) +{ + core->bus->ops->write16(core, offset, value); +} +static inline +void bcma_write32(struct bcma_device *core, u16 offset, u32 value) +{ + core->bus->ops->write32(core, offset, value); +} +#ifdef CONFIG_BCMA_BLOCKIO +static inline void bcma_block_read(struct bcma_device *core, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + core->bus->ops->block_read(core, buffer, count, offset, reg_width); +} +static inline void bcma_block_write(struct bcma_device *core, + const void *buffer, size_t count, + u16 offset, u8 reg_width) +{ + core->bus->ops->block_write(core, buffer, count, offset, reg_width); +} +#endif +static inline u32 bcma_aread32(struct bcma_device *core, u16 offset) +{ + return core->bus->ops->aread32(core, offset); +} +static inline +void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value) +{ + core->bus->ops->awrite32(core, offset, value); +} + +static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask) +{ + bcma_write32(cc, offset, bcma_read32(cc, offset) & mask); +} +static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set) +{ + bcma_write32(cc, offset, bcma_read32(cc, offset) | set); +} +static inline void bcma_maskset32(struct bcma_device *cc, + u16 offset, u32 mask, u32 set) +{ + bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set); +} +static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask) +{ + bcma_write16(cc, offset, bcma_read16(cc, offset) & mask); +} +static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set) +{ + bcma_write16(cc, offset, bcma_read16(cc, offset) | set); +} +static inline void bcma_maskset16(struct bcma_device *cc, + u16 offset, u16 mask, u16 set) +{ + bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set); +} + +extern struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid); +extern bool bcma_core_is_enabled(struct bcma_device *core); +extern void bcma_core_disable(struct bcma_device *core, u32 flags); +extern int bcma_core_enable(struct bcma_device *core, u32 flags); +extern void bcma_core_set_clockmode(struct bcma_device *core, + enum bcma_clkmode clkmode); +extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, + bool on); +#define 
BCMA_DMA_TRANSLATION_MASK 0xC0000000 +#define BCMA_DMA_TRANSLATION_NONE 0x00000000 +#define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */ +#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ +extern u32 bcma_core_dma_translation(struct bcma_device *core); + +#endif /* LINUX_BCMA_H_ */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h new file mode 100644 index 00000000..8bbfe31f --- /dev/null +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -0,0 +1,415 @@ +#ifndef LINUX_BCMA_DRIVER_CC_H_ +#define LINUX_BCMA_DRIVER_CC_H_ + +/** ChipCommon core registers. **/ +#define BCMA_CC_ID 0x0000 +#define BCMA_CC_ID_ID 0x0000FFFF +#define BCMA_CC_ID_ID_SHIFT 0 +#define BCMA_CC_ID_REV 0x000F0000 +#define BCMA_CC_ID_REV_SHIFT 16 +#define BCMA_CC_ID_PKG 0x00F00000 +#define BCMA_CC_ID_PKG_SHIFT 20 +#define BCMA_CC_ID_NRCORES 0x0F000000 +#define BCMA_CC_ID_NRCORES_SHIFT 24 +#define BCMA_CC_ID_TYPE 0xF0000000 +#define BCMA_CC_ID_TYPE_SHIFT 28 +#define BCMA_CC_CAP 0x0004 /* Capabilities */ +#define BCMA_CC_CAP_NRUART 0x00000003 /* # of UARTs */ +#define BCMA_CC_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */ +#define BCMA_CC_CAP_UARTCLK 0x00000018 /* UART clock select */ +#define BCMA_CC_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */ +#define BCMA_CC_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */ +#define BCMA_CC_CAP_EXTBUS 0x000000C0 /* External buses present */ +#define BCMA_CC_CAP_FLASHT 0x00000700 /* Flash Type */ +#define BCMA_CC_FLASHT_NONE 0x00000000 /* No flash */ +#define BCMA_CC_FLASHT_STSER 0x00000100 /* ST serial flash */ +#define BCMA_CC_FLASHT_ATSER 0x00000200 /* Atmel serial flash */ +#define BCMA_CC_FLASHT_NFLASH 0x00000200 +#define BCMA_CC_FLASHT_PARA 0x00000700 /* Parallel flash */ +#define BCMA_CC_CAP_PLLT 0x00038000 /* PLL Type */ +#define BCMA_PLLTYPE_NONE 0x00000000 +#define BCMA_PLLTYPE_1 0x00010000 /* 48Mhz base, 3 dividers */ +#define BCMA_PLLTYPE_2 0x00020000 /* 48Mhz, 4 dividers */ +#define BCMA_PLLTYPE_3 0x00030000 /* 25Mhz, 2 dividers */ +#define BCMA_PLLTYPE_4 0x00008000 /* 48Mhz, 4 dividers */ +#define BCMA_PLLTYPE_5 0x00018000 /* 25Mhz, 4 dividers */ +#define BCMA_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */ +#define BCMA_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */ +#define BCMA_CC_CAP_PCTL 0x00040000 /* Power Control */ +#define BCMA_CC_CAP_OTPS 0x00380000 /* OTP size */ +#define BCMA_CC_CAP_OTPS_SHIFT 19 +#define BCMA_CC_CAP_OTPS_BASE 5 +#define BCMA_CC_CAP_JTAGM 0x00400000 /* JTAG master present */ +#define BCMA_CC_CAP_BROM 0x00800000 /* Internal boot ROM active */ +#define BCMA_CC_CAP_64BIT 0x08000000 /* 64-bit Backplane */ +#define BCMA_CC_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */ +#define BCMA_CC_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */ +#define BCMA_CC_CAP_SPROM 0x40000000 /* SPROM present */ +#define BCMA_CC_CORECTL 0x0008 +#define BCMA_CC_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */ +#define BCMA_CC_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ +#define BCMA_CC_CORECTL_UARTCLKEN 0x00000008 /* UART clock enable (rev >= 21) */ +#define BCMA_CC_BIST 0x000C +#define BCMA_CC_OTPS 0x0010 /* OTP status */ +#define BCMA_CC_OTPS_PROGFAIL 0x80000000 +#define BCMA_CC_OTPS_PROTECT 0x00000007 +#define BCMA_CC_OTPS_HW_PROTECT 0x00000001 +#define BCMA_CC_OTPS_SW_PROTECT 0x00000002 +#define BCMA_CC_OTPS_CID_PROTECT 0x00000004 +#define 
BCMA_CC_OTPS_GU_PROG_IND 0x00000F00 /* General Use programmed indication */ +#define BCMA_CC_OTPS_GU_PROG_IND_SHIFT 8 +#define BCMA_CC_OTPS_GU_PROG_HW 0x00000100 /* HW region programmed */ +#define BCMA_CC_OTPC 0x0014 /* OTP control */ +#define BCMA_CC_OTPC_RECWAIT 0xFF000000 +#define BCMA_CC_OTPC_PROGWAIT 0x00FFFF00 +#define BCMA_CC_OTPC_PRW_SHIFT 8 +#define BCMA_CC_OTPC_MAXFAIL 0x00000038 +#define BCMA_CC_OTPC_VSEL 0x00000006 +#define BCMA_CC_OTPC_SELVL 0x00000001 +#define BCMA_CC_OTPP 0x0018 /* OTP prog */ +#define BCMA_CC_OTPP_COL 0x000000FF +#define BCMA_CC_OTPP_ROW 0x0000FF00 +#define BCMA_CC_OTPP_ROW_SHIFT 8 +#define BCMA_CC_OTPP_READERR 0x10000000 +#define BCMA_CC_OTPP_VALUE 0x20000000 +#define BCMA_CC_OTPP_READ 0x40000000 +#define BCMA_CC_OTPP_START 0x80000000 +#define BCMA_CC_OTPP_BUSY 0x80000000 +#define BCMA_CC_OTPL 0x001C /* OTP layout */ +#define BCMA_CC_OTPL_GURGN_OFFSET 0x00000FFF /* offset of general use region */ +#define BCMA_CC_IRQSTAT 0x0020 +#define BCMA_CC_IRQMASK 0x0024 +#define BCMA_CC_IRQ_GPIO 0x00000001 /* gpio intr */ +#define BCMA_CC_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */ +#define BCMA_CC_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */ +#define BCMA_CC_CHIPCTL 0x0028 /* Rev >= 11 only */ +#define BCMA_CC_CHIPSTAT 0x002C /* Rev >= 11 only */ +#define BCMA_CC_CHIPST_4313_SPROM_PRESENT 1 +#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2 +#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2 +#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4 +#define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */ +#define BCMA_CC_JCMD_START 0x80000000 +#define BCMA_CC_JCMD_BUSY 0x80000000 +#define BCMA_CC_JCMD_PAUSE 0x40000000 +#define BCMA_CC_JCMD0_ACC_MASK 0x0000F000 +#define BCMA_CC_JCMD0_ACC_IRDR 0x00000000 +#define BCMA_CC_JCMD0_ACC_DR 0x00001000 +#define BCMA_CC_JCMD0_ACC_IR 0x00002000 +#define BCMA_CC_JCMD0_ACC_RESET 0x00003000 +#define BCMA_CC_JCMD0_ACC_IRPDR 0x00004000 +#define BCMA_CC_JCMD0_ACC_PDR 0x00005000 +#define BCMA_CC_JCMD0_IRW_MASK 0x00000F00 +#define BCMA_CC_JCMD_ACC_MASK 0x000F0000 /* Changes for corerev 11 */ +#define BCMA_CC_JCMD_ACC_IRDR 0x00000000 +#define BCMA_CC_JCMD_ACC_DR 0x00010000 +#define BCMA_CC_JCMD_ACC_IR 0x00020000 +#define BCMA_CC_JCMD_ACC_RESET 0x00030000 +#define BCMA_CC_JCMD_ACC_IRPDR 0x00040000 +#define BCMA_CC_JCMD_ACC_PDR 0x00050000 +#define BCMA_CC_JCMD_IRW_MASK 0x00001F00 +#define BCMA_CC_JCMD_IRW_SHIFT 8 +#define BCMA_CC_JCMD_DRW_MASK 0x0000003F +#define BCMA_CC_JIR 0x0034 /* Rev >= 10 only */ +#define BCMA_CC_JDR 0x0038 /* Rev >= 10 only */ +#define BCMA_CC_JCTL 0x003C /* Rev >= 10 only */ +#define BCMA_CC_JCTL_FORCE_CLK 4 /* Force clock */ +#define BCMA_CC_JCTL_EXT_EN 2 /* Enable external targets */ +#define BCMA_CC_JCTL_EN 1 /* Enable Jtag master */ +#define BCMA_CC_FLASHCTL 0x0040 +#define BCMA_CC_FLASHCTL_START 0x80000000 +#define BCMA_CC_FLASHCTL_BUSY BCMA_CC_FLASHCTL_START +#define BCMA_CC_FLASHADDR 0x0044 +#define BCMA_CC_FLASHDATA 0x0048 +#define BCMA_CC_BCAST_ADDR 0x0050 +#define BCMA_CC_BCAST_DATA 0x0054 +#define BCMA_CC_GPIOPULLUP 0x0058 /* Rev >= 20 only */ +#define BCMA_CC_GPIOPULLDOWN 0x005C /* Rev >= 20 only */ +#define BCMA_CC_GPIOIN 0x0060 +#define BCMA_CC_GPIOOUT 0x0064 +#define BCMA_CC_GPIOOUTEN 0x0068 +#define BCMA_CC_GPIOCTL 0x006C +#define BCMA_CC_GPIOPOL 0x0070 +#define BCMA_CC_GPIOIRQ 0x0074 +#define BCMA_CC_WATCHDOG 0x0080 +#define BCMA_CC_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */ +#define BCMA_CC_GPIOTIMER_OFFTIME 0x0000FFFF +#define BCMA_CC_GPIOTIMER_OFFTIME_SHIFT 0 +#define BCMA_CC_GPIOTIMER_ONTIME 
0xFFFF0000 +#define BCMA_CC_GPIOTIMER_ONTIME_SHIFT 16 +#define BCMA_CC_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */ +#define BCMA_CC_CLOCK_N 0x0090 +#define BCMA_CC_CLOCK_SB 0x0094 +#define BCMA_CC_CLOCK_PCI 0x0098 +#define BCMA_CC_CLOCK_M2 0x009C +#define BCMA_CC_CLOCK_MIPS 0x00A0 +#define BCMA_CC_CLKDIV 0x00A4 /* Rev >= 3 only */ +#define BCMA_CC_CLKDIV_SFLASH 0x0F000000 +#define BCMA_CC_CLKDIV_SFLASH_SHIFT 24 +#define BCMA_CC_CLKDIV_OTP 0x000F0000 +#define BCMA_CC_CLKDIV_OTP_SHIFT 16 +#define BCMA_CC_CLKDIV_JTAG 0x00000F00 +#define BCMA_CC_CLKDIV_JTAG_SHIFT 8 +#define BCMA_CC_CLKDIV_UART 0x000000FF +#define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */ +#define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */ +#define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ +#define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ +#define BCMA_CC_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */ +#define BCMA_CC_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */ +#define BCMA_CC_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */ +#define BCMA_CC_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */ +#define BCMA_CC_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define BCMA_CC_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */ +#define BCMA_CC_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */ +#define BCMA_CC_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */ +#define BCMA_CC_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */ +#define BCMA_CC_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ +#define BCMA_CC_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ +#define BCMA_CC_SLOWCLKCTL_CLKDIV_SHIFT 16 +#define BCMA_CC_SYSCLKCTL 0x00C0 /* Rev >= 3 only */ +#define BCMA_CC_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */ +#define BCMA_CC_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */ +#define BCMA_CC_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */ +#define BCMA_CC_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set */ +#define BCMA_CC_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */ +#define BCMA_CC_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */ +#define BCMA_CC_SYSCLKCTL_CLKDIV_SHIFT 16 +#define BCMA_CC_CLKSTSTR 0x00C4 /* Rev >= 3 only */ +#define BCMA_CC_EROM 0x00FC +#define BCMA_CC_PCMCIA_CFG 0x0100 +#define BCMA_CC_PCMCIA_MEMWAIT 0x0104 +#define BCMA_CC_PCMCIA_ATTRWAIT 0x0108 +#define BCMA_CC_PCMCIA_IOWAIT 0x010C +#define BCMA_CC_IDE_CFG 0x0110 +#define BCMA_CC_IDE_MEMWAIT 0x0114 +#define BCMA_CC_IDE_ATTRWAIT 0x0118 +#define BCMA_CC_IDE_IOWAIT 0x011C +#define BCMA_CC_PROG_CFG 0x0120 +#define BCMA_CC_PROG_WAITCNT 0x0124 +#define BCMA_CC_FLASH_CFG 0x0128 +#define BCMA_CC_FLASH_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */ +#define BCMA_CC_FLASH_WAITCNT 0x012C +#define BCMA_CC_SROM_CONTROL 0x0190 +#define BCMA_CC_SROM_CONTROL_START 0x80000000 +#define BCMA_CC_SROM_CONTROL_BUSY 0x80000000 +#define BCMA_CC_SROM_CONTROL_OPCODE 0x60000000 +#define BCMA_CC_SROM_CONTROL_OP_READ 0x00000000 +#define BCMA_CC_SROM_CONTROL_OP_WRITE 0x20000000 +#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000 +#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000 +#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010 +#define 
BCMA_CC_SROM_CONTROL_LOCK 0x00000008 +#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006 +#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000 +#define BCMA_CC_SROM_CONTROL_SIZE_4K 0x00000002 +#define BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004 +#define BCMA_CC_SROM_CONTROL_SIZE_SHIFT 1 +#define BCMA_CC_SROM_CONTROL_PRESENT 0x00000001 +/* 0x1E0 is defined as shared BCMA_CLKCTLST */ +#define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ +#define BCMA_CC_UART0_DATA 0x0300 +#define BCMA_CC_UART0_IMR 0x0304 +#define BCMA_CC_UART0_FCR 0x0308 +#define BCMA_CC_UART0_LCR 0x030C +#define BCMA_CC_UART0_MCR 0x0310 +#define BCMA_CC_UART0_LSR 0x0314 +#define BCMA_CC_UART0_MSR 0x0318 +#define BCMA_CC_UART0_SCRATCH 0x031C +#define BCMA_CC_UART1_DATA 0x0400 +#define BCMA_CC_UART1_IMR 0x0404 +#define BCMA_CC_UART1_FCR 0x0408 +#define BCMA_CC_UART1_LCR 0x040C +#define BCMA_CC_UART1_MCR 0x0410 +#define BCMA_CC_UART1_LSR 0x0414 +#define BCMA_CC_UART1_MSR 0x0418 +#define BCMA_CC_UART1_SCRATCH 0x041C +/* PMU registers (rev >= 20) */ +#define BCMA_CC_PMU_CTL 0x0600 /* PMU control */ +#define BCMA_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ +#define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16 +#define BCMA_CC_PMU_CTL_PLL_UPD 0x00000400 +#define BCMA_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ +#define BCMA_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ +#define BCMA_CC_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ +#define BCMA_CC_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */ +#define BCMA_CC_PMU_CTL_XTALFREQ_SHIFT 2 +#define BCMA_CC_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */ +#define BCMA_CC_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */ +#define BCMA_CC_PMU_CAP 0x0604 /* PMU capabilities */ +#define BCMA_CC_PMU_CAP_REVISION 0x000000FF /* Revision mask */ +#define BCMA_CC_PMU_STAT 0x0608 /* PMU status */ +#define BCMA_CC_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ +#define BCMA_CC_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? 
*/ +#define BCMA_CC_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ +#define BCMA_CC_PMU_STAT_HAVEHT 0x00000004 /* HT available */ +#define BCMA_CC_PMU_STAT_RESINIT 0x00000003 /* Res init */ +#define BCMA_CC_PMU_RES_STAT 0x060C /* PMU res status */ +#define BCMA_CC_PMU_RES_PEND 0x0610 /* PMU res pending */ +#define BCMA_CC_PMU_TIMER 0x0614 /* PMU timer */ +#define BCMA_CC_PMU_MINRES_MSK 0x0618 /* PMU min res mask */ +#define BCMA_CC_PMU_MAXRES_MSK 0x061C /* PMU max res mask */ +#define BCMA_CC_PMU_RES_TABSEL 0x0620 /* PMU res table sel */ +#define BCMA_CC_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */ +#define BCMA_CC_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */ +#define BCMA_CC_PMU_RES_TIMER 0x062C /* PMU res timer */ +#define BCMA_CC_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */ +#define BCMA_CC_PMU_WATCHDOG 0x0634 /* PMU watchdog */ +#define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ +#define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */ +#define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */ +#define BCMA_CC_CHIPCTL_ADDR 0x0650 +#define BCMA_CC_CHIPCTL_DATA 0x0654 +#define BCMA_CC_REGCTL_ADDR 0x0658 +#define BCMA_CC_REGCTL_DATA 0x065C +#define BCMA_CC_PLLCTL_ADDR 0x0660 +#define BCMA_CC_PLLCTL_DATA 0x0664 +#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */ + +/* Divider allocation in 4716/47162/5356 */ +#define BCMA_CC_PMU5_MAINPLL_CPU 1 +#define BCMA_CC_PMU5_MAINPLL_MEM 2 +#define BCMA_CC_PMU5_MAINPLL_SSB 3 + +/* PLL usage in 4716/47162 */ +#define BCMA_CC_PMU4716_MAINPLL_PLL0 12 + +/* PLL usage in 5356/5357 */ +#define BCMA_CC_PMU5356_MAINPLL_PLL0 0 +#define BCMA_CC_PMU5357_MAINPLL_PLL0 0 + +/* 4706 PMU */ +#define BCMA_CC_PMU4706_MAINPLL_PLL0 0 + +/* ALP clock on pre-PMU chips */ +#define BCMA_CC_PMU_ALP_CLOCK 20000000 +/* HT clock for systems with PMU-enabled chipcommon */ +#define BCMA_CC_PMU_HT_CLOCK 80000000 + +/* PMU rev 5 (& 6) */ +#define BCMA_CC_PPL_P1P2_OFF 0 +#define BCMA_CC_PPL_P1_MASK 0x0f000000 +#define BCMA_CC_PPL_P1_SHIFT 24 +#define BCMA_CC_PPL_P2_MASK 0x00f00000 +#define BCMA_CC_PPL_P2_SHIFT 20 +#define BCMA_CC_PPL_M14_OFF 1 +#define BCMA_CC_PPL_MDIV_MASK 0x000000ff +#define BCMA_CC_PPL_MDIV_WIDTH 8 +#define BCMA_CC_PPL_NM5_OFF 2 +#define BCMA_CC_PPL_NDIV_MASK 0xfff00000 +#define BCMA_CC_PPL_NDIV_SHIFT 20 +#define BCMA_CC_PPL_FMAB_OFF 3 +#define BCMA_CC_PPL_MRAT_MASK 0xf0000000 +#define BCMA_CC_PPL_MRAT_SHIFT 28 +#define BCMA_CC_PPL_ABRAT_MASK 0x08000000 +#define BCMA_CC_PPL_ABRAT_SHIFT 27 +#define BCMA_CC_PPL_FDIV_MASK 0x07ffffff +#define BCMA_CC_PPL_PLLCTL_OFF 4 +#define BCMA_CC_PPL_PCHI_OFF 5 +#define BCMA_CC_PPL_PCHI_MASK 0x0000003f + +/* BCM4331 ChipControl numbers. 
*/ +#define BCMA_CHIPCTL_4331_BT_COEXIST BIT(0) /* 0 disable */ +#define BCMA_CHIPCTL_4331_SECI BIT(1) /* 0 SECI is disabled (JATG functional) */ +#define BCMA_CHIPCTL_4331_EXT_LNA BIT(2) /* 0 disable */ +#define BCMA_CHIPCTL_4331_SPROM_GPIO13_15 BIT(3) /* sprom/gpio13-15 mux */ +#define BCMA_CHIPCTL_4331_EXTPA_EN BIT(4) /* 0 ext pa disable, 1 ext pa enabled */ +#define BCMA_CHIPCTL_4331_GPIOCLK_ON_SPROMCS BIT(5) /* set drive out GPIO_CLK on sprom_cs pin */ +#define BCMA_CHIPCTL_4331_PCIE_MDIO_ON_SPROMCS BIT(6) /* use sprom_cs pin as PCIE mdio interface */ +#define BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5 BIT(7) /* aband extpa will be at gpio2/5 and sprom_dout */ +#define BCMA_CHIPCTL_4331_OVR_PIPEAUXCLKEN BIT(8) /* override core control on pipe_AuxClkEnable */ +#define BCMA_CHIPCTL_4331_OVR_PIPEAUXPWRDOWN BIT(9) /* override core control on pipe_AuxPowerDown */ +#define BCMA_CHIPCTL_4331_PCIE_AUXCLKEN BIT(10) /* pcie_auxclkenable */ +#define BCMA_CHIPCTL_4331_PCIE_PIPE_PLLDOWN BIT(11) /* pcie_pipe_pllpowerdown */ +#define BCMA_CHIPCTL_4331_BT_SHD0_ON_GPIO4 BIT(16) /* enable bt_shd0 at gpio4 */ +#define BCMA_CHIPCTL_4331_BT_SHD1_ON_GPIO5 BIT(17) /* enable bt_shd1 at gpio5 */ + +/* Data for the PMU, if available. + * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) + */ +struct bcma_chipcommon_pmu { + u8 rev; /* PMU revision */ + u32 crystalfreq; /* The active crystal frequency (in kHz) */ +}; + +#ifdef CONFIG_BCMA_DRIVER_MIPS +struct bcma_pflash { + u8 buswidth; + u32 window; + u32 window_size; +}; + +struct bcma_serial_port { + void *regs; + unsigned long clockspeed; + unsigned int irq; + unsigned int baud_base; + unsigned int reg_shift; +}; +#endif /* CONFIG_BCMA_DRIVER_MIPS */ + +struct bcma_drv_cc { + struct bcma_device *core; + u32 status; + u32 capabilities; + u32 capabilities_ext; + u8 setup_done:1; + /* Fast Powerup Delay constant */ + u16 fast_pwrup_delay; + struct bcma_chipcommon_pmu pmu; +#ifdef CONFIG_BCMA_DRIVER_MIPS + struct bcma_pflash pflash; + + int nr_serial_ports; + struct bcma_serial_port serial_ports[4]; +#endif /* CONFIG_BCMA_DRIVER_MIPS */ +}; + +/* Register access */ +#define bcma_cc_read32(cc, offset) \ + bcma_read32((cc)->core, offset) +#define bcma_cc_write32(cc, offset, val) \ + bcma_write32((cc)->core, offset, val) + +#define bcma_cc_mask32(cc, offset, mask) \ + bcma_cc_write32(cc, offset, bcma_cc_read32(cc, offset) & (mask)) +#define bcma_cc_set32(cc, offset, set) \ + bcma_cc_write32(cc, offset, bcma_cc_read32(cc, offset) | (set)) +#define bcma_cc_maskset32(cc, offset, mask, set) \ + bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) + +extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); + +extern void bcma_chipco_suspend(struct bcma_drv_cc *cc); +extern void bcma_chipco_resume(struct bcma_drv_cc *cc); + +void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); + +extern void bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, + u32 ticks); + +void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value); + +u32 bcma_chipco_irq_status(struct bcma_drv_cc *cc, u32 mask); + +/* Chipcommon GPIO pin access. 
*/ +u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask); +u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value); +u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value); + +/* PMU support */ +extern void bcma_pmu_init(struct bcma_drv_cc *cc); + +extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, + u32 value); +extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, + u32 mask, u32 set); +extern void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc, + u32 offset, u32 mask, u32 set); +extern void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, + u32 offset, u32 mask, u32 set); + +#endif /* LINUX_BCMA_DRIVER_CC_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h new file mode 100644 index 00000000..c0043645 --- /dev/null +++ b/include/linux/bcma/bcma_driver_mips.h @@ -0,0 +1,51 @@ +#ifndef LINUX_BCMA_DRIVER_MIPS_H_ +#define LINUX_BCMA_DRIVER_MIPS_H_ + +#define BCMA_MIPS_IPSFLAG 0x0F08 +/* which sbflags get routed to mips interrupt 1 */ +#define BCMA_MIPS_IPSFLAG_IRQ1 0x0000003F +#define BCMA_MIPS_IPSFLAG_IRQ1_SHIFT 0 +/* which sbflags get routed to mips interrupt 2 */ +#define BCMA_MIPS_IPSFLAG_IRQ2 0x00003F00 +#define BCMA_MIPS_IPSFLAG_IRQ2_SHIFT 8 +/* which sbflags get routed to mips interrupt 3 */ +#define BCMA_MIPS_IPSFLAG_IRQ3 0x003F0000 +#define BCMA_MIPS_IPSFLAG_IRQ3_SHIFT 16 +/* which sbflags get routed to mips interrupt 4 */ +#define BCMA_MIPS_IPSFLAG_IRQ4 0x3F000000 +#define BCMA_MIPS_IPSFLAG_IRQ4_SHIFT 24 + +/* MIPS 74K core registers */ +#define BCMA_MIPS_MIPS74K_CORECTL 0x0000 +#define BCMA_MIPS_MIPS74K_EXCEPTBASE 0x0004 +#define BCMA_MIPS_MIPS74K_BIST 0x000C +#define BCMA_MIPS_MIPS74K_INTMASK_INT0 0x0014 +#define BCMA_MIPS_MIPS74K_INTMASK(int) \ + ((int) * 4 + BCMA_MIPS_MIPS74K_INTMASK_INT0) +#define BCMA_MIPS_MIPS74K_NMIMASK 0x002C +#define BCMA_MIPS_MIPS74K_GPIOSEL 0x0040 +#define BCMA_MIPS_MIPS74K_GPIOOUT 0x0044 +#define BCMA_MIPS_MIPS74K_GPIOEN 0x0048 +#define BCMA_MIPS_MIPS74K_CLKCTLST 0x01E0 + +#define BCMA_MIPS_OOBSELOUTA30 0x100 + +struct bcma_device; + +struct bcma_drv_mips { + struct bcma_device *core; + u8 setup_done:1; + unsigned int assigned_irqs; +}; + +#ifdef CONFIG_BCMA_DRIVER_MIPS +extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); +#else +static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } +#endif + +extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore); + +extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); + +#endif /* LINUX_BCMA_DRIVER_MIPS_H_ */ diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h new file mode 100644 index 00000000..46c71e27 --- /dev/null +++ b/include/linux/bcma/bcma_driver_pci.h @@ -0,0 +1,214 @@ +#ifndef LINUX_BCMA_DRIVER_PCI_H_ +#define LINUX_BCMA_DRIVER_PCI_H_ + +#include <linux/types.h> + +struct pci_dev; + +/** PCI core registers. 
**/ +#define BCMA_CORE_PCI_CTL 0x0000 /* PCI Control */ +#define BCMA_CORE_PCI_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */ +#define BCMA_CORE_PCI_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */ +#define BCMA_CORE_PCI_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */ +#define BCMA_CORE_PCI_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */ +#define BCMA_CORE_PCI_ARBCTL 0x0010 /* PCI Arbiter Control */ +#define BCMA_CORE_PCI_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */ +#define BCMA_CORE_PCI_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */ +#define BCMA_CORE_PCI_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_4710 0x00000002 /* 4710 */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */ +#define BCMA_CORE_PCI_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */ +#define BCMA_CORE_PCI_ISTAT 0x0020 /* Interrupt status */ +#define BCMA_CORE_PCI_ISTAT_INTA 0x00000001 /* PCI INTA# */ +#define BCMA_CORE_PCI_ISTAT_INTB 0x00000002 /* PCI INTB# */ +#define BCMA_CORE_PCI_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */ +#define BCMA_CORE_PCI_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */ +#define BCMA_CORE_PCI_ISTAT_PME 0x00000010 /* PCI PME# */ +#define BCMA_CORE_PCI_IMASK 0x0024 /* Interrupt mask */ +#define BCMA_CORE_PCI_IMASK_INTA 0x00000001 /* PCI INTA# */ +#define BCMA_CORE_PCI_IMASK_INTB 0x00000002 /* PCI INTB# */ +#define BCMA_CORE_PCI_IMASK_SERR 0x00000004 /* PCI SERR# */ +#define BCMA_CORE_PCI_IMASK_PERR 0x00000008 /* PCI PERR# */ +#define BCMA_CORE_PCI_IMASK_PME 0x00000010 /* PCI PME# */ +#define BCMA_CORE_PCI_MBOX 0x0028 /* Backplane to PCI Mailbox */ +#define BCMA_CORE_PCI_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */ +#define BCMA_CORE_PCI_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */ +#define BCMA_CORE_PCI_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */ +#define BCMA_CORE_PCI_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */ +#define BCMA_CORE_PCI_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */ +#define BCMA_CORE_PCI_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */ +#define BCMA_CORE_PCI_BCAST_ADDR_MASK 0x000000FF +#define BCMA_CORE_PCI_BCAST_DATA 0x0054 /* Backplane Broadcast Data */ +#define BCMA_CORE_PCI_GPIO_IN 0x0060 /* rev >= 2 only */ +#define BCMA_CORE_PCI_GPIO_OUT 0x0064 /* rev >= 2 only */ +#define BCMA_CORE_PCI_GPIO_ENABLE 0x0068 /* rev >= 2 only */ +#define BCMA_CORE_PCI_GPIO_CTL 0x006C /* rev >= 2 only */ +#define BCMA_CORE_PCI_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */ +#define BCMA_CORE_PCI_SBTOPCI0_MASK 0xFC000000 +#define BCMA_CORE_PCI_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */ +#define BCMA_CORE_PCI_SBTOPCI1_MASK 0xFC000000 +#define BCMA_CORE_PCI_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */ +#define BCMA_CORE_PCI_SBTOPCI2_MASK 0xC0000000 +#define BCMA_CORE_PCI_CONFIG_ADDR 0x0120 /* pcie config space access */ +#define BCMA_CORE_PCI_CONFIG_DATA 0x0124 /* pcie config space access */ +#define BCMA_CORE_PCI_MDIO_CONTROL 0x0128 /* controls the mdio access */ +#define BCMA_CORE_PCI_MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */ +#define 
BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL 0x2 +#define BCMA_CORE_PCI_MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequnce */ +#define BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE 0x100 /* Tranaction complete */ +#define BCMA_CORE_PCI_MDIO_DATA 0x012c /* Data to the mdio access */ +#define BCMA_CORE_PCI_MDIODATA_MASK 0x0000ffff /* data 2 bytes */ +#define BCMA_CORE_PCI_MDIODATA_TA 0x00020000 /* Turnaround */ +#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */ +#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */ +#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */ +#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */ +#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF 18 /* Regaddr shift */ +#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */ +#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */ +#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */ +#define BCMA_CORE_PCI_MDIODATA_WRITE 0x10000000 /* write Transaction */ +#define BCMA_CORE_PCI_MDIODATA_READ 0x20000000 /* Read Transaction */ +#define BCMA_CORE_PCI_MDIODATA_START 0x40000000 /* start of Transaction */ +#define BCMA_CORE_PCI_MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */ +#define BCMA_CORE_PCI_MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */ +#define BCMA_CORE_PCI_MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */ +#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */ +#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */ +#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */ +#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal regsiter */ +#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */ +#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */ +#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */ +#define BCMA_CORE_PCI_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */ +#define BCMA_CORE_PCI_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */ +#define BCMA_CORE_PCI_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */ + +/* SBtoPCIx */ +#define BCMA_CORE_PCI_SBTOPCI_MEM 0x00000000 +#define BCMA_CORE_PCI_SBTOPCI_IO 0x00000001 +#define BCMA_CORE_PCI_SBTOPCI_CFG0 0x00000002 +#define BCMA_CORE_PCI_SBTOPCI_CFG1 0x00000003 +#define BCMA_CORE_PCI_SBTOPCI_PREF 0x00000004 /* Prefetch enable */ +#define BCMA_CORE_PCI_SBTOPCI_BURST 0x00000008 /* Burst enable */ +#define BCMA_CORE_PCI_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */ +#define BCMA_CORE_PCI_SBTOPCI_RC 0x00000030 /* Read Command mask (rev >= 11) */ +#define BCMA_CORE_PCI_SBTOPCI_RC_READ 0x00000000 /* Memory read */ +#define BCMA_CORE_PCI_SBTOPCI_RC_READL 0x00000010 /* Memory read line */ +#define BCMA_CORE_PCI_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */ + +/* PCIE protocol PHY diagnostic registers */ +#define BCMA_CORE_PCI_PLP_MODEREG 0x200 /* Mode */ +#define BCMA_CORE_PCI_PLP_STATUSREG 0x204 /* Status */ +#define BCMA_CORE_PCI_PLP_POLARITYINV_STAT 0x10 /* Status reg PCIE_PLP_STATUSREG */ +#define BCMA_CORE_PCI_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */ +#define BCMA_CORE_PCI_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */ +#define BCMA_CORE_PCI_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */ +#define BCMA_CORE_PCI_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */ +#define BCMA_CORE_PCI_PLP_ATTNREG 
0x218 /* Attention */ +#define BCMA_CORE_PCI_PLP_ATTNMASKREG 0x21C /* Attention Mask */ +#define BCMA_CORE_PCI_PLP_RXERRCTR 0x220 /* Rx Error */ +#define BCMA_CORE_PCI_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */ +#define BCMA_CORE_PCI_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */ +#define BCMA_CORE_PCI_PLP_TESTCTRLREG 0x22C /* Test Control reg */ +#define BCMA_CORE_PCI_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */ +#define BCMA_CORE_PCI_PLP_TIMINGOVRDREG 0x234 /* Timing param override */ +#define BCMA_CORE_PCI_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */ +#define BCMA_CORE_PCI_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */ + +/* PCIE protocol DLLP diagnostic registers */ +#define BCMA_CORE_PCI_DLLP_LCREG 0x100 /* Link Control */ +#define BCMA_CORE_PCI_DLLP_LSREG 0x104 /* Link Status */ +#define BCMA_CORE_PCI_DLLP_LAREG 0x108 /* Link Attention */ +#define BCMA_CORE_PCI_DLLP_LSREG_LINKUP (1 << 16) +#define BCMA_CORE_PCI_DLLP_LAMASKREG 0x10C /* Link Attention Mask */ +#define BCMA_CORE_PCI_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */ +#define BCMA_CORE_PCI_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */ +#define BCMA_CORE_PCI_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */ +#define BCMA_CORE_PCI_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */ +#define BCMA_CORE_PCI_DLLP_LRREG 0x120 /* Link Replay */ +#define BCMA_CORE_PCI_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */ +#define BCMA_CORE_PCI_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */ +#define BCMA_CORE_PCI_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */ +#define BCMA_CORE_PCI_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */ +#define BCMA_CORE_PCI_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */ +#define BCMA_CORE_PCI_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */ +#define BCMA_CORE_PCI_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */ +#define BCMA_CORE_PCI_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */ +#define BCMA_CORE_PCI_DLLP_ERRCTRREG 0x144 /* Error Counter */ +#define BCMA_CORE_PCI_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */ +#define BCMA_CORE_PCI_DLLP_TESTREG 0x14C /* Test */ +#define BCMA_CORE_PCI_DLLP_PKTBIST 0x150 /* Packet BIST */ +#define BCMA_CORE_PCI_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */ + +/* SERDES RX registers */ +#define BCMA_CORE_PCI_SERDES_RX_CTRL 1 /* Rx cntrl */ +#define BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */ +#define BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */ +#define BCMA_CORE_PCI_SERDES_RX_TIMER1 2 /* Rx Timer1 */ +#define BCMA_CORE_PCI_SERDES_RX_CDR 6 /* CDR */ +#define BCMA_CORE_PCI_SERDES_RX_CDRBW 7 /* CDR BW */ + +/* SERDES PLL registers */ +#define BCMA_CORE_PCI_SERDES_PLL_CTRL 1 /* PLL control reg */ +#define BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */ + +/* PCIcore specific boardflags */ +#define BCMA_CORE_PCI_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ + +/* PCIE Config space accessing MACROS */ +#define BCMA_CORE_PCI_CFG_BUS_SHIFT 24 /* Bus shift */ +#define BCMA_CORE_PCI_CFG_SLOT_SHIFT 19 /* Slot/Device shift */ +#define BCMA_CORE_PCI_CFG_FUN_SHIFT 16 /* Function shift */ +#define BCMA_CORE_PCI_CFG_OFF_SHIFT 0 /* Register shift */ + +#define BCMA_CORE_PCI_CFG_BUS_MASK 0xff /* Bus mask */ +#define BCMA_CORE_PCI_CFG_SLOT_MASK 0x1f /* Slot/Device mask */ +#define BCMA_CORE_PCI_CFG_FUN_MASK 7 /* Function mask */ +#define BCMA_CORE_PCI_CFG_OFF_MASK 0xfff /* Register mask */ + +/* PCIE Root Capability Register bits (Host mode only) */ +#define 
BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001 + +struct bcma_drv_pci; + +#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE +struct bcma_drv_pci_host { + struct bcma_drv_pci *pdev; + + u32 host_cfg_addr; + spinlock_t cfgspace_lock; + + struct pci_controller pci_controller; + struct pci_ops pci_ops; + struct resource mem_resource; + struct resource io_resource; +}; +#endif + +struct bcma_drv_pci { + struct bcma_device *core; + u8 setup_done:1; + u8 hostmode:1; + +#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE + struct bcma_drv_pci_host *host_controller; +#endif +}; + +/* Register access */ +#define pcicore_read32(pc, offset) bcma_read32((pc)->core, offset) +#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) + +extern void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc); +extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, + struct bcma_device *core, bool enable); + +extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); +extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); + +#endif /* LINUX_BCMA_DRIVER_PCI_H_ */ diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h new file mode 100644 index 00000000..5a71d571 --- /dev/null +++ b/include/linux/bcma/bcma_regs.h @@ -0,0 +1,86 @@ +#ifndef LINUX_BCMA_REGS_H_ +#define LINUX_BCMA_REGS_H_ + +/* Some single registers are shared between many cores */ +/* BCMA_CLKCTLST: ChipCommon (rev >= 20), PCIe, 80211 */ +#define BCMA_CLKCTLST 0x01E0 /* Clock control and status */ +#define BCMA_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ +#define BCMA_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ +#define BCMA_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */ +#define BCMA_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ +#define BCMA_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ +#define BCMA_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ +#define BCMA_CLKCTLST_EXTRESREQ 0x00000700 /* Mask of external resource requests */ +#define BCMA_CLKCTLST_HAVEALP 0x00010000 /* ALP available */ +#define BCMA_CLKCTLST_HAVEHT 0x00020000 /* HT available */ +#define BCMA_CLKCTLST_BP_ON_ALP 0x00040000 /* RO: running on ALP clock */ +#define BCMA_CLKCTLST_BP_ON_HT 0x00080000 /* RO: running on HT clock */ +#define BCMA_CLKCTLST_EXTRESST 0x07000000 /* Mask of external resource status */ +/* Is there any BCM4328 on BCMA bus? */ +#define BCMA_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */ +#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ + +/* Agent registers (common for every core) */ +#define BCMA_IOCTL 0x0408 /* IO control */ +#define BCMA_IOCTL_CLK 0x0001 +#define BCMA_IOCTL_FGC 0x0002 +#define BCMA_IOCTL_CORE_BITS 0x3FFC +#define BCMA_IOCTL_PME_EN 0x4000 +#define BCMA_IOCTL_BIST_EN 0x8000 +#define BCMA_IOST 0x0500 /* IO status */ +#define BCMA_IOST_CORE_BITS 0x0FFF +#define BCMA_IOST_DMA64 0x1000 +#define BCMA_IOST_GATED_CLK 0x2000 +#define BCMA_IOST_BIST_ERROR 0x4000 +#define BCMA_IOST_BIST_DONE 0x8000 +#define BCMA_RESET_CTL 0x0800 +#define BCMA_RESET_CTL_RESET 0x0001 + +/* BCMA PCI config space registers. 
*/ +#define BCMA_PCI_PMCSR 0x44 +#define BCMA_PCI_PE 0x100 +#define BCMA_PCI_BAR0_WIN 0x80 /* Backplane address space 0 */ +#define BCMA_PCI_BAR1_WIN 0x84 /* Backplane address space 1 */ +#define BCMA_PCI_SPROMCTL 0x88 /* SPROM control */ +#define BCMA_PCI_SPROMCTL_WE 0x10 /* SPROM write enable */ +#define BCMA_PCI_BAR1_CONTROL 0x8c /* Address space 1 burst control */ +#define BCMA_PCI_IRQS 0x90 /* PCI interrupts */ +#define BCMA_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */ +#define BCMA_PCI_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */ +#define BCMA_PCI_BAR0_WIN2 0xAC +#define BCMA_PCI_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */ +#define BCMA_PCI_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */ +#define BCMA_PCI_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */ +#define BCMA_PCI_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define BCMA_PCI_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ +#define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ + +/* SiliconBackplane Address Map. + * All regions may not exist on all chips. + */ +#define BCMA_SOC_SDRAM_BASE 0x00000000U /* Physical SDRAM */ +#define BCMA_SOC_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define BCMA_SOC_PCI_MEM_SZ (64 * 1024 * 1024) +#define BCMA_SOC_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define BCMA_SOC_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */ +#define BCMA_SOC_SDRAM_R2 0x80000000U /* Region 2 for sdram (512 MB) */ + + +#define BCMA_SOC_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define BCMA_SOC_PCI_DMA2 0x80000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define BCMA_SOC_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */ +#define BCMA_SOC_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), low 32 bits + */ +#define BCMA_SOC_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +#define BCMA_SOC_PCI1_MEM 0x40000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define BCMA_SOC_PCI1_CFG 0x44000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define BCMA_SOC_PCIE1_DMA_H32 0xc0000000U /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +#endif /* LINUX_BCMA_REGS_H_ */ diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h new file mode 100644 index 00000000..4203c559 --- /dev/null +++ b/include/linux/bcma/bcma_soc.h @@ -0,0 +1,16 @@ +#ifndef LINUX_BCMA_SOC_H_ +#define LINUX_BCMA_SOC_H_ + +#include <linux/bcma/bcma.h> + +struct bcma_soc { + struct bcma_bus bus; + struct bcma_device core_cc; + struct bcma_device core_mips; +}; + +int __init bcma_host_soc_register(struct bcma_soc *soc); + +int bcma_bus_register(struct bcma_bus *bus); + +#endif /* LINUX_BCMA_SOC_H_ */ diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h new file mode 100644 index 00000000..a69554ef --- /dev/null +++ b/include/linux/bfin_mac.h @@ -0,0 +1,30 @@ +/* + * Blackfin On-Chip MAC Driver + * + * Copyright 2004-2010 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef _LINUX_BFIN_MAC_H_ +#define _LINUX_BFIN_MAC_H_ + +#include <linux/phy.h> + +struct bfin_phydev_platform_data { + unsigned short addr; + int irq; +}; + +struct bfin_mii_bus_platform_data { + int phydev_number; + struct bfin_phydev_platform_data *phydev_data; + const unsigned short *mac_peripherals; + int phy_mode; + unsigned int phy_mask; + unsigned short vlan1_mask, vlan2_mask; +}; + +#endif diff --git a/include/linux/bfs_fs.h b/include/linux/bfs_fs.h new file mode 100644 index 00000000..1c0b355a --- /dev/null +++ b/include/linux/bfs_fs.h @@ -0,0 +1,81 @@ +/* + * include/linux/bfs_fs.h - BFS data structures on disk. + * Copyright (C) 1999 Tigran Aivazian <tigran@veritas.com> + */ + +#ifndef _LINUX_BFS_FS_H +#define _LINUX_BFS_FS_H + +#include <linux/types.h> + +#define BFS_BSIZE_BITS 9 +#define BFS_BSIZE (1<<BFS_BSIZE_BITS) + +#define BFS_MAGIC 0x1BADFACE +#define BFS_ROOT_INO 2 +#define BFS_INODES_PER_BLOCK 8 + +/* SVR4 vnode type values (bfs_inode->i_vtype) */ +#define BFS_VDIR 2L +#define BFS_VREG 1L + +/* BFS inode layout on disk */ +struct bfs_inode { + __le16 i_ino; + __u16 i_unused; + __le32 i_sblock; + __le32 i_eblock; + __le32 i_eoffset; + __le32 i_vtype; + __le32 i_mode; + __le32 i_uid; + __le32 i_gid; + __le32 i_nlink; + __le32 i_atime; + __le32 i_mtime; + __le32 i_ctime; + __u32 i_padding[4]; +}; + +#define BFS_NAMELEN 14 +#define BFS_DIRENT_SIZE 16 +#define BFS_DIRS_PER_BLOCK 32 + +struct bfs_dirent { + __le16 ino; + char name[BFS_NAMELEN]; +}; + +/* BFS superblock layout on disk */ +struct bfs_super_block { + __le32 s_magic; + __le32 s_start; + __le32 s_end; + __le32 s_from; + __le32 s_to; + __s32 s_bfrom; + __s32 s_bto; + char s_fsname[6]; + char s_volume[6]; + __u32 s_padding[118]; +}; + + +#define BFS_OFF2INO(offset) \ + ((((offset) - BFS_BSIZE) / sizeof(struct bfs_inode)) + BFS_ROOT_INO) + +#define BFS_INO2OFF(ino) \ + ((__u32)(((ino) - BFS_ROOT_INO) * sizeof(struct bfs_inode)) + BFS_BSIZE) +#define BFS_NZFILESIZE(ip) \ + ((le32_to_cpu((ip)->i_eoffset) + 1) - le32_to_cpu((ip)->i_sblock) * BFS_BSIZE) + +#define BFS_FILESIZE(ip) \ + ((ip)->i_sblock == 0 ? 0 : BFS_NZFILESIZE(ip)) + +#define BFS_FILEBLOCKS(ip) \ + ((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) - le32_to_cpu((ip)->i_sblock)) +#define BFS_UNCLEAN(bfs_sb, sb) \ + ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY)) + + +#endif /* _LINUX_BFS_FS_H */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h new file mode 100644 index 00000000..366422bc --- /dev/null +++ b/include/linux/binfmts.h @@ -0,0 +1,140 @@ +#ifndef _LINUX_BINFMTS_H +#define _LINUX_BINFMTS_H + +#include <linux/capability.h> + +struct pt_regs; + +/* + * These are the maximum length and maximum number of strings passed to the + * execve() system call. MAX_ARG_STRLEN is essentially random but serves to + * prevent the kernel from being unduly impacted by misaddressed pointers. + * MAX_ARG_STRINGS is chosen to fit in a signed 32-bit integer. + */ +#define MAX_ARG_STRLEN (PAGE_SIZE * 32) +#define MAX_ARG_STRINGS 0x7FFFFFFF + +/* sizeof(linux_binprm->buf) */ +#define BINPRM_BUF_SIZE 128 + +#ifdef __KERNEL__ +#include <linux/sched.h> + +#define CORENAME_MAX_SIZE 128 + +/* + * This structure is used to hold the arguments that are used when loading binaries. 
+ */ +struct linux_binprm { + char buf[BINPRM_BUF_SIZE]; +#ifdef CONFIG_MMU + struct vm_area_struct *vma; + unsigned long vma_pages; +#else +# define MAX_ARG_PAGES 32 + struct page *page[MAX_ARG_PAGES]; +#endif + struct mm_struct *mm; + unsigned long p; /* current top of mem */ + unsigned int + cred_prepared:1,/* true if creds already prepared (multiple + * preps happen for interpreters) */ + cap_effective:1;/* true if has elevated effective capabilities, + * false if not; except for init which inherits + * its parent's caps anyway */ +#ifdef __alpha__ + unsigned int taso:1; +#endif + unsigned int recursion_depth; + struct file * file; + struct cred *cred; /* new credentials */ + int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ + unsigned int per_clear; /* bits to clear in current->personality */ + int argc, envc; + const char * filename; /* Name of binary as seen by procps */ + const char * interp; /* Name of the binary really executed. Most + of the time same as filename, but could be + different for binfmt_{misc,script} */ + unsigned interp_flags; + unsigned interp_data; + unsigned long loader, exec; + char tcomm[TASK_COMM_LEN]; +}; + +#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 +#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) + +/* fd of the binary should be passed to the interpreter */ +#define BINPRM_FLAGS_EXECFD_BIT 1 +#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) + +#define BINPRM_MAX_RECURSION 4 + +/* Function parameter for binfmt->coredump */ +struct coredump_params { + long signr; + struct pt_regs *regs; + struct file *file; + unsigned long limit; + unsigned long mm_flags; +}; + +/* + * This structure defines the functions that are used to load the binary formats that + * linux accepts. 
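+ *
+ * Illustrative registration sketch (editor's example, not from the original
+ * header): a handler fills in its callbacks and passes the structure to
+ * register_binfmt(), typically from its module init; example_format and
+ * example_load_binary are hypothetical names.
+ *
+ *	static struct linux_binfmt example_format = {
+ *		.module      = THIS_MODULE,
+ *		.load_binary = example_load_binary,
+ *	};
+ *
+ *	static int __init example_init(void)
+ *	{
+ *		register_binfmt(&example_format);
+ *		return 0;
+ *	}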
+ */ +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *, struct pt_regs * regs); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *cprm); + unsigned long min_coredump; /* minimal dump size */ +}; + +extern void __register_binfmt(struct linux_binfmt *fmt, int insert); + +/* Registration of default binfmt handlers */ +static inline void register_binfmt(struct linux_binfmt *fmt) +{ + __register_binfmt(fmt, 0); +} +/* Same as above, but adds a new binfmt at the top of the list */ +static inline void insert_binfmt(struct linux_binfmt *fmt) +{ + __register_binfmt(fmt, 1); +} + +extern void unregister_binfmt(struct linux_binfmt *); + +extern int prepare_binprm(struct linux_binprm *); +extern int __must_check remove_arg_zero(struct linux_binprm *); +extern int search_binary_handler(struct linux_binprm *, struct pt_regs *); +extern int flush_old_exec(struct linux_binprm * bprm); +extern void setup_new_exec(struct linux_binprm * bprm); +extern void would_dump(struct linux_binprm *, struct file *); + +extern int suid_dumpable; +#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ +#define SUID_DUMP_USER 1 /* Dump as user of process */ +#define SUID_DUMP_ROOT 2 /* Dump as root */ + +/* Stack area protections */ +#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */ +#define EXSTACK_DISABLE_X 1 /* Disable executable stacks */ +#define EXSTACK_ENABLE_X 2 /* Enable executable stacks */ + +extern int setup_arg_pages(struct linux_binprm * bprm, + unsigned long stack_top, + int executable_stack); +extern int bprm_mm_init(struct linux_binprm *bprm); +extern int copy_strings_kernel(int argc, const char *const *argv, + struct linux_binprm *bprm); +extern int prepare_bprm_creds(struct linux_binprm *bprm); +extern void install_exec_creds(struct linux_binprm *bprm); +extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); +extern void set_binfmt(struct linux_binfmt *new); +extern void free_bprm(struct linux_binprm *); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h new file mode 100644 index 00000000..4d94eb8b --- /dev/null +++ b/include/linux/bio.h @@ -0,0 +1,581 @@ +/* + * 2.5 block I/O model + * + * Copyright (C) 2001 Jens Axboe <axboe@suse.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + */ +#ifndef __LINUX_BIO_H +#define __LINUX_BIO_H + +#include <linux/highmem.h> +#include <linux/mempool.h> +#include <linux/ioprio.h> +#include <linux/bug.h> + +#ifdef CONFIG_BLOCK + +#include <asm/io.h> + +/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ +#include <linux/blk_types.h> + +#define BIO_DEBUG + +#ifdef BIO_DEBUG +#define BIO_BUG_ON BUG_ON +#else +#define BIO_BUG_ON +#endif + +#define BIO_MAX_PAGES 256 +#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT) +#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) + +/* + * upper 16 bits of bi_rw define the io priority of this bio + */ +#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) +#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) +#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) + +#define bio_set_prio(bio, prio) do { \ + WARN_ON(prio >= (1 << IOPRIO_BITS)); \ + (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \ + (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \ +} while (0) + +/* + * various member access, note that bio_data should of course not be used + * on highmem page vectors + */ +#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)])) +#define bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_idx) +#define bio_page(bio) bio_iovec((bio))->bv_page +#define bio_offset(bio) bio_iovec((bio))->bv_offset +#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) +#define bio_sectors(bio) ((bio)->bi_size >> 9) + +static inline unsigned int bio_cur_bytes(struct bio *bio) +{ + if (bio->bi_vcnt) + return bio_iovec(bio)->bv_len; + else /* dataless requests such as discard */ + return bio->bi_size; +} + +static inline void *bio_data(struct bio *bio) +{ + if (bio->bi_vcnt) + return page_address(bio_page(bio)) + bio_offset(bio); + + return NULL; +} + +static inline int bio_has_allocated_vec(struct bio *bio) +{ + return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs; +} + +/* + * will die + */ +#define bio_to_phys(bio) (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio))) +#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset) + +/* + * queues that have highmem support enabled may still need to revert to + * PIO transfers occasionally and thus map high pages temporarily. 
For + * permanent PIO fall back, user is probably better off disabling highmem + * I/O completely on that queue (see ide-dma for example) + */ +#define __bio_kmap_atomic(bio, idx, kmtype) \ + (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) + \ + bio_iovec_idx((bio), (idx))->bv_offset) + +#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr) + +/* + * merge helpers etc + */ + +#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1) +#define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_idx) + +/* Default implementation of BIOVEC_PHYS_MERGEABLE */ +#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) + +/* + * allow arch override, for eg virtualized architectures (put in asm/io.h) + */ +#ifndef BIOVEC_PHYS_MERGEABLE +#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + __BIOVEC_PHYS_MERGEABLE(vec1, vec2) +#endif + +#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ + (((addr1) | (mask)) == (((addr2) - 1) | (mask))) +#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ + __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) +#define BIO_SEG_BOUNDARY(q, b1, b2) \ + BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) + +#define bio_io_error(bio) bio_endio((bio), -EIO) + +/* + * drivers should not use the __ version unless they _really_ want to + * run through the entire bio and not just pending pieces + */ +#define __bio_for_each_segment(bvl, bio, i, start_idx) \ + for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \ + i < (bio)->bi_vcnt; \ + bvl++, i++) + +#define bio_for_each_segment(bvl, bio, i) \ + __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx) + +/* + * get a reference to a bio, so it won't disappear. the intended use is + * something like: + * + * bio_get(bio); + * submit_bio(rw, bio); + * if (bio->bi_flags ...) + * do_something + * bio_put(bio); + * + * without the bio_get(), it could potentially complete I/O before submit_bio + * returns. and then bio would be freed memory when if (bio->bi_flags ...) + * runs + */ +#define bio_get(bio) atomic_inc(&(bio)->bi_cnt) + +#if defined(CONFIG_BLK_DEV_INTEGRITY) +/* + * bio integrity payload + */ +struct bio_integrity_payload { + struct bio *bip_bio; /* parent bio */ + + sector_t bip_sector; /* virtual start sector */ + + void *bip_buf; /* generated integrity data */ + bio_end_io_t *bip_end_io; /* saved I/O completion fn */ + + unsigned int bip_size; + + unsigned short bip_slab; /* slab the bip came from */ + unsigned short bip_vcnt; /* # of integrity bio_vecs */ + unsigned short bip_idx; /* current bip_vec index */ + + struct work_struct bip_work; /* I/O completion */ + struct bio_vec bip_vec[0]; /* embedded bvec array */ +}; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +/* + * A bio_pair is used when we need to split a bio. 
+ * This can only happen for a bio that refers to just one + * page of data, and in the unusual situation when the + * page crosses a chunk/device boundary + * + * The address of the master bio is stored in bio1.bi_private + * The address of the pool the pair was allocated from is stored + * in bio2.bi_private + */ +struct bio_pair { + struct bio bio1, bio2; + struct bio_vec bv1, bv2; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + struct bio_integrity_payload bip1, bip2; + struct bio_vec iv1, iv2; +#endif + atomic_t cnt; + int error; +}; +extern struct bio_pair *bio_split(struct bio *bi, int first_sectors); +extern void bio_pair_release(struct bio_pair *dbio); + +extern struct bio_set *bioset_create(unsigned int, unsigned int); +extern void bioset_free(struct bio_set *); + +extern struct bio *bio_alloc(gfp_t, unsigned int); +extern struct bio *bio_kmalloc(gfp_t, unsigned int); +extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); +extern void bio_put(struct bio *); +extern void bio_free(struct bio *, struct bio_set *); + +extern void bio_endio(struct bio *, int); +struct request_queue; +extern int bio_phys_segments(struct request_queue *, struct bio *); + +extern void __bio_clone(struct bio *, struct bio *); +extern struct bio *bio_clone(struct bio *, gfp_t); + +extern void bio_init(struct bio *); + +extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); +extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, + unsigned int, unsigned int); +extern int bio_get_nr_vecs(struct block_device *); +extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int); +extern struct bio *bio_map_user(struct request_queue *, struct block_device *, + unsigned long, unsigned int, int, gfp_t); +struct sg_iovec; +struct rq_map_data; +extern struct bio *bio_map_user_iov(struct request_queue *, + struct block_device *, + struct sg_iovec *, int, int, gfp_t); +extern void bio_unmap_user(struct bio *); +extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, + gfp_t); +extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, + gfp_t, int); +extern void bio_set_pages_dirty(struct bio *bio); +extern void bio_check_pages_dirty(struct bio *bio); + +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" +#endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +extern void bio_flush_dcache_pages(struct bio *bi); +#else +static inline void bio_flush_dcache_pages(struct bio *bi) +{ +} +#endif + +extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *, + unsigned long, unsigned int, int, gfp_t); +extern struct bio *bio_copy_user_iov(struct request_queue *, + struct rq_map_data *, struct sg_iovec *, + int, int, gfp_t); +extern int bio_uncopy_user(struct bio *); +void zero_fill_bio(struct bio *bio); +extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *); +extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int); +extern unsigned int bvec_nr_vecs(unsigned short idx); + +/* + * bio_set is used to allow other portions of the IO system to + * allocate their own private memory pools for bio and iovec structures. + * These memory pools in turn all allocate from the bio_slab + * and the bvec_slabs[]. 
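+ *
+ * Illustrative usage (editor's sketch, not part of the original header):
+ * a driver that wants a private reserve of bios creates a set once and
+ * then allocates from it on its I/O path; error handling is omitted.
+ *
+ *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);	(pool size, front pad)
+ *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);	(room for one bio_vec)
+ *	...
+ *	bio_put(bio);
+ *	bioset_free(bs);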
+ */ +#define BIO_POOL_SIZE 2 +#define BIOVEC_NR_POOLS 6 +#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1) + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + + mempool_t *bio_pool; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + mempool_t *bio_integrity_pool; +#endif + mempool_t *bvec_pool; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +extern struct bio_set *fs_bio_set; + +/* + * a small number of entries is fine, not going to be performance critical. + * basically we just need to survive + */ +#define BIO_SPLIT_ENTRIES 2 + +#ifdef CONFIG_HIGHMEM +/* + * remember never ever reenable interrupts between a bvec_kmap_irq and + * bvec_kunmap_irq! + */ +static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +{ + unsigned long addr; + + /* + * might not be a highmem page, but the preempt/irq count + * balancing is a lot nicer this way + */ + local_irq_save(*flags); + addr = (unsigned long) kmap_atomic(bvec->bv_page); + + BUG_ON(addr & ~PAGE_MASK); + + return (char *) addr + bvec->bv_offset; +} + +static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) +{ + unsigned long ptr = (unsigned long) buffer & PAGE_MASK; + + kunmap_atomic((void *) ptr); + local_irq_restore(*flags); +} + +#else +static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +{ + return page_address(bvec->bv_page) + bvec->bv_offset; +} + +static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) +{ + *flags = 0; +} +#endif + +static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, + unsigned long *flags) +{ + return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags); +} +#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags) + +#define bio_kmap_irq(bio, flags) \ + __bio_kmap_irq((bio), (bio)->bi_idx, (flags)) +#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) + +/* + * Check whether this bio carries any data or not. A NULL bio is allowed. + */ +static inline int bio_has_data(struct bio *bio) +{ + return bio && bio->bi_io_vec != NULL; +} + +/* + * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. + * + * A bio_list anchors a singly-linked list of bios chained through the bi_next + * member of the bio. The bio_list also caches the last list member to allow + * fast access to the tail. 
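+ *
+ * Illustrative usage (editor's sketch, not part of the original header);
+ * "some_bio" and the call to generic_make_request() are just placeholders:
+ *
+ *	struct bio_list list;
+ *	struct bio *bio;
+ *
+ *	bio_list_init(&list);
+ *	bio_list_add(&list, some_bio);
+ *	while ((bio = bio_list_pop(&list)))
+ *		generic_make_request(bio);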
+ */ +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +static inline int bio_list_empty(const struct bio_list *bl) +{ + return bl->head == NULL; +} + +static inline void bio_list_init(struct bio_list *bl) +{ + bl->head = bl->tail = NULL; +} + +#define bio_list_for_each(bio, bl) \ + for (bio = (bl)->head; bio; bio = bio->bi_next) + +static inline unsigned bio_list_size(const struct bio_list *bl) +{ + unsigned sz = 0; + struct bio *bio; + + bio_list_for_each(bio, bl) + sz++; + + return sz; +} + +static inline void bio_list_add(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = NULL; + + if (bl->tail) + bl->tail->bi_next = bio; + else + bl->head = bio; + + bl->tail = bio; +} + +static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = bl->head; + + bl->head = bio; + + if (!bl->tail) + bl->tail = bio; +} + +static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->tail) + bl->tail->bi_next = bl2->head; + else + bl->head = bl2->head; + + bl->tail = bl2->tail; +} + +static inline void bio_list_merge_head(struct bio_list *bl, + struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->head) + bl2->tail->bi_next = bl->head; + else + bl->tail = bl2->tail; + + bl->head = bl2->head; +} + +static inline struct bio *bio_list_peek(struct bio_list *bl) +{ + return bl->head; +} + +static inline struct bio *bio_list_pop(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + if (bio) { + bl->head = bl->head->bi_next; + if (!bl->head) + bl->tail = NULL; + + bio->bi_next = NULL; + } + + return bio; +} + +static inline struct bio *bio_list_get(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + bl->head = bl->tail = NULL; + + return bio; +} + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) +#define bip_vec(bip) bip_vec_idx(bip, 0) + +#define __bip_for_each_vec(bvl, bip, i, start_idx) \ + for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx); \ + i < (bip)->bip_vcnt; \ + bvl++, i++) + +#define bip_for_each_vec(bvl, bip, i) \ + __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) + +#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ + for_each_bio(_bio) \ + bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) + +#define bio_integrity(bio) (bio->bi_integrity != NULL) + +extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); +extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); +extern void bio_integrity_free(struct bio *, struct bio_set *); +extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); +extern int bio_integrity_enabled(struct bio *bio); +extern int bio_integrity_set_tag(struct bio *, void *, unsigned int); +extern int bio_integrity_get_tag(struct bio *, void *, unsigned int); +extern int bio_integrity_prep(struct bio *); +extern void bio_integrity_endio(struct bio *, int); +extern void bio_integrity_advance(struct bio *, unsigned int); +extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); +extern void bio_integrity_split(struct bio *, struct bio_pair *, int); +extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *); +extern int bioset_integrity_create(struct bio_set *, int); +extern void bioset_integrity_free(struct bio_set *); +extern void bio_integrity_init(void); + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +static inline int 
bio_integrity(struct bio *bio) +{ + return 0; +} + +static inline int bio_integrity_enabled(struct bio *bio) +{ + return 0; +} + +static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) +{ + return 0; +} + +static inline void bioset_integrity_free (struct bio_set *bs) +{ + return; +} + +static inline int bio_integrity_prep(struct bio *bio) +{ + return 0; +} + +static inline void bio_integrity_free(struct bio *bio, struct bio_set *bs) +{ + return; +} + +static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, + gfp_t gfp_mask, struct bio_set *bs) +{ + return 0; +} + +static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp, + int sectors) +{ + return; +} + +static inline void bio_integrity_advance(struct bio *bio, + unsigned int bytes_done) +{ + return; +} + +static inline void bio_integrity_trim(struct bio *bio, unsigned int offset, + unsigned int sectors) +{ + return; +} + +static inline void bio_integrity_init(void) +{ + return; +} + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +#endif /* CONFIG_BLOCK */ +#endif /* __LINUX_BIO_H */ diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h new file mode 100644 index 00000000..3b5bafce --- /dev/null +++ b/include/linux/bit_spinlock.h @@ -0,0 +1,100 @@ +#ifndef __LINUX_BIT_SPINLOCK_H +#define __LINUX_BIT_SPINLOCK_H + +#include <linux/kernel.h> +#include <linux/preempt.h> +#include <linux/atomic.h> +#include <linux/bug.h> + +/* + * bit-based spin_lock() + * + * Don't use this unless you really need to: spin_lock() and spin_unlock() + * are significantly faster. + */ +static inline void bit_spin_lock(int bitnum, unsigned long *addr) +{ + /* + * Assuming the lock is uncontended, this never enters + * the body of the outer loop. If it is contended, then + * within the inner loop a non-atomic test is used to + * busywait with less bus contention for a good time to + * attempt to acquire the lock bit. + */ + preempt_disable(); +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + while (unlikely(test_and_set_bit_lock(bitnum, addr))) { + preempt_enable(); + do { + cpu_relax(); + } while (test_bit(bitnum, addr)); + preempt_disable(); + } +#endif + __acquire(bitlock); +} + +/* + * Return true if it was acquired + */ +static inline int bit_spin_trylock(int bitnum, unsigned long *addr) +{ + preempt_disable(); +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + if (unlikely(test_and_set_bit_lock(bitnum, addr))) { + preempt_enable(); + return 0; + } +#endif + __acquire(bitlock); + return 1; +} + +/* + * bit-based spin_unlock() + */ +static inline void bit_spin_unlock(int bitnum, unsigned long *addr) +{ +#ifdef CONFIG_DEBUG_SPINLOCK + BUG_ON(!test_bit(bitnum, addr)); +#endif +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + clear_bit_unlock(bitnum, addr); +#endif + preempt_enable(); + __release(bitlock); +} + +/* + * bit-based spin_unlock() + * non-atomic version, which can be used eg. if the bit lock itself is + * protecting the rest of the flags in the word. + */ +static inline void __bit_spin_unlock(int bitnum, unsigned long *addr) +{ +#ifdef CONFIG_DEBUG_SPINLOCK + BUG_ON(!test_bit(bitnum, addr)); +#endif +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + __clear_bit_unlock(bitnum, addr); +#endif + preempt_enable(); + __release(bitlock); +} + +/* + * Return true if the lock is held. 
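+ *
+ * Editor's note (not part of the original header): typically used in
+ * assertions by code that expects its caller to already hold the bit
+ * lock, e.g. BUG_ON(!bit_spin_is_locked(MY_LOCK_BIT, &word)), where
+ * MY_LOCK_BIT and word are hypothetical.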
+ */ +static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + return test_bit(bitnum, addr); +#elif defined CONFIG_PREEMPT_COUNT + return preempt_count(); +#else + return 1; +#endif +} + +#endif /* __LINUX_BIT_SPINLOCK_H */ + diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h new file mode 100644 index 00000000..7ad63450 --- /dev/null +++ b/include/linux/bitmap.h @@ -0,0 +1,309 @@ +#ifndef __LINUX_BITMAP_H +#define __LINUX_BITMAP_H + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/string.h> +#include <linux/kernel.h> + +/* + * bitmaps provide bit arrays that consume one or more unsigned + * longs. The bitmap interface and available operations are listed + * here, in bitmap.h + * + * Function implementations generic to all architectures are in + * lib/bitmap.c. Functions implementations that are architecture + * specific are in various include/asm-<arch>/bitops.h headers + * and other arch/<arch> specific files. + * + * See lib/bitmap.c for more details. + */ + +/* + * The available bitmap operations and their rough meaning in the + * case that the bitmap is a single unsigned long are thus: + * + * Note that nbits should be always a compile time evaluable constant. + * Otherwise many inlines will generate horrible code. + * + * bitmap_zero(dst, nbits) *dst = 0UL + * bitmap_fill(dst, nbits) *dst = ~0UL + * bitmap_copy(dst, src, nbits) *dst = *src + * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 + * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 + * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 + * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) + * bitmap_complement(dst, src, nbits) *dst = ~(*src) + * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? + * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? + * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? + * bitmap_empty(src, nbits) Are all bits zero in *src? + * bitmap_full(src, nbits) Are all bits set in *src? + * bitmap_weight(src, nbits) Hamming Weight: number set bits + * bitmap_set(dst, pos, nbits) Set specified bit area + * bitmap_clear(dst, pos, nbits) Clear specified bit area + * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n + * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n + * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) + * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) + * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap + * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz + * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf + * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf + * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf + * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf + * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf + * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf + * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region + * bitmap_release_region(bitmap, pos, order) Free specified bit region + * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region + */ + +/* + * Also the following operations in asm/bitops.h apply to bitmaps. 
+ * + * set_bit(bit, addr) *addr |= bit + * clear_bit(bit, addr) *addr &= ~bit + * change_bit(bit, addr) *addr ^= bit + * test_bit(bit, addr) Is bit set in *addr? + * test_and_set_bit(bit, addr) Set bit and return old value + * test_and_clear_bit(bit, addr) Clear bit and return old value + * test_and_change_bit(bit, addr) Change bit and return old value + * find_first_zero_bit(addr, nbits) Position first zero bit in *addr + * find_first_bit(addr, nbits) Position first set bit in *addr + * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit + * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit + */ + +/* + * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used + * to declare an array named 'name' of just enough unsigned longs to + * contain all bit positions from 0 to 'bits' - 1. + */ + +/* + * lib/bitmap.c provides these functions: + */ + +extern int __bitmap_empty(const unsigned long *bitmap, int bits); +extern int __bitmap_full(const unsigned long *bitmap, int bits); +extern int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, + int bits); +extern void __bitmap_shift_right(unsigned long *dst, + const unsigned long *src, int shift, int bits); +extern void __bitmap_shift_left(unsigned long *dst, + const unsigned long *src, int shift, int bits); +extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern int __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern int __bitmap_weight(const unsigned long *bitmap, int bits); + +extern void bitmap_set(unsigned long *map, int i, int len); +extern void bitmap_clear(unsigned long *map, int start, int nr); +extern unsigned long bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask); + +extern int bitmap_scnprintf(char *buf, unsigned int len, + const unsigned long *src, int nbits); +extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, + unsigned long *dst, int nbits); +extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, + unsigned long *dst, int nbits); +extern int bitmap_scnlistprintf(char *buf, unsigned int len, + const unsigned long *src, int nbits); +extern int bitmap_parselist(const char *buf, unsigned long *maskp, + int nmaskbits); +extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, + unsigned long *dst, int nbits); +extern void bitmap_remap(unsigned long *dst, const unsigned long *src, + const unsigned long *old, const unsigned long *new, int bits); +extern int bitmap_bitremap(int oldbit, + const unsigned long *old, const unsigned long *new, int bits); +extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, + const unsigned long *relmap, int bits); +extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, + int sz, int bits); 
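+
+/*
+ * Illustrative use of the operations listed above (editor's sketch, not
+ * part of the original header); do_something() is a hypothetical callback:
+ *
+ *	DECLARE_BITMAP(mask, 64);
+ *	unsigned int bit;
+ *
+ *	bitmap_zero(mask, 64);
+ *	bitmap_set(mask, 3, 2);			(sets bits 3 and 4)
+ *	if (bitmap_weight(mask, 64) == 2)
+ *		for_each_set_bit(bit, mask, 64)
+ *			do_something(bit);
+ */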
+extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); +extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); +extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); +extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); +extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); + +#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) +#define BITMAP_LAST_WORD_MASK(nbits) \ +( \ + ((nbits) % BITS_PER_LONG) ? \ + (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ +) + +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) + +static inline void bitmap_zero(unsigned long *dst, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} + +static inline void bitmap_fill(unsigned long *dst, int nbits) +{ + size_t nlongs = BITS_TO_LONGS(nbits); + if (!small_const_nbits(nbits)) { + int len = (nlongs - 1) * sizeof(unsigned long); + memset(dst, 0xff, len); + } + dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); +} + +static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, + int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memcpy(dst, src, len); + } +} + +static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return (*dst = *src1 & *src2) != 0; + return __bitmap_and(dst, src1, src2, nbits); +} + +static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 | *src2; + else + __bitmap_or(dst, src1, src2, nbits); +} + +static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 ^ *src2; + else + __bitmap_xor(dst, src1, src2, nbits); +} + +static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return (*dst = *src1 & ~(*src2)) != 0; + return __bitmap_andnot(dst, src1, src2, nbits); +} + +static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, + int nbits) +{ + if (small_const_nbits(nbits)) + *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); + else + __bitmap_complement(dst, src, nbits); +} + +static inline int bitmap_equal(const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_equal(src1, src2, nbits); +} + +static inline int bitmap_intersects(const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; + else + return __bitmap_intersects(src1, src2, nbits); +} + +static inline int bitmap_subset(const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_subset(src1, src2, nbits); +} + +static inline int bitmap_empty(const unsigned long *src, int nbits) +{ + if (small_const_nbits(nbits)) + return ! 
(*src & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_empty(src, nbits); +} + +static inline int bitmap_full(const unsigned long *src, int nbits) +{ + if (small_const_nbits(nbits)) + return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_full(src, nbits); +} + +static inline int bitmap_weight(const unsigned long *src, int nbits) +{ + if (small_const_nbits(nbits)) + return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); + return __bitmap_weight(src, nbits); +} + +static inline void bitmap_shift_right(unsigned long *dst, + const unsigned long *src, int n, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src >> n; + else + __bitmap_shift_right(dst, src, n, nbits); +} + +static inline void bitmap_shift_left(unsigned long *dst, + const unsigned long *src, int n, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); + else + __bitmap_shift_left(dst, src, n, nbits); +} + +static inline int bitmap_parse(const char *buf, unsigned int buflen, + unsigned long *maskp, int nmaskbits) +{ + return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __LINUX_BITMAP_H */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h new file mode 100644 index 00000000..a3b6b821 --- /dev/null +++ b/include/linux/bitops.h @@ -0,0 +1,201 @@ +#ifndef _LINUX_BITOPS_H +#define _LINUX_BITOPS_H +#include <asm/types.h> + +#ifdef __KERNEL__ +#define BIT(nr) (1UL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITS_PER_BYTE 8 +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) +#endif + +extern unsigned int __sw_hweight8(unsigned int w); +extern unsigned int __sw_hweight16(unsigned int w); +extern unsigned int __sw_hweight32(unsigned int w); +extern unsigned long __sw_hweight64(__u64 w); + +/* + * Include this here because some architectures need generic_ffs/fls in + * scope + */ +#include <asm/bitops.h> + +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +/* same as for_each_set_bit() but use bit as value to start with */ +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +#define for_each_clear_bit(bit, addr, size) \ + for ((bit) = find_first_zero_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) + +/* same as for_each_clear_bit() but use bit as value to start with */ +#define for_each_clear_bit_from(bit, addr, size) \ + for ((bit) = find_next_zero_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) + +static __inline__ int get_bitmask_order(unsigned int count) +{ + int order; + + order = fls(count); + return order; /* We could be slightly more clever with -1 here... */ +} + +static __inline__ int get_count_order(unsigned int count) +{ + int order; + + order = fls(count) - 1; + if (count & (count - 1)) + order++; + return order; +} + +static inline unsigned long hweight_long(unsigned long w) +{ + return sizeof(w) == 4 ? 
hweight32(w) : hweight64(w);
+}
+
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (64 - shift));
+}
+
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 rol32(__u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (32 - shift));
+}
+
+/**
+ * ror32 - rotate a 32-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 ror32(__u32 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (32 - shift));
+}
+
+/**
+ * rol16 - rotate a 16-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 rol16(__u16 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (16 - shift));
+}
+
+/**
+ * ror16 - rotate a 16-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 ror16(__u16 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (16 - shift));
+}
+
+/**
+ * rol8 - rotate an 8-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 rol8(__u8 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (8 - shift));
+}
+
+/**
+ * ror8 - rotate an 8-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 ror8(__u8 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (8 - shift));
+}
+
+/**
+ * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<32) to sign bit
+ */
+static inline __s32 sign_extend32(__u32 value, int index)
+{
+ __u8 shift = 31 - index;
+ return (__s32)(value << shift) >> shift;
+}
+
+static inline unsigned fls_long(unsigned long l)
+{
+ if (sizeof(l) == 4)
+ return fls(l);
+ return fls64(l);
+}
+
+/**
+ * __ffs64 - find first set bit in a 64 bit word
+ * @word: The 64 bit word
+ *
+ * On 64 bit arches this is a synonym for __ffs.
+ * The result is not defined if no bits are set, so check that @word
+ * is non-zero before calling this.
+ */
+static inline unsigned long __ffs64(u64 word)
+{
+#if BITS_PER_LONG == 32
+ if (((u32)word) == 0UL)
+ return __ffs((u32)(word >> 32)) + 32;
+#elif BITS_PER_LONG != 64
+#error BITS_PER_LONG not 32 or 64
+#endif
+ return __ffs((unsigned long)word);
+}
+
+#ifdef __KERNEL__
+
+#ifndef find_last_bit
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the last set bit, or size.
+ */ +extern unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); +#endif + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h new file mode 100644 index 00000000..7ffe03f4 --- /dev/null +++ b/include/linux/bitrev.h @@ -0,0 +1,16 @@ +#ifndef _LINUX_BITREV_H +#define _LINUX_BITREV_H + +#include <linux/types.h> + +extern u8 const byte_rev_table[256]; + +static inline u8 bitrev8(u8 byte) +{ + return byte_rev_table[byte]; +} + +extern u16 bitrev16(u16 in); +extern u32 bitrev32(u32 in); + +#endif /* _LINUX_BITREV_H */ diff --git a/include/linux/blk-iopoll.h b/include/linux/blk-iopoll.h new file mode 100644 index 00000000..308734d3 --- /dev/null +++ b/include/linux/blk-iopoll.h @@ -0,0 +1,48 @@ +#ifndef BLK_IOPOLL_H +#define BLK_IOPOLL_H + +struct blk_iopoll; +typedef int (blk_iopoll_fn)(struct blk_iopoll *, int); + +struct blk_iopoll { + struct list_head list; + unsigned long state; + unsigned long data; + int weight; + int max; + blk_iopoll_fn *poll; +}; + +enum { + IOPOLL_F_SCHED = 0, + IOPOLL_F_DISABLE = 1, +}; + +/* + * Returns 0 if we successfully set the IOPOLL_F_SCHED bit, indicating + * that we were the first to acquire this iop for scheduling. If this iop + * is currently disabled, return "failure". + */ +static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop) +{ + if (!test_bit(IOPOLL_F_DISABLE, &iop->state)) + return test_and_set_bit(IOPOLL_F_SCHED, &iop->state); + + return 1; +} + +static inline int blk_iopoll_disable_pending(struct blk_iopoll *iop) +{ + return test_bit(IOPOLL_F_DISABLE, &iop->state); +} + +extern void blk_iopoll_sched(struct blk_iopoll *); +extern void blk_iopoll_init(struct blk_iopoll *, int, blk_iopoll_fn *); +extern void blk_iopoll_complete(struct blk_iopoll *); +extern void __blk_iopoll_complete(struct blk_iopoll *); +extern void blk_iopoll_enable(struct blk_iopoll *); +extern void blk_iopoll_disable(struct blk_iopoll *); + +extern int blk_iopoll_enabled; + +#endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h new file mode 100644 index 00000000..4053cbd4 --- /dev/null +++ b/include/linux/blk_types.h @@ -0,0 +1,195 @@ +/* + * Block data types and constants. Directly include this file only to + * break include dependency loop. + */ +#ifndef __LINUX_BLK_TYPES_H +#define __LINUX_BLK_TYPES_H + +#ifdef CONFIG_BLOCK + +#include <linux/types.h> + +struct bio_set; +struct bio; +struct bio_integrity_payload; +struct page; +struct block_device; +typedef void (bio_end_io_t) (struct bio *, int); +typedef void (bio_destructor_t) (struct bio *); + +/* + * was unsigned short, but we might as well be ready for > 64kB I/O pages + */ +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +/* + * main unit of I/O for the block layer and lower layers (ie drivers and + * stacking drivers) + */ +struct bio { + sector_t bi_sector; /* device address in 512 byte + sectors */ + struct bio *bi_next; /* request queue link */ + struct block_device *bi_bdev; + unsigned long bi_flags; /* status, command, etc */ + unsigned long bi_rw; /* bottom bits READ/WRITE, + * top bits priority + */ + + unsigned short bi_vcnt; /* how many bio_vec's */ + unsigned short bi_idx; /* current index into bvl_vec */ + + /* Number of segments in this BIO after + * physical address coalescing is performed. 
+ */ + unsigned int bi_phys_segments; + + unsigned int bi_size; /* residual I/O count */ + + /* + * To keep track of the max segment size, we account for the + * sizes of the first and last mergeable segments in this bio. + */ + unsigned int bi_seg_front_size; + unsigned int bi_seg_back_size; + + unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ + + atomic_t bi_cnt; /* pin count */ + + struct bio_vec *bi_io_vec; /* the actual vec list */ + + bio_end_io_t *bi_end_io; + + void *bi_private; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + struct bio_integrity_payload *bi_integrity; /* data integrity */ +#endif + + bio_destructor_t *bi_destructor; /* destructor */ + + /* + * We can inline a number of vecs at the end of the bio, to avoid + * double allocations for a small number of bio_vecs. This member + * MUST obviously be kept at the very end of the bio. + */ + struct bio_vec bi_inline_vecs[0]; +}; + +/* + * bio flags + */ +#define BIO_UPTODATE 0 /* ok after I/O completion */ +#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ +#define BIO_EOF 2 /* out-out-bounds error */ +#define BIO_SEG_VALID 3 /* bi_phys_segments valid */ +#define BIO_CLONED 4 /* doesn't own data */ +#define BIO_BOUNCED 5 /* bio is a bounce bio */ +#define BIO_USER_MAPPED 6 /* contains user pages */ +#define BIO_EOPNOTSUPP 7 /* not supported */ +#define BIO_NULL_MAPPED 8 /* contains invalid user pages */ +#define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */ +#define BIO_QUIET 10 /* Make BIO Quiet */ +#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */ +#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) + +/* + * top 4 bits of bio flags indicate the pool this bio came from + */ +#define BIO_POOL_BITS (4) +#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) +#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS) +#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) +#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) + +#endif /* CONFIG_BLOCK */ + +/* + * Request flags. For use in the cmd_flags field of struct request, and in + * bi_rw of struct bio. Note that some flags are only valid in either one. + */ +enum rq_flag_bits { + /* common flags */ + __REQ_WRITE, /* not set, read. set, write */ + __REQ_FAILFAST_DEV, /* no driver retries of device errors */ + __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ + __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ + + __REQ_SYNC, /* request is sync (sync write or read) */ + __REQ_META, /* metadata io request */ + __REQ_PRIO, /* boost priority in cfq */ + __REQ_DISCARD, /* request to discard sectors */ + __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ + + __REQ_NOIDLE, /* don't anticipate more IO after this one */ + __REQ_FUA, /* forced unit access */ + __REQ_FLUSH, /* request for cache flush */ + + /* bio only flags */ + __REQ_RAHEAD, /* read ahead, can fail anytime */ + __REQ_THROTTLED, /* This bio has already been subjected to + * throttling rules. Don't do it again. 
*/ + + /* request only flags */ + __REQ_SORTED, /* elevator knows about this request */ + __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ + __REQ_NOMERGE, /* don't touch this for merging */ + __REQ_STARTED, /* drive already may have started this one */ + __REQ_DONTPREP, /* don't call prep for this one */ + __REQ_QUEUED, /* uses queueing */ + __REQ_ELVPRIV, /* elevator private data attached */ + __REQ_FAILED, /* set if the request failed */ + __REQ_QUIET, /* don't worry about errors */ + __REQ_PREEMPT, /* set for "ide_preempt" requests */ + __REQ_ALLOCED, /* request came from our alloc pool */ + __REQ_COPY_USER, /* contains copies of user pages */ + __REQ_FLUSH_SEQ, /* request for flush sequence */ + __REQ_IO_STAT, /* account I/O stat */ + __REQ_MIXED_MERGE, /* merge of different types, fail separately */ + __REQ_NR_BITS, /* stops here */ +}; + +#define REQ_WRITE (1 << __REQ_WRITE) +#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) +#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) +#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) +#define REQ_SYNC (1 << __REQ_SYNC) +#define REQ_META (1 << __REQ_META) +#define REQ_PRIO (1 << __REQ_PRIO) +#define REQ_DISCARD (1 << __REQ_DISCARD) +#define REQ_NOIDLE (1 << __REQ_NOIDLE) + +#define REQ_FAILFAST_MASK \ + (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) +#define REQ_COMMON_MASK \ + (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ + REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE) +#define REQ_CLONE_MASK REQ_COMMON_MASK + +#define REQ_RAHEAD (1 << __REQ_RAHEAD) +#define REQ_THROTTLED (1 << __REQ_THROTTLED) + +#define REQ_SORTED (1 << __REQ_SORTED) +#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) +#define REQ_FUA (1 << __REQ_FUA) +#define REQ_NOMERGE (1 << __REQ_NOMERGE) +#define REQ_STARTED (1 << __REQ_STARTED) +#define REQ_DONTPREP (1 << __REQ_DONTPREP) +#define REQ_QUEUED (1 << __REQ_QUEUED) +#define REQ_ELVPRIV (1 << __REQ_ELVPRIV) +#define REQ_FAILED (1 << __REQ_FAILED) +#define REQ_QUIET (1 << __REQ_QUIET) +#define REQ_PREEMPT (1 << __REQ_PREEMPT) +#define REQ_ALLOCED (1 << __REQ_ALLOCED) +#define REQ_COPY_USER (1 << __REQ_COPY_USER) +#define REQ_FLUSH (1 << __REQ_FLUSH) +#define REQ_FLUSH_SEQ (1 << __REQ_FLUSH_SEQ) +#define REQ_IO_STAT (1 << __REQ_IO_STAT) +#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) +#define REQ_SECURE (1 << __REQ_SECURE) + +#endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h new file mode 100644 index 00000000..4d4ac24a --- /dev/null +++ b/include/linux/blkdev.h @@ -0,0 +1,1421 @@ +#ifndef _LINUX_BLKDEV_H +#define _LINUX_BLKDEV_H + +#include <linux/sched.h> + +#ifdef CONFIG_BLOCK + +#include <linux/major.h> +#include <linux/genhd.h> +#include <linux/list.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/pagemap.h> +#include <linux/backing-dev.h> +#include <linux/wait.h> +#include <linux/mempool.h> +#include <linux/bio.h> +#include <linux/stringify.h> +#include <linux/gfp.h> +#include <linux/bsg.h> +#include <linux/smp.h> + +#include <asm/scatterlist.h> + +struct module; +struct scsi_ioctl_command; + +struct request_queue; +struct elevator_queue; +struct request_pm_state; +struct blk_trace; +struct request; +struct sg_io_hdr; +struct bsg_job; + +#define BLKDEV_MIN_RQ 4 +#define BLKDEV_MAX_RQ 128 /* Default maximum */ + +struct request; +typedef void (rq_end_io_fn)(struct request *, int); + +struct request_list { + /* + * count[], starved[], and wait[] are indexed by + * 
BLK_RW_SYNC/BLK_RW_ASYNC + */ + int count[2]; + int starved[2]; + int elvpriv; + mempool_t *rq_pool; + wait_queue_head_t wait[2]; +}; + +/* + * request command types + */ +enum rq_cmd_type_bits { + REQ_TYPE_FS = 1, /* fs request */ + REQ_TYPE_BLOCK_PC, /* scsi command */ + REQ_TYPE_SENSE, /* sense request */ + REQ_TYPE_PM_SUSPEND, /* suspend request */ + REQ_TYPE_PM_RESUME, /* resume request */ + REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ + REQ_TYPE_SPECIAL, /* driver defined type */ + /* + * for ATA/ATAPI devices. this really doesn't belong here, ide should + * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver + * private REQ_LB opcodes to differentiate what type of request this is + */ + REQ_TYPE_ATA_TASKFILE, + REQ_TYPE_ATA_PC, +}; + +#define BLK_MAX_CDB 16 + +/* + * try to put the fields that are referenced together in the same cacheline. + * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init() + * as well! + */ +struct request { + struct list_head queuelist; + struct call_single_data csd; + + struct request_queue *q; + + unsigned int cmd_flags; + enum rq_cmd_type_bits cmd_type; + unsigned long atomic_flags; + + int cpu; + + /* the following two fields are internal, NEVER access directly */ + unsigned int __data_len; /* total data len */ + sector_t __sector; /* sector cursor */ + + struct bio *bio; + struct bio *biotail; + + struct hlist_node hash; /* merge hash */ + /* + * The rb_node is only used inside the io scheduler, requests + * are pruned when moved to the dispatch queue. So let the + * completion_data share space with the rb_node. + */ + union { + struct rb_node rb_node; /* sort/lookup */ + void *completion_data; + }; + + /* + * Three pointers are available for the IO schedulers, if they need + * more they have to dynamically allocate it. Flush requests are + * never put on the IO scheduler. So let the flush fields share + * space with the elevator data. + */ + union { + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + + struct { + unsigned int seq; + struct list_head list; + rq_end_io_fn *saved_end_io; + } flush; + }; + + struct gendisk *rq_disk; + struct hd_struct *part; + unsigned long start_time; +#ifdef CONFIG_BLK_CGROUP + unsigned long long start_time_ns; + unsigned long long io_start_time_ns; /* when passed to hardware */ +#endif + /* Number of scatter-gather DMA addr+len pairs after + * physical address coalescing is performed. + */ + unsigned short nr_phys_segments; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + unsigned short nr_integrity_segments; +#endif + + unsigned short ioprio; + + int ref_count; + + void *special; /* opaque pointer available for LLD use */ + char *buffer; /* kaddr of the current segment if available */ + + int tag; + int errors; + + /* + * when request is used as a packet command carrier + */ + unsigned char __cmd[BLK_MAX_CDB]; + unsigned char *cmd; + unsigned short cmd_len; + + unsigned int extra_len; /* length of alignment and padding */ + unsigned int sense_len; + unsigned int resid_len; /* residual count */ + void *sense; + + unsigned long deadline; + struct list_head timeout_list; + unsigned int timeout; + int retries; + + /* + * completion callback. + */ + rq_end_io_fn *end_io; + void *end_io_data; + + /* for bidi */ + struct request *next_rq; +}; + +static inline unsigned short req_get_ioprio(struct request *req) +{ + return req->ioprio; +} + +/* + * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME + * requests. Some step values could eventually be made generic. 
+ */ +struct request_pm_state +{ + /* PM state machine step value, currently driver specific */ + int pm_step; + /* requested PM state value (S1, S2, S3, S4, ...) */ + u32 pm_state; + void* data; /* for driver use */ +}; + +#include <linux/elevator.h> + +typedef void (request_fn_proc) (struct request_queue *q); +typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); +typedef int (prep_rq_fn) (struct request_queue *, struct request *); +typedef void (unprep_rq_fn) (struct request_queue *, struct request *); + +struct bio_vec; +struct bvec_merge_data { + struct block_device *bi_bdev; + sector_t bi_sector; + unsigned bi_size; + unsigned long bi_rw; +}; +typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, + struct bio_vec *); +typedef void (softirq_done_fn)(struct request *); +typedef int (dma_drain_needed_fn)(struct request *); +typedef int (lld_busy_fn) (struct request_queue *q); +typedef int (bsg_job_fn) (struct bsg_job *); + +enum blk_eh_timer_return { + BLK_EH_NOT_HANDLED, + BLK_EH_HANDLED, + BLK_EH_RESET_TIMER, +}; + +typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); + +enum blk_queue_state { + Queue_down, + Queue_up, +}; + +struct blk_queue_tag { + struct request **tag_index; /* map of busy tags */ + unsigned long *tag_map; /* bit map of free/busy tags */ + int busy; /* current depth */ + int max_depth; /* what we will send to device */ + int real_max_depth; /* what the array can hold */ + atomic_t refcnt; /* map can be shared */ +}; + +#define BLK_SCSI_MAX_CMDS (256) +#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) + +struct queue_limits { + unsigned long bounce_pfn; + unsigned long seg_boundary_mask; + + unsigned int max_hw_sectors; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + + unsigned short logical_block_size; + unsigned short max_segments; + unsigned short max_integrity_segments; + + unsigned char misaligned; + unsigned char discard_misaligned; + unsigned char cluster; + unsigned char discard_zeroes_data; +}; + +struct request_queue { + /* + * Together with queue_head for cacheline sharing + */ + struct list_head queue_head; + struct request *last_merge; + struct elevator_queue *elevator; + + /* + * the queue request freelist, one for reads and one for writes + */ + struct request_list rq; + + request_fn_proc *request_fn; + make_request_fn *make_request_fn; + prep_rq_fn *prep_rq_fn; + unprep_rq_fn *unprep_rq_fn; + merge_bvec_fn *merge_bvec_fn; + softirq_done_fn *softirq_done_fn; + rq_timed_out_fn *rq_timed_out_fn; + dma_drain_needed_fn *dma_drain_needed; + lld_busy_fn *lld_busy_fn; + + /* + * Dispatch queue sorting + */ + sector_t end_sector; + struct request *boundary_rq; + + /* + * Delayed queue handling + */ + struct delayed_work delay_work; + + struct backing_dev_info backing_dev_info; + + /* + * The queue owner gets to use this for whatever they like. + * ll_rw_blk doesn't touch it. + */ + void *queuedata; + + /* + * various queue flags, see QUEUE_* below + */ + unsigned long queue_flags; + + /* + * ida allocated id for this queue. Used to index queues from + * ioctx. + */ + int id; + + /* + * queue needs bounce pages for pages above this limit + */ + gfp_t bounce_gfp; + + /* + * protects queue structures from reentrancy. 
->__queue_lock should + * _never_ be used directly, it is queue private. always use + * ->queue_lock. + */ + spinlock_t __queue_lock; + spinlock_t *queue_lock; + + /* + * queue kobject + */ + struct kobject kobj; + + /* + * queue settings + */ + unsigned long nr_requests; /* Max # of requests */ + unsigned int nr_congestion_on; + unsigned int nr_congestion_off; + unsigned int nr_batching; + + unsigned int dma_drain_size; + void *dma_drain_buffer; + unsigned int dma_pad_mask; + unsigned int dma_alignment; + + struct blk_queue_tag *queue_tags; + struct list_head tag_busy_list; + + unsigned int nr_sorted; + unsigned int in_flight[2]; + + unsigned int rq_timeout; + struct timer_list timeout; + struct list_head timeout_list; + + struct list_head icq_list; + + struct queue_limits limits; + + /* + * sg stuff + */ + unsigned int sg_timeout; + unsigned int sg_reserved_size; + int node; +#ifdef CONFIG_BLK_DEV_IO_TRACE + struct blk_trace *blk_trace; +#endif + /* + * for flush operations + */ + unsigned int flush_flags; + unsigned int flush_not_queueable:1; + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request flush_rq; + + struct mutex sysfs_lock; + +#if defined(CONFIG_BLK_DEV_BSG) + bsg_job_fn *bsg_job_fn; + int bsg_job_size; + struct bsg_class_device bsg_dev; +#endif + +#ifdef CONFIG_BLK_DEV_THROTTLING + /* Throttle data */ + struct throtl_data *td; +#endif +}; + +#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ +#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ +#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ +#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ +#define QUEUE_FLAG_DEAD 5 /* queue being torn down */ +#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */ +#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ +#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ +#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ +#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ +#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ +#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ +#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ +#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */ +#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ +#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ +#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ +#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ +#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ + +#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_STACKABLE) | \ + (1 << QUEUE_FLAG_SAME_COMP) | \ + (1 << QUEUE_FLAG_ADD_RANDOM)) + +static inline void queue_lockdep_assert_held(struct request_queue *q) +{ + if (q->queue_lock) + lockdep_assert_held(q->queue_lock); +} + +static inline void queue_flag_set_unlocked(unsigned int flag, + struct request_queue *q) +{ + __set_bit(flag, &q->queue_flags); +} + +static inline int queue_flag_test_and_clear(unsigned int flag, + struct request_queue *q) +{ + queue_lockdep_assert_held(q); + + if (test_bit(flag, &q->queue_flags)) { + __clear_bit(flag, &q->queue_flags); + return 1; + } + + return 0; +} + +static inline int queue_flag_test_and_set(unsigned int flag, + struct request_queue *q) +{ + queue_lockdep_assert_held(q); + + if (!test_bit(flag, 
&q->queue_flags)) { + __set_bit(flag, &q->queue_flags); + return 0; + } + + return 1; +} + +static inline void queue_flag_set(unsigned int flag, struct request_queue *q) +{ + queue_lockdep_assert_held(q); + __set_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear_unlocked(unsigned int flag, + struct request_queue *q) +{ + __clear_bit(flag, &q->queue_flags); +} + +static inline int queue_in_flight(struct request_queue *q) +{ + return q->in_flight[0] + q->in_flight[1]; +} + +static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) +{ + queue_lockdep_assert_held(q); + __clear_bit(flag, &q->queue_flags); +} + +#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) +#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) +#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) +#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) +#define blk_queue_noxmerges(q) \ + test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) +#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) +#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) +#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) +#define blk_queue_stackable(q) \ + test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) +#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) +#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ + test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) + +#define blk_noretry_request(rq) \ + ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ + REQ_FAILFAST_DRIVER)) + +#define blk_account_rq(rq) \ + (((rq)->cmd_flags & REQ_STARTED) && \ + ((rq)->cmd_type == REQ_TYPE_FS || \ + ((rq)->cmd_flags & REQ_DISCARD))) + +#define blk_pm_request(rq) \ + ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ + (rq)->cmd_type == REQ_TYPE_PM_RESUME) + +#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) +#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) +/* rq->queuelist of dequeued request must be list_empty() */ +#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) + +#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) + +#define rq_data_dir(rq) ((rq)->cmd_flags & 1) + +static inline unsigned int blk_queue_cluster(struct request_queue *q) +{ + return q->limits.cluster; +} + +/* + * We regard a request as sync, if either a read or a sync write + */ +static inline bool rw_is_sync(unsigned int rw_flags) +{ + return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); +} + +static inline bool rq_is_sync(struct request *rq) +{ + return rw_is_sync(rq->cmd_flags); +} + +static inline int blk_queue_full(struct request_queue *q, int sync) +{ + if (sync) + return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); + return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); +} + +static inline void blk_set_queue_full(struct request_queue *q, int sync) +{ + if (sync) + queue_flag_set(QUEUE_FLAG_SYNCFULL, q); + else + queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); +} + +static inline void blk_clear_queue_full(struct request_queue *q, int sync) +{ + if (sync) + queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); + else + queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); +} + + +/* + * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may + * it already be started by driver. 
+ */ +#define RQ_NOMERGE_FLAGS \ + (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) +#define rq_mergeable(rq) \ + (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ + (((rq)->cmd_flags & REQ_DISCARD) || \ + (rq)->cmd_type == REQ_TYPE_FS)) + +/* + * q->prep_rq_fn return values + */ +#define BLKPREP_OK 0 /* serve it */ +#define BLKPREP_KILL 1 /* fatal error, kill */ +#define BLKPREP_DEFER 2 /* leave on queue */ + +extern unsigned long blk_max_low_pfn, blk_max_pfn; + +/* + * standard bounce addresses: + * + * BLK_BOUNCE_HIGH : bounce all highmem pages + * BLK_BOUNCE_ANY : don't bounce anything + * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary + */ + +#if BITS_PER_LONG == 32 +#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) +#else +#define BLK_BOUNCE_HIGH -1ULL +#endif +#define BLK_BOUNCE_ANY (-1ULL) +#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) + +/* + * default timeout for SG_IO if none specified + */ +#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) +#define BLK_MIN_SG_TIMEOUT (7 * HZ) + +#ifdef CONFIG_BOUNCE +extern int init_emergency_isa_pool(void); +extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); +#else +static inline int init_emergency_isa_pool(void) +{ + return 0; +} +static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) +{ +} +#endif /* CONFIG_MMU */ + +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; + unsigned long offset; + int null_mapped; + int from_user; +}; + +struct req_iterator { + int i; + struct bio *bio; +}; + +/* This should not be used directly - use rq_for_each_segment */ +#define for_each_bio(_bio) \ + for (; _bio; _bio = _bio->bi_next) +#define __rq_for_each_bio(_bio, rq) \ + if ((rq->bio)) \ + for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) + +#define rq_for_each_segment(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_segment(bvl, _iter.bio, _iter.i) + +#define rq_iter_last(rq, _iter) \ + (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) + +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" +#endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +extern void rq_flush_dcache_pages(struct request *rq); +#else +static inline void rq_flush_dcache_pages(struct request *rq) +{ +} +#endif + +extern int blk_register_queue(struct gendisk *disk); +extern void blk_unregister_queue(struct gendisk *disk); +extern void generic_make_request(struct bio *bio); +extern void blk_rq_init(struct request_queue *q, struct request *rq); +extern void blk_put_request(struct request *); +extern void __blk_put_request(struct request_queue *, struct request *); +extern struct request *blk_get_request(struct request_queue *, int, gfp_t); +extern struct request *blk_make_request(struct request_queue *, struct bio *, + gfp_t); +extern void blk_requeue_request(struct request_queue *, struct request *); +extern void blk_add_request_payload(struct request *rq, struct page *page, + unsigned int len); +extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); +extern int blk_lld_busy(struct request_queue *q); +extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), + void *data); +extern void blk_rq_unprep_clone(struct request *rq); +extern int blk_insert_cloned_request(struct request_queue *q, + struct request *rq); +extern void blk_delay_queue(struct request_queue *, 
unsigned long);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
+extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
+extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
+ unsigned int, void __user *);
+extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+ unsigned int, void __user *);
+extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+ struct scsi_ioctl_command __user *);
+
+extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
+
+/*
+ * A queue has just exited congestion. Note this in the global counter of
+ * congested queues, and wake up anyone who was waiting for requests to be
+ * put back.
+ */
+static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
+{
+ clear_bdi_congested(&q->backing_dev_info, sync);
+}
+
+/*
+ * A queue has just entered congestion. Flag that in the queue's VM-visible
+ * state flags and increment the global counter of congested queues.
+ */
+static inline void blk_set_queue_congested(struct request_queue *q, int sync)
+{
+ set_bdi_congested(&q->backing_dev_info, sync);
+}
+
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
+extern void blk_sync_queue(struct request_queue *q);
+extern void __blk_stop_queue(struct request_queue *q);
+extern void __blk_run_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
+extern int blk_rq_map_user(struct request_queue *, struct request *,
+ struct rq_map_data *, void __user *, unsigned long,
+ gfp_t);
+extern int blk_rq_unmap_user(struct bio *);
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, struct sg_iovec *, int,
+ unsigned int, gfp_t);
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
+ struct request *, int);
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
+ struct request *, int, rq_end_io_fn *);
+
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
+{
+ return bdev->bd_disk->queue;
+}
+
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_err_bytes() : bytes left till the next error boundary
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+extern unsigned int blk_rq_err_bytes(const struct request *rq);
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> 9;
+}
+
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
+
+/*
+ * Request completion related functions.
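 *
 * A minimal completion sketch (illustrative only, not from the original
 * comment; rq, error and nr_bytes stand for whatever the driver tracks):
 * a low-level driver that has transferred nr_bytes of rq would typically do
 *
 *	if (!blk_end_request(rq, error, nr_bytes))
 *		return;		the request was completed in full
 *
 * and otherwise leave rq active, since bytes remain to be transferred.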
+ * + * blk_update_request() completes given number of bytes and updates + * the request without completing it. + * + * blk_end_request() and friends. __blk_end_request() must be called + * with the request queue spinlock acquired. + * + * Several drivers define their own end_request and call + * blk_end_request() for parts of the original function. + * This prevents code duplication in drivers. + */ +extern bool blk_update_request(struct request *rq, int error, + unsigned int nr_bytes); +extern bool blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern void blk_end_request_all(struct request *rq, int error); +extern bool blk_end_request_cur(struct request *rq, int error); +extern bool blk_end_request_err(struct request *rq, int error); +extern bool __blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern void __blk_end_request_all(struct request *rq, int error); +extern bool __blk_end_request_cur(struct request *rq, int error); +extern bool __blk_end_request_err(struct request *rq, int error); + +extern void blk_complete_request(struct request *); +extern void __blk_complete_request(struct request *); +extern void blk_abort_request(struct request *); +extern void blk_abort_queue(struct request_queue *); +extern void blk_unprep_request(struct request *); + +/* + * Access functions for manipulating queue properties + */ +extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, + spinlock_t *lock, int node_id); +extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); +extern struct request_queue *blk_init_allocated_queue(struct request_queue *, + request_fn_proc *, spinlock_t *); +extern void blk_cleanup_queue(struct request_queue *); +extern void blk_queue_make_request(struct request_queue *, make_request_fn *); +extern void blk_queue_bounce_limit(struct request_queue *, u64); +extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); +extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); +extern void blk_queue_max_segments(struct request_queue *, unsigned short); +extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); +extern void blk_queue_max_discard_sectors(struct request_queue *q, + unsigned int max_discard_sectors); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); +extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); +extern void blk_queue_alignment_offset(struct request_queue *q, + unsigned int alignment); +extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); +extern void blk_queue_io_min(struct request_queue *q, unsigned int min); +extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); +extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); +extern void blk_set_default_limits(struct queue_limits *lim); +extern void blk_set_stacking_limits(struct queue_limits *lim); +extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t offset); +extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, + sector_t offset); +extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset); +extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); +extern void blk_queue_dma_pad(struct request_queue *, unsigned int); +extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); +extern int 
blk_queue_dma_drain(struct request_queue *q, + dma_drain_needed_fn *dma_drain_needed, + void *buf, unsigned int size); +extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); +extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); +extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); +extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); +extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); +extern void blk_queue_dma_alignment(struct request_queue *, int); +extern void blk_queue_update_dma_alignment(struct request_queue *, int); +extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); +extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); +extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); +extern void blk_queue_flush(struct request_queue *q, unsigned int flush); +extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); +extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); + +extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); +extern void blk_dump_rq_flags(struct request *, char *); +extern long nr_blockdev_pages(void); + +bool __must_check blk_get_queue(struct request_queue *); +struct request_queue *blk_alloc_queue(gfp_t); +struct request_queue *blk_alloc_queue_node(gfp_t, int); +extern void blk_put_queue(struct request_queue *); + +/* + * blk_plug permits building a queue of related requests by holding the I/O + * fragments for a short period. This allows merging of sequential requests + * into single larger request. As the requests are moved from a per-task list to + * the device's request_queue in a batch, this results in improved scalability + * as the lock contention for request_queue lock is reduced. + * + * It is ok not to disable preemption when adding the request to the plug list + * or when attempting a merge, because blk_schedule_flush_list() will only flush + * the plug list when the task sleeps by itself. For details, please see + * schedule() where blk_schedule_flush_plug() is called. + */ +struct blk_plug { + unsigned long magic; /* detect uninitialized use-cases */ + struct list_head list; /* requests */ + struct list_head cb_list; /* md requires an unplug callback */ + unsigned int should_sort; /* list to be sorted before flushing? 
*/ +}; +#define BLK_MAX_REQUEST_COUNT 16 + +struct blk_plug_cb { + struct list_head list; + void (*callback)(struct blk_plug_cb *); +}; + +extern void blk_start_plug(struct blk_plug *); +extern void blk_finish_plug(struct blk_plug *); +extern void blk_flush_plug_list(struct blk_plug *, bool); + +static inline void blk_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + if (plug) + blk_flush_plug_list(plug, false); +} + +static inline void blk_schedule_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + if (plug) + blk_flush_plug_list(plug, true); +} + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); +} + +/* + * tag stuff + */ +#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) +extern int blk_queue_start_tag(struct request_queue *, struct request *); +extern struct request *blk_queue_find_tag(struct request_queue *, int); +extern void blk_queue_end_tag(struct request_queue *, struct request *); +extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); +extern void blk_queue_free_tags(struct request_queue *); +extern int blk_queue_resize_tags(struct request_queue *, int); +extern void blk_queue_invalidate_tags(struct request_queue *); +extern struct blk_queue_tag *blk_init_tags(int); +extern void blk_free_tags(struct blk_queue_tag *); + +static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, + int tag) +{ + if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) + return NULL; + return bqt->tag_index[tag]; +} + +#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ + +extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); +extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); +extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask); +static inline int sb_issue_discard(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) +{ + return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), + nr_blocks << (sb->s_blocksize_bits - 9), + gfp_mask, flags); +} +static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask) +{ + return blkdev_issue_zeroout(sb->s_bdev, + block << (sb->s_blocksize_bits - 9), + nr_blocks << (sb->s_blocksize_bits - 9), + gfp_mask); +} + +extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_DEF_MAX_SECTORS = 1024, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, +}; + +#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) + +static inline unsigned long queue_bounce_pfn(struct request_queue *q) +{ + return q->limits.bounce_pfn; +} + +static inline unsigned long queue_segment_boundary(struct request_queue *q) +{ + return q->limits.seg_boundary_mask; +} + +static inline unsigned int queue_max_sectors(struct request_queue *q) +{ + return q->limits.max_sectors; +} + +static inline unsigned int queue_max_hw_sectors(struct request_queue *q) +{ + return q->limits.max_hw_sectors; +} + +static inline unsigned short queue_max_segments(struct request_queue *q) +{ + return q->limits.max_segments; +} + +static 
inline unsigned int queue_max_segment_size(struct request_queue *q) +{ + return q->limits.max_segment_size; +} + +static inline unsigned short queue_logical_block_size(struct request_queue *q) +{ + int retval = 512; + + if (q && q->limits.logical_block_size) + retval = q->limits.logical_block_size; + + return retval; +} + +static inline unsigned short bdev_logical_block_size(struct block_device *bdev) +{ + return queue_logical_block_size(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_physical_block_size(struct request_queue *q) +{ + return q->limits.physical_block_size; +} + +static inline unsigned int bdev_physical_block_size(struct block_device *bdev) +{ + return queue_physical_block_size(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_io_min(struct request_queue *q) +{ + return q->limits.io_min; +} + +static inline int bdev_io_min(struct block_device *bdev) +{ + return queue_io_min(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_io_opt(struct request_queue *q) +{ + return q->limits.io_opt; +} + +static inline int bdev_io_opt(struct block_device *bdev) +{ + return queue_io_opt(bdev_get_queue(bdev)); +} + +static inline int queue_alignment_offset(struct request_queue *q) +{ + if (q->limits.misaligned) + return -1; + + return q->limits.alignment_offset; +} + +static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) +{ + unsigned int granularity = max(lim->physical_block_size, lim->io_min); + unsigned int alignment = (sector << 9) & (granularity - 1); + + return (granularity + lim->alignment_offset - alignment) + & (granularity - 1); +} + +static inline int bdev_alignment_offset(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q->limits.misaligned) + return -1; + + if (bdev != bdev->bd_contains) + return bdev->bd_part->alignment_offset; + + return q->limits.alignment_offset; +} + +static inline int queue_discard_alignment(struct request_queue *q) +{ + if (q->limits.discard_misaligned) + return -1; + + return q->limits.discard_alignment; +} + +static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) +{ + unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); + + if (!lim->max_discard_sectors) + return 0; + + return (lim->discard_granularity + lim->discard_alignment - alignment) + & (lim->discard_granularity - 1); +} + +static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) +{ + if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) + return 1; + + return 0; +} + +static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) +{ + return queue_discard_zeroes_data(bdev_get_queue(bdev)); +} + +static inline int queue_dma_alignment(struct request_queue *q) +{ + return q ? 
q->dma_alignment : 511; +} + +static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, + unsigned int len) +{ + unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; + return !(addr & alignment) && !(len & alignment); +} + +/* assumes size > 256 */ +static inline unsigned int blksize_bits(unsigned int size) +{ + unsigned int bits = 8; + do { + bits++; + size >>= 1; + } while (size > 256); + return bits; +} + +static inline unsigned int block_size(struct block_device *bdev) +{ + return bdev->bd_block_size; +} + +static inline bool queue_flush_queueable(struct request_queue *q) +{ + return !q->flush_not_queueable; +} + +typedef struct {struct page *v;} Sector; + +unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); + +static inline void put_dev_sector(Sector p) +{ + page_cache_release(p.v); +} + +struct work_struct; +int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); + +#ifdef CONFIG_BLK_CGROUP +/* + * This should not be using sched_clock(). A real patch is in progress + * to fix this up, until that is in place we need to disable preemption + * around sched_clock() in this function and set_io_start_time_ns(). + */ +static inline void set_start_time_ns(struct request *req) +{ + preempt_disable(); + req->start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline void set_io_start_time_ns(struct request *req) +{ + preempt_disable(); + req->io_start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline uint64_t rq_start_time_ns(struct request *req) +{ + return req->start_time_ns; +} + +static inline uint64_t rq_io_start_time_ns(struct request *req) +{ + return req->io_start_time_ns; +} +#else +static inline void set_start_time_ns(struct request *req) {} +static inline void set_io_start_time_ns(struct request *req) {} +static inline uint64_t rq_start_time_ns(struct request *req) +{ + return 0; +} +static inline uint64_t rq_io_start_time_ns(struct request *req) +{ + return 0; +} +#endif + +#define MODULE_ALIAS_BLOCKDEV(major,minor) \ + MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) +#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ + MODULE_ALIAS("block-major-" __stringify(major) "-*") + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */ +#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */ + +struct blk_integrity_exchg { + void *prot_buf; + void *data_buf; + sector_t sector; + unsigned int data_size; + unsigned short sector_size; + const char *disk_name; +}; + +typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); +typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); +typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); +typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); + +struct blk_integrity { + integrity_gen_fn *generate_fn; + integrity_vrfy_fn *verify_fn; + integrity_set_tag_fn *set_tag_fn; + integrity_get_tag_fn *get_tag_fn; + + unsigned short flags; + unsigned short tuple_size; + unsigned short sector_size; + unsigned short tag_size; + + const char *name; + + struct kobject kobj; +}; + +extern bool blk_integrity_is_initialized(struct gendisk *); +extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); +extern void blk_integrity_unregister(struct gendisk *); +extern int blk_integrity_compare(struct gendisk *, struct gendisk *); +extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, + struct scatterlist 
*); +extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); +extern int blk_integrity_merge_rq(struct request_queue *, struct request *, + struct request *); +extern int blk_integrity_merge_bio(struct request_queue *, struct request *, + struct bio *); + +static inline +struct blk_integrity *bdev_get_integrity(struct block_device *bdev) +{ + return bdev->bd_disk->integrity; +} + +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + return disk->integrity; +} + +static inline int blk_integrity_rq(struct request *rq) +{ + if (rq->bio == NULL) + return 0; + + return bio_integrity(rq->bio); +} + +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ + q->limits.max_integrity_segments = segs; +} + +static inline unsigned short +queue_max_integrity_segments(struct request_queue *q) +{ + return q->limits.max_integrity_segments; +} + +#else /* CONFIG_BLK_DEV_INTEGRITY */ + +struct bio; +struct block_device; +struct gendisk; +struct blk_integrity; + +static inline int blk_integrity_rq(struct request *rq) +{ + return 0; +} +static inline int blk_rq_count_integrity_sg(struct request_queue *q, + struct bio *b) +{ + return 0; +} +static inline int blk_rq_map_integrity_sg(struct request_queue *q, + struct bio *b, + struct scatterlist *s) +{ + return 0; +} +static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) +{ + return 0; +} +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + return NULL; +} +static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) +{ + return 0; +} +static inline int blk_integrity_register(struct gendisk *d, + struct blk_integrity *b) +{ + return 0; +} +static inline void blk_integrity_unregister(struct gendisk *d) +{ +} +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ +} +static inline unsigned short queue_max_integrity_segments(struct request_queue *q) +{ + return 0; +} +static inline int blk_integrity_merge_rq(struct request_queue *rq, + struct request *r1, + struct request *r2) +{ + return 0; +} +static inline int blk_integrity_merge_bio(struct request_queue *rq, + struct request *r, + struct bio *b) +{ + return 0; +} +static inline bool blk_integrity_is_initialized(struct gendisk *g) +{ + return 0; +} + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + +struct block_device_operations { + int (*open) (struct block_device *, fmode_t); + int (*release) (struct gendisk *, fmode_t); + int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*direct_access) (struct block_device *, sector_t, + void **, unsigned long *); + unsigned int (*check_events) (struct gendisk *disk, + unsigned int clearing); + /* ->media_changed() is DEPRECATED, use ->check_events() instead */ + int (*media_changed) (struct gendisk *); + void (*unlock_native_capacity) (struct gendisk *); + int (*revalidate_disk) (struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + /* this callback is with swap_lock and sometimes page table lock held */ + void (*swap_slot_free_notify) (struct block_device *, unsigned long); + struct module *owner; +}; + +extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, + unsigned long); +#else /* CONFIG_BLOCK */ +/* + * stubs for when the block layer is configured out + */ +#define buffer_heads_over_limit 0 + +static inline 
long nr_blockdev_pages(void) +{ + return 0; +} + +struct blk_plug { +}; + +static inline void blk_start_plug(struct blk_plug *plug) +{ +} + +static inline void blk_finish_plug(struct blk_plug *plug) +{ +} + +static inline void blk_flush_plug(struct task_struct *task) +{ +} + +static inline void blk_schedule_flush_plug(struct task_struct *task) +{ +} + + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + return false; +} + +#endif /* CONFIG_BLOCK */ + +#endif diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h new file mode 100644 index 00000000..faf8a45a --- /dev/null +++ b/include/linux/blkpg.h @@ -0,0 +1,58 @@ +#ifndef _LINUX_BLKPG_H +#define _LINUX_BLKPG_H + +/* + * Partition table and disk geometry handling + * + * A single ioctl with lots of subfunctions: + * + * Device number stuff: + * get_whole_disk() (given the device number of a partition, + * find the device number of the encompassing disk) + * get_all_partitions() (given the device number of a disk, return the + * device numbers of all its known partitions) + * + * Partition stuff: + * add_partition() + * delete_partition() + * test_partition_in_use() (also for test_disk_in_use) + * + * Geometry stuff: + * get_geometry() + * set_geometry() + * get_bios_drivedata() + * + * For today, only the partition stuff - aeb, 990515 + */ +#include <linux/compiler.h> +#include <linux/ioctl.h> + +#define BLKPG _IO(0x12,105) + +/* The argument structure */ +struct blkpg_ioctl_arg { + int op; + int flags; + int datalen; + void __user *data; +}; + +/* The subfunctions (for the op field) */ +#define BLKPG_ADD_PARTITION 1 +#define BLKPG_DEL_PARTITION 2 + +/* Sizes of name fields. Unused at present. */ +#define BLKPG_DEVNAMELTH 64 +#define BLKPG_VOLNAMELTH 64 + +/* The data structure for ADD_PARTITION and DEL_PARTITION */ +struct blkpg_partition { + long long start; /* starting offset in bytes */ + long long length; /* length in bytes */ + int pno; /* partition number */ + char devname[BLKPG_DEVNAMELTH]; /* partition name, like sda5 or c0d1p2, + to be used in kernel messages */ + char volname[BLKPG_VOLNAMELTH]; /* volume label */ +}; + +#endif /* _LINUX_BLKPG_H */ diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h new file mode 100644 index 00000000..4d1a0748 --- /dev/null +++ b/include/linux/blktrace_api.h @@ -0,0 +1,254 @@ +#ifndef BLKTRACE_H +#define BLKTRACE_H + +#include <linux/types.h> +#ifdef __KERNEL__ +#include <linux/blkdev.h> +#include <linux/relay.h> +#include <linux/compat.h> +#endif + +/* + * Trace categories + */ +enum blktrace_cat { + BLK_TC_READ = 1 << 0, /* reads */ + BLK_TC_WRITE = 1 << 1, /* writes */ + BLK_TC_FLUSH = 1 << 2, /* flush */ + BLK_TC_SYNC = 1 << 3, /* sync IO */ + BLK_TC_SYNCIO = BLK_TC_SYNC, + BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ + BLK_TC_REQUEUE = 1 << 5, /* requeueing */ + BLK_TC_ISSUE = 1 << 6, /* issue */ + BLK_TC_COMPLETE = 1 << 7, /* completions */ + BLK_TC_FS = 1 << 8, /* fs requests */ + BLK_TC_PC = 1 << 9, /* pc requests */ + BLK_TC_NOTIFY = 1 << 10, /* special message */ + BLK_TC_AHEAD = 1 << 11, /* readahead */ + BLK_TC_META = 1 << 12, /* metadata */ + BLK_TC_DISCARD = 1 << 13, /* discard requests */ + BLK_TC_DRV_DATA = 1 << 14, /* binary per-driver data */ + BLK_TC_FUA = 1 << 15, /* fua requests */ + + BLK_TC_END = 1 << 15, /* we've run out of bits! 
*/ +}; + +#define BLK_TC_SHIFT (16) +#define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT) + +/* + * Basic trace actions + */ +enum blktrace_act { + __BLK_TA_QUEUE = 1, /* queued */ + __BLK_TA_BACKMERGE, /* back merged to existing rq */ + __BLK_TA_FRONTMERGE, /* front merge to existing rq */ + __BLK_TA_GETRQ, /* allocated new request */ + __BLK_TA_SLEEPRQ, /* sleeping on rq allocation */ + __BLK_TA_REQUEUE, /* request requeued */ + __BLK_TA_ISSUE, /* sent to driver */ + __BLK_TA_COMPLETE, /* completed by driver */ + __BLK_TA_PLUG, /* queue was plugged */ + __BLK_TA_UNPLUG_IO, /* queue was unplugged by io */ + __BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */ + __BLK_TA_INSERT, /* insert request */ + __BLK_TA_SPLIT, /* bio was split */ + __BLK_TA_BOUNCE, /* bio was bounced */ + __BLK_TA_REMAP, /* bio was remapped */ + __BLK_TA_ABORT, /* request aborted */ + __BLK_TA_DRV_DATA, /* driver-specific binary data */ +}; + +/* + * Notify events. + */ +enum blktrace_notify { + __BLK_TN_PROCESS = 0, /* establish pid/name mapping */ + __BLK_TN_TIMESTAMP, /* include system clock */ + __BLK_TN_MESSAGE, /* Character string message */ +}; + + +/* + * Trace actions in full. Additionally, read or write is masked + */ +#define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE)) +#define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE)) +#define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE)) +#define BLK_TA_PLUG (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_UNPLUG_IO (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_UNPLUG_TIMER (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_INSERT (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_SPLIT (__BLK_TA_SPLIT) +#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE) +#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_DRV_DATA (__BLK_TA_DRV_DATA | BLK_TC_ACT(BLK_TC_DRV_DATA)) + +#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY)) +#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY)) +#define BLK_TN_MESSAGE (__BLK_TN_MESSAGE | BLK_TC_ACT(BLK_TC_NOTIFY)) + +#define BLK_IO_TRACE_MAGIC 0x65617400 +#define BLK_IO_TRACE_VERSION 0x07 + +/* + * The trace itself + */ +struct blk_io_trace { + __u32 magic; /* MAGIC << 8 | version */ + __u32 sequence; /* event number */ + __u64 time; /* in microseconds */ + __u64 sector; /* disk offset */ + __u32 bytes; /* transfer length */ + __u32 action; /* what happened */ + __u32 pid; /* who did it */ + __u32 device; /* device number */ + __u32 cpu; /* on what cpu did it happen */ + __u16 error; /* completion error */ + __u16 pdu_len; /* length of data after this trace */ +}; + +/* + * The remap event + */ +struct blk_io_trace_remap { + __be32 device_from; + __be32 device_to; + __be64 sector_from; +}; + +enum { + Blktrace_setup = 1, + Blktrace_running, + Blktrace_stopped, +}; + +#define BLKTRACE_BDEV_SIZE 32 + +/* + * User setup structure passed with BLKTRACESTART + */ +struct blk_user_trace_setup { + char name[BLKTRACE_BDEV_SIZE]; /* output */ + __u16 act_mask; /* input */ + __u32 
buf_size; /* input */ + __u32 buf_nr; /* input */ + __u64 start_lba; + __u64 end_lba; + __u32 pid; +}; + +#ifdef __KERNEL__ +#if defined(CONFIG_BLK_DEV_IO_TRACE) + +#include <linux/sysfs.h> + +struct blk_trace { + int trace_state; + struct rchan *rchan; + unsigned long __percpu *sequence; + unsigned char __percpu *msg_data; + u16 act_mask; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct dentry *dropped_file; + struct dentry *msg_file; + atomic_t dropped; +}; + +extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); +extern void blk_trace_shutdown(struct request_queue *); +extern int do_blk_trace_setup(struct request_queue *q, char *name, + dev_t dev, struct block_device *bdev, + struct blk_user_trace_setup *buts); +extern __printf(2, 3) +void __trace_note_message(struct blk_trace *, const char *fmt, ...); + +/** + * blk_add_trace_msg - Add a (simple) message to the blktrace stream + * @q: queue the io is for + * @fmt: format to print message in + * args... Variable argument list for format + * + * Description: + * Records a (simple) message onto the blktrace stream. + * + * NOTE: BLK_TN_MAX_MSG characters are output at most. + * NOTE: Can not use 'static inline' due to presence of var args... + * + **/ +#define blk_add_trace_msg(q, fmt, ...) \ + do { \ + struct blk_trace *bt = (q)->blk_trace; \ + if (unlikely(bt)) \ + __trace_note_message(bt, fmt, ##__VA_ARGS__); \ + } while (0) +#define BLK_TN_MAX_MSG 128 + +extern void blk_add_driver_data(struct request_queue *q, struct request *rq, + void *data, size_t len); +extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + struct block_device *bdev, + char __user *arg); +extern int blk_trace_startstop(struct request_queue *q, int start); +extern int blk_trace_remove(struct request_queue *q); +extern void blk_trace_remove_sysfs(struct device *dev); +extern int blk_trace_init_sysfs(struct device *dev); + +extern struct attribute_group blk_trace_attr_group; + +#else /* !CONFIG_BLK_DEV_IO_TRACE */ +# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) +# define blk_trace_shutdown(q) do { } while (0) +# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY) +# define blk_add_driver_data(q, rq, data, len) do {} while (0) +# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) +# define blk_trace_startstop(q, start) (-ENOTTY) +# define blk_trace_remove(q) (-ENOTTY) +# define blk_add_trace_msg(q, fmt, ...) do { } while (0) +# define blk_trace_remove_sysfs(dev) do { } while (0) +static inline int blk_trace_init_sysfs(struct device *dev) +{ + return 0; +} + +#endif /* CONFIG_BLK_DEV_IO_TRACE */ + +#ifdef CONFIG_COMPAT + +struct compat_blk_user_trace_setup { + char name[32]; + u16 act_mask; + u32 buf_size; + u32 buf_nr; + compat_u64 start_lba; + compat_u64 end_lba; + u32 pid; +}; +#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup) + +#endif + +#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) + +static inline int blk_cmd_buf_len(struct request *rq) +{ + return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
rq->cmd_len * 3 : 1; +} + +extern void blk_dump_cmd(char *buf, struct request *rq); +extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); + +#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h new file mode 100644 index 00000000..e44b88ba --- /dev/null +++ b/include/linux/blockgroup_lock.h @@ -0,0 +1,62 @@ +#ifndef _LINUX_BLOCKGROUP_LOCK_H +#define _LINUX_BLOCKGROUP_LOCK_H +/* + * Per-blockgroup locking for ext2 and ext3. + * + * Simple hashed spinlocking. + */ + +#include <linux/spinlock.h> +#include <linux/cache.h> + +#ifdef CONFIG_SMP + +/* + * We want a power-of-two. Is there a better way than this? + */ + +#if NR_CPUS >= 32 +#define NR_BG_LOCKS 128 +#elif NR_CPUS >= 16 +#define NR_BG_LOCKS 64 +#elif NR_CPUS >= 8 +#define NR_BG_LOCKS 32 +#elif NR_CPUS >= 4 +#define NR_BG_LOCKS 16 +#elif NR_CPUS >= 2 +#define NR_BG_LOCKS 8 +#else +#define NR_BG_LOCKS 4 +#endif + +#else /* CONFIG_SMP */ +#define NR_BG_LOCKS 1 +#endif /* CONFIG_SMP */ + +struct bgl_lock { + spinlock_t lock; +} ____cacheline_aligned_in_smp; + +struct blockgroup_lock { + struct bgl_lock locks[NR_BG_LOCKS]; +}; + +static inline void bgl_lock_init(struct blockgroup_lock *bgl) +{ + int i; + + for (i = 0; i < NR_BG_LOCKS; i++) + spin_lock_init(&bgl->locks[i].lock); +} + +/* + * The accessor is a macro so we can embed a blockgroup_lock into different + * superblock types + */ +static inline spinlock_t * +bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group) +{ + return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock; +} + +#endif diff --git a/include/linux/bma150.h b/include/linux/bma150.h new file mode 100644 index 00000000..7911fda2 --- /dev/null +++ b/include/linux/bma150.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2011 Bosch Sensortec GmbH + * Copyright (c) 2011 Unixphere + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef _BMA150_H_ +#define _BMA150_H_ + +#define BMA150_DRIVER "bma150" + +struct bma150_cfg { + bool any_motion_int; /* Set to enable any-motion interrupt */ + bool hg_int; /* Set to enable high-G interrupt */ + bool lg_int; /* Set to enable low-G interrupt */ + unsigned char any_motion_dur; /* Any-motion duration */ + unsigned char any_motion_thres; /* Any-motion threshold */ + unsigned char hg_hyst; /* High-G hysterisis */ + unsigned char hg_dur; /* High-G duration */ + unsigned char hg_thres; /* High-G threshold */ + unsigned char lg_hyst; /* Low-G hysterisis */ + unsigned char lg_dur; /* Low-G duration */ + unsigned char lg_thres; /* Low-G threshold */ + unsigned char range; /* BMA0150_RANGE_xxx (in G) */ + unsigned char bandwidth; /* BMA0150_BW_xxx (in Hz) */ +}; + +struct bma150_platform_data { + struct bma150_cfg cfg; + int (*irq_gpio_cfg)(void); +}; + +#endif /* _BMA150_H_ */ diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h new file mode 100644 index 00000000..66d3e954 --- /dev/null +++ b/include/linux/bootmem.h @@ -0,0 +1,174 @@ +/* + * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 + */ +#ifndef _LINUX_BOOTMEM_H +#define _LINUX_BOOTMEM_H + +#include <linux/mmzone.h> +#include <asm/dma.h> + +/* + * simple boot-time physical memory area allocator. + */ + +extern unsigned long max_low_pfn; +extern unsigned long min_low_pfn; + +/* + * highest page + */ +extern unsigned long max_pfn; + +#ifndef CONFIG_NO_BOOTMEM +/* + * node_bootmem_map is a map pointer - the bits represent all physical + * memory pages (including holes) on the node. + */ +typedef struct bootmem_data { + unsigned long node_min_pfn; + unsigned long node_low_pfn; + void *node_bootmem_map; + unsigned long last_end_off; + unsigned long hint_idx; + struct list_head list; +} bootmem_data_t; + +extern bootmem_data_t bootmem_node_data[]; +#endif + +extern unsigned long bootmem_bootmap_pages(unsigned long); + +extern unsigned long init_bootmem_node(pg_data_t *pgdat, + unsigned long freepfn, + unsigned long startpfn, + unsigned long endpfn); +extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); + +extern unsigned long free_low_memory_core_early(int nodeid); +extern unsigned long free_all_bootmem_node(pg_data_t *pgdat); +extern unsigned long free_all_bootmem(void); + +extern void free_bootmem_node(pg_data_t *pgdat, + unsigned long addr, + unsigned long size); +extern void free_bootmem(unsigned long addr, unsigned long size); +extern void free_bootmem_late(unsigned long addr, unsigned long size); + +/* + * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, + * the architecture-specific code should honor this). + * + * If flags is 0, then the return value is always 0 (success). If + * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the + * memory already was reserved. 
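+ *
+ * A caller that needs exclusive ownership of a range can therefore,
+ * for example (illustrative sketch only; addr and size are
+ * caller-chosen), check:
+ *
+ *	if (reserve_bootmem(addr, size, BOOTMEM_EXCLUSIVE) == -EBUSY)
+ *		... the range was already reserved elsewhere ...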
+ */ +#define BOOTMEM_DEFAULT 0 +#define BOOTMEM_EXCLUSIVE (1<<0) + +extern int reserve_bootmem(unsigned long addr, + unsigned long size, + int flags); +extern int reserve_bootmem_node(pg_data_t *pgdat, + unsigned long physaddr, + unsigned long size, + int flags); + +extern void *__alloc_bootmem(unsigned long size, + unsigned long align, + unsigned long goal); +extern void *__alloc_bootmem_nopanic(unsigned long size, + unsigned long align, + unsigned long goal); +extern void *__alloc_bootmem_node(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal); +void *__alloc_bootmem_node_high(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal); +extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal); +extern void *__alloc_bootmem_low(unsigned long size, + unsigned long align, + unsigned long goal); +extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal); + +#ifdef CONFIG_NO_BOOTMEM +/* We are using top down, so it is safe to use 0 here */ +#define BOOTMEM_LOW_LIMIT 0 +#else +#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS) +#endif + +#define alloc_bootmem(x) \ + __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_align(x, align) \ + __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages(x) \ + __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages_nopanic(x) \ + __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_node(pgdat, x) \ + __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages_node(pgdat, x) \ + __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) +#define alloc_bootmem_pages_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT) + +#define alloc_bootmem_low(x) \ + __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) +#define alloc_bootmem_low_pages(x) \ + __alloc_bootmem_low(x, PAGE_SIZE, 0) +#define alloc_bootmem_low_pages_node(pgdat, x) \ + __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) + +extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, + int flags); + +extern void *alloc_bootmem_section(unsigned long size, + unsigned long section_nr); + +#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP +extern void *alloc_remap(int nid, unsigned long size); +#else +static inline void *alloc_remap(int nid, unsigned long size) +{ + return NULL; +} +#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */ + +extern void *alloc_large_system_hash(const char *tablename, + unsigned long bucketsize, + unsigned long numentries, + int scale, + int flags, + unsigned int *_hash_shift, + unsigned int *_hash_mask, + unsigned long limit); + +#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ +#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min + * shift passed via *_hash_shift */ + +/* Only NUMA needs hash distribution. 64bit NUMA architectures have + * sufficient vmalloc space. + */ +#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT) +#define HASHDIST_DEFAULT 1 +#else +#define HASHDIST_DEFAULT 0 +#endif +extern int hashdist; /* Distribute hashes across NUMA nodes? 
*/ + + +#endif /* _LINUX_BOOTMEM_H */ diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h new file mode 100644 index 00000000..27b1bcff --- /dev/null +++ b/include/linux/bottom_half.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_BH_H +#define _LINUX_BH_H + +extern void local_bh_disable(void); +extern void _local_bh_enable(void); +extern void local_bh_enable(void); +extern void local_bh_enable_ip(unsigned long ip); + +#endif /* _LINUX_BH_H */ diff --git a/include/linux/bpqether.h b/include/linux/bpqether.h new file mode 100644 index 00000000..a6c35e1a --- /dev/null +++ b/include/linux/bpqether.h @@ -0,0 +1,41 @@ +#ifndef __BPQETHER_H +#define __BPQETHER_H + +/* + * Defines for the BPQETHER pseudo device driver + */ + +#ifndef __LINUX_IF_ETHER_H +#include <linux/if_ether.h> +#endif + +#define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ +#define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) + +struct bpq_ethaddr { + unsigned char destination[ETH_ALEN]; + unsigned char accept[ETH_ALEN]; +}; + +/* + * For SIOCSBPQETHOPT - this is compatible with PI2/PacketTwin card drivers, + * currently not implemented, though. If someone wants to hook a radio + * to his Ethernet card he may find this useful. ;-) + */ + +#define SIOCGBPQETHPARAM 0x5000 /* get Level 1 parameters */ +#define SIOCSBPQETHPARAM 0x5001 /* set */ + +struct bpq_req { + int cmd; + int speed; /* unused */ + int clockmode; /* unused */ + int txdelay; + unsigned char persist; /* unused */ + int slotime; /* unused */ + int squeldelay; + int dmachan; /* unused */ + int irq; /* unused */ +}; + +#endif diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h new file mode 100644 index 00000000..b840a496 --- /dev/null +++ b/include/linux/brcmphy.h @@ -0,0 +1,31 @@ +#define PHY_ID_BCM50610 0x0143bd60 +#define PHY_ID_BCM50610M 0x0143bd70 +#define PHY_ID_BCM5241 0x0143bc30 +#define PHY_ID_BCMAC131 0x0143bc70 +#define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM5482 0x0143bcb0 +#define PHY_ID_BCM5411 0x00206070 +#define PHY_ID_BCM5421 0x002060e0 +#define PHY_ID_BCM5464 0x002060b0 +#define PHY_ID_BCM5461 0x002060c0 +#define PHY_ID_BCM57780 0x03625d90 + +#define PHY_BCM_OUI_MASK 0xfffffc00 +#define PHY_BCM_OUI_1 0x00206000 +#define PHY_BCM_OUI_2 0x0143bc00 +#define PHY_BCM_OUI_3 0x03625c00 + + +#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 +#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 +#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010 +#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020 +#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100 +#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200 +#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400 +#define PHY_BRCM_STD_IBND_DISABLE 0x00000800 +#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000 +#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 +#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 +#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 +#define PHY_BCM_FLAGS_VALID 0x80000000 diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h new file mode 100644 index 00000000..90b1aa86 --- /dev/null +++ b/include/linux/bsearch.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_BSEARCH_H +#define _LINUX_BSEARCH_H + +#include <linux/types.h> + +void *bsearch(const void *key, const void *base, size_t num, size_t size, + int (*cmp)(const void *key, const void *elt)); + +#endif /* _LINUX_BSEARCH_H */ diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h new file mode 100644 index 00000000..f55ab8cd --- /dev/null +++ b/include/linux/bsg-lib.h @@ -0,0 +1,73 @@ +/* + * BSG helper library + * + * Copyright (C) 2008 James Smart, Emulex Corporation + * 
Copyright (C) 2011 Red Hat, Inc. All rights reserved. + * Copyright (C) 2011 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#ifndef _BLK_BSG_ +#define _BLK_BSG_ + +#include <linux/blkdev.h> + +struct request; +struct device; +struct scatterlist; +struct request_queue; + +struct bsg_buffer { + unsigned int payload_len; + int sg_cnt; + struct scatterlist *sg_list; +}; + +struct bsg_job { + struct device *dev; + struct request *req; + + /* Transport/driver specific request/reply structs */ + void *request; + void *reply; + + unsigned int request_len; + unsigned int reply_len; + /* + * On entry : reply_len indicates the buffer size allocated for + * the reply. + * + * Upon completion : the message handler must set reply_len + * to indicates the size of the reply to be returned to the + * caller. + */ + + /* DMA payloads for the request/response */ + struct bsg_buffer request_payload; + struct bsg_buffer reply_payload; + + void *dd_data; /* Used for driver-specific storage */ +}; + +void bsg_job_done(struct bsg_job *job, int result, + unsigned int reply_payload_rcv_len); +int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, + bsg_job_fn *job_fn, int dd_job_size); +void bsg_request_fn(struct request_queue *q); +void bsg_remove_queue(struct request_queue *q); +void bsg_goose_queue(struct request_queue *q); + +#endif diff --git a/include/linux/bsg.h b/include/linux/bsg.h new file mode 100644 index 00000000..ecb4730d --- /dev/null +++ b/include/linux/bsg.h @@ -0,0 +1,94 @@ +#ifndef BSG_H +#define BSG_H + +#include <linux/types.h> + +#define BSG_PROTOCOL_SCSI 0 + +#define BSG_SUB_PROTOCOL_SCSI_CMD 0 +#define BSG_SUB_PROTOCOL_SCSI_TMF 1 +#define BSG_SUB_PROTOCOL_SCSI_TRANSPORT 2 + +/* + * For flags member below + * sg.h sg_io_hdr also has bits defined for it's flags member. However + * none of these bits are implemented/used by bsg. The bits below are + * allocated to not conflict with sg.h ones anyway. + */ +#define BSG_FLAG_Q_AT_TAIL 0x10 /* default, == 0 at this bit, is Q_AT_HEAD */ + +struct sg_io_v4 { + __s32 guard; /* [i] 'Q' to differentiate from v3 */ + __u32 protocol; /* [i] 0 -> SCSI , .... */ + __u32 subprotocol; /* [i] 0 -> SCSI command, 1 -> SCSI task + management function, .... 
*/ + + __u32 request_len; /* [i] in bytes */ + __u64 request; /* [i], [*i] {SCSI: cdb} */ + __u64 request_tag; /* [i] {SCSI: task tag (only if flagged)} */ + __u32 request_attr; /* [i] {SCSI: task attribute} */ + __u32 request_priority; /* [i] {SCSI: task priority} */ + __u32 request_extra; /* [i] {spare, for padding} */ + __u32 max_response_len; /* [i] in bytes */ + __u64 response; /* [i], [*o] {SCSI: (auto)sense data} */ + + /* "dout_": data out (to device); "din_": data in (from device) */ + __u32 dout_iovec_count; /* [i] 0 -> "flat" dout transfer else + dout_xfer points to array of iovec */ + __u32 dout_xfer_len; /* [i] bytes to be transferred to device */ + __u32 din_iovec_count; /* [i] 0 -> "flat" din transfer */ + __u32 din_xfer_len; /* [i] bytes to be transferred from device */ + __u64 dout_xferp; /* [i], [*i] */ + __u64 din_xferp; /* [i], [*o] */ + + __u32 timeout; /* [i] units: millisecond */ + __u32 flags; /* [i] bit mask */ + __u64 usr_ptr; /* [i->o] unused internally */ + __u32 spare_in; /* [i] */ + + __u32 driver_status; /* [o] 0 -> ok */ + __u32 transport_status; /* [o] 0 -> ok */ + __u32 device_status; /* [o] {SCSI: command completion status} */ + __u32 retry_delay; /* [o] {SCSI: status auxiliary information} */ + __u32 info; /* [o] additional information */ + __u32 duration; /* [o] time to complete, in milliseconds */ + __u32 response_len; /* [o] bytes of response actually written */ + __s32 din_resid; /* [o] din_xfer_len - actual_din_xfer_len */ + __s32 dout_resid; /* [o] dout_xfer_len - actual_dout_xfer_len */ + __u64 generated_tag; /* [o] {SCSI: transport generated task tag} */ + __u32 spare_out; /* [o] */ + + __u32 padding; +}; + +#ifdef __KERNEL__ + +#if defined(CONFIG_BLK_DEV_BSG) +struct bsg_class_device { + struct device *class_dev; + struct device *parent; + int minor; + struct request_queue *queue; + struct kref ref; + void (*release)(struct device *); +}; + +extern int bsg_register_queue(struct request_queue *q, + struct device *parent, const char *name, + void (*release)(struct device *)); +extern void bsg_unregister_queue(struct request_queue *); +#else +static inline int bsg_register_queue(struct request_queue *q, + struct device *parent, const char *name, + void (*release)(struct device *)) +{ + return 0; +} +static inline void bsg_unregister_queue(struct request_queue *q) +{ +} +#endif + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/btree-128.h b/include/linux/btree-128.h new file mode 100644 index 00000000..0b3414c4 --- /dev/null +++ b/include/linux/btree-128.h @@ -0,0 +1,109 @@ +extern struct btree_geo btree_geo128; + +struct btree_head128 { struct btree_head h; }; + +static inline void btree_init_mempool128(struct btree_head128 *head, + mempool_t *mempool) +{ + btree_init_mempool(&head->h, mempool); +} + +static inline int btree_init128(struct btree_head128 *head) +{ + return btree_init(&head->h); +} + +static inline void btree_destroy128(struct btree_head128 *head) +{ + btree_destroy(&head->h); +} + +static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key); +} + +static inline void *btree_get_prev128(struct btree_head128 *head, + u64 *k1, u64 *k2) +{ + u64 key[2] = {*k1, *k2}; + void *val; + + val = btree_get_prev(&head->h, &btree_geo128, + (unsigned long *)&key); + *k1 = key[0]; + *k2 = key[1]; + return val; +} + +static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2, + void *val, gfp_t gfp) +{ 
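+	/*
+	 * Pack the two 64-bit halves of the key into a single array and
+	 * hand it to the generic code with the 128-bit geometry; the
+	 * other 128-bit wrappers in this header follow the same pattern.
+	 */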
+ u64 key[2] = {k1, k2}; + return btree_insert(&head->h, &btree_geo128, + (unsigned long *)&key, val, gfp); +} + +static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2, + void *val) +{ + u64 key[2] = {k1, k2}; + return btree_update(&head->h, &btree_geo128, + (unsigned long *)&key, val); +} + +static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2) +{ + u64 key[2] = {k1, k2}; + return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key); +} + +static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2) +{ + u64 key[2]; + void *val; + + val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]); + if (val) { + *k1 = key[0]; + *k2 = key[1]; + } + + return val; +} + +static inline int btree_merge128(struct btree_head128 *target, + struct btree_head128 *victim, + gfp_t gfp) +{ + return btree_merge(&target->h, &victim->h, &btree_geo128, gfp); +} + +void visitor128(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func); + +typedef void (*visitor128_t)(void *elem, unsigned long opaque, + u64 key1, u64 key2, size_t index); + +static inline size_t btree_visitor128(struct btree_head128 *head, + unsigned long opaque, + visitor128_t func2) +{ + return btree_visitor(&head->h, &btree_geo128, opaque, + visitor128, func2); +} + +static inline size_t btree_grim_visitor128(struct btree_head128 *head, + unsigned long opaque, + visitor128_t func2) +{ + return btree_grim_visitor(&head->h, &btree_geo128, opaque, + visitor128, func2); +} + +#define btree_for_each_safe128(head, k1, k2, val) \ + for (val = btree_last128(head, &k1, &k2); \ + val; \ + val = btree_get_prev128(head, &k1, &k2)) + diff --git a/include/linux/btree-type.h b/include/linux/btree-type.h new file mode 100644 index 00000000..9a1147ef --- /dev/null +++ b/include/linux/btree-type.h @@ -0,0 +1,147 @@ +#define __BTREE_TP(pfx, type, sfx) pfx ## type ## sfx +#define _BTREE_TP(pfx, type, sfx) __BTREE_TP(pfx, type, sfx) +#define BTREE_TP(pfx) _BTREE_TP(pfx, BTREE_TYPE_SUFFIX,) +#define BTREE_FN(name) BTREE_TP(btree_ ## name) +#define BTREE_TYPE_HEAD BTREE_TP(struct btree_head) +#define VISITOR_FN BTREE_TP(visitor) +#define VISITOR_FN_T _BTREE_TP(visitor, BTREE_TYPE_SUFFIX, _t) + +BTREE_TYPE_HEAD { + struct btree_head h; +}; + +static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head, + mempool_t *mempool) +{ + btree_init_mempool(&head->h, mempool); +} + +static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head) +{ + return btree_init(&head->h); +} + +static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head) +{ + btree_destroy(&head->h); +} + +static inline int BTREE_FN(merge)(BTREE_TYPE_HEAD *target, + BTREE_TYPE_HEAD *victim, + gfp_t gfp) +{ + return btree_merge(&target->h, &victim->h, BTREE_TYPE_GEO, gfp); +} + +#if (BITS_PER_LONG > BTREE_TYPE_BITS) +static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + unsigned long _key = key; + return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key); +} + +static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val, gfp_t gfp) +{ + unsigned long _key = key; + return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp); +} + +static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val) +{ + unsigned long _key = key; + return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val); +} + +static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + unsigned long _key = key; + return 
btree_remove(&head->h, BTREE_TYPE_GEO, &_key); +} + +static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + unsigned long _key; + void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key); + if (val) + *key = _key; + return val; +} + +static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + unsigned long _key = *key; + void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key); + if (val) + *key = _key; + return val; +} +#else +static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); +} + +static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val, gfp_t gfp) +{ + return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, + val, gfp); +} + +static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, + void *val) +{ + return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val); +} + +static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) +{ + return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); +} + +static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); +} + +static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) +{ + return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); +} +#endif + +void VISITOR_FN(void *elem, unsigned long opaque, unsigned long *key, + size_t index, void *__func); + +typedef void (*VISITOR_FN_T)(void *elem, unsigned long opaque, + BTREE_KEYTYPE key, size_t index); + +static inline size_t BTREE_FN(visitor)(BTREE_TYPE_HEAD *head, + unsigned long opaque, + VISITOR_FN_T func2) +{ + return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque, + visitorl, func2); +} + +static inline size_t BTREE_FN(grim_visitor)(BTREE_TYPE_HEAD *head, + unsigned long opaque, + VISITOR_FN_T func2) +{ + return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque, + visitorl, func2); +} + +#undef VISITOR_FN +#undef VISITOR_FN_T +#undef __BTREE_TP +#undef _BTREE_TP +#undef BTREE_TP +#undef BTREE_FN +#undef BTREE_TYPE_HEAD +#undef BTREE_TYPE_SUFFIX +#undef BTREE_TYPE_GEO +#undef BTREE_KEYTYPE +#undef BTREE_TYPE_BITS diff --git a/include/linux/btree.h b/include/linux/btree.h new file mode 100644 index 00000000..65b5bb05 --- /dev/null +++ b/include/linux/btree.h @@ -0,0 +1,243 @@ +#ifndef BTREE_H +#define BTREE_H + +#include <linux/kernel.h> +#include <linux/mempool.h> + +/** + * DOC: B+Tree basics + * + * A B+Tree is a data structure for looking up arbitrary (currently allowing + * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure + * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not + * use binary search to find the key on lookups. + * + * Each B+Tree consists of a head, that contains bookkeeping information and + * a variable number (starting with zero) nodes. Each node contains the keys + * and pointers to sub-nodes, or, for leaf nodes, the keys and values for the + * tree entries. + * + * Each node in this implementation has the following layout: + * [key1, key2, ..., keyN] [val1, val2, ..., valN] + * + * Each key here is an array of unsigned longs, geo->no_longs in total. The + * number of keys and values (N) is geo->no_pairs. 
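+ *
+ * A rough usage sketch with 64-bit keys (illustrative only, return
+ * values unchecked) could look like:
+ *
+ *	struct btree_head64 head;
+ *
+ *	btree_init64(&head);
+ *	btree_insert64(&head, key, val, GFP_KERNEL);
+ *	val = btree_lookup64(&head, key);
+ *	btree_destroy64(&head);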
+ */ + +/** + * struct btree_head - btree head + * + * @node: the first node in the tree + * @mempool: mempool used for node allocations + * @height: current of the tree + */ +struct btree_head { + unsigned long *node; + mempool_t *mempool; + int height; +}; + +/* btree geometry */ +struct btree_geo; + +/** + * btree_alloc - allocate function for the mempool + * @gfp_mask: gfp mask for the allocation + * @pool_data: unused + */ +void *btree_alloc(gfp_t gfp_mask, void *pool_data); + +/** + * btree_free - free function for the mempool + * @element: the element to free + * @pool_data: unused + */ +void btree_free(void *element, void *pool_data); + +/** + * btree_init_mempool - initialise a btree with given mempool + * + * @head: the btree head to initialise + * @mempool: the mempool to use + * + * When this function is used, there is no need to destroy + * the mempool. + */ +void btree_init_mempool(struct btree_head *head, mempool_t *mempool); + +/** + * btree_init - initialise a btree + * + * @head: the btree head to initialise + * + * This function allocates the memory pool that the + * btree needs. Returns zero or a negative error code + * (-%ENOMEM) when memory allocation fails. + * + */ +int __must_check btree_init(struct btree_head *head); + +/** + * btree_destroy - destroy mempool + * + * @head: the btree head to destroy + * + * This function destroys the internal memory pool, use only + * when using btree_init(), not with btree_init_mempool(). + */ +void btree_destroy(struct btree_head *head); + +/** + * btree_lookup - look up a key in the btree + * + * @head: the btree to look in + * @geo: the btree geometry + * @key: the key to look up + * + * This function returns the value for the given key, or %NULL. + */ +void *btree_lookup(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_insert - insert an entry into the btree + * + * @head: the btree to add to + * @geo: the btree geometry + * @key: the key to add (must not already be present) + * @val: the value to add (must not be %NULL) + * @gfp: allocation flags for node allocations + * + * This function returns 0 if the item could be added, or an + * error code if it failed (may fail due to memory pressure). + */ +int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val, gfp_t gfp); +/** + * btree_update - update an entry in the btree + * + * @head: the btree to update + * @geo: the btree geometry + * @key: the key to update + * @val: the value to change it to (must not be %NULL) + * + * This function returns 0 if the update was successful, or + * -%ENOENT if the key could not be found. + */ +int btree_update(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val); +/** + * btree_remove - remove an entry from the btree + * + * @head: the btree to update + * @geo: the btree geometry + * @key: the key to remove + * + * This function returns the removed entry, or %NULL if the key + * could not be found. + */ +void *btree_remove(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_merge - merge two btrees + * + * @target: the tree that gets all the entries + * @victim: the tree that gets merged into @target + * @geo: the btree geometry + * @gfp: allocation flags + * + * The two trees @target and @victim may not contain the same keys, + * that is a bug and triggers a BUG(). 
This function returns zero + * if the trees were merged successfully, and may return a failure + * when memory allocation fails, in which case both trees might have + * been partially merged, i.e. some entries have been moved from + * @victim to @target. + */ +int btree_merge(struct btree_head *target, struct btree_head *victim, + struct btree_geo *geo, gfp_t gfp); + +/** + * btree_last - get last entry in btree + * + * @head: btree head + * @geo: btree geometry + * @key: last key + * + * Returns the last entry in the btree, and sets @key to the key + * of that entry; returns NULL if the tree is empty, in that case + * key is not changed. + */ +void *btree_last(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + +/** + * btree_get_prev - get previous entry + * + * @head: btree head + * @geo: btree geometry + * @key: pointer to key + * + * The function returns the next item right before the value pointed to by + * @key, and updates @key with its key, or returns %NULL when there is no + * entry with a key smaller than the given key. + */ +void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, + unsigned long *key); + + +/* internal use, use btree_visitor{l,32,64,128} */ +size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, size_t index, + void *func2), + void *func2); + +/* internal use, use btree_grim_visitor{l,32,64,128} */ +size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, + size_t index, void *func2), + void *func2); + + +#include <linux/btree-128.h> + +extern struct btree_geo btree_geo32; +#define BTREE_TYPE_SUFFIX l +#define BTREE_TYPE_BITS BITS_PER_LONG +#define BTREE_TYPE_GEO &btree_geo32 +#define BTREE_KEYTYPE unsigned long +#include <linux/btree-type.h> + +#define btree_for_each_safel(head, key, val) \ + for (val = btree_lastl(head, &key); \ + val; \ + val = btree_get_prevl(head, &key)) + +#define BTREE_TYPE_SUFFIX 32 +#define BTREE_TYPE_BITS 32 +#define BTREE_TYPE_GEO &btree_geo32 +#define BTREE_KEYTYPE u32 +#include <linux/btree-type.h> + +#define btree_for_each_safe32(head, key, val) \ + for (val = btree_last32(head, &key); \ + val; \ + val = btree_get_prev32(head, &key)) + +extern struct btree_geo btree_geo64; +#define BTREE_TYPE_SUFFIX 64 +#define BTREE_TYPE_BITS 64 +#define BTREE_TYPE_GEO &btree_geo64 +#define BTREE_KEYTYPE u64 +#include <linux/btree-type.h> + +#define btree_for_each_safe64(head, key, val) \ + for (val = btree_last64(head, &key); \ + val; \ + val = btree_get_prev64(head, &key)) + +#endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h new file mode 100644 index 00000000..458f4977 --- /dev/null +++ b/include/linux/buffer_head.h @@ -0,0 +1,350 @@ +/* + * include/linux/buffer_head.h + * + * Everything to do with buffer_heads. 
+ */ + +#ifndef _LINUX_BUFFER_HEAD_H +#define _LINUX_BUFFER_HEAD_H + +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/linkage.h> +#include <linux/pagemap.h> +#include <linux/wait.h> +#include <linux/atomic.h> + +#ifdef CONFIG_BLOCK + +enum bh_state_bits { + BH_Uptodate, /* Contains valid data */ + BH_Dirty, /* Is dirty */ + BH_Lock, /* Is locked */ + BH_Req, /* Has been submitted for I/O */ + BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise + * IO completion of other buffers in the page + */ + + BH_Mapped, /* Has a disk mapping */ + BH_New, /* Disk mapping was newly created by get_block */ + BH_Async_Read, /* Is under end_buffer_async_read I/O */ + BH_Async_Write, /* Is under end_buffer_async_write I/O */ + BH_Delay, /* Buffer is not yet allocated on disk */ + BH_Boundary, /* Block is followed by a discontiguity */ + BH_Write_EIO, /* I/O error on write */ + BH_Unwritten, /* Buffer is allocated on disk but not written */ + BH_Quiet, /* Buffer Error Prinks to be quiet */ + + BH_PrivateStart,/* not a state bit, but the first bit available + * for private allocation by other entities + */ +}; + +#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512) + +struct page; +struct buffer_head; +struct address_space; +typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); + +/* + * Historically, a buffer_head was used to map a single block + * within a page, and of course as the unit of I/O through the + * filesystem and block layers. Nowadays the basic I/O unit + * is the bio, and buffer_heads are used for extracting block + * mappings (via a get_block_t call), for tracking state within + * a page (via a page_mapping) and for wrapping bio submission + * for backward compatibility reasons (e.g. submit_bh). + */ +struct buffer_head { + unsigned long b_state; /* buffer state bitmap (see above) */ + struct buffer_head *b_this_page;/* circular list of page's buffers */ + struct page *b_page; /* the page this bh is mapped to */ + + sector_t b_blocknr; /* start block number */ + size_t b_size; /* size of mapping */ + char *b_data; /* pointer to data within the page */ + + struct block_device *b_bdev; + bh_end_io_t *b_end_io; /* I/O completion */ + void *b_private; /* reserved for b_end_io */ + struct list_head b_assoc_buffers; /* associated with another mapping */ + struct address_space *b_assoc_map; /* mapping this buffer is + associated with */ + atomic_t b_count; /* users using this buffer_head */ +}; + +/* + * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() + * and buffer_foo() functions. + */ +#define BUFFER_FNS(bit, name) \ +static inline void set_buffer_##name(struct buffer_head *bh) \ +{ \ + set_bit(BH_##bit, &(bh)->b_state); \ +} \ +static inline void clear_buffer_##name(struct buffer_head *bh) \ +{ \ + clear_bit(BH_##bit, &(bh)->b_state); \ +} \ +static inline int buffer_##name(const struct buffer_head *bh) \ +{ \ + return test_bit(BH_##bit, &(bh)->b_state); \ +} + +/* + * test_set_buffer_foo() and test_clear_buffer_foo() + */ +#define TAS_BUFFER_FNS(bit, name) \ +static inline int test_set_buffer_##name(struct buffer_head *bh) \ +{ \ + return test_and_set_bit(BH_##bit, &(bh)->b_state); \ +} \ +static inline int test_clear_buffer_##name(struct buffer_head *bh) \ +{ \ + return test_and_clear_bit(BH_##bit, &(bh)->b_state); \ +} \ + +/* + * Emit the buffer bitops functions. Note that there are also functions + * of the form "mark_buffer_foo()". These are higher-level functions which + * do something in addition to setting a b_state bit. 
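+ *
+ * For example, BUFFER_FNS(Uptodate, uptodate) below expands to
+ * set_buffer_uptodate(), clear_buffer_uptodate() and buffer_uptodate(),
+ * all of which operate on the BH_Uptodate bit in bh->b_state.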
+ */ +BUFFER_FNS(Uptodate, uptodate) +BUFFER_FNS(Dirty, dirty) +TAS_BUFFER_FNS(Dirty, dirty) +BUFFER_FNS(Lock, locked) +BUFFER_FNS(Req, req) +TAS_BUFFER_FNS(Req, req) +BUFFER_FNS(Mapped, mapped) +BUFFER_FNS(New, new) +BUFFER_FNS(Async_Read, async_read) +BUFFER_FNS(Async_Write, async_write) +BUFFER_FNS(Delay, delay) +BUFFER_FNS(Boundary, boundary) +BUFFER_FNS(Write_EIO, write_io_error) +BUFFER_FNS(Unwritten, unwritten) + +#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) +#define touch_buffer(bh) mark_page_accessed(bh->b_page) + +/* If we *know* page->private refers to buffer_heads */ +#define page_buffers(page) \ + ({ \ + BUG_ON(!PagePrivate(page)); \ + ((struct buffer_head *)page_private(page)); \ + }) +#define page_has_buffers(page) PagePrivate(page) + +/* + * Declarations + */ + +void mark_buffer_dirty(struct buffer_head *bh); +void init_buffer(struct buffer_head *, bh_end_io_t *, void *); +void set_bh_page(struct buffer_head *bh, + struct page *page, unsigned long offset); +int try_to_free_buffers(struct page *); +struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, + int retry); +void create_empty_buffers(struct page *, unsigned long, + unsigned long b_state); +void end_buffer_read_sync(struct buffer_head *bh, int uptodate); +void end_buffer_write_sync(struct buffer_head *bh, int uptodate); +void end_buffer_async_write(struct buffer_head *bh, int uptodate); + +/* Things to do with buffers at mapping->private_list */ +void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); +int inode_has_buffers(struct inode *); +void invalidate_inode_buffers(struct inode *); +int remove_inode_buffers(struct inode *inode); +int sync_mapping_buffers(struct address_space *mapping); +void unmap_underlying_metadata(struct block_device *bdev, sector_t block); + +void mark_buffer_async_write(struct buffer_head *bh); +void __wait_on_buffer(struct buffer_head *); +wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); +struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, + unsigned size); +struct buffer_head *__getblk(struct block_device *bdev, sector_t block, + unsigned size); +void __brelse(struct buffer_head *); +void __bforget(struct buffer_head *); +void __breadahead(struct block_device *, sector_t block, unsigned int size); +struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size); +void invalidate_bh_lrus(void); +struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); +void free_buffer_head(struct buffer_head * bh); +void unlock_buffer(struct buffer_head *bh); +void __lock_buffer(struct buffer_head *bh); +void ll_rw_block(int, int, struct buffer_head * bh[]); +int sync_dirty_buffer(struct buffer_head *bh); +int __sync_dirty_buffer(struct buffer_head *bh, int rw); +void write_dirty_buffer(struct buffer_head *bh, int rw); +int submit_bh(int, struct buffer_head *); +void write_boundary_block(struct block_device *bdev, + sector_t bblock, unsigned blocksize); +int bh_uptodate_or_lock(struct buffer_head *bh); +int bh_submit_read(struct buffer_head *bh); + +extern int buffer_heads_over_limit; + +/* + * Generic address_space_operations implementations for buffer_head-backed + * address_spaces. 
+ */ +void block_invalidatepage(struct page *page, unsigned long offset); +int block_write_full_page(struct page *page, get_block_t *get_block, + struct writeback_control *wbc); +int block_write_full_page_endio(struct page *page, get_block_t *get_block, + struct writeback_control *wbc, bh_end_io_t *handler); +int block_read_full_page(struct page*, get_block_t*); +int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, + unsigned long from); +int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, + unsigned flags, struct page **pagep, get_block_t *get_block); +int __block_write_begin(struct page *page, loff_t pos, unsigned len, + get_block_t *get_block); +int block_write_end(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page *, void *); +int generic_write_end(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page *, void *); +void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); +int cont_write_begin(struct file *, struct address_space *, loff_t, + unsigned, unsigned, struct page **, void **, + get_block_t *, loff_t *); +int generic_cont_expand_simple(struct inode *inode, loff_t size); +int block_commit_write(struct page *page, unsigned from, unsigned to); +int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, + get_block_t get_block); +int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, + get_block_t get_block); +/* Convert errno to return value from ->page_mkwrite() call */ +static inline int block_page_mkwrite_return(int err) +{ + if (err == 0) + return VM_FAULT_LOCKED; + if (err == -EFAULT) + return VM_FAULT_NOPAGE; + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err == -EAGAIN) + return VM_FAULT_RETRY; + /* -ENOSPC, -EDQUOT, -EIO ... 
*/ + return VM_FAULT_SIGBUS; +} +sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); +int block_truncate_page(struct address_space *, loff_t, get_block_t *); +int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, + struct page **, void **, get_block_t*); +int nobh_write_end(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page *, void *); +int nobh_truncate_page(struct address_space *, loff_t, get_block_t *); +int nobh_writepage(struct page *page, get_block_t *get_block, + struct writeback_control *wbc); + +void buffer_init(void); + +/* + * inline definitions + */ + +static inline void attach_page_buffers(struct page *page, + struct buffer_head *head) +{ + page_cache_get(page); + SetPagePrivate(page); + set_page_private(page, (unsigned long)head); +} + +static inline void get_bh(struct buffer_head *bh) +{ + atomic_inc(&bh->b_count); +} + +static inline void put_bh(struct buffer_head *bh) +{ + smp_mb__before_atomic_dec(); + atomic_dec(&bh->b_count); +} + +static inline void brelse(struct buffer_head *bh) +{ + if (bh) + __brelse(bh); +} + +static inline void bforget(struct buffer_head *bh) +{ + if (bh) + __bforget(bh); +} + +static inline struct buffer_head * +sb_bread(struct super_block *sb, sector_t block) +{ + return __bread(sb->s_bdev, block, sb->s_blocksize); +} + +static inline void +sb_breadahead(struct super_block *sb, sector_t block) +{ + __breadahead(sb->s_bdev, block, sb->s_blocksize); +} + +static inline struct buffer_head * +sb_getblk(struct super_block *sb, sector_t block) +{ + return __getblk(sb->s_bdev, block, sb->s_blocksize); +} + +static inline struct buffer_head * +sb_find_get_block(struct super_block *sb, sector_t block) +{ + return __find_get_block(sb->s_bdev, block, sb->s_blocksize); +} + +static inline void +map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) +{ + set_buffer_mapped(bh); + bh->b_bdev = sb->s_bdev; + bh->b_blocknr = block; + bh->b_size = sb->s_blocksize; +} + +static inline void wait_on_buffer(struct buffer_head *bh) +{ + might_sleep(); + if (buffer_locked(bh)) + __wait_on_buffer(bh); +} + +static inline int trylock_buffer(struct buffer_head *bh) +{ + return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state)); +} + +static inline void lock_buffer(struct buffer_head *bh) +{ + might_sleep(); + if (!trylock_buffer(bh)) + __lock_buffer(bh); +} + +extern int __set_page_dirty_buffers(struct page *page); + +#else /* CONFIG_BLOCK */ + +static inline void buffer_init(void) {} +static inline int try_to_free_buffers(struct page *page) { return 1; } +static inline int inode_has_buffers(struct inode *inode) { return 0; } +static inline void invalidate_inode_buffers(struct inode *inode) {} +static inline int remove_inode_buffers(struct inode *inode) { return 1; } +static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } + +#endif /* CONFIG_BLOCK */ +#endif /* _LINUX_BUFFER_HEAD_H */ diff --git a/include/linux/bug.h b/include/linux/bug.h new file mode 100644 index 00000000..72961c39 --- /dev/null +++ b/include/linux/bug.h @@ -0,0 +1,99 @@ +#ifndef _LINUX_BUG_H +#define _LINUX_BUG_H + +#include <asm/bug.h> + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +struct pt_regs; + +#ifdef __CHECKER__ +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) +#define BUILD_BUG_ON_ZERO(e) (0) +#define BUILD_BUG_ON_NULL(e) ((void*)0) +#define BUILD_BUG_ON(condition) +#define BUILD_BUG() (0) +#else /* __CHECKER__ */ + +/* 
Force a compilation error if a constant expression is not a power of 2 */ +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) + +/* Force a compilation error if condition is true, but also produce a + result (of value 0 and type size_t), so the expression can be used + e.g. in a structure initializer (or where-ever else comma expressions + aren't permitted). */ +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) +#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) + +/** + * BUILD_BUG_ON - break compile if a condition is true. + * @condition: the condition which the compiler should know is false. + * + * If you have some code which relies on certain constants being equal, or + * other compile-time-evaluated condition, you should use BUILD_BUG_ON to + * detect if someone changes it. + * + * The implementation uses gcc's reluctance to create a negative array, but + * gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments + * to inline functions). So as a fallback we use the optimizer; if it can't + * prove the condition is false, it will cause a link error on the undefined + * "__build_bug_on_failed". This error message can be harder to track down + * though, hence the two different methods. + */ +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else +extern int __build_bug_on_failed; +#define BUILD_BUG_ON(condition) \ + do { \ + ((void)sizeof(char[1 - 2*!!(condition)])); \ + if (condition) __build_bug_on_failed = 1; \ + } while(0) +#endif + +/** + * BUILD_BUG - break compile if used. + * + * If you have some code that you expect the compiler to eliminate at + * build time, you should use BUILD_BUG to detect if it is + * unexpectedly used. 
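+ *
+ * A typical (illustrative) use is a branch that must be dead for every
+ * valid, compile-time-known input:
+ *
+ *	switch (sizeof(x)) {
+ *	case 4: ...; break;
+ *	case 8: ...; break;
+ *	default: BUILD_BUG();
+ *	}
+ *
+ * If the compiler cannot eliminate the default branch, the build fails
+ * with a link error.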
+ */ +#define BUILD_BUG() \ + do { \ + extern void __build_bug_failed(void) \ + __linktime_error("BUILD_BUG failed"); \ + __build_bug_failed(); \ + } while (0) + +#endif /* __CHECKER__ */ + +#ifdef CONFIG_GENERIC_BUG +#include <asm-generic/bug.h> + +static inline int is_warning_bug(const struct bug_entry *bug) +{ + return bug->flags & BUGFLAG_WARNING; +} + +const struct bug_entry *find_bug(unsigned long bugaddr); + +enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); + +/* These are defined by the architecture */ +int is_valid_bugaddr(unsigned long addr); + +#else /* !CONFIG_GENERIC_BUG */ + +static inline enum bug_trap_type report_bug(unsigned long bug_addr, + struct pt_regs *regs) +{ + return BUG_TRAP_TYPE_BUG; +} + +#endif /* CONFIG_GENERIC_BUG */ +#endif /* _LINUX_BUG_H */ diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild new file mode 100644 index 00000000..5896e344 --- /dev/null +++ b/include/linux/byteorder/Kbuild @@ -0,0 +1,2 @@ +header-y += big_endian.h +header-y += little_endian.h diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h new file mode 100644 index 00000000..3c80fd7e --- /dev/null +++ b/include/linux/byteorder/big_endian.h @@ -0,0 +1,108 @@ +#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H +#define _LINUX_BYTEORDER_BIG_ENDIAN_H + +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN 4321 +#endif +#ifndef __BIG_ENDIAN_BITFIELD +#define __BIG_ENDIAN_BITFIELD +#endif + +#include <linux/types.h> +#include <linux/swab.h> + +#define __constant_htonl(x) ((__force __be32)(__u32)(x)) +#define __constant_ntohl(x) ((__force __u32)(__be32)(x)) +#define __constant_htons(x) ((__force __be16)(__u16)(x)) +#define __constant_ntohs(x) ((__force __u16)(__be16)(x)) +#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x))) +#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x)) +#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x))) +#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x)) +#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x))) +#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x)) +#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x)) +#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x)) +#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x)) +#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x)) +#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x)) +#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x)) +#define __cpu_to_le64(x) ((__force __le64)__swab64((x))) +#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x)) +#define __cpu_to_le32(x) ((__force __le32)__swab32((x))) +#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x)) +#define __cpu_to_le16(x) ((__force __le16)__swab16((x))) +#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x)) +#define __cpu_to_be64(x) ((__force __be64)(__u64)(x)) +#define __be64_to_cpu(x) ((__force __u64)(__be64)(x)) +#define __cpu_to_be32(x) ((__force __be32)(__u32)(x)) +#define __be32_to_cpu(x) ((__force __u32)(__be32)(x)) +#define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) +#define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ + return (__force __le64)__swab64p(p); +} +static inline __u64 __le64_to_cpup(const __le64 *p) +{ + return __swab64p((__u64 *)p); +} +static inline __le32 __cpu_to_le32p(const 
__u32 *p) +{ + return (__force __le32)__swab32p(p); +} +static inline __u32 __le32_to_cpup(const __le32 *p) +{ + return __swab32p((__u32 *)p); +} +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ + return (__force __le16)__swab16p(p); +} +static inline __u16 __le16_to_cpup(const __le16 *p) +{ + return __swab16p((__u16 *)p); +} +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ + return (__force __be64)*p; +} +static inline __u64 __be64_to_cpup(const __be64 *p) +{ + return (__force __u64)*p; +} +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ + return (__force __be32)*p; +} +static inline __u32 __be32_to_cpup(const __be32 *p) +{ + return (__force __u32)*p; +} +static inline __be16 __cpu_to_be16p(const __u16 *p) +{ + return (__force __be16)*p; +} +static inline __u16 __be16_to_cpup(const __be16 *p) +{ + return (__force __u16)*p; +} +#define __cpu_to_le64s(x) __swab64s((x)) +#define __le64_to_cpus(x) __swab64s((x)) +#define __cpu_to_le32s(x) __swab32s((x)) +#define __le32_to_cpus(x) __swab32s((x)) +#define __cpu_to_le16s(x) __swab16s((x)) +#define __le16_to_cpus(x) __swab16s((x)) +#define __cpu_to_be64s(x) do { (void)(x); } while (0) +#define __be64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be32s(x) do { (void)(x); } while (0) +#define __be32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be16s(x) do { (void)(x); } while (0) +#define __be16_to_cpus(x) do { (void)(x); } while (0) + +#ifdef __KERNEL__ +#include <linux/byteorder/generic.h> +#endif + +#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */ diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h new file mode 100644 index 00000000..0846e6b9 --- /dev/null +++ b/include/linux/byteorder/generic.h @@ -0,0 +1,173 @@ +#ifndef _LINUX_BYTEORDER_GENERIC_H +#define _LINUX_BYTEORDER_GENERIC_H + +/* + * linux/byteorder_generic.h + * Generic Byte-reordering support + * + * The "... p" macros, like le64_to_cpup, can be used with pointers + * to unaligned data, but there will be a performance penalty on + * some architectures. Use get_unaligned for unaligned data. + * + * Francois-Rene Rideau <fare@tunes.org> 19970707 + * gathered all the good ideas from all asm-foo/byteorder.h into one file, + * cleaned them up. + * I hope it is compliant with non-GCC compilers. + * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h, + * because I wasn't sure it would be ok to put it in types.h + * Upgraded it to 2.1.43 + * Francois-Rene Rideau <fare@tunes.org> 19971012 + * Upgraded it to 2.1.57 + * to please Linus T., replaced huge #ifdef's between little/big endian + * by nestedly #include'd files. + * Francois-Rene Rideau <fare@tunes.org> 19971205 + * Made it to 2.1.71; now a facelift: + * Put files under include/linux/byteorder/ + * Split swab from generic support. + * + * TODO: + * = Regular kernel maintainers could also replace all these manual + * byteswap macros that remain, disseminated among drivers, + * after some grep or the sources... + * = Linus might want to rename all these macros and files to fit his taste, + * to fit his personal naming scheme. + * = it seems that a few drivers would also appreciate + * nybble swapping support... + * = every architecture could add their byteswap macro in asm/byteorder.h + * see how some architectures already do (i386, alpha, ppc, etc) + * = cpu_to_beXX and beXX_to_cpu might some day need to be well + * distinguished throughout the kernel. This is not the case currently, + * since little endian, big endian, and pdp endian machines needn't it. 
+ * But this might be the case for, say, a port of Linux to 20/21 bit + * architectures (and F21 Linux addict around?). + */ + +/* + * The following macros are to be defined by <asm/byteorder.h>: + * + * Conversion of long and short int between network and host format + * ntohl(__u32 x) + * ntohs(__u16 x) + * htonl(__u32 x) + * htons(__u16 x) + * It seems that some programs (which? where? or perhaps a standard? POSIX?) + * might like the above to be functions, not macros (why?). + * if that's true, then detect them, and take measures. + * Anyway, the measure is: define only ___ntohl as a macro instead, + * and in a separate file, have + * unsigned long inline ntohl(x){return ___ntohl(x);} + * + * The same for constant arguments + * __constant_ntohl(__u32 x) + * __constant_ntohs(__u16 x) + * __constant_htonl(__u32 x) + * __constant_htons(__u16 x) + * + * Conversion of XX-bit integers (16- 32- or 64-) + * between native CPU format and little/big endian format + * 64-bit stuff only defined for proper architectures + * cpu_to_[bl]eXX(__uXX x) + * [bl]eXX_to_cpu(__uXX x) + * + * The same, but takes a pointer to the value to convert + * cpu_to_[bl]eXXp(__uXX x) + * [bl]eXX_to_cpup(__uXX x) + * + * The same, but change in situ + * cpu_to_[bl]eXXs(__uXX x) + * [bl]eXX_to_cpus(__uXX x) + * + * See asm-foo/byteorder.h for examples of how to provide + * architecture-optimized versions + * + */ + +#define cpu_to_le64 __cpu_to_le64 +#define le64_to_cpu __le64_to_cpu +#define cpu_to_le32 __cpu_to_le32 +#define le32_to_cpu __le32_to_cpu +#define cpu_to_le16 __cpu_to_le16 +#define le16_to_cpu __le16_to_cpu +#define cpu_to_be64 __cpu_to_be64 +#define be64_to_cpu __be64_to_cpu +#define cpu_to_be32 __cpu_to_be32 +#define be32_to_cpu __be32_to_cpu +#define cpu_to_be16 __cpu_to_be16 +#define be16_to_cpu __be16_to_cpu +#define cpu_to_le64p __cpu_to_le64p +#define le64_to_cpup __le64_to_cpup +#define cpu_to_le32p __cpu_to_le32p +#define le32_to_cpup __le32_to_cpup +#define cpu_to_le16p __cpu_to_le16p +#define le16_to_cpup __le16_to_cpup +#define cpu_to_be64p __cpu_to_be64p +#define be64_to_cpup __be64_to_cpup +#define cpu_to_be32p __cpu_to_be32p +#define be32_to_cpup __be32_to_cpup +#define cpu_to_be16p __cpu_to_be16p +#define be16_to_cpup __be16_to_cpup +#define cpu_to_le64s __cpu_to_le64s +#define le64_to_cpus __le64_to_cpus +#define cpu_to_le32s __cpu_to_le32s +#define le32_to_cpus __le32_to_cpus +#define cpu_to_le16s __cpu_to_le16s +#define le16_to_cpus __le16_to_cpus +#define cpu_to_be64s __cpu_to_be64s +#define be64_to_cpus __be64_to_cpus +#define cpu_to_be32s __cpu_to_be32s +#define be32_to_cpus __be32_to_cpus +#define cpu_to_be16s __cpu_to_be16s +#define be16_to_cpus __be16_to_cpus + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. 
+ */ + +#undef ntohl +#undef ntohs +#undef htonl +#undef htons + +#define ___htonl(x) __cpu_to_be32(x) +#define ___htons(x) __cpu_to_be16(x) +#define ___ntohl(x) __be32_to_cpu(x) +#define ___ntohs(x) __be16_to_cpu(x) + +#define htonl(x) ___htonl(x) +#define ntohl(x) ___ntohl(x) +#define htons(x) ___htons(x) +#define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpu(*var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpu(*var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpu(*var) + val); +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpu(*var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpu(*var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpu(*var) + val); +} + +#endif /* _LINUX_BYTEORDER_GENERIC_H */ diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h new file mode 100644 index 00000000..83195fb8 --- /dev/null +++ b/include/linux/byteorder/little_endian.h @@ -0,0 +1,108 @@ +#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H +#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H + +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN 1234 +#endif +#ifndef __LITTLE_ENDIAN_BITFIELD +#define __LITTLE_ENDIAN_BITFIELD +#endif + +#include <linux/types.h> +#include <linux/swab.h> + +#define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) +#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) +#define __constant_htons(x) ((__force __be16)___constant_swab16((x))) +#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x)) +#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x)) +#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x)) +#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x)) +#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x)) +#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x)) +#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x)) +#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x))) +#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x)) +#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x))) +#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x)) +#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x))) +#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x)) +#define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) +#define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +#define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +#define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +#define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) +#define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +#define __cpu_to_be64(x) ((__force __be64)__swab64((x))) +#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +#define __cpu_to_be32(x) ((__force __be32)__swab32((x))) +#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +#define __cpu_to_be16(x) ((__force __be16)__swab16((x))) +#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ + return (__force __le64)*p; +} +static inline __u64 __le64_to_cpup(const __le64 *p) +{ + return (__force __u64)*p; +} 
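/*
 * Editor's aside: an illustrative sketch, not part of the original byteorder
 * headers. It shows how driver or filesystem code typically uses the
 * cpu_to_leXX()/leXX_to_cpu() and leXX_add_cpu() helpers declared in
 * <linux/byteorder/generic.h>. The struct and field names below are
 * hypothetical and exist only for this example.
 */
struct example_disk_header {		/* hypothetical little-endian on-disk layout */
	__le32 magic;
	__le32 block_count;
	__le16 flags;
};

static void example_fill(struct example_disk_header *hdr)
{
	/* convert CPU-order values to little endian before writing them out */
	hdr->magic = cpu_to_le32(0x4d415743);
	hdr->block_count = cpu_to_le32(128);
	hdr->flags = cpu_to_le16(0x0001);

	/* le32_add_cpu() converts, adds and converts back in one step */
	le32_add_cpu(&hdr->block_count, 1);
}

static u32 example_read_count(const struct example_disk_header *hdr)
{
	/* convert back to CPU order before doing arithmetic or comparisons */
	return le32_to_cpu(hdr->block_count);
}
/*
 * On a little-endian build these calls compile to plain assignments (the
 * identity macros in this file); on a big-endian build they expand to the
 * swab helpers. The __le32 and __le16 types let sparse flag missed
 * conversions.
 */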
+static inline __le32 __cpu_to_le32p(const __u32 *p) +{ + return (__force __le32)*p; +} +static inline __u32 __le32_to_cpup(const __le32 *p) +{ + return (__force __u32)*p; +} +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ + return (__force __le16)*p; +} +static inline __u16 __le16_to_cpup(const __le16 *p) +{ + return (__force __u16)*p; +} +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ + return (__force __be64)__swab64p(p); +} +static inline __u64 __be64_to_cpup(const __be64 *p) +{ + return __swab64p((__u64 *)p); +} +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ + return (__force __be32)__swab32p(p); +} +static inline __u32 __be32_to_cpup(const __be32 *p) +{ + return __swab32p((__u32 *)p); +} +static inline __be16 __cpu_to_be16p(const __u16 *p) +{ + return (__force __be16)__swab16p(p); +} +static inline __u16 __be16_to_cpup(const __be16 *p) +{ + return __swab16p((__u16 *)p); +} +#define __cpu_to_le64s(x) do { (void)(x); } while (0) +#define __le64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le32s(x) do { (void)(x); } while (0) +#define __le32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le16s(x) do { (void)(x); } while (0) +#define __le16_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be64s(x) __swab64s((x)) +#define __be64_to_cpus(x) __swab64s((x)) +#define __cpu_to_be32s(x) __swab32s((x)) +#define __be32_to_cpus(x) __swab32s((x)) +#define __cpu_to_be16s(x) __swab16s((x)) +#define __be16_to_cpus(x) __swab16s((x)) + +#ifdef __KERNEL__ +#include <linux/byteorder/generic.h> +#endif + +#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ diff --git a/include/linux/c2port.h b/include/linux/c2port.h new file mode 100644 index 00000000..4efabcb5 --- /dev/null +++ b/include/linux/c2port.h @@ -0,0 +1,66 @@ +/* + * Silicon Labs C2 port Linux support + * + * Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it> + * Copyright (c) 2007 Eurotech S.p.A. 
<info@eurotech.it> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation + */ + +#include <linux/kmemcheck.h> + +#define C2PORT_NAME_LEN 32 + +struct device; + +/* + * C2 port basic structs + */ + +/* Main struct */ +struct c2port_ops; +struct c2port_device { + kmemcheck_bitfield_begin(flags); + unsigned int access:1; + unsigned int flash_access:1; + kmemcheck_bitfield_end(flags); + + int id; + char name[C2PORT_NAME_LEN]; + struct c2port_ops *ops; + struct mutex mutex; /* prevent races during read/write */ + + struct device *dev; + + void *private_data; +}; + +/* Basic operations */ +struct c2port_ops { + /* Flash layout */ + unsigned short block_size; /* flash block size in bytes */ + unsigned short blocks_num; /* flash blocks number */ + + /* Enable or disable the access to C2 port */ + void (*access)(struct c2port_device *dev, int status); + + /* Set C2D data line as input/output */ + void (*c2d_dir)(struct c2port_device *dev, int dir); + + /* Read/write C2D data line */ + int (*c2d_get)(struct c2port_device *dev); + void (*c2d_set)(struct c2port_device *dev, int status); + + /* Write C2CK clock line */ + void (*c2ck_set)(struct c2port_device *dev, int status); +}; + +/* + * Exported functions + */ + +extern struct c2port_device *c2port_device_register(char *name, + struct c2port_ops *ops, void *devdata); +extern void c2port_device_unregister(struct c2port_device *dev); diff --git a/include/linux/cache.h b/include/linux/cache.h new file mode 100644 index 00000000..4c570653 --- /dev/null +++ b/include/linux/cache.h @@ -0,0 +1,67 @@ +#ifndef __LINUX_CACHE_H +#define __LINUX_CACHE_H + +#include <linux/kernel.h> +#include <asm/cache.h> + +#ifndef L1_CACHE_ALIGN +#define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES) +#endif + +#ifndef SMP_CACHE_BYTES +#define SMP_CACHE_BYTES L1_CACHE_BYTES +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef ____cacheline_aligned +#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#ifndef __cacheline_aligned +#define __cacheline_aligned \ + __attribute__((__aligned__(SMP_CACHE_BYTES), \ + __section__(".data..cacheline_aligned"))) +#endif /* __cacheline_aligned */ + +#ifndef __cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define __cacheline_aligned_in_smp __cacheline_aligned +#else +#define __cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +/* + * The maximum alignment needed for some critical structures + * These could be inter-node cacheline sizes/L3 cacheline + * size etc. 
Define this in asm/cache.h for your arch + */ +#ifndef INTERNODE_CACHE_SHIFT +#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT +#endif + +#if !defined(____cacheline_internodealigned_in_smp) +#if defined(CONFIG_SMP) +#define ____cacheline_internodealigned_in_smp \ + __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) +#else +#define ____cacheline_internodealigned_in_smp +#endif +#endif + +#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE +#define cache_line_size() L1_CACHE_BYTES +#endif + +#endif /* __LINUX_CACHE_H */ diff --git a/include/linux/caif/Kbuild b/include/linux/caif/Kbuild new file mode 100644 index 00000000..a9cf2506 --- /dev/null +++ b/include/linux/caif/Kbuild @@ -0,0 +1,2 @@ +header-y += caif_socket.h +header-y += if_caif.h diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h new file mode 100644 index 00000000..3f3bac6a --- /dev/null +++ b/include/linux/caif/caif_socket.h @@ -0,0 +1,194 @@ +/* linux/caif_socket.h + * CAIF Definitions for CAIF socket and network layer + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _LINUX_CAIF_SOCKET_H +#define _LINUX_CAIF_SOCKET_H + +#include <linux/types.h> +#include <linux/socket.h> + +/** + * enum caif_link_selector - Physical Link Selection. + * @CAIF_LINK_HIGH_BANDW: Physical interface for high-bandwidth + * traffic. + * @CAIF_LINK_LOW_LATENCY: Physical interface for low-latency + * traffic. + * + * CAIF Link Layers can register their link properties. + * This enum is used for choosing between CAIF Link Layers when + * setting up CAIF Channels when multiple CAIF Link Layers exists. + */ +enum caif_link_selector { + CAIF_LINK_HIGH_BANDW, + CAIF_LINK_LOW_LATENCY +}; + +/** + * enum caif_channel_priority - CAIF channel priorities. + * + * @CAIF_PRIO_MIN: Min priority for a channel. + * @CAIF_PRIO_LOW: Low-priority channel. + * @CAIF_PRIO_NORMAL: Normal/default priority level. + * @CAIF_PRIO_HIGH: High priority level + * @CAIF_PRIO_MAX: Max priority for channel + * + * Priority can be set on CAIF Channels in order to + * prioritize between traffic on different CAIF Channels. + * These priority levels are recommended, but the priority value + * is not restricted to the values defined in this enum, any value + * between CAIF_PRIO_MIN and CAIF_PRIO_MAX could be used. + */ +enum caif_channel_priority { + CAIF_PRIO_MIN = 0x01, + CAIF_PRIO_LOW = 0x04, + CAIF_PRIO_NORMAL = 0x0f, + CAIF_PRIO_HIGH = 0x14, + CAIF_PRIO_MAX = 0x1F +}; + +/** + * enum caif_protocol_type - CAIF Channel type. + * @CAIFPROTO_AT: Classic AT channel. + * @CAIFPROTO_DATAGRAM: Datagram channel. + * @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing. + * @CAIFPROTO_UTIL: Utility (Psock) channel. + * @CAIFPROTO_RFM: Remote File Manager + * @CAIFPROTO_DEBUG: Debug link + * + * This enum defines the CAIF Channel type to be used. This defines + * the service to connect to on the modem. + */ +enum caif_protocol_type { + CAIFPROTO_AT, + CAIFPROTO_DATAGRAM, + CAIFPROTO_DATAGRAM_LOOP, + CAIFPROTO_UTIL, + CAIFPROTO_RFM, + CAIFPROTO_DEBUG, + _CAIFPROTO_MAX +}; +#define CAIFPROTO_MAX _CAIFPROTO_MAX + +/** + * enum caif_at_type - AT Service Endpoint + * @CAIF_ATTYPE_PLAIN: Connects to a plain vanilla AT channel. 
+ */ +enum caif_at_type { + CAIF_ATTYPE_PLAIN = 2 +}; + /** + * enum caif_debug_type - Content selection for debug connection + * @CAIF_DEBUG_TRACE_INTERACTIVE: Connection will contain + * both trace and interactive debug. + * @CAIF_DEBUG_TRACE: Connection contains trace only. + * @CAIF_DEBUG_INTERACTIVE: Connection to interactive debug. + */ +enum caif_debug_type { + CAIF_DEBUG_TRACE_INTERACTIVE = 0, + CAIF_DEBUG_TRACE, + CAIF_DEBUG_INTERACTIVE, +}; + +/** + * enum caif_debug_service - Debug Service Endpoint + * @CAIF_RADIO_DEBUG_SERVICE: Debug service on the Radio sub-system + * @CAIF_APP_DEBUG_SERVICE: Debug for the applications sub-system + */ +enum caif_debug_service { + CAIF_RADIO_DEBUG_SERVICE = 1, + CAIF_APP_DEBUG_SERVICE +}; + +/** + * struct sockaddr_caif - the sockaddr structure for CAIF sockets. + * @family: Address family number, must be AF_CAIF. + * @u: Union of address data 'switched' by family. + * : + * @u.at: Applies when family = CAIFPROTO_AT. + * + * @u.at.type: Type of AT link to set up (enum caif_at_type). + * + * @u.util: Applies when family = CAIFPROTO_UTIL + * + * @u.util.service: Utility service name. + * + * @u.dgm: Applies when family = CAIFPROTO_DATAGRAM + * + * @u.dgm.connection_id: Datagram connection id. + * + * @u.dgm.nsapi: NSAPI of the PDP-Context. + * + * @u.rfm: Applies when family = CAIFPROTO_RFM + * + * @u.rfm.connection_id: Connection ID for RFM. + * + * @u.rfm.volume: Volume to mount. + * + * @u.dbg: Applies when family = CAIFPROTO_DEBUG. + * + * @u.dbg.type: Type of debug connection to set up + * (caif_debug_type). + * + * @u.dbg.service: Service sub-system to connect (caif_debug_service + * Description: + * This structure holds the connect parameters used for setting up a + * CAIF Channel. It defines the service to connect to on the modem. + */ +struct sockaddr_caif { + __kernel_sa_family_t family; + union { + struct { + __u8 type; /* type: enum caif_at_type */ + } at; /* CAIFPROTO_AT */ + struct { + char service[16]; + } util; /* CAIFPROTO_UTIL */ + union { + __u32 connection_id; + __u8 nsapi; + } dgm; /* CAIFPROTO_DATAGRAM(_LOOP)*/ + struct { + __u32 connection_id; + char volume[16]; + } rfm; /* CAIFPROTO_RFM */ + struct { + __u8 type; /* type:enum caif_debug_type */ + __u8 service; /* service:caif_debug_service */ + } dbg; /* CAIFPROTO_DEBUG */ + } u; +}; + +/** + * enum caif_socket_opts - CAIF option values for getsockopt and setsockopt. + * + * @CAIFSO_LINK_SELECT: Selector used if multiple CAIF Link layers are + * available. Either a high bandwidth + * link can be selected (CAIF_LINK_HIGH_BANDW) or + * or a low latency link (CAIF_LINK_LOW_LATENCY). + * This option is of type __u32. + * Alternatively SO_BINDTODEVICE can be used. + * + * @CAIFSO_REQ_PARAM: Used to set the request parameters for a + * utility channel. (maximum 256 bytes). This + * option must be set before connecting. + * + * @CAIFSO_RSP_PARAM: Gets the response parameters for a utility + * channel. (maximum 256 bytes). This option + * is valid after a successful connect. + * + * + * This enum defines the CAIF Socket options to be used on a socket + * of type PF_CAIF. 
+ * + */ +enum caif_socket_opts { + CAIFSO_LINK_SELECT = 127, + CAIFSO_REQ_PARAM = 128, + CAIFSO_RSP_PARAM = 129, +}; + +#endif /* _LINUX_CAIF_SOCKET_H */ diff --git a/include/linux/caif/if_caif.h b/include/linux/caif/if_caif.h new file mode 100644 index 00000000..5e7eed4e --- /dev/null +++ b/include/linux/caif/if_caif.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef IF_CAIF_H_ +#define IF_CAIF_H_ +#include <linux/sockios.h> +#include <linux/types.h> +#include <linux/socket.h> + +/** + * enum ifla_caif - CAIF NetlinkRT parameters. + * @IFLA_CAIF_IPV4_CONNID: Connection ID for IPv4 PDP Context. + * The type of attribute is NLA_U32. + * @IFLA_CAIF_IPV6_CONNID: Connection ID for IPv6 PDP Context. + * The type of attribute is NLA_U32. + * @IFLA_CAIF_LOOPBACK: If different from zero, device is doing loopback + * The type of attribute is NLA_U8. + * + * When using RT Netlink to create, destroy or configure a CAIF IP interface, + * enum ifla_caif is used to specify the configuration attributes. + */ +enum ifla_caif { + __IFLA_CAIF_UNSPEC, + IFLA_CAIF_IPV4_CONNID, + IFLA_CAIF_IPV6_CONNID, + IFLA_CAIF_LOOPBACK, + __IFLA_CAIF_MAX +}; +#define IFLA_CAIF_MAX (__IFLA_CAIF_MAX-1) + +#endif /*IF_CAIF_H_*/ diff --git a/include/linux/can.h b/include/linux/can.h new file mode 100644 index 00000000..9a19bcb3 --- /dev/null +++ b/include/linux/can.h @@ -0,0 +1,109 @@ +/* + * linux/can.h + * + * Definitions for CAN network layer (socket addr / CAN frame / CAN filter) + * + * Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Urs Thuermann <urs.thuermann@volkswagen.de> + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * All rights reserved. + * + */ + +#ifndef CAN_H +#define CAN_H + +#include <linux/types.h> +#include <linux/socket.h> + +/* controller area network (CAN) kernel definitions */ + +/* special address description flags for the CAN_ID */ +#define CAN_EFF_FLAG 0x80000000U /* EFF/SFF is set in the MSB */ +#define CAN_RTR_FLAG 0x40000000U /* remote transmission request */ +#define CAN_ERR_FLAG 0x20000000U /* error frame */ + +/* valid bits in CAN ID for frame formats */ +#define CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */ +#define CAN_EFF_MASK 0x1FFFFFFFU /* extended frame format (EFF) */ +#define CAN_ERR_MASK 0x1FFFFFFFU /* omit EFF, RTR, ERR flags */ + +/* + * Controller Area Network Identifier structure + * + * bit 0-28 : CAN identifier (11/29 bit) + * bit 29 : error frame flag (0 = data frame, 1 = error frame) + * bit 30 : remote transmission request flag (1 = rtr frame) + * bit 31 : frame format flag (0 = standard 11 bit, 1 = extended 29 bit) + */ +typedef __u32 canid_t; + +/* + * Controller Area Network Error Frame Mask structure + * + * bit 0-28 : error class mask (see include/linux/can/error.h) + * bit 29-31 : set to zero + */ +typedef __u32 can_err_mask_t; + +/** + * struct can_frame - basic CAN frame structure + * @can_id: the CAN ID of the frame and CAN_*_FLAG flags, see above. + * @can_dlc: the data length field of the CAN frame + * @data: the CAN frame payload. + */ +struct can_frame { + canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ + __u8 can_dlc; /* data length code: 0 .. 
8 */ + __u8 data[8] __attribute__((aligned(8))); +}; + +/* particular protocols of the protocol family PF_CAN */ +#define CAN_RAW 1 /* RAW sockets */ +#define CAN_BCM 2 /* Broadcast Manager */ +#define CAN_TP16 3 /* VAG Transport Protocol v1.6 */ +#define CAN_TP20 4 /* VAG Transport Protocol v2.0 */ +#define CAN_MCNET 5 /* Bosch MCNet */ +#define CAN_ISOTP 6 /* ISO 15765-2 Transport Protocol */ +#define CAN_NPROTO 7 + +#define SOL_CAN_BASE 100 + +/** + * struct sockaddr_can - the sockaddr structure for CAN sockets + * @can_family: address family number AF_CAN. + * @can_ifindex: CAN network interface index. + * @can_addr: protocol specific address information + */ +struct sockaddr_can { + __kernel_sa_family_t can_family; + int can_ifindex; + union { + /* transport protocol class address information (e.g. ISOTP) */ + struct { canid_t rx_id, tx_id; } tp; + + /* reserved for future CAN protocols address information */ + } can_addr; +}; + +/** + * struct can_filter - CAN ID based filter in can_register(). + * @can_id: relevant bits of CAN ID which are not masked out. + * @can_mask: CAN mask (see description) + * + * Description: + * A filter matches, when + * + * <received_can_id> & mask == can_id & mask + * + * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can + * filter for error frames (CAN_ERR_FLAG bit set in mask). + */ +struct can_filter { + canid_t can_id; + canid_t can_mask; +}; + +#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */ + +#endif /* CAN_H */ diff --git a/include/linux/can/Kbuild b/include/linux/can/Kbuild new file mode 100644 index 00000000..c62b7f17 --- /dev/null +++ b/include/linux/can/Kbuild @@ -0,0 +1,5 @@ +header-y += raw.h +header-y += bcm.h +header-y += gw.h +header-y += error.h +header-y += netlink.h diff --git a/include/linux/can/bcm.h b/include/linux/can/bcm.h new file mode 100644 index 00000000..3ebe387f --- /dev/null +++ b/include/linux/can/bcm.h @@ -0,0 +1,66 @@ +/* + * linux/can/bcm.h + * + * Definitions for CAN Broadcast Manager (BCM) + * + * Author: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * All rights reserved. + * + */ + +#ifndef CAN_BCM_H +#define CAN_BCM_H + +#include <linux/types.h> +#include <linux/can.h> + +/** + * struct bcm_msg_head - head of messages to/from the broadcast manager + * @opcode: opcode, see enum below. + * @flags: special flags, see below. + * @count: number of frames to send before changing interval. + * @ival1: interval for the first @count frames. + * @ival2: interval for the following frames. + * @can_id: CAN ID of frames to be sent or received. + * @nframes: number of frames appended to the message head. + * @frames: array of CAN frames. 
+ */
+struct bcm_msg_head {
+ __u32 opcode;
+ __u32 flags;
+ __u32 count;
+ struct timeval ival1, ival2;
+ canid_t can_id;
+ __u32 nframes;
+ struct can_frame frames[0];
+};
+
+enum {
+ TX_SETUP = 1, /* create (cyclic) transmission task */
+ TX_DELETE, /* remove (cyclic) transmission task */
+ TX_READ, /* read properties of (cyclic) transmission task */
+ TX_SEND, /* send one CAN frame */
+ RX_SETUP, /* create RX content filter subscription */
+ RX_DELETE, /* remove RX content filter subscription */
+ RX_READ, /* read properties of RX content filter subscription */
+ TX_STATUS, /* reply to TX_READ request */
+ TX_EXPIRED, /* notification on performed transmissions (count=0) */
+ RX_STATUS, /* reply to RX_READ request */
+ RX_TIMEOUT, /* cyclic message is absent */
+ RX_CHANGED /* updated CAN frame (detected content change) */
+};
+
+#define SETTIMER 0x0001
+#define STARTTIMER 0x0002
+#define TX_COUNTEVT 0x0004
+#define TX_ANNOUNCE 0x0008
+#define TX_CP_CAN_ID 0x0010
+#define RX_FILTER_ID 0x0020
+#define RX_CHECK_DLC 0x0040
+#define RX_NO_AUTOTIMER 0x0080
+#define RX_ANNOUNCE_RESUME 0x0100
+#define TX_RESET_MULTI_IDX 0x0200
+#define RX_RTR_FRAME 0x0400
+
+#endif /* CAN_BCM_H */
diff --git a/include/linux/can/core.h b/include/linux/can/core.h new file mode 100644 index 00000000..0ccc1cd2 --- /dev/null +++ b/include/linux/can/core.h @@ -0,0 +1,61 @@
+/*
+ * linux/can/core.h
+ *
+ * Prototypes and definitions for CAN protocol modules using the PF_CAN core
+ *
+ * Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
+ * Urs Thuermann <urs.thuermann@volkswagen.de>
+ * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ */
+
+#ifndef CAN_CORE_H
+#define CAN_CORE_H
+
+#include <linux/can.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#define CAN_VERSION "20090105"
+
+/* increment this number each time you change some user-space interface */
+#define CAN_ABI_VERSION "8"
+
+#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION
+
+#define DNAME(dev) ((dev) ? (dev)->name : "any")
+
+/**
+ * struct can_proto - CAN protocol structure
+ * @type: type argument in socket() syscall, e.g. SOCK_DGRAM.
+ * @protocol: protocol number in socket() syscall.
+ * @ops: pointer to struct proto_ops for sock->ops.
+ * @prot: pointer to struct proto structure.
+ */
+struct can_proto {
+ int type;
+ int protocol;
+ const struct proto_ops *ops;
+ struct proto *prot;
+};
+
+/* function prototypes for the CAN network layer core (af_can.c) */
+
+extern int can_proto_register(const struct can_proto *cp);
+extern void can_proto_unregister(const struct can_proto *cp);
+
+extern int can_rx_register(struct net_device *dev, canid_t can_id,
+ canid_t mask,
+ void (*func)(struct sk_buff *, void *),
+ void *data, char *ident);
+
+extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
+ canid_t mask,
+ void (*func)(struct sk_buff *, void *),
+ void *data);
+
+extern int can_send(struct sk_buff *skb, int loop);
+extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#endif /* CAN_CORE_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h new file mode 100644 index 00000000..5d2efe7e --- /dev/null +++ b/include/linux/can/dev.h @@ -0,0 +1,102 @@
+/*
+ * linux/can/dev.h
+ *
+ * Definitions for the CAN network device driver interface
+ *
+ * Copyright (C) 2006 Andrey Volkov <avolkov@varma-el.com>
+ * Varma Electronics Oy
+ *
+ * Copyright (C) 2008 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ */
+
+#ifndef CAN_DEV_H
+#define CAN_DEV_H
+
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/can/error.h>
+
+/*
+ * CAN mode
+ */
+enum can_mode {
+ CAN_MODE_STOP = 0,
+ CAN_MODE_START,
+ CAN_MODE_SLEEP
+};
+
+/*
+ * CAN common private data
+ */
+struct can_priv {
+ struct can_device_stats can_stats;
+
+ struct can_bittiming bittiming;
+ struct can_bittiming_const *bittiming_const;
+ struct can_clock clock;
+
+ enum can_state state;
+ u32 ctrlmode;
+ u32 ctrlmode_supported;
+
+ int restart_ms;
+ struct timer_list restart_timer;
+
+ int (*do_set_bittiming)(struct net_device *dev);
+ int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
+ int (*do_get_state)(const struct net_device *dev,
+ enum can_state *state);
+ int (*do_get_berr_counter)(const struct net_device *dev,
+ struct can_berr_counter *bec);
+
+ unsigned int echo_skb_max;
+ struct sk_buff **echo_skb;
+};
+
+/*
+ * get_can_dlc(value) - helper macro to cast a given data length code (dlc)
+ * to __u8 and limit the dlc value to a maximum of 8 bytes.
+ *
+ * To be used in the CAN netdriver receive path to ensure conformance with
+ * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ */
+#define get_can_dlc(i) (min_t(__u8, (i), 8))
+
+/* Drop a given socket buffer if it does not contain a valid CAN frame.
*/ +static inline int can_dropped_invalid_skb(struct net_device *dev, + struct sk_buff *skb) +{ + const struct can_frame *cf = (struct can_frame *)skb->data; + + if (unlikely(skb->len != sizeof(*cf) || cf->can_dlc > 8)) { + kfree_skb(skb); + dev->stats.tx_dropped++; + return 1; + } + + return 0; +} + +struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); +void free_candev(struct net_device *dev); + +int open_candev(struct net_device *dev); +void close_candev(struct net_device *dev); + +int register_candev(struct net_device *dev); +void unregister_candev(struct net_device *dev); + +int can_restart_now(struct net_device *dev); +void can_bus_off(struct net_device *dev); + +void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx); +unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); +void can_free_echo_skb(struct net_device *dev, unsigned int idx); + +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); +struct sk_buff *alloc_can_err_skb(struct net_device *dev, + struct can_frame **cf); + +#endif /* CAN_DEV_H */ diff --git a/include/linux/can/error.h b/include/linux/can/error.h new file mode 100644 index 00000000..63e855ea --- /dev/null +++ b/include/linux/can/error.h @@ -0,0 +1,91 @@ +/* + * linux/can/error.h + * + * Definitions of the CAN error frame to be filtered and passed to the user. + * + * Author: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * All rights reserved. + * + */ + +#ifndef CAN_ERROR_H +#define CAN_ERROR_H + +#define CAN_ERR_DLC 8 /* dlc for error frames */ + +/* error class (mask) in can_id */ +#define CAN_ERR_TX_TIMEOUT 0x00000001U /* TX timeout (by netdevice driver) */ +#define CAN_ERR_LOSTARB 0x00000002U /* lost arbitration / data[0] */ +#define CAN_ERR_CRTL 0x00000004U /* controller problems / data[1] */ +#define CAN_ERR_PROT 0x00000008U /* protocol violations / data[2..3] */ +#define CAN_ERR_TRX 0x00000010U /* transceiver status / data[4] */ +#define CAN_ERR_ACK 0x00000020U /* received no ACK on transmission */ +#define CAN_ERR_BUSOFF 0x00000040U /* bus off */ +#define CAN_ERR_BUSERROR 0x00000080U /* bus error (may flood!) */ +#define CAN_ERR_RESTARTED 0x00000100U /* controller restarted */ + +/* arbitration lost in bit ... 
/ data[0] */ +#define CAN_ERR_LOSTARB_UNSPEC 0x00 /* unspecified */ + /* else bit number in bitstream */ + +/* error status of CAN-controller / data[1] */ +#define CAN_ERR_CRTL_UNSPEC 0x00 /* unspecified */ +#define CAN_ERR_CRTL_RX_OVERFLOW 0x01 /* RX buffer overflow */ +#define CAN_ERR_CRTL_TX_OVERFLOW 0x02 /* TX buffer overflow */ +#define CAN_ERR_CRTL_RX_WARNING 0x04 /* reached warning level for RX errors */ +#define CAN_ERR_CRTL_TX_WARNING 0x08 /* reached warning level for TX errors */ +#define CAN_ERR_CRTL_RX_PASSIVE 0x10 /* reached error passive status RX */ +#define CAN_ERR_CRTL_TX_PASSIVE 0x20 /* reached error passive status TX */ + /* (at least one error counter exceeds */ + /* the protocol-defined level of 127) */ + +/* error in CAN protocol (type) / data[2] */ +#define CAN_ERR_PROT_UNSPEC 0x00 /* unspecified */ +#define CAN_ERR_PROT_BIT 0x01 /* single bit error */ +#define CAN_ERR_PROT_FORM 0x02 /* frame format error */ +#define CAN_ERR_PROT_STUFF 0x04 /* bit stuffing error */ +#define CAN_ERR_PROT_BIT0 0x08 /* unable to send dominant bit */ +#define CAN_ERR_PROT_BIT1 0x10 /* unable to send recessive bit */ +#define CAN_ERR_PROT_OVERLOAD 0x20 /* bus overload */ +#define CAN_ERR_PROT_ACTIVE 0x40 /* active error announcement */ +#define CAN_ERR_PROT_TX 0x80 /* error occurred on transmission */ + +/* error in CAN protocol (location) / data[3] */ +#define CAN_ERR_PROT_LOC_UNSPEC 0x00 /* unspecified */ +#define CAN_ERR_PROT_LOC_SOF 0x03 /* start of frame */ +#define CAN_ERR_PROT_LOC_ID28_21 0x02 /* ID bits 28 - 21 (SFF: 10 - 3) */ +#define CAN_ERR_PROT_LOC_ID20_18 0x06 /* ID bits 20 - 18 (SFF: 2 - 0 )*/ +#define CAN_ERR_PROT_LOC_SRTR 0x04 /* substitute RTR (SFF: RTR) */ +#define CAN_ERR_PROT_LOC_IDE 0x05 /* identifier extension */ +#define CAN_ERR_PROT_LOC_ID17_13 0x07 /* ID bits 17-13 */ +#define CAN_ERR_PROT_LOC_ID12_05 0x0F /* ID bits 12-5 */ +#define CAN_ERR_PROT_LOC_ID04_00 0x0E /* ID bits 4-0 */ +#define CAN_ERR_PROT_LOC_RTR 0x0C /* RTR */ +#define CAN_ERR_PROT_LOC_RES1 0x0D /* reserved bit 1 */ +#define CAN_ERR_PROT_LOC_RES0 0x09 /* reserved bit 0 */ +#define CAN_ERR_PROT_LOC_DLC 0x0B /* data length code */ +#define CAN_ERR_PROT_LOC_DATA 0x0A /* data section */ +#define CAN_ERR_PROT_LOC_CRC_SEQ 0x08 /* CRC sequence */ +#define CAN_ERR_PROT_LOC_CRC_DEL 0x18 /* CRC delimiter */ +#define CAN_ERR_PROT_LOC_ACK 0x19 /* ACK slot */ +#define CAN_ERR_PROT_LOC_ACK_DEL 0x1B /* ACK delimiter */ +#define CAN_ERR_PROT_LOC_EOF 0x1A /* end of frame */ +#define CAN_ERR_PROT_LOC_INTERM 0x12 /* intermission */ + +/* error status of CAN-transceiver / data[4] */ +/* CANH CANL */ +#define CAN_ERR_TRX_UNSPEC 0x00 /* 0000 0000 */ +#define CAN_ERR_TRX_CANH_NO_WIRE 0x04 /* 0000 0100 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_BAT 0x05 /* 0000 0101 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_VCC 0x06 /* 0000 0110 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_GND 0x07 /* 0000 0111 */ +#define CAN_ERR_TRX_CANL_NO_WIRE 0x40 /* 0100 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_BAT 0x50 /* 0101 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_VCC 0x60 /* 0110 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_GND 0x70 /* 0111 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_CANH 0x80 /* 1000 0000 */ + +/* controller specific additional information / data[5..7] */ + +#endif /* CAN_ERROR_H */ diff --git a/include/linux/can/gw.h b/include/linux/can/gw.h new file mode 100644 index 00000000..8e1db18c --- /dev/null +++ b/include/linux/can/gw.h @@ -0,0 +1,162 @@ +/* + * linux/can/gw.h + * + * Definitions for CAN frame Gateway/Router/Bridge + * + * Author: 
Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Copyright (c) 2011 Volkswagen Group Electronic Research + * All rights reserved. + * + */ + +#ifndef CAN_GW_H +#define CAN_GW_H + +#include <linux/types.h> +#include <linux/can.h> + +struct rtcanmsg { + __u8 can_family; + __u8 gwtype; + __u16 flags; +}; + +/* CAN gateway types */ +enum { + CGW_TYPE_UNSPEC, + CGW_TYPE_CAN_CAN, /* CAN->CAN routing */ + __CGW_TYPE_MAX +}; + +#define CGW_TYPE_MAX (__CGW_TYPE_MAX - 1) + +/* CAN rtnetlink attribute definitions */ +enum { + CGW_UNSPEC, + CGW_MOD_AND, /* CAN frame modification binary AND */ + CGW_MOD_OR, /* CAN frame modification binary OR */ + CGW_MOD_XOR, /* CAN frame modification binary XOR */ + CGW_MOD_SET, /* CAN frame modification set alternate values */ + CGW_CS_XOR, /* set data[] XOR checksum into data[index] */ + CGW_CS_CRC8, /* set data[] CRC8 checksum into data[index] */ + CGW_HANDLED, /* number of handled CAN frames */ + CGW_DROPPED, /* number of dropped CAN frames */ + CGW_SRC_IF, /* ifindex of source network interface */ + CGW_DST_IF, /* ifindex of destination network interface */ + CGW_FILTER, /* specify struct can_filter on source CAN device */ + __CGW_MAX +}; + +#define CGW_MAX (__CGW_MAX - 1) + +#define CGW_FLAGS_CAN_ECHO 0x01 +#define CGW_FLAGS_CAN_SRC_TSTAMP 0x02 + +#define CGW_MOD_FUNCS 4 /* AND OR XOR SET */ + +/* CAN frame elements that are affected by curr. 3 CAN frame modifications */ +#define CGW_MOD_ID 0x01 +#define CGW_MOD_DLC 0x02 +#define CGW_MOD_DATA 0x04 + +#define CGW_FRAME_MODS 3 /* ID DLC DATA */ + +#define MAX_MODFUNCTIONS (CGW_MOD_FUNCS * CGW_FRAME_MODS) + +struct cgw_frame_mod { + struct can_frame cf; + __u8 modtype; +} __attribute__((packed)); + +#define CGW_MODATTR_LEN sizeof(struct cgw_frame_mod) + +struct cgw_csum_xor { + __s8 from_idx; + __s8 to_idx; + __s8 result_idx; + __u8 init_xor_val; +} __attribute__((packed)); + +struct cgw_csum_crc8 { + __s8 from_idx; + __s8 to_idx; + __s8 result_idx; + __u8 init_crc_val; + __u8 final_xor_val; + __u8 crctab[256]; + __u8 profile; + __u8 profile_data[20]; +} __attribute__((packed)); + +/* length of checksum operation parameters. idx = index in CAN frame data[] */ +#define CGW_CS_XOR_LEN sizeof(struct cgw_csum_xor) +#define CGW_CS_CRC8_LEN sizeof(struct cgw_csum_crc8) + +/* CRC8 profiles (compute CRC for additional data elements - see below) */ +enum { + CGW_CRC8PRF_UNSPEC, + CGW_CRC8PRF_1U8, /* compute one additional u8 value */ + CGW_CRC8PRF_16U8, /* u8 value table indexed by data[1] & 0xF */ + CGW_CRC8PRF_SFFID_XOR, /* (can_id & 0xFF) ^ (can_id >> 8 & 0xFF) */ + __CGW_CRC8PRF_MAX +}; + +#define CGW_CRC8PRF_MAX (__CGW_CRC8PRF_MAX - 1) + +/* + * CAN rtnetlink attribute contents in detail + * + * CGW_XXX_IF (length 4 bytes): + * Sets an interface index for source/destination network interfaces. + * For the CAN->CAN gwtype the indices of _two_ CAN interfaces are mandatory. + * + * CGW_FILTER (length 8 bytes): + * Sets a CAN receive filter for the gateway job specified by the + * struct can_filter described in include/linux/can.h + * + * CGW_MOD_XXX (length 17 bytes): + * Specifies a modification that's done to a received CAN frame before it is + * send out to the destination interface. + * + * <struct can_frame> data used as operator + * <u8> affected CAN frame elements + * + * CGW_CS_XOR (length 4 bytes): + * Set a simple XOR checksum starting with an initial value into + * data[result-idx] using data[start-idx] .. 
data[end-idx] + * + * The XOR checksum is calculated like this: + * + * xor = init_xor_val + * + * for (i = from_idx .. to_idx) + * xor ^= can_frame.data[i] + * + * can_frame.data[ result_idx ] = xor + * + * CGW_CS_CRC8 (length 282 bytes): + * Set a CRC8 value into data[result-idx] using a given 256 byte CRC8 table, + * a given initial value and a defined input data[start-idx] .. data[end-idx]. + * Finally the result value is XOR'ed with the final_xor_val. + * + * The CRC8 checksum is calculated like this: + * + * crc = init_crc_val + * + * for (i = from_idx .. to_idx) + * crc = crctab[ crc ^ can_frame.data[i] ] + * + * can_frame.data[ result_idx ] = crc ^ final_xor_val + * + * The calculated CRC may contain additional source data elements that can be + * defined in the handling of 'checksum profiles' e.g. shown in AUTOSAR specs + * like http://www.autosar.org/download/R4.0/AUTOSAR_SWS_E2ELibrary.pdf + * E.g. the profile_data[] may contain additional u8 values (called DATA_IDs) + * that are used depending on counter values inside the CAN frame data[]. + * So far only three profiles have been implemented for illustration. + * + * Remark: In general the attribute data is a linear buffer. + * Beware of sending unpacked or aligned structs! + */ + +#endif diff --git a/include/linux/can/netlink.h b/include/linux/can/netlink.h new file mode 100644 index 00000000..14966ddb --- /dev/null +++ b/include/linux/can/netlink.h @@ -0,0 +1,122 @@ +/* + * linux/can/netlink.h + * + * Definitions for the CAN netlink interface + * + * Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com> + * + */ + +#ifndef CAN_NETLINK_H +#define CAN_NETLINK_H + +#include <linux/types.h> + +/* + * CAN bit-timing parameters + * + * For further information, please read chapter "8 BIT TIMING + * REQUIREMENTS" of the "Bosch CAN Specification version 2.0" + * at http://www.semiconductors.bosch.de/pdf/can2spec.pdf. 
+ */
+struct can_bittiming {
+ __u32 bitrate; /* Bit-rate in bits/second */
+ __u32 sample_point; /* Sample point in one-tenth of a percent */
+ __u32 tq; /* Time quanta (TQ) in nanoseconds */
+ __u32 prop_seg; /* Propagation segment in TQs */
+ __u32 phase_seg1; /* Phase buffer segment 1 in TQs */
+ __u32 phase_seg2; /* Phase buffer segment 2 in TQs */
+ __u32 sjw; /* Synchronisation jump width in TQs */
+ __u32 brp; /* Bit-rate prescaler */
+};
+
+/*
+ * CAN hardware-dependent bit-timing constant
+ *
+ * Used for calculating and checking bit-timing parameters
+ */
+struct can_bittiming_const {
+ char name[16]; /* Name of the CAN controller hardware */
+ __u32 tseg1_min; /* Time segment 1 = prop_seg + phase_seg1 */
+ __u32 tseg1_max;
+ __u32 tseg2_min; /* Time segment 2 = phase_seg2 */
+ __u32 tseg2_max;
+ __u32 sjw_max; /* Synchronisation jump width */
+ __u32 brp_min; /* Bit-rate prescaler */
+ __u32 brp_max;
+ __u32 brp_inc;
+};
+
+/*
+ * CAN clock parameters
+ */
+struct can_clock {
+ __u32 freq; /* CAN system clock frequency in Hz */
+};
+
+/*
+ * CAN operational and error states
+ */
+enum can_state {
+ CAN_STATE_ERROR_ACTIVE = 0, /* RX/TX error count < 96 */
+ CAN_STATE_ERROR_WARNING, /* RX/TX error count < 128 */
+ CAN_STATE_ERROR_PASSIVE, /* RX/TX error count < 256 */
+ CAN_STATE_BUS_OFF, /* RX/TX error count >= 256 */
+ CAN_STATE_STOPPED, /* Device is stopped */
+ CAN_STATE_SLEEPING, /* Device is sleeping */
+ CAN_STATE_MAX
+};
+
+/*
+ * CAN bus error counters
+ */
+struct can_berr_counter {
+ __u16 txerr;
+ __u16 rxerr;
+};
+
+/*
+ * CAN controller mode
+ */
+struct can_ctrlmode {
+ __u32 mask;
+ __u32 flags;
+};
+
+#define CAN_CTRLMODE_LOOPBACK 0x01 /* Loopback mode */
+#define CAN_CTRLMODE_LISTENONLY 0x02 /* Listen-only mode */
+#define CAN_CTRLMODE_3_SAMPLES 0x04 /* Triple sampling mode */
+#define CAN_CTRLMODE_ONE_SHOT 0x08 /* One-Shot mode */
+#define CAN_CTRLMODE_BERR_REPORTING 0x10 /* Bus-error reporting */
+
+/*
+ * CAN device statistics
+ */
+struct can_device_stats {
+ __u32 bus_error; /* Bus errors */
+ __u32 error_warning; /* Changes to error warning state */
+ __u32 error_passive; /* Changes to error passive state */
+ __u32 bus_off; /* Changes to bus off state */
+ __u32 arbitration_lost; /* Arbitration lost errors */
+ __u32 restarts; /* CAN controller re-starts */
+};
+
+/*
+ * CAN netlink interface
+ */
+enum {
+ IFLA_CAN_UNSPEC,
+ IFLA_CAN_BITTIMING,
+ IFLA_CAN_BITTIMING_CONST,
+ IFLA_CAN_CLOCK,
+ IFLA_CAN_STATE,
+ IFLA_CAN_CTRLMODE,
+ IFLA_CAN_RESTART_MS,
+ IFLA_CAN_RESTART,
+ IFLA_CAN_BERR_COUNTER,
+ __IFLA_CAN_MAX
+};
+
+#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1)
+
+#endif /* CAN_NETLINK_H */
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h new file mode 100644 index 00000000..7702641f --- /dev/null +++ b/include/linux/can/platform/cc770.h @@ -0,0 +1,33 @@
+#ifndef _CAN_PLATFORM_CC770_H_
+#define _CAN_PLATFORM_CC770_H_
+
+/* CPU Interface Register (0x02) */
+#define CPUIF_CEN 0x01 /* Clock Out Enable */
+#define CPUIF_MUX 0x04 /* Multiplex */
+#define CPUIF_SLP 0x08 /* Sleep */
+#define CPUIF_PWD 0x10 /* Power Down Mode */
+#define CPUIF_DMC 0x20 /* Divide Memory Clock */
+#define CPUIF_DSC 0x40 /* Divide System Clock */
+#define CPUIF_RST 0x80 /* Hardware Reset Status */
+
+/* Clock Out Register (0x1f) */
+#define CLKOUT_CD_MASK 0x0f /* Clock Divider mask */
+#define CLKOUT_SL_MASK 0x30 /* Slew Rate mask */
+#define CLKOUT_SL_SHIFT 4
+
+/* Bus Configuration Register (0x2f) */
+#define BUSCFG_DR0 0x01 /* Disconnect RX0
Input / Select RX input */ +#define BUSCFG_DR1 0x02 /* Disconnect RX1 Input / Silent mode */ +#define BUSCFG_DT1 0x08 /* Disconnect TX1 Output */ +#define BUSCFG_POL 0x20 /* Polarity dominant or recessive */ +#define BUSCFG_CBY 0x40 /* Input Comparator Bypass */ + +struct cc770_platform_data { + u32 osc_freq; /* CAN bus oscillator frequency in Hz */ + + u8 cir; /* CPU Interface Register */ + u8 cor; /* Clock Out Register */ + u8 bcr; /* Bus Configuration Register */ +}; + +#endif /* !_CAN_PLATFORM_CC770_H_ */ diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h new file mode 100644 index 00000000..72b713ab --- /dev/null +++ b/include/linux/can/platform/flexcan.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2010 Marc Kleine-Budde <kernel@pengutronix.de> + * + * This file is released under the GPLv2 + * + */ + +#ifndef __CAN_PLATFORM_FLEXCAN_H +#define __CAN_PLATFORM_FLEXCAN_H + +/** + * struct flexcan_platform_data - flex CAN controller platform data + * @transceiver_enable: - called to power on/off the transceiver + * + */ +struct flexcan_platform_data { + void (*transceiver_switch)(int enable); +}; + +#endif /* __CAN_PLATFORM_FLEXCAN_H */ diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h new file mode 100644 index 00000000..089fe432 --- /dev/null +++ b/include/linux/can/platform/mcp251x.h @@ -0,0 +1,34 @@ +#ifndef __CAN_PLATFORM_MCP251X_H__ +#define __CAN_PLATFORM_MCP251X_H__ + +/* + * + * CAN bus driver for Microchip 251x CAN Controller with SPI Interface + * + */ + +#include <linux/spi/spi.h> + +/** + * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data + * @oscillator_frequency: - oscillator frequency in Hz + * @irq_flags: - IRQF configuration flags + * @board_specific_setup: - called before probing the chip (power,reset) + * @transceiver_enable: - called to power on/off the transceiver + * @power_enable: - called to power on/off the mcp *and* the + * transceiver + * + * Please note that you should define power_enable or transceiver_enable or + * none of them. Defining both of them is no use. 
+ * + */ + +struct mcp251x_platform_data { + unsigned long oscillator_frequency; + unsigned long irq_flags; + int (*board_specific_setup)(struct spi_device *spi); + int (*transceiver_enable)(int enable); + int (*power_enable) (int enable); +}; + +#endif /* __CAN_PLATFORM_MCP251X_H__ */ diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h new file mode 100644 index 00000000..96f8fcc7 --- /dev/null +++ b/include/linux/can/platform/sja1000.h @@ -0,0 +1,35 @@ +#ifndef _CAN_PLATFORM_SJA1000_H_ +#define _CAN_PLATFORM_SJA1000_H_ + +/* clock divider register */ +#define CDR_CLKOUT_MASK 0x07 +#define CDR_CLK_OFF 0x08 /* Clock off (CLKOUT pin) */ +#define CDR_RXINPEN 0x20 /* TX1 output is RX irq output */ +#define CDR_CBP 0x40 /* CAN input comparator bypass */ +#define CDR_PELICAN 0x80 /* PeliCAN mode */ + +/* output control register */ +#define OCR_MODE_BIPHASE 0x00 +#define OCR_MODE_TEST 0x01 +#define OCR_MODE_NORMAL 0x02 +#define OCR_MODE_CLOCK 0x03 +#define OCR_MODE_MASK 0x07 +#define OCR_TX0_INVERT 0x04 +#define OCR_TX0_PULLDOWN 0x08 +#define OCR_TX0_PULLUP 0x10 +#define OCR_TX0_PUSHPULL 0x18 +#define OCR_TX1_INVERT 0x20 +#define OCR_TX1_PULLDOWN 0x40 +#define OCR_TX1_PULLUP 0x80 +#define OCR_TX1_PUSHPULL 0xc0 +#define OCR_TX_MASK 0xfc +#define OCR_TX_SHIFT 2 + +struct sja1000_platform_data { + u32 osc_freq; /* CAN bus oscillator frequency in Hz */ + + u8 ocr; /* output control register */ + u8 cdr; /* clock divider register */ +}; + +#endif /* !_CAN_PLATFORM_SJA1000_H_ */ diff --git a/include/linux/can/platform/ti_hecc.h b/include/linux/can/platform/ti_hecc.h new file mode 100644 index 00000000..af17cb3f --- /dev/null +++ b/include/linux/can/platform/ti_hecc.h @@ -0,0 +1,44 @@ +#ifndef __CAN_PLATFORM_TI_HECC_H__ +#define __CAN_PLATFORM_TI_HECC_H__ + +/* + * TI HECC (High End CAN Controller) driver platform header + * + * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed as is WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/** + * struct hecc_platform_data - HECC Platform Data + * + * @scc_hecc_offset: mostly 0 - should really never change + * @scc_ram_offset: SCC RAM offset + * @hecc_ram_offset: HECC RAM offset + * @mbx_offset: Mailbox RAM offset + * @int_line: Interrupt line to use - 0 or 1 + * @version: version for future use + * @transceiver_switch: platform specific callback fn for transceiver control + * + * Platform data structure to get all platform specific settings. 
+ * this structure also accounts the fact that the IP may have different + * RAM and mailbox offsets for different SOC's + */ +struct ti_hecc_platform_data { + u32 scc_hecc_offset; + u32 scc_ram_offset; + u32 hecc_ram_offset; + u32 mbx_offset; + u32 int_line; + u32 version; + void (*transceiver_switch) (int); +}; +#endif diff --git a/include/linux/can/raw.h b/include/linux/can/raw.h new file mode 100644 index 00000000..781f3a37 --- /dev/null +++ b/include/linux/can/raw.h @@ -0,0 +1,29 @@ +/* + * linux/can/raw.h + * + * Definitions for raw CAN sockets + * + * Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> + * Urs Thuermann <urs.thuermann@volkswagen.de> + * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * All rights reserved. + * + */ + +#ifndef CAN_RAW_H +#define CAN_RAW_H + +#include <linux/can.h> + +#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW) + +/* for socket options affecting the socket (not the global system) */ + +enum { + CAN_RAW_FILTER = 1, /* set 0 .. n can_filter(s) */ + CAN_RAW_ERR_FILTER, /* set filter for error frames */ + CAN_RAW_LOOPBACK, /* local loopback (default:on) */ + CAN_RAW_RECV_OWN_MSGS /* receive my own msgs (default:off) */ +}; + +#endif diff --git a/include/linux/capability.h b/include/linux/capability.h new file mode 100644 index 00000000..c398cff3 --- /dev/null +++ b/include/linux/capability.h @@ -0,0 +1,560 @@ +/* + * This is <linux/capability.h> + * + * Andrew G. Morgan <morgan@kernel.org> + * Alexander Kjeldaas <astor@guardian.no> + * with help from Aleph1, Roland Buresund and Andrew Main. + * + * See here for the libcap library ("POSIX draft" compliance): + * + * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/ + */ + +#ifndef _LINUX_CAPABILITY_H +#define _LINUX_CAPABILITY_H + +#include <linux/types.h> + +struct task_struct; + +/* User-level do most of the mapping between kernel and user + capabilities based on the version tag given by the kernel. The + kernel might be somewhat backwards compatible, but don't bet on + it. */ + +/* Note, cap_t, is defined by POSIX (draft) to be an "opaque" pointer to + a set of three capability sets. The transposition of 3*the + following structure to such a composite is better handled in a user + library since the draft standard requires the use of malloc/free + etc.. 
*/ + +#define _LINUX_CAPABILITY_VERSION_1 0x19980330 +#define _LINUX_CAPABILITY_U32S_1 1 + +#define _LINUX_CAPABILITY_VERSION_2 0x20071026 /* deprecated - use v3 */ +#define _LINUX_CAPABILITY_U32S_2 2 + +#define _LINUX_CAPABILITY_VERSION_3 0x20080522 +#define _LINUX_CAPABILITY_U32S_3 2 + +typedef struct __user_cap_header_struct { + __u32 version; + int pid; +} __user *cap_user_header_t; + +typedef struct __user_cap_data_struct { + __u32 effective; + __u32 permitted; + __u32 inheritable; +} __user *cap_user_data_t; + + +#define VFS_CAP_REVISION_MASK 0xFF000000 +#define VFS_CAP_REVISION_SHIFT 24 +#define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK +#define VFS_CAP_FLAGS_EFFECTIVE 0x000001 + +#define VFS_CAP_REVISION_1 0x01000000 +#define VFS_CAP_U32_1 1 +#define XATTR_CAPS_SZ_1 (sizeof(__le32)*(1 + 2*VFS_CAP_U32_1)) + +#define VFS_CAP_REVISION_2 0x02000000 +#define VFS_CAP_U32_2 2 +#define XATTR_CAPS_SZ_2 (sizeof(__le32)*(1 + 2*VFS_CAP_U32_2)) + +#define XATTR_CAPS_SZ XATTR_CAPS_SZ_2 +#define VFS_CAP_U32 VFS_CAP_U32_2 +#define VFS_CAP_REVISION VFS_CAP_REVISION_2 + +struct vfs_cap_data { + __le32 magic_etc; /* Little endian */ + struct { + __le32 permitted; /* Little endian */ + __le32 inheritable; /* Little endian */ + } data[VFS_CAP_U32]; +}; + +#ifndef __KERNEL__ + +/* + * Backwardly compatible definition for source code - trapped in a + * 32-bit world. If you find you need this, please consider using + * libcap to untrap yourself... + */ +#define _LINUX_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_1 +#define _LINUX_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_1 + +#else + +#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 +#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 + +extern int file_caps_enabled; + +typedef struct kernel_cap_struct { + __u32 cap[_KERNEL_CAPABILITY_U32S]; +} kernel_cap_t; + +/* exact same as vfs_cap_data but in cpu endian and always filled completely */ +struct cpu_vfs_cap_data { + __u32 magic_etc; + kernel_cap_t permitted; + kernel_cap_t inheritable; +}; + +#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct)) +#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) + +#endif + + +/** + ** POSIX-draft defined capabilities. + **/ + +/* In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this + overrides the restriction of changing file ownership and group + ownership. */ + +#define CAP_CHOWN 0 + +/* Override all DAC access, including ACL execute access if + [_POSIX_ACL] is defined. Excluding DAC access covered by + CAP_LINUX_IMMUTABLE. */ + +#define CAP_DAC_OVERRIDE 1 + +/* Overrides all DAC restrictions regarding read and search on files + and directories, including ACL restrictions if [_POSIX_ACL] is + defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE. */ + +#define CAP_DAC_READ_SEARCH 2 + +/* Overrides all restrictions about allowed operations on files, where + file owner ID must be equal to the user ID, except where CAP_FSETID + is applicable. It doesn't override MAC and DAC restrictions. */ + +#define CAP_FOWNER 3 + +/* Overrides the following restrictions that the effective user ID + shall match the file owner ID when setting the S_ISUID and S_ISGID + bits on that file; that the effective group ID (or one of the + supplementary group IDs) shall match the file owner ID when setting + the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are + cleared on successful return from chown(2) (not implemented). 
*/ + +#define CAP_FSETID 4 + +/* Overrides the restriction that the real or effective user ID of a + process sending a signal must match the real or effective user ID + of the process receiving the signal. */ + +#define CAP_KILL 5 + +/* Allows setgid(2) manipulation */ +/* Allows setgroups(2) */ +/* Allows forged gids on socket credentials passing. */ + +#define CAP_SETGID 6 + +/* Allows set*uid(2) manipulation (including fsuid). */ +/* Allows forged pids on socket credentials passing. */ + +#define CAP_SETUID 7 + + +/** + ** Linux-specific capabilities + **/ + +/* Without VFS support for capabilities: + * Transfer any capability in your permitted set to any pid, + * remove any capability in your permitted set from any pid + * With VFS support for capabilities (neither of above, but) + * Add any capability from current's capability bounding set + * to the current process' inheritable set + * Allow taking bits out of capability bounding set + * Allow modification of the securebits for a process + */ + +#define CAP_SETPCAP 8 + +/* Allow modification of S_IMMUTABLE and S_APPEND file attributes */ + +#define CAP_LINUX_IMMUTABLE 9 + +/* Allows binding to TCP/UDP sockets below 1024 */ +/* Allows binding to ATM VCIs below 32 */ + +#define CAP_NET_BIND_SERVICE 10 + +/* Allow broadcasting, listen to multicast */ + +#define CAP_NET_BROADCAST 11 + +/* Allow interface configuration */ +/* Allow administration of IP firewall, masquerading and accounting */ +/* Allow setting debug option on sockets */ +/* Allow modification of routing tables */ +/* Allow setting arbitrary process / process group ownership on + sockets */ +/* Allow binding to any address for transparent proxying (also via NET_RAW) */ +/* Allow setting TOS (type of service) */ +/* Allow setting promiscuous mode */ +/* Allow clearing driver statistics */ +/* Allow multicasting */ +/* Allow read/write of device-specific registers */ +/* Allow activation of ATM control sockets */ + +#define CAP_NET_ADMIN 12 + +/* Allow use of RAW sockets */ +/* Allow use of PACKET sockets */ +/* Allow binding to any address for transparent proxying (also via NET_ADMIN) */ + +#define CAP_NET_RAW 13 + +/* Allow locking of shared memory segments */ +/* Allow mlock and mlockall (which doesn't really have anything to do + with IPC) */ + +#define CAP_IPC_LOCK 14 + +/* Override IPC ownership checks */ + +#define CAP_IPC_OWNER 15 + +/* Insert and remove kernel modules - modify kernel without limit */ +#define CAP_SYS_MODULE 16 + +/* Allow ioperm/iopl access */ +/* Allow sending USB messages to any device via /proc/bus/usb */ + +#define CAP_SYS_RAWIO 17 + +/* Allow use of chroot() */ + +#define CAP_SYS_CHROOT 18 + +/* Allow ptrace() of any process */ + +#define CAP_SYS_PTRACE 19 + +/* Allow configuration of process accounting */ + +#define CAP_SYS_PACCT 20 + +/* Allow configuration of the secure attention key */ +/* Allow administration of the random device */ +/* Allow examination and configuration of disk quotas */ +/* Allow setting the domainname */ +/* Allow setting the hostname */ +/* Allow calling bdflush() */ +/* Allow mount() and umount(), setting up new smb connection */ +/* Allow some autofs root ioctls */ +/* Allow nfsservctl */ +/* Allow VM86_REQUEST_IRQ */ +/* Allow to read/write pci config on alpha */ +/* Allow irix_prctl on mips (setstacksize) */ +/* Allow flushing all cache on m68k (sys_cacheflush) */ +/* Allow removing semaphores */ +/* Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores + and shared memory */ +/* Allow 
locking/unlocking of shared memory segment */ +/* Allow turning swap on/off */ +/* Allow forged pids on socket credentials passing */ +/* Allow setting readahead and flushing buffers on block devices */ +/* Allow setting geometry in floppy driver */ +/* Allow turning DMA on/off in xd driver */ +/* Allow administration of md devices (mostly the above, but some + extra ioctls) */ +/* Allow tuning the ide driver */ +/* Allow access to the nvram device */ +/* Allow administration of apm_bios, serial and bttv (TV) device */ +/* Allow manufacturer commands in isdn CAPI support driver */ +/* Allow reading non-standardized portions of pci configuration space */ +/* Allow DDI debug ioctl on sbpcd driver */ +/* Allow setting up serial ports */ +/* Allow sending raw qic-117 commands */ +/* Allow enabling/disabling tagged queuing on SCSI controllers and sending + arbitrary SCSI commands */ +/* Allow setting encryption key on loopback filesystem */ +/* Allow setting zone reclaim policy */ + +#define CAP_SYS_ADMIN 21 + +/* Allow use of reboot() */ + +#define CAP_SYS_BOOT 22 + +/* Allow raising priority and setting priority on other (different + UID) processes */ +/* Allow use of FIFO and round-robin (realtime) scheduling on own + processes and setting the scheduling algorithm used by another + process. */ +/* Allow setting cpu affinity on other processes */ + +#define CAP_SYS_NICE 23 + +/* Override resource limits. Set resource limits. */ +/* Override quota limits. */ +/* Override reserved space on ext2 filesystem */ +/* Modify data journaling mode on ext3 filesystem (uses journaling + resources) */ +/* NOTE: ext2 honors fsuid when checking for resource overrides, so + you can override using fsuid too */ +/* Override size restrictions on IPC message queues */ +/* Allow more than 64hz interrupts from the real-time clock */ +/* Override max number of consoles on console allocation */ +/* Override max number of keymaps */ + +#define CAP_SYS_RESOURCE 24 + +/* Allow manipulation of system clock */ +/* Allow irix_stime on mips */ +/* Allow setting the real-time clock */ + +#define CAP_SYS_TIME 25 + +/* Allow configuration of tty devices */ +/* Allow vhangup() of tty */ + +#define CAP_SYS_TTY_CONFIG 26 + +/* Allow the privileged aspects of mknod() */ + +#define CAP_MKNOD 27 + +/* Allow taking of leases on files */ + +#define CAP_LEASE 28 + +#define CAP_AUDIT_WRITE 29 + +#define CAP_AUDIT_CONTROL 30 + +#define CAP_SETFCAP 31 + +/* Override MAC access. + The base kernel enforces no MAC policy. + An LSM may enforce a MAC policy, and if it does and it chooses + to implement capability based overrides of that policy, this is + the capability it should use to do so. */ + +#define CAP_MAC_OVERRIDE 32 + +/* Allow MAC configuration or state changes. + The base kernel requires no MAC configuration. + An LSM may enforce a MAC policy, and if it does and it chooses + to implement capability based checks on modifications to that + policy or the data required to maintain it, this is the + capability it should use to do so. 
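+ *
+ *   For illustration only (a sketch, not a statement about any particular
+ *   LSM): such a policy-update path would typically be gated with
+ *
+ *	if (!capable(CAP_MAC_ADMIN))
+ *		return -EPERM;
+ *
+ *   using the capable() helper declared later in this header.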
*/ + +#define CAP_MAC_ADMIN 33 + +/* Allow configuring the kernel's syslog (printk behaviour) */ + +#define CAP_SYSLOG 34 + +/* Allow triggering something that will wake the system */ + +#define CAP_WAKE_ALARM 35 + +/* Allow preventing system suspends while epoll events are pending */ + +#define CAP_EPOLLWAKEUP 36 + +#define CAP_LAST_CAP CAP_EPOLLWAKEUP + +#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) + +/* + * Bit location of each capability (used by user-space library and kernel) + */ + +#define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */ +#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */ + +#ifdef __KERNEL__ + +struct dentry; +struct user_namespace; + +struct user_namespace *current_user_ns(void); + +extern const kernel_cap_t __cap_empty_set; +extern const kernel_cap_t __cap_init_eff_set; + +/* + * Internal kernel functions only + */ + +#define CAP_FOR_EACH_U32(__capi) \ + for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi) + +/* + * CAP_FS_MASK and CAP_NFSD_MASKS: + * + * The fs mask is all the privileges that fsuid==0 historically meant. + * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE. + * + * It has never meant setting security.* and trusted.* xattrs. + * + * We could also define fsmask as follows: + * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions + * 2. The security.* and trusted.* xattrs are fs-related MAC permissions + */ + +# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \ + | CAP_TO_MASK(CAP_MKNOD) \ + | CAP_TO_MASK(CAP_DAC_OVERRIDE) \ + | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \ + | CAP_TO_MASK(CAP_FOWNER) \ + | CAP_TO_MASK(CAP_FSETID)) + +# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE)) + +#if _KERNEL_CAPABILITY_U32S != 2 +# error Fix up hand-coded capability macro initializers +#else /* HAND-CODED capability initializers */ + +# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }}) +# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }}) +# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ + | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \ + CAP_FS_MASK_B1 } }) +# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ + | CAP_TO_MASK(CAP_SYS_RESOURCE), \ + CAP_FS_MASK_B1 } }) + +#endif /* _KERNEL_CAPABILITY_U32S != 2 */ + +# define cap_clear(c) do { (c) = __cap_empty_set; } while (0) + +#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag)) +#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag)) +#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag)) + +#define CAP_BOP_ALL(c, a, b, OP) \ +do { \ + unsigned __capi; \ + CAP_FOR_EACH_U32(__capi) { \ + c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \ + } \ +} while (0) + +#define CAP_UOP_ALL(c, a, OP) \ +do { \ + unsigned __capi; \ + CAP_FOR_EACH_U32(__capi) { \ + c.cap[__capi] = OP a.cap[__capi]; \ + } \ +} while (0) + +static inline kernel_cap_t cap_combine(const kernel_cap_t a, + const kernel_cap_t b) +{ + kernel_cap_t dest; + CAP_BOP_ALL(dest, a, b, |); + return dest; +} + +static inline kernel_cap_t cap_intersect(const kernel_cap_t a, + const kernel_cap_t b) +{ + kernel_cap_t dest; + CAP_BOP_ALL(dest, a, b, &); + return dest; +} + +static inline kernel_cap_t cap_drop(const kernel_cap_t a, + const kernel_cap_t drop) +{ + kernel_cap_t dest; + CAP_BOP_ALL(dest, a, drop, &~); + return dest; +} + +static inline kernel_cap_t cap_invert(const kernel_cap_t c) +{ + kernel_cap_t dest; + CAP_UOP_ALL(dest, c, ~); + return dest; +} + +static inline int cap_isclear(const 
kernel_cap_t a) +{ + unsigned __capi; + CAP_FOR_EACH_U32(__capi) { + if (a.cap[__capi] != 0) + return 0; + } + return 1; +} + +/* + * Check if "a" is a subset of "set". + * return 1 if ALL of the capabilities in "a" are also in "set" + * cap_issubset(0101, 1111) will return 1 + * return 0 if ANY of the capabilities in "a" are not in "set" + * cap_issubset(1111, 0101) will return 0 + */ +static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set) +{ + kernel_cap_t dest; + dest = cap_drop(a, set); + return cap_isclear(dest); +} + +/* Used to decide between falling back on the old suser() or fsuser(). */ + +static inline int cap_is_fs_cap(int cap) +{ + const kernel_cap_t __cap_fs_set = CAP_FS_SET; + return !!(CAP_TO_MASK(cap) & __cap_fs_set.cap[CAP_TO_INDEX(cap)]); +} + +static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a) +{ + const kernel_cap_t __cap_fs_set = CAP_FS_SET; + return cap_drop(a, __cap_fs_set); +} + +static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a, + const kernel_cap_t permitted) +{ + const kernel_cap_t __cap_fs_set = CAP_FS_SET; + return cap_combine(a, + cap_intersect(permitted, __cap_fs_set)); +} + +static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a) +{ + const kernel_cap_t __cap_fs_set = CAP_NFSD_SET; + return cap_drop(a, __cap_fs_set); +} + +static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a, + const kernel_cap_t permitted) +{ + const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET; + return cap_combine(a, + cap_intersect(permitted, __cap_nfsd_set)); +} + +extern bool has_capability(struct task_struct *t, int cap); +extern bool has_ns_capability(struct task_struct *t, + struct user_namespace *ns, int cap); +extern bool has_capability_noaudit(struct task_struct *t, int cap); +extern bool has_ns_capability_noaudit(struct task_struct *t, + struct user_namespace *ns, int cap); +extern bool capable(int cap); +extern bool ns_capable(struct user_namespace *ns, int cap); +extern bool nsown_capable(int cap); + +/* audit system wants to get cap info from files as well */ +extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); + +#endif /* __KERNEL__ */ + +#endif /* !_LINUX_CAPABILITY_H */ diff --git a/include/linux/capi.h b/include/linux/capi.h new file mode 100644 index 00000000..65100d6c --- /dev/null +++ b/include/linux/capi.h @@ -0,0 +1,133 @@ +/* $Id: capi.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp $ + * + * CAPI 2.0 Interface for Linux + * + * Copyright 1997 by Carsten Paeth (calle@calle.in-berlin.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ + +#ifndef __LINUX_CAPI_H__ +#define __LINUX_CAPI_H__ + +#include <linux/types.h> +#include <linux/ioctl.h> +#ifndef __KERNEL__ +#include <linux/kernelcapi.h> +#endif + +/* + * CAPI_REGISTER + */ + +typedef struct capi_register_params { /* CAPI_REGISTER */ + __u32 level3cnt; /* No. of simulatneous user data connections */ + __u32 datablkcnt; /* No. 
of buffered data messages */ + __u32 datablklen; /* Size of buffered data messages */ +} capi_register_params; + +#define CAPI_REGISTER _IOW('C',0x01,struct capi_register_params) + +/* + * CAPI_GET_MANUFACTURER + */ + +#define CAPI_MANUFACTURER_LEN 64 + +#define CAPI_GET_MANUFACTURER _IOWR('C',0x06,int) /* broken: wanted size 64 (CAPI_MANUFACTURER_LEN) */ + +/* + * CAPI_GET_VERSION + */ + +typedef struct capi_version { + __u32 majorversion; + __u32 minorversion; + __u32 majormanuversion; + __u32 minormanuversion; +} capi_version; + +#define CAPI_GET_VERSION _IOWR('C',0x07,struct capi_version) + +/* + * CAPI_GET_SERIAL + */ + +#define CAPI_SERIAL_LEN 8 +#define CAPI_GET_SERIAL _IOWR('C',0x08,int) /* broken: wanted size 8 (CAPI_SERIAL_LEN) */ + +/* + * CAPI_GET_PROFILE + */ + +typedef struct capi_profile { + __u16 ncontroller; /* number of installed controller */ + __u16 nbchannel; /* number of B-Channels */ + __u32 goptions; /* global options */ + __u32 support1; /* B1 protocols support */ + __u32 support2; /* B2 protocols support */ + __u32 support3; /* B3 protocols support */ + __u32 reserved[6]; /* reserved */ + __u32 manu[5]; /* manufacturer specific information */ +} capi_profile; + +#define CAPI_GET_PROFILE _IOWR('C',0x09,struct capi_profile) + +typedef struct capi_manufacturer_cmd { + unsigned long cmd; + void __user *data; +} capi_manufacturer_cmd; + +/* + * CAPI_MANUFACTURER_CMD + */ + +#define CAPI_MANUFACTURER_CMD _IOWR('C',0x20, struct capi_manufacturer_cmd) + +/* + * CAPI_GET_ERRCODE + * capi errcode is set, * if read, write, or ioctl returns EIO, + * ioctl returns errcode directly, and in arg, if != 0 + */ + +#define CAPI_GET_ERRCODE _IOR('C',0x21, __u16) + +/* + * CAPI_INSTALLED + */ +#define CAPI_INSTALLED _IOR('C',0x22, __u16) + + +/* + * member contr is input for + * CAPI_GET_MANUFACTURER, CAPI_VERSION, CAPI_GET_SERIAL + * and CAPI_GET_PROFILE + */ +typedef union capi_ioctl_struct { + __u32 contr; + capi_register_params rparams; + __u8 manufacturer[CAPI_MANUFACTURER_LEN]; + capi_version version; + __u8 serial[CAPI_SERIAL_LEN]; + capi_profile profile; + capi_manufacturer_cmd cmd; + __u16 errcode; +} capi_ioctl_struct; + +/* + * Middleware extension + */ + +#define CAPIFLAG_HIGHJACKING 0x0001 + +#define CAPI_GET_FLAGS _IOR('C',0x23, unsigned) +#define CAPI_SET_FLAGS _IOR('C',0x24, unsigned) +#define CAPI_CLR_FLAGS _IOR('C',0x25, unsigned) + +#define CAPI_NCCI_OPENCOUNT _IOR('C',0x26, unsigned) + +#define CAPI_NCCI_GETUNIT _IOR('C',0x27, unsigned) + +#endif /* __LINUX_CAPI_H__ */ diff --git a/include/linux/cb710.h b/include/linux/cb710.h new file mode 100644 index 00000000..8cc10411 --- /dev/null +++ b/include/linux/cb710.h @@ -0,0 +1,208 @@ +/* + * cb710/cb710.h + * + * Copyright by Michał Mirosław, 2008-2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef LINUX_CB710_DRIVER_H +#define LINUX_CB710_DRIVER_H + +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/mmc/host.h> + +struct cb710_slot; + +typedef int (*cb710_irq_handler_t)(struct cb710_slot *); + +/* per-virtual-slot structure */ +struct cb710_slot { + struct platform_device pdev; + void __iomem *iobase; + cb710_irq_handler_t irq_handler; +}; + +/* per-device structure */ +struct cb710_chip { + struct pci_dev *pdev; + void __iomem *iobase; + unsigned platform_id; +#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS + atomic_t slot_refs_count; +#endif + unsigned slot_mask; + unsigned slots; + spinlock_t irq_lock; + struct cb710_slot slot[0]; +}; + +/* NOTE: cb710_chip.slots is modified only during device init/exit and + * they are all serialized wrt themselves */ + +/* cb710_chip.slot_mask values */ +#define CB710_SLOT_MMC 1 +#define CB710_SLOT_MS 2 +#define CB710_SLOT_SM 4 + +/* slot port accessors - so the logic is more clear in the code */ +#define CB710_PORT_ACCESSORS(t) \ +static inline void cb710_write_port_##t(struct cb710_slot *slot, \ + unsigned port, u##t value) \ +{ \ + iowrite##t(value, slot->iobase + port); \ +} \ + \ +static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \ + unsigned port) \ +{ \ + return ioread##t(slot->iobase + port); \ +} \ + \ +static inline void cb710_modify_port_##t(struct cb710_slot *slot, \ + unsigned port, u##t set, u##t clear) \ +{ \ + iowrite##t( \ + (ioread##t(slot->iobase + port) & ~clear)|set, \ + slot->iobase + port); \ +} + +CB710_PORT_ACCESSORS(8) +CB710_PORT_ACCESSORS(16) +CB710_PORT_ACCESSORS(32) + +void cb710_pci_update_config_reg(struct pci_dev *pdev, + int reg, uint32_t and, uint32_t xor); +void cb710_set_irq_handler(struct cb710_slot *slot, + cb710_irq_handler_t handler); + +/* some device struct walking */ + +static inline struct cb710_slot *cb710_pdev_to_slot( + struct platform_device *pdev) +{ + return container_of(pdev, struct cb710_slot, pdev); +} + +static inline struct cb710_chip *cb710_slot_to_chip(struct cb710_slot *slot) +{ + return dev_get_drvdata(slot->pdev.dev.parent); +} + +static inline struct device *cb710_slot_dev(struct cb710_slot *slot) +{ + return &slot->pdev.dev; +} + +static inline struct device *cb710_chip_dev(struct cb710_chip *chip) +{ + return &chip->pdev->dev; +} + +/* debugging aids */ + +#ifdef CONFIG_CB710_DEBUG +void cb710_dump_regs(struct cb710_chip *chip, unsigned dump); +#else +#define cb710_dump_regs(c, d) do {} while (0) +#endif + +#define CB710_DUMP_REGS_MMC 0x0F +#define CB710_DUMP_REGS_MS 0x30 +#define CB710_DUMP_REGS_SM 0xC0 +#define CB710_DUMP_REGS_ALL 0xFF +#define CB710_DUMP_REGS_MASK 0xFF + +#define CB710_DUMP_ACCESS_8 0x100 +#define CB710_DUMP_ACCESS_16 0x200 +#define CB710_DUMP_ACCESS_32 0x400 +#define CB710_DUMP_ACCESS_ALL 0x700 +#define CB710_DUMP_ACCESS_MASK 0x700 + +#endif /* LINUX_CB710_DRIVER_H */ +/* + * cb710/sgbuf2.h + * + * Copyright by Michał Mirosław, 2008-2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef LINUX_CB710_SG_H +#define LINUX_CB710_SG_H + +#include <linux/highmem.h> +#include <linux/scatterlist.h> + +/* + * 32-bit PIO mapping sg iterator + * + * Hides scatterlist access issues - fragment boundaries, alignment, page + * mapping - for drivers using 32-bit-word-at-a-time-PIO (ie. 
PCI devices + * without DMA support). + * + * Best-case reading (transfer from device): + * sg_miter_start(, SG_MITER_TO_SG); + * cb710_sg_dwiter_write_from_io(); + * sg_miter_stop(); + * + * Best-case writing (transfer to device): + * sg_miter_start(, SG_MITER_FROM_SG); + * cb710_sg_dwiter_read_to_io(); + * sg_miter_stop(); + */ + +uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter); +void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data); + +/** + * cb710_sg_dwiter_write_from_io - transfer data to mapped buffer from 32-bit IO port + * @miter: sg mapping iter + * @port: PIO port - IO or MMIO address + * @count: number of 32-bit words to transfer + * + * Description: + * Reads @count 32-bit words from register @port and stores it in + * buffer iterated by @miter. Data that would overflow the buffer + * is silently ignored. Iterator is advanced by 4*@count bytes + * or to the buffer's end whichever is closer. + * + * Context: + * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. + */ +static inline void cb710_sg_dwiter_write_from_io(struct sg_mapping_iter *miter, + void __iomem *port, size_t count) +{ + while (count-- > 0) + cb710_sg_dwiter_write_next_block(miter, ioread32(port)); +} + +/** + * cb710_sg_dwiter_read_to_io - transfer data to 32-bit IO port from mapped buffer + * @miter: sg mapping iter + * @port: PIO port - IO or MMIO address + * @count: number of 32-bit words to transfer + * + * Description: + * Writes @count 32-bit words to register @port from buffer iterated + * through @miter. If buffer ends before @count words are written + * missing data is replaced by zeroes. @miter is advanced by 4*@count + * bytes or to the buffer's end whichever is closer. + * + * Context: + * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. 
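+ *
+ * A minimal usage sketch, mirroring the "best-case writing" pattern in the
+ * comment at the top of this file (miter, sg, sg_len, port and count are
+ * illustrative local variables):
+ *
+ *	sg_miter_start(&miter, sg, sg_len, SG_MITER_FROM_SG);
+ *	cb710_sg_dwiter_read_to_io(&miter, port, count);
+ *	sg_miter_stop(&miter);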
+ */ +static inline void cb710_sg_dwiter_read_to_io(struct sg_mapping_iter *miter, + void __iomem *port, size_t count) +{ + while (count-- > 0) + iowrite32(cb710_sg_dwiter_read_next_block(miter), port); +} + +#endif /* LINUX_CB710_SG_H */ diff --git a/include/linux/cciss_defs.h b/include/linux/cciss_defs.h new file mode 100644 index 00000000..316b670d --- /dev/null +++ b/include/linux/cciss_defs.h @@ -0,0 +1,130 @@ +#ifndef CCISS_DEFS_H +#define CCISS_DEFS_H + +#include <linux/types.h> + +/* general boundary definitions */ +#define SENSEINFOBYTES 32 /* note that this value may vary + between host implementations */ + +/* Command Status value */ +#define CMD_SUCCESS 0x0000 +#define CMD_TARGET_STATUS 0x0001 +#define CMD_DATA_UNDERRUN 0x0002 +#define CMD_DATA_OVERRUN 0x0003 +#define CMD_INVALID 0x0004 +#define CMD_PROTOCOL_ERR 0x0005 +#define CMD_HARDWARE_ERR 0x0006 +#define CMD_CONNECTION_LOST 0x0007 +#define CMD_ABORTED 0x0008 +#define CMD_ABORT_FAILED 0x0009 +#define CMD_UNSOLICITED_ABORT 0x000A +#define CMD_TIMEOUT 0x000B +#define CMD_UNABORTABLE 0x000C + +/* transfer direction */ +#define XFER_NONE 0x00 +#define XFER_WRITE 0x01 +#define XFER_READ 0x02 +#define XFER_RSVD 0x03 + +/* task attribute */ +#define ATTR_UNTAGGED 0x00 +#define ATTR_SIMPLE 0x04 +#define ATTR_HEADOFQUEUE 0x05 +#define ATTR_ORDERED 0x06 +#define ATTR_ACA 0x07 + +/* cdb type */ +#define TYPE_CMD 0x00 +#define TYPE_MSG 0x01 + +/* Type defs used in the following structs */ +#define BYTE __u8 +#define WORD __u16 +#define HWORD __u16 +#define DWORD __u32 + +#define CISS_MAX_LUN 1024 + +#define LEVEL2LUN 1 /* index into Target(x) structure, due to byte swapping */ +#define LEVEL3LUN 0 + +#pragma pack(1) + +/* Command List Structure */ +typedef union _SCSI3Addr_struct { + struct { + BYTE Dev; + BYTE Bus:6; + BYTE Mode:2; /* b00 */ + } PeripDev; + struct { + BYTE DevLSB; + BYTE DevMSB:6; + BYTE Mode:2; /* b01 */ + } LogDev; + struct { + BYTE Dev:5; + BYTE Bus:3; + BYTE Targ:6; + BYTE Mode:2; /* b10 */ + } LogUnit; +} SCSI3Addr_struct; + +typedef struct _PhysDevAddr_struct { + DWORD TargetId:24; + DWORD Bus:6; + DWORD Mode:2; + SCSI3Addr_struct Target[2]; /* 2 level target device addr */ +} PhysDevAddr_struct; + +typedef struct _LogDevAddr_struct { + DWORD VolId:30; + DWORD Mode:2; + BYTE reserved[4]; +} LogDevAddr_struct; + +typedef union _LUNAddr_struct { + BYTE LunAddrBytes[8]; + SCSI3Addr_struct SCSI3Lun[4]; + PhysDevAddr_struct PhysDev; + LogDevAddr_struct LogDev; +} LUNAddr_struct; + +typedef struct _RequestBlock_struct { + BYTE CDBLen; + struct { + BYTE Type:3; + BYTE Attribute:3; + BYTE Direction:2; + } Type; + HWORD Timeout; + BYTE CDB[16]; +} RequestBlock_struct; + +typedef union _MoreErrInfo_struct{ + struct { + BYTE Reserved[3]; + BYTE Type; + DWORD ErrorInfo; + } Common_Info; + struct{ + BYTE Reserved[2]; + BYTE offense_size; /* size of offending entry */ + BYTE offense_num; /* byte # of offense 0-base */ + DWORD offense_value; + } Invalid_Cmd; +} MoreErrInfo_struct; +typedef struct _ErrorInfo_struct { + BYTE ScsiStatus; + BYTE SenseLen; + HWORD CommandStatus; + DWORD ResidualCnt; + MoreErrInfo_struct MoreErrInfo; + BYTE SenseInfo[SENSEINFOBYTES]; +} ErrorInfo_struct; + +#pragma pack() + +#endif /* CCISS_DEFS_H */ diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h new file mode 100644 index 00000000..986493f5 --- /dev/null +++ b/include/linux/cciss_ioctl.h @@ -0,0 +1,115 @@ +#ifndef CCISS_IOCTLH +#define CCISS_IOCTLH + +#include <linux/types.h> +#include <linux/ioctl.h> +#include 
<linux/cciss_defs.h> + +#define CCISS_IOC_MAGIC 'B' + + +typedef struct _cciss_pci_info_struct +{ + unsigned char bus; + unsigned char dev_fn; + unsigned short domain; + __u32 board_id; +} cciss_pci_info_struct; + +typedef struct _cciss_coalint_struct +{ + __u32 delay; + __u32 count; +} cciss_coalint_struct; + +typedef char NodeName_type[16]; + +typedef __u32 Heartbeat_type; + +#define CISS_PARSCSIU2 0x0001 +#define CISS_PARCSCIU3 0x0002 +#define CISS_FIBRE1G 0x0100 +#define CISS_FIBRE2G 0x0200 +typedef __u32 BusTypes_type; + +typedef char FirmwareVer_type[4]; +typedef __u32 DriverVer_type; + +#define MAX_KMALLOC_SIZE 128000 + +typedef struct _IOCTL_Command_struct { + LUNAddr_struct LUN_info; + RequestBlock_struct Request; + ErrorInfo_struct error_info; + WORD buf_size; /* size in bytes of the buf */ + BYTE __user *buf; +} IOCTL_Command_struct; + +typedef struct _BIG_IOCTL_Command_struct { + LUNAddr_struct LUN_info; + RequestBlock_struct Request; + ErrorInfo_struct error_info; + DWORD malloc_size; /* < MAX_KMALLOC_SIZE in cciss.c */ + DWORD buf_size; /* size in bytes of the buf */ + /* < malloc_size * MAXSGENTRIES */ + BYTE __user *buf; +} BIG_IOCTL_Command_struct; + +typedef struct _LogvolInfo_struct{ + __u32 LunID; + int num_opens; /* number of opens on the logical volume */ + int num_parts; /* number of partitions configured on logvol */ +} LogvolInfo_struct; + +#define CCISS_GETPCIINFO _IOR(CCISS_IOC_MAGIC, 1, cciss_pci_info_struct) + +#define CCISS_GETINTINFO _IOR(CCISS_IOC_MAGIC, 2, cciss_coalint_struct) +#define CCISS_SETINTINFO _IOW(CCISS_IOC_MAGIC, 3, cciss_coalint_struct) + +#define CCISS_GETNODENAME _IOR(CCISS_IOC_MAGIC, 4, NodeName_type) +#define CCISS_SETNODENAME _IOW(CCISS_IOC_MAGIC, 5, NodeName_type) + +#define CCISS_GETHEARTBEAT _IOR(CCISS_IOC_MAGIC, 6, Heartbeat_type) +#define CCISS_GETBUSTYPES _IOR(CCISS_IOC_MAGIC, 7, BusTypes_type) +#define CCISS_GETFIRMVER _IOR(CCISS_IOC_MAGIC, 8, FirmwareVer_type) +#define CCISS_GETDRIVVER _IOR(CCISS_IOC_MAGIC, 9, DriverVer_type) +#define CCISS_REVALIDVOLS _IO(CCISS_IOC_MAGIC, 10) +#define CCISS_PASSTHRU _IOWR(CCISS_IOC_MAGIC, 11, IOCTL_Command_struct) +#define CCISS_DEREGDISK _IO(CCISS_IOC_MAGIC, 12) + +/* no longer used... 
use REGNEWD instead */ +#define CCISS_REGNEWDISK _IOW(CCISS_IOC_MAGIC, 13, int) + +#define CCISS_REGNEWD _IO(CCISS_IOC_MAGIC, 14) +#define CCISS_RESCANDISK _IO(CCISS_IOC_MAGIC, 16) +#define CCISS_GETLUNINFO _IOR(CCISS_IOC_MAGIC, 17, LogvolInfo_struct) +#define CCISS_BIG_PASSTHRU _IOWR(CCISS_IOC_MAGIC, 18, BIG_IOCTL_Command_struct) + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT + +/* 32 bit compatible ioctl structs */ +typedef struct _IOCTL32_Command_struct { + LUNAddr_struct LUN_info; + RequestBlock_struct Request; + ErrorInfo_struct error_info; + WORD buf_size; /* size in bytes of the buf */ + __u32 buf; /* 32 bit pointer to data buffer */ +} IOCTL32_Command_struct; + +typedef struct _BIG_IOCTL32_Command_struct { + LUNAddr_struct LUN_info; + RequestBlock_struct Request; + ErrorInfo_struct error_info; + DWORD malloc_size; /* < MAX_KMALLOC_SIZE in cciss.c */ + DWORD buf_size; /* size in bytes of the buf */ + /* < malloc_size * MAXSGENTRIES */ + __u32 buf; /* 32 bit pointer to data buffer */ +} BIG_IOCTL32_Command_struct; + +#define CCISS_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 11, IOCTL32_Command_struct) +#define CCISS_BIG_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 18, BIG_IOCTL32_Command_struct) + +#endif /* CONFIG_COMPAT */ +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/cd1400.h b/include/linux/cd1400.h new file mode 100644 index 00000000..1dc3ab05 --- /dev/null +++ b/include/linux/cd1400.h @@ -0,0 +1,292 @@ +/*****************************************************************************/ + +/* + * cd1400.h -- cd1400 UART hardware info. + * + * Copyright (C) 1996-1998 Stallion Technologies + * Copyright (C) 1994-1996 Greg Ungerer. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*****************************************************************************/ +#ifndef _CD1400_H +#define _CD1400_H +/*****************************************************************************/ + +/* + * Define the number of async ports per cd1400 uart chip. + */ +#define CD1400_PORTS 4 + +/* + * Define the cd1400 uarts internal FIFO sizes. + */ +#define CD1400_TXFIFOSIZE 12 +#define CD1400_RXFIFOSIZE 12 + +/* + * Local RX FIFO thresh hold level. Also define the RTS thresh hold + * based on the RX thresh hold. + */ +#define FIFO_RXTHRESHOLD 6 +#define FIFO_RTSTHRESHOLD 7 + +/*****************************************************************************/ + +/* + * Define the cd1400 register addresses. These are all the valid + * registers with the cd1400. Some are global, some virtual, some + * per port. 
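+ *
+ *   Usage note (hedged - the exact access mechanism is board specific and
+ *   not defined by this header): the per-port registers are reached by
+ *   first selecting a channel through CAR, roughly
+ *
+ *	board_write_reg(brd, CAR, portnr & 0x03);	// select channel 0..3
+ *	status = board_read_reg(brd, MSVR1);		// then read its modem lines
+ *
+ *   where board_write_reg()/board_read_reg() stand in for whatever
+ *   low-level accessors the board driver provides.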
+ */ +#define GFRCR 0x40 +#define CAR 0x68 +#define GCR 0x4b +#define SVRR 0x67 +#define RICR 0x44 +#define TICR 0x45 +#define MICR 0x46 +#define RIR 0x6b +#define TIR 0x6a +#define MIR 0x69 +#define PPR 0x7e + +#define RIVR 0x43 +#define TIVR 0x42 +#define MIVR 0x41 +#define TDR 0x63 +#define RDSR 0x62 +#define MISR 0x4c +#define EOSRR 0x60 + +#define LIVR 0x18 +#define CCR 0x05 +#define SRER 0x06 +#define COR1 0x08 +#define COR2 0x09 +#define COR3 0x0a +#define COR4 0x1e +#define COR5 0x1f +#define CCSR 0x0b +#define RDCR 0x0e +#define SCHR1 0x1a +#define SCHR2 0x1b +#define SCHR3 0x1c +#define SCHR4 0x1d +#define SCRL 0x22 +#define SCRH 0x23 +#define LNC 0x24 +#define MCOR1 0x15 +#define MCOR2 0x16 +#define RTPR 0x21 +#define MSVR1 0x6c +#define MSVR2 0x6d +#define PSVR 0x6f +#define RBPR 0x78 +#define RCOR 0x7c +#define TBPR 0x72 +#define TCOR 0x76 + +/*****************************************************************************/ + +/* + * Define the set of baud rate clock divisors. + */ +#define CD1400_CLK0 8 +#define CD1400_CLK1 32 +#define CD1400_CLK2 128 +#define CD1400_CLK3 512 +#define CD1400_CLK4 2048 + +#define CD1400_NUMCLKS 5 + +/*****************************************************************************/ + +/* + * Define the clock pre-scalar value to be a 5 ms clock. This should be + * OK for now. It would probably be better to make it 10 ms, but we + * can't fit that divisor into 8 bits! + */ +#define PPR_SCALAR 244 + +/*****************************************************************************/ + +/* + * Define values used to set character size options. + */ +#define COR1_CHL5 0x00 +#define COR1_CHL6 0x01 +#define COR1_CHL7 0x02 +#define COR1_CHL8 0x03 + +/* + * Define values used to set the number of stop bits. + */ +#define COR1_STOP1 0x00 +#define COR1_STOP15 0x04 +#define COR1_STOP2 0x08 + +/* + * Define values used to set the parity scheme in use. + */ +#define COR1_PARNONE 0x00 +#define COR1_PARFORCE 0x20 +#define COR1_PARENB 0x40 +#define COR1_PARIGNORE 0x10 + +#define COR1_PARODD 0x80 +#define COR1_PAREVEN 0x00 + +#define COR2_IXM 0x80 +#define COR2_TXIBE 0x40 +#define COR2_ETC 0x20 +#define COR2_LLM 0x10 +#define COR2_RLM 0x08 +#define COR2_RTSAO 0x04 +#define COR2_CTSAE 0x02 + +#define COR3_SCDRNG 0x80 +#define COR3_SCD34 0x40 +#define COR3_FCT 0x20 +#define COR3_SCD12 0x10 + +/* + * Define values used by COR4. + */ +#define COR4_BRKINT 0x08 +#define COR4_IGNBRK 0x18 + +/*****************************************************************************/ + +/* + * Define the modem control register values. + * Note that the actual hardware is a little different to the conventional + * pin names on the cd1400. + */ +#define MSVR1_DTR 0x01 +#define MSVR1_DSR 0x10 +#define MSVR1_RI 0x20 +#define MSVR1_CTS 0x40 +#define MSVR1_DCD 0x80 + +#define MSVR2_RTS 0x02 +#define MSVR2_DSR 0x10 +#define MSVR2_RI 0x20 +#define MSVR2_CTS 0x40 +#define MSVR2_DCD 0x80 + +#define MCOR1_DCD 0x80 +#define MCOR1_CTS 0x40 +#define MCOR1_RI 0x20 +#define MCOR1_DSR 0x10 + +#define MCOR2_DCD 0x80 +#define MCOR2_CTS 0x40 +#define MCOR2_RI 0x20 +#define MCOR2_DSR 0x10 + +/*****************************************************************************/ + +/* + * Define the bits used with the service (interrupt) enable register. 
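+ *
+ *   A hedged example: a driver wanting receive and modem-change interrupts
+ *   on a port might program this register with something like
+ *
+ *	board_write_reg(brd, SRER, SRER_RXDATA | SRER_MODEM);
+ *
+ *   (board_write_reg() is illustrative; the transmit bits are normally
+ *   enabled only while output is actually queued.)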
+ */ +#define SRER_NNDT 0x01 +#define SRER_TXEMPTY 0x02 +#define SRER_TXDATA 0x04 +#define SRER_RXDATA 0x10 +#define SRER_MODEM 0x80 + +/*****************************************************************************/ + +/* + * Define operational commands for the command register. + */ +#define CCR_RESET 0x80 +#define CCR_CORCHANGE 0x4e +#define CCR_SENDCH 0x20 +#define CCR_CHANCTRL 0x10 + +#define CCR_TXENABLE (CCR_CHANCTRL | 0x08) +#define CCR_TXDISABLE (CCR_CHANCTRL | 0x04) +#define CCR_RXENABLE (CCR_CHANCTRL | 0x02) +#define CCR_RXDISABLE (CCR_CHANCTRL | 0x01) + +#define CCR_SENDSCHR1 (CCR_SENDCH | 0x01) +#define CCR_SENDSCHR2 (CCR_SENDCH | 0x02) +#define CCR_SENDSCHR3 (CCR_SENDCH | 0x03) +#define CCR_SENDSCHR4 (CCR_SENDCH | 0x04) + +#define CCR_RESETCHAN (CCR_RESET | 0x00) +#define CCR_RESETFULL (CCR_RESET | 0x01) +#define CCR_TXFLUSHFIFO (CCR_RESET | 0x02) + +#define CCR_MAXWAIT 10000 + +/*****************************************************************************/ + +/* + * Define the valid acknowledgement types (for hw ack cycle). + */ +#define ACK_TYPMASK 0x07 +#define ACK_TYPTX 0x02 +#define ACK_TYPMDM 0x01 +#define ACK_TYPRXGOOD 0x03 +#define ACK_TYPRXBAD 0x07 + +#define SVRR_RX 0x01 +#define SVRR_TX 0x02 +#define SVRR_MDM 0x04 + +#define ST_OVERRUN 0x01 +#define ST_FRAMING 0x02 +#define ST_PARITY 0x04 +#define ST_BREAK 0x08 +#define ST_SCHAR1 0x10 +#define ST_SCHAR2 0x20 +#define ST_SCHAR3 0x30 +#define ST_SCHAR4 0x40 +#define ST_RANGE 0x70 +#define ST_SCHARMASK 0x70 +#define ST_TIMEOUT 0x80 + +#define MISR_DCD 0x80 +#define MISR_CTS 0x40 +#define MISR_RI 0x20 +#define MISR_DSR 0x10 + +/*****************************************************************************/ + +/* + * Defines for the CCSR status register. + */ +#define CCSR_RXENABLED 0x80 +#define CCSR_RXFLOWON 0x40 +#define CCSR_RXFLOWOFF 0x20 +#define CCSR_TXENABLED 0x08 +#define CCSR_TXFLOWON 0x04 +#define CCSR_TXFLOWOFF 0x02 + +/*****************************************************************************/ + +/* + * Define the embedded commands. + */ +#define ETC_CMD 0x00 +#define ETC_STARTBREAK 0x81 +#define ETC_DELAY 0x82 +#define ETC_STOPBREAK 0x83 + +/*****************************************************************************/ +#endif diff --git a/include/linux/cdev.h b/include/linux/cdev.h new file mode 100644 index 00000000..fb459197 --- /dev/null +++ b/include/linux/cdev.h @@ -0,0 +1,35 @@ +#ifndef _LINUX_CDEV_H +#define _LINUX_CDEV_H + +#include <linux/kobject.h> +#include <linux/kdev_t.h> +#include <linux/list.h> + +struct file_operations; +struct inode; +struct module; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +}; + +void cdev_init(struct cdev *, const struct file_operations *); + +struct cdev *cdev_alloc(void); + +void cdev_put(struct cdev *p); + +int cdev_add(struct cdev *, dev_t, unsigned); + +void cdev_del(struct cdev *); + +void cd_forget(struct inode *); + +extern struct backing_dev_info directly_mappable_cdev_bdi; + +#endif diff --git a/include/linux/cdk.h b/include/linux/cdk.h new file mode 100644 index 00000000..80093a8d --- /dev/null +++ b/include/linux/cdk.h @@ -0,0 +1,486 @@ +/*****************************************************************************/ + +/* + * cdk.h -- CDK interface definitions. + * + * Copyright (C) 1996-1998 Stallion Technologies + * Copyright (C) 1994-1996 Greg Ungerer. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*****************************************************************************/ +#ifndef _CDK_H +#define _CDK_H +/*****************************************************************************/ + +#pragma pack(2) + +/* + * The following set of definitions is used to communicate with the + * shared memory interface of the Stallion intelligent multiport serial + * boards. The definitions in this file are taken directly from the + * document titled "Generic Stackable Interface, Downloader and + * Communications Development Kit". + */ + +/* + * Define the set of important shared memory addresses. These are + * required to initialize the board and get things started. All of these + * addresses are relative to the start of the shared memory. + */ +#define CDK_SIGADDR 0x200 +#define CDK_FEATADDR 0x280 +#define CDK_CDKADDR 0x300 +#define CDK_RDYADDR 0x262 + +#define CDK_ALIVEMARKER 13 + +/* + * On hardware power up the ROMs located on the EasyConnection 8/64 will + * fill out the following signature information into shared memory. This + * way the host system can quickly determine that the board is present + * and is operational. + */ +typedef struct cdkecpsig { + unsigned long magic; + unsigned short romver; + unsigned short cputype; + unsigned char panelid[8]; +} cdkecpsig_t; + +#define ECP_MAGIC 0x21504345 + +/* + * On hardware power up the ROMs located on the ONboard, Stallion and + * Brumbys will fill out the following signature information into shared + * memory. This way the host system can quickly determine that the board + * is present and is operational. + */ +typedef struct cdkonbsig { + unsigned short magic0; + unsigned short magic1; + unsigned short magic2; + unsigned short magic3; + unsigned short romver; + unsigned short memoff; + unsigned short memseg; + unsigned short amask0; + unsigned short pic; + unsigned short status; + unsigned short btype; + unsigned short clkticks; + unsigned short clkspeed; + unsigned short amask1; + unsigned short amask2; +} cdkonbsig_t; + +#define ONB_MAGIC0 0xf2a7 +#define ONB_MAGIC1 0xa149 +#define ONB_MAGIC2 0x6352 +#define ONB_MAGIC3 0xf121 + +/* + * Define the feature area structure. The feature area is the set of + * startup parameters used by the slave image when it starts executing. + * They allow for the specification of buffer sizes, debug trace, etc. + */ +typedef struct cdkfeature { + unsigned long debug; + unsigned long banner; + unsigned long etype; + unsigned long nrdevs; + unsigned long brdspec; + unsigned long txrqsize; + unsigned long rxrqsize; + unsigned long flags; +} cdkfeature_t; + +#define ETYP_DDK 0 +#define ETYP_CDK 1 + +/* + * Define the CDK header structure. This is the info that the slave + * environment sets up after it has been downloaded and started. 
It + * essentially provides a memory map for the shared memory interface. + */ +typedef struct cdkhdr { + unsigned short command; + unsigned short status; + unsigned short port; + unsigned short mode; + unsigned long cmd_buf[14]; + unsigned short alive_cnt; + unsigned short intrpt_mode; + unsigned char intrpt_id[8]; + unsigned char ver_release; + unsigned char ver_modification; + unsigned char ver_fix; + unsigned char deadman_restart; + unsigned short deadman; + unsigned short nrdevs; + unsigned long memp; + unsigned long hostp; + unsigned long slavep; + unsigned char hostreq; + unsigned char slavereq; + unsigned char cmd_reserved[30]; +} cdkhdr_t; + +#define MODE_DDK 0 +#define MODE_CDK 1 + +#define IMD_INTR 0x0 +#define IMD_PPINTR 0x1 +#define IMD_POLL 0xff + +/* + * Define the memory mapping structure. This structure is pointed to by + * the memp field in the stlcdkhdr struct. As many as these structures + * as required are laid out in shared memory to define how the rest of + * shared memory is divided up. There will be one for each port. + */ +typedef struct cdkmem { + unsigned short dtype; + unsigned long offset; +} cdkmem_t; + +#define TYP_UNDEFINED 0x0 +#define TYP_ASYNCTRL 0x1 +#define TYP_ASYNC 0x20 +#define TYP_PARALLEL 0x40 +#define TYP_SYNCX21 0x60 + +/*****************************************************************************/ + +/* + * Following is a set of defines and structures used to actually deal + * with the serial ports on the board. Firstly is the set of commands + * that can be applied to ports. + */ +#define ASYCMD (((unsigned long) 'a') << 8) + +#define A_NULL (ASYCMD | 0) +#define A_FLUSH (ASYCMD | 1) +#define A_BREAK (ASYCMD | 2) +#define A_GETPORT (ASYCMD | 3) +#define A_SETPORT (ASYCMD | 4) +#define A_SETPORTF (ASYCMD | 5) +#define A_SETPORTFTX (ASYCMD | 6) +#define A_SETPORTFRX (ASYCMD | 7) +#define A_GETSIGNALS (ASYCMD | 8) +#define A_SETSIGNALS (ASYCMD | 9) +#define A_SETSIGNALSF (ASYCMD | 10) +#define A_SETSIGNALSFTX (ASYCMD | 11) +#define A_SETSIGNALSFRX (ASYCMD | 12) +#define A_GETNOTIFY (ASYCMD | 13) +#define A_SETNOTIFY (ASYCMD | 14) +#define A_NOTIFY (ASYCMD | 15) +#define A_PORTCTRL (ASYCMD | 16) +#define A_GETSTATS (ASYCMD | 17) +#define A_RQSTATE (ASYCMD | 18) +#define A_FLOWSTATE (ASYCMD | 19) +#define A_CLEARSTATS (ASYCMD | 20) + +/* + * Define those arguments used for simple commands. + */ +#define FLUSHRX 0x1 +#define FLUSHTX 0x2 + +#define BREAKON -1 +#define BREAKOFF -2 + +/* + * Define the port setting structure, and all those defines that go along + * with it. Basically this structure defines the characteristics of this + * port: baud rate, chars, parity, input/output char cooking etc. 
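+ *
+ *   An illustrative (not normative) initialisation of a 9600 baud, 8N1
+ *   port with RTS/CTS flow control; field semantics beyond the constants
+ *   defined below are a best guess from the names:
+ *
+ *	asyport_t ap = {
+ *		.baudout = 9600,
+ *		.baudin  = 9600,
+ *		.csize   = 8,
+ *		.stopbs  = PT_STOP1,
+ *		.parity  = PT_NOPARITY,
+ *		.flow    = F_RTSFLOW | F_CTSFLOW,
+ *	};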
+ */ +typedef struct asyport { + unsigned long baudout; + unsigned long baudin; + unsigned long iflag; + unsigned long oflag; + unsigned long lflag; + unsigned long pflag; + unsigned long flow; + unsigned long spare1; + unsigned short vtime; + unsigned short vmin; + unsigned short txlo; + unsigned short txhi; + unsigned short rxlo; + unsigned short rxhi; + unsigned short rxhog; + unsigned short spare2; + unsigned char csize; + unsigned char stopbs; + unsigned char parity; + unsigned char stopin; + unsigned char startin; + unsigned char stopout; + unsigned char startout; + unsigned char parmark; + unsigned char brkmark; + unsigned char cc[11]; +} asyport_t; + +#define PT_STOP1 0x0 +#define PT_STOP15 0x1 +#define PT_STOP2 0x2 + +#define PT_NOPARITY 0x0 +#define PT_ODDPARITY 0x1 +#define PT_EVENPARITY 0x2 +#define PT_MARKPARITY 0x3 +#define PT_SPACEPARITY 0x4 + +#define F_NONE 0x0 +#define F_IXON 0x1 +#define F_IXOFF 0x2 +#define F_IXANY 0x4 +#define F_IOXANY 0x8 +#define F_RTSFLOW 0x10 +#define F_CTSFLOW 0x20 +#define F_DTRFLOW 0x40 +#define F_DCDFLOW 0x80 +#define F_DSROFLOW 0x100 +#define F_DSRIFLOW 0x200 + +#define FI_NORX 0x1 +#define FI_RAW 0x2 +#define FI_ISTRIP 0x4 +#define FI_UCLC 0x8 +#define FI_INLCR 0x10 +#define FI_ICRNL 0x20 +#define FI_IGNCR 0x40 +#define FI_IGNBREAK 0x80 +#define FI_DSCRDBREAK 0x100 +#define FI_1MARKBREAK 0x200 +#define FI_2MARKBREAK 0x400 +#define FI_XCHNGBREAK 0x800 +#define FI_IGNRXERRS 0x1000 +#define FI_DSCDRXERRS 0x2000 +#define FI_1MARKRXERRS 0x4000 +#define FI_2MARKRXERRS 0x8000 +#define FI_XCHNGRXERRS 0x10000 +#define FI_DSCRDNULL 0x20000 + +#define FO_OLCUC 0x1 +#define FO_ONLCR 0x2 +#define FO_OOCRNL 0x4 +#define FO_ONOCR 0x8 +#define FO_ONLRET 0x10 +#define FO_ONL 0x20 +#define FO_OBS 0x40 +#define FO_OVT 0x80 +#define FO_OFF 0x100 +#define FO_OTAB1 0x200 +#define FO_OTAB2 0x400 +#define FO_OTAB3 0x800 +#define FO_OCR1 0x1000 +#define FO_OCR2 0x2000 +#define FO_OCR3 0x4000 +#define FO_OFILL 0x8000 +#define FO_ODELL 0x10000 + +#define P_RTSLOCK 0x1 +#define P_CTSLOCK 0x2 +#define P_MAPRTS 0x4 +#define P_MAPCTS 0x8 +#define P_LOOPBACK 0x10 +#define P_DTRFOLLOW 0x20 +#define P_FAKEDCD 0x40 + +#define P_RXIMIN 0x10000 +#define P_RXITIME 0x20000 +#define P_RXTHOLD 0x40000 + +/* + * Define a structure to communicate serial port signal and data state + * information. + */ +typedef struct asysigs { + unsigned long data; + unsigned long signal; + unsigned long sigvalue; +} asysigs_t; + +#define DT_TXBUSY 0x1 +#define DT_TXEMPTY 0x2 +#define DT_TXLOW 0x4 +#define DT_TXHIGH 0x8 +#define DT_TXFULL 0x10 +#define DT_TXHOG 0x20 +#define DT_TXFLOWED 0x40 +#define DT_TXBREAK 0x80 + +#define DT_RXBUSY 0x100 +#define DT_RXEMPTY 0x200 +#define DT_RXLOW 0x400 +#define DT_RXHIGH 0x800 +#define DT_RXFULL 0x1000 +#define DT_RXHOG 0x2000 +#define DT_RXFLOWED 0x4000 +#define DT_RXBREAK 0x8000 + +#define SG_DTR 0x1 +#define SG_DCD 0x2 +#define SG_RTS 0x4 +#define SG_CTS 0x8 +#define SG_DSR 0x10 +#define SG_RI 0x20 + +/* + * Define the notification setting structure. This is used to tell the + * port what events we want to be informed about. Fields here use the + * same defines as for the asysigs structure above. + */ +typedef struct asynotify { + unsigned long ctrl; + unsigned long data; + unsigned long signal; + unsigned long sigvalue; +} asynotify_t; + +/* + * Define the port control structure. It is used to do fine grain + * control operations on the port. 
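+ *   (An illustrative reading of the flags defined below: a driver might
+ *   set .txctrl = CT_STOPFLOW to pause output, and later
+ *   .txctrl = CT_STARTFLOW to resume it.)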
+ */ +typedef struct { + unsigned long rxctrl; + unsigned long txctrl; + char rximdch; + char tximdch; + char spare1; + char spare2; +} asyctrl_t; + +#define CT_ENABLE 0x1 +#define CT_DISABLE 0x2 +#define CT_STOP 0x4 +#define CT_START 0x8 +#define CT_STARTFLOW 0x10 +#define CT_STOPFLOW 0x20 +#define CT_SENDCHR 0x40 + +/* + * Define the stats structure kept for each port. This is a useful set + * of data collected for each port on the slave. The A_GETSTATS command + * is used to retrieve this data from the slave. + */ +typedef struct asystats { + unsigned long opens; + unsigned long txchars; + unsigned long rxchars; + unsigned long txringq; + unsigned long rxringq; + unsigned long txmsgs; + unsigned long rxmsgs; + unsigned long txflushes; + unsigned long rxflushes; + unsigned long overruns; + unsigned long framing; + unsigned long parity; + unsigned long ringover; + unsigned long lost; + unsigned long rxstart; + unsigned long rxstop; + unsigned long txstart; + unsigned long txstop; + unsigned long dcdcnt; + unsigned long dtrcnt; + unsigned long ctscnt; + unsigned long rtscnt; + unsigned long dsrcnt; + unsigned long ricnt; + unsigned long txbreaks; + unsigned long rxbreaks; + unsigned long signals; + unsigned long state; + unsigned long hwid; +} asystats_t; + +/*****************************************************************************/ + +/* + * All command and control communication with a device on the slave is + * via a control block in shared memory. Each device has its own control + * block, defined by the following structure. The control block allows + * the host to open, close and control the device on the slave. + */ +typedef struct cdkctrl { + unsigned char open; + unsigned char close; + unsigned long openarg; + unsigned long closearg; + unsigned long cmd; + unsigned long status; + unsigned long args[32]; +} cdkctrl_t; + +/* + * Each device on the slave passes data to and from the host via a ring + * queue in shared memory. Define a ring queue structure to hold the + * vital information about each ring queue. Two ring queues will be + * allocated for each port, one for receive data and one for transmit + * data. + */ +typedef struct cdkasyrq { + unsigned long offset; + unsigned short size; + unsigned short head; + unsigned short tail; +} cdkasyrq_t; + +/* + * Each asynchronous port is defined in shared memory by the following + * structure. It contains a control block to command a device, and also + * the necessary data channel information as well. + */ +typedef struct cdkasy { + cdkctrl_t ctrl; + unsigned short notify; + asynotify_t changed; + unsigned short receive; + cdkasyrq_t rxq; + unsigned short transmit; + cdkasyrq_t txq; +} cdkasy_t; + +#pragma pack() + +/*****************************************************************************/ + +/* + * Define the set of ioctls used by the driver to do special things + * to the board. These include interrupting it, and initializing + * the driver after board startup and shutdown. + */ +#include <linux/ioctl.h> + +#define STL_BINTR _IO('s',20) +#define STL_BSTART _IO('s',21) +#define STL_BSTOP _IO('s',22) +#define STL_BRESET _IO('s',23) + +/* + * Define a set of ioctl extensions, used to get at special stuff. 
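+ *
+ *   Like the board ioctls above, these are issued from user space on the
+ *   driver's control node; a hedged sketch (device path illustrative):
+ *
+ *	int fd = open("/dev/staliomem0", O_RDWR);
+ *	ioctl(fd, STL_BSTART, 0);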
+ */ +#define STL_GETPFLAG _IO('s',80) +#define STL_SETPFLAG _IO('s',81) + +/*****************************************************************************/ +#endif diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h new file mode 100644 index 00000000..dfd7f187 --- /dev/null +++ b/include/linux/cdrom.h @@ -0,0 +1,1214 @@ +/* + * -- <linux/cdrom.h> + * General header file for linux CD-ROM drivers + * Copyright (C) 1992 David Giller, rafetmad@oxy.edu + * 1994, 1995 Eberhard Mönkeberg, emoenke@gwdg.de + * 1996 David van Leeuwen, david@tm.tno.nl + * 1997, 1998 Erik Andersen, andersee@debian.org + * 1998-2002 Jens Axboe, axboe@suse.de + */ + +#ifndef _LINUX_CDROM_H +#define _LINUX_CDROM_H + +#include <linux/types.h> +#include <asm/byteorder.h> + +/******************************************************* + * As of Linux 2.1.x, all Linux CD-ROM application programs will use this + * (and only this) include file. It is my hope to provide Linux with + * a uniform interface between software accessing CD-ROMs and the various + * device drivers that actually talk to the drives. There may still be + * 23 different kinds of strange CD-ROM drives, but at least there will + * now be one, and only one, Linux CD-ROM interface. + * + * Additionally, as of Linux 2.1.x, all Linux application programs + * should use the O_NONBLOCK option when opening a CD-ROM device + * for subsequent ioctl commands. This allows for neat system errors + * like "No medium found" or "Wrong medium type" upon attempting to + * mount or play an empty slot, mount an audio disc, or play a data disc. + * Generally, changing an application program to support O_NONBLOCK + * is as easy as the following: + * - drive = open("/dev/cdrom", O_RDONLY); + * + drive = open("/dev/cdrom", O_RDONLY | O_NONBLOCK); + * It is worth the small change. + * + * Patches for many common CD programs (provided by David A. van Leeuwen) + * can be found at: ftp://ftp.gwdg.de/pub/linux/cdrom/drivers/cm206/ + * + *******************************************************/ + +/* When a driver supports a certain function, but the cdrom drive we are + * using doesn't, we will return the error EDRIVE_CANT_DO_THIS. We will + * borrow the "Operation not supported" error from the network folks to + * accomplish this. Maybe someday we will get a more targeted error code, + * but this will do for now... */ +#define EDRIVE_CANT_DO_THIS EOPNOTSUPP + +/******************************************************* + * The CD-ROM IOCTL commands -- these should be supported by + * all the various cdrom drivers. For the CD-ROM ioctls, we + * will commandeer byte 0x53, or 'S'. 
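+ *
+ * A minimal user-space sketch (device path illustrative), following the
+ * O_NONBLOCK advice given earlier in this header:
+ *
+ *	int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);
+ *	ioctl(fd, CDROMEJECT);		// eject the medium
+ *	close(fd);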
+ *******************************************************/ +#define CDROMPAUSE 0x5301 /* Pause Audio Operation */ +#define CDROMRESUME 0x5302 /* Resume paused Audio Operation */ +#define CDROMPLAYMSF 0x5303 /* Play Audio MSF (struct cdrom_msf) */ +#define CDROMPLAYTRKIND 0x5304 /* Play Audio Track/index + (struct cdrom_ti) */ +#define CDROMREADTOCHDR 0x5305 /* Read TOC header + (struct cdrom_tochdr) */ +#define CDROMREADTOCENTRY 0x5306 /* Read TOC entry + (struct cdrom_tocentry) */ +#define CDROMSTOP 0x5307 /* Stop the cdrom drive */ +#define CDROMSTART 0x5308 /* Start the cdrom drive */ +#define CDROMEJECT 0x5309 /* Ejects the cdrom media */ +#define CDROMVOLCTRL 0x530a /* Control output volume + (struct cdrom_volctrl) */ +#define CDROMSUBCHNL 0x530b /* Read subchannel data + (struct cdrom_subchnl) */ +#define CDROMREADMODE2 0x530c /* Read CDROM mode 2 data (2336 Bytes) + (struct cdrom_read) */ +#define CDROMREADMODE1 0x530d /* Read CDROM mode 1 data (2048 Bytes) + (struct cdrom_read) */ +#define CDROMREADAUDIO 0x530e /* (struct cdrom_read_audio) */ +#define CDROMEJECT_SW 0x530f /* enable(1)/disable(0) auto-ejecting */ +#define CDROMMULTISESSION 0x5310 /* Obtain the start-of-last-session + address of multi session disks + (struct cdrom_multisession) */ +#define CDROM_GET_MCN 0x5311 /* Obtain the "Universal Product Code" + if available (struct cdrom_mcn) */ +#define CDROM_GET_UPC CDROM_GET_MCN /* This one is deprecated, + but here anyway for compatibility */ +#define CDROMRESET 0x5312 /* hard-reset the drive */ +#define CDROMVOLREAD 0x5313 /* Get the drive's volume setting + (struct cdrom_volctrl) */ +#define CDROMREADRAW 0x5314 /* read data in raw mode (2352 Bytes) + (struct cdrom_read) */ +/* + * These ioctls are used only used in aztcd.c and optcd.c + */ +#define CDROMREADCOOKED 0x5315 /* read data in cooked mode */ +#define CDROMSEEK 0x5316 /* seek msf address */ + +/* + * This ioctl is only used by the scsi-cd driver. + It is for playing audio in logical block addressing mode. + */ +#define CDROMPLAYBLK 0x5317 /* (struct cdrom_blk) */ + +/* + * These ioctls are only used in optcd.c + */ +#define CDROMREADALL 0x5318 /* read all 2646 bytes */ + +/* + * These ioctls are (now) only in ide-cd.c for controlling + * drive spindown time. They should be implemented in the + * Uniform driver, via generic packet commands, GPCMD_MODE_SELECT_10, + * GPCMD_MODE_SENSE_10 and the GPMODE_POWER_PAGE... + * -Erik + */ +#define CDROMGETSPINDOWN 0x531d +#define CDROMSETSPINDOWN 0x531e + +/* + * These ioctls are implemented through the uniform CD-ROM driver + * They _will_ be adopted by all CD-ROM drivers, when all the CD-ROM + * drivers are eventually ported to the uniform CD-ROM driver interface. + */ +#define CDROMCLOSETRAY 0x5319 /* pendant of CDROMEJECT */ +#define CDROM_SET_OPTIONS 0x5320 /* Set behavior options */ +#define CDROM_CLEAR_OPTIONS 0x5321 /* Clear behavior options */ +#define CDROM_SELECT_SPEED 0x5322 /* Set the CD-ROM speed */ +#define CDROM_SELECT_DISC 0x5323 /* Select disc (for juke-boxes) */ +#define CDROM_MEDIA_CHANGED 0x5325 /* Check is media changed */ +#define CDROM_DRIVE_STATUS 0x5326 /* Get tray position, etc. */ +#define CDROM_DISC_STATUS 0x5327 /* Get disc type, etc. */ +#define CDROM_CHANGER_NSLOTS 0x5328 /* Get number of slots */ +#define CDROM_LOCKDOOR 0x5329 /* lock or unlock door */ +#define CDROM_DEBUG 0x5330 /* Turn debug messages on/off */ +#define CDROM_GET_CAPABILITY 0x5331 /* get capabilities */ + +/* Note that scsi/scsi_ioctl.h also uses 0x5382 - 0x5386. 
+ * Future CDROM ioctls should be kept below 0x537F + */ + +/* This ioctl is only used by sbpcd at the moment */ +#define CDROMAUDIOBUFSIZ 0x5382 /* set the audio buffer size */ + /* conflict with SCSI_IOCTL_GET_IDLUN */ + +/* DVD-ROM Specific ioctls */ +#define DVD_READ_STRUCT 0x5390 /* Read structure */ +#define DVD_WRITE_STRUCT 0x5391 /* Write structure */ +#define DVD_AUTH 0x5392 /* Authentication */ + +#define CDROM_SEND_PACKET 0x5393 /* send a packet to the drive */ +#define CDROM_NEXT_WRITABLE 0x5394 /* get next writable block */ +#define CDROM_LAST_WRITTEN 0x5395 /* get last block written on disc */ + +/******************************************************* + * CDROM IOCTL structures + *******************************************************/ + +/* Address in MSF format */ +struct cdrom_msf0 +{ + __u8 minute; + __u8 second; + __u8 frame; +}; + +/* Address in either MSF or logical format */ +union cdrom_addr +{ + struct cdrom_msf0 msf; + int lba; +}; + +/* This struct is used by the CDROMPLAYMSF ioctl */ +struct cdrom_msf +{ + __u8 cdmsf_min0; /* start minute */ + __u8 cdmsf_sec0; /* start second */ + __u8 cdmsf_frame0; /* start frame */ + __u8 cdmsf_min1; /* end minute */ + __u8 cdmsf_sec1; /* end second */ + __u8 cdmsf_frame1; /* end frame */ +}; + +/* This struct is used by the CDROMPLAYTRKIND ioctl */ +struct cdrom_ti +{ + __u8 cdti_trk0; /* start track */ + __u8 cdti_ind0; /* start index */ + __u8 cdti_trk1; /* end track */ + __u8 cdti_ind1; /* end index */ +}; + +/* This struct is used by the CDROMREADTOCHDR ioctl */ +struct cdrom_tochdr +{ + __u8 cdth_trk0; /* start track */ + __u8 cdth_trk1; /* end track */ +}; + +/* This struct is used by the CDROMVOLCTRL and CDROMVOLREAD ioctls */ +struct cdrom_volctrl +{ + __u8 channel0; + __u8 channel1; + __u8 channel2; + __u8 channel3; +}; + +/* This struct is used by the CDROMSUBCHNL ioctl */ +struct cdrom_subchnl +{ + __u8 cdsc_format; + __u8 cdsc_audiostatus; + __u8 cdsc_adr: 4; + __u8 cdsc_ctrl: 4; + __u8 cdsc_trk; + __u8 cdsc_ind; + union cdrom_addr cdsc_absaddr; + union cdrom_addr cdsc_reladdr; +}; + + +/* This struct is used by the CDROMREADTOCENTRY ioctl */ +struct cdrom_tocentry +{ + __u8 cdte_track; + __u8 cdte_adr :4; + __u8 cdte_ctrl :4; + __u8 cdte_format; + union cdrom_addr cdte_addr; + __u8 cdte_datamode; +}; + +/* This struct is used by the CDROMREADMODE1, and CDROMREADMODE2 ioctls */ +struct cdrom_read +{ + int cdread_lba; + char *cdread_bufaddr; + int cdread_buflen; +}; + +/* This struct is used by the CDROMREADAUDIO ioctl */ +struct cdrom_read_audio +{ + union cdrom_addr addr; /* frame address */ + __u8 addr_format; /* CDROM_LBA or CDROM_MSF */ + int nframes; /* number of 2352-byte-frames to read at once */ + __u8 __user *buf; /* frame buffer (size: nframes*2352 bytes) */ +}; + +/* This struct is used with the CDROMMULTISESSION ioctl */ +struct cdrom_multisession +{ + union cdrom_addr addr; /* frame address: start-of-last-session + (not the new "frame 16"!). Only valid + if the "xa_flag" is true. */ + __u8 xa_flag; /* 1: "is XA disk" */ + __u8 addr_format; /* CDROM_LBA or CDROM_MSF */ +}; + +/* This struct is used with the CDROM_GET_MCN ioctl. + * Very few audio discs actually have Universal Product Code information, + * which should just be the Medium Catalog Number on the box. Also note + * that the way the codeis written on CD is _not_ uniform across all discs! 
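+ *
+ * A hedged user-space sketch of fetching it (fd opened as in the earlier
+ * example, error handling omitted):
+ *
+ *	struct cdrom_mcn mcn;
+ *	if (ioctl(fd, CDROM_GET_MCN, &mcn) == 0)
+ *		printf("MCN: %s\n", mcn.medium_catalog_number);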
+ */ +struct cdrom_mcn +{ + __u8 medium_catalog_number[14]; /* 13 ASCII digits, null-terminated */ +}; + +/* This is used by the CDROMPLAYBLK ioctl */ +struct cdrom_blk +{ + unsigned from; + unsigned short len; +}; + +#define CDROM_PACKET_SIZE 12 + +#define CGC_DATA_UNKNOWN 0 +#define CGC_DATA_WRITE 1 +#define CGC_DATA_READ 2 +#define CGC_DATA_NONE 3 + +/* for CDROM_PACKET_COMMAND ioctl */ +struct cdrom_generic_command +{ + unsigned char cmd[CDROM_PACKET_SIZE]; + unsigned char __user *buffer; + unsigned int buflen; + int stat; + struct request_sense __user *sense; + unsigned char data_direction; + int quiet; + int timeout; + void __user *reserved[1]; /* unused, actually */ +}; + +/* + * A CD-ROM physical sector size is 2048, 2052, 2056, 2324, 2332, 2336, + * 2340, or 2352 bytes long. + +* Sector types of the standard CD-ROM data formats: + * + * format sector type user data size (bytes) + * ----------------------------------------------------------------------------- + * 1 (Red Book) CD-DA 2352 (CD_FRAMESIZE_RAW) + * 2 (Yellow Book) Mode1 Form1 2048 (CD_FRAMESIZE) + * 3 (Yellow Book) Mode1 Form2 2336 (CD_FRAMESIZE_RAW0) + * 4 (Green Book) Mode2 Form1 2048 (CD_FRAMESIZE) + * 5 (Green Book) Mode2 Form2 2328 (2324+4 spare bytes) + * + * + * The layout of the standard CD-ROM data formats: + * ----------------------------------------------------------------------------- + * - audio (red): | audio_sample_bytes | + * | 2352 | + * + * - data (yellow, mode1): | sync - head - data - EDC - zero - ECC | + * | 12 - 4 - 2048 - 4 - 8 - 276 | + * + * - data (yellow, mode2): | sync - head - data | + * | 12 - 4 - 2336 | + * + * - XA data (green, mode2 form1): | sync - head - sub - data - EDC - ECC | + * | 12 - 4 - 8 - 2048 - 4 - 276 | + * + * - XA data (green, mode2 form2): | sync - head - sub - data - Spare | + * | 12 - 4 - 8 - 2324 - 4 | + * + */ + +/* Some generally useful CD-ROM information -- mostly based on the above */ +#define CD_MINS 74 /* max. 
minutes per CD, not really a limit */ +#define CD_SECS 60 /* seconds per minute */ +#define CD_FRAMES 75 /* frames per second */ +#define CD_SYNC_SIZE 12 /* 12 sync bytes per raw data frame */ +#define CD_MSF_OFFSET 150 /* MSF numbering offset of first frame */ +#define CD_CHUNK_SIZE 24 /* lowest-level "data bytes piece" */ +#define CD_NUM_OF_CHUNKS 98 /* chunks per frame */ +#define CD_FRAMESIZE_SUB 96 /* subchannel data "frame" size */ +#define CD_HEAD_SIZE 4 /* header (address) bytes per raw data frame */ +#define CD_SUBHEAD_SIZE 8 /* subheader bytes per raw XA data frame */ +#define CD_EDC_SIZE 4 /* bytes EDC per most raw data frame types */ +#define CD_ZERO_SIZE 8 /* bytes zero per yellow book mode 1 frame */ +#define CD_ECC_SIZE 276 /* bytes ECC per most raw data frame types */ +#define CD_FRAMESIZE 2048 /* bytes per frame, "cooked" mode */ +#define CD_FRAMESIZE_RAW 2352 /* bytes per frame, "raw" mode */ +#define CD_FRAMESIZE_RAWER 2646 /* The maximum possible returned bytes */ +/* most drives don't deliver everything: */ +#define CD_FRAMESIZE_RAW1 (CD_FRAMESIZE_RAW-CD_SYNC_SIZE) /*2340*/ +#define CD_FRAMESIZE_RAW0 (CD_FRAMESIZE_RAW-CD_SYNC_SIZE-CD_HEAD_SIZE) /*2336*/ + +#define CD_XA_HEAD (CD_HEAD_SIZE+CD_SUBHEAD_SIZE) /* "before data" part of raw XA frame */ +#define CD_XA_TAIL (CD_EDC_SIZE+CD_ECC_SIZE) /* "after data" part of raw XA frame */ +#define CD_XA_SYNC_HEAD (CD_SYNC_SIZE+CD_XA_HEAD) /* sync bytes + header of XA frame */ + +/* CD-ROM address types (cdrom_tocentry.cdte_format) */ +#define CDROM_LBA 0x01 /* "logical block": first frame is #0 */ +#define CDROM_MSF 0x02 /* "minute-second-frame": binary, not bcd here! */ + +/* bit to tell whether track is data or audio (cdrom_tocentry.cdte_ctrl) */ +#define CDROM_DATA_TRACK 0x04 + +/* The leadout track is always 0xAA, regardless of # of tracks on disc */ +#define CDROM_LEADOUT 0xAA + +/* audio states (from SCSI-2, but seen with other drives, too) */ +#define CDROM_AUDIO_INVALID 0x00 /* audio status not supported */ +#define CDROM_AUDIO_PLAY 0x11 /* audio play operation in progress */ +#define CDROM_AUDIO_PAUSED 0x12 /* audio play operation paused */ +#define CDROM_AUDIO_COMPLETED 0x13 /* audio play successfully completed */ +#define CDROM_AUDIO_ERROR 0x14 /* audio play stopped due to error */ +#define CDROM_AUDIO_NO_STATUS 0x15 /* no current audio status to return */ + +/* capability flags used with the uniform CD-ROM driver */ +#define CDC_CLOSE_TRAY 0x1 /* caddy systems _can't_ close */ +#define CDC_OPEN_TRAY 0x2 /* but _can_ eject. 
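The geometry constants above pin down the MSF arithmetic: 75 frames per second, 60 seconds per minute, and a 150-frame (two-second) offset before the first addressable block, so MSF 00:02:00 is logical block 0. The kernel-only helpers near the end of this header implement exactly this math; the standalone sketch below just makes it concrete:

#include <stdio.h>
#include <linux/cdrom.h>

/* LBA -> MSF using the geometry constants from this header */
static void lba2msf(int lba, int *m, int *s, int *f)
{
	lba += CD_MSF_OFFSET;		/* MSF 00:02:00 is LBA 0 */
	*m = lba / (CD_SECS * CD_FRAMES);
	lba %= CD_SECS * CD_FRAMES;
	*s = lba / CD_FRAMES;
	*f = lba % CD_FRAMES;
}

static int msf2lba(int m, int s, int f)
{
	return ((m * CD_SECS + s) * CD_FRAMES + f) - CD_MSF_OFFSET;
}

int main(void)
{
	int m, s, f;

	lba2msf(0, &m, &s, &f);
	printf("LBA 0  -> %02d:%02d:%02d\n", m, s, f);	/* 00:02:00 */
	lba2msf(16, &m, &s, &f);
	printf("LBA 16 -> %02d:%02d:%02d\n", m, s, f);	/* 00:02:16 */
	printf("%d minutes -> LBA %d\n",
	       CD_MINS, msf2lba(CD_MINS, 0, 0));	/* 74 min -> 332850 */
	return 0;
}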
*/ +#define CDC_LOCK 0x4 /* disable manual eject */ +#define CDC_SELECT_SPEED 0x8 /* programmable speed */ +#define CDC_SELECT_DISC 0x10 /* select disc from juke-box */ +#define CDC_MULTI_SESSION 0x20 /* read sessions>1 */ +#define CDC_MCN 0x40 /* Medium Catalog Number */ +#define CDC_MEDIA_CHANGED 0x80 /* media changed */ +#define CDC_PLAY_AUDIO 0x100 /* audio functions */ +#define CDC_RESET 0x200 /* hard reset device */ +#define CDC_DRIVE_STATUS 0x800 /* driver implements drive status */ +#define CDC_GENERIC_PACKET 0x1000 /* driver implements generic packets */ +#define CDC_CD_R 0x2000 /* drive is a CD-R */ +#define CDC_CD_RW 0x4000 /* drive is a CD-RW */ +#define CDC_DVD 0x8000 /* drive is a DVD */ +#define CDC_DVD_R 0x10000 /* drive can write DVD-R */ +#define CDC_DVD_RAM 0x20000 /* drive can write DVD-RAM */ +#define CDC_MO_DRIVE 0x40000 /* drive is an MO device */ +#define CDC_MRW 0x80000 /* drive can read MRW */ +#define CDC_MRW_W 0x100000 /* drive can write MRW */ +#define CDC_RAM 0x200000 /* ok to open for WRITE */ + +/* drive status possibilities returned by CDROM_DRIVE_STATUS ioctl */ +#define CDS_NO_INFO 0 /* if not implemented */ +#define CDS_NO_DISC 1 +#define CDS_TRAY_OPEN 2 +#define CDS_DRIVE_NOT_READY 3 +#define CDS_DISC_OK 4 + +/* return values for the CDROM_DISC_STATUS ioctl */ +/* can also return CDS_NO_[INFO|DISC], from above */ +#define CDS_AUDIO 100 +#define CDS_DATA_1 101 +#define CDS_DATA_2 102 +#define CDS_XA_2_1 103 +#define CDS_XA_2_2 104 +#define CDS_MIXED 105 + +/* User-configurable behavior options for the uniform CD-ROM driver */ +#define CDO_AUTO_CLOSE 0x1 /* close tray on first open() */ +#define CDO_AUTO_EJECT 0x2 /* open tray on last release() */ +#define CDO_USE_FFLAGS 0x4 /* use O_NONBLOCK information on open */ +#define CDO_LOCK 0x8 /* lock tray on open files */ +#define CDO_CHECK_TYPE 0x10 /* check type on open for data */ + +/* Special codes used when specifying changer slots. */ +#define CDSL_NONE (INT_MAX-1) +#define CDSL_CURRENT INT_MAX + +/* For partition based multisession access. IDE can handle 64 partitions + * per drive - SCSI CD-ROM's use minors to differentiate between the + * various drives, so we can't do multisessions the same way there. + * Use the -o session=x option to mount on them. + */ +#define CD_PART_MAX 64 +#define CD_PART_MASK (CD_PART_MAX - 1) + +/********************************************************************* + * Generic Packet commands, MMC commands, and such + *********************************************************************/ + + /* The generic packet command opcodes for CD/DVD Logical Units, + * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. 
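CDROM_DRIVE_STATUS returns one of the CDS_* tray/medium codes above and takes a slot number as its argument (CDSL_CURRENT for ordinary, non-changer drives); CDROM_DISC_STATUS then classifies the medium. A short user-space sketch, again assuming a /dev/cdrom node:

#include <stdio.h>
#include <limits.h>	/* CDSL_CURRENT expands to INT_MAX */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int main(void)
{
	int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);
	int drive, disc;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* tray/medium state: CDS_NO_DISC, CDS_TRAY_OPEN, CDS_DISC_OK, ... */
	drive = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
	if (drive == CDS_DISC_OK) {
		/* medium type: CDS_AUDIO, CDS_DATA_1, CDS_MIXED, ... */
		disc = ioctl(fd, CDROM_DISC_STATUS, 0);
		printf("disc present, type code %d\n", disc);
	} else {
		printf("no usable disc (drive status %d)\n", drive);
	}

	close(fd);
	return 0;
}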
*/ +#define GPCMD_BLANK 0xa1 +#define GPCMD_CLOSE_TRACK 0x5b +#define GPCMD_FLUSH_CACHE 0x35 +#define GPCMD_FORMAT_UNIT 0x04 +#define GPCMD_GET_CONFIGURATION 0x46 +#define GPCMD_GET_EVENT_STATUS_NOTIFICATION 0x4a +#define GPCMD_GET_PERFORMANCE 0xac +#define GPCMD_INQUIRY 0x12 +#define GPCMD_LOAD_UNLOAD 0xa6 +#define GPCMD_MECHANISM_STATUS 0xbd +#define GPCMD_MODE_SELECT_10 0x55 +#define GPCMD_MODE_SENSE_10 0x5a +#define GPCMD_PAUSE_RESUME 0x4b +#define GPCMD_PLAY_AUDIO_10 0x45 +#define GPCMD_PLAY_AUDIO_MSF 0x47 +#define GPCMD_PLAY_AUDIO_TI 0x48 +#define GPCMD_PLAY_CD 0xbc +#define GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e +#define GPCMD_READ_10 0x28 +#define GPCMD_READ_12 0xa8 +#define GPCMD_READ_BUFFER 0x3c +#define GPCMD_READ_BUFFER_CAPACITY 0x5c +#define GPCMD_READ_CDVD_CAPACITY 0x25 +#define GPCMD_READ_CD 0xbe +#define GPCMD_READ_CD_MSF 0xb9 +#define GPCMD_READ_DISC_INFO 0x51 +#define GPCMD_READ_DVD_STRUCTURE 0xad +#define GPCMD_READ_FORMAT_CAPACITIES 0x23 +#define GPCMD_READ_HEADER 0x44 +#define GPCMD_READ_TRACK_RZONE_INFO 0x52 +#define GPCMD_READ_SUBCHANNEL 0x42 +#define GPCMD_READ_TOC_PMA_ATIP 0x43 +#define GPCMD_REPAIR_RZONE_TRACK 0x58 +#define GPCMD_REPORT_KEY 0xa4 +#define GPCMD_REQUEST_SENSE 0x03 +#define GPCMD_RESERVE_RZONE_TRACK 0x53 +#define GPCMD_SEND_CUE_SHEET 0x5d +#define GPCMD_SCAN 0xba +#define GPCMD_SEEK 0x2b +#define GPCMD_SEND_DVD_STRUCTURE 0xbf +#define GPCMD_SEND_EVENT 0xa2 +#define GPCMD_SEND_KEY 0xa3 +#define GPCMD_SEND_OPC 0x54 +#define GPCMD_SET_READ_AHEAD 0xa7 +#define GPCMD_SET_STREAMING 0xb6 +#define GPCMD_START_STOP_UNIT 0x1b +#define GPCMD_STOP_PLAY_SCAN 0x4e +#define GPCMD_TEST_UNIT_READY 0x00 +#define GPCMD_VERIFY_10 0x2f +#define GPCMD_WRITE_10 0x2a +#define GPCMD_WRITE_12 0xaa +#define GPCMD_WRITE_AND_VERIFY_10 0x2e +#define GPCMD_WRITE_BUFFER 0x3b +/* This is listed as optional in ATAPI 2.6, but is (curiously) + * missing from Mt. Fuji, Table 57. It _is_ mentioned in Mt. Fuji + * Table 377 as an MMC command for SCSi devices though... Most ATAPI + * drives support it. */ +#define GPCMD_SET_SPEED 0xbb +/* This seems to be a SCSI specific CD-ROM opcode + * to play data at track/index */ +#define GPCMD_PLAYAUDIO_TI 0x48 +/* + * From MS Media Status Notification Support Specification. For + * older drives only. + */ +#define GPCMD_GET_MEDIA_STATUS 0xda + +/* Mode page codes for mode sense/set */ +#define GPMODE_VENDOR_PAGE 0x00 +#define GPMODE_R_W_ERROR_PAGE 0x01 +#define GPMODE_WRITE_PARMS_PAGE 0x05 +#define GPMODE_WCACHING_PAGE 0x08 +#define GPMODE_AUDIO_CTL_PAGE 0x0e +#define GPMODE_POWER_PAGE 0x1a +#define GPMODE_FAULT_FAIL_PAGE 0x1c +#define GPMODE_TO_PROTECT_PAGE 0x1d +#define GPMODE_CAPABILITIES_PAGE 0x2a +#define GPMODE_ALL_PAGES 0x3f +/* Not in Mt. 
Fuji, but in ATAPI 2.6 -- deprecated now in favor + * of MODE_SENSE_POWER_PAGE */ +#define GPMODE_CDROM_PAGE 0x0d + + + +/* DVD struct types */ +#define DVD_STRUCT_PHYSICAL 0x00 +#define DVD_STRUCT_COPYRIGHT 0x01 +#define DVD_STRUCT_DISCKEY 0x02 +#define DVD_STRUCT_BCA 0x03 +#define DVD_STRUCT_MANUFACT 0x04 + +struct dvd_layer { + __u8 book_version : 4; + __u8 book_type : 4; + __u8 min_rate : 4; + __u8 disc_size : 4; + __u8 layer_type : 4; + __u8 track_path : 1; + __u8 nlayers : 2; + __u8 track_density : 4; + __u8 linear_density : 4; + __u8 bca : 1; + __u32 start_sector; + __u32 end_sector; + __u32 end_sector_l0; +}; + +#define DVD_LAYERS 4 + +struct dvd_physical { + __u8 type; + __u8 layer_num; + struct dvd_layer layer[DVD_LAYERS]; +}; + +struct dvd_copyright { + __u8 type; + + __u8 layer_num; + __u8 cpst; + __u8 rmi; +}; + +struct dvd_disckey { + __u8 type; + + unsigned agid : 2; + __u8 value[2048]; +}; + +struct dvd_bca { + __u8 type; + + int len; + __u8 value[188]; +}; + +struct dvd_manufact { + __u8 type; + + __u8 layer_num; + int len; + __u8 value[2048]; +}; + +typedef union { + __u8 type; + + struct dvd_physical physical; + struct dvd_copyright copyright; + struct dvd_disckey disckey; + struct dvd_bca bca; + struct dvd_manufact manufact; +} dvd_struct; + +/* + * DVD authentication ioctl + */ + +/* Authentication states */ +#define DVD_LU_SEND_AGID 0 +#define DVD_HOST_SEND_CHALLENGE 1 +#define DVD_LU_SEND_KEY1 2 +#define DVD_LU_SEND_CHALLENGE 3 +#define DVD_HOST_SEND_KEY2 4 + +/* Termination states */ +#define DVD_AUTH_ESTABLISHED 5 +#define DVD_AUTH_FAILURE 6 + +/* Other functions */ +#define DVD_LU_SEND_TITLE_KEY 7 +#define DVD_LU_SEND_ASF 8 +#define DVD_INVALIDATE_AGID 9 +#define DVD_LU_SEND_RPC_STATE 10 +#define DVD_HOST_SEND_RPC_STATE 11 + +/* State data */ +typedef __u8 dvd_key[5]; /* 40-bit value, MSB is first elem. */ +typedef __u8 dvd_challenge[10]; /* 80-bit value, MSB is first elem. 
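dvd_struct is both request and reply for DVD_READ_STRUCT: user space sets 'type' to pick the union member, fills in any selector fields, and the drive's answer comes back in the same buffer. A sketch of reading the physical-layer descriptor follows; /dev/dvd is an assumed device path and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int main(void)
{
	dvd_struct s;
	int fd = open("/dev/dvd", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&s, 0, sizeof(s));
	s.type = DVD_STRUCT_PHYSICAL;	/* selects the 'physical' member */
	s.physical.layer_num = 0;	/* first layer */

	if (ioctl(fd, DVD_READ_STRUCT, &s) == 0)
		printf("layer 0: sectors %u..%u, nlayers field %d\n",
		       s.physical.layer[0].start_sector,
		       s.physical.layer[0].end_sector,
		       s.physical.layer[0].nlayers);
	else
		perror("DVD_READ_STRUCT");

	close(fd);
	return 0;
}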
*/ + +struct dvd_lu_send_agid { + __u8 type; + unsigned agid : 2; +}; + +struct dvd_host_send_challenge { + __u8 type; + unsigned agid : 2; + + dvd_challenge chal; +}; + +struct dvd_send_key { + __u8 type; + unsigned agid : 2; + + dvd_key key; +}; + +struct dvd_lu_send_challenge { + __u8 type; + unsigned agid : 2; + + dvd_challenge chal; +}; + +#define DVD_CPM_NO_COPYRIGHT 0 +#define DVD_CPM_COPYRIGHTED 1 + +#define DVD_CP_SEC_NONE 0 +#define DVD_CP_SEC_EXIST 1 + +#define DVD_CGMS_UNRESTRICTED 0 +#define DVD_CGMS_SINGLE 2 +#define DVD_CGMS_RESTRICTED 3 + +struct dvd_lu_send_title_key { + __u8 type; + unsigned agid : 2; + + dvd_key title_key; + int lba; + unsigned cpm : 1; + unsigned cp_sec : 1; + unsigned cgms : 2; +}; + +struct dvd_lu_send_asf { + __u8 type; + unsigned agid : 2; + + unsigned asf : 1; +}; + +struct dvd_host_send_rpcstate { + __u8 type; + __u8 pdrc; +}; + +struct dvd_lu_send_rpcstate { + __u8 type : 2; + __u8 vra : 3; + __u8 ucca : 3; + __u8 region_mask; + __u8 rpc_scheme; +}; + +typedef union { + __u8 type; + + struct dvd_lu_send_agid lsa; + struct dvd_host_send_challenge hsc; + struct dvd_send_key lsk; + struct dvd_lu_send_challenge lsc; + struct dvd_send_key hsk; + struct dvd_lu_send_title_key lstk; + struct dvd_lu_send_asf lsasf; + struct dvd_host_send_rpcstate hrpcs; + struct dvd_lu_send_rpcstate lrpcs; +} dvd_authinfo; + +struct request_sense { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 valid : 1; + __u8 error_code : 7; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 error_code : 7; + __u8 valid : 1; +#endif + __u8 segment_number; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved1 : 2; + __u8 ili : 1; + __u8 reserved2 : 1; + __u8 sense_key : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 sense_key : 4; + __u8 reserved2 : 1; + __u8 ili : 1; + __u8 reserved1 : 2; +#endif + __u8 information[4]; + __u8 add_sense_len; + __u8 command_info[4]; + __u8 asc; + __u8 ascq; + __u8 fruc; + __u8 sks[3]; + __u8 asb[46]; +}; + +/* + * feature profile + */ +#define CDF_RWRT 0x0020 /* "Random Writable" */ +#define CDF_HWDM 0x0024 /* "Hardware Defect Management" */ +#define CDF_MRW 0x0028 + +/* + * media status bits + */ +#define CDM_MRW_NOTMRW 0 +#define CDM_MRW_BGFORMAT_INACTIVE 1 +#define CDM_MRW_BGFORMAT_ACTIVE 2 +#define CDM_MRW_BGFORMAT_COMPLETE 3 + +/* + * mrw address spaces + */ +#define MRW_LBA_DMA 0 +#define MRW_LBA_GAA 1 + +/* + * mrw mode pages (first is deprecated) -- probed at init time and + * cdi->mrw_mode_page is set + */ +#define MRW_MODE_PC_PRE1 0x2c +#define MRW_MODE_PC 0x03 + +struct mrw_feature_desc { + __be16 feature_code; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved1 : 2; + __u8 feature_version : 4; + __u8 persistent : 1; + __u8 curr : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 curr : 1; + __u8 persistent : 1; + __u8 feature_version : 4; + __u8 reserved1 : 2; +#endif + __u8 add_len; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved2 : 7; + __u8 write : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 write : 1; + __u8 reserved2 : 7; +#endif + __u8 reserved3; + __u8 reserved4; + __u8 reserved5; +}; + +/* cf. 
mmc4r02g.pdf 5.3.10 Random Writable Feature (0020h) pg 197 of 635 */ +struct rwrt_feature_desc { + __be16 feature_code; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved1 : 2; + __u8 feature_version : 4; + __u8 persistent : 1; + __u8 curr : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 curr : 1; + __u8 persistent : 1; + __u8 feature_version : 4; + __u8 reserved1 : 2; +#endif + __u8 add_len; + __u32 last_lba; + __u32 block_size; + __u16 blocking; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved2 : 7; + __u8 page_present : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 page_present : 1; + __u8 reserved2 : 7; +#endif + __u8 reserved3; +}; + +typedef struct { + __be16 disc_information_length; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved1 : 3; + __u8 erasable : 1; + __u8 border_status : 2; + __u8 disc_status : 2; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 disc_status : 2; + __u8 border_status : 2; + __u8 erasable : 1; + __u8 reserved1 : 3; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 n_first_track; + __u8 n_sessions_lsb; + __u8 first_track_lsb; + __u8 last_track_lsb; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 did_v : 1; + __u8 dbc_v : 1; + __u8 uru : 1; + __u8 reserved2 : 2; + __u8 dbit : 1; + __u8 mrw_status : 2; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 mrw_status : 2; + __u8 dbit : 1; + __u8 reserved2 : 2; + __u8 uru : 1; + __u8 dbc_v : 1; + __u8 did_v : 1; +#endif + __u8 disc_type; + __u8 n_sessions_msb; + __u8 first_track_msb; + __u8 last_track_msb; + __u32 disc_id; + __u32 lead_in; + __u32 lead_out; + __u8 disc_bar_code[8]; + __u8 reserved3; + __u8 n_opc; +} disc_information; + +typedef struct { + __be16 track_information_length; + __u8 track_lsb; + __u8 session_lsb; + __u8 reserved1; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved2 : 2; + __u8 damage : 1; + __u8 copy : 1; + __u8 track_mode : 4; + __u8 rt : 1; + __u8 blank : 1; + __u8 packet : 1; + __u8 fp : 1; + __u8 data_mode : 4; + __u8 reserved3 : 6; + __u8 lra_v : 1; + __u8 nwa_v : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 track_mode : 4; + __u8 copy : 1; + __u8 damage : 1; + __u8 reserved2 : 2; + __u8 data_mode : 4; + __u8 fp : 1; + __u8 packet : 1; + __u8 blank : 1; + __u8 rt : 1; + __u8 nwa_v : 1; + __u8 lra_v : 1; + __u8 reserved3 : 6; +#endif + __be32 track_start; + __be32 next_writable; + __be32 free_blocks; + __be32 fixed_packet_size; + __be32 track_size; + __be32 last_rec_address; +} track_information; + +struct feature_header { + __u32 data_len; + __u8 reserved1; + __u8 reserved2; + __u16 curr_profile; +}; + +struct mode_page_header { + __be16 mode_data_length; + __u8 medium_type; + __u8 reserved1; + __u8 reserved2; + __u8 reserved3; + __be16 desc_length; +}; + +#ifdef __KERNEL__ +#include <linux/fs.h> /* not really needed, later.. 
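disc_information and track_information are normally filled through the generic packet path: build a cdrom_generic_command, point it at a buffer, and submit it with CDROM_SEND_PACKET. The sketch below issues GPCMD_READ_DISC_INFO that way; the allocation-length bytes follow the usual MMC layout, /dev/sr0 is an assumed device node, and the ioctl may require CAP_SYS_RAWIO (root):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int main(void)
{
	disc_information di;
	struct request_sense sense;
	struct cdrom_generic_command cgc;
	int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cgc, 0, sizeof(cgc));
	memset(&di, 0, sizeof(di));
	memset(&sense, 0, sizeof(sense));

	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[7] = sizeof(di) >> 8;		/* allocation length, MSB */
	cgc.cmd[8] = sizeof(di) & 0xff;		/* allocation length, LSB */
	cgc.buffer = (unsigned char *)&di;	/* drive's reply lands here */
	cgc.buflen = sizeof(di);
	cgc.sense = &sense;			/* filled on check condition */
	cgc.data_direction = CGC_DATA_READ;
	cgc.quiet = 1;

	if (ioctl(fd, CDROM_SEND_PACKET, &cgc) == 0)
		printf("disc_status=%d erasable=%d first_track=%d\n",
		       di.disc_status, di.erasable, di.n_first_track);
	else
		perror("CDROM_SEND_PACKET");

	close(fd);
	return 0;
}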
*/ +#include <linux/list.h> + +struct packet_command +{ + unsigned char cmd[CDROM_PACKET_SIZE]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct request_sense *sense; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +/* + * _OLD will use PIO transfer on atapi devices, _BPC_* will use DMA + */ +#define CDDA_OLD 0 /* old style */ +#define CDDA_BPC_SINGLE 1 /* single frame block pc */ +#define CDDA_BPC_FULL 2 /* multi frame block pc */ + +/* Uniform cdrom data structures for cdrom.c */ +struct cdrom_device_info { + struct cdrom_device_ops *ops; /* link to device_ops */ + struct list_head list; /* linked list of all device_info */ + struct gendisk *disk; /* matching block layer disk */ + void *handle; /* driver-dependent data */ +/* specifications */ + int mask; /* mask of capability: disables them */ + int speed; /* maximum speed for reading data */ + int capacity; /* number of discs in jukebox */ +/* device-related storage */ + unsigned int options : 30; /* options flags */ + unsigned mc_flags : 2; /* media change buffer flags */ + unsigned int vfs_events; /* cached events for vfs path */ + unsigned int ioctl_events; /* cached events for ioctl path */ + int use_count; /* number of times device opened */ + char name[20]; /* name of the device type */ +/* per-device flags */ + __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */ + __u8 keeplocked : 1; /* CDROM_LOCKDOOR status */ + __u8 reserved : 5; /* not used yet */ + int cdda_method; /* see flags */ + __u8 last_sense; + __u8 media_written; /* dirty flag, DVD+RW bookkeeping */ + unsigned short mmc3_profile; /* current MMC3 profile */ + int for_data; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; +}; + +struct cdrom_device_ops { +/* routines */ + int (*open) (struct cdrom_device_info *, int); + void (*release) (struct cdrom_device_info *); + int (*drive_status) (struct cdrom_device_info *, int); + unsigned int (*check_events) (struct cdrom_device_info *cdi, + unsigned int clearing, int slot); + int (*media_changed) (struct cdrom_device_info *, int); + int (*tray_move) (struct cdrom_device_info *, int); + int (*lock_door) (struct cdrom_device_info *, int); + int (*select_speed) (struct cdrom_device_info *, int); + int (*select_disc) (struct cdrom_device_info *, int); + int (*get_last_session) (struct cdrom_device_info *, + struct cdrom_multisession *); + int (*get_mcn) (struct cdrom_device_info *, + struct cdrom_mcn *); + /* hard reset device */ + int (*reset) (struct cdrom_device_info *); + /* play stuff */ + int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *); + +/* driver specifications */ + const int capability; /* capability flags */ + int n_minors; /* number of active minor devices */ + /* handle uniform packets for scsi type devices (scsi,atapi) */ + int (*generic_packet) (struct cdrom_device_info *, + struct packet_command *); +}; + +/* the general block_device operations structure: */ +extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode); +extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode); +extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode, unsigned int cmd, unsigned long arg); +extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, + unsigned int clearing); +extern int cdrom_media_changed(struct cdrom_device_info *); + +extern int register_cdrom(struct cdrom_device_info *cdi); +extern void unregister_cdrom(struct 
cdrom_device_info *cdi); + +typedef struct { + int data; + int audio; + int cdi; + int xa; + long error; +} tracktype; + +extern int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written); +extern int cdrom_number_of_slots(struct cdrom_device_info *cdi); +extern int cdrom_mode_select(struct cdrom_device_info *cdi, + struct packet_command *cgc); +extern int cdrom_mode_sense(struct cdrom_device_info *cdi, + struct packet_command *cgc, + int page_code, int page_control); +extern void init_cdrom_command(struct packet_command *cgc, + void *buffer, int len, int type); + +/* The SCSI spec says there could be 256 slots. */ +#define CDROM_MAX_SLOTS 256 + +struct cdrom_mechstat_header { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 fault : 1; + __u8 changer_state : 2; + __u8 curslot : 5; + __u8 mech_state : 3; + __u8 door_open : 1; + __u8 reserved1 : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 curslot : 5; + __u8 changer_state : 2; + __u8 fault : 1; + __u8 reserved1 : 4; + __u8 door_open : 1; + __u8 mech_state : 3; +#endif + __u8 curlba[3]; + __u8 nslots; + __u16 slot_tablelen; +}; + +struct cdrom_slot { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 disc_present : 1; + __u8 reserved1 : 6; + __u8 change : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 change : 1; + __u8 reserved1 : 6; + __u8 disc_present : 1; +#endif + __u8 reserved2[3]; +}; + +struct cdrom_changer_info { + struct cdrom_mechstat_header hdr; + struct cdrom_slot slots[CDROM_MAX_SLOTS]; +}; + +typedef enum { + mechtype_caddy = 0, + mechtype_tray = 1, + mechtype_popup = 2, + mechtype_individual_changer = 4, + mechtype_cartridge_changer = 5 +} mechtype_t; + +typedef struct { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 ps : 1; + __u8 reserved1 : 1; + __u8 page_code : 6; + __u8 page_length; + __u8 reserved2 : 1; + __u8 bufe : 1; + __u8 ls_v : 1; + __u8 test_write : 1; + __u8 write_type : 4; + __u8 multi_session : 2; /* or border, DVD */ + __u8 fp : 1; + __u8 copy : 1; + __u8 track_mode : 4; + __u8 reserved3 : 4; + __u8 data_block_type : 4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 page_code : 6; + __u8 reserved1 : 1; + __u8 ps : 1; + __u8 page_length; + __u8 write_type : 4; + __u8 test_write : 1; + __u8 ls_v : 1; + __u8 bufe : 1; + __u8 reserved2 : 1; + __u8 track_mode : 4; + __u8 copy : 1; + __u8 fp : 1; + __u8 multi_session : 2; /* or border, DVD */ + __u8 data_block_type : 4; + __u8 reserved3 : 4; +#endif + __u8 link_size; + __u8 reserved4; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved5 : 2; + __u8 app_code : 6; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 app_code : 6; + __u8 reserved5 : 2; +#endif + __u8 session_format; + __u8 reserved6; + __be32 packet_size; + __u16 audio_pause; + __u8 mcn[16]; + __u8 isrc[16]; + __u8 subhdr0; + __u8 subhdr1; + __u8 subhdr2; + __u8 subhdr3; +} __attribute__((packed)) write_param_page; + +struct modesel_head +{ + __u8 reserved1; + __u8 medium; + __u8 reserved2; + __u8 block_desc_length; + __u8 density; + __u8 number_of_blocks_hi; + __u8 number_of_blocks_med; + __u8 number_of_blocks_lo; + __u8 reserved3; + __u8 block_length_hi; + __u8 block_length_med; + __u8 block_length_lo; +}; + +typedef struct { + __u16 report_key_length; + __u8 reserved1; + __u8 reserved2; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 type_code : 2; + __u8 vra : 3; + __u8 ucca : 3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 ucca : 3; + __u8 vra : 3; + __u8 type_code : 2; +#endif + __u8 region_mask; + __u8 rpc_scheme; + __u8 reserved3; +} rpc_state_t; + +struct event_header { + __be16 
data_len; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 nea : 1; + __u8 reserved1 : 4; + __u8 notification_class : 3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 notification_class : 3; + __u8 reserved1 : 4; + __u8 nea : 1; +#endif + __u8 supp_event_class; +}; + +struct media_event_desc { +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 reserved1 : 4; + __u8 media_event_code : 4; + __u8 reserved2 : 6; + __u8 media_present : 1; + __u8 door_open : 1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 media_event_code : 4; + __u8 reserved1 : 4; + __u8 door_open : 1; + __u8 media_present : 1; + __u8 reserved2 : 6; +#endif + __u8 start_slot; + __u8 end_slot; +}; + +extern int cdrom_get_media_event(struct cdrom_device_info *cdi, struct media_event_desc *med); + +static inline void lba_to_msf(int lba, u8 *m, u8 *s, u8 *f) +{ + lba += CD_MSF_OFFSET; + lba &= 0xffffff; /* negative lbas use only 24 bits */ + *m = lba / (CD_SECS * CD_FRAMES); + lba %= (CD_SECS * CD_FRAMES); + *s = lba / CD_FRAMES; + *f = lba % CD_FRAMES; +} + +static inline int msf_to_lba(u8 m, u8 s, u8 f) +{ + return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_MSF_OFFSET; +} +#endif /* End of kernel only stuff */ + +#endif /* _LINUX_CDROM_H */ diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h new file mode 100644 index 00000000..aa13392a --- /dev/null +++ b/include/linux/ceph/auth.h @@ -0,0 +1,92 @@ +#ifndef _FS_CEPH_AUTH_H +#define _FS_CEPH_AUTH_H + +#include <linux/ceph/types.h> +#include <linux/ceph/buffer.h> + +/* + * Abstract interface for communicating with the authenticate module. + * There is some handshake that takes place between us and the monitor + * to acquire the necessary keys. These are used to generate an + * 'authorizer' that we use when connecting to a service (mds, osd). + */ + +struct ceph_auth_client; +struct ceph_authorizer; + +struct ceph_auth_client_ops { + const char *name; + + /* + * true if we are authenticated and can connect to + * services. + */ + int (*is_authenticated)(struct ceph_auth_client *ac); + + /* + * true if we should (re)authenticate, e.g., when our tickets + * are getting old and crusty. + */ + int (*should_authenticate)(struct ceph_auth_client *ac); + + /* + * build requests and process replies during monitor + * handshake. if handle_reply returns -EAGAIN, we build + * another request. + */ + int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end); + int (*handle_reply)(struct ceph_auth_client *ac, int result, + void *buf, void *end); + + /* + * Create authorizer for connecting to a service, and verify + * the response to authenticate the service. 
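The build_request/handle_reply contract described above amounts to a loop: keep exchanging messages with the monitor until handle_reply stops asking for another round with -EAGAIN. The sketch below only illustrates that contract; send_to_monitor() and wait_for_monitor_reply() are hypothetical stand-ins for the messenger layer, and the assumption that build_request returns the encoded length is an inference, not something this header states.

/* Illustrative only -- hypothetical helpers, not the libceph implementation. */
static int auth_negotiate(struct ceph_auth_client *ac,
			  void *buf, size_t buf_len,
			  void *reply, size_t reply_len)
{
	int result, ret;

	for (;;) {
		/* assumed to return encoded length, or -errno */
		ret = ac->ops->build_request(ac, buf, buf + buf_len);
		if (ret < 0)
			return ret;

		send_to_monitor(buf, ret);			/* hypothetical */
		/* hypothetical: monitor's result code; payload lands in reply */
		result = wait_for_monitor_reply(reply, reply_len);

		ret = ac->ops->handle_reply(ac, result, reply, reply + reply_len);
		if (ret != -EAGAIN)
			return ret;	/* 0 on success, -errno on failure */
		/* -EAGAIN: the protocol wants another round trip */
	}
}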
+ */ + int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, + struct ceph_authorizer **a, + void **buf, size_t *len, + void **reply_buf, size_t *reply_len); + int (*verify_authorizer_reply)(struct ceph_auth_client *ac, + struct ceph_authorizer *a, size_t len); + void (*destroy_authorizer)(struct ceph_auth_client *ac, + struct ceph_authorizer *a); + void (*invalidate_authorizer)(struct ceph_auth_client *ac, + int peer_type); + + /* reset when we (re)connect to a monitor */ + void (*reset)(struct ceph_auth_client *ac); + + void (*destroy)(struct ceph_auth_client *ac); +}; + +struct ceph_auth_client { + u32 protocol; /* CEPH_AUTH_* */ + void *private; /* for use by protocol implementation */ + const struct ceph_auth_client_ops *ops; /* null iff protocol==0 */ + + bool negotiating; /* true if negotiating protocol */ + const char *name; /* entity name */ + u64 global_id; /* our unique id in system */ + const struct ceph_crypto_key *key; /* our secret key */ + unsigned want_keys; /* which services we want */ +}; + +extern struct ceph_auth_client *ceph_auth_init(const char *name, + const struct ceph_crypto_key *key); +extern void ceph_auth_destroy(struct ceph_auth_client *ac); + +extern void ceph_auth_reset(struct ceph_auth_client *ac); + +extern int ceph_auth_build_hello(struct ceph_auth_client *ac, + void *buf, size_t len); +extern int ceph_handle_auth_reply(struct ceph_auth_client *ac, + void *buf, size_t len, + void *reply_buf, size_t reply_len); +extern int ceph_entity_name_encode(const char *name, void **p, void *end); + +extern int ceph_build_auth(struct ceph_auth_client *ac, + void *msg_buf, size_t msg_len); + +extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); + +#endif diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h new file mode 100644 index 00000000..58d19014 --- /dev/null +++ b/include/linux/ceph/buffer.h @@ -0,0 +1,39 @@ +#ifndef __FS_CEPH_BUFFER_H +#define __FS_CEPH_BUFFER_H + +#include <linux/kref.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/types.h> +#include <linux/uio.h> + +/* + * a simple reference counted buffer. + * + * use kmalloc for small sizes (<= one page), vmalloc for larger + * sizes. + */ +struct ceph_buffer { + struct kref kref; + struct kvec vec; + size_t alloc_len; + bool is_vmalloc; +}; + +extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp); +extern void ceph_buffer_release(struct kref *kref); + +static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) +{ + kref_get(&b->kref); + return b; +} + +static inline void ceph_buffer_put(struct ceph_buffer *b) +{ + kref_put(&b->kref, ceph_buffer_release); +} + +extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); + +#endif diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h new file mode 100644 index 00000000..aa2e1918 --- /dev/null +++ b/include/linux/ceph/ceph_debug.h @@ -0,0 +1,38 @@ +#ifndef _FS_CEPH_DEBUG_H +#define _FS_CEPH_DEBUG_H + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG + +/* + * wrap pr_debug to include a filename:lineno prefix on each line. + * this incurs some overhead (kernel size and execution time) due to + * the extra function call at each call site. + */ + +# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) +extern const char *ceph_file_part(const char *s, int len); +# define dout(fmt, ...) 
\ + pr_debug("%.*s %12.12s:%-4d : " fmt, \ + 8 - (int)sizeof(KBUILD_MODNAME), " ", \ + ceph_file_part(__FILE__, sizeof(__FILE__)), \ + __LINE__, ##__VA_ARGS__) +# else +/* faux printk call just to see any compiler warnings. */ +# define dout(fmt, ...) do { \ + if (0) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ + } while (0) +# endif + +#else + +/* + * or, just wrap pr_debug + */ +# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__) + +#endif + +#endif diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h new file mode 100644 index 00000000..5babb8e9 --- /dev/null +++ b/include/linux/ceph/ceph_frag.h @@ -0,0 +1,109 @@ +#ifndef FS_CEPH_FRAG_H +#define FS_CEPH_FRAG_H + +/* + * "Frags" are a way to describe a subset of a 32-bit number space, + * using a mask and a value to match against that mask. Any given frag + * (subset of the number space) can be partitioned into 2^n sub-frags. + * + * Frags are encoded into a 32-bit word: + * 8 upper bits = "bits" + * 24 lower bits = "value" + * (We could go to 5+27 bits, but who cares.) + * + * We use the _most_ significant bits of the 24 bit value. This makes + * values logically sort. + * + * Unfortunately, because the "bits" field is still in the high bits, we + * can't sort encoded frags numerically. However, it does allow you + * to feed encoded frags as values into frag_contains_value. + */ +static inline __u32 ceph_frag_make(__u32 b, __u32 v) +{ + return (b << 24) | + (v & (0xffffffu << (24-b)) & 0xffffffu); +} +static inline __u32 ceph_frag_bits(__u32 f) +{ + return f >> 24; +} +static inline __u32 ceph_frag_value(__u32 f) +{ + return f & 0xffffffu; +} +static inline __u32 ceph_frag_mask(__u32 f) +{ + return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu; +} +static inline __u32 ceph_frag_mask_shift(__u32 f) +{ + return 24 - ceph_frag_bits(f); +} + +static inline int ceph_frag_contains_value(__u32 f, __u32 v) +{ + return (v & ceph_frag_mask(f)) == ceph_frag_value(f); +} +static inline int ceph_frag_contains_frag(__u32 f, __u32 sub) +{ + /* is sub as specific as us, and contained by us? 
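The encoding above keeps the split depth in the top 8 bits and the value left-aligned in the low 24 bits, so a mask derived from the depth decides containment. A standalone sketch (re-deriving the same arithmetic so it runs outside the kernel) shows the containment test on a concrete frag:

#include <stdio.h>
#include <stdint.h>

/* same math as ceph_frag_make()/ceph_frag_contains_value() above */
static uint32_t frag_make(uint32_t bits, uint32_t value)
{
	return (bits << 24) | (value & (0xffffffu << (24 - bits)) & 0xffffffu);
}

static int frag_contains_value(uint32_t f, uint32_t v)
{
	uint32_t mask = (0xffffffu << (24 - (f >> 24))) & 0xffffffu;

	return (v & mask) == (f & 0xffffffu);
}

int main(void)
{
	/* 2 bits => the 24-bit space is cut into quarters; this is the 2nd one */
	uint32_t f = frag_make(2, 0x400000);	/* == 0x02400000 */

	printf("frag 0x%08x contains 0x500000: %d\n",
	       f, frag_contains_value(f, 0x500000));	/* 1 */
	printf("frag 0x%08x contains 0x900000: %d\n",
	       f, frag_contains_value(f, 0x900000));	/* 0 */
	return 0;
}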
*/ + return ceph_frag_bits(sub) >= ceph_frag_bits(f) && + (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f); +} + +static inline __u32 ceph_frag_parent(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f) - 1, + ceph_frag_value(f) & (ceph_frag_mask(f) << 1)); +} +static inline int ceph_frag_is_left_child(__u32 f) +{ + return ceph_frag_bits(f) > 0 && + (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0; +} +static inline int ceph_frag_is_right_child(__u32 f) +{ + return ceph_frag_bits(f) > 0 && + (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1; +} +static inline __u32 ceph_frag_sibling(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f), + ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f))); +} +static inline __u32 ceph_frag_left_child(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f)); +} +static inline __u32 ceph_frag_right_child(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f)+1, + ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f)))); +} +static inline __u32 ceph_frag_make_child(__u32 f, int by, int i) +{ + int newbits = ceph_frag_bits(f) + by; + return ceph_frag_make(newbits, + ceph_frag_value(f) | (i << (24 - newbits))); +} +static inline int ceph_frag_is_leftmost(__u32 f) +{ + return ceph_frag_value(f) == 0; +} +static inline int ceph_frag_is_rightmost(__u32 f) +{ + return ceph_frag_value(f) == ceph_frag_mask(f); +} +static inline __u32 ceph_frag_next(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f), + ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f))); +} + +/* + * comparator to sort frags logically, as when traversing the + * number space in ascending order... + */ +int ceph_frag_compare(__u32 a, __u32 b); + +#endif diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h new file mode 100644 index 00000000..b8c60694 --- /dev/null +++ b/include/linux/ceph/ceph_fs.h @@ -0,0 +1,751 @@ +/* + * ceph_fs.h - Ceph constants and data types to share between kernel and + * user space. + * + * Most types in this file are defined as little-endian, and are + * primarily intended to describe data structures that pass over the + * wire or that are stored on disk. + * + * LGPL2 + */ + +#ifndef CEPH_FS_H +#define CEPH_FS_H + +#include "msgr.h" +#include "rados.h" + +/* + * subprotocol versions. when specific messages types or high-level + * protocols change, bump the affected components. we keep rev + * internal cluster protocols separately from the public, + * client-facing protocol. + */ +#define CEPH_OSD_PROTOCOL 8 /* cluster internal */ +#define CEPH_MDS_PROTOCOL 12 /* cluster internal */ +#define CEPH_MON_PROTOCOL 5 /* cluster internal */ +#define CEPH_OSDC_PROTOCOL 24 /* server/client */ +#define CEPH_MDSC_PROTOCOL 32 /* server/client */ +#define CEPH_MONC_PROTOCOL 15 /* server/client */ + + +#define CEPH_INO_ROOT 1 +#define CEPH_INO_CEPH 2 /* hidden .ceph dir */ + +/* arbitrary limit on max # of monitors (cluster of 3 is typical) */ +#define CEPH_MAX_MON 31 + + +/* + * feature bits + */ +#define CEPH_FEATURE_UID (1<<0) +#define CEPH_FEATURE_NOSRCADDR (1<<1) +#define CEPH_FEATURE_MONCLOCKCHECK (1<<2) +#define CEPH_FEATURE_FLOCK (1<<3) +#define CEPH_FEATURE_SUBSCRIBE2 (1<<4) +#define CEPH_FEATURE_MONNAMES (1<<5) +#define CEPH_FEATURE_RECONNECT_SEQ (1<<6) +#define CEPH_FEATURE_DIRLAYOUTHASH (1<<7) + + +/* + * ceph_file_layout - describe data layout for a file/inode + */ +struct ceph_file_layout { + /* file -> object mapping */ + __le32 fl_stripe_unit; /* stripe unit, in bytes. 
must be multiple + of page size. */ + __le32 fl_stripe_count; /* over this many objects */ + __le32 fl_object_size; /* until objects are this big, then move to + new objects */ + __le32 fl_cas_hash; /* UNUSED. 0 = none; 1 = sha256 */ + + /* pg -> disk layout */ + __le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */ + + /* object -> pg layout */ + __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */ + __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */ +} __attribute__ ((packed)); + +#define CEPH_MIN_STRIPE_UNIT 65536 + +int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); + +struct ceph_dir_layout { + __u8 dl_dir_hash; /* see ceph_hash.h for ids */ + __u8 dl_unused1; + __u16 dl_unused2; + __u32 dl_unused3; +} __attribute__ ((packed)); + +/* crypto algorithms */ +#define CEPH_CRYPTO_NONE 0x0 +#define CEPH_CRYPTO_AES 0x1 + +#define CEPH_AES_IV "cephsageyudagreg" + +/* security/authentication protocols */ +#define CEPH_AUTH_UNKNOWN 0x0 +#define CEPH_AUTH_NONE 0x1 +#define CEPH_AUTH_CEPHX 0x2 + +#define CEPH_AUTH_UID_DEFAULT ((__u64) -1) + + +/********************************************* + * message layer + */ + +/* + * message types + */ + +/* misc */ +#define CEPH_MSG_SHUTDOWN 1 +#define CEPH_MSG_PING 2 + +/* client <-> monitor */ +#define CEPH_MSG_MON_MAP 4 +#define CEPH_MSG_MON_GET_MAP 5 +#define CEPH_MSG_STATFS 13 +#define CEPH_MSG_STATFS_REPLY 14 +#define CEPH_MSG_MON_SUBSCRIBE 15 +#define CEPH_MSG_MON_SUBSCRIBE_ACK 16 +#define CEPH_MSG_AUTH 17 +#define CEPH_MSG_AUTH_REPLY 18 + +/* client <-> mds */ +#define CEPH_MSG_MDS_MAP 21 + +#define CEPH_MSG_CLIENT_SESSION 22 +#define CEPH_MSG_CLIENT_RECONNECT 23 + +#define CEPH_MSG_CLIENT_REQUEST 24 +#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 +#define CEPH_MSG_CLIENT_REPLY 26 +#define CEPH_MSG_CLIENT_CAPS 0x310 +#define CEPH_MSG_CLIENT_LEASE 0x311 +#define CEPH_MSG_CLIENT_SNAP 0x312 +#define CEPH_MSG_CLIENT_CAPRELEASE 0x313 + +/* pool ops */ +#define CEPH_MSG_POOLOP_REPLY 48 +#define CEPH_MSG_POOLOP 49 + + +/* osd */ +#define CEPH_MSG_OSD_MAP 41 +#define CEPH_MSG_OSD_OP 42 +#define CEPH_MSG_OSD_OPREPLY 43 +#define CEPH_MSG_WATCH_NOTIFY 44 + + +/* watch-notify operations */ +enum { + WATCH_NOTIFY = 1, /* notifying watcher */ + WATCH_NOTIFY_COMPLETE = 2, /* notifier notified when done */ +}; + + +/* pool operations */ +enum { + POOL_OP_CREATE = 0x01, + POOL_OP_DELETE = 0x02, + POOL_OP_AUID_CHANGE = 0x03, + POOL_OP_CREATE_SNAP = 0x11, + POOL_OP_DELETE_SNAP = 0x12, + POOL_OP_CREATE_UNMANAGED_SNAP = 0x21, + POOL_OP_DELETE_UNMANAGED_SNAP = 0x22, +}; + +struct ceph_mon_request_header { + __le64 have_version; + __le16 session_mon; + __le64 session_mon_tid; +} __attribute__ ((packed)); + +struct ceph_mon_statfs { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; +} __attribute__ ((packed)); + +struct ceph_statfs { + __le64 kb, kb_used, kb_avail; + __le64 num_objects; +} __attribute__ ((packed)); + +struct ceph_mon_statfs_reply { + struct ceph_fsid fsid; + __le64 version; + struct ceph_statfs st; +} __attribute__ ((packed)); + +const char *ceph_pool_op_name(int op); + +struct ceph_mon_poolop { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; + __le32 pool; + __le32 op; + __le64 auid; + __le64 snapid; + __le32 name_len; +} __attribute__ ((packed)); + +struct ceph_mon_poolop_reply { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; + __le32 reply_code; + __le32 epoch; + char has_data; + char data[0]; +} __attribute__ ((packed)); + +struct 
ceph_mon_unmanaged_snap { + __le64 snapid; +} __attribute__ ((packed)); + +struct ceph_osd_getmap { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; + __le32 start; +} __attribute__ ((packed)); + +struct ceph_mds_getmap { + struct ceph_mon_request_header monhdr; + struct ceph_fsid fsid; +} __attribute__ ((packed)); + +struct ceph_client_mount { + struct ceph_mon_request_header monhdr; +} __attribute__ ((packed)); + +#define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */ + +struct ceph_mon_subscribe_item { + __le64 have_version; __le64 have; + __u8 onetime; +} __attribute__ ((packed)); + +struct ceph_mon_subscribe_ack { + __le32 duration; /* seconds */ + struct ceph_fsid fsid; +} __attribute__ ((packed)); + +/* + * mds states + * > 0 -> in + * <= 0 -> out + */ +#define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */ +#define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees. + empty log. */ +#define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */ +#define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */ +#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */ +#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */ +#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */ + +#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */ +#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed + operations (import, rename, etc.) */ +#define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */ +#define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */ +#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */ +#define CEPH_MDS_STATE_ACTIVE 13 /* up, active */ +#define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */ + +extern const char *ceph_mds_state_name(int s); + + +/* + * metadata lock types. + * - these are bitmasks.. we can compose them + * - they also define the lock ordering by the MDS + * - a few of these are internal to the mds + */ +#define CEPH_LOCK_DVERSION 1 +#define CEPH_LOCK_DN 2 +#define CEPH_LOCK_ISNAP 16 +#define CEPH_LOCK_IVERSION 32 /* mds internal */ +#define CEPH_LOCK_IFILE 64 +#define CEPH_LOCK_IAUTH 128 +#define CEPH_LOCK_ILINK 256 +#define CEPH_LOCK_IDFT 512 /* dir frag tree */ +#define CEPH_LOCK_INEST 1024 /* mds internal */ +#define CEPH_LOCK_IXATTR 2048 +#define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */ +#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */ + +/* client_session ops */ +enum { + CEPH_SESSION_REQUEST_OPEN, + CEPH_SESSION_OPEN, + CEPH_SESSION_REQUEST_CLOSE, + CEPH_SESSION_CLOSE, + CEPH_SESSION_REQUEST_RENEWCAPS, + CEPH_SESSION_RENEWCAPS, + CEPH_SESSION_STALE, + CEPH_SESSION_RECALL_STATE, +}; + +extern const char *ceph_session_op_name(int op); + +struct ceph_mds_session_head { + __le32 op; + __le64 seq; + struct ceph_timespec stamp; + __le32 max_caps, max_leases; +} __attribute__ ((packed)); + +/* client_request */ +/* + * metadata ops. + * & 0x001000 -> write op + * & 0x010000 -> follow symlink (e.g. stat(), not lstat()). 
+ & & 0x100000 -> use weird ino/path trace + */ +#define CEPH_MDS_OP_WRITE 0x001000 +enum { + CEPH_MDS_OP_LOOKUP = 0x00100, + CEPH_MDS_OP_GETATTR = 0x00101, + CEPH_MDS_OP_LOOKUPHASH = 0x00102, + CEPH_MDS_OP_LOOKUPPARENT = 0x00103, + CEPH_MDS_OP_LOOKUPINO = 0x00104, + + CEPH_MDS_OP_SETXATTR = 0x01105, + CEPH_MDS_OP_RMXATTR = 0x01106, + CEPH_MDS_OP_SETLAYOUT = 0x01107, + CEPH_MDS_OP_SETATTR = 0x01108, + CEPH_MDS_OP_SETFILELOCK= 0x01109, + CEPH_MDS_OP_GETFILELOCK= 0x00110, + CEPH_MDS_OP_SETDIRLAYOUT=0x0110a, + + CEPH_MDS_OP_MKNOD = 0x01201, + CEPH_MDS_OP_LINK = 0x01202, + CEPH_MDS_OP_UNLINK = 0x01203, + CEPH_MDS_OP_RENAME = 0x01204, + CEPH_MDS_OP_MKDIR = 0x01220, + CEPH_MDS_OP_RMDIR = 0x01221, + CEPH_MDS_OP_SYMLINK = 0x01222, + + CEPH_MDS_OP_CREATE = 0x01301, + CEPH_MDS_OP_OPEN = 0x00302, + CEPH_MDS_OP_READDIR = 0x00305, + + CEPH_MDS_OP_LOOKUPSNAP = 0x00400, + CEPH_MDS_OP_MKSNAP = 0x01400, + CEPH_MDS_OP_RMSNAP = 0x01401, + CEPH_MDS_OP_LSSNAP = 0x00402, +}; + +extern const char *ceph_mds_op_name(int op); + + +#define CEPH_SETATTR_MODE 1 +#define CEPH_SETATTR_UID 2 +#define CEPH_SETATTR_GID 4 +#define CEPH_SETATTR_MTIME 8 +#define CEPH_SETATTR_ATIME 16 +#define CEPH_SETATTR_SIZE 32 +#define CEPH_SETATTR_CTIME 64 + +union ceph_mds_request_args { + struct { + __le32 mask; /* CEPH_CAP_* */ + } __attribute__ ((packed)) getattr; + struct { + __le32 mode; + __le32 uid; + __le32 gid; + struct ceph_timespec mtime; + struct ceph_timespec atime; + __le64 size, old_size; /* old_size needed by truncate */ + __le32 mask; /* CEPH_SETATTR_* */ + } __attribute__ ((packed)) setattr; + struct { + __le32 frag; /* which dir fragment */ + __le32 max_entries; /* how many dentries to grab */ + __le32 max_bytes; + } __attribute__ ((packed)) readdir; + struct { + __le32 mode; + __le32 rdev; + } __attribute__ ((packed)) mknod; + struct { + __le32 mode; + } __attribute__ ((packed)) mkdir; + struct { + __le32 flags; + __le32 mode; + __le32 stripe_unit; /* layout for newly created file */ + __le32 stripe_count; /* ... */ + __le32 object_size; + __le32 file_replication; + __le32 preferred; + } __attribute__ ((packed)) open; + struct { + __le32 flags; + } __attribute__ ((packed)) setxattr; + struct { + struct ceph_file_layout layout; + } __attribute__ ((packed)) setlayout; + struct { + __u8 rule; /* currently fcntl or flock */ + __u8 type; /* shared, exclusive, remove*/ + __le64 pid; /* process id requesting the lock */ + __le64 pid_namespace; + __le64 start; /* initial location to lock */ + __le64 length; /* num bytes to lock from start */ + __u8 wait; /* will caller wait for lock to become available? */ + } __attribute__ ((packed)) filelock_change; +} __attribute__ ((packed)); + +#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */ +#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ + +struct ceph_mds_request_head { + __le64 oldest_client_tid; + __le32 mdsmap_epoch; /* on client */ + __le32 flags; /* CEPH_MDS_FLAG_* */ + __u8 num_retry, num_fwd; /* count retry, fwd attempts */ + __le16 num_releases; /* # include cap/lease release records */ + __le32 op; /* mds op code */ + __le32 caller_uid, caller_gid; + __le64 ino; /* use this ino for openc, mkdir, mknod, + etc. 
(if replaying) */ + union ceph_mds_request_args args; +} __attribute__ ((packed)); + +/* cap/lease release record */ +struct ceph_mds_request_release { + __le64 ino, cap_id; /* ino and unique cap id */ + __le32 caps, wanted; /* new issued, wanted */ + __le32 seq, issue_seq, mseq; + __le32 dname_seq; /* if releasing a dentry lease, a */ + __le32 dname_len; /* string follows. */ +} __attribute__ ((packed)); + +/* client reply */ +struct ceph_mds_reply_head { + __le32 op; + __le32 result; + __le32 mdsmap_epoch; + __u8 safe; /* true if committed to disk */ + __u8 is_dentry, is_target; /* true if dentry, target inode records + are included with reply */ +} __attribute__ ((packed)); + +/* one for each node split */ +struct ceph_frag_tree_split { + __le32 frag; /* this frag splits... */ + __le32 by; /* ...by this many bits */ +} __attribute__ ((packed)); + +struct ceph_frag_tree_head { + __le32 nsplits; /* num ceph_frag_tree_split records */ + struct ceph_frag_tree_split splits[]; +} __attribute__ ((packed)); + +/* capability issue, for bundling with mds reply */ +struct ceph_mds_reply_cap { + __le32 caps, wanted; /* caps issued, wanted */ + __le64 cap_id; + __le32 seq, mseq; + __le64 realm; /* snap realm */ + __u8 flags; /* CEPH_CAP_FLAG_* */ +} __attribute__ ((packed)); + +#define CEPH_CAP_FLAG_AUTH 1 /* cap is issued by auth mds */ + +/* inode record, for bundling with mds reply */ +struct ceph_mds_reply_inode { + __le64 ino; + __le64 snapid; + __le32 rdev; + __le64 version; /* inode version */ + __le64 xattr_version; /* version for xattr blob */ + struct ceph_mds_reply_cap cap; /* caps issued for this inode */ + struct ceph_file_layout layout; + struct ceph_timespec ctime, mtime, atime; + __le32 time_warp_seq; + __le64 size, max_size, truncate_size; + __le32 truncate_seq; + __le32 mode, uid, gid; + __le32 nlink; + __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */ + struct ceph_timespec rctime; + struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */ +} __attribute__ ((packed)); +/* followed by frag array, symlink string, dir layout, xattr blob */ + +/* reply_lease follows dname, and reply_inode */ +struct ceph_mds_reply_lease { + __le16 mask; /* lease type(s) */ + __le32 duration_ms; /* lease duration */ + __le32 seq; +} __attribute__ ((packed)); + +struct ceph_mds_reply_dirfrag { + __le32 frag; /* fragment */ + __le32 auth; /* auth mds, if this is a delegation point */ + __le32 ndist; /* number of mds' this is replicated on */ + __le32 dist[]; +} __attribute__ ((packed)); + +#define CEPH_LOCK_FCNTL 1 +#define CEPH_LOCK_FLOCK 2 + +#define CEPH_LOCK_SHARED 1 +#define CEPH_LOCK_EXCL 2 +#define CEPH_LOCK_UNLOCK 4 + +struct ceph_filelock { + __le64 start;/* file offset to start lock at */ + __le64 length; /* num bytes to lock; 0 for all following start */ + __le64 client; /* which client holds the lock */ + __le64 pid; /* process id holding the lock on the client */ + __le64 pid_namespace; + __u8 type; /* shared lock, exclusive lock, or unlock */ +} __attribute__ ((packed)); + + +/* file access modes */ +#define CEPH_FILE_MODE_PIN 0 +#define CEPH_FILE_MODE_RD 1 +#define CEPH_FILE_MODE_WR 2 +#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */ +#define CEPH_FILE_MODE_LAZY 4 /* lazy io */ +#define CEPH_FILE_MODE_NUM 8 /* bc these are bit fields.. 
mostly */ + +int ceph_flags_to_mode(int flags); + + +/* capability bits */ +#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ + +/* generic cap bits */ +#define CEPH_CAP_GSHARED 1 /* client can reads */ +#define CEPH_CAP_GEXCL 2 /* client can read and update */ +#define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */ +#define CEPH_CAP_GRD 8 /* (file) client can read */ +#define CEPH_CAP_GWR 16 /* (file) client can write */ +#define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */ +#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */ +#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */ + +/* per-lock shift */ +#define CEPH_CAP_SAUTH 2 +#define CEPH_CAP_SLINK 4 +#define CEPH_CAP_SXATTR 6 +#define CEPH_CAP_SFILE 8 +#define CEPH_CAP_SFLOCK 20 + +#define CEPH_CAP_BITS 22 + +/* composed values */ +#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH) +#define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH) +#define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK) +#define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK) +#define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR) +#define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR) +#define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE) +#define CEPH_CAP_FLOCK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFLOCK) +#define CEPH_CAP_FLOCK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFLOCK) + + +/* cap masks (for getattr) */ +#define CEPH_STAT_CAP_INODE CEPH_CAP_PIN +#define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */ +#define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN +#define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED +#define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */ +#define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED +#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \ + CEPH_CAP_AUTH_SHARED | \ + CEPH_CAP_LINK_SHARED | \ + CEPH_CAP_FILE_SHARED | \ + CEPH_CAP_XATTR_SHARED) + +#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ + CEPH_CAP_LINK_SHARED | \ + CEPH_CAP_XATTR_SHARED | \ + CEPH_CAP_FILE_SHARED) +#define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \ + CEPH_CAP_FILE_CACHE) + +#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \ + CEPH_CAP_LINK_EXCL | \ + CEPH_CAP_XATTR_EXCL | \ + CEPH_CAP_FILE_EXCL) +#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \ + CEPH_CAP_FILE_EXCL) +#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR) +#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \ + CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \ + CEPH_CAP_PIN) + +#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \ + CEPH_LOCK_IXATTR) + +int ceph_caps_for_mode(int mode); + +enum { + 
CEPH_CAP_OP_GRANT, /* mds->client grant */ + CEPH_CAP_OP_REVOKE, /* mds->client revoke */ + CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */ + CEPH_CAP_OP_EXPORT, /* mds has exported the cap */ + CEPH_CAP_OP_IMPORT, /* mds has imported the cap */ + CEPH_CAP_OP_UPDATE, /* client->mds update */ + CEPH_CAP_OP_DROP, /* client->mds drop cap bits */ + CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */ + CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */ + CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */ + CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client flushed snapped metadata */ + CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */ + CEPH_CAP_OP_RENEW, /* client->mds renewal request */ +}; + +extern const char *ceph_cap_op_name(int op); + +/* + * caps message, used for capability callbacks, acks, requests, etc. + */ +struct ceph_mds_caps { + __le32 op; /* CEPH_CAP_OP_* */ + __le64 ino, realm; + __le64 cap_id; + __le32 seq, issue_seq; + __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */ + __le32 migrate_seq; + __le64 snap_follows; + __le32 snap_trace_len; + + /* authlock */ + __le32 uid, gid, mode; + + /* linklock */ + __le32 nlink; + + /* xattrlock */ + __le32 xattr_len; + __le64 xattr_version; + + /* filelock */ + __le64 size, max_size, truncate_size; + __le32 truncate_seq; + struct ceph_timespec mtime, atime, ctime; + struct ceph_file_layout layout; + __le32 time_warp_seq; +} __attribute__ ((packed)); + +/* cap release msg head */ +struct ceph_mds_cap_release { + __le32 num; /* number of cap_items that follow */ +} __attribute__ ((packed)); + +struct ceph_mds_cap_item { + __le64 ino; + __le64 cap_id; + __le32 migrate_seq, seq; +} __attribute__ ((packed)); + +#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */ +#define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */ +#define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */ +#define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */ + +extern const char *ceph_lease_op_name(int o); + +/* lease msg header */ +struct ceph_mds_lease { + __u8 action; /* CEPH_MDS_LEASE_* */ + __le16 mask; /* which lease */ + __le64 ino; + __le64 first, last; /* snap range */ + __le32 seq; + __le32 duration_ms; /* duration of renewal */ +} __attribute__ ((packed)); +/* followed by a __le32+string for dname */ + +/* client reconnect */ +struct ceph_mds_cap_reconnect { + __le64 cap_id; + __le32 wanted; + __le32 issued; + __le64 snaprealm; + __le64 pathbase; /* base ino for our path to this ino */ + __le32 flock_len; /* size of flock state blob, if any */ +} __attribute__ ((packed)); +/* followed by flock blob */ + +struct ceph_mds_cap_reconnect_v1 { + __le64 cap_id; + __le32 wanted; + __le32 issued; + __le64 size; + struct ceph_timespec mtime, atime; + __le64 snaprealm; + __le64 pathbase; /* base ino for our path to this ino */ +} __attribute__ ((packed)); + +struct ceph_mds_snaprealm_reconnect { + __le64 ino; /* snap realm base */ + __le64 seq; /* snap seq for this snap realm */ + __le64 parent; /* parent realm */ +} __attribute__ ((packed)); + +/* + * snaps + */ +enum { + CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */ + CEPH_SNAP_OP_CREATE, + CEPH_SNAP_OP_DESTROY, + CEPH_SNAP_OP_SPLIT, +}; + +extern const char *ceph_snap_op_name(int o); + +/* snap msg header */ +struct ceph_mds_snap_head { + __le32 op; /* CEPH_SNAP_OP_* */ + __le64 split; /* ino to split off, if any */ + __le32 num_split_inos; /* # inos belonging to new child realm */ + __le32 num_split_realms; /* # child realms udner new child realm */ + __le32 trace_len; /* size of snap trace blob */ +} 
__attribute__ ((packed)); +/* followed by split ino list, then split realms, then the trace blob */ + +/* + * encode info about a snaprealm, as viewed by a client + */ +struct ceph_mds_snap_realm { + __le64 ino; /* ino */ + __le64 created; /* snap: when created */ + __le64 parent; /* ino: parent realm */ + __le64 parent_since; /* snap: same parent since */ + __le64 seq; /* snap: version */ + __le32 num_snaps; + __le32 num_prior_parent_snaps; +} __attribute__ ((packed)); +/* followed by my snap list, then prior parent snap list */ + +#endif diff --git a/include/linux/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h new file mode 100644 index 00000000..d099c3f9 --- /dev/null +++ b/include/linux/ceph/ceph_hash.h @@ -0,0 +1,13 @@ +#ifndef FS_CEPH_HASH_H +#define FS_CEPH_HASH_H + +#define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */ +#define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */ + +extern unsigned ceph_str_hash_linux(const char *s, unsigned len); +extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len); + +extern unsigned ceph_str_hash(int type, const char *s, unsigned len); +extern const char *ceph_str_hash_name(int type); + +#endif diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h new file mode 100644 index 00000000..2a79702e --- /dev/null +++ b/include/linux/ceph/debugfs.h @@ -0,0 +1,33 @@ +#ifndef _FS_CEPH_DEBUGFS_H +#define _FS_CEPH_DEBUGFS_H + +#include "ceph_debug.h" +#include "types.h" + +#define CEPH_DEFINE_SHOW_FUNC(name) \ +static int name##_open(struct inode *inode, struct file *file) \ +{ \ + struct seq_file *sf; \ + int ret; \ + \ + ret = single_open(file, name, NULL); \ + sf = file->private_data; \ + sf->private = inode->i_private; \ + return ret; \ +} \ + \ +static const struct file_operations name##_fops = { \ + .open = name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +}; + +/* debugfs.c */ +extern int ceph_debugfs_init(void); +extern void ceph_debugfs_cleanup(void); +extern int ceph_debugfs_client_init(struct ceph_client *client); +extern void ceph_debugfs_client_cleanup(struct ceph_client *client); + +#endif + diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h new file mode 100644 index 00000000..220ae21e --- /dev/null +++ b/include/linux/ceph/decode.h @@ -0,0 +1,202 @@ +#ifndef __CEPH_DECODE_H +#define __CEPH_DECODE_H + +#include <linux/bug.h> +#include <linux/time.h> +#include <asm/unaligned.h> + +#include "types.h" + +/* + * in all cases, + * void **p pointer to position pointer + * void *end pointer to end of buffer (last byte + 1) + */ + +static inline u64 ceph_decode_64(void **p) +{ + u64 v = get_unaligned_le64(*p); + *p += sizeof(u64); + return v; +} +static inline u32 ceph_decode_32(void **p) +{ + u32 v = get_unaligned_le32(*p); + *p += sizeof(u32); + return v; +} +static inline u16 ceph_decode_16(void **p) +{ + u16 v = get_unaligned_le16(*p); + *p += sizeof(u16); + return v; +} +static inline u8 ceph_decode_8(void **p) +{ + u8 v = *(u8 *)*p; + (*p)++; + return v; +} +static inline void ceph_decode_copy(void **p, void *pv, size_t n) +{ + memcpy(pv, *p, n); + *p += n; +} + +/* + * bounds check input. 
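As a rough illustration of the pointer-advance convention used by the decoders above, a caller walks a little-endian buffer field by field. The record layout and function name below are hypothetical, and real callers pair this with the bounds-check macros that follow:

        /* hypothetical on-wire record: ino (le64), flags (le32), type (u8) */
        struct demo_record {
                u64 ino;
                u32 flags;
                u8 type;
        };

        static void demo_decode_record(void **p, struct demo_record *r)
        {
                r->ino   = ceph_decode_64(p);   /* reads 8 bytes, advances *p */
                r->flags = ceph_decode_32(p);   /* reads 4 bytes, advances *p */
                r->type  = ceph_decode_8(p);    /* reads 1 byte, advances *p */
        }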
+ */ +#define ceph_decode_need(p, end, n, bad) \ + do { \ + if (unlikely(*(p) + (n) > (end))) \ + goto bad; \ + } while (0) + +#define ceph_decode_64_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u64), bad); \ + v = ceph_decode_64(p); \ + } while (0) +#define ceph_decode_32_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u32), bad); \ + v = ceph_decode_32(p); \ + } while (0) +#define ceph_decode_16_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u16), bad); \ + v = ceph_decode_16(p); \ + } while (0) +#define ceph_decode_8_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u8), bad); \ + v = ceph_decode_8(p); \ + } while (0) + +#define ceph_decode_copy_safe(p, end, pv, n, bad) \ + do { \ + ceph_decode_need(p, end, n, bad); \ + ceph_decode_copy(p, pv, n); \ + } while (0) + +/* + * struct ceph_timespec <-> struct timespec + */ +static inline void ceph_decode_timespec(struct timespec *ts, + const struct ceph_timespec *tv) +{ + ts->tv_sec = le32_to_cpu(tv->tv_sec); + ts->tv_nsec = le32_to_cpu(tv->tv_nsec); +} +static inline void ceph_encode_timespec(struct ceph_timespec *tv, + const struct timespec *ts) +{ + tv->tv_sec = cpu_to_le32(ts->tv_sec); + tv->tv_nsec = cpu_to_le32(ts->tv_nsec); +} + +/* + * sockaddr_storage <-> ceph_sockaddr + */ +static inline void ceph_encode_addr(struct ceph_entity_addr *a) +{ + __be16 ss_family = htons(a->in_addr.ss_family); + a->in_addr.ss_family = *(__u16 *)&ss_family; +} +static inline void ceph_decode_addr(struct ceph_entity_addr *a) +{ + __be16 ss_family = *(__be16 *)&a->in_addr.ss_family; + a->in_addr.ss_family = ntohs(ss_family); + WARN_ON(a->in_addr.ss_family == 512); +} + +/* + * encoders + */ +static inline void ceph_encode_64(void **p, u64 v) +{ + put_unaligned_le64(v, (__le64 *)*p); + *p += sizeof(u64); +} +static inline void ceph_encode_32(void **p, u32 v) +{ + put_unaligned_le32(v, (__le32 *)*p); + *p += sizeof(u32); +} +static inline void ceph_encode_16(void **p, u16 v) +{ + put_unaligned_le16(v, (__le16 *)*p); + *p += sizeof(u16); +} +static inline void ceph_encode_8(void **p, u8 v) +{ + *(u8 *)*p = v; + (*p)++; +} +static inline void ceph_encode_copy(void **p, const void *s, int len) +{ + memcpy(*p, s, len); + *p += len; +} + +/* + * filepath, string encoders + */ +static inline void ceph_encode_filepath(void **p, void *end, + u64 ino, const char *path) +{ + u32 len = path ? 
strlen(path) : 0; + BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end); + ceph_encode_8(p, 1); + ceph_encode_64(p, ino); + ceph_encode_32(p, len); + if (len) + memcpy(*p, path, len); + *p += len; +} + +static inline void ceph_encode_string(void **p, void *end, + const char *s, u32 len) +{ + BUG_ON(*p + sizeof(len) + len > end); + ceph_encode_32(p, len); + if (len) + memcpy(*p, s, len); + *p += len; +} + +#define ceph_encode_need(p, end, n, bad) \ + do { \ + if (unlikely(*(p) + (n) > (end))) \ + goto bad; \ + } while (0) + +#define ceph_encode_64_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u64), bad); \ + ceph_encode_64(p, v); \ + } while (0) +#define ceph_encode_32_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u32), bad); \ + ceph_encode_32(p, v); \ + } while (0) +#define ceph_encode_16_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u16), bad); \ + ceph_encode_16(p, v); \ + } while (0) + +#define ceph_encode_copy_safe(p, end, pv, n, bad) \ + do { \ + ceph_encode_need(p, end, n, bad); \ + ceph_encode_copy(p, pv, n); \ + } while (0) +#define ceph_encode_string_safe(p, end, s, n, bad) \ + do { \ + ceph_encode_need(p, end, n, bad); \ + ceph_encode_string(p, end, s, n); \ + } while (0) + + +#endif diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h new file mode 100644 index 00000000..e71d6839 --- /dev/null +++ b/include/linux/ceph/libceph.h @@ -0,0 +1,252 @@ +#ifndef _FS_CEPH_LIBCEPH_H +#define _FS_CEPH_LIBCEPH_H + +#include "ceph_debug.h" + +#include <asm/unaligned.h> +#include <linux/backing-dev.h> +#include <linux/completion.h> +#include <linux/exportfs.h> +#include <linux/bug.h> +#include <linux/fs.h> +#include <linux/mempool.h> +#include <linux/pagemap.h> +#include <linux/wait.h> +#include <linux/writeback.h> +#include <linux/slab.h> + +#include "types.h" +#include "messenger.h" +#include "msgpool.h" +#include "mon_client.h" +#include "osd_client.h" +#include "ceph_fs.h" + +/* + * Supported features + */ +#define CEPH_FEATURE_SUPPORTED_DEFAULT CEPH_FEATURE_NOSRCADDR +#define CEPH_FEATURE_REQUIRED_DEFAULT CEPH_FEATURE_NOSRCADDR + +/* + * mount options + */ +#define CEPH_OPT_FSID (1<<0) +#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ +#define CEPH_OPT_MYIP (1<<2) /* specified my ip */ +#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ + +#define CEPH_OPT_DEFAULT (0) + +#define ceph_set_opt(client, opt) \ + (client)->options->flags |= CEPH_OPT_##opt; +#define ceph_test_opt(client, opt) \ + (!!((client)->options->flags & CEPH_OPT_##opt)) + +struct ceph_options { + int flags; + struct ceph_fsid fsid; + struct ceph_entity_addr my_addr; + int mount_timeout; + int osd_idle_ttl; + int osd_timeout; + int osd_keepalive_timeout; + + /* + * any type that can't be simply compared or doesn't need need + * to be compared should go beyond this point, + * ceph_compare_options() should be updated accordingly + */ + + struct ceph_entity_addr *mon_addr; /* should be the first + pointer type of args */ + int num_mon; + char *name; + struct ceph_crypto_key *key; +}; + +/* + * defaults + */ +#define CEPH_MOUNT_TIMEOUT_DEFAULT 60 +#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */ +#define CEPH_OSD_KEEPALIVE_DEFAULT 5 +#define CEPH_OSD_IDLE_TTL_DEFAULT 60 + +#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) +#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) + +#define CEPH_AUTH_NAME_DEFAULT "guest" + +/* + * Delay telling the MDS we no longer want caps, in case we reopen + * the file. 
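The CEPH_OPT_* flags above are consumed through the ceph_set_opt()/ceph_test_opt() helpers. A minimal sketch of how a caller might honour the NOCRC mount flag; the function name is hypothetical and struct ceph_client is the per-client state declared later in this header:

        static bool demo_want_data_crc(struct ceph_client *client)
        {
                /* data CRCs are generated unless the nocrc mount flag was set */
                return !ceph_test_opt(client, NOCRC);
        }

        /* a mount path that must not share clients could similarly do
         *      ceph_set_opt(client, NOSHARE)
         * (note that the macro body above already ends in a semicolon) */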
Delay a minimum amount of time, even if we send a cap + * message for some other reason. Otherwise, take the oppotunity to + * update the mds to avoid sending another message later. + */ +#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */ +#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */ + +#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4) + +/* mount state */ +enum { + CEPH_MOUNT_MOUNTING, + CEPH_MOUNT_MOUNTED, + CEPH_MOUNT_UNMOUNTING, + CEPH_MOUNT_UNMOUNTED, + CEPH_MOUNT_SHUTDOWN, +}; + +/* + * subtract jiffies + */ +static inline unsigned long time_sub(unsigned long a, unsigned long b) +{ + BUG_ON(time_after(b, a)); + return (long)a - (long)b; +} + +struct ceph_mds_client; + +/* + * per client state + * + * possibly shared by multiple mount points, if they are + * mounting the same ceph filesystem/cluster. + */ +struct ceph_client { + struct ceph_fsid fsid; + bool have_fsid; + + void *private; + + struct ceph_options *options; + + struct mutex mount_mutex; /* serialize mount attempts */ + wait_queue_head_t auth_wq; + int auth_err; + + int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *); + + u32 supported_features; + u32 required_features; + + struct ceph_messenger *msgr; /* messenger instance */ + struct ceph_mon_client monc; + struct ceph_osd_client osdc; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_dir; + struct dentry *debugfs_monmap; + struct dentry *debugfs_osdmap; +#endif +}; + + + +/* + * snapshots + */ + +/* + * A "snap context" is the set of existing snapshots when we + * write data. It is used by the OSD to guide its COW behavior. + * + * The ceph_snap_context is refcounted, and attached to each dirty + * page, indicating which context the dirty data belonged when it was + * dirtied. + */ +struct ceph_snap_context { + atomic_t nref; + u64 seq; + int num_snaps; + u64 snaps[]; +}; + +static inline struct ceph_snap_context * +ceph_get_snap_context(struct ceph_snap_context *sc) +{ + /* + printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), + atomic_read(&sc->nref)+1); + */ + if (sc) + atomic_inc(&sc->nref); + return sc; +} + +static inline void ceph_put_snap_context(struct ceph_snap_context *sc) +{ + if (!sc) + return; + /* + printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), + atomic_read(&sc->nref)-1); + */ + if (atomic_dec_and_test(&sc->nref)) { + /*printk(" deleting snap_context %p\n", sc);*/ + kfree(sc); + } +} + +/* + * calculate the number of pages a given length and offset map onto, + * if we align the data. 
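A hedged sketch of how a snap context might be built and reference-counted with the helpers above; the allocation site, GFP flags and snapshot ids are illustrative only:

        static struct ceph_snap_context *demo_build_snapc(u64 seq, u64 snap1, u64 snap2)
        {
                struct ceph_snap_context *snapc;

                /* flexible snaps[] array: allocate room for two snapshot ids */
                snapc = kzalloc(sizeof(*snapc) + 2 * sizeof(u64), GFP_NOFS);
                if (!snapc)
                        return NULL;
                atomic_set(&snapc->nref, 1);    /* creator's reference */
                snapc->seq = seq;
                snapc->num_snaps = 2;
                snapc->snaps[0] = snap1;
                snapc->snaps[1] = snap2;
                return snapc;
        }

        /* each additional user: ceph_get_snap_context(snapc);
         * every user, including the creator: ceph_put_snap_context(snapc); */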
+ */ +static inline int calc_pages_for(u64 off, u64 len) +{ + return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - + (off >> PAGE_CACHE_SHIFT); +} + +/* ceph_common.c */ +extern const char *ceph_msg_type_name(int type); +extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); +extern struct kmem_cache *ceph_inode_cachep; +extern struct kmem_cache *ceph_cap_cachep; +extern struct kmem_cache *ceph_dentry_cachep; +extern struct kmem_cache *ceph_file_cachep; + +extern struct ceph_options *ceph_parse_options(char *options, + const char *dev_name, const char *dev_name_end, + int (*parse_extra_token)(char *c, void *private), + void *private); +extern void ceph_destroy_options(struct ceph_options *opt); +extern int ceph_compare_options(struct ceph_options *new_opt, + struct ceph_client *client); +extern struct ceph_client *ceph_create_client(struct ceph_options *opt, + void *private, + unsigned supported_features, + unsigned required_features); +extern u64 ceph_client_id(struct ceph_client *client); +extern void ceph_destroy_client(struct ceph_client *client); +extern int __ceph_open_session(struct ceph_client *client, + unsigned long started); +extern int ceph_open_session(struct ceph_client *client); + +/* pagevec.c */ +extern void ceph_release_page_vector(struct page **pages, int num_pages); + +extern struct page **ceph_get_direct_page_vector(const char __user *data, + int num_pages, + bool write_page); +extern void ceph_put_page_vector(struct page **pages, int num_pages, + bool dirty); +extern void ceph_release_page_vector(struct page **pages, int num_pages); +extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); +extern int ceph_copy_user_to_page_vector(struct page **pages, + const char __user *data, + loff_t off, size_t len); +extern int ceph_copy_to_page_vector(struct page **pages, + const char *data, + loff_t off, size_t len); +extern int ceph_copy_from_page_vector(struct page **pages, + char *data, + loff_t off, size_t len); +extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data, + loff_t off, size_t len); +extern void ceph_zero_page_vector_range(int off, int len, struct page **pages); + + +#endif /* _FS_CEPH_SUPER_H */ diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h new file mode 100644 index 00000000..9935fac8 --- /dev/null +++ b/include/linux/ceph/mdsmap.h @@ -0,0 +1,63 @@ +#ifndef _FS_CEPH_MDSMAP_H +#define _FS_CEPH_MDSMAP_H + +#include <linux/bug.h> +#include "types.h" + +/* + * mds map - describe servers in the mds cluster. 
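Taken together, the ceph_common.c entry points above suggest the bring-up sequence sketched below. This is a hedged sketch: the error-pointer conventions and the assumption that ceph_destroy_client() also releases the options are inferred rather than guaranteed by the declarations, and the parameter names are illustrative.

        static struct ceph_client *demo_connect(char *opt_str, const char *mon_spec,
                                                const char *mon_spec_end)
        {
                struct ceph_options *opt;
                struct ceph_client *client;
                int err;

                opt = ceph_parse_options(opt_str, mon_spec, mon_spec_end, NULL, NULL);
                if (IS_ERR(opt))
                        return ERR_CAST(opt);

                client = ceph_create_client(opt, NULL,
                                            CEPH_FEATURE_SUPPORTED_DEFAULT,
                                            CEPH_FEATURE_REQUIRED_DEFAULT);
                if (IS_ERR(client)) {
                        ceph_destroy_options(opt);
                        return client;
                }

                err = ceph_open_session(client);
                if (err) {
                        ceph_destroy_client(client);    /* assumed to free the options too */
                        return ERR_PTR(err);
                }
                return client;
        }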
+ * + * we limit fields to those the client actually xcares about + */ +struct ceph_mds_info { + u64 global_id; + struct ceph_entity_addr addr; + s32 state; + int num_export_targets; + bool laggy; + u32 *export_targets; +}; + +struct ceph_mdsmap { + u32 m_epoch, m_client_epoch, m_last_failure; + u32 m_root; + u32 m_session_timeout; /* seconds */ + u32 m_session_autoclose; /* seconds */ + u64 m_max_file_size; + u32 m_max_mds; /* size of m_addr, m_state arrays */ + struct ceph_mds_info *m_info; + + /* which object pools file data can be stored in */ + int m_num_data_pg_pools; + u32 *m_data_pg_pools; + u32 m_cas_pg_pool; +}; + +static inline struct ceph_entity_addr * +ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) +{ + if (w >= m->m_max_mds) + return NULL; + return &m->m_info[w].addr; +} + +static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w) +{ + BUG_ON(w < 0); + if (w >= m->m_max_mds) + return CEPH_MDS_STATE_DNE; + return m->m_info[w].state; +} + +static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) +{ + if (w >= 0 && w < m->m_max_mds) + return m->m_info[w].laggy; + return false; +} + +extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); +extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); +extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); + +#endif diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h new file mode 100644 index 00000000..3bff047f --- /dev/null +++ b/include/linux/ceph/messenger.h @@ -0,0 +1,255 @@ +#ifndef __FS_CEPH_MESSENGER_H +#define __FS_CEPH_MESSENGER_H + +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/net.h> +#include <linux/radix-tree.h> +#include <linux/uio.h> +#include <linux/workqueue.h> + +#include "types.h" +#include "buffer.h" + +struct ceph_msg; +struct ceph_connection; + +/* + * Ceph defines these callbacks for handling connection events. + */ +struct ceph_connection_operations { + struct ceph_connection *(*get)(struct ceph_connection *); + void (*put)(struct ceph_connection *); + + /* handle an incoming message. */ + void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m); + + /* authorize an outgoing connection */ + int (*get_authorizer) (struct ceph_connection *con, + void **buf, int *len, int *proto, + void **reply_buf, int *reply_len, int force_new); + int (*verify_authorizer_reply) (struct ceph_connection *con, int len); + int (*invalidate_authorizer)(struct ceph_connection *con); + + /* protocol version mismatch */ + void (*bad_proto) (struct ceph_connection *con); + + /* there was some error on the socket (disconnect, whatever) */ + void (*fault) (struct ceph_connection *con); + + /* a remote host as terminated a message exchange session, and messages + * we sent (or they tried to send us) may be lost. */ + void (*peer_reset) (struct ceph_connection *con); + + struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, + struct ceph_msg_header *hdr, + int *skip); +}; + +/* use format string %s%d */ +#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num) + +struct ceph_messenger { + struct ceph_entity_inst inst; /* my name+address */ + struct ceph_entity_addr my_enc_addr; + + bool nocrc; + + /* + * the global_seq counts connections i (attempt to) initiate + * in order to disambiguate certain connect race conditions. + */ + u32 global_seq; + spinlock_t global_seq_lock; + + u32 supported_features; + u32 required_features; +}; + +/* + * a single message. 
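A hedged sketch of walking the mds map with the inline helpers above to find a usable rank; treating a positive state as "usable" is an assumption here (ceph_mdsmap_get_random_mds() is the real helper for this job), and the function name is hypothetical:

        static int demo_pick_mds(struct ceph_mdsmap *m)
        {
                int w;

                for (w = 0; w < m->m_max_mds; w++) {
                        if (ceph_mdsmap_get_state(m, w) <= 0)
                                continue;               /* DNE or stopped */
                        if (ceph_mdsmap_is_laggy(m, w))
                                continue;               /* skip laggy ranks */
                        return w;
                }
                return -1;                              /* no usable mds found */
        }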
it contains a header (src, dest, message type, etc.), + * footer (crc values, mainly), a "front" message body, and possibly a + * data payload (stored in some number of pages). + */ +struct ceph_msg { + struct ceph_msg_header hdr; /* header */ + struct ceph_msg_footer footer; /* footer */ + struct kvec front; /* unaligned blobs of message */ + struct ceph_buffer *middle; + struct page **pages; /* data payload. NOT OWNER. */ + unsigned nr_pages; /* size of page array */ + unsigned page_alignment; /* io offset in first page */ + struct ceph_pagelist *pagelist; /* instead of pages */ + struct list_head list_head; + struct kref kref; + struct bio *bio; /* instead of pages/pagelist */ + struct bio *bio_iter; /* bio iterator */ + int bio_seg; /* current bio segment */ + struct ceph_pagelist *trail; /* the trailing part of the data */ + bool front_is_vmalloc; + bool more_to_follow; + bool needs_out_seq; + int front_max; + unsigned long ack_stamp; /* tx: when we were acked */ + + struct ceph_msgpool *pool; +}; + +struct ceph_msg_pos { + int page, page_pos; /* which page; offset in page */ + int data_pos; /* offset in data payload */ + bool did_page_crc; /* true if we've calculated crc for current page */ +}; + +/* ceph connection fault delay defaults, for exponential backoff */ +#define BASE_DELAY_INTERVAL (HZ/2) +#define MAX_DELAY_INTERVAL (5 * 60 * HZ) + +/* + * ceph_connection state bit flags + */ +#define LOSSYTX 0 /* we can close channel or drop messages on errors */ +#define CONNECTING 1 +#define NEGOTIATING 2 +#define KEEPALIVE_PENDING 3 +#define WRITE_PENDING 4 /* we have data ready to send */ +#define STANDBY 8 /* no outgoing messages, socket closed. we keep + * the ceph_connection around to maintain shared + * state with the peer. */ +#define CLOSED 10 /* we've closed the connection */ +#define SOCK_CLOSED 11 /* socket state changed to closed */ +#define OPENING 13 /* open connection w/ (possibly new) peer */ +#define DEAD 14 /* dead, about to kfree */ +#define BACKOFF 15 + +/* + * A single connection with another host. + * + * We maintain a queue of outgoing messages, and some session state to + * ensure that we can preserve the lossless, ordered delivery of + * messages in the case of a TCP disconnect. 
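The state values above are bit numbers rather than masks, which suggests they are manipulated with the atomic bitops on the connection's state word (declared in struct ceph_connection below). A hedged sketch; the workqueue used and the function name are illustrative:

        static void demo_queue_write(struct ceph_connection *con)
        {
                /* only queue work once while a write is already pending */
                if (!test_and_set_bit(WRITE_PENDING, &con->state))
                        queue_delayed_work(system_wq, &con->work, 0);
        }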
+ */ +struct ceph_connection { + void *private; + atomic_t nref; + + const struct ceph_connection_operations *ops; + + struct ceph_messenger *msgr; + struct socket *sock; + unsigned long state; /* connection state (see flags above) */ + const char *error_msg; /* error message, if any */ + + struct ceph_entity_addr peer_addr; /* peer address */ + struct ceph_entity_name peer_name; /* peer name */ + struct ceph_entity_addr peer_addr_for_me; + unsigned peer_features; + u32 connect_seq; /* identify the most recent connection + attempt for this connection, client */ + u32 peer_global_seq; /* peer's global seq for this connection */ + + int auth_retry; /* true if we need a newer authorizer */ + void *auth_reply_buf; /* where to put the authorizer reply */ + int auth_reply_buf_len; + + struct mutex mutex; + + /* out queue */ + struct list_head out_queue; + struct list_head out_sent; /* sending or sent but unacked */ + u64 out_seq; /* last message queued for send */ + + u64 in_seq, in_seq_acked; /* last message received, acked */ + + /* connection negotiation temps */ + char in_banner[CEPH_BANNER_MAX_LEN]; + union { + struct { /* outgoing connection */ + struct ceph_msg_connect out_connect; + struct ceph_msg_connect_reply in_reply; + }; + struct { /* incoming */ + struct ceph_msg_connect in_connect; + struct ceph_msg_connect_reply out_reply; + }; + }; + struct ceph_entity_addr actual_peer_addr; + + /* message out temps */ + struct ceph_msg *out_msg; /* sending message (== tail of + out_sent) */ + bool out_msg_done; + struct ceph_msg_pos out_msg_pos; + + struct kvec out_kvec[8], /* sending header/footer data */ + *out_kvec_cur; + int out_kvec_left; /* kvec's left in out_kvec */ + int out_skip; /* skip this many bytes */ + int out_kvec_bytes; /* total bytes left */ + bool out_kvec_is_msg; /* kvec refers to out_msg */ + int out_more; /* there is more data after the kvecs */ + __le64 out_temp_ack; /* for writing an ack */ + + /* message in temps */ + struct ceph_msg_header in_hdr; + struct ceph_msg *in_msg; + struct ceph_msg_pos in_msg_pos; + u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ + + char in_tag; /* protocol control byte */ + int in_base_pos; /* bytes read */ + __le64 in_temp_ack; /* for reading an ack */ + + struct delayed_work work; /* send|recv work */ + unsigned long delay; /* current delay interval */ +}; + + +extern const char *ceph_pr_addr(const struct sockaddr_storage *ss); +extern int ceph_parse_ips(const char *c, const char *end, + struct ceph_entity_addr *addr, + int max_count, int *count); + + +extern int ceph_msgr_init(void); +extern void ceph_msgr_exit(void); +extern void ceph_msgr_flush(void); + +extern struct ceph_messenger *ceph_messenger_create( + struct ceph_entity_addr *myaddr, + u32 features, u32 required); +extern void ceph_messenger_destroy(struct ceph_messenger *); + +extern void ceph_con_init(struct ceph_messenger *msgr, + struct ceph_connection *con); +extern void ceph_con_open(struct ceph_connection *con, + struct ceph_entity_addr *addr); +extern bool ceph_con_opened(struct ceph_connection *con); +extern void ceph_con_close(struct ceph_connection *con); +extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); +extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg); +extern void ceph_con_revoke_message(struct ceph_connection *con, + struct ceph_msg *msg); +extern void ceph_con_keepalive(struct ceph_connection *con); +extern struct ceph_connection *ceph_con_get(struct ceph_connection *con); +extern void 
ceph_con_put(struct ceph_connection *con); + +extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, + bool can_fail); +extern void ceph_msg_kfree(struct ceph_msg *m); + + +static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) +{ + kref_get(&msg->kref); + return msg; +} +extern void ceph_msg_last_put(struct kref *kref); +static inline void ceph_msg_put(struct ceph_msg *msg) +{ + kref_put(&msg->kref, ceph_msg_last_put); +} + +extern void ceph_msg_dump(struct ceph_msg *msg); + +#endif diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h new file mode 100644 index 00000000..545f8591 --- /dev/null +++ b/include/linux/ceph/mon_client.h @@ -0,0 +1,122 @@ +#ifndef _FS_CEPH_MON_CLIENT_H +#define _FS_CEPH_MON_CLIENT_H + +#include <linux/completion.h> +#include <linux/kref.h> +#include <linux/rbtree.h> + +#include "messenger.h" + +struct ceph_client; +struct ceph_mount_args; +struct ceph_auth_client; + +/* + * The monitor map enumerates the set of all monitors. + */ +struct ceph_monmap { + struct ceph_fsid fsid; + u32 epoch; + u32 num_mon; + struct ceph_entity_inst mon_inst[0]; +}; + +struct ceph_mon_client; +struct ceph_mon_generic_request; + + +/* + * Generic mechanism for resending monitor requests. + */ +typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc, + int newmon); + +/* a pending monitor request */ +struct ceph_mon_request { + struct ceph_mon_client *monc; + struct delayed_work delayed_work; + unsigned long delay; + ceph_monc_request_func_t do_request; +}; + +/* + * ceph_mon_generic_request is being used for the statfs and poolop requests + * which are bening done a bit differently because we need to get data back + * to the caller + */ +struct ceph_mon_generic_request { + struct kref kref; + u64 tid; + struct rb_node node; + int result; + void *buf; + int buf_len; + struct completion completion; + struct ceph_msg *request; /* original request */ + struct ceph_msg *reply; /* and reply */ +}; + +struct ceph_mon_client { + struct ceph_client *client; + struct ceph_monmap *monmap; + + struct mutex mutex; + struct delayed_work delayed_work; + + struct ceph_auth_client *auth; + struct ceph_msg *m_auth, *m_auth_reply, *m_subscribe, *m_subscribe_ack; + int pending_auth; + + bool hunting; + int cur_mon; /* last monitor i contacted */ + unsigned long sub_sent, sub_renew_after; + struct ceph_connection *con; + bool have_fsid; + + /* pending generic requests */ + struct rb_root generic_request_tree; + int num_generic_requests; + u64 last_tid; + + /* mds/osd map */ + int want_mdsmap; + int want_next_osdmap; /* 1 = want, 2 = want+asked */ + u32 have_osdmap, have_mdsmap; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_file; +#endif +}; + +extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end); +extern int ceph_monmap_contains(struct ceph_monmap *m, + struct ceph_entity_addr *addr); + +extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); +extern void ceph_monc_stop(struct ceph_mon_client *monc); + +/* + * The model here is to indicate that we need a new map of at least + * epoch @want, and also call in when we receive a map. We will + * periodically rerequest the map from the monitor cluster until we + * get what we want. 
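A hedged sketch of the message lifecycle using ceph_msg_new() and ceph_con_send() above. The ownership rule assumed here, that ceph_con_send() takes over the caller's reference and ceph_msg_get() is only needed if the message is used afterwards, is an inference; the type value and function name are illustrative.

        static int demo_send_blob(struct ceph_connection *con, int type,
                                  const void *blob, int blob_len)
        {
                struct ceph_msg *msg;

                msg = ceph_msg_new(type, blob_len, GFP_NOFS, false);
                if (!msg)
                        return -ENOMEM;

                memcpy(msg->front.iov_base, blob, blob_len);    /* fill the front payload */
                msg->hdr.version = cpu_to_le16(1);              /* hypothetical encoding version */

                ceph_con_send(con, msg);
                return 0;
        }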
+ */ +extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have); +extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); + +extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); + +extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, + struct ceph_statfs *buf); + +extern int ceph_monc_open_session(struct ceph_mon_client *monc); + +extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); + +extern int ceph_monc_create_snapid(struct ceph_mon_client *monc, + u32 pool, u64 *snapid); + +extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc, + u32 pool, u64 snapid); + +#endif diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h new file mode 100644 index 00000000..a362605f --- /dev/null +++ b/include/linux/ceph/msgpool.h @@ -0,0 +1,25 @@ +#ifndef _FS_CEPH_MSGPOOL +#define _FS_CEPH_MSGPOOL + +#include <linux/mempool.h> +#include "messenger.h" + +/* + * we use memory pools for preallocating messages we may receive, to + * avoid unexpected OOM conditions. + */ +struct ceph_msgpool { + const char *name; + mempool_t *pool; + int front_len; /* preallocated payload size */ +}; + +extern int ceph_msgpool_init(struct ceph_msgpool *pool, + int front_len, int size, bool blocking, + const char *name); +extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); +extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *, + int front_len); +extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); + +#endif diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h new file mode 100644 index 00000000..680d3d64 --- /dev/null +++ b/include/linux/ceph/msgr.h @@ -0,0 +1,175 @@ +#ifndef CEPH_MSGR_H +#define CEPH_MSGR_H + +/* + * Data types for message passing layer used by Ceph. + */ + +#define CEPH_MON_PORT 6789 /* default monitor port */ + +/* + * client-side processes will try to bind to ports in this + * range, simply for the benefit of tools like nmap or wireshark + * that would like to identify the protocol. + */ +#define CEPH_PORT_FIRST 6789 +#define CEPH_PORT_START 6800 /* non-monitors start here */ +#define CEPH_PORT_LAST 6900 + +/* + * tcp connection banner. include a protocol version. and adjust + * whenever the wire protocol changes. try to keep this string length + * constant. + */ +#define CEPH_BANNER "ceph v027" +#define CEPH_BANNER_MAX_LEN 30 + + +/* + * Rollover-safe type and comparator for 32-bit sequence numbers. + * Comparator returns -1, 0, or 1. + */ +typedef __u32 ceph_seq_t; + +static inline __s32 ceph_seq_cmp(__u32 a, __u32 b) +{ + return (__s32)a - (__s32)b; +} + + +/* + * entity_name -- logical name for a process participating in the + * network, e.g. 'mds0' or 'osd3'. + */ +struct ceph_entity_name { + __u8 type; /* CEPH_ENTITY_TYPE_* */ + __le64 num; +} __attribute__ ((packed)); + +#define CEPH_ENTITY_TYPE_MON 0x01 +#define CEPH_ENTITY_TYPE_MDS 0x02 +#define CEPH_ENTITY_TYPE_OSD 0x04 +#define CEPH_ENTITY_TYPE_CLIENT 0x08 +#define CEPH_ENTITY_TYPE_AUTH 0x20 + +#define CEPH_ENTITY_TYPE_ANY 0xFF + +extern const char *ceph_entity_type_name(int type); + +/* + * entity_addr -- network address + */ +struct ceph_entity_addr { + __le32 type; + __le32 nonce; /* unique id for process (e.g. 
pid) */ + struct sockaddr_storage in_addr; +} __attribute__ ((packed)); + +struct ceph_entity_inst { + struct ceph_entity_name name; + struct ceph_entity_addr addr; +} __attribute__ ((packed)); + + +/* used by message exchange protocol */ +#define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */ +#define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */ +#define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing + incoming connection */ +#define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again + with higher cseq */ +#define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again + with higher gseq */ +#define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */ +#define CEPH_MSGR_TAG_MSG 7 /* message */ +#define CEPH_MSGR_TAG_ACK 8 /* message ack */ +#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ +#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ +#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ +#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ + + +/* + * connection negotiation + */ +struct ceph_msg_connect { + __le64 features; /* supported feature bits */ + __le32 host_type; /* CEPH_ENTITY_TYPE_* */ + __le32 global_seq; /* count connections initiated by this host */ + __le32 connect_seq; /* count connections initiated in this session */ + __le32 protocol_version; + __le32 authorizer_protocol; + __le32 authorizer_len; + __u8 flags; /* CEPH_MSG_CONNECT_* */ +} __attribute__ ((packed)); + +struct ceph_msg_connect_reply { + __u8 tag; + __le64 features; /* feature bits for this session */ + __le32 global_seq; + __le32 connect_seq; + __le32 protocol_version; + __le32 authorizer_len; + __u8 flags; +} __attribute__ ((packed)); + +#define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */ + + +/* + * message header + */ +struct ceph_msg_header_old { + __le64 seq; /* message seq# for this session */ + __le64 tid; /* transaction id */ + __le16 type; /* message type */ + __le16 priority; /* priority. higher value == higher priority */ + __le16 version; /* version of message encoding */ + + __le32 front_len; /* bytes in main payload */ + __le32 middle_len;/* bytes in middle payload */ + __le32 data_len; /* bytes of data payload */ + __le16 data_off; /* sender: include full offset; + receiver: mask against ~PAGE_MASK */ + + struct ceph_entity_inst src, orig_src; + __le32 reserved; + __le32 crc; /* header crc32c */ +} __attribute__ ((packed)); + +struct ceph_msg_header { + __le64 seq; /* message seq# for this session */ + __le64 tid; /* transaction id */ + __le16 type; /* message type */ + __le16 priority; /* priority. 
higher value == higher priority */ + __le16 version; /* version of message encoding */ + + __le32 front_len; /* bytes in main payload */ + __le32 middle_len;/* bytes in middle payload */ + __le32 data_len; /* bytes of data payload */ + __le16 data_off; /* sender: include full offset; + receiver: mask against ~PAGE_MASK */ + + struct ceph_entity_name src; + __le32 reserved; + __le32 crc; /* header crc32c */ +} __attribute__ ((packed)); + +#define CEPH_MSG_PRIO_LOW 64 +#define CEPH_MSG_PRIO_DEFAULT 127 +#define CEPH_MSG_PRIO_HIGH 196 +#define CEPH_MSG_PRIO_HIGHEST 255 + +/* + * follows data payload + */ +struct ceph_msg_footer { + __le32 front_crc, middle_crc, data_crc; + __u8 flags; +} __attribute__ ((packed)); + +#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ +#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ + + +#endif diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h new file mode 100644 index 00000000..7c05ac20 --- /dev/null +++ b/include/linux/ceph/osd_client.h @@ -0,0 +1,296 @@ +#ifndef _FS_CEPH_OSD_CLIENT_H +#define _FS_CEPH_OSD_CLIENT_H + +#include <linux/completion.h> +#include <linux/kref.h> +#include <linux/mempool.h> +#include <linux/rbtree.h> + +#include "types.h" +#include "osdmap.h" +#include "messenger.h" + +/* + * Maximum object name size + * (must be at least as big as RBD_MAX_MD_NAME_LEN -- currently 100) + */ +#define MAX_OBJ_NAME_SIZE 100 + +struct ceph_msg; +struct ceph_snap_context; +struct ceph_osd_request; +struct ceph_osd_client; +struct ceph_authorizer; +struct ceph_pagelist; + +/* + * completion callback for async writepages + */ +typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *, + struct ceph_msg *); + +/* a given osd we're communicating with */ +struct ceph_osd { + atomic_t o_ref; + struct ceph_osd_client *o_osdc; + int o_osd; + int o_incarnation; + struct rb_node o_node; + struct ceph_connection o_con; + struct list_head o_requests; + struct list_head o_linger_requests; + struct list_head o_osd_lru; + struct ceph_authorizer *o_authorizer; + void *o_authorizer_buf, *o_authorizer_reply_buf; + size_t o_authorizer_buf_len, o_authorizer_reply_buf_len; + unsigned long lru_ttl; + int o_marked_for_keepalive; + struct list_head o_keepalive_item; +}; + +/* an in-flight request */ +struct ceph_osd_request { + u64 r_tid; /* unique for this client */ + struct rb_node r_node; + struct list_head r_req_lru_item; + struct list_head r_osd_item; + struct list_head r_linger_item; + struct list_head r_linger_osd; + struct ceph_osd *r_osd; + struct ceph_pg r_pgid; + int r_pg_osds[CEPH_PG_MAX_SIZE]; + int r_num_pg_osds; + + struct ceph_connection *r_con_filling_msg; + + struct ceph_msg *r_request, *r_reply; + int r_result; + int r_flags; /* any additional flags for the osd */ + u32 r_sent; /* >0 if r_request is sending/sent */ + int r_got_reply; + int r_linger; + + struct ceph_osd_client *r_osdc; + struct kref r_kref; + bool r_mempool; + struct completion r_completion, r_safe_completion; + ceph_osdc_callback_t r_callback, r_safe_callback; + struct ceph_eversion r_reassert_version; + struct list_head r_unsafe_item; + + struct inode *r_inode; /* for use by callbacks */ + void *r_priv; /* ditto */ + + char r_oid[MAX_OBJ_NAME_SIZE]; /* object name */ + int r_oid_len; + unsigned long r_stamp; /* send OR check time */ + + struct ceph_file_layout r_file_layout; + struct ceph_snap_context *r_snapc; /* snap context for writes */ + unsigned r_num_pages; /* size of page array (follows) */ + unsigned r_page_alignment; /* io 
offset in first page */ + struct page **r_pages; /* pages for data payload */ + int r_pages_from_pool; + int r_own_pages; /* if true, i own page list */ +#ifdef CONFIG_BLOCK + struct bio *r_bio; /* instead of pages */ +#endif + + struct ceph_pagelist *r_trail; /* trailing part of the data */ +}; + +struct ceph_osd_event { + u64 cookie; + int one_shot; + struct ceph_osd_client *osdc; + void (*cb)(u64, u64, u8, void *); + void *data; + struct rb_node node; + struct list_head osd_node; + struct kref kref; + struct completion completion; +}; + +struct ceph_osd_event_work { + struct work_struct work; + struct ceph_osd_event *event; + u64 ver; + u64 notify_id; + u8 opcode; +}; + +struct ceph_osd_client { + struct ceph_client *client; + + struct ceph_osdmap *osdmap; /* current map */ + struct rw_semaphore map_sem; + struct completion map_waiters; + u64 last_requested_map; + + struct mutex request_mutex; + struct rb_root osds; /* osds */ + struct list_head osd_lru; /* idle osds */ + u64 timeout_tid; /* tid of timeout triggering rq */ + u64 last_tid; /* tid of last request */ + struct rb_root requests; /* pending requests */ + struct list_head req_lru; /* in-flight lru */ + struct list_head req_unsent; /* unsent/need-resend queue */ + struct list_head req_notarget; /* map to no osd */ + struct list_head req_linger; /* lingering requests */ + int num_requests; + struct delayed_work timeout_work; + struct delayed_work osds_timeout_work; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_file; +#endif + + mempool_t *req_mempool; + + struct ceph_msgpool msgpool_op; + struct ceph_msgpool msgpool_op_reply; + + spinlock_t event_lock; + struct rb_root event_tree; + u64 event_count; + + struct workqueue_struct *notify_wq; +}; + +struct ceph_osd_req_op { + u16 op; /* CEPH_OSD_OP_* */ + u32 flags; /* CEPH_OSD_FLAG_* */ + union { + struct { + u64 offset, length; + u64 truncate_size; + u32 truncate_seq; + } extent; + struct { + const char *name; + u32 name_len; + const char *val; + u32 value_len; + __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ + __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ + } xattr; + struct { + const char *class_name; + __u8 class_len; + const char *method_name; + __u8 method_len; + __u8 argc; + const char *indata; + u32 indata_len; + } cls; + struct { + u64 cookie, count; + } pgls; + struct { + u64 snapid; + } snap; + struct { + u64 cookie; + u64 ver; + __u8 flag; + u32 prot_ver; + u32 timeout; + } watch; + }; + u32 payload_len; +}; + +extern int ceph_osdc_init(struct ceph_osd_client *osdc, + struct ceph_client *client); +extern void ceph_osdc_stop(struct ceph_osd_client *osdc); + +extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, + struct ceph_msg *msg); +extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, + struct ceph_msg *msg); + +extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc, + struct ceph_file_layout *layout, + u64 snapid, + u64 off, u64 *plen, u64 *bno, + struct ceph_osd_request *req, + struct ceph_osd_req_op *op); + +extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, + int flags, + struct ceph_snap_context *snapc, + struct ceph_osd_req_op *ops, + bool use_mempool, + gfp_t gfp_flags, + struct page **pages, + struct bio *bio); + +extern void ceph_osdc_build_request(struct ceph_osd_request *req, + u64 off, u64 *plen, + struct ceph_osd_req_op *src_ops, + struct ceph_snap_context *snapc, + struct timespec *mtime, + const char *oid, + int oid_len); + +extern struct ceph_osd_request *ceph_osdc_new_request(struct 
ceph_osd_client *, + struct ceph_file_layout *layout, + struct ceph_vino vino, + u64 offset, u64 *len, int op, int flags, + struct ceph_snap_context *snapc, + int do_sync, u32 truncate_seq, + u64 truncate_size, + struct timespec *mtime, + bool use_mempool, int num_reply, + int page_align); + +extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, + struct ceph_osd_request *req); +extern void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req); + +static inline void ceph_osdc_get_request(struct ceph_osd_request *req) +{ + kref_get(&req->r_kref); +} +extern void ceph_osdc_release_request(struct kref *kref); +static inline void ceph_osdc_put_request(struct ceph_osd_request *req) +{ + kref_put(&req->r_kref, ceph_osdc_release_request); +} + +extern int ceph_osdc_start_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req, + bool nofail); +extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req); +extern void ceph_osdc_sync(struct ceph_osd_client *osdc); + +extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + u64 off, u64 *plen, + u32 truncate_seq, u64 truncate_size, + struct page **pages, int nr_pages, + int page_align); + +extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + struct ceph_snap_context *sc, + u64 off, u64 len, + u32 truncate_seq, u64 truncate_size, + struct timespec *mtime, + struct page **pages, int nr_pages, + int flags, int do_sync, bool nofail); + +/* watch/notify events */ +extern int ceph_osdc_create_event(struct ceph_osd_client *osdc, + void (*event_cb)(u64, u64, u8, void *), + int one_shot, void *data, + struct ceph_osd_event **pevent); +extern void ceph_osdc_cancel_event(struct ceph_osd_event *event); +extern int ceph_osdc_wait_event(struct ceph_osd_event *event, + unsigned long timeout); +extern void ceph_osdc_put_event(struct ceph_osd_event *event); +#endif + diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h new file mode 100644 index 00000000..ba4c205c --- /dev/null +++ b/include/linux/ceph/osdmap.h @@ -0,0 +1,130 @@ +#ifndef _FS_CEPH_OSDMAP_H +#define _FS_CEPH_OSDMAP_H + +#include <linux/rbtree.h> +#include "types.h" +#include "ceph_fs.h" +#include <linux/crush/crush.h> + +/* + * The osd map describes the current membership of the osd cluster and + * specifies the mapping of objects to placement groups and placement + * groups to (sets of) osds. That is, it completely specifies the + * (desired) distribution of all data objects in the system at some + * point in time. + * + * Each map version is identified by an epoch, which increases monotonically. + * + * The map can be updated either via an incremental map (diff) describing + * the change between two successive epochs, or as a fully encoded map. 
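A hedged sketch of a synchronous object read built from the declarations above, combining calc_pages_for() and the page-vector helpers from libceph.h with ceph_osdc_readpages(). The error-pointer convention for ceph_alloc_page_vector(), the meaning of the returned byte count, and the zero truncate_seq/truncate_size are all assumptions.

        static int demo_object_read(struct ceph_osd_client *osdc, struct ceph_vino vino,
                                    struct ceph_file_layout *layout,
                                    u64 off, u64 len, char *dst)
        {
                int num_pages = calc_pages_for(off, len);
                int page_align = off & ~PAGE_MASK;      /* offset within the first page */
                struct page **pages;
                u64 plen = len;
                int ret;

                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
                if (IS_ERR(pages))
                        return PTR_ERR(pages);

                ret = ceph_osdc_readpages(osdc, vino, layout, off, &plen,
                                          0, 0,         /* truncate_seq, truncate_size */
                                          pages, num_pages, page_align);
                if (ret >= 0)
                        ret = ceph_copy_from_page_vector(pages, dst, page_align, plen);

                ceph_release_page_vector(pages, num_pages);
                return ret;
        }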
+ */ +struct ceph_pg_pool_info { + struct rb_node node; + int id; + struct ceph_pg_pool v; + int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; + char *name; +}; + +struct ceph_pg_mapping { + struct rb_node node; + struct ceph_pg pgid; + int len; + int osds[]; +}; + +struct ceph_osdmap { + struct ceph_fsid fsid; + u32 epoch; + u32 mkfs_epoch; + struct ceph_timespec created, modified; + + u32 flags; /* CEPH_OSDMAP_* */ + + u32 max_osd; /* size of osd_state, _offload, _addr arrays */ + u8 *osd_state; /* CEPH_OSD_* */ + u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */ + struct ceph_entity_addr *osd_addr; + + struct rb_root pg_temp; + struct rb_root pg_pools; + u32 pool_max; + + /* the CRUSH map specifies the mapping of placement groups to + * the list of osds that store+replicate them. */ + struct crush_map *crush; +}; + +/* + * file layout helpers + */ +#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit)) +#define ceph_file_layout_stripe_count(l) \ + ((__s32)le32_to_cpu((l).fl_stripe_count)) +#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size)) +#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash)) +#define ceph_file_layout_object_su(l) \ + ((__s32)le32_to_cpu((l).fl_object_stripe_unit)) +#define ceph_file_layout_pg_preferred(l) \ + ((__s32)le32_to_cpu((l).fl_pg_preferred)) +#define ceph_file_layout_pg_pool(l) \ + ((__s32)le32_to_cpu((l).fl_pg_pool)) + +static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l) +{ + return le32_to_cpu(l->fl_stripe_unit) * + le32_to_cpu(l->fl_stripe_count); +} + +/* "period" == bytes before i start on a new set of objects */ +static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l) +{ + return le32_to_cpu(l->fl_object_size) * + le32_to_cpu(l->fl_stripe_count); +} + + +static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd) +{ + return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP); +} + +static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag) +{ + return map && (map->flags & flag); +} + +extern char *ceph_osdmap_state_str(char *str, int len, int state); + +static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, + int osd) +{ + if (osd >= map->max_osd) + return NULL; + return &map->osd_addr[osd]; +} + +extern struct ceph_osdmap *osdmap_decode(void **p, void *end); +extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, + struct ceph_osdmap *map, + struct ceph_messenger *msgr); +extern void ceph_osdmap_destroy(struct ceph_osdmap *map); + +/* calculate mapping of a file extent to an object */ +extern void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, + u64 off, u64 *plen, + u64 *bno, u64 *oxoff, u64 *oxlen); + +/* calculate mapping of object to a placement group */ +extern int ceph_calc_object_layout(struct ceph_object_layout *ol, + const char *oid, + struct ceph_file_layout *fl, + struct ceph_osdmap *osdmap); +extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, + int *acting); +extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, + struct ceph_pg pgid); + +extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); + +#endif diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h new file mode 100644 index 00000000..9660d6b0 --- /dev/null +++ b/include/linux/ceph/pagelist.h @@ -0,0 +1,75 @@ +#ifndef __FS_CEPH_PAGELIST_H +#define __FS_CEPH_PAGELIST_H + +#include 
<linux/list.h> + +struct ceph_pagelist { + struct list_head head; + void *mapped_tail; + size_t length; + size_t room; + struct list_head free_list; + size_t num_pages_free; +}; + +struct ceph_pagelist_cursor { + struct ceph_pagelist *pl; /* pagelist, for error checking */ + struct list_head *page_lru; /* page in list */ + size_t room; /* room remaining to reset to */ +}; + +static inline void ceph_pagelist_init(struct ceph_pagelist *pl) +{ + INIT_LIST_HEAD(&pl->head); + pl->mapped_tail = NULL; + pl->length = 0; + pl->room = 0; + INIT_LIST_HEAD(&pl->free_list); + pl->num_pages_free = 0; +} + +extern int ceph_pagelist_release(struct ceph_pagelist *pl); + +extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l); + +extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space); + +extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl); + +extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, + struct ceph_pagelist_cursor *c); + +extern int ceph_pagelist_truncate(struct ceph_pagelist *pl, + struct ceph_pagelist_cursor *c); + +static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) +{ + __le64 ev = cpu_to_le64(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v) +{ + __le32 ev = cpu_to_le32(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v) +{ + __le16 ev = cpu_to_le16(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v) +{ + return ceph_pagelist_append(pl, &v, 1); +} +static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl, + char *s, size_t len) +{ + int ret = ceph_pagelist_encode_32(pl, len); + if (ret) + return ret; + if (len) + return ceph_pagelist_append(pl, s, len); + return 0; +} + +#endif diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h new file mode 100644 index 00000000..0a990998 --- /dev/null +++ b/include/linux/ceph/rados.h @@ -0,0 +1,426 @@ +#ifndef CEPH_RADOS_H +#define CEPH_RADOS_H + +/* + * Data types for the Ceph distributed object storage layer RADOS + * (Reliable Autonomic Distributed Object Store). + */ + +#include "msgr.h" + +/* + * osdmap encoding versions + */ +#define CEPH_OSDMAP_INC_VERSION 5 +#define CEPH_OSDMAP_INC_VERSION_EXT 6 +#define CEPH_OSDMAP_VERSION 5 +#define CEPH_OSDMAP_VERSION_EXT 6 + +/* + * fs id + */ +struct ceph_fsid { + unsigned char fsid[16]; +}; + +static inline int ceph_fsid_compare(const struct ceph_fsid *a, + const struct ceph_fsid *b) +{ + return memcmp(a, b, sizeof(*a)); +} + +/* + * ino, object, etc. + */ +typedef __le64 ceph_snapid_t; +#define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */ +#define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */ +#define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */ + +struct ceph_timespec { + __le32 tv_sec; + __le32 tv_nsec; +} __attribute__ ((packed)); + + +/* + * object layout - how objects are mapped into PGs + */ +#define CEPH_OBJECT_LAYOUT_HASH 1 +#define CEPH_OBJECT_LAYOUT_LINEAR 2 +#define CEPH_OBJECT_LAYOUT_HASHINO 3 + +/* + * pg layout -- how PGs are mapped onto (sets of) OSDs + */ +#define CEPH_PG_LAYOUT_CRUSH 0 +#define CEPH_PG_LAYOUT_HASH 1 +#define CEPH_PG_LAYOUT_LINEAR 2 +#define CEPH_PG_LAYOUT_HYBRID 3 + +#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */ + +/* + * placement group. 
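A hedged sketch of assembling a small payload with the pagelist API above, for example for a message trail; the field layout being encoded and the function name are illustrative:

        static int demo_fill_pagelist(struct ceph_pagelist *pl,
                                      const char *name, u32 flags)
        {
                int err;

                ceph_pagelist_init(pl);

                err = ceph_pagelist_encode_32(pl, flags);
                if (!err)
                        err = ceph_pagelist_encode_string(pl, (char *)name, strlen(name));
                if (err)
                        ceph_pagelist_release(pl);      /* drop any pages appended so far */
                return err;
        }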
+ * we encode this into one __le64. + */ +struct ceph_pg { + __le16 preferred; /* preferred primary osd */ + __le16 ps; /* placement seed */ + __le32 pool; /* object pool */ +} __attribute__ ((packed)); + +/* + * pg_pool is a set of pgs storing a pool of objects + * + * pg_num -- base number of pseudorandomly placed pgs + * + * pgp_num -- effective number when calculating pg placement. this + * is used for pg_num increases. new pgs result in data being "split" + * into new pgs. for this to proceed smoothly, new pgs are intiially + * colocated with their parents; that is, pgp_num doesn't increase + * until the new pgs have successfully split. only _then_ are the new + * pgs placed independently. + * + * lpg_num -- localized pg count (per device). replicas are randomly + * selected. + * + * lpgp_num -- as above. + */ +#define CEPH_PG_TYPE_REP 1 +#define CEPH_PG_TYPE_RAID4 2 +#define CEPH_PG_POOL_VERSION 2 +struct ceph_pg_pool { + __u8 type; /* CEPH_PG_TYPE_* */ + __u8 size; /* number of osds in each pg */ + __u8 crush_ruleset; /* crush placement rule */ + __u8 object_hash; /* hash mapping object name to ps */ + __le32 pg_num, pgp_num; /* number of pg's */ + __le32 lpg_num, lpgp_num; /* number of localized pg's */ + __le32 last_change; /* most recent epoch changed */ + __le64 snap_seq; /* seq for per-pool snapshot */ + __le32 snap_epoch; /* epoch of last snap */ + __le32 num_snaps; + __le32 num_removed_snap_intervals; /* if non-empty, NO per-pool snaps */ + __le64 auid; /* who owns the pg */ +} __attribute__ ((packed)); + +/* + * stable_mod func is used to control number of placement groups. + * similar to straight-up modulo, but produces a stable mapping as b + * increases over time. b is the number of bins, and bmask is the + * containing power of 2 minus 1. + * + * b <= bmask and bmask=(2**n)-1 + * e.g., b=12 -> bmask=15, b=123 -> bmask=127 + */ +static inline int ceph_stable_mod(int x, int b, int bmask) +{ + if ((x & bmask) < b) + return x & bmask; + else + return x & (bmask >> 1); +} + +/* + * object layout - how a given object should be stored. + */ +struct ceph_object_layout { + struct ceph_pg ol_pgid; /* raw pg, with _full_ ps precision. */ + __le32 ol_stripe_unit; /* for per-object parity, if any */ +} __attribute__ ((packed)); + +/* + * compound epoch+version, used by storage layer to serialize mutations + */ +struct ceph_eversion { + __le32 epoch; + __le64 version; +} __attribute__ ((packed)); + +/* + * osd map bits + */ + +/* status bits */ +#define CEPH_OSD_EXISTS 1 +#define CEPH_OSD_UP 2 + +/* osd weights. 
fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */ +#define CEPH_OSD_IN 0x10000 +#define CEPH_OSD_OUT 0 + + +/* + * osd map flag bits + */ +#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */ +#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */ +#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */ +#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */ +#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */ + +/* + * osd ops + */ +#define CEPH_OSD_OP_MODE 0xf000 +#define CEPH_OSD_OP_MODE_RD 0x1000 +#define CEPH_OSD_OP_MODE_WR 0x2000 +#define CEPH_OSD_OP_MODE_RMW 0x3000 +#define CEPH_OSD_OP_MODE_SUB 0x4000 + +#define CEPH_OSD_OP_TYPE 0x0f00 +#define CEPH_OSD_OP_TYPE_LOCK 0x0100 +#define CEPH_OSD_OP_TYPE_DATA 0x0200 +#define CEPH_OSD_OP_TYPE_ATTR 0x0300 +#define CEPH_OSD_OP_TYPE_EXEC 0x0400 +#define CEPH_OSD_OP_TYPE_PG 0x0500 + +enum { + /** data **/ + /* read */ + CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1, + CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2, + CEPH_OSD_OP_MAPEXT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 3, + + /* fancy read */ + CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4, + CEPH_OSD_OP_SPARSE_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 5, + + CEPH_OSD_OP_NOTIFY = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 6, + CEPH_OSD_OP_NOTIFY_ACK = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 7, + + /* versioning */ + CEPH_OSD_OP_ASSERT_VER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 8, + + /* write */ + CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1, + CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2, + CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3, + CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4, + CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5, + + /* fancy write */ + CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6, + CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7, + CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8, + CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9, + + CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10, + CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11, + CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12, + + CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13, + CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14, + + CEPH_OSD_OP_WATCH = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15, + + /** attrs **/ + /* read */ + CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1, + CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2, + CEPH_OSD_OP_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 3, + + /* write */ + CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1, + CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2, + CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3, + CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4, + + /** subop **/ + CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1, + CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2, + CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3, + CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4, + CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5, + 
CEPH_OSD_OP_SCRUB_RESERVE = CEPH_OSD_OP_MODE_SUB | 6, + CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7, + CEPH_OSD_OP_SCRUB_STOP = CEPH_OSD_OP_MODE_SUB | 8, + + /** lock **/ + CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1, + CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2, + CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3, + CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4, + CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5, + CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6, + + /** exec **/ + CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1, + + /** pg **/ + CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1, +}; + +static inline int ceph_osd_op_type_lock(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK; +} +static inline int ceph_osd_op_type_data(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA; +} +static inline int ceph_osd_op_type_attr(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR; +} +static inline int ceph_osd_op_type_exec(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC; +} +static inline int ceph_osd_op_type_pg(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG; +} + +static inline int ceph_osd_op_mode_subop(int op) +{ + return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB; +} +static inline int ceph_osd_op_mode_read(int op) +{ + return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD; +} +static inline int ceph_osd_op_mode_modify(int op) +{ + return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR; +} + +/* + * note that the following tmap stuff is also defined in the ceph librados.h + * any modification here needs to be updated there + */ +#define CEPH_OSD_TMAP_HDR 'h' +#define CEPH_OSD_TMAP_SET 's' +#define CEPH_OSD_TMAP_RM 'r' + +extern const char *ceph_osd_op_name(int op); + + +/* + * osd op flags + * + * An op may be READ, WRITE, or READ|WRITE. + */ +enum { + CEPH_OSD_FLAG_ACK = 1, /* want (or is) "ack" ack */ + CEPH_OSD_FLAG_ONNVRAM = 2, /* want (or is) "onnvram" ack */ + CEPH_OSD_FLAG_ONDISK = 4, /* want (or is) "ondisk" ack */ + CEPH_OSD_FLAG_RETRY = 8, /* resend attempt */ + CEPH_OSD_FLAG_READ = 16, /* op may read */ + CEPH_OSD_FLAG_WRITE = 32, /* op may write */ + CEPH_OSD_FLAG_ORDERSNAP = 64, /* EOLDSNAP if snapc is out of order */ + CEPH_OSD_FLAG_PEERSTAT = 128, /* msg includes osd_peer_stat */ + CEPH_OSD_FLAG_BALANCE_READS = 256, + CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */ + CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */ + CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */ + CEPH_OSD_FLAG_EXEC_PUBLIC = 4096, /* op may exec (public) */ +}; + +enum { + CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */ +}; + +#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ +#define EBLACKLISTED ESHUTDOWN /* blacklisted */ + +/* xattr comparison */ +enum { + CEPH_OSD_CMPXATTR_OP_NOP = 0, + CEPH_OSD_CMPXATTR_OP_EQ = 1, + CEPH_OSD_CMPXATTR_OP_NE = 2, + CEPH_OSD_CMPXATTR_OP_GT = 3, + CEPH_OSD_CMPXATTR_OP_GTE = 4, + CEPH_OSD_CMPXATTR_OP_LT = 5, + CEPH_OSD_CMPXATTR_OP_LTE = 6 +}; + +enum { + CEPH_OSD_CMPXATTR_MODE_STRING = 1, + CEPH_OSD_CMPXATTR_MODE_U64 = 2 +}; + +#define RADOS_NOTIFY_VER 1 + +/* + * an individual object operation. 
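A hedged sketch using the mode/type predicates above to route an op code; the function and return strings are illustrative (read-modify-write ops such as TMAPUP fall through the simple read/write split here):

        static const char *demo_classify_op(int op)
        {
                if (ceph_osd_op_mode_subop(op))
                        return "replication subop";
                if (ceph_osd_op_type_attr(op))
                        return ceph_osd_op_mode_modify(op) ? "xattr write" : "xattr read";
                if (ceph_osd_op_type_data(op))
                        return ceph_osd_op_mode_modify(op) ? "data write" : "data read";
                if (ceph_osd_op_type_exec(op))
                        return "class method call";
                if (ceph_osd_op_type_pg(op))
                        return "pg op";
                return "unknown";
        }

        /* demo_classify_op(CEPH_OSD_OP_WRITE) -> "data write",
         * demo_classify_op(CEPH_OSD_OP_GETXATTR) -> "xattr read" */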
each may be accompanied by some data + * payload + */ +struct ceph_osd_op { + __le16 op; /* CEPH_OSD_OP_* */ + __le32 flags; /* CEPH_OSD_FLAG_* */ + union { + struct { + __le64 offset, length; + __le64 truncate_size; + __le32 truncate_seq; + } __attribute__ ((packed)) extent; + struct { + __le32 name_len; + __le32 value_len; + __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ + __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ + } __attribute__ ((packed)) xattr; + struct { + __u8 class_len; + __u8 method_len; + __u8 argc; + __le32 indata_len; + } __attribute__ ((packed)) cls; + struct { + __le64 cookie, count; + } __attribute__ ((packed)) pgls; + struct { + __le64 snapid; + } __attribute__ ((packed)) snap; + struct { + __le64 cookie; + __le64 ver; + __u8 flag; /* 0 = unwatch, 1 = watch */ + } __attribute__ ((packed)) watch; +}; + __le32 payload_len; +} __attribute__ ((packed)); + +/* + * osd request message header. each request may include multiple + * ceph_osd_op object operations. + */ +struct ceph_osd_request_head { + __le32 client_inc; /* client incarnation */ + struct ceph_object_layout layout; /* pgid */ + __le32 osdmap_epoch; /* client's osdmap epoch */ + + __le32 flags; + + struct ceph_timespec mtime; /* for mutations only */ + struct ceph_eversion reassert_version; /* if we are replaying op */ + + __le32 object_len; /* length of object name */ + + __le64 snapid; /* snapid to read */ + __le64 snap_seq; /* writer's snap context */ + __le32 num_snaps; + + __le16 num_ops; + struct ceph_osd_op ops[]; /* followed by ops[], obj, ticket, snaps */ +} __attribute__ ((packed)); + +struct ceph_osd_reply_head { + __le32 client_inc; /* client incarnation */ + __le32 flags; + struct ceph_object_layout layout; + __le32 osdmap_epoch; + struct ceph_eversion reassert_version; /* for replaying uncommitted */ + + __le32 result; /* result code */ + + __le32 object_len; /* length of object name */ + __le32 num_ops; + struct ceph_osd_op ops[0]; /* ops[], object */ +} __attribute__ ((packed)); + + + +#endif diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h new file mode 100644 index 00000000..28b35a00 --- /dev/null +++ b/include/linux/ceph/types.h @@ -0,0 +1,29 @@ +#ifndef _FS_CEPH_TYPES_H +#define _FS_CEPH_TYPES_H + +/* needed before including ceph_fs.h */ +#include <linux/in.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/string.h> + +#include "ceph_fs.h" +#include "ceph_frag.h" +#include "ceph_hash.h" + +/* + * Identify inodes by both their ino AND snapshot id (a u64). + */ +struct ceph_vino { + u64 ino; + u64 snap; +}; + + +/* context for the caps reservation mechanism */ +struct ceph_cap_reservation { + int count; +}; + + +#endif diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h new file mode 100644 index 00000000..b454dfce --- /dev/null +++ b/include/linux/cfag12864b.h @@ -0,0 +1,82 @@ +/* + * Filename: cfag12864b.h + * Version: 0.1.0 + * Description: cfag12864b LCD driver header + * License: GPLv2 + * + * Author: Copyright (C) Miguel Ojeda Sandonis + * Date: 2006-10-12 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _CFAG12864B_H_ +#define _CFAG12864B_H_ + +#define CFAG12864B_WIDTH (128) +#define CFAG12864B_HEIGHT (64) +#define CFAG12864B_CONTROLLERS (2) +#define CFAG12864B_PAGES (8) +#define CFAG12864B_ADDRESSES (64) +#define CFAG12864B_SIZE ((CFAG12864B_CONTROLLERS) * \ + (CFAG12864B_PAGES) * \ + (CFAG12864B_ADDRESSES)) + +/* + * The driver will blit this buffer to the LCD + * + * Its size is CFAG12864B_SIZE. + */ +extern unsigned char * cfag12864b_buffer; + +/* + * Get the refresh rate of the LCD + * + * Returns the refresh rate (hertz). + */ +extern unsigned int cfag12864b_getrate(void); + +/* + * Enable refreshing + * + * Returns 0 if successful (anyone was using it), + * or != 0 if failed (someone is using it). + */ +extern unsigned char cfag12864b_enable(void); + +/* + * Disable refreshing + * + * You should call this only when you finish using the LCD. + */ +extern void cfag12864b_disable(void); + +/* + * Is enabled refreshing? (is anyone using the module?) + * + * Returns 0 if refreshing is not enabled (anyone is using it), + * or != 0 if refreshing is enabled (someone is using it). + * + * Useful for buffer read-only modules. + */ +extern unsigned char cfag12864b_isenabled(void); + +/* + * Is the module inited? + */ +extern unsigned char cfag12864b_isinited(void); + +#endif /* _CFAG12864B_H_ */ + diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h new file mode 100644 index 00000000..b2a37357 --- /dev/null +++ b/include/linux/cgroup.h @@ -0,0 +1,627 @@ +#ifndef _LINUX_CGROUP_H +#define _LINUX_CGROUP_H +/* + * cgroup interface + * + * Copyright (C) 2003 BULL SA + * Copyright (C) 2004-2006 Silicon Graphics, Inc. + * + */ + +#include <linux/sched.h> +#include <linux/cpumask.h> +#include <linux/nodemask.h> +#include <linux/rcupdate.h> +#include <linux/cgroupstats.h> +#include <linux/prio_heap.h> +#include <linux/rwsem.h> +#include <linux/idr.h> + +#ifdef CONFIG_CGROUPS + +struct cgroupfs_root; +struct cgroup_subsys; +struct inode; +struct cgroup; +struct css_id; + +extern int cgroup_init_early(void); +extern int cgroup_init(void); +extern void cgroup_lock(void); +extern int cgroup_lock_is_held(void); +extern bool cgroup_lock_live_group(struct cgroup *cgrp); +extern void cgroup_unlock(void); +extern void cgroup_fork(struct task_struct *p); +extern void cgroup_fork_callbacks(struct task_struct *p); +extern void cgroup_post_fork(struct task_struct *p); +extern void cgroup_exit(struct task_struct *p, int run_callbacks); +extern int cgroupstats_build(struct cgroupstats *stats, + struct dentry *dentry); +extern int cgroup_load_subsys(struct cgroup_subsys *ss); +extern void cgroup_unload_subsys(struct cgroup_subsys *ss); + +extern const struct file_operations proc_cgroup_operations; + +/* Define the enumeration of all builtin cgroup subsystems */ +#define SUBSYS(_x) _x ## _subsys_id, +enum cgroup_subsys_id { +#include <linux/cgroup_subsys.h> + CGROUP_BUILTIN_SUBSYS_COUNT +}; +#undef SUBSYS +/* + * This define indicates the maximum number of subsystems that can be loaded + * at once. We limit to this many since cgroupfs_root has subsys_bits to keep + * track of all of them. + */ +#define CGROUP_SUBSYS_COUNT (BITS_PER_BYTE*sizeof(unsigned long)) + +/* Per-subsystem/per-cgroup state maintained by the system. 
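[Editorial note on the SUBSYS() x-macro above: cgroup_subsys.h (included further down in this patch) contains only SUBSYS(<name>) entries, so the enum picks up one <name>_subsys_id per configured controller. A rough sketch of what the preprocessor generates with, say, only CONFIG_CPUSETS and CONFIG_CGROUP_FREEZER enabled:]

/* illustrative expansion only -- the real enum comes from the #include */
enum cgroup_subsys_id {
	cpuset_subsys_id,		/* from SUBSYS(cpuset)  */
	freezer_subsys_id,		/* from SUBSYS(freezer) */
	CGROUP_BUILTIN_SUBSYS_COUNT
};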
*/ +struct cgroup_subsys_state { + /* + * The cgroup that this subsystem is attached to. Useful + * for subsystems that want to know about the cgroup + * hierarchy structure + */ + struct cgroup *cgroup; + + /* + * State maintained by the cgroup system to allow subsystems + * to be "busy". Should be accessed via css_get(), + * css_tryget() and and css_put(). + */ + + atomic_t refcnt; + + unsigned long flags; + /* ID for this css, if possible */ + struct css_id __rcu *id; +}; + +/* bits in struct cgroup_subsys_state flags field */ +enum { + CSS_ROOT, /* This CSS is the root of the subsystem */ + CSS_REMOVED, /* This CSS is dead */ +}; + +/* + * Call css_get() to hold a reference on the css; it can be used + * for a reference obtained via: + * - an existing ref-counted reference to the css + * - task->cgroups for a locked task + */ + +extern void __css_get(struct cgroup_subsys_state *css, int count); +static inline void css_get(struct cgroup_subsys_state *css) +{ + /* We don't need to reference count the root state */ + if (!test_bit(CSS_ROOT, &css->flags)) + __css_get(css, 1); +} + +static inline bool css_is_removed(struct cgroup_subsys_state *css) +{ + return test_bit(CSS_REMOVED, &css->flags); +} + +/* + * Call css_tryget() to take a reference on a css if your existing + * (known-valid) reference isn't already ref-counted. Returns false if + * the css has been destroyed. + */ + +static inline bool css_tryget(struct cgroup_subsys_state *css) +{ + if (test_bit(CSS_ROOT, &css->flags)) + return true; + while (!atomic_inc_not_zero(&css->refcnt)) { + if (test_bit(CSS_REMOVED, &css->flags)) + return false; + cpu_relax(); + } + return true; +} + +/* + * css_put() should be called to release a reference taken by + * css_get() or css_tryget() + */ + +extern void __css_put(struct cgroup_subsys_state *css, int count); +static inline void css_put(struct cgroup_subsys_state *css) +{ + if (!test_bit(CSS_ROOT, &css->flags)) + __css_put(css, 1); +} + +/* bits in struct cgroup flags field */ +enum { + /* Control Group is dead */ + CGRP_REMOVED, + /* Control Group has ever had a child cgroup or a task */ + CGRP_RELEASABLE, + /* Control Group requires release notifications to userspace */ + CGRP_NOTIFY_ON_RELEASE, + /* + * A thread in rmdir() is wating for this cgroup. + */ + CGRP_WAIT_ON_RMDIR, + /* + * Clone cgroup values when creating a new child cgroup + */ + CGRP_CLONE_CHILDREN, +}; + +struct cgroup { + unsigned long flags; /* "unsigned long" so bitops work */ + + /* + * count users of this cgroup. >0 means busy, but doesn't + * necessarily indicate the number of tasks in the cgroup + */ + atomic_t count; + + /* + * We link our 'sibling' struct into our parent's 'children'. + * Our children link their 'sibling' into our 'children'. + */ + struct list_head sibling; /* my parent's children */ + struct list_head children; /* my children */ + + struct cgroup *parent; /* my parent */ + struct dentry __rcu *dentry; /* cgroup fs entry, RCU protected */ + + /* Private pointers for each registered subsystem */ + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + + struct cgroupfs_root *root; + struct cgroup *top_cgroup; + + /* + * List of cg_cgroup_links pointing at css_sets with + * tasks in this cgroup. Protected by css_set_lock + */ + struct list_head css_sets; + + /* + * Linked list running through all cgroups that can + * potentially be reaped by the release agent. 
Protected by + * release_list_lock + */ + struct list_head release_list; + + /* + * list of pidlists, up to two for each namespace (one for procs, one + * for tasks); created on demand. + */ + struct list_head pidlists; + struct mutex pidlist_mutex; + + /* For RCU-protected deletion */ + struct rcu_head rcu_head; + + /* List of events which userspace want to receive */ + struct list_head event_list; + spinlock_t event_list_lock; +}; + +/* + * A css_set is a structure holding pointers to a set of + * cgroup_subsys_state objects. This saves space in the task struct + * object and speeds up fork()/exit(), since a single inc/dec and a + * list_add()/del() can bump the reference count on the entire cgroup + * set for a task. + */ + +struct css_set { + + /* Reference count */ + atomic_t refcount; + + /* + * List running through all cgroup groups in the same hash + * slot. Protected by css_set_lock + */ + struct hlist_node hlist; + + /* + * List running through all tasks using this cgroup + * group. Protected by css_set_lock + */ + struct list_head tasks; + + /* + * List of cg_cgroup_link objects on link chains from + * cgroups referenced from this css_set. Protected by + * css_set_lock + */ + struct list_head cg_links; + + /* + * Set of subsystem states, one for each subsystem. This array + * is immutable after creation apart from the init_css_set + * during subsystem registration (at boot time) and modular subsystem + * loading/unloading. + */ + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + + /* For RCU-protected deletion */ + struct rcu_head rcu_head; + struct work_struct work; +}; + +/* + * cgroup_map_cb is an abstract callback API for reporting map-valued + * control files + */ + +struct cgroup_map_cb { + int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value); + void *state; +}; + +/* + * struct cftype: handler definitions for cgroup control files + * + * When reading/writing to a file: + * - the cgroup to use is file->f_dentry->d_parent->d_fsdata + * - the 'cftype' of the file is file->f_dentry->d_fsdata + */ + +#define MAX_CFTYPE_NAME 64 +struct cftype { + /* + * By convention, the name should begin with the name of the + * subsystem, followed by a period + */ + char name[MAX_CFTYPE_NAME]; + int private; + /* + * If not 0, file mode is set to this value, otherwise it will + * be figured out automatically + */ + umode_t mode; + + /* + * If non-zero, defines the maximum length of string that can + * be passed to write_string; defaults to 64 + */ + size_t max_write_len; + + int (*open)(struct inode *inode, struct file *file); + ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft, + struct file *file, + char __user *buf, size_t nbytes, loff_t *ppos); + /* + * read_u64() is a shortcut for the common case of returning a + * single integer. Use it in place of read() + */ + u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft); + /* + * read_s64() is a signed version of read_u64() + */ + s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft); + /* + * read_map() is used for defining a map of key/value + * pairs. It should call cb->fill(cb, key, value) for each + * entry. The key/value pairs (and their ordering) should not + * change between reboots. + */ + int (*read_map)(struct cgroup *cont, struct cftype *cft, + struct cgroup_map_cb *cb); + /* + * read_seq_string() is used for outputting a simple sequence + * using seqfile. 
+ */ + int (*read_seq_string)(struct cgroup *cont, struct cftype *cft, + struct seq_file *m); + + ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft, + struct file *file, + const char __user *buf, size_t nbytes, loff_t *ppos); + + /* + * write_u64() is a shortcut for the common case of accepting + * a single integer (as parsed by simple_strtoull) from + * userspace. Use in place of write(); return 0 or error. + */ + int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val); + /* + * write_s64() is a signed version of write_u64() + */ + int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val); + + /* + * write_string() is passed a nul-terminated kernelspace + * buffer of maximum length determined by max_write_len. + * Returns 0 or -ve error code. + */ + int (*write_string)(struct cgroup *cgrp, struct cftype *cft, + const char *buffer); + /* + * trigger() callback can be used to get some kick from the + * userspace, when the actual string written is not important + * at all. The private field can be used to determine the + * kick type for multiplexing. + */ + int (*trigger)(struct cgroup *cgrp, unsigned int event); + + int (*release)(struct inode *inode, struct file *file); + + /* + * register_event() callback will be used to add new userspace + * waiter for changes related to the cftype. Implement it if + * you want to provide this functionality. Use eventfd_signal() + * on eventfd to send notification to userspace. + */ + int (*register_event)(struct cgroup *cgrp, struct cftype *cft, + struct eventfd_ctx *eventfd, const char *args); + /* + * unregister_event() callback will be called when userspace + * closes the eventfd or on cgroup removing. + * This callback must be implemented, if you want provide + * notification functionality. + */ + void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft, + struct eventfd_ctx *eventfd); +}; + +struct cgroup_scanner { + struct cgroup *cg; + int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan); + void (*process_task)(struct task_struct *p, + struct cgroup_scanner *scan); + struct ptr_heap *heap; + void *data; +}; + +/* + * Add a new file to the given cgroup directory. Should only be + * called by subsystems from within a populate() method + */ +int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, + const struct cftype *cft); + +/* + * Add a set of new files to the given cgroup directory. Should + * only be called by subsystems from within a populate() method + */ +int cgroup_add_files(struct cgroup *cgrp, + struct cgroup_subsys *subsys, + const struct cftype cft[], + int count); + +int cgroup_is_removed(const struct cgroup *cgrp); + +int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); + +int cgroup_task_count(const struct cgroup *cgrp); + +/* Return true if cgrp is a descendant of the task's cgroup */ +int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task); + +/* + * When the subsys has to access css and may add permanent refcnt to css, + * it should take care of racy conditions with rmdir(). Following set of + * functions, is for stop/restart rmdir if necessary. + * Because these will call css_get/put, "css" should be alive css. + * + * cgroup_exclude_rmdir(); + * ...do some jobs which may access arbitrary empty cgroup + * cgroup_release_and_wakeup_rmdir(); + * + * When someone removes a cgroup while cgroup_exclude_rmdir() holds it, + * it sleeps and cgroup_release_and_wakeup_rmdir() will wake him up. 
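[Editorial note: the comment above sketches the intended call pattern; a minimal example of hypothetical subsystem code, not part of the patch, would look like:]

/* sketch only: css must already hold a valid reference (see comment above) */
static void example_touch_maybe_dying_cgroup(struct cgroup_subsys_state *css)
{
	cgroup_exclude_rmdir(css);		/* hold off a concurrent rmdir */

	/* ... work that may access an (even empty) cgroup ... */

	cgroup_release_and_wakeup_rmdir(css);	/* wake a waiting rmdir, if any */
}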
+ */ + +void cgroup_exclude_rmdir(struct cgroup_subsys_state *css); +void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css); + +/* + * Control Group taskset, used to pass around set of tasks to cgroup_subsys + * methods. + */ +struct cgroup_taskset; +struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset); +struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset); +struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset); +int cgroup_taskset_size(struct cgroup_taskset *tset); + +/** + * cgroup_taskset_for_each - iterate cgroup_taskset + * @task: the loop cursor + * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all + * @tset: taskset to iterate + */ +#define cgroup_taskset_for_each(task, skip_cgrp, tset) \ + for ((task) = cgroup_taskset_first((tset)); (task); \ + (task) = cgroup_taskset_next((tset))) \ + if (!(skip_cgrp) || \ + cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp)) + +/* + * Control Group subsystem type. + * See Documentation/cgroups/cgroups.txt for details + */ + +struct cgroup_subsys { + struct cgroup_subsys_state *(*create)(struct cgroup *cgrp); + int (*pre_destroy)(struct cgroup *cgrp); + void (*destroy)(struct cgroup *cgrp); + int (*allow_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); + int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); + void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); + void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); + void (*fork)(struct task_struct *task); + void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp, + struct task_struct *task); + int (*populate)(struct cgroup_subsys *ss, struct cgroup *cgrp); + void (*post_clone)(struct cgroup *cgrp); + void (*bind)(struct cgroup *root); + + int subsys_id; + int active; + int disabled; + int early_init; + /* + * True if this subsys uses ID. ID is not available before cgroup_init() + * (not available in early_init time.) + */ + bool use_id; +#define MAX_CGROUP_TYPE_NAMELEN 32 + const char *name; + + /* + * Protects sibling/children links of cgroups in this + * hierarchy, plus protects which hierarchy (or none) the + * subsystem is a part of (i.e. root/sibling). To avoid + * potential deadlocks, the following operations should not be + * undertaken while holding any hierarchy_mutex: + * + * - allocating memory + * - initiating hotplug events + */ + struct mutex hierarchy_mutex; + struct lock_class_key subsys_key; + + /* + * Link to parent, and list entry in parent's children. + * Protected by this->hierarchy_mutex and cgroup_lock() + */ + struct cgroupfs_root *root; + struct list_head sibling; + /* used when use_id == true */ + struct idr idr; + spinlock_t id_lock; + + /* should be defined only by modular subsystems */ + struct module *module; +}; + +#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; +#include <linux/cgroup_subsys.h> +#undef SUBSYS + +static inline struct cgroup_subsys_state *cgroup_subsys_state( + struct cgroup *cgrp, int subsys_id) +{ + return cgrp->subsys[subsys_id]; +} + +/* + * function to get the cgroup_subsys_state which allows for extra + * rcu_dereference_check() conditions, such as locks used during the + * cgroup_subsys::attach() methods. 
+ */ +#define task_subsys_state_check(task, subsys_id, __c) \ + rcu_dereference_check(task->cgroups->subsys[subsys_id], \ + lockdep_is_held(&task->alloc_lock) || \ + cgroup_lock_is_held() || (__c)) + +static inline struct cgroup_subsys_state * +task_subsys_state(struct task_struct *task, int subsys_id) +{ + return task_subsys_state_check(task, subsys_id, false); +} + +static inline struct cgroup* task_cgroup(struct task_struct *task, + int subsys_id) +{ + return task_subsys_state(task, subsys_id)->cgroup; +} + +/* A cgroup_iter should be treated as an opaque object */ +struct cgroup_iter { + struct list_head *cg_link; + struct list_head *task; +}; + +/* + * To iterate across the tasks in a cgroup: + * + * 1) call cgroup_iter_start to initialize an iterator + * + * 2) call cgroup_iter_next() to retrieve member tasks until it + * returns NULL or until you want to end the iteration + * + * 3) call cgroup_iter_end() to destroy the iterator. + * + * Or, call cgroup_scan_tasks() to iterate through every task in a + * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling + * the test_task() callback, but not while calling the process_task() + * callback. + */ +void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it); +struct task_struct *cgroup_iter_next(struct cgroup *cgrp, + struct cgroup_iter *it); +void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); +int cgroup_scan_tasks(struct cgroup_scanner *scan); +int cgroup_attach_task(struct cgroup *, struct task_struct *); +int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); + +/* + * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works + * if cgroup_subsys.use_id == true. It can be used for looking up and scanning. + * CSS ID is assigned at cgroup allocation (create) automatically + * and removed when subsys calls free_css_id() function. This is because + * the lifetime of cgroup_subsys_state is subsys's matter. + * + * Looking up and scanning function should be called under rcu_read_lock(). + * Taking cgroup_mutex()/hierarchy_mutex() is not necessary for following calls. + * But the css returned by this routine can be "not populated yet" or "being + * destroyed". The caller should check css and cgroup's status. + */ + +/* + * Typically Called at ->destroy(), or somewhere the subsys frees + * cgroup_subsys_state. + */ +void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css); + +/* Find a cgroup_subsys_state which has given ID */ + +struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id); + +/* + * Get a cgroup whose id is greater than or equal to id under tree of root. + * Returning a cgroup_subsys_state or NULL. 
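[Editorial note: for the cgroup_iter start/next/end pattern described above, a minimal sketch, not part of the patch:]

static int example_count_cgroup_tasks(struct cgroup *cgrp)
{
	struct cgroup_iter it;
	struct task_struct *tsk;
	int n = 0;

	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it)))
		n++;				/* count each member task */
	cgroup_iter_end(cgrp, &it);

	return n;
}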
+ */ +struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id, + struct cgroup_subsys_state *root, int *foundid); + +/* Returns true if root is ancestor of cg */ +bool css_is_ancestor(struct cgroup_subsys_state *cg, + const struct cgroup_subsys_state *root); + +/* Get id and depth of css */ +unsigned short css_id(struct cgroup_subsys_state *css); +unsigned short css_depth(struct cgroup_subsys_state *css); +struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id); + +#else /* !CONFIG_CGROUPS */ + +static inline int cgroup_init_early(void) { return 0; } +static inline int cgroup_init(void) { return 0; } +static inline void cgroup_fork(struct task_struct *p) {} +static inline void cgroup_fork_callbacks(struct task_struct *p) {} +static inline void cgroup_post_fork(struct task_struct *p) {} +static inline void cgroup_exit(struct task_struct *p, int callbacks) {} + +static inline void cgroup_lock(void) {} +static inline void cgroup_unlock(void) {} +static inline int cgroupstats_build(struct cgroupstats *stats, + struct dentry *dentry) +{ + return -EINVAL; +} + +/* No cgroups - nothing to do */ +static inline int cgroup_attach_task_all(struct task_struct *from, + struct task_struct *t) +{ + return 0; +} + +#endif /* !CONFIG_CGROUPS */ + +#endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h new file mode 100644 index 00000000..0bd390ce --- /dev/null +++ b/include/linux/cgroup_subsys.h @@ -0,0 +1,74 @@ +/* Add subsystem definitions of the form SUBSYS(<name>) in this + * file. Surround each one by a line of comment markers so that + * patches don't collide + */ + +/* */ + +/* */ + +#ifdef CONFIG_CPUSETS +SUBSYS(cpuset) +#endif + +/* */ + +#ifdef CONFIG_CGROUP_DEBUG +SUBSYS(debug) +#endif + +/* */ + +#ifdef CONFIG_CGROUP_SCHED +SUBSYS(cpu_cgroup) +#endif + +/* */ + +#ifdef CONFIG_CGROUP_CPUACCT +SUBSYS(cpuacct) +#endif + +/* */ + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +SUBSYS(mem_cgroup) +#endif + +/* */ + +#ifdef CONFIG_CGROUP_DEVICE +SUBSYS(devices) +#endif + +/* */ + +#ifdef CONFIG_CGROUP_FREEZER +SUBSYS(freezer) +#endif + +/* */ + +#ifdef CONFIG_NET_CLS_CGROUP +SUBSYS(net_cls) +#endif + +/* */ + +#ifdef CONFIG_BLK_CGROUP +SUBSYS(blkio) +#endif + +/* */ + +#ifdef CONFIG_CGROUP_PERF +SUBSYS(perf) +#endif + +/* */ + +#ifdef CONFIG_NETPRIO_CGROUP +SUBSYS(net_prio) +#endif + +/* */ diff --git a/include/linux/cgroupstats.h b/include/linux/cgroupstats.h new file mode 100644 index 00000000..3753c331 --- /dev/null +++ b/include/linux/cgroupstats.h @@ -0,0 +1,71 @@ +/* cgroupstats.h - exporting per-cgroup statistics + * + * Copyright IBM Corporation, 2007 + * Author Balbir Singh <balbir@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + */ + +#ifndef _LINUX_CGROUPSTATS_H +#define _LINUX_CGROUPSTATS_H + +#include <linux/types.h> +#include <linux/taskstats.h> + +/* + * Data shared between user space and kernel space on a per cgroup + * basis. This data is shared using taskstats. 
+ * + * Most of these states are derived by looking at the task->state value + * For the nr_io_wait state, a flag in the delay accounting structure + * indicates that the task is waiting on IO + * + * Each member is aligned to a 8 byte boundary. + */ +struct cgroupstats { + __u64 nr_sleeping; /* Number of tasks sleeping */ + __u64 nr_running; /* Number of tasks running */ + __u64 nr_stopped; /* Number of tasks in stopped state */ + __u64 nr_uninterruptible; /* Number of tasks in uninterruptible */ + /* state */ + __u64 nr_io_wait; /* Number of tasks waiting on IO */ +}; + +/* + * Commands sent from userspace + * Not versioned. New commands should only be inserted at the enum's end + * prior to __CGROUPSTATS_CMD_MAX + */ + +enum { + CGROUPSTATS_CMD_UNSPEC = __TASKSTATS_CMD_MAX, /* Reserved */ + CGROUPSTATS_CMD_GET, /* user->kernel request/get-response */ + CGROUPSTATS_CMD_NEW, /* kernel->user event */ + __CGROUPSTATS_CMD_MAX, +}; + +#define CGROUPSTATS_CMD_MAX (__CGROUPSTATS_CMD_MAX - 1) + +enum { + CGROUPSTATS_TYPE_UNSPEC = 0, /* Reserved */ + CGROUPSTATS_TYPE_CGROUP_STATS, /* contains name + stats */ + __CGROUPSTATS_TYPE_MAX, +}; + +#define CGROUPSTATS_TYPE_MAX (__CGROUPSTATS_TYPE_MAX - 1) + +enum { + CGROUPSTATS_CMD_ATTR_UNSPEC = 0, + CGROUPSTATS_CMD_ATTR_FD, + __CGROUPSTATS_CMD_ATTR_MAX, +}; + +#define CGROUPSTATS_CMD_ATTR_MAX (__CGROUPSTATS_CMD_ATTR_MAX - 1) + +#endif /* _LINUX_CGROUPSTATS_H */ diff --git a/include/linux/chio.h b/include/linux/chio.h new file mode 100644 index 00000000..d9bac7f9 --- /dev/null +++ b/include/linux/chio.h @@ -0,0 +1,168 @@ +/* + * ioctl interface for the scsi media changer driver + */ + +/* changer element types */ +#define CHET_MT 0 /* media transport element (robot) */ +#define CHET_ST 1 /* storage element (media slots) */ +#define CHET_IE 2 /* import/export element */ +#define CHET_DT 3 /* data transfer element (tape/cdrom/whatever) */ +#define CHET_V1 4 /* vendor specific #1 */ +#define CHET_V2 5 /* vendor specific #2 */ +#define CHET_V3 6 /* vendor specific #3 */ +#define CHET_V4 7 /* vendor specific #4 */ + + +/* + * CHIOGPARAMS + * query changer properties + * + * CHIOVGPARAMS + * query vendor-specific element types + * + * accessing elements works by specifing type and unit of the element. + * for example, storage elements are addressed with type = CHET_ST and + * unit = 0 .. 
cp_nslots-1 + * + */ +struct changer_params { + int cp_curpicker; /* current transport element */ + int cp_npickers; /* number of transport elements (CHET_MT) */ + int cp_nslots; /* number of storage elements (CHET_ST) */ + int cp_nportals; /* number of import/export elements (CHET_IE) */ + int cp_ndrives; /* number of data transfer elements (CHET_DT) */ +}; +struct changer_vendor_params { + int cvp_n1; /* number of vendor specific elems (CHET_V1) */ + char cvp_label1[16]; + int cvp_n2; /* number of vendor specific elems (CHET_V2) */ + char cvp_label2[16]; + int cvp_n3; /* number of vendor specific elems (CHET_V3) */ + char cvp_label3[16]; + int cvp_n4; /* number of vendor specific elems (CHET_V4) */ + char cvp_label4[16]; + int reserved[8]; +}; + + +/* + * CHIOMOVE + * move a medium from one element to another + */ +struct changer_move { + int cm_fromtype; /* type/unit of source element */ + int cm_fromunit; + int cm_totype; /* type/unit of destination element */ + int cm_tounit; + int cm_flags; +}; +#define CM_INVERT 1 /* flag: rotate media (for double-sided like MOD) */ + + +/* + * CHIOEXCHANGE + * move one medium from element #1 to element #2, + * and another one from element #2 to element #3. + * element #1 and #3 are allowed to be identical. + */ +struct changer_exchange { + int ce_srctype; /* type/unit of element #1 */ + int ce_srcunit; + int ce_fdsttype; /* type/unit of element #2 */ + int ce_fdstunit; + int ce_sdsttype; /* type/unit of element #3 */ + int ce_sdstunit; + int ce_flags; +}; +#define CE_INVERT1 1 +#define CE_INVERT2 2 + + +/* + * CHIOPOSITION + * move the transport element (robot arm) to a specific element. + */ +struct changer_position { + int cp_type; + int cp_unit; + int cp_flags; +}; +#define CP_INVERT 1 + + +/* + * CHIOGSTATUS + * get element status for all elements of a specific type + */ +struct changer_element_status { + int ces_type; + unsigned char __user *ces_data; +}; +#define CESTATUS_FULL 0x01 /* full */ +#define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */ +#define CESTATUS_EXCEPT 0x04 /* error condition */ +#define CESTATUS_ACCESS 0x08 /* access allowed */ +#define CESTATUS_EXENAB 0x10 /* element can export media */ +#define CESTATUS_INENAB 0x20 /* element can import media */ + + +/* + * CHIOGELEM + * get more detailed status information for a single element + */ +struct changer_get_element { + int cge_type; /* type/unit */ + int cge_unit; + int cge_status; /* status */ + int cge_errno; /* errno */ + int cge_srctype; /* source element of the last move/exchange */ + int cge_srcunit; + int cge_id; /* scsi id (for data transfer elements) */ + int cge_lun; /* scsi lun (for data transfer elements) */ + char cge_pvoltag[36]; /* primary volume tag */ + char cge_avoltag[36]; /* alternate volume tag */ + int cge_flags; +}; +/* flags */ +#define CGE_ERRNO 0x01 /* errno available */ +#define CGE_INVERT 0x02 /* media inverted */ +#define CGE_SRC 0x04 /* media src available */ +#define CGE_IDLUN 0x08 /* ID+LUN available */ +#define CGE_PVOLTAG 0x10 /* primary volume tag available */ +#define CGE_AVOLTAG 0x20 /* alternate volume tag available */ + + +/* + * CHIOSVOLTAG + * set volume tag + */ +struct changer_set_voltag { + int csv_type; /* type/unit */ + int csv_unit; + char csv_voltag[36]; /* volume tag */ + int csv_flags; +}; +#define CSV_PVOLTAG 0x01 /* primary volume tag */ +#define CSV_AVOLTAG 0x02 /* alternate volume tag */ +#define CSV_CLEARTAG 0x04 /* clear volume tag */ + +/* ioctls */ +#define CHIOMOVE _IOW('c', 1,struct changer_move) 
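[Editorial note: the remaining changer ioctls follow below; as a short userspace sketch (hypothetical fd and slot numbers, not part of the patch), CHIOMOVE is driven like this:]

#include <sys/ioctl.h>

/* sketch only: move media from storage slot 3 into drive 0 */
int example_load_drive(int changer_fd)
{
	struct changer_move mv = {
		.cm_fromtype = CHET_ST,		/* storage element (slot) */
		.cm_fromunit = 3,
		.cm_totype   = CHET_DT,		/* data transfer element (drive) */
		.cm_tounit   = 0,
		.cm_flags    = 0,
	};

	return ioctl(changer_fd, CHIOMOVE, &mv);
}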
+#define CHIOEXCHANGE _IOW('c', 2,struct changer_exchange) +#define CHIOPOSITION _IOW('c', 3,struct changer_position) +#define CHIOGPICKER _IOR('c', 4,int) /* not impl. */ +#define CHIOSPICKER _IOW('c', 5,int) /* not impl. */ +#define CHIOGPARAMS _IOR('c', 6,struct changer_params) +#define CHIOGSTATUS _IOW('c', 8,struct changer_element_status) +#define CHIOGELEM _IOW('c',16,struct changer_get_element) +#define CHIOINITELEM _IO('c',17) +#define CHIOSVOLTAG _IOW('c',18,struct changer_set_voltag) +#define CHIOGVPARAMS _IOR('c',19,struct changer_vendor_params) + +/* ---------------------------------------------------------------------- */ + +/* + * Local variables: + * c-basic-offset: 8 + * End: + */ diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h new file mode 100644 index 00000000..90f2471d --- /dev/null +++ b/include/linux/circ_buf.h @@ -0,0 +1,36 @@ +/* + * See Documentation/circular-buffers.txt for more information. + */ + +#ifndef _LINUX_CIRC_BUF_H +#define _LINUX_CIRC_BUF_H 1 + +struct circ_buf { + char *buf; + int head; + int tail; +}; + +/* Return count in buffer. */ +#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1)) + +/* Return space available, 0..size-1. We always leave one free char + as a completely full buffer has head == tail, which is the same as + empty. */ +#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size)) + +/* Return count up to the end of the buffer. Carefully avoid + accessing head and tail more than once, so they can change + underneath us without returning inconsistent results. */ +#define CIRC_CNT_TO_END(head,tail,size) \ + ({int end = (size) - (tail); \ + int n = ((head) + end) & ((size)-1); \ + n < end ? n : end;}) + +/* Return space available up to the end of the buffer. */ +#define CIRC_SPACE_TO_END(head,tail,size) \ + ({int end = (size) - 1 - (head); \ + int n = (end + (tail)) & ((size)-1); \ + n <= end ? n : end+1;}) + +#endif /* _LINUX_CIRC_BUF_H */ diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h new file mode 100644 index 00000000..42e55dee --- /dev/null +++ b/include/linux/cleancache.h @@ -0,0 +1,122 @@ +#ifndef _LINUX_CLEANCACHE_H +#define _LINUX_CLEANCACHE_H + +#include <linux/fs.h> +#include <linux/exportfs.h> +#include <linux/mm.h> + +#define CLEANCACHE_KEY_MAX 6 + +/* + * cleancache requires every file with a page in cleancache to have a + * unique key unless/until the file is removed/truncated. 
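[Editorial note, referring back to the circ_buf macros in include/linux/circ_buf.h above: a short worked example, assuming a power-of-two buffer size as the macros require:]

static void example_circ_accounting(void)
{
	/* sketch only: head == 6, tail == 2, size == 8 */
	struct circ_buf cb = { .buf = NULL, .head = 6, .tail = 2 };
	int size = 8;

	/* elements stored: (6 - 2) & 7 == 4 */
	int cnt = CIRC_CNT(cb.head, cb.tail, size);

	/* free space: one slot always stays empty, so 8 - 4 - 1 == 3 */
	int space = CIRC_SPACE(cb.head, cb.tail, size);

	(void)cnt;
	(void)space;
}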
For some + * filesystems, the inode number is unique, but for "modern" filesystems + * an exportable filehandle is required (see exportfs.h) + */ +struct cleancache_filekey { + union { + ino_t ino; + __u32 fh[CLEANCACHE_KEY_MAX]; + u32 key[CLEANCACHE_KEY_MAX]; + } u; +}; + +struct cleancache_ops { + int (*init_fs)(size_t); + int (*init_shared_fs)(char *uuid, size_t); + int (*get_page)(int, struct cleancache_filekey, + pgoff_t, struct page *); + void (*put_page)(int, struct cleancache_filekey, + pgoff_t, struct page *); + void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t); + void (*invalidate_inode)(int, struct cleancache_filekey); + void (*invalidate_fs)(int); +}; + +extern struct cleancache_ops + cleancache_register_ops(struct cleancache_ops *ops); +extern void __cleancache_init_fs(struct super_block *); +extern void __cleancache_init_shared_fs(char *, struct super_block *); +extern int __cleancache_get_page(struct page *); +extern void __cleancache_put_page(struct page *); +extern void __cleancache_invalidate_page(struct address_space *, struct page *); +extern void __cleancache_invalidate_inode(struct address_space *); +extern void __cleancache_invalidate_fs(struct super_block *); +extern int cleancache_enabled; + +#ifdef CONFIG_CLEANCACHE +static inline bool cleancache_fs_enabled(struct page *page) +{ + return page->mapping->host->i_sb->cleancache_poolid >= 0; +} +static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) +{ + return mapping->host->i_sb->cleancache_poolid >= 0; +} +#else +#define cleancache_enabled (0) +#define cleancache_fs_enabled(_page) (0) +#define cleancache_fs_enabled_mapping(_page) (0) +#endif + +/* + * The shim layer provided by these inline functions allows the compiler + * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE + * is disabled, to a single global variable check if CONFIG_CLEANCACHE + * is enabled but no cleancache "backend" has dynamically enabled it, + * and, for the most frequent cleancache ops, to a single global variable + * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled + * and a cleancache backend has dynamically enabled cleancache, but the + * filesystem referenced by that cleancache op has not enabled cleancache. + * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially + * no measurable performance impact. + */ + +static inline void cleancache_init_fs(struct super_block *sb) +{ + if (cleancache_enabled) + __cleancache_init_fs(sb); +} + +static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb) +{ + if (cleancache_enabled) + __cleancache_init_shared_fs(uuid, sb); +} + +static inline int cleancache_get_page(struct page *page) +{ + int ret = -1; + + if (cleancache_enabled && cleancache_fs_enabled(page)) + ret = __cleancache_get_page(page); + return ret; +} + +static inline void cleancache_put_page(struct page *page) +{ + if (cleancache_enabled && cleancache_fs_enabled(page)) + __cleancache_put_page(page); +} + +static inline void cleancache_invalidate_page(struct address_space *mapping, + struct page *page) +{ + /* careful... 
page->mapping is NULL sometimes when this is called */ + if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) + __cleancache_invalidate_page(mapping, page); +} + +static inline void cleancache_invalidate_inode(struct address_space *mapping) +{ + if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) + __cleancache_invalidate_inode(mapping); +} + +static inline void cleancache_invalidate_fs(struct super_block *sb) +{ + if (cleancache_enabled) + __cleancache_invalidate_fs(sb); +} + +#endif /* _LINUX_CLEANCACHE_H */ diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h new file mode 100644 index 00000000..5e4312b6 --- /dev/null +++ b/include/linux/clk-private.h @@ -0,0 +1,196 @@ +/* + * linux/include/linux/clk-private.h + * + * Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com> + * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_CLK_PRIVATE_H +#define __LINUX_CLK_PRIVATE_H + +#include <linux/clk-provider.h> +#include <linux/list.h> + +/* + * WARNING: Do not include clk-private.h from any file that implements struct + * clk_ops. Doing so is a layering violation! + * + * This header exists only to allow for statically initialized clock data. Any + * static clock data must be defined in a separate file from the logic that + * implements the clock operations for that same data. + */ + +#ifdef CONFIG_COMMON_CLK + +struct clk { + const char *name; + const struct clk_ops *ops; + struct clk_hw *hw; + struct clk *parent; + char **parent_names; + struct clk **parents; + u8 num_parents; + unsigned long rate; + unsigned long new_rate; + unsigned long flags; + unsigned int enable_count; + unsigned int prepare_count; + struct hlist_head children; + struct hlist_node child_node; + unsigned int notifier_count; +#ifdef CONFIG_COMMON_CLK_DEBUG + struct dentry *dentry; +#endif +}; + +/* + * DOC: Basic clock implementations common to many platforms + * + * Each basic clock hardware type is comprised of a structure describing the + * clock hardware, implementations of the relevant callbacks in struct clk_ops, + * unique flags for that hardware type, a registration function and an + * alternative macro for static initialization + */ + +extern struct clk_ops clk_fixed_rate_ops; + +#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \ + _fixed_rate_flags) \ + static struct clk _name; \ + static char *_name##_parent_names[] = {}; \ + static struct clk_fixed_rate _name##_hw = { \ + .hw = { \ + .clk = &_name, \ + }, \ + .fixed_rate = _rate, \ + .flags = _fixed_rate_flags, \ + }; \ + static struct clk _name = { \ + .name = #_name, \ + .ops = &clk_fixed_rate_ops, \ + .hw = &_name##_hw.hw, \ + .parent_names = _name##_parent_names, \ + .num_parents = \ + ARRAY_SIZE(_name##_parent_names), \ + .flags = _flags, \ + }; + +extern struct clk_ops clk_gate_ops; + +#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \ + _flags, _reg, _bit_idx, \ + _gate_flags, _lock) \ + static struct clk _name; \ + static char *_name##_parent_names[] = { \ + _parent_name, \ + }; \ + static struct clk *_name##_parents[] = { \ + _parent_ptr, \ + }; \ + static struct clk_gate _name##_hw = { \ + .hw = { \ + .clk = &_name, \ + }, \ + .reg = _reg, \ + .bit_idx = _bit_idx, \ + .flags = _gate_flags, \ + .lock = _lock, \ + }; \ + static struct clk _name = { \ + .name = 
#_name, \ + .ops = &clk_gate_ops, \ + .hw = &_name##_hw.hw, \ + .parent_names = _name##_parent_names, \ + .num_parents = \ + ARRAY_SIZE(_name##_parent_names), \ + .parents = _name##_parents, \ + .flags = _flags, \ + }; + +extern struct clk_ops clk_divider_ops; + +#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \ + _flags, _reg, _shift, _width, \ + _divider_flags, _lock) \ + static struct clk _name; \ + static char *_name##_parent_names[] = { \ + _parent_name, \ + }; \ + static struct clk *_name##_parents[] = { \ + _parent_ptr, \ + }; \ + static struct clk_divider _name##_hw = { \ + .hw = { \ + .clk = &_name, \ + }, \ + .reg = _reg, \ + .shift = _shift, \ + .width = _width, \ + .flags = _divider_flags, \ + .lock = _lock, \ + }; \ + static struct clk _name = { \ + .name = #_name, \ + .ops = &clk_divider_ops, \ + .hw = &_name##_hw.hw, \ + .parent_names = _name##_parent_names, \ + .num_parents = \ + ARRAY_SIZE(_name##_parent_names), \ + .parents = _name##_parents, \ + .flags = _flags, \ + }; + +extern struct clk_ops clk_mux_ops; + +#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \ + _reg, _shift, _width, \ + _mux_flags, _lock) \ + static struct clk _name; \ + static struct clk_mux _name##_hw = { \ + .hw = { \ + .clk = &_name, \ + }, \ + .reg = _reg, \ + .shift = _shift, \ + .width = _width, \ + .flags = _mux_flags, \ + .lock = _lock, \ + }; \ + static struct clk _name = { \ + .name = #_name, \ + .ops = &clk_mux_ops, \ + .hw = &_name##_hw.hw, \ + .parent_names = _parent_names, \ + .num_parents = \ + ARRAY_SIZE(_parent_names), \ + .parents = _parents, \ + .flags = _flags, \ + }; + +/** + * __clk_init - initialize the data structures in a struct clk + * @dev: device initializing this clk, placeholder for now + * @clk: clk being initialized + * + * Initializes the lists in struct clk, queries the hardware for the + * parent and rate and sets them both. + * + * Any struct clk passed into __clk_init must have the following members + * populated: + * .name + * .ops + * .hw + * .parent_names + * .num_parents + * .flags + * + * It is not necessary to call clk_register if __clk_init is used directly with + * statically initialized clock data. + */ +void __clk_init(struct device *dev, struct clk *clk); + +#endif /* CONFIG_COMMON_CLK */ +#endif /* CLK_PRIVATE_H */ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h new file mode 100644 index 00000000..5508897a --- /dev/null +++ b/include/linux/clk-provider.h @@ -0,0 +1,300 @@ +/* + * linux/include/linux/clk-provider.h + * + * Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com> + * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_CLK_PROVIDER_H +#define __LINUX_CLK_PROVIDER_H + +#include <linux/clk.h> + +#ifdef CONFIG_COMMON_CLK + +/** + * struct clk_hw - handle for traversing from a struct clk to its corresponding + * hardware-specific structure. struct clk_hw should be declared within struct + * clk_foo and then referenced by the struct clk instance that uses struct + * clk_foo's clk_ops + * + * clk: pointer to the struct clk instance that points back to this struct + * clk_hw instance + */ +struct clk_hw { + struct clk *clk; +}; + +/* + * flags used across common struct clk. these flags should only affect the + * top-level framework. 
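[Editorial note, looking back at the static-initialization helpers in clk-private.h above: a hypothetical board file (sketch only, names invented) could describe a fixed root oscillator and hand it to __clk_init() like this:]

/* sketch only: a 24 MHz crystal with no parent */
DEFINE_CLK_FIXED_RATE(example_osc, CLK_IS_ROOT, 24000000, 0);

static void __init example_clk_setup(void)
{
	/* statically initialized clock data skips clk_register() */
	__clk_init(NULL, &example_osc);
}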
custom flags for dealing with hardware specifics + * belong in struct clk_foo + */ +#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ +#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ +#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ +#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ +#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */ + +/** + * struct clk_ops - Callback operations for hardware clocks; these are to + * be provided by the clock implementation, and will be called by drivers + * through the clk_* api. + * + * @prepare: Prepare the clock for enabling. This must not return until + * the clock is fully prepared, and it's safe to call clk_enable. + * This callback is intended to allow clock implementations to + * do any initialisation that may sleep. Called with + * prepare_lock held. + * + * @unprepare: Release the clock from its prepared state. This will typically + * undo any work done in the @prepare callback. Called with + * prepare_lock held. + * + * @enable: Enable the clock atomically. This must not return until the + * clock is generating a valid clock signal, usable by consumer + * devices. Called with enable_lock held. This function must not + * sleep. + * + * @disable: Disable the clock atomically. Called with enable_lock held. + * This function must not sleep. + * + * @recalc_rate Recalculate the rate of this clock, by quering hardware. The + * parent rate is an input parameter. It is up to the caller to + * insure that the prepare_mutex is held across this call. + * Returns the calculated rate. Optional, but recommended - if + * this op is not set then clock rate will be initialized to 0. + * + * @round_rate: Given a target rate as input, returns the closest rate actually + * supported by the clock. + * + * @get_parent: Queries the hardware to determine the parent of a clock. The + * return value is a u8 which specifies the index corresponding to + * the parent clock. This index can be applied to either the + * .parent_names or .parents arrays. In short, this function + * translates the parent value read from hardware into an array + * index. Currently only called when the clock is initialized by + * __clk_init. This callback is mandatory for clocks with + * multiple parents. It is optional (and unnecessary) for clocks + * with 0 or 1 parents. + * + * @set_parent: Change the input source of this clock; for clocks with multiple + * possible parents specify a new parent by passing in the index + * as a u8 corresponding to the parent in either the .parent_names + * or .parents arrays. This function in affect translates an + * array index into the value programmed into the hardware. + * Returns 0 on success, -EERROR otherwise. + * + * @set_rate: Change the rate of this clock. If this callback returns + * CLK_SET_RATE_PARENT, the rate change will be propagated to the + * parent clock (which may propagate again if the parent clock + * also sets this flag). The requested rate of the parent is + * passed back from the callback in the second 'unsigned long *' + * argument. Note that it is up to the hardware clock's set_rate + * implementation to insure that clocks do not run out of spec + * when propgating the call to set_rate up to the parent. One way + * to do this is to gate the clock (via clk_disable and/or + * clk_unprepare) before calling clk_set_rate, then ungating it + * afterward. If your clock also has the CLK_GATE_SET_RATE flag + * set then this will insure safety. 
Returns 0 on success, + * -EERROR otherwise. + * + * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow + * implementations to split any work between atomic (enable) and sleepable + * (prepare) contexts. If enabling a clock requires code that might sleep, + * this must be done in clk_prepare. Clock enable code that will never be + * called in a sleepable context may be implement in clk_enable. + * + * Typically, drivers will call clk_prepare when a clock may be needed later + * (eg. when a device is opened), and clk_enable when the clock is actually + * required (eg. from an interrupt). Note that clk_prepare MUST have been + * called before clk_enable. + */ +struct clk_ops { + int (*prepare)(struct clk_hw *hw); + void (*unprepare)(struct clk_hw *hw); + int (*enable)(struct clk_hw *hw); + void (*disable)(struct clk_hw *hw); + int (*is_enabled)(struct clk_hw *hw); + unsigned long (*recalc_rate)(struct clk_hw *hw, + unsigned long parent_rate); + long (*round_rate)(struct clk_hw *hw, unsigned long, + unsigned long *); + int (*set_parent)(struct clk_hw *hw, u8 index); + u8 (*get_parent)(struct clk_hw *hw); + int (*set_rate)(struct clk_hw *hw, unsigned long); + void (*init)(struct clk_hw *hw); +}; + +/* + * DOC: Basic clock implementations common to many platforms + * + * Each basic clock hardware type is comprised of a structure describing the + * clock hardware, implementations of the relevant callbacks in struct clk_ops, + * unique flags for that hardware type, a registration function and an + * alternative macro for static initialization + */ + +/** + * struct clk_fixed_rate - fixed-rate clock + * @hw: handle between common and hardware-specific interfaces + * @fixed_rate: constant frequency of clock + */ +struct clk_fixed_rate { + struct clk_hw hw; + unsigned long fixed_rate; + u8 flags; +}; + +struct clk *clk_register_fixed_rate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned long fixed_rate); + +/** + * struct clk_gate - gating clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register controlling gate + * @bit_idx: single bit controlling gate + * @flags: hardware-specific flags + * @lock: register lock + * + * Clock which can gate its output. Implements .enable & .disable + * + * Flags: + * CLK_GATE_SET_DISABLE - by default this clock sets the bit at bit_idx to + * enable the clock. Setting this flag does the opposite: setting the bit + * disable the clock and clearing it enables the clock + */ +struct clk_gate { + struct clk_hw hw; + void __iomem *reg; + u8 bit_idx; + u8 flags; + spinlock_t *lock; + char *parent[1]; +}; + +#define CLK_GATE_SET_TO_DISABLE BIT(0) + +struct clk *clk_register_gate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, + u8 clk_gate_flags, spinlock_t *lock); + +/** + * struct clk_divider - adjustable divider clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register containing the divider + * @shift: shift to the divider bit field + * @width: width of the divider bit field + * @lock: register lock + * + * Clock with an adjustable divider affecting its output frequency. Implements + * .recalc_rate, .set_rate and .round_rate + * + * Flags: + * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the + * register plus one. 
If CLK_DIVIDER_ONE_BASED is set then the divider is + * the raw value read from the register, with the value of zero considered + * invalid + * CLK_DIVIDER_POWER_OF_TWO - clock divisor is 2 raised to the value read from + * the hardware register + */ +struct clk_divider { + struct clk_hw hw; + void __iomem *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; + char *parent[1]; +}; + +#define CLK_DIVIDER_ONE_BASED BIT(0) +#define CLK_DIVIDER_POWER_OF_TWO BIT(1) + +struct clk *clk_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, spinlock_t *lock); + +/** + * struct clk_mux - multiplexer clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register controlling multiplexer + * @shift: shift to multiplexer bit field + * @width: width of mutliplexer bit field + * @num_clks: number of parent clocks + * @lock: register lock + * + * Clock with multiple selectable parents. Implements .get_parent, .set_parent + * and .recalc_rate + * + * Flags: + * CLK_MUX_INDEX_ONE - register index starts at 1, not 0 + * CLK_MUX_INDEX_BITWISE - register index is a single bit (power of two) + */ +struct clk_mux { + struct clk_hw hw; + void __iomem *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +#define CLK_MUX_INDEX_ONE BIT(0) +#define CLK_MUX_INDEX_BIT BIT(1) + +struct clk *clk_register_mux(struct device *dev, const char *name, + char **parent_names, u8 num_parents, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags, spinlock_t *lock); + +/** + * clk_register - allocate a new clock, register it and return an opaque cookie + * @dev: device that is registering this clock + * @name: clock name + * @ops: operations this clock supports + * @hw: link to hardware-specific clock data + * @parent_names: array of string names for all possible parents + * @num_parents: number of possible parents + * @flags: framework-level hints and quirks + * + * clk_register is the primary interface for populating the clock tree with new + * clock nodes. It returns a pointer to the newly allocated struct clk which + * cannot be dereferenced by driver code but may be used in conjuction with the + * rest of the clock API. + */ +struct clk *clk_register(struct device *dev, const char *name, + const struct clk_ops *ops, struct clk_hw *hw, + char **parent_names, u8 num_parents, unsigned long flags); + +/* helper functions */ +const char *__clk_get_name(struct clk *clk); +struct clk_hw *__clk_get_hw(struct clk *clk); +u8 __clk_get_num_parents(struct clk *clk); +struct clk *__clk_get_parent(struct clk *clk); +inline int __clk_get_enable_count(struct clk *clk); +inline int __clk_get_prepare_count(struct clk *clk); +unsigned long __clk_get_rate(struct clk *clk); +unsigned long __clk_get_flags(struct clk *clk); +int __clk_is_enabled(struct clk *clk); +struct clk *__clk_lookup(const char *name); + +/* + * FIXME clock api without lock protection + */ +int __clk_prepare(struct clk *clk); +void __clk_unprepare(struct clk *clk); +void __clk_reparent(struct clk *clk, struct clk *new_parent); +unsigned long __clk_round_rate(struct clk *clk, unsigned long rate); + +#endif /* CONFIG_COMMON_CLK */ +#endif /* CLK_PROVIDER_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h new file mode 100644 index 00000000..b0252726 --- /dev/null +++ b/include/linux/clk.h @@ -0,0 +1,281 @@ +/* + * linux/include/linux/clk.h + * + * Copyright (C) 2004 ARM Limited. 
+ * Written by Deep Blue Solutions Limited. + * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_CLK_H +#define __LINUX_CLK_H + +#include <linux/kernel.h> +#include <linux/notifier.h> + +struct device; + +struct clk; + +#ifdef CONFIG_COMMON_CLK + +/** + * DOC: clk notifier callback types + * + * PRE_RATE_CHANGE - called immediately before the clk rate is changed, + * to indicate that the rate change will proceed. Drivers must + * immediately terminate any operations that will be affected by the + * rate change. Callbacks may either return NOTIFY_DONE or + * NOTIFY_STOP. + * + * ABORT_RATE_CHANGE: called if the rate change failed for some reason + * after PRE_RATE_CHANGE. In this case, all registered notifiers on + * the clk will be called with ABORT_RATE_CHANGE. Callbacks must + * always return NOTIFY_DONE. + * + * POST_RATE_CHANGE - called after the clk rate change has successfully + * completed. Callbacks must always return NOTIFY_DONE. + * + */ +#define PRE_RATE_CHANGE BIT(0) +#define POST_RATE_CHANGE BIT(1) +#define ABORT_RATE_CHANGE BIT(2) + +/** + * struct clk_notifier - associate a clk with a notifier + * @clk: struct clk * to associate the notifier with + * @notifier_head: a blocking_notifier_head for this clk + * @node: linked list pointers + * + * A list of struct clk_notifier is maintained by the notifier code. + * An entry is created whenever code registers the first notifier on a + * particular @clk. Future notifiers on that @clk are added to the + * @notifier_head. + */ +struct clk_notifier { + struct clk *clk; + struct srcu_notifier_head notifier_head; + struct list_head node; +}; + +/** + * struct clk_notifier_data - rate data to pass to the notifier callback + * @clk: struct clk * being changed + * @old_rate: previous rate of this clk + * @new_rate: new rate of this clk + * + * For a pre-notifier, old_rate is the clk's rate before this rate + * change, and new_rate is what the rate will be in the future. For a + * post-notifier, old_rate and new_rate are both set to the clk's + * current rate (this was done to optimize the implementation). + */ +struct clk_notifier_data { + struct clk *clk; + unsigned long old_rate; + unsigned long new_rate; +}; + +int clk_notifier_register(struct clk *clk, struct notifier_block *nb); + +int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); + +#endif /* !CONFIG_COMMON_CLK */ + +/** + * clk_get - lookup and obtain a reference to a clock producer. + * @dev: device for clock "consumer" + * @id: clock comsumer ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * Drivers must assume that the clock source is not enabled. + * + * clk_get should not be called from within interrupt context. + */ +struct clk *clk_get(struct device *dev, const char *id); + +/** + * clk_prepare - prepare a clock source + * @clk: clock source + * + * This prepares the clock source for use. + * + * Must not be called from within atomic context. 
+ */ +#ifdef CONFIG_HAVE_CLK_PREPARE +int clk_prepare(struct clk *clk); +#else +static inline int clk_prepare(struct clk *clk) +{ + might_sleep(); + return 0; +} +#endif + +/** + * clk_enable - inform the system when the clock source should be running. + * @clk: clock source + * + * If the clock can not be enabled/disabled, this should return success. + * + * May be called from atomic contexts. + * + * Returns success (0) or negative errno. + */ +int clk_enable(struct clk *clk); + +/** + * clk_disable - inform the system when the clock source is no longer required. + * @clk: clock source + * + * Inform the system that a clock source is no longer required by + * a driver and may be shut down. + * + * May be called from atomic contexts. + * + * Implementation detail: if the clock source is shared between + * multiple drivers, clk_enable() calls must be balanced by the + * same number of clk_disable() calls for the clock source to be + * disabled. + */ +void clk_disable(struct clk *clk); + + +/** + * clk_unprepare - undo preparation of a clock source + * @clk: clock source + * + * This undoes a previously prepared clock. The caller must balance + * the number of prepare and unprepare calls. + * + * Must not be called from within atomic context. + */ +#ifdef CONFIG_HAVE_CLK_PREPARE +void clk_unprepare(struct clk *clk); +#else +static inline void clk_unprepare(struct clk *clk) +{ + might_sleep(); +} +#endif + +/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ +static inline int clk_prepare_enable(struct clk *clk) +{ + int ret; + + ret = clk_prepare(clk); + if (ret) + return ret; + ret = clk_enable(clk); + if (ret) + clk_unprepare(clk); + + return ret; +} + +/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */ +static inline void clk_disable_unprepare(struct clk *clk) +{ + clk_disable(clk); + clk_unprepare(clk); +} + +/** + * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. + * This is only valid once the clock source has been enabled. + * @clk: clock source + */ +unsigned long clk_get_rate(struct clk *clk); + +/** + * clk_put - "free" the clock source + * @clk: clock source + * + * Note: drivers must ensure that all clk_enable calls made on this + * clock source are balanced by clk_disable calls prior to calling + * this function. + * + * clk_put should not be called from within interrupt context. + */ +void clk_put(struct clk *clk); + + +/* + * The remaining APIs are optional for machine class support. + */ + + +/** + * clk_round_rate - adjust a rate to the exact rate a clock can provide + * @clk: clock source + * @rate: desired clock rate in Hz + * + * Returns rounded clock rate in Hz, or negative errno. + */ +long clk_round_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_rate - set the clock rate for a clock source + * @clk: clock source + * @rate: desired clock rate in Hz + * + * Returns success (0) or negative errno. + */ +int clk_set_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_parent - set the parent clock source for this clock + * @clk: clock source + * @parent: parent clock source + * + * Returns success (0) or negative errno. + */ +int clk_set_parent(struct clk *clk, struct clk *parent); + +/** + * clk_get_parent - get the parent clock source for this clock + * @clk: clock source + * + * Returns struct clk corresponding to parent clock source, or + * valid IS_ERR() condition containing errno. 
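/*
 * Editorial sketch (hypothetical driver; "uartclk" is a made-up connection
 * id): typical consumer usage of the API documented above, pairing
 * clk_prepare_enable() with clk_disable_unprepare() and rounding the rate
 * before setting it.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_setup_clock(struct device *dev, unsigned long target_hz)
{
	struct clk *clk;
	long rounded;
	int ret;

	clk = clk_get(dev, "uartclk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* may sleep; not for atomic context */
	if (ret)
		goto put;

	rounded = clk_round_rate(clk, target_hz);
	if (rounded > 0)
		ret = clk_set_rate(clk, rounded);
	if (ret)
		clk_disable_unprepare(clk);
put:
	if (ret)
		clk_put(clk);
	return ret;
}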
+ */ +struct clk *clk_get_parent(struct clk *clk); + +/** + * clk_get_sys - get a clock based upon the device name + * @dev_id: device name + * @con_id: connection ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev_id and @con_id to determine the clock consumer, and + * thereby the clock producer. In contrast to clk_get() this function + * takes the device name instead of the device itself for identification. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_get_sys should not be called from within interrupt context. + */ +struct clk *clk_get_sys(const char *dev_id, const char *con_id); + +/** + * clk_add_alias - add a new clock alias + * @alias: name for clock alias + * @alias_dev_name: device name + * @id: platform specific clock name + * @dev: device + * + * Allows using generic clock names for drivers by adding a new alias. + * Assumes clkdev, see clkdev.h for more info. + */ +int clk_add_alias(const char *alias, const char *alias_dev_name, char *id, + struct device *dev); + +#endif diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h new file mode 100644 index 00000000..d9a4fd02 --- /dev/null +++ b/include/linux/clkdev.h @@ -0,0 +1,43 @@ +/* + * include/linux/clkdev.h + * + * Copyright (C) 2008 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Helper for the clk API to assist looking up a struct clk. + */ +#ifndef __CLKDEV_H +#define __CLKDEV_H + +#include <asm/clkdev.h> + +struct clk; +struct device; + +struct clk_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct clk *clk; +}; + +#define CLKDEV_INIT(d, n, c) \ + { \ + .dev_id = d, \ + .con_id = n, \ + .clk = c, \ + } + +struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, + const char *dev_fmt, ...); + +void clkdev_add(struct clk_lookup *cl); +void clkdev_drop(struct clk_lookup *cl); + +void clkdev_add_table(struct clk_lookup *, size_t); +int clk_add_alias(const char *, const char *, char *, struct device *); + +#endif diff --git a/include/linux/clksrc-dbx500-prcmu.h b/include/linux/clksrc-dbx500-prcmu.h new file mode 100644 index 00000000..4fb8119c --- /dev/null +++ b/include/linux/clksrc-dbx500-prcmu.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * License Terms: GNU General Public License v2 + * Author: Mattias Wallin <mattias.wallin@stericsson.com> + * + */ +#ifndef __CLKSRC_DBX500_PRCMU_H +#define __CLKSRC_DBX500_PRCMU_H + +#include <linux/init.h> +#include <linux/io.h> + +#ifdef CONFIG_CLKSRC_DBX500_PRCMU +void __init clksrc_dbx500_prcmu_init(void __iomem *base); +#else +static inline void __init clksrc_dbx500_prcmu_init(void __iomem *base) {} +#endif + +#endif diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h new file mode 100644 index 00000000..81e803e9 --- /dev/null +++ b/include/linux/clockchips.h @@ -0,0 +1,170 @@ +/* linux/include/linux/clockchips.h + * + * This file contains the structure definitions for clockchips. + * + * If you are not a clockchip, or the time of day code, you should + * not be including this file! 
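/*
 * Editorial sketch (device and connection names are hypothetical): making a
 * clock reachable through clk_get() by adding a clkdev lookup with the
 * helpers declared in clkdev.h above.
 */
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>

static void __init example_add_lookup(struct clk *uart_clk)
{
	struct clk_lookup *cl;

	/* matches clk_get(&pdev->dev, "uartclk") for device "serial8250.0" */
	cl = clkdev_alloc(uart_clk, "uartclk", "serial8250.0");
	if (cl)
		clkdev_add(cl);
}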
+ */ +#ifndef _LINUX_CLOCKCHIPS_H +#define _LINUX_CLOCKCHIPS_H + +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD + +#include <linux/clocksource.h> +#include <linux/cpumask.h> +#include <linux/ktime.h> +#include <linux/notifier.h> + +struct clock_event_device; + +/* Clock event mode commands */ +enum clock_event_mode { + CLOCK_EVT_MODE_UNUSED = 0, + CLOCK_EVT_MODE_SHUTDOWN, + CLOCK_EVT_MODE_PERIODIC, + CLOCK_EVT_MODE_ONESHOT, + CLOCK_EVT_MODE_RESUME, +}; + +/* Clock event notification values */ +enum clock_event_nofitiers { + CLOCK_EVT_NOTIFY_ADD, + CLOCK_EVT_NOTIFY_BROADCAST_ON, + CLOCK_EVT_NOTIFY_BROADCAST_OFF, + CLOCK_EVT_NOTIFY_BROADCAST_FORCE, + CLOCK_EVT_NOTIFY_BROADCAST_ENTER, + CLOCK_EVT_NOTIFY_BROADCAST_EXIT, + CLOCK_EVT_NOTIFY_SUSPEND, + CLOCK_EVT_NOTIFY_RESUME, + CLOCK_EVT_NOTIFY_CPU_DYING, + CLOCK_EVT_NOTIFY_CPU_DEAD, +}; + +/* + * Clock event features + */ +#define CLOCK_EVT_FEAT_PERIODIC 0x000001 +#define CLOCK_EVT_FEAT_ONESHOT 0x000002 +#define CLOCK_EVT_FEAT_KTIME 0x000004 +/* + * x86(64) specific misfeatures: + * + * - Clockevent source stops in C3 State and needs broadcast support. + * - Local APIC timer is used as a dummy device. + */ +#define CLOCK_EVT_FEAT_C3STOP 0x000008 +#define CLOCK_EVT_FEAT_DUMMY 0x000010 + +/** + * struct clock_event_device - clock event device descriptor + * @event_handler: Assigned by the framework to be called by the low + * level handler of the event source + * @set_next_event: set next event function using a clocksource delta + * @set_next_ktime: set next event function using a direct ktime value + * @next_event: local storage for the next event in oneshot mode + * @max_delta_ns: maximum delta value in ns + * @min_delta_ns: minimum delta value in ns + * @mult: nanosecond to cycles multiplier + * @shift: nanoseconds to cycles divisor (power of two) + * @mode: operating mode assigned by the management code + * @features: features + * @retries: number of forced programming retries + * @set_mode: set mode function + * @broadcast: function to broadcast events + * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration + * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration + * @name: ptr to clock event name + * @rating: variable to rate clock event devices + * @irq: IRQ number (only for non CPU local devices) + * @cpumask: cpumask to indicate for which CPUs this device works + * @list: list head for the management code + */ +struct clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(unsigned long evt, + struct clock_event_device *); + int (*set_next_ktime)(ktime_t expires, + struct clock_event_device *); + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_mode mode; + unsigned int features; + unsigned long retries; + + void (*broadcast)(const struct cpumask *mask); + void (*set_mode)(enum clock_event_mode mode, + struct clock_event_device *); + unsigned long min_delta_ticks; + unsigned long max_delta_ticks; + + const char *name; + int rating; + int irq; + const struct cpumask *cpumask; + struct list_head list; +} ____cacheline_aligned; + +/* + * Calculate a multiplication factor for scaled math, which is used to convert + * nanoseconds based values to clock ticks: + * + * clock_ticks = (nanoseconds * factor) >> shift. 
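/*
 * Editorial sketch (hypothetical timer hardware): filling in a
 * clock_event_device as described above. Programming of the real hardware is
 * stubbed out; only the shape of the callbacks matters here.
 */
#include <linux/clockchips.h>

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *ced)
{
	/* write 'delta' (in timer ticks) into the hardware match register */
	return 0;
}

static void example_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *ced)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* start the timer free-running with a fixed reload value */
		break;
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_RESUME:
		/* stop or re-arm the hardware as appropriate */
		break;
	}
}

static struct clock_event_device example_ced = {
	.name		= "example-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.set_next_event	= example_set_next_event,
	.set_mode	= example_set_mode,
};

/*
 * Registration from the timer's init code (rate in Hz), using the helper
 * declared further down in this header:
 *	example_ced.cpumask = cpumask_of(0);
 *	clockevents_config_and_register(&example_ced, rate, 1, 0xffffffff);
 */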
+ * + * div_sc is the rearranged equation to calculate a factor from a given clock + * ticks / nanoseconds ratio: + * + * factor = (clock_ticks << shift) / nanoseconds + */ +static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec, + int shift) +{ + uint64_t tmp = ((uint64_t)ticks) << shift; + + do_div(tmp, nsec); + return (unsigned long) tmp; +} + +/* Clock event layer functions */ +extern u64 clockevent_delta2ns(unsigned long latch, + struct clock_event_device *evt); +extern void clockevents_register_device(struct clock_event_device *dev); + +extern void clockevents_config_and_register(struct clock_event_device *dev, + u32 freq, unsigned long min_delta, + unsigned long max_delta); + +extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); + +extern void clockevents_exchange_device(struct clock_event_device *old, + struct clock_event_device *new); +extern void clockevents_set_mode(struct clock_event_device *dev, + enum clock_event_mode mode); +extern int clockevents_register_notifier(struct notifier_block *nb); +extern int clockevents_program_event(struct clock_event_device *dev, + ktime_t expires, bool force); + +extern void clockevents_handle_noop(struct clock_event_device *dev); + +static inline void +clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) +{ + return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, + freq, minsec); +} + +#ifdef CONFIG_GENERIC_CLOCKEVENTS +extern void clockevents_notify(unsigned long reason, void *arg); +#else +# define clockevents_notify(reason, arg) do { } while (0) +#endif + +#else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */ + +#define clockevents_notify(reason, arg) do { } while (0) + +#endif + +#endif diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h new file mode 100644 index 00000000..fbe89e17 --- /dev/null +++ b/include/linux/clocksource.h @@ -0,0 +1,351 @@ +/* linux/include/linux/clocksource.h + * + * This file contains the structure definitions for clocksources. + * + * If you are not a clocksource, or timekeeping code, you should + * not be including this file! + */ +#ifndef _LINUX_CLOCKSOURCE_H +#define _LINUX_CLOCKSOURCE_H + +#include <linux/types.h> +#include <linux/timex.h> +#include <linux/time.h> +#include <linux/list.h> +#include <linux/cache.h> +#include <linux/timer.h> +#include <linux/init.h> +#include <asm/div64.h> +#include <asm/io.h> + +/* clocksource cycle base type */ +typedef u64 cycle_t; +struct clocksource; + +#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA +#include <asm/clocksource.h> +#endif + +/** + * struct cyclecounter - hardware abstraction for a free running counter + * Provides completely state-free accessors to the underlying hardware. + * Depending on which hardware it reads, the cycle counter may wrap + * around quickly. Locking rules (if necessary) have to be defined + * by the implementor and user of specific instances of this API. + * + * @read: returns the current cycle value + * @mask: bitmask for two's complement + * subtraction of non 64 bit counters, + * see CLOCKSOURCE_MASK() helper macro + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) + */ +struct cyclecounter { + cycle_t (*read)(const struct cyclecounter *cc); + cycle_t mask; + u32 mult; + u32 shift; +}; + +/** + * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds + * Contains the state needed by timecounter_read() to detect + * cycle counter wrap around. Initialize with + * timecounter_init(). 
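/*
 * Editorial sketch (hypothetical 32-bit hardware counter): wrapping a raw
 * counter in a cyclecounter and layering a timecounter on top of it, as the
 * comments above describe. clocks_calc_mult_shift() (declared later in this
 * header) fills in mult/shift for the cycle-to-nanosecond conversion.
 */
#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *example_counter_base;	/* mapped during init */

static cycle_t example_cc_read(const struct cyclecounter *cc)
{
	return readl(example_counter_base);
}

static struct cyclecounter example_cc = {
	.read	= example_cc_read,
	.mask	= CLOCKSOURCE_MASK(32),
};

static struct timecounter example_tc;

static void example_timecounter_setup(u32 counter_hz, u64 boot_ns)
{
	/* derive mult/shift so that ns = (cycles * mult) >> shift,
	 * good for at least 60 seconds between conversions */
	clocks_calc_mult_shift(&example_cc.mult, &example_cc.shift,
			       counter_hz, NSEC_PER_SEC, 60);
	timecounter_init(&example_tc, &example_cc, boot_ns);
}

/* later, e.g. from a timestamping path:  u64 now_ns = timecounter_read(&example_tc); */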
Also used to convert cycle counts into the + * corresponding nanosecond counts with timecounter_cyc2time(). Users + * of this code are responsible for initializing the underlying + * cycle counter hardware, locking issues and reading the time + * more often than the cycle counter wraps around. The nanosecond + * counter will only wrap around after ~585 years. + * + * @cc: the cycle counter used by this instance + * @cycle_last: most recent cycle counter value seen by + * timecounter_read() + * @nsec: continuously increasing count + */ +struct timecounter { + const struct cyclecounter *cc; + cycle_t cycle_last; + u64 nsec; +}; + +/** + * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds + * @cc: Pointer to cycle counter. + * @cycles: Cycles + * + * XXX - This could use some mult_lxl_ll() asm optimization. Same code + * as in cyc2ns, but with unsigned result. + */ +static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, + cycle_t cycles) +{ + u64 ret = (u64)cycles; + ret = (ret * cc->mult) >> cc->shift; + return ret; +} + +/** + * timecounter_init - initialize a time counter + * @tc: Pointer to time counter which is to be initialized/reset + * @cc: A cycle counter, ready to be used. + * @start_tstamp: Arbitrary initial time stamp. + * + * After this call the current cycle register (roughly) corresponds to + * the initial time stamp. Every call to timecounter_read() increments + * the time stamp counter by the number of elapsed nanoseconds. + */ +extern void timecounter_init(struct timecounter *tc, + const struct cyclecounter *cc, + u64 start_tstamp); + +/** + * timecounter_read - return nanoseconds elapsed since timecounter_init() + * plus the initial time stamp + * @tc: Pointer to time counter. + * + * In other words, keeps track of time since the same epoch as + * the function which generated the initial time stamp. + */ +extern u64 timecounter_read(struct timecounter *tc); + +/** + * timecounter_cyc2time - convert a cycle counter to same + * time base as values returned by + * timecounter_read() + * @tc: Pointer to time counter. + * @cycle_tstamp: a value returned by tc->cc->read() + * + * Cycle counts that are converted correctly as long as they + * fall into the interval [-1/2 max cycle count, +1/2 max cycle count], + * with "max cycle count" == cs->mask+1. + * + * This allows conversion of cycle counter values which were generated + * in the past. + */ +extern u64 timecounter_cyc2time(struct timecounter *tc, + cycle_t cycle_tstamp); + +/** + * struct clocksource - hardware abstraction for a free running counter + * Provides mostly state-free accessors to the underlying hardware. + * This is the structure used for system time. + * + * @name: ptr to clocksource name + * @list: list head for registration + * @rating: rating value for selection (higher is better) + * To avoid rating inflation the following + * list should give you a guide as to how + * to assign your clocksource a rating + * 1-99: Unfit for real use + * Only available for bootup and testing purposes. + * 100-199: Base level usability. + * Functional for real use, but not desired. + * 200-299: Good. + * A correct and usable clocksource. + * 300-399: Desired. + * A reasonably fast and accurate clocksource. + * 400-499: Perfect + * The ideal clocksource. A must-use where + * available. 
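/*
 * Editorial sketch (hypothetical memory-mapped counter): a minimal
 * clocksource built around the structure documented above, registered with
 * clocksource_register_hz() so the core computes mult/shift itself. For a
 * plain up-counting MMIO register, clocksource_mmio_init() (declared near
 * the end of this header) would achieve the same with less code.
 */
#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *example_cs_base;		/* mapped during init */

static cycle_t example_cs_read(struct clocksource *cs)
{
	return readl(example_cs_base);
}

static struct clocksource example_cs = {
	.name	= "example-counter",
	.rating	= 300,				/* "desired": fast and accurate */
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(void __iomem *base, u32 rate_hz)
{
	example_cs_base = base;
	return clocksource_register_hz(&example_cs, rate_hz);
}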
+ * @read: returns a cycle value, passes clocksource as argument + * @enable: optional function to enable the clocksource + * @disable: optional function to disable the clocksource + * @mask: bitmask for two's complement + * subtraction of non 64 bit counters + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) + * @max_idle_ns: max idle time permitted by the clocksource (nsecs) + * @maxadj: maximum adjustment value to mult (~11%) + * @flags: flags describing special properties + * @archdata: arch-specific data + * @suspend: suspend function for the clocksource, if necessary + * @resume: resume function for the clocksource, if necessary + * @cycle_last: most recent cycle counter value seen by ::read() + */ +struct clocksource { + /* + * Hotpath data, fits in a single cache line when the + * clocksource itself is cacheline aligned. + */ + cycle_t (*read)(struct clocksource *cs); + cycle_t cycle_last; + cycle_t mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; +#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA + struct arch_clocksource_data archdata; +#endif + + const char *name; + struct list_head list; + int rating; + int (*enable)(struct clocksource *cs); + void (*disable)(struct clocksource *cs); + unsigned long flags; + void (*suspend)(struct clocksource *cs); + void (*resume)(struct clocksource *cs); + + /* private: */ +#ifdef CONFIG_CLOCKSOURCE_WATCHDOG + /* Watchdog related data, used by the framework */ + struct list_head wd_list; + cycle_t cs_last; + cycle_t wd_last; +#endif +} ____cacheline_aligned; + +/* + * Clock source flags bits:: + */ +#define CLOCK_SOURCE_IS_CONTINUOUS 0x01 +#define CLOCK_SOURCE_MUST_VERIFY 0x02 + +#define CLOCK_SOURCE_WATCHDOG 0x10 +#define CLOCK_SOURCE_VALID_FOR_HRES 0x20 +#define CLOCK_SOURCE_UNSTABLE 0x40 + +/* simplify initialization of mask field */ +#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? 
((1ULL<<(bits))-1) : -1) + +/** + * clocksource_khz2mult - calculates mult from khz and shift + * @khz: Clocksource frequency in KHz + * @shift_constant: Clocksource shift factor + * + * Helper functions that converts a khz counter frequency to a timsource + * multiplier, given the clocksource shift value + */ +static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant) +{ + /* khz = cyc/(Million ns) + * mult/2^shift = ns/cyc + * mult = ns/cyc * 2^shift + * mult = 1Million/khz * 2^shift + * mult = 1000000 * 2^shift / khz + * mult = (1000000<<shift) / khz + */ + u64 tmp = ((u64)1000000) << shift_constant; + + tmp += khz/2; /* round for do_div */ + do_div(tmp, khz); + + return (u32)tmp; +} + +/** + * clocksource_hz2mult - calculates mult from hz and shift + * @hz: Clocksource frequency in Hz + * @shift_constant: Clocksource shift factor + * + * Helper functions that converts a hz counter + * frequency to a timsource multiplier, given the + * clocksource shift value + */ +static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) +{ + /* hz = cyc/(Billion ns) + * mult/2^shift = ns/cyc + * mult = ns/cyc * 2^shift + * mult = 1Billion/hz * 2^shift + * mult = 1000000000 * 2^shift / hz + * mult = (1000000000<<shift) / hz + */ + u64 tmp = ((u64)1000000000) << shift_constant; + + tmp += hz/2; /* round for do_div */ + do_div(tmp, hz); + + return (u32)tmp; +} + +/** + * clocksource_cyc2ns - converts clocksource cycles to nanoseconds + * @cycles: cycles + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) + * + * Converts cycles to nanoseconds, using the given mult and shift. + * + * XXX - This could use some mult_lxl_ll() asm optimization + */ +static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) +{ + return ((u64) cycles * mult) >> shift; +} + + +extern int clocksource_register(struct clocksource*); +extern void clocksource_unregister(struct clocksource*); +extern void clocksource_touch_watchdog(void); +extern struct clocksource* clocksource_get_next(void); +extern void clocksource_change_rating(struct clocksource *cs, int rating); +extern void clocksource_suspend(void); +extern void clocksource_resume(void); +extern struct clocksource * __init __weak clocksource_default_clock(void); +extern void clocksource_mark_unstable(struct clocksource *cs); + +extern void +clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); + +/* + * Don't call __clocksource_register_scale directly, use + * clocksource_register_hz/khz + */ +extern int +__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq); +extern void +__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq); + +static inline int clocksource_register_hz(struct clocksource *cs, u32 hz) +{ + return __clocksource_register_scale(cs, 1, hz); +} + +static inline int clocksource_register_khz(struct clocksource *cs, u32 khz) +{ + return __clocksource_register_scale(cs, 1000, khz); +} + +static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz) +{ + __clocksource_updatefreq_scale(cs, 1, hz); +} + +static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz) +{ + __clocksource_updatefreq_scale(cs, 1000, khz); +} + +#ifdef CONFIG_GENERIC_TIME_VSYSCALL +extern void +update_vsyscall(struct timespec *ts, struct timespec *wtm, + struct clocksource *c, u32 mult); +extern void update_vsyscall_tz(void); +#else +static inline void +update_vsyscall(struct timespec *ts, struct timespec *wtm, + 
struct clocksource *c, u32 mult) +{ +} + +static inline void update_vsyscall_tz(void) +{ +} +#endif + +extern void timekeeping_notify(struct clocksource *clock); + +extern cycle_t clocksource_mmio_readl_up(struct clocksource *); +extern cycle_t clocksource_mmio_readl_down(struct clocksource *); +extern cycle_t clocksource_mmio_readw_up(struct clocksource *); +extern cycle_t clocksource_mmio_readw_down(struct clocksource *); + +extern int clocksource_mmio_init(void __iomem *, const char *, + unsigned long, int, unsigned, cycle_t (*)(struct clocksource *)); + +extern int clocksource_i8253_init(void); + +#endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h new file mode 100644 index 00000000..3c4aac40 --- /dev/null +++ b/include/linux/cm4000_cs.h @@ -0,0 +1,68 @@ +#ifndef _CM4000_H_ +#define _CM4000_H_ + +#include <linux/types.h> + +#define MAX_ATR 33 + +#define CM4000_MAX_DEV 4 + +/* those two structures are passed via ioctl() from/to userspace. They are + * used by existing userspace programs, so I kepth the awkward "bIFSD" naming + * not to break compilation of userspace apps. -HW */ + +typedef struct atreq { + __s32 atr_len; + unsigned char atr[64]; + __s32 power_act; + unsigned char bIFSD; + unsigned char bIFSC; +} atreq_t; + + +/* what is particularly stupid in the original driver is the arch-dependent + * member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace + * will lay out the structure members differently than the 64bit kernel. + * + * I've changed "ptsreq.protocol" from "unsigned long" to "__u32". + * On 32bit this will make no difference. With 64bit kernels, it will make + * 32bit apps work, too. + */ + +typedef struct ptsreq { + __u32 protocol; /*T=0: 2^0, T=1: 2^1*/ + unsigned char flags; + unsigned char pts1; + unsigned char pts2; + unsigned char pts3; +} ptsreq_t; + +#define CM_IOC_MAGIC 'c' +#define CM_IOC_MAXNR 255 + +#define CM_IOCGSTATUS _IOR (CM_IOC_MAGIC, 0, unsigned char *) +#define CM_IOCGATR _IOWR(CM_IOC_MAGIC, 1, atreq_t *) +#define CM_IOCSPTS _IOW (CM_IOC_MAGIC, 2, ptsreq_t *) +#define CM_IOCSRDR _IO (CM_IOC_MAGIC, 3) +#define CM_IOCARDOFF _IO (CM_IOC_MAGIC, 4) + +#define CM_IOSDBGLVL _IOW(CM_IOC_MAGIC, 250, int*) + +/* card and device states */ +#define CM_CARD_INSERTED 0x01 +#define CM_CARD_POWERED 0x02 +#define CM_ATR_PRESENT 0x04 +#define CM_ATR_VALID 0x08 +#define CM_STATE_VALID 0x0f +/* extra info only from CM4000 */ +#define CM_NO_READER 0x10 +#define CM_BAD_CARD 0x20 + + +#ifdef __KERNEL__ + +#define DEVICE_NAME "cmm" +#define MODULE_NAME "cm4000_cs" + +#endif /* __KERNEL__ */ +#endif /* _CM4000_H_ */ diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h new file mode 100644 index 00000000..d03612b1 --- /dev/null +++ b/include/linux/cn_proc.h @@ -0,0 +1,155 @@ +/* + * cn_proc.h - process events connector + * + * Copyright (C) Matt Helsley, IBM Corp. 2005 + * Based on cn_fork.h by Nguyen Anh Quynh and Guillaume Thouvenin + * Copyright (C) 2005 Nguyen Anh Quynh <aquynh@gmail.com> + * Copyright (C) 2005 Guillaume Thouvenin <guillaume.thouvenin@bull.net> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
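/*
 * Editorial sketch (userspace; the /dev/cmm0 node name is an assumption):
 * querying the reader/card state bits defined in cm4000_cs.h above via the
 * CM_IOCGSTATUS ioctl.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cm4000_cs.h>

int main(void)
{
	unsigned char status = 0;
	int fd = open("/dev/cmm0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, CM_IOCGSTATUS, &status) == 0)
		printf("card inserted=%d powered=%d atr valid=%d\n",
		       !!(status & CM_CARD_INSERTED),
		       !!(status & CM_CARD_POWERED),
		       !!(status & CM_ATR_VALID));
	close(fd);
	return 0;
}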
+ */ + +#ifndef CN_PROC_H +#define CN_PROC_H + +#include <linux/types.h> + +/* + * Userspace sends this enum to register with the kernel that it is listening + * for events on the connector. + */ +enum proc_cn_mcast_op { + PROC_CN_MCAST_LISTEN = 1, + PROC_CN_MCAST_IGNORE = 2 +}; + +/* + * From the user's point of view, the process + * ID is the thread group ID and thread ID is the internal + * kernel "pid". So, fields are assigned as follow: + * + * In user space - In kernel space + * + * parent process ID = parent->tgid + * parent thread ID = parent->pid + * child process ID = child->tgid + * child thread ID = child->pid + */ + +struct proc_event { + enum what { + /* Use successive bits so the enums can be used to record + * sets of events as well + */ + PROC_EVENT_NONE = 0x00000000, + PROC_EVENT_FORK = 0x00000001, + PROC_EVENT_EXEC = 0x00000002, + PROC_EVENT_UID = 0x00000004, + PROC_EVENT_GID = 0x00000040, + PROC_EVENT_SID = 0x00000080, + PROC_EVENT_PTRACE = 0x00000100, + PROC_EVENT_COMM = 0x00000200, + /* "next" should be 0x00000400 */ + /* "last" is the last process event: exit */ + PROC_EVENT_EXIT = 0x80000000 + } what; + __u32 cpu; + __u64 __attribute__((aligned(8))) timestamp_ns; + /* Number of nano seconds since system boot */ + union { /* must be last field of proc_event struct */ + struct { + __u32 err; + } ack; + + struct fork_proc_event { + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; + __kernel_pid_t child_pid; + __kernel_pid_t child_tgid; + } fork; + + struct exec_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + } exec; + + struct id_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + union { + __u32 ruid; /* task uid */ + __u32 rgid; /* task gid */ + } r; + union { + __u32 euid; + __u32 egid; + } e; + } id; + + struct sid_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + } sid; + + struct ptrace_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __kernel_pid_t tracer_pid; + __kernel_pid_t tracer_tgid; + } ptrace; + + struct comm_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + char comm[16]; + } comm; + + struct exit_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __u32 exit_code, exit_signal; + } exit; + } event_data; +}; + +#ifdef __KERNEL__ +#ifdef CONFIG_PROC_EVENTS +void proc_fork_connector(struct task_struct *task); +void proc_exec_connector(struct task_struct *task); +void proc_id_connector(struct task_struct *task, int which_id); +void proc_sid_connector(struct task_struct *task); +void proc_ptrace_connector(struct task_struct *task, int which_id); +void proc_comm_connector(struct task_struct *task); +void proc_exit_connector(struct task_struct *task); +#else +static inline void proc_fork_connector(struct task_struct *task) +{} + +static inline void proc_exec_connector(struct task_struct *task) +{} + +static inline void proc_id_connector(struct task_struct *task, + int which_id) +{} + +static inline void proc_sid_connector(struct task_struct *task) +{} + +static inline void proc_comm_connector(struct task_struct *task) +{} + +static inline void proc_ptrace_connector(struct task_struct *task, + int ptrace_id) +{} + +static inline void proc_exit_connector(struct task_struct *task) +{} +#endif /* CONFIG_PROC_EVENTS */ +#endif /* __KERNEL__ */ +#endif /* CN_PROC_H */ diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h new file mode 100644 index 00000000..aa629bce --- /dev/null +++ 
b/include/linux/cnt32_to_63.h @@ -0,0 +1,107 @@ +/* + * Extend a 32-bit counter to 63 bits + * + * Author: Nicolas Pitre + * Created: December 3, 2006 + * Copyright: MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#ifndef __LINUX_CNT32_TO_63_H__ +#define __LINUX_CNT32_TO_63_H__ + +#include <linux/compiler.h> +#include <linux/types.h> +#include <asm/byteorder.h> + +/* this is used only to give gcc a clue about good code generation */ +union cnt32_to_63 { + struct { +#if defined(__LITTLE_ENDIAN) + u32 lo, hi; +#elif defined(__BIG_ENDIAN) + u32 hi, lo; +#endif + }; + u64 val; +}; + + +/** + * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter + * @cnt_lo: The low part of the counter + * + * Many hardware clock counters are only 32 bits wide and therefore have + * a relatively short period making wrap-arounds rather frequent. This + * is a problem when implementing sched_clock() for example, where a 64-bit + * non-wrapping monotonic value is expected to be returned. + * + * To overcome that limitation, let's extend a 32-bit counter to 63 bits + * in a completely lock free fashion. Bits 0 to 31 of the clock are provided + * by the hardware while bits 32 to 62 are stored in memory. The top bit in + * memory is used to synchronize with the hardware clock half-period. When + * the top bit of both counters (hardware and in memory) differ then the + * memory is updated with a new value, incrementing it when the hardware + * counter wraps around. + * + * Because a word store in memory is atomic then the incremented value will + * always be in synch with the top bit indicating to any potential concurrent + * reader if the value in memory is up to date or not with regards to the + * needed increment. And any race in updating the value in memory is harmless + * as the same value would simply be stored more than once. + * + * The restrictions for the algorithm to work properly are: + * + * 1) this code must be called at least once per each half period of the + * 32-bit counter; + * + * 2) this code must not be preempted for a duration longer than the + * 32-bit counter half period minus the longest period between two + * calls to this code; + * + * Those requirements ensure proper update to the state bit in memory. + * This is usually not a problem in practice, but if it is then a kernel + * timer should be scheduled to manage for this code to be executed often + * enough. + * + * And finally: + * + * 3) the cnt_lo argument must be seen as a globally incrementing value, + * meaning that it should be a direct reference to the counter data which + * can be evaluated according to a specific ordering within the macro, + * and not the result of a previous evaluation stored in a variable. + * + * For example, this is wrong: + * + * u32 partial = get_hw_count(); + * u64 full = cnt32_to_63(partial); + * return full; + * + * This is fine: + * + * u64 full = cnt32_to_63(get_hw_count()); + * return full; + * + * Note that the top bit (bit 63) in the returned value should be considered + * as garbage. It is not cleared here because callers are likely to use a + * multiplier on the returned value which can get rid of the top bit + * implicitly by making the multiplier even, therefore saving on a runtime + * clear-bit instruction. Otherwise caller must remember to clear the top + * bit explicitly. 
+ */ +#define cnt32_to_63(cnt_lo) \ +({ \ + static u32 __m_cnt_hi; \ + union cnt32_to_63 __x; \ + __x.hi = __m_cnt_hi; \ + smp_rmb(); \ + __x.lo = (cnt_lo); \ + if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ + __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ + __x.val; \ +}) + +#endif diff --git a/include/linux/coda.h b/include/linux/coda.h new file mode 100644 index 00000000..96c87693 --- /dev/null +++ b/include/linux/coda.h @@ -0,0 +1,744 @@ +/* + You may distribute this file under either of the two licenses that + follow at your discretion. +*/ + +/* BLURB lgpl + + Coda File System + Release 5 + + Copyright (c) 1987-1999 Carnegie Mellon University + Additional copyrights listed below + +This code is distributed "AS IS" without warranty of any kind under +the terms of the GNU Library General Public Licence Version 2, as +shown in the file LICENSE, or under the license shown below. The +technical and financial contributors to Coda are listed in the file +CREDITS. + + Additional copyrights +*/ + +/* + + Coda: an Experimental Distributed File System + Release 4.0 + + Copyright (c) 1987-1999 Carnegie Mellon University + All Rights Reserved + +Permission to use, copy, modify and distribute this software and its +documentation is hereby granted, provided that both the copyright +notice and this permission notice appear in all copies of the +software, derivative works or modified versions, and any portions +thereof, and that both notices appear in supporting documentation, and +that credit is given to Carnegie Mellon University in all documents +and publicity pertaining to direct or indirect use of this code or its +derivatives. + +CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS, +SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS +FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON +DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER +RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF +ANY DERIVATIVE WORK. + +Carnegie Mellon encourages users of this software to return any +improvements or extensions that they make, and to grant Carnegie +Mellon the rights to redistribute these changes without encumbrance. +*/ + +/* + * + * Based on cfs.h from Mach, but revamped for increased simplicity. 
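/*
 * Editorial sketch (hypothetical 1 MHz free-running counter register): a
 * sched_clock() built on cnt32_to_63() following the rules above. At 1 MHz
 * each tick is 1000 ns, and the even multiplier also clears the undefined
 * top bit implicitly, as the comment notes. The half period is roughly 35
 * minutes, so requirement 1) is easily met by normal scheduler activity.
 */
#include <linux/cnt32_to_63.h>
#include <linux/io.h>
#include <linux/types.h>

static void __iomem *example_counter_reg;	/* mapped during early init */

unsigned long long notrace sched_clock(void)
{
	/* read the counter inside the macro invocation itself (rule 3) */
	u64 cyc = cnt32_to_63(readl(example_counter_reg));

	return cyc * 1000;	/* nanoseconds per counter tick */
}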
+ * Linux modifications by + * Peter Braam, Aug 1996 + */ + +#ifndef _CODA_HEADER_ +#define _CODA_HEADER_ + + +/* Catch new _KERNEL defn for NetBSD and DJGPP/__CYGWIN32__ */ +#if defined(__NetBSD__) || \ + ((defined(DJGPP) || defined(__CYGWIN32__)) && !defined(KERNEL)) +#include <sys/types.h> +#endif + +#ifndef CODA_MAXSYMLINKS +#define CODA_MAXSYMLINKS 10 +#endif + +#if defined(DJGPP) || defined(__CYGWIN32__) +#ifdef KERNEL +typedef unsigned long u_long; +typedef unsigned int u_int; +typedef unsigned short u_short; +typedef u_long ino_t; +typedef u_long dev_t; +typedef void * caddr_t; +#ifdef DOS +typedef unsigned __int64 u_quad_t; +#else +typedef unsigned long long u_quad_t; +#endif + +#define inline + +struct timespec { + long ts_sec; + long ts_nsec; +}; +#else /* DJGPP but not KERNEL */ +#include <sys/time.h> +typedef unsigned long long u_quad_t; +#endif /* !KERNEL */ +#endif /* !DJGPP */ + + +#if defined(__linux__) +#include <linux/time.h> +#define cdev_t u_quad_t +#ifndef __KERNEL__ +#if !defined(_UQUAD_T_) && (!defined(__GLIBC__) || __GLIBC__ < 2) +#define _UQUAD_T_ 1 +typedef unsigned long long u_quad_t; +#endif +#else /*__KERNEL__ */ +typedef unsigned long long u_quad_t; +#endif /* __KERNEL__ */ +#else +#define cdev_t dev_t +#endif + +#ifdef __CYGWIN32__ +struct timespec { + time_t tv_sec; /* seconds */ + long tv_nsec; /* nanoseconds */ +}; +#endif + +#ifndef __BIT_TYPES_DEFINED__ +#define __BIT_TYPES_DEFINED__ +typedef signed char int8_t; +typedef unsigned char u_int8_t; +typedef short int16_t; +typedef unsigned short u_int16_t; +typedef int int32_t; +typedef unsigned int u_int32_t; +#endif + + +/* + * Cfs constants + */ +#define CODA_MAXNAMLEN 255 +#define CODA_MAXPATHLEN 1024 +#define CODA_MAXSYMLINK 10 + +/* these are Coda's version of O_RDONLY etc combinations + * to deal with VFS open modes + */ +#define C_O_READ 0x001 +#define C_O_WRITE 0x002 +#define C_O_TRUNC 0x010 +#define C_O_EXCL 0x100 +#define C_O_CREAT 0x200 + +/* these are to find mode bits in Venus */ +#define C_M_READ 00400 +#define C_M_WRITE 00200 + +/* for access Venus will use */ +#define C_A_C_OK 8 /* Test for writing upon create. */ +#define C_A_R_OK 4 /* Test for read permission. */ +#define C_A_W_OK 2 /* Test for write permission. */ +#define C_A_X_OK 1 /* Test for execute permission. */ +#define C_A_F_OK 0 /* Test for existence. */ + + + +#ifndef _VENUS_DIRENT_T_ +#define _VENUS_DIRENT_T_ 1 +struct venus_dirent { + u_int32_t d_fileno; /* file number of entry */ + u_int16_t d_reclen; /* length of this record */ + u_int8_t d_type; /* file type, see below */ + u_int8_t d_namlen; /* length of string in d_name */ + char d_name[CODA_MAXNAMLEN + 1];/* name must be no longer than this */ +}; +#undef DIRSIZ +#define DIRSIZ(dp) ((sizeof (struct venus_dirent) - (CODA_MAXNAMLEN+1)) + \ + (((dp)->d_namlen+1 + 3) &~ 3)) + +/* + * File types + */ +#define CDT_UNKNOWN 0 +#define CDT_FIFO 1 +#define CDT_CHR 2 +#define CDT_DIR 4 +#define CDT_BLK 6 +#define CDT_REG 8 +#define CDT_LNK 10 +#define CDT_SOCK 12 +#define CDT_WHT 14 + +/* + * Convert between stat structure types and directory types. + */ +#define IFTOCDT(mode) (((mode) & 0170000) >> 12) +#define CDTTOIF(dirtype) ((dirtype) << 12) + +#endif + +#ifndef _VUID_T_ +#define _VUID_T_ +typedef u_int32_t vuid_t; +typedef u_int32_t vgid_t; +#endif /*_VUID_T_ */ + +struct CodaFid { + u_int32_t opaque[4]; +}; + +#define coda_f2i(fid)\ + (fid ? 
(fid->opaque[3] ^ (fid->opaque[2]<<10) ^ (fid->opaque[1]<<20) ^ fid->opaque[0]) : 0) + +#ifndef _VENUS_VATTR_T_ +#define _VENUS_VATTR_T_ +/* + * Vnode types. VNON means no type. + */ +enum coda_vtype { C_VNON, C_VREG, C_VDIR, C_VBLK, C_VCHR, C_VLNK, C_VSOCK, C_VFIFO, C_VBAD }; + +struct coda_vattr { + long va_type; /* vnode type (for create) */ + u_short va_mode; /* files access mode and type */ + short va_nlink; /* number of references to file */ + vuid_t va_uid; /* owner user id */ + vgid_t va_gid; /* owner group id */ + long va_fileid; /* file id */ + u_quad_t va_size; /* file size in bytes */ + long va_blocksize; /* blocksize preferred for i/o */ + struct timespec va_atime; /* time of last access */ + struct timespec va_mtime; /* time of last modification */ + struct timespec va_ctime; /* time file changed */ + u_long va_gen; /* generation number of file */ + u_long va_flags; /* flags defined for file */ + cdev_t va_rdev; /* device special file represents */ + u_quad_t va_bytes; /* bytes of disk space held by file */ + u_quad_t va_filerev; /* file modification number */ +}; + +#endif + +/* structure used by CODA_STATFS for getting cache information from venus */ +struct coda_statfs { + int32_t f_blocks; + int32_t f_bfree; + int32_t f_bavail; + int32_t f_files; + int32_t f_ffree; +}; + +/* + * Kernel <--> Venus communications. + */ + +#define CODA_ROOT 2 +#define CODA_OPEN_BY_FD 3 +#define CODA_OPEN 4 +#define CODA_CLOSE 5 +#define CODA_IOCTL 6 +#define CODA_GETATTR 7 +#define CODA_SETATTR 8 +#define CODA_ACCESS 9 +#define CODA_LOOKUP 10 +#define CODA_CREATE 11 +#define CODA_REMOVE 12 +#define CODA_LINK 13 +#define CODA_RENAME 14 +#define CODA_MKDIR 15 +#define CODA_RMDIR 16 +#define CODA_SYMLINK 18 +#define CODA_READLINK 19 +#define CODA_FSYNC 20 +#define CODA_VGET 22 +#define CODA_SIGNAL 23 +#define CODA_REPLACE 24 /* DOWNCALL */ +#define CODA_FLUSH 25 /* DOWNCALL */ +#define CODA_PURGEUSER 26 /* DOWNCALL */ +#define CODA_ZAPFILE 27 /* DOWNCALL */ +#define CODA_ZAPDIR 28 /* DOWNCALL */ +#define CODA_PURGEFID 30 /* DOWNCALL */ +#define CODA_OPEN_BY_PATH 31 +#define CODA_RESOLVE 32 +#define CODA_REINTEGRATE 33 +#define CODA_STATFS 34 +#define CODA_STORE 35 +#define CODA_RELEASE 36 +#define CODA_NCALLS 37 + +#define DOWNCALL(opcode) (opcode >= CODA_REPLACE && opcode <= CODA_PURGEFID) + +#define VC_MAXDATASIZE 8192 +#define VC_MAXMSGSIZE sizeof(union inputArgs)+sizeof(union outputArgs) +\ + VC_MAXDATASIZE + +#define CIOC_KERNEL_VERSION _IOWR('c', 10, size_t) + +#define CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */ + +/* + * Venus <-> Coda RPC arguments + */ +struct coda_in_hdr { + u_int32_t opcode; + u_int32_t unique; /* Keep multiple outstanding msgs distinct */ + pid_t pid; + pid_t pgid; + vuid_t uid; +}; + +/* Really important that opcode and unique are 1st two fields! 
*/ +struct coda_out_hdr { + u_int32_t opcode; + u_int32_t unique; + u_int32_t result; +}; + +/* coda_root: NO_IN */ +struct coda_root_out { + struct coda_out_hdr oh; + struct CodaFid VFid; +}; + +struct coda_root_in { + struct coda_in_hdr in; +}; + +/* coda_open: */ +struct coda_open_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int flags; +}; + +struct coda_open_out { + struct coda_out_hdr oh; + cdev_t dev; + ino_t inode; +}; + + +/* coda_store: */ +struct coda_store_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int flags; +}; + +struct coda_store_out { + struct coda_out_hdr out; +}; + +/* coda_release: */ +struct coda_release_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int flags; +}; + +struct coda_release_out { + struct coda_out_hdr out; +}; + +/* coda_close: */ +struct coda_close_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int flags; +}; + +struct coda_close_out { + struct coda_out_hdr out; +}; + +/* coda_ioctl: */ +struct coda_ioctl_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int cmd; + int len; + int rwflag; + char *data; /* Place holder for data. */ +}; + +struct coda_ioctl_out { + struct coda_out_hdr oh; + int len; + caddr_t data; /* Place holder for data. */ +}; + + +/* coda_getattr: */ +struct coda_getattr_in { + struct coda_in_hdr ih; + struct CodaFid VFid; +}; + +struct coda_getattr_out { + struct coda_out_hdr oh; + struct coda_vattr attr; +}; + + +/* coda_setattr: NO_OUT */ +struct coda_setattr_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + struct coda_vattr attr; +}; + +struct coda_setattr_out { + struct coda_out_hdr out; +}; + +/* coda_access: NO_OUT */ +struct coda_access_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int flags; +}; + +struct coda_access_out { + struct coda_out_hdr out; +}; + + +/* lookup flags */ +#define CLU_CASE_SENSITIVE 0x01 +#define CLU_CASE_INSENSITIVE 0x02 + +/* coda_lookup: */ +struct coda_lookup_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int name; /* Place holder for data. */ + int flags; +}; + +struct coda_lookup_out { + struct coda_out_hdr oh; + struct CodaFid VFid; + int vtype; +}; + + +/* coda_create: */ +struct coda_create_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + struct coda_vattr attr; + int excl; + int mode; + int name; /* Place holder for data. */ +}; + +struct coda_create_out { + struct coda_out_hdr oh; + struct CodaFid VFid; + struct coda_vattr attr; +}; + + +/* coda_remove: NO_OUT */ +struct coda_remove_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int name; /* Place holder for data. */ +}; + +struct coda_remove_out { + struct coda_out_hdr out; +}; + +/* coda_link: NO_OUT */ +struct coda_link_in { + struct coda_in_hdr ih; + struct CodaFid sourceFid; /* cnode to link *to* */ + struct CodaFid destFid; /* Directory in which to place link */ + int tname; /* Place holder for data. */ +}; + +struct coda_link_out { + struct coda_out_hdr out; +}; + + +/* coda_rename: NO_OUT */ +struct coda_rename_in { + struct coda_in_hdr ih; + struct CodaFid sourceFid; + int srcname; + struct CodaFid destFid; + int destname; +}; + +struct coda_rename_out { + struct coda_out_hdr out; +}; + +/* coda_mkdir: */ +struct coda_mkdir_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + struct coda_vattr attr; + int name; /* Place holder for data. 
*/ +}; + +struct coda_mkdir_out { + struct coda_out_hdr oh; + struct CodaFid VFid; + struct coda_vattr attr; +}; + + +/* coda_rmdir: NO_OUT */ +struct coda_rmdir_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int name; /* Place holder for data. */ +}; + +struct coda_rmdir_out { + struct coda_out_hdr out; +}; + +/* coda_symlink: NO_OUT */ +struct coda_symlink_in { + struct coda_in_hdr ih; + struct CodaFid VFid; /* Directory to put symlink in */ + int srcname; + struct coda_vattr attr; + int tname; +}; + +struct coda_symlink_out { + struct coda_out_hdr out; +}; + +/* coda_readlink: */ +struct coda_readlink_in { + struct coda_in_hdr ih; + struct CodaFid VFid; +}; + +struct coda_readlink_out { + struct coda_out_hdr oh; + int count; + caddr_t data; /* Place holder for data. */ +}; + + +/* coda_fsync: NO_OUT */ +struct coda_fsync_in { + struct coda_in_hdr ih; + struct CodaFid VFid; +}; + +struct coda_fsync_out { + struct coda_out_hdr out; +}; + +/* coda_vget: */ +struct coda_vget_in { + struct coda_in_hdr ih; + struct CodaFid VFid; +}; + +struct coda_vget_out { + struct coda_out_hdr oh; + struct CodaFid VFid; + int vtype; +}; + + +/* CODA_SIGNAL is out-of-band, doesn't need data. */ +/* CODA_INVALIDATE is a venus->kernel call */ +/* CODA_FLUSH is a venus->kernel call */ + +/* coda_purgeuser: */ +/* CODA_PURGEUSER is a venus->kernel call */ +struct coda_purgeuser_out { + struct coda_out_hdr oh; + vuid_t uid; +}; + +/* coda_zapfile: */ +/* CODA_ZAPFILE is a venus->kernel call */ +struct coda_zapfile_out { + struct coda_out_hdr oh; + struct CodaFid CodaFid; +}; + +/* coda_zapdir: */ +/* CODA_ZAPDIR is a venus->kernel call */ +struct coda_zapdir_out { + struct coda_out_hdr oh; + struct CodaFid CodaFid; +}; + +/* coda_purgefid: */ +/* CODA_PURGEFID is a venus->kernel call */ +struct coda_purgefid_out { + struct coda_out_hdr oh; + struct CodaFid CodaFid; +}; + +/* coda_replace: */ +/* CODA_REPLACE is a venus->kernel call */ +struct coda_replace_out { /* coda_replace is a venus->kernel call */ + struct coda_out_hdr oh; + struct CodaFid NewFid; + struct CodaFid OldFid; +}; + +/* coda_open_by_fd: */ +struct coda_open_by_fd_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int flags; +}; + +struct coda_open_by_fd_out { + struct coda_out_hdr oh; + int fd; + +#ifdef __KERNEL__ + struct file *fh; /* not passed from userspace but used in-kernel only */ +#endif +}; + +/* coda_open_by_path: */ +struct coda_open_by_path_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int flags; +}; + +struct coda_open_by_path_out { + struct coda_out_hdr oh; + int path; +}; + +/* coda_statfs: NO_IN */ +struct coda_statfs_in { + struct coda_in_hdr in; +}; + +struct coda_statfs_out { + struct coda_out_hdr oh; + struct coda_statfs stat; +}; + +/* + * Occasionally, we don't cache the fid returned by CODA_LOOKUP. + * For instance, if the fid is inconsistent. + * This case is handled by setting the top bit of the type result parameter. 
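/*
 * Editorial sketch (the helper name is hypothetical): how a variable-length
 * string argument is carried in an upcall. The fixed request struct is
 * followed by the string, and the int "name" field holds the byte offset of
 * that string from the start of the message, which is what the
 * "Place holder for data" comments above refer to.
 */
#include <linux/errno.h>
#include <linux/string.h>

static int example_fill_lookup(struct coda_lookup_in *req, size_t bufsize,
			       const struct CodaFid *dirfid, const char *name)
{
	size_t fixed = sizeof(*req);
	size_t len = strlen(name) + 1;		/* include the terminating NUL */

	if (fixed + len > bufsize)
		return -EINVAL;

	req->ih.opcode = CODA_LOOKUP;
	req->VFid = *dirfid;
	req->flags = CLU_CASE_SENSITIVE;
	req->name = fixed;			/* offset of the string in the message */
	memcpy((char *)req + fixed, name, len);

	return fixed + len;			/* total message size */
}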
+ */ +#define CODA_NOCACHE 0x80000000 + +union inputArgs { + struct coda_in_hdr ih; /* NB: every struct below begins with an ih */ + struct coda_open_in coda_open; + struct coda_store_in coda_store; + struct coda_release_in coda_release; + struct coda_close_in coda_close; + struct coda_ioctl_in coda_ioctl; + struct coda_getattr_in coda_getattr; + struct coda_setattr_in coda_setattr; + struct coda_access_in coda_access; + struct coda_lookup_in coda_lookup; + struct coda_create_in coda_create; + struct coda_remove_in coda_remove; + struct coda_link_in coda_link; + struct coda_rename_in coda_rename; + struct coda_mkdir_in coda_mkdir; + struct coda_rmdir_in coda_rmdir; + struct coda_symlink_in coda_symlink; + struct coda_readlink_in coda_readlink; + struct coda_fsync_in coda_fsync; + struct coda_vget_in coda_vget; + struct coda_open_by_fd_in coda_open_by_fd; + struct coda_open_by_path_in coda_open_by_path; + struct coda_statfs_in coda_statfs; +}; + +union outputArgs { + struct coda_out_hdr oh; /* NB: every struct below begins with an oh */ + struct coda_root_out coda_root; + struct coda_open_out coda_open; + struct coda_ioctl_out coda_ioctl; + struct coda_getattr_out coda_getattr; + struct coda_lookup_out coda_lookup; + struct coda_create_out coda_create; + struct coda_mkdir_out coda_mkdir; + struct coda_readlink_out coda_readlink; + struct coda_vget_out coda_vget; + struct coda_purgeuser_out coda_purgeuser; + struct coda_zapfile_out coda_zapfile; + struct coda_zapdir_out coda_zapdir; + struct coda_purgefid_out coda_purgefid; + struct coda_replace_out coda_replace; + struct coda_open_by_fd_out coda_open_by_fd; + struct coda_open_by_path_out coda_open_by_path; + struct coda_statfs_out coda_statfs; +}; + +union coda_downcalls { + /* CODA_INVALIDATE is a venus->kernel call */ + /* CODA_FLUSH is a venus->kernel call */ + struct coda_purgeuser_out purgeuser; + struct coda_zapfile_out zapfile; + struct coda_zapdir_out zapdir; + struct coda_purgefid_out purgefid; + struct coda_replace_out replace; +}; + + +/* + * Used for identifying usage of "Control" and pioctls + */ + +#define PIOCPARM_MASK 0x0000ffff +struct ViceIoctl { + void __user *in; /* Data to be transferred in */ + void __user *out; /* Data to be transferred out */ + u_short in_size; /* Size of input buffer <= 2K */ + u_short out_size; /* Maximum size of output buffer, <= 2K */ +}; + +struct PioctlData { + const char __user *path; + int follow; + struct ViceIoctl vi; +}; + +#define CODA_CONTROL ".CONTROL" +#define CODA_CONTROLLEN 8 +#define CTL_INO -1 + +/* Data passed to mount */ + +#define CODA_MOUNT_VERSION 1 + +struct coda_mount_data { + int version; + int fd; /* Opened device */ +}; + +#endif + diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h new file mode 100644 index 00000000..72f2d2f0 --- /dev/null +++ b/include/linux/coda_psdev.h @@ -0,0 +1,96 @@ +#ifndef __CODA_PSDEV_H +#define __CODA_PSDEV_H + +#include <linux/magic.h> + +#define CODA_PSDEV_MAJOR 67 +#define MAX_CODADEVS 5 /* how many do we allow */ + +#ifdef __KERNEL__ +#include <linux/backing-dev.h> +#include <linux/mutex.h> + +struct kstatfs; + +/* communication pending/processing queues */ +struct venus_comm { + u_long vc_seq; + wait_queue_head_t vc_waitq; /* Venus wait queue */ + struct list_head vc_pending; + struct list_head vc_processing; + int vc_inuse; + struct super_block *vc_sb; + struct backing_dev_info bdi; + struct mutex vc_mutex; +}; + + +static inline struct venus_comm *coda_vcp(struct super_block *sb) +{ + return (struct venus_comm 
*)((sb)->s_fs_info); +} + +/* upcalls */ +int venus_rootfid(struct super_block *sb, struct CodaFid *fidp); +int venus_getattr(struct super_block *sb, struct CodaFid *fid, + struct coda_vattr *attr); +int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *); +int venus_lookup(struct super_block *sb, struct CodaFid *fid, + const char *name, int length, int *type, + struct CodaFid *resfid); +int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, + vuid_t uid); +int venus_open(struct super_block *sb, struct CodaFid *fid, int flags, + struct file **f); +int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length, + struct CodaFid *newfid, struct coda_vattr *attrs); +int venus_create(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length, int excl, int mode, + struct CodaFid *newfid, struct coda_vattr *attrs) ; +int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length); +int venus_remove(struct super_block *sb, struct CodaFid *dirfid, + const char *name, int length); +int venus_readlink(struct super_block *sb, struct CodaFid *fid, + char *buffer, int *length); +int venus_rename(struct super_block *, struct CodaFid *new_fid, + struct CodaFid *old_fid, size_t old_length, + size_t new_length, const char *old_name, + const char *new_name); +int venus_link(struct super_block *sb, struct CodaFid *fid, + struct CodaFid *dirfid, const char *name, int len ); +int venus_symlink(struct super_block *sb, struct CodaFid *fid, + const char *name, int len, const char *symname, int symlen); +int venus_access(struct super_block *sb, struct CodaFid *fid, int mask); +int venus_pioctl(struct super_block *sb, struct CodaFid *fid, + unsigned int cmd, struct PioctlData *data); +int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out); +int venus_fsync(struct super_block *sb, struct CodaFid *fid); +int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); + +/* + * Statistics + */ + +extern struct venus_comm coda_comms[]; +#endif /* __KERNEL__ */ + +/* messages between coda filesystem in kernel and Venus */ +struct upc_req { + struct list_head uc_chain; + caddr_t uc_data; + u_short uc_flags; + u_short uc_inSize; /* Size is at most 5000 bytes */ + u_short uc_outSize; + u_short uc_opcode; /* copied from data to save lookup */ + int uc_unique; + wait_queue_head_t uc_sleep; /* process' wait queue */ +}; + +#define CODA_REQ_ASYNC 0x1 +#define CODA_REQ_READ 0x2 +#define CODA_REQ_WRITE 0x4 +#define CODA_REQ_ABORT 0x8 + +#endif diff --git a/include/linux/coff.h b/include/linux/coff.h new file mode 100644 index 00000000..6354a7fe --- /dev/null +++ b/include/linux/coff.h @@ -0,0 +1,351 @@ +/* This file is derived from the GAS 2.1.4 assembler control file. + The GAS product is under the GNU General Public License, version 2 or later. + As such, this file is also under that license. + + If the file format changes in the COFF object, this file should be + subsequently updated to reflect the changes. + + The actual loader module only uses a few of these structures. The full + set is documented here because I received the full set. If you wish + more information about COFF, then O'Reilly has a very excellent book. +*/ + +#define E_SYMNMLEN 8 /* Number of characters in a symbol name */ +#define E_FILNMLEN 14 /* Number of characters in a file name */ +#define E_DIMNUM 4 /* Number of array dimensions in auxiliary entry */ + +/* + * These defines are byte order independent. 
There is no alignment of fields + * permitted in the structures. Therefore they are declared as characters + * and the values loaded from the character positions. It also makes it + * nice to have it "endian" independent. + */ + +/* Load a short int from the following tables with little-endian formats */ +#define COFF_SHORT_L(ps) ((short)(((unsigned short)((unsigned char)ps[1])<<8)|\ + ((unsigned short)((unsigned char)ps[0])))) + +/* Load a long int from the following tables with little-endian formats */ +#define COFF_LONG_L(ps) (((long)(((unsigned long)((unsigned char)ps[3])<<24) |\ + ((unsigned long)((unsigned char)ps[2])<<16) |\ + ((unsigned long)((unsigned char)ps[1])<<8) |\ + ((unsigned long)((unsigned char)ps[0]))))) + +/* Load a short int from the following tables with big-endian formats */ +#define COFF_SHORT_H(ps) ((short)(((unsigned short)((unsigned char)ps[0])<<8)|\ + ((unsigned short)((unsigned char)ps[1])))) + +/* Load a long int from the following tables with big-endian formats */ +#define COFF_LONG_H(ps) (((long)(((unsigned long)((unsigned char)ps[0])<<24) |\ + ((unsigned long)((unsigned char)ps[1])<<16) |\ + ((unsigned long)((unsigned char)ps[2])<<8) |\ + ((unsigned long)((unsigned char)ps[3]))))) + +/* These may be overridden later by brain dead implementations which generate + a big-endian header with little-endian data. In that case, generate a + replacement macro which tests a flag and uses either of the two above + as appropriate. */ + +#define COFF_LONG(v) COFF_LONG_L(v) +#define COFF_SHORT(v) COFF_SHORT_L(v) + +/*** coff information for Intel 386/486. */ + +/********************** FILE HEADER **********************/ + +struct COFF_filehdr { + char f_magic[2]; /* magic number */ + char f_nscns[2]; /* number of sections */ + char f_timdat[4]; /* time & date stamp */ + char f_symptr[4]; /* file pointer to symtab */ + char f_nsyms[4]; /* number of symtab entries */ + char f_opthdr[2]; /* sizeof(optional hdr) */ + char f_flags[2]; /* flags */ +}; + +/* + * Bits for f_flags: + * + * F_RELFLG relocation info stripped from file + * F_EXEC file is executable (i.e. no unresolved external + * references) + * F_LNNO line numbers stripped from file + * F_LSYMS local symbols stripped from file + * F_MINMAL this is a minimal object file (".m") output of fextract + * F_UPDATE this is a fully bound update file, output of ogen + * F_SWABD this file has had its bytes swabbed (in names) + * F_AR16WR this file has the byte ordering of an AR16WR + * (e.g. 11/70) machine + * F_AR32WR this file has the byte ordering of an AR32WR machine + * (e.g. vax and iNTEL 386) + * F_AR32W this file has the byte ordering of an AR32W machine + * (e.g. 3b,maxi) + * F_PATCH file contains "patch" list in optional header + * F_NODF (minimal file only) no decision functions for + * replaced functions + */ + +#define COFF_F_RELFLG 0000001 +#define COFF_F_EXEC 0000002 +#define COFF_F_LNNO 0000004 +#define COFF_F_LSYMS 0000010 +#define COFF_F_MINMAL 0000020 +#define COFF_F_UPDATE 0000040 +#define COFF_F_SWABD 0000100 +#define COFF_F_AR16WR 0000200 +#define COFF_F_AR32WR 0000400 +#define COFF_F_AR32W 0001000 +#define COFF_F_PATCH 0002000 +#define COFF_F_NODF 0002000 + +#define COFF_I386MAGIC 0x14c /* Linux's system */ + +#if 0 /* Perhaps, someday, these formats may be used. 
*/ +#define COFF_I386PTXMAGIC 0x154 +#define COFF_I386AIXMAGIC 0x175 /* IBM's AIX system */ +#define COFF_I386BADMAG(x) ((COFF_SHORT((x).f_magic) != COFF_I386MAGIC) \ + && COFF_SHORT((x).f_magic) != COFF_I386PTXMAGIC \ + && COFF_SHORT((x).f_magic) != COFF_I386AIXMAGIC) +#else +#define COFF_I386BADMAG(x) (COFF_SHORT((x).f_magic) != COFF_I386MAGIC) +#endif + +#define COFF_FILHDR struct COFF_filehdr +#define COFF_FILHSZ sizeof(COFF_FILHDR) + +/********************** AOUT "OPTIONAL HEADER" **********************/ + +/* Linux COFF must have this "optional" header. Standard COFF has no entry + location for the "entry" point. They normally would start with the first + location of the .text section. This is not a good idea for linux. So, + the use of this "optional" header is not optional. It is required. + + Do not be tempted to assume that the size of the optional header is + a constant and simply index the next byte by the size of this structure. + Use the 'f_opthdr' field in the main coff header for the size of the + structure actually written to the file!! +*/ + +typedef struct +{ + char magic[2]; /* type of file */ + char vstamp[2]; /* version stamp */ + char tsize[4]; /* text size in bytes, padded to FW bdry */ + char dsize[4]; /* initialized data " " */ + char bsize[4]; /* uninitialized data " " */ + char entry[4]; /* entry pt. */ + char text_start[4]; /* base of text used for this file */ + char data_start[4]; /* base of data used for this file */ +} +COFF_AOUTHDR; + +#define COFF_AOUTSZ (sizeof(COFF_AOUTHDR)) + +#define COFF_STMAGIC 0401 +#define COFF_OMAGIC 0404 +#define COFF_JMAGIC 0407 /* dirty text and data image, can't share */ +#define COFF_DMAGIC 0410 /* dirty text segment, data aligned */ +#define COFF_ZMAGIC 0413 /* The proper magic number for executables */ +#define COFF_SHMAGIC 0443 /* shared library header */ + +/********************** SECTION HEADER **********************/ + +struct COFF_scnhdr { + char s_name[8]; /* section name */ + char s_paddr[4]; /* physical address, aliased s_nlib */ + char s_vaddr[4]; /* virtual address */ + char s_size[4]; /* section size */ + char s_scnptr[4]; /* file ptr to raw data for section */ + char s_relptr[4]; /* file ptr to relocation */ + char s_lnnoptr[4]; /* file ptr to line numbers */ + char s_nreloc[2]; /* number of relocation entries */ + char s_nlnno[2]; /* number of line number entries */ + char s_flags[4]; /* flags */ +}; + +#define COFF_SCNHDR struct COFF_scnhdr +#define COFF_SCNHSZ sizeof(COFF_SCNHDR) + +/* + * names of "special" sections + */ + +#define COFF_TEXT ".text" +#define COFF_DATA ".data" +#define COFF_BSS ".bss" +#define COFF_COMMENT ".comment" +#define COFF_LIB ".lib" + +#define COFF_SECT_TEXT 0 /* Section for instruction code */ +#define COFF_SECT_DATA 1 /* Section for initialized globals */ +#define COFF_SECT_BSS 2 /* Section for un-initialized globals */ +#define COFF_SECT_REQD 3 /* Minimum number of sections for good file */ + +#define COFF_STYP_REG 0x00 /* regular segment */ +#define COFF_STYP_DSECT 0x01 /* dummy segment */ +#define COFF_STYP_NOLOAD 0x02 /* no-load segment */ +#define COFF_STYP_GROUP 0x04 /* group segment */ +#define COFF_STYP_PAD 0x08 /* .pad segment */ +#define COFF_STYP_COPY 0x10 /* copy section */ +#define COFF_STYP_TEXT 0x20 /* .text segment */ +#define COFF_STYP_DATA 0x40 /* .data segment */ +#define COFF_STYP_BSS 0x80 /* .bss segment */ +#define COFF_STYP_INFO 0x200 /* .comment section */ +#define COFF_STYP_OVER 0x400 /* overlay section */ +#define COFF_STYP_LIB 0x800 /* library section */ 
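The char-array header fields above are meant to be decoded with the COFF_SHORT()/COFF_LONG() macros rather than by casting to wider integer types, so the result does not depend on host alignment or byte order. A minimal sketch of that usage follows; the helper name coff_example_nscns() is hypothetical and not part of coff.h.

static inline int coff_example_nscns(const struct COFF_filehdr *hdr)
{
	if (COFF_I386BADMAG(*hdr))
		return -1;	/* wrong magic: not an i386 COFF image */
	if (!(COFF_SHORT(hdr->f_flags) & COFF_F_EXEC))
		return -1;	/* unresolved external references remain */
	/* fields are assembled byte by byte, so this works even if the
	 * header sits at an unaligned offset in a buffer */
	return COFF_SHORT(hdr->f_nscns);
}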
+ +/* + * Shared libraries have the following section header in the data field for + * each library. + */ + +struct COFF_slib { + char sl_entsz[4]; /* Size of this entry */ + char sl_pathndx[4]; /* size of the header field */ +}; + +#define COFF_SLIBHD struct COFF_slib +#define COFF_SLIBSZ sizeof(COFF_SLIBHD) + +/********************** LINE NUMBERS **********************/ + +/* 1 line number entry for every "breakpointable" source line in a section. + * Line numbers are grouped on a per function basis; first entry in a function + * grouping will have l_lnno = 0 and in place of physical address will be the + * symbol table index of the function name. + */ + +struct COFF_lineno { + union { + char l_symndx[4]; /* function name symbol index, iff l_lnno == 0*/ + char l_paddr[4]; /* (physical) address of line number */ + } l_addr; + char l_lnno[2]; /* line number */ +}; + +#define COFF_LINENO struct COFF_lineno +#define COFF_LINESZ 6 + +/********************** SYMBOLS **********************/ + +#define COFF_E_SYMNMLEN 8 /* # characters in a short symbol name */ +#define COFF_E_FILNMLEN 14 /* # characters in a file name */ +#define COFF_E_DIMNUM 4 /* # array dimensions in auxiliary entry */ + +/* + * All symbols and sections have the following definition + */ + +struct COFF_syment +{ + union { + char e_name[E_SYMNMLEN]; /* Symbol name (first 8 characters) */ + struct { + char e_zeroes[4]; /* Leading zeros */ + char e_offset[4]; /* Offset if this is a header section */ + } e; + } e; + + char e_value[4]; /* Value (address) of the segment */ + char e_scnum[2]; /* Section number */ + char e_type[2]; /* Type of section */ + char e_sclass[1]; /* Loader class */ + char e_numaux[1]; /* Number of auxiliary entries which follow */ +}; + +#define COFF_N_BTMASK (0xf) /* Mask for important class bits */ +#define COFF_N_TMASK (0x30) /* Mask for important type bits */ +#define COFF_N_BTSHFT (4) /* # bits to shift class field */ +#define COFF_N_TSHIFT (2) /* # bits to shift type field */ + +/* + * Auxiliary entries because the main table is too limiting. + */ + +union COFF_auxent { + +/* + * Debugger information + */ + + struct { + char x_tagndx[4]; /* str, un, or enum tag indx */ + union { + struct { + char x_lnno[2]; /* declaration line number */ + char x_size[2]; /* str/union/array size */ + } x_lnsz; + char x_fsize[4]; /* size of function */ + } x_misc; + + union { + struct { /* if ISFCN, tag, or .bb */ + char x_lnnoptr[4]; /* ptr to fcn line # */ + char x_endndx[4]; /* entry ndx past block end */ + } x_fcn; + + struct { /* if ISARY, up to 4 dimen. 
*/ + char x_dimen[E_DIMNUM][2]; + } x_ary; + } x_fcnary; + + char x_tvndx[2]; /* tv index */ + } x_sym; + +/* + * Source file names (debugger information) + */ + + union { + char x_fname[E_FILNMLEN]; + struct { + char x_zeroes[4]; + char x_offset[4]; + } x_n; + } x_file; + +/* + * Section information + */ + + struct { + char x_scnlen[4]; /* section length */ + char x_nreloc[2]; /* # relocation entries */ + char x_nlinno[2]; /* # line numbers */ + } x_scn; + +/* + * Transfer vector (branch table) + */ + + struct { + char x_tvfill[4]; /* tv fill value */ + char x_tvlen[2]; /* length of .tv */ + char x_tvran[2][2]; /* tv range */ + } x_tv; /* info about .tv section (in auxent of symbol .tv)) */ +}; + +#define COFF_SYMENT struct COFF_syment +#define COFF_SYMESZ 18 +#define COFF_AUXENT union COFF_auxent +#define COFF_AUXESZ 18 + +#define COFF_ETEXT "etext" + +/********************** RELOCATION DIRECTIVES **********************/ + +struct COFF_reloc { + char r_vaddr[4]; /* Virtual address of item */ + char r_symndx[4]; /* Symbol index in the symtab */ + char r_type[2]; /* Relocation type */ +}; + +#define COFF_RELOC struct COFF_reloc +#define COFF_RELSZ 10 + +#define COFF_DEF_DATA_SECTION_ALIGNMENT 4 +#define COFF_DEF_BSS_SECTION_ALIGNMENT 4 +#define COFF_DEF_TEXT_SECTION_ALIGNMENT 4 + +/* For new sections we haven't heard of before */ +#define COFF_DEF_SECTION_ALIGNMENT 4 diff --git a/include/linux/com20020.h b/include/linux/com20020.h new file mode 100644 index 00000000..5dcfb944 --- /dev/null +++ b/include/linux/com20020.h @@ -0,0 +1,116 @@ +/* + * Linux ARCnet driver - COM20020 chipset support - function declarations + * + * Written 1997 by David Woodhouse. + * Written 1994-1999 by Avery Pennarun. + * Derived from skeleton.c by Donald Becker. + * + * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) + * for sponsoring the further development of this driver. + * + * ********************** + * + * The original copyright of skeleton.c was as follows: + * + * skeleton.c Written 1993 by Donald Becker. + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. This software may only be used + * and distributed according to the terms of the GNU General Public License as + * modified by SRC, incorporated herein by reference. + * + * ********************** + * + * For more details, see drivers/net/arcnet.c + * + * ********************** + */ +#ifndef __COM20020_H +#define __COM20020_H + +int com20020_check(struct net_device *dev); +int com20020_found(struct net_device *dev, int shared); +extern const struct net_device_ops com20020_netdev_ops; + +/* The number of low I/O ports used by the card. 
*/ +#define ARCNET_TOTAL_SIZE 8 + +/* various register addresses */ +#ifdef CONFIG_SA1100_CT6001 +#define BUS_ALIGN 2 /* 8 bit device on a 16 bit bus - needs padding */ +#else +#define BUS_ALIGN 1 +#endif + + +#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */ +#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */ +#define _COMMAND (ioaddr+BUS_ALIGN*1) /* standard arcnet commands */ +#define _DIAGSTAT (ioaddr+BUS_ALIGN*1) /* diagnostic status register */ +#define _ADDR_HI (ioaddr+BUS_ALIGN*2) /* control registers for IO-mapped memory */ +#define _ADDR_LO (ioaddr+BUS_ALIGN*3) +#define _MEMDATA (ioaddr+BUS_ALIGN*4) /* data port for IO-mapped memory */ +#define _SUBADR (ioaddr+BUS_ALIGN*5) /* the extended port _XREG refers to */ +#define _CONFIG (ioaddr+BUS_ALIGN*6) /* configuration register */ +#define _XREG (ioaddr+BUS_ALIGN*7) /* extra registers (indexed by _CONFIG + or _SUBADR) */ + +/* in the ADDR_HI register */ +#define RDDATAflag 0x80 /* next access is a read (not a write) */ + +/* in the DIAGSTAT register */ +#define NEWNXTIDflag 0x02 /* ID to which token is passed has changed */ + +/* in the CONFIG register */ +#define RESETcfg 0x80 /* put card in reset state */ +#define TXENcfg 0x20 /* enable TX */ + +/* in SETUP register */ +#define PROMISCset 0x10 /* enable RCV_ALL */ +#define P1MODE 0x80 /* enable P1-MODE for Backplane */ +#define SLOWARB 0x01 /* enable Slow Arbitration for >=5Mbps */ + +/* COM2002x */ +#define SUB_TENTATIVE 0 /* tentative node ID */ +#define SUB_NODE 1 /* node ID */ +#define SUB_SETUP1 2 /* various options */ +#define SUB_TEST 3 /* test/diag register */ + +/* COM20022 only */ +#define SUB_SETUP2 4 /* sundry options */ +#define SUB_BUSCTL 5 /* bus control options */ +#define SUB_DMACOUNT 6 /* DMA count options */ + +#define SET_SUBADR(x) do { \ + if ((x) < 4) \ + { \ + lp->config = (lp->config & ~0x03) | (x); \ + SETCONF; \ + } \ + else \ + { \ + outb(x, _SUBADR); \ + } \ +} while (0) + +#undef ARCRESET +#undef ASTATUS +#undef ACOMMAND +#undef AINTMASK + +#define ARCRESET { outb(lp->config | 0x80, _CONFIG); \ + udelay(5); \ + outb(lp->config , _CONFIG); \ + } +#define ARCRESET0 { outb(0x18 | 0x80, _CONFIG); \ + udelay(5); \ + outb(0x18 , _CONFIG); \ + } + +#define ASTATUS() inb(_STATUS) +#define ADIAGSTATUS() inb(_DIAGSTAT) +#define ACOMMAND(cmd) outb((cmd),_COMMAND) +#define AINTMASK(msk) outb((msk),_INTMASK) + +#define SETCONF outb(lp->config, _CONFIG) + +#endif /* __COM20020_H */ diff --git a/include/linux/combo_mt66xx.h b/include/linux/combo_mt66xx.h new file mode 100755 index 00000000..7f46cf7f --- /dev/null +++ b/include/linux/combo_mt66xx.h @@ -0,0 +1,25 @@ + + +#ifndef __COMBO_MT66XX_H__ +#define __COMBO_MT66xx_H__ + +struct mtk_wmt_platform_data { + /* GPIO pin config */ + int pmu; + int rst; + int bgf_int; + int urt_cts; + int rtc; + int gps_sync; + int gps_lna; + + /* kernel launcher parameter. */ + +}; + +struct mtk_sdio_eint_platform_data { + /* used for sdio eint. 
*/ + int sdio_eint; +}; + +#endif diff --git a/include/linux/compaction.h b/include/linux/compaction.h new file mode 100644 index 00000000..51a90b7f --- /dev/null +++ b/include/linux/compaction.h @@ -0,0 +1,109 @@ +#ifndef _LINUX_COMPACTION_H +#define _LINUX_COMPACTION_H + +/* Return values for compact_zone() and try_to_compact_pages() */ +/* compaction didn't start as it was not possible or direct reclaim was more suitable */ +#define COMPACT_SKIPPED 0 +/* compaction should continue to another pageblock */ +#define COMPACT_CONTINUE 1 +/* direct compaction partially compacted a zone and there are suitable pages */ +#define COMPACT_PARTIAL 2 +/* The full zone was compacted */ +#define COMPACT_COMPLETE 3 + +#ifdef CONFIG_COMPACTION +extern int sysctl_compact_memory; +extern int sysctl_compaction_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos); +extern int sysctl_extfrag_threshold; +extern int sysctl_extfrag_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos); + +extern int fragmentation_index(struct zone *zone, unsigned int order); +extern unsigned long try_to_compact_pages(struct zonelist *zonelist, + int order, gfp_t gfp_mask, nodemask_t *mask, + bool sync); +extern int compact_pgdat(pg_data_t *pgdat, int order); +extern unsigned long compaction_suitable(struct zone *zone, int order); + +/* Do not skip compaction more than 64 times */ +#define COMPACT_MAX_DEFER_SHIFT 6 + +/* + * Compaction is deferred when compaction fails to result in a page + * allocation success. 1 << compact_defer_limit compactions are skipped up + * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT + */ +static inline void defer_compaction(struct zone *zone, int order) +{ + zone->compact_considered = 0; + zone->compact_defer_shift++; + + if (order < zone->compact_order_failed) + zone->compact_order_failed = order; + + if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) + zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; +} + +/* Returns true if compaction should be skipped this time */ +static inline bool compaction_deferred(struct zone *zone, int order) +{ + unsigned long defer_limit = 1UL << zone->compact_defer_shift; + + if (order < zone->compact_order_failed) + return false; + + /* Avoid possible overflow */ + if (++zone->compact_considered > defer_limit) + zone->compact_considered = defer_limit; + + return zone->compact_considered < (1UL << zone->compact_defer_shift); +} + +#else +static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, + int order, gfp_t gfp_mask, nodemask_t *nodemask, + bool sync) +{ + return COMPACT_CONTINUE; +} + +static inline int compact_pgdat(pg_data_t *pgdat, int order) +{ + return COMPACT_CONTINUE; +} + +static inline unsigned long compaction_suitable(struct zone *zone, int order) +{ + return COMPACT_SKIPPED; +} + +static inline void defer_compaction(struct zone *zone, int order) +{ +} + +static inline bool compaction_deferred(struct zone *zone, int order) +{ + return 1; +} + +#endif /* CONFIG_COMPACTION */ + +#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) +extern int compaction_register_node(struct node *node); +extern void compaction_unregister_node(struct node *node); + +#else + +static inline int compaction_register_node(struct node *node) +{ + return 0; +} + +static inline void compaction_unregister_node(struct node *node) +{ +} +#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */ + +#endif /* _LINUX_COMPACTION_H */ diff --git 
a/include/linux/compat.h b/include/linux/compat.h new file mode 100644 index 00000000..5d46217f --- /dev/null +++ b/include/linux/compat.h @@ -0,0 +1,599 @@ +#ifndef _LINUX_COMPAT_H +#define _LINUX_COMPAT_H +/* + * These are the type definitions for the architecture specific + * syscall compatibility layer. + */ + +#ifdef CONFIG_COMPAT + +#include <linux/stat.h> +#include <linux/param.h> /* for HZ */ +#include <linux/sem.h> +#include <linux/socket.h> +#include <linux/if.h> +#include <linux/fs.h> +#include <linux/aio_abi.h> /* for aio_context_t */ + +#include <asm/compat.h> +#include <asm/siginfo.h> +#include <asm/signal.h> + +#ifndef COMPAT_USE_64BIT_TIME +#define COMPAT_USE_64BIT_TIME 0 +#endif + +#define compat_jiffies_to_clock_t(x) \ + (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) + +typedef __compat_uid32_t compat_uid_t; +typedef __compat_gid32_t compat_gid_t; + +struct compat_sel_arg_struct; +struct rusage; + +struct compat_itimerspec { + struct compat_timespec it_interval; + struct compat_timespec it_value; +}; + +struct compat_utimbuf { + compat_time_t actime; + compat_time_t modtime; +}; + +struct compat_itimerval { + struct compat_timeval it_interval; + struct compat_timeval it_value; +}; + +struct compat_tms { + compat_clock_t tms_utime; + compat_clock_t tms_stime; + compat_clock_t tms_cutime; + compat_clock_t tms_cstime; +}; + +struct compat_timex { + compat_uint_t modes; + compat_long_t offset; + compat_long_t freq; + compat_long_t maxerror; + compat_long_t esterror; + compat_int_t status; + compat_long_t constant; + compat_long_t precision; + compat_long_t tolerance; + struct compat_timeval time; + compat_long_t tick; + compat_long_t ppsfreq; + compat_long_t jitter; + compat_int_t shift; + compat_long_t stabil; + compat_long_t jitcnt; + compat_long_t calcnt; + compat_long_t errcnt; + compat_long_t stbcnt; + compat_int_t tai; + + compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; + compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; + compat_int_t:32; compat_int_t:32; compat_int_t:32; +}; + +#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) + +typedef struct { + compat_sigset_word sig[_COMPAT_NSIG_WORDS]; +} compat_sigset_t; + +/* + * These functions operate strictly on struct compat_time* + */ +extern int get_compat_timespec(struct timespec *, + const struct compat_timespec __user *); +extern int put_compat_timespec(const struct timespec *, + struct compat_timespec __user *); +extern int get_compat_timeval(struct timeval *, + const struct compat_timeval __user *); +extern int put_compat_timeval(const struct timeval *, + struct compat_timeval __user *); +/* + * These functions operate on 32- or 64-bit specs depending on + * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments and the + * naming as compat_get/put_ rather than get/put_compat_. 
+ */ +extern int compat_get_timespec(struct timespec *, const void __user *); +extern int compat_put_timespec(const struct timespec *, void __user *); +extern int compat_get_timeval(struct timeval *, const void __user *); +extern int compat_put_timeval(const struct timeval *, void __user *); + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +struct compat_rlimit { + compat_ulong_t rlim_cur; + compat_ulong_t rlim_max; +}; + +struct compat_rusage { + struct compat_timeval ru_utime; + struct compat_timeval ru_stime; + compat_long_t ru_maxrss; + compat_long_t ru_ixrss; + compat_long_t ru_idrss; + compat_long_t ru_isrss; + compat_long_t ru_minflt; + compat_long_t ru_majflt; + compat_long_t ru_nswap; + compat_long_t ru_inblock; + compat_long_t ru_oublock; + compat_long_t ru_msgsnd; + compat_long_t ru_msgrcv; + compat_long_t ru_nsignals; + compat_long_t ru_nvcsw; + compat_long_t ru_nivcsw; +}; + +extern int put_compat_rusage(const struct rusage *, + struct compat_rusage __user *); + +struct compat_siginfo; + +extern asmlinkage long compat_sys_waitid(int, compat_pid_t, + struct compat_siginfo __user *, int, + struct compat_rusage __user *); + +struct compat_dirent { + u32 d_ino; + compat_off_t d_off; + u16 d_reclen; + char d_name[256]; +}; + +struct compat_ustat { + compat_daddr_t f_tfree; + compat_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +typedef union compat_sigval { + compat_int_t sival_int; + compat_uptr_t sival_ptr; +} compat_sigval_t; + +#define COMPAT_SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3) + +typedef struct compat_sigevent { + compat_sigval_t sigev_value; + compat_int_t sigev_signo; + compat_int_t sigev_notify; + union { + compat_int_t _pad[COMPAT_SIGEV_PAD_SIZE]; + compat_int_t _tid; + + struct { + compat_uptr_t _function; + compat_uptr_t _attribute; + } _sigev_thread; + } _sigev_un; +} compat_sigevent_t; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_if_settings { + unsigned int type; /* Type of physical device or protocol */ + unsigned int size; /* Size of the data allocated by the caller */ + compat_uptr_t ifs_ifsu; /* union of pointers */ +}; + +struct compat_ifreq { + union { + char ifrn_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[IFNAMSIZ]; /* Just fits the size */ + char ifru_newname[IFNAMSIZ]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct compat_ifconf { + compat_int_t ifc_len; /* size of buffer */ + compat_caddr_t ifcbuf; +}; + +struct compat_robust_list { + compat_uptr_t next; +}; + +struct compat_robust_list_head { + struct compat_robust_list list; + compat_long_t futex_offset; + compat_uptr_t list_op_pending; +}; + +struct compat_statfs; +struct compat_statfs64; +struct compat_old_linux_dirent; +struct compat_linux_dirent; +struct linux_dirent64; +struct compat_msghdr; +struct compat_mmsghdr; +struct compat_sysinfo; +struct compat_sysctl_args; +struct compat_kexec_segment; +struct compat_mq_attr; +struct compat_msgbuf; + +extern void compat_exit_robust_list(struct task_struct *curr); + +asmlinkage long +compat_sys_set_robust_list(struct compat_robust_list_head __user *head, + compat_size_t len); +asmlinkage long +compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, + compat_size_t __user *len_ptr); + +#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC +long compat_sys_semctl(int first, int second, int third, void __user *uptr); +long compat_sys_msgsnd(int first, int second, int third, void __user *uptr); +long compat_sys_msgrcv(int first, int second, int msgtyp, int third, + int version, void __user *uptr); +long compat_sys_shmat(int first, int second, compat_uptr_t third, int version, + void __user *uptr); +#else +long compat_sys_semctl(int semid, int semnum, int cmd, int arg); +long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp, + size_t msgsz, int msgflg); +long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp, + size_t msgsz, long msgtyp, int msgflg); +long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); +#endif +long compat_sys_msgctl(int first, int second, void __user *uptr); +long compat_sys_shmctl(int first, int second, void __user *uptr); +long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, + unsigned nsems, const struct compat_timespec __user *timeout); +asmlinkage long compat_sys_keyctl(u32 option, + u32 arg2, u32 arg3, u32 arg4, u32 arg5); +asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); + +asmlinkage ssize_t compat_sys_readv(unsigned long fd, + const struct compat_iovec __user *vec, unsigned long vlen); +asmlinkage ssize_t compat_sys_writev(unsigned long fd, + const struct compat_iovec __user *vec, unsigned long vlen); +asmlinkage ssize_t compat_sys_preadv(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, u32 pos_low, u32 pos_high); +asmlinkage ssize_t compat_sys_pwritev(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, u32 pos_low, u32 pos_high); + +int compat_do_execve(char *filename, compat_uptr_t __user *argv, + compat_uptr_t __user *envp, struct pt_regs *regs); + +asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, + compat_ulong_t __user *outp, compat_ulong_t __user *exp, + struct compat_timeval __user *tvp); + +asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg); + +asmlinkage long compat_sys_wait4(compat_pid_t pid, + 
compat_uint_t __user *stat_addr, int options, + struct compat_rusage __user *ru); + +#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) + +#define BITS_TO_COMPAT_LONGS(bits) \ + (((bits)+BITS_PER_COMPAT_LONG-1)/BITS_PER_COMPAT_LONG) + +long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, + unsigned long bitmap_size); +long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, + unsigned long bitmap_size); +int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); +int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from); +int get_compat_sigevent(struct sigevent *event, + const struct compat_sigevent __user *u_event); +long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, + struct compat_siginfo __user *uinfo); + +static inline int compat_timeval_compare(struct compat_timeval *lhs, + struct compat_timeval *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_usec - rhs->tv_usec; +} + +static inline int compat_timespec_compare(struct compat_timespec *lhs, + struct compat_timespec *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_nsec - rhs->tv_nsec; +} + +extern int get_compat_itimerspec(struct itimerspec *dst, + const struct compat_itimerspec __user *src); +extern int put_compat_itimerspec(struct compat_itimerspec __user *dst, + const struct itimerspec *src); + +asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); +asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, + struct timezone __user *tz); + +asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); + +extern int compat_printk(const char *fmt, ...); +extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); + +asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, + compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, + const compat_ulong_t __user *new_nodes); + +extern int compat_ptrace_request(struct task_struct *child, + compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + +extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); +asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, + compat_long_t addr, compat_long_t data); + +/* + * epoll (fs/eventpoll.c) compat bits follow ... 
+ */ +struct epoll_event; +#define compat_epoll_event epoll_event +asmlinkage long compat_sys_epoll_pwait(int epfd, + struct compat_epoll_event __user *events, + int maxevents, int timeout, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); + +asmlinkage long compat_sys_utime(const char __user *filename, + struct compat_utimbuf __user *t); +asmlinkage long compat_sys_utimensat(unsigned int dfd, + const char __user *filename, + struct compat_timespec __user *t, + int flags); + +asmlinkage long compat_sys_time(compat_time_t __user *tloc); +asmlinkage long compat_sys_stime(compat_time_t __user *tptr); +asmlinkage long compat_sys_signalfd(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); +asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, + const struct compat_itimerspec __user *utmr, + struct compat_itimerspec __user *otmr); +asmlinkage long compat_sys_timerfd_gettime(int ufd, + struct compat_itimerspec __user *otmr); + +asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page, + __u32 __user *pages, + const int __user *nodes, + int __user *status, + int flags); +asmlinkage long compat_sys_futimesat(unsigned int dfd, + const char __user *filename, + struct compat_timeval __user *t); +asmlinkage long compat_sys_utimes(const char __user *filename, + struct compat_timeval __user *t); +asmlinkage long compat_sys_newstat(const char __user *filename, + struct compat_stat __user *statbuf); +asmlinkage long compat_sys_newlstat(const char __user *filename, + struct compat_stat __user *statbuf); +asmlinkage long compat_sys_newfstatat(unsigned int dfd, + const char __user *filename, + struct compat_stat __user *statbuf, + int flag); +asmlinkage long compat_sys_newfstat(unsigned int fd, + struct compat_stat __user *statbuf); +asmlinkage long compat_sys_statfs(const char __user *pathname, + struct compat_statfs __user *buf); +asmlinkage long compat_sys_fstatfs(unsigned int fd, + struct compat_statfs __user *buf); +asmlinkage long compat_sys_statfs64(const char __user *pathname, + compat_size_t sz, + struct compat_statfs64 __user *buf); +asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, + struct compat_statfs64 __user *buf); +asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, + unsigned long arg); +asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, + unsigned long arg); +asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); +asmlinkage long compat_sys_io_getevents(aio_context_t ctx_id, + unsigned long min_nr, + unsigned long nr, + struct io_event __user *events, + struct compat_timespec __user *timeout); +asmlinkage long compat_sys_io_submit(aio_context_t ctx_id, int nr, + u32 __user *iocb); +asmlinkage long compat_sys_mount(const char __user *dev_name, + const char __user *dir_name, + const char __user *type, unsigned long flags, + const void __user *data); +asmlinkage long compat_sys_old_readdir(unsigned int fd, + struct compat_old_linux_dirent __user *, + unsigned int count); +asmlinkage long compat_sys_getdents(unsigned int fd, + struct compat_linux_dirent __user *dirent, + unsigned int count); +asmlinkage long compat_sys_getdents64(unsigned int fd, + struct linux_dirent64 __user *dirent, + unsigned int count); +asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, + unsigned int nr_segs, unsigned int flags); +asmlinkage long compat_sys_open(const char __user *filename, int flags, + umode_t mode); +asmlinkage long 
compat_sys_openat(unsigned int dfd, const char __user *filename, + int flags, umode_t mode); +asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, + struct file_handle __user *handle, + int flags); +asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, + compat_ulong_t __user *outp, + compat_ulong_t __user *exp, + struct compat_timespec __user *tsp, + void __user *sig); +asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, + unsigned int nfds, + struct compat_timespec __user *tsp, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); +asmlinkage long compat_sys_signalfd4(int ufd, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize, int flags); +asmlinkage long compat_sys_get_mempolicy(int __user *policy, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, + compat_ulong_t addr, + compat_ulong_t flags); +asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, + compat_ulong_t maxnode); +asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, + compat_ulong_t mode, + compat_ulong_t __user *nmask, + compat_ulong_t maxnode, compat_ulong_t flags); + +asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, + char __user *optval, unsigned int optlen); +asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, + unsigned flags); +asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags); +asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, + unsigned int flags); +asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, + unsigned flags); +asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, + unsigned flags, struct sockaddr __user *addr, + int __user *addrlen); +asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags, + struct compat_timespec __user *timeout); +asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, + struct compat_timespec __user *rmtp); +asmlinkage long compat_sys_getitimer(int which, + struct compat_itimerval __user *it); +asmlinkage long compat_sys_setitimer(int which, + struct compat_itimerval __user *in, + struct compat_itimerval __user *out); +asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); +asmlinkage long compat_sys_setrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +asmlinkage long compat_sys_getrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); +asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, + unsigned int len, + compat_ulong_t __user *user_mask_ptr); +asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, + unsigned int len, + compat_ulong_t __user *user_mask_ptr); +asmlinkage long compat_sys_timer_create(clockid_t which_clock, + struct compat_sigevent __user *timer_event_spec, + timer_t __user *created_timer_id); +asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, + struct compat_itimerspec __user *new, + struct compat_itimerspec __user *old); +asmlinkage long compat_sys_timer_gettime(timer_t timer_id, + struct compat_itimerspec __user *setting); +asmlinkage long compat_sys_clock_settime(clockid_t which_clock, + struct compat_timespec __user *tp); +asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, + struct compat_timespec __user *tp); 
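The compat_timespec helpers declared earlier in this header are normally used to widen a 32-bit userspace structure before handing it to native code. Below is a minimal sketch of that pattern, assuming the usual native struct timespec is available; the entry-point name compat_example_nanosleep() is hypothetical and not part of compat.h.

static inline long compat_example_nanosleep(struct compat_timespec __user *utp)
{
	struct timespec ts;
	int err;

	/* get_compat_timespec() copies the 32-bit user layout and widens
	 * each field into the native struct timespec */
	err = get_compat_timespec(&ts, utp);
	if (err)
		return err;

	/* a real entry point would now pass 'ts' to the native
	 * implementation (e.g. the non-compat nanosleep path) */
	return 0;
}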
+asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, + struct compat_timex __user *tp); +asmlinkage long compat_sys_clock_getres(clockid_t which_clock, + struct compat_timespec __user *tp); +asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, + struct compat_timespec __user *rqtp, + struct compat_timespec __user *rmtp); +asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, + struct compat_siginfo __user *uinfo, + struct compat_timespec __user *uts, compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, + compat_size_t sigsetsize); +asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); +asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, + unsigned long arg); +asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, + struct compat_timespec __user *utime, u32 __user *uaddr2, + u32 val3); +asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, + char __user *optval, int __user *optlen); +asmlinkage long compat_sys_kexec_load(unsigned long entry, + unsigned long nr_segments, + struct compat_kexec_segment __user *, + unsigned long flags); +asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, + const struct compat_mq_attr __user *u_mqstat, + struct compat_mq_attr __user *u_omqstat); +asmlinkage long compat_sys_mq_notify(mqd_t mqdes, + const struct compat_sigevent __user *u_notification); +asmlinkage long compat_sys_mq_open(const char __user *u_name, + int oflag, compat_mode_t mode, + struct compat_mq_attr __user *u_attr); +asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, + const char __user *u_msg_ptr, + size_t msg_len, unsigned int msg_prio, + const struct compat_timespec __user *u_abs_timeout); +asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, + char __user *u_msg_ptr, + size_t msg_len, unsigned int __user *u_msg_prio, + const struct compat_timespec __user *u_abs_timeout); +asmlinkage long compat_sys_socketcall(int call, u32 __user *args); +asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); + +extern ssize_t compat_rw_copy_check_uvector(int type, + const struct compat_iovec __user *uvector, + unsigned long nr_segs, + unsigned long fast_segs, struct iovec *fast_pointer, + struct iovec **ret_pointer, + int check_access); + +extern void __user *compat_alloc_user_space(unsigned long len); + +asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, + const struct compat_iovec __user *lvec, + unsigned long liovcnt, const struct compat_iovec __user *rvec, + unsigned long riovcnt, unsigned long flags); +asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, + const struct compat_iovec __user *lvec, + unsigned long liovcnt, const struct compat_iovec __user *rvec, + unsigned long riovcnt, unsigned long flags); + +#else + +#define is_compat_task() (0) + +#endif /* CONFIG_COMPAT */ +#endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h new file mode 100644 index 00000000..e5834aa2 --- /dev/null +++ b/include/linux/compiler-gcc.h @@ -0,0 +1,112 @@ +#ifndef __LINUX_COMPILER_H +#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." +#endif + +/* + * Common definitions for all gcc versions go here. 
+ */ + + +/* Optimization barrier */ +/* The "volatile" is due to gcc bugs */ +#define barrier() __asm__ __volatile__("": : :"memory") + +/* + * This macro obfuscates arithmetic on a variable address so that gcc + * shouldn't recognize the original var, and make assumptions about it. + * + * This is needed because the C standard makes it undefined to do + * pointer arithmetic on "objects" outside their boundaries and the + * gcc optimizers assume this is the case. In particular they + * assume such arithmetic does not wrap. + * + * A miscompilation has been observed because of this on PPC. + * To work around it we hide the relationship of the pointer and the object + * using this macro. + * + * Versions of the ppc64 compiler before 4.1 had a bug where use of + * RELOC_HIDE could trash r30. The bug can be worked around by changing + * the inline assembly constraint from =g to =r, in this particular + * case either is valid. + */ +#define RELOC_HIDE(ptr, off) \ + ({ unsigned long __ptr; \ + __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ + (typeof(ptr)) (__ptr + (off)); }) + +#ifdef __CHECKER__ +#define __must_be_array(arr) 0 +#else +/* &a[0] degrades to a pointer: a different type from an array */ +#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +#endif + +/* + * Force always-inline if the user requests it so via the .config, + * or if gcc is too old: + */ +#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ + !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) +# define inline inline __attribute__((always_inline)) +# define __inline__ __inline__ __attribute__((always_inline)) +# define __inline __inline __attribute__((always_inline)) +#else +/* A lot of inline functions can cause havoc with function tracing */ +# define inline inline notrace +# define __inline__ __inline__ notrace +# define __inline __inline notrace +#endif + +#define __deprecated __attribute__((deprecated)) +#define __packed __attribute__((packed)) +#define __weak __attribute__((weak)) + +/* + * it doesn't make sense on ARM (currently the only user of __naked) to trace + * naked functions because then mcount is called without stack and frame pointer + * being set up and there is no chance to restore the lr register to the value + * before mcount was called. + * + * The asm() bodies of naked functions often depend on standard calling conventions, + * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce + * this, so we must do so ourselves. See GCC PR44290. + */ +#define __naked __attribute__((naked)) noinline __noclone notrace + +#define __noreturn __attribute__((noreturn)) + +/* + * From the GCC manual: + * + * Many functions have no effects except the return value and their + * return value depends only on the parameters and/or global + * variables. Such a function can be subject to common subexpression + * elimination and loop optimization just as an arithmetic operator + * would be. + * [...] 
+ */ +#define __pure __attribute__((pure)) +#define __aligned(x) __attribute__((aligned(x))) +#define __printf(a, b) __attribute__((format(printf, a, b))) +#define __scanf(a, b) __attribute__((format(scanf, a, b))) +#define noinline __attribute__((noinline)) +#define __attribute_const__ __attribute__((__const__)) +#define __maybe_unused __attribute__((unused)) +#define __always_unused __attribute__((unused)) + +#define __gcc_header(x) #x +#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) +#define gcc_header(x) _gcc_header(x) +#include gcc_header(__GNUC__) + +#if !defined(__noclone) +#define __noclone /* not needed */ +#endif + +/* + * A trick to suppress uninitialized variable warning without generating any + * code + */ +#define uninitialized_var(x) x = x + +#define __always_inline inline __attribute__((always_inline)) diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h new file mode 100644 index 00000000..37d41243 --- /dev/null +++ b/include/linux/compiler-gcc3.h @@ -0,0 +1,23 @@ +#ifndef __LINUX_COMPILER_H +#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead." +#endif + +#if __GNUC_MINOR__ < 2 +# error Sorry, your compiler is too old - please upgrade it. +#endif + +#if __GNUC_MINOR__ >= 3 +# define __used __attribute__((__used__)) +#else +# define __used __attribute__((__unused__)) +#endif + +#if __GNUC_MINOR__ >= 4 +#define __must_check __attribute__((warn_unused_result)) +#endif + +#ifdef CONFIG_GCOV_KERNEL +# if __GNUC_MINOR__ < 4 +# error "GCOV profiling support for gcc versions below 3.4 not included" +# endif /* __GNUC_MINOR__ */ +#endif /* CONFIG_GCOV_KERNEL */ diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h new file mode 100644 index 00000000..2f407917 --- /dev/null +++ b/include/linux/compiler-gcc4.h @@ -0,0 +1,58 @@ +#ifndef __LINUX_COMPILER_H +#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead." +#endif + +/* GCC 4.1.[01] miscompiles __weak */ +#ifdef __KERNEL__ +# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1 +# error Your version of gcc miscompiles the __weak directive +# endif +#endif + +#define __used __attribute__((__used__)) +#define __must_check __attribute__((warn_unused_result)) +#define __compiler_offsetof(a,b) __builtin_offsetof(a,b) + +#if __GNUC_MINOR__ >= 3 +/* Mark functions as cold. gcc will assume any path leading to a call + to them will be unlikely. This means a lot of manual unlikely()s + are unnecessary now for any paths leading to the usual suspects + like BUG(), printk(), panic() etc. [but let's keep them for now for + older compilers] + + Early snapshots of gcc 4.3 don't support this and we can't detect this + in the preprocessor, but we can live with this because they're unreleased. + Maketime probing would be overkill here. + + gcc also has a __attribute__((__hot__)) to move hot functions into + a special section, but I don't see any sense in this right now in + the kernel context */ +#define __cold __attribute__((__cold__)) + +#define __linktime_error(message) __attribute__((__error__(message))) + +#if __GNUC_MINOR__ >= 5 +/* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer + * control elsewhere. + * + * Early snapshots of gcc 4.5 don't support this and we can't detect + * this in the preprocessor, but we can live with this because they're + * unreleased. Really, we need to have autoconf for the kernel. 
+ */ +#define unreachable() __builtin_unreachable() + +/* Mark a function definition as prohibited from being cloned. */ +#define __noclone __attribute__((__noclone__)) + +#endif +#endif + +#if __GNUC_MINOR__ > 0 +#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) +#endif +#if __GNUC_MINOR__ >= 4 && !defined(__CHECKER__) +#define __compiletime_warning(message) __attribute__((warning(message))) +#define __compiletime_error(message) __attribute__((error(message))) +#endif diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h new file mode 100644 index 00000000..d8e636e5 --- /dev/null +++ b/include/linux/compiler-intel.h @@ -0,0 +1,31 @@ +#ifndef __LINUX_COMPILER_H +#error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead." +#endif + +#ifdef __ECC + +/* Some compiler specific definitions are overwritten here + * for Intel ECC compiler + */ + +#include <asm/intrinsics.h> + +/* Intel ECC compiler doesn't support gcc specific asm stmts. + * It uses intrinsics to do the equivalent things. + */ +#undef barrier +#undef RELOC_HIDE + +#define barrier() __memory_barrier() + +#define RELOC_HIDE(ptr, off) \ + ({ unsigned long __ptr; \ + __ptr = (unsigned long) (ptr); \ + (typeof(ptr)) (__ptr + (off)); }) + +/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */ +#define __must_be_array(a) 0 + +#endif + +#define uninitialized_var(x) x diff --git a/include/linux/compiler.h b/include/linux/compiler.h new file mode 100644 index 00000000..923d093c --- /dev/null +++ b/include/linux/compiler.h @@ -0,0 +1,313 @@ +#ifndef __LINUX_COMPILER_H +#define __LINUX_COMPILER_H + +#ifndef __ASSEMBLY__ + +#ifdef __CHECKER__ +# define __user __attribute__((noderef, address_space(1))) +# define __kernel __attribute__((address_space(0))) +# define __safe __attribute__((safe)) +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __iomem __attribute__((noderef, address_space(2))) +# define __acquires(x) __attribute__((context(x,0,1))) +# define __releases(x) __attribute__((context(x,1,0))) +# define __acquire(x) __context__(x,1) +# define __release(x) __context__(x,-1) +# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) +# define __percpu __attribute__((noderef, address_space(3))) +#ifdef CONFIG_SPARSE_RCU_POINTER +# define __rcu __attribute__((noderef, address_space(4))) +#else +# define __rcu +#endif +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +#else +# define __user +# define __kernel +# define __safe +# define __force +# define __nocast +# define __iomem +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +# define __builtin_warning(x, y...) (1) +# define __acquires(x) +# define __releases(x) +# define __acquire(x) (void)0 +# define __release(x) (void)0 +# define __cond_lock(x,c) (c) +# define __percpu +# define __rcu +#endif + +#ifdef __KERNEL__ + +#ifdef __GNUC__ +#include <linux/compiler-gcc.h> +#endif + +#define notrace __attribute__((no_instrument_function)) + +/* Intel compiler defines __GNUC__. So we will overwrite implementations + * coming from above header files here + */ +#ifdef __INTEL_COMPILER +# include <linux/compiler-intel.h> +#endif + +/* + * Generic compiler-dependent macros required for kernel + * build go below this comment. 
Actual compiler/compiler version + * specific implementations come from the above header files + */ + +struct ftrace_branch_data { + const char *func; + const char *file; + unsigned line; + union { + struct { + unsigned long correct; + unsigned long incorrect; + }; + struct { + unsigned long miss; + unsigned long hit; + }; + unsigned long miss_hit[2]; + }; +}; + +/* + * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code + * to disable branch tracing on a per file basis. + */ +#if defined(CONFIG_TRACE_BRANCH_PROFILING) \ + && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) +void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + +#define likely_notrace(x) __builtin_expect(!!(x), 1) +#define unlikely_notrace(x) __builtin_expect(!!(x), 0) + +#define __branch_check__(x, expect) ({ \ + int ______r; \ + static struct ftrace_branch_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_annotated_branch"))) \ + ______f = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + ______r = likely_notrace(x); \ + ftrace_likely_update(&______f, ______r, expect); \ + ______r; \ + }) + +/* + * Using __builtin_constant_p(x) to ignore cases where the return + * value is always the same. This idea is taken from a similar patch + * written by Daniel Walker. + */ +# ifndef likely +# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1)) +# endif +# ifndef unlikely +# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) +# endif + +#ifdef CONFIG_PROFILE_ALL_BRANCHES +/* + * "Define 'is'", Bill Clinton + * "Define 'if'", Steven Rostedt + */ +#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) +#define __trace_if(cond) \ + if (__builtin_constant_p((cond)) ? !!(cond) : \ + ({ \ + int ______r; \ + static struct ftrace_branch_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_branch"))) \ + ______f = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + ______r = !!(cond); \ + ______f.miss_hit[______r]++; \ + ______r; \ + })) +#endif /* CONFIG_PROFILE_ALL_BRANCHES */ + +#else +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +/* Optimization barrier */ +#ifndef barrier +# define barrier() __memory_barrier() +#endif + +/* Unreachable code */ +#ifndef unreachable +# define unreachable() do { } while (1) +#endif + +#ifndef RELOC_HIDE +# define RELOC_HIDE(ptr, off) \ + ({ unsigned long __ptr; \ + __ptr = (unsigned long) (ptr); \ + (typeof(ptr)) (__ptr + (off)); }) +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +#ifdef __KERNEL__ +/* + * Allow us to mark functions as 'deprecated' and have gcc emit a nice + * warning for each use, in hopes of speeding the functions removal. + * Usage is: + * int __deprecated foo(void) + */ +#ifndef __deprecated +# define __deprecated /* unimplemented */ +#endif + +#ifdef MODULE +#define __deprecated_for_modules __deprecated +#else +#define __deprecated_for_modules +#endif + +#ifndef __must_check +#define __must_check +#endif + +#ifndef CONFIG_ENABLE_MUST_CHECK +#undef __must_check +#define __must_check +#endif +#ifndef CONFIG_ENABLE_WARN_DEPRECATED +#undef __deprecated +#undef __deprecated_for_modules +#define __deprecated +#define __deprecated_for_modules +#endif + +/* + * Allow us to avoid 'defined but not used' warnings on functions and data, + * as well as force them to be emitted to the assembly file. 
+ * + * As of gcc 3.4, static functions that are not marked with attribute((used)) + * may be elided from the assembly file. As of gcc 3.4, static data not so + * marked will not be elided, but this may change in a future gcc version. + * + * NOTE: Because distributions shipped with a backported unit-at-a-time + * compiler in gcc 3.3, we must define __used to be __attribute__((used)) + * for gcc >=3.3 instead of 3.4. + * + * In prior versions of gcc, such functions and data would be emitted, but + * would be warned about except with attribute((unused)). + * + * Mark functions that are referenced only in inline assembly as __used so + * the code is emitted even though it appears to be unreferenced. + */ +#ifndef __used +# define __used /* unimplemented */ +#endif + +#ifndef __maybe_unused +# define __maybe_unused /* unimplemented */ +#endif + +#ifndef __always_unused +# define __always_unused /* unimplemented */ +#endif + +#ifndef noinline +#define noinline +#endif + +/* + * Rather then using noinline to prevent stack consumption, use + * noinline_for_stack instead. For documentation reasons. + */ +#define noinline_for_stack noinline + +#ifndef __always_inline +#define __always_inline inline +#endif + +#endif /* __KERNEL__ */ + +/* + * From the GCC manual: + * + * Many functions do not examine any values except their arguments, + * and have no effects except the return value. Basically this is + * just slightly more strict class than the `pure' attribute above, + * since function is not allowed to read global memory. + * + * Note that a function that has pointer arguments and examines the + * data pointed to must _not_ be declared `const'. Likewise, a + * function that calls a non-`const' function usually must not be + * `const'. It does not make sense for a `const' function to return + * `void'. + */ +#ifndef __attribute_const__ +# define __attribute_const__ /* unimplemented */ +#endif + +/* + * Tell gcc if a function is cold. The compiler will assume any path + * directly leading to the call is unlikely. + */ + +#ifndef __cold +#define __cold +#endif + +/* Simple shorthand for a section definition */ +#ifndef __section +# define __section(S) __attribute__ ((__section__(#S))) +#endif + +/* Are two types/vars the same type (ignoring qualifiers)? */ +#ifndef __same_type +# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +#endif + +/* Compile time object size, -1 for unknown */ +#ifndef __compiletime_object_size +# define __compiletime_object_size(obj) -1 +#endif +#ifndef __compiletime_warning +# define __compiletime_warning(message) +#endif +#ifndef __compiletime_error +# define __compiletime_error(message) +#endif +#ifndef __linktime_error +# define __linktime_error(message) +#endif +/* + * Prevent the compiler from merging or refetching accesses. The compiler + * is also forbidden from reordering successive instances of ACCESS_ONCE(), + * but only when the compiler is aware of some particular ordering. One way + * to make the compiler aware of ordering is to put the two invocations of + * ACCESS_ONCE() in different C statements. + * + * This macro does absolutely -nothing- to prevent the CPU from reordering, + * merging, or refetching absolutely anything at any time. Its main intended + * use is to mediate communication between process-level code and irq/NMI + * handlers, all running on the same CPU. 
+ */ +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) + +#endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h new file mode 100644 index 00000000..51494e6b --- /dev/null +++ b/include/linux/completion.h @@ -0,0 +1,104 @@ +#ifndef __LINUX_COMPLETION_H +#define __LINUX_COMPLETION_H + +/* + * (C) Copyright 2001 Linus Torvalds + * + * Atomic wait-for-completion handler data structures. + * See kernel/sched.c for details. + */ + +#include <linux/wait.h> + +/* + * struct completion - structure used to maintain state for a "completion" + * + * This is the opaque structure used to maintain the state for a "completion". + * Completions currently use a FIFO to queue threads that have to wait for + * the "completion" event. + * + * See also: complete(), wait_for_completion() (and friends _timeout, + * _interruptible, _interruptible_timeout, and _killable), init_completion(), + * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and + * INIT_COMPLETION(). + */ +struct completion { + unsigned int done; + wait_queue_head_t wait; +}; + +#define COMPLETION_INITIALIZER(work) \ + { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + +#define COMPLETION_INITIALIZER_ONSTACK(work) \ + ({ init_completion(&work); work; }) + +/** + * DECLARE_COMPLETION - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure. Generally used + * for static declarations. You should use the _ONSTACK variant for automatic + * variables. + */ +#define DECLARE_COMPLETION(work) \ + struct completion work = COMPLETION_INITIALIZER(work) + +/* + * Lockdep needs to run a non-constant initializer for on-stack + * completions - so we use the _ONSTACK() variant for those that + * are on the kernel stack: + */ +/** + * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure on the kernel + * stack. + */ +#ifdef CONFIG_LOCKDEP +# define DECLARE_COMPLETION_ONSTACK(work) \ + struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) +#else +# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) +#endif + +/** + * init_completion - Initialize a dynamically allocated completion + * @x: completion structure that is to be initialized + * + * This inline function will initialize a dynamically created completion + * structure. + */ +static inline void init_completion(struct completion *x) +{ + x->done = 0; + init_waitqueue_head(&x->wait); +} + +extern void wait_for_completion(struct completion *); +extern int wait_for_completion_interruptible(struct completion *x); +extern int wait_for_completion_killable(struct completion *x); +extern unsigned long wait_for_completion_timeout(struct completion *x, + unsigned long timeout); +extern long wait_for_completion_interruptible_timeout( + struct completion *x, unsigned long timeout); +extern long wait_for_completion_killable_timeout( + struct completion *x, unsigned long timeout); +extern bool try_wait_for_completion(struct completion *x); +extern bool completion_done(struct completion *x); + +extern void complete(struct completion *); +extern void complete_all(struct completion *); + +/** + * INIT_COMPLETION - reinitialize a completion structure + * @x: completion structure to be reinitialized + * + * This macro should be used to reinitialize a completion structure so it can + * be reused. 
This is especially important after complete_all() is used. + */ +#define INIT_COMPLETION(x) ((x).done = 0) + + +#endif diff --git a/include/linux/comstats.h b/include/linux/comstats.h new file mode 100644 index 00000000..3f5ea8e8 --- /dev/null +++ b/include/linux/comstats.h @@ -0,0 +1,119 @@ +/*****************************************************************************/ + +/* + * comstats.h -- Serial Port Stats. + * + * Copyright (C) 1996-1998 Stallion Technologies + * Copyright (C) 1994-1996 Greg Ungerer. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*****************************************************************************/ +#ifndef _COMSTATS_H +#define _COMSTATS_H +/*****************************************************************************/ + +/* + * Serial port stats structure. The structure itself is UART + * independent, but some fields may be UART/driver specific (for + * example state). + */ + +typedef struct { + unsigned long brd; + unsigned long panel; + unsigned long port; + unsigned long hwid; + unsigned long type; + unsigned long txtotal; + unsigned long rxtotal; + unsigned long txbuffered; + unsigned long rxbuffered; + unsigned long rxoverrun; + unsigned long rxparity; + unsigned long rxframing; + unsigned long rxlost; + unsigned long txbreaks; + unsigned long rxbreaks; + unsigned long txxon; + unsigned long txxoff; + unsigned long rxxon; + unsigned long rxxoff; + unsigned long txctson; + unsigned long txctsoff; + unsigned long rxrtson; + unsigned long rxrtsoff; + unsigned long modem; + unsigned long state; + unsigned long flags; + unsigned long ttystate; + unsigned long cflags; + unsigned long iflags; + unsigned long oflags; + unsigned long lflags; + unsigned long signals; +} comstats_t; + + +/* + * Board stats structure. Returns useful info about the board. + */ + +#define COM_MAXPANELS 8 + +typedef struct { + unsigned long panel; + unsigned long type; + unsigned long hwid; + unsigned long nrports; +} companel_t; + +typedef struct { + unsigned long brd; + unsigned long type; + unsigned long hwid; + unsigned long state; + unsigned long ioaddr; + unsigned long ioaddr2; + unsigned long memaddr; + unsigned long irq; + unsigned long nrpanels; + unsigned long nrports; + companel_t panels[COM_MAXPANELS]; +} combrd_t; + + +/* + * Define the ioctl operations for stats stuff. + */ +#include <linux/ioctl.h> + +#define COM_GETPORTSTATS _IO('c',30) +#define COM_CLRPORTSTATS _IO('c',31) +#define COM_GETBRDSTATS _IO('c',32) + + +/* + * Define the set of ioctls that give user level access to the + * private port, panel and board structures. The argument required + * will be driver dependent! 
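+ *
+ * Illustrative user-space sketch using the stats ioctl defined above
+ * (assumes "fd" is an open port; exact semantics are driver dependent):
+ *
+ *	comstats_t stats;
+ *
+ *	if (ioctl(fd, COM_GETPORTSTATS, &stats) == 0)
+ *		printf("rx overruns: %lu\n", stats.rxoverrun);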
+ */ +#define COM_READPORT _IO('c',40) +#define COM_READBOARD _IO('c',41) +#define COM_READPANEL _IO('c',42) + +/*****************************************************************************/ +#endif diff --git a/include/linux/concap.h b/include/linux/concap.h new file mode 100644 index 00000000..977acb3d --- /dev/null +++ b/include/linux/concap.h @@ -0,0 +1,112 @@ +/* $Id: concap.h,v 1.3.2.2 2004/01/12 23:08:35 keil Exp $ + * + * Copyright 1997 by Henner Eisen <eis@baty.hanse.de> + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + */ + +#ifndef _LINUX_CONCAP_H +#define _LINUX_CONCAP_H + +#include <linux/skbuff.h> +#include <linux/netdevice.h> + +/* Stuff to support encapsulation protocols genericly. The encapsulation + protocol is processed at the uppermost layer of the network interface. + + Based on a ideas developed in a 'synchronous device' thread in the + linux-x25 mailing list contributed by Alan Cox, Thomasz Motylewski + and Jonathan Naylor. + + For more documetation on this refer to Documentation/isdn/README.concap +*/ + +struct concap_proto_ops; +struct concap_device_ops; + +/* this manages all data needed by the encapsulation protocol + */ +struct concap_proto{ + struct net_device *net_dev; /* net device using our service */ + struct concap_device_ops *dops; /* callbacks provided by device */ + struct concap_proto_ops *pops; /* callbacks provided by us */ + spinlock_t lock; + int flags; + void *proto_data; /* protocol specific private data, to + be accessed via *pops methods only*/ + /* + : + whatever + : + */ +}; + +/* Operations to be supported by the net device. Called by the encapsulation + * protocol entity. No receive method is offered because the encapsulation + * protocol directly calls netif_rx(). + */ +struct concap_device_ops{ + + /* to request data is submitted by device*/ + int (*data_req)(struct concap_proto *, struct sk_buff *); + + /* Control methods must be set to NULL by devices which do not + support connection control.*/ + /* to request a connection is set up */ + int (*connect_req)(struct concap_proto *); + + /* to request a connection is released */ + int (*disconn_req)(struct concap_proto *); +}; + +/* Operations to be supported by the encapsulation protocol. Called by + * device driver. + */ +struct concap_proto_ops{ + + /* create a new encapsulation protocol instance of same type */ + struct concap_proto * (*proto_new) (void); + + /* delete encapsulation protocol instance and free all its resources. + cprot may no loger be referenced after calling this */ + void (*proto_del)(struct concap_proto *cprot); + + /* initialize the protocol's data. To be called at interface startup + or when the device driver resets the interface. All services of the + encapsulation protocol may be used after this*/ + int (*restart)(struct concap_proto *cprot, + struct net_device *ndev, + struct concap_device_ops *dops); + + /* inactivate an encapsulation protocol instance. The encapsulation + protocol may not call any *dops methods after this. */ + int (*close)(struct concap_proto *cprot); + + /* process a frame handed down to us by upper layer */ + int (*encap_and_xmit)(struct concap_proto *cprot, struct sk_buff *skb); + + /* to be called for each data entity received from lower layer*/ + int (*data_ind)(struct concap_proto *cprot, struct sk_buff *skb); + + /* to be called when a connection was set up/down. 
+ Protocols that don't process these primitives might fill in + dummy methods here */ + int (*connect_ind)(struct concap_proto *cprot); + int (*disconn_ind)(struct concap_proto *cprot); + /* + Some network device support functions, like net_header(), rebuild_header(), + and others, that depend solely on the encapsulation protocol, might + be provided here, too. The net device would just fill them in its + corresponding fields when it is opened. + */ +}; + +/* dummy restart/close/connect/reset/disconn methods + */ +extern int concap_nop(struct concap_proto *cprot); + +/* dummy submit method + */ +extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb); +#endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h new file mode 100644 index 00000000..34025df6 --- /dev/null +++ b/include/linux/configfs.h @@ -0,0 +1,260 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * configfs.h - definitions for the device driver filesystem + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + * Based on sysfs: + * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel + * + * Based on kobject.h: + * Copyright (c) 2002-2003 Patrick Mochel + * Copyright (c) 2002-2003 Open Source Development Labs + * + * configfs Copyright (C) 2005 Oracle. All rights reserved. + * + * Please read Documentation/filesystems/configfs/configfs.txt before using + * the configfs interface, ESPECIALLY the parts about reference counts and + * item destructors. 
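+ *
+ * Minimal registration sketch (illustrative only; "my_subsys" and
+ * "my_group_type" are hypothetical, error handling omitted):
+ *
+ *	static struct configfs_subsystem my_subsys = {
+ *		.su_group = {
+ *			.cg_item = {
+ *				.ci_namebuf	= "my_subsys",
+ *				.ci_type	= &my_group_type,
+ *			},
+ *		},
+ *	};
+ *
+ *	config_group_init(&my_subsys.su_group);
+ *	mutex_init(&my_subsys.su_mutex);
+ *	configfs_register_subsystem(&my_subsys);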
+ */ + +#ifndef _CONFIGFS_H_ +#define _CONFIGFS_H_ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/err.h> + +#include <linux/atomic.h> + +#define CONFIGFS_ITEM_NAME_LEN 20 + +struct module; + +struct configfs_item_operations; +struct configfs_group_operations; +struct configfs_attribute; +struct configfs_subsystem; + +struct config_item { + char *ci_name; + char ci_namebuf[CONFIGFS_ITEM_NAME_LEN]; + struct kref ci_kref; + struct list_head ci_entry; + struct config_item *ci_parent; + struct config_group *ci_group; + struct config_item_type *ci_type; + struct dentry *ci_dentry; +}; + +extern int config_item_set_name(struct config_item *, const char *, ...); + +static inline char *config_item_name(struct config_item * item) +{ + return item->ci_name; +} + +extern void config_item_init(struct config_item *); +extern void config_item_init_type_name(struct config_item *item, + const char *name, + struct config_item_type *type); + +extern struct config_item * config_item_get(struct config_item *); +extern void config_item_put(struct config_item *); + +struct config_item_type { + struct module *ct_owner; + struct configfs_item_operations *ct_item_ops; + struct configfs_group_operations *ct_group_ops; + struct configfs_attribute **ct_attrs; +}; + +/** + * group - a group of config_items of a specific type, belonging + * to a specific subsystem. + */ +struct config_group { + struct config_item cg_item; + struct list_head cg_children; + struct configfs_subsystem *cg_subsys; + struct config_group **default_groups; +}; + +extern void config_group_init(struct config_group *group); +extern void config_group_init_type_name(struct config_group *group, + const char *name, + struct config_item_type *type); + +static inline struct config_group *to_config_group(struct config_item *item) +{ + return item ? container_of(item,struct config_group,cg_item) : NULL; +} + +static inline struct config_group *config_group_get(struct config_group *group) +{ + return group ? to_config_group(config_item_get(&group->cg_item)) : NULL; +} + +static inline void config_group_put(struct config_group *group) +{ + config_item_put(&group->cg_item); +} + +extern struct config_item *config_group_find_item(struct config_group *, + const char *); + + +struct configfs_attribute { + const char *ca_name; + struct module *ca_owner; + umode_t ca_mode; +}; + +/* + * Users often need to create attribute structures for their configurable + * attributes, containing a configfs_attribute member and function pointers + * for the show() and store() operations on that attribute. If they don't + * need anything else on the extended attribute structure, they can use + * this macro to define it The argument _item is the name of the + * config_item structure. + */ +#define CONFIGFS_ATTR_STRUCT(_item) \ +struct _item##_attribute { \ + struct configfs_attribute attr; \ + ssize_t (*show)(struct _item *, char *); \ + ssize_t (*store)(struct _item *, const char *, size_t); \ +} + +/* + * With the extended attribute structure, users can use this macro + * (similar to sysfs' __ATTR) to make defining attributes easier. 
+ * An example: + * #define MYITEM_ATTR(_name, _mode, _show, _store) \ + * struct myitem_attribute childless_attr_##_name = \ + * __CONFIGFS_ATTR(_name, _mode, _show, _store) + */ +#define __CONFIGFS_ATTR(_name, _mode, _show, _store) \ +{ \ + .attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = _mode, \ + .ca_owner = THIS_MODULE, \ + }, \ + .show = _show, \ + .store = _store, \ +} +/* Here is a readonly version, only requiring a show() operation */ +#define __CONFIGFS_ATTR_RO(_name, _show) \ +{ \ + .attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = 0444, \ + .ca_owner = THIS_MODULE, \ + }, \ + .show = _show, \ +} + +/* + * With these extended attributes, the simple show_attribute() and + * store_attribute() operations need to call the show() and store() of the + * attributes. This is a common pattern, so we provide a macro to define + * them. The argument _item is the name of the config_item structure. + * This macro expects the attributes to be named "struct <name>_attribute" + * and the function to_<name>() to exist; + */ +#define CONFIGFS_ATTR_OPS(_item) \ +static ssize_t _item##_attr_show(struct config_item *item, \ + struct configfs_attribute *attr, \ + char *page) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = 0; \ + \ + if (_item##_attr->show) \ + ret = _item##_attr->show(_item, page); \ + return ret; \ +} \ +static ssize_t _item##_attr_store(struct config_item *item, \ + struct configfs_attribute *attr, \ + const char *page, size_t count) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = -EINVAL; \ + \ + if (_item##_attr->store) \ + ret = _item##_attr->store(_item, page, count); \ + return ret; \ +} + +/* + * If allow_link() exists, the item can symlink(2) out to other + * items. If the item is a group, it may support mkdir(2). + * Groups supply one of make_group() and make_item(). If the + * group supports make_group(), one can create group children. If it + * supports make_item(), one can create config_item children. make_group() + * and make_item() return ERR_PTR() on errors. If it has + * default_groups on group->default_groups, it has automatically created + * group children. default_groups may coexist alongsize make_group() or + * make_item(), but if the group wishes to have only default_groups + * children (disallowing mkdir(2)), it need not provide either function. + * If the group has commit(), it supports pending and committed (active) + * items. 
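+ *
+ * Sketch of a make_item() implementation (illustrative; "struct my_item"
+ * and "my_item_type" are hypothetical):
+ *
+ *	static struct config_item *my_make_item(struct config_group *group,
+ *						const char *name)
+ *	{
+ *		struct my_item *it = kzalloc(sizeof(*it), GFP_KERNEL);
+ *
+ *		if (!it)
+ *			return ERR_PTR(-ENOMEM);
+ *		config_item_init_type_name(&it->item, name, &my_item_type);
+ *		return &it->item;
+ *	}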
+ */ +struct configfs_item_operations { + void (*release)(struct config_item *); + ssize_t (*show_attribute)(struct config_item *, struct configfs_attribute *,char *); + ssize_t (*store_attribute)(struct config_item *,struct configfs_attribute *,const char *, size_t); + int (*allow_link)(struct config_item *src, struct config_item *target); + int (*drop_link)(struct config_item *src, struct config_item *target); +}; + +struct configfs_group_operations { + struct config_item *(*make_item)(struct config_group *group, const char *name); + struct config_group *(*make_group)(struct config_group *group, const char *name); + int (*commit_item)(struct config_item *item); + void (*disconnect_notify)(struct config_group *group, struct config_item *item); + void (*drop_item)(struct config_group *group, struct config_item *item); +}; + +struct configfs_subsystem { + struct config_group su_group; + struct mutex su_mutex; +}; + +static inline struct configfs_subsystem *to_configfs_subsystem(struct config_group *group) +{ + return group ? + container_of(group, struct configfs_subsystem, su_group) : + NULL; +} + +int configfs_register_subsystem(struct configfs_subsystem *subsys); +void configfs_unregister_subsystem(struct configfs_subsystem *subsys); + +/* These functions can sleep and can alloc with GFP_KERNEL */ +/* WARNING: These cannot be called underneath configfs callbacks!! */ +int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target); +void configfs_undepend_item(struct configfs_subsystem *subsys, struct config_item *target); + +#endif /* _CONFIGFS_H_ */ diff --git a/include/linux/connector.h b/include/linux/connector.h new file mode 100644 index 00000000..76384074 --- /dev/null +++ b/include/linux/connector.h @@ -0,0 +1,141 @@ +/* + * connector.h + * + * 2004-2005 Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __CONNECTOR_H +#define __CONNECTOR_H + +#include <linux/types.h> + +/* + * Process Events connector unique ids -- used for message routing + */ +#define CN_IDX_PROC 0x1 +#define CN_VAL_PROC 0x1 +#define CN_IDX_CIFS 0x2 +#define CN_VAL_CIFS 0x1 +#define CN_W1_IDX 0x3 /* w1 communication */ +#define CN_W1_VAL 0x1 +#define CN_IDX_V86D 0x4 +#define CN_VAL_V86D_UVESAFB 0x1 +#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ +#define CN_DST_IDX 0x6 +#define CN_DST_VAL 0x1 +#define CN_IDX_DM 0x7 /* Device Mapper */ +#define CN_VAL_DM_USERSPACE_LOG 0x1 +#define CN_IDX_DRBD 0x8 +#define CN_VAL_DRBD 0x1 +#define CN_KVP_IDX 0x9 /* HyperV KVP */ +#define CN_KVP_VAL 0x1 /* queries from the kernel */ + +#define CN_NETLINK_USERS 10 /* Highest index + 1 */ + +/* + * Maximum connector's message size. 
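+ * A kernel-side send sketch (illustrative; "my_id", "data" and "data_len"
+ * are hypothetical, error handling is omitted, and sizeof(struct cn_msg)
+ * plus the payload should stay within this limit):
+ *
+ *	struct cn_msg *msg = kzalloc(sizeof(*msg) + data_len, GFP_KERNEL);
+ *
+ *	msg->id = my_id;
+ *	msg->len = data_len;
+ *	memcpy(msg->data, data, data_len);
+ *	cn_netlink_send(msg, 0, GFP_KERNEL);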
+ */ +#define CONNECTOR_MAX_MSG_SIZE 16384 + +/* + * idx and val are unique identifiers which + * are used for message routing and + * must be registered in connector.h for in-kernel usage. + */ + +struct cb_id { + __u32 idx; + __u32 val; +}; + +struct cn_msg { + struct cb_id id; + + __u32 seq; + __u32 ack; + + __u16 len; /* Length of the following data */ + __u16 flags; + __u8 data[0]; +}; + +#ifdef __KERNEL__ + +#include <linux/atomic.h> + +#include <linux/list.h> +#include <linux/workqueue.h> + +#include <net/sock.h> + +#define CN_CBQ_NAMELEN 32 + +struct cn_queue_dev { + atomic_t refcnt; + unsigned char name[CN_CBQ_NAMELEN]; + + struct list_head queue_list; + spinlock_t queue_lock; + + struct sock *nls; +}; + +struct cn_callback_id { + unsigned char name[CN_CBQ_NAMELEN]; + struct cb_id id; +}; + +struct cn_callback_entry { + struct list_head callback_entry; + atomic_t refcnt; + struct cn_queue_dev *pdev; + + struct cn_callback_id id; + void (*callback) (struct cn_msg *, struct netlink_skb_parms *); + + u32 seq, group; +}; + +struct cn_dev { + struct cb_id id; + + u32 seq, groups; + struct sock *nls; + void (*input) (struct sk_buff *skb); + + struct cn_queue_dev *cbdev; +}; + +int cn_add_callback(struct cb_id *id, const char *name, + void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); +void cn_del_callback(struct cb_id *); +int cn_netlink_send(struct cn_msg *, u32, gfp_t); + +int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, + struct cb_id *id, + void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); +void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); +void cn_queue_release_callback(struct cn_callback_entry *); + +struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *); +void cn_queue_free_dev(struct cn_queue_dev *dev); + +int cn_cb_equal(struct cb_id *, struct cb_id *); + +#endif /* __KERNEL__ */ +#endif /* __CONNECTOR_H */ diff --git a/include/linux/console.h b/include/linux/console.h new file mode 100644 index 00000000..7201ce42 --- /dev/null +++ b/include/linux/console.h @@ -0,0 +1,184 @@ +/* + * linux/include/linux/console.h + * + * Copyright (C) 1993 Hamish Macdonald + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + * Changed: + * 10-Mar-94: Arno Griffioen: Conversion for vt100 emulator port from PC LINUX + */ + +#ifndef _LINUX_CONSOLE_H_ +#define _LINUX_CONSOLE_H_ 1 + +#include <linux/types.h> + +struct vc_data; +struct console_font_op; +struct console_font; +struct module; +struct tty_struct; + +/* + * this is what the terminal answers to a ESC-Z or csi0c query. 
+ */ +#define VT100ID "\033[?1;2c" +#define VT102ID "\033[?6c" + +struct consw { + struct module *owner; + const char *(*con_startup)(void); + void (*con_init)(struct vc_data *, int); + void (*con_deinit)(struct vc_data *); + void (*con_clear)(struct vc_data *, int, int, int, int); + void (*con_putc)(struct vc_data *, int, int, int); + void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); + void (*con_cursor)(struct vc_data *, int); + int (*con_scroll)(struct vc_data *, int, int, int, int); + void (*con_bmove)(struct vc_data *, int, int, int, int, int, int); + int (*con_switch)(struct vc_data *); + int (*con_blank)(struct vc_data *, int, int); + int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); + int (*con_font_get)(struct vc_data *, struct console_font *); + int (*con_font_default)(struct vc_data *, struct console_font *, char *); + int (*con_font_copy)(struct vc_data *, int); + int (*con_resize)(struct vc_data *, unsigned int, unsigned int, + unsigned int); + int (*con_set_palette)(struct vc_data *, unsigned char *); + int (*con_scrolldelta)(struct vc_data *, int); + int (*con_set_origin)(struct vc_data *); + void (*con_save_screen)(struct vc_data *); + u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8); + void (*con_invert_region)(struct vc_data *, u16 *, int); + u16 *(*con_screen_pos)(struct vc_data *, int); + unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); + /* + * Prepare the console for the debugger. This includes, but is not + * limited to, unblanking the console, loading an appropriate + * palette, and allowing debugger generated output. + */ + int (*con_debug_enter)(struct vc_data *); + /* + * Restore the console to its pre-debug state as closely as possible. + */ + int (*con_debug_leave)(struct vc_data *); +}; + +extern const struct consw *conswitchp; + +extern const struct consw dummy_con; /* dummy console buffer */ +extern const struct consw vga_con; /* VGA text console */ +extern const struct consw newport_con; /* SGI Newport console */ +extern const struct consw prom_con; /* SPARC PROM console */ + +int con_is_bound(const struct consw *csw); +int register_con_driver(const struct consw *csw, int first, int last); +int unregister_con_driver(const struct consw *csw); +int take_over_console(const struct consw *sw, int first, int last, int deflt); +void give_up_console(const struct consw *sw); +#ifdef CONFIG_HW_CONSOLE +int con_debug_enter(struct vc_data *vc); +int con_debug_leave(void); +#else +#define con_debug_enter(vc) (0) +#define con_debug_leave() (0) +#endif + +/* scroll */ +#define SM_UP (1) +#define SM_DOWN (2) + +/* cursor */ +#define CM_DRAW (1) +#define CM_ERASE (2) +#define CM_MOVE (3) + +/* + * The interface for a console, or any other device that wants to capture + * console messages (printer driver?) + * + * If a console driver is marked CON_BOOT then it will be auto-unregistered + * when the first real console is registered. This is for early-printk drivers. 
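+ *
+ * Minimal driver sketch (illustrative; "my_console_write" is a hypothetical
+ * write method):
+ *
+ *	static struct console my_console = {
+ *		.name	= "mycon",
+ *		.write	= my_console_write,
+ *		.flags	= CON_PRINTBUFFER,
+ *		.index	= -1,
+ *	};
+ *
+ *	register_console(&my_console);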
+ */ + +#define CON_PRINTBUFFER (1) +#define CON_CONSDEV (2) /* Last on the command line */ +#define CON_ENABLED (4) +#define CON_BOOT (8) +#define CON_ANYTIME (16) /* Safe to call when cpu is offline */ +#define CON_BRL (32) /* Used for a braille device */ + +struct console { + char name[16]; + void (*write)(struct console *, const char *, unsigned); + int (*read)(struct console *, char *, unsigned); + struct tty_driver *(*device)(struct console *, int *); + void (*unblank)(void); + int (*setup)(struct console *, char *); + int (*early_setup)(void); + short flags; + short index; + int cflag; + void *data; + struct console *next; +}; + +/* + * for_each_console() allows you to iterate on each console + */ +#define for_each_console(con) \ + for (con = console_drivers; con != NULL; con = con->next) + +extern int console_set_on_cmdline; + +extern int add_preferred_console(char *name, int idx, char *options); +extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options); +extern void register_console(struct console *); +extern int unregister_console(struct console *); +extern struct console *console_drivers; +extern void console_lock(void); +extern int console_trylock(void); +extern void console_unlock(void); +extern void console_conditional_schedule(void); +extern void console_unblank(void); +extern struct tty_driver *console_device(int *); +extern void console_stop(struct console *); +extern void console_start(struct console *); +extern int is_console_locked(void); +extern int braille_register_console(struct console *, int index, + char *console_options, char *braille_options); +extern int braille_unregister_console(struct console *); +extern void console_sysfs_notify(void); +extern bool console_suspend_enabled; + +/* Suspend and resume console messages over PM events */ +extern void suspend_console(void); +extern void resume_console(void); + +int mda_console_init(void); +void prom_con_init(void); + +void vcs_make_sysfs(int index); +void vcs_remove_sysfs(int index); + +/* Some debug stub to catch some of the obvious races in the VT code */ +#if 1 +#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress) +#else +#define WARN_CONSOLE_UNLOCKED() +#endif + +/* VESA Blanking Levels */ +#define VESA_NO_BLANKING 0 +#define VESA_VSYNC_SUSPEND 1 +#define VESA_HSYNC_SUSPEND 2 +#define VESA_POWERDOWN 3 + +#ifdef CONFIG_VGA_CONSOLE +extern bool vgacon_text_force(void); +#endif + +#endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h new file mode 100644 index 00000000..7f0c3290 --- /dev/null +++ b/include/linux/console_struct.h @@ -0,0 +1,139 @@ +/* + * console_struct.h + * + * Data structure describing single virtual console except for data + * used by vt.c. + * + * Fields marked with [#] must be set by the low-level driver. + * Fields marked with [!] can be changed by the low-level driver + * to achieve effects such as fast scrolling by changing the origin. + */ + +#ifndef _LINUX_CONSOLE_STRUCT_H +#define _LINUX_CONSOLE_STRUCT_H + +#include <linux/wait.h> +#include <linux/vt.h> +#include <linux/workqueue.h> + +struct vt_struct; + +#define NPAR 16 + +struct vc_data { + struct tty_port port; /* Upper level data */ + + unsigned short vc_num; /* Console number */ + unsigned int vc_cols; /* [#] Console size */ + unsigned int vc_rows; + unsigned int vc_size_row; /* Bytes per row */ + unsigned int vc_scan_lines; /* # of scan lines */ + unsigned long vc_origin; /* [!] 
Start of real screen */ + unsigned long vc_scr_end; /* [!] End of real screen */ + unsigned long vc_visible_origin; /* [!] Top of visible window */ + unsigned int vc_top, vc_bottom; /* Scrolling region */ + const struct consw *vc_sw; + unsigned short *vc_screenbuf; /* In-memory character/attribute buffer */ + unsigned int vc_screenbuf_size; + unsigned char vc_mode; /* KD_TEXT, ... */ + /* attributes for all characters on screen */ + unsigned char vc_attr; /* Current attributes */ + unsigned char vc_def_color; /* Default colors */ + unsigned char vc_color; /* Foreground & background */ + unsigned char vc_s_color; /* Saved foreground & background */ + unsigned char vc_ulcolor; /* Color for underline mode */ + unsigned char vc_itcolor; + unsigned char vc_halfcolor; /* Color for half intensity mode */ + /* cursor */ + unsigned int vc_cursor_type; + unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */ + unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */ + unsigned int vc_x, vc_y; /* Cursor position */ + unsigned int vc_saved_x, vc_saved_y; + unsigned long vc_pos; /* Cursor address */ + /* fonts */ + unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ + struct console_font vc_font; /* Current VC font set */ + unsigned short vc_video_erase_char; /* Background erase character */ + /* VT terminal data */ + unsigned int vc_state; /* Escape sequence parser state */ + unsigned int vc_npar,vc_par[NPAR]; /* Parameters of current escape sequence */ + /* data for manual vt switching */ + struct vt_mode vt_mode; + struct pid *vt_pid; + int vt_newvt; + wait_queue_head_t paste_wait; + /* mode flags */ + unsigned int vc_charset : 1; /* Character set G0 / G1 */ + unsigned int vc_s_charset : 1; /* Saved character set */ + unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */ + unsigned int vc_toggle_meta : 1; /* Toggle high bit? */ + unsigned int vc_decscnm : 1; /* Screen Mode */ + unsigned int vc_decom : 1; /* Origin Mode */ + unsigned int vc_decawm : 1; /* Autowrap Mode */ + unsigned int vc_deccm : 1; /* Cursor Visible */ + unsigned int vc_decim : 1; /* Insert Mode */ + unsigned int vc_deccolm : 1; /* 80/132 Column Mode */ + /* attribute flags */ + unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ + unsigned int vc_italic:1; + unsigned int vc_underline : 1; + unsigned int vc_blink : 1; + unsigned int vc_reverse : 1; + unsigned int vc_s_intensity : 2; /* saved rendition */ + unsigned int vc_s_italic:1; + unsigned int vc_s_underline : 1; + unsigned int vc_s_blink : 1; + unsigned int vc_s_reverse : 1; + /* misc */ + unsigned int vc_ques : 1; + unsigned int vc_need_wrap : 1; + unsigned int vc_can_do_color : 1; + unsigned int vc_report_mouse : 2; + unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */ + unsigned char vc_utf_count; + int vc_utf_char; + unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */ + unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */ + unsigned short * vc_translate; + unsigned char vc_G0_charset; + unsigned char vc_G1_charset; + unsigned char vc_saved_G0; + unsigned char vc_saved_G1; + unsigned int vc_resize_user; /* resize request from user */ + unsigned int vc_bell_pitch; /* Console bell pitch */ + unsigned int vc_bell_duration; /* Console bell duration */ + struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ + unsigned long vc_uni_pagedir; + unsigned long *vc_uni_pagedir_loc; /* [!] 
Location of uni_pagedir variable for this console */ + bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */ + /* additional information is in vt_kern.h */ +}; + +struct vc { + struct vc_data *d; + struct work_struct SAK_work; + + /* might add scrmem, vt_struct, kbd at some time, + to have everything in one place - the disadvantage + would be that vc_cons etc can no longer be static */ +}; + +extern struct vc vc_cons [MAX_NR_CONSOLES]; +extern void vc_SAK(struct work_struct *work); + +#define CUR_DEF 0 +#define CUR_NONE 1 +#define CUR_UNDERLINE 2 +#define CUR_LOWER_THIRD 3 +#define CUR_LOWER_HALF 4 +#define CUR_TWO_THIRDS 5 +#define CUR_BLOCK 6 +#define CUR_HWMASK 0x0f +#define CUR_SWMASK 0xfff0 + +#define CUR_DEFAULT CUR_UNDERLINE + +#define CON_IS_VISIBLE(conp) (*conp->vc_display_fg == conp) + +#endif /* _LINUX_CONSOLE_STRUCT_H */ diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h new file mode 100644 index 00000000..c4811da1 --- /dev/null +++ b/include/linux/consolemap.h @@ -0,0 +1,34 @@ +/* + * consolemap.h + * + * Interface between console.c, selection.c and consolemap.c + */ +#ifndef __LINUX_CONSOLEMAP_H__ +#define __LINUX_CONSOLEMAP_H__ + +#define LAT1_MAP 0 +#define GRAF_MAP 1 +#define IBMPC_MAP 2 +#define USER_MAP 3 + +#include <linux/types.h> + +#ifdef CONFIG_CONSOLE_TRANSLATIONS +struct vc_data; + +extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode); +extern unsigned short *set_translate(int m, struct vc_data *vc); +extern int conv_uni_to_pc(struct vc_data *conp, long ucs); +extern u32 conv_8bit_to_uni(unsigned char c); +extern int conv_uni_to_8bit(u32 uni); +void console_map_init(void); +#else +#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph) +#define set_translate(m, vc) ((unsigned short *)NULL) +#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs)) +#define conv_8bit_to_uni(c) ((uint32_t)(c)) +#define conv_uni_to_8bit(c) ((int) ((c) & 0xff)) +#define console_map_init(c) do { ; } while (0) +#endif /* CONFIG_CONSOLE_TRANSLATIONS */ + +#endif /* __LINUX_CONSOLEMAP_H__ */ diff --git a/include/linux/const.h b/include/linux/const.h new file mode 100644 index 00000000..c22c707c --- /dev/null +++ b/include/linux/const.h @@ -0,0 +1,24 @@ +/* const.h: Macros for dealing with constants. */ + +#ifndef _LINUX_CONST_H +#define _LINUX_CONST_H + +/* Some constant macros are used in both assembler and + * C code. Therefore we cannot annotate them always with + * 'UL' and other type specifiers unilaterally. We + * use the following macros to deal with this. + * + * Similarly, _AT() will cast an expression with a type in C, but + * leave it unchanged in asm. + */ + +#ifdef __ASSEMBLY__ +#define _AC(X,Y) X +#define _AT(T,X) X +#else +#define __AC(X,Y) (X##Y) +#define _AC(X,Y) __AC(X,Y) +#define _AT(T,X) ((T)(X)) +#endif + +#endif /* !(_LINUX_CONST_H) */ diff --git a/include/linux/cordic.h b/include/linux/cordic.h new file mode 100644 index 00000000..cf68ca4a --- /dev/null +++ b/include/linux/cordic.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2011 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __CORDIC_H_ +#define __CORDIC_H_ + +#include <linux/types.h> + +/** + * struct cordic_iq - i/q coordinate. + * + * @i: real part of coordinate (in phase). + * @q: imaginary part of coordinate (quadrature). + */ +struct cordic_iq { + s32 i; + s32 q; +}; + +/** + * cordic_calc_iq() - calculates the i/q coordinate for given angle. + * + * @theta: angle in degrees for which i/q coordinate is to be calculated. + * @coord: function output parameter holding the i/q coordinate. + * + * The function calculates the i/q coordinate for a given angle using the + * CORDIC algorithm. The coordinate consists of a real (i) and an + * imaginary (q) part. The real part is essentially the cosine of the + * angle and the imaginary part is the sine of the angle. The returned + * values are scaled by 2^16 for precision. The range for theta is + * for -180 degrees to +180 degrees. Passed values outside this range are + * converted before doing the actual calculation. + */ +struct cordic_iq cordic_calc_iq(s32 theta); + +#endif /* __CORDIC_H_ */ diff --git a/include/linux/coredump.h b/include/linux/coredump.h new file mode 100644 index 00000000..ba4b85a6 --- /dev/null +++ b/include/linux/coredump.h @@ -0,0 +1,15 @@ +#ifndef _LINUX_COREDUMP_H +#define _LINUX_COREDUMP_H + +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/fs.h> + +/* + * These are the only things you should do on a core-file: use only these + * functions to write out all the necessary info. + */ +extern int dump_write(struct file *file, const void *addr, int nr); +extern int dump_seek(struct file *file, loff_t off); + +#endif /* _LINUX_COREDUMP_H */ diff --git a/include/linux/cper.h b/include/linux/cper.h new file mode 100644 index 00000000..c2304949 --- /dev/null +++ b/include/linux/cper.h @@ -0,0 +1,394 @@ +/* + * UEFI Common Platform Error Record + * + * Copyright (C) 2010, Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef LINUX_CPER_H +#define LINUX_CPER_H + +#include <linux/uuid.h> + +/* CPER record signature and the size */ +#define CPER_SIG_RECORD "CPER" +#define CPER_SIG_SIZE 4 +/* Used in signature_end field in struct cper_record_header */ +#define CPER_SIG_END 0xffffffff + +/* + * CPER record header revision, used in revision field in struct + * cper_record_header + */ +#define CPER_RECORD_REV 0x0100 + +/* + * Severity difinition for error_severity in struct cper_record_header + * and section_severity in struct cper_section_descriptor + */ +enum { + CPER_SEV_RECOVERABLE, + CPER_SEV_FATAL, + CPER_SEV_CORRECTED, + CPER_SEV_INFORMATIONAL, +}; + +/* + * Validation bits difinition for validation_bits in struct + * cper_record_header. If set, corresponding fields in struct + * cper_record_header contain valid information. + * + * corresponds platform_id + */ +#define CPER_VALID_PLATFORM_ID 0x0001 +/* corresponds timestamp */ +#define CPER_VALID_TIMESTAMP 0x0002 +/* corresponds partition_id */ +#define CPER_VALID_PARTITION_ID 0x0004 + +/* + * Notification type used to generate error record, used in + * notification_type in struct cper_record_header + * + * Corrected Machine Check + */ +#define CPER_NOTIFY_CMC \ + UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ + 0xEB, 0xD4, 0xF8, 0x90) +/* Corrected Platform Error */ +#define CPER_NOTIFY_CPE \ + UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \ + 0xF2, 0x7E, 0xBE, 0xEE) +/* Machine Check Exception */ +#define CPER_NOTIFY_MCE \ + UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \ + 0xE1, 0x49, 0x13, 0xBB) +/* PCI Express Error */ +#define CPER_NOTIFY_PCIE \ + UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \ + 0xAF, 0x67, 0xC1, 0x04) +/* INIT Record (for IPF) */ +#define CPER_NOTIFY_INIT \ + UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \ + 0xD3, 0x9B, 0xC9, 0x8E) +/* Non-Maskable Interrupt */ +#define CPER_NOTIFY_NMI \ + UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \ + 0x85, 0xD6, 0xE9, 0x8A) +/* BOOT Error Record */ +#define CPER_NOTIFY_BOOT \ + UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \ + 0xD4, 0x64, 0xB3, 0x8F) +/* DMA Remapping Error */ +#define CPER_NOTIFY_DMAR \ + UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \ + 0x72, 0x2D, 0xEB, 0x41) + +/* + * Flags bits definitions for flags in struct cper_record_header + * If set, the error has been recovered + */ +#define CPER_HW_ERROR_FLAGS_RECOVERED 0x1 +/* If set, the error is for previous boot */ +#define CPER_HW_ERROR_FLAGS_PREVERR 0x2 +/* If set, the error is injected for testing */ +#define CPER_HW_ERROR_FLAGS_SIMULATED 0x4 + +/* + * CPER section header revision, used in revision field in struct + * cper_section_descriptor + */ +#define CPER_SEC_REV 0x0100 + +/* + * Validation bits difinition for validation_bits in struct + * cper_section_descriptor. If set, corresponding fields in struct + * cper_section_descriptor contain valid information. 
+ * + * corresponds fru_id + */ +#define CPER_SEC_VALID_FRU_ID 0x1 +/* corresponds fru_text */ +#define CPER_SEC_VALID_FRU_TEXT 0x2 + +/* + * Flags bits definitions for flags in struct cper_section_descriptor + * + * If set, the section is associated with the error condition + * directly, and should be focused on + */ +#define CPER_SEC_PRIMARY 0x0001 +/* + * If set, the error was not contained within the processor or memory + * hierarchy and the error may have propagated to persistent storage + * or network + */ +#define CPER_SEC_CONTAINMENT_WARNING 0x0002 +/* If set, the component must be re-initialized or re-enabled prior to use */ +#define CPER_SEC_RESET 0x0004 +/* If set, Linux may choose to discontinue use of the resource */ +#define CPER_SEC_ERROR_THRESHOLD_EXCEEDED 0x0008 +/* + * If set, resource could not be queried for error information due to + * conflicts with other system software or resources. Some fields of + * the section will be invalid + */ +#define CPER_SEC_RESOURCE_NOT_ACCESSIBLE 0x0010 +/* + * If set, action has been taken to ensure error containment (such as + * poisoning data), but the error has not been fully corrected and the + * data has not been consumed. Linux may choose to take further + * corrective action before the data is consumed + */ +#define CPER_SEC_LATENT_ERROR 0x0020 + +/* + * Section type definitions, used in section_type field in struct + * cper_section_descriptor + * + * Processor Generic + */ +#define CPER_SEC_PROC_GENERIC \ + UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \ + 0x93, 0xC4, 0xF3, 0xDB) +/* Processor Specific: X86/X86_64 */ +#define CPER_SEC_PROC_IA \ + UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \ + 0x24, 0x2B, 0x6E, 0x1D) +/* Processor Specific: IA64 */ +#define CPER_SEC_PROC_IPF \ + UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \ + 0x80, 0xC7, 0x3C, 0x88, 0x81) +/* Platform Memory */ +#define CPER_SEC_PLATFORM_MEM \ + UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ + 0xED, 0x7C, 0x83, 0xB1) +#define CPER_SEC_PCIE \ + UUID_LE(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \ + 0xCB, 0x3C, 0x6F, 0x35) +/* Firmware Error Record Reference */ +#define CPER_SEC_FW_ERR_REC_REF \ + UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \ + 0x9C, 0x8E, 0x69, 0xED) +/* PCI/PCI-X Bus */ +#define CPER_SEC_PCI_X_BUS \ + UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \ + 0xD3, 0xF9, 0xC9, 0xDD) +/* PCI Component/Device */ +#define CPER_SEC_PCI_DEV \ + UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \ + 0x8B, 0x00, 0x13, 0x26) +#define CPER_SEC_DMAR_GENERIC \ + UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \ + 0xDE, 0x3E, 0x2C, 0x64) +/* Intel VT for Directed I/O specific DMAr */ +#define CPER_SEC_DMAR_VT \ + UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \ + 0xDD, 0x93, 0xE8, 0xCF) +/* IOMMU specific DMAr */ +#define CPER_SEC_DMAR_IOMMU \ + UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ + 0xDF, 0xAA, 0x84, 0xEC) + +#define CPER_PROC_VALID_TYPE 0x0001 +#define CPER_PROC_VALID_ISA 0x0002 +#define CPER_PROC_VALID_ERROR_TYPE 0x0004 +#define CPER_PROC_VALID_OPERATION 0x0008 +#define CPER_PROC_VALID_FLAGS 0x0010 +#define CPER_PROC_VALID_LEVEL 0x0020 +#define CPER_PROC_VALID_VERSION 0x0040 +#define CPER_PROC_VALID_BRAND_INFO 0x0080 +#define CPER_PROC_VALID_ID 0x0100 +#define CPER_PROC_VALID_TARGET_ADDRESS 0x0200 +#define CPER_PROC_VALID_REQUESTOR_ID 0x0400 +#define CPER_PROC_VALID_RESPONDER_ID 0x0800 +#define 
CPER_PROC_VALID_IP 0x1000 + +#define CPER_MEM_VALID_ERROR_STATUS 0x0001 +#define CPER_MEM_VALID_PHYSICAL_ADDRESS 0x0002 +#define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK 0x0004 +#define CPER_MEM_VALID_NODE 0x0008 +#define CPER_MEM_VALID_CARD 0x0010 +#define CPER_MEM_VALID_MODULE 0x0020 +#define CPER_MEM_VALID_BANK 0x0040 +#define CPER_MEM_VALID_DEVICE 0x0080 +#define CPER_MEM_VALID_ROW 0x0100 +#define CPER_MEM_VALID_COLUMN 0x0200 +#define CPER_MEM_VALID_BIT_POSITION 0x0400 +#define CPER_MEM_VALID_REQUESTOR_ID 0x0800 +#define CPER_MEM_VALID_RESPONDER_ID 0x1000 +#define CPER_MEM_VALID_TARGET_ID 0x2000 +#define CPER_MEM_VALID_ERROR_TYPE 0x4000 + +#define CPER_PCIE_VALID_PORT_TYPE 0x0001 +#define CPER_PCIE_VALID_VERSION 0x0002 +#define CPER_PCIE_VALID_COMMAND_STATUS 0x0004 +#define CPER_PCIE_VALID_DEVICE_ID 0x0008 +#define CPER_PCIE_VALID_SERIAL_NUMBER 0x0010 +#define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS 0x0020 +#define CPER_PCIE_VALID_CAPABILITY 0x0040 +#define CPER_PCIE_VALID_AER_INFO 0x0080 + +#define CPER_PCIE_SLOT_SHIFT 3 + +/* + * All tables and structs must be byte-packed to match CPER + * specification, since the tables are provided by the system BIOS + */ +#pragma pack(1) + +struct cper_record_header { + char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */ + __u16 revision; /* must be CPER_RECORD_REV */ + __u32 signature_end; /* must be CPER_SIG_END */ + __u16 section_count; + __u32 error_severity; + __u32 validation_bits; + __u32 record_length; + __u64 timestamp; + uuid_le platform_id; + uuid_le partition_id; + uuid_le creator_id; + uuid_le notification_type; + __u64 record_id; + __u32 flags; + __u64 persistence_information; + __u8 reserved[12]; /* must be zero */ +}; + +struct cper_section_descriptor { + __u32 section_offset; /* Offset in bytes of the + * section body from the base + * of the record header */ + __u32 section_length; + __u16 revision; /* must be CPER_RECORD_REV */ + __u8 validation_bits; + __u8 reserved; /* must be zero */ + __u32 flags; + uuid_le section_type; + uuid_le fru_id; + __u32 section_severity; + __u8 fru_text[20]; +}; + +/* Generic Processor Error Section */ +struct cper_sec_proc_generic { + __u64 validation_bits; + __u8 proc_type; + __u8 proc_isa; + __u8 proc_error_type; + __u8 operation; + __u8 flags; + __u8 level; + __u16 reserved; + __u64 cpu_version; + char cpu_brand[128]; + __u64 proc_id; + __u64 target_addr; + __u64 requestor_id; + __u64 responder_id; + __u64 ip; +}; + +/* IA32/X64 Processor Error Section */ +struct cper_sec_proc_ia { + __u64 validation_bits; + __u8 lapic_id; + __u8 cpuid[48]; +}; + +/* IA32/X64 Processor Error Information Structure */ +struct cper_ia_err_info { + uuid_le err_type; + __u64 validation_bits; + __u64 check_info; + __u64 target_id; + __u64 requestor_id; + __u64 responder_id; + __u64 ip; +}; + +/* IA32/X64 Processor Context Information Structure */ +struct cper_ia_proc_ctx { + __u16 reg_ctx_type; + __u16 reg_arr_size; + __u32 msr_addr; + __u64 mm_reg_addr; +}; + +/* Memory Error Section */ +struct cper_sec_mem_err { + __u64 validation_bits; + __u64 error_status; + __u64 physical_addr; + __u64 physical_addr_mask; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u8 error_type; +}; + +struct cper_sec_pcie { + __u64 validation_bits; + __u32 port_type; + struct { + __u8 minor; + __u8 major; + __u8 reserved[2]; + } version; + __u16 command; + __u16 status; + __u32 reserved; + struct { + 
__u16 vendor_id; + __u16 device_id; + __u8 class_code[3]; + __u8 function; + __u8 device; + __u16 segment; + __u8 bus; + __u8 secondary_bus; + __u16 slot; + __u8 reserved; + } device_id; + struct { + __u32 lower; + __u32 upper; + } serial_number; + struct { + __u16 secondary_status; + __u16 control; + } bridge; + __u8 capability[60]; + __u8 aer_info[96]; +}; + +/* Reset to default packing */ +#pragma pack() + +u64 cper_next_record_id(void); +void cper_print_bits(const char *prefix, unsigned int bits, + const char *strs[], unsigned int strs_size); + +#endif diff --git a/include/linux/cpu.h b/include/linux/cpu.h new file mode 100644 index 00000000..11dd01b8 --- /dev/null +++ b/include/linux/cpu.h @@ -0,0 +1,222 @@ +/* + * include/linux/cpu.h - generic cpu definition + * + * This is mainly for topological representation. We define the + * basic 'struct cpu' here, which can be embedded in per-arch + * definitions of processors. + * + * Basic handling of the devices is done in drivers/base/cpu.c + * and system devices are handled in drivers/base/sys.c. + * + * CPUs are exported via sysfs in the class/cpu/devices/ + * directory. + */ +#ifndef _LINUX_CPU_H_ +#define _LINUX_CPU_H_ + +#include <linux/node.h> +#include <linux/compiler.h> +#include <linux/cpumask.h> + +struct device; + +struct cpu { + int node_id; /* The node which contains the CPU */ + int hotpluggable; /* creates sysfs control file if hotpluggable */ + struct device dev; +}; + +extern int register_cpu(struct cpu *cpu, int num); +extern struct device *get_cpu_device(unsigned cpu); +extern bool cpu_is_hotpluggable(unsigned cpu); + +extern int cpu_add_dev_attr(struct device_attribute *attr); +extern void cpu_remove_dev_attr(struct device_attribute *attr); + +extern int cpu_add_dev_attr_group(struct attribute_group *attrs); +extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); + +extern int sched_create_sysfs_power_savings_entries(struct device *dev); + +#ifdef CONFIG_HOTPLUG_CPU +extern void unregister_cpu(struct cpu *cpu); +extern ssize_t arch_cpu_probe(const char *, size_t); +extern ssize_t arch_cpu_release(const char *, size_t); +#endif +struct notifier_block; + +#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE +extern int arch_cpu_uevent(struct device *dev, struct kobj_uevent_env *env); +extern ssize_t arch_print_cpu_modalias(struct device *dev, + struct device_attribute *attr, + char *bufptr); +#endif + +/* + * CPU notifier priorities. + */ +enum { + /* + * SCHED_ACTIVE marks a cpu which is coming up active during + * CPU_ONLINE and CPU_DOWN_FAILED and must be the first + * notifier. CPUSET_ACTIVE adjusts cpuset according to + * cpu_active mask right after SCHED_ACTIVE. During + * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are + * ordered in the similar way. + * + * This ordering guarantees consistent cpu_active mask and + * migration behavior to all cpu notifiers. 
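+ *
+ * Illustrative notifier sketch (hypothetical callback and helpers; pick a
+ * priority consistent with the ordering above, 0 being the common default):
+ *
+ *	static int my_cpu_notify(struct notifier_block *nb,
+ *				 unsigned long action, void *hcpu)
+ *	{
+ *		unsigned int cpu = (unsigned long)hcpu;
+ *
+ *		switch (action & ~CPU_TASKS_FROZEN) {
+ *		case CPU_ONLINE:
+ *			my_setup(cpu);
+ *			break;
+ *		case CPU_DOWN_PREPARE:
+ *			my_teardown(cpu);
+ *			break;
+ *		}
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	cpu_notifier(my_cpu_notify, 0);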
+ */ + CPU_PRI_SCHED_ACTIVE = INT_MAX, + CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1, + CPU_PRI_SCHED_INACTIVE = INT_MIN + 1, + CPU_PRI_CPUSET_INACTIVE = INT_MIN, + + /* migration should happen before other stuff but after perf */ + CPU_PRI_PERF = 20, + CPU_PRI_MIGRATION = 10, + /* prepare workqueues for other notifiers */ + CPU_PRI_WORKQUEUE = 5, +}; + +#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ +#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ +#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ +#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ +#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ +#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ +#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, + * not handling interrupts, soon dead. + * Called on the dying cpu, interrupts + * are already disabled. Must not + * sleep, must not fail */ +#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug + * lock is dropped */ +#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running. + * Called on the new cpu, just before + * enabling interrupts. Must not sleep, + * must not fail */ + +/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend + * operation in progress + */ +#define CPU_TASKS_FROZEN 0x0010 + +#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN) +#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN) +#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN) +#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) +#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) +#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) +#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) +#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN) + + +#ifdef CONFIG_SMP +/* Need to know about CPUs going up/down? */ +#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) +#define cpu_notifier(fn, pri) { \ + static struct notifier_block fn##_nb __cpuinitdata = \ + { .notifier_call = fn, .priority = pri }; \ + register_cpu_notifier(&fn##_nb); \ +} +#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ +#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) +#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ +#ifdef CONFIG_HOTPLUG_CPU +extern int register_cpu_notifier(struct notifier_block *nb); +extern void unregister_cpu_notifier(struct notifier_block *nb); +#else + +#ifndef MODULE +extern int register_cpu_notifier(struct notifier_block *nb); +#else +static inline int register_cpu_notifier(struct notifier_block *nb) +{ + return 0; +} +#endif + +static inline void unregister_cpu_notifier(struct notifier_block *nb) +{ +} +#endif + +int cpu_up(unsigned int cpu); +void notify_cpu_starting(unsigned int cpu); +extern void cpu_maps_update_begin(void); +extern void cpu_maps_update_done(void); + +#else /* CONFIG_SMP */ + +#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) + +static inline int register_cpu_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline void unregister_cpu_notifier(struct notifier_block *nb) +{ +} + +static inline void cpu_maps_update_begin(void) +{ +} + +static inline void cpu_maps_update_done(void) +{ +} + +#endif /* CONFIG_SMP */ +extern struct bus_type cpu_subsys; + +#ifdef CONFIG_HOTPLUG_CPU +/* Stop CPUs going up and down. 
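+ *
+ * Typical read-side pattern (sketch); CPUs cannot come or go between the
+ * two calls:
+ *
+ *	get_online_cpus();
+ *	for_each_online_cpu(cpu)
+ *		...use per-cpu data of "cpu"...
+ *	put_online_cpus();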
*/ + +extern void get_online_cpus(void); +extern void put_online_cpus(void); +#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) +#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) +#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) +int cpu_down(unsigned int cpu); + +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE +extern void cpu_hotplug_driver_lock(void); +extern void cpu_hotplug_driver_unlock(void); +#else +static inline void cpu_hotplug_driver_lock(void) +{ +} + +static inline void cpu_hotplug_driver_unlock(void) +{ +} +#endif + +#else /* CONFIG_HOTPLUG_CPU */ + +#define get_online_cpus() do { } while (0) +#define put_online_cpus() do { } while (0) +#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) +/* These aren't inline functions due to a GCC bug. */ +#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) +#define unregister_hotcpu_notifier(nb) ({ (void)(nb); }) +#endif /* CONFIG_HOTPLUG_CPU */ + +#ifdef CONFIG_PM_SLEEP_SMP +extern int disable_nonboot_cpus(void); +extern void enable_nonboot_cpus(void); +#else /* !CONFIG_PM_SLEEP_SMP */ +static inline int disable_nonboot_cpus(void) { return 0; } +static inline void enable_nonboot_cpus(void) {} +#endif /* !CONFIG_PM_SLEEP_SMP */ + +#define IDLE_START 1 +#define IDLE_END 2 + +void idle_notifier_register(struct notifier_block *n); +void idle_notifier_unregister(struct notifier_block *n); +void idle_notifier_call_chain(unsigned long val); + +#endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h new file mode 100644 index 00000000..455b233d --- /dev/null +++ b/include/linux/cpu_pm.h @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2011 Google, Inc. + * + * Author: + * Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_CPU_PM_H +#define _LINUX_CPU_PM_H + +#include <linux/kernel.h> +#include <linux/notifier.h> + +/* + * When a CPU goes to a low power state that turns off power to the CPU's + * power domain, the contents of some blocks (floating point coprocessors, + * interrupt controllers, caches, timers) in the same power domain can + * be lost. The cpm_pm notifiers provide a method for platform idle, suspend, + * and hotplug implementations to notify the drivers for these blocks that + * they may be reset. + * + * All cpu_pm notifications must be called with interrupts disabled. + * + * The notifications are split into two classes: CPU notifications and CPU + * cluster notifications. + * + * CPU notifications apply to a single CPU and must be called on the affected + * CPU. They are used to save per-cpu context for affected blocks. + * + * CPU cluster notifications apply to all CPUs in a single power domain. They + * are used to save any global context for affected blocks, and must be called + * after all the CPUs in the power domain have been notified of the low power + * state. 
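+ *
+ * Illustrative call order for platform idle code (sketch; the cluster
+ * calls are issued only by the last CPU going down in the power domain):
+ *
+ *	cpu_pm_enter();
+ *	cpu_cluster_pm_enter();
+ *	...enter the low power state...
+ *	cpu_cluster_pm_exit();
+ *	cpu_pm_exit();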
+ */ + +/* + * Event codes passed as unsigned long val to notifier calls + */ +enum cpu_pm_event { + /* A single cpu is entering a low power state */ + CPU_PM_ENTER, + + /* A single cpu failed to enter a low power state */ + CPU_PM_ENTER_FAILED, + + /* A single cpu is exiting a low power state */ + CPU_PM_EXIT, + + /* A cpu power domain is entering a low power state */ + CPU_CLUSTER_PM_ENTER, + + /* A cpu power domain failed to enter a low power state */ + CPU_CLUSTER_PM_ENTER_FAILED, + + /* A cpu power domain is exiting a low power state */ + CPU_CLUSTER_PM_EXIT, +}; + +#ifdef CONFIG_CPU_PM +int cpu_pm_register_notifier(struct notifier_block *nb); +int cpu_pm_unregister_notifier(struct notifier_block *nb); +int cpu_pm_enter(void); +int cpu_pm_exit(void); +int cpu_cluster_pm_enter(void); +int cpu_cluster_pm_exit(void); + +#else + +static inline int cpu_pm_register_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int cpu_pm_unregister_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int cpu_pm_enter(void) +{ + return 0; +} + +static inline int cpu_pm_exit(void) +{ + return 0; +} + +static inline int cpu_cluster_pm_enter(void) +{ + return 0; +} + +static inline int cpu_cluster_pm_exit(void) +{ + return 0; +} +#endif +#endif diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h new file mode 100644 index 00000000..473771a5 --- /dev/null +++ b/include/linux/cpu_rmap.h @@ -0,0 +1,73 @@ +/* + * cpu_rmap.c: CPU affinity reverse-map support + * Copyright 2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include <linux/cpumask.h> +#include <linux/gfp.h> +#include <linux/slab.h> + +/** + * struct cpu_rmap - CPU affinity reverse-map + * @size: Number of objects to be reverse-mapped + * @used: Number of objects added + * @obj: Pointer to array of object pointers + * @near: For each CPU, the index and distance to the nearest object, + * based on affinity masks + */ +struct cpu_rmap { + u16 size, used; + void **obj; + struct { + u16 index; + u16 dist; + } near[0]; +}; +#define CPU_RMAP_DIST_INF 0xffff + +extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags); + +/** + * free_cpu_rmap - free CPU affinity reverse-map + * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL + */ +static inline void free_cpu_rmap(struct cpu_rmap *rmap) +{ + kfree(rmap); +} + +extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj); +extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, + const struct cpumask *affinity); + +static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu) +{ + return rmap->near[cpu].index; +} + +static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu) +{ + return rmap->obj[rmap->near[cpu].index]; +} + +#ifdef CONFIG_GENERIC_HARDIRQS + +/** + * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs + * @size: Number of objects to be mapped + * + * Must be called in process context. 
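+ *
+ * Illustrative use only (not part of the original header): a multiqueue
+ * network driver with one IRQ per RX queue might build and consult the
+ * map like this; rmap, nr_queues and irq[] are hypothetical:
+ *
+ *    rmap = alloc_irq_cpu_rmap(nr_queues);
+ *    if (!rmap)
+ *        return -ENOMEM;
+ *    for (i = 0; i < nr_queues; i++)
+ *        irq_cpu_rmap_add(rmap, irq[i]);
+ *
+ * cpu_rmap_lookup_index() can then map a running CPU to its nearest
+ * queue.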
+ */ +static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size) +{ + return alloc_cpu_rmap(size, GFP_KERNEL); +} +extern void free_irq_cpu_rmap(struct cpu_rmap *rmap); + +extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq); + +#endif diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h new file mode 100644 index 00000000..d1c3bb0e --- /dev/null +++ b/include/linux/cpufreq.h @@ -0,0 +1,412 @@ +/* + * linux/include/linux/cpufreq.h + * + * Copyright (C) 2001 Russell King + * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_CPUFREQ_H +#define _LINUX_CPUFREQ_H + +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/threads.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/completion.h> +#include <linux/workqueue.h> +#include <linux/cpumask.h> +#include <asm/div64.h> + +#define CPUFREQ_NAME_LEN 16 + + +/********************************************************************* + * CPUFREQ NOTIFIER INTERFACE * + *********************************************************************/ + +#define CPUFREQ_TRANSITION_NOTIFIER (0) +#define CPUFREQ_POLICY_NOTIFIER (1) + +#ifdef CONFIG_CPU_FREQ +int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); +int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); +extern void disable_cpufreq(void); +#else /* CONFIG_CPU_FREQ */ +static inline int cpufreq_register_notifier(struct notifier_block *nb, + unsigned int list) +{ + return 0; +} +static inline int cpufreq_unregister_notifier(struct notifier_block *nb, + unsigned int list) +{ + return 0; +} +static inline void disable_cpufreq(void) { } +#endif /* CONFIG_CPU_FREQ */ + +/* if (cpufreq_driver->target) exists, the ->governor decides what frequency + * within the limits is used. If (cpufreq_driver->setpolicy> exists, these + * two generic policies are available: + */ + +#define CPUFREQ_POLICY_POWERSAVE (1) +#define CPUFREQ_POLICY_PERFORMANCE (2) + +/* Frequency values here are CPU kHz so that hardware which doesn't run + * with some frequencies can complain without having to guess what per + * cent / per mille means. + * Maximum transition latency is in nanoseconds - if it's unknown, + * CPUFREQ_ETERNAL shall be used. 
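+ *
+ * Illustrative sketch (not part of the original header): a client of the
+ * transition notifier interface declared above might re-read the
+ * frequency after each change; my_freq_notify() and my_update_clock()
+ * are hypothetical, while CPUFREQ_POSTCHANGE and struct cpufreq_freqs
+ * are defined further down in this header:
+ *
+ *    static int my_freq_notify(struct notifier_block *nb,
+ *                              unsigned long event, void *data)
+ *    {
+ *        struct cpufreq_freqs *freqs = data;
+ *
+ *        if (event == CPUFREQ_POSTCHANGE)
+ *            my_update_clock(freqs->cpu, freqs->new);
+ *        return NOTIFY_OK;
+ *    }
+ *
+ *    static struct notifier_block my_freq_nb = {
+ *        .notifier_call = my_freq_notify,
+ *    };
+ *
+ *    cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);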
+ */ + +struct cpufreq_governor; + +/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ +extern struct kobject *cpufreq_global_kobject; + +#define CPUFREQ_ETERNAL (-1) +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + + /* in 10^(-9) s = nanoseconds */ + unsigned int transition_latency; +}; + +struct cpufreq_real_policy { + unsigned int min; /* in kHz */ + unsigned int max; /* in kHz */ + unsigned int policy; /* see above */ + struct cpufreq_governor *governor; /* see below */ +}; + +struct cpufreq_policy { + cpumask_var_t cpus; /* CPUs requiring sw coordination */ + cpumask_var_t related_cpus; /* CPUs with any coordination */ + unsigned int shared_type; /* ANY or ALL affected CPUs + should set cpufreq */ + unsigned int cpu; /* cpu nr of registered CPU */ + struct cpufreq_cpuinfo cpuinfo;/* see above */ + + unsigned int min; /* in kHz */ + unsigned int max; /* in kHz */ + unsigned int cur; /* in kHz, only needed if cpufreq + * governors are used */ + unsigned int policy; /* see above */ + struct cpufreq_governor *governor; /* see below */ + + struct work_struct update; /* if update_policy() needs to be + * called, but you're in IRQ context */ + + struct cpufreq_real_policy user_policy; + + struct kobject kobj; + struct completion kobj_unregister; +}; + +#define CPUFREQ_ADJUST (0) +#define CPUFREQ_INCOMPATIBLE (1) +#define CPUFREQ_NOTIFY (2) +#define CPUFREQ_START (3) + +#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ +#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ +#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ +#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ + +/******************** cpufreq transition notifiers *******************/ + +#define CPUFREQ_PRECHANGE (0) +#define CPUFREQ_POSTCHANGE (1) +#define CPUFREQ_RESUMECHANGE (8) +#define CPUFREQ_SUSPENDCHANGE (9) + +struct cpufreq_freqs { + unsigned int cpu; /* cpu nr */ + unsigned int old; + unsigned int new; + u8 flags; /* flags of cpufreq_driver, see below. */ +}; + + +/** + * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) + * @old: old value + * @div: divisor + * @mult: multiplier + * + * + * new = old * mult / div + */ +static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) +{ +#if BITS_PER_LONG == 32 + + u64 result = ((u64) old) * ((u64) mult); + do_div(result, div); + return (unsigned long) result; + +#elif BITS_PER_LONG == 64 + + unsigned long result = old * ((u64) mult); + result /= div; + return result; + +#endif +}; + +/********************************************************************* + * CPUFREQ GOVERNORS * + *********************************************************************/ + +#define CPUFREQ_GOV_START 1 +#define CPUFREQ_GOV_STOP 2 +#define CPUFREQ_GOV_LIMITS 3 + +struct cpufreq_governor { + char name[CPUFREQ_NAME_LEN]; + int (*governor) (struct cpufreq_policy *policy, + unsigned int event); + ssize_t (*show_setspeed) (struct cpufreq_policy *policy, + char *buf); + int (*store_setspeed) (struct cpufreq_policy *policy, + unsigned int freq); + unsigned int max_transition_latency; /* HW must be able to switch to + next freq faster than this value in nano secs or we + will fallback to performance governor */ + struct list_head governor_list; + struct module *owner; +}; + +/* + * Pass a target to the cpufreq driver. 
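+ *
+ * For illustration only (not part of the original comment): a governor
+ * that needs at least target_khz would typically ask for
+ *
+ *    __cpufreq_driver_target(policy, target_khz, CPUFREQ_RELATION_L);
+ *
+ * CPUFREQ_RELATION_L selects the lowest frequency at or above the
+ * target, CPUFREQ_RELATION_H the highest frequency at or below it; both
+ * are defined in the driver interface section below.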
+ */ +extern int cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); +extern int __cpufreq_driver_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); + + +extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, + unsigned int cpu); + +int cpufreq_register_governor(struct cpufreq_governor *governor); +void cpufreq_unregister_governor(struct cpufreq_governor *governor); + + +/********************************************************************* + * CPUFREQ DRIVER INTERFACE * + *********************************************************************/ + +#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ +#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ + +struct freq_attr; + +struct cpufreq_driver { + struct module *owner; + char name[CPUFREQ_NAME_LEN]; + u8 flags; + + /* needed by all drivers */ + int (*init) (struct cpufreq_policy *policy); + int (*verify) (struct cpufreq_policy *policy); + + /* define one out of two */ + int (*setpolicy) (struct cpufreq_policy *policy); + int (*target) (struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); + + /* should be defined, if possible */ + unsigned int (*get) (unsigned int cpu); + + /* optional */ + unsigned int (*getavg) (struct cpufreq_policy *policy, + unsigned int cpu); + int (*bios_limit) (int cpu, unsigned int *limit); + + int (*exit) (struct cpufreq_policy *policy); + int (*suspend) (struct cpufreq_policy *policy); + int (*resume) (struct cpufreq_policy *policy); + struct freq_attr **attr; +}; + +/* flags */ + +#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if + * all ->init() calls failed */ +#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel + * "constants" aren't affected by + * frequency transitions */ +#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed + * mismatches */ + +int cpufreq_register_driver(struct cpufreq_driver *driver_data); +int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); + + +void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); + + +static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) +{ + if (policy->min < min) + policy->min = min; + if (policy->max < min) + policy->max = min; + if (policy->min > max) + policy->min = max; + if (policy->max > max) + policy->max = max; + if (policy->min > policy->max) + policy->min = policy->max; + return; +} + +struct freq_attr { + struct attribute attr; + ssize_t (*show)(struct cpufreq_policy *, char *); + ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count); +}; + +#define cpufreq_freq_attr_ro(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define cpufreq_freq_attr_ro_perm(_name, _perm) \ +static struct freq_attr _name = \ +__ATTR(_name, _perm, show_##_name, NULL) + +#define cpufreq_freq_attr_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +struct global_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *a, struct attribute *b, + const char *c, size_t count); +}; + +#define define_one_global_ro(_name) \ +static struct global_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define define_one_global_rw(_name) \ +static struct global_attr _name = \ 
+__ATTR(_name, 0644, show_##_name, store_##_name) + + +/********************************************************************* + * CPUFREQ 2.6. INTERFACE * + *********************************************************************/ +int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); +int cpufreq_update_policy(unsigned int cpu); + +#ifdef CONFIG_CPU_FREQ +/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ +unsigned int cpufreq_get(unsigned int cpu); +#else +static inline unsigned int cpufreq_get(unsigned int cpu) +{ + return 0; +} +#endif + +/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ +#ifdef CONFIG_CPU_FREQ +unsigned int cpufreq_quick_get(unsigned int cpu); +unsigned int cpufreq_quick_get_max(unsigned int cpu); +#else +static inline unsigned int cpufreq_quick_get(unsigned int cpu) +{ + return 0; +} +static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) +{ + return 0; +} +#endif + + +/********************************************************************* + * CPUFREQ DEFAULT GOVERNOR * + *********************************************************************/ + + +/* + Performance governor is fallback governor if any other gov failed to + auto load due latency restrictions +*/ +#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE +extern struct cpufreq_governor cpufreq_gov_performance; +#endif +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE) +extern struct cpufreq_governor cpufreq_gov_powersave; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE) +extern struct cpufreq_governor cpufreq_gov_userspace; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND) +extern struct cpufreq_governor cpufreq_gov_ondemand; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE) +extern struct cpufreq_governor cpufreq_gov_conservative; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) +extern struct cpufreq_governor cpufreq_gov_interactive; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive) +#endif + + +/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ + +#define CPUFREQ_ENTRY_INVALID ~0 +#define CPUFREQ_TABLE_END ~1 + +struct cpufreq_frequency_table { + unsigned int index; /* any */ + unsigned int frequency; /* kHz - doesn't need to be in ascending + * order */ +}; + +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); + +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); + +int cpufreq_frequency_table_target(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int target_freq, + unsigned int relation, + unsigned int *index); + +/* the following 3 funtions are for cpufreq core use only */ +struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); +struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); +void cpufreq_cpu_put(struct cpufreq_policy *data); + +/* the following are really really optional */ +extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; + +void 
cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, + unsigned int cpu); + +void cpufreq_frequency_table_put_attr(unsigned int cpu); + + +#endif /* _LINUX_CPUFREQ_H */ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h new file mode 100644 index 00000000..5ab71833 --- /dev/null +++ b/include/linux/cpuidle.h @@ -0,0 +1,237 @@ +/* + * cpuidle.h - a generic framework for CPU idle power management + * + * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> + * Shaohua Li <shaohua.li@intel.com> + * Adam Belay <abelay@novell.com> + * + * This code is licenced under the GPL. + */ + +#ifndef _LINUX_CPUIDLE_H +#define _LINUX_CPUIDLE_H + +#include <linux/percpu.h> +#include <linux/list.h> +#include <linux/kobject.h> +#include <linux/completion.h> +#include <linux/hrtimer.h> + +#define CPUIDLE_STATE_MAX 8 +#define CPUIDLE_NAME_LEN 16 +#define CPUIDLE_DESC_LEN 32 + +struct module; + +struct cpuidle_device; +struct cpuidle_driver; + + +/**************************** + * CPUIDLE DEVICE INTERFACE * + ****************************/ + +struct cpuidle_state_usage { + void *driver_data; + + unsigned long long usage; + unsigned long long time; /* in US */ +}; + +struct cpuidle_state { + char name[CPUIDLE_NAME_LEN]; + char desc[CPUIDLE_DESC_LEN]; + + unsigned int flags; + unsigned int exit_latency; /* in US */ + int power_usage; /* in mW */ + unsigned int target_residency; /* in US */ + unsigned int disable; + + int (*enter) (struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index); + + int (*enter_dead) (struct cpuidle_device *dev, int index); +}; + +/* Idle State Flags */ +#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ +#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ + +#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) + +/** + * cpuidle_get_statedata - retrieves private driver state data + * @st_usage: the state usage statistics + */ +static inline void *cpuidle_get_statedata(struct cpuidle_state_usage *st_usage) +{ + return st_usage->driver_data; +} + +/** + * cpuidle_set_statedata - stores private driver state data + * @st_usage: the state usage statistics + * @data: the private data + */ +static inline void +cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data) +{ + st_usage->driver_data = data; +} + +struct cpuidle_state_kobj { + struct cpuidle_state *state; + struct cpuidle_state_usage *state_usage; + struct completion kobj_unregister; + struct kobject kobj; +}; + +struct cpuidle_device { + unsigned int registered:1; + unsigned int enabled:1; + unsigned int cpu; + + int last_residency; + int state_count; + struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; + struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; + + struct list_head device_list; + struct kobject kobj; + struct completion kobj_unregister; + +#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED + int safe_state_index; + cpumask_t coupled_cpus; + struct cpuidle_coupled *coupled; +#endif +}; + +DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); + +/** + * cpuidle_get_last_residency - retrieves the last state's residency time + * @dev: the target CPU + * + * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set + */ +static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) +{ + return dev->last_residency; +} + + +/**************************** + * CPUIDLE DRIVER INTERFACE * + ****************************/ + +struct cpuidle_driver { + const char *name; + struct module *owner; + + unsigned int 
power_specified:1; + /* set to 1 to use the core cpuidle time keeping (for all states). */ + unsigned int en_core_tk_irqen:1; + struct cpuidle_state states[CPUIDLE_STATE_MAX]; + int state_count; + int safe_state_index; +}; + +#ifdef CONFIG_CPU_IDLE +extern void disable_cpuidle(void); +extern int cpuidle_idle_call(void); +extern int cpuidle_register_driver(struct cpuidle_driver *drv); +struct cpuidle_driver *cpuidle_get_driver(void); +extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); +extern int cpuidle_register_device(struct cpuidle_device *dev); +extern void cpuidle_unregister_device(struct cpuidle_device *dev); + +extern void cpuidle_pause_and_lock(void); +extern void cpuidle_resume_and_unlock(void); +extern int cpuidle_enable_device(struct cpuidle_device *dev); +extern void cpuidle_disable_device(struct cpuidle_device *dev); +extern int cpuidle_wrap_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index, + int (*enter)(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index)); +extern int cpuidle_play_dead(void); + +#else +static inline void disable_cpuidle(void) { } +static inline int cpuidle_idle_call(void) { return -ENODEV; } +static inline int cpuidle_register_driver(struct cpuidle_driver *drv) +{return -ENODEV; } +static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } +static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } +static inline int cpuidle_register_device(struct cpuidle_device *dev) +{return -ENODEV; } +static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { } + +static inline void cpuidle_pause_and_lock(void) { } +static inline void cpuidle_resume_and_unlock(void) { } +static inline int cpuidle_enable_device(struct cpuidle_device *dev) +{return -ENODEV; } +static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } +static inline int cpuidle_wrap_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index, + int (*enter)(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index)) +{ return -ENODEV; } +static inline int cpuidle_play_dead(void) {return -ENODEV; } + +#endif + +#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED +void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a); +#endif + +/****************************** + * CPUIDLE GOVERNOR INTERFACE * + ******************************/ + +struct cpuidle_governor { + char name[CPUIDLE_NAME_LEN]; + struct list_head governor_list; + unsigned int rating; + + int (*enable) (struct cpuidle_driver *drv, + struct cpuidle_device *dev); + void (*disable) (struct cpuidle_driver *drv, + struct cpuidle_device *dev); + + int (*select) (struct cpuidle_driver *drv, + struct cpuidle_device *dev); + void (*reflect) (struct cpuidle_device *dev, int index); + + struct module *owner; +}; + +#ifdef CONFIG_CPU_IDLE + +extern int cpuidle_register_governor(struct cpuidle_governor *gov); +extern void cpuidle_unregister_governor(struct cpuidle_governor *gov); + +#ifdef CONFIG_INTEL_IDLE +extern int intel_idle_cpu_init(int cpu); +#else +static inline int intel_idle_cpu_init(int cpu) { return -1; } +#endif + +#else +static inline int intel_idle_cpu_init(int cpu) { return -1; } + +static inline int cpuidle_register_governor(struct cpuidle_governor *gov) +{return 0;} +static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { } + +#endif + +#ifdef CONFIG_ARCH_HAS_CPU_RELAX +#define CPUIDLE_DRIVER_STATE_START 1 +#else +#define CPUIDLE_DRIVER_STATE_START 0 +#endif + 
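+
+/*
+ * Illustrative sketch only, not part of the original header: a minimal
+ * platform might describe a single WFI-style state and register it like
+ * this (my_idle_driver and my_enter() are hypothetical names):
+ *
+ *    static struct cpuidle_driver my_idle_driver = {
+ *        .name        = "my_idle",
+ *        .owner       = THIS_MODULE,
+ *        .states[0]   = {
+ *            .name             = "WFI",
+ *            .desc             = "wait for interrupt",
+ *            .exit_latency     = 1,
+ *            .target_residency = 1,
+ *            .flags            = CPUIDLE_FLAG_TIME_VALID,
+ *            .enter            = my_enter,
+ *        },
+ *        .state_count = 1,
+ *    };
+ *
+ *    cpuidle_register_driver(&my_idle_driver);
+ *
+ * followed by cpuidle_register_device() on each CPU's cpuidle_device.
+ */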
+#endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h new file mode 100644 index 00000000..a2c819d3 --- /dev/null +++ b/include/linux/cpumask.h @@ -0,0 +1,937 @@ +#ifndef __LINUX_CPUMASK_H +#define __LINUX_CPUMASK_H + +/* + * Cpumasks provide a bitmap suitable for representing the + * set of CPU's in a system, one bit position per CPU number. In general, + * only nr_cpu_ids (<= NR_CPUS) bits are valid. + */ +#include <linux/kernel.h> +#include <linux/threads.h> +#include <linux/bitmap.h> +#include <linux/bug.h> + +typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; + +/** + * cpumask_bits - get the bits in a cpumask + * @maskp: the struct cpumask * + * + * You should only assume nr_cpu_ids bits of this mask are valid. This is + * a macro so it's const-correct. + */ +#define cpumask_bits(maskp) ((maskp)->bits) + +#if NR_CPUS == 1 +#define nr_cpu_ids 1 +#else +extern int nr_cpu_ids; +#endif + +#ifdef CONFIG_CPUMASK_OFFSTACK +/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, + * not all bits may be allocated. */ +#define nr_cpumask_bits nr_cpu_ids +#else +#define nr_cpumask_bits NR_CPUS +#endif + +/* + * The following particular system cpumasks and operations manage + * possible, present, active and online cpus. + * + * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable + * cpu_present_mask - has bit 'cpu' set iff cpu is populated + * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler + * cpu_active_mask - has bit 'cpu' set iff cpu available to migration + * + * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. + * + * The cpu_possible_mask is fixed at boot time, as the set of CPU id's + * that it is possible might ever be plugged in at anytime during the + * life of that system boot. The cpu_present_mask is dynamic(*), + * representing which CPUs are currently plugged in. And + * cpu_online_mask is the dynamic subset of cpu_present_mask, + * indicating those CPUs available for scheduling. + * + * If HOTPLUG is enabled, then cpu_possible_mask is forced to have + * all NR_CPUS bits set, otherwise it is just the set of CPUs that + * ACPI reports present at boot. + * + * If HOTPLUG is enabled, then cpu_present_mask varies dynamically, + * depending on what ACPI reports as currently plugged in, otherwise + * cpu_present_mask is just a copy of cpu_possible_mask. + * + * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not + * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. + * + * Subtleties: + * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode + * assumption that their single CPU is online. The UP + * cpu_{online,possible,present}_masks are placebos. Changing them + * will have no useful affect on the following num_*_cpus() + * and cpu_*() macros in the UP case. This ugliness is a UP + * optimization - don't waste any instructions or memory references + * asking if you're online or how many CPUs there are if there is + * only one CPU. 
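+ *
+ * For illustration only (not part of the original comment), typical
+ * queries against these masks look like:
+ *
+ *    if (cpu_online(cpu))
+ *        ... cpu is currently available to the scheduler ...
+ *
+ *    pr_info("%u of %u possible cpus online\n",
+ *            num_online_cpus(), num_possible_cpus());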
+ */ + +extern const struct cpumask *const cpu_possible_mask; +extern const struct cpumask *const cpu_online_mask; +extern const struct cpumask *const cpu_present_mask; +extern const struct cpumask *const cpu_active_mask; + +#if NR_CPUS > 1 +#define num_online_cpus() cpumask_weight(cpu_online_mask) +#define num_possible_cpus() cpumask_weight(cpu_possible_mask) +#define num_present_cpus() cpumask_weight(cpu_present_mask) +#define num_active_cpus() cpumask_weight(cpu_active_mask) +#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) +#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) +#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) +#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) +#else +#define num_online_cpus() 1U +#define num_possible_cpus() 1U +#define num_present_cpus() 1U +#define num_active_cpus() 1U +#define cpu_online(cpu) ((cpu) == 0) +#define cpu_possible(cpu) ((cpu) == 0) +#define cpu_present(cpu) ((cpu) == 0) +#define cpu_active(cpu) ((cpu) == 0) +#endif + +/* verify cpu argument to cpumask_* operators */ +static inline unsigned int cpumask_check(unsigned int cpu) +{ +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + WARN_ON_ONCE(cpu >= nr_cpumask_bits); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ + return cpu; +} + +#if NR_CPUS == 1 +/* Uniprocessor. Assume all masks are "1". */ +static inline unsigned int cpumask_first(const struct cpumask *srcp) +{ + return 0; +} + +/* Valid inputs for n are -1 and 0. */ +static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) +{ + return n+1; +} + +static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) +{ + return n+1; +} + +static inline unsigned int cpumask_next_and(int n, + const struct cpumask *srcp, + const struct cpumask *andp) +{ + return n+1; +} + +/* cpu must be a valid cpu, ie 0, so there's no other choice. */ +static inline unsigned int cpumask_any_but(const struct cpumask *mask, + unsigned int cpu) +{ + return 1; +} + +#define for_each_cpu(cpu, mask) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) +#define for_each_cpu_not(cpu, mask) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) +#define for_each_cpu_and(cpu, mask, and) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) +#else +/** + * cpumask_first - get the first cpu in a cpumask + * @srcp: the cpumask pointer + * + * Returns >= nr_cpu_ids if no cpus set. + */ +static inline unsigned int cpumask_first(const struct cpumask *srcp) +{ + return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_next - get the next cpu in a cpumask + * @n: the cpu prior to the place to search (ie. return will be > @n) + * @srcp: the cpumask pointer + * + * Returns >= nr_cpu_ids if no further cpus set. + */ +static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) +{ + /* -1 is a legal arg here. */ + if (n != -1) + cpumask_check(n); + return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); +} + +/** + * cpumask_next_zero - get the next unset cpu in a cpumask + * @n: the cpu prior to the place to search (ie. return will be > @n) + * @srcp: the cpumask pointer + * + * Returns >= nr_cpu_ids if no further cpus unset. + */ +static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) +{ + /* -1 is a legal arg here. 
*/ + if (n != -1) + cpumask_check(n); + return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); +} + +int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); +int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); + +/** + * for_each_cpu - iterate over every cpu in a mask + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask pointer + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = cpumask_next((cpu), (mask)), \ + (cpu) < nr_cpu_ids;) + +/** + * for_each_cpu_not - iterate over every cpu in a complemented mask + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask pointer + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu_not(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = cpumask_next_zero((cpu), (mask)), \ + (cpu) < nr_cpu_ids;) + +/** + * for_each_cpu_and - iterate over every cpu in both masks + * @cpu: the (optionally unsigned) integer iterator + * @mask: the first cpumask pointer + * @and: the second cpumask pointer + * + * This saves a temporary CPU mask in many places. It is equivalent to: + * struct cpumask tmp; + * cpumask_and(&tmp, &mask, &and); + * for_each_cpu(cpu, &tmp) + * ... + * + * After the loop, cpu is >= nr_cpu_ids. + */ +#define for_each_cpu_and(cpu, mask, and) \ + for ((cpu) = -1; \ + (cpu) = cpumask_next_and((cpu), (mask), (and)), \ + (cpu) < nr_cpu_ids;) +#endif /* SMP */ + +#define CPU_BITS_NONE \ +{ \ + [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ +} + +#define CPU_BITS_CPU0 \ +{ \ + [0] = 1UL \ +} + +/** + * cpumask_set_cpu - set a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @dstp: the cpumask pointer + */ +static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +{ + set_bit(cpumask_check(cpu), cpumask_bits(dstp)); +} + +/** + * cpumask_clear_cpu - clear a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @dstp: the cpumask pointer + */ +static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) +{ + clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); +} + +/** + * cpumask_test_cpu - test for a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * No static inline type checking - see Subtlety (1) above. + */ +#define cpumask_test_cpu(cpu, cpumask) \ + test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) + +/** + * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * test_and_set_bit wrapper for cpumasks. + */ +static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) +{ + return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); +} + +/** + * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @cpumask: the cpumask pointer + * + * test_and_clear_bit wrapper for cpumasks. 
+ */ +static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) +{ + return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); +} + +/** + * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask + * @dstp: the cpumask pointer + */ +static inline void cpumask_setall(struct cpumask *dstp) +{ + bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask + * @dstp: the cpumask pointer + */ +static inline void cpumask_clear(struct cpumask *dstp) +{ + bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_and - *dstp = *src1p & *src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline int cpumask_and(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_or - *dstp = *src1p | *src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_xor - *dstp = *src1p ^ *src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline void cpumask_xor(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_andnot - *dstp = *src1p & ~*src2p + * @dstp: the cpumask result + * @src1p: the first input + * @src2p: the second input + */ +static inline int cpumask_andnot(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), + cpumask_bits(src2p), nr_cpumask_bits); +} + +/** + * cpumask_complement - *dstp = ~*srcp + * @dstp: the cpumask result + * @srcp: the input to invert + */ +static inline void cpumask_complement(struct cpumask *dstp, + const struct cpumask *srcp) +{ + bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), + nr_cpumask_bits); +} + +/** + * cpumask_equal - *src1p == *src2p + * @src1p: the first input + * @src2p: the second input + */ +static inline bool cpumask_equal(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits); +} + +/** + * cpumask_intersects - (*src1p & *src2p) != 0 + * @src1p: the first input + * @src2p: the second input + */ +static inline bool cpumask_intersects(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits); +} + +/** + * cpumask_subset - (*src1p & ~*src2p) == 0 + * @src1p: the first input + * @src2p: the second input + */ +static inline int cpumask_subset(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits); +} + +/** + * cpumask_empty - *srcp == 0 + * @srcp: the cpumask to that all cpus < nr_cpu_ids are clear. + */ +static inline bool cpumask_empty(const struct cpumask *srcp) +{ + return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_full - *srcp == 0xFFFFFFFF... 
+ * @srcp: the cpumask to that all cpus < nr_cpu_ids are set. + */ +static inline bool cpumask_full(const struct cpumask *srcp) +{ + return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_weight - Count of bits in *srcp + * @srcp: the cpumask to count bits (< nr_cpu_ids) in. + */ +static inline unsigned int cpumask_weight(const struct cpumask *srcp) +{ + return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_shift_right - *dstp = *srcp >> n + * @dstp: the cpumask result + * @srcp: the input to shift + * @n: the number of bits to shift by + */ +static inline void cpumask_shift_right(struct cpumask *dstp, + const struct cpumask *srcp, int n) +{ + bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n, + nr_cpumask_bits); +} + +/** + * cpumask_shift_left - *dstp = *srcp << n + * @dstp: the cpumask result + * @srcp: the input to shift + * @n: the number of bits to shift by + */ +static inline void cpumask_shift_left(struct cpumask *dstp, + const struct cpumask *srcp, int n) +{ + bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n, + nr_cpumask_bits); +} + +/** + * cpumask_copy - *dstp = *srcp + * @dstp: the result + * @srcp: the input cpumask + */ +static inline void cpumask_copy(struct cpumask *dstp, + const struct cpumask *srcp) +{ + bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_any - pick a "random" cpu from *srcp + * @srcp: the input cpumask + * + * Returns >= nr_cpu_ids if no cpus set. + */ +#define cpumask_any(srcp) cpumask_first(srcp) + +/** + * cpumask_first_and - return the first cpu from *srcp1 & *srcp2 + * @src1p: the first input + * @src2p: the second input + * + * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). + */ +#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p)) + +/** + * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2 + * @mask1: the first input cpumask + * @mask2: the second input cpumask + * + * Returns >= nr_cpu_ids if no cpus set. + */ +#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2)) + +/** + * cpumask_of - the cpumask containing just a given cpu + * @cpu: the cpu (<= nr_cpu_ids) + */ +#define cpumask_of(cpu) (get_cpu_mask(cpu)) + +/** + * cpumask_scnprintf - print a cpumask into a string as comma-separated hex + * @buf: the buffer to sprintf into + * @len: the length of the buffer + * @srcp: the cpumask to print + * + * If len is zero, returns zero. Otherwise returns the length of the + * (nul-terminated) @buf string. + */ +static inline int cpumask_scnprintf(char *buf, int len, + const struct cpumask *srcp) +{ + return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_parse_user - extract a cpumask from a user string + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpumask_parse_user(const char __user *buf, int len, + struct cpumask *dstp) +{ + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_parselist_user - extract a cpumask from a user string + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. 
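+ *
+ * Illustrative note (not part of the original comment): the buffer uses
+ * the list format, so a user string such as "0-3,8" sets cpus 0, 1, 2,
+ * 3 and 8; new_mask below is a hypothetical struct cpumask *:
+ *
+ *    err = cpumask_parselist_user(buf, len, new_mask);
+ *    if (err)
+ *        return err;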
+ */ +static inline int cpumask_parselist_user(const char __user *buf, int len, + struct cpumask *dstp) +{ + return bitmap_parselist_user(buf, len, cpumask_bits(dstp), + nr_cpumask_bits); +} + +/** + * cpulist_scnprintf - print a cpumask into a string as comma-separated list + * @buf: the buffer to sprintf into + * @len: the length of the buffer + * @srcp: the cpumask to print + * + * If len is zero, returns zero. Otherwise returns the length of the + * (nul-terminated) @buf string. + */ +static inline int cpulist_scnprintf(char *buf, int len, + const struct cpumask *srcp) +{ + return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp), + nr_cpumask_bits); +} + +/** + * cpulist_parse_user - extract a cpumask from a user string of ranges + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpulist_parse(const char *buf, struct cpumask *dstp) +{ + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); +} + +/** + * cpumask_size - size to allocate for a 'struct cpumask' in bytes + * + * This will eventually be a runtime variable, depending on nr_cpu_ids. + */ +static inline size_t cpumask_size(void) +{ + /* FIXME: Once all cpumask assignments are eliminated, this + * can be nr_cpumask_bits */ + return BITS_TO_LONGS(NR_CPUS) * sizeof(long); +} + +/* + * cpumask_var_t: struct cpumask for stack usage. + * + * Oh, the wicked games we play! In order to make kernel coding a + * little more difficult, we typedef cpumask_var_t to an array or a + * pointer: doing &mask on an array is a noop, so it still works. + * + * ie. + * cpumask_var_t tmpmask; + * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) + * return -ENOMEM; + * + * ... use 'tmpmask' like a normal struct cpumask * ... + * + * free_cpumask_var(tmpmask); + * + * + * However, one notable exception is there. alloc_cpumask_var() allocates + * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has + * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t. + * + * cpumask_var_t tmpmask; + * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) + * return -ENOMEM; + * + * var = *tmpmask; + * + * This code makes NR_CPUS length memcopy and brings to a memory corruption. + * cpumask_copy() provide safe copy functionality. 
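+ *
+ * (Illustrative addition, not part of the original text: the safe form
+ * of the copy above is
+ *
+ *    cpumask_copy(&var, tmpmask);
+ *
+ * which copies only nr_cpumask_bits bits, however cpumask_var_t is
+ * configured.)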
+ */ +#ifdef CONFIG_CPUMASK_OFFSTACK +typedef struct cpumask *cpumask_var_t; + +bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); +bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); +bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); +bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); +void alloc_bootmem_cpumask_var(cpumask_var_t *mask); +void free_cpumask_var(cpumask_var_t mask); +void free_bootmem_cpumask_var(cpumask_var_t mask); + +#else +typedef struct cpumask cpumask_var_t[1]; + +static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + return true; +} + +static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, + int node) +{ + return true; +} + +static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + cpumask_clear(*mask); + return true; +} + +static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, + int node) +{ + cpumask_clear(*mask); + return true; +} + +static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) +{ +} + +static inline void free_cpumask_var(cpumask_var_t mask) +{ +} + +static inline void free_bootmem_cpumask_var(cpumask_var_t mask) +{ +} +#endif /* CONFIG_CPUMASK_OFFSTACK */ + +/* It's common to want to use cpu_all_mask in struct member initializers, + * so it has to refer to an address rather than a pointer. */ +extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); +#define cpu_all_mask to_cpumask(cpu_all_bits) + +/* First bits of cpu_bit_bitmap are in fact unset. */ +#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) + +#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) +#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) +#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) + +/* Wrappers for arch boot code to manipulate normally-constant masks */ +void set_cpu_possible(unsigned int cpu, bool possible); +void set_cpu_present(unsigned int cpu, bool present); +void set_cpu_online(unsigned int cpu, bool online); +void set_cpu_active(unsigned int cpu, bool active); +void init_cpu_present(const struct cpumask *src); +void init_cpu_possible(const struct cpumask *src); +void init_cpu_online(const struct cpumask *src); + +/** + * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * + * @bitmap: the bitmap + * + * There are a few places where cpumask_var_t isn't appropriate and + * static cpumasks must be used (eg. very early boot), yet we don't + * expose the definition of 'struct cpumask'. + * + * This does the conversion, and can be used as a constant initializer. + */ +#define to_cpumask(bitmap) \ + ((struct cpumask *)(1 ? (bitmap) \ + : (void *)sizeof(__check_is_bitmap(bitmap)))) + +static inline int __check_is_bitmap(const unsigned long *bitmap) +{ + return 1; +} + +/* + * Special-case data structure for "single bit set only" constant CPU masks. + * + * We pre-generate all the 64 (or 32) possible bit positions, with enough + * padding to the left and the right, and return the constant pointer + * appropriately offset. 
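+ *
+ * For illustration (not part of the original comment): callers normally
+ * reach these constant masks through cpumask_of(), e.g.
+ *
+ *    if (cpumask_equal(mask, cpumask_of(cpu)))
+ *        ... mask contains exactly this one cpu ...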
+ */ +extern const unsigned long + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; + +static inline const struct cpumask *get_cpu_mask(unsigned int cpu) +{ + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; + p -= cpu / BITS_PER_LONG; + return to_cpumask(p); +} + +#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) + +#if NR_CPUS <= BITS_PER_LONG +#define CPU_BITS_ALL \ +{ \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} + +#else /* NR_CPUS > BITS_PER_LONG */ + +#define CPU_BITS_ALL \ +{ \ + [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} +#endif /* NR_CPUS > BITS_PER_LONG */ + +/* + * + * From here down, all obsolete. Use cpumask_ variants! + * + */ +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS +#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) + +#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) + +#if NR_CPUS <= BITS_PER_LONG + +#define CPU_MASK_ALL \ +(cpumask_t) { { \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} } + +#else + +#define CPU_MASK_ALL \ +(cpumask_t) { { \ + [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ + [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ +} } + +#endif + +#define CPU_MASK_NONE \ +(cpumask_t) { { \ + [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ +} } + +#define CPU_MASK_CPU0 \ +(cpumask_t) { { \ + [0] = 1UL \ +} } + +#if NR_CPUS == 1 +#define first_cpu(src) ({ (void)(src); 0; }) +#define next_cpu(n, src) ({ (void)(src); 1; }) +#define any_online_cpu(mask) 0 +#define for_each_cpu_mask(cpu, mask) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) +#else /* NR_CPUS > 1 */ +int __first_cpu(const cpumask_t *srcp); +int __next_cpu(int n, const cpumask_t *srcp); + +#define first_cpu(src) __first_cpu(&(src)) +#define next_cpu(n, src) __next_cpu((n), &(src)) +#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask) +#define for_each_cpu_mask(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu((cpu), (mask)), \ + (cpu) < NR_CPUS; ) +#endif /* SMP */ + +#if NR_CPUS <= 64 + +#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) + +#else /* NR_CPUS > 64 */ + +int __next_cpu_nr(int n, const cpumask_t *srcp); +#define for_each_cpu_mask_nr(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = __next_cpu_nr((cpu), &(mask)), \ + (cpu) < nr_cpu_ids; ) + +#endif /* NR_CPUS > 64 */ + +#define cpus_addr(src) ((src).bits) + +#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) +static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) +{ + set_bit(cpu, dstp->bits); +} + +#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) +static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) +{ + clear_bit(cpu, dstp->bits); +} + +#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) +static inline void __cpus_setall(cpumask_t *dstp, int nbits) +{ + bitmap_fill(dstp->bits, nbits); +} + +#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) +static inline void __cpus_clear(cpumask_t *dstp, int nbits) +{ + bitmap_zero(dstp->bits, nbits); +} + +/* No static inline type checking - see Subtlety (1) above. 
*/ +#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) + +#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) +static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) +{ + return test_and_set_bit(cpu, addr->bits); +} + +#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) +static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) +static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) +static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define cpus_andnot(dst, src1, src2) \ + __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) +static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) +static inline int __cpus_equal(const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_equal(src1p->bits, src2p->bits, nbits); +} + +#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) +static inline int __cpus_intersects(const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_intersects(src1p->bits, src2p->bits, nbits); +} + +#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) +static inline int __cpus_subset(const cpumask_t *src1p, + const cpumask_t *src2p, int nbits) +{ + return bitmap_subset(src1p->bits, src2p->bits, nbits); +} + +#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) +static inline int __cpus_empty(const cpumask_t *srcp, int nbits) +{ + return bitmap_empty(srcp->bits, nbits); +} + +#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) +static inline int __cpus_weight(const cpumask_t *srcp, int nbits) +{ + return bitmap_weight(srcp->bits, nbits); +} + +#define cpus_shift_left(dst, src, n) \ + __cpus_shift_left(&(dst), &(src), (n), NR_CPUS) +static inline void __cpus_shift_left(cpumask_t *dstp, + const cpumask_t *srcp, int n, int nbits) +{ + bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); +} +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ + +#endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h new file mode 100644 index 00000000..668f66ba --- /dev/null +++ b/include/linux/cpuset.h @@ -0,0 +1,238 @@ +#ifndef _LINUX_CPUSET_H +#define _LINUX_CPUSET_H +/* + * cpuset interface + * + * Copyright (C) 2003 BULL SA + * Copyright (C) 2004-2006 Silicon Graphics, Inc. + * + */ + +#include <linux/sched.h> +#include <linux/cpumask.h> +#include <linux/nodemask.h> +#include <linux/cgroup.h> +#include <linux/mm.h> + +#ifdef CONFIG_CPUSETS + +extern int number_of_cpusets; /* How many cpusets are defined in system? 
*/ + +extern int cpuset_init(void); +extern void cpuset_init_smp(void); +extern void cpuset_update_active_cpus(void); +extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); +extern void cpuset_cpus_allowed_fallback(struct task_struct *p); +extern nodemask_t cpuset_mems_allowed(struct task_struct *p); +#define cpuset_current_mems_allowed (current->mems_allowed) +void cpuset_init_current_mems_allowed(void); +int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); + +extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); +extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); + +static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) +{ + return number_of_cpusets <= 1 || + __cpuset_node_allowed_softwall(node, gfp_mask); +} + +static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) +{ + return number_of_cpusets <= 1 || + __cpuset_node_allowed_hardwall(node, gfp_mask); +} + +static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) +{ + return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); +} + +static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) +{ + return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); +} + +extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, + const struct task_struct *tsk2); + +#define cpuset_memory_pressure_bump() \ + do { \ + if (cpuset_memory_pressure_enabled) \ + __cpuset_memory_pressure_bump(); \ + } while (0) +extern int cpuset_memory_pressure_enabled; +extern void __cpuset_memory_pressure_bump(void); + +extern const struct file_operations proc_cpuset_operations; +struct seq_file; +extern void cpuset_task_status_allowed(struct seq_file *m, + struct task_struct *task); + +extern int cpuset_mem_spread_node(void); +extern int cpuset_slab_spread_node(void); + +static inline int cpuset_do_page_mem_spread(void) +{ + return current->flags & PF_SPREAD_PAGE; +} + +static inline int cpuset_do_slab_mem_spread(void) +{ + return current->flags & PF_SPREAD_SLAB; +} + +extern int current_cpuset_is_being_rebound(void); + +extern void rebuild_sched_domains(void); + +extern void cpuset_print_task_mems_allowed(struct task_struct *p); + +/* + * get_mems_allowed is required when making decisions involving mems_allowed + * such as during page allocation. mems_allowed can be updated in parallel + * and depending on the new value an operation can fail potentially causing + * process failure. A retry loop with get_mems_allowed and put_mems_allowed + * prevents these artificial failures. + */ +static inline unsigned int get_mems_allowed(void) +{ + return read_seqcount_begin(¤t->mems_allowed_seq); +} + +/* + * If this returns false, the operation that took place after get_mems_allowed + * may have failed. It is up to the caller to retry the operation if + * appropriate. 
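+ *
+ * A sketch of the retry pattern described above (illustrative only, not
+ * part of the original header); my_alloc() and node stand in for any
+ * operation that depends on mems_allowed:
+ *
+ *    unsigned int seq;
+ *    struct page *page;
+ *
+ *    do {
+ *        seq = get_mems_allowed();
+ *        page = my_alloc(node);
+ *    } while (!put_mems_allowed(seq) && !page);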
+ */ +static inline bool put_mems_allowed(unsigned int seq) +{ + return !read_seqcount_retry(¤t->mems_allowed_seq, seq); +} + +static inline void set_mems_allowed(nodemask_t nodemask) +{ + task_lock(current); + write_seqcount_begin(¤t->mems_allowed_seq); + current->mems_allowed = nodemask; + write_seqcount_end(¤t->mems_allowed_seq); + task_unlock(current); +} + +#else /* !CONFIG_CPUSETS */ + +static inline int cpuset_init(void) { return 0; } +static inline void cpuset_init_smp(void) {} + +static inline void cpuset_update_active_cpus(void) +{ + partition_sched_domains(1, NULL, NULL); +} + +static inline void cpuset_cpus_allowed(struct task_struct *p, + struct cpumask *mask) +{ + cpumask_copy(mask, cpu_possible_mask); +} + +static inline void cpuset_cpus_allowed_fallback(struct task_struct *p) +{ +} + +static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) +{ + return node_possible_map; +} + +#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY]) +static inline void cpuset_init_current_mems_allowed(void) {} + +static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) +{ + return 1; +} + +static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) +{ + return 1; +} + +static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) +{ + return 1; +} + +static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) +{ + return 1; +} + +static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) +{ + return 1; +} + +static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, + const struct task_struct *tsk2) +{ + return 1; +} + +static inline void cpuset_memory_pressure_bump(void) {} + +static inline void cpuset_task_status_allowed(struct seq_file *m, + struct task_struct *task) +{ +} + +static inline int cpuset_mem_spread_node(void) +{ + return 0; +} + +static inline int cpuset_slab_spread_node(void) +{ + return 0; +} + +static inline int cpuset_do_page_mem_spread(void) +{ + return 0; +} + +static inline int cpuset_do_slab_mem_spread(void) +{ + return 0; +} + +static inline int current_cpuset_is_being_rebound(void) +{ + return 0; +} + +static inline void rebuild_sched_domains(void) +{ + partition_sched_domains(1, NULL, NULL); +} + +static inline void cpuset_print_task_mems_allowed(struct task_struct *p) +{ +} + +static inline void set_mems_allowed(nodemask_t nodemask) +{ +} + +static inline unsigned int get_mems_allowed(void) +{ + return 0; +} + +static inline bool put_mems_allowed(unsigned int seq) +{ + return true; +} + +#endif /* !CONFIG_CPUSETS */ + +#endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h new file mode 100644 index 00000000..0e7bf272 --- /dev/null +++ b/include/linux/cramfs_fs.h @@ -0,0 +1,94 @@ +#ifndef __CRAMFS_H +#define __CRAMFS_H + +#include <linux/types.h> +#include <linux/magic.h> + +#define CRAMFS_SIGNATURE "Compressed ROMFS" + +/* + * Width of various bitfields in struct cramfs_inode. + * Primarily used to generate warnings in mkcramfs. + */ +#define CRAMFS_MODE_WIDTH 16 +#define CRAMFS_UID_WIDTH 16 +#define CRAMFS_SIZE_WIDTH 24 +#define CRAMFS_GID_WIDTH 8 +#define CRAMFS_NAMELEN_WIDTH 6 +#define CRAMFS_OFFSET_WIDTH 26 + +/* + * Since inode.namelen is a unsigned 6-bit number, the maximum cramfs + * path length is 63 << 2 = 252. + */ +#define CRAMFS_MAXPATHLEN (((1 << CRAMFS_NAMELEN_WIDTH) - 1) << 2) + +/* + * Reasonably terse representation of the inode data. 
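+ *
+ * (Illustrative note, not part of the original comment: namelen and
+ * offset below are stored divided by 4, so the byte values are
+ * recovered by shifting, e.g.
+ *
+ *    name_bytes = inode->namelen << 2;
+ *    data_start = inode->offset << 2;
+ *
+ * which is also where the CRAMFS_MAXPATHLEN value above comes from.)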
+ */ +struct cramfs_inode { + __u32 mode:CRAMFS_MODE_WIDTH, uid:CRAMFS_UID_WIDTH; + /* SIZE for device files is i_rdev */ + __u32 size:CRAMFS_SIZE_WIDTH, gid:CRAMFS_GID_WIDTH; + /* NAMELEN is the length of the file name, divided by 4 and + rounded up. (cramfs doesn't support hard links.) */ + /* OFFSET: For symlinks and non-empty regular files, this + contains the offset (divided by 4) of the file data in + compressed form (starting with an array of block pointers; + see README). For non-empty directories it is the offset + (divided by 4) of the inode of the first file in that + directory. For anything else, offset is zero. */ + __u32 namelen:CRAMFS_NAMELEN_WIDTH, offset:CRAMFS_OFFSET_WIDTH; +}; + +struct cramfs_info { + __u32 crc; + __u32 edition; + __u32 blocks; + __u32 files; +}; + +/* + * Superblock information at the beginning of the FS. + */ +struct cramfs_super { + __u32 magic; /* 0x28cd3d45 - random number */ + __u32 size; /* length in bytes */ + __u32 flags; /* feature flags */ + __u32 future; /* reserved for future use */ + __u8 signature[16]; /* "Compressed ROMFS" */ + struct cramfs_info fsid; /* unique filesystem info */ + __u8 name[16]; /* user-defined name */ + struct cramfs_inode root; /* root inode data */ +}; + +/* + * Feature flags + * + * 0x00000000 - 0x000000ff: features that work for all past kernels + * 0x00000100 - 0xffffffff: features that don't work for past kernels + */ +#define CRAMFS_FLAG_FSID_VERSION_2 0x00000001 /* fsid version #2 */ +#define CRAMFS_FLAG_SORTED_DIRS 0x00000002 /* sorted dirs */ +#define CRAMFS_FLAG_HOLES 0x00000100 /* support for holes */ +#define CRAMFS_FLAG_WRONG_SIGNATURE 0x00000200 /* reserved */ +#define CRAMFS_FLAG_SHIFTED_ROOT_OFFSET 0x00000400 /* shifted root fs */ + +/* + * Valid values in super.flags. Currently we refuse to mount + * if (flags & ~CRAMFS_SUPPORTED_FLAGS). Maybe that should be + * changed to test super.future instead. + */ +#define CRAMFS_SUPPORTED_FLAGS ( 0x000000ff \ + | CRAMFS_FLAG_HOLES \ + | CRAMFS_FLAG_WRONG_SIGNATURE \ + | CRAMFS_FLAG_SHIFTED_ROOT_OFFSET ) + +#ifdef __KERNEL__ +/* Uncompression interfaces to the underlying zlib */ +int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen); +int cramfs_uncompress_init(void); +void cramfs_uncompress_exit(void); +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/cramfs_fs_sb.h b/include/linux/cramfs_fs_sb.h new file mode 100644 index 00000000..83906935 --- /dev/null +++ b/include/linux/cramfs_fs_sb.h @@ -0,0 +1,20 @@ +#ifndef _CRAMFS_FS_SB +#define _CRAMFS_FS_SB + +/* + * cramfs super-block data in memory + */ +struct cramfs_sb_info { + unsigned long magic; + unsigned long size; + unsigned long blocks; + unsigned long files; + unsigned long flags; +}; + +static inline struct cramfs_sb_info *CRAMFS_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + +#endif diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h new file mode 100644 index 00000000..37e4f8da --- /dev/null +++ b/include/linux/crash_dump.h @@ -0,0 +1,80 @@ +#ifndef LINUX_CRASH_DUMP_H +#define LINUX_CRASH_DUMP_H + +#ifdef CONFIG_CRASH_DUMP +#include <linux/kexec.h> +#include <linux/proc_fs.h> +#include <linux/elf.h> + +#define ELFCORE_ADDR_MAX (-1ULL) +#define ELFCORE_ADDR_ERR (-2ULL) + +extern unsigned long long elfcorehdr_addr; +extern unsigned long long elfcorehdr_size; + +extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, + unsigned long, int); + +/* Architecture code defines this if there are other possible ELF + * machine types, e.g. 
on bi-arch capable hardware. */ +#ifndef vmcore_elf_check_arch_cross +#define vmcore_elf_check_arch_cross(x) 0 +#endif + +/* + * Architecture code can redefine this if there are any special checks + * needed for 64-bit ELF vmcores. In case of 32-bit only architecture, + * this can be set to zero. + */ +#ifndef vmcore_elf64_check_arch +#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) +#endif + +/* + * is_kdump_kernel() checks whether this kernel is booting after a panic of + * previous kernel or not. This is determined by checking if previous kernel + * has passed the elf core header address on command line. + * + * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will + * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of + * previous kernel. + */ + +static inline int is_kdump_kernel(void) +{ + return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0; +} + +/* is_vmcore_usable() checks if the kernel is booting after a panic and + * the vmcore region is usable. + * + * This makes use of the fact that due to alignment -2ULL is not + * a valid pointer, much in the vain of IS_ERR(), except + * dealing directly with an unsigned long long rather than a pointer. + */ + +static inline int is_vmcore_usable(void) +{ + return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0; +} + +/* vmcore_unusable() marks the vmcore as unusable, + * without disturbing the logic of is_kdump_kernel() + */ + +static inline void vmcore_unusable(void) +{ + if (is_kdump_kernel()) + elfcorehdr_addr = ELFCORE_ADDR_ERR; +} + +#define HAVE_OLDMEM_PFN_IS_RAM 1 +extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); +extern void unregister_oldmem_pfn_is_ram(void); + +#else /* !CONFIG_CRASH_DUMP */ +static inline int is_kdump_kernel(void) { return 0; } +#endif /* CONFIG_CRASH_DUMP */ + +extern unsigned long saved_max_pfn; +#endif /* LINUX_CRASHDUMP_H */ diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h new file mode 100644 index 00000000..f52696a1 --- /dev/null +++ b/include/linux/crc-ccitt.h @@ -0,0 +1,15 @@ +#ifndef _LINUX_CRC_CCITT_H +#define _LINUX_CRC_CCITT_H + +#include <linux/types.h> + +extern u16 const crc_ccitt_table[256]; + +extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); + +static inline u16 crc_ccitt_byte(u16 crc, const u8 c) +{ + return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; +} + +#endif /* _LINUX_CRC_CCITT_H */ diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h new file mode 100644 index 00000000..84920f3c --- /dev/null +++ b/include/linux/crc-itu-t.h @@ -0,0 +1,28 @@ +/* + * crc-itu-t.h - CRC ITU-T V.41 routine + * + * Implements the standard CRC ITU-T V.41: + * Width 16 + * Poly 0x0x1021 (x^16 + x^12 + x^15 + 1) + * Init 0 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
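 *
 * Typical usage (a sketch; buf and len name an arbitrary data buffer):
 * seed with the Init value given above and feed the data, e.g.
 *
 *	u16 crc = crc_itu_t(0, buf, len);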
+ */ + +#ifndef CRC_ITU_T_H +#define CRC_ITU_T_H + +#include <linux/types.h> + +extern u16 const crc_itu_t_table[256]; + +extern u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len); + +static inline u16 crc_itu_t_byte(u16 crc, const u8 data) +{ + return (crc << 8) ^ crc_itu_t_table[((crc >> 8) ^ data) & 0xff]; +} + +#endif /* CRC_ITU_T_H */ + diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h new file mode 100644 index 00000000..a9c96d86 --- /dev/null +++ b/include/linux/crc-t10dif.h @@ -0,0 +1,8 @@ +#ifndef _LINUX_CRC_T10DIF_H +#define _LINUX_CRC_T10DIF_H + +#include <linux/types.h> + +__u16 crc_t10dif(unsigned char const *, size_t); + +#endif diff --git a/include/linux/crc16.h b/include/linux/crc16.h new file mode 100644 index 00000000..9443c084 --- /dev/null +++ b/include/linux/crc16.h @@ -0,0 +1,30 @@ +/* + * crc16.h - CRC-16 routine + * + * Implements the standard CRC-16: + * Width 16 + * Poly 0x8005 (x^16 + x^15 + x^2 + 1) + * Init 0 + * + * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com> + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#ifndef __CRC16_H +#define __CRC16_H + +#include <linux/types.h> + +extern u16 const crc16_table[256]; + +extern u16 crc16(u16 crc, const u8 *buffer, size_t len); + +static inline u16 crc16_byte(u16 crc, const u8 data) +{ + return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff]; +} + +#endif /* __CRC16_H */ + diff --git a/include/linux/crc32.h b/include/linux/crc32.h new file mode 100644 index 00000000..68267b64 --- /dev/null +++ b/include/linux/crc32.h @@ -0,0 +1,29 @@ +/* + * crc32.h + * See linux/lib/crc32.c for license and changes + */ +#ifndef _LINUX_CRC32_H +#define _LINUX_CRC32_H + +#include <linux/types.h> +#include <linux/bitrev.h> + +extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len); +extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len); + +extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len); + +#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length) + +/* + * Helpers for hash table generation of ethernet nics: + * + * Ethernet sends the least significant bit of a byte first, thus crc32_le + * is used. The output of crc32_le is bit reversed [most significant bit + * is in bit nr 0], thus it must be reversed before use. Except for + * nics that bit swap the result internally... + */ +#define ether_crc(length, data) bitrev32(crc32_le(~0, data, length)) +#define ether_crc_le(length, data) crc32_le(~0, data, length) + +#endif /* _LINUX_CRC32_H */ diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h new file mode 100644 index 00000000..bd8b44d9 --- /dev/null +++ b/include/linux/crc32c.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_CRC32C_H +#define _LINUX_CRC32C_H + +#include <linux/types.h> + +extern u32 crc32c(u32 crc, const void *address, unsigned int length); + +/* This macro exists for backwards-compatibility. 
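 *
 * Usage mirrors crc32() above: pass a seed and a buffer. A sketch,
 * using an all-ones seed (a common convention; data and length are
 * illustrative):
 *
 *	u32 csum = crc32c(~0, data, length);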
*/ +#define crc32c_le crc32c + +#endif /* _LINUX_CRC32C_H */ diff --git a/include/linux/crc7.h b/include/linux/crc7.h new file mode 100644 index 00000000..1786e772 --- /dev/null +++ b/include/linux/crc7.h @@ -0,0 +1,14 @@ +#ifndef _LINUX_CRC7_H +#define _LINUX_CRC7_H +#include <linux/types.h> + +extern const u8 crc7_syndrome_table[256]; + +static inline u8 crc7_byte(u8 crc, u8 data) +{ + return crc7_syndrome_table[(crc << 1) ^ data]; +} + +extern u8 crc7(u8 crc, const u8 *buffer, size_t len); + +#endif diff --git a/include/linux/crc8.h b/include/linux/crc8.h new file mode 100644 index 00000000..13c8dabb --- /dev/null +++ b/include/linux/crc8.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2011 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __CRC8_H_ +#define __CRC8_H_ + +#include <linux/types.h> + +/* see usage of this value in crc8() description */ +#define CRC8_INIT_VALUE 0xFF + +/* + * Return value of crc8() indicating valid message+crc. This is true + * if a CRC is inverted before transmission. The CRC computed over the + * whole received bitstream is _table[x], where x is the bit pattern + * of the modification (almost always 0xff). + */ +#define CRC8_GOOD_VALUE(_table) (_table[0xFF]) + +/* required table size for crc8 algorithm */ +#define CRC8_TABLE_SIZE 256 + +/* helper macro assuring right table size is used */ +#define DECLARE_CRC8_TABLE(_table) \ + static u8 _table[CRC8_TABLE_SIZE] + +/** + * crc8_populate_lsb - fill crc table for given polynomial in regular bit order. + * + * @table: table to be filled. + * @polynomial: polynomial for which table is to be filled. + * + * This function fills the provided table according the polynomial provided for + * regular bit order (lsb first). Polynomials in CRC algorithms are typically + * represented as shown below. + * + * poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1 + * + * For lsb first direction x^7 maps to the lsb. So the polynomial is as below. + * + * - lsb first: poly = 10101011(1) = 0xAB + */ +void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); + +/** + * crc8_populate_msb - fill crc table for given polynomial in reverse bit order. + * + * @table: table to be filled. + * @polynomial: polynomial for which table is to be filled. + * + * This function fills the provided table according the polynomial provided for + * reverse bit order (msb first). Polynomials in CRC algorithms are typically + * represented as shown below. + * + * poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1 + * + * For msb first direction x^7 maps to the msb. So the polynomial is as below. + * + * - msb first: poly = (1)11010101 = 0xD5 + */ +void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); + +/** + * crc8() - calculate a crc8 over the given input data. + * + * @table: crc table used for calculation. + * @pdata: pointer to data buffer. 
+ * @nbytes: number of bytes in data buffer. + * @crc: previous returned crc8 value. + * + * The CRC8 is calculated using the polynomial given in crc8_populate_msb() + * or crc8_populate_lsb(). + * + * The caller provides the initial value (either %CRC8_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When validating a byte + * stream (including CRC8), a final return value of %CRC8_GOOD_VALUE + * indicates the byte stream data can be considered valid. + * + * Reference: + * "A Painless Guide to CRC Error Detection Algorithms", ver 3, Aug 1993 + * Williams, Ross N., ross<at>ross.net + * (see URL http://www.ross.net/crc/download/crc_v3.txt). + */ +u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc); + +#endif /* __CRC8_H_ */ diff --git a/include/linux/cred.h b/include/linux/cred.h new file mode 100644 index 00000000..adadf71a --- /dev/null +++ b/include/linux/cred.h @@ -0,0 +1,394 @@ +/* Credentials management - see Documentation/security/credentials.txt + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_CRED_H +#define _LINUX_CRED_H + +#include <linux/capability.h> +#include <linux/init.h> +#include <linux/key.h> +#include <linux/selinux.h> +#include <linux/atomic.h> + +struct user_struct; +struct cred; +struct inode; + +/* + * COW Supplementary groups list + */ +#define NGROUPS_SMALL 32 +#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t))) + +struct group_info { + atomic_t usage; + int ngroups; + int nblocks; + gid_t small_block[NGROUPS_SMALL]; + gid_t *blocks[0]; +}; + +/** + * get_group_info - Get a reference to a group info structure + * @group_info: The group info to reference + * + * This gets a reference to a set of supplementary groups. + * + * If the caller is accessing a task's credentials, they must hold the RCU read + * lock when reading. 
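 *
 * A reference taken here is dropped with put_group_info() when the
 * caller is done with it, e.g. (sketch):
 *
 *	struct group_info *gi = get_group_info(cred->group_info);
 *	...
 *	put_group_info(gi);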
+ */ +static inline struct group_info *get_group_info(struct group_info *gi) +{ + atomic_inc(&gi->usage); + return gi; +} + +/** + * put_group_info - Release a reference to a group info structure + * @group_info: The group info to release + */ +#define put_group_info(group_info) \ +do { \ + if (atomic_dec_and_test(&(group_info)->usage)) \ + groups_free(group_info); \ +} while (0) + +extern struct group_info *groups_alloc(int); +extern struct group_info init_groups; +extern void groups_free(struct group_info *); +extern int set_current_groups(struct group_info *); +extern int set_groups(struct cred *, struct group_info *); +extern int groups_search(const struct group_info *, gid_t); + +/* access the groups "array" with this macro */ +#define GROUP_AT(gi, i) \ + ((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK]) + +extern int in_group_p(gid_t); +extern int in_egroup_p(gid_t); + +/* + * The common credentials for a thread group + * - shared by CLONE_THREAD + */ +#ifdef CONFIG_KEYS +struct thread_group_cred { + atomic_t usage; + pid_t tgid; /* thread group process ID */ + spinlock_t lock; + struct key __rcu *session_keyring; /* keyring inherited over fork */ + struct key *process_keyring; /* keyring private to this process */ + struct rcu_head rcu; /* RCU deletion hook */ +}; +#endif + +/* + * The security context of a task + * + * The parts of the context break down into two categories: + * + * (1) The objective context of a task. These parts are used when some other + * task is attempting to affect this one. + * + * (2) The subjective context. These details are used when the task is acting + * upon another object, be that a file, a task, a key or whatever. + * + * Note that some members of this structure belong to both categories - the + * LSM security pointer for instance. + * + * A task has two security pointers. task->real_cred points to the objective + * context that defines that task's actual details. The objective part of this + * context is used whenever that task is acted upon. + * + * task->cred points to the subjective context that defines the details of how + * that task is going to act upon another object. This may be overridden + * temporarily to point to another security context, but normally points to the + * same context as task->real_cred. 
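 *
 * Temporarily overriding the subjective context follows the pattern
 * below (a sketch using the override_creds()/revert_creds() helpers
 * declared later in this header; error handling omitted):
 *
 *	struct cred *new = prepare_creds();
 *	const struct cred *old;
 *
 *	... modify new ...
 *	old = override_creds(new);
 *	... act with the temporary subjective credentials ...
 *	revert_creds(old);
 *	put_cred(new);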
+ */ +struct cred { + atomic_t usage; +#ifdef CONFIG_DEBUG_CREDENTIALS + atomic_t subscribers; /* number of processes subscribed */ + void *put_addr; + unsigned magic; +#define CRED_MAGIC 0x43736564 +#define CRED_MAGIC_DEAD 0x44656144 +#endif + uid_t uid; /* real UID of the task */ + gid_t gid; /* real GID of the task */ + uid_t suid; /* saved UID of the task */ + gid_t sgid; /* saved GID of the task */ + uid_t euid; /* effective UID of the task */ + gid_t egid; /* effective GID of the task */ + uid_t fsuid; /* UID for VFS ops */ + gid_t fsgid; /* GID for VFS ops */ + unsigned securebits; /* SUID-less security management */ + kernel_cap_t cap_inheritable; /* caps our children can inherit */ + kernel_cap_t cap_permitted; /* caps we're permitted */ + kernel_cap_t cap_effective; /* caps we can actually use */ + kernel_cap_t cap_bset; /* capability bounding set */ +#ifdef CONFIG_KEYS + unsigned char jit_keyring; /* default keyring to attach requested + * keys to */ + struct key *thread_keyring; /* keyring private to this thread */ + struct key *request_key_auth; /* assumed request_key authority */ + struct thread_group_cred *tgcred; /* thread-group shared credentials */ +#endif +#ifdef CONFIG_SECURITY + void *security; /* subjective LSM security */ +#endif + struct user_struct *user; /* real user ID subscription */ + struct user_namespace *user_ns; /* cached user->user_ns */ + struct group_info *group_info; /* supplementary groups for euid/fsgid */ + struct rcu_head rcu; /* RCU deletion hook */ +}; + +extern void __put_cred(struct cred *); +extern void exit_creds(struct task_struct *); +extern int copy_creds(struct task_struct *, unsigned long); +extern const struct cred *get_task_cred(struct task_struct *); +extern struct cred *cred_alloc_blank(void); +extern struct cred *prepare_creds(void); +extern struct cred *prepare_exec_creds(void); +extern int commit_creds(struct cred *); +extern void abort_creds(struct cred *); +extern const struct cred *override_creds(const struct cred *); +extern void revert_creds(const struct cred *); +extern struct cred *prepare_kernel_cred(struct task_struct *); +extern int change_create_files_as(struct cred *, struct inode *); +extern int set_security_override(struct cred *, u32); +extern int set_security_override_from_ctx(struct cred *, const char *); +extern int set_create_files_as(struct cred *, struct inode *); +extern void __init cred_init(void); + +/* + * check for validity of credentials + */ +#ifdef CONFIG_DEBUG_CREDENTIALS +extern void __invalid_creds(const struct cred *, const char *, unsigned); +extern void __validate_process_creds(struct task_struct *, + const char *, unsigned); + +extern bool creds_are_invalid(const struct cred *cred); + +static inline void __validate_creds(const struct cred *cred, + const char *file, unsigned line) +{ + if (unlikely(creds_are_invalid(cred))) + __invalid_creds(cred, file, line); +} + +#define validate_creds(cred) \ +do { \ + __validate_creds((cred), __FILE__, __LINE__); \ +} while(0) + +#define validate_process_creds() \ +do { \ + __validate_process_creds(current, __FILE__, __LINE__); \ +} while(0) + +extern void validate_creds_for_do_exit(struct task_struct *); +#else +static inline void validate_creds(const struct cred *cred) +{ +} +static inline void validate_creds_for_do_exit(struct task_struct *tsk) +{ +} +static inline void validate_process_creds(void) +{ +} +#endif + +/** + * get_new_cred - Get a reference on a new set of credentials + * @cred: The new credentials to reference + * + * Get a reference on the 
specified set of new credentials. The caller must + * release the reference. + */ +static inline struct cred *get_new_cred(struct cred *cred) +{ + atomic_inc(&cred->usage); + return cred; +} + +/** + * get_cred - Get a reference on a set of credentials + * @cred: The credentials to reference + * + * Get a reference on the specified set of credentials. The caller must + * release the reference. + * + * This is used to deal with a committed set of credentials. Although the + * pointer is const, this will temporarily discard the const and increment the + * usage count. The purpose of this is to attempt to catch at compile time the + * accidental alteration of a set of credentials that should be considered + * immutable. + */ +static inline const struct cred *get_cred(const struct cred *cred) +{ + struct cred *nonconst_cred = (struct cred *) cred; + validate_creds(cred); + return get_new_cred(nonconst_cred); +} + +/** + * put_cred - Release a reference to a set of credentials + * @cred: The credentials to release + * + * Release a reference to a set of credentials, deleting them when the last ref + * is released. + * + * This takes a const pointer to a set of credentials because the credentials + * on task_struct are attached by const pointers to prevent accidental + * alteration of otherwise immutable credential sets. + */ +static inline void put_cred(const struct cred *_cred) +{ + struct cred *cred = (struct cred *) _cred; + + validate_creds(cred); + if (atomic_dec_and_test(&(cred)->usage)) + __put_cred(cred); +} + +/** + * current_cred - Access the current task's subjective credentials + * + * Access the subjective credentials of the current task. RCU-safe, + * since nobody else can modify it. + */ +#define current_cred() \ + rcu_dereference_protected(current->cred, 1) + +/** + * __task_cred - Access a task's objective credentials + * @task: The task to query + * + * Access the objective credentials of a task. The caller must hold the RCU + * readlock or the task must be dead and unable to change its own credentials. + * + * The result of this function should not be passed directly to get_cred(); + * rather get_task_cred() should be used instead. + */ +#define __task_cred(task) \ + ({ \ + const struct task_struct *__t = (task); \ + rcu_dereference_check(__t->real_cred, \ + task_is_dead(__t)); \ + }) + +/** + * get_current_cred - Get the current task's subjective credentials + * + * Get the subjective credentials of the current task, pinning them so that + * they can't go away. Accessing the current task's credentials directly is + * not permitted. + */ +#define get_current_cred() \ + (get_cred(current_cred())) + +/** + * get_current_user - Get the current task's user_struct + * + * Get the user record of the current task, pinning it so that it can't go + * away. + */ +#define get_current_user() \ +({ \ + struct user_struct *__u; \ + const struct cred *__cred; \ + __cred = current_cred(); \ + __u = get_uid(__cred->user); \ + __u; \ +}) + +/** + * get_current_groups - Get the current task's supplementary group list + * + * Get the supplementary group list of the current task, pinning it so that it + * can't go away. 
+ */ +#define get_current_groups() \ +({ \ + struct group_info *__groups; \ + const struct cred *__cred; \ + __cred = current_cred(); \ + __groups = get_group_info(__cred->group_info); \ + __groups; \ +}) + +#define task_cred_xxx(task, xxx) \ +({ \ + __typeof__(((struct cred *)NULL)->xxx) ___val; \ + rcu_read_lock(); \ + ___val = __task_cred((task))->xxx; \ + rcu_read_unlock(); \ + ___val; \ +}) + +#define task_uid(task) (task_cred_xxx((task), uid)) +#define task_euid(task) (task_cred_xxx((task), euid)) + +#define current_cred_xxx(xxx) \ +({ \ + current_cred()->xxx; \ +}) + +#define current_uid() (current_cred_xxx(uid)) +#define current_gid() (current_cred_xxx(gid)) +#define current_euid() (current_cred_xxx(euid)) +#define current_egid() (current_cred_xxx(egid)) +#define current_suid() (current_cred_xxx(suid)) +#define current_sgid() (current_cred_xxx(sgid)) +#define current_fsuid() (current_cred_xxx(fsuid)) +#define current_fsgid() (current_cred_xxx(fsgid)) +#define current_cap() (current_cred_xxx(cap_effective)) +#define current_user() (current_cred_xxx(user)) +#define current_security() (current_cred_xxx(security)) + +#ifdef CONFIG_USER_NS +#define current_user_ns() (current_cred_xxx(user_ns)) +#define task_user_ns(task) (task_cred_xxx((task), user_ns)) +#else +extern struct user_namespace init_user_ns; +#define current_user_ns() (&init_user_ns) +#define task_user_ns(task) (&init_user_ns) +#endif + + +#define current_uid_gid(_uid, _gid) \ +do { \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_uid) = __cred->uid; \ + *(_gid) = __cred->gid; \ +} while(0) + +#define current_euid_egid(_euid, _egid) \ +do { \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_euid) = __cred->euid; \ + *(_egid) = __cred->egid; \ +} while(0) + +#define current_fsuid_fsgid(_fsuid, _fsgid) \ +do { \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_fsuid) = __cred->fsuid; \ + *(_fsgid) = __cred->fsgid; \ +} while(0) + +#endif /* _LINUX_CRED_H */ diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h new file mode 100644 index 00000000..97e435b1 --- /dev/null +++ b/include/linux/crush/crush.h @@ -0,0 +1,180 @@ +#ifndef CEPH_CRUSH_CRUSH_H +#define CEPH_CRUSH_CRUSH_H + +#include <linux/types.h> + +/* + * CRUSH is a pseudo-random data distribution algorithm that + * efficiently distributes input values (typically, data objects) + * across a heterogeneous, structured storage cluster. + * + * The algorithm was originally described in detail in this paper + * (although the algorithm has evolved somewhat since then): + * + * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf + * + * LGPL2 + */ + + +#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ + + +#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ +#define CRUSH_MAX_SET 10 /* max size of a mapping result */ + + +/* + * CRUSH uses user-defined "rules" to describe how inputs should be + * mapped to devices. A rule consists of sequence of steps to perform + * to generate the set of output devices. 
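 *
 * A typical replicated-placement rule, written with the step op codes
 * defined below (a sketch; root_id, replicas and type are
 * illustrative):
 *
 *	{ CRUSH_RULE_TAKE,               root_id,  0    }
 *	{ CRUSH_RULE_CHOOSE_LEAF_FIRSTN, replicas, type }
 *	{ CRUSH_RULE_EMIT,               0,        0    }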
+ */ +struct crush_rule_step { + __u32 op; + __s32 arg1; + __s32 arg2; +}; + +/* step op codes */ +enum { + CRUSH_RULE_NOOP = 0, + CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */ + CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */ + /* arg2 = type */ + CRUSH_RULE_CHOOSE_INDEP = 3, /* same */ + CRUSH_RULE_EMIT = 4, /* no args */ + CRUSH_RULE_CHOOSE_LEAF_FIRSTN = 6, + CRUSH_RULE_CHOOSE_LEAF_INDEP = 7, +}; + +/* + * for specifying choose num (arg1) relative to the max parameter + * passed to do_rule + */ +#define CRUSH_CHOOSE_N 0 +#define CRUSH_CHOOSE_N_MINUS(x) (-(x)) + +/* + * The rule mask is used to describe what the rule is intended for. + * Given a ruleset and size of output set, we search through the + * rule list for a matching rule_mask. + */ +struct crush_rule_mask { + __u8 ruleset; + __u8 type; + __u8 min_size; + __u8 max_size; +}; + +struct crush_rule { + __u32 len; + struct crush_rule_mask mask; + struct crush_rule_step steps[0]; +}; + +#define crush_rule_size(len) (sizeof(struct crush_rule) + \ + (len)*sizeof(struct crush_rule_step)) + + + +/* + * A bucket is a named container of other items (either devices or + * other buckets). Items within a bucket are chosen using one of a + * few different algorithms. The table summarizes how the speed of + * each option measures up against mapping stability when items are + * added or removed. + * + * Bucket Alg Speed Additions Removals + * ------------------------------------------------ + * uniform O(1) poor poor + * list O(n) optimal poor + * tree O(log n) good good + * straw O(n) optimal optimal + */ +enum { + CRUSH_BUCKET_UNIFORM = 1, + CRUSH_BUCKET_LIST = 2, + CRUSH_BUCKET_TREE = 3, + CRUSH_BUCKET_STRAW = 4 +}; +extern const char *crush_bucket_alg_name(int alg); + +struct crush_bucket { + __s32 id; /* this'll be negative */ + __u16 type; /* non-zero; type=0 is reserved for devices */ + __u8 alg; /* one of CRUSH_BUCKET_* */ + __u8 hash; /* which hash function to use, CRUSH_HASH_* */ + __u32 weight; /* 16-bit fixed point */ + __u32 size; /* num items */ + __s32 *items; + + /* + * cached random permutation: used for uniform bucket and for + * the linear search fallback for the other bucket types. + */ + __u32 perm_x; /* @x for which *perm is defined */ + __u32 perm_n; /* num elements of *perm that are permuted/defined */ + __u32 *perm; +}; + +struct crush_bucket_uniform { + struct crush_bucket h; + __u32 item_weight; /* 16-bit fixed point; all items equally weighted */ +}; + +struct crush_bucket_list { + struct crush_bucket h; + __u32 *item_weights; /* 16-bit fixed point */ + __u32 *sum_weights; /* 16-bit fixed point. element i is sum + of weights 0..i, inclusive */ +}; + +struct crush_bucket_tree { + struct crush_bucket h; /* note: h.size is _tree_ size, not number of + actual items */ + __u8 num_nodes; + __u32 *node_weights; +}; + +struct crush_bucket_straw { + struct crush_bucket h; + __u32 *item_weights; /* 16-bit fixed point */ + __u32 *straws; /* 16-bit fixed point */ +}; + + + +/* + * CRUSH map includes all buckets, rules, etc. + */ +struct crush_map { + struct crush_bucket **buckets; + struct crush_rule **rules; + + /* + * Parent pointers to identify the parent bucket a device or + * bucket in the hierarchy. If an item appears more than + * once, this is the _last_ time it appeared (where buckets + * are processed in bucket id order, from -1 on down to + * -max_buckets. 
+ */ + __u32 *bucket_parents; + __u32 *device_parents; + + __s32 max_buckets; + __u32 max_rules; + __s32 max_devices; +}; + + +/* crush.c */ +extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos); +extern void crush_calc_parents(struct crush_map *map); +extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b); +extern void crush_destroy_bucket_list(struct crush_bucket_list *b); +extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b); +extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b); +extern void crush_destroy_bucket(struct crush_bucket *b); +extern void crush_destroy(struct crush_map *map); + +#endif diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h new file mode 100644 index 00000000..91e88423 --- /dev/null +++ b/include/linux/crush/hash.h @@ -0,0 +1,17 @@ +#ifndef CEPH_CRUSH_HASH_H +#define CEPH_CRUSH_HASH_H + +#define CRUSH_HASH_RJENKINS1 0 + +#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 + +extern const char *crush_hash_name(int type); + +extern __u32 crush_hash32(int type, __u32 a); +extern __u32 crush_hash32_2(int type, __u32 a, __u32 b); +extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c); +extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d); +extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, + __u32 e); + +#endif diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h new file mode 100644 index 00000000..c46b99c1 --- /dev/null +++ b/include/linux/crush/mapper.h @@ -0,0 +1,20 @@ +#ifndef CEPH_CRUSH_MAPPER_H +#define CEPH_CRUSH_MAPPER_H + +/* + * CRUSH functions for find rules and then mapping an input to an + * output set. + * + * LGPL2 + */ + +#include "crush.h" + +extern int crush_find_rule(struct crush_map *map, int pool, int type, int size); +extern int crush_do_rule(struct crush_map *map, + int ruleno, + int x, int *result, int result_max, + int forcefeed, /* -1 for none */ + __u32 *weights); + +#endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h new file mode 100644 index 00000000..b92eadf9 --- /dev/null +++ b/include/linux/crypto.h @@ -0,0 +1,1287 @@ +/* + * Scatterlist Cryptographic API. + * + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * Copyright (c) 2002 David S. Miller (davem@redhat.com) + * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> + * + * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> + * and Nettle, by Niels Möller. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _LINUX_CRYPTO_H +#define _LINUX_CRYPTO_H + +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/bug.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/uaccess.h> + +/* + * Algorithm masks and types. 
+ */ +#define CRYPTO_ALG_TYPE_MASK 0x0000000f +#define CRYPTO_ALG_TYPE_CIPHER 0x00000001 +#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 +#define CRYPTO_ALG_TYPE_AEAD 0x00000003 +#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 +#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 +#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 +#define CRYPTO_ALG_TYPE_HASH 0x00000008 +#define CRYPTO_ALG_TYPE_SHASH 0x00000009 +#define CRYPTO_ALG_TYPE_AHASH 0x0000000a +#define CRYPTO_ALG_TYPE_RNG 0x0000000c +#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f + +#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c +#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c + +#define CRYPTO_ALG_LARVAL 0x00000010 +#define CRYPTO_ALG_DEAD 0x00000020 +#define CRYPTO_ALG_DYING 0x00000040 +#define CRYPTO_ALG_ASYNC 0x00000080 + +/* + * Set this bit if and only if the algorithm requires another algorithm of + * the same type to handle corner cases. + */ +#define CRYPTO_ALG_NEED_FALLBACK 0x00000100 + +/* + * This bit is set for symmetric key ciphers that have already been wrapped + * with a generic IV generator to prevent them from being wrapped again. + */ +#define CRYPTO_ALG_GENIV 0x00000200 + +/* + * Set if the algorithm has passed automated run-time testing. Note that + * if there is no run-time testing for a given algorithm it is considered + * to have passed. + */ + +#define CRYPTO_ALG_TESTED 0x00000400 + +/* + * Set if the algorithm is an instance that is build from templates. + */ +#define CRYPTO_ALG_INSTANCE 0x00000800 + +/* Set this bit if the algorithm provided is hardware accelerated but + * not available to userspace via instruction set or so. + */ +#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000 + +/* + * Transform masks and values (for crt_flags). + */ +#define CRYPTO_TFM_REQ_MASK 0x000fff00 +#define CRYPTO_TFM_RES_MASK 0xfff00000 + +#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 +#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 +#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 +#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 +#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 +#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 +#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000 +#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000 + +/* + * Miscellaneous stuff. + */ +#define CRYPTO_MAX_ALG_NAME 64 + +/* + * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual + * declaration) is used to ensure that the crypto_tfm context structure is + * aligned correctly for the given architecture so that there are no alignment + * faults for C data types. In particular, this is required on platforms such + * as arm where pointers are 32-bit aligned but there are data types such as + * u64 which require 64-bit alignment. 
+ */ +#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN + +#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) + +struct scatterlist; +struct crypto_ablkcipher; +struct crypto_async_request; +struct crypto_aead; +struct crypto_blkcipher; +struct crypto_hash; +struct crypto_rng; +struct crypto_tfm; +struct crypto_type; +struct aead_givcrypt_request; +struct skcipher_givcrypt_request; + +typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + + u32 flags; +}; + +struct ablkcipher_request { + struct crypto_async_request base; + + unsigned int nbytes; + + void *info; + + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct aead_request - AEAD request + * @base: Common attributes for async crypto requests + * @assoclen: Length in bytes of associated data for authentication + * @cryptlen: Length of data to be encrypted or decrypted + * @iv: Initialisation vector + * @assoc: Associated data + * @src: Source data + * @dst: Destination data + * @__ctx: Start of private context data + */ +struct aead_request { + struct crypto_async_request base; + + unsigned int assoclen; + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *assoc; + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct blkcipher_desc { + struct crypto_blkcipher *tfm; + void *info; + u32 flags; +}; + +struct cipher_desc { + struct crypto_tfm *tfm; + void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, + const u8 *src, unsigned int nbytes); + void *info; +}; + +struct hash_desc { + struct crypto_hash *tfm; + u32 flags; +}; + +/* + * Algorithms: modular crypto algorithm implementations, managed + * via crypto_register_alg() and crypto_unregister_alg(). 
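 *
 * A registration sketch (field values are illustrative; a real
 * implementation also fills the type-specific cra_u member):
 *
 *	static struct crypto_alg my_alg = {
 *		.cra_name	= "myalg",
 *		.cra_flags	= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize	= 16,
 *		.cra_ctxsize	= sizeof(struct my_ctx),
 *		.cra_module	= THIS_MODULE,
 *		...
 *	};
 *
 *	err = crypto_register_alg(&my_alg);
 *	...
 *	crypto_unregister_alg(&my_alg);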
+ */ +struct ablkcipher_alg { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + int (*givencrypt)(struct skcipher_givcrypt_request *req); + int (*givdecrypt)(struct skcipher_givcrypt_request *req); + + const char *geniv; + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + +struct aead_alg { + int (*setkey)(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen); + int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); + int (*encrypt)(struct aead_request *req); + int (*decrypt)(struct aead_request *req); + int (*givencrypt)(struct aead_givcrypt_request *req); + int (*givdecrypt)(struct aead_givcrypt_request *req); + + const char *geniv; + + unsigned int ivsize; + unsigned int maxauthsize; +}; + +struct blkcipher_alg { + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + int (*decrypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + + const char *geniv; + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +}; + +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen); + int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen); +}; + +struct rng_alg { + int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen); + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); + + unsigned int seedsize; +}; + + +#define cra_ablkcipher cra_u.ablkcipher +#define cra_aead cra_u.aead +#define cra_blkcipher cra_u.blkcipher +#define cra_cipher cra_u.cipher +#define cra_compress cra_u.compress +#define cra_rng cra_u.rng + +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + + int cra_priority; + atomic_t cra_refcnt; + + char cra_name[CRYPTO_MAX_ALG_NAME]; + char cra_driver_name[CRYPTO_MAX_ALG_NAME]; + + const struct crypto_type *cra_type; + + union { + struct ablkcipher_alg ablkcipher; + struct aead_alg aead; + struct blkcipher_alg blkcipher; + struct cipher_alg cipher; + struct compress_alg compress; + struct rng_alg rng; + } cra_u; + + int (*cra_init)(struct crypto_tfm *tfm); + void (*cra_exit)(struct crypto_tfm *tfm); + void (*cra_destroy)(struct crypto_alg *alg); + + struct module *cra_module; +}; + +/* + * Algorithm registration interface. + */ +int crypto_register_alg(struct crypto_alg *alg); +int crypto_unregister_alg(struct crypto_alg *alg); +int crypto_register_algs(struct crypto_alg *algs, int count); +int crypto_unregister_algs(struct crypto_alg *algs, int count); + +/* + * Algorithm query interface. 
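 *
 * crypto_has_alg() reports whether a matching implementation is
 * available, e.g. (a sketch mirroring the crypto_has_cipher() helper
 * further down):
 *
 *	if (crypto_has_alg("aes", CRYPTO_ALG_TYPE_CIPHER,
 *			   CRYPTO_ALG_TYPE_MASK))
 *		...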
+ */ +int crypto_has_alg(const char *name, u32 type, u32 mask); + +/* + * Transforms: user-instantiated objects which encapsulate algorithms + * and core processing logic. Managed via crypto_alloc_*() and + * crypto_free_*(), as well as the various helpers below. + */ + +struct ablkcipher_tfm { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + int (*givencrypt)(struct skcipher_givcrypt_request *req); + int (*givdecrypt)(struct skcipher_givcrypt_request *req); + + struct crypto_ablkcipher *base; + + unsigned int ivsize; + unsigned int reqsize; +}; + +struct aead_tfm { + int (*setkey)(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct aead_request *req); + int (*decrypt)(struct aead_request *req); + int (*givencrypt)(struct aead_givcrypt_request *req); + int (*givdecrypt)(struct aead_givcrypt_request *req); + + struct crypto_aead *base; + + unsigned int ivsize; + unsigned int authsize; + unsigned int reqsize; +}; + +struct blkcipher_tfm { + void *iv; + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes); + int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes); +}; + +struct cipher_tfm { + int (*cit_setkey)(struct crypto_tfm *tfm, + const u8 *key, unsigned int keylen); + void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +}; + +struct hash_tfm { + int (*init)(struct hash_desc *desc); + int (*update)(struct hash_desc *desc, + struct scatterlist *sg, unsigned int nsg); + int (*final)(struct hash_desc *desc, u8 *out); + int (*digest)(struct hash_desc *desc, struct scatterlist *sg, + unsigned int nsg, u8 *out); + int (*setkey)(struct crypto_hash *tfm, const u8 *key, + unsigned int keylen); + unsigned int digestsize; +}; + +struct compress_tfm { + int (*cot_compress)(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); + int (*cot_decompress)(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); +}; + +struct rng_tfm { + int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen); + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); +}; + +#define crt_ablkcipher crt_u.ablkcipher +#define crt_aead crt_u.aead +#define crt_blkcipher crt_u.blkcipher +#define crt_cipher crt_u.cipher +#define crt_hash crt_u.hash +#define crt_compress crt_u.compress +#define crt_rng crt_u.rng + +struct crypto_tfm { + + u32 crt_flags; + + union { + struct ablkcipher_tfm ablkcipher; + struct aead_tfm aead; + struct blkcipher_tfm blkcipher; + struct cipher_tfm cipher; + struct hash_tfm hash; + struct compress_tfm compress; + struct rng_tfm rng; + } crt_u; + + void (*exit)(struct crypto_tfm *tfm); + + struct crypto_alg *__crt_alg; + + void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_ablkcipher { + struct crypto_tfm base; +}; + +struct crypto_aead { + struct crypto_tfm base; +}; + +struct crypto_blkcipher { + struct crypto_tfm base; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +struct crypto_hash { + struct crypto_tfm base; +}; + +struct crypto_rng { + struct 
crypto_tfm base; +}; + +enum { + CRYPTOA_UNSPEC, + CRYPTOA_ALG, + CRYPTOA_TYPE, + CRYPTOA_U32, + __CRYPTOA_MAX, +}; + +#define CRYPTOA_MAX (__CRYPTOA_MAX - 1) + +/* Maximum number of (rtattr) parameters for each template. */ +#define CRYPTO_MAX_ATTRS 32 + +struct crypto_attr_alg { + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +struct crypto_attr_u32 { + u32 num; +}; + +/* + * Transform user interface. + */ + +struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); +void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm); + +static inline void crypto_free_tfm(struct crypto_tfm *tfm) +{ + return crypto_destroy_tfm(tfm, tfm); +} + +int alg_test(const char *driver, const char *alg, u32 type, u32 mask); + +/* + * Transform helpers which query the underlying algorithm. + */ +static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_name; +} + +static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_driver_name; +} + +static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_priority; +} + +static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; +} + +static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_blocksize; +} + +static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_alignmask; +} + +static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm) +{ + return tfm->crt_flags; +} + +static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags) +{ + tfm->crt_flags |= flags; +} + +static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags) +{ + tfm->crt_flags &= ~flags; +} + +static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) +{ + return tfm->__crt_ctx; +} + +static inline unsigned int crypto_tfm_ctx_alignment(void) +{ + struct crypto_tfm *tfm; + return __alignof__(tfm->__crt_ctx); +} + +/* + * API wrappers. 
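 *
 * A typical synchronous digest computation with the hash wrappers
 * below (a sketch; "md5", buf, len and out are illustrative,
 * sg_init_one() comes from <linux/scatterlist.h>, and error handling
 * is omitted):
 *
 *	struct crypto_hash *tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
 *	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	crypto_hash_digest(&desc, &sg, len, out);
 *	crypto_free_hash(tfm);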
+ */ +static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( + struct crypto_tfm *tfm) +{ + return (struct crypto_ablkcipher *)tfm; +} + +static inline u32 crypto_skcipher_type(u32 type) +{ + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + return type; +} + +static inline u32 crypto_skcipher_mask(u32 mask) +{ + mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; + return mask; +} + +struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, + u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_ablkcipher_tfm( + struct crypto_ablkcipher *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) +{ + crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); +} + +static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, + u32 mask) +{ + return crypto_has_alg(alg_name, crypto_skcipher_type(type), + crypto_skcipher_mask(mask)); +} + +static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( + struct crypto_ablkcipher *tfm) +{ + return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; +} + +static inline unsigned int crypto_ablkcipher_ivsize( + struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->ivsize; +} + +static inline unsigned int crypto_ablkcipher_blocksize( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm)); +} + +static inline unsigned int crypto_ablkcipher_alignmask( + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm)); +} + +static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm)); +} + +static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); +} + +static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, + const u8 *key, unsigned int keylen) +{ + struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); + + return crt->setkey(crt->base, key, keylen); +} + +static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( + struct ablkcipher_request *req) +{ + return __crypto_ablkcipher_cast(req->base.tfm); +} + +static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->encrypt(req); +} + +static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + return crt->decrypt(req); +} + +static inline unsigned int crypto_ablkcipher_reqsize( + struct crypto_ablkcipher *tfm) +{ + return crypto_ablkcipher_crt(tfm)->reqsize; +} + +static inline void ablkcipher_request_set_tfm( + struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) +{ + req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); +} + +static inline struct ablkcipher_request *ablkcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct ablkcipher_request, base); +} + +static inline struct ablkcipher_request *ablkcipher_request_alloc( + struct crypto_ablkcipher *tfm, gfp_t gfp) +{ + struct ablkcipher_request *req; + + req = 
kmalloc(sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(tfm), gfp); + + if (likely(req)) + ablkcipher_request_set_tfm(req, tfm); + + return req; +} + +static inline void ablkcipher_request_free(struct ablkcipher_request *req) +{ + kzfree(req); +} + +static inline void ablkcipher_request_set_callback( + struct ablkcipher_request *req, + u32 flags, crypto_completion_t complete, void *data) +{ + req->base.complete = complete; + req->base.data = data; + req->base.flags = flags; +} + +static inline void ablkcipher_request_set_crypt( + struct ablkcipher_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int nbytes, void *iv) +{ + req->src = src; + req->dst = dst; + req->nbytes = nbytes; + req->info = iv; +} + +static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_aead *)tfm; +} + +struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_aead(struct crypto_aead *tfm) +{ + crypto_free_tfm(crypto_aead_tfm(tfm)); +} + +static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm) +{ + return &crypto_aead_tfm(tfm)->crt_aead; +} + +static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) +{ + return crypto_aead_crt(tfm)->ivsize; +} + +static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) +{ + return crypto_aead_crt(tfm)->authsize; +} + +static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); +} + +static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm)); +} + +static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm) +{ + return crypto_tfm_get_flags(crypto_aead_tfm(tfm)); +} + +static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags); +} + +static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); +} + +static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct aead_tfm *crt = crypto_aead_crt(tfm); + + return crt->setkey(crt->base, key, keylen); +} + +int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); + +static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) +{ + return __crypto_aead_cast(req->base.tfm); +} + +static inline int crypto_aead_encrypt(struct aead_request *req) +{ + return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); +} + +static inline int crypto_aead_decrypt(struct aead_request *req) +{ + return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); +} + +static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) +{ + return crypto_aead_crt(tfm)->reqsize; +} + +static inline void aead_request_set_tfm(struct aead_request *req, + struct crypto_aead *tfm) +{ + req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); +} + +static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, + gfp_t gfp) +{ + struct aead_request *req; + + req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp); + + if (likely(req)) + aead_request_set_tfm(req, tfm); + + return req; +} + +static inline void aead_request_free(struct aead_request *req) +{ + kzfree(req); 
+} + +static inline void aead_request_set_callback(struct aead_request *req, + u32 flags, + crypto_completion_t complete, + void *data) +{ + req->base.complete = complete; + req->base.data = data; + req->base.flags = flags; +} + +static inline void aead_request_set_crypt(struct aead_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int cryptlen, u8 *iv) +{ + req->src = src; + req->dst = dst; + req->cryptlen = cryptlen; + req->iv = iv; +} + +static inline void aead_request_set_assoc(struct aead_request *req, + struct scatterlist *assoc, + unsigned int assoclen) +{ + req->assoc = assoc; + req->assoclen = assoclen; +} + +static inline struct crypto_blkcipher *__crypto_blkcipher_cast( + struct crypto_tfm *tfm) +{ + return (struct crypto_blkcipher *)tfm; +} + +static inline struct crypto_blkcipher *crypto_blkcipher_cast( + struct crypto_tfm *tfm) +{ + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER); + return __crypto_blkcipher_cast(tfm); +} + +static inline struct crypto_blkcipher *crypto_alloc_blkcipher( + const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_blkcipher_tfm( + struct crypto_blkcipher *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) +{ + crypto_free_tfm(crypto_blkcipher_tfm(tfm)); +} + +static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); +} + +static inline struct blkcipher_tfm *crypto_blkcipher_crt( + struct crypto_blkcipher *tfm) +{ + return &crypto_blkcipher_tfm(tfm)->crt_blkcipher; +} + +static inline struct blkcipher_alg *crypto_blkcipher_alg( + struct crypto_blkcipher *tfm) +{ + return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; +} + +static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) +{ + return crypto_blkcipher_alg(tfm)->ivsize; +} + +static inline unsigned int crypto_blkcipher_blocksize( + struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm)); +} + +static inline unsigned int crypto_blkcipher_alignmask( + struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm)); +} + +static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm)); +} + +static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags); +} + +static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); +} + +static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), + key, keylen); +} + +static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; + 
return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); +} + +static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); +} + +static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); +} + +static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); +} + +static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, + const u8 *src, unsigned int len) +{ + memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); +} + +static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, + u8 *dst, unsigned int len) +{ + memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); +} + +static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_cipher *)tfm; +} + +static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) +{ + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); + return __crypto_cipher_cast(tfm); +} + +static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_cipher(struct crypto_cipher *tfm) +{ + crypto_free_tfm(crypto_cipher_tfm(tfm)); +} + +static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->crt_cipher; +} + +static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); +} + +static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); +} + +static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) +{ + return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); +} + +static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm), + key, keylen); +} + +static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src) +{ + crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm), + dst, src); +} + +static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 
*src) +{ + crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm), + dst, src); +} + +static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_hash *)tfm; +} + +static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) +{ + BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) & + CRYPTO_ALG_TYPE_HASH_MASK); + return __crypto_hash_cast(tfm); +} + +static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + mask &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_HASH; + mask |= CRYPTO_ALG_TYPE_HASH_MASK; + + return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_hash(struct crypto_hash *tfm) +{ + crypto_free_tfm(crypto_hash_tfm(tfm)); +} + +static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + mask &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_HASH; + mask |= CRYPTO_ALG_TYPE_HASH_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) +{ + return &crypto_hash_tfm(tfm)->crt_hash; +} + +static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); +} + +static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); +} + +static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) +{ + return crypto_hash_crt(tfm)->digestsize; +} + +static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm) +{ + return crypto_tfm_get_flags(crypto_hash_tfm(tfm)); +} + +static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags); +} + +static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); +} + +static inline int crypto_hash_init(struct hash_desc *desc) +{ + return crypto_hash_crt(desc->tfm)->init(desc); +} + +static inline int crypto_hash_update(struct hash_desc *desc, + struct scatterlist *sg, + unsigned int nbytes) +{ + return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); +} + +static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) +{ + return crypto_hash_crt(desc->tfm)->final(desc, out); +} + +static inline int crypto_hash_digest(struct hash_desc *desc, + struct scatterlist *sg, + unsigned int nbytes, u8 *out) +{ + return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); +} + +static inline int crypto_hash_setkey(struct crypto_hash *hash, + const u8 *key, unsigned int keylen) +{ + return crypto_hash_crt(hash)->setkey(hash, key, keylen); +} + +static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_comp *)tfm; +} + +static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm) +{ + BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) & + CRYPTO_ALG_TYPE_MASK); + return __crypto_comp_cast(tfm); +} + +static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_COMPRESS; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_comp_cast(crypto_alloc_base(alg_name, type, 
mask)); +} + +static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_comp(struct crypto_comp *tfm) +{ + crypto_free_tfm(crypto_comp_tfm(tfm)); +} + +static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_COMPRESS; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline const char *crypto_comp_name(struct crypto_comp *tfm) +{ + return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); +} + +static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm) +{ + return &crypto_comp_tfm(tfm)->crt_compress; +} + +static inline int crypto_comp_compress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm), + src, slen, dst, dlen); +} + +static inline int crypto_comp_decompress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm), + src, slen, dst, dlen); +} + +#endif /* _LINUX_CRYPTO_H */ + diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h new file mode 100644 index 00000000..2cd9f1cf --- /dev/null +++ b/include/linux/cryptohash.h @@ -0,0 +1,18 @@ +#ifndef __CRYPTOHASH_H +#define __CRYPTOHASH_H + +#define SHA_DIGEST_WORDS 5 +#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8) +#define SHA_WORKSPACE_WORDS 16 + +void sha_init(__u32 *buf); +void sha_transform(__u32 *digest, const char *data, __u32 *W); + +#define MD5_DIGEST_WORDS 4 +#define MD5_MESSAGE_BYTES 64 + +void md5_transform(__u32 *hash, __u32 const *in); + +__u32 half_md4_transform(__u32 buf[4], __u32 const in[8]); + +#endif diff --git a/include/linux/cryptouser.h b/include/linux/cryptouser.h new file mode 100644 index 00000000..4abf2ea6 --- /dev/null +++ b/include/linux/cryptouser.h @@ -0,0 +1,105 @@ +/* + * Crypto user configuration API. + * + * Copyright (C) 2011 secunet Security Networks AG + * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* Netlink configuration messages. */ +enum { + CRYPTO_MSG_BASE = 0x10, + CRYPTO_MSG_NEWALG = 0x10, + CRYPTO_MSG_DELALG, + CRYPTO_MSG_UPDATEALG, + CRYPTO_MSG_GETALG, + __CRYPTO_MSG_MAX +}; +#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1) +#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE) + +#define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME + +/* Netlink message attributes. 
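+ * Each CRYPTOCFGA_REPORT_* attribute carries the matching crypto_report_*
+ * structure noted beside it and defined further down in this file;
+ * CRYPTOCFGA_PRIORITY_VAL is a plain __u32.
+ * (Editor's note, not part of the original header.)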
*/ +enum crypto_attr_type_t { + CRYPTOCFGA_UNSPEC, + CRYPTOCFGA_PRIORITY_VAL, /* __u32 */ + CRYPTOCFGA_REPORT_LARVAL, /* struct crypto_report_larval */ + CRYPTOCFGA_REPORT_HASH, /* struct crypto_report_hash */ + CRYPTOCFGA_REPORT_BLKCIPHER, /* struct crypto_report_blkcipher */ + CRYPTOCFGA_REPORT_AEAD, /* struct crypto_report_aead */ + CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */ + CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */ + CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ + __CRYPTOCFGA_MAX + +#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) +}; + +struct crypto_user_alg { + char cru_name[CRYPTO_MAX_ALG_NAME]; + char cru_driver_name[CRYPTO_MAX_ALG_NAME]; + char cru_module_name[CRYPTO_MAX_ALG_NAME]; + __u32 cru_type; + __u32 cru_mask; + __u32 cru_refcnt; + __u32 cru_flags; +}; + +struct crypto_report_larval { + char type[CRYPTO_MAX_NAME]; +}; + +struct crypto_report_hash { + char type[CRYPTO_MAX_NAME]; + unsigned int blocksize; + unsigned int digestsize; +}; + +struct crypto_report_cipher { + char type[CRYPTO_MAX_ALG_NAME]; + unsigned int blocksize; + unsigned int min_keysize; + unsigned int max_keysize; +}; + +struct crypto_report_blkcipher { + char type[CRYPTO_MAX_NAME]; + char geniv[CRYPTO_MAX_NAME]; + unsigned int blocksize; + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + +struct crypto_report_aead { + char type[CRYPTO_MAX_NAME]; + char geniv[CRYPTO_MAX_NAME]; + unsigned int blocksize; + unsigned int maxauthsize; + unsigned int ivsize; +}; + +struct crypto_report_comp { + char type[CRYPTO_MAX_NAME]; +}; + +struct crypto_report_rng { + char type[CRYPTO_MAX_NAME]; + unsigned int seedsize; +}; + +#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ + sizeof(struct crypto_report_blkcipher)) diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h new file mode 100644 index 00000000..c077aec3 --- /dev/null +++ b/include/linux/cs5535.h @@ -0,0 +1,238 @@ +/* + * AMD CS5535/CS5536 definitions + * Copyright (C) 2006 Advanced Micro Devices, Inc. + * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#ifndef _CS5535_H +#define _CS5535_H + +#include <asm/msr.h> + +/* MSRs */ +#define MSR_GLIU_P2D_RO0 0x10000029 + +#define MSR_LX_GLD_MSR_CONFIG 0x48002001 +#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data + * sheet has the wrong value */ +#define MSR_GLCP_SYS_RSTPLL 0x4C000014 +#define MSR_GLCP_DOTPLL 0x4C000015 + +#define MSR_LBAR_SMB 0x5140000B +#define MSR_LBAR_GPIO 0x5140000C +#define MSR_LBAR_MFGPT 0x5140000D +#define MSR_LBAR_ACPI 0x5140000E +#define MSR_LBAR_PMS 0x5140000F + +#define MSR_DIVIL_SOFT_RESET 0x51400017 + +#define MSR_PIC_YSEL_LOW 0x51400020 +#define MSR_PIC_YSEL_HIGH 0x51400021 +#define MSR_PIC_ZSEL_LOW 0x51400022 +#define MSR_PIC_ZSEL_HIGH 0x51400023 +#define MSR_PIC_IRQM_LPC 0x51400025 + +#define MSR_MFGPT_IRQ 0x51400028 +#define MSR_MFGPT_NR 0x51400029 +#define MSR_MFGPT_SETUP 0x5140002B + +#define MSR_RTC_DOMA_OFFSET 0x51400055 +#define MSR_RTC_MONA_OFFSET 0x51400056 +#define MSR_RTC_CEN_OFFSET 0x51400057 + +#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ + +#define MSR_GX_GLD_MSR_CONFIG 0xC0002001 +#define MSR_GX_MSR_PADSEL 0xC0002011 + +static inline int cs5535_pic_unreqz_select_high(unsigned int group, + unsigned int irq) +{ + uint32_t lo, hi; + + rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi); + lo &= ~(0xF << (group * 4)); + lo |= (irq & 0xF) << (group * 4); + wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi); + return 0; +} + +/* PIC registers */ +#define CS5536_PIC_INT_SEL1 0x4d0 +#define CS5536_PIC_INT_SEL2 0x4d1 + +/* resource sizes */ +#define LBAR_GPIO_SIZE 0xFF +#define LBAR_MFGPT_SIZE 0x40 +#define LBAR_ACPI_SIZE 0x40 +#define LBAR_PMS_SIZE 0x80 + +/* + * PMC registers (PMS block) + * It is only safe to access these registers as dword accesses. + * See CS5536 Specification Update erratas 17 & 18 + */ +#define CS5536_PM_SCLK 0x10 +#define CS5536_PM_IN_SLPCTL 0x20 +#define CS5536_PM_WKXD 0x34 +#define CS5536_PM_WKD 0x30 +#define CS5536_PM_SSC 0x54 + +/* + * PM registers (ACPI block) + * It is only safe to access these registers as dword accesses. + * See CS5536 Specification Update erratas 17 & 18 + */ +#define CS5536_PM1_STS 0x00 +#define CS5536_PM1_EN 0x02 +#define CS5536_PM1_CNT 0x08 +#define CS5536_PM_GPE0_STS 0x18 +#define CS5536_PM_GPE0_EN 0x1c + +/* CS5536_PM1_STS bits */ +#define CS5536_WAK_FLAG (1 << 15) +#define CS5536_PWRBTN_FLAG (1 << 8) + +/* CS5536_PM1_EN bits */ +#define CS5536_PM_PWRBTN (1 << 8) +#define CS5536_PM_RTC (1 << 10) + +/* CS5536_PM_GPE0_STS bits */ +#define CS5536_GPIOM7_PME_FLAG (1 << 31) +#define CS5536_GPIOM6_PME_FLAG (1 << 30) + +/* CS5536_PM_GPE0_EN bits */ +#define CS5536_GPIOM7_PME_EN (1 << 31) +#define CS5536_GPIOM6_PME_EN (1 << 30) + +/* VSA2 magic values */ +#define VSA_VRC_INDEX 0xAC1C +#define VSA_VRC_DATA 0xAC1E +#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ +#define VSA_VR_SIGNATURE 0x0003 +#define VSA_VR_MEM_SIZE 0x0200 +#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ +#define GSW_VSA_SIG 0x534d /* General Software signature */ + +#include <linux/io.h> + +static inline int cs5535_has_vsa2(void) +{ + static int has_vsa2 = -1; + + if (has_vsa2 == -1) { + uint16_t val; + + /* + * The VSA has virtual registers that we can query for a + * signature. 
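+		 * The result is cached in the function-local static has_vsa2,
+		 * so the port I/O probe below is only ever performed once.
+		 * (Editor's note, not part of the original header.)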
+ */ + outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); + outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); + + val = inw(VSA_VRC_DATA); + has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG); + } + + return has_vsa2; +} + +/* GPIOs */ +#define GPIO_OUTPUT_VAL 0x00 +#define GPIO_OUTPUT_ENABLE 0x04 +#define GPIO_OUTPUT_OPEN_DRAIN 0x08 +#define GPIO_OUTPUT_INVERT 0x0C +#define GPIO_OUTPUT_AUX1 0x10 +#define GPIO_OUTPUT_AUX2 0x14 +#define GPIO_PULL_UP 0x18 +#define GPIO_PULL_DOWN 0x1C +#define GPIO_INPUT_ENABLE 0x20 +#define GPIO_INPUT_INVERT 0x24 +#define GPIO_INPUT_FILTER 0x28 +#define GPIO_INPUT_EVENT_COUNT 0x2C +#define GPIO_READ_BACK 0x30 +#define GPIO_INPUT_AUX1 0x34 +#define GPIO_EVENTS_ENABLE 0x38 +#define GPIO_LOCK_ENABLE 0x3C +#define GPIO_POSITIVE_EDGE_EN 0x40 +#define GPIO_NEGATIVE_EDGE_EN 0x44 +#define GPIO_POSITIVE_EDGE_STS 0x48 +#define GPIO_NEGATIVE_EDGE_STS 0x4C + +#define GPIO_FLTR7_AMOUNT 0xD8 + +#define GPIO_MAP_X 0xE0 +#define GPIO_MAP_Y 0xE4 +#define GPIO_MAP_Z 0xE8 +#define GPIO_MAP_W 0xEC + +#define GPIO_FE7_SEL 0xF7 + +void cs5535_gpio_set(unsigned offset, unsigned int reg); +void cs5535_gpio_clear(unsigned offset, unsigned int reg); +int cs5535_gpio_isset(unsigned offset, unsigned int reg); +int cs5535_gpio_set_irq(unsigned group, unsigned irq); +void cs5535_gpio_setup_event(unsigned offset, int pair, int pme); + +/* MFGPTs */ + +#define MFGPT_MAX_TIMERS 8 +#define MFGPT_TIMER_ANY (-1) + +#define MFGPT_DOMAIN_WORKING 1 +#define MFGPT_DOMAIN_STANDBY 2 +#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY) + +#define MFGPT_CMP1 0 +#define MFGPT_CMP2 1 + +#define MFGPT_EVENT_IRQ 0 +#define MFGPT_EVENT_NMI 1 +#define MFGPT_EVENT_RESET 3 + +#define MFGPT_REG_CMP1 0 +#define MFGPT_REG_CMP2 2 +#define MFGPT_REG_COUNTER 4 +#define MFGPT_REG_SETUP 6 + +#define MFGPT_SETUP_CNTEN (1 << 15) +#define MFGPT_SETUP_CMP2 (1 << 14) +#define MFGPT_SETUP_CMP1 (1 << 13) +#define MFGPT_SETUP_SETUP (1 << 12) +#define MFGPT_SETUP_STOPEN (1 << 11) +#define MFGPT_SETUP_EXTEN (1 << 10) +#define MFGPT_SETUP_REVEN (1 << 5) +#define MFGPT_SETUP_CLKSEL (1 << 4) + +struct cs5535_mfgpt_timer; + +extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, + uint16_t reg); +extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, + uint16_t value); + +extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, + int event, int enable); +extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, + int *irq, int enable); +extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer, + int domain); +extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer); + +static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer, + int cmp, int *irq) +{ + return cs5535_mfgpt_set_irq(timer, cmp, irq, 1); +} + +static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer, + int cmp, int *irq) +{ + return cs5535_mfgpt_set_irq(timer, cmp, irq, 0); +} + +#endif diff --git a/include/linux/ctype.h b/include/linux/ctype.h new file mode 100644 index 00000000..8acfe312 --- /dev/null +++ b/include/linux/ctype.h @@ -0,0 +1,64 @@ +#ifndef _LINUX_CTYPE_H +#define _LINUX_CTYPE_H + +/* + * NOTE! This ctype does not handle EOF like the standard C + * library is required to. 
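+ * Arguments are reduced to an unsigned char index by __ismask(), so a
+ * negative value such as EOF simply indexes the table like any other
+ * character value.  (Editor's note, not part of the original header.)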
+ */ + +#define _U 0x01 /* upper */ +#define _L 0x02 /* lower */ +#define _D 0x04 /* digit */ +#define _C 0x08 /* cntrl */ +#define _P 0x10 /* punct */ +#define _S 0x20 /* white space (space/lf/tab) */ +#define _X 0x40 /* hex digit */ +#define _SP 0x80 /* hard space (0x20) */ + +extern const unsigned char _ctype[]; + +#define __ismask(x) (_ctype[(int)(unsigned char)(x)]) + +#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) +#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) +#define iscntrl(c) ((__ismask(c)&(_C)) != 0) +#define isdigit(c) ((__ismask(c)&(_D)) != 0) +#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) +#define islower(c) ((__ismask(c)&(_L)) != 0) +#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) +#define ispunct(c) ((__ismask(c)&(_P)) != 0) +/* Note: isspace() must return false for %NUL-terminator */ +#define isspace(c) ((__ismask(c)&(_S)) != 0) +#define isupper(c) ((__ismask(c)&(_U)) != 0) +#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) + +#define isascii(c) (((unsigned char)(c))<=0x7f) +#define toascii(c) (((unsigned char)(c))&0x7f) + +static inline unsigned char __tolower(unsigned char c) +{ + if (isupper(c)) + c -= 'A'-'a'; + return c; +} + +static inline unsigned char __toupper(unsigned char c) +{ + if (islower(c)) + c -= 'a'-'A'; + return c; +} + +#define tolower(c) __tolower(c) +#define toupper(c) __toupper(c) + +/* + * Fast implementation of tolower() for internal usage. Do not use in your + * code. + */ +static inline char _tolower(const char c) +{ + return c | 0x20; +} + +#endif diff --git a/include/linux/cuda.h b/include/linux/cuda.h new file mode 100644 index 00000000..9f9865ff --- /dev/null +++ b/include/linux/cuda.h @@ -0,0 +1,41 @@ +/* + * Definitions for talking to the CUDA. The CUDA is a microcontroller + * which controls the ADB, system power, RTC, and various other things. + * + * Copyright (C) 1996 Paul Mackerras. + */ + +#ifndef _LINUX_CUDA_H +#define _LINUX_CUDA_H + +/* CUDA commands (2nd byte) */ +#define CUDA_WARM_START 0 +#define CUDA_AUTOPOLL 1 +#define CUDA_GET_6805_ADDR 2 +#define CUDA_GET_TIME 3 +#define CUDA_GET_PRAM 7 +#define CUDA_SET_6805_ADDR 8 +#define CUDA_SET_TIME 9 +#define CUDA_POWERDOWN 0xa +#define CUDA_POWERUP_TIME 0xb +#define CUDA_SET_PRAM 0xc +#define CUDA_MS_RESET 0xd +#define CUDA_SEND_DFAC 0xe +#define CUDA_RESET_SYSTEM 0x11 +#define CUDA_SET_IPL 0x12 +#define CUDA_SET_AUTO_RATE 0x14 +#define CUDA_GET_AUTO_RATE 0x16 +#define CUDA_SET_DEVICE_LIST 0x19 +#define CUDA_GET_DEVICE_LIST 0x1a +#define CUDA_GET_SET_IIC 0x22 + +#ifdef __KERNEL__ + +extern int find_via_cuda(void); +extern int cuda_request(struct adb_request *req, + void (*done)(struct adb_request *), int nbytes, ...); +extern void cuda_poll(void); + +#endif /* __KERNEL */ + +#endif /* _LINUX_CUDA_H */ diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h new file mode 100644 index 00000000..a5049eaf --- /dev/null +++ b/include/linux/cyclades.h @@ -0,0 +1,784 @@ +/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $ + * linux/include/linux/cyclades.h + * + * This file was initially written by + * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by + * Ivan Passos <ivan@cyclades.com>. 
+ * + * This file contains the general definitions for the cyclades.c driver + *$Log: cyclades.h,v $ + *Revision 3.1 2002/01/29 11:36:16 henrique + *added throttle field on struct cyclades_port to indicate whether the + *port is throttled or not + * + *Revision 3.1 2000/04/19 18:52:52 ivan + *converted address fields to unsigned long and added fields for physical + *addresses on cyclades_card structure; + * + *Revision 3.0 1998/11/02 14:20:59 ivan + *added nports field on cyclades_card structure; + * + *Revision 2.5 1998/08/03 16:57:01 ivan + *added cyclades_idle_stats structure; + * + *Revision 2.4 1998/06/01 12:09:53 ivan + *removed closing_wait2 from cyclades_port structure; + * + *Revision 2.3 1998/03/16 18:01:12 ivan + *changes in the cyclades_port structure to get it closer to the + *standard serial port structure; + *added constants for new ioctls; + * + *Revision 2.2 1998/02/17 16:50:00 ivan + *changes in the cyclades_port structure (addition of shutdown_wait and + *chip_rev variables); + *added constants for new ioctls and for CD1400 rev. numbers. + * + *Revision 2.1 1997/10/24 16:03:00 ivan + *added rflow (which allows enabling the CD1400 special flow control + *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to + *cyclades_port structure; + *added Alpha support + * + *Revision 2.0 1997/06/30 10:30:00 ivan + *added some new doorbell command constants related to IOCTLW and + *UART error signaling + * + *Revision 1.8 1997/06/03 15:30:00 ivan + *added constant ZFIRM_HLT + *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin) + * + *Revision 1.7 1997/03/26 10:30:00 daniel + *new entries at the end of cyclades_port struct to reallocate + *variables illegally allocated within card memory. + * + *Revision 1.6 1996/09/09 18:35:30 bentson + *fold in changes for Cyclom-Z -- including structures for + *communicating with board as well modest changes to original + *structures to support new features. + * + *Revision 1.5 1995/11/13 21:13:31 bentson + *changes suggested by Michael Chastain <mec@duracef.shout.net> + *to support use of this file in non-kernel applications + * + * + */ + +#ifndef _LINUX_CYCLADES_H +#define _LINUX_CYCLADES_H + +#include <linux/types.h> + +struct cyclades_monitor { + unsigned long int_count; + unsigned long char_count; + unsigned long char_max; + unsigned long char_last; +}; + +/* + * These stats all reflect activity since the device was last initialized. 
+ * (i.e., since the port was opened with no other processes already having it + * open) + */ +struct cyclades_idle_stats { + __kernel_time_t in_use; /* Time device has been in use (secs) */ + __kernel_time_t recv_idle; /* Time since last char received (secs) */ + __kernel_time_t xmit_idle; /* Time since last char transmitted (secs) */ + unsigned long recv_bytes; /* Bytes received */ + unsigned long xmit_bytes; /* Bytes transmitted */ + unsigned long overruns; /* Input overruns */ + unsigned long frame_errs; /* Input framing errors */ + unsigned long parity_errs; /* Input parity errors */ +}; + +#define CYCLADES_MAGIC 0x4359 + +#define CYGETMON 0x435901 +#define CYGETTHRESH 0x435902 +#define CYSETTHRESH 0x435903 +#define CYGETDEFTHRESH 0x435904 +#define CYSETDEFTHRESH 0x435905 +#define CYGETTIMEOUT 0x435906 +#define CYSETTIMEOUT 0x435907 +#define CYGETDEFTIMEOUT 0x435908 +#define CYSETDEFTIMEOUT 0x435909 +#define CYSETRFLOW 0x43590a +#define CYGETRFLOW 0x43590b +#define CYSETRTSDTR_INV 0x43590c +#define CYGETRTSDTR_INV 0x43590d +#define CYZSETPOLLCYCLE 0x43590e +#define CYZGETPOLLCYCLE 0x43590f +#define CYGETCD1400VER 0x435910 +#define CYSETWAIT 0x435912 +#define CYGETWAIT 0x435913 + +/*************** CYCLOM-Z ADDITIONS ***************/ + +#define CZIOC ('M' << 8) +#define CZ_NBOARDS (CZIOC|0xfa) +#define CZ_BOOT_START (CZIOC|0xfb) +#define CZ_BOOT_DATA (CZIOC|0xfc) +#define CZ_BOOT_END (CZIOC|0xfd) +#define CZ_TEST (CZIOC|0xfe) + +#define CZ_DEF_POLL (HZ/25) + +#define MAX_BOARD 4 /* Max number of boards */ +#define MAX_DEV 256 /* Max number of ports total */ +#define CYZ_MAX_SPEED 921600 + +#define CYZ_FIFO_SIZE 16 + +#define CYZ_BOOT_NWORDS 0x100 +struct CYZ_BOOT_CTRL { + unsigned short nboard; + int status[MAX_BOARD]; + int nchannel[MAX_BOARD]; + int fw_rev[MAX_BOARD]; + unsigned long offset; + unsigned long data[CYZ_BOOT_NWORDS]; +}; + + +#ifndef DP_WINDOW_SIZE +/* + * Memory Window Sizes + */ + +#define DP_WINDOW_SIZE (0x00080000) /* window size 512 Kb */ +#define ZE_DP_WINDOW_SIZE (0x00100000) /* window size 1 Mb (Ze and + 8Zo V.2 */ +#define CTRL_WINDOW_SIZE (0x00000080) /* runtime regs 128 bytes */ + +/* + * CUSTOM_REG - Cyclom-Z/PCI Custom Registers Set. The driver + * normally will access only interested on the fpga_id, fpga_version, + * start_cpu and stop_cpu. + */ + +struct CUSTOM_REG { + __u32 fpga_id; /* FPGA Identification Register */ + __u32 fpga_version; /* FPGA Version Number Register */ + __u32 cpu_start; /* CPU start Register (write) */ + __u32 cpu_stop; /* CPU stop Register (write) */ + __u32 misc_reg; /* Miscellaneous Register */ + __u32 idt_mode; /* IDT mode Register */ + __u32 uart_irq_status; /* UART IRQ status Register */ + __u32 clear_timer0_irq; /* Clear timer interrupt Register */ + __u32 clear_timer1_irq; /* Clear timer interrupt Register */ + __u32 clear_timer2_irq; /* Clear timer interrupt Register */ + __u32 test_register; /* Test Register */ + __u32 test_count; /* Test Count Register */ + __u32 timer_select; /* Timer select register */ + __u32 pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */ + __u32 ram_wait_state; /* RAM wait-state Register */ + __u32 uart_wait_state; /* UART wait-state Register */ + __u32 timer_wait_state; /* timer wait-state Register */ + __u32 ack_wait_state; /* ACK wait State Register */ +}; + +/* + * RUNTIME_9060 - PLX PCI9060ES local configuration and shared runtime + * registers. This structure can be used to access the 9060 registers + * (memory mapped). 
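+ * The Cyclades-Z driver reaches this block through the ctl_addr.p9060
+ * pointer kept in struct cyclades_card further below.
+ * (Editor's note, not part of the original header.)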
+ */ + +struct RUNTIME_9060 { + __u32 loc_addr_range; /* 00h - Local Address Range */ + __u32 loc_addr_base; /* 04h - Local Address Base */ + __u32 loc_arbitr; /* 08h - Local Arbitration */ + __u32 endian_descr; /* 0Ch - Big/Little Endian Descriptor */ + __u32 loc_rom_range; /* 10h - Local ROM Range */ + __u32 loc_rom_base; /* 14h - Local ROM Base */ + __u32 loc_bus_descr; /* 18h - Local Bus descriptor */ + __u32 loc_range_mst; /* 1Ch - Local Range for Master to PCI */ + __u32 loc_base_mst; /* 20h - Local Base for Master PCI */ + __u32 loc_range_io; /* 24h - Local Range for Master IO */ + __u32 pci_base_mst; /* 28h - PCI Base for Master PCI */ + __u32 pci_conf_io; /* 2Ch - PCI configuration for Master IO */ + __u32 filler1; /* 30h */ + __u32 filler2; /* 34h */ + __u32 filler3; /* 38h */ + __u32 filler4; /* 3Ch */ + __u32 mail_box_0; /* 40h - Mail Box 0 */ + __u32 mail_box_1; /* 44h - Mail Box 1 */ + __u32 mail_box_2; /* 48h - Mail Box 2 */ + __u32 mail_box_3; /* 4Ch - Mail Box 3 */ + __u32 filler5; /* 50h */ + __u32 filler6; /* 54h */ + __u32 filler7; /* 58h */ + __u32 filler8; /* 5Ch */ + __u32 pci_doorbell; /* 60h - PCI to Local Doorbell */ + __u32 loc_doorbell; /* 64h - Local to PCI Doorbell */ + __u32 intr_ctrl_stat; /* 68h - Interrupt Control/Status */ + __u32 init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */ +}; + +/* Values for the Local Base Address re-map register */ + +#define WIN_RAM 0x00000001L /* set the sliding window to RAM */ +#define WIN_CREG 0x14000001L /* set the window to custom Registers */ + +/* Values timer select registers */ + +#define TIMER_BY_1M 0x00 /* clock divided by 1M */ +#define TIMER_BY_256K 0x01 /* clock divided by 256k */ +#define TIMER_BY_128K 0x02 /* clock divided by 128k */ +#define TIMER_BY_32K 0x03 /* clock divided by 32k */ + +/****************** ****************** *******************/ +#endif + +#ifndef ZFIRM_ID +/* #include "zfwint.h" */ +/****************** ****************** *******************/ +/* + * This file contains the definitions for interfacing with the + * Cyclom-Z ZFIRM Firmware. + */ + +/* General Constant definitions */ + +#define MAX_CHAN 64 /* max number of channels per board */ + +/* firmware id structure (set after boot) */ + +#define ID_ADDRESS 0x00000180L /* signature/pointer address */ +#define ZFIRM_ID 0x5557465AL /* ZFIRM/U signature */ +#define ZFIRM_HLT 0x59505B5CL /* ZFIRM needs external power supply */ +#define ZFIRM_RST 0x56040674L /* RST signal (due to FW reset) */ + +#define ZF_TINACT_DEF 1000 /* default inactivity timeout + (1000 ms) */ +#define ZF_TINACT ZF_TINACT_DEF + +struct FIRM_ID { + __u32 signature; /* ZFIRM/U signature */ + __u32 zfwctrl_addr; /* pointer to ZFW_CTRL structure */ +}; + +/* Op. 
System id */ + +#define C_OS_LINUX 0x00000030 /* generic Linux system */ + +/* channel op_mode */ + +#define C_CH_DISABLE 0x00000000 /* channel is disabled */ +#define C_CH_TXENABLE 0x00000001 /* channel Tx enabled */ +#define C_CH_RXENABLE 0x00000002 /* channel Rx enabled */ +#define C_CH_ENABLE 0x00000003 /* channel Tx/Rx enabled */ +#define C_CH_LOOPBACK 0x00000004 /* Loopback mode */ + +/* comm_parity - parity */ + +#define C_PR_NONE 0x00000000 /* None */ +#define C_PR_ODD 0x00000001 /* Odd */ +#define C_PR_EVEN 0x00000002 /* Even */ +#define C_PR_MARK 0x00000004 /* Mark */ +#define C_PR_SPACE 0x00000008 /* Space */ +#define C_PR_PARITY 0x000000ff + +#define C_PR_DISCARD 0x00000100 /* discard char with frame/par error */ +#define C_PR_IGNORE 0x00000200 /* ignore frame/par error */ + +/* comm_data_l - data length and stop bits */ + +#define C_DL_CS5 0x00000001 +#define C_DL_CS6 0x00000002 +#define C_DL_CS7 0x00000004 +#define C_DL_CS8 0x00000008 +#define C_DL_CS 0x0000000f +#define C_DL_1STOP 0x00000010 +#define C_DL_15STOP 0x00000020 +#define C_DL_2STOP 0x00000040 +#define C_DL_STOP 0x000000f0 + +/* interrupt enabling/status */ + +#define C_IN_DISABLE 0x00000000 /* zero, disable interrupts */ +#define C_IN_TXBEMPTY 0x00000001 /* tx buffer empty */ +#define C_IN_TXLOWWM 0x00000002 /* tx buffer below LWM */ +#define C_IN_RXHIWM 0x00000010 /* rx buffer above HWM */ +#define C_IN_RXNNDT 0x00000020 /* rx no new data timeout */ +#define C_IN_MDCD 0x00000100 /* modem DCD change */ +#define C_IN_MDSR 0x00000200 /* modem DSR change */ +#define C_IN_MRI 0x00000400 /* modem RI change */ +#define C_IN_MCTS 0x00000800 /* modem CTS change */ +#define C_IN_RXBRK 0x00001000 /* Break received */ +#define C_IN_PR_ERROR 0x00002000 /* parity error */ +#define C_IN_FR_ERROR 0x00004000 /* frame error */ +#define C_IN_OVR_ERROR 0x00008000 /* overrun error */ +#define C_IN_RXOFL 0x00010000 /* RX buffer overflow */ +#define C_IN_IOCTLW 0x00020000 /* I/O control w/ wait */ +#define C_IN_MRTS 0x00040000 /* modem RTS drop */ +#define C_IN_ICHAR 0x00080000 + +/* flow control */ + +#define C_FL_OXX 0x00000001 /* output Xon/Xoff flow control */ +#define C_FL_IXX 0x00000002 /* output Xon/Xoff flow control */ +#define C_FL_OIXANY 0x00000004 /* output Xon/Xoff (any xon) */ +#define C_FL_SWFLOW 0x0000000f + +/* flow status */ + +#define C_FS_TXIDLE 0x00000000 /* no Tx data in the buffer or UART */ +#define C_FS_SENDING 0x00000001 /* UART is sending data */ +#define C_FS_SWFLOW 0x00000002 /* Tx is stopped by received Xoff */ + +/* rs_control/rs_status RS-232 signals */ + +#define C_RS_PARAM 0x80000000 /* Indicates presence of parameter in + IOCTLM command */ +#define C_RS_RTS 0x00000001 /* RTS */ +#define C_RS_DTR 0x00000004 /* DTR */ +#define C_RS_DCD 0x00000100 /* CD */ +#define C_RS_DSR 0x00000200 /* DSR */ +#define C_RS_RI 0x00000400 /* RI */ +#define C_RS_CTS 0x00000800 /* CTS */ + +/* commands Host <-> Board */ + +#define C_CM_RESET 0x01 /* reset/flush buffers */ +#define C_CM_IOCTL 0x02 /* re-read CH_CTRL */ +#define C_CM_IOCTLW 0x03 /* re-read CH_CTRL, intr when done */ +#define C_CM_IOCTLM 0x04 /* RS-232 outputs change */ +#define C_CM_SENDXOFF 0x10 /* send Xoff */ +#define C_CM_SENDXON 0x11 /* send Xon */ +#define C_CM_CLFLOW 0x12 /* Clear flow control (resume) */ +#define C_CM_SENDBRK 0x41 /* send break */ +#define C_CM_INTBACK 0x42 /* Interrupt back */ +#define C_CM_SET_BREAK 0x43 /* Tx break on */ +#define C_CM_CLR_BREAK 0x44 /* Tx break off */ +#define C_CM_CMD_DONE 0x45 /* Previous command done */ +#define 
C_CM_INTBACK2 0x46 /* Alternate Interrupt back */ +#define C_CM_TINACT 0x51 /* set inactivity detection */ +#define C_CM_IRQ_ENBL 0x52 /* enable generation of interrupts */ +#define C_CM_IRQ_DSBL 0x53 /* disable generation of interrupts */ +#define C_CM_ACK_ENBL 0x54 /* enable acknowledged interrupt mode */ +#define C_CM_ACK_DSBL 0x55 /* disable acknowledged intr mode */ +#define C_CM_FLUSH_RX 0x56 /* flushes Rx buffer */ +#define C_CM_FLUSH_TX 0x57 /* flushes Tx buffer */ +#define C_CM_Q_ENABLE 0x58 /* enables queue access from the + driver */ +#define C_CM_Q_DISABLE 0x59 /* disables queue access from the + driver */ + +#define C_CM_TXBEMPTY 0x60 /* Tx buffer is empty */ +#define C_CM_TXLOWWM 0x61 /* Tx buffer low water mark */ +#define C_CM_RXHIWM 0x62 /* Rx buffer high water mark */ +#define C_CM_RXNNDT 0x63 /* rx no new data timeout */ +#define C_CM_TXFEMPTY 0x64 +#define C_CM_ICHAR 0x65 +#define C_CM_MDCD 0x70 /* modem DCD change */ +#define C_CM_MDSR 0x71 /* modem DSR change */ +#define C_CM_MRI 0x72 /* modem RI change */ +#define C_CM_MCTS 0x73 /* modem CTS change */ +#define C_CM_MRTS 0x74 /* modem RTS drop */ +#define C_CM_RXBRK 0x84 /* Break received */ +#define C_CM_PR_ERROR 0x85 /* Parity error */ +#define C_CM_FR_ERROR 0x86 /* Frame error */ +#define C_CM_OVR_ERROR 0x87 /* Overrun error */ +#define C_CM_RXOFL 0x88 /* RX buffer overflow */ +#define C_CM_CMDERROR 0x90 /* command error */ +#define C_CM_FATAL 0x91 /* fatal error */ +#define C_CM_HW_RESET 0x92 /* reset board */ + +/* + * CH_CTRL - This per port structure contains all parameters + * that control an specific port. It can be seen as the + * configuration registers of a "super-serial-controller". + */ + +struct CH_CTRL { + __u32 op_mode; /* operation mode */ + __u32 intr_enable; /* interrupt masking */ + __u32 sw_flow; /* SW flow control */ + __u32 flow_status; /* output flow status */ + __u32 comm_baud; /* baud rate - numerically specified */ + __u32 comm_parity; /* parity */ + __u32 comm_data_l; /* data length/stop */ + __u32 comm_flags; /* other flags */ + __u32 hw_flow; /* HW flow control */ + __u32 rs_control; /* RS-232 outputs */ + __u32 rs_status; /* RS-232 inputs */ + __u32 flow_xon; /* xon char */ + __u32 flow_xoff; /* xoff char */ + __u32 hw_overflow; /* hw overflow counter */ + __u32 sw_overflow; /* sw overflow counter */ + __u32 comm_error; /* frame/parity error counter */ + __u32 ichar; + __u32 filler[7]; +}; + + +/* + * BUF_CTRL - This per channel structure contains + * all Tx and Rx buffer control for a given channel. + */ + +struct BUF_CTRL { + __u32 flag_dma; /* buffers are in Host memory */ + __u32 tx_bufaddr; /* address of the tx buffer */ + __u32 tx_bufsize; /* tx buffer size */ + __u32 tx_threshold; /* tx low water mark */ + __u32 tx_get; /* tail index tx buf */ + __u32 tx_put; /* head index tx buf */ + __u32 rx_bufaddr; /* address of the rx buffer */ + __u32 rx_bufsize; /* rx buffer size */ + __u32 rx_threshold; /* rx high water mark */ + __u32 rx_get; /* tail index rx buf */ + __u32 rx_put; /* head index rx buf */ + __u32 filler[5]; /* filler to align structures */ +}; + +/* + * BOARD_CTRL - This per board structure contains all global + * control fields related to the board. 
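+ * The hcmd_* fields carry host-to-firmware commands, the fwcmd_* fields the
+ * firmware-to-host direction, and zf_int_queue_addr locates the INT_QUEUE
+ * structure defined below.  (Editor's note, not part of the original header.)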
+ */ + +struct BOARD_CTRL { + + /* static info provided by the on-board CPU */ + __u32 n_channel; /* number of channels */ + __u32 fw_version; /* firmware version */ + + /* static info provided by the driver */ + __u32 op_system; /* op_system id */ + __u32 dr_version; /* driver version */ + + /* board control area */ + __u32 inactivity; /* inactivity control */ + + /* host to FW commands */ + __u32 hcmd_channel; /* channel number */ + __u32 hcmd_param; /* pointer to parameters */ + + /* FW to Host commands */ + __u32 fwcmd_channel; /* channel number */ + __u32 fwcmd_param; /* pointer to parameters */ + __u32 zf_int_queue_addr; /* offset for INT_QUEUE structure */ + + /* filler so the structures are aligned */ + __u32 filler[6]; +}; + +/* Host Interrupt Queue */ + +#define QUEUE_SIZE (10*MAX_CHAN) + +struct INT_QUEUE { + unsigned char intr_code[QUEUE_SIZE]; + unsigned long channel[QUEUE_SIZE]; + unsigned long param[QUEUE_SIZE]; + unsigned long put; + unsigned long get; +}; + +/* + * ZFW_CTRL - This is the data structure that includes all other + * data structures used by the Firmware. + */ + +struct ZFW_CTRL { + struct BOARD_CTRL board_ctrl; + struct CH_CTRL ch_ctrl[MAX_CHAN]; + struct BUF_CTRL buf_ctrl[MAX_CHAN]; +}; + +/****************** ****************** *******************/ +#endif + +#ifdef __KERNEL__ + +/* Per card data structure */ +struct cyclades_card { + void __iomem *base_addr; + union { + void __iomem *p9050; + struct RUNTIME_9060 __iomem *p9060; + } ctl_addr; + struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */ + int irq; + unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ + unsigned int first_line; /* minor number of first channel on card */ + unsigned int nports; /* Number of ports in the card */ + int bus_index; /* address shift - 0 for ISA, 1 for PCI */ + int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ + u32 hw_ver; + spinlock_t card_lock; + struct cyclades_port *ports; +}; + +/*************************************** + * Memory access functions/macros * + * (required to support Alpha systems) * + ***************************************/ + +#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0) +#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0) +#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0) + +/* + * Statistics counters + */ +struct cyclades_icount { + __u32 cts, dsr, rng, dcd, tx, rx; + __u32 frame, parity, overrun, brk; + __u32 buf_overrun; +}; + +/* + * This is our internal structure for each serial port's state. + * + * Many fields are paralleled by the structure used by the serial_struct + * structure. 
+ * + * For definitions of the flags field, see tty.h + */ + +struct cyclades_port { + int magic; + struct tty_port port; + struct cyclades_card *card; + union { + struct { + void __iomem *base_addr; + } cyy; + struct { + struct CH_CTRL __iomem *ch_ctrl; + struct BUF_CTRL __iomem *buf_ctrl; + } cyz; + } u; + int line; + int flags; /* defined in tty.h */ + int type; /* UART type */ + int read_status_mask; + int ignore_status_mask; + int timeout; + int xmit_fifo_size; + int cor1,cor2,cor3,cor4,cor5; + int tbpr,tco,rbpr,rco; + int baud; + int rflow; + int rtsdtr_inv; + int chip_rev; + int custom_divisor; + u8 x_char; /* to be pushed out ASAP */ + int breakon; + int breakoff; + int xmit_head; + int xmit_tail; + int xmit_cnt; + int default_threshold; + int default_timeout; + unsigned long rflush_count; + struct cyclades_monitor mon; + struct cyclades_idle_stats idle_stats; + struct cyclades_icount icount; + struct completion shutdown_wait; + int throttle; +}; + +#define CLOSING_WAIT_DELAY 30*HZ +#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE +#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF + + +#define CyMAX_CHIPS_PER_CARD 8 +#define CyMAX_CHAR_FIFO 12 +#define CyPORTS_PER_CHIP 4 +#define CD1400_MAX_SPEED 115200 + +#define CyISA_Ywin 0x2000 + +#define CyPCI_Ywin 0x4000 +#define CyPCI_Yctl 0x80 +#define CyPCI_Zctl CTRL_WINDOW_SIZE +#define CyPCI_Zwin 0x80000 +#define CyPCI_Ze_win (2 * CyPCI_Zwin) + +#define PCI_DEVICE_ID_MASK 0x06 + +/**** CD1400 registers ****/ + +#define CD1400_REV_G 0x46 +#define CD1400_REV_J 0x48 + +#define CyRegSize 0x0400 +#define Cy_HwReset 0x1400 +#define Cy_ClrIntr 0x1800 +#define Cy_EpldRev 0x1e00 + +/* Global Registers */ + +#define CyGFRCR (0x40*2) +#define CyRevE (44) +#define CyCAR (0x68*2) +#define CyCHAN_0 (0x00) +#define CyCHAN_1 (0x01) +#define CyCHAN_2 (0x02) +#define CyCHAN_3 (0x03) +#define CyGCR (0x4B*2) +#define CyCH0_SERIAL (0x00) +#define CyCH0_PARALLEL (0x80) +#define CySVRR (0x67*2) +#define CySRModem (0x04) +#define CySRTransmit (0x02) +#define CySRReceive (0x01) +#define CyRICR (0x44*2) +#define CyTICR (0x45*2) +#define CyMICR (0x46*2) +#define CyICR0 (0x00) +#define CyICR1 (0x01) +#define CyICR2 (0x02) +#define CyICR3 (0x03) +#define CyRIR (0x6B*2) +#define CyTIR (0x6A*2) +#define CyMIR (0x69*2) +#define CyIRDirEq (0x80) +#define CyIRBusy (0x40) +#define CyIRUnfair (0x20) +#define CyIRContext (0x1C) +#define CyIRChannel (0x03) +#define CyPPR (0x7E*2) +#define CyCLOCK_20_1MS (0x27) +#define CyCLOCK_25_1MS (0x31) +#define CyCLOCK_25_5MS (0xf4) +#define CyCLOCK_60_1MS (0x75) +#define CyCLOCK_60_2MS (0xea) + +/* Virtual Registers */ + +#define CyRIVR (0x43*2) +#define CyTIVR (0x42*2) +#define CyMIVR (0x41*2) +#define CyIVRMask (0x07) +#define CyIVRRxEx (0x07) +#define CyIVRRxOK (0x03) +#define CyIVRTxOK (0x02) +#define CyIVRMdmOK (0x01) +#define CyTDR (0x63*2) +#define CyRDSR (0x62*2) +#define CyTIMEOUT (0x80) +#define CySPECHAR (0x70) +#define CyBREAK (0x08) +#define CyPARITY (0x04) +#define CyFRAME (0x02) +#define CyOVERRUN (0x01) +#define CyMISR (0x4C*2) +/* see CyMCOR_ and CyMSVR_ for bits*/ +#define CyEOSRR (0x60*2) + +/* Channel Registers */ + +#define CyLIVR (0x18*2) +#define CyMscsr (0x01) +#define CyTdsr (0x02) +#define CyRgdsr (0x03) +#define CyRedsr (0x07) +#define CyCCR (0x05*2) +/* Format 1 */ +#define CyCHAN_RESET (0x80) +#define CyCHIP_RESET (0x81) +#define CyFlushTransFIFO (0x82) +/* Format 2 */ +#define CyCOR_CHANGE (0x40) +#define CyCOR1ch (0x02) +#define CyCOR2ch (0x04) +#define CyCOR3ch (0x08) +/* Format 3 */ +#define 
CySEND_SPEC_1 (0x21) +#define CySEND_SPEC_2 (0x22) +#define CySEND_SPEC_3 (0x23) +#define CySEND_SPEC_4 (0x24) +/* Format 4 */ +#define CyCHAN_CTL (0x10) +#define CyDIS_RCVR (0x01) +#define CyENB_RCVR (0x02) +#define CyDIS_XMTR (0x04) +#define CyENB_XMTR (0x08) +#define CySRER (0x06*2) +#define CyMdmCh (0x80) +#define CyRxData (0x10) +#define CyTxRdy (0x04) +#define CyTxMpty (0x02) +#define CyNNDT (0x01) +#define CyCOR1 (0x08*2) +#define CyPARITY_NONE (0x00) +#define CyPARITY_0 (0x20) +#define CyPARITY_1 (0xA0) +#define CyPARITY_E (0x40) +#define CyPARITY_O (0xC0) +#define Cy_1_STOP (0x00) +#define Cy_1_5_STOP (0x04) +#define Cy_2_STOP (0x08) +#define Cy_5_BITS (0x00) +#define Cy_6_BITS (0x01) +#define Cy_7_BITS (0x02) +#define Cy_8_BITS (0x03) +#define CyCOR2 (0x09*2) +#define CyIXM (0x80) +#define CyTxIBE (0x40) +#define CyETC (0x20) +#define CyAUTO_TXFL (0x60) +#define CyLLM (0x10) +#define CyRLM (0x08) +#define CyRtsAO (0x04) +#define CyCtsAE (0x02) +#define CyDsrAE (0x01) +#define CyCOR3 (0x0A*2) +#define CySPL_CH_DRANGE (0x80) /* special character detect range */ +#define CySPL_CH_DET1 (0x40) /* enable special character detection + on SCHR4-SCHR3 */ +#define CyFL_CTRL_TRNSP (0x20) /* Flow Control Transparency */ +#define CySPL_CH_DET2 (0x10) /* Enable special character detection + on SCHR2-SCHR1 */ +#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */ +#define CyCOR4 (0x1E*2) +#define CyCOR5 (0x1F*2) +#define CyCCSR (0x0B*2) +#define CyRxEN (0x80) +#define CyRxFloff (0x40) +#define CyRxFlon (0x20) +#define CyTxEN (0x08) +#define CyTxFloff (0x04) +#define CyTxFlon (0x02) +#define CyRDCR (0x0E*2) +#define CySCHR1 (0x1A*2) +#define CySCHR2 (0x1B*2) +#define CySCHR3 (0x1C*2) +#define CySCHR4 (0x1D*2) +#define CySCRL (0x22*2) +#define CySCRH (0x23*2) +#define CyLNC (0x24*2) +#define CyMCOR1 (0x15*2) +#define CyMCOR2 (0x16*2) +#define CyRTPR (0x21*2) +#define CyMSVR1 (0x6C*2) +#define CyMSVR2 (0x6D*2) +#define CyANY_DELTA (0xF0) +#define CyDSR (0x80) +#define CyCTS (0x40) +#define CyRI (0x20) +#define CyDCD (0x10) +#define CyDTR (0x02) +#define CyRTS (0x01) +#define CyPVSR (0x6F*2) +#define CyRBPR (0x78*2) +#define CyRCOR (0x7C*2) +#define CyTBPR (0x72*2) +#define CyTCOR (0x76*2) + +/* Custom Registers */ + +#define CyPLX_VER (0x3400) +#define PLX_9050 0x0b +#define PLX_9060 0x0c +#define PLX_9080 0x0d + +/***************************************************************************/ + +#endif /* __KERNEL__ */ +#endif /* _LINUX_CYCLADES_H */ diff --git a/include/linux/cyclomx.h b/include/linux/cyclomx.h new file mode 100644 index 00000000..b88f7f42 --- /dev/null +++ b/include/linux/cyclomx.h @@ -0,0 +1,77 @@ +#ifndef _CYCLOMX_H +#define _CYCLOMX_H +/* +* cyclomx.h Cyclom 2X WAN Link Driver. +* User-level API definitions. +* +* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> +* +* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo +* +* Based on wanpipe.h by Gene Kozin <genek@compuserve.com> +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version +* 2 of the License, or (at your option) any later version. 
+* ============================================================================ +* 2000/07/13 acme remove crap #if KERNEL_VERSION > blah +* 2000/01/21 acme rename cyclomx_open to cyclomx_mod_inc_use_count +* and cyclomx_close to cyclomx_mod_dec_use_count +* 1999/05/19 acme wait_queue_head_t wait_stats(support for 2.3.*) +* 1999/01/03 acme judicious use of data types +* 1998/12/27 acme cleanup: PACKED not needed +* 1998/08/08 acme Version 0.0.1 +*/ + +#include <linux/wanrouter.h> +#include <linux/spinlock.h> + +#ifdef __KERNEL__ +/* Kernel Interface */ + +#include <linux/cycx_drv.h> /* Cyclom 2X support module API definitions */ +#include <linux/cycx_cfm.h> /* Cyclom 2X firmware module definitions */ +#ifdef CONFIG_CYCLOMX_X25 +#include <linux/cycx_x25.h> +#endif + +/* Adapter Data Space. + * This structure is needed because we handle multiple cards, otherwise + * static data would do it. + */ +struct cycx_device { + char devname[WAN_DRVNAME_SZ + 1];/* card name */ + struct cycx_hw hw; /* hardware configuration */ + struct wan_device wandev; /* WAN device data space */ + u32 state_tick; /* link state timestamp */ + spinlock_t lock; + char in_isr; /* interrupt-in-service flag */ + char buff_int_mode_unbusy; /* flag for carrying out dev_tint */ + wait_queue_head_t wait_stats; /* to wait for the STATS indication */ + void __iomem *mbox; /* -> mailbox */ + void (*isr)(struct cycx_device* card); /* interrupt service routine */ + int (*exec)(struct cycx_device* card, void* u_cmd, void* u_data); + union { +#ifdef CONFIG_CYCLOMX_X25 + struct { /* X.25 specific data */ + u32 lo_pvc; + u32 hi_pvc; + u32 lo_svc; + u32 hi_svc; + struct cycx_x25_stats stats; + spinlock_t lock; + u32 connection_keys; + } x; +#endif + } u; +}; + +/* Public Functions */ +void cycx_set_state(struct cycx_device *card, int state); + +#ifdef CONFIG_CYCLOMX_X25 +int cycx_x25_wan_init(struct cycx_device *card, wandev_conf_t *conf); +#endif +#endif /* __KERNEL__ */ +#endif /* _CYCLOMX_H */ diff --git a/include/linux/cycx_cfm.h b/include/linux/cycx_cfm.h new file mode 100644 index 00000000..032d26ed --- /dev/null +++ b/include/linux/cycx_cfm.h @@ -0,0 +1,101 @@ +/* +* cycx_cfm.h Cyclom 2X WAN Link Driver. +* Definitions for the Cyclom 2X Firmware Module (CFM). +* +* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> +* +* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo +* +* Based on sdlasfm.h by Gene Kozin <74604.152@compuserve.com> +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version +* 2 of the License, or (at your option) any later version. +* ============================================================================ +* 1998/08/08 acme Initial version. +*/ +#ifndef _CYCX_CFM_H +#define _CYCX_CFM_H + +/* Defines */ + +#define CFM_VERSION 2 +#define CFM_SIGNATURE "CFM - Cyclades CYCX Firmware Module" + +/* min/max */ +#define CFM_IMAGE_SIZE 0x20000 /* max size of CYCX code image file */ +#define CFM_DESCR_LEN 256 /* max length of description string */ +#define CFM_MAX_CYCX 1 /* max number of compatible adapters */ +#define CFM_LOAD_BUFSZ 0x400 /* buffer size for reset code (buffer_load) */ + +/* Firmware Commands */ +#define GEN_POWER_ON 0x1280 + +#define GEN_SET_SEG 0x1401 /* boot segment setting. */ +#define GEN_BOOT_DAT 0x1402 /* boot data. */ +#define GEN_START 0x1403 /* board start. */ +#define GEN_DEFPAR 0x1404 /* buffer length for boot. 
*/ + +/* Adapter Types */ +#define CYCX_2X 2 +/* for now only the 2X is supported, no plans to support 8X or 16X */ +#define CYCX_8X 8 +#define CYCX_16X 16 + +#define CFID_X25_2X 5200 + +/** + * struct cycx_fw_info - firmware module information. + * @codeid - firmware ID + * @version - firmware version number + * @adapter - compatible adapter types + * @memsize - minimum memory size + * @reserved - reserved + * @startoffs - entry point offset + * @winoffs - dual-port memory window offset + * @codeoffs - code load offset + * @codesize - code size + * @dataoffs - configuration data load offset + * @datasize - configuration data size + */ +struct cycx_fw_info { + unsigned short codeid; + unsigned short version; + unsigned short adapter[CFM_MAX_CYCX]; + unsigned long memsize; + unsigned short reserved[2]; + unsigned short startoffs; + unsigned short winoffs; + unsigned short codeoffs; + unsigned long codesize; + unsigned short dataoffs; + unsigned long datasize; +}; + +/** + * struct cycx_firmware - CYCX firmware file structure + * @signature - CFM file signature + * @version - file format version + * @checksum - info + image + * @reserved - reserved + * @descr - description string + * @info - firmware module info + * @image - code image (variable size) + */ +struct cycx_firmware { + char signature[80]; + unsigned short version; + unsigned short checksum; + unsigned short reserved[6]; + char descr[CFM_DESCR_LEN]; + struct cycx_fw_info info; + unsigned char image[0]; +}; + +struct cycx_fw_header { + unsigned long reset_size; + unsigned long data_size; + unsigned long code_size; +}; +#endif /* _CYCX_CFM_H */ diff --git a/include/linux/cycx_drv.h b/include/linux/cycx_drv.h new file mode 100644 index 00000000..12fe6b0b --- /dev/null +++ b/include/linux/cycx_drv.h @@ -0,0 +1,64 @@ +/* +* cycx_drv.h CYCX Support Module. Kernel API Definitions. +* +* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> +* +* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo +* +* Based on sdladrv.h by Gene Kozin <genek@compuserve.com> +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version +* 2 of the License, or (at your option) any later version. +* ============================================================================ +* 1999/10/23 acme cycxhw_t cleanup +* 1999/01/03 acme more judicious use of data types... +* uclong, ucchar, etc deleted, the u8, u16, u32 +* types are the portable way to go. +* 1999/01/03 acme judicious use of data types... u16, u32, etc +* 1998/12/26 acme FIXED_BUFFERS, CONF_OFFSET, +* removal of cy_read{bwl} +* 1998/08/08 acme Initial version. 
+*/ +#ifndef _CYCX_DRV_H +#define _CYCX_DRV_H + +#define CYCX_WINDOWSIZE 0x4000 /* default dual-port memory window size */ +#define GEN_CYCX_INTR 0x02 +#define RST_ENABLE 0x04 +#define START_CPU 0x06 +#define RST_DISABLE 0x08 +#define FIXED_BUFFERS 0x08 +#define TEST_PATTERN 0xaa55 +#define CMD_OFFSET 0x20 +#define CONF_OFFSET 0x0380 +#define RESET_OFFSET 0x3c00 /* For reset file load */ +#define DATA_OFFSET 0x0100 /* For code and data files load */ +#define START_OFFSET 0x3ff0 /* 80186 starts here */ + +/** + * struct cycx_hw - Adapter hardware configuration + * @fwid - firmware ID + * @irq - interrupt request level + * @dpmbase - dual-port memory base + * @dpmsize - dual-port memory size + * @reserved - reserved for future use + */ +struct cycx_hw { + u32 fwid; + int irq; + void __iomem *dpmbase; + u32 dpmsize; + u32 reserved[5]; +}; + +/* Function Prototypes */ +extern int cycx_setup(struct cycx_hw *hw, void *sfm, u32 len, unsigned long base); +extern int cycx_down(struct cycx_hw *hw); +extern int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len); +extern int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len); +extern int cycx_exec(void __iomem *addr); + +extern void cycx_intr(struct cycx_hw *hw); +#endif /* _CYCX_DRV_H */ diff --git a/include/linux/cycx_x25.h b/include/linux/cycx_x25.h new file mode 100644 index 00000000..362bf19d --- /dev/null +++ b/include/linux/cycx_x25.h @@ -0,0 +1,125 @@ +#ifndef _CYCX_X25_H +#define _CYCX_X25_H +/* +* cycx_x25.h Cyclom X.25 firmware API definitions. +* +* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> +* +* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo +* +* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com> +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version +* 2 of the License, or (at your option) any later version. +* ============================================================================ +* 2000/04/02 acme dprintk and cycx_debug +* 1999/01/03 acme judicious use of data types +* 1999/01/02 acme #define X25_ACK_N3 0x4411 +* 1998/12/28 acme cleanup: lot'o'things removed +* commands listed, +* TX25Cmd & TX25Config structs +* typedef'ed +*/ +#ifndef PACKED +#define PACKED __attribute__((packed)) +#endif + +/* X.25 shared memory layout. */ +#define X25_MBOX_OFFS 0x300 /* general mailbox block */ +#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */ + +/* Debug */ +#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a) + +extern unsigned int cycx_debug; + +/* Data Structures */ +/* X.25 Command Block. */ +struct cycx_x25_cmd { + u16 command; + u16 link; /* values: 0 or 1 */ + u16 len; /* values: 0 thru 0x205 (517) */ + u32 buf; +} PACKED; + +/* Defines for the 'command' field. 
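+ * The names follow the usual X.25 service-primitive convention: the
+ * *_REQUEST/*_RESPONSE commands are issued by the host driver, while the
+ * *_INDICATION/*_CONFIRM values are reported back by the board firmware.
+ * (Editor's note, not part of the original header.)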
*/ +#define X25_CONNECT_REQUEST 0x4401 +#define X25_CONNECT_RESPONSE 0x4402 +#define X25_DISCONNECT_REQUEST 0x4403 +#define X25_DISCONNECT_RESPONSE 0x4404 +#define X25_DATA_REQUEST 0x4405 +#define X25_ACK_TO_VC 0x4406 +#define X25_INTERRUPT_RESPONSE 0x4407 +#define X25_CONFIG 0x4408 +#define X25_CONNECT_INDICATION 0x4409 +#define X25_CONNECT_CONFIRM 0x440A +#define X25_DISCONNECT_INDICATION 0x440B +#define X25_DISCONNECT_CONFIRM 0x440C +#define X25_DATA_INDICATION 0x440E +#define X25_INTERRUPT_INDICATION 0x440F +#define X25_ACK_FROM_VC 0x4410 +#define X25_ACK_N3 0x4411 +#define X25_CONNECT_COLLISION 0x4413 +#define X25_N3WIN 0x4414 +#define X25_LINE_ON 0x4415 +#define X25_LINE_OFF 0x4416 +#define X25_RESET_REQUEST 0x4417 +#define X25_LOG 0x4500 +#define X25_STATISTIC 0x4600 +#define X25_TRACE 0x4700 +#define X25_N2TRACEXC 0x4702 +#define X25_N3TRACEXC 0x4703 + +/** + * struct cycx_x25_config - cyclom2x x25 firmware configuration + * @link - link number + * @speed - line speed + * @clock - internal/external + * @n2 - # of level 2 retransm.(values: 1 thru FF) + * @n2win - level 2 window (values: 1 thru 7) + * @n3win - level 3 window (values: 1 thru 7) + * @nvc - # of logical channels (values: 1 thru 64) + * @pktlen - level 3 packet length - log base 2 of size + * @locaddr - my address + * @remaddr - remote address + * @t1 - time, in seconds + * @t2 - time, in seconds + * @t21 - time, in seconds + * @npvc - # of permanent virt. circuits (1 thru nvc) + * @t23 - time, in seconds + * @flags - see dosx25.doc, in portuguese, for details + */ +struct cycx_x25_config { + u8 link; + u8 speed; + u8 clock; + u8 n2; + u8 n2win; + u8 n3win; + u8 nvc; + u8 pktlen; + u8 locaddr; + u8 remaddr; + u16 t1; + u16 t2; + u8 t21; + u8 npvc; + u8 t23; + u8 flags; +} PACKED; + +struct cycx_x25_stats { + u16 rx_crc_errors; + u16 rx_over_errors; + u16 n2_tx_frames; + u16 n2_rx_frames; + u16 tx_timeouts; + u16 rx_timeouts; + u16 n3_tx_packets; + u16 n3_rx_packets; + u16 tx_aborts; + u16 rx_aborts; +} PACKED; +#endif /* _CYCX_X25_H */ diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h new file mode 100644 index 00000000..54288850 --- /dev/null +++ b/include/linux/davinci_emac.h @@ -0,0 +1,50 @@ +/* + * TI DaVinci EMAC platform support + * + * Author: Kevin Hilman, Deep Root Systems, LLC + * + * 2007 (c) Deep Root Systems, LLC. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ */ +#ifndef _LINUX_DAVINCI_EMAC_H +#define _LINUX_DAVINCI_EMAC_H + +#include <linux/if_ether.h> +#include <linux/memory.h> + +struct mdio_platform_data { + unsigned long bus_freq; +}; + +struct emac_platform_data { + char mac_addr[ETH_ALEN]; + u32 ctrl_reg_offset; + u32 ctrl_mod_reg_offset; + u32 ctrl_ram_offset; + u32 hw_ram_addr; + u32 ctrl_ram_size; + + /* + * phy_id can be one of the following: + * - NULL : use the first phy on the bus, + * - "" : force to 100/full, no mdio control + * - "<bus>:<addr>" : use the specified bus and phy + */ + const char *phy_id; + + u8 rmii_en; + u8 version; + bool no_bd_ram; + void (*interrupt_enable) (void); + void (*interrupt_disable) (void); +}; + +enum { + EMAC_VERSION_1, /* DM644x */ + EMAC_VERSION_2, /* DM646x */ +}; + +void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context); +#endif diff --git a/include/linux/dca.h b/include/linux/dca.h new file mode 100644 index 00000000..d27a7a05 --- /dev/null +++ b/include/linux/dca.h @@ -0,0 +1,81 @@ +/* + * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. 
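   As an aside that is not part of the original header, board code typically selects the PHY through the phy_id convention documented above; the register offsets and the MDIO bus name below are purely illustrative assumptions:

	static struct emac_platform_data example_emac_pdata = {
		.ctrl_reg_offset     = 0x3000,		// assumed SoC-specific offsets
		.ctrl_mod_reg_offset = 0x2000,
		.ctrl_ram_offset     = 0x0000,
		.ctrl_ram_size       = 0x2000,
		.phy_id              = "mdio-bus.0:01",	// "<bus>:<addr>" form
		.version             = EMAC_VERSION_2,	// DM646x-class controller
	};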
+ */ +#ifndef DCA_H +#define DCA_H + +#include <linux/pci.h> + +/* DCA Provider API */ + +/* DCA Notifier Interface */ +void dca_register_notify(struct notifier_block *nb); +void dca_unregister_notify(struct notifier_block *nb); + +#define DCA_PROVIDER_ADD 0x0001 +#define DCA_PROVIDER_REMOVE 0x0002 + +struct dca_provider { + struct list_head node; + struct dca_ops *ops; + struct device *cd; + int id; +}; + +struct dca_domain { + struct list_head node; + struct list_head dca_providers; + struct pci_bus *pci_rc; +}; + +struct dca_ops { + int (*add_requester) (struct dca_provider *, struct device *); + int (*remove_requester) (struct dca_provider *, struct device *); + u8 (*get_tag) (struct dca_provider *, struct device *, + int cpu); + int (*dev_managed) (struct dca_provider *, struct device *); +}; + +struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); +void free_dca_provider(struct dca_provider *dca); +int register_dca_provider(struct dca_provider *dca, struct device *dev); +void unregister_dca_provider(struct dca_provider *dca, struct device *dev); + +static inline void *dca_priv(struct dca_provider *dca) +{ + return (void *)dca + sizeof(struct dca_provider); +} + +/* Requester API */ +#define DCA_GET_TAG_TWO_ARGS +int dca_add_requester(struct device *dev); +int dca_remove_requester(struct device *dev); +u8 dca_get_tag(int cpu); +u8 dca3_get_tag(struct device *dev, int cpu); + +/* internal stuff */ +int __init dca_sysfs_init(void); +void __exit dca_sysfs_exit(void); +int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev); +void dca_sysfs_remove_provider(struct dca_provider *dca); +int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot); +void dca_sysfs_remove_req(struct dca_provider *dca, int slot); + +#endif /* DCA_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h new file mode 100644 index 00000000..7e11f141 --- /dev/null +++ b/include/linux/dcache.h @@ -0,0 +1,403 @@ +#ifndef __LINUX_DCACHE_H +#define __LINUX_DCACHE_H + +#include <linux/atomic.h> +#include <linux/list.h> +#include <linux/rculist.h> +#include <linux/rculist_bl.h> +#include <linux/spinlock.h> +#include <linux/seqlock.h> +#include <linux/cache.h> +#include <linux/rcupdate.h> + +struct nameidata; +struct path; +struct vfsmount; + +/* + * linux/include/linux/dcache.h + * + * Dirent cache data structures + * + * (C) Copyright 1997 Thomas Schoebel-Theuer, + * with heavy changes by Linus Torvalds + */ + +#define IS_ROOT(x) ((x) == (x)->d_parent) + +/* + * "quick string" -- eases parameter passing, but more importantly + * saves "metadata" about the string (ie length and the hash). + * + * hash comes first so it snuggles against d_parent in the + * dentry. + */ +struct qstr { + unsigned int hash; + unsigned int len; + const unsigned char *name; +}; + +struct dentry_stat_t { + int nr_dentry; + int nr_unused; + int age_limit; /* age in seconds */ + int want_pages; /* pages requested by system */ + int dummy[2]; +}; +extern struct dentry_stat_t dentry_stat; + +/* Name hashing routines. Initial hash value */ +/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ +#define init_name_hash() 0 + +/* partial hash update function. 
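   By way of illustration, and not part of the original header, a driver acting as a DCA requester registers its device and then looks up the tag for the CPU that will consume the data; the function name and the trimmed error handling are assumptions:

	static int example_enable_dca(struct pci_dev *pdev, int cpu)
	{
		u8 tag;
		int err;

		err = dca_add_requester(&pdev->dev);
		if (err)
			return err;

		// DCA3 style: tags are looked up per device and per CPU
		tag = dca3_get_tag(&pdev->dev, cpu);
		// ... program 'tag' into the device's DCA control register ...

		return 0;	// call dca_remove_requester(&pdev->dev) on teardown
	}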
Assume roughly 4 bits per character */ +static inline unsigned long +partial_name_hash(unsigned long c, unsigned long prevhash) +{ + return (prevhash + (c << 4) + (c >> 4)) * 11; +} + +/* + * Finally: cut down the number of bits to a int value (and try to avoid + * losing bits) + */ +static inline unsigned long end_name_hash(unsigned long hash) +{ + return (unsigned int) hash; +} + +/* Compute the hash for a name string. */ +extern unsigned int full_name_hash(const unsigned char *, unsigned int); + +/* + * Try to keep struct dentry aligned on 64 byte cachelines (this will + * give reasonable cacheline footprint with larger lines without the + * large memory footprint increase). + */ +#ifdef CONFIG_64BIT +# define DNAME_INLINE_LEN 32 /* 192 bytes */ +#else +# ifdef CONFIG_SMP +# define DNAME_INLINE_LEN 36 /* 128 bytes */ +# else +# define DNAME_INLINE_LEN 40 /* 128 bytes */ +# endif +#endif + +struct dentry { + /* RCU lookup touched fields */ + unsigned int d_flags; /* protected by d_lock */ + seqcount_t d_seq; /* per dentry seqlock */ + struct hlist_bl_node d_hash; /* lookup hash list */ + struct dentry *d_parent; /* parent directory */ + struct qstr d_name; + struct inode *d_inode; /* Where the name belongs to - NULL is + * negative */ + unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ + + /* Ref lookup also touches following */ + unsigned int d_count; /* protected by d_lock */ + spinlock_t d_lock; /* per dentry lock */ + const struct dentry_operations *d_op; + struct super_block *d_sb; /* The root of the dentry tree */ + unsigned long d_time; /* used by d_revalidate */ + void *d_fsdata; /* fs-specific data */ + + struct list_head d_lru; /* LRU list */ + /* + * d_child and d_rcu can share memory + */ + union { + struct list_head d_child; /* child of parent list */ + struct rcu_head d_rcu; + } d_u; + struct list_head d_subdirs; /* our children */ + struct list_head d_alias; /* inode alias list */ +}; + +/* + * dentry->d_lock spinlock nesting subclasses: + * + * 0: normal + * 1: nested + */ +enum dentry_d_lock_class +{ + DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */ + DENTRY_D_LOCK_NESTED +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, struct nameidata *); + int (*d_hash)(const struct dentry *, const struct inode *, + struct qstr *); + int (*d_compare)(const struct dentry *, const struct inode *, + const struct dentry *, const struct inode *, + unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char *(*d_dname)(struct dentry *, char *, int); + struct vfsmount *(*d_automount)(struct path *); + int (*d_manage)(struct dentry *, bool); +} ____cacheline_aligned; + +/* + * Locking rules for dentry_operations callbacks are to be found in + * Documentation/filesystems/Locking. Keep it updated! + * + * FUrther descriptions are found in Documentation/filesystems/vfs.txt. + * Keep it updated too! + */ + +/* d_flags entries */ +#define DCACHE_OP_HASH 0x0001 +#define DCACHE_OP_COMPARE 0x0002 +#define DCACHE_OP_REVALIDATE 0x0004 +#define DCACHE_OP_DELETE 0x0008 +#define DCACHE_OP_PRUNE 0x0010 + +#define DCACHE_DISCONNECTED 0x0020 + /* This dentry is possibly not currently connected to the dcache tree, in + * which case its parent will either be itself, or will have this flag as + * well. 
nfsd will not use a dentry with this bit set, but will first + * endeavour to clear the bit either by discovering that it is connected, + * or by performing lookup operations. Any filesystem which supports + * nfsd_operations MUST have a lookup function which, if it finds a + * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that + * dentry into place and return that dentry rather than the passed one, + * typically using d_splice_alias. */ + +#define DCACHE_REFERENCED 0x0040 /* Recently used, don't discard. */ +#define DCACHE_RCUACCESS 0x0080 /* Entry has ever been RCU-visible */ + +#define DCACHE_CANT_MOUNT 0x0100 +#define DCACHE_GENOCIDE 0x0200 +#define DCACHE_SHRINK_LIST 0x0400 + +#define DCACHE_NFSFS_RENAMED 0x1000 + /* this dentry has been "silly renamed" and has to be deleted on the last + * dput() */ +#define DCACHE_COOKIE 0x2000 /* For use by dcookie subsystem */ +#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x4000 + /* Parent inode is watched by some fsnotify listener */ + +#define DCACHE_MOUNTED 0x10000 /* is a mountpoint */ +#define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */ +#define DCACHE_MANAGE_TRANSIT 0x40000 /* manage transit from this dirent */ +#define DCACHE_NEED_LOOKUP 0x80000 /* dentry requires i_op->lookup */ +#define DCACHE_MANAGED_DENTRY \ + (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) + +extern seqlock_t rename_lock; + +static inline int dname_external(struct dentry *dentry) +{ + return dentry->d_name.name != dentry->d_iname; +} + +/* + * These are the low-level FS interfaces to the dcache.. + */ +extern void d_instantiate(struct dentry *, struct inode *); +extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); +extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); +extern void __d_drop(struct dentry *dentry); +extern void d_drop(struct dentry *dentry); +extern void d_delete(struct dentry *); +extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op); + +/* allocate/de-allocate */ +extern struct dentry * d_alloc(struct dentry *, const struct qstr *); +extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); +extern struct dentry * d_splice_alias(struct inode *, struct dentry *); +extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); +extern struct dentry *d_find_any_alias(struct inode *inode); +extern struct dentry * d_obtain_alias(struct inode *); +extern void shrink_dcache_sb(struct super_block *); +extern void shrink_dcache_parent(struct dentry *); +extern void shrink_dcache_for_umount(struct super_block *); +extern int d_invalidate(struct dentry *); + +/* only used at mount-time */ +extern struct dentry * d_make_root(struct inode *); + +/* <clickety>-<click> the ramfs-type tree */ +extern void d_genocide(struct dentry *); + +extern struct dentry *d_find_alias(struct inode *); +extern void d_prune_aliases(struct inode *); + +/* test whether we have any submounts in a subdir tree */ +extern int have_submounts(struct dentry *); + +/* + * This adds the entry to the hash queues. + */ +extern void d_rehash(struct dentry *); + +/** + * d_add - add dentry to hash queues + * @entry: dentry to add + * @inode: The inode to attach to this dentry + * + * This adds the entry to the hash queues and initializes @inode. + * The entry was actually filled in earlier during d_alloc(). 
+ */ + +static inline void d_add(struct dentry *entry, struct inode *inode) +{ + d_instantiate(entry, inode); + d_rehash(entry); +} + +/** + * d_add_unique - add dentry to hash queues without aliasing + * @entry: dentry to add + * @inode: The inode to attach to this dentry + * + * This adds the entry to the hash queues and initializes @inode. + * The entry was actually filled in earlier during d_alloc(). + */ +static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *inode) +{ + struct dentry *res; + + res = d_instantiate_unique(entry, inode); + d_rehash(res != NULL ? res : entry); + return res; +} + +extern void dentry_update_name_case(struct dentry *, struct qstr *); + +/* used for rename() and baskets */ +extern void d_move(struct dentry *, struct dentry *); +extern struct dentry *d_ancestor(struct dentry *, struct dentry *); + +/* appendix may either be NULL or be used for transname suffixes */ +extern struct dentry *d_lookup(struct dentry *, struct qstr *); +extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); +extern struct dentry *__d_lookup(struct dentry *, struct qstr *); +extern struct dentry *__d_lookup_rcu(const struct dentry *parent, + const struct qstr *name, + unsigned *seq, struct inode **inode); + +/** + * __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok + * @dentry: dentry to take a ref on + * @seq: seqcount to verify against + * Returns: 0 on failure, else 1. + * + * __d_rcu_to_refcount operates on a dentry,seq pair that was returned + * by __d_lookup_rcu, to get a reference on an rcu-walk dentry. + */ +static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) +{ + int ret = 0; + + assert_spin_locked(&dentry->d_lock); + if (!read_seqcount_retry(&dentry->d_seq, seq)) { + ret = 1; + dentry->d_count++; + } + + return ret; +} + +/* validate "insecure" dentry pointer */ +extern int d_validate(struct dentry *, struct dentry *); + +/* + * helper function for dentry_operations.d_dname() members + */ +extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); + +extern char *__d_path(const struct path *, const struct path *, char *, int); +extern char *d_absolute_path(const struct path *, char *, int); +extern char *d_path(const struct path *, char *, int); +extern char *d_path_with_unreachable(const struct path *, char *, int); +extern char *dentry_path_raw(struct dentry *, char *, int); +extern char *dentry_path(struct dentry *, char *, int); + +/* Allocation counts.. */ + +/** + * dget, dget_dlock - get a reference to a dentry + * @dentry: dentry to get a reference to + * + * Given a dentry or %NULL pointer increment the reference count + * if appropriate and return the dentry. A dentry will not be + * destroyed when it has references. + */ +static inline struct dentry *dget_dlock(struct dentry *dentry) +{ + if (dentry) + dentry->d_count++; + return dentry; +} + +static inline struct dentry *dget(struct dentry *dentry) +{ + if (dentry) { + spin_lock(&dentry->d_lock); + dget_dlock(dentry); + spin_unlock(&dentry->d_lock); + } + return dentry; +} + +extern struct dentry *dget_parent(struct dentry *dentry); + +/** + * d_unhashed - is dentry hashed + * @dentry: entry to check + * + * Returns true if the dentry passed is not currently hashed. 
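   For illustration, not part of the original header, every reference taken with dget() or dget_parent() must eventually be balanced by dput(); a caller that needs a stable view of the parent typically does:

	static void example_with_parent(struct dentry *dentry)
	{
		struct dentry *parent = dget_parent(dentry);	// pins the parent

		// ... 'parent' cannot be freed or reused while we hold the ref ...

		dput(parent);					// drop the reference
	}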
+ */ + +static inline int d_unhashed(struct dentry *dentry) +{ + return hlist_bl_unhashed(&dentry->d_hash); +} + +static inline int d_unlinked(struct dentry *dentry) +{ + return d_unhashed(dentry) && !IS_ROOT(dentry); +} + +static inline int cant_mount(struct dentry *dentry) +{ + return (dentry->d_flags & DCACHE_CANT_MOUNT); +} + +static inline void dont_mount(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_CANT_MOUNT; + spin_unlock(&dentry->d_lock); +} + +extern void dput(struct dentry *); + +static inline bool d_managed(struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_MANAGED_DENTRY; +} + +static inline bool d_mountpoint(struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_MOUNTED; +} + +static inline bool d_need_lookup(struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_NEED_LOOKUP; +} + +extern void d_clear_need_lookup(struct dentry *dentry); + +extern int sysctl_vfs_cache_pressure; + +#endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h new file mode 100644 index 00000000..65a2562f --- /dev/null +++ b/include/linux/dcbnl.h @@ -0,0 +1,672 @@ +/* + * Copyright (c) 2008-2011, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * Author: Lucy Liu <lucy.liu@intel.com> + */ + +#ifndef __LINUX_DCBNL_H__ +#define __LINUX_DCBNL_H__ + +#include <linux/types.h> + +/* IEEE 802.1Qaz std supported values */ +#define IEEE_8021QAZ_MAX_TCS 8 + +#define IEEE_8021QAZ_TSA_STRICT 0 +#define IEEE_8021QAZ_TSA_CB_SHAPER 1 +#define IEEE_8021QAZ_TSA_ETS 2 +#define IEEE_8021QAZ_TSA_VENDOR 255 + +/* This structure contains the IEEE 802.1Qaz ETS managed object + * + * @willing: willing bit in ETS configuration TLV + * @ets_cap: indicates supported capacity of ets feature + * @cbs: credit based shaper ets algorithm supported + * @tc_tx_bw: tc tx bandwidth indexed by traffic class + * @tc_rx_bw: tc rx bandwidth indexed by traffic class + * @tc_tsa: TSA Assignment table, indexed by traffic class + * @prio_tc: priority assignment table mapping 8021Qp to traffic class + * @tc_reco_bw: recommended tc bandwidth indexed by traffic class for TLV + * @tc_reco_tsa: recommended tc bandwidth indexed by traffic class for TLV + * @reco_prio_tc: recommended tc tx bandwidth indexed by traffic class for TLV + * + * Recommended values are used to set fields in the ETS recommendation TLV + * with hardware offloaded LLDP. 
+ * + * ---- + * TSA Assignment 8 bit identifiers + * 0 strict priority + * 1 credit-based shaper + * 2 enhanced transmission selection + * 3-254 reserved + * 255 vendor specific + */ +struct ieee_ets { + __u8 willing; + __u8 ets_cap; + __u8 cbs; + __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS]; + __u8 tc_rx_bw[IEEE_8021QAZ_MAX_TCS]; + __u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; + __u8 prio_tc[IEEE_8021QAZ_MAX_TCS]; + __u8 tc_reco_bw[IEEE_8021QAZ_MAX_TCS]; + __u8 tc_reco_tsa[IEEE_8021QAZ_MAX_TCS]; + __u8 reco_prio_tc[IEEE_8021QAZ_MAX_TCS]; +}; + +/* This structure contains the IEEE 802.1Qaz PFC managed object + * + * @pfc_cap: Indicates the number of traffic classes on the local device + * that may simultaneously have PFC enabled. + * @pfc_en: bitmap indicating pfc enabled traffic classes + * @mbc: enable macsec bypass capability + * @delay: the allowance made for a round-trip propagation delay of the + * link in bits. + * @requests: count of the sent pfc frames + * @indications: count of the received pfc frames + */ +struct ieee_pfc { + __u8 pfc_cap; + __u8 pfc_en; + __u8 mbc; + __u16 delay; + __u64 requests[IEEE_8021QAZ_MAX_TCS]; + __u64 indications[IEEE_8021QAZ_MAX_TCS]; +}; + +/* CEE DCBX std supported values */ +#define CEE_DCBX_MAX_PGS 8 +#define CEE_DCBX_MAX_PRIO 8 + +/** + * struct cee_pg - CEE Priority-Group managed object + * + * @willing: willing bit in the PG tlv + * @error: error bit in the PG tlv + * @pg_en: enable bit of the PG feature + * @tcs_supported: number of traffic classes supported + * @pg_bw: bandwidth percentage for each priority group + * @prio_pg: priority to PG mapping indexed by priority + */ +struct cee_pg { + __u8 willing; + __u8 error; + __u8 pg_en; + __u8 tcs_supported; + __u8 pg_bw[CEE_DCBX_MAX_PGS]; + __u8 prio_pg[CEE_DCBX_MAX_PGS]; +}; + +/** + * struct cee_pfc - CEE PFC managed object + * + * @willing: willing bit in the PFC tlv + * @error: error bit in the PFC tlv + * @pfc_en: bitmap indicating pfc enabled traffic classes + * @tcs_supported: number of traffic classes supported + */ +struct cee_pfc { + __u8 willing; + __u8 error; + __u8 pfc_en; + __u8 tcs_supported; +}; + +/* IEEE 802.1Qaz std supported values */ +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#define IEEE_8021QAZ_APP_SEL_STREAM 2 +#define IEEE_8021QAZ_APP_SEL_DGRAM 3 +#define IEEE_8021QAZ_APP_SEL_ANY 4 + +/* This structure contains the IEEE 802.1Qaz APP managed object. This + * object is also used for the CEE std as well. There is no difference + * between the objects. + * + * @selector: protocol identifier type + * @protocol: protocol of type indicated + * @priority: 3-bit unsigned integer indicating priority + * + * ---- + * Selector field values + * 0 Reserved + * 1 Ethertype + * 2 Well known port number over TCP or SCTP + * 3 Well known port number over UDP or DCCP + * 4 Well known port number over TCP, SCTP, UDP, or DCCP + * 5-7 Reserved + */ +struct dcb_app { + __u8 selector; + __u8 priority; + __u16 protocol; +}; + +/** + * struct dcb_peer_app_info - APP feature information sent by the peer + * + * @willing: willing bit in the peer APP tlv + * @error: error bit in the peer APP tlv + * + * In addition to this information the full peer APP tlv also contains + * a table of 'app_count' APP objects defined above. 
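   As a concrete illustration that is not part of the original header, mapping the FCoE ethertype to user priority 3 would use an entry such as the following; the chosen priority is only an example:

	struct dcb_app fcoe_app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,	// selector value 1
		.protocol = 0x8906,				// FCoE ethertype
		.priority = 3,					// example priority
	};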
+ */ +struct dcb_peer_app_info { + __u8 willing; + __u8 error; +}; + +struct dcbmsg { + __u8 dcb_family; + __u8 cmd; + __u16 dcb_pad; +}; + +/** + * enum dcbnl_commands - supported DCB commands + * + * @DCB_CMD_UNDEFINED: unspecified command to catch errors + * @DCB_CMD_GSTATE: request the state of DCB in the device + * @DCB_CMD_SSTATE: set the state of DCB in the device + * @DCB_CMD_PGTX_GCFG: request the priority group configuration for Tx + * @DCB_CMD_PGTX_SCFG: set the priority group configuration for Tx + * @DCB_CMD_PGRX_GCFG: request the priority group configuration for Rx + * @DCB_CMD_PGRX_SCFG: set the priority group configuration for Rx + * @DCB_CMD_PFC_GCFG: request the priority flow control configuration + * @DCB_CMD_PFC_SCFG: set the priority flow control configuration + * @DCB_CMD_SET_ALL: apply all changes to the underlying device + * @DCB_CMD_GPERM_HWADDR: get the permanent MAC address of the underlying + * device. Only useful when using bonding. + * @DCB_CMD_GCAP: request the DCB capabilities of the device + * @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported + * @DCB_CMD_SNUMTCS: set the number of traffic classes + * @DCB_CMD_BCN_GCFG: get backward congestion notification configuration + * @DCB_CMD_BCN_SCFG: set backward congestion notification configuration + * @DCB_CMD_GAPP: get application protocol configuration + * @DCB_CMD_SAPP: set application protocol configuration + * @DCB_CMD_IEEE_SET: set IEEE 802.1Qaz configuration + * @DCB_CMD_IEEE_GET: get IEEE 802.1Qaz configuration + * @DCB_CMD_GDCBX: get DCBX engine configuration + * @DCB_CMD_SDCBX: set DCBX engine configuration + * @DCB_CMD_GFEATCFG: get DCBX features flags + * @DCB_CMD_SFEATCFG: set DCBX features negotiation flags + * @DCB_CMD_CEE_GET: get CEE aggregated configuration + * @DCB_CMD_IEEE_DEL: delete IEEE 802.1Qaz configuration + */ +enum dcbnl_commands { + DCB_CMD_UNDEFINED, + + DCB_CMD_GSTATE, + DCB_CMD_SSTATE, + + DCB_CMD_PGTX_GCFG, + DCB_CMD_PGTX_SCFG, + DCB_CMD_PGRX_GCFG, + DCB_CMD_PGRX_SCFG, + + DCB_CMD_PFC_GCFG, + DCB_CMD_PFC_SCFG, + + DCB_CMD_SET_ALL, + + DCB_CMD_GPERM_HWADDR, + + DCB_CMD_GCAP, + + DCB_CMD_GNUMTCS, + DCB_CMD_SNUMTCS, + + DCB_CMD_PFC_GSTATE, + DCB_CMD_PFC_SSTATE, + + DCB_CMD_BCN_GCFG, + DCB_CMD_BCN_SCFG, + + DCB_CMD_GAPP, + DCB_CMD_SAPP, + + DCB_CMD_IEEE_SET, + DCB_CMD_IEEE_GET, + + DCB_CMD_GDCBX, + DCB_CMD_SDCBX, + + DCB_CMD_GFEATCFG, + DCB_CMD_SFEATCFG, + + DCB_CMD_CEE_GET, + DCB_CMD_IEEE_DEL, + + __DCB_CMD_ENUM_MAX, + DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_attrs - DCB top-level netlink attributes + * + * @DCB_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_ATTR_IFNAME: interface name of the underlying device (NLA_STRING) + * @DCB_ATTR_STATE: enable state of DCB in the device (NLA_U8) + * @DCB_ATTR_PFC_STATE: enable state of PFC in the device (NLA_U8) + * @DCB_ATTR_PFC_CFG: priority flow control configuration (NLA_NESTED) + * @DCB_ATTR_NUM_TC: number of traffic classes supported in the device (NLA_U8) + * @DCB_ATTR_PG_CFG: priority group configuration (NLA_NESTED) + * @DCB_ATTR_SET_ALL: bool to commit changes to hardware or not (NLA_U8) + * @DCB_ATTR_PERM_HWADDR: MAC address of the physical device (NLA_NESTED) + * @DCB_ATTR_CAP: DCB capabilities of the device (NLA_NESTED) + * @DCB_ATTR_NUMTCS: number of traffic classes supported (NLA_NESTED) + * @DCB_ATTR_BCN: backward congestion notification configuration (NLA_NESTED) + * @DCB_ATTR_IEEE: IEEE 802.1Qaz supported attributes (NLA_NESTED) + * @DCB_ATTR_DCBX: DCBX engine
configuration in the device (NLA_U8) + * @DCB_ATTR_FEATCFG: DCBX features flags (NLA_NESTED) + * @DCB_ATTR_CEE: CEE std supported attributes (NLA_NESTED) + */ +enum dcbnl_attrs { + DCB_ATTR_UNDEFINED, + + DCB_ATTR_IFNAME, + DCB_ATTR_STATE, + DCB_ATTR_PFC_STATE, + DCB_ATTR_PFC_CFG, + DCB_ATTR_NUM_TC, + DCB_ATTR_PG_CFG, + DCB_ATTR_SET_ALL, + DCB_ATTR_PERM_HWADDR, + DCB_ATTR_CAP, + DCB_ATTR_NUMTCS, + DCB_ATTR_BCN, + DCB_ATTR_APP, + + /* IEEE std attributes */ + DCB_ATTR_IEEE, + + DCB_ATTR_DCBX, + DCB_ATTR_FEATCFG, + + /* CEE nested attributes */ + DCB_ATTR_CEE, + + __DCB_ATTR_ENUM_MAX, + DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1, +}; + +/** + * enum ieee_attrs - IEEE 802.1Qaz get/set attributes + * + * @DCB_ATTR_IEEE_UNSPEC: unspecified + * @DCB_ATTR_IEEE_ETS: negotiated ETS configuration + * @DCB_ATTR_IEEE_PFC: negotiated PFC configuration + * @DCB_ATTR_IEEE_APP_TABLE: negotiated APP configuration + * @DCB_ATTR_IEEE_PEER_ETS: peer ETS configuration - get only + * @DCB_ATTR_IEEE_PEER_PFC: peer PFC configuration - get only + * @DCB_ATTR_IEEE_PEER_APP: peer APP tlv - get only + */ +enum ieee_attrs { + DCB_ATTR_IEEE_UNSPEC, + DCB_ATTR_IEEE_ETS, + DCB_ATTR_IEEE_PFC, + DCB_ATTR_IEEE_APP_TABLE, + DCB_ATTR_IEEE_PEER_ETS, + DCB_ATTR_IEEE_PEER_PFC, + DCB_ATTR_IEEE_PEER_APP, + __DCB_ATTR_IEEE_MAX +}; +#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1) + +enum ieee_attrs_app { + DCB_ATTR_IEEE_APP_UNSPEC, + DCB_ATTR_IEEE_APP, + __DCB_ATTR_IEEE_APP_MAX +}; +#define DCB_ATTR_IEEE_APP_MAX (__DCB_ATTR_IEEE_APP_MAX - 1) + +/** + * enum cee_attrs - CEE DCBX get attributes. + * + * @DCB_ATTR_CEE_UNSPEC: unspecified + * @DCB_ATTR_CEE_PEER_PG: peer PG configuration - get only + * @DCB_ATTR_CEE_PEER_PFC: peer PFC configuration - get only + * @DCB_ATTR_CEE_PEER_APP_TABLE: peer APP tlv - get only + * @DCB_ATTR_CEE_TX_PG: TX PG configuration (DCB_CMD_PGTX_GCFG) + * @DCB_ATTR_CEE_RX_PG: RX PG configuration (DCB_CMD_PGRX_GCFG) + * @DCB_ATTR_CEE_PFC: PFC configuration (DCB_CMD_PFC_GCFG) + * @DCB_ATTR_CEE_APP_TABLE: APP configuration (multi DCB_CMD_GAPP) + * @DCB_ATTR_CEE_FEAT: DCBX features flags (DCB_CMD_GFEATCFG) + * + * An aggregated collection of the cee std negotiated parameters. 
+ */ +enum cee_attrs { + DCB_ATTR_CEE_UNSPEC, + DCB_ATTR_CEE_PEER_PG, + DCB_ATTR_CEE_PEER_PFC, + DCB_ATTR_CEE_PEER_APP_TABLE, + DCB_ATTR_CEE_TX_PG, + DCB_ATTR_CEE_RX_PG, + DCB_ATTR_CEE_PFC, + DCB_ATTR_CEE_APP_TABLE, + DCB_ATTR_CEE_FEAT, + __DCB_ATTR_CEE_MAX +}; +#define DCB_ATTR_CEE_MAX (__DCB_ATTR_CEE_MAX - 1) + +enum peer_app_attr { + DCB_ATTR_CEE_PEER_APP_UNSPEC, + DCB_ATTR_CEE_PEER_APP_INFO, + DCB_ATTR_CEE_PEER_APP, + __DCB_ATTR_CEE_PEER_APP_MAX +}; +#define DCB_ATTR_CEE_PEER_APP_MAX (__DCB_ATTR_CEE_PEER_APP_MAX - 1) + +enum cee_attrs_app { + DCB_ATTR_CEE_APP_UNSPEC, + DCB_ATTR_CEE_APP, + __DCB_ATTR_CEE_APP_MAX +}; +#define DCB_ATTR_CEE_APP_MAX (__DCB_ATTR_CEE_APP_MAX - 1) + +/** + * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs + * + * @DCB_PFC_UP_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_PFC_UP_ATTR_0: Priority Flow Control value for User Priority 0 (NLA_U8) + * @DCB_PFC_UP_ATTR_1: Priority Flow Control value for User Priority 1 (NLA_U8) + * @DCB_PFC_UP_ATTR_2: Priority Flow Control value for User Priority 2 (NLA_U8) + * @DCB_PFC_UP_ATTR_3: Priority Flow Control value for User Priority 3 (NLA_U8) + * @DCB_PFC_UP_ATTR_4: Priority Flow Control value for User Priority 4 (NLA_U8) + * @DCB_PFC_UP_ATTR_5: Priority Flow Control value for User Priority 5 (NLA_U8) + * @DCB_PFC_UP_ATTR_6: Priority Flow Control value for User Priority 6 (NLA_U8) + * @DCB_PFC_UP_ATTR_7: Priority Flow Control value for User Priority 7 (NLA_U8) + * @DCB_PFC_UP_ATTR_MAX: highest attribute number currently defined + * @DCB_PFC_UP_ATTR_ALL: apply to all priority flow control attrs (NLA_FLAG) + * + */ +enum dcbnl_pfc_up_attrs { + DCB_PFC_UP_ATTR_UNDEFINED, + + DCB_PFC_UP_ATTR_0, + DCB_PFC_UP_ATTR_1, + DCB_PFC_UP_ATTR_2, + DCB_PFC_UP_ATTR_3, + DCB_PFC_UP_ATTR_4, + DCB_PFC_UP_ATTR_5, + DCB_PFC_UP_ATTR_6, + DCB_PFC_UP_ATTR_7, + DCB_PFC_UP_ATTR_ALL, + + __DCB_PFC_UP_ATTR_ENUM_MAX, + DCB_PFC_UP_ATTR_MAX = __DCB_PFC_UP_ATTR_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_pg_attrs - DCB Priority Group attributes + * + * @DCB_PG_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_PG_ATTR_TC_0: Priority Group Traffic Class 0 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_1: Priority Group Traffic Class 1 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_2: Priority Group Traffic Class 2 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_3: Priority Group Traffic Class 3 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_4: Priority Group Traffic Class 4 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_5: Priority Group Traffic Class 5 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_6: Priority Group Traffic Class 6 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_7: Priority Group Traffic Class 7 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_MAX: highest attribute number currently defined + * @DCB_PG_ATTR_TC_ALL: apply to all traffic classes (NLA_NESTED) + * @DCB_PG_ATTR_BW_ID_0: Percent of link bandwidth for Priority Group 0 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_1: Percent of link bandwidth for Priority Group 1 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_2: Percent of link bandwidth for Priority Group 2 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_3: Percent of link bandwidth for Priority Group 3 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_4: Percent of link bandwidth for Priority Group 4 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_5: Percent of link bandwidth for Priority Group 5 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_6: Percent of link bandwidth for Priority Group 6 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_7: Percent of link bandwidth for Priority Group 7 
(NLA_U8) + * @DCB_PG_ATTR_BW_ID_MAX: highest attribute number currently defined + * @DCB_PG_ATTR_BW_ID_ALL: apply to all priority groups (NLA_FLAG) + * + */ +enum dcbnl_pg_attrs { + DCB_PG_ATTR_UNDEFINED, + + DCB_PG_ATTR_TC_0, + DCB_PG_ATTR_TC_1, + DCB_PG_ATTR_TC_2, + DCB_PG_ATTR_TC_3, + DCB_PG_ATTR_TC_4, + DCB_PG_ATTR_TC_5, + DCB_PG_ATTR_TC_6, + DCB_PG_ATTR_TC_7, + DCB_PG_ATTR_TC_MAX, + DCB_PG_ATTR_TC_ALL, + + DCB_PG_ATTR_BW_ID_0, + DCB_PG_ATTR_BW_ID_1, + DCB_PG_ATTR_BW_ID_2, + DCB_PG_ATTR_BW_ID_3, + DCB_PG_ATTR_BW_ID_4, + DCB_PG_ATTR_BW_ID_5, + DCB_PG_ATTR_BW_ID_6, + DCB_PG_ATTR_BW_ID_7, + DCB_PG_ATTR_BW_ID_MAX, + DCB_PG_ATTR_BW_ID_ALL, + + __DCB_PG_ATTR_ENUM_MAX, + DCB_PG_ATTR_MAX = __DCB_PG_ATTR_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_tc_attrs - DCB Traffic Class attributes + * + * @DCB_TC_ATTR_PARAM_UNDEFINED: unspecified attribute to catch errors + * @DCB_TC_ATTR_PARAM_PGID: (NLA_U8) Priority group the traffic class belongs to + * Valid values are: 0-7 + * @DCB_TC_ATTR_PARAM_UP_MAPPING: (NLA_U8) Traffic class to user priority map + * Some devices may not support changing the + * user priority map of a TC. + * @DCB_TC_ATTR_PARAM_STRICT_PRIO: (NLA_U8) Strict priority setting + * 0 - none + * 1 - group strict + * 2 - link strict + * @DCB_TC_ATTR_PARAM_BW_PCT: optional - (NLA_U8) If supported by the device and + * not configured to use link strict priority, + * this is the percentage of bandwidth of the + * priority group this traffic class belongs to + * @DCB_TC_ATTR_PARAM_ALL: (NLA_FLAG) all traffic class parameters + * + */ +enum dcbnl_tc_attrs { + DCB_TC_ATTR_PARAM_UNDEFINED, + + DCB_TC_ATTR_PARAM_PGID, + DCB_TC_ATTR_PARAM_UP_MAPPING, + DCB_TC_ATTR_PARAM_STRICT_PRIO, + DCB_TC_ATTR_PARAM_BW_PCT, + DCB_TC_ATTR_PARAM_ALL, + + __DCB_TC_ATTR_PARAM_ENUM_MAX, + DCB_TC_ATTR_PARAM_MAX = __DCB_TC_ATTR_PARAM_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_cap_attrs - DCB Capability attributes + * + * @DCB_CAP_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_CAP_ATTR_ALL: (NLA_FLAG) all capability parameters + * @DCB_CAP_ATTR_PG: (NLA_U8) device supports Priority Groups + * @DCB_CAP_ATTR_PFC: (NLA_U8) device supports Priority Flow Control + * @DCB_CAP_ATTR_UP2TC: (NLA_U8) device supports user priority to + * traffic class mapping + * @DCB_CAP_ATTR_PG_TCS: (NLA_U8) bitmap where each bit represents a + * number of traffic classes the device + * can be configured to use for Priority Groups + * @DCB_CAP_ATTR_PFC_TCS: (NLA_U8) bitmap where each bit represents a + * number of traffic classes the device can be + * configured to use for Priority Flow Control + * @DCB_CAP_ATTR_GSP: (NLA_U8) device supports group strict priority + * @DCB_CAP_ATTR_BCN: (NLA_U8) device supports Backwards Congestion + * Notification + * @DCB_CAP_ATTR_DCBX: (NLA_U8) device supports DCBX engine + * + */ +enum dcbnl_cap_attrs { + DCB_CAP_ATTR_UNDEFINED, + DCB_CAP_ATTR_ALL, + DCB_CAP_ATTR_PG, + DCB_CAP_ATTR_PFC, + DCB_CAP_ATTR_UP2TC, + DCB_CAP_ATTR_PG_TCS, + DCB_CAP_ATTR_PFC_TCS, + DCB_CAP_ATTR_GSP, + DCB_CAP_ATTR_BCN, + DCB_CAP_ATTR_DCBX, + + __DCB_CAP_ATTR_ENUM_MAX, + DCB_CAP_ATTR_MAX = __DCB_CAP_ATTR_ENUM_MAX - 1, +}; + +/** + * DCBX capability flags + * + * @DCB_CAP_DCBX_HOST: DCBX negotiation is performed by the host LLDP agent. 
+ * 'set' routines are used to configure the device with + * the negotiated parameters + * + * @DCB_CAP_DCBX_LLD_MANAGED: DCBX negotiation is not performed in the host but + * by another entity + * 'get' routines are used to retrieve the + * negotiated parameters + * 'set' routines can be used to set the initial + * negotiation configuration + * + * @DCB_CAP_DCBX_VER_CEE: for a non-host DCBX engine, indicates the engine + * supports the CEE protocol flavor + * + * @DCB_CAP_DCBX_VER_IEEE: for a non-host DCBX engine, indicates the engine + * supports the IEEE protocol flavor + * + * @DCB_CAP_DCBX_STATIC: for a non-host DCBX engine, indicates the engine + * supports static configuration (i.e no actual + * negotiation is performed negotiated parameters equal + * the initial configuration) + * + */ +#define DCB_CAP_DCBX_HOST 0x01 +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#define DCB_CAP_DCBX_VER_CEE 0x04 +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#define DCB_CAP_DCBX_STATIC 0x10 + +/** + * enum dcbnl_numtcs_attrs - number of traffic classes + * + * @DCB_NUMTCS_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_NUMTCS_ATTR_ALL: (NLA_FLAG) all traffic class attributes + * @DCB_NUMTCS_ATTR_PG: (NLA_U8) number of traffic classes used for + * priority groups + * @DCB_NUMTCS_ATTR_PFC: (NLA_U8) number of traffic classes which can + * support priority flow control + */ +enum dcbnl_numtcs_attrs { + DCB_NUMTCS_ATTR_UNDEFINED, + DCB_NUMTCS_ATTR_ALL, + DCB_NUMTCS_ATTR_PG, + DCB_NUMTCS_ATTR_PFC, + + __DCB_NUMTCS_ATTR_ENUM_MAX, + DCB_NUMTCS_ATTR_MAX = __DCB_NUMTCS_ATTR_ENUM_MAX - 1, +}; + +enum dcbnl_bcn_attrs{ + DCB_BCN_ATTR_UNDEFINED = 0, + + DCB_BCN_ATTR_RP_0, + DCB_BCN_ATTR_RP_1, + DCB_BCN_ATTR_RP_2, + DCB_BCN_ATTR_RP_3, + DCB_BCN_ATTR_RP_4, + DCB_BCN_ATTR_RP_5, + DCB_BCN_ATTR_RP_6, + DCB_BCN_ATTR_RP_7, + DCB_BCN_ATTR_RP_ALL, + + DCB_BCN_ATTR_BCNA_0, + DCB_BCN_ATTR_BCNA_1, + DCB_BCN_ATTR_ALPHA, + DCB_BCN_ATTR_BETA, + DCB_BCN_ATTR_GD, + DCB_BCN_ATTR_GI, + DCB_BCN_ATTR_TMAX, + DCB_BCN_ATTR_TD, + DCB_BCN_ATTR_RMIN, + DCB_BCN_ATTR_W, + DCB_BCN_ATTR_RD, + DCB_BCN_ATTR_RU, + DCB_BCN_ATTR_WRTT, + DCB_BCN_ATTR_RI, + DCB_BCN_ATTR_C, + DCB_BCN_ATTR_ALL, + + __DCB_BCN_ATTR_ENUM_MAX, + DCB_BCN_ATTR_MAX = __DCB_BCN_ATTR_ENUM_MAX - 1, +}; + +/** + * enum dcb_general_attr_values - general DCB attribute values + * + * @DCB_ATTR_UNDEFINED: value used to indicate an attribute is not supported + * + */ +enum dcb_general_attr_values { + DCB_ATTR_VALUE_UNDEFINED = 0xff +}; + +#define DCB_APP_IDTYPE_ETHTYPE 0x00 +#define DCB_APP_IDTYPE_PORTNUM 0x01 +enum dcbnl_app_attrs { + DCB_APP_ATTR_UNDEFINED, + + DCB_APP_ATTR_IDTYPE, + DCB_APP_ATTR_ID, + DCB_APP_ATTR_PRIORITY, + + __DCB_APP_ATTR_ENUM_MAX, + DCB_APP_ATTR_MAX = __DCB_APP_ATTR_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_featcfg_attrs - features conifiguration flags + * + * @DCB_FEATCFG_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_FEATCFG_ATTR_ALL: (NLA_FLAG) all features configuration attributes + * @DCB_FEATCFG_ATTR_PG: (NLA_U8) configuration flags for priority groups + * @DCB_FEATCFG_ATTR_PFC: (NLA_U8) configuration flags for priority + * flow control + * @DCB_FEATCFG_ATTR_APP: (NLA_U8) configuration flags for application TLV + * + */ +#define DCB_FEATCFG_ERROR 0x01 /* error in feature resolution */ +#define DCB_FEATCFG_ENABLE 0x02 /* enable feature */ +#define DCB_FEATCFG_WILLING 0x04 /* feature is willing */ +#define DCB_FEATCFG_ADVERTISE 0x08 /* advertise feature */ +enum dcbnl_featcfg_attrs { + DCB_FEATCFG_ATTR_UNDEFINED, + DCB_FEATCFG_ATTR_ALL, + 
DCB_FEATCFG_ATTR_PG, + DCB_FEATCFG_ATTR_PFC, + DCB_FEATCFG_ATTR_APP, + + __DCB_FEATCFG_ATTR_ENUM_MAX, + DCB_FEATCFG_ATTR_MAX = __DCB_FEATCFG_ATTR_ENUM_MAX - 1, +}; + +#endif /* __LINUX_DCBNL_H__ */ diff --git a/include/linux/dccp.h b/include/linux/dccp.h new file mode 100644 index 00000000..eaf95a02 --- /dev/null +++ b/include/linux/dccp.h @@ -0,0 +1,554 @@ +#ifndef _LINUX_DCCP_H +#define _LINUX_DCCP_H + +#include <linux/types.h> +#include <asm/byteorder.h> + +/** + * struct dccp_hdr - generic part of DCCP packet header + * + * @dccph_sport - Relevant port on the endpoint that sent this packet + * @dccph_dport - Relevant port on the other endpoint + * @dccph_doff - Data Offset from the start of the DCCP header, in 32-bit words + * @dccph_ccval - Used by the HC-Sender CCID + * @dccph_cscov - Parts of the packet that are covered by the Checksum field + * @dccph_checksum - Internet checksum, depends on dccph_cscov + * @dccph_x - 0 = 24 bit sequence number, 1 = 48 + * @dccph_type - packet type, see DCCP_PKT_ prefixed macros + * @dccph_seq - sequence number high or low order 24 bits, depends on dccph_x + */ +struct dccp_hdr { + __be16 dccph_sport, + dccph_dport; + __u8 dccph_doff; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 dccph_cscov:4, + dccph_ccval:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 dccph_ccval:4, + dccph_cscov:4; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __sum16 dccph_checksum; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 dccph_x:1, + dccph_type:4, + dccph_reserved:3; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 dccph_reserved:3, + dccph_type:4, + dccph_x:1; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __u8 dccph_seq2; + __be16 dccph_seq; +}; + +/** + * struct dccp_hdr_ext - the low bits of a 48 bit seq packet + * + * @dccph_seq_low - low 24 bits of a 48 bit seq packet + */ +struct dccp_hdr_ext { + __be32 dccph_seq_low; +}; + +/** + * struct dccp_hdr_request - Connection initiation request header + * + * @dccph_req_service - Service to which the client app wants to connect + */ +struct dccp_hdr_request { + __be32 dccph_req_service; +}; +/** + * struct dccp_hdr_ack_bits - acknowledgment bits common to most packets + * + * @dccph_resp_ack_nr_high - 48 bit ack number high order bits, contains GSR + * @dccph_resp_ack_nr_low - 48 bit ack number low order bits, contains GSR + */ +struct dccp_hdr_ack_bits { + __be16 dccph_reserved1; + __be16 dccph_ack_nr_high; + __be32 dccph_ack_nr_low; +}; +/** + * struct dccp_hdr_response - Connection initiation response header + * + * @dccph_resp_ack - 48 bit Acknowledgment Number Subheader (5.3) + * @dccph_resp_service - Echoes the Service Code on a received DCCP-Request + */ +struct dccp_hdr_response { + struct dccp_hdr_ack_bits dccph_resp_ack; + __be32 dccph_resp_service; +}; + +/** + * struct dccp_hdr_reset - Unconditionally shut down a connection + * + * @dccph_reset_ack - 48 bit Acknowledgment Number Subheader (5.6) + * @dccph_reset_code - one of %dccp_reset_codes + * @dccph_reset_data - the Data 1 ... 
Data 3 fields from 5.6 + */ +struct dccp_hdr_reset { + struct dccp_hdr_ack_bits dccph_reset_ack; + __u8 dccph_reset_code, + dccph_reset_data[3]; +}; + +enum dccp_pkt_type { + DCCP_PKT_REQUEST = 0, + DCCP_PKT_RESPONSE, + DCCP_PKT_DATA, + DCCP_PKT_ACK, + DCCP_PKT_DATAACK, + DCCP_PKT_CLOSEREQ, + DCCP_PKT_CLOSE, + DCCP_PKT_RESET, + DCCP_PKT_SYNC, + DCCP_PKT_SYNCACK, + DCCP_PKT_INVALID, +}; + +#define DCCP_NR_PKT_TYPES DCCP_PKT_INVALID + +static inline unsigned int dccp_packet_hdr_len(const __u8 type) +{ + if (type == DCCP_PKT_DATA) + return 0; + if (type == DCCP_PKT_DATAACK || + type == DCCP_PKT_ACK || + type == DCCP_PKT_SYNC || + type == DCCP_PKT_SYNCACK || + type == DCCP_PKT_CLOSE || + type == DCCP_PKT_CLOSEREQ) + return sizeof(struct dccp_hdr_ack_bits); + if (type == DCCP_PKT_REQUEST) + return sizeof(struct dccp_hdr_request); + if (type == DCCP_PKT_RESPONSE) + return sizeof(struct dccp_hdr_response); + return sizeof(struct dccp_hdr_reset); +} +enum dccp_reset_codes { + DCCP_RESET_CODE_UNSPECIFIED = 0, + DCCP_RESET_CODE_CLOSED, + DCCP_RESET_CODE_ABORTED, + DCCP_RESET_CODE_NO_CONNECTION, + DCCP_RESET_CODE_PACKET_ERROR, + DCCP_RESET_CODE_OPTION_ERROR, + DCCP_RESET_CODE_MANDATORY_ERROR, + DCCP_RESET_CODE_CONNECTION_REFUSED, + DCCP_RESET_CODE_BAD_SERVICE_CODE, + DCCP_RESET_CODE_TOO_BUSY, + DCCP_RESET_CODE_BAD_INIT_COOKIE, + DCCP_RESET_CODE_AGGRESSION_PENALTY, + + DCCP_MAX_RESET_CODES /* Leave at the end! */ +}; + +/* DCCP options */ +enum { + DCCPO_PADDING = 0, + DCCPO_MANDATORY = 1, + DCCPO_MIN_RESERVED = 3, + DCCPO_MAX_RESERVED = 31, + DCCPO_CHANGE_L = 32, + DCCPO_CONFIRM_L = 33, + DCCPO_CHANGE_R = 34, + DCCPO_CONFIRM_R = 35, + DCCPO_NDP_COUNT = 37, + DCCPO_ACK_VECTOR_0 = 38, + DCCPO_ACK_VECTOR_1 = 39, + DCCPO_TIMESTAMP = 41, + DCCPO_TIMESTAMP_ECHO = 42, + DCCPO_ELAPSED_TIME = 43, + DCCPO_MAX = 45, + DCCPO_MIN_RX_CCID_SPECIFIC = 128, /* from sender to receiver */ + DCCPO_MAX_RX_CCID_SPECIFIC = 191, + DCCPO_MIN_TX_CCID_SPECIFIC = 192, /* from receiver to sender */ + DCCPO_MAX_TX_CCID_SPECIFIC = 255, +}; +/* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */ +#define DCCP_SINGLE_OPT_MAXLEN 253 + +/* DCCP CCIDS */ +enum { + DCCPC_CCID2 = 2, + DCCPC_CCID3 = 3, +}; + +/* DCCP features (RFC 4340 section 6.4) */ +enum dccp_feature_numbers { + DCCPF_RESERVED = 0, + DCCPF_CCID = 1, + DCCPF_SHORT_SEQNOS = 2, + DCCPF_SEQUENCE_WINDOW = 3, + DCCPF_ECN_INCAPABLE = 4, + DCCPF_ACK_RATIO = 5, + DCCPF_SEND_ACK_VECTOR = 6, + DCCPF_SEND_NDP_COUNT = 7, + DCCPF_MIN_CSUM_COVER = 8, + DCCPF_DATA_CHECKSUM = 9, + /* 10-127 reserved */ + DCCPF_MIN_CCID_SPECIFIC = 128, + DCCPF_SEND_LEV_RATE = 192, /* RFC 4342, sec. 
8.4 */ + DCCPF_MAX_CCID_SPECIFIC = 255, +}; + +/* DCCP socket control message types for cmsg */ +enum dccp_cmsg_type { + DCCP_SCM_PRIORITY = 1, + DCCP_SCM_QPOLICY_MAX = 0xFFFF, + /* ^-- Up to here reserved exclusively for qpolicy parameters */ + DCCP_SCM_MAX +}; + +/* DCCP priorities for outgoing/queued packets */ +enum dccp_packet_dequeueing_policy { + DCCPQ_POLICY_SIMPLE, + DCCPQ_POLICY_PRIO, + DCCPQ_POLICY_MAX +}; + +/* DCCP socket options */ +#define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */ +#define DCCP_SOCKOPT_SERVICE 2 +#define DCCP_SOCKOPT_CHANGE_L 3 +#define DCCP_SOCKOPT_CHANGE_R 4 +#define DCCP_SOCKOPT_GET_CUR_MPS 5 +#define DCCP_SOCKOPT_SERVER_TIMEWAIT 6 +#define DCCP_SOCKOPT_SEND_CSCOV 10 +#define DCCP_SOCKOPT_RECV_CSCOV 11 +#define DCCP_SOCKOPT_AVAILABLE_CCIDS 12 +#define DCCP_SOCKOPT_CCID 13 +#define DCCP_SOCKOPT_TX_CCID 14 +#define DCCP_SOCKOPT_RX_CCID 15 +#define DCCP_SOCKOPT_QPOLICY_ID 16 +#define DCCP_SOCKOPT_QPOLICY_TXQLEN 17 +#define DCCP_SOCKOPT_CCID_RX_INFO 128 +#define DCCP_SOCKOPT_CCID_TX_INFO 192 + +/* maximum number of services provided on the same listening port */ +#define DCCP_SERVICE_LIST_MAX_LEN 32 + +#ifdef __KERNEL__ + +#include <linux/in.h> +#include <linux/interrupt.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/uio.h> +#include <linux/workqueue.h> + +#include <net/inet_connection_sock.h> +#include <net/inet_sock.h> +#include <net/inet_timewait_sock.h> +#include <net/tcp_states.h> + +enum dccp_state { + DCCP_OPEN = TCP_ESTABLISHED, + DCCP_REQUESTING = TCP_SYN_SENT, + DCCP_LISTEN = TCP_LISTEN, + DCCP_RESPOND = TCP_SYN_RECV, + /* + * States involved in closing a DCCP connection: + * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq. + * + * 2) CLOSING can have three different meanings (RFC 4340, 8.3): + * a. Client has performed active-close, has sent a Close to the server + * from state OPEN or PARTOPEN, and is waiting for the final Reset + * (in this case, SOCK_DONE == 1). + * b. Client is asked to perform passive-close, by receiving a CloseReq + * in (PART)OPEN state. It sends a Close and waits for final Reset + * (in this case, SOCK_DONE == 0). + * c. Server performs an active-close as in (a), keeps TIMEWAIT state. + * + * 3) The following intermediate states are employed to give passively + * closing nodes a chance to process their unread data: + * - PASSIVE_CLOSE (from OPEN => CLOSED) and + * - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above). 
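   For illustration, not part of the original header, the helpers above compose naturally: a DCCP-Request carrying 48-bit sequence numbers (dccph_x == 1) occupies the generic header, the sequence-number extension and the 4-byte request subheader:

	size_t req_hdr_len = sizeof(struct dccp_hdr) +
			     sizeof(struct dccp_hdr_ext) +
			     dccp_packet_hdr_len(DCCP_PKT_REQUEST);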
+ */ + DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1, + DCCP_PASSIVE_CLOSE = TCP_CLOSE_WAIT, /* any node receiving a Close */ + DCCP_CLOSING = TCP_CLOSING, + DCCP_TIME_WAIT = TCP_TIME_WAIT, + DCCP_CLOSED = TCP_CLOSE, + DCCP_PARTOPEN = TCP_MAX_STATES, + DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */ + DCCP_MAX_STATES +}; + +enum { + DCCPF_OPEN = TCPF_ESTABLISHED, + DCCPF_REQUESTING = TCPF_SYN_SENT, + DCCPF_LISTEN = TCPF_LISTEN, + DCCPF_RESPOND = TCPF_SYN_RECV, + DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1, + DCCPF_CLOSING = TCPF_CLOSING, + DCCPF_TIME_WAIT = TCPF_TIME_WAIT, + DCCPF_CLOSED = TCPF_CLOSE, + DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN), +}; + +static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb) +{ + return (struct dccp_hdr *)skb_transport_header(skb); +} + +static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen) +{ + skb_push(skb, headlen); + skb_reset_transport_header(skb); + return memset(skb_transport_header(skb), 0, headlen); +} + +static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh) +{ + return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh)); +} + +static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh) +{ + return sizeof(*dh) + (dh->dccph_x ? sizeof(struct dccp_hdr_ext) : 0); +} + +static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb) +{ + const struct dccp_hdr *dh = dccp_hdr(skb); + return __dccp_basic_hdr_len(dh); +} + +static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh) +{ + __u64 seq_nr = ntohs(dh->dccph_seq); + + if (dh->dccph_x != 0) + seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(dh)->dccph_seq_low); + else + seq_nr += (u32)dh->dccph_seq2 << 16; + + return seq_nr; +} + +static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb) +{ + return (struct dccp_hdr_request *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); +} + +static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb) +{ + return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); +} + +static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb) +{ + const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb); + return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low); +} + +static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb) +{ + return (struct dccp_hdr_response *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); +} + +static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb) +{ + return (struct dccp_hdr_reset *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); +} + +static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh) +{ + return __dccp_basic_hdr_len(dh) + + dccp_packet_hdr_len(dh->dccph_type); +} + +static inline unsigned int dccp_hdr_len(const struct sk_buff *skb) +{ + return __dccp_hdr_len(dccp_hdr(skb)); +} + +/** + * struct dccp_request_sock - represent DCCP-specific connection request + * @dreq_inet_rsk: structure inherited from + * @dreq_iss: initial sequence number, sent on the first Response (RFC 4340, 7.1) + * @dreq_gss: greatest sequence number sent (for retransmitted Responses) + * @dreq_isr: initial sequence number received in the first Request + * @dreq_gsr: greatest sequence number received (for retransmitted Request(s)) + * @dreq_service: service code present on the Request (there is just one) + * @dreq_featneg: feature negotiation options for this connection + * The following two fields are analogous to 
the ones in dccp_sock: + * @dreq_timestamp_echo: last received timestamp to echo (13.1) + * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo + */ +struct dccp_request_sock { + struct inet_request_sock dreq_inet_rsk; + __u64 dreq_iss; + __u64 dreq_gss; + __u64 dreq_isr; + __u64 dreq_gsr; + __be32 dreq_service; + struct list_head dreq_featneg; + __u32 dreq_timestamp_echo; + __u32 dreq_timestamp_time; +}; + +static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req) +{ + return (struct dccp_request_sock *)req; +} + +extern struct inet_timewait_death_row dccp_death_row; + +extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, + struct sk_buff *skb); + +struct dccp_options_received { + u64 dccpor_ndp:48; + u32 dccpor_timestamp; + u32 dccpor_timestamp_echo; + u32 dccpor_elapsed_time; +}; + +struct ccid; + +enum dccp_role { + DCCP_ROLE_UNDEFINED, + DCCP_ROLE_LISTEN, + DCCP_ROLE_CLIENT, + DCCP_ROLE_SERVER, +}; + +struct dccp_service_list { + __u32 dccpsl_nr; + __be32 dccpsl_list[0]; +}; + +#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) +#define DCCP_SERVICE_CODE_IS_ABSENT 0 + +static inline int dccp_list_has_service(const struct dccp_service_list *sl, + const __be32 service) +{ + if (likely(sl != NULL)) { + u32 i = sl->dccpsl_nr; + while (i--) + if (sl->dccpsl_list[i] == service) + return 1; + } + return 0; +} + +struct dccp_ackvec; + +/** + * struct dccp_sock - DCCP socket state + * + * @dccps_swl - sequence number window low + * @dccps_swh - sequence number window high + * @dccps_awl - acknowledgement number window low + * @dccps_awh - acknowledgement number window high + * @dccps_iss - initial sequence number sent + * @dccps_isr - initial sequence number received + * @dccps_osr - first OPEN sequence number received + * @dccps_gss - greatest sequence number sent + * @dccps_gsr - greatest valid sequence number received + * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss + * @dccps_service - first (passive sock) or unique (active sock) service code + * @dccps_service_list - second .. 
last service code on passive socket + * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option + * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo + * @dccps_l_ack_ratio - feature-local Ack Ratio + * @dccps_r_ack_ratio - feature-remote Ack Ratio + * @dccps_l_seq_win - local Sequence Window (influences ack number validity) + * @dccps_r_seq_win - remote Sequence Window (influences seq number validity) + * @dccps_pcslen - sender partial checksum coverage (via sockopt) + * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) + * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2) + * @dccps_ndp_count - number of Non Data Packets since last data packet + * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) + * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) + * @dccps_featneg - tracks feature-negotiation state (mostly during handshake) + * @dccps_hc_rx_ackvec - rx half connection ack vector + * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) + * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection) + * @dccps_options_received - parsed set of retrieved options + * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy + * @dccps_tx_qlen - maximum length of the TX queue + * @dccps_role - role of this sock, one of %dccp_role + * @dccps_hc_rx_insert_options - receiver wants to add options when acking + * @dccps_hc_tx_insert_options - sender wants to add options when sending + * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3) + * @dccps_sync_scheduled - flag which signals "send out-of-band message soon" + * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets + * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing) + * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs) + */ +struct dccp_sock { + /* inet_connection_sock has to be the first member of dccp_sock */ + struct inet_connection_sock dccps_inet_connection; +#define dccps_syn_rtt dccps_inet_connection.icsk_ack.lrcvtime + __u64 dccps_swl; + __u64 dccps_swh; + __u64 dccps_awl; + __u64 dccps_awh; + __u64 dccps_iss; + __u64 dccps_isr; + __u64 dccps_osr; + __u64 dccps_gss; + __u64 dccps_gsr; + __u64 dccps_gar; + __be32 dccps_service; + __u32 dccps_mss_cache; + struct dccp_service_list *dccps_service_list; + __u32 dccps_timestamp_echo; + __u32 dccps_timestamp_time; + __u16 dccps_l_ack_ratio; + __u16 dccps_r_ack_ratio; + __u64 dccps_l_seq_win:48; + __u64 dccps_r_seq_win:48; + __u8 dccps_pcslen:4; + __u8 dccps_pcrlen:4; + __u8 dccps_send_ndp_count:1; + __u64 dccps_ndp_count:48; + unsigned long dccps_rate_last; + struct list_head dccps_featneg; + struct dccp_ackvec *dccps_hc_rx_ackvec; + struct ccid *dccps_hc_rx_ccid; + struct ccid *dccps_hc_tx_ccid; + struct dccp_options_received dccps_options_received; + __u8 dccps_qpolicy; + __u32 dccps_tx_qlen; + enum dccp_role dccps_role:2; + __u8 dccps_hc_rx_insert_options:1; + __u8 dccps_hc_tx_insert_options:1; + __u8 dccps_server_timewait:1; + __u8 dccps_sync_scheduled:1; + struct tasklet_struct dccps_xmitlet; + struct timer_list dccps_xmit_timer; +}; + +static inline struct dccp_sock *dccp_sk(const struct sock *sk) +{ + return (struct dccp_sock *)sk; +} + +static inline const char *dccp_role(const struct sock *sk) +{ + switch (dccp_sk(sk)->dccps_role) { + case DCCP_ROLE_UNDEFINED: return "undefined"; + case DCCP_ROLE_LISTEN: return "listen"; 
+ case DCCP_ROLE_SERVER: return "server"; + case DCCP_ROLE_CLIENT: return "client"; + } + return NULL; +} + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_DCCP_H */ diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h new file mode 100644 index 00000000..5ac3bdd5 --- /dev/null +++ b/include/linux/dcookies.h @@ -0,0 +1,68 @@ +/* + * dcookies.h + * + * Persistent cookie-path mappings + * + * Copyright 2002 John Levon <levon@movementarian.org> + */ + +#ifndef DCOOKIES_H +#define DCOOKIES_H + + +#ifdef CONFIG_PROFILING + +#include <linux/dcache.h> +#include <linux/types.h> + +struct dcookie_user; +struct path; + +/** + * dcookie_register - register a user of dcookies + * + * Register as a dcookie user. Returns %NULL on failure. + */ +struct dcookie_user * dcookie_register(void); + +/** + * dcookie_unregister - unregister a user of dcookies + * + * Unregister as a dcookie user. This may invalidate + * any dcookie values returned from get_dcookie(). + */ +void dcookie_unregister(struct dcookie_user * user); + +/** + * get_dcookie - acquire a dcookie + * + * Convert the given dentry/vfsmount pair into + * a cookie value. + * + * Returns -EINVAL if no living task has registered as a + * dcookie user. + * + * Returns 0 on success, with *cookie filled in + */ +int get_dcookie(struct path *path, unsigned long *cookie); + +#else + +static inline struct dcookie_user * dcookie_register(void) +{ + return NULL; +} + +static inline void dcookie_unregister(struct dcookie_user * user) +{ + return; +} + +static inline int get_dcookie(struct path *path, unsigned long *cookie) +{ + return -ENOSYS; +} + +#endif /* CONFIG_PROFILING */ + +#endif /* DCOOKIES_H */ diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h new file mode 100644 index 00000000..3bd46f76 --- /dev/null +++ b/include/linux/debug_locks.h @@ -0,0 +1,75 @@ +#ifndef __LINUX_DEBUG_LOCKING_H +#define __LINUX_DEBUG_LOCKING_H + +#include <linux/kernel.h> +#include <linux/atomic.h> +#include <linux/bug.h> + +struct task_struct; + +extern int debug_locks; +extern int debug_locks_silent; + + +static inline int __debug_locks_off(void) +{ + return xchg(&debug_locks, 0); +} + +/* + * Generic 'turn off all lock debugging' function: + */ +extern int debug_locks_off(void); + +#define DEBUG_LOCKS_WARN_ON(c) \ +({ \ + int __ret = 0; \ + \ + if (!oops_in_progress && unlikely(c)) { \ + if (debug_locks_off() && !debug_locks_silent) \ + WARN_ON(1); \ + __ret = 1; \ + } \ + __ret; \ +}) + +#ifdef CONFIG_SMP +# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) +#else +# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS + extern void locking_selftest(void); +#else +# define locking_selftest() do { } while (0) +#endif + +struct task_struct; + +#ifdef CONFIG_LOCKDEP +extern void debug_show_all_locks(void); +extern void debug_show_held_locks(struct task_struct *task); +extern void debug_check_no_locks_freed(const void *from, unsigned long len); +extern void debug_check_no_locks_held(struct task_struct *task); +#else +static inline void debug_show_all_locks(void) +{ +} + +static inline void debug_show_held_locks(struct task_struct *task) +{ +} + +static inline void +debug_check_no_locks_freed(const void *from, unsigned long len) +{ +} + +static inline void +debug_check_no_locks_held(struct task_struct *task) +{ +} +#endif + +#endif diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h new file mode 100644 index 00000000..ae36b72c --- /dev/null +++ b/include/linux/debugfs.h @@ 
-0,0 +1,224 @@ +/* + * debugfs.h - a tiny little debug file system + * + * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (C) 2004 IBM Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * debugfs is for people to use instead of /proc or /sys. + * See Documentation/DocBook/filesystems for more details. + */ + +#ifndef _DEBUGFS_H_ +#define _DEBUGFS_H_ + +#include <linux/fs.h> +#include <linux/seq_file.h> + +#include <linux/types.h> + +struct file_operations; + +struct debugfs_blob_wrapper { + void *data; + unsigned long size; +}; + +struct debugfs_reg32 { + char *name; + unsigned long offset; +}; + +struct debugfs_regset32 { + struct debugfs_reg32 *regs; + int nregs; + void __iomem *base; +}; + +extern struct dentry *arch_debugfs_dir; + +#if defined(CONFIG_DEBUG_FS) + +/* declared over in file.c */ +extern const struct file_operations debugfs_file_operations; +extern const struct inode_operations debugfs_link_operations; + +struct dentry *debugfs_create_file(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); + +struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); + +struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, + const char *dest); + +void debugfs_remove(struct dentry *dentry); +void debugfs_remove_recursive(struct dentry *dentry); + +struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, + struct dentry *new_dir, const char *new_name); + +struct dentry *debugfs_create_u8(const char *name, umode_t mode, + struct dentry *parent, u8 *value); +struct dentry *debugfs_create_u16(const char *name, umode_t mode, + struct dentry *parent, u16 *value); +struct dentry *debugfs_create_u32(const char *name, umode_t mode, + struct dentry *parent, u32 *value); +struct dentry *debugfs_create_u64(const char *name, umode_t mode, + struct dentry *parent, u64 *value); +struct dentry *debugfs_create_x8(const char *name, umode_t mode, + struct dentry *parent, u8 *value); +struct dentry *debugfs_create_x16(const char *name, umode_t mode, + struct dentry *parent, u16 *value); +struct dentry *debugfs_create_x32(const char *name, umode_t mode, + struct dentry *parent, u32 *value); +struct dentry *debugfs_create_x64(const char *name, umode_t mode, + struct dentry *parent, u64 *value); +struct dentry *debugfs_create_size_t(const char *name, umode_t mode, + struct dentry *parent, size_t *value); +struct dentry *debugfs_create_bool(const char *name, umode_t mode, + struct dentry *parent, u32 *value); + +struct dentry *debugfs_create_blob(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_blob_wrapper *blob); + +struct dentry *debugfs_create_regset32(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_regset32 *regset); + +int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, + int nregs, void __iomem *base, char *prefix); + +bool debugfs_initialized(void); + +#else + +#include <linux/err.h> + +/* + * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled + * so users have a chance to detect if there was a real error or not. We don't + * want to duplicate the design decision mistakes of procfs and devfs again. 
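+ *
+ * Editor's note -- an illustrative caller-side sketch; the entry name and
+ * counter below are assumed, not part of this header. One common defensive
+ * pattern treats NULL and ERR_PTR() alike, so the same check works whether
+ * or not debugfs is built in:
+ *
+ *        struct dentry *d;
+ *
+ *        d = debugfs_create_u32("foo_count", 0444, NULL, &foo_count);
+ *        if (IS_ERR_OR_NULL(d))
+ *                pr_debug("debugfs entry not created\n");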
+ */ + +static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_dir(const char *name, + struct dentry *parent) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_symlink(const char *name, + struct dentry *parent, + const char *dest) +{ + return ERR_PTR(-ENODEV); +} + +static inline void debugfs_remove(struct dentry *dentry) +{ } + +static inline void debugfs_remove_recursive(struct dentry *dentry) +{ } + +static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, + struct dentry *new_dir, char *new_name) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode, + struct dentry *parent, + u8 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode, + struct dentry *parent, + u16 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode, + struct dentry *parent, + u32 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode, + struct dentry *parent, + u64 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode, + struct dentry *parent, + u8 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode, + struct dentry *parent, + u16 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode, + struct dentry *parent, + u32 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode, + struct dentry *parent, + size_t *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, + struct dentry *parent, + u32 *value) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_blob_wrapper *blob) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *debugfs_create_regset32(const char *name, + umode_t mode, struct dentry *parent, + struct debugfs_regset32 *regset) +{ + return ERR_PTR(-ENODEV); +} + +static inline bool debugfs_initialized(void) +{ + return false; +} + +#endif + +#endif diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h new file mode 100644 index 00000000..0e5f5785 --- /dev/null +++ b/include/linux/debugobjects.h @@ -0,0 +1,110 @@ +#ifndef _LINUX_DEBUGOBJECTS_H +#define _LINUX_DEBUGOBJECTS_H + +#include <linux/list.h> +#include <linux/spinlock.h> + +enum debug_obj_state { + ODEBUG_STATE_NONE, + ODEBUG_STATE_INIT, + ODEBUG_STATE_INACTIVE, + ODEBUG_STATE_ACTIVE, + ODEBUG_STATE_DESTROYED, + ODEBUG_STATE_NOTAVAILABLE, + ODEBUG_STATE_MAX, +}; + +struct debug_obj_descr; + +/** + * struct debug_obj - representaion of an tracked object + * @node: hlist node to link the object into the tracker list + * @state: tracked object state + * @astate: current active state + * @object: pointer to the real object + * @descr: pointer to an object type specific debug description structure + */ +struct debug_obj { + struct hlist_node node; + enum debug_obj_state state; + 
unsigned int astate; + void *object; + struct debug_obj_descr *descr; +}; + +/** + * struct debug_obj_descr - object type specific debug description structure + * + * @name: name of the object type + * @debug_hint: function returning an address which has an associated + * kernel symbol, to allow identifying the object + * @fixup_init: fixup function, which is called when the init check + * fails + * @fixup_activate: fixup function, which is called when the activate check + * fails + * @fixup_destroy: fixup function, which is called when the destroy check + * fails + * @fixup_free: fixup function, which is called when the free check + * fails + * @fixup_assert_init: fixup function, which is called when the assert_init + * check fails + */ +struct debug_obj_descr { + const char *name; + void *(*debug_hint) (void *addr); + int (*fixup_init) (void *addr, enum debug_obj_state state); + int (*fixup_activate) (void *addr, enum debug_obj_state state); + int (*fixup_destroy) (void *addr, enum debug_obj_state state); + int (*fixup_free) (void *addr, enum debug_obj_state state); + int (*fixup_assert_init)(void *addr, enum debug_obj_state state); +}; + +#ifdef CONFIG_DEBUG_OBJECTS +extern void debug_object_init (void *addr, struct debug_obj_descr *descr); +extern void +debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr); +extern void debug_object_activate (void *addr, struct debug_obj_descr *descr); +extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); +extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); +extern void debug_object_free (void *addr, struct debug_obj_descr *descr); +extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr); + +/* + * Active state: + * - Set at 0 upon initialization. + * - Must return to 0 before deactivation.
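+ *
+ * Editor's note -- an illustrative sketch; the object and descriptor names
+ * are assumed, not taken from this header:
+ *
+ *        debug_object_active_state(obj, &my_descr, 0, 1);   claim it: 0 -> 1
+ *        ...
+ *        debug_object_active_state(obj, &my_descr, 1, 0);   release: 1 -> 0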
+ */ +extern void +debug_object_active_state(void *addr, struct debug_obj_descr *descr, + unsigned int expect, unsigned int next); + +extern void debug_objects_early_init(void); +extern void debug_objects_mem_init(void); +#else +static inline void +debug_object_init (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_activate (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_destroy (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_free (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { } + +static inline void debug_objects_early_init(void) { } +static inline void debug_objects_mem_init(void) { } +#endif + +#ifdef CONFIG_DEBUG_OBJECTS_FREE +extern void debug_check_no_obj_freed(const void *address, unsigned long size); +#else +static inline void +debug_check_no_obj_freed(const void *address, unsigned long size) { } +#endif + +#endif diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h new file mode 100644 index 00000000..11527213 --- /dev/null +++ b/include/linux/decompress/bunzip2.h @@ -0,0 +1,10 @@ +#ifndef DECOMPRESS_BUNZIP2_H +#define DECOMPRESS_BUNZIP2_H + +int bunzip2(unsigned char *inbuf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *pos, + void(*error)(char *x)); +#endif diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h new file mode 100644 index 00000000..0c7111a5 --- /dev/null +++ b/include/linux/decompress/generic.h @@ -0,0 +1,39 @@ +#ifndef DECOMPRESS_GENERIC_H +#define DECOMPRESS_GENERIC_H + +typedef int (*decompress_fn) (unsigned char *inbuf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *outbuf, + int *posp, + void(*error)(char *x)); + +/* inbuf - input buffer + *len - len of pre-read data in inbuf + *fill - function to fill inbuf when empty + *flush - function to write out outbuf + *outbuf - output buffer + *posp - if non-null, input position (number of bytes read) will be + * returned here + * + *If len != 0, inbuf should contain all the necessary input data, and fill + *should be NULL + *If len = 0, inbuf can be NULL, in which case the decompressor will allocate + *the input buffer. If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes. + *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE + *bytes should be read per call. Replace XXX with the appropriate decompressor + *name, i.e. LZMA_IOBUF_SIZE. + * + *If flush = NULL, outbuf must be large enough to buffer all the expected + *output. If flush != NULL, the output buffer will be allocated by the + *decompressor (outbuf = NULL), and the flush function will be called to + *flush the output buffer at the appropriate time (decompressor and stream + *dependent). 
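+*
+*Editor's note -- an illustrative in-memory call sequence; the buffer and
+*callback names are assumed, not part of this header. With len != 0 the fill
+*callback is NULL, and with flush == NULL outbuf must hold all of the output:
+*
+*        const char *name;
+*        decompress_fn decomp = decompress_method(inbuf, len, &name);
+*
+*        if (decomp)
+*                decomp(inbuf, len, NULL, NULL, outbuf, NULL, my_error);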
+ */ + + +/* Utility routine to detect the decompression method */ +decompress_fn decompress_method(const unsigned char *inbuf, int len, + const char **name); + +#endif diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h new file mode 100644 index 00000000..8c0aef1b --- /dev/null +++ b/include/linux/decompress/inflate.h @@ -0,0 +1,10 @@ +#ifndef INFLATE_H +#define INFLATE_H + +int gunzip(unsigned char *inbuf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *pos, + void(*error_fn)(char *x)); +#endif diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h new file mode 100644 index 00000000..7925bf0e --- /dev/null +++ b/include/linux/decompress/mm.h @@ -0,0 +1,93 @@ +/* + * linux/compr_mm.h + * + * Memory management for pre-boot and ramdisk uncompressors + * + * Authors: Alain Knaff <alain@knaff.lu> + * + */ + +#ifndef DECOMPR_MM_H +#define DECOMPR_MM_H + +#ifdef STATIC + +/* Code active when included from pre-boot environment: */ + +/* + * Some architectures want to ensure there is no local data in their + * pre-boot environment, so that data can arbitrarily relocated (via + * GOT references). This is achieved by defining STATIC_RW_DATA to + * be null. + */ +#ifndef STATIC_RW_DATA +#define STATIC_RW_DATA static +#endif + +/* A trivial malloc implementation, adapted from + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + */ +STATIC_RW_DATA unsigned long malloc_ptr; +STATIC_RW_DATA int malloc_count; + +static void *malloc(int size) +{ + void *p; + + if (size < 0) + return NULL; + if (!malloc_ptr) + malloc_ptr = free_mem_ptr; + + malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ + + p = (void *)malloc_ptr; + malloc_ptr += size; + + if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) + return NULL; + + malloc_count++; + return p; +} + +static void free(void *where) +{ + malloc_count--; + if (!malloc_count) + malloc_ptr = free_mem_ptr; +} + +#define large_malloc(a) malloc(a) +#define large_free(a) free(a) + +#define INIT + +#else /* STATIC */ + +/* Code active when compiled standalone for use when loading ramdisk: */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +/* Use defines rather than static inline in order to avoid spurious + * warnings when not needed (indeed large_malloc / large_free are not + * needed by inflate */ + +#define malloc(a) kmalloc(a, GFP_KERNEL) +#define free(a) kfree(a) + +#define large_malloc(a) vmalloc(a) +#define large_free(a) vfree(a) + +#define INIT __init +#define STATIC + +#include <linux/init.h> + +#endif /* STATIC */ + +#endif /* DECOMPR_MM_H */ diff --git a/include/linux/decompress/unlz4.h b/include/linux/decompress/unlz4.h new file mode 100755 index 00000000..d5b68bf3 --- /dev/null +++ b/include/linux/decompress/unlz4.h @@ -0,0 +1,10 @@ +#ifndef DECOMPRESS_UNLZ4_H +#define DECOMPRESS_UNLZ4_H + +int unlz4(unsigned char *inbuf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *pos, + void(*error)(char *x)); +#endif diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h new file mode 100644 index 00000000..7796538f --- /dev/null +++ b/include/linux/decompress/unlzma.h @@ -0,0 +1,12 @@ +#ifndef DECOMPRESS_UNLZMA_H +#define DECOMPRESS_UNLZMA_H + +int unlzma(unsigned char *, int, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char 
*output, + int *posp, + void(*error)(char *x) + ); + +#endif diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h new file mode 100644 index 00000000..98722975 --- /dev/null +++ b/include/linux/decompress/unlzo.h @@ -0,0 +1,10 @@ +#ifndef DECOMPRESS_UNLZO_H +#define DECOMPRESS_UNLZO_H + +int unlzo(unsigned char *inbuf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *pos, + void(*error)(char *x)); +#endif diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h new file mode 100644 index 00000000..41728fc6 --- /dev/null +++ b/include/linux/decompress/unxz.h @@ -0,0 +1,19 @@ +/* + * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd + * + * Author: Lasse Collin <lasse.collin@tukaani.org> + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef DECOMPRESS_UNXZ_H +#define DECOMPRESS_UNXZ_H + +int unxz(unsigned char *in, int in_size, + int (*fill)(void *dest, unsigned int size), + int (*flush)(void *src, unsigned int size), + unsigned char *out, int *in_used, + void (*error)(char *x)); + +#endif diff --git a/include/linux/delay.h b/include/linux/delay.h new file mode 100644 index 00000000..a6ecb34c --- /dev/null +++ b/include/linux/delay.h @@ -0,0 +1,55 @@ +#ifndef _LINUX_DELAY_H +#define _LINUX_DELAY_H + +/* + * Copyright (C) 1993 Linus Torvalds + * + * Delay routines, using a pre-computed "loops_per_jiffy" value. + */ + +#include <linux/kernel.h> + +extern unsigned long loops_per_jiffy; + +#include <asm/delay.h> + +/* + * Using udelay() for intervals greater than a few milliseconds can + * risk overflow for high loops_per_jiffy (high bogomips) machines. The + * mdelay() provides a wrapper to prevent this. For delays greater + * than MAX_UDELAY_MS milliseconds, the wrapper is used. Architecture + * specific values can be defined in asm-???/delay.h as an override. + * The 2nd mdelay() definition ensures GCC will optimize away the + * while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G. + */ + +#ifndef MAX_UDELAY_MS +#define MAX_UDELAY_MS 5 +#endif + +#ifndef mdelay +#define mdelay(n) (\ + (__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \ + ({unsigned long __ms=(n); while (__ms--) udelay(1000);})) +#endif + +#ifndef ndelay +static inline void ndelay(unsigned long x) +{ + udelay(DIV_ROUND_UP(x, 1000)); +} +#define ndelay(x) ndelay(x) +#endif + +extern unsigned long lpj_fine; +void calibrate_delay(void); +void msleep(unsigned int msecs); +unsigned long msleep_interruptible(unsigned int msecs); +void usleep_range(unsigned long min, unsigned long max); + +static inline void ssleep(unsigned int seconds) +{ + msleep(seconds * 1000); +} + +#endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h new file mode 100644 index 00000000..6cee17c2 --- /dev/null +++ b/include/linux/delayacct.h @@ -0,0 +1,153 @@ +/* delayacct.h - per-task delay accounting + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + */ + +#ifndef _LINUX_DELAYACCT_H +#define _LINUX_DELAYACCT_H + +#include <linux/sched.h> +#include <linux/slab.h> + +/* + * Per-task flags relevant to delay accounting + * maintained privately to avoid exhausting similar flags in sched.h:PF_* + * Used to set current->delays->flags + */ +#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */ +#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */ + +#ifdef CONFIG_TASK_DELAY_ACCT + +extern int delayacct_on; /* Delay accounting turned on/off */ +extern struct kmem_cache *delayacct_cache; +extern void delayacct_init(void); +extern void __delayacct_tsk_init(struct task_struct *); +extern void __delayacct_tsk_exit(struct task_struct *); +extern void __delayacct_blkio_start(void); +extern void __delayacct_blkio_end(void); +extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); +extern __u64 __delayacct_blkio_ticks(struct task_struct *); +extern void __delayacct_freepages_start(void); +extern void __delayacct_freepages_end(void); + +static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) +{ + if (p->delays) + return (p->delays->flags & DELAYACCT_PF_BLKIO); + else + return 0; +} + +static inline void delayacct_set_flag(int flag) +{ + if (current->delays) + current->delays->flags |= flag; +} + +static inline void delayacct_clear_flag(int flag) +{ + if (current->delays) + current->delays->flags &= ~flag; +} + +static inline void delayacct_tsk_init(struct task_struct *tsk) +{ + /* reinitialize in case parent's non-null pointer was dup'ed*/ + tsk->delays = NULL; + if (delayacct_on) + __delayacct_tsk_init(tsk); +} + +/* Free tsk->delays. 
Called from bad fork and __put_task_struct + * where there's no risk of tsk->delays being accessed elsewhere + */ +static inline void delayacct_tsk_free(struct task_struct *tsk) +{ + if (tsk->delays) + kmem_cache_free(delayacct_cache, tsk->delays); + tsk->delays = NULL; +} + +static inline void delayacct_blkio_start(void) +{ + delayacct_set_flag(DELAYACCT_PF_BLKIO); + if (current->delays) + __delayacct_blkio_start(); +} + +static inline void delayacct_blkio_end(void) +{ + if (current->delays) + __delayacct_blkio_end(); + delayacct_clear_flag(DELAYACCT_PF_BLKIO); +} + +static inline int delayacct_add_tsk(struct taskstats *d, + struct task_struct *tsk) +{ + if (!delayacct_on || !tsk->delays) + return 0; + return __delayacct_add_tsk(d, tsk); +} + +static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) +{ + if (tsk->delays) + return __delayacct_blkio_ticks(tsk); + return 0; +} + +static inline void delayacct_freepages_start(void) +{ + if (current->delays) + __delayacct_freepages_start(); +} + +static inline void delayacct_freepages_end(void) +{ + if (current->delays) + __delayacct_freepages_end(); +} + +#else +static inline void delayacct_set_flag(int flag) +{} +static inline void delayacct_clear_flag(int flag) +{} +static inline void delayacct_init(void) +{} +static inline void delayacct_tsk_init(struct task_struct *tsk) +{} +static inline void delayacct_tsk_free(struct task_struct *tsk) +{} +static inline void delayacct_blkio_start(void) +{} +static inline void delayacct_blkio_end(void) +{} +static inline int delayacct_add_tsk(struct taskstats *d, + struct task_struct *tsk) +{ return 0; } +static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) +{ return 0; } +static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) +{ return 0; } +static inline void delayacct_freepages_start(void) +{} +static inline void delayacct_freepages_end(void) +{} + +#endif /* CONFIG_TASK_DELAY_ACCT */ + +#endif diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h new file mode 100644 index 00000000..281c72a3 --- /dev/null +++ b/include/linux/devfreq.h @@ -0,0 +1,253 @@ +/* + * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework + * for Non-CPU Devices. + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_DEVFREQ_H__ +#define __LINUX_DEVFREQ_H__ + +#include <linux/device.h> +#include <linux/notifier.h> +#include <linux/opp.h> + +#define DEVFREQ_NAME_LEN 16 + +struct devfreq; + +/** + * struct devfreq_dev_status - Data given from devfreq user device to + * governors. Represents the performance + * statistics. + * @total_time The total time represented by this instance of + * devfreq_dev_status + * @busy_time The time that the device was working among the + * total_time. + * @current_frequency The operating frequency. + * @private_data An entry not specified by the devfreq framework. + * A device and a specific governor may have their + * own protocol with private_data. However, because + * this is governor-specific, a governor using this + * will be only compatible with devices aware of it. 
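+ *
+ * Editor's note -- an illustrative get_dev_status() sketch; the helper
+ * counter functions are assumed, not part of this header:
+ *
+ *        static int foo_get_dev_status(struct device *dev,
+ *                                      struct devfreq_dev_status *stat)
+ *        {
+ *                stat->busy_time         = foo_busy_ticks_since_last();
+ *                stat->total_time        = foo_total_ticks_since_last();
+ *                stat->current_frequency = foo_current_freq();
+ *                return 0;
+ *        }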
+ */ +struct devfreq_dev_status { + /* both since the last measure */ + unsigned long total_time; + unsigned long busy_time; + unsigned long current_frequency; + void *private_data; +}; + +/* + * The resulting frequency should be at most this. (this bound is the + * least upper bound; thus, the resulting freq should be lower or same) + * If the flag is not set, the resulting frequency should be at most the + * bound (greatest lower bound) + */ +#define DEVFREQ_FLAG_LEAST_UPPER_BOUND 0x1 + +/** + * struct devfreq_dev_profile - Devfreq's user device profile + * @initial_freq The operating frequency when devfreq_add_device() is + * called. + * @polling_ms The polling interval in ms. 0 disables polling. + * @target The device should set its operating frequency at + * freq or lowest-upper-than-freq value. If freq is + * higher than any operable frequency, set maximum. + * Before returning, target function should set + * freq at the current frequency. + * The "flags" parameter's possible values are + * explained above with "DEVFREQ_FLAG_*" macros. + * @get_dev_status The device should provide the current performance + * status to devfreq, which is used by governors. + * @exit An optional callback that is called when devfreq + * is removing the devfreq object due to error or + * from devfreq_remove_device() call. If the user + * has registered devfreq->nb at a notifier-head, + * this is the time to unregister it. + */ +struct devfreq_dev_profile { + unsigned long initial_freq; + unsigned int polling_ms; + + int (*target)(struct device *dev, unsigned long *freq, u32 flags); + int (*get_dev_status)(struct device *dev, + struct devfreq_dev_status *stat); + void (*exit)(struct device *dev); +}; + +/** + * struct devfreq_governor - Devfreq policy governor + * @name Governor's name + * @get_target_freq Returns desired operating frequency for the device. + * Basically, get_target_freq will run + * devfreq_dev_profile.get_dev_status() to get the + * status of the device (load = busy_time / total_time). + * If no_central_polling is set, this callback is called + * only with update_devfreq() notified by OPP. + * @init Called when the devfreq is being attached to a device + * @exit Called when the devfreq is being removed from a + * device. Governor should stop any internal routines + * before return because related data may be + * freed after exit(). + * @no_central_polling Do not use devfreq's central polling mechanism. + * When this is set, devfreq will not call + * get_target_freq with devfreq_monitor(). However, + * devfreq will call get_target_freq with + * devfreq_update() notified by OPP framework. + * + * Note that the callbacks are called with devfreq->lock locked by devfreq. + */ +struct devfreq_governor { + const char name[DEVFREQ_NAME_LEN]; + int (*get_target_freq)(struct devfreq *this, unsigned long *freq); + int (*init)(struct devfreq *this); + void (*exit)(struct devfreq *this); + const bool no_central_polling; +}; + +/** + * struct devfreq - Device devfreq structure + * @node list node - contains the devices with devfreq that have been + * registered. + * @lock a mutex to protect accessing devfreq. + * @dev device registered by devfreq class. dev.parent is the device + * using devfreq. + * @profile device-specific devfreq profile + * @governor method how to choose frequency based on the usage. + * @nb notifier block used to notify devfreq object that it should + * reevaluate operable frequencies. Devfreq users may use + * devfreq.nb to the corresponding register notifier call chain. 
+ * @polling_jiffies interval in jiffies. + * @previous_freq previously configured frequency value. + * @next_polling the number of remaining jiffies to poll with + * "devfreq_monitor" executions to reevaluate + * frequency/voltage of the device. Set by + * profile's polling_ms interval. + * @data Private data of the governor. The devfreq framework does not + * touch this. + * @being_removed a flag to mark that this object is being removed in + * order to prevent trying to remove the object multiple times. + * @min_freq Limit minimum frequency requested by user (0: none) + * @max_freq Limit maximum frequency requested by user (0: none) + * + * This structure stores the devfreq information for a give device. + * + * Note that when a governor accesses entries in struct devfreq in its + * functions except for the context of callbacks defined in struct + * devfreq_governor, the governor should protect its access with the + * struct mutex lock in struct devfreq. A governor may use this mutex + * to protect its own private data in void *data as well. + */ +struct devfreq { + struct list_head node; + + struct mutex lock; + struct device dev; + struct devfreq_dev_profile *profile; + const struct devfreq_governor *governor; + struct notifier_block nb; + + unsigned long polling_jiffies; + unsigned long previous_freq; + unsigned int next_polling; + + void *data; /* private data for governors */ + + bool being_removed; + + unsigned long min_freq; + unsigned long max_freq; +}; + +#if defined(CONFIG_PM_DEVFREQ) +extern struct devfreq *devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const struct devfreq_governor *governor, + void *data); +extern int devfreq_remove_device(struct devfreq *devfreq); + +/* Helper functions for devfreq user device driver with OPP. */ +extern struct opp *devfreq_recommended_opp(struct device *dev, + unsigned long *freq, u32 flags); +extern int devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq); +extern int devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq); + +#ifdef CONFIG_DEVFREQ_GOV_POWERSAVE +extern const struct devfreq_governor devfreq_powersave; +#endif +#ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE +extern const struct devfreq_governor devfreq_performance; +#endif +#ifdef CONFIG_DEVFREQ_GOV_USERSPACE +extern const struct devfreq_governor devfreq_userspace; +#endif +#ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND +extern const struct devfreq_governor devfreq_simple_ondemand; +/** + * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq + * and devfreq_add_device + * @ upthreshold If the load is over this value, the frequency jumps. + * Specify 0 to use the default. Valid value = 0 to 100. + * @ downdifferential If the load is under upthreshold - downdifferential, + * the governor may consider slowing the frequency down. + * Specify 0 to use the default. Valid value = 0 to 100. + * downdifferential < upthreshold must hold. + * + * If the fed devfreq_simple_ondemand_data pointer is NULL to the governor, + * the governor uses the default values. 
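+ *
+ * Editor's note -- an illustrative sketch; the threshold values and the
+ * profile/device names are assumed, not part of this header:
+ *
+ *        static struct devfreq_simple_ondemand_data foo_od_data = {
+ *                .upthreshold            = 90,
+ *                .downdifferential       = 5,
+ *        };
+ *
+ *        devfreq_add_device(dev, &foo_profile, &devfreq_simple_ondemand,
+ *                           &foo_od_data);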
+ */ +struct devfreq_simple_ondemand_data { + unsigned int upthreshold; + unsigned int downdifferential; +}; +#endif + +#else /* !CONFIG_PM_DEVFREQ */ +static struct devfreq *devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + struct devfreq_governor *governor, + void *data) +{ + return NULL; +} + +static int devfreq_remove_device(struct devfreq *devfreq) +{ + return 0; +} + +static struct opp *devfreq_recommended_opp(struct device *dev, + unsigned long *freq, u32 flags) +{ + return -EINVAL; +} + +static int devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ + return -EINVAL; +} + +static int devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq) +{ + return -EINVAL; +} + +#define devfreq_powersave NULL +#define devfreq_performance NULL +#define devfreq_userspace NULL +#define devfreq_simple_ondemand NULL + +#endif /* CONFIG_PM_DEVFREQ */ + +#endif /* __LINUX_DEVFREQ_H__ */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h new file mode 100644 index 00000000..98f34b88 --- /dev/null +++ b/include/linux/device-mapper.h @@ -0,0 +1,514 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * This file is released under the LGPL. + */ + +#ifndef _LINUX_DEVICE_MAPPER_H +#define _LINUX_DEVICE_MAPPER_H + +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/ratelimit.h> + +struct dm_dev; +struct dm_target; +struct dm_table; +struct mapped_device; +struct bio_vec; + +typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; + +union map_info { + void *ptr; + unsigned long long ll; + unsigned target_request_nr; +}; + +/* + * In the constructor the target parameter will already have the + * table, type, begin and len fields filled in. + */ +typedef int (*dm_ctr_fn) (struct dm_target *target, + unsigned int argc, char **argv); + +/* + * The destructor doesn't need to free the dm_target, just + * anything hidden ti->private. + */ +typedef void (*dm_dtr_fn) (struct dm_target *ti); + +/* + * The map function must return: + * < 0: error + * = 0: The target will handle the io by resubmitting it later + * = 1: simple remap complete + * = 2: The target wants to push back the io + */ +typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio, + union map_info *map_context); +typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, + union map_info *map_context); + +/* + * Returns: + * < 0 : error (currently ignored) + * 0 : ended successfully + * 1 : for some reason the io has still not completed (eg, + * multipath target might want to requeue a failed io). 
+ * 2 : The target wants to push back the io + */ +typedef int (*dm_endio_fn) (struct dm_target *ti, + struct bio *bio, int error, + union map_info *map_context); +typedef int (*dm_request_endio_fn) (struct dm_target *ti, + struct request *clone, int error, + union map_info *map_context); + +typedef void (*dm_flush_fn) (struct dm_target *ti); +typedef void (*dm_presuspend_fn) (struct dm_target *ti); +typedef void (*dm_postsuspend_fn) (struct dm_target *ti); +typedef int (*dm_preresume_fn) (struct dm_target *ti); +typedef void (*dm_resume_fn) (struct dm_target *ti); + +typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, + char *result, unsigned int maxlen); + +typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); + +typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, + unsigned long arg); + +typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, + struct bio_vec *biovec, int max_size); + +typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, + struct dm_dev *dev, + sector_t start, sector_t len, + void *data); + +typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, + iterate_devices_callout_fn fn, + void *data); + +typedef void (*dm_io_hints_fn) (struct dm_target *ti, + struct queue_limits *limits); + +/* + * Returns: + * 0: The target can handle the next I/O immediately. + * 1: The target can't handle the next I/O immediately. + */ +typedef int (*dm_busy_fn) (struct dm_target *ti); + +void dm_error(const char *message); + +/* + * Combine device limits. + */ +int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data); + +struct dm_dev { + struct block_device *bdev; + fmode_t mode; + char name[16]; +}; + +/* + * Constructors should call these functions to ensure destination devices + * are opened/closed correctly. + */ +int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, + struct dm_dev **result); +void dm_put_device(struct dm_target *ti, struct dm_dev *d); + +/* + * Information about a target type + */ + +struct target_type { + uint64_t features; + const char *name; + struct module *module; + unsigned version[3]; + dm_ctr_fn ctr; + dm_dtr_fn dtr; + dm_map_fn map; + dm_map_request_fn map_rq; + dm_endio_fn end_io; + dm_request_endio_fn rq_end_io; + dm_flush_fn flush; + dm_presuspend_fn presuspend; + dm_postsuspend_fn postsuspend; + dm_preresume_fn preresume; + dm_resume_fn resume; + dm_status_fn status; + dm_message_fn message; + dm_ioctl_fn ioctl; + dm_merge_fn merge; + dm_busy_fn busy; + dm_iterate_devices_fn iterate_devices; + dm_io_hints_fn io_hints; + + /* For internal device-mapper use. */ + struct list_head list; +}; + +/* + * Target features + */ + +/* + * Any table that contains an instance of this target must have only one. + */ +#define DM_TARGET_SINGLETON 0x00000001 +#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON) + +/* + * Indicates that a target does not support read-only devices. + */ +#define DM_TARGET_ALWAYS_WRITEABLE 0x00000002 +#define dm_target_always_writeable(type) \ + ((type)->features & DM_TARGET_ALWAYS_WRITEABLE) + +/* + * Any device that contains a table with an instance of this target may never + * have tables containing any different target type. 
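+ *
+ * Editor's note -- an illustrative sketch; the target name and callbacks
+ * are assumed, not taken from this header:
+ *
+ *        static struct target_type foo_target = {
+ *                .name     = "foo",
+ *                .version  = {1, 0, 0},
+ *                .features = DM_TARGET_IMMUTABLE,
+ *                .module   = THIS_MODULE,
+ *                .ctr      = foo_ctr,
+ *                .dtr      = foo_dtr,
+ *                .map      = foo_map,
+ *        };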
+ */ +#define DM_TARGET_IMMUTABLE 0x00000004 +#define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE) + +struct dm_target { + struct dm_table *table; + struct target_type *type; + + /* target limits */ + sector_t begin; + sector_t len; + + /* Always a power of 2 */ + sector_t split_io; + + /* + * A number of zero-length barrier requests that will be submitted + * to the target for the purpose of flushing cache. + * + * The request number will be placed in union map_info->target_request_nr. + * It is a responsibility of the target driver to remap these requests + * to the real underlying devices. + */ + unsigned num_flush_requests; + + /* + * The number of discard requests that will be submitted to the + * target. map_info->request_nr is used just like num_flush_requests. + */ + unsigned num_discard_requests; + + /* target specific data */ + void *private; + + /* Used to provide an error string from the ctr */ + char *error; + + /* + * Set if this target needs to receive discards regardless of + * whether or not its underlying devices have support. + */ + unsigned discards_supported:1; + + /* + * Set if this target does not return zeroes on discarded blocks. + */ + unsigned discard_zeroes_data_unsupported:1; +}; + +/* Each target can link one of these into the table */ +struct dm_target_callbacks { + struct list_head list; + int (*congested_fn) (struct dm_target_callbacks *, int); +}; + +int dm_register_target(struct target_type *t); +void dm_unregister_target(struct target_type *t); + +/* + * Target argument parsing. + */ +struct dm_arg_set { + unsigned argc; + char **argv; +}; + +/* + * The minimum and maximum value of a numeric argument, together with + * the error message to use if the number is found to be outside that range. + */ +struct dm_arg { + unsigned min; + unsigned max; + char *error; +}; + +/* + * Validate the next argument, either returning it as *value or, if invalid, + * returning -EINVAL and setting *error. + */ +int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *value, char **error); + +/* + * Process the next argument as the start of a group containing between + * arg->min and arg->max further arguments. Either return the size as + * *num_args or, if invalid, return -EINVAL and set *error. + */ +int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *num_args, char **error); + +/* + * Return the current argument and shift to the next. + */ +const char *dm_shift_arg(struct dm_arg_set *as); + +/* + * Move through num_args arguments. + */ +void dm_consume_args(struct dm_arg_set *as, unsigned num_args); + +/*----------------------------------------------------------------- + * Functions for creating and manipulating mapped devices. + * Drop the reference with dm_put when you finish with the object. + *---------------------------------------------------------------*/ + +/* + * DM_ANY_MINOR chooses the next available minor number. + */ +#define DM_ANY_MINOR (-1) +int dm_create(int minor, struct mapped_device **md); + +/* + * Reference counting for md. + */ +struct mapped_device *dm_get_md(dev_t dev); +void dm_get(struct mapped_device *md); +void dm_put(struct mapped_device *md); + +/* + * An arbitrary pointer may be stored alongside a mapped device. + */ +void dm_set_mdptr(struct mapped_device *md, void *ptr); +void *dm_get_mdptr(struct mapped_device *md); + +/* + * A device can still be used while suspended, but I/O is deferred. 
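+ *
+ * Editor's note -- an illustrative suspend/swap/resume sequence; the new
+ * table pointer is assumed and error handling is omitted (the caller must
+ * still destroy the table returned by dm_swap_table()):
+ *
+ *        dm_suspend(md, 0);
+ *        old_table = dm_swap_table(md, new_table);
+ *        dm_resume(md);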
+ */ +int dm_suspend(struct mapped_device *md, unsigned suspend_flags); +int dm_resume(struct mapped_device *md); + +/* + * Event functions. + */ +uint32_t dm_get_event_nr(struct mapped_device *md); +int dm_wait_event(struct mapped_device *md, int event_nr); +uint32_t dm_next_uevent_seq(struct mapped_device *md); +void dm_uevent_add(struct mapped_device *md, struct list_head *elist); + +/* + * Info functions. + */ +const char *dm_device_name(struct mapped_device *md); +int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); +struct gendisk *dm_disk(struct mapped_device *md); +int dm_suspended(struct dm_target *ti); +int dm_noflush_suspending(struct dm_target *ti); +union map_info *dm_get_mapinfo(struct bio *bio); +union map_info *dm_get_rq_mapinfo(struct request *rq); + +/* + * Geometry functions. + */ +int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); +int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); + + +/*----------------------------------------------------------------- + * Functions for manipulating device-mapper tables. + *---------------------------------------------------------------*/ + +/* + * First create an empty table. + */ +int dm_table_create(struct dm_table **result, fmode_t mode, + unsigned num_targets, struct mapped_device *md); + +/* + * Then call this once for each target. + */ +int dm_table_add_target(struct dm_table *t, const char *type, + sector_t start, sector_t len, char *params); + +/* + * Target_ctr should call this if it needs to add any callbacks. + */ +void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); + +/* + * Finally call this to make the table ready for use. + */ +int dm_table_complete(struct dm_table *t); + +/* + * Table reference counting. + */ +struct dm_table *dm_get_live_table(struct mapped_device *md); +void dm_table_get(struct dm_table *t); +void dm_table_put(struct dm_table *t); + +/* + * Queries + */ +sector_t dm_table_get_size(struct dm_table *t); +unsigned int dm_table_get_num_targets(struct dm_table *t); +fmode_t dm_table_get_mode(struct dm_table *t); +struct mapped_device *dm_table_get_md(struct dm_table *t); + +/* + * Trigger an event. + */ +void dm_table_event(struct dm_table *t); + +/* + * The device must be suspended before calling this method. + * Returns the previous table, which the caller must destroy. + */ +struct dm_table *dm_swap_table(struct mapped_device *md, + struct dm_table *t); + +/* + * A wrapper around vmalloc. + */ +void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); + +/*----------------------------------------------------------------- + * Macros. + *---------------------------------------------------------------*/ +#define DM_NAME "device-mapper" + +#ifdef CONFIG_PRINTK +extern struct ratelimit_state dm_ratelimit_state; + +#define dm_ratelimit() __ratelimit(&dm_ratelimit_state) +#else +#define dm_ratelimit() 0 +#endif + +#define DMCRIT(f, arg...) \ + printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) + +#define DMERR(f, arg...) \ + printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) +#define DMERR_LIMIT(f, arg...) \ + do { \ + if (dm_ratelimit()) \ + printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \ + f "\n", ## arg); \ + } while (0) + +#define DMWARN(f, arg...) \ + printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) +#define DMWARN_LIMIT(f, arg...) 
\ + do { \ + if (dm_ratelimit()) \ + printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \ + f "\n", ## arg); \ + } while (0) + +#define DMINFO(f, arg...) \ + printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) +#define DMINFO_LIMIT(f, arg...) \ + do { \ + if (dm_ratelimit()) \ + printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \ + "\n", ## arg); \ + } while (0) + +#ifdef CONFIG_DM_DEBUG +# define DMDEBUG(f, arg...) \ + printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg) +# define DMDEBUG_LIMIT(f, arg...) \ + do { \ + if (dm_ratelimit()) \ + printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \ + "\n", ## arg); \ + } while (0) +#else +# define DMDEBUG(f, arg...) do {} while (0) +# define DMDEBUG_LIMIT(f, arg...) do {} while (0) +#endif + +#define DMEMIT(x...) sz += ((sz >= maxlen) ? \ + 0 : scnprintf(result + sz, maxlen - sz, x)) + +#define SECTOR_SHIFT 9 + +/* + * Definitions of return values from target end_io function. + */ +#define DM_ENDIO_INCOMPLETE 1 +#define DM_ENDIO_REQUEUE 2 + +/* + * Definitions of return values from target map function. + */ +#define DM_MAPIO_SUBMITTED 0 +#define DM_MAPIO_REMAPPED 1 +#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE + +/* + * Ceiling(n / sz) + */ +#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz)) + +#define dm_sector_div_up(n, sz) ( \ +{ \ + sector_t _r = ((n) + (sz) - 1); \ + sector_div(_r, (sz)); \ + _r; \ +} \ +) + +/* + * ceiling(n / size) * size + */ +#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) + +#define dm_array_too_big(fixed, obj, num) \ + ((num) > (UINT_MAX - (fixed)) / (obj)) + +/* + * Sector offset taken relative to the start of the target instead of + * relative to the start of the device. + */ +#define dm_target_offset(ti, sector) ((sector) - (ti)->begin) + +static inline sector_t to_sector(unsigned long n) +{ + return (n >> SECTOR_SHIFT); +} + +static inline unsigned long to_bytes(sector_t n) +{ + return (n << SECTOR_SHIFT); +} + +/*----------------------------------------------------------------- + * Helper for block layer and dm core operations + *---------------------------------------------------------------*/ +void dm_dispatch_request(struct request *rq); +void dm_requeue_unmapped_request(struct request *rq); +void dm_kill_unmapped_request(struct request *rq, int error); +int dm_underlying_device_busy(struct request_queue *q); + +#endif /* _LINUX_DEVICE_MAPPER_H */ diff --git a/include/linux/device.h b/include/linux/device.h new file mode 100644 index 00000000..5ad17ccc --- /dev/null +++ b/include/linux/device.h @@ -0,0 +1,1021 @@ +/* + * device.h - generic, centralized driver model + * + * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> + * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> + * Copyright (c) 2008-2009 Novell Inc. + * + * This file is released under the GPLv2 + * + * See Documentation/driver-model/ for more information. 
+ */ + +#ifndef _DEVICE_H_ +#define _DEVICE_H_ + +#include <linux/ioport.h> +#include <linux/kobject.h> +#include <linux/klist.h> +#include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/pm.h> +#include <linux/atomic.h> +#include <asm/device.h> + +struct device; +struct device_private; +struct device_driver; +struct driver_private; +struct module; +struct class; +struct subsys_private; +struct bus_type; +struct device_node; +struct iommu_ops; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(struct bus_type *bus, char *buf); + ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); +}; + +#define BUS_ATTR(_name, _mode, _show, _store) \ +struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) + +extern int __must_check bus_create_file(struct bus_type *, + struct bus_attribute *); +extern void bus_remove_file(struct bus_type *, struct bus_attribute *); + +/** + * struct bus_type - The bus type of the device + * + * @name: The name of the bus. + * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). + * @dev_root: Default device to use as the parent. + * @bus_attrs: Default attributes of the bus. + * @dev_attrs: Default attributes of the devices on the bus. + * @drv_attrs: Default attributes of the device drivers on the bus. + * @match: Called, perhaps multiple times, whenever a new device or driver + * is added for this bus. It should return a nonzero value if the + * given device can be handled by the given driver. + * @uevent: Called when a device is added, removed, or a few other things + * that generate uevents to add the environment variables. + * @probe: Called when a new device or driver is added to this bus; it calls + * the matched driver's probe to initialize the device. + * @remove: Called when a device is removed from this bus. + * @shutdown: Called at shut-down time to quiesce the device. + * @suspend: Called when a device on this bus wants to go to sleep mode. + * @resume: Called to bring a device on this bus out of sleep mode. + * @pm: Power management operations of this bus, which call the specific + * device driver's pm-ops. + * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU + * driver implementations to a bus and allow the driver to do + * bus-specific setup. + * @p: The private data of the driver core, only the driver core can + * touch this. + * + * A bus is a channel between the processor and one or more devices. For the + * purposes of the device model, all devices are connected via a bus, even if + * it is an internal, virtual, "platform" bus. Buses can plug into each other. + * A USB controller is usually a PCI device, for example. The device model + * represents the actual connections between buses and the devices they control. + * A bus is represented by the bus_type structure. It contains the name, the + * default attributes, the bus' methods, PM operations, and the driver core's + * private data.
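+ *
+ * Editor's note -- an illustrative registration sketch; the bus name and
+ * match callback are assumed, not part of this header:
+ *
+ *        static struct bus_type foo_bus_type = {
+ *                .name  = "foo",
+ *                .match = foo_match,
+ *        };
+ *
+ *        ret = bus_register(&foo_bus_type);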
+ */ +struct bus_type { + const char *name; + const char *dev_name; + struct device *dev_root; + struct bus_attribute *bus_attrs; + struct device_attribute *dev_attrs; + struct driver_attribute *drv_attrs; + + int (*match)(struct device *dev, struct device_driver *drv); + int (*uevent)(struct device *dev, struct kobj_uevent_env *env); + int (*probe)(struct device *dev); + int (*remove)(struct device *dev); + void (*shutdown)(struct device *dev); + + int (*suspend)(struct device *dev, pm_message_t state); + int (*resume)(struct device *dev); + + const struct dev_pm_ops *pm; + + struct iommu_ops *iommu_ops; + + struct subsys_private *p; +}; + +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define bus_register(subsys) \ +({ \ + static struct lock_class_key __key; \ + __bus_register(subsys, &__key); \ +}) +extern int __must_check __bus_register(struct bus_type *bus, + struct lock_class_key *key); +extern void bus_unregister(struct bus_type *bus); + +extern int __must_check bus_rescan_devices(struct bus_type *bus); + +/* iterator helpers for buses */ +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; +void subsys_dev_iter_init(struct subsys_dev_iter *iter, + struct bus_type *subsys, + struct device *start, + const struct device_type *type); +struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); +void subsys_dev_iter_exit(struct subsys_dev_iter *iter); + +int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, + int (*fn)(struct device *dev, void *data)); +struct device *bus_find_device(struct bus_type *bus, struct device *start, + void *data, + int (*match)(struct device *dev, void *data)); +struct device *bus_find_device_by_name(struct bus_type *bus, + struct device *start, + const char *name); +struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, + struct device *hint); +int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, + void *data, int (*fn)(struct device_driver *, void *)); +void bus_sort_breadthfirst(struct bus_type *bus, + int (*compare)(const struct device *a, + const struct device *b)); +/* + * Bus notifiers: Get notified of addition/removal of devices + * and binding/unbinding of drivers to devices. + * In the long run, it should be a replacement for the platform + * notify hooks. + */ +struct notifier_block; + +extern int bus_register_notifier(struct bus_type *bus, + struct notifier_block *nb); +extern int bus_unregister_notifier(struct bus_type *bus, + struct notifier_block *nb); + +/* All 4 notifers below get called with the target struct device * + * as an argument. Note that those functions are likely to be called + * with the device lock held in the core, so be careful. + */ +#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ +#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ +#define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be + bound */ +#define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */ +#define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be + unbound */ +#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound + from the device */ + +extern struct kset *bus_get_kset(struct bus_type *bus); +extern struct klist *bus_get_device_klist(struct bus_type *bus); + +/** + * struct device_driver - The basic device driver structure + * @name: Name of the device driver. + * @bus: The bus which the device of this driver belongs to. 
+ * @owner: The module owner. + * @mod_name: Used for built-in modules. + * @suppress_bind_attrs: Disables bind/unbind via sysfs. + * @of_match_table: The open firmware table. + * @probe: Called to query the existence of a specific device, + * whether this driver can work with it, and bind the driver + * to a specific device. + * @remove: Called when the device is removed from the system to + * unbind a device from this driver. + * @shutdown: Called at shut-down time to quiesce the device. + * @suspend: Called to put the device to sleep mode. Usually to a + * low power state. + * @resume: Called to bring a device from sleep mode. + * @groups: Default attributes that get created by the driver core + * automatically. + * @pm: Power management operations of the device which matched + * this driver. + * @p: Driver core's private data, no one other than the driver + * core can touch this. + * + * The device driver-model tracks all of the drivers known to the system. + * The main reason for this tracking is to enable the driver core to match + * up drivers with new devices. Once drivers are known objects within the + * system, however, a number of other things become possible. Device drivers + * can export information and configuration variables that are independent + * of any specific device. + */ +struct device_driver { + const char *name; + struct bus_type *bus; + + struct module *owner; + const char *mod_name; /* used for built-in modules */ + + bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ + + const struct of_device_id *of_match_table; + + int (*probe) (struct device *dev); + int (*remove) (struct device *dev); + void (*shutdown) (struct device *dev); + int (*suspend) (struct device *dev, pm_message_t state); + int (*resume) (struct device *dev); + const struct attribute_group **groups; + + const struct dev_pm_ops *pm; + + struct driver_private *p; +}; + + +extern int __must_check driver_register(struct device_driver *drv); +extern void driver_unregister(struct device_driver *drv); + +extern struct device_driver *driver_find(const char *name, + struct bus_type *bus); +extern int driver_probe_done(void); +extern void wait_for_device_probe(void); + + +/* sysfs interface for exporting driver attributes */ + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *driver, char *buf); + ssize_t (*store)(struct device_driver *driver, const char *buf, + size_t count); +}; + +#define DRIVER_ATTR(_name, _mode, _show, _store) \ +struct driver_attribute driver_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +extern int __must_check driver_create_file(struct device_driver *driver, + const struct driver_attribute *attr); +extern void driver_remove_file(struct device_driver *driver, + const struct driver_attribute *attr); + +extern int __must_check driver_for_each_device(struct device_driver *drv, + struct device *start, + void *data, + int (*fn)(struct device *dev, + void *)); +struct device *driver_find_device(struct device_driver *drv, + struct device *start, void *data, + int (*match)(struct device *dev, void *data)); + +/** + * struct subsys_interface - interfaces to device functions + * @name: name of the device function + * @subsys: subsytem of the devices to attach to + * @node: the list of functions registered at the subsystem + * @add_dev: device hookup to device function handler + * @remove_dev: device hookup to device function handler + * + * Simple interfaces attached to a subsystem. 
Multiple interfaces can + * attach to a subsystem and its devices. Unlike drivers, they do not + * exclusively claim or control devices. Interfaces usually represent + * a specific functionality of a subsystem/class of devices. + */ +struct subsys_interface { + const char *name; + struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *dev, struct subsys_interface *sif); + int (*remove_dev)(struct device *dev, struct subsys_interface *sif); +}; + +int subsys_interface_register(struct subsys_interface *sif); +void subsys_interface_unregister(struct subsys_interface *sif); + +int subsys_system_register(struct bus_type *subsys, + const struct attribute_group **groups); + +/** + * struct class - device classes + * @name: Name of the class. + * @owner: The module owner. + * @class_attrs: Default attributes of this class. + * @dev_attrs: Default attributes of the devices belong to the class. + * @dev_bin_attrs: Default binary attributes of the devices belong to the class. + * @dev_kobj: The kobject that represents this class and links it into the hierarchy. + * @dev_uevent: Called when a device is added, removed from this class, or a + * few other things that generate uevents to add the environment + * variables. + * @devnode: Callback to provide the devtmpfs. + * @class_release: Called to release this class. + * @dev_release: Called to release the device. + * @suspend: Used to put the device to sleep mode, usually to a low power + * state. + * @resume: Used to bring the device from the sleep mode. + * @ns_type: Callbacks so sysfs can detemine namespaces. + * @namespace: Namespace of the device belongs to this class. + * @pm: The default device power management operations of this class. + * @p: The private data of the driver core, no one other than the + * driver core can touch this. + * + * A class is a higher-level view of a device that abstracts out low-level + * implementation details. Drivers may see a SCSI disk or an ATA disk, but, + * at the class level, they are all simply disks. Classes allow user space + * to work with devices based on what they do, rather than how they are + * connected or how they work. 
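+ *
+ * Editor's note -- an illustrative sketch; the class name is assumed, not
+ * part of this header:
+ *
+ *        static struct class foo_class = {
+ *                .name  = "foo",
+ *                .owner = THIS_MODULE,
+ *        };
+ *
+ *        ret = class_register(&foo_class);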
+ */ +struct class { + const char *name; + struct module *owner; + + struct class_attribute *class_attrs; + struct device_attribute *dev_attrs; + struct bin_attribute *dev_bin_attrs; + struct kobject *dev_kobj; + + int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); + char *(*devnode)(struct device *dev, umode_t *mode); + + void (*class_release)(struct class *class); + void (*dev_release)(struct device *dev); + + int (*suspend)(struct device *dev, pm_message_t state); + int (*resume)(struct device *dev); + + const struct kobj_ns_type_operations *ns_type; + const void *(*namespace)(struct device *dev); + + const struct dev_pm_ops *pm; + + struct subsys_private *p; +}; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +extern struct kobject *sysfs_dev_block_kobj; +extern struct kobject *sysfs_dev_char_kobj; +extern int __must_check __class_register(struct class *class, + struct lock_class_key *key); +extern void class_unregister(struct class *class); + +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define class_register(class) \ +({ \ + static struct lock_class_key __key; \ + __class_register(class, &__key); \ +}) + +struct class_compat; +struct class_compat *class_compat_register(const char *name); +void class_compat_unregister(struct class_compat *cls); +int class_compat_create_link(struct class_compat *cls, struct device *dev, + struct device *device_link); +void class_compat_remove_link(struct class_compat *cls, struct device *dev, + struct device *device_link); + +extern void class_dev_iter_init(struct class_dev_iter *iter, + struct class *class, + struct device *start, + const struct device_type *type); +extern struct device *class_dev_iter_next(struct class_dev_iter *iter); +extern void class_dev_iter_exit(struct class_dev_iter *iter); + +extern int class_for_each_device(struct class *class, struct device *start, + void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *class_find_device(struct class *class, + struct device *start, void *data, + int (*match)(struct device *, void *)); + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(struct class *class, struct class_attribute *attr, + char *buf); + ssize_t (*store)(struct class *class, struct class_attribute *attr, + const char *buf, size_t count); + const void *(*namespace)(struct class *class, + const struct class_attribute *attr); +}; + +#define CLASS_ATTR(_name, _mode, _show, _store) \ +struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) + +extern int __must_check class_create_file(struct class *class, + const struct class_attribute *attr); +extern void class_remove_file(struct class *class, + const struct class_attribute *attr); + +/* Simple class attribute that is just a static string */ + +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +/* Currently read-only only */ +#define _CLASS_ATTR_STRING(_name, _mode, _str) \ + { __ATTR(_name, _mode, show_class_attr_string, NULL), _str } +#define CLASS_ATTR_STRING(_name, _mode, _str) \ + struct class_attribute_string class_attr_##_name = \ + _CLASS_ATTR_STRING(_name, _mode, _str) + +extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, + char *buf); + +struct class_interface { + struct list_head node; + struct class *class; + + int (*add_dev) (struct device *, struct class_interface *); + void (*remove_dev) (struct device *, 
struct class_interface *); +}; + +extern int __must_check class_interface_register(struct class_interface *); +extern void class_interface_unregister(struct class_interface *); + +extern struct class * __must_check __class_create(struct module *owner, + const char *name, + struct lock_class_key *key); +extern void class_destroy(struct class *cls); + +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define class_create(owner, name) \ +({ \ + static struct lock_class_key __key; \ + __class_create(owner, name, &__key); \ +}) + +/* + * The type of device, "struct device" is embedded in. A class + * or bus can contain devices of different types + * like "partitions" and "disks", "mouse" and "event". + * This identifies the device type and carries type-specific + * information, equivalent to the kobj_type of a kobject. + * If "name" is specified, the uevent will contain it in + * the DEVTYPE variable. + */ +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(struct device *dev, struct kobj_uevent_env *env); + char *(*devnode)(struct device *dev, umode_t *mode); + void (*release)(struct device *dev); + + const struct dev_pm_ops *pm; +}; + +/* interface for exporting device attributes */ +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *dev, struct device_attribute *attr, + char *buf); + ssize_t (*store)(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +}; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +ssize_t device_show_int(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t device_store_int(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); + +#define DEVICE_ATTR(_name, _mode, _show, _store) \ + struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DEVICE_ULONG_ATTR(_name, _mode, _var) \ + struct dev_ext_attribute dev_attr_##_name = \ + { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } +#define DEVICE_INT_ATTR(_name, _mode, _var) \ + struct dev_ext_attribute dev_attr_##_name = \ + { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } + +extern int device_create_file(struct device *device, + const struct device_attribute *entry); +extern void device_remove_file(struct device *dev, + const struct device_attribute *attr); +extern int __must_check device_create_bin_file(struct device *dev, + const struct bin_attribute *attr); +extern void device_remove_bin_file(struct device *dev, + const struct bin_attribute *attr); +extern int device_schedule_callback_owner(struct device *dev, + void (*func)(struct device *dev), struct module *owner); + +/* This is a macro to avoid include problems with THIS_MODULE */ +#define device_schedule_callback(dev, func) \ + device_schedule_callback_owner(dev, func, THIS_MODULE) + +/* device resource management */ +typedef void (*dr_release_t)(struct device *dev, void *res); +typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); + +#ifdef CONFIG_DEBUG_DEVRES +extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp, + const char *name); +#define devres_alloc(release, size, gfp) \ + 
__devres_alloc(release, size, gfp, #release) +#else +extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp); +#endif +extern void devres_free(void *res); +extern void devres_add(struct device *dev, void *res); +extern void *devres_find(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern void *devres_get(struct device *dev, void *new_res, + dr_match_t match, void *match_data); +extern void *devres_remove(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern int devres_destroy(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); + +/* devres group */ +extern void * __must_check devres_open_group(struct device *dev, void *id, + gfp_t gfp); +extern void devres_close_group(struct device *dev, void *id); +extern void devres_remove_group(struct device *dev, void *id); +extern int devres_release_group(struct device *dev, void *id); + +/* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */ +extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp); +extern void devm_kfree(struct device *dev, void *p); + +void __iomem *devm_request_and_ioremap(struct device *dev, + struct resource *res); + +struct device_dma_parameters { + /* + * a low level driver may set these to teach IOMMU code about + * sg limitations. + */ + unsigned int max_segment_size; + unsigned long segment_boundary_mask; +}; + +/** + * struct device - The basic device structure + * @parent: The device's "parent" device, the device to which it is attached. + * In most cases, a parent device is some sort of bus or host + * controller. If parent is NULL, the device, is a top-level device, + * which is not usually what you want. + * @p: Holds the private data of the driver core portions of the device. + * See the comment of the struct device_private for detail. + * @kobj: A top-level, abstract class from which other classes are derived. + * @init_name: Initial name of the device. + * @type: The type of device. + * This identifies the device type and carries type-specific + * information. + * @mutex: Mutex to synchronize calls to its driver. + * @bus: Type of bus device is on. + * @driver: Which driver has allocated this + * @platform_data: Platform data specific to the device. + * Example: For devices on custom boards, as typical of embedded + * and SOC based hardware, Linux often uses platform_data to point + * to board-specific structures describing devices and how they + * are wired. That can include what ports are available, chip + * variants, which GPIO pins act in what additional roles, and so + * on. This shrinks the "Board Support Packages" (BSPs) and + * minimizes board-specific #ifdefs in drivers. + * @power: For device power management. + * See Documentation/power/devices.txt for details. + * @pm_domain: Provide callbacks that are executed during system suspend, + * hibernation, system resume and during runtime PM transitions + * along with subsystem-level and driver-level callbacks. + * @numa_node: NUMA node this device is close to. + * @dma_mask: Dma mask (if dma'ble device). + * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all + * hardware supports 64-bit addresses for consistent allocations + * such descriptors. + * @dma_parms: A low level driver may set these to teach IOMMU code about + * segment limitations. + * @dma_pools: Dma pools (if dma'ble device). + * @dma_mem: Internal for coherent mem override. + * @archdata: For arch-specific additions. 
+ * @of_node: Associated device tree node. + * @devt: For creating the sysfs "dev". + * @id: device instance + * @devres_lock: Spinlock to protect the resource of the device. + * @devres_head: The resources list of the device. + * @knode_class: The node used to add the device to the class list. + * @class: The class of the device. + * @groups: Optional attribute groups. + * @release: Callback to free the device after all references have + * gone away. This should be set by the allocator of the + * device (i.e. the bus driver that discovered the device). + * + * At the lowest level, every device in a Linux system is represented by an + * instance of struct device. The device structure contains the information + * that the device model core needs to model the system. Most subsystems, + * however, track additional information about the devices they host. As a + * result, it is rare for devices to be represented by bare device structures; + * instead, that structure, like kobject structures, is usually embedded within + * a higher-level representation of the device. + */ +struct device { + struct device *parent; + + struct device_private *p; + + struct kobject kobj; + const char *init_name; /* initial name of the device */ + const struct device_type *type; + + struct mutex mutex; /* mutex to synchronize calls to + * its driver. + */ + + struct bus_type *bus; /* type of bus device is on */ + struct device_driver *driver; /* which driver has allocated this + device */ + void *platform_data; /* Platform specific data, device + core doesn't touch it */ + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + +#ifdef CONFIG_NUMA + int numa_node; /* NUMA node this device is close to */ +#endif + u64 *dma_mask; /* dma mask (if dma'able device) */ + u64 coherent_dma_mask;/* Like dma_mask, but for + alloc_coherent mappings as + not all hardware supports + 64 bit addresses for consistent + allocations such descriptors. */ + + struct device_dma_parameters *dma_parms; + + struct list_head dma_pools; /* dma pools (if dma'ble) */ + + struct dma_coherent_mem *dma_mem; /* internal for coherent mem + override */ + /* arch specific additions */ + struct dev_archdata archdata; + + struct device_node *of_node; /* associated device tree node */ + + dev_t devt; /* dev_t, creates the sysfs "dev" */ + u32 id; /* device instance */ + + spinlock_t devres_lock; + struct list_head devres_head; + + struct klist_node knode_class; + struct class *class; + const struct attribute_group **groups; /* optional groups */ + + void (*release)(struct device *dev); +}; + +/* Get the wakeup routines, which depend on struct device */ +#include <linux/pm_wakeup.h> + +static inline const char *dev_name(const struct device *dev) +{ + /* Use the init name until the kobject becomes available */ + if (dev->init_name) + return dev->init_name; + + return kobject_name(&dev->kobj); +} + +extern __printf(2, 3) +int dev_set_name(struct device *dev, const char *name, ...); + +#ifdef CONFIG_NUMA +static inline int dev_to_node(struct device *dev) +{ + return dev->numa_node; +} +static inline void set_dev_node(struct device *dev, int node) +{ + dev->numa_node = node; +} +#else +static inline int dev_to_node(struct device *dev) +{ + return -1; +} +static inline void set_dev_node(struct device *dev, int node) +{ +} +#endif + +static inline struct pm_subsys_data *dev_to_psd(struct device *dev) +{ + return dev ? 
dev->power.subsys_data : NULL; +} + +static inline unsigned int dev_get_uevent_suppress(const struct device *dev) +{ + return dev->kobj.uevent_suppress; +} + +static inline void dev_set_uevent_suppress(struct device *dev, int val) +{ + dev->kobj.uevent_suppress = val; +} + +static inline int device_is_registered(struct device *dev) +{ + return dev->kobj.state_in_sysfs; +} + +static inline void device_enable_async_suspend(struct device *dev) +{ + if (!dev->power.is_prepared) + dev->power.async_suspend = true; +} + +static inline void device_disable_async_suspend(struct device *dev) +{ + if (!dev->power.is_prepared) + dev->power.async_suspend = false; +} + +static inline bool device_async_suspend_enabled(struct device *dev) +{ + return !!dev->power.async_suspend; +} + +static inline void pm_suspend_ignore_children(struct device *dev, bool enable) +{ + dev->power.ignore_children = enable; +} + +static inline void device_lock(struct device *dev) +{ + mutex_lock(&dev->mutex); +} + +static inline int device_trylock(struct device *dev) +{ + return mutex_trylock(&dev->mutex); +} + +static inline void device_unlock(struct device *dev) +{ + mutex_unlock(&dev->mutex); +} + +void driver_init(void); + +/* + * High level routines for use by the bus drivers + */ +extern int __must_check device_register(struct device *dev); +extern void device_unregister(struct device *dev); +extern void device_initialize(struct device *dev); +extern int __must_check device_add(struct device *dev); +extern void device_del(struct device *dev); +extern int device_for_each_child(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *device_find_child(struct device *dev, void *data, + int (*match)(struct device *dev, void *data)); +extern int device_rename(struct device *dev, const char *new_name); +extern int device_move(struct device *dev, struct device *new_parent, + enum dpm_order dpm_order); +extern const char *device_get_devnode(struct device *dev, + umode_t *mode, const char **tmp); +extern void *dev_get_drvdata(const struct device *dev); +extern int dev_set_drvdata(struct device *dev, void *data); + +/* + * Root device objects for grouping under /sys/devices + */ +extern struct device *__root_device_register(const char *name, + struct module *owner); + +/* + * This is a macro to avoid include problems with THIS_MODULE, + * just as per what is done for device_schedule_callback() above. + */ +#define root_device_register(name) \ + __root_device_register(name, THIS_MODULE) + +extern void root_device_unregister(struct device *root); + +static inline void *dev_get_platdata(const struct device *dev) +{ + return dev->platform_data; +} + +/* + * Manual binding of a device to driver. See drivers/base/bus.c + * for information on use. 
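+ *
+ * Rough sketch of the expected calling sequence (an assumption based on the
+ * declarations below; see drivers/base/bus.c for the authoritative usage).
+ * A bus that does its own matching typically sets dev->driver itself and
+ * then asks the core to create the sysfs links, with the device lock held:
+ *
+ *	int err;
+ *
+ *	device_lock(dev);
+ *	dev->driver = drv;
+ *	err = device_bind_driver(dev);
+ *	device_unlock(dev);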
+ */ +extern int __must_check device_bind_driver(struct device *dev); +extern void device_release_driver(struct device *dev); +extern int __must_check device_attach(struct device *dev); +extern int __must_check driver_attach(struct device_driver *drv); +extern int __must_check device_reprobe(struct device *dev); + +/* + * Easy functions for dynamically creating devices on the fly + */ +extern struct device *device_create_vargs(struct class *cls, + struct device *parent, + dev_t devt, + void *drvdata, + const char *fmt, + va_list vargs); +extern __printf(5, 6) +struct device *device_create(struct class *cls, struct device *parent, + dev_t devt, void *drvdata, + const char *fmt, ...); +extern void device_destroy(struct class *cls, dev_t devt); + +/* + * Platform "fixup" functions - allow the platform to have their say + * about devices and actions that the general device layer doesn't + * know about. + */ +/* Notify platform of device discovery */ +extern int (*platform_notify)(struct device *dev); + +extern int (*platform_notify_remove)(struct device *dev); + + +/* + * get_device - atomically increment the reference count for the device. + * + */ +extern struct device *get_device(struct device *dev); +extern void put_device(struct device *dev); + +extern void wait_for_device_probe(void); + +#ifdef CONFIG_DEVTMPFS +extern int devtmpfs_create_node(struct device *dev); +extern int devtmpfs_delete_node(struct device *dev); +extern int devtmpfs_mount(const char *mntdir); +#else +static inline int devtmpfs_create_node(struct device *dev) { return 0; } +static inline int devtmpfs_delete_node(struct device *dev) { return 0; } +static inline int devtmpfs_mount(const char *mountpoint) { return 0; } +#endif + +/* drivers/base/power/shutdown.c */ +extern void device_shutdown(void); + +/* debugging and troubleshooting/diagnostic helpers. */ +extern const char *dev_driver_string(const struct device *dev); + + +#ifdef CONFIG_PRINTK + +extern int __dev_printk(const char *level, const struct device *dev, + struct va_format *vaf); +extern __printf(3, 4) +int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) + ; +extern __printf(2, 3) +int dev_emerg(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +int dev_alert(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +int dev_crit(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +int dev_err(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +int dev_warn(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +int dev_notice(const struct device *dev, const char *fmt, ...); +extern __printf(2, 3) +int _dev_info(const struct device *dev, const char *fmt, ...); + +#else + +static inline int __dev_printk(const char *level, const struct device *dev, + struct va_format *vaf) +{ return 0; } +static inline __printf(3, 4) +int dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) +{ return 0; } + +static inline __printf(2, 3) +int dev_emerg(const struct device *dev, const char *fmt, ...) +{ return 0; } +static inline __printf(2, 3) +int dev_crit(const struct device *dev, const char *fmt, ...) +{ return 0; } +static inline __printf(2, 3) +int dev_alert(const struct device *dev, const char *fmt, ...) +{ return 0; } +static inline __printf(2, 3) +int dev_err(const struct device *dev, const char *fmt, ...) +{ return 0; } +static inline __printf(2, 3) +int dev_warn(const struct device *dev, const char *fmt, ...) 
+{ return 0; } +static inline __printf(2, 3) +int dev_notice(const struct device *dev, const char *fmt, ...) +{ return 0; } +static inline __printf(2, 3) +int _dev_info(const struct device *dev, const char *fmt, ...) +{ return 0; } + +#endif + +/* + * Stupid hackaround for existing uses of non-printk uses dev_info + * + * Note that the definition of dev_info below is actually _dev_info + * and a macro is used to avoid redefining dev_info + */ + +#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg) + +#if defined(CONFIG_DYNAMIC_DEBUG) +#define dev_dbg(dev, format, ...) \ +do { \ + dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ +} while (0) +#elif defined(DEBUG) +#define dev_dbg(dev, format, arg...) \ + dev_printk(KERN_DEBUG, dev, format, ##arg) +#else +#define dev_dbg(dev, format, arg...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, format, ##arg); \ + 0; \ +}) +#endif + +#ifdef VERBOSE_DEBUG +#define dev_vdbg dev_dbg +#else +#define dev_vdbg(dev, format, arg...) \ +({ \ + if (0) \ + dev_printk(KERN_DEBUG, dev, format, ##arg); \ + 0; \ +}) +#endif + +/* + * dev_WARN*() acts like dev_printk(), but with the key difference + * of using a WARN/WARN_ON to get the message out, including the + * file/line information and a backtrace. + */ +#define dev_WARN(dev, format, arg...) \ + WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg); + +#define dev_WARN_ONCE(dev, condition, format, arg...) \ + WARN_ONCE(condition, "Device %s\n" format, \ + dev_driver_string(dev), ## arg) + +/* Create alias, so I can be autoloaded. */ +#define MODULE_ALIAS_CHARDEV(major,minor) \ + MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) +#define MODULE_ALIAS_CHARDEV_MAJOR(major) \ + MODULE_ALIAS("char-major-" __stringify(major) "-*") + +#ifdef CONFIG_SYSFS_DEPRECATED +extern long sysfs_deprecated; +#else +#define sysfs_deprecated 0 +#endif + +/** + * module_driver() - Helper macro for drivers that don't do anything + * special in module init/exit. This eliminates a lot of boilerplate. + * Each module may only use this macro once, and calling it replaces + * module_init() and module_exit(). + * + * @__driver: driver name + * @__register: register function for this driver type + * @__unregister: unregister function for this driver type + * @...: Additional arguments to be passed to __register and __unregister. + * + * Use this macro to construct bus specific macros for registering + * drivers, and do not use it on its own. + */ +#define module_driver(__driver, __register, __unregister, ...) 
\ +static int __init __driver##_init(void) \ +{ \ + return __register(&(__driver) , ##__VA_ARGS__); \ +} \ +module_init(__driver##_init); \ +static void __exit __driver##_exit(void) \ +{ \ + __unregister(&(__driver) , ##__VA_ARGS__); \ +} \ +module_exit(__driver##_exit); + +#endif /* _DEVICE_H_ */ diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h new file mode 100644 index 00000000..8b64221b --- /dev/null +++ b/include/linux/device_cgroup.h @@ -0,0 +1,19 @@ +#include <linux/fs.h> + +#ifdef CONFIG_CGROUP_DEVICE +extern int __devcgroup_inode_permission(struct inode *inode, int mask); +extern int devcgroup_inode_mknod(int mode, dev_t dev); +static inline int devcgroup_inode_permission(struct inode *inode, int mask) +{ + if (likely(!inode->i_rdev)) + return 0; + if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)) + return 0; + return __devcgroup_inode_permission(inode, mask); +} +#else +static inline int devcgroup_inode_permission(struct inode *inode, int mask) +{ return 0; } +static inline int devcgroup_inode_mknod(int mode, dev_t dev) +{ return 0; } +#endif diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h new file mode 100644 index 00000000..5ce0e5fd --- /dev/null +++ b/include/linux/devpts_fs.h @@ -0,0 +1,49 @@ +/* -*- linux-c -*- --------------------------------------------------------- * + * + * linux/include/linux/devpts_fs.h + * + * Copyright 1998-2004 H. Peter Anvin -- All Rights Reserved + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. + * + * ------------------------------------------------------------------------- */ + +#ifndef _LINUX_DEVPTS_FS_H +#define _LINUX_DEVPTS_FS_H + +#include <linux/errno.h> + +#ifdef CONFIG_UNIX98_PTYS + +int devpts_new_index(struct inode *ptmx_inode); +void devpts_kill_index(struct inode *ptmx_inode, int idx); +/* mknod in devpts */ +int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty); +/* get tty structure */ +struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number); +/* unlink */ +void devpts_pty_kill(struct tty_struct *tty); + +#else + +/* Dummy stubs in the no-pty case */ +static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; } +static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { } +static inline int devpts_pty_new(struct inode *ptmx_inode, + struct tty_struct *tty) +{ + return -EINVAL; +} +static inline struct tty_struct *devpts_get_tty(struct inode *pts_inode, + int number) +{ + return NULL; +} +static inline void devpts_pty_kill(struct tty_struct *tty) { } + +#endif + + +#endif /* _LINUX_DEVPTS_FS_H */ diff --git a/include/linux/digsig.h b/include/linux/digsig.h new file mode 100644 index 00000000..6f85a070 --- /dev/null +++ b/include/linux/digsig.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2011 Nokia Corporation + * Copyright (C) 2011 Intel Corporation + * + * Author: + * Dmitry Kasatkin <dmitry.kasatkin@nokia.com> + * <dmitry.kasatkin@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
+ * + */ + +#ifndef _DIGSIG_H +#define _DIGSIG_H + +#include <linux/key.h> + +enum pubkey_algo { + PUBKEY_ALGO_RSA, + PUBKEY_ALGO_MAX, +}; + +enum digest_algo { + DIGEST_ALGO_SHA1, + DIGEST_ALGO_SHA256, + DIGEST_ALGO_MAX +}; + +struct pubkey_hdr { + uint8_t version; /* key format version */ + uint32_t timestamp; /* key made, always 0 for now */ + uint8_t algo; + uint8_t nmpi; + char mpi[0]; +} __packed; + +struct signature_hdr { + uint8_t version; /* signature format version */ + uint32_t timestamp; /* signature made */ + uint8_t algo; + uint8_t hash; + uint8_t keyid[8]; + uint8_t nmpi; + char mpi[0]; +} __packed; + +#if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE) + +int digsig_verify(struct key *keyring, const char *sig, int siglen, + const char *digest, int digestlen); + +#else + +static inline int digsig_verify(struct key *keyring, const char *sig, + int siglen, const char *digest, int digestlen) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_SIGNATURE */ + +#endif /* _DIGSIG_H */ diff --git a/include/linux/dio.h b/include/linux/dio.h new file mode 100644 index 00000000..2cc0fd00 --- /dev/null +++ b/include/linux/dio.h @@ -0,0 +1,280 @@ +/* header file for DIO boards for the HP300 architecture. + * Maybe this should handle DIO-II later? + * The general structure of this is vaguely based on how + * the Amiga port handles Zorro boards. + * Copyright (C) Peter Maydell 05/1998 <pmaydell@chiark.greenend.org.uk> + * Converted to driver model Jochen Friedrich <jochen@scram.de> + * + * The board IDs are from the NetBSD kernel, which for once provided + * helpful comments... + * + * This goes with drivers/dio/dio.c + */ + +#ifndef _LINUX_DIO_H +#define _LINUX_DIO_H + +/* The DIO boards in a system are distinguished by 'select codes' which + * range from 0-63 (DIO) and 132-255 (DIO-II). + * The DIO board with select code sc is located at physical address + * 0x600000 + sc * 0x10000 + * So DIO cards cover [0x600000-0x800000); the areas [0x200000-0x400000) and + * [0x800000-0x1000000) are for additional space required by things + * like framebuffers. [0x400000-0x600000) is for miscellaneous internal I/O. + * On Linux, this is currently all mapped into the virtual address space + * at 0xf0000000 on bootup. + * DIO-II boards are at 0x1000000 + (sc - 132) * 0x400000 + * which is address range [0x1000000-0x20000000) -- too big to map completely, + * so currently we just don't handle DIO-II boards. It wouldn't be hard to + * do with ioremap() though. 
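+ *
+ * Worked example: the DIO card at select code 14 therefore lives at physical
+ * address 0x600000 + 14 * 0x10000 = 0x6e0000; dio_scodetophysaddr(),
+ * declared below, performs this select-code to physical-address translation.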
+ */ + +#include <linux/device.h> + +#ifdef __KERNEL__ + +#include <asm/hp300hw.h> + +typedef __u16 dio_id; + + /* + * DIO devices + */ + +struct dio_dev { + struct dio_bus *bus; + dio_id id; + int scode; + struct dio_driver *driver; /* which driver has allocated this device */ + struct device dev; /* Generic device interface */ + u8 ipl; + char name[64]; + struct resource resource; +}; + +#define to_dio_dev(n) container_of(n, struct dio_dev, dev) + + /* + * DIO bus + */ + +struct dio_bus { + struct list_head devices; /* list of devices on this bus */ + unsigned int num_resources; /* number of resources */ + struct resource resources[2]; /* address space routed to this bus */ + struct device dev; + char name[10]; +}; + +extern struct dio_bus dio_bus; /* Single DIO bus */ +extern struct bus_type dio_bus_type; + + /* + * DIO device IDs + */ + +struct dio_device_id { + dio_id id; /* Device ID or DIO_WILDCARD */ + unsigned long driver_data; /* Data private to the driver */ +}; + + /* + * DIO device drivers + */ + +struct dio_driver { + struct list_head node; + char *name; + const struct dio_device_id *id_table; /* NULL if wants all devices */ + int (*probe)(struct dio_dev *z, const struct dio_device_id *id); +/* New device inserted */ + void (*remove)(struct dio_dev *z); /* Device removed (NULL if not a hot-plug capable driver) */ + struct device_driver driver; +}; + +#define to_dio_driver(drv) container_of(drv, struct dio_driver, driver) + +/* DIO/DIO-II boards all have the following 8bit registers. + * These are offsets from the base of the device. + */ +#define DIO_IDOFF 0x01 /* primary device ID */ +#define DIO_IPLOFF 0x03 /* interrupt priority level */ +#define DIO_SECIDOFF 0x15 /* secondary device ID */ +#define DIOII_SIZEOFF 0x101 /* device size, DIO-II only */ +#define DIO_VIRADDRBASE 0xf0000000UL /* vir addr where IOspace is mapped */ + +#define DIO_BASE 0x600000 /* start of DIO space */ +#define DIO_END 0x1000000 /* end of DIO space */ +#define DIO_DEVSIZE 0x10000 /* size of a DIO device */ + +#define DIOII_BASE 0x01000000 /* start of DIO-II space */ +#define DIOII_END 0x20000000 /* end of DIO-II space */ +#define DIOII_DEVSIZE 0x00400000 /* size of a DIO-II device */ + +/* Highest valid select code. If we add DIO-II support this should become + * 256 for everything except HP320, which only has DIO. + */ +#define DIO_SCMAX (hp300_model == HP_320 ? 32 : 256) +#define DIOII_SCBASE 132 /* lowest DIO-II select code */ +#define DIO_SCINHOLE(scode) (((scode) >= 32) && ((scode) < DIOII_SCBASE)) +#define DIO_ISDIOII(scode) ((scode) >= 132 && (scode) < 256) + +/* macros to read device IDs, given base address */ +#define DIO_ID(baseaddr) in_8((baseaddr) + DIO_IDOFF) +#define DIO_SECID(baseaddr) in_8((baseaddr) + DIO_SECIDOFF) + +/* extract the interrupt level */ +#define DIO_IPL(baseaddr) (((in_8((baseaddr) + DIO_IPLOFF) >> 4) & 0x03) + 3) + +/* find the size of a DIO-II board's address space. + * DIO boards are all fixed length. + */ +#define DIOII_SIZE(baseaddr) ((in_8((baseaddr) + DIOII_SIZEOFF) + 1) * 0x100000) + +/* general purpose macro for both DIO and DIO-II */ +#define DIO_SIZE(scode, base) (DIO_ISDIOII((scode)) ? DIOII_SIZE((base)) : DIO_DEVSIZE) + +/* The hardware has primary and secondary IDs; we encode these in a single + * int as PRIMARY ID & (SECONDARY ID << 8). + * In practice this is only important for framebuffers, + * and everybody else just sets ID fields equal to the DIO_ID_FOO value. 
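+ *
+ * Illustrative example using the macro below: a topcat framebuffer entry in
+ * a driver's id table would use
+ *	DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_TOPCAT)
+ * which places the secondary ID in bits 8-15 and ORs in the primary ID,
+ * i.e. (0x02 << 8) | 0x39 == 0x0239.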
+ */ +#define DIO_ENCODE_ID(pr,sec) ((((int)sec & 0xff) << 8) | ((int)pr & 0xff)) +/* macro to determine whether a given primary ID requires a secondary ID byte */ +#define DIO_NEEDSSECID(id) ((id) == DIO_ID_FBUFFER) +#define DIO_WILDCARD 0xff + +/* Now a whole slew of macros giving device IDs and descriptive strings: */ +#define DIO_ID_DCA0 0x02 /* 98644A serial */ +#define DIO_DESC_DCA0 "98644A DCA0 serial" +#define DIO_ID_DCA0REM 0x82 /* 98644A serial */ +#define DIO_DESC_DCA0REM "98644A DCA0REM serial" +#define DIO_ID_DCA1 0x42 /* 98644A serial */ +#define DIO_DESC_DCA1 "98644A DCA1 serial" +#define DIO_ID_DCA1REM 0xc2 /* 98644A serial */ +#define DIO_DESC_DCA1REM "98644A DCA1REM serial" +#define DIO_ID_DCM 0x05 /* 98642A serial MUX */ +#define DIO_DESC_DCM "98642A DCM serial MUX" +#define DIO_ID_DCMREM 0x85 /* 98642A serial MUX */ +#define DIO_DESC_DCMREM "98642A DCMREM serial MUX" +#define DIO_ID_LAN 0x15 /* 98643A LAN */ +#define DIO_DESC_LAN "98643A LANCE ethernet" +#define DIO_ID_FHPIB 0x08 /* 98625A/98625B fast HP-IB */ +#define DIO_DESC_FHPIB "98625A/98625B fast HPIB" +#define DIO_ID_NHPIB 0x01 /* 98624A HP-IB (normal ie slow) */ +#define DIO_DESC_NHPIB "98624A HPIB" +#define DIO_ID_SCSI0 0x07 /* 98265A SCSI */ +#define DIO_DESC_SCSI0 "98265A SCSI0" +#define DIO_ID_SCSI1 0x27 /* ditto */ +#define DIO_DESC_SCSI1 "98265A SCSI1" +#define DIO_ID_SCSI2 0x47 /* ditto */ +#define DIO_DESC_SCSI2 "98265A SCSI2" +#define DIO_ID_SCSI3 0x67 /* ditto */ +#define DIO_DESC_SCSI3 "98265A SCSI3" +#define DIO_ID_FBUFFER 0x39 /* framebuffer: flavour is distinguished by secondary ID */ +#define DIO_DESC_FBUFFER "bitmapped display" +/* the NetBSD kernel source is a bit unsure as to what these next IDs actually do :-> */ +#define DIO_ID_MISC0 0x03 /* 98622A */ +#define DIO_DESC_MISC0 "98622A" +#define DIO_ID_MISC1 0x04 /* 98623A */ +#define DIO_DESC_MISC1 "98623A" +#define DIO_ID_PARALLEL 0x06 /* internal parallel */ +#define DIO_DESC_PARALLEL "internal parallel" +#define DIO_ID_MISC2 0x09 /* 98287A keyboard */ +#define DIO_DESC_MISC2 "98287A keyboard" +#define DIO_ID_MISC3 0x0a /* HP98635A FP accelerator */ +#define DIO_DESC_MISC3 "HP98635A FP accelerator" +#define DIO_ID_MISC4 0x0b /* timer */ +#define DIO_DESC_MISC4 "timer" +#define DIO_ID_MISC5 0x12 /* 98640A */ +#define DIO_DESC_MISC5 "98640A" +#define DIO_ID_MISC6 0x16 /* 98659A */ +#define DIO_DESC_MISC6 "98659A" +#define DIO_ID_MISC7 0x19 /* 237 display */ +#define DIO_DESC_MISC7 "237 display" +#define DIO_ID_MISC8 0x1a /* quad-wide card */ +#define DIO_DESC_MISC8 "quad-wide card" +#define DIO_ID_MISC9 0x1b /* 98253A */ +#define DIO_DESC_MISC9 "98253A" +#define DIO_ID_MISC10 0x1c /* 98627A */ +#define DIO_DESC_MISC10 "98253A" +#define DIO_ID_MISC11 0x1d /* 98633A */ +#define DIO_DESC_MISC11 "98633A" +#define DIO_ID_MISC12 0x1e /* 98259A */ +#define DIO_DESC_MISC12 "98259A" +#define DIO_ID_MISC13 0x1f /* 8741 */ +#define DIO_DESC_MISC13 "8741" +#define DIO_ID_VME 0x31 /* 98577A VME adapter */ +#define DIO_DESC_VME "98577A VME adapter" +#define DIO_ID_DCL 0x34 /* 98628A serial */ +#define DIO_DESC_DCL "98628A DCL serial" +#define DIO_ID_DCLREM 0xb4 /* 98628A serial */ +#define DIO_DESC_DCLREM "98628A DCLREM serial" +/* These are the secondary IDs for the framebuffers */ +#define DIO_ID2_GATORBOX 0x01 /* 98700/98710 "gatorbox" */ +#define DIO_DESC2_GATORBOX "98700/98710 \"gatorbox\" display" +#define DIO_ID2_TOPCAT 0x02 /* 98544/98545/98547 "topcat" */ +#define DIO_DESC2_TOPCAT "98544/98545/98547 \"topcat\" display" +#define DIO_ID2_RENAISSANCE 
0x04 /* 98720/98721 "renaissance" */ +#define DIO_DESC2_RENAISSANCE "98720/98721 \"renaissance\" display" +#define DIO_ID2_LRCATSEYE 0x05 /* lowres "catseye" */ +#define DIO_DESC2_LRCATSEYE "low-res catseye display" +#define DIO_ID2_HRCCATSEYE 0x06 /* highres colour "catseye" */ +#define DIO_DESC2_HRCCATSEYE "high-res color catseye display" +#define DIO_ID2_HRMCATSEYE 0x07 /* highres mono "catseye" */ +#define DIO_DESC2_HRMCATSEYE "high-res mono catseye display" +#define DIO_ID2_DAVINCI 0x08 /* 98730/98731 "davinci" */ +#define DIO_DESC2_DAVINCI "98730/98731 \"davinci\" display" +#define DIO_ID2_XXXCATSEYE 0x09 /* "catseye" */ +#define DIO_DESC2_XXXCATSEYE "catseye display" +#define DIO_ID2_HYPERION 0x0e /* A1096A "hyperion" */ +#define DIO_DESC2_HYPERION "A1096A \"hyperion\" display" +#define DIO_ID2_XGENESIS 0x0b /* "x-genesis"; no NetBSD support */ +#define DIO_DESC2_XGENESIS "\"x-genesis\" display" +#define DIO_ID2_TIGER 0x0c /* "tiger"; no NetBSD support */ +#define DIO_DESC2_TIGER "\"tiger\" display" +#define DIO_ID2_YGENESIS 0x0d /* "y-genesis"; no NetBSD support */ +#define DIO_DESC2_YGENESIS "\"y-genesis\" display" +/* if you add new IDs then you should tell dio.c about them so it can + * identify them... + */ + +extern int dio_find(int deviceid); +extern unsigned long dio_scodetophysaddr(int scode); +extern int dio_create_sysfs_dev_files(struct dio_dev *); + +/* New-style probing */ +extern int dio_register_driver(struct dio_driver *); +extern void dio_unregister_driver(struct dio_driver *); +extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z); +static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d) +{ + return d->driver; +} + +#define dio_resource_start(d) ((d)->resource.start) +#define dio_resource_end(d) ((d)->resource.end) +#define dio_resource_len(d) (resource_size(&(d)->resource)) +#define dio_resource_flags(d) ((d)->resource.flags) + +#define dio_request_device(d, name) \ + request_mem_region(dio_resource_start(d), dio_resource_len(d), name) +#define dio_release_device(d) \ + release_mem_region(dio_resource_start(d), dio_resource_len(d)) + +/* Similar to the helpers above, these manipulate per-dio_dev + * driver-specific data. They are really just a wrapper around + * the generic device structure functions of these calls. + */ +static inline void *dio_get_drvdata (struct dio_dev *d) +{ + return dev_get_drvdata(&d->dev); +} + +static inline void dio_set_drvdata (struct dio_dev *d, void *data) +{ + dev_set_drvdata(&d->dev, data); +} + +#endif /* __KERNEL__ */ +#endif /* ndef _LINUX_DIO_H */ diff --git a/include/linux/dirent.h b/include/linux/dirent.h new file mode 100644 index 00000000..f072fb8d --- /dev/null +++ b/include/linux/dirent.h @@ -0,0 +1,12 @@ +#ifndef _LINUX_DIRENT_H +#define _LINUX_DIRENT_H + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + unsigned short d_reclen; + unsigned char d_type; + char d_name[0]; +}; + +#endif diff --git a/include/linux/dlm.h b/include/linux/dlm.h new file mode 100644 index 00000000..6c7f6e95 --- /dev/null +++ b/include/linux/dlm.h @@ -0,0 +1,233 @@ +/****************************************************************************** +******************************************************************************* +** +** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. +** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. 
+** +** This copyrighted material is made available to anyone wishing to use, +** modify, copy, or redistribute it subject to the terms and conditions +** of the GNU General Public License v.2. +** +******************************************************************************* +******************************************************************************/ + +#ifndef __DLM_DOT_H__ +#define __DLM_DOT_H__ + +/* + * Interface to Distributed Lock Manager (DLM) + * routines and structures to use DLM lockspaces + */ + +/* Lock levels and flags are here */ +#include <linux/dlmconstants.h> +#include <linux/types.h> + +typedef void dlm_lockspace_t; + +/* + * Lock status block + * + * Use this structure to specify the contents of the lock value block. For a + * conversion request, this structure is used to specify the lock ID of the + * lock. DLM writes the status of the lock request and the lock ID assigned + * to the request in the lock status block. + * + * sb_lkid: the returned lock ID. It is set on new (non-conversion) requests. + * It is available when dlm_lock returns. + * + * sb_lvbptr: saves or returns the contents of the lock's LVB according to rules + * shown for the DLM_LKF_VALBLK flag. + * + * sb_flags: DLM_SBF_DEMOTED is returned if in the process of promoting a lock, + * it was first demoted to NL to avoid conversion deadlock. + * DLM_SBF_VALNOTVALID is returned if the resource's LVB is marked invalid. + * + * sb_status: the returned status of the lock request set prior to AST + * execution. Possible return values: + * + * 0 if lock request was successful + * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE + * -DLM_EUNLOCK if unlock request was successful + * -DLM_ECANCEL if a cancel completed successfully + * -EDEADLK if a deadlock was detected + * -ETIMEDOUT if the lock request was canceled due to a timeout + */ + +#define DLM_SBF_DEMOTED 0x01 +#define DLM_SBF_VALNOTVALID 0x02 +#define DLM_SBF_ALTMODE 0x04 + +struct dlm_lksb { + int sb_status; + __u32 sb_lkid; + char sb_flags; + char * sb_lvbptr; +}; + +/* dlm_new_lockspace() flags */ + +#define DLM_LSFL_NODIR 0x00000001 +#define DLM_LSFL_TIMEWARN 0x00000002 +#define DLM_LSFL_FS 0x00000004 +#define DLM_LSFL_NEWEXCL 0x00000008 + +#ifdef __KERNEL__ + +struct dlm_slot { + int nodeid; /* 1 to MAX_INT */ + int slot; /* 1 to MAX_INT */ +}; + +/* + * recover_prep: called before the dlm begins lock recovery. + * Notfies lockspace user that locks from failed members will be granted. + * recover_slot: called after recover_prep and before recover_done. + * Identifies a failed lockspace member. + * recover_done: called after the dlm completes lock recovery. + * Identifies lockspace members and lockspace generation number. + */ + +struct dlm_lockspace_ops { + void (*recover_prep) (void *ops_arg); + void (*recover_slot) (void *ops_arg, struct dlm_slot *slot); + void (*recover_done) (void *ops_arg, struct dlm_slot *slots, + int num_slots, int our_slot, uint32_t generation); +}; + +/* + * dlm_new_lockspace + * + * Create/join a lockspace. + * + * name: lockspace name, null terminated, up to DLM_LOCKSPACE_LEN (not + * including terminating null). + * + * cluster: cluster name, null terminated, up to DLM_LOCKSPACE_LEN (not + * including terminating null). Optional. When cluster is null, it + * is not used. When set, dlm_new_lockspace() returns -EBADR if cluster + * is not equal to the dlm cluster name. 
+ * + * flags: + * DLM_LSFL_NODIR + * The dlm should not use a resource directory, but statically assign + * resource mastery to nodes based on the name hash that is otherwise + * used to select the directory node. Must be the same on all nodes. + * DLM_LSFL_TIMEWARN + * The dlm should emit netlink messages if locks have been waiting + * for a configurable amount of time. (Unused.) + * DLM_LSFL_FS + * The lockspace user is in the kernel (i.e. filesystem). Enables + * direct bast/cast callbacks. + * DLM_LSFL_NEWEXCL + * dlm_new_lockspace() should return -EEXIST if the lockspace exists. + * + * lvblen: length of lvb in bytes. Must be multiple of 8. + * dlm_new_lockspace() returns an error if this does not match + * what other nodes are using. + * + * ops: callbacks that indicate lockspace recovery points so the + * caller can coordinate its recovery and know lockspace members. + * This is only used by the initial dlm_new_lockspace() call. + * Optional. + * + * ops_arg: arg for ops callbacks. + * + * ops_result: tells caller if the ops callbacks (if provided) will + * be used or not. 0: will be used, -EXXX will not be used. + * -EOPNOTSUPP: the dlm does not have recovery_callbacks enabled. + * + * lockspace: handle for dlm functions + */ + +int dlm_new_lockspace(const char *name, const char *cluster, + uint32_t flags, int lvblen, + const struct dlm_lockspace_ops *ops, void *ops_arg, + int *ops_result, dlm_lockspace_t **lockspace); + +/* + * dlm_release_lockspace + * + * Stop a lockspace. + */ + +int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force); + +/* + * dlm_lock + * + * Make an asyncronous request to acquire or convert a lock on a named + * resource. + * + * lockspace: context for the request + * mode: the requested mode of the lock (DLM_LOCK_) + * lksb: lock status block for input and async return values + * flags: input flags (DLM_LKF_) + * name: name of the resource to lock, can be binary + * namelen: the length in bytes of the resource name (MAX_RESNAME_LEN) + * parent: the lock ID of a parent lock or 0 if none + * lockast: function DLM executes when it completes processing the request + * astarg: argument passed to lockast and bast functions + * bast: function DLM executes when this lock later blocks another request + * + * Returns: + * 0 if request is successfully queued for processing + * -EINVAL if any input parameters are invalid + * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE + * -ENOMEM if there is no memory to process request + * -ENOTCONN if there is a communication error + * + * If the call to dlm_lock returns an error then the operation has failed and + * the AST routine will not be called. If dlm_lock returns 0 it is still + * possible that the lock operation will fail. The AST routine will be called + * when the locking is complete and the status is returned in the lksb. + * + * If the AST routines or parameter are passed to a conversion operation then + * they will overwrite those values that were passed to a previous dlm_lock + * call. + * + * AST routines should not block (at least not for long), but may make + * any locking calls they please. + */ + +int dlm_lock(dlm_lockspace_t *lockspace, + int mode, + struct dlm_lksb *lksb, + uint32_t flags, + void *name, + unsigned int namelen, + uint32_t parent_lkid, + void (*lockast) (void *astarg), + void *astarg, + void (*bast) (void *astarg, int mode)); + +/* + * dlm_unlock + * + * Asynchronously release a lock on a resource. 
The AST routine is called + * when the resource is successfully unlocked. + * + * lockspace: context for the request + * lkid: the lock ID as returned in the lksb + * flags: input flags (DLM_LKF_) + * lksb: if NULL the lksb parameter passed to last lock request is used + * astarg: the arg used with the completion ast for the unlock + * + * Returns: + * 0 if request is successfully queued for processing + * -EINVAL if any input parameters are invalid + * -ENOTEMPTY if the lock still has sublocks + * -EBUSY if the lock is waiting for a remote lock operation + * -ENOTCONN if there is a communication error + */ + +int dlm_unlock(dlm_lockspace_t *lockspace, + uint32_t lkid, + uint32_t flags, + struct dlm_lksb *lksb, + void *astarg); + +#endif /* __KERNEL__ */ + +#endif /* __DLM_DOT_H__ */ + diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h new file mode 100644 index 00000000..3060783c --- /dev/null +++ b/include/linux/dlm_device.h @@ -0,0 +1,108 @@ +/****************************************************************************** +******************************************************************************* +** +** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. +** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. +** +** This copyrighted material is made available to anyone wishing to use, +** modify, copy, or redistribute it subject to the terms and conditions +** of the GNU General Public License v.2. +** +******************************************************************************* +******************************************************************************/ + +#ifndef _LINUX_DLM_DEVICE_H +#define _LINUX_DLM_DEVICE_H + +/* This is the device interface for dlm, most users will use a library + * interface. 
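+ *
+ * Illustrative sketch (an assumption about typical library usage, not part
+ * of the original header): each request written to the device begins with
+ * the interface version so the kernel can reject a mismatched library:
+ *
+ *	struct dlm_write_request req = {
+ *		.version = { DLM_DEVICE_VERSION_MAJOR,
+ *			     DLM_DEVICE_VERSION_MINOR,
+ *			     DLM_DEVICE_VERSION_PATCH },
+ *		.cmd	 = DLM_USER_LOCK,
+ *	};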
+ */ + +#include <linux/dlm.h> +#include <linux/types.h> + +#define DLM_USER_LVB_LEN 32 + +/* Version of the device interface */ +#define DLM_DEVICE_VERSION_MAJOR 6 +#define DLM_DEVICE_VERSION_MINOR 0 +#define DLM_DEVICE_VERSION_PATCH 1 + +/* struct passed to the lock write */ +struct dlm_lock_params { + __u8 mode; + __u8 namelen; + __u16 unused; + __u32 flags; + __u32 lkid; + __u32 parent; + __u64 xid; + __u64 timeout; + void __user *castparam; + void __user *castaddr; + void __user *bastparam; + void __user *bastaddr; + struct dlm_lksb __user *lksb; + char lvb[DLM_USER_LVB_LEN]; + char name[0]; +}; + +struct dlm_lspace_params { + __u32 flags; + __u32 minor; + char name[0]; +}; + +struct dlm_purge_params { + __u32 nodeid; + __u32 pid; +}; + +struct dlm_write_request { + __u32 version[3]; + __u8 cmd; + __u8 is64bit; + __u8 unused[2]; + + union { + struct dlm_lock_params lock; + struct dlm_lspace_params lspace; + struct dlm_purge_params purge; + } i; +}; + +struct dlm_device_version { + __u32 version[3]; +}; + +/* struct read from the "device" fd, + consists mainly of userspace pointers for the library to use */ + +struct dlm_lock_result { + __u32 version[3]; + __u32 length; + void __user * user_astaddr; + void __user * user_astparam; + struct dlm_lksb __user * user_lksb; + struct dlm_lksb lksb; + __u8 bast_mode; + __u8 unused[3]; + /* Offsets may be zero if no data is present */ + __u32 lvb_offset; +}; + +/* Commands passed to the device */ +#define DLM_USER_LOCK 1 +#define DLM_USER_UNLOCK 2 +#define DLM_USER_QUERY 3 +#define DLM_USER_CREATE_LOCKSPACE 4 +#define DLM_USER_REMOVE_LOCKSPACE 5 +#define DLM_USER_PURGE 6 +#define DLM_USER_DEADLOCK 7 + +/* Lockspace flags */ +#define DLM_USER_LSFLG_AUTOFREE 1 +#define DLM_USER_LSFLG_FORCEFREE 2 + +#endif + diff --git a/include/linux/dlm_netlink.h b/include/linux/dlm_netlink.h new file mode 100644 index 00000000..647c8ef2 --- /dev/null +++ b/include/linux/dlm_netlink.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2007 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + */ + +#ifndef _DLM_NETLINK_H +#define _DLM_NETLINK_H + +#include <linux/types.h> + +enum { + DLM_STATUS_WAITING = 1, + DLM_STATUS_GRANTED = 2, + DLM_STATUS_CONVERT = 3, +}; + +#define DLM_LOCK_DATA_VERSION 1 + +struct dlm_lock_data { + __u16 version; + __u32 lockspace_id; + int nodeid; + int ownpid; + __u32 id; + __u32 remid; + __u64 xid; + __s8 status; + __s8 grmode; + __s8 rqmode; + unsigned long timestamp; + int resource_namelen; + char resource_name[DLM_RESNAME_MAXLEN]; +}; + +enum { + DLM_CMD_UNSPEC = 0, + DLM_CMD_HELLO, /* user->kernel */ + DLM_CMD_TIMEOUT, /* kernel->user */ + __DLM_CMD_MAX, +}; + +#define DLM_CMD_MAX (__DLM_CMD_MAX - 1) + +enum { + DLM_TYPE_UNSPEC = 0, + DLM_TYPE_LOCK, + __DLM_TYPE_MAX, +}; + +#define DLM_TYPE_MAX (__DLM_TYPE_MAX - 1) + +#define DLM_GENL_VERSION 0x1 +#define DLM_GENL_NAME "DLM" + +#endif /* _DLM_NETLINK_H */ diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h new file mode 100644 index 00000000..3b1cc1be --- /dev/null +++ b/include/linux/dlm_plock.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. 
+ */ + +#ifndef __DLM_PLOCK_DOT_H__ +#define __DLM_PLOCK_DOT_H__ + +#include <linux/types.h> + +#define DLM_PLOCK_MISC_NAME "dlm_plock" + +#define DLM_PLOCK_VERSION_MAJOR 1 +#define DLM_PLOCK_VERSION_MINOR 2 +#define DLM_PLOCK_VERSION_PATCH 0 + +enum { + DLM_PLOCK_OP_LOCK = 1, + DLM_PLOCK_OP_UNLOCK, + DLM_PLOCK_OP_GET, +}; + +#define DLM_PLOCK_FL_CLOSE 1 + +struct dlm_plock_info { + __u32 version[3]; + __u8 optype; + __u8 ex; + __u8 wait; + __u8 flags; + __u32 pid; + __s32 nodeid; + __s32 rv; + __u32 fsid; + __u64 number; + __u64 start; + __u64 end; + __u64 owner; +}; + +#ifdef __KERNEL__ +int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, + int cmd, struct file_lock *fl); +int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, + struct file_lock *fl); +int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, + struct file_lock *fl); +#endif /* __KERNEL__ */ + +#endif + diff --git a/include/linux/dlmconstants.h b/include/linux/dlmconstants.h new file mode 100644 index 00000000..47bf08dc --- /dev/null +++ b/include/linux/dlmconstants.h @@ -0,0 +1,163 @@ +/****************************************************************************** +******************************************************************************* +** +** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. +** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. +** +** This copyrighted material is made available to anyone wishing to use, +** modify, copy, or redistribute it subject to the terms and conditions +** of the GNU General Public License v.2. +** +******************************************************************************* +******************************************************************************/ + +#ifndef __DLMCONSTANTS_DOT_H__ +#define __DLMCONSTANTS_DOT_H__ + +/* + * Constants used by DLM interface. + */ + +#define DLM_LOCKSPACE_LEN 64 +#define DLM_RESNAME_MAXLEN 64 + + +/* + * Lock Modes + */ + +#define DLM_LOCK_IV (-1) /* invalid */ +#define DLM_LOCK_NL 0 /* null */ +#define DLM_LOCK_CR 1 /* concurrent read */ +#define DLM_LOCK_CW 2 /* concurrent write */ +#define DLM_LOCK_PR 3 /* protected read */ +#define DLM_LOCK_PW 4 /* protected write */ +#define DLM_LOCK_EX 5 /* exclusive */ + + +/* + * Flags to dlm_lock + * + * DLM_LKF_NOQUEUE + * + * Do not queue the lock request on the wait queue if it cannot be granted + * immediately. If the lock cannot be granted because of this flag, DLM will + * either return -EAGAIN from the dlm_lock call or will return 0 from + * dlm_lock and -EAGAIN in the lock status block when the AST is executed. + * + * DLM_LKF_CANCEL + * + * Used to cancel a pending lock request or conversion. A converting lock is + * returned to its previously granted mode. + * + * DLM_LKF_CONVERT + * + * Indicates a lock conversion request. For conversions the name and namelen + * are ignored and the lock ID in the LKSB is used to identify the lock. + * + * DLM_LKF_VALBLK + * + * Requests DLM to return the current contents of the lock value block in the + * lock status block. When this flag is set in a lock conversion from PW or EX + * modes, DLM assigns the value specified in the lock status block to the lock + * value block of the lock resource. The LVB is a DLM_LVB_LEN size array + * containing application-specific information. + * + * DLM_LKF_QUECVT + * + * Force a conversion request to be queued, even if it is compatible with + * the granted modes of other locks on the same resource. 
+ * + * DLM_LKF_IVVALBLK + * + * Invalidate the lock value block. + * + * DLM_LKF_CONVDEADLK + * + * Allows the dlm to resolve conversion deadlocks internally by demoting the + * granted mode of a converting lock to NL. The DLM_SBF_DEMOTED flag is + * returned for a conversion that's been effected by this. + * + * DLM_LKF_PERSISTENT + * + * Only relevant to locks originating in userspace. A persistent lock will not + * be removed if the process holding the lock exits. + * + * DLM_LKF_NODLCKWT + * + * Do not cancel the lock if it gets into conversion deadlock. + * Exclude this lock from being monitored due to DLM_LSFL_TIMEWARN. + * + * DLM_LKF_NODLCKBLK + * + * net yet implemented + * + * DLM_LKF_EXPEDITE + * + * Used only with new requests for NL mode locks. Tells the lock manager + * to grant the lock, ignoring other locks in convert and wait queues. + * + * DLM_LKF_NOQUEUEBAST + * + * Send blocking AST's before returning -EAGAIN to the caller. It is only + * used along with the NOQUEUE flag. Blocking AST's are not sent for failed + * NOQUEUE requests otherwise. + * + * DLM_LKF_HEADQUE + * + * Add a lock to the head of the convert or wait queue rather than the tail. + * + * DLM_LKF_NOORDER + * + * Disregard the standard grant order rules and grant a lock as soon as it + * is compatible with other granted locks. + * + * DLM_LKF_ORPHAN + * + * not yet implemented + * + * DLM_LKF_ALTPR + * + * If the requested mode cannot be granted immediately, try to grant the lock + * in PR mode instead. If this alternate mode is granted instead of the + * requested mode, DLM_SBF_ALTMODE is returned in the lksb. + * + * DLM_LKF_ALTCW + * + * The same as ALTPR, but the alternate mode is CW. + * + * DLM_LKF_FORCEUNLOCK + * + * Unlock the lock even if it is converting or waiting or has sublocks. + * Only really for use by the userland device.c code. + * + */ + +#define DLM_LKF_NOQUEUE 0x00000001 +#define DLM_LKF_CANCEL 0x00000002 +#define DLM_LKF_CONVERT 0x00000004 +#define DLM_LKF_VALBLK 0x00000008 +#define DLM_LKF_QUECVT 0x00000010 +#define DLM_LKF_IVVALBLK 0x00000020 +#define DLM_LKF_CONVDEADLK 0x00000040 +#define DLM_LKF_PERSISTENT 0x00000080 +#define DLM_LKF_NODLCKWT 0x00000100 +#define DLM_LKF_NODLCKBLK 0x00000200 +#define DLM_LKF_EXPEDITE 0x00000400 +#define DLM_LKF_NOQUEUEBAST 0x00000800 +#define DLM_LKF_HEADQUE 0x00001000 +#define DLM_LKF_NOORDER 0x00002000 +#define DLM_LKF_ORPHAN 0x00004000 +#define DLM_LKF_ALTPR 0x00008000 +#define DLM_LKF_ALTCW 0x00010000 +#define DLM_LKF_FORCEUNLOCK 0x00020000 +#define DLM_LKF_TIMEOUT 0x00040000 + +/* + * Some return codes that are not in errno.h + */ + +#define DLM_ECANCEL 0x10001 +#define DLM_EUNLOCK 0x10002 + +#endif /* __DLMCONSTANTS_DOT_H__ */ diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h new file mode 100644 index 00000000..7084503c --- /dev/null +++ b/include/linux/dm-dirty-log.h @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2003 Sistina Software + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * Device-Mapper dirty region log. + * + * This file is released under the LGPL. 
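+ *
+ * Illustrative sketch (assuming the in-tree "core" log type and a caller
+ * that needs no flush callback): a mirror-style target constructor might
+ * create, and later destroy, its region log roughly as follows:
+ *
+ *	struct dm_dirty_log *log;
+ *
+ *	log = dm_dirty_log_create("core", ti, NULL, argc, argv);
+ *	if (!log)
+ *		return -EINVAL;
+ *	...
+ *	dm_dirty_log_destroy(log);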
+ */ + +#ifndef _LINUX_DM_DIRTY_LOG +#define _LINUX_DM_DIRTY_LOG + +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/device-mapper.h> + +typedef sector_t region_t; + +struct dm_dirty_log_type; + +struct dm_dirty_log { + struct dm_dirty_log_type *type; + int (*flush_callback_fn)(struct dm_target *ti); + void *context; +}; + +struct dm_dirty_log_type { + const char *name; + struct module *module; + + /* For internal device-mapper use */ + struct list_head list; + + int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, + unsigned argc, char **argv); + void (*dtr)(struct dm_dirty_log *log); + + /* + * There are times when we don't want the log to touch + * the disk. + */ + int (*presuspend)(struct dm_dirty_log *log); + int (*postsuspend)(struct dm_dirty_log *log); + int (*resume)(struct dm_dirty_log *log); + + /* + * Retrieves the smallest size of region that the log can + * deal with. + */ + uint32_t (*get_region_size)(struct dm_dirty_log *log); + + /* + * A predicate to say whether a region is clean or not. + * May block. + */ + int (*is_clean)(struct dm_dirty_log *log, region_t region); + + /* + * Returns: 0, 1, -EWOULDBLOCK, < 0 + * + * A predicate function to check the area given by + * [sector, sector + len) is in sync. + * + * If -EWOULDBLOCK is returned the state of the region is + * unknown, typically this will result in a read being + * passed to a daemon to deal with, since a daemon is + * allowed to block. + */ + int (*in_sync)(struct dm_dirty_log *log, region_t region, + int can_block); + + /* + * Flush the current log state (eg, to disk). This + * function may block. + */ + int (*flush)(struct dm_dirty_log *log); + + /* + * Mark an area as clean or dirty. These functions may + * block, though for performance reasons blocking should + * be extremely rare (eg, allocating another chunk of + * memory for some reason). + */ + void (*mark_region)(struct dm_dirty_log *log, region_t region); + void (*clear_region)(struct dm_dirty_log *log, region_t region); + + /* + * Returns: <0 (error), 0 (no region), 1 (region) + * + * The mirrord will need perform recovery on regions of + * the mirror that are in the NOSYNC state. This + * function asks the log to tell the caller about the + * next region that this machine should recover. + * + * Do not confuse this function with 'in_sync()', one + * tells you if an area is synchronised, the other + * assigns recovery work. + */ + int (*get_resync_work)(struct dm_dirty_log *log, region_t *region); + + /* + * This notifies the log that the resync status of a region + * has changed. It also clears the region from the recovering + * list (if present). + */ + void (*set_region_sync)(struct dm_dirty_log *log, + region_t region, int in_sync); + + /* + * Returns the number of regions that are in sync. + */ + region_t (*get_sync_count)(struct dm_dirty_log *log); + + /* + * Support function for mirror status requests. + */ + int (*status)(struct dm_dirty_log *log, status_type_t status_type, + char *result, unsigned maxlen); + + /* + * is_remote_recovering is necessary for cluster mirroring. It provides + * a way to detect recovery on another node, so we aren't writing + * concurrently. This function is likely to block (when a cluster log + * is used). 
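
To show how the callbacks in struct dm_dirty_log_type fit together, here is a hedged skeleton of a log type that treats every region as clean and in sync. The "example" name and the 1024-sector region size are arbitrary assumptions, the registration calls are declared a little further down, and a usable log would also have to provide the mark/clear/flush/resync callbacks omitted here for brevity.

#include <linux/module.h>
#include <linux/dm-dirty-log.h>

static int example_ctr(struct dm_dirty_log *log, struct dm_target *ti,
                       unsigned argc, char **argv)
{
        log->context = NULL;    /* a real log would allocate state here */
        return 0;
}

static void example_dtr(struct dm_dirty_log *log)
{
}

static uint32_t example_get_region_size(struct dm_dirty_log *log)
{
        return 1024;            /* regions of 1024 sectors (arbitrary) */
}

static int example_is_clean(struct dm_dirty_log *log, region_t region)
{
        return 1;               /* pretend every region is clean */
}

static int example_in_sync(struct dm_dirty_log *log, region_t region,
                           int can_block)
{
        return 1;               /* pretend every region is in sync */
}

static struct dm_dirty_log_type example_log_type = {
        .name            = "example",
        .module          = THIS_MODULE,
        .ctr             = example_ctr,
        .dtr             = example_dtr,
        .get_region_size = example_get_region_size,
        .is_clean        = example_is_clean,
        .in_sync         = example_in_sync,
};

static int __init example_log_init(void)
{
        return dm_dirty_log_type_register(&example_log_type);
}

static void __exit example_log_exit(void)
{
        dm_dirty_log_type_unregister(&example_log_type);
}

module_init(example_log_init);
module_exit(example_log_exit);
MODULE_LICENSE("GPL");
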
+ * + * Returns: 0, 1 + */ + int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region); +}; + +int dm_dirty_log_type_register(struct dm_dirty_log_type *type); +int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type); + +/* + * Make sure you use these two functions, rather than calling + * type->constructor/destructor() directly. + */ +struct dm_dirty_log *dm_dirty_log_create(const char *type_name, + struct dm_target *ti, + int (*flush_callback_fn)(struct dm_target *ti), + unsigned argc, char **argv); +void dm_dirty_log_destroy(struct dm_dirty_log *log); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_DM_DIRTY_LOG_H */ diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h new file mode 100644 index 00000000..f4b0aa31 --- /dev/null +++ b/include/linux/dm-io.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2003 Sistina Software + * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved. + * + * Device-Mapper low-level I/O. + * + * This file is released under the GPL. + */ + +#ifndef _LINUX_DM_IO_H +#define _LINUX_DM_IO_H + +#ifdef __KERNEL__ + +#include <linux/types.h> + +struct dm_io_region { + struct block_device *bdev; + sector_t sector; + sector_t count; /* If this is zero the region is ignored. */ +}; + +struct page_list { + struct page_list *next; + struct page *page; +}; + +typedef void (*io_notify_fn)(unsigned long error, void *context); + +enum dm_io_mem_type { + DM_IO_PAGE_LIST,/* Page list */ + DM_IO_BVEC, /* Bio vector */ + DM_IO_VMA, /* Virtual memory area */ + DM_IO_KMEM, /* Kernel memory */ +}; + +struct dm_io_memory { + enum dm_io_mem_type type; + + unsigned offset; + + union { + struct page_list *pl; + struct bio_vec *bvec; + void *vma; + void *addr; + } ptr; +}; + +struct dm_io_notify { + io_notify_fn fn; /* Callback for asynchronous requests */ + void *context; /* Passed to callback */ +}; + +/* + * IO request structure + */ +struct dm_io_client; +struct dm_io_request { + int bi_rw; /* READ|WRITE - not READA */ + struct dm_io_memory mem; /* Memory to use for io */ + struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ + struct dm_io_client *client; /* Client memory handler */ +}; + +/* + * For async io calls, users can alternatively use the dm_io() function below + * and dm_io_client_create() to create private mempools for the client. + * + * Create/destroy may block. + */ +struct dm_io_client *dm_io_client_create(void); +void dm_io_client_destroy(struct dm_io_client *client); + +/* + * IO interface using private per-client pools. + * Each bit in the optional 'sync_error_bits' bitset indicates whether an + * error occurred doing io to the corresponding region. + */ +int dm_io(struct dm_io_request *io_req, unsigned num_regions, + struct dm_io_region *region, unsigned long *sync_error_bits); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_DM_IO_H */ diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h new file mode 100644 index 00000000..75fd5573 --- /dev/null +++ b/include/linux/dm-ioctl.h @@ -0,0 +1,337 @@ +/* + * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited. + * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved. + * + * This file is released under the LGPL. + */ + +#ifndef _LINUX_DM_IOCTL_V4_H +#define _LINUX_DM_IOCTL_V4_H + +#include <linux/types.h> + +#define DM_DIR "mapper" /* Slashes not supported */ +#define DM_CONTROL_NODE "control" +#define DM_MAX_TYPE_NAME 16 +#define DM_NAME_LEN 128 +#define DM_UUID_LEN 129 + +/* + * A traditional ioctl interface for the device mapper. 
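
Returning to the dm-io declarations above: a synchronous transfer is simply a dm_io() call with notify.fn left NULL, as the request structure's comment notes, and per-region errors come back in the sync_error_bits bitset. The sketch below is a minimal kernel-side example under that assumption; the caller is assumed to supply a valid block device and a kernel buffer large enough for eight sectors.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/dm-io.h>

/* Read 8 sectors from 'bdev' at 'sector' into 'data' (kernel memory),
 * synchronously.  Returns 0 on success. */
static int example_sync_read(struct block_device *bdev, sector_t sector,
                             void *data)
{
        struct dm_io_client *client;
        unsigned long error_bits = 0;
        int r;

        struct dm_io_region where = {
                .bdev   = bdev,
                .sector = sector,
                .count  = 8,
        };
        struct dm_io_request req = {
                .bi_rw        = READ,
                .mem.type     = DM_IO_KMEM,
                .mem.ptr.addr = data,
                .notify.fn    = NULL,   /* NULL notify => synchronous */
        };

        client = dm_io_client_create();
        if (IS_ERR(client))
                return PTR_ERR(client);
        req.client = client;

        r = dm_io(&req, 1, &where, &error_bits);
        dm_io_client_destroy(client);
        return r ? r : (error_bits ? -EIO : 0);
}
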
+ * + * Each device can have two tables associated with it, an + * 'active' table which is the one currently used by io passing + * through the device, and an 'inactive' one which is a table + * that is being prepared as a replacement for the 'active' one. + * + * DM_VERSION: + * Just get the version information for the ioctl interface. + * + * DM_REMOVE_ALL: + * Remove all dm devices, destroy all tables. Only really used + * for debug. + * + * DM_LIST_DEVICES: + * Get a list of all the dm device names. + * + * DM_DEV_CREATE: + * Create a new device, neither the 'active' or 'inactive' table + * slots will be filled. The device will be in suspended state + * after creation, however any io to the device will get errored + * since it will be out-of-bounds. + * + * DM_DEV_REMOVE: + * Remove a device, destroy any tables. + * + * DM_DEV_RENAME: + * Rename a device or set its uuid if none was previously supplied. + * + * DM_SUSPEND: + * This performs both suspend and resume, depending which flag is + * passed in. + * Suspend: This command will not return until all pending io to + * the device has completed. Further io will be deferred until + * the device is resumed. + * Resume: It is no longer an error to issue this command on an + * unsuspended device. If a table is present in the 'inactive' + * slot, it will be moved to the active slot, then the old table + * from the active slot will be _destroyed_. Finally the device + * is resumed. + * + * DM_DEV_STATUS: + * Retrieves the status for the table in the 'active' slot. + * + * DM_DEV_WAIT: + * Wait for a significant event to occur to the device. This + * could either be caused by an event triggered by one of the + * targets of the table in the 'active' slot, or a table change. + * + * DM_TABLE_LOAD: + * Load a table into the 'inactive' slot for the device. The + * device does _not_ need to be suspended prior to this command. + * + * DM_TABLE_CLEAR: + * Destroy any table in the 'inactive' slot (ie. abort). + * + * DM_TABLE_DEPS: + * Return a set of device dependencies for the 'active' table. + * + * DM_TABLE_STATUS: + * Return the targets status for the 'active' table. + * + * DM_TARGET_MSG: + * Pass a message string to the target at a specific offset of a device. + * + * DM_DEV_SET_GEOMETRY: + * Set the geometry of a device by passing in a string in this format: + * + * "cylinders heads sectors_per_track start_sector" + * + * Beware that CHS geometry is nearly obsolete and only provided + * for compatibility with dm devices that can be booted by a PC + * BIOS. See struct hd_geometry for range limits. Also note that + * the geometry is erased if the device size changes. + */ + +/* + * All ioctl arguments consist of a single chunk of memory, with + * this structure at the start. If a uuid is specified any + * lookup (eg. for a DM_INFO) will be done on that, *not* the + * name. + */ +struct dm_ioctl { + /* + * The version number is made up of three parts: + * major - no backward or forward compatibility, + * minor - only backwards compatible, + * patch - both backwards and forwards compatible. + * + * All clients of the ioctl interface should fill in the + * version number of the interface that they were + * compiled with. + * + * All recognised ioctl commands (ie. those that don't + * return -ENOTTY) fill out this field, even if the + * command failed. 
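
As a concrete userspace example of the simplest command above, DM_VERSION, the sketch below opens the control node and asks the kernel for its interface version, using struct dm_ioctl and the macros defined later in this header. The /dev/mapper/control path is derived from DM_DIR and DM_CONTROL_NODE, but where the node actually lives is up to udev, so treat the path as an assumption.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

int main(void)
{
        struct dm_ioctl dmi;
        int fd = open("/dev/" DM_DIR "/" DM_CONTROL_NODE, O_RDWR);

        if (fd < 0)
                return 1;

        memset(&dmi, 0, sizeof(dmi));
        dmi.version[0] = DM_VERSION_MAJOR;   /* interface we were built against */
        dmi.version[1] = DM_VERSION_MINOR;
        dmi.version[2] = DM_VERSION_PATCHLEVEL;
        dmi.data_size  = sizeof(dmi);
        dmi.data_start = sizeof(dmi);

        if (ioctl(fd, DM_VERSION, &dmi) < 0) {
                close(fd);
                return 1;
        }

        printf("device-mapper ioctl interface %u.%u.%u\n",
               dmi.version[0], dmi.version[1], dmi.version[2]);
        close(fd);
        return 0;
}
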
+ */ + __u32 version[3]; /* in/out */ + __u32 data_size; /* total size of data passed in + * including this struct */ + + __u32 data_start; /* offset to start of data + * relative to start of this struct */ + + __u32 target_count; /* in/out */ + __s32 open_count; /* out */ + __u32 flags; /* in/out */ + + /* + * event_nr holds either the event number (input and output) or the + * udev cookie value (input only). + * The DM_DEV_WAIT ioctl takes an event number as input. + * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls + * use the field as a cookie to return in the DM_COOKIE + * variable with the uevents they issue. + * For output, the ioctls return the event number, not the cookie. + */ + __u32 event_nr; /* in/out */ + __u32 padding; + + __u64 dev; /* in/out */ + + char name[DM_NAME_LEN]; /* device name */ + char uuid[DM_UUID_LEN]; /* unique identifier for + * the block device */ + char data[7]; /* padding or data */ +}; + +/* + * Used to specify tables. These structures appear after the + * dm_ioctl. + */ +struct dm_target_spec { + __u64 sector_start; + __u64 length; + __s32 status; /* used when reading from kernel only */ + + /* + * Location of the next dm_target_spec. + * - When specifying targets on a DM_TABLE_LOAD command, this value is + * the number of bytes from the start of the "current" dm_target_spec + * to the start of the "next" dm_target_spec. + * - When retrieving targets on a DM_TABLE_STATUS command, this value + * is the number of bytes from the start of the first dm_target_spec + * (that follows the dm_ioctl struct) to the start of the "next" + * dm_target_spec. + */ + __u32 next; + + char target_type[DM_MAX_TYPE_NAME]; + + /* + * Parameter string starts immediately after this object. + * Be careful to add padding after string to ensure correct + * alignment of subsequent dm_target_spec. + */ +}; + +/* + * Used to retrieve the target dependencies. + */ +struct dm_target_deps { + __u32 count; /* Array size */ + __u32 padding; /* unused */ + __u64 dev[0]; /* out */ +}; + +/* + * Used to get a list of all dm devices. 
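
The 'next' field of struct dm_target_spec above is easy to misread, so here is a hedged sketch of the status-side walk, where each offset is measured from the first dm_target_spec that follows the dm_ioctl header (the load-side interpretation, chaining from the current record, differs as described above). Buffer validation is omitted.

#include <stdio.h>
#include <stdint.h>
#include <linux/dm-ioctl.h>

/* Walk the dm_target_spec records returned by DM_TABLE_STATUS.
 * 'dmi' points at the start of the ioctl buffer. */
static void print_table_status(struct dm_ioctl *dmi)
{
        char *first = (char *)dmi + dmi->data_start;
        struct dm_target_spec *spec;
        uint32_t offset = 0;
        uint32_t i;

        for (i = 0; i < dmi->target_count; i++) {
                spec = (struct dm_target_spec *)(first + offset);
                printf("%llu %llu %s %s\n",
                       (unsigned long long)spec->sector_start,
                       (unsigned long long)spec->length,
                       spec->target_type,
                       (char *)(spec + 1));   /* params follow the struct */
                offset = spec->next;          /* relative to the first spec */
        }
}
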
+ */ +struct dm_name_list { + __u64 dev; + __u32 next; /* offset to the next record from + the _start_ of this */ + char name[0]; +}; + +/* + * Used to retrieve the target versions + */ +struct dm_target_versions { + __u32 next; + __u32 version[3]; + + char name[0]; +}; + +/* + * Used to pass message to a target + */ +struct dm_target_msg { + __u64 sector; /* Device sector */ + + char message[0]; +}; + +/* + * If you change this make sure you make the corresponding change + * to dm-ioctl.c:lookup_ioctl() + */ +enum { + /* Top level cmds */ + DM_VERSION_CMD = 0, + DM_REMOVE_ALL_CMD, + DM_LIST_DEVICES_CMD, + + /* device level cmds */ + DM_DEV_CREATE_CMD, + DM_DEV_REMOVE_CMD, + DM_DEV_RENAME_CMD, + DM_DEV_SUSPEND_CMD, + DM_DEV_STATUS_CMD, + DM_DEV_WAIT_CMD, + + /* Table level cmds */ + DM_TABLE_LOAD_CMD, + DM_TABLE_CLEAR_CMD, + DM_TABLE_DEPS_CMD, + DM_TABLE_STATUS_CMD, + + /* Added later */ + DM_LIST_VERSIONS_CMD, + DM_TARGET_MSG_CMD, + DM_DEV_SET_GEOMETRY_CMD +}; + +#define DM_IOCTL 0xfd + +#define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl) +#define DM_REMOVE_ALL _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl) +#define DM_LIST_DEVICES _IOWR(DM_IOCTL, DM_LIST_DEVICES_CMD, struct dm_ioctl) + +#define DM_DEV_CREATE _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, struct dm_ioctl) +#define DM_DEV_REMOVE _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, struct dm_ioctl) +#define DM_DEV_RENAME _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, struct dm_ioctl) +#define DM_DEV_SUSPEND _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl) +#define DM_DEV_STATUS _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl) +#define DM_DEV_WAIT _IOWR(DM_IOCTL, DM_DEV_WAIT_CMD, struct dm_ioctl) + +#define DM_TABLE_LOAD _IOWR(DM_IOCTL, DM_TABLE_LOAD_CMD, struct dm_ioctl) +#define DM_TABLE_CLEAR _IOWR(DM_IOCTL, DM_TABLE_CLEAR_CMD, struct dm_ioctl) +#define DM_TABLE_DEPS _IOWR(DM_IOCTL, DM_TABLE_DEPS_CMD, struct dm_ioctl) +#define DM_TABLE_STATUS _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, struct dm_ioctl) + +#define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl) + +#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) +#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) + +#define DM_VERSION_MAJOR 4 +#define DM_VERSION_MINOR 22 +#define DM_VERSION_PATCHLEVEL 0 +#define DM_VERSION_EXTRA "-ioctl (2011-10-19)" + +/* Status bits */ +#define DM_READONLY_FLAG (1 << 0) /* In/Out */ +#define DM_SUSPEND_FLAG (1 << 1) /* In/Out */ +#define DM_PERSISTENT_DEV_FLAG (1 << 3) /* In */ + +/* + * Flag passed into ioctl STATUS command to get table information + * rather than current status. + */ +#define DM_STATUS_TABLE_FLAG (1 << 4) /* In */ + +/* + * Flags that indicate whether a table is present in either of + * the two table slots that a device has. + */ +#define DM_ACTIVE_PRESENT_FLAG (1 << 5) /* Out */ +#define DM_INACTIVE_PRESENT_FLAG (1 << 6) /* Out */ + +/* + * Indicates that the buffer passed in wasn't big enough for the + * results. + */ +#define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */ + +/* + * This flag is now ignored. + */ +#define DM_SKIP_BDGET_FLAG (1 << 9) /* In */ + +/* + * Set this to avoid attempting to freeze any filesystem when suspending. + */ +#define DM_SKIP_LOCKFS_FLAG (1 << 10) /* In */ + +/* + * Set this to suspend without flushing queued ios. + */ +#define DM_NOFLUSH_FLAG (1 << 11) /* In */ + +/* + * If set, any table information returned will relate to the inactive + * table instead of the live one. 
Always check DM_INACTIVE_PRESENT_FLAG + * is set before using the data returned. + */ +#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */ + +/* + * If set, a uevent was generated for which the caller may need to wait. + */ +#define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */ + +/* + * If set, rename changes the uuid not the name. Only permitted + * if no uuid was previously supplied: an existing uuid cannot be changed. + */ +#define DM_UUID_FLAG (1 << 14) /* In */ + +/* + * If set, all buffers are wiped after use. Use when sending + * or requesting sensitive data such as an encryption key. + */ +#define DM_SECURE_DATA_FLAG (1 << 15) /* In */ + +#endif /* _LINUX_DM_IOCTL_H */ diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h new file mode 100644 index 00000000..47d9d376 --- /dev/null +++ b/include/linux/dm-kcopyd.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2001 - 2003 Sistina Software + * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved. + * + * kcopyd provides a simple interface for copying an area of one + * block-device to one or more other block-devices, either synchronous + * or with an asynchronous completion notification. + * + * This file is released under the GPL. + */ + +#ifndef _LINUX_DM_KCOPYD_H +#define _LINUX_DM_KCOPYD_H + +#ifdef __KERNEL__ + +#include <linux/dm-io.h> + +/* FIXME: make this configurable */ +#define DM_KCOPYD_MAX_REGIONS 8 + +#define DM_KCOPYD_IGNORE_ERROR 1 + +/* + * To use kcopyd you must first create a dm_kcopyd_client object. + */ +struct dm_kcopyd_client; +struct dm_kcopyd_client *dm_kcopyd_client_create(void); +void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc); + +/* + * Submit a copy job to kcopyd. This is built on top of the + * previous three fns. + * + * read_err is a boolean, + * write_err is a bitset, with 1 bit for each destination region + */ +typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err, + void *context); + +int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, + unsigned num_dests, struct dm_io_region *dests, + unsigned flags, dm_kcopyd_notify_fn fn, void *context); + +/* + * Prepare a callback and submit it via the kcopyd thread. + * + * dm_kcopyd_prepare_callback allocates a callback structure and returns it. + * It must not be called from interrupt context. + * The returned value should be passed into dm_kcopyd_do_callback. + * + * dm_kcopyd_do_callback submits the callback. + * It may be called from interrupt context. + * The callback is issued from the kcopyd thread. + */ +void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, + dm_kcopyd_notify_fn fn, void *context); +void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err); + +int dm_kcopyd_zero(struct dm_kcopyd_client *kc, + unsigned num_dests, struct dm_io_region *dests, + unsigned flags, dm_kcopyd_notify_fn fn, void *context); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_DM_KCOPYD_H */ diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h new file mode 100644 index 00000000..0678c2ad --- /dev/null +++ b/include/linux/dm-log-userspace.h @@ -0,0 +1,416 @@ +/* + * Copyright (C) 2006-2009 Red Hat, Inc. + * + * This file is released under the LGPL. + */ + +#ifndef __DM_LOG_USERSPACE_H__ +#define __DM_LOG_USERSPACE_H__ + +#include <linux/dm-ioctl.h> /* For DM_UUID_LEN */ + +/* + * The device-mapper userspace log module consists of a kernel component and + * a user-space component. 
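
Before moving on, a short kernel-side sketch of the kcopyd interface above: one source region copied to a single destination, with completion signalled through the notify callback. The completion object, the client created earlier with dm_kcopyd_client_create(), and the minimal error handling are deliberately simple assumptions.

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/dm-kcopyd.h>

static void copy_done(int read_err, unsigned long write_err, void *context)
{
        /* read_err is a boolean, write_err has one bit per destination. */
        if (read_err || write_err)
                pr_err("example: copy failed\n");
        complete(context);      /* struct completion supplied by the caller */
}

/* Copy 'count' sectors from src_bdev/src_sector to dst_bdev/dst_sector. */
static int example_copy(struct dm_kcopyd_client *kc,
                        struct block_device *src_bdev, sector_t src_sector,
                        struct block_device *dst_bdev, sector_t dst_sector,
                        sector_t count, struct completion *done)
{
        struct dm_io_region from = {
                .bdev = src_bdev, .sector = src_sector, .count = count,
        };
        struct dm_io_region to = {
                .bdev = dst_bdev, .sector = dst_sector, .count = count,
        };

        return dm_kcopyd_copy(kc, &from, 1, &to, 0 /* flags */, copy_done, done);
}
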
The kernel component implements the API defined + * in dm-dirty-log.h. Its purpose is simply to pass the parameters and + * return values of those API functions between kernel and user-space. + * + * Below are defined the 'request_types' - DM_ULOG_CTR, DM_ULOG_DTR, etc. + * These request types represent the different functions in the device-mapper + * dirty log API. Each of these is described in more detail below. + * + * The user-space program must listen for requests from the kernel (representing + * the various API functions) and process them. + * + * User-space begins by setting up the communication link (error checking + * removed for clarity): + * fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); + * addr.nl_family = AF_NETLINK; + * addr.nl_groups = CN_IDX_DM; + * addr.nl_pid = 0; + * r = bind(fd, (struct sockaddr *) &addr, sizeof(addr)); + * opt = addr.nl_groups; + * setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &opt, sizeof(opt)); + * + * User-space will then wait to receive requests form the kernel, which it + * will process as described below. The requests are received in the form, + * ((struct dm_ulog_request) + (additional data)). Depending on the request + * type, there may or may not be 'additional data'. In the descriptions below, + * you will see 'Payload-to-userspace' and 'Payload-to-kernel'. The + * 'Payload-to-userspace' is what the kernel sends in 'additional data' as + * necessary parameters to complete the request. The 'Payload-to-kernel' is + * the 'additional data' returned to the kernel that contains the necessary + * results of the request. The 'data_size' field in the dm_ulog_request + * structure denotes the availability and amount of payload data. + */ + +/* + * DM_ULOG_CTR corresponds to (found in dm-dirty-log.h): + * int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, + * unsigned argc, char **argv); + * + * Payload-to-userspace: + * A single string containing all the argv arguments separated by ' 's + * Payload-to-kernel: + * A NUL-terminated string that is the name of the device that is used + * as the backing store for the log data. 'dm_get_device' will be called + * on this device. ('dm_put_device' will be called on this device + * automatically after calling DM_ULOG_DTR.) If there is no device needed + * for log data, 'data_size' in the dm_ulog_request struct should be 0. + * + * The UUID contained in the dm_ulog_request structure is the reference that + * will be used by all request types to a specific log. The constructor must + * record this association with the instance created. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field, filling the + * data field with the log device if necessary, and setting 'data_size' + * appropriately. + */ +#define DM_ULOG_CTR 1 + +/* + * DM_ULOG_DTR corresponds to (found in dm-dirty-log.h): + * void (*dtr)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * A single string containing all the argv arguments separated by ' 's + * Payload-to-kernel: + * None. ('data_size' in the dm_ulog_request struct should be 0.) + * + * The UUID contained in the dm_ulog_request structure is all that is + * necessary to identify the log instance being destroyed. There is no + * payload data. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. 
+ */ +#define DM_ULOG_DTR 2 + +/* + * DM_ULOG_PRESUSPEND corresponds to (found in dm-dirty-log.h): + * int (*presuspend)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * None. + * + * The UUID contained in the dm_ulog_request structure is all that is + * necessary to identify the log instance being presuspended. There is no + * payload data. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_PRESUSPEND 3 + +/* + * DM_ULOG_POSTSUSPEND corresponds to (found in dm-dirty-log.h): + * int (*postsuspend)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * None. + * + * The UUID contained in the dm_ulog_request structure is all that is + * necessary to identify the log instance being postsuspended. There is no + * payload data. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_POSTSUSPEND 4 + +/* + * DM_ULOG_RESUME corresponds to (found in dm-dirty-log.h): + * int (*resume)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * None. + * + * The UUID contained in the dm_ulog_request structure is all that is + * necessary to identify the log instance being resumed. There is no + * payload data. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_RESUME 5 + +/* + * DM_ULOG_GET_REGION_SIZE corresponds to (found in dm-dirty-log.h): + * uint32_t (*get_region_size)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * uint64_t - contains the region size + * + * The region size is something that was determined at constructor time. + * It is returned in the payload area and 'data_size' is set to + * reflect this. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field appropriately. + */ +#define DM_ULOG_GET_REGION_SIZE 6 + +/* + * DM_ULOG_IS_CLEAN corresponds to (found in dm-dirty-log.h): + * int (*is_clean)(struct dm_dirty_log *log, region_t region); + * + * Payload-to-userspace: + * uint64_t - the region to get clean status on + * Payload-to-kernel: + * int64_t - 1 if clean, 0 otherwise + * + * Payload is sizeof(uint64_t) and contains the region for which the clean + * status is being made. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - filling the payload with 0 (not clean) or + * 1 (clean), setting 'data_size' and 'error' appropriately. + */ +#define DM_ULOG_IS_CLEAN 7 + +/* + * DM_ULOG_IN_SYNC corresponds to (found in dm-dirty-log.h): + * int (*in_sync)(struct dm_dirty_log *log, region_t region, + * int can_block); + * + * Payload-to-userspace: + * uint64_t - the region to get sync status on + * Payload-to-kernel: + * int64_t - 1 if in-sync, 0 otherwise + * + * Exactly the same as 'is_clean' above, except this time asking "has the + * region been recovered?" vs. "is the region not being modified?" + */ +#define DM_ULOG_IN_SYNC 8 + +/* + * DM_ULOG_FLUSH corresponds to (found in dm-dirty-log.h): + * int (*flush)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. 
+ * Payload-to-kernel: + * None. + * + * No incoming or outgoing payload. Simply flush log state to disk. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. + */ +#define DM_ULOG_FLUSH 9 + +/* + * DM_ULOG_MARK_REGION corresponds to (found in dm-dirty-log.h): + * void (*mark_region)(struct dm_dirty_log *log, region_t region); + * + * Payload-to-userspace: + * uint64_t [] - region(s) to mark + * Payload-to-kernel: + * None. + * + * Incoming payload contains the one or more regions to mark dirty. + * The number of regions contained in the payload can be determined from + * 'data_size/sizeof(uint64_t)'. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. + */ +#define DM_ULOG_MARK_REGION 10 + +/* + * DM_ULOG_CLEAR_REGION corresponds to (found in dm-dirty-log.h): + * void (*clear_region)(struct dm_dirty_log *log, region_t region); + * + * Payload-to-userspace: + * uint64_t [] - region(s) to clear + * Payload-to-kernel: + * None. + * + * Incoming payload contains the one or more regions to mark clean. + * The number of regions contained in the payload can be determined from + * 'data_size/sizeof(uint64_t)'. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. + */ +#define DM_ULOG_CLEAR_REGION 11 + +/* + * DM_ULOG_GET_RESYNC_WORK corresponds to (found in dm-dirty-log.h): + * int (*get_resync_work)(struct dm_dirty_log *log, region_t *region); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * { + * int64_t i; -- 1 if recovery necessary, 0 otherwise + * uint64_t r; -- The region to recover if i=1 + * } + * 'data_size' should be set appropriately. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field appropriately. + */ +#define DM_ULOG_GET_RESYNC_WORK 12 + +/* + * DM_ULOG_SET_REGION_SYNC corresponds to (found in dm-dirty-log.h): + * void (*set_region_sync)(struct dm_dirty_log *log, + * region_t region, int in_sync); + * + * Payload-to-userspace: + * { + * uint64_t - region to set sync state on + * int64_t - 0 if not-in-sync, 1 if in-sync + * } + * Payload-to-kernel: + * None. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. + */ +#define DM_ULOG_SET_REGION_SYNC 13 + +/* + * DM_ULOG_GET_SYNC_COUNT corresponds to (found in dm-dirty-log.h): + * region_t (*get_sync_count)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * uint64_t - the number of in-sync regions + * + * No incoming payload. Kernel-bound payload contains the number of + * regions that are in-sync (in a size_t). + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_GET_SYNC_COUNT 14 + +/* + * DM_ULOG_STATUS_INFO corresponds to (found in dm-dirty-log.h): + * int (*status)(struct dm_dirty_log *log, STATUSTYPE_INFO, + * char *result, unsigned maxlen); + * + * Payload-to-userspace: + * None. 
+ * Payload-to-kernel: + * Character string containing STATUSTYPE_INFO + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_STATUS_INFO 15 + +/* + * DM_ULOG_STATUS_TABLE corresponds to (found in dm-dirty-log.h): + * int (*status)(struct dm_dirty_log *log, STATUSTYPE_TABLE, + * char *result, unsigned maxlen); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * Character string containing STATUSTYPE_TABLE + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_STATUS_TABLE 16 + +/* + * DM_ULOG_IS_REMOTE_RECOVERING corresponds to (found in dm-dirty-log.h): + * int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region); + * + * Payload-to-userspace: + * uint64_t - region to determine recovery status on + * Payload-to-kernel: + * { + * int64_t is_recovering; -- 0 if no, 1 if yes + * uint64_t in_sync_hint; -- lowest region still needing resync + * } + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_IS_REMOTE_RECOVERING 17 + +/* + * (DM_ULOG_REQUEST_MASK & request_type) to get the request type + * + * Payload-to-userspace: + * A single string containing all the argv arguments separated by ' 's + * Payload-to-kernel: + * None. ('data_size' in the dm_ulog_request struct should be 0.) + * + * We are reserving 8 bits of the 32-bit 'request_type' field for the + * various request types above. The remaining 24-bits are currently + * set to zero and are reserved for future use and compatibility concerns. + * + * User-space should always use DM_ULOG_REQUEST_TYPE to acquire the + * request type from the 'request_type' field to maintain forward compatibility. + */ +#define DM_ULOG_REQUEST_MASK 0xFF +#define DM_ULOG_REQUEST_TYPE(request_type) \ + (DM_ULOG_REQUEST_MASK & (request_type)) + +/* + * DM_ULOG_REQUEST_VERSION is incremented when there is a + * change to the way information is passed between kernel + * and userspace. This could be a structure change of + * dm_ulog_request or a change in the way requests are + * issued/handled. Changes are outlined here: + * version 1: Initial implementation + * version 2: DM_ULOG_CTR allowed to return a string containing a + * device name that is to be registered with DM via + * 'dm_get_device'. + */ +#define DM_ULOG_REQUEST_VERSION 2 + +struct dm_ulog_request { + /* + * The local unique identifier (luid) and the universally unique + * identifier (uuid) are used to tie a request to a specific + * mirror log. A single machine log could probably make due with + * just the 'luid', but a cluster-aware log must use the 'uuid' and + * the 'luid'. The uuid is what is required for node to node + * communication concerning a particular log, but the 'luid' helps + * differentiate between logs that are being swapped and have the + * same 'uuid'. (Think "live" and "inactive" device-mapper tables.) 
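
Pulling the pieces of this header together, a userspace log daemon ends up with a dispatch routine shaped roughly like the hedged sketch below: it masks the request type with DM_ULOG_REQUEST_TYPE(), fills in 'error' and, where the request defines one, the reply payload and 'data_size', and then sends the whole dm_ulog_request (defined just below) back to the kernel over the connector socket described near the top of this file. The 1024-sector region size and the always-clean answer are placeholders.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <linux/dm-log-userspace.h>

/* Handle one request; 'rq' points at a writable copy that will be sent
 * back to the kernel once 'error' and 'data_size' are filled in. */
static void handle_request(struct dm_ulog_request *rq)
{
        switch (DM_ULOG_REQUEST_TYPE(rq->request_type)) {
        case DM_ULOG_GET_REGION_SIZE: {
                uint64_t region_size = 1024;  /* chosen at ctr time in a real log */

                memcpy(rq->data, &region_size, sizeof(region_size));
                rq->data_size = sizeof(region_size);
                rq->error = 0;
                break;
        }
        case DM_ULOG_IS_CLEAN: {
                int64_t clean = 1;            /* pretend the region is clean */

                memcpy(rq->data, &clean, sizeof(clean));
                rq->data_size = sizeof(clean);
                rq->error = 0;
                break;
        }
        default:
                rq->error = -ENOSYS;          /* request type not handled here */
                rq->data_size = 0;
                break;
        }
}
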
+ */ + uint64_t luid; + char uuid[DM_UUID_LEN]; + char padding[3]; /* Padding because DM_UUID_LEN = 129 */ + + uint32_t version; /* See DM_ULOG_REQUEST_VERSION */ + int32_t error; /* Used to report back processing errors */ + + uint32_t seq; /* Sequence number for request */ + uint32_t request_type; /* DM_ULOG_* defined above */ + uint32_t data_size; /* How much data (not including this struct) */ + + char data[0]; +}; + +#endif /* __DM_LOG_USERSPACE_H__ */ diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h new file mode 100644 index 00000000..9e2a7a40 --- /dev/null +++ b/include/linux/dm-region-hash.h @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * Device-Mapper dirty region hash interface. + * + * This file is released under the GPL. + */ + +#ifndef DM_REGION_HASH_H +#define DM_REGION_HASH_H + +#include <linux/dm-dirty-log.h> + +/*----------------------------------------------------------------- + * Region hash + *----------------------------------------------------------------*/ +struct dm_region_hash; +struct dm_region; + +/* + * States a region can have. + */ +enum dm_rh_region_states { + DM_RH_CLEAN = 0x01, /* No writes in flight. */ + DM_RH_DIRTY = 0x02, /* Writes in flight. */ + DM_RH_NOSYNC = 0x04, /* Out of sync. */ + DM_RH_RECOVERING = 0x08, /* Under resynchronization. */ +}; + +/* + * Region hash create/destroy. + */ +struct bio_list; +struct dm_region_hash *dm_region_hash_create( + void *context, void (*dispatch_bios)(void *context, + struct bio_list *bios), + void (*wakeup_workers)(void *context), + void (*wakeup_all_recovery_waiters)(void *context), + sector_t target_begin, unsigned max_recovery, + struct dm_dirty_log *log, uint32_t region_size, + region_t nr_regions); +void dm_region_hash_destroy(struct dm_region_hash *rh); + +struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh); + +/* + * Conversion functions. + */ +region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio); +sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region); +void *dm_rh_region_context(struct dm_region *reg); + +/* + * Get region size and key (ie. number of the region). + */ +sector_t dm_rh_get_region_size(struct dm_region_hash *rh); +region_t dm_rh_get_region_key(struct dm_region *reg); + +/* + * Get/set/update region state (and dirty log). + * + */ +int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block); +void dm_rh_set_state(struct dm_region_hash *rh, region_t region, + enum dm_rh_region_states state, int may_block); + +/* Non-zero errors_handled leaves the state of the region NOSYNC */ +void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled); + +/* Flush the region hash and dirty log. */ +int dm_rh_flush(struct dm_region_hash *rh); + +/* Inc/dec pending count on regions. */ +void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios); +void dm_rh_dec(struct dm_region_hash *rh, region_t region); + +/* Delay bios on regions. */ +void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); + +void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio); + +/* + * Region recovery control. + */ + +/* Prepare some regions for recovery by starting to quiesce them. */ +void dm_rh_recovery_prepare(struct dm_region_hash *rh); + +/* Try fetching a quiesced region for recovery. */ +struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh); + +/* Report recovery end on a region. 
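
As an illustration of the recovery-control calls above, a mirror-style target's worker might drive resynchronisation with a loop like the hedged sketch below. The do_recover_region callback stands in for the target's real copying work, and the second argument to dm_rh_recovery_end() is passed on the assumption, taken from the declaration just below, that a non-zero value reports failure.

#include <linux/dm-region-hash.h>

/* Illustrative only: 'do_recover_region' is assumed to return 0 on success. */
static void example_do_recovery(struct dm_region_hash *rh,
                                int (*do_recover_region)(region_t region))
{
        struct dm_region *reg;

        /* Start quiescing regions that the dirty log says need resync. */
        dm_rh_recovery_prepare(rh);

        /* Pick up each quiesced region, recover it, report the outcome. */
        while ((reg = dm_rh_recovery_start(rh))) {
                int err = do_recover_region(dm_rh_get_region_key(reg));

                dm_rh_recovery_end(reg, err);
        }
}
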
*/ +void dm_rh_recovery_end(struct dm_region *reg, int error); + +/* Returns number of regions with recovery work outstanding. */ +int dm_rh_recovery_in_flight(struct dm_region_hash *rh); + +/* Start/stop recovery. */ +void dm_rh_start_recovery(struct dm_region_hash *rh); +void dm_rh_stop_recovery(struct dm_region_hash *rh); + +#endif /* DM_REGION_HASH_H */ diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h new file mode 100644 index 00000000..96e87693 --- /dev/null +++ b/include/linux/dm9000.h @@ -0,0 +1,40 @@ +/* include/linux/dm9000.h + * + * Copyright (c) 2004 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * + * Header file for dm9000 platform data + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * +*/ + +#ifndef __DM9000_PLATFORM_DATA +#define __DM9000_PLATFORM_DATA __FILE__ + +/* IO control flags */ + +#define DM9000_PLATF_8BITONLY (0x0001) +#define DM9000_PLATF_16BITONLY (0x0002) +#define DM9000_PLATF_32BITONLY (0x0004) +#define DM9000_PLATF_EXT_PHY (0x0008) +#define DM9000_PLATF_NO_EEPROM (0x0010) +#define DM9000_PLATF_SIMPLE_PHY (0x0020) /* Use NSR to find LinkStatus */ + +/* platform data for platform device structure's platform_data field */ + +struct dm9000_plat_data { + unsigned int flags; + unsigned char dev_addr[6]; + + /* allow replacement IO routines */ + + void (*inblk)(void __iomem *reg, void *data, int len); + void (*outblk)(void __iomem *reg, void *data, int len); + void (*dumpblk)(void __iomem *reg, int len); +}; + +#endif /* __DM9000_PLATFORM_DATA */ + diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h new file mode 100644 index 00000000..547ab568 --- /dev/null +++ b/include/linux/dma-attrs.h @@ -0,0 +1,77 @@ +#ifndef _DMA_ATTR_H +#define _DMA_ATTR_H + +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/bug.h> + +/** + * an enum dma_attr represents an attribute associated with a DMA + * mapping. The semantics of each attribute should be defined in + * Documentation/DMA-attributes.txt. + */ +enum dma_attr { + DMA_ATTR_WRITE_BARRIER, + DMA_ATTR_WEAK_ORDERING, + DMA_ATTR_WRITE_COMBINE, + DMA_ATTR_NON_CONSISTENT, + DMA_ATTR_MAX, +}; + +#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX) + +/** + * struct dma_attrs - an opaque container for DMA attributes + * @flags - bitmask representing a collection of enum dma_attr + */ +struct dma_attrs { + unsigned long flags[__DMA_ATTRS_LONGS]; +}; + +#define DEFINE_DMA_ATTRS(x) \ + struct dma_attrs x = { \ + .flags = { [0 ... 
__DMA_ATTRS_LONGS-1] = 0 }, \ + } + +static inline void init_dma_attrs(struct dma_attrs *attrs) +{ + bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); +} + +#ifdef CONFIG_HAVE_DMA_ATTRS +/** + * dma_set_attr - set a specific attribute + * @attr: attribute to set + * @attrs: struct dma_attrs (may be NULL) + */ +static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) +{ + if (attrs == NULL) + return; + BUG_ON(attr >= DMA_ATTR_MAX); + __set_bit(attr, attrs->flags); +} + +/** + * dma_get_attr - check for a specific attribute + * @attr: attribute to set + * @attrs: struct dma_attrs (may be NULL) + */ +static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) +{ + if (attrs == NULL) + return 0; + BUG_ON(attr >= DMA_ATTR_MAX); + return test_bit(attr, attrs->flags); +} +#else /* !CONFIG_HAVE_DMA_ATTRS */ +static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) +{ +} + +static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) +{ + return 0; +} +#endif /* CONFIG_HAVE_DMA_ATTRS */ +#endif /* _DMA_ATTR_H */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h new file mode 100644 index 00000000..1f78d159 --- /dev/null +++ b/include/linux/dma-buf.h @@ -0,0 +1,269 @@ +/* + * Header file for dma buffer sharing framework. + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Author: Sumit Semwal <sumit.semwal@ti.com> + * + * Many thanks to linaro-mm-sig list, and specially + * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and + * Daniel Vetter <daniel@ffwll.ch> for their support in creation and + * refining of this idea. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __DMA_BUF_H__ +#define __DMA_BUF_H__ + +#include <linux/file.h> +#include <linux/err.h> +#include <linux/scatterlist.h> +#include <linux/list.h> +#include <linux/dma-mapping.h> +#include <linux/fs.h> + +struct device; +struct dma_buf; +struct dma_buf_attachment; + +/** + * struct dma_buf_ops - operations possible on struct dma_buf + * @attach: [optional] allows different devices to 'attach' themselves to the + * given buffer. It might return -EBUSY to signal that backing storage + * is already allocated and incompatible with the requirements + * of requesting device. + * @detach: [optional] detach a given device from this buffer. + * @map_dma_buf: returns list of scatter pages allocated, increases usecount + * of the buffer. Requires atleast one attach to be called + * before. Returned sg list should already be mapped into + * _device_ address space. This call may sleep. May also return + * -EINTR. Should return -EINVAL if attach hasn't been called yet. + * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter + * pages. + * @release: release this buffer; to be called after the last dma_buf_put. + * @begin_cpu_access: [optional] called before cpu access to invalidate cpu + * caches and allocate backing storage (if not yet done) + * respectively pin the objet into memory. 
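
A quick driver-side illustration of the attribute helpers above: declare a dma_attrs on the stack, set the attribute of interest, and hand it to one of the *_attrs mapping variants. When CONFIG_HAVE_DMA_ATTRS is not set, both dma_set_attr() and the _attrs wrappers (defined near the end of dma-mapping.h later in this section) quietly fall back to the plain calls, so the sketch is harmless either way; the device and buffer are assumed.

#include <linux/dma-mapping.h>

static dma_addr_t example_map_buffer(struct device *dev, void *buf,
                                     size_t size)
{
        DEFINE_DMA_ATTRS(attrs);

        /* Ask for weakly ordered accesses on this mapping where the
         * architecture supports the attribute; a no-op elsewhere. */
        dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);

        return dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE, &attrs);
}
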
+ * @end_cpu_access: [optional] called after cpu access to flush cashes. + * @kmap_atomic: maps a page from the buffer into kernel address + * space, users may not block until the subsequent unmap call. + * This callback must not sleep. + * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer. + * This Callback must not sleep. + * @kmap: maps a page from the buffer into kernel address space. + * @kunmap: [optional] unmaps a page from the buffer. + * @mmap: used to expose the backing storage to userspace. Note that the + * mapping needs to be coherent - if the exporter doesn't directly + * support this, it needs to fake coherency by shooting down any ptes + * when transitioning away from the cpu domain. + */ +struct dma_buf_ops { + int (*attach)(struct dma_buf *, struct device *, + struct dma_buf_attachment *); + + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + + /* For {map,unmap}_dma_buf below, any specific buffer attributes + * required should get added to device_dma_parameters accessible + * via dev->dma_params. + */ + struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, + enum dma_data_direction); + void (*unmap_dma_buf)(struct dma_buf_attachment *, + struct sg_table *, + enum dma_data_direction); + /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY + * if the call would block. + */ + + /* after final dma_buf_put() */ + void (*release)(struct dma_buf *); + + int (*begin_cpu_access)(struct dma_buf *, size_t, size_t, + enum dma_data_direction); + void (*end_cpu_access)(struct dma_buf *, size_t, size_t, + enum dma_data_direction); + void *(*kmap_atomic)(struct dma_buf *, unsigned long); + void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); + void *(*kmap)(struct dma_buf *, unsigned long); + void (*kunmap)(struct dma_buf *, unsigned long, void *); + + int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); +}; + +/** + * struct dma_buf - shared buffer object + * @size: size of the buffer + * @file: file pointer used for sharing buffers across, and for refcounting. + * @attachments: list of dma_buf_attachment that denotes all devices attached. + * @ops: dma_buf_ops associated with this buffer object. + * @priv: exporter specific private data for this buffer object. + */ +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + /* mutex to serialize list manipulation and attach/detach */ + struct mutex lock; + void *priv; +}; + +/** + * struct dma_buf_attachment - holds device-buffer attachment data + * @dmabuf: buffer for this attachment. + * @dev: device attached to the buffer. + * @node: list of dma_buf_attachment. + * @priv: exporter specific attachment data. + * + * This structure holds the attachment information between the dma_buf buffer + * and its user device(s). The list contains one attachment struct per device + * attached to the buffer. + */ +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + void *priv; +}; + +/** + * get_dma_buf - convenience wrapper for get_file. + * @dmabuf: [in] pointer to dma_buf + * + * Increments the reference count on the dma-buf, needed in case of drivers + * that either need to create additional references to the dmabuf on the + * kernel side. For example, an exporter that needs to keep a dmabuf ptr + * so that subsequent exports don't create a new dmabuf. 
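
On the exporting side, the minimum is a dma_buf_ops with the map/unmap/release callbacks described above plus a call to dma_buf_export(), which is declared just below. The sketch here is a hedged outline only: the sg_table construction and the dma_map_sg() against the attaching device are elided, the example_* names are made up, and a real exporter must also supply the kmap/mmap callbacks the core expects.

#include <linux/dma-buf.h>
#include <linux/slab.h>

struct example_buffer {
        struct sg_table *sgt;   /* backing pages, built by the exporter */
};

static struct sg_table *example_map_dma_buf(struct dma_buf_attachment *attach,
                                            enum dma_data_direction dir)
{
        struct example_buffer *buf = attach->dmabuf->priv;

        /* A real exporter would dma_map_sg() against attach->dev here. */
        return buf->sgt;
}

static void example_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
{
}

static void example_release(struct dma_buf *dmabuf)
{
        kfree(dmabuf->priv);
}

static const struct dma_buf_ops example_dma_buf_ops = {
        .map_dma_buf   = example_map_dma_buf,
        .unmap_dma_buf = example_unmap_dma_buf,
        .release       = example_release,
};

static struct dma_buf *example_export(struct example_buffer *buf, size_t size)
{
        /* O_RDWR here is just an illustrative choice of file flags. */
        return dma_buf_export(buf, &example_dma_buf_ops, size, O_RDWR);
}
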
+ */ +static inline void get_dma_buf(struct dma_buf *dmabuf) +{ + get_file(dmabuf->file); +} + +#ifdef CONFIG_DMA_SHARED_BUFFER +struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, + struct device *dev); +void dma_buf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *dmabuf_attach); +struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops, + size_t size, int flags); +int dma_buf_fd(struct dma_buf *dmabuf, int flags); +struct dma_buf *dma_buf_get(int fd); +void dma_buf_put(struct dma_buf *dmabuf); + +struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, + enum dma_data_direction); +void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, + enum dma_data_direction); +int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len, + enum dma_data_direction dir); +void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len, + enum dma_data_direction dir); +void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); +void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); +void *dma_buf_kmap(struct dma_buf *, unsigned long); +void dma_buf_kunmap(struct dma_buf *, unsigned long, void *); + +int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, + unsigned long); +#else + +static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, + struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + +static inline void dma_buf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *dmabuf_attach) +{ + return; +} + +static inline struct dma_buf *dma_buf_export(void *priv, + const struct dma_buf_ops *ops, + size_t size, int flags) +{ + return ERR_PTR(-ENODEV); +} + +static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags) +{ + return -ENODEV; +} + +static inline struct dma_buf *dma_buf_get(int fd) +{ + return ERR_PTR(-ENODEV); +} + +static inline void dma_buf_put(struct dma_buf *dmabuf) +{ + return; +} + +static inline struct sg_table *dma_buf_map_attachment( + struct dma_buf_attachment *attach, enum dma_data_direction write) +{ + return ERR_PTR(-ENODEV); +} + +static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, + struct sg_table *sg, enum dma_data_direction dir) +{ + return; +} + +static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + size_t start, size_t len, + enum dma_data_direction dir) +{ + return -ENODEV; +} + +static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf, + size_t start, size_t len, + enum dma_data_direction dir) +{ +} + +static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, + unsigned long pnum) +{ + return NULL; +} + +static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, + unsigned long pnum, void *vaddr) +{ +} + +static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum) +{ + return NULL; +} + +static inline void dma_buf_kunmap(struct dma_buf *dmabuf, + unsigned long pnum, void *vaddr) +{ +} + +static inline int dma_buf_mmap(struct dma_buf *dmabuf, + struct vm_area_struct *vma, + unsigned long pgoff) +{ + return -ENODEV; +} +#endif /* CONFIG_DMA_SHARED_BUFFER */ + +#endif /* __DMA_BUF_H__ */ diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h new file mode 100644 index 00000000..171ad8ae --- /dev/null +++ b/include/linux/dma-debug.h @@ -0,0 +1,181 @@ +/* + * Copyright (C) 2008 Advanced Micro Devices, Inc. 
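
The importer-side flow uses only the calls declared above, in the order get, attach, map. A hedged sketch, with error handling kept simple and the device pointer assumed to come from the importing driver:

#include <linux/err.h>
#include <linux/dma-buf.h>

/* Import a buffer shared via 'fd' and map it for DMA by 'dev'. */
static struct sg_table *example_import(struct device *dev, int fd,
                                       struct dma_buf_attachment **out_attach)
{
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return ERR_CAST(dmabuf);

        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach)) {
                dma_buf_put(dmabuf);
                return ERR_CAST(attach);
        }

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                dma_buf_detach(dmabuf, attach);
                dma_buf_put(dmabuf);
                return ERR_CAST(sgt);
        }

        *out_attach = attach;
        return sgt;     /* already mapped into 'dev's address space */
}
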
+ * + * Author: Joerg Roedel <joerg.roedel@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __DMA_DEBUG_H +#define __DMA_DEBUG_H + +#include <linux/types.h> + +struct device; +struct scatterlist; +struct bus_type; + +#ifdef CONFIG_DMA_API_DEBUG + +extern void dma_debug_add_bus(struct bus_type *bus); + +extern void dma_debug_init(u32 num_entries); + +extern int dma_debug_resize_entries(u32 num_entries); + +extern void debug_dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + int direction, dma_addr_t dma_addr, + bool map_single); + +extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, bool map_single); + +extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction); + +extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, int dir); + +extern void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt); + +extern void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr); + +extern void debug_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + int direction); + +extern void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction); + +extern void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction); + +extern void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, int direction); + +extern void debug_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_dump_mappings(struct device *dev); + +#else /* CONFIG_DMA_API_DEBUG */ + +static inline void dma_debug_add_bus(struct bus_type *bus) +{ +} + +static inline void dma_debug_init(u32 num_entries) +{ +} + +static inline int dma_debug_resize_entries(u32 num_entries) +{ + return 0; +} + +static inline void debug_dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + int direction, dma_addr_t dma_addr, + bool map_single) +{ +} + +static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, + bool map_single) +{ +} + +static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction) +{ +} + +static inline void debug_dma_unmap_sg(struct device *dev, + struct scatterlist *sglist, + int nelems, int dir) +{ +} + +static inline void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt) +{ +} + +static 
inline void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr) +{ +} + +static inline void debug_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction) +{ +} + +static inline void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction) +{ +} + +static inline void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, + int nelems, int direction) +{ +} + +static inline void debug_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, + int nelems, int direction) +{ +} + +static inline void debug_dma_dump_mappings(struct device *dev) +{ +} + +#endif /* CONFIG_DMA_API_DEBUG */ + +#endif /* __DMA_DEBUG_H */ diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h new file mode 100644 index 00000000..95b6a82f --- /dev/null +++ b/include/linux/dma-direction.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_DMA_DIRECTION_H +#define _LINUX_DMA_DIRECTION_H +/* + * These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts. + */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; +#endif diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h new file mode 100644 index 00000000..dfc099e5 --- /dev/null +++ b/include/linux/dma-mapping.h @@ -0,0 +1,237 @@ +#ifndef _LINUX_DMA_MAPPING_H +#define _LINUX_DMA_MAPPING_H + +#include <linux/string.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/dma-attrs.h> +#include <linux/dma-direction.h> +#include <linux/scatterlist.h> + +struct dma_map_ops { + void* (*alloc)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs); + void (*free)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs); + int (*mmap)(struct device *, struct vm_area_struct *, + void *, dma_addr_t, size_t, struct dma_attrs *attrs); + + dma_addr_t (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs); + void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs); + int (*map_sg)(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs); + void (*unmap_sg)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + struct dma_attrs *attrs); + void (*sync_single_for_cpu)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_single_for_device)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_sg_for_cpu)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + void (*sync_sg_for_device)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); + int (*dma_supported)(struct device *dev, u64 mask); + 
int (*set_dma_mask)(struct device *dev, u64 mask); +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK + u64 (*get_required_mask)(struct device *dev); +#endif + int is_phys; +}; + +#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) + +#define DMA_MASK_NONE 0x0ULL + +static inline int valid_dma_direction(int dma_direction) +{ + return ((dma_direction == DMA_BIDIRECTIONAL) || + (dma_direction == DMA_TO_DEVICE) || + (dma_direction == DMA_FROM_DEVICE)); +} + +static inline int is_device_dma_capable(struct device *dev) +{ + return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; +} + +#ifdef CONFIG_HAS_DMA +#include <asm/dma-mapping.h> +#else +#include <asm-generic/dma-mapping-broken.h> +#endif + +static inline u64 dma_get_mask(struct device *dev) +{ + if (dev && dev->dma_mask && *dev->dma_mask) + return *dev->dma_mask; + return DMA_BIT_MASK(32); +} + +#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK +int dma_set_coherent_mask(struct device *dev, u64 mask); +#else +static inline int dma_set_coherent_mask(struct device *dev, u64 mask) +{ + if (!dma_supported(dev, mask)) + return -EIO; + dev->coherent_dma_mask = mask; + return 0; +} +#endif + +extern u64 dma_get_required_mask(struct device *dev); + +static inline unsigned int dma_get_max_seg_size(struct device *dev) +{ + return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536; +} + +static inline unsigned int dma_set_max_seg_size(struct device *dev, + unsigned int size) +{ + if (dev->dma_parms) { + dev->dma_parms->max_segment_size = size; + return 0; + } else + return -EIO; +} + +static inline unsigned long dma_get_seg_boundary(struct device *dev) +{ + return dev->dma_parms ? + dev->dma_parms->segment_boundary_mask : 0xffffffff; +} + +static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) +{ + if (dev->dma_parms) { + dev->dma_parms->segment_boundary_mask = mask; + return 0; + } else + return -EIO; +} + +static inline void *dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} + +#ifdef CONFIG_HAS_DMA +static inline int dma_get_cache_alignment(void) +{ +#ifdef ARCH_DMA_MINALIGN + return ARCH_DMA_MINALIGN; +#endif + return 1; +} +#endif + +/* flags for the coherent memory api */ +#define DMA_MEMORY_MAP 0x01 +#define DMA_MEMORY_IO 0x02 +#define DMA_MEMORY_INCLUDES_CHILDREN 0x04 +#define DMA_MEMORY_EXCLUSIVE 0x08 + +#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY +static inline int +dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, + dma_addr_t device_addr, size_t size, int flags) +{ + return 0; +} + +static inline void +dma_release_declared_memory(struct device *dev) +{ +} + +static inline void * +dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size) +{ + return ERR_PTR(-EBUSY); +} +#endif + +/* + * Managed DMA API + */ +extern void *dmam_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); +extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle); +extern void *dmam_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); +extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle); +#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY +extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, + dma_addr_t device_addr, size_t size, + int 
flags); +extern void dmam_release_declared_memory(struct device *dev); +#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ +static inline int dmam_declare_coherent_memory(struct device *dev, + dma_addr_t bus_addr, dma_addr_t device_addr, + size_t size, gfp_t gfp) +{ + return 0; +} + +static inline void dmam_release_declared_memory(struct device *dev) +{ +} +#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ + +#ifndef CONFIG_HAVE_DMA_ATTRS +struct dma_attrs; + +#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \ + dma_map_single(dev, cpu_addr, size, dir) + +#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \ + dma_unmap_single(dev, dma_addr, size, dir) + +#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \ + dma_map_sg(dev, sgl, nents, dir) + +#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \ + dma_unmap_sg(dev, sgl, nents, dir) + +#endif /* CONFIG_HAVE_DMA_ATTRS */ + +#ifdef CONFIG_NEED_DMA_MAP_STATE +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#else +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) +#define dma_unmap_addr(PTR, ADDR_NAME) (0) +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) +#define dma_unmap_len(PTR, LEN_NAME) (0) +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) +#endif + +#endif diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h new file mode 100644 index 00000000..57c9a8ae --- /dev/null +++ b/include/linux/dma_remapping.h @@ -0,0 +1,52 @@ +#ifndef _DMA_REMAPPING_H +#define _DMA_REMAPPING_H + +/* + * VT-d hardware uses 4KiB page size regardless of host page size. + */ +#define VTD_PAGE_SHIFT (12) +#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) +#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) +#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) + +#define VTD_STRIDE_SHIFT (9) +#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) + +#define DMA_PTE_READ (1) +#define DMA_PTE_WRITE (2) +#define DMA_PTE_LARGE_PAGE (1 << 7) +#define DMA_PTE_SNP (1 << 11) + +#define CONTEXT_TT_MULTI_LEVEL 0 +#define CONTEXT_TT_DEV_IOTLB 1 +#define CONTEXT_TT_PASS_THROUGH 2 + +struct intel_iommu; +struct dmar_domain; +struct root_entry; + + +#ifdef CONFIG_INTEL_IOMMU +extern void free_dmar_iommu(struct intel_iommu *iommu); +extern int iommu_calculate_agaw(struct intel_iommu *iommu); +extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); +extern int dmar_disabled; +extern int intel_iommu_enabled; +#else +static inline int iommu_calculate_agaw(struct intel_iommu *iommu) +{ + return 0; +} +static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) +{ + return 0; +} +static inline void free_dmar_iommu(struct intel_iommu *iommu) +{ +} +#define dmar_disabled (1) +#define intel_iommu_enabled (0) +#endif + + +#endif diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h new file mode 100644 index 00000000..f9a2e5e6 --- /dev/null +++ b/include/linux/dmaengine.h @@ -0,0 +1,1002 @@ +/* + * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. 
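A note on the dma-mapping.h hunk above: the DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() pair exists so that per-buffer unmap bookkeeping compiles away entirely when CONFIG_NEED_DMA_MAP_STATE is not set. A minimal sketch of the usual pattern follows; the structure and function names are hypothetical and error paths are trimmed.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* hypothetical per-buffer bookkeeping in a driver ring */
struct my_tx_slot {
	void *buf;
	DEFINE_DMA_UNMAP_ADDR(dma);	/* becomes empty if CONFIG_NEED_DMA_MAP_STATE=n */
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static int my_map_slot(struct device *dev, struct my_tx_slot *slot, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, slot->buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	dma_unmap_addr_set(slot, dma, addr);
	dma_unmap_len_set(slot, maplen, len);
	return 0;
}

static void my_unmap_slot(struct device *dev, struct my_tx_slot *slot)
{
	dma_unmap_single(dev, dma_unmap_addr(slot, dma),
			 dma_unmap_len(slot, maplen), DMA_TO_DEVICE);
}

On configurations where the platform never needs the saved state, the accessors collapse to constants and the struct carries no extra fields.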
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ +#ifndef LINUX_DMAENGINE_H +#define LINUX_DMAENGINE_H + +#include <linux/device.h> +#include <linux/uio.h> +#include <linux/bug.h> +#include <linux/scatterlist.h> +#include <linux/bitmap.h> +#include <linux/types.h> +#include <asm/page.h> + +/** + * typedef dma_cookie_t - an opaque DMA cookie + * + * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code + */ +typedef s32 dma_cookie_t; +#define DMA_MIN_COOKIE 1 +#define DMA_MAX_COOKIE INT_MAX + +#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) + +/** + * enum dma_status - DMA transaction status + * @DMA_SUCCESS: transaction completed successfully + * @DMA_IN_PROGRESS: transaction not yet processed + * @DMA_PAUSED: transaction is paused + * @DMA_ERROR: transaction failed + */ +enum dma_status { + DMA_SUCCESS, + DMA_IN_PROGRESS, + DMA_PAUSED, + DMA_ERROR, +}; + +/** + * enum dma_transaction_type - DMA transaction types/indexes + * + * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is + * automatically set as dma devices are registered. + */ +enum dma_transaction_type { + DMA_MEMCPY, + DMA_XOR, + DMA_PQ, + DMA_XOR_VAL, + DMA_PQ_VAL, + DMA_MEMSET, + DMA_INTERRUPT, + DMA_SG, + DMA_PRIVATE, + DMA_ASYNC_TX, + DMA_SLAVE, + DMA_CYCLIC, + DMA_INTERLEAVE, +/* last transaction type for creation of the capabilities mask */ + DMA_TX_TYPE_END, +}; + +/** + * enum dma_transfer_direction - dma transfer mode and direction indicator + * @DMA_MEM_TO_MEM: Async/Memcpy mode + * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device + * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory + * @DMA_DEV_TO_DEV: Slave mode & From Device to Device + */ +enum dma_transfer_direction { + DMA_MEM_TO_MEM, + DMA_MEM_TO_DEV, + DMA_DEV_TO_MEM, + DMA_DEV_TO_DEV, + DMA_TRANS_NONE, +}; + +/** + * Interleaved Transfer Request + * ---------------------------- + * A chunk is collection of contiguous bytes to be transfered. + * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG). + * ICGs may or maynot change between chunks. + * A FRAME is the smallest series of contiguous {chunk,icg} pairs, + * that when repeated an integral number of times, specifies the transfer. + * A transfer template is specification of a Frame, the number of times + * it is to be repeated and other per-transfer attributes. + * + * Practically, a client driver would have ready a template for each + * type of transfer it is going to need during its lifetime and + * set only 'src_start' and 'dst_start' before submitting the requests. + * + * + * | Frame-1 | Frame-2 | ~ | Frame-'numf' | + * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...| + * + * == Chunk size + * ... 
ICG + */ + +/** + * struct data_chunk - Element of scatter-gather list that makes a frame. + * @size: Number of bytes to read from source. + * size_dst := fn(op, size_src), so doesn't mean much for destination. + * @icg: Number of bytes to jump after last src/dst address of this + * chunk and before first src/dst address for next chunk. + * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false. + * Ignored for src(assumed 0), if src_inc is true and src_sgl is false. + */ +struct data_chunk { + size_t size; + size_t icg; +}; + +/** + * struct dma_interleaved_template - Template to convey DMAC the transfer pattern + * and attributes. + * @src_start: Bus address of source for the first chunk. + * @dst_start: Bus address of destination for the first chunk. + * @dir: Specifies the type of Source and Destination. + * @src_inc: If the source address increments after reading from it. + * @dst_inc: If the destination address increments after writing to it. + * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read). + * Otherwise, source is read contiguously (icg ignored). + * Ignored if src_inc is false. + * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write). + * Otherwise, destination is filled contiguously (icg ignored). + * Ignored if dst_inc is false. + * @numf: Number of frames in this template. + * @frame_size: Number of chunks in a frame i.e, size of sgl[]. + * @sgl: Array of {chunk,icg} pairs that make up a frame. + */ +struct dma_interleaved_template { + dma_addr_t src_start; + dma_addr_t dst_start; + enum dma_transfer_direction dir; + bool src_inc; + bool dst_inc; + bool src_sgl; + bool dst_sgl; + size_t numf; + size_t frame_size; + struct data_chunk sgl[0]; +}; + +/** + * enum dma_ctrl_flags - DMA flags to augment operation preparation, + * control completion, and communicate status. + * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of + * this transaction + * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client + * acknowledges receipt, i.e. has has a chance to establish any dependency + * chains + * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) + * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) + * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single + * (if not set, do the source dma-unmapping as page) + * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single + * (if not set, do the destination dma-unmapping as page) + * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q + * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P + * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as + * sources that were the result of a previous operation, in the case of a PQ + * operation it continues the calculation with new sources + * @DMA_PREP_FENCE - tell the driver that subsequent operations depend + * on the result of this operation + */ +enum dma_ctrl_flags { + DMA_PREP_INTERRUPT = (1 << 0), + DMA_CTRL_ACK = (1 << 1), + DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), + DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), + DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), + DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), + DMA_PREP_PQ_DISABLE_P = (1 << 6), + DMA_PREP_PQ_DISABLE_Q = (1 << 7), + DMA_PREP_CONTINUE = (1 << 8), + DMA_PREP_FENCE = (1 << 9), +}; + +/** + * enum dma_ctrl_cmd - DMA operations that can optionally be exercised + * on a running channel. 
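To make the interleaved-template description above concrete, here is a hedged sketch of a single strided, memory-to-device transfer: one chunk per frame, with the gap between chunks expressed as the ICG. Everything beyond the dmaengine symbols (the helper name, the stride/chunk parameters, the absence of error handling) is hypothetical, and it assumes the channel's device actually implements device_prep_interleaved_dma().

#include <linux/dmaengine.h>
#include <linux/slab.h>

static struct dma_async_tx_descriptor *
prep_strided_tx(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst_fifo,
		size_t chunk, size_t stride, size_t nframes)
{
	struct dma_interleaved_template *xt;

	/* one chunk per frame, so sgl[] needs one entry; assumes stride >= chunk */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst_fifo;
	xt->dir = DMA_MEM_TO_DEV;
	xt->src_inc = true;		/* walk through memory... */
	xt->src_sgl = true;		/* ...honouring the ICG below */
	xt->dst_inc = false;		/* device FIFO address stays fixed */
	xt->numf = nframes;
	xt->frame_size = 1;
	xt->sgl[0].size = chunk;
	xt->sgl[0].icg = stride - chunk;

	/*
	 * Whether 'xt' may be freed immediately after prep depends on the
	 * engine; template lifetime handling is left out of this sketch.
	 */
	return chan->device->device_prep_interleaved_dma(chan, xt,
							 DMA_PREP_INTERRUPT);
}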
+ * @DMA_TERMINATE_ALL: terminate all ongoing transfers + * @DMA_PAUSE: pause ongoing transfers + * @DMA_RESUME: resume paused transfer + * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers + * that need to runtime reconfigure the slave channels (as opposed to passing + * configuration data in statically from the platform). An additional + * argument of struct dma_slave_config must be passed in with this + * command. + * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller + * into external start mode. + */ +enum dma_ctrl_cmd { + DMA_TERMINATE_ALL, + DMA_PAUSE, + DMA_RESUME, + DMA_SLAVE_CONFIG, + FSLDMA_EXTERNAL_START, +}; + +/** + * enum sum_check_bits - bit position of pq_check_flags + */ +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +/** + * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations + * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise + * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise + */ +enum sum_check_flags { + SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P), + SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q), +}; + + +/** + * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. + * See linux/cpumask.h + */ +typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; + +/** + * struct dma_chan_percpu - the per-CPU part of struct dma_chan + * @memcpy_count: transaction counter + * @bytes_transferred: byte counter + */ + +struct dma_chan_percpu { + /* stats */ + unsigned long memcpy_count; + unsigned long bytes_transferred; +}; + +/** + * struct dma_chan - devices supply DMA channels, clients use them + * @device: ptr to the dma device who supplies this channel, always !%NULL + * @cookie: last cookie value returned to client + * @completed_cookie: last completed cookie for this channel + * @chan_id: channel ID for sysfs + * @dev: class device for sysfs + * @device_node: used to add this to the device chan list + * @local: per-cpu pointer to a struct dma_chan_percpu + * @client-count: how many clients are using this channel + * @table_count: number of appearances in the mem-to-mem allocation table + * @private: private data for certain client-channel associations + */ +struct dma_chan { + struct dma_device *device; + dma_cookie_t cookie; + dma_cookie_t completed_cookie; + + /* sysfs */ + int chan_id; + struct dma_chan_dev *dev; + + struct list_head device_node; + struct dma_chan_percpu __percpu *local; + int client_count; + int table_count; + void *private; +}; + +/** + * struct dma_chan_dev - relate sysfs device node to backing channel device + * @chan - driver channel device + * @device - sysfs device + * @dev_id - parent dma_device dev_id + * @idr_ref - reference count to gate release of dma_device dev_id + */ +struct dma_chan_dev { + struct dma_chan *chan; + struct device device; + int dev_id; + atomic_t *idr_ref; +}; + +/** + * enum dma_slave_buswidth - defines bus with of the DMA slave + * device, source or target buses + */ +enum dma_slave_buswidth { + DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, + DMA_SLAVE_BUSWIDTH_1_BYTE = 1, + DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_4_BYTES = 4, + DMA_SLAVE_BUSWIDTH_8_BYTES = 8, +}; + +/** + * struct dma_slave_config - dma slave channel runtime config + * @direction: whether the data shall go in or out on this slave + * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are + * legal values, DMA_BIDIRECTIONAL is not acceptable since we + * need to differentiate source and target addresses. 
+ * @src_addr: this is the physical address where DMA slave data + * should be read (RX), if the source is memory this argument is + * ignored. + * @dst_addr: this is the physical address where DMA slave data + * should be written (TX), if the source is memory this argument + * is ignored. + * @src_addr_width: this is the width in bytes of the source (RX) + * register where DMA data shall be read. If the source + * is memory this may be ignored depending on architecture. + * Legal values: 1, 2, 4, 8. + * @dst_addr_width: same as src_addr_width but for destination + * target (TX) mutatis mutandis. + * @src_maxburst: the maximum number of words (note: words, as in + * units of the src_addr_width member, not bytes) that can be sent + * in one burst to the device. Typically something like half the + * FIFO depth on I/O peripherals so you don't overflow it. This + * may or may not be applicable on memory sources. + * @dst_maxburst: same as src_maxburst but for destination target + * mutatis mutandis. + * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill + * with 'true' if peripheral should be flow controller. Direction will be + * selected at Runtime. + * + * This struct is passed in as configuration data to a DMA engine + * in order to set up a certain channel for DMA transport at runtime. + * The DMA device/engine has to provide support for an additional + * command in the channel config interface, DMA_SLAVE_CONFIG + * and this struct will then be passed in as an argument to the + * DMA engine device_control() function. + * + * The rationale for adding configuration information to this struct + * is as follows: if it is likely that most DMA slave controllers in + * the world will support the configuration option, then make it + * generic. If not: if it is fixed so that it be sent in static from + * the platform data, then prefer to do that. Else, if it is neither + * fixed at runtime, nor generic enough (such as bus mastership on + * some CPU family and whatnot) then create a custom slave config + * struct and pass that, then make this config a member of that + * struct, if applicable. + */ +struct dma_slave_config { + enum dma_transfer_direction direction; + dma_addr_t src_addr; + dma_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + u32 src_maxburst; + u32 dst_maxburst; + bool device_fc; +}; + +static inline const char *dma_chan_name(struct dma_chan *chan) +{ + return dev_name(&chan->dev->device); +} + +void dma_chan_cleanup(struct kref *kref); + +/** + * typedef dma_filter_fn - callback filter for dma_request_channel + * @chan: channel to be reviewed + * @filter_param: opaque parameter passed through dma_request_channel + * + * When this optional parameter is specified in a call to dma_request_channel a + * suitable channel is passed to this routine for further dispositioning before + * being returned. Where 'suitable' indicates a non-busy channel that + * satisfies the given capability mask. It returns 'true' to indicate that the + * channel is suitable. 
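Since the filter callback is easiest to understand next to the request path, here is a hedged sketch of how a client might pair one with dma_request_channel() (declared further down in this header) and then push a dma_slave_config to the channel. The filter criterion, FIFO address, and burst size are made up; note that the direction field is typed as enum dma_transfer_direction, so DMA_MEM_TO_DEV is used here for the memory-to-peripheral case.

#include <linux/dmaengine.h>

/* hypothetical filter: match on driver-provided private data */
static bool my_filter(struct dma_chan *chan, void *param)
{
	return chan->private == param;
}

static struct dma_chan *my_get_tx_chan(void *match, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, my_filter, match);
	if (!chan)
		return NULL;

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}
	return chan;
}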
+ */ +typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); + +typedef void (*dma_async_tx_callback)(void *dma_async_param); +/** + * struct dma_async_tx_descriptor - async transaction descriptor + * ---dma generic offload fields--- + * @cookie: tracking cookie for this transaction, set to -EBUSY if + * this tx is sitting on a dependency list + * @flags: flags to augment operation preparation, control completion, and + * communicate status + * @phys: physical address of the descriptor + * @chan: target channel for this operation + * @tx_submit: set the prepared descriptor(s) to be executed by the engine + * @callback: routine to call after this operation is complete + * @callback_param: general parameter to pass to the callback routine + * ---async_tx api specific fields--- + * @next: at completion submit this descriptor + * @parent: pointer to the next level up in the dependency chain + * @lock: protect the parent and next pointers + */ +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ + dma_addr_t phys; + struct dma_chan *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); + dma_async_tx_callback callback; + void *callback_param; +#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH + struct dma_async_tx_descriptor *next; + struct dma_async_tx_descriptor *parent; + spinlock_t lock; +#endif +}; + +#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH +static inline void txd_lock(struct dma_async_tx_descriptor *txd) +{ +} +static inline void txd_unlock(struct dma_async_tx_descriptor *txd) +{ +} +static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) +{ + BUG(); +} +static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) +{ +} +static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) +{ +} +static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) +{ + return NULL; +} +static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) +{ + return NULL; +} + +#else +static inline void txd_lock(struct dma_async_tx_descriptor *txd) +{ + spin_lock_bh(&txd->lock); +} +static inline void txd_unlock(struct dma_async_tx_descriptor *txd) +{ + spin_unlock_bh(&txd->lock); +} +static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) +{ + txd->next = next; + next->parent = txd; +} +static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) +{ + txd->parent = NULL; +} +static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) +{ + txd->next = NULL; +} +static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) +{ + return txd->parent; +} +static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) +{ + return txd->next; +} +#endif + +/** + * struct dma_tx_state - filled in to report the status of + * a transfer. + * @last: last completed DMA cookie + * @used: last issued DMA cookie (i.e. 
the one in progress) + * @residue: the remaining number of bytes left to transmit + * on the selected transfer for states DMA_IN_PROGRESS and + * DMA_PAUSED if this is implemented in the driver, else 0 + */ +struct dma_tx_state { + dma_cookie_t last; + dma_cookie_t used; + u32 residue; +}; + +/** + * struct dma_device - info on the entity supplying DMA services + * @chancnt: how many DMA channels are supported + * @privatecnt: how many DMA channels are requested by dma_request_channel + * @channels: the list of struct dma_chan + * @global_node: list_head for global dma_device_list + * @cap_mask: one or more dma_capability flags + * @max_xor: maximum number of xor sources, 0 if no capability + * @max_pq: maximum number of PQ sources and PQ-continue capability + * @copy_align: alignment shift for memcpy operations + * @xor_align: alignment shift for xor operations + * @pq_align: alignment shift for pq operations + * @fill_align: alignment shift for memset operations + * @dev_id: unique device ID + * @dev: struct device reference for dma mapping api + * @device_alloc_chan_resources: allocate resources and return the + * number of allocated descriptors + * @device_free_chan_resources: release DMA channel's resources + * @device_prep_dma_memcpy: prepares a memcpy operation + * @device_prep_dma_xor: prepares a xor operation + * @device_prep_dma_xor_val: prepares a xor validation operation + * @device_prep_dma_pq: prepares a pq operation + * @device_prep_dma_pq_val: prepares a pqzero_sum operation + * @device_prep_dma_memset: prepares a memset operation + * @device_prep_dma_interrupt: prepares an end of chain interrupt operation + * @device_prep_slave_sg: prepares a slave dma operation + * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. + * The function takes a buffer of size buf_len. The callback function will + * be called after period_len bytes have been transferred. + * @device_prep_interleaved_dma: Transfer expression in a generic way. 
+ * @device_control: manipulate all pending operations on a channel, returns + * zero or error code + * @device_tx_status: poll for transaction completion, the optional + * txstate parameter can be supplied with a pointer to get a + * struct with auxiliary transfer status information, otherwise the call + * will just return a simple status code + * @device_issue_pending: push pending transactions to hardware + */ +struct dma_device { + + unsigned int chancnt; + unsigned int privatecnt; + struct list_head channels; + struct list_head global_node; + dma_cap_mask_t cap_mask; + unsigned short max_xor; + unsigned short max_pq; + u8 copy_align; + u8 xor_align; + u8 pq_align; + u8 fill_align; + #define DMA_HAS_PQ_CONTINUE (1 << 15) + + int dev_id; + struct device *dev; + + int (*device_alloc_chan_resources)(struct dma_chan *chan); + void (*device_free_chan_resources)(struct dma_chan *chan); + + struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_xor)( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( + struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, + size_t len, enum sum_check_flags *result, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq)( + struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( + struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_memset)( + struct dma_chan *chan, dma_addr_t dest, int value, size_t len, + unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( + struct dma_chan *chan, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_sg)( + struct dma_chan *chan, + struct scatterlist *dst_sg, unsigned int dst_nents, + struct scatterlist *src_sg, unsigned int src_nents, + unsigned long flags); + + struct dma_async_tx_descriptor *(*device_prep_slave_sg)( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context); + struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + void *context); + struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( + struct dma_chan *chan, struct dma_interleaved_template *xt, + unsigned long flags); + int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg); + + enum dma_status (*device_tx_status)(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate); + void (*device_issue_pending)(struct dma_chan *chan); +}; + +static inline int dmaengine_device_control(struct dma_chan *chan, + enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + return chan->device->device_control(chan, cmd, arg); +} + +static inline int dmaengine_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, + (unsigned long)config); +} + +static inline 
struct dma_async_tx_descriptor *dmaengine_prep_slave_single( + struct dma_chan *chan, void *buf, size_t len, + enum dma_transfer_direction dir, unsigned long flags) +{ + struct scatterlist sg; + sg_init_one(&sg, buf, len); + + return chan->device->device_prep_slave_sg(chan, &sg, 1, + dir, flags, NULL); +} + +static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + return chan->device->device_prep_slave_sg(chan, sgl, sg_len, + dir, flags, NULL); +} + +static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir) +{ + return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, + period_len, dir, NULL); +} + +static inline int dmaengine_terminate_all(struct dma_chan *chan) +{ + return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); +} + +static inline int dmaengine_pause(struct dma_chan *chan) +{ + return dmaengine_device_control(chan, DMA_PAUSE, 0); +} + +static inline int dmaengine_resume(struct dma_chan *chan) +{ + return dmaengine_device_control(chan, DMA_RESUME, 0); +} + +static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc) +{ + return desc->tx_submit(desc); +} + +static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) +{ + size_t mask; + + if (!align) + return true; + mask = (1 << align) - 1; + if (mask & (off1 | off2 | len)) + return false; + return true; +} + +static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->copy_align, off1, off2, len); +} + +static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->xor_align, off1, off2, len); +} + +static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->pq_align, off1, off2, len); +} + +static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->fill_align, off1, off2, len); +} + +static inline void +dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) +{ + dma->max_pq = maxpq; + if (has_pq_continue) + dma->max_pq |= DMA_HAS_PQ_CONTINUE; +} + +static inline bool dmaf_continue(enum dma_ctrl_flags flags) +{ + return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; +} + +static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) +{ + enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; + + return (flags & mask) == mask; +} + +static inline bool dma_dev_has_pq_continue(struct dma_device *dma) +{ + return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; +} + +static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) +{ + return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; +} + +/* dma_maxpq - reduce maxpq in the face of continued operations + * @dma - dma device with PQ capability + * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set + * + * When an engine does not support native continuation we need 3 extra + * source slots to reuse P and Q with the following coefficients: + * 1/ {00} * P : remove P from Q', but use it as a source for P' + * 2/ {01} * Q : use Q to continue Q' calculation + * 
3/ {00} * Q : subtract Q from P' to cancel (2) + * + * In the case where P is disabled we only need 1 extra source: + * 1/ {01} * Q : use Q to continue Q' calculation + */ +static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) +{ + if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) + return dma_dev_to_maxpq(dma); + else if (dmaf_p_disabled_continue(flags)) + return dma_dev_to_maxpq(dma) - 1; + else if (dmaf_continue(flags)) + return dma_dev_to_maxpq(dma) - 3; + BUG(); +} + +/* --- public DMA engine API --- */ + +#ifdef CONFIG_DMA_ENGINE +void dmaengine_get(void); +void dmaengine_put(void); +#else +static inline void dmaengine_get(void) +{ +} +static inline void dmaengine_put(void) +{ +} +#endif + +#ifdef CONFIG_NET_DMA +#define net_dmaengine_get() dmaengine_get() +#define net_dmaengine_put() dmaengine_put() +#else +static inline void net_dmaengine_get(void) +{ +} +static inline void net_dmaengine_put(void) +{ +} +#endif + +#ifdef CONFIG_ASYNC_TX_DMA +#define async_dmaengine_get() dmaengine_get() +#define async_dmaengine_put() dmaengine_put() +#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH +#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) +#else +#define async_dma_find_channel(type) dma_find_channel(type) +#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */ +#else +static inline void async_dmaengine_get(void) +{ +} +static inline void async_dmaengine_put(void) +{ +} +static inline struct dma_chan * +async_dma_find_channel(enum dma_transaction_type type) +{ + return NULL; +} +#endif /* CONFIG_ASYNC_TX_DMA */ + +dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, + void *dest, void *src, size_t len); +dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, + struct page *page, unsigned int offset, void *kdata, size_t len); +dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, + struct page *dest_pg, unsigned int dest_off, struct page *src_pg, + unsigned int src_off, size_t len); +void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, + struct dma_chan *chan); + +static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) +{ + tx->flags |= DMA_CTRL_ACK; +} + +static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) +{ + tx->flags &= ~DMA_CTRL_ACK; +} + +static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) +{ + return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; +} + +#define first_dma_cap(mask) __first_dma_cap(&(mask)) +static inline int __first_dma_cap(const dma_cap_mask_t *srcp) +{ + return min_t(int, DMA_TX_TYPE_END, + find_first_bit(srcp->bits, DMA_TX_TYPE_END)); +} + +#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask)) +static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp) +{ + return min_t(int, DMA_TX_TYPE_END, + find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1)); +} + +#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) +static inline void +__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) +{ + set_bit(tx_type, dstp->bits); +} + +#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) +static inline void +__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) +{ + clear_bit(tx_type, dstp->bits); +} + +#define dma_cap_zero(mask) __dma_cap_zero(&(mask)) +static inline void __dma_cap_zero(dma_cap_mask_t *dstp) +{ + bitmap_zero(dstp->bits, DMA_TX_TYPE_END); +} + +#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) +static inline int +__dma_has_cap(enum dma_transaction_type 
tx_type, dma_cap_mask_t *srcp) +{ + return test_bit(tx_type, srcp->bits); +} + +#define for_each_dma_cap_mask(cap, mask) \ + for ((cap) = first_dma_cap(mask); \ + (cap) < DMA_TX_TYPE_END; \ + (cap) = next_dma_cap((cap), (mask))) + +/** + * dma_async_issue_pending - flush pending transactions to HW + * @chan: target DMA channel + * + * This allows drivers to push copies to HW in batches, + * reducing MMIO writes where possible. + */ +static inline void dma_async_issue_pending(struct dma_chan *chan) +{ + chan->device->device_issue_pending(chan); +} + +#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) + +/** + * dma_async_is_tx_complete - poll for transaction completion + * @chan: DMA channel + * @cookie: transaction identifier to check status of + * @last: returns last completed cookie, can be NULL + * @used: returns last issued cookie, can be NULL + * + * If @last and @used are passed in, upon return they reflect the driver + * internal state and can be used with dma_async_is_complete() to check + * the status of multiple cookies without re-checking hardware state. + */ +static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, + dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) +{ + struct dma_tx_state state; + enum dma_status status; + + status = chan->device->device_tx_status(chan, cookie, &state); + if (last) + *last = state.last; + if (used) + *used = state.used; + return status; +} + +#define dma_async_memcpy_complete(chan, cookie, last, used)\ + dma_async_is_tx_complete(chan, cookie, last, used) + +/** + * dma_async_is_complete - test a cookie against chan state + * @cookie: transaction identifier to test status of + * @last_complete: last know completed transaction + * @last_used: last cookie value handed out + * + * dma_async_is_complete() is used in dma_async_memcpy_complete() + * the test logic is separated for lightweight testing of multiple cookies + */ +static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, + dma_cookie_t last_complete, dma_cookie_t last_used) +{ + if (last_complete <= last_used) { + if ((cookie <= last_complete) || (cookie > last_used)) + return DMA_SUCCESS; + } else { + if ((cookie <= last_complete) && (cookie > last_used)) + return DMA_SUCCESS; + } + return DMA_IN_PROGRESS; +} + +static inline void +dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) +{ + if (st) { + st->last = last; + st->used = used; + st->residue = residue; + } +} + +enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); +#ifdef CONFIG_DMA_ENGINE +enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); +void dma_issue_pending_all(void); +struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); +void dma_release_channel(struct dma_chan *chan); +#else +static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) +{ + return DMA_SUCCESS; +} +static inline void dma_issue_pending_all(void) +{ +} +static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, + dma_filter_fn fn, void *fn_param) +{ + return NULL; +} +static inline void dma_release_channel(struct dma_chan *chan) +{ +} +#endif + +/* --- DMA device --- */ + +int dma_async_device_register(struct dma_device *device); +void dma_async_device_unregister(struct dma_device *device); +void dma_run_dependencies(struct dma_async_tx_descriptor *tx); +struct dma_chan *dma_find_channel(enum dma_transaction_type 
tx_type); +struct dma_chan *net_dma_find_channel(void); +#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) + +/* --- Helper iov-locking functions --- */ + +struct dma_page_list { + char __user *base_address; + int nr_pages; + struct page **pages; +}; + +struct dma_pinned_list { + int nr_iovecs; + struct dma_page_list page_list[0]; +}; + +struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len); +void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list); + +dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, + struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len); +dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, + struct dma_pinned_list *pinned_list, struct page *page, + unsigned int offset, size_t len); + +#endif /* DMAENGINE_H */ diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h new file mode 100644 index 00000000..022e34fc --- /dev/null +++ b/include/linux/dmapool.h @@ -0,0 +1,35 @@ +/* + * include/linux/dmapool.h + * + * Allocation pools for DMAable (coherent) memory. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef LINUX_DMAPOOL_H +#define LINUX_DMAPOOL_H + +#include <asm/io.h> +#include <asm/scatterlist.h> + +struct dma_pool *dma_pool_create(const char *name, struct device *dev, + size_t size, size_t align, size_t allocation); + +void dma_pool_destroy(struct dma_pool *pool); + +void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, + dma_addr_t *handle); + +void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); + +/* + * Managed DMA pool + */ +struct dma_pool *dmam_pool_create(const char *name, struct device *dev, + size_t size, size_t align, size_t allocation); +void dmam_pool_destroy(struct dma_pool *pool); + +#endif + diff --git a/include/linux/dmar.h b/include/linux/dmar.h new file mode 100644 index 00000000..731a6097 --- /dev/null +++ b/include/linux/dmar.h @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. 
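On the dmapool.h hunk above: the pool API is the usual answer when a driver needs many small, identically sized coherent buffers (descriptors, command blocks) rather than one dma_alloc_coherent() region per object. A rough sketch, with a hypothetical descriptor layout and no claim about the alignment a real device would require:

#include <linux/dmapool.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct my_desc {			/* hypothetical hardware descriptor */
	__le32 ctrl;
	__le32 len;
	__le64 buf;
};

static int my_desc_roundtrip(struct device *dev)
{
	struct dma_pool *pool;
	struct my_desc *desc;
	dma_addr_t phys;

	pool = dma_pool_create("my_desc", dev, sizeof(struct my_desc), 16, 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &phys);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'phys' to the hardware, fill 'desc' from the CPU ... */

	dma_pool_free(pool, desc, phys);
	dma_pool_destroy(pool);
	return 0;
}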
+ * + * Copyright (C) Ashok Raj <ashok.raj@intel.com> + * Copyright (C) Shaohua Li <shaohua.li@intel.com> + */ + +#ifndef __DMAR_H__ +#define __DMAR_H__ + +#include <linux/acpi.h> +#include <linux/types.h> +#include <linux/msi.h> +#include <linux/irqreturn.h> + +struct acpi_dmar_header; + +/* DMAR Flags */ +#define DMAR_INTR_REMAP 0x1 +#define DMAR_X2APIC_OPT_OUT 0x2 + +struct intel_iommu; +#ifdef CONFIG_DMAR_TABLE +extern struct acpi_table_header *dmar_tbl; +struct dmar_drhd_unit { + struct list_head list; /* list of drhd units */ + struct acpi_dmar_header *hdr; /* ACPI header */ + u64 reg_base_addr; /* register base address*/ + struct pci_dev **devices; /* target device array */ + int devices_cnt; /* target device count */ + u16 segment; /* PCI domain */ + u8 ignored:1; /* ignore drhd */ + u8 include_all:1; + struct intel_iommu *iommu; +}; + +extern struct list_head dmar_drhd_units; + +#define for_each_drhd_unit(drhd) \ + list_for_each_entry(drhd, &dmar_drhd_units, list) + +#define for_each_active_iommu(i, drhd) \ + list_for_each_entry(drhd, &dmar_drhd_units, list) \ + if (i=drhd->iommu, drhd->ignored) {} else + +#define for_each_iommu(i, drhd) \ + list_for_each_entry(drhd, &dmar_drhd_units, list) \ + if (i=drhd->iommu, 0) {} else + +extern int dmar_table_init(void); +extern int dmar_dev_scope_init(void); + +/* Intel IOMMU detection */ +extern int detect_intel_iommu(void); +extern int enable_drhd_fault_handling(void); + +extern int parse_ioapics_under_ir(void); +extern int alloc_iommu(struct dmar_drhd_unit *); +#else +static inline int detect_intel_iommu(void) +{ + return -ENODEV; +} + +static inline int dmar_table_init(void) +{ + return -ENODEV; +} +static inline int enable_drhd_fault_handling(void) +{ + return -1; +} +#endif /* !CONFIG_DMAR_TABLE */ + +struct irte { + union { + struct { + __u64 present : 1, + fpd : 1, + dst_mode : 1, + redir_hint : 1, + trigger_mode : 1, + dlvry_mode : 3, + avail : 4, + __reserved_1 : 4, + vector : 8, + __reserved_2 : 8, + dest_id : 32; + }; + __u64 low; + }; + + union { + struct { + __u64 sid : 16, + sq : 2, + svt : 2, + __reserved_3 : 44; + }; + __u64 high; + }; +}; + +#ifdef CONFIG_IRQ_REMAP +extern int intr_remapping_enabled; +extern int intr_remapping_supported(void); +extern int enable_intr_remapping(void); +extern void disable_intr_remapping(void); +extern int reenable_intr_remapping(int); + +extern int get_irte(int irq, struct irte *entry); +extern int modify_irte(int irq, struct irte *irte_modified); +extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); +extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, + u16 sub_handle); +extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); +extern int free_irte(int irq); + +extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); +extern struct intel_iommu *map_ioapic_to_ir(int apic); +extern struct intel_iommu *map_hpet_to_ir(u8 id); +extern int set_ioapic_sid(struct irte *irte, int apic); +extern int set_hpet_sid(struct irte *irte, u8 id); +extern int set_msi_sid(struct irte *irte, struct pci_dev *dev); +#else +static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) +{ + return -1; +} +static inline int modify_irte(int irq, struct irte *irte_modified) +{ + return -1; +} +static inline int free_irte(int irq) +{ + return -1; +} +static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle) +{ + return -1; +} +static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, + u16 sub_handle) +{ + return -1; +} +static 
inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) +{ + return NULL; +} +static inline struct intel_iommu *map_ioapic_to_ir(int apic) +{ + return NULL; +} +static inline struct intel_iommu *map_hpet_to_ir(unsigned int hpet_id) +{ + return NULL; +} +static inline int set_ioapic_sid(struct irte *irte, int apic) +{ + return 0; +} +static inline int set_hpet_sid(struct irte *irte, u8 id) +{ + return -1; +} +static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) +{ + return 0; +} + +#define intr_remapping_enabled (0) + +static inline int enable_intr_remapping(void) +{ + return -1; +} + +static inline void disable_intr_remapping(void) +{ +} + +static inline int reenable_intr_remapping(int eim) +{ + return 0; +} +#endif + +enum { + IRQ_REMAP_XAPIC_MODE, + IRQ_REMAP_X2APIC_MODE, +}; + +/* Can't use the common MSI interrupt functions + * since DMAR is not a pci device + */ +struct irq_data; +extern void dmar_msi_unmask(struct irq_data *data); +extern void dmar_msi_mask(struct irq_data *data); +extern void dmar_msi_read(int irq, struct msi_msg *msg); +extern void dmar_msi_write(int irq, struct msi_msg *msg); +extern int dmar_set_interrupt(struct intel_iommu *iommu); +extern irqreturn_t dmar_fault(int irq, void *dev_id); +extern int arch_setup_dmar_msi(unsigned int irq); + +#ifdef CONFIG_INTEL_IOMMU +extern int iommu_detected, no_iommu; +extern struct list_head dmar_rmrr_units; +struct dmar_rmrr_unit { + struct list_head list; /* list of rmrr units */ + struct acpi_dmar_header *hdr; /* ACPI header */ + u64 base_address; /* reserved base address*/ + u64 end_address; /* reserved end address */ + struct pci_dev **devices; /* target devices */ + int devices_cnt; /* target device count */ +}; + +#define for_each_rmrr_units(rmrr) \ + list_for_each_entry(rmrr, &dmar_rmrr_units, list) + +struct dmar_atsr_unit { + struct list_head list; /* list of ATSR units */ + struct acpi_dmar_header *hdr; /* ACPI header */ + struct pci_dev **devices; /* target devices */ + int devices_cnt; /* target device count */ + u8 include_all:1; /* include all ports */ +}; + +int dmar_parse_rmrr_atsr_dev(void); +extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); +extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); +extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, + struct pci_dev ***devices, u16 segment); +extern int intel_iommu_init(void); +#else /* !CONFIG_INTEL_IOMMU: */ +static inline int intel_iommu_init(void) { return -ENODEV; } +static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) +{ + return 0; +} +static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) +{ + return 0; +} +static inline int dmar_parse_rmrr_atsr_dev(void) +{ + return 0; +} +#endif /* CONFIG_INTEL_IOMMU */ + +#endif /* __DMAR_H__ */ diff --git a/include/linux/dmi.h b/include/linux/dmi.h new file mode 100644 index 00000000..f156cca2 --- /dev/null +++ b/include/linux/dmi.h @@ -0,0 +1,139 @@ +#ifndef __DMI_H__ +#define __DMI_H__ + +#include <linux/list.h> +#include <linux/mod_devicetable.h> + +/* enum dmi_field is in mod_devicetable.h */ + +enum dmi_device_type { + DMI_DEV_TYPE_ANY = 0, + DMI_DEV_TYPE_OTHER, + DMI_DEV_TYPE_UNKNOWN, + DMI_DEV_TYPE_VIDEO, + DMI_DEV_TYPE_SCSI, + DMI_DEV_TYPE_ETHERNET, + DMI_DEV_TYPE_TOKENRING, + DMI_DEV_TYPE_SOUND, + DMI_DEV_TYPE_PATA, + DMI_DEV_TYPE_SATA, + DMI_DEV_TYPE_SAS, + DMI_DEV_TYPE_IPMI = -1, + DMI_DEV_TYPE_OEM_STRING = -2, + DMI_DEV_TYPE_DEV_ONBOARD = -3, +}; + +enum dmi_entry_type { + DMI_ENTRY_BIOS = 0, + 
DMI_ENTRY_SYSTEM, + DMI_ENTRY_BASEBOARD, + DMI_ENTRY_CHASSIS, + DMI_ENTRY_PROCESSOR, + DMI_ENTRY_MEM_CONTROLLER, + DMI_ENTRY_MEM_MODULE, + DMI_ENTRY_CACHE, + DMI_ENTRY_PORT_CONNECTOR, + DMI_ENTRY_SYSTEM_SLOT, + DMI_ENTRY_ONBOARD_DEVICE, + DMI_ENTRY_OEMSTRINGS, + DMI_ENTRY_SYSCONF, + DMI_ENTRY_BIOS_LANG, + DMI_ENTRY_GROUP_ASSOC, + DMI_ENTRY_SYSTEM_EVENT_LOG, + DMI_ENTRY_PHYS_MEM_ARRAY, + DMI_ENTRY_MEM_DEVICE, + DMI_ENTRY_32_MEM_ERROR, + DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR, + DMI_ENTRY_MEM_DEV_MAPPED_ADDR, + DMI_ENTRY_BUILTIN_POINTING_DEV, + DMI_ENTRY_PORTABLE_BATTERY, + DMI_ENTRY_SYSTEM_RESET, + DMI_ENTRY_HW_SECURITY, + DMI_ENTRY_SYSTEM_POWER_CONTROLS, + DMI_ENTRY_VOLTAGE_PROBE, + DMI_ENTRY_COOLING_DEV, + DMI_ENTRY_TEMP_PROBE, + DMI_ENTRY_ELECTRICAL_CURRENT_PROBE, + DMI_ENTRY_OOB_REMOTE_ACCESS, + DMI_ENTRY_BIS_ENTRY, + DMI_ENTRY_SYSTEM_BOOT, + DMI_ENTRY_MGMT_DEV, + DMI_ENTRY_MGMT_DEV_COMPONENT, + DMI_ENTRY_MGMT_DEV_THRES, + DMI_ENTRY_MEM_CHANNEL, + DMI_ENTRY_IPMI_DEV, + DMI_ENTRY_SYS_POWER_SUPPLY, + DMI_ENTRY_ADDITIONAL, + DMI_ENTRY_ONBOARD_DEV_EXT, + DMI_ENTRY_MGMT_CONTROLLER_HOST, + DMI_ENTRY_INACTIVE = 126, + DMI_ENTRY_END_OF_TABLE = 127, +}; + +struct dmi_header { + u8 type; + u8 length; + u16 handle; +}; + +struct dmi_device { + struct list_head list; + int type; + const char *name; + void *device_data; /* Type specific data */ +}; + +#ifdef CONFIG_DMI + +struct dmi_dev_onboard { + struct dmi_device dev; + int instance; + int segment; + int bus; + int devfn; +}; + +extern int dmi_check_system(const struct dmi_system_id *list); +const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); +extern const char * dmi_get_system_info(int field); +extern const struct dmi_device * dmi_find_device(int type, const char *name, + const struct dmi_device *from); +extern void dmi_scan_machine(void); +extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); +extern int dmi_name_in_vendors(const char *str); +extern int dmi_name_in_serial(const char *str); +extern int dmi_available; +extern int dmi_walk(void (*decode)(const struct dmi_header *, void *), + void *private_data); +extern bool dmi_match(enum dmi_field f, const char *str); + +#else + +static inline int dmi_check_system(const struct dmi_system_id *list) { return 0; } +static inline const char * dmi_get_system_info(int field) { return NULL; } +static inline const struct dmi_device * dmi_find_device(int type, const char *name, + const struct dmi_device *from) { return NULL; } +static inline void dmi_scan_machine(void) { return; } +static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) +{ + if (yearp) + *yearp = 0; + if (monthp) + *monthp = 0; + if (dayp) + *dayp = 0; + return false; +} +static inline int dmi_name_in_vendors(const char *s) { return 0; } +static inline int dmi_name_in_serial(const char *s) { return 0; } +#define dmi_available 0 +static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), + void *private_data) { return -1; } +static inline bool dmi_match(enum dmi_field f, const char *str) + { return false; } +static inline const struct dmi_system_id * + dmi_first_match(const struct dmi_system_id *list) { return NULL; } + +#endif + +#endif /* __DMI_H__ */ diff --git a/include/linux/dn.h b/include/linux/dn.h new file mode 100644 index 00000000..9c504454 --- /dev/null +++ b/include/linux/dn.h @@ -0,0 +1,146 @@ +#ifndef _LINUX_DN_H +#define _LINUX_DN_H + +#include <linux/types.h> + +/* + + DECnet Data Structures and Constants + +*/ + +/* + * DNPROTO_NSP can't be 
the same as SOL_SOCKET, + * so increment each by one (compared to ULTRIX) + */ +#define DNPROTO_NSP 2 /* NSP protocol number */ +#define DNPROTO_ROU 3 /* Routing protocol number */ +#define DNPROTO_NML 4 /* Net mgt protocol number */ +#define DNPROTO_EVL 5 /* Evl protocol number (usr) */ +#define DNPROTO_EVR 6 /* Evl protocol number (evl) */ +#define DNPROTO_NSPT 7 /* NSP trace protocol number */ + + +#define DN_ADDL 2 +#define DN_MAXADDL 2 /* ULTRIX headers have 20 here, but pathworks has 2 */ +#define DN_MAXOPTL 16 +#define DN_MAXOBJL 16 +#define DN_MAXACCL 40 +#define DN_MAXALIASL 128 +#define DN_MAXNODEL 256 +#define DNBUFSIZE 65023 + +/* + * SET/GET Socket options - must match the DSO_ numbers below + */ +#define SO_CONDATA 1 +#define SO_CONACCESS 2 +#define SO_PROXYUSR 3 +#define SO_LINKINFO 7 + +#define DSO_CONDATA 1 /* Set/Get connect data */ +#define DSO_DISDATA 10 /* Set/Get disconnect data */ +#define DSO_CONACCESS 2 /* Set/Get connect access data */ +#define DSO_ACCEPTMODE 4 /* Set/Get accept mode */ +#define DSO_CONACCEPT 5 /* Accept deferred connection */ +#define DSO_CONREJECT 6 /* Reject deferred connection */ +#define DSO_LINKINFO 7 /* Set/Get link information */ +#define DSO_STREAM 8 /* Set socket type to stream */ +#define DSO_SEQPACKET 9 /* Set socket type to sequenced packet */ +#define DSO_MAXWINDOW 11 /* Maximum window size allowed */ +#define DSO_NODELAY 12 /* Turn off nagle */ +#define DSO_CORK 13 /* Wait for more data! */ +#define DSO_SERVICES 14 /* NSP Services field */ +#define DSO_INFO 15 /* NSP Info field */ +#define DSO_MAX 15 /* Maximum option number */ + + +/* LINK States */ +#define LL_INACTIVE 0 +#define LL_CONNECTING 1 +#define LL_RUNNING 2 +#define LL_DISCONNECTING 3 + +#define ACC_IMMED 0 +#define ACC_DEFER 1 + +#define SDF_WILD 1 /* Wild card object */ +#define SDF_PROXY 2 /* Addr eligible for proxy */ +#define SDF_UICPROXY 4 /* Use uic-based proxy */ + +/* Structures */ + + +struct dn_naddr { + __le16 a_len; + __u8 a_addr[DN_MAXADDL]; /* Two bytes little endian */ +}; + +struct sockaddr_dn { + __u16 sdn_family; + __u8 sdn_flags; + __u8 sdn_objnum; + __le16 sdn_objnamel; + __u8 sdn_objname[DN_MAXOBJL]; + struct dn_naddr sdn_add; +}; +#define sdn_nodeaddrl sdn_add.a_len /* Node address length */ +#define sdn_nodeaddr sdn_add.a_addr /* Node address */ + + + +/* + * DECnet set/get DSO_CONDATA, DSO_DISDATA (optional data) structure + */ +struct optdata_dn { + __le16 opt_status; /* Extended status return */ +#define opt_sts opt_status + __le16 opt_optl; /* Length of user data */ + __u8 opt_data[16]; /* User data */ +}; + +struct accessdata_dn { + __u8 acc_accl; + __u8 acc_acc[DN_MAXACCL]; + __u8 acc_passl; + __u8 acc_pass[DN_MAXACCL]; + __u8 acc_userl; + __u8 acc_user[DN_MAXACCL]; +}; + +/* + * DECnet logical link information structure + */ +struct linkinfo_dn { + __u16 idn_segsize; /* Segment size for link */ + __u8 idn_linkstate; /* Logical link state */ +}; + +/* + * Ethernet address format (for DECnet) + */ +union etheraddress { + __u8 dne_addr[6]; /* Full ethernet address */ + struct { + __u8 dne_hiord[4]; /* DECnet HIORD prefix */ + __u8 dne_nodeaddr[2]; /* DECnet node address */ + } dne_remote; +}; + + +/* + * DECnet physical socket address format + */ +struct dn_addr { + __le16 dna_family; /* AF_DECnet */ + union etheraddress dna_netaddr; /* DECnet ethernet address */ +}; + +#define DECNET_IOCTL_BASE 0x89 /* PROTOPRIVATE range */ + +#define SIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, struct dn_naddr) +#define SIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 
0xe1, struct dn_naddr) +#define OSIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, int) +#define OSIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 0xe1, int) + +#endif /* _LINUX_DN_H */ diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h new file mode 100644 index 00000000..3290555a --- /dev/null +++ b/include/linux/dnotify.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_DNOTIFY_H +#define _LINUX_DNOTIFY_H +/* + * Directory notification for Linux + * + * Copyright (C) 2000,2002 Stephen Rothwell + */ + +#include <linux/fs.h> + +struct dnotify_struct { + struct dnotify_struct * dn_next; + __u32 dn_mask; + int dn_fd; + struct file * dn_filp; + fl_owner_t dn_owner; +}; + +#ifdef __KERNEL__ + + +#ifdef CONFIG_DNOTIFY + +#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\ + FS_MODIFY | FS_MODIFY_CHILD |\ + FS_ACCESS | FS_ACCESS_CHILD |\ + FS_ATTRIB | FS_ATTRIB_CHILD |\ + FS_CREATE | FS_DN_RENAME |\ + FS_MOVED_FROM | FS_MOVED_TO) + +extern int dir_notify_enable; +extern void dnotify_flush(struct file *, fl_owner_t); +extern int fcntl_dirnotify(int, struct file *, unsigned long); + +#else + +static inline void dnotify_flush(struct file *filp, fl_owner_t id) +{ +} + +static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) +{ + return -EINVAL; +} + +#endif /* CONFIG_DNOTIFY */ + +#endif /* __KERNEL __ */ + +#endif /* _LINUX_DNOTIFY_H */ diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h new file mode 100644 index 00000000..cc92268a --- /dev/null +++ b/include/linux/dns_resolver.h @@ -0,0 +1,34 @@ +/* + * DNS Resolver upcall management for CIFS DFS and AFS + * Handles host name to IP address resolution and DNS query for AFSDB RR. + * + * Copyright (c) International Business Machines Corp., 2008 + * Author(s): Steve French (sfrench@us.ibm.com) + * Wang Lei (wang840925@gmail.com) + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU Lesser General Public License for more details. 
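For the dn.h structures above, the "Two bytes little endian" comment on dn_naddr is the part that tends to trip people up: a Phase IV DECnet address packs a 6-bit area and a 10-bit node number, stored low byte first. A userspace-flavoured sketch of filling a sockaddr_dn follows; the area, node, and object name are arbitrary examples.

#include <linux/dn.h>
#include <string.h>
#include <sys/socket.h>		/* AF_DECnet */

static void fill_dn_addr(struct sockaddr_dn *sdn)
{
	unsigned short area = 1, node = 10;
	unsigned short addr = (area << 10) | node;

	memset(sdn, 0, sizeof(*sdn));
	sdn->sdn_family = AF_DECnet;
	sdn->sdn_objnamel = 6;
	memcpy(sdn->sdn_objname, "MYTASK", 6);
	sdn->sdn_add.a_len = 2;
	sdn->sdn_add.a_addr[0] = addr & 0xff;		/* low byte first */
	sdn->sdn_add.a_addr[1] = (addr >> 8) & 0xff;
}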
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_DNS_RESOLVER_H +#define _LINUX_DNS_RESOLVER_H + +#ifdef __KERNEL__ + +extern int dns_query(const char *type, const char *name, size_t namelen, + const char *options, char **_result, time_t *_expiry); + +#endif /* KERNEL */ + +#endif /* _LINUX_DNS_RESOLVER_H */ diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h new file mode 100644 index 00000000..82a16527 --- /dev/null +++ b/include/linux/dqblk_qtree.h @@ -0,0 +1,56 @@ +/* + * Definitions of structures and functions for quota formats using trie + */ + +#ifndef _LINUX_DQBLK_QTREE_H +#define _LINUX_DQBLK_QTREE_H + +#include <linux/types.h> + +/* Numbers of blocks needed for updates - we count with the smallest + * possible block size (1024) */ +#define QTREE_INIT_ALLOC 4 +#define QTREE_INIT_REWRITE 2 +#define QTREE_DEL_ALLOC 0 +#define QTREE_DEL_REWRITE 6 + +struct dquot; + +/* Operations */ +struct qtree_fmt_operations { + void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); /* Convert given entry from in memory format to disk one */ + void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); /* Convert given entry from disk format to in memory one */ + int (*is_id)(void *disk, struct dquot *dquot); /* Is this structure for given id? */ +}; + +/* Inmemory copy of version specific information */ +struct qtree_mem_dqinfo { + struct super_block *dqi_sb; /* Sb quota is on */ + int dqi_type; /* Quota type */ + unsigned int dqi_blocks; /* # of blocks in quota file */ + unsigned int dqi_free_blk; /* First block in list of free blocks */ + unsigned int dqi_free_entry; /* First block with free entry */ + unsigned int dqi_blocksize_bits; /* Block size of quota file */ + unsigned int dqi_entry_size; /* Size of quota entry in quota file */ + unsigned int dqi_usable_bs; /* Space usable in block for quota data */ + unsigned int dqi_qtree_depth; /* Precomputed depth of quota tree */ + struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */ +}; + +int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk); +static inline int qtree_depth(struct qtree_mem_dqinfo *info) +{ + unsigned int epb = info->dqi_usable_bs >> 2; + unsigned long long entries = epb; + int i; + + for (i = 1; entries < (1ULL << 32); i++) + entries *= epb; + return i; +} + +#endif /* _LINUX_DQBLK_QTREE_H */ diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h new file mode 100644 index 00000000..3713a723 --- /dev/null +++ b/include/linux/dqblk_v1.h @@ -0,0 +1,17 @@ +/* + * File with in-memory structures of old quota format + */ + +#ifndef _LINUX_DQBLK_V1_H +#define _LINUX_DQBLK_V1_H + +/* Root squash turned on */ +#define V1_DQF_RSQUASH 1 + +/* Numbers of blocks needed for updates */ +#define V1_INIT_ALLOC 1 +#define V1_INIT_REWRITE 1 +#define V1_DEL_ALLOC 0 +#define V1_DEL_REWRITE 2 + +#endif /* _LINUX_DQBLK_V1_H */ diff --git a/include/linux/dqblk_v2.h b/include/linux/dqblk_v2.h new file mode 100644 index 00000000..18000a54 --- /dev/null +++ b/include/linux/dqblk_v2.h @@ -0,0 +1,16 @@ 
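A quick worked example for qtree_depth() in dqblk_qtree.h above: each interior tree block holds dqi_usable_bs / 4 four-byte block references, so with an assumed usable block size of 1024 bytes that is 256 references per block, and the loop keeps multiplying until 256^depth covers the full 32-bit id space, giving a depth of 4. The standalone check below just replays that arithmetic; the 1024-byte figure is an illustration, not something any particular quota format guarantees.

#include <stdio.h>

int main(void)
{
	unsigned int usable_bs = 1024;		/* assumed for illustration */
	unsigned int epb = usable_bs >> 2;	/* 4-byte references per block */
	unsigned long long entries = epb;
	int depth;

	for (depth = 1; entries < (1ULL << 32); depth++)
		entries *= epb;

	printf("epb=%u depth=%d\n", epb, depth);	/* prints: epb=256 depth=4 */
	return 0;
}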
+/* + * Definitions for vfsv0 quota format + */ + +#ifndef _LINUX_DQBLK_V2_H +#define _LINUX_DQBLK_V2_H + +#include <linux/dqblk_qtree.h> + +/* Numbers of blocks needed for updates */ +#define V2_INIT_ALLOC QTREE_INIT_ALLOC +#define V2_INIT_REWRITE QTREE_INIT_REWRITE +#define V2_DEL_ALLOC QTREE_DEL_ALLOC +#define V2_DEL_REWRITE QTREE_DEL_REWRITE + +#endif /* _LINUX_DQBLK_V2_H */ diff --git a/include/linux/dqblk_xfs.h b/include/linux/dqblk_xfs.h new file mode 100644 index 00000000..86552807 --- /dev/null +++ b/include/linux/dqblk_xfs.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 1995-2001,2004 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesset General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef _LINUX_DQBLK_XFS_H +#define _LINUX_DQBLK_XFS_H + +#include <linux/types.h> + +/* + * Disk quota - quotactl(2) commands for the XFS Quota Manager (XQM). + */ + +#define XQM_CMD(x) (('X'<<8)+(x)) /* note: forms first QCMD argument */ +#define XQM_COMMAND(x) (((x) & (0xff<<8)) == ('X'<<8)) /* test if for XFS */ + +#define XQM_USRQUOTA 0 /* system call user quota type */ +#define XQM_GRPQUOTA 1 /* system call group quota type */ +#define XQM_PRJQUOTA 2 /* system call project quota type */ +#define XQM_MAXQUOTAS 3 + +#define Q_XQUOTAON XQM_CMD(1) /* enable accounting/enforcement */ +#define Q_XQUOTAOFF XQM_CMD(2) /* disable accounting/enforcement */ +#define Q_XGETQUOTA XQM_CMD(3) /* get disk limits and usage */ +#define Q_XSETQLIM XQM_CMD(4) /* set disk limits */ +#define Q_XGETQSTAT XQM_CMD(5) /* get quota subsystem status */ +#define Q_XQUOTARM XQM_CMD(6) /* free disk space used by dquots */ +#define Q_XQUOTASYNC XQM_CMD(7) /* delalloc flush, updates dquots */ + +/* + * fs_disk_quota structure: + * + * This contains the current quota information regarding a user/proj/group. + * It is 64-bit aligned, and all the blk units are in BBs (Basic Blocks) of + * 512 bytes. 
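[Editor's note] fs_disk_quota, defined just below, is the structure exchanged with the kernel through quotactl(2) using the XQM command space above. A hedged user-space sketch of reading one user's XFS limits; the device path and uid are placeholders, not values taken from this header:

#include <stdio.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XGETQUOTA, struct fs_disk_quota */

int main(void)
{
	struct fs_disk_quota d = { 0 };
	const char *dev = "/dev/sda1";	/* placeholder: block device of an XFS mount */
	int uid = 1000;			/* placeholder uid */

	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), dev, uid, (void *)&d) < 0) {
		perror("quotactl");
		return 1;
	}
	printf("blocks used: %llu (soft %llu, hard %llu, 512-byte units)\n",
	       (unsigned long long)d.d_bcount,
	       (unsigned long long)d.d_blk_softlimit,
	       (unsigned long long)d.d_blk_hardlimit);
	return 0;
}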
+ */ +#define FS_DQUOT_VERSION 1 /* fs_disk_quota.d_version */ +typedef struct fs_disk_quota { + __s8 d_version; /* version of this structure */ + __s8 d_flags; /* FS_{USER,PROJ,GROUP}_QUOTA */ + __u16 d_fieldmask; /* field specifier */ + __u32 d_id; /* user, project, or group ID */ + __u64 d_blk_hardlimit;/* absolute limit on disk blks */ + __u64 d_blk_softlimit;/* preferred limit on disk blks */ + __u64 d_ino_hardlimit;/* maximum # allocated inodes */ + __u64 d_ino_softlimit;/* preferred inode limit */ + __u64 d_bcount; /* # disk blocks owned by the user */ + __u64 d_icount; /* # inodes owned by the user */ + __s32 d_itimer; /* zero if within inode limits */ + /* if not, we refuse service */ + __s32 d_btimer; /* similar to above; for disk blocks */ + __u16 d_iwarns; /* # warnings issued wrt num inodes */ + __u16 d_bwarns; /* # warnings issued wrt disk blocks */ + __s32 d_padding2; /* padding2 - for future use */ + __u64 d_rtb_hardlimit;/* absolute limit on realtime blks */ + __u64 d_rtb_softlimit;/* preferred limit on RT disk blks */ + __u64 d_rtbcount; /* # realtime blocks owned */ + __s32 d_rtbtimer; /* similar to above; for RT disk blks */ + __u16 d_rtbwarns; /* # warnings issued wrt RT disk blks */ + __s16 d_padding3; /* padding3 - for future use */ + char d_padding4[8]; /* yet more padding */ +} fs_disk_quota_t; + +/* + * These fields are sent to Q_XSETQLIM to specify fields that need to change. + */ +#define FS_DQ_ISOFT (1<<0) +#define FS_DQ_IHARD (1<<1) +#define FS_DQ_BSOFT (1<<2) +#define FS_DQ_BHARD (1<<3) +#define FS_DQ_RTBSOFT (1<<4) +#define FS_DQ_RTBHARD (1<<5) +#define FS_DQ_LIMIT_MASK (FS_DQ_ISOFT | FS_DQ_IHARD | FS_DQ_BSOFT | \ + FS_DQ_BHARD | FS_DQ_RTBSOFT | FS_DQ_RTBHARD) +/* + * These timers can only be set in super user's dquot. For others, timers are + * automatically started and stopped. Superusers timer values set the limits + * for the rest. In case these values are zero, the DQ_{F,B}TIMELIMIT values + * defined below are used. + * These values also apply only to the d_fieldmask field for Q_XSETQLIM. + */ +#define FS_DQ_BTIMER (1<<6) +#define FS_DQ_ITIMER (1<<7) +#define FS_DQ_RTBTIMER (1<<8) +#define FS_DQ_TIMER_MASK (FS_DQ_BTIMER | FS_DQ_ITIMER | FS_DQ_RTBTIMER) + +/* + * Warning counts are set in both super user's dquot and others. For others, + * warnings are set/cleared by the administrators (or automatically by going + * below the soft limit). Superusers warning values set the warning limits + * for the rest. In case these values are zero, the DQ_{F,B}WARNLIMIT values + * defined below are used. + * These values also apply only to the d_fieldmask field for Q_XSETQLIM. + */ +#define FS_DQ_BWARNS (1<<9) +#define FS_DQ_IWARNS (1<<10) +#define FS_DQ_RTBWARNS (1<<11) +#define FS_DQ_WARNS_MASK (FS_DQ_BWARNS | FS_DQ_IWARNS | FS_DQ_RTBWARNS) + +/* + * Accounting values. These can only be set for filesystem with + * non-transactional quotas that require quotacheck(8) in userspace. + */ +#define FS_DQ_BCOUNT (1<<12) +#define FS_DQ_ICOUNT (1<<13) +#define FS_DQ_RTBCOUNT (1<<14) +#define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT) + +/* + * Various flags related to quotactl(2). 
+ */ +#define FS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ +#define FS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */ +#define FS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ +#define FS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ +#define FS_QUOTA_PDQ_ACCT (1<<4) /* project quota accounting */ +#define FS_QUOTA_PDQ_ENFD (1<<5) /* project quota limits enforcement */ + +#define FS_USER_QUOTA (1<<0) /* user quota type */ +#define FS_PROJ_QUOTA (1<<1) /* project quota type */ +#define FS_GROUP_QUOTA (1<<2) /* group quota type */ + +/* + * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system. + * Provides a centralized way to get meta information about the quota subsystem. + * eg. space taken up for user and group quotas, number of dquots currently + * incore. + */ +#define FS_QSTAT_VERSION 1 /* fs_quota_stat.qs_version */ + +/* + * Some basic information about 'quota files'. + */ +typedef struct fs_qfilestat { + __u64 qfs_ino; /* inode number */ + __u64 qfs_nblks; /* number of BBs 512-byte-blks */ + __u32 qfs_nextents; /* number of extents */ +} fs_qfilestat_t; + +typedef struct fs_quota_stat { + __s8 qs_version; /* version number for future changes */ + __u16 qs_flags; /* FS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */ + __s8 qs_pad; /* unused */ + fs_qfilestat_t qs_uquota; /* user quota storage information */ + fs_qfilestat_t qs_gquota; /* group quota storage information */ + __u32 qs_incoredqs; /* number of dquots incore */ + __s32 qs_btimelimit; /* limit for blks timer */ + __s32 qs_itimelimit; /* limit for inodes timer */ + __s32 qs_rtbtimelimit;/* limit for rt blks timer */ + __u16 qs_bwarnlimit; /* limit for num warnings */ + __u16 qs_iwarnlimit; /* limit for num warnings */ +} fs_quota_stat_t; + +#endif /* _LINUX_DQBLK_XFS_H */ diff --git a/include/linux/drbd.h b/include/linux/drbd.h new file mode 100644 index 00000000..9e5f5607 --- /dev/null +++ b/include/linux/drbd.h @@ -0,0 +1,369 @@ +/* + drbd.h + Kernel module for 2.6.x Kernels + + This file is part of DRBD by Philipp Reisner and Lars Ellenberg. + + Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. + Copyright (C) 2001-2008, Philipp Reisner <philipp.reisner@linbit.com>. + Copyright (C) 2001-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. + + drbd is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + drbd is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with drbd; see the file COPYING. If not, write to + the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + +*/ +#ifndef DRBD_H +#define DRBD_H +#include <linux/connector.h> +#include <asm/types.h> + +#ifdef __KERNEL__ +#include <linux/types.h> +#include <asm/byteorder.h> +#else +#include <sys/types.h> +#include <sys/wait.h> +#include <limits.h> + +/* Although the Linux source code makes a difference between + generic endianness and the bitfields' endianness, there is no + architecture as of Linux-2.6.24-rc4 where the bitfields' endianness + does not match the generic endianness. 
*/ + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#elif __BYTE_ORDER == __BIG_ENDIAN +#define __BIG_ENDIAN_BITFIELD +#else +# error "sorry, weird endianness on this box" +#endif + +#endif + + +extern const char *drbd_buildtag(void); +#define REL_VERSION "8.3.11" +#define API_VERSION 88 +#define PRO_VERSION_MIN 86 +#define PRO_VERSION_MAX 96 + + +enum drbd_io_error_p { + EP_PASS_ON, /* FIXME should the better be named "Ignore"? */ + EP_CALL_HELPER, + EP_DETACH +}; + +enum drbd_fencing_p { + FP_DONT_CARE, + FP_RESOURCE, + FP_STONITH +}; + +enum drbd_disconnect_p { + DP_RECONNECT, + DP_DROP_NET_CONF, + DP_FREEZE_IO +}; + +enum drbd_after_sb_p { + ASB_DISCONNECT, + ASB_DISCARD_YOUNGER_PRI, + ASB_DISCARD_OLDER_PRI, + ASB_DISCARD_ZERO_CHG, + ASB_DISCARD_LEAST_CHG, + ASB_DISCARD_LOCAL, + ASB_DISCARD_REMOTE, + ASB_CONSENSUS, + ASB_DISCARD_SECONDARY, + ASB_CALL_HELPER, + ASB_VIOLENTLY +}; + +enum drbd_on_no_data { + OND_IO_ERROR, + OND_SUSPEND_IO +}; + +enum drbd_on_congestion { + OC_BLOCK, + OC_PULL_AHEAD, + OC_DISCONNECT, +}; + +/* KEEP the order, do not delete or insert. Only append. */ +enum drbd_ret_code { + ERR_CODE_BASE = 100, + NO_ERROR = 101, + ERR_LOCAL_ADDR = 102, + ERR_PEER_ADDR = 103, + ERR_OPEN_DISK = 104, + ERR_OPEN_MD_DISK = 105, + ERR_DISK_NOT_BDEV = 107, + ERR_MD_NOT_BDEV = 108, + ERR_DISK_TO_SMALL = 111, + ERR_MD_DISK_TO_SMALL = 112, + ERR_BDCLAIM_DISK = 114, + ERR_BDCLAIM_MD_DISK = 115, + ERR_MD_IDX_INVALID = 116, + ERR_IO_MD_DISK = 118, + ERR_MD_INVALID = 119, + ERR_AUTH_ALG = 120, + ERR_AUTH_ALG_ND = 121, + ERR_NOMEM = 122, + ERR_DISCARD = 123, + ERR_DISK_CONFIGURED = 124, + ERR_NET_CONFIGURED = 125, + ERR_MANDATORY_TAG = 126, + ERR_MINOR_INVALID = 127, + ERR_INTR = 129, /* EINTR */ + ERR_RESIZE_RESYNC = 130, + ERR_NO_PRIMARY = 131, + ERR_SYNC_AFTER = 132, + ERR_SYNC_AFTER_CYCLE = 133, + ERR_PAUSE_IS_SET = 134, + ERR_PAUSE_IS_CLEAR = 135, + ERR_PACKET_NR = 137, + ERR_NO_DISK = 138, + ERR_NOT_PROTO_C = 139, + ERR_NOMEM_BITMAP = 140, + ERR_INTEGRITY_ALG = 141, /* DRBD 8.2 only */ + ERR_INTEGRITY_ALG_ND = 142, /* DRBD 8.2 only */ + ERR_CPU_MASK_PARSE = 143, /* DRBD 8.2 only */ + ERR_CSUMS_ALG = 144, /* DRBD 8.2 only */ + ERR_CSUMS_ALG_ND = 145, /* DRBD 8.2 only */ + ERR_VERIFY_ALG = 146, /* DRBD 8.2 only */ + ERR_VERIFY_ALG_ND = 147, /* DRBD 8.2 only */ + ERR_CSUMS_RESYNC_RUNNING= 148, /* DRBD 8.2 only */ + ERR_VERIFY_RUNNING = 149, /* DRBD 8.2 only */ + ERR_DATA_NOT_CURRENT = 150, + ERR_CONNECTED = 151, /* DRBD 8.3 only */ + ERR_PERM = 152, + ERR_NEED_APV_93 = 153, + ERR_STONITH_AND_PROT_A = 154, + ERR_CONG_NOT_PROTO_A = 155, + ERR_PIC_AFTER_DEP = 156, + ERR_PIC_PEER_DEP = 157, + + /* insert new ones above this line */ + AFTER_LAST_ERR_CODE +}; + +#define DRBD_PROT_A 1 +#define DRBD_PROT_B 2 +#define DRBD_PROT_C 3 + +enum drbd_role { + R_UNKNOWN = 0, + R_PRIMARY = 1, /* role */ + R_SECONDARY = 2, /* role */ + R_MASK = 3, +}; + +/* The order of these constants is important. + * The lower ones (<C_WF_REPORT_PARAMS) indicate + * that there is no socket! + * >=C_WF_REPORT_PARAMS ==> There is a socket + */ +enum drbd_conns { + C_STANDALONE, + C_DISCONNECTING, /* Temporal state on the way to StandAlone. */ + C_UNCONNECTED, /* >= C_UNCONNECTED -> inc_net() succeeds */ + + /* These temporal states are all used on the way + * from >= C_CONNECTED to Unconnected. + * The 'disconnect reason' states + * I do not allow to change between them. 
*/ + C_TIMEOUT, + C_BROKEN_PIPE, + C_NETWORK_FAILURE, + C_PROTOCOL_ERROR, + C_TEAR_DOWN, + + C_WF_CONNECTION, + C_WF_REPORT_PARAMS, /* we have a socket */ + C_CONNECTED, /* we have introduced each other */ + C_STARTING_SYNC_S, /* starting full sync by admin request. */ + C_STARTING_SYNC_T, /* starting full sync by admin request. */ + C_WF_BITMAP_S, + C_WF_BITMAP_T, + C_WF_SYNC_UUID, + + /* All SyncStates are tested with this comparison + * xx >= C_SYNC_SOURCE && xx <= C_PAUSED_SYNC_T */ + C_SYNC_SOURCE, + C_SYNC_TARGET, + C_VERIFY_S, + C_VERIFY_T, + C_PAUSED_SYNC_S, + C_PAUSED_SYNC_T, + + C_AHEAD, + C_BEHIND, + + C_MASK = 31 +}; + +enum drbd_disk_state { + D_DISKLESS, + D_ATTACHING, /* In the process of reading the meta-data */ + D_FAILED, /* Becomes D_DISKLESS as soon as we told it the peer */ + /* when >= D_FAILED it is legal to access mdev->bc */ + D_NEGOTIATING, /* Late attaching state, we need to talk to the peer */ + D_INCONSISTENT, + D_OUTDATED, + D_UNKNOWN, /* Only used for the peer, never for myself */ + D_CONSISTENT, /* Might be D_OUTDATED, might be D_UP_TO_DATE ... */ + D_UP_TO_DATE, /* Only this disk state allows applications' IO ! */ + D_MASK = 15 +}; + +union drbd_state { +/* According to gcc's docs is the ... + * The order of allocation of bit-fields within a unit (C90 6.5.2.1, C99 6.7.2.1). + * Determined by ABI. + * pointed out by Maxim Uvarov q<muvarov@ru.mvista.com> + * even though we transmit as "cpu_to_be32(state)", + * the offsets of the bitfields still need to be swapped + * on different endianness. + */ + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned role:2 ; /* 3/4 primary/secondary/unknown */ + unsigned peer:2 ; /* 3/4 primary/secondary/unknown */ + unsigned conn:5 ; /* 17/32 cstates */ + unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ + unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ + unsigned susp:1 ; /* 2/2 IO suspended no/yes (by user) */ + unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ + unsigned peer_isp:1 ; + unsigned user_isp:1 ; + unsigned susp_nod:1 ; /* IO suspended because no data */ + unsigned susp_fen:1 ; /* IO suspended because fence peer handler runs*/ + unsigned _pad:9; /* 0 unused */ +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned _pad:9; + unsigned susp_fen:1 ; + unsigned susp_nod:1 ; + unsigned user_isp:1 ; + unsigned peer_isp:1 ; + unsigned aftr_isp:1 ; /* isp .. 
imposed sync pause */ + unsigned susp:1 ; /* 2/2 IO suspended no/yes */ + unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ + unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ + unsigned conn:5 ; /* 17/32 cstates */ + unsigned peer:2 ; /* 3/4 primary/secondary/unknown */ + unsigned role:2 ; /* 3/4 primary/secondary/unknown */ +#else +# error "this endianness is not supported" +#endif + }; + unsigned int i; +}; + +enum drbd_state_rv { + SS_CW_NO_NEED = 4, + SS_CW_SUCCESS = 3, + SS_NOTHING_TO_DO = 2, + SS_SUCCESS = 1, + SS_UNKNOWN_ERROR = 0, /* Used to sleep longer in _drbd_request_state */ + SS_TWO_PRIMARIES = -1, + SS_NO_UP_TO_DATE_DISK = -2, + SS_NO_LOCAL_DISK = -4, + SS_NO_REMOTE_DISK = -5, + SS_CONNECTED_OUTDATES = -6, + SS_PRIMARY_NOP = -7, + SS_RESYNC_RUNNING = -8, + SS_ALREADY_STANDALONE = -9, + SS_CW_FAILED_BY_PEER = -10, + SS_IS_DISKLESS = -11, + SS_DEVICE_IN_USE = -12, + SS_NO_NET_CONFIG = -13, + SS_NO_VERIFY_ALG = -14, /* drbd-8.2 only */ + SS_NEED_CONNECTION = -15, /* drbd-8.2 only */ + SS_LOWER_THAN_OUTDATED = -16, + SS_NOT_SUPPORTED = -17, /* drbd-8.2 only */ + SS_IN_TRANSIENT_STATE = -18, /* Retry after the next state change */ + SS_CONCURRENT_ST_CHG = -19, /* Concurrent cluster side state change! */ + SS_AFTER_LAST_ERROR = -20, /* Keep this at bottom */ +}; + +/* from drbd_strings.c */ +extern const char *drbd_conn_str(enum drbd_conns); +extern const char *drbd_role_str(enum drbd_role); +extern const char *drbd_disk_str(enum drbd_disk_state); +extern const char *drbd_set_st_err_str(enum drbd_state_rv); + +#define SHARED_SECRET_MAX 64 + +#define MDF_CONSISTENT (1 << 0) +#define MDF_PRIMARY_IND (1 << 1) +#define MDF_CONNECTED_IND (1 << 2) +#define MDF_FULL_SYNC (1 << 3) +#define MDF_WAS_UP_TO_DATE (1 << 4) +#define MDF_PEER_OUT_DATED (1 << 5) +#define MDF_CRASHED_PRIMARY (1 << 6) + +enum drbd_uuid_index { + UI_CURRENT, + UI_BITMAP, + UI_HISTORY_START, + UI_HISTORY_END, + UI_SIZE, /* nl-packet: number of dirty bits */ + UI_FLAGS, /* nl-packet: flags */ + UI_EXTENDED_SIZE /* Everything. */ +}; + +enum drbd_timeout_flag { + UT_DEFAULT = 0, + UT_DEGRADED = 1, + UT_PEER_OUTDATED = 2, +}; + +#define UUID_JUST_CREATED ((__u64)4) + +#define DRBD_MAGIC 0x83740267 +#define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC) +#define DRBD_MAGIC_BIG 0x835a +#define BE_DRBD_MAGIC_BIG __constant_cpu_to_be16(DRBD_MAGIC_BIG) + +/* these are of type "int" */ +#define DRBD_MD_INDEX_INTERNAL -1 +#define DRBD_MD_INDEX_FLEX_EXT -2 +#define DRBD_MD_INDEX_FLEX_INT -3 + +/* Start of the new netlink/connector stuff */ + +#define DRBD_NL_CREATE_DEVICE 0x01 +#define DRBD_NL_SET_DEFAULTS 0x02 + + +/* For searching a vacant cn_idx value */ +#define CN_IDX_STEP 6977 + +struct drbd_nl_cfg_req { + int packet_type; + unsigned int drbd_minor; + int flags; + unsigned short tag_list[]; +}; + +struct drbd_nl_cfg_reply { + int packet_type; + unsigned int minor; + int ret_code; /* enum ret_code or set_st_err_t */ + unsigned short tag_list[]; /* only used with get_* calls */ +}; + +#endif diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h new file mode 100644 index 00000000..447c3675 --- /dev/null +++ b/include/linux/drbd_limits.h @@ -0,0 +1,168 @@ +/* + drbd_limits.h + This file is part of DRBD by Philipp Reisner and Lars Ellenberg. +*/ + +/* + * Our current limitations. + * Some of them are hard limits, + * some of them are arbitrary range limits, that make it easier to provide + * feedback about nonsense settings for certain configurable values. 
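[Editor's note] Every tunable in drbd_limits.h below is published as a MIN/MAX/DEF triplet, and a configuration front end is expected to clamp or reject values outside that range. A small illustrative helper, not taken from the DRBD sources, showing how such a triplet is typically consumed:

#include <stdio.h>

/* generic range check in the spirit of the *_MIN/*_MAX/*_DEF triplets */
static long clamp_setting(const char *name, long value, long min, long max, long def)
{
	if (value < min || value > max) {
		fprintf(stderr, "%s=%ld out of range [%ld, %ld], using default %ld\n",
			name, value, min, max, def);
		return def;
	}
	return value;
}

int main(void)
{
	/* e.g. DRBD_TIMEOUT_{MIN,MAX,DEF} below are 1, 600, 60 (centiseconds) */
	long timeout = clamp_setting("timeout", 1200, 1, 600, 60);

	printf("effective timeout: %ld\n", timeout);
	return 0;
}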
+ */ + +#ifndef DRBD_LIMITS_H +#define DRBD_LIMITS_H 1 + +#define DEBUG_RANGE_CHECK 0 + +#define DRBD_MINOR_COUNT_MIN 1 +#define DRBD_MINOR_COUNT_MAX 256 +#define DRBD_MINOR_COUNT_DEF 32 + +#define DRBD_DIALOG_REFRESH_MIN 0 +#define DRBD_DIALOG_REFRESH_MAX 600 + +/* valid port number */ +#define DRBD_PORT_MIN 1 +#define DRBD_PORT_MAX 0xffff + +/* startup { */ + /* if you want more than 3.4 days, disable */ +#define DRBD_WFC_TIMEOUT_MIN 0 +#define DRBD_WFC_TIMEOUT_MAX 300000 +#define DRBD_WFC_TIMEOUT_DEF 0 + +#define DRBD_DEGR_WFC_TIMEOUT_MIN 0 +#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000 +#define DRBD_DEGR_WFC_TIMEOUT_DEF 0 + +#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0 +#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000 +#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0 +/* }*/ + +/* net { */ + /* timeout, unit centi seconds + * more than one minute timeout is not useful */ +#define DRBD_TIMEOUT_MIN 1 +#define DRBD_TIMEOUT_MAX 600 +#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */ + + /* active connection retries when C_WF_CONNECTION */ +#define DRBD_CONNECT_INT_MIN 1 +#define DRBD_CONNECT_INT_MAX 120 +#define DRBD_CONNECT_INT_DEF 10 /* seconds */ + + /* keep-alive probes when idle */ +#define DRBD_PING_INT_MIN 1 +#define DRBD_PING_INT_MAX 120 +#define DRBD_PING_INT_DEF 10 + + /* timeout for the ping packets.*/ +#define DRBD_PING_TIMEO_MIN 1 +#define DRBD_PING_TIMEO_MAX 100 +#define DRBD_PING_TIMEO_DEF 5 + + /* max number of write requests between write barriers */ +#define DRBD_MAX_EPOCH_SIZE_MIN 1 +#define DRBD_MAX_EPOCH_SIZE_MAX 20000 +#define DRBD_MAX_EPOCH_SIZE_DEF 2048 + + /* I don't think that a tcp send buffer of more than 10M is useful */ +#define DRBD_SNDBUF_SIZE_MIN 0 +#define DRBD_SNDBUF_SIZE_MAX (10<<20) +#define DRBD_SNDBUF_SIZE_DEF 0 + +#define DRBD_RCVBUF_SIZE_MIN 0 +#define DRBD_RCVBUF_SIZE_MAX (10<<20) +#define DRBD_RCVBUF_SIZE_DEF 0 + + /* @4k PageSize -> 128kB - 512MB */ +#define DRBD_MAX_BUFFERS_MIN 32 +#define DRBD_MAX_BUFFERS_MAX 131072 +#define DRBD_MAX_BUFFERS_DEF 2048 + + /* @4k PageSize -> 4kB - 512MB */ +#define DRBD_UNPLUG_WATERMARK_MIN 1 +#define DRBD_UNPLUG_WATERMARK_MAX 131072 +#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16) + + /* 0 is disabled. + * 200 should be more than enough even for very short timeouts */ +#define DRBD_KO_COUNT_MIN 0 +#define DRBD_KO_COUNT_MAX 200 +#define DRBD_KO_COUNT_DEF 0 +/* } */ + +/* syncer { */ + /* FIXME allow rate to be zero? */ +#define DRBD_RATE_MIN 1 +/* channel bonding 10 GbE, or other hardware */ +#define DRBD_RATE_MAX (4 << 20) +#define DRBD_RATE_DEF 250 /* kb/second */ + + /* less than 7 would hit performance unnecessarily. + * 3833 is the largest prime that still does fit + * into 64 sectors of activity log */ +#define DRBD_AL_EXTENTS_MIN 7 +#define DRBD_AL_EXTENTS_MAX 3833 +#define DRBD_AL_EXTENTS_DEF 127 + +#define DRBD_AFTER_MIN -1 +#define DRBD_AFTER_MAX 255 +#define DRBD_AFTER_DEF -1 + +/* } */ + +/* drbdsetup XY resize -d Z + * you are free to reduce the device size to nothing, if you want to. + * the upper limit with 64bit kernel, enough ram and flexible meta data + * is 1 PiB, currently. */ +/* DRBD_MAX_SECTORS */ +#define DRBD_DISK_SIZE_SECT_MIN 0 +#define DRBD_DISK_SIZE_SECT_MAX (1 * (2LLU << 40)) +#define DRBD_DISK_SIZE_SECT_DEF 0 /* = disabled = no user size... 
*/ + +#define DRBD_ON_IO_ERROR_DEF EP_PASS_ON +#define DRBD_FENCING_DEF FP_DONT_CARE +#define DRBD_AFTER_SB_0P_DEF ASB_DISCONNECT +#define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT +#define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT +#define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT +#define DRBD_ON_NO_DATA_DEF OND_IO_ERROR +#define DRBD_ON_CONGESTION_DEF OC_BLOCK + +#define DRBD_MAX_BIO_BVECS_MIN 0 +#define DRBD_MAX_BIO_BVECS_MAX 128 +#define DRBD_MAX_BIO_BVECS_DEF 0 + +#define DRBD_C_PLAN_AHEAD_MIN 0 +#define DRBD_C_PLAN_AHEAD_MAX 300 +#define DRBD_C_PLAN_AHEAD_DEF 0 /* RS rate controller disabled by default */ + +#define DRBD_C_DELAY_TARGET_MIN 1 +#define DRBD_C_DELAY_TARGET_MAX 100 +#define DRBD_C_DELAY_TARGET_DEF 10 + +#define DRBD_C_FILL_TARGET_MIN 0 +#define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */ +#define DRBD_C_FILL_TARGET_DEF 0 /* By default disabled -> controlled by delay_target */ + +#define DRBD_C_MAX_RATE_MIN 250 /* kByte/sec */ +#define DRBD_C_MAX_RATE_MAX (4 << 20) +#define DRBD_C_MAX_RATE_DEF 102400 + +#define DRBD_C_MIN_RATE_MIN 0 /* kByte/sec */ +#define DRBD_C_MIN_RATE_MAX (4 << 20) +#define DRBD_C_MIN_RATE_DEF 4096 + +#define DRBD_CONG_FILL_MIN 0 +#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */ +#define DRBD_CONG_FILL_DEF 0 + +#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN +#define DRBD_CONG_EXTENTS_MAX DRBD_AL_EXTENTS_MAX +#define DRBD_CONG_EXTENTS_DEF DRBD_AL_EXTENTS_DEF + +#undef RANGE +#endif diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h new file mode 100644 index 00000000..ab6159e4 --- /dev/null +++ b/include/linux/drbd_nl.h @@ -0,0 +1,160 @@ +/* + PAKET( name, + TYPE ( pn, pr, member ) + ... + ) + + You may never reissue one of the pn arguments +*/ + +#if !defined(NL_PACKET) || !defined(NL_STRING) || !defined(NL_INTEGER) || !defined(NL_BIT) || !defined(NL_INT64) +#error "The macros NL_PACKET, NL_STRING, NL_INTEGER, NL_INT64 and NL_BIT needs to be defined" +#endif + +NL_PACKET(primary, 1, + NL_BIT( 1, T_MAY_IGNORE, primary_force) +) + +NL_PACKET(secondary, 2, ) + +NL_PACKET(disk_conf, 3, + NL_INT64( 2, T_MAY_IGNORE, disk_size) + NL_STRING( 3, T_MANDATORY, backing_dev, 128) + NL_STRING( 4, T_MANDATORY, meta_dev, 128) + NL_INTEGER( 5, T_MANDATORY, meta_dev_idx) + NL_INTEGER( 6, T_MAY_IGNORE, on_io_error) + NL_INTEGER( 7, T_MAY_IGNORE, fencing) + NL_BIT( 37, T_MAY_IGNORE, use_bmbv) + NL_BIT( 53, T_MAY_IGNORE, no_disk_flush) + NL_BIT( 54, T_MAY_IGNORE, no_md_flush) + /* 55 max_bio_size was available in 8.2.6rc2 */ + NL_INTEGER( 56, T_MAY_IGNORE, max_bio_bvecs) + NL_BIT( 57, T_MAY_IGNORE, no_disk_barrier) + NL_BIT( 58, T_MAY_IGNORE, no_disk_drain) +) + +NL_PACKET(detach, 4, ) + +NL_PACKET(net_conf, 5, + NL_STRING( 8, T_MANDATORY, my_addr, 128) + NL_STRING( 9, T_MANDATORY, peer_addr, 128) + NL_STRING( 10, T_MAY_IGNORE, shared_secret, SHARED_SECRET_MAX) + NL_STRING( 11, T_MAY_IGNORE, cram_hmac_alg, SHARED_SECRET_MAX) + NL_STRING( 44, T_MAY_IGNORE, integrity_alg, SHARED_SECRET_MAX) + NL_INTEGER( 14, T_MAY_IGNORE, timeout) + NL_INTEGER( 15, T_MANDATORY, wire_protocol) + NL_INTEGER( 16, T_MAY_IGNORE, try_connect_int) + NL_INTEGER( 17, T_MAY_IGNORE, ping_int) + NL_INTEGER( 18, T_MAY_IGNORE, max_epoch_size) + NL_INTEGER( 19, T_MAY_IGNORE, max_buffers) + NL_INTEGER( 20, T_MAY_IGNORE, unplug_watermark) + NL_INTEGER( 21, T_MAY_IGNORE, sndbuf_size) + NL_INTEGER( 22, T_MAY_IGNORE, ko_count) + NL_INTEGER( 24, T_MAY_IGNORE, after_sb_0p) + NL_INTEGER( 25, T_MAY_IGNORE, after_sb_1p) + NL_INTEGER( 26, T_MAY_IGNORE, after_sb_2p) + NL_INTEGER( 39, T_MAY_IGNORE, 
rr_conflict) + NL_INTEGER( 40, T_MAY_IGNORE, ping_timeo) + NL_INTEGER( 67, T_MAY_IGNORE, rcvbuf_size) + NL_INTEGER( 81, T_MAY_IGNORE, on_congestion) + NL_INTEGER( 82, T_MAY_IGNORE, cong_fill) + NL_INTEGER( 83, T_MAY_IGNORE, cong_extents) + /* 59 addr_family was available in GIT, never released */ + NL_BIT( 60, T_MANDATORY, mind_af) + NL_BIT( 27, T_MAY_IGNORE, want_lose) + NL_BIT( 28, T_MAY_IGNORE, two_primaries) + NL_BIT( 41, T_MAY_IGNORE, always_asbp) + NL_BIT( 61, T_MAY_IGNORE, no_cork) + NL_BIT( 62, T_MANDATORY, auto_sndbuf_size) + NL_BIT( 70, T_MANDATORY, dry_run) +) + +NL_PACKET(disconnect, 6, + NL_BIT( 84, T_MAY_IGNORE, force) +) + +NL_PACKET(resize, 7, + NL_INT64( 29, T_MAY_IGNORE, resize_size) + NL_BIT( 68, T_MAY_IGNORE, resize_force) + NL_BIT( 69, T_MANDATORY, no_resync) +) + +NL_PACKET(syncer_conf, 8, + NL_INTEGER( 30, T_MAY_IGNORE, rate) + NL_INTEGER( 31, T_MAY_IGNORE, after) + NL_INTEGER( 32, T_MAY_IGNORE, al_extents) +/* NL_INTEGER( 71, T_MAY_IGNORE, dp_volume) + * NL_INTEGER( 72, T_MAY_IGNORE, dp_interval) + * NL_INTEGER( 73, T_MAY_IGNORE, throttle_th) + * NL_INTEGER( 74, T_MAY_IGNORE, hold_off_th) + * feature will be reimplemented differently with 8.3.9 */ + NL_STRING( 52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX) + NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) + NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX) + NL_BIT( 65, T_MAY_IGNORE, use_rle) + NL_INTEGER( 75, T_MAY_IGNORE, on_no_data) + NL_INTEGER( 76, T_MAY_IGNORE, c_plan_ahead) + NL_INTEGER( 77, T_MAY_IGNORE, c_delay_target) + NL_INTEGER( 78, T_MAY_IGNORE, c_fill_target) + NL_INTEGER( 79, T_MAY_IGNORE, c_max_rate) + NL_INTEGER( 80, T_MAY_IGNORE, c_min_rate) +) + +NL_PACKET(invalidate, 9, ) +NL_PACKET(invalidate_peer, 10, ) +NL_PACKET(pause_sync, 11, ) +NL_PACKET(resume_sync, 12, ) +NL_PACKET(suspend_io, 13, ) +NL_PACKET(resume_io, 14, ) +NL_PACKET(outdate, 15, ) +NL_PACKET(get_config, 16, ) +NL_PACKET(get_state, 17, + NL_INTEGER( 33, T_MAY_IGNORE, state_i) +) + +NL_PACKET(get_uuids, 18, + NL_STRING( 34, T_MAY_IGNORE, uuids, (UI_SIZE*sizeof(__u64))) + NL_INTEGER( 35, T_MAY_IGNORE, uuids_flags) +) + +NL_PACKET(get_timeout_flag, 19, + NL_BIT( 36, T_MAY_IGNORE, use_degraded) +) + +NL_PACKET(call_helper, 20, + NL_STRING( 38, T_MAY_IGNORE, helper, 32) +) + +/* Tag nr 42 already allocated in drbd-8.1 development. 
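[Editor's note] drbd_nl.h above is an X-macro file: it never compiles on its own, and each includer first defines NL_PACKET/NL_INTEGER/NL_INT64/NL_BIT/NL_STRING to generate enums, structs or tables from the same packet list, which is exactly what drbd_tag_magic.h does next. A reduced, hypothetical illustration of the pattern (not DRBD code, though the three packet names and numbers match the list above):

#include <stdio.h>

/* x-macro list standing in for drbd_nl.h */
#define PACKET_LIST(X) \
	X(primary,   1)    \
	X(secondary, 2)    \
	X(disk_conf, 3)

/* expansion 1: an enum of packet types */
#define AS_ENUM(name, number) P_ ## name = number,
enum packet_types { PACKET_LIST(AS_ENUM) };
#undef AS_ENUM

/* expansion 2: a name table indexed by the same numbers */
#define AS_NAME(name, number) [number] = #name,
static const char *packet_names[] = { PACKET_LIST(AS_NAME) };
#undef AS_NAME

int main(void)
{
	printf("packet %d is \"%s\"\n", P_disk_conf, packet_names[P_disk_conf]);
	return 0;
}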
*/ + +NL_PACKET(sync_progress, 23, + NL_INTEGER( 43, T_MAY_IGNORE, sync_progress) +) + +NL_PACKET(dump_ee, 24, + NL_STRING( 45, T_MAY_IGNORE, dump_ee_reason, 32) + NL_STRING( 46, T_MAY_IGNORE, seen_digest, SHARED_SECRET_MAX) + NL_STRING( 47, T_MAY_IGNORE, calc_digest, SHARED_SECRET_MAX) + NL_INT64( 48, T_MAY_IGNORE, ee_sector) + NL_INT64( 49, T_MAY_IGNORE, ee_block_id) + NL_STRING( 50, T_MAY_IGNORE, ee_data, 32 << 10) +) + +NL_PACKET(start_ov, 25, + NL_INT64( 66, T_MAY_IGNORE, start_sector) +) + +NL_PACKET(new_c_uuid, 26, + NL_BIT( 63, T_MANDATORY, clear_bm) +) + +#ifdef NL_RESPONSE +NL_RESPONSE(return_code_only, 27) +#endif + +#undef NL_PACKET +#undef NL_INTEGER +#undef NL_INT64 +#undef NL_BIT +#undef NL_STRING +#undef NL_RESPONSE diff --git a/include/linux/drbd_tag_magic.h b/include/linux/drbd_tag_magic.h new file mode 100644 index 00000000..81f52f2c --- /dev/null +++ b/include/linux/drbd_tag_magic.h @@ -0,0 +1,84 @@ +#ifndef DRBD_TAG_MAGIC_H +#define DRBD_TAG_MAGIC_H + +#define TT_END 0 +#define TT_REMOVED 0xE000 + +/* declare packet_type enums */ +enum packet_types { +#define NL_PACKET(name, number, fields) P_ ## name = number, +#define NL_RESPONSE(name, number) P_ ## name = number, +#define NL_INTEGER(pn, pr, member) +#define NL_INT64(pn, pr, member) +#define NL_BIT(pn, pr, member) +#define NL_STRING(pn, pr, member, len) +#include "drbd_nl.h" + P_nl_after_last_packet, +}; + +/* These struct are used to deduce the size of the tag lists: */ +#define NL_PACKET(name, number, fields) \ + struct name ## _tag_len_struct { fields }; +#define NL_INTEGER(pn, pr, member) \ + int member; int tag_and_len ## member; +#define NL_INT64(pn, pr, member) \ + __u64 member; int tag_and_len ## member; +#define NL_BIT(pn, pr, member) \ + unsigned char member:1; int tag_and_len ## member; +#define NL_STRING(pn, pr, member, len) \ + unsigned char member[len]; int member ## _len; \ + int tag_and_len ## member; +#include <linux/drbd_nl.h> + +/* declare tag-list-sizes */ +static const int tag_list_sizes[] = { +#define NL_PACKET(name, number, fields) 2 fields , +#define NL_INTEGER(pn, pr, member) + 4 + 4 +#define NL_INT64(pn, pr, member) + 4 + 8 +#define NL_BIT(pn, pr, member) + 4 + 1 +#define NL_STRING(pn, pr, member, len) + 4 + (len) +#include "drbd_nl.h" +}; + +/* The two highest bits are used for the tag type */ +#define TT_MASK 0xC000 +#define TT_INTEGER 0x0000 +#define TT_INT64 0x4000 +#define TT_BIT 0x8000 +#define TT_STRING 0xC000 +/* The next bit indicates if processing of the tag is mandatory */ +#define T_MANDATORY 0x2000 +#define T_MAY_IGNORE 0x0000 +#define TN_MASK 0x1fff +/* The remaining 13 bits are used to enumerate the tags */ + +#define tag_type(T) ((T) & TT_MASK) +#define tag_number(T) ((T) & TN_MASK) + +/* declare tag enums */ +#define NL_PACKET(name, number, fields) fields +enum drbd_tags { +#define NL_INTEGER(pn, pr, member) T_ ## member = pn | TT_INTEGER | pr , +#define NL_INT64(pn, pr, member) T_ ## member = pn | TT_INT64 | pr , +#define NL_BIT(pn, pr, member) T_ ## member = pn | TT_BIT | pr , +#define NL_STRING(pn, pr, member, len) T_ ## member = pn | TT_STRING | pr , +#include "drbd_nl.h" +}; + +struct tag { + const char *name; + int type_n_flags; + int max_len; +}; + +/* declare tag names */ +#define NL_PACKET(name, number, fields) fields +static const struct tag tag_descriptions[] = { +#define NL_INTEGER(pn, pr, member) [ pn ] = { #member, TT_INTEGER | pr, sizeof(int) }, +#define NL_INT64(pn, pr, member) [ pn ] = { #member, TT_INT64 | pr, sizeof(__u64) }, +#define NL_BIT(pn, pr, 
member) [ pn ] = { #member, TT_BIT | pr, sizeof(int) }, +#define NL_STRING(pn, pr, member, len) [ pn ] = { #member, TT_STRING | pr, (len) }, +#include "drbd_nl.h" +}; + +#endif diff --git a/include/linux/ds1286.h b/include/linux/ds1286.h new file mode 100644 index 00000000..45ea0aa0 --- /dev/null +++ b/include/linux/ds1286.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 1998, 1999, 2003 Ralf Baechle + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef __LINUX_DS1286_H +#define __LINUX_DS1286_H + +/********************************************************************** + * register summary + **********************************************************************/ +#define RTC_HUNDREDTH_SECOND 0 +#define RTC_SECONDS 1 +#define RTC_MINUTES 2 +#define RTC_MINUTES_ALARM 3 +#define RTC_HOURS 4 +#define RTC_HOURS_ALARM 5 +#define RTC_DAY 6 +#define RTC_DAY_ALARM 7 +#define RTC_DATE 8 +#define RTC_MONTH 9 +#define RTC_YEAR 10 +#define RTC_CMD 11 +#define RTC_WHSEC 12 +#define RTC_WSEC 13 +#define RTC_UNUSED 14 + +/* RTC_*_alarm is always true if 2 MSBs are set */ +# define RTC_ALARM_DONT_CARE 0xC0 + + +/* + * Bits in the month register + */ +#define RTC_EOSC 0x80 +#define RTC_ESQW 0x40 + +/* + * Bits in the Command register + */ +#define RTC_TDF 0x01 +#define RTC_WAF 0x02 +#define RTC_TDM 0x04 +#define RTC_WAM 0x08 +#define RTC_PU_LVL 0x10 +#define RTC_IBH_LO 0x20 +#define RTC_IPSW 0x40 +#define RTC_TE 0x80 + +#endif /* __LINUX_DS1286_H */ diff --git a/include/linux/ds17287rtc.h b/include/linux/ds17287rtc.h new file mode 100644 index 00000000..d85d3f49 --- /dev/null +++ b/include/linux/ds17287rtc.h @@ -0,0 +1,66 @@ +/* + * ds17287rtc.h - register definitions for the ds1728[57] RTC / CMOS RAM + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * (C) 2003 Guido Guenther <agx@sigxcpu.org> + */ +#ifndef __LINUX_DS17287RTC_H +#define __LINUX_DS17287RTC_H + +#include <linux/rtc.h> /* get the user-level API */ +#include <linux/mc146818rtc.h> + +/* Register A */ +#define DS_REGA_DV2 0x40 /* countdown chain */ +#define DS_REGA_DV1 0x20 /* oscillator enable */ +#define DS_REGA_DV0 0x10 /* bank select */ + +/* bank 1 registers */ +#define DS_B1_MODEL 0x40 /* model number byte */ +#define DS_B1_SN1 0x41 /* serial number byte 1 */ +#define DS_B1_SN2 0x42 /* serial number byte 2 */ +#define DS_B1_SN3 0x43 /* serial number byte 3 */ +#define DS_B1_SN4 0x44 /* serial number byte 4 */ +#define DS_B1_SN5 0x45 /* serial number byte 5 */ +#define DS_B1_SN6 0x46 /* serial number byte 6 */ +#define DS_B1_CRC 0x47 /* CRC byte */ +#define DS_B1_CENTURY 0x48 /* Century byte */ +#define DS_B1_DALARM 0x49 /* date alarm */ +#define DS_B1_XCTRL4A 0x4a /* extendec control register 4a */ +#define DS_B1_XCTRL4B 0x4b /* extendec control register 4b */ +#define DS_B1_RTCADDR2 0x4e /* rtc address 2 */ +#define DS_B1_RTCADDR3 0x4f /* rtc address 3 */ +#define DS_B1_RAMLSB 0x50 /* extended ram LSB */ +#define DS_B1_RAMMSB 0x51 /* extended ram MSB */ +#define DS_B1_RAMDPORT 0x53 /* extended ram data port */ + +/* register details */ +/* extended control register 4a */ +#define DS_XCTRL4A_VRT2 0x80 /* valid ram and time */ +#define DS_XCTRL4A_INCR 0x40 /* increment progress status */ +#define DS_XCTRL4A_BME 0x20 /* burst mode enable */ +#define DS_XCTRL4A_PAB 0x08 /* power active bar ctrl */ +#define DS_XCTRL4A_RF 0x04 /* ram clear flag */ +#define DS_XCTRL4A_WF 0x02 /* wake up alarm flag */ +#define DS_XCTRL4A_KF 0x01 /* kickstart flag */ + +/* interrupt causes */ +#define DS_XCTRL4A_IFS (DS_XCTRL4A_RF|DS_XCTRL4A_WF|DS_XCTRL4A_KF) + +/* extended control register 4b */ +#define DS_XCTRL4B_ABE 0x80 /* auxiliary battery enable */ +#define DS_XCTRL4B_E32K 0x40 /* enable 32.768 kHz Output */ +#define DS_XCTRL4B_CS 0x20 /* crystal select */ +#define DS_XCTRL4B_RCE 0x10 /* ram clear enable */ +#define DS_XCTRL4B_PRS 0x08 /* PAB resec select */ +#define DS_XCTRL4B_RIE 0x04 /* ram clear interrupt enable */ +#define DS_XCTRL4B_WFE 0x02 /* wake up alarm interrupt enable */ +#define DS_XCTRL4B_KFE 0x01 /* kickstart interrupt enable */ + +/* interrupt enable bits */ +#define DS_XCTRL4B_IFES (DS_XCTRL4B_RIE|DS_XCTRL4B_WFE|DS_XCTRL4B_KFE) + +#endif /* __LINUX_DS17287RTC_H */ diff --git a/include/linux/ds2782_battery.h b/include/linux/ds2782_battery.h new file mode 100644 index 00000000..b4e281f6 --- /dev/null +++ b/include/linux/ds2782_battery.h @@ -0,0 +1,8 @@ +#ifndef __LINUX_DS2782_BATTERY_H +#define __LINUX_DS2782_BATTERY_H + +struct ds278x_platform_data { + int rsns; +}; + +#endif diff --git a/include/linux/dtlk.h b/include/linux/dtlk.h new file mode 100644 index 00000000..22a7b9a5 --- /dev/null +++ b/include/linux/dtlk.h @@ -0,0 +1,85 @@ +#define DTLK_MINOR 0 +#define DTLK_IO_EXTENT 0x02 + + /* ioctl's use magic number of 0xa3 */ +#define DTLK_INTERROGATE 0xa390 /* get settings from the DoubleTalk */ +#define DTLK_STATUS 0xa391 /* get status from the DoubleTalk */ + + +#define DTLK_CLEAR 0x18 /* stops speech */ + +#define DTLK_MAX_RETRIES (loops_per_jiffy/(10000/HZ)) + + /* TTS Port Status Flags */ +#define TTS_READABLE 0x80 /* mask for bit which is nonzero if a + byte can be read from the TTS port */ +#define TTS_SPEAKING 0x40 /* mask for SYNC bit, which is nonzero + while DoubleTalk is producing + output with TTS, PCM or CVSD + synthesizers or tone generators + 
(that is, all but LPC) */ +#define TTS_SPEAKING2 0x20 /* mask for SYNC2 bit, + which falls to zero up to 0.4 sec + before speech stops */ +#define TTS_WRITABLE 0x10 /* mask for RDY bit, which when set to + 1, indicates the TTS port is ready + to accept a byte of data. The RDY + bit goes zero 2-3 usec after + writing, and goes 1 again 180-190 + usec later. */ +#define TTS_ALMOST_FULL 0x08 /* mask for AF bit: When set to 1, + indicates that less than 300 free + bytes are available in the TTS + input buffer. AF is always 0 in the + PCM, TGN and CVSD modes. */ +#define TTS_ALMOST_EMPTY 0x04 /* mask for AE bit: When set to 1, + indicates that less than 300 bytes + of data remain in DoubleTalk's + input (TTS or PCM) buffer. AE is + always 1 in the TGN and CVSD + modes. */ + + /* LPC speak commands */ +#define LPC_5220_NORMAL 0x60 /* 5220 format decoding table, normal rate */ +#define LPC_5220_FAST 0x64 /* 5220 format decoding table, fast rate */ +#define LPC_D6_NORMAL 0x20 /* D6 format decoding table, normal rate */ +#define LPC_D6_FAST 0x24 /* D6 format decoding table, fast rate */ + + /* LPC Port Status Flags (valid only after one of the LPC + speak commands) */ +#define LPC_SPEAKING 0x80 /* mask for TS bit: When set to 1, + indicates the LPC synthesizer is + producing speech.*/ +#define LPC_BUFFER_LOW 0x40 /* mask for BL bit: When set to 1, + indicates that the hardware LPC + data buffer has less than 30 bytes + remaining. (Total internal buffer + size = 4096 bytes.) */ +#define LPC_BUFFER_EMPTY 0x20 /* mask for BE bit: When set to 1, + indicates that the LPC data buffer + ran out of data (error condition if + TS is also 1). */ + + /* data returned by Interrogate command */ +struct dtlk_settings +{ + unsigned short serial_number; /* 0-7Fh:0-7Fh */ + unsigned char rom_version[24]; /* null terminated string */ + unsigned char mode; /* 0=Character; 1=Phoneme; 2=Text */ + unsigned char punc_level; /* nB; 0-7 */ + unsigned char formant_freq; /* nF; 0-9 */ + unsigned char pitch; /* nP; 0-99 */ + unsigned char speed; /* nS; 0-9 */ + unsigned char volume; /* nV; 0-9 */ + unsigned char tone; /* nX; 0-2 */ + unsigned char expression; /* nE; 0-9 */ + unsigned char ext_dict_loaded; /* 1=exception dictionary loaded */ + unsigned char ext_dict_status; /* 1=exception dictionary enabled */ + unsigned char free_ram; /* # pages (truncated) remaining for + text buffer */ + unsigned char articulation; /* nA; 0-9 */ + unsigned char reverb; /* nR; 0-9 */ + unsigned char eob; /* 7Fh value indicating end of + parameter block */ + unsigned char has_indexing; /* nonzero if indexing is implemented */ +}; diff --git a/include/linux/dvb/Kbuild b/include/linux/dvb/Kbuild new file mode 100644 index 00000000..f4dba863 --- /dev/null +++ b/include/linux/dvb/Kbuild @@ -0,0 +1,8 @@ +header-y += audio.h +header-y += ca.h +header-y += dmx.h +header-y += frontend.h +header-y += net.h +header-y += osd.h +header-y += version.h +header-y += video.h diff --git a/include/linux/dvb/audio.h b/include/linux/dvb/audio.h new file mode 100644 index 00000000..d47bccd6 --- /dev/null +++ b/include/linux/dvb/audio.h @@ -0,0 +1,135 @@ +/* + * audio.h + * + * Copyright (C) 2000 Ralph Metzler <ralph@convergence.de> + * & Marcus Metzler <marcus@convergence.de> + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Lesser Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) 
any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _DVBAUDIO_H_ +#define _DVBAUDIO_H_ + +#include <linux/types.h> + +typedef enum { + AUDIO_SOURCE_DEMUX, /* Select the demux as the main source */ + AUDIO_SOURCE_MEMORY /* Select internal memory as the main source */ +} audio_stream_source_t; + + +typedef enum { + AUDIO_STOPPED, /* Device is stopped */ + AUDIO_PLAYING, /* Device is currently playing */ + AUDIO_PAUSED /* Device is paused */ +} audio_play_state_t; + + +typedef enum { + AUDIO_STEREO, + AUDIO_MONO_LEFT, + AUDIO_MONO_RIGHT, + AUDIO_MONO, + AUDIO_STEREO_SWAPPED +} audio_channel_select_t; + + +typedef struct audio_mixer { + unsigned int volume_left; + unsigned int volume_right; + // what else do we need? bass, pass-through, ... +} audio_mixer_t; + + +typedef struct audio_status { + int AV_sync_state; /* sync audio and video? */ + int mute_state; /* audio is muted */ + audio_play_state_t play_state; /* current playback state */ + audio_stream_source_t stream_source; /* current stream source */ + audio_channel_select_t channel_select; /* currently selected channel */ + int bypass_mode; /* pass on audio data to */ + audio_mixer_t mixer_state; /* current mixer state */ +} audio_status_t; /* separate decoder hardware */ + + +typedef +struct audio_karaoke { /* if Vocal1 or Vocal2 are non-zero, they get mixed */ + int vocal1; /* into left and right t at 70% each */ + int vocal2; /* if both, Vocal1 and Vocal2 are non-zero, Vocal1 gets*/ + int melody; /* mixed into the left channel and */ + /* Vocal2 into the right channel at 100% each. */ + /* if Melody is non-zero, the melody channel gets mixed*/ +} audio_karaoke_t; /* into left and right */ + + +typedef __u16 audio_attributes_t; +/* bits: descr. 
*/ +/* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */ +/* 12 multichannel extension */ +/* 11-10 audio type (0=not spec, 1=language included) */ +/* 9- 8 audio application mode (0=not spec, 1=karaoke, 2=surround) */ +/* 7- 6 Quantization / DRC (mpeg audio: 1=DRC exists)(lpcm: 0=16bit, */ +/* 5- 4 Sample frequency fs (0=48kHz, 1=96kHz) */ +/* 2- 0 number of audio channels (n+1 channels) */ + + +/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */ +#define AUDIO_CAP_DTS 1 +#define AUDIO_CAP_LPCM 2 +#define AUDIO_CAP_MP1 4 +#define AUDIO_CAP_MP2 8 +#define AUDIO_CAP_MP3 16 +#define AUDIO_CAP_AAC 32 +#define AUDIO_CAP_OGG 64 +#define AUDIO_CAP_SDDS 128 +#define AUDIO_CAP_AC3 256 + +#define AUDIO_STOP _IO('o', 1) +#define AUDIO_PLAY _IO('o', 2) +#define AUDIO_PAUSE _IO('o', 3) +#define AUDIO_CONTINUE _IO('o', 4) +#define AUDIO_SELECT_SOURCE _IO('o', 5) +#define AUDIO_SET_MUTE _IO('o', 6) +#define AUDIO_SET_AV_SYNC _IO('o', 7) +#define AUDIO_SET_BYPASS_MODE _IO('o', 8) +#define AUDIO_CHANNEL_SELECT _IO('o', 9) +#define AUDIO_GET_STATUS _IOR('o', 10, audio_status_t) + +#define AUDIO_GET_CAPABILITIES _IOR('o', 11, unsigned int) +#define AUDIO_CLEAR_BUFFER _IO('o', 12) +#define AUDIO_SET_ID _IO('o', 13) +#define AUDIO_SET_MIXER _IOW('o', 14, audio_mixer_t) +#define AUDIO_SET_STREAMTYPE _IO('o', 15) +#define AUDIO_SET_EXT_ID _IO('o', 16) +#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t) +#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t) + +/** + * AUDIO_GET_PTS + * + * Read the 33 bit presentation time stamp as defined + * in ITU T-REC-H.222.0 / ISO/IEC 13818-1. + * + * The PTS should belong to the currently played + * frame if possible, but may also be a value close to it + * like the PTS of the last decoded frame or the last PTS + * extracted by the PES parser. + */ +#define AUDIO_GET_PTS _IOR('o', 19, __u64) +#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20) + +#endif /* _DVBAUDIO_H_ */ diff --git a/include/linux/dvb/ca.h b/include/linux/dvb/ca.h new file mode 100644 index 00000000..c18537f3 --- /dev/null +++ b/include/linux/dvb/ca.h @@ -0,0 +1,90 @@ +/* + * ca.h + * + * Copyright (C) 2000 Ralph Metzler <ralph@convergence.de> + * & Marcus Metzler <marcus@convergence.de> + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Lesser Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
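[Editor's note] The audio_status struct and AUDIO_* ioctls above drive the MPEG audio decoder of a full-featured DVB card from user space. A minimal status-query sketch; the device node path is an assumption about a typical adapter layout:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dvb/audio.h>

int main(void)
{
	/* assumed device node for the first adapter's audio decoder */
	int fd = open("/dev/dvb/adapter0/audio0", O_RDONLY);
	audio_status_t status;

	if (fd < 0)
		return 1;
	if (ioctl(fd, AUDIO_GET_STATUS, &status) < 0) {
		perror("AUDIO_GET_STATUS");
		return 1;
	}
	printf("play state: %d, muted: %d\n", (int)status.play_state, status.mute_state);
	close(fd);
	return 0;
}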
+ * + */ + +#ifndef _DVBCA_H_ +#define _DVBCA_H_ + +/* slot interface types and info */ + +typedef struct ca_slot_info { + int num; /* slot number */ + + int type; /* CA interface this slot supports */ +#define CA_CI 1 /* CI high level interface */ +#define CA_CI_LINK 2 /* CI link layer level interface */ +#define CA_CI_PHYS 4 /* CI physical layer level interface */ +#define CA_DESCR 8 /* built-in descrambler */ +#define CA_SC 128 /* simple smart card interface */ + + unsigned int flags; +#define CA_CI_MODULE_PRESENT 1 /* module (or card) inserted */ +#define CA_CI_MODULE_READY 2 +} ca_slot_info_t; + + +/* descrambler types and info */ + +typedef struct ca_descr_info { + unsigned int num; /* number of available descramblers (keys) */ + unsigned int type; /* type of supported scrambling system */ +#define CA_ECD 1 +#define CA_NDS 2 +#define CA_DSS 4 +} ca_descr_info_t; + +typedef struct ca_caps { + unsigned int slot_num; /* total number of CA card and module slots */ + unsigned int slot_type; /* OR of all supported types */ + unsigned int descr_num; /* total number of descrambler slots (keys) */ + unsigned int descr_type; /* OR of all supported types */ +} ca_caps_t; + +/* a message to/from a CI-CAM */ +typedef struct ca_msg { + unsigned int index; + unsigned int type; + unsigned int length; + unsigned char msg[256]; +} ca_msg_t; + +typedef struct ca_descr { + unsigned int index; + unsigned int parity; /* 0 == even, 1 == odd */ + unsigned char cw[8]; +} ca_descr_t; + +typedef struct ca_pid { + unsigned int pid; + int index; /* -1 == disable*/ +} ca_pid_t; + +#define CA_RESET _IO('o', 128) +#define CA_GET_CAP _IOR('o', 129, ca_caps_t) +#define CA_GET_SLOT_INFO _IOR('o', 130, ca_slot_info_t) +#define CA_GET_DESCR_INFO _IOR('o', 131, ca_descr_info_t) +#define CA_GET_MSG _IOR('o', 132, ca_msg_t) +#define CA_SEND_MSG _IOW('o', 133, ca_msg_t) +#define CA_SET_DESCR _IOW('o', 134, ca_descr_t) +#define CA_SET_PID _IOW('o', 135, ca_pid_t) + +#endif diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h new file mode 100644 index 00000000..f078f3ac --- /dev/null +++ b/include/linux/dvb/dmx.h @@ -0,0 +1,157 @@ +/* + * dmx.h + * + * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de> + * & Ralph Metzler <ralph@convergence.de> + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _DVBDMX_H_ +#define _DVBDMX_H_ + +#include <linux/types.h> +#ifdef __KERNEL__ +#include <linux/time.h> +#else +#include <time.h> +#endif + + +#define DMX_FILTER_SIZE 16 + +typedef enum +{ + DMX_OUT_DECODER, /* Streaming directly to decoder. */ + DMX_OUT_TAP, /* Output going to a memory buffer */ + /* (to be retrieved via the read command).*/ + DMX_OUT_TS_TAP, /* Output multiplexed into a new TS */ + /* (to be retrieved by reading from the */ + /* logical DVR device). 
*/ + DMX_OUT_TSDEMUX_TAP /* Like TS_TAP but retrieved from the DMX device */ +} dmx_output_t; + + +typedef enum +{ + DMX_IN_FRONTEND, /* Input from a front-end device. */ + DMX_IN_DVR /* Input from the logical DVR device. */ +} dmx_input_t; + + +typedef enum +{ + DMX_PES_AUDIO0, + DMX_PES_VIDEO0, + DMX_PES_TELETEXT0, + DMX_PES_SUBTITLE0, + DMX_PES_PCR0, + + DMX_PES_AUDIO1, + DMX_PES_VIDEO1, + DMX_PES_TELETEXT1, + DMX_PES_SUBTITLE1, + DMX_PES_PCR1, + + DMX_PES_AUDIO2, + DMX_PES_VIDEO2, + DMX_PES_TELETEXT2, + DMX_PES_SUBTITLE2, + DMX_PES_PCR2, + + DMX_PES_AUDIO3, + DMX_PES_VIDEO3, + DMX_PES_TELETEXT3, + DMX_PES_SUBTITLE3, + DMX_PES_PCR3, + + DMX_PES_OTHER +} dmx_pes_type_t; + +#define DMX_PES_AUDIO DMX_PES_AUDIO0 +#define DMX_PES_VIDEO DMX_PES_VIDEO0 +#define DMX_PES_TELETEXT DMX_PES_TELETEXT0 +#define DMX_PES_SUBTITLE DMX_PES_SUBTITLE0 +#define DMX_PES_PCR DMX_PES_PCR0 + + +typedef struct dmx_filter +{ + __u8 filter[DMX_FILTER_SIZE]; + __u8 mask[DMX_FILTER_SIZE]; + __u8 mode[DMX_FILTER_SIZE]; +} dmx_filter_t; + + +struct dmx_sct_filter_params +{ + __u16 pid; + dmx_filter_t filter; + __u32 timeout; + __u32 flags; +#define DMX_CHECK_CRC 1 +#define DMX_ONESHOT 2 +#define DMX_IMMEDIATE_START 4 +#define DMX_KERNEL_CLIENT 0x8000 +}; + + +struct dmx_pes_filter_params +{ + __u16 pid; + dmx_input_t input; + dmx_output_t output; + dmx_pes_type_t pes_type; + __u32 flags; +}; + +typedef struct dmx_caps { + __u32 caps; + int num_decoders; +} dmx_caps_t; + +typedef enum { + DMX_SOURCE_FRONT0 = 0, + DMX_SOURCE_FRONT1, + DMX_SOURCE_FRONT2, + DMX_SOURCE_FRONT3, + DMX_SOURCE_DVR0 = 16, + DMX_SOURCE_DVR1, + DMX_SOURCE_DVR2, + DMX_SOURCE_DVR3 +} dmx_source_t; + +struct dmx_stc { + unsigned int num; /* input : which STC? 0..N */ + unsigned int base; /* output: divisor for stc to get 90 kHz clock */ + __u64 stc; /* output: stc in 'base'*90 kHz units */ +}; + + +#define DMX_START _IO('o', 41) +#define DMX_STOP _IO('o', 42) +#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params) +#define DMX_SET_PES_FILTER _IOW('o', 44, struct dmx_pes_filter_params) +#define DMX_SET_BUFFER_SIZE _IO('o', 45) +#define DMX_GET_PES_PIDS _IOR('o', 47, __u16[5]) +#define DMX_GET_CAPS _IOR('o', 48, dmx_caps_t) +#define DMX_SET_SOURCE _IOW('o', 49, dmx_source_t) +#define DMX_GET_STC _IOWR('o', 50, struct dmx_stc) +#define DMX_ADD_PID _IOW('o', 51, __u16) +#define DMX_REMOVE_PID _IOW('o', 52, __u16) + +#endif /*_DVBDMX_H_*/ diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h new file mode 100644 index 00000000..cb4428ab --- /dev/null +++ b/include/linux/dvb/frontend.h @@ -0,0 +1,436 @@ +/* + * frontend.h + * + * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de> + * Ralph Metzler <ralph@convergence.de> + * Holger Waechtler <holger@convergence.de> + * Andre Draszik <ad@convergence.de> + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
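[Editor's note] The demux ioctls above (DMX_SET_FILTER and friends) are used by opening a demux device node, arming a section filter, and reading complete sections back. A hedged sketch that reads the PAT (PID 0, table_id 0x00); the device path and timeout are assumptions, not values defined by the header:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dvb/dmx.h>

int main(void)
{
	/* assumed device node for the first adapter's first demux */
	int fd = open("/dev/dvb/adapter0/demux0", O_RDWR);
	struct dmx_sct_filter_params params;
	unsigned char section[4096];
	ssize_t n;

	if (fd < 0)
		return 1;

	memset(&params, 0, sizeof(params));
	params.pid = 0;				/* PAT is carried on PID 0 */
	params.filter.filter[0] = 0x00;		/* match table_id 0x00 */
	params.filter.mask[0] = 0xff;
	params.timeout = 5000;			/* ms, placeholder */
	params.flags = DMX_IMMEDIATE_START | DMX_CHECK_CRC | DMX_ONESHOT;

	if (ioctl(fd, DMX_SET_FILTER, &params) < 0)
		return 1;

	n = read(fd, section, sizeof(section));	/* one complete, CRC-checked section */
	if (n > 0)
		printf("got %zd bytes of PAT\n", n);
	close(fd);
	return 0;
}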
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _DVBFRONTEND_H_ +#define _DVBFRONTEND_H_ + +#include <linux/types.h> + +typedef enum fe_type { + FE_QPSK, + FE_QAM, + FE_OFDM, + FE_ATSC +} fe_type_t; + + +typedef enum fe_caps { + FE_IS_STUPID = 0, + FE_CAN_INVERSION_AUTO = 0x1, + FE_CAN_FEC_1_2 = 0x2, + FE_CAN_FEC_2_3 = 0x4, + FE_CAN_FEC_3_4 = 0x8, + FE_CAN_FEC_4_5 = 0x10, + FE_CAN_FEC_5_6 = 0x20, + FE_CAN_FEC_6_7 = 0x40, + FE_CAN_FEC_7_8 = 0x80, + FE_CAN_FEC_8_9 = 0x100, + FE_CAN_FEC_AUTO = 0x200, + FE_CAN_QPSK = 0x400, + FE_CAN_QAM_16 = 0x800, + FE_CAN_QAM_32 = 0x1000, + FE_CAN_QAM_64 = 0x2000, + FE_CAN_QAM_128 = 0x4000, + FE_CAN_QAM_256 = 0x8000, + FE_CAN_QAM_AUTO = 0x10000, + FE_CAN_TRANSMISSION_MODE_AUTO = 0x20000, + FE_CAN_BANDWIDTH_AUTO = 0x40000, + FE_CAN_GUARD_INTERVAL_AUTO = 0x80000, + FE_CAN_HIERARCHY_AUTO = 0x100000, + FE_CAN_8VSB = 0x200000, + FE_CAN_16VSB = 0x400000, + FE_HAS_EXTENDED_CAPS = 0x800000, /* We need more bitspace for newer APIs, indicate this. */ + FE_CAN_TURBO_FEC = 0x8000000, /* frontend supports "turbo fec modulation" */ + FE_CAN_2G_MODULATION = 0x10000000, /* frontend supports "2nd generation modulation" (DVB-S2) */ + FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */ + FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */ + FE_CAN_MUTE_TS = 0x80000000 /* frontend can stop spurious TS data output */ +} fe_caps_t; + + +struct dvb_frontend_info { + char name[128]; + fe_type_t type; /* DEPRECATED. Use DTV_ENUM_DELSYS instead */ + __u32 frequency_min; + __u32 frequency_max; + __u32 frequency_stepsize; + __u32 frequency_tolerance; + __u32 symbol_rate_min; + __u32 symbol_rate_max; + __u32 symbol_rate_tolerance; /* ppm */ + __u32 notifier_delay; /* DEPRECATED */ + fe_caps_t caps; +}; + + +/** + * Check out the DiSEqC bus spec available on http://www.eutelsat.org/ for + * the meaning of this struct... + */ +struct dvb_diseqc_master_cmd { + __u8 msg [6]; /* { framing, address, command, data [3] } */ + __u8 msg_len; /* valid values are 3...6 */ +}; + + +struct dvb_diseqc_slave_reply { + __u8 msg [4]; /* { framing, data [3] } */ + __u8 msg_len; /* valid values are 0...4, 0 means no msg */ + int timeout; /* return from ioctl after timeout ms with */ +}; /* errorcode when no message was received */ + + +typedef enum fe_sec_voltage { + SEC_VOLTAGE_13, + SEC_VOLTAGE_18, + SEC_VOLTAGE_OFF +} fe_sec_voltage_t; + + +typedef enum fe_sec_tone_mode { + SEC_TONE_ON, + SEC_TONE_OFF +} fe_sec_tone_mode_t; + + +typedef enum fe_sec_mini_cmd { + SEC_MINI_A, + SEC_MINI_B +} fe_sec_mini_cmd_t; + + +typedef enum fe_status { + FE_HAS_SIGNAL = 0x01, /* found something above the noise level */ + FE_HAS_CARRIER = 0x02, /* found a DVB signal */ + FE_HAS_VITERBI = 0x04, /* FEC is stable */ + FE_HAS_SYNC = 0x08, /* found sync bytes */ + FE_HAS_LOCK = 0x10, /* everything's working... 
*/ + FE_TIMEDOUT = 0x20, /* no lock within the last ~2 seconds */ + FE_REINIT = 0x40 /* frontend was reinitialized, */ +} fe_status_t; /* application is recommended to reset */ + /* DiSEqC, tone and parameters */ + +typedef enum fe_spectral_inversion { + INVERSION_OFF, + INVERSION_ON, + INVERSION_AUTO +} fe_spectral_inversion_t; + + +typedef enum fe_code_rate { + FEC_NONE = 0, + FEC_1_2, + FEC_2_3, + FEC_3_4, + FEC_4_5, + FEC_5_6, + FEC_6_7, + FEC_7_8, + FEC_8_9, + FEC_AUTO, + FEC_3_5, + FEC_9_10, +} fe_code_rate_t; + + +typedef enum fe_modulation { + QPSK, + QAM_16, + QAM_32, + QAM_64, + QAM_128, + QAM_256, + QAM_AUTO, + VSB_8, + VSB_16, + PSK_8, + APSK_16, + APSK_32, + DQPSK, +} fe_modulation_t; + +typedef enum fe_transmit_mode { + TRANSMISSION_MODE_2K, + TRANSMISSION_MODE_8K, + TRANSMISSION_MODE_AUTO, + TRANSMISSION_MODE_4K, + TRANSMISSION_MODE_1K, + TRANSMISSION_MODE_16K, + TRANSMISSION_MODE_32K, +} fe_transmit_mode_t; + +#if defined(__DVB_CORE__) || !defined (__KERNEL__) +typedef enum fe_bandwidth { + BANDWIDTH_8_MHZ, + BANDWIDTH_7_MHZ, + BANDWIDTH_6_MHZ, + BANDWIDTH_AUTO, + BANDWIDTH_5_MHZ, + BANDWIDTH_10_MHZ, + BANDWIDTH_1_712_MHZ, +} fe_bandwidth_t; +#endif + +typedef enum fe_guard_interval { + GUARD_INTERVAL_1_32, + GUARD_INTERVAL_1_16, + GUARD_INTERVAL_1_8, + GUARD_INTERVAL_1_4, + GUARD_INTERVAL_AUTO, + GUARD_INTERVAL_1_128, + GUARD_INTERVAL_19_128, + GUARD_INTERVAL_19_256, +} fe_guard_interval_t; + + +typedef enum fe_hierarchy { + HIERARCHY_NONE, + HIERARCHY_1, + HIERARCHY_2, + HIERARCHY_4, + HIERARCHY_AUTO +} fe_hierarchy_t; + + +#if defined(__DVB_CORE__) || !defined (__KERNEL__) +struct dvb_qpsk_parameters { + __u32 symbol_rate; /* symbol rate in Symbols per second */ + fe_code_rate_t fec_inner; /* forward error correction (see above) */ +}; + +struct dvb_qam_parameters { + __u32 symbol_rate; /* symbol rate in Symbols per second */ + fe_code_rate_t fec_inner; /* forward error correction (see above) */ + fe_modulation_t modulation; /* modulation type (see above) */ +}; + +struct dvb_vsb_parameters { + fe_modulation_t modulation; /* modulation type (see above) */ +}; + +struct dvb_ofdm_parameters { + fe_bandwidth_t bandwidth; + fe_code_rate_t code_rate_HP; /* high priority stream code rate */ + fe_code_rate_t code_rate_LP; /* low priority stream code rate */ + fe_modulation_t constellation; /* modulation type (see above) */ + fe_transmit_mode_t transmission_mode; + fe_guard_interval_t guard_interval; + fe_hierarchy_t hierarchy_information; +}; + + +struct dvb_frontend_parameters { + __u32 frequency; /* (absolute) frequency in Hz for QAM/OFDM/ATSC */ + /* intermediate frequency in kHz for QPSK */ + fe_spectral_inversion_t inversion; + union { + struct dvb_qpsk_parameters qpsk; + struct dvb_qam_parameters qam; + struct dvb_ofdm_parameters ofdm; + struct dvb_vsb_parameters vsb; + } u; +}; + +struct dvb_frontend_event { + fe_status_t status; + struct dvb_frontend_parameters parameters; +}; +#endif + +/* S2API Commands */ +#define DTV_UNDEFINED 0 +#define DTV_TUNE 1 +#define DTV_CLEAR 2 +#define DTV_FREQUENCY 3 +#define DTV_MODULATION 4 +#define DTV_BANDWIDTH_HZ 5 +#define DTV_INVERSION 6 +#define DTV_DISEQC_MASTER 7 +#define DTV_SYMBOL_RATE 8 +#define DTV_INNER_FEC 9 +#define DTV_VOLTAGE 10 +#define DTV_TONE 11 +#define DTV_PILOT 12 +#define DTV_ROLLOFF 13 +#define DTV_DISEQC_SLAVE_REPLY 14 + +/* Basic enumeration set for querying unlimited capabilities */ +#define DTV_FE_CAPABILITY_COUNT 15 +#define DTV_FE_CAPABILITY 16 +#define DTV_DELIVERY_SYSTEM 17 + +/* ISDB-T and ISDB-Tsb */ 
+#define DTV_ISDBT_PARTIAL_RECEPTION 18 +#define DTV_ISDBT_SOUND_BROADCASTING 19 + +#define DTV_ISDBT_SB_SUBCHANNEL_ID 20 +#define DTV_ISDBT_SB_SEGMENT_IDX 21 +#define DTV_ISDBT_SB_SEGMENT_COUNT 22 + +#define DTV_ISDBT_LAYERA_FEC 23 +#define DTV_ISDBT_LAYERA_MODULATION 24 +#define DTV_ISDBT_LAYERA_SEGMENT_COUNT 25 +#define DTV_ISDBT_LAYERA_TIME_INTERLEAVING 26 + +#define DTV_ISDBT_LAYERB_FEC 27 +#define DTV_ISDBT_LAYERB_MODULATION 28 +#define DTV_ISDBT_LAYERB_SEGMENT_COUNT 29 +#define DTV_ISDBT_LAYERB_TIME_INTERLEAVING 30 + +#define DTV_ISDBT_LAYERC_FEC 31 +#define DTV_ISDBT_LAYERC_MODULATION 32 +#define DTV_ISDBT_LAYERC_SEGMENT_COUNT 33 +#define DTV_ISDBT_LAYERC_TIME_INTERLEAVING 34 + +#define DTV_API_VERSION 35 + +#define DTV_CODE_RATE_HP 36 +#define DTV_CODE_RATE_LP 37 +#define DTV_GUARD_INTERVAL 38 +#define DTV_TRANSMISSION_MODE 39 +#define DTV_HIERARCHY 40 + +#define DTV_ISDBT_LAYER_ENABLED 41 + +#define DTV_ISDBS_TS_ID 42 + +#define DTV_DVBT2_PLP_ID 43 + +#define DTV_ENUM_DELSYS 44 + +#define DTV_MAX_COMMAND DTV_ENUM_DELSYS + +typedef enum fe_pilot { + PILOT_ON, + PILOT_OFF, + PILOT_AUTO, +} fe_pilot_t; + +typedef enum fe_rolloff { + ROLLOFF_35, /* Implied value in DVB-S, default for DVB-S2 */ + ROLLOFF_20, + ROLLOFF_25, + ROLLOFF_AUTO, +} fe_rolloff_t; + +typedef enum fe_delivery_system { + SYS_UNDEFINED, + SYS_DVBC_ANNEX_A, + SYS_DVBC_ANNEX_B, + SYS_DVBT, + SYS_DSS, + SYS_DVBS, + SYS_DVBS2, + SYS_DVBH, + SYS_ISDBT, + SYS_ISDBS, + SYS_ISDBC, + SYS_ATSC, + SYS_ATSCMH, + SYS_DMBTH, + SYS_CMMB, + SYS_DAB, + SYS_DVBT2, + SYS_TURBO, + SYS_DVBC_ANNEX_C, +} fe_delivery_system_t; + + +#define SYS_DVBC_ANNEX_AC SYS_DVBC_ANNEX_A + + +struct dtv_cmds_h { + char *name; /* A display name for debugging purposes */ + + __u32 cmd; /* A unique ID */ + + /* Flags */ + __u32 set:1; /* Either a set or get property */ + __u32 buffer:1; /* Does this property use the buffer? */ + __u32 reserved:30; /* Align */ +}; + +struct dtv_property { + __u32 cmd; + __u32 reserved[3]; + union { + __u32 data; + struct { + __u8 data[32]; + __u32 len; + __u32 reserved1[3]; + void *reserved2; + } buffer; + } u; + int result; +} __attribute__ ((packed)); + +/* num of properties cannot exceed DTV_IOCTL_MAX_MSGS per ioctl */ +#define DTV_IOCTL_MAX_MSGS 64 + +struct dtv_properties { + __u32 num; + struct dtv_property *props; +}; + +#define FE_SET_PROPERTY _IOW('o', 82, struct dtv_properties) +#define FE_GET_PROPERTY _IOR('o', 83, struct dtv_properties) + + +/** + * When set, this flag will disable any zigzagging or other "normal" tuning + * behaviour. Additionally, there will be no automatic monitoring of the lock + * status, and hence no frontend events will be generated. If a frontend device + * is closed, this flag will be automatically turned off when the device is + * reopened read-write. 
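+ *
+ * Illustrative only (not part of the original header): a user-space caller
+ * that wants a single tune attempt, with no retuning and no event
+ * generation, might do something along the lines of
+ *
+ *	int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);	(device path assumed)
+ *
+ *	if (fd >= 0)
+ *		ioctl(fd, FE_SET_FRONTEND_TUNE_MODE, FE_TUNE_MODE_ONESHOT);
+ *
+ * and then start the actual tuning with FE_SET_FRONTEND or with
+ * FE_SET_PROPERTY/DTV_TUNE.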
+ */ +#define FE_TUNE_MODE_ONESHOT 0x01 + + +#define FE_GET_INFO _IOR('o', 61, struct dvb_frontend_info) + +#define FE_DISEQC_RESET_OVERLOAD _IO('o', 62) +#define FE_DISEQC_SEND_MASTER_CMD _IOW('o', 63, struct dvb_diseqc_master_cmd) +#define FE_DISEQC_RECV_SLAVE_REPLY _IOR('o', 64, struct dvb_diseqc_slave_reply) +#define FE_DISEQC_SEND_BURST _IO('o', 65) /* fe_sec_mini_cmd_t */ + +#define FE_SET_TONE _IO('o', 66) /* fe_sec_tone_mode_t */ +#define FE_SET_VOLTAGE _IO('o', 67) /* fe_sec_voltage_t */ +#define FE_ENABLE_HIGH_LNB_VOLTAGE _IO('o', 68) /* int */ + +#define FE_READ_STATUS _IOR('o', 69, fe_status_t) +#define FE_READ_BER _IOR('o', 70, __u32) +#define FE_READ_SIGNAL_STRENGTH _IOR('o', 71, __u16) +#define FE_READ_SNR _IOR('o', 72, __u16) +#define FE_READ_UNCORRECTED_BLOCKS _IOR('o', 73, __u32) + +#define FE_SET_FRONTEND _IOW('o', 76, struct dvb_frontend_parameters) +#define FE_GET_FRONTEND _IOR('o', 77, struct dvb_frontend_parameters) +#define FE_SET_FRONTEND_TUNE_MODE _IO('o', 81) /* unsigned int */ +#define FE_GET_EVENT _IOR('o', 78, struct dvb_frontend_event) + +#define FE_DISHNETWORK_SEND_LEGACY_CMD _IO('o', 80) /* unsigned int */ + +#endif /*_DVBFRONTEND_H_*/ diff --git a/include/linux/dvb/net.h b/include/linux/dvb/net.h new file mode 100644 index 00000000..f451e7eb --- /dev/null +++ b/include/linux/dvb/net.h @@ -0,0 +1,52 @@ +/* + * net.h + * + * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de> + * & Ralph Metzler <ralph@convergence.de> + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#ifndef _DVBNET_H_ +#define _DVBNET_H_ + +#include <linux/types.h> + +struct dvb_net_if { + __u16 pid; + __u16 if_num; + __u8 feedtype; +#define DVB_NET_FEEDTYPE_MPE 0 /* multi protocol encapsulation */ +#define DVB_NET_FEEDTYPE_ULE 1 /* ultra lightweight encapsulation */ +}; + + +#define NET_ADD_IF _IOWR('o', 52, struct dvb_net_if) +#define NET_REMOVE_IF _IO('o', 53) +#define NET_GET_IF _IOWR('o', 54, struct dvb_net_if) + + +/* binary compatibility cruft: */ +struct __dvb_net_if_old { + __u16 pid; + __u16 if_num; +}; +#define __NET_ADD_IF_OLD _IOWR('o', 52, struct __dvb_net_if_old) +#define __NET_GET_IF_OLD _IOWR('o', 54, struct __dvb_net_if_old) + + +#endif /*_DVBNET_H_*/ diff --git a/include/linux/dvb/osd.h b/include/linux/dvb/osd.h new file mode 100644 index 00000000..880e6843 --- /dev/null +++ b/include/linux/dvb/osd.h @@ -0,0 +1,144 @@ +/* + * osd.h + * + * Copyright (C) 2001 Ralph Metzler <ralph@convergence.de> + * & Marcus Metzler <marcus@convergence.de> + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Lesser Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _DVBOSD_H_ +#define _DVBOSD_H_ + +#include <linux/compiler.h> + +typedef enum { + // All functions return -2 on "not open" + OSD_Close=1, // () + // Disables OSD and releases the buffers + // returns 0 on success + OSD_Open, // (x0,y0,x1,y1,BitPerPixel[2/4/8](color&0x0F),mix[0..15](color&0xF0)) + // Opens OSD with this size and bit depth + // returns 0 on success, -1 on DRAM allocation error, -2 on "already open" + OSD_Show, // () + // enables OSD mode + // returns 0 on success + OSD_Hide, // () + // disables OSD mode + // returns 0 on success + OSD_Clear, // () + // Sets all pixel to color 0 + // returns 0 on success + OSD_Fill, // (color) + // Sets all pixel to color <col> + // returns 0 on success + OSD_SetColor, // (color,R{x0},G{y0},B{x1},opacity{y1}) + // set palette entry <num> to <r,g,b>, <mix> and <trans> apply + // R,G,B: 0..255 + // R=Red, G=Green, B=Blue + // opacity=0: pixel opacity 0% (only video pixel shows) + // opacity=1..254: pixel opacity as specified in header + // opacity=255: pixel opacity 100% (only OSD pixel shows) + // returns 0 on success, -1 on error + OSD_SetPalette, // (firstcolor{color},lastcolor{x0},data) + // Set a number of entries in the palette + // sets the entries "firstcolor" through "lastcolor" from the array "data" + // data has 4 byte for each color: + // R,G,B, and a opacity value: 0->transparent, 1..254->mix, 255->pixel + OSD_SetTrans, // (transparency{color}) + // Sets transparency of mixed pixel (0..15) + // returns 0 on success + OSD_SetPixel, // (x0,y0,color) + // sets pixel <x>,<y> to color number <col> + // returns 0 on success, -1 on error + OSD_GetPixel, // (x0,y0) + // returns color number of pixel <x>,<y>, or -1 + OSD_SetRow, // (x0,y0,x1,data) + // fills pixels x0,y through 
x1,y with the content of data[] + // returns 0 on success, -1 on clipping all pixel (no pixel drawn) + OSD_SetBlock, // (x0,y0,x1,y1,increment{color},data) + // fills pixels x0,y0 through x1,y1 with the content of data[] + // inc contains the width of one line in the data block, + // inc<=0 uses blockwidth as linewidth + // returns 0 on success, -1 on clipping all pixel + OSD_FillRow, // (x0,y0,x1,color) + // fills pixels x0,y through x1,y with the color <col> + // returns 0 on success, -1 on clipping all pixel + OSD_FillBlock, // (x0,y0,x1,y1,color) + // fills pixels x0,y0 through x1,y1 with the color <col> + // returns 0 on success, -1 on clipping all pixel + OSD_Line, // (x0,y0,x1,y1,color) + // draw a line from x0,y0 to x1,y1 with the color <col> + // returns 0 on success + OSD_Query, // (x0,y0,x1,y1,xasp{color}}), yasp=11 + // fills parameters with the picture dimensions and the pixel aspect ratio + // returns 0 on success + OSD_Test, // () + // draws a test picture. for debugging purposes only + // returns 0 on success +// TODO: remove "test" in final version + OSD_Text, // (x0,y0,size,color,text) + OSD_SetWindow, // (x0) set window with number 0<x0<8 as current + OSD_MoveWindow, // move current window to (x0, y0) + OSD_OpenRaw, // Open other types of OSD windows +} OSD_Command; + +typedef struct osd_cmd_s { + OSD_Command cmd; + int x0; + int y0; + int x1; + int y1; + int color; + void __user *data; +} osd_cmd_t; + +/* OSD_OpenRaw: set 'color' to desired window type */ +typedef enum { + OSD_BITMAP1, /* 1 bit bitmap */ + OSD_BITMAP2, /* 2 bit bitmap */ + OSD_BITMAP4, /* 4 bit bitmap */ + OSD_BITMAP8, /* 8 bit bitmap */ + OSD_BITMAP1HR, /* 1 Bit bitmap half resolution */ + OSD_BITMAP2HR, /* 2 bit bitmap half resolution */ + OSD_BITMAP4HR, /* 4 bit bitmap half resolution */ + OSD_BITMAP8HR, /* 8 bit bitmap half resolution */ + OSD_YCRCB422, /* 4:2:2 YCRCB Graphic Display */ + OSD_YCRCB444, /* 4:4:4 YCRCB Graphic Display */ + OSD_YCRCB444HR, /* 4:4:4 YCRCB graphic half resolution */ + OSD_VIDEOTSIZE, /* True Size Normal MPEG Video Display */ + OSD_VIDEOHSIZE, /* MPEG Video Display Half Resolution */ + OSD_VIDEOQSIZE, /* MPEG Video Display Quarter Resolution */ + OSD_VIDEODSIZE, /* MPEG Video Display Double Resolution */ + OSD_VIDEOTHSIZE, /* True Size MPEG Video Display Half Resolution */ + OSD_VIDEOTQSIZE, /* True Size MPEG Video Display Quarter Resolution*/ + OSD_VIDEOTDSIZE, /* True Size MPEG Video Display Double Resolution */ + OSD_VIDEONSIZE, /* Full Size MPEG Video Display */ + OSD_CURSOR /* Cursor */ +} osd_raw_window_t; + +typedef struct osd_cap_s { + int cmd; +#define OSD_CAP_MEMSIZE 1 /* memory size */ + long val; +} osd_cap_t; + + +#define OSD_SEND_CMD _IOW('o', 160, osd_cmd_t) +#define OSD_GET_CAPABILITY _IOR('o', 161, osd_cap_t) + +#endif diff --git a/include/linux/dvb/version.h b/include/linux/dvb/version.h new file mode 100644 index 00000000..0559e2bd --- /dev/null +++ b/include/linux/dvb/version.h @@ -0,0 +1,29 @@ +/* + * version.h + * + * Copyright (C) 2000 Holger Waechtler <holger@convergence.de> + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _DVBVERSION_H_ +#define _DVBVERSION_H_ + +#define DVB_API_VERSION 5 +#define DVB_API_VERSION_MINOR 5 + +#endif /*_DVBVERSION_H_*/ diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h new file mode 100644 index 00000000..1d750c0f --- /dev/null +++ b/include/linux/dvb/video.h @@ -0,0 +1,276 @@ +/* + * video.h + * + * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de> + * & Ralph Metzler <ralph@convergence.de> + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#ifndef _DVBVIDEO_H_ +#define _DVBVIDEO_H_ + +#include <linux/types.h> +#ifdef __KERNEL__ +#include <linux/compiler.h> +#else +#include <stdint.h> +#include <time.h> +#endif + +typedef enum { + VIDEO_FORMAT_4_3, /* Select 4:3 format */ + VIDEO_FORMAT_16_9, /* Select 16:9 format. 
*/ + VIDEO_FORMAT_221_1 /* 2.21:1 */ +} video_format_t; + + +typedef enum { + VIDEO_SYSTEM_PAL, + VIDEO_SYSTEM_NTSC, + VIDEO_SYSTEM_PALN, + VIDEO_SYSTEM_PALNc, + VIDEO_SYSTEM_PALM, + VIDEO_SYSTEM_NTSC60, + VIDEO_SYSTEM_PAL60, + VIDEO_SYSTEM_PALM60 +} video_system_t; + + +typedef enum { + VIDEO_PAN_SCAN, /* use pan and scan format */ + VIDEO_LETTER_BOX, /* use letterbox format */ + VIDEO_CENTER_CUT_OUT /* use center cut out format */ +} video_displayformat_t; + +typedef struct { + int w; + int h; + video_format_t aspect_ratio; +} video_size_t; + +typedef enum { + VIDEO_SOURCE_DEMUX, /* Select the demux as the main source */ + VIDEO_SOURCE_MEMORY /* If this source is selected, the stream + comes from the user through the write + system call */ +} video_stream_source_t; + + +typedef enum { + VIDEO_STOPPED, /* Video is stopped */ + VIDEO_PLAYING, /* Video is currently playing */ + VIDEO_FREEZED /* Video is freezed */ +} video_play_state_t; + + +/* Decoder commands */ +#define VIDEO_CMD_PLAY (0) +#define VIDEO_CMD_STOP (1) +#define VIDEO_CMD_FREEZE (2) +#define VIDEO_CMD_CONTINUE (3) + +/* Flags for VIDEO_CMD_FREEZE */ +#define VIDEO_CMD_FREEZE_TO_BLACK (1 << 0) + +/* Flags for VIDEO_CMD_STOP */ +#define VIDEO_CMD_STOP_TO_BLACK (1 << 0) +#define VIDEO_CMD_STOP_IMMEDIATELY (1 << 1) + +/* Play input formats: */ +/* The decoder has no special format requirements */ +#define VIDEO_PLAY_FMT_NONE (0) +/* The decoder requires full GOPs */ +#define VIDEO_PLAY_FMT_GOP (1) + +/* The structure must be zeroed before use by the application + This ensures it can be extended safely in the future. */ +struct video_command { + __u32 cmd; + __u32 flags; + union { + struct { + __u64 pts; + } stop; + + struct { + /* 0 or 1000 specifies normal speed, + 1 specifies forward single stepping, + -1 specifies backward single stepping, + >1: playback at speed/1000 of the normal speed, + <-1: reverse playback at (-speed/1000) of the normal speed. */ + __s32 speed; + __u32 format; + } play; + + struct { + __u32 data[16]; + } raw; + }; +}; + +/* FIELD_UNKNOWN can be used if the hardware does not know whether + the Vsync is for an odd, even or progressive (i.e. non-interlaced) + field. */ +#define VIDEO_VSYNC_FIELD_UNKNOWN (0) +#define VIDEO_VSYNC_FIELD_ODD (1) +#define VIDEO_VSYNC_FIELD_EVEN (2) +#define VIDEO_VSYNC_FIELD_PROGRESSIVE (3) + +struct video_event { + __s32 type; +#define VIDEO_EVENT_SIZE_CHANGED 1 +#define VIDEO_EVENT_FRAME_RATE_CHANGED 2 +#define VIDEO_EVENT_DECODER_STOPPED 3 +#define VIDEO_EVENT_VSYNC 4 + __kernel_time_t timestamp; + union { + video_size_t size; + unsigned int frame_rate; /* in frames per 1000sec */ + unsigned char vsync_field; /* unknown/odd/even/progressive */ + } u; +}; + + +struct video_status { + int video_blank; /* blank video on freeze? 
*/ + video_play_state_t play_state; /* current state of playback */ + video_stream_source_t stream_source; /* current source (demux/memory) */ + video_format_t video_format; /* current aspect ratio of stream*/ + video_displayformat_t display_format;/* selected cropping mode */ +}; + + +struct video_still_picture { + char __user *iFrame; /* pointer to a single iframe in memory */ + __s32 size; +}; + + +typedef +struct video_highlight { + int active; /* 1=show highlight, 0=hide highlight */ + __u8 contrast1; /* 7- 4 Pattern pixel contrast */ + /* 3- 0 Background pixel contrast */ + __u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */ + /* 3- 0 Emphasis pixel-1 contrast */ + __u8 color1; /* 7- 4 Pattern pixel color */ + /* 3- 0 Background pixel color */ + __u8 color2; /* 7- 4 Emphasis pixel-2 color */ + /* 3- 0 Emphasis pixel-1 color */ + __u32 ypos; /* 23-22 auto action mode */ + /* 21-12 start y */ + /* 9- 0 end y */ + __u32 xpos; /* 23-22 button color number */ + /* 21-12 start x */ + /* 9- 0 end x */ +} video_highlight_t; + + +typedef struct video_spu { + int active; + int stream_id; +} video_spu_t; + + +typedef struct video_spu_palette { /* SPU Palette information */ + int length; + __u8 __user *palette; +} video_spu_palette_t; + + +typedef struct video_navi_pack { + int length; /* 0 ... 1024 */ + __u8 data[1024]; +} video_navi_pack_t; + + +typedef __u16 video_attributes_t; +/* bits: descr. */ +/* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */ +/* 13-12 TV system (0=525/60, 1=625/50) */ +/* 11-10 Aspect ratio (0=4:3, 3=16:9) */ +/* 9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca */ +/* 7 line 21-1 data present in GOP (1=yes, 0=no) */ +/* 6 line 21-2 data present in GOP (1=yes, 0=no) */ +/* 5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */ +/* 2 source letterboxed (1=yes, 0=no) */ +/* 0 film/camera mode (0=camera, 1=film (625/50 only)) */ + + +/* bit definitions for capabilities: */ +/* can the hardware decode MPEG1 and/or MPEG2? */ +#define VIDEO_CAP_MPEG1 1 +#define VIDEO_CAP_MPEG2 2 +/* can you send a system and/or program stream to video device? + (you still have to open the video and the audio device but only + send the stream to the video device) */ +#define VIDEO_CAP_SYS 4 +#define VIDEO_CAP_PROG 8 +/* can the driver also handle SPU, NAVI and CSS encoded data? 
+ (CSS API is not present yet) */ +#define VIDEO_CAP_SPU 16 +#define VIDEO_CAP_NAVI 32 +#define VIDEO_CAP_CSS 64 + + +#define VIDEO_STOP _IO('o', 21) +#define VIDEO_PLAY _IO('o', 22) +#define VIDEO_FREEZE _IO('o', 23) +#define VIDEO_CONTINUE _IO('o', 24) +#define VIDEO_SELECT_SOURCE _IO('o', 25) +#define VIDEO_SET_BLANK _IO('o', 26) +#define VIDEO_GET_STATUS _IOR('o', 27, struct video_status) +#define VIDEO_GET_EVENT _IOR('o', 28, struct video_event) +#define VIDEO_SET_DISPLAY_FORMAT _IO('o', 29) +#define VIDEO_STILLPICTURE _IOW('o', 30, struct video_still_picture) +#define VIDEO_FAST_FORWARD _IO('o', 31) +#define VIDEO_SLOWMOTION _IO('o', 32) +#define VIDEO_GET_CAPABILITIES _IOR('o', 33, unsigned int) +#define VIDEO_CLEAR_BUFFER _IO('o', 34) +#define VIDEO_SET_ID _IO('o', 35) +#define VIDEO_SET_STREAMTYPE _IO('o', 36) +#define VIDEO_SET_FORMAT _IO('o', 37) +#define VIDEO_SET_SYSTEM _IO('o', 38) +#define VIDEO_SET_HIGHLIGHT _IOW('o', 39, video_highlight_t) +#define VIDEO_SET_SPU _IOW('o', 50, video_spu_t) +#define VIDEO_SET_SPU_PALETTE _IOW('o', 51, video_spu_palette_t) +#define VIDEO_GET_NAVI _IOR('o', 52, video_navi_pack_t) +#define VIDEO_SET_ATTRIBUTES _IO('o', 53) +#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t) +#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int) + +/** + * VIDEO_GET_PTS + * + * Read the 33 bit presentation time stamp as defined + * in ITU T-REC-H.222.0 / ISO/IEC 13818-1. + * + * The PTS should belong to the currently played + * frame if possible, but may also be a value close to it + * like the PTS of the last decoded frame or the last PTS + * extracted by the PES parser. + */ +#define VIDEO_GET_PTS _IOR('o', 57, __u64) + +/* Read the number of displayed frames since the decoder was started */ +#define VIDEO_GET_FRAME_COUNT _IOR('o', 58, __u64) + +#define VIDEO_COMMAND _IOWR('o', 59, struct video_command) +#define VIDEO_TRY_COMMAND _IOWR('o', 60, struct video_command) + +#endif /*_DVBVIDEO_H_*/ diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h new file mode 100644 index 00000000..07261d52 --- /dev/null +++ b/include/linux/dw_apb_timer.h @@ -0,0 +1,56 @@ +/* + * (C) Copyright 2009 Intel Corporation + * Author: Jacob Pan (jacob.jun.pan@intel.com) + * + * Shared with ARM platforms, Jamie Iles, Picochip 2011 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Support for the Synopsys DesignWare APB Timers. 
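+ *
+ * A minimal platform-code sketch (the register bases, IRQ number, clock rate
+ * and rating value below are assumptions, not requirements of this header):
+ * one timer instance can back a clocksource and another a clockevent device,
+ * roughly as
+ *
+ *	struct dw_apb_clocksource *cs;
+ *	struct dw_apb_clock_event_device *ced;
+ *
+ *	cs = dw_apb_clocksource_init(300, "dw_apb_cs", cs_base, timer_freq);
+ *	dw_apb_clocksource_start(cs);
+ *	dw_apb_clocksource_register(cs);
+ *
+ *	ced = dw_apb_clockevent_init(0, "dw_apb_ced", 300, ced_base, irq,
+ *				     timer_freq);
+ *	dw_apb_clockevent_register(ced);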
+ */ +#ifndef __DW_APB_TIMER_H__ +#define __DW_APB_TIMER_H__ + +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/interrupt.h> + +#define APBTMRS_REG_SIZE 0x14 + +struct dw_apb_timer { + void __iomem *base; + unsigned long freq; + int irq; +}; + +struct dw_apb_clock_event_device { + struct clock_event_device ced; + struct dw_apb_timer timer; + struct irqaction irqaction; + void (*eoi)(struct dw_apb_timer *); +}; + +struct dw_apb_clocksource { + struct dw_apb_timer timer; + struct clocksource cs; +}; + +void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced); +void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced); +void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced); +void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced); + +struct dw_apb_clock_event_device * +dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, + void __iomem *base, int irq, unsigned long freq); +struct dw_apb_clocksource * +dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, + unsigned long freq); +void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); +void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); +cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); +void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs); + +#endif /* __DW_APB_TIMER_H__ */ diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h new file mode 100644 index 00000000..2412e02d --- /dev/null +++ b/include/linux/dw_dmac.h @@ -0,0 +1,101 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on + * AVR32 systems.) + * + * Copyright (C) 2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef DW_DMAC_H +#define DW_DMAC_H + +#include <linux/dmaengine.h> + +/** + * struct dw_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + * @is_private: The device channels should be marked as private and not for + * by the general purpose DMA channel allocator. + */ +struct dw_dma_platform_data { + unsigned int nr_channels; + bool is_private; +#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ +#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ + unsigned char chan_allocation_order; +#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ +#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ + unsigned char chan_priority; +}; + +/* bursts size */ +enum dw_dma_msize { + DW_DMA_MSIZE_1, + DW_DMA_MSIZE_4, + DW_DMA_MSIZE_8, + DW_DMA_MSIZE_16, + DW_DMA_MSIZE_32, + DW_DMA_MSIZE_64, + DW_DMA_MSIZE_128, + DW_DMA_MSIZE_256, +}; + +/** + * struct dw_dma_slave - Controller-specific information about a slave + * + * @dma_dev: required DMA master device + * @cfg_hi: Platform-specific initializer for the CFG_HI register + * @cfg_lo: Platform-specific initializer for the CFG_LO register + * @src_master: src master for transfers on allocated channel. + * @dst_master: dest master for transfers on allocated channel. 
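+ *
+ * Purely illustrative (the controller platform device and the request-line
+ * numbers below are assumed, board-specific values): platform code would
+ * typically describe one peripheral channel like
+ *
+ *	static struct dw_dma_slave foo_tx_slave = {
+ *		.dma_dev	= &dw_dmac0_device.dev,
+ *		.cfg_hi		= DWC_CFGH_DST_PER(1),
+ *		.cfg_lo		= 0,
+ *		.src_master	= 0,
+ *		.dst_master	= 1,
+ *	};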
+ */ +struct dw_dma_slave { + struct device *dma_dev; + u32 cfg_hi; + u32 cfg_lo; + u8 src_master; + u8 dst_master; +}; + +/* Platform-configurable bits in CFG_HI */ +#define DWC_CFGH_FCMODE (1 << 0) +#define DWC_CFGH_FIFO_MODE (1 << 1) +#define DWC_CFGH_PROTCTL(x) ((x) << 2) +#define DWC_CFGH_SRC_PER(x) ((x) << 7) +#define DWC_CFGH_DST_PER(x) ((x) << 11) + +/* Platform-configurable bits in CFG_LO */ +#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ +#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) +#define DWC_CFGL_LOCK_CH_XACT (2 << 12) +#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ +#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) +#define DWC_CFGL_LOCK_BUS_XACT (2 << 14) +#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ +#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ +#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ +#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ + +/* DMA API extensions */ +struct dw_cyclic_desc { + struct dw_desc **desc; + unsigned long periods; + void (*period_callback)(void *param); + void *period_callback_param; +}; + +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_transfer_direction direction); +void dw_dma_cyclic_free(struct dma_chan *chan); +int dw_dma_cyclic_start(struct dma_chan *chan); +void dw_dma_cyclic_stop(struct dma_chan *chan); + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); + +#endif /* DW_DMAC_H */ diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h new file mode 100644 index 00000000..7e3c53a9 --- /dev/null +++ b/include/linux/dynamic_debug.h @@ -0,0 +1,108 @@ +#ifndef _DYNAMIC_DEBUG_H +#define _DYNAMIC_DEBUG_H + +/* + * An instance of this structure is created in a special + * ELF section at every dynamic debug callsite. At runtime, + * the special section is treated as an array of these. + */ +struct _ddebug { + /* + * These fields are used to drive the user interface + * for selecting and displaying debug callsites. + */ + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno:18; + /* + * The flags field controls the behaviour at the callsite. 
+ * The bits here are changed dynamically when the user + * writes commands to <debugfs>/dynamic_debug/control + */ +#define _DPRINTK_FLAGS_NONE 0 +#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */ +#define _DPRINTK_FLAGS_INCL_MODNAME (1<<1) +#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2) +#define _DPRINTK_FLAGS_INCL_LINENO (1<<3) +#define _DPRINTK_FLAGS_INCL_TID (1<<4) +#if defined DEBUG +#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT +#else +#define _DPRINTK_FLAGS_DEFAULT 0 +#endif + unsigned int flags:8; +} __attribute__((aligned(8))); + + +int ddebug_add_module(struct _ddebug *tab, unsigned int n, + const char *modname); + +#if defined(CONFIG_DYNAMIC_DEBUG) +extern int ddebug_remove_module(const char *mod_name); +extern __printf(2, 3) +int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); + +struct device; + +extern __printf(3, 4) +int __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, + const char *fmt, ...); + +struct net_device; + +extern __printf(3, 4) +int __dynamic_netdev_dbg(struct _ddebug *descriptor, + const struct net_device *dev, + const char *fmt, ...); + +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ + static struct _ddebug __used __aligned(8) \ + __attribute__((section("__verbose"))) name = { \ + .modname = KBUILD_MODNAME, \ + .function = __func__, \ + .filename = __FILE__, \ + .format = (fmt), \ + .lineno = __LINE__, \ + .flags = _DPRINTK_FLAGS_DEFAULT, \ + } + +#define dynamic_pr_debug(fmt, ...) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ + ##__VA_ARGS__); \ +} while (0) + +#define dynamic_dev_dbg(dev, fmt, ...) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + __dynamic_dev_dbg(&descriptor, dev, fmt, \ + ##__VA_ARGS__); \ +} while (0) + +#define dynamic_netdev_dbg(dev, fmt, ...) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ + __dynamic_netdev_dbg(&descriptor, dev, fmt, \ + ##__VA_ARGS__); \ +} while (0) + +#else + +static inline int ddebug_remove_module(const char *mod) +{ + return 0; +} + +#define dynamic_pr_debug(fmt, ...) \ + do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define dynamic_dev_dbg(dev, fmt, ...) \ + do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) +#endif + +#endif diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h new file mode 100644 index 00000000..5621547d --- /dev/null +++ b/include/linux/dynamic_queue_limits.h @@ -0,0 +1,97 @@ +/* + * Dynamic queue limits (dql) - Definitions + * + * Copyright (c) 2011, Tom Herbert <therbert@google.com> + * + * This header file contains the definitions for dynamic queue limits (dql). + * dql would be used in conjunction with a producer/consumer type queue + * (possibly a HW queue). Such a queue would have these general properties: + * + * 1) Objects are queued up to some limit specified as number of objects. + * 2) Periodically a completion process executes which retires consumed + * objects. + * 3) Starvation occurs when limit has been reached, all queued data has + * actually been consumed, but completion processing has not yet run + * so queuing new data is blocked. + * 4) Minimizing the amount of queued data is desirable. 
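+ *
+ * As an illustrative sketch only (driver-side pseudo-code, not part of this
+ * header; the helpers named here are documented further below), an enqueue
+ * path might look like
+ *
+ *	if (dql_avail(&dql) < 0)
+ *		stop_queue();			(wait for completions)
+ *	else
+ *		dql_queued(&dql, count);	(record what was enqueued)
+ *
+ * with the completion handler later calling dql_completed(&dql, done_count).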
+ * + * The goal of dql is to calculate the limit as the minimum number of objects + * needed to prevent starvation. + * + * The primary functions of dql are: + * dql_queued - called when objects are enqueued to record number of objects + * dql_avail - returns how many objects are available to be queued based + * on the object limit and how many objects are already enqueued + * dql_completed - called at completion time to indicate how many objects + * were retired from the queue + * + * The dql implementation does not implement any locking for the dql data + * structures, the higher layer should provide this. dql_queued should + * be serialized to prevent concurrent execution of the function; this + * is also true for dql_completed. However, dql_queued and dlq_completed can + * be executed concurrently (i.e. they can be protected by different locks). + */ + +#ifndef _LINUX_DQL_H +#define _LINUX_DQL_H + +#ifdef __KERNEL__ + +struct dql { + /* Fields accessed in enqueue path (dql_queued) */ + unsigned int num_queued; /* Total ever queued */ + unsigned int adj_limit; /* limit + num_completed */ + unsigned int last_obj_cnt; /* Count at last queuing */ + + /* Fields accessed only by completion path (dql_completed) */ + + unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */ + unsigned int num_completed; /* Total ever completed */ + + unsigned int prev_ovlimit; /* Previous over limit */ + unsigned int prev_num_queued; /* Previous queue total */ + unsigned int prev_last_obj_cnt; /* Previous queuing cnt */ + + unsigned int lowest_slack; /* Lowest slack found */ + unsigned long slack_start_time; /* Time slacks seen */ + + /* Configuration */ + unsigned int max_limit; /* Max limit */ + unsigned int min_limit; /* Minimum limit */ + unsigned int slack_hold_time; /* Time to measure slack */ +}; + +/* Set some static maximums */ +#define DQL_MAX_OBJECT (UINT_MAX / 16) +#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT) + +/* + * Record number of objects queued. Assumes that caller has already checked + * availability in the queue with dql_avail. + */ +static inline void dql_queued(struct dql *dql, unsigned int count) +{ + BUG_ON(count > DQL_MAX_OBJECT); + + dql->num_queued += count; + dql->last_obj_cnt = count; +} + +/* Returns how many objects can be queued, < 0 indicates over limit. */ +static inline int dql_avail(const struct dql *dql) +{ + return dql->adj_limit - dql->num_queued; +} + +/* Record number of completed objects and recalculate the limit. */ +void dql_completed(struct dql *dql, unsigned int count); + +/* Reset dql state */ +void dql_reset(struct dql *dql); + +/* Initialize dql state */ +int dql_init(struct dql *dql, unsigned hold_time); + +#endif /* _KERNEL_ */ + +#endif /* _LINUX_DQL_H */ diff --git a/include/linux/ecryptfs.h b/include/linux/ecryptfs.h new file mode 100644 index 00000000..2224a8c0 --- /dev/null +++ b/include/linux/ecryptfs.h @@ -0,0 +1,113 @@ +#ifndef _LINUX_ECRYPTFS_H +#define _LINUX_ECRYPTFS_H + +/* Version verification for shared data structures w/ userspace */ +#define ECRYPTFS_VERSION_MAJOR 0x00 +#define ECRYPTFS_VERSION_MINOR 0x04 +#define ECRYPTFS_SUPPORTED_FILE_VERSION 0x03 +/* These flags indicate which features are supported by the kernel + * module; userspace tools such as the mount helper read + * ECRYPTFS_VERSIONING_MASK from a sysfs handle in order to determine + * how to behave. 
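+ * As an illustration only: a tool that depends on filename encryption
+ * would typically test the ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION bit
+ * of that mask before relying on the feature.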
*/ +#define ECRYPTFS_VERSIONING_PASSPHRASE 0x00000001 +#define ECRYPTFS_VERSIONING_PUBKEY 0x00000002 +#define ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH 0x00000004 +#define ECRYPTFS_VERSIONING_POLICY 0x00000008 +#define ECRYPTFS_VERSIONING_XATTR 0x00000010 +#define ECRYPTFS_VERSIONING_MULTKEY 0x00000020 +#define ECRYPTFS_VERSIONING_DEVMISC 0x00000040 +#define ECRYPTFS_VERSIONING_HMAC 0x00000080 +#define ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION 0x00000100 +#define ECRYPTFS_VERSIONING_GCM 0x00000200 +#define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \ + | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH \ + | ECRYPTFS_VERSIONING_PUBKEY \ + | ECRYPTFS_VERSIONING_XATTR \ + | ECRYPTFS_VERSIONING_MULTKEY \ + | ECRYPTFS_VERSIONING_DEVMISC \ + | ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION) +#define ECRYPTFS_MAX_PASSWORD_LENGTH 64 +#define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH +#define ECRYPTFS_SALT_SIZE 8 +#define ECRYPTFS_SALT_SIZE_HEX (ECRYPTFS_SALT_SIZE*2) +/* The original signature size is only for what is stored on disk; all + * in-memory representations are expanded hex, so it better adapted to + * be passed around or referenced on the command line */ +#define ECRYPTFS_SIG_SIZE 8 +#define ECRYPTFS_SIG_SIZE_HEX (ECRYPTFS_SIG_SIZE*2) +#define ECRYPTFS_PASSWORD_SIG_SIZE ECRYPTFS_SIG_SIZE_HEX +#define ECRYPTFS_MAX_KEY_BYTES 64 +#define ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES 512 +#define ECRYPTFS_FILE_VERSION 0x03 +#define ECRYPTFS_MAX_PKI_NAME_BYTES 16 + +#define RFC2440_CIPHER_DES3_EDE 0x02 +#define RFC2440_CIPHER_CAST_5 0x03 +#define RFC2440_CIPHER_BLOWFISH 0x04 +#define RFC2440_CIPHER_AES_128 0x07 +#define RFC2440_CIPHER_AES_192 0x08 +#define RFC2440_CIPHER_AES_256 0x09 +#define RFC2440_CIPHER_TWOFISH 0x0a +#define RFC2440_CIPHER_CAST_6 0x0b + +#define RFC2440_CIPHER_RSA 0x01 + +/** + * For convenience, we may need to pass around the encrypted session + * key between kernel and userspace because the authentication token + * may not be extractable. For example, the TPM may not release the + * private key, instead requiring the encrypted data and returning the + * decrypted data. 
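+ *
+ * Illustratively (this flow is implied by the flag names below rather than
+ * spelled out in this header): a userspace key module that finds
+ * ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT set would decrypt encrypted_key,
+ * place the result in decrypted_key, fill in decrypted_key_size, set
+ * ECRYPTFS_CONTAINS_DECRYPTED_KEY and hand the structure back to the kernel.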
+ */ +struct ecryptfs_session_key { +#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT 0x00000001 +#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT 0x00000002 +#define ECRYPTFS_CONTAINS_DECRYPTED_KEY 0x00000004 +#define ECRYPTFS_CONTAINS_ENCRYPTED_KEY 0x00000008 + u32 flags; + u32 encrypted_key_size; + u32 decrypted_key_size; + u8 encrypted_key[ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES]; + u8 decrypted_key[ECRYPTFS_MAX_KEY_BYTES]; +}; + +struct ecryptfs_password { + u32 password_bytes; + s32 hash_algo; + u32 hash_iterations; + u32 session_key_encryption_key_bytes; +#define ECRYPTFS_PERSISTENT_PASSWORD 0x01 +#define ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET 0x02 + u32 flags; + /* Iterated-hash concatenation of salt and passphrase */ + u8 session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES]; + u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1]; + /* Always in expanded hex */ + u8 salt[ECRYPTFS_SALT_SIZE]; +}; + +enum ecryptfs_token_types {ECRYPTFS_PASSWORD, ECRYPTFS_PRIVATE_KEY}; + +struct ecryptfs_private_key { + u32 key_size; + u32 data_len; + u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1]; + char pki_type[ECRYPTFS_MAX_PKI_NAME_BYTES + 1]; + u8 data[]; +}; + +/* May be a password or a private key */ +struct ecryptfs_auth_tok { + u16 version; /* 8-bit major and 8-bit minor */ + u16 token_type; +#define ECRYPTFS_ENCRYPT_ONLY 0x00000001 + u32 flags; + struct ecryptfs_session_key session_key; + u8 reserved[32]; + union { + struct ecryptfs_password password; + struct ecryptfs_private_key private_key; + } token; +} __attribute__ ((packed)); + +#endif /* _LINUX_ECRYPTFS_H */ diff --git a/include/linux/edac.h b/include/linux/edac.h new file mode 100644 index 00000000..c621d762 --- /dev/null +++ b/include/linux/edac.h @@ -0,0 +1,476 @@ +/* + * Generic EDAC defs + * + * Author: Dave Jiang <djiang@mvista.com> + * + * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + * + */ +#ifndef _LINUX_EDAC_H_ +#define _LINUX_EDAC_H_ + +#include <linux/atomic.h> +#include <linux/kobject.h> +#include <linux/completion.h> +#include <linux/workqueue.h> + +struct device; + +#define EDAC_OPSTATE_INVAL -1 +#define EDAC_OPSTATE_POLL 0 +#define EDAC_OPSTATE_NMI 1 +#define EDAC_OPSTATE_INT 2 + +extern int edac_op_state; +extern int edac_err_assert; +extern atomic_t edac_handlers; +extern struct bus_type edac_subsys; + +extern int edac_handler_set(void); +extern void edac_atomic_assert_error(void); +extern struct bus_type *edac_get_sysfs_subsys(void); +extern void edac_put_sysfs_subsys(void); + +static inline void opstate_init(void) +{ + switch (edac_op_state) { + case EDAC_OPSTATE_POLL: + case EDAC_OPSTATE_NMI: + break; + default: + edac_op_state = EDAC_OPSTATE_POLL; + } + return; +} + +#define EDAC_MC_LABEL_LEN 31 +#define MC_PROC_NAME_MAX_LEN 7 + +/* memory devices */ +enum dev_type { + DEV_UNKNOWN = 0, + DEV_X1, + DEV_X2, + DEV_X4, + DEV_X8, + DEV_X16, + DEV_X32, /* Do these parts exist? */ + DEV_X64 /* Do these parts exist? */ +}; + +#define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN) +#define DEV_FLAG_X1 BIT(DEV_X1) +#define DEV_FLAG_X2 BIT(DEV_X2) +#define DEV_FLAG_X4 BIT(DEV_X4) +#define DEV_FLAG_X8 BIT(DEV_X8) +#define DEV_FLAG_X16 BIT(DEV_X16) +#define DEV_FLAG_X32 BIT(DEV_X32) +#define DEV_FLAG_X64 BIT(DEV_X64) + +/** + * enum mem_type - memory types. 
For a more detailed reference, please see + * http://en.wikipedia.org/wiki/DRAM + * + * @MEM_EMPTY Empty csrow + * @MEM_RESERVED: Reserved csrow type + * @MEM_UNKNOWN: Unknown csrow type + * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. + * @MEM_EDO: EDO - Extended data out, used on systems up to 1998. + * @MEM_BEDO: BEDO - Burst Extended data out, an EDO variant. + * @MEM_SDR: SDR - Single data rate SDRAM + * http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory + * They use 3 pins for chip select: Pins 0 and 2 are + * for rank 0; pins 1 and 3 are for rank 1, if the memory + * is dual-rank. + * @MEM_RDR: Registered SDR SDRAM + * @MEM_DDR: Double data rate SDRAM + * http://en.wikipedia.org/wiki/DDR_SDRAM + * @MEM_RDDR: Registered Double data rate SDRAM + * This is a variant of the DDR memories. + * A registered memory has a buffer inside it, hiding + * part of the memory details to the memory controller. + * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. + * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. + * Those memories are labed as "PC2-" instead of "PC" to + * differenciate from DDR. + * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205 + * and JESD206. + * Those memories are accessed per DIMM slot, and not by + * a chip select signal. + * @MEM_RDDR2: Registered DDR2 RAM + * This is a variant of the DDR2 memories. + * @MEM_XDR: Rambus XDR + * It is an evolution of the original RAMBUS memories, + * created to compete with DDR2. Weren't used on any + * x86 arch, but cell_edac PPC memory controller uses it. + * @MEM_DDR3: DDR3 RAM + * @MEM_RDDR3: Registered DDR3 RAM + * This is a variant of the DDR3 memories. + */ +enum mem_type { + MEM_EMPTY = 0, + MEM_RESERVED, + MEM_UNKNOWN, + MEM_FPM, + MEM_EDO, + MEM_BEDO, + MEM_SDR, + MEM_RDR, + MEM_DDR, + MEM_RDDR, + MEM_RMBS, + MEM_DDR2, + MEM_FB_DDR2, + MEM_RDDR2, + MEM_XDR, + MEM_DDR3, + MEM_RDDR3, +}; + +#define MEM_FLAG_EMPTY BIT(MEM_EMPTY) +#define MEM_FLAG_RESERVED BIT(MEM_RESERVED) +#define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN) +#define MEM_FLAG_FPM BIT(MEM_FPM) +#define MEM_FLAG_EDO BIT(MEM_EDO) +#define MEM_FLAG_BEDO BIT(MEM_BEDO) +#define MEM_FLAG_SDR BIT(MEM_SDR) +#define MEM_FLAG_RDR BIT(MEM_RDR) +#define MEM_FLAG_DDR BIT(MEM_DDR) +#define MEM_FLAG_RDDR BIT(MEM_RDDR) +#define MEM_FLAG_RMBS BIT(MEM_RMBS) +#define MEM_FLAG_DDR2 BIT(MEM_DDR2) +#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) +#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) +#define MEM_FLAG_XDR BIT(MEM_XDR) +#define MEM_FLAG_DDR3 BIT(MEM_DDR3) +#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) + +/* chipset Error Detection and Correction capabilities and mode */ +enum edac_type { + EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ + EDAC_NONE, /* Doesn't support ECC */ + EDAC_RESERVED, /* Reserved ECC type */ + EDAC_PARITY, /* Detects parity errors */ + EDAC_EC, /* Error Checking - no correction */ + EDAC_SECDED, /* Single bit error correction, Double detection */ + EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? 
*/ + EDAC_S4ECD4ED, /* Chipkill x4 devices */ + EDAC_S8ECD8ED, /* Chipkill x8 devices */ + EDAC_S16ECD16ED, /* Chipkill x16 devices */ +}; + +#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN) +#define EDAC_FLAG_NONE BIT(EDAC_NONE) +#define EDAC_FLAG_PARITY BIT(EDAC_PARITY) +#define EDAC_FLAG_EC BIT(EDAC_EC) +#define EDAC_FLAG_SECDED BIT(EDAC_SECDED) +#define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED) +#define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED) +#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) +#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) + +/* scrubbing capabilities */ +enum scrub_type { + SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */ + SCRUB_NONE, /* No scrubber */ + SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */ + SCRUB_SW_SRC, /* Software scrub only errors */ + SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */ + SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */ + SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */ + SCRUB_HW_SRC, /* Hardware scrub only errors */ + SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */ + SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */ +}; + +#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) +#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC) +#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC) +#define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_SCRUB_TUNABLE) +#define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG) +#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC) +#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC) +#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) + +/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ + +/* EDAC internal operation states */ +#define OP_ALLOC 0x100 +#define OP_RUNNING_POLL 0x201 +#define OP_RUNNING_INTERRUPT 0x202 +#define OP_RUNNING_POLL_INTR 0x203 +#define OP_OFFLINE 0x300 + +/* + * Concepts used at the EDAC subsystem + * + * There are several things to be aware of that aren't at all obvious: + * + * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc.. + * + * These are some of the many terms that are thrown about that don't always + * mean what people think they mean (Inconceivable!). In the interest of + * creating a common ground for discussion, terms and their definitions + * will be established. + * + * Memory devices: The individual DRAM chips on a memory stick. These + * devices commonly output 4 and 8 bits each (x4, x8). + * Grouping several of these in parallel provides the + * number of bits that the memory controller expects: + * typically 72 bits, in order to provide 64 bits + + * 8 bits of ECC data. + * + * Memory Stick: A printed circuit board that aggregates multiple + * memory devices in parallel. In general, this is the + * Field Replaceable Unit (FRU) which gets replaced, in + * the case of excessive errors. Most often it is also + * called DIMM (Dual Inline Memory Module). + * + * Memory Socket: A physical connector on the motherboard that accepts + * a single memory stick. Also called as "slot" on several + * datasheets. + * + * Channel: A memory controller channel, responsible to communicate + * with a group of DIMMs. Each channel has its own + * independent control (command) and data bus, and can + * be used independently or grouped with other channels. + * + * Branch: It is typically the highest hierarchy on a + * Fully-Buffered DIMM memory controller. + * Typically, it contains two channels. + * Two channels at the same branch can be used in single + * mode or in lockstep mode. 
+ * When lockstep is enabled, the cacheline is doubled, + * but it generally brings some performance penalty. + * Also, it is generally not possible to point to just one + * memory stick when an error occurs, as the error + * correction code is calculated using two DIMMs instead + * of one. Due to that, it is capable of correcting more + * errors than on single mode. + * + * Single-channel: The data accessed by the memory controller is contained + * into one dimm only. E. g. if the data is 64 bits-wide, + * the data flows to the CPU using one 64 bits parallel + * access. + * Typically used with SDR, DDR, DDR2 and DDR3 memories. + * FB-DIMM and RAMBUS use a different concept for channel, + * so this concept doesn't apply there. + * + * Double-channel: The data size accessed by the memory controller is + * interlaced into two dimms, accessed at the same time. + * E. g. if the DIMM is 64 bits-wide (72 bits with ECC), + * the data flows to the CPU using a 128 bits parallel + * access. + * + * Chip-select row: This is the name of the DRAM signal used to select the + * DRAM ranks to be accessed. Common chip-select rows for + * single channel are 64 bits, for dual channel 128 bits. + * It may not be visible by the memory controller, as some + * DIMM types have a memory buffer that can hide direct + * access to it from the Memory Controller. + * + * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. + * Motherboards commonly drive two chip-select pins to + * a memory stick. A single-ranked stick, will occupy + * only one of those rows. The other will be unused. + * + * Double-Ranked stick: A double-ranked stick has two chip-select rows which + * access different sets of memory devices. The two + * rows cannot be accessed concurrently. + * + * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick. + * A double-sided stick has two chip-select rows which + * access different sets of memory devices. The two + * rows cannot be accessed concurrently. "Double-sided" + * is irrespective of the memory devices being mounted + * on both sides of the memory stick. + * + * Socket set: All of the memory sticks that are required for + * a single memory access or all of the memory sticks + * spanned by a chip-select row. A single socket set + * has two chip-select rows and if double-sided sticks + * are used these will occupy those chip-select rows. + * + * Bank: This term is avoided because it is unclear when + * needing to distinguish between chip-select rows and + * socket sets. + * + * Controller pages: + * + * Physical pages: + * + * Virtual pages: + * + * + * STRUCTURE ORGANIZATION AND CHOICES + * + * + * + * PS - I enjoyed writing all that about as much as you enjoyed reading it. + */ + +/** + * struct rank_info - contains the information for one DIMM rank + * + * @chan_idx: channel number where the rank is (typically, 0 or 1) + * @ce_count: number of correctable errors for this rank + * @label: DIMM label. Different ranks for the same DIMM should be + * filled, on userspace, with the same label. + * FIXME: The core currently won't enforce it. + * @csrow: A pointer to the chip select row structure (the parent + * structure). The location of the rank is given by + * the (csrow->csrow_idx, chan_idx) vector. 
+ */ +struct rank_info { + int chan_idx; + u32 ce_count; + char label[EDAC_MC_LABEL_LEN + 1]; + struct csrow_info *csrow; /* the parent */ +}; + +struct csrow_info { + unsigned long first_page; /* first page number in dimm */ + unsigned long last_page; /* last page number in dimm */ + unsigned long page_mask; /* used for interleaving - + * 0UL for non intlv + */ + u32 nr_pages; /* number of pages in csrow */ + u32 grain; /* granularity of reported error in bytes */ + int csrow_idx; /* the chip-select row */ + enum dev_type dtype; /* memory device type */ + u32 ue_count; /* Uncorrectable Errors for this csrow */ + u32 ce_count; /* Correctable Errors for this csrow */ + enum mem_type mtype; /* memory csrow type */ + enum edac_type edac_mode; /* EDAC mode for this csrow */ + struct mem_ctl_info *mci; /* the parent */ + + struct kobject kobj; /* sysfs kobject for this csrow */ + + /* channel information for this csrow */ + u32 nr_channels; + struct rank_info *channels; +}; + +struct mcidev_sysfs_group { + const char *name; /* group name */ + const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */ +}; + +struct mcidev_sysfs_group_kobj { + struct list_head list; /* list for all instances within a mc */ + + struct kobject kobj; /* kobj for the group */ + + const struct mcidev_sysfs_group *grp; /* group description table */ + struct mem_ctl_info *mci; /* the parent */ +}; + +/* mcidev_sysfs_attribute structure + * used for driver sysfs attributes and in mem_ctl_info + * sysfs top level entries + */ +struct mcidev_sysfs_attribute { + /* It should use either attr or grp */ + struct attribute attr; + const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */ + + /* Ops for show/store values at the attribute - not used on group */ + ssize_t (*show)(struct mem_ctl_info *,char *); + ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); +}; + +/* MEMORY controller information structure + */ +struct mem_ctl_info { + struct list_head link; /* for global list of mem_ctl_info structs */ + + struct module *owner; /* Module owner of this control struct */ + + unsigned long mtype_cap; /* memory types supported by mc */ + unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ + unsigned long edac_cap; /* configuration capabilities - this is + * closely related to edac_ctl_cap. The + * difference is that the controller may be + * capable of s4ecd4ed which would be listed + * in edac_ctl_cap, but if channels aren't + * capable of s4ecd4ed then the edac_cap would + * not have that capability. + */ + unsigned long scrub_cap; /* chipset scrub capabilities */ + enum scrub_type scrub_mode; /* current scrub mode */ + + /* Translates sdram memory scrub rate given in bytes/sec to the + internal representation and configures whatever else needs + to be configured. + */ + int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw); + + /* Get the current sdram memory scrub rate from the internal + representation and converts it to the closest matching + bandwidth in bytes/sec. + */ + int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci); + + + /* pointer to edac checking routine */ + void (*edac_check) (struct mem_ctl_info * mci); + + /* + * Remaps memory pages: controller pages to physical pages. + * For most MC's, this will be NULL. + */ + /* FIXME - why not send the phys page to begin with? 
*/ + unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, + unsigned long page); + int mc_idx; + int nr_csrows; + struct csrow_info *csrows; + /* + * FIXME - what about controllers on other busses? - IDs must be + * unique. dev pointer should be sufficiently unique, but + * BUS:SLOT.FUNC numbers may not be unique. + */ + struct device *dev; + const char *mod_name; + const char *mod_ver; + const char *ctl_name; + const char *dev_name; + char proc_name[MC_PROC_NAME_MAX_LEN + 1]; + void *pvt_info; + u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */ + u32 ce_noinfo_count; /* Correctable Errors w/o info */ + u32 ue_count; /* Total Uncorrectable Errors for this MC */ + u32 ce_count; /* Total Correctable Errors for this MC */ + unsigned long start_time; /* mci load start time (in jiffies) */ + + struct completion complete; + + /* edac sysfs device control */ + struct kobject edac_mci_kobj; + + /* list for all grp instances within a mc */ + struct list_head grp_kobj_list; + + /* Additional top controller level attributes, but specified + * by the low level driver. + * + * Set by the low level driver to provide attributes at the + * controller level, same level as 'ue_count' and 'ce_count' above. + * An array of structures, NULL terminated + * + * If attributes are desired, then set to array of attributes + * If no attributes are desired, leave NULL + */ + const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; + + /* work struct for this MC */ + struct delayed_work work; + + /* the internal state of this controller instance */ + int op_state; +}; + +#endif diff --git a/include/linux/edd.h b/include/linux/edd.h new file mode 100644 index 00000000..4cbd0fe9 --- /dev/null +++ b/include/linux/edd.h @@ -0,0 +1,194 @@ +/* + * linux/include/linux/edd.h + * Copyright (C) 2002, 2003, 2004 Dell Inc. + * by Matt Domsch <Matt_Domsch@dell.com> + * + * structures and definitions for the int 13h, ax={41,48}h + * BIOS Enhanced Disk Drive Services + * This is based on the T13 group document D1572 Revision 0 (August 14 2002) + * available at http://www.t13.org/docs2002/d1572r0.pdf. It is + * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf + * + * In a nutshell, arch/{i386,x86_64}/boot/setup.S populates a scratch + * table in the boot_params that contains a list of BIOS-enumerated + * boot devices. + * In arch/{i386,x86_64}/kernel/setup.c, this information is + * transferred into the edd structure, and in drivers/firmware/edd.c, that + * information is used to identify BIOS boot disk. The code in setup.S + * is very sensitive to the size of these structures. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _LINUX_EDD_H +#define _LINUX_EDD_H + +#include <linux/types.h> + +#define EDDNR 0x1e9 /* addr of number of edd_info structs at EDDBUF + in boot_params - treat this as 1 byte */ +#define EDDBUF 0xd00 /* addr of edd_info structs in boot_params */ +#define EDDMAXNR 6 /* number of edd_info structs starting at EDDBUF */ +#define EDDEXTSIZE 8 /* change these if you muck with the structures */ +#define EDDPARMSIZE 74 +#define CHECKEXTENSIONSPRESENT 0x41 +#define GETDEVICEPARAMETERS 0x48 +#define LEGACYGETDEVICEPARAMETERS 0x08 +#define EDDMAGIC1 0x55AA +#define EDDMAGIC2 0xAA55 + + +#define READ_SECTORS 0x02 /* int13 AH=0x02 is READ_SECTORS command */ +#define EDD_MBR_SIG_OFFSET 0x1B8 /* offset of signature in the MBR */ +#define EDD_MBR_SIG_BUF 0x290 /* addr in boot params */ +#define EDD_MBR_SIG_MAX 16 /* max number of signatures to store */ +#define EDD_MBR_SIG_NR_BUF 0x1ea /* addr of number of MBR signtaures at EDD_MBR_SIG_BUF + in boot_params - treat this as 1 byte */ + +#ifndef __ASSEMBLY__ + +#define EDD_EXT_FIXED_DISK_ACCESS (1 << 0) +#define EDD_EXT_DEVICE_LOCKING_AND_EJECTING (1 << 1) +#define EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT (1 << 2) +#define EDD_EXT_64BIT_EXTENSIONS (1 << 3) + +#define EDD_INFO_DMA_BOUNDARY_ERROR_TRANSPARENT (1 << 0) +#define EDD_INFO_GEOMETRY_VALID (1 << 1) +#define EDD_INFO_REMOVABLE (1 << 2) +#define EDD_INFO_WRITE_VERIFY (1 << 3) +#define EDD_INFO_MEDIA_CHANGE_NOTIFICATION (1 << 4) +#define EDD_INFO_LOCKABLE (1 << 5) +#define EDD_INFO_NO_MEDIA_PRESENT (1 << 6) +#define EDD_INFO_USE_INT13_FN50 (1 << 7) + +struct edd_device_params { + __u16 length; + __u16 info_flags; + __u32 num_default_cylinders; + __u32 num_default_heads; + __u32 sectors_per_track; + __u64 number_of_sectors; + __u16 bytes_per_sector; + __u32 dpte_ptr; /* 0xFFFFFFFF for our purposes */ + __u16 key; /* = 0xBEDD */ + __u8 device_path_info_length; /* = 44 */ + __u8 reserved2; + __u16 reserved3; + __u8 host_bus_type[4]; + __u8 interface_type[8]; + union { + struct { + __u16 base_address; + __u16 reserved1; + __u32 reserved2; + } __attribute__ ((packed)) isa; + struct { + __u8 bus; + __u8 slot; + __u8 function; + __u8 channel; + __u32 reserved; + } __attribute__ ((packed)) pci; + /* pcix is same as pci */ + struct { + __u64 reserved; + } __attribute__ ((packed)) ibnd; + struct { + __u64 reserved; + } __attribute__ ((packed)) xprs; + struct { + __u64 reserved; + } __attribute__ ((packed)) htpt; + struct { + __u64 reserved; + } __attribute__ ((packed)) unknown; + } interface_path; + union { + struct { + __u8 device; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 reserved4; + } __attribute__ ((packed)) ata; + struct { + __u8 device; + __u8 lun; + __u8 reserved1; + __u8 reserved2; + __u32 reserved3; + __u64 reserved4; + } __attribute__ ((packed)) atapi; + struct { + __u16 id; + __u64 lun; + __u16 reserved1; + __u32 reserved2; + } __attribute__ ((packed)) scsi; + struct { + __u64 serial_number; + __u64 reserved; + } __attribute__ ((packed)) usb; + struct { + __u64 eui; + __u64 reserved; + } __attribute__ ((packed)) i1394; + struct { + __u64 wwid; + __u64 lun; + } __attribute__ ((packed)) fibre; + struct { + __u64 identity_tag; + __u64 reserved; + } __attribute__ ((packed)) i2o; + struct { + __u32 array_number; + __u32 reserved1; + __u64 reserved2; + } __attribute__ ((packed)) raid; + struct { + __u8 device; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 reserved4; + } __attribute__ ((packed)) sata; + struct { + __u64 reserved1; + __u64 reserved2; + } 
__attribute__ ((packed)) unknown; + } device_path; + __u8 reserved4; + __u8 checksum; +} __attribute__ ((packed)); + +struct edd_info { + __u8 device; + __u8 version; + __u16 interface_support; + __u16 legacy_max_cylinder; + __u8 legacy_max_head; + __u8 legacy_sectors_per_track; + struct edd_device_params params; +} __attribute__ ((packed)); + +struct edd { + unsigned int mbr_signature[EDD_MBR_SIG_MAX]; + struct edd_info edd_info[EDDMAXNR]; + unsigned char mbr_signature_nr; + unsigned char edd_info_nr; +}; + +#ifdef __KERNEL__ +extern struct edd edd; +#endif /* __KERNEL__ */ +#endif /*!__ASSEMBLY__ */ + +#endif /* _LINUX_EDD_H */ diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h new file mode 100644 index 00000000..e50f98b0 --- /dev/null +++ b/include/linux/eeprom_93cx6.h @@ -0,0 +1,82 @@ +/* + Copyright (C) 2004 - 2006 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: eeprom_93cx6 + Abstract: EEPROM reader datastructures for 93cx6 chipsets. + Supported chipsets: 93c46, 93c56 and 93c66. + */ + +/* + * EEPROM operation defines. + */ +#define PCI_EEPROM_WIDTH_93C46 6 +#define PCI_EEPROM_WIDTH_93C56 8 +#define PCI_EEPROM_WIDTH_93C66 8 +#define PCI_EEPROM_WIDTH_93C86 8 +#define PCI_EEPROM_WIDTH_OPCODE 3 +#define PCI_EEPROM_WRITE_OPCODE 0x05 +#define PCI_EEPROM_ERASE_OPCODE 0x07 +#define PCI_EEPROM_READ_OPCODE 0x06 +#define PCI_EEPROM_EWDS_OPCODE 0x10 +#define PCI_EEPROM_EWEN_OPCODE 0x13 + +/** + * struct eeprom_93cx6 - control structure for setting the commands + * for reading the eeprom data. + * @data: private pointer for the driver. + * @register_read(struct eeprom_93cx6 *eeprom): handler to + * read the eeprom register, this function should set all reg_* fields. + * @register_write(struct eeprom_93cx6 *eeprom): handler to + * write to the eeprom register by using all reg_* fields. + * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines + * @drive_data: Set if we're driving the data line. + * @reg_data_in: register field to indicate data input + * @reg_data_out: register field to indicate data output + * @reg_data_clock: register field to set the data clock + * @reg_chip_select: register field to set the chip select + * + * This structure is used for the communication between the driver + * and the eeprom_93cx6 handlers for reading the eeprom. 
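+ *
+ * Editorial sketch, not part of the original header: a driver embeds this
+ * structure, points the handlers at its own register accessors and then uses
+ * the read helpers declared below.  The "foo_*" names are hypothetical:
+ *
+ *   struct eeprom_93cx6 eeprom;
+ *   u16 word;
+ *
+ *   eeprom.data           = foo_priv;
+ *   eeprom.register_read  = foo_eeprom_register_read;
+ *   eeprom.register_write = foo_eeprom_register_write;
+ *   eeprom.width          = PCI_EEPROM_WIDTH_93C46;
+ *
+ *   eeprom_93cx6_read(&eeprom, 0, &word);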
+ */ +struct eeprom_93cx6 { + void *data; + + void (*register_read)(struct eeprom_93cx6 *eeprom); + void (*register_write)(struct eeprom_93cx6 *eeprom); + + int width; + + char drive_data; + char reg_data_in; + char reg_data_out; + char reg_data_clock; + char reg_chip_select; +}; + +extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, + const u8 word, u16 *data); +extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, + const u8 word, __le16 *data, const u16 words); + +extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); + +extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, + u8 addr, u16 data); diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h new file mode 100644 index 00000000..06791811 --- /dev/null +++ b/include/linux/eeprom_93xx46.h @@ -0,0 +1,18 @@ +/* + * Module: eeprom_93xx46 + * platform description for 93xx46 EEPROMs. + */ + +struct eeprom_93xx46_platform_data { + unsigned char flags; +#define EE_ADDR8 0x01 /* 8 bit addr. cfg */ +#define EE_ADDR16 0x02 /* 16 bit addr. cfg */ +#define EE_READONLY 0x08 /* forbid writing */ + + /* + * optional hooks to control additional logic + * before and after spi transfer. + */ + void (*prepare)(void *); + void (*finish)(void *); +}; diff --git a/include/linux/efi.h b/include/linux/efi.h new file mode 100644 index 00000000..ec45ccd8 --- /dev/null +++ b/include/linux/efi.h @@ -0,0 +1,665 @@ +#ifndef _LINUX_EFI_H +#define _LINUX_EFI_H + +/* + * Extensible Firmware Interface + * Based on 'Extensible Firmware Interface Specification' version 0.9, April 30, 1999 + * + * Copyright (C) 1999 VA Linux Systems + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> + * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co. + * David Mosberger-Tang <davidm@hpl.hp.com> + * Stephane Eranian <eranian@hpl.hp.com> + */ +#include <linux/init.h> +#include <linux/string.h> +#include <linux/time.h> +#include <linux/types.h> +#include <linux/proc_fs.h> +#include <linux/rtc.h> +#include <linux/ioport.h> +#include <linux/pfn.h> +#include <linux/pstore.h> + +#include <asm/page.h> + +#define EFI_SUCCESS 0 +#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1))) +#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1))) +#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1))) +#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1))) +#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1))) +#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1))) + +typedef unsigned long efi_status_t; +typedef u8 efi_bool_t; +typedef u16 efi_char16_t; /* UNICODE character */ + + +typedef struct { + u8 b[16]; +} efi_guid_t; + +#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ +((efi_guid_t) \ +{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ + (b) & 0xff, ((b) >> 8) & 0xff, \ + (c) & 0xff, ((c) >> 8) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +/* + * Generic EFI table header + */ +typedef struct { + u64 signature; + u32 revision; + u32 headersize; + u32 crc32; + u32 reserved; +} efi_table_hdr_t; + +/* + * Memory map descriptor: + */ + +/* Memory types: */ +#define EFI_RESERVED_TYPE 0 +#define EFI_LOADER_CODE 1 +#define EFI_LOADER_DATA 2 +#define EFI_BOOT_SERVICES_CODE 3 +#define EFI_BOOT_SERVICES_DATA 4 +#define EFI_RUNTIME_SERVICES_CODE 5 +#define EFI_RUNTIME_SERVICES_DATA 6 +#define EFI_CONVENTIONAL_MEMORY 7 +#define EFI_UNUSABLE_MEMORY 8 +#define EFI_ACPI_RECLAIM_MEMORY 9 +#define EFI_ACPI_MEMORY_NVS 10 +#define EFI_MEMORY_MAPPED_IO 11 +#define 
EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 +#define EFI_PAL_CODE 13 +#define EFI_MAX_MEMORY_TYPE 14 + +/* Attribute values: */ +#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */ +#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */ +#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */ +#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */ +#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ +#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ +#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ +#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ +#define EFI_MEMORY_DESCRIPTOR_VERSION 1 + +#define EFI_PAGE_SHIFT 12 + +typedef struct { + u32 type; + u32 pad; + u64 phys_addr; + u64 virt_addr; + u64 num_pages; + u64 attribute; +} efi_memory_desc_t; + +typedef struct { + efi_guid_t guid; + u32 headersize; + u32 flags; + u32 imagesize; +} efi_capsule_header_t; + +/* + * Allocation types for calls to boottime->allocate_pages. + */ +#define EFI_ALLOCATE_ANY_PAGES 0 +#define EFI_ALLOCATE_MAX_ADDRESS 1 +#define EFI_ALLOCATE_ADDRESS 2 +#define EFI_MAX_ALLOCATE_TYPE 3 + +typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); + +/* + * Types and defines for Time Services + */ +#define EFI_TIME_ADJUST_DAYLIGHT 0x1 +#define EFI_TIME_IN_DAYLIGHT 0x2 +#define EFI_UNSPECIFIED_TIMEZONE 0x07ff + +typedef struct { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 pad1; + u32 nanosecond; + s16 timezone; + u8 daylight; + u8 pad2; +} efi_time_t; + +typedef struct { + u32 resolution; + u32 accuracy; + u8 sets_to_zero; +} efi_time_cap_t; + +/* + * EFI Boot Services table + */ +typedef struct { + efi_table_hdr_t hdr; + void *raise_tpl; + void *restore_tpl; + void *allocate_pages; + void *free_pages; + void *get_memory_map; + void *allocate_pool; + void *free_pool; + void *create_event; + void *set_timer; + void *wait_for_event; + void *signal_event; + void *close_event; + void *check_event; + void *install_protocol_interface; + void *reinstall_protocol_interface; + void *uninstall_protocol_interface; + void *handle_protocol; + void *__reserved; + void *register_protocol_notify; + void *locate_handle; + void *locate_device_path; + void *install_configuration_table; + void *load_image; + void *start_image; + void *exit; + void *unload_image; + void *exit_boot_services; + void *get_next_monotonic_count; + void *stall; + void *set_watchdog_timer; + void *connect_controller; + void *disconnect_controller; + void *open_protocol; + void *close_protocol; + void *open_protocol_information; + void *protocols_per_handle; + void *locate_handle_buffer; + void *locate_protocol; + void *install_multiple_protocol_interfaces; + void *uninstall_multiple_protocol_interfaces; + void *calculate_crc32; + void *copy_mem; + void *set_mem; + void *create_event_ex; +} efi_boot_services_t; + +/* + * Types and defines for EFI ResetSystem + */ +#define EFI_RESET_COLD 0 +#define EFI_RESET_WARM 1 +#define EFI_RESET_SHUTDOWN 2 + +/* + * EFI Runtime Services table + */ +#define EFI_RUNTIME_SERVICES_SIGNATURE ((u64)0x5652453544e5552ULL) +#define EFI_RUNTIME_SERVICES_REVISION 0x00010000 + +typedef struct { + efi_table_hdr_t hdr; + unsigned long get_time; + unsigned long set_time; + unsigned long get_wakeup_time; + unsigned long set_wakeup_time; + unsigned long set_virtual_address_map; + unsigned long convert_pointer; + unsigned long get_variable; + 
unsigned long get_next_variable; + unsigned long set_variable; + unsigned long get_next_high_mono_count; + unsigned long reset_system; + unsigned long update_capsule; + unsigned long query_capsule_caps; + unsigned long query_variable_info; +} efi_runtime_services_t; + +typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc); +typedef efi_status_t efi_set_time_t (efi_time_t *tm); +typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending, + efi_time_t *tm); +typedef efi_status_t efi_set_wakeup_time_t (efi_bool_t enabled, efi_time_t *tm); +typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, + unsigned long *data_size, void *data); +typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name, + efi_guid_t *vendor); +typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, + void *data); +typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); +typedef void efi_reset_system_t (int reset_type, efi_status_t status, + unsigned long data_size, efi_char16_t *data); +typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_size, + unsigned long descriptor_size, + u32 descriptor_version, + efi_memory_desc_t *virtual_map); +typedef efi_status_t efi_query_variable_info_t(u32 attr, + u64 *storage_space, + u64 *remaining_space, + u64 *max_variable_size); +typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **capsules, + unsigned long count, + unsigned long sg_list); +typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules, + unsigned long count, + u64 *max_size, + int *reset_type); + +/* + * EFI Configuration Table and GUID definitions + */ +#define NULL_GUID \ + EFI_GUID( 0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ) + +#define MPS_TABLE_GUID \ + EFI_GUID( 0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) + +#define ACPI_TABLE_GUID \ + EFI_GUID( 0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) + +#define ACPI_20_TABLE_GUID \ + EFI_GUID( 0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81 ) + +#define SMBIOS_TABLE_GUID \ + EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) + +#define SAL_SYSTEM_TABLE_GUID \ + EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) + +#define HCDP_TABLE_GUID \ + EFI_GUID( 0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98 ) + +#define UGA_IO_PROTOCOL_GUID \ + EFI_GUID( 0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2 ) + +#define EFI_GLOBAL_VARIABLE_GUID \ + EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c ) + +#define UV_SYSTEM_TABLE_GUID \ + EFI_GUID( 0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 ) + +#define LINUX_EFI_CRASH_GUID \ + EFI_GUID( 0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0 ) + +#define LOADED_IMAGE_PROTOCOL_GUID \ + EFI_GUID( 0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b ) + +#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \ + EFI_GUID( 0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a ) + +#define EFI_UGA_PROTOCOL_GUID \ + EFI_GUID( 0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39 ) + +#define EFI_PCI_IO_PROTOCOL_GUID \ + EFI_GUID( 
0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x2, 0x9a ) + +#define EFI_FILE_INFO_ID \ + EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b ) + +#define EFI_FILE_SYSTEM_GUID \ + EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b ) + +typedef struct { + efi_guid_t guid; + u64 table; +} efi_config_table_64_t; + +typedef struct { + efi_guid_t guid; + u32 table; +} efi_config_table_32_t; + +typedef struct { + efi_guid_t guid; + unsigned long table; +} efi_config_table_t; + +#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) + +#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30)) +#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20)) +#define EFI_2_10_SYSTEM_TABLE_REVISION ((2 << 16) | (10)) +#define EFI_2_00_SYSTEM_TABLE_REVISION ((2 << 16) | (00)) +#define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | (10)) +#define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | (02)) + +typedef struct { + efi_table_hdr_t hdr; + u64 fw_vendor; /* physical addr of CHAR16 vendor string */ + u32 fw_revision; + u32 __pad1; + u64 con_in_handle; + u64 con_in; + u64 con_out_handle; + u64 con_out; + u64 stderr_handle; + u64 stderr; + u64 runtime; + u64 boottime; + u32 nr_tables; + u32 __pad2; + u64 tables; +} efi_system_table_64_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 fw_vendor; /* physical addr of CHAR16 vendor string */ + u32 fw_revision; + u32 con_in_handle; + u32 con_in; + u32 con_out_handle; + u32 con_out; + u32 stderr_handle; + u32 stderr; + u32 runtime; + u32 boottime; + u32 nr_tables; + u32 tables; +} efi_system_table_32_t; + +typedef struct { + efi_table_hdr_t hdr; + unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ + u32 fw_revision; + unsigned long con_in_handle; + unsigned long con_in; + unsigned long con_out_handle; + unsigned long con_out; + unsigned long stderr_handle; + unsigned long stderr; + efi_runtime_services_t *runtime; + efi_boot_services_t *boottime; + unsigned long nr_tables; + unsigned long tables; +} efi_system_table_t; + +struct efi_memory_map { + void *phys_map; + void *map; + void *map_end; + int nr_map; + unsigned long desc_version; + unsigned long desc_size; +}; + +typedef struct { + u32 revision; + void *parent_handle; + efi_system_table_t *system_table; + void *device_handle; + void *file_path; + void *reserved; + u32 load_options_size; + void *load_options; + void *image_base; + __aligned_u64 image_size; + unsigned int image_code_type; + unsigned int image_data_type; + unsigned long unload; +} efi_loaded_image_t; + +typedef struct { + u64 revision; + void *open_volume; +} efi_file_io_interface_t; + +typedef struct { + u64 size; + u64 file_size; + u64 phys_size; + efi_time_t create_time; + efi_time_t last_access_time; + efi_time_t modification_time; + __aligned_u64 attribute; + efi_char16_t filename[1]; +} efi_file_info_t; + +typedef struct { + u64 revision; + void *open; + void *close; + void *delete; + void *read; + void *write; + void *get_position; + void *set_position; + void *get_info; + void *set_info; + void *flush; +} efi_file_handle_t; + +#define EFI_FILE_MODE_READ 0x0000000000000001 +#define EFI_FILE_MODE_WRITE 0x0000000000000002 +#define EFI_FILE_MODE_CREATE 0x8000000000000000 + +#define EFI_INVALID_TABLE_ADDR (~0UL) + +/* + * All runtime access to EFI goes through this structure: + */ +extern struct efi { + efi_system_table_t *systab; /* EFI system table */ + unsigned int runtime_version; /* Runtime services version */ + unsigned long mps; 
/* MPS table */ + unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ + unsigned long acpi20; /* ACPI table (ACPI 2.0) */ + unsigned long smbios; /* SM BIOS table */ + unsigned long sal_systab; /* SAL system table */ + unsigned long boot_info; /* boot info table */ + unsigned long hcdp; /* HCDP table */ + unsigned long uga; /* UGA table */ + unsigned long uv_systab; /* UV system table */ + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_query_variable_info_t *query_variable_info; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + efi_set_virtual_address_map_t *set_virtual_address_map; +} efi; + +static inline int +efi_guidcmp (efi_guid_t left, efi_guid_t right) +{ + return memcmp(&left, &right, sizeof (efi_guid_t)); +} + +static inline char * +efi_guid_unparse(efi_guid_t *guid, char *out) +{ + sprintf(out, "%pUl", guid->b); + return out; +} + +extern void efi_init (void); +extern void *efi_get_pal_addr (void); +extern void efi_map_pal_code (void); +extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); +extern void efi_gettimeofday (struct timespec *ts); +extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +extern u64 efi_get_iobase (void); +extern u32 efi_mem_type (unsigned long phys_addr); +extern u64 efi_mem_attributes (unsigned long phys_addr); +extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); +extern int __init efi_uart_console_only (void); +extern void efi_initialize_iomem_resources(struct resource *code_resource, + struct resource *data_resource, struct resource *bss_resource); +extern unsigned long efi_get_time(void); +extern int efi_set_rtc_mmss(unsigned long nowtime); +extern void efi_reserve_boot_services(void); +extern struct efi_memory_map memmap; + +/** + * efi_range_is_wc - check the WC bit on an address range + * @start: starting kvirt address + * @len: length of range + * + * Consult the EFI memory map and make sure it's ok to set this range WC. + * Returns true or false. + */ +static inline int efi_range_is_wc(unsigned long start, unsigned long len) +{ + unsigned long i; + + for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) { + unsigned long paddr = __pa(start + i); + if (!(efi_mem_attributes(paddr) & EFI_MEMORY_WC)) + return 0; + } + /* The range checked out */ + return 1; +} + +#ifdef CONFIG_EFI_PCDP +extern int __init efi_setup_pcdp_console(char *); +#endif + +/* + * We play games with efi_enabled so that the compiler will, if possible, remove + * EFI-related code altogether. 
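+ *
+ * Editorial sketch, not part of the original header: callers are expected to
+ * test efi_enabled before touching the runtime services, so that the whole
+ * branch is discarded when CONFIG_EFI is off and efi_enabled is constant 0:
+ *
+ *   efi_time_t tm;
+ *   efi_time_cap_t cap;
+ *
+ *   if (efi_enabled && efi.get_time(&tm, &cap) == EFI_SUCCESS)
+ *           pr_info("EFI time %u-%u-%u\n", tm.year, tm.month, tm.day);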
+ */ +#ifdef CONFIG_EFI +# ifdef CONFIG_X86 + extern int efi_enabled; + extern bool efi_64bit; +# else +# define efi_enabled 1 +# endif +#else +# define efi_enabled 0 +#endif + +/* + * Variable Attributes + */ +#define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001 +#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002 +#define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004 +#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008 +#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010 +#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020 +#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040 + +#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \ + EFI_VARIABLE_BOOTSERVICE_ACCESS | \ + EFI_VARIABLE_RUNTIME_ACCESS | \ + EFI_VARIABLE_HARDWARE_ERROR_RECORD | \ + EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \ + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \ + EFI_VARIABLE_APPEND_WRITE) +/* + * The type of search to perform when calling boottime->locate_handle + */ +#define EFI_LOCATE_ALL_HANDLES 0 +#define EFI_LOCATE_BY_REGISTER_NOTIFY 1 +#define EFI_LOCATE_BY_PROTOCOL 2 + +/* + * EFI Device Path information + */ +#define EFI_DEV_HW 0x01 +#define EFI_DEV_PCI 1 +#define EFI_DEV_PCCARD 2 +#define EFI_DEV_MEM_MAPPED 3 +#define EFI_DEV_VENDOR 4 +#define EFI_DEV_CONTROLLER 5 +#define EFI_DEV_ACPI 0x02 +#define EFI_DEV_BASIC_ACPI 1 +#define EFI_DEV_EXPANDED_ACPI 2 +#define EFI_DEV_MSG 0x03 +#define EFI_DEV_MSG_ATAPI 1 +#define EFI_DEV_MSG_SCSI 2 +#define EFI_DEV_MSG_FC 3 +#define EFI_DEV_MSG_1394 4 +#define EFI_DEV_MSG_USB 5 +#define EFI_DEV_MSG_USB_CLASS 15 +#define EFI_DEV_MSG_I20 6 +#define EFI_DEV_MSG_MAC 11 +#define EFI_DEV_MSG_IPV4 12 +#define EFI_DEV_MSG_IPV6 13 +#define EFI_DEV_MSG_INFINIBAND 9 +#define EFI_DEV_MSG_UART 14 +#define EFI_DEV_MSG_VENDOR 10 +#define EFI_DEV_MEDIA 0x04 +#define EFI_DEV_MEDIA_HARD_DRIVE 1 +#define EFI_DEV_MEDIA_CDROM 2 +#define EFI_DEV_MEDIA_VENDOR 3 +#define EFI_DEV_MEDIA_FILE 4 +#define EFI_DEV_MEDIA_PROTOCOL 5 +#define EFI_DEV_BIOS_BOOT 0x05 +#define EFI_DEV_END_PATH 0x7F +#define EFI_DEV_END_PATH2 0xFF +#define EFI_DEV_END_INSTANCE 0x01 +#define EFI_DEV_END_ENTIRE 0xFF + +struct efi_generic_dev_path { + u8 type; + u8 sub_type; + u16 length; +} __attribute ((packed)); + +static inline void memrange_efi_to_native(u64 *addr, u64 *npages) +{ + *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr); + *addr &= PAGE_MASK; +} + +#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE) +/* + * EFI Variable support. + * + * Different firmware drivers can expose their EFI-like variables using + * the following. + */ + +struct efivar_operations { + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; +}; + +struct efivars { + /* + * ->lock protects two things: + * 1) ->list - adds, removals, reads, writes + * 2) ops.[gs]et_variable() calls. + * It must not be held when creating sysfs entries or calling kmalloc. + * ops.get_next_variable() is only called from register_efivars(), + * which is protected by the BKL, so that path is safe. 
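+ *
+ * Editorial sketch, not part of the original header, of the rule above
+ * (the local variables are hypothetical):
+ *
+ *   spin_lock(&efivars->lock);
+ *   status = efivars->ops->get_variable(name, &vendor, &attr, &size, data);
+ *   spin_unlock(&efivars->lock);
+ *
+ * and only after dropping the lock is it safe to kmalloc() a copy or create
+ * the corresponding sysfs entry.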
+ */ + spinlock_t lock; + struct list_head list; + struct kset *kset; + struct bin_attribute *new_var, *del_var; + const struct efivar_operations *ops; + struct efivar_entry *walk_entry; + struct pstore_info efi_pstore_info; +}; + +int register_efivars(struct efivars *efivars, + const struct efivar_operations *ops, + struct kobject *parent_kobj); +void unregister_efivars(struct efivars *efivars); + +#endif /* CONFIG_EFI_VARS */ + +#endif /* _LINUX_EFI_H */ diff --git a/include/linux/efs_fs_sb.h b/include/linux/efs_fs_sb.h new file mode 100644 index 00000000..a01be90c --- /dev/null +++ b/include/linux/efs_fs_sb.h @@ -0,0 +1,62 @@ +/* + * efs_fs_sb.h + * + * Copyright (c) 1999 Al Smith + * + * Portions derived from IRIX header files (c) 1988 Silicon Graphics + */ + +#ifndef __EFS_FS_SB_H__ +#define __EFS_FS_SB_H__ + +#include <linux/types.h> +#include <linux/magic.h> + +/* EFS superblock magic numbers */ +#define EFS_MAGIC 0x072959 +#define EFS_NEWMAGIC 0x07295a + +#define IS_EFS_MAGIC(x) ((x == EFS_MAGIC) || (x == EFS_NEWMAGIC)) + +#define EFS_SUPER 1 +#define EFS_ROOTINODE 2 + +/* efs superblock on disk */ +struct efs_super { + __be32 fs_size; /* size of filesystem, in sectors */ + __be32 fs_firstcg; /* bb offset to first cg */ + __be32 fs_cgfsize; /* size of cylinder group in bb's */ + __be16 fs_cgisize; /* bb's of inodes per cylinder group */ + __be16 fs_sectors; /* sectors per track */ + __be16 fs_heads; /* heads per cylinder */ + __be16 fs_ncg; /* # of cylinder groups in filesystem */ + __be16 fs_dirty; /* fs needs to be fsck'd */ + __be32 fs_time; /* last super-block update */ + __be32 fs_magic; /* magic number */ + char fs_fname[6]; /* file system name */ + char fs_fpack[6]; /* file system pack name */ + __be32 fs_bmsize; /* size of bitmap in bytes */ + __be32 fs_tfree; /* total free data blocks */ + __be32 fs_tinode; /* total free inodes */ + __be32 fs_bmblock; /* bitmap location. */ + __be32 fs_replsb; /* Location of replicated superblock. */ + __be32 fs_lastialloc; /* last allocated inode */ + char fs_spare[20]; /* space for expansion - MUST BE ZERO */ + __be32 fs_checksum; /* checksum of volume portion of fs */ +}; + +/* efs superblock information in memory */ +struct efs_sb_info { + __u32 fs_magic; /* superblock magic number */ + __u32 fs_start; /* first block of filesystem */ + __u32 first_block; /* first data block in filesystem */ + __u32 total_blocks; /* total number of blocks in filesystem */ + __u32 group_size; /* # of blocks a group consists of */ + __u32 data_free; /* # of free data blocks */ + __u32 inode_free; /* # of free inodes */ + __u16 inode_blocks; /* # of blocks used for inodes in every grp */ + __u16 total_groups; /* # of groups */ +}; + +#endif /* __EFS_FS_SB_H__ */ + diff --git a/include/linux/efs_vh.h b/include/linux/efs_vh.h new file mode 100644 index 00000000..8a11150c --- /dev/null +++ b/include/linux/efs_vh.h @@ -0,0 +1,53 @@ +/* + * efs_vh.h + * + * Copyright (c) 1999 Al Smith + * + * Portions derived from IRIX header files (c) 1985 MIPS Computer Systems, Inc. 
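+ *
+ * Editorial sketch, not part of the original header: a partition scanner
+ * validates the header magic and then looks for an EFS partition, roughly:
+ *
+ *   struct volume_header *vh;      (points at the 512-byte block 0)
+ *   int i;
+ *
+ *   if (be32_to_cpu(vh->vh_magic) != VHMAGIC)
+ *           return 0;
+ *   for (i = 0; i < NPARTAB; i++)
+ *           if (IS_EFS(be32_to_cpu(vh->vh_pt[i].pt_type)))
+ *                   break;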
+ */ + +#ifndef __EFS_VH_H__ +#define __EFS_VH_H__ + +#define VHMAGIC 0xbe5a941 /* volume header magic number */ +#define NPARTAB 16 /* 16 unix partitions */ +#define NVDIR 15 /* max of 15 directory entries */ +#define BFNAMESIZE 16 /* max 16 chars in boot file name */ +#define VDNAMESIZE 8 + +struct volume_directory { + char vd_name[VDNAMESIZE]; /* name */ + __be32 vd_lbn; /* logical block number */ + __be32 vd_nbytes; /* file length in bytes */ +}; + +struct partition_table { /* one per logical partition */ + __be32 pt_nblks; /* # of logical blks in partition */ + __be32 pt_firstlbn; /* first lbn of partition */ + __be32 pt_type; /* use of partition */ +}; + +struct volume_header { + __be32 vh_magic; /* identifies volume header */ + __be16 vh_rootpt; /* root partition number */ + __be16 vh_swappt; /* swap partition number */ + char vh_bootfile[BFNAMESIZE]; /* name of file to boot */ + char pad[48]; /* device param space */ + struct volume_directory vh_vd[NVDIR]; /* other vol hdr contents */ + struct partition_table vh_pt[NPARTAB]; /* device partition layout */ + __be32 vh_csum; /* volume header checksum */ + __be32 vh_fill; /* fill out to 512 bytes */ +}; + +/* partition type sysv is used for EFS format CD-ROM partitions */ +#define SGI_SYSV 0x05 +#define SGI_EFS 0x07 +#define IS_EFS(x) (((x) == SGI_EFS) || ((x) == SGI_SYSV)) + +struct pt_types { + int pt_type; + char *pt_name; +}; + +#endif /* __EFS_VH_H__ */ + diff --git a/include/linux/eisa.h b/include/linux/eisa.h new file mode 100644 index 00000000..6925249a --- /dev/null +++ b/include/linux/eisa.h @@ -0,0 +1,111 @@ +#ifndef _LINUX_EISA_H +#define _LINUX_EISA_H + +#include <linux/ioport.h> +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +#define EISA_MAX_SLOTS 8 + +#define EISA_MAX_RESOURCES 4 + +/* A few EISA constants/offsets... */ + +#define EISA_DMA1_STATUS 8 +#define EISA_INT1_CTRL 0x20 +#define EISA_INT1_MASK 0x21 +#define EISA_INT2_CTRL 0xA0 +#define EISA_INT2_MASK 0xA1 +#define EISA_DMA2_STATUS 0xD0 +#define EISA_DMA2_WRITE_SINGLE 0xD4 +#define EISA_EXT_NMI_RESET_CTRL 0x461 +#define EISA_INT1_EDGE_LEVEL 0x4D0 +#define EISA_INT2_EDGE_LEVEL 0x4D1 +#define EISA_VENDOR_ID_OFFSET 0xC80 +#define EISA_CONFIG_OFFSET 0xC84 + +#define EISA_CONFIG_ENABLED 1 +#define EISA_CONFIG_FORCED 2 + +/* There is not much we can say about an EISA device, apart from + * signature, slot number, and base address. dma_mask is set by + * default to parent device mask..*/ + +struct eisa_device { + struct eisa_device_id id; + int slot; + int state; + unsigned long base_addr; + struct resource res[EISA_MAX_RESOURCES]; + u64 dma_mask; + struct device dev; /* generic device */ +#ifdef CONFIG_EISA_NAMES + char pretty_name[50]; +#endif +}; + +#define to_eisa_device(n) container_of(n, struct eisa_device, dev) + +static inline int eisa_get_region_index (void *addr) +{ + unsigned long x = (unsigned long) addr; + + x &= 0xc00; + return (x >> 12); +} + +struct eisa_driver { + const struct eisa_device_id *id_table; + struct device_driver driver; +}; + +#define to_eisa_driver(drv) container_of(drv,struct eisa_driver, driver) + +/* These external functions are only available when EISA support is enabled. 
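+ *
+ * Editorial sketch, not part of the original header: a typical driver builds
+ * an id table and registers itself; every "foo" name is hypothetical:
+ *
+ *   static const struct eisa_device_id foo_eisa_ids[] = {
+ *           { "FOO1234" },
+ *           { "" }
+ *   };
+ *
+ *   static struct eisa_driver foo_eisa_driver = {
+ *           .id_table = foo_eisa_ids,
+ *           .driver   = {
+ *                   .name  = "foo",
+ *                   .probe = foo_probe,
+ *           },
+ *   };
+ *
+ * followed by eisa_driver_register(&foo_eisa_driver) in the module init path.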
*/ +#ifdef CONFIG_EISA + +extern struct bus_type eisa_bus_type; +int eisa_driver_register (struct eisa_driver *edrv); +void eisa_driver_unregister (struct eisa_driver *edrv); + +#else /* !CONFIG_EISA */ + +static inline int eisa_driver_register (struct eisa_driver *edrv) { return 0; } +static inline void eisa_driver_unregister (struct eisa_driver *edrv) { } + +#endif /* !CONFIG_EISA */ + +/* Mimics pci.h... */ +static inline void *eisa_get_drvdata (struct eisa_device *edev) +{ + return dev_get_drvdata(&edev->dev); +} + +static inline void eisa_set_drvdata (struct eisa_device *edev, void *data) +{ + dev_set_drvdata(&edev->dev, data); +} + +/* The EISA root device. There's rumours about machines with multiple + * busses (PA-RISC ?), so we try to handle that. */ + +struct eisa_root_device { + struct device *dev; /* Pointer to bridge device */ + struct resource *res; + unsigned long bus_base_addr; + int slots; /* Max slot number */ + int force_probe; /* Probe even when no slot 0 */ + u64 dma_mask; /* from bridge device */ + int bus_nr; /* Set by eisa_root_register */ + struct resource eisa_root_res; /* ditto */ +}; + +int eisa_root_register (struct eisa_root_device *root); + +#ifdef CONFIG_EISA +extern int EISA_bus; +#else +# define EISA_bus 0 +#endif + +#endif diff --git a/include/linux/elevator.h b/include/linux/elevator.h new file mode 100644 index 00000000..7d4e0356 --- /dev/null +++ b/include/linux/elevator.h @@ -0,0 +1,208 @@ +#ifndef _LINUX_ELEVATOR_H +#define _LINUX_ELEVATOR_H + +#include <linux/percpu.h> + +#ifdef CONFIG_BLOCK + +struct io_cq; + +typedef int (elevator_merge_fn) (struct request_queue *, struct request **, + struct bio *); + +typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *); + +typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); + +typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *); + +typedef void (elevator_bio_merged_fn) (struct request_queue *, + struct request *, struct bio *); + +typedef int (elevator_dispatch_fn) (struct request_queue *, int); + +typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); +typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); +typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); +typedef int (elevator_may_queue_fn) (struct request_queue *, int); + +typedef void (elevator_init_icq_fn) (struct io_cq *); +typedef void (elevator_exit_icq_fn) (struct io_cq *); +typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t); +typedef void (elevator_put_req_fn) (struct request *); +typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *); +typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); + +typedef void *(elevator_init_fn) (struct request_queue *); +typedef void (elevator_exit_fn) (struct elevator_queue *); + +struct elevator_ops +{ + elevator_merge_fn *elevator_merge_fn; + elevator_merged_fn *elevator_merged_fn; + elevator_merge_req_fn *elevator_merge_req_fn; + elevator_allow_merge_fn *elevator_allow_merge_fn; + elevator_bio_merged_fn *elevator_bio_merged_fn; + + elevator_dispatch_fn *elevator_dispatch_fn; + elevator_add_req_fn *elevator_add_req_fn; + elevator_activate_req_fn *elevator_activate_req_fn; + elevator_deactivate_req_fn *elevator_deactivate_req_fn; + + elevator_completed_req_fn *elevator_completed_req_fn; + + 
elevator_request_list_fn *elevator_former_req_fn; + elevator_request_list_fn *elevator_latter_req_fn; + + elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */ + elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */ + + elevator_set_req_fn *elevator_set_req_fn; + elevator_put_req_fn *elevator_put_req_fn; + + elevator_may_queue_fn *elevator_may_queue_fn; + + elevator_init_fn *elevator_init_fn; + elevator_exit_fn *elevator_exit_fn; +}; + +#define ELV_NAME_MAX (16) + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + +/* + * identifies an elevator type, such as AS or deadline + */ +struct elevator_type +{ + /* managed by elevator core */ + struct kmem_cache *icq_cache; + + /* fields provided by elevator implementation */ + struct elevator_ops ops; + size_t icq_size; /* see iocontext.h */ + size_t icq_align; /* ditto */ + struct elv_fs_entry *elevator_attrs; + char elevator_name[ELV_NAME_MAX]; + struct module *elevator_owner; + + /* managed by elevator core */ + char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ + struct list_head list; +}; + +/* + * each queue has an elevator_queue associated with it + */ +struct elevator_queue +{ + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + struct hlist_head *hash; + unsigned int registered:1; +}; + +/* + * block elevator interface + */ +extern void elv_dispatch_sort(struct request_queue *, struct request *); +extern void elv_dispatch_add_tail(struct request_queue *, struct request *); +extern void elv_add_request(struct request_queue *, struct request *, int); +extern void __elv_add_request(struct request_queue *, struct request *, int); +extern int elv_merge(struct request_queue *, struct request **, struct bio *); +extern void elv_merge_requests(struct request_queue *, struct request *, + struct request *); +extern void elv_merged_request(struct request_queue *, struct request *, int); +extern void elv_bio_merged(struct request_queue *q, struct request *, + struct bio *); +extern void elv_requeue_request(struct request_queue *, struct request *); +extern struct request *elv_former_request(struct request_queue *, struct request *); +extern struct request *elv_latter_request(struct request_queue *, struct request *); +extern int elv_register_queue(struct request_queue *q); +extern void elv_unregister_queue(struct request_queue *q); +extern int elv_may_queue(struct request_queue *, int); +extern void elv_abort_queue(struct request_queue *); +extern void elv_completed_request(struct request_queue *, struct request *); +extern int elv_set_request(struct request_queue *, struct request *, gfp_t); +extern void elv_put_request(struct request_queue *, struct request *); +extern void elv_drain_elevator(struct request_queue *); + +/* + * io scheduler registration + */ +extern int elv_register(struct elevator_type *); +extern void elv_unregister(struct elevator_type *); + +/* + * io scheduler sysfs switching + */ +extern ssize_t elv_iosched_show(struct request_queue *, char *); +extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); + +extern int elevator_init(struct request_queue *, char *); +extern void elevator_exit(struct elevator_queue *); +extern int elevator_change(struct request_queue *, const char *); +extern bool elv_rq_merge_ok(struct request *, struct bio *); + +/* + * Helper functions. 
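+ *
+ * Editorial sketch, not part of the original header: a sector-sorting
+ * scheduler usually pairs these with the rb support functions declared just
+ * below, keeping a struct rb_root in its private data ("sd" is hypothetical):
+ *
+ *   elv_rb_add(&sd->sort_list, rq);            when a request is queued
+ *   rq = elv_rb_find(&sd->sort_list, sector);  front-merge candidate lookup
+ *   elv_rb_del(&sd->sort_list, rq);            when the request is dispatched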
+ */ +extern struct request *elv_rb_former_request(struct request_queue *, struct request *); +extern struct request *elv_rb_latter_request(struct request_queue *, struct request *); + +/* + * rb support functions. + */ +extern void elv_rb_add(struct rb_root *, struct request *); +extern void elv_rb_del(struct rb_root *, struct request *); +extern struct request *elv_rb_find(struct rb_root *, sector_t); + +/* + * Return values from elevator merger + */ +#define ELEVATOR_NO_MERGE 0 +#define ELEVATOR_FRONT_MERGE 1 +#define ELEVATOR_BACK_MERGE 2 + +/* + * Insertion selection + */ +#define ELEVATOR_INSERT_FRONT 1 +#define ELEVATOR_INSERT_BACK 2 +#define ELEVATOR_INSERT_SORT 3 +#define ELEVATOR_INSERT_REQUEUE 4 +#define ELEVATOR_INSERT_FLUSH 5 +#define ELEVATOR_INSERT_SORT_MERGE 6 + +/* + * return values from elevator_may_queue_fn + */ +enum { + ELV_MQUEUE_MAY, + ELV_MQUEUE_NO, + ELV_MQUEUE_MUST, +}; + +#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) +#define rb_entry_rq(node) rb_entry((node), struct request, rb_node) + +/* + * Hack to reuse the csd.list list_head as the fifo time holder while + * the request is in the io scheduler. Saves an unsigned long in rq. + */ +#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next) +#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp)) +#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) +#define rq_fifo_clear(rq) do { \ + list_del_init(&(rq)->queuelist); \ + INIT_LIST_HEAD(&(rq)->csd.list); \ + } while (0) + +#endif /* CONFIG_BLOCK */ +#endif diff --git a/include/linux/elf-em.h b/include/linux/elf-em.h new file mode 100644 index 00000000..8e2b7bac --- /dev/null +++ b/include/linux/elf-em.h @@ -0,0 +1,56 @@ +#ifndef _LINUX_ELF_EM_H +#define _LINUX_ELF_EM_H + +/* These constants define the various ELF target machines */ +#define EM_NONE 0 +#define EM_M32 1 +#define EM_SPARC 2 +#define EM_386 3 +#define EM_68K 4 +#define EM_88K 5 +#define EM_486 6 /* Perhaps disused */ +#define EM_860 7 +#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */ + /* Next two are historical and binaries and + modules of these types will be rejected by + Linux. */ +#define EM_MIPS_RS3_LE 10 /* MIPS R3000 little-endian */ +#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */ + +#define EM_PARISC 15 /* HPPA */ +#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */ +#define EM_PPC 20 /* PowerPC */ +#define EM_PPC64 21 /* PowerPC64 */ +#define EM_SPU 23 /* Cell BE SPU */ +#define EM_SH 42 /* SuperH */ +#define EM_SPARCV9 43 /* SPARC v9 64-bit */ +#define EM_IA_64 50 /* HP/Intel IA-64 */ +#define EM_X86_64 62 /* AMD x86-64 */ +#define EM_S390 22 /* IBM S/390 */ +#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ +#define EM_V850 87 /* NEC v850 */ +#define EM_M32R 88 /* Renesas M32R */ +#define EM_H8_300 46 /* Renesas H8/300,300H,H8S */ +#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */ +#define EM_BLACKFIN 106 /* ADI Blackfin Processor */ +#define EM_TI_C6000 140 /* TI C6X DSPs */ +#define EM_FRV 0x5441 /* Fujitsu FR-V */ +#define EM_AVR32 0x18ad /* Atmel AVR32 */ + +/* + * This is an interim value that we will use until the committee comes + * up with a final number. + */ +#define EM_ALPHA 0x9026 + +/* Bogus old v850 magic number, used by old tools. */ +#define EM_CYGNUS_V850 0x9080 +/* Bogus old m32r magic number, used by old tools. 
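+ *
+ * (Editorial note, not part of the original header: loaders and per-arch
+ * elf_check_arch() implementations generally accept these legacy values as
+ * aliases of the official numbers, along the lines of
+ *
+ *   if (hdr->e_machine == EM_S390 || hdr->e_machine == EM_S390_OLD)
+ *           return 1;
+ *
+ * but the exact policy is architecture specific.)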
*/ +#define EM_CYGNUS_M32R 0x9041 +/* This is the old interim value for S/390 architecture */ +#define EM_S390_OLD 0xA390 +/* Also Panasonic/MEI MN10300, AM33 */ +#define EM_CYGNUS_MN10300 0xbeef + + +#endif /* _LINUX_ELF_EM_H */ diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h new file mode 100644 index 00000000..7cd2e80c --- /dev/null +++ b/include/linux/elf-fdpic.h @@ -0,0 +1,70 @@ +/* elf-fdpic.h: FDPIC ELF load map + * + * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_ELF_FDPIC_H +#define _LINUX_ELF_FDPIC_H + +#include <linux/elf.h> + +#define PT_GNU_STACK (PT_LOOS + 0x474e551) + +/* segment mappings for ELF FDPIC libraries/executables/interpreters */ +struct elf32_fdpic_loadseg { + Elf32_Addr addr; /* core address to which mapped */ + Elf32_Addr p_vaddr; /* VMA recorded in file */ + Elf32_Word p_memsz; /* allocation size recorded in file */ +}; + +struct elf32_fdpic_loadmap { + Elf32_Half version; /* version of these structures, just in case... */ + Elf32_Half nsegs; /* number of segments */ + struct elf32_fdpic_loadseg segs[]; +}; + +#define ELF32_FDPIC_LOADMAP_VERSION 0x0000 + +/* + * binfmt binary parameters structure + */ +struct elf_fdpic_params { + struct elfhdr hdr; /* ref copy of ELF header */ + struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */ + struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */ + unsigned long elfhdr_addr; /* mapped ELF header user address */ + unsigned long ph_addr; /* mapped PT_PHDR user address */ + unsigned long map_addr; /* mapped loadmap user address */ + unsigned long entry_addr; /* mapped entry user address */ + unsigned long stack_size; /* stack size requested (PT_GNU_STACK) */ + unsigned long dynamic_addr; /* mapped PT_DYNAMIC user address */ + unsigned long load_addr; /* user address at which to map binary */ + unsigned long flags; +#define ELF_FDPIC_FLAG_ARRANGEMENT 0x0000000f /* PT_LOAD arrangement flags */ +#define ELF_FDPIC_FLAG_INDEPENDENT 0x00000000 /* PT_LOADs can be put anywhere */ +#define ELF_FDPIC_FLAG_HONOURVADDR 0x00000001 /* PT_LOAD.vaddr must be honoured */ +#define ELF_FDPIC_FLAG_CONSTDISP 0x00000002 /* PT_LOADs require constant + * displacement */ +#define ELF_FDPIC_FLAG_CONTIGUOUS 0x00000003 /* PT_LOADs should be contiguous */ +#define ELF_FDPIC_FLAG_EXEC_STACK 0x00000010 /* T if stack to be executable */ +#define ELF_FDPIC_FLAG_NOEXEC_STACK 0x00000020 /* T if stack not to be executable */ +#define ELF_FDPIC_FLAG_EXECUTABLE 0x00000040 /* T if this object is the executable */ +#define ELF_FDPIC_FLAG_PRESENT 0x80000000 /* T if this object is present */ +}; + +#ifdef __KERNEL__ +#ifdef CONFIG_MMU +extern void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params, + struct elf_fdpic_params *interp_params, + unsigned long *start_stack, + unsigned long *start_brk); +#endif +#endif /* __KERNEL__ */ + +#endif /* _LINUX_ELF_FDPIC_H */ diff --git a/include/linux/elf.h b/include/linux/elf.h new file mode 100644 index 00000000..999b4f52 --- /dev/null +++ b/include/linux/elf.h @@ -0,0 +1,449 @@ +#ifndef _LINUX_ELF_H +#define _LINUX_ELF_H + +#include <linux/types.h> +#include <linux/elf-em.h> +#ifdef __KERNEL__ +#include <asm/elf.h> +#endif + +/* 
32-bit ELF base types. */ +typedef __u32 Elf32_Addr; +typedef __u16 Elf32_Half; +typedef __u32 Elf32_Off; +typedef __s32 Elf32_Sword; +typedef __u32 Elf32_Word; + +/* 64-bit ELF base types. */ +typedef __u64 Elf64_Addr; +typedef __u16 Elf64_Half; +typedef __s16 Elf64_SHalf; +typedef __u64 Elf64_Off; +typedef __s32 Elf64_Sword; +typedef __u32 Elf64_Word; +typedef __u64 Elf64_Xword; +typedef __s64 Elf64_Sxword; + +/* These constants are for the segment types stored in the image headers */ +#define PT_NULL 0 +#define PT_LOAD 1 +#define PT_DYNAMIC 2 +#define PT_INTERP 3 +#define PT_NOTE 4 +#define PT_SHLIB 5 +#define PT_PHDR 6 +#define PT_TLS 7 /* Thread local storage segment */ +#define PT_LOOS 0x60000000 /* OS-specific */ +#define PT_HIOS 0x6fffffff /* OS-specific */ +#define PT_LOPROC 0x70000000 +#define PT_HIPROC 0x7fffffff +#define PT_GNU_EH_FRAME 0x6474e550 + +#define PT_GNU_STACK (PT_LOOS + 0x474e551) + +/* + * Extended Numbering + * + * If the real number of program header table entries is larger than + * or equal to PN_XNUM(0xffff), it is set to sh_info field of the + * section header at index 0, and PN_XNUM is set to e_phnum + * field. Otherwise, the section header at index 0 is zero + * initialized, if it exists. + * + * Specifications are available in: + * + * - Sun microsystems: Linker and Libraries. + * Part No: 817-1984-17, September 2008. + * URL: http://docs.sun.com/app/docs/doc/817-1984 + * + * - System V ABI AMD64 Architecture Processor Supplement + * Draft Version 0.99., + * May 11, 2009. + * URL: http://www.x86-64.org/ + */ +#define PN_XNUM 0xffff + +/* These constants define the different elf file types */ +#define ET_NONE 0 +#define ET_REL 1 +#define ET_EXEC 2 +#define ET_DYN 3 +#define ET_CORE 4 +#define ET_LOPROC 0xff00 +#define ET_HIPROC 0xffff + +/* This is the info that is needed to parse the dynamic section of the file */ +#define DT_NULL 0 +#define DT_NEEDED 1 +#define DT_PLTRELSZ 2 +#define DT_PLTGOT 3 +#define DT_HASH 4 +#define DT_STRTAB 5 +#define DT_SYMTAB 6 +#define DT_RELA 7 +#define DT_RELASZ 8 +#define DT_RELAENT 9 +#define DT_STRSZ 10 +#define DT_SYMENT 11 +#define DT_INIT 12 +#define DT_FINI 13 +#define DT_SONAME 14 +#define DT_RPATH 15 +#define DT_SYMBOLIC 16 +#define DT_REL 17 +#define DT_RELSZ 18 +#define DT_RELENT 19 +#define DT_PLTREL 20 +#define DT_DEBUG 21 +#define DT_TEXTREL 22 +#define DT_JMPREL 23 +#define DT_ENCODING 32 +#define OLD_DT_LOOS 0x60000000 +#define DT_LOOS 0x6000000d +#define DT_HIOS 0x6ffff000 +#define DT_VALRNGLO 0x6ffffd00 +#define DT_VALRNGHI 0x6ffffdff +#define DT_ADDRRNGLO 0x6ffffe00 +#define DT_ADDRRNGHI 0x6ffffeff +#define DT_VERSYM 0x6ffffff0 +#define DT_RELACOUNT 0x6ffffff9 +#define DT_RELCOUNT 0x6ffffffa +#define DT_FLAGS_1 0x6ffffffb +#define DT_VERDEF 0x6ffffffc +#define DT_VERDEFNUM 0x6ffffffd +#define DT_VERNEED 0x6ffffffe +#define DT_VERNEEDNUM 0x6fffffff +#define OLD_DT_HIOS 0x6fffffff +#define DT_LOPROC 0x70000000 +#define DT_HIPROC 0x7fffffff + +/* This info is needed when parsing the symbol table */ +#define STB_LOCAL 0 +#define STB_GLOBAL 1 +#define STB_WEAK 2 + +#define STT_NOTYPE 0 +#define STT_OBJECT 1 +#define STT_FUNC 2 +#define STT_SECTION 3 +#define STT_FILE 4 +#define STT_COMMON 5 +#define STT_TLS 6 + +#define ELF_ST_BIND(x) ((x) >> 4) +#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf) +#define ELF32_ST_BIND(x) ELF_ST_BIND(x) +#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x) +#define ELF64_ST_BIND(x) ELF_ST_BIND(x) +#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x) + +typedef struct dynamic{ + Elf32_Sword d_tag; + union{ 
+ Elf32_Sword d_val; + Elf32_Addr d_ptr; + } d_un; +} Elf32_Dyn; + +typedef struct { + Elf64_Sxword d_tag; /* entry tag value */ + union { + Elf64_Xword d_val; + Elf64_Addr d_ptr; + } d_un; +} Elf64_Dyn; + +/* The following are used with relocations */ +#define ELF32_R_SYM(x) ((x) >> 8) +#define ELF32_R_TYPE(x) ((x) & 0xff) + +#define ELF64_R_SYM(i) ((i) >> 32) +#define ELF64_R_TYPE(i) ((i) & 0xffffffff) + +typedef struct elf32_rel { + Elf32_Addr r_offset; + Elf32_Word r_info; +} Elf32_Rel; + +typedef struct elf64_rel { + Elf64_Addr r_offset; /* Location at which to apply the action */ + Elf64_Xword r_info; /* index and type of relocation */ +} Elf64_Rel; + +typedef struct elf32_rela{ + Elf32_Addr r_offset; + Elf32_Word r_info; + Elf32_Sword r_addend; +} Elf32_Rela; + +typedef struct elf64_rela { + Elf64_Addr r_offset; /* Location at which to apply the action */ + Elf64_Xword r_info; /* index and type of relocation */ + Elf64_Sxword r_addend; /* Constant addend used to compute value */ +} Elf64_Rela; + +typedef struct elf32_sym{ + Elf32_Word st_name; + Elf32_Addr st_value; + Elf32_Word st_size; + unsigned char st_info; + unsigned char st_other; + Elf32_Half st_shndx; +} Elf32_Sym; + +typedef struct elf64_sym { + Elf64_Word st_name; /* Symbol name, index in string tbl */ + unsigned char st_info; /* Type and binding attributes */ + unsigned char st_other; /* No defined meaning, 0 */ + Elf64_Half st_shndx; /* Associated section index */ + Elf64_Addr st_value; /* Value of the symbol */ + Elf64_Xword st_size; /* Associated symbol size */ +} Elf64_Sym; + + +#define EI_NIDENT 16 + +typedef struct elf32_hdr{ + unsigned char e_ident[EI_NIDENT]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; /* Entry point */ + Elf32_Off e_phoff; + Elf32_Off e_shoff; + Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; +} Elf32_Ehdr; + +typedef struct elf64_hdr { + unsigned char e_ident[EI_NIDENT]; /* ELF "magic number" */ + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; /* Entry point virtual address */ + Elf64_Off e_phoff; /* Program header table file offset */ + Elf64_Off e_shoff; /* Section header table file offset */ + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +} Elf64_Ehdr; + +/* These constants define the permissions on sections in the program + header, p_flags. 
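+ *
+ * Editorial sketch, not part of the original header: an ELF loader normally
+ * translates these bits into page protections before mapping a PT_LOAD
+ * segment, along the lines of:
+ *
+ *   int prot = 0;
+ *
+ *   if (p_flags & PF_R) prot |= PROT_READ;
+ *   if (p_flags & PF_W) prot |= PROT_WRITE;
+ *   if (p_flags & PF_X) prot |= PROT_EXEC;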
*/ +#define PF_R 0x4 +#define PF_W 0x2 +#define PF_X 0x1 + +typedef struct elf32_phdr{ + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; +} Elf32_Phdr; + +typedef struct elf64_phdr { + Elf64_Word p_type; + Elf64_Word p_flags; + Elf64_Off p_offset; /* Segment file offset */ + Elf64_Addr p_vaddr; /* Segment virtual address */ + Elf64_Addr p_paddr; /* Segment physical address */ + Elf64_Xword p_filesz; /* Segment size in file */ + Elf64_Xword p_memsz; /* Segment size in memory */ + Elf64_Xword p_align; /* Segment alignment, file & memory */ +} Elf64_Phdr; + +/* sh_type */ +#define SHT_NULL 0 +#define SHT_PROGBITS 1 +#define SHT_SYMTAB 2 +#define SHT_STRTAB 3 +#define SHT_RELA 4 +#define SHT_HASH 5 +#define SHT_DYNAMIC 6 +#define SHT_NOTE 7 +#define SHT_NOBITS 8 +#define SHT_REL 9 +#define SHT_SHLIB 10 +#define SHT_DYNSYM 11 +#define SHT_NUM 12 +#define SHT_LOPROC 0x70000000 +#define SHT_HIPROC 0x7fffffff +#define SHT_LOUSER 0x80000000 +#define SHT_HIUSER 0xffffffff + +/* sh_flags */ +#define SHF_WRITE 0x1 +#define SHF_ALLOC 0x2 +#define SHF_EXECINSTR 0x4 +#define SHF_MASKPROC 0xf0000000 + +/* special section indexes */ +#define SHN_UNDEF 0 +#define SHN_LORESERVE 0xff00 +#define SHN_LOPROC 0xff00 +#define SHN_HIPROC 0xff1f +#define SHN_ABS 0xfff1 +#define SHN_COMMON 0xfff2 +#define SHN_HIRESERVE 0xffff + +typedef struct elf32_shdr { + Elf32_Word sh_name; + Elf32_Word sh_type; + Elf32_Word sh_flags; + Elf32_Addr sh_addr; + Elf32_Off sh_offset; + Elf32_Word sh_size; + Elf32_Word sh_link; + Elf32_Word sh_info; + Elf32_Word sh_addralign; + Elf32_Word sh_entsize; +} Elf32_Shdr; + +typedef struct elf64_shdr { + Elf64_Word sh_name; /* Section name, index in string tbl */ + Elf64_Word sh_type; /* Type of section */ + Elf64_Xword sh_flags; /* Miscellaneous section attributes */ + Elf64_Addr sh_addr; /* Section virtual addr at execution */ + Elf64_Off sh_offset; /* Section file offset */ + Elf64_Xword sh_size; /* Size of section in bytes */ + Elf64_Word sh_link; /* Index of another section */ + Elf64_Word sh_info; /* Additional section information */ + Elf64_Xword sh_addralign; /* Section alignment */ + Elf64_Xword sh_entsize; /* Entry size if section holds table */ +} Elf64_Shdr; + +#define EI_MAG0 0 /* e_ident[] indexes */ +#define EI_MAG1 1 +#define EI_MAG2 2 +#define EI_MAG3 3 +#define EI_CLASS 4 +#define EI_DATA 5 +#define EI_VERSION 6 +#define EI_OSABI 7 +#define EI_PAD 8 + +#define ELFMAG0 0x7f /* EI_MAG */ +#define ELFMAG1 'E' +#define ELFMAG2 'L' +#define ELFMAG3 'F' +#define ELFMAG "\177ELF" +#define SELFMAG 4 + +#define ELFCLASSNONE 0 /* EI_CLASS */ +#define ELFCLASS32 1 +#define ELFCLASS64 2 +#define ELFCLASSNUM 3 + +#define ELFDATANONE 0 /* e_ident[EI_DATA] */ +#define ELFDATA2LSB 1 +#define ELFDATA2MSB 2 + +#define EV_NONE 0 /* e_version, EI_VERSION */ +#define EV_CURRENT 1 +#define EV_NUM 2 + +#define ELFOSABI_NONE 0 +#define ELFOSABI_LINUX 3 + +#ifndef ELF_OSABI +#define ELF_OSABI ELFOSABI_NONE +#endif + +/* + * Notes used in ET_CORE. Architectures export some of the arch register sets + * using the corresponding note types via the PTRACE_GETREGSET and + * PTRACE_SETREGSET requests. 
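+ *
+ * Editorial sketch, not part of the original header: each note is an
+ * Elf{32,64}_Nhdr (declared below) followed by the name and the descriptor,
+ * both padded to 4-byte boundaries in Linux core files, so a reader steps
+ * through a PT_NOTE segment roughly as:
+ *
+ *   Elf64_Nhdr *n = start;
+ *
+ *   while ((void *)n < end) {
+ *           char *name = (char *)(n + 1);
+ *           void *desc = name + ALIGN(n->n_namesz, 4);
+ *
+ *           (use n->n_type, name and desc here)
+ *           n = desc + ALIGN(n->n_descsz, 4);
+ *   }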
+ */ +#define NT_PRSTATUS 1 +#define NT_PRFPREG 2 +#define NT_PRPSINFO 3 +#define NT_TASKSTRUCT 4 +#define NT_AUXV 6 +#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ +#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ +#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */ +#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ +#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ +#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ +#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */ +#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */ +#define NT_S390_TIMER 0x301 /* s390 timer register */ +#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */ +#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */ +#define NT_S390_CTRS 0x304 /* s390 control registers */ +#define NT_S390_PREFIX 0x305 /* s390 prefix register */ +#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */ +#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */ +#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ + + +/* Note header in a PT_NOTE section */ +typedef struct elf32_note { + Elf32_Word n_namesz; /* Name size */ + Elf32_Word n_descsz; /* Content size */ + Elf32_Word n_type; /* Content type */ +} Elf32_Nhdr; + +/* Note header in a PT_NOTE section */ +typedef struct elf64_note { + Elf64_Word n_namesz; /* Name size */ + Elf64_Word n_descsz; /* Content size */ + Elf64_Word n_type; /* Content type */ +} Elf64_Nhdr; + +#ifdef __KERNEL__ +#ifndef elf_read_implies_exec + /* Executables for which elf_read_implies_exec() returns TRUE will + have the READ_IMPLIES_EXEC personality flag set automatically. + Override in asm/elf.h as needed. */ +# define elf_read_implies_exec(ex, have_pt_gnu_stack) 0 +#endif + +#if ELF_CLASS == ELFCLASS32 + +extern Elf32_Dyn _DYNAMIC []; +#define elfhdr elf32_hdr +#define elf_phdr elf32_phdr +#define elf_shdr elf32_shdr +#define elf_note elf32_note +#define elf_addr_t Elf32_Off +#define Elf_Half Elf32_Half + +#else + +extern Elf64_Dyn _DYNAMIC []; +#define elfhdr elf64_hdr +#define elf_phdr elf64_phdr +#define elf_shdr elf64_shdr +#define elf_note elf64_note +#define elf_addr_t Elf64_Off +#define Elf_Half Elf64_Half + +#endif + +/* Optional callbacks to write extra ELF notes. */ +struct file; + +#ifndef ARCH_HAVE_EXTRA_ELF_NOTES +static inline int elf_coredump_extra_notes_size(void) { return 0; } +static inline int elf_coredump_extra_notes_write(struct file *file, + loff_t *foffset) { return 0; } +#else +extern int elf_coredump_extra_notes_size(void); +extern int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset); +#endif +#endif /* __KERNEL__ */ +#endif /* _LINUX_ELF_H */ diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h new file mode 100644 index 00000000..0a90e1c3 --- /dev/null +++ b/include/linux/elfcore-compat.h @@ -0,0 +1,55 @@ +#ifndef _LINUX_ELFCORE_COMPAT_H +#define _LINUX_ELFCORE_COMPAT_H + +#include <linux/elf.h> +#include <linux/elfcore.h> +#include <linux/compat.h> + +/* + * Make sure these layouts match the linux/elfcore.h native definitions. 
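+ *
+ * Editorial note, not part of the original header: these are the layouts a
+ * 32-bit debugger expects in the NT_PRSTATUS / NT_PRPSINFO notes of a compat
+ * core dump, so a compat dumper fills them just like the native path does,
+ * e.g. (signr and p are hypothetical locals):
+ *
+ *   struct compat_elf_prstatus prstatus;
+ *
+ *   memset(&prstatus, 0, sizeof(prstatus));
+ *   prstatus.pr_info.si_signo = prstatus.pr_cursig = signr;
+ *   prstatus.pr_pid = task_pid_vnr(p);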
+ */ + +struct compat_elf_siginfo +{ + compat_int_t si_signo; + compat_int_t si_code; + compat_int_t si_errno; +}; + +struct compat_elf_prstatus +{ + struct compat_elf_siginfo pr_info; + short pr_cursig; + compat_ulong_t pr_sigpend; + compat_ulong_t pr_sighold; + compat_pid_t pr_pid; + compat_pid_t pr_ppid; + compat_pid_t pr_pgrp; + compat_pid_t pr_sid; + struct compat_timeval pr_utime; + struct compat_timeval pr_stime; + struct compat_timeval pr_cutime; + struct compat_timeval pr_cstime; + compat_elf_gregset_t pr_reg; +#ifdef CONFIG_BINFMT_ELF_FDPIC + compat_ulong_t pr_exec_fdpic_loadmap; + compat_ulong_t pr_interp_fdpic_loadmap; +#endif + compat_int_t pr_fpvalid; +}; + +struct compat_elf_prpsinfo +{ + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + compat_ulong_t pr_flag; + __compat_uid_t pr_uid; + __compat_gid_t pr_gid; + compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; + char pr_fname[16]; + char pr_psargs[ELF_PRARGSZ]; +}; + +#endif /* _LINUX_ELFCORE_COMPAT_H */ diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h new file mode 100644 index 00000000..0698c79f --- /dev/null +++ b/include/linux/elfcore.h @@ -0,0 +1,172 @@ +#ifndef _LINUX_ELFCORE_H +#define _LINUX_ELFCORE_H + +#include <linux/types.h> +#include <linux/signal.h> +#include <linux/time.h> +#ifdef __KERNEL__ +#include <linux/user.h> +#include <linux/bug.h> +#endif +#include <linux/ptrace.h> +#include <linux/elf.h> +#include <linux/fs.h> + +struct elf_siginfo +{ + int si_signo; /* signal number */ + int si_code; /* extra code */ + int si_errno; /* errno */ +}; + +#ifdef __KERNEL__ +#include <asm/elf.h> +#endif + +#ifndef __KERNEL__ +typedef elf_greg_t greg_t; +typedef elf_gregset_t gregset_t; +typedef elf_fpregset_t fpregset_t; +typedef elf_fpxregset_t fpxregset_t; +#define NGREG ELF_NGREG +#endif + +/* + * Definitions to generate Intel SVR4-like core files. + * These mostly have the same names as the SVR4 types with "elf_" + * tacked on the front to prevent clashes with linux definitions, + * and the typedef forms have been avoided. This is mostly like + * the SVR4 structure, but more Linuxy, with things that Linux does + * not support and which gdb doesn't really use excluded. + * Fields present but not used are marked with "XXX". + */ +struct elf_prstatus +{ +#if 0 + long pr_flags; /* XXX Process flags */ + short pr_why; /* XXX Reason for process halt */ + short pr_what; /* XXX More detailed reason */ +#endif + struct elf_siginfo pr_info; /* Info associated with signal */ + short pr_cursig; /* Current signal */ + unsigned long pr_sigpend; /* Set of pending signals */ + unsigned long pr_sighold; /* Set of held signals */ +#if 0 + struct sigaltstack pr_altstack; /* Alternate stack info */ + struct sigaction pr_action; /* Signal action for current sig */ +#endif + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct timeval pr_utime; /* User time */ + struct timeval pr_stime; /* System time */ + struct timeval pr_cutime; /* Cumulative user time */ + struct timeval pr_cstime; /* Cumulative system time */ +#if 0 + long pr_instr; /* Current instruction */ +#endif + elf_gregset_t pr_reg; /* GP registers */ +#ifdef CONFIG_BINFMT_ELF_FDPIC + /* When using FDPIC, the loadmap addresses need to be communicated + * to GDB in order for GDB to do the necessary relocations. The + * fields (below) used to communicate this information are placed + * immediately after ``pr_reg'', so that the loadmap addresses may + * be viewed as part of the register set if so desired. 
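+ *
+ * Editorial sketch, not part of the original header: on FDPIC targets the
+ * dumper records the two map addresses next to the registers, roughly
+ * (assuming the architecture keeps them in its mm_context_t):
+ *
+ *   prstatus->pr_exec_fdpic_loadmap   = p->mm->context.exec_fdpic_loadmap;
+ *   prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;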
+ */ + unsigned long pr_exec_fdpic_loadmap; + unsigned long pr_interp_fdpic_loadmap; +#endif + int pr_fpvalid; /* True if math co-processor being used. */ +}; + +#define ELF_PRARGSZ (80) /* Number of chars for args */ + +struct elf_prpsinfo +{ + char pr_state; /* numeric process state */ + char pr_sname; /* char for pr_state */ + char pr_zomb; /* zombie */ + char pr_nice; /* nice val */ + unsigned long pr_flag; /* flags */ + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; + /* Lots missing */ + char pr_fname[16]; /* filename of executable */ + char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ +}; + +#ifndef __KERNEL__ +typedef struct elf_prstatus prstatus_t; +typedef struct elf_prpsinfo prpsinfo_t; +#define PRARGSZ ELF_PRARGSZ +#endif + +#ifdef __KERNEL__ +static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs) +{ +#ifdef ELF_CORE_COPY_REGS + ELF_CORE_COPY_REGS((*elfregs), regs) +#else + BUG_ON(sizeof(*elfregs) != sizeof(*regs)); + *(struct pt_regs *)elfregs = *regs; +#endif +} + +static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs) +{ +#ifdef ELF_CORE_COPY_KERNEL_REGS + ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs); +#else + elf_core_copy_regs(elfregs, regs); +#endif +} + +static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs) +{ +#if defined (ELF_CORE_COPY_TASK_REGS) + return ELF_CORE_COPY_TASK_REGS(t, elfregs); +#elif defined (task_pt_regs) + elf_core_copy_regs(elfregs, task_pt_regs(t)); +#endif + return 0; +} + +extern int dump_fpu (struct pt_regs *, elf_fpregset_t *); + +static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_regs *regs, elf_fpregset_t *fpu) +{ +#ifdef ELF_CORE_COPY_FPREGS + return ELF_CORE_COPY_FPREGS(t, fpu); +#else + return dump_fpu(regs, fpu); +#endif +} + +#ifdef ELF_CORE_COPY_XFPREGS +static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) +{ + return ELF_CORE_COPY_XFPREGS(t, xfpu); +} +#endif + +/* + * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out + * extra segments containing the gate DSO contents. Dumping its + * contents makes post-mortem fully interpretable later without matching up + * the same kernel and hardware config to see what PC values meant. + * Dumping its extra ELF program headers includes all the other information + * a debugger needs to easily find how the gate DSO was being used. + */ +extern Elf_Half elf_core_extra_phdrs(void); +extern int +elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, + unsigned long limit); +extern int +elf_core_write_extra_data(struct file *file, size_t *size, unsigned long limit); +extern size_t elf_core_extra_data_size(void); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_ELFCORE_H */ diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h new file mode 100644 index 00000000..278e3ef0 --- /dev/null +++ b/include/linux/elfnote.h @@ -0,0 +1,98 @@ +#ifndef _LINUX_ELFNOTE_H +#define _LINUX_ELFNOTE_H +/* + * Helper macros to generate ELF Note structures, which are put into a + * PT_NOTE segment of the final vmlinux image. These are useful for + * including name-value pairs of metadata into the kernel binary (or + * modules?) for use by external programs. + * + * Each note has three parts: a name, a type and a desc. 
The name is + * intended to distinguish the note's originator, so it would be a + * company, project, subsystem, etc; it must be in a suitable form for + * use in a section name. The type is an integer which is used to tag + * the data, and is considered to be within the "name" namespace (so + * "FooCo"'s type 42 is distinct from "BarProj"'s type 42). The + * "desc" field is the actual data. There are no constraints on the + * desc field's contents, though typically they're fairly small. + * + * All notes from a given NAME are put into a section named + * .note.NAME. When the kernel image is finally linked, all the notes + * are packed into a single .notes section, which is mapped into the + * PT_NOTE segment. Because notes for a given name are grouped into + * the same section, they'll all be adjacent the output file. + * + * This file defines macros for both C and assembler use. Their + * syntax is slightly different, but they're semantically similar. + * + * See the ELF specification for more detail about ELF notes. + */ + +#ifdef __ASSEMBLER__ +/* + * Generate a structure with the same shape as Elf{32,64}_Nhdr (which + * turn out to be the same size and shape), followed by the name and + * desc data with appropriate padding. The 'desctype' argument is the + * assembler pseudo op defining the type of the data e.g. .asciz while + * 'descdata' is the data itself e.g. "hello, world". + * + * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two") + * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef) + */ +#define ELFNOTE_START(name, type, flags) \ +.pushsection .note.name, flags,@note ; \ + .balign 4 ; \ + .long 2f - 1f /* namesz */ ; \ + .long 4484f - 3f /* descsz */ ; \ + .long type ; \ +1:.asciz #name ; \ +2:.balign 4 ; \ +3: + +#define ELFNOTE_END \ +4484:.balign 4 ; \ +.popsection ; + +#define ELFNOTE(name, type, desc) \ + ELFNOTE_START(name, type, "") \ + desc ; \ + ELFNOTE_END + +#else /* !__ASSEMBLER__ */ +#include <linux/elf.h> +/* + * Use an anonymous structure which matches the shape of + * Elf{32,64}_Nhdr, but includes the name and desc data. The size and + * type of name and desc depend on the macro arguments. "name" must + * be a literal string, and "desc" must be passed by value. You may + * only define one note per line, since __LINE__ is used to generate + * unique symbols. + */ +#define _ELFNOTE_PASTE(a,b) a##b +#define _ELFNOTE(size, name, unique, type, desc) \ + static const struct { \ + struct elf##size##_note _nhdr; \ + unsigned char _name[sizeof(name)] \ + __attribute__((aligned(sizeof(Elf##size##_Word)))); \ + typeof(desc) _desc \ + __attribute__((aligned(sizeof(Elf##size##_Word)))); \ + } _ELFNOTE_PASTE(_note_, unique) \ + __used \ + __attribute__((section(".note." 
name), \ + aligned(sizeof(Elf##size##_Word)), \ + unused)) = { \ + { \ + sizeof(name), \ + sizeof(desc), \ + type, \ + }, \ + name, \ + desc \ + } +#define ELFNOTE(size, name, type, desc) \ + _ELFNOTE(size, name, __LINE__, type, desc) + +#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc) +#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc) +#endif /* __ASSEMBLER__ */ + +#endif /* _LINUX_ELFNOTE_H */ diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h new file mode 100644 index 00000000..9a33c5f7 --- /dev/null +++ b/include/linux/enclosure.h @@ -0,0 +1,133 @@ +/* + * Enclosure Services + * + * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> + * +**----------------------------------------------------------------------------- +** +** This program is free software; you can redistribute it and/or +** modify it under the terms of the GNU General Public License +** version 2 as published by the Free Software Foundation. +** +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +** You should have received a copy of the GNU General Public License +** along with this program; if not, write to the Free Software +** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +** +**----------------------------------------------------------------------------- +*/ +#ifndef _LINUX_ENCLOSURE_H_ +#define _LINUX_ENCLOSURE_H_ + +#include <linux/device.h> +#include <linux/list.h> + +/* A few generic types ... taken from ses-2 */ +enum enclosure_component_type { + ENCLOSURE_COMPONENT_DEVICE = 0x01, + ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, +}; + +/* ses-2 common element status */ +enum enclosure_status { + ENCLOSURE_STATUS_UNSUPPORTED = 0, + ENCLOSURE_STATUS_OK, + ENCLOSURE_STATUS_CRITICAL, + ENCLOSURE_STATUS_NON_CRITICAL, + ENCLOSURE_STATUS_UNRECOVERABLE, + ENCLOSURE_STATUS_NOT_INSTALLED, + ENCLOSURE_STATUS_UNKNOWN, + ENCLOSURE_STATUS_UNAVAILABLE, + /* last element for counting purposes */ + ENCLOSURE_STATUS_MAX +}; + +/* SFF-8485 activity light settings */ +enum enclosure_component_setting { + ENCLOSURE_SETTING_DISABLED = 0, + ENCLOSURE_SETTING_ENABLED = 1, + ENCLOSURE_SETTING_BLINK_A_ON_OFF = 2, + ENCLOSURE_SETTING_BLINK_A_OFF_ON = 3, + ENCLOSURE_SETTING_BLINK_B_ON_OFF = 6, + ENCLOSURE_SETTING_BLINK_B_OFF_ON = 7, +}; + +struct enclosure_device; +struct enclosure_component; +struct enclosure_component_callbacks { + void (*get_status)(struct enclosure_device *, + struct enclosure_component *); + int (*set_status)(struct enclosure_device *, + struct enclosure_component *, + enum enclosure_status); + void (*get_fault)(struct enclosure_device *, + struct enclosure_component *); + int (*set_fault)(struct enclosure_device *, + struct enclosure_component *, + enum enclosure_component_setting); + void (*get_active)(struct enclosure_device *, + struct enclosure_component *); + int (*set_active)(struct enclosure_device *, + struct enclosure_component *, + enum enclosure_component_setting); + void (*get_locate)(struct enclosure_device *, + struct enclosure_component *); + int (*set_locate)(struct enclosure_device *, + struct enclosure_component *, + enum enclosure_component_setting); +}; + + +struct enclosure_component { + void *scratch; + struct device cdev; + struct device *dev; + enum enclosure_component_type type; + int number; + int fault; + int 
active; + int locate; + enum enclosure_status status; +}; + +struct enclosure_device { + void *scratch; + struct list_head node; + struct device edev; + struct enclosure_component_callbacks *cb; + int components; + struct enclosure_component component[0]; +}; + +static inline struct enclosure_device * +to_enclosure_device(struct device *dev) +{ + return container_of(dev, struct enclosure_device, edev); +} + +static inline struct enclosure_component * +to_enclosure_component(struct device *dev) +{ + return container_of(dev, struct enclosure_component, cdev); +} + +struct enclosure_device * +enclosure_register(struct device *, const char *, int, + struct enclosure_component_callbacks *); +void enclosure_unregister(struct enclosure_device *); +struct enclosure_component * +enclosure_component_register(struct enclosure_device *, unsigned int, + enum enclosure_component_type, const char *); +int enclosure_add_device(struct enclosure_device *enclosure, int component, + struct device *dev); +int enclosure_remove_device(struct enclosure_device *, struct device *); +struct enclosure_device *enclosure_find(struct device *dev, + struct enclosure_device *start); +int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *), + void *data); + +#endif /* _LINUX_ENCLOSURE_H_ */ diff --git a/include/linux/err.h b/include/linux/err.h new file mode 100644 index 00000000..f2edce25 --- /dev/null +++ b/include/linux/err.h @@ -0,0 +1,65 @@ +#ifndef _LINUX_ERR_H +#define _LINUX_ERR_H + +#include <linux/compiler.h> + +#include <asm/errno.h> + +/* + * Kernel pointers have redundant information, so we can use a + * scheme where we can return either an error code or a dentry + * pointer with the same return value. + * + * This should be a per-architecture thing, to allow different + * error and pointer decisions. + */ +#define MAX_ERRNO 4095 + +#ifndef __ASSEMBLY__ + +#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) + +static inline void * __must_check ERR_PTR(long error) +{ + return (void *) error; +} + +static inline long __must_check PTR_ERR(const void *ptr) +{ + return (long) ptr; +} + +static inline long __must_check IS_ERR(const void *ptr) +{ + return IS_ERR_VALUE((unsigned long)ptr); +} + +static inline long __must_check IS_ERR_OR_NULL(const void *ptr) +{ + return !ptr || IS_ERR_VALUE((unsigned long)ptr); +} + +/** + * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type + * @ptr: The pointer to cast. + * + * Explicitly cast an error-valued pointer to another pointer type in such a + * way as to make it clear that's what's going on. + */ +static inline void * __must_check ERR_CAST(const void *ptr) +{ + /* cast away the const */ + return (void *) ptr; +} + +static inline int __must_check PTR_RET(const void *ptr) +{ + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + else + return 0; +} + +#endif + +#endif /* _LINUX_ERR_H */ diff --git a/include/linux/errno.h b/include/linux/errno.h new file mode 100644 index 00000000..2d09bfa5 --- /dev/null +++ b/include/linux/errno.h @@ -0,0 +1,35 @@ +#ifndef _LINUX_ERRNO_H +#define _LINUX_ERRNO_H + +#include <asm/errno.h> + +#ifdef __KERNEL__ + +/* + * These should never be seen by user programs. To return one of ERESTART* + * codes, signal_pending() MUST be set. Note that ptrace can observe these + * at syscall exit tracing, but they will never be left for the debugged user + * process to see. + */ +#define ERESTARTSYS 512 +#define ERESTARTNOINTR 513 +#define ERESTARTNOHAND 514 /* restart if no handler.. 
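+ *
+ * Editorial aside on the linux/err.h helpers above, not part of this
+ * header: ERR_PTR()/IS_ERR()/PTR_ERR() let a single pointer return value
+ * carry either a valid pointer or a negative errno. A minimal sketch,
+ * assuming a hypothetical foo_create():
+ *
+ *      struct foo *foo_create(void)
+ *      {
+ *              struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ *              if (!f)
+ *                      return ERR_PTR(-ENOMEM);
+ *              return f;
+ *      }
+ *
+ *      and in the caller:
+ *
+ *      struct foo *f = foo_create();
+ *
+ *      if (IS_ERR(f))
+ *              return PTR_ERR(f);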
*/ +#define ENOIOCTLCMD 515 /* No ioctl command */ +#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ +#define EPROBE_DEFER 517 /* Driver requests probe retry */ + +/* Defined for the NFSv3 protocol */ +#define EBADHANDLE 521 /* Illegal NFS file handle */ +#define ENOTSYNC 522 /* Update synchronization mismatch */ +#define EBADCOOKIE 523 /* Cookie is stale */ +#define ENOTSUPP 524 /* Operation is not supported */ +#define ETOOSMALL 525 /* Buffer or request is too small */ +#define ESERVERFAULT 526 /* An untranslatable error occurred */ +#define EBADTYPE 527 /* Type not supported by server */ +#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ +#define EIOCBQUEUED 529 /* iocb queued, will get completion event */ +#define EIOCBRETRY 530 /* iocb queued, will trigger a retry */ + +#endif + +#endif diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h new file mode 100644 index 00000000..fd0628be --- /dev/null +++ b/include/linux/errqueue.h @@ -0,0 +1,48 @@ +#ifndef _LINUX_ERRQUEUE_H +#define _LINUX_ERRQUEUE_H 1 + +#include <linux/types.h> + +struct sock_extended_err { + __u32 ee_errno; + __u8 ee_origin; + __u8 ee_type; + __u8 ee_code; + __u8 ee_pad; + __u32 ee_info; + __u32 ee_data; +}; + +#define SO_EE_ORIGIN_NONE 0 +#define SO_EE_ORIGIN_LOCAL 1 +#define SO_EE_ORIGIN_ICMP 2 +#define SO_EE_ORIGIN_ICMP6 3 +#define SO_EE_ORIGIN_TXSTATUS 4 +#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS + +#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1)) + +#ifdef __KERNEL__ + +#include <net/ip.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <linux/ipv6.h> +#endif + +#define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb)) + +struct sock_exterr_skb { + union { + struct inet_skb_parm h4; +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_skb_parm h6; +#endif + } header; + struct sock_extended_err ee; + u16 addr_offset; + __be16 port; +}; + +#endif + +#endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h new file mode 100644 index 00000000..fe5136d8 --- /dev/null +++ b/include/linux/etherdevice.h @@ -0,0 +1,280 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. NET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Ethernet handlers. + * + * Version: @(#)eth.h 1.0.4 05/13/93 + * + * Authors: Ross Biro + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * Relocated to include/linux where it belongs by Alan Cox + * <gw4pts@gw4pts.ampr.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * WARNING: This move may well be temporary. This file will get merged with others RSN. 
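+ *
+ * Editorial aside on linux/errqueue.h above, not part of this header:
+ * userspace retrieves struct sock_extended_err records from a socket's
+ * error queue with recvmsg(fd, &msg, MSG_ERRQUEUE) once IP_RECVERR (or the
+ * IPv6 equivalent) has been enabled; SO_EE_OFFENDER() then yields the
+ * address that generated the error. A rough sketch of the cmsg loop, with
+ * report_icmp_error() standing in for application code:
+ *
+ *      struct cmsghdr *cmsg;
+ *
+ *      for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ *              if (cmsg->cmsg_level == SOL_IP &&
+ *                  cmsg->cmsg_type == IP_RECVERR) {
+ *                      struct sock_extended_err *ee = (void *)CMSG_DATA(cmsg);
+ *
+ *                      report_icmp_error(ee->ee_type, ee->ee_code,
+ *                                        SO_EE_OFFENDER(ee));
+ *              }
+ *      }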
+ * + */ +#ifndef _LINUX_ETHERDEVICE_H +#define _LINUX_ETHERDEVICE_H + +#include <linux/if_ether.h> +#include <linux/netdevice.h> +#include <linux/random.h> +#include <asm/unaligned.h> + +#ifdef __KERNEL__ +extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); +extern const struct header_ops eth_header_ops; + +extern int eth_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, unsigned len); +extern int eth_rebuild_header(struct sk_buff *skb); +extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); +extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); +extern void eth_header_cache_update(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char *haddr); +extern int eth_mac_addr(struct net_device *dev, void *p); +extern int eth_change_mtu(struct net_device *dev, int new_mtu); +extern int eth_validate_addr(struct net_device *dev); + + + +extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, + unsigned int rxqs); +#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) +#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) + +/** + * is_zero_ether_addr - Determine if give Ethernet address is all zeros. + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is all zeroes. + */ +static inline int is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} + +/** + * is_multicast_ether_addr - Determine if the Ethernet address is a multicast. + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is a multicast address. + * By definition the broadcast address is also a multicast address. + */ +static inline int is_multicast_ether_addr(const u8 *addr) +{ + return 0x01 & addr[0]; +} + +/** + * is_local_ether_addr - Determine if the Ethernet address is locally-assigned one (IEEE 802). + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is a local address. + */ +static inline int is_local_ether_addr(const u8 *addr) +{ + return 0x02 & addr[0]; +} + +/** + * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is the broadcast address. + */ +static inline int is_broadcast_ether_addr(const u8 *addr) +{ + return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff; +} + +/** + * is_unicast_ether_addr - Determine if the Ethernet address is unicast + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Return true if the address is a unicast address. + */ +static inline int is_unicast_ether_addr(const u8 *addr) +{ + return !is_multicast_ether_addr(addr); +} + +/** + * is_valid_ether_addr - Determine if the given Ethernet address is valid + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not + * a multicast address, and is not FF:FF:FF:FF:FF:FF. + * + * Return true if the address is valid. + */ +static inline int is_valid_ether_addr(const u8 *addr) +{ + /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to + * explicitly check for it here. 
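+ *
+ * Editorial aside, not part of the original header: a typical probe-time
+ * use of these predicates in a hypothetical driver is to accept the address
+ * read from the EEPROM only if it is usable (a random fallback is sketched
+ * a little further down):
+ *
+ *      u8 mac[ETH_ALEN];
+ *
+ *      foo_read_mac_from_eeprom(priv, mac);
+ *      if (is_valid_ether_addr(mac))
+ *              memcpy(dev->dev_addr, mac, ETH_ALEN);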
*/ + return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr); +} + +/** + * random_ether_addr - Generate software assigned random Ethernet address + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Generate a random Ethernet address (MAC) that is not multicast + * and has the local assigned bit set. + */ +static inline void random_ether_addr(u8 *addr) +{ + get_random_bytes (addr, ETH_ALEN); + addr [0] &= 0xfe; /* clear multicast bit */ + addr [0] |= 0x02; /* set local assignment bit (IEEE802) */ +} + +/** + * eth_hw_addr_random - Generate software assigned random Ethernet and + * set device flag + * @dev: pointer to net_device structure + * + * Generate a random Ethernet address (MAC) to be used by a net device + * and set addr_assign_type so the state can be read by sysfs and be + * used by userspace. + */ +static inline void eth_hw_addr_random(struct net_device *dev) +{ + dev->addr_assign_type |= NET_ADDR_RANDOM; + random_ether_addr(dev->dev_addr); +} + +/** + * compare_ether_addr - Compare two Ethernet addresses + * @addr1: Pointer to a six-byte array containing the Ethernet address + * @addr2: Pointer other six-byte array containing the Ethernet address + * + * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise. + * Unlike memcmp(), it doesn't return a value suitable for sorting. + */ +static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + BUILD_BUG_ON(ETH_ALEN != 6); + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} + +static inline unsigned long zap_last_2bytes(unsigned long value) +{ +#ifdef __BIG_ENDIAN + return value >> 16; +#else + return value << 16; +#endif +} + +/** + * compare_ether_addr_64bits - Compare two Ethernet addresses + * @addr1: Pointer to an array of 8 bytes + * @addr2: Pointer to an other array of 8 bytes + * + * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise. + * Unlike memcmp(), it doesn't return a value suitable for sorting. + * The function doesn't need any conditional branches and possibly uses + * word memory accesses on CPU allowing cheap unaligned memory reads. + * arrays = { byte1, byte2, byte3, byte4, byte6, byte7, pad1, pad2} + * + * Please note that alignment of addr1 & addr2 is only guaranted to be 16 bits. + */ + +static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2], + const u8 addr2[6+2]) +{ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + unsigned long fold = ((*(unsigned long *)addr1) ^ + (*(unsigned long *)addr2)); + + if (sizeof(fold) == 8) + return zap_last_2bytes(fold) != 0; + + fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^ + (*(unsigned long *)(addr2 + 4))); + return fold != 0; +#else + return compare_ether_addr(addr1, addr2); +#endif +} + +/** + * is_etherdev_addr - Tell if given Ethernet address belongs to the device. + * @dev: Pointer to a device structure + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Compare passed address with all addresses of the device. Return true if the + * address if one of the device addresses. + * + * Note that this function calls compare_ether_addr_64bits() so take care of + * the right padding. 
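+ *
+ * Editorial aside, not part of the original header: continuing the probe
+ * sketch above, an invalid EEPROM address is typically replaced by a
+ * locally administered random one, and compare_ether_addr() is the cheap
+ * way to test a frame's destination against dev_addr:
+ *
+ *      if (!is_valid_ether_addr(mac))
+ *              eth_hw_addr_random(dev);        (also sets NET_ADDR_RANDOM)
+ *      else
+ *              memcpy(dev->dev_addr, mac, ETH_ALEN);
+ *
+ *      and on the receive path:
+ *
+ *      bool for_us = compare_ether_addr(eth_hdr(skb)->h_dest,
+ *                                       dev->dev_addr) == 0;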
+ */ +static inline bool is_etherdev_addr(const struct net_device *dev, + const u8 addr[6 + 2]) +{ + struct netdev_hw_addr *ha; + int res = 1; + + rcu_read_lock(); + for_each_dev_addr(dev, ha) { + res = compare_ether_addr_64bits(addr, ha->addr); + if (!res) + break; + } + rcu_read_unlock(); + return !res; +} +#endif /* __KERNEL__ */ + +/** + * compare_ether_header - Compare two Ethernet headers + * @a: Pointer to Ethernet header + * @b: Pointer to Ethernet header + * + * Compare two ethernet headers, returns 0 if equal. + * This assumes that the network header (i.e., IP header) is 4-byte + * aligned OR the platform can handle unaligned access. This is the + * case for all packets coming into netif_receive_skb or similar + * entry points. + */ + +static inline unsigned long compare_ether_header(const void *a, const void *b) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + unsigned long fold; + + /* + * We want to compare 14 bytes: + * [a0 ... a13] ^ [b0 ... b13] + * Use two long XOR, ORed together, with an overlap of two bytes. + * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] | + * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13] + * This means the [a6 a7] ^ [b6 b7] part is done two times. + */ + fold = *(unsigned long *)a ^ *(unsigned long *)b; + fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6); + return fold; +#else + u32 *a32 = (u32 *)((u8 *)a + 2); + u32 *b32 = (u32 *)((u8 *)b + 2); + + return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | + (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); +#endif +} + +#endif /* _LINUX_ETHERDEVICE_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h new file mode 100644 index 00000000..f5647b59 --- /dev/null +++ b/include/linux/ethtool.h @@ -0,0 +1,1210 @@ +/* + * ethtool.h: Defines for Linux ethtool. + * + * Copyright (C) 1998 David S. Miller (davem@redhat.com) + * Copyright 2001 Jeff Garzik <jgarzik@pobox.com> + * Portions Copyright 2001 Sun Microsystems (thockin@sun.com) + * Portions Copyright 2002 Intel (eli.kupermann@intel.com, + * christopher.leech@intel.com, + * scott.feldman@intel.com) + * Portions Copyright (C) Sun Microsystems 2008 + */ + +#ifndef _LINUX_ETHTOOL_H +#define _LINUX_ETHTOOL_H + +#ifdef __KERNEL__ +#include <linux/compat.h> +#endif +#include <linux/types.h> +#include <linux/if_ether.h> + +/* This should work for both 32 and 64 bit userland. */ +struct ethtool_cmd { + __u32 cmd; + __u32 supported; /* Features this interface supports */ + __u32 advertising; /* Features this interface advertises */ + __u16 speed; /* The forced speed (lower bits) in + * Mbps. Please use + * ethtool_cmd_speed()/_set() to + * access it */ + __u8 duplex; /* Duplex, half or full */ + __u8 port; /* Which connector port */ + __u8 phy_address; /* MDIO PHY address (PRTAD for clause 45). + * May be read-only or read-write + * depending on the driver. + */ + __u8 transceiver; /* Which transceiver to use */ + __u8 autoneg; /* Enable or disable autonegotiation */ + __u8 mdio_support; /* MDIO protocols supported. Read-only. + * Not set by all drivers. + */ + __u32 maxtxpkt; /* Tx pkts before generating tx int */ + __u32 maxrxpkt; /* Rx pkts before generating rx int */ + __u16 speed_hi; /* The forced speed (upper + * bits) in Mbps. 
Please use + * ethtool_cmd_speed()/_set() to + * access it */ + __u8 eth_tp_mdix; + __u8 reserved2; + __u32 lp_advertising; /* Features the link partner advertises */ + __u32 reserved[2]; +}; + +static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + + ep->speed = (__u16)speed; + ep->speed_hi = (__u16)(speed >> 16); +} + +static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep) +{ + return (ep->speed_hi << 16) | ep->speed; +} + +/* Device supports clause 22 register access to PHY or peripherals + * using the interface defined in <linux/mii.h>. This should not be + * set if there are known to be no such peripherals present or if + * the driver only emulates clause 22 registers for compatibility. + */ +#define ETH_MDIO_SUPPORTS_C22 1 + +/* Device supports clause 45 register access to PHY or peripherals + * using the interface defined in <linux/mii.h> and <linux/mdio.h>. + * This should not be set if there are known to be no such peripherals + * present. + */ +#define ETH_MDIO_SUPPORTS_C45 2 + +#define ETHTOOL_FWVERS_LEN 32 +#define ETHTOOL_BUSINFO_LEN 32 +/* these strings are set to whatever the driver author decides... */ +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; /* driver short name, "tulip", "eepro100" */ + char version[32]; /* driver version string */ + char fw_version[ETHTOOL_FWVERS_LEN]; /* firmware version string */ + char bus_info[ETHTOOL_BUSINFO_LEN]; /* Bus info for this IF. */ + /* For PCI devices, use pci_name(pci_dev). */ + char reserved1[32]; + char reserved2[12]; + /* + * Some struct members below are filled in + * using ops->get_sset_count(). Obtaining + * this info from ethtool_drvinfo is now + * deprecated; Use ETHTOOL_GSSET_INFO + * instead. + */ + __u32 n_priv_flags; /* number of flags valid in ETHTOOL_GPFLAGS */ + __u32 n_stats; /* number of u64's from ETHTOOL_GSTATS */ + __u32 testinfo_len; + __u32 eedump_len; /* Size of data from ETHTOOL_GEEPROM (bytes) */ + __u32 regdump_len; /* Size of data from ETHTOOL_GREGS (bytes) */ +}; + +#define SOPASS_MAX 6 +/* wake-on-lan settings */ +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; + +/* for passing single values */ +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +/* for passing big chunks of data */ +struct ethtool_regs { + __u32 cmd; + __u32 version; /* driver-specific, indicates different chips/revs */ + __u32 len; /* bytes */ + __u8 data[0]; +}; + +/* for passing EEPROM chunks */ +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; /* in bytes */ + __u32 len; /* in bytes */ + __u8 data[0]; +}; + +/** + * struct ethtool_coalesce - coalescing parameters for IRQs and stats updates + * @cmd: ETHTOOL_{G,S}COALESCE + * @rx_coalesce_usecs: How many usecs to delay an RX interrupt after + * a packet arrives. + * @rx_max_coalesced_frames: Maximum number of packets to receive + * before an RX interrupt. + * @rx_coalesce_usecs_irq: Same as @rx_coalesce_usecs, except that + * this value applies while an IRQ is being serviced by the host. + * @rx_max_coalesced_frames_irq: Same as @rx_max_coalesced_frames, + * except that this value applies while an IRQ is being serviced + * by the host. + * @tx_coalesce_usecs: How many usecs to delay a TX interrupt after + * a packet is sent. + * @tx_max_coalesced_frames: Maximum number of packets to be sent + * before a TX interrupt. 
+ * @tx_coalesce_usecs_irq: Same as @tx_coalesce_usecs, except that + * this value applies while an IRQ is being serviced by the host. + * @tx_max_coalesced_frames_irq: Same as @tx_max_coalesced_frames, + * except that this value applies while an IRQ is being serviced + * by the host. + * @stats_block_coalesce_usecs: How many usecs to delay in-memory + * statistics block updates. Some drivers do not have an + * in-memory statistics block, and in such cases this value is + * ignored. This value must not be zero. + * @use_adaptive_rx_coalesce: Enable adaptive RX coalescing. + * @use_adaptive_tx_coalesce: Enable adaptive TX coalescing. + * @pkt_rate_low: Threshold for low packet rate (packets per second). + * @rx_coalesce_usecs_low: How many usecs to delay an RX interrupt after + * a packet arrives, when the packet rate is below @pkt_rate_low. + * @rx_max_coalesced_frames_low: Maximum number of packets to be received + * before an RX interrupt, when the packet rate is below @pkt_rate_low. + * @tx_coalesce_usecs_low: How many usecs to delay a TX interrupt after + * a packet is sent, when the packet rate is below @pkt_rate_low. + * @tx_max_coalesced_frames_low: Maximum number of packets to be sent before + * a TX interrupt, when the packet rate is below @pkt_rate_low. + * @pkt_rate_high: Threshold for high packet rate (packets per second). + * @rx_coalesce_usecs_high: How many usecs to delay an RX interrupt after + * a packet arrives, when the packet rate is above @pkt_rate_high. + * @rx_max_coalesced_frames_high: Maximum number of packets to be received + * before an RX interrupt, when the packet rate is above @pkt_rate_high. + * @tx_coalesce_usecs_high: How many usecs to delay a TX interrupt after + * a packet is sent, when the packet rate is above @pkt_rate_high. + * @tx_max_coalesced_frames_high: Maximum number of packets to be sent before + * a TX interrupt, when the packet rate is above @pkt_rate_high. + * @rate_sample_interval: How often to do adaptive coalescing packet rate + * sampling, measured in seconds. Must not be zero. + * + * Each pair of (usecs, max_frames) fields specifies this exit + * condition for interrupt coalescing: + * (usecs > 0 && time_since_first_completion >= usecs) || + * (max_frames > 0 && completed_frames >= max_frames) + * It is illegal to set both usecs and max_frames to zero as this + * would cause interrupts to never be generated. To disable + * coalescing, set usecs = 0 and max_frames = 1. + * + * Some implementations ignore the value of max_frames and use the + * condition: + * time_since_first_completion >= usecs + * This is deprecated. Drivers for hardware that does not support + * counting completions should validate that max_frames == !rx_usecs. + * + * Adaptive RX/TX coalescing is an algorithm implemented by some + * drivers to improve latency under low packet rates and improve + * throughput under high packet rates. Some drivers only implement + * one of RX or TX adaptive coalescing. Anything not implemented by + * the driver causes these values to be silently ignored. + * + * When the packet rate is below @pkt_rate_high but above + * @pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used.
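+ *
+ * Editorial aside, not part of the original header: an illustrative,
+ * non-normative set_coalesce() fragment for a hypothetical driver that only
+ * supports the RX usecs/max_frames pair, enforcing the rule above that both
+ * values must not be zero at the same time:
+ *
+ *      static int foo_set_coalesce(struct net_device *dev,
+ *                                  struct ethtool_coalesce *ec)
+ *      {
+ *              if (ec->rx_coalesce_usecs == 0 &&
+ *                  ec->rx_max_coalesced_frames == 0)
+ *                      return -EINVAL;
+ *
+ *              foo_hw_write_coalesce(dev, ec->rx_coalesce_usecs,
+ *                                    ec->rx_max_coalesced_frames);
+ *              return 0;
+ *      }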
+ */ +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; + +/* for configuring RX/TX ring parameters */ +struct ethtool_ringparam { + __u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +/** + * struct ethtool_channels - configuring number of network channel + * @cmd: ETHTOOL_{G,S}CHANNELS + * @max_rx: Read only. Maximum number of receive channel the driver support. + * @max_tx: Read only. Maximum number of transmit channel the driver support. + * @max_other: Read only. Maximum number of other channel the driver support. + * @max_combined: Read only. Maximum number of combined channel the driver + * support. Set of queues RX, TX or other. + * @rx_count: Valid values are in the range 1 to the max_rx. + * @tx_count: Valid values are in the range 1 to the max_tx. + * @other_count: Valid values are in the range 1 to the max_other. + * @combined_count: Valid values are in the range 1 to the max_combined. + * + * This can be used to configure RX, TX and other channels. + */ + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +/* for configuring link flow control parameters */ +struct ethtool_pauseparam { + __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +#define ETH_GSTRING_LEN 32 +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, + ETH_SS_PRIV_FLAGS, + ETH_SS_NTUPLE_FILTERS, /* Do not use, GRXNTUPLE is now deprecated */ + ETH_SS_FEATURES, +}; + +/* for passing string sets for data tagging */ +struct ethtool_gstrings { + __u32 cmd; /* ETHTOOL_GSTRINGS */ + __u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + __u32 len; /* number of strings in the string set */ + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; /* ETHTOOL_GSSET_INFO */ + __u32 reserved; + __u64 sset_mask; /* input: each bit selects an sset to query */ + /* output: each bit a returned sset */ + __u32 data[0]; /* ETH_SS_xxx count, in order, based on bits + in sset_mask. One bit implies one + __u32, two bits implies two + __u32's, etc. */ +}; + +/** + * enum ethtool_test_flags - flags definition of ethtool_test + * @ETH_TEST_FL_OFFLINE: if set perform online and offline tests, otherwise + * only online tests. + * @ETH_TEST_FL_FAILED: Driver set this flag if test fails. + * @ETH_TEST_FL_EXTERNAL_LB: Application request to perform external loopback + * test. + * @ETH_TEST_FL_EXTERNAL_LB_DONE: Driver performed the external loopback test + */ + +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), + ETH_TEST_FL_EXTERNAL_LB = (1 << 2), + ETH_TEST_FL_EXTERNAL_LB_DONE = (1 << 3), +}; + +/* for requesting NIC test and getting results*/ +struct ethtool_test { + __u32 cmd; /* ETHTOOL_TEST */ + __u32 flags; /* ETH_TEST_FL_xxx */ + __u32 reserved; + __u32 len; /* result length, in number of u64 elements */ + __u64 data[0]; +}; + +/* for dumping NIC-specific statistics */ +struct ethtool_stats { + __u32 cmd; /* ETHTOOL_GSTATS */ + __u32 n_stats; /* number of u64's being returned */ + __u64 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; /* ETHTOOL_GPERMADDR */ + __u32 size; + __u8 data[0]; +}; + +/* boolean flags controlling per-interface behavior characteristics. + * When reading, the flag indicates whether or not a certain behavior + * is enabled/present. When writing, the flag indicates whether + * or not the driver should turn on (set) or off (clear) a behavior. + * + * Some behaviors may read-only (unconditionally absent or present). + * If such is the case, return EINVAL in the set-flags operation if the + * flag differs from the read-only value. + */ +enum ethtool_flags { + ETH_FLAG_TXVLAN = (1 << 7), /* TX VLAN offload enabled */ + ETH_FLAG_RXVLAN = (1 << 8), /* RX VLAN offload enabled */ + ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ + ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */ + ETH_FLAG_RXHASH = (1 << 28), +}; + +/* The following structures are for supporting RX network flow + * classification and RX n-tuple configuration. Note, all multibyte + * fields, e.g., ip4src, ip4dst, psrc, pdst, spi, etc. are expected to + * be in network byte order. + */ + +/** + * struct ethtool_tcpip4_spec - flow specification for TCP/IPv4 etc. + * @ip4src: Source host + * @ip4dst: Destination host + * @psrc: Source port + * @pdst: Destination port + * @tos: Type-of-service + * + * This can be used to specify a TCP/IPv4, UDP/IPv4 or SCTP/IPv4 flow. + */ +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +/** + * struct ethtool_ah_espip4_spec - flow specification for IPsec/IPv4 + * @ip4src: Source host + * @ip4dst: Destination host + * @spi: Security parameters index + * @tos: Type-of-service + * + * This can be used to specify an IPsec transport or tunnel over IPv4. 
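+ *
+ * Editorial aside, not part of the original header: as noted above, the
+ * multibyte fields of these flow specifications are carried in network byte
+ * order. A rule body matching TCP traffic to 192.168.0.1:80 would therefore
+ * be filled in roughly as follows (0xc0a80001 being 192.168.0.1):
+ *
+ *      struct ethtool_tcpip4_spec spec = {
+ *              .ip4dst = htonl(0xc0a80001),
+ *              .pdst   = htons(80),
+ *      };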
+ */ +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +#define ETH_RX_NFC_IP4 1 + +/** + * struct ethtool_usrip4_spec - general flow specification for IPv4 + * @ip4src: Source host + * @ip4dst: Destination host + * @l4_4_bytes: First 4 bytes of transport (layer 4) header + * @tos: Type-of-service + * @ip_ver: Value must be %ETH_RX_NFC_IP4; mask must be 0 + * @proto: Transport protocol number; mask must be 0 + */ +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + __u8 hdata[60]; +}; + +struct ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +/** + * struct ethtool_rx_flow_spec - classification rule for RX flows + * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW + * @h_u: Flow fields to match (dependent on @flow_type) + * @h_ext: Additional fields to match + * @m_u: Masks for flow field bits to be matched + * @m_ext: Masks for additional field bits to be matched + * Note, all additional fields must be ignored unless @flow_type + * includes the %FLOW_EXT flag. + * @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC + * if packets should be discarded + * @location: Location of rule in the table. Locations must be + * numbered such that a flow matching multiple rules will be + * classified according to the first (lowest numbered) rule. + */ +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; + +/** + * struct ethtool_rxnfc - command to get or set RX flow classification rules + * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH, + * %ETHTOOL_GRXRINGS, %ETHTOOL_GRXCLSRLCNT, %ETHTOOL_GRXCLSRULE, + * %ETHTOOL_GRXCLSRLALL, %ETHTOOL_SRXCLSRLDEL or %ETHTOOL_SRXCLSRLINS + * @flow_type: Type of flow to be affected, e.g. %TCP_V4_FLOW + * @data: Command-dependent value + * @fs: Flow classification rule + * @rule_cnt: Number of rules to be affected + * @rule_locs: Array of used rule locations + * + * For %ETHTOOL_GRXFH and %ETHTOOL_SRXFH, @data is a bitmask indicating + * the fields included in the flow hash, e.g. %RXH_IP_SRC. The following + * structure fields must not be used. + * + * For %ETHTOOL_GRXRINGS, @data is set to the number of RX rings/queues + * on return. + * + * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined + * rules on return. If @data is non-zero on return then it is the + * size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the + * driver supports any special location values. If that flag is not + * set in @data then special location values should not be used. + * + * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an + * existing rule on entry and @fs contains the rule on return. + * + * For %ETHTOOL_GRXCLSRLALL, @rule_cnt specifies the array size of the + * user buffer for @rule_locs on entry. On return, @data is the size + * of the rule table, @rule_cnt is the number of defined rules, and + * @rule_locs contains the locations of the defined rules. 
Drivers + * must use the second parameter to get_rxnfc() instead of @rule_locs. + * + * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update. + * @fs.@location either specifies the location to use or is a special + * location value with %RX_CLS_LOC_SPECIAL flag set. On return, + * @fs.@location is the actual rule location. + * + * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an + * existing rule on entry. + * + * A driver supporting the special location values for + * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused + * location, and may remove a rule at a later location (lower + * priority) that matches exactly the same set of flows. The special + * values are: %RX_CLS_LOC_ANY, selecting any location; + * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum + * priority); and %RX_CLS_LOC_LAST, selecting the last suitable + * location (minimum priority). Additional special values may be + * defined in future and drivers must return -%EINVAL for any + * unrecognised value. + */ +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + __u32 rule_cnt; + __u32 rule_locs[0]; +}; + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT + +struct compat_ethtool_rx_flow_spec { + u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + compat_u64 ring_cookie; + u32 location; +}; + +struct compat_ethtool_rxnfc { + u32 cmd; + u32 flow_type; + compat_u64 data; + struct compat_ethtool_rx_flow_spec fs; + u32 rule_cnt; + u32 rule_locs[0]; +}; + +#endif /* CONFIG_COMPAT */ +#endif /* __KERNEL__ */ + +/** + * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection + * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR + * @size: On entry, the array size of the user buffer, which may be zero. + * On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware + * indirection table. + * @ring_index: RX ring/queue index for each hash value + * + * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size + * should be returned. For %ETHTOOL_SRXFHINDIR, a @size of zero means + * the table should be reset to default values. This last feature + * is not supported by the original implementations. + */ +struct ethtool_rxfh_indir { + __u32 cmd; + __u32 size; + __u32 ring_index[0]; +}; + +/** + * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter + * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW + * @h_u: Flow field values to match (dependent on @flow_type) + * @m_u: Masks for flow field value bits to be ignored + * @vlan_tag: VLAN tag to match + * @vlan_tag_mask: Mask for VLAN tag bits to be ignored + * @data: Driver-dependent data to match + * @data_mask: Mask for driver-dependent data bits to be ignored + * @action: RX ring/queue index to deliver to (non-negative) or other action + * (negative, e.g. %ETHTOOL_RXNTUPLE_ACTION_DROP) + * + * For flow types %TCP_V4_FLOW, %UDP_V4_FLOW and %SCTP_V4_FLOW, where + * a field value and mask are both zero this is treated as if all mask + * bits are set i.e. the field is ignored. 
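+ *
+ * Editorial aside, not part of the original header: from userspace these
+ * structures are passed through the SIOCETHTOOL ioctl on an ordinary
+ * AF_INET datagram socket. Note the mask convention differs between the two
+ * structures: for &struct ethtool_rx_flow_spec (defined above) set mask
+ * bits select the bits to match, whereas for the n-tuple structure being
+ * documented here set mask bits are ignored. A rough sketch that asks the
+ * driver to steer TCP port-80 traffic to RX ring 1 (TCP_V4_FLOW and
+ * RX_CLS_LOC_ANY are the constants referenced in the text above; headers
+ * such as <net/if.h> and <sys/ioctl.h> assumed, error handling omitted):
+ *
+ *      struct ethtool_rxnfc nfc = {
+ *              .cmd            = ETHTOOL_SRXCLSRLINS,
+ *              .fs.flow_type   = TCP_V4_FLOW,
+ *              .fs.ring_cookie = 1,
+ *              .fs.location    = RX_CLS_LOC_ANY,
+ *      };
+ *      struct ifreq ifr;
+ *
+ *      nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
+ *      nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);
+ *      memset(&ifr, 0, sizeof(ifr));
+ *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *      ifr.ifr_data = (void *)&nfc;
+ *      ioctl(sock, SIOCETHTOOL, &ifr);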
+ */ +struct ethtool_rx_ntuple_flow_spec { + __u32 flow_type; + union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + __u8 hdata[72]; + } h_u, m_u; + + __u16 vlan_tag; + __u16 vlan_tag_mask; + __u64 data; + __u64 data_mask; + + __s32 action; +#define ETHTOOL_RXNTUPLE_ACTION_DROP (-1) /* drop packet */ +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) /* clear filter */ +}; + +/** + * struct ethtool_rx_ntuple - command to set or clear RX flow filter + * @cmd: Command number - %ETHTOOL_SRXNTUPLE + * @fs: Flow filter specification + */ +struct ethtool_rx_ntuple { + __u32 cmd; + struct ethtool_rx_ntuple_flow_spec fs; +}; + +#define ETHTOOL_FLASH_MAX_FILENAME 128 +enum ethtool_flash_op_type { + ETHTOOL_FLASH_ALL_REGIONS = 0, +}; + +/* for passing firmware flashing related parameters */ +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[ETHTOOL_FLASH_MAX_FILENAME]; +}; + +/** + * struct ethtool_dump - used for retrieving, setting device dump + * @cmd: Command number - %ETHTOOL_GET_DUMP_FLAG, %ETHTOOL_GET_DUMP_DATA, or + * %ETHTOOL_SET_DUMP + * @version: FW version of the dump, filled in by driver + * @flag: driver dependent flag for dump setting, filled in by driver during + * get and filled in by ethtool for set operation + * @len: length of dump data, used as the length of the user buffer on entry to + * %ETHTOOL_GET_DUMP_DATA and this is returned as dump length by driver + * for %ETHTOOL_GET_DUMP_FLAG command + * @data: data collected for get dump data operation + */ +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +/* for returning and changing feature sets */ + +/** + * struct ethtool_get_features_block - block with state of 32 features + * @available: mask of changeable features + * @requested: mask of features requested to be enabled if possible + * @active: mask of currently enabled features + * @never_changed: mask of features not changeable for any device + */ +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +/** + * struct ethtool_gfeatures - command to get state of device's features + * @cmd: command number = %ETHTOOL_GFEATURES + * @size: in: number of elements in the features[] array; + * out: number of elements in features[] needed to hold all features + * @features: state of features + */ +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +/** + * struct ethtool_set_features_block - block with request for 32 features + * @valid: mask of features to be changed + * @requested: values of features to be changed + */ +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +/** + * struct ethtool_sfeatures - command to request change in device's features + * @cmd: command number = %ETHTOOL_SFEATURES + * @size: array size of the features[] array + * @features: feature change masks + */ +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +/* + * %ETHTOOL_SFEATURES changes features present in features[].valid to the + * values of corresponding bits in features[].requested. Bits in .requested + * not set in .valid or not changeable are ignored. 
+ * + * Returns %EINVAL when .valid contains undefined or never-changeable bits + * or size is not equal to required number of features words (32-bit blocks). + * Returns >= 0 if request was completed; bits set in the value mean: + * %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not + * changeable (not present in %ETHTOOL_GFEATURES' features[].available) + * those bits were ignored. + * %ETHTOOL_F_WISH - some or all changes requested were recorded but the + * resulting state of bits masked by .valid is not equal to .requested. + * Probably there are other device-specific constraints on some features + * in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered + * here as though ignored bits were cleared. + * %ETHTOOL_F_COMPAT - some or all changes requested were made by calling + * compatibility functions. Requested offload state cannot be properly + * managed by kernel. + * + * Meaning of bits in the masks are obtained by %ETHTOOL_GSSET_INFO (number of + * bits in the arrays - always multiple of 32) and %ETHTOOL_GSTRINGS commands + * for ETH_SS_FEATURES string set. First entry in the table corresponds to least + * significant bit in features[0] fields. Empty strings mark undefined features. + */ +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT, + ETHTOOL_F_WISH__BIT, + ETHTOOL_F_COMPAT__BIT, +}; + +#define ETHTOOL_F_UNSUPPORTED (1 << ETHTOOL_F_UNSUPPORTED__BIT) +#define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT) +#define ETHTOOL_F_COMPAT (1 << ETHTOOL_F_COMPAT__BIT) + +#ifdef __KERNEL__ + +#include <linux/rculist.h> + +extern int __ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd); + +/** + * enum ethtool_phys_id_state - indicator state for physical identification + * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated + * @ETHTOOL_ID_ACTIVE: Physical ID indicator should be activated + * @ETHTOOL_ID_ON: LED should be turned on (used iff %ETHTOOL_ID_ACTIVE + * is not supported) + * @ETHTOOL_ID_OFF: LED should be turned off (used iff %ETHTOOL_ID_ACTIVE + * is not supported) + */ +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE, + ETHTOOL_ID_ACTIVE, + ETHTOOL_ID_ON, + ETHTOOL_ID_OFF +}; + +struct net_device; + +/* Some generic methods drivers may use in their ethtool_ops */ +u32 ethtool_op_get_link(struct net_device *dev); + +/** + * ethtool_rxfh_indir_default - get default value for RX flow hash indirection + * @index: Index in RX flow hash indirection table + * @n_rx_rings: Number of RX rings to use + * + * This function provides the default policy for RX flow hash indirection. + */ +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} + +/** + * struct ethtool_ops - optional netdev operations + * @get_settings: Get various device settings including Ethernet link + * settings. The @cmd parameter is expected to have been cleared + * before get_settings is called. Returns a negative error code or + * zero. + * @set_settings: Set various device settings including Ethernet link + * settings. Returns a negative error code or zero. + * @get_drvinfo: Report driver/device information. Should only set the + * @driver, @version, @fw_version and @bus_info fields. If not + * implemented, the @driver and @bus_info fields will be filled in + * according to the netdev's parent device. 
+ * @get_regs_len: Get buffer length required for @get_regs + * @get_regs: Get device registers + * @get_wol: Report whether Wake-on-Lan is enabled + * @set_wol: Turn Wake-on-Lan on or off. Returns a negative error code + * or zero. + * @get_msglevel: Report driver message level. This should be the value + * of the @msg_enable field used by netif logging functions. + * @set_msglevel: Set driver message level + * @nway_reset: Restart autonegotiation. Returns a negative error code + * or zero. + * @get_link: Report whether physical link is up. Will only be called if + * the netdev is up. Should usually be set to ethtool_op_get_link(), + * which uses netif_carrier_ok(). + * @get_eeprom: Read data from the device EEPROM. + * Should fill in the magic field. Don't need to check len for zero + * or wraparound. Fill in the data argument with the eeprom values + * from offset to offset + len. Update len to the amount read. + * Returns an error or zero. + * @set_eeprom: Write data to the device EEPROM. + * Should validate the magic field. Don't need to check len for zero + * or wraparound. Update len to the amount written. Returns an error + * or zero. + * @get_coalesce: Get interrupt coalescing parameters. Returns a negative + * error code or zero. + * @set_coalesce: Set interrupt coalescing parameters. Returns a negative + * error code or zero. + * @get_ringparam: Report ring sizes + * @set_ringparam: Set ring sizes. Returns a negative error code or zero. + * @get_pauseparam: Report pause parameters + * @set_pauseparam: Set pause parameters. Returns a negative error code + * or zero. + * @self_test: Run specified self-tests + * @get_strings: Return a set of strings that describe the requested objects + * @set_phys_id: Identify the physical devices, e.g. by flashing an LED + * attached to it. The implementation may update the indicator + * asynchronously or synchronously, but in either case it must return + * quickly. It is initially called with the argument %ETHTOOL_ID_ACTIVE, + * and must either activate asynchronous updates and return zero, return + * a negative error or return a positive frequency for synchronous + * indication (e.g. 1 for one on/off cycle per second). If it returns + * a frequency then it will be called again at intervals with the + * argument %ETHTOOL_ID_ON or %ETHTOOL_ID_OFF and should set the state of + * the indicator accordingly. Finally, it is called with the argument + * %ETHTOOL_ID_INACTIVE and must deactivate the indicator. Returns a + * negative error code or zero. + * @get_ethtool_stats: Return extended statistics about the device. + * This is only useful if the device maintains statistics not + * included in &struct rtnl_link_stats64. + * @begin: Function to be called before any other operation. Returns a + * negative error code or zero. + * @complete: Function to be called after any other operation except + * @begin. Will be called even if the other operation failed. + * @get_priv_flags: Report driver-specific feature flags. + * @set_priv_flags: Set driver-specific feature flags. Returns a negative + * error code or zero. + * @get_sset_count: Get number of strings that @get_strings will write. + * @get_rxnfc: Get RX flow classification rules. Returns a negative + * error code or zero. + * @set_rxnfc: Set RX flow classification rules. Returns a negative + * error code or zero. + * @flash_device: Write a firmware image to device's flash memory. + * Returns a negative error code or zero. 
+ * @reset: Reset (part of) the device, as specified by a bitmask of + * flags from &enum ethtool_reset_flags. Returns a negative + * error code or zero. + * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. + * Returns zero if not supported for this specific device. + * @get_rxfh_indir: Get the contents of the RX flow hash indirection table. + * Will not be called if @get_rxfh_indir_size returns zero. + * Returns a negative error code or zero. + * @set_rxfh_indir: Set the contents of the RX flow hash indirection table. + * Will not be called if @get_rxfh_indir_size returns zero. + * Returns a negative error code or zero. + * @get_channels: Get number of channels. + * @set_channels: Set number of channels. Returns a negative error code or + * zero. + * @get_dump_flag: Get dump flag indicating current dump length, version, + * and flag of the device. + * @get_dump_data: Get dump data. + * @set_dump: Set dump specific flags to the device. + * + * All operations are optional (i.e. the function pointer may be set + * to %NULL) and callers must take this into account. Callers must + * hold the RTNL lock. + * + * See the structures used by these operations for further documentation. + * + * See &struct net_device and &struct net_device_ops for documentation + * of the generic netdev features interface. + */ +struct ethtool_ops { + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*set_settings)(struct net_device *, struct ethtool_cmd *); + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, + struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, + struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, + struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, + struct ethtool_ringparam *); + void (*get_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + int (*set_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32 stringset, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, + struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, + struct ethtool_rxnfc *, u32 *rule_locs); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh_indir)(struct net_device *, u32 *); + int (*set_rxfh_indir)(struct 
net_device *, const u32 *); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, + struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + +}; +#endif /* __KERNEL__ */ + +/* CMDs currently supported */ +#define ETHTOOL_GSET 0x00000001 /* Get settings. */ +#define ETHTOOL_SSET 0x00000002 /* Set settings. */ +#define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */ +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ +#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ +#define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */ +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ +/* Get link status for host, i.e. whether the interface *and* the + * physical port (if there is one) are up (ethtool_value). */ +#define ETHTOOL_GLINK 0x0000000a +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */ +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. 
*/ +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */ +#define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */ +#define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */ +#define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */ +#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ +#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */ +#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */ +#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ +#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ + +#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ +#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ +#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */ +#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */ +#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */ +#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */ +#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */ +#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */ +#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */ +#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */ +#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */ +#define ETHTOOL_RESET 0x00000034 /* Reset hardware */ +#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */ +#define ETHTOOL_GRXNTUPLE 0x00000036 /* deprecated */ +#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */ +#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */ +#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */ + +#define ETHTOOL_GFEATURES 0x0000003a /* Get device offload settings */ +#define ETHTOOL_SFEATURES 0x0000003b /* Change device offload settings */ +#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */ +#define ETHTOOL_SCHANNELS 0x0000003d /* Set no of channels */ +#define ETHTOOL_SET_DUMP 0x0000003e /* Set dump settings */ +#define ETHTOOL_GET_DUMP_FLAG 0x0000003f /* Get dump settings */ +#define ETHTOOL_GET_DUMP_DATA 0x00000040 /* Get dump data */ + +/* compatibility with older code */ +#define SPARC_ETH_GSET ETHTOOL_GSET +#define SPARC_ETH_SSET ETHTOOL_SSET + +/* Indicates what features are supported by the interface. 
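+ *
+ * Illustrative sketch, not part of the original header: a driver's
+ * @get_settings implementation typically reports these SUPPORTED_* bits
+ * together with the ADVERTISED_*, PORT_*, DUPLEX_* and AUTONEG_* values
+ * defined below, e.g. for a hypothetical 10/100 twisted-pair device
+ * (ecmd is the struct ethtool_cmd argument):
+ *
+ *	ecmd->supported   = SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
+ *			    SUPPORTED_TP | SUPPORTED_Autoneg;
+ *	ecmd->advertising = ADVERTISED_100baseT_Full | ADVERTISED_TP |
+ *			    ADVERTISED_Autoneg;
+ *	ecmd->port    = PORT_TP;
+ *	ecmd->duplex  = DUPLEX_FULL;
+ *	ecmd->autoneg = AUTONEG_ENABLE;
+ *	return 0;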
*/ +#define SUPPORTED_10baseT_Half (1 << 0) +#define SUPPORTED_10baseT_Full (1 << 1) +#define SUPPORTED_100baseT_Half (1 << 2) +#define SUPPORTED_100baseT_Full (1 << 3) +#define SUPPORTED_1000baseT_Half (1 << 4) +#define SUPPORTED_1000baseT_Full (1 << 5) +#define SUPPORTED_Autoneg (1 << 6) +#define SUPPORTED_TP (1 << 7) +#define SUPPORTED_AUI (1 << 8) +#define SUPPORTED_MII (1 << 9) +#define SUPPORTED_FIBRE (1 << 10) +#define SUPPORTED_BNC (1 << 11) +#define SUPPORTED_10000baseT_Full (1 << 12) +#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Asym_Pause (1 << 14) +#define SUPPORTED_2500baseX_Full (1 << 15) +#define SUPPORTED_Backplane (1 << 16) +#define SUPPORTED_1000baseKX_Full (1 << 17) +#define SUPPORTED_10000baseKX4_Full (1 << 18) +#define SUPPORTED_10000baseKR_Full (1 << 19) +#define SUPPORTED_10000baseR_FEC (1 << 20) +#define SUPPORTED_20000baseMLD2_Full (1 << 21) +#define SUPPORTED_20000baseKR2_Full (1 << 22) + +/* Indicates what features are advertised by the interface. */ +#define ADVERTISED_10baseT_Half (1 << 0) +#define ADVERTISED_10baseT_Full (1 << 1) +#define ADVERTISED_100baseT_Half (1 << 2) +#define ADVERTISED_100baseT_Full (1 << 3) +#define ADVERTISED_1000baseT_Half (1 << 4) +#define ADVERTISED_1000baseT_Full (1 << 5) +#define ADVERTISED_Autoneg (1 << 6) +#define ADVERTISED_TP (1 << 7) +#define ADVERTISED_AUI (1 << 8) +#define ADVERTISED_MII (1 << 9) +#define ADVERTISED_FIBRE (1 << 10) +#define ADVERTISED_BNC (1 << 11) +#define ADVERTISED_10000baseT_Full (1 << 12) +#define ADVERTISED_Pause (1 << 13) +#define ADVERTISED_Asym_Pause (1 << 14) +#define ADVERTISED_2500baseX_Full (1 << 15) +#define ADVERTISED_Backplane (1 << 16) +#define ADVERTISED_1000baseKX_Full (1 << 17) +#define ADVERTISED_10000baseKX4_Full (1 << 18) +#define ADVERTISED_10000baseKR_Full (1 << 19) +#define ADVERTISED_10000baseR_FEC (1 << 20) +#define ADVERTISED_20000baseMLD2_Full (1 << 21) +#define ADVERTISED_20000baseKR2_Full (1 << 22) + +/* The following are all involved in forcing a particular link + * mode for the device for setting things. When getting the + * devices settings, these indicate the current mode and whether + * it was forced up into this mode or autonegotiated. + */ + +/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_10000 10000 +#define SPEED_UNKNOWN -1 + +/* Duplex, half or full. */ +#define DUPLEX_HALF 0x00 +#define DUPLEX_FULL 0x01 +#define DUPLEX_UNKNOWN 0xff + +/* Which connector port. */ +#define PORT_TP 0x00 +#define PORT_AUI 0x01 +#define PORT_MII 0x02 +#define PORT_FIBRE 0x03 +#define PORT_BNC 0x04 +#define PORT_DA 0x05 +#define PORT_NONE 0xef +#define PORT_OTHER 0xff + +/* Which transceiver to use. */ +#define XCVR_INTERNAL 0x00 +#define XCVR_EXTERNAL 0x01 +#define XCVR_DUMMY1 0x02 +#define XCVR_DUMMY2 0x03 +#define XCVR_DUMMY3 0x04 + +/* Enable or disable autonegotiation. If this is set to enable, + * the forced link modes above are completely ignored. + */ +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +/* Mode MDI or MDI-X */ +#define ETH_TP_MDI_INVALID 0x00 +#define ETH_TP_MDI 0x01 +#define ETH_TP_MDI_X 0x02 + +/* Wake-On-Lan options. 
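+ *
+ * For illustration only (added, not from the original header): a driver
+ * that supports nothing but magic-packet wake-up would implement the
+ * @get_wol/@set_wol operations documented above roughly as follows;
+ * foo_wol_is_enabled() and foo_wol_enable() are hypothetical helpers.
+ *
+ *	static void foo_get_wol(struct net_device *dev,
+ *				struct ethtool_wolinfo *wol)
+ *	{
+ *		wol->supported = WAKE_MAGIC;
+ *		wol->wolopts   = foo_wol_is_enabled(dev) ? WAKE_MAGIC : 0;
+ *	}
+ *
+ *	static int foo_set_wol(struct net_device *dev,
+ *			       struct ethtool_wolinfo *wol)
+ *	{
+ *		if (wol->wolopts & ~WAKE_MAGIC)
+ *			return -EOPNOTSUPP;
+ *		foo_wol_enable(dev, wol->wolopts & WAKE_MAGIC);
+ *		return 0;
+ *	}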
*/ +#define WAKE_PHY (1 << 0) +#define WAKE_UCAST (1 << 1) +#define WAKE_MCAST (1 << 2) +#define WAKE_BCAST (1 << 3) +#define WAKE_ARP (1 << 4) +#define WAKE_MAGIC (1 << 5) +#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ + +/* L2-L4 network traffic flow types */ +#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ +#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ +#define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */ +#define AH_ESP_V4_FLOW 0x04 /* hash only */ +#define TCP_V6_FLOW 0x05 /* hash only */ +#define UDP_V6_FLOW 0x06 /* hash only */ +#define SCTP_V6_FLOW 0x07 /* hash only */ +#define AH_ESP_V6_FLOW 0x08 /* hash only */ +#define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */ +#define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */ +#define AH_V6_FLOW 0x0b /* hash only */ +#define ESP_V6_FLOW 0x0c /* hash only */ +#define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#define IPV4_FLOW 0x10 /* hash only */ +#define IPV6_FLOW 0x11 /* hash only */ +#define ETHER_FLOW 0x12 /* spec only (ether_spec) */ +/* Flag to enable additional fields in struct ethtool_rx_flow_spec */ +#define FLOW_EXT 0x80000000 + +/* L3-L4 network traffic flow hash options */ +#define RXH_L2DA (1 << 1) +#define RXH_VLAN (1 << 2) +#define RXH_L3_PROTO (1 << 3) +#define RXH_IP_SRC (1 << 4) +#define RXH_IP_DST (1 << 5) +#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */ +#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ +#define RXH_DISCARD (1 << 31) + +#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL + +/* Special RX classification rule insert location values */ +#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */ +#define RX_CLS_LOC_ANY 0xffffffff +#define RX_CLS_LOC_FIRST 0xfffffffe +#define RX_CLS_LOC_LAST 0xfffffffd + +/* Reset flags */ +/* The reset() operation must clear the flags for the components which + * were actually reset. On successful return, the flags indicate the + * components which were not reset, either because they do not exist + * in the hardware or because they cannot be reset independently. The + * driver must never reset any components that were not requested. + */ +enum ethtool_reset_flags { + /* These flags represent components dedicated to the interface + * the command is addressed to. Shift any flag left by + * ETH_RESET_SHARED_SHIFT to reset a shared component of the + * same type. 
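+ *
+ * A short example (added for clarity, not in the original comment): an
+ * @reset implementation must clear the bits it actually handled, so a
+ * hypothetical driver that can only reset its dedicated DMA engine would
+ * look roughly like:
+ *
+ *	static int foo_reset(struct net_device *dev, u32 *flags)
+ *	{
+ *		if (*flags & ETH_RESET_DMA) {
+ *			foo_dma_reset(dev);
+ *			*flags &= ~ETH_RESET_DMA;
+ *		}
+ *		return 0;
+ *	}
+ *
+ * foo_dma_reset() is hypothetical; clearing ETH_RESET_DMA on return tells
+ * the caller that the dedicated DMA engine really was reset.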
+ */ + ETH_RESET_MGMT = 1 << 0, /* Management processor */ + ETH_RESET_IRQ = 1 << 1, /* Interrupt requester */ + ETH_RESET_DMA = 1 << 2, /* DMA engine */ + ETH_RESET_FILTER = 1 << 3, /* Filtering/flow direction */ + ETH_RESET_OFFLOAD = 1 << 4, /* Protocol offload */ + ETH_RESET_MAC = 1 << 5, /* Media access controller */ + ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */ + ETH_RESET_RAM = 1 << 7, /* RAM shared between + * multiple components */ + + ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to + * this interface */ + ETH_RESET_ALL = 0xffffffff, /* All components used by this + * interface, even if shared */ +}; +#define ETH_RESET_SHARED_SHIFT 16 + +#endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h new file mode 100644 index 00000000..91bb4f27 --- /dev/null +++ b/include/linux/eventfd.h @@ -0,0 +1,83 @@ +/* + * include/linux/eventfd.h + * + * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> + * + */ + +#ifndef _LINUX_EVENTFD_H +#define _LINUX_EVENTFD_H + +#include <linux/fcntl.h> +#include <linux/file.h> +#include <linux/wait.h> + +/* + * CAREFUL: Check include/asm-generic/fcntl.h when defining + * new flags, since they might collide with O_* ones. We want + * to re-use O_* flags that couldn't possibly have a meaning + * from eventfd, in order to leave a free define-space for + * shared O_* flags. + */ +#define EFD_SEMAPHORE (1 << 0) +#define EFD_CLOEXEC O_CLOEXEC +#define EFD_NONBLOCK O_NONBLOCK + +#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) +#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) + +#ifdef CONFIG_EVENTFD + +struct file *eventfd_file_create(unsigned int count, int flags); +struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx); +void eventfd_ctx_put(struct eventfd_ctx *ctx); +struct file *eventfd_fget(int fd); +struct eventfd_ctx *eventfd_ctx_fdget(int fd); +struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); +int eventfd_signal(struct eventfd_ctx *ctx, int n); +ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); +int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, + __u64 *cnt); + +#else /* CONFIG_EVENTFD */ + +/* + * Ugly ugly ugly error layer to support modules that uses eventfd but + * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. 
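+ *
+ * For reference only (added, not part of the original comment): with
+ * CONFIG_EVENTFD enabled, a typical in-kernel producer built on the
+ * declarations above looks roughly like this:
+ *
+ *	ctx = eventfd_ctx_fdget(fd);
+ *	if (IS_ERR(ctx))
+ *		return PTR_ERR(ctx);
+ *	...
+ *	eventfd_signal(ctx, 1);
+ *	...
+ *	eventfd_ctx_put(ctx);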
+ */ +static inline struct file *eventfd_file_create(unsigned int count, int flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) +{ + return ERR_PTR(-ENOSYS); +} + +static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) +{ + return -ENOSYS; +} + +static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) +{ + +} + +static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, + __u64 *cnt) +{ + return -ENOSYS; +} + +static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, + wait_queue_t *wait, __u64 *cnt) +{ + return -ENOSYS; +} + +#endif + +#endif /* _LINUX_EVENTFD_H */ + diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h new file mode 100644 index 00000000..6f8be328 --- /dev/null +++ b/include/linux/eventpoll.h @@ -0,0 +1,121 @@ +/* + * include/linux/eventpoll.h ( Efficient event polling implementation ) + * Copyright (C) 2001,...,2006 Davide Libenzi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#ifndef _LINUX_EVENTPOLL_H +#define _LINUX_EVENTPOLL_H + +/* For O_CLOEXEC */ +#include <linux/fcntl.h> +#include <linux/types.h> + +/* Flags for epoll_create1. */ +#define EPOLL_CLOEXEC O_CLOEXEC + +/* Valid opcodes to issue to sys_epoll_ctl() */ +#define EPOLL_CTL_ADD 1 +#define EPOLL_CTL_DEL 2 +#define EPOLL_CTL_MOD 3 + +/* + * Request the handling of system wakeup events so as to prevent system suspends + * from happening while those events are being processed. + * + * Assuming neither EPOLLET nor EPOLLONESHOT is set, system suspends will not be + * re-allowed until epoll_wait is called again after consuming the wakeup + * event(s). + * + * Requires CAP_EPOLLWAKEUP + */ +#define EPOLLWAKEUP (1 << 29) + +/* Set the One Shot behaviour for the target file descriptor */ +#define EPOLLONESHOT (1 << 30) + +/* Set the Edge Triggered behaviour for the target file descriptor */ +#define EPOLLET (1 << 31) + +/* + * On x86-64 make the 64bit structure have the same alignment as the + * 32bit structure. This makes 32bit emulation easier. + * + * UML/x86_64 needs the same packing as x86_64 + */ +#ifdef __x86_64__ +#define EPOLL_PACKED __attribute__((packed)) +#else +#define EPOLL_PACKED +#endif + +struct epoll_event { + __u32 events; + __u64 data; +} EPOLL_PACKED; + +#ifdef __KERNEL__ + +/* Forward declarations to avoid compiler errors */ +struct file; + + +#ifdef CONFIG_EPOLL + +/* Used to initialize the epoll bits inside the "struct file" */ +static inline void eventpoll_init_file(struct file *file) +{ + INIT_LIST_HEAD(&file->f_ep_links); + INIT_LIST_HEAD(&file->f_tfile_llink); +} + + +/* Used to release the epoll bits inside the "struct file" */ +void eventpoll_release_file(struct file *file); + +/* + * This is called from inside fs/file_table.c:__fput() to unlink files + * from the eventpoll interface. We need to have this facility to cleanup + * correctly files that are closed without being removed from the eventpoll + * interface. + */ +static inline void eventpoll_release(struct file *file) +{ + + /* + * Fast check to avoid the get/release of the semaphore. Since + * we're doing this outside the semaphore lock, it might return + * false negatives, but we don't care. 
It'll help in 99.99% of cases + * to avoid the semaphore lock. False positives simply cannot happen + * because the file in on the way to be removed and nobody ( but + * eventpoll ) has still a reference to this file. + */ + if (likely(list_empty(&file->f_ep_links))) + return; + + /* + * The file is being closed while it is still linked to an epoll + * descriptor. We need to handle this by correctly unlinking it + * from its containers. + */ + eventpoll_release_file(file); +} + +#else + +static inline void eventpoll_init_file(struct file *file) {} +static inline void eventpoll_release(struct file *file) {} + +#endif + +#endif /* #ifdef __KERNEL__ */ + +#endif /* #ifndef _LINUX_EVENTPOLL_H */ + diff --git a/include/linux/evm.h b/include/linux/evm.h new file mode 100644 index 00000000..9fc13a76 --- /dev/null +++ b/include/linux/evm.h @@ -0,0 +1,100 @@ +/* + * evm.h + * + * Copyright (c) 2009 IBM Corporation + * Author: Mimi Zohar <zohar@us.ibm.com> + */ + +#ifndef _LINUX_EVM_H +#define _LINUX_EVM_H + +#include <linux/integrity.h> +#include <linux/xattr.h> + +struct integrity_iint_cache; + +#ifdef CONFIG_EVM +extern enum integrity_status evm_verifyxattr(struct dentry *dentry, + const char *xattr_name, + void *xattr_value, + size_t xattr_value_len, + struct integrity_iint_cache *iint); +extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr); +extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid); +extern int evm_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size); +extern void evm_inode_post_setxattr(struct dentry *dentry, + const char *xattr_name, + const void *xattr_value, + size_t xattr_value_len); +extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name); +extern void evm_inode_post_removexattr(struct dentry *dentry, + const char *xattr_name); +extern int evm_inode_init_security(struct inode *inode, + const struct xattr *xattr_array, + struct xattr *evm); +#ifdef CONFIG_FS_POSIX_ACL +extern int posix_xattr_acl(const char *xattrname); +#else +static inline int posix_xattr_acl(const char *xattrname) +{ + return 0; +} +#endif +#else +#ifdef CONFIG_INTEGRITY +static inline enum integrity_status evm_verifyxattr(struct dentry *dentry, + const char *xattr_name, + void *xattr_value, + size_t xattr_value_len, + struct integrity_iint_cache *iint) +{ + return INTEGRITY_UNKNOWN; +} +#endif + +static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) +{ + return 0; +} + +static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) +{ + return; +} + +static inline int evm_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size) +{ + return 0; +} + +static inline void evm_inode_post_setxattr(struct dentry *dentry, + const char *xattr_name, + const void *xattr_value, + size_t xattr_value_len) +{ + return; +} + +static inline int evm_inode_removexattr(struct dentry *dentry, + const char *xattr_name) +{ + return 0; +} + +static inline void evm_inode_post_removexattr(struct dentry *dentry, + const char *xattr_name) +{ + return; +} + +static inline int evm_inode_init_security(struct inode *inode, + const struct xattr *xattr_array, + struct xattr *evm) +{ + return 0; +} + +#endif /* CONFIG_EVM_H */ +#endif /* LINUX_EVM_H */ diff --git a/include/linux/export.h b/include/linux/export.h new file mode 100644 index 00000000..696c0f48 --- /dev/null +++ b/include/linux/export.h @@ -0,0 +1,89 @@ +#ifndef _LINUX_EXPORT_H +#define _LINUX_EXPORT_H +/* + * 
Export symbols from the kernel to modules. Forked from module.h + * to reduce the amount of pointless cruft we feed to gcc when only + * exporting a simple symbol or two. + * + * If you feel the need to add #include <linux/foo.h> to this file + * then you are doing something wrong and should go away silently. + */ + +/* Some toolchains use a `_' prefix for all user symbols. */ +#ifdef CONFIG_SYMBOL_PREFIX +#define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX +#else +#define MODULE_SYMBOL_PREFIX "" +#endif + +struct kernel_symbol +{ + unsigned long value; + const char *name; +}; + +#ifdef MODULE +extern struct module __this_module; +#define THIS_MODULE (&__this_module) +#else +#define THIS_MODULE ((struct module *)0) +#endif + +#ifdef CONFIG_MODULES + +#ifndef __GENKSYMS__ +#ifdef CONFIG_MODVERSIONS +/* Mark the CRC weak since genksyms apparently decides not to + * generate a checksums for some symbols */ +#define __CRC_SYMBOL(sym, sec) \ + extern void *__crc_##sym __attribute__((weak)); \ + static const unsigned long __kcrctab_##sym \ + __used \ + __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ + = (unsigned long) &__crc_##sym; +#else +#define __CRC_SYMBOL(sym, sec) +#endif + +/* For every exported symbol, place a struct in the __ksymtab section */ +#define __EXPORT_SYMBOL(sym, sec) \ + extern typeof(sym) sym; \ + __CRC_SYMBOL(sym, sec) \ + static const char __kstrtab_##sym[] \ + __attribute__((section("__ksymtab_strings"), aligned(1))) \ + = MODULE_SYMBOL_PREFIX #sym; \ + static const struct kernel_symbol __ksymtab_##sym \ + __used \ + __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ + = { (unsigned long)&sym, __kstrtab_##sym } + +#define EXPORT_SYMBOL(sym) \ + __EXPORT_SYMBOL(sym, "") + +#define EXPORT_SYMBOL_GPL(sym) \ + __EXPORT_SYMBOL(sym, "_gpl") + +#define EXPORT_SYMBOL_GPL_FUTURE(sym) \ + __EXPORT_SYMBOL(sym, "_gpl_future") + +#ifdef CONFIG_UNUSED_SYMBOLS +#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") +#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") +#else +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) +#endif + +#endif /* __GENKSYMS__ */ + +#else /* !CONFIG_MODULES... */ + +#define EXPORT_SYMBOL(sym) +#define EXPORT_SYMBOL_GPL(sym) +#define EXPORT_SYMBOL_GPL_FUTURE(sym) +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) + +#endif /* CONFIG_MODULES */ + +#endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h new file mode 100644 index 00000000..3a4cef53 --- /dev/null +++ b/include/linux/exportfs.h @@ -0,0 +1,196 @@ +#ifndef LINUX_EXPORTFS_H +#define LINUX_EXPORTFS_H 1 + +#include <linux/types.h> + +struct dentry; +struct inode; +struct super_block; +struct vfsmount; + +/* limit the handle size to NFSv4 handle size now */ +#define MAX_HANDLE_SZ 128 + +/* + * The fileid_type identifies how the file within the filesystem is encoded. + * In theory this is freely set and parsed by the filesystem, but we try to + * stick to conventions so we can share some generic code and don't confuse + * sniffers like ethereal/wireshark. + * + * The filesystem must not use the value '0' or '0xff'. + */ +enum fid_type { + /* + * The root, or export point, of the filesystem. + * (Never actually passed down to the filesystem. + */ + FILEID_ROOT = 0, + + /* + * 32bit inode number, 32 bit generation number. + */ + FILEID_INO32_GEN = 1, + + /* + * 32bit inode number, 32 bit generation number, + * 32 bit parent directory inode number. 
+ */ + FILEID_INO32_GEN_PARENT = 2, + + /* + * 64 bit object ID, 64 bit root object ID, + * 32 bit generation number. + */ + FILEID_BTRFS_WITHOUT_PARENT = 0x4d, + + /* + * 64 bit object ID, 64 bit root object ID, + * 32 bit generation number, + * 64 bit parent object ID, 32 bit parent generation. + */ + FILEID_BTRFS_WITH_PARENT = 0x4e, + + /* + * 64 bit object ID, 64 bit root object ID, + * 32 bit generation number, + * 64 bit parent object ID, 32 bit parent generation, + * 64 bit parent root object ID. + */ + FILEID_BTRFS_WITH_PARENT_ROOT = 0x4f, + + /* + * 32 bit block number, 16 bit partition reference, + * 16 bit unused, 32 bit generation number. + */ + FILEID_UDF_WITHOUT_PARENT = 0x51, + + /* + * 32 bit block number, 16 bit partition reference, + * 16 bit unused, 32 bit generation number, + * 32 bit parent block number, 32 bit parent generation number + */ + FILEID_UDF_WITH_PARENT = 0x52, + + /* + * 64 bit checkpoint number, 64 bit inode number, + * 32 bit generation number. + */ + FILEID_NILFS_WITHOUT_PARENT = 0x61, + + /* + * 64 bit checkpoint number, 64 bit inode number, + * 32 bit generation number, 32 bit parent generation. + * 64 bit parent inode number. + */ + FILEID_NILFS_WITH_PARENT = 0x62, +}; + +struct fid { + union { + struct { + u32 ino; + u32 gen; + u32 parent_ino; + u32 parent_gen; + } i32; + struct { + u32 block; + u16 partref; + u16 parent_partref; + u32 generation; + u32 parent_block; + u32 parent_generation; + } udf; + __u32 raw[0]; + }; +}; + +/** + * struct export_operations - for nfsd to communicate with file systems + * @encode_fh: encode a file handle fragment from a dentry + * @fh_to_dentry: find the implied object and get a dentry for it + * @fh_to_parent: find the implied object's parent and get a dentry for it + * @get_name: find the name for a given inode in a given directory + * @get_parent: find the parent of a given directory + * @commit_metadata: commit metadata changes to stable storage + * + * See Documentation/filesystems/nfs/Exporting for details on how to use + * this interface correctly. + * + * encode_fh: + * @encode_fh should store in the file handle fragment @fh (using at most + * @max_len bytes) information that can be used by @decode_fh to recover the + * file referred to by the &struct dentry @de. If the @connectable flag is + * set, the encode_fh() should store sufficient information so that a good + * attempt can be made to find not only the file but also it's place in the + * filesystem. This typically means storing a reference to de->d_parent in + * the filehandle fragment. encode_fh() should return the fileid_type on + * success and on error returns 255 (if the space needed to encode fh is + * greater than @max_len*4 bytes). On error @max_len contains the minimum + * size(in 4 byte unit) needed to encode the file handle. + * + * fh_to_dentry: + * @fh_to_dentry is given a &struct super_block (@sb) and a file handle + * fragment (@fh, @fh_len). It should return a &struct dentry which refers + * to the same file that the file handle fragment refers to. If it cannot, + * it should return a %NULL pointer if the file was found but no acceptable + * &dentries were available, or an %ERR_PTR error code indicating why it + * couldn't be found (e.g. %ENOENT or %ENOMEM). Any suitable dentry can be + * returned including, if necessary, a new dentry created with d_alloc_root. + * The caller can then find any other extant dentries by following the + * d_alias links. 
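+ *
+ *    An illustrative sketch (added, not part of the original text): a
+ *    filesystem using 32 bit inode and generation numbers can usually
+ *    delegate to the generic helper declared at the end of this header,
+ *    with foo_get_inode() being a hypothetical inode lookup routine:
+ *
+ *	static struct dentry *foo_fh_to_dentry(struct super_block *sb,
+ *					       struct fid *fid,
+ *					       int fh_len, int fh_type)
+ *	{
+ *		return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ *					    foo_get_inode);
+ *	}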
+ *
+ * fh_to_parent:
+ *    Same as @fh_to_dentry, except that it returns a pointer to the parent
+ *    dentry if it was encoded into the filehandle fragment by @encode_fh.
+ *
+ * get_name:
+ *    @get_name should find a name for the given @child in the given @parent
+ *    directory.  The name should be stored in @name (with the
+ *    understanding that it is already pointing to a %NAME_MAX+1 sized
+ *    buffer).  get_name() should return %0 on success or a negative error
+ *    code on error.  @get_name will be called without @parent->i_mutex held.
+ *
+ * get_parent:
+ *    @get_parent should find the parent directory for the given @child which
+ *    is also a directory.  In the event that it cannot be found, or storage
+ *    space cannot be allocated, a %ERR_PTR should be returned.
+ *
+ * commit_metadata:
+ *    @commit_metadata should commit metadata changes to stable storage.
+ *
+ * Locking rules:
+ *    get_parent is called with child->d_inode->i_mutex down
+ *    get_name is not (which is possibly inconsistent)
+ */
+
+struct export_operations {
+	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
+			int connectable);
+	struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
+			int fh_len, int fh_type);
+	struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
+			int fh_len, int fh_type);
+	int (*get_name)(struct dentry *parent, char *name,
+			struct dentry *child);
+	struct dentry * (*get_parent)(struct dentry *child);
+	int (*commit_metadata)(struct inode *inode);
+};
+
+extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
+	int *max_len, int connectable);
+extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+	int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+	void *context);
+
+/*
+ * Generic helpers for filesystems.
+ */ +extern struct dentry *generic_fh_to_dentry(struct super_block *sb, + struct fid *fid, int fh_len, int fh_type, + struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); +extern struct dentry *generic_fh_to_parent(struct super_block *sb, + struct fid *fid, int fh_len, int fh_type, + struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); + +#endif /* LINUX_EXPORTFS_H */ diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h new file mode 100644 index 00000000..2723e715 --- /dev/null +++ b/include/linux/ext2_fs.h @@ -0,0 +1,42 @@ +/* + * linux/include/linux/ext2_fs.h + * + * Copyright (C) 1992, 1993, 1994, 1995 + * Remy Card (card@masi.ibp.fr) + * Laboratoire MASI - Institut Blaise Pascal + * Universite Pierre et Marie Curie (Paris VI) + * + * from + * + * linux/include/linux/minix_fs.h + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#ifndef _LINUX_EXT2_FS_H +#define _LINUX_EXT2_FS_H + +#include <linux/types.h> +#include <linux/magic.h> + +#define EXT2_NAME_LEN 255 + +/* + * Maximal count of links to a file + */ +#define EXT2_LINK_MAX 32000 + +#define EXT2_SB_MAGIC_OFFSET 0x38 +#define EXT2_SB_BLOCKS_OFFSET 0x04 +#define EXT2_SB_BSIZE_OFFSET 0x18 + +static inline u64 ext2_image_size(void *ext2_sb) +{ + __u8 *p = ext2_sb; + if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC)) + return 0; + return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) << + le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET)); +} + +#endif /* _LINUX_EXT2_FS_H */ diff --git a/include/linux/f75375s.h b/include/linux/f75375s.h new file mode 100644 index 00000000..e99e2250 --- /dev/null +++ b/include/linux/f75375s.h @@ -0,0 +1,21 @@ +/* + * f75375s.h - platform data structure for f75375s sensor + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007, Riku Voipio <riku.voipio@iki.fi> + */ + +#ifndef __LINUX_F75375S_H +#define __LINUX_F75375S_H + +/* We want to set fans spinning on systems where there is no + * BIOS to do that for us */ +struct f75375s_platform_data { + u8 pwm[2]; + u8 pwm_enable[2]; +}; + +#endif /* __LINUX_F75375S_H */ diff --git a/include/linux/fadvise.h b/include/linux/fadvise.h new file mode 100644 index 00000000..e8e74713 --- /dev/null +++ b/include/linux/fadvise.h @@ -0,0 +1,21 @@ +#ifndef FADVISE_H_INCLUDED +#define FADVISE_H_INCLUDED + +#define POSIX_FADV_NORMAL 0 /* No further special treatment. */ +#define POSIX_FADV_RANDOM 1 /* Expect random page references. */ +#define POSIX_FADV_SEQUENTIAL 2 /* Expect sequential page references. */ +#define POSIX_FADV_WILLNEED 3 /* Will need these pages. */ + +/* + * The advise values for POSIX_FADV_DONTNEED and POSIX_ADV_NOREUSE + * for s390-64 differ from the values for the rest of the world. + */ +#if defined(__s390x__) +#define POSIX_FADV_DONTNEED 6 /* Don't need these pages. */ +#define POSIX_FADV_NOREUSE 7 /* Data will be accessed once. */ +#else +#define POSIX_FADV_DONTNEED 4 /* Don't need these pages. */ +#define POSIX_FADV_NOREUSE 5 /* Data will be accessed once. 
*/ +#endif + +#endif /* FADVISE_H_INCLUDED */ diff --git a/include/linux/falloc.h b/include/linux/falloc.h new file mode 100644 index 00000000..73e0b628 --- /dev/null +++ b/include/linux/falloc.h @@ -0,0 +1,28 @@ +#ifndef _FALLOC_H_ +#define _FALLOC_H_ + +#define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */ +#define FALLOC_FL_PUNCH_HOLE 0x02 /* de-allocates range */ + +#ifdef __KERNEL__ + +/* + * Space reservation ioctls and argument structure + * are designed to be compatible with the legacy XFS ioctls. + */ +struct space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; /* len == 0 means until end of file */ + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; /* reserved area */ +}; + +#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv) +#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv) + +#endif /* __KERNEL__ */ + +#endif /* _FALLOC_H_ */ diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h new file mode 100644 index 00000000..6c6133f7 --- /dev/null +++ b/include/linux/fanotify.h @@ -0,0 +1,120 @@ +#ifndef _LINUX_FANOTIFY_H +#define _LINUX_FANOTIFY_H + +#include <linux/types.h> + +/* the following events that user-space can register for */ +#define FAN_ACCESS 0x00000001 /* File was accessed */ +#define FAN_MODIFY 0x00000002 /* File was modified */ +#define FAN_CLOSE_WRITE 0x00000008 /* Writtable file closed */ +#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */ +#define FAN_OPEN 0x00000020 /* File was opened */ + +#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ + +#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */ +#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */ + +#define FAN_ONDIR 0x40000000 /* event occurred against dir */ + +#define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */ + +/* helper events */ +#define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */ + +/* flags used for fanotify_init() */ +#define FAN_CLOEXEC 0x00000001 +#define FAN_NONBLOCK 0x00000002 + +/* These are NOT bitwise flags. Both bits are used togther. */ +#define FAN_CLASS_NOTIF 0x00000000 +#define FAN_CLASS_CONTENT 0x00000004 +#define FAN_CLASS_PRE_CONTENT 0x00000008 +#define FAN_ALL_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \ + FAN_CLASS_PRE_CONTENT) + +#define FAN_UNLIMITED_QUEUE 0x00000010 +#define FAN_UNLIMITED_MARKS 0x00000020 + +#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \ + FAN_ALL_CLASS_BITS | FAN_UNLIMITED_QUEUE |\ + FAN_UNLIMITED_MARKS) + +/* flags used for fanotify_modify_mark() */ +#define FAN_MARK_ADD 0x00000001 +#define FAN_MARK_REMOVE 0x00000002 +#define FAN_MARK_DONT_FOLLOW 0x00000004 +#define FAN_MARK_ONLYDIR 0x00000008 +#define FAN_MARK_MOUNT 0x00000010 +#define FAN_MARK_IGNORED_MASK 0x00000020 +#define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040 +#define FAN_MARK_FLUSH 0x00000080 +#ifdef __KERNEL__ +/* not valid from userspace, only kernel internal */ +#define FAN_MARK_ONDIR 0x00000100 +#endif + +#define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\ + FAN_MARK_REMOVE |\ + FAN_MARK_DONT_FOLLOW |\ + FAN_MARK_ONLYDIR |\ + FAN_MARK_MOUNT |\ + FAN_MARK_IGNORED_MASK |\ + FAN_MARK_IGNORED_SURV_MODIFY |\ + FAN_MARK_FLUSH) + +/* + * All of the events - we build the list by hand so that we can add flags in + * the future and not break backward compatibility. Apps will get only the + * events that they originally wanted. Be sure to add new events here! 
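+ *
+ * For illustration only (added, not in the original comment): a user space
+ * listener typically walks the events it reads with the FAN_EVENT_OK() and
+ * FAN_EVENT_NEXT() helpers defined further down, roughly like this
+ * (handle() and the path are placeholders):
+ *
+ *	fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
+ *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
+ *		      FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/mnt/data");
+ *	...
+ *	len = read(fd, buf, sizeof(buf));
+ *	metadata = (struct fanotify_event_metadata *)buf;
+ *	while (FAN_EVENT_OK(metadata, len)) {
+ *		handle(metadata->mask, metadata->fd, metadata->pid);
+ *		if (metadata->fd >= 0)
+ *			close(metadata->fd);
+ *		metadata = FAN_EVENT_NEXT(metadata, len);
+ *	}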
+ */ +#define FAN_ALL_EVENTS (FAN_ACCESS |\ + FAN_MODIFY |\ + FAN_CLOSE |\ + FAN_OPEN) + +/* + * All events which require a permission response from userspace + */ +#define FAN_ALL_PERM_EVENTS (FAN_OPEN_PERM |\ + FAN_ACCESS_PERM) + +#define FAN_ALL_OUTGOING_EVENTS (FAN_ALL_EVENTS |\ + FAN_ALL_PERM_EVENTS |\ + FAN_Q_OVERFLOW) + +#define FANOTIFY_METADATA_VERSION 3 + +struct fanotify_event_metadata { + __u32 event_len; + __u8 vers; + __u8 reserved; + __u16 metadata_len; + __aligned_u64 mask; + __s32 fd; + __s32 pid; +}; + +struct fanotify_response { + __s32 fd; + __u32 response; +}; + +/* Legit userspace responses to a _PERM event */ +#define FAN_ALLOW 0x01 +#define FAN_DENY 0x02 +/* No fd set in event */ +#define FAN_NOFD -1 + +/* Helper functions to deal with fanotify_event_metadata buffers */ +#define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata)) + +#define FAN_EVENT_NEXT(meta, len) ((len) -= (meta)->event_len, \ + (struct fanotify_event_metadata*)(((char *)(meta)) + \ + (meta)->event_len)) + +#define FAN_EVENT_OK(meta, len) ((long)(len) >= (long)FAN_EVENT_METADATA_LEN && \ + (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \ + (long)(meta)->event_len <= (long)(len)) + +#endif /* _LINUX_FANOTIFY_H */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h new file mode 100644 index 00000000..c6f996f2 --- /dev/null +++ b/include/linux/fault-inject.h @@ -0,0 +1,69 @@ +#ifndef _LINUX_FAULT_INJECT_H +#define _LINUX_FAULT_INJECT_H + +#ifdef CONFIG_FAULT_INJECTION + +#include <linux/types.h> +#include <linux/debugfs.h> +#include <linux/atomic.h> + +/* + * For explanation of the elements of this struct, see + * Documentation/fault-injection/fault-injection.txt + */ +struct fault_attr { + unsigned long probability; + unsigned long interval; + atomic_t times; + atomic_t space; + unsigned long verbose; + u32 task_filter; + unsigned long stacktrace_depth; + unsigned long require_start; + unsigned long require_end; + unsigned long reject_start; + unsigned long reject_end; + + unsigned long count; +}; + +#define FAULT_ATTR_INITIALIZER { \ + .interval = 1, \ + .times = ATOMIC_INIT(1), \ + .require_end = ULONG_MAX, \ + .stacktrace_depth = 32, \ + .verbose = 2, \ + } + +#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER +int setup_fault_attr(struct fault_attr *attr, char *str); +bool should_fail(struct fault_attr *attr, ssize_t size); + +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + +struct dentry *fault_create_debugfs_attr(const char *name, + struct dentry *parent, struct fault_attr *attr); + +#else /* CONFIG_FAULT_INJECTION_DEBUG_FS */ + +static inline struct dentry *fault_create_debugfs_attr(const char *name, + struct dentry *parent, struct fault_attr *attr) +{ + return ERR_PTR(-ENODEV); +} + +#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ + +#endif /* CONFIG_FAULT_INJECTION */ + +#ifdef CONFIG_FAILSLAB +extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags); +#else +static inline bool should_failslab(size_t size, gfp_t gfpflags, + unsigned long flags) +{ + return false; +} +#endif /* CONFIG_FAILSLAB */ + +#endif /* _LINUX_FAULT_INJECT_H */ diff --git a/include/linux/fb.h b/include/linux/fb.h new file mode 100644 index 00000000..d31cb682 --- /dev/null +++ b/include/linux/fb.h @@ -0,0 +1,1181 @@ +#ifndef _LINUX_FB_H +#define _LINUX_FB_H + +#include <linux/types.h> +#include <linux/i2c.h> +#ifdef __KERNEL__ +#include <linux/kgdb.h> +#endif /* __KERNEL__ */ + +/* Definitions of frame buffers */ + +#define FB_MAX 32 
/* sufficient for now */ + +/* ioctls + 0x46 is 'F' */ +#define FBIOGET_VSCREENINFO 0x4600 +#define FBIOPUT_VSCREENINFO 0x4601 +#define FBIOGET_FSCREENINFO 0x4602 +#define FBIOGETCMAP 0x4604 +#define FBIOPUTCMAP 0x4605 +#define FBIOPAN_DISPLAY 0x4606 +#ifdef __KERNEL__ +#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user) +#else +#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor) +#endif +/* 0x4607-0x460B are defined below */ +/* #define FBIOGET_MONITORSPEC 0x460C */ +/* #define FBIOPUT_MONITORSPEC 0x460D */ +/* #define FBIOSWITCH_MONIBIT 0x460E */ +#define FBIOGET_CON2FBMAP 0x460F +#define FBIOPUT_CON2FBMAP 0x4610 +#define FBIOBLANK 0x4611 /* arg: 0 or vesa level + 1 */ +#define FBIOGET_VBLANK _IOR('F', 0x12, struct fb_vblank) +#define FBIO_ALLOC 0x4613 +#define FBIO_FREE 0x4614 +#define FBIOGET_GLYPH 0x4615 +#define FBIOGET_HWCINFO 0x4616 +#define FBIOPUT_MODEINFO 0x4617 +#define FBIOGET_DISPINFO 0x4618 +#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) + +#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ +#define FB_TYPE_PLANES 1 /* Non interleaved planes */ +#define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */ +#define FB_TYPE_TEXT 3 /* Text/attributes */ +#define FB_TYPE_VGA_PLANES 4 /* EGA/VGA planes */ +#define FB_TYPE_FOURCC 5 /* Type identified by a V4L2 FOURCC */ + +#define FB_AUX_TEXT_MDA 0 /* Monochrome text */ +#define FB_AUX_TEXT_CGA 1 /* CGA/EGA/VGA Color text */ +#define FB_AUX_TEXT_S3_MMIO 2 /* S3 MMIO fasttext */ +#define FB_AUX_TEXT_MGA_STEP16 3 /* MGA Millenium I: text, attr, 14 reserved bytes */ +#define FB_AUX_TEXT_MGA_STEP8 4 /* other MGAs: text, attr, 6 reserved bytes */ +#define FB_AUX_TEXT_SVGA_GROUP 8 /* 8-15: SVGA tileblit compatible modes */ +#define FB_AUX_TEXT_SVGA_MASK 7 /* lower three bits says step */ +#define FB_AUX_TEXT_SVGA_STEP2 8 /* SVGA text mode: text, attr */ +#define FB_AUX_TEXT_SVGA_STEP4 9 /* SVGA text mode: text, attr, 2 reserved bytes */ +#define FB_AUX_TEXT_SVGA_STEP8 10 /* SVGA text mode: text, attr, 6 reserved bytes */ +#define FB_AUX_TEXT_SVGA_STEP16 11 /* SVGA text mode: text, attr, 14 reserved bytes */ +#define FB_AUX_TEXT_SVGA_LAST 15 /* reserved up to 15 */ + +#define FB_AUX_VGA_PLANES_VGA4 0 /* 16 color planes (EGA/VGA) */ +#define FB_AUX_VGA_PLANES_CFB4 1 /* CFB4 in planes (VGA) */ +#define FB_AUX_VGA_PLANES_CFB8 2 /* CFB8 in planes (VGA) */ + +#define FB_VISUAL_MONO01 0 /* Monochr. 1=Black 0=White */ +#define FB_VISUAL_MONO10 1 /* Monochr. 
1=White 0=Black */ +#define FB_VISUAL_TRUECOLOR 2 /* True color */ +#define FB_VISUAL_PSEUDOCOLOR 3 /* Pseudo color (like atari) */ +#define FB_VISUAL_DIRECTCOLOR 4 /* Direct color */ +#define FB_VISUAL_STATIC_PSEUDOCOLOR 5 /* Pseudo color readonly */ +#define FB_VISUAL_FOURCC 6 /* Visual identified by a V4L2 FOURCC */ + +#define FB_ACCEL_NONE 0 /* no hardware accelerator */ +#define FB_ACCEL_ATARIBLITT 1 /* Atari Blitter */ +#define FB_ACCEL_AMIGABLITT 2 /* Amiga Blitter */ +#define FB_ACCEL_S3_TRIO64 3 /* Cybervision64 (S3 Trio64) */ +#define FB_ACCEL_NCR_77C32BLT 4 /* RetinaZ3 (NCR 77C32BLT) */ +#define FB_ACCEL_S3_VIRGE 5 /* Cybervision64/3D (S3 ViRGE) */ +#define FB_ACCEL_ATI_MACH64GX 6 /* ATI Mach 64GX family */ +#define FB_ACCEL_DEC_TGA 7 /* DEC 21030 TGA */ +#define FB_ACCEL_ATI_MACH64CT 8 /* ATI Mach 64CT family */ +#define FB_ACCEL_ATI_MACH64VT 9 /* ATI Mach 64CT family VT class */ +#define FB_ACCEL_ATI_MACH64GT 10 /* ATI Mach 64CT family GT class */ +#define FB_ACCEL_SUN_CREATOR 11 /* Sun Creator/Creator3D */ +#define FB_ACCEL_SUN_CGSIX 12 /* Sun cg6 */ +#define FB_ACCEL_SUN_LEO 13 /* Sun leo/zx */ +#define FB_ACCEL_IMS_TWINTURBO 14 /* IMS Twin Turbo */ +#define FB_ACCEL_3DLABS_PERMEDIA2 15 /* 3Dlabs Permedia 2 */ +#define FB_ACCEL_MATROX_MGA2064W 16 /* Matrox MGA2064W (Millenium) */ +#define FB_ACCEL_MATROX_MGA1064SG 17 /* Matrox MGA1064SG (Mystique) */ +#define FB_ACCEL_MATROX_MGA2164W 18 /* Matrox MGA2164W (Millenium II) */ +#define FB_ACCEL_MATROX_MGA2164W_AGP 19 /* Matrox MGA2164W (Millenium II) */ +#define FB_ACCEL_MATROX_MGAG100 20 /* Matrox G100 (Productiva G100) */ +#define FB_ACCEL_MATROX_MGAG200 21 /* Matrox G200 (Myst, Mill, ...) */ +#define FB_ACCEL_SUN_CG14 22 /* Sun cgfourteen */ +#define FB_ACCEL_SUN_BWTWO 23 /* Sun bwtwo */ +#define FB_ACCEL_SUN_CGTHREE 24 /* Sun cgthree */ +#define FB_ACCEL_SUN_TCX 25 /* Sun tcx */ +#define FB_ACCEL_MATROX_MGAG400 26 /* Matrox G400 */ +#define FB_ACCEL_NV3 27 /* nVidia RIVA 128 */ +#define FB_ACCEL_NV4 28 /* nVidia RIVA TNT */ +#define FB_ACCEL_NV5 29 /* nVidia RIVA TNT2 */ +#define FB_ACCEL_CT_6555x 30 /* C&T 6555x */ +#define FB_ACCEL_3DFX_BANSHEE 31 /* 3Dfx Banshee */ +#define FB_ACCEL_ATI_RAGE128 32 /* ATI Rage128 family */ +#define FB_ACCEL_IGS_CYBER2000 33 /* CyberPro 2000 */ +#define FB_ACCEL_IGS_CYBER2010 34 /* CyberPro 2010 */ +#define FB_ACCEL_IGS_CYBER5000 35 /* CyberPro 5000 */ +#define FB_ACCEL_SIS_GLAMOUR 36 /* SiS 300/630/540 */ +#define FB_ACCEL_3DLABS_PERMEDIA3 37 /* 3Dlabs Permedia 3 */ +#define FB_ACCEL_ATI_RADEON 38 /* ATI Radeon family */ +#define FB_ACCEL_I810 39 /* Intel 810/815 */ +#define FB_ACCEL_SIS_GLAMOUR_2 40 /* SiS 315, 650, 740 */ +#define FB_ACCEL_SIS_XABRE 41 /* SiS 330 ("Xabre") */ +#define FB_ACCEL_I830 42 /* Intel 830M/845G/85x/865G */ +#define FB_ACCEL_NV_10 43 /* nVidia Arch 10 */ +#define FB_ACCEL_NV_20 44 /* nVidia Arch 20 */ +#define FB_ACCEL_NV_30 45 /* nVidia Arch 30 */ +#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ +#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ +#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ +#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ +#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */ +#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ +#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ +#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ +#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */ +#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ +#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic 
NM2090 */ +#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ +#define FB_ACCEL_NEOMAGIC_NM2097 93 /* NeoMagic NM2097 */ +#define FB_ACCEL_NEOMAGIC_NM2160 94 /* NeoMagic NM2160 */ +#define FB_ACCEL_NEOMAGIC_NM2200 95 /* NeoMagic NM2200 */ +#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */ +#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */ +#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */ +#define FB_ACCEL_PXA3XX 99 /* PXA3xx */ + +#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */ +#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */ +#define FB_ACCEL_SAVAGE3D_MV 0x82 /* S3 Savage3D-MV */ +#define FB_ACCEL_SAVAGE2000 0x83 /* S3 Savage2000 */ +#define FB_ACCEL_SAVAGE_MX_MV 0x84 /* S3 Savage/MX-MV */ +#define FB_ACCEL_SAVAGE_MX 0x85 /* S3 Savage/MX */ +#define FB_ACCEL_SAVAGE_IX_MV 0x86 /* S3 Savage/IX-MV */ +#define FB_ACCEL_SAVAGE_IX 0x87 /* S3 Savage/IX */ +#define FB_ACCEL_PROSAVAGE_PM 0x88 /* S3 ProSavage PM133 */ +#define FB_ACCEL_PROSAVAGE_KM 0x89 /* S3 ProSavage KM133 */ +#define FB_ACCEL_S3TWISTER_P 0x8a /* S3 Twister */ +#define FB_ACCEL_S3TWISTER_K 0x8b /* S3 TwisterK */ +#define FB_ACCEL_SUPERSAVAGE 0x8c /* S3 Supersavage */ +#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */ +#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */ + +#define FB_ACCEL_PUV3_UNIGFX 0xa0 /* PKUnity-v3 Unigfx */ + +#define FB_CAP_FOURCC 1 /* Device supports FOURCC-based formats */ + +struct fb_fix_screeninfo { + char id[16]; /* identification string eg "TT Builtin" */ + unsigned long smem_start; /* Start of frame buffer mem */ + /* (physical address) */ + __u32 smem_len; /* Length of frame buffer mem */ + __u32 type; /* see FB_TYPE_* */ + __u32 type_aux; /* Interleave for interleaved Planes */ + __u32 visual; /* see FB_VISUAL_* */ + __u16 xpanstep; /* zero if no hardware panning */ + __u16 ypanstep; /* zero if no hardware panning */ + __u16 ywrapstep; /* zero if no hardware ywrap */ + __u32 line_length; /* length of a line in bytes */ + unsigned long mmio_start; /* Start of Memory Mapped I/O */ + /* (physical address) */ + __u32 mmio_len; /* Length of Memory Mapped I/O */ + __u32 accel; /* Indicate to driver which */ + /* specific chip/card we have */ + __u16 capabilities; /* see FB_CAP_* */ + __u16 reserved[2]; /* Reserved for future compatibility */ +}; + +/* Interpretation of offset for color fields: All offsets are from the right, + * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you + * can use the offset as right argument to <<). A pixel afterwards is a bit + * stream and is written to video memory as that unmodified. + * + * For pseudocolor: offset and length should be the same for all color + * components. Offset specifies the position of the least significant bit + * of the pallette index in a pixel value. Length indicates the number + * of available palette entries (i.e. # of entries = 1 << length). 
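+ *
+ * A worked example (added, not part of the original comment): in a 16 bpp
+ * RGB565 true color mode the driver reports red.offset = 11, red.length = 5,
+ * green.offset = 5, green.length = 6, blue.offset = 0, blue.length = 5, and
+ * a pixel can be composed from 8 bit r/g/b components as:
+ *
+ *	pixel  = (r >> (8 - var->red.length))   << var->red.offset;
+ *	pixel |= (g >> (8 - var->green.length)) << var->green.offset;
+ *	pixel |= (b >> (8 - var->blue.length))  << var->blue.offset;
+ *
+ * where var points to the struct fb_var_screeninfo for the mode.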
+ */ +struct fb_bitfield { + __u32 offset; /* beginning of bitfield */ + __u32 length; /* length of bitfield */ + __u32 msb_right; /* != 0 : Most significant bit is */ + /* right */ +}; + +#define FB_NONSTD_HAM 1 /* Hold-And-Modify (HAM) */ +#define FB_NONSTD_REV_PIX_IN_B 2 /* order of pixels in each byte is reversed */ + +#define FB_ACTIVATE_NOW 0 /* set values immediately (or vbl)*/ +#define FB_ACTIVATE_NXTOPEN 1 /* activate on next open */ +#define FB_ACTIVATE_TEST 2 /* don't set, round up impossible */ +#define FB_ACTIVATE_MASK 15 + /* values */ +#define FB_ACTIVATE_VBL 16 /* activate values on next vbl */ +#define FB_CHANGE_CMAP_VBL 32 /* change colormap on vbl */ +#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */ +#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/ +#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */ + +#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */ + +#define FB_SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */ +#define FB_SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */ +#define FB_SYNC_EXT 4 /* external sync */ +#define FB_SYNC_COMP_HIGH_ACT 8 /* composite sync high active */ +#define FB_SYNC_BROADCAST 16 /* broadcast video timings */ + /* vtotal = 144d/288n/576i => PAL */ + /* vtotal = 121d/242n/484i => NTSC */ +#define FB_SYNC_ON_GREEN 32 /* sync on green */ + +#define FB_VMODE_NONINTERLACED 0 /* non interlaced */ +#define FB_VMODE_INTERLACED 1 /* interlaced */ +#define FB_VMODE_DOUBLE 2 /* double scan */ +#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */ +#define FB_VMODE_MASK 255 + +#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */ +#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */ +#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */ + +/* + * Display rotation support + */ +#define FB_ROTATE_UR 0 +#define FB_ROTATE_CW 1 +#define FB_ROTATE_UD 2 +#define FB_ROTATE_CCW 3 + +#define PICOS2KHZ(a) (1000000000UL/(a)) +#define KHZ2PICOS(a) (1000000000UL/(a)) + +struct fb_var_screeninfo { + __u32 xres; /* visible resolution */ + __u32 yres; + __u32 xres_virtual; /* virtual resolution */ + __u32 yres_virtual; + __u32 xoffset; /* offset from virtual to visible */ + __u32 yoffset; /* resolution */ + + __u32 bits_per_pixel; /* guess what */ + __u32 grayscale; /* 0 = color, 1 = grayscale, */ + /* >1 = FOURCC */ + struct fb_bitfield red; /* bitfield in fb mem if true color, */ + struct fb_bitfield green; /* else only length is significant */ + struct fb_bitfield blue; + struct fb_bitfield transp; /* transparency */ + + __u32 nonstd; /* != 0 Non standard pixel format */ + + __u32 activate; /* see FB_ACTIVATE_* */ + + __u32 height; /* height of picture in mm */ + __u32 width; /* width of picture in mm */ + + __u32 accel_flags; /* (OBSOLETE) see fb_info.flags */ + + /* Timing: All values in pixclocks, except pixclock (of course) */ + __u32 pixclock; /* pixel clock in ps (pico seconds) */ + __u32 left_margin; /* time from sync to picture */ + __u32 right_margin; /* time from picture to sync */ + __u32 upper_margin; /* time from sync to picture */ + __u32 lower_margin; + __u32 hsync_len; /* length of horizontal sync */ + __u32 vsync_len; /* length of vertical sync */ + __u32 sync; /* see FB_SYNC_* */ + __u32 vmode; /* see FB_VMODE_* */ + __u32 rotate; /* angle we rotate counter clockwise */ + __u32 colorspace; /* colorspace for FOURCC-based modes */ + __u32 reserved[4]; /* Reserved for future compatibility */ +}; + +struct fb_cmap { + __u32 start; /* First 
entry */ + __u32 len; /* Number of entries */ + __u16 *red; /* Red values */ + __u16 *green; + __u16 *blue; + __u16 *transp; /* transparency, can be NULL */ +}; + +struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +/* VESA Blanking Levels */ +#define VESA_NO_BLANKING 0 +#define VESA_VSYNC_SUSPEND 1 +#define VESA_HSYNC_SUSPEND 2 +#define VESA_POWERDOWN 3 + + +enum { + /* screen: unblanked, hsync: on, vsync: on */ + FB_BLANK_UNBLANK = VESA_NO_BLANKING, + + /* screen: blanked, hsync: on, vsync: on */ + FB_BLANK_NORMAL = VESA_NO_BLANKING + 1, + + /* screen: blanked, hsync: on, vsync: off */ + FB_BLANK_VSYNC_SUSPEND = VESA_VSYNC_SUSPEND + 1, + + /* screen: blanked, hsync: off, vsync: on */ + FB_BLANK_HSYNC_SUSPEND = VESA_HSYNC_SUSPEND + 1, + + /* screen: blanked, hsync: off, vsync: off */ + FB_BLANK_POWERDOWN = VESA_POWERDOWN + 1 +}; + +#define FB_VBLANK_VBLANKING 0x001 /* currently in a vertical blank */ +#define FB_VBLANK_HBLANKING 0x002 /* currently in a horizontal blank */ +#define FB_VBLANK_HAVE_VBLANK 0x004 /* vertical blanks can be detected */ +#define FB_VBLANK_HAVE_HBLANK 0x008 /* horizontal blanks can be detected */ +#define FB_VBLANK_HAVE_COUNT 0x010 /* global retrace counter is available */ +#define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */ +#define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */ +#define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */ +#define FB_VBLANK_HAVE_VSYNC 0x100 /* verical syncs can be detected */ + +struct fb_vblank { + __u32 flags; /* FB_VBLANK flags */ + __u32 count; /* counter of retraces since boot */ + __u32 vcount; /* current scanline position */ + __u32 hcount; /* current scandot position */ + __u32 reserved[4]; /* reserved for future compatibility */ +}; + +/* Internal HW accel */ +#define ROP_COPY 0 +#define ROP_XOR 1 + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_fillrect { + __u32 dx; /* screen-relative */ + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_image { + __u32 dx; /* Where to place image */ + __u32 dy; + __u32 width; /* Size of image */ + __u32 height; + __u32 fg_color; /* Only used when a mono bitmap */ + __u32 bg_color; + __u8 depth; /* Depth of the image */ + const char *data; /* Pointer to image data */ + struct fb_cmap cmap; /* color map info */ +}; + +/* + * hardware cursor control + */ + +#define FB_CUR_SETIMAGE 0x01 +#define FB_CUR_SETPOS 0x02 +#define FB_CUR_SETHOT 0x04 +#define FB_CUR_SETCMAP 0x08 +#define FB_CUR_SETSHAPE 0x10 +#define FB_CUR_SETSIZE 0x20 +#define FB_CUR_SETALL 0xFF + +struct fbcurpos { + __u16 x, y; +}; + +struct fb_cursor { + __u16 set; /* what to set */ + __u16 enable; /* cursor on/off */ + __u16 rop; /* bitop operation */ + const char *mask; /* cursor mask bits */ + struct fbcurpos hot; /* cursor hot spot */ + struct fb_image image; /* Cursor image */ +}; + +#ifdef CONFIG_FB_BACKLIGHT +/* Settings for the generic backlight code */ +#define FB_BACKLIGHT_LEVELS 128 +#define FB_BACKLIGHT_MAX 0xFF +#endif + +#ifdef __KERNEL__ + +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/workqueue.h> +#include <linux/notifier.h> +#include <linux/list.h> +#include <linux/backlight.h> +#include <linux/slab.h> +#include <asm/io.h> + +struct vm_area_struct; +struct fb_info; +struct device; +struct file; + +/* Definitions below are used in the parsed monitor specs */ +#define FB_DPMS_ACTIVE_OFF 1 +#define FB_DPMS_SUSPEND 2 +#define FB_DPMS_STANDBY 4 + 
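+/*
+ * Illustrative note (added, not part of the original header): once a driver
+ * has filled struct fb_monspecs (defined below) from the monitor's EDID,
+ * the FB_DPMS_* bits above can be tested in its dpms field, e.g.:
+ *
+ *	if (info->monspecs.dpms & FB_DPMS_SUSPEND)
+ *		foo_enable_dpms_suspend(info);
+ *
+ * where info is the driver's struct fb_info and foo_enable_dpms_suspend()
+ * is a hypothetical driver helper.
+ */
+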
+#define FB_DISP_DDI 1 +#define FB_DISP_ANA_700_300 2 +#define FB_DISP_ANA_714_286 4 +#define FB_DISP_ANA_1000_400 8 +#define FB_DISP_ANA_700_000 16 + +#define FB_DISP_MONO 32 +#define FB_DISP_RGB 64 +#define FB_DISP_MULTI 128 +#define FB_DISP_UNKNOWN 256 + +#define FB_SIGNAL_NONE 0 +#define FB_SIGNAL_BLANK_BLANK 1 +#define FB_SIGNAL_SEPARATE 2 +#define FB_SIGNAL_COMPOSITE 4 +#define FB_SIGNAL_SYNC_ON_GREEN 8 +#define FB_SIGNAL_SERRATION_ON 16 + +#define FB_MISC_PRIM_COLOR 1 +#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */ +struct fb_chroma { + __u32 redx; /* in fraction of 1024 */ + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_monspecs { + struct fb_chroma chroma; + struct fb_videomode *modedb; /* mode database */ + __u8 manufacturer[4]; /* Manufacturer */ + __u8 monitor[14]; /* Monitor String */ + __u8 serial_no[14]; /* Serial Number */ + __u8 ascii[14]; /* ? */ + __u32 modedb_len; /* mode database length */ + __u32 model; /* Monitor Model */ + __u32 serial; /* Serial Number - Integer */ + __u32 year; /* Year manufactured */ + __u32 week; /* Week Manufactured */ + __u32 hfmin; /* hfreq lower limit (Hz) */ + __u32 hfmax; /* hfreq upper limit (Hz) */ + __u32 dclkmin; /* pixelclock lower limit (Hz) */ + __u32 dclkmax; /* pixelclock upper limit (Hz) */ + __u16 input; /* display type - see FB_DISP_* */ + __u16 dpms; /* DPMS support - see FB_DPMS_ */ + __u16 signal; /* Signal Type - see FB_SIGNAL_* */ + __u16 vfmin; /* vfreq lower limit (Hz) */ + __u16 vfmax; /* vfreq upper limit (Hz) */ + __u16 gamma; /* Gamma - in fractions of 100 */ + __u16 gtf : 1; /* supports GTF */ + __u16 misc; /* Misc flags - see FB_MISC_* */ + __u8 version; /* EDID version... */ + __u8 revision; /* ...and revision */ + __u8 max_x; /* Maximum horizontal size (cm) */ + __u8 max_y; /* Maximum vertical size (cm) */ +}; + +struct fb_cmap_user { + __u32 start; /* First entry */ + __u32 len; /* Number of entries */ + __u16 __user *red; /* Red values */ + __u16 __user *green; + __u16 __user *blue; + __u16 __user *transp; /* transparency, can be NULL */ +}; + +struct fb_image_user { + __u32 dx; /* Where to place image */ + __u32 dy; + __u32 width; /* Size of image */ + __u32 height; + __u32 fg_color; /* Only used when a mono bitmap */ + __u32 bg_color; + __u8 depth; /* Depth of the image */ + const char __user *data; /* Pointer to image data */ + struct fb_cmap_user cmap; /* color map info */ +}; + +struct fb_cursor_user { + __u16 set; /* what to set */ + __u16 enable; /* cursor on/off */ + __u16 rop; /* bitop operation */ + const char __user *mask; /* cursor mask bits */ + struct fbcurpos hot; /* cursor hot spot */ + struct fb_image_user image; /* Cursor image */ +}; + +/* + * Register/unregister for framebuffer events + */ + +/* The resolution of the passed in fb_info about to change */ +#define FB_EVENT_MODE_CHANGE 0x01 +/* The display on this fb_info is beeing suspended, no access to the + * framebuffer is allowed any more after that call returns + */ +#define FB_EVENT_SUSPEND 0x02 +/* The display on this fb_info was resumed, you can restore the display + * if you own it + */ +#define FB_EVENT_RESUME 0x03 +/* An entry from the modelist was removed */ +#define FB_EVENT_MODE_DELETE 0x04 +/* A driver registered itself */ +#define FB_EVENT_FB_REGISTERED 0x05 +/* A driver unregistered itself */ +#define FB_EVENT_FB_UNREGISTERED 0x06 +/* CONSOLE-SPECIFIC: get console to framebuffer mapping */ +#define FB_EVENT_GET_CONSOLE_MAP 0x07 
+/* CONSOLE-SPECIFIC: set console to framebuffer mapping */ +#define FB_EVENT_SET_CONSOLE_MAP 0x08 +/* A hardware display blank change occurred */ +#define FB_EVENT_BLANK 0x09 +/* Private modelist is to be replaced */ +#define FB_EVENT_NEW_MODELIST 0x0A +/* The resolution of the passed in fb_info about to change and + all vc's should be changed */ +#define FB_EVENT_MODE_CHANGE_ALL 0x0B +/* A software display blank change occurred */ +#define FB_EVENT_CONBLANK 0x0C +/* Get drawing requirements */ +#define FB_EVENT_GET_REQ 0x0D +/* Unbind from the console if possible */ +#define FB_EVENT_FB_UNBIND 0x0E +/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */ +#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F + +struct fb_event { + struct fb_info *info; + void *data; +}; + +struct fb_blit_caps { + u32 x; + u32 y; + u32 len; + u32 flags; +}; + +extern int fb_register_client(struct notifier_block *nb); +extern int fb_unregister_client(struct notifier_block *nb); +extern int fb_notifier_call_chain(unsigned long val, void *v); +/* + * Pixmap structure definition + * + * The purpose of this structure is to translate data + * from the hardware independent format of fbdev to what + * format the hardware needs. + */ + +#define FB_PIXMAP_DEFAULT 1 /* used internally by fbcon */ +#define FB_PIXMAP_SYSTEM 2 /* memory is in system RAM */ +#define FB_PIXMAP_IO 4 /* memory is iomapped */ +#define FB_PIXMAP_SYNC 256 /* set if GPU can DMA */ + +struct fb_pixmap { + u8 *addr; /* pointer to memory */ + u32 size; /* size of buffer in bytes */ + u32 offset; /* current offset to buffer */ + u32 buf_align; /* byte alignment of each bitmap */ + u32 scan_align; /* alignment per scanline */ + u32 access_align; /* alignment per read/write (bits) */ + u32 flags; /* see FB_PIXMAP_* */ + u32 blit_x; /* supported bit block dimensions (1-32)*/ + u32 blit_y; /* Format: blit_x = 1 << (width - 1) */ + /* blit_y = 1 << (height - 1) */ + /* if 0, will be set to 0xffffffff (all)*/ + /* access methods */ + void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size); + void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size); +}; + +#ifdef CONFIG_FB_DEFERRED_IO +struct fb_deferred_io { + /* delay between mkwrite and deferred handler */ + unsigned long delay; + struct mutex lock; /* mutex that protects the page list */ + struct list_head pagelist; /* list of touched pages */ + /* callback */ + void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); +}; +#endif + +/* + * Frame buffer operations + * + * LOCKING NOTE: those functions must _ALL_ be called with the console + * semaphore held, this is the only suitable locking mechanism we have + * in 2.6. Some may be called at interrupt time at this point though. + * + * The exception to this is the debug related hooks. Putting the fb + * into a debug state (e.g. flipping to the kernel console) and restoring + * it must be done in a lock-free manner, so low level drivers should + * keep track of the initial console (if applicable) and may need to + * perform direct, unlocked hardware writes in these hooks. 
+ */ + +struct fb_ops { + /* open/release and usage marking */ + struct module *owner; + int (*fb_open)(struct fb_info *info, int user); + int (*fb_release)(struct fb_info *info, int user); + + /* For framebuffers with strange non linear layouts or that do not + * work with normal memory mapped access + */ + ssize_t (*fb_read)(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*fb_write)(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); + + /* checks var and eventually tweaks it to something supported, + * DO NOT MODIFY PAR */ + int (*fb_check_var)(struct fb_var_screeninfo *var, struct fb_info *info); + + /* set the video mode according to info->var */ + int (*fb_set_par)(struct fb_info *info); + + /* set color register */ + int (*fb_setcolreg)(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, struct fb_info *info); + + /* set color registers in batch */ + int (*fb_setcmap)(struct fb_cmap *cmap, struct fb_info *info); + + /* blank display */ + int (*fb_blank)(int blank, struct fb_info *info); + + /* pan display */ + int (*fb_pan_display)(struct fb_var_screeninfo *var, struct fb_info *info); + + /* Draws a rectangle */ + void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect); + /* Copy data from area to another */ + void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region); + /* Draws a image to the display */ + void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image); + + /* Draws cursor */ + int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor); + + /* Rotates the display */ + void (*fb_rotate)(struct fb_info *info, int angle); + + /* wait for blit idle, optional */ + int (*fb_sync)(struct fb_info *info); + + /* perform fb specific ioctl (optional) */ + int (*fb_ioctl)(struct fb_info *info, unsigned int cmd, + unsigned long arg); + + /* Handle 32bit compat ioctl (optional) */ + int (*fb_compat_ioctl)(struct fb_info *info, unsigned cmd, + unsigned long arg); + + /* perform fb specific mmap */ + int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma); + + /* get capability given var */ + void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps, + struct fb_var_screeninfo *var); + + /* teardown any resources to do with this framebuffer */ + void (*fb_destroy)(struct fb_info *info); + + /* called at KDB enter and leave time to prepare the console */ + int (*fb_debug_enter)(struct fb_info *info); + int (*fb_debug_leave)(struct fb_info *info); +}; + +#ifdef CONFIG_FB_TILEBLITTING +#define FB_TILE_CURSOR_NONE 0 +#define FB_TILE_CURSOR_UNDERLINE 1 +#define FB_TILE_CURSOR_LOWER_THIRD 2 +#define FB_TILE_CURSOR_LOWER_HALF 3 +#define FB_TILE_CURSOR_TWO_THIRDS 4 +#define FB_TILE_CURSOR_BLOCK 5 + +struct fb_tilemap { + __u32 width; /* width of each tile in pixels */ + __u32 height; /* height of each tile in scanlines */ + __u32 depth; /* color depth of each tile */ + __u32 length; /* number of tiles in the map */ + const __u8 *data; /* actual tile map: a bitmap array, packed + to the nearest byte */ +}; + +struct fb_tilerect { + __u32 sx; /* origin in the x-axis */ + __u32 sy; /* origin in the y-axis */ + __u32 width; /* number of tiles in the x-axis */ + __u32 height; /* number of tiles in the y-axis */ + __u32 index; /* what tile to use: index to tile map */ + __u32 fg; /* foreground color */ + __u32 bg; /* background color */ + __u32 rop; /* raster operation */ +}; + +struct fb_tilearea { + __u32 sx; /* source origin in the 
x-axis */ + __u32 sy; /* source origin in the y-axis */ + __u32 dx; /* destination origin in the x-axis */ + __u32 dy; /* destination origin in the y-axis */ + __u32 width; /* number of tiles in the x-axis */ + __u32 height; /* number of tiles in the y-axis */ +}; + +struct fb_tileblit { + __u32 sx; /* origin in the x-axis */ + __u32 sy; /* origin in the y-axis */ + __u32 width; /* number of tiles in the x-axis */ + __u32 height; /* number of tiles in the y-axis */ + __u32 fg; /* foreground color */ + __u32 bg; /* background color */ + __u32 length; /* number of tiles to draw */ + __u32 *indices; /* array of indices to tile map */ +}; + +struct fb_tilecursor { + __u32 sx; /* cursor position in the x-axis */ + __u32 sy; /* cursor position in the y-axis */ + __u32 mode; /* 0 = erase, 1 = draw */ + __u32 shape; /* see FB_TILE_CURSOR_* */ + __u32 fg; /* foreground color */ + __u32 bg; /* background color */ +}; + +struct fb_tile_ops { + /* set tile characteristics */ + void (*fb_settile)(struct fb_info *info, struct fb_tilemap *map); + + /* all dimensions from hereon are in terms of tiles */ + + /* move a rectangular region of tiles from one area to another*/ + void (*fb_tilecopy)(struct fb_info *info, struct fb_tilearea *area); + /* fill a rectangular region with a tile */ + void (*fb_tilefill)(struct fb_info *info, struct fb_tilerect *rect); + /* copy an array of tiles */ + void (*fb_tileblit)(struct fb_info *info, struct fb_tileblit *blit); + /* cursor */ + void (*fb_tilecursor)(struct fb_info *info, + struct fb_tilecursor *cursor); + /* get maximum length of the tile map */ + int (*fb_get_tilemax)(struct fb_info *info); +}; +#endif /* CONFIG_FB_TILEBLITTING */ + +/* FBINFO_* = fb_info.flags bit flags */ +#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */ +#define FBINFO_HWACCEL_DISABLED 0x0002 + /* When FBINFO_HWACCEL_DISABLED is set: + * Hardware acceleration is turned off. Software implementations + * of required functions (copyarea(), fillrect(), and imageblit()) + * takes over; acceleration engine should be in a quiescent state */ + +/* hints */ +#define FBINFO_VIRTFB 0x0004 /* FB is System RAM, not device. */ +#define FBINFO_PARTIAL_PAN_OK 0x0040 /* otw use pan only for double-buffering */ +#define FBINFO_READS_FAST 0x0080 /* soft-copy faster than rendering */ + +/* hardware supported ops */ +/* semantics: when a bit is set, it indicates that the operation is + * accelerated by hardware. + * required functions will still work even if the bit is not set. + * optional functions may not even exist if the flag bit is not set. + */ +#define FBINFO_HWACCEL_NONE 0x0000 +#define FBINFO_HWACCEL_COPYAREA 0x0100 /* required */ +#define FBINFO_HWACCEL_FILLRECT 0x0200 /* required */ +#define FBINFO_HWACCEL_IMAGEBLIT 0x0400 /* required */ +#define FBINFO_HWACCEL_ROTATE 0x0800 /* optional */ +#define FBINFO_HWACCEL_XPAN 0x1000 /* optional */ +#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ +#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ + +#define FBINFO_MISC_USEREVENT 0x10000 /* event request + from userspace */ +#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ + +/* A driver may set this flag to indicate that it does want a set_par to be + * called every time when fbcon_switch is executed. The advantage is that with + * this flag set you can really be sure that set_par is always called before + * any of the functions dependent on the correct hardware state or altering + * that state, even if you are using some broken X releases. 
The disadvantage + * is that it introduces unwanted delays to every console switch if set_par + * is slow. It is a good idea to try this flag in the drivers initialization + * code whenever there is a bug report related to switching between X and the + * framebuffer console. + */ +#define FBINFO_MISC_ALWAYS_SETPAR 0x40000 + +/* where the fb is a firmware driver, and can be replaced with a proper one */ +#define FBINFO_MISC_FIRMWARE 0x80000 +/* + * Host and GPU endianness differ. + */ +#define FBINFO_FOREIGN_ENDIAN 0x100000 +/* + * Big endian math. This is the same flags as above, but with different + * meaning, it is set by the fb subsystem depending FOREIGN_ENDIAN flag + * and host endianness. Drivers should not use this flag. + */ +#define FBINFO_BE_MATH 0x100000 + +/* report to the VT layer that this fb driver can accept forced console + output like oopses */ +#define FBINFO_CAN_FORCE_OUTPUT 0x200000 + +struct fb_info { + atomic_t count; + int node; + int flags; + struct mutex lock; /* Lock for open/release/ioctl funcs */ + struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ + struct fb_var_screeninfo var; /* Current var */ + struct fb_fix_screeninfo fix; /* Current fix */ + struct fb_monspecs monspecs; /* Current Monitor specs */ + struct work_struct queue; /* Framebuffer event queue */ + struct fb_pixmap pixmap; /* Image hardware mapper */ + struct fb_pixmap sprite; /* Cursor hardware mapper */ + struct fb_cmap cmap; /* Current cmap */ + struct list_head modelist; /* mode list */ + struct fb_videomode *mode; /* current mode */ + +#ifdef CONFIG_FB_BACKLIGHT + /* assigned backlight device */ + /* set before framebuffer registration, + remove after unregister */ + struct backlight_device *bl_dev; + + /* Backlight level curve */ + struct mutex bl_curve_mutex; + u8 bl_curve[FB_BACKLIGHT_LEVELS]; +#endif +#ifdef CONFIG_FB_DEFERRED_IO + struct delayed_work deferred_work; + struct fb_deferred_io *fbdefio; +#endif + + struct fb_ops *fbops; + struct device *device; /* This is the parent */ + struct device *dev; /* This is this fb device */ + int class_flag; /* private sysfs flags */ +#ifdef CONFIG_FB_TILEBLITTING + struct fb_tile_ops *tileops; /* Tile Blitting */ +#endif + char __iomem *screen_base; /* Virtual address */ + unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ + void *pseudo_palette; /* Fake palette of 16 colors */ +#define FBINFO_STATE_RUNNING 0 +#define FBINFO_STATE_SUSPENDED 1 + u32 state; /* Hardware state i.e suspend */ + void *fbcon_par; /* fbcon use-only private area */ + /* From here on everything is device dependent */ + void *par; + /* we need the PCI or similar aperture base/size not + smem_start/size as smem_start may just be an object + allocated inside the aperture so may not actually overlap */ + struct apertures_struct { + unsigned int count; + struct aperture { + resource_size_t base; + resource_size_t size; + } ranges[0]; + } *apertures; +}; + +static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { + struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct) + + max_num * sizeof(struct aperture), GFP_KERNEL); + if (!a) + return NULL; + a->count = max_num; + return a; +} + +#ifdef MODULE +#define FBINFO_DEFAULT FBINFO_MODULE +#else +#define FBINFO_DEFAULT 0 +#endif + +// This will go away +#define FBINFO_FLAG_MODULE FBINFO_MODULE +#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT + +/* This will go away + * fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags + * when it wants to turn the acceleration 
engine on. This is + * really a separate operation, and should be modified via sysfs. + * But for now, we leave it broken with the following define + */ +#define STUPID_ACCELF_TEXT_SHIT + +// This will go away +#if defined(__sparc__) + +/* We map all of our framebuffers such that big-endian accesses + * are what we want, so the following is sufficient. + */ + +// This will go away +#define fb_readb sbus_readb +#define fb_readw sbus_readw +#define fb_readl sbus_readl +#define fb_readq sbus_readq +#define fb_writeb sbus_writeb +#define fb_writew sbus_writew +#define fb_writel sbus_writel +#define fb_writeq sbus_writeq +#define fb_memset sbus_memset_io +#define fb_memcpy_fromfb sbus_memcpy_fromio +#define fb_memcpy_tofb sbus_memcpy_toio + +#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) + +#define fb_readb __raw_readb +#define fb_readw __raw_readw +#define fb_readl __raw_readl +#define fb_readq __raw_readq +#define fb_writeb __raw_writeb +#define fb_writew __raw_writew +#define fb_writel __raw_writel +#define fb_writeq __raw_writeq +#define fb_memset memset_io +#define fb_memcpy_fromfb memcpy_fromio +#define fb_memcpy_tofb memcpy_toio + +#else + +#define fb_readb(addr) (*(volatile u8 *) (addr)) +#define fb_readw(addr) (*(volatile u16 *) (addr)) +#define fb_readl(addr) (*(volatile u32 *) (addr)) +#define fb_readq(addr) (*(volatile u64 *) (addr)) +#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b)) +#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b)) +#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b)) +#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b)) +#define fb_memset memset +#define fb_memcpy_fromfb memcpy +#define fb_memcpy_tofb memcpy + +#endif + +#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0) +#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \ + (val) << (bits)) +#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? 
(val) << (bits) : \ + (val) >> (bits)) + + /* + * `Generic' versions of the frame buffer device operations + */ + +extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var); +extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var); +extern int fb_blank(struct fb_info *info, int blank); +extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); +extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image); +/* + * Drawing operations where framebuffer is in system RAM + */ +extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area); +extern void sys_imageblit(struct fb_info *info, const struct fb_image *image); +extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); +extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); + +/* drivers/video/fbmem.c */ +extern int register_framebuffer(struct fb_info *fb_info); +extern int unregister_framebuffer(struct fb_info *fb_info); +extern int unlink_framebuffer(struct fb_info *fb_info); +extern void remove_conflicting_framebuffers(struct apertures_struct *a, + const char *name, bool primary); +extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); +extern int fb_show_logo(struct fb_info *fb_info, int rotate); +extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); +extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, + u32 height, u32 shift_high, u32 shift_low, u32 mod); +extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height); +extern void fb_set_suspend(struct fb_info *info, int state); +extern int fb_get_color_depth(struct fb_var_screeninfo *var, + struct fb_fix_screeninfo *fix); +extern int fb_get_options(char *name, char **option); +extern int fb_new_modelist(struct fb_info *info); + +extern struct fb_info *registered_fb[FB_MAX]; +extern int num_registered_fb; +extern struct class *fb_class; + +extern int lock_fb_info(struct fb_info *info); + +static inline void unlock_fb_info(struct fb_info *info) +{ + mutex_unlock(&info->lock); +} + +static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, + u8 *src, u32 s_pitch, u32 height) +{ + int i, j; + + d_pitch -= s_pitch; + + for (i = height; i--; ) { + /* s_pitch is a few bytes at the most, memcpy is suboptimal */ + for (j = 0; j < s_pitch; j++) + *dst++ = *src++; + dst += d_pitch; + } +} + +/* drivers/video/fb_defio.c */ +extern void fb_deferred_io_init(struct fb_info *info); +extern void fb_deferred_io_open(struct fb_info *info, + struct inode *inode, + struct file *file); +extern void fb_deferred_io_cleanup(struct fb_info *info); +extern int fb_deferred_io_fsync(struct file *file, loff_t start, + loff_t end, int datasync); + +static inline bool fb_be_math(struct fb_info *info) +{ +#ifdef CONFIG_FB_FOREIGN_ENDIAN +#if defined(CONFIG_FB_BOTH_ENDIAN) + return info->flags & FBINFO_BE_MATH; +#elif defined(CONFIG_FB_BIG_ENDIAN) + return true; +#elif defined(CONFIG_FB_LITTLE_ENDIAN) + return false; +#endif /* CONFIG_FB_BOTH_ENDIAN */ +#else +#ifdef __BIG_ENDIAN + return true; +#else + return false; +#endif /* __BIG_ENDIAN */ +#endif /* CONFIG_FB_FOREIGN_ENDIAN */ +} + +/* drivers/video/fbsysfs.c */ +extern struct fb_info *framebuffer_alloc(size_t size, 
struct device *dev); +extern void framebuffer_release(struct fb_info *info); +extern int fb_init_device(struct fb_info *fb_info); +extern void fb_cleanup_device(struct fb_info *head); +extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max); + +/* drivers/video/fbmon.c */ +#define FB_MAXTIMINGS 0 +#define FB_VSYNCTIMINGS 1 +#define FB_HSYNCTIMINGS 2 +#define FB_DCLKTIMINGS 3 +#define FB_IGNOREMON 0x100 + +#define FB_MODE_IS_UNKNOWN 0 +#define FB_MODE_IS_DETAILED 1 +#define FB_MODE_IS_STANDARD 2 +#define FB_MODE_IS_VESA 4 +#define FB_MODE_IS_CALCULATED 8 +#define FB_MODE_IS_FIRST 16 +#define FB_MODE_IS_FROM_VAR 32 + +extern int fbmon_dpms(const struct fb_info *fb_info); +extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, + struct fb_info *info); +extern int fb_validate_mode(const struct fb_var_screeninfo *var, + struct fb_info *info); +extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); +extern const unsigned char *fb_firmware_edid(struct device *device); +extern void fb_edid_to_monspecs(unsigned char *edid, + struct fb_monspecs *specs); +extern void fb_edid_add_monspecs(unsigned char *edid, + struct fb_monspecs *specs); +extern void fb_destroy_modedb(struct fb_videomode *modedb); +extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); +extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter); + +/* drivers/video/modedb.c */ +#define VESA_MODEDB_SIZE 34 +extern void fb_var_to_videomode(struct fb_videomode *mode, + const struct fb_var_screeninfo *var); +extern void fb_videomode_to_var(struct fb_var_screeninfo *var, + const struct fb_videomode *mode); +extern int fb_mode_is_equal(const struct fb_videomode *mode1, + const struct fb_videomode *mode2); +extern int fb_add_videomode(const struct fb_videomode *mode, + struct list_head *head); +extern void fb_delete_videomode(const struct fb_videomode *mode, + struct list_head *head); +extern const struct fb_videomode *fb_match_mode(const struct fb_var_screeninfo *var, + struct list_head *head); +extern const struct fb_videomode *fb_find_best_mode(const struct fb_var_screeninfo *var, + struct list_head *head); +extern const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode, + struct list_head *head); +extern void fb_destroy_modelist(struct list_head *head); +extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num, + struct list_head *head); +extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs, + struct list_head *head); + +/* drivers/video/fbcmap.c */ +extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); +extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags); +extern void fb_dealloc_cmap(struct fb_cmap *cmap); +extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); +extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); +extern int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *fb_info); +extern int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *fb_info); +extern const struct fb_cmap *fb_default_cmap(int len); +extern void fb_invert_cmaps(void); + +struct fb_videomode { + const char *name; /* optional */ + u32 refresh; /* optional */ + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +extern const char *fb_mode_option; 
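
Editor's aside, not part of the diff: the modedb helpers declared just above (fb_match_mode, fb_find_best_mode, fb_videomode_to_var) are typically used from a driver's fb_check_var() hook, whose signature appears in struct fb_ops earlier in this header. The sketch below is purely illustrative under that assumption; the "foofb" prefix is hypothetical, while the helper functions, the info->modelist field, and the hook signature are the ones declared in this header.

#include <linux/errno.h>
#include <linux/fb.h>

/* Illustrative sketch: validate a requested mode against the driver's
 * private modelist and rewrite *var to describe the matched timing. */
static int foofb_check_var(struct fb_var_screeninfo *var,
			   struct fb_info *info)
{
	const struct fb_videomode *mode;

	/* Prefer an exact match from the modes the driver registered. */
	mode = fb_match_mode(var, &info->modelist);
	if (!mode)
		/* Otherwise fall back to the closest supported timing. */
		mode = fb_find_best_mode(var, &info->modelist);
	if (!mode)
		return -EINVAL;

	/* Fill the caller's var from the chosen fb_videomode. */
	fb_videomode_to_var(var, mode);
	return 0;
}

In this pattern fb_check_var() only negotiates var; the hardware itself would then be programmed from info->var in the driver's fb_set_par() hook, per the fb_ops contract shown earlier in the header.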
+extern const struct fb_videomode vesa_modes[]; +extern const struct fb_videomode cea_modes[64]; + +struct fb_modelist { + struct list_head list; + struct fb_videomode mode; +}; + +extern int fb_find_mode(struct fb_var_screeninfo *var, + struct fb_info *info, const char *mode_option, + const struct fb_videomode *db, + unsigned int dbsize, + const struct fb_videomode *default_mode, + unsigned int default_bpp); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_FB_H */ diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h new file mode 100644 index 00000000..e460ef83 --- /dev/null +++ b/include/linux/fcdevice.h @@ -0,0 +1,33 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. NET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Fibre Channel handlers. + * + * Version: @(#)fcdevice.h 1.0.0 09/26/98 + * + * Authors: Vineet Abraham <vma@iol.unh.edu> + * + * Relocated to include/linux where it belongs by Alan Cox + * <gw4pts@gw4pts.ampr.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * WARNING: This move may well be temporary. This file will get merged with others RSN. + * + */ +#ifndef _LINUX_FCDEVICE_H +#define _LINUX_FCDEVICE_H + + +#include <linux/if_fc.h> + +#ifdef __KERNEL__ +extern struct net_device *alloc_fcdev(int sizeof_priv); +#endif + +#endif /* _LINUX_FCDEVICE_H */ diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h new file mode 100644 index 00000000..f550f894 --- /dev/null +++ b/include/linux/fcntl.h @@ -0,0 +1,79 @@ +#ifndef _LINUX_FCNTL_H +#define _LINUX_FCNTL_H + +#include <asm/fcntl.h> + +#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0) +#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1) + +/* + * Cancel a blocking posix lock; internal use only until we expose an + * asynchronous lock api to userspace: + */ +#define F_CANCELLK (F_LINUX_SPECIFIC_BASE + 5) + +/* Create a file descriptor with FD_CLOEXEC set. */ +#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6) + +/* + * Request nofications on a directory. + * See below for events that may be notified. + */ +#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2) + +/* + * Set and get of pipe page size array + */ +#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7) +#define F_GETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 8) + +/* + * Types of directory notifications that may be requested. + */ +#define DN_ACCESS 0x00000001 /* File accessed */ +#define DN_MODIFY 0x00000002 /* File modified */ +#define DN_CREATE 0x00000004 /* File created */ +#define DN_DELETE 0x00000008 /* File removed */ +#define DN_RENAME 0x00000010 /* File renamed */ +#define DN_ATTRIB 0x00000020 /* File changed attibutes */ +#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */ + +#define AT_FDCWD -100 /* Special value used to indicate + openat should use the current + working directory. */ +#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */ +#define AT_REMOVEDIR 0x200 /* Remove directory instead of + unlinking file. */ +#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. 
*/ +#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */ +#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */ + +#ifdef __KERNEL__ + +#ifndef force_o_largefile +#define force_o_largefile() (BITS_PER_LONG != 32) +#endif + +#if BITS_PER_LONG == 32 +#define IS_GETLK32(cmd) ((cmd) == F_GETLK) +#define IS_SETLK32(cmd) ((cmd) == F_SETLK) +#define IS_SETLKW32(cmd) ((cmd) == F_SETLKW) +#define IS_GETLK64(cmd) ((cmd) == F_GETLK64) +#define IS_SETLK64(cmd) ((cmd) == F_SETLK64) +#define IS_SETLKW64(cmd) ((cmd) == F_SETLKW64) +#else +#define IS_GETLK32(cmd) (0) +#define IS_SETLK32(cmd) (0) +#define IS_SETLKW32(cmd) (0) +#define IS_GETLK64(cmd) ((cmd) == F_GETLK) +#define IS_SETLK64(cmd) ((cmd) == F_SETLK) +#define IS_SETLKW64(cmd) ((cmd) == F_SETLKW) +#endif /* BITS_PER_LONG == 32 */ + +#define IS_GETLK(cmd) (IS_GETLK32(cmd) || IS_GETLK64(cmd)) +#define IS_SETLK(cmd) (IS_SETLK32(cmd) || IS_SETLK64(cmd)) +#define IS_SETLKW(cmd) (IS_SETLKW32(cmd) || IS_SETLKW64(cmd)) + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/fd.h b/include/linux/fd.h new file mode 100644 index 00000000..72202b1b --- /dev/null +++ b/include/linux/fd.h @@ -0,0 +1,402 @@ +#ifndef _LINUX_FD_H +#define _LINUX_FD_H + +#include <linux/ioctl.h> +#include <linux/compiler.h> + +/* New file layout: Now the ioctl definitions immediately follow the + * definitions of the structures that they use */ + +/* + * Geometry + */ +struct floppy_struct { + unsigned int size, /* nr of sectors total */ + sect, /* sectors per track */ + head, /* nr of heads */ + track, /* nr of tracks */ + stretch; /* bit 0 !=0 means double track steps */ + /* bit 1 != 0 means swap sides */ + /* bits 2..9 give the first sector */ + /* number (the LSB is flipped) */ +#define FD_STRETCH 1 +#define FD_SWAPSIDES 2 +#define FD_ZEROBASED 4 +#define FD_SECTBASEMASK 0x3FC +#define FD_MKSECTBASE(s) (((s) ^ 1) << 2) +#define FD_SECTBASE(floppy) ((((floppy)->stretch & FD_SECTBASEMASK) >> 2) ^ 1) + + unsigned char gap, /* gap1 size */ + + rate, /* data rate. |= 0x40 for perpendicular */ +#define FD_2M 0x4 +#define FD_SIZECODEMASK 0x38 +#define FD_SIZECODE(floppy) (((((floppy)->rate&FD_SIZECODEMASK)>> 3)+ 2) %8) +#define FD_SECTSIZE(floppy) ( (floppy)->rate & FD_2M ? \ + 512 : 128 << FD_SIZECODE(floppy) ) +#define FD_PERP 0x40 + + spec1, /* stepping rate, head unload time */ + fmt_gap; /* gap2 size */ + const char * name; /* used only for predefined formats */ +}; + + +/* commands needing write access have 0x40 set */ +/* commands needing super user access have 0x80 set */ + +#define FDCLRPRM _IO(2, 0x41) +/* clear user-defined parameters */ + +#define FDSETPRM _IOW(2, 0x42, struct floppy_struct) +#define FDSETMEDIAPRM FDSETPRM +/* set user-defined parameters for current media */ + +#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct) +#define FDGETPRM _IOR(2, 0x04, struct floppy_struct) +#define FDDEFMEDIAPRM FDDEFPRM +#define FDGETMEDIAPRM FDGETPRM +/* set/get disk parameters */ + + +#define FDMSGON _IO(2,0x45) +#define FDMSGOFF _IO(2,0x46) +/* issue/don't issue kernel messages on media type change */ + + +/* + * Formatting (obsolete) + */ +#define FD_FILL_BYTE 0xF6 /* format fill byte. 
*/ + +struct format_descr { + unsigned int device,head,track; +}; + +#define FDFMTBEG _IO(2,0x47) +/* begin formatting a disk */ +#define FDFMTTRK _IOW(2,0x48, struct format_descr) +/* format the specified track */ +#define FDFMTEND _IO(2,0x49) +/* end formatting a disk */ + + +/* + * Error thresholds + */ +struct floppy_max_errors { + unsigned int + abort, /* number of errors to be reached before aborting */ + read_track, /* maximal number of errors permitted to read an + * entire track at once */ + reset, /* maximal number of errors before a reset is tried */ + recal, /* maximal number of errors before a recalibrate is + * tried */ + + /* + * Threshold for reporting FDC errors to the console. + * Setting this to zero may flood your screen when using + * ultra cheap floppies ;-) + */ + reporting; + +}; + +#define FDSETEMSGTRESH _IO(2,0x4a) +/* set fdc error reporting threshold */ + +#define FDFLUSH _IO(2,0x4b) +/* flush buffers for media; either for verifying media, or for + * handling a media change without closing the file descriptor */ + +#define FDSETMAXERRS _IOW(2, 0x4c, struct floppy_max_errors) +#define FDGETMAXERRS _IOR(2, 0x0e, struct floppy_max_errors) +/* set/get abortion and read_track threshold. See also floppy_drive_params + * structure */ + + +typedef char floppy_drive_name[16]; +#define FDGETDRVTYP _IOR(2, 0x0f, floppy_drive_name) +/* get drive type: 5 1/4 or 3 1/2 */ + + +/* + * Drive parameters (user modifiable) + */ +struct floppy_drive_params { + signed char cmos; /* CMOS type */ + + /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms + * etc) and ND is set means no DMA. Hardcoded to 6 (HLD=6ms, use DMA). + */ + unsigned long max_dtr; /* Step rate, usec */ + unsigned long hlt; /* Head load/settle time, msec */ + unsigned long hut; /* Head unload time (remnant of + * 8" drives) */ + unsigned long srt; /* Step rate, usec */ + + unsigned long spinup; /* time needed for spinup (expressed + * in jiffies) */ + unsigned long spindown; /* timeout needed for spindown */ + unsigned char spindown_offset; /* decides in which position the disk + * will stop */ + unsigned char select_delay; /* delay to wait after select */ + unsigned char rps; /* rotations per second */ + unsigned char tracks; /* maximum number of tracks */ + unsigned long timeout; /* timeout for interrupt requests */ + + unsigned char interleave_sect; /* if there are more sectors, use + * interleave */ + + struct floppy_max_errors max_errors; + + char flags; /* various flags, including ftd_msg */ +/* + * Announce successful media type detection and media information loss after + * disk changes. + * Also used to enable/disable printing of overrun warnings. + */ + +#define FTD_MSG 0x10 +#define FD_BROKEN_DCL 0x20 +#define FD_DEBUG 0x02 +#define FD_SILENT_DCL_CLEAR 0x4 +#define FD_INVERTED_DCL 0x80 /* must be 0x80, because of hardware + considerations */ + + char read_track; /* use readtrack during probing? */ + +/* + * Auto-detection. Each drive type has eight formats which are + * used in succession to try to read the disk. If the FDC cannot lock onto + * the disk, the next format is tried. This uses the variable 'probing'. 
+ */ + short autodetect[8]; /* autodetected formats */ + + int checkfreq; /* how often should the drive be checked for disk + * changes */ + int native_format; /* native format of this drive */ +}; + +enum { + FD_NEED_TWADDLE_BIT, /* more magic */ + FD_VERIFY_BIT, /* inquire for write protection */ + FD_DISK_NEWCHANGE_BIT, /* change detected, and no action undertaken yet + * to clear media change status */ + FD_UNUSED_BIT, + FD_DISK_CHANGED_BIT, /* disk has been changed since last i/o */ + FD_DISK_WRITABLE_BIT /* disk is writable */ +}; + +#define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params) +#define FDGETDRVPRM _IOR(2, 0x11, struct floppy_drive_params) +/* set/get drive parameters */ + + +/* + * Current drive state (not directly modifiable by user, readonly) + */ +struct floppy_drive_struct { + unsigned long flags; +/* values for these flags */ +#define FD_NEED_TWADDLE (1 << FD_NEED_TWADDLE_BIT) +#define FD_VERIFY (1 << FD_VERIFY_BIT) +#define FD_DISK_NEWCHANGE (1 << FD_DISK_NEWCHANGE_BIT) +#define FD_DISK_CHANGED (1 << FD_DISK_CHANGED_BIT) +#define FD_DISK_WRITABLE (1 << FD_DISK_WRITABLE_BIT) + + unsigned long spinup_date; + unsigned long select_date; + unsigned long first_read_date; + short probed_format; + short track; /* current track */ + short maxblock; /* id of highest block read */ + short maxtrack; /* id of highest half track read */ + int generation; /* how many diskchanges? */ + +/* + * (User-provided) media information is _not_ discarded after a media change + * if the corresponding keep_data flag is non-zero. Positive values are + * decremented after each probe. + */ + int keep_data; + + /* Prevent "aliased" accesses. */ + int fd_ref; + int fd_device; + unsigned long last_checked; /* when was the drive last checked for a disk + * change? */ + + char *dmabuf; + int bufblocks; +}; + +#define FDGETDRVSTAT _IOR(2, 0x12, struct floppy_drive_struct) +#define FDPOLLDRVSTAT _IOR(2, 0x13, struct floppy_drive_struct) +/* get drive state: GET returns the cached state, POLL polls for new state */ + + +/* + * reset FDC + */ +enum reset_mode { + FD_RESET_IF_NEEDED, /* reset only if the reset flags is set */ + FD_RESET_IF_RAWCMD, /* obsolete */ + FD_RESET_ALWAYS /* reset always */ +}; +#define FDRESET _IO(2, 0x54) + + +/* + * FDC state + */ +struct floppy_fdc_state { + int spec1; /* spec1 value last used */ + int spec2; /* spec2 value last used */ + int dtr; + unsigned char version; /* FDC version code */ + unsigned char dor; + unsigned long address; /* io address */ + unsigned int rawcmd:2; + unsigned int reset:1; + unsigned int need_configure:1; + unsigned int perp_mode:2; + unsigned int has_fifo:1; + unsigned int driver_version; /* version code for floppy driver */ +#define FD_DRIVER_VERSION 0x100 +/* user programs using the floppy API should use floppy_fdc_state to + * get the version number of the floppy driver that they are running + * on. If this version number is bigger than the one compiled into the + * user program (the FD_DRIVER_VERSION define), it should be prepared + * to bigger structures + */ + + unsigned char track[4]; + /* Position of the heads of the 4 units attached to this FDC, + * as stored on the FDC. In the future, the position as stored + * on the FDC might not agree with the actual physical + * position of these drive heads. By allowing such + * disagreement, it will be possible to reset the FDC without + * incurring the expensive cost of repositioning all heads. + * Right now, these positions are hard wired to 0. 
*/ + +}; + +#define FDGETFDCSTAT _IOR(2, 0x15, struct floppy_fdc_state) + + +/* + * Asynchronous Write error tracking + */ +struct floppy_write_errors { + /* Write error logging. + * + * These fields can be cleared with the FDWERRORCLR ioctl. + * Only writes that were attempted but failed due to a physical media + * error are logged. write(2) calls that fail and return an error code + * to the user process are not counted. + */ + + unsigned int write_errors; /* number of physical write errors + * encountered */ + + /* position of first and last write errors */ + unsigned long first_error_sector; + int first_error_generation; + unsigned long last_error_sector; + int last_error_generation; + + unsigned int badness; /* highest retry count for a read or write + * operation */ +}; + +#define FDWERRORCLR _IO(2, 0x56) +/* clear write error and badness information */ +#define FDWERRORGET _IOR(2, 0x17, struct floppy_write_errors) +/* get write error and badness information */ + + +/* + * Raw commands + */ +/* new interface flag: now we can do them in batches */ +#define FDHAVEBATCHEDRAWCMD + +struct floppy_raw_cmd { + unsigned int flags; +#define FD_RAW_READ 1 +#define FD_RAW_WRITE 2 +#define FD_RAW_NO_MOTOR 4 +#define FD_RAW_DISK_CHANGE 4 /* out: disk change flag was set */ +#define FD_RAW_INTR 8 /* wait for an interrupt */ +#define FD_RAW_SPIN 0x10 /* spin up the disk for this command */ +#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command + * completion */ +#define FD_RAW_NEED_DISK 0x40 /* this command needs a disk to be present */ +#define FD_RAW_NEED_SEEK 0x80 /* this command uses an implied seek (soft) */ + +/* more "in" flags */ +#define FD_RAW_MORE 0x100 /* more records follow */ +#define FD_RAW_STOP_IF_FAILURE 0x200 /* stop if we encounter a failure */ +#define FD_RAW_STOP_IF_SUCCESS 0x400 /* stop if command successful */ +#define FD_RAW_SOFTFAILURE 0x800 /* consider the return value for failure + * detection too */ + +/* more "out" flags */ +#define FD_RAW_FAILURE 0x10000 /* command sent to fdc, fdc returned error */ +#define FD_RAW_HARDFAILURE 0x20000 /* fdc had to be reset, or timed out */ + + void __user *data; + char *kernel_data; /* location of data buffer in the kernel */ + struct floppy_raw_cmd *next; /* used for chaining of raw cmd's + * within the kernel */ + long length; /* in: length of dma transfer. out: remaining bytes */ + long phys_length; /* physical length, if different from dma length */ + int buffer_length; /* length of allocated buffer */ + + unsigned char rate; + unsigned char cmd_count; + unsigned char cmd[16]; + unsigned char reply_count; + unsigned char reply[16]; + int track; + int resultcode; + + int reserved1; + int reserved2; +}; + +#define FDRAWCMD _IO(2, 0x58) +/* send a raw command to the fdc. Structure size not included, because of + * batches */ + +#define FDTWADDLE _IO(2, 0x59) +/* flicker motor-on bit before reading a sector. 
Experimental */ + + +#define FDEJECT _IO(2, 0x5a) +/* eject the disk */ + + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT +#include <linux/compat.h> + +struct compat_floppy_struct { + compat_uint_t size; + compat_uint_t sect; + compat_uint_t head; + compat_uint_t track; + compat_uint_t stretch; + unsigned char gap; + unsigned char rate; + unsigned char spec1; + unsigned char fmt_gap; + const compat_caddr_t name; +}; + +#define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct) +#endif +#endif + +#endif diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h new file mode 100644 index 00000000..155bafd9 --- /dev/null +++ b/include/linux/fddidevice.h @@ -0,0 +1,34 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the FDDI handlers. + * + * Version: @(#)fddidevice.h 1.0.0 08/12/96 + * + * Author: Lawrence V. Stefani, <stefani@lkg.dec.com> + * + * fddidevice.h is based on previous trdevice.h work by + * Ross Biro + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Alan Cox, <gw4pts@gw4pts.ampr.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_FDDIDEVICE_H +#define _LINUX_FDDIDEVICE_H + +#include <linux/if_fddi.h> + +#ifdef __KERNEL__ +extern __be16 fddi_type_trans(struct sk_buff *skb, + struct net_device *dev); +extern int fddi_change_mtu(struct net_device *dev, int new_mtu); +extern struct net_device *alloc_fddidev(int sizeof_priv); +#endif + +#endif /* _LINUX_FDDIDEVICE_H */ diff --git a/include/linux/fdreg.h b/include/linux/fdreg.h new file mode 100644 index 00000000..61ce6416 --- /dev/null +++ b/include/linux/fdreg.h @@ -0,0 +1,137 @@ +#ifndef _LINUX_FDREG_H +#define _LINUX_FDREG_H +/* + * This file contains some defines for the floppy disk controller. + * Various sources. Mostly "IBM Microcomputers: A Programmers + * Handbook", Sanches and Canton. + */ + +#ifdef FDPATCHES +#define FD_IOPORT fdc_state[fdc].address +#else +/* It would be a lot saner just to force fdc_state[fdc].address to always + be set ! FIXME */ +#define FD_IOPORT 0x3f0 +#endif + +/* Fd controller regs. 
S&C, about page 340 */ +#define FD_STATUS (4 + FD_IOPORT ) +#define FD_DATA (5 + FD_IOPORT ) + +/* Digital Output Register */ +#define FD_DOR (2 + FD_IOPORT ) + +/* Digital Input Register (read) */ +#define FD_DIR (7 + FD_IOPORT ) + +/* Diskette Control Register (write)*/ +#define FD_DCR (7 + FD_IOPORT ) + +/* Bits of main status register */ +#define STATUS_BUSYMASK 0x0F /* drive busy mask */ +#define STATUS_BUSY 0x10 /* FDC busy */ +#define STATUS_DMA 0x20 /* 0- DMA mode */ +#define STATUS_DIR 0x40 /* 0- cpu->fdc */ +#define STATUS_READY 0x80 /* Data reg ready */ + +/* Bits of FD_ST0 */ +#define ST0_DS 0x03 /* drive select mask */ +#define ST0_HA 0x04 /* Head (Address) */ +#define ST0_NR 0x08 /* Not Ready */ +#define ST0_ECE 0x10 /* Equipment check error */ +#define ST0_SE 0x20 /* Seek end */ +#define ST0_INTR 0xC0 /* Interrupt code mask */ + +/* Bits of FD_ST1 */ +#define ST1_MAM 0x01 /* Missing Address Mark */ +#define ST1_WP 0x02 /* Write Protect */ +#define ST1_ND 0x04 /* No Data - unreadable */ +#define ST1_OR 0x10 /* OverRun */ +#define ST1_CRC 0x20 /* CRC error in data or addr */ +#define ST1_EOC 0x80 /* End Of Cylinder */ + +/* Bits of FD_ST2 */ +#define ST2_MAM 0x01 /* Missing Address Mark (again) */ +#define ST2_BC 0x02 /* Bad Cylinder */ +#define ST2_SNS 0x04 /* Scan Not Satisfied */ +#define ST2_SEH 0x08 /* Scan Equal Hit */ +#define ST2_WC 0x10 /* Wrong Cylinder */ +#define ST2_CRC 0x20 /* CRC error in data field */ +#define ST2_CM 0x40 /* Control Mark = deleted */ + +/* Bits of FD_ST3 */ +#define ST3_HA 0x04 /* Head (Address) */ +#define ST3_DS 0x08 /* drive is double-sided */ +#define ST3_TZ 0x10 /* Track Zero signal (1=track 0) */ +#define ST3_RY 0x20 /* drive is ready */ +#define ST3_WP 0x40 /* Write Protect */ +#define ST3_FT 0x80 /* Drive Fault */ + +/* Values for FD_COMMAND */ +#define FD_RECALIBRATE 0x07 /* move to track 0 */ +#define FD_SEEK 0x0F /* seek track */ +#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */ +#define FD_WRITE 0xC5 /* write with MT, MFM */ +#define FD_SENSEI 0x08 /* Sense Interrupt Status */ +#define FD_SPECIFY 0x03 /* specify HUT etc */ +#define FD_FORMAT 0x4D /* format one track */ +#define FD_VERSION 0x10 /* get version code */ +#define FD_CONFIGURE 0x13 /* configure FIFO operation */ +#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */ +#define FD_GETSTATUS 0x04 /* read ST3 */ +#define FD_DUMPREGS 0x0E /* dump the contents of the fdc regs */ +#define FD_READID 0xEA /* prints the header of a sector */ +#define FD_UNLOCK 0x14 /* Fifo config unlock */ +#define FD_LOCK 0x94 /* Fifo config lock */ +#define FD_RSEEK_OUT 0x8f /* seek out (i.e. to lower tracks) */ +#define FD_RSEEK_IN 0xcf /* seek in (i.e. to higher tracks) */ + +/* the following commands are new in the 82078. They are not used in the + * floppy driver, except the first three. These commands may be useful for apps + * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at + * http://www.intel.com/design/archives/periphrl/docs/29046803.htm */ + +#define FD_PARTID 0x18 /* part id ("extended" version cmd) */ +#define FD_SAVE 0x2e /* save fdc regs for later restore */ +#define FD_DRIVESPEC 0x8e /* drive specification: Access to the + * 2 Mbps data transfer rate for tape + * drives */ + +#define FD_RESTORE 0x4e /* later restore */ +#define FD_POWERDOWN 0x27 /* configure FDC's powersave features */ +#define FD_FORMAT_N_WRITE 0xef /* format and write in one go. 
*/ +#define FD_OPTION 0x33 /* ISO format (which is a clean way to + * pack more sectors on a track) */ + +/* DMA commands */ +#define DMA_READ 0x46 +#define DMA_WRITE 0x4A + +/* FDC version return types */ +#define FDC_NONE 0x00 +#define FDC_UNKNOWN 0x10 /* DO NOT USE THIS TYPE EXCEPT IF IDENTIFICATION + FAILS EARLY */ +#define FDC_8272A 0x20 /* Intel 8272a, NEC 765 */ +#define FDC_765ED 0x30 /* Non-Intel 1MB-compatible FDC, can't detect */ +#define FDC_82072 0x40 /* Intel 82072; 8272a + FIFO + DUMPREGS */ +#define FDC_82072A 0x45 /* 82072A (on Sparcs) */ +#define FDC_82077_ORIG 0x51 /* Original version of 82077AA, sans LOCK */ +#define FDC_82077 0x52 /* 82077AA-1 */ +#define FDC_82078_UNKN 0x5f /* Unknown 82078 variant */ +#define FDC_82078 0x60 /* 44pin 82078 or 64pin 82078SL */ +#define FDC_82078_1 0x61 /* 82078-1 (2Mbps fdc) */ +#define FDC_S82078B 0x62 /* S82078B (first seen on Adaptec AVA-2825 VLB + * SCSI/EIDE/Floppy controller) */ +#define FDC_87306 0x63 /* National Semiconductor PC 87306 */ + +/* + * Beware: the fdc type list is roughly sorted by increasing features. + * Presence of features is tested by comparing the FDC version id with the + * "oldest" version that has the needed feature. + * If during FDC detection, an obscure test fails late in the sequence, don't + * assign FDC_UNKNOWN. Else the FDC will be treated as a dumb 8272a, or worse. + * This is especially true if the tests are unneeded. + */ + +#define FD_RESET_DELAY 20 +#endif diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h new file mode 100644 index 00000000..158a41ee --- /dev/null +++ b/include/linux/fdtable.h @@ -0,0 +1,130 @@ +/* + * descriptor table internals; you almost certainly want file.h instead. + */ + +#ifndef __LINUX_FDTABLE_H +#define __LINUX_FDTABLE_H + +#include <linux/posix_types.h> +#include <linux/compiler.h> +#include <linux/spinlock.h> +#include <linux/rcupdate.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/fs.h> + +#include <linux/atomic.h> + +/* + * The default fd array needs to be at least BITS_PER_LONG, + * as this is the granularity returned by copy_fdset(). 
+ */ +#define NR_OPEN_DEFAULT BITS_PER_LONG + +struct fdtable { + unsigned int max_fds; + struct file __rcu **fd; /* current fd array */ + unsigned long *close_on_exec; + unsigned long *open_fds; + struct rcu_head rcu; + struct fdtable *next; +}; + +static inline void __set_close_on_exec(int fd, struct fdtable *fdt) +{ + __set_bit(fd, fdt->close_on_exec); +} + +static inline void __clear_close_on_exec(int fd, struct fdtable *fdt) +{ + __clear_bit(fd, fdt->close_on_exec); +} + +static inline bool close_on_exec(int fd, const struct fdtable *fdt) +{ + return test_bit(fd, fdt->close_on_exec); +} + +static inline void __set_open_fd(int fd, struct fdtable *fdt) +{ + __set_bit(fd, fdt->open_fds); +} + +static inline void __clear_open_fd(int fd, struct fdtable *fdt) +{ + __clear_bit(fd, fdt->open_fds); +} + +static inline bool fd_is_open(int fd, const struct fdtable *fdt) +{ + return test_bit(fd, fdt->open_fds); +} + +/* + * Open file table structure + */ +struct files_struct { + /* + * read mostly part + */ + atomic_t count; + struct fdtable __rcu *fdt; + struct fdtable fdtab; + /* + * written part on a separate cache line in SMP + */ + spinlock_t file_lock ____cacheline_aligned_in_smp; + int next_fd; + unsigned long close_on_exec_init[1]; + unsigned long open_fds_init[1]; + struct file __rcu * fd_array[NR_OPEN_DEFAULT]; +}; + +#define rcu_dereference_check_fdtable(files, fdtfd) \ + (rcu_dereference_check((fdtfd), \ + lockdep_is_held(&(files)->file_lock) || \ + atomic_read(&(files)->count) == 1 || \ + rcu_my_thread_group_empty())) + +#define files_fdtable(files) \ + (rcu_dereference_check_fdtable((files), (files)->fdt)) + +struct file_operations; +struct vfsmount; +struct dentry; + +extern int expand_files(struct files_struct *, int nr); +extern void free_fdtable_rcu(struct rcu_head *rcu); +extern void __init files_defer_init(void); + +static inline void free_fdtable(struct fdtable *fdt) +{ + call_rcu(&fdt->rcu, free_fdtable_rcu); +} + +static inline struct file * fcheck_files(struct files_struct *files, unsigned int fd) +{ + struct file * file = NULL; + struct fdtable *fdt = files_fdtable(files); + + if (fd < fdt->max_fds) + file = rcu_dereference_check_fdtable(files, fdt->fd[fd]); + return file; +} + +/* + * Check whether the specified fd has an open file. + */ +#define fcheck(fd) fcheck_files(current->files, fd) + +struct task_struct; + +struct files_struct *get_files_struct(struct task_struct *); +void put_files_struct(struct files_struct *fs); +void reset_files_struct(struct files_struct *); +int unshare_files(struct files_struct **); +struct files_struct *dup_fd(struct files_struct *, int *); + +extern struct kmem_cache *files_cachep; + +#endif /* __LINUX_FDTABLE_H */ diff --git a/include/linux/fec.h b/include/linux/fec.h new file mode 100644 index 00000000..bcff455d --- /dev/null +++ b/include/linux/fec.h @@ -0,0 +1,24 @@ +/* include/linux/fec.h + * + * Copyright (c) 2009 Orex Computed Radiography + * Baruch Siach <baruch@tkos.co.il> + * + * Copyright (C) 2010 Freescale Semiconductor, Inc. + * + * Header file for the FEC platform data + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __LINUX_FEC_H__ +#define __LINUX_FEC_H__ + +#include <linux/phy.h> + +struct fec_platform_data { + phy_interface_t phy; + unsigned char mac[ETH_ALEN]; +}; + +#endif diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h new file mode 100644 index 00000000..51da65b6 --- /dev/null +++ b/include/linux/fib_rules.h @@ -0,0 +1,72 @@ +#ifndef __LINUX_FIB_RULES_H +#define __LINUX_FIB_RULES_H + +#include <linux/types.h> +#include <linux/rtnetlink.h> + +/* rule is permanent, and cannot be deleted */ +#define FIB_RULE_PERMANENT 0x00000001 +#define FIB_RULE_INVERT 0x00000002 +#define FIB_RULE_UNRESOLVED 0x00000004 +#define FIB_RULE_IIF_DETACHED 0x00000008 +#define FIB_RULE_DEV_DETACHED FIB_RULE_IIF_DETACHED +#define FIB_RULE_OIF_DETACHED 0x00000010 + +/* try to find source address in routing lookups */ +#define FIB_RULE_FIND_SADDR 0x00010000 + +struct fib_rule_hdr { + __u8 family; + __u8 dst_len; + __u8 src_len; + __u8 tos; + + __u8 table; + __u8 res1; /* reserved */ + __u8 res2; /* reserved */ + __u8 action; + + __u32 flags; +}; + +enum { + FRA_UNSPEC, + FRA_DST, /* destination address */ + FRA_SRC, /* source address */ + FRA_IIFNAME, /* interface name */ +#define FRA_IFNAME FRA_IIFNAME + FRA_GOTO, /* target to jump to (FR_ACT_GOTO) */ + FRA_UNUSED2, + FRA_PRIORITY, /* priority/preference */ + FRA_UNUSED3, + FRA_UNUSED4, + FRA_UNUSED5, + FRA_FWMARK, /* mark */ + FRA_FLOW, /* flow/class id */ + FRA_UNUSED6, + FRA_UNUSED7, + FRA_UNUSED8, + FRA_TABLE, /* Extended table id */ + FRA_FWMASK, /* mask for netfilter mark */ + FRA_OIFNAME, + __FRA_MAX +}; + +#define FRA_MAX (__FRA_MAX - 1) + +enum { + FR_ACT_UNSPEC, + FR_ACT_TO_TBL, /* Pass to fixed table */ + FR_ACT_GOTO, /* Jump to another rule */ + FR_ACT_NOP, /* No operation */ + FR_ACT_RES3, + FR_ACT_RES4, + FR_ACT_BLACKHOLE, /* Drop without notification */ + FR_ACT_UNREACHABLE, /* Drop with ENETUNREACH */ + FR_ACT_PROHIBIT, /* Drop with EACCES */ + __FR_ACT_MAX, +}; + +#define FR_ACT_MAX (__FR_ACT_MAX - 1) + +#endif diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h new file mode 100644 index 00000000..d830747f --- /dev/null +++ b/include/linux/fiemap.h @@ -0,0 +1,68 @@ +/* + * FS_IOC_FIEMAP ioctl infrastructure. 
+ * + * Some portions copyright (C) 2007 Cluster File Systems, Inc + * + * Authors: Mark Fasheh <mfasheh@suse.com> + * Kalpak Shah <kalpak.shah@sun.com> + * Andreas Dilger <adilger@sun.com> + */ + +#ifndef _LINUX_FIEMAP_H +#define _LINUX_FIEMAP_H + +#include <linux/types.h> + +struct fiemap_extent { + __u64 fe_logical; /* logical offset in bytes for the start of + * the extent from the beginning of the file */ + __u64 fe_physical; /* physical offset in bytes for the start + * of the extent from the beginning of the disk */ + __u64 fe_length; /* length in bytes for this extent */ + __u64 fe_reserved64[2]; + __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */ + __u32 fe_reserved[3]; +}; + +struct fiemap { + __u64 fm_start; /* logical offset (inclusive) at + * which to start mapping (in) */ + __u64 fm_length; /* logical length of mapping which + * userspace wants (in) */ + __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */ + __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ + __u32 fm_extent_count; /* size of fm_extents array (in) */ + __u32 fm_reserved; + struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */ +}; + +#define FIEMAP_MAX_OFFSET (~0ULL) + +#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */ +#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */ + +#define FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR) + +#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ +#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ +#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. + * Sets EXTENT_UNKNOWN. */ +#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read + * while fs is unmounted */ +#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. + * Sets EXTENT_NO_BYPASS. */ +#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be + * block aligned. */ +#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata. + * Sets EXTENT_NOT_ALIGNED.*/ +#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. + * Sets EXTENT_NOT_ALIGNED.*/ +#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but + * no data (i.e. zero). */ +#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively + * support extents. Result + * merged for efficiency. */ +#define FIEMAP_EXTENT_SHARED 0x00002000 /* Space shared with other + * files. */ + +#endif /* _LINUX_FIEMAP_H */ diff --git a/include/linux/file.h b/include/linux/file.h new file mode 100644 index 00000000..58bf158c --- /dev/null +++ b/include/linux/file.h @@ -0,0 +1,42 @@ +/* + * Wrapper functions for accessing the file_struct fd array. 
+ */ + +#ifndef __LINUX_FILE_H +#define __LINUX_FILE_H + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/posix_types.h> + +struct file; + +extern void fput(struct file *); + +struct file_operations; +struct vfsmount; +struct dentry; +struct path; +extern struct file *alloc_file(struct path *, fmode_t mode, + const struct file_operations *fop); + +static inline void fput_light(struct file *file, int fput_needed) +{ + if (fput_needed) + fput(file); +} + +extern struct file *fget(unsigned int fd); +extern struct file *fget_light(unsigned int fd, int *fput_needed); +extern struct file *fget_raw(unsigned int fd); +extern struct file *fget_raw_light(unsigned int fd, int *fput_needed); +extern void set_close_on_exec(unsigned int fd, int flag); +extern void put_filp(struct file *); +extern int alloc_fd(unsigned start, unsigned flags); +extern int get_unused_fd(void); +#define get_unused_fd_flags(flags) alloc_fd(0, (flags)) +extern void put_unused_fd(unsigned int fd); + +extern void fd_install(unsigned int fd, struct file *file); + +#endif /* __LINUX_FILE_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h new file mode 100644 index 00000000..8eeb205f --- /dev/null +++ b/include/linux/filter.h @@ -0,0 +1,235 @@ +/* + * Linux Socket Filter Data Structures + */ + +#ifndef __LINUX_FILTER_H__ +#define __LINUX_FILTER_H__ + +#include <linux/compiler.h> +#include <linux/types.h> + +#ifdef __KERNEL__ +#include <linux/atomic.h> +#endif + +/* + * Current version of the filter code architecture. + */ +#define BPF_MAJOR_VERSION 1 +#define BPF_MINOR_VERSION 1 + +/* + * Try and keep these values and structures similar to BSD, especially + * the BPF code definitions which need to match so you can share filters + */ + +struct sock_filter { /* Filter block */ + __u16 code; /* Actual filter code */ + __u8 jt; /* Jump true */ + __u8 jf; /* Jump false */ + __u32 k; /* Generic multiuse field */ +}; + +struct sock_fprog { /* Required for SO_ATTACH_FILTER. */ + unsigned short len; /* Number of filter blocks */ + struct sock_filter __user *filter; +}; + +/* + * Instruction classes + */ + +#define BPF_CLASS(code) ((code) & 0x07) +#define BPF_LD 0x00 +#define BPF_LDX 0x01 +#define BPF_ST 0x02 +#define BPF_STX 0x03 +#define BPF_ALU 0x04 +#define BPF_JMP 0x05 +#define BPF_RET 0x06 +#define BPF_MISC 0x07 + +/* ld/ldx fields */ +#define BPF_SIZE(code) ((code) & 0x18) +#define BPF_W 0x00 +#define BPF_H 0x08 +#define BPF_B 0x10 +#define BPF_MODE(code) ((code) & 0xe0) +#define BPF_IMM 0x00 +#define BPF_ABS 0x20 +#define BPF_IND 0x40 +#define BPF_MEM 0x60 +#define BPF_LEN 0x80 +#define BPF_MSH 0xa0 + +/* alu/jmp fields */ +#define BPF_OP(code) ((code) & 0xf0) +#define BPF_ADD 0x00 +#define BPF_SUB 0x10 +#define BPF_MUL 0x20 +#define BPF_DIV 0x30 +#define BPF_OR 0x40 +#define BPF_AND 0x50 +#define BPF_LSH 0x60 +#define BPF_RSH 0x70 +#define BPF_NEG 0x80 +#define BPF_JA 0x00 +#define BPF_JEQ 0x10 +#define BPF_JGT 0x20 +#define BPF_JGE 0x30 +#define BPF_JSET 0x40 +#define BPF_SRC(code) ((code) & 0x08) +#define BPF_K 0x00 +#define BPF_X 0x08 + +/* ret - BPF_K and BPF_X also apply */ +#define BPF_RVAL(code) ((code) & 0x18) +#define BPF_A 0x10 + +/* misc */ +#define BPF_MISCOP(code) ((code) & 0xf8) +#define BPF_TAX 0x00 +#define BPF_TXA 0x80 + +#ifndef BPF_MAXINSNS +#define BPF_MAXINSNS 4096 +#endif + +/* + * Macros for filter block array initializers. 
+ */ +#ifndef BPF_STMT +#define BPF_STMT(code, k) { (unsigned short)(code), 0, 0, k } +#endif +#ifndef BPF_JUMP +#define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k } +#endif + +/* + * Number of scratch memory words for: BPF_ST and BPF_STX + */ +#define BPF_MEMWORDS 16 + +/* RATIONALE. Negative offsets are invalid in BPF. + We use them to reference ancillary data. + Unlike introduction new instructions, it does not break + existing compilers/optimizers. + */ +#define SKF_AD_OFF (-0x1000) +#define SKF_AD_PROTOCOL 0 +#define SKF_AD_PKTTYPE 4 +#define SKF_AD_IFINDEX 8 +#define SKF_AD_NLATTR 12 +#define SKF_AD_NLATTR_NEST 16 +#define SKF_AD_MARK 20 +#define SKF_AD_QUEUE 24 +#define SKF_AD_HATYPE 28 +#define SKF_AD_RXHASH 32 +#define SKF_AD_CPU 36 +#define SKF_AD_MAX 40 +#define SKF_NET_OFF (-0x100000) +#define SKF_LL_OFF (-0x200000) + +#ifdef __KERNEL__ + +struct sk_buff; +struct sock; + +struct sk_filter +{ + atomic_t refcnt; + unsigned int len; /* Number of filter blocks */ + unsigned int (*bpf_func)(const struct sk_buff *skb, + const struct sock_filter *filter); + struct rcu_head rcu; + struct sock_filter insns[0]; +}; + +static inline unsigned int sk_filter_len(const struct sk_filter *fp) +{ + return fp->len * sizeof(struct sock_filter) + sizeof(*fp); +} + +extern int sk_filter(struct sock *sk, struct sk_buff *skb); +extern unsigned int sk_run_filter(const struct sk_buff *skb, + const struct sock_filter *filter); +extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); +extern int sk_detach_filter(struct sock *sk); +extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); + +#ifdef CONFIG_BPF_JIT +extern void bpf_jit_compile(struct sk_filter *fp); +extern void bpf_jit_free(struct sk_filter *fp); +#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) +#else +static inline void bpf_jit_compile(struct sk_filter *fp) +{ +} +static inline void bpf_jit_free(struct sk_filter *fp) +{ +} +#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns) +#endif + +enum { + BPF_S_RET_K = 1, + BPF_S_RET_A, + BPF_S_ALU_ADD_K, + BPF_S_ALU_ADD_X, + BPF_S_ALU_SUB_K, + BPF_S_ALU_SUB_X, + BPF_S_ALU_MUL_K, + BPF_S_ALU_MUL_X, + BPF_S_ALU_DIV_X, + BPF_S_ALU_AND_K, + BPF_S_ALU_AND_X, + BPF_S_ALU_OR_K, + BPF_S_ALU_OR_X, + BPF_S_ALU_LSH_K, + BPF_S_ALU_LSH_X, + BPF_S_ALU_RSH_K, + BPF_S_ALU_RSH_X, + BPF_S_ALU_NEG, + BPF_S_LD_W_ABS, + BPF_S_LD_H_ABS, + BPF_S_LD_B_ABS, + BPF_S_LD_W_LEN, + BPF_S_LD_W_IND, + BPF_S_LD_H_IND, + BPF_S_LD_B_IND, + BPF_S_LD_IMM, + BPF_S_LDX_W_LEN, + BPF_S_LDX_B_MSH, + BPF_S_LDX_IMM, + BPF_S_MISC_TAX, + BPF_S_MISC_TXA, + BPF_S_ALU_DIV_K, + BPF_S_LD_MEM, + BPF_S_LDX_MEM, + BPF_S_ST, + BPF_S_STX, + BPF_S_JMP_JA, + BPF_S_JMP_JEQ_K, + BPF_S_JMP_JEQ_X, + BPF_S_JMP_JGE_K, + BPF_S_JMP_JGE_X, + BPF_S_JMP_JGT_K, + BPF_S_JMP_JGT_X, + BPF_S_JMP_JSET_K, + BPF_S_JMP_JSET_X, + /* Ancillary data */ + BPF_S_ANC_PROTOCOL, + BPF_S_ANC_PKTTYPE, + BPF_S_ANC_IFINDEX, + BPF_S_ANC_NLATTR, + BPF_S_ANC_NLATTR_NEST, + BPF_S_ANC_MARK, + BPF_S_ANC_QUEUE, + BPF_S_ANC_HATYPE, + BPF_S_ANC_RXHASH, + BPF_S_ANC_CPU, +}; + +#endif /* __KERNEL__ */ + +#endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/fips.h b/include/linux/fips.h new file mode 100644 index 00000000..f8fb07b0 --- /dev/null +++ b/include/linux/fips.h @@ -0,0 +1,10 @@ +#ifndef _FIPS_H +#define _FIPS_H + +#ifdef CONFIG_CRYPTO_FIPS +extern int fips_enabled; +#else +#define fips_enabled 0 +#endif + +#endif diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h new 
file mode 100644 index 00000000..d5003695 --- /dev/null +++ b/include/linux/firewire-cdev.h @@ -0,0 +1,1039 @@ +/* + * Char device interface. + * + * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _LINUX_FIREWIRE_CDEV_H +#define _LINUX_FIREWIRE_CDEV_H + +#include <linux/ioctl.h> +#include <linux/types.h> +#include <linux/firewire-constants.h> + +/* available since kernel version 2.6.22 */ +#define FW_CDEV_EVENT_BUS_RESET 0x00 +#define FW_CDEV_EVENT_RESPONSE 0x01 +#define FW_CDEV_EVENT_REQUEST 0x02 +#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 + +/* available since kernel version 2.6.30 */ +#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 +#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 + +/* available since kernel version 2.6.36 */ +#define FW_CDEV_EVENT_REQUEST2 0x06 +#define FW_CDEV_EVENT_PHY_PACKET_SENT 0x07 +#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08 +#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09 + +/** + * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types + * @closure: For arbitrary use by userspace + * @type: Discriminates the fw_cdev_event_ types + * + * This struct may be used to access generic members of all fw_cdev_event_ + * types regardless of the specific type. + * + * Data passed in the @closure field for a request will be returned in the + * corresponding event. It is big enough to hold a pointer on all platforms. + * The ioctl used to set @closure depends on the @type of event. + */ +struct fw_cdev_event_common { + __u64 closure; + __u32 type; +}; + +/** + * struct fw_cdev_event_bus_reset - Sent when a bus reset occurred + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_GET_INFO ioctl + * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_BUS_RESET + * @node_id: New node ID of this node + * @local_node_id: Node ID of the local node, i.e. of the controller + * @bm_node_id: Node ID of the bus manager + * @irm_node_id: Node ID of the iso resource manager + * @root_node_id: Node ID of the root node + * @generation: New bus generation + * + * This event is sent when the bus the device belongs to goes through a bus + * reset. It provides information about the new bus configuration, such as + * new node ID for this device, new root ID, and others. 
+ * + * If @bm_node_id is 0xffff right after bus reset it can be reread by an + * %FW_CDEV_IOC_GET_INFO ioctl after bus manager selection was finished. + * Kernels with ABI version < 4 do not set @bm_node_id. + */ +struct fw_cdev_event_bus_reset { + __u64 closure; + __u32 type; + __u32 node_id; + __u32 local_node_id; + __u32 bm_node_id; + __u32 irm_node_id; + __u32 root_node_id; + __u32 generation; +}; + +/** + * struct fw_cdev_event_response - Sent when a response packet was received + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST + * or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST + * or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl + * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE + * @rcode: Response code returned by the remote node + * @length: Data length, i.e. the response's payload size in bytes + * @data: Payload data, if any + * + * This event is sent when the stack receives a response to an outgoing request + * sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses + * carrying data (read and lock responses) follows immediately and can be + * accessed through the @data field. + * + * The event is also generated after conclusions of transactions that do not + * involve response packets. This includes unified write transactions, + * broadcast write transactions, and transmission of asynchronous stream + * packets. @rcode indicates success or failure of such transmissions. + */ +struct fw_cdev_event_response { + __u64 closure; + __u32 type; + __u32 rcode; + __u32 length; + __u32 data[0]; +}; + +/** + * struct fw_cdev_event_request - Old version of &fw_cdev_event_request2 + * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST + * + * This event is sent instead of &fw_cdev_event_request2 if the kernel or + * the client implements ABI version <= 3. &fw_cdev_event_request lacks + * essential information; use &fw_cdev_event_request2 instead. + */ +struct fw_cdev_event_request { + __u64 closure; + __u32 type; + __u32 tcode; + __u64 offset; + __u32 handle; + __u32 length; + __u32 data[0]; +}; + +/** + * struct fw_cdev_event_request2 - Sent on incoming request to an address region + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl + * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST2 + * @tcode: Transaction code of the incoming request + * @offset: The offset into the 48-bit per-node address space + * @source_node_id: Sender node ID + * @destination_node_id: Destination node ID + * @card: The index of the card from which the request came + * @generation: Bus generation in which the request is valid + * @handle: Reference to the kernel-side pending request + * @length: Data length, i.e. the request's payload size in bytes + * @data: Incoming data, if any + * + * This event is sent when the stack receives an incoming request to an address + * region registered using the %FW_CDEV_IOC_ALLOCATE ioctl. The request is + * guaranteed to be completely contained in the specified region. Userspace is + * responsible for sending the response by %FW_CDEV_IOC_SEND_RESPONSE ioctl, + * using the same @handle. + * + * The payload data for requests carrying data (write and lock requests) + * follows immediately and can be accessed through the @data field. + * + * Unlike &fw_cdev_event_request, @tcode of lock requests is one of the + * firewire-core specific %TCODE_LOCK_MASK_SWAP...%TCODE_LOCK_VENDOR_DEPENDENT, + * i.e. encodes the extended transaction code. 
+ * + * @card may differ from &fw_cdev_get_info.card because requests are received + * from all cards of the Linux host. @source_node_id, @destination_node_id, and + * @generation pertain to that card. Destination node ID and bus generation may + * therefore differ from the corresponding fields of the last + * &fw_cdev_event_bus_reset. + * + * @destination_node_id may also differ from the current node ID because of a + * non-local bus ID part or in case of a broadcast write request. Note, a + * client must call an %FW_CDEV_IOC_SEND_RESPONSE ioctl even in case of a + * broadcast write request; the kernel will then release the kernel-side pending + * request but will not actually send a response packet. + * + * In case of a write request to FCP_REQUEST or FCP_RESPONSE, the kernel already + * sent a write response immediately after the request was received; in this + * case the client must still call an %FW_CDEV_IOC_SEND_RESPONSE ioctl to + * release the kernel-side pending request, though another response won't be + * sent. + * + * If the client subsequently needs to initiate requests to the sender node of + * an &fw_cdev_event_request2, it needs to use a device file with matching + * card index, node ID, and generation for outbound requests. + */ +struct fw_cdev_event_request2 { + __u64 closure; + __u32 type; + __u32 tcode; + __u64 offset; + __u32 source_node_id; + __u32 destination_node_id; + __u32 card; + __u32 generation; + __u32 handle; + __u32 length; + __u32 data[0]; +}; + +/** + * struct fw_cdev_event_iso_interrupt - Sent when an iso packet was completed + * @closure: See &fw_cdev_event_common; + * set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl + * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_ISO_INTERRUPT + * @cycle: Cycle counter of the last completed packet + * @header_length: Total length of following headers, in bytes + * @header: Stripped headers, if any + * + * This event is sent when the controller has completed an &fw_cdev_iso_packet + * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with + * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets + * without the interrupt bit set that the kernel's internal buffer for @header + * is about to overflow. (In the last case, kernels with ABI version < 5 drop + * header data up to the next interrupt packet.) + * + * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT): + * + * In version 3 and some implementations of version 2 of the ABI, &header_length + * is a multiple of 4 and &header contains timestamps of all packets up until + * the interrupt packet. The format of the timestamps is as described below for + * isochronous reception. In version 1 of the ABI, &header_length was 0. + * + * Isochronous receive events (context type %FW_CDEV_ISO_CONTEXT_RECEIVE): + * + * The headers stripped of all packets up until and including the interrupt + * packet are returned in the @header field. The amount of header data per + * packet is as specified at iso context creation by + * &fw_cdev_create_iso_context.header_size. + * + * Hence, _interrupt.header_length / _context.header_size is the number of + * packets received in this interrupt event. The client can now iterate + * through the mmap()'ed DMA buffer according to this number of packets and + * to the buffer sizes as the client specified in &fw_cdev_queue_iso. 
+ * + * Since version 2 of this ABI, the portion for each packet in _interrupt.header + * consists of the 1394 isochronous packet header, followed by a timestamp + * quadlet if &fw_cdev_create_iso_context.header_size > 4, followed by quadlets + * from the packet payload if &fw_cdev_create_iso_context.header_size > 8. + * + * Format of 1394 iso packet header: 16 bits data_length, 2 bits tag, 6 bits + * channel, 4 bits tcode, 4 bits sy, in big endian byte order. + * data_length is the actual received size of the packet without the four + * 1394 iso packet header bytes. + * + * Format of timestamp: 16 bits invalid, 3 bits cycleSeconds, 13 bits + * cycleCount, in big endian byte order. + * + * In version 1 of the ABI, no timestamp quadlet was inserted; instead, payload + * data followed directly after the 1394 is header if header_size > 4. + * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. + */ +struct fw_cdev_event_iso_interrupt { + __u64 closure; + __u32 type; + __u32 cycle; + __u32 header_length; + __u32 header[0]; +}; + +/** + * struct fw_cdev_event_iso_interrupt_mc - An iso buffer chunk was completed + * @closure: See &fw_cdev_event_common; + * set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl + * @type: %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL + * @completed: Offset into the receive buffer; data before this offset is valid + * + * This event is sent in multichannel contexts (context type + * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer + * chunks that have been completely filled and that have the + * %FW_CDEV_ISO_INTERRUPT bit set, or when explicitly requested with + * %FW_CDEV_IOC_FLUSH_ISO. + * + * The buffer is continuously filled with the following data, per packet: + * - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt, + * but in little endian byte order, + * - packet payload (as many bytes as specified in the data_length field of + * the 1394 iso packet header) in big endian byte order, + * - 0...3 padding bytes as needed to align the following trailer quadlet, + * - trailer quadlet, containing the reception timestamp as described at + * &fw_cdev_event_iso_interrupt, but in little endian byte order. + * + * Hence the per-packet size is data_length (rounded up to a multiple of 4) + 8. + * When processing the data, stop before a packet that would cross the + * @completed offset. + * + * A packet near the end of a buffer chunk will typically spill over into the + * next queued buffer chunk. It is the responsibility of the client to check + * for this condition, assemble a broken-up packet from its parts, and not to + * re-queue any buffer chunks in which as yet unread packet parts reside. + */ +struct fw_cdev_event_iso_interrupt_mc { + __u64 closure; + __u32 type; + __u32 completed; +}; + +/** + * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed + * @closure: See &fw_cdev_event_common; + * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl + * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or + * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED + * @handle: Reference by which an allocated resource can be deallocated + * @channel: Isochronous channel which was (de)allocated, if any + * @bandwidth: Bandwidth allocation units which were (de)allocated, if any + * + * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous + * resource was allocated at the IRM. The client has to check @channel and + * @bandwidth for whether the allocation actually succeeded. 
+ * + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous + * resource was deallocated at the IRM. It is also sent when automatic + * reallocation after a bus reset failed. + * + * @channel is <0 if no channel was (de)allocated or if reallocation failed. + * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed. + */ +struct fw_cdev_event_iso_resource { + __u64 closure; + __u32 type; + __u32 handle; + __s32 channel; + __s32 bandwidth; +}; + +/** + * struct fw_cdev_event_phy_packet - A PHY packet was transmitted or received + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET + * or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl + * @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED + * @rcode: %RCODE_..., indicates success or failure of transmission + * @length: Data length in bytes + * @data: Incoming data + * + * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty, + * except in case of a ping packet: Then, @length is 4, and @data[0] is the + * ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE. + * + * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data + * consists of the two PHY packet quadlets, in host byte order. + */ +struct fw_cdev_event_phy_packet { + __u64 closure; + __u32 type; + __u32 rcode; + __u32 length; + __u32 data[0]; +}; + +/** + * union fw_cdev_event - Convenience union of fw_cdev_event_ types + * @common: Valid for all types + * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET + * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE + * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST + * @request2: Valid if @common.type == %FW_CDEV_EVENT_REQUEST2 + * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT + * @iso_interrupt_mc: Valid if @common.type == + * %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL + * @iso_resource: Valid if @common.type == + * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or + * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED + * @phy_packet: Valid if @common.type == + * %FW_CDEV_EVENT_PHY_PACKET_SENT or + * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED + * + * Convenience union for userspace use. Events could be read(2) into an + * appropriately aligned char buffer and then cast to this union for further + * processing. Note that for a request, response or iso_interrupt event, + * the data[] or header[] may make the size of the full event larger than + * sizeof(union fw_cdev_event). Also note that if you attempt to read(2) + * an event into a buffer that is not large enough for it, the data that does + * not fit will be discarded so that the next read(2) will return a new event. 
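+ *
+ * A rough sketch of such a read loop (the buffer size, the descriptor fd
+ * opened on a /dev/fw* file, and the error handling are the client's
+ * choice, not mandated by this header):
+ *
+ *    char buf[16 * 1024] __attribute__((aligned(8)));
+ *    union fw_cdev_event *event = (union fw_cdev_event *)buf;
+ *    ssize_t len = read(fd, buf, sizeof(buf));
+ *
+ *    if (len >= (ssize_t)sizeof(event->common) &&
+ *        event->common.type == FW_CDEV_EVENT_BUS_RESET)
+ *        ... handle event->bus_reset ...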
+ */ +union fw_cdev_event { + struct fw_cdev_event_common common; + struct fw_cdev_event_bus_reset bus_reset; + struct fw_cdev_event_response response; + struct fw_cdev_event_request request; + struct fw_cdev_event_request2 request2; /* added in 2.6.36 */ + struct fw_cdev_event_iso_interrupt iso_interrupt; + struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */ + struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */ + struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */ +}; + +/* available since kernel version 2.6.22 */ +#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) +#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) +#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) +#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) +#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) +#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) +#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) +#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) +#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) +#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) +#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) + +/* available since kernel version 2.6.24 */ +#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) + +/* available since kernel version 2.6.30 */ +#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_GET_SPEED _IO('#', 0x11) /* returns speed code */ +#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request) +#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet) + +/* available since kernel version 2.6.34 */ +#define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2) + +/* available since kernel version 2.6.36 */ +#define FW_CDEV_IOC_SEND_PHY_PACKET _IOWR('#', 0x15, struct fw_cdev_send_phy_packet) +#define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets) +#define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels) + +/* available since kernel version 3.4 */ +#define FW_CDEV_IOC_FLUSH_ISO _IOW('#', 0x18, struct fw_cdev_flush_iso) + +/* + * ABI version history + * 1 (2.6.22) - initial version + * (2.6.24) - added %FW_CDEV_IOC_GET_CYCLE_TIMER + * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if + * &fw_cdev_create_iso_context.header_size is 8 or more + * - added %FW_CDEV_IOC_*_ISO_RESOURCE*, + * %FW_CDEV_IOC_GET_SPEED, %FW_CDEV_IOC_SEND_BROADCAST_REQUEST, + * %FW_CDEV_IOC_SEND_STREAM_PACKET + * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt + * (2.6.33) - IR has always packet-per-buffer semantics now, not one of + * dual-buffer or packet-per-buffer depending on hardware + * - shared use and auto-response for FCP registers + * 
3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable + * - added %FW_CDEV_IOC_GET_CYCLE_TIMER2 + * 4 (2.6.36) - added %FW_CDEV_EVENT_REQUEST2, %FW_CDEV_EVENT_PHY_PACKET_*, + * and &fw_cdev_allocate.region_end + * - implemented &fw_cdev_event_bus_reset.bm_node_id + * - added %FW_CDEV_IOC_SEND_PHY_PACKET, _RECEIVE_PHY_PACKETS + * - added %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL, + * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and + * %FW_CDEV_IOC_SET_ISO_CHANNELS + * 5 (3.4) - send %FW_CDEV_EVENT_ISO_INTERRUPT events when needed to + * avoid dropping data + * - added %FW_CDEV_IOC_FLUSH_ISO + */ + +/** + * struct fw_cdev_get_info - General purpose information ioctl + * @version: The version field is just a running serial number. Both an + * input parameter (ABI version implemented by the client) and + * output parameter (ABI version implemented by the kernel). + * A client shall fill in the ABI @version for which the client + * was implemented. This is necessary for forward compatibility. + * @rom_length: If @rom is non-zero, up to @rom_length bytes of Configuration + * ROM will be copied into that user space address. In either + * case, @rom_length is updated with the actual length of the + * Configuration ROM. + * @rom: If non-zero, address of a buffer to be filled by a copy of the + * device's Configuration ROM + * @bus_reset: If non-zero, address of a buffer to be filled by a + * &struct fw_cdev_event_bus_reset with the current state + * of the bus. This does not cause a bus reset to happen. + * @bus_reset_closure: Value of &closure in this and subsequent bus reset events + * @card: The index of the card this device belongs to + * + * The %FW_CDEV_IOC_GET_INFO ioctl is usually the very first one which a client + * performs right after it opened a /dev/fw* file. + * + * As a side effect, reception of %FW_CDEV_EVENT_BUS_RESET events to be read(2) + * is started by this ioctl. + */ +struct fw_cdev_get_info { + __u32 version; + __u32 rom_length; + __u64 rom; + __u64 bus_reset; + __u64 bus_reset_closure; + __u32 card; +}; + +/** + * struct fw_cdev_send_request - Send an asynchronous request packet + * @tcode: Transaction code of the request + * @length: Length of outgoing payload, in bytes + * @offset: 48-bit offset at destination node + * @closure: Passed back to userspace in the response event + * @data: Userspace pointer to payload + * @generation: The bus generation where packet is valid + * + * Send a request to the device. This ioctl implements all outgoing requests. + * Both quadlet and block request specify the payload as a pointer to the data + * in the @data field. Once the transaction completes, the kernel writes an + * &fw_cdev_event_response event back. The @closure field is passed back to + * user space in the response event. + */ +struct fw_cdev_send_request { + __u32 tcode; + __u32 length; + __u64 offset; + __u64 closure; + __u64 data; + __u32 generation; +}; + +/** + * struct fw_cdev_send_response - Send an asynchronous response packet + * @rcode: Response code as determined by the userspace handler + * @length: Length of outgoing payload, in bytes + * @data: Userspace pointer to payload + * @handle: The handle from the &fw_cdev_event_request + * + * Send a response to an incoming request. By setting up an address range using + * the %FW_CDEV_IOC_ALLOCATE ioctl, userspace can listen for incoming requests. An + * incoming request will generate an %FW_CDEV_EVENT_REQUEST, and userspace must + * send a reply using this ioctl. 
The event has a handle to the kernel-side + * pending transaction, which should be used with this ioctl. + */ +struct fw_cdev_send_response { + __u32 rcode; + __u32 length; + __u64 data; + __u32 handle; +}; + +/** + * struct fw_cdev_allocate - Allocate a CSR in an address range + * @offset: Start offset of the address range + * @closure: To be passed back to userspace in request events + * @length: Length of the CSR, in bytes + * @handle: Handle to the allocation, written by the kernel + * @region_end: First address above the address range (added in ABI v4, 2.6.36) + * + * Allocate an address range in the 48-bit address space on the local node + * (the controller). This allows userspace to listen for requests with an + * offset within that address range. Every time when the kernel receives a + * request within the range, an &fw_cdev_event_request2 event will be emitted. + * (If the kernel or the client implements ABI version <= 3, an + * &fw_cdev_event_request will be generated instead.) + * + * The @closure field is passed back to userspace in these request events. + * The @handle field is an out parameter, returning a handle to the allocated + * range to be used for later deallocation of the range. + * + * The address range is allocated on all local nodes. The address allocation + * is exclusive except for the FCP command and response registers. If an + * exclusive address region is already in use, the ioctl fails with errno set + * to %EBUSY. + * + * If kernel and client implement ABI version >= 4, the kernel looks up a free + * spot of size @length inside [@offset..@region_end) and, if found, writes + * the start address of the new CSR back in @offset. I.e. @offset is an + * in and out parameter. If this automatic placement of a CSR in a bigger + * address range is not desired, the client simply needs to set @region_end + * = @offset + @length. + * + * If the kernel or the client implements ABI version <= 3, @region_end is + * ignored and effectively assumed to be @offset + @length. + * + * @region_end is only present in a kernel header >= 2.6.36. If necessary, + * this can for example be tested by #ifdef FW_CDEV_EVENT_REQUEST2. + */ +struct fw_cdev_allocate { + __u64 offset; + __u64 closure; + __u32 length; + __u32 handle; + __u64 region_end; /* available since kernel version 2.6.36 */ +}; + +/** + * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource + * @handle: Handle to the address range or iso resource, as returned by the + * kernel when the range or resource was allocated + */ +struct fw_cdev_deallocate { + __u32 handle; +}; + +#define FW_CDEV_LONG_RESET 0 +#define FW_CDEV_SHORT_RESET 1 + +/** + * struct fw_cdev_initiate_bus_reset - Initiate a bus reset + * @type: %FW_CDEV_SHORT_RESET or %FW_CDEV_LONG_RESET + * + * Initiate a bus reset for the bus this device is on. The bus reset can be + * either the original (long) bus reset or the arbitrated (short) bus reset + * introduced in 1394a-2000. + * + * The ioctl returns immediately. A subsequent &fw_cdev_event_bus_reset + * indicates when the reset actually happened. Since ABI v4, this may be + * considerably later than the ioctl because the kernel ensures a grace period + * between subsequent bus resets as per IEEE 1394 bus management specification. 
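+ *
+ * A minimal sketch of issuing the ioctl (fd is assumed to be an open
+ * /dev/fw* file descriptor; error handling is omitted):
+ *
+ *    struct fw_cdev_initiate_bus_reset reset = {
+ *        .type = FW_CDEV_SHORT_RESET,
+ *    };
+ *    ioctl(fd, FW_CDEV_IOC_INITIATE_BUS_RESET, &reset);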
+ */ +struct fw_cdev_initiate_bus_reset { + __u32 type; +}; + +/** + * struct fw_cdev_add_descriptor - Add contents to the local node's config ROM + * @immediate: If non-zero, immediate key to insert before pointer + * @key: Upper 8 bits of root directory pointer + * @data: Userspace pointer to contents of descriptor block + * @length: Length of descriptor block data, in quadlets + * @handle: Handle to the descriptor, written by the kernel + * + * Add a descriptor block and optionally a preceding immediate key to the local + * node's Configuration ROM. + * + * The @key field specifies the upper 8 bits of the descriptor root directory + * pointer and the @data and @length fields specify the contents. The @key + * should be of the form 0xXX000000. The offset part of the root directory entry + * will be filled in by the kernel. + * + * If not 0, the @immediate field specifies an immediate key which will be + * inserted before the root directory pointer. + * + * @immediate, @key, and @data array elements are CPU-endian quadlets. + * + * If successful, the kernel adds the descriptor and writes back a @handle to + * the kernel-side object to be used for later removal of the descriptor block + * and immediate key. The kernel will also generate a bus reset to signal the + * change of the Configuration ROM to other nodes. + * + * This ioctl affects the Configuration ROMs of all local nodes. + * The ioctl only succeeds on device files which represent a local node. + */ +struct fw_cdev_add_descriptor { + __u32 immediate; + __u32 key; + __u64 data; + __u32 length; + __u32 handle; +}; + +/** + * struct fw_cdev_remove_descriptor - Remove contents from the Configuration ROM + * @handle: Handle to the descriptor, as returned by the kernel when the + * descriptor was added + * + * Remove a descriptor block and accompanying immediate key from the local + * nodes' Configuration ROMs. The kernel will also generate a bus reset to + * signal the change of the Configuration ROM to other nodes. + */ +struct fw_cdev_remove_descriptor { + __u32 handle; +}; + +#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 +#define FW_CDEV_ISO_CONTEXT_RECEIVE 1 +#define FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 /* added in 2.6.36 */ + +/** + * struct fw_cdev_create_iso_context - Create a context for isochronous I/O + * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE or + * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL + * @header_size: Header size to strip in single-channel reception + * @channel: Channel to bind to in single-channel reception or transmission + * @speed: Transmission speed + * @closure: To be returned in &fw_cdev_event_iso_interrupt or + * &fw_cdev_event_iso_interrupt_multichannel + * @handle: Handle to context, written back by kernel + * + * Prior to sending or receiving isochronous I/O, a context must be created. + * The context records information about the transmit or receive configuration + * and typically maps to an underlying hardware resource. A context is set up + * for either sending or receiving. It is bound to a specific isochronous + * @channel. + * + * In case of multichannel reception, @header_size and @channel are ignored + * and the channels are selected by %FW_CDEV_IOC_SET_ISO_CHANNELS. + * + * For %FW_CDEV_ISO_CONTEXT_RECEIVE contexts, @header_size must be at least 4 + * and must be a multiple of 4. It is ignored in other context types. + * + * @speed is ignored in receive context types. 
+ * + * If a context was successfully created, the kernel writes back a handle to the + * context, which must be passed in for subsequent operations on that context. + * + * Limitations: + * No more than one iso context can be created per fd. + * The total number of contexts that all userspace and kernelspace drivers can + * create on a card at a time is a hardware limit, typically 4 or 8 contexts per + * direction, and of them at most one multichannel receive context. + */ +struct fw_cdev_create_iso_context { + __u32 type; + __u32 header_size; + __u32 channel; + __u32 speed; + __u64 closure; + __u32 handle; +}; + +/** + * struct fw_cdev_set_iso_channels - Select channels in multichannel reception + * @channels: Bitmask of channels to listen to + * @handle: Handle of the mutichannel receive context + * + * @channels is the bitwise or of 1ULL << n for each channel n to listen to. + * + * The ioctl fails with errno %EBUSY if there is already another receive context + * on a channel in @channels. In that case, the bitmask of all unoccupied + * channels is returned in @channels. + */ +struct fw_cdev_set_iso_channels { + __u64 channels; + __u32 handle; +}; + +#define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v) +#define FW_CDEV_ISO_INTERRUPT (1 << 16) +#define FW_CDEV_ISO_SKIP (1 << 17) +#define FW_CDEV_ISO_SYNC (1 << 17) +#define FW_CDEV_ISO_TAG(v) ((v) << 18) +#define FW_CDEV_ISO_SY(v) ((v) << 20) +#define FW_CDEV_ISO_HEADER_LENGTH(v) ((v) << 24) + +/** + * struct fw_cdev_iso_packet - Isochronous packet + * @control: Contains the header length (8 uppermost bits), + * the sy field (4 bits), the tag field (2 bits), a sync flag + * or a skip flag (1 bit), an interrupt flag (1 bit), and the + * payload length (16 lowermost bits) + * @header: Header and payload in case of a transmit context. + * + * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. + * Use the FW_CDEV_ISO_ macros to fill in @control. + * The @header array is empty in case of receive contexts. + * + * Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT: + * + * @control.HEADER_LENGTH must be a multiple of 4. It specifies the numbers of + * bytes in @header that will be prepended to the packet's payload. These bytes + * are copied into the kernel and will not be accessed after the ioctl has + * returned. + * + * The @control.SY and TAG fields are copied to the iso packet header. These + * fields are specified by IEEE 1394a and IEC 61883-1. + * + * The @control.SKIP flag specifies that no packet is to be sent in a frame. + * When using this, all other fields except @control.INTERRUPT must be zero. + * + * When a packet with the @control.INTERRUPT flag set has been completed, an + * &fw_cdev_event_iso_interrupt event will be sent. + * + * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE: + * + * @control.HEADER_LENGTH must be a multiple of the context's header_size. + * If the HEADER_LENGTH is larger than the context's header_size, multiple + * packets are queued for this entry. + * + * The @control.SY and TAG fields are ignored. + * + * If the @control.SYNC flag is set, the context drops all packets until a + * packet with a sy field is received which matches &fw_cdev_start_iso.sync. + * + * @control.PAYLOAD_LENGTH defines how many payload bytes can be received for + * one packet (in addition to payload quadlets that have been defined as headers + * and are stripped and returned in the &fw_cdev_event_iso_interrupt structure). + * If more bytes are received, the additional bytes are dropped. 
If less bytes + * are received, the remaining bytes in this part of the payload buffer will not + * be written to, not even by the next packet. I.e., packets received in + * consecutive frames will not necessarily be consecutive in memory. If an + * entry has queued multiple packets, the PAYLOAD_LENGTH is divided equally + * among them. + * + * When a packet with the @control.INTERRUPT flag set has been completed, an + * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued + * multiple receive packets is completed when its last packet is completed. + * + * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL: + * + * Here, &fw_cdev_iso_packet would be more aptly named _iso_buffer_chunk since + * it specifies a chunk of the mmap()'ed buffer, while the number and alignment + * of packets to be placed into the buffer chunk is not known beforehand. + * + * @control.PAYLOAD_LENGTH is the size of the buffer chunk and specifies room + * for header, payload, padding, and trailer bytes of one or more packets. + * It must be a multiple of 4. + * + * @control.HEADER_LENGTH, TAG and SY are ignored. SYNC is treated as described + * for single-channel reception. + * + * When a buffer chunk with the @control.INTERRUPT flag set has been filled + * entirely, an &fw_cdev_event_iso_interrupt_mc event will be sent. + */ +struct fw_cdev_iso_packet { + __u32 control; + __u32 header[0]; +}; + +/** + * struct fw_cdev_queue_iso - Queue isochronous packets for I/O + * @packets: Userspace pointer to an array of &fw_cdev_iso_packet + * @data: Pointer into mmap()'ed payload buffer + * @size: Size of the @packets array, in bytes + * @handle: Isochronous context handle + * + * Queue a number of isochronous packets for reception or transmission. + * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs, + * which describe how to transmit from or receive into a contiguous region + * of a mmap()'ed payload buffer. As part of transmit packet descriptors, + * a series of headers can be supplied, which will be prepended to the + * payload during DMA. + * + * The kernel may or may not queue all packets, but will write back updated + * values of the @packets, @data and @size fields, so the ioctl can be + * resubmitted easily. + * + * In case of a multichannel receive context, @data must be quadlet-aligned + * relative to the buffer start. + */ +struct fw_cdev_queue_iso { + __u64 packets; + __u64 data; + __u32 size; + __u32 handle; +}; + +#define FW_CDEV_ISO_CONTEXT_MATCH_TAG0 1 +#define FW_CDEV_ISO_CONTEXT_MATCH_TAG1 2 +#define FW_CDEV_ISO_CONTEXT_MATCH_TAG2 4 +#define FW_CDEV_ISO_CONTEXT_MATCH_TAG3 8 +#define FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS 15 + +/** + * struct fw_cdev_start_iso - Start an isochronous transmission or reception + * @cycle: Cycle in which to start I/O. If @cycle is greater than or + * equal to 0, the I/O will start on that cycle. + * @sync: Determines the value to wait for for receive packets that have + * the %FW_CDEV_ISO_SYNC bit set + * @tags: Tag filter bit mask. Only valid for isochronous reception. + * Determines the tag values for which packets will be accepted. + * Use FW_CDEV_ISO_CONTEXT_MATCH_ macros to set @tags. 
+ * @handle: Isochronous context handle within which to transmit or receive + */ +struct fw_cdev_start_iso { + __s32 cycle; + __u32 sync; + __u32 tags; + __u32 handle; +}; + +/** + * struct fw_cdev_stop_iso - Stop an isochronous transmission or reception + * @handle: Handle of isochronous context to stop + */ +struct fw_cdev_stop_iso { + __u32 handle; +}; + +/** + * struct fw_cdev_flush_iso - flush completed iso packets + * @handle: handle of isochronous context to flush + * + * For %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE contexts, + * report any completed packets. + * + * For %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL contexts, report the current + * offset in the receive buffer, if it has changed; this is typically in the + * middle of some buffer chunk. + * + * Any %FW_CDEV_EVENT_ISO_INTERRUPT or %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL + * events generated by this ioctl are sent synchronously, i.e., are available + * for reading from the file descriptor when this ioctl returns. + */ +struct fw_cdev_flush_iso { + __u32 handle; +}; + +/** + * struct fw_cdev_get_cycle_timer - read cycle timer register + * @local_time: system time, in microseconds since the Epoch + * @cycle_timer: Cycle Time register contents + * + * Same as %FW_CDEV_IOC_GET_CYCLE_TIMER2, but fixed to use %CLOCK_REALTIME + * and only with microseconds resolution. + * + * In version 1 and 2 of the ABI, this ioctl returned unreliable (non- + * monotonic) @cycle_timer values on certain controllers. + */ +struct fw_cdev_get_cycle_timer { + __u64 local_time; + __u32 cycle_timer; +}; + +/** + * struct fw_cdev_get_cycle_timer2 - read cycle timer register + * @tv_sec: system time, seconds + * @tv_nsec: system time, sub-seconds part in nanoseconds + * @clk_id: input parameter, clock from which to get the system time + * @cycle_timer: Cycle Time register contents + * + * The %FW_CDEV_IOC_GET_CYCLE_TIMER2 ioctl reads the isochronous cycle timer + * and also the system clock. This allows to correlate reception time of + * isochronous packets with system time. + * + * @clk_id lets you choose a clock like with POSIX' clock_gettime function. + * Supported @clk_id values are POSIX' %CLOCK_REALTIME and %CLOCK_MONOTONIC + * and Linux' %CLOCK_MONOTONIC_RAW. + * + * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and + * 12 bits cycleOffset, in host byte order. Cf. the Cycle Time register + * per IEEE 1394 or Isochronous Cycle Timer register per OHCI-1394. + */ +struct fw_cdev_get_cycle_timer2 { + __s64 tv_sec; + __s32 tv_nsec; + __s32 clk_id; + __u32 cycle_timer; +}; + +/** + * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth + * @closure: Passed back to userspace in corresponding iso resource events + * @channels: Isochronous channels of which one is to be (de)allocated + * @bandwidth: Isochronous bandwidth units to be (de)allocated + * @handle: Handle to the allocation, written by the kernel (only valid in + * case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls) + * + * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an + * isochronous channel and/or of isochronous bandwidth at the isochronous + * resource manager (IRM). Only one of the channels specified in @channels is + * allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after + * communication with the IRM, indicating success or failure in the event data. + * The kernel will automatically reallocate the resources after bus resets. 
+ * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event + * will be sent. The kernel will also automatically deallocate the resources + * when the file descriptor is closed. + * + * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate + * deallocation of resources which were allocated as described above. + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. + * + * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation + * without automatic re- or deallocation. + * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation, + * indicating success or failure in its data. + * + * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like + * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed + * instead of allocated. + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. + * + * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources + * for the lifetime of the fd or @handle. + * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources + * for the duration of a bus generation. + * + * @channels is a host-endian bitfield with the least significant bit + * representing channel 0 and the most significant bit representing channel 63: + * 1ULL << c for each channel c that is a candidate for (de)allocation. + * + * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send + * one quadlet of data (payload or header data) at speed S1600. + */ +struct fw_cdev_allocate_iso_resource { + __u64 closure; + __u64 channels; + __u32 bandwidth; + __u32 handle; +}; + +/** + * struct fw_cdev_send_stream_packet - send an asynchronous stream packet + * @length: Length of outgoing payload, in bytes + * @tag: Data format tag + * @channel: Isochronous channel to transmit to + * @sy: Synchronization code + * @closure: Passed back to userspace in the response event + * @data: Userspace pointer to payload + * @generation: The bus generation where packet is valid + * @speed: Speed to transmit at + * + * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet + * to every device which is listening to the specified channel. The kernel + * writes an &fw_cdev_event_response event which indicates success or failure of + * the transmission. + */ +struct fw_cdev_send_stream_packet { + __u32 length; + __u32 tag; + __u32 channel; + __u32 sy; + __u64 closure; + __u64 data; + __u32 generation; + __u32 speed; +}; + +/** + * struct fw_cdev_send_phy_packet - send a PHY packet + * @closure: Passed back to userspace in the PHY-packet-sent event + * @data: First and second quadlet of the PHY packet + * @generation: The bus generation where packet is valid + * + * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes + * on the same card as this device. After transmission, an + * %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated. + * + * The payload @data[] shall be specified in host byte order. Usually, + * @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets + * are an exception to this rule. + * + * The ioctl is only permitted on device files which represent a local node. 
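+ *
+ * A sketch of sending one self-inverting PHY packet (the quadlet value,
+ * the generation and the fd are placeholders supplied by the caller; the
+ * generation must come from a current bus reset event):
+ *
+ *    struct fw_cdev_send_phy_packet packet = {
+ *        .data       = { quadlet, ~quadlet },
+ *        .generation = generation,
+ *    };
+ *    ioctl(fd, FW_CDEV_IOC_SEND_PHY_PACKET, &packet);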
+ */ +struct fw_cdev_send_phy_packet { + __u64 closure; + __u32 data[2]; + __u32 generation; +}; + +/** + * struct fw_cdev_receive_phy_packets - start reception of PHY packets + * @closure: Passed back to userspace in phy packet events + * + * This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to + * incoming PHY packets from any node on the same bus as the device. + * + * The ioctl is only permitted on device files which represent a local node. + */ +struct fw_cdev_receive_phy_packets { + __u64 closure; +}; + +#define FW_CDEV_VERSION 3 /* Meaningless legacy macro; don't use it. */ + +#endif /* _LINUX_FIREWIRE_CDEV_H */ diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h new file mode 100644 index 00000000..9b4bb5fb --- /dev/null +++ b/include/linux/firewire-constants.h @@ -0,0 +1,92 @@ +/* + * IEEE 1394 constants. + * + * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _LINUX_FIREWIRE_CONSTANTS_H +#define _LINUX_FIREWIRE_CONSTANTS_H + +#define TCODE_WRITE_QUADLET_REQUEST 0x0 +#define TCODE_WRITE_BLOCK_REQUEST 0x1 +#define TCODE_WRITE_RESPONSE 0x2 +#define TCODE_READ_QUADLET_REQUEST 0x4 +#define TCODE_READ_BLOCK_REQUEST 0x5 +#define TCODE_READ_QUADLET_RESPONSE 0x6 +#define TCODE_READ_BLOCK_RESPONSE 0x7 +#define TCODE_CYCLE_START 0x8 +#define TCODE_LOCK_REQUEST 0x9 +#define TCODE_STREAM_DATA 0xa +#define TCODE_LOCK_RESPONSE 0xb + +#define EXTCODE_MASK_SWAP 0x1 +#define EXTCODE_COMPARE_SWAP 0x2 +#define EXTCODE_FETCH_ADD 0x3 +#define EXTCODE_LITTLE_ADD 0x4 +#define EXTCODE_BOUNDED_ADD 0x5 +#define EXTCODE_WRAP_ADD 0x6 +#define EXTCODE_VENDOR_DEPENDENT 0x7 + +/* Linux firewire-core (Juju) specific tcodes */ +#define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP) +#define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP) +#define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD) +#define TCODE_LOCK_LITTLE_ADD (0x10 | EXTCODE_LITTLE_ADD) +#define TCODE_LOCK_BOUNDED_ADD (0x10 | EXTCODE_BOUNDED_ADD) +#define TCODE_LOCK_WRAP_ADD (0x10 | EXTCODE_WRAP_ADD) +#define TCODE_LOCK_VENDOR_DEPENDENT (0x10 | EXTCODE_VENDOR_DEPENDENT) + +#define RCODE_COMPLETE 0x0 +#define RCODE_CONFLICT_ERROR 0x4 +#define RCODE_DATA_ERROR 0x5 +#define RCODE_TYPE_ERROR 0x6 +#define RCODE_ADDRESS_ERROR 0x7 + +/* Linux firewire-core (Juju) specific rcodes */ +#define RCODE_SEND_ERROR 0x10 +#define RCODE_CANCELLED 0x11 +#define RCODE_BUSY 0x12 +#define RCODE_GENERATION 0x13 +#define RCODE_NO_ACK 0x14 + +#define SCODE_100 0x0 +#define SCODE_200 0x1 +#define SCODE_400 0x2 +#define SCODE_800 0x3 +#define SCODE_1600 0x4 +#define SCODE_3200 0x5 +#define SCODE_BETA 0x3 + +#define ACK_COMPLETE 0x1 +#define ACK_PENDING 0x2 +#define ACK_BUSY_X 0x4 +#define ACK_BUSY_A 0x5 +#define ACK_BUSY_B 0x6 +#define ACK_DATA_ERROR 0xd +#define ACK_TYPE_ERROR 0xe + +#define RETRY_1 0x00 +#define RETRY_X 0x01 +#define RETRY_A 0x02 +#define RETRY_B 0x03 + +#endif /* _LINUX_FIREWIRE_CONSTANTS_H */ diff --git a/include/linux/firewire.h b/include/linux/firewire.h new file mode 100644 index 00000000..cdc9b719 --- /dev/null +++ b/include/linux/firewire.h @@ -0,0 +1,440 @@ +#ifndef _LINUX_FIREWIRE_H +#define _LINUX_FIREWIRE_H + +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include <linux/atomic.h> +#include <asm/byteorder.h> + +#define CSR_REGISTER_BASE 0xfffff0000000ULL + +/* register offsets are relative to CSR_REGISTER_BASE */ +#define CSR_STATE_CLEAR 0x0 +#define CSR_STATE_SET 0x4 +#define CSR_NODE_IDS 0x8 +#define CSR_RESET_START 0xc +#define CSR_SPLIT_TIMEOUT_HI 0x18 +#define CSR_SPLIT_TIMEOUT_LO 0x1c +#define CSR_CYCLE_TIME 0x200 +#define CSR_BUS_TIME 0x204 +#define CSR_BUSY_TIMEOUT 0x210 +#define CSR_PRIORITY_BUDGET 0x218 +#define CSR_BUS_MANAGER_ID 0x21c +#define CSR_BANDWIDTH_AVAILABLE 0x220 +#define CSR_CHANNELS_AVAILABLE 0x224 +#define CSR_CHANNELS_AVAILABLE_HI 0x224 +#define CSR_CHANNELS_AVAILABLE_LO 0x228 +#define CSR_MAINT_UTILITY 0x230 +#define CSR_BROADCAST_CHANNEL 0x234 +#define CSR_CONFIG_ROM 0x400 +#define CSR_CONFIG_ROM_END 0x800 +#define CSR_OMPR 0x900 +#define CSR_OPCR(i) (0x904 + (i) * 4) +#define CSR_IMPR 0x980 +#define CSR_IPCR(i) (0x984 + (i) * 4) +#define CSR_FCP_COMMAND 0xB00 +#define 
CSR_FCP_RESPONSE 0xD00 +#define CSR_FCP_END 0xF00 +#define CSR_TOPOLOGY_MAP 0x1000 +#define CSR_TOPOLOGY_MAP_END 0x1400 +#define CSR_SPEED_MAP 0x2000 +#define CSR_SPEED_MAP_END 0x3000 + +#define CSR_OFFSET 0x40 +#define CSR_LEAF 0x80 +#define CSR_DIRECTORY 0xc0 + +#define CSR_DESCRIPTOR 0x01 +#define CSR_VENDOR 0x03 +#define CSR_HARDWARE_VERSION 0x04 +#define CSR_UNIT 0x11 +#define CSR_SPECIFIER_ID 0x12 +#define CSR_VERSION 0x13 +#define CSR_DEPENDENT_INFO 0x14 +#define CSR_MODEL 0x17 +#define CSR_DIRECTORY_ID 0x20 + +struct fw_csr_iterator { + const u32 *p; + const u32 *end; +}; + +void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p); +int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value); +int fw_csr_string(const u32 *directory, int key, char *buf, size_t size); + +extern struct bus_type fw_bus_type; + +struct fw_card_driver; +struct fw_node; + +struct fw_card { + const struct fw_card_driver *driver; + struct device *device; + struct kref kref; + struct completion done; + + int node_id; + int generation; + int current_tlabel; + u64 tlabel_mask; + struct list_head transaction_list; + u64 reset_jiffies; + + u32 split_timeout_hi; + u32 split_timeout_lo; + unsigned int split_timeout_cycles; + unsigned int split_timeout_jiffies; + + unsigned long long guid; + unsigned max_receive; + int link_speed; + int config_rom_generation; + + spinlock_t lock; /* Take this lock when handling the lists in + * this struct. */ + struct fw_node *local_node; + struct fw_node *root_node; + struct fw_node *irm_node; + u8 color; /* must be u8 to match the definition in struct fw_node */ + int gap_count; + bool beta_repeaters_present; + + int index; + struct list_head link; + + struct list_head phy_receiver_list; + + struct delayed_work br_work; /* bus reset job */ + bool br_short; + + struct delayed_work bm_work; /* bus manager job */ + int bm_retries; + int bm_generation; + int bm_node_id; + bool bm_abdicate; + + bool priority_budget_implemented; /* controller feature */ + bool broadcast_channel_auto_allocated; /* controller feature */ + + bool broadcast_channel_allocated; + u32 broadcast_channel; + __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; + + __be32 maint_utility_register; +}; + +struct fw_attribute_group { + struct attribute_group *groups[2]; + struct attribute_group group; + struct attribute *attrs[12]; +}; + +enum fw_device_state { + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING, + FW_DEVICE_GONE, + FW_DEVICE_SHUTDOWN, +}; + +/* + * Note, fw_device.generation always has to be read before fw_device.node_id. + * Use SMP memory barriers to ensure this. Otherwise requests will be sent + * to an outdated node_id if the generation was updated in the meantime due + * to a bus reset. + * + * Likewise, fw-core will take care to update .node_id before .generation so + * that whenever fw_device.generation is current WRT the actual bus generation, + * fw_device.node_id is guaranteed to be current too. + * + * The same applies to fw_device.card->node_id vs. fw_device.generation. + * + * fw_device.config_rom and fw_device.config_rom_length may be accessed during + * the lifetime of any fw_unit belonging to the fw_device, before device_del() + * was called on the last fw_unit. Alternatively, they may be accessed while + * holding fw_device_rwsem. 
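+ *
+ * A consistent snapshot of .generation and .node_id can be taken as in the
+ * following sketch; the smp_rmb() keeps the node_id load from being
+ * reordered before the generation load (local variable names are up to
+ * the caller):
+ *
+ *    generation = device->generation;
+ *    smp_rmb();
+ *    node_id = device->node_id;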
+ */ +struct fw_device { + atomic_t state; + struct fw_node *node; + int node_id; + int generation; + unsigned max_speed; + struct fw_card *card; + struct device device; + + struct mutex client_list_mutex; + struct list_head client_list; + + const u32 *config_rom; + size_t config_rom_length; + int config_rom_retries; + unsigned is_local:1; + unsigned max_rec:4; + unsigned cmc:1; + unsigned irmc:1; + unsigned bc_implemented:2; + + struct delayed_work work; + struct fw_attribute_group attribute_group; +}; + +static inline struct fw_device *fw_device(struct device *dev) +{ + return container_of(dev, struct fw_device, device); +} + +static inline int fw_device_is_shutdown(struct fw_device *device) +{ + return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; +} + +int fw_device_enable_phys_dma(struct fw_device *device); + +/* + * fw_unit.directory must not be accessed after device_del(&fw_unit.device). + */ +struct fw_unit { + struct device device; + const u32 *directory; + struct fw_attribute_group attribute_group; +}; + +static inline struct fw_unit *fw_unit(struct device *dev) +{ + return container_of(dev, struct fw_unit, device); +} + +static inline struct fw_unit *fw_unit_get(struct fw_unit *unit) +{ + get_device(&unit->device); + + return unit; +} + +static inline void fw_unit_put(struct fw_unit *unit) +{ + put_device(&unit->device); +} + +static inline struct fw_device *fw_parent_device(struct fw_unit *unit) +{ + return fw_device(unit->device.parent); +} + +struct ieee1394_device_id; + +struct fw_driver { + struct device_driver driver; + /* Called when the parent device sits through a bus reset. */ + void (*update)(struct fw_unit *unit); + const struct ieee1394_device_id *id_table; +}; + +struct fw_packet; +struct fw_request; + +typedef void (*fw_packet_callback_t)(struct fw_packet *packet, + struct fw_card *card, int status); +typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, + void *data, size_t length, + void *callback_data); +/* + * Important note: Except for the FCP registers, the callback must guarantee + * that either fw_send_response() or kfree() is called on the @request. + */ +typedef void (*fw_address_callback_t)(struct fw_card *card, + struct fw_request *request, + int tcode, int destination, int source, + int generation, + unsigned long long offset, + void *data, size_t length, + void *callback_data); + +struct fw_packet { + int speed; + int generation; + u32 header[4]; + size_t header_length; + void *payload; + size_t payload_length; + dma_addr_t payload_bus; + bool payload_mapped; + u32 timestamp; + + /* + * This callback is called when the packet transmission has completed. + * For successful transmission, the status code is the ack received + * from the destination. Otherwise it is one of the juju-specific + * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK. + * The callback can be called from tasklet context and thus + * must never block. + */ + fw_packet_callback_t callback; + int ack; + struct list_head link; + void *driver_data; +}; + +struct fw_transaction { + int node_id; /* The generation is implied; it is always the current. */ + int tlabel; + struct list_head link; + struct fw_card *card; + bool is_split_transaction; + struct timer_list split_timeout_timer; + + struct fw_packet packet; + + /* + * The data passed to the callback is valid only during the + * callback. 
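+ *
+ * A completion callback that needs the payload later must therefore copy
+ * it before returning, roughly as in this sketch (struct my_request and
+ * its members are the caller's own, not defined here):
+ *
+ *    static void complete_read(struct fw_card *card, int rcode,
+ *                              void *data, size_t length, void *callback_data)
+ *    {
+ *        struct my_request *req = callback_data;
+ *
+ *        if (rcode == RCODE_COMPLETE)
+ *            memcpy(req->buffer, data, min(length, req->buffer_size));
+ *        complete(&req->done);
+ *    }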
+ */ + fw_transaction_callback_t callback; + void *callback_data; +}; + +struct fw_address_handler { + u64 offset; + size_t length; + fw_address_callback_t address_callback; + void *callback_data; + struct list_head link; +}; + +struct fw_address_region { + u64 start; + u64 end; +}; + +extern const struct fw_address_region fw_high_memory_region; + +int fw_core_add_address_handler(struct fw_address_handler *handler, + const struct fw_address_region *region); +void fw_core_remove_address_handler(struct fw_address_handler *handler); +void fw_send_response(struct fw_card *card, + struct fw_request *request, int rcode); +void fw_send_request(struct fw_card *card, struct fw_transaction *t, + int tcode, int destination_id, int generation, int speed, + unsigned long long offset, void *payload, size_t length, + fw_transaction_callback_t callback, void *callback_data); +int fw_cancel_transaction(struct fw_card *card, + struct fw_transaction *transaction); +int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, + int generation, int speed, unsigned long long offset, + void *payload, size_t length); + +static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) +{ + return tag << 14 | channel << 8 | sy; +} + +struct fw_descriptor { + struct list_head link; + size_t length; + u32 immediate; + u32 key; + const u32 *data; +}; + +int fw_core_add_descriptor(struct fw_descriptor *desc); +void fw_core_remove_descriptor(struct fw_descriptor *desc); + +/* + * The iso packet format allows for an immediate header/payload part + * stored in 'header' immediately after the packet info plus an + * indirect payload part that is pointer to by the 'payload' field. + * Applications can use one or the other or both to implement simple + * low-bandwidth streaming (e.g. audio) or more advanced + * scatter-gather streaming (e.g. assembling video frame automatically). + */ +struct fw_iso_packet { + u16 payload_length; /* Length of indirect payload */ + u32 interrupt:1; /* Generate interrupt on this packet */ + u32 skip:1; /* tx: Set to not send packet at all */ + /* rx: Sync bit, wait for matching sy */ + u32 tag:2; /* tx: Tag in packet header */ + u32 sy:4; /* tx: Sy in packet header */ + u32 header_length:8; /* Length of immediate header */ + u32 header[0]; /* tx: Top of 1394 isoch. data_block */ +}; + +#define FW_ISO_CONTEXT_TRANSMIT 0 +#define FW_ISO_CONTEXT_RECEIVE 1 +#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 + +#define FW_ISO_CONTEXT_MATCH_TAG0 1 +#define FW_ISO_CONTEXT_MATCH_TAG1 2 +#define FW_ISO_CONTEXT_MATCH_TAG2 4 +#define FW_ISO_CONTEXT_MATCH_TAG3 8 +#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15 + +/* + * An iso buffer is just a set of pages mapped for DMA in the + * specified direction. Since the pages are to be used for DMA, they + * are not mapped into the kernel virtual address space. We store the + * DMA address in the page private. The helper function + * fw_iso_buffer_map() will map the pages into a given vma. 
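Editor's note: a hedged sketch of the synchronous transaction helper declared above; TCODE_READ_QUADLET_REQUEST and RCODE_COMPLETE are assumed to come from <linux/firewire-constants.h>, and the errno mapping is illustrative.

/* Illustrative: blocking quadlet read via fw_run_transaction(). */
static int example_read_quadlet(struct fw_card *card, int node_id,
                                int generation, int speed,
                                unsigned long long offset, __be32 *data)
{
        int rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
                                       node_id, generation, speed,
                                       offset, data, sizeof(*data));

        return rcode == RCODE_COMPLETE ? 0 : -EIO;
}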
+ */ +struct fw_iso_buffer { + enum dma_data_direction direction; + struct page **pages; + int page_count; +}; + +int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, + int page_count, enum dma_data_direction direction); +void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); +size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); + +struct fw_iso_context; +typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, + u32 cycle, size_t header_length, + void *header, void *data); +typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, + dma_addr_t completed, void *data); +struct fw_iso_context { + struct fw_card *card; + int type; + int channel; + int speed; + size_t header_size; + union { + fw_iso_callback_t sc; + fw_iso_mc_callback_t mc; + } callback; + void *callback_data; +}; + +struct fw_iso_context *fw_iso_context_create(struct fw_card *card, + int type, int channel, int speed, size_t header_size, + fw_iso_callback_t callback, void *callback_data); +int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); +int fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload); +void fw_iso_context_queue_flush(struct fw_iso_context *ctx); +int fw_iso_context_flush_completions(struct fw_iso_context *ctx); +int fw_iso_context_start(struct fw_iso_context *ctx, + int cycle, int sync, int tags); +int fw_iso_context_stop(struct fw_iso_context *ctx); +void fw_iso_context_destroy(struct fw_iso_context *ctx); +void fw_iso_resource_manage(struct fw_card *card, int generation, + u64 channels_mask, int *channel, int *bandwidth, + bool allocate); + +extern struct workqueue_struct *fw_workqueue; + +#endif /* _LINUX_FIREWIRE_H */ diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h new file mode 100644 index 00000000..43fe52fc --- /dev/null +++ b/include/linux/firmware-map.h @@ -0,0 +1,43 @@ +/* + * include/linux/firmware-map.h: + * Copyright (C) 2008 SUSE LINUX Products GmbH + * by Bernhard Walle <bernhard.walle@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _LINUX_FIRMWARE_MAP_H +#define _LINUX_FIRMWARE_MAP_H + +#include <linux/list.h> + +/* + * provide a dummy interface if CONFIG_FIRMWARE_MEMMAP is disabled + */ +#ifdef CONFIG_FIRMWARE_MEMMAP + +int firmware_map_add_early(u64 start, u64 end, const char *type); +int firmware_map_add_hotplug(u64 start, u64 end, const char *type); + +#else /* CONFIG_FIRMWARE_MEMMAP */ + +static inline int firmware_map_add_early(u64 start, u64 end, const char *type) +{ + return 0; +} + +static inline int firmware_map_add_hotplug(u64 start, u64 end, const char *type) +{ + return 0; +} + +#endif /* CONFIG_FIRMWARE_MEMMAP */ + +#endif /* _LINUX_FIRMWARE_MAP_H */ diff --git a/include/linux/firmware.h b/include/linux/firmware.h new file mode 100644 index 00000000..1e7c0118 --- /dev/null +++ b/include/linux/firmware.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_FIRMWARE_H +#define _LINUX_FIRMWARE_H + +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/gfp.h> + +#define FW_ACTION_NOHOTPLUG 0 +#define FW_ACTION_HOTPLUG 1 + +struct firmware { + size_t size; + const u8 *data; + struct page **pages; +}; + +struct module; +struct device; + +struct builtin_fw { + char *name; + void *data; + unsigned long size; +}; + +/* We have to play tricks here much like stringify() to get the + __COUNTER__ macro to be expanded as we want it */ +#define __fw_concat1(x, y) x##y +#define __fw_concat(x, y) __fw_concat1(x, y) + +#define DECLARE_BUILTIN_FIRMWARE(name, blob) \ + DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob)) + +#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \ + static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \ + __used __section(.builtin_fw) = { name, blob, size } + +#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) +int request_firmware(const struct firmware **fw, const char *name, + struct device *device); +int request_firmware_nowait( + struct module *module, bool uevent, + const char *name, struct device *device, gfp_t gfp, void *context, + void (*cont)(const struct firmware *fw, void *context)); + +void release_firmware(const struct firmware *fw); +#else +static inline int request_firmware(const struct firmware **fw, + const char *name, + struct device *device) +{ + return -EINVAL; +} +static inline int request_firmware_nowait( + struct module *module, bool uevent, + const char *name, struct device *device, gfp_t gfp, void *context, + void (*cont)(const struct firmware *fw, void *context)) +{ + return -EINVAL; +} + +static inline void release_firmware(const struct firmware *fw) +{ +} +#endif + +#endif diff --git a/include/linux/flat.h b/include/linux/flat.h new file mode 100644 index 00000000..ec56852e --- /dev/null +++ b/include/linux/flat.h @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2002-2003 David McCullough <davidm@snapgear.com> + * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com> + * The Silver Hammer Group, Ltd. + * + * This file provides the definitions and structures needed to + * support uClinux flat-format executables. + */ + +#ifndef _LINUX_FLAT_H +#define _LINUX_FLAT_H + +#ifdef __KERNEL__ +#include <asm/flat.h> +#endif + +#define FLAT_VERSION 0x00000004L + +#ifdef CONFIG_BINFMT_SHARED_FLAT +#define MAX_SHARED_LIBS (4) +#else +#define MAX_SHARED_LIBS (1) +#endif + +/* + * To make everything easier to port and manage cross platform + * development, all fields are in network byte order. 
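Editor's note: the request_firmware()/release_firmware() pair above is normally used from a driver's probe path. A minimal sketch; the blob name is a placeholder and error codes come from <linux/errno.h>.

/* Illustrative: fetch a named blob, consume fw->data/fw->size, then drop it. */
static int example_load_blob(struct device *dev)
{
        const struct firmware *fw;
        int err;

        err = request_firmware(&fw, "vendor/example.bin", dev);
        if (err)
                return err;

        /* ... program fw->data (fw->size bytes) into the hardware ... */

        release_firmware(fw);
        return 0;
}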
+ */ + +struct flat_hdr { + char magic[4]; + unsigned long rev; /* version (as above) */ + unsigned long entry; /* Offset of first executable instruction + with text segment from beginning of file */ + unsigned long data_start; /* Offset of data segment from beginning of + file */ + unsigned long data_end; /* Offset of end of data segment + from beginning of file */ + unsigned long bss_end; /* Offset of end of bss segment from beginning + of file */ + + /* (It is assumed that data_end through bss_end forms the bss segment.) */ + + unsigned long stack_size; /* Size of stack, in bytes */ + unsigned long reloc_start; /* Offset of relocation records from + beginning of file */ + unsigned long reloc_count; /* Number of relocation records */ + unsigned long flags; + unsigned long build_date; /* When the program/library was built */ + unsigned long filler[5]; /* Reservered, set to zero */ +}; + +#define FLAT_FLAG_RAM 0x0001 /* load program entirely into RAM */ +#define FLAT_FLAG_GOTPIC 0x0002 /* program is PIC with GOT */ +#define FLAT_FLAG_GZIP 0x0004 /* all but the header is compressed */ +#define FLAT_FLAG_GZDATA 0x0008 /* only data/relocs are compressed (for XIP) */ +#define FLAT_FLAG_KTRACE 0x0010 /* output useful kernel trace for debugging */ + + +#ifdef __KERNEL__ /* so systems without linux headers can compile the apps */ +/* + * While it would be nice to keep this header clean, users of older + * tools still need this support in the kernel. So this section is + * purely for compatibility with old tool chains. + * + * DO NOT make changes or enhancements to the old format please, just work + * with the format above, except to fix bugs with old format support. + */ + +#include <asm/byteorder.h> + +#define OLD_FLAT_VERSION 0x00000002L +#define OLD_FLAT_RELOC_TYPE_TEXT 0 +#define OLD_FLAT_RELOC_TYPE_DATA 1 +#define OLD_FLAT_RELOC_TYPE_BSS 2 + +typedef union { + unsigned long value; + struct { +# if defined(mc68000) && !defined(CONFIG_COLDFIRE) + signed long offset : 30; + unsigned long type : 2; +# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ +# elif defined(__BIG_ENDIAN_BITFIELD) + unsigned long type : 2; + signed long offset : 30; +# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ +# elif defined(__LITTLE_ENDIAN_BITFIELD) + signed long offset : 30; + unsigned long type : 2; +# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ +# else +# error "Unknown bitfield order for flat files." +# endif + } reloc; +} flat_v2_reloc_t; + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_FLAT_H */ diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h new file mode 100644 index 00000000..6843cf19 --- /dev/null +++ b/include/linux/flex_array.h @@ -0,0 +1,80 @@ +#ifndef _FLEX_ARRAY_H +#define _FLEX_ARRAY_H + +#include <linux/types.h> +#include <asm/page.h> + +#define FLEX_ARRAY_PART_SIZE PAGE_SIZE +#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE + +struct flex_array_part; + +/* + * This is meant to replace cases where an array-like + * structure has gotten too big to fit into kmalloc() + * and the developer is getting tempted to use + * vmalloc(). 
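Editor's note: since every flat_hdr field is stored big-endian, a loader converts with ntohl() before use. A sketch of the sanity check along the lines of what the in-kernel binfmt_flat loader does, assuming the conventional "bFLT" magic and <linux/string.h>/<linux/errno.h>.

/* Illustrative: reject anything that is not a version-4 flat binary. */
static int example_check_flat_header(const struct flat_hdr *hdr)
{
        if (memcmp(hdr->magic, "bFLT", 4) != 0)
                return -ENOEXEC;
        if (ntohl(hdr->rev) != FLAT_VERSION)
                return -ENOEXEC;
        return 0;
}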
+ */ + +struct flex_array { + union { + struct { + int element_size; + int total_nr_elements; + int elems_per_part; + u32 reciprocal_elems; + struct flex_array_part *parts[]; + }; + /* + * This little trick makes sure that + * sizeof(flex_array) == PAGE_SIZE + */ + char padding[FLEX_ARRAY_BASE_SIZE]; + }; +}; + +/* Number of bytes left in base struct flex_array, excluding metadata */ +#define FLEX_ARRAY_BASE_BYTES_LEFT \ + (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts)) + +/* Number of pointers in base to struct flex_array_part pages */ +#define FLEX_ARRAY_NR_BASE_PTRS \ + (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *)) + +/* Number of elements of size that fit in struct flex_array_part */ +#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \ + (FLEX_ARRAY_PART_SIZE / size) + +/* + * Defines a statically allocated flex array and ensures its parameters are + * valid. + */ +#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \ + struct flex_array __arrayname = { { { \ + .element_size = (__element_size), \ + .total_nr_elements = (__total), \ + } } }; \ + static inline void __arrayname##_invalid_parameter(void) \ + { \ + BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \ + FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \ + } + +struct flex_array *flex_array_alloc(int element_size, unsigned int total, + gfp_t flags); +int flex_array_prealloc(struct flex_array *fa, unsigned int start, + unsigned int nr_elements, gfp_t flags); +void flex_array_free(struct flex_array *fa); +void flex_array_free_parts(struct flex_array *fa); +int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, + gfp_t flags); +int flex_array_clear(struct flex_array *fa, unsigned int element_nr); +void *flex_array_get(struct flex_array *fa, unsigned int element_nr); +int flex_array_shrink(struct flex_array *fa); + +#define flex_array_put_ptr(fa, nr, src, gfp) \ + flex_array_put(fa, nr, (void *)&(src), gfp) + +void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr); + +#endif /* _FLEX_ARRAY_H */ diff --git a/include/linux/font.h b/include/linux/font.h new file mode 100644 index 00000000..40a24ab4 --- /dev/null +++ b/include/linux/font.h @@ -0,0 +1,58 @@ +/* + * font.h -- `Soft' font definitions + * + * Created 1995 by Geert Uytterhoeven + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#ifndef _VIDEO_FONT_H +#define _VIDEO_FONT_H + +#include <linux/types.h> + +struct font_desc { + int idx; + const char *name; + int width, height; + const void *data; + int pref; +}; + +#define VGA8x8_IDX 0 +#define VGA8x16_IDX 1 +#define PEARL8x8_IDX 2 +#define VGA6x11_IDX 3 +#define FONT7x14_IDX 4 +#define FONT10x18_IDX 5 +#define SUN8x16_IDX 6 +#define SUN12x22_IDX 7 +#define ACORN8x8_IDX 8 +#define MINI4x6_IDX 9 + +extern const struct font_desc font_vga_8x8, + font_vga_8x16, + font_pearl_8x8, + font_vga_6x11, + font_7x14, + font_10x18, + font_sun_8x16, + font_sun_12x22, + font_acorn_8x8, + font_mini_4x6; + +/* Find a font with a specific name */ + +extern const struct font_desc *find_font(const char *name); + +/* Get the default font for a specific screen size */ + +extern const struct font_desc *get_default_font(int xres, int yres, + u32 font_w, u32 font_h); + +/* Max. 
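Editor's note: a minimal sketch of the flex_array API declared above, assuming GFP_KERNEL from <linux/gfp.h>; the record type and function name are invented for illustration.

/* Illustrative: fixed-size records indexed like an array, without one large allocation. */
struct example_rec {
        int id;
        int value;
};

static int example_use_flex_array(unsigned int nr_elements)
{
        struct flex_array *fa;
        struct example_rec rec = { .id = 0, .value = 42 }, *p;
        int err;

        fa = flex_array_alloc(sizeof(rec), nr_elements, GFP_KERNEL);
        if (!fa)
                return -ENOMEM;

        err = flex_array_put(fa, 0, &rec, GFP_KERNEL);
        if (!err) {
                p = flex_array_get(fa, 0);
                /* p->value == 42 here */
        }

        flex_array_free(fa);
        return err;
}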
length for the name of a predefined font */ +#define MAX_FONT_NAME 32 + +#endif /* _VIDEO_FONT_H */ diff --git a/include/linux/freezer.h b/include/linux/freezer.h new file mode 100644 index 00000000..a628084e --- /dev/null +++ b/include/linux/freezer.h @@ -0,0 +1,214 @@ +/* Freezer declarations */ + +#ifndef FREEZER_H_INCLUDED +#define FREEZER_H_INCLUDED + +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/atomic.h> + +#ifdef CONFIG_FREEZER +extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */ +extern bool pm_freezing; /* PM freezing in effect */ +extern bool pm_nosig_freezing; /* PM nosig freezing in effect */ + +/* + * Check if a process has been frozen + */ +static inline bool frozen(struct task_struct *p) +{ + return p->flags & PF_FROZEN; +} + +extern bool freezing_slow_path(struct task_struct *p); + +/* + * Check if there is a request to freeze a process + */ +static inline bool freezing(struct task_struct *p) +{ + if (likely(!atomic_read(&system_freezing_cnt))) + return false; + return freezing_slow_path(p); +} + +/* Takes and releases task alloc lock using task_lock() */ +extern void __thaw_task(struct task_struct *t); + +extern bool __refrigerator(bool check_kthr_stop); +extern int freeze_processes(void); +extern int freeze_kernel_threads(void); +extern void thaw_processes(void); +extern void thaw_kernel_threads(void); + +/* + * HACK: prevent sleeping while atomic warnings due to ARM signal handling + * disabling irqs + */ +static inline bool try_to_freeze_nowarn(void) +{ + if (likely(!freezing(current))) + return false; + return __refrigerator(false); +} + +static inline bool try_to_freeze(void) +{ + might_sleep(); + if (likely(!freezing(current))) + return false; + return __refrigerator(false); +} + +extern bool freeze_task(struct task_struct *p); +extern bool set_freezable(void); + +#ifdef CONFIG_CGROUP_FREEZER +extern bool cgroup_freezing(struct task_struct *task); +#else /* !CONFIG_CGROUP_FREEZER */ +static inline bool cgroup_freezing(struct task_struct *task) +{ + return false; +} +#endif /* !CONFIG_CGROUP_FREEZER */ + +/* + * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it + * calls wait_for_completion(&vfork) and reset right after it returns from this + * function. Next, the parent should call try_to_freeze() to freeze itself + * appropriately in case the child has exited before the freezing of tasks is + * complete. However, we don't want kernel threads to be frozen in unexpected + * places, so we allow them to block freeze_processes() instead or to set + * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the + * parent won't really block freeze_processes(), since ____call_usermodehelper() + * (the child) does a little before exec/exit and it can't be frozen before + * waking up the parent. + */ + + +/* Tell the freezer not to count the current task as freezable. */ +static inline void freezer_do_not_count(void) +{ + current->flags |= PF_FREEZER_SKIP; +} + +/* + * Tell the freezer to count the current task as freezable again and try to + * freeze it. 
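Editor's note: a sketch of picking a console font with the helpers above. It assumes the last two arguments of get_default_font() are bitmasks of acceptable widths/heights, so all-ones accepts any size; the function name is illustrative.

/* Illustrative: choose a font by name, falling back to the resolution-based default. */
static const struct font_desc *example_pick_font(const char *name,
                                                 int xres, int yres)
{
        const struct font_desc *font = NULL;

        if (name)
                font = find_font(name);
        if (!font)
                font = get_default_font(xres, yres, (u32)-1, (u32)-1);

        return font;
}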
+ */ +static inline void freezer_count(void) +{ + current->flags &= ~PF_FREEZER_SKIP; + try_to_freeze(); +} + +/* + * Check if the task should be counted as freezable by the freezer + */ +static inline int freezer_should_skip(struct task_struct *p) +{ + return !!(p->flags & PF_FREEZER_SKIP); +} + +/* + * These macros are intended to be used whenever you want allow a task that's + * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note + * that neither return any clear indication of whether a freeze event happened + * while in this function. + */ + +/* Like schedule(), but should not block the freezer. */ +#define freezable_schedule() \ +({ \ + freezer_do_not_count(); \ + schedule(); \ + freezer_count(); \ +}) + +/* Like schedule_timeout_killable(), but should not block the freezer. */ +#define freezable_schedule_timeout_killable(timeout) \ +({ \ + long __retval; \ + freezer_do_not_count(); \ + __retval = schedule_timeout_killable(timeout); \ + freezer_count(); \ + __retval; \ +}) + +/* + * Freezer-friendly wrappers around wait_event_interruptible(), + * wait_event_killable() and wait_event_interruptible_timeout(), originally + * defined in <linux/wait.h> + */ + +#define wait_event_freezekillable(wq, condition) \ +({ \ + int __retval; \ + freezer_do_not_count(); \ + __retval = wait_event_killable(wq, (condition)); \ + freezer_count(); \ + __retval; \ +}) + +#define wait_event_freezable(wq, condition) \ +({ \ + int __retval; \ + for (;;) { \ + __retval = wait_event_interruptible(wq, \ + (condition) || freezing(current)); \ + if (__retval || (condition)) \ + break; \ + try_to_freeze(); \ + } \ + __retval; \ +}) + +#define wait_event_freezable_timeout(wq, condition, timeout) \ +({ \ + long __retval = timeout; \ + for (;;) { \ + __retval = wait_event_interruptible_timeout(wq, \ + (condition) || freezing(current), \ + __retval); \ + if (__retval <= 0 || (condition)) \ + break; \ + try_to_freeze(); \ + } \ + __retval; \ +}) + +#else /* !CONFIG_FREEZER */ +static inline bool frozen(struct task_struct *p) { return false; } +static inline bool freezing(struct task_struct *p) { return false; } +static inline void __thaw_task(struct task_struct *t) {} + +static inline bool __refrigerator(bool check_kthr_stop) { return false; } +static inline int freeze_processes(void) { return -ENOSYS; } +static inline int freeze_kernel_threads(void) { return -ENOSYS; } +static inline void thaw_processes(void) {} +static inline void thaw_kernel_threads(void) {} + +static inline bool try_to_freeze(void) { return false; } + +static inline void freezer_do_not_count(void) {} +static inline void freezer_count(void) {} +static inline int freezer_should_skip(struct task_struct *p) { return 0; } +static inline void set_freezable(void) {} + +#define freezable_schedule() schedule() + +#define freezable_schedule_timeout_killable(timeout) \ + schedule_timeout_killable(timeout) + +#define wait_event_freezable(wq, condition) \ + wait_event_interruptible(wq, condition) + +#define wait_event_freezable_timeout(wq, condition, timeout) \ + wait_event_interruptible_timeout(wq, condition, timeout) + +#define wait_event_freezekillable(wq, condition) \ + wait_event_killable(wq, condition) + +#endif /* !CONFIG_FREEZER */ + +#endif /* FREEZER_H_INCLUDED */ diff --git a/include/linux/fs.h b/include/linux/fs.h new file mode 100644 index 00000000..25c40b9f --- /dev/null +++ b/include/linux/fs.h @@ -0,0 +1,2676 @@ +#ifndef _LINUX_FS_H +#define _LINUX_FS_H + +/* + * This file has definitions for some important file table + * 
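Editor's note: the usual shape of a freezable kernel thread built on the helpers above; kthread_should_stop(), schedule_timeout_interruptible() and HZ come from <linux/kthread.h> and <linux/sched.h>, and the work body is elided.

/* Illustrative: a kernel thread that cooperates with the freezer. */
static int example_freezable_thread(void *data)
{
        set_freezable();

        while (!kthread_should_stop()) {
                try_to_freeze();

                /* ... perform one unit of work ... */

                schedule_timeout_interruptible(HZ);
        }

        return 0;
}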
structures etc. + */ + +#include <linux/limits.h> +#include <linux/ioctl.h> +#include <linux/blk_types.h> +#include <linux/types.h> + +/* + * It's silly to have NR_OPEN bigger than NR_FILE, but you can change + * the file limit at runtime and only root can increase the per-process + * nr_file rlimit, so it's safe to set up a ridiculously high absolute + * upper limit on files-per-process. + * + * Some programs (notably those using select()) may have to be + * recompiled to take full advantage of the new limits.. + */ + +/* Fixed constants first: */ +#undef NR_OPEN +#define INR_OPEN_CUR 1024 /* Initial setting for nfile rlimits */ +#define INR_OPEN_MAX 4096 /* Hard limit for nfile rlimits */ + +#define BLOCK_SIZE_BITS 10 +#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS) + +#define SEEK_SET 0 /* seek relative to beginning of file */ +#define SEEK_CUR 1 /* seek relative to current file position */ +#define SEEK_END 2 /* seek relative to end of file */ +#define SEEK_DATA 3 /* seek to the next data */ +#define SEEK_HOLE 4 /* seek to the next hole */ +#define SEEK_MAX SEEK_HOLE + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + +/* And dynamically-tunable limits and defaults: */ +struct files_stat_struct { + unsigned long nr_files; /* read only */ + unsigned long nr_free_files; /* read only */ + unsigned long max_files; /* tunable */ +}; + +struct inodes_stat_t { + int nr_inodes; + int nr_unused; + int dummy[5]; /* padding for sysctl ABI compatibility */ +}; + + +#define NR_FILE 8192 /* this can well be larger on a larger system */ + +#define MAY_EXEC 0x00000001 +#define MAY_WRITE 0x00000002 +#define MAY_READ 0x00000004 +#define MAY_APPEND 0x00000008 +#define MAY_ACCESS 0x00000010 +#define MAY_OPEN 0x00000020 +#define MAY_CHDIR 0x00000040 +/* called from RCU mode, don't block */ +#define MAY_NOT_BLOCK 0x00000080 + +/* + * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond + * to O_WRONLY and O_RDWR via the strange trick in __dentry_open() + */ + +/* file is open for reading */ +#define FMODE_READ ((__force fmode_t)0x1) +/* file is open for writing */ +#define FMODE_WRITE ((__force fmode_t)0x2) +/* file is seekable */ +#define FMODE_LSEEK ((__force fmode_t)0x4) +/* file can be accessed using pread */ +#define FMODE_PREAD ((__force fmode_t)0x8) +/* file can be accessed using pwrite */ +#define FMODE_PWRITE ((__force fmode_t)0x10) +/* File is opened for execution with sys_execve / sys_uselib */ +#define FMODE_EXEC ((__force fmode_t)0x20) +/* File is opened with O_NDELAY (only set for block devices) */ +#define FMODE_NDELAY ((__force fmode_t)0x40) +/* File is opened with O_EXCL (only set for block devices) */ +#define FMODE_EXCL ((__force fmode_t)0x80) +/* File is opened using open(.., 3, ..) and is writeable only for ioctls + (specialy hack for floppy.c) */ +#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100) +/* 32bit hashes as llseek() offset (for directories) */ +#define FMODE_32BITHASH ((__force fmode_t)0x200) +/* 64bit hashes as llseek() offset (for directories) */ +#define FMODE_64BITHASH ((__force fmode_t)0x400) + +/* + * Don't update ctime and mtime. + * + * Currently a special hack for the XFS open_by_handle ioctl, but we'll + * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. + */ +#define FMODE_NOCMTIME ((__force fmode_t)0x800) + +/* Expect random access pattern */ +#define FMODE_RANDOM ((__force fmode_t)0x1000) + +/* File is huge (eg. 
/dev/kmem): treat loff_t as unsigned */ +#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) + +/* File is opened with O_PATH; almost nothing can be done with it */ +#define FMODE_PATH ((__force fmode_t)0x4000) + +/* File was opened by fanotify and shouldn't generate fanotify events */ +#define FMODE_NONOTIFY ((__force fmode_t)0x1000000) + +/* + * The below are the various read and write types that we support. Some of + * them include behavioral modifiers that send information down to the + * block layer and IO scheduler. Terminology: + * + * The block layer uses device plugging to defer IO a little bit, in + * the hope that we will see more IO very shortly. This increases + * coalescing of adjacent IO and thus reduces the number of IOs we + * have to send to the device. It also allows for better queuing, + * if the IO isn't mergeable. If the caller is going to be waiting + * for the IO, then he must ensure that the device is unplugged so + * that the IO is dispatched to the driver. + * + * All IO is handled async in Linux. This is fine for background + * writes, but for reads or writes that someone waits for completion + * on, we want to notify the block layer and IO scheduler so that they + * know about it. That allows them to make better scheduling + * decisions. So when the below references 'sync' and 'async', it + * is referencing this priority hint. + * + * With that in mind, the available types are: + * + * READ A normal read operation. Device will be plugged. + * READ_SYNC A synchronous read. Device is not plugged, caller can + * immediately wait on this read without caring about + * unplugging. + * READA Used for read-ahead operations. Lower priority, and the + * block layer could (in theory) choose to ignore this + * request if it runs into resource problems. + * WRITE A normal async write. Device will be plugged. + * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down + * the hint that someone will be waiting on this IO + * shortly. The write equivalent of READ_SYNC. + * WRITE_ODIRECT Special case write for O_DIRECT only. + * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. + * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on + * non-volatile media on completion. + * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded + * by a cache flush and data is guaranteed to be on + * non-volatile media on completion. + * + */ +#define RW_MASK REQ_WRITE +#define RWA_MASK REQ_RAHEAD + +#define READ 0 +#define WRITE RW_MASK +#define READA RWA_MASK + +#define READ_SYNC (READ | REQ_SYNC) +#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) +#define WRITE_ODIRECT (WRITE | REQ_SYNC) +#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH) +#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA) +#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) + +#define SEL_IN 1 +#define SEL_OUT 2 +#define SEL_EX 4 + +/* public flags for file_system_type */ +#define FS_REQUIRES_DEV 1 +#define FS_BINARY_MOUNTDATA 2 +#define FS_HAS_SUBTYPE 4 +#define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ +#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() + * during rename() internally. 
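Editor's note: a sketch of how the composite write types above are used when submitting I/O that must survive a power failure; submit_bio() and struct bio belong to the block layer, and the function name is illustrative.

/* Illustrative: a journal commit block wants a preceding flush plus FUA semantics. */
static void example_submit_commit_block(struct bio *bio)
{
        submit_bio(WRITE_FLUSH_FUA, bio);
}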
+ */ + +/* + * These are the fs-independent mount-flags: up to 32 flags are supported + */ +#define MS_RDONLY 1 /* Mount read-only */ +#define MS_NOSUID 2 /* Ignore suid and sgid bits */ +#define MS_NODEV 4 /* Disallow access to device special files */ +#define MS_NOEXEC 8 /* Disallow program execution */ +#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ +#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ +#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ +#define MS_NOATIME 1024 /* Do not update access times. */ +#define MS_NODIRATIME 2048 /* Do not update directory access times */ +#define MS_BIND 4096 +#define MS_MOVE 8192 +#define MS_REC 16384 +#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. + MS_VERBOSE is deprecated. */ +#define MS_SILENT 32768 +#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define MS_UNBINDABLE (1<<17) /* change to unbindable */ +#define MS_PRIVATE (1<<18) /* change to private */ +#define MS_SLAVE (1<<19) /* change to slave */ +#define MS_SHARED (1<<20) /* change to shared */ +#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ +#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ +#define MS_I_VERSION (1<<23) /* Update inode I_version field */ +#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_NOSEC (1<<28) +#define MS_BORN (1<<29) +#define MS_ACTIVE (1<<30) +#define MS_NOUSER (1<<31) + +/* + * Superblock flags that can be altered by MS_REMOUNT + */ +#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION) + +/* + * Old magic mount flag and mask + */ +#define MS_MGC_VAL 0xC0ED0000 +#define MS_MGC_MSK 0xffff0000 + +/* Inode flags - they have nothing to superblock flags now */ + +#define S_SYNC 1 /* Writes are synced at once */ +#define S_NOATIME 2 /* Do not update access times */ +#define S_APPEND 4 /* Append-only file */ +#define S_IMMUTABLE 8 /* Immutable file */ +#define S_DEAD 16 /* removed, but still open directory */ +#define S_NOQUOTA 32 /* Inode is not counted to quota */ +#define S_DIRSYNC 64 /* Directory modifications are synchronous */ +#define S_NOCMTIME 128 /* Do not update file c/mtime */ +#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */ +#define S_PRIVATE 512 /* Inode is fs-internal */ +#define S_IMA 1024 /* Inode has an associated IMA struct */ +#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ +#define S_NOSEC 4096 /* no suid or xattr security attributes */ + +/* + * Note that nosuid etc flags are inode-specific: setting some file-system + * flags just means all the inodes inherit those flags by default. It might be + * possible to override it selectively if you really wanted to with some + * ioctl() that is not currently implemented. + * + * Exception: MS_RDONLY is always applied to the entire file system. + * + * Unfortunately, it is possible to change a filesystems flags with it mounted + * with files in use. This means that all of the inodes will not have their + * i_flags updated. Hence, i_flags no longer inherit the superblock mount + * flags, so these have to be checked separately. 
-- rmk@arm.uk.linux.org + */ +#define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg)) + +#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY) +#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \ + ((inode)->i_flags & S_SYNC)) +#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \ + ((inode)->i_flags & (S_SYNC|S_DIRSYNC))) +#define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK) +#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME) +#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION) + +#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) +#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) +#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) +#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL) + +#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) +#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) +#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE) +#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE) +#define IS_IMA(inode) ((inode)->i_flags & S_IMA) +#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) +#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) + +/* the read-only stuff doesn't really belong here, but any other place is + probably as bad and I don't want to create yet another include file. */ + +#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */ +#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */ +#define BLKRRPART _IO(0x12,95) /* re-read partition table */ +#define BLKGETSIZE _IO(0x12,96) /* return device size /512 (long *arg) */ +#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */ +#define BLKRASET _IO(0x12,98) /* set read ahead for block device */ +#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */ +#define BLKFRASET _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */ +#define BLKFRAGET _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */ +#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */ +#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */ +#define BLKSSZGET _IO(0x12,104)/* get block device sector size */ +#if 0 +#define BLKPG _IO(0x12,105)/* See blkpg.h */ + +/* Some people are morons. Do not use sizeof! */ + +#define BLKELVGET _IOR(0x12,106,size_t)/* elevator get */ +#define BLKELVSET _IOW(0x12,107,size_t)/* elevator set */ +/* This was here just to show that the number is taken - + probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */ +#endif +/* A jump here: 108-111 have been used for various private purposes. 
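Editor's note: a sketch of the kind of check the VFS makes with the IS_* helpers above before allowing an ordinary (non-append) write; error codes come from <linux/errno.h> and the function name is invented.

/* Illustrative: refuse writes to read-only, immutable or append-only inodes. */
static int example_inode_writable(struct inode *inode)
{
        if (IS_RDONLY(inode))
                return -EROFS;
        if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
                return -EPERM;
        return 0;
}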
*/ +#define BLKBSZGET _IOR(0x12,112,size_t) +#define BLKBSZSET _IOW(0x12,113,size_t) +#define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */ +#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup) +#define BLKTRACESTART _IO(0x12,116) +#define BLKTRACESTOP _IO(0x12,117) +#define BLKTRACETEARDOWN _IO(0x12,118) +#define BLKDISCARD _IO(0x12,119) +#define BLKIOMIN _IO(0x12,120) +#define BLKIOOPT _IO(0x12,121) +#define BLKALIGNOFF _IO(0x12,122) +#define BLKPBSZGET _IO(0x12,123) +#define BLKDISCARDZEROES _IO(0x12,124) +#define BLKSECDISCARD _IO(0x12,125) +#define BLKROTATIONAL _IO(0x12,126) + +#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ +#define FIBMAP _IO(0x00,1) /* bmap access */ +#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */ +#define FIFREEZE _IOWR('X', 119, int) /* Freeze */ +#define FITHAW _IOWR('X', 120, int) /* Thaw */ +#define FITRIM _IOWR('X', 121, struct fstrim_range) /* Trim */ + +#define FS_IOC_GETFLAGS _IOR('f', 1, long) +#define FS_IOC_SETFLAGS _IOW('f', 2, long) +#define FS_IOC_GETVERSION _IOR('v', 1, long) +#define FS_IOC_SETVERSION _IOW('v', 2, long) +#define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap) +#define FS_IOC32_GETFLAGS _IOR('f', 1, int) +#define FS_IOC32_SETFLAGS _IOW('f', 2, int) +#define FS_IOC32_GETVERSION _IOR('v', 1, int) +#define FS_IOC32_SETVERSION _IOW('v', 2, int) + +/* + * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) + */ +#define FS_SECRM_FL 0x00000001 /* Secure deletion */ +#define FS_UNRM_FL 0x00000002 /* Undelete */ +#define FS_COMPR_FL 0x00000004 /* Compress file */ +#define FS_SYNC_FL 0x00000008 /* Synchronous updates */ +#define FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ +#define FS_APPEND_FL 0x00000020 /* writes to file may only append */ +#define FS_NODUMP_FL 0x00000040 /* do not dump file */ +#define FS_NOATIME_FL 0x00000080 /* do not update atime */ +/* Reserved for compression usage... 
*/ +#define FS_DIRTY_FL 0x00000100 +#define FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ +#define FS_NOCOMP_FL 0x00000400 /* Don't compress */ +#define FS_ECOMPR_FL 0x00000800 /* Compression error */ +/* End compression flags --- maybe not all used */ +#define FS_BTREE_FL 0x00001000 /* btree format dir */ +#define FS_INDEX_FL 0x00001000 /* hash-indexed directory */ +#define FS_IMAGIC_FL 0x00002000 /* AFS directory */ +#define FS_JOURNAL_DATA_FL 0x00004000 /* Reserved for ext3 */ +#define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ +#define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ +#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ +#define FS_EXTENT_FL 0x00080000 /* Extents */ +#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ +#define FS_NOCOW_FL 0x00800000 /* Do not cow file */ +#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ + +#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ +#define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ + + +#define SYNC_FILE_RANGE_WAIT_BEFORE 1 +#define SYNC_FILE_RANGE_WRITE 2 +#define SYNC_FILE_RANGE_WAIT_AFTER 4 + +#ifdef __KERNEL__ + +#include <linux/linkage.h> +#include <linux/wait.h> +#include <linux/kdev_t.h> +#include <linux/dcache.h> +#include <linux/path.h> +#include <linux/stat.h> +#include <linux/cache.h> +#include <linux/list.h> +#include <linux/radix-tree.h> +#include <linux/prio_tree.h> +#include <linux/init.h> +#include <linux/pid.h> +#include <linux/bug.h> +#include <linux/mutex.h> +#include <linux/capability.h> +#include <linux/semaphore.h> +#include <linux/fiemap.h> +#include <linux/rculist_bl.h> +#include <linux/atomic.h> +#include <linux/shrinker.h> +#include <linux/migrate_mode.h> + +#include <asm/byteorder.h> + +struct export_operations; +struct hd_geometry; +struct iovec; +struct nameidata; +struct kiocb; +struct kobject; +struct pipe_inode_info; +struct poll_table_struct; +struct kstatfs; +struct vm_area_struct; +struct vfsmount; +struct cred; + +extern void __init inode_init(void); +extern void __init inode_init_early(void); +extern void __init files_init(unsigned long); + +extern struct files_stat_struct files_stat; +extern unsigned long get_max_files(void); +extern int sysctl_nr_open; +extern struct inodes_stat_t inodes_stat; +extern int leases_enable, lease_break_time; + +struct buffer_head; +typedef int (get_block_t)(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create); +typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, + ssize_t bytes, void *private, int ret, + bool is_async); + +/* + * Attribute flags. These should be or-ed together to figure out what + * has been changed! + */ +#define ATTR_MODE (1 << 0) +#define ATTR_UID (1 << 1) +#define ATTR_GID (1 << 2) +#define ATTR_SIZE (1 << 3) +#define ATTR_ATIME (1 << 4) +#define ATTR_MTIME (1 << 5) +#define ATTR_CTIME (1 << 6) +#define ATTR_ATIME_SET (1 << 7) +#define ATTR_MTIME_SET (1 << 8) +#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */ +#define ATTR_ATTR_FLAG (1 << 10) +#define ATTR_KILL_SUID (1 << 11) +#define ATTR_KILL_SGID (1 << 12) +#define ATTR_FILE (1 << 13) +#define ATTR_KILL_PRIV (1 << 14) +#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ +#define ATTR_TIMES_SET (1 << 16) + +/* + * This is the Inode Attributes structure, used for notify_change(). It + * uses the above definitions as flags, to know which values have changed. 
+ * Also, in this manner, a Filesystem can look at only the values it cares + * about. Basically, these are the attributes that the VFS layer can + * request to change from the FS layer. + * + * Derek Atkins <warlord@MIT.EDU> 94-10-20 + */ +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + uid_t ia_uid; + gid_t ia_gid; + loff_t ia_size; + struct timespec ia_atime; + struct timespec ia_mtime; + struct timespec ia_ctime; + + /* + * Not an attribute, but an auxiliary info for filesystems wanting to + * implement an ftruncate() like method. NOTE: filesystem should + * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL). + */ + struct file *ia_file; +}; + +/* + * Includes for diskquotas. + */ +#include <linux/quota.h> + +/** + * enum positive_aop_returns - aop return codes with specific semantics + * + * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has + * completed, that the page is still locked, and + * should be considered active. The VM uses this hint + * to return the page to the active list -- it won't + * be a candidate for writeback again in the near + * future. Other callers must be careful to unlock + * the page if they get this return. Returned by + * writepage(); + * + * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has + * unlocked it and the page might have been truncated. + * The caller should back up to acquiring a new page and + * trying again. The aop will be taking reasonable + * precautions not to livelock. If the caller held a page + * reference, it should drop it before retrying. Returned + * by readpage(). + * + * address_space_operation functions return these large constants to indicate + * special semantics to the caller. These are much larger than the bytes in a + * page to allow for functions that return the number of bytes operated on in a + * given page. + */ + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 0x80000, + AOP_TRUNCATED_PAGE = 0x80001, +}; + +#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */ +#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */ +#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct + * helper code (eg buffer layer) + * to clear GFP_FS from alloc */ + +/* + * oh the beauties of C type declarations. + */ +struct page; +struct address_space; +struct writeback_control; + +struct iov_iter { + const struct iovec *iov; + unsigned long nr_segs; + size_t iov_offset; + size_t count; +}; + +size_t iov_iter_copy_from_user_atomic(struct page *page, + struct iov_iter *i, unsigned long offset, size_t bytes); +size_t iov_iter_copy_from_user(struct page *page, + struct iov_iter *i, unsigned long offset, size_t bytes); +void iov_iter_advance(struct iov_iter *i, size_t bytes); +int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); +size_t iov_iter_single_seg_count(struct iov_iter *i); + +static inline void iov_iter_init(struct iov_iter *i, + const struct iovec *iov, unsigned long nr_segs, + size_t count, size_t written) +{ + i->iov = iov; + i->nr_segs = nr_segs; + i->iov_offset = 0; + i->count = count + written; + + iov_iter_advance(i, written); +} + +static inline size_t iov_iter_count(struct iov_iter *i) +{ + return i->count; +} + +/* + * "descriptor" for what we're up to with a read. + * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode. 
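Editor's note: a sketch of how a ->setattr() style helper consumes struct iattr, looking only at the fields whose ATTR_* bit is set in ia_valid as the comment above prescribes; the function name is illustrative.

/* Illustrative: apply only the timestamp changes that were requested. */
static void example_apply_times(struct inode *inode, const struct iattr *attr)
{
        if (attr->ia_valid & ATTR_ATIME)
                inode->i_atime = attr->ia_atime;
        if (attr->ia_valid & ATTR_MTIME)
                inode->i_mtime = attr->ia_mtime;
        if (attr->ia_valid & ATTR_CTIME)
                inode->i_ctime = attr->ia_ctime;
}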
+ */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*read_actor_t)(read_descriptor_t *, struct page *, + unsigned long, unsigned long); + +struct address_space_operations { + int (*writepage)(struct page *page, struct writeback_control *wbc); + int (*readpage)(struct file *, struct page *); + + /* Write back some dirty pages from this mapping. */ + int (*writepages)(struct address_space *, struct writeback_control *); + + /* Set a page dirty. Return true if this dirtied it */ + int (*set_page_dirty)(struct page *page); + + int (*readpages)(struct file *filp, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages); + + int (*write_begin)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); + int (*write_end)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); + + /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidatepage) (struct page *, unsigned long); + int (*releasepage) (struct page *, gfp_t); + void (*freepage)(struct page *); + ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, + loff_t offset, unsigned long nr_segs); + int (*get_xip_mem)(struct address_space *, pgoff_t, int, + void **, unsigned long *); + /* + * migrate the contents of a page to the specified target. If sync + * is false, it must not block. + */ + int (*migratepage) (struct address_space *, + struct page *, struct page *, enum migrate_mode); + int (*launder_page) (struct page *); + int (*is_partially_uptodate) (struct page *, read_descriptor_t *, + unsigned long); + int (*error_remove_page)(struct address_space *, struct page *); +}; + +extern const struct address_space_operations empty_aops; + +/* + * pagecache_write_begin/pagecache_write_end must be used by general code + * to write into the pagecache. 
+ */ +int pagecache_write_begin(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); + +int pagecache_write_end(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); + +struct backing_dev_info; +struct address_space { + struct inode *host; /* owner: inode, block_device */ + struct radix_tree_root page_tree; /* radix tree of all pages */ + spinlock_t tree_lock; /* and lock protecting it */ + unsigned int i_mmap_writable;/* count VM_SHARED mappings */ + struct prio_tree_root i_mmap; /* tree of private and shared mappings */ + struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ + struct mutex i_mmap_mutex; /* protect tree, count, list */ + /* Protected by tree_lock together with the radix tree */ + unsigned long nrpages; /* number of total pages */ + pgoff_t writeback_index;/* writeback starts here */ + const struct address_space_operations *a_ops; /* methods */ + unsigned long flags; /* error bits/gfp mask */ + struct backing_dev_info *backing_dev_info; /* device readahead, etc */ + spinlock_t private_lock; /* for use by the address_space */ + struct list_head private_list; /* ditto */ + struct address_space *assoc_mapping; /* ditto */ +} __attribute__((aligned(sizeof(long)))); + /* + * On most architectures that alignment is already the case; but + * must be enforced here for CRIS, to let the least significant bit + * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. + */ +struct request_queue; + +struct block_device { + dev_t bd_dev; /* not a kdev_t - it's a search key */ + int bd_openers; + struct inode * bd_inode; /* will die */ + struct super_block * bd_super; + struct mutex bd_mutex; /* open/close mutex */ + struct list_head bd_inodes; + void * bd_claiming; + void * bd_holder; + int bd_holders; + bool bd_write_holder; +#ifdef CONFIG_SYSFS + struct list_head bd_holder_disks; +#endif + struct block_device * bd_contains; + unsigned bd_block_size; + struct hd_struct * bd_part; + /* number of times partitions within this device have been opened. */ + unsigned bd_part_count; + int bd_invalidated; + struct gendisk * bd_disk; + struct request_queue * bd_queue; + struct list_head bd_list; + /* + * Private data. You must have bd_claim'ed the block_device + * to use this. NOTE: bd_claim allows an owner to claim + * the same device multiple times, the owner must take special + * care to not mess up bd_private for that case. + */ + unsigned long bd_private; + + /* The counter of freeze processes */ + int bd_fsfreeze_count; + /* Mutex for freeze */ + struct mutex bd_fsfreeze_mutex; +}; + +/* + * Radix-tree tags, for tagging dirty and writeback pages within the pagecache + * radix trees + */ +#define PAGECACHE_TAG_DIRTY 0 +#define PAGECACHE_TAG_WRITEBACK 1 +#define PAGECACHE_TAG_TOWRITE 2 + +int mapping_tagged(struct address_space *mapping, int tag); + +/* + * Might pages of this file be mapped into userspace? + */ +static inline int mapping_mapped(struct address_space *mapping) +{ + return !prio_tree_empty(&mapping->i_mmap) || + !list_empty(&mapping->i_mmap_nonlinear); +} + +/* + * Might pages of this file have been modified in userspace? + * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff + * marks vma as VM_SHARED if it is shared, and the file was opened for + * writing i.e. vma may be mprotected writable even if now readonly. 
+ */ +static inline int mapping_writably_mapped(struct address_space *mapping) +{ + return mapping->i_mmap_writable != 0; +} + +/* + * Use sequence counter to get consistent i_size on 32-bit processors. + */ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) +#include <linux/seqlock.h> +#define __NEED_I_SIZE_ORDERED +#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount) +#else +#define i_size_ordered_init(inode) do { } while (0) +#endif + +struct posix_acl; +#define ACL_NOT_CACHED ((void *)(-1)) + +#define IOP_FASTPERM 0x0001 +#define IOP_LOOKUP 0x0002 +#define IOP_NOFOLLOW 0x0004 + +/* + * Keep mostly read-only and often accessed (especially for + * the RCU path lookup and 'stat' data) fields at the beginning + * of the 'struct inode' + */ +struct inode { + umode_t i_mode; + unsigned short i_opflags; + uid_t i_uid; + gid_t i_gid; + unsigned int i_flags; + +#ifdef CONFIG_FS_POSIX_ACL + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; +#endif + + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + +#ifdef CONFIG_SECURITY + void *i_security; +#endif + + /* Stat data, not accessed from path walking */ + unsigned long i_ino; + /* + * Filesystems may only read i_nlink directly. They shall use the + * following functions for modification: + * + * (set|clear|inc|drop)_nlink + * inode_(inc|dec)_link_count + */ + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + struct timespec i_atime; + struct timespec i_mtime; + struct timespec i_ctime; + spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ + unsigned short i_bytes; + blkcnt_t i_blocks; + loff_t i_size; + +#ifdef __NEED_I_SIZE_ORDERED + seqcount_t i_size_seqcount; +#endif + + /* Misc */ + unsigned long i_state; + struct mutex i_mutex; + + unsigned long dirtied_when; /* jiffies of first dirtying */ + + struct hlist_node i_hash; + struct list_head i_wb_list; /* backing dev IO list */ + struct list_head i_lru; /* inode LRU list */ + struct list_head i_sb_list; + union { + struct list_head i_dentry; + struct rcu_head i_rcu; + }; + atomic_t i_count; + unsigned int i_blkbits; + u64 i_version; + atomic_t i_dio_count; + atomic_t i_writecount; + const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ + struct file_lock *i_flock; + struct address_space i_data; +#ifdef CONFIG_QUOTA + struct dquot *i_dquot[MAXQUOTAS]; +#endif + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct block_device *i_bdev; + struct cdev *i_cdev; + }; + + __u32 i_generation; + +#ifdef CONFIG_FSNOTIFY + __u32 i_fsnotify_mask; /* all events this inode cares about */ + struct hlist_head i_fsnotify_marks; +#endif + +#ifdef CONFIG_IMA + atomic_t i_readcount; /* struct files open RO */ +#endif + void *i_private; /* fs or device private pointer */ +}; + +static inline int inode_unhashed(struct inode *inode) +{ + return hlist_unhashed(&inode->i_hash); +} + +/* + * inode->i_mutex nesting subclasses for the lock validator: + * + * 0: the object of the current VFS operation + * 1: parent + * 2: child/target + * 3: quota file + * + * The locking order between these classes is + * parent -> child -> normal -> xattr -> quota + */ +enum inode_i_mutex_lock_class +{ + I_MUTEX_NORMAL, + I_MUTEX_PARENT, + I_MUTEX_CHILD, + I_MUTEX_XATTR, + I_MUTEX_QUOTA +}; + +/* + * NOTE: in a 32bit arch with a preemptable kernel and + * an UP compile the i_size_read/write must be atomic + * with respect to the local cpu (unlike with preempt disabled), + 
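Editor's note: a sketch of how the i_mutex lockdep subclasses above are used when two inodes must be held at once; mutex_lock_nested() is from <linux/mutex.h>, the function name is made up, and callers unlock in the reverse order.

/* Illustrative: parent directory first, then the child, with distinct subclasses. */
static void example_lock_parent_and_child(struct inode *dir, struct inode *inode)
{
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
        mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
}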
* but they don't need to be atomic with respect to other cpus like in + * true SMP (so they need either to either locally disable irq around + * the read or for example on x86 they can be still implemented as a + * cmpxchg8b without the need of the lock prefix). For SMP compiles + * and 64bit archs it makes no difference if preempt is enabled or not. + */ +static inline loff_t i_size_read(const struct inode *inode) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + loff_t i_size; + unsigned int seq; + + do { + seq = read_seqcount_begin(&inode->i_size_seqcount); + i_size = inode->i_size; + } while (read_seqcount_retry(&inode->i_size_seqcount, seq)); + return i_size; +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) + loff_t i_size; + + preempt_disable(); + i_size = inode->i_size; + preempt_enable(); + return i_size; +#else + return inode->i_size; +#endif +} + +/* + * NOTE: unlike i_size_read(), i_size_write() does need locking around it + * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount + * can be lost, resulting in subsequent i_size_read() calls spinning forever. + */ +static inline void i_size_write(struct inode *inode, loff_t i_size) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + write_seqcount_begin(&inode->i_size_seqcount); + inode->i_size = i_size; + write_seqcount_end(&inode->i_size_seqcount); +#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) + preempt_disable(); + inode->i_size = i_size; + preempt_enable(); +#else + inode->i_size = i_size; +#endif +} + +static inline unsigned iminor(const struct inode *inode) +{ + return MINOR(inode->i_rdev); +} + +static inline unsigned imajor(const struct inode *inode) +{ + return MAJOR(inode->i_rdev); +} + +extern struct block_device *I_BDEV(struct inode *inode); + +struct fown_struct { + rwlock_t lock; /* protects pid, uid, euid fields */ + struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ + enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */ + uid_t uid, euid; /* uid/euid of process setting the owner */ + int signum; /* posix.1b rt signal to be delivered on IO */ +}; + +/* + * Track a single file's readahead state + */ +struct file_ra_state { + pgoff_t start; /* where readahead started */ + unsigned int size; /* # of readahead pages */ + unsigned int async_size; /* do asynchronous readahead when + there are only # of pages ahead */ + + unsigned int ra_pages; /* Maximum readahead window */ + unsigned int mmap_miss; /* Cache miss stat for mmap accesses */ + loff_t prev_pos; /* Cache last read() position */ +}; + +/* + * Check if @index falls in the readahead windows. + */ +static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) +{ + return (index >= ra->start && + index < ra->start + ra->size); +} + +#define FILE_MNT_WRITE_TAKEN 1 +#define FILE_MNT_WRITE_RELEASED 2 + +struct file { + /* + * fu_list becomes invalid after file_free is called and queued via + * fu_rcuhead for RCU freeing + */ + union { + struct list_head fu_list; + struct rcu_head fu_rcuhead; + } f_u; + struct path f_path; +#define f_dentry f_path.dentry +#define f_vfsmnt f_path.mnt + const struct file_operations *f_op; + + /* + * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR. + * Must not be taken from IRQ context. 
+ */ + spinlock_t f_lock; +#ifdef CONFIG_SMP + int f_sb_list_cpu; +#endif + atomic_long_t f_count; + unsigned int f_flags; + fmode_t f_mode; + loff_t f_pos; + struct fown_struct f_owner; + const struct cred *f_cred; + struct file_ra_state f_ra; + + u64 f_version; +#ifdef CONFIG_SECURITY + void *f_security; +#endif + /* needed for tty driver, and maybe others */ + void *private_data; + +#ifdef CONFIG_EPOLL + /* Used by fs/eventpoll.c to link all the hooks to this file */ + struct list_head f_ep_links; + struct list_head f_tfile_llink; +#endif /* #ifdef CONFIG_EPOLL */ + struct address_space *f_mapping; +#ifdef CONFIG_DEBUG_WRITECOUNT + unsigned long f_mnt_write_state; +#endif +}; + +struct file_handle { + __u32 handle_bytes; + int handle_type; + /* file identifier */ + unsigned char f_handle[0]; +}; + +#define get_file(x) atomic_long_inc(&(x)->f_count) +#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) +#define file_count(x) atomic_long_read(&(x)->f_count) + +#ifdef CONFIG_DEBUG_WRITECOUNT +static inline void file_take_write(struct file *f) +{ + WARN_ON(f->f_mnt_write_state != 0); + f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN; +} +static inline void file_release_write(struct file *f) +{ + f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED; +} +static inline void file_reset_write(struct file *f) +{ + f->f_mnt_write_state = 0; +} +static inline void file_check_state(struct file *f) +{ + /* + * At this point, either both or neither of these bits + * should be set. + */ + WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN); + WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED); +} +static inline int file_check_writeable(struct file *f) +{ + if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN) + return 0; + printk(KERN_WARNING "writeable file with no " + "mnt_want_write()\n"); + WARN_ON(1); + return -EINVAL; +} +#else /* !CONFIG_DEBUG_WRITECOUNT */ +static inline void file_take_write(struct file *filp) {} +static inline void file_release_write(struct file *filp) {} +static inline void file_reset_write(struct file *filp) {} +static inline void file_check_state(struct file *filp) {} +static inline int file_check_writeable(struct file *filp) +{ + return 0; +} +#endif /* CONFIG_DEBUG_WRITECOUNT */ + +#define MAX_NON_LFS ((1UL<<31) - 1) + +/* Page cache limit. The filesystems should put that into their s_maxbytes + limits, otherwise bad things can happen in VM. */ +#if BITS_PER_LONG==32 +#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) +#elif BITS_PER_LONG==64 +#define MAX_LFS_FILESIZE 0x7fffffffffffffffUL +#endif + +#define FL_POSIX 1 +#define FL_FLOCK 2 +#define FL_ACCESS 8 /* not trying to lock, just looking */ +#define FL_EXISTS 16 /* when unlocking, test for existence */ +#define FL_LEASE 32 /* lease held on this file */ +#define FL_CLOSE 64 /* unlock on close */ +#define FL_SLEEP 128 /* A blocking lock */ +#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */ +#define FL_UNLOCK_PENDING 512 /* Lease is being broken */ + +/* + * Special return value from posix_lock_file() and vfs_lock_file() for + * asynchronous locking. + */ +#define FILE_LOCK_DEFERRED 1 + +/* + * The POSIX file lock owner is determined by + * the "struct files_struct" in the thread group + * (or NULL for no owner - BSD locks). + * + * Lockd stuffs a "host" pointer into this. 
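Editor's note: a sketch of filling in a whole-file POSIX write lock before handing it to the locking core; struct file_lock, locks_init_lock() and OFFSET_MAX follow below, F_WRLCK comes from <linux/fcntl.h>, current->files/tgid from <linux/sched.h>, and the owner/pid choices mirror what fs/locks.c does for fcntl()-style locks.

/* Illustrative: describe a blocking, whole-file write lock. */
static void example_whole_file_wrlock(struct file_lock *fl, struct file *filp)
{
        locks_init_lock(fl);
        fl->fl_owner = current->files;
        fl->fl_pid = current->tgid;
        fl->fl_file = filp;
        fl->fl_flags = FL_POSIX | FL_SLEEP;
        fl->fl_type = F_WRLCK;
        fl->fl_start = 0;
        fl->fl_end = OFFSET_MAX;
}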
+ */ +typedef struct files_struct *fl_owner_t; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct lock_manager_operations { + int (*lm_compare_owner)(struct file_lock *, struct file_lock *); + void (*lm_notify)(struct file_lock *); /* unblock callback */ + int (*lm_grant)(struct file_lock *, struct file_lock *, int); + void (*lm_release_private)(struct file_lock *); + void (*lm_break)(struct file_lock *); + int (*lm_change)(struct file_lock **, int); +}; + +struct lock_manager { + struct list_head list; +}; + +void locks_start_grace(struct lock_manager *); +void locks_end_grace(struct lock_manager *); +int locks_in_grace(void); + +/* that will die - we need it for nfs_lock_info */ +#include <linux/nfs_fs_i.h> + +struct file_lock { + struct file_lock *fl_next; /* singly linked list for this inode */ + struct list_head fl_link; /* doubly linked list of all locks */ + struct list_head fl_block; /* circular list of blocked processes */ + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + unsigned int fl_pid; + struct pid *fl_nspid; + wait_queue_head_t fl_wait; + struct file *fl_file; + loff_t fl_start; + loff_t fl_end; + + struct fasync_struct * fl_fasync; /* for lease break notifications */ + /* for lease breaks: */ + unsigned long fl_break_time; + unsigned long fl_downgrade_time; + + const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ + const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; /* link in AFS vnode's pending_locks list */ + int state; /* state of grant or error if -ve */ + } afs; + } fl_u; +}; + +/* The following constant reflects the upper bound of the file/locking space */ +#ifndef OFFSET_MAX +#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) +#define OFFSET_MAX INT_LIMIT(loff_t) +#define OFFT_OFFSET_MAX INT_LIMIT(off_t) +#endif + +#include <linux/fcntl.h> + +extern void send_sigio(struct fown_struct *fown, int fd, int band); + +#ifdef CONFIG_FILE_LOCKING +extern int fcntl_getlk(struct file *, struct flock __user *); +extern int fcntl_setlk(unsigned int, struct file *, unsigned int, + struct flock __user *); + +#if BITS_PER_LONG == 32 +extern int fcntl_getlk64(struct file *, struct flock64 __user *); +extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, + struct flock64 __user *); +#endif + +extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); +extern int fcntl_getlease(struct file *filp); + +/* fs/locks.c */ +void locks_free_lock(struct file_lock *fl); +extern void locks_init_lock(struct file_lock *); +extern struct file_lock * locks_alloc_lock(void); +extern void locks_copy_lock(struct file_lock *, struct file_lock *); +extern void __locks_copy_lock(struct file_lock *, const struct file_lock *); +extern void locks_remove_posix(struct file *, fl_owner_t); +extern void locks_remove_flock(struct file *); +extern void locks_release_private(struct file_lock *); +extern void posix_test_lock(struct file *, struct file_lock *); +extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); +extern int posix_lock_file_wait(struct file *, struct file_lock *); +extern int posix_unblock_lock(struct file *, struct file_lock *); +extern int vfs_test_lock(struct file *, struct file_lock *); +extern int vfs_lock_file(struct file *, unsigned int, 
struct file_lock *, struct file_lock *); +extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); +extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); +extern int __break_lease(struct inode *inode, unsigned int flags); +extern void lease_get_mtime(struct inode *, struct timespec *time); +extern int generic_setlease(struct file *, long, struct file_lock **); +extern int vfs_setlease(struct file *, long, struct file_lock **); +extern int lease_modify(struct file_lock **, int); +extern int lock_may_read(struct inode *, loff_t start, unsigned long count); +extern int lock_may_write(struct inode *, loff_t start, unsigned long count); +extern void locks_delete_block(struct file_lock *waiter); +extern void lock_flocks(void); +extern void unlock_flocks(void); +#else /* !CONFIG_FILE_LOCKING */ +static inline int fcntl_getlk(struct file *file, struct flock __user *user) +{ + return -EINVAL; +} + +static inline int fcntl_setlk(unsigned int fd, struct file *file, + unsigned int cmd, struct flock __user *user) +{ + return -EACCES; +} + +#if BITS_PER_LONG == 32 +static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user) +{ + return -EINVAL; +} + +static inline int fcntl_setlk64(unsigned int fd, struct file *file, + unsigned int cmd, struct flock64 __user *user) +{ + return -EACCES; +} +#endif +static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) +{ + return 0; +} + +static inline int fcntl_getlease(struct file *filp) +{ + return 0; +} + +static inline void locks_init_lock(struct file_lock *fl) +{ + return; +} + +static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl) +{ + return; +} + +static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl) +{ + return; +} + +static inline void locks_remove_posix(struct file *filp, fl_owner_t owner) +{ + return; +} + +static inline void locks_remove_flock(struct file *filp) +{ + return; +} + +static inline void posix_test_lock(struct file *filp, struct file_lock *fl) +{ + return; +} + +static inline int posix_lock_file(struct file *filp, struct file_lock *fl, + struct file_lock *conflock) +{ + return -ENOLCK; +} + +static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) +{ + return -ENOLCK; +} + +static inline int posix_unblock_lock(struct file *filp, + struct file_lock *waiter) +{ + return -ENOENT; +} + +static inline int vfs_test_lock(struct file *filp, struct file_lock *fl) +{ + return 0; +} + +static inline int vfs_lock_file(struct file *filp, unsigned int cmd, + struct file_lock *fl, struct file_lock *conf) +{ + return -ENOLCK; +} + +static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) +{ + return 0; +} + +static inline int flock_lock_file_wait(struct file *filp, + struct file_lock *request) +{ + return -ENOLCK; +} + +static inline int __break_lease(struct inode *inode, unsigned int mode) +{ + return 0; +} + +static inline void lease_get_mtime(struct inode *inode, struct timespec *time) +{ + return; +} + +static inline int generic_setlease(struct file *filp, long arg, + struct file_lock **flp) +{ + return -EINVAL; +} + +static inline int vfs_setlease(struct file *filp, long arg, + struct file_lock **lease) +{ + return -EINVAL; +} + +static inline int lease_modify(struct file_lock **before, int arg) +{ + return -EINVAL; +} + +static inline int lock_may_read(struct inode *inode, loff_t start, + unsigned long len) +{ + return 1; +} + +static inline int lock_may_write(struct inode *inode, 
loff_t start, + unsigned long len) +{ + return 1; +} + +static inline void locks_delete_block(struct file_lock *waiter) +{ +} + +static inline void lock_flocks(void) +{ +} + +static inline void unlock_flocks(void) +{ +} + +#endif /* !CONFIG_FILE_LOCKING */ + + +struct fasync_struct { + spinlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; /* singly linked list */ + struct file *fa_file; + struct rcu_head fa_rcu; +}; + +#define FASYNC_MAGIC 0x4601 + +/* SMP safe fasync helpers: */ +extern int fasync_helper(int, struct file *, int, struct fasync_struct **); +extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *); +extern int fasync_remove_entry(struct file *, struct fasync_struct **); +extern struct fasync_struct *fasync_alloc(void); +extern void fasync_free(struct fasync_struct *); + +/* can be called from interrupts */ +extern void kill_fasync(struct fasync_struct **, int, int); + +extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); +extern int f_setown(struct file *filp, unsigned long arg, int force); +extern void f_delown(struct file *filp); +extern pid_t f_getown(struct file *filp); +extern int send_sigurg(struct fown_struct *fown); + +/* + * Umount options + */ + +#define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */ +#define MNT_DETACH 0x00000002 /* Just detach from the tree */ +#define MNT_EXPIRE 0x00000004 /* Mark for expiry */ +#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ +#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ + +extern struct list_head super_blocks; +extern spinlock_t sb_lock; + +struct super_block { + struct list_head s_list; /* Keep this first */ + dev_t s_dev; /* search index; _not_ kdev_t */ + unsigned char s_dirt; + unsigned char s_blocksize_bits; + unsigned long s_blocksize; + loff_t s_maxbytes; /* Max file size */ + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + unsigned long s_flags; + unsigned long s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + struct mutex s_lock; + int s_count; + atomic_t s_active; +#ifdef CONFIG_SECURITY + void *s_security; +#endif + const struct xattr_handler **s_xattr; + + struct list_head s_inodes; /* all inodes */ + struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ +#ifdef CONFIG_SMP + struct list_head __percpu *s_files; +#else + struct list_head s_files; +#endif + struct list_head s_mounts; /* list of mounts; _not_ for fs use */ + /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */ + struct list_head s_dentry_lru; /* unused dentry lru */ + int s_nr_dentry_unused; /* # of dentry on lru */ + + /* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */ + spinlock_t s_inode_lru_lock ____cacheline_aligned_in_smp; + struct list_head s_inode_lru; /* unused inode lru */ + int s_nr_inodes_unused; /* # of inodes on lru */ + + struct block_device *s_bdev; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + struct quota_info s_dquot; /* Diskquota specific options */ + + int s_frozen; + wait_queue_head_t s_wait_unfrozen; + + char s_id[32]; /* Informational name */ + u8 s_uuid[16]; /* UUID */ + + void *s_fs_info; /* Filesystem private info */ + unsigned int s_max_links; + fmode_t s_mode; + + /* Granularity of c/m/atime 
in ns. + Cannot be worse than a second */ + u32 s_time_gran; + + /* + * The next field is for VFS *only*. No filesystems have any business + * even looking at it. You had been warned. + */ + struct mutex s_vfs_rename_mutex; /* Kludge */ + + /* + * Filesystem subtype. If non-empty the filesystem type field + * in /proc/mounts will be "type.subtype" + */ + char *s_subtype; + + /* + * Saved mount options for lazy filesystems using + * generic_show_options() + */ + char __rcu *s_options; + const struct dentry_operations *s_d_op; /* default d_op for dentries */ + + /* + * Saved pool identifier for cleancache (-1 means none) + */ + int cleancache_poolid; + + struct shrinker s_shrink; /* per-sb shrinker handle */ + + /* Number of inodes with nlink == 0 but still referenced */ + atomic_long_t s_remove_count; + + /* Being remounted read-only */ + int s_readonly_remount; +}; + +/* superblock cache pruning functions */ +extern void prune_icache_sb(struct super_block *sb, int nr_to_scan); +extern void prune_dcache_sb(struct super_block *sb, int nr_to_scan); + +extern struct timespec current_fs_time(struct super_block *sb); + +/* + * Snapshotting support. + */ +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_TRANS = 2, +}; + +#define vfs_check_frozen(sb, level) \ + wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level))) + +/* + * until VFS tracks user namespaces for inodes, just make all files + * belong to init_user_ns + */ +extern struct user_namespace init_user_ns; +#define inode_userns(inode) (&init_user_ns) +extern bool inode_owner_or_capable(const struct inode *inode); + +/* not quite ready to be deprecated, but... */ +extern void lock_super(struct super_block *); +extern void unlock_super(struct super_block *); + +/* + * VFS helper functions.. + */ +extern int vfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *); +extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); +extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); +extern int vfs_symlink(struct inode *, struct dentry *, const char *); +extern int vfs_link(struct dentry *, struct inode *, struct dentry *); +extern int vfs_rmdir(struct inode *, struct dentry *); +extern int vfs_unlink(struct inode *, struct dentry *); +extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); + +/* + * VFS dentry helper functions. + */ +extern void dentry_unhash(struct dentry *dentry); + +/* + * VFS file helper functions. + */ +extern void inode_init_owner(struct inode *inode, const struct inode *dir, + umode_t mode); +/* + * VFS FS_IOC_FIEMAP helper definitions. + */ +struct fiemap_extent_info { + unsigned int fi_flags; /* Flags as passed from user */ + unsigned int fi_extents_mapped; /* Number of mapped extents */ + unsigned int fi_extents_max; /* Size of fiemap_extent array */ + struct fiemap_extent __user *fi_extents_start; /* Start of + fiemap_extent array */ +}; +int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, + u64 phys, u64 len, u32 flags); +int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); + +/* + * File types + * + * NOTE! These match bits 12..15 of stat.st_mode + * (ie "(i_mode >> 12) & 15"). + */ +#define DT_UNKNOWN 0 +#define DT_FIFO 1 +#define DT_CHR 2 +#define DT_DIR 4 +#define DT_BLK 6 +#define DT_REG 8 +#define DT_LNK 10 +#define DT_SOCK 12 +#define DT_WHT 14 + +/* + * This is the "filldir" function type, used by readdir() to let + * the kernel specify what kind of dirent layout it wants to have. 
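+ * A minimal callback (names invented for illustration) inspects each
+ * entry it is handed and returns 0 to continue, or non-zero to stop
+ * the iteration:
+ *
+ *	static int example_filldir(void *buf, const char *name, int namlen,
+ *				   loff_t offset, u64 ino, unsigned int d_type)
+ *	{
+ *		return 0;
+ *	}
+ *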
+ * This allows the kernel to read directories into kernel space or + * to have different dirent layouts depending on the binary type. + */ +typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); +struct block_device_operations; + +/* These macros are for out of kernel modules to test that + * the kernel supports the unlocked_ioctl and compat_ioctl + * fields in struct file_operations. */ +#define HAVE_COMPAT_IOCTL 1 +#define HAVE_UNLOCKED_IOCTL 1 + +struct file_operations { + struct module *owner; + loff_t (*llseek) (struct file *, loff_t, int); + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); + ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); + int (*readdir) (struct file *, void *, filldir_t); + unsigned int (*poll) (struct file *, struct poll_table_struct *); + long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); + long (*compat_ioctl) (struct file *, unsigned int, unsigned long); + int (*mmap) (struct file *, struct vm_area_struct *); + int (*open) (struct inode *, struct file *); + int (*flush) (struct file *, fl_owner_t id); + int (*release) (struct inode *, struct file *); + int (*fsync) (struct file *, loff_t, loff_t, int datasync); + int (*aio_fsync) (struct kiocb *, int datasync); + int (*fasync) (int, struct file *, int); + int (*lock) (struct file *, int, struct file_lock *); + ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); + unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); + int (*check_flags)(int); + int (*flock) (struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + int (*setlease)(struct file *, long, struct file_lock **); + long (*fallocate)(struct file *file, int mode, loff_t offset, + loff_t len); +}; + +struct inode_operations { + struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); + void * (*follow_link) (struct dentry *, struct nameidata *); + int (*permission) (struct inode *, int); + struct posix_acl * (*get_acl)(struct inode *, int); + + int (*readlink) (struct dentry *, char __user *,int); + void (*put_link) (struct dentry *, struct nameidata *, void *); + + int (*create) (struct inode *,struct dentry *,umode_t,struct nameidata *); + int (*link) (struct dentry *,struct inode *,struct dentry *); + int (*unlink) (struct inode *,struct dentry *); + int (*symlink) (struct inode *,struct dentry *,const char *); + int (*mkdir) (struct inode *,struct dentry *,umode_t); + int (*rmdir) (struct inode *,struct dentry *); + int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t); + int (*rename) (struct inode *, struct dentry *, + struct inode *, struct dentry *); + void (*truncate) (struct inode *); + int (*setattr) (struct dentry *, struct iattr *); + int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); + int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); + ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); + ssize_t (*listxattr) (struct dentry *, char *, size_t); + int (*removexattr) (struct dentry *, const char *); + void (*truncate_range)(struct inode *, 
loff_t, loff_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, + u64 len); +} ____cacheline_aligned; + +struct seq_file; + +ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, + unsigned long nr_segs, unsigned long fast_segs, + struct iovec *fast_pointer, + struct iovec **ret_pointer, + int check_access); + +extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); +extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); +extern ssize_t vfs_readv(struct file *, const struct iovec __user *, + unsigned long, loff_t *); +extern ssize_t vfs_writev(struct file *, const struct iovec __user *, + unsigned long, loff_t *); + +struct super_operations { + struct inode *(*alloc_inode)(struct super_block *sb); + void (*destroy_inode)(struct inode *); + + void (*dirty_inode) (struct inode *, int flags); + int (*write_inode) (struct inode *, struct writeback_control *wbc); + int (*drop_inode) (struct inode *); + void (*evict_inode) (struct inode *); + void (*put_super) (struct super_block *); + void (*write_super) (struct super_block *); + int (*sync_fs)(struct super_block *sb, int wait); + int (*freeze_fs) (struct super_block *); + int (*unfreeze_fs) (struct super_block *); + int (*statfs) (struct dentry *, struct kstatfs *); + int (*remount_fs) (struct super_block *, int *, char *); + void (*umount_begin) (struct super_block *); + + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); +#ifdef CONFIG_QUOTA + ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); + ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); +#endif + int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); + int (*nr_cached_objects)(struct super_block *); + void (*free_cached_objects)(struct super_block *, int); +}; + +/* + * Inode state bits. Protected by inode->i_lock + * + * Three bits determine the dirty state of the inode, I_DIRTY_SYNC, + * I_DIRTY_DATASYNC and I_DIRTY_PAGES. + * + * Four bits define the lifetime of an inode. Initially, inodes are I_NEW, + * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at + * various stages of removing an inode. + * + * Two bits are used for locking and completion notification, I_NEW and I_SYNC. + * + * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on + * fdatasync(). i_atime is the usual cause. + * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of + * these changes separately from I_DIRTY_SYNC so that we + * don't have to write inode on fdatasync() when only + * mtime has changed in it. + * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. + * I_NEW Serves as both a mutex and completion notification. + * New inodes set I_NEW. If two processes both create + * the same inode, one of them will release its inode and + * wait for I_NEW to be released before returning. + * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can + * also cause waiting on I_NEW, without I_NEW actually + * being set. find_inode() uses this to prevent returning + * nearly-dead inodes. + * I_WILL_FREE Must be set when calling write_inode_now() if i_count + * is zero. I_FREEING must be set when I_WILL_FREE is + * cleared. 
+ * I_FREEING Set when inode is about to be freed but still has dirty + * pages or buffers attached or the inode itself is still + * dirty. + * I_CLEAR Added by end_writeback(). In this state the inode is clean + * and can be destroyed. Inode keeps I_FREEING. + * + * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are + * prohibited for many purposes. iget() must wait for + * the inode to be completely released, then create it + * anew. Other functions will just ignore such inodes, + * if appropriate. I_NEW is used for waiting. + * + * I_SYNC Synchonized write of dirty inode data. The bits is + * set during data writeback, and cleared with a wakeup + * on the bit address once it is done. + * + * I_REFERENCED Marks the inode as recently references on the LRU list. + * + * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit(). + * + * Q: What is the difference between I_WILL_FREE and I_FREEING? + */ +#define I_DIRTY_SYNC (1 << 0) +#define I_DIRTY_DATASYNC (1 << 1) +#define I_DIRTY_PAGES (1 << 2) +#define __I_NEW 3 +#define I_NEW (1 << __I_NEW) +#define I_WILL_FREE (1 << 4) +#define I_FREEING (1 << 5) +#define I_CLEAR (1 << 6) +#define __I_SYNC 7 +#define I_SYNC (1 << __I_SYNC) +#define I_REFERENCED (1 << 8) +#define __I_DIO_WAKEUP 9 +#define I_DIO_WAKEUP (1 << I_DIO_WAKEUP) + +#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) + +extern void __mark_inode_dirty(struct inode *, int); +static inline void mark_inode_dirty(struct inode *inode) +{ + __mark_inode_dirty(inode, I_DIRTY); +} + +static inline void mark_inode_dirty_sync(struct inode *inode) +{ + __mark_inode_dirty(inode, I_DIRTY_SYNC); +} + +extern void inc_nlink(struct inode *inode); +extern void drop_nlink(struct inode *inode); +extern void clear_nlink(struct inode *inode); +extern void set_nlink(struct inode *inode, unsigned int nlink); + +static inline void inode_inc_link_count(struct inode *inode) +{ + inc_nlink(inode); + mark_inode_dirty(inode); +} + +static inline void inode_dec_link_count(struct inode *inode) +{ + drop_nlink(inode); + mark_inode_dirty(inode); +} + +/** + * inode_inc_iversion - increments i_version + * @inode: inode that need to be updated + * + * Every time the inode is modified, the i_version field will be incremented. 
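+ * For illustration, a filesystem that maintains i_version would call
+ * this from its metadata-changing paths, e.g.:
+ *
+ *	inode_inc_iversion(inode);
+ *	mark_inode_dirty(inode);
+ *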
+ * The filesystem has to be mounted with i_version flag + */ + +static inline void inode_inc_iversion(struct inode *inode) +{ + spin_lock(&inode->i_lock); + inode->i_version++; + spin_unlock(&inode->i_lock); +} + +extern void touch_atime(struct path *); +static inline void file_accessed(struct file *file) +{ + if (!(file->f_flags & O_NOATIME)) + touch_atime(&file->f_path); +} + +int sync_inode(struct inode *inode, struct writeback_control *wbc); +int sync_inode_metadata(struct inode *inode, int wait); + +struct file_system_type { + const char *name; + int fs_flags; + struct dentry *(*mount) (struct file_system_type *, int, + const char *, void *); + void (*kill_sb) (struct super_block *); + struct module *owner; + struct file_system_type * next; + struct hlist_head fs_supers; + + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key i_mutex_dir_key; +}; + +extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags, + void *data, int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_bdev(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_single(struct file_system_type *fs_type, + int flags, void *data, + int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_nodev(struct file_system_type *fs_type, + int flags, void *data, + int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); +void generic_shutdown_super(struct super_block *sb); +void kill_block_super(struct super_block *sb); +void kill_anon_super(struct super_block *sb); +void kill_litter_super(struct super_block *sb); +void deactivate_super(struct super_block *sb); +void deactivate_locked_super(struct super_block *sb); +int set_anon_super(struct super_block *s, void *data); +int get_anon_bdev(dev_t *); +void free_anon_bdev(dev_t); +struct super_block *sget(struct file_system_type *type, + int (*test)(struct super_block *,void *), + int (*set)(struct super_block *,void *), + void *data); +extern struct dentry *mount_pseudo(struct file_system_type *, char *, + const struct super_operations *ops, + const struct dentry_operations *dops, + unsigned long); + +/* Alas, no aliases. Too much hassle with bringing module.h everywhere */ +#define fops_get(fops) \ + (((fops) && try_module_get((fops)->owner) ? 
(fops) : NULL)) +#define fops_put(fops) \ + do { if (fops) module_put((fops)->owner); } while(0) + +extern int register_filesystem(struct file_system_type *); +extern int unregister_filesystem(struct file_system_type *); +extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); +#define kern_mount(type) kern_mount_data(type, NULL) +extern void kern_unmount(struct vfsmount *mnt); +extern int may_umount_tree(struct vfsmount *); +extern int may_umount(struct vfsmount *); +extern long do_mount(char *, char *, char *, unsigned long, void *); +extern struct vfsmount *collect_mounts(struct path *); +extern void drop_collected_mounts(struct vfsmount *); +extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, + struct vfsmount *); +extern int vfs_statfs(struct path *, struct kstatfs *); +extern int user_statfs(const char __user *, struct kstatfs *); +extern int fd_statfs(int, struct kstatfs *); +extern int vfs_ustat(dev_t, struct kstatfs *); +extern int freeze_super(struct super_block *super); +extern int thaw_super(struct super_block *super); +extern bool our_mnt(struct vfsmount *mnt); + +extern int current_umask(void); + +/* /sys/fs */ +extern struct kobject *fs_kobj; + +#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK) +extern int rw_verify_area(int, struct file *, loff_t *, size_t); + +#define FLOCK_VERIFY_READ 1 +#define FLOCK_VERIFY_WRITE 2 + +#ifdef CONFIG_FILE_LOCKING +extern int locks_mandatory_locked(struct inode *); +extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); + +/* + * Candidates for mandatory locking have the setgid bit set + * but no group execute bit - an otherwise meaningless combination. + */ + +static inline int __mandatory_lock(struct inode *ino) +{ + return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; +} + +/* + * ... and these candidates should be on MS_MANDLOCK mounted fs, + * otherwise these will be advisory locks + */ + +static inline int mandatory_lock(struct inode *ino) +{ + return IS_MANDLOCK(ino) && __mandatory_lock(ino); +} + +static inline int locks_verify_locked(struct inode *inode) +{ + if (mandatory_lock(inode)) + return locks_mandatory_locked(inode); + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, + struct file *filp, + loff_t size) +{ + if (inode->i_flock && mandatory_lock(inode)) + return locks_mandatory_area( + FLOCK_VERIFY_WRITE, inode, filp, + size < inode->i_size ? size : inode->i_size, + (size < inode->i_size ? 
inode->i_size - size + : size - inode->i_size) + ); + return 0; +} + +static inline int break_lease(struct inode *inode, unsigned int mode) +{ + if (inode->i_flock) + return __break_lease(inode, mode); + return 0; +} +#else /* !CONFIG_FILE_LOCKING */ +static inline int locks_mandatory_locked(struct inode *inode) +{ + return 0; +} + +static inline int locks_mandatory_area(int rw, struct inode *inode, + struct file *filp, loff_t offset, + size_t count) +{ + return 0; +} + +static inline int __mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_locked(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, struct file *filp, + size_t size) +{ + return 0; +} + +static inline int break_lease(struct inode *inode, unsigned int mode) +{ + return 0; +} + +#endif /* CONFIG_FILE_LOCKING */ + +/* fs/open.c */ + +extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, + struct file *filp); +extern int do_fallocate(struct file *file, int mode, loff_t offset, + loff_t len); +extern long do_sys_open(int dfd, const char __user *filename, int flags, + umode_t mode); +extern struct file *filp_open(const char *, int, umode_t); +extern struct file *file_open_root(struct dentry *, struct vfsmount *, + const char *, int); +extern struct file * dentry_open(struct dentry *, struct vfsmount *, int, + const struct cred *); +extern int filp_close(struct file *, fl_owner_t id); +extern char * getname(const char __user *); + +/* fs/ioctl.c */ + +extern int ioctl_preallocate(struct file *filp, void __user *argp); + +/* fs/dcache.c */ +extern void __init vfs_caches_init_early(void); +extern void __init vfs_caches_init(unsigned long); + +extern struct kmem_cache *names_cachep; + +#define __getname_gfp(gfp) kmem_cache_alloc(names_cachep, (gfp)) +#define __getname() __getname_gfp(GFP_KERNEL) +#define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) +#ifndef CONFIG_AUDITSYSCALL +#define putname(name) __putname(name) +#else +extern void putname(const char *name); +#endif + +#ifdef CONFIG_BLOCK +extern int register_blkdev(unsigned int, const char *); +extern void unregister_blkdev(unsigned int, const char *); +extern struct block_device *bdget(dev_t); +extern struct block_device *bdgrab(struct block_device *bdev); +extern void bd_set_size(struct block_device *, loff_t size); +extern sector_t blkdev_max_block(struct block_device *bdev); +extern void bd_forget(struct inode *inode); +extern void bdput(struct block_device *); +extern void invalidate_bdev(struct block_device *); +extern int sync_blockdev(struct block_device *bdev); +extern void kill_bdev(struct block_device *); +extern struct super_block *freeze_bdev(struct block_device *); +extern void emergency_thaw_all(void); +extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); +extern int fsync_bdev(struct block_device *); +#else +static inline void bd_forget(struct inode *inode) {} +static inline int sync_blockdev(struct block_device *bdev) { return 0; } +static inline void kill_bdev(struct block_device *bdev) {} +static inline void invalidate_bdev(struct block_device *bdev) {} + +static inline struct super_block *freeze_bdev(struct block_device *sb) +{ + return NULL; +} + +static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) +{ + return 0; +} +#endif +extern int sync_filesystem(struct super_block *); +extern const struct file_operations 
def_blk_fops; +extern const struct file_operations def_chr_fops; +extern const struct file_operations bad_sock_fops; +extern const struct file_operations def_fifo_fops; +#ifdef CONFIG_BLOCK +extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); +extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); +extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); +extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); +extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder); +extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, + void *holder); +extern int blkdev_put(struct block_device *bdev, fmode_t mode); +#ifdef CONFIG_SYSFS +extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); +extern void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk); +#else +static inline int bd_link_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ + return 0; +} +static inline void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ +} +#endif +#endif + +/* fs/char_dev.c */ +#define CHRDEV_MAJOR_HASH_SIZE 255 +extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); +extern int register_chrdev_region(dev_t, unsigned, const char *); +extern int __register_chrdev(unsigned int major, unsigned int baseminor, + unsigned int count, const char *name, + const struct file_operations *fops); +extern void __unregister_chrdev(unsigned int major, unsigned int baseminor, + unsigned int count, const char *name); +extern void unregister_chrdev_region(dev_t, unsigned); +extern void chrdev_show(struct seq_file *,off_t); + +static inline int register_chrdev(unsigned int major, const char *name, + const struct file_operations *fops) +{ + return __register_chrdev(major, 0, 256, name, fops); +} + +static inline void unregister_chrdev(unsigned int major, const char *name) +{ + __unregister_chrdev(major, 0, 256, name); +} + +/* fs/block_dev.c */ +#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ +#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ + +#ifdef CONFIG_BLOCK +#define BLKDEV_MAJOR_HASH_SIZE 255 +extern const char *__bdevname(dev_t, char *buffer); +extern const char *bdevname(struct block_device *bdev, char *buffer); +extern struct block_device *lookup_bdev(const char *); +extern void blkdev_show(struct seq_file *,off_t); + +#else +#define BLKDEV_MAJOR_HASH_SIZE 0 +#endif + +extern void init_special_inode(struct inode *, umode_t, dev_t); + +/* Invalid inode operations -- fs/bad_inode.c */ +extern void make_bad_inode(struct inode *); +extern int is_bad_inode(struct inode *); + +extern const struct file_operations read_pipefifo_fops; +extern const struct file_operations write_pipefifo_fops; +extern const struct file_operations rdwr_pipefifo_fops; + +#ifdef CONFIG_BLOCK +/* + * return READ, READA, or WRITE + */ +#define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK)) + +/* + * return data direction, READ or WRITE + */ +#define bio_data_dir(bio) ((bio)->bi_rw & 1) + +extern void check_disk_size_change(struct gendisk *disk, + struct block_device *bdev); +extern int revalidate_disk(struct gendisk *); +extern int check_disk_change(struct block_device *); +extern int __invalidate_device(struct block_device *, bool); +extern int invalidate_partition(struct gendisk *, int); +#endif +unsigned long invalidate_mapping_pages(struct address_space *mapping, + 
pgoff_t start, pgoff_t end); + +static inline void invalidate_remote_inode(struct inode *inode) +{ + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + invalidate_mapping_pages(inode->i_mapping, 0, -1); +} +extern int invalidate_inode_pages2(struct address_space *mapping); +extern int invalidate_inode_pages2_range(struct address_space *mapping, + pgoff_t start, pgoff_t end); +extern int write_inode_now(struct inode *, int); +extern int filemap_fdatawrite(struct address_space *); +extern int filemap_flush(struct address_space *); +extern int filemap_fdatawait(struct address_space *); +extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, + loff_t lend); +extern int filemap_write_and_wait(struct address_space *mapping); +extern int filemap_write_and_wait_range(struct address_space *mapping, + loff_t lstart, loff_t lend); +extern int __filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end, int sync_mode); +extern int filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end); + +extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, + int datasync); +extern int vfs_fsync(struct file *file, int datasync); +extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); +extern void sync_supers(void); +extern void emergency_sync(void); +extern void emergency_remount(void); +#ifdef CONFIG_BLOCK +extern sector_t bmap(struct inode *, sector_t); +#endif +extern int notify_change(struct dentry *, struct iattr *); +extern int inode_permission(struct inode *, int); +extern int generic_permission(struct inode *, int); + +static inline bool execute_ok(struct inode *inode) +{ + return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); +} + +/* + * get_write_access() gets write permission for a file. + * put_write_access() releases this write permission. + * This is used for regular files. + * We cannot support write (and maybe mmap read-write shared) accesses and + * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode + * can have the following values: + * 0: no writers, no VM_DENYWRITE mappings + * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist + * > 0: (i_writecount) users are writing to the file. + * + * Normally we operate on that counter with atomic_{inc,dec} and it's safe + * except for the cases where we don't hold i_writecount yet. Then we need to + * use {get,deny}_write_access() - these functions check the sign and refuse + * to do the change if sign is wrong. + */ +static inline int get_write_access(struct inode *inode) +{ + return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY; +} +static inline int deny_write_access(struct file *file) +{ + struct inode *inode = file->f_path.dentry->d_inode; + return atomic_dec_unless_positive(&inode->i_writecount) ? 
0 : -ETXTBSY; +} +static inline void put_write_access(struct inode * inode) +{ + atomic_dec(&inode->i_writecount); +} +static inline void allow_write_access(struct file *file) +{ + if (file) + atomic_inc(&file->f_path.dentry->d_inode->i_writecount); +} +#ifdef CONFIG_IMA +static inline void i_readcount_dec(struct inode *inode) +{ + BUG_ON(!atomic_read(&inode->i_readcount)); + atomic_dec(&inode->i_readcount); +} +static inline void i_readcount_inc(struct inode *inode) +{ + atomic_inc(&inode->i_readcount); +} +#else +static inline void i_readcount_dec(struct inode *inode) +{ + return; +} +static inline void i_readcount_inc(struct inode *inode) +{ + return; +} +#endif +extern int do_pipe_flags(int *, int); +extern struct file *create_read_pipe(struct file *f, int flags); +extern struct file *create_write_pipe(int flags); +extern void free_write_pipe(struct file *); + +extern int kernel_read(struct file *, loff_t, char *, unsigned long); +extern struct file * open_exec(const char *); + +/* fs/dcache.c -- generic fs support functions */ +extern int is_subdir(struct dentry *, struct dentry *); +extern int path_is_under(struct path *, struct path *); +extern ino_t find_inode_number(struct dentry *, struct qstr *); + +#include <linux/err.h> + +/* needed for stackable file system support */ +extern loff_t default_llseek(struct file *file, loff_t offset, int origin); + +extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); + +extern int inode_init_always(struct super_block *, struct inode *); +extern void inode_init_once(struct inode *); +extern void address_space_init_once(struct address_space *mapping); +extern void ihold(struct inode * inode); +extern void iput(struct inode *); +extern struct inode * igrab(struct inode *); +extern ino_t iunique(struct super_block *, ino_t); +extern int inode_needs_sync(struct inode *inode); +extern int generic_delete_inode(struct inode *inode); +static inline int generic_drop_inode(struct inode *inode) +{ + return !inode->i_nlink || inode_unhashed(inode); +} + +extern struct inode *ilookup5_nowait(struct super_block *sb, + unsigned long hashval, int (*test)(struct inode *, void *), + void *data); +extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, + int (*test)(struct inode *, void *), void *data); +extern struct inode *ilookup(struct super_block *sb, unsigned long ino); + +extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); +extern struct inode * iget_locked(struct super_block *, unsigned long); +extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); +extern int insert_inode_locked(struct inode *); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void lockdep_annotate_inode_mutex_key(struct inode *inode); +#else +static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; +#endif +extern void unlock_new_inode(struct inode *); +extern unsigned int get_next_ino(void); + +extern void __iget(struct inode * inode); +extern void iget_failed(struct inode *); +extern void end_writeback(struct inode *); +extern void __destroy_inode(struct inode *); +extern struct inode *new_inode_pseudo(struct super_block *sb); +extern struct inode *new_inode(struct super_block *sb); +extern void free_inode_nonrcu(struct inode *inode); +extern int should_remove_suid(struct dentry *); +extern int file_remove_suid(struct file *); + +extern void __insert_inode_hash(struct inode *, 
unsigned long hashval); +static inline void insert_inode_hash(struct inode *inode) +{ + __insert_inode_hash(inode, inode->i_ino); +} + +extern void __remove_inode_hash(struct inode *); +static inline void remove_inode_hash(struct inode *inode) +{ + if (!inode_unhashed(inode)) + __remove_inode_hash(inode); +} + +extern void inode_sb_list_add(struct inode *inode); + +#ifdef CONFIG_BLOCK +extern void submit_bio(int, struct bio *); +extern int bdev_read_only(struct block_device *); +#endif +extern int set_blocksize(struct block_device *, int); +extern int sb_set_blocksize(struct super_block *, int); +extern int sb_min_blocksize(struct super_block *, int); + +extern int generic_file_mmap(struct file *, struct vm_area_struct *); +extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); +extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); +int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); +extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); +extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, + loff_t *); +extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); +extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, + unsigned long *, loff_t, loff_t *, size_t, size_t); +extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *, + unsigned long, loff_t, loff_t *, size_t, ssize_t); +extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); +extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); +extern int generic_segment_checks(const struct iovec *iov, + unsigned long *nr_segs, size_t *count, int access_flags); + +/* fs/block_dev.c */ +extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos); +extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, + int datasync); +extern void block_sync_page(struct page *page); + +/* fs/splice.c */ +extern ssize_t generic_file_splice_read(struct file *, loff_t *, + struct pipe_inode_info *, size_t, unsigned int); +extern ssize_t default_file_splice_read(struct file *, loff_t *, + struct pipe_inode_info *, size_t, unsigned int); +extern ssize_t generic_file_splice_write(struct pipe_inode_info *, + struct file *, loff_t *, size_t, unsigned int); +extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, + struct file *out, loff_t *, size_t len, unsigned int flags); +extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + size_t len, unsigned int flags); + +extern void +file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); +extern loff_t noop_llseek(struct file *file, loff_t offset, int origin); +extern loff_t no_llseek(struct file *file, loff_t offset, int origin); +extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); +extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, + int origin, loff_t maxsize); +extern int generic_file_open(struct inode * inode, struct file * filp); +extern int nonseekable_open(struct inode * inode, struct file * filp); + +#ifdef CONFIG_FS_XIP +extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len, + loff_t *ppos); +extern int xip_file_mmap(struct file * file, struct 
vm_area_struct * vma); +extern ssize_t xip_file_write(struct file *filp, const char __user *buf, + size_t len, loff_t *ppos); +extern int xip_truncate_page(struct address_space *mapping, loff_t from); +#else +static inline int xip_truncate_page(struct address_space *mapping, loff_t from) +{ + return 0; +} +#endif + +#ifdef CONFIG_BLOCK +typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, + loff_t file_offset); + +enum { + /* need locking between buffered and direct access */ + DIO_LOCKING = 0x01, + + /* filesystem does not support filling holes */ + DIO_SKIP_HOLES = 0x02, +}; + +void dio_end_io(struct bio *bio, int error); +void inode_dio_wait(struct inode *inode); +void inode_dio_done(struct inode *inode); + +ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, + struct block_device *bdev, const struct iovec *iov, loff_t offset, + unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, + dio_submit_t submit_io, int flags); + +static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, + struct inode *inode, const struct iovec *iov, loff_t offset, + unsigned long nr_segs, get_block_t get_block) +{ + return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, + offset, nr_segs, get_block, NULL, NULL, + DIO_LOCKING | DIO_SKIP_HOLES); +} +#else +static inline void inode_dio_wait(struct inode *inode) +{ +} +#endif + +extern const struct file_operations generic_ro_fops; + +#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) + +extern int vfs_readlink(struct dentry *, char __user *, int, const char *); +extern int vfs_follow_link(struct nameidata *, const char *); +extern int page_readlink(struct dentry *, char __user *, int); +extern void *page_follow_link_light(struct dentry *, struct nameidata *); +extern void page_put_link(struct dentry *, struct nameidata *, void *); +extern int __page_symlink(struct inode *inode, const char *symname, int len, + int nofs); +extern int page_symlink(struct inode *inode, const char *symname, int len); +extern const struct inode_operations page_symlink_inode_operations; +extern int generic_readlink(struct dentry *, char __user *, int); +extern void generic_fillattr(struct inode *, struct kstat *); +extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +void __inode_add_bytes(struct inode *inode, loff_t bytes); +void inode_add_bytes(struct inode *inode, loff_t bytes); +void inode_sub_bytes(struct inode *inode, loff_t bytes); +loff_t inode_get_bytes(struct inode *inode); +void inode_set_bytes(struct inode *inode, loff_t bytes); + +extern int vfs_readdir(struct file *, filldir_t, void *); + +extern int vfs_stat(const char __user *, struct kstat *); +extern int vfs_lstat(const char __user *, struct kstat *); +extern int vfs_fstat(unsigned int, struct kstat *); +extern int vfs_fstatat(int , const char __user *, struct kstat *, int); + +extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, + unsigned long arg); +extern int __generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, + loff_t start, loff_t len, + get_block_t *get_block); +extern int generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, u64 start, + u64 len, get_block_t *get_block); + +extern void get_filesystem(struct file_system_type *fs); +extern void put_filesystem(struct file_system_type *fs); +extern struct file_system_type *get_fs_type(const char *name); +extern struct super_block *get_super(struct 
block_device *); +extern struct super_block *get_super_thawed(struct block_device *); +extern struct super_block *get_active_super(struct block_device *bdev); +extern void drop_super(struct super_block *sb); +extern void iterate_supers(void (*)(struct super_block *, void *), void *); +extern void iterate_supers_type(struct file_system_type *, + void (*)(struct super_block *, void *), void *); + +extern int dcache_dir_open(struct inode *, struct file *); +extern int dcache_dir_close(struct inode *, struct file *); +extern loff_t dcache_dir_lseek(struct file *, loff_t, int); +extern int dcache_readdir(struct file *, void *, filldir_t); +extern int simple_setattr(struct dentry *, struct iattr *); +extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int simple_statfs(struct dentry *, struct kstatfs *); +extern int simple_open(struct inode *inode, struct file *file); +extern int simple_link(struct dentry *, struct inode *, struct dentry *); +extern int simple_unlink(struct inode *, struct dentry *); +extern int simple_rmdir(struct inode *, struct dentry *); +extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); +extern int noop_fsync(struct file *, loff_t, loff_t, int); +extern int simple_empty(struct dentry *); +extern int simple_readpage(struct file *file, struct page *page); +extern int simple_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); +extern int simple_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); + +extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); +extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); +extern const struct file_operations simple_dir_operations; +extern const struct inode_operations simple_dir_inode_operations; +struct tree_descr { char *name; const struct file_operations *ops; int mode; }; +struct dentry *d_alloc_name(struct dentry *, const char *); +extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); +extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); +extern void simple_release_fs(struct vfsmount **mount, int *count); + +extern ssize_t simple_read_from_buffer(void __user *to, size_t count, + loff_t *ppos, const void *from, size_t available); +extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); + +extern int generic_file_fsync(struct file *, loff_t, loff_t, int); + +extern int generic_check_addressable(unsigned, u64); + +#ifdef CONFIG_MIGRATION +extern int buffer_migrate_page(struct address_space *, + struct page *, struct page *, + enum migrate_mode); +#else +#define buffer_migrate_page NULL +#endif + +extern int inode_change_ok(const struct inode *, struct iattr *); +extern int inode_newsize_ok(const struct inode *, loff_t offset); +extern void setattr_copy(struct inode *inode, const struct iattr *attr); + +extern void file_update_time(struct file *file); + +extern int generic_show_options(struct seq_file *m, struct dentry *root); +extern void save_mount_options(struct super_block *sb, char *options); +extern void replace_mount_options(struct super_block *sb, char *options); + +static inline ino_t parent_ino(struct dentry *dentry) +{ + ino_t res; + + /* + * Don't strictly need d_lock here? 
If the parent ino could change + * then surely we'd have a deeper race in the caller? + */ + spin_lock(&dentry->d_lock); + res = dentry->d_parent->d_inode->i_ino; + spin_unlock(&dentry->d_lock); + return res; +} + +/* Transaction based IO helpers */ + +/* + * An argresp is stored in an allocated page and holds the + * size of the argument or response, along with its content + */ +struct simple_transaction_argresp { + ssize_t size; + char data[0]; +}; + +#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp)) + +char *simple_transaction_get(struct file *file, const char __user *buf, + size_t size); +ssize_t simple_transaction_read(struct file *file, char __user *buf, + size_t size, loff_t *pos); +int simple_transaction_release(struct inode *inode, struct file *file); + +void simple_transaction_set(struct file *file, size_t n); + +/* + * simple attribute files + * + * These attributes behave similar to those in sysfs: + * + * Writing to an attribute immediately sets a value, an open file can be + * written to multiple times. + * + * Reading from an attribute creates a buffer from the value that might get + * read with multiple read calls. When the attribute has been read + * completely, no further read calls are possible until the file is opened + * again. + * + * All attributes contain a text representation of a numeric value + * that are accessed with the get() and set() functions. + */ +#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ +static int __fops ## _open(struct inode *inode, struct file *file) \ +{ \ + __simple_attr_check_format(__fmt, 0ull); \ + return simple_attr_open(inode, file, __get, __set, __fmt); \ +} \ +static const struct file_operations __fops = { \ + .owner = THIS_MODULE, \ + .open = __fops ## _open, \ + .release = simple_attr_release, \ + .read = simple_attr_read, \ + .write = simple_attr_write, \ + .llseek = generic_file_llseek, \ +}; + +static inline __printf(1, 2) +void __simple_attr_check_format(const char *fmt, ...) 
+{ + /* don't do anything, just let the compiler check the arguments; */ +} + +int simple_attr_open(struct inode *inode, struct file *file, + int (*get)(void *, u64 *), int (*set)(void *, u64), + const char *fmt); +int simple_attr_release(struct inode *inode, struct file *file); +ssize_t simple_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); +ssize_t simple_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos); + +struct ctl_table; +int proc_nr_files(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +int proc_nr_dentry(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +int proc_nr_inodes(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +int __init get_filesystem_list(char *buf); + +#define __FMODE_EXEC ((__force int) FMODE_EXEC) +#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY) + +#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) +#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ + (flag & __FMODE_NONOTIFY))) + +static inline int is_sxid(umode_t mode) +{ + return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); +} + +static inline void inode_has_no_xattr(struct inode *inode) +{ + if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC)) + inode->i_flags |= S_NOSEC; +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h new file mode 100644 index 00000000..51b79346 --- /dev/null +++ b/include/linux/fs_enet_pd.h @@ -0,0 +1,162 @@ +/* + * Platform information definitions for the + * universal Freescale Ethernet driver. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#ifndef FS_ENET_PD_H +#define FS_ENET_PD_H + +#include <linux/string.h> +#include <linux/of_mdio.h> +#include <asm/types.h> + +#define FS_ENET_NAME "fs_enet" + +enum fs_id { + fsid_fec1, + fsid_fec2, + fsid_fcc1, + fsid_fcc2, + fsid_fcc3, + fsid_scc1, + fsid_scc2, + fsid_scc3, + fsid_scc4, +}; + +#define FS_MAX_INDEX 9 + +static inline int fs_get_fec_index(enum fs_id id) +{ + if (id >= fsid_fec1 && id <= fsid_fec2) + return id - fsid_fec1; + return -1; +} + +static inline int fs_get_fcc_index(enum fs_id id) +{ + if (id >= fsid_fcc1 && id <= fsid_fcc3) + return id - fsid_fcc1; + return -1; +} + +static inline int fs_get_scc_index(enum fs_id id) +{ + if (id >= fsid_scc1 && id <= fsid_scc4) + return id - fsid_scc1; + return -1; +} + +static inline int fs_fec_index2id(int index) +{ + int id = fsid_fec1 + index - 1; + if (id >= fsid_fec1 && id <= fsid_fec2) + return id; + return FS_MAX_INDEX; + } + +static inline int fs_fcc_index2id(int index) +{ + int id = fsid_fcc1 + index - 1; + if (id >= fsid_fcc1 && id <= fsid_fcc3) + return id; + return FS_MAX_INDEX; +} + +static inline int fs_scc_index2id(int index) +{ + int id = fsid_scc1 + index - 1; + if (id >= fsid_scc1 && id <= fsid_scc4) + return id; + return FS_MAX_INDEX; +} + +enum fs_mii_method { + fsmii_fixed, + fsmii_fec, + fsmii_bitbang, +}; + +enum fs_ioport { + fsiop_porta, + fsiop_portb, + fsiop_portc, + fsiop_portd, + fsiop_porte, +}; + +struct fs_mii_bit { + u32 offset; + u8 bit; + u8 polarity; +}; +struct fs_mii_bb_platform_info { + struct fs_mii_bit mdio_dir; + struct fs_mii_bit mdio_dat; + struct fs_mii_bit mdc_dat; + int delay; /* delay in us */ + int irq[32]; /* irqs per phy's */ +}; + +struct fs_platform_info { + + void(*init_ioports)(struct fs_platform_info *); + /* device specific information */ + int fs_no; /* controller index */ + char fs_type[4]; /* controller type */ + + u32 cp_page; /* CPM page */ + u32 cp_block; /* CPM sblock */ + u32 cp_command; /* CPM page/sblock/mcn */ + + u32 clk_trx; /* some stuff for pins & mux configuration*/ + u32 clk_rx; + u32 clk_tx; + u32 clk_route; + u32 clk_mask; + + u32 mem_offset; + u32 dpram_offset; + u32 fcc_regs_c; + + u32 device_flags; + + struct device_node *phy_node; + const struct fs_mii_bus_info *bus_info; + + int rx_ring, tx_ring; /* number of buffers on rx */ + __u8 macaddr[6]; /* mac address */ + int rx_copybreak; /* limit we copy small frames */ + int use_napi; /* use NAPI */ + int napi_weight; /* NAPI weight */ + + int use_rmii; /* use RMII mode */ + int has_phy; /* if the network is phy container as well...*/ +}; +struct fs_mii_fec_platform_info { + u32 irq[32]; + u32 mii_speed; +}; + +static inline int fs_get_id(struct fs_platform_info *fpi) +{ + if(strstr(fpi->fs_type, "SCC")) + return fs_scc_index2id(fpi->fs_no); + if(strstr(fpi->fs_type, "FCC")) + return fs_fcc_index2id(fpi->fs_no); + if(strstr(fpi->fs_type, "FEC")) + return fs_fec_index2id(fpi->fs_no); + return fpi->fs_no; +} + +#endif diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h new file mode 100644 index 00000000..da317c71 --- /dev/null +++ b/include/linux/fs_stack.h @@ -0,0 +1,29 @@ +#ifndef _LINUX_FS_STACK_H +#define _LINUX_FS_STACK_H + +/* This file defines generic functions used primarily by stackable + * filesystems; none of these functions require i_mutex to be held. 
+ */ + +#include <linux/fs.h> + +/* externs for fs/stack.c */ +extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src); +extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src); + +/* inlines */ +static inline void fsstack_copy_attr_atime(struct inode *dest, + const struct inode *src) +{ + dest->i_atime = src->i_atime; +} + +static inline void fsstack_copy_attr_times(struct inode *dest, + const struct inode *src) +{ + dest->i_atime = src->i_atime; + dest->i_mtime = src->i_mtime; + dest->i_ctime = src->i_ctime; +} + +#endif /* _LINUX_FS_STACK_H */ diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h new file mode 100644 index 00000000..003dc0fd --- /dev/null +++ b/include/linux/fs_struct.h @@ -0,0 +1,54 @@ +#ifndef _LINUX_FS_STRUCT_H +#define _LINUX_FS_STRUCT_H + +#include <linux/path.h> +#include <linux/spinlock.h> +#include <linux/seqlock.h> + +struct fs_struct { + int users; + spinlock_t lock; + seqcount_t seq; + int umask; + int in_exec; + struct path root, pwd; +}; + +extern struct kmem_cache *fs_cachep; + +extern void exit_fs(struct task_struct *); +extern void set_fs_root(struct fs_struct *, struct path *); +extern void set_fs_pwd(struct fs_struct *, struct path *); +extern struct fs_struct *copy_fs_struct(struct fs_struct *); +extern void free_fs_struct(struct fs_struct *); +extern void daemonize_fs_struct(void); +extern int unshare_fs_struct(void); + +static inline void get_fs_root(struct fs_struct *fs, struct path *root) +{ + spin_lock(&fs->lock); + *root = fs->root; + path_get(root); + spin_unlock(&fs->lock); +} + +static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) +{ + spin_lock(&fs->lock); + *pwd = fs->pwd; + path_get(pwd); + spin_unlock(&fs->lock); +} + +static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, + struct path *pwd) +{ + spin_lock(&fs->lock); + *root = fs->root; + path_get(root); + *pwd = fs->pwd; + path_get(pwd); + spin_unlock(&fs->lock); +} + +#endif /* _LINUX_FS_STRUCT_H */ diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h new file mode 100644 index 00000000..36b61ff3 --- /dev/null +++ b/include/linux/fs_uart_pd.h @@ -0,0 +1,71 @@ +/* + * Platform information definitions for the CPM Uart driver. + * + * 2006 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
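The get_fs_root()/get_fs_pwd() helpers above hand back a referenced copy of the path taken under fs->lock; the caller owns that reference and drops it with path_put(). A short sketch of the intended pattern (access via current->fs is just for illustration):

#include <linux/fs_struct.h>
#include <linux/path.h>
#include <linux/sched.h>

static void pwd_usage_pattern(void)
{
    struct path pwd;

    get_fs_pwd(current->fs, &pwd);  /* copies fs->pwd and takes a reference */
    /* ... safely use pwd.mnt / pwd.dentry here, even if the task chdir()s ... */
    path_put(&pwd);                 /* drop the reference taken above */
}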
+ */ + +#ifndef FS_UART_PD_H +#define FS_UART_PD_H + +#include <asm/types.h> + +enum fs_uart_id { + fsid_smc1_uart, + fsid_smc2_uart, + fsid_scc1_uart, + fsid_scc2_uart, + fsid_scc3_uart, + fsid_scc4_uart, + fs_uart_nr, +}; + +static inline int fs_uart_id_scc2fsid(int id) +{ + return fsid_scc1_uart + id - 1; +} + +static inline int fs_uart_id_fsid2scc(int id) +{ + return id - fsid_scc1_uart + 1; +} + +static inline int fs_uart_id_smc2fsid(int id) +{ + return fsid_smc1_uart + id - 1; +} + +static inline int fs_uart_id_fsid2smc(int id) +{ + return id - fsid_smc1_uart + 1; +} + +struct fs_uart_platform_info { + void(*init_ioports)(struct fs_uart_platform_info *); + /* device specific information */ + int fs_no; /* controller index */ + char fs_type[4]; /* controller type */ + u32 uart_clk; + u8 tx_num_fifo; + u8 tx_buf_size; + u8 rx_num_fifo; + u8 rx_buf_size; + u8 brg; + u8 clk_rx; + u8 clk_tx; +}; + +static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi) +{ + if(strstr(fpi->fs_type, "SMC")) + return fs_uart_id_smc2fsid(fpi->fs_no); + if(strstr(fpi->fs_type, "SCC")) + return fs_uart_id_scc2fsid(fpi->fs_no); + return fpi->fs_no; +} + +#endif diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h new file mode 100644 index 00000000..ce31408b --- /dev/null +++ b/include/linux/fscache-cache.h @@ -0,0 +1,516 @@ +/* General filesystem caching backing cache interface + * + * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * NOTE!!! See: + * + * Documentation/filesystems/caching/backend-api.txt + * + * for a description of the cache backend interface declared here. + */ + +#ifndef _LINUX_FSCACHE_CACHE_H +#define _LINUX_FSCACHE_CACHE_H + +#include <linux/fscache.h> +#include <linux/sched.h> +#include <linux/workqueue.h> + +#define NR_MAXCACHES BITS_PER_LONG + +struct fscache_cache; +struct fscache_cache_ops; +struct fscache_object; +struct fscache_operation; + +/* + * cache tag definition + */ +struct fscache_cache_tag { + struct list_head link; + struct fscache_cache *cache; /* cache referred to by this tag */ + unsigned long flags; +#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ + atomic_t usage; + char name[0]; /* tag name */ +}; + +/* + * cache definition + */ +struct fscache_cache { + const struct fscache_cache_ops *ops; + struct fscache_cache_tag *tag; /* tag representing this cache */ + struct kobject *kobj; /* system representation of this cache */ + struct list_head link; /* link in list of caches */ + size_t max_index_size; /* maximum size of index data */ + char identifier[36]; /* cache label */ + + /* node management */ + struct work_struct op_gc; /* operation garbage collector */ + struct list_head object_list; /* list of data/index objects */ + struct list_head op_gc_list; /* list of ops to be deleted */ + spinlock_t object_list_lock; + spinlock_t op_gc_list_lock; + atomic_t object_count; /* no. 
of live objects in this cache */ + struct fscache_object *fsdef; /* object for the fsdef index */ + unsigned long flags; +#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */ +#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */ +}; + +extern wait_queue_head_t fscache_cache_cleared_wq; + +/* + * operation to be applied to a cache object + * - retrieval initiation operations are done in the context of the process + * that issued them, and not in an async thread pool + */ +typedef void (*fscache_operation_release_t)(struct fscache_operation *op); +typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); + +struct fscache_operation { + struct work_struct work; /* record for async ops */ + struct list_head pend_link; /* link in object->pending_ops */ + struct fscache_object *object; /* object to be operated upon */ + + unsigned long flags; +#define FSCACHE_OP_TYPE 0x000f /* operation type */ +#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */ +#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */ +#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ +#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ +#define FSCACHE_OP_DEAD 6 /* op is now dead */ +#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */ +#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */ + + atomic_t usage; + unsigned debug_id; /* debugging ID */ + + /* operation processor callback + * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform + * the op in a non-pool thread */ + fscache_operation_processor_t processor; + + /* operation releaser */ + fscache_operation_release_t release; +}; + +extern atomic_t fscache_op_debug_id; +extern void fscache_op_work_func(struct work_struct *work); + +extern void fscache_enqueue_operation(struct fscache_operation *); +extern void fscache_put_operation(struct fscache_operation *); + +/** + * fscache_operation_init - Do basic initialisation of an operation + * @op: The operation to initialise + * @release: The release function to assign + * + * Do basic initialisation of an operation. The caller must still set flags, + * object and processor if needed. + */ +static inline void fscache_operation_init(struct fscache_operation *op, + fscache_operation_processor_t processor, + fscache_operation_release_t release) +{ + INIT_WORK(&op->work, fscache_op_work_func); + atomic_set(&op->usage, 1); + op->debug_id = atomic_inc_return(&fscache_op_debug_id); + op->processor = processor; + op->release = release; + INIT_LIST_HEAD(&op->pend_link); +} + +/* + * data read operation + */ +struct fscache_retrieval { + struct fscache_operation op; + struct address_space *mapping; /* netfs pages */ + fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ + void *context; /* netfs read context (pinned) */ + struct list_head to_do; /* list of things to be done by the backend */ + unsigned long start_time; /* time at which retrieval started */ +}; + +typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op, + struct page *page, + gfp_t gfp); + +typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op, + struct list_head *pages, + unsigned *nr_pages, + gfp_t gfp); + +/** + * fscache_get_retrieval - Get an extra reference on a retrieval operation + * @op: The retrieval operation to get a reference on + * + * Get an extra reference on a retrieval operation. 
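A cache backend embeds struct fscache_operation in its own per-operation record and runs fscache_operation_init() on it before choosing how the op executes (FSCACHE_OP_ASYNC for the thread pool, FSCACHE_OP_MYTHREAD for the issuing thread). The sketch below is illustrative only; my_backend_op and its callbacks are not part of this header.

#include <linux/fscache-cache.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct my_backend_op {
    struct fscache_operation op;    /* generic part, initialised below */
    /* ... backend-private state would follow ... */
};

static void my_op_processor(struct fscache_operation *_op)
{
    struct my_backend_op *op = container_of(_op, struct my_backend_op, op);
    /* asynchronous work happens here; the work item drops its ref afterwards */
    (void)op;
}

static void my_op_release(struct fscache_operation *_op)
{
    kfree(container_of(_op, struct my_backend_op, op));
}

static struct my_backend_op *my_alloc_op(void)
{
    struct my_backend_op *op = kzalloc(sizeof(*op), GFP_KERNEL);

    if (!op)
        return NULL;
    fscache_operation_init(&op->op, my_op_processor, my_op_release);
    op->op.flags = FSCACHE_OP_ASYNC;    /* run via the thread pool */
    return op;
}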
+ */ +static inline +struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op) +{ + atomic_inc(&op->op.usage); + return op; +} + +/** + * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing + * @op: The retrieval operation affected + * + * Enqueue a retrieval operation for processing by the FS-Cache thread pool. + */ +static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) +{ + fscache_enqueue_operation(&op->op); +} + +/** + * fscache_put_retrieval - Drop a reference to a retrieval operation + * @op: The retrieval operation affected + * + * Drop a reference to a retrieval operation. + */ +static inline void fscache_put_retrieval(struct fscache_retrieval *op) +{ + fscache_put_operation(&op->op); +} + +/* + * cached page storage work item + * - used to do three things: + * - batch writes to the cache + * - do cache writes asynchronously + * - defer writes until cache object lookup completion + */ +struct fscache_storage { + struct fscache_operation op; + pgoff_t store_limit; /* don't write more than this */ +}; + +/* + * cache operations + */ +struct fscache_cache_ops { + /* name of cache provider */ + const char *name; + + /* allocate an object record for a cookie */ + struct fscache_object *(*alloc_object)(struct fscache_cache *cache, + struct fscache_cookie *cookie); + + /* look up the object for a cookie + * - return -ETIMEDOUT to be requeued + */ + int (*lookup_object)(struct fscache_object *object); + + /* finished looking up */ + void (*lookup_complete)(struct fscache_object *object); + + /* increment the usage count on this object (may fail if unmounting) */ + struct fscache_object *(*grab_object)(struct fscache_object *object); + + /* pin an object in the cache */ + int (*pin_object)(struct fscache_object *object); + + /* unpin an object in the cache */ + void (*unpin_object)(struct fscache_object *object); + + /* store the updated auxiliary data on an object */ + void (*update_object)(struct fscache_object *object); + + /* discard the resources pinned by an object and effect retirement if + * necessary */ + void (*drop_object)(struct fscache_object *object); + + /* dispose of a reference to an object */ + void (*put_object)(struct fscache_object *object); + + /* sync a cache */ + void (*sync_cache)(struct fscache_cache *cache); + + /* notification that the attributes of a non-index object (such as + * i_size) have changed */ + int (*attr_changed)(struct fscache_object *object); + + /* reserve space for an object's data and associated metadata */ + int (*reserve_space)(struct fscache_object *object, loff_t i_size); + + /* request a backing block for a page be read or allocated in the + * cache */ + fscache_page_retrieval_func_t read_or_alloc_page; + + /* request backing blocks for a list of pages be read or allocated in + * the cache */ + fscache_pages_retrieval_func_t read_or_alloc_pages; + + /* request a backing block for a page be allocated in the cache so that + * it can be written directly */ + fscache_page_retrieval_func_t allocate_page; + + /* request backing blocks for pages be allocated in the cache so that + * they can be written directly */ + fscache_pages_retrieval_func_t allocate_pages; + + /* write a page to its backing block in the cache */ + int (*write_page)(struct fscache_storage *op, struct page *page); + + /* detach backing block from a page (optional) + * - must release the cookie lock before returning + * - may sleep + */ + void (*uncache_page)(struct fscache_object *object, + struct page *page); + + 
/* dissociate a cache from all the pages it was backing */ + void (*dissociate_pages)(struct fscache_cache *cache); +}; + +/* + * data file or index object cookie + * - a file will only appear in one cache + * - a request to cache a file may or may not be honoured, subject to + * constraints such as disk space + * - indices are created on disk just-in-time + */ +struct fscache_cookie { + atomic_t usage; /* number of users of this cookie */ + atomic_t n_children; /* number of children of this cookie */ + spinlock_t lock; + spinlock_t stores_lock; /* lock on page store tree */ + struct hlist_head backing_objects; /* object(s) backing this file/index */ + const struct fscache_cookie_def *def; /* definition */ + struct fscache_cookie *parent; /* parent of this entry */ + void *netfs_data; /* back pointer to netfs */ + struct radix_tree_root stores; /* pages to be stored on this cookie */ +#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ +#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */ + + unsigned long flags; +#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ +#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */ +#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */ +#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */ +#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */ +#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */ +}; + +extern struct fscache_cookie fscache_fsdef_index; + +/* + * on-disk cache file or index handle + */ +struct fscache_object { + enum fscache_object_state { + FSCACHE_OBJECT_INIT, /* object in initial unbound state */ + FSCACHE_OBJECT_LOOKING_UP, /* looking up object */ + FSCACHE_OBJECT_CREATING, /* creating object */ + + /* active states */ + FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */ + FSCACHE_OBJECT_ACTIVE, /* object is usable */ + FSCACHE_OBJECT_UPDATING, /* object is updating */ + + /* terminal states */ + FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */ + FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */ + FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */ + FSCACHE_OBJECT_RELEASING, /* releasing object */ + FSCACHE_OBJECT_RECYCLING, /* retiring object */ + FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ + FSCACHE_OBJECT_DEAD, /* object is now dead */ + FSCACHE_OBJECT__NSTATES + } state; + + int debug_id; /* debugging ID */ + int n_children; /* number of child objects */ + int n_ops; /* number of ops outstanding on object */ + int n_obj_ops; /* number of object ops outstanding on object */ + int n_in_progress; /* number of ops in progress */ + int n_exclusive; /* number of exclusive ops queued */ + atomic_t n_reads; /* number of read ops in progress */ + spinlock_t lock; /* state and operations lock */ + + unsigned long lookup_jif; /* time at which lookup started */ + unsigned long event_mask; /* events this object is interested in */ + unsigned long events; /* events to be processed by this object + * (order is important - using fls) */ +#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */ +#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */ +#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */ +#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */ +#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if 
netfs requested object release */ +#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ +#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ +#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events*/ + + unsigned long flags; +#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ +#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */ +#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */ + + struct list_head cache_link; /* link in cache->object_list */ + struct hlist_node cookie_link; /* link in cookie->backing_objects */ + struct fscache_cache *cache; /* cache that supplied this object */ + struct fscache_cookie *cookie; /* netfs's file/index object */ + struct fscache_object *parent; /* parent object */ + struct work_struct work; /* attention scheduling record */ + struct list_head dependents; /* FIFO of dependent objects */ + struct list_head dep_link; /* link in parent's dependents list */ + struct list_head pending_ops; /* unstarted operations on this object */ +#ifdef CONFIG_FSCACHE_OBJECT_LIST + struct rb_node objlist_link; /* link in global object list */ +#endif + pgoff_t store_limit; /* current storage limit */ + loff_t store_limit_l; /* current storage limit */ +}; + +extern const char *fscache_object_states[]; + +#define fscache_object_is_active(obj) \ + (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ + (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ + (obj)->state < FSCACHE_OBJECT_DYING) + +#define fscache_object_is_dead(obj) \ + (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ + (obj)->state >= FSCACHE_OBJECT_DYING) + +extern void fscache_object_work_func(struct work_struct *work); + +/** + * fscache_object_init - Initialise a cache object description + * @object: Object description + * + * Initialise a cache object description to its basic values. + * + * See Documentation/filesystems/caching/backend-api.txt for a complete + * description. + */ +static inline +void fscache_object_init(struct fscache_object *object, + struct fscache_cookie *cookie, + struct fscache_cache *cache) +{ + atomic_inc(&cache->object_count); + + object->state = FSCACHE_OBJECT_INIT; + spin_lock_init(&object->lock); + INIT_LIST_HEAD(&object->cache_link); + INIT_HLIST_NODE(&object->cookie_link); + INIT_WORK(&object->work, fscache_object_work_func); + INIT_LIST_HEAD(&object->dependents); + INIT_LIST_HEAD(&object->dep_link); + INIT_LIST_HEAD(&object->pending_ops); + object->n_children = 0; + object->n_ops = object->n_in_progress = object->n_exclusive = 0; + object->events = object->event_mask = 0; + object->flags = 0; + object->store_limit = 0; + object->store_limit_l = 0; + object->cache = cache; + object->cookie = cookie; + object->parent = NULL; +} + +extern void fscache_object_lookup_negative(struct fscache_object *object); +extern void fscache_obtained_object(struct fscache_object *object); + +#ifdef CONFIG_FSCACHE_OBJECT_LIST +extern void fscache_object_destroy(struct fscache_object *object); +#else +#define fscache_object_destroy(object) do {} while(0) +#endif + +/** + * fscache_object_destroyed - Note destruction of an object in a cache + * @cache: The cache from which the object came + * + * Note the destruction and deallocation of an object record in a cache. 
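A backend's ->alloc_object() typically wraps fscache_object_init() around its own object record, roughly as follows (struct my_cache_object is an invented stand-in for a real backend type):

#include <linux/fscache-cache.h>
#include <linux/slab.h>

struct my_cache_object {
    struct fscache_object fscache;  /* generic part, initialised below */
    /* ... backend-specific fields (on-disk handle, keys, ...) ... */
};

static struct fscache_object *my_alloc_object(struct fscache_cache *cache,
                                              struct fscache_cookie *cookie)
{
    struct my_cache_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    if (!obj)
        return NULL;

    /* Sets FSCACHE_OBJECT_INIT state and bumps cache->object_count */
    fscache_object_init(&obj->fscache, cookie, cache);
    return &obj->fscache;
}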
+ */ +static inline void fscache_object_destroyed(struct fscache_cache *cache) +{ + if (atomic_dec_and_test(&cache->object_count)) + wake_up_all(&fscache_cache_cleared_wq); +} + +/** + * fscache_object_lookup_error - Note an object encountered an error + * @object: The object on which the error was encountered + * + * Note that an object encountered a fatal error (usually an I/O error) and + * that it should be withdrawn as soon as possible. + */ +static inline void fscache_object_lookup_error(struct fscache_object *object) +{ + set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events); +} + +/** + * fscache_set_store_limit - Set the maximum size to be stored in an object + * @object: The object to set the maximum on + * @i_size: The limit to set in bytes + * + * Set the maximum size an object is permitted to reach, implying the highest + * byte that may be written. Intended to be called by the attr_changed() op. + * + * See Documentation/filesystems/caching/backend-api.txt for a complete + * description. + */ +static inline +void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) +{ + object->store_limit_l = i_size; + object->store_limit = i_size >> PAGE_SHIFT; + if (i_size & ~PAGE_MASK) + object->store_limit++; +} + +/** + * fscache_end_io - End a retrieval operation on a page + * @op: The FS-Cache operation covering the retrieval + * @page: The page that was to be fetched + * @error: The error code (0 if successful) + * + * Note the end of an operation to retrieve a page, as covered by a particular + * operation record. + */ +static inline void fscache_end_io(struct fscache_retrieval *op, + struct page *page, int error) +{ + op->end_io_func(page, op->context, error); +} + +/* + * out-of-line cache backend functions + */ +extern __printf(3, 4) +void fscache_init_cache(struct fscache_cache *cache, + const struct fscache_cache_ops *ops, + const char *idfmt, ...); + +extern int fscache_add_cache(struct fscache_cache *cache, + struct fscache_object *fsdef, + const char *tagname); +extern void fscache_withdraw_cache(struct fscache_cache *cache); + +extern void fscache_io_error(struct fscache_cache *cache); + +extern void fscache_mark_pages_cached(struct fscache_retrieval *op, + struct pagevec *pagevec); + +extern bool fscache_object_sleep_till_congested(signed long *timeoutp); + +extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + const void *data, + uint16_t datalen); + +#endif /* _LINUX_FSCACHE_CACHE_H */ diff --git a/include/linux/fscache.h b/include/linux/fscache.h new file mode 100644 index 00000000..9ec20dec --- /dev/null +++ b/include/linux/fscache.h @@ -0,0 +1,667 @@ +/* General filesystem caching interface + * + * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * NOTE!!! See: + * + * Documentation/filesystems/caching/netfs-api.txt + * + * for a description of the network filesystem interface declared here. 
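Bringing a cache online strings several of the declarations above together: fscache_init_cache() fills in the generic fields, the backend allocates an object for the special fscache_fsdef_index cookie, and fscache_add_cache() publishes the lot. A rough sketch, assuming a backend-provided my_cache_ops table and the my_alloc_object() helper sketched earlier:

#include <linux/fscache-cache.h>

extern const struct fscache_cache_ops my_cache_ops;     /* assumed backend ops table */
extern struct fscache_object *my_alloc_object(struct fscache_cache *cache,
                                              struct fscache_cookie *cookie);

static struct fscache_cache my_cache;

static int my_cache_register(void)
{
    struct fscache_object *fsdef;

    fscache_init_cache(&my_cache, &my_cache_ops, "mycache%u", 0);

    /* The fsdef object represents the root index of this cache */
    fsdef = my_alloc_object(&my_cache, &fscache_fsdef_index);
    if (!fsdef)
        return -ENOMEM;

    return fscache_add_cache(&my_cache, fsdef, "mycache_tag");
}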
+ */ + +#ifndef _LINUX_FSCACHE_H +#define _LINUX_FSCACHE_H + +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/pagemap.h> +#include <linux/pagevec.h> + +#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) +#define fscache_available() (1) +#define fscache_cookie_valid(cookie) (cookie) +#else +#define fscache_available() (0) +#define fscache_cookie_valid(cookie) (0) +#endif + + +/* + * overload PG_private_2 to give us PG_fscache - this is used to indicate that + * a page is currently backed by a local disk cache + */ +#define PageFsCache(page) PagePrivate2((page)) +#define SetPageFsCache(page) SetPagePrivate2((page)) +#define ClearPageFsCache(page) ClearPagePrivate2((page)) +#define TestSetPageFsCache(page) TestSetPagePrivate2((page)) +#define TestClearPageFsCache(page) TestClearPagePrivate2((page)) + +/* pattern used to fill dead space in an index entry */ +#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79 + +struct pagevec; +struct fscache_cache_tag; +struct fscache_cookie; +struct fscache_netfs; + +typedef void (*fscache_rw_complete_t)(struct page *page, + void *context, + int error); + +/* result of index entry consultation */ +enum fscache_checkaux { + FSCACHE_CHECKAUX_OKAY, /* entry okay as is */ + FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */ + FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */ +}; + +/* + * fscache cookie definition + */ +struct fscache_cookie_def { + /* name of cookie type */ + char name[16]; + + /* cookie type */ + uint8_t type; +#define FSCACHE_COOKIE_TYPE_INDEX 0 +#define FSCACHE_COOKIE_TYPE_DATAFILE 1 + + /* select the cache into which to insert an entry in this index + * - optional + * - should return a cache identifier or NULL to cause the cache to be + * inherited from the parent if possible or the first cache picked + * for a non-index file if not + */ + struct fscache_cache_tag *(*select_cache)( + const void *parent_netfs_data, + const void *cookie_netfs_data); + + /* get an index key + * - should store the key data in the buffer + * - should return the amount of data stored + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + uint16_t (*get_key)(const void *cookie_netfs_data, + void *buffer, + uint16_t bufmax); + + /* get certain file attributes from the netfs data + * - this function can be absent for an index + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + void (*get_attr)(const void *cookie_netfs_data, uint64_t *size); + + /* get the auxiliary data from netfs data + * - this function can be absent if the index carries no state data + * - should store the auxiliary data in the buffer + * - should return the amount of amount stored + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + uint16_t (*get_aux)(const void *cookie_netfs_data, + void *buffer, + uint16_t bufmax); + + /* consult the netfs about the state of an object + * - this function can be absent if the index carries no state data + * - the netfs data from the cookie being used as the target is + * presented, as is the auxiliary data + */ + enum fscache_checkaux (*check_aux)(void *cookie_netfs_data, + const void *data, + uint16_t datalen); + + /* get an extra reference on a read context + * - this function can be absent if the completion function doesn't + * require a context + */ + void (*get_context)(void *cookie_netfs_data, void *context); 
+ + /* release an extra reference on a read context + * - this function can be absent if the completion function doesn't + * require a context + */ + void (*put_context)(void *cookie_netfs_data, void *context); + + /* indicate pages that now have cache metadata retained + * - this function should mark the specified pages as now being cached + * - the pages will have been marked with PG_fscache before this is + * called, so this is optional + */ + void (*mark_pages_cached)(void *cookie_netfs_data, + struct address_space *mapping, + struct pagevec *cached_pvec); + + /* indicate the cookie is no longer cached + * - this function is called when the backing store currently caching + * a cookie is removed + * - the netfs should use this to clean up any markers indicating + * cached pages + * - this is mandatory for any object that may have data + */ + void (*now_uncached)(void *cookie_netfs_data); +}; + +/* + * fscache cached network filesystem type + * - name, version and ops must be filled in before registration + * - all other fields will be set during registration + */ +struct fscache_netfs { + uint32_t version; /* indexing version */ + const char *name; /* filesystem name */ + struct fscache_cookie *primary_index; + struct list_head link; /* internal link */ +}; + +/* + * slow-path functions for when there is actually caching available, and the + * netfs does actually have a valid token + * - these are not to be called directly + * - these are undefined symbols when FS-Cache is not configured and the + * optimiser takes care of not using them + */ +extern int __fscache_register_netfs(struct fscache_netfs *); +extern void __fscache_unregister_netfs(struct fscache_netfs *); +extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *); +extern void __fscache_release_cache_tag(struct fscache_cache_tag *); + +extern struct fscache_cookie *__fscache_acquire_cookie( + struct fscache_cookie *, + const struct fscache_cookie_def *, + void *); +extern void __fscache_relinquish_cookie(struct fscache_cookie *, int); +extern void __fscache_update_cookie(struct fscache_cookie *); +extern int __fscache_attr_changed(struct fscache_cookie *); +extern int __fscache_read_or_alloc_page(struct fscache_cookie *, + struct page *, + fscache_rw_complete_t, + void *, + gfp_t); +extern int __fscache_read_or_alloc_pages(struct fscache_cookie *, + struct address_space *, + struct list_head *, + unsigned *, + fscache_rw_complete_t, + void *, + gfp_t); +extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t); +extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); +extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); +extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); +extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); +extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *, + gfp_t); +extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *, + struct inode *); + +/** + * fscache_register_netfs - Register a filesystem as desiring caching services + * @netfs: The description of the filesystem + * + * Register a filesystem as desiring caching services if they're available. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
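To plug into the cookie machinery, a network filesystem supplies a struct fscache_netfs and one or more cookie definitions like the struct documented above. A compressed sketch for a hypothetical netfs called "mynfs" (the key layout and all names are invented; get_attr/get_aux/check_aux would be filled in as needed for a data file):

#include <linux/fscache.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct mynfs_inode_key {        /* hypothetical per-file identifier */
    __u64 fileid;
};

static uint16_t mynfs_get_key(const void *cookie_netfs_data,
                              void *buffer, uint16_t bufmax)
{
    const struct mynfs_inode_key *key = cookie_netfs_data;
    uint16_t len = min_t(uint16_t, sizeof(*key), bufmax);

    memcpy(buffer, key, len);   /* may not return an error */
    return len;
}

const struct fscache_cookie_def mynfs_file_cookie_def = {
    .name    = "MYNFS.file",
    .type    = FSCACHE_COOKIE_TYPE_DATAFILE,
    .get_key = mynfs_get_key,
};

struct fscache_netfs mynfs_netfs = {
    .name    = "mynfs",
    .version = 0,
};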
+ */ +static inline +int fscache_register_netfs(struct fscache_netfs *netfs) +{ + if (fscache_available()) + return __fscache_register_netfs(netfs); + else + return 0; +} + +/** + * fscache_unregister_netfs - Indicate that a filesystem no longer desires + * caching services + * @netfs: The description of the filesystem + * + * Indicate that a filesystem no longer desires caching services for the + * moment. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_unregister_netfs(struct fscache_netfs *netfs) +{ + if (fscache_available()) + __fscache_unregister_netfs(netfs); +} + +/** + * fscache_lookup_cache_tag - Look up a cache tag + * @name: The name of the tag to search for + * + * Acquire a specific cache referral tag that can be used to select a specific + * cache in which to cache an index. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name) +{ + if (fscache_available()) + return __fscache_lookup_cache_tag(name); + else + return NULL; +} + +/** + * fscache_release_cache_tag - Release a cache tag + * @tag: The tag to release + * + * Release a reference to a cache referral tag previously looked up. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_release_cache_tag(struct fscache_cache_tag *tag) +{ + if (fscache_available()) + __fscache_release_cache_tag(tag); +} + +/** + * fscache_acquire_cookie - Acquire a cookie to represent a cache object + * @parent: The cookie that's to be the parent of this one + * @def: A description of the cache object, including callback operations + * @netfs_data: An arbitrary piece of data to be kept in the cookie to + * represent the cache object to the netfs + * + * This function is used to inform FS-Cache about part of an index hierarchy + * that can be used to locate files. This is done by requesting a cookie for + * each index in the path to the file. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +struct fscache_cookie *fscache_acquire_cookie( + struct fscache_cookie *parent, + const struct fscache_cookie_def *def, + void *netfs_data) +{ + if (fscache_cookie_valid(parent)) + return __fscache_acquire_cookie(parent, def, netfs_data); + else + return NULL; +} + +/** + * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding + * it + * @cookie: The cookie being returned + * @retire: True if the cache object the cookie represents is to be discarded + * + * This function returns a cookie to the cache, forcibly discarding the + * associated cache object if retire is set to true. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) +{ + if (fscache_cookie_valid(cookie)) + __fscache_relinquish_cookie(cookie, retire); +} + +/** + * fscache_update_cookie - Request that a cache object be updated + * @cookie: The cookie representing the cache object + * + * Request an update of the index data for the cache object associated with the + * cookie. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
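Tying the wrappers above together, the init/mount path of a netfs might look like this (mynfs_netfs and mynfs_file_cookie_def are the illustrative definitions sketched earlier; error handling is abbreviated):

#include <linux/fscache.h>

extern struct fscache_netfs mynfs_netfs;                        /* from the earlier sketch */
extern const struct fscache_cookie_def mynfs_file_cookie_def;   /* from the earlier sketch */

static int mynfs_cache_init(void)
{
    /* Returns 0 (a no-op) when FS-Cache is not built in */
    return fscache_register_netfs(&mynfs_netfs);
}

static struct fscache_cookie *mynfs_cache_inode(void *inode_key)
{
    /* Parent is the primary index set up by fscache_register_netfs() */
    return fscache_acquire_cookie(mynfs_netfs.primary_index,
                                  &mynfs_file_cookie_def, inode_key);
}

static void mynfs_cache_exit(void)
{
    fscache_unregister_netfs(&mynfs_netfs);
}

The cookie returned by mynfs_cache_inode() is later handed back with fscache_relinquish_cookie(cookie, 0), or with retire set to 1 when the file is being deleted.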
+ */ +static inline +void fscache_update_cookie(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie)) + __fscache_update_cookie(cookie); +} + +/** + * fscache_pin_cookie - Pin a data-storage cache object in its cache + * @cookie: The cookie representing the cache object + * + * Permit data-storage cache objects to be pinned in the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_pin_cookie(struct fscache_cookie *cookie) +{ + return -ENOBUFS; +} + +/** + * fscache_pin_cookie - Unpin a data-storage cache object in its cache + * @cookie: The cookie representing the cache object + * + * Permit data-storage cache objects to be unpinned from the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_unpin_cookie(struct fscache_cookie *cookie) +{ +} + +/** + * fscache_attr_changed - Notify cache that an object's attributes changed + * @cookie: The cookie representing the cache object + * + * Send a notification to the cache indicating that an object's attributes have + * changed. This includes the data size. These attributes will be obtained + * through the get_attr() cookie definition op. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_attr_changed(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_attr_changed(cookie); + else + return -ENOBUFS; +} + +/** + * fscache_reserve_space - Reserve data space for a cached object + * @cookie: The cookie representing the cache object + * @i_size: The amount of space to be reserved + * + * Reserve an amount of space in the cache for the cache object attached to a + * cookie so that a write to that object within the space can always be + * honoured. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) +{ + return -ENOBUFS; +} + +/** + * fscache_read_or_alloc_page - Read a page from the cache or allocate a block + * in which to store it + * @cookie: The cookie representing the cache object + * @page: The netfs page to fill if possible + * @end_io_func: The callback to invoke when and if the page is filled + * @context: An arbitrary piece of data to pass on to end_io_func() + * @gfp: The conditions under which memory allocation should be made + * + * Read a page from the cache, or if that's not possible make a potential + * one-block reservation in the cache into which the page may be stored once + * fetched from the server. + * + * If the page is not backed by the cache object, or if it there's some reason + * it can't be, -ENOBUFS will be returned and nothing more will be done for + * that page. + * + * Else, if that page is backed by the cache, a read will be initiated directly + * to the netfs's page and 0 will be returned by this function. The + * end_io_func() callback will be invoked when the operation terminates on a + * completion or failure. Note that the callback may be invoked before the + * return. + * + * Else, if the page is unbacked, -ENODATA is returned and a block may have + * been allocated in the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
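fscache_attr_changed() above is usually driven from the netfs's truncate/setattr path once the new i_size is visible, so the cache can re-query attributes through the cookie's get_attr() op. A minimal sketch (mynfs_inode_cookie() is an assumed per-inode accessor):

#include <linux/fscache.h>
#include <linux/kernel.h>

extern struct fscache_cookie *mynfs_inode_cookie(struct inode *inode);

static void mynfs_size_changed(struct inode *inode)
{
    int ret = fscache_attr_changed(mynfs_inode_cookie(inode));

    /* -ENOBUFS just means no cache is attached; caching quietly degrades */
    if (ret < 0 && ret != -ENOBUFS)
        pr_warn("mynfs: fscache_attr_changed() failed: %d\n", ret);
}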
+ */ +static inline +int fscache_read_or_alloc_page(struct fscache_cookie *cookie, + struct page *page, + fscache_rw_complete_t end_io_func, + void *context, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_read_or_alloc_page(cookie, page, end_io_func, + context, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate + * blocks in which to store them + * @cookie: The cookie representing the cache object + * @mapping: The netfs inode mapping to which the pages will be attached + * @pages: A list of potential netfs pages to be filled + * @nr_pages: Number of pages to be read and/or allocated + * @end_io_func: The callback to invoke when and if each page is filled + * @context: An arbitrary piece of data to pass on to end_io_func() + * @gfp: The conditions under which memory allocation should be made + * + * Read a set of pages from the cache, or if that's not possible, attempt to + * make a potential one-block reservation for each page in the cache into which + * that page may be stored once fetched from the server. + * + * If some pages are not backed by the cache object, or if it there's some + * reason they can't be, -ENOBUFS will be returned and nothing more will be + * done for that pages. + * + * Else, if some of the pages are backed by the cache, a read will be initiated + * directly to the netfs's page and 0 will be returned by this function. The + * end_io_func() callback will be invoked when the operation terminates on a + * completion or failure. Note that the callback may be invoked before the + * return. + * + * Else, if a page is unbacked, -ENODATA is returned and a block may have + * been allocated in the cache. + * + * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in + * regard to different pages, the return values are prioritised in that order. + * Any pages submitted for reading are removed from the pages list. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages, + fscache_rw_complete_t end_io_func, + void *context, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_read_or_alloc_pages(cookie, mapping, pages, + nr_pages, end_io_func, + context, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_alloc_page - Allocate a block in which to store a page + * @cookie: The cookie representing the cache object + * @page: The netfs page to allocate a page for + * @gfp: The conditions under which memory allocation should be made + * + * Request Allocation a block in the cache in which to store a netfs page + * without retrieving any contents from the cache. + * + * If the page is not backed by a file then -ENOBUFS will be returned and + * nothing more will be done, and no reservation will be made. + * + * Else, a block will be allocated if one wasn't already, and 0 will be + * returned + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
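The three-way return convention spelled out above (-ENOBUFS, -ENODATA or 0) is what a netfs ->readpage() has to dispatch on. A condensed sketch; mynfs_inode_cookie() and mynfs_readpage_from_server() are assumed helpers, and a real implementation would also retry from the server when the cache read completes with an error:

#include <linux/fscache.h>
#include <linux/pagemap.h>

extern struct fscache_cookie *mynfs_inode_cookie(struct inode *inode);
extern int mynfs_readpage_from_server(struct page *page);

static void mynfs_read_from_cache_done(struct page *page, void *context, int error)
{
    /* May run in IRQ/softirq context, depending on the backend */
    if (!error)
        SetPageUptodate(page);
    unlock_page(page);
}

static int mynfs_readpage(struct file *file, struct page *page)
{
    struct inode *inode = page->mapping->host;
    int ret;

    ret = fscache_read_or_alloc_page(mynfs_inode_cookie(inode), page,
                                     mynfs_read_from_cache_done,
                                     NULL, GFP_KERNEL);
    switch (ret) {
    case 0:         /* submitted to the cache; the callback completes the page */
        return 0;
    case -ENODATA:  /* block reserved but empty: fetch from the server */
    case -ENOBUFS:  /* page not cacheable right now: fetch from the server */
    default:
        return mynfs_readpage_from_server(page);
    }
}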
+ */ +static inline +int fscache_alloc_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_alloc_page(cookie, page, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_write_page - Request storage of a page in the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page to store + * @gfp: The conditions under which memory allocation should be made + * + * Request the contents of the netfs page be written into the cache. This + * request may be ignored if no cache block is currently allocated, in which + * case it will return -ENOBUFS. + * + * If a cache block was already allocated, a write will be initiated and 0 will + * be returned. The PG_fscache_write page bit is set immediately and will then + * be cleared at the completion of the write to indicate the success or failure + * of the operation. Note that the completion may happen before the return. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_write_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_write_page(cookie, page, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_uncache_page - Indicate that caching is no longer required on a page + * @cookie: The cookie representing the cache object + * @page: The netfs page that was being cached. + * + * Tell the cache that we no longer want a page to be cached and that it should + * remove any knowledge of the netfs page it may have. + * + * Note that this cannot cancel any outstanding I/O operations between this + * page and the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_uncache_page(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + __fscache_uncache_page(cookie, page); +} + +/** + * fscache_check_page_write - Ask if a page is being writing to the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * + * Ask the cache if a page is being written to the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +bool fscache_check_page_write(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_check_page_write(cookie, page); + return false; +} + +/** + * fscache_wait_on_page_write - Wait for a page to complete writing to the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * + * Ask the cache to wake us up when a page is no longer being written to the + * cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_wait_on_page_write(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + __fscache_wait_on_page_write(cookie, page); +} + +/** + * fscache_maybe_release_page - Consider releasing a page, cancelling a store + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * @gfp: The gfp flags passed to releasepage() + * + * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's + * releasepage() call. A storage request on the page may cancelled if it is + * not currently being processed. 
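Once a page has actually been fetched from the server, the netfs can push a copy into the cache with fscache_write_page(); if that fails, the usual response is to uncache the page so no stale PG_fscache state is left behind. Another hedged sketch with the same assumed helper:

#include <linux/fscache.h>

extern struct fscache_cookie *mynfs_inode_cookie(struct inode *inode);

static void mynfs_readpage_to_cache(struct inode *inode, struct page *page)
{
    struct fscache_cookie *cookie = mynfs_inode_cookie(inode);

    /* Non-zero (typically -ENOBUFS) means no cache block was available */
    if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
        fscache_uncache_page(cookie, page);
}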
+ * + * The function returns true if the page no longer has a storage request on it, + * and false if a storage request is left in place. If true is returned, the + * page will have been passed to fscache_uncache_page(). If false is returned + * the page cannot be freed yet. + */ +static inline +bool fscache_maybe_release_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie) && PageFsCache(page)) + return __fscache_maybe_release_page(cookie, page, gfp); + return false; +} + +/** + * fscache_uncache_all_inode_pages - Uncache all an inode's pages + * @cookie: The cookie representing the inode's cache object. + * @inode: The inode to uncache pages from. + * + * Uncache all the pages in an inode that are marked PG_fscache, assuming them + * to be associated with the given cookie. + * + * This function may sleep. It will wait for pages that are being written out + * and will wait whilst the PG_fscache mark is removed by the cache. + */ +static inline +void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, + struct inode *inode) +{ + if (fscache_cookie_valid(cookie)) + __fscache_uncache_all_inode_pages(cookie, inode); +} + +#endif /* _LINUX_FSCACHE_H */ diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h new file mode 100644 index 00000000..11c16a1f --- /dev/null +++ b/include/linux/fsl-diu-fb.h @@ -0,0 +1,164 @@ +/* + * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. + * + * Freescale DIU Frame Buffer device driver + * + * Authors: Hongjun Chen <hong-jun.chen@freescale.com> + * Paul Widmer <paul.widmer@freescale.com> + * Srikanth Srinivasan <srikanth.srinivasan@freescale.com> + * York Sun <yorksun@freescale.com> + * + * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __FSL_DIU_FB_H__ +#define __FSL_DIU_FB_H__ + +#include <linux/types.h> + +struct mfb_chroma_key { + int enable; + __u8 red_max; + __u8 green_max; + __u8 blue_max; + __u8 red_min; + __u8 green_min; + __u8 blue_min; +}; + +struct aoi_display_offset { + __s32 x_aoi_d; + __s32 y_aoi_d; +}; + +#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) +#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) +#define MFB_SET_ALPHA _IOW('M', 0, __u8) +#define MFB_GET_ALPHA _IOR('M', 0, __u8) +#define MFB_SET_AOID _IOW('M', 4, struct aoi_display_offset) +#define MFB_GET_AOID _IOR('M', 4, struct aoi_display_offset) +#define MFB_SET_PIXFMT _IOW('M', 8, __u32) +#define MFB_GET_PIXFMT _IOR('M', 8, __u32) + +/* + * The original definitions of MFB_SET_PIXFMT and MFB_GET_PIXFMT used the + * wrong value for 'size' field of the ioctl. The current macros above use the + * right size, but we still need to provide backwards compatibility, at least + * for a while. 
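fscache_maybe_release_page() above slots into the netfs's ->releasepage() address-space operation; only when it returns true may the page be handed back to the VM. Roughly:

#include <linux/fscache.h>

extern struct fscache_cookie *mynfs_inode_cookie(struct inode *inode);

static int mynfs_release_page(struct page *page, gfp_t gfp)
{
    if (PageFsCache(page)) {
        struct fscache_cookie *cookie =
            mynfs_inode_cookie(page->mapping->host);

        /* false while a store to the cache is still pending */
        if (!fscache_maybe_release_page(cookie, page, gfp))
            return 0;
    }
    return 1;   /* page may be released */
}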
+*/ +#define MFB_SET_PIXFMT_OLD 0x80014d08 +#define MFB_GET_PIXFMT_OLD 0x40014d08 + +#ifdef __KERNEL__ + +/* + * These are the fields of area descriptor(in DDR memory) for every plane + */ +struct diu_ad { + /* Word 0(32-bit) in DDR memory */ +/* __u16 comp; */ +/* __u16 pixel_s:2; */ +/* __u16 pallete:1; */ +/* __u16 red_c:2; */ +/* __u16 green_c:2; */ +/* __u16 blue_c:2; */ +/* __u16 alpha_c:3; */ +/* __u16 byte_f:1; */ +/* __u16 res0:3; */ + + __be32 pix_fmt; /* hard coding pixel format */ + + /* Word 1(32-bit) in DDR memory */ + __le32 addr; + + /* Word 2(32-bit) in DDR memory */ +/* __u32 delta_xs:11; */ +/* __u32 res1:1; */ +/* __u32 delta_ys:11; */ +/* __u32 res2:1; */ +/* __u32 g_alpha:8; */ + __le32 src_size_g_alpha; + + /* Word 3(32-bit) in DDR memory */ +/* __u32 delta_xi:11; */ +/* __u32 res3:5; */ +/* __u32 delta_yi:11; */ +/* __u32 res4:3; */ +/* __u32 flip:2; */ + __le32 aoi_size; + + /* Word 4(32-bit) in DDR memory */ + /*__u32 offset_xi:11; + __u32 res5:5; + __u32 offset_yi:11; + __u32 res6:5; + */ + __le32 offset_xyi; + + /* Word 5(32-bit) in DDR memory */ + /*__u32 offset_xd:11; + __u32 res7:5; + __u32 offset_yd:11; + __u32 res8:5; */ + __le32 offset_xyd; + + + /* Word 6(32-bit) in DDR memory */ + __u8 ckmax_r; + __u8 ckmax_g; + __u8 ckmax_b; + __u8 res9; + + /* Word 7(32-bit) in DDR memory */ + __u8 ckmin_r; + __u8 ckmin_g; + __u8 ckmin_b; + __u8 res10; +/* __u32 res10:8; */ + + /* Word 8(32-bit) in DDR memory */ + __le32 next_ad; + + /* Word 9(32-bit) in DDR memory, just for 64-bit aligned */ + __u32 paddr; +} __attribute__ ((packed)); + +/* DIU register map */ +struct diu { + __be32 desc[3]; + __be32 gamma; + __be32 pallete; + __be32 cursor; + __be32 curs_pos; + __be32 diu_mode; + __be32 bgnd; + __be32 bgnd_wb; + __be32 disp_size; + __be32 wb_size; + __be32 wb_mem_addr; + __be32 hsyn_para; + __be32 vsyn_para; + __be32 syn_pol; + __be32 thresholds; + __be32 int_status; + __be32 int_mask; + __be32 colorbar[8]; + __be32 filling; + __be32 plut; +} __attribute__ ((packed)); + +/* + * Modes of operation of DIU. The DIU supports five different modes, but + * the driver only supports modes 0 and 1. + */ +#define MFB_MODE0 0 /* DIU off */ +#define MFB_MODE1 1 /* All three planes output to display */ + +#endif /* __KERNEL__ */ +#endif /* __FSL_DIU_FB_H__ */ diff --git a/include/linux/fsl/mxs-dma.h b/include/linux/fsl/mxs-dma.h new file mode 100644 index 00000000..203d7c4a --- /dev/null +++ b/include/linux/fsl/mxs-dma.h @@ -0,0 +1,28 @@ +/* + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
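The MFB_* macros above are the user-space ioctl interface to the DIU framebuffer planes. For example, an application might reposition an area-of-interest like this (the framebuffer device path is illustrative):

/* user-space example */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fsl-diu-fb.h>

int move_aoi(int x, int y)
{
    struct aoi_display_offset aoi = { .x_aoi_d = x, .y_aoi_d = y };
    int fd = open("/dev/fb1", O_RDWR);  /* an AOI-capable plane, path assumed */
    int ret;

    if (fd < 0)
        return -1;
    ret = ioctl(fd, MFB_SET_AOID, &aoi);
    close(fd);
    return ret;
}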
+ */ + +#ifndef __MACH_MXS_DMA_H__ +#define __MACH_MXS_DMA_H__ + +#include <linux/dmaengine.h> + +struct mxs_dma_data { + int chan_irq; +}; + +static inline int mxs_dma_is_apbh(struct dma_chan *chan) +{ + return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh"); +} + +static inline int mxs_dma_is_apbx(struct dma_chan *chan) +{ + return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx"); +} + +#endif /* __MACH_MXS_DMA_H__ */ diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h new file mode 100644 index 00000000..fffdf00f --- /dev/null +++ b/include/linux/fsl_devices.h @@ -0,0 +1,138 @@ +/* + * include/linux/fsl_devices.h + * + * Definitions for any platform device related flags or structures for + * Freescale processor devices + * + * Maintainer: Kumar Gala <galak@kernel.crashing.org> + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _FSL_DEVICE_H_ +#define _FSL_DEVICE_H_ + +#include <linux/types.h> + +/* + * Some conventions on how we handle peripherals on Freescale chips + * + * unique device: a platform_device entry in fsl_plat_devs[] plus + * associated device information in its platform_data structure. + * + * A chip is described by a set of unique devices. + * + * Each sub-arch has its own master list of unique devices and + * enumerates them by enum fsl_devices in a sub-arch specific header + * + * The platform data structure is broken into two parts. The + * first is device specific information that helps identify any + * unique features of a peripheral. The second is any + * information that may be defined by the board or how the device + * is connected externally to the chip. 
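The mxs_dma_is_apbh()/mxs_dma_is_apbx() helpers above are plain dev_name() checks and are normally used inside a dma_filter_fn when a client requests a channel, as in this sketch (struct my_dev and its fields are invented; the chan->private handshake carries the mxs_dma_data from this header):

#include <linux/dmaengine.h>
#include <linux/fsl/mxs-dma.h>

struct my_dev {                     /* illustrative client device */
    int dma_channel;                /* channel number from platform resources */
    struct mxs_dma_data dma_data;   /* .chan_irq filled in by the client */
};

static bool my_dma_filter(struct dma_chan *chan, void *param)
{
    struct my_dev *mydev = param;

    if (!mxs_dma_is_apbh(chan))             /* only channels of the APBH engine */
        return false;
    if (chan->chan_id != mydev->dma_channel)
        return false;

    chan->private = &mydev->dma_data;       /* pass the channel IRQ to mxs-dma */
    return true;
}

dma_request_channel() then walks the available channels and returns the first one for which this filter says true.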
+ * + * naming conventions: + * - platform data structures: <driver>_platform_data + * - platform data device flags: FSL_<driver>_DEV_<FLAG> + * - platform data board flags: FSL_<driver>_BRD_<FLAG> + * + */ + +enum fsl_usb2_operating_modes { + FSL_USB2_MPH_HOST, + FSL_USB2_DR_HOST, + FSL_USB2_DR_DEVICE, + FSL_USB2_DR_OTG, +}; + +enum fsl_usb2_phy_modes { + FSL_USB2_PHY_NONE, + FSL_USB2_PHY_ULPI, + FSL_USB2_PHY_UTMI, + FSL_USB2_PHY_UTMI_WIDE, + FSL_USB2_PHY_SERIAL, +}; + +struct clk; +struct platform_device; + +struct fsl_usb2_platform_data { + /* board specific information */ + enum fsl_usb2_operating_modes operating_mode; + enum fsl_usb2_phy_modes phy_mode; + unsigned int port_enables; + unsigned int workaround; + + int (*init)(struct platform_device *); + void (*exit)(struct platform_device *); + void __iomem *regs; /* ioremap'd register base */ + struct clk *clk; + unsigned power_budget; /* hcd->power_budget */ + unsigned big_endian_mmio:1; + unsigned big_endian_desc:1; + unsigned es:1; /* need USBMODE:ES */ + unsigned le_setup_buf:1; + unsigned have_sysif_regs:1; + unsigned invert_drvvbus:1; + unsigned invert_pwr_fault:1; + + unsigned suspended:1; + unsigned already_suspended:1; + + /* register save area for suspend/resume */ + u32 pm_command; + u32 pm_status; + u32 pm_intr_enable; + u32 pm_frame_index; + u32 pm_segment; + u32 pm_frame_list; + u32 pm_async_next; + u32 pm_configured_flag; + u32 pm_portsc; + u32 pm_usbgenctrl; +}; + +/* Flags in fsl_usb2_mph_platform_data */ +#define FSL_USB2_PORT0_ENABLED 0x00000001 +#define FSL_USB2_PORT1_ENABLED 0x00000002 + +#define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0) + +struct spi_device; + +struct fsl_spi_platform_data { + u32 initial_spmode; /* initial SPMODE value */ + s16 bus_num; + unsigned int flags; +#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */ +#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */ +#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */ +#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */ +#define SPI_QE (1 << 4) /* SPI unit is in QE block */ + /* board specific information */ + u16 max_chipselect; + void (*cs_control)(struct spi_device *spi, bool on); + u32 sysclk; +}; + +struct mpc8xx_pcmcia_ops { + void(*hw_ctrl)(int slot, int enable); + int(*voltage_set)(int slot, int vcc, int vpp); +}; + +/* Returns non-zero if the current suspend operation would + * lead to a deep sleep (i.e. power removed from the core, + * instead of just the clock). + */ +#if defined(CONFIG_PPC_83xx) && defined(CONFIG_SUSPEND) +int fsl_deep_sleep(void); +#else +static inline int fsl_deep_sleep(void) { return 0; } +#endif + +#endif /* _FSL_DEVICE_H_ */ diff --git a/include/linux/fsl_hypervisor.h b/include/linux/fsl_hypervisor.h new file mode 100644 index 00000000..1cebaeee --- /dev/null +++ b/include/linux/fsl_hypervisor.h @@ -0,0 +1,241 @@ +/* + * Freescale hypervisor ioctl and kernel interface + * + * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. + * Author: Timur Tabi <timur@freescale.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * This software is provided by Freescale Semiconductor "as is" and any + * express or implied warranties, including, but not limited to, the implied + * warranties of merchantability and fitness for a particular purpose are + * disclaimed. In no event shall Freescale Semiconductor be liable for any + * direct, indirect, incidental, special, exemplary, or consequential damages + * (including, but not limited to, procurement of substitute goods or services; + * loss of use, data, or profits; or business interruption) however caused and + * on any theory of liability, whether in contract, strict liability, or tort + * (including negligence or otherwise) arising in any way out of the use of this + * software, even if advised of the possibility of such damage. + * + * This file is used by the Freescale hypervisor management driver. It can + * also be included by applications that need to communicate with the driver + * via the ioctl interface. + */ + +#ifndef FSL_HYPERVISOR_H +#define FSL_HYPERVISOR_H + +#include <linux/types.h> + +/** + * struct fsl_hv_ioctl_restart - restart a partition + * @ret: return error code from the hypervisor + * @partition: the ID of the partition to restart, or -1 for the + * calling partition + * + * Used by FSL_HV_IOCTL_PARTITION_RESTART + */ +struct fsl_hv_ioctl_restart { + __u32 ret; + __u32 partition; +}; + +/** + * struct fsl_hv_ioctl_status - get a partition's status + * @ret: return error code from the hypervisor + * @partition: the ID of the partition to query, or -1 for the + * calling partition + * @status: The returned status of the partition + * + * Used by FSL_HV_IOCTL_PARTITION_GET_STATUS + * + * Values of 'status': + * 0 = Stopped + * 1 = Running + * 2 = Starting + * 3 = Stopping + */ +struct fsl_hv_ioctl_status { + __u32 ret; + __u32 partition; + __u32 status; +}; + +/** + * struct fsl_hv_ioctl_start - start a partition + * @ret: return error code from the hypervisor + * @partition: the ID of the partition to control + * @entry_point: The offset within the guest IMA to start execution + * @load: If non-zero, reload the partition's images before starting + * + * Used by FSL_HV_IOCTL_PARTITION_START + */ +struct fsl_hv_ioctl_start { + __u32 ret; + __u32 partition; + __u32 entry_point; + __u32 load; +}; + +/** + * struct fsl_hv_ioctl_stop - stop a partition + * @ret: return error code from the hypervisor + * @partition: the ID of the partition to stop, or -1 for the calling + * partition + * + * Used by FSL_HV_IOCTL_PARTITION_STOP + */ +struct fsl_hv_ioctl_stop { + __u32 ret; + __u32 partition; +}; + +/** + * struct fsl_hv_ioctl_memcpy - copy memory between partitions + * @ret: return error code from the hypervisor + * @source: the partition ID of the source partition, or -1 for this + * partition + * @target: the partition ID of the target partition, or -1 for this + * partition + * @reserved: reserved, must be set to 0 + * @local_addr: user-space virtual address of a buffer in the local + * partition + * @remote_addr: guest physical address of a buffer in the + * remote partition + * @count: the 
number of bytes to copy. Both the local and remote + * buffers must be at least 'count' bytes long + * + * Used by FSL_HV_IOCTL_MEMCPY + * + * The 'local' partition is the partition that calls this ioctl. The + * 'remote' partition is a different partition. The data is copied from + * the 'source' paritition' to the 'target' partition. + * + * The buffer in the remote partition must be guest physically + * contiguous. + * + * This ioctl does not support copying memory between two remote + * partitions or within the same partition, so either 'source' or + * 'target' (but not both) must be -1. In other words, either + * + * source == local and target == remote + * or + * source == remote and target == local + */ +struct fsl_hv_ioctl_memcpy { + __u32 ret; + __u32 source; + __u32 target; + __u32 reserved; /* padding to ensure local_vaddr is aligned */ + __u64 local_vaddr; + __u64 remote_paddr; + __u64 count; +}; + +/** + * struct fsl_hv_ioctl_doorbell - ring a doorbell + * @ret: return error code from the hypervisor + * @doorbell: the handle of the doorbell to ring doorbell + * + * Used by FSL_HV_IOCTL_DOORBELL + */ +struct fsl_hv_ioctl_doorbell { + __u32 ret; + __u32 doorbell; +}; + +/** + * struct fsl_hv_ioctl_prop - get/set a device tree property + * @ret: return error code from the hypervisor + * @handle: handle of partition whose tree to access + * @path: virtual address of path name of node to access + * @propname: virtual address of name of property to access + * @propval: virtual address of property data buffer + * @proplen: Size of property data buffer + * @reserved: reserved, must be set to 0 + * + * Used by FSL_HV_IOCTL_DOORBELL + */ +struct fsl_hv_ioctl_prop { + __u32 ret; + __u32 handle; + __u64 path; + __u64 propname; + __u64 propval; + __u32 proplen; + __u32 reserved; /* padding to ensure structure is aligned */ +}; + +/* The ioctl type, documented in ioctl-number.txt */ +#define FSL_HV_IOCTL_TYPE 0xAF + +/* Restart another partition */ +#define FSL_HV_IOCTL_PARTITION_RESTART \ + _IOWR(FSL_HV_IOCTL_TYPE, 1, struct fsl_hv_ioctl_restart) + +/* Get a partition's status */ +#define FSL_HV_IOCTL_PARTITION_GET_STATUS \ + _IOWR(FSL_HV_IOCTL_TYPE, 2, struct fsl_hv_ioctl_status) + +/* Boot another partition */ +#define FSL_HV_IOCTL_PARTITION_START \ + _IOWR(FSL_HV_IOCTL_TYPE, 3, struct fsl_hv_ioctl_start) + +/* Stop this or another partition */ +#define FSL_HV_IOCTL_PARTITION_STOP \ + _IOWR(FSL_HV_IOCTL_TYPE, 4, struct fsl_hv_ioctl_stop) + +/* Copy data from one partition to another */ +#define FSL_HV_IOCTL_MEMCPY \ + _IOWR(FSL_HV_IOCTL_TYPE, 5, struct fsl_hv_ioctl_memcpy) + +/* Ring a doorbell */ +#define FSL_HV_IOCTL_DOORBELL \ + _IOWR(FSL_HV_IOCTL_TYPE, 6, struct fsl_hv_ioctl_doorbell) + +/* Get a property from another guest's device tree */ +#define FSL_HV_IOCTL_GETPROP \ + _IOWR(FSL_HV_IOCTL_TYPE, 7, struct fsl_hv_ioctl_prop) + +/* Set a property in another guest's device tree */ +#define FSL_HV_IOCTL_SETPROP \ + _IOWR(FSL_HV_IOCTL_TYPE, 8, struct fsl_hv_ioctl_prop) + +#ifdef __KERNEL__ + +/** + * fsl_hv_event_register() - register a callback for failover events + * @nb: pointer to caller-supplied notifier_block structure + * + * This function is called by device drivers to register their callback + * functions for fail-over events. + * + * The caller should allocate a notifier_block object and initialize the + * 'priority' and 'notifier_call' fields. 
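As the kernel-doc just above explains, a driver hooks into failover events by filling in a notifier_block and handing it to fsl_hv_failover_register(), declared below. A minimal sketch under that assumption; the callback and variable names are illustrative only:

#include <linux/notifier.h>

/* hypothetical callback: invoked when a watched partition fails over */
static int my_failover_cb(struct notifier_block *nb, unsigned long action,
                          void *data)
{
        /* react to the failover event here */
        return NOTIFY_OK;
}

static struct notifier_block my_failover_nb = {
        .notifier_call = my_failover_cb,
        .priority      = 0,
};

/* typically called from module init / exit (sketch): */
/*      fsl_hv_failover_register(&my_failover_nb);    */
/*      fsl_hv_failover_unregister(&my_failover_nb);  */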
+ */ +int fsl_hv_failover_register(struct notifier_block *nb); + +/** + * fsl_hv_event_unregister() - unregister a callback for failover events + * @nb: the same 'nb' used in previous fsl_hv_failover_register call + */ +int fsl_hv_failover_unregister(struct notifier_block *nb); + +#endif + +#endif diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h new file mode 100644 index 00000000..a6dfe694 --- /dev/null +++ b/include/linux/fsnotify.h @@ -0,0 +1,342 @@ +#ifndef _LINUX_FS_NOTIFY_H +#define _LINUX_FS_NOTIFY_H + +/* + * include/linux/fsnotify.h - generic hooks for filesystem notification, to + * reduce in-source duplication from both dnotify and inotify. + * + * We don't compile any of this away in some complicated menagerie of ifdefs. + * Instead, we rely on the code inside to optimize away as needed. + * + * (C) Copyright 2005 Robert Love + */ + +#include <linux/fsnotify_backend.h> +#include <linux/audit.h> +#include <linux/slab.h> +#include <linux/bug.h> + +/* + * fsnotify_d_instantiate - instantiate a dentry for inode + */ +static inline void fsnotify_d_instantiate(struct dentry *dentry, + struct inode *inode) +{ + __fsnotify_d_instantiate(dentry, inode); +} + +/* Notify this dentry's parent about a child's events. */ +static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) +{ + if (!dentry) + dentry = path->dentry; + + return __fsnotify_parent(path, dentry, mask); +} + +/* simple call site for access decisions */ +static inline int fsnotify_perm(struct file *file, int mask) +{ + struct path *path = &file->f_path; + struct inode *inode = path->dentry->d_inode; + __u32 fsnotify_mask = 0; + int ret; + + if (file->f_mode & FMODE_NONOTIFY) + return 0; + if (!(mask & (MAY_READ | MAY_OPEN))) + return 0; + if (mask & MAY_OPEN) + fsnotify_mask = FS_OPEN_PERM; + else if (mask & MAY_READ) + fsnotify_mask = FS_ACCESS_PERM; + else + BUG(); + + ret = fsnotify_parent(path, NULL, fsnotify_mask); + if (ret) + return ret; + + return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); +} + +/* + * fsnotify_d_move - dentry has been moved + */ +static inline void fsnotify_d_move(struct dentry *dentry) +{ + /* + * On move we need to update dentry->d_flags to indicate if the new parent + * cares about events from this dentry. 
+ */ + __fsnotify_update_dcache_flags(dentry); +} + +/* + * fsnotify_link_count - inode's link count changed + */ +static inline void fsnotify_link_count(struct inode *inode) +{ + fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0); +} + +/* + * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir + */ +static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, + const unsigned char *old_name, + int isdir, struct inode *target, struct dentry *moved) +{ + struct inode *source = moved->d_inode; + u32 fs_cookie = fsnotify_get_cookie(); + __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); + __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); + const unsigned char *new_name = moved->d_name.name; + + if (old_dir == new_dir) + old_dir_mask |= FS_DN_RENAME; + + if (isdir) { + old_dir_mask |= FS_ISDIR; + new_dir_mask |= FS_ISDIR; + } + + fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); + fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); + + if (target) + fsnotify_link_count(target); + + if (source) + fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); + audit_inode_child(moved, new_dir); +} + +/* + * fsnotify_inode_delete - and inode is being evicted from cache, clean up is needed + */ +static inline void fsnotify_inode_delete(struct inode *inode) +{ + __fsnotify_inode_delete(inode); +} + +/* + * fsnotify_vfsmount_delete - a vfsmount is being destroyed, clean up is needed + */ +static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) +{ + __fsnotify_vfsmount_delete(mnt); +} + +/* + * fsnotify_nameremove - a filename was removed from a directory + */ +static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) +{ + __u32 mask = FS_DELETE; + + if (isdir) + mask |= FS_ISDIR; + + fsnotify_parent(NULL, dentry, mask); +} + +/* + * fsnotify_inoderemove - an inode is going away + */ +static inline void fsnotify_inoderemove(struct inode *inode) +{ + fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + __fsnotify_inode_delete(inode); +} + +/* + * fsnotify_create - 'name' was linked in + */ +static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) +{ + audit_inode_child(dentry, inode); + + fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); +} + +/* + * fsnotify_link - new hardlink in 'inode' directory + * Note: We have to pass also the linked inode ptr as some filesystems leave + * new_dentry->d_inode NULL and instantiate inode pointer later + */ +static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) +{ + fsnotify_link_count(inode); + audit_inode_child(new_dentry, dir); + + fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0); +} + +/* + * fsnotify_mkdir - directory 'name' was created + */ +static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) +{ + __u32 mask = (FS_CREATE | FS_ISDIR); + struct inode *d_inode = dentry->d_inode; + + audit_inode_child(dentry, inode); + + fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); +} + +/* + * fsnotify_access - file was read + */ +static inline void fsnotify_access(struct file *file) +{ + struct path *path = &file->f_path; + struct inode *inode = path->dentry->d_inode; + __u32 mask = FS_ACCESS; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + if (!(file->f_mode & FMODE_NONOTIFY)) 
{ + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + } +} + +/* + * fsnotify_modify - file was modified + */ +static inline void fsnotify_modify(struct file *file) +{ + struct path *path = &file->f_path; + struct inode *inode = path->dentry->d_inode; + __u32 mask = FS_MODIFY; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + if (!(file->f_mode & FMODE_NONOTIFY)) { + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + } +} + +/* + * fsnotify_open - file was opened + */ +static inline void fsnotify_open(struct file *file) +{ + struct path *path = &file->f_path; + struct inode *inode = path->dentry->d_inode; + __u32 mask = FS_OPEN; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); +} + +/* + * fsnotify_close - file was closed + */ +static inline void fsnotify_close(struct file *file) +{ + struct path *path = &file->f_path; + struct inode *inode = file->f_path.dentry->d_inode; + fmode_t mode = file->f_mode; + __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + if (!(file->f_mode & FMODE_NONOTIFY)) { + fsnotify_parent(path, NULL, mask); + fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + } +} + +/* + * fsnotify_xattr - extended attributes were changed + */ +static inline void fsnotify_xattr(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + __u32 mask = FS_ATTRIB; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(NULL, dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); +} + +/* + * fsnotify_change - notify_change event. file was modified and/or metadata + * was changed. 
+ */ +static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) +{ + struct inode *inode = dentry->d_inode; + __u32 mask = 0; + + if (ia_valid & ATTR_UID) + mask |= FS_ATTRIB; + if (ia_valid & ATTR_GID) + mask |= FS_ATTRIB; + if (ia_valid & ATTR_SIZE) + mask |= FS_MODIFY; + + /* both times implies a utime(s) call */ + if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) + mask |= FS_ATTRIB; + else if (ia_valid & ATTR_ATIME) + mask |= FS_ACCESS; + else if (ia_valid & ATTR_MTIME) + mask |= FS_MODIFY; + + if (ia_valid & ATTR_MODE) + mask |= FS_ATTRIB; + + if (mask) { + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(NULL, dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + } +} + +#if defined(CONFIG_FSNOTIFY) /* notify helpers */ + +/* + * fsnotify_oldname_init - save off the old filename before we change it + */ +static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) +{ + return kstrdup(name, GFP_KERNEL); +} + +/* + * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init + */ +static inline void fsnotify_oldname_free(const unsigned char *old_name) +{ + kfree(old_name); +} + +#else /* CONFIG_FSNOTIFY */ + +static inline const char *fsnotify_oldname_init(const unsigned char *name) +{ + return NULL; +} + +static inline void fsnotify_oldname_free(const unsigned char *old_name) +{ +} + +#endif /* CONFIG_FSNOTIFY */ + +#endif /* _LINUX_FS_NOTIFY_H */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h new file mode 100644 index 00000000..91d0e0a3 --- /dev/null +++ b/include/linux/fsnotify_backend.h @@ -0,0 +1,470 @@ +/* + * Filesystem access notification for Linux + * + * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com> + */ + +#ifndef __LINUX_FSNOTIFY_BACKEND_H +#define __LINUX_FSNOTIFY_BACKEND_H + +#ifdef __KERNEL__ + +#include <linux/idr.h> /* inotify uses this */ +#include <linux/fs.h> /* struct inode */ +#include <linux/list.h> +#include <linux/path.h> /* struct path */ +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <linux/atomic.h> + +/* + * IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily + * convert between them. dnotify only needs conversion at watch creation + * so no perf loss there. fanotify isn't defined yet, so it can use the + * wholes if it needs more events. 
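The claim that the userspace IN_* inotify bits line up exactly with the FS_* values defined just below can be verified at build time. A sketch of such a check, assuming <linux/inotify.h> provides the IN_* constants; the helper name is hypothetical, and the in-tree inotify code performs a similar sanity check at init time:

#include <linux/kernel.h>
#include <linux/inotify.h>

static inline void fsnotify_check_inotify_bits(void)   /* hypothetical */
{
        BUILD_BUG_ON(IN_ACCESS        != FS_ACCESS);
        BUILD_BUG_ON(IN_MODIFY        != FS_MODIFY);
        BUILD_BUG_ON(IN_ATTRIB        != FS_ATTRIB);
        BUILD_BUG_ON(IN_CLOSE_WRITE   != FS_CLOSE_WRITE);
        BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
        BUILD_BUG_ON(IN_OPEN          != FS_OPEN);
        BUILD_BUG_ON(IN_MOVED_FROM    != FS_MOVED_FROM);
        BUILD_BUG_ON(IN_MOVED_TO      != FS_MOVED_TO);
        BUILD_BUG_ON(IN_CREATE        != FS_CREATE);
        BUILD_BUG_ON(IN_DELETE        != FS_DELETE);
        BUILD_BUG_ON(IN_DELETE_SELF   != FS_DELETE_SELF);
        BUILD_BUG_ON(IN_MOVE_SELF     != FS_MOVE_SELF);
}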
+ */ +#define FS_ACCESS 0x00000001 /* File was accessed */ +#define FS_MODIFY 0x00000002 /* File was modified */ +#define FS_ATTRIB 0x00000004 /* Metadata changed */ +#define FS_CLOSE_WRITE 0x00000008 /* Writtable file was closed */ +#define FS_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */ +#define FS_OPEN 0x00000020 /* File was opened */ +#define FS_MOVED_FROM 0x00000040 /* File was moved from X */ +#define FS_MOVED_TO 0x00000080 /* File was moved to Y */ +#define FS_CREATE 0x00000100 /* Subfile was created */ +#define FS_DELETE 0x00000200 /* Subfile was deleted */ +#define FS_DELETE_SELF 0x00000400 /* Self was deleted */ +#define FS_MOVE_SELF 0x00000800 /* Self was moved */ + +#define FS_UNMOUNT 0x00002000 /* inode on umount fs */ +#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ +#define FS_IN_IGNORED 0x00008000 /* last inotify event here */ + +#define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ +#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ + +#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ +#define FS_ISDIR 0x40000000 /* event occurred against dir */ +#define FS_IN_ONESHOT 0x80000000 /* only send event once */ + +#define FS_DN_RENAME 0x10000000 /* file renamed */ +#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ + +/* This inode cares about things that happen to its children. Always set for + * dnotify and inotify. */ +#define FS_EVENT_ON_CHILD 0x08000000 + +/* This is a list of all events that may get sent to a parernt based on fs event + * happening to inodes inside that directory */ +#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ + FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ + FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ + FS_DELETE) + +#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) + +#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM) + +#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ + FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \ + FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ + FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ + FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ + FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \ + FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \ + FS_DN_MULTISHOT | FS_EVENT_ON_CHILD) + +struct fsnotify_group; +struct fsnotify_event; +struct fsnotify_mark; +struct fsnotify_event_private_data; + +/* + * Each group much define these ops. The fsnotify infrastructure will call + * these operations for each relevant group. + * + * should_send_event - given a group, inode, and mask this function determines + * if the group is interested in this event. + * handle_event - main call for a group to handle an fs event + * free_group_priv - called when a group refcnt hits 0 to clean up the private union + * freeing-mark - this means that a mark has been flagged to die when everything + * finishes using it. The function is supplied with what must be a + * valid group and inode to use to clean up. 
+ */ +struct fsnotify_ops { + bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, + struct fsnotify_mark *inode_mark, + struct fsnotify_mark *vfsmount_mark, + __u32 mask, void *data, int data_type); + int (*handle_event)(struct fsnotify_group *group, + struct fsnotify_mark *inode_mark, + struct fsnotify_mark *vfsmount_mark, + struct fsnotify_event *event); + void (*free_group_priv)(struct fsnotify_group *group); + void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); + void (*free_event_priv)(struct fsnotify_event_private_data *priv); +}; + +/* + * A group is a "thing" that wants to receive notification about filesystem + * events. The mask holds the subset of event types this group cares about. + * refcnt on a group is up to the implementor and at any moment if it goes 0 + * everything will be cleaned up. + */ +struct fsnotify_group { + /* + * How the refcnt is used is up to each group. When the refcnt hits 0 + * fsnotify will clean up all of the resources associated with this group. + * As an example, the dnotify group will always have a refcnt=1 and that + * will never change. Inotify, on the other hand, has a group per + * inotify_init() and the refcnt will hit 0 only when that fd has been + * closed. + */ + atomic_t refcnt; /* things with interest in this group */ + + const struct fsnotify_ops *ops; /* how this group handles things */ + + /* needed to send notification to userspace */ + struct mutex notification_mutex; /* protect the notification_list */ + struct list_head notification_list; /* list of event_holder this group needs to send to userspace */ + wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */ + unsigned int q_len; /* events on the queue */ + unsigned int max_events; /* maximum events allowed on the list */ + /* + * Valid fsnotify group priorities. Events are send in order from highest + * priority to lowest priority. We default to the lowest priority. + */ + #define FS_PRIO_0 0 /* normal notifiers, no permissions */ + #define FS_PRIO_1 1 /* fanotify content based access control */ + #define FS_PRIO_2 2 /* fanotify pre-content access */ + unsigned int priority; + + /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ + spinlock_t mark_lock; /* protect marks_list */ + atomic_t num_marks; /* 1 for each mark and 1 for not being + * past the point of no return when freeing + * a group */ + struct list_head marks_list; /* all inode marks for this group */ + + /* groups can define private fields here or use the void *private */ + union { + void *private; +#ifdef CONFIG_INOTIFY_USER + struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + u32 last_wd; + struct fasync_struct *fa; /* async notification */ + struct user_struct *user; + } inotify_data; +#endif +#ifdef CONFIG_FANOTIFY + struct fanotify_group_private_data { +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS + /* allows a group to block waiting for a userspace response */ + struct mutex access_mutex; + struct list_head access_list; + wait_queue_head_t access_waitq; + atomic_t bypass_perm; +#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ + int f_flags; + unsigned int max_marks; + struct user_struct *user; + } fanotify_data; +#endif /* CONFIG_FANOTIFY */ + }; +}; + +/* + * A single event can be queued in multiple group->notification_lists. 
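To make the ops table concrete, a skeletal backend might look like the sketch below. The names are hypothetical, handle_event only shows the shape of the callbacks, and the mark's mask field as well as fsnotify_alloc_group() are declared further down in this header:

static bool my_should_send_event(struct fsnotify_group *group,
                                 struct inode *inode,
                                 struct fsnotify_mark *inode_mark,
                                 struct fsnotify_mark *vfsmount_mark,
                                 __u32 mask, void *data, int data_type)
{
        /* only interested when the inode mark asked for this event type */
        return inode_mark && (inode_mark->mask & mask);
}

static int my_handle_event(struct fsnotify_group *group,
                           struct fsnotify_mark *inode_mark,
                           struct fsnotify_mark *vfsmount_mark,
                           struct fsnotify_event *event)
{
        /* queue, log or otherwise consume the event here */
        return 0;
}

static const struct fsnotify_ops my_fsnotify_ops = {
        .should_send_event = my_should_send_event,
        .handle_event      = my_handle_event,
};

/* struct fsnotify_group *group = fsnotify_alloc_group(&my_fsnotify_ops); */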
+ * + * each group->notification_list will point to an event_holder which in turns points + * to the actual event that needs to be sent to userspace. + * + * Seemed cheaper to create a refcnt'd event and a small holder for every group + * than create a different event for every group + * + */ +struct fsnotify_event_holder { + struct fsnotify_event *event; + struct list_head event_list; +}; + +/* + * Inotify needs to tack data onto an event. This struct lets us later find the + * correct private data of the correct group. + */ +struct fsnotify_event_private_data { + struct fsnotify_group *group; + struct list_head event_list; +}; + +/* + * all of the information about the original object we want to now send to + * a group. If you want to carry more info from the accessing task to the + * listener this structure is where you need to be adding fields. + */ +struct fsnotify_event { + /* + * If we create an event we are also likely going to need a holder + * to link to a group. So embed one holder in the event. Means only + * one allocation for the common case where we only have one group + */ + struct fsnotify_event_holder holder; + spinlock_t lock; /* protection for the associated event_holder and private_list */ + /* to_tell may ONLY be dereferenced during handle_event(). */ + struct inode *to_tell; /* either the inode the event happened to or its parent */ + /* + * depending on the event type we should have either a path or inode + * We hold a reference on path, but NOT on inode. Since we have the ref on + * the path, it may be dereferenced at any point during this object's + * lifetime. That reference is dropped when this object's refcnt hits + * 0. If this event contains an inode instead of a path, the inode may + * ONLY be used during handle_event(). + */ + union { + struct path path; + struct inode *inode; + }; +/* when calling fsnotify tell it if the data is a path or inode */ +#define FSNOTIFY_EVENT_NONE 0 +#define FSNOTIFY_EVENT_PATH 1 +#define FSNOTIFY_EVENT_INODE 2 + int data_type; /* which of the above union we have */ + atomic_t refcnt; /* how many groups still are using/need to send this event */ + __u32 mask; /* the type of access, bitwise OR for FS_* event types */ + + u32 sync_cookie; /* used to corrolate events, namely inotify mv events */ + const unsigned char *file_name; + size_t name_len; + struct pid *tgid; + +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS + __u32 response; /* userspace answer to question */ +#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ + + struct list_head private_data_list; /* groups can store private data here */ +}; + +/* + * Inode specific fields in an fsnotify_mark + */ +struct fsnotify_inode_mark { + struct inode *inode; /* inode this mark is associated with */ + struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */ + struct list_head free_i_list; /* tmp list used when freeing this mark */ +}; + +/* + * Mount point specific fields in an fsnotify_mark + */ +struct fsnotify_vfsmount_mark { + struct vfsmount *mnt; /* vfsmount this mark is associated with */ + struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */ + struct list_head free_m_list; /* tmp list used when freeing this mark */ +}; + +/* + * a mark is simply an object attached to an in core inode which allows an + * fsnotify listener to indicate they are either no longer interested in events + * of a type matching mask or only interested in those events. 
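As a concrete illustration of attaching such a mark, a listener typically allocates one, initialises it, sets the event mask it cares about and then adds it to an inode. A rough sketch using fsnotify_init_mark(), fsnotify_add_mark() and fsnotify_put_mark(), all declared further down in this header; the helper names are hypothetical:

#include <linux/slab.h>

static void my_free_mark(struct fsnotify_mark *mark)
{
        kfree(mark);    /* called on the final put */
}

static int my_watch_inode(struct fsnotify_group *group,
                          struct inode *inode, __u32 mask)
{
        struct fsnotify_mark *mark;
        int ret;

        mark = kzalloc(sizeof(*mark), GFP_KERNEL);
        if (!mark)
                return -ENOMEM;

        fsnotify_init_mark(mark, my_free_mark);
        mark->mask = mask;                      /* e.g. FS_CREATE | FS_DELETE */
        ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
        if (ret)
                fsnotify_put_mark(mark);        /* drop the initial reference */
        return ret;
}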
+ * + * these are flushed when an inode is evicted from core and may be flushed + * when the inode is modified (as seen by fsnotify_access). Some fsnotify users + * (such as dnotify) will flush these when the open fd is closed and not at + * inode eviction or modification. + */ +struct fsnotify_mark { + __u32 mask; /* mask this mark is for */ + /* we hold ref for each i_list and g_list. also one ref for each 'thing' + * in kernel that found and may be using this mark. */ + atomic_t refcnt; /* active things looking at this mark */ + struct fsnotify_group *group; /* group this mark is for */ + struct list_head g_list; /* list of marks by group->i_fsnotify_marks */ + spinlock_t lock; /* protect group and inode */ + union { + struct fsnotify_inode_mark i; + struct fsnotify_vfsmount_mark m; + }; + struct list_head free_g_list; /* tmp list used when freeing this mark */ + __u32 ignored_mask; /* events types to ignore */ +#define FSNOTIFY_MARK_FLAG_INODE 0x01 +#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02 +#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04 +#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 +#define FSNOTIFY_MARK_FLAG_ALIVE 0x10 + unsigned int flags; /* vfsmount or inode mark? */ + struct list_head destroy_list; + void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ +}; + +#ifdef CONFIG_FSNOTIFY + +/* called from the vfs helpers */ + +/* main fsnotify call to send events */ +extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, + const unsigned char *name, u32 cookie); +extern int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask); +extern void __fsnotify_inode_delete(struct inode *inode); +extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); +extern u32 fsnotify_get_cookie(void); + +static inline int fsnotify_inode_watches_children(struct inode *inode) +{ + /* FS_EVENT_ON_CHILD is set if the inode may care */ + if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD)) + return 0; + /* this inode might care about child events, does it care about the + * specific set of events that can happen on a child? */ + return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD; +} + +/* + * Update the dentry with a flag indicating the interest of its parent to receive + * filesystem events when those events happens to this dentry->d_inode. + */ +static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) +{ + struct dentry *parent; + + assert_spin_locked(&dentry->d_lock); + + /* + * Serialisation of setting PARENT_WATCHED on the dentries is provided + * by d_lock. If inotify_inode_watched changes after we have taken + * d_lock, the following __fsnotify_update_child_dentry_flags call will + * find our entry, so it will spin until we complete here, and update + * us with the new state. 
+ */ + parent = dentry->d_parent; + if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode)) + dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; + else + dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; +} + +/* + * fsnotify_d_instantiate - instantiate a dentry for inode + */ +static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) +{ + if (!inode) + return; + + spin_lock(&dentry->d_lock); + __fsnotify_update_dcache_flags(dentry); + spin_unlock(&dentry->d_lock); +} + +/* called from fsnotify listeners, such as fanotify or dnotify */ + +/* get a reference to an existing or create a new group */ +extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops); +/* drop reference on a group from fsnotify_alloc_group */ +extern void fsnotify_put_group(struct fsnotify_group *group); + +/* take a reference to an event */ +extern void fsnotify_get_event(struct fsnotify_event *event); +extern void fsnotify_put_event(struct fsnotify_event *event); +/* find private data previously attached to an event and unlink it */ +extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, + struct fsnotify_event *event); + +/* attach the event to the group notification queue */ +extern struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, + struct fsnotify_event *event, + struct fsnotify_event_private_data *priv, + struct fsnotify_event *(*merge)(struct list_head *, + struct fsnotify_event *)); +/* true if the group notification queue is empty */ +extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); +/* return, but do not dequeue the first event on the notification queue */ +extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group); +/* return AND dequeue the first event on the notification queue */ +extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group); + +/* functions used to manipulate the marks attached to inodes */ + +/* run all marks associated with a vfsmount and update mnt->mnt_fsnotify_mask */ +extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt); +/* run all marks associated with an inode and update inode->i_fsnotify_mask */ +extern void fsnotify_recalc_inode_mask(struct inode *inode); +extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark)); +/* find (and take a reference) to a mark associated with group and inode */ +extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode); +/* find (and take a reference) to a mark associated with group and vfsmount */ +extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt); +/* copy the values from old into new */ +extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old); +/* set the ignored_mask of a mark */ +extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask); +/* set the mask of a mark (might pin the object into memory */ +extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask); +/* attach the mark to both the group and the inode */ +extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, + struct inode *inode, struct vfsmount *mnt, int allow_dups); +/* given a mark, flag it to be freed when all references are dropped */ +extern void 
fsnotify_destroy_mark(struct fsnotify_mark *mark); +/* run all the marks in a group, and clear all of the vfsmount marks */ +extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group); +/* run all the marks in a group, and clear all of the inode marks */ +extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group); +/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/ +extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags); +/* run all the marks in a group, and flag them to be freed */ +extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); +extern void fsnotify_get_mark(struct fsnotify_mark *mark); +extern void fsnotify_put_mark(struct fsnotify_mark *mark); +extern void fsnotify_unmount_inodes(struct list_head *list); + +/* put here because inotify does some weird stuff when destroying watches */ +extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, + void *data, int data_is, + const unsigned char *name, + u32 cookie, gfp_t gfp); + +/* fanotify likes to change events after they are on lists... */ +extern struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event); +extern int fsnotify_replace_event(struct fsnotify_event_holder *old_holder, + struct fsnotify_event *new_event); + +#else + +static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, + const unsigned char *name, u32 cookie) +{ + return 0; +} + +static inline int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) +{ + return 0; +} + +static inline void __fsnotify_inode_delete(struct inode *inode) +{} + +static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) +{} + +static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) +{} + +static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) +{} + +static inline u32 fsnotify_get_cookie(void) +{ + return 0; +} + +static inline void fsnotify_unmount_inodes(struct list_head *list) +{} + +#endif /* CONFIG_FSNOTIFY */ + +#endif /* __KERNEL __ */ + +#endif /* __LINUX_FSNOTIFY_BACKEND_H */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h new file mode 100644 index 00000000..72a6cabb --- /dev/null +++ b/include/linux/ftrace.h @@ -0,0 +1,687 @@ +/* + * Ftrace header. For implementation details beyond the random comments + * scattered below, see: Documentation/trace/ftrace-design.txt + */ + +#ifndef _LINUX_FTRACE_H +#define _LINUX_FTRACE_H + +#include <linux/trace_clock.h> +#include <linux/kallsyms.h> +#include <linux/linkage.h> +#include <linux/bitops.h> +#include <linux/ktime.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/fs.h> + +#include <asm/ftrace.h> + +struct module; +struct ftrace_hash; + +#ifdef CONFIG_FUNCTION_TRACER + +extern int ftrace_enabled; +extern int +ftrace_enable_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); + +/* + * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are + * set in the flags member. + * + * ENABLED - set/unset when ftrace_ops is registered/unregistered + * GLOBAL - set manualy by ftrace_ops user to denote the ftrace_ops + * is part of the global tracers sharing the same filter + * via set_ftrace_* debugfs files. 
+ * DYNAMIC - set when ftrace_ops is registered to denote dynamically + * allocated ftrace_ops which need special care + * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops + * could be controled by following calls: + * ftrace_function_local_enable + * ftrace_function_local_disable + */ +enum { + FTRACE_OPS_FL_ENABLED = 1 << 0, + FTRACE_OPS_FL_GLOBAL = 1 << 1, + FTRACE_OPS_FL_DYNAMIC = 1 << 2, + FTRACE_OPS_FL_CONTROL = 1 << 3, +}; + +struct ftrace_ops { + ftrace_func_t func; + struct ftrace_ops *next; + unsigned long flags; + int __percpu *disabled; +#ifdef CONFIG_DYNAMIC_FTRACE + struct ftrace_hash *notrace_hash; + struct ftrace_hash *filter_hash; +#endif +}; + +extern int function_trace_stop; + +/* + * Type of the current tracing. + */ +enum ftrace_tracing_type_t { + FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ + FTRACE_TYPE_RETURN, /* Hook the return of the function */ +}; + +/* Current tracing type, default is FTRACE_TYPE_ENTER */ +extern enum ftrace_tracing_type_t ftrace_tracing_type; + +/** + * ftrace_stop - stop function tracer. + * + * A quick way to stop the function tracer. Note this an on off switch, + * it is not something that is recursive like preempt_disable. + * This does not disable the calling of mcount, it only stops the + * calling of functions from mcount. + */ +static inline void ftrace_stop(void) +{ + function_trace_stop = 1; +} + +/** + * ftrace_start - start the function tracer. + * + * This function is the inverse of ftrace_stop. This does not enable + * the function tracing if the function tracer is disabled. This only + * sets the function tracer flag to continue calling the functions + * from mcount. + */ +static inline void ftrace_start(void) +{ + function_trace_stop = 0; +} + +/* + * The ftrace_ops must be a static and should also + * be read_mostly. These functions do modify read_mostly variables + * so use them sparely. Never free an ftrace_op or modify the + * next pointer after it has been registered. Even after unregistering + * it, the next pointer may still be used internally. + */ +int register_ftrace_function(struct ftrace_ops *ops); +int unregister_ftrace_function(struct ftrace_ops *ops); +void clear_ftrace_function(void); + +/** + * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu + * + * This function enables tracing on current cpu by decreasing + * the per cpu control variable. + * It must be called with preemption disabled and only on ftrace_ops + * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. + */ +static inline void ftrace_function_local_enable(struct ftrace_ops *ops) +{ + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) + return; + + (*this_cpu_ptr(ops->disabled))--; +} + +/** + * ftrace_function_local_disable - enable controlled ftrace_ops on current cpu + * + * This function enables tracing on current cpu by decreasing + * the per cpu control variable. + * It must be called with preemption disabled and only on ftrace_ops + * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. 
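Tying the pieces above together, the smallest user of this interface defines a callback with the ftrace_func_t signature, wraps it in a static ftrace_ops (as the comment above requires) and registers it. A sketch with illustrative names; with CONFIG_DYNAMIC_FTRACE, ftrace_set_filter(), declared further down, can narrow which functions reach the callback:

static void my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
        /* runs for every traced function; must be fast and re-entrant */
}

static struct ftrace_ops my_trace_ops = {
        .func = my_trace_callback,
};

/* in module init / exit (sketch):              */
/*      register_ftrace_function(&my_trace_ops);   */
/*      ...                                         */
/*      unregister_ftrace_function(&my_trace_ops);  */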
+ */ +static inline void ftrace_function_local_disable(struct ftrace_ops *ops) +{ + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) + return; + + (*this_cpu_ptr(ops->disabled))++; +} + +/** + * ftrace_function_local_disabled - returns ftrace_ops disabled value + * on current cpu + * + * This function returns value of ftrace_ops::disabled on current cpu. + * It must be called with preemption disabled and only on ftrace_ops + * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. + */ +static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) +{ + WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)); + return *this_cpu_ptr(ops->disabled); +} + +extern void ftrace_stub(unsigned long a0, unsigned long a1); + +#else /* !CONFIG_FUNCTION_TRACER */ +/* + * (un)register_ftrace_function must be a macro since the ops parameter + * must not be evaluated. + */ +#define register_ftrace_function(ops) ({ 0; }) +#define unregister_ftrace_function(ops) ({ 0; }) +static inline void clear_ftrace_function(void) { } +static inline void ftrace_kill(void) { } +static inline void ftrace_stop(void) { } +static inline void ftrace_start(void) { } +#endif /* CONFIG_FUNCTION_TRACER */ + +#ifdef CONFIG_STACK_TRACER +extern int stack_tracer_enabled; +int +stack_trace_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +#endif + +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(struct ftrace_hash *hash, + char *func, char *cmd, + char *params, int enable); +}; + +#ifdef CONFIG_DYNAMIC_FTRACE + +int ftrace_arch_code_modify_prepare(void); +int ftrace_arch_code_modify_post_process(void); + +void ftrace_bug(int err, unsigned long ip); + +struct seq_file; + +struct ftrace_probe_ops { + void (*func)(unsigned long ip, + unsigned long parent_ip, + void **data); + int (*callback)(unsigned long ip, void **data); + void (*free)(void **data); + int (*print)(struct seq_file *m, + unsigned long ip, + struct ftrace_probe_ops *ops, + void *data); +}; + +extern int +register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + void *data); +extern void +unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + void *data); +extern void +unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); +extern void unregister_ftrace_function_probe_all(char *glob); + +extern int ftrace_text_reserved(void *start, void *end); + +enum { + FTRACE_FL_ENABLED = (1 << 30), +}; + +#define FTRACE_FL_MASK (0x3UL << 30) +#define FTRACE_REF_MAX ((1 << 30) - 1) + +struct dyn_ftrace { + union { + unsigned long ip; /* address of mcount call-site */ + struct dyn_ftrace *freelist; + }; + unsigned long flags; + struct dyn_arch_ftrace arch; +}; + +int ftrace_force_update(void); +int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +void ftrace_set_global_filter(unsigned char *buf, int len, int reset); +void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); +void ftrace_free_filter(struct ftrace_ops *ops); + +int register_ftrace_command(struct ftrace_func_command *cmd); +int unregister_ftrace_command(struct ftrace_func_command *cmd); + +enum { + FTRACE_UPDATE_CALLS = (1 << 0), + FTRACE_DISABLE_CALLS = (1 << 1), + FTRACE_UPDATE_TRACE_FUNC = (1 << 2), + FTRACE_START_FUNC_RET = (1 << 
3), + FTRACE_STOP_FUNC_RET = (1 << 4), +}; + +enum { + FTRACE_UPDATE_IGNORE, + FTRACE_UPDATE_MAKE_CALL, + FTRACE_UPDATE_MAKE_NOP, +}; + +enum { + FTRACE_ITER_FILTER = (1 << 0), + FTRACE_ITER_NOTRACE = (1 << 1), + FTRACE_ITER_PRINTALL = (1 << 2), + FTRACE_ITER_DO_HASH = (1 << 3), + FTRACE_ITER_HASH = (1 << 4), + FTRACE_ITER_ENABLED = (1 << 5), +}; + +void arch_ftrace_update_code(int command); + +struct ftrace_rec_iter; + +struct ftrace_rec_iter *ftrace_rec_iter_start(void); +struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); +struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); + +int ftrace_update_record(struct dyn_ftrace *rec, int enable); +int ftrace_test_record(struct dyn_ftrace *rec, int enable); +void ftrace_run_stop_machine(int command); +int ftrace_location(unsigned long ip); + +extern ftrace_func_t ftrace_trace_function; + +int ftrace_regex_open(struct ftrace_ops *ops, int flag, + struct inode *inode, struct file *file); +ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos); +ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos); +loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin); +int ftrace_regex_release(struct inode *inode, struct file *file); + +void __init +ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable); + +/* defined in arch */ +extern int ftrace_ip_converted(unsigned long ip); +extern int ftrace_dyn_arch_init(void *data); +extern int ftrace_update_ftrace_func(ftrace_func_t func); +extern void ftrace_caller(void); +extern void ftrace_call(void); +extern void mcount_call(void); + +#ifndef FTRACE_ADDR +#define FTRACE_ADDR ((unsigned long)ftrace_caller) +#endif +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +extern void ftrace_graph_caller(void); +extern int ftrace_enable_ftrace_graph_caller(void); +extern int ftrace_disable_ftrace_graph_caller(void); +#else +static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; } +static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } +#endif + +/** + * ftrace_make_nop - convert code into nop + * @mod: module structure if called by module load initialization + * @rec: the mcount call site record + * @addr: the address that the call site should be calling + * + * This is a very sensitive operation and great care needs + * to be taken by the arch. The operation should carefully + * read the location, check to see if what is read is indeed + * what we expect it to be, and then on success of the compare, + * it should write to the location. + * + * The code segment at @rec->ip should be a caller to @addr + * + * Return must be: + * 0 on success + * -EFAULT on error reading the location + * -EINVAL on a failed compare of the contents + * -EPERM on error writing to the location + * Any other value will be considered a failure. + */ +extern int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr); + +/** + * ftrace_make_call - convert a nop call site into a call to addr + * @rec: the mcount call site record + * @addr: the address that the call site should call + * + * This is a very sensitive operation and great care needs + * to be taken by the arch. The operation should carefully + * read the location, check to see if what is read is indeed + * what we expect it to be, and then on success of the compare, + * it should write to the location. 
+ * + * The code segment at @rec->ip should be a nop + * + * Return must be: + * 0 on success + * -EFAULT on error reading the location + * -EINVAL on a failed compare of the contents + * -EPERM on error writing to the location + * Any other value will be considered a failure. + */ +extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); + +/* May be defined in arch */ +extern int ftrace_arch_read_dyn_info(char *buf, int size); + +extern int skip_trace(unsigned long ip); + +extern void ftrace_disable_daemon(void); +extern void ftrace_enable_daemon(void); +#else +static inline int skip_trace(unsigned long ip) { return 0; } +static inline int ftrace_force_update(void) { return 0; } +static inline void ftrace_disable_daemon(void) { } +static inline void ftrace_enable_daemon(void) { } +static inline void ftrace_release_mod(struct module *mod) {} +static inline int register_ftrace_command(struct ftrace_func_command *cmd) +{ + return -EINVAL; +} +static inline int unregister_ftrace_command(char *cmd_name) +{ + return -EINVAL; +} +static inline int ftrace_text_reserved(void *start, void *end) +{ + return 0; +} + +/* + * Again users of functions that have ftrace_ops may not + * have them defined when ftrace is not enabled, but these + * functions may still be called. Use a macro instead of inline. + */ +#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) +#define ftrace_set_early_filter(ops, buf, enable) do { } while (0) +#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) +#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) +#define ftrace_free_filter(ops) do { } while (0) + +static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos) { return -ENODEV; } +static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos) { return -ENODEV; } +static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin) +{ + return -ENODEV; +} +static inline int +ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* totally disable ftrace - can not re-enable after this */ +void ftrace_kill(void); + +static inline void tracer_disable(void) +{ +#ifdef CONFIG_FUNCTION_TRACER + ftrace_enabled = 0; +#endif +} + +/* + * Ftrace disable/restore without lock. Some synchronization mechanism + * must be used to prevent ftrace_enabled to be changed between + * disable/restore. 
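In practice the save/disable/restore pattern described above looks like the sketch below, built on __ftrace_enabled_save()/__ftrace_enabled_restore() defined immediately after this comment; the caller name is hypothetical and the external synchronization the comment demands is left out:

static void my_do_untraced_work(void)          /* hypothetical caller */
{
        int saved_state;

        saved_state = __ftrace_enabled_save(); /* remember and clear ftrace_enabled */

        /* ... work that must not be traced ... */

        __ftrace_enabled_restore(saved_state); /* put the previous state back */
}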
+ */ +static inline int __ftrace_enabled_save(void) +{ +#ifdef CONFIG_FUNCTION_TRACER + int saved_ftrace_enabled = ftrace_enabled; + ftrace_enabled = 0; + return saved_ftrace_enabled; +#else + return 0; +#endif +} + +static inline void __ftrace_enabled_restore(int enabled) +{ +#ifdef CONFIG_FUNCTION_TRACER + ftrace_enabled = enabled; +#endif +} + +#ifndef HAVE_ARCH_CALLER_ADDR +# ifdef CONFIG_FRAME_POINTER +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) +# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) +# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) +# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) +# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) +# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) +# else +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 0UL +# define CALLER_ADDR2 0UL +# define CALLER_ADDR3 0UL +# define CALLER_ADDR4 0UL +# define CALLER_ADDR5 0UL +# define CALLER_ADDR6 0UL +# endif +#endif /* ifndef HAVE_ARCH_CALLER_ADDR */ + +#ifdef CONFIG_IRQSOFF_TRACER + extern void time_hardirqs_on(unsigned long a0, unsigned long a1); + extern void time_hardirqs_off(unsigned long a0, unsigned long a1); +#else + static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { } + static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { } +#endif + +#ifdef CONFIG_PREEMPT_TRACER + extern void trace_preempt_on(unsigned long a0, unsigned long a1); + extern void trace_preempt_off(unsigned long a0, unsigned long a1); +#else + static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { } + static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { } +#endif + +#ifdef CONFIG_FTRACE_MCOUNT_RECORD +extern void ftrace_init(void); +#else +static inline void ftrace_init(void) { } +#endif + +/* + * Structure that defines an entry function trace. + */ +struct ftrace_graph_ent { + unsigned long func; /* Current function */ + int depth; +}; + +/* + * Structure that defines a return function trace. + */ +struct ftrace_graph_ret { + unsigned long func; /* Current function */ + unsigned long long calltime; + unsigned long long rettime; + /* Number of functions that overran the depth limit for current task */ + unsigned long overrun; + int depth; +}; + +/* Type of the callback handlers for tracing function graph*/ +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +/* for init task */ +#define INIT_FTRACE_GRAPH .ret_stack = NULL, + +/* + * Stack of return addresses for functions + * of a thread. + * Used in struct thread_info + */ +struct ftrace_ret_stack { + unsigned long ret; + unsigned long func; + unsigned long long calltime; + unsigned long long subtime; + unsigned long fp; +}; + +/* + * Primary handler of a function return. + * It relays on ftrace_return_to_handler. + * Defined in entry_32/64.S + */ +extern void return_to_handler(void); + +extern int +ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, + unsigned long frame_pointer); + +/* + * Sometimes we don't want to trace a function with the function + * graph tracer but we want them to keep traced by the usual function + * tracer if the function graph tracer is not configured. 
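Given the entry/return structures and callback typedefs above, a minimal pair of graph callbacks could look like this sketch; register_ftrace_graph()/unregister_ftrace_graph() are declared just below, and the names here are illustrative:

static int my_graph_entry(struct ftrace_graph_ent *trace)
{
        /* return non-zero to trace this function, zero to skip it */
        return 1;
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
        /* trace->rettime - trace->calltime is the time spent in the function */
}

/* register_ftrace_graph(my_graph_return, my_graph_entry); */
/* ...                                                     */
/* unregister_ftrace_graph();                              */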
+ */ +#define __notrace_funcgraph notrace + +/* + * We want to which function is an entrypoint of a hardirq. + * That will help us to put a signal on output. + */ +#define __irq_entry __attribute__((__section__(".irqentry.text"))) + +/* Limits of hardirq entrypoints */ +extern char __irqentry_text_start[]; +extern char __irqentry_text_end[]; + +#define FTRACE_RETFUNC_DEPTH 50 +#define FTRACE_RETSTACK_ALLOC_SIZE 32 +extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, + trace_func_graph_ent_t entryfunc); + +extern void ftrace_graph_stop(void); + +/* The current handlers in use */ +extern trace_func_graph_ret_t ftrace_graph_return; +extern trace_func_graph_ent_t ftrace_graph_entry; + +extern void unregister_ftrace_graph(void); + +extern void ftrace_graph_init_task(struct task_struct *t); +extern void ftrace_graph_exit_task(struct task_struct *t); +extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); + +static inline int task_curr_ret_stack(struct task_struct *t) +{ + return t->curr_ret_stack; +} + +static inline void pause_graph_tracing(void) +{ + atomic_inc(¤t->tracing_graph_pause); +} + +static inline void unpause_graph_tracing(void) +{ + atomic_dec(¤t->tracing_graph_pause); +} +#else /* !CONFIG_FUNCTION_GRAPH_TRACER */ + +#define __notrace_funcgraph +#define __irq_entry +#define INIT_FTRACE_GRAPH + +static inline void ftrace_graph_init_task(struct task_struct *t) { } +static inline void ftrace_graph_exit_task(struct task_struct *t) { } +static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } + +static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, + trace_func_graph_ent_t entryfunc) +{ + return -1; +} +static inline void unregister_ftrace_graph(void) { } + +static inline int task_curr_ret_stack(struct task_struct *tsk) +{ + return -1; +} + +static inline void pause_graph_tracing(void) { } +static inline void unpause_graph_tracing(void) { } +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +#ifdef CONFIG_TRACING + +/* flags for current->trace */ +enum { + TSK_TRACE_FL_TRACE_BIT = 0, + TSK_TRACE_FL_GRAPH_BIT = 1, +}; +enum { + TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT, + TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT, +}; + +static inline void set_tsk_trace_trace(struct task_struct *tsk) +{ + set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); +} + +static inline void clear_tsk_trace_trace(struct task_struct *tsk) +{ + clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); +} + +static inline int test_tsk_trace_trace(struct task_struct *tsk) +{ + return tsk->trace & TSK_TRACE_FL_TRACE; +} + +static inline void set_tsk_trace_graph(struct task_struct *tsk) +{ + set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); +} + +static inline void clear_tsk_trace_graph(struct task_struct *tsk) +{ + clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); +} + +static inline int test_tsk_trace_graph(struct task_struct *tsk) +{ + return tsk->trace & TSK_TRACE_FL_GRAPH; +} + +enum ftrace_dump_mode; + +extern enum ftrace_dump_mode ftrace_dump_on_oops; + +#ifdef CONFIG_PREEMPT +#define INIT_TRACE_RECURSION .trace_recursion = 0, +#endif + +#endif /* CONFIG_TRACING */ + +#ifndef INIT_TRACE_RECURSION +#define INIT_TRACE_RECURSION +#endif + +#ifdef CONFIG_FTRACE_SYSCALLS + +unsigned long arch_syscall_addr(int nr); + +#endif /* CONFIG_FTRACE_SYSCALLS */ + +#endif /* _LINUX_FTRACE_H */ diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h new file mode 100644 index 00000000..176a939d --- /dev/null +++ b/include/linux/ftrace_event.h @@ -0,0 
+1,312 @@ +#ifndef _LINUX_FTRACE_EVENT_H +#define _LINUX_FTRACE_EVENT_H + +#include <linux/ring_buffer.h> +#include <linux/trace_seq.h> +#include <linux/percpu.h> +#include <linux/hardirq.h> +#include <linux/perf_event.h> + +struct trace_array; +struct tracer; +struct dentry; + +struct trace_print_flags { + unsigned long mask; + const char *name; +}; + +struct trace_print_flags_u64 { + unsigned long long mask; + const char *name; +}; + +const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim, + unsigned long flags, + const struct trace_print_flags *flag_array); + +const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, + const struct trace_print_flags *symbol_array); + +#if BITS_PER_LONG == 32 +const char *ftrace_print_symbols_seq_u64(struct trace_seq *p, + unsigned long long val, + const struct trace_print_flags_u64 + *symbol_array); +#endif + +const char *ftrace_print_hex_seq(struct trace_seq *p, + const unsigned char *buf, int len); + +/* + * The trace entry - the most basic unit of tracing. This is what + * is printed in the end as a single line in the trace output, such as: + * + * bash-15816 [01] 235.197585: idle_cpu <- irq_enter + */ +struct trace_entry { + unsigned short type; + unsigned char flags; + unsigned char preempt_count; + int pid; + int padding; +}; + +#define FTRACE_MAX_EVENT \ + ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) + +/* + * Trace iterator - used by printout routines who present trace + * results to users and which routines might sleep, etc: + */ +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter *buffer_iter[NR_CPUS]; + unsigned long iter_flags; + + /* trace_seq for __print_flags() and __print_symbolic() etc. 
*/ + struct trace_seq tmp_seq; + + /* The below is zeroed out in pipe_read */ + struct trace_seq seq; + struct trace_entry *ent; + unsigned long lost_events; + int leftover; + int ent_size; + int cpu; + u64 ts; + + loff_t pos; + long idx; + + cpumask_var_t started; +}; + + +struct trace_event; + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, + int flags, struct trace_event *event); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +struct trace_event { + struct hlist_node node; + struct list_head list; + int type; + struct trace_event_functions *funcs; +}; + +extern int register_ftrace_event(struct trace_event *event); +extern int unregister_ftrace_event(struct trace_event *event); + +/* Return values for print_line callback */ +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */ + TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ +}; + +void tracing_generic_entry_update(struct trace_entry *entry, + unsigned long flags, + int pc); +struct ring_buffer_event * +trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, + int type, unsigned long len, + unsigned long flags, int pc); +void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event, + unsigned long flags, int pc); +void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event, + unsigned long flags, int pc); +void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer, + struct ring_buffer_event *event, + unsigned long flags, int pc, + struct pt_regs *regs); +void trace_current_buffer_discard_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event); + +void tracing_record_cmdline(struct task_struct *tsk); + +struct event_filter; + +enum trace_reg { + TRACE_REG_REGISTER, + TRACE_REG_UNREGISTER, +#ifdef CONFIG_PERF_EVENTS + TRACE_REG_PERF_REGISTER, + TRACE_REG_PERF_UNREGISTER, + TRACE_REG_PERF_OPEN, + TRACE_REG_PERF_CLOSE, + TRACE_REG_PERF_ADD, + TRACE_REG_PERF_DEL, +#endif +}; + +struct ftrace_event_call; + +struct ftrace_event_class { + char *system; + void *probe; +#ifdef CONFIG_PERF_EVENTS + void *perf_probe; +#endif + int (*reg)(struct ftrace_event_call *event, + enum trace_reg type, void *data); + int (*define_fields)(struct ftrace_event_call *); + struct list_head *(*get_fields)(struct ftrace_event_call *); + struct list_head fields; + int (*raw_init)(struct ftrace_event_call *); +}; + +extern int ftrace_event_reg(struct ftrace_event_call *event, + enum trace_reg type, void *data); + +enum { + TRACE_EVENT_FL_ENABLED_BIT, + TRACE_EVENT_FL_FILTERED_BIT, + TRACE_EVENT_FL_RECORDED_CMD_BIT, + TRACE_EVENT_FL_CAP_ANY_BIT, + TRACE_EVENT_FL_NO_SET_FILTER_BIT, + TRACE_EVENT_FL_IGNORE_ENABLE_BIT, +}; + +enum { + TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT), + TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), + TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT), + TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), + TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), + TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), +}; + +struct ftrace_event_call { + struct list_head list; + struct ftrace_event_class *class; + char *name; + struct dentry *dir; + struct trace_event event; + 
const char *print_fmt; + struct event_filter *filter; + void *mod; + void *data; + + /* + * 32 bit flags: + * bit 1: enabled + * bit 2: filter_active + * bit 3: enabled cmd record + * + * Changes to flags must hold the event_mutex. + * + * Note: Reads of flags do not hold the event_mutex since + * they occur in critical sections. But the way flags + * is currently used, these changes do not affect the code + * except that when a change is made, it may have a slight + * delay in propagating the changes to other CPUs due to + * caching and such. + */ + unsigned int flags; + +#ifdef CONFIG_PERF_EVENTS + int perf_refcount; + struct hlist_head __percpu *perf_events; +#endif +}; + +#define __TRACE_EVENT_FLAGS(name, value) \ + static int __init trace_init_flags_##name(void) \ + { \ + event_##name.flags = value; \ + return 0; \ + } \ + early_initcall(trace_init_flags_##name); + +#define PERF_MAX_TRACE_SIZE 2048 + +#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ + +extern void destroy_preds(struct ftrace_event_call *call); +extern int filter_match_preds(struct event_filter *filter, void *rec); +extern int filter_current_check_discard(struct ring_buffer *buffer, + struct ftrace_event_call *call, + void *rec, + struct ring_buffer_event *event); + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING, + FILTER_DYN_STRING, + FILTER_PTR_STRING, + FILTER_TRACE_FN, +}; + +#define EVENT_STORAGE_SIZE 128 +extern struct mutex event_storage_mutex; +extern char event_storage[EVENT_STORAGE_SIZE]; + +extern int trace_event_raw_init(struct ftrace_event_call *call); +extern int trace_define_field(struct ftrace_event_call *call, const char *type, + const char *name, int offset, int size, + int is_signed, int filter_type); +extern int trace_add_event_call(struct ftrace_event_call *call); +extern void trace_remove_event_call(struct ftrace_event_call *call); + +#define is_signed_type(type) (((type)(-1)) < 0) + +int trace_set_clr_event(const char *system, const char *event, int set); + +/* + * The double __builtin_constant_p is because gcc will give us an error + * if we try to allocate the static variable to fmt if it is not a + * constant. Even with the outer if statement optimizing out. + */ +#define event_trace_printk(ip, fmt, args...) \ +do { \ + __trace_printk_check_format(fmt, ##args); \ + tracing_record_cmdline(current); \ + if (__builtin_constant_p(fmt)) { \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ?
fmt : NULL; \ + \ + __trace_bprintk(ip, trace_printk_fmt, ##args); \ + } else \ + __trace_printk(ip, fmt, ##args); \ +} while (0) + +#ifdef CONFIG_PERF_EVENTS +struct perf_event; + +DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); + +extern int perf_trace_init(struct perf_event *event); +extern void perf_trace_destroy(struct perf_event *event); +extern int perf_trace_add(struct perf_event *event, int flags); +extern void perf_trace_del(struct perf_event *event, int flags); +extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, + char *filter_str); +extern void ftrace_profile_free_filter(struct perf_event *event); +extern void *perf_trace_buf_prepare(int size, unsigned short type, + struct pt_regs *regs, int *rctxp); + +static inline void +perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, + u64 count, struct pt_regs *regs, void *head) +{ + perf_tp_event(addr, count, raw_data, size, regs, head, rctx); +} +#endif + +#endif /* _LINUX_FTRACE_EVENT_H */ diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h new file mode 100644 index 00000000..dca7bf8c --- /dev/null +++ b/include/linux/ftrace_irq.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_FTRACE_IRQ_H +#define _LINUX_FTRACE_IRQ_H + + +#ifdef CONFIG_FTRACE_NMI_ENTER +extern void ftrace_nmi_enter(void); +extern void ftrace_nmi_exit(void); +#else +static inline void ftrace_nmi_enter(void) { } +static inline void ftrace_nmi_exit(void) { } +#endif + +#endif /* _LINUX_FTRACE_IRQ_H */ diff --git a/include/linux/fuse.h b/include/linux/fuse.h new file mode 100644 index 00000000..8f2ab8fe --- /dev/null +++ b/include/linux/fuse.h @@ -0,0 +1,648 @@ +/* + FUSE: Filesystem in Userspace + Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> + + This program can be distributed under the terms of the GNU GPL. + See the file COPYING. +*/ + +/* + * This file defines the kernel interface of FUSE + * + * Protocol changelog: + * + * 7.9: + * - new fuse_getattr_in input argument of GETATTR + * - add lk_flags in fuse_lk_in + * - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in + * - add blksize field to fuse_attr + * - add file flags field to fuse_read_in and fuse_write_in + * + * 7.10 + * - add nonseekable open flag + * + * 7.11 + * - add IOCTL message + * - add unsolicited notification support + * - add POLL message and NOTIFY_POLL notification + * + * 7.12 + * - add umask flag to input argument of open, mknod and mkdir + * - add notification messages for invalidation of inodes and + * directory entries + * + * 7.13 + * - make max number of background requests and congestion threshold + * tunables + * + * 7.14 + * - add splice support to fuse device + * + * 7.15 + * - add store notify + * - add retrieve notify + * + * 7.16 + * - add BATCH_FORGET request + * - FUSE_IOCTL_UNRESTRICTED shall now return with array of 'struct + * fuse_ioctl_iovec' instead of ambiguous 'struct iovec' + * - add FUSE_IOCTL_32BIT flag + * + * 7.17 + * - add FUSE_FLOCK_LOCKS and FUSE_RELEASE_FLOCK_UNLOCK + * + * 7.18 + * - add FUSE_IOCTL_DIR flag + * - add FUSE_NOTIFY_DELETE + */ + +#ifndef _LINUX_FUSE_H +#define _LINUX_FUSE_H + +#include <linux/types.h> + +/* + * Version negotiation: + * + * Both the kernel and userspace send the version they support in the + * INIT request and reply respectively. + * + * If the major versions match then both shall use the smallest + * of the two minor versions for communication. 
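To make the version negotiation rules in the surrounding comment concrete, here is a minimal userspace-side sketch (illustrative only, not part of fuse.h; the helper name is hypothetical, and the fuse_init_in/fuse_init_out structures it fills are the ones defined further down in this header) of how a FUSE server could choose the version it advertises in its FUSE_INIT reply:

#include <linux/fuse.h>

/* Pick the protocol version for the INIT reply, per the rules above. */
static void pick_init_version(const struct fuse_init_in *in,
                              struct fuse_init_out *out)
{
        out->major = FUSE_KERNEL_VERSION;
        out->minor = FUSE_KERNEL_MINOR_VERSION;

        if (in->major < FUSE_KERNEL_VERSION) {
                /* Kernel speaks an older major: fall back to it and
                 * keep an arbitrary supported minor. */
                out->major = in->major;
        } else if (in->major == FUSE_KERNEL_VERSION &&
                   in->minor < FUSE_KERNEL_MINOR_VERSION) {
                /* Majors match: both sides use the smaller minor. */
                out->minor = in->minor;
        }
        /* If the kernel's major is larger, replying with our own major is
         * enough; the kernel is expected to send a fresh INIT. */
}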
+ * + * If the kernel supports a larger major version, then userspace shall + * reply with the major version it supports, ignore the rest of the + * INIT message and expect a new INIT message from the kernel with a + * matching major version. + * + * If the library supports a larger major version, then it shall fall + * back to the major protocol version sent by the kernel for + * communication and reply with that major version (and an arbitrary + * supported minor version). + */ + +/** Version number of this interface */ +#define FUSE_KERNEL_VERSION 7 + +/** Minor version number of this interface */ +#define FUSE_KERNEL_MINOR_VERSION 18 + +/** The node ID of the root inode */ +#define FUSE_ROOT_ID 1 + +/* Make sure all structures are padded to 64bit boundary, so 32bit + userspace works under 64bit kernels */ + +struct fuse_attr { + __u64 ino; + __u64 size; + __u64 blocks; + __u64 atime; + __u64 mtime; + __u64 ctime; + __u32 atimensec; + __u32 mtimensec; + __u32 ctimensec; + __u32 mode; + __u32 nlink; + __u32 uid; + __u32 gid; + __u32 rdev; + __u32 blksize; + __u32 padding; +}; + +struct fuse_kstatfs { + __u64 blocks; + __u64 bfree; + __u64 bavail; + __u64 files; + __u64 ffree; + __u32 bsize; + __u32 namelen; + __u32 frsize; + __u32 padding; + __u32 spare[6]; +}; + +struct fuse_file_lock { + __u64 start; + __u64 end; + __u32 type; + __u32 pid; /* tgid */ +}; + +/** + * Bitmasks for fuse_setattr_in.valid + */ +#define FATTR_MODE (1 << 0) +#define FATTR_UID (1 << 1) +#define FATTR_GID (1 << 2) +#define FATTR_SIZE (1 << 3) +#define FATTR_ATIME (1 << 4) +#define FATTR_MTIME (1 << 5) +#define FATTR_FH (1 << 6) +#define FATTR_ATIME_NOW (1 << 7) +#define FATTR_MTIME_NOW (1 << 8) +#define FATTR_LOCKOWNER (1 << 9) + +/** + * Flags returned by the OPEN request + * + * FOPEN_DIRECT_IO: bypass page cache for this open file + * FOPEN_KEEP_CACHE: don't invalidate the data cache on open + * FOPEN_NONSEEKABLE: the file is not seekable + */ +#define FOPEN_DIRECT_IO (1 << 0) +#define FOPEN_KEEP_CACHE (1 << 1) +#define FOPEN_NONSEEKABLE (1 << 2) + +/** + * INIT request/reply flags + * + * FUSE_POSIX_LOCKS: remote locking for POSIX file locks + * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." 
+ * FUSE_DONT_MASK: don't apply umask to file mode on create operations + * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks + */ +#define FUSE_ASYNC_READ (1 << 0) +#define FUSE_POSIX_LOCKS (1 << 1) +#define FUSE_FILE_OPS (1 << 2) +#define FUSE_ATOMIC_O_TRUNC (1 << 3) +#define FUSE_EXPORT_SUPPORT (1 << 4) +#define FUSE_BIG_WRITES (1 << 5) +#define FUSE_DONT_MASK (1 << 6) +#define FUSE_FLOCK_LOCKS (1 << 10) + +/** + * CUSE INIT request/reply flags + * + * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl + */ +#define CUSE_UNRESTRICTED_IOCTL (1 << 0) + +/** + * Release flags + */ +#define FUSE_RELEASE_FLUSH (1 << 0) +#define FUSE_RELEASE_FLOCK_UNLOCK (1 << 1) + +/** + * Getattr flags + */ +#define FUSE_GETATTR_FH (1 << 0) + +/** + * Lock flags + */ +#define FUSE_LK_FLOCK (1 << 0) + +/** + * WRITE flags + * + * FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed + * FUSE_WRITE_LOCKOWNER: lock_owner field is valid + */ +#define FUSE_WRITE_CACHE (1 << 0) +#define FUSE_WRITE_LOCKOWNER (1 << 1) + +/** + * Read flags + */ +#define FUSE_READ_LOCKOWNER (1 << 1) + +/** + * Ioctl flags + * + * FUSE_IOCTL_COMPAT: 32bit compat ioctl on 64bit machine + * FUSE_IOCTL_UNRESTRICTED: not restricted to well-formed ioctls, retry allowed + * FUSE_IOCTL_RETRY: retry with new iovecs + * FUSE_IOCTL_32BIT: 32bit ioctl + * FUSE_IOCTL_DIR: is a directory + * + * FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs + */ +#define FUSE_IOCTL_COMPAT (1 << 0) +#define FUSE_IOCTL_UNRESTRICTED (1 << 1) +#define FUSE_IOCTL_RETRY (1 << 2) +#define FUSE_IOCTL_32BIT (1 << 3) +#define FUSE_IOCTL_DIR (1 << 4) + +#define FUSE_IOCTL_MAX_IOV 256 + +/** + * Poll flags + * + * FUSE_POLL_SCHEDULE_NOTIFY: request poll notify + */ +#define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0) + +enum fuse_opcode { + FUSE_LOOKUP = 1, + FUSE_FORGET = 2, /* no reply */ + FUSE_GETATTR = 3, + FUSE_SETATTR = 4, + FUSE_READLINK = 5, + FUSE_SYMLINK = 6, + FUSE_MKNOD = 8, + FUSE_MKDIR = 9, + FUSE_UNLINK = 10, + FUSE_RMDIR = 11, + FUSE_RENAME = 12, + FUSE_LINK = 13, + FUSE_OPEN = 14, + FUSE_READ = 15, + FUSE_WRITE = 16, + FUSE_STATFS = 17, + FUSE_RELEASE = 18, + FUSE_FSYNC = 20, + FUSE_SETXATTR = 21, + FUSE_GETXATTR = 22, + FUSE_LISTXATTR = 23, + FUSE_REMOVEXATTR = 24, + FUSE_FLUSH = 25, + FUSE_INIT = 26, + FUSE_OPENDIR = 27, + FUSE_READDIR = 28, + FUSE_RELEASEDIR = 29, + FUSE_FSYNCDIR = 30, + FUSE_GETLK = 31, + FUSE_SETLK = 32, + FUSE_SETLKW = 33, + FUSE_ACCESS = 34, + FUSE_CREATE = 35, + FUSE_INTERRUPT = 36, + FUSE_BMAP = 37, + FUSE_DESTROY = 38, + FUSE_IOCTL = 39, + FUSE_POLL = 40, + FUSE_NOTIFY_REPLY = 41, + FUSE_BATCH_FORGET = 42, + + /* CUSE specific operations */ + CUSE_INIT = 4096, +}; + +enum fuse_notify_code { + FUSE_NOTIFY_POLL = 1, + FUSE_NOTIFY_INVAL_INODE = 2, + FUSE_NOTIFY_INVAL_ENTRY = 3, + FUSE_NOTIFY_STORE = 4, + FUSE_NOTIFY_RETRIEVE = 5, + FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_CODE_MAX, +}; + +/* The read buffer is required to be at least 8k, but may be much larger */ +#define FUSE_MIN_READ_BUFFER 8192 + +#define FUSE_COMPAT_ENTRY_OUT_SIZE 120 + +struct fuse_entry_out { + __u64 nodeid; /* Inode ID */ + __u64 generation; /* Inode generation: nodeid:gen must + be unique for the fs's lifetime */ + __u64 entry_valid; /* Cache timeout for the name */ + __u64 attr_valid; /* Cache timeout for the attributes */ + __u32 entry_valid_nsec; + __u32 attr_valid_nsec; + struct fuse_attr attr; +}; + +struct fuse_forget_in { + __u64 nlookup; +}; + +struct fuse_forget_one { + __u64 nodeid; + __u64 nlookup; +}; + +struct 
fuse_batch_forget_in { + __u32 count; + __u32 dummy; +}; + +struct fuse_getattr_in { + __u32 getattr_flags; + __u32 dummy; + __u64 fh; +}; + +#define FUSE_COMPAT_ATTR_OUT_SIZE 96 + +struct fuse_attr_out { + __u64 attr_valid; /* Cache timeout for the attributes */ + __u32 attr_valid_nsec; + __u32 dummy; + struct fuse_attr attr; +}; + +#define FUSE_COMPAT_MKNOD_IN_SIZE 8 + +struct fuse_mknod_in { + __u32 mode; + __u32 rdev; + __u32 umask; + __u32 padding; +}; + +struct fuse_mkdir_in { + __u32 mode; + __u32 umask; +}; + +struct fuse_rename_in { + __u64 newdir; +}; + +struct fuse_link_in { + __u64 oldnodeid; +}; + +struct fuse_setattr_in { + __u32 valid; + __u32 padding; + __u64 fh; + __u64 size; + __u64 lock_owner; + __u64 atime; + __u64 mtime; + __u64 unused2; + __u32 atimensec; + __u32 mtimensec; + __u32 unused3; + __u32 mode; + __u32 unused4; + __u32 uid; + __u32 gid; + __u32 unused5; +}; + +struct fuse_open_in { + __u32 flags; + __u32 unused; +}; + +struct fuse_create_in { + __u32 flags; + __u32 mode; + __u32 umask; + __u32 padding; +}; + +struct fuse_open_out { + __u64 fh; + __u32 open_flags; + __u32 padding; +}; + +struct fuse_release_in { + __u64 fh; + __u32 flags; + __u32 release_flags; + __u64 lock_owner; +}; + +struct fuse_flush_in { + __u64 fh; + __u32 unused; + __u32 padding; + __u64 lock_owner; +}; + +struct fuse_read_in { + __u64 fh; + __u64 offset; + __u32 size; + __u32 read_flags; + __u64 lock_owner; + __u32 flags; + __u32 padding; +}; + +#define FUSE_COMPAT_WRITE_IN_SIZE 24 + +struct fuse_write_in { + __u64 fh; + __u64 offset; + __u32 size; + __u32 write_flags; + __u64 lock_owner; + __u32 flags; + __u32 padding; +}; + +struct fuse_write_out { + __u32 size; + __u32 padding; +}; + +#define FUSE_COMPAT_STATFS_SIZE 48 + +struct fuse_statfs_out { + struct fuse_kstatfs st; +}; + +struct fuse_fsync_in { + __u64 fh; + __u32 fsync_flags; + __u32 padding; +}; + +struct fuse_setxattr_in { + __u32 size; + __u32 flags; +}; + +struct fuse_getxattr_in { + __u32 size; + __u32 padding; +}; + +struct fuse_getxattr_out { + __u32 size; + __u32 padding; +}; + +struct fuse_lk_in { + __u64 fh; + __u64 owner; + struct fuse_file_lock lk; + __u32 lk_flags; + __u32 padding; +}; + +struct fuse_lk_out { + struct fuse_file_lock lk; +}; + +struct fuse_access_in { + __u32 mask; + __u32 padding; +}; + +struct fuse_init_in { + __u32 major; + __u32 minor; + __u32 max_readahead; + __u32 flags; +}; + +struct fuse_init_out { + __u32 major; + __u32 minor; + __u32 max_readahead; + __u32 flags; + __u16 max_background; + __u16 congestion_threshold; + __u32 max_write; +}; + +#define CUSE_INIT_INFO_MAX 4096 + +struct cuse_init_in { + __u32 major; + __u32 minor; + __u32 unused; + __u32 flags; +}; + +struct cuse_init_out { + __u32 major; + __u32 minor; + __u32 unused; + __u32 flags; + __u32 max_read; + __u32 max_write; + __u32 dev_major; /* chardev major */ + __u32 dev_minor; /* chardev minor */ + __u32 spare[10]; +}; + +struct fuse_interrupt_in { + __u64 unique; +}; + +struct fuse_bmap_in { + __u64 block; + __u32 blocksize; + __u32 padding; +}; + +struct fuse_bmap_out { + __u64 block; +}; + +struct fuse_ioctl_in { + __u64 fh; + __u32 flags; + __u32 cmd; + __u64 arg; + __u32 in_size; + __u32 out_size; +}; + +struct fuse_ioctl_iovec { + __u64 base; + __u64 len; +}; + +struct fuse_ioctl_out { + __s32 result; + __u32 flags; + __u32 in_iovs; + __u32 out_iovs; +}; + +struct fuse_poll_in { + __u64 fh; + __u64 kh; + __u32 flags; + __u32 padding; +}; + +struct fuse_poll_out { + __u32 revents; + __u32 padding; +}; + +struct 
fuse_notify_poll_wakeup_out { + __u64 kh; +}; + +struct fuse_in_header { + __u32 len; + __u32 opcode; + __u64 unique; + __u64 nodeid; + __u32 uid; + __u32 gid; + __u32 pid; + __u32 padding; +}; + +struct fuse_out_header { + __u32 len; + __s32 error; + __u64 unique; +}; + +struct fuse_dirent { + __u64 ino; + __u64 off; + __u32 namelen; + __u32 type; + char name[]; +}; + +#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name) +#define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1)) +#define FUSE_DIRENT_SIZE(d) \ + FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) + +struct fuse_notify_inval_inode_out { + __u64 ino; + __s64 off; + __s64 len; +}; + +struct fuse_notify_inval_entry_out { + __u64 parent; + __u32 namelen; + __u32 padding; +}; + +struct fuse_notify_delete_out { + __u64 parent; + __u64 child; + __u32 namelen; + __u32 padding; +}; + +struct fuse_notify_store_out { + __u64 nodeid; + __u64 offset; + __u32 size; + __u32 padding; +}; + +struct fuse_notify_retrieve_out { + __u64 notify_unique; + __u64 nodeid; + __u64 offset; + __u32 size; + __u32 padding; +}; + +/* Matches the size of fuse_write_in */ +struct fuse_notify_retrieve_in { + __u64 dummy1; + __u64 offset; + __u32 size; + __u32 dummy2; + __u64 dummy3; + __u64 dummy4; +}; + +#endif /* _LINUX_FUSE_H */ diff --git a/include/linux/futex.h b/include/linux/futex.h new file mode 100644 index 00000000..1e5a26d7 --- /dev/null +++ b/include/linux/futex.h @@ -0,0 +1,215 @@ +#ifndef _LINUX_FUTEX_H +#define _LINUX_FUTEX_H + +#include <linux/compiler.h> +#include <linux/types.h> + +/* Second argument to futex syscall */ + + +#define FUTEX_WAIT 0 +#define FUTEX_WAKE 1 +#define FUTEX_FD 2 +#define FUTEX_REQUEUE 3 +#define FUTEX_CMP_REQUEUE 4 +#define FUTEX_WAKE_OP 5 +#define FUTEX_LOCK_PI 6 +#define FUTEX_UNLOCK_PI 7 +#define FUTEX_TRYLOCK_PI 8 +#define FUTEX_WAIT_BITSET 9 +#define FUTEX_WAKE_BITSET 10 +#define FUTEX_WAIT_REQUEUE_PI 11 +#define FUTEX_CMP_REQUEUE_PI 12 + +#define FUTEX_PRIVATE_FLAG 128 +#define FUTEX_CLOCK_REALTIME 256 +#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME) + +#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG) +#define FUTEX_REQUEUE_PRIVATE (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG) +#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG) +#define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG) +#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG) +#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \ + FUTEX_PRIVATE_FLAG) +#define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \ + FUTEX_PRIVATE_FLAG) + +/* + * Support for robust futexes: the kernel cleans up held futexes at + * thread exit time. + */ + +/* + * Per-lock list entry - embedded in user-space locks, somewhere close + * to the futex field. (Note: user-space uses a double-linked list to + * achieve O(1) list add and remove, but the kernel only needs to know + * about the forward link) + * + * NOTE: this structure is part of the syscall ABI, and must not be + * changed. 
+ */ +struct robust_list { + struct robust_list __user *next; +}; + +/* + * Per-thread list head: + * + * NOTE: this structure is part of the syscall ABI, and must only be + * changed if the change is first communicated with the glibc folks. + * (When an incompatible change is done, we'll increase the structure + * size, which glibc will detect) + */ +struct robust_list_head { + /* + * The head of the list. Points back to itself if empty: + */ + struct robust_list list; + + /* + * This relative offset is set by user-space, it gives the kernel + * the relative position of the futex field to examine. This way + * we keep userspace flexible, to freely shape its data-structure, + * without hardcoding any particular offset into the kernel: + */ + long futex_offset; + + /* + * The death of the thread may race with userspace setting + * up a lock's links. So to handle this race, userspace first + * sets this field to the address of the to-be-taken lock, + * then does the lock acquire, and then adds itself to the + * list, and then clears this field. Hence the kernel will + * always have full knowledge of all locks that the thread + * _might_ have taken. We check the owner TID in any case, + * so only truly owned locks will be handled. + */ + struct robust_list __user *list_op_pending; +}; + +/* + * Are there any waiters for this robust futex: + */ +#define FUTEX_WAITERS 0x80000000 + +/* + * The kernel signals via this bit that a thread holding a futex + * has exited without unlocking the futex. The kernel also does + * a FUTEX_WAKE on such futexes, after setting the bit, to wake + * up any possible waiters: + */ +#define FUTEX_OWNER_DIED 0x40000000 + +/* + * The rest of the robust-futex field is for the TID: + */ +#define FUTEX_TID_MASK 0x3fffffff + +/* + * This limit protects against a deliberately circular list. + * (Not worth introducing an rlimit for it) + */ +#define ROBUST_LIST_LIMIT 2048 + +/* + * bitset with all bits set for the FUTEX_xxx_BITSET OPs to request a + * match of any bit. + */ +#define FUTEX_BITSET_MATCH_ANY 0xffffffff + +#ifdef __KERNEL__ +struct inode; +struct mm_struct; +struct task_struct; +union ktime; + +long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, + u32 __user *uaddr2, u32 val2, u32 val3); + +extern int +handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); + +/* + * Futexes are matched on equal values of this key. + * The key type depends on whether it's a shared or private mapping. + * Don't rearrange members without looking at hash_futex(). + * + * offset is aligned to a multiple of sizeof(u32) (== 4) by definition. 
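As a concrete illustration of the robust-list ABI described above, the sketch below shows how a userspace threading library might publish a per-thread list head to the kernel. It is illustrative only: the helper name is hypothetical, futex_offset == 0 assumes the futex word sits at the start of each lock record, and error handling is omitted.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>

static __thread struct robust_list_head robust_head;

static void register_robust_list(void)
{
        /* Empty list: the head points back to itself. */
        robust_head.list.next = &robust_head.list;
        /* Assumed layout: futex word at offset 0 of each lock entry. */
        robust_head.futex_offset = 0;
        robust_head.list_op_pending = NULL;

        /* Hand the head to the kernel; on thread exit it walks the list
         * and sets FUTEX_OWNER_DIED on any futex still held. */
        syscall(SYS_set_robust_list, &robust_head, sizeof(robust_head));
}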
+ * We use the two low order bits of offset to tell what is the kind of key : + * 00 : Private process futex (PTHREAD_PROCESS_PRIVATE) + * (no reference on an inode or mm) + * 01 : Shared futex (PTHREAD_PROCESS_SHARED) + * mapped on a file (reference on the underlying inode) + * 10 : Shared futex (PTHREAD_PROCESS_SHARED) + * (but private mapping on an mm, and reference taken on it) +*/ + +#define FUT_OFF_INODE 1 /* We set bit 0 if key has a reference on inode */ +#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */ + +union futex_key { + struct { + unsigned long pgoff; + struct inode *inode; + int offset; + } shared; + struct { + unsigned long address; + struct mm_struct *mm; + int offset; + } private; + struct { + unsigned long word; + void *ptr; + int offset; + } both; +}; + +#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } } + +#ifdef CONFIG_FUTEX +extern void exit_robust_list(struct task_struct *curr); +extern void exit_pi_state_list(struct task_struct *curr); +extern int futex_cmpxchg_enabled; +#else +static inline void exit_robust_list(struct task_struct *curr) +{ +} +static inline void exit_pi_state_list(struct task_struct *curr) +{ +} +#endif +#endif /* __KERNEL__ */ + +#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ +#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */ +#define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */ +#define FUTEX_OP_ANDN 3 /* *(int *)UADDR2 &= ~OPARG; */ +#define FUTEX_OP_XOR 4 /* *(int *)UADDR2 ^= OPARG; */ + +#define FUTEX_OP_OPARG_SHIFT 8 /* Use (1 << OPARG) instead of OPARG. */ + +#define FUTEX_OP_CMP_EQ 0 /* if (oldval == CMPARG) wake */ +#define FUTEX_OP_CMP_NE 1 /* if (oldval != CMPARG) wake */ +#define FUTEX_OP_CMP_LT 2 /* if (oldval < CMPARG) wake */ +#define FUTEX_OP_CMP_LE 3 /* if (oldval <= CMPARG) wake */ +#define FUTEX_OP_CMP_GT 4 /* if (oldval > CMPARG) wake */ +#define FUTEX_OP_CMP_GE 5 /* if (oldval >= CMPARG) wake */ + +/* FUTEX_WAKE_OP will perform atomically + int oldval = *(int *)UADDR2; + *(int *)UADDR2 = oldval OP OPARG; + if (oldval CMP CMPARG) + wake UADDR2; */ + +#define FUTEX_OP(op, oparg, cmp, cmparg) \ + (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \ + | ((oparg & 0xfff) << 12) | (cmparg & 0xfff)) + +#endif diff --git a/include/linux/gameport.h b/include/linux/gameport.h new file mode 100644 index 00000000..b456b08d --- /dev/null +++ b/include/linux/gameport.h @@ -0,0 +1,226 @@ +#ifndef _GAMEPORT_H +#define _GAMEPORT_H + +/* + * Copyright (c) 1999-2002 Vojtech Pavlik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
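The FUTEX_OP() encoding and the FUTEX_WAKE_OP semantics documented above can be exercised from userspace roughly as follows (an illustrative sketch; the helper name is hypothetical, futex(2) has no libc wrapper so syscall(2) is used directly, and the two futex words are assumed to be owned by the caller):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Atomically set *uaddr2 = 0; wake one waiter on uaddr and, if the old
 * value of *uaddr2 was greater than 1, also wake one waiter on uaddr2.
 * The second wake count travels in the timeout argument slot. */
static long wake_op(int *uaddr, int *uaddr2)
{
        return syscall(SYS_futex, uaddr, FUTEX_WAKE_OP, 1, (void *)1, uaddr2,
                       FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1));
}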
+ */ + +#ifdef __KERNEL__ +#include <asm/io.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/device.h> +#include <linux/timer.h> +#include <linux/slab.h> + +struct gameport { + + void *port_data; /* Private pointer for gameport drivers */ + char name[32]; + char phys[32]; + + int io; + int speed; + int fuzz; + + void (*trigger)(struct gameport *); + unsigned char (*read)(struct gameport *); + int (*cooked_read)(struct gameport *, int *, int *); + int (*calibrate)(struct gameport *, int *, int *); + int (*open)(struct gameport *, int); + void (*close)(struct gameport *); + + struct timer_list poll_timer; + unsigned int poll_interval; /* in msecs */ + spinlock_t timer_lock; + unsigned int poll_cnt; + void (*poll_handler)(struct gameport *); + + struct gameport *parent, *child; + + struct gameport_driver *drv; + struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */ + + struct device dev; + + struct list_head node; +}; +#define to_gameport_port(d) container_of(d, struct gameport, dev) + +struct gameport_driver { + const char *description; + + int (*connect)(struct gameport *, struct gameport_driver *drv); + int (*reconnect)(struct gameport *); + void (*disconnect)(struct gameport *); + + struct device_driver driver; + + bool ignore; +}; +#define to_gameport_driver(d) container_of(d, struct gameport_driver, driver) + +int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode); +void gameport_close(struct gameport *gameport); + +#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) + +void __gameport_register_port(struct gameport *gameport, struct module *owner); +/* use a define to avoid include chaining to get THIS_MODULE */ +#define gameport_register_port(gameport) \ + __gameport_register_port(gameport, THIS_MODULE) + +void gameport_unregister_port(struct gameport *gameport); + +__printf(2, 3) +void gameport_set_phys(struct gameport *gameport, const char *fmt, ...); + +#else + +static inline void gameport_register_port(struct gameport *gameport) +{ + return; +} + +static inline void gameport_unregister_port(struct gameport *gameport) +{ + return; +} + +static inline __printf(2, 3) +void gameport_set_phys(struct gameport *gameport, const char *fmt, ...) +{ + return; +} + +#endif + +static inline struct gameport *gameport_allocate_port(void) +{ + struct gameport *gameport = kzalloc(sizeof(struct gameport), GFP_KERNEL); + + return gameport; +} + +static inline void gameport_free_port(struct gameport *gameport) +{ + kfree(gameport); +} + +static inline void gameport_set_name(struct gameport *gameport, const char *name) +{ + strlcpy(gameport->name, name, sizeof(gameport->name)); +} + +/* + * Use the following functions to manipulate gameport's per-port + * driver-specific data. 
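For orientation, here is a minimal sketch of how a card driver might publish a port through the registration calls declared above (the function name, port name and I/O base are hypothetical; error handling beyond allocation is omitted):

/* Allocate, label and register a gameport at a legacy ISA I/O address. */
static struct gameport *example_register_gameport(int io)
{
        struct gameport *gp = gameport_allocate_port();

        if (!gp)
                return NULL;

        gameport_set_name(gp, "Example Gameport");
        gameport_set_phys(gp, "isa%04x/gameport0", io);
        gp->io = io;

        gameport_register_port(gp);     /* now visible to gameport drivers */
        return gp;
}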
+ */ +static inline void *gameport_get_drvdata(struct gameport *gameport) +{ + return dev_get_drvdata(&gameport->dev); +} + +static inline void gameport_set_drvdata(struct gameport *gameport, void *data) +{ + dev_set_drvdata(&gameport->dev, data); +} + +/* + * Use the following functions to pin gameport's driver in process context + */ +static inline int gameport_pin_driver(struct gameport *gameport) +{ + return mutex_lock_interruptible(&gameport->drv_mutex); +} + +static inline void gameport_unpin_driver(struct gameport *gameport) +{ + mutex_unlock(&gameport->drv_mutex); +} + +int __must_check __gameport_register_driver(struct gameport_driver *drv, + struct module *owner, const char *mod_name); + +/* use a define to avoid include chaining to get THIS_MODULE & friends */ +#define gameport_register_driver(drv) \ + __gameport_register_driver(drv, THIS_MODULE, KBUILD_MODNAME) + +void gameport_unregister_driver(struct gameport_driver *drv); + +#endif /* __KERNEL__ */ + +#define GAMEPORT_MODE_DISABLED 0 +#define GAMEPORT_MODE_RAW 1 +#define GAMEPORT_MODE_COOKED 2 + +#define GAMEPORT_ID_VENDOR_ANALOG 0x0001 +#define GAMEPORT_ID_VENDOR_MADCATZ 0x0002 +#define GAMEPORT_ID_VENDOR_LOGITECH 0x0003 +#define GAMEPORT_ID_VENDOR_CREATIVE 0x0004 +#define GAMEPORT_ID_VENDOR_GENIUS 0x0005 +#define GAMEPORT_ID_VENDOR_INTERACT 0x0006 +#define GAMEPORT_ID_VENDOR_MICROSOFT 0x0007 +#define GAMEPORT_ID_VENDOR_THRUSTMASTER 0x0008 +#define GAMEPORT_ID_VENDOR_GRAVIS 0x0009 +#define GAMEPORT_ID_VENDOR_GUILLEMOT 0x000a + +#ifdef __KERNEL__ + +static inline void gameport_trigger(struct gameport *gameport) +{ + if (gameport->trigger) + gameport->trigger(gameport); + else + outb(0xff, gameport->io); +} + +static inline unsigned char gameport_read(struct gameport *gameport) +{ + if (gameport->read) + return gameport->read(gameport); + else + return inb(gameport->io); +} + +static inline int gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons) +{ + if (gameport->cooked_read) + return gameport->cooked_read(gameport, axes, buttons); + else + return -1; +} + +static inline int gameport_calibrate(struct gameport *gameport, int *axes, int *max) +{ + if (gameport->calibrate) + return gameport->calibrate(gameport, axes, max); + else + return -1; +} + +static inline int gameport_time(struct gameport *gameport, int time) +{ + return (time * gameport->speed) / 1000; +} + +static inline void gameport_set_poll_handler(struct gameport *gameport, void (*handler)(struct gameport *)) +{ + gameport->poll_handler = handler; +} + +static inline void gameport_set_poll_interval(struct gameport *gameport, unsigned int msecs) +{ + gameport->poll_interval = msecs; +} + +void gameport_start_polling(struct gameport *gameport); +void gameport_stop_polling(struct gameport *gameport); + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/gcd.h b/include/linux/gcd.h new file mode 100644 index 00000000..69f5e8a0 --- /dev/null +++ b/include/linux/gcd.h @@ -0,0 +1,8 @@ +#ifndef _GCD_H +#define _GCD_H + +#include <linux/compiler.h> + +unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__; + +#endif /* _GCD_H */ diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h new file mode 100644 index 00000000..552c8a0a --- /dev/null +++ b/include/linux/gen_stats.h @@ -0,0 +1,67 @@ +#ifndef __LINUX_GEN_STATS_H +#define __LINUX_GEN_STATS_H + +#include <linux/types.h> + +enum { + TCA_STATS_UNSPEC, + TCA_STATS_BASIC, + TCA_STATS_RATE_EST, + TCA_STATS_QUEUE, + TCA_STATS_APP, + __TCA_STATS_MAX, +}; +#define 
TCA_STATS_MAX (__TCA_STATS_MAX - 1) + +/** + * struct gnet_stats_basic - byte/packet throughput statistics + * @bytes: number of seen bytes + * @packets: number of seen packets + */ +struct gnet_stats_basic { + __u64 bytes; + __u32 packets; +}; +struct gnet_stats_basic_packed { + __u64 bytes; + __u32 packets; +} __attribute__ ((packed)); + +/** + * struct gnet_stats_rate_est - rate estimator + * @bps: current byte rate + * @pps: current packet rate + */ +struct gnet_stats_rate_est { + __u32 bps; + __u32 pps; +}; + +/** + * struct gnet_stats_queue - queuing statistics + * @qlen: queue length + * @backlog: backlog size of queue + * @drops: number of dropped packets + * @requeues: number of requeues + * @overlimits: number of enqueues over the limit + */ +struct gnet_stats_queue { + __u32 qlen; + __u32 backlog; + __u32 drops; + __u32 requeues; + __u32 overlimits; +}; + +/** + * struct gnet_estimator - rate estimator configuration + * @interval: sampling period + * @ewma_log: the log of measurement window weight + */ +struct gnet_estimator { + signed char interval; + unsigned char ewma_log; +}; + + +#endif /* __LINUX_GEN_STATS_H */ diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h new file mode 100644 index 00000000..5e98eeb2 --- /dev/null +++ b/include/linux/genalloc.h @@ -0,0 +1,81 @@ +/* + * Basic general purpose allocator for managing special purpose + * memory, for example, memory that is not managed by the regular + * kmalloc/kfree interface. Uses for this include on-device special + * memory, uncached memory, etc. + * + * It is safe to use the allocator in NMI handlers and other special + * unblockable contexts that could otherwise deadlock on locks. This + * is implemented by using atomic operations and retries on any + * conflicts. The disadvantage is that there may be livelocks in + * extreme cases. For better scalability, one allocator can be used + * for each CPU. + * + * The lockless operation only works if there is enough memory + * available. If new memory is added to the pool, a lock still has + * to be taken. So any user relying on locklessness has to ensure + * that sufficient memory is preallocated. + * + * The basic atomic operation of this allocator is cmpxchg on long. + * On architectures that don't have an NMI-safe cmpxchg implementation, + * the allocator can NOT be used in an NMI handler. So code that uses + * the allocator in an NMI handler should depend on + * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + + +#ifndef __GENALLOC_H__ +#define __GENALLOC_H__ +/* + * General purpose special memory pool descriptor. + */ +struct gen_pool { + spinlock_t lock; + struct list_head chunks; /* list of chunks in this pool */ + int min_alloc_order; /* minimum allocation order */ +}; + +/* + * General purpose special memory pool chunk descriptor.
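A minimal usage sketch for the pool API declared just below, under the assumption of an already-mapped on-chip SRAM region (the function name and the region parameters are hypothetical):

#include <linux/genalloc.h>

/* Wrap an already-mapped SRAM region in a gen_pool with 32-byte (2^5)
 * minimum allocation granularity, on any NUMA node. */
static struct gen_pool *sram_pool_create(unsigned long virt,
                                         phys_addr_t phys, size_t size)
{
        struct gen_pool *pool = gen_pool_create(5, -1);

        if (!pool)
                return NULL;
        if (gen_pool_add_virt(pool, virt, phys, size, -1)) {
                gen_pool_destroy(pool);
                return NULL;
        }
        return pool;
}

/* Callers then carve allocations out of the pool:
 *      unsigned long buf = gen_pool_alloc(pool, 256);
 *      ...
 *      gen_pool_free(pool, buf, 256);
 */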
+ */ +struct gen_pool_chunk { + struct list_head next_chunk; /* next chunk in pool */ + atomic_t avail; + phys_addr_t phys_addr; /* physical starting address of memory chunk */ + unsigned long start_addr; /* starting address of memory chunk */ + unsigned long end_addr; /* ending address of memory chunk */ + unsigned long bits[0]; /* bitmap for allocating memory chunk */ +}; + +extern struct gen_pool *gen_pool_create(int, int); +extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long); +extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t, + size_t, int); +/** + * gen_pool_add - add a new chunk of special memory to the pool + * @pool: pool to add new memory chunk to + * @addr: starting address of memory chunk to add to pool + * @size: size in bytes of the memory chunk to add to pool + * @nid: node id of the node the chunk structure and bitmap should be + * allocated on, or -1 + * + * Add a new chunk of special memory to the specified pool. + * + * Returns 0 on success or a -ve errno on failure. + */ +static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr, + size_t size, int nid) +{ + return gen_pool_add_virt(pool, addr, -1, size, nid); +} +extern void gen_pool_destroy(struct gen_pool *); +extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); +extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); +extern void gen_pool_for_each_chunk(struct gen_pool *, + void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *); +extern size_t gen_pool_avail(struct gen_pool *); +extern size_t gen_pool_size(struct gen_pool *); +#endif /* __GENALLOC_H__ */ diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h new file mode 100644 index 00000000..b6d65754 --- /dev/null +++ b/include/linux/generic_acl.h @@ -0,0 +1,14 @@ +#ifndef LINUX_GENERIC_ACL_H +#define LINUX_GENERIC_ACL_H + +#include <linux/xattr.h> + +struct inode; + +extern const struct xattr_handler generic_acl_access_handler; +extern const struct xattr_handler generic_acl_default_handler; + +int generic_acl_init(struct inode *, struct inode *); +int generic_acl_chmod(struct inode *); + +#endif /* LINUX_GENERIC_ACL_H */ diff --git a/include/linux/generic_serial.h b/include/linux/generic_serial.h new file mode 100644 index 00000000..fadff285 --- /dev/null +++ b/include/linux/generic_serial.h @@ -0,0 +1,95 @@ +/* + * generic_serial.h + * + * Copyright (C) 1998 R.E.Wolff@BitWizard.nl + * + * written for the SX serial driver. + * Contains the code that should be shared over all the serial drivers. + * + * Version 0.1 -- December, 1998. 
+ */ + +#ifndef GENERIC_SERIAL_H +#define GENERIC_SERIAL_H + +#ifdef __KERNEL__ +#include <linux/mutex.h> +#include <linux/tty.h> + +struct real_driver { + void (*disable_tx_interrupts) (void *); + void (*enable_tx_interrupts) (void *); + void (*disable_rx_interrupts) (void *); + void (*enable_rx_interrupts) (void *); + void (*shutdown_port) (void*); + int (*set_real_termios) (void*); + int (*chars_in_buffer) (void*); + void (*close) (void*); + void (*hungup) (void*); + void (*getserial) (void*, struct serial_struct *sp); +}; + + + +struct gs_port { + int magic; + struct tty_port port; + unsigned char *xmit_buf; + int xmit_head; + int xmit_tail; + int xmit_cnt; + struct mutex port_write_mutex; + unsigned long event; + unsigned short closing_wait; + int close_delay; + struct real_driver *rd; + int wakeup_chars; + int baud_base; + int baud; + int custom_divisor; + spinlock_t driver_lock; +}; + +#endif /* __KERNEL__ */ + +/* Flags */ +/* Warning: serial.h defines some ASYNC_ flags, they say they are "only" + used in serial.c, but they are also used in all other serial drivers. + Make sure they don't clash with these here... */ +#define GS_TX_INTEN 0x00800000 +#define GS_RX_INTEN 0x00400000 +#define GS_ACTIVE 0x00200000 + + + +#define GS_TYPE_NORMAL 1 + +#define GS_DEBUG_FLUSH 0x00000001 +#define GS_DEBUG_BTR 0x00000002 +#define GS_DEBUG_TERMIOS 0x00000004 +#define GS_DEBUG_STUFF 0x00000008 +#define GS_DEBUG_CLOSE 0x00000010 +#define GS_DEBUG_FLOW 0x00000020 +#define GS_DEBUG_WRITE 0x00000040 + +#ifdef __KERNEL__ +int gs_put_char(struct tty_struct *tty, unsigned char ch); +int gs_write(struct tty_struct *tty, + const unsigned char *buf, int count); +int gs_write_room(struct tty_struct *tty); +int gs_chars_in_buffer(struct tty_struct *tty); +void gs_flush_buffer(struct tty_struct *tty); +void gs_flush_chars(struct tty_struct *tty); +void gs_stop(struct tty_struct *tty); +void gs_start(struct tty_struct *tty); +void gs_hangup(struct tty_struct *tty); +int gs_block_til_ready(void *port, struct file *filp); +void gs_close(struct tty_struct *tty, struct file *filp); +void gs_set_termios (struct tty_struct * tty, + struct ktermios * old_termios); +int gs_init_port(struct gs_port *port); +int gs_setserial(struct gs_port *port, struct serial_struct __user *sp); +int gs_getserial(struct gs_port *port, struct serial_struct __user *sp); +void gs_got_break(struct gs_port *port); +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h new file mode 100644 index 00000000..73c28dea --- /dev/null +++ b/include/linux/genetlink.h @@ -0,0 +1,115 @@ +#ifndef __LINUX_GENERIC_NETLINK_H +#define __LINUX_GENERIC_NETLINK_H + +#include <linux/types.h> +#include <linux/netlink.h> + +#define GENL_NAMSIZ 16 /* length of family name */ + +#define GENL_MIN_ID NLMSG_MIN_TYPE +#define GENL_MAX_ID 1023 + +struct genlmsghdr { + __u8 cmd; + __u8 version; + __u16 reserved; +}; + +#define GENL_HDRLEN NLMSG_ALIGN(sizeof(struct genlmsghdr)) + +#define GENL_ADMIN_PERM 0x01 +#define GENL_CMD_CAP_DO 0x02 +#define GENL_CMD_CAP_DUMP 0x04 +#define GENL_CMD_CAP_HASPOL 0x08 + +/* + * List of reserved static generic netlink identifiers: + */ +#define GENL_ID_GENERATE 0 +#define GENL_ID_CTRL NLMSG_MIN_TYPE + +/************************************************************************** + * Controller + **************************************************************************/ + +enum { + CTRL_CMD_UNSPEC, + CTRL_CMD_NEWFAMILY, + CTRL_CMD_DELFAMILY, + CTRL_CMD_GETFAMILY, + CTRL_CMD_NEWOPS, + 
CTRL_CMD_DELOPS, + CTRL_CMD_GETOPS, + CTRL_CMD_NEWMCAST_GRP, + CTRL_CMD_DELMCAST_GRP, + CTRL_CMD_GETMCAST_GRP, /* unused */ + __CTRL_CMD_MAX, +}; + +#define CTRL_CMD_MAX (__CTRL_CMD_MAX - 1) + +enum { + CTRL_ATTR_UNSPEC, + CTRL_ATTR_FAMILY_ID, + CTRL_ATTR_FAMILY_NAME, + CTRL_ATTR_VERSION, + CTRL_ATTR_HDRSIZE, + CTRL_ATTR_MAXATTR, + CTRL_ATTR_OPS, + CTRL_ATTR_MCAST_GROUPS, + __CTRL_ATTR_MAX, +}; + +#define CTRL_ATTR_MAX (__CTRL_ATTR_MAX - 1) + +enum { + CTRL_ATTR_OP_UNSPEC, + CTRL_ATTR_OP_ID, + CTRL_ATTR_OP_FLAGS, + __CTRL_ATTR_OP_MAX, +}; + +#define CTRL_ATTR_OP_MAX (__CTRL_ATTR_OP_MAX - 1) + +enum { + CTRL_ATTR_MCAST_GRP_UNSPEC, + CTRL_ATTR_MCAST_GRP_NAME, + CTRL_ATTR_MCAST_GRP_ID, + __CTRL_ATTR_MCAST_GRP_MAX, +}; + +#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1) + +#ifdef __KERNEL__ + +/* All generic netlink requests are serialized by a global lock. */ +extern void genl_lock(void); +extern void genl_unlock(void); +#ifdef CONFIG_PROVE_LOCKING +extern int lockdep_genl_is_held(void); +#endif + +/** + * rcu_dereference_genl - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * + * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() + * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference() + */ +#define rcu_dereference_genl(p) \ + rcu_dereference_check(p, lockdep_genl_is_held()) + +/** + * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex + * @p: The pointer to read, prior to dereferencing + * + * Return the value of the specified RCU-protected pointer, but omit + * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because + * caller holds genl mutex. + */ +#define genl_dereference(p) \ + rcu_dereference_protected(p, lockdep_genl_is_held()) + +#endif /* __KERNEL__ */ + +#endif /* __LINUX_GENERIC_NETLINK_H */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h new file mode 100644 index 00000000..017a7fb5 --- /dev/null +++ b/include/linux/genhd.h @@ -0,0 +1,663 @@ +#ifndef _LINUX_GENHD_H +#define _LINUX_GENHD_H + +/* + * genhd.h Copyright (C) 1992 Drew Eckhardt + * Generic hard disk header file by + * Drew Eckhardt + * + * <drew@colorado.edu> + */ + +#include <linux/types.h> +#include <linux/kdev_t.h> +#include <linux/rcupdate.h> +#include <linux/slab.h> + +#ifdef CONFIG_BLOCK + +#define kobj_to_dev(k) container_of((k), struct device, kobj) +#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) +#define dev_to_part(device) container_of((device), struct hd_struct, __dev) +#define disk_to_dev(disk) (&(disk)->part0.__dev) +#define part_to_dev(part) (&((part)->__dev)) + +extern struct device_type part_type; +extern struct kobject *block_depr; +extern struct class block_class; + +enum { +/* These three have identical behaviour; use the second one if DOS FDISK gets + confused about extended/logical partitions starting past cylinder 1023. 
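To illustrate the two genetlink accessors documented above, here is a sketch of a reader path and an updater path for an RCU-managed pointer whose updates are serialized by the genl mutex (struct my_ops, active_ops and both functions are hypothetical):

#include <linux/genetlink.h>
#include <linux/rcupdate.h>

struct my_ops { int dummy; };

static struct my_ops __rcu *active_ops;

static void reader(void)
{
        struct my_ops *ops;

        rcu_read_lock();        /* rcu_read_lock() or the genl mutex suffices */
        ops = rcu_dereference_genl(active_ops);
        /* ... use ops ... */
        rcu_read_unlock();
}

static struct my_ops *swap_ops(struct my_ops *new_ops)
{
        struct my_ops *old;

        genl_lock();            /* updates run under the genl mutex */
        old = genl_dereference(active_ops);
        rcu_assign_pointer(active_ops, new_ops);
        genl_unlock();
        return old;             /* caller frees it after a grace period */
}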
*/ + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 0x85, + WIN98_EXTENDED_PARTITION = 0x0f, + + SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION, + + LINUX_SWAP_PARTITION = 0x82, + LINUX_DATA_PARTITION = 0x83, + LINUX_LVM_PARTITION = 0x8e, + LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */ + + SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION, + NEW_SOLARIS_X86_PARTITION = 0xbf, + + DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */ + DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */ + DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */ + EZD_PARTITION = 0x55, /* EZ-DRIVE */ + + FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */ + OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */ + NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */ + BSDI_PARTITION = 0xb7, /* BSDI Partition ID */ + MINIX_PARTITION = 0x81, /* Minix Partition ID */ + UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ +}; + +#define DISK_MAX_PARTS 256 +#define DISK_NAME_LEN 32 + +#include <linux/major.h> +#include <linux/device.h> +#include <linux/smp.h> +#include <linux/string.h> +#include <linux/fs.h> +#include <linux/workqueue.h> + +struct partition { + unsigned char boot_ind; /* 0x80 - active */ + unsigned char head; /* starting head */ + unsigned char sector; /* starting sector */ + unsigned char cyl; /* starting cylinder */ + unsigned char sys_ind; /* What partition type */ + unsigned char end_head; /* end head */ + unsigned char end_sector; /* end sector */ + unsigned char end_cyl; /* end cylinder */ + __le32 start_sect; /* starting sector counting from 0 */ + __le32 nr_sects; /* nr of sectors in partition */ +} __attribute__((packed)); + +struct disk_stats { + unsigned long sectors[2]; /* READs and WRITEs */ + unsigned long ios[2]; + unsigned long merges[2]; + unsigned long ticks[2]; + unsigned long io_ticks; + unsigned long time_in_queue; +}; + +#define PARTITION_META_INFO_VOLNAMELTH 64 +#define PARTITION_META_INFO_UUIDLTH 16 + +struct partition_meta_info { + u8 uuid[PARTITION_META_INFO_UUIDLTH]; /* always big endian */ + u8 volname[PARTITION_META_INFO_VOLNAMELTH]; +}; + +struct hd_struct { + sector_t start_sect; + sector_t nr_sects; + sector_t alignment_offset; + unsigned int discard_alignment; + struct device __dev; + struct kobject *holder_dir; + int policy, partno; + struct partition_meta_info *info; +#ifdef CONFIG_FAIL_MAKE_REQUEST + int make_it_fail; +#endif + unsigned long stamp; + atomic_t in_flight[2]; +#ifdef CONFIG_SMP + struct disk_stats __percpu *dkstats; +#else + struct disk_stats dkstats; +#endif + atomic_t ref; + struct rcu_head rcu_head; +}; + +#define GENHD_FL_REMOVABLE 1 +/* 2 is unused */ +#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 +#define GENHD_FL_CD 8 +#define GENHD_FL_UP 16 +#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 +#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ +#define GENHD_FL_NATIVE_CAPACITY 128 +#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256 +#define GENHD_FL_NO_PART_SCAN 512 + +enum { + DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ + DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */ +}; + +#define BLK_SCSI_MAX_CMDS (256) +#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) + +struct blk_scsi_cmd_filter { + unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; + unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; + struct kobject kobj; +}; + +struct disk_part_tbl { + struct rcu_head rcu_head; + int len; + struct hd_struct __rcu *last_lookup; + struct hd_struct __rcu *part[]; +}; + +struct disk_events; + +struct 
gendisk { + /* major, first_minor and minors are input parameters only, + * don't use directly. Use disk_devt() and disk_max_parts(). + */ + int major; /* major number of driver */ + int first_minor; + int minors; /* maximum number of minors, =1 for + * disks that can't be partitioned. */ + + char disk_name[DISK_NAME_LEN]; /* name of major driver */ + char *(*devnode)(struct gendisk *gd, umode_t *mode); + + unsigned int events; /* supported events */ + unsigned int async_events; /* async events, subset of all */ + + /* Array of pointers to partitions indexed by partno. + * Protected with matching bdev lock but stat and other + * non-critical accesses use RCU. Always access through + * helpers. + */ + struct disk_part_tbl __rcu *part_tbl; + struct hd_struct part0; + + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + + int flags; + struct device *driverfs_dev; // FIXME: remove + struct kobject *slave_dir; + + struct timer_rand_state *random; + atomic_t sync_io; /* RAID */ + struct disk_events *ev; +#ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity *integrity; +#endif + int node_id; +}; + +static inline struct gendisk *part_to_disk(struct hd_struct *part) +{ + if (likely(part)) { + if (part->partno) + return dev_to_disk(part_to_dev(part)->parent); + else + return dev_to_disk(part_to_dev(part)); + } + return NULL; +} + +static inline void part_pack_uuid(const u8 *uuid_str, u8 *to) +{ + int i; + for (i = 0; i < 16; ++i) { + *to++ = (hex_to_bin(*uuid_str) << 4) | + (hex_to_bin(*(uuid_str + 1))); + uuid_str += 2; + switch (i) { + case 3: + case 5: + case 7: + case 9: + uuid_str++; + continue; + } + } +} + +static inline int disk_max_parts(struct gendisk *disk) +{ + if (disk->flags & GENHD_FL_EXT_DEVT) + return DISK_MAX_PARTS; + return disk->minors; +} + +static inline bool disk_part_scan_enabled(struct gendisk *disk) +{ + return disk_max_parts(disk) > 1 && + !(disk->flags & GENHD_FL_NO_PART_SCAN); +} + +static inline dev_t disk_devt(struct gendisk *disk) +{ + return disk_to_dev(disk)->devt; +} + +static inline dev_t part_devt(struct hd_struct *part) +{ + return part_to_dev(part)->devt; +} + +extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno); + +static inline void disk_put_part(struct hd_struct *part) +{ + if (likely(part)) + put_device(part_to_dev(part)); +} + +/* + * Smarter partition iterator without context limits. + */ +#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ +#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ +#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ +#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */ + +struct disk_part_iter { + struct gendisk *disk; + struct hd_struct *part; + int idx; + unsigned int flags; +}; + +extern void disk_part_iter_init(struct disk_part_iter *piter, + struct gendisk *disk, unsigned int flags); +extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter); +extern void disk_part_iter_exit(struct disk_part_iter *piter); + +extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, + sector_t sector); + +/* + * Macros to operate on percpu disk statistics: + * + * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters + * and should be called between disk_stat_lock() and + * disk_stat_unlock(). + * + * part_stat_read() can be called at any time. + * + * part_stat_{add|set_all}() and {init|free}_part_stats are for + * internal use only. 
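A short sketch of the locking pattern the comment above prescribes, using the helpers defined just below (the function is hypothetical; 'part', 'rw' and 'nr_sectors' stand in for values a real caller would have at hand):

/* Account one completed request against a partition; part_stat_add()
 * below also folds the numbers into part0 of the whole disk. */
static void example_account_io(struct hd_struct *part, int rw,
                               unsigned long nr_sectors)
{
        int cpu = part_stat_lock();     /* rcu_read_lock() + get_cpu() */

        part_stat_inc(cpu, part, ios[rw]);
        part_stat_add(cpu, part, sectors[rw], nr_sectors);
        part_stat_unlock();
}

/* Reads need no locking and may happen at any time, e.g.:
 *      unsigned long reads = part_stat_read(part, ios[0]);
 */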
+ */ +#ifdef CONFIG_SMP +#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) +#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) + +#define __part_stat_add(cpu, part, field, addnd) \ + (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd)) + +#define part_stat_read(part, field) \ +({ \ + typeof((part)->dkstats->field) res = 0; \ + unsigned int _cpu; \ + for_each_possible_cpu(_cpu) \ + res += per_cpu_ptr((part)->dkstats, _cpu)->field; \ + res; \ +}) + +static inline void part_stat_set_all(struct hd_struct *part, int value) +{ + int i; + + for_each_possible_cpu(i) + memset(per_cpu_ptr(part->dkstats, i), value, + sizeof(struct disk_stats)); +} + +static inline int init_part_stats(struct hd_struct *part) +{ + part->dkstats = alloc_percpu(struct disk_stats); + if (!part->dkstats) + return 0; + return 1; +} + +static inline void free_part_stats(struct hd_struct *part) +{ + free_percpu(part->dkstats); +} + +#else /* !CONFIG_SMP */ +#define part_stat_lock() ({ rcu_read_lock(); 0; }) +#define part_stat_unlock() rcu_read_unlock() + +#define __part_stat_add(cpu, part, field, addnd) \ + ((part)->dkstats.field += addnd) + +#define part_stat_read(part, field) ((part)->dkstats.field) + +static inline void part_stat_set_all(struct hd_struct *part, int value) +{ + memset(&part->dkstats, value, sizeof(struct disk_stats)); +} + +static inline int init_part_stats(struct hd_struct *part) +{ + return 1; +} + +static inline void free_part_stats(struct hd_struct *part) +{ +} + +#endif /* CONFIG_SMP */ + +#define part_stat_add(cpu, part, field, addnd) do { \ + __part_stat_add((cpu), (part), field, addnd); \ + if ((part)->partno) \ + __part_stat_add((cpu), &part_to_disk((part))->part0, \ + field, addnd); \ +} while (0) + +#define part_stat_dec(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, -1) +#define part_stat_inc(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, 1) +#define part_stat_sub(cpu, gendiskp, field, subnd) \ + part_stat_add(cpu, gendiskp, field, -subnd) + +static inline void part_inc_in_flight(struct hd_struct *part, int rw) +{ + atomic_inc(&part->in_flight[rw]); + if (part->partno) + atomic_inc(&part_to_disk(part)->part0.in_flight[rw]); +} + +static inline void part_dec_in_flight(struct hd_struct *part, int rw) +{ + atomic_dec(&part->in_flight[rw]); + if (part->partno) + atomic_dec(&part_to_disk(part)->part0.in_flight[rw]); +} + +static inline int part_in_flight(struct hd_struct *part) +{ + return atomic_read(&part->in_flight[0]) + atomic_read(&part->in_flight[1]); +} + +static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk) +{ + if (disk) + return kzalloc_node(sizeof(struct partition_meta_info), + GFP_KERNEL, disk->node_id); + return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL); +} + +static inline void free_part_info(struct hd_struct *part) +{ + kfree(part->info); +} + +/* block/blk-core.c */ +extern void part_round_stats(int cpu, struct hd_struct *part); + +/* block/genhd.c */ +extern void add_disk(struct gendisk *disk); +extern void del_gendisk(struct gendisk *gp); +extern struct gendisk *get_gendisk(dev_t dev, int *partno); +extern struct block_device *bdget_disk(struct gendisk *disk, int partno); + +extern void set_device_ro(struct block_device *bdev, int flag); +extern void set_disk_ro(struct gendisk *disk, int flag); + +static inline int get_disk_ro(struct gendisk *disk) +{ + return disk->part0.policy; +} + +extern void disk_block_events(struct gendisk *disk); +extern void disk_unblock_events(struct 
gendisk *disk); +extern void disk_flush_events(struct gendisk *disk, unsigned int mask); +extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); + +/* drivers/char/random.c */ +extern void add_disk_randomness(struct gendisk *disk); +extern void rand_initialize_disk(struct gendisk *disk); + +static inline sector_t get_start_sect(struct block_device *bdev) +{ + return bdev->bd_part->start_sect; +} +static inline sector_t get_capacity(struct gendisk *disk) +{ + return disk->part0.nr_sects; +} +static inline void set_capacity(struct gendisk *disk, sector_t size) +{ + disk->part0.nr_sects = size; +} + +#ifdef CONFIG_SOLARIS_X86_PARTITION + +#define SOLARIS_X86_NUMSLICE 16 +#define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL) + +struct solaris_x86_slice { + __le16 s_tag; /* ID tag of partition */ + __le16 s_flag; /* permission flags */ + __le32 s_start; /* start sector no of partition */ + __le32 s_size; /* # of blocks in partition */ +}; + +struct solaris_x86_vtoc { + unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */ + __le32 v_sanity; /* to verify vtoc sanity */ + __le32 v_version; /* layout version */ + char v_volume[8]; /* volume name */ + __le16 v_sectorsz; /* sector size in bytes */ + __le16 v_nparts; /* number of partitions */ + unsigned int v_reserved[10]; /* free space */ + struct solaris_x86_slice + v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */ + unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */ + char v_asciilabel[128]; /* for compatibility */ +}; + +#endif /* CONFIG_SOLARIS_X86_PARTITION */ + +#ifdef CONFIG_BSD_DISKLABEL +/* + * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il> + * updated by Marc Espie <Marc.Espie@openbsd.org> + */ + +/* check against BSD src/sys/sys/disklabel.h for consistency */ + +#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */ +#define BSD_MAXPARTITIONS 16 +#define OPENBSD_MAXPARTITIONS 16 +#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */ +struct bsd_disklabel { + __le32 d_magic; /* the magic number */ + __s16 d_type; /* drive type */ + __s16 d_subtype; /* controller/d_type specific */ + char d_typename[16]; /* type name, e.g. "eagle" */ + char d_packname[16]; /* pack identifier */ + __u32 d_secsize; /* # of bytes per sector */ + __u32 d_nsectors; /* # of data sectors per track */ + __u32 d_ntracks; /* # of tracks per cylinder */ + __u32 d_ncylinders; /* # of data cylinders per unit */ + __u32 d_secpercyl; /* # of data sectors per cylinder */ + __u32 d_secperunit; /* # of data sectors per unit */ + __u16 d_sparespertrack; /* # of spare sectors per track */ + __u16 d_sparespercyl; /* # of spare sectors per cylinder */ + __u32 d_acylinders; /* # of alt. cylinders per unit */ + __u16 d_rpm; /* rotational speed */ + __u16 d_interleave; /* hardware sector interleave */ + __u16 d_trackskew; /* sector 0 skew, per track */ + __u16 d_cylskew; /* sector 0 skew, per cylinder */ + __u32 d_headswitch; /* head switch time, usec */ + __u32 d_trkseek; /* track-to-track seek, usec */ + __u32 d_flags; /* generic flags */ +#define NDDATA 5 + __u32 d_drivedata[NDDATA]; /* drive-type specific information */ +#define NSPARE 5 + __u32 d_spare[NSPARE]; /* reserved for future use */ + __le32 d_magic2; /* the magic number (again) */ + __le16 d_checksum; /* xor of data incl. 
partitions */ + + /* filesystem and partition information: */ + __le16 d_npartitions; /* number of partitions in following */ + __le32 d_bbsize; /* size of boot area at sn0, bytes */ + __le32 d_sbsize; /* max size of fs superblock, bytes */ + struct bsd_partition { /* the partition table */ + __le32 p_size; /* number of sectors in partition */ + __le32 p_offset; /* starting sector */ + __le32 p_fsize; /* filesystem basic fragment size */ + __u8 p_fstype; /* filesystem type, see below */ + __u8 p_frag; /* filesystem fragments per block */ + __le16 p_cpg; /* filesystem cylinders per group */ + } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */ +}; + +#endif /* CONFIG_BSD_DISKLABEL */ + +#ifdef CONFIG_UNIXWARE_DISKLABEL +/* + * Unixware slices support by Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl> + * and Krzysztof G. Baranowski <kgb@knm.org.pl> + */ + +#define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */ +#define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */ +#define UNIXWARE_NUMSLICE 16 +#define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */ + +struct unixware_slice { + __le16 s_label; /* label */ + __le16 s_flags; /* permission flags */ + __le32 start_sect; /* starting sector */ + __le32 nr_sects; /* number of sectors in slice */ +}; + +struct unixware_disklabel { + __le32 d_type; /* drive type */ + __le32 d_magic; /* the magic number */ + __le32 d_version; /* version number */ + char d_serial[12]; /* serial number of the device */ + __le32 d_ncylinders; /* # of data cylinders per device */ + __le32 d_ntracks; /* # of tracks per cylinder */ + __le32 d_nsectors; /* # of data sectors per track */ + __le32 d_secsize; /* # of bytes per sector */ + __le32 d_part_start; /* # of first sector of this partition */ + __le32 d_unknown1[12]; /* ? */ + __le32 d_alt_tbl; /* byte offset of alternate table */ + __le32 d_alt_len; /* byte length of alternate table */ + __le32 d_phys_cyl; /* # of physical cylinders per device */ + __le32 d_phys_trk; /* # of physical tracks per cylinder */ + __le32 d_phys_sec; /* # of physical sectors per track */ + __le32 d_phys_bytes; /* # of physical bytes per sector */ + __le32 d_unknown2; /* ? */ + __le32 d_unknown3; /* ? */ + __le32 d_pad[8]; /* pad */ + + struct unixware_vtoc { + __le32 v_magic; /* the magic number */ + __le32 v_version; /* version number */ + char v_name[8]; /* volume name */ + __le16 v_nslices; /* # of slices */ + __le16 v_unknown1; /* ? 
*/ + __le32 v_reserved[10]; /* reserved */ + struct unixware_slice + v_slice[UNIXWARE_NUMSLICE]; /* slice headers */ + } vtoc; + +}; /* 408 */ + +#endif /* CONFIG_UNIXWARE_DISKLABEL */ + +#ifdef CONFIG_MINIX_SUBPARTITION +# define MINIX_NR_SUBPARTITIONS 4 +#endif /* CONFIG_MINIX_SUBPARTITION */ + +#define ADDPART_FLAG_NONE 0 +#define ADDPART_FLAG_RAID 1 +#define ADDPART_FLAG_WHOLEDISK 2 + +extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt); +extern void blk_free_devt(dev_t devt); +extern dev_t blk_lookup_devt(const char *name, int partno); +extern char *disk_name (struct gendisk *hd, int partno, char *buf); + +extern int disk_expand_part_tbl(struct gendisk *disk, int target); +extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); +extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev); +extern struct hd_struct * __must_check add_partition(struct gendisk *disk, + int partno, sector_t start, + sector_t len, int flags, + struct partition_meta_info + *info); +extern void __delete_partition(struct hd_struct *); +extern void delete_partition(struct gendisk *, int); +extern void printk_all_partitions(void); + +extern struct gendisk *alloc_disk_node(int minors, int node_id); +extern struct gendisk *alloc_disk(int minors); +extern struct kobject *get_disk(struct gendisk *disk); +extern void put_disk(struct gendisk *disk); +extern void blk_register_region(dev_t devt, unsigned long range, + struct module *module, + struct kobject *(*probe)(dev_t, int *, void *), + int (*lock)(dev_t, void *), + void *data); +extern void blk_unregister_region(dev_t devt, unsigned long range); + +extern ssize_t part_size_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_stat_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_inflight_show(struct device *dev, + struct device_attribute *attr, char *buf); +#ifdef CONFIG_FAIL_MAKE_REQUEST +extern ssize_t part_fail_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_fail_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +#endif /* CONFIG_FAIL_MAKE_REQUEST */ + +static inline void hd_ref_init(struct hd_struct *part) +{ + atomic_set(&part->ref, 1); + smp_mb(); +} + +static inline void hd_struct_get(struct hd_struct *part) +{ + atomic_inc(&part->ref); + smp_mb__after_atomic_inc(); +} + +static inline int hd_struct_try_get(struct hd_struct *part) +{ + return atomic_inc_not_zero(&part->ref); +} + +static inline void hd_struct_put(struct hd_struct *part) +{ + if (atomic_dec_and_test(&part->ref)) + __delete_partition(part); +} + +#else /* CONFIG_BLOCK */ + +static inline void printk_all_partitions(void) { } + +static inline dev_t blk_lookup_devt(const char *name, int partno) +{ + dev_t devt = MKDEV(0, 0); + return devt; +} + +#endif /* CONFIG_BLOCK */ + +#endif /* _LINUX_GENHD_H */ diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h new file mode 100644 index 00000000..c7372d7a --- /dev/null +++ b/include/linux/getcpu.h @@ -0,0 +1,18 @@ +#ifndef _LINUX_GETCPU_H +#define _LINUX_GETCPU_H 1 + +/* Cache for getcpu() to speed it up. Results might be a short time + out of date, but will be faster. + + User programs should not refer to the contents of this structure. + I repeat they should not refer to it. If they do they will break + in future kernels. + + It is only a private cache for vgetcpu(). It will change in future kernels. 
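+
+   Illustrative call from user space (editorial sketch, not part of the
+   original header; assumes the syscall(2) wrapper and <sys/syscall.h>):
+
+       static __thread struct getcpu_cache cache;
+       unsigned cpu, node;
+       syscall(SYS_getcpu, &cpu, &node, &cache);
+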
+ The user program must store this information per thread (__thread) + If you want 100% accurate information pass NULL instead. */ +struct getcpu_cache { + unsigned long blob[128 / sizeof(long)]; +}; + +#endif diff --git a/include/linux/gfp.h b/include/linux/gfp.h new file mode 100644 index 00000000..581e74b7 --- /dev/null +++ b/include/linux/gfp.h @@ -0,0 +1,394 @@ +#ifndef __LINUX_GFP_H +#define __LINUX_GFP_H + +#include <linux/mmzone.h> +#include <linux/stddef.h> +#include <linux/linkage.h> +#include <linux/topology.h> +#include <linux/mmdebug.h> + +struct vm_area_struct; + +/* Plain integer GFP bitmasks. Do not use this directly. */ +#define ___GFP_DMA 0x01u +#define ___GFP_HIGHMEM 0x02u +#define ___GFP_DMA32 0x04u +#define ___GFP_MOVABLE 0x08u +#define ___GFP_WAIT 0x10u +#define ___GFP_HIGH 0x20u +#define ___GFP_IO 0x40u +#define ___GFP_FS 0x80u +#define ___GFP_COLD 0x100u +#define ___GFP_NOWARN 0x200u +#define ___GFP_REPEAT 0x400u +#define ___GFP_NOFAIL 0x800u +#define ___GFP_NORETRY 0x1000u +#define ___GFP_COMP 0x4000u +#define ___GFP_ZERO 0x8000u +#define ___GFP_NOMEMALLOC 0x10000u +#define ___GFP_HARDWALL 0x20000u +#define ___GFP_THISNODE 0x40000u +#define ___GFP_RECLAIMABLE 0x80000u +#ifdef CONFIG_KMEMCHECK +#define ___GFP_NOTRACK 0x200000u +#else +#define ___GFP_NOTRACK 0 +#endif +#define ___GFP_NO_KSWAPD 0x400000u +#define ___GFP_OTHER_NODE 0x800000u +#define ___GFP_WRITE 0x1000000u + +/* + * GFP bitmasks.. + * + * Zone modifiers (see linux/mmzone.h - low three bits) + * + * Do not put any conditional on these. If necessary modify the definitions + * without the underscores and use them consistently. The definitions here may + * be used in bit comparisons. + */ +#define __GFP_DMA ((__force gfp_t)___GFP_DMA) +#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) +#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) +#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ +#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) +/* + * Action modifiers - doesn't change the zoning + * + * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt + * _might_ fail. This depends upon the particular VM implementation. + * + * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller + * cannot handle allocation failures. This modifier is deprecated and no new + * users should be added. + * + * __GFP_NORETRY: The VM implementation must not retry indefinitely. + * + * __GFP_MOVABLE: Flag that this page will be movable by the page migration + * mechanism or reclaimed + */ +#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ +#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ +#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ +#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? 
*/ +#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */ +#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */ +#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ +#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ +#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ +#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ +#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ +#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */ +#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ +#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ +#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ +#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ + +#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) +#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ +#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ + +/* + * This may seem redundant, but it's a way of annotating false positives vs. + * allocations that simply cannot be supported (e.g. page tables). + */ +#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) + +#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */ +#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) + +/* This equals 0, but use constants in case they ever change */ +#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) +/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */ +#define GFP_ATOMIC (__GFP_HIGH) +#define GFP_NOIO (__GFP_WAIT) +#define GFP_NOFS (__GFP_WAIT | __GFP_IO) +#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) +#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ + __GFP_RECLAIMABLE) +#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ + __GFP_HIGHMEM) +#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ + __GFP_HARDWALL | __GFP_HIGHMEM | \ + __GFP_MOVABLE) +#define GFP_IOFS (__GFP_IO | __GFP_FS) +#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ + __GFP_NO_KSWAPD) + +#ifdef CONFIG_NUMA +#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) +#else +#define GFP_THISNODE ((__force gfp_t)0) +#endif + +/* This mask makes up all the page movable related flags */ +#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) + +/* Control page allocator reclaim behavior */ +#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ + __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ + __GFP_NORETRY|__GFP_NOMEMALLOC) + +/* Control slab gfp mask during early boot */ +#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) + +/* Control allocation constraints */ +#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) + +/* Do not use these with a slab allocator */ +#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) + +/* Flag - indicates that the buffer will be suitable for DMA. 
Ignored on some + platforms, used as appropriate on others */ + +#define GFP_DMA __GFP_DMA + +/* 4GB DMA on some platforms */ +#define GFP_DMA32 __GFP_DMA32 + +/* Convert GFP flags to their corresponding migrate type */ +static inline int allocflags_to_migratetype(gfp_t gfp_flags) +{ + WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); + + if (unlikely(page_group_by_mobility_disabled)) + return MIGRATE_UNMOVABLE; + + /* Group based on mobility */ + return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) | + ((gfp_flags & __GFP_RECLAIMABLE) != 0); +} + +#ifdef CONFIG_HIGHMEM +#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM +#else +#define OPT_ZONE_HIGHMEM ZONE_NORMAL +#endif + +#ifdef CONFIG_ZONE_DMA +#define OPT_ZONE_DMA ZONE_DMA +#else +#define OPT_ZONE_DMA ZONE_NORMAL +#endif + +#ifdef CONFIG_ZONE_DMA32 +#define OPT_ZONE_DMA32 ZONE_DMA32 +#else +#define OPT_ZONE_DMA32 ZONE_NORMAL +#endif + +/* + * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the + * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long + * and there are 16 of them to cover all possible combinations of + * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. + * + * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. + * But GFP_MOVABLE is not only a zone specifier but also an allocation + * policy. Therefore __GFP_MOVABLE plus another zone selector is valid. + * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1". + * + * bit result + * ================= + * 0x0 => NORMAL + * 0x1 => DMA or NORMAL + * 0x2 => HIGHMEM or NORMAL + * 0x3 => BAD (DMA+HIGHMEM) + * 0x4 => DMA32 or DMA or NORMAL + * 0x5 => BAD (DMA+DMA32) + * 0x6 => BAD (HIGHMEM+DMA32) + * 0x7 => BAD (HIGHMEM+DMA32+DMA) + * 0x8 => NORMAL (MOVABLE+0) + * 0x9 => DMA or NORMAL (MOVABLE+DMA) + * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) + * 0xb => BAD (MOVABLE+HIGHMEM+DMA) + * 0xc => DMA32 (MOVABLE+HIGHMEM+DMA32) + * 0xd => BAD (MOVABLE+DMA32+DMA) + * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) + * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) + * + * ZONES_SHIFT must be <= 2 on 32 bit platforms. + */ + +#if 16 * ZONES_SHIFT > BITS_PER_LONG +#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer +#endif + +#define GFP_ZONE_TABLE ( \ + (ZONE_NORMAL << 0 * ZONES_SHIFT) \ + | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \ + | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \ + | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \ + | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \ + | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \ + | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \ + | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \ +) + +/* + * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32 + * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per + * entry starting with bit 0. Bit is set if the combination is not + * allowed. 
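+ *
+ * Worked example (editorial note, not part of the original header):
+ * GFP_HIGHUSER_MOVABLE sets __GFP_HIGHMEM | __GFP_MOVABLE, so the low
+ * zone bits are 0xa; (GFP_ZONE_BAD >> 0xa) & 1 is 0 and the 0xa entry
+ * of GFP_ZONE_TABLE is ZONE_MOVABLE, which is what gfp_zone() below
+ * returns.  GFP_KERNEL sets no zone bits (0x0) and maps to ZONE_NORMAL.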
+ */ +#define GFP_ZONE_BAD ( \ + 1 << (___GFP_DMA | ___GFP_HIGHMEM) \ + | 1 << (___GFP_DMA | ___GFP_DMA32) \ + | 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \ + | 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \ + | 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \ + | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \ + | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \ + | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \ +) + +static inline enum zone_type gfp_zone(gfp_t flags) +{ + enum zone_type z; + int bit = (__force int) (flags & GFP_ZONEMASK); + + z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & + ((1 << ZONES_SHIFT) - 1); + VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); + return z; +} + +/* + * There is only one page-allocator function, and two main namespaces to + * it. The alloc_page*() variants return 'struct page *' and as such + * can allocate highmem pages, the *get*page*() variants return + * virtual kernel addresses to the allocated page(s). + */ + +static inline int gfp_zonelist(gfp_t flags) +{ + if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE)) + return 1; + + return 0; +} + +/* + * We get the zone list from the current node and the gfp_mask. + * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. + * There are two zonelists per node, one for all zones with memory and + * one containing just zones from the node the zonelist belongs to. + * + * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets + * optimized to &contig_page_data at compile-time. + */ +static inline struct zonelist *node_zonelist(int nid, gfp_t flags) +{ + return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags); +} + +#ifndef HAVE_ARCH_FREE_PAGE +static inline void arch_free_page(struct page *page, int order) { } +#endif +#ifndef HAVE_ARCH_ALLOC_PAGE +static inline void arch_alloc_page(struct page *page, int order) { } +#endif + +struct page * +__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, nodemask_t *nodemask); + +static inline struct page * +__alloc_pages(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist) +{ + return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); +} + +static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, + unsigned int order) +{ + /* Unknown node is current node */ + if (nid < 0) + nid = numa_node_id(); + + return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); +} + +static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, + unsigned int order) +{ + VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid)); + + return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); +} + +#ifdef CONFIG_NUMA +extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); + +static inline struct page * +alloc_pages(gfp_t gfp_mask, unsigned int order) +{ + return alloc_pages_current(gfp_mask, order); +} +extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, + struct vm_area_struct *vma, unsigned long addr, + int node); +#else +#define alloc_pages(gfp_mask, order) \ + alloc_pages_node(numa_node_id(), gfp_mask, order) +#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \ + alloc_pages(gfp_mask, order) +#endif +#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) +#define alloc_page_vma(gfp_mask, vma, addr) \ + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) +#define alloc_page_vma_node(gfp_mask, vma, addr, node) \ + alloc_pages_vma(gfp_mask, 0, vma, addr, node) + +extern unsigned long 
__get_free_pages(gfp_t gfp_mask, unsigned int order); +extern unsigned long get_zeroed_page(gfp_t gfp_mask); + +void *alloc_pages_exact(size_t size, gfp_t gfp_mask); +void free_pages_exact(void *virt, size_t size); +/* This is different from alloc_pages_exact_node !!! */ +void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); + +#define __get_free_page(gfp_mask) \ + __get_free_pages((gfp_mask), 0) + +#define __get_dma_pages(gfp_mask, order) \ + __get_free_pages((gfp_mask) | GFP_DMA, (order)) + +extern void __free_pages(struct page *page, unsigned int order); +extern void free_pages(unsigned long addr, unsigned int order); +extern void free_hot_cold_page(struct page *page, int cold); +extern void free_hot_cold_page_list(struct list_head *list, int cold); + +#define __free_page(page) __free_pages((page), 0) +#define free_page(addr) free_pages((addr), 0) + +void page_alloc_init(void); +void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); +void drain_all_pages(void); +void drain_local_pages(void *dummy); + +/* + * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what + * GFP flags are used before interrupts are enabled. Once interrupts are + * enabled, it is set to __GFP_BITS_MASK while the system is running. During + * hibernation, it is used by PM to avoid I/O during memory allocation while + * devices are suspended. + */ +extern gfp_t gfp_allowed_mask; + +extern void pm_restrict_gfp_mask(void); +extern void pm_restore_gfp_mask(void); + +#ifdef CONFIG_PM_SLEEP +extern bool pm_suspended_storage(void); +#else +static inline bool pm_suspended_storage(void) +{ + return false; +} +#endif /* CONFIG_PM_SLEEP */ + +#endif /* __LINUX_GFP_H */ diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h new file mode 100644 index 00000000..fa98bdb0 --- /dev/null +++ b/include/linux/gfs2_ondisk.h @@ -0,0 +1,452 @@ +/* + * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. + * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. 
+ */ + +#ifndef __GFS2_ONDISK_DOT_H__ +#define __GFS2_ONDISK_DOT_H__ + +#include <linux/types.h> + +#define GFS2_MAGIC 0x01161970 +#define GFS2_BASIC_BLOCK 512 +#define GFS2_BASIC_BLOCK_SHIFT 9 + +/* Lock numbers of the LM_TYPE_NONDISK type */ + +#define GFS2_MOUNT_LOCK 0 +#define GFS2_LIVE_LOCK 1 +#define GFS2_TRANS_LOCK 2 +#define GFS2_RENAME_LOCK 3 +#define GFS2_CONTROL_LOCK 4 +#define GFS2_MOUNTED_LOCK 5 + +/* Format numbers for various metadata types */ + +#define GFS2_FORMAT_NONE 0 +#define GFS2_FORMAT_SB 100 +#define GFS2_FORMAT_RG 200 +#define GFS2_FORMAT_RB 300 +#define GFS2_FORMAT_DI 400 +#define GFS2_FORMAT_IN 500 +#define GFS2_FORMAT_LF 600 +#define GFS2_FORMAT_JD 700 +#define GFS2_FORMAT_LH 800 +#define GFS2_FORMAT_LD 900 +#define GFS2_FORMAT_LB 1000 +#define GFS2_FORMAT_EA 1600 +#define GFS2_FORMAT_ED 1700 +#define GFS2_FORMAT_QC 1400 +/* These are format numbers for entities contained in files */ +#define GFS2_FORMAT_RI 1100 +#define GFS2_FORMAT_DE 1200 +#define GFS2_FORMAT_QU 1500 +/* These are part of the superblock */ +#define GFS2_FORMAT_FS 1801 +#define GFS2_FORMAT_MULTI 1900 + +/* + * An on-disk inode number + */ + +struct gfs2_inum { + __be64 no_formal_ino; + __be64 no_addr; +}; + +/* + * Generic metadata head structure + * Every inplace buffer logged in the journal must start with this. + */ + +#define GFS2_METATYPE_NONE 0 +#define GFS2_METATYPE_SB 1 +#define GFS2_METATYPE_RG 2 +#define GFS2_METATYPE_RB 3 +#define GFS2_METATYPE_DI 4 +#define GFS2_METATYPE_IN 5 +#define GFS2_METATYPE_LF 6 +#define GFS2_METATYPE_JD 7 +#define GFS2_METATYPE_LH 8 +#define GFS2_METATYPE_LD 9 +#define GFS2_METATYPE_LB 12 +#define GFS2_METATYPE_EA 10 +#define GFS2_METATYPE_ED 11 +#define GFS2_METATYPE_QC 14 + +struct gfs2_meta_header { + __be32 mh_magic; + __be32 mh_type; + __be64 __pad0; /* Was generation number in gfs1 */ + __be32 mh_format; + /* This union is to keep userspace happy */ + union { + __be32 mh_jid; /* Was incarnation number in gfs1 */ + __be32 __pad1; + }; +}; + +/* + * super-block structure + * + * It's probably good if SIZEOF_SB <= GFS2_BASIC_BLOCK (512 bytes) + * + * Order is important, need to be able to read old superblocks to do on-disk + * version upgrades. 
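+ *
+ * (Editorial note, not part of the original header: GFS2_SB_ADDR below
+ * is given in GFS2_BASIC_BLOCK units, so the superblock sits at byte
+ * offset 128 * 512 = 64KiB, independent of the filesystem block size.)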
+ */ + +/* Address of superblock in GFS2 basic blocks */ +#define GFS2_SB_ADDR 128 + +/* The lock number for the superblock (must be zero) */ +#define GFS2_SB_LOCK 0 + +/* Requirement: GFS2_LOCKNAME_LEN % 8 == 0 + Includes: the fencing zero at the end */ +#define GFS2_LOCKNAME_LEN 64 + +struct gfs2_sb { + struct gfs2_meta_header sb_header; + + __be32 sb_fs_format; + __be32 sb_multihost_format; + __u32 __pad0; /* Was superblock flags in gfs1 */ + + __be32 sb_bsize; + __be32 sb_bsize_shift; + __u32 __pad1; /* Was journal segment size in gfs1 */ + + struct gfs2_inum sb_master_dir; /* Was jindex dinode in gfs1 */ + struct gfs2_inum __pad2; /* Was rindex dinode in gfs1 */ + struct gfs2_inum sb_root_dir; + + char sb_lockproto[GFS2_LOCKNAME_LEN]; + char sb_locktable[GFS2_LOCKNAME_LEN]; + + struct gfs2_inum __pad3; /* Was quota inode in gfs1 */ + struct gfs2_inum __pad4; /* Was licence inode in gfs1 */ +#define GFS2_HAS_UUID 1 + __u8 sb_uuid[16]; /* The UUID, maybe 0 for backwards compat */ +}; + +/* + * resource index structure + */ + +struct gfs2_rindex { + __be64 ri_addr; /* grp block disk address */ + __be32 ri_length; /* length of rgrp header in fs blocks */ + __u32 __pad; + + __be64 ri_data0; /* first data location */ + __be32 ri_data; /* num of data blocks in rgrp */ + + __be32 ri_bitbytes; /* number of bytes in data bitmaps */ + + __u8 ri_reserved[64]; +}; + +/* + * resource group header structure + */ + +/* Number of blocks per byte in rgrp */ +#define GFS2_NBBY 4 +#define GFS2_BIT_SIZE 2 +#define GFS2_BIT_MASK 0x00000003 + +#define GFS2_BLKST_FREE 0 +#define GFS2_BLKST_USED 1 +#define GFS2_BLKST_UNLINKED 2 +#define GFS2_BLKST_DINODE 3 + +#define GFS2_RGF_JOURNAL 0x00000001 +#define GFS2_RGF_METAONLY 0x00000002 +#define GFS2_RGF_DATAONLY 0x00000004 +#define GFS2_RGF_NOALLOC 0x00000008 +#define GFS2_RGF_TRIMMED 0x00000010 + +struct gfs2_rgrp { + struct gfs2_meta_header rg_header; + + __be32 rg_flags; + __be32 rg_free; + __be32 rg_dinodes; + __be32 __pad; + __be64 rg_igeneration; + + __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */ +}; + +/* + * quota structure + */ + +struct gfs2_quota { + __be64 qu_limit; + __be64 qu_warn; + __be64 qu_value; + __u8 qu_reserved[64]; +}; + +/* + * dinode structure + */ + +#define GFS2_MAX_META_HEIGHT 10 +#define GFS2_DIR_MAX_DEPTH 17 + +#define DT2IF(dt) (((dt) << 12) & S_IFMT) +#define IF2DT(sif) (((sif) & S_IFMT) >> 12) + +enum { + gfs2fl_Jdata = 0, + gfs2fl_ExHash = 1, + gfs2fl_Unused = 2, + gfs2fl_EaIndirect = 3, + gfs2fl_Directio = 4, + gfs2fl_Immutable = 5, + gfs2fl_AppendOnly = 6, + gfs2fl_NoAtime = 7, + gfs2fl_Sync = 8, + gfs2fl_System = 9, + gfs2fl_TruncInProg = 29, + gfs2fl_InheritDirectio = 30, + gfs2fl_InheritJdata = 31, +}; + +/* Dinode flags */ +#define GFS2_DIF_JDATA 0x00000001 +#define GFS2_DIF_EXHASH 0x00000002 +#define GFS2_DIF_UNUSED 0x00000004 /* only in gfs1 */ +#define GFS2_DIF_EA_INDIRECT 0x00000008 +#define GFS2_DIF_DIRECTIO 0x00000010 +#define GFS2_DIF_IMMUTABLE 0x00000020 +#define GFS2_DIF_APPENDONLY 0x00000040 +#define GFS2_DIF_NOATIME 0x00000080 +#define GFS2_DIF_SYNC 0x00000100 +#define GFS2_DIF_SYSTEM 0x00000200 /* New in gfs2 */ +#define GFS2_DIF_TRUNC_IN_PROG 0x20000000 /* New in gfs2 */ +#define GFS2_DIF_INHERIT_DIRECTIO 0x40000000 +#define GFS2_DIF_INHERIT_JDATA 0x80000000 + +struct gfs2_dinode { + struct gfs2_meta_header di_header; + + struct gfs2_inum di_num; + + __be32 di_mode; /* mode of file */ + __be32 di_uid; /* owner's user id */ + __be32 di_gid; /* owner's group id */ + __be32 di_nlink; /* number 
of links to this file */ + __be64 di_size; /* number of bytes in file */ + __be64 di_blocks; /* number of blocks in file */ + __be64 di_atime; /* time last accessed */ + __be64 di_mtime; /* time last modified */ + __be64 di_ctime; /* time last changed */ + __be32 di_major; /* device major number */ + __be32 di_minor; /* device minor number */ + + /* This section varies from gfs1. Padding added to align with + * remainder of dinode + */ + __be64 di_goal_meta; /* rgrp to alloc from next */ + __be64 di_goal_data; /* data block goal */ + __be64 di_generation; /* generation number for NFS */ + + __be32 di_flags; /* GFS2_DIF_... */ + __be32 di_payload_format; /* GFS2_FORMAT_... */ + __u16 __pad1; /* Was ditype in gfs1 */ + __be16 di_height; /* height of metadata */ + __u32 __pad2; /* Unused incarnation number from gfs1 */ + + /* These only apply to directories */ + __u16 __pad3; /* Padding */ + __be16 di_depth; /* Number of bits in the table */ + __be32 di_entries; /* The number of entries in the directory */ + + struct gfs2_inum __pad4; /* Unused even in current gfs1 */ + + __be64 di_eattr; /* extended attribute block number */ + __be32 di_atime_nsec; /* nsec portion of atime */ + __be32 di_mtime_nsec; /* nsec portion of mtime */ + __be32 di_ctime_nsec; /* nsec portion of ctime */ + + __u8 di_reserved[44]; +}; + +/* + * directory structure - many of these per directory file + */ + +#define GFS2_FNAMESIZE 255 +#define GFS2_DIRENT_SIZE(name_len) ((sizeof(struct gfs2_dirent) + (name_len) + 7) & ~7) + +struct gfs2_dirent { + struct gfs2_inum de_inum; + __be32 de_hash; + __be16 de_rec_len; + __be16 de_name_len; + __be16 de_type; + __u8 __pad[14]; +}; + +/* + * Header of leaf directory nodes + */ + +struct gfs2_leaf { + struct gfs2_meta_header lf_header; + + __be16 lf_depth; /* Depth of leaf */ + __be16 lf_entries; /* Number of dirents in leaf */ + __be32 lf_dirent_format; /* Format of the dirents */ + __be64 lf_next; /* Next leaf, if overflow */ + + __u8 lf_reserved[64]; +}; + +/* + * Extended attribute header format + * + * This works in a similar way to dirents. There is a fixed size header + * followed by a variable length section made up of the name and the + * associated data. In the case of a "stuffed" entry, the value is + * inline directly after the name, the ea_num_ptrs entry will be + * zero in that case. For non-"stuffed" entries, there will be + * a set of pointers (aligned to 8 byte boundary) to the block(s) + * containing the value. + * + * The blocks containing the values and the blocks containing the + * extended attribute headers themselves all start with the common + * metadata header. Each inode, if it has extended attributes, will + * have either a single block containing the extended attribute headers + * or a single indirect block pointing to blocks containing the + * extended attribure headers. + * + * The maximim size of the data part of an extended attribute is 64k + * so the number of blocks required depends upon block size. Since the + * block size also determines the number of pointers in an indirect + * block, its a fairly complicated calculation to work out the maximum + * number of blocks that an inode may have relating to extended attributes. 
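+ *
+ * Rough worked example (editorial note, not part of the original
+ * header): with 4KiB filesystem blocks, an unstuffed value of the full
+ * GFS2_EA_MAX_DATA_LEN (64KiB) needs about 64KiB / 4KiB = 16 data
+ * blocks (slightly more once each block's own metadata header is
+ * subtracted), so the ea_header is followed by that many 8-byte block
+ * pointers.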
+ * + */ + +#define GFS2_EA_MAX_NAME_LEN 255 +#define GFS2_EA_MAX_DATA_LEN 65536 + +#define GFS2_EATYPE_UNUSED 0 +#define GFS2_EATYPE_USR 1 +#define GFS2_EATYPE_SYS 2 +#define GFS2_EATYPE_SECURITY 3 + +#define GFS2_EATYPE_LAST 3 +#define GFS2_EATYPE_VALID(x) ((x) <= GFS2_EATYPE_LAST) + +#define GFS2_EAFLAG_LAST 0x01 /* last ea in block */ + +struct gfs2_ea_header { + __be32 ea_rec_len; + __be32 ea_data_len; + __u8 ea_name_len; /* no NULL pointer after the string */ + __u8 ea_type; /* GFS2_EATYPE_... */ + __u8 ea_flags; /* GFS2_EAFLAG_... */ + __u8 ea_num_ptrs; + __u32 __pad; +}; + +/* + * Log header structure + */ + +#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */ + +struct gfs2_log_header { + struct gfs2_meta_header lh_header; + + __be64 lh_sequence; /* Sequence number of this transaction */ + __be32 lh_flags; /* GFS2_LOG_HEAD_... */ + __be32 lh_tail; /* Block number of log tail */ + __be32 lh_blkno; + __be32 lh_hash; +}; + +/* + * Log type descriptor + */ + +#define GFS2_LOG_DESC_METADATA 300 +/* ld_data1 is the number of metadata blocks in the descriptor. + ld_data2 is unused. */ + +#define GFS2_LOG_DESC_REVOKE 301 +/* ld_data1 is the number of revoke blocks in the descriptor. + ld_data2 is unused. */ + +#define GFS2_LOG_DESC_JDATA 302 +/* ld_data1 is the number of data blocks in the descriptor. + ld_data2 is unused. */ + +struct gfs2_log_descriptor { + struct gfs2_meta_header ld_header; + + __be32 ld_type; /* GFS2_LOG_DESC_... */ + __be32 ld_length; /* Number of buffers in this chunk */ + __be32 ld_data1; /* descriptor-specific field */ + __be32 ld_data2; /* descriptor-specific field */ + + __u8 ld_reserved[32]; +}; + +/* + * Inum Range + * Describe a range of formal inode numbers allocated to + * one machine to assign to inodes. + */ + +#define GFS2_INUM_QUANTUM 1048576 + +struct gfs2_inum_range { + __be64 ir_start; + __be64 ir_length; +}; + +/* + * Statfs change + * Describes an change to the pool of free and allocated + * blocks. + */ + +struct gfs2_statfs_change { + __be64 sc_total; + __be64 sc_free; + __be64 sc_dinodes; +}; + +/* + * Quota change + * Describes an allocation change for a particular + * user or group. + */ + +#define GFS2_QCF_USER 0x00000001 + +struct gfs2_quota_change { + __be64 qc_change; + __be32 qc_flags; /* GFS2_QCF_... */ + __be32 qc_id; +}; + +struct gfs2_quota_lvb { + __be32 qb_magic; + __u32 __pad; + __be64 qb_limit; /* Hard limit of # blocks to alloc */ + __be64 qb_warn; /* Warn user when alloc is above this # */ + __be64 qb_value; /* Current # blocks allocated */ +}; + +#endif /* __GFS2_ONDISK_DOT_H__ */ diff --git a/include/linux/gigaset_dev.h b/include/linux/gigaset_dev.h new file mode 100644 index 00000000..258ba829 --- /dev/null +++ b/include/linux/gigaset_dev.h @@ -0,0 +1,38 @@ +/* + * interface to user space for the gigaset driver + * + * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de> + * + * ===================================================================== + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * ===================================================================== + */ + +#ifndef GIGASET_INTERFACE_H +#define GIGASET_INTERFACE_H + +#include <linux/ioctl.h> + +/* The magic IOCTL value for this interface. 
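+ *
+ * (Illustrative use from user space, editorial note, not part of the
+ * original header: for example, enabling redirection is
+ * int on = 1; ioctl(fd, GIGASET_REDIR, &on);)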
*/ +#define GIGASET_IOCTL 0x47 + +/* enable/disable device control via character device (lock out ISDN subsys) */ +#define GIGASET_REDIR _IOWR(GIGASET_IOCTL, 0, int) + +/* enable adapter configuration mode (M10x only) */ +#define GIGASET_CONFIG _IOWR(GIGASET_IOCTL, 1, int) + +/* set break characters (M105 only) */ +#define GIGASET_BRKCHARS _IOW(GIGASET_IOCTL, 2, unsigned char[6]) + +/* get version information selected by arg[0] */ +#define GIGASET_VERSION _IOWR(GIGASET_IOCTL, 3, unsigned[4]) +/* values for GIGASET_VERSION arg[0] */ +#define GIGVER_DRIVER 0 /* get driver version */ +#define GIGVER_COMPAT 1 /* get interface compatibility version */ +#define GIGVER_FWBASE 2 /* get base station firmware version */ + +#endif diff --git a/include/linux/gpio-fan.h b/include/linux/gpio-fan.h new file mode 100644 index 00000000..09665916 --- /dev/null +++ b/include/linux/gpio-fan.h @@ -0,0 +1,36 @@ +/* + * include/linux/gpio-fan.h + * + * Platform data structure for GPIO fan driver + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LINUX_GPIO_FAN_H +#define __LINUX_GPIO_FAN_H + +struct gpio_fan_alarm { + unsigned gpio; + unsigned active_low; +}; + +struct gpio_fan_speed { + int rpm; + int ctrl_val; +}; + +struct gpio_fan_platform_data { + int num_ctrl; + unsigned *ctrl; /* fan control GPIOs. */ + struct gpio_fan_alarm *alarm; /* fan alarm GPIO. */ + /* + * Speed conversion array: rpm from/to GPIO bit field. + * This array _must_ be sorted in ascending rpm order. + */ + int num_speed; + struct gpio_fan_speed *speed; +}; + +#endif /* __LINUX_GPIO_FAN_H */ diff --git a/include/linux/gpio-i2cmux.h b/include/linux/gpio-i2cmux.h new file mode 100644 index 00000000..4a333bb0 --- /dev/null +++ b/include/linux/gpio-i2cmux.h @@ -0,0 +1,38 @@ +/* + * gpio-i2cmux interface to platform code + * + * Peter Korsgaard <peter.korsgaard@barco.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_GPIO_I2CMUX_H +#define _LINUX_GPIO_I2CMUX_H + +/* MUX has no specific idle mode */ +#define GPIO_I2CMUX_NO_IDLE ((unsigned)-1) + +/** + * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux + * @parent: Parent I2C bus adapter number + * @base_nr: Base I2C bus number to number adapters from or zero for dynamic + * @values: Array of bitmasks of GPIO settings (low/high) for each + * position + * @n_values: Number of multiplexer positions (busses to instantiate) + * @gpios: Array of GPIO numbers used to control MUX + * @n_gpios: Number of GPIOs used to control MUX + * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used + */ +struct gpio_i2cmux_platform_data { + int parent; + int base_nr; + const unsigned *values; + int n_values; + const unsigned *gpios; + int n_gpios; + unsigned idle; +}; + +#endif /* _LINUX_GPIO_I2CMUX_H */ diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h new file mode 100644 index 00000000..d755b28b --- /dev/null +++ b/include/linux/gpio-pxa.h @@ -0,0 +1,20 @@ +#ifndef __GPIO_PXA_H +#define __GPIO_PXA_H + +#define GPIO_bit(x) (1 << ((x) & 0x1f)) + +#define gpio_to_bank(gpio) ((gpio) >> 5) + +/* NOTE: some PXAs have fewer on-chip GPIOs (like PXA255, with 85). 
+ * Those cases currently cause holes in the GPIO number space, the + * actual number of the last GPIO is recorded by 'pxa_last_gpio'. + */ +extern int pxa_last_gpio; + +extern int pxa_irq_to_gpio(int irq); + +struct pxa_gpio_platform_data { + int (*gpio_set_wake)(unsigned int gpio, unsigned int on); +}; + +#endif /* __GPIO_PXA_H */ diff --git a/include/linux/gpio.h b/include/linux/gpio.h new file mode 100644 index 00000000..6155ecf1 --- /dev/null +++ b/include/linux/gpio.h @@ -0,0 +1,177 @@ +#ifndef __LINUX_GPIO_H +#define __LINUX_GPIO_H + +/* see Documentation/gpio.txt */ + +/* make these flag values available regardless of GPIO kconfig options */ +#define GPIOF_DIR_OUT (0 << 0) +#define GPIOF_DIR_IN (1 << 0) + +#define GPIOF_INIT_LOW (0 << 1) +#define GPIOF_INIT_HIGH (1 << 1) + +#define GPIOF_IN (GPIOF_DIR_IN) +#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW) +#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH) + +/* Gpio pin is open drain */ +#define GPIOF_OPEN_DRAIN (1 << 2) + +/* Gpio pin is open source */ +#define GPIOF_OPEN_SOURCE (1 << 3) + +/** + * struct gpio - a structure describing a GPIO with configuration + * @gpio: the GPIO number + * @flags: GPIO configuration as specified by GPIOF_* + * @label: a literal description string of this GPIO + */ +struct gpio { + unsigned gpio; + unsigned long flags; + const char *label; +}; + +#ifdef CONFIG_GENERIC_GPIO +#include <asm/gpio.h> + +#else + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/bug.h> + +struct device; +struct gpio_chip; + +static inline bool gpio_is_valid(int number) +{ + return false; +} + +static inline int gpio_request(unsigned gpio, const char *label) +{ + return -ENOSYS; +} + +static inline int gpio_request_one(unsigned gpio, + unsigned long flags, const char *label) +{ + return -ENOSYS; +} + +static inline int gpio_request_array(const struct gpio *array, size_t num) +{ + return -ENOSYS; +} + +static inline void gpio_free(unsigned gpio) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + +static inline void gpio_free_array(const struct gpio *array, size_t num) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + +static inline int gpio_direction_input(unsigned gpio) +{ + return -ENOSYS; +} + +static inline int gpio_direction_output(unsigned gpio, int value) +{ + return -ENOSYS; +} + +static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) +{ + return -ENOSYS; +} + +static inline int gpio_get_value(unsigned gpio) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return 0; +} + +static inline void gpio_set_value(unsigned gpio, int value) +{ + /* GPIO can never have been requested or set as output */ + WARN_ON(1); +} + +static inline int gpio_cansleep(unsigned gpio) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return 0; +} + +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return 0; +} + +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + /* GPIO can never have been requested or set as output */ + WARN_ON(1); +} + +static inline int gpio_export(unsigned gpio, bool direction_may_change) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return -EINVAL; +} + +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + /* 
GPIO can never have been exported */ + WARN_ON(1); + return -EINVAL; +} + +static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -EINVAL; +} + +static inline void gpio_unexport(unsigned gpio) +{ + /* GPIO can never have been exported */ + WARN_ON(1); +} + +static inline int gpio_to_irq(unsigned gpio) +{ + /* GPIO can never have been requested or set as input */ + WARN_ON(1); + return -EINVAL; +} + +static inline int irq_to_gpio(unsigned irq) +{ + /* irq can never have been returned from gpio_to_irq() */ + WARN_ON(1); + return -EINVAL; +} + +#endif + +#endif /* __LINUX_GPIO_H */ diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h new file mode 100644 index 00000000..2613fc5e --- /dev/null +++ b/include/linux/gpio_event.h @@ -0,0 +1,170 @@ +/* include/linux/gpio_event.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_GPIO_EVENT_H +#define _LINUX_GPIO_EVENT_H + +#include <linux/input.h> + +struct gpio_event_input_devs { + int count; + struct input_dev *dev[]; +}; +enum { + GPIO_EVENT_FUNC_UNINIT = 0x0, + GPIO_EVENT_FUNC_INIT = 0x1, + GPIO_EVENT_FUNC_SUSPEND = 0x2, + GPIO_EVENT_FUNC_RESUME = 0x3, +}; +struct gpio_event_info { + int (*func)(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, + void **data, int func); + int (*event)(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, + void **data, unsigned int dev, unsigned int type, + unsigned int code, int value); /* out events */ + bool no_suspend; +}; + +struct gpio_event_platform_data { + const char *name; + struct gpio_event_info **info; + size_t info_count; + int (*power)(const struct gpio_event_platform_data *pdata, bool on); + const char *names[]; /* If name is NULL, names contain a NULL */ + /* terminated list of input devices to create */ +}; + +#define GPIO_EVENT_DEV_NAME "gpio-event" + +/* Key matrix */ + +enum gpio_event_matrix_flags { + /* unset: drive active output low, set: drive active output high */ + GPIOKPF_ACTIVE_HIGH = 1U << 0, + GPIOKPF_DEBOUNCE = 1U << 1, + GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2, + GPIOKPF_REMOVE_PHANTOM_KEYS = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS | + GPIOKPF_DEBOUNCE, + GPIOKPF_DRIVE_INACTIVE = 1U << 3, + GPIOKPF_LEVEL_TRIGGERED_IRQ = 1U << 4, + GPIOKPF_PRINT_UNMAPPED_KEYS = 1U << 16, + GPIOKPF_PRINT_MAPPED_KEYS = 1U << 17, + GPIOKPF_PRINT_PHANTOM_KEYS = 1U << 18, +}; + +#define MATRIX_CODE_BITS (10) +#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1) +#define MATRIX_KEY(dev, code) \ + (((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK)) + +extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func); +struct gpio_event_matrix_info { + /* initialize to gpio_event_matrix_func */ + struct gpio_event_info info; + /* size must be ninputs * noutputs */ + const unsigned short *keymap; + unsigned int *input_gpios; + unsigned int *output_gpios; + unsigned int ninputs; + unsigned int noutputs; + /* time to wait 
before reading inputs after driving each output */ + ktime_t settle_time; + /* time to wait before scanning the keypad a second time */ + ktime_t debounce_delay; + ktime_t poll_time; + unsigned flags; +}; + +/* Directly connected inputs and outputs */ + +enum gpio_event_direct_flags { + GPIOEDF_ACTIVE_HIGH = 1U << 0, +/* GPIOEDF_USE_DOWN_IRQ = 1U << 1, */ +/* GPIOEDF_USE_IRQ = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */ + GPIOEDF_PRINT_KEYS = 1U << 8, + GPIOEDF_PRINT_KEY_DEBOUNCE = 1U << 9, + GPIOEDF_PRINT_KEY_UNSTABLE = 1U << 10, +}; + +struct gpio_event_direct_entry { + uint32_t gpio:16; + uint32_t code:10; + uint32_t dev:6; +}; + +/* inputs */ +extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func); +struct gpio_event_input_info { + /* initialize to gpio_event_input_func */ + struct gpio_event_info info; + ktime_t debounce_time; + ktime_t poll_time; + uint16_t flags; + uint16_t type; + const struct gpio_event_direct_entry *keymap; + size_t keymap_size; +}; + +/* outputs */ +extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func); +extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, + unsigned int dev, unsigned int type, + unsigned int code, int value); +struct gpio_event_output_info { + /* initialize to gpio_event_output_func and gpio_event_output_event */ + struct gpio_event_info info; + uint16_t flags; + uint16_t type; + const struct gpio_event_direct_entry *keymap; + size_t keymap_size; +}; + + +/* axes */ + +enum gpio_event_axis_flags { + GPIOEAF_PRINT_UNKNOWN_DIRECTION = 1U << 16, + GPIOEAF_PRINT_RAW = 1U << 17, + GPIOEAF_PRINT_EVENT = 1U << 18, +}; + +extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func); +struct gpio_event_axis_info { + /* initialize to gpio_event_axis_func */ + struct gpio_event_info info; + uint8_t count; /* number of gpios for this axis */ + uint8_t dev; /* device index when using multiple input devices */ + uint8_t type; /* EV_REL or EV_ABS */ + uint16_t code; + uint16_t decoded_size; + uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in); + uint32_t *gpio; + uint32_t flags; +}; +#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map +#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map +uint16_t gpio_axis_4bit_gray_map( + struct gpio_event_axis_info *info, uint16_t in); +uint16_t gpio_axis_5bit_singletrack_map( + struct gpio_event_axis_info *info, uint16_t in); + +#endif diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h new file mode 100644 index 00000000..a7e977ff --- /dev/null +++ b/include/linux/gpio_keys.h @@ -0,0 +1,31 @@ +#ifndef _GPIO_KEYS_H +#define _GPIO_KEYS_H + +struct device; + +struct gpio_keys_button { + /* Configuration parameters */ + unsigned int code; /* input event code (KEY_*, SW_*) */ + int gpio; /* -1 if this key does not support gpio */ + int active_low; + const char *desc; + unsigned int type; /* input event type (EV_KEY, EV_SW, EV_ABS) */ + int wakeup; /* configure the button as a wake-up source */ + int debounce_interval; /* debounce ticks interval in msecs */ + bool can_disable; + int value; /* axis value for EV_ABS */ + unsigned int irq; /* Irq number in case of interrupt keys */ +}; + +struct gpio_keys_platform_data { + struct gpio_keys_button *buttons; + int nbuttons; + unsigned int poll_interval; /* polling 
interval in msecs - + for polling driver only */ + unsigned int rep:1; /* enable input subsystem auto repeat */ + int (*enable)(struct device *dev); + void (*disable)(struct device *dev); + const char *name; /* input device name */ +}; + +#endif diff --git a/include/linux/gpio_mouse.h b/include/linux/gpio_mouse.h new file mode 100644 index 00000000..44ed7aa1 --- /dev/null +++ b/include/linux/gpio_mouse.h @@ -0,0 +1,61 @@ +/* + * Driver for simulating a mouse on GPIO lines. + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _GPIO_MOUSE_H +#define _GPIO_MOUSE_H + +#define GPIO_MOUSE_POLARITY_ACT_HIGH 0x00 +#define GPIO_MOUSE_POLARITY_ACT_LOW 0x01 + +#define GPIO_MOUSE_PIN_UP 0 +#define GPIO_MOUSE_PIN_DOWN 1 +#define GPIO_MOUSE_PIN_LEFT 2 +#define GPIO_MOUSE_PIN_RIGHT 3 +#define GPIO_MOUSE_PIN_BLEFT 4 +#define GPIO_MOUSE_PIN_BMIDDLE 5 +#define GPIO_MOUSE_PIN_BRIGHT 6 +#define GPIO_MOUSE_PIN_MAX 7 + +/** + * struct gpio_mouse_platform_data + * @scan_ms: integer in ms specifying the scan periode. + * @polarity: Pin polarity, active high or low. + * @up: GPIO line for up value. + * @down: GPIO line for down value. + * @left: GPIO line for left value. + * @right: GPIO line for right value. + * @bleft: GPIO line for left button. + * @bmiddle: GPIO line for middle button. + * @bright: GPIO line for right button. + * + * This struct must be added to the platform_device in the board code. + * It is used by the gpio_mouse driver to setup GPIO lines and to + * calculate mouse movement. + */ +struct gpio_mouse_platform_data { + int scan_ms; + int polarity; + + union { + struct { + int up; + int down; + int left; + int right; + + int bleft; + int bmiddle; + int bright; + }; + int pins[GPIO_MOUSE_PIN_MAX]; + }; +}; + +#endif /* _GPIO_MOUSE_H */ diff --git a/include/linux/gsmmux.h b/include/linux/gsmmux.h new file mode 100644 index 00000000..c25e9477 --- /dev/null +++ b/include/linux/gsmmux.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_GSMMUX_H +#define _LINUX_GSMMUX_H + +struct gsm_config +{ + unsigned int adaption; + unsigned int encapsulation; + unsigned int initiator; + unsigned int t1; + unsigned int t2; + unsigned int t3; + unsigned int n2; + unsigned int mru; + unsigned int mtu; + unsigned int k; + unsigned int i; + unsigned int unused[8]; /* Padding for expansion without + breaking stuff */ +}; + +#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config) +#define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config) + +struct gsm_netconfig { + unsigned int adaption; /* Adaption to use in network mode */ + unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */ + unsigned short unused2; + char if_name[IFNAMSIZ]; /* interface name format string */ + __u8 unused[28]; /* For future use */ +}; + +#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig) +#define GSMIOC_DISABLE_NET _IO('G', 3) + + +#endif diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h new file mode 100644 index 00000000..bb7f3097 --- /dev/null +++ b/include/linux/hardirq.h @@ -0,0 +1,210 @@ +#ifndef LINUX_HARDIRQ_H +#define LINUX_HARDIRQ_H + +#include <linux/preempt.h> +#include <linux/lockdep.h> +#include <linux/ftrace_irq.h> +#include <asm/hardirq.h> + +/* + * We put the hardirq and softirq counter into the preemption + * counter. 
The bitmask has the following meaning: + * + * - bits 0-7 are the preemption count (max preemption depth: 256) + * - bits 8-15 are the softirq count (max # of softirqs: 256) + * + * The hardirq count can in theory reach the same as NR_IRQS. + * In reality, the number of nested IRQS is limited to the stack + * size as well. For archs with over 1000 IRQS it is not practical + * to expect that they will all nest. We give a max of 10 bits for + * hardirq nesting. An arch may choose to give less than 10 bits. + * m68k expects it to be 8. + * + * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) + * - bit 26 is the NMI_MASK + * - bit 28 is the PREEMPT_ACTIVE flag + * + * PREEMPT_MASK: 0x000000ff + * SOFTIRQ_MASK: 0x0000ff00 + * HARDIRQ_MASK: 0x03ff0000 + * NMI_MASK: 0x04000000 + */ +#define PREEMPT_BITS 8 +#define SOFTIRQ_BITS 8 +#define NMI_BITS 1 + +#define MAX_HARDIRQ_BITS 10 + +#ifndef HARDIRQ_BITS +# define HARDIRQ_BITS MAX_HARDIRQ_BITS +#endif + +#if HARDIRQ_BITS > MAX_HARDIRQ_BITS +#error HARDIRQ_BITS too high! +#endif + +#define PREEMPT_SHIFT 0 +#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) +#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) +#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) + +#define __IRQ_MASK(x) ((1UL << (x))-1) + +#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) +#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) +#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) +#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) + +#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) +#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) +#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) +#define NMI_OFFSET (1UL << NMI_SHIFT) + +#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) + +#ifndef PREEMPT_ACTIVE +#define PREEMPT_ACTIVE_BITS 1 +#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) +#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) +#endif + +#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) +#error PREEMPT_ACTIVE is too low! +#endif + +#define hardirq_count() (preempt_count() & HARDIRQ_MASK) +#define softirq_count() (preempt_count() & SOFTIRQ_MASK) +#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) + +/* + * Are we doing bottom half or hardware interrupt processing? + * Are we in a softirq context? Interrupt context? + * in_softirq - Are we currently processing softirq or have bh disabled? + * in_serving_softirq - Are we currently processing softirq? + */ +#define in_irq() (hardirq_count()) +#define in_softirq() (softirq_count()) +#define in_interrupt() (irq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + +/* + * Are we in NMI context? + */ +#define in_nmi() (preempt_count() & NMI_MASK) + +#if defined(CONFIG_PREEMPT_COUNT) +# define PREEMPT_CHECK_OFFSET 1 +#else +# define PREEMPT_CHECK_OFFSET 0 +#endif + +/* + * Are we running in atomic context? WARNING: this macro cannot + * always detect atomic context; in particular, it cannot know about + * held spinlocks in non-preemptible kernels. Thus it should not be + * used in the general case to determine whether sleeping is possible. + * Do not use in_atomic() in driver code. 
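+ *
+ * Decoding example (editorial note, not part of the original header):
+ * a preempt_count() of 0x00010001 is HARDIRQ_OFFSET plus one
+ * preempt_disable(), so hardirq_count() is non-zero and in_irq(),
+ * in_interrupt() and in_atomic() all evaluate true.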
+ */ +#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) + +/* + * Check whether we were atomic before we did preempt_disable(): + * (used by the scheduler, *after* releasing the kernel lock) + */ +#define in_atomic_preempt_off() \ + ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET) + +#ifdef CONFIG_PREEMPT_COUNT +# define preemptible() (preempt_count() == 0 && !irqs_disabled()) +# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) +#else +# define preemptible() 0 +# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET +#endif + +#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS) +extern void synchronize_irq(unsigned int irq); +#else +# define synchronize_irq(irq) barrier() +#endif + +struct task_struct; + +#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) +static inline void account_system_vtime(struct task_struct *tsk) +{ +} +#else +extern void account_system_vtime(struct task_struct *tsk); +#endif + +#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) + +static inline void rcu_nmi_enter(void) +{ +} + +static inline void rcu_nmi_exit(void) +{ +} + +#else +extern void rcu_nmi_enter(void); +extern void rcu_nmi_exit(void); +#endif + +/* + * It is safe to do non-atomic ops on ->hardirq_context, + * because NMI handlers may not preempt and the ops are + * always balanced, so the interrupted value of ->hardirq_context + * will always be restored. + */ +#define __irq_enter() \ + do { \ + account_system_vtime(current); \ + add_preempt_count(HARDIRQ_OFFSET); \ + trace_hardirq_enter(); \ + } while (0) + +/* + * Enter irq context (on NO_HZ, update jiffies): + */ +extern void irq_enter(void); + +/* + * Exit irq context without processing softirqs: + */ +#define __irq_exit() \ + do { \ + trace_hardirq_exit(); \ + account_system_vtime(current); \ + sub_preempt_count(HARDIRQ_OFFSET); \ + } while (0) + +/* + * Exit irq context and process softirqs if needed: + */ +extern void irq_exit(void); + +#define nmi_enter() \ + do { \ + ftrace_nmi_enter(); \ + BUG_ON(in_nmi()); \ + add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ + lockdep_off(); \ + rcu_nmi_enter(); \ + trace_hardirq_enter(); \ + } while (0) + +#define nmi_exit() \ + do { \ + trace_hardirq_exit(); \ + rcu_nmi_exit(); \ + lockdep_on(); \ + BUG_ON(!in_nmi()); \ + sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ + ftrace_nmi_exit(); \ + } while (0) + +#endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hash.h b/include/linux/hash.h new file mode 100644 index 00000000..b80506bd --- /dev/null +++ b/include/linux/hash.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_HASH_H +#define _LINUX_HASH_H +/* Fast hashing routine for ints, longs and pointers. + (C) 2002 William Lee Irwin III, IBM */ + +/* + * Knuth recommends primes in approximately golden ratio to the maximum + * integer representable by a machine word for multiplicative hashing. + * Chuck Lever verified the effectiveness of this technique: + * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf + * + * These primes are chosen to be bit-sparse, that is operations on + * them can use shifts and additions instead of multiplications for + * machines where multiplications are slow. 
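+ *
+ * (Editorial note, not part of the original header: the shift/add
+ * sequence in hash_64() below is exactly this decomposition, since
+ * GOLDEN_RATIO_PRIME_64 = 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1;
+ * the function thus multiplies by the sparse prime without using a
+ * 64-bit multiply instruction.)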
+ */ + +#include <asm/types.h> + +/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ +#define GOLDEN_RATIO_PRIME_32 0x9e370001UL +/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ +#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL + +#if BITS_PER_LONG == 32 +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32 +#define hash_long(val, bits) hash_32(val, bits) +#elif BITS_PER_LONG == 64 +#define hash_long(val, bits) hash_64(val, bits) +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64 +#else +#error Wordsize not 32 or 64 +#endif + +static inline u64 hash_64(u64 val, unsigned int bits) +{ + u64 hash = val; + + /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ + u64 n = hash; + n <<= 18; + hash -= n; + n <<= 33; + hash -= n; + n <<= 3; + hash += n; + n <<= 3; + hash -= n; + n <<= 4; + hash += n; + n <<= 2; + hash += n; + + /* High bits are more random, so use them. */ + return hash >> (64 - bits); +} + +static inline u32 hash_32(u32 val, unsigned int bits) +{ + /* On some cpus multiply is faster, on others gcc will do shifts */ + u32 hash = val * GOLDEN_RATIO_PRIME_32; + + /* High bits are more random, so use them. */ + return hash >> (32 - bits); +} + +static inline unsigned long hash_ptr(const void *ptr, unsigned int bits) +{ + return hash_long((unsigned long)ptr, bits); +} +#endif /* _LINUX_HASH_H */ diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h new file mode 100644 index 00000000..ee275c8b --- /dev/null +++ b/include/linux/hdlc.h @@ -0,0 +1,131 @@ +/* + * Generic HDLC support routines for Linux + * + * Copyright (C) 1999-2005 Krzysztof Halasa <khc@pm.waw.pl> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#ifndef __HDLC_H +#define __HDLC_H + + +#define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */ +#if 0 +#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */ +#else +#define HDLC_MAX_MRU 1600 /* as required for FR network */ +#endif + + +#ifdef __KERNEL__ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/hdlc/ioctl.h> + +/* This structure is a private property of HDLC protocols. 
+ Hardware drivers have no interest here */ + +struct hdlc_proto { + int (*open)(struct net_device *dev); + void (*close)(struct net_device *dev); + void (*start)(struct net_device *dev); /* if open & DCD */ + void (*stop)(struct net_device *dev); /* if open & !DCD */ + void (*detach)(struct net_device *dev); + int (*ioctl)(struct net_device *dev, struct ifreq *ifr); + __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); + int (*netif_rx)(struct sk_buff *skb); + netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); + struct module *module; + struct hdlc_proto *next; /* next protocol in the list */ +}; + + +/* Pointed to by netdev_priv(dev) */ +typedef struct hdlc_device { + /* used by HDLC layer to take control over HDLC device from hw driver*/ + int (*attach)(struct net_device *dev, + unsigned short encoding, unsigned short parity); + + /* hardware driver must handle this instead of dev->hard_start_xmit */ + netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); + + /* Things below are for HDLC layer internal use only */ + const struct hdlc_proto *proto; + int carrier; + int open; + spinlock_t state_lock; + void *state; + void *priv; +} hdlc_device; + + + +/* Exported from hdlc module */ + +/* Called by hardware driver when a user requests HDLC service */ +int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); + +/* Must be used by hardware driver on module startup/exit */ +#define register_hdlc_device(dev) register_netdev(dev) +void unregister_hdlc_device(struct net_device *dev); + + +void register_hdlc_protocol(struct hdlc_proto *proto); +void unregister_hdlc_protocol(struct hdlc_proto *proto); + +struct net_device *alloc_hdlcdev(void *priv); + +static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev) +{ + return netdev_priv(dev); +} + +static __inline__ void debug_frame(const struct sk_buff *skb) +{ + int i; + + for (i=0; i < skb->len; i++) { + if (i == 100) { + printk("...\n"); + return; + } + printk(" %02X", skb->data[i]); + } + printk("\n"); +} + + +/* Must be called by hardware driver when HDLC device is being opened */ +int hdlc_open(struct net_device *dev); +/* Must be called by hardware driver when HDLC device is being closed */ +void hdlc_close(struct net_device *dev); +/* May be used by hardware driver */ +int hdlc_change_mtu(struct net_device *dev, int new_mtu); +/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ +netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); + +int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, + size_t size); +/* May be used by hardware driver to gain control over HDLC device */ +void detach_hdlc_protocol(struct net_device *dev); + +static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb, + struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + + skb->dev = dev; + skb_reset_mac_header(skb); + + if (hdlc->proto->type_trans) + return hdlc->proto->type_trans(skb, dev); + else + return htons(ETH_P_HDLC); +} + +#endif /* __KERNEL */ +#endif /* __HDLC_H */ diff --git a/include/linux/hdlc/Kbuild b/include/linux/hdlc/Kbuild new file mode 100644 index 00000000..1fb26448 --- /dev/null +++ b/include/linux/hdlc/Kbuild @@ -0,0 +1 @@ +header-y += ioctl.h diff --git a/include/linux/hdlc/ioctl.h b/include/linux/hdlc/ioctl.h new file mode 100644 index 00000000..58397236 --- /dev/null +++ b/include/linux/hdlc/ioctl.h @@ -0,0 +1,81 @@ +#ifndef __HDLC_IOCTL_H__ +#define __HDLC_IOCTL_H__ + + +#define GENERIC_HDLC_VERSION 4 /* 
For synchronization with sethdlc utility */ + +#define CLOCK_DEFAULT 0 /* Default setting */ +#define CLOCK_EXT 1 /* External TX and RX clock - DTE */ +#define CLOCK_INT 2 /* Internal TX and RX clock - DCE */ +#define CLOCK_TXINT 3 /* Internal TX and external RX clock */ +#define CLOCK_TXFROMRX 4 /* TX clock derived from external RX clock */ + + +#define ENCODING_DEFAULT 0 /* Default setting */ +#define ENCODING_NRZ 1 +#define ENCODING_NRZI 2 +#define ENCODING_FM_MARK 3 +#define ENCODING_FM_SPACE 4 +#define ENCODING_MANCHESTER 5 + + +#define PARITY_DEFAULT 0 /* Default setting */ +#define PARITY_NONE 1 /* No parity */ +#define PARITY_CRC16_PR0 2 /* CRC16, initial value 0x0000 */ +#define PARITY_CRC16_PR1 3 /* CRC16, initial value 0xFFFF */ +#define PARITY_CRC16_PR0_CCITT 4 /* CRC16, initial 0x0000, ITU-T version */ +#define PARITY_CRC16_PR1_CCITT 5 /* CRC16, initial 0xFFFF, ITU-T version */ +#define PARITY_CRC32_PR0_CCITT 6 /* CRC32, initial value 0x00000000 */ +#define PARITY_CRC32_PR1_CCITT 7 /* CRC32, initial value 0xFFFFFFFF */ + +#define LMI_DEFAULT 0 /* Default setting */ +#define LMI_NONE 1 /* No LMI, all PVCs are static */ +#define LMI_ANSI 2 /* ANSI Annex D */ +#define LMI_CCITT 3 /* ITU-T Annex A */ +#define LMI_CISCO 4 /* The "original" LMI, aka Gang of Four */ + +typedef struct { + unsigned int clock_rate; /* bits per second */ + unsigned int clock_type; /* internal, external, TX-internal etc. */ + unsigned short loopback; +} sync_serial_settings; /* V.35, V.24, X.21 */ + +typedef struct { + unsigned int clock_rate; /* bits per second */ + unsigned int clock_type; /* internal, external, TX-internal etc. */ + unsigned short loopback; + unsigned int slot_map; +} te1_settings; /* T1, E1 */ + +typedef struct { + unsigned short encoding; + unsigned short parity; +} raw_hdlc_proto; + +typedef struct { + unsigned int t391; + unsigned int t392; + unsigned int n391; + unsigned int n392; + unsigned int n393; + unsigned short lmi; + unsigned short dce; /* 1 for DCE (network side) operation */ +} fr_proto; + +typedef struct { + unsigned int dlci; +} fr_proto_pvc; /* for creating/deleting FR PVCs */ + +typedef struct { + unsigned int dlci; + char master[IFNAMSIZ]; /* Name of master FRAD device */ +}fr_proto_pvc_info; /* for returning PVC information only */ + +typedef struct { + unsigned int interval; + unsigned int timeout; +} cisco_proto; + +/* PPP doesn't need any info now - supply length = 0 to ioctl */ + +#endif /* __HDLC_IOCTL_H__ */ diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h new file mode 100644 index 00000000..c010b4a7 --- /dev/null +++ b/include/linux/hdlcdrv.h @@ -0,0 +1,377 @@ +/* + * hdlcdrv.h -- HDLC packet radio network driver. 
+ * The Linux soundcard driver for 1200 baud and 9600 baud packet radio + * (C) 1996-1998 by Thomas Sailer, HB9JNX/AE4WA + */ + +#ifndef _HDLCDRV_H +#define _HDLCDRV_H + +/* -------------------------------------------------------------------- */ +/* + * structs for the IOCTL commands + */ + +struct hdlcdrv_params { + int iobase; + int irq; + int dma; + int dma2; + int seriobase; + int pariobase; + int midiiobase; +}; + +struct hdlcdrv_channel_params { + int tx_delay; /* the transmitter keyup delay in 10ms units */ + int tx_tail; /* the transmitter keyoff delay in 10ms units */ + int slottime; /* the slottime in 10ms; usually 10 = 100ms */ + int ppersist; /* the p-persistence 0..255 */ + int fulldup; /* some driver do not support full duplex, setting */ + /* this just makes them send even if DCD is on */ +}; + +struct hdlcdrv_old_channel_state { + int ptt; + int dcd; + int ptt_keyed; +}; + +struct hdlcdrv_channel_state { + int ptt; + int dcd; + int ptt_keyed; + unsigned long tx_packets; + unsigned long tx_errors; + unsigned long rx_packets; + unsigned long rx_errors; +}; + +struct hdlcdrv_ioctl { + int cmd; + union { + struct hdlcdrv_params mp; + struct hdlcdrv_channel_params cp; + struct hdlcdrv_channel_state cs; + struct hdlcdrv_old_channel_state ocs; + unsigned int calibrate; + unsigned char bits; + char modename[128]; + char drivername[32]; + } data; +}; + +/* -------------------------------------------------------------------- */ + +/* + * ioctl values + */ +#define HDLCDRVCTL_GETMODEMPAR 0 +#define HDLCDRVCTL_SETMODEMPAR 1 +#define HDLCDRVCTL_MODEMPARMASK 2 /* not handled by hdlcdrv */ +#define HDLCDRVCTL_GETCHANNELPAR 10 +#define HDLCDRVCTL_SETCHANNELPAR 11 +#define HDLCDRVCTL_OLDGETSTAT 20 +#define HDLCDRVCTL_CALIBRATE 21 +#define HDLCDRVCTL_GETSTAT 22 + +/* + * these are mainly for debugging purposes + */ +#define HDLCDRVCTL_GETSAMPLES 30 +#define HDLCDRVCTL_GETBITS 31 + +/* + * not handled by hdlcdrv, but by its depending drivers + */ +#define HDLCDRVCTL_GETMODE 40 +#define HDLCDRVCTL_SETMODE 41 +#define HDLCDRVCTL_MODELIST 42 +#define HDLCDRVCTL_DRIVERNAME 43 + +/* + * mask of needed modem parameters, returned by HDLCDRVCTL_MODEMPARMASK + */ +#define HDLCDRV_PARMASK_IOBASE (1<<0) +#define HDLCDRV_PARMASK_IRQ (1<<1) +#define HDLCDRV_PARMASK_DMA (1<<2) +#define HDLCDRV_PARMASK_DMA2 (1<<3) +#define HDLCDRV_PARMASK_SERIOBASE (1<<4) +#define HDLCDRV_PARMASK_PARIOBASE (1<<5) +#define HDLCDRV_PARMASK_MIDIIOBASE (1<<6) + +/* -------------------------------------------------------------------- */ + +#ifdef __KERNEL__ + +#include <linux/netdevice.h> +#include <linux/if.h> +#include <linux/spinlock.h> + +#define HDLCDRV_MAGIC 0x5ac6e778 +#define HDLCDRV_HDLCBUFFER 32 /* should be a power of 2 for speed reasons */ +#define HDLCDRV_BITBUFFER 256 /* should be a power of 2 for speed reasons */ +#undef HDLCDRV_LOOPBACK /* define for HDLC debugging purposes */ +#define HDLCDRV_DEBUG + +/* maximum packet length, excluding CRC */ +#define HDLCDRV_MAXFLEN 400 + + +struct hdlcdrv_hdlcbuffer { + spinlock_t lock; + unsigned rd, wr; + unsigned short buf[HDLCDRV_HDLCBUFFER]; +}; + +#ifdef HDLCDRV_DEBUG +struct hdlcdrv_bitbuffer { + unsigned int rd; + unsigned int wr; + unsigned int shreg; + unsigned char buffer[HDLCDRV_BITBUFFER]; +}; + +static inline void hdlcdrv_add_bitbuffer(struct hdlcdrv_bitbuffer *buf, + unsigned int bit) +{ + unsigned char new; + + new = buf->shreg & 1; + buf->shreg >>= 1; + buf->shreg |= (!!bit) << 7; + if (new) { + buf->buffer[buf->wr] = buf->shreg; + buf->wr = (buf->wr+1) % 
sizeof(buf->buffer); + buf->shreg = 0x80; + } +} + +static inline void hdlcdrv_add_bitbuffer_word(struct hdlcdrv_bitbuffer *buf, + unsigned int bits) +{ + buf->buffer[buf->wr] = bits & 0xff; + buf->wr = (buf->wr+1) % sizeof(buf->buffer); + buf->buffer[buf->wr] = (bits >> 8) & 0xff; + buf->wr = (buf->wr+1) % sizeof(buf->buffer); + +} +#endif /* HDLCDRV_DEBUG */ + +/* -------------------------------------------------------------------- */ +/* + * Information that need to be kept for each driver. + */ + +struct hdlcdrv_ops { + /* + * first some informations needed by the hdlcdrv routines + */ + const char *drvname; + const char *drvinfo; + /* + * the routines called by the hdlcdrv routines + */ + int (*open)(struct net_device *); + int (*close)(struct net_device *); + int (*ioctl)(struct net_device *, struct ifreq *, + struct hdlcdrv_ioctl *, int); +}; + +struct hdlcdrv_state { + int magic; + int opened; + + const struct hdlcdrv_ops *ops; + + struct { + int bitrate; + } par; + + struct hdlcdrv_pttoutput { + int dma2; + int seriobase; + int pariobase; + int midiiobase; + unsigned int flags; + } ptt_out; + + struct hdlcdrv_channel_params ch_params; + + struct hdlcdrv_hdlcrx { + struct hdlcdrv_hdlcbuffer hbuf; + unsigned long in_hdlc_rx; + /* 0 = sync hunt, != 0 receiving */ + int rx_state; + unsigned int bitstream; + unsigned int bitbuf; + int numbits; + unsigned char dcd; + + int len; + unsigned char *bp; + unsigned char buffer[HDLCDRV_MAXFLEN+2]; + } hdlcrx; + + struct hdlcdrv_hdlctx { + struct hdlcdrv_hdlcbuffer hbuf; + unsigned long in_hdlc_tx; + /* + * 0 = send flags + * 1 = send txtail (flags) + * 2 = send packet + */ + int tx_state; + int numflags; + unsigned int bitstream; + unsigned char ptt; + int calibrate; + int slotcnt; + + unsigned int bitbuf; + int numbits; + + int len; + unsigned char *bp; + unsigned char buffer[HDLCDRV_MAXFLEN+2]; + } hdlctx; + +#ifdef HDLCDRV_DEBUG + struct hdlcdrv_bitbuffer bitbuf_channel; + struct hdlcdrv_bitbuffer bitbuf_hdlc; +#endif /* HDLCDRV_DEBUG */ + + int ptt_keyed; + + /* queued skb for transmission */ + struct sk_buff *skb; +}; + + +/* -------------------------------------------------------------------- */ + +static inline int hdlcdrv_hbuf_full(struct hdlcdrv_hdlcbuffer *hb) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&hb->lock, flags); + ret = !((HDLCDRV_HDLCBUFFER - 1 + hb->rd - hb->wr) % HDLCDRV_HDLCBUFFER); + spin_unlock_irqrestore(&hb->lock, flags); + return ret; +} + +/* -------------------------------------------------------------------- */ + +static inline int hdlcdrv_hbuf_empty(struct hdlcdrv_hdlcbuffer *hb) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&hb->lock, flags); + ret = (hb->rd == hb->wr); + spin_unlock_irqrestore(&hb->lock, flags); + return ret; +} + +/* -------------------------------------------------------------------- */ + +static inline unsigned short hdlcdrv_hbuf_get(struct hdlcdrv_hdlcbuffer *hb) +{ + unsigned long flags; + unsigned short val; + unsigned newr; + + spin_lock_irqsave(&hb->lock, flags); + if (hb->rd == hb->wr) + val = 0; + else { + newr = (hb->rd+1) % HDLCDRV_HDLCBUFFER; + val = hb->buf[hb->rd]; + hb->rd = newr; + } + spin_unlock_irqrestore(&hb->lock, flags); + return val; +} + +/* -------------------------------------------------------------------- */ + +static inline void hdlcdrv_hbuf_put(struct hdlcdrv_hdlcbuffer *hb, + unsigned short val) +{ + unsigned newp; + unsigned long flags; + + spin_lock_irqsave(&hb->lock, flags); + newp = (hb->wr+1) % HDLCDRV_HDLCBUFFER; + if (newp != 
hb->rd) { + hb->buf[hb->wr] = val & 0xffff; + hb->wr = newp; + } + spin_unlock_irqrestore(&hb->lock, flags); +} + +/* -------------------------------------------------------------------- */ + +static inline void hdlcdrv_putbits(struct hdlcdrv_state *s, unsigned int bits) +{ + hdlcdrv_hbuf_put(&s->hdlcrx.hbuf, bits); +} + +static inline unsigned int hdlcdrv_getbits(struct hdlcdrv_state *s) +{ + unsigned int ret; + + if (hdlcdrv_hbuf_empty(&s->hdlctx.hbuf)) { + if (s->hdlctx.calibrate > 0) + s->hdlctx.calibrate--; + else + s->hdlctx.ptt = 0; + ret = 0; + } else + ret = hdlcdrv_hbuf_get(&s->hdlctx.hbuf); +#ifdef HDLCDRV_LOOPBACK + hdlcdrv_hbuf_put(&s->hdlcrx.hbuf, ret); +#endif /* HDLCDRV_LOOPBACK */ + return ret; +} + +static inline void hdlcdrv_channelbit(struct hdlcdrv_state *s, unsigned int bit) +{ +#ifdef HDLCDRV_DEBUG + hdlcdrv_add_bitbuffer(&s->bitbuf_channel, bit); +#endif /* HDLCDRV_DEBUG */ +} + +static inline void hdlcdrv_setdcd(struct hdlcdrv_state *s, int dcd) +{ + s->hdlcrx.dcd = !!dcd; +} + +static inline int hdlcdrv_ptt(struct hdlcdrv_state *s) +{ + return s->hdlctx.ptt || (s->hdlctx.calibrate > 0); +} + +/* -------------------------------------------------------------------- */ + +void hdlcdrv_receiver(struct net_device *, struct hdlcdrv_state *); +void hdlcdrv_transmitter(struct net_device *, struct hdlcdrv_state *); +void hdlcdrv_arbitrate(struct net_device *, struct hdlcdrv_state *); +struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops, + unsigned int privsize, const char *ifname, + unsigned int baseaddr, unsigned int irq, + unsigned int dma); +void hdlcdrv_unregister(struct net_device *dev); + +/* -------------------------------------------------------------------- */ + + + +#endif /* __KERNEL__ */ + +/* -------------------------------------------------------------------- */ + +#endif /* _HDLCDRV_H */ + +/* -------------------------------------------------------------------- */ diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h new file mode 100644 index 00000000..29ee2873 --- /dev/null +++ b/include/linux/hdreg.h @@ -0,0 +1,658 @@ +#ifndef _LINUX_HDREG_H +#define _LINUX_HDREG_H + +#include <linux/types.h> + +/* + * Command Header sizes for IOCTL commands + */ + +#define HDIO_DRIVE_CMD_HDR_SIZE (4 * sizeof(__u8)) +#define HDIO_DRIVE_HOB_HDR_SIZE (8 * sizeof(__u8)) +#define HDIO_DRIVE_TASK_HDR_SIZE (8 * sizeof(__u8)) + +#define IDE_DRIVE_TASK_NO_DATA 0 +#ifndef __KERNEL__ +#define IDE_DRIVE_TASK_INVALID -1 +#define IDE_DRIVE_TASK_SET_XFER 1 +#define IDE_DRIVE_TASK_IN 2 +#define IDE_DRIVE_TASK_OUT 3 +#endif +#define IDE_DRIVE_TASK_RAW_WRITE 4 + +/* + * Define standard taskfile in/out register + */ +#define IDE_TASKFILE_STD_IN_FLAGS 0xFE +#define IDE_HOB_STD_IN_FLAGS 0x3C +#ifndef __KERNEL__ +#define IDE_TASKFILE_STD_OUT_FLAGS 0xFE +#define IDE_HOB_STD_OUT_FLAGS 0x3C + +typedef unsigned char task_ioreg_t; +typedef unsigned long sata_ioreg_t; +#endif + +typedef union ide_reg_valid_s { + unsigned all : 16; + struct { + unsigned data : 1; + unsigned error_feature : 1; + unsigned sector : 1; + unsigned nsector : 1; + unsigned lcyl : 1; + unsigned hcyl : 1; + unsigned select : 1; + unsigned status_command : 1; + + unsigned data_hob : 1; + unsigned error_feature_hob : 1; + unsigned sector_hob : 1; + unsigned nsector_hob : 1; + unsigned lcyl_hob : 1; + unsigned hcyl_hob : 1; + unsigned select_hob : 1; + unsigned control_hob : 1; + } b; +} ide_reg_valid_t; + +typedef struct ide_task_request_s { + __u8 io_ports[8]; + __u8 hob_ports[8]; /* bytes 6 and 7 are 
unused */ + ide_reg_valid_t out_flags; + ide_reg_valid_t in_flags; + int data_phase; + int req_cmd; + unsigned long out_size; + unsigned long in_size; +} ide_task_request_t; + +typedef struct ide_ioctl_request_s { + ide_task_request_t *task_request; + unsigned char *out_buffer; + unsigned char *in_buffer; +} ide_ioctl_request_t; + +struct hd_drive_cmd_hdr { + __u8 command; + __u8 sector_number; + __u8 feature; + __u8 sector_count; +}; + +#ifndef __KERNEL__ +typedef struct hd_drive_task_hdr { + __u8 data; + __u8 feature; + __u8 sector_count; + __u8 sector_number; + __u8 low_cylinder; + __u8 high_cylinder; + __u8 device_head; + __u8 command; +} task_struct_t; + +typedef struct hd_drive_hob_hdr { + __u8 data; + __u8 feature; + __u8 sector_count; + __u8 sector_number; + __u8 low_cylinder; + __u8 high_cylinder; + __u8 device_head; + __u8 control; +} hob_struct_t; +#endif + +#define TASKFILE_NO_DATA 0x0000 + +#define TASKFILE_IN 0x0001 +#define TASKFILE_MULTI_IN 0x0002 + +#define TASKFILE_OUT 0x0004 +#define TASKFILE_MULTI_OUT 0x0008 +#define TASKFILE_IN_OUT 0x0010 + +#define TASKFILE_IN_DMA 0x0020 +#define TASKFILE_OUT_DMA 0x0040 +#define TASKFILE_IN_DMAQ 0x0080 +#define TASKFILE_OUT_DMAQ 0x0100 + +#ifndef __KERNEL__ +#define TASKFILE_P_IN 0x0200 +#define TASKFILE_P_OUT 0x0400 +#define TASKFILE_P_IN_DMA 0x0800 +#define TASKFILE_P_OUT_DMA 0x1000 +#define TASKFILE_P_IN_DMAQ 0x2000 +#define TASKFILE_P_OUT_DMAQ 0x4000 +#define TASKFILE_48 0x8000 +#define TASKFILE_INVALID 0x7fff +#endif + +#ifndef __KERNEL__ +/* ATA/ATAPI Commands pre T13 Spec */ +#define WIN_NOP 0x00 +/* + * 0x01->0x02 Reserved + */ +#define CFA_REQ_EXT_ERROR_CODE 0x03 /* CFA Request Extended Error Code */ +/* + * 0x04->0x07 Reserved + */ +#define WIN_SRST 0x08 /* ATAPI soft reset command */ +#define WIN_DEVICE_RESET 0x08 +/* + * 0x09->0x0F Reserved + */ +#define WIN_RECAL 0x10 +#define WIN_RESTORE WIN_RECAL +/* + * 0x10->0x1F Reserved + */ +#define WIN_READ 0x20 /* 28-Bit */ +#define WIN_READ_ONCE 0x21 /* 28-Bit without retries */ +#define WIN_READ_LONG 0x22 /* 28-Bit */ +#define WIN_READ_LONG_ONCE 0x23 /* 28-Bit without retries */ +#define WIN_READ_EXT 0x24 /* 48-Bit */ +#define WIN_READDMA_EXT 0x25 /* 48-Bit */ +#define WIN_READDMA_QUEUED_EXT 0x26 /* 48-Bit */ +#define WIN_READ_NATIVE_MAX_EXT 0x27 /* 48-Bit */ +/* + * 0x28 + */ +#define WIN_MULTREAD_EXT 0x29 /* 48-Bit */ +/* + * 0x2A->0x2F Reserved + */ +#define WIN_WRITE 0x30 /* 28-Bit */ +#define WIN_WRITE_ONCE 0x31 /* 28-Bit without retries */ +#define WIN_WRITE_LONG 0x32 /* 28-Bit */ +#define WIN_WRITE_LONG_ONCE 0x33 /* 28-Bit without retries */ +#define WIN_WRITE_EXT 0x34 /* 48-Bit */ +#define WIN_WRITEDMA_EXT 0x35 /* 48-Bit */ +#define WIN_WRITEDMA_QUEUED_EXT 0x36 /* 48-Bit */ +#define WIN_SET_MAX_EXT 0x37 /* 48-Bit */ +#define CFA_WRITE_SECT_WO_ERASE 0x38 /* CFA Write Sectors without erase */ +#define WIN_MULTWRITE_EXT 0x39 /* 48-Bit */ +/* + * 0x3A->0x3B Reserved + */ +#define WIN_WRITE_VERIFY 0x3C /* 28-Bit */ +/* + * 0x3D->0x3F Reserved + */ +#define WIN_VERIFY 0x40 /* 28-Bit - Read Verify Sectors */ +#define WIN_VERIFY_ONCE 0x41 /* 28-Bit - without retries */ +#define WIN_VERIFY_EXT 0x42 /* 48-Bit */ +/* + * 0x43->0x4F Reserved + */ +#define WIN_FORMAT 0x50 +/* + * 0x51->0x5F Reserved + */ +#define WIN_INIT 0x60 +/* + * 0x61->0x5F Reserved + */ +#define WIN_SEEK 0x70 /* 0x70-0x7F Reserved */ + +#define CFA_TRANSLATE_SECTOR 0x87 /* CFA Translate Sector */ +#define WIN_DIAGNOSE 0x90 +#define WIN_SPECIFY 0x91 /* set drive geometry translation */ +#define 
WIN_DOWNLOAD_MICROCODE 0x92 +#define WIN_STANDBYNOW2 0x94 +#define WIN_STANDBY2 0x96 +#define WIN_SETIDLE2 0x97 +#define WIN_CHECKPOWERMODE2 0x98 +#define WIN_SLEEPNOW2 0x99 +/* + * 0x9A VENDOR + */ +#define WIN_PACKETCMD 0xA0 /* Send a packet command. */ +#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */ +#define WIN_QUEUED_SERVICE 0xA2 +#define WIN_SMART 0xB0 /* self-monitoring and reporting */ +#define CFA_ERASE_SECTORS 0xC0 +#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode*/ +#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */ +#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */ +#define WIN_READDMA_QUEUED 0xC7 /* read sectors using Queued DMA transfers */ +#define WIN_READDMA 0xC8 /* read sectors using DMA transfers */ +#define WIN_READDMA_ONCE 0xC9 /* 28-Bit - without retries */ +#define WIN_WRITEDMA 0xCA /* write sectors using DMA transfers */ +#define WIN_WRITEDMA_ONCE 0xCB /* 28-Bit - without retries */ +#define WIN_WRITEDMA_QUEUED 0xCC /* write sectors using Queued DMA transfers */ +#define CFA_WRITE_MULTI_WO_ERASE 0xCD /* CFA Write multiple without erase */ +#define WIN_GETMEDIASTATUS 0xDA +#define WIN_ACKMEDIACHANGE 0xDB /* ATA-1, ATA-2 vendor */ +#define WIN_POSTBOOT 0xDC +#define WIN_PREBOOT 0xDD +#define WIN_DOORLOCK 0xDE /* lock door on removable drives */ +#define WIN_DOORUNLOCK 0xDF /* unlock door on removable drives */ +#define WIN_STANDBYNOW1 0xE0 +#define WIN_IDLEIMMEDIATE 0xE1 /* force drive to become "ready" */ +#define WIN_STANDBY 0xE2 /* Set device in Standby Mode */ +#define WIN_SETIDLE1 0xE3 +#define WIN_READ_BUFFER 0xE4 /* force read only 1 sector */ +#define WIN_CHECKPOWERMODE1 0xE5 +#define WIN_SLEEPNOW1 0xE6 +#define WIN_FLUSH_CACHE 0xE7 +#define WIN_WRITE_BUFFER 0xE8 /* force write only 1 sector */ +#define WIN_WRITE_SAME 0xE9 /* read ata-2 to use */ + /* SET_FEATURES 0x22 or 0xDD */ +#define WIN_FLUSH_CACHE_EXT 0xEA /* 48-Bit */ +#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */ +#define WIN_MEDIAEJECT 0xED +#define WIN_IDENTIFY_DMA 0xEE /* same as WIN_IDENTIFY, but DMA */ +#define WIN_SETFEATURES 0xEF /* set special drive features */ +#define EXABYTE_ENABLE_NEST 0xF0 +#define WIN_SECURITY_SET_PASS 0xF1 +#define WIN_SECURITY_UNLOCK 0xF2 +#define WIN_SECURITY_ERASE_PREPARE 0xF3 +#define WIN_SECURITY_ERASE_UNIT 0xF4 +#define WIN_SECURITY_FREEZE_LOCK 0xF5 +#define WIN_SECURITY_DISABLE 0xF6 +#define WIN_READ_NATIVE_MAX 0xF8 /* return the native maximum address */ +#define WIN_SET_MAX 0xF9 +#define DISABLE_SEAGATE 0xFB + +/* WIN_SMART sub-commands */ + +#define SMART_READ_VALUES 0xD0 +#define SMART_READ_THRESHOLDS 0xD1 +#define SMART_AUTOSAVE 0xD2 +#define SMART_SAVE 0xD3 +#define SMART_IMMEDIATE_OFFLINE 0xD4 +#define SMART_READ_LOG_SECTOR 0xD5 +#define SMART_WRITE_LOG_SECTOR 0xD6 +#define SMART_WRITE_THRESHOLDS 0xD7 +#define SMART_ENABLE 0xD8 +#define SMART_DISABLE 0xD9 +#define SMART_STATUS 0xDA +#define SMART_AUTO_OFFLINE 0xDB + +/* Password used in TF4 & TF5 executing SMART commands */ + +#define SMART_LCYL_PASS 0x4F +#define SMART_HCYL_PASS 0xC2 + +/* WIN_SETFEATURES sub-commands */ +#define SETFEATURES_EN_8BIT 0x01 /* Enable 8-Bit Transfers */ +#define SETFEATURES_EN_WCACHE 0x02 /* Enable write cache */ +#define SETFEATURES_DIS_DEFECT 0x04 /* Disable Defect Management */ +#define SETFEATURES_EN_APM 0x05 /* Enable advanced power management */ +#define SETFEATURES_EN_SAME_R 0x22 /* for a region ATA-1 */ +#define SETFEATURES_DIS_MSN 0x31 /* Disable Media Status Notification */ +#define SETFEATURES_DIS_RETRY 
0x33 /* Disable Retry */ +#define SETFEATURES_EN_AAM 0x42 /* Enable Automatic Acoustic Management */ +#define SETFEATURES_RW_LONG 0x44 /* Set Length of VS bytes */ +#define SETFEATURES_SET_CACHE 0x54 /* Set Cache segments to SC Reg. Val */ +#define SETFEATURES_DIS_RLA 0x55 /* Disable read look-ahead feature */ +#define SETFEATURES_EN_RI 0x5D /* Enable release interrupt */ +#define SETFEATURES_EN_SI 0x5E /* Enable SERVICE interrupt */ +#define SETFEATURES_DIS_RPOD 0x66 /* Disable reverting to power on defaults */ +#define SETFEATURES_DIS_ECC 0x77 /* Disable ECC byte count */ +#define SETFEATURES_DIS_8BIT 0x81 /* Disable 8-Bit Transfers */ +#define SETFEATURES_DIS_WCACHE 0x82 /* Disable write cache */ +#define SETFEATURES_EN_DEFECT 0x84 /* Enable Defect Management */ +#define SETFEATURES_DIS_APM 0x85 /* Disable advanced power management */ +#define SETFEATURES_EN_ECC 0x88 /* Enable ECC byte count */ +#define SETFEATURES_EN_MSN 0x95 /* Enable Media Status Notification */ +#define SETFEATURES_EN_RETRY 0x99 /* Enable Retry */ +#define SETFEATURES_EN_RLA 0xAA /* Enable read look-ahead feature */ +#define SETFEATURES_PREFETCH 0xAB /* Sets drive prefetch value */ +#define SETFEATURES_EN_REST 0xAC /* ATA-1 */ +#define SETFEATURES_4B_RW_LONG 0xBB /* Set Length of 4 bytes */ +#define SETFEATURES_DIS_AAM 0xC2 /* Disable Automatic Acoustic Management */ +#define SETFEATURES_EN_RPOD 0xCC /* Enable reverting to power on defaults */ +#define SETFEATURES_DIS_RI 0xDD /* Disable release interrupt ATAPI */ +#define SETFEATURES_EN_SAME_M 0xDD /* for a entire device ATA-1 */ +#define SETFEATURES_DIS_SI 0xDE /* Disable SERVICE interrupt ATAPI */ + +/* WIN_SECURITY sub-commands */ + +#define SECURITY_SET_PASSWORD 0xBA +#define SECURITY_UNLOCK 0xBB +#define SECURITY_ERASE_PREPARE 0xBC +#define SECURITY_ERASE_UNIT 0xBD +#define SECURITY_FREEZE_LOCK 0xBE +#define SECURITY_DISABLE_PASSWORD 0xBF +#endif /* __KERNEL__ */ + +struct hd_geometry { + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + unsigned long start; +}; + +/* hd/ide ctl's that pass (arg) ptrs to user space are numbered 0x030n/0x031n */ +#define HDIO_GETGEO 0x0301 /* get device geometry */ +#define HDIO_GET_UNMASKINTR 0x0302 /* get current unmask setting */ +#define HDIO_GET_MULTCOUNT 0x0304 /* get current IDE blockmode setting */ +#define HDIO_GET_QDMA 0x0305 /* get use-qdma flag */ + +#define HDIO_SET_XFER 0x0306 /* set transfer rate via proc */ + +#define HDIO_OBSOLETE_IDENTITY 0x0307 /* OBSOLETE, DO NOT USE: returns 142 bytes */ +#define HDIO_GET_KEEPSETTINGS 0x0308 /* get keep-settings-on-reset flag */ +#define HDIO_GET_32BIT 0x0309 /* get current io_32bit setting */ +#define HDIO_GET_NOWERR 0x030a /* get ignore-write-error flag */ +#define HDIO_GET_DMA 0x030b /* get use-dma flag */ +#define HDIO_GET_NICE 0x030c /* get nice flags */ +#define HDIO_GET_IDENTITY 0x030d /* get IDE identification info */ +#define HDIO_GET_WCACHE 0x030e /* get write cache mode on|off */ +#define HDIO_GET_ACOUSTIC 0x030f /* get acoustic value */ +#define HDIO_GET_ADDRESS 0x0310 /* */ + +#define HDIO_GET_BUSSTATE 0x031a /* get the bus state of the hwif */ +#define HDIO_TRISTATE_HWIF 0x031b /* execute a channel tristate */ +#define HDIO_DRIVE_RESET 0x031c /* execute a device reset */ +#define HDIO_DRIVE_TASKFILE 0x031d /* execute raw taskfile */ +#define HDIO_DRIVE_TASK 0x031e /* execute task and special drive command */ +#define HDIO_DRIVE_CMD 0x031f /* execute a special drive command */ +#define HDIO_DRIVE_CMD_AEB HDIO_DRIVE_TASK + +/* hd/ide 
ctl's that pass (arg) non-ptr values are numbered 0x032n/0x033n */ +#define HDIO_SET_MULTCOUNT 0x0321 /* change IDE blockmode */ +#define HDIO_SET_UNMASKINTR 0x0322 /* permit other irqs during I/O */ +#define HDIO_SET_KEEPSETTINGS 0x0323 /* keep ioctl settings on reset */ +#define HDIO_SET_32BIT 0x0324 /* change io_32bit flags */ +#define HDIO_SET_NOWERR 0x0325 /* change ignore-write-error flag */ +#define HDIO_SET_DMA 0x0326 /* change use-dma flag */ +#define HDIO_SET_PIO_MODE 0x0327 /* reconfig interface to new speed */ +#ifndef __KERNEL__ +#define HDIO_SCAN_HWIF 0x0328 /* register and (re)scan interface */ +#define HDIO_UNREGISTER_HWIF 0x032a /* unregister interface */ +#endif +#define HDIO_SET_NICE 0x0329 /* set nice flags */ +#define HDIO_SET_WCACHE 0x032b /* change write cache enable-disable */ +#define HDIO_SET_ACOUSTIC 0x032c /* change acoustic behavior */ +#define HDIO_SET_BUSSTATE 0x032d /* set the bus state of the hwif */ +#define HDIO_SET_QDMA 0x032e /* change use-qdma flag */ +#define HDIO_SET_ADDRESS 0x032f /* change lba addressing modes */ + +/* bus states */ +enum { + BUSSTATE_OFF = 0, + BUSSTATE_ON, + BUSSTATE_TRISTATE +}; + +/* hd/ide ctl's that pass (arg) ptrs to user space are numbered 0x033n/0x033n */ +/* 0x330 is reserved - used to be HDIO_GETGEO_BIG */ +/* 0x331 is reserved - used to be HDIO_GETGEO_BIG_RAW */ +/* 0x338 is reserved - used to be HDIO_SET_IDE_SCSI */ +/* 0x339 is reserved - used to be HDIO_SET_SCSI_IDE */ + +#define __NEW_HD_DRIVE_ID + +#ifndef __KERNEL__ +/* + * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. + * + * If you change something here, please remember to update fix_driveid() in + * ide/probe.c. + */ +struct hd_driveid { + unsigned short config; /* lots of obsolete bit flags */ + unsigned short cyls; /* Obsolete, "physical" cyls */ + unsigned short reserved2; /* reserved (word 2) */ + unsigned short heads; /* Obsolete, "physical" heads */ + unsigned short track_bytes; /* unformatted bytes per track */ + unsigned short sector_bytes; /* unformatted bytes per sector */ + unsigned short sectors; /* Obsolete, "physical" sectors per track */ + unsigned short vendor0; /* vendor unique */ + unsigned short vendor1; /* vendor unique */ + unsigned short vendor2; /* Retired vendor unique */ + unsigned char serial_no[20]; /* 0 = not_specified */ + unsigned short buf_type; /* Retired */ + unsigned short buf_size; /* Retired, 512 byte increments + * 0 = not_specified + */ + unsigned short ecc_bytes; /* for r/w long cmds; 0 = not_specified */ + unsigned char fw_rev[8]; /* 0 = not_specified */ + unsigned char model[40]; /* 0 = not_specified */ + unsigned char max_multsect; /* 0=not_implemented */ + unsigned char vendor3; /* vendor unique */ + unsigned short dword_io; /* 0=not_implemented; 1=implemented */ + unsigned char vendor4; /* vendor unique */ + unsigned char capability; /* (upper byte of word 49) + * 3: IORDYsup + * 2: IORDYsw + * 1: LBA + * 0: DMA + */ + unsigned short reserved50; /* reserved (word 50) */ + unsigned char vendor5; /* Obsolete, vendor unique */ + unsigned char tPIO; /* Obsolete, 0=slow, 1=medium, 2=fast */ + unsigned char vendor6; /* Obsolete, vendor unique */ + unsigned char tDMA; /* Obsolete, 0=slow, 1=medium, 2=fast */ + unsigned short field_valid; /* (word 53) + * 2: ultra_ok word 88 + * 1: eide_ok words 64-70 + * 0: cur_ok words 54-58 + */ + unsigned short cur_cyls; /* Obsolete, logical cylinders */ + unsigned short cur_heads; /* Obsolete, l heads */ + unsigned short cur_sectors; /* Obsolete, l sectors 
per track */ + unsigned short cur_capacity0; /* Obsolete, l total sectors on drive */ + unsigned short cur_capacity1; /* Obsolete, (2 words, misaligned int) */ + unsigned char multsect; /* current multiple sector count */ + unsigned char multsect_valid; /* when (bit0==1) multsect is ok */ + unsigned int lba_capacity; /* Obsolete, total number of sectors */ + unsigned short dma_1word; /* Obsolete, single-word dma info */ + unsigned short dma_mword; /* multiple-word dma info */ + unsigned short eide_pio_modes; /* bits 0:mode3 1:mode4 */ + unsigned short eide_dma_min; /* min mword dma cycle time (ns) */ + unsigned short eide_dma_time; /* recommended mword dma cycle time (ns) */ + unsigned short eide_pio; /* min cycle time (ns), no IORDY */ + unsigned short eide_pio_iordy; /* min cycle time (ns), with IORDY */ + unsigned short words69_70[2]; /* reserved words 69-70 + * future command overlap and queuing + */ + unsigned short words71_74[4]; /* reserved words 71-74 + * for IDENTIFY PACKET DEVICE command + */ + unsigned short queue_depth; /* (word 75) + * 15:5 reserved + * 4:0 Maximum queue depth -1 + */ + unsigned short words76_79[4]; /* reserved words 76-79 */ + unsigned short major_rev_num; /* (word 80) */ + unsigned short minor_rev_num; /* (word 81) */ + unsigned short command_set_1; /* (word 82) supported + * 15: Obsolete + * 14: NOP command + * 13: READ_BUFFER + * 12: WRITE_BUFFER + * 11: Obsolete + * 10: Host Protected Area + * 9: DEVICE Reset + * 8: SERVICE Interrupt + * 7: Release Interrupt + * 6: look-ahead + * 5: write cache + * 4: PACKET Command + * 3: Power Management Feature Set + * 2: Removable Feature Set + * 1: Security Feature Set + * 0: SMART Feature Set + */ + unsigned short command_set_2; /* (word 83) + * 15: Shall be ZERO + * 14: Shall be ONE + * 13: FLUSH CACHE EXT + * 12: FLUSH CACHE + * 11: Device Configuration Overlay + * 10: 48-bit Address Feature Set + * 9: Automatic Acoustic Management + * 8: SET MAX security + * 7: reserved 1407DT PARTIES + * 6: SetF sub-command Power-Up + * 5: Power-Up in Standby Feature Set + * 4: Removable Media Notification + * 3: APM Feature Set + * 2: CFA Feature Set + * 1: READ/WRITE DMA QUEUED + * 0: Download MicroCode + */ + unsigned short cfsse; /* (word 84) + * cmd set-feature supported extensions + * 15: Shall be ZERO + * 14: Shall be ONE + * 13:6 reserved + * 5: General Purpose Logging + * 4: Streaming Feature Set + * 3: Media Card Pass Through + * 2: Media Serial Number Valid + * 1: SMART selt-test supported + * 0: SMART error logging + */ + unsigned short cfs_enable_1; /* (word 85) + * command set-feature enabled + * 15: Obsolete + * 14: NOP command + * 13: READ_BUFFER + * 12: WRITE_BUFFER + * 11: Obsolete + * 10: Host Protected Area + * 9: DEVICE Reset + * 8: SERVICE Interrupt + * 7: Release Interrupt + * 6: look-ahead + * 5: write cache + * 4: PACKET Command + * 3: Power Management Feature Set + * 2: Removable Feature Set + * 1: Security Feature Set + * 0: SMART Feature Set + */ + unsigned short cfs_enable_2; /* (word 86) + * command set-feature enabled + * 15: Shall be ZERO + * 14: Shall be ONE + * 13: FLUSH CACHE EXT + * 12: FLUSH CACHE + * 11: Device Configuration Overlay + * 10: 48-bit Address Feature Set + * 9: Automatic Acoustic Management + * 8: SET MAX security + * 7: reserved 1407DT PARTIES + * 6: SetF sub-command Power-Up + * 5: Power-Up in Standby Feature Set + * 4: Removable Media Notification + * 3: APM Feature Set + * 2: CFA Feature Set + * 1: READ/WRITE DMA QUEUED + * 0: Download MicroCode + */ + unsigned short 
csf_default; /* (word 87) + * command set-feature default + * 15: Shall be ZERO + * 14: Shall be ONE + * 13:6 reserved + * 5: General Purpose Logging enabled + * 4: Valid CONFIGURE STREAM executed + * 3: Media Card Pass Through enabled + * 2: Media Serial Number Valid + * 1: SMART selt-test supported + * 0: SMART error logging + */ + unsigned short dma_ultra; /* (word 88) */ + unsigned short trseuc; /* time required for security erase */ + unsigned short trsEuc; /* time required for enhanced erase */ + unsigned short CurAPMvalues; /* current APM values */ + unsigned short mprc; /* master password revision code */ + unsigned short hw_config; /* hardware config (word 93) + * 15: Shall be ZERO + * 14: Shall be ONE + * 13: + * 12: + * 11: + * 10: + * 9: + * 8: + * 7: + * 6: + * 5: + * 4: + * 3: + * 2: + * 1: + * 0: Shall be ONE + */ + unsigned short acoustic; /* (word 94) + * 15:8 Vendor's recommended value + * 7:0 current value + */ + unsigned short msrqs; /* min stream request size */ + unsigned short sxfert; /* stream transfer time */ + unsigned short sal; /* stream access latency */ + unsigned int spg; /* stream performance granularity */ + unsigned long long lba_capacity_2;/* 48-bit total number of sectors */ + unsigned short words104_125[22];/* reserved words 104-125 */ + unsigned short last_lun; /* (word 126) */ + unsigned short word127; /* (word 127) Feature Set + * Removable Media Notification + * 15:2 reserved + * 1:0 00 = not supported + * 01 = supported + * 10 = reserved + * 11 = reserved + */ + unsigned short dlf; /* (word 128) + * device lock function + * 15:9 reserved + * 8 security level 1:max 0:high + * 7:6 reserved + * 5 enhanced erase + * 4 expire + * 3 frozen + * 2 locked + * 1 en/disabled + * 0 capability + */ + unsigned short csfo; /* (word 129) + * current set features options + * 15:4 reserved + * 3: auto reassign + * 2: reverting + * 1: read-look-ahead + * 0: write cache + */ + unsigned short words130_155[26];/* reserved vendor words 130-155 */ + unsigned short word156; /* reserved vendor word 156 */ + unsigned short words157_159[3];/* reserved vendor words 157-159 */ + unsigned short cfa_power; /* (word 160) CFA Power Mode + * 15 word 160 supported + * 14 reserved + * 13 + * 12 + * 11:0 + */ + unsigned short words161_175[15];/* Reserved for CFA */ + unsigned short words176_205[30];/* Current Media Serial Number */ + unsigned short words206_254[49];/* reserved words 206-254 */ + unsigned short integrity_word; /* (word 255) + * 15:8 Checksum + * 7:0 Signature + */ +}; +#endif /* __KERNEL__ */ + +/* + * IDE "nice" flags. These are used on a per drive basis to determine + * when to be nice and give more bandwidth to the other devices which + * share the same IDE bus. 
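+ *
+ * Illustrative sketch only (not from the original header): user space
+ * normally reads and updates these flags through the HDIO_GET_NICE and
+ * HDIO_SET_NICE ioctls defined earlier, using the IDE_NICE_* bit
+ * numbers below, e.g.
+ *
+ *	long nice;
+ *	ioctl(fd, HDIO_GET_NICE, &nice);
+ *	ioctl(fd, HDIO_SET_NICE, nice | (1 << IDE_NICE_1));
+ *
+ * where fd is an already-open block device file descriptor.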
+ */ +#define IDE_NICE_DSC_OVERLAP (0) /* per the DSC overlap protocol */ +#define IDE_NICE_ATAPI_OVERLAP (1) /* not supported yet */ +#define IDE_NICE_1 (3) /* when probably won't affect us much */ +#ifndef __KERNEL__ +#define IDE_NICE_0 (2) /* when sure that it won't affect us */ +#define IDE_NICE_2 (4) /* when we know it's on our expense */ +#endif + +#endif /* _LINUX_HDREG_H */ diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h new file mode 100644 index 00000000..53744fa1 --- /dev/null +++ b/include/linux/hid-debug.h @@ -0,0 +1,65 @@ +#ifndef __HID_DEBUG_H +#define __HID_DEBUG_H + +/* + * Copyright (c) 2007-2009 Jiri Kosina + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define HID_DEBUG_BUFSIZE 512 + +#ifdef CONFIG_DEBUG_FS + +void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); +void hid_dump_device(struct hid_device *, struct seq_file *); +void hid_dump_field(struct hid_field *, int, struct seq_file *); +char *hid_resolv_usage(unsigned, struct seq_file *); +void hid_debug_register(struct hid_device *, const char *); +void hid_debug_unregister(struct hid_device *); +void hid_debug_init(void); +void hid_debug_exit(void); +void hid_debug_event(struct hid_device *, char *); + + +struct hid_debug_list { + char *hid_debug_buf; + int head; + int tail; + struct fasync_struct *fasync; + struct hid_device *hdev; + struct list_head node; + struct mutex read_mutex; +}; + +#else + +#define hid_dump_input(a,b,c) do { } while (0) +#define hid_dump_device(a,b) do { } while (0) +#define hid_dump_field(a,b,c) do { } while (0) +#define hid_resolv_usage(a,b) do { } while (0) +#define hid_debug_register(a, b) do { } while (0) +#define hid_debug_unregister(a) do { } while (0) +#define hid_debug_init() do { } while (0) +#define hid_debug_exit() do { } while (0) +#define hid_debug_event(a,b) do { } while (0) + +#endif + +#endif + diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h new file mode 100644 index 00000000..24e1ca01 --- /dev/null +++ b/include/linux/hid-roccat.h @@ -0,0 +1,29 @@ +#ifndef __HID_ROCCAT_H +#define __HID_ROCCAT_H + +/* + * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include <linux/hid.h> +#include <linux/types.h> + +#define ROCCATIOCGREPSIZE _IOR('H', 0xf1, int) + +#ifdef __KERNEL__ + +int roccat_connect(struct class *klass, struct hid_device *hid, + int report_size); +void roccat_disconnect(int minor); +int roccat_report_event(int minor, u8 const *data); + +#endif + +#endif diff --git a/include/linux/hid.h b/include/linux/hid.h new file mode 100644 index 00000000..773b4acf --- /dev/null +++ b/include/linux/hid.h @@ -0,0 +1,949 @@ +#ifndef __HID_H +#define __HID_H + +/* + * Copyright (c) 1999 Andreas Gal + * Copyright (c) 2000-2001 Vojtech Pavlik + * Copyright (c) 2006-2007 Jiri Kosina + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Should you need to contact me, the author, you can do so either by + * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: + * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic + */ + +/* + * USB HID (Human Interface Device) interface class code + */ + +#define USB_INTERFACE_CLASS_HID 3 + +/* + * USB HID interface subclass and protocol codes + */ + +#define USB_INTERFACE_SUBCLASS_BOOT 1 +#define USB_INTERFACE_PROTOCOL_KEYBOARD 1 +#define USB_INTERFACE_PROTOCOL_MOUSE 2 + +/* + * HID class requests + */ + +#define HID_REQ_GET_REPORT 0x01 +#define HID_REQ_GET_IDLE 0x02 +#define HID_REQ_GET_PROTOCOL 0x03 +#define HID_REQ_SET_REPORT 0x09 +#define HID_REQ_SET_IDLE 0x0A +#define HID_REQ_SET_PROTOCOL 0x0B + +/* + * HID class descriptor types + */ + +#define HID_DT_HID (USB_TYPE_CLASS | 0x01) +#define HID_DT_REPORT (USB_TYPE_CLASS | 0x02) +#define HID_DT_PHYSICAL (USB_TYPE_CLASS | 0x03) + +#define HID_MAX_DESCRIPTOR_SIZE 4096 + +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/mod_devicetable.h> /* hid_device_id */ +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/input.h> +#include <linux/semaphore.h> +#include <linux/power_supply.h> + +/* + * We parse each description item into this structure. Short items data + * values are expanded to 32-bit signed int, long items contain a pointer + * into the data area. 
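+ *
+ * Worked example (illustrative, not from the original comment): the two
+ * report-descriptor bytes 0x15 0x81 encode a short Logical Minimum
+ * item; the prefix byte 0x15 decodes to size 1, type GLOBAL and tag
+ * LOGICAL_MINIMUM, and the single data byte 0x81 is expanded to the
+ * 32-bit signed value -127.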
+ */ + +struct hid_item { + unsigned format; + __u8 size; + __u8 type; + __u8 tag; + union { + __u8 u8; + __s8 s8; + __u16 u16; + __s16 s16; + __u32 u32; + __s32 s32; + __u8 *longdata; + } data; +}; + +/* + * HID report item format + */ + +#define HID_ITEM_FORMAT_SHORT 0 +#define HID_ITEM_FORMAT_LONG 1 + +/* + * Special tag indicating long items + */ + +#define HID_ITEM_TAG_LONG 15 + +/* + * HID report descriptor item type (prefix bit 2,3) + */ + +#define HID_ITEM_TYPE_MAIN 0 +#define HID_ITEM_TYPE_GLOBAL 1 +#define HID_ITEM_TYPE_LOCAL 2 +#define HID_ITEM_TYPE_RESERVED 3 + +/* + * HID report descriptor main item tags + */ + +#define HID_MAIN_ITEM_TAG_INPUT 8 +#define HID_MAIN_ITEM_TAG_OUTPUT 9 +#define HID_MAIN_ITEM_TAG_FEATURE 11 +#define HID_MAIN_ITEM_TAG_BEGIN_COLLECTION 10 +#define HID_MAIN_ITEM_TAG_END_COLLECTION 12 + +/* + * HID report descriptor main item contents + */ + +#define HID_MAIN_ITEM_CONSTANT 0x001 +#define HID_MAIN_ITEM_VARIABLE 0x002 +#define HID_MAIN_ITEM_RELATIVE 0x004 +#define HID_MAIN_ITEM_WRAP 0x008 +#define HID_MAIN_ITEM_NONLINEAR 0x010 +#define HID_MAIN_ITEM_NO_PREFERRED 0x020 +#define HID_MAIN_ITEM_NULL_STATE 0x040 +#define HID_MAIN_ITEM_VOLATILE 0x080 +#define HID_MAIN_ITEM_BUFFERED_BYTE 0x100 + +/* + * HID report descriptor collection item types + */ + +#define HID_COLLECTION_PHYSICAL 0 +#define HID_COLLECTION_APPLICATION 1 +#define HID_COLLECTION_LOGICAL 2 + +/* + * HID report descriptor global item tags + */ + +#define HID_GLOBAL_ITEM_TAG_USAGE_PAGE 0 +#define HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM 1 +#define HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM 2 +#define HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM 3 +#define HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM 4 +#define HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT 5 +#define HID_GLOBAL_ITEM_TAG_UNIT 6 +#define HID_GLOBAL_ITEM_TAG_REPORT_SIZE 7 +#define HID_GLOBAL_ITEM_TAG_REPORT_ID 8 +#define HID_GLOBAL_ITEM_TAG_REPORT_COUNT 9 +#define HID_GLOBAL_ITEM_TAG_PUSH 10 +#define HID_GLOBAL_ITEM_TAG_POP 11 + +/* + * HID report descriptor local item tags + */ + +#define HID_LOCAL_ITEM_TAG_USAGE 0 +#define HID_LOCAL_ITEM_TAG_USAGE_MINIMUM 1 +#define HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM 2 +#define HID_LOCAL_ITEM_TAG_DESIGNATOR_INDEX 3 +#define HID_LOCAL_ITEM_TAG_DESIGNATOR_MINIMUM 4 +#define HID_LOCAL_ITEM_TAG_DESIGNATOR_MAXIMUM 5 +#define HID_LOCAL_ITEM_TAG_STRING_INDEX 7 +#define HID_LOCAL_ITEM_TAG_STRING_MINIMUM 8 +#define HID_LOCAL_ITEM_TAG_STRING_MAXIMUM 9 +#define HID_LOCAL_ITEM_TAG_DELIMITER 10 + +/* + * HID usage tables + */ + +#define HID_USAGE_PAGE 0xffff0000 + +#define HID_UP_UNDEFINED 0x00000000 +#define HID_UP_GENDESK 0x00010000 +#define HID_UP_SIMULATION 0x00020000 +#define HID_UP_GENDEVCTRLS 0x00060000 +#define HID_UP_KEYBOARD 0x00070000 +#define HID_UP_LED 0x00080000 +#define HID_UP_BUTTON 0x00090000 +#define HID_UP_ORDINAL 0x000a0000 +#define HID_UP_CONSUMER 0x000c0000 +#define HID_UP_DIGITIZER 0x000d0000 +#define HID_UP_PID 0x000f0000 +#define HID_UP_HPVENDOR 0xff7f0000 +#define HID_UP_MSVENDOR 0xff000000 +#define HID_UP_CUSTOM 0x00ff0000 +#define HID_UP_LOGIVENDOR 0xffbc0000 + +#define HID_USAGE 0x0000ffff + +#define HID_GD_POINTER 0x00010001 +#define HID_GD_MOUSE 0x00010002 +#define HID_GD_JOYSTICK 0x00010004 +#define HID_GD_GAMEPAD 0x00010005 +#define HID_GD_KEYBOARD 0x00010006 +#define HID_GD_KEYPAD 0x00010007 +#define HID_GD_MULTIAXIS 0x00010008 +#define HID_GD_X 0x00010030 +#define HID_GD_Y 0x00010031 +#define HID_GD_Z 0x00010032 +#define HID_GD_RX 0x00010033 +#define HID_GD_RY 0x00010034 +#define HID_GD_RZ 0x00010035 +#define 
HID_GD_SLIDER 0x00010036 +#define HID_GD_DIAL 0x00010037 +#define HID_GD_WHEEL 0x00010038 +#define HID_GD_HATSWITCH 0x00010039 +#define HID_GD_BUFFER 0x0001003a +#define HID_GD_BYTECOUNT 0x0001003b +#define HID_GD_MOTION 0x0001003c +#define HID_GD_START 0x0001003d +#define HID_GD_SELECT 0x0001003e +#define HID_GD_VX 0x00010040 +#define HID_GD_VY 0x00010041 +#define HID_GD_VZ 0x00010042 +#define HID_GD_VBRX 0x00010043 +#define HID_GD_VBRY 0x00010044 +#define HID_GD_VBRZ 0x00010045 +#define HID_GD_VNO 0x00010046 +#define HID_GD_FEATURE 0x00010047 +#define HID_GD_UP 0x00010090 +#define HID_GD_DOWN 0x00010091 +#define HID_GD_RIGHT 0x00010092 +#define HID_GD_LEFT 0x00010093 + +#define HID_DC_BATTERYSTRENGTH 0x00060020 + +#define HID_DG_DIGITIZER 0x000d0001 +#define HID_DG_PEN 0x000d0002 +#define HID_DG_LIGHTPEN 0x000d0003 +#define HID_DG_TOUCHSCREEN 0x000d0004 +#define HID_DG_TOUCHPAD 0x000d0005 +#define HID_DG_STYLUS 0x000d0020 +#define HID_DG_PUCK 0x000d0021 +#define HID_DG_FINGER 0x000d0022 +#define HID_DG_TIPPRESSURE 0x000d0030 +#define HID_DG_BARRELPRESSURE 0x000d0031 +#define HID_DG_INRANGE 0x000d0032 +#define HID_DG_TOUCH 0x000d0033 +#define HID_DG_UNTOUCH 0x000d0034 +#define HID_DG_TAP 0x000d0035 +#define HID_DG_TABLETFUNCTIONKEY 0x000d0039 +#define HID_DG_PROGRAMCHANGEKEY 0x000d003a +#define HID_DG_INVERT 0x000d003c +#define HID_DG_TIPSWITCH 0x000d0042 +#define HID_DG_TIPSWITCH2 0x000d0043 +#define HID_DG_BARRELSWITCH 0x000d0044 +#define HID_DG_ERASER 0x000d0045 +#define HID_DG_TABLETPICK 0x000d0046 +/* + * as of May 20, 2009 the usages below are not yet in the official USB spec + * but are being pushed by Microsft as described in their paper "Digitizer + * Drivers for Windows Touch and Pen-Based Computers" + */ +#define HID_DG_CONFIDENCE 0x000d0047 +#define HID_DG_WIDTH 0x000d0048 +#define HID_DG_HEIGHT 0x000d0049 +#define HID_DG_CONTACTID 0x000d0051 +#define HID_DG_INPUTMODE 0x000d0052 +#define HID_DG_DEVICEINDEX 0x000d0053 +#define HID_DG_CONTACTCOUNT 0x000d0054 +#define HID_DG_CONTACTMAX 0x000d0055 + +/* + * HID report types --- Ouch! HID spec says 1 2 3! + */ + +#define HID_INPUT_REPORT 0 +#define HID_OUTPUT_REPORT 1 +#define HID_FEATURE_REPORT 2 + +/* + * HID connect requests + */ + +#define HID_CONNECT_HIDINPUT 0x01 +#define HID_CONNECT_HIDINPUT_FORCE 0x02 +#define HID_CONNECT_HIDRAW 0x04 +#define HID_CONNECT_HIDDEV 0x08 +#define HID_CONNECT_HIDDEV_FORCE 0x10 +#define HID_CONNECT_FF 0x20 +#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ + HID_CONNECT_HIDDEV|HID_CONNECT_FF) + +/* + * HID device quirks. + */ + +/* + * Increase this if you need to configure more HID quirks at module load time + */ +#define MAX_USBHID_BOOT_QUIRKS 4 + +#define HID_QUIRK_INVERT 0x00000001 +#define HID_QUIRK_NOTOUCH 0x00000002 +#define HID_QUIRK_IGNORE 0x00000004 +#define HID_QUIRK_NOGET 0x00000008 +#define HID_QUIRK_HIDDEV_FORCE 0x00000010 +#define HID_QUIRK_BADPAD 0x00000020 +#define HID_QUIRK_MULTI_INPUT 0x00000040 +#define HID_QUIRK_HIDINPUT_FORCE 0x00000080 +#define HID_QUIRK_MULTITOUCH 0x00000100 +#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 +#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 +#define HID_QUIRK_NO_INIT_REPORTS 0x20000000 +#define HID_QUIRK_NO_IGNORE 0x40000000 +#define HID_QUIRK_NO_INPUT_SYNC 0x80000000 + +/* + * This is the global environment of the parser. This information is + * persistent for main-items. The global environment can be saved and + * restored with PUSH/POP statements. 
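+ *
+ * Illustrative example: a report descriptor may Push the current
+ * globals, switch Report Size from 8 to 1 for a run of button Input
+ * items, and then Pop so the following fields see Report Size 8 again;
+ * only the members of struct hid_global below take part in that
+ * save/restore.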
+ */ + +struct hid_global { + unsigned usage_page; + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned unit; + unsigned report_id; + unsigned report_size; + unsigned report_count; +}; + +/* + * This is the local environment. It is persistent up the next main-item. + */ + +#define HID_MAX_USAGES 12288 +#define HID_DEFAULT_NUM_COLLECTIONS 16 + +struct hid_local { + unsigned usage[HID_MAX_USAGES]; /* usage array */ + unsigned collection_index[HID_MAX_USAGES]; /* collection index array */ + unsigned usage_index; + unsigned usage_minimum; + unsigned delimiter_depth; + unsigned delimiter_branch; +}; + +/* + * This is the collection stack. We climb up the stack to determine + * application and function of each field. + */ + +struct hid_collection { + unsigned type; + unsigned usage; + unsigned level; +}; + +struct hid_usage { + unsigned hid; /* hid usage code */ + unsigned collection_index; /* index into collection array */ + /* hidinput data */ + __u16 code; /* input driver code */ + __u8 type; /* input driver type */ + __s8 hat_min; /* hat switch fun */ + __s8 hat_max; /* ditto */ + __s8 hat_dir; /* ditto */ +}; + +struct hid_input; + +struct hid_field { + unsigned physical; /* physical usage for this field */ + unsigned logical; /* logical usage for this field */ + unsigned application; /* application usage for this field */ + struct hid_usage *usage; /* usage table for this function */ + unsigned maxusage; /* maximum usage index */ + unsigned flags; /* main-item flags (i.e. volatile,array,constant) */ + unsigned report_offset; /* bit offset in the report */ + unsigned report_size; /* size of this field in the report */ + unsigned report_count; /* number of this field in the report */ + unsigned report_type; /* (input,output,feature) */ + __s32 *value; /* last known value(s) */ + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __s32 unit_exponent; + unsigned unit; + struct hid_report *report; /* associated report */ + unsigned index; /* index into report->field[] */ + /* hidinput data */ + struct hid_input *hidinput; /* associated input structure */ + __u16 dpad; /* dpad input code */ +}; + +#define HID_MAX_FIELDS 128 + +struct hid_report { + struct list_head list; + unsigned id; /* id of this report */ + unsigned type; /* report type */ + struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */ + unsigned maxfield; /* maximum valid field index */ + unsigned size; /* size of the report (bits) */ + struct hid_device *device; /* associated device */ +}; + +struct hid_report_enum { + unsigned numbered; + struct list_head report_list; + struct hid_report *report_id_hash[256]; +}; + +#define HID_REPORT_TYPES 3 + +#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */ +#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */ +#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */ +#define HID_OUTPUT_FIFO_SIZE 64 + +struct hid_control_fifo { + unsigned char dir; + struct hid_report *report; + char *raw_report; +}; + +struct hid_output_fifo { + struct hid_report *report; + char *raw_report; +}; + +#define HID_CLAIMED_INPUT 1 +#define HID_CLAIMED_HIDDEV 2 +#define HID_CLAIMED_HIDRAW 4 + +#define HID_STAT_ADDED 1 +#define HID_STAT_PARSED 2 + +struct hid_input { + struct list_head list; + struct hid_report *report; + struct input_dev *input; +}; + +enum hid_type { + HID_TYPE_OTHER = 0, + HID_TYPE_USBMOUSE, + 
HID_TYPE_USBNONE +}; + +struct hid_driver; +struct hid_ll_driver; + +struct hid_device { /* device report descriptor */ + __u8 *rdesc; + unsigned rsize; + struct hid_collection *collection; /* List of HID collections */ + unsigned collection_size; /* Number of allocated hid_collections */ + unsigned maxcollection; /* Number of parsed collections */ + unsigned maxapplication; /* Number of applications */ + __u16 bus; /* BUS ID */ + __u32 vendor; /* Vendor ID */ + __u32 product; /* Product ID */ + __u32 version; /* HID version */ + enum hid_type type; /* device type (mouse, kbd, ...) */ + unsigned country; /* HID country */ + struct hid_report_enum report_enum[HID_REPORT_TYPES]; + + struct semaphore driver_lock; /* protects the current driver */ + struct device dev; /* device */ + struct hid_driver *driver; + struct hid_ll_driver *ll_driver; + +#ifdef CONFIG_HID_BATTERY_STRENGTH + /* + * Power supply information for HID devices which report + * battery strength. power_supply is registered iff + * battery.name is non-NULL. + */ + struct power_supply battery; + __s32 battery_min; + __s32 battery_max; + __s32 battery_report_type; + __s32 battery_report_id; +#endif + + unsigned int status; /* see STAT flags above */ + unsigned claimed; /* Claimed by hidinput, hiddev? */ + unsigned quirks; /* Various quirks the device can pull on us */ + + struct list_head inputs; /* The list of inputs */ + void *hiddev; /* The hiddev structure */ + void *hidraw; + int minor; /* Hiddev minor number */ + + int open; /* is the device open by anyone? */ + char name[128]; /* Device name */ + char phys[64]; /* Device physical location */ + char uniq[64]; /* Device unique identifier (serial #) */ + + void *driver_data; + + /* temporary hid_ff handling (until moved to the drivers) */ + int (*ff_init)(struct hid_device *); + + /* hiddev event handler */ + int (*hiddev_connect)(struct hid_device *, unsigned int); + void (*hiddev_disconnect)(struct hid_device *); + void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field, + struct hid_usage *, __s32); + void (*hiddev_report_event) (struct hid_device *, struct hid_report *); + + /* handler for raw input (Get_Report) data, used by hidraw */ + int (*hid_get_raw_report) (struct hid_device *, unsigned char, __u8 *, size_t, unsigned char); + + /* handler for raw output data, used by hidraw */ + int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t, unsigned char); + + /* debugging support via debugfs */ + unsigned short debug; + struct dentry *debug_dir; + struct dentry *debug_rdesc; + struct dentry *debug_events; + struct list_head debug_list; + wait_queue_head_t debug_wait; +}; + +static inline void *hid_get_drvdata(struct hid_device *hdev) +{ + return dev_get_drvdata(&hdev->dev); +} + +static inline void hid_set_drvdata(struct hid_device *hdev, void *data) +{ + dev_set_drvdata(&hdev->dev, data); +} + +#define HID_GLOBAL_STACK_SIZE 4 +#define HID_COLLECTION_STACK_SIZE 4 + +struct hid_parser { + struct hid_global global; + struct hid_global global_stack[HID_GLOBAL_STACK_SIZE]; + unsigned global_stack_ptr; + struct hid_local local; + unsigned collection_stack[HID_COLLECTION_STACK_SIZE]; + unsigned collection_stack_ptr; + struct hid_device *device; +}; + +struct hid_class_descriptor { + __u8 bDescriptorType; + __le16 wDescriptorLength; +} __attribute__ ((packed)); + +struct hid_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdHID; + __u8 bCountryCode; + __u8 bNumDescriptors; + + struct hid_class_descriptor desc[1]; +} __attribute__ 
((packed)); + +#define HID_DEVICE(b, ven, prod) \ + .bus = (b), \ + .vendor = (ven), .product = (prod) + +#define HID_USB_DEVICE(ven, prod) HID_DEVICE(BUS_USB, ven, prod) +#define HID_BLUETOOTH_DEVICE(ven, prod) HID_DEVICE(BUS_BLUETOOTH, ven, prod) + +#define HID_REPORT_ID(rep) \ + .report_type = (rep) +#define HID_USAGE_ID(uhid, utype, ucode) \ + .usage_hid = (uhid), .usage_type = (utype), .usage_code = (ucode) +/* we don't want to catch types and codes equal to 0 */ +#define HID_TERMINATOR (HID_ANY_ID - 1) + +struct hid_report_id { + __u32 report_type; +}; +struct hid_usage_id { + __u32 usage_hid; + __u32 usage_type; + __u32 usage_code; +}; + +/** + * struct hid_driver + * @name: driver name (e.g. "Footech_bar-wheel") + * @id_table: which devices is this driver for (must be non-NULL for probe + * to be called) + * @dyn_list: list of dynamically added device ids + * @dyn_lock: lock protecting @dyn_list + * @probe: new device inserted + * @remove: device removed (NULL if not a hot-plug capable driver) + * @report_table: on which reports to call raw_event (NULL means all) + * @raw_event: if report in report_table, this hook is called (NULL means nop) + * @usage_table: on which events to call event (NULL means all) + * @event: if usage in usage_table, this hook is called (NULL means nop) + * @report_fixup: called before report descriptor parsing (NULL means nop) + * @input_mapping: invoked on input registering before mapping an usage + * @input_mapped: invoked on input registering after mapping an usage + * @feature_mapping: invoked on feature registering + * @input_register: called just before input device is registered after reports + * are parsed. + * @suspend: invoked on suspend (NULL means nop) + * @resume: invoked on resume if device was not reset (NULL means nop) + * @reset_resume: invoked on resume if device was reset (NULL means nop) + * + * raw_event and event should return 0 on no action performed, 1 when no + * further processing should be done and negative on error + * + * input_mapping shall return a negative value to completely ignore this usage + * (e.g. doubled or invalid usage), zero to continue with parsing of this + * usage by generic code (no special handling needed) or positive to skip + * generic parsing (needed special handling which was done in the hook already) + * input_mapped shall return negative to inform the layer that this usage + * should not be considered for further processing or zero to notify that + * no processing was performed and should be done in a generic manner + * Both these functions may be NULL which means the same behavior as returning + * zero from them. 
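 *
 * As a rough sketch of how these hooks are usually wired together (the
 * foo_* names and the foo_devices id table are illustrative only, and
 * HID_CONNECT_DEFAULT is the connect mask assumed to be defined earlier
 * in this header):
 *
 *    static int foo_probe(struct hid_device *hdev,
 *                         const struct hid_device_id *id)
 *    {
 *        int ret;
 *
 *        ret = hid_parse(hdev);
 *        if (ret)
 *            return ret;
 *        return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *    }
 *
 *    static struct hid_driver foo_driver = {
 *        .name     = "foo",
 *        .id_table = foo_devices,
 *        .probe    = foo_probe,
 *    };
 *
 * and the driver is then registered with hid_register_driver(&foo_driver).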
+ */ +struct hid_driver { + char *name; + const struct hid_device_id *id_table; + + struct list_head dyn_list; + spinlock_t dyn_lock; + + int (*probe)(struct hid_device *dev, const struct hid_device_id *id); + void (*remove)(struct hid_device *dev); + + const struct hid_report_id *report_table; + int (*raw_event)(struct hid_device *hdev, struct hid_report *report, + u8 *data, int size); + const struct hid_usage_id *usage_table; + int (*event)(struct hid_device *hdev, struct hid_field *field, + struct hid_usage *usage, __s32 value); + + __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf, + unsigned int *size); + + int (*input_mapping)(struct hid_device *hdev, + struct hid_input *hidinput, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max); + int (*input_mapped)(struct hid_device *hdev, + struct hid_input *hidinput, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max); + void (*feature_mapping)(struct hid_device *hdev, + struct hid_field *field, + struct hid_usage *usage); + int (*input_register)(struct hid_device *hdev, struct hid_input + *hidinput); +#ifdef CONFIG_PM + int (*suspend)(struct hid_device *hdev, pm_message_t message); + int (*resume)(struct hid_device *hdev); + int (*reset_resume)(struct hid_device *hdev); +#endif +/* private: */ + struct device_driver driver; +}; + +/** + * hid_ll_driver - low level driver callbacks + * @start: called on probe to start the device + * @stop: called on remove + * @open: called by input layer on open + * @close: called by input layer on close + * @hidinput_input_event: event input event (e.g. ff or leds) + * @parse: this method is called only once to parse the device data, + * shouldn't allocate anything to not leak memory + */ +struct hid_ll_driver { + int (*start)(struct hid_device *hdev); + void (*stop)(struct hid_device *hdev); + + int (*open)(struct hid_device *hdev); + void (*close)(struct hid_device *hdev); + + int (*power)(struct hid_device *hdev, int level); + + int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, + unsigned int code, int value); + + int (*parse)(struct hid_device *hdev); +}; + +#define PM_HINT_FULLON 1<<5 +#define PM_HINT_NORMAL 1<<1 + +/* Applications from HID Usage Tables 4/8/99 Version 1.1 */ +/* We ignore a few input applications that are not widely used */ +#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006))) + +/* HID core API */ + +extern int hid_debug; + +extern int hid_add_device(struct hid_device *); +extern void hid_destroy_device(struct hid_device *); + +extern int __must_check __hid_register_driver(struct hid_driver *, + struct module *, const char *mod_name); + +/* use a define to avoid include chaining to get THIS_MODULE & friends */ +#define hid_register_driver(driver) \ + __hid_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) + +extern void hid_unregister_driver(struct hid_driver *); + +extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); +extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report); +extern int hidinput_connect(struct hid_device *hid, unsigned int force); +extern void hidinput_disconnect(struct hid_device *); + +int hid_set_field(struct hid_field *, unsigned, __s32); +int hid_input_report(struct hid_device *, int type, u8 *, int, int); +int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int 
code, struct hid_field **field); +struct hid_field *hidinput_get_led_field(struct hid_device *hid); +unsigned int hidinput_count_leds(struct hid_device *hid); +void hid_output_report(struct hid_report *report, __u8 *data); +struct hid_device *hid_allocate_device(void); +struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id); +int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); +int hid_check_keys_pressed(struct hid_device *hid); +int hid_connect(struct hid_device *hid, unsigned int connect_mask); +void hid_disconnect(struct hid_device *hid); +const struct hid_device_id *hid_match_id(struct hid_device *hdev, + const struct hid_device_id *id); + +/** + * hid_map_usage - map usage input bits + * + * @hidinput: hidinput which we are interested in + * @usage: usage to fill in + * @bit: pointer to input->{}bit (out parameter) + * @max: maximal valid usage->code to consider later (out parameter) + * @type: input event type (EV_KEY, EV_REL, ...) + * @c: code which corresponds to this usage and type + */ +static inline void hid_map_usage(struct hid_input *hidinput, + struct hid_usage *usage, unsigned long **bit, int *max, + __u8 type, __u16 c) +{ + struct input_dev *input = hidinput->input; + + usage->type = type; + usage->code = c; + + switch (type) { + case EV_ABS: + *bit = input->absbit; + *max = ABS_MAX; + break; + case EV_REL: + *bit = input->relbit; + *max = REL_MAX; + break; + case EV_KEY: + *bit = input->keybit; + *max = KEY_MAX; + break; + case EV_LED: + *bit = input->ledbit; + *max = LED_MAX; + break; + } +} + +/** + * hid_map_usage_clear - map usage input bits and clear the input bit + * + * The same as hid_map_usage, except the @c bit is also cleared in supported + * bits (@bit). + */ +static inline void hid_map_usage_clear(struct hid_input *hidinput, + struct hid_usage *usage, unsigned long **bit, int *max, + __u8 type, __u16 c) +{ + hid_map_usage(hidinput, usage, bit, max, type, c); + clear_bit(c, *bit); +} + +/** + * hid_parse - parse HW reports + * + * @hdev: hid device + * + * Call this from probe after you set up the device (if needed). Your + * report_fixup will be called (if non-NULL) after reading raw report from + * device before passing it to hid layer for real parsing. + */ +static inline int __must_check hid_parse(struct hid_device *hdev) +{ + int ret; + + if (hdev->status & HID_STAT_PARSED) + return 0; + + ret = hdev->ll_driver->parse(hdev); + if (!ret) + hdev->status |= HID_STAT_PARSED; + + return ret; +} + +/** + * hid_hw_start - start underlaying HW + * + * @hdev: hid device + * @connect_mask: which outputs to connect, see HID_CONNECT_* + * + * Call this in probe function *after* hid_parse. This will setup HW buffers + * and start the device (if not deffered to device open). hid_hw_stop must be + * called if this was successful. + */ +static inline int __must_check hid_hw_start(struct hid_device *hdev, + unsigned int connect_mask) +{ + int ret = hdev->ll_driver->start(hdev); + if (ret || !connect_mask) + return ret; + ret = hid_connect(hdev, connect_mask); + if (ret) + hdev->ll_driver->stop(hdev); + return ret; +} + +/** + * hid_hw_stop - stop underlaying HW + * + * @hdev: hid device + * + * This is usually called from remove function or from probe when something + * failed and hid_hw_start was called already. 
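 *
 * A minimal sketch of the usual pairing (foo_remove is illustrative only):
 *
 *    static void foo_remove(struct hid_device *hdev)
 *    {
 *        hid_hw_stop(hdev);
 *    }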
+ */ +static inline void hid_hw_stop(struct hid_device *hdev) +{ + hid_disconnect(hdev); + hdev->ll_driver->stop(hdev); +} + +/** + * hid_hw_open - signal underlaying HW to start delivering events + * + * @hdev: hid device + * + * Tell underlying HW to start delivering events from the device. + * This function should be called sometime after successful call + * to hid_hiw_start(). + */ +static inline int __must_check hid_hw_open(struct hid_device *hdev) +{ + return hdev->ll_driver->open(hdev); +} + +/** + * hid_hw_close - signal underlaying HW to stop delivering events + * + * @hdev: hid device + * + * This function indicates that we are not interested in the events + * from this device anymore. Delivery of events may or may not stop, + * depending on the number of users still outstanding. + */ +static inline void hid_hw_close(struct hid_device *hdev) +{ + hdev->ll_driver->close(hdev); +} + +/** + * hid_hw_power - requests underlying HW to go into given power mode + * + * @hdev: hid device + * @level: requested power level (one of %PM_HINT_* defines) + * + * This function requests underlying hardware to enter requested power + * mode. + */ + +static inline int hid_hw_power(struct hid_device *hdev, int level) +{ + return hdev->ll_driver->power ? hdev->ll_driver->power(hdev, level) : 0; +} + +void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, + int interrupt); + +extern int hid_generic_init(void); +extern void hid_generic_exit(void); + +/* HID quirks API */ +u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct); +int usbhid_quirks_init(char **quirks_param); +void usbhid_quirks_exit(void); +void usbhid_set_leds(struct hid_device *hid); + +#ifdef CONFIG_HID_PID +int hid_pidff_init(struct hid_device *hid); +#else +#define hid_pidff_init NULL +#endif + +#define dbg_hid(format, arg...) \ +do { \ + if (hid_debug) \ + printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \ +} while (0) + +#define hid_printk(level, hid, fmt, arg...) \ + dev_printk(level, &(hid)->dev, fmt, ##arg) +#define hid_emerg(hid, fmt, arg...) \ + dev_emerg(&(hid)->dev, fmt, ##arg) +#define hid_crit(hid, fmt, arg...) \ + dev_crit(&(hid)->dev, fmt, ##arg) +#define hid_alert(hid, fmt, arg...) \ + dev_alert(&(hid)->dev, fmt, ##arg) +#define hid_err(hid, fmt, arg...) \ + dev_err(&(hid)->dev, fmt, ##arg) +#define hid_notice(hid, fmt, arg...) \ + dev_notice(&(hid)->dev, fmt, ##arg) +#define hid_warn(hid, fmt, arg...) \ + dev_warn(&(hid)->dev, fmt, ##arg) +#define hid_info(hid, fmt, arg...) \ + dev_info(&(hid)->dev, fmt, ##arg) +#define hid_dbg(hid, fmt, arg...) \ + dev_dbg(&(hid)->dev, fmt, ##arg) + +#endif /* __KERNEL__ */ + +#endif + diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h new file mode 100644 index 00000000..a3f481a3 --- /dev/null +++ b/include/linux/hiddev.h @@ -0,0 +1,240 @@ +#ifndef _HIDDEV_H +#define _HIDDEV_H + +/* + * Copyright (c) 1999-2000 Vojtech Pavlik + * + * Sponsored by SuSE + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Should you need to contact me, the author, you can do so either by + * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: + * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic + */ + +#include <linux/types.h> + +/* + * The event structure itself + */ + +struct hiddev_event { + unsigned hid; + signed int value; +}; + +struct hiddev_devinfo { + __u32 bustype; + __u32 busnum; + __u32 devnum; + __u32 ifnum; + __s16 vendor; + __s16 product; + __s16 version; + __u32 num_applications; +}; + +struct hiddev_collection_info { + __u32 index; + __u32 type; + __u32 usage; + __u32 level; +}; + +#define HID_STRING_SIZE 256 +struct hiddev_string_descriptor { + __s32 index; + char value[HID_STRING_SIZE]; +}; + +struct hiddev_report_info { + __u32 report_type; + __u32 report_id; + __u32 num_fields; +}; + +/* To do a GUSAGE/SUSAGE, fill in at least usage_code, report_type and + * report_id. Set report_id to REPORT_ID_UNKNOWN if the rest of the fields + * are unknown. Otherwise use a usage_ref struct filled in from a previous + * successful GUSAGE call to save time. To actually send a value to the + * device, perform a SUSAGE first, followed by a SREPORT. An INITREPORT or a + * GREPORT isn't necessary for a GUSAGE to return valid data. + */ +#define HID_REPORT_ID_UNKNOWN 0xffffffff +#define HID_REPORT_ID_FIRST 0x00000100 +#define HID_REPORT_ID_NEXT 0x00000200 +#define HID_REPORT_ID_MASK 0x000000ff +#define HID_REPORT_ID_MAX 0x000000ff + +#define HID_REPORT_TYPE_INPUT 1 +#define HID_REPORT_TYPE_OUTPUT 2 +#define HID_REPORT_TYPE_FEATURE 3 +#define HID_REPORT_TYPE_MIN 1 +#define HID_REPORT_TYPE_MAX 3 + +struct hiddev_field_info { + __u32 report_type; + __u32 report_id; + __u32 field_index; + __u32 maxusage; + __u32 flags; + __u32 physical; /* physical usage for this field */ + __u32 logical; /* logical usage for this field */ + __u32 application; /* application usage for this field */ + __s32 logical_minimum; + __s32 logical_maximum; + __s32 physical_minimum; + __s32 physical_maximum; + __u32 unit_exponent; + __u32 unit; +}; + +/* Fill in report_type, report_id and field_index to get the information on a + * field. + */ +#define HID_FIELD_CONSTANT 0x001 +#define HID_FIELD_VARIABLE 0x002 +#define HID_FIELD_RELATIVE 0x004 +#define HID_FIELD_WRAP 0x008 +#define HID_FIELD_NONLINEAR 0x010 +#define HID_FIELD_NO_PREFERRED 0x020 +#define HID_FIELD_NULL_STATE 0x040 +#define HID_FIELD_VOLATILE 0x080 +#define HID_FIELD_BUFFERED_BYTE 0x100 + +struct hiddev_usage_ref { + __u32 report_type; + __u32 report_id; + __u32 field_index; + __u32 usage_index; + __u32 usage_code; + __s32 value; +}; + +/* hiddev_usage_ref_multi is used for sending multiple bytes to a control. + * It really manifests itself as setting the value of consecutive usages */ +#define HID_MAX_MULTI_USAGES 1024 +struct hiddev_usage_ref_multi { + struct hiddev_usage_ref uref; + __u32 num_values; + __s32 values[HID_MAX_MULTI_USAGES]; +}; + +/* FIELD_INDEX_NONE is returned in read() data from the kernel when flags + * is set to (HIDDEV_FLAG_UREF | HIDDEV_FLAG_REPORT) and a new report has + * been sent by the device + */ +#define HID_FIELD_INDEX_NONE 0xffffffff + +/* + * Protocol version. 
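 */

/*
 * The ioctls defined below implement the GUSAGE/SUSAGE procedure described
 * earlier. As a user-space sketch (error handling omitted; fd is an open
 * hiddev node and 0x000c0001 is only an illustrative usage code), writing
 * one value to an output usage looks roughly like:
 *
 *    struct hiddev_usage_ref uref = {
 *        .report_type = HID_REPORT_TYPE_OUTPUT,
 *        .report_id   = HID_REPORT_ID_UNKNOWN,
 *        .usage_code  = 0x000c0001,
 *    };
 *    struct hiddev_report_info rinfo;
 *
 *    ioctl(fd, HIDIOCGUSAGE, &uref);    // locate report/field/usage indices
 *    uref.value = 1;
 *    ioctl(fd, HIDIOCSUSAGE, &uref);    // set the value in the kernel copy
 *    rinfo.report_type = uref.report_type;
 *    rinfo.report_id   = uref.report_id;
 *    ioctl(fd, HIDIOCSREPORT, &rinfo);  // push the report out to the device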
+ */ + +#define HID_VERSION 0x010004 + +/* + * IOCTLs (0x00 - 0x7f) + */ + +#define HIDIOCGVERSION _IOR('H', 0x01, int) +#define HIDIOCAPPLICATION _IO('H', 0x02) +#define HIDIOCGDEVINFO _IOR('H', 0x03, struct hiddev_devinfo) +#define HIDIOCGSTRING _IOR('H', 0x04, struct hiddev_string_descriptor) +#define HIDIOCINITREPORT _IO('H', 0x05) +#define HIDIOCGNAME(len) _IOC(_IOC_READ, 'H', 0x06, len) +#define HIDIOCGREPORT _IOW('H', 0x07, struct hiddev_report_info) +#define HIDIOCSREPORT _IOW('H', 0x08, struct hiddev_report_info) +#define HIDIOCGREPORTINFO _IOWR('H', 0x09, struct hiddev_report_info) +#define HIDIOCGFIELDINFO _IOWR('H', 0x0A, struct hiddev_field_info) +#define HIDIOCGUSAGE _IOWR('H', 0x0B, struct hiddev_usage_ref) +#define HIDIOCSUSAGE _IOW('H', 0x0C, struct hiddev_usage_ref) +#define HIDIOCGUCODE _IOWR('H', 0x0D, struct hiddev_usage_ref) +#define HIDIOCGFLAG _IOR('H', 0x0E, int) +#define HIDIOCSFLAG _IOW('H', 0x0F, int) +#define HIDIOCGCOLLECTIONINDEX _IOW('H', 0x10, struct hiddev_usage_ref) +#define HIDIOCGCOLLECTIONINFO _IOWR('H', 0x11, struct hiddev_collection_info) +#define HIDIOCGPHYS(len) _IOC(_IOC_READ, 'H', 0x12, len) + +/* For writing/reading to multiple/consecutive usages */ +#define HIDIOCGUSAGES _IOWR('H', 0x13, struct hiddev_usage_ref_multi) +#define HIDIOCSUSAGES _IOW('H', 0x14, struct hiddev_usage_ref_multi) + +/* + * Flags to be used in HIDIOCSFLAG + */ +#define HIDDEV_FLAG_UREF 0x1 +#define HIDDEV_FLAG_REPORT 0x2 +#define HIDDEV_FLAGS 0x3 + +/* To traverse the input report descriptor info for a HID device, perform the + * following: + * + * rinfo.report_type = HID_REPORT_TYPE_INPUT; + * rinfo.report_id = HID_REPORT_ID_FIRST; + * ret = ioctl(fd, HIDIOCGREPORTINFO, &rinfo); + * + * while (ret >= 0) { + * for (i = 0; i < rinfo.num_fields; i++) { + * finfo.report_type = rinfo.report_type; + * finfo.report_id = rinfo.report_id; + * finfo.field_index = i; + * ioctl(fd, HIDIOCGFIELDINFO, &finfo); + * for (j = 0; j < finfo.maxusage; j++) { + * uref.report_type = rinfo.report_type; + * uref.report_id = rinfo.report_id; + * uref.field_index = i; + * uref.usage_index = j; + * ioctl(fd, HIDIOCGUCODE, &uref); + * ioctl(fd, HIDIOCGUSAGE, &uref); + * } + * } + * rinfo.report_id |= HID_REPORT_ID_NEXT; + * ret = ioctl(fd, HIDIOCGREPORTINFO, &rinfo); + * } + */ + + +#ifdef __KERNEL__ + +/* + * In-kernel definitions. 
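 *
 * A transport driver that supports hiddev points the hiddev_* hooks of its
 * struct hid_device at the functions declared below before registering the
 * device; the USB transport does roughly this:
 *
 *    hid->hiddev_connect      = hiddev_connect;
 *    hid->hiddev_disconnect   = hiddev_disconnect;
 *    hid->hiddev_hid_event    = hiddev_hid_event;
 *    hid->hiddev_report_event = hiddev_report_event;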
+ */ + +struct hid_device; +struct hid_usage; +struct hid_field; +struct hid_report; + +#ifdef CONFIG_USB_HIDDEV +int hiddev_connect(struct hid_device *hid, unsigned int force); +void hiddev_disconnect(struct hid_device *); +void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, + struct hid_usage *usage, __s32 value); +void hiddev_report_event(struct hid_device *hid, struct hid_report *report); +#else +static inline int hiddev_connect(struct hid_device *hid, + unsigned int force) +{ return -1; } +static inline void hiddev_disconnect(struct hid_device *hid) { } +static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, + struct hid_usage *usage, __s32 value) { } +static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { } +#endif + +#endif +#endif diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h new file mode 100644 index 00000000..4b88e697 --- /dev/null +++ b/include/linux/hidraw.h @@ -0,0 +1,92 @@ +#ifndef _HIDRAW_H +#define _HIDRAW_H + +/* + * Copyright (c) 2007 Jiri Kosina + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/hid.h> +#include <linux/types.h> + +struct hidraw_report_descriptor { + __u32 size; + __u8 value[HID_MAX_DESCRIPTOR_SIZE]; +}; + +struct hidraw_devinfo { + __u32 bustype; + __s16 vendor; + __s16 product; +}; + +/* ioctl interface */ +#define HIDIOCGRDESCSIZE _IOR('H', 0x01, int) +#define HIDIOCGRDESC _IOR('H', 0x02, struct hidraw_report_descriptor) +#define HIDIOCGRAWINFO _IOR('H', 0x03, struct hidraw_devinfo) +#define HIDIOCGRAWNAME(len) _IOC(_IOC_READ, 'H', 0x04, len) +#define HIDIOCGRAWPHYS(len) _IOC(_IOC_READ, 'H', 0x05, len) +/* The first byte of SFEATURE and GFEATURE is the report number */ +#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len) +#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len) + +#define HIDRAW_FIRST_MINOR 0 +#define HIDRAW_MAX_DEVICES 64 +/* number of reports to buffer */ +#define HIDRAW_BUFFER_SIZE 64 + + +/* kernel-only API declarations */ +#ifdef __KERNEL__ + +struct hidraw { + unsigned int minor; + int exist; + int open; + wait_queue_head_t wait; + struct hid_device *hid; + struct device *dev; + struct list_head list; +}; + +struct hidraw_report { + __u8 *value; + int len; +}; + +struct hidraw_list { + struct hidraw_report buffer[HIDRAW_BUFFER_SIZE]; + int head; + int tail; + struct fasync_struct *fasync; + struct hidraw *hidraw; + struct list_head node; + struct mutex read_mutex; +}; + +#ifdef CONFIG_HIDRAW +int hidraw_init(void); +void hidraw_exit(void); +void hidraw_report_event(struct hid_device *, u8 *, int); +int hidraw_connect(struct hid_device *); +void hidraw_disconnect(struct hid_device *); +#else +static inline int hidraw_init(void) { return 0; } +static inline void hidraw_exit(void) { } +static inline void hidraw_report_event(struct hid_device *hid, u8 *data, int len) { } +static inline int hidraw_connect(struct hid_device *hid) { return -1; } +static inline void hidraw_disconnect(struct hid_device *hid) { } +#endif + +#endif + +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h new 
file mode 100644 index 00000000..d3999b4e --- /dev/null +++ b/include/linux/highmem.h @@ -0,0 +1,287 @@ +#ifndef _LINUX_HIGHMEM_H +#define _LINUX_HIGHMEM_H + +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/bug.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <linux/hardirq.h> + +#include <asm/cacheflush.h> + +#ifndef ARCH_HAS_FLUSH_ANON_PAGE +static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) +{ +} +#endif + +#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE +static inline void flush_kernel_dcache_page(struct page *page) +{ +} +static inline void flush_kernel_vmap_range(void *vaddr, int size) +{ +} +static inline void invalidate_kernel_vmap_range(void *vaddr, int size) +{ +} +#endif + +#include <asm/kmap_types.h> + +#ifdef CONFIG_HIGHMEM +#include <asm/highmem.h> + +/* declarations for linux/mm/highmem.c */ +unsigned int nr_free_highpages(void); +extern unsigned long totalhigh_pages; + +void kmap_flush_unused(void); + +#else /* CONFIG_HIGHMEM */ + +static inline unsigned int nr_free_highpages(void) { return 0; } + +#define totalhigh_pages 0UL + +#ifndef ARCH_HAS_KMAP +static inline void *kmap(struct page *page) +{ + might_sleep(); + return page_address(page); +} + +static inline void kunmap(struct page *page) +{ +} + +static inline void *kmap_atomic(struct page *page) +{ + pagefault_disable(); + return page_address(page); +} +#define kmap_atomic_prot(page, prot) kmap_atomic(page) + +static inline void __kunmap_atomic(void *addr) +{ + pagefault_enable(); +} + +#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) +#define kmap_atomic_to_page(ptr) virt_to_page(ptr) + +#define kmap_flush_unused() do {} while(0) +#endif + +#endif /* CONFIG_HIGHMEM */ + +#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + +DECLARE_PER_CPU(int, __kmap_atomic_idx); + +static inline int kmap_atomic_idx_push(void) +{ + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +#ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx > KM_TYPE_NR); +#endif + return idx; +} + +static inline int kmap_atomic_idx(void) +{ + return __this_cpu_read(__kmap_atomic_idx) - 1; +} + +static inline void kmap_atomic_idx_pop(void) +{ +#ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +#else + __this_cpu_dec(__kmap_atomic_idx); +#endif +} + +#endif + +/* + * NOTE: + * kmap_atomic() and kunmap_atomic() with two arguments are deprecated. + * We only keep them for backward compatibility, any usage of them + * are now warned. + */ + +#define PASTE(a, b) a ## b +#define PASTE2(a, b) PASTE(a, b) + +#define NARG_(_2, _1, n, ...) n +#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :) + +static inline void __deprecated *kmap_atomic_deprecated(struct page *page, + enum km_type km) +{ + return kmap_atomic(page); +} + +#define kmap_atomic1(...) kmap_atomic(__VA_ARGS__) +#define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__) +#define kmap_atomic(...) PASTE2(kmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__)) + +static inline void __deprecated __kunmap_atomic_deprecated(void *addr, + enum km_type km) +{ + __kunmap_atomic(addr); +} + +/* + * Prevent people trying to call kunmap_atomic() as if it were kunmap() + * kunmap_atomic() should get the return value of kmap_atomic, not the page. 
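 *
 * The correct pairing looks like this (buf and len are illustrative; note
 * that sleeping is not allowed between the two calls):
 *
 *    void *vaddr = kmap_atomic(page);
 *
 *    memcpy(vaddr, buf, len);
 *    kunmap_atomic(vaddr);    // pass the returned address, not the page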
+ */ +#define kunmap_atomic_deprecated(addr, km) \ +do { \ + BUILD_BUG_ON(__same_type((addr), struct page *)); \ + __kunmap_atomic_deprecated(addr, km); \ +} while (0) + +#define kunmap_atomic_withcheck(addr) \ +do { \ + BUILD_BUG_ON(__same_type((addr), struct page *)); \ + __kunmap_atomic(addr); \ +} while (0) + +#define kunmap_atomic1(...) kunmap_atomic_withcheck(__VA_ARGS__) +#define kunmap_atomic2(...) kunmap_atomic_deprecated(__VA_ARGS__) +#define kunmap_atomic(...) PASTE2(kunmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__)) +/**** End of C pre-processor tricks for deprecated macros ****/ + +/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ +#ifndef clear_user_highpage +static inline void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + void *addr = kmap_atomic(page); + clear_user_page(addr, vaddr, page); + kunmap_atomic(addr); +} +#endif + +#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE +/** + * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags + * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE + * @vma: The VMA the page is to be allocated for + * @vaddr: The virtual address the page will be inserted into + * + * This function will allocate a page for a VMA but the caller is expected + * to specify via movableflags whether the page will be movable in the + * future or not + * + * An architecture may override this function by defining + * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own + * implementation. + */ +static inline struct page * +__alloc_zeroed_user_highpage(gfp_t movableflags, + struct vm_area_struct *vma, + unsigned long vaddr) +{ + struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, + vma, vaddr); + + if (page) + clear_user_highpage(page, vaddr); + + return page; +} +#endif + +/** + * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move + * @vma: The VMA the page is to be allocated for + * @vaddr: The virtual address the page will be inserted into + * + * This function will allocate a page for a VMA that the caller knows will + * be able to migrate in the future using move_pages() or reclaimed + */ +static inline struct page * +alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr) +{ + return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); +} + +static inline void clear_highpage(struct page *page) +{ + void *kaddr = kmap_atomic(page); + clear_page(kaddr); + kunmap_atomic(kaddr); +} + +static inline void zero_user_segments(struct page *page, + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) +{ + void *kaddr = kmap_atomic(page); + + BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); + + if (end1 > start1) + memset(kaddr + start1, 0, end1 - start1); + + if (end2 > start2) + memset(kaddr + start2, 0, end2 - start2); + + kunmap_atomic(kaddr); + flush_dcache_page(page); +} + +static inline void zero_user_segment(struct page *page, + unsigned start, unsigned end) +{ + zero_user_segments(page, start, end, 0, 0); +} + +static inline void zero_user(struct page *page, + unsigned start, unsigned size) +{ + zero_user_segments(page, start, start + size, 0, 0); +} + +static inline void __deprecated memclear_highpage_flush(struct page *page, + unsigned int offset, unsigned int size) +{ + zero_user(page, offset, size); +} + +#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE + +static inline void copy_user_highpage(struct page 
*to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from); + vto = kmap_atomic(to); + copy_user_page(vto, vfrom, vaddr, to); + kunmap_atomic(vto); + kunmap_atomic(vfrom); +} + +#endif + +static inline void copy_highpage(struct page *to, struct page *from) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from); + vto = kmap_atomic(to); + copy_page(vto, vfrom); + kunmap_atomic(vto); + kunmap_atomic(vfrom); +} + +#endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/highuid.h b/include/linux/highuid.h new file mode 100644 index 00000000..434e5624 --- /dev/null +++ b/include/linux/highuid.h @@ -0,0 +1,97 @@ +#ifndef _LINUX_HIGHUID_H +#define _LINUX_HIGHUID_H + +#include <linux/types.h> + +/* + * general notes: + * + * CONFIG_UID16 is defined if the given architecture needs to + * support backwards compatibility for old system calls. + * + * kernel code should use uid_t and gid_t at all times when dealing with + * kernel-private data. + * + * old_uid_t and old_gid_t should only be different if CONFIG_UID16 is + * defined, else the platform should provide dummy typedefs for them + * such that they are equivalent to __kernel_{u,g}id_t. + * + * uid16_t and gid16_t are used on all architectures. (when dealing + * with structures hard coded to 16 bits, such as in filesystems) + */ + + +/* + * This is the "overflow" UID and GID. They are used to signify uid/gid + * overflow to old programs when they request uid/gid information but are + * using the old 16 bit interfaces. + * When you run a libc5 program, it will think that all highuid files or + * processes are owned by this uid/gid. + * The idea is that it's better to do so than possibly return 0 in lieu of + * 65536, etc. + */ + +extern int overflowuid; +extern int overflowgid; + +extern void __bad_uid(void); +extern void __bad_gid(void); + +#define DEFAULT_OVERFLOWUID 65534 +#define DEFAULT_OVERFLOWGID 65534 + +#ifdef CONFIG_UID16 + +/* prevent uid mod 65536 effect by returning a default value for high UIDs */ +#define high2lowuid(uid) ((uid) & ~0xFFFF ? (old_uid_t)overflowuid : (old_uid_t)(uid)) +#define high2lowgid(gid) ((gid) & ~0xFFFF ? (old_gid_t)overflowgid : (old_gid_t)(gid)) +/* + * -1 is different in 16 bits than it is in 32 bits + * these macros are used by chown(), setreuid(), ..., + */ +#define low2highuid(uid) ((uid) == (old_uid_t)-1 ? (uid_t)-1 : (uid_t)(uid)) +#define low2highgid(gid) ((gid) == (old_gid_t)-1 ? (gid_t)-1 : (gid_t)(gid)) + +#define __convert_uid(size, uid) \ + (size >= sizeof(uid) ? (uid) : high2lowuid(uid)) +#define __convert_gid(size, gid) \ + (size >= sizeof(gid) ? (gid) : high2lowgid(gid)) + + +#else + +#define __convert_uid(size, uid) (uid) +#define __convert_gid(size, gid) (gid) + +#endif /* !CONFIG_UID16 */ + +/* uid/gid input should be always 32bit uid_t */ +#define SET_UID(var, uid) do { (var) = __convert_uid(sizeof(var), (uid)); } while (0) +#define SET_GID(var, gid) do { (var) = __convert_gid(sizeof(var), (gid)); } while (0) + +/* + * Everything below this line is needed on all architectures, to deal with + * filesystems that only store 16 bits of the UID/GID, etc. 
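 *
 * For example, a filesystem with 16-bit on-disk UIDs would store an inode's
 * owner roughly like this (raw_inode and its i_uid16 field are hypothetical
 * names, not from any particular filesystem):
 *
 *    raw_inode->i_uid16 = fs_high2lowuid(inode->i_uid);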
+ */ + +/* + * This is the UID and GID that will get written to disk if a filesystem + * only supports 16-bit UIDs and the kernel has a high UID/GID to write + */ +extern int fs_overflowuid; +extern int fs_overflowgid; + +#define DEFAULT_FS_OVERFLOWUID 65534 +#define DEFAULT_FS_OVERFLOWGID 65534 + +/* + * Since these macros are used in architectures that only need limited + * 16-bit UID back compatibility, we won't use old_uid_t and old_gid_t + */ +#define fs_high2lowuid(uid) ((uid) & ~0xFFFF ? (uid16_t)fs_overflowuid : (uid16_t)(uid)) +#define fs_high2lowgid(gid) ((gid) & ~0xFFFF ? (gid16_t)fs_overflowgid : (gid16_t)(gid)) + +#define low_16_bits(x) ((x) & 0xFFFF) +#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16) + +#endif /* _LINUX_HIGHUID_H */ diff --git a/include/linux/hil.h b/include/linux/hil.h new file mode 100644 index 00000000..523785a9 --- /dev/null +++ b/include/linux/hil.h @@ -0,0 +1,483 @@ +#ifndef _HIL_H_ +#define _HIL_H_ + +/* + * Hewlett Packard Human Interface Loop (HP-HIL) Protocol -- header. + * + * Copyright (c) 2001 Brian S. Julin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL"). + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * + * References: + * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A + * + * A note of thanks to HP for providing and shipping reference materials + * free of charge to help in the development of HIL support for Linux. + * + */ + +#include <asm/types.h> + +/* Physical constants relevant to raw loop/device timing. + */ + +#define HIL_CLOCK 8MHZ +#define HIL_EK1_CLOCK 30HZ +#define HIL_EK2_CLOCK 60HZ + +#define HIL_TIMEOUT_DEV 5 /* ms */ +#define HIL_TIMEOUT_DEVS 10 /* ms */ +#define HIL_TIMEOUT_NORESP 10 /* ms */ +#define HIL_TIMEOUT_DEVS_DATA 16 /* ms */ +#define HIL_TIMEOUT_SELFTEST 200 /* ms */ + + +/* Actual wire line coding. These will only be useful if someone is + * implementing a software MLC to run HIL devices on a non-parisc machine. 
+ */ + +#define HIL_WIRE_PACKET_LEN 15 +enum hil_wire_bitpos { + HIL_WIRE_START = 0, + HIL_WIRE_ADDR2, + HIL_WIRE_ADDR1, + HIL_WIRE_ADDR0, + HIL_WIRE_COMMAND, + HIL_WIRE_DATA7, + HIL_WIRE_DATA6, + HIL_WIRE_DATA5, + HIL_WIRE_DATA4, + HIL_WIRE_DATA3, + HIL_WIRE_DATA2, + HIL_WIRE_DATA1, + HIL_WIRE_DATA0, + HIL_WIRE_PARITY, + HIL_WIRE_STOP +}; + +/* HP documentation uses these bit positions to refer to commands; + * we will call these "packets". + */ +enum hil_pkt_bitpos { + HIL_PKT_CMD = 0x00000800, + HIL_PKT_ADDR2 = 0x00000400, + HIL_PKT_ADDR1 = 0x00000200, + HIL_PKT_ADDR0 = 0x00000100, + HIL_PKT_ADDR_MASK = 0x00000700, + HIL_PKT_ADDR_SHIFT = 8, + HIL_PKT_DATA7 = 0x00000080, + HIL_PKT_DATA6 = 0x00000040, + HIL_PKT_DATA5 = 0x00000020, + HIL_PKT_DATA4 = 0x00000010, + HIL_PKT_DATA3 = 0x00000008, + HIL_PKT_DATA2 = 0x00000004, + HIL_PKT_DATA1 = 0x00000002, + HIL_PKT_DATA0 = 0x00000001, + HIL_PKT_DATA_MASK = 0x000000FF, + HIL_PKT_DATA_SHIFT = 0 +}; + +/* The HIL MLC also has several error/status/control bits. We extend the + * "packet" to include these when direct access to the MLC is available, + * or emulate them in cases where they are not available. + * + * This way the device driver knows that the underlying MLC driver + * has had to deal with loop errors. + */ +enum hil_error_bitpos { + HIL_ERR_OB = 0x00000800, /* MLC is busy sending an auto-poll, + or we have filled up the output + buffer and must wait. */ + HIL_ERR_INT = 0x00010000, /* A normal interrupt has occurred. */ + HIL_ERR_NMI = 0x00020000, /* An NMI has occurred. */ + HIL_ERR_LERR = 0x00040000, /* A poll didn't come back. */ + HIL_ERR_PERR = 0x01000000, /* There was a Parity Error. */ + HIL_ERR_FERR = 0x02000000, /* There was a Framing Error. */ + HIL_ERR_FOF = 0x04000000 /* Input FIFO Overflowed. */ +}; + +enum hil_control_bitpos { + HIL_CTRL_TEST = 0x00010000, + HIL_CTRL_IPF = 0x00040000, + HIL_CTRL_APE = 0x02000000 +}; + +/* Bits 30,31 are unused, we use them to control write behavior. 
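 *
 * For example, a back end that only wants to update the control registers,
 * without placing anything on the loop, might build a packet like the
 * following sketch (whether APE is the appropriate control bit to set is up
 * to the particular MLC driver):
 *
 *    hil_packet p = HIL_CTRL_ONLY | HIL_CTRL_APE;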
*/ +#define HIL_DO_ALTER_CTRL 0x40000000 /* Write MSW of packet to control + before writing LSW to loop */ +#define HIL_CTRL_ONLY 0xc0000000 /* *Only* alter the control registers */ + +/* This gives us a 32-bit "packet" + */ +typedef u32 hil_packet; + + +/* HIL Loop commands + */ +enum hil_command { + HIL_CMD_IFC = 0x00, /* Interface Clear */ + HIL_CMD_EPT = 0x01, /* Enter Pass-Thru Mode */ + HIL_CMD_ELB = 0x02, /* Enter Loop-Back Mode */ + HIL_CMD_IDD = 0x03, /* Identify and Describe */ + HIL_CMD_DSR = 0x04, /* Device Soft Reset */ + HIL_CMD_PST = 0x05, /* Perform Self Test */ + HIL_CMD_RRG = 0x06, /* Read Register */ + HIL_CMD_WRG = 0x07, /* Write Register */ + HIL_CMD_ACF = 0x08, /* Auto Configure */ + HIL_CMDID_ACF = 0x07, /* Auto Configure bits with incremented ID */ + HIL_CMD_POL = 0x10, /* Poll */ + HIL_CMDCT_POL = 0x0f, /* Poll command bits with item count */ + HIL_CMD_RPL = 0x20, /* RePoll */ + HIL_CMDCT_RPL = 0x0f, /* RePoll command bits with item count */ + HIL_CMD_RNM = 0x30, /* Report Name */ + HIL_CMD_RST = 0x31, /* Report Status */ + HIL_CMD_EXD = 0x32, /* Extended Describe */ + HIL_CMD_RSC = 0x33, /* Report Security Code */ + + /* 0x34 to 0x3c reserved for future use */ + + HIL_CMD_DKA = 0x3d, /* Disable Keyswitch Autorepeat */ + HIL_CMD_EK1 = 0x3e, /* Enable Keyswitch Autorepeat 1 */ + HIL_CMD_EK2 = 0x3f, /* Enable Keyswitch Autorepeat 2 */ + HIL_CMD_PR1 = 0x40, /* Prompt1 */ + HIL_CMD_PR2 = 0x41, /* Prompt2 */ + HIL_CMD_PR3 = 0x42, /* Prompt3 */ + HIL_CMD_PR4 = 0x43, /* Prompt4 */ + HIL_CMD_PR5 = 0x44, /* Prompt5 */ + HIL_CMD_PR6 = 0x45, /* Prompt6 */ + HIL_CMD_PR7 = 0x46, /* Prompt7 */ + HIL_CMD_PRM = 0x47, /* Prompt (General Purpose) */ + HIL_CMD_AK1 = 0x48, /* Acknowledge1 */ + HIL_CMD_AK2 = 0x49, /* Acknowledge2 */ + HIL_CMD_AK3 = 0x4a, /* Acknowledge3 */ + HIL_CMD_AK4 = 0x4b, /* Acknowledge4 */ + HIL_CMD_AK5 = 0x4c, /* Acknowledge5 */ + HIL_CMD_AK6 = 0x4d, /* Acknowledge6 */ + HIL_CMD_AK7 = 0x4e, /* Acknowledge7 */ + HIL_CMD_ACK = 0x4f, /* Acknowledge (General Purpose) */ + + /* 0x50 to 0x78 reserved for future use */ + /* 0x80 to 0xEF device-specific commands */ + /* 0xf0 to 0xf9 reserved for future use */ + + HIL_CMD_RIO = 0xfa, /* Register I/O Error */ + HIL_CMD_SHR = 0xfb, /* System Hard Reset */ + HIL_CMD_TER = 0xfc, /* Transmission Error */ + HIL_CMD_CAE = 0xfd, /* Configuration Address Error */ + HIL_CMD_DHR = 0xfe, /* Device Hard Reset */ + + /* 0xff is prohibited from use. 
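 *
 * Commands are sent as the low bits of a hil_packet together with the
 * command flag and a device address; as a sketch, asking device "dev" to
 * identify itself would use something like:
 *
 *    hil_packet p = HIL_PKT_CMD | (dev << HIL_PKT_ADDR_SHIFT) | HIL_CMD_IDD;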
*/ +}; + + +/* + * Response "records" to HIL commands + */ + +/* Device ID byte + */ +#define HIL_IDD_DID_TYPE_MASK 0xe0 /* Primary type bits */ +#define HIL_IDD_DID_TYPE_KB_INTEGRAL 0xa0 /* Integral keyboard */ +#define HIL_IDD_DID_TYPE_KB_ITF 0xc0 /* ITD keyboard */ +#define HIL_IDD_DID_TYPE_KB_RSVD 0xe0 /* Reserved keyboard type */ +#define HIL_IDD_DID_TYPE_KB_LANG_MASK 0x1f /* Keyboard locale bits */ +#define HIL_IDD_DID_KBLANG_USE_ESD 0x00 /* Use ESD Locale instead */ +#define HIL_IDD_DID_TYPE_ABS 0x80 /* Absolute Positioners */ +#define HIL_IDD_DID_ABS_RSVD1_MASK 0xf8 /* Reserved */ +#define HIL_IDD_DID_ABS_RSVD1 0x98 +#define HIL_IDD_DID_ABS_TABLET_MASK 0xf8 /* Tablets and digitizers */ +#define HIL_IDD_DID_ABS_TABLET 0x90 +#define HIL_IDD_DID_ABS_TSCREEN_MASK 0xfc /* Touch screens */ +#define HIL_IDD_DID_ABS_TSCREEN 0x8c +#define HIL_IDD_DID_ABS_RSVD2_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_ABS_RSVD2 0x88 +#define HIL_IDD_DID_ABS_RSVD3_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_ABS_RSVD3 0x80 +#define HIL_IDD_DID_TYPE_REL 0x60 /* Relative Positioners */ +#define HIL_IDD_DID_REL_RSVD1_MASK 0xf0 /* Reserved */ +#define HIL_IDD_DID_REL_RSVD1 0x70 +#define HIL_IDD_DID_REL_RSVD2_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_REL_RSVD2 0x6c +#define HIL_IDD_DID_REL_MOUSE_MASK 0xfc /* Mouse */ +#define HIL_IDD_DID_REL_MOUSE 0x68 +#define HIL_IDD_DID_REL_QUAD_MASK 0xf8 /* Other Quadrature Devices */ +#define HIL_IDD_DID_REL_QUAD 0x60 +#define HIL_IDD_DID_TYPE_CHAR 0x40 /* Character Entry */ +#define HIL_IDD_DID_CHAR_BARCODE_MASK 0xfc /* Barcode Reader */ +#define HIL_IDD_DID_CHAR_BARCODE 0x5c +#define HIL_IDD_DID_CHAR_RSVD1_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_CHAR_RSVD1 0x58 +#define HIL_IDD_DID_CHAR_RSVD2_MASK 0xf8 /* Reserved */ +#define HIL_IDD_DID_CHAR_RSVD2 0x50 +#define HIL_IDD_DID_CHAR_RSVD3_MASK 0xf0 /* Reserved */ +#define HIL_IDD_DID_CHAR_RSVD3 0x40 +#define HIL_IDD_DID_TYPE_OTHER 0x20 /* Miscellaneous */ +#define HIL_IDD_DID_OTHER_RSVD1_MASK 0xf0 /* Reserved */ +#define HIL_IDD_DID_OTHER_RSVD1 0x30 +#define HIL_IDD_DID_OTHER_BARCODE_MASK 0xfc /* Tone Generator */ +#define HIL_IDD_DID_OTHER_BARCODE 0x2c +#define HIL_IDD_DID_OTHER_RSVD2_MASK 0xfc /* Reserved */ +#define HIL_IDD_DID_OTHER_RSVD2 0x28 +#define HIL_IDD_DID_OTHER_RSVD3_MASK 0xf8 /* Reserved */ +#define HIL_IDD_DID_OTHER_RSVD3 0x20 +#define HIL_IDD_DID_TYPE_KEYPAD 0x00 /* Vectra Keyboard */ + +/* IDD record header + */ +#define HIL_IDD_HEADER_AXSET_MASK 0x03 /* Number of axis in a set */ +#define HIL_IDD_HEADER_RSC 0x04 /* Supports RSC command */ +#define HIL_IDD_HEADER_EXD 0x08 /* Supports EXD command */ +#define HIL_IDD_HEADER_IOD 0x10 /* IOD byte to follow */ +#define HIL_IDD_HEADER_16BIT 0x20 /* 16 (vs. 
8) bit resolution */ +#define HIL_IDD_HEADER_ABS 0x40 /* Reports Absolute Position */ +#define HIL_IDD_HEADER_2X_AXIS 0x80 /* Two sets of 1-3 axis */ + +/* I/O Descriptor + */ +#define HIL_IDD_IOD_NBUTTON_MASK 0x07 /* Number of buttons */ +#define HIL_IDD_IOD_PROXIMITY 0x08 /* Proximity in/out events */ +#define HIL_IDD_IOD_PROMPT_MASK 0x70 /* Number of prompts/acks */ +#define HIL_IDD_IOD_PROMPT_SHIFT 4 +#define HIL_IDD_IOD_PROMPT 0x80 /* Generic prompt/ack */ + +#define HIL_IDD_NUM_AXES_PER_SET(header_packet) \ +((header_packet) & HIL_IDD_HEADER_AXSET_MASK) + +#define HIL_IDD_NUM_AXSETS(header_packet) \ +(2 - !((header_packet) & HIL_IDD_HEADER_2X_AXIS)) + +#define HIL_IDD_LEN(header_packet) \ +((4 - !(header_packet & HIL_IDD_HEADER_IOD) - \ + 2 * !(HIL_IDD_NUM_AXES_PER_SET(header_packet))) + \ + 2 * HIL_IDD_NUM_AXES_PER_SET(header_packet) * \ + !!((header_packet) & HIL_IDD_HEADER_ABS)) + +/* The following HIL_IDD_* macros assume you have an array of + * packets and/or unpacked 8-bit data in the order that they + * were received. + */ + +#define HIL_IDD_AXIS_COUNTS_PER_M(header_ptr) \ +(!(HIL_IDD_NUM_AXSETS(*(header_ptr))) ? -1 : \ +(((*(header_ptr + 1) & HIL_PKT_DATA_MASK) + \ + ((*(header_ptr + 2) & HIL_PKT_DATA_MASK)) << 8) \ +* ((*(header_ptr) & HIL_IDD_HEADER_16BIT) ? 100 : 1))) + +#define HIL_IDD_AXIS_MAX(header_ptr, __axnum) \ +((!(*(header_ptr) & HIL_IDD_HEADER_ABS) || \ + (HIL_IDD_NUM_AXES_PER_SET(*(header_ptr)) <= __axnum)) ? 0 : \ + ((HIL_PKT_DATA_MASK & *((header_ptr) + 3 + 2 * __axnum)) + \ + ((HIL_PKT_DATA_MASK & *((header_ptr) + 4 + 2 * __axnum)) << 8))) + +#define HIL_IDD_IOD(header_ptr) \ +(*(header_ptr + HIL_IDD_LEN((*header_ptr)) - 1)) + +#define HIL_IDD_HAS_GEN_PROMPT(header_ptr) \ +((*header_ptr & HIL_IDD_HEADER_IOD) && \ + (HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_PROMPT)) + +#define HIL_IDD_HAS_GEN_PROXIMITY(header_ptr) \ +((*header_ptr & HIL_IDD_HEADER_IOD) && \ + (HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_PROXIMITY)) + +#define HIL_IDD_NUM_BUTTONS(header_ptr) \ +((*header_ptr & HIL_IDD_HEADER_IOD) ? \ + (HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_NBUTTON_MASK) : 0) + +#define HIL_IDD_NUM_PROMPTS(header_ptr) \ +((*header_ptr & HIL_IDD_HEADER_IOD) ? \ + ((HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_NPROMPT_MASK) \ + >> HIL_IDD_IOD_PROMPT_SHIFT) : 0) + +/* The response to HIL EXD commands -- the "extended describe record" */ +#define HIL_EXD_HEADER_WRG 0x03 /* Supports type2 WRG */ +#define HIL_EXD_HEADER_WRG_TYPE1 0x01 /* Supports type1 WRG */ +#define HIL_EXD_HEADER_WRG_TYPE2 0x02 /* Supports type2 WRG */ +#define HIL_EXD_HEADER_RRG 0x04 /* Supports RRG command */ +#define HIL_EXD_HEADER_RNM 0x10 /* Supports RNM command */ +#define HIL_EXD_HEADER_RST 0x20 /* Supports RST command */ +#define HIL_EXD_HEADER_LOCALE 0x40 /* Contains locale code */ + +#define HIL_EXD_NUM_RRG(header_ptr) \ +((*header_ptr & HIL_EXD_HEADER_RRG) ? \ + (*(header_ptr + 1) & HIL_PKT_DATA_MASK) : 0) + +#define HIL_EXD_NUM_WWG(header_ptr) \ +((*header_ptr & HIL_EXD_HEADER_WRG) ? \ + (*(header_ptr + 2 - !(*header_ptr & HIL_EXD_HEADER_RRG)) & \ + HIL_PKT_DATA_MASK) : 0) + +#define HIL_EXD_LEN(header_ptr) \ +(!!(*header_ptr & HIL_EXD_HEADER_RRG) + \ + !!(*header_ptr & HIL_EXD_HEADER_WRG) + \ + !!(*header_ptr & HIL_EXD_HEADER_LOCALE) + \ + 2 * !!(*header_ptr & HIL_EXD_HEADER_WRG_TYPE2) + 1) + +#define HIL_EXD_LOCALE(header_ptr) \ +(!(*header_ptr & HIL_EXD_HEADER_LOCALE) ? 
-1 : \ + (*(header_ptr + HIL_EXD_LEN(header_ptr) - 1) & HIL_PKT_DATA_MASK)) + +#define HIL_EXD_WRG_TYPE2_LEN(header_ptr) \ +(!(*header_ptr & HIL_EXD_HEADER_WRG_TYPE2) ? -1 : \ + (*(header_ptr + HIL_EXD_LEN(header_ptr) - 2 - \ + !!(*header_ptr & HIL_EXD_HEADER_LOCALE)) & HIL_PKT_DATA_MASK) + \ + ((*(header_ptr + HIL_EXD_LEN(header_ptr) - 1 - \ + !!(*header_ptr & HIL_EXD_HEADER_LOCALE)) & HIL_PKT_DATA_MASK) << 8)) + +/* Device locale codes. */ + +/* Last defined locale code. Everything above this is "Reserved", + and note that this same table applies to the Device ID Byte where + keyboards may have a nationality code which is only 5 bits. */ +#define HIL_LOCALE_MAX 0x1f + +/* Map to hopefully useful strings. I was trying to make these look + like locale.aliases strings do; maybe that isn't the right table to + emulate. In either case, I didn't have much to work on. */ +#define HIL_LOCALE_MAP \ +"", /* 0x00 Reserved */ \ +"", /* 0x01 Reserved */ \ +"", /* 0x02 Reserved */ \ +"swiss.french", /* 0x03 Swiss/French */ \ +"portuguese", /* 0x04 Portuguese */ \ +"arabic", /* 0x05 Arabic */ \ +"hebrew", /* 0x06 Hebrew */ \ +"english.canadian", /* 0x07 Canadian English */ \ +"turkish", /* 0x08 Turkish */ \ +"greek", /* 0x09 Greek */ \ +"thai", /* 0x0a Thai (Thailand) */ \ +"italian", /* 0x0b Italian */ \ +"korean", /* 0x0c Hangul (Korea) */ \ +"dutch", /* 0x0d Dutch */ \ +"swedish", /* 0x0e Swedish */ \ +"german", /* 0x0f German */ \ +"chinese", /* 0x10 Chinese-PRC */ \ +"chinese", /* 0x11 Chinese-ROC */ \ +"swiss.french", /* 0x12 Swiss/French II */ \ +"spanish", /* 0x13 Spanish */ \ +"swiss.german", /* 0x14 Swiss/German II */ \ +"flemish", /* 0x15 Belgian (Flemish) */ \ +"finnish", /* 0x16 Finnish */ \ +"english.uk", /* 0x17 United Kingdom */ \ +"french.canadian", /* 0x18 French/Canadian */ \ +"swiss.german", /* 0x19 Swiss/German */ \ +"norwegian", /* 0x1a Norwegian */ \ +"french", /* 0x1b French */ \ +"danish", /* 0x1c Danish */ \ +"japanese", /* 0x1d Katakana */ \ +"spanish", /* 0x1e Latin American/Spanish*/\ +"english.us" /* 0x1f United States */ \ + + +/* HIL keycodes */ +#define HIL_KEYCODES_SET1_TBLSIZE 128 +#define HIL_KEYCODES_SET1 \ + KEY_5, KEY_RESERVED, KEY_RIGHTALT, KEY_LEFTALT, \ + KEY_RIGHTSHIFT, KEY_LEFTSHIFT, KEY_LEFTCTRL, KEY_SYSRQ, \ + KEY_KP4, KEY_KP8, KEY_KP5, KEY_KP9, \ + KEY_KP6, KEY_KP7, KEY_KPCOMMA, KEY_KPENTER, \ + KEY_KP1, KEY_KPSLASH, KEY_KP2, KEY_KPPLUS, \ + KEY_KP3, KEY_KPASTERISK, KEY_KP0, KEY_KPMINUS, \ + KEY_B, KEY_V, KEY_C, KEY_X, \ + KEY_Z, KEY_RESERVED, KEY_RESERVED, KEY_ESC, \ + KEY_6, KEY_F10, KEY_3, KEY_F11, \ + KEY_KPDOT, KEY_F9, KEY_TAB /*KP*/, KEY_F12, \ + KEY_H, KEY_G, KEY_F, KEY_D, \ + KEY_S, KEY_A, KEY_RESERVED, KEY_CAPSLOCK, \ + KEY_U, KEY_Y, KEY_T, KEY_R, \ + KEY_E, KEY_W, KEY_Q, KEY_TAB, \ + KEY_7, KEY_6, KEY_5, KEY_4, \ + KEY_3, KEY_2, KEY_1, KEY_GRAVE, \ + KEY_F13, KEY_F14, KEY_F15, KEY_F16, \ + KEY_F17, KEY_F18, KEY_F19, KEY_F20, \ + KEY_MENU, KEY_F4, KEY_F3, KEY_F2, \ + KEY_F1, KEY_VOLUMEUP, KEY_STOP, KEY_SENDFILE, \ + KEY_SYSRQ, KEY_F5, KEY_F6, KEY_F7, \ + KEY_F8, KEY_VOLUMEDOWN, KEY_DEL_EOL, KEY_DEL_EOS, \ + KEY_8, KEY_9, KEY_0, KEY_MINUS, \ + KEY_EQUAL, KEY_BACKSPACE, KEY_INS_LINE, KEY_DEL_LINE, \ + KEY_I, KEY_O, KEY_P, KEY_LEFTBRACE, \ + KEY_RIGHTBRACE, KEY_BACKSLASH, KEY_INSERT, KEY_DELETE, \ + KEY_J, KEY_K, KEY_L, KEY_SEMICOLON, \ + KEY_APOSTROPHE, KEY_ENTER, KEY_HOME, KEY_PAGEUP, \ + KEY_M, KEY_COMMA, KEY_DOT, KEY_SLASH, \ + KEY_BACKSLASH, KEY_SELECT, KEY_102ND, KEY_PAGEDOWN, \ + KEY_N, KEY_SPACE, KEY_NEXT, KEY_RESERVED, \ + KEY_LEFT, 
KEY_DOWN, KEY_UP, KEY_RIGHT + + +#define HIL_KEYCODES_SET3_TBLSIZE 128 +#define HIL_KEYCODES_SET3 \ + KEY_RESERVED, KEY_ESC, KEY_1, KEY_2, \ + KEY_3, KEY_4, KEY_5, KEY_6, \ + KEY_7, KEY_8, KEY_9, KEY_0, \ + KEY_MINUS, KEY_EQUAL, KEY_BACKSPACE, KEY_TAB, \ + KEY_Q, KEY_W, KEY_E, KEY_R, \ + KEY_T, KEY_Y, KEY_U, KEY_I, \ + KEY_O, KEY_P, KEY_LEFTBRACE, KEY_RIGHTBRACE, \ + KEY_ENTER, KEY_LEFTCTRL, KEY_A, KEY_S, \ + KEY_D, KEY_F, KEY_G, KEY_H, \ + KEY_J, KEY_K, KEY_L, KEY_SEMICOLON, \ + KEY_APOSTROPHE,KEY_GRAVE, KEY_LEFTSHIFT, KEY_BACKSLASH, \ + KEY_Z, KEY_X, KEY_C, KEY_V, \ + KEY_B, KEY_N, KEY_M, KEY_COMMA, \ + KEY_DOT, KEY_SLASH, KEY_RIGHTSHIFT, KEY_KPASTERISK, \ + KEY_LEFTALT, KEY_SPACE, KEY_CAPSLOCK, KEY_F1, \ + KEY_F2, KEY_F3, KEY_F4, KEY_F5, \ + KEY_F6, KEY_F7, KEY_F8, KEY_F9, \ + KEY_F10, KEY_NUMLOCK, KEY_SCROLLLOCK, KEY_KP7, \ + KEY_KP8, KEY_KP9, KEY_KPMINUS, KEY_KP4, \ + KEY_KP5, KEY_KP6, KEY_KPPLUS, KEY_KP1, \ + KEY_KP2, KEY_KP3, KEY_KP0, KEY_KPDOT, \ + KEY_SYSRQ, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_UP, KEY_LEFT, KEY_DOWN, KEY_RIGHT, \ + KEY_HOME, KEY_PAGEUP, KEY_END, KEY_PAGEDOWN, \ + KEY_INSERT, KEY_DELETE, KEY_102ND, KEY_RESERVED, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_F1, KEY_F2, KEY_F3, KEY_F4, \ + KEY_F5, KEY_F6, KEY_F7, KEY_F8, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \ + KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED + + +/* Response to POL command, the "poll record header" */ + +#define HIL_POL_NUM_AXES_MASK 0x03 /* Number of axis reported */ +#define HIL_POL_CTS 0x04 /* Device ready to receive data */ +#define HIL_POL_STATUS_PENDING 0x08 /* Device has status to report */ +#define HIL_POL_CHARTYPE_MASK 0x70 /* Type of character data to follow */ +#define HIL_POL_CHARTYPE_NONE 0x00 /* No character data to follow */ +#define HIL_POL_CHARTYPE_RSVD1 0x10 /* Reserved Set 1 */ +#define HIL_POL_CHARTYPE_ASCII 0x20 /* U.S. ASCII */ +#define HIL_POL_CHARTYPE_BINARY 0x30 /* Binary data */ +#define HIL_POL_CHARTYPE_SET1 0x40 /* Keycode Set 1 */ +#define HIL_POL_CHARTYPE_RSVD2 0x50 /* Reserved Set 2 */ +#define HIL_POL_CHARTYPE_SET2 0x60 /* Keycode Set 2 */ +#define HIL_POL_CHARTYPE_SET3 0x70 /* Keycode Set 3 */ +#define HIL_POL_AXIS_ALT 0x80 /* Data is from axis set 2 */ + + +#endif /* _HIL_H_ */ diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h new file mode 100644 index 00000000..394a8405 --- /dev/null +++ b/include/linux/hil_mlc.h @@ -0,0 +1,168 @@ +/* + * HP Human Interface Loop Master Link Controller driver. + * + * Copyright (c) 2001 Brian S. Julin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL"). 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * + * References: + * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A + * + */ + +#include <linux/hil.h> +#include <linux/time.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> +#include <linux/serio.h> +#include <linux/list.h> + +typedef struct hil_mlc hil_mlc; + +/* The HIL has a complicated state engine. + * We define the structure of nodes in the state engine here. + */ +enum hilse_act { + /* HILSE_OUT prepares to receive input if the next node + * is an IN or EXPECT, and then sends the given packet. + */ + HILSE_OUT = 0, + + /* HILSE_CTS checks if the loop is busy. */ + HILSE_CTS, + + /* HILSE_OUT_LAST sends the given command packet to + * the last configured/running device on the loop. + */ + HILSE_OUT_LAST, + + /* HILSE_OUT_DISC sends the given command packet to + * the next device past the last configured/running one. + */ + HILSE_OUT_DISC, + + /* HILSE_FUNC runs a callback function with given arguments. + * a positive return value causes the "ugly" branch to be taken. + */ + HILSE_FUNC, + + /* HILSE_IN simply expects any non-errored packet to arrive + * within arg usecs. + */ + HILSE_IN = 0x100, + + /* HILSE_EXPECT expects a particular packet to arrive + * within arg usecs, any other packet is considered an error. + */ + HILSE_EXPECT, + + /* HILSE_EXPECT_LAST as above but dev field should be last + * discovered/operational device. + */ + HILSE_EXPECT_LAST, + + /* HILSE_EXPECT_LAST as above but dev field should be first + * undiscovered/inoperational device. + */ + HILSE_EXPECT_DISC +}; + +typedef int (hilse_func) (hil_mlc *mlc, int arg); +struct hilse_node { + enum hilse_act act; /* How to process this node */ + union { + hilse_func *func; /* Function to call if HILSE_FUNC */ + hil_packet packet; /* Packet to send or to compare */ + } object; + int arg; /* Timeout in usec or parm for func */ + int good; /* Node to jump to on success */ + int bad; /* Node to jump to on error */ + int ugly; /* Node to jump to on timeout */ +}; + +/* Methods for back-end drivers, e.g. 
hp_sdc_mlc */ +typedef int (hil_mlc_cts) (hil_mlc *mlc); +typedef void (hil_mlc_out) (hil_mlc *mlc); +typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout); + +struct hil_mlc_devinfo { + uint8_t idd[16]; /* Device ID Byte and Describe Record */ + uint8_t rsc[16]; /* Security Code Header and Record */ + uint8_t exd[16]; /* Extended Describe Record */ + uint8_t rnm[16]; /* Device name as returned by RNM command */ +}; + +struct hil_mlc_serio_map { + hil_mlc *mlc; + int di_revmap; + int didx; +}; + +/* How many (possibly old/detached) devices the we try to keep track of */ +#define HIL_MLC_DEVMEM 16 + +struct hil_mlc { + struct list_head list; /* hil_mlc is organized as linked list */ + + rwlock_t lock; + + void *priv; /* Data specific to a particular type of MLC */ + + int seidx; /* Current node in state engine */ + int istarted, ostarted; + + hil_mlc_cts *cts; + struct semaphore csem; /* Raised when loop idle */ + + hil_mlc_out *out; + struct semaphore osem; /* Raised when outpacket dispatched */ + hil_packet opacket; + + hil_mlc_in *in; + struct semaphore isem; /* Raised when a packet arrives */ + hil_packet ipacket[16]; + hil_packet imatch; + int icount; + struct timeval instart; + suseconds_t intimeout; + + int ddi; /* Last operational device id */ + int lcv; /* LCV to throttle loops */ + struct timeval lcv_tv; /* Time loop was started */ + + int di_map[7]; /* Maps below items to live devs */ + struct hil_mlc_devinfo di[HIL_MLC_DEVMEM]; + struct serio *serio[HIL_MLC_DEVMEM]; + struct hil_mlc_serio_map serio_map[HIL_MLC_DEVMEM]; + hil_packet serio_opacket[HIL_MLC_DEVMEM]; + int serio_oidx[HIL_MLC_DEVMEM]; + struct hil_mlc_devinfo di_scratch; /* Temporary area */ + + int opercnt; + + struct tasklet_struct *tasklet; +}; + +int hil_mlc_register(hil_mlc *mlc); +int hil_mlc_unregister(hil_mlc *mlc); diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h new file mode 100644 index 00000000..f148e490 --- /dev/null +++ b/include/linux/hippidevice.h @@ -0,0 +1,41 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the HIPPI handlers. + * + * Version: @(#)hippidevice.h 1.0.0 05/26/97 + * + * Author: Jes Sorensen, <Jes.Sorensen@cern.ch> + * + * hippidevice.h is based on previous fddidevice.h work by + * Ross Biro + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Alan Cox, <gw4pts@gw4pts.ampr.org> + * Lawrence V. Stefani, <stefani@lkg.dec.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _LINUX_HIPPIDEVICE_H +#define _LINUX_HIPPIDEVICE_H + +#include <linux/if_hippi.h> + +#ifdef __KERNEL__ + +struct hippi_cb { + __u32 ifield; +}; + +extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); +extern int hippi_change_mtu(struct net_device *dev, int new_mtu); +extern int hippi_mac_addr(struct net_device *dev, void *p); +extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); +extern struct net_device *alloc_hippi_dev(int sizeof_priv); +#endif + +#endif /* _LINUX_HIPPIDEVICE_H */ diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h new file mode 100644 index 00000000..d392975d --- /dev/null +++ b/include/linux/hp_sdc.h @@ -0,0 +1,301 @@ +/* + * HP i8042 System Device Controller -- header + * + * Copyright (c) 2001 Brian S. Julin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL"). + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * + * References: + * + * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A + * + * System Device Controller Microprocessor Firmware Theory of Operation + * for Part Number 1820-4784 Revision B. Dwg No. A-1820-4784-2 + * + */ + +#ifndef _LINUX_HP_SDC_H +#define _LINUX_HP_SDC_H + +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/time.h> +#include <linux/timer.h> +#if defined(__hppa__) +#include <asm/hardware.h> +#endif + + +/* No 4X status reads take longer than this (in usec). + */ +#define HP_SDC_MAX_REG_DELAY 20000 + +typedef void (hp_sdc_irqhook) (int irq, void *dev_id, + uint8_t status, uint8_t data); + +int hp_sdc_request_timer_irq(hp_sdc_irqhook *callback); +int hp_sdc_request_hil_irq(hp_sdc_irqhook *callback); +int hp_sdc_request_cooked_irq(hp_sdc_irqhook *callback); +int hp_sdc_release_timer_irq(hp_sdc_irqhook *callback); +int hp_sdc_release_hil_irq(hp_sdc_irqhook *callback); +int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback); + +typedef struct { + int actidx; /* Start of act. Acts are atomic WRT I/O to SDC */ + int idx; /* Index within the act */ + int endidx; /* transaction is over and done if idx == endidx */ + uint8_t *seq; /* commands/data for the transaction */ + union { + hp_sdc_irqhook *irqhook; /* Callback, isr or tasklet context */ + struct semaphore *semaphore; /* Semaphore to sleep on. 
*/ + } act; +} hp_sdc_transaction; +int __hp_sdc_enqueue_transaction(hp_sdc_transaction *this); +int hp_sdc_enqueue_transaction(hp_sdc_transaction *this); +int hp_sdc_dequeue_transaction(hp_sdc_transaction *this); + +/* The HP_SDC_ACT* values are peculiar to this driver. + * Nuance: never HP_SDC_ACT_DATAIN | HP_SDC_ACT_DEALLOC, use another + * act to perform the dealloc. + */ +#define HP_SDC_ACT_PRECMD 0x01 /* Send a command first */ +#define HP_SDC_ACT_DATAREG 0x02 /* Set data registers */ +#define HP_SDC_ACT_DATAOUT 0x04 /* Send data bytes */ +#define HP_SDC_ACT_POSTCMD 0x08 /* Send command after */ +#define HP_SDC_ACT_DATAIN 0x10 /* Collect data after */ +#define HP_SDC_ACT_DURING 0x1f +#define HP_SDC_ACT_SEMAPHORE 0x20 /* Raise semaphore after */ +#define HP_SDC_ACT_CALLBACK 0x40 /* Pass data to IRQ handler */ +#define HP_SDC_ACT_DEALLOC 0x80 /* Destroy transaction after */ +#define HP_SDC_ACT_AFTER 0xe0 +#define HP_SDC_ACT_DEAD 0x60 /* Act timed out. */ + +/* Rest of the flags are straightforward representation of the SDC interface */ +#define HP_SDC_STATUS_IBF 0x02 /* Input buffer full */ + +#define HP_SDC_STATUS_IRQMASK 0xf0 /* Bits containing "level 1" irq */ +#define HP_SDC_STATUS_PERIODIC 0x10 /* Periodic 10ms timer */ +#define HP_SDC_STATUS_USERTIMER 0x20 /* "Special purpose" timer */ +#define HP_SDC_STATUS_TIMER 0x30 /* Both PERIODIC and USERTIMER */ +#define HP_SDC_STATUS_REG 0x40 /* Data from an i8042 register */ +#define HP_SDC_STATUS_HILCMD 0x50 /* Command from HIL MLC */ +#define HP_SDC_STATUS_HILDATA 0x60 /* Data from HIL MLC */ +#define HP_SDC_STATUS_PUP 0x70 /* Successful power-up self test */ +#define HP_SDC_STATUS_KCOOKED 0x80 /* Key from cooked kbd */ +#define HP_SDC_STATUS_KRPG 0xc0 /* Key from Repeat Gen */ +#define HP_SDC_STATUS_KMOD_SUP 0x10 /* Shift key is up */ +#define HP_SDC_STATUS_KMOD_CUP 0x20 /* Control key is up */ + +#define HP_SDC_NMISTATUS_FHS 0x40 /* NMI is a fast handshake irq */ + +/* Internal i8042 registers (there are more, but they are not too useful). */ + +#define HP_SDC_USE 0x02 /* Resource usage (including OB bit) */ +#define HP_SDC_IM 0x04 /* Interrupt mask */ +#define HP_SDC_CFG 0x11 /* Configuration register */ +#define HP_SDC_KBLANGUAGE 0x12 /* Keyboard language */ + +#define HP_SDC_D0 0x70 /* General purpose data buffer 0 */ +#define HP_SDC_D1 0x71 /* General purpose data buffer 1 */ +#define HP_SDC_D2 0x72 /* General purpose data buffer 2 */ +#define HP_SDC_D3 0x73 /* General purpose data buffer 3 */ +#define HP_SDC_VT1 0x74 /* Timer for voice 1 */ +#define HP_SDC_VT2 0x75 /* Timer for voice 2 */ +#define HP_SDC_VT3 0x76 /* Timer for voice 3 */ +#define HP_SDC_VT4 0x77 /* Timer for voice 4 */ +#define HP_SDC_KBN 0x78 /* Which HIL devs are Nimitz */ +#define HP_SDC_KBC 0x79 /* Which HIL devs are cooked kbds */ +#define HP_SDC_LPS 0x7a /* i8042's view of HIL status */ +#define HP_SDC_LPC 0x7b /* i8042's view of HIL "control" */ +#define HP_SDC_RSV 0x7c /* Reserved "for testing" */ +#define HP_SDC_LPR 0x7d /* i8042 count of HIL reconfigs */ +#define HP_SDC_XTD 0x7e /* "Extended Configuration" register */ +#define HP_SDC_STR 0x7f /* i8042 self-test result */ + +/* Bitfields for above registers */ +#define HP_SDC_USE_LOOP 0x04 /* Command is currently on the loop. 
*/ + +#define HP_SDC_IM_MASK 0x1f /* these bits not part of cmd/status */ +#define HP_SDC_IM_FH 0x10 /* Mask the fast handshake irq */ +#define HP_SDC_IM_PT 0x08 /* Mask the periodic timer irq */ +#define HP_SDC_IM_TIMERS 0x04 /* Mask the MT/DT/CT irq */ +#define HP_SDC_IM_RESET 0x02 /* Mask the reset key irq */ +#define HP_SDC_IM_HIL 0x01 /* Mask the HIL MLC irq */ + +#define HP_SDC_CFG_ROLLOVER 0x08 /* WTF is "N-key rollover"? */ +#define HP_SDC_CFG_KBD 0x10 /* There is a keyboard */ +#define HP_SDC_CFG_NEW 0x20 /* Supports/uses HIL MLC */ +#define HP_SDC_CFG_KBD_OLD 0x03 /* keyboard code for non-HIL */ +#define HP_SDC_CFG_KBD_NEW 0x07 /* keyboard code from HIL autoconfig */ +#define HP_SDC_CFG_REV 0x40 /* Code revision bit */ +#define HP_SDC_CFG_IDPROM 0x80 /* IDPROM present in kbd (not HIL) */ + +#define HP_SDC_LPS_NDEV 0x07 /* # devices autoconfigured on HIL */ +#define HP_SDC_LPS_ACSUCC 0x08 /* loop autoconfigured successfully */ +#define HP_SDC_LPS_ACFAIL 0x80 /* last loop autoconfigure failed */ + +#define HP_SDC_LPC_APE_IPF 0x01 /* HIL MLC APE/IPF (autopoll) set */ +#define HP_SDC_LPC_ARCONERR 0x02 /* i8042 autoreconfigs loop on err */ +#define HP_SDC_LPC_ARCQUIET 0x03 /* i8042 doesn't report autoreconfigs*/ +#define HP_SDC_LPC_COOK 0x10 /* i8042 cooks devices in _KBN */ +#define HP_SDC_LPC_RC 0x80 /* causes autoreconfig */ + +#define HP_SDC_XTD_REV 0x07 /* contains revision code */ +#define HP_SDC_XTD_REV_STRINGS(val, str) \ +switch (val) { \ + case 0x1: str = "1820-3712"; break; \ + case 0x2: str = "1820-4379"; break; \ + case 0x3: str = "1820-4784"; break; \ + default: str = "unknown"; \ +}; +#define HP_SDC_XTD_BEEPER 0x08 /* TI SN76494 beeper available */ +#define HP_SDC_XTD_BBRTC 0x20 /* OKI MSM-58321 BBRTC present */ + +#define HP_SDC_CMD_LOAD_RT 0x31 /* Load real time (from 8042) */ +#define HP_SDC_CMD_LOAD_FHS 0x36 /* Load the fast handshake timer */ +#define HP_SDC_CMD_LOAD_MT 0x38 /* Load the match timer */ +#define HP_SDC_CMD_LOAD_DT 0x3B /* Load the delay timer */ +#define HP_SDC_CMD_LOAD_CT 0x3E /* Load the cycle timer */ + +#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */ + +/* The documents provided do not explicitly state that all registers betweem + * 0x01 and 0x1f inclusive can be read by sending their register index as a + * command, but this is implied and appears to be the case. + */ +#define HP_SDC_CMD_READ_RAM 0x00 /* Load from i8042 RAM (autoinc) */ +#define HP_SDC_CMD_READ_USE 0x02 /* Undocumented! 
Load from usage reg */ +#define HP_SDC_CMD_READ_IM 0x04 /* Load current interrupt mask */ +#define HP_SDC_CMD_READ_KCC 0x11 /* Load primary kbd config code */ +#define HP_SDC_CMD_READ_KLC 0x12 /* Load primary kbd language code */ +#define HP_SDC_CMD_READ_T1 0x13 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_T2 0x14 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_T3 0x15 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_T4 0x16 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_T5 0x17 /* Load timer output buffer byte 1 */ +#define HP_SDC_CMD_READ_D0 0xf0 /* Load from i8042 RAM location 0x70 */ +#define HP_SDC_CMD_READ_D1 0xf1 /* Load from i8042 RAM location 0x71 */ +#define HP_SDC_CMD_READ_D2 0xf2 /* Load from i8042 RAM location 0x72 */ +#define HP_SDC_CMD_READ_D3 0xf3 /* Load from i8042 RAM location 0x73 */ +#define HP_SDC_CMD_READ_VT1 0xf4 /* Load from i8042 RAM location 0x74 */ +#define HP_SDC_CMD_READ_VT2 0xf5 /* Load from i8042 RAM location 0x75 */ +#define HP_SDC_CMD_READ_VT3 0xf6 /* Load from i8042 RAM location 0x76 */ +#define HP_SDC_CMD_READ_VT4 0xf7 /* Load from i8042 RAM location 0x77 */ +#define HP_SDC_CMD_READ_KBN 0xf8 /* Load from i8042 RAM location 0x78 */ +#define HP_SDC_CMD_READ_KBC 0xf9 /* Load from i8042 RAM location 0x79 */ +#define HP_SDC_CMD_READ_LPS 0xfa /* Load from i8042 RAM location 0x7a */ +#define HP_SDC_CMD_READ_LPC 0xfb /* Load from i8042 RAM location 0x7b */ +#define HP_SDC_CMD_READ_RSV 0xfc /* Load from i8042 RAM location 0x7c */ +#define HP_SDC_CMD_READ_LPR 0xfd /* Load from i8042 RAM location 0x7d */ +#define HP_SDC_CMD_READ_XTD 0xfe /* Load from i8042 RAM location 0x7e */ +#define HP_SDC_CMD_READ_STR 0xff /* Load from i8042 RAM location 0x7f */ + +#define HP_SDC_CMD_SET_ARD 0xA0 /* Set emulated autorepeat delay */ +#define HP_SDC_CMD_SET_ARR 0xA2 /* Set emulated autorepeat rate */ +#define HP_SDC_CMD_SET_BELL 0xA3 /* Set voice 3 params for "beep" cmd */ +#define HP_SDC_CMD_SET_RPGR 0xA6 /* Set "RPG" irq rate (doesn't work) */ +#define HP_SDC_CMD_SET_RTMS 0xAD /* Set the RTC time (milliseconds) */ +#define HP_SDC_CMD_SET_RTD 0xAF /* Set the RTC time (days) */ +#define HP_SDC_CMD_SET_FHS 0xB2 /* Set fast handshake timer */ +#define HP_SDC_CMD_SET_MT 0xB4 /* Set match timer */ +#define HP_SDC_CMD_SET_DT 0xB7 /* Set delay timer */ +#define HP_SDC_CMD_SET_CT 0xBA /* Set cycle timer */ +#define HP_SDC_CMD_SET_RAMP 0xC1 /* Reset READ_RAM autoinc counter */ +#define HP_SDC_CMD_SET_D0 0xe0 /* Load to i8042 RAM location 0x70 */ +#define HP_SDC_CMD_SET_D1 0xe1 /* Load to i8042 RAM location 0x71 */ +#define HP_SDC_CMD_SET_D2 0xe2 /* Load to i8042 RAM location 0x72 */ +#define HP_SDC_CMD_SET_D3 0xe3 /* Load to i8042 RAM location 0x73 */ +#define HP_SDC_CMD_SET_VT1 0xe4 /* Load to i8042 RAM location 0x74 */ +#define HP_SDC_CMD_SET_VT2 0xe5 /* Load to i8042 RAM location 0x75 */ +#define HP_SDC_CMD_SET_VT3 0xe6 /* Load to i8042 RAM location 0x76 */ +#define HP_SDC_CMD_SET_VT4 0xe7 /* Load to i8042 RAM location 0x77 */ +#define HP_SDC_CMD_SET_KBN 0xe8 /* Load to i8042 RAM location 0x78 */ +#define HP_SDC_CMD_SET_KBC 0xe9 /* Load to i8042 RAM location 0x79 */ +#define HP_SDC_CMD_SET_LPS 0xea /* Load to i8042 RAM location 0x7a */ +#define HP_SDC_CMD_SET_LPC 0xeb /* Load to i8042 RAM location 0x7b */ +#define HP_SDC_CMD_SET_RSV 0xec /* Load to i8042 RAM location 0x7c */ +#define HP_SDC_CMD_SET_LPR 0xed /* Load to i8042 RAM location 0x7d */ +#define HP_SDC_CMD_SET_XTD 0xee /* Load to i8042 RAM location 0x7e */ +#define 
HP_SDC_CMD_SET_STR 0xef /* Load to i8042 RAM location 0x7f */ + +#define HP_SDC_CMD_DO_RTCW 0xc2 /* i8042 RAM 0x70 --> RTC */ +#define HP_SDC_CMD_DO_RTCR 0xc3 /* RTC[0x70 0:3] --> irq/status/data */ +#define HP_SDC_CMD_DO_BEEP 0xc4 /* i8042 RAM 0x70-74 --> beeper,VT3 */ +#define HP_SDC_CMD_DO_HIL 0xc5 /* i8042 RAM 0x70-73 --> + HIL MLC R0,R1 i8042 HIL watchdog */ + +/* Values used to (de)mangle input/output to/from the HIL MLC */ +#define HP_SDC_DATA 0x40 /* Data from an 8042 register */ +#define HP_SDC_HIL_CMD 0x50 /* Data from HIL MLC R1/8042 */ +#define HP_SDC_HIL_R1MASK 0x0f /* Contents of HIL MLC R1 0:3 */ +#define HP_SDC_HIL_AUTO 0x10 /* Set if POL results from i8042 */ +#define HP_SDC_HIL_ISERR 0x80 /* Has meaning as in next 4 values */ +#define HP_SDC_HIL_RC_DONE 0x80 /* i8042 auto-configured loop */ +#define HP_SDC_HIL_ERR 0x81 /* HIL MLC R2 had a bit set */ +#define HP_SDC_HIL_TO 0x82 /* i8042 HIL watchdog expired */ +#define HP_SDC_HIL_RC 0x84 /* i8042 is auto-configuring loop */ +#define HP_SDC_HIL_DAT 0x60 /* Data from HIL MLC R0 */ + + +typedef struct { + rwlock_t ibf_lock; + rwlock_t lock; /* user/tasklet lock */ + rwlock_t rtq_lock; /* isr/tasklet lock */ + rwlock_t hook_lock; /* isr/user lock for handler add/del */ + + unsigned int irq, nmi; /* Our IRQ lines */ + unsigned long base_io, status_io, data_io; /* Our IO ports */ + + uint8_t im; /* Interrupt mask */ + int set_im; /* Interrupt mask needs to be set. */ + + int ibf; /* Last known status of IBF flag */ + uint8_t wi; /* current i8042 write index */ + uint8_t r7[4]; /* current i8042[0x70 - 0x74] values */ + uint8_t r11, r7e; /* Values from version/revision regs */ + + hp_sdc_irqhook *timer, *reg, *hil, *pup, *cooked; + +#define HP_SDC_QUEUE_LEN 16 + hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */ + + int rcurr, rqty; /* Current read transact in process */ + struct timeval rtv; /* Time when current read started */ + int wcurr; /* Current write transact in process */ + + int dev_err; /* carries status from registration */ +#if defined(__hppa__) + struct parisc_device *dev; +#elif defined(__mc68000__) + void *dev; +#else +#error No support for device registration on this arch yet. 
+#endif + + struct timer_list kicker; /* Keeps below task alive */ + struct tasklet_struct task; + +} hp_i8042_sdc; + +#endif /* _LINUX_HP_SDC_H */ diff --git a/include/linux/hpet.h b/include/linux/hpet.h new file mode 100644 index 00000000..219ca4f6 --- /dev/null +++ b/include/linux/hpet.h @@ -0,0 +1,131 @@ +#ifndef __HPET__ +#define __HPET__ 1 + +#include <linux/compiler.h> + +#ifdef __KERNEL__ + +/* + * Offsets into HPET Registers + */ + +struct hpet { + u64 hpet_cap; /* capabilities */ + u64 res0; /* reserved */ + u64 hpet_config; /* configuration */ + u64 res1; /* reserved */ + u64 hpet_isr; /* interrupt status reg */ + u64 res2[25]; /* reserved */ + union { /* main counter */ + u64 _hpet_mc64; + u32 _hpet_mc32; + unsigned long _hpet_mc; + } _u0; + u64 res3; /* reserved */ + struct hpet_timer { + u64 hpet_config; /* configuration/cap */ + union { /* timer compare register */ + u64 _hpet_hc64; + u32 _hpet_hc32; + unsigned long _hpet_compare; + } _u1; + u64 hpet_fsb[2]; /* FSB route */ + } hpet_timers[1]; +}; + +#define hpet_mc _u0._hpet_mc +#define hpet_compare _u1._hpet_compare + +#define HPET_MAX_TIMERS (32) +#define HPET_MAX_IRQ (32) + +/* + * HPET general capabilities register + */ + +#define HPET_COUNTER_CLK_PERIOD_MASK (0xffffffff00000000ULL) +#define HPET_COUNTER_CLK_PERIOD_SHIFT (32UL) +#define HPET_VENDOR_ID_MASK (0x00000000ffff0000ULL) +#define HPET_VENDOR_ID_SHIFT (16ULL) +#define HPET_LEG_RT_CAP_MASK (0x8000) +#define HPET_COUNTER_SIZE_MASK (0x2000) +#define HPET_NUM_TIM_CAP_MASK (0x1f00) +#define HPET_NUM_TIM_CAP_SHIFT (8ULL) + +/* + * HPET general configuration register + */ + +#define HPET_LEG_RT_CNF_MASK (2UL) +#define HPET_ENABLE_CNF_MASK (1UL) + + +/* + * Timer configuration register + */ + +#define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL) +#define Tn_INT_ROUTE_CAP_SHIFT (32UL) +#define Tn_FSB_INT_DELCAP_MASK (0x8000UL) +#define Tn_FSB_INT_DELCAP_SHIFT (15) +#define Tn_FSB_EN_CNF_MASK (0x4000UL) +#define Tn_FSB_EN_CNF_SHIFT (14) +#define Tn_INT_ROUTE_CNF_MASK (0x3e00UL) +#define Tn_INT_ROUTE_CNF_SHIFT (9) +#define Tn_32MODE_CNF_MASK (0x0100UL) +#define Tn_VAL_SET_CNF_MASK (0x0040UL) +#define Tn_SIZE_CAP_MASK (0x0020UL) +#define Tn_PER_INT_CAP_MASK (0x0010UL) +#define Tn_TYPE_CNF_MASK (0x0008UL) +#define Tn_INT_ENB_CNF_MASK (0x0004UL) +#define Tn_INT_TYPE_CNF_MASK (0x0002UL) + +/* + * Timer FSB Interrupt Route Register + */ + +#define Tn_FSB_INT_ADDR_MASK (0xffffffff00000000ULL) +#define Tn_FSB_INT_ADDR_SHIFT (32UL) +#define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL) + +/* + * exported interfaces + */ + +struct hpet_data { + unsigned long hd_phys_address; + void __iomem *hd_address; + unsigned short hd_nirqs; + unsigned int hd_state; /* timer allocated */ + unsigned int hd_irq[HPET_MAX_TIMERS]; +}; + +static inline void hpet_reserve_timer(struct hpet_data *hd, int timer) +{ + hd->hd_state |= (1 << timer); + return; +} + +int hpet_alloc(struct hpet_data *); + +#endif /* __KERNEL__ */ + +struct hpet_info { + unsigned long hi_ireqfreq; /* Hz */ + unsigned long hi_flags; /* information */ + unsigned short hi_hpet; + unsigned short hi_timer; +}; + +#define HPET_INFO_PERIODIC 0x0010 /* periodic-capable comparator */ + +#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */ +#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */ +#define HPET_INFO _IOR('h', 0x03, struct hpet_info) +#define HPET_EPI _IO('h', 0x04) /* enable periodic */ +#define HPET_DPI _IO('h', 0x05) /* disable periodic */ +#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */ + +#define 
MAX_HPET_TBS 8 /* maximum hpet timer blocks */ + +#endif /* !__HPET__ */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h new file mode 100644 index 00000000..fd0dc30c --- /dev/null +++ b/include/linux/hrtimer.h @@ -0,0 +1,453 @@ +/* + * include/linux/hrtimer.h + * + * hrtimers - High-resolution kernel timers + * + * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> + * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar + * + * data type definitions, declarations, prototypes + * + * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_HRTIMER_H +#define _LINUX_HRTIMER_H + +#include <linux/rbtree.h> +#include <linux/ktime.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/percpu.h> +#include <linux/timer.h> +#include <linux/timerqueue.h> + +struct hrtimer_clock_base; +struct hrtimer_cpu_base; + +/* + * Mode arguments of xxx_hrtimer functions: + */ +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */ + HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */ + HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */ + HRTIMER_MODE_ABS_PINNED = 0x02, + HRTIMER_MODE_REL_PINNED = 0x03, +}; + +/* + * Return values for the callback function + */ +enum hrtimer_restart { + HRTIMER_NORESTART, /* Timer is not restarted */ + HRTIMER_RESTART, /* Timer must be restarted */ +}; + +/* + * Values to track state of the timer + * + * Possible states: + * + * 0x00 inactive + * 0x01 enqueued into rbtree + * 0x02 callback function running + * 0x04 timer is migrated to another cpu + * + * Special cases: + * 0x03 callback function running and enqueued + * (was requeued on another CPU) + * 0x05 timer was migrated on CPU hotunplug + * + * The "callback function running and enqueued" status is only possible on + * SMP. It happens for example when a posix timer expired and the callback + * queued a signal. Between dropping the lock which protects the posix timer + * and reacquiring the base lock of the hrtimer, another CPU can deliver the + * signal and rearm the timer. We have to preserve the callback running state, + * as otherwise the timer could be removed before the softirq code finishes the + * the handling of the timer. + * + * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state + * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This + * also affects HRTIMER_STATE_MIGRATE where the preservation is not + * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is + * enqueued on the new cpu. + * + * All state transitions are protected by cpu_base->lock. + */ +#define HRTIMER_STATE_INACTIVE 0x00 +#define HRTIMER_STATE_ENQUEUED 0x01 +#define HRTIMER_STATE_CALLBACK 0x02 +#define HRTIMER_STATE_MIGRATE 0x04 + +/** + * struct hrtimer - the basic hrtimer structure + * @node: timerqueue node, which also manages node.expires, + * the absolute expiry time in the hrtimers internal + * representation. The time is related to the clock on + * which the timer is based. Is setup by adding + * slack to the _softexpires value. For non range timers + * identical to _softexpires. + * @_softexpires: the absolute earliest expiry time of the hrtimer. + * The time which was given as expiry time when the timer + * was armed. 
+ * @function: timer expiry callback function + * @base: pointer to the timer base (per cpu and per clock) + * @state: state information (See bit values above) + * @start_site: timer statistics field to store the site where the timer + * was started + * @start_comm: timer statistics field to store the name of the process which + * started the timer + * @start_pid: timer statistics field to store the pid of the task which + * started the timer + * + * The hrtimer structure must be initialized by hrtimer_init() + */ +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + unsigned long state; +#ifdef CONFIG_TIMER_STATS + int start_pid; + void *start_site; + char start_comm[16]; +#endif +}; + +/** + * struct hrtimer_sleeper - simple sleeper structure + * @timer: embedded timer structure + * @task: task to wake up + * + * task is set to NULL, when the timer expires. + */ +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; +}; + +/** + * struct hrtimer_clock_base - the timer base for a specific clock + * @cpu_base: per cpu clock base + * @index: clock type index for per_cpu support when moving a + * timer to a base on another cpu. + * @clockid: clock id for per_cpu support + * @active: red black tree root node for the active timers + * @resolution: the resolution of the clock, in nanoseconds + * @get_time: function to retrieve the current time of the clock + * @softirq_time: the time when running the hrtimer queue in the softirq + * @offset: offset of this clock to the monotonic base + */ +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + int index; + clockid_t clockid; + struct timerqueue_head active; + ktime_t resolution; + ktime_t (*get_time)(void); + ktime_t softirq_time; + ktime_t offset; +}; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC, + HRTIMER_BASE_REALTIME, + HRTIMER_BASE_BOOTTIME, + HRTIMER_MAX_CLOCK_BASES, +}; + +/* + * struct hrtimer_cpu_base - the per cpu clock bases + * @lock: lock protecting the base and associated clock bases + * and timers + * @active_bases: Bitfield to mark bases with active timers + * @expires_next: absolute time of the next event which was scheduled + * via clock_set_next_event() + * @hres_active: State of high resolution mode + * @hang_detected: The last hrtimer interrupt detected a hang + * @nr_events: Total number of hrtimer interrupt events + * @nr_retries: Total number of hrtimer interrupt retries + * @nr_hangs: Total number of hrtimer interrupt hangs + * @max_hang_time: Maximum time spent in hrtimer_interrupt + * @clock_base: array of clock bases for this cpu + */ +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned long active_bases; +#ifdef CONFIG_HIGH_RES_TIMERS + ktime_t expires_next; + int hres_active; + int hang_detected; + unsigned long nr_events; + unsigned long nr_retries; + unsigned long nr_hangs; + ktime_t max_hang_time; +#endif + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; +}; + +static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) +{ + timer->node.expires = time; + timer->_softexpires = time; +} + +static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta) +{ + timer->_softexpires = time; + timer->node.expires = ktime_add_safe(time, delta); +} + +static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) +{ + timer->_softexpires = time; + 
timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); +} + +static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) +{ + timer->node.expires.tv64 = tv64; + timer->_softexpires.tv64 = tv64; +} + +static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) +{ + timer->node.expires = ktime_add_safe(timer->node.expires, time); + timer->_softexpires = ktime_add_safe(timer->_softexpires, time); +} + +static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns) +{ + timer->node.expires = ktime_add_ns(timer->node.expires, ns); + timer->_softexpires = ktime_add_ns(timer->_softexpires, ns); +} + +static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer) +{ + return timer->node.expires; +} + +static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) +{ + return timer->_softexpires; +} + +static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) +{ + return timer->node.expires.tv64; +} +static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) +{ + return timer->_softexpires.tv64; +} + +static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) +{ + return ktime_to_ns(timer->node.expires); +} + +static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) +{ + return ktime_sub(timer->node.expires, timer->base->get_time()); +} + +#ifdef CONFIG_HIGH_RES_TIMERS +struct clock_event_device; + +extern void hrtimer_interrupt(struct clock_event_device *dev); + +/* + * In high resolution mode the time reference must be read accurate + */ +static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) +{ + return timer->base->get_time(); +} + +static inline int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return timer->base->cpu_base->hres_active; +} + +extern void hrtimer_peek_ahead_timers(void); + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. + */ +# define HIGH_RES_NSEC 1 +# define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC } +# define MONOTONIC_RES_NSEC HIGH_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_HIGH_RES + +#else + +# define MONOTONIC_RES_NSEC LOW_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_LOW_RES + +static inline void hrtimer_peek_ahead_timers(void) { } + +/* + * In non high resolution mode the time reference is taken from + * the base softirq time variable. 
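As a small illustration of the soft/hard expiry pair handled by the helpers above (the function and variable names here are hypothetical): hrtimer_set_expires_range_ns() keeps the earliest acceptable expiry in _softexpires and pads node.expires by the slack, and hrtimer_start_expires(), defined further down in this header, re-derives that slack when arming the timer.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Illustrative: allow the timer to fire anywhere in [expiry, expiry + slack_ns]. */
static void example_arm_range_timer(struct hrtimer *timer, ktime_t expiry,
                                    unsigned long slack_ns)
{
        hrtimer_set_expires_range_ns(timer, expiry, slack_ns);
        hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}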
+ */ +static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) +{ + return timer->base->softirq_time; +} + +static inline int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return 0; +} +#endif + +extern void clock_was_set(void); +#ifdef CONFIG_TIMERFD +extern void timerfd_clock_was_set(void); +#else +static inline void timerfd_clock_was_set(void) { } +#endif +extern void hrtimers_resume(void); + +extern ktime_t ktime_get(void); +extern ktime_t ktime_get_real(void); +extern ktime_t ktime_get_boottime(void); +extern ktime_t ktime_get_monotonic_offset(void); + +DECLARE_PER_CPU(struct tick_device, tick_cpu_device); + + +/* Exported timer functions: */ + +/* Initialize timers: */ +extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, + enum hrtimer_mode mode); + +#ifdef CONFIG_DEBUG_OBJECTS_TIMERS +extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, + enum hrtimer_mode mode); + +extern void destroy_hrtimer_on_stack(struct hrtimer *timer); +#else +static inline void hrtimer_init_on_stack(struct hrtimer *timer, + clockid_t which_clock, + enum hrtimer_mode mode) +{ + hrtimer_init(timer, which_clock, mode); +} +static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } +#endif + +/* Basic timer operations: */ +extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, + const enum hrtimer_mode mode); +extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + unsigned long range_ns, const enum hrtimer_mode mode); +extern int +__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + unsigned long delta_ns, + const enum hrtimer_mode mode, int wakeup); + +extern int hrtimer_cancel(struct hrtimer *timer); +extern int hrtimer_try_to_cancel(struct hrtimer *timer); + +static inline int hrtimer_start_expires(struct hrtimer *timer, + enum hrtimer_mode mode) +{ + unsigned long delta; + ktime_t soft, hard; + soft = hrtimer_get_softexpires(timer); + hard = hrtimer_get_expires(timer); + delta = ktime_to_ns(ktime_sub(hard, soft)); + return hrtimer_start_range_ns(timer, soft, delta, mode); +} + +static inline int hrtimer_restart(struct hrtimer *timer) +{ + return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); +} + +/* Query timers: */ +extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); +extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); + +extern ktime_t hrtimer_get_next_event(void); + +/* + * A timer is active, when it is enqueued into the rbtree or the + * callback function is running or it's in the state of being migrated + * to another cpu. 
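For orientation, a minimal hypothetical user of the basic operations declared above: initialize on CLOCK_MONOTONIC, arm a relative one-shot, and return HRTIMER_NORESTART from the callback (a periodic timer would instead call hrtimer_forward_now() and return HRTIMER_RESTART). The example_* names are placeholders.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

/* The callback runs in hard or soft interrupt context, so it must not sleep. */
static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
        return HRTIMER_NORESTART;       /* one-shot: do not re-arm */
}

static void example_timer_arm(void)
{
        hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        example_timer.function = example_timer_fn;
        /* fire roughly 100 ms from now */
        hrtimer_start(&example_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
                      HRTIMER_MODE_REL);
}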
+ */ +static inline int hrtimer_active(const struct hrtimer *timer) +{ + return timer->state != HRTIMER_STATE_INACTIVE; +} + +/* + * Helper function to check, whether the timer is on one of the queues + */ +static inline int hrtimer_is_queued(struct hrtimer *timer) +{ + return timer->state & HRTIMER_STATE_ENQUEUED; +} + +/* + * Helper function to check, whether the timer is running the callback + * function + */ +static inline int hrtimer_callback_running(struct hrtimer *timer) +{ + return timer->state & HRTIMER_STATE_CALLBACK; +} + +/* Forward a hrtimer so it expires after now: */ +extern u64 +hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); + +/* Forward a hrtimer so it expires after the hrtimer's current now */ +static inline u64 hrtimer_forward_now(struct hrtimer *timer, + ktime_t interval) +{ + return hrtimer_forward(timer, timer->base->get_time(), interval); +} + +/* Precise sleep: */ +extern long hrtimer_nanosleep(struct timespec *rqtp, + struct timespec __user *rmtp, + const enum hrtimer_mode mode, + const clockid_t clockid); +extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); + +extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, + struct task_struct *tsk); + +extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, + const enum hrtimer_mode mode); +extern int schedule_hrtimeout_range_clock(ktime_t *expires, + unsigned long delta, const enum hrtimer_mode mode, int clock); +extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); + +/* Soft interrupt function to run the hrtimer queues: */ +extern void hrtimer_run_queues(void); +extern void hrtimer_run_pending(void); + +/* Bootup initialization: */ +extern void __init hrtimers_init(void); + +#if BITS_PER_LONG < 64 +extern u64 ktime_divns(const ktime_t kt, s64 div); +#else /* BITS_PER_LONG < 64 */ +# define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) +#endif + +/* Show pending timers: */ +extern void sysrq_timer_list_show(void); + +#endif diff --git a/include/linux/hsi/Kbuild b/include/linux/hsi/Kbuild new file mode 100644 index 00000000..271a770b --- /dev/null +++ b/include/linux/hsi/Kbuild @@ -0,0 +1 @@ +header-y += hsi_char.h diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h new file mode 100644 index 00000000..56fae865 --- /dev/null +++ b/include/linux/hsi/hsi.h @@ -0,0 +1,413 @@ +/* + * HSI core header file. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_HSI_H__ +#define __LINUX_HSI_H__ + +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/scatterlist.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/notifier.h> + +/* HSI message ttype */ +#define HSI_MSG_READ 0 +#define HSI_MSG_WRITE 1 + +/* HSI configuration values */ +enum { + HSI_MODE_STREAM = 1, + HSI_MODE_FRAME, +}; + +enum { + HSI_FLOW_SYNC, /* Synchronized flow */ + HSI_FLOW_PIPE, /* Pipelined flow */ +}; + +enum { + HSI_ARB_RR, /* Round-robin arbitration */ + HSI_ARB_PRIO, /* Channel priority arbitration */ +}; + +#define HSI_MAX_CHANNELS 16 + +/* HSI message status codes */ +enum { + HSI_STATUS_COMPLETED, /* Message transfer is completed */ + HSI_STATUS_PENDING, /* Message pending to be read/write (POLL) */ + HSI_STATUS_PROCEEDING, /* Message transfer is ongoing */ + HSI_STATUS_QUEUED, /* Message waiting to be served */ + HSI_STATUS_ERROR, /* Error when message transfer was ongoing */ +}; + +/* HSI port event codes */ +enum { + HSI_EVENT_START_RX, + HSI_EVENT_STOP_RX, +}; + +/** + * struct hsi_config - Configuration for RX/TX HSI modules + * @mode: Bit transmission mode (STREAM or FRAME) + * @channels: Number of channels to use [1..16] + * @speed: Max bit transmission speed (Kbit/s) + * @flow: RX flow type (SYNCHRONIZED or PIPELINE) + * @arb_mode: Arbitration mode for TX frame (Round robin, priority) + */ +struct hsi_config { + unsigned int mode; + unsigned int channels; + unsigned int speed; + union { + unsigned int flow; /* RX only */ + unsigned int arb_mode; /* TX only */ + }; +}; + +/** + * struct hsi_board_info - HSI client board info + * @name: Name for the HSI device + * @hsi_id: HSI controller id where the client sits + * @port: Port number in the controller where the client sits + * @tx_cfg: HSI TX configuration + * @rx_cfg: HSI RX configuration + * @platform_data: Platform related data + * @archdata: Architecture-dependent device data + */ +struct hsi_board_info { + const char *name; + unsigned int hsi_id; + unsigned int port; + struct hsi_config tx_cfg; + struct hsi_config rx_cfg; + void *platform_data; + struct dev_archdata *archdata; +}; + +#ifdef CONFIG_HSI_BOARDINFO +extern int hsi_register_board_info(struct hsi_board_info const *info, + unsigned int len); +#else +static inline int hsi_register_board_info(struct hsi_board_info const *info, + unsigned int len) +{ + return 0; +} +#endif /* CONFIG_HSI_BOARDINFO */ + +/** + * struct hsi_client - HSI client attached to an HSI port + * @device: Driver model representation of the device + * @tx_cfg: HSI TX configuration + * @rx_cfg: HSI RX configuration + * @e_handler: Callback for handling port events (RX Wake High/Low) + * @pclaimed: Keeps tracks if the clients claimed its associated HSI port + * @nb: Notifier block for port events + */ +struct hsi_client { + struct device device; + struct hsi_config tx_cfg; + struct hsi_config rx_cfg; + /* private: */ + void (*ehandler)(struct hsi_client *, unsigned long); + unsigned int pclaimed:1; + struct notifier_block nb; +}; + +#define to_hsi_client(dev) container_of(dev, struct hsi_client, device) + +static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data) +{ + dev_set_drvdata(&cl->device, data); +} + +static inline void *hsi_client_drvdata(struct hsi_client *cl) +{ + return 
dev_get_drvdata(&cl->device); +} + +int hsi_register_port_event(struct hsi_client *cl, + void (*handler)(struct hsi_client *, unsigned long)); +int hsi_unregister_port_event(struct hsi_client *cl); + +/** + * struct hsi_client_driver - Driver associated to an HSI client + * @driver: Driver model representation of the driver + */ +struct hsi_client_driver { + struct device_driver driver; +}; + +#define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\ + driver) + +int hsi_register_client_driver(struct hsi_client_driver *drv); + +static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv) +{ + driver_unregister(&drv->driver); +} + +/** + * struct hsi_msg - HSI message descriptor + * @link: Free to use by the current descriptor owner + * @cl: HSI device client that issues the transfer + * @sgt: Head of the scatterlist array + * @context: Client context data associated to the transfer + * @complete: Transfer completion callback + * @destructor: Destructor to free resources when flushing + * @status: Status of the transfer when completed + * @actual_len: Actual length of data transfered on completion + * @channel: Channel were to TX/RX the message + * @ttype: Transfer type (TX if set, RX otherwise) + * @break_frame: if true HSI will send/receive a break frame. Data buffers are + * ignored in the request. + */ +struct hsi_msg { + struct list_head link; + struct hsi_client *cl; + struct sg_table sgt; + void *context; + + void (*complete)(struct hsi_msg *msg); + void (*destructor)(struct hsi_msg *msg); + + int status; + unsigned int actual_len; + unsigned int channel; + unsigned int ttype:1; + unsigned int break_frame:1; +}; + +struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags); +void hsi_free_msg(struct hsi_msg *msg); + +/** + * struct hsi_port - HSI port device + * @device: Driver model representation of the device + * @tx_cfg: Current TX path configuration + * @rx_cfg: Current RX path configuration + * @num: Port number + * @shared: Set when port can be shared by different clients + * @claimed: Reference count of clients which claimed the port + * @lock: Serialize port claim + * @async: Asynchronous transfer callback + * @setup: Callback to set the HSI client configuration + * @flush: Callback to clean the HW state and destroy all pending transfers + * @start_tx: Callback to inform that a client wants to TX data + * @stop_tx: Callback to inform that a client no longer wishes to TX data + * @release: Callback to inform that a client no longer uses the port + * @n_head: Notifier chain for signaling port events to the clients. 
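A short, hypothetical sketch of how a client driver might hook into hsi_register_client_driver() declared above; the example_* names are placeholders, and the probe/remove callbacks take a plain struct device as in the driver core.

#include <linux/hsi/hsi.h>
#include <linux/module.h>

static int example_probe(struct device *dev)
{
        struct hsi_client *cl = to_hsi_client(dev);

        /* stash per-client state, e.g. via hsi_client_set_drvdata(cl, priv) */
        (void)cl;
        return 0;
}

static int example_remove(struct device *dev)
{
        return 0;
}

static struct hsi_client_driver example_client_driver = {
        .driver = {
                .name   = "hsi-example",
                .owner  = THIS_MODULE,
                .probe  = example_probe,
                .remove = example_remove,
        },
};

static int __init example_init(void)
{
        return hsi_register_client_driver(&example_client_driver);
}

static void __exit example_exit(void)
{
        hsi_unregister_client_driver(&example_client_driver);
}

module_init(example_init);
module_exit(example_exit);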
+ */ +struct hsi_port { + struct device device; + struct hsi_config tx_cfg; + struct hsi_config rx_cfg; + unsigned int num; + unsigned int shared:1; + int claimed; + struct mutex lock; + int (*async)(struct hsi_msg *msg); + int (*setup)(struct hsi_client *cl); + int (*flush)(struct hsi_client *cl); + int (*start_tx)(struct hsi_client *cl); + int (*stop_tx)(struct hsi_client *cl); + int (*release)(struct hsi_client *cl); + /* private */ + struct atomic_notifier_head n_head; +}; + +#define to_hsi_port(dev) container_of(dev, struct hsi_port, device) +#define hsi_get_port(cl) to_hsi_port((cl)->device.parent) + +int hsi_event(struct hsi_port *port, unsigned long event); +int hsi_claim_port(struct hsi_client *cl, unsigned int share); +void hsi_release_port(struct hsi_client *cl); + +static inline int hsi_port_claimed(struct hsi_client *cl) +{ + return cl->pclaimed; +} + +static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data) +{ + dev_set_drvdata(&port->device, data); +} + +static inline void *hsi_port_drvdata(struct hsi_port *port) +{ + return dev_get_drvdata(&port->device); +} + +/** + * struct hsi_controller - HSI controller device + * @device: Driver model representation of the device + * @owner: Pointer to the module owning the controller + * @id: HSI controller ID + * @num_ports: Number of ports in the HSI controller + * @port: Array of HSI ports + */ +struct hsi_controller { + struct device device; + struct module *owner; + unsigned int id; + unsigned int num_ports; + struct hsi_port **port; +}; + +#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device) + +struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags); +void hsi_put_controller(struct hsi_controller *hsi); +int hsi_register_controller(struct hsi_controller *hsi); +void hsi_unregister_controller(struct hsi_controller *hsi); + +static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi, + void *data) +{ + dev_set_drvdata(&hsi->device, data); +} + +static inline void *hsi_controller_drvdata(struct hsi_controller *hsi) +{ + return dev_get_drvdata(&hsi->device); +} + +static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi, + unsigned int num) +{ + return (num < hsi->num_ports) ? hsi->port[num] : NULL; +} + +/* + * API for HSI clients + */ +int hsi_async(struct hsi_client *cl, struct hsi_msg *msg); + +/** + * hsi_id - Get HSI controller ID associated to a client + * @cl: Pointer to a HSI client + * + * Return the controller id where the client is attached to + */ +static inline unsigned int hsi_id(struct hsi_client *cl) +{ + return to_hsi_controller(cl->device.parent->parent)->id; +} + +/** + * hsi_port_id - Gets the port number a client is attached to + * @cl: Pointer to HSI client + * + * Return the port number associated to the client + */ +static inline unsigned int hsi_port_id(struct hsi_client *cl) +{ + return to_hsi_port(cl->device.parent)->num; +} + +/** + * hsi_setup - Configure the client's port + * @cl: Pointer to the HSI client + * + * When sharing ports, clients should either relay on a single + * client setup or have the same setup for all of them. 
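To make the client-side flow concrete, a rough hypothetical sketch of claiming a port, applying the client's configuration and queueing one asynchronous read; error handling is trimmed, and hsi_setup() and hsi_async_read() are the inline helpers defined just below.

#include <linux/hsi/hsi.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static void example_rx_complete(struct hsi_msg *msg)
{
        /* msg->status and msg->actual_len describe the outcome */
        hsi_free_msg(msg);
}

static int example_queue_read(struct hsi_client *cl, void *buf, size_t len)
{
        struct hsi_msg *msg;
        int err;

        err = hsi_claim_port(cl, 0);            /* 0: do not share the port */
        if (err < 0)
                return err;

        err = hsi_setup(cl);                    /* push tx_cfg/rx_cfg to the port */
        if (err < 0)
                goto release;

        msg = hsi_alloc_msg(1, GFP_KERNEL);     /* room for one sg entry */
        if (!msg) {
                err = -ENOMEM;
                goto release;
        }
        sg_init_one(msg->sgt.sgl, buf, len);
        msg->cl = cl;
        msg->channel = 0;
        msg->complete = example_rx_complete;

        err = hsi_async_read(cl, msg);
        if (err < 0) {
                hsi_free_msg(msg);
                goto release;
        }
        return 0;

release:
        hsi_release_port(cl);
        return err;
}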
+ * + * Return -errno on failure, 0 on success + */ +static inline int hsi_setup(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->setup(cl); +} + +/** + * hsi_flush - Flush all pending transactions on the client's port + * @cl: Pointer to the HSI client + * + * This function will destroy all pending hsi_msg in the port and reset + * the HW port so it is ready to receive and transmit from a clean state. + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_flush(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->flush(cl); +} + +/** + * hsi_async_read - Submit a read transfer + * @cl: Pointer to the HSI client + * @msg: HSI message descriptor of the transfer + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg) +{ + msg->ttype = HSI_MSG_READ; + return hsi_async(cl, msg); +} + +/** + * hsi_async_write - Submit a write transfer + * @cl: Pointer to the HSI client + * @msg: HSI message descriptor of the transfer + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg) +{ + msg->ttype = HSI_MSG_WRITE; + return hsi_async(cl, msg); +} + +/** + * hsi_start_tx - Signal the port that the client wants to start a TX + * @cl: Pointer to the HSI client + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_start_tx(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->start_tx(cl); +} + +/** + * hsi_stop_tx - Signal the port that the client no longer wants to transmit + * @cl: Pointer to the HSI client + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_stop_tx(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->stop_tx(cl); +} +#endif /* __LINUX_HSI_H__ */ diff --git a/include/linux/hsi/hsi_char.h b/include/linux/hsi/hsi_char.h new file mode 100644 index 00000000..76160b4f --- /dev/null +++ b/include/linux/hsi/hsi_char.h @@ -0,0 +1,63 @@ +/* + * Part of the HSI character device driver. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Andras Domokos <andras.domokos at nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + + +#ifndef __HSI_CHAR_H +#define __HSI_CHAR_H + +#define HSI_CHAR_MAGIC 'k' +#define HSC_IOW(num, dtype) _IOW(HSI_CHAR_MAGIC, num, dtype) +#define HSC_IOR(num, dtype) _IOR(HSI_CHAR_MAGIC, num, dtype) +#define HSC_IOWR(num, dtype) _IOWR(HSI_CHAR_MAGIC, num, dtype) +#define HSC_IO(num) _IO(HSI_CHAR_MAGIC, num) + +#define HSC_RESET HSC_IO(16) +#define HSC_SET_PM HSC_IO(17) +#define HSC_SEND_BREAK HSC_IO(18) +#define HSC_SET_RX HSC_IOW(19, struct hsc_rx_config) +#define HSC_GET_RX HSC_IOW(20, struct hsc_rx_config) +#define HSC_SET_TX HSC_IOW(21, struct hsc_tx_config) +#define HSC_GET_TX HSC_IOW(22, struct hsc_tx_config) + +#define HSC_PM_DISABLE 0 +#define HSC_PM_ENABLE 1 + +#define HSC_MODE_STREAM 1 +#define HSC_MODE_FRAME 2 +#define HSC_FLOW_SYNC 0 +#define HSC_ARB_RR 0 +#define HSC_ARB_PRIO 1 + +struct hsc_rx_config { + uint32_t mode; + uint32_t flow; + uint32_t channels; +}; + +struct hsc_tx_config { + uint32_t mode; + uint32_t channels; + uint32_t speed; + uint32_t arb_mode; +}; + +#endif /* __HSI_CHAR_H */ diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h new file mode 100644 index 00000000..ab3f6cb4 --- /dev/null +++ b/include/linux/htcpld.h @@ -0,0 +1,24 @@ +#ifndef __LINUX_HTCPLD_H +#define __LINUX_HTCPLD_H + +struct htcpld_chip_platform_data { + unsigned int addr; + unsigned int reset; + unsigned int num_gpios; + unsigned int gpio_out_base; + unsigned int gpio_in_base; + unsigned int irq_base; + unsigned int num_irqs; +}; + +struct htcpld_core_platform_data { + unsigned int int_reset_gpio_hi; + unsigned int int_reset_gpio_lo; + unsigned int i2c_adapter_id; + + struct htcpld_chip_platform_data *chip; + unsigned int num_chip; +}; + +#endif /* __LINUX_HTCPLD_H */ + diff --git a/include/linux/htirq.h b/include/linux/htirq.h new file mode 100644 index 00000000..70a1dbbf --- /dev/null +++ b/include/linux/htirq.h @@ -0,0 +1,24 @@ +#ifndef LINUX_HTIRQ_H +#define LINUX_HTIRQ_H + +struct ht_irq_msg { + u32 address_lo; /* low 32 bits of the ht irq message */ + u32 address_hi; /* high 32 bits of the it irq message */ +}; + +/* Helper functions.. 
*/ +void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); +void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); +struct irq_data; +void mask_ht_irq(struct irq_data *data); +void unmask_ht_irq(struct irq_data *data); + +/* The arch hook for getting things started */ +int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); + +/* For drivers of buggy hardware */ +typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq, + struct ht_irq_msg *msg); +int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update); + +#endif /* LINUX_HTIRQ_H */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h new file mode 100644 index 00000000..c8af7a2e --- /dev/null +++ b/include/linux/huge_mm.h @@ -0,0 +1,199 @@ +#ifndef _LINUX_HUGE_MM_H +#define _LINUX_HUGE_MM_H + +extern int do_huge_pmd_anonymous_page(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, + unsigned int flags); +extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, + struct vm_area_struct *vma); +extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, + pmd_t orig_pmd); +extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm); +extern struct page *follow_trans_huge_pmd(struct mm_struct *mm, + unsigned long addr, + pmd_t *pmd, + unsigned int flags); +extern int zap_huge_pmd(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pmd_t *pmd, unsigned long addr); +extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, unsigned long end, + unsigned char *vec); +extern int move_huge_pmd(struct vm_area_struct *vma, + struct vm_area_struct *new_vma, + unsigned long old_addr, + unsigned long new_addr, unsigned long old_end, + pmd_t *old_pmd, pmd_t *new_pmd); +extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr, pgprot_t newprot); + +enum transparent_hugepage_flag { + TRANSPARENT_HUGEPAGE_FLAG, + TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, +#ifdef CONFIG_DEBUG_VM + TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG, +#endif +}; + +enum page_check_address_pmd_flag { + PAGE_CHECK_ADDRESS_PMD_FLAG, + PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, + PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, +}; +extern pmd_t *page_check_address_pmd(struct page *page, + struct mm_struct *mm, + unsigned long address, + enum page_check_address_pmd_flag flag); + +#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) +#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define HPAGE_PMD_SHIFT HPAGE_SHIFT +#define HPAGE_PMD_MASK HPAGE_MASK +#define HPAGE_PMD_SIZE HPAGE_SIZE + +#define transparent_hugepage_enabled(__vma) \ + ((transparent_hugepage_flags & \ + (1<<TRANSPARENT_HUGEPAGE_FLAG) || \ + (transparent_hugepage_flags & \ + (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ + ((__vma)->vm_flags & VM_HUGEPAGE))) && \ + !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ + !is_vma_temporary_stack(__vma)) +#define transparent_hugepage_defrag(__vma) \ + ((transparent_hugepage_flags & \ + (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ + (transparent_hugepage_flags & \ + (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \ + (__vma)->vm_flags & VM_HUGEPAGE)) +#ifdef CONFIG_DEBUG_VM +#define transparent_hugepage_debug_cow() \ + (transparent_hugepage_flags & \ + 
(1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG)) +#else /* CONFIG_DEBUG_VM */ +#define transparent_hugepage_debug_cow() 0 +#endif /* CONFIG_DEBUG_VM */ + +extern unsigned long transparent_hugepage_flags; +extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, + struct vm_area_struct *vma, + unsigned long addr, unsigned long end); +extern int handle_pte_fault(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, + pte_t *pte, pmd_t *pmd, unsigned int flags); +extern int split_huge_page(struct page *page); +extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd); +#define split_huge_page_pmd(__mm, __pmd) \ + do { \ + pmd_t *____pmd = (__pmd); \ + if (unlikely(pmd_trans_huge(*____pmd))) \ + __split_huge_page_pmd(__mm, ____pmd); \ + } while (0) +#define wait_split_huge_page(__anon_vma, __pmd) \ + do { \ + pmd_t *____pmd = (__pmd); \ + anon_vma_lock(__anon_vma); \ + anon_vma_unlock(__anon_vma); \ + BUG_ON(pmd_trans_splitting(*____pmd) || \ + pmd_trans_huge(*____pmd)); \ + } while (0) +#if HPAGE_PMD_ORDER > MAX_ORDER +#error "hugepages can't be allocated by the buddy allocator" +#endif +extern int hugepage_madvise(struct vm_area_struct *vma, + unsigned long *vm_flags, int advice); +extern void __vma_adjust_trans_huge(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + long adjust_next); +extern int __pmd_trans_huge_lock(pmd_t *pmd, + struct vm_area_struct *vma); +/* mmap_sem must be held on entry */ +static inline int pmd_trans_huge_lock(pmd_t *pmd, + struct vm_area_struct *vma) +{ + VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); + if (pmd_trans_huge(*pmd)) + return __pmd_trans_huge_lock(pmd, vma); + else + return 0; +} +static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + long adjust_next) +{ + if (!vma->anon_vma || vma->vm_ops) + return; + __vma_adjust_trans_huge(vma, start, end, adjust_next); +} +static inline int hpage_nr_pages(struct page *page) +{ + if (unlikely(PageTransHuge(page))) + return HPAGE_PMD_NR; + return 1; +} +static inline struct page *compound_trans_head(struct page *page) +{ + if (PageTail(page)) { + struct page *head; + head = page->first_page; + smp_rmb(); + /* + * head may be a dangling pointer. + * __split_huge_page_refcount clears PageTail before + * overwriting first_page, so if PageTail is still + * there it means the head pointer isn't dangling. 
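A usage illustration of the locking helper above, following the pattern of its in-tree callers (the walker name is hypothetical): a return value of 1 means the pmd is a stable huge pmd and mm->page_table_lock is held, so the caller handles the huge case under the lock and drops it; any other return value means it should fall back to a pte-level walk.

#include <linux/huge_mm.h>
#include <linux/mm.h>

/* Illustrative fragment: the caller must already hold mmap_sem. */
static void example_walk_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                             unsigned long addr, unsigned long end)
{
        if (pmd_trans_huge_lock(pmd, vma) == 1) {
                /* one huge pmd covers the range; page_table_lock is held */
                /* ... inspect or update *pmd here ... */
                spin_unlock(&vma->vm_mm->page_table_lock);
                return;
        }

        /* not a stable huge pmd: walk the ptes instead */
        /* ... a pte_offset_map_lock() loop would go here ... */
}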
+ */ + if (PageTail(page)) + return head; + } + return page; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) +#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) +#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) + +#define hpage_nr_pages(x) 1 + +#define transparent_hugepage_enabled(__vma) 0 + +#define transparent_hugepage_flags 0UL +static inline int split_huge_page(struct page *page) +{ + return 0; +} +#define split_huge_page_pmd(__mm, __pmd) \ + do { } while (0) +#define wait_split_huge_page(__anon_vma, __pmd) \ + do { } while (0) +#define compound_trans_head(page) compound_head(page) +static inline int hugepage_madvise(struct vm_area_struct *vma, + unsigned long *vm_flags, int advice) +{ + BUG(); + return 0; +} +static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + long adjust_next) +{ +} +static inline int pmd_trans_huge_lock(pmd_t *pmd, + struct vm_area_struct *vma) +{ + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h new file mode 100644 index 00000000..000837e1 --- /dev/null +++ b/include/linux/hugetlb.h @@ -0,0 +1,317 @@ +#ifndef _LINUX_HUGETLB_H +#define _LINUX_HUGETLB_H + +#include <linux/mm_types.h> +#include <linux/fs.h> +#include <linux/hugetlb_inline.h> + +struct ctl_table; +struct user_struct; + +#ifdef CONFIG_HUGETLB_PAGE + +#include <linux/mempolicy.h> +#include <linux/shm.h> +#include <asm/tlbflush.h> + +struct hugepage_subpool { + spinlock_t lock; + long count; + long max_hpages, used_hpages; +}; + +struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); +void hugepage_put_subpool(struct hugepage_subpool *spool); + +int PageHuge(struct page *page); + +void reset_vma_resv_huge_pages(struct vm_area_struct *vma); +int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); + +#ifdef CONFIG_NUMA +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +#endif + +int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); +int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, + struct page **, struct vm_area_struct **, + unsigned long *, int *, int, unsigned int flags); +void unmap_hugepage_range(struct vm_area_struct *, + unsigned long, unsigned long, struct page *); +void __unmap_hugepage_range(struct vm_area_struct *, + unsigned long, unsigned long, struct page *); +int hugetlb_prefault(struct address_space *, struct vm_area_struct *); +void hugetlb_report_meminfo(struct seq_file *); +int hugetlb_report_node_meminfo(int, char *); +unsigned long hugetlb_total_pages(void); +int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, unsigned int flags); +int hugetlb_reserve_pages(struct inode *inode, long from, long to, + struct vm_area_struct *vma, + vm_flags_t vm_flags); +void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); +int dequeue_hwpoisoned_huge_page(struct page *page); +void copy_huge_page(struct page *dst, struct page *src); + +extern unsigned long hugepages_treat_as_movable; +extern const unsigned long hugetlb_zero, hugetlb_infinity; +extern int sysctl_hugetlb_shm_group; +extern struct list_head 
huge_boot_pages; + +/* arch callbacks */ + +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz); +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); +struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, + int write); +struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int write); +struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, + pud_t *pud, int write); +int pmd_huge(pmd_t pmd); +int pud_huge(pud_t pmd); +void hugetlb_change_protection(struct vm_area_struct *vma, + unsigned long address, unsigned long end, pgprot_t newprot); + +#else /* !CONFIG_HUGETLB_PAGE */ + +static inline int PageHuge(struct page *page) +{ + return 0; +} + +static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) +{ +} + +static inline unsigned long hugetlb_total_pages(void) +{ + return 0; +} + +#define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; }) +#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) +#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) +#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) +#define unmap_hugepage_range(vma, start, end, page) BUG() +static inline void hugetlb_report_meminfo(struct seq_file *m) +{ +} +#define hugetlb_report_node_meminfo(n, buf) 0 +#define follow_huge_pmd(mm, addr, pmd, write) NULL +#define follow_huge_pud(mm, addr, pud, write) NULL +#define prepare_hugepage_range(file, addr, len) (-EINVAL) +#define pmd_huge(x) 0 +#define pud_huge(x) 0 +#define is_hugepage_only_range(mm, addr, len) 0 +#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) +#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) +#define huge_pte_offset(mm, address) 0 +#define dequeue_hwpoisoned_huge_page(page) 0 +static inline void copy_huge_page(struct page *dst, struct page *src) +{ +} + +#define hugetlb_change_protection(vma, address, end, newprot) + +#endif /* !CONFIG_HUGETLB_PAGE */ + +#define HUGETLB_ANON_FILE "anon_hugepage" + +enum { + /* + * The file will be used as an shm file so shmfs accounting rules + * apply + */ + HUGETLB_SHMFS_INODE = 1, + /* + * The file is being created on the internal vfs mount and shmfs + * accounting rules do not apply + */ + HUGETLB_ANONHUGE_INODE = 2, +}; + +#ifdef CONFIG_HUGETLBFS +struct hugetlbfs_sb_info { + long max_inodes; /* inodes allowed */ + long free_inodes; /* inodes free */ + spinlock_t stat_lock; + struct hstate *hstate; + struct hugepage_subpool *spool; +}; + +static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + +extern const struct file_operations hugetlbfs_file_operations; +extern const struct vm_operations_struct hugetlb_vm_ops; +struct file *hugetlb_file_setup(const char *name, unsigned long addr, + size_t size, vm_flags_t acct, + struct user_struct **user, int creat_flags); + +static inline int is_file_hugepages(struct file *file) +{ + if (file->f_op == &hugetlbfs_file_operations) + return 1; + if (is_file_shm_hugepages(file)) + return 1; + + return 0; +} + +#else /* !CONFIG_HUGETLBFS */ + +#define is_file_hugepages(file) 0 +static inline struct file * +hugetlb_file_setup(const char *name, unsigned long addr, size_t size, + vm_flags_t acctflag, struct user_struct **user, int creat_flags) +{ + return ERR_PTR(-ENOSYS); +} + +#endif /* !CONFIG_HUGETLBFS */ + +#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA +unsigned long 
hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ + +#ifdef CONFIG_HUGETLB_PAGE + +#define HSTATE_NAME_LEN 32 +/* Defines one hugetlb page size */ +struct hstate { + int next_nid_to_alloc; + int next_nid_to_free; + unsigned int order; + unsigned long mask; + unsigned long max_huge_pages; + unsigned long nr_huge_pages; + unsigned long free_huge_pages; + unsigned long resv_huge_pages; + unsigned long surplus_huge_pages; + unsigned long nr_overcommit_huge_pages; + struct list_head hugepage_freelists[MAX_NUMNODES]; + unsigned int nr_huge_pages_node[MAX_NUMNODES]; + unsigned int free_huge_pages_node[MAX_NUMNODES]; + unsigned int surplus_huge_pages_node[MAX_NUMNODES]; + char name[HSTATE_NAME_LEN]; +}; + +struct huge_bootmem_page { + struct list_head list; + struct hstate *hstate; +#ifdef CONFIG_HIGHMEM + phys_addr_t phys; +#endif +}; + +struct page *alloc_huge_page_node(struct hstate *h, int nid); + +/* arch callback */ +int __init alloc_bootmem_huge_page(struct hstate *h); + +void __init hugetlb_add_hstate(unsigned order); +struct hstate *size_to_hstate(unsigned long size); + +#ifndef HUGE_MAX_HSTATE +#define HUGE_MAX_HSTATE 1 +#endif + +extern struct hstate hstates[HUGE_MAX_HSTATE]; +extern unsigned int default_hstate_idx; + +#define default_hstate (hstates[default_hstate_idx]) + +static inline struct hstate *hstate_inode(struct inode *i) +{ + struct hugetlbfs_sb_info *hsb; + hsb = HUGETLBFS_SB(i->i_sb); + return hsb->hstate; +} + +static inline struct hstate *hstate_file(struct file *f) +{ + return hstate_inode(f->f_dentry->d_inode); +} + +static inline struct hstate *hstate_vma(struct vm_area_struct *vma) +{ + return hstate_file(vma->vm_file); +} + +static inline unsigned long huge_page_size(struct hstate *h) +{ + return (unsigned long)PAGE_SIZE << h->order; +} + +extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma); + +extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma); + +static inline unsigned long huge_page_mask(struct hstate *h) +{ + return h->mask; +} + +static inline unsigned int huge_page_order(struct hstate *h) +{ + return h->order; +} + +static inline unsigned huge_page_shift(struct hstate *h) +{ + return h->order + PAGE_SHIFT; +} + +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1 << h->order; +} + +static inline unsigned int blocks_per_huge_page(struct hstate *h) +{ + return huge_page_size(h) / 512; +} + +#include <asm/hugetlb.h> + +static inline struct hstate *page_hstate(struct page *page) +{ + return size_to_hstate(PAGE_SIZE << compound_order(page)); +} + +static inline unsigned hstate_index_to_shift(unsigned index) +{ + return hstates[index].order + PAGE_SHIFT; +} + +#else +struct hstate {}; +#define alloc_huge_page_node(h, nid) NULL +#define alloc_bootmem_huge_page(h) NULL +#define hstate_file(f) NULL +#define hstate_vma(v) NULL +#define hstate_inode(i) NULL +#define huge_page_size(h) PAGE_SIZE +#define huge_page_mask(h) PAGE_MASK +#define vma_kernel_pagesize(v) PAGE_SIZE +#define vma_mmu_pagesize(v) PAGE_SIZE +#define huge_page_order(h) 0 +#define huge_page_shift(h) PAGE_SHIFT +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1; +} +#define hstate_index_to_shift(index) 0 +#endif + +#endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h new file mode 100644 index 00000000..2bb681fb --- /dev/null +++ 
b/include/linux/hugetlb_inline.h @@ -0,0 +1,22 @@ +#ifndef _LINUX_HUGETLB_INLINE_H +#define _LINUX_HUGETLB_INLINE_H + +#ifdef CONFIG_HUGETLB_PAGE + +#include <linux/mm.h> + +static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) +{ + return !!(vma->vm_flags & VM_HUGETLB); +} + +#else + +static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) +{ + return 0; +} + +#endif + +#endif diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h new file mode 100644 index 00000000..6ae9c631 --- /dev/null +++ b/include/linux/hw_breakpoint.h @@ -0,0 +1,156 @@ +#ifndef _LINUX_HW_BREAKPOINT_H +#define _LINUX_HW_BREAKPOINT_H + +enum { + HW_BREAKPOINT_LEN_1 = 1, + HW_BREAKPOINT_LEN_2 = 2, + HW_BREAKPOINT_LEN_4 = 4, + HW_BREAKPOINT_LEN_8 = 8, +}; + +enum { + HW_BREAKPOINT_EMPTY = 0, + HW_BREAKPOINT_R = 1, + HW_BREAKPOINT_W = 2, + HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W, + HW_BREAKPOINT_X = 4, + HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X, +}; + +enum bp_type_idx { + TYPE_INST = 0, +#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS + TYPE_DATA = 0, +#else + TYPE_DATA = 1, +#endif + TYPE_MAX +}; + +#ifdef __KERNEL__ + +#include <linux/perf_event.h> + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + +extern int __init init_hw_breakpoint(void); + +static inline void hw_breakpoint_init(struct perf_event_attr *attr) +{ + memset(attr, 0, sizeof(*attr)); + + attr->type = PERF_TYPE_BREAKPOINT; + attr->size = sizeof(*attr); + /* + * As it's for in-kernel or ptrace use, we want it to be pinned + * and to call its callback every hits. + */ + attr->pinned = 1; + attr->sample_period = 1; +} + +static inline void ptrace_breakpoint_init(struct perf_event_attr *attr) +{ + hw_breakpoint_init(attr); + attr->exclude_kernel = 1; +} + +static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) +{ + return bp->attr.bp_addr; +} + +static inline int hw_breakpoint_type(struct perf_event *bp) +{ + return bp->attr.bp_type; +} + +static inline unsigned long hw_breakpoint_len(struct perf_event *bp) +{ + return bp->attr.bp_len; +} + +extern struct perf_event * +register_user_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context, + struct task_struct *tsk); + +/* FIXME: only change from the attr, and don't unregister */ +extern int +modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); + +/* + * Kernel breakpoints are not associated with any particular thread. 
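A short sketch of how the hw_breakpoint helpers above are combined to arm a per-task watchpoint. The three-argument callback signature is an assumption based on the perf_overflow_handler_t of this kernel generation, and my_wp_handler()/my_arm_watchpoint() are illustrative names only; callers would check the returned pointer with IS_ERR().

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static void my_wp_handler(struct perf_event *bp,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	pr_info("write watchpoint hit at 0x%lx\n", hw_breakpoint_addr(bp));
}

/* Arm a 4-byte write watchpoint on @addr in task @tsk (sketch). */
static struct perf_event *my_arm_watchpoint(struct task_struct *tsk,
					    unsigned long addr)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);	/* pinned, period 1, user space only */
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, my_wp_handler, NULL, tsk);
}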
+ */ +extern struct perf_event * +register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context, + int cpu); + +extern struct perf_event * __percpu * +register_wide_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context); + +extern int register_perf_hw_breakpoint(struct perf_event *bp); +extern int __register_perf_hw_breakpoint(struct perf_event *bp); +extern void unregister_hw_breakpoint(struct perf_event *bp); +extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events); + +extern int dbg_reserve_bp_slot(struct perf_event *bp); +extern int dbg_release_bp_slot(struct perf_event *bp); +extern int reserve_bp_slot(struct perf_event *bp); +extern void release_bp_slot(struct perf_event *bp); + +extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); + +static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) +{ + return &bp->hw.info; +} + +#else /* !CONFIG_HAVE_HW_BREAKPOINT */ + +static inline int __init init_hw_breakpoint(void) { return 0; } + +static inline struct perf_event * +register_user_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context, + struct task_struct *tsk) { return NULL; } +static inline int +modify_user_hw_breakpoint(struct perf_event *bp, + struct perf_event_attr *attr) { return -ENOSYS; } +static inline struct perf_event * +register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context, + int cpu) { return NULL; } +static inline struct perf_event * __percpu * +register_wide_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + void *context) { return NULL; } +static inline int +register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } +static inline int +__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } +static inline void unregister_hw_breakpoint(struct perf_event *bp) { } +static inline void +unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { } +static inline int +reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; } +static inline void release_bp_slot(struct perf_event *bp) { } + +static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { } + +static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) +{ + return NULL; +} + +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ +#endif /* __KERNEL__ */ + +#endif /* _LINUX_HW_BREAKPOINT_H */ diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h new file mode 100644 index 00000000..b4b0eef5 --- /dev/null +++ b/include/linux/hw_random.h @@ -0,0 +1,51 @@ +/* + Hardware Random Number Generator + + Please read Documentation/hw_random.txt for details on use. + + ---------------------------------------------------------- + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + */ + +#ifndef LINUX_HWRANDOM_H_ +#define LINUX_HWRANDOM_H_ + +#include <linux/types.h> +#include <linux/list.h> + +/** + * struct hwrng - Hardware Random Number Generator driver + * @name: Unique RNG name. + * @init: Initialization callback (can be NULL). + * @cleanup: Cleanup callback (can be NULL). + * @data_present: Callback to determine if data is available + * on the RNG. If NULL, it is assumed that + * there is always data available. *OBSOLETE* + * @data_read: Read data from the RNG device. 
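To make the struct hwrng contract documented here concrete, the sketch below shows a driver exposing a memory-mapped entropy register through the newer .read interface and registering it with hwrng_register(), declared a little further down. The device, the register offset and all myrng_* names are hypothetical, and the @wait flag is simply ignored.

#include <linux/hw_random.h>
#include <linux/io.h>

#define MYRNG_DATA_REG	0x00		/* hypothetical MMIO data register */

static void __iomem *myrng_base;	/* mapped in the (omitted) probe path */

static int myrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u32 *buf = data;		/* buffer is aligned for any type */
	int copied = 0;

	while (max >= sizeof(u32)) {
		*buf++ = readl(myrng_base + MYRNG_DATA_REG);
		copied += sizeof(u32);
		max -= sizeof(u32);
	}
	return copied;			/* bytes placed in the buffer */
}

static struct hwrng myrng = {
	.name = "myrng",
	.read = myrng_read,
};

/* probe:  hwrng_register(&myrng);   remove:  hwrng_unregister(&myrng); */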
+ * Returns the number of lower random bytes in "data". + * Must not be NULL. *OBSOLETE* + * @read: New API. drivers can fill up to max bytes of data + * into the buffer. The buffer is aligned for any type. + * @priv: Private data, for use by the RNG driver. + */ +struct hwrng { + const char *name; + int (*init)(struct hwrng *rng); + void (*cleanup)(struct hwrng *rng); + int (*data_present)(struct hwrng *rng, int wait); + int (*data_read)(struct hwrng *rng, u32 *data); + int (*read)(struct hwrng *rng, void *data, size_t max, bool wait); + unsigned long priv; + + /* internal. */ + struct list_head list; +}; + +/** Register a new Hardware Random Number Generator driver. */ +extern int hwrng_register(struct hwrng *rng); +/** Unregister a Hardware Random Number Generator driver. */ +extern void hwrng_unregister(struct hwrng *rng); + +#endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h new file mode 100644 index 00000000..1c7b89ae --- /dev/null +++ b/include/linux/hwmon-sysfs.h @@ -0,0 +1,57 @@ +/* + * hwmon-sysfs.h - hardware monitoring chip driver sysfs defines + * + * Copyright (C) 2005 Yani Ioannou <yani.ioannou@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef _LINUX_HWMON_SYSFS_H +#define _LINUX_HWMON_SYSFS_H + +#include <linux/device.h> + +struct sensor_device_attribute{ + struct device_attribute dev_attr; + int index; +}; +#define to_sensor_dev_attr(_dev_attr) \ + container_of(_dev_attr, struct sensor_device_attribute, dev_attr) + +#define SENSOR_ATTR(_name, _mode, _show, _store, _index) \ + { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .index = _index } + +#define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \ +struct sensor_device_attribute sensor_dev_attr_##_name \ + = SENSOR_ATTR(_name, _mode, _show, _store, _index) + +struct sensor_device_attribute_2 { + struct device_attribute dev_attr; + u8 index; + u8 nr; +}; +#define to_sensor_dev_attr_2(_dev_attr) \ + container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr) + +#define SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) \ + { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .index = _index, \ + .nr = _nr } + +#define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \ +struct sensor_device_attribute_2 sensor_dev_attr_##_name \ + = SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) + +#endif /* _LINUX_HWMON_SYSFS_H */ diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h new file mode 100644 index 00000000..f346e4d5 --- /dev/null +++ b/include/linux/hwmon-vid.h @@ -0,0 +1,45 @@ +/* + hwmon-vid.h - VID/VRM/VRD voltage conversions + + Originally part of lm_sensors + Copyright (c) 2002 Mark D. 
Studebaker <mdsxyz123@yahoo.com> + With assistance from Trent Piepho <xyzzy@speakeasy.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef _LINUX_HWMON_VID_H +#define _LINUX_HWMON_VID_H + +int vid_from_reg(int val, u8 vrm); +u8 vid_which_vrm(void); + +/* vrm is the VRM/VRD document version multiplied by 10. + val is in mV to avoid floating point in the kernel. + Returned value is the 4-, 5- or 6-bit VID code. + Note that only VRM 9.x is supported for now. */ +static inline int vid_to_reg(int val, u8 vrm) +{ + switch (vrm) { + case 91: /* VRM 9.1 */ + case 90: /* VRM 9.0 */ + return ((val >= 1100) && (val <= 1850) ? + ((18499 - val * 10) / 25 + 5) / 10 : -1); + default: + return -1; + } +} + +#endif /* _LINUX_HWMON_VID_H */ diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h new file mode 100644 index 00000000..82b29ae6 --- /dev/null +++ b/include/linux/hwmon.h @@ -0,0 +1,35 @@ +/* + hwmon.h - part of lm_sensors, Linux kernel modules for hardware monitoring + + This file declares helper functions for the sysfs class "hwmon", + for use by sensors drivers. + + Copyright (C) 2005 Mark M. Hoffman <mhoffman@lightlink.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. +*/ + +#ifndef _HWMON_H_ +#define _HWMON_H_ + +struct device; + +struct device *hwmon_device_register(struct device *dev); + +void hwmon_device_unregister(struct device *dev); + +/* Scale user input to sensible values */ +static inline int SENSORS_LIMIT(long value, long low, long high) +{ + if (value < low) + return low; + else if (value > high) + return high; + else + return value; +} + +#endif + diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h new file mode 100644 index 00000000..3343298e --- /dev/null +++ b/include/linux/hwspinlock.h @@ -0,0 +1,313 @@ +/* + * Hardware spinlock public header + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Ohad Ben-Cohen <ohad@wizery.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
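Before the hwspinlock API starts in earnest, here is a sketch of how the hwmon-sysfs.h attribute macros and the SENSORS_LIMIT() clamp above are normally wired together in a sensor driver. my_read_temp()/my_write_max() and the -40..+125 degC range are hypothetical stand-ins for real chip accessors.

#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

/* Hypothetical chip accessors (millidegrees C), defined elsewhere. */
int my_read_temp(struct device *dev, int channel);
void my_write_max(struct device *dev, int channel, long millidegrees);

static ssize_t show_temp(struct device *dev, struct device_attribute *da,
			 char *buf)
{
	int channel = to_sensor_dev_attr(da)->index;

	return sprintf(buf, "%d\n", my_read_temp(dev, channel));
}

static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
			    const char *buf, size_t count)
{
	long val;

	if (kstrtol(buf, 10, &val))
		return -EINVAL;

	/* Clamp user input to what the chip can actually store. */
	my_write_max(dev, to_sensor_dev_attr(da)->index,
		     SENSORS_LIMIT(val, -40000, 125000));
	return count;
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR, NULL, set_temp_max, 0);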
+ */ + +#ifndef __LINUX_HWSPINLOCK_H +#define __LINUX_HWSPINLOCK_H + +#include <linux/err.h> +#include <linux/sched.h> + +/* hwspinlock mode argument */ +#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ +#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ + +struct device; +struct hwspinlock; +struct hwspinlock_device; +struct hwspinlock_ops; + +/** + * struct hwspinlock_pdata - platform data for hwspinlock drivers + * @base_id: base id for this hwspinlock device + * + * hwspinlock devices provide system-wide hardware locks that are used + * by remote processors that have no other way to achieve synchronization. + * + * To achieve that, each physical lock must have a system-wide id number + * that is agreed upon, otherwise remote processors can't possibly assume + * they're using the same hardware lock. + * + * Usually boards have a single hwspinlock device, which provides several + * hwspinlocks, and in this case, they can be trivially numbered 0 to + * (num-of-locks - 1). + * + * In case boards have several hwspinlocks devices, a different base id + * should be used for each hwspinlock device (they can't all use 0 as + * a starting id!). + * + * This platform data structure should be used to provide the base id + * for each device (which is trivially 0 when only a single hwspinlock + * device exists). It can be shared between different platforms, hence + * its location. + */ +struct hwspinlock_pdata { + int base_id; +}; + +#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE) + +int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev, + const struct hwspinlock_ops *ops, int base_id, int num_locks); +int hwspin_lock_unregister(struct hwspinlock_device *bank); +struct hwspinlock *hwspin_lock_request(void); +struct hwspinlock *hwspin_lock_request_specific(unsigned int id); +int hwspin_lock_free(struct hwspinlock *hwlock); +int hwspin_lock_get_id(struct hwspinlock *hwlock); +int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int, + unsigned long *); +int __hwspin_trylock(struct hwspinlock *, int, unsigned long *); +void __hwspin_unlock(struct hwspinlock *, int, unsigned long *); + +#else /* !CONFIG_HWSPINLOCK */ + +/* + * We don't want these functions to fail if CONFIG_HWSPINLOCK is not + * enabled. We prefer to silently succeed in this case, and let the + * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not + * required on a given setup, users will still work. + * + * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which + * we _do_ want users to fail (no point in registering hwspinlock instances if + * the framework is not available). + * + * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking + * users. Others, which care, can still check this with IS_ERR. 
+ */ +static inline struct hwspinlock *hwspin_lock_request(void) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id) +{ + return ERR_PTR(-ENODEV); +} + +static inline int hwspin_lock_free(struct hwspinlock *hwlock) +{ + return 0; +} + +static inline +int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, + int mode, unsigned long *flags) +{ + return 0; +} + +static inline +int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ + return 0; +} + +static inline +void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ +} + +static inline int hwspin_lock_get_id(struct hwspinlock *hwlock) +{ + return 0; +} + +#endif /* !CONFIG_HWSPINLOCK */ + +/** + * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts + * @hwlock: an hwspinlock which we want to trylock + * @flags: a pointer to where the caller's interrupt state will be saved at + * + * This function attempts to lock the underlying hwspinlock, and will + * immediately fail if the hwspinlock is already locked. + * + * Upon a successful return from this function, preemption and local + * interrupts are disabled (previous interrupts state is saved at @flags), + * so the caller must not sleep, and is advised to release the hwspinlock + * as soon as possible. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. + */ +static inline +int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags) +{ + return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags); +} + +/** + * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts + * @hwlock: an hwspinlock which we want to trylock + * + * This function attempts to lock the underlying hwspinlock, and will + * immediately fail if the hwspinlock is already locked. + * + * Upon a successful return from this function, preemption and local + * interrupts are disabled, so the caller must not sleep, and is advised + * to release the hwspinlock as soon as possible. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. + */ +static inline int hwspin_trylock_irq(struct hwspinlock *hwlock) +{ + return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL); +} + +/** + * hwspin_trylock() - attempt to lock a specific hwspinlock + * @hwlock: an hwspinlock which we want to trylock + * + * This function attempts to lock an hwspinlock, and will immediately fail + * if the hwspinlock is already taken. + * + * Upon a successful return from this function, preemption is disabled, + * so the caller must not sleep, and is advised to release the hwspinlock + * as soon as possible. This is required in order to minimize remote cores + * polling on the hardware interconnect. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. + */ +static inline int hwspin_trylock(struct hwspinlock *hwlock) +{ + return __hwspin_trylock(hwlock, 0, NULL); +} + +/** + * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * @flags: a pointer to where the caller's interrupt state will be saved at + * + * This function locks the underlying @hwlock. 
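A very small usage sketch for the trylock variant documented above: attempt the lock, bail out with -EBUSY if the remote side currently owns it, and keep the critical section as short as possible. The shared-memory slot and its meaning are hypothetical.

#include <linux/hwspinlock.h>
#include <linux/io.h>

static int my_post_message(struct hwspinlock *hwlock, u32 __iomem *slot, u32 val)
{
	unsigned long flags;
	int ret;

	ret = hwspin_trylock_irqsave(hwlock, &flags);
	if (ret)
		return ret;		/* usually -EBUSY */

	writel(val, slot);		/* keep this as short as possible */

	hwspin_unlock_irqrestore(hwlock, &flags);
	return 0;
}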
If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * Upon a successful return from this function, preemption and local interrupts + * are disabled (plus previous interrupt state is saved), so the caller must + * not sleep, and is advised to release the hwspinlock as soon as possible. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. + */ +static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, + unsigned int to, unsigned long *flags) +{ + return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags); +} + +/** + * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * + * This function locks the underlying @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * Upon a successful return from this function, preemption and local interrupts + * are disabled so the caller must not sleep, and is advised to release the + * hwspinlock as soon as possible. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. + */ +static inline +int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to) +{ + return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL); +} + +/** + * hwspin_lock_timeout() - lock an hwspinlock with timeout limit + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * + * This function locks the underlying @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * Upon a successful return from this function, preemption is disabled + * so the caller must not sleep, and is advised to release the hwspinlock + * as soon as possible. + * This is required in order to minimize remote cores polling on the + * hardware interconnect. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. + */ +static inline +int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to) +{ + return __hwspin_lock_timeout(hwlock, to, 0, NULL); +} + +/** + * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * @flags: previous caller's interrupt state to restore + * + * This function will unlock a specific hwspinlock, enable preemption and + * restore the previous state of the local interrupts. It should be used + * to undo, e.g., hwspin_trylock_irqsave(). + * + * @hwlock must be already locked before calling this function: it is a bug + * to call unlock on a @hwlock that is already unlocked. 
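The timeout variants above are typically paired with hwspin_unlock_irqrestore() as sketched below; obtaining @hwlock via hwspin_lock_request_specific() with a system-wide agreed id is assumed to have happened elsewhere, and the shared-memory record is purely illustrative.

#include <linux/hwspinlock.h>
#include <linux/io.h>

static int my_update_shared(struct hwspinlock *hwlock, void __iomem *shm,
			    const void *rec, size_t len)
{
	unsigned long flags;
	int ret;

	/* Busy-wait for at most 10 ms; IRQs stay off while the lock is held. */
	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
	if (ret)
		return ret;		/* typically -ETIMEDOUT */

	memcpy_toio(shm, rec, len);	/* short critical section */

	hwspin_unlock_irqrestore(hwlock, &flags);
	return 0;
}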
+ */ +static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock, + unsigned long *flags) +{ + __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags); +} + +/** + * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * + * This function will unlock a specific hwspinlock, enable preemption and + * enable local interrupts. Should be used to undo hwspin_lock_irq(). + * + * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before + * calling this function: it is a bug to call unlock on a @hwlock that is + * already unlocked. + */ +static inline void hwspin_unlock_irq(struct hwspinlock *hwlock) +{ + __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL); +} + +/** + * hwspin_unlock() - unlock hwspinlock + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * + * This function will unlock a specific hwspinlock and enable preemption + * back. + * + * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling + * this function: it is a bug to call unlock on a @hwlock that is already + * unlocked. + */ +static inline void hwspin_unlock(struct hwspinlock *hwlock) +{ + __hwspin_unlock(hwlock, 0, NULL); +} + +#endif /* __LINUX_HWSPINLOCK_H */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h new file mode 100644 index 00000000..5852545e --- /dev/null +++ b/include/linux/hyperv.h @@ -0,0 +1,1046 @@ +/* + * + * Copyright (c) 2011, Microsoft Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * Authors: + * Haiyang Zhang <haiyangz@microsoft.com> + * Hank Janssen <hjanssen@microsoft.com> + * K. Y. Srinivasan <kys@microsoft.com> + * + */ + +#ifndef _HYPERV_H +#define _HYPERV_H + +#include <linux/types.h> + +/* + * An implementation of HyperV key value pair (KVP) functionality for Linux. + * + * + * Copyright (C) 2010, Novell, Inc. + * Author : K. Y. Srinivasan <ksrinivasan@novell.com> + * + */ + +/* + * Maximum value size - used for both key names and value data, and includes + * any applicable NULL terminators. + * + * Note: This limit is somewhat arbitrary, but falls easily within what is + * supported for all native guests (back to Win 2000) and what is reasonable + * for the IC KVP exchange functionality. Note that Windows Me/98/95 are + * limited to 255 character key names. + * + * MSDN recommends not storing data values larger than 2048 bytes in the + * registry. + * + * Note: This value is used in defining the KVP exchange message - this value + * cannot be modified without affecting the message size and compatibility. 
+ */ + +/* + * bytes, including any null terminators + */ +#define HV_KVP_EXCHANGE_MAX_VALUE_SIZE (2048) + + +/* + * Maximum key size - the registry limit for the length of an entry name + * is 256 characters, including the null terminator + */ + +#define HV_KVP_EXCHANGE_MAX_KEY_SIZE (512) + +/* + * In Linux, we implement the KVP functionality in two components: + * 1) The kernel component which is packaged as part of the hv_utils driver + * is responsible for communicating with the host and responsible for + * implementing the host/guest protocol. 2) A user level daemon that is + * responsible for data gathering. + * + * Host/Guest Protocol: The host iterates over an index and expects the guest + * to assign a key name to the index and also return the value corresponding to + * the key. The host will have atmost one KVP transaction outstanding at any + * given point in time. The host side iteration stops when the guest returns + * an error. Microsoft has specified the following mapping of key names to + * host specified index: + * + * Index Key Name + * 0 FullyQualifiedDomainName + * 1 IntegrationServicesVersion + * 2 NetworkAddressIPv4 + * 3 NetworkAddressIPv6 + * 4 OSBuildNumber + * 5 OSName + * 6 OSMajorVersion + * 7 OSMinorVersion + * 8 OSVersion + * 9 ProcessorArchitecture + * + * The Windows host expects the Key Name and Key Value to be encoded in utf16. + * + * Guest Kernel/KVP Daemon Protocol: As noted earlier, we implement all of the + * data gathering functionality in a user mode daemon. The user level daemon + * is also responsible for binding the key name to the index as well. The + * kernel and user-level daemon communicate using a connector channel. + * + * The user mode component first registers with the + * the kernel component. Subsequently, the kernel component requests, data + * for the specified keys. In response to this message the user mode component + * fills in the value corresponding to the specified key. We overload the + * sequence field in the cn_msg header to define our KVP message types. + * + * + * The kernel component simply acts as a conduit for communication between the + * Windows host and the user-level daemon. The kernel component passes up the + * index received from the Host to the user-level daemon. If the index is + * valid (supported), the corresponding key as well as its + * value (both are strings) is returned. If the index is invalid + * (not supported), a NULL key string is returned. + */ + + +/* + * Registry value types. + */ + +#define REG_SZ 1 +#define REG_U32 4 +#define REG_U64 8 + +enum hv_kvp_exchg_op { + KVP_OP_GET = 0, + KVP_OP_SET, + KVP_OP_DELETE, + KVP_OP_ENUMERATE, + KVP_OP_REGISTER, + KVP_OP_COUNT /* Number of operations, must be last. */ +}; + +enum hv_kvp_exchg_pool { + KVP_POOL_EXTERNAL = 0, + KVP_POOL_GUEST, + KVP_POOL_AUTO, + KVP_POOL_AUTO_EXTERNAL, + KVP_POOL_AUTO_INTERNAL, + KVP_POOL_COUNT /* Number of pools, must be last. 
*/ +}; + +struct hv_kvp_hdr { + __u8 operation; + __u8 pool; + __u16 pad; +} __attribute__((packed)); + +struct hv_kvp_exchg_msg_value { + __u32 value_type; + __u32 key_size; + __u32 value_size; + __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; + union { + __u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; + __u32 value_u32; + __u64 value_u64; + }; +} __attribute__((packed)); + +struct hv_kvp_msg_enumerate { + __u32 index; + struct hv_kvp_exchg_msg_value data; +} __attribute__((packed)); + +struct hv_kvp_msg_get { + struct hv_kvp_exchg_msg_value data; +}; + +struct hv_kvp_msg_set { + struct hv_kvp_exchg_msg_value data; +}; + +struct hv_kvp_msg_delete { + __u32 key_size; + __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; +}; + +struct hv_kvp_register { + __u8 version[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; +}; + +struct hv_kvp_msg { + struct hv_kvp_hdr kvp_hdr; + union { + struct hv_kvp_msg_get kvp_get; + struct hv_kvp_msg_set kvp_set; + struct hv_kvp_msg_delete kvp_delete; + struct hv_kvp_msg_enumerate kvp_enum_data; + struct hv_kvp_register kvp_register; + } body; +} __attribute__((packed)); + +#ifdef __KERNEL__ +#include <linux/scatterlist.h> +#include <linux/list.h> +#include <linux/uuid.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/mod_devicetable.h> + + +#define MAX_PAGE_BUFFER_COUNT 19 +#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ + +#pragma pack(push, 1) + +/* Single-page buffer */ +struct hv_page_buffer { + u32 len; + u32 offset; + u64 pfn; +}; + +/* Multiple-page buffer */ +struct hv_multipage_buffer { + /* Length and Offset determines the # of pfns in the array */ + u32 len; + u32 offset; + u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT]; +}; + +/* 0x18 includes the proprietary packet header */ +#define MAX_PAGE_BUFFER_PACKET (0x18 + \ + (sizeof(struct hv_page_buffer) * \ + MAX_PAGE_BUFFER_COUNT)) +#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \ + sizeof(struct hv_multipage_buffer)) + + +#pragma pack(pop) + +struct hv_ring_buffer { + /* Offset in bytes from the start of ring data below */ + u32 write_index; + + /* Offset in bytes from the start of ring data below */ + u32 read_index; + + u32 interrupt_mask; + + /* Pad it to PAGE_SIZE so that data starts on page boundary */ + u8 reserved[4084]; + + /* NOTE: + * The interrupt_mask field is used only for channels but since our + * vmbus connection also uses this data structure and its data starts + * here, we commented out this field. + */ + + /* + * Ring data starts here + RingDataStartOffset + * !!! DO NOT place any fields below this !!! + */ + u8 buffer[0]; +} __packed; + +struct hv_ring_buffer_info { + struct hv_ring_buffer *ring_buffer; + u32 ring_size; /* Include the shared header */ + spinlock_t ring_lock; + + u32 ring_datasize; /* < ring_size */ + u32 ring_data_startoffset; +}; + +struct hv_ring_buffer_debug_info { + u32 current_interrupt_mask; + u32 current_read_index; + u32 current_write_index; + u32 bytes_avail_toread; + u32 bytes_avail_towrite; +}; + +/* + * We use the same version numbering for all Hyper-V modules. + * + * Definition of versioning is as follows; + * + * Major Number Changes for these scenarios; + * 1. When a new version of Windows Hyper-V + * is released. + * 2. A Major change has occurred in the + * Linux IC's. + * (For example the merge for the first time + * into the kernel) Every time the Major Number + * changes, the Revision number is reset to 0. + * Minor Number Changes when new functionality is added + * to the Linux IC's that is not a bug fix. 
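As a concrete illustration of the KVP structures above, here is a sketch of what the user-level daemon might put into a hv_kvp_msg when answering an enumerate request for index 0 (FullyQualifiedDomainName). It deliberately skips the connector transport, error handling and the UTF-16 encoding the Windows host actually expects, and the pool choice is illustrative; it only shows how the header, key and value fields relate.

#include <stdio.h>
#include <linux/hyperv.h>	/* assumes the exported copy of this header */

static void my_fill_enum_reply(struct hv_kvp_msg *msg, const char *fqdn)
{
	struct hv_kvp_exchg_msg_value *val = &msg->body.kvp_enum_data.data;

	msg->kvp_hdr.operation = KVP_OP_ENUMERATE;
	msg->kvp_hdr.pool = KVP_POOL_AUTO;

	val->value_type = REG_SZ;
	/* sizes include the terminating NUL, per the limits defined above */
	val->key_size = snprintf((char *)val->key,
				 HV_KVP_EXCHANGE_MAX_KEY_SIZE,
				 "FullyQualifiedDomainName") + 1;
	val->value_size = snprintf((char *)val->value,
				   HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
				   "%s", fqdn) + 1;
}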
+ * + * 3.1 - Added completed hv_utils driver. Shutdown/Heartbeat/Timesync + */ +#define HV_DRV_VERSION "3.1" + + +/* + * A revision number of vmbus that is used for ensuring both ends on a + * partition are using compatible versions. + */ +#define VMBUS_REVISION_NUMBER 13 + +/* Make maximum size of pipe payload of 16K */ +#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) + +/* Define PipeMode values. */ +#define VMBUS_PIPE_TYPE_BYTE 0x00000000 +#define VMBUS_PIPE_TYPE_MESSAGE 0x00000004 + +/* The size of the user defined data buffer for non-pipe offers. */ +#define MAX_USER_DEFINED_BYTES 120 + +/* The size of the user defined data buffer for pipe offers. */ +#define MAX_PIPE_USER_DEFINED_BYTES 116 + +/* + * At the center of the Channel Management library is the Channel Offer. This + * struct contains the fundamental information about an offer. + */ +struct vmbus_channel_offer { + uuid_le if_type; + uuid_le if_instance; + u64 int_latency; /* in 100ns units */ + u32 if_revision; + u32 server_ctx_size; /* in bytes */ + u16 chn_flags; + u16 mmio_megabytes; /* in bytes * 1024 * 1024 */ + + union { + /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */ + struct { + unsigned char user_def[MAX_USER_DEFINED_BYTES]; + } std; + + /* + * Pipes: + * The following sructure is an integrated pipe protocol, which + * is implemented on top of standard user-defined data. Pipe + * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own + * use. + */ + struct { + u32 pipe_mode; + unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES]; + } pipe; + } u; + u32 padding; +} __packed; + +/* Server Flags */ +#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1 +#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2 +#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4 +#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10 +#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100 +#define VMBUS_CHANNEL_PARENT_OFFER 0x200 +#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400 + +struct vmpacket_descriptor { + u16 type; + u16 offset8; + u16 len8; + u16 flags; + u64 trans_id; +} __packed; + +struct vmpacket_header { + u32 prev_pkt_start_offset; + struct vmpacket_descriptor descriptor; +} __packed; + +struct vmtransfer_page_range { + u32 byte_count; + u32 byte_offset; +} __packed; + +struct vmtransfer_page_packet_header { + struct vmpacket_descriptor d; + u16 xfer_pageset_id; + bool sender_owns_set; + u8 reserved; + u32 range_cnt; + struct vmtransfer_page_range ranges[1]; +} __packed; + +struct vmgpadl_packet_header { + struct vmpacket_descriptor d; + u32 gpadl; + u32 reserved; +} __packed; + +struct vmadd_remove_transfer_page_set { + struct vmpacket_descriptor d; + u32 gpadl; + u16 xfer_pageset_id; + u16 reserved; +} __packed; + +/* + * This structure defines a range in guest physical space that can be made to + * look virtually contiguous. + */ +struct gpa_range { + u32 byte_count; + u32 byte_offset; + u64 pfn_array[0]; +}; + +/* + * This is the format for an Establish Gpadl packet, which contains a handle by + * which this GPADL will be known and a set of GPA ranges associated with it. + * This can be converted to a MDL by the guest OS. If there are multiple GPA + * ranges, then the resulting MDL will be "chained," representing multiple VA + * ranges. 
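Since offset8 and len8 in struct vmpacket_descriptor count 8-byte quanta, a receiver locates a packet's payload with the arithmetic sketched below (the VMPACKET_DATA_START_ADDRESS/VMPACKET_DATA_LENGTH macros further down encode the same idea); the my_vmpacket_* helpers are illustrative names only.

/* A 16-byte header gives offset8 == 2; a 24-byte payload then makes len8 == 5. */
static inline void *my_vmpacket_data(struct vmpacket_descriptor *desc)
{
	return (u8 *)desc + desc->offset8 * 8;
}

static inline u32 my_vmpacket_datalen(struct vmpacket_descriptor *desc)
{
	return (desc->len8 - desc->offset8) * 8;
}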
+ */ +struct vmestablish_gpadl { + struct vmpacket_descriptor d; + u32 gpadl; + u32 range_cnt; + struct gpa_range range[1]; +} __packed; + +/* + * This is the format for a Teardown Gpadl packet, which indicates that the + * GPADL handle in the Establish Gpadl packet will never be referenced again. + */ +struct vmteardown_gpadl { + struct vmpacket_descriptor d; + u32 gpadl; + u32 reserved; /* for alignment to a 8-byte boundary */ +} __packed; + +/* + * This is the format for a GPA-Direct packet, which contains a set of GPA + * ranges, in addition to commands and/or data. + */ +struct vmdata_gpa_direct { + struct vmpacket_descriptor d; + u32 reserved; + u32 range_cnt; + struct gpa_range range[1]; +} __packed; + +/* This is the format for a Additional Data Packet. */ +struct vmadditional_data { + struct vmpacket_descriptor d; + u64 total_bytes; + u32 offset; + u32 byte_cnt; + unsigned char data[1]; +} __packed; + +union vmpacket_largest_possible_header { + struct vmpacket_descriptor simple_hdr; + struct vmtransfer_page_packet_header xfer_page_hdr; + struct vmgpadl_packet_header gpadl_hdr; + struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr; + struct vmestablish_gpadl establish_gpadl_hdr; + struct vmteardown_gpadl teardown_gpadl_hdr; + struct vmdata_gpa_direct data_gpa_direct_hdr; +}; + +#define VMPACKET_DATA_START_ADDRESS(__packet) \ + (void *)(((unsigned char *)__packet) + \ + ((struct vmpacket_descriptor)__packet)->offset8 * 8) + +#define VMPACKET_DATA_LENGTH(__packet) \ + ((((struct vmpacket_descriptor)__packet)->len8 - \ + ((struct vmpacket_descriptor)__packet)->offset8) * 8) + +#define VMPACKET_TRANSFER_MODE(__packet) \ + (((struct IMPACT)__packet)->type) + +enum vmbus_packet_type { + VM_PKT_INVALID = 0x0, + VM_PKT_SYNCH = 0x1, + VM_PKT_ADD_XFER_PAGESET = 0x2, + VM_PKT_RM_XFER_PAGESET = 0x3, + VM_PKT_ESTABLISH_GPADL = 0x4, + VM_PKT_TEARDOWN_GPADL = 0x5, + VM_PKT_DATA_INBAND = 0x6, + VM_PKT_DATA_USING_XFER_PAGES = 0x7, + VM_PKT_DATA_USING_GPADL = 0x8, + VM_PKT_DATA_USING_GPA_DIRECT = 0x9, + VM_PKT_CANCEL_REQUEST = 0xa, + VM_PKT_COMP = 0xb, + VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc, + VM_PKT_ADDITIONAL_DATA = 0xd +}; + +#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1 + + +/* Version 1 messages */ +enum vmbus_channel_message_type { + CHANNELMSG_INVALID = 0, + CHANNELMSG_OFFERCHANNEL = 1, + CHANNELMSG_RESCIND_CHANNELOFFER = 2, + CHANNELMSG_REQUESTOFFERS = 3, + CHANNELMSG_ALLOFFERS_DELIVERED = 4, + CHANNELMSG_OPENCHANNEL = 5, + CHANNELMSG_OPENCHANNEL_RESULT = 6, + CHANNELMSG_CLOSECHANNEL = 7, + CHANNELMSG_GPADL_HEADER = 8, + CHANNELMSG_GPADL_BODY = 9, + CHANNELMSG_GPADL_CREATED = 10, + CHANNELMSG_GPADL_TEARDOWN = 11, + CHANNELMSG_GPADL_TORNDOWN = 12, + CHANNELMSG_RELID_RELEASED = 13, + CHANNELMSG_INITIATE_CONTACT = 14, + CHANNELMSG_VERSION_RESPONSE = 15, + CHANNELMSG_UNLOAD = 16, +#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD + CHANNELMSG_VIEWRANGE_ADD = 17, + CHANNELMSG_VIEWRANGE_REMOVE = 18, +#endif + CHANNELMSG_COUNT +}; + +struct vmbus_channel_message_header { + enum vmbus_channel_message_type msgtype; + u32 padding; +} __packed; + +/* Query VMBus Version parameters */ +struct vmbus_channel_query_vmbus_version { + struct vmbus_channel_message_header header; + u32 version; +} __packed; + +/* VMBus Version Supported parameters */ +struct vmbus_channel_version_supported { + struct vmbus_channel_message_header header; + bool version_supported; +} __packed; + +/* Offer Channel parameters */ +struct vmbus_channel_offer_channel { + struct 
vmbus_channel_message_header header; + struct vmbus_channel_offer offer; + u32 child_relid; + u8 monitorid; + bool monitor_allocated; +} __packed; + +/* Rescind Offer parameters */ +struct vmbus_channel_rescind_offer { + struct vmbus_channel_message_header header; + u32 child_relid; +} __packed; + +/* + * Request Offer -- no parameters, SynIC message contains the partition ID + * Set Snoop -- no parameters, SynIC message contains the partition ID + * Clear Snoop -- no parameters, SynIC message contains the partition ID + * All Offers Delivered -- no parameters, SynIC message contains the partition + * ID + * Flush Client -- no parameters, SynIC message contains the partition ID + */ + +/* Open Channel parameters */ +struct vmbus_channel_open_channel { + struct vmbus_channel_message_header header; + + /* Identifies the specific VMBus channel that is being opened. */ + u32 child_relid; + + /* ID making a particular open request at a channel offer unique. */ + u32 openid; + + /* GPADL for the channel's ring buffer. */ + u32 ringbuffer_gpadlhandle; + + /* GPADL for the channel's server context save area. */ + u32 server_contextarea_gpadlhandle; + + /* + * The upstream ring buffer begins at offset zero in the memory + * described by RingBufferGpadlHandle. The downstream ring buffer + * follows it at this offset (in pages). + */ + u32 downstream_ringbuffer_pageoffset; + + /* User-specific data to be passed along to the server endpoint. */ + unsigned char userdata[MAX_USER_DEFINED_BYTES]; +} __packed; + +/* Open Channel Result parameters */ +struct vmbus_channel_open_result { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 openid; + u32 status; +} __packed; + +/* Close channel parameters; */ +struct vmbus_channel_close_channel { + struct vmbus_channel_message_header header; + u32 child_relid; +} __packed; + +/* Channel Message GPADL */ +#define GPADL_TYPE_RING_BUFFER 1 +#define GPADL_TYPE_SERVER_SAVE_AREA 2 +#define GPADL_TYPE_TRANSACTION 8 + +/* + * The number of PFNs in a GPADL message is defined by the number of + * pages that would be spanned by ByteCount and ByteOffset. If the + * implied number of PFNs won't fit in this packet, there will be a + * follow-up packet that contains more. + */ +struct vmbus_channel_gpadl_header { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 gpadl; + u16 range_buflen; + u16 rangecount; + struct gpa_range range[0]; +} __packed; + +/* This is the followup packet that contains more PFNs. 
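The "number of pages spanned by ByteCount and ByteOffset" rule above reduces to simple arithmetic when byte_offset is, as usual, an offset into the first page; the helper below is only a sketch of that calculation. For example, byte_offset = 0xe00 and byte_count = 0x1000 touch bytes 0xe00..0x1dff, i.e. two 4 KiB pages, so two PFNs follow the header.

static inline u32 my_gpadl_pfn_count(const struct gpa_range *range)
{
	/* pages covering [byte_offset, byte_offset + byte_count) */
	return (range->byte_offset + range->byte_count + PAGE_SIZE - 1)
		>> PAGE_SHIFT;
}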
*/ +struct vmbus_channel_gpadl_body { + struct vmbus_channel_message_header header; + u32 msgnumber; + u32 gpadl; + u64 pfn[0]; +} __packed; + +struct vmbus_channel_gpadl_created { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 gpadl; + u32 creation_status; +} __packed; + +struct vmbus_channel_gpadl_teardown { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 gpadl; +} __packed; + +struct vmbus_channel_gpadl_torndown { + struct vmbus_channel_message_header header; + u32 gpadl; +} __packed; + +#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD +struct vmbus_channel_view_range_add { + struct vmbus_channel_message_header header; + PHYSICAL_ADDRESS viewrange_base; + u64 viewrange_length; + u32 child_relid; +} __packed; + +struct vmbus_channel_view_range_remove { + struct vmbus_channel_message_header header; + PHYSICAL_ADDRESS viewrange_base; + u32 child_relid; +} __packed; +#endif + +struct vmbus_channel_relid_released { + struct vmbus_channel_message_header header; + u32 child_relid; +} __packed; + +struct vmbus_channel_initiate_contact { + struct vmbus_channel_message_header header; + u32 vmbus_version_requested; + u32 padding2; + u64 interrupt_page; + u64 monitor_page1; + u64 monitor_page2; +} __packed; + +struct vmbus_channel_version_response { + struct vmbus_channel_message_header header; + bool version_supported; +} __packed; + +enum vmbus_channel_state { + CHANNEL_OFFER_STATE, + CHANNEL_OPENING_STATE, + CHANNEL_OPEN_STATE, +}; + +struct vmbus_channel_debug_info { + u32 relid; + enum vmbus_channel_state state; + uuid_le interfacetype; + uuid_le interface_instance; + u32 monitorid; + u32 servermonitor_pending; + u32 servermonitor_latency; + u32 servermonitor_connectionid; + u32 clientmonitor_pending; + u32 clientmonitor_latency; + u32 clientmonitor_connectionid; + + struct hv_ring_buffer_debug_info inbound; + struct hv_ring_buffer_debug_info outbound; +}; + +/* + * Represents each channel msg on the vmbus connection This is a + * variable-size data structure depending on the msg type itself + */ +struct vmbus_channel_msginfo { + /* Bookkeeping stuff */ + struct list_head msglistentry; + + /* So far, this is only used to handle gpadl body message */ + struct list_head submsglist; + + /* Synchronize the request/response if needed */ + struct completion waitevent; + union { + struct vmbus_channel_version_supported version_supported; + struct vmbus_channel_open_result open_result; + struct vmbus_channel_gpadl_torndown gpadl_torndown; + struct vmbus_channel_gpadl_created gpadl_created; + struct vmbus_channel_version_response version_response; + } response; + + u32 msgsize; + /* + * The channel message that goes out on the "wire". + * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header + */ + unsigned char msg[0]; +}; + +struct vmbus_close_msg { + struct vmbus_channel_msginfo info; + struct vmbus_channel_close_channel msg; +}; + +struct vmbus_channel { + struct list_head listentry; + + struct hv_device *device_obj; + + struct work_struct work; + + enum vmbus_channel_state state; + + struct vmbus_channel_offer_channel offermsg; + /* + * These are based on the OfferMsg.MonitorId. + * Save it here for easy access. 
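Because struct vmbus_channel_msginfo above ends in a flexible msg[] array that holds the on-the-wire channel message, senders allocate the bookkeeping header and the payload in a single block; the helper below is a sketch of that layout (kzalloc() from <linux/slab.h>), not the exact allocation path the vmbus code uses.

static struct vmbus_channel_msginfo *my_alloc_msginfo(u32 payload_size)
{
	struct vmbus_channel_msginfo *info;

	info = kzalloc(sizeof(*info) + payload_size, GFP_KERNEL);
	if (!info)
		return NULL;

	info->msgsize = payload_size;
	init_completion(&info->waitevent);
	INIT_LIST_HEAD(&info->submsglist);
	return info;
}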
+ */ + u8 monitor_grp; + u8 monitor_bit; + + u32 ringbuffer_gpadlhandle; + + /* Allocated memory for ring buffer */ + void *ringbuffer_pages; + u32 ringbuffer_pagecount; + struct hv_ring_buffer_info outbound; /* send to parent */ + struct hv_ring_buffer_info inbound; /* receive from parent */ + spinlock_t inbound_lock; + struct workqueue_struct *controlwq; + + struct vmbus_close_msg close_msg; + + /* Channel callback are invoked in this workqueue context */ + /* HANDLE dataWorkQueue; */ + + void (*onchannel_callback)(void *context); + void *channel_callback_context; +}; + +void vmbus_onmessage(void *context); + +int vmbus_request_offers(void); + +/* The format must be the same as struct vmdata_gpa_direct */ +struct vmbus_channel_packet_page_buffer { + u16 type; + u16 dataoffset8; + u16 length8; + u16 flags; + u64 transactionid; + u32 reserved; + u32 rangecount; + struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT]; +} __packed; + +/* The format must be the same as struct vmdata_gpa_direct */ +struct vmbus_channel_packet_multipage_buffer { + u16 type; + u16 dataoffset8; + u16 length8; + u16 flags; + u64 transactionid; + u32 reserved; + u32 rangecount; /* Always 1 in this case */ + struct hv_multipage_buffer range; +} __packed; + + +extern int vmbus_open(struct vmbus_channel *channel, + u32 send_ringbuffersize, + u32 recv_ringbuffersize, + void *userdata, + u32 userdatalen, + void(*onchannel_callback)(void *context), + void *context); + +extern void vmbus_close(struct vmbus_channel *channel); + +extern int vmbus_sendpacket(struct vmbus_channel *channel, + const void *buffer, + u32 bufferLen, + u64 requestid, + enum vmbus_packet_type type, + u32 flags); + +extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, + struct hv_page_buffer pagebuffers[], + u32 pagecount, + void *buffer, + u32 bufferlen, + u64 requestid); + +extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, + struct hv_multipage_buffer *mpb, + void *buffer, + u32 bufferlen, + u64 requestid); + +extern int vmbus_establish_gpadl(struct vmbus_channel *channel, + void *kbuffer, + u32 size, + u32 *gpadl_handle); + +extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, + u32 gpadl_handle); + +extern int vmbus_recvpacket(struct vmbus_channel *channel, + void *buffer, + u32 bufferlen, + u32 *buffer_actual_len, + u64 *requestid); + +extern int vmbus_recvpacket_raw(struct vmbus_channel *channel, + void *buffer, + u32 bufferlen, + u32 *buffer_actual_len, + u64 *requestid); + + +extern void vmbus_get_debug_info(struct vmbus_channel *channel, + struct vmbus_channel_debug_info *debug); + +extern void vmbus_ontimer(unsigned long data); + +struct hv_dev_port_info { + u32 int_mask; + u32 read_idx; + u32 write_idx; + u32 bytes_avail_toread; + u32 bytes_avail_towrite; +}; + +/* Base driver object */ +struct hv_driver { + const char *name; + + /* the device type supported by this driver */ + uuid_le dev_type; + const struct hv_vmbus_device_id *id_table; + + struct device_driver driver; + + int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); + int (*remove)(struct hv_device *); + void (*shutdown)(struct hv_device *); + +}; + +/* Base device object */ +struct hv_device { + /* the device type id of this device */ + uuid_le dev_type; + + /* the device instance id of this device */ + uuid_le dev_instance; + + struct device device; + + struct vmbus_channel *channel; +}; + + +static inline struct hv_device *device_to_hv_device(struct device *d) +{ + return container_of(d, struct hv_device, 
device); +} + +static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d) +{ + return container_of(d, struct hv_driver, driver); +} + +static inline void hv_set_drvdata(struct hv_device *dev, void *data) +{ + dev_set_drvdata(&dev->device, data); +} + +static inline void *hv_get_drvdata(struct hv_device *dev) +{ + return dev_get_drvdata(&dev->device); +} + +/* Vmbus interface */ +#define vmbus_driver_register(driver) \ + __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, + struct module *owner, + const char *mod_name); +void vmbus_driver_unregister(struct hv_driver *hv_driver); + +/** + * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device + * + * This macro is used to create a struct hv_vmbus_device_id that matches a + * specific device. + */ +#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \ + g8, g9, ga, gb, gc, gd, ge, gf) \ + .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \ + g8, g9, ga, gb, gc, gd, ge, gf }, + +/* + * Common header for Hyper-V ICs + */ + +#define ICMSGTYPE_NEGOTIATE 0 +#define ICMSGTYPE_HEARTBEAT 1 +#define ICMSGTYPE_KVPEXCHANGE 2 +#define ICMSGTYPE_SHUTDOWN 3 +#define ICMSGTYPE_TIMESYNC 4 +#define ICMSGTYPE_VSS 5 + +#define ICMSGHDRFLAG_TRANSACTION 1 +#define ICMSGHDRFLAG_REQUEST 2 +#define ICMSGHDRFLAG_RESPONSE 4 + +#define HV_S_OK 0x00000000 +#define HV_E_FAIL 0x80004005 +#define HV_S_CONT 0x80070103 +#define HV_ERROR_NOT_SUPPORTED 0x80070032 +#define HV_ERROR_MACHINE_LOCKED 0x800704F7 + +/* + * While we want to handle util services as regular devices, + * there is only one instance of each of these services; so + * we statically allocate the service specific state. + */ + +struct hv_util_service { + u8 *recv_buffer; + void (*util_cb)(void *); + int (*util_init)(struct hv_util_service *); + void (*util_deinit)(void); +}; + +struct vmbuspipe_hdr { + u32 flags; + u32 msgsize; +} __packed; + +struct ic_version { + u16 major; + u16 minor; +} __packed; + +struct icmsg_hdr { + struct ic_version icverframe; + u16 icmsgtype; + struct ic_version icvermsg; + u16 icmsgsize; + u32 status; + u8 ictransaction_id; + u8 icflags; + u8 reserved[2]; +} __packed; + +struct icmsg_negotiate { + u16 icframe_vercnt; + u16 icmsg_vercnt; + u32 reserved; + struct ic_version icversion_data[1]; /* any size array */ +} __packed; + +struct shutdown_msg_data { + u32 reason_code; + u32 timeout_seconds; + u32 flags; + u8 display_message[2048]; +} __packed; + +struct heartbeat_msg_data { + u64 seq_num; + u32 reserved[8]; +} __packed; + +/* Time Sync IC defs */ +#define ICTIMESYNCFLAG_PROBE 0 +#define ICTIMESYNCFLAG_SYNC 1 +#define ICTIMESYNCFLAG_SAMPLE 2 + +#ifdef __x86_64__ +#define WLTIMEDELTA 116444736000000000L /* in 100ns unit */ +#else +#define WLTIMEDELTA 116444736000000000LL +#endif + +struct ictimesync_data { + u64 parenttime; + u64 childtime; + u64 roundtriptime; + u8 flags; +} __packed; + +struct hyperv_service_callback { + u8 msg_type; + char *log_msg; + uuid_le data; + struct vmbus_channel *channel; + void (*callback) (void *context); +}; + +extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *, + struct icmsg_negotiate *, u8 *); + +int hv_kvp_init(struct hv_util_service *); +void hv_kvp_deinit(void); +void hv_kvp_onchannelcallback(void *); + +#endif /* __KERNEL__ */ +#endif /* _HYPERV_H */ diff --git a/include/linux/hysdn_if.h b/include/linux/hysdn_if.h new file mode 100644 index 00000000..00236ae3 --- /dev/null +++ b/include/linux/hysdn_if.h @@ -0,0 +1,33 @@ +/* $Id: 
hysdn_if.h,v 1.1.8.3 2001/09/23 22:25:05 kai Exp $ + * + * Linux driver for HYSDN cards + * ioctl definitions shared by hynetmgr and driver. + * + * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH + * Copyright 1999 by Werner Cornelius (werner@titro.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ + +/****************/ +/* error values */ +/****************/ +#define ERR_NONE 0 /* no error occurred */ +#define ERR_ALREADY_BOOT 1000 /* we are already booting */ +#define EPOF_BAD_MAGIC 1001 /* bad magic in POF header */ +#define ERR_BOARD_DPRAM 1002 /* board DPRAM failed */ +#define EPOF_INTERNAL 1003 /* internal POF handler error */ +#define EPOF_BAD_IMG_SIZE 1004 /* POF boot image size invalid */ +#define ERR_BOOTIMG_FAIL 1005 /* 1. stage boot image did not start */ +#define ERR_BOOTSEQ_FAIL 1006 /* 2. stage boot seq handshake timeout */ +#define ERR_POF_TIMEOUT 1007 /* timeout waiting for card pof ready */ +#define ERR_NOT_BOOTED 1008 /* operation only allowed when booted */ +#define ERR_CONF_LONG 1009 /* conf line is too long */ +#define ERR_INV_CHAN 1010 /* invalid channel number */ +#define ERR_ASYNC_TIME 1011 /* timeout sending async data */ + + + + diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h new file mode 100644 index 00000000..63904ba6 --- /dev/null +++ b/include/linux/i2c-algo-bit.h @@ -0,0 +1,55 @@ +/* ------------------------------------------------------------------------- */ +/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */ +/* ------------------------------------------------------------------------- */ +/* Copyright (C) 1995-99 Simon G. Vogl + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. */ +/* ------------------------------------------------------------------------- */ + +/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even + Frodo Looijaard <frodol@dds.nl> */ + +#ifndef _LINUX_I2C_ALGO_BIT_H +#define _LINUX_I2C_ALGO_BIT_H + +/* --- Defines for bit-adapters --------------------------------------- */ +/* + * This struct contains the hw-dependent functions of bit-style adapters to + * manipulate the line states, and to init any hw-specific features. This is + * only used if you have more than one hw-type of adapter running. 
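+ *
+ * A minimal usage sketch (the foo_* names are hypothetical placeholders):
+ * a bus driver wraps its line accessors in this structure, points
+ * i2c_adapter.algo_data at it, and registers the adapter with
+ * i2c_bit_add_bus():
+ *
+ *	static struct i2c_algo_bit_data foo_bit_data = {
+ *		.data    = &foo_priv,
+ *		.setsda  = foo_setsda,
+ *		.setscl  = foo_setscl,
+ *		.getsda  = foo_getsda,
+ *		.getscl  = foo_getscl,
+ *		.udelay  = 5,
+ *		.timeout = HZ / 10,
+ *	};
+ *
+ *	foo_adap.algo_data = &foo_bit_data;
+ *	err = i2c_bit_add_bus(&foo_adap);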
+ */ +struct i2c_algo_bit_data { + void *data; /* private data for lowlevel routines */ + void (*setsda) (void *data, int state); + void (*setscl) (void *data, int state); + int (*getsda) (void *data); + int (*getscl) (void *data); + int (*pre_xfer) (struct i2c_adapter *); + void (*post_xfer) (struct i2c_adapter *); + + /* local settings */ + int udelay; /* half clock cycle time in us, + minimum 2 us for fast-mode I2C, + minimum 5 us for standard-mode I2C and SMBus, + maximum 50 us for SMBus */ + int timeout; /* in jiffies */ +}; + +int i2c_bit_add_bus(struct i2c_adapter *); +int i2c_bit_add_numbered_bus(struct i2c_adapter *); +extern const struct i2c_algorithm i2c_bit_algo; + +#endif /* _LINUX_I2C_ALGO_BIT_H */ diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h new file mode 100644 index 00000000..1364d62e --- /dev/null +++ b/include/linux/i2c-algo-pca.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_I2C_ALGO_PCA_H +#define _LINUX_I2C_ALGO_PCA_H + +/* Chips known to the pca algo */ +#define I2C_PCA_CHIP_9564 0x00 +#define I2C_PCA_CHIP_9665 0x01 + +/* Internal period for PCA9665 oscilator */ +#define I2C_PCA_OSC_PER 3 /* e10-8s */ + +/* Clock speeds for the bus for PCA9564*/ +#define I2C_PCA_CON_330kHz 0x00 +#define I2C_PCA_CON_288kHz 0x01 +#define I2C_PCA_CON_217kHz 0x02 +#define I2C_PCA_CON_146kHz 0x03 +#define I2C_PCA_CON_88kHz 0x04 +#define I2C_PCA_CON_59kHz 0x05 +#define I2C_PCA_CON_44kHz 0x06 +#define I2C_PCA_CON_36kHz 0x07 + +/* PCA9564 registers */ +#define I2C_PCA_STA 0x00 /* STATUS Read Only */ +#define I2C_PCA_TO 0x00 /* TIMEOUT Write Only */ +#define I2C_PCA_DAT 0x01 /* DATA Read/Write */ +#define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */ +#define I2C_PCA_CON 0x03 /* CONTROL Read/Write */ + +/* PCA9665 registers */ +#define I2C_PCA_INDPTR 0x00 /* INDIRECT Pointer Write Only */ +#define I2C_PCA_IND 0x02 /* INDIRECT Read/Write */ + +/* PCA9665 indirect registers */ +#define I2C_PCA_ICOUNT 0x00 /* Byte Count for buffered mode */ +#define I2C_PCA_IADR 0x01 /* OWN ADR */ +#define I2C_PCA_ISCLL 0x02 /* SCL LOW period */ +#define I2C_PCA_ISCLH 0x03 /* SCL HIGH period */ +#define I2C_PCA_ITO 0x04 /* TIMEOUT */ +#define I2C_PCA_IPRESET 0x05 /* Parallel bus reset */ +#define I2C_PCA_IMODE 0x06 /* I2C Bus mode */ + +/* PCA9665 I2C bus mode */ +#define I2C_PCA_MODE_STD 0x00 /* Standard mode */ +#define I2C_PCA_MODE_FAST 0x01 /* Fast mode */ +#define I2C_PCA_MODE_FASTP 0x02 /* Fast Plus mode */ +#define I2C_PCA_MODE_TURBO 0x03 /* Turbo mode */ + + +#define I2C_PCA_CON_AA 0x80 /* Assert Acknowledge */ +#define I2C_PCA_CON_ENSIO 0x40 /* Enable */ +#define I2C_PCA_CON_STA 0x20 /* Start */ +#define I2C_PCA_CON_STO 0x10 /* Stop */ +#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ +#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ + +struct i2c_algo_pca_data { + void *data; /* private low level data */ + void (*write_byte) (void *data, int reg, int val); + int (*read_byte) (void *data, int reg); + int (*wait_for_completion) (void *data); + void (*reset_chip) (void *data); + /* For PCA9564, use one of the predefined frequencies: + * 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000 + * For PCA9665, use the frequency you want here. 
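+	 *
+	 * For instance (hypothetical values, pca_data is a placeholder
+	 * variable): a PCA9564 board would pick one of the listed rates,
+	 *
+	 *	pca_data.i2c_clock = 330000;
+	 *
+	 * while a PCA9665 board may request an arbitrary rate, e.g.
+	 *
+	 *	pca_data.i2c_clock = 100000;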
*/ + unsigned int i2c_clock; +}; + +int i2c_pca_add_bus(struct i2c_adapter *); +int i2c_pca_add_numbered_bus(struct i2c_adapter *); + +#endif /* _LINUX_I2C_ALGO_PCA_H */ diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h new file mode 100644 index 00000000..538e8f41 --- /dev/null +++ b/include/linux/i2c-algo-pcf.h @@ -0,0 +1,49 @@ +/* ------------------------------------------------------------------------- */ +/* adap-pcf.h i2c driver algorithms for PCF8584 adapters */ +/* ------------------------------------------------------------------------- */ +/* Copyright (C) 1995-97 Simon G. Vogl + 1998-99 Hans Berglund + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. */ +/* ------------------------------------------------------------------------- */ + +/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even + Frodo Looijaard <frodol@dds.nl> */ + +#ifndef _LINUX_I2C_ALGO_PCF_H +#define _LINUX_I2C_ALGO_PCF_H + +struct i2c_algo_pcf_data { + void *data; /* private data for lolevel routines */ + void (*setpcf) (void *data, int ctl, int val); + int (*getpcf) (void *data, int ctl); + int (*getown) (void *data); + int (*getclock) (void *data); + void (*waitforpin) (void *data); + + void (*xfer_begin) (void *data); + void (*xfer_end) (void *data); + + /* Multi-master lost arbitration back-off delay (msecs) + * This should be set by the bus adapter or knowledgable client + * if bus is multi-mastered, else zero + */ + unsigned long lab_mdelay; +}; + +int i2c_pcf_add_bus(struct i2c_adapter *); + +#endif /* _LINUX_I2C_ALGO_PCF_H */ diff --git a/include/linux/i2c-dev.h b/include/linux/i2c-dev.h new file mode 100644 index 00000000..8a7406b2 --- /dev/null +++ b/include/linux/i2c-dev.h @@ -0,0 +1,75 @@ +/* + i2c-dev.h - i2c-bus driver, char device interface + + Copyright (C) 1995-97 Simon G. Vogl + Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. +*/ + +#ifndef _LINUX_I2C_DEV_H +#define _LINUX_I2C_DEV_H + +#include <linux/types.h> +#include <linux/compiler.h> + +/* /dev/i2c-X ioctl commands. 
The ioctl's parameter is always an + * unsigned long, except for: + * - I2C_FUNCS, takes pointer to an unsigned long + * - I2C_RDWR, takes pointer to struct i2c_rdwr_ioctl_data + * - I2C_SMBUS, takes pointer to struct i2c_smbus_ioctl_data + */ +#define I2C_RETRIES 0x0701 /* number of times a device address should + be polled when not acknowledging */ +#define I2C_TIMEOUT 0x0702 /* set timeout in units of 10 ms */ + +/* NOTE: Slave address is 7 or 10 bits, but 10-bit addresses + * are NOT supported! (due to code brokenness) + */ +#define I2C_SLAVE 0x0703 /* Use this slave address */ +#define I2C_SLAVE_FORCE 0x0706 /* Use this slave address, even if it + is already in use by a driver! */ +#define I2C_TENBIT 0x0704 /* 0 for 7 bit addrs, != 0 for 10 bit */ + +#define I2C_FUNCS 0x0705 /* Get the adapter functionality mask */ + +#define I2C_RDWR 0x0707 /* Combined R/W transfer (one STOP only) */ + +#define I2C_PEC 0x0708 /* != 0 to use PEC with SMBus */ +#define I2C_SMBUS 0x0720 /* SMBus transfer */ + + +/* This is the structure as used in the I2C_SMBUS ioctl call */ +struct i2c_smbus_ioctl_data { + __u8 read_write; + __u8 command; + __u32 size; + union i2c_smbus_data __user *data; +}; + +/* This is the structure as used in the I2C_RDWR ioctl call */ +struct i2c_rdwr_ioctl_data { + struct i2c_msg __user *msgs; /* pointers to i2c_msgs */ + __u32 nmsgs; /* number of i2c_msgs */ +}; + +#define I2C_RDRW_IOCTL_MAX_MSGS 42 + +#ifdef __KERNEL__ +#define I2C_MAJOR 89 /* Device major number */ +#endif + +#endif /* _LINUX_I2C_DEV_H */ diff --git a/include/linux/i2c-gpio.h b/include/linux/i2c-gpio.h new file mode 100644 index 00000000..c1bcb1f1 --- /dev/null +++ b/include/linux/i2c-gpio.h @@ -0,0 +1,38 @@ +/* + * i2c-gpio interface to platform code + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_I2C_GPIO_H +#define _LINUX_I2C_GPIO_H + +/** + * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio + * @sda_pin: GPIO pin ID to use for SDA + * @scl_pin: GPIO pin ID to use for SCL + * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz + * @timeout: clock stretching timeout in jiffies. If the slave keeps + * SCL low for longer than this, the transfer will time out. + * @sda_is_open_drain: SDA is configured as open drain, i.e. the pin + * isn't actively driven high when setting the output value high. + * gpio_get_value() must return the actual pin state even if the + * pin is configured as an output. + * @scl_is_open_drain: SCL is set up as open drain. Same requirements + * as for sda_is_open_drain apply. + * @scl_is_output_only: SCL output drivers cannot be turned off. + */ +struct i2c_gpio_platform_data { + unsigned int sda_pin; + unsigned int scl_pin; + int udelay; + int timeout; + unsigned int sda_is_open_drain:1; + unsigned int scl_is_open_drain:1; + unsigned int scl_is_output_only:1; +}; + +#endif /* _LINUX_I2C_GPIO_H */ diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h new file mode 100644 index 00000000..747f0cde --- /dev/null +++ b/include/linux/i2c-mux.h @@ -0,0 +1,47 @@ +/* + * + * i2c-mux.h - functions for the i2c-bus mux support + * + * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it> + * Copyright (c) 2008-2009 Eurotech S.p.A. 
<info@eurotech.it> + * Michael Lawnick <michael.lawnick.ext@nsn.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301 USA. + */ + +#ifndef _LINUX_I2C_MUX_H +#define _LINUX_I2C_MUX_H + +#ifdef __KERNEL__ + +/* + * Called to create a i2c bus on a multiplexed bus segment. + * The mux_dev and chan_id parameters are passed to the select + * and deselect callback functions to perform hardware-specific + * mux control. + */ +struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, + void *mux_dev, u32 force_nr, u32 chan_id, + int (*select) (struct i2c_adapter *, + void *mux_dev, u32 chan_id), + int (*deselect) (struct i2c_adapter *, + void *mux_dev, u32 chan_id)); + +int i2c_del_mux_adapter(struct i2c_adapter *adap); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_I2C_MUX_H */ diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h new file mode 100644 index 00000000..4d5e57ff --- /dev/null +++ b/include/linux/i2c-ocores.h @@ -0,0 +1,21 @@ +/* + * i2c-ocores.h - definitions for the i2c-ocores interface + * + * Peter Korsgaard <jacmet@sunsite.dk> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef _LINUX_I2C_OCORES_H +#define _LINUX_I2C_OCORES_H + +struct ocores_i2c_platform_data { + u32 regstep; /* distance between registers */ + u32 clock_khz; /* input clock in kHz */ + u8 num_devices; /* number of devices in the devices list */ + struct i2c_board_info const *devices; /* devices connected to the bus */ +}; + +#endif /* _LINUX_I2C_OCORES_H */ diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h new file mode 100644 index 00000000..92a0dc75 --- /dev/null +++ b/include/linux/i2c-omap.h @@ -0,0 +1,40 @@ +#ifndef __I2C_OMAP_H__ +#define __I2C_OMAP_H__ + +#include <linux/platform_device.h> + +/* + * Version 2 of the I2C peripheral unit has a different register + * layout and extra registers. The ID register in the V2 peripheral + * unit on the OMAP4430 reports the same ID as the V1 peripheral + * unit on the OMAP3530, so we must inform the driver which IP + * version we know it is running on from platform / cpu-specific + * code using these constants in the hwmod class definition. 
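+ *
+ * As a rough illustration only (foo_i2c_pdata is a placeholder; in
+ * practice the value is derived from the hwmod class revision rather
+ * than set by hand), the driver ends up seeing something like:
+ *
+ *	static struct omap_i2c_bus_platform_data foo_i2c_pdata = {
+ *		.rev = OMAP_I2C_IP_VERSION_2,
+ *	};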
+ */ + +#define OMAP_I2C_IP_VERSION_1 1 +#define OMAP_I2C_IP_VERSION_2 2 + +/* struct omap_i2c_bus_platform_data .flags meanings */ + +#define OMAP_I2C_FLAG_NO_FIFO BIT(0) +#define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1) +#define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2) +#define OMAP_I2C_FLAG_RESET_REGS_POSTIDLE BIT(3) +#define OMAP_I2C_FLAG_APPLY_ERRATA_I207 BIT(4) +#define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5) +#define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6) +/* how the CPU address bus must be translated for I2C unit access */ +#define OMAP_I2C_FLAG_BUS_SHIFT_NONE 0 +#define OMAP_I2C_FLAG_BUS_SHIFT_1 BIT(7) +#define OMAP_I2C_FLAG_BUS_SHIFT_2 BIT(8) +#define OMAP_I2C_FLAG_BUS_SHIFT__SHIFT 7 + +struct omap_i2c_bus_platform_data { + u32 clkrate; + u32 rev; + u32 flags; + void (*set_mpu_wkup_lat)(struct device *dev, long set); +}; + +#endif diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h new file mode 100644 index 00000000..aba33759 --- /dev/null +++ b/include/linux/i2c-pca-platform.h @@ -0,0 +1,12 @@ +#ifndef I2C_PCA9564_PLATFORM_H +#define I2C_PCA9564_PLATFORM_H + +struct i2c_pca9564_pf_platform_data { + int gpio; /* pin to reset chip. driver will work when + * not supplied (negative value), but it + * cannot exit some error conditions then */ + int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */ + int timeout; /* timeout in jiffies */ +}; + +#endif /* I2C_PCA9564_PLATFORM_H */ diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h new file mode 100644 index 00000000..a87124d4 --- /dev/null +++ b/include/linux/i2c-pnx.h @@ -0,0 +1,42 @@ +/* + * Header file for I2C support on PNX010x/4008. + * + * Author: Dennis Kovalev <dkovalev@ru.mvista.com> + * + * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ */ + +#ifndef __I2C_PNX_H__ +#define __I2C_PNX_H__ + +struct platform_device; +struct clk; + +struct i2c_pnx_mif { + int ret; /* Return value */ + int mode; /* Interface mode */ + struct completion complete; /* I/O completion */ + struct timer_list timer; /* Timeout */ + u8 * buf; /* Data buffer */ + int len; /* Length of data buffer */ +}; + +struct i2c_pnx_algo_data { + void __iomem *ioaddr; + struct i2c_pnx_mif mif; + int last; + struct clk *clk; + struct i2c_pnx_data *i2c_pnx; + struct i2c_adapter adapter; +}; + +struct i2c_pnx_data { + const char *name; + u32 base; + int irq; +}; + +#endif /* __I2C_PNX_H__ */ diff --git a/include/linux/i2c-pxa.h b/include/linux/i2c-pxa.h new file mode 100644 index 00000000..41dcdfe7 --- /dev/null +++ b/include/linux/i2c-pxa.h @@ -0,0 +1,17 @@ +#ifndef _LINUX_I2C_ALGO_PXA_H +#define _LINUX_I2C_ALGO_PXA_H + +typedef enum i2c_slave_event_e { + I2C_SLAVE_EVENT_START_READ, + I2C_SLAVE_EVENT_START_WRITE, + I2C_SLAVE_EVENT_STOP +} i2c_slave_event_t; + +struct i2c_slave_client { + void *data; + void (*event)(void *ptr, i2c_slave_event_t event); + int (*read) (void *ptr); + void (*write)(void *ptr, unsigned int val); +}; + +#endif /* _LINUX_I2C_ALGO_PXA_H */ diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h new file mode 100644 index 00000000..017fb40f --- /dev/null +++ b/include/linux/i2c-smbus.h @@ -0,0 +1,51 @@ +/* + * i2c-smbus.h - SMBus extensions to the I2C protocol + * + * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301 USA. + */ + +#ifndef _LINUX_I2C_SMBUS_H +#define _LINUX_I2C_SMBUS_H + +#include <linux/i2c.h> + + +/** + * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client + * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0) + * triggered + * @irq: IRQ number, if the smbus_alert driver should take care of interrupt + * handling + * + * If irq is not specified, the smbus_alert driver doesn't take care of + * interrupt handling. In that case it is up to the I2C bus driver to either + * handle the interrupts or to poll for alerts. + * + * If irq is specified then it it crucial that alert_edge_triggered is + * properly set. + */ +struct i2c_smbus_alert_setup { + unsigned int alert_edge_triggered:1; + int irq; +}; + +struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, + struct i2c_smbus_alert_setup *setup); +int i2c_handle_smbus_alert(struct i2c_client *ara); + +#endif /* _LINUX_I2C_SMBUS_H */ diff --git a/include/linux/i2c-tegra.h b/include/linux/i2c-tegra.h new file mode 100644 index 00000000..9c85da49 --- /dev/null +++ b/include/linux/i2c-tegra.h @@ -0,0 +1,25 @@ +/* + * drivers/i2c/busses/i2c-tegra.c + * + * Copyright (C) 2010 Google, Inc. 
+ * Author: Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_I2C_TEGRA_H +#define _LINUX_I2C_TEGRA_H + +struct tegra_i2c_platform_data { + unsigned long bus_clk_rate; +}; + +#endif /* _LINUX_I2C_TEGRA_H */ diff --git a/include/linux/i2c-xiic.h b/include/linux/i2c-xiic.h new file mode 100644 index 00000000..4f9f2256 --- /dev/null +++ b/include/linux/i2c-xiic.h @@ -0,0 +1,43 @@ +/* + * i2c-xiic.h + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* Supports: + * Xilinx IIC + */ + +#ifndef _LINUX_I2C_XIIC_H +#define _LINUX_I2C_XIIC_H + +/** + * struct xiic_i2c_platform_data - Platform data of the Xilinx I2C driver + * @num_devices: Number of devices that shall be added when the driver + * is probed. + * @devices: The actuall devices to add. + * + * This purpose of this platform data struct is to be able to provide a number + * of devices that should be added to the I2C bus. The reason is that sometimes + * the I2C board info is not enough, a new PCI board can for instance be + * plugged into a standard PC, and the bus number might be unknown at + * early init time. + */ +struct xiic_i2c_platform_data { + u8 num_devices; + struct i2c_board_info const *devices; +}; + +#endif /* _LINUX_I2C_XIIC_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h new file mode 100644 index 00000000..195d8b3d --- /dev/null +++ b/include/linux/i2c.h @@ -0,0 +1,619 @@ +/* ------------------------------------------------------------------------- */ +/* */ +/* i2c.h - definitions for the i2c-bus interface */ +/* */ +/* ------------------------------------------------------------------------- */ +/* Copyright (C) 1995-2000 Simon G. Vogl + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + MA 02110-1301 USA. 
*/ +/* ------------------------------------------------------------------------- */ + +/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and + Frodo Looijaard <frodol@dds.nl> */ + +#ifndef _LINUX_I2C_H +#define _LINUX_I2C_H + +#include <linux/types.h> +#ifdef __KERNEL__ +#include <linux/mod_devicetable.h> +#include <linux/device.h> /* for struct device */ +#include <linux/sched.h> /* for completion */ +#include <linux/mutex.h> +#include <linux/of.h> /* for struct device_node */ +#include <linux/swab.h> /* for swab16 */ + +extern struct bus_type i2c_bus_type; +extern struct device_type i2c_adapter_type; + +/* --- General options ------------------------------------------------ */ + +struct i2c_msg; +struct i2c_algorithm; +struct i2c_adapter; +struct i2c_client; +struct i2c_driver; +union i2c_smbus_data; +struct i2c_board_info; + +struct module; + +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +/* + * The master routines are the ones normally used to transmit data to devices + * on a bus (or read from them). Apart from two basic transfer functions to + * transmit one message at a time, a more complex version can be used to + * transmit an arbitrary number of messages without interruption. + * @count must be be less than 64k since msg.len is u16. + */ +extern int i2c_master_send(const struct i2c_client *client, const char *buf, + int count); +extern int i2c_master_recv(const struct i2c_client *client, char *buf, + int count); + +/* Transfer num messages. + */ +extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num); + +/* This is the very generalized SMBus access routine. You probably do not + want to use this, though; one of the functions below may be much easier, + and probably just as fast. + Note that we use i2c_adapter here, because you do not need a specific + smbus adapter to call this function. */ +extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, + unsigned short flags, char read_write, u8 command, + int size, union i2c_smbus_data *data); + +/* Now follow the 'nice' access routines. These also document the calling + conventions of i2c_smbus_xfer. */ + +extern s32 i2c_smbus_read_byte(const struct i2c_client *client); +extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value); +extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client, + u8 command); +extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client, + u8 command, u8 value); +extern s32 i2c_smbus_read_word_data(const struct i2c_client *client, + u8 command); +extern s32 i2c_smbus_write_word_data(const struct i2c_client *client, + u8 command, u16 value); + +static inline s32 +i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command) +{ + s32 value = i2c_smbus_read_word_data(client, command); + + return (value < 0) ? 
value : swab16(value); +} + +static inline s32 +i2c_smbus_write_word_swapped(const struct i2c_client *client, + u8 command, u16 value) +{ + return i2c_smbus_write_word_data(client, command, swab16(value)); +} + +/* Returns the number of read bytes */ +extern s32 i2c_smbus_read_block_data(const struct i2c_client *client, + u8 command, u8 *values); +extern s32 i2c_smbus_write_block_data(const struct i2c_client *client, + u8 command, u8 length, const u8 *values); +/* Returns the number of read bytes */ +extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, + u8 command, u8 length, u8 *values); +extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, + u8 command, u8 length, + const u8 *values); +#endif /* I2C */ + +/** + * struct i2c_driver - represent an I2C device driver + * @class: What kind of i2c device we instantiate (for detect) + * @attach_adapter: Callback for bus addition (deprecated) + * @detach_adapter: Callback for bus removal (deprecated) + * @probe: Callback for device binding + * @remove: Callback for device unbinding + * @shutdown: Callback for device shutdown + * @suspend: Callback for device suspend + * @resume: Callback for device resume + * @alert: Alert callback, for example for the SMBus alert protocol + * @command: Callback for bus-wide signaling (optional) + * @driver: Device driver model driver + * @id_table: List of I2C devices supported by this driver + * @detect: Callback for device detection + * @address_list: The I2C addresses to probe (for detect) + * @clients: List of detected clients we created (for i2c-core use only) + * + * The driver.owner field should be set to the module owner of this driver. + * The driver.name field should be set to the name of this driver. + * + * For automatic device detection, both @detect and @address_data must + * be defined. @class should also be set, otherwise only devices forced + * with module parameters will be created. The detect function must + * fill at least the name field of the i2c_board_info structure it is + * handed upon successful detection, and possibly also the flags field. + * + * If @detect is missing, the driver will still work fine for enumerated + * devices. Detected devices simply won't be supported. This is expected + * for the many I2C/SMBus devices which can't be detected reliably, and + * the ones which can always be enumerated in practice. + * + * The i2c_client structure which is handed to the @detect callback is + * not a real i2c_client. It is initialized just enough so that you can + * call i2c_smbus_read_byte_data and friends on it. Don't do anything + * else with it. In particular, calling dev_dbg and friends on it is + * not allowed. + */ +struct i2c_driver { + unsigned int class; + + /* Notifies the driver that a new bus has appeared or is about to be + * removed. You should avoid using this, it will be removed in a + * near future. + */ + int (*attach_adapter)(struct i2c_adapter *) __deprecated; + int (*detach_adapter)(struct i2c_adapter *) __deprecated; + + /* Standard driver model interfaces */ + int (*probe)(struct i2c_client *, const struct i2c_device_id *); + int (*remove)(struct i2c_client *); + + /* driver model interfaces that don't relate to enumeration */ + void (*shutdown)(struct i2c_client *); + int (*suspend)(struct i2c_client *, pm_message_t mesg); + int (*resume)(struct i2c_client *); + + /* Alert callback, for example for the SMBus alert protocol. + * The format and meaning of the data value depends on the protocol. 
+ * For the SMBus alert protocol, there is a single bit of data passed + * as the alert response's low bit ("event flag"). + */ + void (*alert)(struct i2c_client *, unsigned int data); + + /* a ioctl like command that can be used to perform specific functions + * with the device. + */ + int (*command)(struct i2c_client *client, unsigned int cmd, void *arg); + + struct device_driver driver; + const struct i2c_device_id *id_table; + + /* Device detection callback for automatic device creation */ + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const unsigned short *address_list; + struct list_head clients; +}; +#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) + +/** + * struct i2c_client - represent an I2C slave device + * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address; + * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking + * @addr: Address used on the I2C bus connected to the parent adapter. + * @name: Indicates the type of the device, usually a chip name that's + * generic enough to hide second-sourcing and compatible revisions. + * @adapter: manages the bus segment hosting this I2C device + * @driver: device's driver, hence pointer to access routines + * @dev: Driver model device node for the slave. + * @irq: indicates the IRQ generated by this device (if any) + * @detected: member of an i2c_driver.clients list or i2c-core's + * userspace_devices list + * + * An i2c_client identifies a single device (i.e. chip) connected to an + * i2c bus. The behaviour exposed to Linux is defined by the driver + * managing the device. + */ +struct i2c_client { + unsigned short flags; /* div., see below */ + unsigned short addr; /* chip address - NOTE: 7bit */ + /* addresses are stored in the */ + /* _LOWER_ 7 bits */ + char name[I2C_NAME_SIZE]; + struct i2c_adapter *adapter; /* the adapter we sit on */ + struct i2c_driver *driver; /* and our access routines */ + struct device dev; /* the device structure */ + int irq; /* irq issued by device */ + struct list_head detected; +}; +#define to_i2c_client(d) container_of(d, struct i2c_client, dev) + +extern struct i2c_client *i2c_verify_client(struct device *dev); + +static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) +{ + struct device * const dev = container_of(kobj, struct device, kobj); + return to_i2c_client(dev); +} + +static inline void *i2c_get_clientdata(const struct i2c_client *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +/** + * struct i2c_board_info - template for device creation + * @type: chip type, to initialize i2c_client.name + * @flags: to initialize i2c_client.flags + * @addr: stored in i2c_client.addr + * @platform_data: stored in i2c_client.dev.platform_data + * @archdata: copied into i2c_client.dev.archdata + * @of_node: pointer to OpenFirmware device node + * @irq: stored in i2c_client.irq + * + * I2C doesn't actually support hardware probing, although controllers and + * devices may be able to use I2C_SMBUS_QUICK to tell whether or not there's + * a device at a given address. Drivers commonly need more information than + * that, such as chip type, configuration, associated IRQ, and so on. + * + * i2c_board_info is used to build tables of information listing I2C devices + * that are present. This information is used to grow the driver model tree. 
+ * For mainboards this is done statically using i2c_register_board_info(); + * bus numbers identify adapters that aren't yet available. For add-on boards, + * i2c_new_device() does this dynamically with the adapter already known. + */ +struct i2c_board_info { + char type[I2C_NAME_SIZE]; + unsigned short flags; + unsigned short addr; + void *platform_data; + struct dev_archdata *archdata; + struct device_node *of_node; + int irq; +}; + +/** + * I2C_BOARD_INFO - macro used to list an i2c device and its address + * @dev_type: identifies the device type + * @dev_addr: the device's address on the bus. + * + * This macro initializes essential fields of a struct i2c_board_info, + * declaring what has been provided on a particular board. Optional + * fields (such as associated irq, or device-specific platform_data) + * are provided using conventional syntax. + */ +#define I2C_BOARD_INFO(dev_type, dev_addr) \ + .type = dev_type, .addr = (dev_addr) + + +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +/* Add-on boards should register/unregister their devices; e.g. a board + * with integrated I2C, a config eeprom, sensors, and a codec that's + * used in conjunction with the primary hardware. + */ +extern struct i2c_client * +i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); + +/* If you don't know the exact address of an I2C device, use this variant + * instead, which can probe for device presence in a list of possible + * addresses. The "probe" callback function is optional. If it is provided, + * it must return 1 on successful probe, 0 otherwise. If it is not provided, + * a default probing method is used. + */ +extern struct i2c_client * +i2c_new_probed_device(struct i2c_adapter *adap, + struct i2c_board_info *info, + unsigned short const *addr_list, + int (*probe)(struct i2c_adapter *, unsigned short addr)); + +/* Common custom probe functions */ +extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr); + +/* For devices that use several addresses, use i2c_new_dummy() to make + * client handles for the extra addresses. + */ +extern struct i2c_client * +i2c_new_dummy(struct i2c_adapter *adap, u16 address); + +extern void i2c_unregister_device(struct i2c_client *); +#endif /* I2C */ + +/* Mainboard arch_initcall() code should register all its I2C devices. + * This is done at arch_initcall time, before declaring any i2c adapters. + * Modules for add-on boards must use other calls. + */ +#ifdef CONFIG_I2C_BOARDINFO +extern int +i2c_register_board_info(int busnum, struct i2c_board_info const *info, + unsigned n); +#else +static inline int +i2c_register_board_info(int busnum, struct i2c_board_info const *info, + unsigned n) +{ + return 0; +} +#endif /* I2C_BOARDINFO */ + +/* + * The following structs are for those who like to implement new bus drivers: + * i2c_algorithm is the interface to a class of hardware solutions which can + * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 + * to name two of the most common. + */ +struct i2c_algorithm { + /* If an adapter algorithm can't do I2C-level access, set master_xfer + to NULL. If an adapter algorithm can do SMBus access, set + smbus_xfer. 
If set to NULL, the SMBus protocol is simulated + using common I2C messages */ + /* master_xfer should return the number of messages successfully + processed, or a negative value on error */ + int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num); + int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, + unsigned short flags, char read_write, + u8 command, int size, union i2c_smbus_data *data); + + /* To determine what the adapter supports */ + u32 (*functionality) (struct i2c_adapter *); +}; + +/* + * i2c_adapter is the structure used to identify a physical i2c bus along + * with the access algorithms necessary to access it. + */ +struct i2c_adapter { + struct module *owner; + unsigned int class; /* classes to allow probing for */ + const struct i2c_algorithm *algo; /* the algorithm to access the bus */ + void *algo_data; + + /* data fields that are valid for all devices */ + struct rt_mutex bus_lock; + + int timeout; /* in jiffies */ + int retries; + struct device dev; /* the adapter device */ + + int nr; + char name[48]; + struct completion dev_released; + + struct mutex userspace_clients_lock; + struct list_head userspace_clients; +}; +#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) + +static inline void *i2c_get_adapdata(const struct i2c_adapter *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +static inline struct i2c_adapter * +i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) +{ + struct device *parent = adapter->dev.parent; + + if (parent != NULL && parent->type == &i2c_adapter_type) + return to_i2c_adapter(parent); + else + return NULL; +} + +int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *)); + +/* Adapter locking functions, exported for shared pin cases */ +void i2c_lock_adapter(struct i2c_adapter *); +void i2c_unlock_adapter(struct i2c_adapter *); + +/*flags for the client struct: */ +#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ +#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ + /* Must equal I2C_M_TEN below */ +#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ + +/* i2c adapter classes (bitmask) */ +#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ +#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ +#define I2C_CLASS_SPD (1<<7) /* Memory modules */ + +/* Internal numbers to terminate lists */ +#define I2C_CLIENT_END 0xfffeU + +/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ +#define I2C_ADDRS(addr, addrs...) \ + ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) + + +/* ----- functions exported by i2c.o */ + +/* administration... 
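+ * In rough outline (sketch only, error handling omitted; the foo_*
+ * symbols are placeholders): a bus driver fills in a struct i2c_adapter
+ * and calls i2c_add_adapter() or i2c_add_numbered_adapter(), while a
+ * chip driver fills in a struct i2c_driver and registers it:
+ *
+ *	static struct i2c_driver foo_driver = {
+ *		.driver   = { .name = "foo" },
+ *		.probe    = foo_probe,
+ *		.remove   = foo_remove,
+ *		.id_table = foo_ids,
+ *	};
+ *	module_i2c_driver(foo_driver);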
+ */ +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +extern int i2c_add_adapter(struct i2c_adapter *); +extern int i2c_del_adapter(struct i2c_adapter *); +extern int i2c_add_numbered_adapter(struct i2c_adapter *); + +extern int i2c_register_driver(struct module *, struct i2c_driver *); +extern void i2c_del_driver(struct i2c_driver *); + +/* use a define to avoid include chaining to get THIS_MODULE */ +#define i2c_add_driver(driver) \ + i2c_register_driver(THIS_MODULE, driver) + +extern struct i2c_client *i2c_use_client(struct i2c_client *client); +extern void i2c_release_client(struct i2c_client *client); + +/* call the i2c_client->command() of all attached clients with + * the given arguments */ +extern void i2c_clients_command(struct i2c_adapter *adap, + unsigned int cmd, void *arg); + +extern struct i2c_adapter *i2c_get_adapter(int nr); +extern void i2c_put_adapter(struct i2c_adapter *adap); + + +/* Return the functionality mask */ +static inline u32 i2c_get_functionality(struct i2c_adapter *adap) +{ + return adap->algo->functionality(adap); +} + +/* Return 1 if adapter supports everything we need, 0 if not. */ +static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func) +{ + return (func & i2c_get_functionality(adap)) == func; +} + +/* Return the adapter number for a specific adapter */ +static inline int i2c_adapter_id(struct i2c_adapter *adap) +{ + return adap->nr; +} + +/** + * module_i2c_driver() - Helper macro for registering a I2C driver + * @__i2c_driver: i2c_driver struct + * + * Helper macro for I2C drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_i2c_driver(__i2c_driver) \ + module_driver(__i2c_driver, i2c_add_driver, \ + i2c_del_driver) + +#endif /* I2C */ +#endif /* __KERNEL__ */ + +/** + * struct i2c_msg - an I2C transaction segment beginning with START + * @addr: Slave address, either seven or ten bits. When this is a ten + * bit address, I2C_M_TEN must be set in @flags and the adapter + * must support I2C_FUNC_10BIT_ADDR. + * @flags: I2C_M_RD is handled by all adapters. No other flags may be + * provided unless the adapter exported the relevant I2C_FUNC_* + * flags through i2c_check_functionality(). + * @len: Number of data bytes in @buf being read from or written to the + * I2C slave address. For read transactions where I2C_M_RECV_LEN + * is set, the caller guarantees that this buffer can hold up to + * 32 bytes in addition to the initial length byte sent by the + * slave (plus, if used, the SMBus PEC); and this value will be + * incremented by the number of block data bytes received. + * @buf: The buffer into which data is read, or from which it's written. + * + * An i2c_msg is the low level representation of one segment of an I2C + * transaction. It is visible to drivers in the @i2c_transfer() procedure, + * to userspace from i2c-dev, and to I2C adapter drivers through the + * @i2c_adapter.@master_xfer() method. + * + * Except when I2C "protocol mangling" is used, all I2C adapters implement + * the standard rules for I2C transactions. Each transaction begins with a + * START. That is followed by the slave address, and a bit encoding read + * versus write. Then follow all the data bytes, possibly including a byte + * with SMBus PEC. The transfer terminates with a NAK, or when all those + * bytes have been transferred and ACKed. 
If this is the last message in a + * group, it is followed by a STOP. Otherwise it is followed by the next + * @i2c_msg transaction segment, beginning with a (repeated) START. + * + * Alternatively, when the adapter supports I2C_FUNC_PROTOCOL_MANGLING then + * passing certain @flags may have changed those standard protocol behaviors. + * Those flags are only for use with broken/nonconforming slaves, and with + * adapters which are known to support the specific mangling options they + * need (one or more of IGNORE_NAK, NO_RD_ACK, NOSTART, and REV_DIR_ADDR). + */ +struct i2c_msg { + __u16 addr; /* slave address */ + __u16 flags; +#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */ +#define I2C_M_RD 0x0001 /* read data, from slave to master */ +#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */ +#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */ + __u16 len; /* msg length */ + __u8 *buf; /* pointer to msg data */ +}; + +/* To determine what functionality is present */ + +#define I2C_FUNC_I2C 0x00000001 +#define I2C_FUNC_10BIT_ADDR 0x00000002 +#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_NOSTART etc. */ +#define I2C_FUNC_SMBUS_PEC 0x00000008 +#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */ +#define I2C_FUNC_SMBUS_QUICK 0x00010000 +#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000 +#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000 +#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000 +#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000 +#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000 +#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000 +#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000 +#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 +#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000 +#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */ +#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */ + +#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \ + I2C_FUNC_SMBUS_WRITE_BYTE) +#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \ + I2C_FUNC_SMBUS_WRITE_BYTE_DATA) +#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \ + I2C_FUNC_SMBUS_WRITE_WORD_DATA) +#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \ + I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) +#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \ + I2C_FUNC_SMBUS_WRITE_I2C_BLOCK) + +#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \ + I2C_FUNC_SMBUS_BYTE | \ + I2C_FUNC_SMBUS_BYTE_DATA | \ + I2C_FUNC_SMBUS_WORD_DATA | \ + I2C_FUNC_SMBUS_PROC_CALL | \ + I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \ + I2C_FUNC_SMBUS_I2C_BLOCK | \ + I2C_FUNC_SMBUS_PEC) + +/* + * Data for SMBus Messages + */ +#define I2C_SMBUS_BLOCK_MAX 32 /* As specified in SMBus standard */ +union i2c_smbus_data { + __u8 byte; + __u16 word; + __u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */ + /* and one more for user-space compatibility */ +}; + +/* i2c_smbus_xfer read or write markers */ +#define I2C_SMBUS_READ 1 +#define I2C_SMBUS_WRITE 0 + +/* SMBus transaction types (size parameter in the above functions) + Note: these no longer correspond to the (arbitrary) PIIX4 internal codes! 
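+   For instance (illustrative fragment; client and command come from the
+   surrounding driver context), a byte-data read such as
+   i2c_smbus_read_byte_data() roughly boils down to
+
+	union i2c_smbus_data data;
+	s32 status = i2c_smbus_xfer(client->adapter, client->addr,
+				    client->flags, I2C_SMBUS_READ, command,
+				    I2C_SMBUS_BYTE_DATA, &data);
+
+   with the byte returned in data.byte on success.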
*/ +#define I2C_SMBUS_QUICK 0 +#define I2C_SMBUS_BYTE 1 +#define I2C_SMBUS_BYTE_DATA 2 +#define I2C_SMBUS_WORD_DATA 3 +#define I2C_SMBUS_PROC_CALL 4 +#define I2C_SMBUS_BLOCK_DATA 5 +#define I2C_SMBUS_I2C_BLOCK_BROKEN 6 +#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ +#define I2C_SMBUS_I2C_BLOCK_DATA 8 + +#endif /* _LINUX_I2C_H */ diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h new file mode 100644 index 00000000..cec17cf6 --- /dev/null +++ b/include/linux/i2c/adp5588.h @@ -0,0 +1,171 @@ +/* + * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef _ADP5588_H +#define _ADP5588_H + +#define DEV_ID 0x00 /* Device ID */ +#define CFG 0x01 /* Configuration Register1 */ +#define INT_STAT 0x02 /* Interrupt Status Register */ +#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */ +#define Key_EVENTA 0x04 /* Key Event Register A */ +#define Key_EVENTB 0x05 /* Key Event Register B */ +#define Key_EVENTC 0x06 /* Key Event Register C */ +#define Key_EVENTD 0x07 /* Key Event Register D */ +#define Key_EVENTE 0x08 /* Key Event Register E */ +#define Key_EVENTF 0x09 /* Key Event Register F */ +#define Key_EVENTG 0x0A /* Key Event Register G */ +#define Key_EVENTH 0x0B /* Key Event Register H */ +#define Key_EVENTI 0x0C /* Key Event Register I */ +#define Key_EVENTJ 0x0D /* Key Event Register J */ +#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */ +#define UNLOCK1 0x0F /* Unlock Key1 */ +#define UNLOCK2 0x10 /* Unlock Key2 */ +#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */ +#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */ +#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */ +#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */ +#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */ +#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */ +#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */ +#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */ +#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */ +#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */ +#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */ +#define GPI_EM1 0x20 /* GPI Event Mode 1 */ +#define GPI_EM2 0x21 /* GPI Event Mode 2 */ +#define GPI_EM3 0x22 /* GPI Event Mode 3 */ +#define GPIO_DIR1 0x23 /* GPIO Data Direction */ +#define GPIO_DIR2 0x24 /* GPIO Data Direction */ +#define GPIO_DIR3 0x25 /* GPIO Data Direction */ +#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */ +#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */ +#define Debounce_DIS1 0x29 /* Debounce Disable */ +#define Debounce_DIS2 0x2A /* Debounce Disable */ +#define Debounce_DIS3 0x2B /* Debounce Disable */ +#define GPIO_PULL1 0x2C /* GPIO Pull Disable */ +#define GPIO_PULL2 0x2D /* GPIO Pull Disable */ +#define GPIO_PULL3 0x2E /* GPIO Pull Disable */ +#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */ +#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */ +#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */ +#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output 
Rising) for Sensor 1 */ +#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */ +#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */ +#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */ +#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */ +#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */ +#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */ +#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */ +#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */ +#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */ +#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */ +#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */ + +#define ADP5588_DEVICE_ID_MASK 0xF + + /* Configuration Register1 */ +#define ADP5588_AUTO_INC (1 << 7) +#define ADP5588_GPIEM_CFG (1 << 6) +#define ADP5588_OVR_FLOW_M (1 << 5) +#define ADP5588_INT_CFG (1 << 4) +#define ADP5588_OVR_FLOW_IEN (1 << 3) +#define ADP5588_K_LCK_IM (1 << 2) +#define ADP5588_GPI_IEN (1 << 1) +#define ADP5588_KE_IEN (1 << 0) + +/* Interrupt Status Register */ +#define ADP5588_CMP2_INT (1 << 5) +#define ADP5588_CMP1_INT (1 << 4) +#define ADP5588_OVR_FLOW_INT (1 << 3) +#define ADP5588_K_LCK_INT (1 << 2) +#define ADP5588_GPI_INT (1 << 1) +#define ADP5588_KE_INT (1 << 0) + +/* Key Lock and Event Counter Register */ +#define ADP5588_K_LCK_EN (1 << 6) +#define ADP5588_LCK21 0x30 +#define ADP5588_KEC 0xF + +#define ADP5588_MAXGPIO 18 +#define ADP5588_BANK(offs) ((offs) >> 3) +#define ADP5588_BIT(offs) (1u << ((offs) & 0x7)) + +/* Put one of these structures in i2c_board_info platform_data */ + +#define ADP5588_KEYMAPSIZE 80 + +#define GPI_PIN_ROW0 97 +#define GPI_PIN_ROW1 98 +#define GPI_PIN_ROW2 99 +#define GPI_PIN_ROW3 100 +#define GPI_PIN_ROW4 101 +#define GPI_PIN_ROW5 102 +#define GPI_PIN_ROW6 103 +#define GPI_PIN_ROW7 104 +#define GPI_PIN_COL0 105 +#define GPI_PIN_COL1 106 +#define GPI_PIN_COL2 107 +#define GPI_PIN_COL3 108 +#define GPI_PIN_COL4 109 +#define GPI_PIN_COL5 110 +#define GPI_PIN_COL6 111 +#define GPI_PIN_COL7 112 +#define GPI_PIN_COL8 113 +#define GPI_PIN_COL9 114 + +#define GPI_PIN_ROW_BASE GPI_PIN_ROW0 +#define GPI_PIN_ROW_END GPI_PIN_ROW7 +#define GPI_PIN_COL_BASE GPI_PIN_COL0 +#define GPI_PIN_COL_END GPI_PIN_COL9 + +#define GPI_PIN_BASE GPI_PIN_ROW_BASE +#define GPI_PIN_END GPI_PIN_COL_END + +#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1) + +struct adp5588_gpi_map { + unsigned short pin; + unsigned short sw_evt; +}; + +struct adp5588_kpad_platform_data { + int rows; /* Number of rows */ + int cols; /* Number of columns */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + unsigned repeat:1; /* Enable key repeat */ + unsigned en_keylock:1; /* Enable Key Lock feature */ + unsigned short unlock_key1; /* Unlock Key 1 */ + unsigned short unlock_key2; /* Unlock Key 2 */ + const struct adp5588_gpi_map *gpimap; + unsigned short gpimapsize; + const struct adp5588_gpio_platform_data *gpio_data; +}; + +struct i2c_client; /* forward declaration */ + +struct adp5588_gpio_platform_data { + int gpio_start; /* GPIO Chip base # */ + unsigned irq_base; /* interrupt base # */ + unsigned pullup_dis_mask; /* Pull-Up Disable Mask */ + int 
(*setup)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + void *context; +}; + +#endif diff --git a/include/linux/i2c/adp8860.h b/include/linux/i2c/adp8860.h new file mode 100644 index 00000000..0b4d3985 --- /dev/null +++ b/include/linux/i2c/adp8860.h @@ -0,0 +1,154 @@ +/* + * Definitions and platform data for Analog Devices + * Backlight drivers ADP8860 + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __LINUX_I2C_ADP8860_H +#define __LINUX_I2C_ADP8860_H + +#include <linux/leds.h> +#include <linux/types.h> + +#define ID_ADP8860 8860 + +#define ADP8860_MAX_BRIGHTNESS 0x7F +#define FLAG_OFFT_SHIFT 8 + +/* + * LEDs subdevice platform data + */ + +#define ADP8860_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) + +#define ADP8860_LED_ONT_200ms 0 +#define ADP8860_LED_ONT_600ms 1 +#define ADP8860_LED_ONT_800ms 2 +#define ADP8860_LED_ONT_1200ms 3 + +#define ADP8860_LED_D7 (7) +#define ADP8860_LED_D6 (6) +#define ADP8860_LED_D5 (5) +#define ADP8860_LED_D4 (4) +#define ADP8860_LED_D3 (3) +#define ADP8860_LED_D2 (2) +#define ADP8860_LED_D1 (1) + +/* + * Backlight subdevice platform data + */ + +#define ADP8860_BL_D7 (1 << 6) +#define ADP8860_BL_D6 (1 << 5) +#define ADP8860_BL_D5 (1 << 4) +#define ADP8860_BL_D4 (1 << 3) +#define ADP8860_BL_D3 (1 << 2) +#define ADP8860_BL_D2 (1 << 1) +#define ADP8860_BL_D1 (1 << 0) + +#define ADP8860_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP8860_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP8860_FADE_T_600ms 2 +#define ADP8860_FADE_T_900ms 3 +#define ADP8860_FADE_T_1200ms 4 +#define ADP8860_FADE_T_1500ms 5 +#define ADP8860_FADE_T_1800ms 6 +#define ADP8860_FADE_T_2100ms 7 +#define ADP8860_FADE_T_2400ms 8 +#define ADP8860_FADE_T_2700ms 9 +#define ADP8860_FADE_T_3000ms 10 +#define ADP8860_FADE_T_3500ms 11 +#define ADP8860_FADE_T_4000ms 12 +#define ADP8860_FADE_T_4500ms 13 +#define ADP8860_FADE_T_5000ms 14 +#define ADP8860_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP8860_FADE_LAW_LINEAR 0 +#define ADP8860_FADE_LAW_SQUARE 1 +#define ADP8860_FADE_LAW_CUBIC1 2 +#define ADP8860_FADE_LAW_CUBIC2 3 + +#define ADP8860_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP8860_BL_AMBL_FILT_160ms 1 +#define ADP8860_BL_AMBL_FILT_320ms 2 +#define ADP8860_BL_AMBL_FILT_640ms 3 +#define ADP8860_BL_AMBL_FILT_1280ms 4 +#define ADP8860_BL_AMBL_FILT_2560ms 5 +#define ADP8860_BL_AMBL_FILT_5120ms 6 +#define ADP8860_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ + +/* + * Blacklight current 0..30mA + */ +#define ADP8860_BL_CUR_mA(I) ((I * 127) / 30) + +/* + * L2 comparator current 0..1106uA + */ +#define ADP8860_L2_COMP_CURR_uA(I) ((I * 255) / 1106) + +/* + * L3 comparator current 0..138uA + */ +#define ADP8860_L3_COMP_CURR_uA(I) ((I * 255) / 138) + +struct adp8860_backlight_platform_data { + u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ + + u8 bl_fade_in; /* Backlight Fade-In Timer */ + u8 bl_fade_out; /* Backlight Fade-Out Timer */ + u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_max; 
/* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + + /** + * Independent Current Sinks / LEDS + * Sinks not assigned to the Backlight can be exposed to + * user space using the LEDS CLASS interface + */ + + int num_leds; + struct led_info *leds; + u8 led_fade_in; /* LED Fade-In Timer */ + u8 led_fade_out; /* LED Fade-Out Timer */ + u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ + u8 led_on_time; + + /** + * Gain down disable. Setting this option does not allow the + * charge pump to switch to lower gains. NOT AVAILABLE on ADP8860 + * 1 = the charge pump doesn't switch down in gain until all LEDs are 0. + * The charge pump switches up in gain as needed. This feature is + * useful if the ADP8863 charge pump is used to drive an external load. + * This feature must be used when utilizing small fly capacitors + * (0402 or smaller). + * 0 = the charge pump automatically switches up and down in gain. + * This provides optimal efficiency, but is not suitable for driving + * loads that are not connected through the ADP8863 diode drivers. + * Additionally, the charge pump fly capacitors should be low ESR + * and sized 0603 or greater. + */ + + u8 gdwn_dis; +}; + +#endif /* __LINUX_I2C_ADP8860_H */ diff --git a/include/linux/i2c/adp8870.h b/include/linux/i2c/adp8870.h new file mode 100644 index 00000000..624dcecc --- /dev/null +++ b/include/linux/i2c/adp8870.h @@ -0,0 +1,153 @@ +/* + * Definitions and platform data for Analog Devices + * Backlight drivers ADP8870 + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
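Stepping back to the ADP8860 definitions above: the BL_CUR_mA() and L2/L3_COMP_CURR_uA() helpers turn a milliamp or microamp request into the 8-bit code the driver programs into the chip. A minimal, purely illustrative board-file sketch; every numeric choice below (20 mA daylight current, D1 as the backlight sink, trip points, filter time) is a hypothetical board decision, not something defined by this header:

#include <linux/i2c/adp8860.h>

/* Hypothetical board: D1 is the backlight sink, 20 mA in daylight,
 * ambient-light sensing enabled with a 640 ms filter. */
static struct adp8860_backlight_platform_data board_adp8860_pdata = {
	.bl_led_assign   = ADP8860_BL_D1,
	.bl_fade_in      = ADP8860_FADE_T_600ms,
	.bl_fade_out     = ADP8860_FADE_T_600ms,
	.bl_fade_law     = ADP8860_FADE_LAW_LINEAR,
	.en_ambl_sens    = 1,
	.abml_filt       = ADP8860_BL_AMBL_FILT_640ms,
	.l1_daylight_max = ADP8860_BL_CUR_mA(20),	/* 20 mA -> register code */
	.l2_office_max   = ADP8860_BL_CUR_mA(10),
	.l2_trip         = ADP8860_L2_COMP_CURR_uA(700),
	.l2_hyst         = ADP8860_L2_COMP_CURR_uA(50),
};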
+ */ + +#ifndef __LINUX_I2C_ADP8870_H +#define __LINUX_I2C_ADP8870_H + +#define ID_ADP8870 8870 + +#define ADP8870_MAX_BRIGHTNESS 0x7F +#define FLAG_OFFT_SHIFT 8 + +/* + * LEDs subdevice platform data + */ + +#define ADP8870_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) +#define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) +#define ADP8870_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) +#define ADP8870_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) + +#define ADP8870_LED_ONT_200ms 0 +#define ADP8870_LED_ONT_600ms 1 +#define ADP8870_LED_ONT_800ms 2 +#define ADP8870_LED_ONT_1200ms 3 + +#define ADP8870_LED_D7 (7) +#define ADP8870_LED_D6 (6) +#define ADP8870_LED_D5 (5) +#define ADP8870_LED_D4 (4) +#define ADP8870_LED_D3 (3) +#define ADP8870_LED_D2 (2) +#define ADP8870_LED_D1 (1) + +/* + * Backlight subdevice platform data + */ + +#define ADP8870_BL_D7 (1 << 6) +#define ADP8870_BL_D6 (1 << 5) +#define ADP8870_BL_D5 (1 << 4) +#define ADP8870_BL_D4 (1 << 3) +#define ADP8870_BL_D3 (1 << 2) +#define ADP8870_BL_D2 (1 << 1) +#define ADP8870_BL_D1 (1 << 0) + +#define ADP8870_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP8870_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP8870_FADE_T_600ms 2 +#define ADP8870_FADE_T_900ms 3 +#define ADP8870_FADE_T_1200ms 4 +#define ADP8870_FADE_T_1500ms 5 +#define ADP8870_FADE_T_1800ms 6 +#define ADP8870_FADE_T_2100ms 7 +#define ADP8870_FADE_T_2400ms 8 +#define ADP8870_FADE_T_2700ms 9 +#define ADP8870_FADE_T_3000ms 10 +#define ADP8870_FADE_T_3500ms 11 +#define ADP8870_FADE_T_4000ms 12 +#define ADP8870_FADE_T_4500ms 13 +#define ADP8870_FADE_T_5000ms 14 +#define ADP8870_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP8870_FADE_LAW_LINEAR 0 +#define ADP8870_FADE_LAW_SQUARE 1 +#define ADP8870_FADE_LAW_CUBIC1 2 +#define ADP8870_FADE_LAW_CUBIC2 3 + +#define ADP8870_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP8870_BL_AMBL_FILT_160ms 1 +#define ADP8870_BL_AMBL_FILT_320ms 2 +#define ADP8870_BL_AMBL_FILT_640ms 3 +#define ADP8870_BL_AMBL_FILT_1280ms 4 +#define ADP8870_BL_AMBL_FILT_2560ms 5 +#define ADP8870_BL_AMBL_FILT_5120ms 6 +#define ADP8870_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ + +/* + * Blacklight current 0..30mA + */ +#define ADP8870_BL_CUR_mA(I) ((I * 127) / 30) + +/* + * L2 comparator current 0..1106uA + */ +#define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106) + +/* + * L3 comparator current 0..551uA + */ +#define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551) + +/* + * L4 comparator current 0..275uA + */ +#define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275) + +/* + * L5 comparator current 0..138uA + */ +#define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138) + +struct adp8870_backlight_platform_data { + u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ + u8 pwm_assign; /* 1 = Enables PWM mode */ + + u8 bl_fade_in; /* Backlight Fade-In Timer */ + u8 bl_fade_out; /* Backlight Fade-Out Timer */ + u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_bright_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_bright_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l4_indoor_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l4_indor_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 
l5_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l5_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l4_trip; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ + u8 l4_hyst; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ + u8 l5_trip; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */ + u8 l5_hyst; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */ + + /** + * Independent Current Sinks / LEDS + * Sinks not assigned to the Backlight can be exposed to + * user space using the LEDS CLASS interface + */ + + int num_leds; + struct led_info *leds; + u8 led_fade_in; /* LED Fade-In Timer */ + u8 led_fade_out; /* LED Fade-Out Timer */ + u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ + u8 led_on_time; +}; + +#endif /* __LINUX_I2C_ADP8870_H */ diff --git a/include/linux/i2c/ads1015.h b/include/linux/i2c/ads1015.h new file mode 100644 index 00000000..d5aa2a04 --- /dev/null +++ b/include/linux/i2c/ads1015.h @@ -0,0 +1,36 @@ +/* + * Platform Data for ADS1015 12-bit 4-input ADC + * (C) Copyright 2010 + * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef LINUX_ADS1015_H +#define LINUX_ADS1015_H + +#define ADS1015_CHANNELS 8 + +struct ads1015_channel_data { + bool enabled; + unsigned int pga; + unsigned int data_rate; +}; + +struct ads1015_platform_data { + struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; +}; + +#endif /* LINUX_ADS1015_H */ diff --git a/include/linux/i2c/apds990x.h b/include/linux/i2c/apds990x.h new file mode 100644 index 00000000..d186fcc5 --- /dev/null +++ b/include/linux/i2c/apds990x.h @@ -0,0 +1,79 @@ +/* + * This file is part of the APDS990x sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
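The ads1015_platform_data above is simply an array of per-channel settings; a channel left zero-initialised stays disabled. A brief sketch, assuming a board that only uses input 4; the pga and data_rate numbers are register codes chosen for illustration from the chip's documentation, not constants provided by this header:

#include <linux/i2c/ads1015.h>

/* Hypothetical: enable only channel 4; the other channels remain
 * disabled because the array is zero-initialised. */
static struct ads1015_platform_data board_ads1015_pdata = {
	.channel_data[4] = {
		.enabled   = true,
		.pga       = 2,	/* hypothetical gain code, per datasheet */
		.data_rate = 4,	/* hypothetical sample-rate code, per datasheet */
	},
};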
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __APDS990X_H__ +#define __APDS990X_H__ + + +#define APDS_IRLED_CURR_12mA 0x3 +#define APDS_IRLED_CURR_25mA 0x2 +#define APDS_IRLED_CURR_50mA 0x1 +#define APDS_IRLED_CURR_100mA 0x0 + +/** + * struct apds990x_chip_factors - defines effect of the cover window + * @ga: Total glass attenuation + * @cf1: clear channel factor 1 for raw to lux conversion + * @irf1: IR channel factor 1 for raw to lux conversion + * @cf2: clear channel factor 2 for raw to lux conversion + * @irf2: IR channel factor 2 for raw to lux conversion + * @df: device factor for conversion formulas + * + * Structure for tuning ALS calculation to match with environment. + * Values depend on the material above the sensor and the sensor + * itself. If the GA is zero, driver will use uncovered sensor default values + * format: decimal value * APDS_PARAM_SCALE except df which is plain integer. + */ +#define APDS_PARAM_SCALE 4096 +struct apds990x_chip_factors { + int ga; + int cf1; + int irf1; + int cf2; + int irf2; + int df; +}; + +/** + * struct apds990x_platform_data - platform data for apds990x.c driver + * @cf: chip factor data + * @pdrive: IR-led driving current + * @ppcount: number of IR pulses used for proximity estimation + * @setup_resources: interrupt line setup call back function + * @release_resources: interrupt line release call back function + * + * Proximity detection result depends heavily on correct ppcount, pdrive + * and cover window. + * + */ + +struct apds990x_platform_data { + struct apds990x_chip_factors cf; + u8 pdrive; + u8 ppcount; + int (*setup_resources)(void); + int (*release_resources)(void); +}; + +#endif diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h new file mode 100644 index 00000000..285025a9 --- /dev/null +++ b/include/linux/i2c/at24.h @@ -0,0 +1,55 @@ +/* + * at24.h - platform_data for the at24 (generic eeprom) driver + * (C) Copyright 2008 by Pengutronix + * (C) Copyright 2012 by Wolfram Sang + * same license as the driver + */ + +#ifndef _LINUX_AT24_H +#define _LINUX_AT24_H + +#include <linux/types.h> +#include <linux/memory.h> + +/** + * struct at24_platform_data - data to set up at24 (generic eeprom) driver + * @byte_len: size of eeprom in bytes + * @page_size: number of bytes which can be written in one go + * @flags: tunable options, check AT24_FLAG_* defines + * @setup: an optional callback invoked after eeprom is probed; enables kernel + * code to access eeprom via memory_accessor, see example + * @context: optional parameter passed to setup() + * + * If you set up a custom eeprom type, please double-check the parameters. + * Especially page_size needs extra care, as you risk data loss if your value + * is bigger than what the chip actually supports! + * + * An example in pseudo code for a setup() callback: + * + * void get_mac_addr(struct memory_accessor *mem_acc, void *context) + * { + * u8 *mac_addr = ethernet_pdata->mac_addr; + * off_t offset = context; + * + * // Read MAC addr from EEPROM + * if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN) + * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); + * } + * + * This function pointer and context can now be set up in at24_platform_data.
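Building on the at24 documentation above, a hedged sketch of the board-file side. The bus number, I2C address, "24c64" device type, page size and the 0x40 context offset are all hypothetical board choices, and get_mac_addr() here is just a placeholder standing in for the callback sketched in pseudo code above:

#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/init.h>

/* Placeholder for the setup() callback sketched in pseudo code above. */
static void get_mac_addr(struct memory_accessor *mem_acc, void *context)
{
	/* read the MAC address as shown in the comment above */
}

static struct at24_platform_data board_eeprom_pdata = {
	.byte_len  = 8192,		/* hypothetical 24c64 part (64 kbit) */
	.page_size = 32,
	.flags     = AT24_FLAG_ADDR16,	/* 16-bit address pointer */
	.setup     = get_mac_addr,
	.context   = (void *)0x40,	/* hypothetical offset of the MAC record */
};

static struct i2c_board_info board_i2c_bus0[] __initdata = {
	{
		I2C_BOARD_INFO("24c64", 0x50),	/* hypothetical I2C address */
		.platform_data = &board_eeprom_pdata,
	},
};

/* Board init code would then register this, for example with:
 *	i2c_register_board_info(0, board_i2c_bus0, ARRAY_SIZE(board_i2c_bus0));
 */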
+ */ + +struct at24_platform_data { + u32 byte_len; /* size (sum of all addr) */ + u16 page_size; /* for writes */ + u8 flags; +#define AT24_FLAG_ADDR16 0x80 /* address pointer is 16 bit */ +#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ +#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ +#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ + + void (*setup)(struct memory_accessor *, void *context); + void *context; +}; + +#endif /* _LINUX_AT24_H */ diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/i2c/atmel_mxt_ts.h new file mode 100644 index 00000000..f027f7a6 --- /dev/null +++ b/include/linux/i2c/atmel_mxt_ts.h @@ -0,0 +1,44 @@ +/* + * Atmel maXTouch Touchscreen driver + * + * Copyright (C) 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim <jy0922.shim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_ATMEL_MXT_TS_H +#define __LINUX_ATMEL_MXT_TS_H + +#include <linux/types.h> + +/* Orient */ +#define MXT_NORMAL 0x0 +#define MXT_DIAGONAL 0x1 +#define MXT_HORIZONTAL_FLIP 0x2 +#define MXT_ROTATED_90_COUNTER 0x3 +#define MXT_VERTICAL_FLIP 0x4 +#define MXT_ROTATED_90 0x5 +#define MXT_ROTATED_180 0x6 +#define MXT_DIAGONAL_COUNTER 0x7 + +/* The platform data for the Atmel maXTouch touchscreen driver */ +struct mxt_platform_data { + const u8 *config; + size_t config_length; + + unsigned int x_line; + unsigned int y_line; + unsigned int x_size; + unsigned int y_size; + unsigned int blen; + unsigned int threshold; + unsigned int voltage; + unsigned char orient; + unsigned long irqflags; +}; + +#endif /* __LINUX_ATMEL_MXT_TS_H */ diff --git a/include/linux/i2c/bh1770glc.h b/include/linux/i2c/bh1770glc.h new file mode 100644 index 00000000..8b5e2df3 --- /dev/null +++ b/include/linux/i2c/bh1770glc.h @@ -0,0 +1,53 @@ +/* + * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __BH1770_H__ +#define __BH1770_H__ + +/** + * struct bh1770_platform_data - platform data for bh1770glc driver + * @led_def_curr: IR led driving current. + * @glass_attenuation: Attenuation factor for covering window. + * @setup_resources: Call back for interrupt line setup function + * @release_resources: Call back for interrupte line release function + * + * Example of glass attenuation: 16384 * 385 / 100 means attenuation factor + * of 3.85. i.e. 
light_above_sensor = light_above_cover_window / 3.85 + */ + +struct bh1770_platform_data { +#define BH1770_LED_5mA 0 +#define BH1770_LED_10mA 1 +#define BH1770_LED_20mA 2 +#define BH1770_LED_50mA 3 +#define BH1770_LED_100mA 4 +#define BH1770_LED_150mA 5 +#define BH1770_LED_200mA 6 + __u8 led_def_curr; +#define BH1770_NEUTRAL_GA 16384 /* 16384 / 16384 = 1 */ + __u32 glass_attenuation; + int (*setup_resources)(void); + int (*release_resources)(void); +}; +#endif diff --git a/include/linux/i2c/dm355evm_msp.h b/include/linux/i2c/dm355evm_msp.h new file mode 100644 index 00000000..37247035 --- /dev/null +++ b/include/linux/i2c/dm355evm_msp.h @@ -0,0 +1,79 @@ +/* + * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board + */ +#ifndef __LINUX_I2C_DM355EVM_MSP +#define __LINUX_I2C_DM355EVM_MSP + +/* + * Written against Spectrum's writeup for the A4 firmware revision, + * and tweaked to match source and rev D2 schematics by removing CPLD + * and NOR flash hooks (which were last appropriate in rev B boards). + * + * Note that the firmware supports a flavor of write posting ... to be + * sure a write completes, issue another read or write. + */ + +/* utilities to access "registers" emulated by msp430 firmware */ +extern int dm355evm_msp_write(u8 value, u8 reg); +extern int dm355evm_msp_read(u8 reg); + + +/* command/control registers */ +#define DM355EVM_MSP_COMMAND 0x00 +# define MSP_COMMAND_NULL 0 +# define MSP_COMMAND_RESET_COLD 1 +# define MSP_COMMAND_RESET_WARM 2 +# define MSP_COMMAND_RESET_WARM_I 3 +# define MSP_COMMAND_POWEROFF 4 +# define MSP_COMMAND_IR_REINIT 5 +#define DM355EVM_MSP_STATUS 0x01 +# define MSP_STATUS_BAD_OFFSET BIT(0) +# define MSP_STATUS_BAD_COMMAND BIT(1) +# define MSP_STATUS_POWER_ERROR BIT(2) +# define MSP_STATUS_RXBUF_OVERRUN BIT(3) +#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */ +# define MSP_RESET_DC5 BIT(0) +# define MSP_RESET_TVP5154 BIT(2) +# define MSP_RESET_IMAGER BIT(3) +# define MSP_RESET_ETHERNET BIT(4) +# define MSP_RESET_SYS BIT(5) +# define MSP_RESET_AIC33 BIT(7) + +/* GPIO registers ... 
bit patterns mostly match the source MSP ports */ +#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */ +#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */ +# define MSP_SWITCH1_SW6_1 BIT(0) +# define MSP_SWITCH1_SW6_2 BIT(1) +# define MSP_SWITCH1_SW6_3 BIT(2) +# define MSP_SWITCH1_SW6_4 BIT(3) +# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */ +# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */ +#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */ +# define MSP_SWITCH2_SW10 BIT(3) +# define MSP_SWITCH2_SW11 BIT(4) +# define MSP_SWITCH2_SW12 BIT(5) +# define MSP_SWITCH2_SW13 BIT(6) +# define MSP_SWITCH2_SW14 BIT(7) +#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */ +# define MSP_SDMMC_0_WP BIT(1) +# define MSP_SDMMC_0_CD BIT(2) /* active low */ +# define MSP_SDMMC_1_WP BIT(3) +# define MSP_SDMMC_1_CD BIT(4) /* active low */ +#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */ +#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */ +# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */ + +/* power supply registers are currently omitted */ + +/* RTC registers */ +#define DM355EVM_MSP_RTC_0 0x12 /* LSB */ +#define DM355EVM_MSP_RTC_1 0x13 +#define DM355EVM_MSP_RTC_2 0x14 +#define DM355EVM_MSP_RTC_3 0x15 /* MSB */ + +/* input event queue registers; code == ((HIGH << 8) | LOW) */ +#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */ +#define DM355EVM_MSP_INPUT_HIGH 0x17 +#define DM355EVM_MSP_INPUT_LOW 0x18 + +#endif /* __LINUX_I2C_DM355EVM_MSP */ diff --git a/include/linux/i2c/ds620.h b/include/linux/i2c/ds620.h new file mode 100644 index 00000000..736bb87a --- /dev/null +++ b/include/linux/i2c/ds620.h @@ -0,0 +1,21 @@ +#ifndef _LINUX_DS620_H +#define _LINUX_DS620_H + +#include <linux/types.h> +#include <linux/i2c.h> + +/* platform data for the DS620 temperature sensor and thermostat */ + +struct ds620_platform_data { + /* + * Thermostat output pin PO mode: + * 0 = always low (default) + * 1 = PO_LOW + * 2 = PO_HIGH + * + * (see Documentation/hwmon/ds620) + */ + int pomode; +}; + +#endif /* _LINUX_DS620_H */ diff --git a/include/linux/i2c/i2c-sh_mobile.h b/include/linux/i2c/i2c-sh_mobile.h new file mode 100644 index 00000000..beda7081 --- /dev/null +++ b/include/linux/i2c/i2c-sh_mobile.h @@ -0,0 +1,10 @@ +#ifndef __I2C_SH_MOBILE_H__ +#define __I2C_SH_MOBILE_H__ + +#include <linux/platform_device.h> + +struct i2c_sh_mobile_platform_data { + unsigned long bus_speed; +}; + +#endif /* __I2C_SH_MOBILE_H__ */ diff --git a/include/linux/i2c/lm8323.h b/include/linux/i2c/lm8323.h new file mode 100644 index 00000000..478d668b --- /dev/null +++ b/include/linux/i2c/lm8323.h @@ -0,0 +1,46 @@ +/* + * lm8323.h - Configuration for LM8323 keypad driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation (version 2 of the License only). + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
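Stepping back to the DM355EVM MSP helpers declared above: dm355evm_msp_read() and dm355evm_msp_write() access the registers emulated by the MSP430 firmware, and because the firmware posts writes, issuing a following read is what guarantees completion. A small sketch using only names from that header; error handling is trimmed for brevity:

#include <linux/kernel.h>
#include <linux/i2c/dm355evm_msp.h>

static void dm355evm_msp_demo(void)
{
	int sw;

	/* The LED register is active low, so 0x00 lights all eight LEDs. */
	dm355evm_msp_write(0x00, DM355EVM_MSP_LED);

	/* Reading back any register also flushes the posted write. */
	sw = dm355evm_msp_read(DM355EVM_MSP_SWITCH1);
	if (sw >= 0 && (sw & MSP_SWITCH1_SW6_1))
		pr_info("DM355 EVM: option switch SW6-1 is on\n");
}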
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_LM8323_H +#define __LINUX_LM8323_H + +#include <linux/types.h> + +/* + * Largest keycode that the chip can send, plus one, + * so keys can be mapped directly at the index of the + * LM8323 keycode instead of subtracting one. + */ +#define LM8323_KEYMAP_SIZE (0x7f + 1) + +#define LM8323_NUM_PWMS 3 + +struct lm8323_platform_data { + int debounce_time; /* Time to watch for key bouncing, in ms. */ + int active_time; /* Idle time until sleep, in ms. */ + + int size_x; + int size_y; + bool repeat; + const unsigned short *keymap; + + const char *pwm_names[LM8323_NUM_PWMS]; + + const char *name; /* Device name. */ +}; + +#endif /* __LINUX_LM8323_H */ diff --git a/include/linux/i2c/ltc4245.h b/include/linux/i2c/ltc4245.h new file mode 100644 index 00000000..56bda4be --- /dev/null +++ b/include/linux/i2c/ltc4245.h @@ -0,0 +1,21 @@ +/* + * Platform Data for LTC4245 hardware monitor chip + * + * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_LTC4245_H +#define LINUX_LTC4245_H + +#include <linux/types.h> + +struct ltc4245_platform_data { + bool use_extra_gpios; +}; + +#endif /* LINUX_LTC4245_H */ diff --git a/include/linux/i2c/max6639.h b/include/linux/i2c/max6639.h new file mode 100644 index 00000000..6011c420 --- /dev/null +++ b/include/linux/i2c/max6639.h @@ -0,0 +1,14 @@ +#ifndef _LINUX_MAX6639_H +#define _LINUX_MAX6639_H + +#include <linux/types.h> + +/* platform data for the MAX6639 temperature sensor and fan control */ + +struct max6639_platform_data { + bool pwm_polarity; /* Polarity low (0) or high (1, default) */ + int ppr; /* Pulses per rotation 1..4 (default == 2) */ + int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */ +}; + +#endif /* _LINUX_MAX6639_H */ diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h new file mode 100644 index 00000000..c04bac8b --- /dev/null +++ b/include/linux/i2c/max732x.h @@ -0,0 +1,22 @@ +#ifndef __LINUX_I2C_MAX732X_H +#define __LINUX_I2C_MAX732X_H + +/* platform data for the MAX732x 8/16-bit I/O expander driver */ + +struct max732x_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + /* interrupt base */ + int irq_base; + + void *context; /* param to setup/teardown */ + + int (*setup)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); +}; +#endif /* __LINUX_I2C_MAX732X_H */ diff --git a/include/linux/i2c/mcs.h b/include/linux/i2c/mcs.h new file mode 100644 index 00000000..61bb18a4 --- /dev/null +++ b/include/linux/i2c/mcs.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim <jy0922.shim@samsung.com> + * Author: HeungJun Kim <riverful.kim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
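Referring back to lm8323.h above: because LM8323_KEYMAP_SIZE is the largest chip keycode plus one, board code can index the keymap directly by the keycode the chip reports. A hypothetical sketch; the matrix size, timings and key assignments are made up for illustration:

#include <linux/input.h>
#include <linux/i2c/lm8323.h>

static const unsigned short board_lm8323_keymap[LM8323_KEYMAP_SIZE] = {
	[0x01] = KEY_UP,	/* chip keycode 0x01 */
	[0x02] = KEY_DOWN,
	[0x03] = KEY_ENTER,
};

static struct lm8323_platform_data board_lm8323_pdata = {
	.debounce_time = 12,	/* ms */
	.active_time   = 500,	/* ms of idle before the chip sleeps */
	.size_x        = 4,	/* hypothetical 4x4 matrix */
	.size_y        = 4,
	.repeat        = false,
	.keymap        = board_lm8323_keymap,
	.name          = "board-keypad",
};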
+ * + */ + +#ifndef __LINUX_MCS_H +#define __LINUX_MCS_H + +#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff)) +#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff) +#define MCS_KEY_CODE(v) ((v) & 0xffff) + +struct mcs_platform_data { + void (*poweron)(bool); + void (*cfg_pin)(void); + + /* touchscreen */ + unsigned int x_size; + unsigned int y_size; + + /* touchkey */ + const u32 *keymap; + unsigned int keymap_size; + unsigned int key_maxval; + bool no_autorepeat; +}; + +#endif /* __LINUX_MCS_H */ diff --git a/include/linux/i2c/mpr121_touchkey.h b/include/linux/i2c/mpr121_touchkey.h new file mode 100644 index 00000000..f0bcc38b --- /dev/null +++ b/include/linux/i2c/mpr121_touchkey.h @@ -0,0 +1,20 @@ +/* Header file for Freescale MPR121 Capacitive Touch Sensor */ + +#ifndef _MPR121_TOUCHKEY_H +#define _MPR121_TOUCHKEY_H + +/** + * struct mpr121_platform_data - platform data for mpr121 sensor + * @keymap: pointer to array of KEY_* values representing keymap + * @keymap_size: size of the keymap + * @wakeup: configure the button as a wake-up source + * @vdd_uv: VDD voltage in uV + */ +struct mpr121_platform_data { + const unsigned short *keymap; + unsigned int keymap_size; + bool wakeup; + int vdd_uv; +}; + +#endif /* _MPR121_TOUCHKEY_H */ diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h new file mode 100644 index 00000000..139ba526 --- /dev/null +++ b/include/linux/i2c/pca953x.h @@ -0,0 +1,30 @@ +#ifndef _LINUX_PCA953X_H +#define _LINUX_PCA953X_H + +#include <linux/types.h> +#include <linux/i2c.h> + +/* platform data for the PCA9539 16-bit I/O expander driver */ + +struct pca953x_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + /* initial polarity inversion setting */ + uint16_t invert; + + /* interrupt base */ + int irq_base; + + void *context; /* param to setup/teardown */ + + int (*setup)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + const char *const *names; +}; + +#endif /* _LINUX_PCA953X_H */ diff --git a/include/linux/i2c/pca954x.h b/include/linux/i2c/pca954x.h new file mode 100644 index 00000000..28f1f8d5 --- /dev/null +++ b/include/linux/i2c/pca954x.h @@ -0,0 +1,47 @@ +/* + * + * pca954x.h - I2C multiplexer/switch support + * + * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it> + * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it> + * Michael Lawnick <michael.lawnick.ext@nsn.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#ifndef _LINUX_I2C_PCA954X_H +#define _LINUX_I2C_PCA954X_H + +/* Platform data for the PCA954x I2C multiplexers */ + +/* Per channel initialisation data: + * @adap_id: bus number for the adapter. 
0 = don't care + * @deselect_on_exit: set this entry to 1, if your H/W needs deselection + * of this channel after transaction. + * + */ +struct pca954x_platform_mode { + int adap_id; + unsigned int deselect_on_exit:1; +}; + +/* Per mux/switch data, used with i2c_register_board_info */ +struct pca954x_platform_data { + struct pca954x_platform_mode *modes; + int num_modes; +}; + +#endif /* _LINUX_I2C_PCA954X_H */ diff --git a/include/linux/i2c/pcf857x.h b/include/linux/i2c/pcf857x.h new file mode 100644 index 00000000..0767a2a6 --- /dev/null +++ b/include/linux/i2c/pcf857x.h @@ -0,0 +1,44 @@ +#ifndef __LINUX_PCF857X_H +#define __LINUX_PCF857X_H + +/** + * struct pcf857x_platform_data - data to set up pcf857x driver + * @gpio_base: number of the chip's first GPIO + * @n_latch: optional bit-inverse of initial register value; if + * you leave this initialized to zero the driver will act + * like the chip was just reset + * @setup: optional callback issued once the GPIOs are valid + * @teardown: optional callback issued before the GPIOs are invalidated + * @context: optional parameter passed to setup() and teardown() + * + * In addition to the I2C_BOARD_INFO() state appropriate to each chip, + * the i2c_board_info used with the pcf857x driver must provide its + * platform_data (pointer to one of these structures) with at least + * the gpio_base value initialized. + * + * The @setup callback may be used with the kind of board-specific glue + * which hands the (now-valid) GPIOs to other drivers, or which puts + * devices in their initial states using these GPIOs. + * + * These GPIO chips are only "quasi-bidirectional"; read the chip specs + * to understand the behavior. They don't have separate registers to + * record which pins are used for input or output, record which output + * values are driven, or provide access to input values. That must be + * inferred by reading the chip's value and knowing the last value written + * to it. If you leave n_latch initialized to zero, that last written + * value is presumed to be all ones (as if the chip were just reset). + */ +struct pcf857x_platform_data { + unsigned gpio_base; + unsigned n_latch; + + int (*setup)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + void *context; +}; + +#endif /* __LINUX_PCF857X_H */ diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h new file mode 100644 index 00000000..69280db0 --- /dev/null +++ b/include/linux/i2c/pmbus.h @@ -0,0 +1,45 @@ +/* + * Hardware monitoring driver for PMBus devices + * + * Copyright (c) 2010, 2011 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
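To make the pcf857x n_latch rule above concrete, a hypothetical example: suppose the bootloader leaves P0 and P1 driven low (active-low resets) on an 8-bit part, so the register would read back 0xFC at probe time; n_latch is then its bit-inverse. The GPIO base is likewise a made-up board choice:

#include <linux/i2c/pcf857x.h>

static struct pcf857x_platform_data board_pcf8574_pdata = {
	.gpio_base = 160,	/* hypothetical start of this chip's GPIO range */
	.n_latch   = 0x03,	/* bit-inverse of the 0xFC the chip reads back */
};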
+ */ + +#ifndef _PMBUS_H_ +#define _PMBUS_H_ + +/* flags */ + +/* + * PMBUS_SKIP_STATUS_CHECK + * + * During register detection, skip checking the status register for + * communication or command errors. + * + * Some PMBus chips respond with valid data when trying to read an unsupported + * register. For such chips, checking the status register is mandatory when + * trying to determine if a chip register exists or not. + * Other PMBus chips don't support the STATUS_CML register, or report + * communication errors for no explicable reason. For such chips, checking + * the status register must be disabled. + */ +#define PMBUS_SKIP_STATUS_CHECK (1 << 0) + +struct pmbus_platform_data { + u32 flags; /* Device specific flags */ +}; + +#endif /* _PMBUS_H_ */ diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/i2c/pxa-i2c.h new file mode 100644 index 00000000..1a9f65e6 --- /dev/null +++ b/include/linux/i2c/pxa-i2c.h @@ -0,0 +1,82 @@ +/* + * i2c_pxa.h + * + * Copyright (C) 2002 Intrinsyc Software Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef _I2C_PXA_H_ +#define _I2C_PXA_H_ + +#if 0 +#define DEF_TIMEOUT 3 +#else +/* need a longer timeout if we're dealing with the fact we may well be + * looking at a multi-master environment +*/ +#define DEF_TIMEOUT 32 +#endif + +#define BUS_ERROR (-EREMOTEIO) +#define XFER_NAKED (-ECONNREFUSED) +#define I2C_RETRY (-2000) /* an error has occurred retry transmit */ + +/* ICR initialize bit values +* +* 15. FM 0 (100 Khz operation) +* 14. UR 0 (No unit reset) +* 13. SADIE 0 (Disables the unit from interrupting on slave addresses +* matching its slave address) +* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration +* in master mode) +* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) +* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) +* 9. IRFIE 1 (Enable interrupts from full buffer received) +* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) +* 7. GCD 1 (Disables i2c unit response to general call messages as a slave) +* 6. IUE 0 (Disable unit until we change settings) +* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL) +* 4. MA 0 (Only send stop with the ICR stop bit) +* 3. TB 0 (We are not transmitting a byte initially) +* 2. ACKNAK 0 (Send an ACK after the unit receives a byte) +* 1. STOP 0 (Do not send a STOP) +* 0. START 0 (Do not send a START) +* +*/ +#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) + +/* I2C status register init values + * + * 10. BED 1 (Clear bus error detected) + * 9. SAD 1 (Clear slave address detected) + * 7. IRF 1 (Clear IDBR Receive Full) + * 6. ITE 1 (Clear IDBR Transmit Empty) + * 5. ALD 1 (Clear Arbitration Loss Detected) + * 4. 
SSD 1 (Clear Slave Stop Detected) + */ +#define I2C_ISR_INIT 0x7FF /* status register init */ + +struct i2c_slave_client; + +struct i2c_pxa_platform_data { + unsigned int slave_addr; + struct i2c_slave_client *slave; + unsigned int class; + unsigned int use_pio :1; + unsigned int fast_mode :1; +}; + +extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info); + +#ifdef CONFIG_PXA27x +extern void pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info); +#endif + +#ifdef CONFIG_PXA3xx +extern void pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info); +#endif + +#endif diff --git a/include/linux/i2c/s6000.h b/include/linux/i2c/s6000.h new file mode 100644 index 00000000..d9b34bfd --- /dev/null +++ b/include/linux/i2c/s6000.h @@ -0,0 +1,10 @@ +#ifndef __LINUX_I2C_S6000_H +#define __LINUX_I2C_S6000_H + +struct s6_i2c_platform_data { + const char *clock; /* the clock to use */ + int bus_num; /* the bus number to register */ +}; + +#endif + diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h new file mode 100644 index 00000000..52baa79d --- /dev/null +++ b/include/linux/i2c/sx150x.h @@ -0,0 +1,82 @@ +/* + * Driver for the Semtech SX150x I2C GPIO Expanders + * + * Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ +#ifndef __LINUX_I2C_SX150X_H +#define __LINUX_I2C_SX150X_H + +/** + * struct sx150x_platform_data - config data for SX150x driver + * @gpio_base: The index number of the first GPIO assigned to this + * GPIO expander. The expander will create a block of + * consecutively numbered gpios beginning at the given base, + * with the size of the block depending on the model of the + * expander chip. + * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO + * instead of as an oscillator, increasing the size of the + * GP(I)O pool created by this expander by one. The + * output-only GPO pin will be added at the end of the block. + * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor + * for each IO line in the expander. Setting the bit at + * position n will enable the pull-up for the IO at + * the corresponding offset. For chips with fewer than + * 16 IO pins, high-end bits are ignored. + * @io_pulldn_ena: A bit-mask which enables-or disables the pull-down + * resistor for each IO line in the expander. Setting the + * bit at position n will enable the pull-down for the IO at + * the corresponding offset. For chips with fewer than + * 16 IO pins, high-end bits are ignored. + * @io_open_drain_ena: A bit-mask which enables-or disables open-drain + * operation for each IO line in the expander. Setting the + * bit at position n enables open-drain operation for + * the IO at the corresponding offset. Clearing the bit + * enables regular push-pull operation for that IO. 
+ * For chips with fewer than 16 IO pins, high-end bits + * are ignored. + * @io_polarity: A bit-mask which enables polarity inversion for each IO line + * in the expander. Setting the bit at position n inverts + * the polarity of that IO line, while clearing it results + * in normal polarity. For chips with fewer than 16 IO pins, + * high-end bits are ignored. + * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line + * is connected, via which it reports interrupt events + * across all GPIO lines. This must be a real, + * pre-existing IRQ line. + * Setting this value < 0 disables the irq_chip functionality + * of the driver. + * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based + * IRQ lines will appear. Similarly to gpio_base, the expander + * will create a block of irqs beginning at this number. + * This value is ignored if irq_summary is < 0. + * @reset_during_probe: If set to true, the driver will trigger a full + * reset of the chip at the beginning of the probe + * in order to place it in a known state. + */ +struct sx150x_platform_data { + unsigned gpio_base; + bool oscio_is_gpo; + u16 io_pullup_ena; + u16 io_pulldn_ena; + u16 io_open_drain_ena; + u16 io_polarity; + int irq_summary; + unsigned irq_base; + bool reset_during_probe; +}; + +#endif /* __LINUX_I2C_SX150X_H */ diff --git a/include/linux/i2c/tc35876x.h b/include/linux/i2c/tc35876x.h new file mode 100644 index 00000000..cd6a51c7 --- /dev/null +++ b/include/linux/i2c/tc35876x.h @@ -0,0 +1,11 @@ + +#ifndef _TC35876X_H +#define _TC35876X_H + +struct tc35876x_platform_data { + int gpio_bridge_reset; + int gpio_panel_bl_en; + int gpio_panel_vadd; +}; + +#endif /* _TC35876X_H */ diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h new file mode 100644 index 00000000..08aa9227 --- /dev/null +++ b/include/linux/i2c/tps65010.h @@ -0,0 +1,205 @@ +/* linux/i2c/tps65010.h + * + * Functions to access TPS65010 power management device. + * + * Copyright (C) 2004 Dirk Behme <dirk.behme@de.bosch.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
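Looking back at the sx150x_platform_data documentation above, a brief sketch of how the masks and IRQ fields fit together; every number here (GPIO base, IRQ numbers, pin masks) is a hypothetical board choice rather than anything defined by the header:

#include <linux/i2c/sx150x.h>

static struct sx150x_platform_data board_sx150x_pdata = {
	.gpio_base          = 200,
	.oscio_is_gpo       = false,
	.io_pullup_ena      = 0x000f,	/* pull-ups on IO0..IO3 */
	.io_pulldn_ena      = 0x0000,
	.io_open_drain_ena  = 0x0010,	/* IO4 drives an open-drain signal */
	.io_polarity        = 0x0000,	/* no polarity inversion */
	.irq_summary        = 17,	/* hypothetical summary IRQ line */
	.irq_base           = 240,	/* hypothetical first virtual IRQ */
	.reset_during_probe = true,
};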
+ */ + +#ifndef __LINUX_I2C_TPS65010_H +#define __LINUX_I2C_TPS65010_H + +/* + * ---------------------------------------------------------------------------- + * Registers, all 8 bits + * ---------------------------------------------------------------------------- + */ + +#define TPS_CHGSTATUS 0x01 +# define TPS_CHG_USB (1 << 7) +# define TPS_CHG_AC (1 << 6) +# define TPS_CHG_THERM (1 << 5) +# define TPS_CHG_TERM (1 << 4) +# define TPS_CHG_TAPER_TMO (1 << 3) +# define TPS_CHG_CHG_TMO (1 << 2) +# define TPS_CHG_PRECHG_TMO (1 << 1) +# define TPS_CHG_TEMP_ERR (1 << 0) +#define TPS_REGSTATUS 0x02 +# define TPS_REG_ONOFF (1 << 7) +# define TPS_REG_COVER (1 << 6) +# define TPS_REG_UVLO (1 << 5) +# define TPS_REG_NO_CHG (1 << 4) /* tps65013 */ +# define TPS_REG_PG_LD02 (1 << 3) +# define TPS_REG_PG_LD01 (1 << 2) +# define TPS_REG_PG_MAIN (1 << 1) +# define TPS_REG_PG_CORE (1 << 0) +#define TPS_MASK1 0x03 +#define TPS_MASK2 0x04 +#define TPS_ACKINT1 0x05 +#define TPS_ACKINT2 0x06 +#define TPS_CHGCONFIG 0x07 +# define TPS_CHARGE_POR (1 << 7) /* 65010/65012 */ +# define TPS65013_AUA (1 << 7) /* 65011/65013 */ +# define TPS_CHARGE_RESET (1 << 6) +# define TPS_CHARGE_FAST (1 << 5) +# define TPS_CHARGE_CURRENT (3 << 3) +# define TPS_VBUS_500MA (1 << 2) +# define TPS_VBUS_CHARGING (1 << 1) +# define TPS_CHARGE_ENABLE (1 << 0) +#define TPS_LED1_ON 0x08 +#define TPS_LED1_PER 0x09 +#define TPS_LED2_ON 0x0a +#define TPS_LED2_PER 0x0b +#define TPS_VDCDC1 0x0c +# define TPS_ENABLE_LP (1 << 3) +#define TPS_VDCDC2 0x0d +# define TPS_LP_COREOFF (1 << 7) +# define TPS_VCORE_1_8V (7<<4) +# define TPS_VCORE_1_5V (6 << 4) +# define TPS_VCORE_1_4V (5 << 4) +# define TPS_VCORE_1_3V (4 << 4) +# define TPS_VCORE_1_2V (3 << 4) +# define TPS_VCORE_1_1V (2 << 4) +# define TPS_VCORE_1_0V (1 << 4) +# define TPS_VCORE_0_85V (0 << 4) +# define TPS_VCORE_LP_1_2V (3 << 2) +# define TPS_VCORE_LP_1_1V (2 << 2) +# define TPS_VCORE_LP_1_0V (1 << 2) +# define TPS_VCORE_LP_0_85V (0 << 2) +# define TPS_VIB (1 << 1) +# define TPS_VCORE_DISCH (1 << 0) +#define TPS_VREGS1 0x0e +# define TPS_LDO2_ENABLE (1 << 7) +# define TPS_LDO2_OFF (1 << 6) +# define TPS_VLDO2_3_0V (3 << 4) +# define TPS_VLDO2_2_75V (2 << 4) +# define TPS_VLDO2_2_5V (1 << 4) +# define TPS_VLDO2_1_8V (0 << 4) +# define TPS_LDO1_ENABLE (1 << 3) +# define TPS_LDO1_OFF (1 << 2) +# define TPS_VLDO1_3_0V (3 << 0) +# define TPS_VLDO1_2_75V (2 << 0) +# define TPS_VLDO1_2_5V (1 << 0) +# define TPS_VLDO1_ADJ (0 << 0) +#define TPS_MASK3 0x0f +#define TPS_DEFGPIO 0x10 + +/* + * ---------------------------------------------------------------------------- + * Macros used by exported functions + * ---------------------------------------------------------------------------- + */ + +#define LED1 1 +#define LED2 2 +#define OFF 0 +#define ON 1 +#define BLINK 2 +#define GPIO1 1 +#define GPIO2 2 +#define GPIO3 3 +#define GPIO4 4 +#define LOW 0 +#define HIGH 1 + +/* + * ---------------------------------------------------------------------------- + * Exported functions + * ---------------------------------------------------------------------------- + */ + +/* Draw from VBUS: + * 0 mA -- DON'T DRAW (might supply power instead) + * 100 mA -- usb unit load (slowest charge rate) + * 500 mA -- usb high power (fast battery charge) + */ +extern int tps65010_set_vbus_draw(unsigned mA); + +/* tps65010_set_gpio_out_value parameter: + * gpio: GPIO1, GPIO2, GPIO3 or GPIO4 + * value: LOW or HIGH + */ +extern int tps65010_set_gpio_out_value(unsigned gpio, unsigned value); + +/* tps65010_set_led parameter: 
+ * led: LED1 or LED2 + * mode: ON, OFF or BLINK + */ +extern int tps65010_set_led(unsigned led, unsigned mode); + +/* tps65010_set_vib parameter: + * value: ON or OFF + */ +extern int tps65010_set_vib(unsigned value); + +/* tps65010_set_low_pwr parameter: + * mode: ON or OFF + */ +extern int tps65010_set_low_pwr(unsigned mode); + +/* tps65010_config_vregs1 parameter: + * value to be written to VREGS1 register + * Note: The complete register is written, set all bits you need + */ +extern int tps65010_config_vregs1(unsigned value); + +/* tps65013_set_low_pwr parameter: + * mode: ON or OFF + */ +extern int tps65013_set_low_pwr(unsigned mode); + +/* tps65010_set_vdcdc2 + * value to be written to VDCDC2 + */ +extern int tps65010_config_vdcdc2(unsigned value); + +struct i2c_client; + +/** + * struct tps65010_board - packages GPIO and LED lines + * @base: the GPIO number to assign to GPIO-1 + * @outmask: bit (N-1) is set to allow GPIO-N to be used as an + * (open drain) output + * @setup: optional callback issued once the GPIOs are valid + * @teardown: optional callback issued before the GPIOs are invalidated + * @context: optional parameter passed to setup() and teardown() + * + * Board data may be used to package the GPIO (and LED) lines for use + * in by the generic GPIO and LED frameworks. The first four GPIOs + * starting at gpio_base are GPIO1..GPIO4. The next two are LED1/nPG + * and LED2 (with hardware blinking capability, not currently exposed). + * + * The @setup callback may be used with the kind of board-specific glue + * which hands the (now-valid) GPIOs to other drivers, or which puts + * devices in their initial states using these GPIOs. + */ +struct tps65010_board { + int base; + unsigned outmask; + + int (*setup)(struct i2c_client *client, void *context); + int (*teardown)(struct i2c_client *client, void *context); + void *context; +}; + +#endif /* __LINUX_I2C_TPS65010_H */ + diff --git a/include/linux/i2c/tsc2007.h b/include/linux/i2c/tsc2007.h new file mode 100644 index 00000000..506a9f7a --- /dev/null +++ b/include/linux/i2c/tsc2007.h @@ -0,0 +1,24 @@ +#ifndef __LINUX_I2C_TSC2007_H +#define __LINUX_I2C_TSC2007_H + +/* linux/i2c/tsc2007.h */ + +struct tsc2007_platform_data { + u16 model; /* 2007. */ + u16 x_plate_ohms; /* must be non-zero value */ + u16 max_rt; /* max. resistance above which samples are ignored */ + unsigned long poll_delay; /* delay (in ms) after pen-down event + before polling starts */ + unsigned long poll_period; /* time (in ms) between samples */ + int fuzzx; /* fuzz factor for X, Y and pressure axes */ + int fuzzy; + int fuzzz; + + int (*get_pendown_state)(void); + void (*clear_penirq)(void); /* If needed, clear 2nd level + interrupt source */ + int (*init_platform_hw)(void); + void (*exit_platform_hw)(void); +}; + +#endif diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h new file mode 100644 index 00000000..1f90de0c --- /dev/null +++ b/include/linux/i2c/twl.h @@ -0,0 +1,846 @@ +/* + * twl4030.h - header for TWL4030 PM and audio CODEC device + * + * Copyright (C) 2005-2006 Texas Instruments, Inc. + * + * Based on tlv320aic23.c: + * Copyright (c) by Kai Svahn <kai.svahn@nokia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
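Circling back to the tps65010 helpers declared above, a minimal usage sketch; the particular current, GPIO and LED choices are illustrative only, and the int return codes are ignored for brevity:

#include <linux/i2c/tps65010.h>

static void board_tps65010_demo(void)
{
	tps65010_set_vbus_draw(500);			/* usb high power */
	tps65010_set_gpio_out_value(GPIO2, HIGH);	/* drive GPIO2 high */
	tps65010_set_led(LED1, BLINK);			/* blink LED1 */
}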
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __TWL_H_ +#define __TWL_H_ + +#include <linux/types.h> +#include <linux/input/matrix_keypad.h> + +/* + * Using the twl4030 core we address registers using a pair + * { module id, relative register offset } + * which that core then maps to the relevant + * { i2c slave, absolute register address } + * + * The module IDs are meaningful only to the twl4030 core code, + * which uses them as array indices to look up the first register + * address each module uses within a given i2c slave. + */ + +/* Slave 0 (i2c address 0x48) */ +#define TWL4030_MODULE_USB 0x00 + +/* Slave 1 (i2c address 0x49) */ +#define TWL4030_MODULE_AUDIO_VOICE 0x01 +#define TWL4030_MODULE_GPIO 0x02 +#define TWL4030_MODULE_INTBR 0x03 +#define TWL4030_MODULE_PIH 0x04 +#define TWL4030_MODULE_TEST 0x05 + +/* Slave 2 (i2c address 0x4a) */ +#define TWL4030_MODULE_KEYPAD 0x06 +#define TWL4030_MODULE_MADC 0x07 +#define TWL4030_MODULE_INTERRUPTS 0x08 +#define TWL4030_MODULE_LED 0x09 +#define TWL4030_MODULE_MAIN_CHARGE 0x0A +#define TWL4030_MODULE_PRECHARGE 0x0B +#define TWL4030_MODULE_PWM0 0x0C +#define TWL4030_MODULE_PWM1 0x0D +#define TWL4030_MODULE_PWMA 0x0E +#define TWL4030_MODULE_PWMB 0x0F + +#define TWL5031_MODULE_ACCESSORY 0x10 +#define TWL5031_MODULE_INTERRUPTS 0x11 + +/* Slave 3 (i2c address 0x4b) */ +#define TWL4030_MODULE_BACKUP 0x12 +#define TWL4030_MODULE_INT 0x13 +#define TWL4030_MODULE_PM_MASTER 0x14 +#define TWL4030_MODULE_PM_RECEIVER 0x15 +#define TWL4030_MODULE_RTC 0x16 +#define TWL4030_MODULE_SECURED_REG 0x17 + +#define TWL_MODULE_USB TWL4030_MODULE_USB +#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE +#define TWL_MODULE_PIH TWL4030_MODULE_PIH +#define TWL_MODULE_MADC TWL4030_MODULE_MADC +#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE +#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER +#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER +#define TWL_MODULE_RTC TWL4030_MODULE_RTC +#define TWL_MODULE_PWM TWL4030_MODULE_PWM0 + +#define TWL6030_MODULE_ID0 0x0D +#define TWL6030_MODULE_ID1 0x0E +#define TWL6030_MODULE_ID2 0x0F + +#define GPIO_INTR_OFFSET 0 +#define KEYPAD_INTR_OFFSET 1 +#define BCI_INTR_OFFSET 2 +#define MADC_INTR_OFFSET 3 +#define USB_INTR_OFFSET 4 +#define CHARGERFAULT_INTR_OFFSET 5 +#define BCI_PRES_INTR_OFFSET 9 +#define USB_PRES_INTR_OFFSET 10 +#define RTC_INTR_OFFSET 11 + +/* + * Offset from TWL6030_IRQ_BASE / pdata->irq_base + */ +#define PWR_INTR_OFFSET 0 +#define HOTDIE_INTR_OFFSET 12 +#define SMPSLDO_INTR_OFFSET 13 +#define BATDETECT_INTR_OFFSET 14 +#define SIMDETECT_INTR_OFFSET 15 +#define MMCDETECT_INTR_OFFSET 16 +#define GASGAUGE_INTR_OFFSET 17 +#define USBOTG_INTR_OFFSET 4 +#define CHARGER_INTR_OFFSET 2 +#define RSV_INTR_OFFSET 0 + +/* INT register offsets */ +#define REG_INT_STS_A 0x00 +#define REG_INT_STS_B 0x01 +#define REG_INT_STS_C 0x02 + +#define REG_INT_MSK_LINE_A 0x03 +#define REG_INT_MSK_LINE_B 0x04 +#define REG_INT_MSK_LINE_C 0x05 + +#define REG_INT_MSK_STS_A 0x06 +#define REG_INT_MSK_STS_B 0x07 +#define REG_INT_MSK_STS_C 0x08 + +/* MASK INT REG GROUP A */ +#define 
TWL6030_PWR_INT_MASK 0x07 +#define TWL6030_RTC_INT_MASK 0x18 +#define TWL6030_HOTDIE_INT_MASK 0x20 +#define TWL6030_SMPSLDOA_INT_MASK 0xC0 + +/* MASK INT REG GROUP B */ +#define TWL6030_SMPSLDOB_INT_MASK 0x01 +#define TWL6030_BATDETECT_INT_MASK 0x02 +#define TWL6030_SIMDETECT_INT_MASK 0x04 +#define TWL6030_MMCDETECT_INT_MASK 0x08 +#define TWL6030_GPADC_INT_MASK 0x60 +#define TWL6030_GASGAUGE_INT_MASK 0x80 + +/* MASK INT REG GROUP C */ +#define TWL6030_USBOTG_INT_MASK 0x0F +#define TWL6030_CHARGER_CTRL_INT_MASK 0x10 +#define TWL6030_CHARGER_FAULT_INT_MASK 0x60 + +#define TWL6030_MMCCTRL 0xEE +#define VMMC_AUTO_OFF (0x1 << 3) +#define SW_FC (0x1 << 2) +#define STS_MMC 0x1 + +#define TWL6030_CFG_INPUT_PUPD3 0xF2 +#define MMC_PU (0x1 << 3) +#define MMC_PD (0x1 << 2) + +#define TWL_SIL_TYPE(rev) ((rev) & 0x00FFFFFF) +#define TWL_SIL_REV(rev) ((rev) >> 24) +#define TWL_SIL_5030 0x09002F +#define TWL5030_REV_1_0 0x00 +#define TWL5030_REV_1_1 0x10 +#define TWL5030_REV_1_2 0x30 + +#define TWL4030_CLASS_ID 0x4030 +#define TWL6030_CLASS_ID 0x6030 +unsigned int twl_rev(void); +#define GET_TWL_REV (twl_rev()) +#define TWL_CLASS_IS(class, id) \ +static inline int twl_class_is_ ##class(void) \ +{ \ + return ((id) == (GET_TWL_REV)) ? 1 : 0; \ +} + +TWL_CLASS_IS(4030, TWL4030_CLASS_ID) +TWL_CLASS_IS(6030, TWL6030_CLASS_ID) + +#define TWL6025_SUBCLASS BIT(4) /* TWL6025 has changed registers */ + +/* + * Read and write single 8-bit registers + */ +int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg); +int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg); + +/* + * Read and write several 8-bit registers at once. + * + * IMPORTANT: For twl_i2c_write(), allocate num_bytes + 1 + * for the value, and populate your data starting at offset 1. + */ +int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); +int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); + +int twl_get_type(void); +int twl_get_version(void); + +int twl6030_interrupt_unmask(u8 bit_mask, u8 offset); +int twl6030_interrupt_mask(u8 bit_mask, u8 offset); + +/* Card detect Configuration for MMC1 Controller on OMAP4 */ +#ifdef CONFIG_TWL4030_CORE +int twl6030_mmc_card_detect_config(void); +#else +static inline int twl6030_mmc_card_detect_config(void) +{ + pr_debug("twl6030_mmc_card_detect_config not supported\n"); + return 0; +} +#endif + +/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */ +#ifdef CONFIG_TWL4030_CORE +int twl6030_mmc_card_detect(struct device *dev, int slot); +#else +static inline int twl6030_mmc_card_detect(struct device *dev, int slot) +{ + pr_debug("Call back twl6030_mmc_card_detect not supported\n"); + return -EIO; +} +#endif +/*----------------------------------------------------------------------*/ + +/* + * NOTE: at up to 1024 registers, this is a big chip. + * + * Avoid putting register declarations in this file, instead of into + * a driver-private file, unless some of the registers in a block + * need to be shared with other drivers. One example is blocks that + * have Secondary IRQ Handler (SIH) registers. 
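As a concrete illustration of the twl_i2c_write() buffer rule noted above (allocate num_bytes + 1 and start the payload at offset 1, since byte 0 is used internally by the core), a hedged sketch; the register pair and the 0x07 direction value are hypothetical, and REG_GPIODATADIR1 is defined further down in this header:

#include <linux/i2c/twl.h>

static int board_twl_gpio_dir_demo(void)
{
	/* buf[0] is reserved for the core; buf[1..2] carry the two register values */
	u8 buf[3] = { 0, 0x07, 0x00 };

	return twl_i2c_write(TWL4030_MODULE_GPIO, buf, REG_GPIODATADIR1, 2);
}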
+ */ + +#define TWL4030_SIH_CTRL_EXCLEN_MASK BIT(0) +#define TWL4030_SIH_CTRL_PENDDIS_MASK BIT(1) +#define TWL4030_SIH_CTRL_COR_MASK BIT(2) + +/*----------------------------------------------------------------------*/ + +/* + * GPIO Block Register offsets (use TWL4030_MODULE_GPIO) + */ + +#define REG_GPIODATAIN1 0x0 +#define REG_GPIODATAIN2 0x1 +#define REG_GPIODATAIN3 0x2 +#define REG_GPIODATADIR1 0x3 +#define REG_GPIODATADIR2 0x4 +#define REG_GPIODATADIR3 0x5 +#define REG_GPIODATAOUT1 0x6 +#define REG_GPIODATAOUT2 0x7 +#define REG_GPIODATAOUT3 0x8 +#define REG_CLEARGPIODATAOUT1 0x9 +#define REG_CLEARGPIODATAOUT2 0xA +#define REG_CLEARGPIODATAOUT3 0xB +#define REG_SETGPIODATAOUT1 0xC +#define REG_SETGPIODATAOUT2 0xD +#define REG_SETGPIODATAOUT3 0xE +#define REG_GPIO_DEBEN1 0xF +#define REG_GPIO_DEBEN2 0x10 +#define REG_GPIO_DEBEN3 0x11 +#define REG_GPIO_CTRL 0x12 +#define REG_GPIOPUPDCTR1 0x13 +#define REG_GPIOPUPDCTR2 0x14 +#define REG_GPIOPUPDCTR3 0x15 +#define REG_GPIOPUPDCTR4 0x16 +#define REG_GPIOPUPDCTR5 0x17 +#define REG_GPIO_ISR1A 0x19 +#define REG_GPIO_ISR2A 0x1A +#define REG_GPIO_ISR3A 0x1B +#define REG_GPIO_IMR1A 0x1C +#define REG_GPIO_IMR2A 0x1D +#define REG_GPIO_IMR3A 0x1E +#define REG_GPIO_ISR1B 0x1F +#define REG_GPIO_ISR2B 0x20 +#define REG_GPIO_ISR3B 0x21 +#define REG_GPIO_IMR1B 0x22 +#define REG_GPIO_IMR2B 0x23 +#define REG_GPIO_IMR3B 0x24 +#define REG_GPIO_EDR1 0x28 +#define REG_GPIO_EDR2 0x29 +#define REG_GPIO_EDR3 0x2A +#define REG_GPIO_EDR4 0x2B +#define REG_GPIO_EDR5 0x2C +#define REG_GPIO_SIH_CTRL 0x2D + +/* Up to 18 signals are available as GPIOs, when their + * pins are not assigned to another use (such as ULPI/USB). + */ +#define TWL4030_GPIO_MAX 18 + +/*----------------------------------------------------------------------*/ + +/*Interface Bit Register (INTBR) offsets + *(Use TWL_4030_MODULE_INTBR) + */ + +#define REG_IDCODE_7_0 0x00 +#define REG_IDCODE_15_8 0x01 +#define REG_IDCODE_16_23 0x02 +#define REG_IDCODE_31_24 0x03 +#define REG_GPPUPDCTR1 0x0F +#define REG_UNLOCK_TEST_REG 0x12 + +/*I2C1 and I2C4(SR) SDA/SCL pull-up control bits */ + +#define I2C_SCL_CTRL_PU BIT(0) +#define I2C_SDA_CTRL_PU BIT(2) +#define SR_I2C_SCL_CTRL_PU BIT(4) +#define SR_I2C_SDA_CTRL_PU BIT(6) + +#define TWL_EEPROM_R_UNLOCK 0x49 + +/*----------------------------------------------------------------------*/ + +/* + * Keypad register offsets (use TWL4030_MODULE_KEYPAD) + * ... SIH/interrupt only + */ + +#define TWL4030_KEYPAD_KEYP_ISR1 0x11 +#define TWL4030_KEYPAD_KEYP_IMR1 0x12 +#define TWL4030_KEYPAD_KEYP_ISR2 0x13 +#define TWL4030_KEYPAD_KEYP_IMR2 0x14 +#define TWL4030_KEYPAD_KEYP_SIR 0x15 /* test register */ +#define TWL4030_KEYPAD_KEYP_EDR 0x16 +#define TWL4030_KEYPAD_KEYP_SIH_CTRL 0x17 + +/*----------------------------------------------------------------------*/ + +/* + * Multichannel ADC register offsets (use TWL4030_MODULE_MADC) + * ... 
SIH/interrupt only + */ + +#define TWL4030_MADC_ISR1 0x61 +#define TWL4030_MADC_IMR1 0x62 +#define TWL4030_MADC_ISR2 0x63 +#define TWL4030_MADC_IMR2 0x64 +#define TWL4030_MADC_SIR 0x65 /* test register */ +#define TWL4030_MADC_EDR 0x66 +#define TWL4030_MADC_SIH_CTRL 0x67 + +/*----------------------------------------------------------------------*/ + +/* + * Battery charger register offsets (use TWL4030_MODULE_INTERRUPTS) + */ + +#define TWL4030_INTERRUPTS_BCIISR1A 0x0 +#define TWL4030_INTERRUPTS_BCIISR2A 0x1 +#define TWL4030_INTERRUPTS_BCIIMR1A 0x2 +#define TWL4030_INTERRUPTS_BCIIMR2A 0x3 +#define TWL4030_INTERRUPTS_BCIISR1B 0x4 +#define TWL4030_INTERRUPTS_BCIISR2B 0x5 +#define TWL4030_INTERRUPTS_BCIIMR1B 0x6 +#define TWL4030_INTERRUPTS_BCIIMR2B 0x7 +#define TWL4030_INTERRUPTS_BCISIR1 0x8 /* test register */ +#define TWL4030_INTERRUPTS_BCISIR2 0x9 /* test register */ +#define TWL4030_INTERRUPTS_BCIEDR1 0xa +#define TWL4030_INTERRUPTS_BCIEDR2 0xb +#define TWL4030_INTERRUPTS_BCIEDR3 0xc +#define TWL4030_INTERRUPTS_BCISIHCTRL 0xd + +/*----------------------------------------------------------------------*/ + +/* + * Power Interrupt block register offsets (use TWL4030_MODULE_INT) + */ + +#define TWL4030_INT_PWR_ISR1 0x0 +#define TWL4030_INT_PWR_IMR1 0x1 +#define TWL4030_INT_PWR_ISR2 0x2 +#define TWL4030_INT_PWR_IMR2 0x3 +#define TWL4030_INT_PWR_SIR 0x4 /* test register */ +#define TWL4030_INT_PWR_EDR1 0x5 +#define TWL4030_INT_PWR_EDR2 0x6 +#define TWL4030_INT_PWR_SIH_CTRL 0x7 + +/*----------------------------------------------------------------------*/ + +/* + * Accessory Interrupts + */ +#define TWL5031_ACIIMR_LSB 0x05 +#define TWL5031_ACIIMR_MSB 0x06 +#define TWL5031_ACIIDR_LSB 0x07 +#define TWL5031_ACIIDR_MSB 0x08 +#define TWL5031_ACCISR1 0x0F +#define TWL5031_ACCIMR1 0x10 +#define TWL5031_ACCISR2 0x11 +#define TWL5031_ACCIMR2 0x12 +#define TWL5031_ACCSIR 0x13 +#define TWL5031_ACCEDR1 0x14 +#define TWL5031_ACCSIHCTRL 0x15 + +/*----------------------------------------------------------------------*/ + +/* + * Battery Charger Controller + */ + +#define TWL5031_INTERRUPTS_BCIISR1 0x0 +#define TWL5031_INTERRUPTS_BCIIMR1 0x1 +#define TWL5031_INTERRUPTS_BCIISR2 0x2 +#define TWL5031_INTERRUPTS_BCIIMR2 0x3 +#define TWL5031_INTERRUPTS_BCISIR 0x4 +#define TWL5031_INTERRUPTS_BCIEDR1 0x5 +#define TWL5031_INTERRUPTS_BCIEDR2 0x6 +#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7 + +/*----------------------------------------------------------------------*/ + +/* + * PM Master module register offsets (use TWL4030_MODULE_PM_MASTER) + */ + +#define TWL4030_PM_MASTER_CFG_P1_TRANSITION 0x00 +#define TWL4030_PM_MASTER_CFG_P2_TRANSITION 0x01 +#define TWL4030_PM_MASTER_CFG_P3_TRANSITION 0x02 +#define TWL4030_PM_MASTER_CFG_P123_TRANSITION 0x03 +#define TWL4030_PM_MASTER_STS_BOOT 0x04 +#define TWL4030_PM_MASTER_CFG_BOOT 0x05 +#define TWL4030_PM_MASTER_SHUNDAN 0x06 +#define TWL4030_PM_MASTER_BOOT_BCI 0x07 +#define TWL4030_PM_MASTER_CFG_PWRANA1 0x08 +#define TWL4030_PM_MASTER_CFG_PWRANA2 0x09 +#define TWL4030_PM_MASTER_BACKUP_MISC_STS 0x0b +#define TWL4030_PM_MASTER_BACKUP_MISC_CFG 0x0c +#define TWL4030_PM_MASTER_BACKUP_MISC_TST 0x0d +#define TWL4030_PM_MASTER_PROTECT_KEY 0x0e +#define TWL4030_PM_MASTER_STS_HW_CONDITIONS 0x0f +#define TWL4030_PM_MASTER_P1_SW_EVENTS 0x10 +#define TWL4030_PM_MASTER_P2_SW_EVENTS 0x11 +#define TWL4030_PM_MASTER_P3_SW_EVENTS 0x12 +#define TWL4030_PM_MASTER_STS_P123_STATE 0x13 +#define TWL4030_PM_MASTER_PB_CFG 0x14 +#define TWL4030_PM_MASTER_PB_WORD_MSB 0x15 +#define TWL4030_PM_MASTER_PB_WORD_LSB 
0x16 +#define TWL4030_PM_MASTER_SEQ_ADD_W2P 0x1c +#define TWL4030_PM_MASTER_SEQ_ADD_P2A 0x1d +#define TWL4030_PM_MASTER_SEQ_ADD_A2W 0x1e +#define TWL4030_PM_MASTER_SEQ_ADD_A2S 0x1f +#define TWL4030_PM_MASTER_SEQ_ADD_S2A12 0x20 +#define TWL4030_PM_MASTER_SEQ_ADD_S2A3 0x21 +#define TWL4030_PM_MASTER_SEQ_ADD_WARM 0x22 +#define TWL4030_PM_MASTER_MEMORY_ADDRESS 0x23 +#define TWL4030_PM_MASTER_MEMORY_DATA 0x24 + +#define TWL4030_PM_MASTER_KEY_CFG1 0xc0 +#define TWL4030_PM_MASTER_KEY_CFG2 0x0c + +#define TWL4030_PM_MASTER_KEY_TST1 0xe0 +#define TWL4030_PM_MASTER_KEY_TST2 0x0e + +#define TWL4030_PM_MASTER_GLOBAL_TST 0xb6 + +/*----------------------------------------------------------------------*/ + +/* Power bus message definitions */ + +/* The TWL4030/5030 splits its power-management resources (the various + * regulators, clock and reset lines) into 3 processor groups - P1, P2 and + * P3. These groups can then be configured to transition between sleep, wait-on + * and active states by sending messages to the power bus. See Section 5.4.2 + * Power Resources of TWL4030 TRM + */ + +/* Processor groups */ +#define DEV_GRP_NULL 0x0 +#define DEV_GRP_P1 0x1 /* P1: all OMAP devices */ +#define DEV_GRP_P2 0x2 /* P2: all Modem devices */ +#define DEV_GRP_P3 0x4 /* P3: all peripheral devices */ + +/* Resource groups */ +#define RES_GRP_RES 0x0 /* Reserved */ +#define RES_GRP_PP 0x1 /* Power providers */ +#define RES_GRP_RC 0x2 /* Reset and control */ +#define RES_GRP_PP_RC 0x3 +#define RES_GRP_PR 0x4 /* Power references */ +#define RES_GRP_PP_PR 0x5 +#define RES_GRP_RC_PR 0x6 +#define RES_GRP_ALL 0x7 /* All resource groups */ + +#define RES_TYPE2_R0 0x0 + +#define RES_TYPE_ALL 0x7 + +/* Resource states */ +#define RES_STATE_WRST 0xF +#define RES_STATE_ACTIVE 0xE +#define RES_STATE_SLEEP 0x8 +#define RES_STATE_OFF 0x0 + +/* Power resources */ + +/* Power providers */ +#define RES_VAUX1 1 +#define RES_VAUX2 2 +#define RES_VAUX3 3 +#define RES_VAUX4 4 +#define RES_VMMC1 5 +#define RES_VMMC2 6 +#define RES_VPLL1 7 +#define RES_VPLL2 8 +#define RES_VSIM 9 +#define RES_VDAC 10 +#define RES_VINTANA1 11 +#define RES_VINTANA2 12 +#define RES_VINTDIG 13 +#define RES_VIO 14 +#define RES_VDD1 15 +#define RES_VDD2 16 +#define RES_VUSB_1V5 17 +#define RES_VUSB_1V8 18 +#define RES_VUSB_3V1 19 +#define RES_VUSBCP 20 +#define RES_REGEN 21 +/* Reset and control */ +#define RES_NRES_PWRON 22 +#define RES_CLKEN 23 +#define RES_SYSEN 24 +#define RES_HFCLKOUT 25 +#define RES_32KCLKOUT 26 +#define RES_RESET 27 +/* Power Reference */ +#define RES_MAIN_REF 28 + +#define TOTAL_RESOURCES 28 +/* + * Power Bus Message Format ... these can be sent individually by Linux, + * but are usually part of downloaded scripts that are run when various + * power events are triggered. 
+ * + * Broadcast Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4] + * RES_STATE[3:0] + * + * Singular Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0] + */ + +#define MSG_BROADCAST(devgrp, grp, type, type2, state) \ + ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ + | (type) << 4 | (state)) + +#define MSG_SINGULAR(devgrp, id, state) \ + ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) + +#define MSG_BROADCAST_ALL(devgrp, state) \ + ((devgrp) << 5 | (state)) + +#define MSG_BROADCAST_REF MSG_BROADCAST_ALL +#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL +#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL +/*----------------------------------------------------------------------*/ + +struct twl4030_clock_init_data { + bool ck32k_lowpwr_enable; +}; + +struct twl4030_bci_platform_data { + int *battery_tmp_tbl; + unsigned int tblsize; +}; + +/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */ +struct twl4030_gpio_platform_data { + int gpio_base; + unsigned irq_base, irq_end; + + /* package the two LED signals as output-only GPIOs? */ + bool use_leds; + + /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */ + u8 mmc_cd; + + /* if BIT(N) is set, or VMMC(n+1) is linked, debounce GPIO-N */ + u32 debounce; + + /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup + * should be enabled. Else, if that bit is set in "pulldowns", + * that pulldown is enabled. Don't waste power by letting any + * digital inputs float... + */ + u32 pullups; + u32 pulldowns; + + int (*setup)(struct device *dev, + unsigned gpio, unsigned ngpio); + int (*teardown)(struct device *dev, + unsigned gpio, unsigned ngpio); +}; + +struct twl4030_madc_platform_data { + int irq_line; +}; + +/* Boards have unique mappings of {row, col} --> keycode. + * Column and row are 8 bits each, but range only from 0..7. + * a PERSISTENT_KEY is "always on" and never reported. 
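A quick sketch of how the two message layouts documented above are usually composed with the MSG_SINGULAR() and MSG_BROADCAST() macros; the resource and state choices are arbitrary examples and the example_* names are hypothetical.

/* Singular message: put VAUX1, as seen by processor group P1, to sleep */
static const u16 example_sleep_vaux1 =
	MSG_SINGULAR(DEV_GRP_P1, RES_VAUX1, RES_STATE_SLEEP);

/* Broadcast message: drive all P1 power-provider resources active */
static const u16 example_wake_providers =
	MSG_BROADCAST(DEV_GRP_P1, RES_GRP_PP, RES_TYPE_ALL,
		      RES_TYPE2_R0, RES_STATE_ACTIVE);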
+ */ +#define PERSISTENT_KEY(r, c) KEY((r), (c), KEY_RESERVED) + +struct twl4030_keypad_data { + const struct matrix_keymap_data *keymap_data; + unsigned rows; + unsigned cols; + bool rep; +}; + +enum twl4030_usb_mode { + T2_USB_MODE_ULPI = 1, + T2_USB_MODE_CEA2011_3PIN = 2, +}; + +struct twl4030_usb_data { + enum twl4030_usb_mode usb_mode; + unsigned long features; + + int (*phy_init)(struct device *dev); + int (*phy_exit)(struct device *dev); + /* Power on/off the PHY */ + int (*phy_power)(struct device *dev, int iD, int on); + /* enable/disable phy clocks */ + int (*phy_set_clock)(struct device *dev, int on); + /* suspend/resume of phy */ + int (*phy_suspend)(struct device *dev, int suspend); +}; + +struct twl4030_ins { + u16 pmb_message; + u8 delay; +}; + +struct twl4030_script { + struct twl4030_ins *script; + unsigned size; + u8 flags; +#define TWL4030_WRST_SCRIPT (1<<0) +#define TWL4030_WAKEUP12_SCRIPT (1<<1) +#define TWL4030_WAKEUP3_SCRIPT (1<<2) +#define TWL4030_SLEEP_SCRIPT (1<<3) +}; + +struct twl4030_resconfig { + u8 resource; + u8 devgroup; /* Processor group that Power resource belongs to */ + u8 type; /* Power resource addressed, 6 / broadcast message */ + u8 type2; /* Power resource addressed, 3 / broadcast message */ + u8 remap_off; /* off state remapping */ + u8 remap_sleep; /* sleep state remapping */ +}; + +struct twl4030_power_data { + struct twl4030_script **scripts; + unsigned num; + struct twl4030_resconfig *resource_config; +#define TWL4030_RESCONFIG_UNDEF ((u8)-1) + bool use_poweroff; /* Board is wired for TWL poweroff */ +}; + +extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts); +extern int twl4030_remove_script(u8 flags); +extern void twl4030_power_off(void); + +struct twl4030_codec_data { + unsigned int digimic_delay; /* in ms */ + unsigned int ramp_delay_value; + unsigned int offset_cncl_path; + unsigned int check_defaults:1; + unsigned int reset_registers:1; + unsigned int hs_extmute:1; + void (*set_hs_extmute)(int mute); +}; + +struct twl4030_vibra_data { + unsigned int coexist; +}; + +struct twl4030_audio_data { + unsigned int audio_mclk; + struct twl4030_codec_data *codec; + struct twl4030_vibra_data *vibra; + + /* twl6040 */ + int audpwron_gpio; /* audio power-on gpio */ + int naudint_irq; /* audio interrupt */ + unsigned int irq_base; +}; + +struct twl4030_platform_data { + unsigned irq_base, irq_end; + struct twl4030_clock_init_data *clock; + struct twl4030_bci_platform_data *bci; + struct twl4030_gpio_platform_data *gpio; + struct twl4030_madc_platform_data *madc; + struct twl4030_keypad_data *keypad; + struct twl4030_usb_data *usb; + struct twl4030_power_data *power; + struct twl4030_audio_data *audio; + + /* Common LDO regulators for TWL4030/TWL6030 */ + struct regulator_init_data *vdac; + struct regulator_init_data *vaux1; + struct regulator_init_data *vaux2; + struct regulator_init_data *vaux3; + struct regulator_init_data *vdd1; + struct regulator_init_data *vdd2; + struct regulator_init_data *vdd3; + /* TWL4030 LDO regulators */ + struct regulator_init_data *vpll1; + struct regulator_init_data *vpll2; + struct regulator_init_data *vmmc1; + struct regulator_init_data *vmmc2; + struct regulator_init_data *vsim; + struct regulator_init_data *vaux4; + struct regulator_init_data *vio; + struct regulator_init_data *vintana1; + struct regulator_init_data *vintana2; + struct regulator_init_data *vintdig; + /* TWL6030 LDO regulators */ + struct regulator_init_data *vmmc; + struct regulator_init_data *vpp; + struct 
regulator_init_data *vusim; + struct regulator_init_data *vana; + struct regulator_init_data *vcxio; + struct regulator_init_data *vusb; + struct regulator_init_data *clk32kg; + struct regulator_init_data *v1v8; + struct regulator_init_data *v2v1; + /* TWL6025 LDO regulators */ + struct regulator_init_data *ldo1; + struct regulator_init_data *ldo2; + struct regulator_init_data *ldo3; + struct regulator_init_data *ldo4; + struct regulator_init_data *ldo5; + struct regulator_init_data *ldo6; + struct regulator_init_data *ldo7; + struct regulator_init_data *ldoln; + struct regulator_init_data *ldousb; + /* TWL6025 DCDC regulators */ + struct regulator_init_data *smps3; + struct regulator_init_data *smps4; + struct regulator_init_data *vio6025; +}; + +struct twl_regulator_driver_data { + int (*set_voltage)(void *data, int target_uV); + int (*get_voltage)(void *data); + void *data; + unsigned long features; +}; + +/*----------------------------------------------------------------------*/ + +int twl4030_sih_setup(struct device *dev, int module, int irq_base); + +/* Offsets to Power Registers */ +#define TWL4030_VDAC_DEV_GRP 0x3B +#define TWL4030_VDAC_DEDICATED 0x3E +#define TWL4030_VAUX1_DEV_GRP 0x17 +#define TWL4030_VAUX1_DEDICATED 0x1A +#define TWL4030_VAUX2_DEV_GRP 0x1B +#define TWL4030_VAUX2_DEDICATED 0x1E +#define TWL4030_VAUX3_DEV_GRP 0x1F +#define TWL4030_VAUX3_DEDICATED 0x22 + +static inline int twl4030charger_usb_en(int enable) { return 0; } + +/*----------------------------------------------------------------------*/ + +/* Linux-specific regulator identifiers ... for now, we only support + * the LDOs, and leave the three buck converters alone. VDD1 and VDD2 + * need to tie into hardware based voltage scaling (cpufreq etc), while + * VIO is generally fixed. 
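Looking back at the script structures above (twl4030_ins, twl4030_script, twl4030_power_data), a minimal sleep script might be wired together as sketched below, before the regulator identifiers that follow. All example_* names are hypothetical; real board sequences are considerably longer.

/*
 * Illustrative sketch only: one broadcast instruction with a 2-tick delay.
 */
static struct twl4030_ins example_sleep_seq[] = {
	{ MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, RES_TYPE_ALL,
			RES_TYPE2_R0, RES_STATE_SLEEP), 2 },
};

static struct twl4030_script example_sleep_script = {
	.script	= example_sleep_seq,
	.size	= ARRAY_SIZE(example_sleep_seq),
	.flags	= TWL4030_SLEEP_SCRIPT,
};

static struct twl4030_script *example_scripts[] = {
	&example_sleep_script,
};

static struct twl4030_power_data example_power_data = {
	.scripts	= example_scripts,
	.num		= ARRAY_SIZE(example_scripts),
};

/* A board file would then point the .power member of its
 * twl4030_platform_data at example_power_data. */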
+ */ + +/* TWL4030 SMPS/LDO's */ +/* EXTERNAL dc-to-dc buck converters */ +#define TWL4030_REG_VDD1 0 +#define TWL4030_REG_VDD2 1 +#define TWL4030_REG_VIO 2 + +/* EXTERNAL LDOs */ +#define TWL4030_REG_VDAC 3 +#define TWL4030_REG_VPLL1 4 +#define TWL4030_REG_VPLL2 5 /* not on all chips */ +#define TWL4030_REG_VMMC1 6 +#define TWL4030_REG_VMMC2 7 /* not on all chips */ +#define TWL4030_REG_VSIM 8 /* not on all chips */ +#define TWL4030_REG_VAUX1 9 /* not on all chips */ +#define TWL4030_REG_VAUX2_4030 10 /* (twl4030-specific) */ +#define TWL4030_REG_VAUX2 11 /* (twl5030 and newer) */ +#define TWL4030_REG_VAUX3 12 /* not on all chips */ +#define TWL4030_REG_VAUX4 13 /* not on all chips */ + +/* INTERNAL LDOs */ +#define TWL4030_REG_VINTANA1 14 +#define TWL4030_REG_VINTANA2 15 +#define TWL4030_REG_VINTDIG 16 +#define TWL4030_REG_VUSB1V5 17 +#define TWL4030_REG_VUSB1V8 18 +#define TWL4030_REG_VUSB3V1 19 + +/* TWL6030 SMPS/LDO's */ +/* EXTERNAL dc-to-dc buck convertor controllable via SR */ +#define TWL6030_REG_VDD1 30 +#define TWL6030_REG_VDD2 31 +#define TWL6030_REG_VDD3 32 + +/* Non SR compliant dc-to-dc buck convertors */ +#define TWL6030_REG_VMEM 33 +#define TWL6030_REG_V2V1 34 +#define TWL6030_REG_V1V29 35 +#define TWL6030_REG_V1V8 36 + +/* EXTERNAL LDOs */ +#define TWL6030_REG_VAUX1_6030 37 +#define TWL6030_REG_VAUX2_6030 38 +#define TWL6030_REG_VAUX3_6030 39 +#define TWL6030_REG_VMMC 40 +#define TWL6030_REG_VPP 41 +#define TWL6030_REG_VUSIM 42 +#define TWL6030_REG_VANA 43 +#define TWL6030_REG_VCXIO 44 +#define TWL6030_REG_VDAC 45 +#define TWL6030_REG_VUSB 46 + +/* INTERNAL LDOs */ +#define TWL6030_REG_VRTC 47 +#define TWL6030_REG_CLK32KG 48 + +/* LDOs on 6025 have different names */ +#define TWL6025_REG_LDO2 49 +#define TWL6025_REG_LDO4 50 +#define TWL6025_REG_LDO3 51 +#define TWL6025_REG_LDO5 52 +#define TWL6025_REG_LDO1 53 +#define TWL6025_REG_LDO7 54 +#define TWL6025_REG_LDO6 55 +#define TWL6025_REG_LDOLN 56 +#define TWL6025_REG_LDOUSB 57 + +/* 6025 DCDC supplies */ +#define TWL6025_REG_SMPS3 58 +#define TWL6025_REG_SMPS4 59 +#define TWL6025_REG_VIO 60 + + +#endif /* End of __TWL4030_H */ diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h new file mode 100644 index 00000000..530e11ba --- /dev/null +++ b/include/linux/i2c/twl4030-madc.h @@ -0,0 +1,145 @@ +/* + * twl4030_madc.h - Header for TWL4030 MADC + * + * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * J Keerthy <j-keerthy@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef _TWL4030_MADC_H +#define _TWL4030_MADC_H + +struct twl4030_madc_conversion_method { + u8 sel; + u8 avg; + u8 rbase; + u8 ctrl; +}; + +#define TWL4030_MADC_MAX_CHANNELS 16 + + +/* + * twl4030_madc_request- madc request packet for channel conversion + * @channels: 16 bit bitmap for individual channels + * @do_avgP: sample the input channel for 4 consecutive cycles + * @method: RT, SW1, SW2 + * @type: Polling or interrupt based method + */ + +struct twl4030_madc_request { + unsigned long channels; + u16 do_avg; + u16 method; + u16 type; + bool active; + bool result_pending; + int rbuf[TWL4030_MADC_MAX_CHANNELS]; + void (*func_cb)(int len, int channels, int *buf); +}; + +enum conversion_methods { + TWL4030_MADC_RT, + TWL4030_MADC_SW1, + TWL4030_MADC_SW2, + TWL4030_MADC_NUM_METHODS +}; + +enum sample_type { + TWL4030_MADC_WAIT, + TWL4030_MADC_IRQ_ONESHOT, + TWL4030_MADC_IRQ_REARM +}; + +#define TWL4030_MADC_CTRL1 0x00 +#define TWL4030_MADC_CTRL2 0x01 + +#define TWL4030_MADC_RTSELECT_LSB 0x02 +#define TWL4030_MADC_SW1SELECT_LSB 0x06 +#define TWL4030_MADC_SW2SELECT_LSB 0x0A + +#define TWL4030_MADC_RTAVERAGE_LSB 0x04 +#define TWL4030_MADC_SW1AVERAGE_LSB 0x08 +#define TWL4030_MADC_SW2AVERAGE_LSB 0x0C + +#define TWL4030_MADC_CTRL_SW1 0x12 +#define TWL4030_MADC_CTRL_SW2 0x13 + +#define TWL4030_MADC_RTCH0_LSB 0x17 +#define TWL4030_MADC_GPCH0_LSB 0x37 + +#define TWL4030_MADC_MADCON (1 << 0) /* MADC power on */ +#define TWL4030_MADC_BUSY (1 << 0) /* MADC busy */ +/* MADC conversion completion */ +#define TWL4030_MADC_EOC_SW (1 << 1) +/* MADC SWx start conversion */ +#define TWL4030_MADC_SW_START (1 << 5) +#define TWL4030_MADC_ADCIN0 (1 << 0) +#define TWL4030_MADC_ADCIN1 (1 << 1) +#define TWL4030_MADC_ADCIN2 (1 << 2) +#define TWL4030_MADC_ADCIN3 (1 << 3) +#define TWL4030_MADC_ADCIN4 (1 << 4) +#define TWL4030_MADC_ADCIN5 (1 << 5) +#define TWL4030_MADC_ADCIN6 (1 << 6) +#define TWL4030_MADC_ADCIN7 (1 << 7) +#define TWL4030_MADC_ADCIN8 (1 << 8) +#define TWL4030_MADC_ADCIN9 (1 << 9) +#define TWL4030_MADC_ADCIN10 (1 << 10) +#define TWL4030_MADC_ADCIN11 (1 << 11) +#define TWL4030_MADC_ADCIN12 (1 << 12) +#define TWL4030_MADC_ADCIN13 (1 << 13) +#define TWL4030_MADC_ADCIN14 (1 << 14) +#define TWL4030_MADC_ADCIN15 (1 << 15) + +/* Fixed channels */ +#define TWL4030_MADC_BTEMP TWL4030_MADC_ADCIN1 +#define TWL4030_MADC_VBUS TWL4030_MADC_ADCIN8 +#define TWL4030_MADC_VBKB TWL4030_MADC_ADCIN9 +#define TWL4030_MADC_ICHG TWL4030_MADC_ADCIN10 +#define TWL4030_MADC_VCHG TWL4030_MADC_ADCIN11 +#define TWL4030_MADC_VBAT TWL4030_MADC_ADCIN12 + +/* Step size and prescaler ratio */ +#define TEMP_STEP_SIZE 147 +#define TEMP_PSR_R 100 +#define CURR_STEP_SIZE 147 +#define CURR_PSR_R1 44 +#define CURR_PSR_R2 88 + +#define TWL4030_BCI_BCICTL1 0x23 +#define TWL4030_BCI_CGAIN 0x020 +#define TWL4030_BCI_MESBAT (1 << 1) +#define TWL4030_BCI_TYPEN (1 << 4) +#define TWL4030_BCI_ITHEN (1 << 3) + +#define REG_BCICTL2 0x024 +#define TWL4030_BCI_ITHSENS 0x007 + +/* Register and bits for GPBR1 register */ +#define TWL4030_REG_GPBR1 0x0c +#define TWL4030_GPBR1_MADC_HFCLK_EN (1 << 7) + +struct twl4030_madc_user_parms { + int channel; + int average; + int status; + u16 result; +}; + +int twl4030_madc_conversion(struct twl4030_madc_request *conv); +int twl4030_get_madc_conversion(int channel_no); +#endif diff --git 
a/include/linux/i2o-dev.h b/include/linux/i2o-dev.h new file mode 100644 index 00000000..a0b23dd4 --- /dev/null +++ b/include/linux/i2o-dev.h @@ -0,0 +1,421 @@ +/* + * I2O user space accessible structures/APIs + * + * (c) Copyright 1999, 2000 Red Hat Software + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + ************************************************************************* + * + * This header file defines the I2O APIs that are available to both + * the kernel and user level applications. Kernel specific structures + * are defined in i2o_osm. OSMs should include _only_ i2o_osm.h which + * automatically includes this file. + * + */ + +#ifndef _I2O_DEV_H +#define _I2O_DEV_H + +/* How many controllers are we allowing */ +#define MAX_I2O_CONTROLLERS 32 + +#include <linux/ioctl.h> +#include <linux/types.h> + +/* + * I2O Control IOCTLs and structures + */ +#define I2O_MAGIC_NUMBER 'i' +#define I2OGETIOPS _IOR(I2O_MAGIC_NUMBER,0,__u8[MAX_I2O_CONTROLLERS]) +#define I2OHRTGET _IOWR(I2O_MAGIC_NUMBER,1,struct i2o_cmd_hrtlct) +#define I2OLCTGET _IOWR(I2O_MAGIC_NUMBER,2,struct i2o_cmd_hrtlct) +#define I2OPARMSET _IOWR(I2O_MAGIC_NUMBER,3,struct i2o_cmd_psetget) +#define I2OPARMGET _IOWR(I2O_MAGIC_NUMBER,4,struct i2o_cmd_psetget) +#define I2OSWDL _IOWR(I2O_MAGIC_NUMBER,5,struct i2o_sw_xfer) +#define I2OSWUL _IOWR(I2O_MAGIC_NUMBER,6,struct i2o_sw_xfer) +#define I2OSWDEL _IOWR(I2O_MAGIC_NUMBER,7,struct i2o_sw_xfer) +#define I2OVALIDATE _IOR(I2O_MAGIC_NUMBER,8,__u32) +#define I2OHTML _IOWR(I2O_MAGIC_NUMBER,9,struct i2o_html) +#define I2OEVTREG _IOW(I2O_MAGIC_NUMBER,10,struct i2o_evt_id) +#define I2OEVTGET _IOR(I2O_MAGIC_NUMBER,11,struct i2o_evt_info) +#define I2OPASSTHRU _IOR(I2O_MAGIC_NUMBER,12,struct i2o_cmd_passthru) +#define I2OPASSTHRU32 _IOR(I2O_MAGIC_NUMBER,12,struct i2o_cmd_passthru32) + +struct i2o_cmd_passthru32 { + unsigned int iop; /* IOP unit number */ + __u32 msg; /* message */ +}; + +struct i2o_cmd_passthru { + unsigned int iop; /* IOP unit number */ + void __user *msg; /* message */ +}; + +struct i2o_cmd_hrtlct { + unsigned int iop; /* IOP unit number */ + void __user *resbuf; /* Buffer for result */ + unsigned int __user *reslen; /* Buffer length in bytes */ +}; + +struct i2o_cmd_psetget { + unsigned int iop; /* IOP unit number */ + unsigned int tid; /* Target device TID */ + void __user *opbuf; /* Operation List buffer */ + unsigned int oplen; /* Operation List buffer length in bytes */ + void __user *resbuf; /* Result List buffer */ + unsigned int __user *reslen; /* Result List buffer length in bytes */ +}; + +struct i2o_sw_xfer { + unsigned int iop; /* IOP unit number */ + unsigned char flags; /* Flags field */ + unsigned char sw_type; /* Software type */ + unsigned int sw_id; /* Software ID */ + void __user *buf; /* Pointer to software buffer */ + unsigned int __user *swlen; /* Length of software data */ + unsigned int __user *maxfrag; /* Maximum fragment count */ + unsigned int __user *curfrag; /* Current fragment count */ +}; + +struct i2o_html { + unsigned int iop; /* IOP unit number */ + unsigned int tid; /* Target device ID */ + unsigned int page; /* HTML page */ + void __user *resbuf; /* Buffer for reply HTML page */ + unsigned int __user *reslen; /* Length in bytes of reply buffer */ + void __user *qbuf; /* Pointer to HTTP query string */ + unsigned int qlen; 
/* Length in bytes of query string buffer */ +}; + +#define I2O_EVT_Q_LEN 32 + +struct i2o_evt_id { + unsigned int iop; + unsigned int tid; + unsigned int evt_mask; +}; + +/* Event data size = frame size - message header + evt indicator */ +#define I2O_EVT_DATA_SIZE 88 + +struct i2o_evt_info { + struct i2o_evt_id id; + unsigned char evt_data[I2O_EVT_DATA_SIZE]; + unsigned int data_size; +}; + +struct i2o_evt_get { + struct i2o_evt_info info; + int pending; + int lost; +}; + +typedef struct i2o_sg_io_hdr { + unsigned int flags; /* see I2O_DPT_SG_IO_FLAGS */ +} i2o_sg_io_hdr_t; + +/************************************************************************** + * HRT related constants and structures + **************************************************************************/ +#define I2O_BUS_LOCAL 0 +#define I2O_BUS_ISA 1 +#define I2O_BUS_EISA 2 +#define I2O_BUS_MCA 3 +#define I2O_BUS_PCI 4 +#define I2O_BUS_PCMCIA 5 +#define I2O_BUS_NUBUS 6 +#define I2O_BUS_CARDBUS 7 +#define I2O_BUS_UNKNOWN 0x80 + +typedef struct _i2o_pci_bus { + __u8 PciFunctionNumber; + __u8 PciDeviceNumber; + __u8 PciBusNumber; + __u8 reserved; + __u16 PciVendorID; + __u16 PciDeviceID; +} i2o_pci_bus; + +typedef struct _i2o_local_bus { + __u16 LbBaseIOPort; + __u16 reserved; + __u32 LbBaseMemoryAddress; +} i2o_local_bus; + +typedef struct _i2o_isa_bus { + __u16 IsaBaseIOPort; + __u8 CSN; + __u8 reserved; + __u32 IsaBaseMemoryAddress; +} i2o_isa_bus; + +typedef struct _i2o_eisa_bus_info { + __u16 EisaBaseIOPort; + __u8 reserved; + __u8 EisaSlotNumber; + __u32 EisaBaseMemoryAddress; +} i2o_eisa_bus; + +typedef struct _i2o_mca_bus { + __u16 McaBaseIOPort; + __u8 reserved; + __u8 McaSlotNumber; + __u32 McaBaseMemoryAddress; +} i2o_mca_bus; + +typedef struct _i2o_other_bus { + __u16 BaseIOPort; + __u16 reserved; + __u32 BaseMemoryAddress; +} i2o_other_bus; + +typedef struct _i2o_hrt_entry { + __u32 adapter_id; + __u32 parent_tid:12; + __u32 state:4; + __u32 bus_num:8; + __u32 bus_type:8; + union { + i2o_pci_bus pci_bus; + i2o_local_bus local_bus; + i2o_isa_bus isa_bus; + i2o_eisa_bus eisa_bus; + i2o_mca_bus mca_bus; + i2o_other_bus other_bus; + } bus; +} i2o_hrt_entry; + +typedef struct _i2o_hrt { + __u16 num_entries; + __u8 entry_len; + __u8 hrt_version; + __u32 change_ind; + i2o_hrt_entry hrt_entry[1]; +} i2o_hrt; + +typedef struct _i2o_lct_entry { + __u32 entry_size:16; + __u32 tid:12; + __u32 reserved:4; + __u32 change_ind; + __u32 device_flags; + __u32 class_id:12; + __u32 version:4; + __u32 vendor_id:16; + __u32 sub_class; + __u32 user_tid:12; + __u32 parent_tid:12; + __u32 bios_info:8; + __u8 identity_tag[8]; + __u32 event_capabilities; +} i2o_lct_entry; + +typedef struct _i2o_lct { + __u32 table_size:16; + __u32 boot_tid:12; + __u32 lct_ver:4; + __u32 iop_flags; + __u32 change_ind; + i2o_lct_entry lct_entry[1]; +} i2o_lct; + +typedef struct _i2o_status_block { + __u16 org_id; + __u16 reserved; + __u16 iop_id:12; + __u16 reserved1:4; + __u16 host_unit_id; + __u16 segment_number:12; + __u16 i2o_version:4; + __u8 iop_state; + __u8 msg_type; + __u16 inbound_frame_size; + __u8 init_code; + __u8 reserved2; + __u32 max_inbound_frames; + __u32 cur_inbound_frames; + __u32 max_outbound_frames; + char product_id[24]; + __u32 expected_lct_size; + __u32 iop_capabilities; + __u32 desired_mem_size; + __u32 current_mem_size; + __u32 current_mem_base; + __u32 desired_io_size; + __u32 current_io_size; + __u32 current_io_base; + __u32 reserved3:24; + __u32 cmd_status:8; +} i2o_status_block; + +/* Event indicator mask flags */ +#define 
I2O_EVT_IND_STATE_CHANGE 0x80000000 +#define I2O_EVT_IND_GENERAL_WARNING 0x40000000 +#define I2O_EVT_IND_CONFIGURATION_FLAG 0x20000000 +#define I2O_EVT_IND_LOCK_RELEASE 0x10000000 +#define I2O_EVT_IND_CAPABILITY_CHANGE 0x08000000 +#define I2O_EVT_IND_DEVICE_RESET 0x04000000 +#define I2O_EVT_IND_EVT_MASK_MODIFIED 0x02000000 +#define I2O_EVT_IND_FIELD_MODIFIED 0x01000000 +#define I2O_EVT_IND_VENDOR_EVT 0x00800000 +#define I2O_EVT_IND_DEVICE_STATE 0x00400000 + +/* Executive event indicitors */ +#define I2O_EVT_IND_EXEC_RESOURCE_LIMITS 0x00000001 +#define I2O_EVT_IND_EXEC_CONNECTION_FAIL 0x00000002 +#define I2O_EVT_IND_EXEC_ADAPTER_FAULT 0x00000004 +#define I2O_EVT_IND_EXEC_POWER_FAIL 0x00000008 +#define I2O_EVT_IND_EXEC_RESET_PENDING 0x00000010 +#define I2O_EVT_IND_EXEC_RESET_IMMINENT 0x00000020 +#define I2O_EVT_IND_EXEC_HW_FAIL 0x00000040 +#define I2O_EVT_IND_EXEC_XCT_CHANGE 0x00000080 +#define I2O_EVT_IND_EXEC_NEW_LCT_ENTRY 0x00000100 +#define I2O_EVT_IND_EXEC_MODIFIED_LCT 0x00000200 +#define I2O_EVT_IND_EXEC_DDM_AVAILABILITY 0x00000400 + +/* Random Block Storage Event Indicators */ +#define I2O_EVT_IND_BSA_VOLUME_LOAD 0x00000001 +#define I2O_EVT_IND_BSA_VOLUME_UNLOAD 0x00000002 +#define I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ 0x00000004 +#define I2O_EVT_IND_BSA_CAPACITY_CHANGE 0x00000008 +#define I2O_EVT_IND_BSA_SCSI_SMART 0x00000010 + +/* Event data for generic events */ +#define I2O_EVT_STATE_CHANGE_NORMAL 0x00 +#define I2O_EVT_STATE_CHANGE_SUSPENDED 0x01 +#define I2O_EVT_STATE_CHANGE_RESTART 0x02 +#define I2O_EVT_STATE_CHANGE_NA_RECOVER 0x03 +#define I2O_EVT_STATE_CHANGE_NA_NO_RECOVER 0x04 +#define I2O_EVT_STATE_CHANGE_QUIESCE_REQUEST 0x05 +#define I2O_EVT_STATE_CHANGE_FAILED 0x10 +#define I2O_EVT_STATE_CHANGE_FAULTED 0x11 + +#define I2O_EVT_GEN_WARNING_NORMAL 0x00 +#define I2O_EVT_GEN_WARNING_ERROR_THRESHOLD 0x01 +#define I2O_EVT_GEN_WARNING_MEDIA_FAULT 0x02 + +#define I2O_EVT_CAPABILITY_OTHER 0x01 +#define I2O_EVT_CAPABILITY_CHANGED 0x02 + +#define I2O_EVT_SENSOR_STATE_CHANGED 0x01 + +/* + * I2O classes / subclasses + */ + +/* Class ID and Code Assignments + * (LCT.ClassID.Version field) + */ +#define I2O_CLASS_VERSION_10 0x00 +#define I2O_CLASS_VERSION_11 0x01 + +/* Class code names + * (from v1.5 Table 6-1 Class Code Assignments.) 
+ */ + +#define I2O_CLASS_EXECUTIVE 0x000 +#define I2O_CLASS_DDM 0x001 +#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010 +#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011 +#define I2O_CLASS_LAN 0x020 +#define I2O_CLASS_WAN 0x030 +#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040 +#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041 +#define I2O_CLASS_SCSI_PERIPHERAL 0x051 +#define I2O_CLASS_ATE_PORT 0x060 +#define I2O_CLASS_ATE_PERIPHERAL 0x061 +#define I2O_CLASS_FLOPPY_CONTROLLER 0x070 +#define I2O_CLASS_FLOPPY_DEVICE 0x071 +#define I2O_CLASS_BUS_ADAPTER 0x080 +#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090 +#define I2O_CLASS_PEER_TRANSPORT 0x091 +#define I2O_CLASS_END 0xfff + +/* + * Rest of 0x092 - 0x09f reserved for peer-to-peer classes + */ + +#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff + +/* + * Subclasses + */ + +#define I2O_SUBCLASS_i960 0x001 +#define I2O_SUBCLASS_HDM 0x020 +#define I2O_SUBCLASS_ISM 0x021 + +/* Operation functions */ + +#define I2O_PARAMS_FIELD_GET 0x0001 +#define I2O_PARAMS_LIST_GET 0x0002 +#define I2O_PARAMS_MORE_GET 0x0003 +#define I2O_PARAMS_SIZE_GET 0x0004 +#define I2O_PARAMS_TABLE_GET 0x0005 +#define I2O_PARAMS_FIELD_SET 0x0006 +#define I2O_PARAMS_LIST_SET 0x0007 +#define I2O_PARAMS_ROW_ADD 0x0008 +#define I2O_PARAMS_ROW_DELETE 0x0009 +#define I2O_PARAMS_TABLE_CLEAR 0x000A + +/* + * I2O serial number conventions / formats + * (circa v1.5) + */ + +#define I2O_SNFORMAT_UNKNOWN 0 +#define I2O_SNFORMAT_BINARY 1 +#define I2O_SNFORMAT_ASCII 2 +#define I2O_SNFORMAT_UNICODE 3 +#define I2O_SNFORMAT_LAN48_MAC 4 +#define I2O_SNFORMAT_WAN 5 + +/* + * Plus new in v2.0 (Yellowstone pdf doc) + */ + +#define I2O_SNFORMAT_LAN64_MAC 6 +#define I2O_SNFORMAT_DDM 7 +#define I2O_SNFORMAT_IEEE_REG64 8 +#define I2O_SNFORMAT_IEEE_REG128 9 +#define I2O_SNFORMAT_UNKNOWN2 0xff + +/* + * I2O Get Status State values + */ + +#define ADAPTER_STATE_INITIALIZING 0x01 +#define ADAPTER_STATE_RESET 0x02 +#define ADAPTER_STATE_HOLD 0x04 +#define ADAPTER_STATE_READY 0x05 +#define ADAPTER_STATE_OPERATIONAL 0x08 +#define ADAPTER_STATE_FAILED 0x10 +#define ADAPTER_STATE_FAULTED 0x11 + +/* + * Software module types + */ +#define I2O_SOFTWARE_MODULE_IRTOS 0x11 +#define I2O_SOFTWARE_MODULE_IOP_PRIVATE 0x22 +#define I2O_SOFTWARE_MODULE_IOP_CONFIG 0x23 + +/* + * Vendors + */ +#define I2O_VENDOR_DPT 0x001b + +/* + * DPT / Adaptec specific values for i2o_sg_io_hdr flags. + */ +#define I2O_DPT_SG_FLAG_INTERPRET 0x00010000 +#define I2O_DPT_SG_FLAG_PHYSICAL 0x00020000 + +#define I2O_DPT_FLASH_FRAG_SIZE 0x10000 +#define I2O_DPT_FLASH_READ 0x0101 +#define I2O_DPT_FLASH_WRITE 0x0102 + +#endif /* _I2O_DEV_H */ diff --git a/include/linux/i2o.h b/include/linux/i2o.h new file mode 100644 index 00000000..d23c3c20 --- /dev/null +++ b/include/linux/i2o.h @@ -0,0 +1,988 @@ +/* + * I2O kernel space accessible structures/APIs + * + * (c) Copyright 1999, 2000 Red Hat Software + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + ************************************************************************* + * + * This header file defined the I2O APIs/structures for use by + * the I2O kernel modules. 
+ * + */ + +#ifndef _I2O_H +#define _I2O_H + +#include <linux/i2o-dev.h> + +/* How many different OSM's are we allowing */ +#define I2O_MAX_DRIVERS 8 + +#include <linux/pci.h> +#include <linux/bug.h> +#include <linux/dma-mapping.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/workqueue.h> /* work_struct */ +#include <linux/mempool.h> +#include <linux/mutex.h> +#include <linux/scatterlist.h> +#include <linux/semaphore.h> /* Needed for MUTEX init macros */ + +#include <asm/io.h> + +/* message queue empty */ +#define I2O_QUEUE_EMPTY 0xffffffff + +/* + * Cache strategies + */ + +/* The NULL strategy leaves everything up to the controller. This tends to be a + * pessimal but functional choice. + */ +#define CACHE_NULL 0 +/* Prefetch data when reading. We continually attempt to load the next 32 sectors + * into the controller cache. + */ +#define CACHE_PREFETCH 1 +/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors + * into the controller cache. When an I/O is less <= 8K we assume its probably + * not sequential and don't prefetch (default) + */ +#define CACHE_SMARTFETCH 2 +/* Data is written to the cache and then out on to the disk. The I/O must be + * physically on the medium before the write is acknowledged (default without + * NVRAM) + */ +#define CACHE_WRITETHROUGH 17 +/* Data is written to the cache and then out on to the disk. The controller + * is permitted to write back the cache any way it wants. (default if battery + * backed NVRAM is present). It can be useful to set this for swap regardless of + * battery state. + */ +#define CACHE_WRITEBACK 18 +/* Optimise for under powered controllers, especially on RAID1 and RAID0. We + * write large I/O's directly to disk bypassing the cache to avoid the extra + * memory copy hits. Small writes are writeback cached + */ +#define CACHE_SMARTBACK 19 +/* Optimise for under powered controllers, especially on RAID1 and RAID0. We + * write large I/O's directly to disk bypassing the cache to avoid the extra + * memory copy hits. Small writes are writethrough cached. 
Suitable for devices + * lacking battery backup + */ +#define CACHE_SMARTTHROUGH 20 + +/* + * Ioctl structures + */ + +#define BLKI2OGRSTRAT _IOR('2', 1, int) +#define BLKI2OGWSTRAT _IOR('2', 2, int) +#define BLKI2OSRSTRAT _IOW('2', 3, int) +#define BLKI2OSWSTRAT _IOW('2', 4, int) + +/* + * I2O Function codes + */ + +/* + * Executive Class + */ +#define I2O_CMD_ADAPTER_ASSIGN 0xB3 +#define I2O_CMD_ADAPTER_READ 0xB2 +#define I2O_CMD_ADAPTER_RELEASE 0xB5 +#define I2O_CMD_BIOS_INFO_SET 0xA5 +#define I2O_CMD_BOOT_DEVICE_SET 0xA7 +#define I2O_CMD_CONFIG_VALIDATE 0xBB +#define I2O_CMD_CONN_SETUP 0xCA +#define I2O_CMD_DDM_DESTROY 0xB1 +#define I2O_CMD_DDM_ENABLE 0xD5 +#define I2O_CMD_DDM_QUIESCE 0xC7 +#define I2O_CMD_DDM_RESET 0xD9 +#define I2O_CMD_DDM_SUSPEND 0xAF +#define I2O_CMD_DEVICE_ASSIGN 0xB7 +#define I2O_CMD_DEVICE_RELEASE 0xB9 +#define I2O_CMD_HRT_GET 0xA8 +#define I2O_CMD_ADAPTER_CLEAR 0xBE +#define I2O_CMD_ADAPTER_CONNECT 0xC9 +#define I2O_CMD_ADAPTER_RESET 0xBD +#define I2O_CMD_LCT_NOTIFY 0xA2 +#define I2O_CMD_OUTBOUND_INIT 0xA1 +#define I2O_CMD_PATH_ENABLE 0xD3 +#define I2O_CMD_PATH_QUIESCE 0xC5 +#define I2O_CMD_PATH_RESET 0xD7 +#define I2O_CMD_STATIC_MF_CREATE 0xDD +#define I2O_CMD_STATIC_MF_RELEASE 0xDF +#define I2O_CMD_STATUS_GET 0xA0 +#define I2O_CMD_SW_DOWNLOAD 0xA9 +#define I2O_CMD_SW_UPLOAD 0xAB +#define I2O_CMD_SW_REMOVE 0xAD +#define I2O_CMD_SYS_ENABLE 0xD1 +#define I2O_CMD_SYS_MODIFY 0xC1 +#define I2O_CMD_SYS_QUIESCE 0xC3 +#define I2O_CMD_SYS_TAB_SET 0xA3 + +/* + * Utility Class + */ +#define I2O_CMD_UTIL_NOP 0x00 +#define I2O_CMD_UTIL_ABORT 0x01 +#define I2O_CMD_UTIL_CLAIM 0x09 +#define I2O_CMD_UTIL_RELEASE 0x0B +#define I2O_CMD_UTIL_PARAMS_GET 0x06 +#define I2O_CMD_UTIL_PARAMS_SET 0x05 +#define I2O_CMD_UTIL_EVT_REGISTER 0x13 +#define I2O_CMD_UTIL_EVT_ACK 0x14 +#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10 +#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D +#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F +#define I2O_CMD_UTIL_LOCK 0x17 +#define I2O_CMD_UTIL_LOCK_RELEASE 0x19 +#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15 + +/* + * SCSI Host Bus Adapter Class + */ +#define I2O_CMD_SCSI_EXEC 0x81 +#define I2O_CMD_SCSI_ABORT 0x83 +#define I2O_CMD_SCSI_BUSRESET 0x27 + +/* + * Bus Adapter Class + */ +#define I2O_CMD_BUS_ADAPTER_RESET 0x85 +#define I2O_CMD_BUS_RESET 0x87 +#define I2O_CMD_BUS_SCAN 0x89 +#define I2O_CMD_BUS_QUIESCE 0x8b + +/* + * Random Block Storage Class + */ +#define I2O_CMD_BLOCK_READ 0x30 +#define I2O_CMD_BLOCK_WRITE 0x31 +#define I2O_CMD_BLOCK_CFLUSH 0x37 +#define I2O_CMD_BLOCK_MLOCK 0x49 +#define I2O_CMD_BLOCK_MUNLOCK 0x4B +#define I2O_CMD_BLOCK_MMOUNT 0x41 +#define I2O_CMD_BLOCK_MEJECT 0x43 +#define I2O_CMD_BLOCK_POWER 0x70 + +#define I2O_CMD_PRIVATE 0xFF + +/* Command status values */ + +#define I2O_CMD_IN_PROGRESS 0x01 +#define I2O_CMD_REJECTED 0x02 +#define I2O_CMD_FAILED 0x03 +#define I2O_CMD_COMPLETED 0x04 + +/* I2O API function return values */ + +#define I2O_RTN_NO_ERROR 0 +#define I2O_RTN_NOT_INIT 1 +#define I2O_RTN_FREE_Q_EMPTY 2 +#define I2O_RTN_TCB_ERROR 3 +#define I2O_RTN_TRANSACTION_ERROR 4 +#define I2O_RTN_ADAPTER_ALREADY_INIT 5 +#define I2O_RTN_MALLOC_ERROR 6 +#define I2O_RTN_ADPTR_NOT_REGISTERED 7 +#define I2O_RTN_MSG_REPLY_TIMEOUT 8 +#define I2O_RTN_NO_STATUS 9 +#define I2O_RTN_NO_FIRM_VER 10 +#define I2O_RTN_NO_LINK_SPEED 11 + +/* Reply message status defines for all messages */ + +#define I2O_REPLY_STATUS_SUCCESS 0x00 +#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01 +#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02 +#define 
I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03 +#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04 +#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05 +#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06 +#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08 +#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09 +#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A +#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B +#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80 + +/* Status codes and Error Information for Parameter functions */ + +#define I2O_PARAMS_STATUS_SUCCESS 0x00 +#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01 +#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02 +#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03 +#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04 +#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05 +#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06 +#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07 +#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08 +#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09 +#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A +#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B +#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C +#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D +#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E +#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F +#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10 + +/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error + * messages: Table 3-2 Detailed Status Codes.*/ + +#define I2O_DSC_SUCCESS 0x0000 +#define I2O_DSC_BAD_KEY 0x0002 +#define I2O_DSC_TCL_ERROR 0x0003 +#define I2O_DSC_REPLY_BUFFER_FULL 0x0004 +#define I2O_DSC_NO_SUCH_PAGE 0x0005 +#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006 +#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007 +#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009 +#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A +#define I2O_DSC_DEVICE_LOCKED 0x000B +#define I2O_DSC_DEVICE_RESET 0x000C +#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D +#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E +#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F +#define I2O_DSC_INVALID_OFFSET 0x0010 +#define I2O_DSC_INVALID_PARAMETER 0x0011 +#define I2O_DSC_INVALID_REQUEST 0x0012 +#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013 +#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014 +#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015 +#define I2O_DSC_MISSING_PARAMETER 0x0016 +#define I2O_DSC_TIMEOUT 0x0017 +#define I2O_DSC_UNKNOWN_ERROR 0x0018 +#define I2O_DSC_UNKNOWN_FUNCTION 0x0019 +#define I2O_DSC_UNSUPPORTED_VERSION 0x001A +#define I2O_DSC_DEVICE_BUSY 0x001B +#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C + +/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed + Status Codes.*/ + +#define I2O_BSA_DSC_SUCCESS 0x0000 +#define I2O_BSA_DSC_MEDIA_ERROR 0x0001 +#define I2O_BSA_DSC_ACCESS_ERROR 0x0002 +#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003 +#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004 +#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005 +#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006 +#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007 +#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008 +#define I2O_BSA_DSC_BUS_FAILURE 0x0009 +#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A +#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B +#define I2O_BSA_DSC_DEVICE_RESET 0x000C +#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D +#define I2O_BSA_DSC_TIMEOUT 0x000E + +/* FailureStatusCodes, Table 3-3 Message Failure Codes */ + +#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81 +#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82 +#define I2O_FSC_TRANSPORT_CONGESTION 0x83 +#define 
I2O_FSC_TRANSPORT_FAILURE 0x84 +#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85 +#define I2O_FSC_TRANSPORT_TIME_OUT 0x86 +#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87 +#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88 +#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89 +#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A +#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B +#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C +#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D +#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E +#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F +#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF + +/* Device Claim Types */ +#define I2O_CLAIM_PRIMARY 0x01000000 +#define I2O_CLAIM_MANAGEMENT 0x02000000 +#define I2O_CLAIM_AUTHORIZED 0x03000000 +#define I2O_CLAIM_SECONDARY 0x04000000 + +/* Message header defines for VersionOffset */ +#define I2OVER15 0x0001 +#define I2OVER20 0x0002 + +/* Default is 1.5 */ +#define I2OVERSION I2OVER15 + +#define SGL_OFFSET_0 I2OVERSION +#define SGL_OFFSET_4 (0x0040 | I2OVERSION) +#define SGL_OFFSET_5 (0x0050 | I2OVERSION) +#define SGL_OFFSET_6 (0x0060 | I2OVERSION) +#define SGL_OFFSET_7 (0x0070 | I2OVERSION) +#define SGL_OFFSET_8 (0x0080 | I2OVERSION) +#define SGL_OFFSET_9 (0x0090 | I2OVERSION) +#define SGL_OFFSET_10 (0x00A0 | I2OVERSION) +#define SGL_OFFSET_11 (0x00B0 | I2OVERSION) +#define SGL_OFFSET_12 (0x00C0 | I2OVERSION) +#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION) + +/* Transaction Reply Lists (TRL) Control Word structure */ +#define TRL_SINGLE_FIXED_LENGTH 0x00 +#define TRL_SINGLE_VARIABLE_LENGTH 0x40 +#define TRL_MULTIPLE_FIXED_LENGTH 0x80 + + /* msg header defines for MsgFlags */ +#define MSG_STATIC 0x0100 +#define MSG_64BIT_CNTXT 0x0200 +#define MSG_MULTI_TRANS 0x1000 +#define MSG_FAIL 0x2000 +#define MSG_FINAL 0x4000 +#define MSG_REPLY 0x8000 + + /* minimum size msg */ +#define THREE_WORD_MSG_SIZE 0x00030000 +#define FOUR_WORD_MSG_SIZE 0x00040000 +#define FIVE_WORD_MSG_SIZE 0x00050000 +#define SIX_WORD_MSG_SIZE 0x00060000 +#define SEVEN_WORD_MSG_SIZE 0x00070000 +#define EIGHT_WORD_MSG_SIZE 0x00080000 +#define NINE_WORD_MSG_SIZE 0x00090000 +#define TEN_WORD_MSG_SIZE 0x000A0000 +#define ELEVEN_WORD_MSG_SIZE 0x000B0000 +#define I2O_MESSAGE_SIZE(x) ((x)<<16) + +/* special TID assignments */ +#define ADAPTER_TID 0 +#define HOST_TID 1 + +/* outbound queue defines */ +#define I2O_MAX_OUTBOUND_MSG_FRAMES 128 +#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */ + +/* inbound queue definitions */ +#define I2O_MSG_INPOOL_MIN 32 +#define I2O_INBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */ + +#define I2O_POST_WAIT_OK 0 +#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT + +#define I2O_CONTEXT_LIST_MIN_LENGTH 15 +#define I2O_CONTEXT_LIST_USED 0x01 +#define I2O_CONTEXT_LIST_DELETED 0x02 + +/* timeouts */ +#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15 +#define I2O_TIMEOUT_MESSAGE_GET 5 +#define I2O_TIMEOUT_RESET 30 +#define I2O_TIMEOUT_STATUS_GET 5 +#define I2O_TIMEOUT_LCT_GET 360 +#define I2O_TIMEOUT_SCSI_SCB_ABORT 240 + +/* retries */ +#define I2O_HRT_GET_TRIES 3 +#define I2O_LCT_GET_TRIES 3 + +/* defines for max_sectors and max_phys_segments */ +#define I2O_MAX_SECTORS 1024 +#define I2O_MAX_SECTORS_LIMITED 128 +#define I2O_MAX_PHYS_SEGMENTS BLK_MAX_SEGMENTS + +/* + * Message structures + */ +struct i2o_message { + union { + struct { + u8 version_offset; + u8 flags; + u16 size; + u32 target_tid:12; + u32 init_tid:12; + u32 function:8; + u32 icntxt; /* initiator context */ + u32 tcntxt; /* transaction context */ + } s; + u32 head[4]; + } u; + /* List 
follows */ + u32 body[0]; +}; + +/* MFA and I2O message used by mempool */ +struct i2o_msg_mfa { + u32 mfa; /* MFA returned by the controller */ + struct i2o_message msg; /* I2O message */ +}; + +/* + * Each I2O device entity has one of these. There is one per device. + */ +struct i2o_device { + i2o_lct_entry lct_data; /* Device LCT information */ + + struct i2o_controller *iop; /* Controlling IOP */ + struct list_head list; /* node in IOP devices list */ + + struct device device; + + struct mutex lock; /* device lock */ +}; + +/* + * Event structure provided to the event handling function + */ +struct i2o_event { + struct work_struct work; + struct i2o_device *i2o_dev; /* I2O device pointer from which the + event reply was initiated */ + u16 size; /* Size of data in 32-bit words */ + u32 tcntxt; /* Transaction context used at + registration */ + u32 event_indicator; /* Event indicator from reply */ + u32 data[0]; /* Event data from reply */ +}; + +/* + * I2O classes which could be handled by the OSM + */ +struct i2o_class_id { + u16 class_id:12; +}; + +/* + * I2O driver structure for OSMs + */ +struct i2o_driver { + char *name; /* OSM name */ + int context; /* Low 8 bits of the transaction info */ + struct i2o_class_id *classes; /* I2O classes that this OSM handles */ + + /* Message reply handler */ + int (*reply) (struct i2o_controller *, u32, struct i2o_message *); + + /* Event handler */ + work_func_t event; + + struct workqueue_struct *event_queue; /* Event queue */ + + struct device_driver driver; + + /* notification of changes */ + void (*notify_controller_add) (struct i2o_controller *); + void (*notify_controller_remove) (struct i2o_controller *); + void (*notify_device_add) (struct i2o_device *); + void (*notify_device_remove) (struct i2o_device *); + + struct semaphore lock; +}; + +/* + * Contains DMA mapped address information + */ +struct i2o_dma { + void *virt; + dma_addr_t phys; + size_t len; +}; + +/* + * Contains slab cache and mempool information + */ +struct i2o_pool { + char *name; + struct kmem_cache *slab; + mempool_t *mempool; +}; + +/* + * Contains IO mapped address information + */ +struct i2o_io { + void __iomem *virt; + unsigned long phys; + unsigned long len; +}; + +/* + * Context queue entry, used for 32-bit context on 64-bit systems + */ +struct i2o_context_list_element { + struct list_head list; + u32 context; + void *ptr; + unsigned long timestamp; +}; + +/* + * Each I2O controller has one of these objects + */ +struct i2o_controller { + char name[16]; + int unit; + int type; + + struct pci_dev *pdev; /* PCI device */ + + unsigned int promise:1; /* Promise controller */ + unsigned int adaptec:1; /* DPT / Adaptec controller */ + unsigned int raptor:1; /* split bar */ + unsigned int no_quiesce:1; /* dont quiesce before reset */ + unsigned int short_req:1; /* use small block sizes */ + unsigned int limit_sectors:1; /* limit number of sectors / request */ + unsigned int pae_support:1; /* controller has 64-bit SGL support */ + + struct list_head devices; /* list of I2O devices */ + struct list_head list; /* Controller list */ + + void __iomem *in_port; /* Inbout port address */ + void __iomem *out_port; /* Outbound port address */ + void __iomem *irq_status; /* Interrupt status register address */ + void __iomem *irq_mask; /* Interrupt mask register address */ + + struct i2o_dma status; /* IOP status block */ + + struct i2o_dma hrt; /* HW Resource Table */ + i2o_lct *lct; /* Logical Config Table */ + struct i2o_dma dlct; /* Temp LCT */ + struct mutex lct_lock; /* 
Lock for LCT updates */ + struct i2o_dma status_block; /* IOP status block */ + + struct i2o_io base; /* controller messaging unit */ + struct i2o_io in_queue; /* inbound message queue Host->IOP */ + struct i2o_dma out_queue; /* outbound message queue IOP->Host */ + + struct i2o_pool in_msg; /* mempool for inbound messages */ + + unsigned int battery:1; /* Has a battery backup */ + unsigned int io_alloc:1; /* An I/O resource was allocated */ + unsigned int mem_alloc:1; /* A memory resource was allocated */ + + struct resource io_resource; /* I/O resource allocated to the IOP */ + struct resource mem_resource; /* Mem resource allocated to the IOP */ + + struct device device; + struct i2o_device *exec; /* Executive */ +#if BITS_PER_LONG == 64 + spinlock_t context_list_lock; /* lock for context_list */ + atomic_t context_list_counter; /* needed for unique contexts */ + struct list_head context_list; /* list of context id's + and pointers */ +#endif + spinlock_t lock; /* lock for controller + configuration */ + void *driver_data[I2O_MAX_DRIVERS]; /* storage for drivers */ +}; + +/* + * I2O System table entry + * + * The system table contains information about all the IOPs in the + * system. It is sent to all IOPs so that they can create peer2peer + * connections between them. + */ +struct i2o_sys_tbl_entry { + u16 org_id; + u16 reserved1; + u32 iop_id:12; + u32 reserved2:20; + u16 seg_num:12; + u16 i2o_version:4; + u8 iop_state; + u8 msg_type; + u16 frame_size; + u16 reserved3; + u32 last_changed; + u32 iop_capabilities; + u32 inbound_low; + u32 inbound_high; +}; + +struct i2o_sys_tbl { + u8 num_entries; + u8 version; + u16 reserved1; + u32 change_ind; + u32 reserved2; + u32 reserved3; + struct i2o_sys_tbl_entry iops[0]; +}; + +extern struct list_head i2o_controllers; + +/* Message functions */ +extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int); +extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *, + unsigned long, struct i2o_dma *); + +/* IOP functions */ +extern int i2o_status_get(struct i2o_controller *); + +extern int i2o_event_register(struct i2o_device *, struct i2o_driver *, int, + u32); +extern struct i2o_device *i2o_iop_find_device(struct i2o_controller *, u16); +extern struct i2o_controller *i2o_find_iop(int); + +/* Functions needed for handling 64-bit pointers in 32-bit context */ +#if BITS_PER_LONG == 64 +extern u32 i2o_cntxt_list_add(struct i2o_controller *, void *); +extern void *i2o_cntxt_list_get(struct i2o_controller *, u32); +extern u32 i2o_cntxt_list_remove(struct i2o_controller *, void *); +extern u32 i2o_cntxt_list_get_ptr(struct i2o_controller *, void *); + +static inline u32 i2o_ptr_low(void *ptr) +{ + return (u32) (u64) ptr; +}; + +static inline u32 i2o_ptr_high(void *ptr) +{ + return (u32) ((u64) ptr >> 32); +}; + +static inline u32 i2o_dma_low(dma_addr_t dma_addr) +{ + return (u32) (u64) dma_addr; +}; + +static inline u32 i2o_dma_high(dma_addr_t dma_addr) +{ + return (u32) ((u64) dma_addr >> 32); +}; +#else +static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr) +{ + return (u32) ptr; +}; + +static inline void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context) +{ + return (void *)context; +}; + +static inline u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr) +{ + return (u32) ptr; +}; + +static inline u32 i2o_cntxt_list_get_ptr(struct i2o_controller *c, void *ptr) +{ + return (u32) ptr; +}; + +static inline u32 i2o_ptr_low(void *ptr) +{ + return (u32) ptr; +}; + +static inline 
u32 i2o_ptr_high(void *ptr) +{ + return 0; +}; + +static inline u32 i2o_dma_low(dma_addr_t dma_addr) +{ + return (u32) dma_addr; +}; + +static inline u32 i2o_dma_high(dma_addr_t dma_addr) +{ + return 0; +}; +#endif + +extern u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size); +extern dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, + size_t size, + enum dma_data_direction direction, + u32 ** sg_ptr); +extern int i2o_dma_map_sg(struct i2o_controller *c, + struct scatterlist *sg, int sg_count, + enum dma_data_direction direction, + u32 ** sg_ptr); +extern int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len); +extern void i2o_dma_free(struct device *dev, struct i2o_dma *addr); +extern int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, + size_t len); +extern int i2o_pool_alloc(struct i2o_pool *pool, const char *name, + size_t size, int min_nr); +extern void i2o_pool_free(struct i2o_pool *pool); +/* I2O driver (OSM) functions */ +extern int i2o_driver_register(struct i2o_driver *); +extern void i2o_driver_unregister(struct i2o_driver *); + +/** + * i2o_driver_notify_controller_add - Send notification of added controller + * @drv: I2O driver + * @c: I2O controller + * + * Send notification of added controller to a single registered driver. + */ +static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv, + struct i2o_controller *c) +{ + if (drv->notify_controller_add) + drv->notify_controller_add(c); +}; + +/** + * i2o_driver_notify_controller_remove - Send notification of removed controller + * @drv: I2O driver + * @c: I2O controller + * + * Send notification of removed controller to a single registered driver. + */ +static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv, + struct i2o_controller *c) +{ + if (drv->notify_controller_remove) + drv->notify_controller_remove(c); +}; + +/** + * i2o_driver_notify_device_add - Send notification of added device + * @drv: I2O driver + * @i2o_dev: the added i2o_device + * + * Send notification of added device to a single registered driver. + */ +static inline void i2o_driver_notify_device_add(struct i2o_driver *drv, + struct i2o_device *i2o_dev) +{ + if (drv->notify_device_add) + drv->notify_device_add(i2o_dev); +}; + +/** + * i2o_driver_notify_device_remove - Send notification of removed device + * @drv: I2O driver + * @i2o_dev: the added i2o_device + * + * Send notification of removed device to a single registered driver. 
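As a rough sketch of how an OSM hooks into the i2o_driver structure and registration calls above, the skeleton below claims the random block storage class. The class table, reply handler body and all example_* names are hypothetical; real OSMs are considerably more involved.

/* Illustrative sketch only: skeleton OSM registration. */
static struct i2o_class_id example_osm_classes[] = {
	{ I2O_CLASS_RANDOM_BLOCK_STORAGE },
	{ I2O_CLASS_END },
};

static int example_osm_reply(struct i2o_controller *c, u32 m,
			     struct i2o_message *msg)
{
	/* Decode the reply frame here */
	return 0;
}

static struct i2o_driver example_osm_driver = {
	.name	 = "example-osm",
	.classes = example_osm_classes,
	.reply	 = example_osm_reply,
};

static int __init example_osm_init(void)
{
	return i2o_driver_register(&example_osm_driver);
}

static void __exit example_osm_exit(void)
{
	i2o_driver_unregister(&example_osm_driver);
}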
+ */ +static inline void i2o_driver_notify_device_remove(struct i2o_driver *drv, + struct i2o_device *i2o_dev) +{ + if (drv->notify_device_remove) + drv->notify_device_remove(i2o_dev); +}; + +extern void i2o_driver_notify_controller_add_all(struct i2o_controller *); +extern void i2o_driver_notify_controller_remove_all(struct i2o_controller *); +extern void i2o_driver_notify_device_add_all(struct i2o_device *); +extern void i2o_driver_notify_device_remove_all(struct i2o_device *); + +/* I2O device functions */ +extern int i2o_device_claim(struct i2o_device *); +extern int i2o_device_claim_release(struct i2o_device *); + +/* Exec OSM functions */ +extern int i2o_exec_lct_get(struct i2o_controller *); + +/* device / driver / kobject conversion functions */ +#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) +#define to_i2o_device(dev) container_of(dev, struct i2o_device, device) +#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) + +/** + * i2o_out_to_virt - Turn an I2O message to a virtual address + * @c: controller + * @m: message engine value + * + * Turn a receive message from an I2O controller bus address into + * a Linux virtual address. The shared page frame is a linear block + * so we simply have to shift the offset. This function does not + * work for sender side messages as they are ioremap objects + * provided by the I2O controller. + */ +static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c, + u32 m) +{ + BUG_ON(m < c->out_queue.phys + || m >= c->out_queue.phys + c->out_queue.len); + + return c->out_queue.virt + (m - c->out_queue.phys); +}; + +/** + * i2o_msg_in_to_virt - Turn an I2O message to a virtual address + * @c: controller + * @m: message engine value + * + * Turn a send message from an I2O controller bus address into + * a Linux virtual address. The shared page frame is a linear block + * so we simply have to shift the offset. This function does not + * work for receive side messages as they are kmalloc objects + * in a different pool. + */ +static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct + i2o_controller *c, + u32 m) +{ + return c->in_queue.virt + m; +}; + +/** + * i2o_msg_get - obtain an I2O message from the IOP + * @c: I2O controller + * + * This function tries to get a message frame. If no message frame is + * available do not wait until one is available (see also i2o_msg_get_wait). + * The returned pointer to the message frame is not in I/O memory, it is + * allocated from a mempool. But because a MFA is allocated from the + * controller too it is guaranteed that i2o_msg_post() will never fail. + * + * On a success a pointer to the message frame is returned. If the message + * queue is empty -EBUSY is returned and if no memory is available -ENOMEM + * is returned. + */ +static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c) +{ + struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC); + if (!mmsg) + return ERR_PTR(-ENOMEM); + + mmsg->mfa = readl(c->in_port); + if (unlikely(mmsg->mfa >= c->in_queue.len)) { + u32 mfa = mmsg->mfa; + + mempool_free(mmsg, c->in_msg.mempool); + + if (mfa == I2O_QUEUE_EMPTY) + return ERR_PTR(-EBUSY); + return ERR_PTR(-EFAULT); + } + + return &mmsg->msg; +}; + +/** + * i2o_msg_post - Post I2O message to I2O controller + * @c: I2O controller to which the message should be send + * @msg: message returned by i2o_msg_get() + * + * Post the message to the I2O controller and return immediately. 
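The message helpers documented here are normally used in a short fixed pattern: fetch a frame, fill the header words, post it. A minimal sketch follows, assuming the usual ERR_PTR helpers and using a UTIL NOP purely as a placeholder command; real OSMs substitute their own function code, TIDs and payload.

/* Illustrative sketch only: get/fill/post a message frame. */
static inline int example_post_nop(struct i2o_controller *c)
{
	struct i2o_message *msg;

	msg = i2o_msg_get(c);
	if (IS_ERR(msg))
		return PTR_ERR(msg);	/* -EBUSY: queue empty, -ENOMEM: pool exhausted */

	msg->u.head[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 |
				     ADAPTER_TID);
	msg->u.head[2] = cpu_to_le32(0);	/* initiator context unused for a NOP */

	i2o_msg_post(c, msg);	/* fire and forget; no reply is waited for */
	return 0;
}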
+ */ +static inline void i2o_msg_post(struct i2o_controller *c, + struct i2o_message *msg) +{ + struct i2o_msg_mfa *mmsg; + + mmsg = container_of(msg, struct i2o_msg_mfa, msg); + memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg, + (le32_to_cpu(msg->u.head[0]) >> 16) << 2); + writel(mmsg->mfa, c->in_port); + mempool_free(mmsg, c->in_msg.mempool); +}; + +/** + * i2o_msg_post_wait - Post a message and wait for the reply + * @c: controller + * @msg: message to post + * @timeout: time in seconds to wait + * + * This API allows an OSM to post a message and then be told whether or + * not the system received a successful reply. If the message times out + * then the value '-ETIMEDOUT' is returned. + * + * Returns 0 on success or negative error code on failure. + */ +static inline int i2o_msg_post_wait(struct i2o_controller *c, + struct i2o_message *msg, + unsigned long timeout) +{ + return i2o_msg_post_wait_mem(c, msg, timeout, NULL); +}; + +/** + * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller + * @c: I2O controller from which the MFA was fetched + * @mfa: MFA which should be returned + * + * This function must be used for preserved messages, because i2o_msg_nop() + * also returns the allocated memory back to the msg_pool mempool. + */ +static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa) +{ + struct i2o_message __iomem *msg; + u32 nop[3] = { + THREE_WORD_MSG_SIZE | SGL_OFFSET_0, + I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, + 0x00000000 + }; + + msg = i2o_msg_in_to_virt(c, mfa); + memcpy_toio(msg, nop, sizeof(nop)); + writel(mfa, c->in_port); +}; + +/** + * i2o_msg_nop - Returns a message which is not used + * @c: I2O controller from which the message was created + * @msg: message which should be returned + * + * If you fetch a message via i2o_msg_get, and can't use it, you must + * return the message with this function. Otherwise the MFA is lost as well + * as the allocated memory from the mempool. + */ +static inline void i2o_msg_nop(struct i2o_controller *c, + struct i2o_message *msg) +{ + struct i2o_msg_mfa *mmsg; + mmsg = container_of(msg, struct i2o_msg_mfa, msg); + + i2o_msg_nop_mfa(c, mmsg->mfa); + mempool_free(mmsg, c->in_msg.mempool); +}; + +/** + * i2o_flush_reply - Flush reply from I2O controller + * @c: I2O controller + * @m: the message identifier + * + * The I2O controller must be informed that the reply message is not needed + * anymore. If you forget to flush the reply, the message frame can't be + * used by the controller anymore and is therefore lost. + */ +static inline void i2o_flush_reply(struct i2o_controller *c, u32 m) +{ + writel(m, c->out_port); +}; + +/* + * Endian handling wrapped into the macro - keeps the core code + * cleaner. + */ + +#define i2o_raw_writel(val, mem) __raw_writel(cpu_to_le32(val), mem) + +extern int i2o_parm_field_get(struct i2o_device *, int, int, void *, int); +extern int i2o_parm_table_get(struct i2o_device *, int, int, int, void *, int, + void *, int); + +/* debugging and troubleshooting/diagnostic helpers. */ +#define osm_printk(level, format, arg...) \ + printk(level "%s: " format, OSM_NAME , ## arg) + +#ifdef DEBUG +#define osm_debug(format, arg...) \ + osm_printk(KERN_DEBUG, format , ## arg) +#else +#define osm_debug(format, arg...) \ + do { } while (0) +#endif + +#define osm_err(format, arg...) \ + osm_printk(KERN_ERR, format , ## arg) +#define osm_info(format, arg...) \ + osm_printk(KERN_INFO, format , ## arg) +#define osm_warn(format, arg...)
\ + osm_printk(KERN_WARNING, format , ## arg) + +/* debugging functions */ +extern void i2o_report_status(const char *, const char *, struct i2o_message *); +extern void i2o_dump_message(struct i2o_message *); +extern void i2o_dump_hrt(struct i2o_controller *c); +extern void i2o_debug_state(struct i2o_controller *c); + +#endif /* _I2O_H */ diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h new file mode 100644 index 00000000..1587b7de --- /dev/null +++ b/include/linux/i7300_idle.h @@ -0,0 +1,83 @@ + +#ifndef I7300_IDLE_H +#define I7300_IDLE_H + +#include <linux/pci.h> + +/* + * I/O AT controls (PCI bus 0 device 8 function 0) + * DIMM controls (PCI bus 0 device 16 function 1) + */ +#define IOAT_BUS 0 +#define IOAT_DEVFN PCI_DEVFN(8, 0) +#define MEMCTL_BUS 0 +#define MEMCTL_DEVFN PCI_DEVFN(16, 1) + +struct fbd_ioat { + unsigned int vendor; + unsigned int ioat_dev; + unsigned int enabled; +}; + +/* + * The i5000 chip-set has the same hooks as the i7300 + * but it is not enabled by default and must be manually + * enabled with "forceload=1" because it is + * only lightly validated. + */ + +static const struct fbd_ioat fbd_ioat_list[] = { + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1}, + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0}, + {0, 0} +}; + +/* table of devices that work with this driver */ +static const struct pci_device_id pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, + { } /* Terminating entry */ +}; + +/* Check for known platforms with I/O-AT */ +static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, + struct pci_dev **ioat_dev, + int enable_all) +{ + int i; + struct pci_dev *memdev, *dmadev; + + memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN); + if (!memdev) + return -ENODEV; + + for (i = 0; pci_tbl[i].vendor != 0; i++) { + if (memdev->vendor == pci_tbl[i].vendor && + memdev->device == pci_tbl[i].device) { + break; + } + } + if (pci_tbl[i].vendor == 0) + return -ENODEV; + + dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN); + if (!dmadev) + return -ENODEV; + + for (i = 0; fbd_ioat_list[i].vendor != 0; i++) { + if (dmadev->vendor == fbd_ioat_list[i].vendor && + dmadev->device == fbd_ioat_list[i].ioat_dev) { + if (!(fbd_ioat_list[i].enabled || enable_all)) + continue; + if (fbd_dev) + *fbd_dev = memdev; + if (ioat_dev) + *ioat_dev = dmadev; + + return 0; + } + } + return -ENODEV; +} + +#endif diff --git a/include/linux/i8042.h b/include/linux/i8042.h new file mode 100644 index 00000000..a986ff58 --- /dev/null +++ b/include/linux/i8042.h @@ -0,0 +1,81 @@ +#ifndef _LINUX_I8042_H +#define _LINUX_I8042_H + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/types.h> + +/* + * Standard commands.
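+ * + * (Editorial note, not part of the original header.) A minimal, hypothetical + * use of the command constants below with i8042_command(), declared later in + * this header, here reading the controller's control register: + * + * unsigned char ctr; + * int error = i8042_command(&ctr, I8042_CMD_CTL_RCTR); + * + * ctr and error are illustrative local names; when CONFIG_SERIO_I8042 is not + * enabled the stub variant simply returns -ENODEV.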
+ */ + +#define I8042_CMD_CTL_RCTR 0x0120 +#define I8042_CMD_CTL_WCTR 0x1060 +#define I8042_CMD_CTL_TEST 0x01aa + +#define I8042_CMD_KBD_DISABLE 0x00ad +#define I8042_CMD_KBD_ENABLE 0x00ae +#define I8042_CMD_KBD_TEST 0x01ab +#define I8042_CMD_KBD_LOOP 0x11d2 + +#define I8042_CMD_AUX_DISABLE 0x00a7 +#define I8042_CMD_AUX_ENABLE 0x00a8 +#define I8042_CMD_AUX_TEST 0x01a9 +#define I8042_CMD_AUX_SEND 0x10d4 +#define I8042_CMD_AUX_LOOP 0x11d3 + +#define I8042_CMD_MUX_PFX 0x0090 +#define I8042_CMD_MUX_SEND 0x1090 + +struct serio; + +#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE) + +void i8042_lock_chip(void); +void i8042_unlock_chip(void); +int i8042_command(unsigned char *param, int command); +bool i8042_check_port_owner(const struct serio *); +int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)); +int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)); + +#else + +static inline void i8042_lock_chip(void) +{ +} + +static inline void i8042_unlock_chip(void) +{ +} + +static inline int i8042_command(unsigned char *param, int command) +{ + return -ENODEV; +} + +static inline bool i8042_check_port_owner(const struct serio *serio) +{ + return false; +} + +static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) +{ + return -ENODEV; +} + +static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) +{ + return -ENODEV; +} + +#endif + +#endif diff --git a/include/linux/i8253.h b/include/linux/i8253.h new file mode 100644 index 00000000..e6bb36a9 --- /dev/null +++ b/include/linux/i8253.h @@ -0,0 +1,29 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Machine specific IO port address definition for generic. + * Written by Osamu Tomita <tomita@cinet.co.jp> + */ +#ifndef __LINUX_I8253_H +#define __LINUX_I8253_H + +#include <linux/param.h> +#include <linux/spinlock.h> +#include <linux/timex.h> + +/* i8253A PIT registers */ +#define PIT_MODE 0x43 +#define PIT_CH0 0x40 +#define PIT_CH2 0x42 + +#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) + +extern raw_spinlock_t i8253_lock; +extern struct clock_event_device i8253_clockevent; +extern void clockevent_i8253_init(bool oneshot); + +extern void setup_pit_timer(void); + +#endif /* __LINUX_I8253_H */ diff --git a/include/linux/i82593.h b/include/linux/i82593.h new file mode 100644 index 00000000..afac5c7a --- /dev/null +++ b/include/linux/i82593.h @@ -0,0 +1,229 @@ +/* + * Definitions for Intel 82593 CSMA/CD Core LAN Controller + * The definitions are taken from the 1992 users manual with Intel + * order number 297125-001. + * + * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp + * + * Copyright 1994, Anders Klemets <klemets@it.kth.se> + * + * HISTORY + * i82593.h,v + * Revision 1.4 2005/11/4 09:15:00 baroniunas + * Modified copyright with permission of author as follows: + * + * "If I82539.H is the only file with my copyright statement + * that is included in the Source Forge project, then you have + * my approval to change the copyright statement to be a GPL + * license, in the way you proposed on October 10." 
+ * + * Revision 1.1 1996/07/17 15:23:12 root + * Initial revision + * + * Revision 1.3 1995/04/05 15:13:58 adj + * Initial alpha release + * + * Revision 1.2 1994/06/16 23:57:31 klemets + * Mirrored all the fields in the configuration block. + * + * Revision 1.1 1994/06/02 20:25:34 klemets + * Initial revision + * + * + */ +#ifndef _I82593_H +#define _I82593_H + +/* Intel 82593 CSMA/CD Core LAN Controller */ + +/* Port 0 Command Register definitions */ + +/* Execution operations */ +#define OP0_NOP 0 /* CHNL = 0 */ +#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */ +#define OP0_IA_SETUP 1 +#define OP0_CONFIGURE 2 +#define OP0_MC_SETUP 3 +#define OP0_TRANSMIT 4 +#define OP0_TDR 5 +#define OP0_DUMP 6 +#define OP0_DIAGNOSE 7 +#define OP0_TRANSMIT_NO_CRC 9 +#define OP0_RETRANSMIT 12 +#define OP0_ABORT 13 +/* Reception operations */ +#define OP0_RCV_ENABLE 8 +#define OP0_RCV_DISABLE 10 +#define OP0_STOP_RCV 11 +/* Status pointer control operations */ +#define OP0_FIX_PTR 15 /* CHNL = 1 */ +#define OP0_RLS_PTR 15 /* CHNL = 0 */ +#define OP0_RESET 14 + +#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */ +#define CR0_STATUS_0 0x00 +#define CR0_STATUS_1 0x20 +#define CR0_STATUS_2 0x40 +#define CR0_STATUS_3 0x60 +#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */ + +/* Port 0 Status Register definitions */ + +#define SR0_NO_RESULT 0 /* dummy */ +#define SR0_EVENT_MASK 0x0f +#define SR0_IA_SETUP_DONE 1 +#define SR0_CONFIGURE_DONE 2 +#define SR0_MC_SETUP_DONE 3 +#define SR0_TRANSMIT_DONE 4 +#define SR0_TDR_DONE 5 +#define SR0_DUMP_DONE 6 +#define SR0_DIAGNOSE_PASSED 7 +#define SR0_TRANSMIT_NO_CRC_DONE 9 +#define SR0_RETRANSMIT_DONE 12 +#define SR0_EXECUTION_ABORTED 13 +#define SR0_END_OF_FRAME 8 +#define SR0_RECEPTION_ABORTED 10 +#define SR0_DIAGNOSE_FAILED 15 +#define SR0_STOP_REG_HIT 11 + +#define SR0_CHNL (1 << 4) +#define SR0_EXECUTION (1 << 5) +#define SR0_RECEPTION (1 << 6) +#define SR0_INTERRUPT (1 << 7) +#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION) + +#define SR3_EXEC_STATE_MASK 0x03 +#define SR3_EXEC_IDLE 0 +#define SR3_TX_ABORT_IN_PROGRESS 1 +#define SR3_EXEC_ACTIVE 2 +#define SR3_ABORT_IN_PROGRESS 3 +#define SR3_EXEC_CHNL (1 << 2) +#define SR3_STP_ON_NO_RSRC (1 << 3) +#define SR3_RCVING_NO_RSRC (1 << 4) +#define SR3_RCV_STATE_MASK 0x60 +#define SR3_RCV_IDLE 0x00 +#define SR3_RCV_READY 0x20 +#define SR3_RCV_ACTIVE 0x40 +#define SR3_RCV_STOP_IN_PROG 0x60 +#define SR3_RCV_CHNL (1 << 7) + +/* Port 1 Command Register definitions */ + +#define OP1_NOP 0 +#define OP1_SWIT_TO_PORT_0 1 +#define OP1_INT_DISABLE 2 +#define OP1_INT_ENABLE 3 +#define OP1_SET_TS 5 +#define OP1_RST_TS 7 +#define OP1_POWER_DOWN 8 +#define OP1_RESET_RING_MNGMT 11 +#define OP1_RESET 14 +#define OP1_SEL_RST 15 + +#define CR1_STATUS_4 0x00 +#define CR1_STATUS_5 0x20 +#define CR1_STATUS_6 0x40 +#define CR1_STOP_REG_UPDATE (1 << 7) + +/* Receive frame status bits */ + +#define RX_RCLD (1 << 0) +#define RX_IA_MATCH (1 << 1) +#define RX_NO_AD_MATCH (1 << 2) +#define RX_NO_SFD (1 << 3) +#define RX_SRT_FRM (1 << 7) +#define RX_OVRRUN (1 << 8) +#define RX_ALG_ERR (1 << 10) +#define RX_CRC_ERR (1 << 11) +#define RX_LEN_ERR (1 << 12) +#define RX_RCV_OK (1 << 13) +#define RX_TYP_LEN (1 << 15) + +/* Transmit status bits */ + +#define TX_NCOL_MASK 0x0f +#define TX_FRTL (1 << 4) +#define TX_MAX_COL (1 << 5) +#define TX_HRT_BEAT (1 << 6) +#define TX_DEFER (1 << 7) +#define TX_UND_RUN (1 << 8) +#define TX_LOST_CTS (1 << 9) +#define TX_LOST_CRS (1 << 10) +#define TX_LTCOL (1 << 11) +#define TX_OK (1 << 13) +#define TX_COLL 
(1 << 15) + +struct i82593_conf_block { + u_char fifo_limit : 4, + forgnesi : 1, + fifo_32 : 1, + d6mod : 1, + throttle_enb : 1; + u_char throttle : 6, + cntrxint : 1, + contin : 1; + u_char addr_len : 3, + acloc : 1, + preamb_len : 2, + loopback : 2; + u_char lin_prio : 3, + tbofstop : 1, + exp_prio : 3, + bof_met : 1; + u_char : 4, + ifrm_spc : 4; + u_char : 5, + slottim_low : 3; + u_char slottim_hi : 3, + : 1, + max_retr : 4; + u_char prmisc : 1, + bc_dis : 1, + : 1, + crs_1 : 1, + nocrc_ins : 1, + crc_1632 : 1, + : 1, + crs_cdt : 1; + u_char cs_filter : 3, + crs_src : 1, + cd_filter : 3, + : 1; + u_char : 2, + min_fr_len : 6; + u_char lng_typ : 1, + lng_fld : 1, + rxcrc_xf : 1, + artx : 1, + sarec : 1, + tx_jabber : 1, /* why is this called max_len in the manual? */ + hash_1 : 1, + lbpkpol : 1; + u_char : 6, + fdx : 1, + : 1; + u_char dummy_6 : 6, /* supposed to be ones */ + mult_ia : 1, + dis_bof : 1; + u_char dummy_1 : 1, /* supposed to be one */ + tx_ifs_retrig : 2, + mc_all : 1, + rcv_mon : 2, + frag_acpt : 1, + tstrttrs : 1; + u_char fretx : 1, + runt_eop : 1, + hw_sw_pin : 1, + big_endn : 1, + syncrqs : 1, + sttlen : 1, + tx_eop : 1, + rx_eop : 1; + u_char rbuf_size : 5, + rcvstop : 1, + : 2; +}; + +#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */ + +#endif /* _I82593_H */ diff --git a/include/linux/i8k.h b/include/linux/i8k.h new file mode 100644 index 00000000..1c45ba50 --- /dev/null +++ b/include/linux/i8k.h @@ -0,0 +1,46 @@ +/* + * i8k.h -- Linux driver for accessing the SMM BIOS on Dell laptops + * + * Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _LINUX_I8K_H +#define _LINUX_I8K_H + +#define I8K_PROC "/proc/i8k" +#define I8K_PROC_FMT "1.0" + +#define I8K_BIOS_VERSION _IOR ('i', 0x80, int) /* broken: meant 4 bytes */ +#define I8K_MACHINE_ID _IOR ('i', 0x81, int) /* broken: meant 16 bytes */ +#define I8K_POWER_STATUS _IOR ('i', 0x82, size_t) +#define I8K_FN_STATUS _IOR ('i', 0x83, size_t) +#define I8K_GET_TEMP _IOR ('i', 0x84, size_t) +#define I8K_GET_SPEED _IOWR('i', 0x85, size_t) +#define I8K_GET_FAN _IOWR('i', 0x86, size_t) +#define I8K_SET_FAN _IOWR('i', 0x87, size_t) + +#define I8K_FAN_LEFT 1 +#define I8K_FAN_RIGHT 0 +#define I8K_FAN_OFF 0 +#define I8K_FAN_LOW 1 +#define I8K_FAN_HIGH 2 +#define I8K_FAN_MAX I8K_FAN_HIGH + +#define I8K_VOL_UP 1 +#define I8K_VOL_DOWN 2 +#define I8K_VOL_MUTE 4 + +#define I8K_AC 1 +#define I8K_BATTERY 0 + +#endif diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h new file mode 100644 index 00000000..06695b74 --- /dev/null +++ b/include/linux/ibmtr.h @@ -0,0 +1,373 @@ +#ifndef __LINUX_IBMTR_H__ +#define __LINUX_IBMTR_H__ + +/* Definitions for an IBM Token Ring card. 
*/ +/* This file is distributed under the GNU GPL */ + +/* ported to the Alpha architecture 02/20/96 (just used the HZ macro) */ + +#define TR_RETRY_INTERVAL (30*HZ) /* 500 on PC = 5 s */ +#define TR_RST_TIME (msecs_to_jiffies(50)) /* 5 on PC = 50 ms */ +#define TR_BUSY_INTERVAL (msecs_to_jiffies(200)) /* 5 on PC = 200 ms */ +#define TR_SPIN_INTERVAL (3*HZ) /* 3 seconds before init timeout */ + +#define TR_ISA 1 +#define TR_MCA 2 +#define TR_ISAPNP 3 +#define NOTOK 0 + +#define IBMTR_SHARED_RAM_SIZE 0x10000 +#define IBMTR_IO_EXTENT 4 +#define IBMTR_MAX_ADAPTERS 4 + +#define CHANNEL_ID 0X1F30 +#define AIP 0X1F00 +#define AIPADAPTYPE 0X1FA0 +#define AIPDATARATE 0X1FA2 +#define AIPEARLYTOKEN 0X1FA4 +#define AIPAVAILSHRAM 0X1FA6 +#define AIPSHRAMPAGE 0X1FA8 +#define AIP4MBDHB 0X1FAA +#define AIP16MBDHB 0X1FAC +#define AIPFID 0X1FBA + +#define ADAPTRESET 0x1 /* Control Adapter reset (add to base) */ +#define ADAPTRESETREL 0x2 /* Release Adapter from reset ( """) */ +#define ADAPTINTREL 0x3 /* Adapter interrupt release */ + +#define GLOBAL_INT_ENABLE 0x02f0 + +/* MMIO bits 0-4 select register */ +#define RRR_EVEN 0x00 /* Shared RAM relocation registers - even and odd */ +/* Used to set the starting address of shared RAM */ +/* Bits 1 through 7 of this register map to bits 13 through 19 of the shared + RAM address.*/ +/* ie: 0x02 sets RAM address to ...ato! issy su wazzoo !! GODZILLA!!! */ +#define RRR_ODD 0x01 +/* Bits 2 and 3 of this register can be read to determine shared RAM size */ +/* 00 for 8k, 01 for 16k, 10 for 32k, 11 for 64k */ +#define WRBR_EVEN 0x02 /* Write region base registers - even and odd */ +#define WRBR_ODD 0x03 +#define WWOR_EVEN 0x04 /* Write window open registers - even and odd */ +#define WWOR_ODD 0x05 +#define WWCR_EVEN 0x06 /* Write window close registers - even and odd */ +#define WWCR_ODD 0x07 + +/* Interrupt status registers - PC system - even and odd */ +#define ISRP_EVEN 0x08 + +#define TCR_INT 0x10 /* Bit 4 - Timer interrupt. The TVR_EVEN timer has + expired. */ +#define ERR_INT 0x08 /* Bit 3 - Error interrupt. The adapter has had an + internal error. */ +#define ACCESS_INT 0x04 /* Bit 2 - Access interrupt. You have attempted to + write to an invalid area of shared RAM + or an invalid register within the MMIO. */ +/* In addition, the following bits within ISRP_EVEN can be turned on or off */ +/* by you to control the interrupt processing: */ +#define INT_ENABLE 0x40 /* Bit 6 - Interrupt enable. If 0, no interrupts will + occur. If 1, interrupts will occur normally. + Normally set to 1. */ +/* Bit 0 - Primary or alternate adapter. Set to zero if this adapter is the + primary adapter, 1 if this adapter is the alternate adapter. */ + + +#define ISRP_ODD 0x09 + +#define ADAP_CHK_INT 0x40 /* Bit 6 - Adapter check. the adapter has + encountered a serious problem and has closed + itself. Whoa. */ +#define SRB_RESP_INT 0x20 /* Bit 5 - SRB response. The adapter has accepted + an SRB request and set the return code within + the SRB. */ +#define ASB_FREE_INT 0x10 /* Bit 4 - ASB free. The adapter has read the ASB + and this area can be safely reused. This interrupt + is only used if your application has set the ASB + free request bit in ISRA_ODD or if an error was + detected in your response. */ +#define ARB_CMD_INT 0x08 /* Bit 3 - ARB command. The adapter has given you a + command for action. The command is located in the + ARB area of shared memory. */ +#define SSB_RESP_INT 0x04 /* Bit 2 - SSB response. 
The adapter has posted a + response to your SRB (the response is located in + the SSB area of shared memory). */ +/* Bit 1 - Bridge frame forward complete. */ + + + +#define ISRA_EVEN 0x0A /*Interrupt status registers - adapter - even and odd */ +/* Bit 7 - Internal parity error (on adapter's internal bus) */ +/* Bit 6 - Timer interrupt pending */ +/* Bit 5 - Access interrupt (attempt by adapter to access illegal address) */ +/* Bit 4 - Adapter microcode problem (microcode dead-man timer expired) */ +/* Bit 3 - Adapter processor check status */ +/* Bit 2 - Reserved */ +/* Bit 1 - Adapter hardware interrupt mask (prevents internal interrupts) */ +/* Bit 0 - Adapter software interrupt mask (prevents internal software ints) */ + +#define ISRA_ODD 0x0B +#define CMD_IN_SRB 0x20 /* Bit 5 - Indicates that you have placed a new + command in the SRB and are ready for the adapter to + process the command. */ +#define RESP_IN_ASB 0x10 /* Bit 4 - Indicates that you have placed a response + (an ASB) in the shared RAM which is available for + the adapter's use. */ +/* Bit 3 - Indicates that you are ready to put an SRB in the shared RAM, but + that a previous command is still pending. The adapter will then + interrupt you when the previous command is completed */ +/* Bit 2 - Indicates that you are ready to put an ASB in the shared RAM, but + that a previous ASB is still pending. The adapter will then interrupt + you when the previous ASB is copied. */ +#define ARB_FREE 0x2 +#define SSB_FREE 0x1 + +#define TCR_EVEN 0x0C /* Timer control registers - even and odd */ +#define TCR_ODD 0x0D +#define TVR_EVEN 0x0E /* Timer value registers - even and odd */ +#define TVR_ODD 0x0F +#define SRPR_EVEN 0x18 /* Shared RAM paging registers - even and odd */ +#define SRPR_ENABLE_PAGING 0xc0 +#define SRPR_ODD 0x19 /* Not used. */ +#define TOKREAD 0x60 +#define TOKOR 0x40 +#define TOKAND 0x20 +#define TOKWRITE 0x00 + +/* MMIO bits 5-6 select operation */ +/* 00 is used to write to a register */ +/* 01 is used to bitwise AND a byte with a register */ +/* 10 is used to bitwise OR a byte with a register */ +/* 11 is used to read from a register */ + +/* MMIO bits 7-8 select area of interest.. see below */ +/* 00 selects attachment control area. */ +/* 01 is reserved. */ +/* 10 selects adapter identification area A containing the adapter encoded + address. */ +/* 11 selects the adapter identification area B containing test patterns. 
*/ + +#define PCCHANNELID 5049434F3631313039393020 +#define MCCHANNELID 4D4152533633583435313820 + +#define ACA_OFFSET 0x1e00 +#define ACA_SET 0x40 +#define ACA_RESET 0x20 +#define ACA_RW 0x00 + +#ifdef ENABLE_PAGING +#define SET_PAGE(x) (writeb((x), ti->mmio + ACA_OFFSET+ ACA_RW + SRPR_EVEN)) +#else +#define SET_PAGE(x) +#endif + +/* do_tok_int possible values */ +#define FIRST_INT 1 +#define NOT_FIRST 2 + +typedef enum { CLOSED, OPEN } open_state; +//staic const char *printstate[] = { "CLOSED","OPEN"}; + +struct tok_info { + unsigned char irq; + void __iomem *mmio; + unsigned char hw_address[32]; + unsigned char adapter_type; + unsigned char data_rate; + unsigned char token_release; + unsigned char avail_shared_ram; + unsigned char shared_ram_paging; + unsigned char turbo; + unsigned short dhb_size4mb; + unsigned short rbuf_len4; + unsigned short rbuf_cnt4; + unsigned short maxmtu4; + unsigned short dhb_size16mb; + unsigned short rbuf_len16; + unsigned short rbuf_cnt16; + unsigned short maxmtu16; + /* Additions by David Morris */ + unsigned char do_tok_int; + wait_queue_head_t wait_for_reset; + unsigned char sram_base; + /* Additions by Peter De Schrijver */ + unsigned char page_mask; /* mask to select RAM page to Map*/ + unsigned char mapped_ram_size; /* size of RAM page */ + __u32 sram_phys; /* Shared memory base address */ + void __iomem *sram_virt; /* Shared memory base address */ + void __iomem *init_srb; /* Initial System Request Block address */ + void __iomem *srb; /* System Request Block address */ + void __iomem *ssb; /* System Status Block address */ + void __iomem *arb; /* Adapter Request Block address */ + void __iomem *asb; /* Adapter Status Block address */ + __u8 init_srb_page; + __u8 srb_page; + __u8 ssb_page; + __u8 arb_page; + __u8 asb_page; + unsigned short exsap_station_id; + unsigned short global_int_enable; + struct sk_buff *current_skb; + + unsigned char auto_speedsave; + open_state open_status, sap_status; + enum {MANUAL, AUTOMATIC} open_mode; + enum {FAIL, RESTART, REOPEN} open_action; + enum {NO, YES} open_failure; + unsigned char readlog_pending; + unsigned short adapter_int_enable; /* Adapter-specific int enable */ + struct timer_list tr_timer; + unsigned char ring_speed; + spinlock_t lock; /* SMP protection */ +}; + +/* token ring adapter commands */ +#define DIR_INTERRUPT 0x00 /* struct srb_interrupt */ +#define DIR_MOD_OPEN_PARAMS 0x01 +#define DIR_OPEN_ADAPTER 0x03 /* struct dir_open_adapter */ +#define DIR_CLOSE_ADAPTER 0x04 +#define DIR_SET_GRP_ADDR 0x06 +#define DIR_SET_FUNC_ADDR 0x07 /* struct srb_set_funct_addr */ +#define DIR_READ_LOG 0x08 /* struct srb_read_log */ +#define DLC_OPEN_SAP 0x15 /* struct dlc_open_sap */ +#define DLC_CLOSE_SAP 0x16 +#define DATA_LOST 0x20 /* struct asb_rec */ +#define REC_DATA 0x81 /* struct arb_rec_req */ +#define XMIT_DATA_REQ 0x82 /* struct arb_xmit_req */ +#define DLC_STATUS 0x83 /* struct arb_dlc_status */ +#define RING_STAT_CHANGE 0x84 /* struct dlc_open_sap ??? 
*/ + +/* DIR_OPEN_ADAPTER options */ +#define OPEN_PASS_BCON_MAC 0x0100 +#define NUM_RCV_BUF 2 +#define RCV_BUF_LEN 1024 +#define DHB_LENGTH 2048 +#define NUM_DHB 2 +#define DLC_MAX_SAP 2 +#define DLC_MAX_STA 1 + +/* DLC_OPEN_SAP options */ +#define MAX_I_FIELD 0x0088 +#define SAP_OPEN_IND_SAP 0x04 +#define SAP_OPEN_PRIORITY 0x20 +#define SAP_OPEN_STATION_CNT 0x1 +#define XMIT_DIR_FRAME 0x0A +#define XMIT_UI_FRAME 0x0d +#define XMIT_XID_CMD 0x0e +#define XMIT_TEST_CMD 0x11 + +/* srb close return code */ +#define SIGNAL_LOSS 0x8000 +#define HARD_ERROR 0x4000 +#define XMIT_BEACON 0x1000 +#define LOBE_FAULT 0x0800 +#define AUTO_REMOVAL 0x0400 +#define REMOVE_RECV 0x0100 +#define LOG_OVERFLOW 0x0080 +#define RING_RECOVER 0x0020 + +struct srb_init_response { + unsigned char command; + unsigned char init_status; + unsigned char init_status_2; + unsigned char reserved[3]; + __u16 bring_up_code; + __u16 encoded_address; + __u16 level_address; + __u16 adapter_address; + __u16 parms_address; + __u16 mac_address; +}; + +struct dir_open_adapter { + unsigned char command; + char reserved[7]; + __u16 open_options; + unsigned char node_address[6]; + unsigned char group_address[4]; + unsigned char funct_address[4]; + __u16 num_rcv_buf; + __u16 rcv_buf_len; + __u16 dhb_length; + unsigned char num_dhb; + char reserved2; + unsigned char dlc_max_sap; + unsigned char dlc_max_sta; + unsigned char dlc_max_gsap; + unsigned char dlc_max_gmem; + unsigned char dlc_t1_tick_1; + unsigned char dlc_t2_tick_1; + unsigned char dlc_ti_tick_1; + unsigned char dlc_t1_tick_2; + unsigned char dlc_t2_tick_2; + unsigned char dlc_ti_tick_2; + unsigned char product_id[18]; +}; + +struct dlc_open_sap { + unsigned char command; + unsigned char reserved1; + unsigned char ret_code; + unsigned char reserved2; + __u16 station_id; + unsigned char timer_t1; + unsigned char timer_t2; + unsigned char timer_ti; + unsigned char maxout; + unsigned char maxin; + unsigned char maxout_incr; + unsigned char max_retry_count; + unsigned char gsap_max_mem; + __u16 max_i_field; + unsigned char sap_value; + unsigned char sap_options; + unsigned char station_count; + unsigned char sap_gsap_mem; + unsigned char gsap[0]; +}; + +struct srb_xmit { + unsigned char command; + unsigned char cmd_corr; + unsigned char ret_code; + unsigned char reserved1; + __u16 station_id; +}; + +struct arb_rec_req { + unsigned char command; + unsigned char reserved1[3]; + __u16 station_id; + __u16 rec_buf_addr; + unsigned char lan_hdr_len; + unsigned char dlc_hdr_len; + __u16 frame_len; + unsigned char msg_type; +}; + +struct asb_rec { + unsigned char command; + unsigned char reserved1; + unsigned char ret_code; + unsigned char reserved2; + __u16 station_id; + __u16 rec_buf_addr; +}; + +struct rec_buf { + unsigned char reserved1[2]; + __u16 buf_ptr; + unsigned char reserved2; + unsigned char receive_fs; + __u16 buf_len; + unsigned char data[0]; +}; + +struct srb_set_funct_addr { + unsigned char command; + unsigned char reserved1; + unsigned char ret_code; + unsigned char reserved2[3]; + unsigned char funct_address[4]; +}; + +#endif diff --git a/include/linux/icmp.h b/include/linux/icmp.h new file mode 100644 index 00000000..474f2a51 --- /dev/null +++ b/include/linux/icmp.h @@ -0,0 +1,105 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the ICMP protocol. 
+ * + * Version: @(#)icmp.h 1.0.3 04/28/93 + * + * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_ICMP_H +#define _LINUX_ICMP_H + +#include <linux/types.h> + +#define ICMP_ECHOREPLY 0 /* Echo Reply */ +#define ICMP_DEST_UNREACH 3 /* Destination Unreachable */ +#define ICMP_SOURCE_QUENCH 4 /* Source Quench */ +#define ICMP_REDIRECT 5 /* Redirect (change route) */ +#define ICMP_ECHO 8 /* Echo Request */ +#define ICMP_TIME_EXCEEDED 11 /* Time Exceeded */ +#define ICMP_PARAMETERPROB 12 /* Parameter Problem */ +#define ICMP_TIMESTAMP 13 /* Timestamp Request */ +#define ICMP_TIMESTAMPREPLY 14 /* Timestamp Reply */ +#define ICMP_INFO_REQUEST 15 /* Information Request */ +#define ICMP_INFO_REPLY 16 /* Information Reply */ +#define ICMP_ADDRESS 17 /* Address Mask Request */ +#define ICMP_ADDRESSREPLY 18 /* Address Mask Reply */ +#define NR_ICMP_TYPES 18 + + +/* Codes for UNREACH. */ +#define ICMP_NET_UNREACH 0 /* Network Unreachable */ +#define ICMP_HOST_UNREACH 1 /* Host Unreachable */ +#define ICMP_PROT_UNREACH 2 /* Protocol Unreachable */ +#define ICMP_PORT_UNREACH 3 /* Port Unreachable */ +#define ICMP_FRAG_NEEDED 4 /* Fragmentation Needed/DF set */ +#define ICMP_SR_FAILED 5 /* Source Route failed */ +#define ICMP_NET_UNKNOWN 6 +#define ICMP_HOST_UNKNOWN 7 +#define ICMP_HOST_ISOLATED 8 +#define ICMP_NET_ANO 9 +#define ICMP_HOST_ANO 10 +#define ICMP_NET_UNR_TOS 11 +#define ICMP_HOST_UNR_TOS 12 +#define ICMP_PKT_FILTERED 13 /* Packet filtered */ +#define ICMP_PREC_VIOLATION 14 /* Precedence violation */ +#define ICMP_PREC_CUTOFF 15 /* Precedence cut off */ +#define NR_ICMP_UNREACH 15 /* instead of hardcoding immediate value */ + +/* Codes for REDIRECT. */ +#define ICMP_REDIR_NET 0 /* Redirect Net */ +#define ICMP_REDIR_HOST 1 /* Redirect Host */ +#define ICMP_REDIR_NETTOS 2 /* Redirect Net for TOS */ +#define ICMP_REDIR_HOSTTOS 3 /* Redirect Host for TOS */ + +/* Codes for TIME_EXCEEDED. 
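+ * + * (Editorial sketch, not part of the original header.) With the icmp_hdr() + * helper declared further below, a hypothetical receive path could recognise + * a TTL-expired report as + * + * const struct icmphdr *ih = icmp_hdr(skb); + * + * if (ih->type == ICMP_TIME_EXCEEDED && ih->code == ICMP_EXC_TTL) + * handle_ttl_expired(skb); + * + * where handle_ttl_expired() is a hypothetical handler and the skb's + * transport header is assumed to already be set.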
*/ +#define ICMP_EXC_TTL 0 /* TTL count exceeded */ +#define ICMP_EXC_FRAGTIME 1 /* Fragment Reass time exceeded */ + + +struct icmphdr { + __u8 type; + __u8 code; + __sum16 checksum; + union { + struct { + __be16 id; + __be16 sequence; + } echo; + __be32 gateway; + struct { + __be16 __unused; + __be16 mtu; + } frag; + } un; +}; + +#ifdef __KERNEL__ +#include <linux/skbuff.h> + +static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) +{ + return (struct icmphdr *)skb_transport_header(skb); +} +#endif + +/* + * constants for (set|get)sockopt + */ + +#define ICMP_FILTER 1 + +struct icmp_filter { + __u32 data; +}; + + +#endif /* _LINUX_ICMP_H */ diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h new file mode 100644 index 00000000..ba45e6bc --- /dev/null +++ b/include/linux/icmpv6.h @@ -0,0 +1,196 @@ +#ifndef _LINUX_ICMPV6_H +#define _LINUX_ICMPV6_H + +#include <linux/types.h> +#include <asm/byteorder.h> + +struct icmp6hdr { + + __u8 icmp6_type; + __u8 icmp6_code; + __sum16 icmp6_cksum; + + + union { + __be32 un_data32[1]; + __be16 un_data16[2]; + __u8 un_data8[4]; + + struct icmpv6_echo { + __be16 identifier; + __be16 sequence; + } u_echo; + + struct icmpv6_nd_advt { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u32 reserved:5, + override:1, + solicited:1, + router:1, + reserved2:24; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u32 router:1, + solicited:1, + override:1, + reserved:29; +#else +#error "Please fix <asm/byteorder.h>" +#endif + } u_nd_advt; + + struct icmpv6_nd_ra { + __u8 hop_limit; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 reserved:3, + router_pref:2, + home_agent:1, + other:1, + managed:1; + +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 managed:1, + other:1, + home_agent:1, + router_pref:2, + reserved:3; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __be16 rt_lifetime; + } u_nd_ra; + + } icmp6_dataun; + +#define icmp6_identifier icmp6_dataun.u_echo.identifier +#define icmp6_sequence icmp6_dataun.u_echo.sequence +#define icmp6_pointer icmp6_dataun.un_data32[0] +#define icmp6_mtu icmp6_dataun.un_data32[0] +#define icmp6_unused icmp6_dataun.un_data32[0] +#define icmp6_maxdelay icmp6_dataun.un_data16[0] +#define icmp6_router icmp6_dataun.u_nd_advt.router +#define icmp6_solicited icmp6_dataun.u_nd_advt.solicited +#define icmp6_override icmp6_dataun.u_nd_advt.override +#define icmp6_ndiscreserved icmp6_dataun.u_nd_advt.reserved +#define icmp6_hop_limit icmp6_dataun.u_nd_ra.hop_limit +#define icmp6_addrconf_managed icmp6_dataun.u_nd_ra.managed +#define icmp6_addrconf_other icmp6_dataun.u_nd_ra.other +#define icmp6_rt_lifetime icmp6_dataun.u_nd_ra.rt_lifetime +#define icmp6_router_pref icmp6_dataun.u_nd_ra.router_pref +}; + +#ifdef __KERNEL__ +#include <linux/skbuff.h> + +static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) +{ + return (struct icmp6hdr *)skb_transport_header(skb); +} +#endif + +#define ICMPV6_ROUTER_PREF_LOW 0x3 +#define ICMPV6_ROUTER_PREF_MEDIUM 0x0 +#define ICMPV6_ROUTER_PREF_HIGH 0x1 +#define ICMPV6_ROUTER_PREF_INVALID 0x2 + +#define ICMPV6_DEST_UNREACH 1 +#define ICMPV6_PKT_TOOBIG 2 +#define ICMPV6_TIME_EXCEED 3 +#define ICMPV6_PARAMPROB 4 + +#define ICMPV6_INFOMSG_MASK 0x80 + +#define ICMPV6_ECHO_REQUEST 128 +#define ICMPV6_ECHO_REPLY 129 +#define ICMPV6_MGM_QUERY 130 +#define ICMPV6_MGM_REPORT 131 +#define ICMPV6_MGM_REDUCTION 132 + +#define ICMPV6_NI_QUERY 139 +#define ICMPV6_NI_REPLY 140 + +#define ICMPV6_MLD2_REPORT 143 + +#define ICMPV6_DHAAD_REQUEST 144 +#define ICMPV6_DHAAD_REPLY 145 +#define 
ICMPV6_MOBILE_PREFIX_SOL 146 +#define ICMPV6_MOBILE_PREFIX_ADV 147 + +/* + * Codes for Destination Unreachable + */ +#define ICMPV6_NOROUTE 0 +#define ICMPV6_ADM_PROHIBITED 1 +#define ICMPV6_NOT_NEIGHBOUR 2 +#define ICMPV6_ADDR_UNREACH 3 +#define ICMPV6_PORT_UNREACH 4 + +/* + * Codes for Time Exceeded + */ +#define ICMPV6_EXC_HOPLIMIT 0 +#define ICMPV6_EXC_FRAGTIME 1 + +/* + * Codes for Parameter Problem + */ +#define ICMPV6_HDR_FIELD 0 +#define ICMPV6_UNK_NEXTHDR 1 +#define ICMPV6_UNK_OPTION 2 + +/* + * constants for (set|get)sockopt + */ + +#define ICMPV6_FILTER 1 + +/* + * ICMPV6 filter + */ + +#define ICMPV6_FILTER_BLOCK 1 +#define ICMPV6_FILTER_PASS 2 +#define ICMPV6_FILTER_BLOCKOTHERS 3 +#define ICMPV6_FILTER_PASSONLY 4 + +struct icmp6_filter { + __u32 data[8]; +}; + +/* + * Definitions for MLDv2 + */ +#define MLD2_MODE_IS_INCLUDE 1 +#define MLD2_MODE_IS_EXCLUDE 2 +#define MLD2_CHANGE_TO_INCLUDE 3 +#define MLD2_CHANGE_TO_EXCLUDE 4 +#define MLD2_ALLOW_NEW_SOURCES 5 +#define MLD2_BLOCK_OLD_SOURCES 6 + +#define MLD2_ALL_MCR_INIT { { { 0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,0x16 } } } + +#ifdef __KERNEL__ + +#include <linux/netdevice.h> + +extern void icmpv6_send(struct sk_buff *skb, + u8 type, u8 code, + __u32 info); + +extern int icmpv6_init(void); +extern int icmpv6_err_convert(u8 type, u8 code, + int *err); +extern void icmpv6_cleanup(void); +extern void icmpv6_param_prob(struct sk_buff *skb, + u8 code, int pos); + +struct flowi6; +struct in6_addr; +extern void icmpv6_flow_init(struct sock *sk, + struct flowi6 *fl6, + u8 type, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + int oif); +#endif + +#endif diff --git a/include/linux/ide.h b/include/linux/ide.h new file mode 100644 index 00000000..b1797491 --- /dev/null +++ b/include/linux/ide.h @@ -0,0 +1,1558 @@ +#ifndef _IDE_H +#define _IDE_H +/* + * linux/include/linux/ide.h + * + * Copyright (C) 1994-2002 Linus Torvalds & authors + */ + +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/ata.h> +#include <linux/blkdev.h> +#include <linux/proc_fs.h> +#include <linux/interrupt.h> +#include <linux/bitops.h> +#include <linux/bio.h> +#include <linux/pci.h> +#include <linux/completion.h> +#include <linux/pm.h> +#include <linux/mutex.h> +#ifdef CONFIG_BLK_DEV_IDEACPI +#include <acpi/acpi.h> +#endif +#include <asm/byteorder.h> +#include <asm/io.h> + +/* for request_sense */ +#include <linux/cdrom.h> + +#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) +# define SUPPORT_VLB_SYNC 0 +#else +# define SUPPORT_VLB_SYNC 1 +#endif + +/* + * Probably not wise to fiddle with these + */ +#define IDE_DEFAULT_MAX_FAILURES 1 +#define ERROR_MAX 8 /* Max read/write errors per sector */ +#define ERROR_RESET 3 /* Reset controller every 4th retry */ +#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ + +struct device; + +/* Error codes returned in rq->errors to the higher part of the driver. 
*/ +enum { + IDE_DRV_ERROR_GENERAL = 101, + IDE_DRV_ERROR_FILEMARK = 102, + IDE_DRV_ERROR_EOD = 103, +}; + +/* + * Definitions for accessing IDE controller registers + */ +#define IDE_NR_PORTS (10) + +struct ide_io_ports { + unsigned long data_addr; + + union { + unsigned long error_addr; /* read: error */ + unsigned long feature_addr; /* write: feature */ + }; + + unsigned long nsect_addr; + unsigned long lbal_addr; + unsigned long lbam_addr; + unsigned long lbah_addr; + + unsigned long device_addr; + + union { + unsigned long status_addr; /* read: status */ + unsigned long command_addr; /* write: command */ + }; + + unsigned long ctl_addr; + + unsigned long irq_addr; +}; + +#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) + +#define BAD_R_STAT (ATA_BUSY | ATA_ERR) +#define BAD_W_STAT (BAD_R_STAT | ATA_DF) +#define BAD_STAT (BAD_R_STAT | ATA_DRQ) +#define DRIVE_READY (ATA_DRDY | ATA_DSC) + +#define BAD_CRC (ATA_ABORTED | ATA_ICRC) + +#define SATA_NR_PORTS (3) /* 16 possible ?? */ + +#define SATA_STATUS_OFFSET (0) +#define SATA_ERROR_OFFSET (1) +#define SATA_CONTROL_OFFSET (2) + +/* + * Our Physical Region Descriptor (PRD) table should be large enough + * to handle the biggest I/O request we are likely to see. Since requests + * can have no more than 256 sectors, and since the typical blocksize is + * two or more sectors, we could get by with a limit of 128 entries here for + * the usual worst case. Most requests seem to include some contiguous blocks, + * further reducing the number of table entries required. + * + * The driver reverts to PIO mode for individual requests that exceed + * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling + * 100% of all crazy scenarios here is not necessary. + * + * As it turns out though, we must allocate a full 4KB page for this, + * so the two PRD tables (ide0 & ide1) will each get half of that, + * allowing each to have about 256 entries (8 bytes each) from this. + */ +#define PRD_BYTES 8 +#define PRD_ENTRIES 256 + +/* + * Some more useful definitions + */ +#define PARTN_BITS 6 /* number of minor dev bits for partitions */ +#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ +#define SECTOR_SIZE 512 + +/* + * Timeouts for various operations: + */ +enum { + /* spec allows up to 20ms, but CF cards and SSD drives need more */ + WAIT_DRQ = 1 * HZ, /* 1s */ + /* some laptops are very slow */ + WAIT_READY = 5 * HZ, /* 5s */ + /* should be less than 3ms (?), if all ATAPI CD is closed at boot */ + WAIT_PIDENTIFY = 10 * HZ, /* 10s */ + /* worst case when spinning up */ + WAIT_WORSTCASE = 30 * HZ, /* 30s */ + /* maximum wait for an IRQ to happen */ + WAIT_CMD = 10 * HZ, /* 10s */ + /* Some drives require a longer IRQ timeout. */ + WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */ + /* + * Some drives (for example, Seagate STT3401A Travan) require a very + * long timeout, because they don't return an interrupt or clear their + * BSY bit until after the command completes (even retension commands). + */ + WAIT_TAPE_CMD = 900 * HZ, /* 900s */ + /* minimum sleep time */ + WAIT_MIN_SLEEP = HZ / 50, /* 20ms */ +}; + +/* + * Op codes for special requests to be handled by ide_special_rq(). + * Values should be in the range of 0x20 to 0x3f. + */ +#define REQ_DRIVE_RESET 0x20 +#define REQ_DEVSET_EXEC 0x21 +#define REQ_PARK_HEADS 0x22 +#define REQ_UNPARK_HEADS 0x23 + +/* + * hwif_chipset_t is used to keep track of the specific hardware + * chipset used by each IDE interface, if known. 
+ */ +enum { ide_unknown, ide_generic, ide_pci, + ide_cmd640, ide_dtc2278, ide_ali14xx, + ide_qd65xx, ide_umc8672, ide_ht6560b, + ide_4drives, ide_pmac, ide_acorn, + ide_au1xxx, ide_palm3710 +}; + +typedef u8 hwif_chipset_t; + +/* + * Structure to hold all information about the location of this port + */ +struct ide_hw { + union { + struct ide_io_ports io_ports; + unsigned long io_ports_array[IDE_NR_PORTS]; + }; + + int irq; /* our irq number */ + struct device *dev, *parent; + unsigned long config; +}; + +static inline void ide_std_init_ports(struct ide_hw *hw, + unsigned long io_addr, + unsigned long ctl_addr) +{ + unsigned int i; + + for (i = 0; i <= 7; i++) + hw->io_ports_array[i] = io_addr++; + + hw->io_ports.ctl_addr = ctl_addr; +} + +#define MAX_HWIFS 10 + +/* + * Now for the data we need to maintain per-drive: ide_drive_t + */ + +#define ide_scsi 0x21 +#define ide_disk 0x20 +#define ide_optical 0x7 +#define ide_cdrom 0x5 +#define ide_tape 0x1 +#define ide_floppy 0x0 + +/* + * Special Driver Flags + */ +enum { + IDE_SFLAG_SET_GEOMETRY = (1 << 0), + IDE_SFLAG_RECALIBRATE = (1 << 1), + IDE_SFLAG_SET_MULTMODE = (1 << 2), +}; + +/* + * Status returned from various ide_ functions + */ +typedef enum { + ide_stopped, /* no drive operation was started */ + ide_started, /* a drive operation was started, handler was set */ +} ide_startstop_t; + +enum { + IDE_VALID_ERROR = (1 << 1), + IDE_VALID_FEATURE = IDE_VALID_ERROR, + IDE_VALID_NSECT = (1 << 2), + IDE_VALID_LBAL = (1 << 3), + IDE_VALID_LBAM = (1 << 4), + IDE_VALID_LBAH = (1 << 5), + IDE_VALID_DEVICE = (1 << 6), + IDE_VALID_LBA = IDE_VALID_LBAL | + IDE_VALID_LBAM | + IDE_VALID_LBAH, + IDE_VALID_OUT_TF = IDE_VALID_FEATURE | + IDE_VALID_NSECT | + IDE_VALID_LBA, + IDE_VALID_IN_TF = IDE_VALID_NSECT | + IDE_VALID_LBA, + IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF, + IDE_VALID_IN_HOB = IDE_VALID_ERROR | + IDE_VALID_NSECT | + IDE_VALID_LBA, +}; + +enum { + IDE_TFLAG_LBA48 = (1 << 0), + IDE_TFLAG_WRITE = (1 << 1), + IDE_TFLAG_CUSTOM_HANDLER = (1 << 2), + IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3), + /* force 16-bit I/O operations */ + IDE_TFLAG_IO_16BIT = (1 << 4), + /* struct ide_cmd was allocated using kmalloc() */ + IDE_TFLAG_DYN = (1 << 5), + IDE_TFLAG_FS = (1 << 6), + IDE_TFLAG_MULTI_PIO = (1 << 7), + IDE_TFLAG_SET_XFER = (1 << 8), +}; + +enum { + IDE_FTFLAG_FLAGGED = (1 << 0), + IDE_FTFLAG_SET_IN_FLAGS = (1 << 1), + IDE_FTFLAG_OUT_DATA = (1 << 2), + IDE_FTFLAG_IN_DATA = (1 << 3), +}; + +struct ide_taskfile { + u8 data; /* 0: data byte (for TASKFILE ioctl) */ + union { /* 1: */ + u8 error; /* read: error */ + u8 feature; /* write: feature */ + }; + u8 nsect; /* 2: number of sectors */ + u8 lbal; /* 3: LBA low */ + u8 lbam; /* 4: LBA mid */ + u8 lbah; /* 5: LBA high */ + u8 device; /* 6: device select */ + union { /* 7: */ + u8 status; /* read: status */ + u8 command; /* write: command */ + }; +}; + +struct ide_cmd { + struct ide_taskfile tf; + struct ide_taskfile hob; + struct { + struct { + u8 tf; + u8 hob; + } out, in; + } valid; + + u16 tf_flags; + u8 ftf_flags; /* for TASKFILE ioctl */ + int protocol; + + int sg_nents; /* number of sg entries */ + int orig_sg_nents; + int sg_dma_direction; /* DMA transfer direction */ + + unsigned int nbytes; + unsigned int nleft; + unsigned int last_xfer_len; + + struct scatterlist *cursg; + unsigned int cursg_ofs; + + struct request *rq; /* copy of request */ +}; + +/* ATAPI packet command flags */ +enum { + /* set when an error is considered normal - no retry (ide-tape) */ + PC_FLAG_ABORT = (1 << 0), + 
PC_FLAG_SUPPRESS_ERROR = (1 << 1), + PC_FLAG_WAIT_FOR_DSC = (1 << 2), + PC_FLAG_DMA_OK = (1 << 3), + PC_FLAG_DMA_IN_PROGRESS = (1 << 4), + PC_FLAG_DMA_ERROR = (1 << 5), + PC_FLAG_WRITING = (1 << 6), +}; + +#define ATAPI_WAIT_PC (60 * HZ) + +struct ide_atapi_pc { + /* actual packet bytes */ + u8 c[12]; + /* incremented on each retry */ + int retries; + int error; + + /* bytes to transfer */ + int req_xfer; + + /* the corresponding request */ + struct request *rq; + + unsigned long flags; + + /* + * those are more or less driver-specific and some of them are subject + * to change/removal later. + */ + unsigned long timeout; +}; + +struct ide_devset; +struct ide_driver; + +#ifdef CONFIG_BLK_DEV_IDEACPI +struct ide_acpi_drive_link; +struct ide_acpi_hwif_link; +#endif + +struct ide_drive_s; + +struct ide_disk_ops { + int (*check)(struct ide_drive_s *, const char *); + int (*get_capacity)(struct ide_drive_s *); + void (*unlock_native_capacity)(struct ide_drive_s *); + void (*setup)(struct ide_drive_s *); + void (*flush)(struct ide_drive_s *); + int (*init_media)(struct ide_drive_s *, struct gendisk *); + int (*set_doorlock)(struct ide_drive_s *, struct gendisk *, + int); + ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *, + sector_t); + int (*ioctl)(struct ide_drive_s *, struct block_device *, + fmode_t, unsigned int, unsigned long); +}; + +/* ATAPI device flags */ +enum { + IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), + + /* ide-cd */ + /* Drive cannot eject the disc. */ + IDE_AFLAG_NO_EJECT = (1 << 1), + /* Drive is a pre ATAPI 1.2 drive. */ + IDE_AFLAG_PRE_ATAPI12 = (1 << 2), + /* TOC addresses are in BCD. */ + IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), + /* TOC track numbers are in BCD. */ + IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), + /* Saved TOC information is current. */ + IDE_AFLAG_TOC_VALID = (1 << 6), + /* We think that the drive door is locked. */ + IDE_AFLAG_DOOR_LOCKED = (1 << 7), + /* SET_CD_SPEED command is unsupported. 
*/ + IDE_AFLAG_NO_SPEED_SELECT = (1 << 8), + IDE_AFLAG_VERTOS_300_SSD = (1 << 9), + IDE_AFLAG_VERTOS_600_ESD = (1 << 10), + IDE_AFLAG_SANYO_3CD = (1 << 11), + IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12), + IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13), + IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14), + + /* ide-floppy */ + /* Avoid commands not supported in Clik drive */ + IDE_AFLAG_CLIK_DRIVE = (1 << 15), + /* Requires BH algorithm for packets */ + IDE_AFLAG_ZIP_DRIVE = (1 << 16), + /* Supports format progress report */ + IDE_AFLAG_SRFP = (1 << 17), + + /* ide-tape */ + IDE_AFLAG_IGNORE_DSC = (1 << 18), + /* 0 When the tape position is unknown */ + IDE_AFLAG_ADDRESS_VALID = (1 << 19), + /* Device already opened */ + IDE_AFLAG_BUSY = (1 << 20), + /* Attempt to auto-detect the current user block size */ + IDE_AFLAG_DETECT_BS = (1 << 21), + /* Currently on a filemark */ + IDE_AFLAG_FILEMARK = (1 << 22), + /* 0 = no tape is loaded, so we don't rewind after ejecting */ + IDE_AFLAG_MEDIUM_PRESENT = (1 << 23), + + IDE_AFLAG_NO_AUTOCLOSE = (1 << 24), +}; + +/* device flags */ +enum { + /* restore settings after device reset */ + IDE_DFLAG_KEEP_SETTINGS = (1 << 0), + /* device is using DMA for read/write */ + IDE_DFLAG_USING_DMA = (1 << 1), + /* okay to unmask other IRQs */ + IDE_DFLAG_UNMASK = (1 << 2), + /* don't attempt flushes */ + IDE_DFLAG_NOFLUSH = (1 << 3), + /* DSC overlap */ + IDE_DFLAG_DSC_OVERLAP = (1 << 4), + /* give potential excess bandwidth */ + IDE_DFLAG_NICE1 = (1 << 5), + /* device is physically present */ + IDE_DFLAG_PRESENT = (1 << 6), + /* disable Host Protected Area */ + IDE_DFLAG_NOHPA = (1 << 7), + /* id read from device (synthetic if not set) */ + IDE_DFLAG_ID_READ = (1 << 8), + IDE_DFLAG_NOPROBE = (1 << 9), + /* need to do check_media_change() */ + IDE_DFLAG_REMOVABLE = (1 << 10), + /* needed for removable devices */ + IDE_DFLAG_ATTACH = (1 << 11), + IDE_DFLAG_FORCED_GEOM = (1 << 12), + /* disallow setting unmask bit */ + IDE_DFLAG_NO_UNMASK = (1 << 13), + /* disallow enabling 32-bit I/O */ + IDE_DFLAG_NO_IO_32BIT = (1 << 14), + /* for removable only: door lock/unlock works */ + IDE_DFLAG_DOORLOCKING = (1 << 15), + /* disallow DMA */ + IDE_DFLAG_NODMA = (1 << 16), + /* powermanagement told us not to do anything, so sleep nicely */ + IDE_DFLAG_BLOCKED = (1 << 17), + /* sleeping & sleep field valid */ + IDE_DFLAG_SLEEPING = (1 << 18), + IDE_DFLAG_POST_RESET = (1 << 19), + IDE_DFLAG_UDMA33_WARNED = (1 << 20), + IDE_DFLAG_LBA48 = (1 << 21), + /* status of write cache */ + IDE_DFLAG_WCACHE = (1 << 22), + /* used for ignoring ATA_DF */ + IDE_DFLAG_NOWERR = (1 << 23), + /* retrying in PIO */ + IDE_DFLAG_DMA_PIO_RETRY = (1 << 24), + IDE_DFLAG_LBA = (1 << 25), + /* don't unload heads */ + IDE_DFLAG_NO_UNLOAD = (1 << 26), + /* heads unloaded, please don't reset port */ + IDE_DFLAG_PARKED = (1 << 27), + IDE_DFLAG_MEDIA_CHANGED = (1 << 28), + /* write protect */ + IDE_DFLAG_WP = (1 << 29), + IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), + IDE_DFLAG_NIEN_QUIRK = (1 << 31), +}; + +struct ide_drive_s { + char name[4]; /* drive name, such as "hda" */ + char driver_req[10]; /* requests specific driver */ + + struct request_queue *queue; /* request queue */ + + struct request *rq; /* current request */ + void *driver_data; /* extra driver data */ + u16 *id; /* identification info */ +#ifdef CONFIG_IDE_PROC_FS + struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ + const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */ +#endif + struct hwif_s *hwif; /* actually (ide_hwif_t *) */ + + const 
struct ide_disk_ops *disk_ops; + + unsigned long dev_flags; + + unsigned long sleep; /* sleep until this time */ + unsigned long timeout; /* max time to wait for irq */ + + u8 special_flags; /* special action flags */ + + u8 select; /* basic drive/head select reg value */ + u8 retry_pio; /* retrying dma capable host in pio */ + u8 waiting_for_dma; /* dma currently in progress */ + u8 dma; /* atapi dma flag */ + + u8 init_speed; /* transfer rate set at boot */ + u8 current_speed; /* current transfer rate set */ + u8 desired_speed; /* desired transfer rate set */ + u8 pio_mode; /* for ->set_pio_mode _only_ */ + u8 dma_mode; /* for ->set_dma_mode _only_ */ + u8 dn; /* now wide spread use */ + u8 acoustic; /* acoustic management */ + u8 media; /* disk, cdrom, tape, floppy, ... */ + u8 ready_stat; /* min status value for drive ready */ + u8 mult_count; /* current multiple sector setting */ + u8 mult_req; /* requested multiple sector setting */ + u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */ + u8 bad_wstat; /* used for ignoring ATA_DF */ + u8 head; /* "real" number of heads */ + u8 sect; /* "real" sectors per track */ + u8 bios_head; /* BIOS/fdisk/LILO number of heads */ + u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */ + + /* delay this long before sending packet command */ + u8 pc_delay; + + unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */ + unsigned int cyl; /* "real" number of cyls */ + void *drive_data; /* used by set_pio_mode/dev_select() */ + unsigned int failures; /* current failure count */ + unsigned int max_failures; /* maximum allowed failure count */ + u64 probed_capacity;/* initial/native media capacity */ + u64 capacity64; /* total number of sectors */ + + int lun; /* logical unit */ + int crc_count; /* crc counter to reduce drive speed */ + + unsigned long debug_mask; /* debugging levels switch */ + +#ifdef CONFIG_BLK_DEV_IDEACPI + struct ide_acpi_drive_link *acpidata; +#endif + struct list_head list; + struct device gendev; + struct completion gendev_rel_comp; /* to deal with device release() */ + + /* current packet command */ + struct ide_atapi_pc *pc; + + /* last failed packet command */ + struct ide_atapi_pc *failed_pc; + + /* callback for packet commands */ + int (*pc_callback)(struct ide_drive_s *, int); + + ide_startstop_t (*irq_handler)(struct ide_drive_s *); + + unsigned long atapi_flags; + + struct ide_atapi_pc request_sense_pc; + + /* current sense rq and buffer */ + bool sense_rq_armed; + struct request sense_rq; + struct request_sense sense_data; +}; + +typedef struct ide_drive_s ide_drive_t; + +#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev) + +#define to_ide_drv(obj, cont_type) \ + container_of(obj, struct cont_type, dev) + +#define ide_drv_g(disk, cont_type) \ + container_of((disk)->private_data, struct cont_type, driver) + +struct ide_port_info; + +struct ide_tp_ops { + void (*exec_command)(struct hwif_s *, u8); + u8 (*read_status)(struct hwif_s *); + u8 (*read_altstatus)(struct hwif_s *); + void (*write_devctl)(struct hwif_s *, u8); + + void (*dev_select)(ide_drive_t *); + void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8); + void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8); + + void (*input_data)(ide_drive_t *, struct ide_cmd *, + void *, unsigned int); + void (*output_data)(ide_drive_t *, struct ide_cmd *, + void *, unsigned int); +}; + +extern const struct ide_tp_ops default_tp_ops; + +/** + * struct ide_port_ops - IDE port operations + * + * @init_dev: host specific initialization of a device + * 
@set_pio_mode: routine to program host for PIO mode + * @set_dma_mode: routine to program host for DMA mode + * @reset_poll: chipset polling based on hba specifics + * @pre_reset: chipset specific changes to default for device-hba resets + * @resetproc: routine to reset controller after a disk reset + * @maskproc: special host masking for drive selection + * @quirkproc: check host's drive quirk list + * @clear_irq: clear IRQ + * + * @mdma_filter: filter MDMA modes + * @udma_filter: filter UDMA modes + * + * @cable_detect: detect cable type + */ +struct ide_port_ops { + void (*init_dev)(ide_drive_t *); + void (*set_pio_mode)(struct hwif_s *, ide_drive_t *); + void (*set_dma_mode)(struct hwif_s *, ide_drive_t *); + int (*reset_poll)(ide_drive_t *); + void (*pre_reset)(ide_drive_t *); + void (*resetproc)(ide_drive_t *); + void (*maskproc)(ide_drive_t *, int); + void (*quirkproc)(ide_drive_t *); + void (*clear_irq)(ide_drive_t *); + int (*test_irq)(struct hwif_s *); + + u8 (*mdma_filter)(ide_drive_t *); + u8 (*udma_filter)(ide_drive_t *); + + u8 (*cable_detect)(struct hwif_s *); +}; + +struct ide_dma_ops { + void (*dma_host_set)(struct ide_drive_s *, int); + int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *); + void (*dma_start)(struct ide_drive_s *); + int (*dma_end)(struct ide_drive_s *); + int (*dma_test_irq)(struct ide_drive_s *); + void (*dma_lost_irq)(struct ide_drive_s *); + /* below ones are optional */ + int (*dma_check)(struct ide_drive_s *, struct ide_cmd *); + int (*dma_timer_expiry)(struct ide_drive_s *); + void (*dma_clear)(struct ide_drive_s *); + /* + * The following method is optional and only required to be + * implemented for the SFF-8038i compatible controllers. + */ + u8 (*dma_sff_read_status)(struct hwif_s *); +}; + +enum { + IDE_PFLAG_PROBING = (1 << 0), +}; + +struct ide_host; + +typedef struct hwif_s { + struct hwif_s *mate; /* other hwif from same PCI chip */ + struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ + + struct ide_host *host; + + char name[6]; /* name of interface, eg. "ide0" */ + + struct ide_io_ports io_ports; + + unsigned long sata_scr[SATA_NR_PORTS]; + + ide_drive_t *devices[MAX_DRIVES + 1]; + + unsigned long port_flags; + + u8 major; /* our major number */ + u8 index; /* 0 for ide0; 1 for ide1; ... */ + u8 channel; /* for dual-port chips: 0=primary, 1=secondary */ + + u32 host_flags; + + u8 pio_mask; + + u8 ultra_mask; + u8 mwdma_mask; + u8 swdma_mask; + + u8 cbl; /* cable type */ + + hwif_chipset_t chipset; /* sub-module for tuning.. 
*/ + + struct device *dev; + + void (*rw_disk)(ide_drive_t *, struct request *); + + const struct ide_tp_ops *tp_ops; + const struct ide_port_ops *port_ops; + const struct ide_dma_ops *dma_ops; + + /* dma physical region descriptor table (cpu view) */ + unsigned int *dmatable_cpu; + /* dma physical region descriptor table (dma view) */ + dma_addr_t dmatable_dma; + + /* maximum number of PRD table entries */ + int prd_max_nents; + /* PRD entry size in bytes */ + int prd_ent_size; + + /* Scatter-gather list used to build the above */ + struct scatterlist *sg_table; + int sg_max_nents; /* Maximum number of entries in it */ + + struct ide_cmd cmd; /* current command */ + + int rqsize; /* max sectors per request */ + int irq; /* our irq number */ + + unsigned long dma_base; /* base addr for dma ports */ + + unsigned long config_data; /* for use by chipset-specific code */ + unsigned long select_data; /* for use by chipset-specific code */ + + unsigned long extra_base; /* extra addr for dma ports */ + unsigned extra_ports; /* number of extra dma ports */ + + unsigned present : 1; /* this interface exists */ + unsigned busy : 1; /* serializes devices on a port */ + + struct device gendev; + struct device *portdev; + + struct completion gendev_rel_comp; /* To deal with device release() */ + + void *hwif_data; /* extra hwif data */ + +#ifdef CONFIG_BLK_DEV_IDEACPI + struct ide_acpi_hwif_link *acpidata; +#endif + + /* IRQ handler, if active */ + ide_startstop_t (*handler)(ide_drive_t *); + + /* BOOL: polling active & poll_timeout field valid */ + unsigned int polling : 1; + + /* current drive */ + ide_drive_t *cur_dev; + + /* current request */ + struct request *rq; + + /* failsafe timer */ + struct timer_list timer; + /* timeout value during long polls */ + unsigned long poll_timeout; + /* queried upon timeouts */ + int (*expiry)(ide_drive_t *); + + int req_gen; + int req_gen_timer; + + spinlock_t lock; +} ____cacheline_internodealigned_in_smp ide_hwif_t; + +#define MAX_HOST_PORTS 4 + +struct ide_host { + ide_hwif_t *ports[MAX_HOST_PORTS + 1]; + unsigned int n_ports; + struct device *dev[2]; + + int (*init_chipset)(struct pci_dev *); + + void (*get_lock)(irq_handler_t, void *); + void (*release_lock)(void); + + irq_handler_t irq_handler; + + unsigned long host_flags; + + int irq_flags; + + void *host_priv; + ide_hwif_t *cur_port; /* for hosts requiring serialization */ + + /* used for hosts requiring serialization */ + volatile unsigned long host_busy; +}; + +#define IDE_HOST_BUSY 0 + +/* + * internal ide interrupt handler type + */ +typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); +typedef int (ide_expiry_t)(ide_drive_t *); + +/* used by ide-cd, ide-floppy, etc. 
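+ * + * (Editorial note, not part of the original header.) As an illustration of the + * devset helpers declared further below, ide_devset_get(io_32bit, io_32bit) + * expands to nothing more than + * + * static int get_io_32bit(ide_drive_t *drive) + * { + * return drive->io_32bit; + * } + * + * while ide_devset_set() generates the matching setter, which stores its int + * argument in the named field and returns 0.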
*/ +typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned); + +extern struct mutex ide_setting_mtx; + +/* + * configurable drive settings + */ + +#define DS_SYNC (1 << 0) + +struct ide_devset { + int (*get)(ide_drive_t *); + int (*set)(ide_drive_t *, int); + unsigned int flags; +}; + +#define __DEVSET(_flags, _get, _set) { \ + .flags = _flags, \ + .get = _get, \ + .set = _set, \ +} + +#define ide_devset_get(name, field) \ +static int get_##name(ide_drive_t *drive) \ +{ \ + return drive->field; \ +} + +#define ide_devset_set(name, field) \ +static int set_##name(ide_drive_t *drive, int arg) \ +{ \ + drive->field = arg; \ + return 0; \ +} + +#define ide_devset_get_flag(name, flag) \ +static int get_##name(ide_drive_t *drive) \ +{ \ + return !!(drive->dev_flags & flag); \ +} + +#define ide_devset_set_flag(name, flag) \ +static int set_##name(ide_drive_t *drive, int arg) \ +{ \ + if (arg) \ + drive->dev_flags |= flag; \ + else \ + drive->dev_flags &= ~flag; \ + return 0; \ +} + +#define __IDE_DEVSET(_name, _flags, _get, _set) \ +const struct ide_devset ide_devset_##_name = \ + __DEVSET(_flags, _get, _set) + +#define IDE_DEVSET(_name, _flags, _get, _set) \ +static __IDE_DEVSET(_name, _flags, _get, _set) + +#define ide_devset_rw(_name, _func) \ +IDE_DEVSET(_name, 0, get_##_func, set_##_func) + +#define ide_devset_w(_name, _func) \ +IDE_DEVSET(_name, 0, NULL, set_##_func) + +#define ide_ext_devset_rw(_name, _func) \ +__IDE_DEVSET(_name, 0, get_##_func, set_##_func) + +#define ide_ext_devset_rw_sync(_name, _func) \ +__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func) + +#define ide_decl_devset(_name) \ +extern const struct ide_devset ide_devset_##_name + +ide_decl_devset(io_32bit); +ide_decl_devset(keepsettings); +ide_decl_devset(pio_mode); +ide_decl_devset(unmaskirq); +ide_decl_devset(using_dma); + +#ifdef CONFIG_IDE_PROC_FS +/* + * /proc/ide interface + */ + +#define ide_devset_rw_field(_name, _field) \ +ide_devset_get(_name, _field); \ +ide_devset_set(_name, _field); \ +IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name) + +#define ide_devset_rw_flag(_name, _field) \ +ide_devset_get_flag(_name, _field); \ +ide_devset_set_flag(_name, _field); \ +IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name) + +struct ide_proc_devset { + const char *name; + const struct ide_devset *setting; + int min, max; + int (*mulf)(ide_drive_t *); + int (*divf)(ide_drive_t *); +}; + +#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \ + .name = __stringify(_name), \ + .setting = &ide_devset_##_name, \ + .min = _min, \ + .max = _max, \ + .mulf = _mulf, \ + .divf = _divf, \ +} + +#define IDE_PROC_DEVSET(_name, _min, _max) \ +__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL) + +typedef struct { + const char *name; + umode_t mode; + const struct file_operations *proc_fops; +} ide_proc_entry_t; + +void proc_ide_create(void); +void proc_ide_destroy(void); +void ide_proc_register_port(ide_hwif_t *); +void ide_proc_port_register_devices(ide_hwif_t *); +void ide_proc_unregister_device(ide_drive_t *); +void ide_proc_unregister_port(ide_hwif_t *); +void ide_proc_register_driver(ide_drive_t *, struct ide_driver *); +void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *); + +extern const struct file_operations ide_capacity_proc_fops; +extern const struct file_operations ide_geometry_proc_fops; +#else +static inline void proc_ide_create(void) { ; } +static inline void proc_ide_destroy(void) { ; } +static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; } +static inline void 
ide_proc_port_register_devices(ide_hwif_t *hwif) { ; } +static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; } +static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; } +static inline void ide_proc_register_driver(ide_drive_t *drive, + struct ide_driver *driver) { ; } +static inline void ide_proc_unregister_driver(ide_drive_t *drive, + struct ide_driver *driver) { ; } +#endif + +enum { + /* enter/exit functions */ + IDE_DBG_FUNC = (1 << 0), + /* sense key/asc handling */ + IDE_DBG_SENSE = (1 << 1), + /* packet commands handling */ + IDE_DBG_PC = (1 << 2), + /* request handling */ + IDE_DBG_RQ = (1 << 3), + /* driver probing/setup */ + IDE_DBG_PROBE = (1 << 4), +}; + +/* DRV_NAME has to be defined in the driver before using the macro below */ +#define __ide_debug_log(lvl, fmt, args...) \ +{ \ + if (unlikely(drive->debug_mask & lvl)) \ + printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \ + __func__, ## args); \ +} + +/* + * Power Management state machine (rq->pm->pm_step). + * + * For each step, the core calls ide_start_power_step() first. + * This can return: + * - ide_stopped : In this case, the core calls us back again unless + * step have been set to ide_power_state_completed. + * - ide_started : In this case, the channel is left busy until an + * async event (interrupt) occurs. + * Typically, ide_start_power_step() will issue a taskfile request with + * do_rw_taskfile(). + * + * Upon reception of the interrupt, the core will call ide_complete_power_step() + * with the error code if any. This routine should update the step value + * and return. It should not start a new request. The core will call + * ide_start_power_step() for the new step value, unless step have been + * set to IDE_PM_COMPLETED. + */ +enum { + IDE_PM_START_SUSPEND, + IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND, + IDE_PM_STANDBY, + + IDE_PM_START_RESUME, + IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME, + IDE_PM_IDLE, + IDE_PM_RESTORE_DMA, + + IDE_PM_COMPLETED, +}; + +int generic_ide_suspend(struct device *, pm_message_t); +int generic_ide_resume(struct device *); + +void ide_complete_power_step(ide_drive_t *, struct request *); +ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *); +void ide_complete_pm_rq(ide_drive_t *, struct request *); +void ide_check_pm_state(ide_drive_t *, struct request *); + +/* + * Subdrivers support. + * + * The gendriver.owner field should be set to the module owner of this driver. 
+ * The gendriver.name field should be set to the name of this driver + */ +struct ide_driver { + const char *version; + ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); + struct device_driver gen_driver; + int (*probe)(ide_drive_t *); + void (*remove)(ide_drive_t *); + void (*resume)(ide_drive_t *); + void (*shutdown)(ide_drive_t *); +#ifdef CONFIG_IDE_PROC_FS + ide_proc_entry_t * (*proc_entries)(ide_drive_t *); + const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *); +#endif +}; + +#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver) + +int ide_device_get(ide_drive_t *); +void ide_device_put(ide_drive_t *); + +struct ide_ioctl_devset { + unsigned int get_ioctl; + unsigned int set_ioctl; + const struct ide_devset *setting; +}; + +int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int, + unsigned long, const struct ide_ioctl_devset *); + +int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long); + +extern int ide_vlb_clk; +extern int ide_pci_clk; + +int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int); +void ide_kill_rq(ide_drive_t *, struct request *); + +void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); +void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); + +void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *, + unsigned int); + +void ide_pad_transfer(ide_drive_t *, int, int); + +ide_startstop_t ide_error(ide_drive_t *, const char *, u8); + +void ide_fix_driveid(u16 *); + +extern void ide_fixstring(u8 *, const int, const int); + +int ide_busy_sleep(ide_drive_t *, unsigned long, int); + +int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *); +int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long); + +ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *); +ide_startstop_t ide_do_devset(ide_drive_t *, struct request *); + +extern ide_startstop_t ide_do_reset (ide_drive_t *); + +extern int ide_devset_execute(ide_drive_t *drive, + const struct ide_devset *setting, int arg); + +void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8); +int ide_complete_rq(ide_drive_t *, int, unsigned int); + +void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd); +void ide_tf_dump(const char *, struct ide_cmd *); + +void ide_exec_command(ide_hwif_t *, u8); +u8 ide_read_status(ide_hwif_t *); +u8 ide_read_altstatus(ide_hwif_t *); +void ide_write_devctl(ide_hwif_t *, u8); + +void ide_dev_select(ide_drive_t *); +void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8); +void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8); + +void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); +void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); + +void SELECT_MASK(ide_drive_t *, int); + +u8 ide_read_error(ide_drive_t *); +void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *); + +int ide_check_ireason(ide_drive_t *, struct request *, int, int, int); + +int ide_check_atapi_device(ide_drive_t *, const char *); + +void ide_init_pc(struct ide_atapi_pc *); + +/* Disk head parking */ +extern wait_queue_head_t ide_park_wq; +ssize_t ide_park_show(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t ide_park_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len); + +/* + * Special requests for ide-tape block device strategy routine. 
+ * + * In order to service a character device command, we add special requests to + * the tail of our block device request queue and wait for their completion. + */ +enum { + REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */ + REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */ + REQ_IDETAPE_READ = (1 << 2), + REQ_IDETAPE_WRITE = (1 << 3), +}; + +int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *, + void *, unsigned int); + +int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *); +int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); +int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); +void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); +void ide_retry_pc(ide_drive_t *drive); + +void ide_prep_sense(ide_drive_t *drive, struct request *rq); +int ide_queue_sense_rq(ide_drive_t *drive, void *special); + +int ide_cd_expiry(ide_drive_t *); + +int ide_cd_get_xferlen(struct request *); + +ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *); + +ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *); + +void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int); + +void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8); + +int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16); +int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *); + +int ide_taskfile_ioctl(ide_drive_t *, unsigned long); + +int ide_dev_read_id(ide_drive_t *, u8, u16 *, int); + +extern int ide_driveid_update(ide_drive_t *); +extern int ide_config_drive_speed(ide_drive_t *, u8); +extern u8 eighty_ninty_three (ide_drive_t *); +extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *); + +extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout); + +extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); + +extern void ide_timer_expiry(unsigned long); +extern irqreturn_t ide_intr(int irq, void *dev_id); +extern void do_ide_request(struct request_queue *); +extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); + +void ide_init_disk(struct gendisk *, ide_drive_t *); + +#ifdef CONFIG_IDEPCI_PCIBUS_ORDER +extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name); +#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME) +#else +#define ide_pci_register_driver(d) pci_register_driver(d) +#endif + +static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev) +{ + if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5) + return 1; + return 0; +} + +void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, + struct ide_hw *, struct ide_hw **); +void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); + +#ifdef CONFIG_BLK_DEV_IDEDMA_PCI +int ide_pci_set_master(struct pci_dev *, const char *); +unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); +int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *); +int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); +#else +static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, + const struct ide_port_info *d) +{ + return -EINVAL; +} +#endif + +struct ide_pci_enablebit { + u8 reg; /* byte pci reg holding the enable-bit */ + u8 mask; /* mask to isolate the enable-bit */ + u8 val; /* value of masked reg when "enabled" */ +}; + +enum { + /* Uses ISA control ports not PCI ones. 
*/ + IDE_HFLAG_ISA_PORTS = (1 << 0), + /* single port device */ + IDE_HFLAG_SINGLE = (1 << 1), + /* don't use legacy PIO blacklist */ + IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2), + /* set for the second port of QD65xx */ + IDE_HFLAG_QD_2ND_PORT = (1 << 3), + /* use PIO8/9 for prefetch off/on */ + IDE_HFLAG_ABUSE_PREFETCH = (1 << 4), + /* use PIO6/7 for fast-devsel off/on */ + IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5), + /* use 100-102 and 200-202 PIO values to set DMA modes */ + IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6), + /* + * keep DMA setting when programming PIO mode, may be used only + * for hosts which have separate PIO and DMA timings (ie. PMAC) + */ + IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7), + /* program host for the transfer mode after programming device */ + IDE_HFLAG_POST_SET_MODE = (1 << 8), + /* don't program host/device for the transfer mode ("smart" hosts) */ + IDE_HFLAG_NO_SET_MODE = (1 << 9), + /* trust BIOS for programming chipset/device for DMA */ + IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10), + /* host is CS5510/CS5520 */ + IDE_HFLAG_CS5520 = (1 << 11), + /* ATAPI DMA is unsupported */ + IDE_HFLAG_NO_ATAPI_DMA = (1 << 12), + /* set if host is a "non-bootable" controller */ + IDE_HFLAG_NON_BOOTABLE = (1 << 13), + /* host doesn't support DMA */ + IDE_HFLAG_NO_DMA = (1 << 14), + /* check if host is PCI IDE device before allowing DMA */ + IDE_HFLAG_NO_AUTODMA = (1 << 15), + /* host uses MMIO */ + IDE_HFLAG_MMIO = (1 << 16), + /* no LBA48 */ + IDE_HFLAG_NO_LBA48 = (1 << 17), + /* no LBA48 DMA */ + IDE_HFLAG_NO_LBA48_DMA = (1 << 18), + /* data FIFO is cleared by an error */ + IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), + /* serialize ports */ + IDE_HFLAG_SERIALIZE = (1 << 20), + /* host is DTC2278 */ + IDE_HFLAG_DTC2278 = (1 << 21), + /* 4 devices on a single set of I/O ports */ + IDE_HFLAG_4DRIVES = (1 << 22), + /* host is TRM290 */ + IDE_HFLAG_TRM290 = (1 << 23), + /* use 32-bit I/O ops */ + IDE_HFLAG_IO_32BIT = (1 << 24), + /* unmask IRQs */ + IDE_HFLAG_UNMASK_IRQS = (1 << 25), + IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26), + /* serialize ports if DMA is possible (for sl82c105) */ + IDE_HFLAG_SERIALIZE_DMA = (1 << 27), + /* force host out of "simplex" mode */ + IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28), + /* DSC overlap is unsupported */ + IDE_HFLAG_NO_DSC = (1 << 29), + /* never use 32-bit I/O ops */ + IDE_HFLAG_NO_IO_32BIT = (1 << 30), + /* never unmask IRQs */ + IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), +}; + +#ifdef CONFIG_BLK_DEV_OFFBOARD +# define IDE_HFLAG_OFF_BOARD 0 +#else +# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE +#endif + +struct ide_port_info { + char *name; + + int (*init_chipset)(struct pci_dev *); + + void (*get_lock)(irq_handler_t, void *); + void (*release_lock)(void); + + void (*init_iops)(ide_hwif_t *); + void (*init_hwif)(ide_hwif_t *); + int (*init_dma)(ide_hwif_t *, + const struct ide_port_info *); + + const struct ide_tp_ops *tp_ops; + const struct ide_port_ops *port_ops; + const struct ide_dma_ops *dma_ops; + + struct ide_pci_enablebit enablebits[2]; + + hwif_chipset_t chipset; + + u16 max_sectors; /* if < than the default one */ + + u32 host_flags; + + int irq_flags; + + u8 pio_mask; + u8 swdma_mask; + u8 mwdma_mask; + u8 udma_mask; +}; + +int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *); +int ide_pci_init_two(struct pci_dev *, struct pci_dev *, + const struct ide_port_info *, void *); +void ide_pci_remove(struct pci_dev *); + +#ifdef CONFIG_PM +int ide_pci_suspend(struct pci_dev *, pm_message_t); +int ide_pci_resume(struct pci_dev *); 
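/*
 * Illustrative sketch, not part of the patch: how a PCI IDE host driver
 * might wire the declarations from this header together.  The driver name,
 * PCI ID and "example_*" symbols are made up for the illustration;
 * struct ide_port_info, ide_pci_init_one(), ide_pci_remove(),
 * ide_pci_suspend()/ide_pci_resume() and the IDE_HFLAG_* flags are the
 * real interfaces declared above (ATA_PIO4 and friends are the usual
 * transfer-mode masks from <linux/ata.h>).
 */
#include <linux/pci.h>
#include <linux/ide.h>

static const struct ide_port_info example_port_info = {
	.name		= "example-ide",
	.host_flags	= IDE_HFLAG_SINGLE,	/* single port device */
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA5,
};

static const struct pci_device_id example_ide_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x1234), 0 },	/* hypothetical device ID */
	{ 0, },
};

static int example_ide_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	/* sets up the ports described by example_port_info and registers the host */
	return ide_pci_init_one(dev, &example_port_info, NULL);
}

static struct pci_driver example_ide_pci_driver = {
	.name		= "example-ide",
	.id_table	= example_ide_pci_tbl,
	.probe		= example_ide_probe,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,	/* becomes NULL when !CONFIG_PM, see below */
	.resume		= ide_pci_resume,
};
/* At module init such a driver would call ide_pci_register_driver(&example_ide_pci_driver). */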
+#else +#define ide_pci_suspend NULL +#define ide_pci_resume NULL +#endif + +void ide_map_sg(ide_drive_t *, struct ide_cmd *); +void ide_init_sg_cmd(struct ide_cmd *, unsigned int); + +#define BAD_DMA_DRIVE 0 +#define GOOD_DMA_DRIVE 1 + +struct drive_list_entry { + const char *id_model; + const char *id_firmware; +}; + +int ide_in_drive_list(u16 *, const struct drive_list_entry *); + +#ifdef CONFIG_BLK_DEV_IDEDMA +int ide_dma_good_drive(ide_drive_t *); +int __ide_dma_bad_drive(ide_drive_t *); + +u8 ide_find_dma_mode(ide_drive_t *, u8); + +static inline u8 ide_max_dma_mode(ide_drive_t *drive) +{ + return ide_find_dma_mode(drive, XFER_UDMA_6); +} + +void ide_dma_off_quietly(ide_drive_t *); +void ide_dma_off(ide_drive_t *); +void ide_dma_on(ide_drive_t *); +int ide_set_dma(ide_drive_t *); +void ide_check_dma_crc(ide_drive_t *); +ide_startstop_t ide_dma_intr(ide_drive_t *); + +int ide_allocate_dma_engine(ide_hwif_t *); +void ide_release_dma_engine(ide_hwif_t *); + +int ide_dma_prepare(ide_drive_t *, struct ide_cmd *); +void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *); + +#ifdef CONFIG_BLK_DEV_IDEDMA_SFF +int config_drive_for_dma(ide_drive_t *); +int ide_build_dmatable(ide_drive_t *, struct ide_cmd *); +void ide_dma_host_set(ide_drive_t *, int); +int ide_dma_setup(ide_drive_t *, struct ide_cmd *); +extern void ide_dma_start(ide_drive_t *); +int ide_dma_end(ide_drive_t *); +int ide_dma_test_irq(ide_drive_t *); +int ide_dma_sff_timer_expiry(ide_drive_t *); +u8 ide_dma_sff_read_status(ide_hwif_t *); +extern const struct ide_dma_ops sff_dma_ops; +#else +static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; } +#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ + +void ide_dma_lost_irq(ide_drive_t *); +ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int); + +#else +static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; } +static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; } +static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; } +static inline void ide_dma_off(ide_drive_t *drive) { ; } +static inline void ide_dma_on(ide_drive_t *drive) { ; } +static inline void ide_dma_verbose(ide_drive_t *drive) { ; } +static inline int ide_set_dma(ide_drive_t *drive) { return 1; } +static inline void ide_check_dma_crc(ide_drive_t *drive) { ; } +static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; } +static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; } +static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; } +static inline int ide_dma_prepare(ide_drive_t *drive, + struct ide_cmd *cmd) { return 1; } +static inline void ide_dma_unmap_sg(ide_drive_t *drive, + struct ide_cmd *cmd) { ; } +#endif /* CONFIG_BLK_DEV_IDEDMA */ + +#ifdef CONFIG_BLK_DEV_IDEACPI +int ide_acpi_init(void); +bool ide_port_acpi(ide_hwif_t *hwif); +extern int ide_acpi_exec_tfs(ide_drive_t *drive); +extern void ide_acpi_get_timing(ide_hwif_t *hwif); +extern void ide_acpi_push_timing(ide_hwif_t *hwif); +void ide_acpi_init_port(ide_hwif_t *); +void ide_acpi_port_init_devices(ide_hwif_t *); +extern void ide_acpi_set_state(ide_hwif_t *hwif, int on); +#else +static inline int ide_acpi_init(void) { return 0; } +static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; } +static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; } +static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; } +static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; } +static inline void 
ide_acpi_init_port(ide_hwif_t *hwif) { ; } +static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; } +static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} +#endif + +void ide_register_region(struct gendisk *); +void ide_unregister_region(struct gendisk *); + +void ide_check_nien_quirk_list(ide_drive_t *); +void ide_undecoded_slave(ide_drive_t *); + +void ide_port_apply_params(ide_hwif_t *); +int ide_sysfs_register_port(ide_hwif_t *); + +struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **, + unsigned int); +void ide_host_free(struct ide_host *); +int ide_host_register(struct ide_host *, const struct ide_port_info *, + struct ide_hw **); +int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int, + struct ide_host **); +void ide_host_remove(struct ide_host *); +int ide_legacy_device_add(const struct ide_port_info *, unsigned long); +void ide_port_unregister_devices(ide_hwif_t *); +void ide_port_scan(ide_hwif_t *); + +static inline void *ide_get_hwifdata (ide_hwif_t * hwif) +{ + return hwif->hwif_data; +} + +static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data) +{ + hwif->hwif_data = data; +} + +extern void ide_toggle_bounce(ide_drive_t *drive, int on); + +u64 ide_get_lba_addr(struct ide_cmd *, int); +u8 ide_dump_status(ide_drive_t *, const char *, u8); + +struct ide_timing { + u8 mode; + u8 setup; /* t1 */ + u16 act8b; /* t2 for 8-bit io */ + u16 rec8b; /* t2i for 8-bit io */ + u16 cyc8b; /* t0 for 8-bit io */ + u16 active; /* t2 or tD */ + u16 recover; /* t2i or tK */ + u16 cycle; /* t0 */ + u16 udma; /* t2CYCTYP/2 */ +}; + +enum { + IDE_TIMING_SETUP = (1 << 0), + IDE_TIMING_ACT8B = (1 << 1), + IDE_TIMING_REC8B = (1 << 2), + IDE_TIMING_CYC8B = (1 << 3), + IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B | + IDE_TIMING_CYC8B, + IDE_TIMING_ACTIVE = (1 << 4), + IDE_TIMING_RECOVER = (1 << 5), + IDE_TIMING_CYCLE = (1 << 6), + IDE_TIMING_UDMA = (1 << 7), + IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT | + IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER | + IDE_TIMING_CYCLE | IDE_TIMING_UDMA, +}; + +struct ide_timing *ide_timing_find_mode(u8); +u16 ide_pio_cycle_time(ide_drive_t *, u8); +void ide_timing_merge(struct ide_timing *, struct ide_timing *, + struct ide_timing *, unsigned int); +int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int); + +#ifdef CONFIG_IDE_XFER_MODE +int ide_scan_pio_blacklist(char *); +const char *ide_xfer_verbose(u8); +int ide_pio_need_iordy(ide_drive_t *, const u8); +int ide_set_pio_mode(ide_drive_t *, u8); +int ide_set_dma_mode(ide_drive_t *, u8); +void ide_set_pio(ide_drive_t *, u8); +int ide_set_xfer_rate(ide_drive_t *, u8); +#else +static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; } +static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; } +#endif + +static inline void ide_set_max_pio(ide_drive_t *drive) +{ + ide_set_pio(drive, 255); +} + +char *ide_media_string(ide_drive_t *); + +extern struct device_attribute ide_dev_attrs[]; +extern struct bus_type ide_bus_type; +extern struct class *ide_port_class; + +static inline void ide_dump_identify(u8 *id) +{ + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0); +} + +static inline int hwif_to_node(ide_hwif_t *hwif) +{ + return hwif->dev ? dev_to_node(hwif->dev) : -1; +} + +static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive) +{ + ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1]; + + return (peer->dev_flags & IDE_DFLAG_PRESENT) ? 
peer : NULL; +} + +static inline void *ide_get_drivedata(ide_drive_t *drive) +{ + return drive->drive_data; +} + +static inline void ide_set_drivedata(ide_drive_t *drive, void *data) +{ + drive->drive_data = data; +} + +#define ide_port_for_each_dev(i, dev, port) \ + for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) + +#define ide_port_for_each_present_dev(i, dev, port) \ + for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \ + if ((dev)->dev_flags & IDE_DFLAG_PRESENT) + +#define ide_host_for_each_port(i, port, host) \ + for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++) + +#endif /* _IDE_H */ diff --git a/include/linux/idr.h b/include/linux/idr.h new file mode 100644 index 00000000..255491cf --- /dev/null +++ b/include/linux/idr.h @@ -0,0 +1,155 @@ +/* + * include/linux/idr.h + * + * 2002-10-18 written by Jim Houston jim.houston@ccur.com + * Copyright (C) 2002 by Concurrent Computer Corporation + * Distributed under the GNU GPL license version 2. + * + * Small id to pointer translation service avoiding fixed sized + * tables. + */ + +#ifndef __IDR_H__ +#define __IDR_H__ + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/init.h> +#include <linux/rcupdate.h> + +#if BITS_PER_LONG == 32 +# define IDR_BITS 5 +# define IDR_FULL 0xfffffffful +/* We can only use two of the bits in the top level because there is + only one possible bit in the top level (5 bits * 7 levels = 35 + bits, but you only use 31 bits in the id). */ +# define TOP_LEVEL_FULL (IDR_FULL >> 30) +#elif BITS_PER_LONG == 64 +# define IDR_BITS 6 +# define IDR_FULL 0xfffffffffffffffful +/* We can only use two of the bits in the top level because there is + only one possible bit in the top level (6 bits * 6 levels = 36 + bits, but you only use 31 bits in the id). */ +# define TOP_LEVEL_FULL (IDR_FULL >> 62) +#else +# error "BITS_PER_LONG is not 32 or 64" +#endif + +#define IDR_SIZE (1 << IDR_BITS) +#define IDR_MASK ((1 << IDR_BITS)-1) + +#define MAX_ID_SHIFT (sizeof(int)*8 - 1) +#define MAX_ID_BIT (1U << MAX_ID_SHIFT) +#define MAX_ID_MASK (MAX_ID_BIT - 1) + +/* Leave the possibility of an incomplete final layer */ +#define MAX_LEVEL (MAX_ID_SHIFT + IDR_BITS - 1) / IDR_BITS + +/* Number of id_layer structs to leave in free list */ +#define IDR_FREE_MAX MAX_LEVEL + MAX_LEVEL + +struct idr_layer { + unsigned long bitmap; /* A zero bit means "space here" */ + struct idr_layer __rcu *ary[1<<IDR_BITS]; + int count; /* When zero, we can release it */ + int layer; /* distance from leaf */ + struct rcu_head rcu_head; +}; + +struct idr { + struct idr_layer __rcu *top; + struct idr_layer *id_free; + int layers; /* only valid without concurrent changes */ + int id_free_cnt; + spinlock_t lock; +}; + +#define IDR_INIT(name) \ +{ \ + .top = NULL, \ + .id_free = NULL, \ + .layers = 0, \ + .id_free_cnt = 0, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ +} +#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) + +/* Actions to be taken after a call to _idr_sub_alloc */ +#define IDR_NEED_TO_GROW -2 +#define IDR_NOMORE_SPACE -3 + +#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC) + +/** + * DOC: idr sync + * idr synchronization (stolen from radix-tree.h) + * + * idr_find() is able to be called locklessly, using RCU. The caller must + * ensure calls to this function are made within rcu_read_lock() regions. + * Other readers (lock-free or otherwise) and modifications may be running + * concurrently. 
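 *
 * (Editorial illustration, not part of the patch: the lockless lookup
 *  pattern described here looks roughly like
 *
 *	rcu_read_lock();
 *	obj = idr_find(&example_idr, id);
 *	if (obj)
 *		example_use(obj);
 *	rcu_read_unlock();
 *
 *  "example_idr", "obj" and "example_use()" are made-up names; idr_find()
 *  is the real lookup declared below, and the object must remain valid
 *  under RCU as the next paragraph explains.)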
+ * + * It is still required that the caller manage the synchronization and + * lifetimes of the items. So if RCU lock-free lookups are used, typically + * this would mean that the items have their own locks, or are amenable to + * lock-free access; and that the items are freed by RCU (or only freed after + * having been deleted from the idr tree *and* a synchronize_rcu() grace + * period). + */ + +/* + * This is what we export. + */ + +void *idr_find(struct idr *idp, int id); +int idr_pre_get(struct idr *idp, gfp_t gfp_mask); +int idr_get_new(struct idr *idp, void *ptr, int *id); +int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); +int idr_for_each(struct idr *idp, + int (*fn)(int id, void *p, void *data), void *data); +void *idr_get_next(struct idr *idp, int *nextid); +void *idr_replace(struct idr *idp, void *ptr, int id); +void idr_remove(struct idr *idp, int id); +void idr_remove_all(struct idr *idp); +void idr_destroy(struct idr *idp); +void idr_init(struct idr *idp); + + +/* + * IDA - IDR based id allocator, use when translation from id to + * pointer isn't necessary. + * + * IDA_BITMAP_LONGS is calculated to be one less to accommodate + * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes. + */ +#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ +#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) +#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) + +struct ida_bitmap { + long nr_busy; + unsigned long bitmap[IDA_BITMAP_LONGS]; +}; + +struct ida { + struct idr idr; + struct ida_bitmap *free_bitmap; +}; + +#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, } +#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) + +int ida_pre_get(struct ida *ida, gfp_t gfp_mask); +int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); +int ida_get_new(struct ida *ida, int *p_id); +void ida_remove(struct ida *ida, int id); +void ida_destroy(struct ida *ida); +void ida_init(struct ida *ida); + +int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, + gfp_t gfp_mask); +void ida_simple_remove(struct ida *ida, unsigned int id); + +void __init idr_init_cache(void); + +#endif /* __IDR_H__ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h new file mode 100644 index 00000000..210e2c32 --- /dev/null +++ b/include/linux/ieee80211.h @@ -0,0 +1,1878 @@ +/* + * IEEE 802.11 defines + * + * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + * <jkmaline@cc.hut.fi> + * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> + * Copyright (c) 2005, Devicescape Software, Inc. + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef LINUX_IEEE80211_H +#define LINUX_IEEE80211_H + +#include <linux/types.h> +#include <asm/byteorder.h> + +/* + * DS bit usage + * + * TA = transmitter address + * RA = receiver address + * DA = destination address + * SA = source address + * + * ToDS FromDS A1(RA) A2(TA) A3 A4 Use + * ----------------------------------------------------------------- + * 0 0 DA SA BSSID - IBSS/DLS + * 0 1 DA BSSID SA - AP -> STA + * 1 0 BSSID SA DA - AP <- STA + * 1 1 RA TA DA SA unspecified (WDS) + */ + +#define FCS_LEN 4 + +#define IEEE80211_FCTL_VERS 0x0003 +#define IEEE80211_FCTL_FTYPE 0x000c +#define IEEE80211_FCTL_STYPE 0x00f0 +#define IEEE80211_FCTL_TODS 0x0100 +#define IEEE80211_FCTL_FROMDS 0x0200 +#define IEEE80211_FCTL_MOREFRAGS 0x0400 +#define IEEE80211_FCTL_RETRY 0x0800 +#define IEEE80211_FCTL_PM 0x1000 +#define IEEE80211_FCTL_MOREDATA 0x2000 +#define IEEE80211_FCTL_PROTECTED 0x4000 +#define IEEE80211_FCTL_ORDER 0x8000 + +#define IEEE80211_SCTL_FRAG 0x000F +#define IEEE80211_SCTL_SEQ 0xFFF0 + +#define IEEE80211_FTYPE_MGMT 0x0000 +#define IEEE80211_FTYPE_CTL 0x0004 +#define IEEE80211_FTYPE_DATA 0x0008 + +/* management */ +#define IEEE80211_STYPE_ASSOC_REQ 0x0000 +#define IEEE80211_STYPE_ASSOC_RESP 0x0010 +#define IEEE80211_STYPE_REASSOC_REQ 0x0020 +#define IEEE80211_STYPE_REASSOC_RESP 0x0030 +#define IEEE80211_STYPE_PROBE_REQ 0x0040 +#define IEEE80211_STYPE_PROBE_RESP 0x0050 +#define IEEE80211_STYPE_BEACON 0x0080 +#define IEEE80211_STYPE_ATIM 0x0090 +#define IEEE80211_STYPE_DISASSOC 0x00A0 +#define IEEE80211_STYPE_AUTH 0x00B0 +#define IEEE80211_STYPE_DEAUTH 0x00C0 +#define IEEE80211_STYPE_ACTION 0x00D0 + +/* control */ +#define IEEE80211_STYPE_BACK_REQ 0x0080 +#define IEEE80211_STYPE_BACK 0x0090 +#define IEEE80211_STYPE_PSPOLL 0x00A0 +#define IEEE80211_STYPE_RTS 0x00B0 +#define IEEE80211_STYPE_CTS 0x00C0 +#define IEEE80211_STYPE_ACK 0x00D0 +#define IEEE80211_STYPE_CFEND 0x00E0 +#define IEEE80211_STYPE_CFENDACK 0x00F0 + +/* data */ +#define IEEE80211_STYPE_DATA 0x0000 +#define IEEE80211_STYPE_DATA_CFACK 0x0010 +#define IEEE80211_STYPE_DATA_CFPOLL 0x0020 +#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030 +#define IEEE80211_STYPE_NULLFUNC 0x0040 +#define IEEE80211_STYPE_CFACK 0x0050 +#define IEEE80211_STYPE_CFPOLL 0x0060 +#define IEEE80211_STYPE_CFACKPOLL 0x0070 +#define IEEE80211_STYPE_QOS_DATA 0x0080 +#define IEEE80211_STYPE_QOS_DATA_CFACK 0x0090 +#define IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0 +#define IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0 +#define IEEE80211_STYPE_QOS_NULLFUNC 0x00C0 +#define IEEE80211_STYPE_QOS_CFACK 0x00D0 +#define IEEE80211_STYPE_QOS_CFPOLL 0x00E0 +#define IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0 + + +/* miscellaneous IEEE 802.11 constants */ +#define IEEE80211_MAX_FRAG_THRESHOLD 2352 +#define IEEE80211_MAX_RTS_THRESHOLD 2353 +#define IEEE80211_MAX_AID 2007 +#define IEEE80211_MAX_TIM_LEN 251 +/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section + 6.2.1.1.2. + + 802.11e clarifies the figure in section 7.1.2. The frame body is + up to 2304 octets long (maximum MSDU size) plus any crypt overhead. 
*/ +#define IEEE80211_MAX_DATA_LEN 2304 +/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ +#define IEEE80211_MAX_FRAME_LEN 2352 + +#define IEEE80211_MAX_SSID_LEN 32 + +#define IEEE80211_MAX_MESH_ID_LEN 32 + +#define IEEE80211_QOS_CTL_LEN 2 +/* 1d tag mask */ +#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 +/* TID mask */ +#define IEEE80211_QOS_CTL_TID_MASK 0x000f +/* EOSP */ +#define IEEE80211_QOS_CTL_EOSP 0x0010 +/* ACK policy */ +#define IEEE80211_QOS_CTL_ACK_POLICY_NORMAL 0x0000 +#define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020 +#define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040 +#define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060 +#define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060 +/* A-MSDU 802.11n */ +#define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080 +/* Mesh Control 802.11s */ +#define IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT 0x0100 + +/* U-APSD queue for WMM IEs sent by AP */ +#define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD (1<<7) +#define IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK 0x0f + +/* U-APSD queues for WMM IEs sent by STA */ +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VO (1<<0) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VI (1<<1) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BK (1<<2) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BE (1<<3) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK 0x0f + +/* U-APSD max SP length for WMM IEs sent by STA */ +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 0x00 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_2 0x01 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_4 0x02 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_6 0x03 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5 + +#define IEEE80211_HT_CTL_LEN 4 + +struct ieee80211_hdr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; + u8 addr4[6]; +} __attribute__ ((packed)); + +struct ieee80211_hdr_3addr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; +} __attribute__ ((packed)); + +struct ieee80211_qos_hdr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; + __le16 qos_ctrl; +} __attribute__ ((packed)); + +/** + * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_tods(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0; +} + +/** + * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_fromds(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0; +} + +/** + * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_a4(__le16 fc) +{ + __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); + return (fc & tmp) == tmp; +} + +/** + * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_morefrags(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0; +} + +/** + * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_retry(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0; +} + 
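/*
 * Illustrative sketch, not part of the patch: the DS-bit table near the top
 * of this header determines which address field carries the DA and SA, and
 * the ieee80211_has_*() helpers above are the usual way to test those bits.
 * The "example_*" helper names are made up; the structures, helpers and
 * semantics come from this header.
 */
static inline const u8 *example_get_da(const struct ieee80211_hdr *hdr)
{
	if (ieee80211_has_tods(hdr->frame_control))
		return hdr->addr3;	/* ToDS=1: DA travels in addr3 */
	return hdr->addr1;		/* otherwise addr1 (the RA) is the DA */
}

static inline const u8 *example_get_sa(const struct ieee80211_hdr *hdr)
{
	if (ieee80211_has_a4(hdr->frame_control))
		return hdr->addr4;	/* WDS (ToDS=1, FromDS=1): SA in addr4 */
	if (ieee80211_has_fromds(hdr->frame_control))
		return hdr->addr3;	/* AP -> STA: SA travels in addr3 */
	return hdr->addr2;		/* IBSS/DLS or STA -> AP: SA in addr2 */
}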
+/** + * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_pm(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0; +} + +/** + * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_moredata(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0; +} + +/** + * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_protected(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0; +} + +/** + * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_order(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0; +} + +/** + * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_mgmt(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT); +} + +/** + * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_ctl(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL); +} + +/** + * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_data(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA); +} + +/** + * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_data_qos(__le16 fc) +{ + /* + * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need + * to check the one bit + */ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_STYPE_QOS_DATA)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA); +} + +/** + * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_data_present(__le16 fc) +{ + /* + * mask with 0x40 and test that that bit is clear to only return true + * for the data-containing substypes. 
+ */ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 0x40)) == + cpu_to_le16(IEEE80211_FTYPE_DATA); +} + +/** + * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_assoc_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); +} + +/** + * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_assoc_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP); +} + +/** + * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_reassoc_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); +} + +/** + * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_reassoc_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP); +} + +/** + * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_probe_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); +} + +/** + * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_probe_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); +} + +/** + * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_beacon(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); +} + +/** + * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_atim(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM); +} + +/** + * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_disassoc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC); +} + +/** + * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_auth(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 
IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); +} + +/** + * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_deauth(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); +} + +/** + * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_action(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); +} + +/** + * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_back_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); +} + +/** + * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_back(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK); +} + +/** + * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_pspoll(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); +} + +/** + * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_rts(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); +} + +/** + * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_cts(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); +} + +/** + * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_ack(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); +} + +/** + * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_cfend(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND); +} + +/** + * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_cfendack(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK); +} + +/** + * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) 
nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_nullfunc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC); +} + +/** + * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_qos_nullfunc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); +} + +/** + * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set + * @seq_ctrl: frame sequence control bytes in little-endian byteorder + */ +static inline int ieee80211_is_first_frag(__le16 seq_ctrl) +{ + return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; +} + +struct ieee80211s_hdr { + u8 flags; + u8 ttl; + __le32 seqnum; + u8 eaddr1[6]; + u8 eaddr2[6]; +} __attribute__ ((packed)); + +/* Mesh flags */ +#define MESH_FLAGS_AE_A4 0x1 +#define MESH_FLAGS_AE_A5_A6 0x2 +#define MESH_FLAGS_AE 0x3 +#define MESH_FLAGS_PS_DEEP 0x4 + +/** + * struct ieee80211_quiet_ie + * + * This structure refers to "Quiet information element" + */ +struct ieee80211_quiet_ie { + u8 count; + u8 period; + __le16 duration; + __le16 offset; +} __attribute__ ((packed)); + +/** + * struct ieee80211_msrment_ie + * + * This structure refers to "Measurement Request/Report information element" + */ +struct ieee80211_msrment_ie { + u8 token; + u8 mode; + u8 type; + u8 request[0]; +} __attribute__ ((packed)); + +/** + * struct ieee80211_channel_sw_ie + * + * This structure refers to "Channel Switch Announcement information element" + */ +struct ieee80211_channel_sw_ie { + u8 mode; + u8 new_ch_num; + u8 count; +} __attribute__ ((packed)); + +/** + * struct ieee80211_tim + * + * This structure refers to "Traffic Indication Map information element" + */ +struct ieee80211_tim_ie { + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + /* variable size: 1 - 251 bytes */ + u8 virtual_map[1]; +} __attribute__ ((packed)); + +/** + * struct ieee80211_meshconf_ie + * + * This structure refers to "Mesh Configuration information element" + */ +struct ieee80211_meshconf_ie { + u8 meshconf_psel; + u8 meshconf_pmetric; + u8 meshconf_congest; + u8 meshconf_synch; + u8 meshconf_auth; + u8 meshconf_form; + u8 meshconf_cap; +} __attribute__ ((packed)); + +/** + * struct ieee80211_rann_ie + * + * This structure refers to "Root Announcement information element" + */ +struct ieee80211_rann_ie { + u8 rann_flags; + u8 rann_hopcount; + u8 rann_ttl; + u8 rann_addr[6]; + u32 rann_seq; + u32 rann_interval; + u32 rann_metric; +} __attribute__ ((packed)); + +enum ieee80211_rann_flags { + RANN_FLAG_IS_GATE = 1 << 0, +}; + +#define WLAN_SA_QUERY_TR_ID_LEN 2 + +struct ieee80211_mgmt { + __le16 frame_control; + __le16 duration; + u8 da[6]; + u8 sa[6]; + u8 bssid[6]; + __le16 seq_ctrl; + union { + struct { + __le16 auth_alg; + __le16 auth_transaction; + __le16 status_code; + /* possibly followed by Challenge text */ + u8 variable[0]; + } __attribute__ ((packed)) auth; + struct { + __le16 reason_code; + } __attribute__ ((packed)) deauth; + struct { + __le16 capab_info; + __le16 listen_interval; + /* followed by SSID and Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) assoc_req; + struct { + __le16 capab_info; + __le16 status_code; + __le16 aid; + /* followed by Supported rates */ + u8 variable[0]; + } __attribute__ 
((packed)) assoc_resp, reassoc_resp; + struct { + __le16 capab_info; + __le16 listen_interval; + u8 current_ap[6]; + /* followed by SSID and Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) reassoc_req; + struct { + __le16 reason_code; + } __attribute__ ((packed)) disassoc; + struct { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + /* followed by some of SSID, Supported rates, + * FH Params, DS Params, CF Params, IBSS Params, TIM */ + u8 variable[0]; + } __attribute__ ((packed)) beacon; + struct { + /* only variable items: SSID, Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) probe_req; + struct { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + /* followed by some of SSID, Supported rates, + * FH Params, DS Params, CF Params, IBSS Params */ + u8 variable[0]; + } __attribute__ ((packed)) probe_resp; + struct { + u8 category; + union { + struct { + u8 action_code; + u8 dialog_token; + u8 status_code; + u8 variable[0]; + } __attribute__ ((packed)) wme_action; + struct{ + u8 action_code; + u8 element_id; + u8 length; + struct ieee80211_channel_sw_ie sw_elem; + } __attribute__((packed)) chan_switch; + struct{ + u8 action_code; + u8 dialog_token; + u8 element_id; + u8 length; + struct ieee80211_msrment_ie msr_elem; + } __attribute__((packed)) measurement; + struct{ + u8 action_code; + u8 dialog_token; + __le16 capab; + __le16 timeout; + __le16 start_seq_num; + } __attribute__((packed)) addba_req; + struct{ + u8 action_code; + u8 dialog_token; + __le16 status; + __le16 capab; + __le16 timeout; + } __attribute__((packed)) addba_resp; + struct{ + u8 action_code; + __le16 params; + __le16 reason_code; + } __attribute__((packed)) delba; + struct { + u8 action_code; + u8 variable[0]; + } __attribute__((packed)) self_prot; + struct{ + u8 action_code; + u8 variable[0]; + } __attribute__((packed)) mesh_action; + struct { + u8 action; + u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN]; + } __attribute__ ((packed)) sa_query; + struct { + u8 action; + u8 smps_control; + } __attribute__ ((packed)) ht_smps; + struct { + u8 action_code; + u8 dialog_token; + __le16 capability; + u8 variable[0]; + } __packed tdls_discover_resp; + } u; + } __attribute__ ((packed)) action; + } u; +} __attribute__ ((packed)); + +/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ +#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 + +/* mgmt header + 1 byte category code */ +#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) + + +/* Management MIC information element (IEEE 802.11w) */ +struct ieee80211_mmie { + u8 element_id; + u8 length; + __le16 key_id; + u8 sequence_number[6]; + u8 mic[8]; +} __attribute__ ((packed)); + +struct ieee80211_vendor_ie { + u8 element_id; + u8 len; + u8 oui[3]; + u8 oui_type; +} __packed; + +/* Control frames */ +struct ieee80211_rts { + __le16 frame_control; + __le16 duration; + u8 ra[6]; + u8 ta[6]; +} __attribute__ ((packed)); + +struct ieee80211_cts { + __le16 frame_control; + __le16 duration; + u8 ra[6]; +} __attribute__ ((packed)); + +struct ieee80211_pspoll { + __le16 frame_control; + __le16 aid; + u8 bssid[6]; + u8 ta[6]; +} __attribute__ ((packed)); + +/* TDLS */ + +/* Link-id information element */ +struct ieee80211_tdls_lnkie { + u8 ie_type; /* Link Identifier IE */ + u8 ie_len; + u8 bssid[6]; + u8 init_sta[6]; + u8 resp_sta[6]; +} __packed; + +struct ieee80211_tdls_data { + u8 da[6]; + u8 sa[6]; + __be16 ether_type; + u8 payload_type; + u8 category; + u8 action_code; + union { + struct { + u8 dialog_token; 
+ __le16 capability; + u8 variable[0]; + } __packed setup_req; + struct { + __le16 status_code; + u8 dialog_token; + __le16 capability; + u8 variable[0]; + } __packed setup_resp; + struct { + __le16 status_code; + u8 dialog_token; + u8 variable[0]; + } __packed setup_cfm; + struct { + __le16 reason_code; + u8 variable[0]; + } __packed teardown; + struct { + u8 dialog_token; + u8 variable[0]; + } __packed discover_req; + } u; +} __packed; + +/** + * struct ieee80211_bar - HT Block Ack Request + * + * This structure refers to "HT BlockAckReq" as + * described in 802.11n draft section 7.2.1.7.1 + */ +struct ieee80211_bar { + __le16 frame_control; + __le16 duration; + __u8 ra[6]; + __u8 ta[6]; + __le16 control; + __le16 start_seq_num; +} __attribute__((packed)); + +/* 802.11 BAR control masks */ +#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000 +#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002 +#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004 +#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000 +#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12 + +#define IEEE80211_HT_MCS_MASK_LEN 10 + +/** + * struct ieee80211_mcs_info - MCS information + * @rx_mask: RX mask + * @rx_highest: highest supported RX rate. If set represents + * the highest supported RX data rate in units of 1 Mbps. + * If this field is 0 this value should not be used to + * consider the highest RX data rate supported. + * @tx_params: TX parameters + */ +struct ieee80211_mcs_info { + u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN]; + __le16 rx_highest; + u8 tx_params; + u8 reserved[3]; +} __attribute__((packed)); + +/* 802.11n HT capability MSC set */ +#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff +#define IEEE80211_HT_MCS_TX_DEFINED 0x01 +#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02 +/* value 0 == 1 stream etc */ +#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C +#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2 +#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4 +#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10 + +/* + * 802.11n D5.0 20.3.5 / 20.6 says: + * - indices 0 to 7 and 32 are single spatial stream + * - 8 to 31 are multiple spatial streams using equal modulation + * [8..15 for two streams, 16..23 for three and 24..31 for four] + * - remainder are multiple spatial streams using unequal modulation + */ +#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33 +#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \ + (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8) + +/** + * struct ieee80211_ht_cap - HT capabilities + * + * This structure is the "HT capabilities element" as + * described in 802.11n D5.0 7.3.2.57 + */ +struct ieee80211_ht_cap { + __le16 cap_info; + u8 ampdu_params_info; + + /* 16 bytes MCS information */ + struct ieee80211_mcs_info mcs; + + __le16 extended_ht_cap_info; + __le32 tx_BF_cap_info; + u8 antenna_selection_info; +} __attribute__ ((packed)); + +/* 802.11n HT capabilities masks (for cap_info) */ +#define IEEE80211_HT_CAP_LDPC_CODING 0x0001 +#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002 +#define IEEE80211_HT_CAP_SM_PS 0x000C +#define IEEE80211_HT_CAP_SM_PS_SHIFT 2 +#define IEEE80211_HT_CAP_GRN_FLD 0x0010 +#define IEEE80211_HT_CAP_SGI_20 0x0020 +#define IEEE80211_HT_CAP_SGI_40 0x0040 +#define IEEE80211_HT_CAP_TX_STBC 0x0080 +#define IEEE80211_HT_CAP_RX_STBC 0x0300 +#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8 +#define IEEE80211_HT_CAP_DELAY_BA 0x0400 +#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 +#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 +#define IEEE80211_HT_CAP_RESERVED 0x2000 +#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000 
+#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000 + +/* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */ +#define IEEE80211_HT_EXT_CAP_PCO 0x0001 +#define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006 +#define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1 +#define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300 +#define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8 +#define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400 +#define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800 + +/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */ +#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03 +#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C +#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 + +/* + * Maximum length of AMPDU that the STA can receive. + * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ +enum ieee80211_max_ampdu_length_exp { + IEEE80211_HT_MAX_AMPDU_8K = 0, + IEEE80211_HT_MAX_AMPDU_16K = 1, + IEEE80211_HT_MAX_AMPDU_32K = 2, + IEEE80211_HT_MAX_AMPDU_64K = 3 +}; + +#define IEEE80211_HT_MAX_AMPDU_FACTOR 13 + +/* Minimum MPDU start spacing */ +enum ieee80211_min_mpdu_spacing { + IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */ + IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */ + IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */ + IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */ + IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */ + IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */ + IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */ + IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */ +}; + +/** + * struct ieee80211_ht_info - HT information + * + * This structure is the "HT information element" as + * described in 802.11n D5.0 7.3.2.58 + */ +struct ieee80211_ht_info { + u8 control_chan; + u8 ht_param; + __le16 operation_mode; + __le16 stbc_param; + u8 basic_set[16]; +} __attribute__ ((packed)); + +/* for ht_param */ +#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03 +#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00 +#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01 +#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03 +#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04 +#define IEEE80211_HT_PARAM_RIFS_MODE 0x08 +#define IEEE80211_HT_PARAM_SPSMP_SUPPORT 0x10 +#define IEEE80211_HT_PARAM_SERV_INTERVAL_GRAN 0xE0 + +/* for operation_mode */ +#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1 +#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3 +#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004 +#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010 + +/* for stbc_param */ +#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040 +#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080 +#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100 +#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200 +#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400 +#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800 + + +/* block-ack parameters */ +#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 +#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 +#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 +#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 + +/* + * A-PMDU buffer sizes + * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) + */ +#define IEEE80211_MIN_AMPDU_BUF 0x8 +#define IEEE80211_MAX_AMPDU_BUF 0x40 + + +/* Spatial Multiplexing Power Save Modes (for capability) */ +#define WLAN_HT_CAP_SM_PS_STATIC 0 +#define WLAN_HT_CAP_SM_PS_DYNAMIC 1 +#define 
WLAN_HT_CAP_SM_PS_INVALID 2 +#define WLAN_HT_CAP_SM_PS_DISABLED 3 + +/* for SM power control field lower two bits */ +#define WLAN_HT_SMPS_CONTROL_DISABLED 0 +#define WLAN_HT_SMPS_CONTROL_STATIC 1 +#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3 + +/* Authentication algorithms */ +#define WLAN_AUTH_OPEN 0 +#define WLAN_AUTH_SHARED_KEY 1 +#define WLAN_AUTH_FT 2 +#define WLAN_AUTH_SAE 3 +#define WLAN_AUTH_LEAP 128 + +#define WLAN_AUTH_CHALLENGE_LEN 128 + +#define WLAN_CAPABILITY_ESS (1<<0) +#define WLAN_CAPABILITY_IBSS (1<<1) + +/* + * A mesh STA sets the ESS and IBSS capability bits to zero. + * however, this holds true for p2p probe responses (in the p2p_find + * phase) as well. + */ +#define WLAN_CAPABILITY_IS_STA_BSS(cap) \ + (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS))) + +#define WLAN_CAPABILITY_CF_POLLABLE (1<<2) +#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3) +#define WLAN_CAPABILITY_PRIVACY (1<<4) +#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5) +#define WLAN_CAPABILITY_PBCC (1<<6) +#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7) + +/* 802.11h */ +#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8) +#define WLAN_CAPABILITY_QOS (1<<9) +#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) +#define WLAN_CAPABILITY_DSSS_OFDM (1<<13) +/* measurement */ +#define IEEE80211_SPCT_MSR_RPRT_MODE_LATE (1<<0) +#define IEEE80211_SPCT_MSR_RPRT_MODE_INCAPABLE (1<<1) +#define IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED (1<<2) + +#define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0 +#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1 +#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2 + + +/* 802.11g ERP information element */ +#define WLAN_ERP_NON_ERP_PRESENT (1<<0) +#define WLAN_ERP_USE_PROTECTION (1<<1) +#define WLAN_ERP_BARKER_PREAMBLE (1<<2) + +/* WLAN_ERP_BARKER_PREAMBLE values */ +enum { + WLAN_ERP_PREAMBLE_SHORT = 0, + WLAN_ERP_PREAMBLE_LONG = 1, +}; + +/* Status codes */ +enum ieee80211_statuscode { + WLAN_STATUS_SUCCESS = 0, + WLAN_STATUS_UNSPECIFIED_FAILURE = 1, + WLAN_STATUS_CAPS_UNSUPPORTED = 10, + WLAN_STATUS_REASSOC_NO_ASSOC = 11, + WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12, + WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13, + WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14, + WLAN_STATUS_CHALLENGE_FAIL = 15, + WLAN_STATUS_AUTH_TIMEOUT = 16, + WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17, + WLAN_STATUS_ASSOC_DENIED_RATES = 18, + /* 802.11b */ + WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19, + WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20, + WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21, + /* 802.11h */ + WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22, + WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23, + WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24, + /* 802.11g */ + WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25, + WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26, + /* 802.11w */ + WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30, + WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31, + /* 802.11i */ + WLAN_STATUS_INVALID_IE = 40, + WLAN_STATUS_INVALID_GROUP_CIPHER = 41, + WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42, + WLAN_STATUS_INVALID_AKMP = 43, + WLAN_STATUS_UNSUPP_RSN_VERSION = 44, + WLAN_STATUS_INVALID_RSN_IE_CAP = 45, + WLAN_STATUS_CIPHER_SUITE_REJECTED = 46, + /* 802.11e */ + WLAN_STATUS_UNSPECIFIED_QOS = 32, + WLAN_STATUS_ASSOC_DENIED_NOBANDWIDTH = 33, + WLAN_STATUS_ASSOC_DENIED_LOWACK = 34, + WLAN_STATUS_ASSOC_DENIED_UNSUPP_QOS = 35, + WLAN_STATUS_REQUEST_DECLINED = 37, + WLAN_STATUS_INVALID_QOS_PARAM = 38, + WLAN_STATUS_CHANGE_TSPEC = 39, + WLAN_STATUS_WAIT_TS_DELAY = 47, + WLAN_STATUS_NO_DIRECT_LINK = 48, + WLAN_STATUS_STA_NOT_PRESENT = 49, + WLAN_STATUS_STA_NOT_QSTA = 50, + /* 
802.11s */ + WLAN_STATUS_ANTI_CLOG_REQUIRED = 76, + WLAN_STATUS_FCG_NOT_SUPP = 78, + WLAN_STATUS_STA_NO_TBTT = 78, +}; + + +/* Reason codes */ +enum ieee80211_reasoncode { + WLAN_REASON_UNSPECIFIED = 1, + WLAN_REASON_PREV_AUTH_NOT_VALID = 2, + WLAN_REASON_DEAUTH_LEAVING = 3, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4, + WLAN_REASON_DISASSOC_AP_BUSY = 5, + WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6, + WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7, + WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8, + WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9, + /* 802.11h */ + WLAN_REASON_DISASSOC_BAD_POWER = 10, + WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11, + /* 802.11i */ + WLAN_REASON_INVALID_IE = 13, + WLAN_REASON_MIC_FAILURE = 14, + WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15, + WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16, + WLAN_REASON_IE_DIFFERENT = 17, + WLAN_REASON_INVALID_GROUP_CIPHER = 18, + WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19, + WLAN_REASON_INVALID_AKMP = 20, + WLAN_REASON_UNSUPP_RSN_VERSION = 21, + WLAN_REASON_INVALID_RSN_IE_CAP = 22, + WLAN_REASON_IEEE8021X_FAILED = 23, + WLAN_REASON_CIPHER_SUITE_REJECTED = 24, + /* 802.11e */ + WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32, + WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33, + WLAN_REASON_DISASSOC_LOW_ACK = 34, + WLAN_REASON_DISASSOC_QAP_EXCEED_TXOP = 35, + WLAN_REASON_QSTA_LEAVE_QBSS = 36, + WLAN_REASON_QSTA_NOT_USE = 37, + WLAN_REASON_QSTA_REQUIRE_SETUP = 38, + WLAN_REASON_QSTA_TIMEOUT = 39, + WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, + /* 802.11s */ + WLAN_REASON_MESH_PEER_CANCELED = 52, + WLAN_REASON_MESH_MAX_PEERS = 53, + WLAN_REASON_MESH_CONFIG = 54, + WLAN_REASON_MESH_CLOSE = 55, + WLAN_REASON_MESH_MAX_RETRIES = 56, + WLAN_REASON_MESH_CONFIRM_TIMEOUT = 57, + WLAN_REASON_MESH_INVALID_GTK = 58, + WLAN_REASON_MESH_INCONSISTENT_PARAM = 59, + WLAN_REASON_MESH_INVALID_SECURITY = 60, + WLAN_REASON_MESH_PATH_ERROR = 61, + WLAN_REASON_MESH_PATH_NOFORWARD = 62, + WLAN_REASON_MESH_PATH_DEST_UNREACHABLE = 63, + WLAN_REASON_MAC_EXISTS_IN_MBSS = 64, + WLAN_REASON_MESH_CHAN_REGULATORY = 65, + WLAN_REASON_MESH_CHAN = 66, +}; + + +/* Information Element IDs */ +enum ieee80211_eid { + WLAN_EID_SSID = 0, + WLAN_EID_SUPP_RATES = 1, + WLAN_EID_FH_PARAMS = 2, + WLAN_EID_DS_PARAMS = 3, + WLAN_EID_CF_PARAMS = 4, + WLAN_EID_TIM = 5, + WLAN_EID_IBSS_PARAMS = 6, + WLAN_EID_CHALLENGE = 16, + + WLAN_EID_COUNTRY = 7, + WLAN_EID_HP_PARAMS = 8, + WLAN_EID_HP_TABLE = 9, + WLAN_EID_REQUEST = 10, + + WLAN_EID_QBSS_LOAD = 11, + WLAN_EID_EDCA_PARAM_SET = 12, + WLAN_EID_TSPEC = 13, + WLAN_EID_TCLAS = 14, + WLAN_EID_SCHEDULE = 15, + WLAN_EID_TS_DELAY = 43, + WLAN_EID_TCLAS_PROCESSING = 44, + WLAN_EID_QOS_CAPA = 46, + /* 802.11z */ + WLAN_EID_LINK_ID = 101, + /* 802.11s */ + WLAN_EID_MESH_CONFIG = 113, + WLAN_EID_MESH_ID = 114, + WLAN_EID_LINK_METRIC_REPORT = 115, + WLAN_EID_CONGESTION_NOTIFICATION = 116, + WLAN_EID_PEER_MGMT = 117, + WLAN_EID_CHAN_SWITCH_PARAM = 118, + WLAN_EID_MESH_AWAKE_WINDOW = 119, + WLAN_EID_BEACON_TIMING = 120, + WLAN_EID_MCCAOP_SETUP_REQ = 121, + WLAN_EID_MCCAOP_SETUP_RESP = 122, + WLAN_EID_MCCAOP_ADVERT = 123, + WLAN_EID_MCCAOP_TEARDOWN = 124, + WLAN_EID_GANN = 125, + WLAN_EID_RANN = 126, + WLAN_EID_PREQ = 130, + WLAN_EID_PREP = 131, + WLAN_EID_PERR = 132, + WLAN_EID_PXU = 137, + WLAN_EID_PXUC = 138, + WLAN_EID_AUTH_MESH_PEER_EXCH = 139, + WLAN_EID_MIC = 140, + + WLAN_EID_PWR_CONSTRAINT = 32, + WLAN_EID_PWR_CAPABILITY = 33, + WLAN_EID_TPC_REQUEST = 34, + WLAN_EID_TPC_REPORT = 35, + WLAN_EID_SUPPORTED_CHANNELS = 36, + WLAN_EID_CHANNEL_SWITCH = 37, + 
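The element IDs above tag the variable-length information elements that follow the fixed fields of beacons, probe responses and (re)association frames (the u8 variable[0] members earlier in this file). Each element is encoded as a one-byte ID, a one-byte length, and then that many bytes of data. The walker below is a minimal illustrative sketch, not part of this header; find_ssid_ie is an invented name, and the caller is assumed to pass the element buffer and its length (for example mgmt->u.beacon.variable).

    #include <linux/ieee80211.h>

    /* Walk a buffer of tagged elements and return a pointer to the SSID
     * element, or NULL if it is absent or the buffer is malformed.
     */
    static const u8 *find_ssid_ie(const u8 *ies, size_t ies_len)
    {
            size_t pos = 0;

            while (pos + 2 <= ies_len) {
                    u8 id = ies[pos];
                    u8 len = ies[pos + 1];

                    if (pos + 2 + len > ies_len)
                            return NULL;    /* truncated element */
                    if (id == WLAN_EID_SSID)
                            return &ies[pos];
                    pos += 2 + len;
            }
            return NULL;
    }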
WLAN_EID_MEASURE_REQUEST = 38, + WLAN_EID_MEASURE_REPORT = 39, + WLAN_EID_QUIET = 40, + WLAN_EID_IBSS_DFS = 41, + + WLAN_EID_ERP_INFO = 42, + WLAN_EID_EXT_SUPP_RATES = 50, + + WLAN_EID_HT_CAPABILITY = 45, + WLAN_EID_HT_INFORMATION = 61, + + WLAN_EID_RSN = 48, + WLAN_EID_MMIE = 76, + WLAN_EID_WPA = 221, + WLAN_EID_GENERIC = 221, + WLAN_EID_VENDOR_SPECIFIC = 221, + WLAN_EID_QOS_PARAMETER = 222, + + WLAN_EID_AP_CHAN_REPORT = 51, + WLAN_EID_NEIGHBOR_REPORT = 52, + WLAN_EID_RCPI = 53, + WLAN_EID_BSS_AVG_ACCESS_DELAY = 63, + WLAN_EID_ANTENNA_INFO = 64, + WLAN_EID_RSNI = 65, + WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66, + WLAN_EID_BSS_AVAILABLE_CAPACITY = 67, + WLAN_EID_BSS_AC_ACCESS_DELAY = 68, + WLAN_EID_RRM_ENABLED_CAPABILITIES = 70, + WLAN_EID_MULTIPLE_BSSID = 71, + WLAN_EID_BSS_COEX_2040 = 72, + WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74, + WLAN_EID_EXT_CAPABILITY = 127, + + WLAN_EID_MOBILITY_DOMAIN = 54, + WLAN_EID_FAST_BSS_TRANSITION = 55, + WLAN_EID_TIMEOUT_INTERVAL = 56, + WLAN_EID_RIC_DATA = 57, + WLAN_EID_RIC_DESCRIPTOR = 75, + + WLAN_EID_DSE_REGISTERED_LOCATION = 58, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59, + WLAN_EID_EXT_CHANSWITCH_ANN = 60, +}; + +/* Action category code */ +enum ieee80211_category { + WLAN_CATEGORY_SPECTRUM_MGMT = 0, + WLAN_CATEGORY_QOS = 1, + WLAN_CATEGORY_DLS = 2, + WLAN_CATEGORY_BACK = 3, + WLAN_CATEGORY_PUBLIC = 4, + WLAN_CATEGORY_HT = 7, + WLAN_CATEGORY_SA_QUERY = 8, + WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, + WLAN_CATEGORY_TDLS = 12, + WLAN_CATEGORY_MESH_ACTION = 13, + WLAN_CATEGORY_MULTIHOP_ACTION = 14, + WLAN_CATEGORY_SELF_PROTECTED = 15, + WLAN_CATEGORY_WMM = 17, + WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, + WLAN_CATEGORY_VENDOR_SPECIFIC = 127, +}; + +/* SPECTRUM_MGMT action code */ +enum ieee80211_spectrum_mgmt_actioncode { + WLAN_ACTION_SPCT_MSR_REQ = 0, + WLAN_ACTION_SPCT_MSR_RPRT = 1, + WLAN_ACTION_SPCT_TPC_REQ = 2, + WLAN_ACTION_SPCT_TPC_RPRT = 3, + WLAN_ACTION_SPCT_CHL_SWITCH = 4, +}; + +/* HT action codes */ +enum ieee80211_ht_actioncode { + WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0, + WLAN_HT_ACTION_SMPS = 1, + WLAN_HT_ACTION_PSMP = 2, + WLAN_HT_ACTION_PCO_PHASE = 3, + WLAN_HT_ACTION_CSI = 4, + WLAN_HT_ACTION_NONCOMPRESSED_BF = 5, + WLAN_HT_ACTION_COMPRESSED_BF = 6, + WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7, +}; + +/* Self Protected Action codes */ +enum ieee80211_self_protected_actioncode { + WLAN_SP_RESERVED = 0, + WLAN_SP_MESH_PEERING_OPEN = 1, + WLAN_SP_MESH_PEERING_CONFIRM = 2, + WLAN_SP_MESH_PEERING_CLOSE = 3, + WLAN_SP_MGK_INFORM = 4, + WLAN_SP_MGK_ACK = 5, +}; + +/* Mesh action codes */ +enum ieee80211_mesh_actioncode { + WLAN_MESH_ACTION_LINK_METRIC_REPORT, + WLAN_MESH_ACTION_HWMP_PATH_SELECTION, + WLAN_MESH_ACTION_GATE_ANNOUNCEMENT, + WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION, + WLAN_MESH_ACTION_MCCA_SETUP_REQUEST, + WLAN_MESH_ACTION_MCCA_SETUP_REPLY, + WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST, + WLAN_MESH_ACTION_MCCA_ADVERTISEMENT, + WLAN_MESH_ACTION_MCCA_TEARDOWN, + WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST, + WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE, +}; + +/* Security key length */ +enum ieee80211_key_len { + WLAN_KEY_LEN_WEP40 = 5, + WLAN_KEY_LEN_WEP104 = 13, + WLAN_KEY_LEN_CCMP = 16, + WLAN_KEY_LEN_TKIP = 32, + WLAN_KEY_LEN_AES_CMAC = 16, +}; + +/* Public action codes */ +enum ieee80211_pub_actioncode { + WLAN_PUB_ACTION_TDLS_DISCOVER_RES = 14, +}; + +/* TDLS action codes */ +enum ieee80211_tdls_actioncode { + WLAN_TDLS_SETUP_REQUEST = 0, + WLAN_TDLS_SETUP_RESPONSE = 1, + WLAN_TDLS_SETUP_CONFIRM = 2, + WLAN_TDLS_TEARDOWN = 3, 
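Action frames carry their category code in the first octet of the frame body (mgmt->u.action.category in the structure near the top of this file), and IEEE80211_MIN_ACTION_SIZE is the smallest frame that can hold it. The sketch below shows how a receiver might branch on the categories above; it is illustrative only, action_category_name is an invented helper, and the frame is assumed to have already been length-checked against IEEE80211_MIN_ACTION_SIZE by the caller.

    #include <linux/ieee80211.h>

    /* Classify a received, length-checked action frame by category. */
    static const char *action_category_name(const struct ieee80211_mgmt *mgmt)
    {
            switch (mgmt->u.action.category) {
            case WLAN_CATEGORY_SPECTRUM_MGMT:
                    return "spectrum management";
            case WLAN_CATEGORY_BACK:
                    return "block ack";
            case WLAN_CATEGORY_SA_QUERY:
                    return "SA query";
            case WLAN_CATEGORY_SELF_PROTECTED:
                    return "self-protected (mesh peering)";
            case WLAN_CATEGORY_TDLS:
                    return "TDLS";
            default:
                    return "other";
            }
    }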
+ WLAN_TDLS_PEER_TRAFFIC_INDICATION = 4, + WLAN_TDLS_CHANNEL_SWITCH_REQUEST = 5, + WLAN_TDLS_CHANNEL_SWITCH_RESPONSE = 6, + WLAN_TDLS_PEER_PSM_REQUEST = 7, + WLAN_TDLS_PEER_PSM_RESPONSE = 8, + WLAN_TDLS_PEER_TRAFFIC_RESPONSE = 9, + WLAN_TDLS_DISCOVERY_REQUEST = 10, +}; + +/* + * TDLS capabililites to be enabled in the 5th byte of the + * @WLAN_EID_EXT_CAPABILITY information element + */ +#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) +#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) + +/* TDLS specific payload type in the LLC/SNAP header */ +#define WLAN_TDLS_SNAP_RFTYPE 0x2 + +/** + * enum - mesh path selection protocol identifier + * + * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol + * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will + * be specified in a vendor specific information element + */ +enum { + IEEE80211_PATH_PROTOCOL_HWMP = 0, + IEEE80211_PATH_PROTOCOL_VENDOR = 255, +}; + +/** + * enum - mesh path selection metric identifier + * + * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric + * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be + * specified in a vendor specific information element + */ +enum { + IEEE80211_PATH_METRIC_AIRTIME = 0, + IEEE80211_PATH_METRIC_VENDOR = 255, +}; + + +/* + * IEEE 802.11-2007 7.3.2.9 Country information element + * + * Minimum length is 8 octets, ie len must be evenly + * divisible by 2 + */ + +/* Although the spec says 8 I'm seeing 6 in practice */ +#define IEEE80211_COUNTRY_IE_MIN_LEN 6 + +/* The Country String field of the element shall be 3 octets in length */ +#define IEEE80211_COUNTRY_STRING_LEN 3 + +/* + * For regulatory extension stuff see IEEE 802.11-2007 + * Annex I (page 1141) and Annex J (page 1147). Also + * review 7.3.2.9. + * + * When dot11RegulatoryClassesRequired is true and the + * first_channel/reg_extension_id is >= 201 then the IE + * compromises of the 'ext' struct represented below: + * + * - Regulatory extension ID - when generating IE this just needs + * to be monotonically increasing for each triplet passed in + * the IE + * - Regulatory class - index into set of rules + * - Coverage class - index into air propagation time (Table 7-27), + * in microseconds, you can compute the air propagation time from + * the index by multiplying by 3, so index 10 yields a propagation + * of 10 us. Valid values are 0-31, values 32-255 are not defined + * yet. A value of 0 inicates air propagation of <= 1 us. + * + * See also Table I.2 for Emission limit sets and table + * I.3 for Behavior limit sets. Table J.1 indicates how to map + * a reg_class to an emission limit set and behavior limit set. + */ +#define IEEE80211_COUNTRY_EXTENSION_ID 201 + +/* + * Channels numbers in the IE must be monotonically increasing + * if dot11RegulatoryClassesRequired is not true. + * + * If dot11RegulatoryClassesRequired is true consecutive + * subband triplets following a regulatory triplet shall + * have monotonically increasing first_channel number fields. + * + * Channel numbers shall not overlap. + * + * Note that max_power is signed. 
+ */ +struct ieee80211_country_ie_triplet { + union { + struct { + u8 first_channel; + u8 num_channels; + s8 max_power; + } __attribute__ ((packed)) chans; + struct { + u8 reg_extension_id; + u8 reg_class; + u8 coverage_class; + } __attribute__ ((packed)) ext; + }; +} __attribute__ ((packed)); + +enum ieee80211_timeout_interval_type { + WLAN_TIMEOUT_REASSOC_DEADLINE = 1 /* 802.11r */, + WLAN_TIMEOUT_KEY_LIFETIME = 2 /* 802.11r */, + WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */, +}; + +/* BACK action code */ +enum ieee80211_back_actioncode { + WLAN_ACTION_ADDBA_REQ = 0, + WLAN_ACTION_ADDBA_RESP = 1, + WLAN_ACTION_DELBA = 2, +}; + +/* BACK (block-ack) parties */ +enum ieee80211_back_parties { + WLAN_BACK_RECIPIENT = 0, + WLAN_BACK_INITIATOR = 1, +}; + +/* SA Query action */ +enum ieee80211_sa_query_action { + WLAN_ACTION_SA_QUERY_REQUEST = 0, + WLAN_ACTION_SA_QUERY_RESPONSE = 1, +}; + + +/* cipher suite selectors */ +#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00 +#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01 +#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02 +/* reserved: 0x000FAC03 */ +#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04 +#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 +#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 + +#define WLAN_CIPHER_SUITE_SMS4 0x00147201 + +/* AKM suite selectors */ +#define WLAN_AKM_SUITE_8021X 0x000FAC01 +#define WLAN_AKM_SUITE_PSK 0x000FAC02 +#define WLAN_AKM_SUITE_SAE 0x000FAC08 +#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09 + +#define WLAN_MAX_KEY_LEN 32 + +#define WLAN_PMKID_LEN 16 + +#define WLAN_OUI_WFA 0x506f9a +#define WLAN_OUI_TYPE_WFA_P2P 9 + +/* + * WMM/802.11e Tspec Element + */ +#define IEEE80211_WMM_IE_TSPEC_TID_MASK 0x0F +#define IEEE80211_WMM_IE_TSPEC_TID_SHIFT 1 + +enum ieee80211_tspec_status_code { + IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED = 0, + IEEE80211_TSPEC_STATUS_ADDTS_INVAL_PARAMS = 0x1, +}; + +struct ieee80211_tspec_ie { + u8 element_id; + u8 len; + u8 oui[3]; + u8 oui_type; + u8 oui_subtype; + u8 version; + __le16 tsinfo; + u8 tsinfo_resvd; + __le16 nominal_msdu; + __le16 max_msdu; + __le32 min_service_int; + __le32 max_service_int; + __le32 inactivity_int; + __le32 suspension_int; + __le32 service_start_time; + __le32 min_data_rate; + __le32 mean_data_rate; + __le32 peak_data_rate; + __le32 max_burst_size; + __le32 delay_bound; + __le32 min_phy_rate; + __le16 sba; + __le16 medium_time; +} __packed; + +/** + * ieee80211_get_qos_ctl - get pointer to qos control bytes + * @hdr: the frame + * + * The qos ctrl bytes come after the frame_control, duration, seq_num + * and 3 or 4 addresses of length ETH_ALEN. + * 3 addr: 2 + 2 + 2 + 3*6 = 24 + * 4 addr: 2 + 2 + 2 + 4*6 = 30 + */ +static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_a4(hdr->frame_control)) + return (u8 *)hdr + 30; + else + return (u8 *)hdr + 24; +} + +/** + * ieee80211_get_SA - get pointer to SA + * @hdr: the frame + * + * Given an 802.11 frame, this function returns the offset + * to the source address (SA). It does not verify that the + * header is long enough to contain the address, and the + * header must be long enough to contain the frame control + * field. 
+ */ +static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_a4(hdr->frame_control)) + return hdr->addr4; + if (ieee80211_has_fromds(hdr->frame_control)) + return hdr->addr3; + return hdr->addr2; +} + +/** + * ieee80211_get_DA - get pointer to DA + * @hdr: the frame + * + * Given an 802.11 frame, this function returns the offset + * to the destination address (DA). It does not verify that + * the header is long enough to contain the address, and the + * header must be long enough to contain the frame control + * field. + */ +static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_tods(hdr->frame_control)) + return hdr->addr3; + else + return hdr->addr1; +} + +/** + * ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame + * @hdr: the frame (buffer must include at least the first octet of payload) + */ +static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) +{ + if (ieee80211_is_disassoc(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control)) + return true; + + if (ieee80211_is_action(hdr->frame_control)) { + u8 *category; + + /* + * Action frames, excluding Public Action frames, are Robust + * Management Frames. However, if we are looking at a Protected + * frame, skip the check since the data may be encrypted and + * the frame has already been found to be a Robust Management + * Frame (by the other end). + */ + if (ieee80211_has_protected(hdr->frame_control)) + return true; + category = ((u8 *) hdr) + 24; + return *category != WLAN_CATEGORY_PUBLIC && + *category != WLAN_CATEGORY_HT && + *category != WLAN_CATEGORY_SELF_PROTECTED && + *category != WLAN_CATEGORY_VENDOR_SPECIFIC; + } + + return false; +} + +/** + * ieee80211_is_public_action - check if frame is a public action frame + * @hdr: the frame + * @len: length of the frame + */ +static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, + size_t len) +{ + struct ieee80211_mgmt *mgmt = (void *)hdr; + + if (len < IEEE80211_MIN_ACTION_SIZE) + return false; + if (!ieee80211_is_action(hdr->frame_control)) + return false; + return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC; +} + +/** + * ieee80211_fhss_chan_to_freq - get channel frequency + * @channel: the FHSS channel + * + * Convert IEEE802.11 FHSS channel to frequency (MHz) + * Ref IEEE 802.11-2007 section 14.6 + */ +static inline int ieee80211_fhss_chan_to_freq(int channel) +{ + if ((channel > 1) && (channel < 96)) + return channel + 2400; + else + return -1; +} + +/** + * ieee80211_freq_to_fhss_chan - get channel + * @freq: the channels frequency + * + * Convert frequency (MHz) to IEEE802.11 FHSS channel + * Ref IEEE 802.11-2007 section 14.6 + */ +static inline int ieee80211_freq_to_fhss_chan(int freq) +{ + if ((freq > 2401) && (freq < 2496)) + return freq - 2400; + else + return -1; +} + +/** + * ieee80211_dsss_chan_to_freq - get channel center frequency + * @channel: the DSSS channel + * + * Convert IEEE802.11 DSSS channel to the center frequency (MHz). + * Ref IEEE 802.11-2007 section 15.6 + */ +static inline int ieee80211_dsss_chan_to_freq(int channel) +{ + if ((channel > 0) && (channel < 14)) + return 2407 + (channel * 5); + else if (channel == 14) + return 2484; + else + return -1; +} + +/** + * ieee80211_freq_to_dsss_chan - get channel + * @freq: the frequency + * + * Convert frequency (MHz) to IEEE802.11 DSSS channel + * Ref IEEE 802.11-2007 section 15.6 + * + * This routine selects the channel with the closest center frequency. 
+ */ +static inline int ieee80211_freq_to_dsss_chan(int freq) +{ + if ((freq >= 2410) && (freq < 2475)) + return (freq - 2405) / 5; + else if ((freq >= 2482) && (freq < 2487)) + return 14; + else + return -1; +} + +/* Convert IEEE802.11 HR DSSS channel to frequency (MHz) and back + * Ref IEEE 802.11-2007 section 18.4.6.2 + * + * The channels and frequencies are the same as those defined for DSSS + */ +#define ieee80211_hr_chan_to_freq(chan) ieee80211_dsss_chan_to_freq(chan) +#define ieee80211_freq_to_hr_chan(freq) ieee80211_freq_to_dsss_chan(freq) + +/* Convert IEEE802.11 ERP channel to frequency (MHz) and back + * Ref IEEE 802.11-2007 section 19.4.2 + */ +#define ieee80211_erp_chan_to_freq(chan) ieee80211_hr_chan_to_freq(chan) +#define ieee80211_freq_to_erp_chan(freq) ieee80211_freq_to_hr_chan(freq) + +/** + * ieee80211_ofdm_chan_to_freq - get channel center frequency + * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz + * @channel: the OFDM channel + * + * Convert IEEE802.11 OFDM channel to center frequency (MHz) + * Ref IEEE 802.11-2007 section 17.3.8.3.2 + */ +static inline int ieee80211_ofdm_chan_to_freq(int s_freq, int channel) +{ + if ((channel > 0) && (channel <= 200) && + (s_freq >= 4000)) + return s_freq + (channel * 5); + else + return -1; +} + +/** + * ieee80211_freq_to_ofdm_channel - get channel + * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz + * @freq: the frequency + * + * Convert frequency (MHz) to IEEE802.11 OFDM channel + * Ref IEEE 802.11-2007 section 17.3.8.3.2 + * + * This routine selects the channel with the closest center frequency. + */ +static inline int ieee80211_freq_to_ofdm_chan(int s_freq, int freq) +{ + if ((freq > (s_freq + 2)) && (freq <= (s_freq + 1202)) && + (s_freq >= 4000)) + return (freq + 2 - s_freq) / 5; + else + return -1; +} + +/** + * ieee80211_tu_to_usec - convert time units (TU) to microseconds + * @tu: the TUs + */ +static inline unsigned long ieee80211_tu_to_usec(unsigned long tu) +{ + return 1024 * tu; +} + +/** + * ieee80211_check_tim - check if AID bit is set in TIM + * @tim: the TIM IE + * @tim_len: length of the TIM IE + * @aid: the AID to look for + */ +static inline bool ieee80211_check_tim(struct ieee80211_tim_ie *tim, + u8 tim_len, u16 aid) +{ + u8 mask; + u8 index, indexn1, indexn2; + + if (unlikely(!tim || tim_len < sizeof(*tim))) + return false; + + aid &= 0x3fff; + index = aid / 8; + mask = 1 << (aid & 7); + + indexn1 = tim->bitmap_ctrl & 0xfe; + indexn2 = tim_len + indexn1 - 4; + + if (index < indexn1 || index > indexn2) + return false; + + index -= indexn1; + + return !!(tim->virtual_map[index] & mask); +} + +#endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/if.h b/include/linux/if.h new file mode 100644 index 00000000..f995c663 --- /dev/null +++ b/include/linux/if.h @@ -0,0 +1,233 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the INET interface module. + * + * Version: @(#)if.h 1.0.2 04/18/93 + * + * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1982-1988 + * Ross Biro + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _LINUX_IF_H +#define _LINUX_IF_H + +#include <linux/types.h> /* for "__kernel_caddr_t" et al */ +#include <linux/socket.h> /* for "struct sockaddr" et al */ +#include <linux/compiler.h> /* for "__user" et al */ + +#define IFNAMSIZ 16 +#define IFALIASZ 256 +#include <linux/hdlc/ioctl.h> + +/* Standard interface flags (netdevice->flags). */ +#define IFF_UP 0x1 /* interface is up */ +#define IFF_BROADCAST 0x2 /* broadcast address valid */ +#define IFF_DEBUG 0x4 /* turn on debugging */ +#define IFF_LOOPBACK 0x8 /* is a loopback net */ +#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */ +#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */ +#define IFF_RUNNING 0x40 /* interface RFC2863 OPER_UP */ +#define IFF_NOARP 0x80 /* no ARP protocol */ +#define IFF_PROMISC 0x100 /* receive all packets */ +#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/ + +#define IFF_MASTER 0x400 /* master of a load balancer */ +#define IFF_SLAVE 0x800 /* slave of a load balancer */ + +#define IFF_MULTICAST 0x1000 /* Supports multicast */ + +#define IFF_PORTSEL 0x2000 /* can set media type */ +#define IFF_AUTOMEDIA 0x4000 /* auto media select active */ +#define IFF_DYNAMIC 0x8000 /* dialup device with changing addresses*/ + +#define IFF_LOWER_UP 0x10000 /* driver signals L1 up */ +#define IFF_DORMANT 0x20000 /* driver signals dormant */ + +#define IFF_ECHO 0x40000 /* echo sent packets */ + +#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\ + IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT) + +/* Private (from user) interface flags (netdevice->priv_flags). */ +#define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ +#define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ +#define IFF_SLAVE_INACTIVE 0x4 /* bonding slave not the curr. active */ +#define IFF_MASTER_8023AD 0x8 /* bonding master, 802.3ad. */ +#define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. 
*/ +#define IFF_BONDING 0x20 /* bonding master or slave */ +#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ +#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ +#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */ +#define IFF_WAN_HDLC 0x200 /* WAN HDLC device */ +#define IFF_XMIT_DST_RELEASE 0x400 /* dev_hard_start_xmit() is allowed to + * release skb->dst + */ +#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */ +#define IFF_DISABLE_NETPOLL 0x1000 /* disable netpoll at run-time */ +#define IFF_MACVLAN_PORT 0x2000 /* device used as macvlan port */ +#define IFF_BRIDGE_PORT 0x4000 /* device used as bridge port */ +#define IFF_OVS_DATAPATH 0x8000 /* device used as Open vSwitch + * datapath port */ +#define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing + * skbs on transmit */ +#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ +#define IFF_TEAM_PORT 0x40000 /* device used as team port */ +#define IFF_SUPP_NOFCS 0x80000 /* device supports sending custom FCS */ + + +#define IF_GET_IFACE 0x0001 /* for querying only */ +#define IF_GET_PROTO 0x0002 + +/* For definitions see hdlc.h */ +#define IF_IFACE_V35 0x1000 /* V.35 serial interface */ +#define IF_IFACE_V24 0x1001 /* V.24 serial interface */ +#define IF_IFACE_X21 0x1002 /* X.21 serial interface */ +#define IF_IFACE_T1 0x1003 /* T1 telco serial interface */ +#define IF_IFACE_E1 0x1004 /* E1 telco serial interface */ +#define IF_IFACE_SYNC_SERIAL 0x1005 /* can't be set by software */ +#define IF_IFACE_X21D 0x1006 /* X.21 Dual Clocking (FarSite) */ + +/* For definitions see hdlc.h */ +#define IF_PROTO_HDLC 0x2000 /* raw HDLC protocol */ +#define IF_PROTO_PPP 0x2001 /* PPP protocol */ +#define IF_PROTO_CISCO 0x2002 /* Cisco HDLC protocol */ +#define IF_PROTO_FR 0x2003 /* Frame Relay protocol */ +#define IF_PROTO_FR_ADD_PVC 0x2004 /* Create FR PVC */ +#define IF_PROTO_FR_DEL_PVC 0x2005 /* Delete FR PVC */ +#define IF_PROTO_X25 0x2006 /* X.25 */ +#define IF_PROTO_HDLC_ETH 0x2007 /* raw HDLC, Ethernet emulation */ +#define IF_PROTO_FR_ADD_ETH_PVC 0x2008 /* Create FR Ethernet-bridged PVC */ +#define IF_PROTO_FR_DEL_ETH_PVC 0x2009 /* Delete FR Ethernet-bridged PVC */ +#define IF_PROTO_FR_PVC 0x200A /* for reading PVC status */ +#define IF_PROTO_FR_ETH_PVC 0x200B +#define IF_PROTO_RAW 0x200C /* RAW Socket */ + +/* RFC 2863 operational status */ +enum { + IF_OPER_UNKNOWN, + IF_OPER_NOTPRESENT, + IF_OPER_DOWN, + IF_OPER_LOWERLAYERDOWN, + IF_OPER_TESTING, + IF_OPER_DORMANT, + IF_OPER_UP, +}; + +/* link modes */ +enum { + IF_LINK_MODE_DEFAULT, + IF_LINK_MODE_DORMANT, /* limit upward transition to dormant */ +}; + +/* + * Device mapping structure. I'd just gone off and designed a + * beautiful scheme using only loadable modules with arguments + * for driver options and along come the PCMCIA people 8) + * + * Ah well. The get() side of this is good for WDSETUP, and it'll + * be handy for debugging things. The set side is fine for now and + * being very small might be worth keeping for clean configuration. + */ + +struct ifmap { + unsigned long mem_start; + unsigned long mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; + /* 3 bytes spare */ +}; + +struct if_settings { + unsigned int type; /* Type of physical device or protocol */ + unsigned int size; /* Size of the data allocated by the caller */ + union { + /* {atm/eth/dsl}_settings anyone ? 
*/ + raw_hdlc_proto __user *raw_hdlc; + cisco_proto __user *cisco; + fr_proto __user *fr; + fr_proto_pvc __user *fr_pvc; + fr_proto_pvc_info __user *fr_pvc_info; + + /* interface settings */ + sync_serial_settings __user *sync; + te1_settings __user *te1; + } ifs_ifsu; +}; + +/* + * Interface request structure used for socket + * ioctl's. All interface ioctl's must have parameter + * definitions which begin with ifr_name. The + * remainder may be interface specific. + */ + +struct ifreq { +#define IFHWADDRLEN 6 + union + { + char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + } ifr_ifrn; + + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[IFNAMSIZ]; /* Just fits the size */ + char ifru_newname[IFNAMSIZ]; + void __user * ifru_data; + struct if_settings ifru_settings; + } ifr_ifru; +}; + +#define ifr_name ifr_ifrn.ifrn_name /* interface name */ +#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */ +#define ifr_addr ifr_ifru.ifru_addr /* address */ +#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-p lnk */ +#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */ +#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */ +#define ifr_flags ifr_ifru.ifru_flags /* flags */ +#define ifr_metric ifr_ifru.ifru_ivalue /* metric */ +#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */ +#define ifr_map ifr_ifru.ifru_map /* device map */ +#define ifr_slave ifr_ifru.ifru_slave /* slave device */ +#define ifr_data ifr_ifru.ifru_data /* for use by interface */ +#define ifr_ifindex ifr_ifru.ifru_ivalue /* interface index */ +#define ifr_bandwidth ifr_ifru.ifru_ivalue /* link bandwidth */ +#define ifr_qlen ifr_ifru.ifru_ivalue /* Queue length */ +#define ifr_newname ifr_ifru.ifru_newname /* New name */ +#define ifr_settings ifr_ifru.ifru_settings /* Device/proto settings*/ + +/* + * Structure used in SIOCGIFCONF request. + * Used to retrieve interface configuration + * for machine (useful for programs which + * must know all networks accessible). + */ + +struct ifconf { + int ifc_len; /* size of buffer */ + union { + char __user *ifcu_buf; + struct ifreq __user *ifcu_req; + } ifc_ifcu; +}; +#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ +#define ifc_req ifc_ifcu.ifcu_req /* array of structures */ + +#endif /* _LINUX_IF_H */ diff --git a/include/linux/if_addr.h b/include/linux/if_addr.h new file mode 100644 index 00000000..23357ab8 --- /dev/null +++ b/include/linux/if_addr.h @@ -0,0 +1,61 @@ +#ifndef __LINUX_IF_ADDR_H +#define __LINUX_IF_ADDR_H + +#include <linux/types.h> +#include <linux/netlink.h> + +struct ifaddrmsg { + __u8 ifa_family; + __u8 ifa_prefixlen; /* The prefix length */ + __u8 ifa_flags; /* Flags */ + __u8 ifa_scope; /* Address scope */ + __u32 ifa_index; /* Link index */ +}; + +/* + * Important comment: + * IFA_ADDRESS is prefix address, rather than local interface address. + * It makes no difference for normally configured broadcast interfaces, + * but for point-to-point IFA_ADDRESS is DESTINATION address, + * local address is supplied in IFA_LOCAL attribute. 
+ */ +enum { + IFA_UNSPEC, + IFA_ADDRESS, + IFA_LOCAL, + IFA_LABEL, + IFA_BROADCAST, + IFA_ANYCAST, + IFA_CACHEINFO, + IFA_MULTICAST, + __IFA_MAX, +}; + +#define IFA_MAX (__IFA_MAX - 1) + +/* ifa_flags */ +#define IFA_F_SECONDARY 0x01 +#define IFA_F_TEMPORARY IFA_F_SECONDARY + +#define IFA_F_NODAD 0x02 +#define IFA_F_OPTIMISTIC 0x04 +#define IFA_F_DADFAILED 0x08 +#define IFA_F_HOMEADDRESS 0x10 +#define IFA_F_DEPRECATED 0x20 +#define IFA_F_TENTATIVE 0x40 +#define IFA_F_PERMANENT 0x80 + +struct ifa_cacheinfo { + __u32 ifa_prefered; + __u32 ifa_valid; + __u32 cstamp; /* created timestamp, hundredths of seconds */ + __u32 tstamp; /* updated timestamp, hundredths of seconds */ +}; + +/* backwards compatibility for userspace */ +#ifndef __KERNEL__ +#define IFA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifaddrmsg)))) +#define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg)) +#endif + +#endif diff --git a/include/linux/if_addrlabel.h b/include/linux/if_addrlabel.h new file mode 100644 index 00000000..54580c29 --- /dev/null +++ b/include/linux/if_addrlabel.h @@ -0,0 +1,32 @@ +/* + * if_addrlabel.h - netlink interface for address labels + * + * Copyright (C)2007 USAGI/WIDE Project, All Rights Reserved. + * + * Authors: + * YOSHIFUJI Hideaki @ USAGI/WIDE <yoshfuji@linux-ipv6.org> + */ + +#ifndef __LINUX_IF_ADDRLABEL_H +#define __LINUX_IF_ADDRLABEL_H + +#include <linux/types.h> + +struct ifaddrlblmsg { + __u8 ifal_family; /* Address family */ + __u8 __ifal_reserved; /* Reserved */ + __u8 ifal_prefixlen; /* Prefix length */ + __u8 ifal_flags; /* Flags */ + __u32 ifal_index; /* Link index */ + __u32 ifal_seq; /* sequence number */ +}; + +enum { + IFAL_ADDRESS = 1, + IFAL_LABEL = 2, + __IFAL_MAX +}; + +#define IFAL_MAX (__IFAL_MAX - 1) + +#endif diff --git a/include/linux/if_alg.h b/include/linux/if_alg.h new file mode 100644 index 00000000..0f9acce5 --- /dev/null +++ b/include/linux/if_alg.h @@ -0,0 +1,40 @@ +/* + * if_alg: User-space algorithm interface + * + * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _LINUX_IF_ALG_H +#define _LINUX_IF_ALG_H + +#include <linux/types.h> + +struct sockaddr_alg { + __u16 salg_family; + __u8 salg_type[14]; + __u32 salg_feat; + __u32 salg_mask; + __u8 salg_name[64]; +}; + +struct af_alg_iv { + __u32 ivlen; + __u8 iv[0]; +}; + +/* Socket options */ +#define ALG_SET_KEY 1 +#define ALG_SET_IV 2 +#define ALG_SET_OP 3 + +/* Operations */ +#define ALG_OP_DECRYPT 0 +#define ALG_OP_ENCRYPT 1 + +#endif /* _LINUX_IF_ALG_H */ diff --git a/include/linux/if_arcnet.h b/include/linux/if_arcnet.h new file mode 100644 index 00000000..46e34bd0 --- /dev/null +++ b/include/linux/if_arcnet.h @@ -0,0 +1,132 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the ARCnet interface. + * + * Authors: David Woodhouse and Avery Pennarun + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_IF_ARCNET_H +#define _LINUX_IF_ARCNET_H + +#include <linux/types.h> +#include <linux/if_ether.h> + + +/* + * These are the defined ARCnet Protocol ID's. + */ + +/* CAP mode */ +/* No macro but uses 1-8 */ + +/* RFC1201 Protocol ID's */ +#define ARC_P_IP 212 /* 0xD4 */ +#define ARC_P_IPV6 196 /* 0xC4: RFC2497 */ +#define ARC_P_ARP 213 /* 0xD5 */ +#define ARC_P_RARP 214 /* 0xD6 */ +#define ARC_P_IPX 250 /* 0xFA */ +#define ARC_P_NOVELL_EC 236 /* 0xEC */ + +/* Old RFC1051 Protocol ID's */ +#define ARC_P_IP_RFC1051 240 /* 0xF0 */ +#define ARC_P_ARP_RFC1051 241 /* 0xF1 */ + +/* MS LanMan/WfWg "NDIS" encapsulation */ +#define ARC_P_ETHER 232 /* 0xE8 */ + +/* Unsupported/indirectly supported protocols */ +#define ARC_P_DATAPOINT_BOOT 0 /* very old Datapoint equipment */ +#define ARC_P_DATAPOINT_MOUNT 1 +#define ARC_P_POWERLAN_BEACON 8 /* Probably ATA-Netbios related */ +#define ARC_P_POWERLAN_BEACON2 243 /* 0xF3 */ +#define ARC_P_LANSOFT 251 /* 0xFB - what is this? */ +#define ARC_P_ATALK 0xDD + +/* Hardware address length */ +#define ARCNET_ALEN 1 + +/* + * The RFC1201-specific components of an arcnet packet header. + */ +struct arc_rfc1201 { + __u8 proto; /* protocol ID field - varies */ + __u8 split_flag; /* for use with split packets */ + __be16 sequence; /* sequence number */ + __u8 payload[0]; /* space remaining in packet (504 bytes)*/ +}; +#define RFC1201_HDR_SIZE 4 + + +/* + * The RFC1051-specific components. + */ +struct arc_rfc1051 { + __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ + __u8 payload[0]; /* 507 bytes */ +}; +#define RFC1051_HDR_SIZE 1 + + +/* + * The ethernet-encap-specific components. We have a real ethernet header + * and some data. + */ +struct arc_eth_encap { + __u8 proto; /* Always ARC_P_ETHER */ + struct ethhdr eth; /* standard ethernet header (yuck!) */ + __u8 payload[0]; /* 493 bytes */ +}; +#define ETH_ENCAP_HDR_SIZE 14 + + +struct arc_cap { + __u8 proto; + __u8 cookie[sizeof(int)]; /* Actually NOT sent over the network */ + union { + __u8 ack; + __u8 raw[0]; /* 507 bytes */ + } mes; +}; + +/* + * The data needed by the actual arcnet hardware. + * + * Now, in the real arcnet hardware, the third and fourth bytes are the + * 'offset' specification instead of the length, and the soft data is at + * the _end_ of the 512-byte buffer. We hide this complexity inside the + * driver. + */ +struct arc_hardware { + __u8 source, /* source ARCnet - filled in automagically */ + dest, /* destination ARCnet - 0 for broadcast */ + offset[2]; /* offset bytes (some weird semantics) */ +}; +#define ARC_HDR_SIZE 4 + +/* + * This is an ARCnet frame header, as seen by the kernel (and userspace, + * when you do a raw packet capture). + */ +struct archdr { + /* hardware requirements */ + struct arc_hardware hard; + + /* arcnet encapsulation-specific bits */ + union { + struct arc_rfc1201 rfc1201; + struct arc_rfc1051 rfc1051; + struct arc_eth_encap eth_encap; + struct arc_cap cap; + __u8 raw[0]; /* 508 bytes */ + } soft; +}; + +#endif /* _LINUX_IF_ARCNET_H */ diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h new file mode 100644 index 00000000..6d722f41 --- /dev/null +++ b/include/linux/if_arp.h @@ -0,0 +1,171 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the ARP (RFC 826) protocol. 
+ * + * Version: @(#)if_arp.h 1.0.1 04/16/93 + * + * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988 + * Portions taken from the KA9Q/NOS (v2.00m PA0GRI) source. + * Ross Biro + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Florian La Roche, + * Jonathan Layes <layes@loran.com> + * Arnaldo Carvalho de Melo <acme@conectiva.com.br> ARPHRD_HWX25 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IF_ARP_H +#define _LINUX_IF_ARP_H + +#include <linux/netdevice.h> + +/* ARP protocol HARDWARE identifiers. */ +#define ARPHRD_NETROM 0 /* from KA9Q: NET/ROM pseudo */ +#define ARPHRD_ETHER 1 /* Ethernet 10Mbps */ +#define ARPHRD_EETHER 2 /* Experimental Ethernet */ +#define ARPHRD_AX25 3 /* AX.25 Level 2 */ +#define ARPHRD_PRONET 4 /* PROnet token ring */ +#define ARPHRD_CHAOS 5 /* Chaosnet */ +#define ARPHRD_IEEE802 6 /* IEEE 802.2 Ethernet/TR/TB */ +#define ARPHRD_ARCNET 7 /* ARCnet */ +#define ARPHRD_APPLETLK 8 /* APPLEtalk */ +#define ARPHRD_DLCI 15 /* Frame Relay DLCI */ +#define ARPHRD_ATM 19 /* ATM */ +#define ARPHRD_METRICOM 23 /* Metricom STRIP (new IANA id) */ +#define ARPHRD_IEEE1394 24 /* IEEE 1394 IPv4 - RFC 2734 */ +#define ARPHRD_EUI64 27 /* EUI-64 */ +#define ARPHRD_INFINIBAND 32 /* InfiniBand */ + +/* Dummy types for non ARP hardware */ +#define ARPHRD_SLIP 256 +#define ARPHRD_CSLIP 257 +#define ARPHRD_SLIP6 258 +#define ARPHRD_CSLIP6 259 +#define ARPHRD_RSRVD 260 /* Notional KISS type */ +#define ARPHRD_ADAPT 264 +#define ARPHRD_ROSE 270 +#define ARPHRD_X25 271 /* CCITT X.25 */ +#define ARPHRD_HWX25 272 /* Boards with X.25 in firmware */ +#define ARPHRD_CAN 280 /* Controller Area Network */ +#define ARPHRD_PPP 512 +#define ARPHRD_CISCO 513 /* Cisco HDLC */ +#define ARPHRD_HDLC ARPHRD_CISCO +#define ARPHRD_LAPB 516 /* LAPB */ +#define ARPHRD_DDCMP 517 /* Digital's DDCMP protocol */ +#define ARPHRD_RAWHDLC 518 /* Raw HDLC */ + +#define ARPHRD_TUNNEL 768 /* IPIP tunnel */ +#define ARPHRD_TUNNEL6 769 /* IP6IP6 tunnel */ +#define ARPHRD_FRAD 770 /* Frame Relay Access Device */ +#define ARPHRD_SKIP 771 /* SKIP vif */ +#define ARPHRD_LOOPBACK 772 /* Loopback device */ +#define ARPHRD_LOCALTLK 773 /* Localtalk device */ +#define ARPHRD_FDDI 774 /* Fiber Distributed Data Interface */ +#define ARPHRD_BIF 775 /* AP1000 BIF */ +#define ARPHRD_SIT 776 /* sit0 device - IPv6-in-IPv4 */ +#define ARPHRD_IPDDP 777 /* IP over DDP tunneller */ +#define ARPHRD_IPGRE 778 /* GRE over IP */ +#define ARPHRD_PIMREG 779 /* PIMSM register interface */ +#define ARPHRD_HIPPI 780 /* High Performance Parallel Interface */ +#define ARPHRD_ASH 781 /* Nexus 64Mbps Ash */ +#define ARPHRD_ECONET 782 /* Acorn Econet */ +#define ARPHRD_IRDA 783 /* Linux-IrDA */ +/* ARP works differently on different FC media .. 
so */ +#define ARPHRD_FCPP 784 /* Point to point fibrechannel */ +#define ARPHRD_FCAL 785 /* Fibrechannel arbitrated loop */ +#define ARPHRD_FCPL 786 /* Fibrechannel public loop */ +#define ARPHRD_FCFABRIC 787 /* Fibrechannel fabric */ + /* 787->799 reserved for fibrechannel media types */ +#define ARPHRD_IEEE802_TR 800 /* Magic type ident for TR */ +#define ARPHRD_IEEE80211 801 /* IEEE 802.11 */ +#define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */ +#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ +#define ARPHRD_IEEE802154 804 + +#define ARPHRD_PHONET 820 /* PhoNet media type */ +#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */ +#define ARPHRD_CAIF 822 /* CAIF media type */ + +#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ +#define ARPHRD_NONE 0xFFFE /* zero header length */ + +/* ARP protocol opcodes. */ +#define ARPOP_REQUEST 1 /* ARP request */ +#define ARPOP_REPLY 2 /* ARP reply */ +#define ARPOP_RREQUEST 3 /* RARP request */ +#define ARPOP_RREPLY 4 /* RARP reply */ +#define ARPOP_InREQUEST 8 /* InARP request */ +#define ARPOP_InREPLY 9 /* InARP reply */ +#define ARPOP_NAK 10 /* (ATM)ARP NAK */ + + +/* ARP ioctl request. */ +struct arpreq { + struct sockaddr arp_pa; /* protocol address */ + struct sockaddr arp_ha; /* hardware address */ + int arp_flags; /* flags */ + struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ + char arp_dev[16]; +}; + +struct arpreq_old { + struct sockaddr arp_pa; /* protocol address */ + struct sockaddr arp_ha; /* hardware address */ + int arp_flags; /* flags */ + struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ +}; + +/* ARP Flag values. */ +#define ATF_COM 0x02 /* completed entry (ha valid) */ +#define ATF_PERM 0x04 /* permanent entry */ +#define ATF_PUBL 0x08 /* publish entry */ +#define ATF_USETRAILERS 0x10 /* has requested trailers */ +#define ATF_NETMASK 0x20 /* want to use a netmask (only + for proxy entries) */ +#define ATF_DONTPUB 0x40 /* don't answer this addresses */ + +/* + * This structure defines an ethernet arp header. + */ + +struct arphdr { + __be16 ar_hrd; /* format of hardware address */ + __be16 ar_pro; /* format of protocol address */ + unsigned char ar_hln; /* length of hardware address */ + unsigned char ar_pln; /* length of protocol address */ + __be16 ar_op; /* ARP opcode (command) */ + +#if 0 + /* + * Ethernet looks like this : This bit is variable sized however... + */ + unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */ + unsigned char ar_sip[4]; /* sender IP address */ + unsigned char ar_tha[ETH_ALEN]; /* target hardware address */ + unsigned char ar_tip[4]; /* target IP address */ +#endif + +}; + +#ifdef __KERNEL__ +#include <linux/skbuff.h> + +static inline struct arphdr *arp_hdr(const struct sk_buff *skb) +{ + return (struct arphdr *)skb_network_header(skb); +} + +static inline int arp_hdr_len(struct net_device *dev) +{ + /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ + return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; +} +#endif + +#endif /* _LINUX_IF_ARP_H */ diff --git a/include/linux/if_bonding.h b/include/linux/if_bonding.h new file mode 100644 index 00000000..a17edda8 --- /dev/null +++ b/include/linux/if_bonding.h @@ -0,0 +1,128 @@ +/* + * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'. + * + * + * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes + * NCM: Network and Communications Management, Inc. 
+ * + * BUT, I'm the one who modified it for ethernet, so: + * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov + * + * This software may be used and distributed according to the terms + * of the GNU Public License, incorporated herein by reference. + * + * 2003/03/18 - Amir Noam <amir.noam at intel dot com> + * - Added support for getting slave's speed and duplex via ethtool. + * Needed for 802.3ad and other future modes. + * + * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and + * Shmulik Hen <shmulik.hen at intel dot com> + * - Enable support of modes that need to use the unique mac address of + * each slave. + * + * 2003/03/18 - Tsippy Mendelson <tsippy.mendelson at intel dot com> and + * Amir Noam <amir.noam at intel dot com> + * - Moved driver's private data types to bonding.h + * + * 2003/03/18 - Amir Noam <amir.noam at intel dot com>, + * Tsippy Mendelson <tsippy.mendelson at intel dot com> and + * Shmulik Hen <shmulik.hen at intel dot com> + * - Added support for IEEE 802.3ad Dynamic link aggregation mode. + * + * 2003/05/01 - Amir Noam <amir.noam at intel dot com> + * - Added ABI version control to restore compatibility between + * new/old ifenslave and new/old bonding. + * + * 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com> + * - Code cleanup and style changes + * + * 2005/05/05 - Jason Gabler <jygabler at lbl dot gov> + * - added definitions for various XOR hashing policies + */ + +#ifndef _LINUX_IF_BONDING_H +#define _LINUX_IF_BONDING_H + +#include <linux/if.h> +#include <linux/types.h> +#include <linux/if_ether.h> + +/* userland - kernel ABI version (2003/05/08) */ +#define BOND_ABI_VERSION 2 + +/* + * We can remove these ioctl definitions in 2.5. People should use the + * SIOC*** versions of them instead + */ +#define BOND_ENSLAVE_OLD (SIOCDEVPRIVATE) +#define BOND_RELEASE_OLD (SIOCDEVPRIVATE + 1) +#define BOND_SETHWADDR_OLD (SIOCDEVPRIVATE + 2) +#define BOND_SLAVE_INFO_QUERY_OLD (SIOCDEVPRIVATE + 11) +#define BOND_INFO_QUERY_OLD (SIOCDEVPRIVATE + 12) +#define BOND_CHANGE_ACTIVE_OLD (SIOCDEVPRIVATE + 13) + +#define BOND_CHECK_MII_STATUS (SIOCGMIIPHY) + +#define BOND_MODE_ROUNDROBIN 0 +#define BOND_MODE_ACTIVEBACKUP 1 +#define BOND_MODE_XOR 2 +#define BOND_MODE_BROADCAST 3 +#define BOND_MODE_8023AD 4 +#define BOND_MODE_TLB 5 +#define BOND_MODE_ALB 6 /* TLB + RLB (receive load balancing) */ + +/* each slave's link has 4 states */ +#define BOND_LINK_UP 0 /* link is up and running */ +#define BOND_LINK_FAIL 1 /* link has just gone down */ +#define BOND_LINK_DOWN 2 /* link has been down for too long time */ +#define BOND_LINK_BACK 3 /* link is going back */ + +/* each slave has several states */ +#define BOND_STATE_ACTIVE 0 /* link is active */ +#define BOND_STATE_BACKUP 1 /* link is backup */ + +#define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ + +#define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */ + +#define BOND_DEFAULT_RESEND_IGMP 1 /* Default number of IGMP membership reports */ + +/* hashing types */ +#define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */ +#define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */ +#define BOND_XMIT_POLICY_LAYER23 2 /* layer 2+3 (IP ^ MAC) */ + +typedef struct ifbond { + __s32 bond_mode; + __s32 num_slaves; + __s32 miimon; +} ifbond; + +typedef struct ifslave { + __s32 slave_id; /* Used as an IN param to the BOND_SLAVE_INFO_QUERY ioctl */ + char slave_name[IFNAMSIZ]; + __s8 link; + __s8 state; + __u32 link_failure_count; +} 
ifslave; + +struct ad_info { + __u16 aggregator_id; + __u16 ports; + __u16 actor_key; + __u16 partner_key; + __u8 partner_system[ETH_ALEN]; +}; + +#endif /* _LINUX_IF_BONDING_H */ + +/* + * Local variables: + * version-control: t + * kept-new-versions: 5 + * c-indent-level: 8 + * c-basic-offset: 8 + * tab-width: 8 + * End: + */ + diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h new file mode 100644 index 00000000..dd3f2013 --- /dev/null +++ b/include/linux/if_bridge.h @@ -0,0 +1,111 @@ +/* + * Linux ethernet bridge + * + * Authors: + * Lennert Buytenhek <buytenh@gnu.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_IF_BRIDGE_H +#define _LINUX_IF_BRIDGE_H + +#include <linux/types.h> + +#define SYSFS_BRIDGE_ATTR "bridge" +#define SYSFS_BRIDGE_FDB "brforward" +#define SYSFS_BRIDGE_PORT_SUBDIR "brif" +#define SYSFS_BRIDGE_PORT_ATTR "brport" +#define SYSFS_BRIDGE_PORT_LINK "bridge" + +#define BRCTL_VERSION 1 + +#define BRCTL_GET_VERSION 0 +#define BRCTL_GET_BRIDGES 1 +#define BRCTL_ADD_BRIDGE 2 +#define BRCTL_DEL_BRIDGE 3 +#define BRCTL_ADD_IF 4 +#define BRCTL_DEL_IF 5 +#define BRCTL_GET_BRIDGE_INFO 6 +#define BRCTL_GET_PORT_LIST 7 +#define BRCTL_SET_BRIDGE_FORWARD_DELAY 8 +#define BRCTL_SET_BRIDGE_HELLO_TIME 9 +#define BRCTL_SET_BRIDGE_MAX_AGE 10 +#define BRCTL_SET_AGEING_TIME 11 +#define BRCTL_SET_GC_INTERVAL 12 +#define BRCTL_GET_PORT_INFO 13 +#define BRCTL_SET_BRIDGE_STP_STATE 14 +#define BRCTL_SET_BRIDGE_PRIORITY 15 +#define BRCTL_SET_PORT_PRIORITY 16 +#define BRCTL_SET_PATH_COST 17 +#define BRCTL_GET_FDB_ENTRIES 18 + +#define BR_STATE_DISABLED 0 +#define BR_STATE_LISTENING 1 +#define BR_STATE_LEARNING 2 +#define BR_STATE_FORWARDING 3 +#define BR_STATE_BLOCKING 4 + +struct __bridge_info { + __u64 designated_root; + __u64 bridge_id; + __u32 root_path_cost; + __u32 max_age; + __u32 hello_time; + __u32 forward_delay; + __u32 bridge_max_age; + __u32 bridge_hello_time; + __u32 bridge_forward_delay; + __u8 topology_change; + __u8 topology_change_detected; + __u8 root_port; + __u8 stp_enabled; + __u32 ageing_time; + __u32 gc_interval; + __u32 hello_timer_value; + __u32 tcn_timer_value; + __u32 topology_change_timer_value; + __u32 gc_timer_value; +}; + +struct __port_info { + __u64 designated_root; + __u64 designated_bridge; + __u16 port_id; + __u16 designated_port; + __u32 path_cost; + __u32 designated_cost; + __u8 state; + __u8 top_change_ack; + __u8 config_pending; + __u8 unused0; + __u32 message_age_timer_value; + __u32 forward_delay_timer_value; + __u32 hold_timer_value; +}; + +struct __fdb_entry { + __u8 mac_addr[6]; + __u8 port_no; + __u8 is_local; + __u32 ageing_timer_value; + __u8 port_hi; + __u8 pad0; + __u16 unused; +}; + +#ifdef __KERNEL__ + +#include <linux/netdevice.h> + +extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); + +typedef int br_should_route_hook_t(struct sk_buff *skb); +extern br_should_route_hook_t __rcu *br_should_route_hook; + +#endif + +#endif diff --git a/include/linux/if_cablemodem.h b/include/linux/if_cablemodem.h new file mode 100644 index 00000000..9ca1007e --- /dev/null +++ b/include/linux/if_cablemodem.h @@ -0,0 +1,22 @@ +#ifndef _LINUX_CABLEMODEM_H_ +#define _LINUX_CABLEMODEM_H_ +/* + * Author: Franco Venturi <fventuri@mediaone.net> + * Copyright 1998 Franco Venturi + * + * 
This program is free software; you can redistribute it + * and/or modify it under the terms of the GNU General + * Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +/* some useful defines for sb1000.c e cmconfig.c - fv */ +#define SIOCGCMSTATS SIOCDEVPRIVATE+0 /* get cable modem stats */ +#define SIOCGCMFIRMWARE SIOCDEVPRIVATE+1 /* get cm firmware version */ +#define SIOCGCMFREQUENCY SIOCDEVPRIVATE+2 /* get cable modem frequency */ +#define SIOCSCMFREQUENCY SIOCDEVPRIVATE+3 /* set cable modem frequency */ +#define SIOCGCMPIDS SIOCDEVPRIVATE+4 /* get cable modem PIDs */ +#define SIOCSCMPIDS SIOCDEVPRIVATE+5 /* set cable modem PIDs */ + +#endif diff --git a/include/linux/if_ec.h b/include/linux/if_ec.h new file mode 100644 index 00000000..d85f9f48 --- /dev/null +++ b/include/linux/if_ec.h @@ -0,0 +1,68 @@ +/* Definitions for Econet sockets. */ + +#ifndef __LINUX_IF_EC +#define __LINUX_IF_EC + +/* User visible stuff. Glibc provides its own but libc5 folk will use these */ + +struct ec_addr { + unsigned char station; /* Station number. */ + unsigned char net; /* Network number. */ +}; + +struct sockaddr_ec { + unsigned short sec_family; + unsigned char port; /* Port number. */ + unsigned char cb; /* Control/flag byte. */ + unsigned char type; /* Type of message. */ + struct ec_addr addr; + unsigned long cookie; +}; + +#define ECTYPE_PACKET_RECEIVED 0 /* Packet received */ +#define ECTYPE_TRANSMIT_STATUS 0x10 /* Transmit completed, + low nibble holds status */ + +#define ECTYPE_TRANSMIT_OK 1 +#define ECTYPE_TRANSMIT_NOT_LISTENING 2 +#define ECTYPE_TRANSMIT_NET_ERROR 3 +#define ECTYPE_TRANSMIT_NO_CLOCK 4 +#define ECTYPE_TRANSMIT_LINE_JAMMED 5 +#define ECTYPE_TRANSMIT_NOT_PRESENT 6 + +#ifdef __KERNEL__ + +#define EC_HLEN 6 + +/* This is what an Econet frame looks like on the wire. */ +struct ec_framehdr { + unsigned char dst_stn; + unsigned char dst_net; + unsigned char src_stn; + unsigned char src_net; + unsigned char cb; + unsigned char port; +}; + +struct econet_sock { + /* struct sock has to be the first member of econet_sock */ + struct sock sk; + unsigned char cb; + unsigned char port; + unsigned char station; + unsigned char net; + unsigned short num; +}; + +static inline struct econet_sock *ec_sk(const struct sock *sk) +{ + return (struct econet_sock *)sk; +} + +struct ec_device { + unsigned char station, net; /* Econet protocol address */ +}; + +#endif + +#endif diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h new file mode 100644 index 00000000..18a5d02a --- /dev/null +++ b/include/linux/if_eql.h @@ -0,0 +1,83 @@ +/* + * Equalizer Load-balancer for serial network interfaces. + * + * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes + * NCM: Network and Communications Management, Inc. + * + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + * The author may be reached as simon@ncm.com, or C/O + * NCM + * Attn: Simon Janes + * 6803 Whittier Ave + * McLean VA 22101 + * Phone: 1-703-847-0040 ext 103 + */ + +#ifndef _LINUX_IF_EQL_H +#define _LINUX_IF_EQL_H + +#define EQL_DEFAULT_SLAVE_PRIORITY 28800 +#define EQL_DEFAULT_MAX_SLAVES 4 +#define EQL_DEFAULT_MTU 576 +#define EQL_DEFAULT_RESCHED_IVAL HZ + +#define EQL_ENSLAVE (SIOCDEVPRIVATE) +#define EQL_EMANCIPATE (SIOCDEVPRIVATE + 1) + +#define EQL_GETSLAVECFG (SIOCDEVPRIVATE + 2) +#define EQL_SETSLAVECFG (SIOCDEVPRIVATE + 3) + +#define EQL_GETMASTRCFG (SIOCDEVPRIVATE + 4) +#define EQL_SETMASTRCFG (SIOCDEVPRIVATE + 5) + +#ifdef __KERNEL__ + +#include <linux/timer.h> +#include <linux/spinlock.h> + +typedef struct slave { + struct list_head list; + struct net_device *dev; + long priority; + long priority_bps; + long priority_Bps; + long bytes_queued; +} slave_t; + +typedef struct slave_queue { + spinlock_t lock; + struct list_head all_slaves; + int num_slaves; + struct net_device *master_dev; +} slave_queue_t; + +typedef struct equalizer { + slave_queue_t queue; + int min_slaves; + int max_slaves; + struct timer_list timer; +} equalizer_t; + +#endif /* __KERNEL__ */ + +typedef struct master_config { + char master_name[16]; + int max_slaves; + int min_slaves; +} master_config_t; + +typedef struct slave_config { + char slave_name[16]; + long priority; +} slave_config_t; + +typedef struct slaving_request { + char slave_name[16]; + long priority; +} slaving_request_t; + + +#endif /* _LINUX_EQL_H */ diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h new file mode 100644 index 00000000..56d907a2 --- /dev/null +++ b/include/linux/if_ether.h @@ -0,0 +1,148 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the Ethernet IEEE 802.3 interface. + * + * Version: @(#)if_ether.h 1.0.1a 02/08/94 + * + * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Donald Becker, <becker@super.org> + * Alan Cox, <alan@lxorguk.ukuu.org.uk> + * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_IF_ETHER_H +#define _LINUX_IF_ETHER_H + +#include <linux/types.h> + +/* + * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble + * and FCS/CRC (frame check sequence). + */ + +#define ETH_ALEN 6 /* Octets in one ethernet addr */ +#define ETH_HLEN 14 /* Total octets in header. */ +#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */ +#define ETH_DATA_LEN 1500 /* Max. octets in payload */ +#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */ +#define ETH_FCS_LEN 4 /* Octets in the FCS */ + +/* + * These are the defined Ethernet Protocol ID's. 
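/*
 * Editor's note -- illustrative sketch, not part of this header: a minimal
 * user-space example of dispatching on the ETH_P_* ethertype constants that
 * follow, using struct ethhdr (defined further down in this file). The
 * function name classify_frame and the <stdio.h>/<arpa/inet.h> usage are
 * assumptions for the example only.
 */
#include <stdio.h>
#include <arpa/inet.h>		/* ntohs() */
#include <linux/if_ether.h>	/* struct ethhdr, ETH_P_*, ETH_HLEN */

static void classify_frame(const unsigned char *frame, unsigned int len)
{
	const struct ethhdr *eth = (const struct ethhdr *)frame;

	if (len < ETH_HLEN)
		return;			/* runt frame, no full header */

	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		printf("IPv4 packet\n");
		break;
	case ETH_P_ARP:
		printf("ARP packet\n");
		break;
	case ETH_P_8021Q:
		printf("802.1Q tagged frame\n");
		break;
	default:
		if (ntohs(eth->h_proto) <= ETH_DATA_LEN)
			printf("802.3 length field, not an ethertype\n");
		else
			printf("other ethertype 0x%04x\n", ntohs(eth->h_proto));
	}
}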
+ */ + +#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */ +#define ETH_P_PUP 0x0200 /* Xerox PUP packet */ +#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */ +#define ETH_P_IP 0x0800 /* Internet Protocol packet */ +#define ETH_P_X25 0x0805 /* CCITT X.25 */ +#define ETH_P_ARP 0x0806 /* Address Resolution packet */ +#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_IEEEPUP 0x0a00 /* Xerox IEEE802.3 PUP packet */ +#define ETH_P_IEEEPUPAT 0x0a01 /* Xerox IEEE802.3 PUP Addr Trans packet */ +#define ETH_P_DEC 0x6000 /* DEC Assigned proto */ +#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */ +#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */ +#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */ +#define ETH_P_LAT 0x6004 /* DEC LAT */ +#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */ +#define ETH_P_CUST 0x6006 /* DEC Customer use */ +#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */ +#define ETH_P_TEB 0x6558 /* Trans Ether Bridging */ +#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */ +#define ETH_P_ATALK 0x809B /* Appletalk DDP */ +#define ETH_P_AARP 0x80F3 /* Appletalk AARP */ +#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ +#define ETH_P_IPX 0x8137 /* IPX over DIX */ +#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ +#define ETH_P_PAUSE 0x8808 /* IEEE Pause frames. See 802.3 31B */ +#define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */ +#define ETH_P_WCCP 0x883E /* Web-cache coordination protocol + * defined in draft-wilson-wrec-wccp-v2-00.txt */ +#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ +#define ETH_P_PPP_SES 0x8864 /* PPPoE session messages */ +#define ETH_P_MPLS_UC 0x8847 /* MPLS Unicast traffic */ +#define ETH_P_MPLS_MC 0x8848 /* MPLS Multicast traffic */ +#define ETH_P_ATMMPOA 0x884c /* MultiProtocol Over ATM */ +#define ETH_P_LINK_CTL 0x886c /* HPNA, wlan link local tunnel */ +#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport + * over Ethernet + */ +#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ +#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ +#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ +#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */ +#define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ +#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ +#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ +#define ETH_P_TDLS 0x890D /* TDLS */ +#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ +#define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */ + +/* + * Non DIX types. Won't clash for 1500 types. + */ + +#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */ +#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */ +#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) 
*/ +#define ETH_P_802_2 0x0004 /* 802.2 frames */ +#define ETH_P_SNAP 0x0005 /* Internal only */ +#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */ +#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/ +#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */ +#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */ +#define ETH_P_CAN 0x000C /* Controller Area Network */ +#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/ +#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */ +#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */ +#define ETH_P_CONTROL 0x0016 /* Card specific control frames */ +#define ETH_P_IRDA 0x0017 /* Linux-IrDA */ +#define ETH_P_ECONET 0x0018 /* Acorn Econet */ +#define ETH_P_HDLC 0x0019 /* HDLC frames */ +#define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */ +#define ETH_P_DSA 0x001B /* Distributed Switch Arch. */ +#define ETH_P_TRAILER 0x001C /* Trailer switch tagging */ +#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ +#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */ +#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */ + +/* + * This is an Ethernet frame header. + */ + +struct ethhdr { + unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ + unsigned char h_source[ETH_ALEN]; /* source ether addr */ + __be16 h_proto; /* packet type ID field */ +} __attribute__((packed)); + +#ifdef __KERNEL__ +#include <linux/skbuff.h> + +static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb_mac_header(skb); +} + +int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); + +int mac_pton(const char *s, u8 *mac); +extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); + +#endif + +#endif /* _LINUX_IF_ETHER_H */ diff --git a/include/linux/if_fc.h b/include/linux/if_fc.h new file mode 100644 index 00000000..6ed7f1bf --- /dev/null +++ b/include/linux/if_fc.h @@ -0,0 +1,51 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for Fibre Channel. + * + * Version: @(#)if_fc.h 0.0 11/20/98 + * + * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Donald Becker, <becker@super.org> + * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be> + * Vineet Abraham, <vma@iol.unh.edu> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IF_FC_H +#define _LINUX_IF_FC_H + +#include <linux/types.h> + +#define FC_ALEN 6 /* Octets in one ethernet addr */ +#define FC_HLEN (sizeof(struct fch_hdr)+sizeof(struct fcllc)) +#define FC_ID_LEN 3 /* Octets in a Fibre Channel Address */ + +/* LLC and SNAP constants */ +#define EXTENDED_SAP 0xAA +#define UI_CMD 0x03 + +/* This is NOT the Fibre Channel frame header. The FC frame header is + * constructed in the driver as the Tachyon needs certain fields in + * certains positions. 
So, it can't be generalized here.*/ + +struct fch_hdr { + __u8 daddr[FC_ALEN]; /* destination address */ + __u8 saddr[FC_ALEN]; /* source address */ +}; + +/* This is a Fibre Channel LLC structure */ +struct fcllc { + __u8 dsap; /* destination SAP */ + __u8 ssap; /* source SAP */ + __u8 llc; /* LLC control field */ + __u8 protid[3]; /* protocol id */ + __be16 ethertype; /* ether type field */ +}; + +#endif /* _LINUX_IF_FC_H */ diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h new file mode 100644 index 00000000..e6dc11e7 --- /dev/null +++ b/include/linux/if_fddi.h @@ -0,0 +1,199 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the ANSI FDDI interface. + * + * Version: @(#)if_fddi.h 1.0.2 Sep 29 2004 + * + * Author: Lawrence V. Stefani, <stefani@lkg.dec.com> + * + * if_fddi.h is based on previous if_ether.h and if_tr.h work by + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Donald Becker, <becker@super.org> + * Alan Cox, <alan@lxorguk.ukuu.org.uk> + * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk> + * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IF_FDDI_H +#define _LINUX_IF_FDDI_H + +#include <linux/types.h> + +/* + * Define max and min legal sizes. The frame sizes do not include + * 4 byte FCS/CRC (frame check sequence). + */ +#define FDDI_K_ALEN 6 /* Octets in one FDDI address */ +#define FDDI_K_8022_HLEN 16 /* Total octets in 802.2 header */ +#define FDDI_K_SNAP_HLEN 21 /* Total octets in 802.2 SNAP header */ +#define FDDI_K_8022_ZLEN 16 /* Min octets in 802.2 frame sans FCS */ +#define FDDI_K_SNAP_ZLEN 21 /* Min octets in 802.2 SNAP frame sans FCS */ +#define FDDI_K_8022_DLEN 4475 /* Max octets in 802.2 payload */ +#define FDDI_K_SNAP_DLEN 4470 /* Max octets in 802.2 SNAP payload */ +#define FDDI_K_LLC_ZLEN 13 /* Min octets in LLC frame sans FCS */ +#define FDDI_K_LLC_LEN 4491 /* Max octets in LLC frame sans FCS */ + +/* Define FDDI Frame Control (FC) Byte values */ +#define FDDI_FC_K_VOID 0x00 +#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80 +#define FDDI_FC_K_RESTRICTED_TOKEN 0xC0 +#define FDDI_FC_K_SMT_MIN 0x41 +#define FDDI_FC_K_SMT_MAX 0x4F +#define FDDI_FC_K_MAC_MIN 0xC1 +#define FDDI_FC_K_MAC_MAX 0xCF +#define FDDI_FC_K_ASYNC_LLC_MIN 0x50 +#define FDDI_FC_K_ASYNC_LLC_DEF 0x54 +#define FDDI_FC_K_ASYNC_LLC_MAX 0x5F +#define FDDI_FC_K_SYNC_LLC_MIN 0xD0 +#define FDDI_FC_K_SYNC_LLC_MAX 0xD7 +#define FDDI_FC_K_IMPLEMENTOR_MIN 0x60 +#define FDDI_FC_K_IMPLEMENTOR_MAX 0x6F +#define FDDI_FC_K_RESERVED_MIN 0x70 +#define FDDI_FC_K_RESERVED_MAX 0x7F + +/* Define LLC and SNAP constants */ +#define FDDI_EXTENDED_SAP 0xAA +#define FDDI_UI_CMD 0x03 + +/* Define 802.2 Type 1 header */ +struct fddi_8022_1_hdr { + __u8 dsap; /* destination service access point */ + __u8 ssap; /* source service access point */ + __u8 ctrl; /* control byte #1 */ +} __attribute__((packed)); + +/* Define 802.2 Type 2 header */ +struct fddi_8022_2_hdr { + __u8 dsap; /* destination service access point */ + __u8 ssap; /* source service access point */ + __u8 ctrl_1; /* control byte #1 */ + __u8 ctrl_2; /* control byte #2 */ +} 
__attribute__((packed)); + +/* Define 802.2 SNAP header */ +#define FDDI_K_OUI_LEN 3 +struct fddi_snap_hdr { + __u8 dsap; /* always 0xAA */ + __u8 ssap; /* always 0xAA */ + __u8 ctrl; /* always 0x03 */ + __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ + __be16 ethertype; /* packet type ID field */ +} __attribute__((packed)); + +/* Define FDDI LLC frame header */ +struct fddihdr { + __u8 fc; /* frame control */ + __u8 daddr[FDDI_K_ALEN]; /* destination address */ + __u8 saddr[FDDI_K_ALEN]; /* source address */ + union + { + struct fddi_8022_1_hdr llc_8022_1; + struct fddi_8022_2_hdr llc_8022_2; + struct fddi_snap_hdr llc_snap; + } hdr; +} __attribute__((packed)); + +#ifdef __KERNEL__ +#include <linux/netdevice.h> + +/* Define FDDI statistics structure */ +struct fddi_statistics { + + /* Generic statistics. */ + + struct net_device_stats gen; + + /* Detailed FDDI statistics. Adopted from RFC 1512 */ + + __u8 smt_station_id[8]; + __u32 smt_op_version_id; + __u32 smt_hi_version_id; + __u32 smt_lo_version_id; + __u8 smt_user_data[32]; + __u32 smt_mib_version_id; + __u32 smt_mac_cts; + __u32 smt_non_master_cts; + __u32 smt_master_cts; + __u32 smt_available_paths; + __u32 smt_config_capabilities; + __u32 smt_config_policy; + __u32 smt_connection_policy; + __u32 smt_t_notify; + __u32 smt_stat_rpt_policy; + __u32 smt_trace_max_expiration; + __u32 smt_bypass_present; + __u32 smt_ecm_state; + __u32 smt_cf_state; + __u32 smt_remote_disconnect_flag; + __u32 smt_station_status; + __u32 smt_peer_wrap_flag; + __u32 smt_time_stamp; + __u32 smt_transition_time_stamp; + __u32 mac_frame_status_functions; + __u32 mac_t_max_capability; + __u32 mac_tvx_capability; + __u32 mac_available_paths; + __u32 mac_current_path; + __u8 mac_upstream_nbr[FDDI_K_ALEN]; + __u8 mac_downstream_nbr[FDDI_K_ALEN]; + __u8 mac_old_upstream_nbr[FDDI_K_ALEN]; + __u8 mac_old_downstream_nbr[FDDI_K_ALEN]; + __u32 mac_dup_address_test; + __u32 mac_requested_paths; + __u32 mac_downstream_port_type; + __u8 mac_smt_address[FDDI_K_ALEN]; + __u32 mac_t_req; + __u32 mac_t_neg; + __u32 mac_t_max; + __u32 mac_tvx_value; + __u32 mac_frame_cts; + __u32 mac_copied_cts; + __u32 mac_transmit_cts; + __u32 mac_error_cts; + __u32 mac_lost_cts; + __u32 mac_frame_error_threshold; + __u32 mac_frame_error_ratio; + __u32 mac_rmt_state; + __u32 mac_da_flag; + __u32 mac_una_da_flag; + __u32 mac_frame_error_flag; + __u32 mac_ma_unitdata_available; + __u32 mac_hardware_present; + __u32 mac_ma_unitdata_enable; + __u32 path_tvx_lower_bound; + __u32 path_t_max_lower_bound; + __u32 path_max_t_req; + __u32 path_configuration[8]; + __u32 port_my_type[2]; + __u32 port_neighbor_type[2]; + __u32 port_connection_policies[2]; + __u32 port_mac_indicated[2]; + __u32 port_current_path[2]; + __u8 port_requested_paths[3*2]; + __u32 port_mac_placement[2]; + __u32 port_available_paths[2]; + __u32 port_pmd_class[2]; + __u32 port_connection_capabilities[2]; + __u32 port_bs_flag[2]; + __u32 port_lct_fail_cts[2]; + __u32 port_ler_estimate[2]; + __u32 port_lem_reject_cts[2]; + __u32 port_lem_cts[2]; + __u32 port_ler_cutoff[2]; + __u32 port_ler_alarm[2]; + __u32 port_connect_state[2]; + __u32 port_pcm_state[2]; + __u32 port_pc_withhold[2]; + __u32 port_ler_flag[2]; + __u32 port_hardware_present[2]; +}; +#endif /* __KERNEL__ */ + +#endif /* _LINUX_IF_FDDI_H */ diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h new file mode 100644 index 00000000..191ee086 --- /dev/null +++ b/include/linux/if_frad.h @@ -0,0 +1,193 @@ +/* + * DLCI/FRAD Definitions for Frame Relay 
Access Devices. DLCI devices are + * created for each DLCI associated with a FRAD. The FRAD driver + * is not truly a network device, but the lower level device + * handler. This allows other FRAD manufacturers to use the DLCI + * code, including its RFC1490 encapsulation alongside the current + * implementation for the Sangoma cards. + * + * Version: @(#)if_ifrad.h 0.15 31 Mar 96 + * + * Author: Mike McLagan <mike.mclagan@linux.org> + * + * Changes: + * 0.15 Mike McLagan changed structure defs (packed) + * re-arranged flags + * added DLCI_RET vars + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _FRAD_H_ +#define _FRAD_H_ + +#include <linux/if.h> + +/* Structures and constants associated with the DLCI device driver */ + +struct dlci_add +{ + char devname[IFNAMSIZ]; + short dlci; +}; + +#define DLCI_GET_CONF (SIOCDEVPRIVATE + 2) +#define DLCI_SET_CONF (SIOCDEVPRIVATE + 3) + +/* + * These are related to the Sangoma SDLA and should remain in order. + * Code within the SDLA module is based on the specifics of this + * structure. Change at your own peril. + */ +struct dlci_conf { + short flags; + short CIR_fwd; + short Bc_fwd; + short Be_fwd; + short CIR_bwd; + short Bc_bwd; + short Be_bwd; + +/* these are part of the status read */ + short Tc_fwd; + short Tc_bwd; + short Tf_max; + short Tb_max; + +/* add any new fields here above is a mirror of sdla_dlci_conf */ +}; + +#define DLCI_GET_SLAVE (SIOCDEVPRIVATE + 4) + +/* configuration flags for DLCI */ +#define DLCI_IGNORE_CIR_OUT 0x0001 +#define DLCI_ACCOUNT_CIR_IN 0x0002 +#define DLCI_BUFFER_IF 0x0008 + +#define DLCI_VALID_FLAGS 0x000B + +/* defines for the actual Frame Relay hardware */ +#define FRAD_GET_CONF (SIOCDEVPRIVATE) +#define FRAD_SET_CONF (SIOCDEVPRIVATE + 1) + +#define FRAD_LAST_IOCTL FRAD_SET_CONF + +/* + * Based on the setup for the Sangoma SDLA. If changes are + * necessary to this structure, a routine will need to be + * added to that module to copy fields. 
+ */ +struct frad_conf +{ + short station; + short flags; + short kbaud; + short clocking; + short mtu; + short T391; + short T392; + short N391; + short N392; + short N393; + short CIR_fwd; + short Bc_fwd; + short Be_fwd; + short CIR_bwd; + short Bc_bwd; + short Be_bwd; + +/* Add new fields here, above is a mirror of the sdla_conf */ + +}; + +#define FRAD_STATION_CPE 0x0000 +#define FRAD_STATION_NODE 0x0001 + +#define FRAD_TX_IGNORE_CIR 0x0001 +#define FRAD_RX_ACCOUNT_CIR 0x0002 +#define FRAD_DROP_ABORTED 0x0004 +#define FRAD_BUFFERIF 0x0008 +#define FRAD_STATS 0x0010 +#define FRAD_MCI 0x0100 +#define FRAD_AUTODLCI 0x8000 +#define FRAD_VALID_FLAGS 0x811F + +#define FRAD_CLOCK_INT 0x0001 +#define FRAD_CLOCK_EXT 0x0000 + +#ifdef __KERNEL__ + +#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE) + +/* these are the fields of an RFC 1490 header */ +struct frhdr +{ + unsigned char control; + + /* for IP packets, this can be the NLPID */ + unsigned char pad; + + unsigned char NLPID; + unsigned char OUI[3]; + __be16 PID; + +#define IP_NLPID pad +} __packed; + +/* see RFC 1490 for the definition of the following */ +#define FRAD_I_UI 0x03 + +#define FRAD_P_PADDING 0x00 +#define FRAD_P_Q933 0x08 +#define FRAD_P_SNAP 0x80 +#define FRAD_P_CLNP 0x81 +#define FRAD_P_IP 0xCC + +struct dlci_local +{ + struct net_device *master; + struct net_device *slave; + struct dlci_conf config; + int configured; + struct list_head list; + + /* callback function */ + void (*receive)(struct sk_buff *skb, struct net_device *); +}; + +struct frad_local +{ + struct net_device_stats stats; + + /* devices which this FRAD is slaved to */ + struct net_device *master[CONFIG_DLCI_MAX]; + short dlci[CONFIG_DLCI_MAX]; + + struct frad_conf config; + int configured; /* has this device been configured */ + int initialized; /* mem_start, port, irq set ? */ + + /* callback functions */ + int (*activate)(struct net_device *, struct net_device *); + int (*deactivate)(struct net_device *, struct net_device *); + int (*assoc)(struct net_device *, struct net_device *); + int (*deassoc)(struct net_device *, struct net_device *); + int (*dlci_conf)(struct net_device *, struct net_device *, int get); + + /* fields that are used by the Sangoma SDLA cards */ + struct timer_list timer; + int type; /* adapter type */ + int state; /* state of the S502/8 control latch */ + int buffer; /* current buffer for S508 firmware */ +}; + +#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */ + +extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *)); + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h new file mode 100644 index 00000000..cdc049f1 --- /dev/null +++ b/include/linux/if_hippi.h @@ -0,0 +1,153 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the HIPPI interface. + * + * Version: @(#)if_hippi.h 1.0.0 05/26/97 + * + * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Donald Becker, <becker@super.org> + * Alan Cox, <alan@lxorguk.ukuu.org.uk> + * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk> + * Jes Sorensen, <Jes.Sorensen@cern.ch> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_IF_HIPPI_H +#define _LINUX_IF_HIPPI_H + +#include <linux/types.h> +#include <asm/byteorder.h> + +/* + * HIPPI magic constants. + */ + +#define HIPPI_ALEN 6 /* Bytes in one HIPPI hw-addr */ +#define HIPPI_HLEN sizeof(struct hippi_hdr) +#define HIPPI_ZLEN 0 /* Min. bytes in frame without FCS */ +#define HIPPI_DATA_LEN 65280 /* Max. bytes in payload */ +#define HIPPI_FRAME_LEN (HIPPI_DATA_LEN + HIPPI_HLEN) + /* Max. bytes in frame without FCS */ + +/* + * Define LLC and SNAP constants. + */ +#define HIPPI_EXTENDED_SAP 0xAA +#define HIPPI_UI_CMD 0x03 + + +/* + * Do we need to list some sort of ID's here? + */ + +/* + * HIPPI statistics collection data. + */ + +struct hipnet_statistics { + int rx_packets; /* total packets received */ + int tx_packets; /* total packets transmitted */ + int rx_errors; /* bad packets received */ + int tx_errors; /* packet transmit problems */ + int rx_dropped; /* no space in linux buffers */ + int tx_dropped; /* no space available in linux */ + + /* detailed rx_errors: */ + int rx_length_errors; + int rx_over_errors; /* receiver ring buff overflow */ + int rx_crc_errors; /* recved pkt with crc error */ + int rx_frame_errors; /* recv'd frame alignment error */ + int rx_fifo_errors; /* recv'r fifo overrun */ + int rx_missed_errors; /* receiver missed packet */ + + /* detailed tx_errors */ + int tx_aborted_errors; + int tx_carrier_errors; + int tx_fifo_errors; + int tx_heartbeat_errors; + int tx_window_errors; +}; + + +struct hippi_fp_hdr { +#if 0 + __u8 ulp; /* must contain 4 */ +#if defined (__BIG_ENDIAN_BITFIELD) + __u8 d1_data_present:1; /* must be 1 */ + __u8 start_d2_burst_boundary:1; /* must be zero */ + __u8 reserved:6; /* must be zero */ +#if 0 + __u16 reserved1:5; + __u16 d1_area_size:8; /* must be 3 */ + __u16 d2_offset:3; /* must be zero */ +#endif +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 reserved:6; /* must be zero */ + __u8 start_d2_burst_boundary:1; /* must be zero */ + __u8 d1_data_present:1; /* must be 1 */ +#if 0 + __u16 d2_offset:3; /* must be zero */ + __u16 d1_area_size:8; /* must be 3 */ + __u16 reserved1:5; /* must be zero */ +#endif +#else +#error "Please fix <asm/byteorder.h>" +#endif +#else + __be32 fixed; +#endif + __be32 d2_size; +} __attribute__((packed)); + +struct hippi_le_hdr { +#if defined (__BIG_ENDIAN_BITFIELD) + __u8 fc:3; + __u8 double_wide:1; + __u8 message_type:4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 message_type:4; + __u8 double_wide:1; + __u8 fc:3; +#endif + __u8 dest_switch_addr[3]; +#if defined (__BIG_ENDIAN_BITFIELD) + __u8 dest_addr_type:4, + src_addr_type:4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 src_addr_type:4, + dest_addr_type:4; +#endif + __u8 src_switch_addr[3]; + __u16 reserved; + __u8 daddr[HIPPI_ALEN]; + __u16 locally_administered; + __u8 saddr[HIPPI_ALEN]; +} __attribute__((packed)); + +#define HIPPI_OUI_LEN 3 +/* + * Looks like the dsap and ssap fields have been swapped by mistake in + * RFC 2067 "IP over HIPPI". 
+ */ +struct hippi_snap_hdr { + __u8 dsap; /* always 0xAA */ + __u8 ssap; /* always 0xAA */ + __u8 ctrl; /* always 0x03 */ + __u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/ + __be16 ethertype; /* packet type ID field */ +} __attribute__((packed)); + +struct hippi_hdr { + struct hippi_fp_hdr fp; + struct hippi_le_hdr le; + struct hippi_snap_hdr snap; +} __attribute__((packed)); + +#endif /* _LINUX_IF_HIPPI_H */ diff --git a/include/linux/if_infiniband.h b/include/linux/if_infiniband.h new file mode 100644 index 00000000..7d958475 --- /dev/null +++ b/include/linux/if_infiniband.h @@ -0,0 +1,29 @@ +/* + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available at + * <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD + * license, available in the LICENSE.TXT file accompanying this + * software. These details are also available at + * <http://www.openfabrics.org/software_license.htm>. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * + * $Id$ + */ + +#ifndef _LINUX_IF_INFINIBAND_H +#define _LINUX_IF_INFINIBAND_H + +#define INFINIBAND_ALEN 20 /* Octets in IPoIB HW addr */ + +#endif /* _LINUX_IF_INFINIBAND_H */ diff --git a/include/linux/if_link.h b/include/linux/if_link.h new file mode 100644 index 00000000..4b24ff45 --- /dev/null +++ b/include/linux/if_link.h @@ -0,0 +1,394 @@ +#ifndef _LINUX_IF_LINK_H +#define _LINUX_IF_LINK_H + +#include <linux/types.h> +#include <linux/netlink.h> + +/* This struct should be in sync with struct rtnl_link_stats64 */ +struct rtnl_link_stats { + __u32 rx_packets; /* total packets received */ + __u32 tx_packets; /* total packets transmitted */ + __u32 rx_bytes; /* total bytes received */ + __u32 tx_bytes; /* total bytes transmitted */ + __u32 rx_errors; /* bad packets received */ + __u32 tx_errors; /* packet transmit problems */ + __u32 rx_dropped; /* no space in linux buffers */ + __u32 tx_dropped; /* no space available in linux */ + __u32 multicast; /* multicast packets received */ + __u32 collisions; + + /* detailed rx_errors: */ + __u32 rx_length_errors; + __u32 rx_over_errors; /* receiver ring buff overflow */ + __u32 rx_crc_errors; /* recved pkt with crc error */ + __u32 rx_frame_errors; /* recv'd frame alignment error */ + __u32 rx_fifo_errors; /* recv'r fifo overrun */ + __u32 rx_missed_errors; /* receiver missed packet */ + + /* detailed tx_errors */ + __u32 tx_aborted_errors; + __u32 tx_carrier_errors; + __u32 tx_fifo_errors; + __u32 tx_heartbeat_errors; + __u32 tx_window_errors; + + /* for cslip etc */ + __u32 rx_compressed; + __u32 tx_compressed; +}; + +/* The main device statistics structure */ +struct rtnl_link_stats64 { + __u64 rx_packets; /* total packets received */ + __u64 tx_packets; /* total packets transmitted */ + __u64 rx_bytes; /* total bytes received */ + __u64 tx_bytes; /* total bytes transmitted */ + __u64 rx_errors; /* bad packets received */ + __u64 tx_errors; /* 
packet transmit problems */ + __u64 rx_dropped; /* no space in linux buffers */ + __u64 tx_dropped; /* no space available in linux */ + __u64 multicast; /* multicast packets received */ + __u64 collisions; + + /* detailed rx_errors: */ + __u64 rx_length_errors; + __u64 rx_over_errors; /* receiver ring buff overflow */ + __u64 rx_crc_errors; /* recved pkt with crc error */ + __u64 rx_frame_errors; /* recv'd frame alignment error */ + __u64 rx_fifo_errors; /* recv'r fifo overrun */ + __u64 rx_missed_errors; /* receiver missed packet */ + + /* detailed tx_errors */ + __u64 tx_aborted_errors; + __u64 tx_carrier_errors; + __u64 tx_fifo_errors; + __u64 tx_heartbeat_errors; + __u64 tx_window_errors; + + /* for cslip etc */ + __u64 rx_compressed; + __u64 tx_compressed; +}; + +/* The struct should be in sync with struct ifmap */ +struct rtnl_link_ifmap { + __u64 mem_start; + __u64 mem_end; + __u64 base_addr; + __u16 irq; + __u8 dma; + __u8 port; +}; + +/* + * IFLA_AF_SPEC + * Contains nested attributes for address family specific attributes. + * Each address family may create a attribute with the address family + * number as type and create its own attribute structure in it. + * + * Example: + * [IFLA_AF_SPEC] = { + * [AF_INET] = { + * [IFLA_INET_CONF] = ..., + * }, + * [AF_INET6] = { + * [IFLA_INET6_FLAGS] = ..., + * [IFLA_INET6_CONF] = ..., + * } + * } + */ + +enum { + IFLA_UNSPEC, + IFLA_ADDRESS, + IFLA_BROADCAST, + IFLA_IFNAME, + IFLA_MTU, + IFLA_LINK, + IFLA_QDISC, + IFLA_STATS, + IFLA_COST, +#define IFLA_COST IFLA_COST + IFLA_PRIORITY, +#define IFLA_PRIORITY IFLA_PRIORITY + IFLA_MASTER, +#define IFLA_MASTER IFLA_MASTER + IFLA_WIRELESS, /* Wireless Extension event - see wireless.h */ +#define IFLA_WIRELESS IFLA_WIRELESS + IFLA_PROTINFO, /* Protocol specific information for a link */ +#define IFLA_PROTINFO IFLA_PROTINFO + IFLA_TXQLEN, +#define IFLA_TXQLEN IFLA_TXQLEN + IFLA_MAP, +#define IFLA_MAP IFLA_MAP + IFLA_WEIGHT, +#define IFLA_WEIGHT IFLA_WEIGHT + IFLA_OPERSTATE, + IFLA_LINKMODE, + IFLA_LINKINFO, +#define IFLA_LINKINFO IFLA_LINKINFO + IFLA_NET_NS_PID, + IFLA_IFALIAS, + IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ + IFLA_VFINFO_LIST, + IFLA_STATS64, + IFLA_VF_PORTS, + IFLA_PORT_SELF, + IFLA_AF_SPEC, + IFLA_GROUP, /* Group the device belongs to */ + IFLA_NET_NS_FD, + IFLA_EXT_MASK, /* Extended info mask, VFs, etc */ + __IFLA_MAX +}; + + +#define IFLA_MAX (__IFLA_MAX - 1) + +/* backwards compatibility for userspace */ +#ifndef __KERNEL__ +#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) +#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) +#endif + +enum { + IFLA_INET_UNSPEC, + IFLA_INET_CONF, + __IFLA_INET_MAX, +}; + +#define IFLA_INET_MAX (__IFLA_INET_MAX - 1) + +/* ifi_flags. + + IFF_* flags. + + The only change is: + IFF_LOOPBACK, IFF_BROADCAST and IFF_POINTOPOINT are + more not changeable by user. They describe link media + characteristics and set by device driver. + + Comments: + - Combination IFF_BROADCAST|IFF_POINTOPOINT is invalid + - If neither of these three flags are set; + the interface is NBMA. + + - IFF_MULTICAST does not mean anything special: + multicasts can be used on all not-NBMA links. + IFF_MULTICAST means that this media uses special encapsulation + for multicast frames. Apparently, all IFF_POINTOPOINT and + IFF_BROADCAST devices are able to use multicasts too. + */ + +/* IFLA_LINK. + For usual devices it is equal ifi_index. + If it is a "virtual interface" (f.e. 
tunnel), ifi_link + can point to real physical interface (f.e. for bandwidth calculations), + or maybe 0, what means, that real media is unknown (usual + for IPIP tunnels, when route to endpoint is allowed to change) + */ + +/* Subtype attributes for IFLA_PROTINFO */ +enum { + IFLA_INET6_UNSPEC, + IFLA_INET6_FLAGS, /* link flags */ + IFLA_INET6_CONF, /* sysctl parameters */ + IFLA_INET6_STATS, /* statistics */ + IFLA_INET6_MCAST, /* MC things. What of them? */ + IFLA_INET6_CACHEINFO, /* time values and max reasm size */ + IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */ + __IFLA_INET6_MAX +}; + +#define IFLA_INET6_MAX (__IFLA_INET6_MAX - 1) + +struct ifla_cacheinfo { + __u32 max_reasm_len; + __u32 tstamp; /* ipv6InterfaceTable updated timestamp */ + __u32 reachable_time; + __u32 retrans_time; +}; + +enum { + IFLA_INFO_UNSPEC, + IFLA_INFO_KIND, + IFLA_INFO_DATA, + IFLA_INFO_XSTATS, + __IFLA_INFO_MAX, +}; + +#define IFLA_INFO_MAX (__IFLA_INFO_MAX - 1) + +/* VLAN section */ + +enum { + IFLA_VLAN_UNSPEC, + IFLA_VLAN_ID, + IFLA_VLAN_FLAGS, + IFLA_VLAN_EGRESS_QOS, + IFLA_VLAN_INGRESS_QOS, + __IFLA_VLAN_MAX, +}; + +#define IFLA_VLAN_MAX (__IFLA_VLAN_MAX - 1) + +struct ifla_vlan_flags { + __u32 flags; + __u32 mask; +}; + +enum { + IFLA_VLAN_QOS_UNSPEC, + IFLA_VLAN_QOS_MAPPING, + __IFLA_VLAN_QOS_MAX +}; + +#define IFLA_VLAN_QOS_MAX (__IFLA_VLAN_QOS_MAX - 1) + +struct ifla_vlan_qos_mapping { + __u32 from; + __u32 to; +}; + +/* MACVLAN section */ +enum { + IFLA_MACVLAN_UNSPEC, + IFLA_MACVLAN_MODE, + __IFLA_MACVLAN_MAX, +}; + +#define IFLA_MACVLAN_MAX (__IFLA_MACVLAN_MAX - 1) + +enum macvlan_mode { + MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */ + MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ + MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ + MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ +}; + +/* SR-IOV virtual function management section */ + +enum { + IFLA_VF_INFO_UNSPEC, + IFLA_VF_INFO, + __IFLA_VF_INFO_MAX, +}; + +#define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1) + +enum { + IFLA_VF_UNSPEC, + IFLA_VF_MAC, /* Hardware queue specific attributes */ + IFLA_VF_VLAN, + IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ + IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ + __IFLA_VF_MAX, +}; + +#define IFLA_VF_MAX (__IFLA_VF_MAX - 1) + +struct ifla_vf_mac { + __u32 vf; + __u8 mac[32]; /* MAX_ADDR_LEN */ +}; + +struct ifla_vf_vlan { + __u32 vf; + __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ + __u32 qos; +}; + +struct ifla_vf_tx_rate { + __u32 vf; + __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ +}; + +struct ifla_vf_spoofchk { + __u32 vf; + __u32 setting; +}; +#ifdef __KERNEL__ + +/* We don't want this structure exposed to user space */ +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 tx_rate; + __u32 spoofchk; +}; +#endif + +/* VF ports management section + * + * Nested layout of set/get msg is: + * + * [IFLA_NUM_VF] + * [IFLA_VF_PORTS] + * [IFLA_VF_PORT] + * [IFLA_PORT_*], ... + * [IFLA_VF_PORT] + * [IFLA_PORT_*], ... + * ... + * [IFLA_PORT_SELF] + * [IFLA_PORT_*], ... 
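/*
 * Editor's note -- illustrative sketch, not part of this header: one way a
 * user-space tool might walk the nested IFLA_VF_PORTS layout described
 * above, using the generic rtattr helpers from <linux/rtnetlink.h>.
 * handle_port_attr() is a hypothetical callback; error handling is omitted.
 */
#include <linux/if_link.h>
#include <linux/rtnetlink.h>	/* struct rtattr, RTA_* helpers */

static void handle_port_attr(int type, void *data, int len)
{
	/* e.g. switch on IFLA_PORT_VF, IFLA_PORT_PROFILE, ... */
	(void)type; (void)data; (void)len;
}

/* vf_ports points at the IFLA_VF_PORTS attribute of an RTM_NEWLINK message */
static void walk_vf_ports(struct rtattr *vf_ports)
{
	struct rtattr *port;
	int rem = RTA_PAYLOAD(vf_ports);

	/* visit each nested IFLA_VF_PORT in turn */
	for (port = RTA_DATA(vf_ports); RTA_OK(port, rem);
	     port = RTA_NEXT(port, rem)) {
		struct rtattr *attr;
		int prem = RTA_PAYLOAD(port);

		if (port->rta_type != IFLA_VF_PORT)
			continue;

		/* then the IFLA_PORT_* attributes nested inside this port */
		for (attr = RTA_DATA(port); RTA_OK(attr, prem);
		     attr = RTA_NEXT(attr, prem))
			handle_port_attr(attr->rta_type, RTA_DATA(attr),
					 RTA_PAYLOAD(attr));
	}
}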
+ */ + +enum { + IFLA_VF_PORT_UNSPEC, + IFLA_VF_PORT, /* nest */ + __IFLA_VF_PORT_MAX, +}; + +#define IFLA_VF_PORT_MAX (__IFLA_VF_PORT_MAX - 1) + +enum { + IFLA_PORT_UNSPEC, + IFLA_PORT_VF, /* __u32 */ + IFLA_PORT_PROFILE, /* string */ + IFLA_PORT_VSI_TYPE, /* 802.1Qbg (pre-)standard VDP */ + IFLA_PORT_INSTANCE_UUID, /* binary UUID */ + IFLA_PORT_HOST_UUID, /* binary UUID */ + IFLA_PORT_REQUEST, /* __u8 */ + IFLA_PORT_RESPONSE, /* __u16, output only */ + __IFLA_PORT_MAX, +}; + +#define IFLA_PORT_MAX (__IFLA_PORT_MAX - 1) + +#define PORT_PROFILE_MAX 40 +#define PORT_UUID_MAX 16 +#define PORT_SELF_VF -1 + +enum { + PORT_REQUEST_PREASSOCIATE = 0, + PORT_REQUEST_PREASSOCIATE_RR, + PORT_REQUEST_ASSOCIATE, + PORT_REQUEST_DISASSOCIATE, +}; + +enum { + PORT_VDP_RESPONSE_SUCCESS = 0, + PORT_VDP_RESPONSE_INVALID_FORMAT, + PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES, + PORT_VDP_RESPONSE_UNUSED_VTID, + PORT_VDP_RESPONSE_VTID_VIOLATION, + PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION, + PORT_VDP_RESPONSE_OUT_OF_SYNC, + /* 0x08-0xFF reserved for future VDP use */ + PORT_PROFILE_RESPONSE_SUCCESS = 0x100, + PORT_PROFILE_RESPONSE_INPROGRESS, + PORT_PROFILE_RESPONSE_INVALID, + PORT_PROFILE_RESPONSE_BADSTATE, + PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES, + PORT_PROFILE_RESPONSE_ERROR, +}; + +struct ifla_port_vsi { + __u8 vsi_mgr_id; + __u8 vsi_type_id[3]; + __u8 vsi_type_version; + __u8 pad[3]; +}; + +#endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h new file mode 100644 index 00000000..76525760 --- /dev/null +++ b/include/linux/if_ltalk.h @@ -0,0 +1,12 @@ +#ifndef __LINUX_LTALK_H +#define __LINUX_LTALK_H + +#define LTALK_HLEN 1 +#define LTALK_MTU 600 +#define LTALK_ALEN 1 + +#ifdef __KERNEL__ +extern struct net_device *alloc_ltalkdev(int sizeof_priv); +#endif + +#endif diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h new file mode 100644 index 00000000..d103dca5 --- /dev/null +++ b/include/linux/if_macvlan.h @@ -0,0 +1,108 @@ +#ifndef _LINUX_IF_MACVLAN_H +#define _LINUX_IF_MACVLAN_H + +#include <linux/if_link.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/netlink.h> +#include <net/netlink.h> +#include <linux/u64_stats_sync.h> + +#if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE) +struct socket *macvtap_get_socket(struct file *); +#else +#include <linux/err.h> +#include <linux/errno.h> +struct file; +struct socket; +static inline struct socket *macvtap_get_socket(struct file *f) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_MACVTAP */ + +struct macvlan_port; +struct macvtap_queue; + +/** + * struct macvlan_pcpu_stats - MACVLAN percpu stats + * @rx_packets: number of received packets + * @rx_bytes: number of received bytes + * @rx_multicast: number of received multicast packets + * @tx_packets: number of transmitted packets + * @tx_bytes: number of transmitted bytes + * @syncp: synchronization point for 64bit counters + * @rx_errors: number of rx errors + * @tx_dropped: number of tx dropped packets + */ +struct macvlan_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_errors; + u32 tx_dropped; +}; + +/* + * Maximum times a macvtap device can be opened. This can be used to + * configure the number of receive queue, e.g. for multiqueue virtio. + */ +#define MAX_MACVTAP_QUEUES (NR_CPUS < 16 ? 
NR_CPUS : 16) + +struct macvlan_dev { + struct net_device *dev; + struct list_head list; + struct hlist_node hlist; + struct macvlan_port *port; + struct net_device *lowerdev; + struct macvlan_pcpu_stats __percpu *pcpu_stats; + enum macvlan_mode mode; + int (*receive)(struct sk_buff *skb); + int (*forward)(struct net_device *dev, struct sk_buff *skb); + struct macvtap_queue *taps[MAX_MACVTAP_QUEUES]; + int numvtaps; + int minor; +}; + +static inline void macvlan_count_rx(const struct macvlan_dev *vlan, + unsigned int len, bool success, + bool multicast) +{ + if (likely(success)) { + struct macvlan_pcpu_stats *pcpu_stats; + + pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += len; + if (multicast) + pcpu_stats->rx_multicast++; + u64_stats_update_end(&pcpu_stats->syncp); + } else { + this_cpu_inc(vlan->pcpu_stats->rx_errors); + } +} + +extern void macvlan_common_setup(struct net_device *dev); + +extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + int (*receive)(struct sk_buff *skb), + int (*forward)(struct net_device *dev, + struct sk_buff *skb)); + +extern void macvlan_count_rx(const struct macvlan_dev *vlan, + unsigned int len, bool success, + bool multicast); + +extern void macvlan_dellink(struct net_device *dev, struct list_head *head); + +extern int macvlan_link_register(struct rtnl_link_ops *ops); + +extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, + struct net_device *dev); + +#endif /* _LINUX_IF_MACVLAN_H */ diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h new file mode 100644 index 00000000..f3799295 --- /dev/null +++ b/include/linux/if_packet.h @@ -0,0 +1,272 @@ +#ifndef __LINUX_IF_PACKET_H +#define __LINUX_IF_PACKET_H + +#include <linux/types.h> + +struct sockaddr_pkt { + unsigned short spkt_family; + unsigned char spkt_device[14]; + __be16 spkt_protocol; +}; + +struct sockaddr_ll { + unsigned short sll_family; + __be16 sll_protocol; + int sll_ifindex; + unsigned short sll_hatype; + unsigned char sll_pkttype; + unsigned char sll_halen; + unsigned char sll_addr[8]; +}; + +/* Packet types */ + +#define PACKET_HOST 0 /* To us */ +#define PACKET_BROADCAST 1 /* To all */ +#define PACKET_MULTICAST 2 /* To group */ +#define PACKET_OTHERHOST 3 /* To someone else */ +#define PACKET_OUTGOING 4 /* Outgoing of any type */ +/* These ones are invisible by user level */ +#define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */ +#define PACKET_FASTROUTE 6 /* Fastrouted frame */ + +/* Packet socket options */ + +#define PACKET_ADD_MEMBERSHIP 1 +#define PACKET_DROP_MEMBERSHIP 2 +#define PACKET_RECV_OUTPUT 3 +/* Value 4 is still used by obsolete turbo-packet. 
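/*
 * Editor's note -- illustrative sketch, not part of this header: the usual
 * user-space sequence for mapping a TPACKET_V2 receive ring with the
 * PACKET_VERSION and PACKET_RX_RING options, using struct tpacket_req and
 * struct tpacket2_hdr defined below. The ring geometry is an arbitrary
 * example; error handling is minimal.
 */
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>		/* htons() */
#include <linux/if_packet.h>
#include <linux/if_ether.h>	/* ETH_P_ALL */

static void *setup_rx_ring(int *out_fd, struct tpacket_req *req)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int version = TPACKET_V2;
	void *ring;

	if (fd < 0)
		return NULL;

	/* pick the frame header layout before asking for the ring */
	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));

	req->tp_block_size = 4096;	/* one page per block (example value) */
	req->tp_frame_size = 2048;	/* two frames per block */
	req->tp_block_nr   = 64;
	req->tp_frame_nr   = req->tp_block_nr *
			     (req->tp_block_size / req->tp_frame_size);
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));

	/* each frame in the ring starts with a struct tpacket2_hdr;
	 * TP_STATUS_USER in tp_status marks frames owned by user space */
	ring = mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	*out_fd = fd;
	return ring == MAP_FAILED ? NULL : ring;
}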
*/ +#define PACKET_RX_RING 5 +#define PACKET_STATISTICS 6 +#define PACKET_COPY_THRESH 7 +#define PACKET_AUXDATA 8 +#define PACKET_ORIGDEV 9 +#define PACKET_VERSION 10 +#define PACKET_HDRLEN 11 +#define PACKET_RESERVE 12 +#define PACKET_TX_RING 13 +#define PACKET_LOSS 14 +#define PACKET_VNET_HDR 15 +#define PACKET_TX_TIMESTAMP 16 +#define PACKET_TIMESTAMP 17 +#define PACKET_FANOUT 18 + +#define PACKET_FANOUT_HASH 0 +#define PACKET_FANOUT_LB 1 +#define PACKET_FANOUT_CPU 2 +#define PACKET_FANOUT_FLAG_DEFRAG 0x8000 + +struct tpacket_stats { + unsigned int tp_packets; + unsigned int tp_drops; +}; + +struct tpacket_stats_v3 { + unsigned int tp_packets; + unsigned int tp_drops; + unsigned int tp_freeze_q_cnt; +}; + +union tpacket_stats_u { + struct tpacket_stats stats1; + struct tpacket_stats_v3 stats3; +}; + +struct tpacket_auxdata { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u16 tp_vlan_tci; + __u16 tp_padding; +}; + +/* Rx ring - header status */ +#define TP_STATUS_KERNEL 0x0 +#define TP_STATUS_USER 0x1 +#define TP_STATUS_COPY 0x2 +#define TP_STATUS_LOSING 0x4 +#define TP_STATUS_CSUMNOTREADY 0x8 +#define TP_STATUS_VLAN_VALID 0x10 /* auxdata has valid tp_vlan_tci */ +#define TP_STATUS_BLK_TMO 0x20 + +/* Tx ring - header status */ +#define TP_STATUS_AVAILABLE 0x0 +#define TP_STATUS_SEND_REQUEST 0x1 +#define TP_STATUS_SENDING 0x2 +#define TP_STATUS_WRONG_FORMAT 0x4 + +/* Rx ring - feature request bits */ +#define TP_FT_REQ_FILL_RXHASH 0x1 + +struct tpacket_hdr { + unsigned long tp_status; + unsigned int tp_len; + unsigned int tp_snaplen; + unsigned short tp_mac; + unsigned short tp_net; + unsigned int tp_sec; + unsigned int tp_usec; +}; + +#define TPACKET_ALIGNMENT 16 +#define TPACKET_ALIGN(x) (((x)+TPACKET_ALIGNMENT-1)&~(TPACKET_ALIGNMENT-1)) +#define TPACKET_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + sizeof(struct sockaddr_ll)) + +struct tpacket2_hdr { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u32 tp_sec; + __u32 tp_nsec; + __u16 tp_vlan_tci; + __u16 tp_padding; +}; + +struct tpacket_hdr_variant1 { + __u32 tp_rxhash; + __u32 tp_vlan_tci; +}; + +struct tpacket3_hdr { + __u32 tp_next_offset; + __u32 tp_sec; + __u32 tp_nsec; + __u32 tp_snaplen; + __u32 tp_len; + __u32 tp_status; + __u16 tp_mac; + __u16 tp_net; + /* pkt_hdr variants */ + union { + struct tpacket_hdr_variant1 hv1; + }; +}; + +struct tpacket_bd_ts { + unsigned int ts_sec; + union { + unsigned int ts_usec; + unsigned int ts_nsec; + }; +}; + +struct tpacket_hdr_v1 { + __u32 block_status; + __u32 num_pkts; + __u32 offset_to_first_pkt; + + /* Number of valid bytes (including padding) + * blk_len <= tp_block_size + */ + __u32 blk_len; + + /* + * Quite a few uses of sequence number: + * 1. Make sure cache flush etc worked. + * Well, one can argue - why not use the increasing ts below? + * But look at 2. below first. + * 2. When you pass around blocks to other user space decoders, + * you can see which blk[s] is[are] outstanding etc. + * 3. Validate kernel code. + */ + __aligned_u64 seq_num; + + /* + * ts_last_pkt: + * + * Case 1. Block has 'N'(N >=1) packets and TMO'd(timed out) + * ts_last_pkt == 'time-stamp of last packet' and NOT the + * time when the timer fired and the block was closed. + * By providing the ts of the last packet we can absolutely + * guarantee that time-stamp wise, the first packet in the + * next block will never precede the last packet of the + * previous block. + * Case 2. 
Block has zero packets and TMO'd + * ts_last_pkt = time when the timer fired and the block + * was closed. + * Case 3. Block has 'N' packets and NO TMO. + * ts_last_pkt = time-stamp of the last pkt in the block. + * + * ts_first_pkt: + * Is always the time-stamp when the block was opened. + * Case a) ZERO packets + * No packets to deal with but atleast you know the + * time-interval of this block. + * Case b) Non-zero packets + * Use the ts of the first packet in the block. + * + */ + struct tpacket_bd_ts ts_first_pkt, ts_last_pkt; +}; + +union tpacket_bd_header_u { + struct tpacket_hdr_v1 bh1; +}; + +struct tpacket_block_desc { + __u32 version; + __u32 offset_to_priv; + union tpacket_bd_header_u hdr; +}; + +#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll)) +#define TPACKET3_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket3_hdr)) + sizeof(struct sockaddr_ll)) + +enum tpacket_versions { + TPACKET_V1, + TPACKET_V2, + TPACKET_V3 +}; + +/* + Frame structure: + + - Start. Frame must be aligned to TPACKET_ALIGNMENT=16 + - struct tpacket_hdr + - pad to TPACKET_ALIGNMENT=16 + - struct sockaddr_ll + - Gap, chosen so that packet data (Start+tp_net) alignes to TPACKET_ALIGNMENT=16 + - Start+tp_mac: [ Optional MAC header ] + - Start+tp_net: Packet data, aligned to TPACKET_ALIGNMENT=16. + - Pad to align to TPACKET_ALIGNMENT=16 + */ + +struct tpacket_req { + unsigned int tp_block_size; /* Minimal size of contiguous block */ + unsigned int tp_block_nr; /* Number of blocks */ + unsigned int tp_frame_size; /* Size of frame */ + unsigned int tp_frame_nr; /* Total number of frames */ +}; + +struct tpacket_req3 { + unsigned int tp_block_size; /* Minimal size of contiguous block */ + unsigned int tp_block_nr; /* Number of blocks */ + unsigned int tp_frame_size; /* Size of frame */ + unsigned int tp_frame_nr; /* Total number of frames */ + unsigned int tp_retire_blk_tov; /* timeout in msecs */ + unsigned int tp_sizeof_priv; /* offset to private data area */ + unsigned int tp_feature_req_word; +}; + +union tpacket_req_u { + struct tpacket_req req; + struct tpacket_req3 req3; +}; + +struct packet_mreq { + int mr_ifindex; + unsigned short mr_type; + unsigned short mr_alen; + unsigned char mr_address[8]; +}; + +#define PACKET_MR_MULTICAST 0 +#define PACKET_MR_PROMISC 1 +#define PACKET_MR_ALLMULTI 2 +#define PACKET_MR_UNICAST 3 + +#endif diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h new file mode 100644 index 00000000..d70034bc --- /dev/null +++ b/include/linux/if_phonet.h @@ -0,0 +1,19 @@ +/* + * File: if_phonet.h + * + * Phonet interface kernel definitions + * + * Copyright (C) 2008 Nokia Corporation. All rights reserved. + */ +#ifndef LINUX_IF_PHONET_H +#define LINUX_IF_PHONET_H + +#define PHONET_MIN_MTU 6 /* pn_length = 0 */ +#define PHONET_MAX_MTU 65541 /* pn_length = 0xffff */ +#define PHONET_DEV_MTU PHONET_MAX_MTU + +#ifdef __KERNEL__ +extern struct header_ops phonet_header_ops; +#endif + +#endif diff --git a/include/linux/if_plip.h b/include/linux/if_plip.h new file mode 100644 index 00000000..6298c7e8 --- /dev/null +++ b/include/linux/if_plip.h @@ -0,0 +1,27 @@ +/* + * NET3 PLIP tuning facilities for the new Niibe PLIP. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#ifndef _LINUX_IF_PLIP_H +#define _LINUX_IF_PLIP_H + +#include <linux/sockios.h> + +#define SIOCDEVPLIP SIOCDEVPRIVATE + +struct plipconf { + unsigned short pcmd; + unsigned long nibble; + unsigned long trigger; +}; + +#define PLIP_GET_TIMEOUT 0x1 +#define PLIP_SET_TIMEOUT 0x2 + +#endif diff --git a/include/linux/if_ppp.h b/include/linux/if_ppp.h new file mode 100644 index 00000000..9048fabb --- /dev/null +++ b/include/linux/if_ppp.h @@ -0,0 +1 @@ +#include <linux/ppp-ioctl.h> diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h new file mode 100644 index 00000000..23cefa11 --- /dev/null +++ b/include/linux/if_pppol2tp.h @@ -0,0 +1,82 @@ +/*************************************************************************** + * Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661) + * + * This file supplies definitions required by the PPP over L2TP driver + * (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c + * + * License: + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#ifndef __LINUX_IF_PPPOL2TP_H +#define __LINUX_IF_PPPOL2TP_H + +#include <linux/types.h> + +#ifdef __KERNEL__ +#include <linux/in.h> +#endif + +/* Structure used to connect() the socket to a particular tunnel UDP + * socket. + */ +struct pppol2tp_addr { + __kernel_pid_t pid; /* pid that owns the fd. + * 0 => current */ + int fd; /* FD of UDP socket to use */ + + struct sockaddr_in addr; /* IP address and port to send to */ + + __u16 s_tunnel, s_session; /* For matching incoming packets */ + __u16 d_tunnel, d_session; /* For sending outgoing packets */ +}; + +/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 + * bits. So we need a different sockaddr structure. + */ +struct pppol2tpv3_addr { + __kernel_pid_t pid; /* pid that owns the fd. + * 0 => current */ + int fd; /* FD of UDP or IP socket to use */ + + struct sockaddr_in addr; /* IP address and port to send to */ + + __u32 s_tunnel, s_session; /* For matching incoming packets */ + __u32 d_tunnel, d_session; /* For sending outgoing packets */ +}; + +/* Socket options: + * DEBUG - bitmask of debug message categories + * SENDSEQ - 0 => don't send packets with sequence numbers + * 1 => send packets with sequence numbers + * RECVSEQ - 0 => receive packet sequence numbers are optional + * 1 => drop receive packets without sequence numbers + * LNSMODE - 0 => act as LAC. + * 1 => act as LNS. + * REORDERTO - reorder timeout (in millisecs). If 0, don't try to reorder. + */ +enum { + PPPOL2TP_SO_DEBUG = 1, + PPPOL2TP_SO_RECVSEQ = 2, + PPPOL2TP_SO_SENDSEQ = 3, + PPPOL2TP_SO_LNSMODE = 4, + PPPOL2TP_SO_REORDERTO = 5, +}; + +/* Debug message categories for the DEBUG socket option */ +enum { + PPPOL2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if + * compiled in) */ + PPPOL2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel + * interface */ + PPPOL2TP_MSG_SEQ = (1 << 2), /* sequence numbers */ + PPPOL2TP_MSG_DATA = (1 << 3), /* data packets */ +}; + + + +#endif diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h new file mode 100644 index 00000000..c06bd6c8 --- /dev/null +++ b/include/linux/if_pppolac.h @@ -0,0 +1,33 @@ +/* include/linux/if_pppolac.h + * + * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661) + * + * Copyright (C) 2009 Google, Inc. 
+ * Author: Chia-chi Yeh <chiachi@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_IF_PPPOLAC_H +#define __LINUX_IF_PPPOLAC_H + +#include <linux/socket.h> +#include <linux/types.h> + +struct sockaddr_pppolac { + sa_family_t sa_family; /* AF_PPPOX */ + unsigned int sa_protocol; /* PX_PROTO_OLAC */ + int udp_socket; + struct __attribute__((packed)) { + __u16 tunnel, session; + } local, remote; +} __attribute__((packed)); + +#endif /* __LINUX_IF_PPPOLAC_H */ diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h new file mode 100644 index 00000000..0cf34b4d --- /dev/null +++ b/include/linux/if_pppopns.h @@ -0,0 +1,32 @@ +/* include/linux/if_pppopns.h + * + * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637) + * + * Copyright (C) 2009 Google, Inc. + * Author: Chia-chi Yeh <chiachi@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_IF_PPPOPNS_H +#define __LINUX_IF_PPPOPNS_H + +#include <linux/socket.h> +#include <linux/types.h> + +struct sockaddr_pppopns { + sa_family_t sa_family; /* AF_PPPOX */ + unsigned int sa_protocol; /* PX_PROTO_OPNS */ + int tcp_socket; + __u16 local; + __u16 remote; +} __attribute__((packed)); + +#endif /* __LINUX_IF_PPPOPNS_H */ diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h new file mode 100644 index 00000000..1597b8bc --- /dev/null +++ b/include/linux/if_pppox.h @@ -0,0 +1,250 @@ +/*************************************************************************** + * Linux PPP over X - Generic PPP transport layer sockets + * Linux PPP over Ethernet (PPPoE) Socket Implementation (RFC 2516) + * + * This file supplies definitions required by the PPP over Ethernet driver + * (pppox.c). All version information wrt this file is located in pppox.c + * + * License: + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#ifndef __LINUX_IF_PPPOX_H +#define __LINUX_IF_PPPOX_H + + +#include <linux/types.h> +#include <asm/byteorder.h> + +#include <linux/socket.h> +#include <linux/if_ether.h> +#ifdef __KERNEL__ +#include <linux/if.h> +#include <linux/netdevice.h> +#include <linux/ppp_channel.h> +#endif /* __KERNEL__ */ +#include <linux/if_pppol2tp.h> +#include <linux/if_pppolac.h> +#include <linux/if_pppopns.h> + +/* For user-space programs to pick up these definitions + * which they wouldn't get otherwise without defining __KERNEL__ + */ +#ifndef AF_PPPOX +#define AF_PPPOX 24 +#define PF_PPPOX AF_PPPOX +#endif /* !(AF_PPPOX) */ + +/************************************************************************ + * PPPoE addressing definition + */ +typedef __be16 sid_t; +struct pppoe_addr { + sid_t sid; /* Session identifier */ + unsigned char remote[ETH_ALEN]; /* Remote address */ + char dev[IFNAMSIZ]; /* Local device to use */ +}; + +/************************************************************************ + * PPTP addressing definition + */ +struct pptp_addr { + __be16 call_id; + struct in_addr sin_addr; +}; + +/************************************************************************ + * Protocols supported by AF_PPPOX + */ +#define PX_PROTO_OE 0 /* Currently just PPPoE */ +#define PX_PROTO_OL2TP 1 /* Now L2TP also */ +#define PX_PROTO_PPTP 2 +#define PX_PROTO_OLAC 3 +#define PX_PROTO_OPNS 4 +#define PX_MAX_PROTO 5 + +struct sockaddr_pppox { + __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */ + unsigned int sa_protocol; /* protocol identifier */ + union { + struct pppoe_addr pppoe; + struct pptp_addr pptp; + } sa_addr; +} __attribute__((packed)); + +/* The use of the above union isn't viable because the size of this + * struct must stay fixed over time -- applications use sizeof(struct + * sockaddr_pppox) to fill it. We use a protocol specific sockaddr + * type instead. + */ +struct sockaddr_pppol2tp { + __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */ + unsigned int sa_protocol; /* protocol identifier */ + struct pppol2tp_addr pppol2tp; +} __attribute__((packed)); + +/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 + * bits. So we need a different sockaddr structure. 
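/*
 * Editor's note -- illustrative sketch, not part of this header: how a
 * user-space PPPoE implementation typically fills struct sockaddr_pppox
 * (defined above) to hand an established session to the kernel PPP layer.
 * The session id, peer MAC and device name are caller-supplied placeholders;
 * error handling is omitted.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/if.h>		/* IFNAMSIZ */
#include <linux/if_pppox.h>

static int attach_pppoe_session(__be16 session_id,
				const unsigned char peer[ETH_ALEN],
				const char *ifname)
{
	struct sockaddr_pppox sp;
	int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_OE);

	if (fd < 0)
		return -1;

	memset(&sp, 0, sizeof(sp));
	sp.sa_family   = AF_PPPOX;
	sp.sa_protocol = PX_PROTO_OE;
	sp.sa_addr.pppoe.sid = session_id;	/* already big-endian */
	memcpy(sp.sa_addr.pppoe.remote, peer, ETH_ALEN);
	strncpy(sp.sa_addr.pppoe.dev, ifname, IFNAMSIZ - 1);

	if (connect(fd, (struct sockaddr *)&sp, sizeof(sp)) < 0)
		return -1;
	return fd;	/* usable with the PPP ioctl()s, e.g. PPPIOCGCHAN */
}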
+ */ +struct sockaddr_pppol2tpv3 { + __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */ + unsigned int sa_protocol; /* protocol identifier */ + struct pppol2tpv3_addr pppol2tp; +} __attribute__((packed)); + +/********************************************************************* + * + * ioctl interface for defining forwarding of connections + * + ********************************************************************/ + +#define PPPOEIOCSFWD _IOW(0xB1 ,0, size_t) +#define PPPOEIOCDFWD _IO(0xB1 ,1) +/*#define PPPOEIOCGFWD _IOWR(0xB1,2, size_t)*/ + +/* Codes to identify message types */ +#define PADI_CODE 0x09 +#define PADO_CODE 0x07 +#define PADR_CODE 0x19 +#define PADS_CODE 0x65 +#define PADT_CODE 0xa7 +struct pppoe_tag { + __be16 tag_type; + __be16 tag_len; + char tag_data[0]; +} __attribute__ ((packed)); + +/* Tag identifiers */ +#define PTT_EOL __cpu_to_be16(0x0000) +#define PTT_SRV_NAME __cpu_to_be16(0x0101) +#define PTT_AC_NAME __cpu_to_be16(0x0102) +#define PTT_HOST_UNIQ __cpu_to_be16(0x0103) +#define PTT_AC_COOKIE __cpu_to_be16(0x0104) +#define PTT_VENDOR __cpu_to_be16(0x0105) +#define PTT_RELAY_SID __cpu_to_be16(0x0110) +#define PTT_SRV_ERR __cpu_to_be16(0x0201) +#define PTT_SYS_ERR __cpu_to_be16(0x0202) +#define PTT_GEN_ERR __cpu_to_be16(0x0203) + +struct pppoe_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 ver : 4; + __u8 type : 4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 type : 4; + __u8 ver : 4; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 code; + __be16 sid; + __be16 length; + struct pppoe_tag tag[0]; +} __attribute__((packed)); + +/* Length of entire PPPoE + PPP header */ +#define PPPOE_SES_HLEN 8 + +#ifdef __KERNEL__ +#include <linux/skbuff.h> + +static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb) +{ + return (struct pppoe_hdr *)skb_network_header(skb); +} + +struct pppoe_opt { + struct net_device *dev; /* device associated with socket*/ + int ifindex; /* ifindex of device associated with socket */ + struct pppoe_addr pa; /* what this socket is bound to*/ + struct sockaddr_pppox relay; /* what socket data will be + relayed to (PPPoE relaying) */ +}; + +struct pptp_opt { + struct pptp_addr src_addr; + struct pptp_addr dst_addr; + u32 ack_sent, ack_recv; + u32 seq_sent, seq_recv; + int ppp_flags; +}; + +struct pppolac_opt { + __u32 local; + __u32 remote; + __u32 recv_sequence; + __u32 xmit_sequence; + atomic_t sequencing; + int (*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb); +}; + +struct pppopns_opt { + __u16 local; + __u16 remote; + __u32 recv_sequence; + __u32 xmit_sequence; + void (*data_ready)(struct sock *sk_raw, int length); + int (*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb); +}; + +#include <net/sock.h> + +struct pppox_sock { + /* struct sock must be the first member of pppox_sock */ + struct sock sk; + struct ppp_channel chan; + struct pppox_sock *next; /* for hash table */ + union { + struct pppoe_opt pppoe; + struct pptp_opt pptp; + struct pppolac_opt lac; + struct pppopns_opt pns; + } proto; + __be16 num; +}; +#define pppoe_dev proto.pppoe.dev +#define pppoe_ifindex proto.pppoe.ifindex +#define pppoe_pa proto.pppoe.pa +#define pppoe_relay proto.pppoe.relay + +static inline struct pppox_sock *pppox_sk(struct sock *sk) +{ + return (struct pppox_sock *)sk; +} + +static inline struct sock *sk_pppox(struct pppox_sock *po) +{ + return (struct sock *)po; +} + +struct module; + +struct pppox_proto { + int (*create)(struct net *net, struct socket *sock); + int (*ioctl)(struct socket *sock, unsigned int 
cmd, + unsigned long arg); + struct module *owner; +}; + +extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); +extern void unregister_pppox_proto(int proto_num); +extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ +extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); + +/* PPPoX socket states */ +enum { + PPPOX_NONE = 0, /* initial state */ + PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */ + PPPOX_BOUND = 2, /* bound to ppp device */ + PPPOX_RELAY = 4, /* forwarding is enabled */ + PPPOX_ZOMBIE = 8, /* dead, but still bound to ppp device */ + PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/ +}; + +#endif /* __KERNEL__ */ + +#endif /* !(__LINUX_IF_PPPOX_H) */ diff --git a/include/linux/if_slip.h b/include/linux/if_slip.h new file mode 100644 index 00000000..1eb4e3a8 --- /dev/null +++ b/include/linux/if_slip.h @@ -0,0 +1,30 @@ +/* + * Swansea University Computer Society NET3 + * + * This file declares the constants of special use with the SLIP/CSLIP/ + * KISS TNC driver. + */ + +#ifndef __LINUX_SLIP_H +#define __LINUX_SLIP_H + +#define SL_MODE_SLIP 0 +#define SL_MODE_CSLIP 1 +#define SL_MODE_KISS 4 + +#define SL_OPT_SIXBIT 2 +#define SL_OPT_ADAPTIVE 8 + +/* + * VSV = ioctl for keepalive & outfill in SLIP driver + */ + +#define SIOCSKEEPALIVE (SIOCDEVPRIVATE) /* Set keepalive timeout in sec */ +#define SIOCGKEEPALIVE (SIOCDEVPRIVATE+1) /* Get keepalive timeout */ +#define SIOCSOUTFILL (SIOCDEVPRIVATE+2) /* Set outfill timeout */ +#define SIOCGOUTFILL (SIOCDEVPRIVATE+3) /* Get outfill timeout */ +#define SIOCSLEASE (SIOCDEVPRIVATE+4) /* Set "leased" line type */ +#define SIOCGLEASE (SIOCDEVPRIVATE+5) /* Get line type */ + + +#endif diff --git a/include/linux/if_strip.h b/include/linux/if_strip.h new file mode 100644 index 00000000..6526a623 --- /dev/null +++ b/include/linux/if_strip.h @@ -0,0 +1,27 @@ +/* + * if_strip.h -- + * + * Definitions for the STRIP interface + * + * Copyright 1996 The Board of Trustees of The Leland Stanford + * Junior University. All Rights Reserved. + * + * Permission to use, copy, modify, and distribute this + * software and its documentation for any purpose and without + * fee is hereby granted, provided that the above copyright + * notice appear in all copies. Stanford University + * makes no representations about the suitability of this + * software for any purpose. It is provided "as is" without + * express or implied warranty. + */ + +#ifndef __LINUX_STRIP_H +#define __LINUX_STRIP_H + +#include <linux/types.h> + +typedef struct { + __u8 c[6]; +} MetricomAddress; + +#endif diff --git a/include/linux/if_team.h b/include/linux/if_team.h new file mode 100644 index 00000000..58404b0c --- /dev/null +++ b/include/linux/if_team.h @@ -0,0 +1,252 @@ +/* + * include/linux/if_team.h - Network team device driver header + * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _LINUX_IF_TEAM_H_ +#define _LINUX_IF_TEAM_H_ + +#ifdef __KERNEL__ + +struct team_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_dropped; + u32 tx_dropped; +}; + +struct team; + +struct team_port { + struct net_device *dev; + struct hlist_node hlist; /* node in hash list */ + struct list_head list; /* node in ordinary list */ + struct team *team; + int index; + + /* + * A place for storing original values of the device before it + * become a port. + */ + struct { + unsigned char dev_addr[MAX_ADDR_LEN]; + unsigned int mtu; + } orig; + + bool linkup; + u32 speed; + u8 duplex; + + /* Custom gennetlink interface related flags */ + bool changed; + bool removed; + + struct rcu_head rcu; +}; + +struct team_mode_ops { + int (*init)(struct team *team); + void (*exit)(struct team *team); + rx_handler_result_t (*receive)(struct team *team, + struct team_port *port, + struct sk_buff *skb); + bool (*transmit)(struct team *team, struct sk_buff *skb); + int (*port_enter)(struct team *team, struct team_port *port); + void (*port_leave)(struct team *team, struct team_port *port); + void (*port_change_mac)(struct team *team, struct team_port *port); +}; + +enum team_option_type { + TEAM_OPTION_TYPE_U32, + TEAM_OPTION_TYPE_STRING, +}; + +struct team_option { + struct list_head list; + const char *name; + enum team_option_type type; + int (*getter)(struct team *team, void *arg); + int (*setter)(struct team *team, void *arg); + + /* Custom gennetlink interface related flags */ + bool changed; + bool removed; +}; + +struct team_mode { + struct list_head list; + const char *kind; + struct module *owner; + size_t priv_size; + const struct team_mode_ops *ops; +}; + +#define TEAM_PORT_HASHBITS 4 +#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS) + +#define TEAM_MODE_PRIV_LONGS 4 +#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS) + +struct team { + struct net_device *dev; /* associated netdevice */ + struct team_pcpu_stats __percpu *pcpu_stats; + + struct mutex lock; /* used for overall locking, e.g. 
port lists write */ + + /* + * port lists with port count + */ + int port_count; + struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES]; + struct list_head port_list; + + struct list_head option_list; + + const struct team_mode *mode; + struct team_mode_ops ops; + long mode_priv[TEAM_MODE_PRIV_LONGS]; +}; + +static inline struct hlist_head *team_port_index_hash(struct team *team, + int port_index) +{ + return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; +} + +static inline struct team_port *team_get_port_by_index(struct team *team, + int port_index) +{ + struct hlist_node *p; + struct team_port *port; + struct hlist_head *head = team_port_index_hash(team, port_index); + + hlist_for_each_entry(port, p, head, hlist) + if (port->index == port_index) + return port; + return NULL; +} +static inline struct team_port *team_get_port_by_index_rcu(struct team *team, + int port_index) +{ + struct hlist_node *p; + struct team_port *port; + struct hlist_head *head = team_port_index_hash(team, port_index); + + hlist_for_each_entry_rcu(port, p, head, hlist) + if (port->index == port_index) + return port; + return NULL; +} + +extern int team_port_set_team_mac(struct team_port *port); +extern int team_options_register(struct team *team, + const struct team_option *option, + size_t option_count); +extern void team_options_unregister(struct team *team, + const struct team_option *option, + size_t option_count); +extern int team_mode_register(struct team_mode *mode); +extern int team_mode_unregister(struct team_mode *mode); + +#endif /* __KERNEL__ */ + +#define TEAM_STRING_MAX_LEN 32 + +/********************************** + * NETLINK_GENERIC netlink family. + **********************************/ + +enum { + TEAM_CMD_NOOP, + TEAM_CMD_OPTIONS_SET, + TEAM_CMD_OPTIONS_GET, + TEAM_CMD_PORT_LIST_GET, + + __TEAM_CMD_MAX, + TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1), +}; + +enum { + TEAM_ATTR_UNSPEC, + TEAM_ATTR_TEAM_IFINDEX, /* u32 */ + TEAM_ATTR_LIST_OPTION, /* nest */ + TEAM_ATTR_LIST_PORT, /* nest */ + + __TEAM_ATTR_MAX, + TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1, +}; + +/* Nested layout of get/set msg: + * + * [TEAM_ATTR_LIST_OPTION] + * [TEAM_ATTR_ITEM_OPTION] + * [TEAM_ATTR_OPTION_*], ... + * [TEAM_ATTR_ITEM_OPTION] + * [TEAM_ATTR_OPTION_*], ... + * ... + * [TEAM_ATTR_LIST_PORT] + * [TEAM_ATTR_ITEM_PORT] + * [TEAM_ATTR_PORT_*], ... + * [TEAM_ATTR_ITEM_PORT] + * [TEAM_ATTR_PORT_*], ... + * ... 
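
A hedged kernel-side sketch of how a reply following this nesting might be assembled with the standard netlink attribute helpers; the function name and the single hard-coded option are illustrative only, and real code would iterate team->option_list and also emit the option data:

#include <net/netlink.h>

/* Emit TEAM_ATTR_TEAM_IFINDEX plus one nested option item, as laid out above. */
static int example_fill_options(struct sk_buff *skb, struct team *team)
{
        struct nlattr *option_list, *option_item;

        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
                return -EMSGSIZE;

        option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
        if (!option_list)
                return -EMSGSIZE;

        option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
        if (!option_item)
                return -EMSGSIZE;
        if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, "mode") ||
            nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, TEAM_OPTION_TYPE_STRING))
                return -EMSGSIZE;
        nla_nest_end(skb, option_item);

        nla_nest_end(skb, option_list);
        return 0;
}
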
+ */ + +enum { + TEAM_ATTR_ITEM_OPTION_UNSPEC, + TEAM_ATTR_ITEM_OPTION, /* nest */ + + __TEAM_ATTR_ITEM_OPTION_MAX, + TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1, +}; + +enum { + TEAM_ATTR_OPTION_UNSPEC, + TEAM_ATTR_OPTION_NAME, /* string */ + TEAM_ATTR_OPTION_CHANGED, /* flag */ + TEAM_ATTR_OPTION_TYPE, /* u8 */ + TEAM_ATTR_OPTION_DATA, /* dynamic */ + TEAM_ATTR_OPTION_REMOVED, /* flag */ + + __TEAM_ATTR_OPTION_MAX, + TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, +}; + +enum { + TEAM_ATTR_ITEM_PORT_UNSPEC, + TEAM_ATTR_ITEM_PORT, /* nest */ + + __TEAM_ATTR_ITEM_PORT_MAX, + TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1, +}; + +enum { + TEAM_ATTR_PORT_UNSPEC, + TEAM_ATTR_PORT_IFINDEX, /* u32 */ + TEAM_ATTR_PORT_CHANGED, /* flag */ + TEAM_ATTR_PORT_LINKUP, /* flag */ + TEAM_ATTR_PORT_SPEED, /* u32 */ + TEAM_ATTR_PORT_DUPLEX, /* u8 */ + TEAM_ATTR_PORT_REMOVED, /* flag */ + + __TEAM_ATTR_PORT_MAX, + TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1, +}; + +/* + * NETLINK_GENERIC related info + */ +#define TEAM_GENL_NAME "team" +#define TEAM_GENL_VERSION 0x1 +#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event" + +#endif /* _LINUX_IF_TEAM_H_ */ diff --git a/include/linux/if_tr.h b/include/linux/if_tr.h new file mode 100644 index 00000000..fc23aeb0 --- /dev/null +++ b/include/linux/if_tr.h @@ -0,0 +1,103 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the Token-Ring IEEE 802.5 interface. + * + * Version: @(#)if_tr.h 0.0 07/11/94 + * + * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Donald Becker, <becker@super.org> + * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IF_TR_H +#define _LINUX_IF_TR_H + +#include <linux/types.h> +#include <asm/byteorder.h> /* For __be16 */ + +/* IEEE 802.5 Token-Ring magic constants. The frame sizes omit the preamble + and FCS/CRC (frame check sequence). */ +#define TR_ALEN 6 /* Octets in one token-ring addr */ +#define TR_HLEN (sizeof(struct trh_hdr)+sizeof(struct trllc)) +#define AC 0x10 +#define LLC_FRAME 0x40 + +/* LLC and SNAP constants */ +#define EXTENDED_SAP 0xAA +#define UI_CMD 0x03 + +/* This is an Token-Ring frame header. */ +struct trh_hdr { + __u8 ac; /* access control field */ + __u8 fc; /* frame control field */ + __u8 daddr[TR_ALEN]; /* destination address */ + __u8 saddr[TR_ALEN]; /* source address */ + __be16 rcf; /* route control field */ + __be16 rseg[8]; /* routing registers */ +}; + +#ifdef __KERNEL__ +#include <linux/skbuff.h> + +static inline struct trh_hdr *tr_hdr(const struct sk_buff *skb) +{ + return (struct trh_hdr *)skb_mac_header(skb); +} +#endif + +/* This is an Token-Ring LLC structure */ +struct trllc { + __u8 dsap; /* destination SAP */ + __u8 ssap; /* source SAP */ + __u8 llc; /* LLC control field */ + __u8 protid[3]; /* protocol id */ + __be16 ethertype; /* ether type field */ +}; + +/* Token-Ring statistics collection data. 
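
Before the statistics block, a small hedged illustration of how the frame header defined above is typically inspected by drivers (the helper name is made up; it assumes <linux/if_tr.h>, whose TR_RII bit appears further down):

/* True if the frame's source address announces a routing information field. */
static inline int example_tr_has_rif(const struct sk_buff *skb)
{
        const struct trh_hdr *trh = tr_hdr(skb);

        return trh->saddr[0] & TR_RII;
}
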
*/ +struct tr_statistics { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* no space in linux buffers */ + unsigned long tx_dropped; /* no space available in linux */ + unsigned long multicast; /* multicast packets received */ + unsigned long transmit_collision; + + /* detailed Token-Ring errors. See IBM Token-Ring Network + Architecture for more info */ + + unsigned long line_errors; + unsigned long internal_errors; + unsigned long burst_errors; + unsigned long A_C_errors; + unsigned long abort_delimiters; + unsigned long lost_frames; + unsigned long recv_congest_count; + unsigned long frame_copied_errors; + unsigned long frequency_errors; + unsigned long token_errors; + unsigned long dummy1; +}; + +/* source routing stuff */ +#define TR_RII 0x80 +#define TR_RCF_DIR_BIT 0x80 +#define TR_RCF_LEN_MASK 0x1f00 +#define TR_RCF_BROADCAST 0x8000 /* all-routes broadcast */ +#define TR_RCF_LIMITED_BROADCAST 0xC000 /* single-route broadcast */ +#define TR_RCF_FRAME2K 0x20 +#define TR_RCF_BROADCAST_MASK 0xC000 +#define TR_MAXRIFLEN 18 + +#endif /* _LINUX_IF_TR_H */ diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h new file mode 100644 index 00000000..06b18297 --- /dev/null +++ b/include/linux/if_tun.h @@ -0,0 +1,108 @@ +/* + * Universal TUN/TAP device driver. + * Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __IF_TUN_H +#define __IF_TUN_H + +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/filter.h> + +/* Read queue size */ +#define TUN_READQ_SIZE 500 + +/* TUN device flags */ +#define TUN_TUN_DEV 0x0001 +#define TUN_TAP_DEV 0x0002 +#define TUN_TYPE_MASK 0x000f + +#define TUN_FASYNC 0x0010 +#define TUN_NOCHECKSUM 0x0020 +#define TUN_NO_PI 0x0040 +#define TUN_ONE_QUEUE 0x0080 +#define TUN_PERSIST 0x0100 +#define TUN_VNET_HDR 0x0200 + +/* Ioctl defines */ +#define TUNSETNOCSUM _IOW('T', 200, int) +#define TUNSETDEBUG _IOW('T', 201, int) +#define TUNSETIFF _IOW('T', 202, int) +#define TUNSETPERSIST _IOW('T', 203, int) +#define TUNSETOWNER _IOW('T', 204, int) +#define TUNSETLINK _IOW('T', 205, int) +#define TUNSETGROUP _IOW('T', 206, int) +#define TUNGETFEATURES _IOR('T', 207, unsigned int) +#define TUNSETOFFLOAD _IOW('T', 208, unsigned int) +#define TUNSETTXFILTER _IOW('T', 209, unsigned int) +#define TUNGETIFF _IOR('T', 210, unsigned int) +#define TUNGETSNDBUF _IOR('T', 211, int) +#define TUNSETSNDBUF _IOW('T', 212, int) +#define TUNATTACHFILTER _IOW('T', 213, struct sock_fprog) +#define TUNDETACHFILTER _IOW('T', 214, struct sock_fprog) +#define TUNGETVNETHDRSZ _IOR('T', 215, int) +#define TUNSETVNETHDRSZ _IOW('T', 216, int) + +/* TUNSETIFF ifr flags */ +#define IFF_TUN 0x0001 +#define IFF_TAP 0x0002 +#define IFF_NO_PI 0x1000 +#define IFF_ONE_QUEUE 0x2000 +#define IFF_VNET_HDR 0x4000 +#define IFF_TUN_EXCL 0x8000 + +/* Features for GSO (TUNSETOFFLOAD). */ +#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ +#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */ +#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */ +#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */ +#define TUN_F_UFO 0x10 /* I can handle UFO packets */ + +/* Protocol info prepended to the packets (when IFF_NO_PI is not set) */ +#define TUN_PKT_STRIP 0x0001 +struct tun_pi { + __u16 flags; + __be16 proto; +}; + +/* + * Filter spec (used for SETXXFILTER ioctls) + * This stuff is applicable only to the TAP (Ethernet) devices. + * If the count is zero the filter is disabled and the driver accepts + * all packets (promisc mode). + * If the filter is enabled in order to accept broadcast packets + * broadcast addr must be explicitly included in the addr list. 
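
To make the ioctl surface above concrete, a hedged user-space sketch: create a TAP interface, then install a two-address MAC filter in the tun_filter layout just described. The device name and addresses are placeholders, and as noted above the broadcast address must be listed explicitly if broadcasts are wanted:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int example_tap_open_filtered(const unsigned char addrs[2][ETH_ALEN])
{
        struct ifreq ifr;
        union {
                struct tun_filter flt;
                unsigned char buf[sizeof(struct tun_filter) + 2 * ETH_ALEN];
        } req;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
        strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
        if (ioctl(fd, TUNSETIFF, &ifr) < 0)
                return -1;

        memset(&req, 0, sizeof(req));
        req.flt.count = 2;                    /* zero would disable filtering */
        memcpy(req.flt.addr, addrs, 2 * ETH_ALEN);
        if (ioctl(fd, TUNSETTXFILTER, &req.flt) < 0)
                return -1;

        return fd;
}
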
+ */ +#define TUN_FLT_ALLMULTI 0x0001 /* Accept all multicast packets */ +struct tun_filter { + __u16 flags; /* TUN_FLT_ flags see above */ + __u16 count; /* Number of addresses */ + __u8 addr[0][ETH_ALEN]; +}; + +#ifdef __KERNEL__ +#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) +struct socket *tun_get_socket(struct file *); +#else +#include <linux/err.h> +#include <linux/errno.h> +struct file; +struct socket; +static inline struct socket *tun_get_socket(struct file *f) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_TUN */ +#endif /* __KERNEL__ */ +#endif /* __IF_TUN_H */ diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h new file mode 100644 index 00000000..16b92d00 --- /dev/null +++ b/include/linux/if_tunnel.h @@ -0,0 +1,83 @@ +#ifndef _IF_TUNNEL_H_ +#define _IF_TUNNEL_H_ + +#include <linux/types.h> +#include <asm/byteorder.h> + +#ifdef __KERNEL__ +#include <linux/ip.h> +#include <linux/in6.h> +#endif + +#define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0) +#define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1) +#define SIOCDELTUNNEL (SIOCDEVPRIVATE + 2) +#define SIOCCHGTUNNEL (SIOCDEVPRIVATE + 3) +#define SIOCGETPRL (SIOCDEVPRIVATE + 4) +#define SIOCADDPRL (SIOCDEVPRIVATE + 5) +#define SIOCDELPRL (SIOCDEVPRIVATE + 6) +#define SIOCCHGPRL (SIOCDEVPRIVATE + 7) +#define SIOCGET6RD (SIOCDEVPRIVATE + 8) +#define SIOCADD6RD (SIOCDEVPRIVATE + 9) +#define SIOCDEL6RD (SIOCDEVPRIVATE + 10) +#define SIOCCHG6RD (SIOCDEVPRIVATE + 11) + +#define GRE_CSUM __cpu_to_be16(0x8000) +#define GRE_ROUTING __cpu_to_be16(0x4000) +#define GRE_KEY __cpu_to_be16(0x2000) +#define GRE_SEQ __cpu_to_be16(0x1000) +#define GRE_STRICT __cpu_to_be16(0x0800) +#define GRE_REC __cpu_to_be16(0x0700) +#define GRE_FLAGS __cpu_to_be16(0x00F8) +#define GRE_VERSION __cpu_to_be16(0x0007) + +struct ip_tunnel_parm { + char name[IFNAMSIZ]; + int link; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + struct iphdr iph; +}; + +/* SIT-mode i_flags */ +#define SIT_ISATAP 0x0001 + +struct ip_tunnel_prl { + __be32 addr; + __u16 flags; + __u16 __reserved; + __u32 datalen; + __u32 __reserved2; + /* data follows */ +}; + +/* PRL flags */ +#define PRL_DEFAULT 0x0001 + +struct ip_tunnel_6rd { + struct in6_addr prefix; + __be32 relay_prefix; + __u16 prefixlen; + __u16 relay_prefixlen; +}; + +enum { + IFLA_GRE_UNSPEC, + IFLA_GRE_LINK, + IFLA_GRE_IFLAGS, + IFLA_GRE_OFLAGS, + IFLA_GRE_IKEY, + IFLA_GRE_OKEY, + IFLA_GRE_LOCAL, + IFLA_GRE_REMOTE, + IFLA_GRE_TTL, + IFLA_GRE_TOS, + IFLA_GRE_PMTUDISC, + __IFLA_GRE_MAX, +}; + +#define IFLA_GRE_MAX (__IFLA_GRE_MAX - 1) + +#endif /* _IF_TUNNEL_H_ */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h new file mode 100644 index 00000000..a810987c --- /dev/null +++ b/include/linux/if_vlan.h @@ -0,0 +1,407 @@ +/* + * VLAN An implementation of 802.1Q VLAN tagging. + * + * Authors: Ben Greear <greearb@candelatech.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#ifndef _LINUX_IF_VLAN_H_ +#define _LINUX_IF_VLAN_H_ + +#ifdef __KERNEL__ +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/bug.h> + +#define VLAN_HLEN 4 /* The additional bytes required by VLAN + * (in addition to the Ethernet header) + */ +#define VLAN_ETH_HLEN 18 /* Total octets in header. */ +#define VLAN_ETH_ZLEN 64 /* Min. 
octets in frame sans FCS */ + +/* + * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan + */ +#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ +#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ + +/* + * struct vlan_hdr - vlan header + * @h_vlan_TCI: priority and VLAN ID + * @h_vlan_encapsulated_proto: packet type ID or len + */ +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +/** + * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) + * @h_dest: destination ethernet address + * @h_source: source ethernet address + * @h_vlan_proto: ethernet protocol (always 0x8100) + * @h_vlan_TCI: priority and VLAN ID + * @h_vlan_encapsulated_proto: packet type ID or len + */ +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +#include <linux/skbuff.h> + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb_mac_header(skb); +} + +#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ +#define VLAN_PRIO_SHIFT 13 +#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ +#define VLAN_TAG_PRESENT VLAN_CFI_MASK +#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ +#define VLAN_N_VID 4096 + +/* found in socket.c */ +extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); + +struct vlan_info; + +static inline int is_vlan_dev(struct net_device *dev) +{ + return dev->priv_flags & IFF_802_1Q_VLAN; +} + +#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) +#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) + +extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, + u16 vlan_id); +extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); +extern u16 vlan_dev_vlan_id(const struct net_device *dev); + +extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler); +extern struct sk_buff *vlan_untag(struct sk_buff *skb); + +extern int vlan_vid_add(struct net_device *dev, unsigned short vid); +extern void vlan_vid_del(struct net_device *dev, unsigned short vid); + +extern int vlan_vids_add_by_dev(struct net_device *dev, + const struct net_device *by_dev); +extern void vlan_vids_del_by_dev(struct net_device *dev, + const struct net_device *by_dev); +#else +static inline struct net_device * +__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) +{ + return NULL; +} + +static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) +{ + BUG(); + return NULL; +} + +static inline u16 vlan_dev_vlan_id(const struct net_device *dev) +{ + BUG(); + return 0; +} + +static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler) +{ + if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler) + (*skb)->pkt_type = PACKET_OTHERHOST; + return false; +} + +static inline struct sk_buff *vlan_untag(struct sk_buff *skb) +{ + return skb; +} + +static inline int vlan_vid_add(struct net_device *dev, unsigned short vid) +{ + return 0; +} + +static inline void vlan_vid_del(struct net_device *dev, unsigned short vid) +{ +} + +static inline int vlan_vids_add_by_dev(struct net_device *dev, + const struct net_device *by_dev) +{ + return 0; +} + +static inline void vlan_vids_del_by_dev(struct net_device *dev, + const struct net_device *by_dev) +{ +} +#endif + +/** + * 
vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci) +{ + struct vlan_ethhdr *veth; + + if (skb_cow_head(skb, VLAN_HLEN) < 0) { + kfree_skb(skb); + return NULL; + } + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); + + /* Move the mac addresses to the beginning of the new header. */ + memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); + skb->mac_header -= VLAN_HLEN; + + /* first, the ethernet type */ + veth->h_vlan_proto = htons(ETH_P_8021Q); + + /* now, the TCI */ + veth->h_vlan_TCI = htons(vlan_tci); + + return skb; +} + +/** + * __vlan_put_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + */ +static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) +{ + skb = vlan_insert_tag(skb, vlan_tci); + if (skb) + skb->protocol = htons(ETH_P_8021Q); + return skb; +} + +/** + * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting + * @skb: skbuff to tag + * @vlan_tci: VLAN TCI to insert + * + * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest + */ +static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ + skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; + return skb; +} + +#define HAVE_VLAN_PUT_TAG + +/** + * vlan_put_tag - inserts VLAN tag according to device features + * @skb: skbuff to tag + * @vlan_tci: VLAN TCI to insert + * + * Assumes skb->dev is the target that will xmit this frame. + * Returns a VLAN tagged skb. 
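
A hedged driver-side sketch of the helper documented here (vlan_put_tag is defined just below); the priority and VID are placeholders and the transmit routine is purely illustrative:

/* Compose a TCI: priority in the top three bits, VID in the low twelve. */
static inline u16 example_build_tci(u16 prio, u16 vid)
{
        return (prio << VLAN_PRIO_SHIFT) | (vid & VLAN_VID_MASK);
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* Uses hardware tag insertion when NETIF_F_HW_VLAN_TX is set. */
        skb = vlan_put_tag(skb, example_build_tci(5, 100));
        if (!skb)
                return NETDEV_TX_OK;   /* insertion failed; skb already freed */

        /* ... queue the (possibly re-allocated) skb to the hardware ... */
        return NETDEV_TX_OK;
}
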
+ */ +static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) +{ + if (skb->dev->features & NETIF_F_HW_VLAN_TX) { + return __vlan_hwaccel_put_tag(skb, vlan_tci); + } else { + return __vlan_put_tag(skb, vlan_tci); + } +} + +/** + * __vlan_get_tag - get the VLAN ID that is part of the payload + * @skb: skbuff to query + * @vlan_tci: buffer to store vlaue + * + * Returns error if the skb is not of VLAN type + */ +static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) +{ + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; + + if (veth->h_vlan_proto != htons(ETH_P_8021Q)) { + return -EINVAL; + } + + *vlan_tci = ntohs(veth->h_vlan_TCI); + return 0; +} + +/** + * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] + * @skb: skbuff to query + * @vlan_tci: buffer to store vlaue + * + * Returns error if @skb->vlan_tci is not set correctly + */ +static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, + u16 *vlan_tci) +{ + if (vlan_tx_tag_present(skb)) { + *vlan_tci = vlan_tx_tag_get(skb); + return 0; + } else { + *vlan_tci = 0; + return -EINVAL; + } +} + +#define HAVE_VLAN_GET_TAG + +/** + * vlan_get_tag - get the VLAN ID from the skb + * @skb: skbuff to query + * @vlan_tci: buffer to store vlaue + * + * Returns error if the skb is not VLAN tagged + */ +static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) +{ + if (skb->dev->features & NETIF_F_HW_VLAN_TX) { + return __vlan_hwaccel_get_tag(skb, vlan_tci); + } else { + return __vlan_get_tag(skb, vlan_tci); + } +} + +/** + * vlan_get_protocol - get protocol EtherType. + * @skb: skbuff to query + * + * Returns the EtherType of the packet, regardless of whether it is + * vlan encapsulated (normal or hardware accelerated) or not. + */ +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + __be16 protocol = 0; + + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + protocol = skb->protocol; + else { + __be16 proto, *protop; + protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, + h_vlan_encapsulated_proto), + sizeof(proto), &proto); + if (likely(protop)) + protocol = *protop; + } + + return protocol; +} + +static inline void vlan_set_encap_proto(struct sk_buff *skb, + struct vlan_hdr *vhdr) +{ + __be16 proto; + unsigned char *rawp; + + /* + * Was a VLAN packet, grab the encapsulated protocol, which the layer + * three protocols care about. + */ + + proto = vhdr->h_vlan_encapsulated_proto; + if (ntohs(proto) >= 1536) { + skb->protocol = proto; + return; + } + + rawp = skb->data; + if (*(unsigned short *) rawp == 0xFFFF) + /* + * This is a magic hack to spot IPX packets. Older Novell + * breaks the protocol design and runs IPX over 802.3 without + * an 802.2 LLC layer. We look for FFFF which isn't a used + * 802.2 SSAP/DSAP. This won't work for fault tolerant netware + * but does for the rest. + */ + skb->protocol = htons(ETH_P_802_3); + else + /* + * Real 802.2 LLC + */ + skb->protocol = htons(ETH_P_802_2); +} +#endif /* __KERNEL__ */ + +/* VLAN IOCTLs are found in sockios.h */ + +/* Passed in vlan_ioctl_args structure to determine behaviour. 
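
For the ioctl side, a hedged user-space sketch using the vlan_ioctl_args structure defined just below, in the style of vconfig-type tools; the SIOCSIFVLAN request code lives in sockios.h as noted above, parent device and VID are placeholders, and cleanup is omitted:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if_vlan.h>
#include <linux/sockios.h>

/* Create <parent>.<vid>, e.g. eth0.100, via the vlan_ioctl_args interface. */
static int example_vlan_add(const char *parent, int vid)
{
        struct vlan_ioctl_args args;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;

        memset(&args, 0, sizeof(args));
        args.cmd = ADD_VLAN_CMD;
        strncpy(args.device1, parent, sizeof(args.device1) - 1);
        args.u.VID = vid;

        return ioctl(fd, SIOCSIFVLAN, &args);
}
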
*/ +enum vlan_ioctl_cmds { + ADD_VLAN_CMD, + DEL_VLAN_CMD, + SET_VLAN_INGRESS_PRIORITY_CMD, + SET_VLAN_EGRESS_PRIORITY_CMD, + GET_VLAN_INGRESS_PRIORITY_CMD, + GET_VLAN_EGRESS_PRIORITY_CMD, + SET_VLAN_NAME_TYPE_CMD, + SET_VLAN_FLAG_CMD, + GET_VLAN_REALDEV_NAME_CMD, /* If this works, you know it's a VLAN device, btw */ + GET_VLAN_VID_CMD /* Get the VID of this VLAN (specified by name) */ +}; + +enum vlan_flags { + VLAN_FLAG_REORDER_HDR = 0x1, + VLAN_FLAG_GVRP = 0x2, + VLAN_FLAG_LOOSE_BINDING = 0x4, +}; + +enum vlan_name_types { + VLAN_NAME_TYPE_PLUS_VID, /* Name will look like: vlan0005 */ + VLAN_NAME_TYPE_RAW_PLUS_VID, /* name will look like: eth1.0005 */ + VLAN_NAME_TYPE_PLUS_VID_NO_PAD, /* Name will look like: vlan5 */ + VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD, /* Name will look like: eth0.5 */ + VLAN_NAME_TYPE_HIGHEST +}; + +struct vlan_ioctl_args { + int cmd; /* Should be one of the vlan_ioctl_cmds enum above. */ + char device1[24]; + + union { + char device2[24]; + int VID; + unsigned int skb_priority; + unsigned int name_type; + unsigned int bind_type; + unsigned int flag; /* Matches vlan_dev_priv flags */ + } u; + + short vlan_qos; +}; + +#endif /* !(_LINUX_IF_VLAN_H_) */ diff --git a/include/linux/if_x25.h b/include/linux/if_x25.h new file mode 100644 index 00000000..897765f5 --- /dev/null +++ b/include/linux/if_x25.h @@ -0,0 +1,26 @@ +/* + * Linux X.25 packet to device interface + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _IF_X25_H +#define _IF_X25_H + +#include <linux/types.h> + +/* Documentation/networking/x25-iface.txt */ +#define X25_IFACE_DATA 0x00 +#define X25_IFACE_CONNECT 0x01 +#define X25_IFACE_DISCONNECT 0x02 +#define X25_IFACE_PARAMS 0x03 + +#endif /* _IF_X25_H */ diff --git a/include/linux/igmp.h b/include/linux/igmp.h new file mode 100644 index 00000000..82de336b --- /dev/null +++ b/include/linux/igmp.h @@ -0,0 +1,244 @@ +/* + * Linux NET3: Internet Group Management Protocol [IGMP] + * + * Authors: + * Alan Cox <alan@lxorguk.ukuu.org.uk> + * + * Extended to talk the BSD extended IGMP protocol of mrouted 3.6 + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_IGMP_H +#define _LINUX_IGMP_H + +#include <linux/types.h> +#include <asm/byteorder.h> + +/* + * IGMP protocol structures + */ + +/* + * Header in on cable format + */ + +struct igmphdr { + __u8 type; + __u8 code; /* For newer IGMP */ + __sum16 csum; + __be32 group; +}; + +/* V3 group record types [grec_type] */ +#define IGMPV3_MODE_IS_INCLUDE 1 +#define IGMPV3_MODE_IS_EXCLUDE 2 +#define IGMPV3_CHANGE_TO_INCLUDE 3 +#define IGMPV3_CHANGE_TO_EXCLUDE 4 +#define IGMPV3_ALLOW_NEW_SOURCES 5 +#define IGMPV3_BLOCK_OLD_SOURCES 6 + +struct igmpv3_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + __be32 grec_mca; + __be32 grec_src[0]; +}; + +struct igmpv3_report { + __u8 type; + __u8 resv1; + __be16 csum; + __be16 resv2; + __be16 ngrec; + struct igmpv3_grec grec[0]; +}; + +struct igmpv3_query { + __u8 type; + __u8 code; + __be16 csum; + __be32 group; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 qrv:3, + suppress:1, + resv:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 resv:4, + suppress:1, + qrv:3; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 qqic; + __be16 nsrcs; + __be32 srcs[0]; +}; + +#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */ +#define IGMP_HOST_MEMBERSHIP_REPORT 0x12 /* Ditto */ +#define IGMP_DVMRP 0x13 /* DVMRP routing */ +#define IGMP_PIM 0x14 /* PIM routing */ +#define IGMP_TRACE 0x15 +#define IGMPV2_HOST_MEMBERSHIP_REPORT 0x16 /* V2 version of 0x12 */ +#define IGMP_HOST_LEAVE_MESSAGE 0x17 +#define IGMPV3_HOST_MEMBERSHIP_REPORT 0x22 /* V3 version of 0x12 */ + +#define IGMP_MTRACE_RESP 0x1e +#define IGMP_MTRACE 0x1f + + +/* + * Use the BSD names for these for compatibility + */ + +#define IGMP_DELAYING_MEMBER 0x01 +#define IGMP_IDLE_MEMBER 0x02 +#define IGMP_LAZY_MEMBER 0x03 +#define IGMP_SLEEPING_MEMBER 0x04 +#define IGMP_AWAKENING_MEMBER 0x05 + +#define IGMP_MINLEN 8 + +#define IGMP_MAX_HOST_REPORT_DELAY 10 /* max delay for response to */ + /* query (in seconds) */ + +#define IGMP_TIMER_SCALE 10 /* denotes that the igmphdr->timer field */ + /* specifies time in 10th of seconds */ + +#define IGMP_AGE_THRESHOLD 400 /* If this host don't hear any IGMP V1 */ + /* message in this period of time, */ + /* revert to IGMP v2 router. */ + +#define IGMP_ALL_HOSTS htonl(0xE0000001L) +#define IGMP_ALL_ROUTER htonl(0xE0000002L) +#define IGMPV3_ALL_MCR htonl(0xE0000016L) +#define IGMP_LOCAL_GROUP htonl(0xE0000000L) +#define IGMP_LOCAL_GROUP_MASK htonl(0xFFFFFF00L) + +/* + * struct for keeping the multicast list in + */ + +#ifdef __KERNEL__ +#include <linux/skbuff.h> +#include <linux/timer.h> +#include <linux/in.h> + +static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb) +{ + return (struct igmphdr *)skb_transport_header(skb); +} + +static inline struct igmpv3_report * + igmpv3_report_hdr(const struct sk_buff *skb) +{ + return (struct igmpv3_report *)skb_transport_header(skb); +} + +static inline struct igmpv3_query * + igmpv3_query_hdr(const struct sk_buff *skb) +{ + return (struct igmpv3_query *)skb_transport_header(skb); +} + +extern int sysctl_igmp_max_memberships; +extern int sysctl_igmp_max_msf; + +struct ip_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct rcu_head rcu; + __be32 sl_addr[0]; +}; + +#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \ + (count) * sizeof(__be32)) + +#define IP_SFBLOCK 10 /* allocate this many at once */ + +/* ip_mc_socklist is real list now. 
Speed is not argument; + this list never used in fast path code + */ + +struct ip_mc_socklist { + struct ip_mc_socklist __rcu *next_rcu; + struct ip_mreqn multi; + unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ + struct ip_sf_socklist __rcu *sflist; + struct rcu_head rcu; +}; + +struct ip_sf_list { + struct ip_sf_list *sf_next; + __be32 sf_inaddr; + unsigned long sf_count[2]; /* include/exclude counts */ + unsigned char sf_gsresp; /* include in g & s response? */ + unsigned char sf_oldin; /* change state */ + unsigned char sf_crcount; /* retrans. left to send */ +}; + +struct ip_mc_list { + struct in_device *interface; + __be32 multiaddr; + unsigned int sfmode; + struct ip_sf_list *sources; + struct ip_sf_list *tomb; + unsigned long sfcount[2]; + union { + struct ip_mc_list *next; + struct ip_mc_list __rcu *next_rcu; + }; + struct timer_list timer; + int users; + atomic_t refcnt; + spinlock_t lock; + char tm_running; + char reporter; + char unsolicit_count; + char loaded; + unsigned char gsquery; /* check source marks? */ + unsigned char crcount; + struct rcu_head rcu; +}; + +/* V3 exponential field decoding */ +#define IGMPV3_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value)) +#define IGMPV3_EXP(thresh, nbmant, nbexp, value) \ + ((value) < (thresh) ? (value) : \ + ((IGMPV3_MASK(value, nbmant) | (1<<(nbmant))) << \ + (IGMPV3_MASK((value) >> (nbmant), nbexp) + (nbexp)))) + +#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) +#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) + +extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto); +extern int igmp_rcv(struct sk_buff *); +extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); +extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); +extern void ip_mc_drop_socket(struct sock *sk); +extern int ip_mc_source(int add, int omode, struct sock *sk, + struct ip_mreq_source *mreqs, int ifindex); +extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex); +extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, + struct ip_msfilter __user *optval, int __user *optlen); +extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, + struct group_filter __user *optval, int __user *optlen); +extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif); +extern void ip_mc_init_dev(struct in_device *); +extern void ip_mc_destroy_dev(struct in_device *); +extern void ip_mc_up(struct in_device *); +extern void ip_mc_down(struct in_device *); +extern void ip_mc_unmap(struct in_device *); +extern void ip_mc_remap(struct in_device *); +extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); +extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); +extern void ip_mc_rejoin_groups(struct in_device *in_dev); + +#endif +#endif diff --git a/include/linux/ihex.h b/include/linux/ihex.h new file mode 100644 index 00000000..31d8629e --- /dev/null +++ b/include/linux/ihex.h @@ -0,0 +1,74 @@ +/* + * Compact binary representation of ihex records. Some devices need their + * firmware loaded in strange orders rather than a single big blob, but + * actually parsing ihex-as-text within the kernel seems silly. Thus,... 
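
A hedged sketch of the usual consumption pattern for the helpers defined just below: request and validate the image, then walk the records until ihex_next_binrec() returns NULL. The firmware name and the programming primitive are placeholders:

/* Hypothetical device-programming primitive used by the sketch below. */
static void example_write_block(struct device *dev, u32 addr,
                                const u8 *data, unsigned int len);

static int example_load_firmware(struct device *dev)
{
        const struct firmware *fw;
        const struct ihex_binrec *rec;
        int err;

        err = request_ihex_firmware(&fw, "example-device.fw", dev);
        if (err)
                return err;

        for (rec = (const struct ihex_binrec *)fw->data; rec;
             rec = ihex_next_binrec(rec)) {
                /* Each record carries a big-endian load address and length. */
                example_write_block(dev, be32_to_cpu(rec->addr),
                                    rec->data, be16_to_cpu(rec->len));
        }

        release_firmware(fw);
        return 0;
}
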
+ */ + +#ifndef __LINUX_IHEX_H__ +#define __LINUX_IHEX_H__ + +#include <linux/types.h> +#include <linux/firmware.h> +#include <linux/device.h> + +/* Intel HEX files actually limit the length to 256 bytes, but we have + drivers which would benefit from using separate records which are + longer than that, so we extend to 16 bits of length */ +struct ihex_binrec { + __be32 addr; + __be16 len; + uint8_t data[0]; +} __attribute__((packed)); + +/* Find the next record, taking into account the 4-byte alignment */ +static inline const struct ihex_binrec * +ihex_next_binrec(const struct ihex_binrec *rec) +{ + int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2; + rec = (void *)&rec->data[next]; + + return be16_to_cpu(rec->len) ? rec : NULL; +} + +/* Check that ihex_next_binrec() won't take us off the end of the image... */ +static inline int ihex_validate_fw(const struct firmware *fw) +{ + const struct ihex_binrec *rec; + size_t ofs = 0; + + while (ofs <= fw->size - sizeof(*rec)) { + rec = (void *)&fw->data[ofs]; + + /* Zero length marks end of records */ + if (!be16_to_cpu(rec->len)) + return 0; + + /* Point to next record... */ + ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3; + } + return -EINVAL; +} + +/* Request firmware and validate it so that we can trust we won't + * run off the end while reading records... */ +static inline int request_ihex_firmware(const struct firmware **fw, + const char *fw_name, + struct device *dev) +{ + const struct firmware *lfw; + int ret; + + ret = request_firmware(&lfw, fw_name, dev); + if (ret) + return ret; + ret = ihex_validate_fw(lfw); + if (ret) { + dev_err(dev, "Firmware \"%s\" not valid IHEX records\n", + fw_name); + release_firmware(lfw); + return ret; + } + *fw = lfw; + return 0; +} +#endif /* __LINUX_IHEX_H__ */ diff --git a/include/linux/ima.h b/include/linux/ima.h new file mode 100644 index 00000000..6ac8e50c --- /dev/null +++ b/include/linux/ima.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2008 IBM Corporation + * Author: Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ + +#ifndef _LINUX_IMA_H +#define _LINUX_IMA_H + +#include <linux/fs.h> +struct linux_binprm; + +#ifdef CONFIG_IMA +extern int ima_bprm_check(struct linux_binprm *bprm); +extern int ima_file_check(struct file *file, int mask); +extern void ima_file_free(struct file *file); +extern int ima_file_mmap(struct file *file, unsigned long prot); + +#else +static inline int ima_bprm_check(struct linux_binprm *bprm) +{ + return 0; +} + +static inline int ima_file_check(struct file *file, int mask) +{ + return 0; +} + +static inline void ima_file_free(struct file *file) +{ + return; +} + +static inline int ima_file_mmap(struct file *file, unsigned long prot) +{ + return 0; +} +#endif /* CONFIG_IMA_H */ +#endif /* _LINUX_IMA_H */ diff --git a/include/linux/in.h b/include/linux/in.h new file mode 100644 index 00000000..e0337f11 --- /dev/null +++ b/include/linux/in.h @@ -0,0 +1,337 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions of the Internet Protocol. + * + * Version: @(#)in.h 1.0.1 04/21/93 + * + * Authors: Original taken from the GNU Project <netinet/in.h> file. + * Fred N. 
van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IN_H +#define _LINUX_IN_H + +#include <linux/types.h> +#include <linux/socket.h> + +/* Standard well-defined IP protocols. */ +enum { + IPPROTO_IP = 0, /* Dummy protocol for TCP */ + IPPROTO_ICMP = 1, /* Internet Control Message Protocol */ + IPPROTO_IGMP = 2, /* Internet Group Management Protocol */ + IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */ + IPPROTO_TCP = 6, /* Transmission Control Protocol */ + IPPROTO_EGP = 8, /* Exterior Gateway Protocol */ + IPPROTO_PUP = 12, /* PUP protocol */ + IPPROTO_UDP = 17, /* User Datagram Protocol */ + IPPROTO_IDP = 22, /* XNS IDP protocol */ + IPPROTO_DCCP = 33, /* Datagram Congestion Control Protocol */ + IPPROTO_RSVP = 46, /* RSVP protocol */ + IPPROTO_GRE = 47, /* Cisco GRE tunnels (rfc 1701,1702) */ + + IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */ + + IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */ + IPPROTO_AH = 51, /* Authentication Header protocol */ + IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */ + IPPROTO_PIM = 103, /* Protocol Independent Multicast */ + + IPPROTO_COMP = 108, /* Compression Header protocol */ + IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */ + IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */ + + IPPROTO_RAW = 255, /* Raw IP packets */ + IPPROTO_MAX +}; + + +/* Internet address. */ +struct in_addr { + __be32 s_addr; +}; + +#define IP_TOS 1 +#define IP_TTL 2 +#define IP_HDRINCL 3 +#define IP_OPTIONS 4 +#define IP_ROUTER_ALERT 5 +#define IP_RECVOPTS 6 +#define IP_RETOPTS 7 +#define IP_PKTINFO 8 +#define IP_PKTOPTIONS 9 +#define IP_MTU_DISCOVER 10 +#define IP_RECVERR 11 +#define IP_RECVTTL 12 +#define IP_RECVTOS 13 +#define IP_MTU 14 +#define IP_FREEBIND 15 +#define IP_IPSEC_POLICY 16 +#define IP_XFRM_POLICY 17 +#define IP_PASSSEC 18 +#define IP_TRANSPARENT 19 + +/* BSD compatibility */ +#define IP_RECVRETOPTS IP_RETOPTS + +/* TProxy original addresses */ +#define IP_ORIGDSTADDR 20 +#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR + +#define IP_MINTTL 21 +#define IP_NODEFRAG 22 + +/* IP_MTU_DISCOVER values */ +#define IP_PMTUDISC_DONT 0 /* Never send DF frames */ +#define IP_PMTUDISC_WANT 1 /* Use per route hints */ +#define IP_PMTUDISC_DO 2 /* Always DF */ +#define IP_PMTUDISC_PROBE 3 /* Ignore dst pmtu */ + +#define IP_MULTICAST_IF 32 +#define IP_MULTICAST_TTL 33 +#define IP_MULTICAST_LOOP 34 +#define IP_ADD_MEMBERSHIP 35 +#define IP_DROP_MEMBERSHIP 36 +#define IP_UNBLOCK_SOURCE 37 +#define IP_BLOCK_SOURCE 38 +#define IP_ADD_SOURCE_MEMBERSHIP 39 +#define IP_DROP_SOURCE_MEMBERSHIP 40 +#define IP_MSFILTER 41 +#define MCAST_JOIN_GROUP 42 +#define MCAST_BLOCK_SOURCE 43 +#define MCAST_UNBLOCK_SOURCE 44 +#define MCAST_LEAVE_GROUP 45 +#define MCAST_JOIN_SOURCE_GROUP 46 +#define MCAST_LEAVE_SOURCE_GROUP 47 +#define MCAST_MSFILTER 48 +#define IP_MULTICAST_ALL 49 +#define IP_UNICAST_IF 50 + +#define MCAST_EXCLUDE 0 +#define MCAST_INCLUDE 1 + +/* These need to appear somewhere around here */ +#define IP_DEFAULT_MULTICAST_TTL 1 +#define IP_DEFAULT_MULTICAST_LOOP 1 + +/* Request struct for multicast socket ops */ + +struct ip_mreq { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_interface; /* local IP address of interface */ 
+}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_address; /* local IP address of interface */ + int imr_ifindex; /* Interface index */ +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + __be32 imsf_slist[1]; +}; + +#define IP_MSFILTER_SIZE(numsrc) \ + (sizeof(struct ip_msfilter) - sizeof(__u32) \ + + (numsrc) * sizeof(__u32)) + +struct group_req { + __u32 gr_interface; /* interface index */ + struct __kernel_sockaddr_storage gr_group; /* group address */ +}; + +struct group_source_req { + __u32 gsr_interface; /* interface index */ + struct __kernel_sockaddr_storage gsr_group; /* group address */ + struct __kernel_sockaddr_storage gsr_source; /* source address */ +}; + +struct group_filter { + __u32 gf_interface; /* interface index */ + struct __kernel_sockaddr_storage gf_group; /* multicast address */ + __u32 gf_fmode; /* filter mode */ + __u32 gf_numsrc; /* number of sources */ + struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */ +}; + +#define GROUP_FILTER_SIZE(numsrc) \ + (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \ + + (numsrc) * sizeof(struct __kernel_sockaddr_storage)) + +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + struct in_addr ipi_addr; +}; + +/* Structure describing an Internet (IP) socket address. */ +#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */ +struct sockaddr_in { + __kernel_sa_family_t sin_family; /* Address family */ + __be16 sin_port; /* Port number */ + struct in_addr sin_addr; /* Internet address */ + + /* Pad to size of `struct sockaddr'. */ + unsigned char __pad[__SOCK_SIZE__ - sizeof(short int) - + sizeof(unsigned short int) - sizeof(struct in_addr)]; +}; +#define sin_zero __pad /* for BSD UNIX comp. -FvK */ + + +/* + * Definitions of the bits in an Internet address integer. + * On subnets, host and network parts are found according + * to the subnet mask, not these masks. + */ +#define IN_CLASSA(a) ((((long int) (a)) & 0x80000000) == 0) +#define IN_CLASSA_NET 0xff000000 +#define IN_CLASSA_NSHIFT 24 +#define IN_CLASSA_HOST (0xffffffff & ~IN_CLASSA_NET) +#define IN_CLASSA_MAX 128 + +#define IN_CLASSB(a) ((((long int) (a)) & 0xc0000000) == 0x80000000) +#define IN_CLASSB_NET 0xffff0000 +#define IN_CLASSB_NSHIFT 16 +#define IN_CLASSB_HOST (0xffffffff & ~IN_CLASSB_NET) +#define IN_CLASSB_MAX 65536 + +#define IN_CLASSC(a) ((((long int) (a)) & 0xe0000000) == 0xc0000000) +#define IN_CLASSC_NET 0xffffff00 +#define IN_CLASSC_NSHIFT 8 +#define IN_CLASSC_HOST (0xffffffff & ~IN_CLASSC_NET) + +#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000) +#define IN_MULTICAST(a) IN_CLASSD(a) +#define IN_MULTICAST_NET 0xF0000000 + +#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) +#define IN_BADCLASS(a) IN_EXPERIMENTAL((a)) + +/* Address to accept any incoming messages. */ +#define INADDR_ANY ((unsigned long int) 0x00000000) + +/* Address to send to all hosts. */ +#define INADDR_BROADCAST ((unsigned long int) 0xffffffff) + +/* Address indicating an error return. */ +#define INADDR_NONE ((unsigned long int) 0xffffffff) + +/* Network number for local host loopback. */ +#define IN_LOOPBACKNET 127 + +/* Address to loopback in software to local host. 
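
Putting the multicast request structures above to work, a hedged user-space sketch that joins a group with IP_ADD_MEMBERSHIP; the group string and socket are placeholders, and the ip_mreqn form additionally lets callers pin an interface index:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int example_join_group(int fd, const char *group)
{
        struct ip_mreq mreq;

        memset(&mreq, 0, sizeof(mreq));
        if (inet_pton(AF_INET, group, &mreq.imr_multiaddr) != 1)
                return -1;
        mreq.imr_interface.s_addr = htonl(INADDR_ANY);  /* let routing choose */

        return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                          &mreq, sizeof(mreq));
}
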
*/ +#define INADDR_LOOPBACK 0x7f000001 /* 127.0.0.1 */ +#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000) + +/* Defines for Multicast INADDR */ +#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */ +#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */ +#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */ +#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */ + + +/* <asm/byteorder.h> contains the htonl type stuff.. */ +#include <asm/byteorder.h> + +#ifdef __KERNEL__ + +#include <linux/errno.h> + +static inline int proto_ports_offset(int proto) +{ + switch (proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + case IPPROTO_DCCP: + case IPPROTO_ESP: /* SPI */ + case IPPROTO_SCTP: + case IPPROTO_UDPLITE: + return 0; + case IPPROTO_AH: /* SPI */ + return 4; + default: + return -EINVAL; + } +} + +static inline bool ipv4_is_loopback(__be32 addr) +{ + return (addr & htonl(0xff000000)) == htonl(0x7f000000); +} + +static inline bool ipv4_is_multicast(__be32 addr) +{ + return (addr & htonl(0xf0000000)) == htonl(0xe0000000); +} + +static inline bool ipv4_is_local_multicast(__be32 addr) +{ + return (addr & htonl(0xffffff00)) == htonl(0xe0000000); +} + +static inline bool ipv4_is_lbcast(__be32 addr) +{ + /* limited broadcast */ + return addr == htonl(INADDR_BROADCAST); +} + +static inline bool ipv4_is_zeronet(__be32 addr) +{ + return (addr & htonl(0xff000000)) == htonl(0x00000000); +} + +/* Special-Use IPv4 Addresses (RFC3330) */ + +static inline bool ipv4_is_private_10(__be32 addr) +{ + return (addr & htonl(0xff000000)) == htonl(0x0a000000); +} + +static inline bool ipv4_is_private_172(__be32 addr) +{ + return (addr & htonl(0xfff00000)) == htonl(0xac100000); +} + +static inline bool ipv4_is_private_192(__be32 addr) +{ + return (addr & htonl(0xffff0000)) == htonl(0xc0a80000); +} + +static inline bool ipv4_is_linklocal_169(__be32 addr) +{ + return (addr & htonl(0xffff0000)) == htonl(0xa9fe0000); +} + +static inline bool ipv4_is_anycast_6to4(__be32 addr) +{ + return (addr & htonl(0xffffff00)) == htonl(0xc0586300); +} + +static inline bool ipv4_is_test_192(__be32 addr) +{ + return (addr & htonl(0xffffff00)) == htonl(0xc0000200); +} + +static inline bool ipv4_is_test_198(__be32 addr) +{ + return (addr & htonl(0xfffe0000)) == htonl(0xc6120000); +} +#endif + +#endif /* _LINUX_IN_H */ diff --git a/include/linux/in6.h b/include/linux/in6.h new file mode 100644 index 00000000..5c83d9e3 --- /dev/null +++ b/include/linux/in6.h @@ -0,0 +1,291 @@ +/* + * Types and definitions for AF_INET6 + * Linux INET6 implementation + * + * Authors: + * Pedro Roque <roque@di.fc.ul.pt> + * + * Sources: + * IPv6 Program Interfaces for BSD Systems + * <draft-ietf-ipngwg-bsd-api-05.txt> + * + * Advanced Sockets API for IPv6 + * <draft-stevens-advanced-api-00.txt> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_IN6_H +#define _LINUX_IN6_H + +#include <linux/types.h> + +/* + * IPv6 address structure + */ + +struct in6_addr { + union { + __u8 u6_addr8[16]; + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + } in6_u; +#define s6_addr in6_u.u6_addr8 +#define s6_addr16 in6_u.u6_addr16 +#define s6_addr32 in6_u.u6_addr32 +}; + +/* IPv6 Wildcard Address (::) and Loopback Address (::1) defined in RFC2553 + * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined + * in network byte order, not in host byte order as are the IPv4 equivalents + */ +#ifdef __KERNEL__ +extern const struct in6_addr in6addr_any; +#define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } } +extern const struct in6_addr in6addr_loopback; +#define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } +extern const struct in6_addr in6addr_linklocal_allnodes; +#define IN6ADDR_LINKLOCAL_ALLNODES_INIT \ + { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } +extern const struct in6_addr in6addr_linklocal_allrouters; +#define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \ + { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } } +#endif + +struct sockaddr_in6 { + unsigned short int sin6_family; /* AF_INET6 */ + __be16 sin6_port; /* Transport layer port # */ + __be32 sin6_flowinfo; /* IPv6 flow information */ + struct in6_addr sin6_addr; /* IPv6 address */ + __u32 sin6_scope_id; /* scope id (new in RFC2553) */ +}; + +struct ipv6_mreq { + /* IPv6 multicast address of group */ + struct in6_addr ipv6mr_multiaddr; + + /* local IPv6 address of interface */ + int ipv6mr_ifindex; +}; + +#define ipv6mr_acaddr ipv6mr_multiaddr + +struct in6_flowlabel_req { + struct in6_addr flr_dst; + __be32 flr_label; + __u8 flr_action; + __u8 flr_share; + __u16 flr_flags; + __u16 flr_expires; + __u16 flr_linger; + __u32 __flr_pad; + /* Options in format of IPV6_PKTOPTIONS */ +}; + +#define IPV6_FL_A_GET 0 +#define IPV6_FL_A_PUT 1 +#define IPV6_FL_A_RENEW 2 + +#define IPV6_FL_F_CREATE 1 +#define IPV6_FL_F_EXCL 2 + +#define IPV6_FL_S_NONE 0 +#define IPV6_FL_S_EXCL 1 +#define IPV6_FL_S_PROCESS 2 +#define IPV6_FL_S_USER 3 +#define IPV6_FL_S_ANY 255 + + +/* + * Bitmask constant declarations to help applications select out the + * flow label and priority fields. + * + * Note that this are in host byte order while the flowinfo field of + * sockaddr_in6 is in network byte order. + */ + +#define IPV6_FLOWINFO_FLOWLABEL 0x000fffff +#define IPV6_FLOWINFO_PRIORITY 0x0ff00000 + +/* These definitions are obsolete */ +#define IPV6_PRIORITY_UNCHARACTERIZED 0x0000 +#define IPV6_PRIORITY_FILLER 0x0100 +#define IPV6_PRIORITY_UNATTENDED 0x0200 +#define IPV6_PRIORITY_RESERVED1 0x0300 +#define IPV6_PRIORITY_BULK 0x0400 +#define IPV6_PRIORITY_RESERVED2 0x0500 +#define IPV6_PRIORITY_INTERACTIVE 0x0600 +#define IPV6_PRIORITY_CONTROL 0x0700 +#define IPV6_PRIORITY_8 0x0800 +#define IPV6_PRIORITY_9 0x0900 +#define IPV6_PRIORITY_10 0x0a00 +#define IPV6_PRIORITY_11 0x0b00 +#define IPV6_PRIORITY_12 0x0c00 +#define IPV6_PRIORITY_13 0x0d00 +#define IPV6_PRIORITY_14 0x0e00 +#define IPV6_PRIORITY_15 0x0f00 + +/* + * IPV6 extension headers + */ +#define IPPROTO_HOPOPTS 0 /* IPv6 hop-by-hop options */ +#define IPPROTO_ROUTING 43 /* IPv6 routing header */ +#define IPPROTO_FRAGMENT 44 /* IPv6 fragmentation header */ +#define IPPROTO_ICMPV6 58 /* ICMPv6 */ +#define IPPROTO_NONE 59 /* IPv6 no next header */ +#define IPPROTO_DSTOPTS 60 /* IPv6 destination options */ +#define IPPROTO_MH 135 /* IPv6 mobility header */ + +/* + * IPv6 TLV options. 
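
A hedged user-space reminder of how the address and multicast structures above are filled: bind a wildcard IPv6 socket, then join a link-scope group via ipv6_mreq. Port, group and interface index are placeholders, and note that glibc spells the kernel's ipv6mr_ifindex field ipv6mr_interface:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int example_bind_and_join(int fd, unsigned short port, int ifindex)
{
        struct sockaddr_in6 sa;
        struct ipv6_mreq mreq;

        memset(&sa, 0, sizeof(sa));
        sa.sin6_family = AF_INET6;
        sa.sin6_port   = htons(port);
        sa.sin6_addr   = in6addr_any;           /* the wildcard address, :: */
        if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                return -1;

        memset(&mreq, 0, sizeof(mreq));
        if (inet_pton(AF_INET6, "ff02::1:2", &mreq.ipv6mr_multiaddr) != 1)
                return -1;
        mreq.ipv6mr_interface = ifindex;        /* required for link scope */

        return setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP,
                          &mreq, sizeof(mreq));
}
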
+ */ +#define IPV6_TLV_PAD0 0 +#define IPV6_TLV_PADN 1 +#define IPV6_TLV_ROUTERALERT 5 +#define IPV6_TLV_JUMBO 194 +#define IPV6_TLV_HAO 201 /* home address option */ + +/* + * IPV6 socket options + */ + +#define IPV6_ADDRFORM 1 +#define IPV6_2292PKTINFO 2 +#define IPV6_2292HOPOPTS 3 +#define IPV6_2292DSTOPTS 4 +#define IPV6_2292RTHDR 5 +#define IPV6_2292PKTOPTIONS 6 +#define IPV6_CHECKSUM 7 +#define IPV6_2292HOPLIMIT 8 +#define IPV6_NEXTHOP 9 +#define IPV6_AUTHHDR 10 /* obsolete */ +#define IPV6_FLOWINFO 11 + +#define IPV6_UNICAST_HOPS 16 +#define IPV6_MULTICAST_IF 17 +#define IPV6_MULTICAST_HOPS 18 +#define IPV6_MULTICAST_LOOP 19 +#define IPV6_ADD_MEMBERSHIP 20 +#define IPV6_DROP_MEMBERSHIP 21 +#define IPV6_ROUTER_ALERT 22 +#define IPV6_MTU_DISCOVER 23 +#define IPV6_MTU 24 +#define IPV6_RECVERR 25 +#define IPV6_V6ONLY 26 +#define IPV6_JOIN_ANYCAST 27 +#define IPV6_LEAVE_ANYCAST 28 + +/* IPV6_MTU_DISCOVER values */ +#define IPV6_PMTUDISC_DONT 0 +#define IPV6_PMTUDISC_WANT 1 +#define IPV6_PMTUDISC_DO 2 +#define IPV6_PMTUDISC_PROBE 3 + +/* Flowlabel */ +#define IPV6_FLOWLABEL_MGR 32 +#define IPV6_FLOWINFO_SEND 33 + +#define IPV6_IPSEC_POLICY 34 +#define IPV6_XFRM_POLICY 35 + +/* + * Multicast: + * Following socket options are shared between IPv4 and IPv6. + * + * MCAST_JOIN_GROUP 42 + * MCAST_BLOCK_SOURCE 43 + * MCAST_UNBLOCK_SOURCE 44 + * MCAST_LEAVE_GROUP 45 + * MCAST_JOIN_SOURCE_GROUP 46 + * MCAST_LEAVE_SOURCE_GROUP 47 + * MCAST_MSFILTER 48 + */ + +/* + * Advanced API (RFC3542) (1) + * + * Note: IPV6_RECVRTHDRDSTOPTS does not exist. see net/ipv6/datagram.c. + */ + +#define IPV6_RECVPKTINFO 49 +#define IPV6_PKTINFO 50 +#define IPV6_RECVHOPLIMIT 51 +#define IPV6_HOPLIMIT 52 +#define IPV6_RECVHOPOPTS 53 +#define IPV6_HOPOPTS 54 +#define IPV6_RTHDRDSTOPTS 55 +#define IPV6_RECVRTHDR 56 +#define IPV6_RTHDR 57 +#define IPV6_RECVDSTOPTS 58 +#define IPV6_DSTOPTS 59 +#define IPV6_RECVPATHMTU 60 +#define IPV6_PATHMTU 61 +#define IPV6_DONTFRAG 62 +#if 0 /* not yet */ +#define IPV6_USE_MIN_MTU 63 +#endif + +/* + * Netfilter (1) + * + * Following socket options are used in ip6_tables; + * see include/linux/netfilter_ipv6/ip6_tables.h. + * + * IP6T_SO_SET_REPLACE / IP6T_SO_GET_INFO 64 + * IP6T_SO_SET_ADD_COUNTERS / IP6T_SO_GET_ENTRIES 65 + */ + +/* + * Advanced API (RFC3542) (2) + */ +#define IPV6_RECVTCLASS 66 +#define IPV6_TCLASS 67 + +/* + * Netfilter (2) + * + * Following socket options are used in ip6_tables; + * see include/linux/netfilter_ipv6/ip6_tables.h. + * + * IP6T_SO_GET_REVISION_MATCH 68 + * IP6T_SO_GET_REVISION_TARGET 69 + */ + +/* RFC5014: Source address selection */ +#define IPV6_ADDR_PREFERENCES 72 + +#define IPV6_PREFER_SRC_TMP 0x0001 +#define IPV6_PREFER_SRC_PUBLIC 0x0002 +#define IPV6_PREFER_SRC_PUBTMP_DEFAULT 0x0100 +#define IPV6_PREFER_SRC_COA 0x0004 +#define IPV6_PREFER_SRC_HOME 0x0400 +#define IPV6_PREFER_SRC_CGA 0x0008 +#define IPV6_PREFER_SRC_NONCGA 0x0800 + +/* RFC5082: Generalized Ttl Security Mechanism */ +#define IPV6_MINHOPCOUNT 73 + +#define IPV6_ORIGDSTADDR 74 +#define IPV6_RECVORIGDSTADDR IPV6_ORIGDSTADDR +#define IPV6_TRANSPARENT 75 +#define IPV6_UNICAST_IF 76 + +/* + * Multicast Routing: + * see include/linux/mroute6.h. 
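The socket-option numbers above are consumed through ordinary setsockopt() calls at the IPPROTO_IPV6 level. A hedged userspace sketch: create a UDP socket that refuses v4-mapped traffic (IPV6_V6ONLY) and asks for per-packet interface/address information (IPV6_RECVPKTINFO):

#include <sys/socket.h>
#include <netinet/in.h>

/* Sketch only: error handling trimmed to the essentials. */
static int make_v6only_udp_socket(void)
{
	int on = 1;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* No v4-mapped addresses on this socket. */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
	/* Deliver an IPV6_PKTINFO control message with every datagram. */
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
	return fd;
}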
+ * + * MRT6_INIT 200 + * MRT6_DONE 201 + * MRT6_ADD_MIF 202 + * MRT6_DEL_MIF 203 + * MRT6_ADD_MFC 204 + * MRT6_DEL_MFC 205 + * MRT6_VERSION 206 + * MRT6_ASSERT 207 + * MRT6_PIM 208 + * (reserved) 209 + */ +#endif diff --git a/include/linux/in_route.h b/include/linux/in_route.h new file mode 100644 index 00000000..b261b8c9 --- /dev/null +++ b/include/linux/in_route.h @@ -0,0 +1,32 @@ +#ifndef _LINUX_IN_ROUTE_H +#define _LINUX_IN_ROUTE_H + +/* IPv4 routing cache flags */ + +#define RTCF_DEAD RTNH_F_DEAD +#define RTCF_ONLINK RTNH_F_ONLINK + +/* Obsolete flag. About to be deleted */ +#define RTCF_NOPMTUDISC RTM_F_NOPMTUDISC + +#define RTCF_NOTIFY 0x00010000 +#define RTCF_DIRECTDST 0x00020000 /* unused */ +#define RTCF_REDIRECTED 0x00040000 +#define RTCF_TPROXY 0x00080000 /* unused */ + +#define RTCF_FAST 0x00200000 /* unused */ +#define RTCF_MASQ 0x00400000 /* unused */ +#define RTCF_SNAT 0x00800000 /* unused */ +#define RTCF_DOREDIRECT 0x01000000 +#define RTCF_DIRECTSRC 0x04000000 +#define RTCF_DNAT 0x08000000 +#define RTCF_BROADCAST 0x10000000 +#define RTCF_MULTICAST 0x20000000 +#define RTCF_REJECT 0x40000000 /* unused */ +#define RTCF_LOCAL 0x80000000 + +#define RTCF_NAT (RTCF_DNAT|RTCF_SNAT) + +#define RT_TOS(tos) ((tos)&IPTOS_TOS_MASK) + +#endif /* _LINUX_IN_ROUTE_H */ diff --git a/include/linux/inet.h b/include/linux/inet.h new file mode 100644 index 00000000..4cca05c9 --- /dev/null +++ b/include/linux/inet.h @@ -0,0 +1,57 @@ +/* + * Swansea University Computer Society NET3 + * + * This work is derived from NET2Debugged, which is in turn derived + * from NET2D which was written by: + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This work was derived from Ross Biro's inspirational work + * for the LINUX operating system. His version numbers were: + * + * $Id: Space.c,v 0.8.4.5 1992/12/12 19:25:04 bir7 Exp $ + * $Id: arp.c,v 0.8.4.6 1993/01/28 22:30:00 bir7 Exp $ + * $Id: arp.h,v 0.8.4.6 1993/01/28 22:30:00 bir7 Exp $ + * $Id: dev.c,v 0.8.4.13 1993/01/23 18:00:11 bir7 Exp $ + * $Id: dev.h,v 0.8.4.7 1993/01/23 18:00:11 bir7 Exp $ + * $Id: eth.c,v 0.8.4.4 1993/01/22 23:21:38 bir7 Exp $ + * $Id: eth.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $ + * $Id: icmp.c,v 0.8.4.9 1993/01/23 18:00:11 bir7 Exp $ + * $Id: icmp.h,v 0.8.4.2 1992/11/15 14:55:30 bir7 Exp $ + * $Id: ip.c,v 0.8.4.8 1992/12/12 19:25:04 bir7 Exp $ + * $Id: ip.h,v 0.8.4.2 1993/01/23 18:00:11 bir7 Exp $ + * $Id: loopback.c,v 0.8.4.8 1993/01/23 18:00:11 bir7 Exp $ + * $Id: packet.c,v 0.8.4.7 1993/01/26 22:04:00 bir7 Exp $ + * $Id: protocols.c,v 0.8.4.3 1992/11/15 14:55:30 bir7 Exp $ + * $Id: raw.c,v 0.8.4.12 1993/01/26 22:04:00 bir7 Exp $ + * $Id: sock.c,v 0.8.4.6 1993/01/28 22:30:00 bir7 Exp $ + * $Id: sock.h,v 0.8.4.7 1993/01/26 22:04:00 bir7 Exp $ + * $Id: tcp.c,v 0.8.4.16 1993/01/26 22:04:00 bir7 Exp $ + * $Id: tcp.h,v 0.8.4.7 1993/01/22 22:58:08 bir7 Exp $ + * $Id: timer.c,v 0.8.4.8 1993/01/23 18:00:11 bir7 Exp $ + * $Id: timer.h,v 0.8.4.2 1993/01/23 18:00:11 bir7 Exp $ + * $Id: udp.c,v 0.8.4.12 1993/01/26 22:04:00 bir7 Exp $ + * $Id: udp.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $ + * $Id: we.c,v 0.8.4.10 1993/01/23 18:00:11 bir7 Exp $ + * $Id: wereg.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _LINUX_INET_H +#define _LINUX_INET_H + +#include <linux/types.h> + +/* + * These mimic similar macros defined in user-space for inet_ntop(3). + * See /usr/include/netinet/in.h . + */ +#define INET_ADDRSTRLEN (16) +#define INET6_ADDRSTRLEN (48) + +extern __be32 in_aton(const char *str); +extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); +extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); +#endif /* _LINUX_INET_H */ diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h new file mode 100644 index 00000000..f1362b54 --- /dev/null +++ b/include/linux/inet_diag.h @@ -0,0 +1,177 @@ +#ifndef _INET_DIAG_H_ +#define _INET_DIAG_H_ 1 + +#include <linux/types.h> + +/* Just some random number */ +#define TCPDIAG_GETSOCK 18 +#define DCCPDIAG_GETSOCK 19 + +#define INET_DIAG_GETSOCK_MAX 24 + +/* Socket identity */ +struct inet_diag_sockid { + __be16 idiag_sport; + __be16 idiag_dport; + __be32 idiag_src[4]; + __be32 idiag_dst[4]; + __u32 idiag_if; + __u32 idiag_cookie[2]; +#define INET_DIAG_NOCOOKIE (~0U) +}; + +/* Request structure */ + +struct inet_diag_req { + __u8 idiag_family; /* Family of addresses. */ + __u8 idiag_src_len; + __u8 idiag_dst_len; + __u8 idiag_ext; /* Query extended information */ + + struct inet_diag_sockid id; + + __u32 idiag_states; /* States to dump */ + __u32 idiag_dbs; /* Tables to dump (NI) */ +}; + +struct inet_diag_req_v2 { + __u8 sdiag_family; + __u8 sdiag_protocol; + __u8 idiag_ext; + __u8 pad; + __u32 idiag_states; + struct inet_diag_sockid id; +}; + +enum { + INET_DIAG_REQ_NONE, + INET_DIAG_REQ_BYTECODE, +}; + +#define INET_DIAG_REQ_MAX INET_DIAG_REQ_BYTECODE + +/* Bytecode is sequence of 4 byte commands followed by variable arguments. + * All the commands identified by "code" are conditional jumps forward: + * to offset cc+"yes" or to offset cc+"no". "yes" is supposed to be + * length of the command and its arguments. + */ + +struct inet_diag_bc_op { + unsigned char code; + unsigned char yes; + unsigned short no; +}; + +enum { + INET_DIAG_BC_NOP, + INET_DIAG_BC_JMP, + INET_DIAG_BC_S_GE, + INET_DIAG_BC_S_LE, + INET_DIAG_BC_D_GE, + INET_DIAG_BC_D_LE, + INET_DIAG_BC_AUTO, + INET_DIAG_BC_S_COND, + INET_DIAG_BC_D_COND, +}; + +struct inet_diag_hostcond { + __u8 family; + __u8 prefix_len; + int port; + __be32 addr[0]; +}; + +/* Base info structure. It contains socket identity (addrs/ports/cookie) + * and, alas, the information shown by netstat. 
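The parsing helpers declared above are the kernel-side counterparts of inet_pton(3). A hedged sketch of in4_pton() usage (the wrapper function is hypothetical): a negative srclen is treated as strlen(src), parsing stops at the delimiter, and the return value is non-zero only on success.

/* Sketch: parse the address part of a "192.0.2.1:80"-style string. */
static int parse_ipv4_endpoint(const char *str, __be32 *addr)
{
	const char *end;

	if (!in4_pton(str, -1, (u8 *)addr, ':', &end))
		return -EINVAL;		/* not a valid dotted quad */
	return 0;			/* 'end' points where parsing stopped */
}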
*/ +struct inet_diag_msg { + __u8 idiag_family; + __u8 idiag_state; + __u8 idiag_timer; + __u8 idiag_retrans; + + struct inet_diag_sockid id; + + __u32 idiag_expires; + __u32 idiag_rqueue; + __u32 idiag_wqueue; + __u32 idiag_uid; + __u32 idiag_inode; +}; + +/* Extensions */ + +enum { + INET_DIAG_NONE, + INET_DIAG_MEMINFO, + INET_DIAG_INFO, + INET_DIAG_VEGASINFO, + INET_DIAG_CONG, + INET_DIAG_TOS, + INET_DIAG_TCLASS, + INET_DIAG_SKMEMINFO, +}; + +#define INET_DIAG_MAX INET_DIAG_SKMEMINFO + + +/* INET_DIAG_MEM */ + +struct inet_diag_meminfo { + __u32 idiag_rmem; + __u32 idiag_wmem; + __u32 idiag_fmem; + __u32 idiag_tmem; +}; + +/* INET_DIAG_VEGASINFO */ + +struct tcpvegas_info { + __u32 tcpv_enabled; + __u32 tcpv_rttcnt; + __u32 tcpv_rtt; + __u32 tcpv_minrtt; +}; + +#ifdef __KERNEL__ +struct sock; +struct inet_hashinfo; +struct nlattr; +struct nlmsghdr; +struct sk_buff; +struct netlink_callback; + +struct inet_diag_handler { + void (*dump)(struct sk_buff *skb, + struct netlink_callback *cb, + struct inet_diag_req_v2 *r, + struct nlattr *bc); + + int (*dump_one)(struct sk_buff *in_skb, + const struct nlmsghdr *nlh, + struct inet_diag_req_v2 *req); + + void (*idiag_get_info)(struct sock *sk, + struct inet_diag_msg *r, + void *info); + __u16 idiag_type; +}; + +struct inet_connection_sock; +int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, + struct sk_buff *skb, struct inet_diag_req_v2 *req, + u32 pid, u32 seq, u16 nlmsg_flags, + const struct nlmsghdr *unlh); +void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, + struct netlink_callback *cb, struct inet_diag_req_v2 *r, + struct nlattr *bc); +int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, + struct sk_buff *in_skb, const struct nlmsghdr *nlh, + struct inet_diag_req_v2 *req); + +int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); + +extern int inet_diag_register(const struct inet_diag_handler *handler); +extern void inet_diag_unregister(const struct inet_diag_handler *handler); +#endif /* __KERNEL__ */ + +#endif /* _INET_DIAG_H_ */ diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h new file mode 100644 index 00000000..2cf55afb --- /dev/null +++ b/include/linux/inet_lro.h @@ -0,0 +1,165 @@ +/* + * linux/include/linux/inet_lro.h + * + * Large Receive Offload (ipv4 / tcp) + * + * (C) Copyright IBM Corp. 2007 + * + * Authors: + * Jan-Bernd Themann <themann@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
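struct inet_diag_req_v2 above is what the sock-diag netlink interface expects in a dump request, and struct inet_diag_msg is what comes back per socket. A userspace sketch follows; note that SOCK_DIAG_BY_FAMILY and NETLINK_SOCK_DIAG are assumptions imported from <linux/sock_diag.h> and <linux/netlink.h>, not from this header.

#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <netinet/in.h>

/* Dump every TCP/IPv4 socket and request the INET_DIAG_INFO extension. */
static void build_tcp_dump_request(void)
{
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} msg = {
		.nlh = {
			.nlmsg_len   = sizeof(msg),
			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family   = AF_INET,
			.sdiag_protocol = IPPROTO_TCP,
			.idiag_states   = ~0U,	/* all TCP states */
			.idiag_ext      = 1 << (INET_DIAG_INFO - 1),
		},
	};

	/* ... send 'msg' on a NETLINK_SOCK_DIAG socket and parse the
	 * struct inet_diag_msg replies ... */
	(void)msg;
}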
+ */ + +#ifndef __INET_LRO_H_ +#define __INET_LRO_H_ + +#include <net/ip.h> +#include <net/tcp.h> + +/* + * LRO statistics + */ + +struct net_lro_stats { + unsigned long aggregated; + unsigned long flushed; + unsigned long no_desc; +}; + +/* + * LRO descriptor for a tcp session + */ +struct net_lro_desc { + struct sk_buff *parent; + struct sk_buff *last_skb; + struct skb_frag_struct *next_frag; + struct iphdr *iph; + struct tcphdr *tcph; + __wsum data_csum; + __be32 tcp_rcv_tsecr; + __be32 tcp_rcv_tsval; + __be32 tcp_ack; + u32 tcp_next_seq; + u32 skb_tot_frags_len; + u16 ip_tot_len; + u16 tcp_saw_tstamp; /* timestamps enabled */ + __be16 tcp_window; + int pkt_aggr_cnt; /* counts aggregated packets */ + int vlan_packet; + int mss; + int active; +}; + +/* + * Large Receive Offload (LRO) Manager + * + * Fields must be set by driver + */ + +struct net_lro_mgr { + struct net_device *dev; + struct net_lro_stats stats; + + /* LRO features */ + unsigned long features; +#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */ +#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted + from received packets and eth protocol + is still ETH_P_8021Q */ + + /* + * Set for generated SKBs that are not added to + * the frag list in fragmented mode + */ + u32 ip_summed; + u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY + * or CHECKSUM_NONE */ + + int max_desc; /* Max number of LRO descriptors */ + int max_aggr; /* Max number of LRO packets to be aggregated */ + + int frag_align_pad; /* Padding required to properly align layer 3 + * headers in generated skb when using frags */ + + struct net_lro_desc *lro_arr; /* Array of LRO descriptors */ + + /* + * Optimized driver functions + * + * get_skb_header: returns tcp and ip header for packet in SKB + */ + int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr, + void **tcpudp_hdr, u64 *hdr_flags, void *priv); + + /* hdr_flags: */ +#define LRO_IPV4 1 /* ip_hdr is IPv4 header */ +#define LRO_TCP 2 /* tcpudp_hdr is TCP header */ + + /* + * get_frag_header: returns mac, tcp and ip header for packet in SKB + * + * @hdr_flags: Indicate what kind of LRO has to be done + * (IPv4/IPv6/TCP/UDP) + */ + int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr, + void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, + void *priv); +}; + +/* + * Processes a SKB + * + * @lro_mgr: LRO manager to use + * @skb: SKB to aggregate + * @priv: Private data that may be used by driver functions + * (for example get_tcp_ip_hdr) + */ + +void lro_receive_skb(struct net_lro_mgr *lro_mgr, + struct sk_buff *skb, + void *priv); + +/* + * Processes a fragment list + * + * This functions aggregate fragments and generate SKBs do pass + * the packets to the stack. + * + * @lro_mgr: LRO manager to use + * @frags: Fragment to be processed. Must contain entire header in first + * element. 
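struct net_lro_mgr above is filled in by the network driver, which then feeds received packets through lro_receive_skb()/lro_receive_frags() and flushes with lro_flush_all() at the end of a NAPI poll. A hedged driver-side sketch (mynic_* and the adapter struct are hypothetical, and it assumes the Ethernet header has already been pulled so skb->data points at the IP header):

/* Locate the IPv4/TCP headers for the LRO engine; non-TCP frames bail out. */
static int mynic_get_skb_header(struct sk_buff *skb, void **ip_hdr,
				void **tcpudp_hdr, u64 *hdr_flags, void *priv)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	if (iph->protocol != IPPROTO_TCP)
		return -1;
	*ip_hdr = iph;
	*tcpudp_hdr = (u8 *)iph + iph->ihl * 4;
	*hdr_flags = LRO_IPV4 | LRO_TCP;
	return 0;
}

static void mynic_setup_lro(struct mynic_adapter *adapter)
{
	struct net_lro_mgr *mgr = &adapter->lro_mgr;

	mgr->dev = adapter->netdev;
	mgr->features = LRO_F_NAPI;		/* hand aggregated skbs to NAPI */
	mgr->ip_summed = CHECKSUM_UNNECESSARY;
	mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	mgr->max_desc = 8;			/* concurrent TCP sessions tracked */
	mgr->max_aggr = 32;			/* packets merged per session */
	mgr->lro_arr = adapter->lro_desc;	/* array of max_desc descriptors */
	mgr->get_skb_header = mynic_get_skb_header;
}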
+ * @len: Length of received data + * @true_size: Actual size of memory the fragment is consuming + * @priv: Private data that may be used by driver functions + * (for example get_tcp_ip_hdr) + */ + +void lro_receive_frags(struct net_lro_mgr *lro_mgr, + struct skb_frag_struct *frags, + int len, int true_size, void *priv, __wsum sum); + +/* + * Forward all aggregated SKBs held by lro_mgr to network stack + */ + +void lro_flush_all(struct net_lro_mgr *lro_mgr); + +void lro_flush_pkt(struct net_lro_mgr *lro_mgr, + struct iphdr *iph, struct tcphdr *tcph); + +#endif diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h new file mode 100644 index 00000000..597f4a9f --- /dev/null +++ b/include/linux/inetdevice.h @@ -0,0 +1,259 @@ +#ifndef _LINUX_INETDEVICE_H +#define _LINUX_INETDEVICE_H + +#ifdef __KERNEL__ + +#include <linux/bitmap.h> +#include <linux/if.h> +#include <linux/netdevice.h> +#include <linux/rcupdate.h> +#include <linux/timer.h> +#include <linux/sysctl.h> +#include <linux/rtnetlink.h> + +enum +{ + IPV4_DEVCONF_FORWARDING=1, + IPV4_DEVCONF_MC_FORWARDING, + IPV4_DEVCONF_PROXY_ARP, + IPV4_DEVCONF_ACCEPT_REDIRECTS, + IPV4_DEVCONF_SECURE_REDIRECTS, + IPV4_DEVCONF_SEND_REDIRECTS, + IPV4_DEVCONF_SHARED_MEDIA, + IPV4_DEVCONF_RP_FILTER, + IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE, + IPV4_DEVCONF_BOOTP_RELAY, + IPV4_DEVCONF_LOG_MARTIANS, + IPV4_DEVCONF_TAG, + IPV4_DEVCONF_ARPFILTER, + IPV4_DEVCONF_MEDIUM_ID, + IPV4_DEVCONF_NOXFRM, + IPV4_DEVCONF_NOPOLICY, + IPV4_DEVCONF_FORCE_IGMP_VERSION, + IPV4_DEVCONF_ARP_ANNOUNCE, + IPV4_DEVCONF_ARP_IGNORE, + IPV4_DEVCONF_PROMOTE_SECONDARIES, + IPV4_DEVCONF_ARP_ACCEPT, + IPV4_DEVCONF_ARP_NOTIFY, + IPV4_DEVCONF_ACCEPT_LOCAL, + IPV4_DEVCONF_SRC_VMARK, + IPV4_DEVCONF_PROXY_ARP_PVLAN, + __IPV4_DEVCONF_MAX +}; + +#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1) + +struct ipv4_devconf { + void *sysctl; + int data[IPV4_DEVCONF_MAX]; + DECLARE_BITMAP(state, IPV4_DEVCONF_MAX); +}; + +struct in_device { + struct net_device *dev; + atomic_t refcnt; + int dead; + struct in_ifaddr *ifa_list; /* IP ifaddr chain */ + struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ + int mc_count; /* Number of installed mcasts */ + spinlock_t mc_tomb_lock; + struct ip_mc_list *mc_tomb; + unsigned long mr_v1_seen; + unsigned long mr_v2_seen; + unsigned long mr_maxdelay; + unsigned char mr_qrv; + unsigned char mr_gq_running; + unsigned char mr_ifc_count; + struct timer_list mr_gq_timer; /* general query timer */ + struct timer_list mr_ifc_timer; /* interface change timer */ + + struct neigh_parms *arp_parms; + struct ipv4_devconf cnf; + struct rcu_head rcu_head; +}; + +#define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1]) +#define IPV4_DEVCONF_ALL(net, attr) \ + IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr) + +static inline int ipv4_devconf_get(struct in_device *in_dev, int index) +{ + index--; + return in_dev->cnf.data[index]; +} + +static inline void ipv4_devconf_set(struct in_device *in_dev, int index, + int val) +{ + index--; + set_bit(index, in_dev->cnf.state); + in_dev->cnf.data[index] = val; +} + +static inline void ipv4_devconf_setall(struct in_device *in_dev) +{ + bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX); +} + +#define IN_DEV_CONF_GET(in_dev, attr) \ + ipv4_devconf_get((in_dev), IPV4_DEVCONF_ ## attr) +#define IN_DEV_CONF_SET(in_dev, attr, val) \ + ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val)) + +#define IN_DEV_ANDCONF(in_dev, attr) \ + (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \ + IN_DEV_CONF_GET((in_dev), 
attr)) +#define IN_DEV_ORCONF(in_dev, attr) \ + (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) || \ + IN_DEV_CONF_GET((in_dev), attr)) +#define IN_DEV_MAXCONF(in_dev, attr) \ + (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \ + IN_DEV_CONF_GET((in_dev), attr))) + +#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) +#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) +#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) +#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) +#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ + ACCEPT_SOURCE_ROUTE) +#define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL) +#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) + +#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS) +#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP) +#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN) +#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA) +#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS) +#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \ + SECURE_REDIRECTS) +#define IN_DEV_IDTAG(in_dev) IN_DEV_CONF_GET(in_dev, TAG) +#define IN_DEV_MEDIUM_ID(in_dev) IN_DEV_CONF_GET(in_dev, MEDIUM_ID) +#define IN_DEV_PROMOTE_SECONDARIES(in_dev) \ + IN_DEV_ORCONF((in_dev), \ + PROMOTE_SECONDARIES) + +#define IN_DEV_RX_REDIRECTS(in_dev) \ + ((IN_DEV_FORWARD(in_dev) && \ + IN_DEV_ANDCONF((in_dev), ACCEPT_REDIRECTS)) \ + || (!IN_DEV_FORWARD(in_dev) && \ + IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) + +#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) +#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) +#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) +#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) +#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) + +struct in_ifaddr { + struct hlist_node hash; + struct in_ifaddr *ifa_next; + struct in_device *ifa_dev; + struct rcu_head rcu_head; + __be32 ifa_local; + __be32 ifa_address; + __be32 ifa_mask; + __be32 ifa_broadcast; + unsigned char ifa_scope; + unsigned char ifa_flags; + unsigned char ifa_prefixlen; + char ifa_label[IFNAMSIZ]; +}; + +extern int register_inetaddr_notifier(struct notifier_block *nb); +extern int unregister_inetaddr_notifier(struct notifier_block *nb); + +extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); +static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) +{ + return __ip_dev_find(net, addr, true); +} + +extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); +extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); +extern void devinet_init(void); +extern struct in_device *inetdev_by_index(struct net *, int); +extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); +extern __be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope); +extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); + +static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) +{ + return !((addr^ifa->ifa_address)&ifa->ifa_mask); +} + +/* + * Check if a mask is acceptable. 
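The three combinators above differ only in how the "all" sysctl and the per-interface sysctl are merged, and each IN_DEV_* wrapper encodes which policy its option uses. A small illustration (the function is hypothetical and assumes the caller keeps in_dev valid, e.g. under RCU):

static void show_devconf_policies(struct in_device *in_dev)
{
	bool mc_fwd   = IN_DEV_MFORWARD(in_dev);  /* ANDCONF: all && device */
	bool proxyarp = IN_DEV_PROXY_ARP(in_dev); /* ORCONF:  all || device */
	int  rpfilter = IN_DEV_RPFILTER(in_dev);  /* MAXCONF: max(all, device) */

	pr_debug("mc_forwarding=%d proxy_arp=%d rp_filter=%d\n",
		 mc_fwd, proxyarp, rpfilter);
}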
+ */ + +static __inline__ int bad_mask(__be32 mask, __be32 addr) +{ + __u32 hmask; + if (addr & (mask = ~mask)) + return 1; + hmask = ntohl(mask); + if (hmask & (hmask+1)) + return 1; + return 0; +} + +#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \ + for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next) + +#define for_ifa(in_dev) { struct in_ifaddr *ifa; \ + for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next) + + +#define endfor_ifa(in_dev) } + +static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) +{ + return rcu_dereference(dev->ip_ptr); +} + +static inline struct in_device *in_dev_get(const struct net_device *dev) +{ + struct in_device *in_dev; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(dev); + if (in_dev) + atomic_inc(&in_dev->refcnt); + rcu_read_unlock(); + return in_dev; +} + +static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) +{ + return rtnl_dereference(dev->ip_ptr); +} + +extern void in_dev_finish_destroy(struct in_device *idev); + +static inline void in_dev_put(struct in_device *idev) +{ + if (atomic_dec_and_test(&idev->refcnt)) + in_dev_finish_destroy(idev); +} + +#define __in_dev_put(idev) atomic_dec(&(idev)->refcnt) +#define in_dev_hold(idev) atomic_inc(&(idev)->refcnt) + +#endif /* __KERNEL__ */ + +static __inline__ __be32 inet_make_mask(int logmask) +{ + if (logmask) + return htonl(~((1<<(32-logmask))-1)); + return 0; +} + +static __inline__ int inet_mask_len(__be32 mask) +{ + __u32 hmask = ntohl(mask); + if (!hmask) + return 0; + return 32 - ffz(~hmask); +} + + +#endif /* _LINUX_INETDEVICE_H */ diff --git a/include/linux/init.h b/include/linux/init.h new file mode 100644 index 00000000..6b951095 --- /dev/null +++ b/include/linux/init.h @@ -0,0 +1,350 @@ +#ifndef _LINUX_INIT_H +#define _LINUX_INIT_H + +#include <linux/compiler.h> +#include <linux/types.h> + +/* These macros are used to mark some functions or + * initialized data (doesn't apply to uninitialized data) + * as `initialization' functions. The kernel can take this + * as hint that the function is used only during the initialization + * phase and free up used memory resources after + * + * Usage: + * For functions: + * + * You should add __init immediately before the function name, like: + * + * static void __init initme(int x, int y) + * { + * extern int z; z = x * y; + * } + * + * If the function has a prototype somewhere, you can also add + * __init between closing brace of the prototype and semicolon: + * + * extern int initialize_foobar_device(int, int, int) __init; + * + * For initialized data: + * You should insert __initdata between the variable name and equal + * sign followed by value, e.g.: + * + * static int init_variable __initdata = 0; + * static const char linux_logo[] __initconst = { 0x32, 0x36, ... }; + * + * Don't forget to initialize data not at file scope, i.e. within a function, + * as gcc otherwise puts the data into the bss section and not into the init + * section. + * + * Also note, that this data cannot be "const". + */ + +/* These are for everybody (although not all archs will actually + discard it in modules) */ +#define __init __section(.init.text) __cold notrace +#define __initdata __section(.init.data) +#define __initconst __section(.init.rodata) +#define __exitdata __section(.exit.data) +#define __exit_call __used __section(.exitcall.exit) + +/* + * modpost check for section mismatches during the kernel build. 
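Tying together the inetdevice.h accessors shown above (__in_dev_get_rtnl(), the for_primary_ifa()/endfor_ifa() iterator and inet_mask_len()), here is a hedged sketch that lists a device's primary IPv4 addresses; it assumes the caller holds the RTNL lock so the ifa_list cannot change underneath it:

static void dump_primary_addrs(struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);

	if (!in_dev)
		return;
	for_primary_ifa(in_dev) {
		pr_info("%s: %pI4/%d\n", ifa->ifa_label,
			&ifa->ifa_local, inet_mask_len(ifa->ifa_mask));
	} endfor_ifa(in_dev);
}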
+ * A section mismatch happens when there are references from a + * code or data section to an init section (both code or data). + * The init sections are (for most archs) discarded by the kernel + * when early init has completed so all such references are potential bugs. + * For exit sections the same issue exists. + * + * The following markers are used for the cases where the reference to + * the *init / *exit section (code or data) is valid and will teach + * modpost not to issue a warning. Intended semantics is that a code or + * data tagged __ref* can reference code or data from init section without + * producing a warning (of course, no warning does not mean code is + * correct, so optimally document why the __ref is needed and why it's OK). + * + * The markers follow same syntax rules as __init / __initdata. + */ +#define __ref __section(.ref.text) noinline +#define __refdata __section(.ref.data) +#define __refconst __section(.ref.rodata) + +/* compatibility defines */ +#define __init_refok __ref +#define __initdata_refok __refdata +#define __exit_refok __ref + + +#ifdef MODULE +#define __exitused +#else +#define __exitused __used +#endif + +#define __exit __section(.exit.text) __exitused __cold notrace + +/* Used for HOTPLUG */ +#define __devinit __section(.devinit.text) __cold notrace +#define __devinitdata __section(.devinit.data) +#define __devinitconst __section(.devinit.rodata) +#define __devexit __section(.devexit.text) __exitused __cold notrace +#define __devexitdata __section(.devexit.data) +#define __devexitconst __section(.devexit.rodata) + +/* Used for HOTPLUG_CPU */ +#define __cpuinit __section(.cpuinit.text) __cold notrace +#define __cpuinitdata __section(.cpuinit.data) +#define __cpuinitconst __section(.cpuinit.rodata) +#define __cpuexit __section(.cpuexit.text) __exitused __cold notrace +#define __cpuexitdata __section(.cpuexit.data) +#define __cpuexitconst __section(.cpuexit.rodata) + +/* Used for MEMORY_HOTPLUG */ +#define __meminit __section(.meminit.text) __cold notrace +#define __meminitdata __section(.meminit.data) +#define __meminitconst __section(.meminit.rodata) +#define __memexit __section(.memexit.text) __exitused __cold notrace +#define __memexitdata __section(.memexit.data) +#define __memexitconst __section(.memexit.rodata) + +/* For assembly routines */ +#define __HEAD .section ".head.text","ax" +#define __INIT .section ".init.text","ax" +#define __FINIT .previous + +#define __INITDATA .section ".init.data","aw",%progbits +#define __INITRODATA .section ".init.rodata","a",%progbits +#define __FINITDATA .previous + +#define __DEVINIT .section ".devinit.text", "ax" +#define __DEVINITDATA .section ".devinit.data", "aw" +#define __DEVINITRODATA .section ".devinit.rodata", "a" + +#define __CPUINIT .section ".cpuinit.text", "ax" +#define __CPUINITDATA .section ".cpuinit.data", "aw" +#define __CPUINITRODATA .section ".cpuinit.rodata", "a" + +#define __MEMINIT .section ".meminit.text", "ax" +#define __MEMINITDATA .section ".meminit.data", "aw" +#define __MEMINITRODATA .section ".meminit.rodata", "a" + +/* silence warnings when references are OK */ +#define __REF .section ".ref.text", "ax" +#define __REFDATA .section ".ref.data", "aw" +#define __REFCONST .section ".ref.rodata", "a" + +#ifndef __ASSEMBLY__ +/* + * Used for initialization calls.. 
+ */ +typedef int (*initcall_t)(void); +typedef void (*exitcall_t)(void); + +extern initcall_t __con_initcall_start[], __con_initcall_end[]; +extern initcall_t __security_initcall_start[], __security_initcall_end[]; + +/* Used for contructor calls. */ +typedef void (*ctor_fn_t)(void); + +/* Defined in init/main.c */ +extern int do_one_initcall(initcall_t fn); +extern char __initdata boot_command_line[]; +extern char *saved_command_line; +extern unsigned int reset_devices; + +/* used by init/main.c */ +void setup_arch(char **); +void prepare_namespace(void); + +extern void (*late_time_init)(void); + +extern bool initcall_debug; + +#endif + +#ifndef MODULE + +#ifndef __ASSEMBLY__ + +/* initcalls are now grouped by functionality into separate + * subsections. Ordering inside the subsections is determined + * by link order. + * For backwards compatibility, initcall() puts the call in + * the device init subsection. + * + * The `id' arg to __define_initcall() is needed so that multiple initcalls + * can point at the same handler without causing duplicate-symbol build errors. + */ + +#define __define_initcall(level,fn,id) \ + static initcall_t __initcall_##fn##id __used \ + __attribute__((__section__(".initcall" level ".init"))) = fn + +/* + * Early initcalls run before initializing SMP. + * + * Only for built-in code, not modules. + */ +#define early_initcall(fn) __define_initcall("early",fn,early) + +/* + * A "pure" initcall has no dependencies on anything else, and purely + * initializes variables that couldn't be statically initialized. + * + * This only exists for built-in code, not for modules. + */ +#define pure_initcall(fn) __define_initcall("0",fn,0) + +#define core_initcall(fn) __define_initcall("1",fn,1) +#define core_initcall_sync(fn) __define_initcall("1s",fn,1s) +#define postcore_initcall(fn) __define_initcall("2",fn,2) +#define postcore_initcall_sync(fn) __define_initcall("2s",fn,2s) +#define arch_initcall(fn) __define_initcall("3",fn,3) +#define arch_initcall_sync(fn) __define_initcall("3s",fn,3s) +#define subsys_initcall(fn) __define_initcall("4",fn,4) +#define subsys_initcall_sync(fn) __define_initcall("4s",fn,4s) +#define fs_initcall(fn) __define_initcall("5",fn,5) +#define fs_initcall_sync(fn) __define_initcall("5s",fn,5s) +#define rootfs_initcall(fn) __define_initcall("rootfs",fn,rootfs) +#define device_initcall(fn) __define_initcall("6",fn,6) +#define device_initcall_sync(fn) __define_initcall("6s",fn,6s) +#define late_initcall(fn) __define_initcall("7",fn,7) +#define late_initcall_sync(fn) __define_initcall("7s",fn,7s) + +#define __initcall(fn) device_initcall(fn) + +#define __exitcall(fn) \ + static exitcall_t __exitcall_##fn __exit_call = fn + +#define console_initcall(fn) \ + static initcall_t __initcall_##fn \ + __used __section(.con_initcall.init) = fn + +#define security_initcall(fn) \ + static initcall_t __initcall_##fn \ + __used __section(.security_initcall.init) = fn + +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +/* + * Only for really core code. See moduleparam.h for the normal way. + * + * Force the alignment so the compiler doesn't space elements of the + * obs_kernel_param "array" too far apart in .init.setup. 
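The level macros above differ only in which .initcallN.init section the function pointer lands in; link order decides ordering within a level. A minimal sketch of a built-in hook (the myfeature name is hypothetical):

/* Runs once at boot, after core and postcore initcalls; the __init body is
 * discarded when init memory is freed. */
static int __init myfeature_init(void)
{
	pr_info("myfeature: ready\n");
	return 0;
}
subsys_initcall(myfeature_init);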
+ */ +#define __setup_param(str, unique_id, fn, early) \ + static const char __setup_str_##unique_id[] __initconst \ + __aligned(1) = str; \ + static struct obs_kernel_param __setup_##unique_id \ + __used __section(.init.setup) \ + __attribute__((aligned((sizeof(long))))) \ + = { __setup_str_##unique_id, fn, early } + +#define __setup(str, fn) \ + __setup_param(str, fn, fn, 0) + +/* NOTE: fn is as per module_param, not __setup! Emits warning if fn + * returns non-zero. */ +#define early_param(str, fn) \ + __setup_param(str, fn, fn, 1) + +/* Relies on boot_command_line being set */ +void __init parse_early_param(void); +void __init parse_early_options(char *cmdline); +#endif /* __ASSEMBLY__ */ + +/** + * module_init() - driver initialization entry point + * @x: function to be run at kernel boot time or module insertion + * + * module_init() will either be called during do_initcalls() (if + * builtin) or at module insertion time (if a module). There can only + * be one per module. + */ +#define module_init(x) __initcall(x); + +/** + * module_exit() - driver exit entry point + * @x: function to be run when driver is removed + * + * module_exit() will wrap the driver clean-up code + * with cleanup_module() when used with rmmod when + * the driver is a module. If the driver is statically + * compiled into the kernel, module_exit() has no effect. + * There can only be one per module. + */ +#define module_exit(x) __exitcall(x); + +#else /* MODULE */ + +/* Don't use these in modules, but some people do... */ +#define early_initcall(fn) module_init(fn) +#define core_initcall(fn) module_init(fn) +#define postcore_initcall(fn) module_init(fn) +#define arch_initcall(fn) module_init(fn) +#define subsys_initcall(fn) module_init(fn) +#define fs_initcall(fn) module_init(fn) +#define device_initcall(fn) module_init(fn) +#define late_initcall(fn) module_init(fn) + +#define security_initcall(fn) module_init(fn) + +/* Each module must use one module_init(). */ +#define module_init(initfn) \ + static inline initcall_t __inittest(void) \ + { return initfn; } \ + int init_module(void) __attribute__((alias(#initfn))); + +/* This is only required if you want to be unloadable. */ +#define module_exit(exitfn) \ + static inline exitcall_t __exittest(void) \ + { return exitfn; } \ + void cleanup_module(void) __attribute__((alias(#exitfn))); + +#define __setup_param(str, unique_id, fn) /* nothing */ +#define __setup(str, func) /* nothing */ +#endif + +/* Data marked not to be saved by software suspend */ +#define __nosavedata __section(.data..nosave) + +/* This means "can be init if no module support, otherwise module load + may call it." */ +#ifdef CONFIG_MODULES +#define __init_or_module +#define __initdata_or_module +#define __initconst_or_module +#define __INIT_OR_MODULE .text +#define __INITDATA_OR_MODULE .data +#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits +#else +#define __init_or_module __init +#define __initdata_or_module __initdata +#define __initconst_or_module __initconst +#define __INIT_OR_MODULE __INIT +#define __INITDATA_OR_MODULE __INITDATA +#define __INITRODATA_OR_MODULE __INITRODATA +#endif /*CONFIG_MODULES*/ + +/* Functions marked as __devexit may be discarded at kernel link time, depending + on config options. Newer versions of binutils detect references from + retained sections to discarded sections and flag an error. Pointers to + __devexit functions must use __devexit_p(function_name), the wrapper will + insert either the function_name or NULL, depending on the config options. 
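__setup() and early_param() register an obs_kernel_param entry keyed on the option prefix; the handler receives the text after the '=' and, for __setup(), returns 1 once the option has been consumed. A hedged sketch (the parameter name is made up; get_option() is the usual kernel helper for parsing an integer):

static int mydriver_debug;

static int __init mydriver_debug_setup(char *str)
{
	get_option(&str, &mydriver_debug);
	return 1;	/* handled; don't pass it to init's environment */
}
__setup("mydriver_debug=", mydriver_debug_setup);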
+ */ +#if defined(MODULE) || defined(CONFIG_HOTPLUG) +#define __devexit_p(x) x +#else +#define __devexit_p(x) NULL +#endif + +#ifdef MODULE +#define __exit_p(x) x +#else +#define __exit_p(x) NULL +#endif + +#endif /* _LINUX_INIT_H */ diff --git a/include/linux/init_ohci1394_dma.h b/include/linux/init_ohci1394_dma.h new file mode 100644 index 00000000..3c03a4bb --- /dev/null +++ b/include/linux/init_ohci1394_dma.h @@ -0,0 +1,4 @@ +#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT +extern int __initdata init_ohci1394_dma_early; +extern void __init init_ohci1394_dma_on_all_controllers(void); +#endif diff --git a/include/linux/init_task.h b/include/linux/init_task.h new file mode 100644 index 00000000..e4baff5f --- /dev/null +++ b/include/linux/init_task.h @@ -0,0 +1,217 @@ +#ifndef _LINUX__INIT_TASK_H +#define _LINUX__INIT_TASK_H + +#include <linux/rcupdate.h> +#include <linux/irqflags.h> +#include <linux/utsname.h> +#include <linux/lockdep.h> +#include <linux/ftrace.h> +#include <linux/ipc.h> +#include <linux/pid_namespace.h> +#include <linux/user_namespace.h> +#include <linux/securebits.h> +#include <net/net_namespace.h> + +#ifdef CONFIG_SMP +# define INIT_PUSHABLE_TASKS(tsk) \ + .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), +#else +# define INIT_PUSHABLE_TASKS(tsk) +#endif + +extern struct files_struct init_files; +extern struct fs_struct init_fs; + +#ifdef CONFIG_CGROUPS +#define INIT_GROUP_RWSEM(sig) \ + .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem), +#else +#define INIT_GROUP_RWSEM(sig) +#endif + +#ifdef CONFIG_CPUSETS +#define INIT_CPUSET_SEQ \ + .mems_allowed_seq = SEQCNT_ZERO, +#else +#define INIT_CPUSET_SEQ +#endif + +#define INIT_SIGNALS(sig) { \ + .nr_threads = 1, \ + .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ + .shared_pending = { \ + .list = LIST_HEAD_INIT(sig.shared_pending.list), \ + .signal = {{0}}}, \ + .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ + .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ + .rlim = INIT_RLIMITS, \ + .cputimer = { \ + .cputime = INIT_CPUTIME, \ + .running = 0, \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ + }, \ + .cred_guard_mutex = \ + __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ + INIT_GROUP_RWSEM(sig) \ +} + +extern struct nsproxy init_nsproxy; + +#define INIT_SIGHAND(sighand) { \ + .count = ATOMIC_INIT(1), \ + .action = { { { .sa_handler = SIG_DFL, } }, }, \ + .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ + .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \ +} + +extern struct group_info init_groups; + +#define INIT_STRUCT_PID { \ + .count = ATOMIC_INIT(1), \ + .tasks = { \ + { .first = NULL }, \ + { .first = NULL }, \ + { .first = NULL }, \ + }, \ + .level = 0, \ + .numbers = { { \ + .nr = 0, \ + .ns = &init_pid_ns, \ + .pid_chain = { .next = NULL, .pprev = NULL }, \ + }, } \ +} + +#define INIT_PID_LINK(type) \ +{ \ + .node = { \ + .next = NULL, \ + .pprev = NULL, \ + }, \ + .pid = &init_struct_pid, \ +} + +#ifdef CONFIG_AUDITSYSCALL +#define INIT_IDS \ + .loginuid = -1, \ + .sessionid = -1, +#else +#define INIT_IDS +#endif + +#ifdef CONFIG_RCU_BOOST +#define INIT_TASK_RCU_BOOST() \ + .rcu_boost_mutex = NULL, +#else +#define INIT_TASK_RCU_BOOST() +#endif +#ifdef CONFIG_TREE_PREEMPT_RCU +#define INIT_TASK_RCU_TREE_PREEMPT() \ + .rcu_blocked_node = NULL, +#else +#define INIT_TASK_RCU_TREE_PREEMPT(tsk) +#endif +#ifdef CONFIG_PREEMPT_RCU +#define INIT_TASK_RCU_PREEMPT(tsk) \ + .rcu_read_lock_nesting = 0, \ + .rcu_read_unlock_special = 0, \ + .rcu_node_entry = 
LIST_HEAD_INIT(tsk.rcu_node_entry), \ + INIT_TASK_RCU_TREE_PREEMPT() \ + INIT_TASK_RCU_BOOST() +#else +#define INIT_TASK_RCU_PREEMPT(tsk) +#endif + +extern struct cred init_cred; + +#ifdef CONFIG_PERF_EVENTS +# define INIT_PERF_EVENTS(tsk) \ + .perf_event_mutex = \ + __MUTEX_INITIALIZER(tsk.perf_event_mutex), \ + .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list), +#else +# define INIT_PERF_EVENTS(tsk) +#endif + +#define INIT_TASK_COMM "swapper" + +/* + * INIT_TASK is used to set up the first task table, touch at + * your own risk!. Base=0, limit=0x1fffff (=2MB) + */ +#define INIT_TASK(tsk) \ +{ \ + .state = 0, \ + .stack = &init_thread_info, \ + .usage = ATOMIC_INIT(2), \ + .flags = PF_KTHREAD, \ + .prio = MAX_PRIO-20, \ + .static_prio = MAX_PRIO-20, \ + .normal_prio = MAX_PRIO-20, \ + .policy = SCHED_NORMAL, \ + .cpus_allowed = CPU_MASK_ALL, \ + .mm = NULL, \ + .active_mm = &init_mm, \ + .se = { \ + .group_node = LIST_HEAD_INIT(tsk.se.group_node), \ + }, \ + .rt = { \ + .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ + .time_slice = RR_TIMESLICE, \ + .nr_cpus_allowed = NR_CPUS, \ + }, \ + .tasks = LIST_HEAD_INIT(tsk.tasks), \ + INIT_PUSHABLE_TASKS(tsk) \ + .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ + .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ + .real_parent = &tsk, \ + .parent = &tsk, \ + .children = LIST_HEAD_INIT(tsk.children), \ + .sibling = LIST_HEAD_INIT(tsk.sibling), \ + .group_leader = &tsk, \ + RCU_INIT_POINTER(.real_cred, &init_cred), \ + RCU_INIT_POINTER(.cred, &init_cred), \ + .comm = INIT_TASK_COMM, \ + .thread = INIT_THREAD, \ + .fs = &init_fs, \ + .files = &init_files, \ + .signal = &init_signals, \ + .sighand = &init_sighand, \ + .nsproxy = &init_nsproxy, \ + .pending = { \ + .list = LIST_HEAD_INIT(tsk.pending.list), \ + .signal = {{0}}}, \ + .blocked = {{0}}, \ + .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ + .journal_info = NULL, \ + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ + [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ + }, \ + .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ + INIT_IDS \ + INIT_PERF_EVENTS(tsk) \ + INIT_TRACE_IRQFLAGS \ + INIT_LOCKDEP \ + INIT_FTRACE_GRAPH \ + INIT_TRACE_RECURSION \ + INIT_TASK_RCU_PREEMPT(tsk) \ + INIT_CPUSET_SEQ \ +} + + +#define INIT_CPU_TIMERS(cpu_timers) \ +{ \ + LIST_HEAD_INIT(cpu_timers[0]), \ + LIST_HEAD_INIT(cpu_timers[1]), \ + LIST_HEAD_INIT(cpu_timers[2]), \ +} + +/* Attach to the init_task data structure for proper alignment */ +#define __init_task_data __attribute__((__section__(".data..init_task"))) + + +#endif diff --git a/include/linux/initrd.h b/include/linux/initrd.h new file mode 100644 index 00000000..55289d26 --- /dev/null +++ b/include/linux/initrd.h @@ -0,0 +1,20 @@ + +#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */ + +/* 1 = load ramdisk, 0 = don't load */ +extern int rd_doload; + +/* 1 = prompt for ramdisk, 0 = don't prompt */ +extern int rd_prompt; + +/* starting block # of image */ +extern int rd_image_start; + +/* 1 if it is not an error if initrd_start < memory_start */ +extern int initrd_below_start_ok; + +/* free_initrd_mem always gets called with the next two as arguments.. 
*/ +extern unsigned long initrd_start, initrd_end; +extern void free_initrd_mem(unsigned long, unsigned long); + +extern unsigned int real_root_dev; diff --git a/include/linux/inotify.h b/include/linux/inotify.h new file mode 100644 index 00000000..d33041e2 --- /dev/null +++ b/include/linux/inotify.h @@ -0,0 +1,87 @@ +/* + * Inode based directory notification for Linux + * + * Copyright (C) 2005 John McCutchan + */ + +#ifndef _LINUX_INOTIFY_H +#define _LINUX_INOTIFY_H + +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> +#include <linux/types.h> + +/* + * struct inotify_event - structure read from the inotify device for each event + * + * When you are watching a directory, you will receive the filename for events + * such as IN_CREATE, IN_DELETE, IN_OPEN, IN_CLOSE, ..., relative to the wd. + */ +struct inotify_event { + __s32 wd; /* watch descriptor */ + __u32 mask; /* watch mask */ + __u32 cookie; /* cookie to synchronize two events */ + __u32 len; /* length (including nulls) of name */ + char name[0]; /* stub for possible name */ +}; + +/* the following are legal, implemented events that user-space can watch for */ +#define IN_ACCESS 0x00000001 /* File was accessed */ +#define IN_MODIFY 0x00000002 /* File was modified */ +#define IN_ATTRIB 0x00000004 /* Metadata changed */ +#define IN_CLOSE_WRITE 0x00000008 /* Writtable file was closed */ +#define IN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */ +#define IN_OPEN 0x00000020 /* File was opened */ +#define IN_MOVED_FROM 0x00000040 /* File was moved from X */ +#define IN_MOVED_TO 0x00000080 /* File was moved to Y */ +#define IN_CREATE 0x00000100 /* Subfile was created */ +#define IN_DELETE 0x00000200 /* Subfile was deleted */ +#define IN_DELETE_SELF 0x00000400 /* Self was deleted */ +#define IN_MOVE_SELF 0x00000800 /* Self was moved */ + +/* the following are legal events. they are sent as needed to any watch */ +#define IN_UNMOUNT 0x00002000 /* Backing fs was unmounted */ +#define IN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ +#define IN_IGNORED 0x00008000 /* File was ignored */ + +/* helper events */ +#define IN_CLOSE (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) /* close */ +#define IN_MOVE (IN_MOVED_FROM | IN_MOVED_TO) /* moves */ + +/* special flags */ +#define IN_ONLYDIR 0x01000000 /* only watch the path if it is a directory */ +#define IN_DONT_FOLLOW 0x02000000 /* don't follow a sym link */ +#define IN_EXCL_UNLINK 0x04000000 /* exclude events on unlinked objects */ +#define IN_MASK_ADD 0x20000000 /* add to the mask of an already existing watch */ +#define IN_ISDIR 0x40000000 /* event occurred against dir */ +#define IN_ONESHOT 0x80000000 /* only send event once */ + +/* + * All of the events - we build the list by hand so that we can add flags in + * the future and not break backward compatibility. Apps will get only the + * events that they originally wanted. Be sure to add new events here! + */ +#define IN_ALL_EVENTS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \ + IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \ + IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \ + IN_MOVE_SELF) + +/* Flags for sys_inotify_init1. 
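In userspace the same event bits are consumed by reading variable-length struct inotify_event records from an inotify descriptor; 'len' gives the size of the NUL-padded name that follows the fixed header. A hedged sketch that reports files whose writes have completed:

#include <sys/inotify.h>
#include <unistd.h>
#include <stdio.h>

static void watch_dir(const char *path)
{
	char buf[4096];
	ssize_t n;
	int fd = inotify_init1(IN_CLOEXEC);

	inotify_add_watch(fd, path, IN_CLOSE_WRITE | IN_MOVED_TO);
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		char *p = buf;

		while (p < buf + n) {
			struct inotify_event *ev = (struct inotify_event *)p;

			if (ev->len)
				printf("finished: %s\n", ev->name);
			p += sizeof(*ev) + ev->len;	/* next record */
		}
	}
	close(fd);
}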
*/ +#define IN_CLOEXEC O_CLOEXEC +#define IN_NONBLOCK O_NONBLOCK + +#ifdef __KERNEL__ +#include <linux/sysctl.h> +extern struct ctl_table inotify_table[]; /* for sysctl */ + +#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \ + IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \ + IN_MOVED_TO | IN_CREATE | IN_DELETE | \ + IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \ + IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \ + IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \ + IN_ISDIR | IN_ONESHOT) + +#endif + +#endif /* _LINUX_INOTIFY_H */ diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h new file mode 100644 index 00000000..ce0b7246 --- /dev/null +++ b/include/linux/input-polldev.h @@ -0,0 +1,58 @@ +#ifndef _INPUT_POLLDEV_H +#define _INPUT_POLLDEV_H + +/* + * Copyright (c) 2007 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/input.h> +#include <linux/workqueue.h> + +/** + * struct input_polled_dev - simple polled input device + * @private: private driver data. + * @open: driver-supplied method that prepares device for polling + * (enabled the device and maybe flushes device state). + * @close: driver-supplied method that is called when device is no + * longer being polled. Used to put device into low power mode. + * @poll: driver-supplied method that polls the device and posts + * input events (mandatory). + * @poll_interval: specifies how often the poll() method should be called. + * Defaults to 500 msec unless overridden when registering the device. + * @poll_interval_max: specifies upper bound for the poll interval. + * Defaults to the initial value of @poll_interval. + * @poll_interval_min: specifies lower bound for the poll interval. + * Defaults to 0. + * @input: input device structure associated with the polled device. + * Must be properly initialized by the driver (id, name, phys, bits). + * + * Polled input device provides a skeleton for supporting simple input + * devices that do not raise interrupts but have to be periodically + * scanned or polled to detect changes in their state. + */ +struct input_polled_dev { + void *private; + + void (*open)(struct input_polled_dev *dev); + void (*close)(struct input_polled_dev *dev); + void (*poll)(struct input_polled_dev *dev); + unsigned int poll_interval; /* msec */ + unsigned int poll_interval_max; /* msec */ + unsigned int poll_interval_min; /* msec */ + + struct input_dev *input; + +/* private: */ + struct delayed_work work; +}; + +struct input_polled_dev *input_allocate_polled_device(void); +void input_free_polled_device(struct input_polled_dev *dev); +int input_register_polled_device(struct input_polled_dev *dev); +void input_unregister_polled_device(struct input_polled_dev *dev); + +#endif diff --git a/include/linux/input.h b/include/linux/input.h new file mode 100644 index 00000000..49fb20e3 --- /dev/null +++ b/include/linux/input.h @@ -0,0 +1,1656 @@ +#ifndef _INPUT_H +#define _INPUT_H + +/* + * Copyright (c) 1999-2002 Vojtech Pavlik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
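A hedged driver-side sketch of the polled-device API above (the mybtn_* names and the state-reading helper are hypothetical): only ->poll is mandatory, and leaving poll_interval at zero falls back to the 500 ms default documented above.

static void mybtn_poll(struct input_polled_dev *poll_dev)
{
	struct mybtn *btn = poll_dev->private;

	/* mybtn_read_state() is a placeholder for the real hardware read. */
	input_report_key(poll_dev->input, KEY_POWER, mybtn_read_state(btn));
	input_sync(poll_dev->input);
}

static int mybtn_register(struct mybtn *btn)
{
	struct input_polled_dev *poll_dev = input_allocate_polled_device();

	if (!poll_dev)
		return -ENOMEM;
	poll_dev->private = btn;
	poll_dev->poll = mybtn_poll;
	poll_dev->poll_interval = 100;	/* msec */

	poll_dev->input->name = "Example polled button";
	__set_bit(EV_KEY, poll_dev->input->evbit);
	__set_bit(KEY_POWER, poll_dev->input->keybit);

	return input_register_polled_device(poll_dev);
}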
+ */ + +#ifdef __KERNEL__ +#include <linux/time.h> +#include <linux/list.h> +#else +#include <sys/time.h> +#include <sys/ioctl.h> +#include <sys/types.h> +#include <linux/types.h> +#endif + +/* + * The event structure itself + */ + +struct input_event { + struct timeval time; + __u16 type; + __u16 code; + __s32 value; +}; + +/* + * Protocol version. + */ + +#define EV_VERSION 0x010001 + +/* + * IOCTLs (0x00 - 0x7f) + */ + +struct input_id { + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; +}; + +/** + * struct input_absinfo - used by EVIOCGABS/EVIOCSABS ioctls + * @value: latest reported value for the axis. + * @minimum: specifies minimum value for the axis. + * @maximum: specifies maximum value for the axis. + * @fuzz: specifies fuzz value that is used to filter noise from + * the event stream. + * @flat: values that are within this value will be discarded by + * joydev interface and reported as 0 instead. + * @resolution: specifies resolution for the values reported for + * the axis. + * + * Note that input core does not clamp reported values to the + * [minimum, maximum] limits, such task is left to userspace. + * + * Resolution for main axes (ABS_X, ABS_Y, ABS_Z) is reported in + * units per millimeter (units/mm), resolution for rotational axes + * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian. + */ +struct input_absinfo { + __s32 value; + __s32 minimum; + __s32 maximum; + __s32 fuzz; + __s32 flat; + __s32 resolution; +}; + +/** + * struct input_keymap_entry - used by EVIOCGKEYCODE/EVIOCSKEYCODE ioctls + * @scancode: scancode represented in machine-endian form. + * @len: length of the scancode that resides in @scancode buffer. + * @index: index in the keymap, may be used instead of scancode + * @flags: allows to specify how kernel should handle the request. For + * example, setting INPUT_KEYMAP_BY_INDEX flag indicates that kernel + * should perform lookup in keymap by @index instead of @scancode + * @keycode: key code assigned to this scancode + * + * The structure is used to retrieve and modify keymap data. Users have + * option of performing lookup either by @scancode itself or by @index + * in keymap entry. EVIOCGKEYCODE will also return scancode or index + * (depending on which element was used to perform lookup). 
+ */ +struct input_keymap_entry { +#define INPUT_KEYMAP_BY_INDEX (1 << 0) + __u8 flags; + __u8 len; + __u16 index; + __u32 keycode; + __u8 scancode[32]; +}; + +#define EVIOCGVERSION _IOR('E', 0x01, int) /* get driver version */ +#define EVIOCGID _IOR('E', 0x02, struct input_id) /* get device ID */ +#define EVIOCGREP _IOR('E', 0x03, unsigned int[2]) /* get repeat settings */ +#define EVIOCSREP _IOW('E', 0x03, unsigned int[2]) /* set repeat settings */ + +#define EVIOCGKEYCODE _IOR('E', 0x04, unsigned int[2]) /* get keycode */ +#define EVIOCGKEYCODE_V2 _IOR('E', 0x04, struct input_keymap_entry) +#define EVIOCSKEYCODE _IOW('E', 0x04, unsigned int[2]) /* set keycode */ +#define EVIOCSKEYCODE_V2 _IOW('E', 0x04, struct input_keymap_entry) + +#define EVIOCGNAME(len) _IOC(_IOC_READ, 'E', 0x06, len) /* get device name */ +#define EVIOCGPHYS(len) _IOC(_IOC_READ, 'E', 0x07, len) /* get physical location */ +#define EVIOCGUNIQ(len) _IOC(_IOC_READ, 'E', 0x08, len) /* get unique identifier */ +#define EVIOCGPROP(len) _IOC(_IOC_READ, 'E', 0x09, len) /* get device properties */ + +/** + * EVIOCGMTSLOTS(len) - get MT slot values + * + * The ioctl buffer argument should be binary equivalent to + * + * struct input_mt_request_layout { + * __u32 code; + * __s32 values[num_slots]; + * }; + * + * where num_slots is the (arbitrary) number of MT slots to extract. + * + * The ioctl size argument (len) is the size of the buffer, which + * should satisfy len = (num_slots + 1) * sizeof(__s32). If len is + * too small to fit all available slots, the first num_slots are + * returned. + * + * Before the call, code is set to the wanted ABS_MT event type. On + * return, values[] is filled with the slot values for the specified + * ABS_MT code. + * + * If the request code is not an ABS_MT value, -EINVAL is returned. 
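The query ioctls above are issued against an evdev node from userspace. A short sketch that identifies a device by name, bus/vendor/product and driver version:

#include <linux/input.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

static void identify(const char *node)
{
	char name[256] = "?";
	struct input_id id = { 0 };
	int version = 0;
	int fd = open(node, O_RDONLY);

	if (fd < 0)
		return;
	ioctl(fd, EVIOCGVERSION, &version);
	ioctl(fd, EVIOCGID, &id);
	ioctl(fd, EVIOCGNAME(sizeof(name)), name);
	printf("%s: \"%s\" bus 0x%x vendor 0x%x product 0x%x (driver %d.%d.%d)\n",
	       node, name, id.bustype, id.vendor, id.product,
	       version >> 16, (version >> 8) & 0xff, version & 0xff);
	close(fd);
}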
+ */ +#define EVIOCGMTSLOTS(len) _IOC(_IOC_READ, 'E', 0x0a, len) + +#define EVIOCGKEY(len) _IOC(_IOC_READ, 'E', 0x18, len) /* get global key state */ +#define EVIOCGLED(len) _IOC(_IOC_READ, 'E', 0x19, len) /* get all LEDs */ +#define EVIOCGSND(len) _IOC(_IOC_READ, 'E', 0x1a, len) /* get all sounds status */ +#define EVIOCGSW(len) _IOC(_IOC_READ, 'E', 0x1b, len) /* get all switch states */ + +#define EVIOCGBIT(ev,len) _IOC(_IOC_READ, 'E', 0x20 + (ev), len) /* get event bits */ +#define EVIOCGABS(abs) _IOR('E', 0x40 + (abs), struct input_absinfo) /* get abs value/limits */ +#define EVIOCSABS(abs) _IOW('E', 0xc0 + (abs), struct input_absinfo) /* set abs value/limits */ + +#define EVIOCSFF _IOC(_IOC_WRITE, 'E', 0x80, sizeof(struct ff_effect)) /* send a force effect to a force feedback device */ +#define EVIOCRMFF _IOW('E', 0x81, int) /* Erase a force effect */ +#define EVIOCGEFFECTS _IOR('E', 0x84, int) /* Report number of effects playable at the same time */ + +#define EVIOCGRAB _IOW('E', 0x90, int) /* Grab/Release device */ + +#define EVIOCGSUSPENDBLOCK _IOR('E', 0x91, int) /* get suspend block enable */ +#define EVIOCSSUSPENDBLOCK _IOW('E', 0x91, int) /* set suspend block enable */ + +#define EVIOCSCLOCKID _IOW('E', 0xa0, int) /* Set clockid to be used for timestamps */ + +/* + * Device properties and quirks + */ + +#define INPUT_PROP_POINTER 0x00 /* needs a pointer */ +#define INPUT_PROP_DIRECT 0x01 /* direct input devices */ +#define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */ +#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */ + +#define INPUT_PROP_MAX 0x1f +#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1) + +/* + * Event types + */ + +#define EV_SYN 0x00 +#define EV_KEY 0x01 +#define EV_REL 0x02 +#define EV_ABS 0x03 +#define EV_MSC 0x04 +#define EV_SW 0x05 +#define EV_LED 0x11 +#define EV_SND 0x12 +#define EV_REP 0x14 +#define EV_FF 0x15 +#define EV_PWR 0x16 +#define EV_FF_STATUS 0x17 +#define EV_MAX 0x1f +#define EV_CNT (EV_MAX+1) + +/* + * Synchronization events. + */ + +#define SYN_REPORT 0 +#define SYN_CONFIG 1 +#define SYN_MT_REPORT 2 +#define SYN_DROPPED 3 + +/* + * Keys and buttons + * + * Most of the keys/buttons are modeled after USB HUT 1.12 + * (see http://www.usb.org/developers/hidpage). 
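The EVIOCGMTSLOTS buffer layout described above maps directly onto a small struct: one __u32 code followed by one __s32 per slot. A hedged userspace sketch (NUM_SLOTS is an assumption; the real slot count comes from EVIOCGABS(ABS_MT_SLOT), and ABS_MT_POSITION_X is one of the ABS_* codes defined further down in this header):

#define NUM_SLOTS 10

struct mt_request {
	__u32 code;
	__s32 values[NUM_SLOTS];
};

/* Fetch the current X position of every multitouch slot. */
static int read_mt_x(int fd, struct mt_request *req)
{
	req->code = ABS_MT_POSITION_X;
	return ioctl(fd, EVIOCGMTSLOTS(sizeof(*req)), req);
}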
+ * Abbreviations in the comments: + * AC - Application Control + * AL - Application Launch Button + * SC - System Control + */ + +#define KEY_RESERVED 0 +#define KEY_ESC 1 +#define KEY_1 2 +#define KEY_2 3 +#define KEY_3 4 +#define KEY_4 5 +#define KEY_5 6 +#define KEY_6 7 +#define KEY_7 8 +#define KEY_8 9 +#define KEY_9 10 +#define KEY_0 11 +#define KEY_MINUS 12 +#define KEY_EQUAL 13 +#define KEY_BACKSPACE 14 +#define KEY_TAB 15 +#define KEY_Q 16 +#define KEY_W 17 +#define KEY_E 18 +#define KEY_R 19 +#define KEY_T 20 +#define KEY_Y 21 +#define KEY_U 22 +#define KEY_I 23 +#define KEY_O 24 +#define KEY_P 25 +#define KEY_LEFTBRACE 26 +#define KEY_RIGHTBRACE 27 +#define KEY_ENTER 28 +#define KEY_LEFTCTRL 29 +#define KEY_A 30 +#define KEY_S 31 +#define KEY_D 32 +#define KEY_F 33 +#define KEY_G 34 +#define KEY_H 35 +#define KEY_J 36 +#define KEY_K 37 +#define KEY_L 38 +#define KEY_SEMICOLON 39 +#define KEY_APOSTROPHE 40 +#define KEY_GRAVE 41 +#define KEY_LEFTSHIFT 42 +#define KEY_BACKSLASH 43 +#define KEY_Z 44 +#define KEY_X 45 +#define KEY_C 46 +#define KEY_V 47 +#define KEY_B 48 +#define KEY_N 49 +#define KEY_M 50 +#define KEY_COMMA 51 +#define KEY_DOT 52 +#define KEY_SLASH 53 +#define KEY_RIGHTSHIFT 54 +#define KEY_KPASTERISK 55 +#define KEY_LEFTALT 56 +#define KEY_SPACE 57 +#define KEY_CAPSLOCK 58 +#define KEY_F1 59 +#define KEY_F2 60 +#define KEY_F3 61 +#define KEY_F4 62 +#define KEY_F5 63 +#define KEY_F6 64 +#define KEY_F7 65 +#define KEY_F8 66 +#define KEY_F9 67 +#define KEY_F10 68 +#define KEY_NUMLOCK 69 +#define KEY_SCROLLLOCK 70 +#define KEY_KP7 71 +#define KEY_KP8 72 +#define KEY_KP9 73 +#define KEY_KPMINUS 74 +#define KEY_KP4 75 +#define KEY_KP5 76 +#define KEY_KP6 77 +#define KEY_KPPLUS 78 +#define KEY_KP1 79 +#define KEY_KP2 80 +#define KEY_KP3 81 +#define KEY_KP0 82 +#define KEY_KPDOT 83 + +#define KEY_ZENKAKUHANKAKU 85 +#define KEY_102ND 86 +#define KEY_F11 87 +#define KEY_F12 88 +#define KEY_RO 89 +#define KEY_KATAKANA 90 +#define KEY_HIRAGANA 91 +#define KEY_HENKAN 92 +#define KEY_KATAKANAHIRAGANA 93 +#define KEY_MUHENKAN 94 +#define KEY_KPJPCOMMA 95 +#define KEY_KPENTER 96 +#define KEY_RIGHTCTRL 97 +#define KEY_KPSLASH 98 +#define KEY_SYSRQ 99 +#define KEY_RIGHTALT 100 +#define KEY_LINEFEED 101 +#define KEY_HOME 102 +#define KEY_UP 103 +#define KEY_PAGEUP 104 +#define KEY_LEFT 105 +#define KEY_RIGHT 106 +#define KEY_END 107 +#define KEY_DOWN 108 +#define KEY_PAGEDOWN 109 +#define KEY_INSERT 110 +#define KEY_DELETE 111 +#define KEY_MACRO 112 +#define KEY_MUTE 113 +#define KEY_VOLUMEDOWN 114 +#define KEY_VOLUMEUP 115 +#define KEY_POWER 116 /* SC System Power Down */ +#define KEY_KPEQUAL 117 +#define KEY_KPPLUSMINUS 118 +#define KEY_PAUSE 119 +#define KEY_SCALE 120 /* AL Compiz Scale (Expose) */ + +#define KEY_KPCOMMA 121 +#define KEY_HANGEUL 122 +#define KEY_HANGUEL KEY_HANGEUL +#define KEY_HANJA 123 +#define KEY_YEN 124 +#define KEY_LEFTMETA 125 +#define KEY_RIGHTMETA 126 +#define KEY_COMPOSE 127 + +#define KEY_STOP 128 /* AC Stop */ +#define KEY_AGAIN 129 +#define KEY_PROPS 130 /* AC Properties */ +#define KEY_UNDO 131 /* AC Undo */ +#define KEY_FRONT 132 +#define KEY_COPY 133 /* AC Copy */ +#define KEY_OPEN 134 /* AC Open */ +#define KEY_PASTE 135 /* AC Paste */ +#define KEY_FIND 136 /* AC Search */ +#define KEY_CUT 137 /* AC Cut */ +#define KEY_HELP 138 /* AL Integrated Help Center */ +#define KEY_MENU 139 /* Menu (show menu) */ +#define KEY_CALC 140 /* AL Calculator */ +#define KEY_SETUP 141 +#define KEY_SLEEP 142 /* SC System Sleep */ +#define KEY_WAKEUP 143 /* System 
Wake Up */ +#define KEY_FILE 144 /* AL Local Machine Browser */ +#define KEY_SENDFILE 145 +#define KEY_DELETEFILE 146 +#define KEY_XFER 147 +#define KEY_PROG1 148 +#define KEY_PROG2 149 +#define KEY_WWW 150 /* AL Internet Browser */ +#define KEY_MSDOS 151 +#define KEY_COFFEE 152 /* AL Terminal Lock/Screensaver */ +#define KEY_SCREENLOCK KEY_COFFEE +#define KEY_DIRECTION 153 +#define KEY_CYCLEWINDOWS 154 +#define KEY_MAIL 155 +#define KEY_BOOKMARKS 156 /* AC Bookmarks */ +#define KEY_COMPUTER 157 +#define KEY_BACK 158 /* AC Back */ +#define KEY_FORWARD 159 /* AC Forward */ +#define KEY_CLOSECD 160 +#define KEY_EJECTCD 161 +#define KEY_EJECTCLOSECD 162 +#define KEY_NEXTSONG 163 +#define KEY_PLAYPAUSE 164 +#define KEY_PREVIOUSSONG 165 +#define KEY_STOPCD 166 +#define KEY_RECORD 167 +#define KEY_REWIND 168 +#define KEY_PHONE 169 /* Media Select Telephone */ +#define KEY_ISO 170 +#define KEY_CONFIG 171 /* AL Consumer Control Configuration */ +#define KEY_HOMEPAGE 172 /* AC Home */ +#define KEY_REFRESH 173 /* AC Refresh */ +#define KEY_EXIT 174 /* AC Exit */ +#define KEY_MOVE 175 +#define KEY_EDIT 176 +#define KEY_SCROLLUP 177 +#define KEY_SCROLLDOWN 178 +#define KEY_KPLEFTPAREN 179 +#define KEY_KPRIGHTPAREN 180 +#define KEY_NEW 181 /* AC New */ +#define KEY_REDO 182 /* AC Redo/Repeat */ + +#define KEY_F13 183 +#define KEY_F14 184 +#define KEY_F15 185 +#define KEY_F16 186 +#define KEY_F17 187 +#define KEY_F18 188 +#define KEY_F19 189 +#define KEY_F20 190 +#define KEY_F21 191 +#define KEY_F22 192 +#define KEY_F23 193 +#define KEY_F24 194 + +#define KEY_PLAYCD 200 +#define KEY_PAUSECD 201 +#define KEY_PROG3 202 +#define KEY_PROG4 203 +#define KEY_DASHBOARD 204 /* AL Dashboard */ +#define KEY_SUSPEND 205 +#define KEY_CLOSE 206 /* AC Close */ +#define KEY_PLAY 207 +#define KEY_FASTFORWARD 208 +#define KEY_BASSBOOST 209 +#define KEY_PRINT 210 /* AC Print */ +#define KEY_HP 211 +#define KEY_CAMERA 212 +#define KEY_SOUND 213 +#define KEY_QUESTION 214 +#define KEY_EMAIL 215 +#define KEY_CHAT 216 +#define KEY_SEARCH 217 +#define KEY_CONNECT 218 +#define KEY_FINANCE 219 /* AL Checkbook/Finance */ +#define KEY_SPORT 220 +#define KEY_SHOP 221 +#define KEY_ALTERASE 222 +#define KEY_CANCEL 223 /* AC Cancel */ +#define KEY_BRIGHTNESSDOWN 224 +#define KEY_BRIGHTNESSUP 225 +#define KEY_MEDIA 226 + +#define KEY_SWITCHVIDEOMODE 227 /* Cycle between available video + outputs (Monitor/LCD/TV-out/etc) */ +#define KEY_KBDILLUMTOGGLE 228 +#define KEY_KBDILLUMDOWN 229 +#define KEY_KBDILLUMUP 230 + +#define KEY_SEND 231 /* AC Send */ +#define KEY_REPLY 232 /* AC Reply */ +#define KEY_FORWARDMAIL 233 /* AC Forward Msg */ +#define KEY_SAVE 234 /* AC Save */ +#define KEY_DOCUMENTS 235 + +#define KEY_BATTERY 236 + +#define KEY_BLUETOOTH 237 +#define KEY_WLAN 238 +#define KEY_UWB 239 + +#define KEY_UNKNOWN 240 + +#define KEY_VIDEO_NEXT 241 /* drive next video source */ +#define KEY_VIDEO_PREV 242 /* drive previous video source */ +#define KEY_BRIGHTNESS_CYCLE 243 /* brightness up, after max is min */ +#define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */ +#define KEY_DISPLAY_OFF 245 /* display device to off state */ + +#define KEY_WIMAX 246 +#define KEY_RFKILL 247 /* Key that controls all radios */ + +#define KEY_MICMUTE 248 /* Mute / unmute the microphone */ + +/* Code 255 is reserved for special needs of AT keyboard driver */ + +#define BTN_MISC 0x100 +#define BTN_0 0x100 +#define BTN_1 0x101 +#define BTN_2 0x102 +#define BTN_3 0x103 +#define BTN_4 0x104 +#define BTN_5 0x105 +#define BTN_6 0x106 +#define BTN_7 
0x107 +#define BTN_8 0x108 +#define BTN_9 0x109 + +#define BTN_MOUSE 0x110 +#define BTN_LEFT 0x110 +#define BTN_RIGHT 0x111 +#define BTN_MIDDLE 0x112 +#define BTN_SIDE 0x113 +#define BTN_EXTRA 0x114 +#define BTN_FORWARD 0x115 +#define BTN_BACK 0x116 +#define BTN_TASK 0x117 + +#define BTN_JOYSTICK 0x120 +#define BTN_TRIGGER 0x120 +#define BTN_THUMB 0x121 +#define BTN_THUMB2 0x122 +#define BTN_TOP 0x123 +#define BTN_TOP2 0x124 +#define BTN_PINKIE 0x125 +#define BTN_BASE 0x126 +#define BTN_BASE2 0x127 +#define BTN_BASE3 0x128 +#define BTN_BASE4 0x129 +#define BTN_BASE5 0x12a +#define BTN_BASE6 0x12b +#define BTN_DEAD 0x12f + +#define BTN_GAMEPAD 0x130 +#define BTN_A 0x130 +#define BTN_B 0x131 +#define BTN_C 0x132 +#define BTN_X 0x133 +#define BTN_Y 0x134 +#define BTN_Z 0x135 +#define BTN_TL 0x136 +#define BTN_TR 0x137 +#define BTN_TL2 0x138 +#define BTN_TR2 0x139 +#define BTN_SELECT 0x13a +#define BTN_START 0x13b +#define BTN_MODE 0x13c +#define BTN_THUMBL 0x13d +#define BTN_THUMBR 0x13e + +#define BTN_DIGI 0x140 +#define BTN_TOOL_PEN 0x140 +#define BTN_TOOL_RUBBER 0x141 +#define BTN_TOOL_BRUSH 0x142 +#define BTN_TOOL_PENCIL 0x143 +#define BTN_TOOL_AIRBRUSH 0x144 +#define BTN_TOOL_FINGER 0x145 +#define BTN_TOOL_MOUSE 0x146 +#define BTN_TOOL_LENS 0x147 +#define BTN_TOOL_QUINTTAP 0x148 /* Five fingers on trackpad */ +#define BTN_TOUCH 0x14a +#define BTN_STYLUS 0x14b +#define BTN_STYLUS2 0x14c +#define BTN_TOOL_DOUBLETAP 0x14d +#define BTN_TOOL_TRIPLETAP 0x14e +#define BTN_TOOL_QUADTAP 0x14f /* Four fingers on trackpad */ + +#define BTN_WHEEL 0x150 +#define BTN_GEAR_DOWN 0x150 +#define BTN_GEAR_UP 0x151 + +#define KEY_OK 0x160 +#define KEY_SELECT 0x161 +#define KEY_GOTO 0x162 +#define KEY_CLEAR 0x163 +#define KEY_POWER2 0x164 +#define KEY_OPTION 0x165 +#define KEY_INFO 0x166 /* AL OEM Features/Tips/Tutorial */ +#define KEY_TIME 0x167 +#define KEY_VENDOR 0x168 +#define KEY_ARCHIVE 0x169 +#define KEY_PROGRAM 0x16a /* Media Select Program Guide */ +#define KEY_CHANNEL 0x16b +#define KEY_FAVORITES 0x16c +#define KEY_EPG 0x16d +#define KEY_PVR 0x16e /* Media Select Home */ +#define KEY_MHP 0x16f +#define KEY_LANGUAGE 0x170 +#define KEY_TITLE 0x171 +#define KEY_SUBTITLE 0x172 +#define KEY_ANGLE 0x173 +#define KEY_ZOOM 0x174 +#define KEY_MODE 0x175 +#define KEY_KEYBOARD 0x176 +#define KEY_SCREEN 0x177 +#define KEY_PC 0x178 /* Media Select Computer */ +#define KEY_TV 0x179 /* Media Select TV */ +#define KEY_TV2 0x17a /* Media Select Cable */ +#define KEY_VCR 0x17b /* Media Select VCR */ +#define KEY_VCR2 0x17c /* VCR Plus */ +#define KEY_SAT 0x17d /* Media Select Satellite */ +#define KEY_SAT2 0x17e +#define KEY_CD 0x17f /* Media Select CD */ +#define KEY_TAPE 0x180 /* Media Select Tape */ +#define KEY_RADIO 0x181 +#define KEY_TUNER 0x182 /* Media Select Tuner */ +#define KEY_PLAYER 0x183 +#define KEY_TEXT 0x184 +#define KEY_DVD 0x185 /* Media Select DVD */ +#define KEY_AUX 0x186 +#define KEY_MP3 0x187 +#define KEY_AUDIO 0x188 /* AL Audio Browser */ +#define KEY_VIDEO 0x189 /* AL Movie Browser */ +#define KEY_DIRECTORY 0x18a +#define KEY_LIST 0x18b +#define KEY_MEMO 0x18c /* Media Select Messages */ +#define KEY_CALENDAR 0x18d +#define KEY_RED 0x18e +#define KEY_GREEN 0x18f +#define KEY_YELLOW 0x190 +#define KEY_BLUE 0x191 +#define KEY_CHANNELUP 0x192 /* Channel Increment */ +#define KEY_CHANNELDOWN 0x193 /* Channel Decrement */ +#define KEY_FIRST 0x194 +#define KEY_LAST 0x195 /* Recall Last */ +#define KEY_AB 0x196 +#define KEY_NEXT 0x197 +#define KEY_RESTART 0x198 +#define KEY_SLOW 0x199 +#define 
KEY_SHUFFLE 0x19a +#define KEY_BREAK 0x19b +#define KEY_PREVIOUS 0x19c +#define KEY_DIGITS 0x19d +#define KEY_TEEN 0x19e +#define KEY_TWEN 0x19f +#define KEY_VIDEOPHONE 0x1a0 /* Media Select Video Phone */ +#define KEY_GAMES 0x1a1 /* Media Select Games */ +#define KEY_ZOOMIN 0x1a2 /* AC Zoom In */ +#define KEY_ZOOMOUT 0x1a3 /* AC Zoom Out */ +#define KEY_ZOOMRESET 0x1a4 /* AC Zoom */ +#define KEY_WORDPROCESSOR 0x1a5 /* AL Word Processor */ +#define KEY_EDITOR 0x1a6 /* AL Text Editor */ +#define KEY_SPREADSHEET 0x1a7 /* AL Spreadsheet */ +#define KEY_GRAPHICSEDITOR 0x1a8 /* AL Graphics Editor */ +#define KEY_PRESENTATION 0x1a9 /* AL Presentation App */ +#define KEY_DATABASE 0x1aa /* AL Database App */ +#define KEY_NEWS 0x1ab /* AL Newsreader */ +#define KEY_VOICEMAIL 0x1ac /* AL Voicemail */ +#define KEY_ADDRESSBOOK 0x1ad /* AL Contacts/Address Book */ +#define KEY_MESSENGER 0x1ae /* AL Instant Messaging */ +#define KEY_DISPLAYTOGGLE 0x1af /* Turn display (LCD) on and off */ +#define KEY_SPELLCHECK 0x1b0 /* AL Spell Check */ +#define KEY_LOGOFF 0x1b1 /* AL Logoff */ + +#define KEY_DOLLAR 0x1b2 +#define KEY_EURO 0x1b3 + +#define KEY_FRAMEBACK 0x1b4 /* Consumer - transport controls */ +#define KEY_FRAMEFORWARD 0x1b5 +#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */ +#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */ +#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */ +#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */ +#define KEY_IMAGES 0x1ba /* AL Image Browser */ + +#define KEY_DEL_EOL 0x1c0 +#define KEY_DEL_EOS 0x1c1 +#define KEY_INS_LINE 0x1c2 +#define KEY_DEL_LINE 0x1c3 + +#define KEY_FN 0x1d0 +#define KEY_FN_ESC 0x1d1 +#define KEY_FN_F1 0x1d2 +#define KEY_FN_F2 0x1d3 +#define KEY_FN_F3 0x1d4 +#define KEY_FN_F4 0x1d5 +#define KEY_FN_F5 0x1d6 +#define KEY_FN_F6 0x1d7 +#define KEY_FN_F7 0x1d8 +#define KEY_FN_F8 0x1d9 +#define KEY_FN_F9 0x1da +#define KEY_FN_F10 0x1db +#define KEY_FN_F11 0x1dc +#define KEY_FN_F12 0x1dd +#define KEY_FN_1 0x1de +#define KEY_FN_2 0x1df +#define KEY_FN_D 0x1e0 +#define KEY_FN_E 0x1e1 +#define KEY_FN_F 0x1e2 +#define KEY_FN_S 0x1e3 +#define KEY_FN_B 0x1e4 + +#define KEY_BRL_DOT1 0x1f1 +#define KEY_BRL_DOT2 0x1f2 +#define KEY_BRL_DOT3 0x1f3 +#define KEY_BRL_DOT4 0x1f4 +#define KEY_BRL_DOT5 0x1f5 +#define KEY_BRL_DOT6 0x1f6 +#define KEY_BRL_DOT7 0x1f7 +#define KEY_BRL_DOT8 0x1f8 +#define KEY_BRL_DOT9 0x1f9 +#define KEY_BRL_DOT10 0x1fa + +#define KEY_NUMERIC_0 0x200 /* used by phones, remote controls, */ +#define KEY_NUMERIC_1 0x201 /* and other keypads */ +#define KEY_NUMERIC_2 0x202 +#define KEY_NUMERIC_3 0x203 +#define KEY_NUMERIC_4 0x204 +#define KEY_NUMERIC_5 0x205 +#define KEY_NUMERIC_6 0x206 +#define KEY_NUMERIC_7 0x207 +#define KEY_NUMERIC_8 0x208 +#define KEY_NUMERIC_9 0x209 +#define KEY_NUMERIC_STAR 0x20a +#define KEY_NUMERIC_POUND 0x20b + +#define KEY_CAMERA_FOCUS 0x210 +#define KEY_WPS_BUTTON 0x211 /* WiFi Protected Setup key */ + +#define KEY_TOUCHPAD_TOGGLE 0x212 /* Request switch touchpad on or off */ +#define KEY_TOUCHPAD_ON 0x213 +#define KEY_TOUCHPAD_OFF 0x214 + +#define KEY_CAMERA_ZOOMIN 0x215 +#define KEY_CAMERA_ZOOMOUT 0x216 +#define KEY_CAMERA_UP 0x217 +#define KEY_CAMERA_DOWN 0x218 +#define KEY_CAMERA_LEFT 0x219 +#define KEY_CAMERA_RIGHT 0x21a + +#define BTN_TRIGGER_HAPPY 0x2c0 +#define BTN_TRIGGER_HAPPY1 0x2c0 +#define BTN_TRIGGER_HAPPY2 0x2c1 +#define BTN_TRIGGER_HAPPY3 0x2c2 +#define BTN_TRIGGER_HAPPY4 0x2c3 +#define BTN_TRIGGER_HAPPY5 0x2c4 +#define BTN_TRIGGER_HAPPY6 0x2c5 +#define 
BTN_TRIGGER_HAPPY7 0x2c6 +#define BTN_TRIGGER_HAPPY8 0x2c7 +#define BTN_TRIGGER_HAPPY9 0x2c8 +#define BTN_TRIGGER_HAPPY10 0x2c9 +#define BTN_TRIGGER_HAPPY11 0x2ca +#define BTN_TRIGGER_HAPPY12 0x2cb +#define BTN_TRIGGER_HAPPY13 0x2cc +#define BTN_TRIGGER_HAPPY14 0x2cd +#define BTN_TRIGGER_HAPPY15 0x2ce +#define BTN_TRIGGER_HAPPY16 0x2cf +#define BTN_TRIGGER_HAPPY17 0x2d0 +#define BTN_TRIGGER_HAPPY18 0x2d1 +#define BTN_TRIGGER_HAPPY19 0x2d2 +#define BTN_TRIGGER_HAPPY20 0x2d3 +#define BTN_TRIGGER_HAPPY21 0x2d4 +#define BTN_TRIGGER_HAPPY22 0x2d5 +#define BTN_TRIGGER_HAPPY23 0x2d6 +#define BTN_TRIGGER_HAPPY24 0x2d7 +#define BTN_TRIGGER_HAPPY25 0x2d8 +#define BTN_TRIGGER_HAPPY26 0x2d9 +#define BTN_TRIGGER_HAPPY27 0x2da +#define BTN_TRIGGER_HAPPY28 0x2db +#define BTN_TRIGGER_HAPPY29 0x2dc +#define BTN_TRIGGER_HAPPY30 0x2dd +#define BTN_TRIGGER_HAPPY31 0x2de +#define BTN_TRIGGER_HAPPY32 0x2df +#define BTN_TRIGGER_HAPPY33 0x2e0 +#define BTN_TRIGGER_HAPPY34 0x2e1 +#define BTN_TRIGGER_HAPPY35 0x2e2 +#define BTN_TRIGGER_HAPPY36 0x2e3 +#define BTN_TRIGGER_HAPPY37 0x2e4 +#define BTN_TRIGGER_HAPPY38 0x2e5 +#define BTN_TRIGGER_HAPPY39 0x2e6 +#define BTN_TRIGGER_HAPPY40 0x2e7 + +/* We avoid low common keys in module aliases so they don't get huge. */ +#define KEY_MIN_INTERESTING KEY_MUTE +#define KEY_MAX 0x2ff +#define KEY_CNT (KEY_MAX+1) + +/* + * Relative axes + */ + +#define REL_X 0x00 +#define REL_Y 0x01 +#define REL_Z 0x02 +#define REL_RX 0x03 +#define REL_RY 0x04 +#define REL_RZ 0x05 +#define REL_HWHEEL 0x06 +#define REL_DIAL 0x07 +#define REL_WHEEL 0x08 +#define REL_MISC 0x09 +#define REL_MAX 0x0f +#define REL_CNT (REL_MAX+1) + +/* + * Absolute axes + */ + +#define ABS_X 0x00 +#define ABS_Y 0x01 +#define ABS_Z 0x02 +#define ABS_RX 0x03 +#define ABS_RY 0x04 +#define ABS_RZ 0x05 +#define ABS_THROTTLE 0x06 +#define ABS_RUDDER 0x07 +#define ABS_WHEEL 0x08 +#define ABS_GAS 0x09 +#define ABS_BRAKE 0x0a +#define ABS_HAT0X 0x10 +#define ABS_HAT0Y 0x11 +#define ABS_HAT1X 0x12 +#define ABS_HAT1Y 0x13 +#define ABS_HAT2X 0x14 +#define ABS_HAT2Y 0x15 +#define ABS_HAT3X 0x16 +#define ABS_HAT3Y 0x17 +#define ABS_PRESSURE 0x18 +#define ABS_DISTANCE 0x19 +#define ABS_TILT_X 0x1a +#define ABS_TILT_Y 0x1b +#define ABS_TOOL_WIDTH 0x1c + +#define ABS_VOLUME 0x20 + +#define ABS_MISC 0x28 + +#define ABS_MT_SLOT 0x2f /* MT slot being modified */ +#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ +#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ +#define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */ +#define ABS_MT_WIDTH_MINOR 0x33 /* Minor axis (omit if circular) */ +#define ABS_MT_ORIENTATION 0x34 /* Ellipse orientation */ +#define ABS_MT_POSITION_X 0x35 /* Center X ellipse position */ +#define ABS_MT_POSITION_Y 0x36 /* Center Y ellipse position */ +#define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */ +#define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */ +#define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */ +#define ABS_MT_PRESSURE 0x3a /* Pressure on contact area */ +#define ABS_MT_DISTANCE 0x3b /* Contact hover distance */ + +#ifdef __KERNEL__ +/* Implementation details, userspace should not care about these */ +#define ABS_MT_FIRST ABS_MT_TOUCH_MAJOR +#define ABS_MT_LAST ABS_MT_DISTANCE +#endif + +#define ABS_MAX 0x3f +#define ABS_CNT (ABS_MAX+1) + +/* + * Switch events + */ + +#define SW_LID 0x00 /* set = lid shut */ +#define SW_TABLET_MODE 0x01 /* set = tablet mode */ +#define SW_HEADPHONE_INSERT 0x02 /* set = inserted */ 
+#define SW_RFKILL_ALL 0x03 /* rfkill master switch, type "any" + set = radio enabled */ +#define SW_RADIO SW_RFKILL_ALL /* deprecated */ +#define SW_MICROPHONE_INSERT 0x04 /* set = inserted */ +#define SW_DOCK 0x05 /* set = plugged into dock */ +#define SW_LINEOUT_INSERT 0x06 /* set = inserted */ +#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */ +#define SW_VIDEOOUT_INSERT 0x08 /* set = inserted */ +#define SW_CAMERA_LENS_COVER 0x09 /* set = lens covered */ +#define SW_KEYPAD_SLIDE 0x0a /* set = keypad slide out */ +#define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */ +#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */ +#define SW_LINEIN_INSERT 0x0d /* set = inserted */ +#define SW_MAX 0x0f +#define SW_CNT (SW_MAX+1) + +/* + * Misc events + */ + +#define MSC_SERIAL 0x00 +#define MSC_PULSELED 0x01 +#define MSC_GESTURE 0x02 +#define MSC_RAW 0x03 +#define MSC_SCAN 0x04 +#define MSC_MAX 0x07 +#define MSC_CNT (MSC_MAX+1) + +/* + * LEDs + */ + +#define LED_NUML 0x00 +#define LED_CAPSL 0x01 +#define LED_SCROLLL 0x02 +#define LED_COMPOSE 0x03 +#define LED_KANA 0x04 +#define LED_SLEEP 0x05 +#define LED_SUSPEND 0x06 +#define LED_MUTE 0x07 +#define LED_MISC 0x08 +#define LED_MAIL 0x09 +#define LED_CHARGING 0x0a +#define LED_MAX 0x0f +#define LED_CNT (LED_MAX+1) + +/* + * Autorepeat values + */ + +#define REP_DELAY 0x00 +#define REP_PERIOD 0x01 +#define REP_MAX 0x01 +#define REP_CNT (REP_MAX+1) + +/* + * Sounds + */ + +#define SND_CLICK 0x00 +#define SND_BELL 0x01 +#define SND_TONE 0x02 +#define SND_MAX 0x07 +#define SND_CNT (SND_MAX+1) + +/* + * IDs. + */ + +#define ID_BUS 0 +#define ID_VENDOR 1 +#define ID_PRODUCT 2 +#define ID_VERSION 3 + +#define BUS_PCI 0x01 +#define BUS_ISAPNP 0x02 +#define BUS_USB 0x03 +#define BUS_HIL 0x04 +#define BUS_BLUETOOTH 0x05 +#define BUS_VIRTUAL 0x06 + +#define BUS_ISA 0x10 +#define BUS_I8042 0x11 +#define BUS_XTKBD 0x12 +#define BUS_RS232 0x13 +#define BUS_GAMEPORT 0x14 +#define BUS_PARPORT 0x15 +#define BUS_AMIGA 0x16 +#define BUS_ADB 0x17 +#define BUS_I2C 0x18 +#define BUS_HOST 0x19 +#define BUS_GSC 0x1A +#define BUS_ATARI 0x1B +#define BUS_SPI 0x1C + +/* + * MT_TOOL types + */ +#define MT_TOOL_FINGER 0 +#define MT_TOOL_PEN 1 +#define MT_TOOL_MAX 1 + +/* + * Values describing the status of a force-feedback effect + */ +#define FF_STATUS_STOPPED 0x00 +#define FF_STATUS_PLAYING 0x01 +#define FF_STATUS_MAX 0x01 + +/* + * Structures used in ioctls to upload effects to a device + * They are pieces of a bigger structure (called ff_effect) + */ + +/* + * All duration values are expressed in ms. Values above 32767 ms (0x7fff) + * should not be used and have unspecified results. 
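+ * For example, a replay length of 2500 plays an effect for 2.5 seconds,
+ * comfortably below the 0x7fff limit.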
+ */ + +/** + * struct ff_replay - defines scheduling of the force-feedback effect + * @length: duration of the effect + * @delay: delay before effect should start playing + */ +struct ff_replay { + __u16 length; + __u16 delay; +}; + +/** + * struct ff_trigger - defines what triggers the force-feedback effect + * @button: number of the button triggering the effect + * @interval: controls how soon the effect can be re-triggered + */ +struct ff_trigger { + __u16 button; + __u16 interval; +}; + +/** + * struct ff_envelope - generic force-feedback effect envelope + * @attack_length: duration of the attack (ms) + * @attack_level: level at the beginning of the attack + * @fade_length: duration of fade (ms) + * @fade_level: level at the end of fade + * + * The @attack_level and @fade_level are absolute values; when applying + * envelope force-feedback core will convert to positive/negative + * value based on polarity of the default level of the effect. + * Valid range for the attack and fade levels is 0x0000 - 0x7fff + */ +struct ff_envelope { + __u16 attack_length; + __u16 attack_level; + __u16 fade_length; + __u16 fade_level; +}; + +/** + * struct ff_constant_effect - defines parameters of a constant force-feedback effect + * @level: strength of the effect; may be negative + * @envelope: envelope data + */ +struct ff_constant_effect { + __s16 level; + struct ff_envelope envelope; +}; + +/** + * struct ff_ramp_effect - defines parameters of a ramp force-feedback effect + * @start_level: beginning strength of the effect; may be negative + * @end_level: final strength of the effect; may be negative + * @envelope: envelope data + */ +struct ff_ramp_effect { + __s16 start_level; + __s16 end_level; + struct ff_envelope envelope; +}; + +/** + * struct ff_condition_effect - defines a spring or friction force-feedback effect + * @right_saturation: maximum level when joystick moved all way to the right + * @left_saturation: same for the left side + * @right_coeff: controls how fast the force grows when the joystick moves + * to the right + * @left_coeff: same for the left side + * @deadband: size of the dead zone, where no force is produced + * @center: position of the dead zone + */ +struct ff_condition_effect { + __u16 right_saturation; + __u16 left_saturation; + + __s16 right_coeff; + __s16 left_coeff; + + __u16 deadband; + __s16 center; +}; + +/** + * struct ff_periodic_effect - defines parameters of a periodic force-feedback effect + * @waveform: kind of the effect (wave) + * @period: period of the wave (ms) + * @magnitude: peak value + * @offset: mean value of the wave (roughly) + * @phase: 'horizontal' shift + * @envelope: envelope data + * @custom_len: number of samples (FF_CUSTOM only) + * @custom_data: buffer of samples (FF_CUSTOM only) + * + * Known waveforms - FF_SQUARE, FF_TRIANGLE, FF_SINE, FF_SAW_UP, + * FF_SAW_DOWN, FF_CUSTOM. The exact syntax FF_CUSTOM is undefined + * for the time being as no driver supports it yet. + * + * Note: the data pointed by custom_data is copied by the driver. + * You can therefore dispose of the memory after the upload/update. 
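+ *
+ * As an illustrative sketch only, a gentle 4 Hz sine vibration could be
+ * described with (example figures, not recommendations):
+ *
+ *    .waveform = FF_SINE, .period = 250, .magnitude = 0x4000,
+ *    .offset = 0, .phase = 0, .custom_len = 0, .custom_data = NULL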
+ */ +struct ff_periodic_effect { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + + struct ff_envelope envelope; + + __u32 custom_len; + __s16 __user *custom_data; +}; + +/** + * struct ff_rumble_effect - defines parameters of a periodic force-feedback effect + * @strong_magnitude: magnitude of the heavy motor + * @weak_magnitude: magnitude of the light one + * + * Some rumble pads have two motors of different weight. Strong_magnitude + * represents the magnitude of the vibration generated by the heavy one. + */ +struct ff_rumble_effect { + __u16 strong_magnitude; + __u16 weak_magnitude; +}; + +/** + * struct ff_effect - defines force feedback effect + * @type: type of the effect (FF_CONSTANT, FF_PERIODIC, FF_RAMP, FF_SPRING, + * FF_FRICTION, FF_DAMPER, FF_RUMBLE, FF_INERTIA, or FF_CUSTOM) + * @id: an unique id assigned to an effect + * @direction: direction of the effect + * @trigger: trigger conditions (struct ff_trigger) + * @replay: scheduling of the effect (struct ff_replay) + * @u: effect-specific structure (one of ff_constant_effect, ff_ramp_effect, + * ff_periodic_effect, ff_condition_effect, ff_rumble_effect) further + * defining effect parameters + * + * This structure is sent through ioctl from the application to the driver. + * To create a new effect application should set its @id to -1; the kernel + * will return assigned @id which can later be used to update or delete + * this effect. + * + * Direction of the effect is encoded as follows: + * 0 deg -> 0x0000 (down) + * 90 deg -> 0x4000 (left) + * 180 deg -> 0x8000 (up) + * 270 deg -> 0xC000 (right) + */ +struct ff_effect { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect periodic; + struct ff_condition_effect condition[2]; /* One for each axis */ + struct ff_rumble_effect rumble; + } u; +}; + +/* + * Force feedback effect types + */ + +#define FF_RUMBLE 0x50 +#define FF_PERIODIC 0x51 +#define FF_CONSTANT 0x52 +#define FF_SPRING 0x53 +#define FF_FRICTION 0x54 +#define FF_DAMPER 0x55 +#define FF_INERTIA 0x56 +#define FF_RAMP 0x57 + +#define FF_EFFECT_MIN FF_RUMBLE +#define FF_EFFECT_MAX FF_RAMP + +/* + * Force feedback periodic effect types + */ + +#define FF_SQUARE 0x58 +#define FF_TRIANGLE 0x59 +#define FF_SINE 0x5a +#define FF_SAW_UP 0x5b +#define FF_SAW_DOWN 0x5c +#define FF_CUSTOM 0x5d + +#define FF_WAVEFORM_MIN FF_SQUARE +#define FF_WAVEFORM_MAX FF_CUSTOM + +/* + * Set ff device properties + */ + +#define FF_GAIN 0x60 +#define FF_AUTOCENTER 0x61 + +#define FF_MAX 0x7f +#define FF_CNT (FF_MAX+1) + +#ifdef __KERNEL__ + +/* + * In-kernel definitions. + */ + +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/timer.h> +#include <linux/mod_devicetable.h> + +/** + * struct input_dev - represents an input device + * @name: name of the device + * @phys: physical path to the device in the system hierarchy + * @uniq: unique identification code for the device (if device has it) + * @id: id of the device (struct input_id) + * @propbit: bitmap of device properties and quirks + * @evbit: bitmap of types of events supported by the device (EV_KEY, + * EV_REL, etc.) 
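+ *	(drivers can populate this and the matching per-code bitmaps below
+ *	with input_set_capability(), e.g. input_set_capability(dev, EV_KEY,
+ *	KEY_POWER) sets both the EV_KEY type bit and the KEY_POWER code bit)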
+ * @keybit: bitmap of keys/buttons this device has
+ * @relbit: bitmap of relative axes for the device
+ * @absbit: bitmap of absolute axes for the device
+ * @mscbit: bitmap of miscellaneous events supported by the device
+ * @ledbit: bitmap of leds present on the device
+ * @sndbit: bitmap of sound effects supported by the device
+ * @ffbit: bitmap of force feedback effects supported by the device
+ * @swbit: bitmap of switches present on the device
+ * @hint_events_per_packet: average number of events generated by the
+ * device in a packet (between EV_SYN/SYN_REPORT events). Used by
+ * event handlers to estimate size of the buffer needed to hold
+ * events.
+ * @keycodemax: size of keycode table
+ * @keycodesize: size of elements in keycode table
+ * @keycode: map of scancodes to keycodes for this device
+ * @getkeycode: optional legacy method to retrieve current keymap.
+ * @setkeycode: optional method to alter current keymap, used to implement
+ * sparse keymaps. If not supplied, the default mechanism will be used.
+ * The method is called while holding event_lock and thus must
+ * not sleep
+ * @ff: force feedback structure associated with the device if device
+ * supports force feedback effects
+ * @repeat_key: stores key code of the last key pressed; used to implement
+ * software autorepeat
+ * @timer: timer for software autorepeat
+ * @rep: current values for autorepeat parameters (delay, rate)
+ * @mt: pointer to array of struct input_mt_slot holding current values
+ * of tracked contacts
+ * @mtsize: number of MT slots the device uses
+ * @slot: MT slot currently being transmitted
+ * @trkid: stores MT tracking ID for the current contact
+ * @absinfo: array of &struct input_absinfo elements holding information
+ * about absolute axes (current value, min, max, flat, fuzz,
+ * resolution)
+ * @key: reflects current state of device's keys/buttons
+ * @led: reflects current state of device's LEDs
+ * @snd: reflects current state of sound effects
+ * @sw: reflects current state of device's switches
+ * @open: this method is called when the very first user calls
+ * input_open_device(). The driver must prepare the device
+ * to start generating events (start polling thread,
+ * request an IRQ, submit URB, etc.)
+ * @close: this method is called when the very last user calls
+ * input_close_device().
+ * @flush: purges the device. Most commonly used to get rid of force
+ * feedback effects loaded into the device when disconnecting
+ * from it
+ * @event: event handler for events sent _to_ the device, like EV_LED
+ * or EV_SND. The device is expected to carry out the requested
+ * action (turn on a LED, play sound, etc.) The call is protected
+ * by @event_lock and must not sleep
+ * @grab: input handle that currently has the device grabbed (via
+ * EVIOCGRAB ioctl). When a handle grabs a device it becomes the sole
+ * recipient for all input events coming from the device
+ * @event_lock: this spinlock is taken when input core receives
+ * and processes a new event for the device (in input_event()).
+ * Code that accesses and/or modifies parameters of a device
+ * (such as keymap or absmin, absmax, absfuzz, etc.) after device
+ * has been registered with input core must take this lock.
+ * @mutex: serializes calls to open(), close() and flush() methods
+ * @users: stores number of users (input handlers) that opened this
+ * device.
It is used by input_open_device() and input_close_device() + * to make sure that dev->open() is only called when the first + * user opens device and dev->close() is called when the very + * last user closes the device + * @going_away: marks devices that are in a middle of unregistering and + * causes input_open_device*() fail with -ENODEV. + * @sync: set to %true when there were no new events since last EV_SYN + * @dev: driver model's view of this device + * @h_list: list of input handles associated with the device. When + * accessing the list dev->mutex must be held + * @node: used to place the device onto input_dev_list + */ +struct input_dev { + const char *name; + const char *phys; + const char *uniq; + struct input_id id; + + unsigned long propbit[BITS_TO_LONGS(INPUT_PROP_CNT)]; + + unsigned long evbit[BITS_TO_LONGS(EV_CNT)]; + unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; + unsigned long relbit[BITS_TO_LONGS(REL_CNT)]; + unsigned long absbit[BITS_TO_LONGS(ABS_CNT)]; + unsigned long mscbit[BITS_TO_LONGS(MSC_CNT)]; + unsigned long ledbit[BITS_TO_LONGS(LED_CNT)]; + unsigned long sndbit[BITS_TO_LONGS(SND_CNT)]; + unsigned long ffbit[BITS_TO_LONGS(FF_CNT)]; + unsigned long swbit[BITS_TO_LONGS(SW_CNT)]; + + unsigned int hint_events_per_packet; + + unsigned int keycodemax; + unsigned int keycodesize; + void *keycode; + + int (*setkeycode)(struct input_dev *dev, + const struct input_keymap_entry *ke, + unsigned int *old_keycode); + int (*getkeycode)(struct input_dev *dev, + struct input_keymap_entry *ke); + + struct ff_device *ff; + + unsigned int repeat_key; + struct timer_list timer; + + int rep[REP_CNT]; + + struct input_mt_slot *mt; + int mtsize; + int slot; + int trkid; + + struct input_absinfo *absinfo; + + unsigned long key[BITS_TO_LONGS(KEY_CNT)]; + unsigned long led[BITS_TO_LONGS(LED_CNT)]; + unsigned long snd[BITS_TO_LONGS(SND_CNT)]; + unsigned long sw[BITS_TO_LONGS(SW_CNT)]; + + int (*open)(struct input_dev *dev); + void (*close)(struct input_dev *dev); + int (*flush)(struct input_dev *dev, struct file *file); + int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); + + struct input_handle __rcu *grab; + + spinlock_t event_lock; + struct mutex mutex; + + unsigned int users; + bool going_away; + + bool sync; + + struct device dev; + + struct list_head h_list; + struct list_head node; +}; +#define to_input_dev(d) container_of(d, struct input_dev, dev) + +/* + * Verify that we are in sync with input_device_id mod_devicetable.h #defines + */ + +#if EV_MAX != INPUT_DEVICE_ID_EV_MAX +#error "EV_MAX and INPUT_DEVICE_ID_EV_MAX do not match" +#endif + +#if KEY_MIN_INTERESTING != INPUT_DEVICE_ID_KEY_MIN_INTERESTING +#error "KEY_MIN_INTERESTING and INPUT_DEVICE_ID_KEY_MIN_INTERESTING do not match" +#endif + +#if KEY_MAX != INPUT_DEVICE_ID_KEY_MAX +#error "KEY_MAX and INPUT_DEVICE_ID_KEY_MAX do not match" +#endif + +#if REL_MAX != INPUT_DEVICE_ID_REL_MAX +#error "REL_MAX and INPUT_DEVICE_ID_REL_MAX do not match" +#endif + +#if ABS_MAX != INPUT_DEVICE_ID_ABS_MAX +#error "ABS_MAX and INPUT_DEVICE_ID_ABS_MAX do not match" +#endif + +#if MSC_MAX != INPUT_DEVICE_ID_MSC_MAX +#error "MSC_MAX and INPUT_DEVICE_ID_MSC_MAX do not match" +#endif + +#if LED_MAX != INPUT_DEVICE_ID_LED_MAX +#error "LED_MAX and INPUT_DEVICE_ID_LED_MAX do not match" +#endif + +#if SND_MAX != INPUT_DEVICE_ID_SND_MAX +#error "SND_MAX and INPUT_DEVICE_ID_SND_MAX do not match" +#endif + +#if FF_MAX != INPUT_DEVICE_ID_FF_MAX +#error "FF_MAX and INPUT_DEVICE_ID_FF_MAX do not match" +#endif + +#if 
SW_MAX != INPUT_DEVICE_ID_SW_MAX +#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match" +#endif + +#define INPUT_DEVICE_ID_MATCH_DEVICE \ + (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) +#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ + (INPUT_DEVICE_ID_MATCH_DEVICE | INPUT_DEVICE_ID_MATCH_VERSION) + +struct input_handle; + +/** + * struct input_handler - implements one of interfaces for input devices + * @private: driver-specific data + * @event: event handler. This method is being called by input core with + * interrupts disabled and dev->event_lock spinlock held and so + * it may not sleep + * @filter: similar to @event; separates normal event handlers from + * "filters". + * @match: called after comparing device's id with handler's id_table + * to perform fine-grained matching between device and handler + * @connect: called when attaching a handler to an input device + * @disconnect: disconnects a handler from input device + * @start: starts handler for given handle. This function is called by + * input core right after connect() method and also when a process + * that "grabbed" a device releases it + * @fops: file operations this driver implements + * @minor: beginning of range of 32 minors for devices this driver + * can provide + * @name: name of the handler, to be shown in /proc/bus/input/handlers + * @id_table: pointer to a table of input_device_ids this driver can + * handle + * @h_list: list of input handles associated with the handler + * @node: for placing the driver onto input_handler_list + * + * Input handlers attach to input devices and create input handles. There + * are likely several handlers attached to any given input device at the + * same time. All of them will get their copy of input event generated by + * the device. + * + * The very same structure is used to implement input filters. Input core + * allows filters to run first and will not pass event to regular handlers + * if any of the filters indicate that the event should be filtered (by + * returning %true from their filter() method). + * + * Note that input core serializes calls to connect() and disconnect() + * methods. + */ +struct input_handler { + + void *private; + + void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); + bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value); + bool (*match)(struct input_handler *handler, struct input_dev *dev); + int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id); + void (*disconnect)(struct input_handle *handle); + void (*start)(struct input_handle *handle); + + const struct file_operations *fops; + int minor; + const char *name; + + const struct input_device_id *id_table; + + struct list_head h_list; + struct list_head node; +}; + +/** + * struct input_handle - links input device with an input handler + * @private: handler-specific data + * @open: counter showing whether the handle is 'open', i.e. 
should deliver + * events from its device + * @name: name given to the handle by handler that created it + * @dev: input device the handle is attached to + * @handler: handler that works with the device through this handle + * @d_node: used to put the handle on device's list of attached handles + * @h_node: used to put the handle on handler's list of handles from which + * it gets events + */ +struct input_handle { + + void *private; + + int open; + const char *name; + + struct input_dev *dev; + struct input_handler *handler; + + struct list_head d_node; + struct list_head h_node; +}; + +struct input_dev *input_allocate_device(void); +void input_free_device(struct input_dev *dev); + +static inline struct input_dev *input_get_device(struct input_dev *dev) +{ + return dev ? to_input_dev(get_device(&dev->dev)) : NULL; +} + +static inline void input_put_device(struct input_dev *dev) +{ + if (dev) + put_device(&dev->dev); +} + +static inline void *input_get_drvdata(struct input_dev *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void input_set_drvdata(struct input_dev *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +int __must_check input_register_device(struct input_dev *); +void input_unregister_device(struct input_dev *); + +void input_reset_device(struct input_dev *); + +int __must_check input_register_handler(struct input_handler *); +void input_unregister_handler(struct input_handler *); + +int input_handler_for_each_handle(struct input_handler *, void *data, + int (*fn)(struct input_handle *, void *)); + +int input_register_handle(struct input_handle *); +void input_unregister_handle(struct input_handle *); + +int input_grab_device(struct input_handle *); +void input_release_device(struct input_handle *); + +int input_open_device(struct input_handle *); +void input_close_device(struct input_handle *); + +int input_flush_device(struct input_handle *handle, struct file *file); + +void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); +void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); + +static inline void input_report_key(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_KEY, code, !!value); +} + +static inline void input_report_rel(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_REL, code, value); +} + +static inline void input_report_abs(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_ABS, code, value); +} + +static inline void input_report_ff_status(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_FF_STATUS, code, value); +} + +static inline void input_report_switch(struct input_dev *dev, unsigned int code, int value) +{ + input_event(dev, EV_SW, code, !!value); +} + +static inline void input_sync(struct input_dev *dev) +{ + input_event(dev, EV_SYN, SYN_REPORT, 0); +} + +static inline void input_mt_sync(struct input_dev *dev) +{ + input_event(dev, EV_SYN, SYN_MT_REPORT, 0); +} + +void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); + +/** + * input_set_events_per_packet - tell handlers about the driver event rate + * @dev: the input device used by the driver + * @n_events: the average number of events between calls to input_sync() + * + * If the event rate sent from a device is unusually large, use this + * function to set the expected event rate. 
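+ * For instance (the figure is illustrative only), a multitouch
+ * controller streaming several axes for up to ten contacts per frame
+ * might declare:
+ *
+ *    input_set_events_per_packet(dev, 60);
+ *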
This will allow handlers + * to set up an appropriate buffer size for the event stream, in order + * to minimize information loss. + */ +static inline void input_set_events_per_packet(struct input_dev *dev, int n_events) +{ + dev->hint_events_per_packet = n_events; +} + +void input_alloc_absinfo(struct input_dev *dev); +void input_set_abs_params(struct input_dev *dev, unsigned int axis, + int min, int max, int fuzz, int flat); + +#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \ +static inline int input_abs_get_##_suffix(struct input_dev *dev, \ + unsigned int axis) \ +{ \ + return dev->absinfo ? dev->absinfo[axis]._item : 0; \ +} \ + \ +static inline void input_abs_set_##_suffix(struct input_dev *dev, \ + unsigned int axis, int val) \ +{ \ + input_alloc_absinfo(dev); \ + if (dev->absinfo) \ + dev->absinfo[axis]._item = val; \ +} + +INPUT_GENERATE_ABS_ACCESSORS(val, value) +INPUT_GENERATE_ABS_ACCESSORS(min, minimum) +INPUT_GENERATE_ABS_ACCESSORS(max, maximum) +INPUT_GENERATE_ABS_ACCESSORS(fuzz, fuzz) +INPUT_GENERATE_ABS_ACCESSORS(flat, flat) +INPUT_GENERATE_ABS_ACCESSORS(res, resolution) + +int input_scancode_to_scalar(const struct input_keymap_entry *ke, + unsigned int *scancode); + +int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); +int input_set_keycode(struct input_dev *dev, + const struct input_keymap_entry *ke); + +extern struct class input_class; + +/** + * struct ff_device - force-feedback part of an input device + * @upload: Called to upload an new effect into device + * @erase: Called to erase an effect from device + * @playback: Called to request device to start playing specified effect + * @set_gain: Called to set specified gain + * @set_autocenter: Called to auto-center device + * @destroy: called by input core when parent input device is being + * destroyed + * @private: driver-specific data, will be freed automatically + * @ffbit: bitmap of force feedback capabilities truly supported by + * device (not emulated like ones in input_dev->ffbit) + * @mutex: mutex for serializing access to the device + * @max_effects: maximum number of effects supported by device + * @effects: pointer to an array of effects currently loaded into device + * @effect_owners: array of effect owners; when file handle owning + * an effect gets closed the effect is automatically erased + * + * Every force-feedback device must implement upload() and playback() + * methods; erase() is optional. set_gain() and set_autocenter() need + * only be implemented if driver sets up FF_GAIN and FF_AUTOCENTER + * bits. + * + * Note that playback(), set_gain() and set_autocenter() are called with + * dev->event_lock spinlock held and interrupts off and thus may not + * sleep. 
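+ *
+ * Simple rumble-only drivers usually do not fill this structure by hand;
+ * a rough sketch of the common pattern (my_play_effect being a
+ * hypothetical driver-supplied callback) is:
+ *
+ *    input_set_capability(dev, EV_FF, FF_RUMBLE);
+ *    err = input_ff_create_memless(dev, NULL, my_play_effect);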
+ */ +struct ff_device { + int (*upload)(struct input_dev *dev, struct ff_effect *effect, + struct ff_effect *old); + int (*erase)(struct input_dev *dev, int effect_id); + + int (*playback)(struct input_dev *dev, int effect_id, int value); + void (*set_gain)(struct input_dev *dev, u16 gain); + void (*set_autocenter)(struct input_dev *dev, u16 magnitude); + + void (*destroy)(struct ff_device *); + + void *private; + + unsigned long ffbit[BITS_TO_LONGS(FF_CNT)]; + + struct mutex mutex; + + int max_effects; + struct ff_effect *effects; + struct file *effect_owners[]; +}; + +int input_ff_create(struct input_dev *dev, unsigned int max_effects); +void input_ff_destroy(struct input_dev *dev); + +int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); + +int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file); +int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); + +int input_ff_create_memless(struct input_dev *dev, void *data, + int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); + +#endif +#endif diff --git a/include/linux/input/ad714x.h b/include/linux/input/ad714x.h new file mode 100644 index 00000000..d388d857 --- /dev/null +++ b/include/linux/input/ad714x.h @@ -0,0 +1,64 @@ +/* + * include/linux/input/ad714x.h + * + * AD714x is very flexible, it can be used as buttons, scrollwheel, + * slider, touchpad at the same time. That depends on the boards. + * The platform_data for the device's "struct device" holds this + * information. + * + * Copyright 2009-2011 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __LINUX_INPUT_AD714X_H__ +#define __LINUX_INPUT_AD714X_H__ + +#define STAGE_NUM 12 +#define STAGE_CFGREG_NUM 8 +#define SYS_CFGREG_NUM 8 + +/* board information which need be initialized in arch/mach... */ +struct ad714x_slider_plat { + int start_stage; + int end_stage; + int max_coord; +}; + +struct ad714x_wheel_plat { + int start_stage; + int end_stage; + int max_coord; +}; + +struct ad714x_touchpad_plat { + int x_start_stage; + int x_end_stage; + int x_max_coord; + + int y_start_stage; + int y_end_stage; + int y_max_coord; +}; + +struct ad714x_button_plat { + int keycode; + unsigned short l_mask; + unsigned short h_mask; +}; + +struct ad714x_platform_data { + int slider_num; + int wheel_num; + int touchpad_num; + int button_num; + struct ad714x_slider_plat *slider; + struct ad714x_wheel_plat *wheel; + struct ad714x_touchpad_plat *touchpad; + struct ad714x_button_plat *button; + unsigned short stage_cfg_reg[STAGE_NUM][STAGE_CFGREG_NUM]; + unsigned short sys_cfg_reg[SYS_CFGREG_NUM]; + unsigned long irqflags; +}; + +#endif diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h new file mode 100644 index 00000000..1a05eee1 --- /dev/null +++ b/include/linux/input/adp5589.h @@ -0,0 +1,188 @@ +/* + * Analog Devices ADP5589/ADP5585 I/O Expander and QWERTY Keypad Controller + * + * Copyright 2010-2011 Analog Devices Inc. + * + * Licensed under the GPL-2. 
+ */ + +#ifndef _ADP5589_H +#define _ADP5589_H + +/* + * ADP5589 specific GPI and Keymap defines + */ + +#define ADP5589_KEYMAPSIZE 88 + +#define ADP5589_GPI_PIN_ROW0 97 +#define ADP5589_GPI_PIN_ROW1 98 +#define ADP5589_GPI_PIN_ROW2 99 +#define ADP5589_GPI_PIN_ROW3 100 +#define ADP5589_GPI_PIN_ROW4 101 +#define ADP5589_GPI_PIN_ROW5 102 +#define ADP5589_GPI_PIN_ROW6 103 +#define ADP5589_GPI_PIN_ROW7 104 +#define ADP5589_GPI_PIN_COL0 105 +#define ADP5589_GPI_PIN_COL1 106 +#define ADP5589_GPI_PIN_COL2 107 +#define ADP5589_GPI_PIN_COL3 108 +#define ADP5589_GPI_PIN_COL4 109 +#define ADP5589_GPI_PIN_COL5 110 +#define ADP5589_GPI_PIN_COL6 111 +#define ADP5589_GPI_PIN_COL7 112 +#define ADP5589_GPI_PIN_COL8 113 +#define ADP5589_GPI_PIN_COL9 114 +#define ADP5589_GPI_PIN_COL10 115 +#define GPI_LOGIC1 116 +#define GPI_LOGIC2 117 + +#define ADP5589_GPI_PIN_ROW_BASE ADP5589_GPI_PIN_ROW0 +#define ADP5589_GPI_PIN_ROW_END ADP5589_GPI_PIN_ROW7 +#define ADP5589_GPI_PIN_COL_BASE ADP5589_GPI_PIN_COL0 +#define ADP5589_GPI_PIN_COL_END ADP5589_GPI_PIN_COL10 + +#define ADP5589_GPI_PIN_BASE ADP5589_GPI_PIN_ROW_BASE +#define ADP5589_GPI_PIN_END ADP5589_GPI_PIN_COL_END + +#define ADP5589_GPIMAPSIZE_MAX (ADP5589_GPI_PIN_END - ADP5589_GPI_PIN_BASE + 1) + +/* + * ADP5585 specific GPI and Keymap defines + */ + +#define ADP5585_KEYMAPSIZE 30 + +#define ADP5585_GPI_PIN_ROW0 37 +#define ADP5585_GPI_PIN_ROW1 38 +#define ADP5585_GPI_PIN_ROW2 39 +#define ADP5585_GPI_PIN_ROW3 40 +#define ADP5585_GPI_PIN_ROW4 41 +#define ADP5585_GPI_PIN_ROW5 42 +#define ADP5585_GPI_PIN_COL0 43 +#define ADP5585_GPI_PIN_COL1 44 +#define ADP5585_GPI_PIN_COL2 45 +#define ADP5585_GPI_PIN_COL3 46 +#define ADP5585_GPI_PIN_COL4 47 +#define GPI_LOGIC 48 + +#define ADP5585_GPI_PIN_ROW_BASE ADP5585_GPI_PIN_ROW0 +#define ADP5585_GPI_PIN_ROW_END ADP5585_GPI_PIN_ROW5 +#define ADP5585_GPI_PIN_COL_BASE ADP5585_GPI_PIN_COL0 +#define ADP5585_GPI_PIN_COL_END ADP5585_GPI_PIN_COL4 + +#define ADP5585_GPI_PIN_BASE ADP5585_GPI_PIN_ROW_BASE +#define ADP5585_GPI_PIN_END ADP5585_GPI_PIN_COL_END + +#define ADP5585_GPIMAPSIZE_MAX (ADP5585_GPI_PIN_END - ADP5585_GPI_PIN_BASE + 1) + +struct adp5589_gpi_map { + unsigned short pin; + unsigned short sw_evt; +}; + +/* scan_cycle_time */ +#define ADP5589_SCAN_CYCLE_10ms 0 +#define ADP5589_SCAN_CYCLE_20ms 1 +#define ADP5589_SCAN_CYCLE_30ms 2 +#define ADP5589_SCAN_CYCLE_40ms 3 + +/* RESET_CFG */ +#define RESET_PULSE_WIDTH_500us 0 +#define RESET_PULSE_WIDTH_1ms 1 +#define RESET_PULSE_WIDTH_2ms 2 +#define RESET_PULSE_WIDTH_10ms 3 + +#define RESET_TRIG_TIME_0ms (0 << 2) +#define RESET_TRIG_TIME_1000ms (1 << 2) +#define RESET_TRIG_TIME_1500ms (2 << 2) +#define RESET_TRIG_TIME_2000ms (3 << 2) +#define RESET_TRIG_TIME_2500ms (4 << 2) +#define RESET_TRIG_TIME_3000ms (5 << 2) +#define RESET_TRIG_TIME_3500ms (6 << 2) +#define RESET_TRIG_TIME_4000ms (7 << 2) + +#define RESET_PASSTHRU_EN (1 << 5) +#define RESET1_POL_HIGH (1 << 6) +#define RESET1_POL_LOW (0 << 6) +#define RESET2_POL_HIGH (1 << 7) +#define RESET2_POL_LOW (0 << 7) + +/* ADP5589 Mask Bits: + * C C C C C C C C C C C | R R R R R R R R + * 1 9 8 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0 + * 0 + * ---------------- BIT ------------------ + * 1 1 1 1 1 1 1 1 1 0 0 | 0 0 0 0 0 0 0 0 + * 8 7 6 5 4 3 2 1 0 9 8 | 7 6 5 4 3 2 1 0 + */ + +#define ADP_ROW(x) (1 << (x)) +#define ADP_COL(x) (1 << (x + 8)) +#define ADP5589_ROW_MASK 0xFF +#define ADP5589_COL_MASK 0xFF +#define ADP5589_COL_SHIFT 8 +#define ADP5589_MAX_ROW_NUM 7 +#define ADP5589_MAX_COL_NUM 10 + +/* ADP5585 Mask Bits: + * C C C C C | R R R R 
R R + * 4 3 2 1 0 | 5 4 3 2 1 0 + * + * ---- BIT -- ----------- + * 1 0 0 0 0 | 0 0 0 0 0 0 + * 0 9 8 7 6 | 5 4 3 2 1 0 + */ + +#define ADP5585_ROW_MASK 0x3F +#define ADP5585_COL_MASK 0x1F +#define ADP5585_ROW_SHIFT 0 +#define ADP5585_COL_SHIFT 6 +#define ADP5585_MAX_ROW_NUM 5 +#define ADP5585_MAX_COL_NUM 4 + +#define ADP5585_ROW(x) (1 << ((x) & ADP5585_ROW_MASK)) +#define ADP5585_COL(x) (1 << (((x) & ADP5585_COL_MASK) + ADP5585_COL_SHIFT)) + +/* Put one of these structures in i2c_board_info platform_data */ + +struct adp5589_kpad_platform_data { + unsigned keypad_en_mask; /* Keypad (Rows/Columns) enable mask */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + bool repeat; /* Enable key repeat */ + bool en_keylock; /* Enable key lock feature (ADP5589 only)*/ + unsigned char unlock_key1; /* Unlock Key 1 (ADP5589 only) */ + unsigned char unlock_key2; /* Unlock Key 2 (ADP5589 only) */ + unsigned char unlock_timer; /* Time in seconds [0..7] between the two unlock keys 0=disable (ADP5589 only) */ + unsigned char scan_cycle_time; /* Time between consecutive scan cycles */ + unsigned char reset_cfg; /* Reset config */ + unsigned short reset1_key_1; /* Reset Key 1 */ + unsigned short reset1_key_2; /* Reset Key 2 */ + unsigned short reset1_key_3; /* Reset Key 3 */ + unsigned short reset2_key_1; /* Reset Key 1 */ + unsigned short reset2_key_2; /* Reset Key 2 */ + unsigned debounce_dis_mask; /* Disable debounce mask */ + unsigned pull_dis_mask; /* Disable all pull resistors mask */ + unsigned pullup_en_100k; /* Pull-Up 100k Enable Mask */ + unsigned pullup_en_300k; /* Pull-Up 300k Enable Mask */ + unsigned pulldown_en_300k; /* Pull-Down 300k Enable Mask */ + const struct adp5589_gpi_map *gpimap; + unsigned short gpimapsize; + const struct adp5589_gpio_platform_data *gpio_data; +}; + +struct i2c_client; /* forward declaration */ + +struct adp5589_gpio_platform_data { + int gpio_start; /* GPIO Chip base # */ + int (*setup)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + int gpio, unsigned ngpio, + void *context); + void *context; +}; + +#endif diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h new file mode 100644 index 00000000..57e01a7c --- /dev/null +++ b/include/linux/input/adxl34x.h @@ -0,0 +1,356 @@ +/* + * include/linux/input/adxl34x.h + * + * Digital Accelerometer characteristics are highly application specific + * and may vary between boards and models. The platform_data for the + * device's "struct device" holds this information. + * + * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __LINUX_INPUT_ADXL34X_H__ +#define __LINUX_INPUT_ADXL34X_H__ + +struct adxl34x_platform_data { + + /* + * X,Y,Z Axis Offset: + * offer user offset adjustments in twoscompliment + * form with a scale factor of 15.6 mg/LSB (i.e. 0x7F = +2 g) + */ + + s8 x_axis_offset; + s8 y_axis_offset; + s8 z_axis_offset; + + /* + * TAP_X/Y/Z Enable: Setting TAP_X, Y, or Z Enable enables X, + * Y, or Z participation in Tap detection. A '0' excludes the + * selected axis from participation in Tap detection. + * Setting the SUPPRESS bit suppresses Double Tap detection if + * acceleration greater than tap_threshold is present during the + * tap_latency period, i.e. after the first tap but before the + * opening of the second tap window. 
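+ * A board that wants X and Y axis tap detection with double-tap
+ * suppression might, for example, set:
+ *
+ *    .tap_axis_control = ADXL_SUPPRESS | ADXL_TAP_X_EN | ADXL_TAP_Y_EN,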
+ */ + +#define ADXL_SUPPRESS (1 << 3) +#define ADXL_TAP_X_EN (1 << 2) +#define ADXL_TAP_Y_EN (1 << 1) +#define ADXL_TAP_Z_EN (1 << 0) + + u8 tap_axis_control; + + /* + * tap_threshold: + * holds the threshold value for tap detection/interrupts. + * The data format is unsigned. The scale factor is 62.5 mg/LSB + * (i.e. 0xFF = +16 g). A zero value may result in undesirable + * behavior if Tap/Double Tap is enabled. + */ + + u8 tap_threshold; + + /* + * tap_duration: + * is an unsigned time value representing the maximum + * time that an event must be above the tap_threshold threshold + * to qualify as a tap event. The scale factor is 625 us/LSB. A zero + * value will prevent Tap/Double Tap functions from working. + */ + + u8 tap_duration; + + /* + * tap_latency: + * is an unsigned time value representing the wait time + * from the detection of a tap event to the opening of the time + * window tap_window for a possible second tap event. The scale + * factor is 1.25 ms/LSB. A zero value will disable the Double Tap + * function. + */ + + u8 tap_latency; + + /* + * tap_window: + * is an unsigned time value representing the amount + * of time after the expiration of tap_latency during which a second + * tap can begin. The scale factor is 1.25 ms/LSB. A zero value will + * disable the Double Tap function. + */ + + u8 tap_window; + + /* + * act_axis_control: + * X/Y/Z Enable: A '1' enables X, Y, or Z participation in activity + * or inactivity detection. A '0' excludes the selected axis from + * participation. If all of the axes are excluded, the function is + * disabled. + * AC/DC: A '0' = DC coupled operation and a '1' = AC coupled + * operation. In DC coupled operation, the current acceleration is + * compared with activity_threshold and inactivity_threshold directly + * to determine whether activity or inactivity is detected. In AC + * coupled operation for activity detection, the acceleration value + * at the start of activity detection is taken as a reference value. + * New samples of acceleration are then compared to this + * reference value and if the magnitude of the difference exceeds + * activity_threshold the device will trigger an activity interrupt. In + * AC coupled operation for inactivity detection, a reference value + * is used again for comparison and is updated whenever the + * device exceeds the inactivity threshold. Once the reference + * value is selected, the device compares the magnitude of the + * difference between the reference value and the current + * acceleration with inactivity_threshold. If the difference is below + * inactivity_threshold for a total of inactivity_time, the device is + * considered inactive and the inactivity interrupt is triggered. + */ + +#define ADXL_ACT_ACDC (1 << 7) +#define ADXL_ACT_X_EN (1 << 6) +#define ADXL_ACT_Y_EN (1 << 5) +#define ADXL_ACT_Z_EN (1 << 4) +#define ADXL_INACT_ACDC (1 << 3) +#define ADXL_INACT_X_EN (1 << 2) +#define ADXL_INACT_Y_EN (1 << 1) +#define ADXL_INACT_Z_EN (1 << 0) + + u8 act_axis_control; + + /* + * activity_threshold: + * holds the threshold value for activity detection. + * The data format is unsigned. The scale factor is + * 62.5 mg/LSB. A zero value may result in undesirable behavior if + * Activity interrupt is enabled. + */ + + u8 activity_threshold; + + /* + * inactivity_threshold: + * holds the threshold value for inactivity + * detection. The data format is unsigned. The scale + * factor is 62.5 mg/LSB. A zero value may result in undesirable + * behavior if Inactivity interrupt is enabled. 
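+ * For example, a value of 8 corresponds to 500 mg (8 * 62.5 mg/LSB).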
+ */ + + u8 inactivity_threshold; + + /* + * inactivity_time: + * is an unsigned time value representing the + * amount of time that acceleration must be below the value in + * inactivity_threshold for inactivity to be declared. The scale factor + * is 1 second/LSB. Unlike the other interrupt functions, which + * operate on unfiltered data, the inactivity function operates on the + * filtered output data. At least one output sample must be + * generated for the inactivity interrupt to be triggered. This will + * result in the function appearing un-responsive if the + * inactivity_time register is set with a value less than the time + * constant of the Output Data Rate. A zero value will result in an + * interrupt when the output data is below inactivity_threshold. + */ + + u8 inactivity_time; + + /* + * free_fall_threshold: + * holds the threshold value for Free-Fall detection. + * The data format is unsigned. The root-sum-square(RSS) value + * of all axes is calculated and compared to the value in + * free_fall_threshold to determine if a free fall event may be + * occurring. The scale factor is 62.5 mg/LSB. A zero value may + * result in undesirable behavior if Free-Fall interrupt is + * enabled. Values between 300 and 600 mg (0x05 to 0x09) are + * recommended. + */ + + u8 free_fall_threshold; + + /* + * free_fall_time: + * is an unsigned time value representing the minimum + * time that the RSS value of all axes must be less than + * free_fall_threshold to generate a Free-Fall interrupt. The + * scale factor is 5 ms/LSB. A zero value may result in + * undesirable behavior if Free-Fall interrupt is enabled. + * Values between 100 to 350 ms (0x14 to 0x46) are recommended. + */ + + u8 free_fall_time; + + /* + * data_rate: + * Selects device bandwidth and output data rate. + * RATE = 3200 Hz / (2^(15 - x)). Default value is 0x0A, or 100 Hz + * Output Data Rate. An Output Data Rate should be selected that + * is appropriate for the communication protocol and frequency + * selected. Selecting too high of an Output Data Rate with a low + * communication speed will result in samples being discarded. + */ + + u8 data_rate; + + /* + * data_range: + * FULL_RES: When this bit is set with the device is + * in Full-Resolution Mode, where the output resolution increases + * with RANGE to maintain a 4 mg/LSB scale factor. When this + * bit is cleared the device is in 10-bit Mode and RANGE determine the + * maximum g-Range and scale factor. + */ + +#define ADXL_FULL_RES (1 << 3) +#define ADXL_RANGE_PM_2g 0 +#define ADXL_RANGE_PM_4g 1 +#define ADXL_RANGE_PM_8g 2 +#define ADXL_RANGE_PM_16g 3 + + u8 data_range; + + /* + * low_power_mode: + * A '0' = Normal operation and a '1' = Reduced + * power operation with somewhat higher noise. + */ + + u8 low_power_mode; + + /* + * power_mode: + * LINK: A '1' with both the activity and inactivity functions + * enabled will delay the start of the activity function until + * inactivity is detected. Once activity is detected, inactivity + * detection will begin and prevent the detection of activity. This + * bit serially links the activity and inactivity functions. When '0' + * the inactivity and activity functions are concurrent. Additional + * information can be found in the ADXL34x datasheet's Application + * section under Link Mode. + * AUTO_SLEEP: A '1' sets the ADXL34x to switch to Sleep Mode + * when inactivity (acceleration has been below inactivity_threshold + * for at least inactivity_time) is detected and the LINK bit is set. 
+ * A '0' disables automatic switching to Sleep Mode. See the + * Sleep Bit section of the ADXL34x datasheet for more information. + */ + +#define ADXL_LINK (1 << 5) +#define ADXL_AUTO_SLEEP (1 << 4) + + u8 power_mode; + + /* + * fifo_mode: + * BYPASS The FIFO is bypassed + * FIFO FIFO collects up to 32 values then stops collecting data + * STREAM FIFO holds the last 32 data values. Once full, the FIFO's + * oldest data is lost as it is replaced with newer data + * + * DEFAULT should be ADXL_FIFO_STREAM + */ + +#define ADXL_FIFO_BYPASS 0 +#define ADXL_FIFO_FIFO 1 +#define ADXL_FIFO_STREAM 2 + + u8 fifo_mode; + + /* + * watermark: + * The Watermark feature can be used to reduce the interrupt load + * of the system. The FIFO fills up to the value stored in watermark + * [1..32] and then generates an interrupt. + * A '0' disables the watermark feature. + */ + + u8 watermark; + + /* + * When acceleration measurements are received from the ADXL34x + * events are sent to the event subsystem. The following settings + * select the event type and event code for new x, y and z axis data + * respectively. + */ + u32 ev_type; /* EV_ABS or EV_REL */ + + u32 ev_code_x; /* ABS_X,Y,Z or REL_X,Y,Z */ + u32 ev_code_y; /* ABS_X,Y,Z or REL_X,Y,Z */ + u32 ev_code_z; /* ABS_X,Y,Z or REL_X,Y,Z */ + + /* + * A valid BTN or KEY Code; use tap_axis_control to disable + * event reporting + */ + + u32 ev_code_tap[3]; /* EV_KEY {X-Axis, Y-Axis, Z-Axis} */ + + /* + * A valid BTN or KEY Code for Free-Fall or Activity enables + * input event reporting. A '0' disables the Free-Fall or + * Activity reporting. + */ + + u32 ev_code_ff; /* EV_KEY */ + u32 ev_code_act_inactivity; /* EV_KEY */ + + /* + * Use ADXL34x INT2 pin instead of INT1 pin for interrupt output + */ + u8 use_int2; + + /* + * ADXL346 only ORIENTATION SENSING feature + * The orientation function of the ADXL346 reports both 2-D and + * 3-D orientation concurrently. + */ + +#define ADXL_EN_ORIENTATION_2D 1 +#define ADXL_EN_ORIENTATION_3D 2 +#define ADXL_EN_ORIENTATION_2D_3D 3 + + u8 orientation_enable; + + /* + * The width of the deadzone region between two or more + * orientation positions is determined by setting the Deadzone + * value. The deadzone region size can be specified with a + * resolution of 3.6deg. The deadzone angle represents the total + * angle where the orientation is considered invalid. + */ + +#define ADXL_DEADZONE_ANGLE_0p0 0 /* !!!0.0 [deg] */ +#define ADXL_DEADZONE_ANGLE_3p6 1 /* 3.6 [deg] */ +#define ADXL_DEADZONE_ANGLE_7p2 2 /* 7.2 [deg] */ +#define ADXL_DEADZONE_ANGLE_10p8 3 /* 10.8 [deg] */ +#define ADXL_DEADZONE_ANGLE_14p4 4 /* 14.4 [deg] */ +#define ADXL_DEADZONE_ANGLE_18p0 5 /* 18.0 [deg] */ +#define ADXL_DEADZONE_ANGLE_21p6 6 /* 21.6 [deg] */ +#define ADXL_DEADZONE_ANGLE_25p2 7 /* 25.2 [deg] */ + + u8 deadzone_angle; + + /* + * To eliminate most human motion such as walking or shaking, + * a Divisor value should be selected to effectively limit the + * orientation bandwidth. 
Set the depth of the filter used to + * low-pass filter the measured acceleration for stable + * orientation sensing + */ + +#define ADXL_LP_FILTER_DIVISOR_2 0 +#define ADXL_LP_FILTER_DIVISOR_4 1 +#define ADXL_LP_FILTER_DIVISOR_8 2 +#define ADXL_LP_FILTER_DIVISOR_16 3 +#define ADXL_LP_FILTER_DIVISOR_32 4 +#define ADXL_LP_FILTER_DIVISOR_64 5 +#define ADXL_LP_FILTER_DIVISOR_128 6 +#define ADXL_LP_FILTER_DIVISOR_256 7 + + u8 divisor_length; + + u32 ev_codes_orient_2d[4]; /* EV_KEY {+X, -X, +Y, -Y} */ + u32 ev_codes_orient_3d[6]; /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */ +}; +#endif diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h new file mode 100644 index 00000000..1affd0dd --- /dev/null +++ b/include/linux/input/as5011.h @@ -0,0 +1,20 @@ +#ifndef _AS5011_H +#define _AS5011_H + +/* + * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +struct as5011_platform_data { + unsigned int button_gpio; + unsigned int axis_irq; /* irq number */ + unsigned long axis_irqflags; + char xp, xn; /* threshold for x axis */ + char yp, yn; /* threshold for y axis */ +}; + +#endif /* _AS5011_H */ diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h new file mode 100644 index 00000000..75d4be71 --- /dev/null +++ b/include/linux/input/auo-pixcir-ts.h @@ -0,0 +1,56 @@ +/* + * Driver for AUO in-cell touchscreens + * + * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de> + * + * based on auo_touch.h from Dell Streak kernel + * + * Copyright (c) 2008 QUALCOMM Incorporated. + * Copyright (c) 2008 QUALCOMM USA, INC. + * + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __AUO_PIXCIR_TS_H__ +#define __AUO_PIXCIR_TS_H__ + +/* + * Interrupt modes: + * periodical: interrupt is asserted periodicaly + * compare coordinates: interrupt is asserted when coordinates change + * indicate touch: interrupt is asserted during touch + */ +#define AUO_PIXCIR_INT_PERIODICAL 0x00 +#define AUO_PIXCIR_INT_COMP_COORD 0x01 +#define AUO_PIXCIR_INT_TOUCH_IND 0x02 + +/* + * @gpio_int interrupt gpio + * @int_setting one of AUO_PIXCIR_INT_* + * @init_hw hardwarespecific init + * @exit_hw hardwarespecific shutdown + * @x_max x-resolution + * @y_max y-resolution + */ +struct auo_pixcir_ts_platdata { + int gpio_int; + + int int_setting; + + void (*init_hw)(struct i2c_client *); + void (*exit_hw)(struct i2c_client *); + + unsigned int x_max; + unsigned int y_max; +}; + +#endif diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h new file mode 100644 index 00000000..05e03284 --- /dev/null +++ b/include/linux/input/bu21013.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson + * License terms:GNU General Public License (GPL) version 2 + */ + +#ifndef _BU21013_H +#define _BU21013_H + +/** + * struct bu21013_platform_device - Handle the platform data + * @cs_en: pointer to the cs enable function + * @cs_dis: pointer to the cs disable function + * @irq_read_val: pointer to read the pen irq value function + * @touch_x_max: touch x max + * @touch_y_max: touch y max + * @cs_pin: chip select pin + * @irq: irq pin + * @ext_clk: external clock flag + * @x_flip: x flip flag + * @y_flip: y flip flag + * @wakeup: wakeup flag + * + * This is used to handle the platform data + */ +struct bu21013_platform_device { + int (*cs_en)(int reset_pin); + int (*cs_dis)(int reset_pin); + int (*irq_read_val)(void); + int touch_x_max; + int touch_y_max; + unsigned int cs_pin; + unsigned int irq; + bool ext_clk; + bool x_flip; + bool y_flip; + bool wakeup; +}; + +#endif diff --git a/include/linux/input/cma3000.h b/include/linux/input/cma3000.h new file mode 100644 index 00000000..cbbaac27 --- /dev/null +++ b/include/linux/input/cma3000.h @@ -0,0 +1,59 @@ +/* + * VTI CMA3000_Dxx Accelerometer driver + * + * Copyright (C) 2010 Texas Instruments + * Author: Hemanth V <hemanthv@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
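/*
 * Illustrative sketch (not part of the header): hooking the BU21013
 * chip-select and pen-IRQ callbacks up to the legacy GPIO API from board
 * code.  The GPIO numbers and "board_" names are hypothetical, and
 * <linux/gpio.h> is assumed to be included.
 */
static int board_bu21013_cs_en(int reset_pin)
{
	gpio_set_value(reset_pin, 1);
	return 0;
}

static int board_bu21013_cs_dis(int reset_pin)
{
	gpio_set_value(reset_pin, 0);
	return 0;
}

static int board_bu21013_irq_read_val(void)
{
	return gpio_get_value(103);	/* hypothetical pen-IRQ GPIO */
}

static struct bu21013_platform_device board_bu21013_data = {
	.cs_en		= board_bu21013_cs_en,
	.cs_dis		= board_bu21013_cs_dis,
	.irq_read_val	= board_bu21013_irq_read_val,
	.touch_x_max	= 384,
	.touch_y_max	= 704,
	.cs_pin		= 102,		/* hypothetical chip-select GPIO */
	.ext_clk	= false,
	.y_flip		= true,
	.wakeup		= true,
};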
+ */ + +#ifndef _LINUX_CMA3000_H +#define _LINUX_CMA3000_H + +#define CMAMODE_DEFAULT 0 +#define CMAMODE_MEAS100 1 +#define CMAMODE_MEAS400 2 +#define CMAMODE_MEAS40 3 +#define CMAMODE_MOTDET 4 +#define CMAMODE_FF100 5 +#define CMAMODE_FF400 6 +#define CMAMODE_POFF 7 + +#define CMARANGE_2G 2000 +#define CMARANGE_8G 8000 + +/** + * struct cma3000_i2c_platform_data - CMA3000 Platform data + * @fuzz_x: Noise on X Axis + * @fuzz_y: Noise on Y Axis + * @fuzz_z: Noise on Z Axis + * @g_range: G range in milli g i.e 2000 or 8000 + * @mode: Operating mode + * @mdthr: Motion detect threshold value + * @mdfftmr: Motion detect and free fall time value + * @ffthr: Free fall threshold value + */ + +struct cma3000_platform_data { + int fuzz_x; + int fuzz_y; + int fuzz_z; + int g_range; + uint8_t mode; + uint8_t mdthr; + uint8_t mdfftmr; + uint8_t ffthr; + unsigned long irqflags; +}; + +#endif diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h new file mode 100644 index 00000000..09522cb5 --- /dev/null +++ b/include/linux/input/cy8ctmg110_pdata.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_CY8CTMG110_PDATA_H +#define _LINUX_CY8CTMG110_PDATA_H + +struct cy8ctmg110_pdata +{ + int reset_pin; /* Reset pin is wired to this GPIO (optional) */ + int irq_pin; /* IRQ pin is wired to this GPIO */ +}; + +#endif diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h new file mode 100644 index 00000000..5af7c66f --- /dev/null +++ b/include/linux/input/cyttsp.h @@ -0,0 +1,58 @@ +/* + * Header file for: + * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. + * For use with Cypress Txx3xx parts. + * Supported parts include: + * CY8CTST341 + * CY8CTMA340 + * + * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. + * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, and only version 2, as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
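/*
 * Illustrative sketch (not part of the header): a possible platform data
 * instance selecting 400 Hz measurement at the 8g range.  The fuzz and
 * threshold values are hypothetical placeholders, and IRQF_TRIGGER_RISING
 * is assumed to come from <linux/interrupt.h>.
 */
static struct cma3000_platform_data board_cma3000_data = {
	.mode		= CMAMODE_MEAS400,
	.g_range	= CMARANGE_8G,		/* milli g */
	.fuzz_x		= 25,
	.fuzz_y		= 25,
	.fuzz_z		= 25,
	.mdthr		= 0x8,
	.mdfftmr	= 0x33,
	.ffthr		= 0x8,
	.irqflags	= IRQF_TRIGGER_RISING,
};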
+ * + * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) + * + */ +#ifndef _CYTTSP_H_ +#define _CYTTSP_H_ + +#define CY_SPI_NAME "cyttsp-spi" +#define CY_I2C_NAME "cyttsp-i2c" +/* Active Power state scanning/processing refresh interval */ +#define CY_ACT_INTRVL_DFLT 0x00 /* ms */ +/* touch timeout for the Active power */ +#define CY_TCH_TMOUT_DFLT 0xFF /* ms */ +/* Low Power state scanning/processing refresh interval */ +#define CY_LP_INTRVL_DFLT 0x0A /* ms */ +/* Active distance in pixels for a gesture to be reported */ +#define CY_ACT_DIST_DFLT 0xF8 /* pixels */ + +struct cyttsp_platform_data { + u32 maxx; + u32 maxy; + bool use_hndshk; + u8 act_dist; /* Active distance */ + u8 act_intrvl; /* Active refresh interval; ms */ + u8 tch_tmout; /* Active touch timeout; ms */ + u8 lp_intrvl; /* Low power refresh interval; ms */ + int (*init)(void); + void (*exit)(void); + char *name; + s16 irq_gpio; + u8 *bl_keys; +}; + +#endif /* _CYTTSP_H_ */ diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h new file mode 100644 index 00000000..f875b316 --- /dev/null +++ b/include/linux/input/eeti_ts.h @@ -0,0 +1,9 @@ +#ifndef LINUX_INPUT_EETI_TS_H +#define LINUX_INPUT_EETI_TS_H + +struct eeti_ts_platform_data { + unsigned int irq_active_high; +}; + +#endif /* LINUX_INPUT_EETI_TS_H */ + diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h new file mode 100644 index 00000000..aad2fd44 --- /dev/null +++ b/include/linux/input/gp2ap002a00f.h @@ -0,0 +1,22 @@ +#ifndef _GP2AP002A00F_H_ +#define _GP2AP002A00F_H_ + +#include <linux/i2c.h> + +#define GP2A_I2C_NAME "gp2ap002a00f" + +/** + * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data + * @vout_gpio: The gpio connected to the object detected pin (VOUT) + * @wakeup: Set to true if the proximity can wake the device from suspend + * @hw_setup: Callback for setting up hardware such as gpios and vregs + * @hw_shutdown: Callback for properly shutting down hardware + */ +struct gp2a_platform_data { + int vout_gpio; + bool wakeup; + int (*hw_setup)(struct i2c_client *client); + int (*hw_shutdown)(struct i2c_client *client); +}; + +#endif diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h new file mode 100644 index 00000000..c1cc52d3 --- /dev/null +++ b/include/linux/input/gpio_tilt.h @@ -0,0 +1,73 @@ +#ifndef _INPUT_GPIO_TILT_H +#define _INPUT_GPIO_TILT_H + +/** + * struct gpio_tilt_axis - Axis used by the tilt switch + * @axis: Constant describing the axis, e.g. ABS_X + * @min: minimum value for abs_param + * @max: maximum value for abs_param + * @fuzz: fuzz value for abs_param + * @flat: flat value for abs_param + */ +struct gpio_tilt_axis { + int axis; + int min; + int max; + int fuzz; + int flat; +}; + +/** + * struct gpio_tilt_state - state description + * @gpios: bitfield of gpio target-states for the value + * @axes: array containing the axes settings for the gpio state + * The array indizes must correspond to the axes defined + * in platform_data + * + * This structure describes a supported axis settings + * and the necessary gpio-state which represent it. + * + * The n-th bit in the bitfield describes the state of the n-th GPIO + * from the gpios-array defined in gpio_regulator_config below. 
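/*
 * Illustrative sketch (not part of the header): board code providing the
 * VOUT GPIO plus setup/teardown hooks for the GP2AP002A00F driver.  The
 * GPIO number and "board_" names are hypothetical; gpio_request_one() and
 * GPIOF_IN are assumed to come from <linux/gpio.h>.
 */
static int board_gp2a_hw_setup(struct i2c_client *client)
{
	/* claim the VOUT line as an input so it can feed an interrupt */
	return gpio_request_one(20, GPIOF_IN, "gp2a_vout");
}

static int board_gp2a_hw_shutdown(struct i2c_client *client)
{
	gpio_free(20);
	return 0;
}

static struct gp2a_platform_data board_gp2a_data = {
	.vout_gpio	= 20,
	.wakeup		= true,
	.hw_setup	= board_gp2a_hw_setup,
	.hw_shutdown	= board_gp2a_hw_shutdown,
};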
+ */ +struct gpio_tilt_state { + int gpios; + int *axes; +}; + +/** + * struct gpio_tilt_platform_data + * @gpios: Array containing the gpios determining the tilt state + * @nr_gpios: Number of gpios + * @axes: Array of gpio_tilt_axis descriptions + * @nr_axes: Number of axes + * @states: Array of gpio_tilt_state entries describing + * the gpio state for specific tilts + * @nr_states: Number of states available + * @debounce_interval: debounce ticks interval in msecs + * @poll_interval: polling interval in msecs - for polling driver only + * @enable: callback to enable the tilt switch + * @disable: callback to disable the tilt switch + * + * This structure contains gpio-tilt-switch configuration + * information that must be passed by platform code to the + * gpio-tilt input driver. + */ +struct gpio_tilt_platform_data { + struct gpio *gpios; + int nr_gpios; + + struct gpio_tilt_axis *axes; + int nr_axes; + + struct gpio_tilt_state *states; + int nr_states; + + int debounce_interval; + + unsigned int poll_interval; + int (*enable)(struct device *dev); + void (*disable)(struct device *dev); +}; + +#endif diff --git a/include/linux/input/ili210x.h b/include/linux/input/ili210x.h new file mode 100644 index 00000000..a5471245 --- /dev/null +++ b/include/linux/input/ili210x.h @@ -0,0 +1,10 @@ +#ifndef _ILI210X_H +#define _ILI210X_H + +struct ili210x_platform_data { + unsigned long irq_flags; + unsigned int poll_period; + bool (*get_pendown_state)(void); +}; + +#endif diff --git a/include/linux/input/kxtj9.h b/include/linux/input/kxtj9.h new file mode 100644 index 00000000..d415579b --- /dev/null +++ b/include/linux/input/kxtj9.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2011 Kionix, Inc. + * Written by Chris Hudson <chudson@kionix.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + * 02111-1307, USA + */ + +#ifndef __KXTJ9_H__ +#define __KXTJ9_H__ + +#define KXTJ9_I2C_ADDR 0x0F + +struct kxtj9_platform_data { + unsigned int min_interval; /* minimum poll interval (in milli-seconds) */ + unsigned int init_interval; /* initial poll interval (in milli-seconds) */ + + /* + * By default, x is axis 0, y is axis 1, z is axis 2; these can be + * changed to account for sensor orientation within the host device. + */ + u8 axis_map_x; + u8 axis_map_y; + u8 axis_map_z; + + /* + * Each axis can be negated to account for sensor orientation within + * the host device. 
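/*
 * Illustrative sketch (not part of the header): two tilt-switch GPIOs
 * mapped onto a single three-position axis for the gpio-tilt driver.
 * GPIO numbers and names are hypothetical; compound literals keep the
 * per-state axis arrays compact, and struct gpio/GPIOF_IN come from
 * <linux/gpio.h>.
 */
static struct gpio board_tilt_gpios[] = {
	{ 52, GPIOF_IN, "tilt_a" },
	{ 53, GPIOF_IN, "tilt_b" },
};

static struct gpio_tilt_axis board_tilt_axes[] = {
	{ .axis = ABS_RY, .min = 0, .max = 2, .fuzz = 0, .flat = 0 },
};

static struct gpio_tilt_state board_tilt_states[] = {
	{ .gpios = 0x0, .axes = (int[]){ 0 } },	/* both low  -> position 0 */
	{ .gpios = 0x1, .axes = (int[]){ 1 } },	/* bit 0 set -> position 1 */
	{ .gpios = 0x3, .axes = (int[]){ 2 } },	/* both set  -> position 2 */
};

static struct gpio_tilt_platform_data board_tilt_pdata = {
	.gpios			= board_tilt_gpios,
	.nr_gpios		= ARRAY_SIZE(board_tilt_gpios),
	.axes			= board_tilt_axes,
	.nr_axes		= ARRAY_SIZE(board_tilt_axes),
	.states			= board_tilt_states,
	.nr_states		= ARRAY_SIZE(board_tilt_states),
	.debounce_interval	= 100,	/* msecs */
	.poll_interval		= 1000,	/* msecs, polled mode only */
};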
+ */ + bool negate_x; + bool negate_y; + bool negate_z; + + /* CTRL_REG1: set resolution, g-range, data ready enable */ + /* Output resolution: 8-bit valid or 12-bit valid */ + #define RES_8BIT 0 + #define RES_12BIT (1 << 6) + u8 res_12bit; + /* Output g-range: +/-2g, 4g, or 8g */ + #define KXTJ9_G_2G 0 + #define KXTJ9_G_4G (1 << 3) + #define KXTJ9_G_8G (1 << 4) + u8 g_range; + + int (*init)(void); + void (*exit)(void); + int (*power_on)(void); + int (*power_off)(void); +}; +#endif /* __KXTJ9_H__ */ diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h new file mode 100644 index 00000000..6c07ced0 --- /dev/null +++ b/include/linux/input/matrix_keypad.h @@ -0,0 +1,128 @@ +#ifndef _MATRIX_KEYPAD_H +#define _MATRIX_KEYPAD_H + +#include <linux/types.h> +#include <linux/input.h> +#include <linux/of.h> + +#define MATRIX_MAX_ROWS 32 +#define MATRIX_MAX_COLS 32 + +#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\ + (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\ + ((val) & 0xffff)) + +#define KEY_ROW(k) (((k) >> 24) & 0xff) +#define KEY_COL(k) (((k) >> 16) & 0xff) +#define KEY_VAL(k) ((k) & 0xffff) + +#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col)) + +/** + * struct matrix_keymap_data - keymap for matrix keyboards + * @keymap: pointer to array of uint32 values encoded with KEY() macro + * representing keymap + * @keymap_size: number of entries (initialized) in this keymap + * + * This structure is supposed to be used by platform code to supply + * keymaps to drivers that implement matrix-like keypads/keyboards. + */ +struct matrix_keymap_data { + const uint32_t *keymap; + unsigned int keymap_size; +}; + +/** + * struct matrix_keypad_platform_data - platform-dependent keypad data + * @keymap_data: pointer to &matrix_keymap_data + * @row_gpios: pointer to array of gpio numbers representing rows + * @col_gpios: pointer to array of gpio numbers reporesenting colums + * @num_row_gpios: actual number of row gpios used by device + * @num_col_gpios: actual number of col gpios used by device + * @col_scan_delay_us: delay, measured in microseconds, that is + * needed before we can keypad after activating column gpio + * @debounce_ms: debounce interval in milliseconds + * @clustered_irq: may be specified if interrupts of all row/column GPIOs + * are bundled to one single irq + * @clustered_irq_flags: flags that are needed for the clustered irq + * @active_low: gpio polarity + * @wakeup: controls whether the device should be set up as wakeup + * source + * @no_autorepeat: disable key autorepeat + * + * This structure represents platform-specific data that use used by + * matrix_keypad driver to perform proper initialization. 
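/*
 * Illustrative sketch (not part of the header): a small platform keymap
 * encoded with the KEY() macro.  Row/column positions and keycodes are
 * hypothetical.
 */
static const uint32_t board_keymap[] = {
	KEY(0, 0, KEY_ESC),
	KEY(0, 1, KEY_1),
	KEY(1, 0, KEY_LEFTSHIFT),
	KEY(1, 1, KEY_ENTER),
};

static const struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};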
+ */ +struct matrix_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + + const unsigned int *row_gpios; + const unsigned int *col_gpios; + + unsigned int num_row_gpios; + unsigned int num_col_gpios; + + unsigned int col_scan_delay_us; + + /* key debounce interval in milli-second */ + unsigned int debounce_ms; + + unsigned int clustered_irq; + unsigned int clustered_irq_flags; + + bool active_low; + bool wakeup; + bool no_autorepeat; +}; + +/** + * matrix_keypad_build_keymap - convert platform keymap into matrix keymap + * @keymap_data: keymap supplied by the platform code + * @row_shift: number of bits to shift row value by to advance to the next + * line in the keymap + * @keymap: expanded version of keymap that is suitable for use by + * matrix keyboad driver + * @keybit: pointer to bitmap of keys supported by input device + * + * This function converts platform keymap (encoded with KEY() macro) into + * an array of keycodes that is suitable for using in a standard matrix + * keyboard driver that uses row and col as indices. + */ +static inline void +matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, + unsigned int row_shift, + unsigned short *keymap, unsigned long *keybit) +{ + int i; + + for (i = 0; i < keymap_data->keymap_size; i++) { + unsigned int key = keymap_data->keymap[i]; + unsigned int row = KEY_ROW(key); + unsigned int col = KEY_COL(key); + unsigned short code = KEY_VAL(key); + + keymap[MATRIX_SCAN_CODE(row, col, row_shift)] = code; + __set_bit(code, keybit); + } + __clear_bit(KEY_RESERVED, keybit); +} + +#ifdef CONFIG_INPUT_OF_MATRIX_KEYMAP +struct matrix_keymap_data * +matrix_keyboard_of_fill_keymap(struct device_node *np, const char *propname); + +void matrix_keyboard_of_free_keymap(const struct matrix_keymap_data *kd); +#else +static inline struct matrix_keymap_data * +matrix_keyboard_of_fill_keymap(struct device_node *np, const char *propname) +{ + return NULL; +} + +static inline void +matrix_keyboard_of_free_keymap(const struct matrix_keymap_data *kd) +{ +} +#endif + +#endif /* _MATRIX_KEYPAD_H */ diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h new file mode 100644 index 00000000..f8673758 --- /dev/null +++ b/include/linux/input/mt.h @@ -0,0 +1,67 @@ +#ifndef _INPUT_MT_H +#define _INPUT_MT_H + +/* + * Input Multitouch Library + * + * Copyright (c) 2010 Henrik Rydberg + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
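/*
 * Illustrative sketch (not part of the header), continuing the keymap
 * example above: tying board_keymap_data to row/column GPIOs for the
 * matrix_keypad driver.  GPIO numbers and timings are hypothetical.
 */
static const unsigned int board_row_gpios[] = { 64, 65 };
static const unsigned int board_col_gpios[] = { 80, 81 };

static struct matrix_keypad_platform_data board_matrix_keypad_pdata = {
	.keymap_data		= &board_keymap_data,
	.row_gpios		= board_row_gpios,
	.num_row_gpios		= ARRAY_SIZE(board_row_gpios),
	.col_gpios		= board_col_gpios,
	.num_col_gpios		= ARRAY_SIZE(board_col_gpios),
	.col_scan_delay_us	= 10,
	.debounce_ms		= 20,
	.active_low		= true,
	.wakeup			= true,
};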
+ */ + +#include <linux/input.h> + +#define TRKID_MAX 0xffff + +/** + * struct input_mt_slot - represents the state of an input MT slot + * @abs: holds current values of ABS_MT axes for this slot + */ +struct input_mt_slot { + int abs[ABS_MT_LAST - ABS_MT_FIRST + 1]; +}; + +static inline void input_mt_set_value(struct input_mt_slot *slot, + unsigned code, int value) +{ + slot->abs[code - ABS_MT_FIRST] = value; +} + +static inline int input_mt_get_value(const struct input_mt_slot *slot, + unsigned code) +{ + return slot->abs[code - ABS_MT_FIRST]; +} + +int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots); +void input_mt_destroy_slots(struct input_dev *dev); + +static inline int input_mt_new_trkid(struct input_dev *dev) +{ + return dev->trkid++ & TRKID_MAX; +} + +static inline void input_mt_slot(struct input_dev *dev, int slot) +{ + input_event(dev, EV_ABS, ABS_MT_SLOT, slot); +} + +static inline bool input_is_mt_value(int axis) +{ + return axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST; +} + +static inline bool input_is_mt_axis(int axis) +{ + return axis == ABS_MT_SLOT || input_is_mt_value(axis); +} + +void input_mt_report_slot_state(struct input_dev *dev, + unsigned int tool_type, bool active); + +void input_mt_report_finger_count(struct input_dev *dev, int count); +void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count); + +#endif diff --git a/include/linux/input/pixcir_ts.h b/include/linux/input/pixcir_ts.h new file mode 100644 index 00000000..7163d91c --- /dev/null +++ b/include/linux/input/pixcir_ts.h @@ -0,0 +1,10 @@ +#ifndef _PIXCIR_I2C_TS_H +#define _PIXCIR_I2C_TS_H + +struct pixcir_ts_platform_data { + int (*attb_read_val)(void); + int x_max; + int y_max; +}; + +#endif diff --git a/include/linux/input/pmic8xxx-keypad.h b/include/linux/input/pmic8xxx-keypad.h new file mode 100644 index 00000000..5f1e2f9a --- /dev/null +++ b/include/linux/input/pmic8xxx-keypad.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
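/*
 * Illustrative sketch (not part of the header): reporting one contact per
 * slot from a hypothetical touch driver.  The driver is assumed to have
 * called input_mt_init_slots() and input_set_abs_params() for
 * ABS_MT_POSITION_X/Y during probe; input_report_abs() and input_sync()
 * come from <linux/input.h>.
 */
static void example_report_contact(struct input_dev *dev, int slot,
				   bool down, int x, int y)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev, MT_TOOL_FINGER, down);
	if (down) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
	input_mt_report_pointer_emulation(dev, true);
	input_sync(dev);
}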
+ */ + +#ifndef __PMIC8XXX_KEYPAD_H__ +#define __PMIC8XXX_KEYPAD_H__ + +#include <linux/input/matrix_keypad.h> + +#define PM8XXX_KEYPAD_DEV_NAME "pm8xxx-keypad" + +/** + * struct pm8xxx_keypad_platform_data - platform data for keypad + * @keymap_data - matrix keymap data + * @input_name - input device name + * @input_phys_device - input device name + * @num_cols - number of columns of keypad + * @num_rows - number of row of keypad + * @debounce_ms - debounce period in milliseconds + * @scan_delay_ms - scan delay in milliseconds + * @row_hold_ns - row hold period in nanoseconds + * @wakeup - configure keypad as wakeup + * @rep - enable or disable key repeat bit + */ +struct pm8xxx_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + + const char *input_name; + const char *input_phys_device; + + unsigned int num_cols; + unsigned int num_rows; + unsigned int rows_gpio_start; + unsigned int cols_gpio_start; + + unsigned int debounce_ms; + unsigned int scan_delay_ms; + unsigned int row_hold_ns; + + bool wakeup; + bool rep; +}; + +#endif /*__PMIC8XXX_KEYPAD_H__ */ diff --git a/include/linux/input/pmic8xxx-pwrkey.h b/include/linux/input/pmic8xxx-pwrkey.h new file mode 100644 index 00000000..6d2974e5 --- /dev/null +++ b/include/linux/input/pmic8xxx-pwrkey.h @@ -0,0 +1,31 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __PMIC8XXX_PWRKEY_H__ +#define __PMIC8XXX_PWRKEY_H__ + +#define PM8XXX_PWRKEY_DEV_NAME "pm8xxx-pwrkey" + +/** + * struct pm8xxx_pwrkey_platform_data - platform data for pwrkey driver + * @pull up: power on register control for pull up/down configuration + * @kpd_trigger_delay_us: time delay for power key state change interrupt + * trigger. + * @wakeup: configure power key as wakeup source + */ +struct pm8xxx_pwrkey_platform_data { + bool pull_up; + u32 kpd_trigger_delay_us; + u32 wakeup; +}; + +#endif /* __PMIC8XXX_PWRKEY_H__ */ diff --git a/include/linux/input/samsung-keypad.h b/include/linux/input/samsung-keypad.h new file mode 100644 index 00000000..f25619bf --- /dev/null +++ b/include/linux/input/samsung-keypad.h @@ -0,0 +1,43 @@ +/* + * Samsung Keypad platform data definitions + * + * Copyright (C) 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim <jy0922.shim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __SAMSUNG_KEYPAD_H +#define __SAMSUNG_KEYPAD_H + +#include <linux/input/matrix_keypad.h> + +#define SAMSUNG_MAX_ROWS 8 +#define SAMSUNG_MAX_COLS 8 + +/** + * struct samsung_keypad_platdata - Platform device data for Samsung Keypad. + * @keymap_data: pointer to &matrix_keymap_data. + * @rows: number of keypad row supported. + * @cols: number of keypad col supported. + * @no_autorepeat: disable key autorepeat. + * @wakeup: controls whether the device should be set up as wakeup source. 
+ * @cfg_gpio: configure the GPIO. + * + * Initialisation data specific to either the machine or the platform + * for the device driver to use or call-back when configuring gpio. + */ +struct samsung_keypad_platdata { + const struct matrix_keymap_data *keymap_data; + unsigned int rows; + unsigned int cols; + bool no_autorepeat; + bool wakeup; + + void (*cfg_gpio)(unsigned int rows, unsigned int cols); +}; + +#endif /* __SAMSUNG_KEYPAD_H */ diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h new file mode 100644 index 00000000..5d253cd9 --- /dev/null +++ b/include/linux/input/sh_keysc.h @@ -0,0 +1,15 @@ +#ifndef __SH_KEYSC_H__ +#define __SH_KEYSC_H__ + +#define SH_KEYSC_MAXKEYS 64 + +struct sh_keysc_info { + enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3, + SH_KEYSC_MODE_4, SH_KEYSC_MODE_5, SH_KEYSC_MODE_6 } mode; + int scan_timing; /* 0 -> 7, see KYCR1, SCN[2:0] */ + int delay; + int kycr2_delay; + int keycodes[SH_KEYSC_MAXKEYS]; /* KEYIN * KEYOUT */ +}; + +#endif /* __SH_KEYSC_H__ */ diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h new file mode 100644 index 00000000..52db6206 --- /dev/null +++ b/include/linux/input/sparse-keymap.h @@ -0,0 +1,62 @@ +#ifndef _SPARSE_KEYMAP_H +#define _SPARSE_KEYMAP_H + +/* + * Copyright (c) 2009 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#define KE_END 0 /* Indicates end of keymap */ +#define KE_KEY 1 /* Ordinary key/button */ +#define KE_SW 2 /* Switch (predetermined value) */ +#define KE_VSW 3 /* Switch (value supplied at runtime) */ +#define KE_IGNORE 4 /* Known entry that should be ignored */ +#define KE_LAST KE_IGNORE + +/** + * struct key_entry - keymap entry for use in sparse keymap + * @type: Type of the key entry (KE_KEY, KE_SW, KE_VSW, KE_END); + * drivers are allowed to extend the list with their own + * private definitions. + * @code: Device-specific data identifying the button/switch + * @keycode: KEY_* code assigned to a key/button + * @sw.code: SW_* code assigned to a switch + * @sw.value: Value that should be sent in an input even when KE_SW + * switch is toggled. KE_VSW switches ignore this field and + * expect driver to supply value for the event. + * + * This structure defines an entry in a sparse keymap used by some + * input devices for which traditional table-based approach is not + * suitable. 
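/*
 * Illustrative sketch (not part of the header): Samsung keypad platform
 * data for a 3x4 matrix.  board_keymap_data refers to a struct
 * matrix_keymap_data assumed to be defined elsewhere in board code (see
 * the KEY() example earlier); the cfg_gpio callback name is hypothetical.
 */
static void board_samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
{
	/* pin-mux the ROW[0..rows-1] and COL[0..cols-1] lines here */
}

static struct samsung_keypad_platdata board_samsung_keypad_data = {
	.keymap_data	= &board_keymap_data,
	.rows		= 3,
	.cols		= 4,
	.no_autorepeat	= false,
	.wakeup		= true,
	.cfg_gpio	= board_samsung_keypad_cfg_gpio,
};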
+ */ +struct key_entry { + int type; /* See KE_* above */ + u32 code; + union { + u16 keycode; /* For KE_KEY */ + struct { /* For KE_SW, KE_VSW */ + u8 code; + u8 value; /* For KE_SW, ignored by KE_VSW */ + } sw; + }; +}; + +struct key_entry *sparse_keymap_entry_from_scancode(struct input_dev *dev, + unsigned int code); +struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev, + unsigned int code); +int sparse_keymap_setup(struct input_dev *dev, + const struct key_entry *keymap, + int (*setup)(struct input_dev *, struct key_entry *)); +void sparse_keymap_free(struct input_dev *dev); + +void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *ke, + unsigned int value, bool autorelease); + +bool sparse_keymap_report_event(struct input_dev *dev, unsigned int code, + unsigned int value, bool autorelease); + +#endif /* _SPARSE_KEYMAP_H */ diff --git a/include/linux/input/tca8418_keypad.h b/include/linux/input/tca8418_keypad.h new file mode 100644 index 00000000..e71a85dc --- /dev/null +++ b/include/linux/input/tca8418_keypad.h @@ -0,0 +1,44 @@ +/* + * TCA8418 keypad platform support + * + * Copyright (C) 2011 Fuel7, Inc. All rights reserved. + * + * Author: Kyle Manna <kyle.manna@fuel7.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + * If you can't comply with GPLv2, alternative licensing terms may be + * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary + * alternative licensing inquiries. + */ + +#ifndef _TCA8418_KEYPAD_H +#define _TCA8418_KEYPAD_H + +#include <linux/types.h> +#include <linux/input/matrix_keypad.h> + +#define TCA8418_I2C_ADDR 0x34 +#define TCA8418_NAME "tca8418_keypad" + +struct tca8418_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + unsigned rows; + unsigned cols; + bool rep; + bool irq_is_gpio; +}; + +#endif diff --git a/include/linux/input/ti_tscadc.h b/include/linux/input/ti_tscadc.h new file mode 100644 index 00000000..b10a527a --- /dev/null +++ b/include/linux/input/ti_tscadc.h @@ -0,0 +1,17 @@ +#ifndef __LINUX_TI_TSCADC_H +#define __LINUX_TI_TSCADC_H + +/** + * struct tsc_data Touchscreen wire configuration + * @wires: Wires refer to application modes + * i.e. 4/5/8 wire touchscreen support + * on the platform. + * @x_plate_resistance: X plate resistance. + */ + +struct tsc_data { + int wires; + int x_plate_resistance; +}; + +#endif diff --git a/include/linux/input/tps6507x-ts.h b/include/linux/input/tps6507x-ts.h new file mode 100644 index 00000000..ab144031 --- /dev/null +++ b/include/linux/input/tps6507x-ts.h @@ -0,0 +1,24 @@ +/* linux/i2c/tps6507x-ts.h + * + * Functions to access TPS65070 touch screen chip. 
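/*
 * Illustrative sketch (not part of the header): a sparse keymap table and
 * its use from a hypothetical hotkey driver.  The scancodes are made up;
 * the KEY_x and SW_x codes come from <linux/input.h>.
 */
static const struct key_entry example_hotkey_keymap[] = {
	{ KE_KEY,    0x31, { KEY_VOLUMEUP } },
	{ KE_KEY,    0x32, { KEY_VOLUMEDOWN } },
	{ KE_SW,     0x35, { .sw = { SW_LID, 1 } } },
	{ KE_IGNORE, 0x41 },		/* known event, deliberately dropped */
	{ KE_END,    0 }
};

/*
 * In probe:    error = sparse_keymap_setup(input_dev, example_hotkey_keymap, NULL);
 * On an event: sparse_keymap_report_event(input_dev, scancode, 1, true);
 * In remove:   sparse_keymap_free(input_dev);
 */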
+ * + * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) + * + * + * For licencing details see kernel-base/COPYING + */ + +#ifndef __LINUX_I2C_TPS6507X_TS_H +#define __LINUX_I2C_TPS6507X_TS_H + +/* Board specific touch screen initial values */ +struct touchscreen_init_data { + int poll_period; /* ms */ + int vref; /* non-zero to leave vref on */ + __u16 min_pressure; /* min reading to be treated as a touch */ + __u16 vendor; + __u16 product; + __u16 version; +}; + +#endif /* __LINUX_I2C_TPS6507X_TS_H */ diff --git a/include/linux/integrity.h b/include/linux/integrity.h new file mode 100644 index 00000000..a0c41256 --- /dev/null +++ b/include/linux/integrity.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2009 IBM Corporation + * Author: Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ + +#ifndef _LINUX_INTEGRITY_H +#define _LINUX_INTEGRITY_H + +#include <linux/fs.h> + +enum integrity_status { + INTEGRITY_PASS = 0, + INTEGRITY_FAIL, + INTEGRITY_NOLABEL, + INTEGRITY_NOXATTRS, + INTEGRITY_UNKNOWN, +}; + +/* List of EVM protected security xattrs */ +#ifdef CONFIG_INTEGRITY +extern int integrity_inode_alloc(struct inode *inode); +extern void integrity_inode_free(struct inode *inode); + +#else +static inline int integrity_inode_alloc(struct inode *inode) +{ + return 0; +} + +static inline void integrity_inode_free(struct inode *inode) +{ + return; +} +#endif /* CONFIG_INTEGRITY_H */ +#endif /* _LINUX_INTEGRITY_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h new file mode 100644 index 00000000..e6ca56de --- /dev/null +++ b/include/linux/intel-iommu.h @@ -0,0 +1,366 @@ +/* + * Copyright (c) 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * Copyright (C) 2006-2008 Intel Corporation + * Author: Ashok Raj <ashok.raj@intel.com> + * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> + */ + +#ifndef _INTEL_IOMMU_H_ +#define _INTEL_IOMMU_H_ + +#include <linux/types.h> +#include <linux/iova.h> +#include <linux/io.h> +#include <linux/dma_remapping.h> +#include <asm/cacheflush.h> +#include <asm/iommu.h> + +/* + * Intel IOMMU register specification per version 1.0 public spec. 
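/*
 * Illustrative sketch (not part of the header): board-specific initial
 * values for the TPS6507x touchscreen cell.  All numbers are hypothetical
 * placeholders.
 */
static struct touchscreen_init_data board_tps6507x_ts_data = {
	.poll_period	= 30,		/* ms between reads */
	.vref		= 0,		/* do not leave vref on */
	.min_pressure	= 0x30,
	.vendor		= 0,
	.product	= 65070,
	.version	= 0x100,
};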
+ */ + +#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ +#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ +#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ +#define DMAR_GCMD_REG 0x18 /* Global command register */ +#define DMAR_GSTS_REG 0x1c /* Global status register */ +#define DMAR_RTADDR_REG 0x20 /* Root entry table */ +#define DMAR_CCMD_REG 0x28 /* Context command reg */ +#define DMAR_FSTS_REG 0x34 /* Fault Status register */ +#define DMAR_FECTL_REG 0x38 /* Fault control register */ +#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */ +#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */ +#define DMAR_FEUADDR_REG 0x44 /* Upper address register */ +#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */ +#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */ +#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */ +#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ +#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ +#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ +#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */ +#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ +#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ +#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ +#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */ +#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ + +#define OFFSET_STRIDE (9) +/* +#define dmar_readl(dmar, reg) readl(dmar + reg) +#define dmar_readq(dmar, reg) ({ \ + u32 lo, hi; \ + lo = readl(dmar + reg); \ + hi = readl(dmar + reg + 4); \ + (((u64) hi) << 32) + lo; }) +*/ +static inline u64 dmar_readq(void __iomem *addr) +{ + u32 lo, hi; + lo = readl(addr); + hi = readl(addr + 4); + return (((u64) hi) << 32) + lo; +} + +static inline void dmar_writeq(void __iomem *addr, u64 val) +{ + writel((u32)val, addr); + writel((u32)(val >> 32), addr + 4); +} + +#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) +#define DMAR_VER_MINOR(v) ((v) & 0x0f) + +/* + * Decoding Capability Register + */ +#define cap_read_drain(c) (((c) >> 55) & 1) +#define cap_write_drain(c) (((c) >> 54) & 1) +#define cap_max_amask_val(c) (((c) >> 48) & 0x3f) +#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1) +#define cap_pgsel_inv(c) (((c) >> 39) & 1) + +#define cap_super_page_val(c) (((c) >> 34) & 0xf) +#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \ + * OFFSET_STRIDE) + 21) + +#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16) +#define cap_max_fault_reg_offset(c) \ + (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16) + +#define cap_zlr(c) (((c) >> 22) & 1) +#define cap_isoch(c) (((c) >> 23) & 1) +#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1) +#define cap_sagaw(c) (((c) >> 8) & 0x1f) +#define cap_caching_mode(c) (((c) >> 7) & 1) +#define cap_phmr(c) (((c) >> 6) & 1) +#define cap_plmr(c) (((c) >> 5) & 1) +#define cap_rwbf(c) (((c) >> 4) & 1) +#define cap_afl(c) (((c) >> 3) & 1) +#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7))) +/* + * Extended Capability Register + */ + +#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1) +#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) +#define ecap_max_iotlb_offset(e) \ + (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16) +#define ecap_coherent(e) ((e) & 0x1) +#define ecap_qis(e) ((e) & 0x2) +#define ecap_pass_through(e) ((e >> 6) & 0x1) +#define ecap_eim_support(e) ((e >> 4) & 0x1) +#define 
ecap_ir_support(e) ((e >> 3) & 0x1) +#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1) +#define ecap_max_handle_mask(e) ((e >> 20) & 0xf) +#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */ + +/* IOTLB_REG */ +#define DMA_TLB_FLUSH_GRANU_OFFSET 60 +#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) +#define DMA_TLB_DSI_FLUSH (((u64)2) << 60) +#define DMA_TLB_PSI_FLUSH (((u64)3) << 60) +#define DMA_TLB_IIRG(type) ((type >> 60) & 7) +#define DMA_TLB_IAIG(val) (((val) >> 57) & 7) +#define DMA_TLB_READ_DRAIN (((u64)1) << 49) +#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) +#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) +#define DMA_TLB_IVT (((u64)1) << 63) +#define DMA_TLB_IH_NONLEAF (((u64)1) << 6) +#define DMA_TLB_MAX_SIZE (0x3f) + +/* INVALID_DESC */ +#define DMA_CCMD_INVL_GRANU_OFFSET 61 +#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) +#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) +#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) +#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) +#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) +#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) +#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6) +#define DMA_ID_TLB_ADDR(addr) (addr) +#define DMA_ID_TLB_ADDR_MASK(mask) (mask) + +/* PMEN_REG */ +#define DMA_PMEN_EPM (((u32)1)<<31) +#define DMA_PMEN_PRS (((u32)1)<<0) + +/* GCMD_REG */ +#define DMA_GCMD_TE (((u32)1) << 31) +#define DMA_GCMD_SRTP (((u32)1) << 30) +#define DMA_GCMD_SFL (((u32)1) << 29) +#define DMA_GCMD_EAFL (((u32)1) << 28) +#define DMA_GCMD_WBF (((u32)1) << 27) +#define DMA_GCMD_QIE (((u32)1) << 26) +#define DMA_GCMD_SIRTP (((u32)1) << 24) +#define DMA_GCMD_IRE (((u32) 1) << 25) +#define DMA_GCMD_CFI (((u32) 1) << 23) + +/* GSTS_REG */ +#define DMA_GSTS_TES (((u32)1) << 31) +#define DMA_GSTS_RTPS (((u32)1) << 30) +#define DMA_GSTS_FLS (((u32)1) << 29) +#define DMA_GSTS_AFLS (((u32)1) << 28) +#define DMA_GSTS_WBFS (((u32)1) << 27) +#define DMA_GSTS_QIES (((u32)1) << 26) +#define DMA_GSTS_IRTPS (((u32)1) << 24) +#define DMA_GSTS_IRES (((u32)1) << 25) +#define DMA_GSTS_CFIS (((u32)1) << 23) + +/* CCMD_REG */ +#define DMA_CCMD_ICC (((u64)1) << 63) +#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61) +#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61) +#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61) +#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32) +#define DMA_CCMD_MASK_NOBIT 0 +#define DMA_CCMD_MASK_1BIT 1 +#define DMA_CCMD_MASK_2BIT 2 +#define DMA_CCMD_MASK_3BIT 3 +#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16) +#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff)) + +/* FECTL_REG */ +#define DMA_FECTL_IM (((u32)1) << 31) + +/* FSTS_REG */ +#define DMA_FSTS_PPF ((u32)2) +#define DMA_FSTS_PFO ((u32)1) +#define DMA_FSTS_IQE (1 << 4) +#define DMA_FSTS_ICE (1 << 5) +#define DMA_FSTS_ITE (1 << 6) +#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) + +/* FRCD_REG, 32 bits access */ +#define DMA_FRCD_F (((u32)1) << 31) +#define dma_frcd_type(d) ((d >> 30) & 1) +#define dma_frcd_fault_reason(c) (c & 0xff) +#define dma_frcd_source_id(c) (c & 0xffff) +/* low 64 bit */ +#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) + +#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ +do { \ + cycles_t start_time = get_cycles(); \ + while (1) { \ + sts = op(iommu->reg + offset); \ + if (cond) \ + break; \ + if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ + panic("DMAR hardware is malfunctioning\n"); \ + cpu_relax(); \ + } \ +} while (0) + +#define QI_LENGTH 256 /* queue length */ + +enum { + QI_FREE, + QI_IN_USE, + QI_DONE, + 
QI_ABORT +}; + +#define QI_CC_TYPE 0x1 +#define QI_IOTLB_TYPE 0x2 +#define QI_DIOTLB_TYPE 0x3 +#define QI_IEC_TYPE 0x4 +#define QI_IWD_TYPE 0x5 + +#define QI_IEC_SELECTIVE (((u64)1) << 4) +#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) +#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27)) + +#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) +#define QI_IWD_STATUS_WRITE (((u64)1) << 5) + +#define QI_IOTLB_DID(did) (((u64)did) << 16) +#define QI_IOTLB_DR(dr) (((u64)dr) << 7) +#define QI_IOTLB_DW(dw) (((u64)dw) << 6) +#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) +#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) +#define QI_IOTLB_IH(ih) (((u64)ih) << 6) +#define QI_IOTLB_AM(am) (((u8)am)) + +#define QI_CC_FM(fm) (((u64)fm) << 48) +#define QI_CC_SID(sid) (((u64)sid) << 32) +#define QI_CC_DID(did) (((u64)did) << 16) +#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4)) + +#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) +#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) +#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_DEV_IOTLB_SIZE 1 +#define QI_DEV_IOTLB_MAX_INVS 32 + +struct qi_desc { + u64 low, high; +}; + +struct q_inval { + raw_spinlock_t q_lock; + struct qi_desc *desc; /* invalidation queue */ + int *desc_status; /* desc status */ + int free_head; /* first free entry */ + int free_tail; /* last free entry */ + int free_cnt; +}; + +#ifdef CONFIG_IRQ_REMAP +/* 1MB - maximum possible interrupt remapping table size */ +#define INTR_REMAP_PAGE_ORDER 8 +#define INTR_REMAP_TABLE_REG_SIZE 0xf + +#define INTR_REMAP_TABLE_ENTRIES 65536 + +struct ir_table { + struct irte *base; +}; +#endif + +struct iommu_flush { + void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, + u8 fm, u64 type); + void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type); +}; + +enum { + SR_DMAR_FECTL_REG, + SR_DMAR_FEDATA_REG, + SR_DMAR_FEADDR_REG, + SR_DMAR_FEUADDR_REG, + MAX_SR_DMAR_REGS +}; + +struct intel_iommu { + void __iomem *reg; /* Pointer to hardware regs, virtual addr */ + u64 cap; + u64 ecap; + u32 gcmd; /* Holds TE, EAFL. 
Don't need SRTP, SFL, WBF */ + raw_spinlock_t register_lock; /* protect register handling */ + int seq_id; /* sequence id of the iommu */ + int agaw; /* agaw of this iommu */ + int msagaw; /* max sagaw of this iommu */ + unsigned int irq; + unsigned char name[13]; /* Device Name */ + +#ifdef CONFIG_INTEL_IOMMU + unsigned long *domain_ids; /* bitmap of domains */ + struct dmar_domain **domains; /* ptr to domains */ + spinlock_t lock; /* protect context, domain ids */ + struct root_entry *root_entry; /* virtual address */ + + struct iommu_flush flush; +#endif + struct q_inval *qi; /* Queued invalidation info */ + u32 *iommu_state; /* Store iommu states between suspend and resume.*/ + +#ifdef CONFIG_IRQ_REMAP + struct ir_table *ir_table; /* Interrupt remapping info */ +#endif + int node; +}; + +static inline void __iommu_flush_cache( + struct intel_iommu *iommu, void *addr, int size) +{ + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(addr, size); +} + +extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); +extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); + +extern int alloc_iommu(struct dmar_drhd_unit *drhd); +extern void free_iommu(struct intel_iommu *iommu); +extern int dmar_enable_qi(struct intel_iommu *iommu); +extern void dmar_disable_qi(struct intel_iommu *iommu); +extern int dmar_reenable_qi(struct intel_iommu *iommu); +extern void qi_global_iec(struct intel_iommu *iommu); + +extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, + u8 fm, u64 type); +extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type); +extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, + u64 addr, unsigned mask); + +extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); + +extern int dmar_ir_support(void); + +#endif diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h new file mode 100644 index 00000000..10496bd2 --- /dev/null +++ b/include/linux/intel_mid_dma.h @@ -0,0 +1,76 @@ +/* + * intel_mid_dma.h - Intel MID DMA Drivers + * + * Copyright (C) 2008-10 Intel Corp + * Author: Vinod Koul <vinod.koul@intel.com> + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
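/*
 * Illustrative sketch (not part of the header): decoding the version and
 * capability registers with the helpers above.  Assumes a live
 * struct intel_iommu whose reg field has already been ioremapped;
 * readl() and pr_info() come from <linux/io.h> and <linux/kernel.h>.
 */
static void example_dump_dmar_caps(struct intel_iommu *iommu)
{
	u32 ver  = readl(iommu->reg + DMAR_VER_REG);
	u64 cap  = dmar_readq(iommu->reg + DMAR_CAP_REG);
	u64 ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	pr_info("DMAR %d.%d: mgaw %d, %lu domain ids, %scoherent\n",
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(int)cap_mgaw(cap), cap_ndoms(cap),
		ecap_coherent(ecap) ? "" : "non-");
}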
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * + */ +#ifndef __INTEL_MID_DMA_H__ +#define __INTEL_MID_DMA_H__ + +#include <linux/dmaengine.h> + +#define DMA_PREP_CIRCULAR_LIST (1 << 10) + +/*DMA mode configurations*/ +enum intel_mid_dma_mode { + LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/ + LNW_DMA_MEM_TO_PER, /*memory to periphral configuration*/ + LNW_DMA_MEM_TO_MEM, /*mem to mem confg (testing only)*/ +}; + +/*DMA handshaking*/ +enum intel_mid_dma_hs_mode { + LNW_DMA_HW_HS = 0, /*HW Handshaking only*/ + LNW_DMA_SW_HS = 1, /*SW Handshaking not recommended*/ +}; + +/*Burst size configuration*/ +enum intel_mid_dma_msize { + LNW_DMA_MSIZE_1 = 0x0, + LNW_DMA_MSIZE_4 = 0x1, + LNW_DMA_MSIZE_8 = 0x2, + LNW_DMA_MSIZE_16 = 0x3, + LNW_DMA_MSIZE_32 = 0x4, + LNW_DMA_MSIZE_64 = 0x5, +}; + +/** + * struct intel_mid_dma_slave - DMA slave structure + * + * @dirn: DMA trf direction + * @src_width: tx register width + * @dst_width: rx register width + * @hs_mode: HW/SW handshaking mode + * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem) + * @src_msize: Source DMA burst size + * @dst_msize: Dst DMA burst size + * @per_addr: Periphral address + * @device_instance: DMA peripheral device instance, we can have multiple + * peripheral device connected to single DMAC + */ +struct intel_mid_dma_slave { + enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/ + enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ + unsigned int device_instance; /*0, 1 for periphral instance*/ + struct dma_slave_config dma_slave; +}; + +#endif /*__INTEL_MID_DMA_H__*/ diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h new file mode 100644 index 00000000..920109a2 --- /dev/null +++ b/include/linux/intel_pmic_gpio.h @@ -0,0 +1,15 @@ +#ifndef LINUX_INTEL_PMIC_H +#define LINUX_INTEL_PMIC_H + +struct intel_pmic_gpio_platform_data { + /* the first IRQ of the chip */ + unsigned irq_base; + /* number assigned to the first GPIO */ + unsigned gpio_base; + /* sram address for gpiointr register, the langwell chip will map + * the PMIC spi GPIO expander's GPIOINTR register in sram. + */ + unsigned gpiointr; +}; + +#endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h new file mode 100644 index 00000000..2aea5d22 --- /dev/null +++ b/include/linux/interrupt.h @@ -0,0 +1,703 @@ +/* interrupt.h */ +#ifndef _LINUX_INTERRUPT_H +#define _LINUX_INTERRUPT_H + +#include <linux/kernel.h> +#include <linux/linkage.h> +#include <linux/bitops.h> +#include <linux/preempt.h> +#include <linux/cpumask.h> +#include <linux/irqreturn.h> +#include <linux/irqnr.h> +#include <linux/hardirq.h> +#include <linux/irqflags.h> +#include <linux/smp.h> +#include <linux/percpu.h> +#include <linux/hrtimer.h> +#include <linux/kref.h> +#include <linux/workqueue.h> + +#include <linux/atomic.h> +#include <asm/ptrace.h> + +/* + * These correspond to the IORESOURCE_IRQ_* defines in + * linux/ioport.h to select the interrupt line behaviour. When + * requesting an interrupt without specifying a IRQF_TRIGGER, the + * setting should be assumed to be "as already configured", which + * may be as per machine or firmware initialisation. 
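/*
 * Illustrative sketch (not part of the header): a slave configuration for
 * a peripheral-to-memory transfer.  The dma_slave_config fields and the
 * DMA_DEV_TO_MEM/DMA_SLAVE_BUSWIDTH_* constants come from
 * <linux/dmaengine.h>; the FIFO address and burst sizes are hypothetical.
 */
static struct intel_mid_dma_slave example_mid_dma_slave = {
	.hs_mode		= LNW_DMA_HW_HS,
	.cfg_mode		= LNW_DMA_PER_TO_MEM,
	.device_instance	= 0,
	.dma_slave = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= 0xff2c0400,	/* hypothetical peripheral FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
		.dst_maxburst	= 16,
	},
};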
+ */ +#define IRQF_TRIGGER_NONE 0x00000000 +#define IRQF_TRIGGER_RISING 0x00000001 +#define IRQF_TRIGGER_FALLING 0x00000002 +#define IRQF_TRIGGER_HIGH 0x00000004 +#define IRQF_TRIGGER_LOW 0x00000008 +#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) +#define IRQF_TRIGGER_PROBE 0x00000010 + +/* + * These flags used only by the kernel as part of the + * irq handling routines. + * + * IRQF_DISABLED - keep irqs disabled when calling the action handler. + * DEPRECATED. This flag is a NOOP and scheduled to be removed + * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator + * IRQF_SHARED - allow sharing the irq among several devices + * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur + * IRQF_TIMER - Flag to mark this interrupt as timer interrupt + * IRQF_PERCPU - Interrupt is per cpu + * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing + * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is + * registered first in an shared interrupt is considered for + * performance reasons) + * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. + * Used by threaded interrupts which need to keep the + * irq line disabled until the threaded handler has been run. + * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend + * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set + * IRQF_NO_THREAD - Interrupt cannot be threaded + * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device + * resume time. + */ +#define IRQF_DISABLED 0x00000020 +#define IRQF_SAMPLE_RANDOM 0x00000040 +#define IRQF_SHARED 0x00000080 +#define IRQF_PROBE_SHARED 0x00000100 +#define __IRQF_TIMER 0x00000200 +#define IRQF_PERCPU 0x00000400 +#define IRQF_NOBALANCING 0x00000800 +#define IRQF_IRQPOLL 0x00001000 +#define IRQF_ONESHOT 0x00002000 +#define IRQF_NO_SUSPEND 0x00004000 +#define IRQF_FORCE_RESUME 0x00008000 +#define IRQF_NO_THREAD 0x00010000 +#define IRQF_EARLY_RESUME 0x00020000 + +#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) + +/* + * These values can be returned by request_any_context_irq() and + * describe the context the interrupt will be run in. 
+ * + * IRQC_IS_HARDIRQ - interrupt runs in hardirq context + * IRQC_IS_NESTED - interrupt runs in a nested threaded context + */ +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED, +}; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +/** + * struct irqaction - per interrupt action descriptor + * @handler: interrupt handler function + * @flags: flags (see IRQF_* above) + * @name: name of the device + * @dev_id: cookie to identify the device + * @percpu_dev_id: cookie to identify the device + * @next: pointer to the next irqaction for shared interrupts + * @irq: interrupt number + * @dir: pointer to the proc/irq/NN/name entry + * @thread_fn: interrupt handler function for threaded interrupts + * @thread: thread pointer for threaded interrupts + * @thread_flags: flags related to @thread + * @thread_mask: bitmask for keeping track of @thread activity + */ +struct irqaction { + irq_handler_t handler; + unsigned long flags; + void *dev_id; + void __percpu *percpu_dev_id; + struct irqaction *next; + int irq; + irq_handler_t thread_fn; + struct task_struct *thread; + unsigned long thread_flags; + unsigned long thread_mask; + const char *name; + struct proc_dir_entry *dir; +} ____cacheline_internodealigned_in_smp; + +extern irqreturn_t no_action(int cpl, void *dev_id); + +#ifdef CONFIG_GENERIC_HARDIRQS +extern int __must_check +request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, const char *name, void *dev); + +static inline int __must_check +request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev) +{ + return request_threaded_irq(irq, handler, NULL, flags, name, dev); +} + +extern int __must_check +request_any_context_irq(unsigned int irq, irq_handler_t handler, + unsigned long flags, const char *name, void *dev_id); + +extern int __must_check +request_percpu_irq(unsigned int irq, irq_handler_t handler, + const char *devname, void __percpu *percpu_dev_id); + +extern void exit_irq_thread(void); +#else + +extern int __must_check +request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev); + +/* + * Special function to avoid ifdeffery in kernel/irq/devres.c which + * gets magically built by GENERIC_HARDIRQS=n architectures (sparc, + * m68k). 
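/*
 * Illustrative sketch (not part of the header): the split hard-irq/thread
 * handler pair that request_threaded_irq() expects.  The "foo" names are
 * hypothetical; IRQF_ONESHOT keeps the line masked until the thread
 * function has finished.
 */
static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
	/* hard-irq context: acknowledge the device and defer the real work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* process context: sleeping bus I/O (I2C/SPI) is allowed here */
	return IRQ_HANDLED;
}

/*
 * In probe:
 *	err = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 *				   "foo", foo_dev);
 */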
I really love these $@%#!* obvious Makefile references: + * ../../../kernel/irq/devres.o + */ +static inline int __must_check +request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, const char *name, void *dev) +{ + return request_irq(irq, handler, flags, name, dev); +} + +static inline int __must_check +request_any_context_irq(unsigned int irq, irq_handler_t handler, + unsigned long flags, const char *name, void *dev_id) +{ + return request_irq(irq, handler, flags, name, dev_id); +} + +static inline int __must_check +request_percpu_irq(unsigned int irq, irq_handler_t handler, + const char *devname, void __percpu *percpu_dev_id) +{ + return request_irq(irq, handler, 0, devname, percpu_dev_id); +} + +static inline void exit_irq_thread(void) { } +#endif + +extern void free_irq(unsigned int, void *); +extern void free_percpu_irq(unsigned int, void __percpu *); + +struct device; + +extern int __must_check +devm_request_threaded_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, irq_handler_t thread_fn, + unsigned long irqflags, const char *devname, + void *dev_id); + +static inline int __must_check +devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, + unsigned long irqflags, const char *devname, void *dev_id) +{ + return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, + devname, dev_id); +} + +extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); + +/* + * On lockdep we dont want to enable hardirqs in hardirq + * context. Use local_irq_enable_in_hardirq() to annotate + * kernel code that has to do this nevertheless (pretty much + * the only valid case is for old/broken hardware that is + * insanely slow). + * + * NOTE: in theory this might break fragile code that relies + * on hardirq delivery - in practice we dont seem to have such + * places left. So the only effect should be slightly increased + * irqs-off latencies. + */ +#ifdef CONFIG_LOCKDEP +# define local_irq_enable_in_hardirq() do { } while (0) +#else +# define local_irq_enable_in_hardirq() local_irq_enable() +#endif + +extern void disable_irq_nosync(unsigned int irq); +extern void disable_irq(unsigned int irq); +extern void disable_percpu_irq(unsigned int irq); +extern void enable_irq(unsigned int irq); +extern void enable_percpu_irq(unsigned int irq, unsigned int type); + +/* The following three functions are for the core kernel use only. */ +#ifdef CONFIG_GENERIC_HARDIRQS +extern void suspend_device_irqs(void); +extern void resume_device_irqs(void); +#ifdef CONFIG_PM_SLEEP +extern int check_wakeup_irqs(void); +#else +static inline int check_wakeup_irqs(void) { return 0; } +#endif +#else +static inline void suspend_device_irqs(void) { }; +static inline void resume_device_irqs(void) { }; +static inline int check_wakeup_irqs(void) { return 0; } +#endif + +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) + +extern cpumask_var_t irq_default_affinity; + +extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); +extern int irq_can_set_affinity(unsigned int irq); +extern int irq_select_affinity(unsigned int irq); + +extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); + +/** + * struct irq_affinity_notify - context for notification of IRQ affinity changes + * @irq: Interrupt to which notification applies + * @kref: Reference count, for internal use + * @work: Work item, for internal use + * @notify: Function to be called on change. 
This will be + * called in process context. + * @release: Function to be called on release. This will be + * called in process context. Once registered, the + * structure must only be freed when this function is + * called or later. + */ +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); +}; + +extern int +irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); + +static inline void irq_run_affinity_notifiers(void) +{ + flush_scheduled_work(); +} + +#else /* CONFIG_SMP */ + +static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) +{ + return -EINVAL; +} + +static inline int irq_can_set_affinity(unsigned int irq) +{ + return 0; +} + +static inline int irq_select_affinity(unsigned int irq) { return 0; } + +static inline int irq_set_affinity_hint(unsigned int irq, + const struct cpumask *m) +{ + return -EINVAL; +} +#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */ + +#ifdef CONFIG_GENERIC_HARDIRQS +/* + * Special lockdep variants of irq disabling/enabling. + * These should be used for locking constructs that + * know that a particular irq context which is disabled, + * and which is the only irq-context user of a lock, + * that it's safe to take the lock in the irq-disabled + * section without disabling hardirqs. + * + * On !CONFIG_LOCKDEP they are equivalent to the normal + * irq disable/enable methods. + */ +static inline void disable_irq_nosync_lockdep(unsigned int irq) +{ + disable_irq_nosync(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); +#endif +} + +static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) +{ + disable_irq_nosync(irq); +#ifdef CONFIG_LOCKDEP + local_irq_save(*flags); +#endif +} + +static inline void disable_irq_lockdep(unsigned int irq) +{ + disable_irq(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); +#endif +} + +static inline void enable_irq_lockdep(unsigned int irq) +{ +#ifdef CONFIG_LOCKDEP + local_irq_enable(); +#endif + enable_irq(irq); +} + +static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) +{ +#ifdef CONFIG_LOCKDEP + local_irq_restore(*flags); +#endif + enable_irq(irq); +} + +/* IRQ wakeup (PM) control: */ +extern int irq_set_irq_wake(unsigned int irq, unsigned int on); + +static inline int enable_irq_wake(unsigned int irq) +{ + return irq_set_irq_wake(irq, 1); +} + +static inline int disable_irq_wake(unsigned int irq) +{ + return irq_set_irq_wake(irq, 0); +} + +#else /* !CONFIG_GENERIC_HARDIRQS */ +/* + * NOTE: non-genirq architectures, if they want to support the lock + * validator need to define the methods below in their asm/irq.h + * files, under an #ifdef CONFIG_LOCKDEP section. 
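+ *
+ * As an illustrative aside (not part of the original note; "foo" is a
+ * placeholder), a driver that wants its interrupt to act as a wakeup
+ * source uses the wake API declared above roughly as follows:
+ *
+ *	enable_irq_wake(foo->irq);	in its suspend callback
+ *	disable_irq_wake(foo->irq);	in its resume callback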
+ */ +#ifndef CONFIG_LOCKDEP +# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq) +# define disable_irq_nosync_lockdep_irqsave(irq, flags) \ + disable_irq_nosync(irq) +# define disable_irq_lockdep(irq) disable_irq(irq) +# define enable_irq_lockdep(irq) enable_irq(irq) +# define enable_irq_lockdep_irqrestore(irq, flags) \ + enable_irq(irq) +# endif + +static inline int enable_irq_wake(unsigned int irq) +{ + return 0; +} + +static inline int disable_irq_wake(unsigned int irq) +{ + return 0; +} +#endif /* CONFIG_GENERIC_HARDIRQS */ + + +#ifdef CONFIG_IRQ_FORCED_THREADING +extern bool force_irqthreads; +#else +#define force_irqthreads (0) +#endif + +#ifndef __ARCH_SET_SOFTIRQ_PENDING +#define set_softirq_pending(x) (local_softirq_pending() = (x)) +#define or_softirq_pending(x) (local_softirq_pending() |= (x)) +#endif + +/* Some architectures might implement lazy enabling/disabling of + * interrupts. In some cases, such as stop_machine, we might want + * to ensure that after a local_irq_disable(), interrupts have + * really been disabled in hardware. Such architectures need to + * implement the following hook. + */ +#ifndef hard_irq_disable +#define hard_irq_disable() do { } while(0) +#endif + +/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high + frequency threaded job scheduling. For almost all the purposes + tasklets are more than enough. F.e. all serial device BHs et + al. should be converted to tasklets, not to softirqs. + */ + +enum +{ + HI_SOFTIRQ=0, + TIMER_SOFTIRQ, + NET_TX_SOFTIRQ, + NET_RX_SOFTIRQ, + BLOCK_SOFTIRQ, + BLOCK_IOPOLL_SOFTIRQ, + TASKLET_SOFTIRQ, + SCHED_SOFTIRQ, + HRTIMER_SOFTIRQ, + RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ + + NR_SOFTIRQS +}; + +/* map softirq index to softirq name. update 'softirq_to_name' in + * kernel/softirq.c when adding a new softirq. + */ +extern char *softirq_to_name[NR_SOFTIRQS]; + +/* softirq mask and active fields moved to irq_cpustat_t in + * asm/hardirq.h to get better cache usage. KAO + */ + +struct softirq_action +{ + void (*action)(struct softirq_action *); +}; + +asmlinkage void do_softirq(void); +asmlinkage void __do_softirq(void); +extern void open_softirq(int nr, void (*action)(struct softirq_action *)); +extern void softirq_init(void); +extern void __raise_softirq_irqoff(unsigned int nr); + +extern void raise_softirq_irqoff(unsigned int nr); +extern void raise_softirq(unsigned int nr); + +/* This is the worklist that queues up per-cpu softirq work. + * + * send_remote_sendirq() adds work to these lists, and + * the softirq handler itself dequeues from them. The queues + * are protected by disabling local cpu interrupts and they must + * only be accessed by the local cpu that they are for. + */ +DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); + +DECLARE_PER_CPU(struct task_struct *, ksoftirqd); + +static inline struct task_struct *this_cpu_ksoftirqd(void) +{ + return this_cpu_read(ksoftirqd); +} + +/* Try to send a softirq to a remote cpu. If this cannot be done, the + * work will be queued to the local cpu. + */ +extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq); + +/* Like send_remote_softirq(), but the caller must disable local cpu interrupts + * and compute the current cpu, passed in as 'this_cpu'. + */ +extern void __send_remote_softirq(struct call_single_data *cp, int cpu, + int this_cpu, int softirq); + +/* Tasklets --- multithreaded analogue of BHs. 
+ + Main feature differing them of generic softirqs: tasklet + is running only on one CPU simultaneously. + + Main feature differing them of BHs: different tasklets + may be run simultaneously on different CPUs. + + Properties: + * If tasklet_schedule() is called, then tasklet is guaranteed + to be executed on some cpu at least once after this. + * If the tasklet is already scheduled, but its execution is still not + started, it will be executed only once. + * If this tasklet is already running on another CPU (or schedule is called + from tasklet itself), it is rescheduled for later. + * Tasklet is strictly serialized wrt itself, but not + wrt another tasklets. If client needs some intertask synchronization, + he makes it with spinlocks. + */ + +struct tasklet_struct +{ + struct tasklet_struct *next; + unsigned long state; + atomic_t count; + void (*func)(unsigned long); + unsigned long data; +}; + +#define DECLARE_TASKLET(name, func, data) \ +struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } + +#define DECLARE_TASKLET_DISABLED(name, func, data) \ +struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } + + +enum +{ + TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ + TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ +}; + +#ifdef CONFIG_SMP +static inline int tasklet_trylock(struct tasklet_struct *t) +{ + return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); +} + +static inline void tasklet_unlock(struct tasklet_struct *t) +{ + smp_mb__before_clear_bit(); + clear_bit(TASKLET_STATE_RUN, &(t)->state); +} + +static inline void tasklet_unlock_wait(struct tasklet_struct *t) +{ + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +} +#else +#define tasklet_trylock(t) 1 +#define tasklet_unlock_wait(t) do { } while (0) +#define tasklet_unlock(t) do { } while (0) +#endif + +extern void __tasklet_schedule(struct tasklet_struct *t); + +static inline void tasklet_schedule(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_schedule(t); +} + +extern void __tasklet_hi_schedule(struct tasklet_struct *t); + +static inline void tasklet_hi_schedule(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_hi_schedule(t); +} + +extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); + +/* + * This version avoids touching any other tasklets. Needed for kmemcheck + * in order not to take any page faults while enqueueing this tasklet; + * consider VERY carefully whether you really need this or + * tasklet_hi_schedule()... 
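+ *
+ * For the common case, plain tasklet usage (an illustrative sketch; the
+ * "foo" names are placeholders) looks like:
+ *
+ *	static void foo_do_work(unsigned long data);
+ *	static DECLARE_TASKLET(foo_tasklet, foo_do_work, 0);
+ *
+ *	static irqreturn_t foo_irq(int irq, void *dev_id)
+ *	{
+ *		tasklet_schedule(&foo_tasklet);
+ *		return IRQ_HANDLED;
+ *	}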
+ */ +static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_hi_schedule_first(t); +} + + +static inline void tasklet_disable_nosync(struct tasklet_struct *t) +{ + atomic_inc(&t->count); + smp_mb__after_atomic_inc(); +} + +static inline void tasklet_disable(struct tasklet_struct *t) +{ + tasklet_disable_nosync(t); + tasklet_unlock_wait(t); + smp_mb(); +} + +static inline void tasklet_enable(struct tasklet_struct *t) +{ + smp_mb__before_atomic_dec(); + atomic_dec(&t->count); +} + +static inline void tasklet_hi_enable(struct tasklet_struct *t) +{ + smp_mb__before_atomic_dec(); + atomic_dec(&t->count); +} + +extern void tasklet_kill(struct tasklet_struct *t); +extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); +extern void tasklet_init(struct tasklet_struct *t, + void (*func)(unsigned long), unsigned long data); + +struct tasklet_hrtimer { + struct hrtimer timer; + struct tasklet_struct tasklet; + enum hrtimer_restart (*function)(struct hrtimer *); +}; + +extern void +tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, + enum hrtimer_restart (*function)(struct hrtimer *), + clockid_t which_clock, enum hrtimer_mode mode); + +static inline +int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, + const enum hrtimer_mode mode) +{ + return hrtimer_start(&ttimer->timer, time, mode); +} + +static inline +void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) +{ + hrtimer_cancel(&ttimer->timer); + tasklet_kill(&ttimer->tasklet); +} + +/* + * Autoprobing for irqs: + * + * probe_irq_on() and probe_irq_off() provide robust primitives + * for accurate IRQ probing during kernel initialization. They are + * reasonably simple to use, are not "fooled" by spurious interrupts, + * and, unlike other attempts at IRQ probing, they do not get hung on + * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards). + * + * For reasonably foolproof probing, use them as follows: + * + * 1. clear and/or mask the device's internal interrupt. + * 2. sti(); + * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs + * 4. enable the device and cause it to trigger an interrupt. + * 5. wait for the device to interrupt, using non-intrusive polling or a delay. + * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple + * 7. service the device to clear its pending interrupt. + * 8. loop again if paranoia is required. + * + * probe_irq_on() returns a mask of allocated irq's. + * + * probe_irq_off() takes the mask as a parameter, + * and returns the irq number which occurred, + * or zero if none occurred, or a negative irq number + * if more than one irq occurred. 
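+ *
+ * A minimal sketch of that recipe (illustrative only: foo_device,
+ * foo_trigger_interrupt() and the 20ms delay are placeholders, and the
+ * device's own interrupt is assumed to have been masked beforehand):
+ *
+ *	static int foo_find_irq(struct foo_device *dev)
+ *	{
+ *		unsigned long irqs;
+ *		int irq;
+ *
+ *		irqs = probe_irq_on();
+ *		foo_trigger_interrupt(dev);
+ *		mdelay(20);
+ *		irq = probe_irq_off(irqs);
+ *		if (irq <= 0)
+ *			return -ENODEV;
+ *		return irq;
+ *	}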
+ */ + +#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE) +static inline unsigned long probe_irq_on(void) +{ + return 0; +} +static inline int probe_irq_off(unsigned long val) +{ + return 0; +} +static inline unsigned int probe_irq_mask(unsigned long val) +{ + return 0; +} +#else +extern unsigned long probe_irq_on(void); /* returns 0 on failure */ +extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */ +extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */ +#endif + +#ifdef CONFIG_PROC_FS +/* Initialize /proc/irq/ */ +extern void init_irq_proc(void); +#else +static inline void init_irq_proc(void) +{ +} +#endif + +struct seq_file; +int show_interrupts(struct seq_file *p, void *v); +int arch_show_interrupts(struct seq_file *p, int prec); + +extern int early_irq_init(void); +extern int arch_probe_nr_irqs(void); +extern int arch_early_irq_init(void); + +#endif diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h new file mode 100644 index 00000000..657fab4e --- /dev/null +++ b/include/linux/io-mapping.h @@ -0,0 +1,168 @@ +/* + * Copyright © 2008 Keith Packard <keithp@keithp.com> + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef _LINUX_IO_MAPPING_H +#define _LINUX_IO_MAPPING_H + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/bug.h> +#include <asm/io.h> +#include <asm/page.h> + +/* + * The io_mapping mechanism provides an abstraction for mapping + * individual pages from an io device to the CPU in an efficient fashion. + * + * See Documentation/io-mapping.txt + */ + +#ifdef CONFIG_HAVE_ATOMIC_IOMAP + +#include <asm/iomap.h> + +struct io_mapping { + resource_size_t base; + unsigned long size; + pgprot_t prot; +}; + +/* + * For small address space machines, mapping large objects + * into the kernel virtual space isn't practical. Where + * available, use fixmap support to dynamically map pages + * of the object at run time. 
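+ *
+ * Typical use of the interface defined below (illustrative; "foo_base",
+ * "foo_size", "offset" and "val" stand in for driver-specific values):
+ *
+ *	struct io_mapping *map;
+ *	void __iomem *p;
+ *
+ *	map = io_mapping_create_wc(foo_base, foo_size);
+ *	...
+ *	p = io_mapping_map_atomic_wc(map, offset);
+ *	writel(val, p);
+ *	io_mapping_unmap_atomic(p);
+ *	...
+ *	io_mapping_free(map);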
+ */ + +static inline struct io_mapping * +io_mapping_create_wc(resource_size_t base, unsigned long size) +{ + struct io_mapping *iomap; + pgprot_t prot; + + iomap = kmalloc(sizeof(*iomap), GFP_KERNEL); + if (!iomap) + goto out_err; + + if (iomap_create_wc(base, size, &prot)) + goto out_free; + + iomap->base = base; + iomap->size = size; + iomap->prot = prot; + return iomap; + +out_free: + kfree(iomap); +out_err: + return NULL; +} + +static inline void +io_mapping_free(struct io_mapping *mapping) +{ + iomap_free(mapping->base, mapping->size); + kfree(mapping); +} + +/* Atomic map/unmap */ +static inline void __iomem * +io_mapping_map_atomic_wc(struct io_mapping *mapping, + unsigned long offset) +{ + resource_size_t phys_addr; + unsigned long pfn; + + BUG_ON(offset >= mapping->size); + phys_addr = mapping->base + offset; + pfn = (unsigned long) (phys_addr >> PAGE_SHIFT); + return iomap_atomic_prot_pfn(pfn, mapping->prot); +} + +static inline void +io_mapping_unmap_atomic(void __iomem *vaddr) +{ + iounmap_atomic(vaddr); +} + +static inline void __iomem * +io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +{ + resource_size_t phys_addr; + + BUG_ON(offset >= mapping->size); + phys_addr = mapping->base + offset; + + return ioremap_wc(phys_addr, PAGE_SIZE); +} + +static inline void +io_mapping_unmap(void __iomem *vaddr) +{ + iounmap(vaddr); +} + +#else + +#include <linux/uaccess.h> + +/* this struct isn't actually defined anywhere */ +struct io_mapping; + +/* Create the io_mapping object*/ +static inline struct io_mapping * +io_mapping_create_wc(resource_size_t base, unsigned long size) +{ + return (struct io_mapping __force *) ioremap_wc(base, size); +} + +static inline void +io_mapping_free(struct io_mapping *mapping) +{ + iounmap((void __force __iomem *) mapping); +} + +/* Atomic map/unmap */ +static inline void __iomem * +io_mapping_map_atomic_wc(struct io_mapping *mapping, + unsigned long offset) +{ + pagefault_disable(); + return ((char __force __iomem *) mapping) + offset; +} + +static inline void +io_mapping_unmap_atomic(void __iomem *vaddr) +{ + pagefault_enable(); +} + +/* Non-atomic map/unmap */ +static inline void __iomem * +io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +{ + return ((char __force __iomem *) mapping) + offset; +} + +static inline void +io_mapping_unmap(void __iomem *vaddr) +{ +} + +#endif /* HAVE_ATOMIC_IOMAP */ + +#endif /* _LINUX_IO_MAPPING_H */ diff --git a/include/linux/io.h b/include/linux/io.h new file mode 100644 index 00000000..7fd2d213 --- /dev/null +++ b/include/linux/io.h @@ -0,0 +1,70 @@ +/* + * Copyright 2006 PathScale, Inc. All Rights Reserved. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef _LINUX_IO_H +#define _LINUX_IO_H + +#include <linux/types.h> +#include <asm/io.h> +#include <asm/page.h> + +struct device; + +void __iowrite32_copy(void __iomem *to, const void *from, size_t count); +void __iowrite64_copy(void __iomem *to, const void *from, size_t count); + +#ifdef CONFIG_MMU +int ioremap_page_range(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot); +#else +static inline int ioremap_page_range(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot) +{ + return 0; +} +#endif + +/* + * Managed iomap interface + */ +#ifdef CONFIG_HAS_IOPORT +void __iomem * devm_ioport_map(struct device *dev, unsigned long port, + unsigned int nr); +void devm_ioport_unmap(struct device *dev, void __iomem *addr); +#else +static inline void __iomem *devm_ioport_map(struct device *dev, + unsigned long port, + unsigned int nr) +{ + return NULL; +} + +static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) +{ +} +#endif + +void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, + unsigned long size); +void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, + unsigned long size); +void devm_iounmap(struct device *dev, void __iomem *addr); +int check_signature(const volatile void __iomem *io_addr, + const unsigned char *signature, int length); +void devm_ioremap_release(struct device *dev, void *res); + +#endif /* _LINUX_IO_H */ diff --git a/include/linux/ioc3.h b/include/linux/ioc3.h new file mode 100644 index 00000000..38b286e9 --- /dev/null +++ b/include/linux/ioc3.h @@ -0,0 +1,93 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org> + */ + +#ifndef _LINUX_IOC3_H +#define _LINUX_IOC3_H + +#include <asm/sn/ioc3.h> + +#define IOC3_MAX_SUBMODULES 32 + +#define IOC3_CLASS_NONE 0 +#define IOC3_CLASS_BASE_IP27 1 +#define IOC3_CLASS_BASE_IP30 2 +#define IOC3_CLASS_MENET_123 3 +#define IOC3_CLASS_MENET_4 4 +#define IOC3_CLASS_CADDUO 5 +#define IOC3_CLASS_SERIAL 6 + +/* One of these per IOC3 */ +struct ioc3_driver_data { + struct list_head list; + int id; /* IOC3 sequence number */ + /* PCI mapping */ + unsigned long pma; /* physical address */ + struct ioc3 __iomem *vma; /* pointer to registers */ + struct pci_dev *pdev; /* PCI device */ + /* IRQ stuff */ + int dual_irq; /* set if separate IRQs are used */ + int irq_io, irq_eth; /* IRQ numbers */ + /* GPIO magic */ + spinlock_t gpio_lock; + unsigned int gpdr_shadow; + /* NIC identifiers */ + char nic_part[32]; + char nic_serial[16]; + char nic_mac[6]; + /* submodule set */ + int class; + void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */ + int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */ + /* is_ir_lock must be held while + * modifying sio_ie values, so + * we can be sure that sio_ie is + * not changing when we read it + * along with sio_ir. 
+ */ + spinlock_t ir_lock; /* SIO_IE[SC] mod lock */ +}; + +/* One per submodule */ +struct ioc3_submodule { + char *name; /* descriptive submodule name */ + struct module *owner; /* owning kernel module */ + int ethernet; /* set for ethernet drivers */ + int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *); + int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *); + int id; /* assigned by IOC3, index for the "data" array */ + /* IRQ stuff */ + unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */ + int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */ + int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); + /* private submodule data */ + void *data; /* assigned by submodule */ +}; + +/********************************** + * Functions needed by submodules * + **********************************/ + +#define IOC3_W_IES 0 +#define IOC3_W_IEC 1 + +/* registers a submodule for all existing and future IOC3 chips */ +extern int ioc3_register_submodule(struct ioc3_submodule *); +/* unregisters a submodule */ +extern void ioc3_unregister_submodule(struct ioc3_submodule *); +/* enables IRQs indicated by irq_mask for a specified IOC3 chip */ +extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); +/* ackowledges specified IRQs */ +extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); +/* disables IRQs indicated by irq_mask for a specified IOC3 chip */ +extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); +/* atomically sets GPCR bits */ +extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int); +/* general ireg writer */ +extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg); + +#endif diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h new file mode 100644 index 00000000..51e2b9fb --- /dev/null +++ b/include/linux/ioc4.h @@ -0,0 +1,184 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. + */ + +#ifndef _LINUX_IOC4_H +#define _LINUX_IOC4_H + +#include <linux/interrupt.h> + +/*************** + * Definitions * + ***************/ + +/* Miscellaneous values inherent to hardware */ + +#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */ + +/*********************************** + * Structures needed by subdrivers * + ***********************************/ + +/* This structure fully describes the IOC4 miscellaneous registers which + * appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding + * PCI resource is managed by the main IOC4 driver because it contains + * registers of interest to many different IOC4 subdrivers. 
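+ *
+ * Subdrivers reach these registers through the ioc4_driver_data they are
+ * handed at probe time.  An illustrative (hypothetical) submodule using the
+ * interface declared at the end of this header:
+ *
+ *	static int foo_probe(struct ioc4_driver_data *idd)
+ *	{
+ *		struct ioc4_misc_regs __iomem *regs = idd->idd_misc_regs;
+ *		...
+ *		return 0;
+ *	}
+ *
+ *	static struct ioc4_submodule foo_submodule = {
+ *		.is_name  = "foo",
+ *		.is_owner = THIS_MODULE,
+ *		.is_probe = foo_probe,
+ *	};
+ *
+ *	ioc4_register_submodule(&foo_submodule);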
+ */ +struct ioc4_misc_regs { + /* Miscellaneous IOC4 registers */ + union ioc4_pci_err_addr_l { + uint32_t raw; + struct { + uint32_t valid:1; /* Address captured */ + uint32_t master_id:4; /* Unit causing error + * 0/1: Serial port 0 TX/RX + * 2/3: Serial port 1 TX/RX + * 4/5: Serial port 2 TX/RX + * 6/7: Serial port 3 TX/RX + * 8: ATA/ATAPI + * 9-15: Undefined + */ + uint32_t mul_err:1; /* Multiple errors occurred */ + uint32_t addr:26; /* Bits 31-6 of error addr */ + } fields; + } pci_err_addr_l; + uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */ + union ioc4_sio_int { + uint32_t raw; + struct { + uint8_t tx_mt:1; /* TX ring buffer empty */ + uint8_t rx_full:1; /* RX ring buffer full */ + uint8_t rx_high:1; /* RX high-water exceeded */ + uint8_t rx_timer:1; /* RX timer has triggered */ + uint8_t delta_dcd:1; /* DELTA_DCD seen */ + uint8_t delta_cts:1; /* DELTA_CTS seen */ + uint8_t intr_pass:1; /* Interrupt pass-through */ + uint8_t tx_explicit:1; /* TX, MCW, or delay complete */ + } fields[4]; + } sio_ir; /* Serial interrupt state */ + union ioc4_other_int { + uint32_t raw; + struct { + uint32_t ata_int:1; /* ATA port passthru */ + uint32_t ata_memerr:1; /* ATA halted by mem error */ + uint32_t memerr:4; /* Serial halted by mem err */ + uint32_t kbd_int:1; /* kbd/mouse intr asserted */ + uint32_t reserved:16; /* zero */ + uint32_t rt_int:1; /* INT_OUT section latch */ + uint32_t gen_int:8; /* Intr. from generic pins */ + } fields; + } other_ir; /* Other interrupt state */ + union ioc4_sio_int sio_ies; /* Serial interrupt enable set */ + union ioc4_other_int other_ies; /* Other interrupt enable set */ + union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */ + union ioc4_other_int other_iec; /* Other interrupt enable clear */ + union ioc4_sio_cr { + uint32_t raw; + struct { + uint32_t cmd_pulse:4; /* Bytebus strobe width */ + uint32_t arb_diag:3; /* PCI bus requester */ + uint32_t sio_diag_idle:1; /* Active ser req? */ + uint32_t ata_diag_idle:1; /* Active ATA req? */ + uint32_t ata_diag_active:1; /* ATA req is winner */ + uint32_t reserved:22; /* zero */ + } fields; + } sio_cr; + uint32_t unused1; + union ioc4_int_out { + uint32_t raw; + struct { + uint32_t count:16; /* Period control */ + uint32_t mode:3; /* Output signal shape */ + uint32_t reserved:11; /* zero */ + uint32_t diag:1; /* Timebase control */ + uint32_t int_out:1; /* Current value */ + } fields; + } int_out; /* External interrupt output control */ + uint32_t unused2; + union ioc4_gpcr { + uint32_t raw; + struct { + uint32_t dir:8; /* Pin direction */ + uint32_t edge:8; /* Edge/level mode */ + uint32_t reserved1:4; /* zero */ + uint32_t int_out_en:1; /* INT_OUT enable */ + uint32_t reserved2:11; /* zero */ + } fields; + } gpcr_s; /* Generic PIO control set */ + union ioc4_gpcr gpcr_c; /* Generic PIO control clear */ + union ioc4_gpdr { + uint32_t raw; + struct { + uint32_t gen_pin:8; /* State of pins */ + uint32_t reserved:24; + } fields; + } gpdr; /* Generic PIO data */ + uint32_t unused3; + union ioc4_gppr { + uint32_t raw; + struct { + uint32_t gen_pin:1; /* Single pin state */ + uint32_t reserved:31; + } fields; + } gppr[8]; /* Generic PIO pins */ +}; + +/* Masks for GPCR DIR pins */ +#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */ +#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */ +#define IOC4_GPCR_DIR_2 0x04 +#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */ +#define IOC4_GPCR_DIR_4 0x10 /* Ser. 
port 0 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */ +#define IOC4_GPCR_DIR_7 0x80 /* Ser. port 3 xcvr select (0=232, 1=422) */ + +/* Masks for GPCR EDGE pins */ +#define IOC4_GPCR_EDGE_0 0x01 +#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */ +#define IOC4_GPCR_EDGE_2 0x04 +#define IOC4_GPCR_EDGE_3 0x08 +#define IOC4_GPCR_EDGE_4 0x10 +#define IOC4_GPCR_EDGE_5 0x20 +#define IOC4_GPCR_EDGE_6 0x40 +#define IOC4_GPCR_EDGE_7 0x80 + +#define IOC4_VARIANT_IO9 0x0900 +#define IOC4_VARIANT_PCI_RT 0x0901 +#define IOC4_VARIANT_IO10 0x1000 + +/* One of these per IOC4 */ +struct ioc4_driver_data { + struct list_head idd_list; + unsigned long idd_bar0; + struct pci_dev *idd_pdev; + const struct pci_device_id *idd_pci_id; + struct ioc4_misc_regs __iomem *idd_misc_regs; + unsigned long count_period; + void *idd_serial_data; + unsigned int idd_variant; +}; + +/* One per submodule */ +struct ioc4_submodule { + struct list_head is_list; + char *is_name; + struct module *is_owner; + int (*is_probe) (struct ioc4_driver_data *); + int (*is_remove) (struct ioc4_driver_data *); +}; + +#define IOC4_NUM_CARDS 8 /* max cards per partition */ + +/********************************** + * Functions needed by submodules * + **********************************/ + +extern int ioc4_register_submodule(struct ioc4_submodule *); +extern void ioc4_unregister_submodule(struct ioc4_submodule *); + +#endif /* _LINUX_IOC4_H */ diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h new file mode 100644 index 00000000..1a301806 --- /dev/null +++ b/include/linux/iocontext.h @@ -0,0 +1,152 @@ +#ifndef IOCONTEXT_H +#define IOCONTEXT_H + +#include <linux/radix-tree.h> +#include <linux/rcupdate.h> +#include <linux/workqueue.h> + +enum { + ICQ_IOPRIO_CHANGED = 1 << 0, + ICQ_CGROUP_CHANGED = 1 << 1, + ICQ_EXITED = 1 << 2, + + ICQ_CHANGED_MASK = ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED, +}; + +/* + * An io_cq (icq) is association between an io_context (ioc) and a + * request_queue (q). This is used by elevators which need to track + * information per ioc - q pair. + * + * Elevator can request use of icq by setting elevator_type->icq_size and + * ->icq_align. Both size and align must be larger than that of struct + * io_cq and elevator can use the tail area for private information. The + * recommended way to do this is defining a struct which contains io_cq as + * the first member followed by private members and using its size and + * align. For example, + * + * struct snail_io_cq { + * struct io_cq icq; + * int poke_snail; + * int feed_snail; + * }; + * + * struct elevator_type snail_elv_type { + * .ops = { ... }, + * .icq_size = sizeof(struct snail_io_cq), + * .icq_align = __alignof__(struct snail_io_cq), + * ... + * }; + * + * If icq_size is set, block core will manage icq's. All requests will + * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn() + * is called and be holding a reference to the associated io_context. + * + * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is + * called and, on destruction, ->elevator_exit_icq_fn(). Both functions + * are called with both the associated io_context and queue locks held. + * + * Elevator is allowed to lookup icq using ioc_lookup_icq() while holding + * queue lock but the returned icq is valid only until the queue lock is + * released. 
Elevators can not and should not try to create or destroy + * icq's. + * + * As icq's are linked from both ioc and q, the locking rules are a bit + * complex. + * + * - ioc lock nests inside q lock. + * + * - ioc->icq_list and icq->ioc_node are protected by ioc lock. + * q->icq_list and icq->q_node by q lock. + * + * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq + * itself is protected by q lock. However, both the indexes and icq + * itself are also RCU managed and lookup can be performed holding only + * the q lock. + * + * - icq's are not reference counted. They are destroyed when either the + * ioc or q goes away. Each request with icq set holds an extra + * reference to ioc to ensure it stays until the request is completed. + * + * - Linking and unlinking icq's are performed while holding both ioc and q + * locks. Due to the lock ordering, q exit is simple but ioc exit + * requires reverse-order double lock dance. + */ +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + + /* + * q_node and ioc_node link io_cq through icq_list of q and ioc + * respectively. Both fields are unused once ioc_exit_icq() is + * called and shared with __rcu_icq_cache and __rcu_head which are + * used for RCU free of io_cq. + */ + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct rcu_head __rcu_head; + }; + + unsigned int flags; +}; + +/* + * I/O subsystem state of the associated processes. It is refcounted + * and kmalloc'ed. These could be shared between processes. + */ +struct io_context { + atomic_long_t refcount; + atomic_t nr_tasks; + + /* all the fields below are protected by this lock */ + spinlock_t lock; + + unsigned short ioprio; + + /* + * For request batching + */ + int nr_batch_requests; /* Number of requests left in the batch */ + unsigned long last_waited; /* Time last woken after wait for request */ + + struct radix_tree_root icq_tree; + struct io_cq __rcu *icq_hint; + struct hlist_head icq_list; + + struct work_struct release_work; +}; + +static inline struct io_context *ioc_task_link(struct io_context *ioc) +{ + /* + * if ref count is zero, don't allow sharing (ioc is going away, it's + * a race). 
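+	 * atomic_long_inc_not_zero() only takes the reference while the
+	 * count is still non-zero, so a task is never linked to an
+	 * io_context that has already begun to be torn down.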
+ */ + if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) { + atomic_inc(&ioc->nr_tasks); + return ioc; + } + + return NULL; +} + +struct task_struct; +#ifdef CONFIG_BLOCK +void put_io_context(struct io_context *ioc); +void exit_io_context(struct task_struct *task); +struct io_context *get_task_io_context(struct task_struct *task, + gfp_t gfp_flags, int node); +void ioc_ioprio_changed(struct io_context *ioc, int ioprio); +void ioc_cgroup_changed(struct io_context *ioc); +unsigned int icq_get_changed(struct io_cq *icq); +#else +struct io_context; +static inline void put_io_context(struct io_context *ioc) { } +static inline void exit_io_context(struct task_struct *task) { } +#endif + +#endif diff --git a/include/linux/ioctl.h b/include/linux/ioctl.h new file mode 100644 index 00000000..aa91eb39 --- /dev/null +++ b/include/linux/ioctl.h @@ -0,0 +1,7 @@ +#ifndef _LINUX_IOCTL_H +#define _LINUX_IOCTL_H + +#include <asm/ioctl.h> + +#endif /* _LINUX_IOCTL_H */ + diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h new file mode 100644 index 00000000..86bdeffe --- /dev/null +++ b/include/linux/iommu-helper.h @@ -0,0 +1,34 @@ +#ifndef _LINUX_IOMMU_HELPER_H +#define _LINUX_IOMMU_HELPER_H + +#include <linux/kernel.h> + +static inline unsigned long iommu_device_max_index(unsigned long size, + unsigned long offset, + u64 dma_mask) +{ + if (size + offset > dma_mask) + return dma_mask - offset + 1; + else + return size; +} + +extern int iommu_is_span_boundary(unsigned int index, unsigned int nr, + unsigned long shift, + unsigned long boundary_size); +extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, + unsigned long start, unsigned int nr, + unsigned long shift, + unsigned long boundary_size, + unsigned long align_mask); + +static inline unsigned long iommu_num_pages(unsigned long addr, + unsigned long len, + unsigned long io_page_size) +{ + unsigned long size = (addr & (io_page_size - 1)) + len; + + return DIV_ROUND_UP(size, io_page_size); +} + +#endif diff --git a/include/linux/iommu.h b/include/linux/iommu.h new file mode 100644 index 00000000..d9375804 --- /dev/null +++ b/include/linux/iommu.h @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. + * Author: Joerg Roedel <joerg.roedel@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_IOMMU_H +#define __LINUX_IOMMU_H + +#include <linux/errno.h> + +#define IOMMU_READ (1) +#define IOMMU_WRITE (2) +#define IOMMU_CACHE (4) /* DMA cache coherency */ + +struct iommu_ops; +struct bus_type; +struct device; +struct iommu_domain; + +/* iommu fault flags */ +#define IOMMU_FAULT_READ 0x0 +#define IOMMU_FAULT_WRITE 0x1 + +typedef int (*iommu_fault_handler_t)(struct iommu_domain *, + struct device *, unsigned long, int); + +struct iommu_domain { + struct iommu_ops *ops; + void *priv; + iommu_fault_handler_t handler; +}; + +#define IOMMU_CAP_CACHE_COHERENCY 0x1 +#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ + +#ifdef CONFIG_IOMMU_API + +/** + * struct iommu_ops - iommu ops and capabilities + * @domain_init: init iommu domain + * @domain_destroy: destroy iommu domain + * @attach_dev: attach device to an iommu domain + * @detach_dev: detach device from an iommu domain + * @map: map a physically contiguous memory region to an iommu domain + * @unmap: unmap a physically contiguous memory region from an iommu domain + * @iova_to_phys: translate iova to physical address + * @domain_has_cap: domain capabilities query + * @commit: commit iommu domain + * @pgsize_bitmap: bitmap of supported page sizes + */ +struct iommu_ops { + int (*domain_init)(struct iommu_domain *domain); + void (*domain_destroy)(struct iommu_domain *domain); + int (*attach_dev)(struct iommu_domain *domain, struct device *dev); + void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + int (*map)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, + size_t size); + phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, + unsigned long iova); + int (*domain_has_cap)(struct iommu_domain *domain, + unsigned long cap); + int (*device_group)(struct device *dev, unsigned int *groupid); + unsigned long pgsize_bitmap; +}; + +extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops); +extern bool iommu_present(struct bus_type *bus); +extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); +extern void iommu_domain_free(struct iommu_domain *domain); +extern int iommu_attach_device(struct iommu_domain *domain, + struct device *dev); +extern void iommu_detach_device(struct iommu_domain *domain, + struct device *dev); +extern int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); +extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size); +extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova); +extern int iommu_domain_has_cap(struct iommu_domain *domain, + unsigned long cap); +extern void iommu_set_fault_handler(struct iommu_domain *domain, + iommu_fault_handler_t handler); +extern int iommu_device_group(struct device *dev, unsigned int *groupid); + +/** + * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework + * @domain: the iommu domain where the fault has happened + * @dev: the device where the fault has happened + * @iova: the faulting address + * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 
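+ *
+ * (Aside, not part of this kernel-doc: a typical consumer of the IOMMU API
+ * declared above does, roughly and for illustration only,
+ *
+ *	domain = iommu_domain_alloc(dev->bus);
+ *	iommu_attach_device(domain, dev);
+ *	iommu_map(domain, iova, paddr, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+ *
+ * and tears the mapping down again with iommu_unmap(), iommu_detach_device()
+ * and iommu_domain_free().)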
+ * + * This function should be called by the low-level IOMMU implementations + * whenever IOMMU faults happen, to allow high-level users, that are + * interested in such events, to know about them. + * + * This event may be useful for several possible use cases: + * - mere logging of the event + * - dynamic TLB/PTE loading + * - if restarting of the faulting device is required + * + * Returns 0 on success and an appropriate error code otherwise (if dynamic + * PTE/TLB loading will one day be supported, implementations will be able + * to tell whether it succeeded or not according to this return value). + * + * Specifically, -ENOSYS is returned if a fault handler isn't installed + * (though fault handlers can also return -ENOSYS, in case they want to + * elicit the default behavior of the IOMMU drivers). + */ +static inline int report_iommu_fault(struct iommu_domain *domain, + struct device *dev, unsigned long iova, int flags) +{ + int ret = -ENOSYS; + + /* + * if upper layers showed interest and installed a fault handler, + * invoke it. + */ + if (domain->handler) + ret = domain->handler(domain, dev, iova, flags); + + return ret; +} + +#else /* CONFIG_IOMMU_API */ + +struct iommu_ops {}; + +static inline bool iommu_present(struct bus_type *bus) +{ + return false; +} + +static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) +{ + return NULL; +} + +static inline void iommu_domain_free(struct iommu_domain *domain) +{ +} + +static inline int iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + return -ENODEV; +} + +static inline void iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ +} + +static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, int gfp_order, int prot) +{ + return -ENODEV; +} + +static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, + int gfp_order) +{ + return -ENODEV; +} + +static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova) +{ + return 0; +} + +static inline int domain_has_cap(struct iommu_domain *domain, + unsigned long cap) +{ + return 0; +} + +static inline void iommu_set_fault_handler(struct iommu_domain *domain, + iommu_fault_handler_t handler) +{ +} + +static inline int iommu_device_group(struct device *dev, unsigned int *groupid) +{ + return -ENODEV; +} + +#endif /* CONFIG_IOMMU_API */ + +#endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/ion.h b/include/linux/ion.h new file mode 100644 index 00000000..a7d399c4 --- /dev/null +++ b/include/linux/ion.h @@ -0,0 +1,356 @@ +/* + * include/linux/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_ION_H +#define _LINUX_ION_H + +#include <linux/types.h> + +struct ion_handle; +/** + * enum ion_heap_types - list of all possible types of heaps + * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc + * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc + * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved + * carveout heap, allocations are physically + * contiguous + * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask + * is used to identify the heaps, so only 32 + * total heap types are supported + */ +enum ion_heap_type { + ION_HEAP_TYPE_SYSTEM, + ION_HEAP_TYPE_SYSTEM_CONTIG, + ION_HEAP_TYPE_CARVEOUT, + ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always + are at the end of this enum */ + ION_NUM_HEAPS = 16, +}; + +#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM) +#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG) +#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT) + +/** + * heap flags - the lower 16 bits are used by core ion, the upper 16 + * bits are reserved for use by the heaps themselves. + */ +#define ION_FLAG_CACHED 1 /* mappings of this buffer should be + cached, ion will do cache + maintenance when the buffer is + mapped for dma */ +#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will created + at mmap time, if this is set + caches must be managed manually */ + +#ifdef __KERNEL__ +struct ion_device; +struct ion_heap; +struct ion_mapper; +struct ion_client; +struct ion_buffer; + +/* This should be removed some day when phys_addr_t's are fully + plumbed in the kernel, and all instances of ion_phys_addr_t should + be converted to phys_addr_t. For the time being many kernel interfaces + do not accept phys_addr_t's that would have to */ +#define ion_phys_addr_t unsigned long + +/** + * struct ion_platform_heap - defines a heap in the given platform + * @type: type of the heap from ion_heap_type enum + * @id: unique identifier for heap. When allocating (lower numbers + * will be allocated from first) + * @name: used for debug purposes + * @base: base address of heap in physical memory if applicable + * @size: size of the heap in bytes if applicable + * + * Provided by the board file. + */ +struct ion_platform_heap { + enum ion_heap_type type; + unsigned int id; + const char *name; + ion_phys_addr_t base; + size_t size; +}; + +/** + * struct ion_platform_data - array of platform heaps passed from board file + * @nr: number of structures in the array + * @heaps: array of platform_heap structions + * + * Provided by the board file in the form of platform data to a platform device. 
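+ *
+ * An illustrative board file might provide, for example (the names,
+ * addresses and sizes below are made up):
+ *
+ *	static struct ion_platform_data foo_ion_pdata = {
+ *		.nr = 2,
+ *		.heaps = {
+ *			{
+ *				.type = ION_HEAP_TYPE_SYSTEM,
+ *				.id   = 0,
+ *				.name = "system",
+ *			},
+ *			{
+ *				.type = ION_HEAP_TYPE_CARVEOUT,
+ *				.id   = 1,
+ *				.name = "carveout",
+ *				.base = 0x80000000,
+ *				.size = 0x01000000,
+ *			},
+ *		},
+ *	};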
+ */ +struct ion_platform_data { + int nr; + struct ion_platform_heap heaps[]; +}; + +/** + * ion_reserve() - reserve memory for ion heaps if applicable + * @data: platform data specifying starting physical address and + * size + * + * Calls memblock reserve to set aside memory for heaps that are + * located at specific memory addresses or of specfic sizes not + * managed by the kernel + */ +void ion_reserve(struct ion_platform_data *data); + +/** + * ion_client_create() - allocate a client and returns it + * @dev: the global ion device + * @heap_mask: mask of heaps this client can allocate from + * @name: used for debugging + */ +struct ion_client *ion_client_create(struct ion_device *dev, + unsigned int heap_mask, const char *name); + +/** + * ion_client_destroy() - free's a client and all it's handles + * @client: the client + * + * Free the provided client and all it's resources including + * any handles it is holding. + */ +void ion_client_destroy(struct ion_client *client); + +/** + * ion_alloc - allocate ion memory + * @client: the client + * @len: size of the allocation + * @align: requested allocation alignment, lots of hardware blocks have + * alignment requirements of some kind + * @heap_mask: mask of heaps to allocate from, if multiple bits are set + * heaps will be tried in order from lowest to highest order bit + * @flags: heap flags, the low 16 bits are consumed by ion, the high 16 + * bits are passed on to the respective heap and can be heap + * custom + * + * Allocate memory in one of the heaps provided in heap mask and return + * an opaque handle to it. + */ +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int heap_mask, + unsigned int flags); + +/** + * ion_free - free a handle + * @client: the client + * @handle: the handle to free + * + * Free the provided handle. + */ +void ion_free(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_phys - returns the physical address and len of a handle + * @client: the client + * @handle: the handle + * @addr: a pointer to put the address in + * @len: a pointer to put the length in + * + * This function queries the heap for a particular handle to get the + * handle's physical address. It't output is only correct if + * a heap returns physically contiguous memory -- in other cases + * this api should not be implemented -- ion_sg_table should be used + * instead. Returns -EINVAL if the handle is invalid. This has + * no implications on the reference counting of the handle -- + * the returned value may not be valid if the caller is not + * holding a reference. + */ +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len); + +/** + * ion_map_dma - return an sg_table describing a handle + * @client: the client + * @handle: the handle + * + * This function returns the sg_table describing + * a particular ion handle. + */ +struct sg_table *ion_sg_table(struct ion_client *client, + struct ion_handle *handle); + +/** + * ion_map_kernel - create mapping for the given handle + * @client: the client + * @handle: handle to map + * + * Map the given handle into the kernel and return a kernel address that + * can be used to access this address. 
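+ *
+ * Together with the calls above, kernel-side use of ion follows this
+ * pattern (illustrative only; error handling omitted):
+ *
+ *	client = ion_client_create(idev, ION_HEAP_SYSTEM_MASK, "foo");
+ *	handle = ion_alloc(client, len, PAGE_SIZE, ION_HEAP_SYSTEM_MASK, 0);
+ *	vaddr  = ion_map_kernel(client, handle);
+ *	...
+ *	ion_unmap_kernel(client, handle);
+ *	ion_free(client, handle);
+ *	ion_client_destroy(client);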
+ */ +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_unmap_kernel() - destroy a kernel mapping for a handle + * @client: the client + * @handle: handle to unmap + */ +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_share_dma_buf() - given an ion client, create a dma-buf fd + * @client: the client + * @handle: the handle + */ +int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle + * @client: the client + * @fd: the dma-buf fd + * + * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf, + * import that fd and return a handle representing it. If a dma-buf from + * another exporter is passed in this function will return ERR_PTR(-EINVAL) + */ +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd); + +#endif /* __KERNEL__ */ + +/** + * DOC: Ion Userspace API + * + * create a client by opening /dev/ion + * most operations handled via following ioctls + * + */ + +/** + * struct ion_allocation_data - metadata passed from userspace for allocations + * @len: size of the allocation + * @align: required alignment of the allocation + * @heap_mask: mask of heaps to allocate from + * @flags: flags passed to heap + * @handle: pointer that will be populated with a cookie to use to refer + * to this allocation + * + * Provided by userspace as an argument to the ioctl + */ +struct ion_allocation_data { + size_t len; + size_t align; + unsigned int heap_mask; + unsigned int flags; + struct ion_handle *handle; +}; + +/** + * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair + * @handle: a handle + * @fd: a file descriptor representing that handle + * + * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with + * the handle returned from ion alloc, and the kernel returns the file + * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace + * provides the file descriptor and the kernel returns the handle. + */ +struct ion_fd_data { + struct ion_handle *handle; + int fd; +}; + +/** + * struct ion_handle_data - a handle passed to/from the kernel + * @handle: a handle + */ +struct ion_handle_data { + struct ion_handle *handle; +}; + +/** + * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl + * @cmd: the custom ioctl function to call + * @arg: additional data to pass to the custom ioctl, typically a user + * pointer to a predefined structure + * + * This works just like the regular cmd and arg fields of an ioctl. + */ +struct ion_custom_data { + unsigned int cmd; + unsigned long arg; +}; + +#define ION_IOC_MAGIC 'I' + +/** + * DOC: ION_IOC_ALLOC - allocate memory + * + * Takes an ion_allocation_data struct and returns it with the handle field + * populated with the opaque handle for the allocation. + */ +#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ + struct ion_allocation_data) + +/** + * DOC: ION_IOC_FREE - free memory + * + * Takes an ion_handle_data struct and frees the handle. + */ +#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) + +/** + * DOC: ION_IOC_MAP - get a file descriptor to mmap + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be used as an argument to mmap. 
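+ *
+ * From userspace the typical flow is (illustrative only; error checks
+ * omitted):
+ *
+ *	int ion_fd = open("/dev/ion", O_RDWR);
+ *	struct ion_allocation_data alloc = {
+ *		.len       = 4096,
+ *		.align     = 4096,
+ *		.heap_mask = ION_HEAP_SYSTEM_MASK,
+ *	};
+ *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
+ *
+ *	struct ion_fd_data map = { .handle = alloc.handle };
+ *	ioctl(ion_fd, ION_IOC_MAP, &map);
+ *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		       map.fd, 0);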
+ */ +#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) + +/** + * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be passed to another process. The corresponding opaque handle can + * be retrieved via ION_IOC_IMPORT. + */ +#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) + +/** + * DOC: ION_IOC_IMPORT - imports a shared file descriptor + * + * Takes an ion_fd_data struct with the fd field populated with a valid file + * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle + * filed set to the corresponding opaque handle. + */ +#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) + +/** + * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory + * + * Deprecated in favor of using the dma_buf api's correctly (syncing + * will happend automatically when the buffer is mapped to a device). + * If necessary should be used after touching a cached buffer from the cpu, + * this will make the buffer in memory coherent. + */ +#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data) + +/** + * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl + * + * Takes the argument of the architecture specific ioctl to call and + * passes appropriate userdata for that ioctl + */ +#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) + +#endif /* _LINUX_ION_H */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h new file mode 100644 index 00000000..e885ba23 --- /dev/null +++ b/include/linux/ioport.h @@ -0,0 +1,227 @@ +/* + * ioport.h Definitions of routines for detecting, reserving and + * allocating system resources. + * + * Authors: Linus Torvalds + */ + +#ifndef _LINUX_IOPORT_H +#define _LINUX_IOPORT_H + +#ifndef __ASSEMBLY__ +#include <linux/compiler.h> +#include <linux/types.h> +/* + * Resources are tree-like, allowing + * nesting etc.. + */ +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + unsigned long flags; + struct resource *parent, *sibling, *child; +}; + +/* + * IO resources have these defined flags. 
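+ *
+ * A driver normally builds one of these with the DEFINE_RES_* helpers
+ * further down in this header, e.g. (illustrative, made-up address):
+ *
+ *	static struct resource foo_res =
+ *		DEFINE_RES_MEM_NAMED(0xfe000000, 0x1000, "foo");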
+ */ +#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ + +#define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */ +#define IORESOURCE_IO 0x00000100 +#define IORESOURCE_MEM 0x00000200 +#define IORESOURCE_IRQ 0x00000400 +#define IORESOURCE_DMA 0x00000800 +#define IORESOURCE_BUS 0x00001000 + +#define IORESOURCE_PREFETCH 0x00002000 /* No side effects */ +#define IORESOURCE_READONLY 0x00004000 +#define IORESOURCE_CACHEABLE 0x00008000 +#define IORESOURCE_RANGELENGTH 0x00010000 +#define IORESOURCE_SHADOWABLE 0x00020000 + +#define IORESOURCE_SIZEALIGN 0x00040000 /* size indicates alignment */ +#define IORESOURCE_STARTALIGN 0x00080000 /* start field is alignment */ + +#define IORESOURCE_MEM_64 0x00100000 +#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */ +#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */ + +#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ +#define IORESOURCE_DISABLED 0x10000000 +#define IORESOURCE_UNSET 0x20000000 +#define IORESOURCE_AUTO 0x40000000 +#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ + +/* PnP IRQ specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_IRQ_HIGHEDGE (1<<0) +#define IORESOURCE_IRQ_LOWEDGE (1<<1) +#define IORESOURCE_IRQ_HIGHLEVEL (1<<2) +#define IORESOURCE_IRQ_LOWLEVEL (1<<3) +#define IORESOURCE_IRQ_SHAREABLE (1<<4) +#define IORESOURCE_IRQ_OPTIONAL (1<<5) + +/* PnP DMA specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_DMA_TYPE_MASK (3<<0) +#define IORESOURCE_DMA_8BIT (0<<0) +#define IORESOURCE_DMA_8AND16BIT (1<<0) +#define IORESOURCE_DMA_16BIT (2<<0) + +#define IORESOURCE_DMA_MASTER (1<<2) +#define IORESOURCE_DMA_BYTE (1<<3) +#define IORESOURCE_DMA_WORD (1<<4) + +#define IORESOURCE_DMA_SPEED_MASK (3<<6) +#define IORESOURCE_DMA_COMPATIBLE (0<<6) +#define IORESOURCE_DMA_TYPEA (1<<6) +#define IORESOURCE_DMA_TYPEB (2<<6) +#define IORESOURCE_DMA_TYPEF (3<<6) + +/* PnP memory I/O specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ +#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ +#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ +#define IORESOURCE_MEM_TYPE_MASK (3<<3) +#define IORESOURCE_MEM_8BIT (0<<3) +#define IORESOURCE_MEM_16BIT (1<<3) +#define IORESOURCE_MEM_8AND16BIT (2<<3) +#define IORESOURCE_MEM_32BIT (3<<3) +#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ +#define IORESOURCE_MEM_EXPANSIONROM (1<<6) + +/* PnP I/O specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_IO_16BIT_ADDR (1<<0) +#define IORESOURCE_IO_FIXED (1<<1) + +/* PCI ROM control bits (IORESOURCE_BITS) */ +#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ +#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */ +#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ +#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ + +/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. 
*/ +#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ + + +/* helpers to define resources */ +#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \ + { \ + .start = (_start), \ + .end = (_start) + (_size) - 1, \ + .name = (_name), \ + .flags = (_flags), \ + } + +#define DEFINE_RES_IO_NAMED(_start, _size, _name) \ + DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO) +#define DEFINE_RES_IO(_start, _size) \ + DEFINE_RES_IO_NAMED((_start), (_size), NULL) + +#define DEFINE_RES_MEM_NAMED(_start, _size, _name) \ + DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM) +#define DEFINE_RES_MEM(_start, _size) \ + DEFINE_RES_MEM_NAMED((_start), (_size), NULL) + +#define DEFINE_RES_IRQ_NAMED(_irq, _name) \ + DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ) +#define DEFINE_RES_IRQ(_irq) \ + DEFINE_RES_IRQ_NAMED((_irq), NULL) + +#define DEFINE_RES_DMA_NAMED(_dma, _name) \ + DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA) +#define DEFINE_RES_DMA(_dma) \ + DEFINE_RES_DMA_NAMED((_dma), NULL) + +/* PC/ISA/whatever - the normal PC address spaces: IO and memory */ +extern struct resource ioport_resource; +extern struct resource iomem_resource; + +extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); +extern int request_resource(struct resource *root, struct resource *new); +extern int release_resource(struct resource *new); +void release_child_resources(struct resource *new); +extern void reserve_region_with_split(struct resource *root, + resource_size_t start, resource_size_t end, + const char *name); +extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); +extern int insert_resource(struct resource *parent, struct resource *new); +extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); +extern void arch_remove_reservations(struct resource *avail); +extern int allocate_resource(struct resource *root, struct resource *new, + resource_size_t size, resource_size_t min, + resource_size_t max, resource_size_t align, + resource_size_t (*alignf)(void *, + const struct resource *, + resource_size_t, + resource_size_t), + void *alignf_data); +struct resource *lookup_resource(struct resource *root, resource_size_t start); +int adjust_resource(struct resource *res, resource_size_t start, + resource_size_t size); +resource_size_t resource_alignment(struct resource *res); +static inline resource_size_t resource_size(const struct resource *res) +{ + return res->end - res->start + 1; +} +static inline unsigned long resource_type(const struct resource *res) +{ + return res->flags & IORESOURCE_TYPE_BITS; +} + +/* Convenience shorthand with allocation */ +#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) +#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) +#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) +#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) +#define request_mem_region_exclusive(start,n,name) \ + __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) +#define rename_region(region, newname) do { (region)->name = (newname); } while (0) + +extern struct resource * __request_region(struct resource *, + resource_size_t start, + resource_size_t n, + const char *name, int flags); + +/* Compatibility cruft */ +#define 
release_region(start,n) __release_region(&ioport_resource, (start), (n)) +#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n)) +#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) + +extern int __check_region(struct resource *, resource_size_t, resource_size_t); +extern void __release_region(struct resource *, resource_size_t, + resource_size_t); + +static inline int __deprecated check_region(resource_size_t s, + resource_size_t n) +{ + return __check_region(&ioport_resource, s, n); +} + +/* Wrappers for managed devices */ +struct device; +#define devm_request_region(dev,start,n,name) \ + __devm_request_region(dev, &ioport_resource, (start), (n), (name)) +#define devm_request_mem_region(dev,start,n,name) \ + __devm_request_region(dev, &iomem_resource, (start), (n), (name)) + +extern struct resource * __devm_request_region(struct device *dev, + struct resource *parent, resource_size_t start, + resource_size_t n, const char *name); + +#define devm_release_region(dev, start, n) \ + __devm_release_region(dev, &ioport_resource, (start), (n)) +#define devm_release_mem_region(dev, start, n) \ + __devm_release_region(dev, &iomem_resource, (start), (n)) + +extern void __devm_release_region(struct device *dev, struct resource *parent, + resource_size_t start, resource_size_t n); +extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); +extern int iomem_is_exclusive(u64 addr); + +extern int +walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, + void *arg, int (*func)(unsigned long, unsigned long, void *)); + +#endif /* __ASSEMBLY__ */ +#endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h new file mode 100644 index 00000000..76dad480 --- /dev/null +++ b/include/linux/ioprio.h @@ -0,0 +1,91 @@ +#ifndef IOPRIO_H +#define IOPRIO_H + +#include <linux/sched.h> +#include <linux/iocontext.h> + +/* + * Gives us 8 prio classes with 13-bits of data for each class + */ +#define IOPRIO_BITS (16) +#define IOPRIO_CLASS_SHIFT (13) +#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) + +#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT) +#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK) +#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data) + +#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE) + +/* + * These are the io priority groups as implemented by CFQ. RT is the realtime + * class, it always gets premium service. BE is the best-effort scheduling + * class, the default for any process. IDLE is the idle scheduling class, it + * is only served when no one else is using the disk. + */ +enum { + IOPRIO_CLASS_NONE, + IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, + IOPRIO_CLASS_IDLE, +}; + +/* + * 8 best effort priority levels are supported + */ +#define IOPRIO_BE_NR (8) + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP, + IOPRIO_WHO_USER, +}; + +/* + * if process has set io priority explicitly, use that. 
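[Aside on the IOPRIO_* packing macros above in ioprio.h: a userspace sketch that sets best-effort class, level 2, for the calling process via the ioprio_set syscall. The constants are copied locally because this header pulls in kernel-only includes; the syscall itself and IOPRIO_WHO_PROCESS come straight from the definitions above.]

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Mirrors the header: class in the top bits, level in the low 13 bits. */
#define IOPRIO_CLASS_SHIFT              13
#define IOPRIO_PRIO_VALUE(class, data)  (((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_CLASS_BE                 2
#define IOPRIO_WHO_PROCESS              1

int main(void)
{
        int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);

        /* who == 0 means the calling process */
        if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
                perror("ioprio_set");
        return 0;
}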
if not, convert + * the cpu scheduler nice value to an io priority + */ +#define IOPRIO_NORM (4) +static inline int task_ioprio(struct io_context *ioc) +{ + if (ioprio_valid(ioc->ioprio)) + return IOPRIO_PRIO_DATA(ioc->ioprio); + + return IOPRIO_NORM; +} + +static inline int task_ioprio_class(struct io_context *ioc) +{ + if (ioprio_valid(ioc->ioprio)) + return IOPRIO_PRIO_CLASS(ioc->ioprio); + + return IOPRIO_CLASS_BE; +} + +static inline int task_nice_ioprio(struct task_struct *task) +{ + return (task_nice(task) + 20) / 5; +} + +/* + * This is for the case where the task hasn't asked for a specific IO class. + * Check for idle and rt task process, and return appropriate IO class. + */ +static inline int task_nice_ioclass(struct task_struct *task) +{ + if (task->policy == SCHED_IDLE) + return IOPRIO_CLASS_IDLE; + else if (task->policy == SCHED_FIFO || task->policy == SCHED_RR) + return IOPRIO_CLASS_RT; + else + return IOPRIO_CLASS_BE; +} + +/* + * For inheritance, return the highest of the two given priorities + */ +extern int ioprio_best(unsigned short aprio, unsigned short bprio); + +extern int set_task_ioprio(struct task_struct *task, int ioprio); + +#endif diff --git a/include/linux/iova.h b/include/linux/iova.h new file mode 100644 index 00000000..76a0759e --- /dev/null +++ b/include/linux/iova.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2006, Intel Corporation. + * + * This file is released under the GPLv2. + * + * Copyright (C) 2006-2008 Intel Corporation + * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> + * + */ + +#ifndef _IOVA_H_ +#define _IOVA_H_ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/rbtree.h> +#include <linux/dma-mapping.h> + +/* IO virtual address start page frame number */ +#define IOVA_START_PFN (1) + +/* iova structure */ +struct iova { + struct rb_node node; + unsigned long pfn_hi; /* IOMMU dish out addr hi */ + unsigned long pfn_lo; /* IOMMU dish out addr lo */ +}; + +/* holds all the iova translations for a domain */ +struct iova_domain { + spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ + struct rb_root rbroot; /* iova domain rbtree root */ + struct rb_node *cached32_node; /* Save last alloced node */ + unsigned long dma_32bit_pfn; +}; + +struct iova *alloc_iova_mem(void); +void free_iova_mem(struct iova *iova); +void free_iova(struct iova_domain *iovad, unsigned long pfn); +void __free_iova(struct iova_domain *iovad, struct iova *iova); +struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, + unsigned long limit_pfn, + bool size_aligned); +struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, + unsigned long pfn_hi); +void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); +void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); +struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); +void put_iova_domain(struct iova_domain *iovad); + +#endif diff --git a/include/linux/ip.h b/include/linux/ip.h new file mode 100644 index 00000000..bd0a2a86 --- /dev/null +++ b/include/linux/ip.h @@ -0,0 +1,149 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the IP protocol. + * + * Version: @(#)ip.h 1.0.2 04/28/93 + * + * Authors: Fred N. 
van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IP_H +#define _LINUX_IP_H +#include <linux/types.h> +#include <asm/byteorder.h> + +#define IPTOS_TOS_MASK 0x1E +#define IPTOS_TOS(tos) ((tos)&IPTOS_TOS_MASK) +#define IPTOS_LOWDELAY 0x10 +#define IPTOS_THROUGHPUT 0x08 +#define IPTOS_RELIABILITY 0x04 +#define IPTOS_MINCOST 0x02 + +#define IPTOS_PREC_MASK 0xE0 +#define IPTOS_PREC(tos) ((tos)&IPTOS_PREC_MASK) +#define IPTOS_PREC_NETCONTROL 0xe0 +#define IPTOS_PREC_INTERNETCONTROL 0xc0 +#define IPTOS_PREC_CRITIC_ECP 0xa0 +#define IPTOS_PREC_FLASHOVERRIDE 0x80 +#define IPTOS_PREC_FLASH 0x60 +#define IPTOS_PREC_IMMEDIATE 0x40 +#define IPTOS_PREC_PRIORITY 0x20 +#define IPTOS_PREC_ROUTINE 0x00 + + +/* IP options */ +#define IPOPT_COPY 0x80 +#define IPOPT_CLASS_MASK 0x60 +#define IPOPT_NUMBER_MASK 0x1f + +#define IPOPT_COPIED(o) ((o)&IPOPT_COPY) +#define IPOPT_CLASS(o) ((o)&IPOPT_CLASS_MASK) +#define IPOPT_NUMBER(o) ((o)&IPOPT_NUMBER_MASK) + +#define IPOPT_CONTROL 0x00 +#define IPOPT_RESERVED1 0x20 +#define IPOPT_MEASUREMENT 0x40 +#define IPOPT_RESERVED2 0x60 + +#define IPOPT_END (0 |IPOPT_CONTROL) +#define IPOPT_NOOP (1 |IPOPT_CONTROL) +#define IPOPT_SEC (2 |IPOPT_CONTROL|IPOPT_COPY) +#define IPOPT_LSRR (3 |IPOPT_CONTROL|IPOPT_COPY) +#define IPOPT_TIMESTAMP (4 |IPOPT_MEASUREMENT) +#define IPOPT_CIPSO (6 |IPOPT_CONTROL|IPOPT_COPY) +#define IPOPT_RR (7 |IPOPT_CONTROL) +#define IPOPT_SID (8 |IPOPT_CONTROL|IPOPT_COPY) +#define IPOPT_SSRR (9 |IPOPT_CONTROL|IPOPT_COPY) +#define IPOPT_RA (20|IPOPT_CONTROL|IPOPT_COPY) + +#define IPVERSION 4 +#define MAXTTL 255 +#define IPDEFTTL 64 + +#define IPOPT_OPTVAL 0 +#define IPOPT_OLEN 1 +#define IPOPT_OFFSET 2 +#define IPOPT_MINOFF 4 +#define MAX_IPOPTLEN 40 +#define IPOPT_NOP IPOPT_NOOP +#define IPOPT_EOL IPOPT_END +#define IPOPT_TS IPOPT_TIMESTAMP + +#define IPOPT_TS_TSONLY 0 /* timestamps only */ +#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */ +#define IPOPT_TS_PRESPEC 3 /* specified modules only */ + +#define IPV4_BEET_PHMAXLEN 8 + +struct iphdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 ihl:4, + version:4; +#elif defined (__BIG_ENDIAN_BITFIELD) + __u8 version:4, + ihl:4; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + __u8 ttl; + __u8 protocol; + __sum16 check; + __be32 saddr; + __be32 daddr; + /*The options start here. */ +}; + +#ifdef __KERNEL__ +#include <linux/skbuff.h> + +static inline struct iphdr *ip_hdr(const struct sk_buff *skb) +{ + return (struct iphdr *)skb_network_header(skb); +} + +static inline struct iphdr *ipip_hdr(const struct sk_buff *skb) +{ + return (struct iphdr *)skb_transport_header(skb); +} +#endif + +struct ip_auth_hdr { + __u8 nexthdr; + __u8 hdrlen; /* This one is measured in 32 bit units! */ + __be16 reserved; + __be32 spi; + __be32 seq_no; /* Sequence number */ + __u8 auth_data[0]; /* Variable len but >=4. Mind the 64 bit alignment! */ +}; + +struct ip_esp_hdr { + __be32 spi; + __be32 seq_no; /* Sequence number */ + __u8 enc_data[0]; /* Variable len but >=8. Mind the 64 bit alignment! 
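[Aside on struct iphdr and the ip_hdr() accessor above: a small in-kernel sketch that pulls the IPv4 header out of an skb and reads a few fields. It assumes the caller already knows the skb carries an IPv4 packet with the network header set, e.g. in a netfilter hook; the function name is hypothetical.]

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

static void foo_inspect_ipv4(const struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);

        /* tot_len is big-endian on the wire, hence the ntohs() */
        pr_debug("IPv4: proto=%u tot_len=%u ttl=%u\n",
                 iph->protocol, ntohs(iph->tot_len), iph->ttl);
}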
*/ +}; + +struct ip_comp_hdr { + __u8 nexthdr; + __u8 flags; + __be16 cpi; +}; + +struct ip_beet_phdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 padlen; + __u8 reserved; +}; + +#endif /* _LINUX_IP_H */ diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h new file mode 100644 index 00000000..bf22b031 --- /dev/null +++ b/include/linux/ip6_tunnel.h @@ -0,0 +1,34 @@ +#ifndef _IP6_TUNNEL_H +#define _IP6_TUNNEL_H + +#include <linux/types.h> + +#define IPV6_TLV_TNL_ENCAP_LIMIT 4 +#define IPV6_DEFAULT_TNL_ENCAP_LIMIT 4 + +/* don't add encapsulation limit if one isn't present in inner packet */ +#define IP6_TNL_F_IGN_ENCAP_LIMIT 0x1 +/* copy the traffic class field from the inner packet */ +#define IP6_TNL_F_USE_ORIG_TCLASS 0x2 +/* copy the flowlabel from the inner packet */ +#define IP6_TNL_F_USE_ORIG_FLOWLABEL 0x4 +/* being used for Mobile IPv6 */ +#define IP6_TNL_F_MIP6_DEV 0x8 +/* copy DSCP from the outer packet */ +#define IP6_TNL_F_RCV_DSCP_COPY 0x10 +/* copy fwmark from inner packet */ +#define IP6_TNL_F_USE_ORIG_FWMARK 0x20 + +struct ip6_tnl_parm { + char name[IFNAMSIZ]; /* name of tunnel device */ + int link; /* ifindex of underlying L2 interface */ + __u8 proto; /* tunnel protocol */ + __u8 encap_limit; /* encapsulation limit for tunnel */ + __u8 hop_limit; /* hop limit for tunnel */ + __be32 flowinfo; /* traffic class and flowlabel for tunnel */ + __u32 flags; /* tunnel flags */ + struct in6_addr laddr; /* local tunnel end-point address */ + struct in6_addr raddr; /* remote tunnel end-point address */ +}; + +#endif diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h new file mode 100644 index 00000000..4deb3834 --- /dev/null +++ b/include/linux/ip_vs.h @@ -0,0 +1,429 @@ +/* + * IP Virtual Server + * data structure and functionality definitions + */ + +#ifndef _IP_VS_H +#define _IP_VS_H + +#include <linux/types.h> /* For __beXX types in userland */ + +#define IP_VS_VERSION_CODE 0x010201 +#define NVERSION(version) \ + (version >> 16) & 0xFF, \ + (version >> 8) & 0xFF, \ + version & 0xFF + +/* + * Virtual Service Flags + */ +#define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ +#define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ +#define IP_VS_SVC_F_ONEPACKET 0x0004 /* one-packet scheduling */ + +/* + * Destination Server Flags + */ +#define IP_VS_DEST_F_AVAILABLE 0x0001 /* server is available */ +#define IP_VS_DEST_F_OVERLOAD 0x0002 /* server is overloaded */ + +/* + * IPVS sync daemon states + */ +#define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */ +#define IP_VS_STATE_MASTER 0x0001 /* started as master */ +#define IP_VS_STATE_BACKUP 0x0002 /* started as backup */ + +/* + * IPVS socket options + */ +#define IP_VS_BASE_CTL (64+1024+64) /* base */ + +#define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */ +#define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1) +#define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2) +#define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3) +#define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4) +#define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5) +#define IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6) +#define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7) +#define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8) +#define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9) +#define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10) +#define IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11) +#define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12) +#define IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13) +#define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14) +#define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15) +#define IP_VS_SO_SET_MAX 
IP_VS_SO_SET_ZERO + +#define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL +#define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) +#define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2) +#define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3) +#define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4) +#define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */ +#define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6) +#define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7) +#define IP_VS_SO_GET_MAX IP_VS_SO_GET_DAEMON + + +/* + * IPVS Connection Flags + * Only flags 0..15 are sent to backup server + */ +#define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ +#define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ +#define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */ +#define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */ +#define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */ +#define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */ +#define IP_VS_CONN_F_SYNC 0x0020 /* entry created by sync */ +#define IP_VS_CONN_F_HASHED 0x0040 /* hashed entry */ +#define IP_VS_CONN_F_NOOUTPUT 0x0080 /* no output packets */ +#define IP_VS_CONN_F_INACTIVE 0x0100 /* not established */ +#define IP_VS_CONN_F_OUT_SEQ 0x0200 /* must do output seq adjust */ +#define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */ +#define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ +#define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ +#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ +#define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ + +#define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \ + IP_VS_CONN_F_NOOUTPUT | \ + IP_VS_CONN_F_INACTIVE | \ + IP_VS_CONN_F_SEQ_MASK | \ + IP_VS_CONN_F_NO_CPORT | \ + IP_VS_CONN_F_TEMPLATE \ + ) + +/* Flags that are not sent to backup server start from bit 16 */ +#define IP_VS_CONN_F_NFCT (1 << 16) /* use netfilter conntrack */ + +/* Connection flags from destination that can be changed by user space */ +#define IP_VS_CONN_F_DEST_MASK (IP_VS_CONN_F_FWD_MASK | \ + IP_VS_CONN_F_ONE_PACKET | \ + IP_VS_CONN_F_NFCT | \ + 0) + +#define IP_VS_SCHEDNAME_MAXLEN 16 +#define IP_VS_PENAME_MAXLEN 16 +#define IP_VS_IFNAME_MAXLEN 16 + +#define IP_VS_PEDATA_MAXLEN 255 + +/* + * The struct ip_vs_service_user and struct ip_vs_dest_user are + * used to set IPVS rules through setsockopt. 
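[Aside: a userspace sketch of adding a virtual service through the IP_VS_SO_SET_* setsockopt interface, using struct ip_vs_service_user defined just below. The raw socket plus IPPROTO_IP level follows the convention ipvsadm uses and is an assumption here; the address and scheduler name are examples only, and CAP_NET_ADMIN is required.]

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/ip_vs.h>

int main(void)
{
        struct ip_vs_service_user svc;
        int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

        if (fd < 0) {
                perror("socket (needs CAP_NET_ADMIN)");
                return 1;
        }

        memset(&svc, 0, sizeof(svc));
        svc.protocol = IPPROTO_TCP;
        svc.addr     = inet_addr("192.0.2.1");  /* virtual IP, network order */
        svc.port     = htons(80);
        strncpy(svc.sched_name, "rr", IP_VS_SCHEDNAME_MAXLEN - 1);

        if (setsockopt(fd, IPPROTO_IP, IP_VS_SO_SET_ADD, &svc, sizeof(svc)) < 0)
                perror("IP_VS_SO_SET_ADD");
        return 0;
}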
+ */ +struct ip_vs_service_user { + /* virtual service addresses */ + __u16 protocol; + __be32 addr; /* virtual ip address */ + __be16 port; + __u32 fwmark; /* firwall mark of service */ + + /* virtual service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout in sec */ + __be32 netmask; /* persistent netmask */ +}; + + +struct ip_vs_dest_user { + /* destination server address */ + __be32 addr; + __be16 port; + + /* real server options */ + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + /* thresholds for active connections */ + __u32 u_threshold; /* upper threshold */ + __u32 l_threshold; /* lower threshold */ +}; + + +/* + * IPVS statistics object (for user space) + */ +struct ip_vs_stats_user { + __u32 conns; /* connections scheduled */ + __u32 inpkts; /* incoming packets */ + __u32 outpkts; /* outgoing packets */ + __u64 inbytes; /* incoming bytes */ + __u64 outbytes; /* outgoing bytes */ + + __u32 cps; /* current connection rate */ + __u32 inpps; /* current in packet rate */ + __u32 outpps; /* current out packet rate */ + __u32 inbps; /* current in byte rate */ + __u32 outbps; /* current out byte rate */ +}; + + +/* The argument to IP_VS_SO_GET_INFO */ +struct ip_vs_getinfo { + /* version number */ + unsigned int version; + + /* size of connection hash table */ + unsigned int size; + + /* number of virtual services */ + unsigned int num_services; +}; + + +/* The argument to IP_VS_SO_GET_SERVICE */ +struct ip_vs_service_entry { + /* which service: user fills in these */ + __u16 protocol; + __be32 addr; /* virtual address */ + __be16 port; + __u32 fwmark; /* firwall mark of service */ + + /* service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout */ + __be32 netmask; /* persistent netmask */ + + /* number of real servers */ + unsigned int num_dests; + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +struct ip_vs_dest_entry { + __be32 addr; /* destination address */ + __be16 port; + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + __u32 u_threshold; /* upper threshold */ + __u32 l_threshold; /* lower threshold */ + + __u32 activeconns; /* active connections */ + __u32 inactconns; /* inactive connections */ + __u32 persistconns; /* persistent connections */ + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +/* The argument to IP_VS_SO_GET_DESTS */ +struct ip_vs_get_dests { + /* which service: user fills in these */ + __u16 protocol; + __be32 addr; /* virtual address */ + __be16 port; + __u32 fwmark; /* firwall mark of service */ + + /* number of real servers */ + unsigned int num_dests; + + /* the real servers */ + struct ip_vs_dest_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_SERVICES */ +struct ip_vs_get_services { + /* number of virtual services */ + unsigned int num_services; + + /* service table */ + struct ip_vs_service_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_TIMEOUT */ +struct ip_vs_timeout_user { + int tcp_timeout; + int tcp_fin_timeout; + int udp_timeout; +}; + + +/* The argument to IP_VS_SO_GET_DAEMON */ +struct ip_vs_daemon_user { + /* sync daemon state (master/backup) */ + int state; + + /* multicast interface name */ + char mcast_ifn[IP_VS_IFNAME_MAXLEN]; + + /* SyncID we belong to */ + int syncid; +}; + +/* + * + * IPVS Generic Netlink interface 
definitions + * + */ + +/* Generic Netlink family info */ + +#define IPVS_GENL_NAME "IPVS" +#define IPVS_GENL_VERSION 0x1 + +struct ip_vs_flags { + __be32 flags; + __be32 mask; +}; + +/* Generic Netlink command attributes */ +enum { + IPVS_CMD_UNSPEC = 0, + + IPVS_CMD_NEW_SERVICE, /* add service */ + IPVS_CMD_SET_SERVICE, /* modify service */ + IPVS_CMD_DEL_SERVICE, /* delete service */ + IPVS_CMD_GET_SERVICE, /* get service info */ + + IPVS_CMD_NEW_DEST, /* add destination */ + IPVS_CMD_SET_DEST, /* modify destination */ + IPVS_CMD_DEL_DEST, /* delete destination */ + IPVS_CMD_GET_DEST, /* get destination info */ + + IPVS_CMD_NEW_DAEMON, /* start sync daemon */ + IPVS_CMD_DEL_DAEMON, /* stop sync daemon */ + IPVS_CMD_GET_DAEMON, /* get sync daemon status */ + + IPVS_CMD_SET_CONFIG, /* set config settings */ + IPVS_CMD_GET_CONFIG, /* get config settings */ + + IPVS_CMD_SET_INFO, /* only used in GET_INFO reply */ + IPVS_CMD_GET_INFO, /* get general IPVS info */ + + IPVS_CMD_ZERO, /* zero all counters and stats */ + IPVS_CMD_FLUSH, /* flush services and dests */ + + __IPVS_CMD_MAX, +}; + +#define IPVS_CMD_MAX (__IPVS_CMD_MAX - 1) + +/* Attributes used in the first level of commands */ +enum { + IPVS_CMD_ATTR_UNSPEC = 0, + IPVS_CMD_ATTR_SERVICE, /* nested service attribute */ + IPVS_CMD_ATTR_DEST, /* nested destination attribute */ + IPVS_CMD_ATTR_DAEMON, /* nested sync daemon attribute */ + IPVS_CMD_ATTR_TIMEOUT_TCP, /* TCP connection timeout */ + IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, /* TCP FIN wait timeout */ + IPVS_CMD_ATTR_TIMEOUT_UDP, /* UDP timeout */ + __IPVS_CMD_ATTR_MAX, +}; + +#define IPVS_CMD_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1) + +/* + * Attributes used to describe a service + * + * Used inside nested attribute IPVS_CMD_ATTR_SERVICE + */ +enum { + IPVS_SVC_ATTR_UNSPEC = 0, + IPVS_SVC_ATTR_AF, /* address family */ + IPVS_SVC_ATTR_PROTOCOL, /* virtual service protocol */ + IPVS_SVC_ATTR_ADDR, /* virtual service address */ + IPVS_SVC_ATTR_PORT, /* virtual service port */ + IPVS_SVC_ATTR_FWMARK, /* firewall mark of service */ + + IPVS_SVC_ATTR_SCHED_NAME, /* name of scheduler */ + IPVS_SVC_ATTR_FLAGS, /* virtual service flags */ + IPVS_SVC_ATTR_TIMEOUT, /* persistent timeout */ + IPVS_SVC_ATTR_NETMASK, /* persistent netmask */ + + IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */ + + IPVS_SVC_ATTR_PE_NAME, /* name of ct retriever */ + + __IPVS_SVC_ATTR_MAX, +}; + +#define IPVS_SVC_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1) + +/* + * Attributes used to describe a destination (real server) + * + * Used inside nested attribute IPVS_CMD_ATTR_DEST + */ +enum { + IPVS_DEST_ATTR_UNSPEC = 0, + IPVS_DEST_ATTR_ADDR, /* real server address */ + IPVS_DEST_ATTR_PORT, /* real server port */ + + IPVS_DEST_ATTR_FWD_METHOD, /* forwarding method */ + IPVS_DEST_ATTR_WEIGHT, /* destination weight */ + + IPVS_DEST_ATTR_U_THRESH, /* upper threshold */ + IPVS_DEST_ATTR_L_THRESH, /* lower threshold */ + + IPVS_DEST_ATTR_ACTIVE_CONNS, /* active connections */ + IPVS_DEST_ATTR_INACT_CONNS, /* inactive connections */ + IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */ + + IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */ + __IPVS_DEST_ATTR_MAX, +}; + +#define IPVS_DEST_ATTR_MAX (__IPVS_DEST_ATTR_MAX - 1) + +/* + * Attributes describing a sync daemon + * + * Used inside nested attribute IPVS_CMD_ATTR_DAEMON + */ +enum { + IPVS_DAEMON_ATTR_UNSPEC = 0, + IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */ + IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */ + 
IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */ + __IPVS_DAEMON_ATTR_MAX, +}; + +#define IPVS_DAEMON_ATTR_MAX (__IPVS_DAEMON_ATTR_MAX - 1) + +/* + * Attributes used to describe service or destination entry statistics + * + * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS + */ +enum { + IPVS_STATS_ATTR_UNSPEC = 0, + IPVS_STATS_ATTR_CONNS, /* connections scheduled */ + IPVS_STATS_ATTR_INPKTS, /* incoming packets */ + IPVS_STATS_ATTR_OUTPKTS, /* outgoing packets */ + IPVS_STATS_ATTR_INBYTES, /* incoming bytes */ + IPVS_STATS_ATTR_OUTBYTES, /* outgoing bytes */ + + IPVS_STATS_ATTR_CPS, /* current connection rate */ + IPVS_STATS_ATTR_INPPS, /* current in packet rate */ + IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */ + IPVS_STATS_ATTR_INBPS, /* current in byte rate */ + IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */ + __IPVS_STATS_ATTR_MAX, +}; + +#define IPVS_STATS_ATTR_MAX (__IPVS_STATS_ATTR_MAX - 1) + +/* Attributes used in response to IPVS_CMD_GET_INFO command */ +enum { + IPVS_INFO_ATTR_UNSPEC = 0, + IPVS_INFO_ATTR_VERSION, /* IPVS version number */ + IPVS_INFO_ATTR_CONN_TAB_SIZE, /* size of connection hash table */ + __IPVS_INFO_ATTR_MAX, +}; + +#define IPVS_INFO_ATTR_MAX (__IPVS_INFO_ATTR_MAX - 1) + +#endif /* _IP_VS_H */ diff --git a/include/linux/ipc.h b/include/linux/ipc.h new file mode 100644 index 00000000..30e81614 --- /dev/null +++ b/include/linux/ipc.h @@ -0,0 +1,103 @@ +#ifndef _LINUX_IPC_H +#define _LINUX_IPC_H + +#include <linux/types.h> + +#define IPC_PRIVATE ((__kernel_key_t) 0) + +/* Obsolete, used only for backwards compatibility and libc5 compiles */ +struct ipc_perm +{ + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; +}; + +/* Include the definition of ipc64_perm */ +#include <asm/ipcbuf.h> + +/* resource get request flags */ +#define IPC_CREAT 00001000 /* create if key is nonexistent */ +#define IPC_EXCL 00002000 /* fail if key exists */ +#define IPC_NOWAIT 00004000 /* return error on wait */ + +/* these fields are used by the DIPC package so the kernel as standard + should avoid using them if possible */ + +#define IPC_DIPC 00010000 /* make it distributed */ +#define IPC_OWN 00020000 /* this machine is the DIPC owner */ + +/* + * Control commands used with semctl, msgctl and shmctl + * see also specific commands in sem.h, msg.h and shm.h + */ +#define IPC_RMID 0 /* remove resource */ +#define IPC_SET 1 /* set ipc_perm options */ +#define IPC_STAT 2 /* get ipc_perm options */ +#define IPC_INFO 3 /* see ipcs */ + +/* + * Version flags for semctl, msgctl, and shmctl commands + * These are passed as bitflags or-ed with the actual command + */ +#define IPC_OLD 0 /* Old version (no 32-bit UID support on many + architectures) */ +#define IPC_64 0x0100 /* New version (support 32-bit UIDs, bigger + message sizes, etc. */ + +/* + * These are used to wrap system calls. + * + * See architecture code for ugly details.. 
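[Aside on the IPC_* flags above in ipc.h: a short userspace sketch that creates a brand-new System V message queue, failing if the key already exists, and then removes it again. msgget()/msgctl() come from the standard <sys/msg.h> userspace API; the ftok() path and project id are arbitrary examples.]

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
        key_t key = ftok("/tmp", 'q');                  /* any agreed-on key works */
        int id = msgget(key, IPC_CREAT | IPC_EXCL | 0600);

        if (id < 0) {
                perror("msgget");
                return 1;
        }
        msgctl(id, IPC_RMID, NULL);                     /* remove the queue again */
        return 0;
}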
+ */ +struct ipc_kludge { + struct msgbuf __user *msgp; + long msgtyp; +}; + +#define SEMOP 1 +#define SEMGET 2 +#define SEMCTL 3 +#define SEMTIMEDOP 4 +#define MSGSND 11 +#define MSGRCV 12 +#define MSGGET 13 +#define MSGCTL 14 +#define SHMAT 21 +#define SHMDT 22 +#define SHMGET 23 +#define SHMCTL 24 + +/* Used by the DIPC package, try and avoid reusing it */ +#define DIPC 25 + +#define IPCCALL(version,op) ((version)<<16 | (op)) + +#ifdef __KERNEL__ +#include <linux/spinlock.h> + +#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */ + +/* used by in-kernel data structures */ +struct kern_ipc_perm +{ + spinlock_t lock; + int deleted; + int id; + key_t key; + uid_t uid; + gid_t gid; + uid_t cuid; + gid_t cgid; + umode_t mode; + unsigned long seq; + void *security; +}; + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_IPC_H */ diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h new file mode 100644 index 00000000..8a297a5e --- /dev/null +++ b/include/linux/ipc_namespace.h @@ -0,0 +1,146 @@ +#ifndef __IPC_NAMESPACE_H__ +#define __IPC_NAMESPACE_H__ + +#include <linux/err.h> +#include <linux/idr.h> +#include <linux/rwsem.h> +#include <linux/notifier.h> +#include <linux/nsproxy.h> + +/* + * ipc namespace events + */ +#define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */ +#define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */ +#define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */ + +#define IPCNS_CALLBACK_PRI 0 + +struct user_namespace; + +struct ipc_ids { + int in_use; + unsigned short seq; + unsigned short seq_max; + struct rw_semaphore rw_mutex; + struct idr ipcs_idr; +}; + +struct ipc_namespace { + atomic_t count; + struct ipc_ids ids[3]; + + int sem_ctls[4]; + int used_sems; + + int msg_ctlmax; + int msg_ctlmnb; + int msg_ctlmni; + atomic_t msg_bytes; + atomic_t msg_hdrs; + int auto_msgmni; + + size_t shm_ctlmax; + size_t shm_ctlall; + int shm_ctlmni; + int shm_tot; + /* + * Defines whether IPC_RMID is forced for _all_ shm segments regardless + * of shmctl() + */ + int shm_rmid_forced; + + struct notifier_block ipcns_nb; + + /* The kern_mount of the mqueuefs sb. 
We take a ref on it */ + struct vfsmount *mq_mnt; + + /* # queues in this ns, protected by mq_lock */ + unsigned int mq_queues_count; + + /* next fields are set through sysctl */ + unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */ + unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */ + unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */ + + /* user_ns which owns the ipc ns */ + struct user_namespace *user_ns; +}; + +extern struct ipc_namespace init_ipc_ns; +extern atomic_t nr_ipc_ns; + +extern spinlock_t mq_lock; + +#ifdef CONFIG_SYSVIPC +extern int register_ipcns_notifier(struct ipc_namespace *); +extern int cond_register_ipcns_notifier(struct ipc_namespace *); +extern void unregister_ipcns_notifier(struct ipc_namespace *); +extern int ipcns_notify(unsigned long); +extern void shm_destroy_orphaned(struct ipc_namespace *ns); +#else /* CONFIG_SYSVIPC */ +static inline int register_ipcns_notifier(struct ipc_namespace *ns) +{ return 0; } +static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns) +{ return 0; } +static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { } +static inline int ipcns_notify(unsigned long l) { return 0; } +static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} +#endif /* CONFIG_SYSVIPC */ + +#ifdef CONFIG_POSIX_MQUEUE +extern int mq_init_ns(struct ipc_namespace *ns); +/* default values */ +#define DFLT_QUEUESMAX 256 /* max number of message queues */ +#define DFLT_MSGMAX 10 /* max number of messages in each queue */ +#define HARD_MSGMAX (32768*sizeof(void *)/4) +#define DFLT_MSGSIZEMAX 8192 /* max message size */ +#else +static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } +#endif + +#if defined(CONFIG_IPC_NS) +extern struct ipc_namespace *copy_ipcs(unsigned long flags, + struct task_struct *tsk); +static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) +{ + if (ns) + atomic_inc(&ns->count); + return ns; +} + +extern void put_ipc_ns(struct ipc_namespace *ns); +#else +static inline struct ipc_namespace *copy_ipcs(unsigned long flags, + struct task_struct *tsk) +{ + if (flags & CLONE_NEWIPC) + return ERR_PTR(-EINVAL); + + return tsk->nsproxy->ipc_ns; +} + +static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) +{ + return ns; +} + +static inline void put_ipc_ns(struct ipc_namespace *ns) +{ +} +#endif + +#ifdef CONFIG_POSIX_MQUEUE_SYSCTL + +struct ctl_table_header; +extern struct ctl_table_header *mq_register_sysctl_table(void); + +#else /* CONFIG_POSIX_MQUEUE_SYSCTL */ + +static inline struct ctl_table_header *mq_register_sysctl_table(void) +{ + return NULL; +} + +#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */ +#endif diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h new file mode 100644 index 00000000..48dcba9b --- /dev/null +++ b/include/linux/ipmi.h @@ -0,0 +1,736 @@ +/* + * ipmi.h + * + * MontaVista IPMI interface + * + * Author: MontaVista Software, Inc. + * Corey Minyard <minyard@mvista.com> + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_IPMI_H +#define __LINUX_IPMI_H + +#include <linux/ipmi_msgdefs.h> +#include <linux/compiler.h> + +/* + * This file describes an interface to an IPMI driver. You have to + * have a fairly good understanding of IPMI to use this, so go read + * the specs first before actually trying to do anything. + * + * With that said, this driver provides a multi-user interface to the + * IPMI driver, and it allows multiple IPMI physical interfaces below + * the driver. The physical interfaces bind as a lower layer on the + * driver. They appear as interfaces to the application using this + * interface. + * + * Multi-user means that multiple applications may use the driver, + * send commands, receive responses, etc. The driver keeps track of + * commands the user sends and tracks the responses. The responses + * will go back to the application that send the command. If the + * response doesn't come back in time, the driver will return a + * timeout error response to the application. Asynchronous events + * from the BMC event queue will go to all users bound to the driver. + * The incoming event queue in the BMC will automatically be flushed + * if it becomes full and it is queried once a second to see if + * anything is in it. Incoming commands to the driver will get + * delivered as commands. + * + * This driver provides two main interfaces: one for in-kernel + * applications and another for userland applications. The + * capabilities are basically the same for both interface, although + * the interfaces are somewhat different. The stuff in the + * #ifdef __KERNEL__ below is the in-kernel interface. The userland + * interface is defined later in the file. */ + + + +/* + * This is an overlay for all the address types, so it's easy to + * determine the actual address type. This is kind of like addresses + * work for sockets. + */ +#define IPMI_MAX_ADDR_SIZE 32 +struct ipmi_addr { + /* Try to take these from the "Channel Medium Type" table + in section 6.5 of the IPMI 1.5 manual. */ + int addr_type; + short channel; + char data[IPMI_MAX_ADDR_SIZE]; +}; + +/* + * When the address is not used, the type will be set to this value. + * The channel is the BMC's channel number for the channel (usually + * 0), or IPMC_BMC_CHANNEL if communicating directly with the BMC. + */ +#define IPMI_SYSTEM_INTERFACE_ADDR_TYPE 0x0c +struct ipmi_system_interface_addr { + int addr_type; + short channel; + unsigned char lun; +}; + +/* An IPMB Address. */ +#define IPMI_IPMB_ADDR_TYPE 0x01 +/* Used for broadcast get device id as described in section 17.9 of the + IPMI 1.5 manual. */ +#define IPMI_IPMB_BROADCAST_ADDR_TYPE 0x41 +struct ipmi_ipmb_addr { + int addr_type; + short channel; + unsigned char slave_addr; + unsigned char lun; +}; + +/* + * A LAN Address. 
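[Aside: a tiny sketch of filling in the system-interface address defined above for talking to the local BMC. IPMI_BMC_CHANNEL is defined a little further down in this same header; the helper name is hypothetical.]

#include <linux/ipmi.h>

static void foo_fill_bmc_addr(struct ipmi_system_interface_addr *si)
{
        si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
        si->channel   = IPMI_BMC_CHANNEL;       /* talk straight to the BMC */
        si->lun       = 0;
}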
This is an address to/from a LAN interface bridged + * by the BMC, not an address actually out on the LAN. + * + * A conscious decision was made here to deviate slightly from the IPMI + * spec. We do not use rqSWID and rsSWID like it shows in the + * message. Instead, we use remote_SWID and local_SWID. This means + * that any message (a request or response) from another device will + * always have exactly the same address. If you didn't do this, + * requests and responses from the same device would have different + * addresses, and that's not too cool. + * + * In this address, the remote_SWID is always the SWID the remote + * message came from, or the SWID we are sending the message to. + * local_SWID is always our SWID. Note that having our SWID in the + * message is a little weird, but this is required. + */ +#define IPMI_LAN_ADDR_TYPE 0x04 +struct ipmi_lan_addr { + int addr_type; + short channel; + unsigned char privilege; + unsigned char session_handle; + unsigned char remote_SWID; + unsigned char local_SWID; + unsigned char lun; +}; + + +/* + * Channel for talking directly with the BMC. When using this + * channel, This is for the system interface address type only. FIXME + * - is this right, or should we use -1? + */ +#define IPMI_BMC_CHANNEL 0xf +#define IPMI_NUM_CHANNELS 0x10 + +/* + * Used to signify an "all channel" bitmask. This is more than the + * actual number of channels because this is used in userland and + * will cover us if the number of channels is extended. + */ +#define IPMI_CHAN_ALL (~0) + + +/* + * A raw IPMI message without any addressing. This covers both + * commands and responses. The completion code is always the first + * byte of data in the response (as the spec shows the messages laid + * out). + */ +struct ipmi_msg { + unsigned char netfn; + unsigned char cmd; + unsigned short data_len; + unsigned char __user *data; +}; + +struct kernel_ipmi_msg { + unsigned char netfn; + unsigned char cmd; + unsigned short data_len; + unsigned char *data; +}; + +/* + * Various defines that are useful for IPMI applications. + */ +#define IPMI_INVALID_CMD_COMPLETION_CODE 0xC1 +#define IPMI_TIMEOUT_COMPLETION_CODE 0xC3 +#define IPMI_UNKNOWN_ERR_COMPLETION_CODE 0xff + + +/* + * Receive types for messages coming from the receive interface. This + * is used for the receive in-kernel interface and in the receive + * IOCTL. + * + * The "IPMI_RESPONSE_RESPNOSE_TYPE" is a little strange sounding, but + * it allows you to get the message results when you send a response + * message. + */ +#define IPMI_RESPONSE_RECV_TYPE 1 /* A response to a command */ +#define IPMI_ASYNC_EVENT_RECV_TYPE 2 /* Something from the event queue */ +#define IPMI_CMD_RECV_TYPE 3 /* A command from somewhere else */ +#define IPMI_RESPONSE_RESPONSE_TYPE 4 /* The response for + a sent response, giving any + error status for sending the + response. When you send a + response message, this will + be returned. */ +#define IPMI_OEM_RECV_TYPE 5 /* The response for OEM Channels */ + +/* Note that async events and received commands do not have a completion + code as the first byte of the incoming data, unlike a response. */ + + +/* + * Modes for ipmi_set_maint_mode() and the userland IOCTL. The AUTO + * setting is the default and means it will be set on certain + * commands. Hard setting it on and off will override automatic + * operation. + */ +#define IPMI_MAINTENANCE_MODE_AUTO 0 +#define IPMI_MAINTENANCE_MODE_OFF 1 +#define IPMI_MAINTENANCE_MODE_ON 2 + +#ifdef __KERNEL__ + +/* + * The in-kernel interface. 
+ */ +#include <linux/list.h> +#include <linux/proc_fs.h> + +struct module; +struct device; + +/* Opaque type for a IPMI message user. One of these is needed to + send and receive messages. */ +typedef struct ipmi_user *ipmi_user_t; + +/* + * Stuff coming from the receive interface comes as one of these. + * They are allocated, the receiver must free them with + * ipmi_free_recv_msg() when done with the message. The link is not + * used after the message is delivered, so the upper layer may use the + * link to build a linked list, if it likes. + */ +struct ipmi_recv_msg { + struct list_head link; + + /* The type of message as defined in the "Receive Types" + defines above. */ + int recv_type; + + ipmi_user_t user; + struct ipmi_addr addr; + long msgid; + struct kernel_ipmi_msg msg; + + /* The user_msg_data is the data supplied when a message was + sent, if this is a response to a sent message. If this is + not a response to a sent message, then user_msg_data will + be NULL. If the user above is NULL, then this will be the + intf. */ + void *user_msg_data; + + /* Call this when done with the message. It will presumably free + the message and do any other necessary cleanup. */ + void (*done)(struct ipmi_recv_msg *msg); + + /* Place-holder for the data, don't make any assumptions about + the size or existence of this, since it may change. */ + unsigned char msg_data[IPMI_MAX_MSG_LENGTH]; +}; + +/* Allocate and free the receive message. */ +void ipmi_free_recv_msg(struct ipmi_recv_msg *msg); + +struct ipmi_user_hndl { + /* Routine type to call when a message needs to be routed to + the upper layer. This will be called with some locks held, + the only IPMI routines that can be called are ipmi_request + and the alloc/free operations. The handler_data is the + variable supplied when the receive handler was registered. */ + void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg, + void *user_msg_data); + + /* Called when the interface detects a watchdog pre-timeout. If + this is NULL, it will be ignored for the user. */ + void (*ipmi_watchdog_pretimeout)(void *handler_data); +}; + +/* Create a new user of the IPMI layer on the given interface number. */ +int ipmi_create_user(unsigned int if_num, + struct ipmi_user_hndl *handler, + void *handler_data, + ipmi_user_t *user); + +/* Destroy the given user of the IPMI layer. Note that after this + function returns, the system is guaranteed to not call any + callbacks for the user. Thus as long as you destroy all the users + before you unload a module, you will be safe. And if you destroy + the users before you destroy the callback structures, it should be + safe, too. */ +int ipmi_destroy_user(ipmi_user_t user); + +/* Get the IPMI version of the BMC we are talking to. */ +void ipmi_get_version(ipmi_user_t user, + unsigned char *major, + unsigned char *minor); + +/* Set and get the slave address and LUN that we will use for our + source messages. Note that this affects the interface, not just + this user, so it will affect all users of this interface. This is + so some initialization code can come in and do the OEM-specific + things it takes to determine your address (if not the BMC) and set + it for everyone else. Note that each channel can have its own address. 
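[Aside on the in-kernel interface above: a minimal sketch of binding to IPMI interface 0 with ipmi_create_user() and a receive handler. The handler here only logs and frees the message; the names and the choice of interface number are hypothetical.]

#include <linux/ipmi.h>
#include <linux/printk.h>

static void foo_ipmi_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
{
        pr_info("ipmi: recv_type=%d cmd=0x%02x\n", msg->recv_type, msg->msg.cmd);
        ipmi_free_recv_msg(msg);        /* receiver owns the message */
}

static struct ipmi_user_hndl foo_ipmi_hndl = {
        .ipmi_recv_hndl = foo_ipmi_recv,
};

static ipmi_user_t foo_user;

static int foo_ipmi_bind(void)
{
        return ipmi_create_user(0 /* interface number */, &foo_ipmi_hndl,
                                NULL, &foo_user);
}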
*/ +int ipmi_set_my_address(ipmi_user_t user, + unsigned int channel, + unsigned char address); +int ipmi_get_my_address(ipmi_user_t user, + unsigned int channel, + unsigned char *address); +int ipmi_set_my_LUN(ipmi_user_t user, + unsigned int channel, + unsigned char LUN); +int ipmi_get_my_LUN(ipmi_user_t user, + unsigned int channel, + unsigned char *LUN); + +/* + * Like ipmi_request, but lets you specify the number of retries and + * the retry time. The retries is the number of times the message + * will be resent if no reply is received. If set to -1, the default + * value will be used. The retry time is the time in milliseconds + * between retries. If set to zero, the default value will be + * used. + * + * Don't use this unless you *really* have to. It's primarily for the + * IPMI over LAN converter; since the LAN stuff does its own retries, + * it makes no sense to do it here. However, this can be used if you + * have unusual requirements. + */ +int ipmi_request_settime(ipmi_user_t user, + struct ipmi_addr *addr, + long msgid, + struct kernel_ipmi_msg *msg, + void *user_msg_data, + int priority, + int max_retries, + unsigned int retry_time_ms); + +/* + * Like ipmi_request, but with messages supplied. This will not + * allocate any memory, and the messages may be statically allocated + * (just make sure to do the "done" handling on them). Note that this + * is primarily for the watchdog timer, since it should be able to + * send messages even if no memory is available. This is subject to + * change as the system changes, so don't use it unless you REALLY + * have to. + */ +int ipmi_request_supply_msgs(ipmi_user_t user, + struct ipmi_addr *addr, + long msgid, + struct kernel_ipmi_msg *msg, + void *user_msg_data, + void *supplied_smi, + struct ipmi_recv_msg *supplied_recv, + int priority); + +/* + * Poll the IPMI interface for the user. This causes the IPMI code to + * do an immediate check for information from the driver and handle + * anything that is immediately pending. This will not block in any + * way. This is useful if you need to spin waiting for something to + * happen in the IPMI driver. + */ +void ipmi_poll_interface(ipmi_user_t user); + +/* + * When commands come in to the SMS, the user can register to receive + * them. Only one user can be listening on a specific netfn/cmd/chan tuple + * at a time, you will get an EBUSY error if the command is already + * registered. If a command is received that does not have a user + * registered, the driver will automatically return the proper + * error. Channels are specified as a bitfield, use IPMI_CHAN_ALL to + * mean all channels. + */ +int ipmi_register_for_cmd(ipmi_user_t user, + unsigned char netfn, + unsigned char cmd, + unsigned int chans); +int ipmi_unregister_for_cmd(ipmi_user_t user, + unsigned char netfn, + unsigned char cmd, + unsigned int chans); + +/* + * Go into a mode where the driver will not autonomously attempt to do + * things with the interface. It will still respond to attentions and + * interrupts, and it will expect that commands will complete. It + * will not automatcially check for flags, events, or things of that + * nature. + * + * This is primarily used for firmware upgrades. The idea is that + * when you go into firmware upgrade mode, you do this operation + * and the driver will not attempt to do anything but what you tell + * it or what the BMC asks for. + * + * Note that if you send a command that resets the BMC, the driver + * will still expect a response from that command. 
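[Aside: continuing the previous sketch, this sends a Get Device ID request to the BMC with ipmi_request_settime(), using the default retry parameters. The netfn/cmd constants come from linux/ipmi_msgdefs.h (included by this header); the msgid value is arbitrary and is echoed back in the response.]

static int foo_ipmi_get_device_id(ipmi_user_t user)
{
        struct ipmi_system_interface_addr si = {
                .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
                .channel   = IPMI_BMC_CHANNEL,
        };
        struct kernel_ipmi_msg msg = {
                .netfn    = IPMI_NETFN_APP_REQUEST,
                .cmd      = IPMI_GET_DEVICE_ID_CMD,
                .data     = NULL,
                .data_len = 0,
        };

        /* -1 retries and 0 ms retry time select the driver defaults */
        return ipmi_request_settime(user, (struct ipmi_addr *)&si, 1234,
                                    &msg, NULL, 0, -1, 0);
}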
So the BMC should + * reset itself *after* the response is sent. Resetting before the + * response is just silly. + * + * If in auto maintenance mode, the driver will automatically go into + * maintenance mode for 30 seconds if it sees a cold reset, a warm + * reset, or a firmware NetFN. This means that code that uses only + * firmware NetFN commands to do upgrades will work automatically + * without change, assuming it sends a message every 30 seconds or + * less. + * + * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means. + */ +int ipmi_get_maintenance_mode(ipmi_user_t user); +int ipmi_set_maintenance_mode(ipmi_user_t user, int mode); + +/* + * When the user is created, it will not receive IPMI events by + * default. The user must set this to TRUE to get incoming events. + * The first user that sets this to TRUE will receive all events that + * have been queued while no one was waiting for events. + */ +int ipmi_set_gets_events(ipmi_user_t user, int val); + +/* + * Called when a new SMI is registered. This will also be called on + * every existing interface when a new watcher is registered with + * ipmi_smi_watcher_register(). + */ +struct ipmi_smi_watcher { + struct list_head link; + + /* You must set the owner to the current module, if you are in + a module (generally just set it to "THIS_MODULE"). */ + struct module *owner; + + /* These two are called with read locks held for the interface + the watcher list. So you can add and remove users from the + IPMI interface, send messages, etc., but you cannot add + or remove SMI watchers or SMI interfaces. */ + void (*new_smi)(int if_num, struct device *dev); + void (*smi_gone)(int if_num); +}; + +int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher); +int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher); + +/* The following are various helper functions for dealing with IPMI + addresses. */ + +/* Return the maximum length of an IPMI address given it's type. */ +unsigned int ipmi_addr_length(int addr_type); + +/* Validate that the given IPMI address is valid. */ +int ipmi_validate_addr(struct ipmi_addr *addr, int len); + +/* + * How did the IPMI driver find out about the device? + */ +enum ipmi_addr_src { + SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, + SI_PCI, SI_DEVICETREE, SI_DEFAULT +}; + +union ipmi_smi_info_union { + /* + * the acpi_info element is defined for the SI_ACPI + * address type + */ + struct { + void *acpi_handle; + } acpi_info; +}; + +struct ipmi_smi_info { + enum ipmi_addr_src addr_src; + + /* + * Base device for the interface. Don't forget to put this when + * you are done. + */ + struct device *dev; + + /* + * The addr_info provides more detailed info for some IPMI + * devices, depending on the addr_src. Currently only SI_ACPI + * info is provided. + */ + union ipmi_smi_info_union addr_info; +}; + +/* This is to get the private info of ipmi_smi_t */ +extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data); + +#endif /* __KERNEL__ */ + + +/* + * The userland interface + */ + +/* + * The userland interface for the IPMI driver is a standard character + * device, with each instance of an interface registered as a minor + * number under the major character device. + * + * The read and write calls do not work, to get messages in and out + * requires ioctl calls because of the complexity of the data. select + * and poll do work, so you can wait for input using the file + * descriptor, you just can use read to get it. 
+ * + * In general, you send a command down to the interface and receive + * responses back. You can use the msgid value to correlate commands + * and responses, the driver will take care of figuring out which + * incoming messages are for which command and find the proper msgid + * value to report. You will only receive reponses for commands you + * send. Asynchronous events, however, go to all open users, so you + * must be ready to handle these (or ignore them if you don't care). + * + * The address type depends upon the channel type. When talking + * directly to the BMC (IPMC_BMC_CHANNEL), the address is ignored + * (IPMI_UNUSED_ADDR_TYPE). When talking to an IPMB channel, you must + * supply a valid IPMB address with the addr_type set properly. + * + * When talking to normal channels, the driver takes care of the + * details of formatting and sending messages on that channel. You do + * not, for instance, have to format a send command, you just send + * whatever command you want to the channel, the driver will create + * the send command, automatically issue receive command and get even + * commands, and pass those up to the proper user. + */ + + +/* The magic IOCTL value for this interface. */ +#define IPMI_IOC_MAGIC 'i' + + +/* Messages sent to the interface are this format. */ +struct ipmi_req { + unsigned char __user *addr; /* Address to send the message to. */ + unsigned int addr_len; + + long msgid; /* The sequence number for the message. This + exact value will be reported back in the + response to this request if it is a command. + If it is a response, this will be used as + the sequence value for the response. */ + + struct ipmi_msg msg; +}; +/* + * Send a message to the interfaces. error values are: + * - EFAULT - an address supplied was invalid. + * - EINVAL - The address supplied was not valid, or the command + * was not allowed. + * - EMSGSIZE - The message to was too large. + * - ENOMEM - Buffers could not be allocated for the command. + */ +#define IPMICTL_SEND_COMMAND _IOR(IPMI_IOC_MAGIC, 13, \ + struct ipmi_req) + +/* Messages sent to the interface with timing parameters are this + format. */ +struct ipmi_req_settime { + struct ipmi_req req; + + /* See ipmi_request_settime() above for details on these + values. */ + int retries; + unsigned int retry_time_ms; +}; +/* + * Send a message to the interfaces with timing parameters. error values + * are: + * - EFAULT - an address supplied was invalid. + * - EINVAL - The address supplied was not valid, or the command + * was not allowed. + * - EMSGSIZE - The message to was too large. + * - ENOMEM - Buffers could not be allocated for the command. + */ +#define IPMICTL_SEND_COMMAND_SETTIME _IOR(IPMI_IOC_MAGIC, 21, \ + struct ipmi_req_settime) + +/* Messages received from the interface are this format. */ +struct ipmi_recv { + int recv_type; /* Is this a command, response or an + asyncronous event. */ + + unsigned char __user *addr; /* Address the message was from is put + here. The caller must supply the + memory. */ + unsigned int addr_len; /* The size of the address buffer. + The caller supplies the full buffer + length, this value is updated to + the actual message length when the + message is received. */ + + long msgid; /* The sequence number specified in the request + if this is a response. If this is a command, + this will be the sequence number from the + command. */ + + struct ipmi_msg msg; /* The data field must point to a buffer. + The data_size field must be set to the + size of the message buffer. 
The + caller supplies the full buffer + length, this value is updated to the + actual message length when the message + is received. */ +}; + +/* + * Receive a message. error values: + * - EAGAIN - no messages in the queue. + * - EFAULT - an address supplied was invalid. + * - EINVAL - The address supplied was not valid. + * - EMSGSIZE - The message to was too large to fit into the message buffer, + * the message will be left in the buffer. */ +#define IPMICTL_RECEIVE_MSG _IOWR(IPMI_IOC_MAGIC, 12, \ + struct ipmi_recv) + +/* + * Like RECEIVE_MSG, but if the message won't fit in the buffer, it + * will truncate the contents instead of leaving the data in the + * buffer. + */ +#define IPMICTL_RECEIVE_MSG_TRUNC _IOWR(IPMI_IOC_MAGIC, 11, \ + struct ipmi_recv) + +/* Register to get commands from other entities on this interface. */ +struct ipmi_cmdspec { + unsigned char netfn; + unsigned char cmd; +}; + +/* + * Register to receive a specific command. error values: + * - EFAULT - an address supplied was invalid. + * - EBUSY - The netfn/cmd supplied was already in use. + * - ENOMEM - could not allocate memory for the entry. + */ +#define IPMICTL_REGISTER_FOR_CMD _IOR(IPMI_IOC_MAGIC, 14, \ + struct ipmi_cmdspec) +/* + * Unregister a regsitered command. error values: + * - EFAULT - an address supplied was invalid. + * - ENOENT - The netfn/cmd was not found registered for this user. + */ +#define IPMICTL_UNREGISTER_FOR_CMD _IOR(IPMI_IOC_MAGIC, 15, \ + struct ipmi_cmdspec) + +/* + * Register to get commands from other entities on specific channels. + * This way, you can only listen on specific channels, or have messages + * from some channels go to one place and other channels to someplace + * else. The chans field is a bitmask, (1 << channel) for each channel. + * It may be IPMI_CHAN_ALL for all channels. + */ +struct ipmi_cmdspec_chans { + unsigned int netfn; + unsigned int cmd; + unsigned int chans; +}; + +/* + * Register to receive a specific command on specific channels. error values: + * - EFAULT - an address supplied was invalid. + * - EBUSY - One of the netfn/cmd/chans supplied was already in use. + * - ENOMEM - could not allocate memory for the entry. + */ +#define IPMICTL_REGISTER_FOR_CMD_CHANS _IOR(IPMI_IOC_MAGIC, 28, \ + struct ipmi_cmdspec_chans) +/* + * Unregister some netfn/cmd/chans. error values: + * - EFAULT - an address supplied was invalid. + * - ENOENT - None of the netfn/cmd/chans were found registered for this user. + */ +#define IPMICTL_UNREGISTER_FOR_CMD_CHANS _IOR(IPMI_IOC_MAGIC, 29, \ + struct ipmi_cmdspec_chans) + +/* + * Set whether this interface receives events. Note that the first + * user registered for events will get all pending events for the + * interface. error values: + * - EFAULT - an address supplied was invalid. + */ +#define IPMICTL_SET_GETS_EVENTS_CMD _IOR(IPMI_IOC_MAGIC, 16, int) + +/* + * Set and get the slave address and LUN that we will use for our + * source messages. Note that this affects the interface, not just + * this user, so it will affect all users of this interface. This is + * so some initialization code can come in and do the OEM-specific + * things it takes to determine your address (if not the BMC) and set + * it for everyone else. You should probably leave the LUN alone. 
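[Aside on the userland interface described above: a sketch of sending a Get Device ID command through the character device with IPMICTL_SEND_COMMAND. /dev/ipmi0 is the conventional node name for interface 0 and is an assumption here; the response would then be collected with IPMICTL_RECEIVE_MSG, which is omitted for brevity.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ipmi.h>

int main(void)
{
        struct ipmi_system_interface_addr si;
        struct ipmi_req req;
        int fd = open("/dev/ipmi0", O_RDWR);

        if (fd < 0) {
                perror("open /dev/ipmi0");
                return 1;
        }

        memset(&si, 0, sizeof(si));
        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
        si.channel   = IPMI_BMC_CHANNEL;

        memset(&req, 0, sizeof(req));
        req.addr         = (unsigned char *)&si;
        req.addr_len     = sizeof(si);
        req.msgid        = 1;                   /* echoed back in the response */
        req.msg.netfn    = IPMI_NETFN_APP_REQUEST;
        req.msg.cmd      = IPMI_GET_DEVICE_ID_CMD;
        req.msg.data     = NULL;
        req.msg.data_len = 0;

        if (ioctl(fd, IPMICTL_SEND_COMMAND, &req) < 0)
                perror("IPMICTL_SEND_COMMAND");
        return 0;
}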
+ */ +struct ipmi_channel_lun_address_set { + unsigned short channel; + unsigned char value; +}; +#define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD \ + _IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set) +#define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD \ + _IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set) +#define IPMICTL_SET_MY_CHANNEL_LUN_CMD \ + _IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set) +#define IPMICTL_GET_MY_CHANNEL_LUN_CMD \ + _IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set) +/* Legacy interfaces, these only set IPMB 0. */ +#define IPMICTL_SET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 17, unsigned int) +#define IPMICTL_GET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 18, unsigned int) +#define IPMICTL_SET_MY_LUN_CMD _IOR(IPMI_IOC_MAGIC, 19, unsigned int) +#define IPMICTL_GET_MY_LUN_CMD _IOR(IPMI_IOC_MAGIC, 20, unsigned int) + +/* + * Get/set the default timing values for an interface. You shouldn't + * generally mess with these. + */ +struct ipmi_timing_parms { + int retries; + unsigned int retry_time_ms; +}; +#define IPMICTL_SET_TIMING_PARMS_CMD _IOR(IPMI_IOC_MAGIC, 22, \ + struct ipmi_timing_parms) +#define IPMICTL_GET_TIMING_PARMS_CMD _IOR(IPMI_IOC_MAGIC, 23, \ + struct ipmi_timing_parms) + +/* + * Set the maintenance mode. See ipmi_set_maintenance_mode() above + * for a description of what this does. + */ +#define IPMICTL_GET_MAINTENANCE_MODE_CMD _IOR(IPMI_IOC_MAGIC, 30, int) +#define IPMICTL_SET_MAINTENANCE_MODE_CMD _IOW(IPMI_IOC_MAGIC, 31, int) + +#endif /* __LINUX_IPMI_H */ diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h new file mode 100644 index 00000000..df97e6e3 --- /dev/null +++ b/include/linux/ipmi_msgdefs.h @@ -0,0 +1,121 @@ +/* + * ipmi_smi.h + * + * MontaVista IPMI system management interface + * + * Author: MontaVista Software, Inc. + * Corey Minyard <minyard@mvista.com> + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_IPMI_MSGDEFS_H +#define __LINUX_IPMI_MSGDEFS_H + +/* Various definitions for IPMI messages used by almost everything in + the IPMI stack. */ + +/* NetFNs and commands used inside the IPMI stack. 
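+
+   NetFNs come in request/response pairs: a request goes out on the
+   even value and the matching response comes back on that value plus
+   one.  For instance, a Get Device ID request is built from the pair
+   defined below (a sketch using the struct ipmi_msg fields from
+   linux/ipmi.h):
+
+       msg.netfn = IPMI_NETFN_APP_REQUEST;     (0x06)
+       msg.cmd   = IPMI_GET_DEVICE_ID_CMD;     (0x01)
+
+   and the reply arrives on IPMI_NETFN_APP_RESPONSE (0x07) with the
+   completion code as its first data byte.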
*/ + +#define IPMI_NETFN_SENSOR_EVENT_REQUEST 0x04 +#define IPMI_NETFN_SENSOR_EVENT_RESPONSE 0x05 +#define IPMI_GET_EVENT_RECEIVER_CMD 0x01 + +#define IPMI_NETFN_APP_REQUEST 0x06 +#define IPMI_NETFN_APP_RESPONSE 0x07 +#define IPMI_GET_DEVICE_ID_CMD 0x01 +#define IPMI_COLD_RESET_CMD 0x02 +#define IPMI_WARM_RESET_CMD 0x03 +#define IPMI_CLEAR_MSG_FLAGS_CMD 0x30 +#define IPMI_GET_DEVICE_GUID_CMD 0x08 +#define IPMI_GET_MSG_FLAGS_CMD 0x31 +#define IPMI_SEND_MSG_CMD 0x34 +#define IPMI_GET_MSG_CMD 0x33 +#define IPMI_SET_BMC_GLOBAL_ENABLES_CMD 0x2e +#define IPMI_GET_BMC_GLOBAL_ENABLES_CMD 0x2f +#define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35 +#define IPMI_GET_CHANNEL_INFO_CMD 0x42 + +/* Bit for BMC global enables. */ +#define IPMI_BMC_RCV_MSG_INTR 0x01 +#define IPMI_BMC_EVT_MSG_INTR 0x02 +#define IPMI_BMC_EVT_MSG_BUFF 0x04 +#define IPMI_BMC_SYS_LOG 0x08 + +#define IPMI_NETFN_STORAGE_REQUEST 0x0a +#define IPMI_NETFN_STORAGE_RESPONSE 0x0b +#define IPMI_ADD_SEL_ENTRY_CMD 0x44 + +#define IPMI_NETFN_FIRMWARE_REQUEST 0x08 +#define IPMI_NETFN_FIRMWARE_RESPONSE 0x09 + +/* The default slave address */ +#define IPMI_BMC_SLAVE_ADDR 0x20 + +/* The BT interface on high-end HP systems supports up to 255 bytes in + * one transfer. Its "virtual" BMC supports some commands that are longer + * than 128 bytes. Use the full 256, plus NetFn/LUN, Cmd, cCode, plus + * some overhead; it's not worth the effort to dynamically size this based + * on the results of the "Get BT Capabilities" command. */ +#define IPMI_MAX_MSG_LENGTH 272 /* multiple of 16 */ + +#define IPMI_CC_NO_ERROR 0x00 +#define IPMI_NODE_BUSY_ERR 0xc0 +#define IPMI_INVALID_COMMAND_ERR 0xc1 +#define IPMI_TIMEOUT_ERR 0xc3 +#define IPMI_ERR_MSG_TRUNCATED 0xc6 +#define IPMI_REQ_LEN_INVALID_ERR 0xc7 +#define IPMI_REQ_LEN_EXCEEDED_ERR 0xc8 +#define IPMI_NOT_IN_MY_STATE_ERR 0xd5 /* IPMI 2.0 */ +#define IPMI_LOST_ARBITRATION_ERR 0x81 +#define IPMI_BUS_ERR 0x82 +#define IPMI_NAK_ON_WRITE_ERR 0x83 +#define IPMI_ERR_UNSPECIFIED 0xff + +#define IPMI_CHANNEL_PROTOCOL_IPMB 1 +#define IPMI_CHANNEL_PROTOCOL_ICMB 2 +#define IPMI_CHANNEL_PROTOCOL_SMBUS 4 +#define IPMI_CHANNEL_PROTOCOL_KCS 5 +#define IPMI_CHANNEL_PROTOCOL_SMIC 6 +#define IPMI_CHANNEL_PROTOCOL_BT10 7 +#define IPMI_CHANNEL_PROTOCOL_BT15 8 +#define IPMI_CHANNEL_PROTOCOL_TMODE 9 + +#define IPMI_CHANNEL_MEDIUM_IPMB 1 +#define IPMI_CHANNEL_MEDIUM_ICMB10 2 +#define IPMI_CHANNEL_MEDIUM_ICMB09 3 +#define IPMI_CHANNEL_MEDIUM_8023LAN 4 +#define IPMI_CHANNEL_MEDIUM_ASYNC 5 +#define IPMI_CHANNEL_MEDIUM_OTHER_LAN 6 +#define IPMI_CHANNEL_MEDIUM_PCI_SMBUS 7 +#define IPMI_CHANNEL_MEDIUM_SMBUS1 8 +#define IPMI_CHANNEL_MEDIUM_SMBUS2 9 +#define IPMI_CHANNEL_MEDIUM_USB1 10 +#define IPMI_CHANNEL_MEDIUM_USB2 11 +#define IPMI_CHANNEL_MEDIUM_SYSINTF 12 +#define IPMI_CHANNEL_MEDIUM_OEM_MIN 0x60 +#define IPMI_CHANNEL_MEDIUM_OEM_MAX 0x7f + +#endif /* __LINUX_IPMI_MSGDEFS_H */ diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h new file mode 100644 index 00000000..fcb5d44e --- /dev/null +++ b/include/linux/ipmi_smi.h @@ -0,0 +1,242 @@ +/* + * ipmi_smi.h + * + * MontaVista IPMI system management interface + * + * Author: MontaVista Software, Inc. + * Corey Minyard <minyard@mvista.com> + * source@mvista.com + * + * Copyright 2002 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_IPMI_SMI_H +#define __LINUX_IPMI_SMI_H + +#include <linux/ipmi_msgdefs.h> +#include <linux/proc_fs.h> +#include <linux/platform_device.h> +#include <linux/ipmi.h> + +struct device; + +/* This files describes the interface for IPMI system management interface + drivers to bind into the IPMI message handler. */ + +/* Structure for the low-level drivers. */ +typedef struct ipmi_smi *ipmi_smi_t; + +/* + * Messages to/from the lower layer. The smi interface will take one + * of these to send. After the send has occurred and a response has + * been received, it will report this same data structure back up to + * the upper layer. If an error occurs, it should fill in the + * response with an error code in the completion code location. When + * asynchronous data is received, one of these is allocated, the + * data_size is set to zero and the response holds the data from the + * get message or get event command that the interface initiated. + * Note that it is the interfaces responsibility to detect + * asynchronous data and messages and request them from the + * interface. + */ +struct ipmi_smi_msg { + struct list_head link; + + long msgid; + void *user_data; + + int data_size; + unsigned char data[IPMI_MAX_MSG_LENGTH]; + + int rsp_size; + unsigned char rsp[IPMI_MAX_MSG_LENGTH]; + + /* Will be called when the system is done with the message + (presumably to free it). */ + void (*done)(struct ipmi_smi_msg *msg); +}; + +struct ipmi_smi_handlers { + struct module *owner; + + /* The low-level interface cannot start sending messages to + the upper layer until this function is called. This may + not be NULL, the lower layer must take the interface from + this call. */ + int (*start_processing)(void *send_info, + ipmi_smi_t new_intf); + + /* + * Get the detailed private info of the low level interface and store + * it into the structure of ipmi_smi_data. For example: the + * ACPI device handle will be returned for the pnp_acpi IPMI device. + */ + int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data); + + /* Called to enqueue an SMI message to be sent. This + operation is not allowed to fail. If an error occurs, it + should report back the error in a received message. It may + do this in the current call context, since no write locks + are held when this is run. If the priority is > 0, the + message will go into a high-priority queue and be sent + first. Otherwise, it goes into a normal-priority queue. 
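+
+          As a sketch of the error path (hypothetical driver code;
+          "intf" stands for the ipmi_smi_t handed over in
+          start_processing()), a sender that cannot transmit usually
+          synthesizes an error response and hands the message back:
+
+              msg->rsp[0] = msg->data[0] | 0x04;
+              msg->rsp[1] = msg->data[1];
+              msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+              msg->rsp_size = 3;
+              ipmi_smi_msg_received(intf, msg);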
*/ + void (*sender)(void *send_info, + struct ipmi_smi_msg *msg, + int priority); + + /* Called by the upper layer to request that we try to get + events from the BMC we are attached to. */ + void (*request_events)(void *send_info); + + /* Called when the interface should go into "run to + completion" mode. If this call sets the value to true, the + interface should make sure that all messages are flushed + out and that none are pending, and any new requests are run + to completion immediately. */ + void (*set_run_to_completion)(void *send_info, int run_to_completion); + + /* Called to poll for work to do. This is so upper layers can + poll for operations during things like crash dumps. */ + void (*poll)(void *send_info); + + /* Enable/disable firmware maintenance mode. Note that this + is *not* the modes defined, this is simply an on/off + setting. The message handler does the mode handling. Note + that this is called from interrupt context, so it cannot + block. */ + void (*set_maintenance_mode)(void *send_info, int enable); + + /* Tell the handler that we are using it/not using it. The + message handler get the modules that this handler belongs + to; this function lets the SMI claim any modules that it + uses. These may be NULL if this is not required. */ + int (*inc_usecount)(void *send_info); + void (*dec_usecount)(void *send_info); +}; + +struct ipmi_device_id { + unsigned char device_id; + unsigned char device_revision; + unsigned char firmware_revision_1; + unsigned char firmware_revision_2; + unsigned char ipmi_version; + unsigned char additional_device_support; + unsigned int manufacturer_id; + unsigned int product_id; + unsigned char aux_firmware_revision[4]; + unsigned int aux_firmware_revision_set : 1; +}; + +#define ipmi_version_major(v) ((v)->ipmi_version & 0xf) +#define ipmi_version_minor(v) ((v)->ipmi_version >> 4) + +/* Take a pointer to a raw data buffer and a length and extract device + id information from it. The first byte of data must point to the + netfn << 2, the data should be of the format: + netfn << 2, cmd, completion code, data + as normally comes from a device interface. */ +static inline int ipmi_demangle_device_id(const unsigned char *data, + unsigned int data_len, + struct ipmi_device_id *id) +{ + if (data_len < 9) + return -EINVAL; + if (data[0] != IPMI_NETFN_APP_RESPONSE << 2 || + data[1] != IPMI_GET_DEVICE_ID_CMD) + /* Strange, didn't get the response we expected. */ + return -EINVAL; + if (data[2] != 0) + /* That's odd, it shouldn't be able to fail. */ + return -EINVAL; + + data += 3; + data_len -= 3; + id->device_id = data[0]; + id->device_revision = data[1]; + id->firmware_revision_1 = data[2]; + id->firmware_revision_2 = data[3]; + id->ipmi_version = data[4]; + id->additional_device_support = data[5]; + if (data_len >= 11) { + id->manufacturer_id = (data[6] | (data[7] << 8) | + (data[8] << 16)); + id->product_id = data[9] | (data[10] << 8); + } else { + id->manufacturer_id = 0; + id->product_id = 0; + } + if (data_len >= 15) { + memcpy(id->aux_firmware_revision, data+11, 4); + id->aux_firmware_revision_set = 1; + } else + id->aux_firmware_revision_set = 0; + + return 0; +} + +/* Add a low-level interface to the IPMI driver. Note that if the + interface doesn't know its slave address, it should pass in zero. + The low-level interface should not deliver any messages to the + upper layer until the start_processing() function in the handlers + is called, and the lower layer must get the interface from that + call. 
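+
+   A registration sketch (the my_* names are placeholders for a
+   hypothetical driver and error handling is omitted):
+
+       static struct ipmi_smi_handlers my_handlers = {
+               .owner            = THIS_MODULE,
+               .start_processing = my_start_processing,
+               .sender           = my_sender,
+               .request_events   = my_request_events,
+       };
+
+       rv = ipmi_register_smi(&my_handlers, my_info, &my_dev_id,
+                              my_dev, "my_smi", IPMI_BMC_SLAVE_ADDR);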
*/ +int ipmi_register_smi(struct ipmi_smi_handlers *handlers, + void *send_info, + struct ipmi_device_id *device_id, + struct device *dev, + const char *sysfs_name, + unsigned char slave_addr); + +/* + * Remove a low-level interface from the IPMI driver. This will + * return an error if the interface is still in use by a user. + */ +int ipmi_unregister_smi(ipmi_smi_t intf); + +/* + * The lower layer reports received messages through this interface. + * The data_size should be zero if this is an asyncronous message. If + * the lower layer gets an error sending a message, it should format + * an error response in the message response. + */ +void ipmi_smi_msg_received(ipmi_smi_t intf, + struct ipmi_smi_msg *msg); + +/* The lower layer received a watchdog pre-timeout on interface. */ +void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf); + +struct ipmi_smi_msg *ipmi_alloc_smi_msg(void); +static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) +{ + msg->done(msg); +} + +/* Allow the lower layer to add things to the proc filesystem + directory for this interface. Note that the entry will + automatically be dstroyed when the interface is destroyed. */ +int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, + const struct file_operations *proc_ops, + void *data); + +#endif /* __LINUX_IPMI_SMI_H */ diff --git a/include/linux/ipsec.h b/include/linux/ipsec.h new file mode 100644 index 00000000..d17a6302 --- /dev/null +++ b/include/linux/ipsec.h @@ -0,0 +1,47 @@ +#ifndef _LINUX_IPSEC_H +#define _LINUX_IPSEC_H + +/* The definitions, required to talk to KAME racoon IKE. */ + +#include <linux/pfkeyv2.h> + +#define IPSEC_PORT_ANY 0 +#define IPSEC_ULPROTO_ANY 255 +#define IPSEC_PROTO_ANY 255 + +enum { + IPSEC_MODE_ANY = 0, /* We do not support this for SA */ + IPSEC_MODE_TRANSPORT = 1, + IPSEC_MODE_TUNNEL = 2, + IPSEC_MODE_BEET = 3 +}; + +enum { + IPSEC_DIR_ANY = 0, + IPSEC_DIR_INBOUND = 1, + IPSEC_DIR_OUTBOUND = 2, + IPSEC_DIR_FWD = 3, /* It is our own */ + IPSEC_DIR_MAX = 4, + IPSEC_DIR_INVALID = 5 +}; + +enum { + IPSEC_POLICY_DISCARD = 0, + IPSEC_POLICY_NONE = 1, + IPSEC_POLICY_IPSEC = 2, + IPSEC_POLICY_ENTRUST = 3, + IPSEC_POLICY_BYPASS = 4 +}; + +enum { + IPSEC_LEVEL_DEFAULT = 0, + IPSEC_LEVEL_USE = 1, + IPSEC_LEVEL_REQUIRE = 2, + IPSEC_LEVEL_UNIQUE = 3 +}; + +#define IPSEC_MANUAL_REQID_MAX 0x3fff + +#define IPSEC_REPLAYWSIZE 32 + +#endif /* _LINUX_IPSEC_H */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h new file mode 100644 index 00000000..8260ef77 --- /dev/null +++ b/include/linux/ipv6.h @@ -0,0 +1,544 @@ +#ifndef _IPV6_H +#define _IPV6_H + +#include <linux/types.h> +#include <linux/in6.h> +#include <asm/byteorder.h> + +/* The latest drafts declared increase in minimal mtu up to 1280. */ + +#define IPV6_MIN_MTU 1280 + +/* + * Advanced API + * source interface/address selection, source routing, etc... 
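+ *
+ * For instance, once IPV6_RECVPKTINFO is enabled on a socket, the
+ * receiving interface and destination address of each datagram are
+ * delivered as IPV6_PKTINFO ancillary data carrying a struct
+ * in6_pktinfo (declared below).  A sketch, given a struct msghdr msg
+ * already prepared for recvmsg() and with error handling omitted:
+ *
+ *      int on = 1;
+ *      struct in6_pktinfo *info = NULL;
+ *      struct cmsghdr *cmsg;
+ *
+ *      setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
+ *      recvmsg(fd, &msg, 0);
+ *      for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
+ *              if (cmsg->cmsg_level == IPPROTO_IPV6 &&
+ *                  cmsg->cmsg_type == IPV6_PKTINFO)
+ *                      info = (struct in6_pktinfo *)CMSG_DATA(cmsg);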
+ * *under construction* + */ + + +struct in6_pktinfo { + struct in6_addr ipi6_addr; + int ipi6_ifindex; +}; + +struct ip6_mtuinfo { + struct sockaddr_in6 ip6m_addr; + __u32 ip6m_mtu; +}; + +struct in6_ifreq { + struct in6_addr ifr6_addr; + __u32 ifr6_prefixlen; + int ifr6_ifindex; +}; + +#define IPV6_SRCRT_STRICT 0x01 /* Deprecated; will be removed */ +#define IPV6_SRCRT_TYPE_0 0 /* Deprecated; will be removed */ +#define IPV6_SRCRT_TYPE_2 2 /* IPv6 type 2 Routing Header */ + +/* + * routing header + */ +struct ipv6_rt_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + + /* + * type specific data + * variable length field + */ +}; + + +struct ipv6_opt_hdr { + __u8 nexthdr; + __u8 hdrlen; + /* + * TLV encoded option data follows. + */ +} __attribute__((packed)); /* required for some archs */ + +#define ipv6_destopt_hdr ipv6_opt_hdr +#define ipv6_hopopt_hdr ipv6_opt_hdr + +#ifdef __KERNEL__ +#define ipv6_optlen(p) (((p)->hdrlen+1) << 3) +#endif + +/* + * routing header type 0 (used in cmsghdr struct) + */ + +struct rt0_hdr { + struct ipv6_rt_hdr rt_hdr; + __u32 reserved; + struct in6_addr addr[0]; + +#define rt0_type rt_hdr.type +}; + +/* + * routing header type 2 + */ + +struct rt2_hdr { + struct ipv6_rt_hdr rt_hdr; + __u32 reserved; + struct in6_addr addr; + +#define rt2_type rt_hdr.type +}; + +/* + * home address option in destination options header + */ + +struct ipv6_destopt_hao { + __u8 type; + __u8 length; + struct in6_addr addr; +} __attribute__((packed)); + +/* + * IPv6 fixed header + * + * BEWARE, it is incorrect. The first 4 bits of flow_lbl + * are glued to priority now, forming "class". + */ + +struct ipv6hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 priority:4, + version:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 version:4, + priority:4; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 flow_lbl[3]; + + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; + + struct in6_addr saddr; + struct in6_addr daddr; +}; + +#ifdef __KERNEL__ +/* + * This structure contains configuration options per IPv6 link. 
+ */ +struct ipv6_devconf { + __s32 forwarding; + __s32 hop_limit; + __s32 mtu6; + __s32 accept_ra; + __s32 accept_redirects; + __s32 autoconf; + __s32 dad_transmits; + __s32 rtr_solicits; + __s32 rtr_solicit_interval; + __s32 rtr_solicit_delay; + __s32 force_mld_version; +#ifdef CONFIG_IPV6_PRIVACY + __s32 use_tempaddr; + __s32 temp_valid_lft; + __s32 temp_prefered_lft; + __s32 regen_max_retry; + __s32 max_desync_factor; +#endif + __s32 max_addresses; + __s32 accept_ra_defrtr; + __s32 accept_ra_pinfo; +#ifdef CONFIG_IPV6_ROUTER_PREF + __s32 accept_ra_rtr_pref; + __s32 rtr_probe_interval; +#ifdef CONFIG_IPV6_ROUTE_INFO + __s32 accept_ra_rt_info_max_plen; +#endif +#endif + __s32 proxy_ndp; + __s32 accept_source_route; +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + __s32 optimistic_dad; +#endif +#ifdef CONFIG_IPV6_MROUTE + __s32 mc_forwarding; +#endif + __s32 disable_ipv6; + __s32 accept_dad; + __s32 force_tllao; + void *sysctl; +}; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; +extern struct ipv6_params ipv6_defaults; +#endif + +/* index values for the variables in ipv6_devconf */ +enum { + DEVCONF_FORWARDING = 0, + DEVCONF_HOPLIMIT, + DEVCONF_MTU6, + DEVCONF_ACCEPT_RA, + DEVCONF_ACCEPT_REDIRECTS, + DEVCONF_AUTOCONF, + DEVCONF_DAD_TRANSMITS, + DEVCONF_RTR_SOLICITS, + DEVCONF_RTR_SOLICIT_INTERVAL, + DEVCONF_RTR_SOLICIT_DELAY, + DEVCONF_USE_TEMPADDR, + DEVCONF_TEMP_VALID_LFT, + DEVCONF_TEMP_PREFERED_LFT, + DEVCONF_REGEN_MAX_RETRY, + DEVCONF_MAX_DESYNC_FACTOR, + DEVCONF_MAX_ADDRESSES, + DEVCONF_FORCE_MLD_VERSION, + DEVCONF_ACCEPT_RA_DEFRTR, + DEVCONF_ACCEPT_RA_PINFO, + DEVCONF_ACCEPT_RA_RTR_PREF, + DEVCONF_RTR_PROBE_INTERVAL, + DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN, + DEVCONF_PROXY_NDP, + DEVCONF_OPTIMISTIC_DAD, + DEVCONF_ACCEPT_SOURCE_ROUTE, + DEVCONF_MC_FORWARDING, + DEVCONF_DISABLE_IPV6, + DEVCONF_ACCEPT_DAD, + DEVCONF_FORCE_TLLAO, + DEVCONF_MAX +}; + +#ifdef __KERNEL__ +#include <linux/icmpv6.h> +#include <linux/tcp.h> +#include <linux/udp.h> + +#include <net/inet_sock.h> + +static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb) +{ + return (struct ipv6hdr *)skb_network_header(skb); +} + +static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb) +{ + return (struct ipv6hdr *)skb_transport_header(skb); +} + +static inline __u8 ipv6_tclass(const struct ipv6hdr *iph) +{ + return (ntohl(*(__be32 *)iph) >> 20) & 0xff; +} + +/* + This structure contains results of exthdrs parsing + as offsets from skb->nh. 
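+
+   The fields hold such offsets, with zero generally meaning the
+   corresponding extension header was not seen; iif is the input
+   interface index.  Protocol code reaches them through the IP6CB()
+   accessor defined below, e.g. (sketch; handle_routing_header_at()
+   is a made-up helper):
+
+       struct inet6_skb_parm *opt = IP6CB(skb);
+
+       if (opt->srcrt)
+               handle_routing_header_at(opt->srcrt);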
+ */ + +struct inet6_skb_parm { + int iif; + __u16 ra; + __u16 hop; + __u16 dst0; + __u16 srcrt; + __u16 dst1; + __u16 lastopt; + __u16 nhoff; + __u16 flags; +#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) + __u16 dsthao; +#endif + +#define IP6SKB_XFRM_TRANSFORMED 1 +#define IP6SKB_FORWARDED 2 +#define IP6SKB_REROUTED 4 +}; + +#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) +#define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb)) + +static inline int inet6_iif(const struct sk_buff *skb) +{ + return IP6CB(skb)->iif; +} + +struct inet6_request_sock { + struct in6_addr loc_addr; + struct in6_addr rmt_addr; + struct sk_buff *pktopts; + int iif; +}; + +struct tcp6_request_sock { + struct tcp_request_sock tcp6rsk_tcp; + struct inet6_request_sock tcp6rsk_inet6; +}; + +struct ipv6_mc_socklist; +struct ipv6_ac_socklist; +struct ipv6_fl_socklist; + +/** + * struct ipv6_pinfo - ipv6 private area + * + * In the struct sock hierarchy (tcp6_sock, upd6_sock, etc) + * this _must_ be the last member, so that inet6_sk_generic + * is able to calculate its offset from the base struct sock + * by using the struct proto->slab_obj_size member. -acme + */ +struct ipv6_pinfo { + struct in6_addr saddr; + struct in6_addr rcv_saddr; + struct in6_addr daddr; + struct in6_pktinfo sticky_pktinfo; + struct in6_addr *daddr_cache; +#ifdef CONFIG_IPV6_SUBTREES + struct in6_addr *saddr_cache; +#endif + + __be32 flow_label; + __u32 frag_size; + + /* + * Packed in 16bits. + * Omit one shift by by putting the signed field at MSB. + */ +#if defined(__BIG_ENDIAN_BITFIELD) + __s16 hop_limit:9; + __u16 __unused_1:7; +#else + __u16 __unused_1:7; + __s16 hop_limit:9; +#endif + +#if defined(__BIG_ENDIAN_BITFIELD) + /* Packed in 16bits. */ + __s16 mcast_hops:9; + __u16 __unused_2:6, + mc_loop:1; +#else + __u16 mc_loop:1, + __unused_2:6; + __s16 mcast_hops:9; +#endif + int ucast_oif; + int mcast_oif; + + /* pktoption flags */ + union { + struct { + __u16 srcrt:1, + osrcrt:1, + rxinfo:1, + rxoinfo:1, + rxhlim:1, + rxohlim:1, + hopopts:1, + ohopopts:1, + dstopts:1, + odstopts:1, + rxflow:1, + rxtclass:1, + rxpmtu:1, + rxorigdstaddr:1; + /* 2 bits hole */ + } bits; + __u16 all; + } rxopt; + + /* sockopt flags */ + __u16 recverr:1, + sndflow:1, + pmtudisc:2, + ipv6only:1, + srcprefs:3, /* 001: prefer temporary address + * 010: prefer public address + * 100: prefer care-of address + */ + dontfrag:1; + __u8 min_hopcount; + __u8 tclass; + __u8 rcv_tclass; + + __u32 dst_cookie; + + struct ipv6_mc_socklist __rcu *ipv6_mc_list; + struct ipv6_ac_socklist *ipv6_ac_list; + struct ipv6_fl_socklist *ipv6_fl_list; + + struct ipv6_txoptions *opt; + struct sk_buff *pktoptions; + struct sk_buff *rxpmtu; + struct { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; + } cork; +}; + +/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! 
*/ +struct raw6_sock { + /* inet_sock has to be the first member of raw6_sock */ + struct inet_sock inet; + __u32 checksum; /* perform checksum */ + __u32 offset; /* checksum offset */ + struct icmp6_filter filter; + __u32 ip6mr_table; + /* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */ + struct ipv6_pinfo inet6; +}; + +struct udp6_sock { + struct udp_sock udp; + /* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */ + struct ipv6_pinfo inet6; +}; + +struct tcp6_sock { + struct tcp_sock tcp; + /* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */ + struct ipv6_pinfo inet6; +}; + +extern int inet6_sk_rebuild_header(struct sock *sk); + +#if IS_ENABLED(CONFIG_IPV6) +static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) +{ + return inet_sk(__sk)->pinet6; +} + +static inline struct inet6_request_sock * + inet6_rsk(const struct request_sock *rsk) +{ + return (struct inet6_request_sock *)(((u8 *)rsk) + + inet_rsk(rsk)->inet6_rsk_offset); +} + +static inline u32 inet6_rsk_offset(struct request_sock *rsk) +{ + return rsk->rsk_ops->obj_size - sizeof(struct inet6_request_sock); +} + +static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops) +{ + struct request_sock *req = reqsk_alloc(ops); + + if (req != NULL) { + inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req); + inet6_rsk(req)->pktopts = NULL; + } + + return req; +} + +static inline struct raw6_sock *raw6_sk(const struct sock *sk) +{ + return (struct raw6_sock *)sk; +} + +static inline void inet_sk_copy_descendant(struct sock *sk_to, + const struct sock *sk_from) +{ + int ancestor_size = sizeof(struct inet_sock); + + if (sk_from->sk_family == PF_INET6) + ancestor_size += sizeof(struct ipv6_pinfo); + + __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); +} + +#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only) +#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk)) + +struct inet6_timewait_sock { + struct in6_addr tw_v6_daddr; + struct in6_addr tw_v6_rcv_saddr; +}; + +struct tcp6_timewait_sock { + struct tcp_timewait_sock tcp6tw_tcp; + struct inet6_timewait_sock tcp6tw_inet6; +}; + +static inline u16 inet6_tw_offset(const struct proto *prot) +{ + return prot->twsk_prot->twsk_obj_size - + sizeof(struct inet6_timewait_sock); +} + +static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk) +{ + return (struct inet6_timewait_sock *)(((u8 *)sk) + + inet_twsk(sk)->tw_ipv6_offset); +} + +static inline struct in6_addr *__inet6_rcv_saddr(const struct sock *sk) +{ + return likely(sk->sk_state != TCP_TIME_WAIT) ? + &inet6_sk(sk)->rcv_saddr : &inet6_twsk(sk)->tw_v6_rcv_saddr; +} + +static inline struct in6_addr *inet6_rcv_saddr(const struct sock *sk) +{ + return sk->sk_family == AF_INET6 ? __inet6_rcv_saddr(sk) : NULL; +} + +static inline int inet_v6_ipv6only(const struct sock *sk) +{ + return likely(sk->sk_state != TCP_TIME_WAIT) ? 
+ ipv6_only_sock(sk) : inet_twsk(sk)->tw_ipv6only; +} +#else +#define __ipv6_only_sock(sk) 0 +#define ipv6_only_sock(sk) 0 + +static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) +{ + return NULL; +} + +static inline struct inet6_request_sock * + inet6_rsk(const struct request_sock *rsk) +{ + return NULL; +} + +static inline struct raw6_sock *raw6_sk(const struct sock *sk) +{ + return NULL; +} + +#define __inet6_rcv_saddr(__sk) NULL +#define inet6_rcv_saddr(__sk) NULL +#define tcp_twsk_ipv6only(__sk) 0 +#define inet_v6_ipv6only(__sk) 0 +#endif /* IS_ENABLED(CONFIG_IPV6) */ + +#define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ + ((*((__portpair *)&(inet_sk(__sk)->inet_dport))) == (__ports)) && \ + ((__sk)->sk_family == AF_INET6) && \ + ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ + ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \ + (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) + +#define INET6_TW_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ + (*((__portpair *)&(inet_twsk(__sk)->tw_dport)) == (__ports)) && \ + ((__sk)->sk_family == PF_INET6) && \ + (ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr))) && \ + (ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr))) && \ + (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) + +#endif /* __KERNEL__ */ + +#endif /* _IPV6_H */ diff --git a/include/linux/ipv6_route.h b/include/linux/ipv6_route.h new file mode 100644 index 00000000..1e7d8af2 --- /dev/null +++ b/include/linux/ipv6_route.h @@ -0,0 +1,62 @@ +/* + * Linux INET6 implementation + * + * Authors: + * Pedro Roque <roque@di.fc.ul.pt> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_IPV6_ROUTE_H +#define _LINUX_IPV6_ROUTE_H + +#include <linux/types.h> + +#define RTF_DEFAULT 0x00010000 /* default - learned via ND */ +#define RTF_ALLONLINK 0x00020000 /* (deprecated and will be removed) + fallback, no routers on link */ +#define RTF_ADDRCONF 0x00040000 /* addrconf route - RA */ +#define RTF_PREFIX_RT 0x00080000 /* A prefix only route - RA */ +#define RTF_ANYCAST 0x00100000 /* Anycast */ + +#define RTF_NONEXTHOP 0x00200000 /* route with no nexthop */ +#define RTF_EXPIRES 0x00400000 + +#define RTF_ROUTEINFO 0x00800000 /* route information - RA */ + +#define RTF_CACHE 0x01000000 /* cache entry */ +#define RTF_FLOW 0x02000000 /* flow significant route */ +#define RTF_POLICY 0x04000000 /* policy route */ + +#define RTF_PREF(pref) ((pref) << 27) +#define RTF_PREF_MASK 0x18000000 + +#define RTF_LOCAL 0x80000000 + +#ifdef __KERNEL__ +#define IPV6_EXTRACT_PREF(flag) (((flag) & RTF_PREF_MASK) >> 27) +#define IPV6_DECODE_PREF(pref) ((pref) ^ 2) /* 1:low,2:med,3:high */ +#endif + +struct in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + __u32 rtmsg_type; + __u16 rtmsg_dst_len; + __u16 rtmsg_src_len; + __u32 rtmsg_metric; + unsigned long rtmsg_info; + __u32 rtmsg_flags; + int rtmsg_ifindex; +}; + +#define RTMSG_NEWDEVICE 0x11 +#define RTMSG_DELDEVICE 0x12 +#define RTMSG_NEWROUTE 0x21 +#define RTMSG_DELROUTE 0x22 + +#endif diff --git a/include/linux/ipx.h b/include/linux/ipx.h new file mode 100644 index 00000000..3d48014c --- /dev/null +++ b/include/linux/ipx.h @@ -0,0 +1,75 @@ +#ifndef _IPX_H_ +#define _IPX_H_ +#include <linux/types.h> +#include <linux/sockios.h> +#include <linux/socket.h> +#define IPX_NODE_LEN 6 +#define IPX_MTU 576 + +struct sockaddr_ipx { + __kernel_sa_family_t sipx_family; + __be16 sipx_port; + __be32 sipx_network; + unsigned char sipx_node[IPX_NODE_LEN]; + __u8 sipx_type; + unsigned char sipx_zero; /* 16 byte fill */ +}; + +/* + * So we can fit the extra info for SIOCSIFADDR into the address nicely + */ +#define sipx_special sipx_port +#define sipx_action sipx_zero +#define IPX_DLTITF 0 +#define IPX_CRTITF 1 + +struct ipx_route_definition { + __be32 ipx_network; + __be32 ipx_router_network; + unsigned char ipx_router_node[IPX_NODE_LEN]; +}; + +struct ipx_interface_definition { + __be32 ipx_network; + unsigned char ipx_device[16]; + unsigned char ipx_dlink_type; +#define IPX_FRAME_NONE 0 +#define IPX_FRAME_SNAP 1 +#define IPX_FRAME_8022 2 +#define IPX_FRAME_ETHERII 3 +#define IPX_FRAME_8023 4 +#define IPX_FRAME_TR_8022 5 /* obsolete */ + unsigned char ipx_special; +#define IPX_SPECIAL_NONE 0 +#define IPX_PRIMARY 1 +#define IPX_INTERNAL 2 + unsigned char ipx_node[IPX_NODE_LEN]; +}; + +struct ipx_config_data { + unsigned char ipxcfg_auto_select_primary; + unsigned char ipxcfg_auto_create_interfaces; +}; + +/* + * OLD Route Definition for backward compatibility. 
+ */ + +struct ipx_route_def { + __be32 ipx_network; + __be32 ipx_router_network; +#define IPX_ROUTE_NO_ROUTER 0 + unsigned char ipx_router_node[IPX_NODE_LEN]; + unsigned char ipx_device[16]; + unsigned short ipx_flags; +#define IPX_RT_SNAP 8 +#define IPX_RT_8022 4 +#define IPX_RT_BLUEBOOK 2 +#define IPX_RT_ROUTED 1 +}; + +#define SIOCAIPXITFCRT (SIOCPROTOPRIVATE) +#define SIOCAIPXPRISLT (SIOCPROTOPRIVATE + 1) +#define SIOCIPXCFGDATA (SIOCPROTOPRIVATE + 2) +#define SIOCIPXNCPCONN (SIOCPROTOPRIVATE + 3) +#endif /* _IPX_H_ */ diff --git a/include/linux/irda.h b/include/linux/irda.h new file mode 100644 index 00000000..a014c325 --- /dev/null +++ b/include/linux/irda.h @@ -0,0 +1,251 @@ +/********************************************************************* + * + * Filename: irda.h + * Version: + * Description: + * Status: Experimental. + * Author: Dag Brattli <dagb@cs.uit.no> + * Created at: Mon Mar 8 14:06:12 1999 + * Modified at: Sat Dec 25 16:06:42 1999 + * Modified by: Dag Brattli <dagb@cs.uit.no> + * + * Copyright (c) 1999 Dag Brattli, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * Neither Dag Brattli nor University of Tromsø admit liability nor + * provide warranty for any of this software. This material is + * provided "AS-IS" and at no charge. + * + ********************************************************************/ + +#ifndef KERNEL_IRDA_H +#define KERNEL_IRDA_H + +#include <linux/types.h> +#include <linux/socket.h> + +/* Note that this file is shared with user space. */ + +/* Hint bit positions for first hint byte */ +#define HINT_PNP 0x01 +#define HINT_PDA 0x02 +#define HINT_COMPUTER 0x04 +#define HINT_PRINTER 0x08 +#define HINT_MODEM 0x10 +#define HINT_FAX 0x20 +#define HINT_LAN 0x40 +#define HINT_EXTENSION 0x80 + +/* Hint bit positions for second hint byte (first extension byte) */ +#define HINT_TELEPHONY 0x01 +#define HINT_FILE_SERVER 0x02 +#define HINT_COMM 0x04 +#define HINT_MESSAGE 0x08 +#define HINT_HTTP 0x10 +#define HINT_OBEX 0x20 + +/* IrLMP character code values */ +#define CS_ASCII 0x00 +#define CS_ISO_8859_1 0x01 +#define CS_ISO_8859_2 0x02 +#define CS_ISO_8859_3 0x03 +#define CS_ISO_8859_4 0x04 +#define CS_ISO_8859_5 0x05 +#define CS_ISO_8859_6 0x06 +#define CS_ISO_8859_7 0x07 +#define CS_ISO_8859_8 0x08 +#define CS_ISO_8859_9 0x09 +#define CS_UNICODE 0xff + +/* These are the currently known dongles */ +typedef enum { + IRDA_TEKRAM_DONGLE = 0, + IRDA_ESI_DONGLE = 1, + IRDA_ACTISYS_DONGLE = 2, + IRDA_ACTISYS_PLUS_DONGLE = 3, + IRDA_GIRBIL_DONGLE = 4, + IRDA_LITELINK_DONGLE = 5, + IRDA_AIRPORT_DONGLE = 6, + IRDA_OLD_BELKIN_DONGLE = 7, + IRDA_EP7211_IR = 8, + IRDA_MCP2120_DONGLE = 9, + IRDA_ACT200L_DONGLE = 10, + IRDA_MA600_DONGLE = 11, + IRDA_TOIM3232_DONGLE = 12, + IRDA_EP7211_DONGLE = 13, +} IRDA_DONGLE; + +/* Protocol types to be used for SOCK_DGRAM */ +enum { + IRDAPROTO_UNITDATA = 0, + IRDAPROTO_ULTRA = 1, + IRDAPROTO_MAX +}; + +#define SOL_IRLMP 266 /* Same as SOL_IRDA for now */ +#define SOL_IRTTP 266 /* Same as SOL_IRDA for now */ + +#define IRLMP_ENUMDEVICES 1 /* Return discovery log */ +#define IRLMP_IAS_SET 2 /* Set an attribute in local IAS */ +#define IRLMP_IAS_QUERY 3 /* Query remote IAS for attribute */ +#define IRLMP_HINTS_SET 4 /* Set hint bits advertised */ +#define IRLMP_QOS_SET 5 +#define IRLMP_QOS_GET 6 
+#define IRLMP_MAX_SDU_SIZE 7 +#define IRLMP_IAS_GET 8 /* Get an attribute from local IAS */ +#define IRLMP_IAS_DEL 9 /* Remove attribute from local IAS */ +#define IRLMP_HINT_MASK_SET 10 /* Set discovery filter */ +#define IRLMP_WAITDEVICE 11 /* Wait for a new discovery */ + +#define IRTTP_MAX_SDU_SIZE IRLMP_MAX_SDU_SIZE /* Compatibility */ + +#define IAS_MAX_STRING 256 /* See IrLMP 1.1, 4.3.3.2 */ +#define IAS_MAX_OCTET_STRING 1024 /* See IrLMP 1.1, 4.3.3.2 */ +#define IAS_MAX_CLASSNAME 60 /* See IrLMP 1.1, 4.3.1 */ +#define IAS_MAX_ATTRIBNAME 60 /* See IrLMP 1.1, 4.3.3.1 */ +#define IAS_MAX_ATTRIBNUMBER 256 /* See IrLMP 1.1, 4.3.3.1 */ +/* For user space backward compatibility - may be fixed in kernel 2.5.X + * Note : need 60+1 ('\0'), make it 64 for alignement - Jean II */ +#define IAS_EXPORT_CLASSNAME 64 +#define IAS_EXPORT_ATTRIBNAME 256 + +/* Attribute type needed for struct irda_ias_set */ +#define IAS_MISSING 0 +#define IAS_INTEGER 1 +#define IAS_OCT_SEQ 2 +#define IAS_STRING 3 + +#define LSAP_ANY 0xff + +struct sockaddr_irda { + __kernel_sa_family_t sir_family; /* AF_IRDA */ + __u8 sir_lsap_sel; /* LSAP selector */ + __u32 sir_addr; /* Device address */ + char sir_name[25]; /* Usually <service>:IrDA:TinyTP */ +}; + +struct irda_device_info { + __u32 saddr; /* Address of local interface */ + __u32 daddr; /* Address of remote device */ + char info[22]; /* Description */ + __u8 charset; /* Charset used for description */ + __u8 hints[2]; /* Hint bits */ +}; + +struct irda_device_list { + __u32 len; + struct irda_device_info dev[1]; +}; + +struct irda_ias_set { + char irda_class_name[IAS_EXPORT_CLASSNAME]; + char irda_attrib_name[IAS_EXPORT_ATTRIBNAME]; + unsigned int irda_attrib_type; + union { + unsigned int irda_attrib_int; + struct { + unsigned short len; + __u8 octet_seq[IAS_MAX_OCTET_STRING]; + } irda_attrib_octet_seq; + struct { + __u8 len; + __u8 charset; + __u8 string[IAS_MAX_STRING]; + } irda_attrib_string; + } attribute; + __u32 daddr; /* Address of device (for some queries only) */ +}; + +/* Some private IOCTL's (max 16) */ +#define SIOCSDONGLE (SIOCDEVPRIVATE + 0) +#define SIOCGDONGLE (SIOCDEVPRIVATE + 1) +#define SIOCSBANDWIDTH (SIOCDEVPRIVATE + 2) +#define SIOCSMEDIABUSY (SIOCDEVPRIVATE + 3) +#define SIOCGMEDIABUSY (SIOCDEVPRIVATE + 4) +#define SIOCGRECEIVING (SIOCDEVPRIVATE + 5) +#define SIOCSMODE (SIOCDEVPRIVATE + 6) +#define SIOCGMODE (SIOCDEVPRIVATE + 7) +#define SIOCSDTRRTS (SIOCDEVPRIVATE + 8) +#define SIOCGQOS (SIOCDEVPRIVATE + 9) + +/* No reason to include <linux/if.h> just because of this one ;-) */ +#define IRNAMSIZ 16 + +/* IrDA quality of service information (must not exceed 16 bytes) */ +struct if_irda_qos { + unsigned long baudrate; + unsigned short data_size; + unsigned short window_size; + unsigned short min_turn_time; + unsigned short max_turn_time; + unsigned char add_bofs; + unsigned char link_disc; +}; + +/* For setting RTS and DTR lines of a dongle */ +struct if_irda_line { + __u8 dtr; + __u8 rts; +}; + +/* IrDA interface configuration (data part must not exceed 16 bytes) */ +struct if_irda_req { + union { + char ifrn_name[IRNAMSIZ]; /* if name, e.g. 
"irda0" */ + } ifr_ifrn; + + /* Data part */ + union { + struct if_irda_line ifru_line; + struct if_irda_qos ifru_qos; + unsigned short ifru_flags; + unsigned int ifru_receiving; + unsigned int ifru_mode; + unsigned int ifru_dongle; + } ifr_ifru; +}; + +#define ifr_baudrate ifr_ifru.ifru_qos.baudrate +#define ifr_receiving ifr_ifru.ifru_receiving +#define ifr_dongle ifr_ifru.ifru_dongle +#define ifr_mode ifr_ifru.ifru_mode +#define ifr_dtr ifr_ifru.ifru_line.dtr +#define ifr_rts ifr_ifru.ifru_line.rts + + +/* IrDA netlink definitions */ +#define IRDA_NL_NAME "irda" +#define IRDA_NL_VERSION 1 + +enum irda_nl_commands { + IRDA_NL_CMD_UNSPEC, + IRDA_NL_CMD_SET_MODE, + IRDA_NL_CMD_GET_MODE, + + __IRDA_NL_CMD_AFTER_LAST +}; +#define IRDA_NL_CMD_MAX (__IRDA_NL_CMD_AFTER_LAST - 1) + +enum nl80211_attrs { + IRDA_NL_ATTR_UNSPEC, + IRDA_NL_ATTR_IFNAME, + IRDA_NL_ATTR_MODE, + + __IRDA_NL_ATTR_AFTER_LAST +}; +#define IRDA_NL_ATTR_MAX (__IRDA_NL_ATTR_AFTER_LAST - 1) + +/* IrDA modes */ +#define IRDA_MODE_PRIMARY 0x1 +#define IRDA_MODE_SECONDARY 0x2 +#define IRDA_MODE_MONITOR 0x4 + +#endif /* KERNEL_IRDA_H */ + + + + diff --git a/include/linux/irq.h b/include/linux/irq.h new file mode 100644 index 00000000..b27cfcfd --- /dev/null +++ b/include/linux/irq.h @@ -0,0 +1,752 @@ +#ifndef _LINUX_IRQ_H +#define _LINUX_IRQ_H + +/* + * Please do not include this file in generic code. There is currently + * no requirement for any architecture to implement anything held + * within this file. + * + * Thanks. --rmk + */ + +#include <linux/smp.h> + +#ifndef CONFIG_S390 + +#include <linux/linkage.h> +#include <linux/cache.h> +#include <linux/spinlock.h> +#include <linux/cpumask.h> +#include <linux/gfp.h> +#include <linux/irqreturn.h> +#include <linux/irqnr.h> +#include <linux/errno.h> +#include <linux/topology.h> +#include <linux/wait.h> + +#include <asm/irq.h> +#include <asm/ptrace.h> +#include <asm/irq_regs.h> + +struct seq_file; +struct module; +struct irq_desc; +struct irq_data; +typedef void (*irq_flow_handler_t)(unsigned int irq, + struct irq_desc *desc); +typedef void (*irq_preflow_handler_t)(struct irq_data *data); + +/* + * IRQ line status. + * + * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h + * + * IRQ_TYPE_NONE - default, unspecified type + * IRQ_TYPE_EDGE_RISING - rising edge triggered + * IRQ_TYPE_EDGE_FALLING - falling edge triggered + * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered + * IRQ_TYPE_LEVEL_HIGH - high level triggered + * IRQ_TYPE_LEVEL_LOW - low level triggered + * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits + * IRQ_TYPE_SENSE_MASK - Mask for all the above bits + * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type + * to setup the HW to a sane default (used + * by irqdomain map() callbacks to synchronize + * the HW state and SW flags for a newly + * allocated descriptor). + * + * IRQ_TYPE_PROBE - Special flag for probing in progress + * + * Bits which can be modified via irq_set/clear/modify_status_flags() + * IRQ_LEVEL - Interrupt is level type. Will be also + * updated in the code when the above trigger + * bits are modified via irq_set_irq_type() + * IRQ_PER_CPU - Mark an interrupt PER_CPU. 
Will protect + * it from affinity setting + * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing + * IRQ_NOREQUEST - Interrupt cannot be requested via + * request_irq() + * IRQ_NOTHREAD - Interrupt cannot be threaded + * IRQ_NOAUTOEN - Interrupt is not automatically enabled in + * request/setup_irq() + * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) + * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context + * IRQ_NESTED_TRHEAD - Interrupt nests into another thread + * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable + */ +enum { + IRQ_TYPE_NONE = 0x00000000, + IRQ_TYPE_EDGE_RISING = 0x00000001, + IRQ_TYPE_EDGE_FALLING = 0x00000002, + IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), + IRQ_TYPE_LEVEL_HIGH = 0x00000004, + IRQ_TYPE_LEVEL_LOW = 0x00000008, + IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), + IRQ_TYPE_SENSE_MASK = 0x0000000f, + IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, + + IRQ_TYPE_PROBE = 0x00000010, + + IRQ_LEVEL = (1 << 8), + IRQ_PER_CPU = (1 << 9), + IRQ_NOPROBE = (1 << 10), + IRQ_NOREQUEST = (1 << 11), + IRQ_NOAUTOEN = (1 << 12), + IRQ_NO_BALANCING = (1 << 13), + IRQ_MOVE_PCNTXT = (1 << 14), + IRQ_NESTED_THREAD = (1 << 15), + IRQ_NOTHREAD = (1 << 16), + IRQ_PER_CPU_DEVID = (1 << 17), +}; + +#define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) + +#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) + +/* + * Return value for chip->irq_set_affinity() + * + * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity + * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity + */ +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY, +}; + +struct msi_desc; +struct irq_domain; + +/** + * struct irq_data - per irq and irq chip data passed down to chip functions + * @irq: interrupt number + * @hwirq: hardware interrupt number, local to the interrupt domain + * @node: node index useful for balancing + * @state_use_accessors: status information for irq chip functions. + * Use accessor functions to deal with it + * @chip: low level interrupt hardware access + * @domain: Interrupt translation domain; responsible for mapping + * between hwirq number and linux irq number. + * @handler_data: per-IRQ data for the irq_chip methods + * @chip_data: platform-specific per-chip private data for the chip + * methods, to allow shared chip implementations + * @msi_desc: MSI descriptor + * @affinity: IRQ affinity on SMP + * + * The fields here need to overlay the ones in irq_desc until we + * cleaned up the direct references and switched everything over to + * irq_data. 
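+ *
+ * As an illustration, an irq_chip callback normally derives all it
+ * needs from this structure (sketch; struct my_chip and MY_MASK_SET
+ * are made-up driver details):
+ *
+ *      static void my_chip_irq_mask(struct irq_data *d)
+ *      {
+ *              struct my_chip *mc = irq_data_get_irq_chip_data(d);
+ *
+ *              writel(BIT(irqd_to_hwirq(d)), mc->base + MY_MASK_SET);
+ *      }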
+ */ +struct irq_data { + unsigned int irq; + unsigned long hwirq; + unsigned int node; + unsigned int state_use_accessors; + struct irq_chip *chip; + struct irq_domain *domain; + void *handler_data; + void *chip_data; + struct msi_desc *msi_desc; +#ifdef CONFIG_SMP + cpumask_var_t affinity; +#endif +}; + +/* + * Bit masks for irq_data.state + * + * IRQD_TRIGGER_MASK - Mask for the trigger type bits + * IRQD_SETAFFINITY_PENDING - Affinity setting is pending + * IRQD_NO_BALANCING - Balancing disabled for this IRQ + * IRQD_PER_CPU - Interrupt is per cpu + * IRQD_AFFINITY_SET - Interrupt affinity was set + * IRQD_LEVEL - Interrupt is level triggered + * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup + * from suspend + * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process + * context + * IRQD_IRQ_DISABLED - Disabled state of the interrupt + * IRQD_IRQ_MASKED - Masked state of the interrupt + * IRQD_IRQ_INPROGRESS - In progress state of the interrupt + */ +enum { + IRQD_TRIGGER_MASK = 0xf, + IRQD_SETAFFINITY_PENDING = (1 << 8), + IRQD_NO_BALANCING = (1 << 10), + IRQD_PER_CPU = (1 << 11), + IRQD_AFFINITY_SET = (1 << 12), + IRQD_LEVEL = (1 << 13), + IRQD_WAKEUP_STATE = (1 << 14), + IRQD_MOVE_PCNTXT = (1 << 15), + IRQD_IRQ_DISABLED = (1 << 16), + IRQD_IRQ_MASKED = (1 << 17), + IRQD_IRQ_INPROGRESS = (1 << 18), +}; + +static inline bool irqd_is_setaffinity_pending(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; +} + +static inline bool irqd_is_per_cpu(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_PER_CPU; +} + +static inline bool irqd_can_balance(struct irq_data *d) +{ + return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); +} + +static inline bool irqd_affinity_was_set(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_AFFINITY_SET; +} + +static inline void irqd_mark_affinity_was_set(struct irq_data *d) +{ + d->state_use_accessors |= IRQD_AFFINITY_SET; +} + +static inline u32 irqd_get_trigger_type(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_TRIGGER_MASK; +} + +/* + * Must only be called inside irq_chip.irq_set_type() functions. + */ +static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) +{ + d->state_use_accessors &= ~IRQD_TRIGGER_MASK; + d->state_use_accessors |= type & IRQD_TRIGGER_MASK; +} + +static inline bool irqd_is_level_type(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_LEVEL; +} + +static inline bool irqd_is_wakeup_set(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_WAKEUP_STATE; +} + +static inline bool irqd_can_move_in_process_context(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_MOVE_PCNTXT; +} + +static inline bool irqd_irq_disabled(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_IRQ_DISABLED; +} + +static inline bool irqd_irq_masked(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_IRQ_MASKED; +} + +static inline bool irqd_irq_inprogress(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_IRQ_INPROGRESS; +} + +/* + * Functions for chained handlers which can be enabled/disabled by the + * standard disable_irq/enable_irq calls. Must be called with + * irq_desc->lock held. 
+ */ +static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) +{ + d->state_use_accessors |= IRQD_IRQ_INPROGRESS; +} + +static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) +{ + d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS; +} + +static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) +{ + return d->hwirq; +} + +/** + * struct irq_chip - hardware interrupt chip descriptor + * + * @name: name for /proc/interrupts + * @irq_startup: start up the interrupt (defaults to ->enable if NULL) + * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) + * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) + * @irq_disable: disable the interrupt + * @irq_ack: start of a new interrupt + * @irq_mask: mask an interrupt source + * @irq_mask_ack: ack and mask an interrupt source + * @irq_unmask: unmask an interrupt source + * @irq_eoi: end of interrupt + * @irq_set_affinity: set the CPU affinity on SMP machines + * @irq_retrigger: resend an IRQ to the CPU + * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ + * @irq_set_wake: enable/disable power-management wake-on of an IRQ + * @irq_bus_lock: function to lock access to slow bus (i2c) chips + * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips + * @irq_cpu_online: configure an interrupt source for a secondary CPU + * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU + * @irq_suspend: function called from core code on suspend once per chip + * @irq_resume: function called from core code on resume once per chip + * @irq_pm_shutdown: function called from core code on shutdown once per chip + * @irq_print_chip: optional to print special chip info in show_interrupts + * @flags: chip specific flags + * + * @release: release function solely used by UML + */ +struct irq_chip { + const char *name; + unsigned int (*irq_startup)(struct irq_data *data); + void (*irq_shutdown)(struct irq_data *data); + void (*irq_enable)(struct irq_data *data); + void (*irq_disable)(struct irq_data *data); + + void (*irq_ack)(struct irq_data *data); + void (*irq_mask)(struct irq_data *data); + void (*irq_mask_ack)(struct irq_data *data); + void (*irq_unmask)(struct irq_data *data); + void (*irq_eoi)(struct irq_data *data); + + int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); + int (*irq_retrigger)(struct irq_data *data); + int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); + int (*irq_set_wake)(struct irq_data *data, unsigned int on); + + void (*irq_bus_lock)(struct irq_data *data); + void (*irq_bus_sync_unlock)(struct irq_data *data); + + void (*irq_cpu_online)(struct irq_data *data); + void (*irq_cpu_offline)(struct irq_data *data); + + void (*irq_suspend)(struct irq_data *data); + void (*irq_resume)(struct irq_data *data); + void (*irq_pm_shutdown)(struct irq_data *data); + + void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); + + unsigned long flags; + + /* Currently used only by UML, might disappear one day.*/ +#ifdef CONFIG_IRQ_RELEASE_METHOD + void (*release)(unsigned int irq, void *dev_id); +#endif +}; + +/* + * irq_chip specific flags + * + * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() + * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled + * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path + * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks + * when irq enabled + * IRQCHIP_SKIP_SET_WAKE: Skip 
chip.irq_set_wake(), for this irq chip + */ +enum { + IRQCHIP_SET_TYPE_MASKED = (1 << 0), + IRQCHIP_EOI_IF_HANDLED = (1 << 1), + IRQCHIP_MASK_ON_SUSPEND = (1 << 2), + IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), + IRQCHIP_SKIP_SET_WAKE = (1 << 4), +}; + +/* This include will go away once we isolated irq_desc usage to core code */ +#include <linux/irqdesc.h> + +/* + * Pick up the arch-dependent methods: + */ +#include <asm/hw_irq.h> + +#ifndef NR_IRQS_LEGACY +# define NR_IRQS_LEGACY 0 +#endif + +#ifndef ARCH_IRQ_INIT_FLAGS +# define ARCH_IRQ_INIT_FLAGS 0 +#endif + +#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS + +struct irqaction; +extern int setup_irq(unsigned int irq, struct irqaction *new); +extern void remove_irq(unsigned int irq, struct irqaction *act); +extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); +extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); + +extern void irq_cpu_online(void); +extern void irq_cpu_offline(void); +extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask); + +#ifdef CONFIG_GENERIC_HARDIRQS + +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) +void irq_move_irq(struct irq_data *data); +void irq_move_masked_irq(struct irq_data *data); +#else +static inline void irq_move_irq(struct irq_data *data) { } +static inline void irq_move_masked_irq(struct irq_data *data) { } +#endif + +extern int no_irq_affinity; + +/* + * Built-in IRQ handlers for various IRQ types, + * callable via desc->handle_irq() + */ +extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_nested_irq(unsigned int irq); + +/* Handling of unhandled and spurious interrupts: */ +extern void note_interrupt(unsigned int irq, struct irq_desc *desc, + irqreturn_t action_ret); + + +/* Enable/disable irq debugging output: */ +extern int noirqdebug_setup(char *str); + +/* Checks whether the interrupt can be requested by request_irq(): */ +extern int can_request_irq(unsigned int irq, unsigned long irqflags); + +/* Dummy irq-chip implementations: */ +extern struct irq_chip no_irq_chip; +extern struct irq_chip dummy_irq_chip; + +extern void +irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle, const char *name); + +static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle) +{ + irq_set_chip_and_handler_name(irq, chip, handle, NULL); +} + +extern int irq_set_percpu_devid(unsigned int irq); + +extern void +__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + const char *name); + +static inline void +irq_set_handler(unsigned int irq, irq_flow_handler_t handle) +{ + __irq_set_handler(irq, handle, 0, NULL); +} + +/* + * Set a highlevel chained flow handler for a given IRQ. 
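+ * For example, an interrupt controller or GPIO driver demultiplexing
+ * a parent interrupt would typically do (sketch; the names are
+ * placeholders):
+ *
+ *      irq_set_chained_handler(parent_irq, my_demux_handler);
+ *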
+ * (a chained handler is automatically enabled and set to + * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) + */ +static inline void +irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) +{ + __irq_set_handler(irq, handle, 1, NULL); +} + +void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); + +static inline void irq_set_status_flags(unsigned int irq, unsigned long set) +{ + irq_modify_status(irq, 0, set); +} + +static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) +{ + irq_modify_status(irq, clr, 0); +} + +static inline void irq_set_noprobe(unsigned int irq) +{ + irq_modify_status(irq, 0, IRQ_NOPROBE); +} + +static inline void irq_set_probe(unsigned int irq) +{ + irq_modify_status(irq, IRQ_NOPROBE, 0); +} + +static inline void irq_set_nothread(unsigned int irq) +{ + irq_modify_status(irq, 0, IRQ_NOTHREAD); +} + +static inline void irq_set_thread(unsigned int irq) +{ + irq_modify_status(irq, IRQ_NOTHREAD, 0); +} + +static inline void irq_set_nested_thread(unsigned int irq, bool nest) +{ + if (nest) + irq_set_status_flags(irq, IRQ_NESTED_THREAD); + else + irq_clear_status_flags(irq, IRQ_NESTED_THREAD); +} + +static inline void irq_set_percpu_devid_flags(unsigned int irq) +{ + irq_set_status_flags(irq, + IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD | + IRQ_NOPROBE | IRQ_PER_CPU_DEVID); +} + +/* Handle dynamic irq creation and destruction */ +extern unsigned int create_irq_nr(unsigned int irq_want, int node); +extern int create_irq(void); +extern void destroy_irq(unsigned int irq); + +/* + * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and + * irq_free_desc instead. + */ +extern void dynamic_irq_cleanup(unsigned int irq); +static inline void dynamic_irq_init(unsigned int irq) +{ + dynamic_irq_cleanup(irq); +} + +/* Set/get chip/data for an IRQ: */ +extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); +extern int irq_set_handler_data(unsigned int irq, void *data); +extern int irq_set_chip_data(unsigned int irq, void *data); +extern int irq_set_irq_type(unsigned int irq, unsigned int type); +extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); +extern struct irq_data *irq_get_irq_data(unsigned int irq); + +static inline struct irq_chip *irq_get_chip(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->chip : NULL; +} + +static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) +{ + return d->chip; +} + +static inline void *irq_get_chip_data(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->chip_data : NULL; +} + +static inline void *irq_data_get_irq_chip_data(struct irq_data *d) +{ + return d->chip_data; +} + +static inline void *irq_get_handler_data(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? d->handler_data : NULL; +} + +static inline void *irq_data_get_irq_handler_data(struct irq_data *d) +{ + return d->handler_data; +} + +static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + return d ? 
d->msi_desc : NULL;
+}
+
+static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+{
+ return d->msi_desc;
+}
+
+int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
+ struct module *owner);
+
+/* use macros to avoid needing export.h for THIS_MODULE */
+#define irq_alloc_descs(irq, from, cnt, node) \
+ __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)
+
+#define irq_alloc_desc(node) \
+ irq_alloc_descs(-1, 0, 1, node)
+
+#define irq_alloc_desc_at(at, node) \
+ irq_alloc_descs(at, at, 1, node)
+
+#define irq_alloc_desc_from(from, node) \
+ irq_alloc_descs(-1, from, 1, node)
+
+void irq_free_descs(unsigned int irq, unsigned int cnt);
+int irq_reserve_irqs(unsigned int from, unsigned int cnt);
+
+static inline void irq_free_desc(unsigned int irq)
+{
+ irq_free_descs(irq, 1);
+}
+
+static inline int irq_reserve_irq(unsigned int irq)
+{
+ return irq_reserve_irqs(irq, 1);
+}
+
+#ifndef irq_reg_writel
+# define irq_reg_writel(val, addr) writel(val, addr)
+#endif
+#ifndef irq_reg_readl
+# define irq_reg_readl(addr) readl(addr)
+#endif
+
+/**
+ * struct irq_chip_regs - register offsets for struct irq_chip_generic
+ * @enable: Enable register offset to reg_base
+ * @disable: Disable register offset to reg_base
+ * @mask: Mask register offset to reg_base
+ * @ack: Ack register offset to reg_base
+ * @eoi: Eoi register offset to reg_base
+ * @type: Type configuration register offset to reg_base
+ * @polarity: Polarity configuration register offset to reg_base
+ */
+struct irq_chip_regs {
+ unsigned long enable;
+ unsigned long disable;
+ unsigned long mask;
+ unsigned long ack;
+ unsigned long eoi;
+ unsigned long type;
+ unsigned long polarity;
+};
+
+/**
+ * struct irq_chip_type - Generic interrupt chip instance for a flow type
+ * @chip: The real interrupt chip which provides the callbacks
+ * @regs: Register offsets for this chip
+ * @handler: Flow handler associated with this chip
+ * @type: Chip can handle these flow types
+ *
+ * An irq_chip_generic can have several instances of irq_chip_type when
+ * it requires different functions and register offsets for different
+ * flow types.
+ */
+struct irq_chip_type {
+ struct irq_chip chip;
+ struct irq_chip_regs regs;
+ irq_flow_handler_t handler;
+ u32 type;
+};
+
+/**
+ * struct irq_chip_generic - Generic irq chip data structure
+ * @lock: Lock to protect register and cache data access
+ * @reg_base: Register base address (virtual)
+ * @irq_base: Interrupt base nr for this chip
+ * @irq_cnt: Number of interrupts handled by this chip
+ * @mask_cache: Cached mask register
+ * @type_cache: Cached type register
+ * @polarity_cache: Cached polarity register
+ * @wake_enabled: Interrupt can wakeup from suspend
+ * @wake_active: Interrupt is marked as a wakeup from suspend source
+ * @num_ct: Number of available irq_chip_type instances (usually 1)
+ * @private: Private data for non generic chip callbacks
+ * @list: List head for keeping track of instances
+ * @chip_types: Array of interrupt irq_chip_types
+ *
+ * Note that irq_chip_generic can have multiple irq_chip_type
+ * implementations which can be associated with a particular irq line of
+ * an irq_chip_generic instance. That allows sharing and protecting
+ * state in an irq_chip_generic instance when we need to implement
+ * different flow mechanisms (level/edge) for it.
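+ *
+ * As a rough usage sketch (illustrative only: the chip name, the 0x04 mask
+ * register offset and the irq_base/reg_base variables are assumptions of
+ * this example, not requirements of the header), a driver allocates one
+ * generic chip, fills in the register offsets and the mask helpers declared
+ * further down in this file, and then registers it:
+ *
+ *     struct irq_chip_generic *gc;
+ *
+ *     gc = irq_alloc_generic_chip("demo-intc", 1, irq_base, reg_base,
+ *                                 handle_level_irq);
+ *     gc->chip_types[0].regs.mask = 0x04;
+ *     gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ *     gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+ *     irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
+ *                            IRQ_NOREQUEST, 0);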
+ */ +struct irq_chip_generic { + raw_spinlock_t lock; + void __iomem *reg_base; + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; + u32 type_cache; + u32 polarity_cache; + u32 wake_enabled; + u32 wake_active; + unsigned int num_ct; + void *private; + struct list_head list; + struct irq_chip_type chip_types[0]; +}; + +/** + * enum irq_gc_flags - Initialization flags for generic irq chips + * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg + * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for + * irq chips which need to call irq_set_wake() on + * the parent irq. Usually GPIO implementations + */ +enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1 << 0, + IRQ_GC_INIT_NESTED_LOCK = 1 << 1, +}; + +/* Generic chip callback functions */ +void irq_gc_noop(struct irq_data *d); +void irq_gc_mask_disable_reg(struct irq_data *d); +void irq_gc_mask_set_bit(struct irq_data *d); +void irq_gc_mask_clr_bit(struct irq_data *d); +void irq_gc_unmask_enable_reg(struct irq_data *d); +void irq_gc_ack_set_bit(struct irq_data *d); +void irq_gc_ack_clr_bit(struct irq_data *d); +void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); +void irq_gc_eoi(struct irq_data *d); +int irq_gc_set_wake(struct irq_data *d, unsigned int on); + +/* Setup functions for irq_chip_generic */ +struct irq_chip_generic * +irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, + void __iomem *reg_base, irq_flow_handler_t handler); +void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, + enum irq_gc_flags flags, unsigned int clr, + unsigned int set); +int irq_setup_alt_chip(struct irq_data *d, unsigned int type); +void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, + unsigned int clr, unsigned int set); + +static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) +{ + return container_of(d->chip, struct irq_chip_type, chip); +} + +#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) + +#ifdef CONFIG_SMP +static inline void irq_gc_lock(struct irq_chip_generic *gc) +{ + raw_spin_lock(&gc->lock); +} + +static inline void irq_gc_unlock(struct irq_chip_generic *gc) +{ + raw_spin_unlock(&gc->lock); +} +#else +static inline void irq_gc_lock(struct irq_chip_generic *gc) { } +static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } +#endif + +#endif /* CONFIG_GENERIC_HARDIRQS */ + +#endif /* !CONFIG_S390 */ + +#endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h new file mode 100644 index 00000000..77e4bac2 --- /dev/null +++ b/include/linux/irq_cpustat.h @@ -0,0 +1,31 @@ +#ifndef __irq_cpustat_h +#define __irq_cpustat_h + +/* + * Contains default mappings for irq_cpustat_t, used by almost every + * architecture. Some arch (like s390) have per cpu hardware pages and + * they define their own mappings for irq_stat. + * + * Keith Owens <kaos@ocs.com.au> July 2000. + */ + + +/* + * Simple wrappers reducing source bloat. Define all irq_stat fields + * here, even ones that are arch dependent. That way we get common + * definitions instead of differing sets for each arch. 
+ */ + +#ifndef __ARCH_IRQ_STAT +extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */ +#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) +#endif + + /* arch independent irq_stat fields */ +#define local_softirq_pending() \ + __IRQ_STAT(smp_processor_id(), __softirq_pending) + + /* arch dependent irq_stat fields */ +#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */ + +#endif /* __irq_cpustat_h */ diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h new file mode 100644 index 00000000..6a9e8f53 --- /dev/null +++ b/include/linux/irq_work.h @@ -0,0 +1,23 @@ +#ifndef _LINUX_IRQ_WORK_H +#define _LINUX_IRQ_WORK_H + +#include <linux/llist.h> + +struct irq_work { + unsigned long flags; + struct llist_node llnode; + void (*func)(struct irq_work *); +}; + +static inline +void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) +{ + work->flags = 0; + work->func = func; +} + +bool irq_work_queue(struct irq_work *work); +void irq_work_run(void); +void irq_work_sync(struct irq_work *work); + +#endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h new file mode 100644 index 00000000..f1e25270 --- /dev/null +++ b/include/linux/irqdesc.h @@ -0,0 +1,178 @@ +#ifndef _LINUX_IRQDESC_H +#define _LINUX_IRQDESC_H + +/* + * Core internal functions to deal with irq descriptors + * + * This include will move to kernel/irq once we cleaned up the tree. + * For now it's included from <linux/irq.h> + */ + +struct irq_affinity_notify; +struct proc_dir_entry; +struct timer_rand_state; +struct module; +/** + * struct irq_desc - interrupt descriptor + * @irq_data: per irq and chip data passed down to chip functions + * @timer_rand_state: pointer to timer rand state struct + * @kstat_irqs: irq stats per cpu + * @handle_irq: highlevel irq-events handler + * @preflow_handler: handler called before the flow handler (currently used by sparc) + * @action: the irq action chain + * @status: status information + * @core_internal_state__do_not_mess_with_it: core internal status information + * @depth: disable-depth, for nested irq_disable() calls + * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers + * @irq_count: stats field to detect stalled irqs + * @last_unhandled: aging timer for unhandled count + * @irqs_unhandled: stats field for spurious unhandled interrupts + * @lock: locking for SMP + * @affinity_hint: hint to user space for preferred irq affinity + * @affinity_notify: context for notification of affinity changes + * @pending_mask: pending rebalanced interrupts + * @threads_oneshot: bitfield to handle shared oneshot threads + * @threads_active: number of irqaction threads currently running + * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers + * @dir: /proc/irq/ procfs entry + * @name: flow handler name for /proc/interrupts output + */ +struct irq_desc { + struct irq_data irq_data; + struct timer_rand_state *timer_rand_state; + unsigned int __percpu *kstat_irqs; + irq_flow_handler_t handle_irq; +#ifdef CONFIG_IRQ_PREFLOW_FASTEOI + irq_preflow_handler_t preflow_handler; +#endif + struct irqaction *action; /* IRQ action list */ + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; /* nested irq disables */ + unsigned int wake_depth; /* nested wake enables */ + unsigned int irq_count; /* For detecting broken IRQs */ + unsigned long last_unhandled; /* Aging timer for unhandled count */ + unsigned int irqs_unhandled; + 
raw_spinlock_t lock; + struct cpumask *percpu_enabled; +#ifdef CONFIG_SMP + const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_var_t pending_mask; +#endif +#endif + unsigned long threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *dir; +#endif + struct module *owner; + const char *name; +} ____cacheline_internodealigned_in_smp; + +#ifndef CONFIG_SPARSE_IRQ +extern struct irq_desc irq_desc[NR_IRQS]; +#endif + +#ifdef CONFIG_GENERIC_HARDIRQS + +static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) +{ + return &desc->irq_data; +} + +static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) +{ + return desc->irq_data.chip; +} + +static inline void *irq_desc_get_chip_data(struct irq_desc *desc) +{ + return desc->irq_data.chip_data; +} + +static inline void *irq_desc_get_handler_data(struct irq_desc *desc) +{ + return desc->irq_data.handler_data; +} + +static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) +{ + return desc->irq_data.msi_desc; +} + +/* + * Architectures call this to let the generic IRQ layer + * handle an interrupt. If the descriptor is attached to an + * irqchip-style controller then we call the ->handle_irq() handler, + * and it calls __do_IRQ() if it's attached to an irqtype-style controller. + */ +static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) +{ + desc->handle_irq(irq, desc); +} + +int generic_handle_irq(unsigned int irq); + +/* Test to see if a driver has successfully requested an irq */ +static inline int irq_has_action(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + return desc->action != NULL; +} + +/* caller has locked the irq_desc and both params are valid */ +static inline void __irq_set_handler_locked(unsigned int irq, + irq_flow_handler_t handler) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + desc->handle_irq = handler; +} + +/* caller has locked the irq_desc and both params are valid */ +static inline void +__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handler, const char *name) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + irq_desc_get_irq_data(desc)->chip = chip; + desc->handle_irq = handler; + desc->name = name; +} + +static inline int irq_balancing_disabled(unsigned int irq) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; +} + +static inline void +irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (desc) + lockdep_set_class(&desc->lock, class); +} + +#ifdef CONFIG_IRQ_PREFLOW_FASTEOI +static inline void +__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + desc->preflow_handler = handler; +} +#endif +#endif + +#endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h new file mode 100644 index 00000000..c65740d7 --- /dev/null +++ b/include/linux/irqdomain.h @@ -0,0 +1,186 @@ +/* + * irq_domain - IRQ translation domains + * + * Translation infrastructure between hw and linux irq numbers. This is + * helpful for interrupt controllers to implement mapping between hardware + * irq numbers and the Linux irq number space. 
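+ *
+ * As a brief, illustrative sketch (the size of 32, the ops structure and the
+ * node/priv/hwirq variables below are placeholders of this example, not
+ * requirements of the interface), a controller driver typically creates a
+ * domain and maps hardware irqs through it:
+ *
+ *     struct irq_domain *domain;
+ *     unsigned int virq;
+ *
+ *     domain = irq_domain_add_linear(node, 32, &demo_domain_ops, priv);
+ *     virq = irq_create_mapping(domain, hwirq);
+ *
+ * The returned "virq" is then used like any other Linux irq number.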
+ * + * irq_domains also have a hook for translating device tree interrupt + * representation into a hardware irq number that can be mapped back to a + * Linux irq number without any extra platform support code. + * + * Interrupt controller "domain" data structure. This could be defined as a + * irq domain controller. That is, it handles the mapping between hardware + * and virtual interrupt numbers for a given interrupt domain. The domain + * structure is generally created by the PIC code for a given PIC instance + * (though a domain can cover more than one PIC if they have a flat number + * model). It's the domain callbacks that are responsible for setting the + * irq_chip on a given irq_desc after it's been mapped. + * + * The host code and data structures are agnostic to whether or not + * we use an open firmware device-tree. We do have references to struct + * device_node in two places: in irq_find_host() to find the host matching + * a given interrupt controller node, and of course as an argument to its + * counterpart domain->ops->match() callback. However, those are treated as + * generic pointers by the core and the fact that it's actually a device-node + * pointer is purely a convention between callers and implementation. This + * code could thus be used on other architectures by replacing those two + * by some sort of arch-specific void * "token" used to identify interrupt + * controllers. + */ + +#ifndef _LINUX_IRQDOMAIN_H +#define _LINUX_IRQDOMAIN_H + +#include <linux/types.h> +#include <linux/radix-tree.h> + +struct device_node; +struct irq_domain; +struct of_device_id; + +/* Number of irqs reserved for a legacy isa controller */ +#define NUM_ISA_INTERRUPTS 16 + +/** + * struct irq_domain_ops - Methods for irq_domain objects + * @match: Match an interrupt controller device node to a host, returns + * 1 on a match + * @map: Create or update a mapping between a virtual irq number and a hw + * irq number. This is called only once for a given mapping. + * @unmap: Dispose of such a mapping + * @xlate: Given a device tree node and interrupt specifier, decode + * the hardware irq number and linux irq type value. + * + * Functions below are provided by the driver and called whenever a new mapping + * is created or an old mapping is disposed. The driver can then proceed to + * whatever internal data structures management is required. It also needs + * to setup the irq_desc when returning from map(). + */ +struct irq_domain_ops { + int (*match)(struct irq_domain *d, struct device_node *node); + int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); + void (*unmap)(struct irq_domain *d, unsigned int virq); + int (*xlate)(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type); +}; + +/** + * struct irq_domain - Hardware interrupt number translation object + * @link: Element in global irq_domain list. + * @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This + * will be one of the IRQ_DOMAIN_MAP_* values. + * @revmap_data: Revmap method specific data. + * @ops: pointer to irq_domain methods + * @host_data: private data pointer for use by owner. Not touched by irq_domain + * core code. + * @irq_base: Start of irq_desc range assigned to the irq_domain. The creator + * of the irq_domain is responsible for allocating the array of + * irq_desc structures. 
+ * @nr_irq: Number of irqs managed by the irq domain + * @hwirq_base: Starting number for hwirqs managed by the irq domain + * @of_node: (optional) Pointer to device tree nodes associated with the + * irq_domain. Used when decoding device tree interrupt specifiers. + */ +struct irq_domain { + struct list_head link; + + /* type of reverse mapping_technique */ + unsigned int revmap_type; + union { + struct { + unsigned int size; + unsigned int first_irq; + irq_hw_number_t first_hwirq; + } legacy; + struct { + unsigned int size; + unsigned int *revmap; + } linear; + struct { + unsigned int max_irq; + } nomap; + struct radix_tree_root tree; + } revmap_data; + const struct irq_domain_ops *ops; + void *host_data; + irq_hw_number_t inval_irq; + + /* Optional device node pointer */ + struct device_node *of_node; +}; + +#ifdef CONFIG_IRQ_DOMAIN +struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, + unsigned int size, + unsigned int first_irq, + irq_hw_number_t first_hwirq, + const struct irq_domain_ops *ops, + void *host_data); +struct irq_domain *irq_domain_add_linear(struct device_node *of_node, + unsigned int size, + const struct irq_domain_ops *ops, + void *host_data); +struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, + unsigned int max_irq, + const struct irq_domain_ops *ops, + void *host_data); +struct irq_domain *irq_domain_add_tree(struct device_node *of_node, + const struct irq_domain_ops *ops, + void *host_data); + +extern struct irq_domain *irq_find_host(struct device_node *node); +extern void irq_set_default_host(struct irq_domain *host); + +static inline struct irq_domain *irq_domain_add_legacy_isa( + struct device_node *of_node, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops, + host_data); +} +extern struct irq_domain *irq_find_host(struct device_node *node); +extern void irq_set_default_host(struct irq_domain *host); + + +extern unsigned int irq_create_mapping(struct irq_domain *host, + irq_hw_number_t hwirq); +extern void irq_dispose_mapping(unsigned int virq); +extern unsigned int irq_find_mapping(struct irq_domain *host, + irq_hw_number_t hwirq); +extern unsigned int irq_create_direct_mapping(struct irq_domain *host); +extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq, + irq_hw_number_t hwirq); +extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host, + irq_hw_number_t hwirq); +extern unsigned int irq_linear_revmap(struct irq_domain *host, + irq_hw_number_t hwirq); + +extern const struct irq_domain_ops irq_domain_simple_ops; + +/* stock xlate functions */ +int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); +int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); +int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); + +#if defined(CONFIG_OF_IRQ) +extern void irq_domain_generate_simple(const struct of_device_id *match, + u64 phys_base, unsigned int irq_start); +#else /* CONFIG_OF_IRQ */ +static inline void irq_domain_generate_simple(const struct of_device_id *match, + u64 phys_base, unsigned int irq_start) { } +#endif /* !CONFIG_OF_IRQ */ + +#else /* 
CONFIG_IRQ_DOMAIN */ +static inline void irq_dispose_mapping(unsigned int virq) { } +#endif /* !CONFIG_IRQ_DOMAIN */ + +#endif /* _LINUX_IRQDOMAIN_H */ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h new file mode 100644 index 00000000..d176d658 --- /dev/null +++ b/include/linux/irqflags.h @@ -0,0 +1,150 @@ +/* + * include/linux/irqflags.h + * + * IRQ flags tracing: follow the state of the hardirq and softirq flags and + * provide callbacks for transitions between ON and OFF states. + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() macros from the lowlevel headers. + */ +#ifndef _LINUX_TRACE_IRQFLAGS_H +#define _LINUX_TRACE_IRQFLAGS_H + +#include <linux/typecheck.h> +#include <asm/irqflags.h> + +#ifdef CONFIG_TRACE_IRQFLAGS + extern void trace_softirqs_on(unsigned long ip); + extern void trace_softirqs_off(unsigned long ip); + extern void trace_hardirqs_on(void); + extern void trace_hardirqs_off(void); +# define trace_hardirq_context(p) ((p)->hardirq_context) +# define trace_softirq_context(p) ((p)->softirq_context) +# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) +# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) +# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) +# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) +# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, +#else +# define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_off() do { } while (0) +# define trace_softirqs_on(ip) do { } while (0) +# define trace_softirqs_off(ip) do { } while (0) +# define trace_hardirq_context(p) 0 +# define trace_softirq_context(p) 0 +# define trace_hardirqs_enabled(p) 0 +# define trace_softirqs_enabled(p) 0 +# define trace_hardirq_enter() do { } while (0) +# define trace_hardirq_exit() do { } while (0) +# define lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do { } while (0) +# define INIT_TRACE_IRQFLAGS +#endif + +#if defined(CONFIG_IRQSOFF_TRACER) || \ + defined(CONFIG_PREEMPT_TRACER) + extern void stop_critical_timings(void); + extern void start_critical_timings(void); +#else +# define stop_critical_timings() do { } while (0) +# define start_critical_timings() do { } while (0) +#endif + +/* + * Wrap the arch provided IRQ routines to provide appropriate checks. + */ +#define raw_local_irq_disable() arch_local_irq_disable() +#define raw_local_irq_enable() arch_local_irq_enable() +#define raw_local_irq_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = arch_local_irq_save(); \ + } while (0) +#define raw_local_irq_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + arch_local_irq_restore(flags); \ + } while (0) +#define raw_local_save_flags(flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = arch_local_save_flags(); \ + } while (0) +#define raw_irqs_disabled_flags(flags) \ + ({ \ + typecheck(unsigned long, flags); \ + arch_irqs_disabled_flags(flags); \ + }) +#define raw_irqs_disabled() (arch_irqs_disabled()) +#define raw_safe_halt() arch_safe_halt() + +/* + * The local_irq_*() APIs are equal to the raw_local_irq*() + * if !TRACE_IRQFLAGS. 
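+ *
+ * A minimal usage sketch (nothing here is specific to this header beyond
+ * the two macros themselves; the critical section is whatever the caller
+ * needs to protect):
+ *
+ *     unsigned long flags;
+ *
+ *     local_irq_save(flags);
+ *     ... code that must not race with local interrupts ...
+ *     local_irq_restore(flags);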
+ */ +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +#define local_irq_enable() \ + do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) +#define local_irq_disable() \ + do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) +#define local_irq_save(flags) \ + do { \ + raw_local_irq_save(flags); \ + trace_hardirqs_off(); \ + } while (0) + + +#define local_irq_restore(flags) \ + do { \ + if (raw_irqs_disabled_flags(flags)) { \ + raw_local_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ + trace_hardirqs_on(); \ + raw_local_irq_restore(flags); \ + } \ + } while (0) +#define local_save_flags(flags) \ + do { \ + raw_local_save_flags(flags); \ + } while (0) + +#define irqs_disabled_flags(flags) \ + ({ \ + raw_irqs_disabled_flags(flags); \ + }) + +#define irqs_disabled() \ + ({ \ + unsigned long _flags; \ + raw_local_save_flags(_flags); \ + raw_irqs_disabled_flags(_flags); \ + }) + +#define safe_halt() \ + do { \ + trace_hardirqs_on(); \ + raw_safe_halt(); \ + } while (0) + + +#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ + +#define local_irq_enable() do { raw_local_irq_enable(); } while (0) +#define local_irq_disable() do { raw_local_irq_disable(); } while (0) +#define local_irq_save(flags) \ + do { \ + raw_local_irq_save(flags); \ + } while (0) +#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) +#define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0) +#define irqs_disabled() (raw_irqs_disabled()) +#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags)) +#define safe_halt() do { raw_safe_halt(); } while (0) + +#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + +#endif diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h new file mode 100644 index 00000000..3bc4dcab --- /dev/null +++ b/include/linux/irqnr.h @@ -0,0 +1,62 @@ +#ifndef _LINUX_IRQNR_H +#define _LINUX_IRQNR_H + +/* + * Generic irq_desc iterators: + */ +#ifdef __KERNEL__ + +#ifndef CONFIG_GENERIC_HARDIRQS +#include <asm/irq.h> + +/* + * Wrappers for non-genirq architectures: + */ +#define nr_irqs NR_IRQS +#define irq_to_desc(irq) (&irq_desc[irq]) + +# define for_each_irq_desc(irq, desc) \ + for (irq = 0; irq < nr_irqs; irq++) + +# define for_each_irq_desc_reverse(irq, desc) \ + for (irq = nr_irqs - 1; irq >= 0; irq--) + +#else /* CONFIG_GENERIC_HARDIRQS */ + +extern int nr_irqs; +extern struct irq_desc *irq_to_desc(unsigned int irq); +unsigned int irq_get_next_irq(unsigned int offset); + +# define for_each_irq_desc(irq, desc) \ + for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ + irq++, desc = irq_to_desc(irq)) \ + if (!desc) \ + ; \ + else + + +# define for_each_irq_desc_reverse(irq, desc) \ + for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ + irq--, desc = irq_to_desc(irq)) \ + if (!desc) \ + ; \ + else + +#ifdef CONFIG_SMP +#define irq_node(irq) (irq_get_irq_data(irq)->node) +#else +#define irq_node(irq) 0 +#endif + +# define for_each_active_irq(irq) \ + for (irq = irq_get_next_irq(0); irq < nr_irqs; \ + irq = irq_get_next_irq(irq + 1)) + +#endif /* CONFIG_GENERIC_HARDIRQS */ + +#define for_each_irq_nr(irq) \ + for (irq = 0; irq < nr_irqs; irq++) + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h new file mode 100644 index 00000000..714ba08d --- /dev/null +++ b/include/linux/irqreturn.h @@ -0,0 +1,19 @@ +#ifndef _LINUX_IRQRETURN_H +#define _LINUX_IRQRETURN_H + +/** + * enum irqreturn + * @IRQ_NONE interrupt was not from this device + * @IRQ_HANDLED interrupt was handled by this 
device + * @IRQ_WAKE_THREAD handler requests to wake the handler thread + */ +enum irqreturn { + IRQ_NONE = (0 << 0), + IRQ_HANDLED = (1 << 0), + IRQ_WAKE_THREAD = (1 << 1), +}; + +typedef enum irqreturn irqreturn_t; +#define IRQ_RETVAL(x) ((x) != IRQ_NONE) + +#endif diff --git a/include/linux/isa.h b/include/linux/isa.h new file mode 100644 index 00000000..b0270e38 --- /dev/null +++ b/include/linux/isa.h @@ -0,0 +1,39 @@ +/* + * ISA bus. + */ + +#ifndef __LINUX_ISA_H +#define __LINUX_ISA_H + +#include <linux/device.h> +#include <linux/kernel.h> + +struct isa_driver { + int (*match)(struct device *, unsigned int); + int (*probe)(struct device *, unsigned int); + int (*remove)(struct device *, unsigned int); + void (*shutdown)(struct device *, unsigned int); + int (*suspend)(struct device *, unsigned int, pm_message_t); + int (*resume)(struct device *, unsigned int); + + struct device_driver driver; + struct device *devices; +}; + +#define to_isa_driver(x) container_of((x), struct isa_driver, driver) + +#ifdef CONFIG_ISA +int isa_register_driver(struct isa_driver *, unsigned int); +void isa_unregister_driver(struct isa_driver *); +#else +static inline int isa_register_driver(struct isa_driver *d, unsigned int i) +{ + return 0; +} + +static inline void isa_unregister_driver(struct isa_driver *d) +{ +} +#endif + +#endif /* __LINUX_ISA_H */ diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h new file mode 100644 index 00000000..e2d28b02 --- /dev/null +++ b/include/linux/isapnp.h @@ -0,0 +1,125 @@ +/* + * ISA Plug & Play support + * Copyright (c) by Jaroslav Kysela <perex@suse.cz> + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#ifndef LINUX_ISAPNP_H +#define LINUX_ISAPNP_H + +#include <linux/errno.h> +#include <linux/pnp.h> + +/* + * + */ + +#define ISAPNP_VENDOR(a,b,c) (((((a)-'A'+1)&0x3f)<<2)|\ + ((((b)-'A'+1)&0x18)>>3)|((((b)-'A'+1)&7)<<13)|\ + ((((c)-'A'+1)&0x1f)<<8)) +#define ISAPNP_DEVICE(x) ((((x)&0xf000)>>8)|\ + (((x)&0x0f00)>>8)|\ + (((x)&0x00f0)<<8)|\ + (((x)&0x000f)<<8)) +#define ISAPNP_FUNCTION(x) ISAPNP_DEVICE(x) + +/* + * + */ + +#ifdef __KERNEL__ +#include <linux/mod_devicetable.h> + +#define DEVICE_COUNT_COMPATIBLE 4 + +#define ISAPNP_CARD_DEVS 8 + +#define ISAPNP_CARD_ID(_va, _vb, _vc, _device) \ + .card_vendor = ISAPNP_VENDOR(_va, _vb, _vc), .card_device = ISAPNP_DEVICE(_device) +#define ISAPNP_CARD_END \ + .card_vendor = 0, .card_device = 0 +#define ISAPNP_DEVICE_ID(_va, _vb, _vc, _function) \ + { .vendor = ISAPNP_VENDOR(_va, _vb, _vc), .function = ISAPNP_FUNCTION(_function) } + +/* export used IDs outside module */ +#define ISAPNP_CARD_TABLE(name) \ + MODULE_GENERIC_TABLE(isapnp_card, name) + +struct isapnp_card_id { + unsigned long driver_data; /* data private to the driver */ + unsigned short card_vendor, card_device; + struct { + unsigned short vendor, function; + } devs[ISAPNP_CARD_DEVS]; /* logical devices */ +}; + +#define ISAPNP_DEVICE_SINGLE(_cva, _cvb, _cvc, _cdevice, _dva, _dvb, _dvc, _dfunction) \ + .card_vendor = ISAPNP_VENDOR(_cva, _cvb, _cvc), .card_device = ISAPNP_DEVICE(_cdevice), \ + .vendor = ISAPNP_VENDOR(_dva, _dvb, _dvc), .function = ISAPNP_FUNCTION(_dfunction) +#define ISAPNP_DEVICE_SINGLE_END \ + .card_vendor = 0, .card_device = 0 + +#if defined(CONFIG_ISAPNP) || (defined(CONFIG_ISAPNP_MODULE) && defined(MODULE)) + +#define __ISAPNP__ + +/* lowlevel configuration */ +int isapnp_present(void); +int isapnp_cfg_begin(int csn, int device); +int isapnp_cfg_end(void); +unsigned char isapnp_read_byte(unsigned char idx); +void isapnp_write_byte(unsigned char idx, unsigned char val); + +#ifdef CONFIG_PROC_FS +int isapnp_proc_init(void); +int isapnp_proc_done(void); +#else +static inline int isapnp_proc_init(void) { return 0; } +static inline int isapnp_proc_done(void) { return 0; } +#endif + +/* compat */ +struct pnp_card *pnp_find_card(unsigned short vendor, + unsigned short device, + struct pnp_card *from); +struct pnp_dev *pnp_find_dev(struct pnp_card *card, + unsigned short vendor, + unsigned short function, + struct pnp_dev *from); + +#else /* !CONFIG_ISAPNP */ + +/* lowlevel configuration */ +static inline int isapnp_present(void) { return 0; } +static inline int isapnp_cfg_begin(int csn, int device) { return -ENODEV; } +static inline int isapnp_cfg_end(void) { return -ENODEV; } +static inline unsigned char isapnp_read_byte(unsigned char idx) { return 0xff; } +static inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; } + +static inline struct pnp_card *pnp_find_card(unsigned short vendor, + unsigned short device, + struct pnp_card *from) { return NULL; } +static inline struct pnp_dev *pnp_find_dev(struct pnp_card *card, + unsigned short vendor, + unsigned short function, + struct pnp_dev *from) { return NULL; } + +#endif /* CONFIG_ISAPNP */ + +#endif /* __KERNEL__ */ +#endif /* LINUX_ISAPNP_H */ diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h new file mode 100644 index 00000000..2a8b1659 --- /dev/null +++ b/include/linux/iscsi_boot_sysfs.h @@ -0,0 +1,133 @@ +/* + * Export the iSCSI boot info to userland via sysfs. + * + * Copyright (C) 2010 Red Hat, Inc. All rights reserved. 
+ * Copyright (C) 2010 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _ISCSI_BOOT_SYSFS_ +#define _ISCSI_BOOT_SYSFS_ + +/* + * The text attributes names for each of the kobjects. +*/ +enum iscsi_boot_eth_properties_enum { + ISCSI_BOOT_ETH_INDEX, + ISCSI_BOOT_ETH_FLAGS, + ISCSI_BOOT_ETH_IP_ADDR, + ISCSI_BOOT_ETH_SUBNET_MASK, + ISCSI_BOOT_ETH_ORIGIN, + ISCSI_BOOT_ETH_GATEWAY, + ISCSI_BOOT_ETH_PRIMARY_DNS, + ISCSI_BOOT_ETH_SECONDARY_DNS, + ISCSI_BOOT_ETH_DHCP, + ISCSI_BOOT_ETH_VLAN, + ISCSI_BOOT_ETH_MAC, + /* eth_pci_bdf - this is replaced by link to the device itself. */ + ISCSI_BOOT_ETH_HOSTNAME, + ISCSI_BOOT_ETH_END_MARKER, +}; + +enum iscsi_boot_tgt_properties_enum { + ISCSI_BOOT_TGT_INDEX, + ISCSI_BOOT_TGT_FLAGS, + ISCSI_BOOT_TGT_IP_ADDR, + ISCSI_BOOT_TGT_PORT, + ISCSI_BOOT_TGT_LUN, + ISCSI_BOOT_TGT_CHAP_TYPE, + ISCSI_BOOT_TGT_NIC_ASSOC, + ISCSI_BOOT_TGT_NAME, + ISCSI_BOOT_TGT_CHAP_NAME, + ISCSI_BOOT_TGT_CHAP_SECRET, + ISCSI_BOOT_TGT_REV_CHAP_NAME, + ISCSI_BOOT_TGT_REV_CHAP_SECRET, + ISCSI_BOOT_TGT_END_MARKER, +}; + +enum iscsi_boot_initiator_properties_enum { + ISCSI_BOOT_INI_INDEX, + ISCSI_BOOT_INI_FLAGS, + ISCSI_BOOT_INI_ISNS_SERVER, + ISCSI_BOOT_INI_SLP_SERVER, + ISCSI_BOOT_INI_PRI_RADIUS_SERVER, + ISCSI_BOOT_INI_SEC_RADIUS_SERVER, + ISCSI_BOOT_INI_INITIATOR_NAME, + ISCSI_BOOT_INI_END_MARKER, +}; + +struct attribute_group; + +struct iscsi_boot_kobj { + struct kobject kobj; + struct attribute_group *attr_group; + struct list_head list; + + /* + * Pointer to store driver specific info. If set this will + * be freed for the LLD when the kobj release function is called. + */ + void *data; + /* + * Driver specific show function. + * + * The enum of the type. This can be any value of the above + * properties. + */ + ssize_t (*show) (void *data, int type, char *buf); + + /* + * Drivers specific visibility function. + * The function should return if they the attr should be readable + * writable or should not be shown. + * + * The enum of the type. This can be any value of the above + * properties. + */ + umode_t (*is_visible) (void *data, int type); + + /* + * Driver specific release function. + * + * The function should free the data passed in. 
+ */ + void (*release) (void *data); +}; + +struct iscsi_boot_kset { + struct list_head kobj_list; + struct kset *kset; +}; + +struct iscsi_boot_kobj * +iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)); + +struct iscsi_boot_kobj * +iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)); +struct iscsi_boot_kobj * +iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + umode_t (*is_visible) (void *data, int type), + void (*release) (void *data)); + +struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); +struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); +void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset); + +#endif diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h new file mode 100644 index 00000000..8ba7e5b9 --- /dev/null +++ b/include/linux/iscsi_ibft.h @@ -0,0 +1,46 @@ +/* + * Copyright 2007 Red Hat, Inc. + * by Peter Jones <pjones@redhat.com> + * Copyright 2007 IBM, Inc. + * by Konrad Rzeszutek <konradr@linux.vnet.ibm.com> + * Copyright 2008 + * by Konrad Rzeszutek <ketuzsezr@darnok.org> + * + * This code exposes the iSCSI Boot Format Table to userland via sysfs. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef ISCSI_IBFT_H +#define ISCSI_IBFT_H + +#include <acpi/acpi.h> + +/* + * Logical location of iSCSI Boot Format Table. + * If the value is NULL there is no iBFT on the machine. + */ +extern struct acpi_table_ibft *ibft_addr; + +/* + * Routine used to find and reserve the iSCSI Boot Format Table. The + * mapped address is set in the ibft_addr variable. + */ +#ifdef CONFIG_ISCSI_IBFT_FIND +unsigned long find_ibft_region(unsigned long *sizep); +#else +static inline unsigned long find_ibft_region(unsigned long *sizep) +{ + *sizep = 0; + return 0; +} +#endif + +#endif /* ISCSI_IBFT_H */ diff --git a/include/linux/isdn.h b/include/linux/isdn.h new file mode 100644 index 00000000..292f27a7 --- /dev/null +++ b/include/linux/isdn.h @@ -0,0 +1,622 @@ +/* $Id: isdn.h,v 1.125.2.3 2004/02/10 01:07:14 keil Exp $ + * + * Main header for the Linux ISDN subsystem (linklevel). + * + * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) + * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg + * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + */ + +#ifndef __ISDN_H__ +#define __ISDN_H__ + +#include <linux/ioctl.h> + +#define ISDN_MAX_DRIVERS 32 +#define ISDN_MAX_CHANNELS 64 + +/* New ioctl-codes */ +#define IIOCNETAIF _IO('I',1) +#define IIOCNETDIF _IO('I',2) +#define IIOCNETSCF _IO('I',3) +#define IIOCNETGCF _IO('I',4) +#define IIOCNETANM _IO('I',5) +#define IIOCNETDNM _IO('I',6) +#define IIOCNETGNM _IO('I',7) +#define IIOCGETSET _IO('I',8) /* no longer supported */ +#define IIOCSETSET _IO('I',9) /* no longer supported */ +#define IIOCSETVER _IO('I',10) +#define IIOCNETHUP _IO('I',11) +#define IIOCSETGST _IO('I',12) +#define IIOCSETBRJ _IO('I',13) +#define IIOCSIGPRF _IO('I',14) +#define IIOCGETPRF _IO('I',15) +#define IIOCSETPRF _IO('I',16) +#define IIOCGETMAP _IO('I',17) +#define IIOCSETMAP _IO('I',18) +#define IIOCNETASL _IO('I',19) +#define IIOCNETDIL _IO('I',20) +#define IIOCGETCPS _IO('I',21) +#define IIOCGETDVR _IO('I',22) +#define IIOCNETLCR _IO('I',23) /* dwabc ioctl for LCR from isdnlog */ +#define IIOCNETDWRSET _IO('I',24) /* dwabc ioctl to reset abc-values to default on a net-interface */ + +#define IIOCNETALN _IO('I',32) +#define IIOCNETDLN _IO('I',33) + +#define IIOCNETGPN _IO('I',34) + +#define IIOCDBGVAR _IO('I',127) + +#define IIOCDRVCTL _IO('I',128) + +/* cisco hdlck device private ioctls */ +#define SIOCGKEEPPERIOD (SIOCDEVPRIVATE + 0) +#define SIOCSKEEPPERIOD (SIOCDEVPRIVATE + 1) +#define SIOCGDEBSERINT (SIOCDEVPRIVATE + 2) +#define SIOCSDEBSERINT (SIOCDEVPRIVATE + 3) + +/* Packet encapsulations for net-interfaces */ +#define ISDN_NET_ENCAP_ETHER 0 +#define ISDN_NET_ENCAP_RAWIP 1 +#define ISDN_NET_ENCAP_IPTYP 2 +#define ISDN_NET_ENCAP_CISCOHDLC 3 /* Without SLARP and keepalive */ +#define ISDN_NET_ENCAP_SYNCPPP 4 +#define ISDN_NET_ENCAP_UIHDLC 5 +#define ISDN_NET_ENCAP_CISCOHDLCK 6 /* With SLARP and keepalive */ +#define ISDN_NET_ENCAP_X25IFACE 7 /* Documentation/networking/x25-iface.txt */ +#define ISDN_NET_ENCAP_MAX_ENCAP ISDN_NET_ENCAP_X25IFACE + +/* Facility which currently uses an ISDN-channel */ +#define ISDN_USAGE_NONE 0 +#define ISDN_USAGE_RAW 1 +#define ISDN_USAGE_MODEM 2 +#define ISDN_USAGE_NET 3 +#define ISDN_USAGE_VOICE 4 +#define ISDN_USAGE_FAX 5 +#define ISDN_USAGE_MASK 7 /* Mask to get plain usage */ +#define ISDN_USAGE_DISABLED 32 /* This bit is set, if channel is disabled */ +#define ISDN_USAGE_EXCLUSIVE 64 /* This bit is set, if channel is exclusive */ +#define ISDN_USAGE_OUTGOING 128 /* This bit is set, if channel is outgoing */ + +#define ISDN_MODEM_NUMREG 24 /* Number of Modem-Registers */ +#define ISDN_LMSNLEN 255 /* Length of tty's Listen-MSN string */ +#define ISDN_CMSGLEN 50 /* Length of CONNECT-Message to add for Modem */ + +#define ISDN_MSNLEN 32 +#define NET_DV 0x06 /* Data version for isdn_net_ioctl_cfg */ +#define TTY_DV 0x06 /* Data version for iprofd etc. 
*/ + +#define INF_DV 0x01 /* Data version for /dev/isdninfo */ + +typedef struct { + char drvid[25]; + unsigned long arg; +} isdn_ioctl_struct; + +typedef struct { + char name[10]; + char phone[ISDN_MSNLEN]; + int outgoing; +} isdn_net_ioctl_phone; + +typedef struct { + char name[10]; /* Name of interface */ + char master[10]; /* Name of Master for Bundling */ + char slave[10]; /* Name of Slave for Bundling */ + char eaz[256]; /* EAZ/MSN */ + char drvid[25]; /* DriverId for Bindings */ + int onhtime; /* Hangup-Timeout */ + int charge; /* Charge-Units */ + int l2_proto; /* Layer-2 protocol */ + int l3_proto; /* Layer-3 protocol */ + int p_encap; /* Encapsulation */ + int exclusive; /* Channel, if bound exclusive */ + int dialmax; /* Dial Retry-Counter */ + int slavedelay; /* Delay until slave starts up */ + int cbdelay; /* Delay before Callback */ + int chargehup; /* Flag: Charge-Hangup */ + int ihup; /* Flag: Hangup-Timeout on incoming line */ + int secure; /* Flag: Secure */ + int callback; /* Flag: Callback */ + int cbhup; /* Flag: Reject Call before Callback */ + int pppbind; /* ippp device for bindings */ + int chargeint; /* Use fixed charge interval length */ + int triggercps; /* BogoCPS needed for triggering slave */ + int dialtimeout; /* Dial-Timeout */ + int dialwait; /* Time to wait after failed dial */ + int dialmode; /* Flag: off / on / auto */ +} isdn_net_ioctl_cfg; + +#define ISDN_NET_DIALMODE_MASK 0xC0 /* bits for status */ +#define ISDN_NET_DM_OFF 0x00 /* this interface is stopped */ +#define ISDN_NET_DM_MANUAL 0x40 /* this interface is on (manual) */ +#define ISDN_NET_DM_AUTO 0x80 /* this interface is autodial */ +#define ISDN_NET_DIALMODE(x) ((&(x))->flags & ISDN_NET_DIALMODE_MASK) + +#ifdef __KERNEL__ + +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/major.h> +#include <asm/io.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/wait.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/serial_reg.h> +#include <linux/fcntl.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/tcp.h> +#include <linux/mutex.h> + +#define ISDN_TTY_MAJOR 43 +#define ISDN_TTYAUX_MAJOR 44 +#define ISDN_MAJOR 45 + +/* The minor-devicenumbers for Channel 0 and 1 are used as arguments for + * physical Channel-Mapping, so they MUST NOT be changed without changing + * the correspondent code in isdn.c + */ + +#define ISDN_MINOR_B 0 +#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1) +#define ISDN_MINOR_CTRL 64 +#define ISDN_MINOR_CTRLMAX (64 + (ISDN_MAX_CHANNELS-1)) +#define ISDN_MINOR_PPP 128 +#define ISDN_MINOR_PPPMAX (128 + (ISDN_MAX_CHANNELS-1)) +#define ISDN_MINOR_STATUS 255 + +#ifdef CONFIG_ISDN_PPP + +#ifdef CONFIG_ISDN_PPP_VJ +# include <net/slhc_vj.h> +#endif + +#include <linux/ppp_defs.h> +#include <linux/ppp-ioctl.h> + +#include <linux/isdn_ppp.h> +#endif + +#ifdef CONFIG_ISDN_X25 +# include <linux/concap.h> +#endif + +#include <linux/isdnif.h> + +#define ISDN_DRVIOCTL_MASK 0x7f /* Mask for Device-ioctl */ + +/* Until now unused */ +#define ISDN_SERVICE_VOICE 1 +#define ISDN_SERVICE_AB 1<<1 +#define ISDN_SERVICE_X21 1<<2 +#define ISDN_SERVICE_G4 1<<3 +#define ISDN_SERVICE_BTX 1<<4 +#define ISDN_SERVICE_DFUE 1<<5 +#define ISDN_SERVICE_X25 1<<6 +#define ISDN_SERVICE_TTX 1<<7 +#define ISDN_SERVICE_MIXED 1<<8 +#define ISDN_SERVICE_FW 
1<<9 +#define ISDN_SERVICE_GTEL 1<<10 +#define ISDN_SERVICE_BTXN 1<<11 +#define ISDN_SERVICE_BTEL 1<<12 + +/* Macros checking plain usage */ +#define USG_NONE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NONE) +#define USG_RAW(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_RAW) +#define USG_MODEM(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) +#define USG_VOICE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) +#define USG_NET(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NET) +#define USG_FAX(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_FAX) +#define USG_OUTGOING(x) ((x & ISDN_USAGE_OUTGOING)==ISDN_USAGE_OUTGOING) +#define USG_MODEMORVOICE(x) (((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) || \ + ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) ) + +/* Timer-delays and scheduling-flags */ +#define ISDN_TIMER_RES 4 /* Main Timer-Resolution */ +#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 .2 sec */ +#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 1 sec */ +#define ISDN_TIMER_RINGING 5 /* tty RINGs = ISDN_TIMER_1SEC * this factor */ +#define ISDN_TIMER_KEEPINT 10 /* Cisco-Keepalive = ISDN_TIMER_1SEC * this factor */ +#define ISDN_TIMER_MODEMREAD 1 +#define ISDN_TIMER_MODEMPLUS 2 +#define ISDN_TIMER_MODEMRING 4 +#define ISDN_TIMER_MODEMXMIT 8 +#define ISDN_TIMER_NETDIAL 16 +#define ISDN_TIMER_NETHANGUP 32 +#define ISDN_TIMER_CARRIER 256 /* Wait for Carrier */ +#define ISDN_TIMER_FAST (ISDN_TIMER_MODEMREAD | ISDN_TIMER_MODEMPLUS | \ + ISDN_TIMER_MODEMXMIT) +#define ISDN_TIMER_SLOW (ISDN_TIMER_MODEMRING | ISDN_TIMER_NETHANGUP | \ + ISDN_TIMER_NETDIAL | ISDN_TIMER_CARRIER) + +/* Timeout-Values for isdn_net_dial() */ +#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) +#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) +#define ISDN_TIMER_DTIMEOUT60 (60*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) + +/* GLOBAL_FLAGS */ +#define ISDN_GLOBAL_STOPPED 1 + +/*=================== Start of ip-over-ISDN stuff =========================*/ + +/* Feature- and status-flags for a net-interface */ +#define ISDN_NET_CONNECTED 0x01 /* Bound to ISDN-Channel */ +#define ISDN_NET_SECURE 0x02 /* Accept calls from phonelist only */ +#define ISDN_NET_CALLBACK 0x04 /* activate callback */ +#define ISDN_NET_CBHUP 0x08 /* hangup before callback */ +#define ISDN_NET_CBOUT 0x10 /* remote machine does callback */ + +#define ISDN_NET_MAGIC 0x49344C02 /* for paranoia-checking */ + +/* Phone-list-element */ +typedef struct { + void *next; + char num[ISDN_MSNLEN]; +} isdn_net_phone; + +/* + Principles when extending structures for generic encapsulation protocol + ("concap") support: + - Stuff which is hardware specific (here i4l-specific) goes in + the netdev -> local structure (here: isdn_net_local) + - Stuff which is encapsulation protocol specific goes in the structure + which holds the linux device structure (here: isdn_net_device) +*/ + +/* Local interface-data */ +typedef struct isdn_net_local_s { + ulong magic; + struct net_device_stats stats; /* Ethernet Statistics */ + int isdn_device; /* Index to isdn-device */ + int isdn_channel; /* Index to isdn-channel */ + int ppp_slot; /* PPPD device slot number */ + int pre_device; /* Preselected isdn-device */ + int pre_channel; /* Preselected isdn-channel */ + int exclusive; /* If non-zero idx to reserved chan.*/ + int flags; /* Connection-flags */ + int dialretry; /* Counter for Dialout-retries */ + int dialmax; /* Max. 
Number of Dial-retries */ + int cbdelay; /* Delay before Callback starts */ + int dtimer; /* Timeout-counter for dialing */ + char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */ + u_char cbhup; /* Flag: Reject Call before Callback*/ + u_char dialstate; /* State for dialing */ + u_char p_encap; /* Packet encapsulation */ + /* 0 = Ethernet over ISDN */ + /* 1 = RAW-IP */ + /* 2 = IP with type field */ + u_char l2_proto; /* Layer-2-protocol */ + /* See ISDN_PROTO_L2..-constants in */ + /* isdnif.h */ + /* 0 = X75/LAPB with I-Frames */ + /* 1 = X75/LAPB with UI-Frames */ + /* 2 = X75/LAPB with BUI-Frames */ + /* 3 = HDLC */ + u_char l3_proto; /* Layer-3-protocol */ + /* See ISDN_PROTO_L3..-constants in */ + /* isdnif.h */ + /* 0 = Transparent */ + int huptimer; /* Timeout-counter for auto-hangup */ + int charge; /* Counter for charging units */ + ulong chargetime; /* Timer for Charging info */ + int hupflags; /* Flags for charge-unit-hangup: */ + /* bit0: chargeint is invalid */ + /* bit1: Getting charge-interval */ + /* bit2: Do charge-unit-hangup */ + /* bit3: Do hangup even on incoming */ + int outgoing; /* Flag: outgoing call */ + int onhtime; /* Time to keep link up */ + int chargeint; /* Interval between charge-infos */ + int onum; /* Flag: at least 1 outgoing number */ + int cps; /* current speed of this interface */ + int transcount; /* byte-counter for cps-calculation */ + int sqfull; /* Flag: netdev-queue overloaded */ + ulong sqfull_stamp; /* Start-Time of overload */ + ulong slavedelay; /* Dynamic bundling delaytime */ + int triggercps; /* BogoCPS needed for trigger slave */ + isdn_net_phone *phone[2]; /* List of remote-phonenumbers */ + /* phone[0] = Incoming Numbers */ + /* phone[1] = Outgoing Numbers */ + isdn_net_phone *dial; /* Pointer to dialed number */ + struct net_device *master; /* Ptr to Master device for slaves */ + struct net_device *slave; /* Ptr to Slave device for masters */ + struct isdn_net_local_s *next; /* Ptr to next link in bundle */ + struct isdn_net_local_s *last; /* Ptr to last link in bundle */ + struct isdn_net_dev_s *netdev; /* Ptr to netdev */ + struct sk_buff_head super_tx_queue; /* List of supervisory frames to */ + /* be transmitted asap */ + atomic_t frame_cnt; /* number of frames currently */ + /* queued in HL driver */ + /* Ptr to orig. hard_header_cache */ + spinlock_t xmit_lock; /* used to protect the xmit path of */ + /* a particular channel (including */ + /* the frame_cnt */ + + int pppbind; /* ippp device for bindings */ + int dialtimeout; /* How long shall we try on dialing? (jiffies) */ + int dialwait; /* How long shall we wait after failed attempt? (jiffies) */ + ulong dialstarted; /* jiffies of first dialing-attempt */ + ulong dialwait_timer; /* jiffies of earliest next dialing-attempt */ + int huptimeout; /* How long will the connection be up? (seconds) */ +#ifdef CONFIG_ISDN_X25 + struct concap_device_ops *dops; /* callbacks used by encapsulator */ +#endif + /* use an own struct for that in later versions */ + ulong cisco_myseq; /* Local keepalive seq. for Cisco */ + ulong cisco_mineseen; /* returned keepalive seq. from remote */ + ulong cisco_yourseq; /* Remote keepalive seq. 
for Cisco */ + int cisco_keepalive_period; /* keepalive period */ + ulong cisco_last_slarp_in; /* jiffie of last keepalive packet we received */ + char cisco_line_state; /* state of line according to keepalive packets */ + char cisco_debserint; /* debugging flag of cisco hdlc with slarp */ + struct timer_list cisco_timer; + struct work_struct tqueue; +} isdn_net_local; + +/* the interface itself */ +typedef struct isdn_net_dev_s { + isdn_net_local *local; + isdn_net_local *queue; /* circular list of all bundled + channels, which are currently + online */ + spinlock_t queue_lock; /* lock to protect queue */ + void *next; /* Pointer to next isdn-interface */ + struct net_device *dev; /* interface to upper levels */ +#ifdef CONFIG_ISDN_PPP + ippp_bundle * pb; /* pointer to the common bundle structure + * with the per-bundle data */ +#endif +#ifdef CONFIG_ISDN_X25 + struct concap_proto *cprot; /* connection oriented encapsulation protocol */ +#endif + +} isdn_net_dev; + +/*===================== End of ip-over-ISDN stuff ===========================*/ + +/*======================= Start of ISDN-tty stuff ===========================*/ + +#define ISDN_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */ +#define ISDN_ASYNC_INITIALIZED 0x80000000 /* port was initialized */ +#define ISDN_ASYNC_CALLOUT_ACTIVE 0x40000000 /* Call out device active */ +#define ISDN_ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device active */ +#define ISDN_ASYNC_CLOSING 0x08000000 /* Serial port is closing */ +#define ISDN_ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */ +#define ISDN_ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */ +#define ISDN_ASYNC_HUP_NOTIFY 0x0001 /* Notify tty on hangups/closes */ +#define ISDN_ASYNC_SESSION_LOCKOUT 0x0100 /* Lock cua opens on session */ +#define ISDN_ASYNC_PGRP_LOCKOUT 0x0200 /* Lock cua opens on pgrp */ +#define ISDN_ASYNC_CALLOUT_NOHUP 0x0400 /* No hangup for cui */ +#define ISDN_ASYNC_SPLIT_TERMIOS 0x0008 /* Sep. termios for dialin/out */ +#define ISDN_SERIAL_XMIT_SIZE 1024 /* Default bufsize for write */ +#define ISDN_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */ +#define ISDN_SERIAL_TYPE_NORMAL 1 +#define ISDN_SERIAL_TYPE_CALLOUT 2 + +#ifdef CONFIG_ISDN_AUDIO +/* For using sk_buffs with audio we need some private variables + * within each sk_buff. For this purpose, we declare a struct here, + * and put it always at the private skb->cb data array. A few macros help + * accessing the variables. + */ +typedef struct _isdn_audio_data { + unsigned short dle_count; + unsigned char lock; +} isdn_audio_data_t; + +#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdn_audio_data_t *)&skb->cb[0])->dle_count) +#define ISDN_AUDIO_SKB_LOCK(skb) (((isdn_audio_data_t *)&skb->cb[0])->lock) +#endif + +/* Private data of AT-command-interpreter */ +typedef struct atemu { + u_char profile[ISDN_MODEM_NUMREG]; /* Modem-Regs. 
Profile 0 */ + u_char mdmreg[ISDN_MODEM_NUMREG]; /* Modem-Registers */ + char pmsn[ISDN_MSNLEN]; /* EAZ/MSNs Profile 0 */ + char msn[ISDN_MSNLEN]; /* EAZ/MSN */ + char plmsn[ISDN_LMSNLEN]; /* Listening MSNs Profile 0 */ + char lmsn[ISDN_LMSNLEN]; /* Listening MSNs */ + char cpn[ISDN_MSNLEN]; /* CalledPartyNumber on incoming call */ + char connmsg[ISDN_CMSGLEN]; /* CONNECT-Msg from HL-Driver */ +#ifdef CONFIG_ISDN_AUDIO + u_char vpar[10]; /* Voice-parameters */ + int lastDLE; /* Flag for voice-coding: DLE seen */ +#endif + int mdmcmdl; /* Length of Modem-Commandbuffer */ + int pluscount; /* Counter for +++ sequence */ + u_long lastplus; /* Timestamp of last + */ + int carrierwait; /* Seconds of carrier waiting */ + char mdmcmd[255]; /* Modem-Commandbuffer */ + unsigned int charge; /* Charge units of current connection */ +} atemu; + +/* Private data (similar to async_struct in <linux/serial.h>) */ +typedef struct modem_info { + int magic; + struct module *owner; + int flags; /* defined in tty.h */ + int x_char; /* xon/xoff character */ + int mcr; /* Modem control register */ + int msr; /* Modem status register */ + int lsr; /* Line status register */ + int line; + int count; /* # of fd on device */ + int blocked_open; /* # of blocked opens */ + long session; /* Session of opening process */ + long pgrp; /* pgrp of opening process */ + int online; /* 1 = B-Channel is up, drop data */ + /* 2 = B-Channel is up, deliver d.*/ + int dialing; /* Dial in progress or ATA */ + int rcvsched; /* Receive needs schedule */ + int isdn_driver; /* Index to isdn-driver */ + int isdn_channel; /* Index to isdn-channel */ + int drv_index; /* Index to dev->usage */ + int ncarrier; /* Flag: schedule NO CARRIER */ + unsigned char last_cause[8]; /* Last cause message */ + unsigned char last_num[ISDN_MSNLEN]; + /* Last phone-number */ + unsigned char last_l2; /* Last layer-2 protocol */ + unsigned char last_si; /* Last service */ + unsigned char last_lhup; /* Last hangup local? */ + unsigned char last_dir; /* Last direction (in or out) */ + struct timer_list nc_timer; /* Timer for delayed NO CARRIER */ + int send_outstanding;/* # of outstanding send-requests */ + int xmit_size; /* max. # of chars in xmit_buf */ + int xmit_count; /* # of chars in xmit_buf */ + unsigned char *xmit_buf; /* transmit buffer */ + struct sk_buff_head xmit_queue; /* transmit queue */ + atomic_t xmit_lock; /* Semaphore for isdn_tty_write */ +#ifdef CONFIG_ISDN_AUDIO + int vonline; /* Voice-channel status */ + /* Bit 0 = recording */ + /* Bit 1 = playback */ + /* Bit 2 = playback, DLE-ETX seen */ + struct sk_buff_head dtmf_queue; /* queue for dtmf results */ + void *adpcms; /* state for adpcm decompression */ + void *adpcmr; /* state for adpcm compression */ + void *dtmf_state; /* state for dtmf decoder */ + void *silence_state; /* state for silence detection */ +#endif +#ifdef CONFIG_ISDN_TTY_FAX + struct T30_s *fax; /* T30 Fax Group 3 data/interface */ + int faxonline; /* Fax-channel status */ +#endif + struct tty_struct *tty; /* Pointer to corresponding tty */ + atemu emu; /* AT-emulator data */ + struct ktermios normal_termios; /* For saving termios structs */ + struct ktermios callout_termios; + wait_queue_head_t open_wait, close_wait; + spinlock_t readlock; +} modem_info; + +#define ISDN_MODEM_WINSIZE 8 + +/* Description of one ISDN-tty */ +typedef struct _isdn_modem { + int refcount; /* Number of opens */ + struct tty_driver *tty_modem; /* tty-device */ + struct tty_struct *modem_table[ISDN_MAX_CHANNELS]; /* ?? 
copied from Orig */ + struct ktermios *modem_termios[ISDN_MAX_CHANNELS]; + struct ktermios *modem_termios_locked[ISDN_MAX_CHANNELS]; + modem_info info[ISDN_MAX_CHANNELS]; /* Private data */ +} isdn_modem_t; + +/*======================= End of ISDN-tty stuff ============================*/ + +/*======================== Start of V.110 stuff ============================*/ +#define V110_BUFSIZE 1024 + +typedef struct { + int nbytes; /* 1 Matrixbyte -> nbytes in stream */ + int nbits; /* Number of used bits in streambyte */ + unsigned char key; /* Bitmask in stream eg. 11 (nbits=2) */ + int decodelen; /* Amount of data in decodebuf */ + int SyncInit; /* Number of sync frames to send */ + unsigned char *OnlineFrame; /* Precalculated V110 idle frame */ + unsigned char *OfflineFrame; /* Precalculated V110 sync Frame */ + int framelen; /* Length of frames */ + int skbuser; /* Number of unacked userdata skbs */ + int skbidle; /* Number of unacked idle/sync skbs */ + int introducer; /* Local vars for decoder */ + int dbit; + unsigned char b; + int skbres; /* space to reserve in outgoing skb */ + int maxsize; /* maxbufsize of lowlevel driver */ + unsigned char *encodebuf; /* temporary buffer for encoding */ + unsigned char decodebuf[V110_BUFSIZE]; /* incomplete V110 matrices */ +} isdn_v110_stream; + +/*========================= End of V.110 stuff =============================*/ + +/*======================= Start of general stuff ===========================*/ + +typedef struct { + char *next; + char *private; +} infostruct; + +#define DRV_FLAG_RUNNING 1 +#define DRV_FLAG_REJBUS 2 +#define DRV_FLAG_LOADED 4 + +/* Description of hardware-level-driver */ +typedef struct _isdn_driver { + ulong online; /* Channel-Online flags */ + ulong flags; /* Misc driver Flags */ + int locks; /* Number of locks for this driver */ + int channels; /* Number of channels */ + wait_queue_head_t st_waitq; /* Wait-Queue for status-read's */ + int maxbufsize; /* Maximum Buffersize supported */ + unsigned long pktcount; /* Until now: unused */ + int stavail; /* Chars avail on Status-device */ + isdn_if *interface; /* Interface to driver */ + int *rcverr; /* Error-counters for B-Ch.-receive */ + int *rcvcount; /* Byte-counters for B-Ch.-receive */ +#ifdef CONFIG_ISDN_AUDIO + unsigned long DLEflag; /* Flags: Insert DLE at next read */ +#endif + struct sk_buff_head *rpqueue; /* Pointers to start of Rcv-Queue */ + wait_queue_head_t *rcv_waitq; /* Wait-Queues for B-Channel-Reads */ + wait_queue_head_t *snd_waitq; /* Wait-Queue for B-Channel-Send's */ + char msn2eaz[10][ISDN_MSNLEN]; /* Mapping-Table MSN->EAZ */ +} isdn_driver_t; + +/* Main driver-data */ +typedef struct isdn_devt { + struct module *owner; + spinlock_t lock; + unsigned short flags; /* Bitmapped Flags: */ + int drivers; /* Current number of drivers */ + int channels; /* Current number of channels */ + int net_verbose; /* Verbose-Flag */ + int modempoll; /* Flag: tty-read active */ + spinlock_t timerlock; + int tflags; /* Timer-Flags: */ + /* see ISDN_TIMER_..defines */ + int global_flags; + infostruct *infochain; /* List of open info-devs. 
*/ + wait_queue_head_t info_waitq; /* Wait-Queue for isdninfo */ + struct timer_list timer; /* Misc.-function Timer */ + int chanmap[ISDN_MAX_CHANNELS]; /* Map minor->device-channel */ + int drvmap[ISDN_MAX_CHANNELS]; /* Map minor->driver-index */ + int usage[ISDN_MAX_CHANNELS]; /* Used by tty/ip/voice */ + char num[ISDN_MAX_CHANNELS][ISDN_MSNLEN]; + /* Remote number of active ch.*/ + int m_idx[ISDN_MAX_CHANNELS]; /* Index for mdm.... */ + isdn_driver_t *drv[ISDN_MAX_DRIVERS]; /* Array of drivers */ + isdn_net_dev *netdev; /* Linked list of net-if's */ + char drvid[ISDN_MAX_DRIVERS][20];/* Driver-ID */ + struct task_struct *profd; /* For iprofd */ + isdn_modem_t mdm; /* tty-driver-data */ + isdn_net_dev *rx_netdev[ISDN_MAX_CHANNELS]; /* rx netdev-pointers */ + isdn_net_dev *st_netdev[ISDN_MAX_CHANNELS]; /* stat netdev-pointers */ + ulong ibytes[ISDN_MAX_CHANNELS]; /* Statistics incoming bytes */ + ulong obytes[ISDN_MAX_CHANNELS]; /* Statistics outgoing bytes */ + int v110emu[ISDN_MAX_CHANNELS]; /* V.110 emulator-mode 0=none */ + atomic_t v110use[ISDN_MAX_CHANNELS]; /* Usage-Semaphore for stream */ + isdn_v110_stream *v110[ISDN_MAX_CHANNELS]; /* V.110 private data */ + struct mutex mtx; /* serialize list access*/ + unsigned long global_features; +} isdn_dev; + +extern isdn_dev *dev; + + +#endif /* __KERNEL__ */ + +#endif /* __ISDN_H__ */ diff --git a/include/linux/isdn/Kbuild b/include/linux/isdn/Kbuild new file mode 100644 index 00000000..991cdb29 --- /dev/null +++ b/include/linux/isdn/Kbuild @@ -0,0 +1 @@ +header-y += capicmd.h diff --git a/include/linux/isdn/capicmd.h b/include/linux/isdn/capicmd.h new file mode 100644 index 00000000..b58635f7 --- /dev/null +++ b/include/linux/isdn/capicmd.h @@ -0,0 +1,115 @@ +/* $Id: capicmd.h,v 1.2.6.2 2001/09/23 22:24:33 kai Exp $ + * + * CAPI 2.0 Interface for Linux + * + * Copyright 1997 by Carsten Paeth <calle@calle.de> + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + */ + +#ifndef __CAPICMD_H__ +#define __CAPICMD_H__ + +#define CAPI_MSG_BASELEN 8 +#define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2) +#define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2) + +/*----- CAPI commands -----*/ +#define CAPI_ALERT 0x01 +#define CAPI_CONNECT 0x02 +#define CAPI_CONNECT_ACTIVE 0x03 +#define CAPI_CONNECT_B3_ACTIVE 0x83 +#define CAPI_CONNECT_B3 0x82 +#define CAPI_CONNECT_B3_T90_ACTIVE 0x88 +#define CAPI_DATA_B3 0x86 +#define CAPI_DISCONNECT_B3 0x84 +#define CAPI_DISCONNECT 0x04 +#define CAPI_FACILITY 0x80 +#define CAPI_INFO 0x08 +#define CAPI_LISTEN 0x05 +#define CAPI_MANUFACTURER 0xff +#define CAPI_RESET_B3 0x87 +#define CAPI_SELECT_B_PROTOCOL 0x41 + +/*----- CAPI subcommands -----*/ + +#define CAPI_REQ 0x80 +#define CAPI_CONF 0x81 +#define CAPI_IND 0x82 +#define CAPI_RESP 0x83 + +/*----- CAPI combined commands -----*/ + +#define CAPICMD(cmd,subcmd) (((cmd)<<8)|(subcmd)) + +#define CAPI_DISCONNECT_REQ CAPICMD(CAPI_DISCONNECT,CAPI_REQ) +#define CAPI_DISCONNECT_CONF CAPICMD(CAPI_DISCONNECT,CAPI_CONF) +#define CAPI_DISCONNECT_IND CAPICMD(CAPI_DISCONNECT,CAPI_IND) +#define CAPI_DISCONNECT_RESP CAPICMD(CAPI_DISCONNECT,CAPI_RESP) + +#define CAPI_ALERT_REQ CAPICMD(CAPI_ALERT,CAPI_REQ) +#define CAPI_ALERT_CONF CAPICMD(CAPI_ALERT,CAPI_CONF) + +#define CAPI_CONNECT_REQ CAPICMD(CAPI_CONNECT,CAPI_REQ) +#define CAPI_CONNECT_CONF CAPICMD(CAPI_CONNECT,CAPI_CONF) +#define CAPI_CONNECT_IND CAPICMD(CAPI_CONNECT,CAPI_IND) +#define CAPI_CONNECT_RESP CAPICMD(CAPI_CONNECT,CAPI_RESP) + +#define CAPI_CONNECT_ACTIVE_REQ CAPICMD(CAPI_CONNECT_ACTIVE,CAPI_REQ) +#define CAPI_CONNECT_ACTIVE_CONF CAPICMD(CAPI_CONNECT_ACTIVE,CAPI_CONF) +#define CAPI_CONNECT_ACTIVE_IND CAPICMD(CAPI_CONNECT_ACTIVE,CAPI_IND) +#define CAPI_CONNECT_ACTIVE_RESP CAPICMD(CAPI_CONNECT_ACTIVE,CAPI_RESP) + +#define CAPI_SELECT_B_PROTOCOL_REQ CAPICMD(CAPI_SELECT_B_PROTOCOL,CAPI_REQ) +#define CAPI_SELECT_B_PROTOCOL_CONF CAPICMD(CAPI_SELECT_B_PROTOCOL,CAPI_CONF) + +#define CAPI_CONNECT_B3_ACTIVE_REQ CAPICMD(CAPI_CONNECT_B3_ACTIVE,CAPI_REQ) +#define CAPI_CONNECT_B3_ACTIVE_CONF CAPICMD(CAPI_CONNECT_B3_ACTIVE,CAPI_CONF) +#define CAPI_CONNECT_B3_ACTIVE_IND CAPICMD(CAPI_CONNECT_B3_ACTIVE,CAPI_IND) +#define CAPI_CONNECT_B3_ACTIVE_RESP CAPICMD(CAPI_CONNECT_B3_ACTIVE,CAPI_RESP) + +#define CAPI_CONNECT_B3_REQ CAPICMD(CAPI_CONNECT_B3,CAPI_REQ) +#define CAPI_CONNECT_B3_CONF CAPICMD(CAPI_CONNECT_B3,CAPI_CONF) +#define CAPI_CONNECT_B3_IND CAPICMD(CAPI_CONNECT_B3,CAPI_IND) +#define CAPI_CONNECT_B3_RESP CAPICMD(CAPI_CONNECT_B3,CAPI_RESP) + + +#define CAPI_CONNECT_B3_T90_ACTIVE_IND CAPICMD(CAPI_CONNECT_B3_T90_ACTIVE,CAPI_IND) +#define CAPI_CONNECT_B3_T90_ACTIVE_RESP CAPICMD(CAPI_CONNECT_B3_T90_ACTIVE,CAPI_RESP) + +#define CAPI_DATA_B3_REQ CAPICMD(CAPI_DATA_B3,CAPI_REQ) +#define CAPI_DATA_B3_CONF CAPICMD(CAPI_DATA_B3,CAPI_CONF) +#define CAPI_DATA_B3_IND CAPICMD(CAPI_DATA_B3,CAPI_IND) +#define CAPI_DATA_B3_RESP CAPICMD(CAPI_DATA_B3,CAPI_RESP) + +#define CAPI_DISCONNECT_B3_REQ CAPICMD(CAPI_DISCONNECT_B3,CAPI_REQ) +#define CAPI_DISCONNECT_B3_CONF CAPICMD(CAPI_DISCONNECT_B3,CAPI_CONF) +#define CAPI_DISCONNECT_B3_IND CAPICMD(CAPI_DISCONNECT_B3,CAPI_IND) +#define CAPI_DISCONNECT_B3_RESP CAPICMD(CAPI_DISCONNECT_B3,CAPI_RESP) + +#define CAPI_RESET_B3_REQ CAPICMD(CAPI_RESET_B3,CAPI_REQ) +#define CAPI_RESET_B3_CONF CAPICMD(CAPI_RESET_B3,CAPI_CONF) +#define CAPI_RESET_B3_IND CAPICMD(CAPI_RESET_B3,CAPI_IND) +#define CAPI_RESET_B3_RESP CAPICMD(CAPI_RESET_B3,CAPI_RESP) + +#define CAPI_LISTEN_REQ CAPICMD(CAPI_LISTEN,CAPI_REQ) +#define 
CAPI_LISTEN_CONF CAPICMD(CAPI_LISTEN,CAPI_CONF) + +#define CAPI_MANUFACTURER_REQ CAPICMD(CAPI_MANUFACTURER,CAPI_REQ) +#define CAPI_MANUFACTURER_CONF CAPICMD(CAPI_MANUFACTURER,CAPI_CONF) +#define CAPI_MANUFACTURER_IND CAPICMD(CAPI_MANUFACTURER,CAPI_IND) +#define CAPI_MANUFACTURER_RESP CAPICMD(CAPI_MANUFACTURER,CAPI_RESP) + +#define CAPI_FACILITY_REQ CAPICMD(CAPI_FACILITY,CAPI_REQ) +#define CAPI_FACILITY_CONF CAPICMD(CAPI_FACILITY,CAPI_CONF) +#define CAPI_FACILITY_IND CAPICMD(CAPI_FACILITY,CAPI_IND) +#define CAPI_FACILITY_RESP CAPICMD(CAPI_FACILITY,CAPI_RESP) + +#define CAPI_INFO_REQ CAPICMD(CAPI_INFO,CAPI_REQ) +#define CAPI_INFO_CONF CAPICMD(CAPI_INFO,CAPI_CONF) +#define CAPI_INFO_IND CAPICMD(CAPI_INFO,CAPI_IND) +#define CAPI_INFO_RESP CAPICMD(CAPI_INFO,CAPI_RESP) + +#endif /* __CAPICMD_H__ */ diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h new file mode 100644 index 00000000..11b57c48 --- /dev/null +++ b/include/linux/isdn/capilli.h @@ -0,0 +1,113 @@ +/* $Id: capilli.h,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $ + * + * Kernel CAPI 2.0 Driver Interface for Linux + * + * Copyright 1999 by Carsten Paeth <calle@calle.de> + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ + +#ifndef __CAPILLI_H__ +#define __CAPILLI_H__ + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/capi.h> +#include <linux/kernelcapi.h> + +typedef struct capiloaddatapart { + int user; /* data in userspace ? */ + int len; + unsigned char *data; +} capiloaddatapart; + +typedef struct capiloaddata { + capiloaddatapart firmware; + capiloaddatapart configuration; +} capiloaddata; + +typedef struct capicardparams { + unsigned int port; + unsigned irq; + int cardtype; + int cardnr; + unsigned int membase; +} capicardparams; + +struct capi_ctr { + /* filled in before calling attach_capi_ctr */ + struct module *owner; + void *driverdata; /* driver specific */ + char name[32]; /* name of controller */ + char *driver_name; /* name of driver */ + int (*load_firmware)(struct capi_ctr *, capiloaddata *); + void (*reset_ctr)(struct capi_ctr *); + void (*register_appl)(struct capi_ctr *, u16 appl, + capi_register_params *); + void (*release_appl)(struct capi_ctr *, u16 appl); + u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb); + + char *(*procinfo)(struct capi_ctr *); + const struct file_operations *proc_fops; + + /* filled in before calling ready callback */ + u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */ + capi_version version; /* CAPI_GET_VERSION */ + capi_profile profile; /* CAPI_GET_PROFILE */ + u8 serial[CAPI_SERIAL_LEN]; /* CAPI_GET_SERIAL */ + + /* management information for kcapi */ + + unsigned long nrecvctlpkt; + unsigned long nrecvdatapkt; + unsigned long nsentctlpkt; + unsigned long nsentdatapkt; + + int cnr; /* controller number */ + unsigned short state; /* controller state */ + int blocked; /* output blocked */ + int traceflag; /* capi trace */ + wait_queue_head_t state_wait_queue; + + struct proc_dir_entry *procent; + char procfn[128]; +}; + +int attach_capi_ctr(struct capi_ctr *); +int detach_capi_ctr(struct capi_ctr *); + +void capi_ctr_ready(struct capi_ctr * card); +void capi_ctr_down(struct capi_ctr * card); +void capi_ctr_suspend_output(struct capi_ctr * card); +void capi_ctr_resume_output(struct capi_ctr * card); +void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb); + +// 
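Illustrative note, not part of capilli.h above: struct capi_ctr plus attach_capi_ctr()/capi_ctr_ready() is how a hardware driver announces a CAPI controller to kcapi. The sketch below shows the minimal wiring under stated assumptions — all demo_* names are invented, the callbacks are stubs, and returning 0 from send_message is assumed to mean "no error" (CAPI_NOERROR in kernelcapi.h).

#include <linux/module.h>
#include <linux/isdn/capilli.h>

static void demo_reset_ctr(struct capi_ctr *ctrl)
{
	/* would reset the hardware here */
}

static void demo_register_appl(struct capi_ctr *ctrl, u16 appl,
			       capi_register_params *rp)
{
	/* per-application setup (window sizes etc.) */
}

static void demo_release_appl(struct capi_ctr *ctrl, u16 appl)
{
}

static u16 demo_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
{
	/* queue skb towards the hardware; answers come back later
	 * through capi_ctr_handle_message() */
	return 0;	/* assumed to be CAPI_NOERROR */
}

static struct capi_ctr demo_ctr = {
	.owner         = THIS_MODULE,
	.driver_name   = "demo",
	.name          = "demo-ctr",
	.reset_ctr     = demo_reset_ctr,
	.register_appl = demo_register_appl,
	.release_appl  = demo_release_appl,
	.send_message  = demo_send_message,
};

static int demo_attach(void)
{
	int ret = attach_capi_ctr(&demo_ctr);	/* announce controller to kcapi */

	if (ret < 0)
		return ret;
	/* once manu/version/profile/serial are filled in from the hardware: */
	capi_ctr_ready(&demo_ctr);
	return 0;
}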
--------------------------------------------------------------------------- +// needed for AVM capi drivers + +struct capi_driver { + char name[32]; /* driver name */ + char revision[32]; + + int (*add_card)(struct capi_driver *driver, capicardparams *data); + + /* management information for kcapi */ + struct list_head list; +}; + +void register_capi_driver(struct capi_driver *driver); +void unregister_capi_driver(struct capi_driver *driver); + +// --------------------------------------------------------------------------- +// library functions for use by hardware controller drivers + +void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize); +void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci); +void capilib_release_appl(struct list_head *head, u16 applid); +void capilib_release(struct list_head *head); +void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid); +u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid); + +#endif /* __CAPILLI_H__ */ diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h new file mode 100644 index 00000000..5a52f2c9 --- /dev/null +++ b/include/linux/isdn/capiutil.h @@ -0,0 +1,521 @@ +/* $Id: capiutil.h,v 1.5.6.2 2001/09/23 22:24:33 kai Exp $ + * + * CAPI 2.0 defines & types + * + * From CAPI 2.0 Development Kit AVM 1995 (msg.c) + * Rewritten for Linux 1996 by Carsten Paeth <calle@calle.de> + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ + +#ifndef __CAPIUTIL_H__ +#define __CAPIUTIL_H__ + +#include <asm/types.h> + +#define CAPIMSG_BASELEN 8 +#define CAPIMSG_U8(m, off) (m[off]) +#define CAPIMSG_U16(m, off) (m[off]|(m[(off)+1]<<8)) +#define CAPIMSG_U32(m, off) (m[off]|(m[(off)+1]<<8)|(m[(off)+2]<<16)|(m[(off)+3]<<24)) +#define CAPIMSG_LEN(m) CAPIMSG_U16(m,0) +#define CAPIMSG_APPID(m) CAPIMSG_U16(m,2) +#define CAPIMSG_COMMAND(m) CAPIMSG_U8(m,4) +#define CAPIMSG_SUBCOMMAND(m) CAPIMSG_U8(m,5) +#define CAPIMSG_CMD(m) (((m[4])<<8)|(m[5])) +#define CAPIMSG_MSGID(m) CAPIMSG_U16(m,6) +#define CAPIMSG_CONTROLLER(m) (m[8] & 0x7f) +#define CAPIMSG_CONTROL(m) CAPIMSG_U32(m, 8) +#define CAPIMSG_NCCI(m) CAPIMSG_CONTROL(m) +#define CAPIMSG_DATALEN(m) CAPIMSG_U16(m,16) /* DATA_B3_REQ */ + +static inline void capimsg_setu8(void *m, int off, __u8 val) +{ + ((__u8 *)m)[off] = val; +} + +static inline void capimsg_setu16(void *m, int off, __u16 val) +{ + ((__u8 *)m)[off] = val & 0xff; + ((__u8 *)m)[off+1] = (val >> 8) & 0xff; +} + +static inline void capimsg_setu32(void *m, int off, __u32 val) +{ + ((__u8 *)m)[off] = val & 0xff; + ((__u8 *)m)[off+1] = (val >> 8) & 0xff; + ((__u8 *)m)[off+2] = (val >> 16) & 0xff; + ((__u8 *)m)[off+3] = (val >> 24) & 0xff; +} + +#define CAPIMSG_SETLEN(m, len) capimsg_setu16(m, 0, len) +#define CAPIMSG_SETAPPID(m, applid) capimsg_setu16(m, 2, applid) +#define CAPIMSG_SETCOMMAND(m,cmd) capimsg_setu8(m, 4, cmd) +#define CAPIMSG_SETSUBCOMMAND(m, cmd) capimsg_setu8(m, 5, cmd) +#define CAPIMSG_SETMSGID(m, msgid) capimsg_setu16(m, 6, msgid) +#define CAPIMSG_SETCONTROL(m, contr) capimsg_setu32(m, 8, contr) +#define CAPIMSG_SETDATALEN(m, len) capimsg_setu16(m, 16, len) + +/*----- basic-type definitions -----*/ + +typedef __u8 *_cstruct; + +typedef enum { + CAPI_COMPOSE, + CAPI_DEFAULT +} _cmstruct; + +/* + The _cmsg structure contains all possible CAPI 2.0 parameter. + All parameters are stored here first. 
The function CAPI_CMSG_2_MESSAGE + assembles the parameter and builds CAPI2.0 conform messages. + CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the + parameter in the _cmsg structure + */ + +typedef struct { + /* Header */ + __u16 ApplId; + __u8 Command; + __u8 Subcommand; + __u16 Messagenumber; + + /* Parameter */ + union { + __u32 adrController; + __u32 adrPLCI; + __u32 adrNCCI; + } adr; + + _cmstruct AdditionalInfo; + _cstruct B1configuration; + __u16 B1protocol; + _cstruct B2configuration; + __u16 B2protocol; + _cstruct B3configuration; + __u16 B3protocol; + _cstruct BC; + _cstruct BChannelinformation; + _cmstruct BProtocol; + _cstruct CalledPartyNumber; + _cstruct CalledPartySubaddress; + _cstruct CallingPartyNumber; + _cstruct CallingPartySubaddress; + __u32 CIPmask; + __u32 CIPmask2; + __u16 CIPValue; + __u32 Class; + _cstruct ConnectedNumber; + _cstruct ConnectedSubaddress; + __u32 Data; + __u16 DataHandle; + __u16 DataLength; + _cstruct FacilityConfirmationParameter; + _cstruct Facilitydataarray; + _cstruct FacilityIndicationParameter; + _cstruct FacilityRequestParameter; + __u16 FacilitySelector; + __u16 Flags; + __u32 Function; + _cstruct HLC; + __u16 Info; + _cstruct InfoElement; + __u32 InfoMask; + __u16 InfoNumber; + _cstruct Keypadfacility; + _cstruct LLC; + _cstruct ManuData; + __u32 ManuID; + _cstruct NCPI; + __u16 Reason; + __u16 Reason_B3; + __u16 Reject; + _cstruct Useruserdata; + + /* intern */ + unsigned l, p; + unsigned char *par; + __u8 *m; + + /* buffer to construct message */ + __u8 buf[180]; + +} _cmsg; + +/* + * capi_cmsg2message() assembles the parameter from _cmsg to a CAPI 2.0 + * conform message + */ +unsigned capi_cmsg2message(_cmsg * cmsg, __u8 * msg); + +/* + * capi_message2cmsg disassembles a CAPI message an writes the parameter + * into _cmsg for easy access + */ +unsigned capi_message2cmsg(_cmsg * cmsg, __u8 * msg); + +/* + * capi_cmsg_header() fills the _cmsg structure with default values, so only + * parameter with non default values must be changed before sending the + * message. + */ +unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId, + __u8 _Command, __u8 _Subcommand, + __u16 _Messagenumber, __u32 _Controller); + +/* + * capi_info2str generated a readable string for Capi2.0 reasons. 
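A minimal sketch of the round trip the comments above describe: capi_cmsg_header() fills the _cmsg header, capi_cmsg2message() flattens it into the wire format, and the CAPIMSG_* accessors read the same fields back from the byte buffer. The application id, message number, controller and mask values are arbitrary example numbers, not defaults from this header.

#include <linux/string.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>

static void demo_build_listen_req(__u8 *msg)	/* msg: caller-provided buffer */
{
	_cmsg cmsg;

	memset(&cmsg, 0, sizeof(cmsg));		/* be explicit about defaults */
	capi_cmsg_header(&cmsg, /* ApplId */ 1, CAPI_LISTEN, CAPI_REQ,
			 /* Messagenumber */ 1, /* Controller */ 1);
	cmsg.InfoMask = 0x03ff;			/* example values only */
	cmsg.CIPmask  = 0x1fff03ff;

	capi_cmsg2message(&cmsg, msg);		/* assemble the wire format */

	/* The flat buffer can now be decoded with the accessors above:
	 *   CAPIMSG_CMD(msg)   == CAPI_LISTEN_REQ  (0x0580)
	 *   CAPIMSG_APPID(msg) == 1
	 *   CAPIMSG_MSGID(msg) == 1
	 * and capi_message2cmsg() would rebuild an equivalent _cmsg. */
}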
+ */ +char *capi_info2str(__u16 reason); + +/*-----------------------------------------------------------------------*/ + +/* + * Debugging / Tracing functions + */ + +char *capi_cmd2str(__u8 cmd, __u8 subcmd); + +typedef struct { + u_char *buf; + u_char *p; + size_t size; + size_t pos; +} _cdebbuf; + +#define CDEBUG_SIZE 1024 +#define CDEBUG_GSIZE 4096 + +void cdebbuf_free(_cdebbuf *cdb); +int cdebug_init(void); +void cdebug_exit(void); + +_cdebbuf *capi_cmsg2str(_cmsg *cmsg); +_cdebbuf *capi_message2str(__u8 *msg); + +/*-----------------------------------------------------------------------*/ + +static inline void capi_cmsg_answer(_cmsg * cmsg) +{ + cmsg->Subcommand |= 0x01; +} + +/*-----------------------------------------------------------------------*/ + +static inline void capi_fill_CONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct NCPI) +{ + capi_cmsg_header(cmsg, ApplId, 0x82, 0x80, Messagenumber, adr); + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_FACILITY_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 FacilitySelector, + _cstruct FacilityRequestParameter) +{ + capi_cmsg_header(cmsg, ApplId, 0x80, 0x80, Messagenumber, adr); + cmsg->FacilitySelector = FacilitySelector; + cmsg->FacilityRequestParameter = FacilityRequestParameter; +} + +static inline void capi_fill_INFO_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct CalledPartyNumber, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + capi_cmsg_header(cmsg, ApplId, 0x08, 0x80, Messagenumber, adr); + cmsg->CalledPartyNumber = CalledPartyNumber; + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_LISTEN_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 InfoMask, + __u32 CIPmask, + __u32 CIPmask2, + _cstruct CallingPartyNumber, + _cstruct CallingPartySubaddress) +{ + capi_cmsg_header(cmsg, ApplId, 0x05, 0x80, Messagenumber, adr); + cmsg->InfoMask = InfoMask; + cmsg->CIPmask = CIPmask; + cmsg->CIPmask2 = CIPmask2; + cmsg->CallingPartyNumber = CallingPartyNumber; + cmsg->CallingPartySubaddress = CallingPartySubaddress; +} + +static inline void capi_fill_ALERT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + capi_cmsg_header(cmsg, ApplId, 0x01, 0x80, Messagenumber, adr); + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_CONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 CIPValue, + _cstruct CalledPartyNumber, + _cstruct CallingPartyNumber, + _cstruct CalledPartySubaddress, + _cstruct CallingPartySubaddress, + __u16 B1protocol, + __u16 B2protocol, + __u16 B3protocol, + _cstruct B1configuration, + _cstruct B2configuration, + _cstruct B3configuration, + _cstruct BC, + _cstruct LLC, + _cstruct HLC, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + + capi_cmsg_header(cmsg, ApplId, 0x02, 0x80, Messagenumber, adr); + cmsg->CIPValue = CIPValue; + cmsg->CalledPartyNumber = CalledPartyNumber; + 
cmsg->CallingPartyNumber = CallingPartyNumber; + cmsg->CalledPartySubaddress = CalledPartySubaddress; + cmsg->CallingPartySubaddress = CallingPartySubaddress; + cmsg->B1protocol = B1protocol; + cmsg->B2protocol = B2protocol; + cmsg->B3protocol = B3protocol; + cmsg->B1configuration = B1configuration; + cmsg->B2configuration = B2configuration; + cmsg->B3configuration = B3configuration; + cmsg->BC = BC; + cmsg->LLC = LLC; + cmsg->HLC = HLC; + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_DATA_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 Data, + __u16 DataLength, + __u16 DataHandle, + __u16 Flags) +{ + + capi_cmsg_header(cmsg, ApplId, 0x86, 0x80, Messagenumber, adr); + cmsg->Data = Data; + cmsg->DataLength = DataLength; + cmsg->DataHandle = DataHandle; + cmsg->Flags = Flags; +} + +static inline void capi_fill_DISCONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + + capi_cmsg_header(cmsg, ApplId, 0x04, 0x80, Messagenumber, adr); + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_DISCONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct NCPI) +{ + + capi_cmsg_header(cmsg, ApplId, 0x84, 0x80, Messagenumber, adr); + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_MANUFACTURER_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 ManuID, + __u32 Class, + __u32 Function, + _cstruct ManuData) +{ + + capi_cmsg_header(cmsg, ApplId, 0xff, 0x80, Messagenumber, adr); + cmsg->ManuID = ManuID; + cmsg->Class = Class; + cmsg->Function = Function; + cmsg->ManuData = ManuData; +} + +static inline void capi_fill_RESET_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + _cstruct NCPI) +{ + + capi_cmsg_header(cmsg, ApplId, 0x87, 0x80, Messagenumber, adr); + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_SELECT_B_PROTOCOL_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 B1protocol, + __u16 B2protocol, + __u16 B3protocol, + _cstruct B1configuration, + _cstruct B2configuration, + _cstruct B3configuration) +{ + + capi_cmsg_header(cmsg, ApplId, 0x41, 0x80, Messagenumber, adr); + cmsg->B1protocol = B1protocol; + cmsg->B2protocol = B2protocol; + cmsg->B3protocol = B3protocol; + cmsg->B1configuration = B1configuration; + cmsg->B2configuration = B2configuration; + cmsg->B3configuration = B3configuration; +} + +static inline void capi_fill_CONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 Reject, + __u16 B1protocol, + __u16 B2protocol, + __u16 B3protocol, + _cstruct B1configuration, + _cstruct B2configuration, + _cstruct B3configuration, + _cstruct ConnectedNumber, + _cstruct ConnectedSubaddress, + _cstruct LLC, + _cstruct BChannelinformation, + _cstruct Keypadfacility, + _cstruct Useruserdata, + _cstruct Facilitydataarray) +{ + capi_cmsg_header(cmsg, ApplId, 0x02, 0x83, Messagenumber, adr); + cmsg->Reject = Reject; + cmsg->B1protocol = B1protocol; + cmsg->B2protocol = B2protocol; + cmsg->B3protocol = B3protocol; + cmsg->B1configuration = B1configuration; + cmsg->B2configuration = B2configuration; 
+ cmsg->B3configuration = B3configuration; + cmsg->ConnectedNumber = ConnectedNumber; + cmsg->ConnectedSubaddress = ConnectedSubaddress; + cmsg->LLC = LLC; + cmsg->BChannelinformation = BChannelinformation; + cmsg->Keypadfacility = Keypadfacility; + cmsg->Useruserdata = Useruserdata; + cmsg->Facilitydataarray = Facilitydataarray; +} + +static inline void capi_fill_CONNECT_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x03, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_CONNECT_B3_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x83, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_CONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 Reject, + _cstruct NCPI) +{ + capi_cmsg_header(cmsg, ApplId, 0x82, 0x83, Messagenumber, adr); + cmsg->Reject = Reject; + cmsg->NCPI = NCPI; +} + +static inline void capi_fill_CONNECT_B3_T90_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x88, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_DATA_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 DataHandle) +{ + + capi_cmsg_header(cmsg, ApplId, 0x86, 0x83, Messagenumber, adr); + cmsg->DataHandle = DataHandle; +} + +static inline void capi_fill_DISCONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x84, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_DISCONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x04, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_FACILITY_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u16 FacilitySelector) +{ + + capi_cmsg_header(cmsg, ApplId, 0x80, 0x83, Messagenumber, adr); + cmsg->FacilitySelector = FacilitySelector; +} + +static inline void capi_fill_INFO_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x08, 0x83, Messagenumber, adr); +} + +static inline void capi_fill_MANUFACTURER_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr, + __u32 ManuID, + __u32 Class, + __u32 Function, + _cstruct ManuData) +{ + + capi_cmsg_header(cmsg, ApplId, 0xff, 0x83, Messagenumber, adr); + cmsg->ManuID = ManuID; + cmsg->Class = Class; + cmsg->Function = Function; + cmsg->ManuData = ManuData; +} + +static inline void capi_fill_RESET_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, + __u32 adr) +{ + + capi_cmsg_header(cmsg, ApplId, 0x87, 0x83, Messagenumber, adr); +} + +#endif /* __CAPIUTIL_H__ */ diff --git a/include/linux/isdn/hdlc.h b/include/linux/isdn/hdlc.h new file mode 100644 index 00000000..96521370 --- /dev/null +++ b/include/linux/isdn/hdlc.h @@ -0,0 +1,82 @@ +/* + * hdlc.h -- General purpose ISDN HDLC decoder. + * + * Implementation of a HDLC decoder/encoder in software. + * Necessary because some ISDN devices don't have HDLC + * controllers. 
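capi_cmsg_answer() above turns a parsed message into its answer by setting the low bit of the subcommand, so an indication (0x82) becomes the matching response (0x83). A hedged sketch of the usual pattern, assuming `in` holds a received CONNECT_B3_IND, that Reject == 0 means "accept", and that an empty NCPI is acceptable for the example:

#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>

static void demo_accept_connect_b3(__u8 *in, __u8 *out)
{
	_cmsg cmsg;

	if (CAPIMSG_CMD(in) != CAPI_CONNECT_B3_IND)
		return;

	capi_message2cmsg(&cmsg, in);	/* disassemble the indication */
	capi_cmsg_answer(&cmsg);	/* IND (0x82) -> RESP (0x83) */
	cmsg.Reject = 0;		/* accept the connection */
	cmsg.NCPI = NULL;
	capi_cmsg2message(&cmsg, out);	/* reassemble for sending back */
}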
+ * + * Copyright (C) + * 2009 Karsten Keil <keil@b1-systems.de> + * 2002 Wolfgang Mües <wolfgang@iksw-muees.de> + * 2001 Frode Isaksen <fisaksen@bewan.com> + * 2001 Kai Germaschewski <kai.germaschewski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __ISDNHDLC_H__ +#define __ISDNHDLC_H__ + +struct isdnhdlc_vars { + int bit_shift; + int hdlc_bits1; + int data_bits; + int ffbit_shift; /* encoding only */ + int state; + int dstpos; + + u16 crc; + + u8 cbin; + u8 shift_reg; + u8 ffvalue; + + /* set if transferring data */ + u32 data_received:1; + /* set if D channel (send idle instead of flags) */ + u32 dchannel:1; + /* set if 56K adaptation */ + u32 do_adapt56:1; + /* set if in closing phase (need to send CRC + flag) */ + u32 do_closing:1; + /* set if data is bitreverse */ + u32 do_bitreverse:1; +}; + +/* Feature Flags */ +#define HDLC_56KBIT 0x01 +#define HDLC_DCHANNEL 0x02 +#define HDLC_BITREVERSE 0x04 + +/* + The return value from isdnhdlc_decode is + the frame length, 0 if no complete frame was decoded, + or a negative error number +*/ +#define HDLC_FRAMING_ERROR 1 +#define HDLC_CRC_ERROR 2 +#define HDLC_LENGTH_ERROR 3 + +extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features); + +extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src, + int slen, int *count, u8 *dst, int dsize); + +extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features); + +extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, + u16 slen, int *count, u8 *dst, int dsize); + +#endif /* __ISDNHDLC_H__ */ diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h new file mode 100644 index 00000000..a5a50f52 --- /dev/null +++ b/include/linux/isdn_divertif.h @@ -0,0 +1,51 @@ +/* $Id: isdn_divertif.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp $ + * + * Header for the diversion supplementary interface for i4l. + * + * Author Werner Cornelius (werner@titro.de) + * Copyright by Werner Cornelius (werner@titro.de) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
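The isdnhdlc_decode() contract stated in hdlc.h above is: a positive return is the length of a complete frame, 0 means no complete frame yet, and a negative value is one of the HDLC_*_ERROR codes. A minimal consumption loop could look like the sketch below; demo_deliver_frame() and the buffer size are invented, and *count is assumed to report the number of source bytes consumed.

#include <linux/types.h>
#include <linux/isdn/hdlc.h>

/* Hypothetical upper-layer hook, not part of this header. */
extern void demo_deliver_frame(const u8 *buf, int len);

/* hdlc must have been prepared once with isdnhdlc_rcv_init(hdlc, features). */
static void demo_hdlc_rx(struct isdnhdlc_vars *hdlc, const u8 *src, int slen)
{
	u8 frame[2048];		/* arbitrary example frame buffer */
	int used, len;

	while (slen > 0) {
		len = isdnhdlc_decode(hdlc, src, slen, &used,
				      frame, sizeof(frame));
		src  += used;	/* bytes consumed from the source */
		slen -= used;

		if (len > 0) {
			demo_deliver_frame(frame, len);	/* complete frame */
		} else if (len < 0) {
			/* -HDLC_FRAMING_ERROR, -HDLC_CRC_ERROR or
			 * -HDLC_LENGTH_ERROR: drop and keep going */
		}
		/* len == 0: no complete frame yet, feed more data */
	}
}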
+ * + */ + +#ifndef _LINUX_ISDN_DIVERTIF_H +#define _LINUX_ISDN_DIVERTIF_H + +/***********************************************************/ +/* magic value is also used to control version information */ +/***********************************************************/ +#define DIVERT_IF_MAGIC 0x25873401 +#define DIVERT_CMD_REG 0x00 /* register command */ +#define DIVERT_CMD_REL 0x01 /* release command */ +#define DIVERT_NO_ERR 0x00 /* return value no error */ +#define DIVERT_CMD_ERR 0x01 /* invalid cmd */ +#define DIVERT_VER_ERR 0x02 /* magic/version invalid */ +#define DIVERT_REG_ERR 0x03 /* module already registered */ +#define DIVERT_REL_ERR 0x04 /* module not registered */ +#define DIVERT_REG_NAME isdn_register_divert + +#ifdef __KERNEL__ +#include <linux/isdnif.h> +#include <linux/types.h> + +/***************************************************************/ +/* structure exchanging data between isdn hl and divert module */ +/***************************************************************/ +typedef struct + { ulong if_magic; /* magic info and version */ + int cmd; /* command */ + int (*stat_callback)(isdn_ctrl *); /* supplied by divert module when calling */ + int (*ll_cmd)(isdn_ctrl *); /* supplied by hl on return */ + char * (*drv_to_name)(int); /* map a driver id to name, supplied by hl */ + int (*name_to_drv)(char *); /* map a driver id to name, supplied by hl */ + } isdn_divert_if; + +/*********************/ +/* function register */ +/*********************/ +extern int DIVERT_REG_NAME(isdn_divert_if *); +#endif + +#endif /* _LINUX_ISDN_DIVERTIF_H */ diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h new file mode 100644 index 00000000..8687a7dc --- /dev/null +++ b/include/linux/isdn_ppp.h @@ -0,0 +1,248 @@ +/* Linux ISDN subsystem, sync PPP, interface to ipppd + * + * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de) + * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg + * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) + * Copyright 2000-2002 by Kai Germaschewski (kai@germaschewski.name) + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
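The isdn_divert_if exchange defined in isdn_divertif.h above is driven by the magic/cmd pair: the divert module fills if_magic with DIVERT_IF_MAGIC, sets cmd to DIVERT_CMD_REG and supplies stat_callback; the link level fills in ll_cmd, drv_to_name and name_to_drv on return. A hedged sketch of register/release — demo_* names are invented and the release-by-DIVERT_CMD_REL pattern is an assumption based on the defines above:

#include <linux/errno.h>
#include <linux/isdn_divertif.h>

static int demo_stat_callback(isdn_ctrl *ic)
{
	/* inspect status events that the link level forwards to us */
	return 0;
}

static isdn_divert_if demo_div_if;

static int demo_divert_register(void)
{
	demo_div_if.if_magic      = DIVERT_IF_MAGIC;	/* magic + version */
	demo_div_if.cmd           = DIVERT_CMD_REG;
	demo_div_if.stat_callback = demo_stat_callback;

	if (DIVERT_REG_NAME(&demo_div_if) != DIVERT_NO_ERR)
		return -EIO;

	/* on success the link level has filled in ll_cmd, drv_to_name
	 * and name_to_drv for our later use */
	return 0;
}

static void demo_divert_release(void)
{
	demo_div_if.cmd = DIVERT_CMD_REL;
	DIVERT_REG_NAME(&demo_div_if);
}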
+ * + */ + +#ifndef _LINUX_ISDN_PPP_H +#define _LINUX_ISDN_PPP_H + +#define CALLTYPE_INCOMING 0x1 +#define CALLTYPE_OUTGOING 0x2 +#define CALLTYPE_CALLBACK 0x4 + +#define IPPP_VERSION "2.2.0" + +struct pppcallinfo +{ + int calltype; + unsigned char local_num[64]; + unsigned char remote_num[64]; + int charge_units; +}; + +#define PPPIOCGCALLINFO _IOWR('t',128,struct pppcallinfo) +#define PPPIOCBUNDLE _IOW('t',129,int) +#define PPPIOCGMPFLAGS _IOR('t',130,int) +#define PPPIOCSMPFLAGS _IOW('t',131,int) +#define PPPIOCSMPMTU _IOW('t',132,int) +#define PPPIOCSMPMRU _IOW('t',133,int) +#define PPPIOCGCOMPRESSORS _IOR('t',134,unsigned long [8]) +#define PPPIOCSCOMPRESSOR _IOW('t',135,int) +#define PPPIOCGIFNAME _IOR('t',136, char [IFNAMSIZ] ) + + +#define SC_MP_PROT 0x00000200 +#define SC_REJ_MP_PROT 0x00000400 +#define SC_OUT_SHORT_SEQ 0x00000800 +#define SC_IN_SHORT_SEQ 0x00004000 + +#define SC_DECOMP_ON 0x01 +#define SC_COMP_ON 0x02 +#define SC_DECOMP_DISCARD 0x04 +#define SC_COMP_DISCARD 0x08 +#define SC_LINK_DECOMP_ON 0x10 +#define SC_LINK_COMP_ON 0x20 +#define SC_LINK_DECOMP_DISCARD 0x40 +#define SC_LINK_COMP_DISCARD 0x80 + +#define ISDN_PPP_COMP_MAX_OPTIONS 16 + +#define IPPP_COMP_FLAG_XMIT 0x1 +#define IPPP_COMP_FLAG_LINK 0x2 + +struct isdn_ppp_comp_data { + int num; + unsigned char options[ISDN_PPP_COMP_MAX_OPTIONS]; + int optlen; + int flags; +}; + +#ifdef __KERNEL__ + + + +#ifdef CONFIG_IPPP_FILTER +#include <linux/filter.h> +#endif + +#define DECOMP_ERR_NOMEM (-10) + +#define MP_END_FRAG 0x40 +#define MP_BEGIN_FRAG 0x80 + +#define MP_MAX_QUEUE_LEN 16 + +/* + * We need a way for the decompressor to influence the generation of CCP + * Reset-Requests in a variety of ways. The decompressor is already returning + * a lot of information (generated skb length, error conditions) so we use + * another parameter. This parameter is a pointer to a structure which is + * to be marked valid by the decompressor and only in this case is ever used. + * Furthermore, the only case where this data is used is when the decom- + * pressor returns DECOMP_ERROR. + * + * We use this same struct for the reset entry of the compressor to commu- + * nicate to its caller how to deal with sending of a Reset Ack. In this + * case, expra is not used, but other options still apply (suppressing + * sending with rsend, appending arbitrary data, etc). + */ + +#define IPPP_RESET_MAXDATABYTES 32 + +struct isdn_ppp_resetparams { + unsigned char valid:1; /* rw Is this structure filled at all ? */ + unsigned char rsend:1; /* rw Should we send one at all ? */ + unsigned char idval:1; /* rw Is the id field valid ? */ + unsigned char dtval:1; /* rw Is the data field valid ? */ + unsigned char expra:1; /* rw Is an Ack expected for this Req ? */ + unsigned char id; /* wo Send CCP ResetReq with this id */ + unsigned short maxdlen; /* ro Max bytes to be stored in data field */ + unsigned short dlen; /* rw Bytes stored in data field */ + unsigned char *data; /* wo Data for ResetReq info field */ +}; + +/* + * this is an 'old friend' from ppp-comp.h under a new name + * check the original include for more information + */ +struct isdn_ppp_compressor { + struct isdn_ppp_compressor *next, *prev; + struct module *owner; + int num; /* CCP compression protocol number */ + + void *(*alloc) (struct isdn_ppp_comp_data *); + void (*free) (void *state); + int (*init) (void *state, struct isdn_ppp_comp_data *, + int unit,int debug); + + /* The reset entry needs to get more exact information about the + ResetReq or ResetAck it was called with. 
The parameters are + obvious. If reset is called without a Req or Ack frame which + could be handed into it, code MUST be set to 0. Using rsparm, + the reset entry can control if and how a ResetAck is returned. */ + + void (*reset) (void *state, unsigned char code, unsigned char id, + unsigned char *data, unsigned len, + struct isdn_ppp_resetparams *rsparm); + + int (*compress) (void *state, struct sk_buff *in, + struct sk_buff *skb_out, int proto); + + int (*decompress) (void *state,struct sk_buff *in, + struct sk_buff *skb_out, + struct isdn_ppp_resetparams *rsparm); + + void (*incomp) (void *state, struct sk_buff *in,int proto); + void (*stat) (void *state, struct compstat *stats); +}; + +extern int isdn_ppp_register_compressor(struct isdn_ppp_compressor *); +extern int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *); +extern int isdn_ppp_dial_slave(char *); +extern int isdn_ppp_hangup_slave(char *); + +typedef struct { + unsigned long seqerrs; + unsigned long frame_drops; + unsigned long overflows; + unsigned long max_queue_len; +} isdn_mppp_stats; + +typedef struct { + int mp_mrru; /* unused */ + struct sk_buff * frags; /* fragments sl list -- use skb->next */ + long frames; /* number of frames in the frame list */ + unsigned int seq; /* last processed packet seq #: any packets + * with smaller seq # will be dropped + * unconditionally */ + spinlock_t lock; + int ref_ct; + /* statistics */ + isdn_mppp_stats stats; +} ippp_bundle; + +#define NUM_RCV_BUFFS 64 + +struct ippp_buf_queue { + struct ippp_buf_queue *next; + struct ippp_buf_queue *last; + char *buf; /* NULL here indicates end of queue */ + int len; +}; + +/* The data structure for one CCP reset transaction */ +enum ippp_ccp_reset_states { + CCPResetIdle, + CCPResetSentReq, + CCPResetRcvdReq, + CCPResetSentAck, + CCPResetRcvdAck +}; + +struct ippp_ccp_reset_state { + enum ippp_ccp_reset_states state; /* State of this transaction */ + struct ippp_struct *is; /* Backlink to device stuff */ + unsigned char id; /* Backlink id index */ + unsigned char ta:1; /* The timer is active (flag) */ + unsigned char expra:1; /* We expect a ResetAck at all */ + int dlen; /* Databytes stored in data */ + struct timer_list timer; /* For timeouts/retries */ + /* This is a hack but seems sufficient for the moment. We do not want + to have this be yet another allocation for some bytes, it is more + memory management overhead than the whole mess is worth. */ + unsigned char data[IPPP_RESET_MAXDATABYTES]; +}; + +/* The data structure keeping track of the currently outstanding CCP Reset + transactions. 
*/ +struct ippp_ccp_reset { + struct ippp_ccp_reset_state *rs[256]; /* One per possible id */ + unsigned char lastid; /* Last id allocated by the engine */ +}; + +struct ippp_struct { + struct ippp_struct *next_link; + int state; + spinlock_t buflock; + struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */ + struct ippp_buf_queue *first; /* pointer to (current) first packet */ + struct ippp_buf_queue *last; /* pointer to (current) last used packet in queue */ + wait_queue_head_t wq; + struct task_struct *tk; + unsigned int mpppcfg; + unsigned int pppcfg; + unsigned int mru; + unsigned int mpmru; + unsigned int mpmtu; + unsigned int maxcid; + struct isdn_net_local_s *lp; + int unit; + int minor; + unsigned int last_link_seqno; + long mp_seqno; +#ifdef CONFIG_ISDN_PPP_VJ + unsigned char *cbuf; + struct slcompress *slcomp; +#endif +#ifdef CONFIG_IPPP_FILTER + struct sock_filter *pass_filter; /* filter for packets to pass */ + struct sock_filter *active_filter; /* filter for pkts to reset idle */ + unsigned pass_len, active_len; +#endif + unsigned long debug; + struct isdn_ppp_compressor *compressor,*decompressor; + struct isdn_ppp_compressor *link_compressor,*link_decompressor; + void *decomp_stat,*comp_stat,*link_decomp_stat,*link_comp_stat; + struct ippp_ccp_reset *reset; /* Allocated on demand, may never be needed */ + unsigned long compflags; +}; + +#endif /* __KERNEL__ */ +#endif /* _LINUX_ISDN_PPP_H */ diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h new file mode 100644 index 00000000..b8c23f88 --- /dev/null +++ b/include/linux/isdnif.h @@ -0,0 +1,546 @@ +/* $Id: isdnif.h,v 1.43.2.2 2004/01/12 23:08:35 keil Exp $ + * + * Linux ISDN subsystem + * Definition of the interface between the subsystem and its low-level drivers. + * + * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) + * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ + +#ifndef __ISDNIF_H__ +#define __ISDNIF_H__ + + +/* + * Values for general protocol-selection + */ +#define ISDN_PTYPE_UNKNOWN 0 /* Protocol undefined */ +#define ISDN_PTYPE_1TR6 1 /* german 1TR6-protocol */ +#define ISDN_PTYPE_EURO 2 /* EDSS1-protocol */ +#define ISDN_PTYPE_LEASED 3 /* for leased lines */ +#define ISDN_PTYPE_NI1 4 /* US NI-1 protocol */ +#define ISDN_PTYPE_MAX 7 /* Max. 8 Protocols */ + +/* + * Values for Layer-2-protocol-selection + */ +#define ISDN_PROTO_L2_X75I 0 /* X75/LAPB with I-Frames */ +#define ISDN_PROTO_L2_X75UI 1 /* X75/LAPB with UI-Frames */ +#define ISDN_PROTO_L2_X75BUI 2 /* X75/LAPB with UI-Frames */ +#define ISDN_PROTO_L2_HDLC 3 /* HDLC */ +#define ISDN_PROTO_L2_TRANS 4 /* Transparent (Voice) */ +#define ISDN_PROTO_L2_X25DTE 5 /* X25/LAPB DTE mode */ +#define ISDN_PROTO_L2_X25DCE 6 /* X25/LAPB DCE mode */ +#define ISDN_PROTO_L2_V11096 7 /* V.110 bitrate adaption 9600 Baud */ +#define ISDN_PROTO_L2_V11019 8 /* V.110 bitrate adaption 19200 Baud */ +#define ISDN_PROTO_L2_V11038 9 /* V.110 bitrate adaption 38400 Baud */ +#define ISDN_PROTO_L2_MODEM 10 /* Analog Modem on Board */ +#define ISDN_PROTO_L2_FAX 11 /* Fax Group 2/3 */ +#define ISDN_PROTO_L2_HDLC_56K 12 /* HDLC 56k */ +#define ISDN_PROTO_L2_MAX 15 /* Max. 
16 Protocols */ + +/* + * Values for Layer-3-protocol-selection + */ +#define ISDN_PROTO_L3_TRANS 0 /* Transparent */ +#define ISDN_PROTO_L3_TRANSDSP 1 /* Transparent with DSP */ +#define ISDN_PROTO_L3_FCLASS2 2 /* Fax Group 2/3 CLASS 2 */ +#define ISDN_PROTO_L3_FCLASS1 3 /* Fax Group 2/3 CLASS 1 */ +#define ISDN_PROTO_L3_MAX 7 /* Max. 8 Protocols */ + +#ifdef __KERNEL__ + +#include <linux/skbuff.h> + +/***************************************************************************/ +/* Extensions made by Werner Cornelius (werner@ikt.de) */ +/* */ +/* The proceed command holds a incoming call in a state to leave processes */ +/* enough time to check whether ist should be accepted. */ +/* The PROT_IO Command extends the interface to make protocol dependent */ +/* features available (call diversion, call waiting...). */ +/* */ +/* The PROT_IO Command is executed with the desired driver id and the arg */ +/* parameter coded as follows: */ +/* The lower 8 bits of arg contain the desired protocol from ISDN_PTYPE */ +/* definitions. The upper 24 bits represent the protocol specific cmd/stat.*/ +/* Any additional data is protocol and command specific. */ +/* This mechanism also applies to the statcallb callback STAT_PROT. */ +/* */ +/* This suggested extension permits an easy expansion of protocol specific */ +/* handling. Extensions may be added at any time without changing the HL */ +/* driver code and not getting conflicts without certifications. */ +/* The well known CAPI 2.0 interface handles such extensions in a similar */ +/* way. Perhaps a protocol specific module may be added and separately */ +/* loaded and linked to the basic isdn module for handling. */ +/***************************************************************************/ + +/*****************/ +/* DSS1 commands */ +/*****************/ +#define DSS1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_EURO) /* invoke a supplementary service */ +#define DSS1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_EURO) /* abort a invoke cmd */ + +/*******************************/ +/* DSS1 Status callback values */ +/*******************************/ +#define DSS1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_EURO) /* Result for invocation */ +#define DSS1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_EURO) /* Error Return for invocation */ +#define DSS1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_EURO) /* Deliver invoke broadcast info */ + + +/*********************************************************************/ +/* structures for DSS1 commands and callback */ +/* */ +/* An action is invoked by sending a DSS1_CMD_INVOKE. The ll_id, proc*/ +/* timeout, datalen and data fields must be set before calling. */ +/* */ +/* The return value is a positive hl_id value also delivered in the */ +/* hl_id field. A value of zero signals no more left hl_id capacitys.*/ +/* A negative return value signals errors in LL. So if the return */ +/* value is <= 0 no action in LL will be taken -> request ignored */ +/* */ +/* The timeout field must be filled with a positive value specifying */ +/* the amount of time the INVOKED process waits for a reaction from */ +/* the network. */ +/* If a response (either error or result) is received during this */ +/* intervall, a reporting callback is initiated and the process will */ +/* be deleted, the hl identifier will be freed. */ +/* If no response is received during the specified intervall, a error*/ +/* callback is initiated with timeout set to -1 and a datalen set */ +/* to 0. 
*/ +/* If timeout is set to a value <= 0 during INVOCATION the process is*/ +/* immediately deleted after sending the data. No callback occurs ! */ +/* */ +/* A currently waiting process may be aborted with INVOKE_ABORT. No */ +/* callback will occur when a process has been aborted. */ +/* */ +/* Broadcast invoke frames from the network are reported via the */ +/* STAT_INVOKE_BRD callback. The ll_id is set to 0, the other fields */ +/* are supplied by the network and not by the HL. */ +/*********************************************************************/ + +/*****************/ +/* NI1 commands */ +/*****************/ +#define NI1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_NI1) /* invoke a supplementary service */ +#define NI1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_NI1) /* abort a invoke cmd */ + +/*******************************/ +/* NI1 Status callback values */ +/*******************************/ +#define NI1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_NI1) /* Result for invocation */ +#define NI1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_NI1) /* Error Return for invocation */ +#define NI1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_NI1) /* Deliver invoke broadcast info */ + +typedef struct + { ulong ll_id; /* ID supplied by LL when executing */ + /* a command and returned by HL for */ + /* INVOKE_RES and INVOKE_ERR */ + int hl_id; /* ID supplied by HL when called */ + /* for executing a cmd and delivered */ + /* for results and errors */ + /* must be supplied by LL when aborting*/ + int proc; /* invoke procedure used by CMD_INVOKE */ + /* returned by callback and broadcast */ + int timeout; /* timeout for INVOKE CMD in ms */ + /* -1 in stat callback when timed out */ + /* error value when error callback */ + int datalen; /* length of cmd or stat data */ + u_char *data;/* pointer to data delivered or send */ + } isdn_cmd_stat; + +/* + * Commands from linklevel to lowlevel + * + */ +#define ISDN_CMD_IOCTL 0 /* Perform ioctl */ +#define ISDN_CMD_DIAL 1 /* Dial out */ +#define ISDN_CMD_ACCEPTD 2 /* Accept an incoming call on D-Chan. */ +#define ISDN_CMD_ACCEPTB 3 /* Request B-Channel connect. */ +#define ISDN_CMD_HANGUP 4 /* Hangup */ +#define ISDN_CMD_CLREAZ 5 /* Clear EAZ(s) of channel */ +#define ISDN_CMD_SETEAZ 6 /* Set EAZ(s) of channel */ +#define ISDN_CMD_GETEAZ 7 /* Get EAZ(s) of channel */ +#define ISDN_CMD_SETSIL 8 /* Set Service-Indicator-List of channel */ +#define ISDN_CMD_GETSIL 9 /* Get Service-Indicator-List of channel */ +#define ISDN_CMD_SETL2 10 /* Set B-Chan. Layer2-Parameter */ +#define ISDN_CMD_GETL2 11 /* Get B-Chan. Layer2-Parameter */ +#define ISDN_CMD_SETL3 12 /* Set B-Chan. Layer3-Parameter */ +#define ISDN_CMD_GETL3 13 /* Get B-Chan. Layer3-Parameter */ +// #define ISDN_CMD_LOCK 14 /* Signal usage by upper levels */ +// #define ISDN_CMD_UNLOCK 15 /* Release usage-lock */ +#define ISDN_CMD_SUSPEND 16 /* Suspend connection */ +#define ISDN_CMD_RESUME 17 /* Resume connection */ +#define ISDN_CMD_PROCEED 18 /* Proceed with call establishment */ +#define ISDN_CMD_ALERT 19 /* Alert after Proceeding */ +#define ISDN_CMD_REDIR 20 /* Redir a incoming call */ +#define ISDN_CMD_PROT_IO 21 /* Protocol specific commands */ +#define CAPI_PUT_MESSAGE 22 /* CAPI message send down or up */ +#define ISDN_CMD_FAXCMD 23 /* FAX commands to HL-driver */ +#define ISDN_CMD_AUDIO 24 /* DSP, DTMF, ... settings */ + +/* + * Status-Values delivered from lowlevel to linklevel via + * statcallb(). 
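The PROT_IO/INVOKE convention spelled out above encodes the ISDN_PTYPE in the low 8 bits of arg and the protocol-specific operation in the upper bits, with isdn_cmd_stat carrying ll_id, proc, timeout and the request data. A hedged sketch of issuing a DSS1 invoke through a driver's command() entry; the driver index, procedure value and payload bytes are invented example values.

#include <linux/types.h>
#include <linux/isdnif.h>

/* Returns > 0 (hl_id), 0 (no hl_id left) or < 0 (LL error), as the
 * comment block above specifies. */
static int demo_dss1_invoke(isdn_if *iif, int drvidx)
{
	static u_char data[4] = { 0x91, 0x00, 0x00, 0x00 };	/* example only */
	isdn_ctrl cmd;
	isdn_cmd_stat *ics = &cmd.parm.isdn_io;

	cmd.driver  = drvidx;
	cmd.command = ISDN_CMD_PROT_IO;
	cmd.arg     = DSS1_CMD_INVOKE;	/* low byte: ISDN_PTYPE_EURO */

	ics->ll_id   = 1;	/* our reference, echoed in the callback */
	ics->proc    = 0x0d;	/* example invoke procedure */
	ics->timeout = 4000;	/* wait up to 4 s for the network */
	ics->datalen = sizeof(data);
	ics->data    = data;

	return iif->command(&cmd);	/* results arrive via DSS1_STAT_INVOKE_* */
}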
+ * + */ +#define ISDN_STAT_STAVAIL 256 /* Raw status-data available */ +#define ISDN_STAT_ICALL 257 /* Incoming call detected */ +#define ISDN_STAT_RUN 258 /* Signal protocol-code is running */ +#define ISDN_STAT_STOP 259 /* Signal halt of protocol-code */ +#define ISDN_STAT_DCONN 260 /* Signal D-Channel connect */ +#define ISDN_STAT_BCONN 261 /* Signal B-Channel connect */ +#define ISDN_STAT_DHUP 262 /* Signal D-Channel disconnect */ +#define ISDN_STAT_BHUP 263 /* Signal B-Channel disconnect */ +#define ISDN_STAT_CINF 264 /* Charge-Info */ +#define ISDN_STAT_LOAD 265 /* Signal new lowlevel-driver is loaded */ +#define ISDN_STAT_UNLOAD 266 /* Signal unload of lowlevel-driver */ +#define ISDN_STAT_BSENT 267 /* Signal packet sent */ +#define ISDN_STAT_NODCH 268 /* Signal no D-Channel */ +#define ISDN_STAT_ADDCH 269 /* Add more Channels */ +#define ISDN_STAT_CAUSE 270 /* Cause-Message */ +#define ISDN_STAT_ICALLW 271 /* Incoming call without B-chan waiting */ +#define ISDN_STAT_REDIR 272 /* Redir result */ +#define ISDN_STAT_PROT 273 /* protocol IO specific callback */ +#define ISDN_STAT_DISPLAY 274 /* deliver a received display message */ +#define ISDN_STAT_L1ERR 275 /* Signal Layer-1 Error */ +#define ISDN_STAT_FAXIND 276 /* FAX indications from HL-driver */ +#define ISDN_STAT_AUDIO 277 /* DTMF, DSP indications */ +#define ISDN_STAT_DISCH 278 /* Disable/Enable channel usage */ + +/* + * Audio commands + */ +#define ISDN_AUDIO_SETDD 0 /* Set DTMF detection */ +#define ISDN_AUDIO_DTMF 1 /* Rx/Tx DTMF */ + +/* + * Values for errcode field + */ +#define ISDN_STAT_L1ERR_SEND 1 +#define ISDN_STAT_L1ERR_RECV 2 + +/* + * Values for feature-field of interface-struct. + */ +/* Layer 2 */ +#define ISDN_FEATURE_L2_X75I (0x0001 << ISDN_PROTO_L2_X75I) +#define ISDN_FEATURE_L2_X75UI (0x0001 << ISDN_PROTO_L2_X75UI) +#define ISDN_FEATURE_L2_X75BUI (0x0001 << ISDN_PROTO_L2_X75BUI) +#define ISDN_FEATURE_L2_HDLC (0x0001 << ISDN_PROTO_L2_HDLC) +#define ISDN_FEATURE_L2_TRANS (0x0001 << ISDN_PROTO_L2_TRANS) +#define ISDN_FEATURE_L2_X25DTE (0x0001 << ISDN_PROTO_L2_X25DTE) +#define ISDN_FEATURE_L2_X25DCE (0x0001 << ISDN_PROTO_L2_X25DCE) +#define ISDN_FEATURE_L2_V11096 (0x0001 << ISDN_PROTO_L2_V11096) +#define ISDN_FEATURE_L2_V11019 (0x0001 << ISDN_PROTO_L2_V11019) +#define ISDN_FEATURE_L2_V11038 (0x0001 << ISDN_PROTO_L2_V11038) +#define ISDN_FEATURE_L2_MODEM (0x0001 << ISDN_PROTO_L2_MODEM) +#define ISDN_FEATURE_L2_FAX (0x0001 << ISDN_PROTO_L2_FAX) +#define ISDN_FEATURE_L2_HDLC_56K (0x0001 << ISDN_PROTO_L2_HDLC_56K) + +#define ISDN_FEATURE_L2_MASK (0x0FFFF) /* Max. 16 protocols */ +#define ISDN_FEATURE_L2_SHIFT (0) + +/* Layer 3 */ +#define ISDN_FEATURE_L3_TRANS (0x10000 << ISDN_PROTO_L3_TRANS) +#define ISDN_FEATURE_L3_TRANSDSP (0x10000 << ISDN_PROTO_L3_TRANSDSP) +#define ISDN_FEATURE_L3_FCLASS2 (0x10000 << ISDN_PROTO_L3_FCLASS2) +#define ISDN_FEATURE_L3_FCLASS1 (0x10000 << ISDN_PROTO_L3_FCLASS1) + +#define ISDN_FEATURE_L3_MASK (0x0FF0000) /* Max. 8 Protocols */ +#define ISDN_FEATURE_L3_SHIFT (16) + +/* Signaling */ +#define ISDN_FEATURE_P_UNKNOWN (0x1000000 << ISDN_PTYPE_UNKNOWN) +#define ISDN_FEATURE_P_1TR6 (0x1000000 << ISDN_PTYPE_1TR6) +#define ISDN_FEATURE_P_EURO (0x1000000 << ISDN_PTYPE_EURO) +#define ISDN_FEATURE_P_NI1 (0x1000000 << ISDN_PTYPE_NI1) + +#define ISDN_FEATURE_P_MASK (0x0FF000000) /* Max. 
8 Protocols */ +#define ISDN_FEATURE_P_SHIFT (24) + +typedef struct setup_parm { + unsigned char phone[32]; /* Remote Phone-Number */ + unsigned char eazmsn[32]; /* Local EAZ or MSN */ + unsigned char si1; /* Service Indicator 1 */ + unsigned char si2; /* Service Indicator 2 */ + unsigned char plan; /* Numbering plan */ + unsigned char screen; /* Screening info */ +} setup_parm; + + +#ifdef CONFIG_ISDN_TTY_FAX +/* T.30 Fax G3 */ + +#define FAXIDLEN 21 + +typedef struct T30_s { + /* session parameters */ + __u8 resolution; + __u8 rate; + __u8 width; + __u8 length; + __u8 compression; + __u8 ecm; + __u8 binary; + __u8 scantime; + __u8 id[FAXIDLEN]; + /* additional parameters */ + __u8 phase; + __u8 direction; + __u8 code; + __u8 badlin; + __u8 badmul; + __u8 bor; + __u8 fet; + __u8 pollid[FAXIDLEN]; + __u8 cq; + __u8 cr; + __u8 ctcrty; + __u8 minsp; + __u8 phcto; + __u8 rel; + __u8 nbc; + /* remote station parameters */ + __u8 r_resolution; + __u8 r_rate; + __u8 r_width; + __u8 r_length; + __u8 r_compression; + __u8 r_ecm; + __u8 r_binary; + __u8 r_scantime; + __u8 r_id[FAXIDLEN]; + __u8 r_code; +} __packed T30_s; + +#define ISDN_TTY_FAX_CONN_IN 0 +#define ISDN_TTY_FAX_CONN_OUT 1 + +#define ISDN_TTY_FAX_FCON 0 +#define ISDN_TTY_FAX_DIS 1 +#define ISDN_TTY_FAX_FTT 2 +#define ISDN_TTY_FAX_MCF 3 +#define ISDN_TTY_FAX_DCS 4 +#define ISDN_TTY_FAX_TRAIN_OK 5 +#define ISDN_TTY_FAX_EOP 6 +#define ISDN_TTY_FAX_EOM 7 +#define ISDN_TTY_FAX_MPS 8 +#define ISDN_TTY_FAX_DTC 9 +#define ISDN_TTY_FAX_RID 10 +#define ISDN_TTY_FAX_HNG 11 +#define ISDN_TTY_FAX_DT 12 +#define ISDN_TTY_FAX_FCON_I 13 +#define ISDN_TTY_FAX_DR 14 +#define ISDN_TTY_FAX_ET 15 +#define ISDN_TTY_FAX_CFR 16 +#define ISDN_TTY_FAX_PTS 17 +#define ISDN_TTY_FAX_SENT 18 + +#define ISDN_FAX_PHASE_IDLE 0 +#define ISDN_FAX_PHASE_A 1 +#define ISDN_FAX_PHASE_B 2 +#define ISDN_FAX_PHASE_C 3 +#define ISDN_FAX_PHASE_D 4 +#define ISDN_FAX_PHASE_E 5 + +#endif /* TTY_FAX */ + +#define ISDN_FAX_CLASS1_FAE 0 +#define ISDN_FAX_CLASS1_FTS 1 +#define ISDN_FAX_CLASS1_FRS 2 +#define ISDN_FAX_CLASS1_FTM 3 +#define ISDN_FAX_CLASS1_FRM 4 +#define ISDN_FAX_CLASS1_FTH 5 +#define ISDN_FAX_CLASS1_FRH 6 +#define ISDN_FAX_CLASS1_CTRL 7 + +#define ISDN_FAX_CLASS1_OK 0 +#define ISDN_FAX_CLASS1_CONNECT 1 +#define ISDN_FAX_CLASS1_NOCARR 2 +#define ISDN_FAX_CLASS1_ERROR 3 +#define ISDN_FAX_CLASS1_FCERROR 4 +#define ISDN_FAX_CLASS1_QUERY 5 + +typedef struct { + __u8 cmd; + __u8 subcmd; + __u8 para[50]; +} aux_s; + +#define AT_COMMAND 0 +#define AT_EQ_VALUE 1 +#define AT_QUERY 2 +#define AT_EQ_QUERY 3 + +/* CAPI structs */ + +/* this is compatible to the old union size */ +#define MAX_CAPI_PARA_LEN 50 + +typedef struct { + /* Header */ + __u16 Length; + __u16 ApplId; + __u8 Command; + __u8 Subcommand; + __u16 Messagenumber; + + /* Parameter */ + union { + __u32 Controller; + __u32 PLCI; + __u32 NCCI; + } adr; + __u8 para[MAX_CAPI_PARA_LEN]; +} capi_msg; + +/* + * Structure for exchanging above infos + * + */ +typedef struct { + int driver; /* Lowlevel-Driver-ID */ + int command; /* Command or Status (see above) */ + ulong arg; /* Additional Data */ + union { + ulong errcode; /* Type of error with STAT_L1ERR */ + int length; /* Amount of bytes sent with STAT_BSENT */ + u_char num[50]; /* Additional Data */ + setup_parm setup;/* For SETUP msg */ + capi_msg cmsg; /* For CAPI like messages */ + char display[85];/* display message data */ + isdn_cmd_stat isdn_io; /* ISDN IO-parameter/result */ + aux_s aux; /* for modem commands/indications */ +#ifdef CONFIG_ISDN_TTY_FAX + T30_s 
*fax; /* Pointer to ttys fax struct */ +#endif + ulong userdata; /* User Data */ + } parm; +} isdn_ctrl; + +#define dss1_io isdn_io +#define ni1_io isdn_io + +/* + * The interface-struct itself (initialized at load-time of lowlevel-driver) + * + * See Documentation/isdn/INTERFACE for a description, how the communication + * between the ISDN subsystem and its drivers is done. + * + */ +typedef struct { + struct module *owner; + + /* Number of channels supported by this driver + */ + int channels; + + /* + * Maximum Size of transmit/receive-buffer this driver supports. + */ + int maxbufsize; + + /* Feature-Flags for this driver. + * See defines ISDN_FEATURE_... for Values + */ + unsigned long features; + + /* + * Needed for calculating + * dev->hard_header_len = linklayer header + hl_hdrlen; + * Drivers, not supporting sk_buff's should set this to 0. + */ + unsigned short hl_hdrlen; + + /* + * Receive-Callback using sk_buff's + * Parameters: + * int Driver-ID + * int local channel-number (0 ...) + * struct sk_buff *skb received Data + */ + void (*rcvcallb_skb)(int, int, struct sk_buff *); + + /* Status-Callback + * Parameters: + * isdn_ctrl* + * driver = Driver ID. + * command = One of above ISDN_STAT_... constants. + * arg = depending on status-type. + * num = depending on status-type. + */ + int (*statcallb)(isdn_ctrl*); + + /* Send command + * Parameters: + * isdn_ctrl* + * driver = Driver ID. + * command = One of above ISDN_CMD_... constants. + * arg = depending on command. + * num = depending on command. + */ + int (*command)(isdn_ctrl*); + + /* + * Send data using sk_buff's + * Parameters: + * int driverId + * int local channel-number (0...) + * int Flag: Need ACK for this packet. + * struct sk_buff *skb Data to send + */ + int (*writebuf_skb) (int, int, int, struct sk_buff *); + + /* Send raw D-Channel-Commands + * Parameters: + * u_char pointer data + * int length of data + * int driverId + * int local channel-number (0 ...) + */ + int (*writecmd)(const u_char __user *, int, int, int); + + /* Read raw Status replies + * u_char pointer data (volatile) + * int length of buffer + * int driverId + * int local channel-number (0 ...) + */ + int (*readstat)(u_char __user *, int, int, int); + + char id[20]; +} isdn_if; + +/* + * Function which must be called by lowlevel-driver at loadtime with + * the following fields of above struct set: + * + * channels Number of channels that will be supported. + * hl_hdrlen Space to preserve in sk_buff's when sending. Drivers, not + * supporting sk_buff's should set this to 0. + * command Address of Command-Handler. + * features Bitwise coded Features of this driver. (use ISDN_FEATURE_...) + * writebuf_skb Address of Skbuff-Send-Handler. + * writecmd " " D-Channel " which accepts raw D-Ch-Commands. + * readstat " " D-Channel " which delivers raw Status-Data. + * + * The linklevel-driver fills the following fields: + * + * channels Driver-ID assigned to this driver. (Must be used on all + * subsequent callbacks. + * rcvcallb_skb Address of handler for received Skbuff's. + * statcallb " " " for status-changes. + * + */ +extern int register_isdn(isdn_if*); +#include <asm/uaccess.h> + +#endif /* __KERNEL__ */ + +#endif /* __ISDNIF_H__ */ diff --git a/include/linux/isicom.h b/include/linux/isicom.h new file mode 100644 index 00000000..b92e0565 --- /dev/null +++ b/include/linux/isicom.h @@ -0,0 +1,84 @@ +#ifndef _LINUX_ISICOM_H +#define _LINUX_ISICOM_H + +#define YES 1 +#define NO 0 + +/* + * ISICOM Driver definitions ... 
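The closing comment of isdnif.h above lists which isdn_if fields a low-level driver fills before calling register_isdn() and which ones the link level fills in return (channels becomes the assigned driver id, rcvcallb_skb/statcallb the callbacks to use). A minimal sketch under stated assumptions: the demo_* callbacks are stubs, the feature mask is just one example combination, and register_isdn() is assumed to return nonzero on success.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/isdnif.h>

static int demo_command(isdn_ctrl *c)
{
	return 0;	/* would dispatch ISDN_CMD_* to the hardware */
}

static int demo_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb)
{
	return skb->len;	/* pretend everything was queued */
}

static int demo_writecmd(const u_char __user *buf, int len, int id, int chan)
{
	return len;
}

static int demo_readstat(u_char __user *buf, int len, int id, int chan)
{
	return 0;
}

static isdn_if demo_iif;
static int demo_drvid;		/* driver id assigned by the link level */

static int demo_register(void)
{
	demo_iif.owner        = THIS_MODULE;
	demo_iif.channels     = 2;	/* e.g. one BRI with two B-channels */
	demo_iif.maxbufsize   = 4000;
	demo_iif.hl_hdrlen    = 0;	/* no extra link-layer header */
	demo_iif.features     = ISDN_FEATURE_L2_HDLC | ISDN_FEATURE_L2_TRANS |
				ISDN_FEATURE_L3_TRANS | ISDN_FEATURE_P_EURO;
	demo_iif.command      = demo_command;
	demo_iif.writebuf_skb = demo_writebuf_skb;
	demo_iif.writecmd     = demo_writecmd;
	demo_iif.readstat     = demo_readstat;
	strcpy(demo_iif.id, "demo0");

	if (!register_isdn(&demo_iif))	/* assumed: nonzero means success */
		return -EIO;

	/* the link level has now filled channels (the assigned driver id),
	 * rcvcallb_skb and statcallb */
	demo_drvid = demo_iif.channels;
	return 0;
}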
+ * + */ + +#define ISICOM_NAME "ISICom" + +/* + * PCI definitions + */ + +#define DEVID_COUNT 9 +#define VENDOR_ID 0x10b5 + +/* + * These are now officially allocated numbers + */ + +#define ISICOM_NMAJOR 112 /* normal */ +#define ISICOM_CMAJOR 113 /* callout */ +#define ISICOM_MAGIC (('M' << 8) | 'T') + +#define WAKEUP_CHARS 256 /* hard coded for now */ +#define TX_SIZE 254 + +#define BOARD_COUNT 4 +#define PORT_COUNT (BOARD_COUNT*16) + +/* character sizes */ + +#define ISICOM_CS5 0x0000 +#define ISICOM_CS6 0x0001 +#define ISICOM_CS7 0x0002 +#define ISICOM_CS8 0x0003 + +/* stop bits */ + +#define ISICOM_1SB 0x0000 +#define ISICOM_2SB 0x0004 + +/* parity */ + +#define ISICOM_NOPAR 0x0000 +#define ISICOM_ODPAR 0x0008 +#define ISICOM_EVPAR 0x0018 + +/* flow control */ + +#define ISICOM_CTSRTS 0x03 +#define ISICOM_INITIATE_XONXOFF 0x04 +#define ISICOM_RESPOND_XONXOFF 0x08 + +#define BOARD(line) (((line) >> 4) & 0x3) + + /* isi kill queue bitmap */ + +#define ISICOM_KILLTX 0x01 +#define ISICOM_KILLRX 0x02 + + /* isi_board status bitmap */ + +#define FIRMWARE_LOADED 0x0001 +#define BOARD_ACTIVE 0x0002 +#define BOARD_INIT 0x0004 + + /* isi_port status bitmap */ + +#define ISI_CTS 0x1000 +#define ISI_DSR 0x2000 +#define ISI_RI 0x4000 +#define ISI_DCD 0x8000 +#define ISI_DTR 0x0100 +#define ISI_RTS 0x0200 + + +#define ISI_TXOK 0x0001 + +#endif /* ISICOM_H */ diff --git a/include/linux/iso_fs.h b/include/linux/iso_fs.h new file mode 100644 index 00000000..4688ac42 --- /dev/null +++ b/include/linux/iso_fs.h @@ -0,0 +1,165 @@ +#ifndef _ISOFS_FS_H +#define _ISOFS_FS_H + +#include <linux/types.h> +#include <linux/magic.h> + +/* + * The isofs filesystem constants/structures + */ + +/* This part borrowed from the bsd386 isofs */ +#define ISODCL(from, to) (to - from + 1) + +struct iso_volume_descriptor { + char type[ISODCL(1,1)]; /* 711 */ + char id[ISODCL(2,6)]; + char version[ISODCL(7,7)]; + char data[ISODCL(8,2048)]; +}; + +/* volume descriptor types */ +#define ISO_VD_PRIMARY 1 +#define ISO_VD_SUPPLEMENTARY 2 +#define ISO_VD_END 255 + +#define ISO_STANDARD_ID "CD001" + +struct iso_primary_descriptor { + char type [ISODCL ( 1, 1)]; /* 711 */ + char id [ISODCL ( 2, 6)]; + char version [ISODCL ( 7, 7)]; /* 711 */ + char unused1 [ISODCL ( 8, 8)]; + char system_id [ISODCL ( 9, 40)]; /* achars */ + char volume_id [ISODCL ( 41, 72)]; /* dchars */ + char unused2 [ISODCL ( 73, 80)]; + char volume_space_size [ISODCL ( 81, 88)]; /* 733 */ + char unused3 [ISODCL ( 89, 120)]; + char volume_set_size [ISODCL (121, 124)]; /* 723 */ + char volume_sequence_number [ISODCL (125, 128)]; /* 723 */ + char logical_block_size [ISODCL (129, 132)]; /* 723 */ + char path_table_size [ISODCL (133, 140)]; /* 733 */ + char type_l_path_table [ISODCL (141, 144)]; /* 731 */ + char opt_type_l_path_table [ISODCL (145, 148)]; /* 731 */ + char type_m_path_table [ISODCL (149, 152)]; /* 732 */ + char opt_type_m_path_table [ISODCL (153, 156)]; /* 732 */ + char root_directory_record [ISODCL (157, 190)]; /* 9.1 */ + char volume_set_id [ISODCL (191, 318)]; /* dchars */ + char publisher_id [ISODCL (319, 446)]; /* achars */ + char preparer_id [ISODCL (447, 574)]; /* achars */ + char application_id [ISODCL (575, 702)]; /* achars */ + char copyright_file_id [ISODCL (703, 739)]; /* 7.5 dchars */ + char abstract_file_id [ISODCL (740, 776)]; /* 7.5 dchars */ + char bibliographic_file_id [ISODCL (777, 813)]; /* 7.5 dchars */ + char creation_date [ISODCL (814, 830)]; /* 8.4.26.1 */ + char modification_date [ISODCL (831, 847)]; /* 8.4.26.1 */ + char 
expiration_date [ISODCL (848, 864)]; /* 8.4.26.1 */ + char effective_date [ISODCL (865, 881)]; /* 8.4.26.1 */ + char file_structure_version [ISODCL (882, 882)]; /* 711 */ + char unused4 [ISODCL (883, 883)]; + char application_data [ISODCL (884, 1395)]; + char unused5 [ISODCL (1396, 2048)]; +}; + +/* Almost the same as the primary descriptor but two fields are specified */ +struct iso_supplementary_descriptor { + char type [ISODCL ( 1, 1)]; /* 711 */ + char id [ISODCL ( 2, 6)]; + char version [ISODCL ( 7, 7)]; /* 711 */ + char flags [ISODCL ( 8, 8)]; /* 853 */ + char system_id [ISODCL ( 9, 40)]; /* achars */ + char volume_id [ISODCL ( 41, 72)]; /* dchars */ + char unused2 [ISODCL ( 73, 80)]; + char volume_space_size [ISODCL ( 81, 88)]; /* 733 */ + char escape [ISODCL ( 89, 120)]; /* 856 */ + char volume_set_size [ISODCL (121, 124)]; /* 723 */ + char volume_sequence_number [ISODCL (125, 128)]; /* 723 */ + char logical_block_size [ISODCL (129, 132)]; /* 723 */ + char path_table_size [ISODCL (133, 140)]; /* 733 */ + char type_l_path_table [ISODCL (141, 144)]; /* 731 */ + char opt_type_l_path_table [ISODCL (145, 148)]; /* 731 */ + char type_m_path_table [ISODCL (149, 152)]; /* 732 */ + char opt_type_m_path_table [ISODCL (153, 156)]; /* 732 */ + char root_directory_record [ISODCL (157, 190)]; /* 9.1 */ + char volume_set_id [ISODCL (191, 318)]; /* dchars */ + char publisher_id [ISODCL (319, 446)]; /* achars */ + char preparer_id [ISODCL (447, 574)]; /* achars */ + char application_id [ISODCL (575, 702)]; /* achars */ + char copyright_file_id [ISODCL (703, 739)]; /* 7.5 dchars */ + char abstract_file_id [ISODCL (740, 776)]; /* 7.5 dchars */ + char bibliographic_file_id [ISODCL (777, 813)]; /* 7.5 dchars */ + char creation_date [ISODCL (814, 830)]; /* 8.4.26.1 */ + char modification_date [ISODCL (831, 847)]; /* 8.4.26.1 */ + char expiration_date [ISODCL (848, 864)]; /* 8.4.26.1 */ + char effective_date [ISODCL (865, 881)]; /* 8.4.26.1 */ + char file_structure_version [ISODCL (882, 882)]; /* 711 */ + char unused4 [ISODCL (883, 883)]; + char application_data [ISODCL (884, 1395)]; + char unused5 [ISODCL (1396, 2048)]; +}; + + +#define HS_STANDARD_ID "CDROM" + +struct hs_volume_descriptor { + char foo [ISODCL ( 1, 8)]; /* 733 */ + char type [ISODCL ( 9, 9)]; /* 711 */ + char id [ISODCL ( 10, 14)]; + char version [ISODCL ( 15, 15)]; /* 711 */ + char data[ISODCL(16,2048)]; +}; + + +struct hs_primary_descriptor { + char foo [ISODCL ( 1, 8)]; /* 733 */ + char type [ISODCL ( 9, 9)]; /* 711 */ + char id [ISODCL ( 10, 14)]; + char version [ISODCL ( 15, 15)]; /* 711 */ + char unused1 [ISODCL ( 16, 16)]; /* 711 */ + char system_id [ISODCL ( 17, 48)]; /* achars */ + char volume_id [ISODCL ( 49, 80)]; /* dchars */ + char unused2 [ISODCL ( 81, 88)]; /* 733 */ + char volume_space_size [ISODCL ( 89, 96)]; /* 733 */ + char unused3 [ISODCL ( 97, 128)]; /* 733 */ + char volume_set_size [ISODCL (129, 132)]; /* 723 */ + char volume_sequence_number [ISODCL (133, 136)]; /* 723 */ + char logical_block_size [ISODCL (137, 140)]; /* 723 */ + char path_table_size [ISODCL (141, 148)]; /* 733 */ + char type_l_path_table [ISODCL (149, 152)]; /* 731 */ + char unused4 [ISODCL (153, 180)]; /* 733 */ + char root_directory_record [ISODCL (181, 214)]; /* 9.1 */ +}; + +/* We use this to help us look up the parent inode numbers. 
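+   The numeric comments on the fields in this file (711, 721, 723, 731,
+   732, 733) refer to the integer encodings of the corresponding ISO
+   9660/ECMA-119 sections: 711 is an 8-bit value, 721 and 723 are 16-bit
+   little-endian and both-byte-order values, and 731, 732 and 733 are
+   32-bit little-endian, big-endian and both-byte-order values.  A
+   hypothetical helper (not part of this header) for the little-endian
+   731 form could look like:
+
+	static inline __u32 iso_731(const char *p)
+	{
+		const __u8 *u = (const __u8 *)p;
+
+		return u[0] | (u[1] << 8) | (u[2] << 16) | ((__u32)u[3] << 24);
+	}
+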
*/ + +struct iso_path_table{ + unsigned char name_len[2]; /* 721 */ + char extent[4]; /* 731 */ + char parent[2]; /* 721 */ + char name[0]; +} __attribute__((packed)); + +/* high sierra is identical to iso, except that the date is only 6 bytes, and + there is an extra reserved byte after the flags */ + +struct iso_directory_record { + char length [ISODCL (1, 1)]; /* 711 */ + char ext_attr_length [ISODCL (2, 2)]; /* 711 */ + char extent [ISODCL (3, 10)]; /* 733 */ + char size [ISODCL (11, 18)]; /* 733 */ + char date [ISODCL (19, 25)]; /* 7 by 711 */ + char flags [ISODCL (26, 26)]; + char file_unit_size [ISODCL (27, 27)]; /* 711 */ + char interleave [ISODCL (28, 28)]; /* 711 */ + char volume_sequence_number [ISODCL (29, 32)]; /* 723 */ + unsigned char name_len [ISODCL (33, 33)]; /* 711 */ + char name [0]; +} __attribute__((packed)); + +#define ISOFS_BLOCK_BITS 11 +#define ISOFS_BLOCK_SIZE 2048 + +#define ISOFS_BUFFER_SIZE(INODE) ((INODE)->i_sb->s_blocksize) +#define ISOFS_BUFFER_BITS(INODE) ((INODE)->i_sb->s_blocksize_bits) + +#endif /* _ISOFS_FS_H */ diff --git a/include/linux/istallion.h b/include/linux/istallion.h new file mode 100644 index 00000000..ad700a60 --- /dev/null +++ b/include/linux/istallion.h @@ -0,0 +1,123 @@ +/*****************************************************************************/ + +/* + * istallion.h -- stallion intelligent multiport serial driver. + * + * Copyright (C) 1996-1998 Stallion Technologies + * Copyright (C) 1994-1996 Greg Ungerer. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*****************************************************************************/ +#ifndef _ISTALLION_H +#define _ISTALLION_H +/*****************************************************************************/ + +/* + * Define important driver constants here. + */ +#define STL_MAXBRDS 4 +#define STL_MAXPANELS 4 +#define STL_MAXPORTS 64 +#define STL_MAXCHANS (STL_MAXPORTS + 1) +#define STL_MAXDEVS (STL_MAXBRDS * STL_MAXPORTS) + + +/* + * Define a set of structures to hold all the board/panel/port info + * for our ports. These will be dynamically allocated as required at + * driver initialization time. + */ + +/* + * Port and board structures to hold status info about each object. + * The board structure contains pointers to structures for each port + * connected to it. Panels are not distinguished here, since + * communication with the slave board will always be on a per port + * basis. 
+ */ +struct stliport { + unsigned long magic; + struct tty_port port; + unsigned int portnr; + unsigned int panelnr; + unsigned int brdnr; + unsigned long state; + unsigned int devnr; + int baud_base; + int custom_divisor; + int closing_wait; + int rc; + int argsize; + void *argp; + unsigned int rxmarkmsk; + wait_queue_head_t raw_wait; + struct asysigs asig; + unsigned long addr; + unsigned long rxoffset; + unsigned long txoffset; + unsigned long sigs; + unsigned long pflag; + unsigned int rxsize; + unsigned int txsize; + unsigned char reqbit; + unsigned char portidx; + unsigned char portbit; +}; + +/* + * Use a structure of function pointers to do board level operations. + * These include, enable/disable, paging shared memory, interrupting, etc. + */ +struct stlibrd { + unsigned long magic; + unsigned int brdnr; + unsigned int brdtype; + unsigned long state; + unsigned int nrpanels; + unsigned int nrports; + unsigned int nrdevs; + unsigned int iobase; + int iosize; + unsigned long memaddr; + void __iomem *membase; + unsigned long memsize; + int pagesize; + int hostoffset; + int slaveoffset; + int bitsize; + int enabval; + unsigned int panels[STL_MAXPANELS]; + int panelids[STL_MAXPANELS]; + void (*init)(struct stlibrd *brdp); + void (*enable)(struct stlibrd *brdp); + void (*reenable)(struct stlibrd *brdp); + void (*disable)(struct stlibrd *brdp); + void __iomem *(*getmemptr)(struct stlibrd *brdp, unsigned long offset, int line); + void (*intr)(struct stlibrd *brdp); + void (*reset)(struct stlibrd *brdp); + struct stliport *ports[STL_MAXPORTS]; +}; + + +/* + * Define MAGIC numbers used for above structures. + */ +#define STLI_PORTMAGIC 0xe671c7a1 +#define STLI_BOARDMAGIC 0x4bc6c825 + +/*****************************************************************************/ +#endif diff --git a/include/linux/ivtv.h b/include/linux/ivtv.h new file mode 100644 index 00000000..42bf7257 --- /dev/null +++ b/include/linux/ivtv.h @@ -0,0 +1,73 @@ +/* + Public ivtv API header + Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> + Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_IVTV_H__ +#define __LINUX_IVTV_H__ + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/videodev2.h> + +/* ivtv knows several distinct output modes: MPEG streaming, + YUV streaming, YUV updates through user DMA and the passthrough + mode. + + In order to clearly tell the driver that we are in user DMA + YUV mode you need to call IVTV_IOC_DMA_FRAME with y_source == NULL + first (althrough if you don't then the first time + DMA_FRAME is called the mode switch is done automatically). + + When you close the file handle the user DMA mode is exited again. + + While in one mode, you cannot use another mode (EBUSY is returned). 
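+
+   As a rough, hypothetical sketch (with fd standing for an open ivtv
+   decoder file handle, which this header does not define), the mode
+   switch described above would look something like:
+
+	struct ivtv_dma_frame frame;
+
+	memset(&frame, 0, sizeof(frame));
+	frame.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	frame.y_source = NULL;
+	ioctl(fd, IVTV_IOC_DMA_FRAME, &frame);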
+ + All this means that if you want to change the YUV interlacing + for the user DMA YUV mode you first need to do call IVTV_IOC_DMA_FRAME + with y_source == NULL before you can set the correct format using + VIDIOC_S_FMT. + + Eventually all this should be replaced with a proper V4L2 API, + but for now we have to do it this way. */ + +struct ivtv_dma_frame { + enum v4l2_buf_type type; /* V4L2_BUF_TYPE_VIDEO_OUTPUT */ + __u32 pixelformat; /* 0 == same as destination */ + void __user *y_source; /* if NULL and type == V4L2_BUF_TYPE_VIDEO_OUTPUT, + then just switch to user DMA YUV output mode */ + void __user *uv_source; /* Unused for RGB pixelformats */ + struct v4l2_rect src; + struct v4l2_rect dst; + __u32 src_width; + __u32 src_height; +}; + +#define IVTV_IOC_DMA_FRAME _IOW ('V', BASE_VIDIOC_PRIVATE+0, struct ivtv_dma_frame) + +/* Select the passthrough mode (if the argument is non-zero). In the passthrough + mode the output of the encoder is passed immediately into the decoder. */ +#define IVTV_IOC_PASSTHROUGH_MODE _IOW ('V', BASE_VIDIOC_PRIVATE+1, int) + +/* Deprecated defines: applications should use the defines from videodev2.h */ +#define IVTV_SLICED_TYPE_TELETEXT_B V4L2_MPEG_VBI_IVTV_TELETEXT_B +#define IVTV_SLICED_TYPE_CAPTION_525 V4L2_MPEG_VBI_IVTV_CAPTION_525 +#define IVTV_SLICED_TYPE_WSS_625 V4L2_MPEG_VBI_IVTV_WSS_625 +#define IVTV_SLICED_TYPE_VPS V4L2_MPEG_VBI_IVTV_VPS + +#endif /* _LINUX_IVTV_H */ diff --git a/include/linux/ivtvfb.h b/include/linux/ivtvfb.h new file mode 100644 index 00000000..e8b92f67 --- /dev/null +++ b/include/linux/ivtvfb.h @@ -0,0 +1,37 @@ +/* + On Screen Display cx23415 Framebuffer driver + + Copyright (C) 2006, 2007 Ian Armstrong <ian@iarmst.demon.co.uk> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_IVTVFB_H__ +#define __LINUX_IVTVFB_H__ + +#include <linux/compiler.h> +#include <linux/types.h> + +/* Framebuffer external API */ + +struct ivtvfb_dma_frame { + void __user *source; + unsigned long dest_offset; + int count; +}; + +#define IVTVFB_IOC_DMA_FRAME _IOW('V', BASE_VIDIOC_PRIVATE+0, struct ivtvfb_dma_frame) + +#endif diff --git a/include/linux/ixjuser.h b/include/linux/ixjuser.h new file mode 100644 index 00000000..94ab5e94 --- /dev/null +++ b/include/linux/ixjuser.h @@ -0,0 +1,720 @@ +#ifndef __LINUX_IXJUSER_H +#define __LINUX_IXJUSER_H + +/****************************************************************************** + * + * ixjuser.h + * + * Device Driver for Quicknet Technologies, Inc.'s Telephony cards + * including the Internet PhoneJACK, Internet PhoneJACK Lite, + * Internet PhoneJACK PCI, Internet LineJACK, Internet PhoneCARD and + * SmartCABLE + * + * (c) Copyright 1999-2001 Quicknet Technologies, Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Author: Ed Okerson, <eokerson@quicknet.net> + * + * Contributors: Greg Herlein, <gherlein@quicknet.net> + * David W. Erhart, <derhart@quicknet.net> + * John Sellers, <jsellers@quicknet.net> + * Mike Preston, <mpreston@quicknet.net> + * + * More information about the hardware related to this driver can be found + * at our website: http://www.quicknet.net + * + * Fixes: + * + * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT + * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF QUICKNET + * TECHNOLOGIES, INC.HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * QUICKNET TECHNOLOGIES, INC. SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION + * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + * + *****************************************************************************/ + +#include <linux/telephony.h> + + +/****************************************************************************** +* +* IOCTL's used for the Quicknet Telephony Cards +* +* If you use the IXJCTL_TESTRAM command, the card must be power cycled to +* reset the SRAM values before further use. +* +******************************************************************************/ + +#define IXJCTL_DSP_RESET _IO ('q', 0xC0) + +#define IXJCTL_RING PHONE_RING +#define IXJCTL_HOOKSTATE PHONE_HOOKSTATE +#define IXJCTL_MAXRINGS PHONE_MAXRINGS +#define IXJCTL_RING_CADENCE PHONE_RING_CADENCE +#define IXJCTL_RING_START PHONE_RING_START +#define IXJCTL_RING_STOP PHONE_RING_STOP + +#define IXJCTL_CARDTYPE _IOR ('q', 0xC1, int) +#define IXJCTL_SERIAL _IOR ('q', 0xC2, int) +#define IXJCTL_DSP_TYPE _IOR ('q', 0xC3, int) +#define IXJCTL_DSP_VERSION _IOR ('q', 0xC4, int) +#define IXJCTL_VERSION _IOR ('q', 0xDA, char *) +#define IXJCTL_DSP_IDLE _IO ('q', 0xC5) +#define IXJCTL_TESTRAM _IO ('q', 0xC6) + +/****************************************************************************** +* +* This group of IOCTLs deal with the record settings of the DSP +* +* The IXJCTL_REC_DEPTH command sets the internal buffer depth of the DSP. +* Setting a lower depth reduces latency, but increases the demand of the +* application to service the driver without frame loss. The DSP has 480 +* bytes of physical buffer memory for the record channel so the true +* maximum limit is determined by how many frames will fit in the buffer. +* +* 1 uncompressed (480 byte) 16-bit linear frame. +* 2 uncompressed (240 byte) 8-bit A-law/mu-law frames. +* 15 TrueSpeech 8.5 frames. +* 20 TrueSpeech 6.3,5.3,4.8 or 4.1 frames. +* +* The default in the driver is currently set to 2 frames. +* +* The IXJCTL_REC_VOLUME and IXJCTL_PLAY_VOLUME commands both use a Q8 +* number as a parameter, 0x100 scales the signal by 1.0, 0x200 scales the +* signal by 2.0, 0x80 scales the signal by 0.5. No protection is given +* against over-scaling, if the multiplication factor times the input +* signal exceeds 16 bits, overflow distortion will occur. 
The default +* setting is 0x100 (1.0). +* +* The IXJCTL_REC_LEVEL returns the average signal level (not r.m.s.) on +* the most recently recorded frame as a 16 bit value. +******************************************************************************/ + +#define IXJCTL_REC_CODEC PHONE_REC_CODEC +#define IXJCTL_REC_START PHONE_REC_START +#define IXJCTL_REC_STOP PHONE_REC_STOP +#define IXJCTL_REC_DEPTH PHONE_REC_DEPTH +#define IXJCTL_FRAME PHONE_FRAME +#define IXJCTL_REC_VOLUME PHONE_REC_VOLUME +#define IXJCTL_REC_LEVEL PHONE_REC_LEVEL + +typedef enum { + f300_640 = 4, f300_500, f1100, f350, f400, f480, f440, f620, f20_50, + f133_200, f300, f300_420, f330, f300_425, f330_440, f340, f350_400, + f350_440, f350_450, f360, f380_420, f392, f400_425, f400_440, f400_450, + f420, f425, f425_450, f425_475, f435, f440_450, f440_480, f445, f450, + f452, f475, f480_620, f494, f500, f520, f523, f525, f540_660, f587, + f590, f600, f660, f700, f740, f750, f750_1450, f770, f800, f816, f850, + f857_1645, f900, f900_1300, f935_1215, f941_1477, f942, f950, f950_1400, + f975, f1000, f1020, f1050, f1100_1750, f1140, f1200, f1209, f1330, f1336, + lf1366, f1380, f1400, f1477, f1600, f1633_1638, f1800, f1860 +} IXJ_FILTER_FREQ; + +typedef struct { + unsigned int filter; + IXJ_FILTER_FREQ freq; + char enable; +} IXJ_FILTER; + +typedef struct { + char enable; + char en_filter; + unsigned int filter; + unsigned int on1; + unsigned int off1; + unsigned int on2; + unsigned int off2; + unsigned int on3; + unsigned int off3; +} IXJ_FILTER_CADENCE; + +#define IXJCTL_SET_FILTER _IOW ('q', 0xC7, IXJ_FILTER *) +#define IXJCTL_SET_FILTER_RAW _IOW ('q', 0xDD, IXJ_FILTER_RAW *) +#define IXJCTL_GET_FILTER_HIST _IOW ('q', 0xC8, int) +#define IXJCTL_FILTER_CADENCE _IOW ('q', 0xD6, IXJ_FILTER_CADENCE *) +#define IXJCTL_PLAY_CID _IO ('q', 0xD7) +/****************************************************************************** +* +* This IOCTL allows you to reassign values in the tone index table. The +* tone table has 32 entries (0 - 31), but the driver only allows entries +* 13 - 27 to be modified, entry 0 is reserved for silence and 1 - 12 are +* the standard DTMF digits and 28 - 31 are the DTMF tones for A, B, C & D. +* The positions used internally for Call Progress Tones are as follows: +* Dial Tone - 25 +* Ring Back - 26 +* Busy Signal - 27 +* +* The freq values are calculated as: +* freq = cos(2 * PI * frequency / 8000) +* +* The most commonly needed values are already calculated and listed in the +* enum IXJ_TONE_FREQ. Each tone index can have two frequencies with +* different gains, if you are only using a single frequency set the unused +* one to 0. +* +* The gain values range from 0 to 15 indicating +6dB to -24dB in 2dB +* increments. 
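+*
+* As a purely illustrative sketch (fd standing for an open telephony
+* device descriptor, which this header does not define), reloading the
+* Dial Tone slot with the common 350 Hz + 440 Hz pair at 0dB gain could
+* look roughly like:
+*
+*	IXJ_TONE tone;
+*
+*	tone.tone_index = 25;	(Dial Tone position, see above)
+*	tone.freq0 = hz350;
+*	tone.gain0 = 3;		(0dB: gain steps down 2dB from +6dB at 0)
+*	tone.freq1 = hz440;
+*	tone.gain1 = 3;
+*	ioctl(fd, IXJCTL_INIT_TONE, &tone);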
+* +******************************************************************************/ + +typedef enum { + hz20 = 0x7ffa, + hz50 = 0x7fe5, + hz133 = 0x7f4c, + hz200 = 0x7e6b, + hz261 = 0x7d50, /* .63 C1 */ + hz277 = 0x7cfa, /* .18 CS1 */ + hz293 = 0x7c9f, /* .66 D1 */ + hz300 = 0x7c75, + hz311 = 0x7c32, /* .13 DS1 */ + hz329 = 0x7bbf, /* .63 E1 */ + hz330 = 0x7bb8, + hz340 = 0x7b75, + hz349 = 0x7b37, /* .23 F1 */ + hz350 = 0x7b30, + hz360 = 0x7ae9, + hz369 = 0x7aa8, /* .99 FS1 */ + hz380 = 0x7a56, + hz392 = 0x79fa, /* .00 G1 */ + hz400 = 0x79bb, + hz415 = 0x7941, /* .30 GS1 */ + hz420 = 0x7918, + hz425 = 0x78ee, + hz435 = 0x7899, + hz440 = 0x786d, /* .00 A1 */ + hz445 = 0x7842, + hz450 = 0x7815, + hz452 = 0x7803, + hz466 = 0x7784, /* .16 AS1 */ + hz475 = 0x7731, + hz480 = 0x7701, + hz493 = 0x7685, /* .88 B1 */ + hz494 = 0x767b, + hz500 = 0x7640, + hz520 = 0x7578, + hz523 = 0x7559, /* .25 C2 */ + hz525 = 0x7544, + hz540 = 0x74a7, + hz554 = 0x7411, /* .37 CS2 */ + hz587 = 0x72a1, /* .33 D2 */ + hz590 = 0x727f, + hz600 = 0x720b, + hz620 = 0x711e, + hz622 = 0x7106, /* .25 DS2 */ + hz659 = 0x6f3b, /* .26 E2 */ + hz660 = 0x6f2e, + hz698 = 0x6d3d, /* .46 F2 */ + hz700 = 0x6d22, + hz739 = 0x6b09, /* .99 FS2 */ + hz740 = 0x6afa, + hz750 = 0x6a6c, + hz770 = 0x694b, + hz783 = 0x688b, /* .99 G2 */ + hz800 = 0x678d, + hz816 = 0x6698, + hz830 = 0x65bf, /* .61 GS2 */ + hz850 = 0x6484, + hz857 = 0x6414, + hz880 = 0x629f, /* .00 A2 */ + hz900 = 0x6154, + hz932 = 0x5f35, /* .33 AS2 */ + hz935 = 0x5f01, + hz941 = 0x5e9a, + hz942 = 0x5e88, + hz950 = 0x5dfd, + hz975 = 0x5c44, + hz1000 = 0x5a81, + hz1020 = 0x5912, + hz1050 = 0x56e2, + hz1100 = 0x5320, + hz1140 = 0x5007, + hz1200 = 0x4b3b, + hz1209 = 0x4a80, + hz1215 = 0x4a02, + hz1250 = 0x471c, + hz1300 = 0x42e0, + hz1330 = 0x4049, + hz1336 = 0x3fc4, + hz1366 = 0x3d22, + hz1380 = 0x3be4, + hz1400 = 0x3a1b, + hz1450 = 0x3596, + hz1477 = 0x331c, + hz1500 = 0x30fb, + hz1600 = 0x278d, + hz1633 = 0x2462, + hz1638 = 0x23e7, + hz1645 = 0x233a, + hz1750 = 0x18f8, + hz1800 = 0x1405, + hz1860 = 0xe0b, + hz2100 = 0xf5f6, + hz2130 = 0xf2f5, + hz2450 = 0xd3b3, + hz2750 = 0xb8e4 +} IXJ_FREQ; + +typedef enum { + C1 = hz261, + CS1 = hz277, + D1 = hz293, + DS1 = hz311, + E1 = hz329, + F1 = hz349, + FS1 = hz369, + G1 = hz392, + GS1 = hz415, + A1 = hz440, + AS1 = hz466, + B1 = hz493, + C2 = hz523, + CS2 = hz554, + D2 = hz587, + DS2 = hz622, + E2 = hz659, + F2 = hz698, + FS2 = hz739, + G2 = hz783, + GS2 = hz830, + A2 = hz880, + AS2 = hz932, +} IXJ_NOTE; + +typedef struct { + int tone_index; + int freq0; + int gain0; + int freq1; + int gain1; +} IXJ_TONE; + +#define IXJCTL_INIT_TONE _IOW ('q', 0xC9, IXJ_TONE *) + +/****************************************************************************** +* +* The IXJCTL_TONE_CADENCE ioctl defines tone sequences used for various +* Call Progress Tones (CPT). This is accomplished by setting up an array of +* IXJ_CADENCE_ELEMENT structures that sequentially define the states of +* the tone sequence. The tone_on_time and tone_off time are in +* 250 microsecond intervals. A pointer to this array is passed to the +* driver as the ce element of an IXJ_CADENCE structure. The elements_used +* must be set to the number of IXJ_CADENCE_ELEMENTS in the array. The +* termination variable defines what to do at the end of a cadence, the +* options are to play the cadence once and stop, to repeat the last +* element of the cadence indefinitely, or to repeat the entire cadence +* indefinitely. The ce variable is a pointer to the array of IXJ_TONE +* structures. 
If the freq0 variable is non-zero, the tone table contents +* for the tone_index are updated to the frequencies and gains defined. It +* should be noted that DTMF tones cannot be reassigned, so if DTMF tone +* table indexes are used in a cadence the frequency and gain variables will +* be ignored. +* +* If the array elements contain frequency parameters the driver will +* initialize the needed tone table elements and begin playing the tone, +* there is no preset limit on the number of elements in the cadence. If +* there is more than one frequency used in the cadence, sequential elements +* of different frequencies MUST use different tone table indexes. Only one +* cadence can be played at a time. It is possible to build complex +* cadences with multiple frequencies using 2 tone table indexes by +* alternating between them. +* +******************************************************************************/ + +typedef struct { + int index; + int tone_on_time; + int tone_off_time; + int freq0; + int gain0; + int freq1; + int gain1; +} IXJ_CADENCE_ELEMENT; + +typedef enum { + PLAY_ONCE, + REPEAT_LAST_ELEMENT, + REPEAT_ALL +} IXJ_CADENCE_TERM; + +typedef struct { + int elements_used; + IXJ_CADENCE_TERM termination; + IXJ_CADENCE_ELEMENT __user *ce; +} IXJ_CADENCE; + +#define IXJCTL_TONE_CADENCE _IOW ('q', 0xCA, IXJ_CADENCE *) +/****************************************************************************** +* +* This group of IOCTLs deal with the playback settings of the DSP +* +******************************************************************************/ + +#define IXJCTL_PLAY_CODEC PHONE_PLAY_CODEC +#define IXJCTL_PLAY_START PHONE_PLAY_START +#define IXJCTL_PLAY_STOP PHONE_PLAY_STOP +#define IXJCTL_PLAY_DEPTH PHONE_PLAY_DEPTH +#define IXJCTL_PLAY_VOLUME PHONE_PLAY_VOLUME +#define IXJCTL_PLAY_LEVEL PHONE_PLAY_LEVEL + +/****************************************************************************** +* +* This group of IOCTLs deal with the Acoustic Echo Cancellation settings +* of the DSP +* +* Issuing the IXJCTL_AEC_START command with a value of AEC_OFF has the +* same effect as IXJCTL_AEC_STOP. This is to simplify slider bar +* controls. IXJCTL_AEC_GET_LEVEL returns the current setting of the AEC. +******************************************************************************/ +#define IXJCTL_AEC_START _IOW ('q', 0xCB, int) +#define IXJCTL_AEC_STOP _IO ('q', 0xCC) +#define IXJCTL_AEC_GET_LEVEL _IO ('q', 0xCD) + +#define AEC_OFF 0 +#define AEC_LOW 1 +#define AEC_MED 2 +#define AEC_HIGH 3 +#define AEC_AUTO 4 +#define AEC_AGC 5 +/****************************************************************************** +* +* Call Progress Tones, DTMF, etc. +* IXJCTL_DTMF_OOB determines if DTMF signaling is sent as Out-Of-Band +* only. If you pass a 1, DTMF is suppressed from the audio stream. 
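+* (For instance, a hypothetical ioctl(fd, IXJCTL_DTMF_OOB, 1) call, with
+* fd an open telephony device descriptor, would report detected digits
+* out-of-band only, while passing 0 leaves them audible in the stream.)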
+* Tone on and off times are in 250 microsecond intervals so +* ioctl(ixj1, IXJCTL_SET_TONE_ON_TIME, 360); +* will set the tone on time of board ixj1 to 360 * 250us = 90ms +* the default values of tone on and off times is 840 or 210ms +******************************************************************************/ + +#define IXJCTL_DTMF_READY PHONE_DTMF_READY +#define IXJCTL_GET_DTMF PHONE_GET_DTMF +#define IXJCTL_GET_DTMF_ASCII PHONE_GET_DTMF_ASCII +#define IXJCTL_DTMF_OOB PHONE_DTMF_OOB +#define IXJCTL_EXCEPTION PHONE_EXCEPTION +#define IXJCTL_PLAY_TONE PHONE_PLAY_TONE +#define IXJCTL_SET_TONE_ON_TIME PHONE_SET_TONE_ON_TIME +#define IXJCTL_SET_TONE_OFF_TIME PHONE_SET_TONE_OFF_TIME +#define IXJCTL_GET_TONE_ON_TIME PHONE_GET_TONE_ON_TIME +#define IXJCTL_GET_TONE_OFF_TIME PHONE_GET_TONE_OFF_TIME +#define IXJCTL_GET_TONE_STATE PHONE_GET_TONE_STATE +#define IXJCTL_BUSY PHONE_BUSY +#define IXJCTL_RINGBACK PHONE_RINGBACK +#define IXJCTL_DIALTONE PHONE_DIALTONE +#define IXJCTL_CPT_STOP PHONE_CPT_STOP + +/****************************************************************************** +* LineJACK specific IOCTLs +* +* The lsb 4 bits of the LED argument represent the state of each of the 4 +* LED's on the LineJACK +******************************************************************************/ + +#define IXJCTL_SET_LED _IOW ('q', 0xCE, int) +#define IXJCTL_MIXER _IOW ('q', 0xCF, int) + +/****************************************************************************** +* +* The master volume controls use attenuation with 32 levels from 0 to -62dB +* with steps of 2dB each, the defines should be OR'ed together then sent +* as the parameter to the mixer command to change the mixer settings. +* +******************************************************************************/ +#define MIXER_MASTER_L 0x0000 +#define MIXER_MASTER_R 0x0100 +#define ATT00DB 0x00 +#define ATT02DB 0x01 +#define ATT04DB 0x02 +#define ATT06DB 0x03 +#define ATT08DB 0x04 +#define ATT10DB 0x05 +#define ATT12DB 0x06 +#define ATT14DB 0x07 +#define ATT16DB 0x08 +#define ATT18DB 0x09 +#define ATT20DB 0x0A +#define ATT22DB 0x0B +#define ATT24DB 0x0C +#define ATT26DB 0x0D +#define ATT28DB 0x0E +#define ATT30DB 0x0F +#define ATT32DB 0x10 +#define ATT34DB 0x11 +#define ATT36DB 0x12 +#define ATT38DB 0x13 +#define ATT40DB 0x14 +#define ATT42DB 0x15 +#define ATT44DB 0x16 +#define ATT46DB 0x17 +#define ATT48DB 0x18 +#define ATT50DB 0x19 +#define ATT52DB 0x1A +#define ATT54DB 0x1B +#define ATT56DB 0x1C +#define ATT58DB 0x1D +#define ATT60DB 0x1E +#define ATT62DB 0x1F +#define MASTER_MUTE 0x80 + +/****************************************************************************** +* +* The input volume controls use gain with 32 levels from +12dB to -50dB +* with steps of 2dB each, the defines should be OR'ed together then sent +* as the parameter to the mixer command to change the mixer settings. 
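+*
+* As a hypothetical example (with fd an open device descriptor, which
+* this header does not define), setting the microphone input to 0dB of
+* gain and the left master output to 6dB of attenuation could look
+* roughly like:
+*
+*	ioctl(fd, IXJCTL_MIXER, MIXER_PORT_MIC | GAIN00DB);
+*	ioctl(fd, IXJCTL_MIXER, MIXER_MASTER_L | ATT06DB);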
+* +******************************************************************************/ +#define MIXER_PORT_CD_L 0x0600 +#define MIXER_PORT_CD_R 0x0700 +#define MIXER_PORT_LINE_IN_L 0x0800 +#define MIXER_PORT_LINE_IN_R 0x0900 +#define MIXER_PORT_POTS_REC 0x0C00 +#define MIXER_PORT_MIC 0x0E00 + +#define GAIN12DB 0x00 +#define GAIN10DB 0x01 +#define GAIN08DB 0x02 +#define GAIN06DB 0x03 +#define GAIN04DB 0x04 +#define GAIN02DB 0x05 +#define GAIN00DB 0x06 +#define GAIN_02DB 0x07 +#define GAIN_04DB 0x08 +#define GAIN_06DB 0x09 +#define GAIN_08DB 0x0A +#define GAIN_10DB 0x0B +#define GAIN_12DB 0x0C +#define GAIN_14DB 0x0D +#define GAIN_16DB 0x0E +#define GAIN_18DB 0x0F +#define GAIN_20DB 0x10 +#define GAIN_22DB 0x11 +#define GAIN_24DB 0x12 +#define GAIN_26DB 0x13 +#define GAIN_28DB 0x14 +#define GAIN_30DB 0x15 +#define GAIN_32DB 0x16 +#define GAIN_34DB 0x17 +#define GAIN_36DB 0x18 +#define GAIN_38DB 0x19 +#define GAIN_40DB 0x1A +#define GAIN_42DB 0x1B +#define GAIN_44DB 0x1C +#define GAIN_46DB 0x1D +#define GAIN_48DB 0x1E +#define GAIN_50DB 0x1F +#define INPUT_MUTE 0x80 + +/****************************************************************************** +* +* The POTS volume control use attenuation with 8 levels from 0dB to -28dB +* with steps of 4dB each, the defines should be OR'ed together then sent +* as the parameter to the mixer command to change the mixer settings. +* +******************************************************************************/ +#define MIXER_PORT_POTS_PLAY 0x0F00 + +#define POTS_ATT_00DB 0x00 +#define POTS_ATT_04DB 0x01 +#define POTS_ATT_08DB 0x02 +#define POTS_ATT_12DB 0x03 +#define POTS_ATT_16DB 0x04 +#define POTS_ATT_20DB 0x05 +#define POTS_ATT_24DB 0x06 +#define POTS_ATT_28DB 0x07 +#define POTS_MUTE 0x80 + +/****************************************************************************** +* +* The DAA controls the interface to the PSTN port. The driver loads the +* US coefficients by default, so if you live in a different country you +* need to load the set for your countries phone system. +* +******************************************************************************/ +#define IXJCTL_DAA_COEFF_SET _IOW ('q', 0xD0, int) + +#define DAA_US 1 /*PITA 8kHz */ +#define DAA_UK 2 /*ISAR34 8kHz */ +#define DAA_FRANCE 3 /* */ +#define DAA_GERMANY 4 +#define DAA_AUSTRALIA 5 +#define DAA_JAPAN 6 + +/****************************************************************************** +* +* Use IXJCTL_PORT to set or query the port the card is set to. If the +* argument is set to PORT_QUERY, the return value of the ioctl will +* indicate which port is currently in use, otherwise it will change the +* port. +* +******************************************************************************/ +#define IXJCTL_PORT _IOW ('q', 0xD1, int) + +#define PORT_QUERY 0 +#define PORT_POTS 1 +#define PORT_PSTN 2 +#define PORT_SPEAKER 3 +#define PORT_HANDSET 4 + +#define IXJCTL_PSTN_SET_STATE PHONE_PSTN_SET_STATE +#define IXJCTL_PSTN_GET_STATE PHONE_PSTN_GET_STATE + +#define PSTN_ON_HOOK 0 +#define PSTN_RINGING 1 +#define PSTN_OFF_HOOK 2 +#define PSTN_PULSE_DIAL 3 + +/****************************************************************************** +* +* The DAA Analog GAIN sets 2 parameters at one time, the receive gain (AGRR), +* and the transmit gain (AGX). OR together the components and pass them +* as the parameter to IXJCTL_DAA_AGAIN. The default setting is both at 0dB. 
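+*
+* For example (hypothetical, with fd an open device descriptor),
+* selecting 3.5dB of receive gain together with -6dB of transmit gain
+* could be requested roughly as:
+*
+*	ioctl(fd, IXJCTL_DAA_AGAIN, AGRR3_5DB | AGX_6DB);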
+* +******************************************************************************/ +#define IXJCTL_DAA_AGAIN _IOW ('q', 0xD2, int) + +#define AGRR00DB 0x00 /* Analog gain in receive direction 0dB */ +#define AGRR3_5DB 0x10 /* Analog gain in receive direction 3.5dB */ +#define AGRR06DB 0x30 /* Analog gain in receive direction 6dB */ + +#define AGX00DB 0x00 /* Analog gain in transmit direction 0dB */ +#define AGX_6DB 0x04 /* Analog gain in transmit direction -6dB */ +#define AGX3_5DB 0x08 /* Analog gain in transmit direction 3.5dB */ +#define AGX_2_5B 0x0C /* Analog gain in transmit direction -2.5dB */ + +#define IXJCTL_PSTN_LINETEST _IO ('q', 0xD3) + +#define IXJCTL_CID _IOR ('q', 0xD4, PHONE_CID *) +#define IXJCTL_VMWI _IOR ('q', 0xD8, int) +#define IXJCTL_CIDCW _IOW ('q', 0xD9, PHONE_CID *) +/****************************************************************************** +* +* The wink duration is tunable with this ioctl. The default wink duration +* is 320ms. You do not need to use this ioctl if you do not require a +* different wink duration. +* +******************************************************************************/ +#define IXJCTL_WINK_DURATION PHONE_WINK_DURATION + +/****************************************************************************** +* +* This ioctl will connect the POTS port to the PSTN port on the LineJACK +* In order for this to work properly the port selection should be set to +* the PSTN port with IXJCTL_PORT prior to calling this ioctl. This will +* enable conference calls between PSTN callers and network callers. +* Passing a 1 to this ioctl enables the POTS<->PSTN connection while +* passing a 0 turns it back off. +* +******************************************************************************/ +#define IXJCTL_POTS_PSTN _IOW ('q', 0xD5, int) + +/****************************************************************************** +* +* IOCTLs added by request. +* +* IXJCTL_HZ sets the value your Linux kernel uses for HZ as defined in +* /usr/include/asm/param.h, this determines the fundamental +* frequency of the clock ticks on your Linux system. The kernel +* must be rebuilt if you change this value, also all modules you +* use (except this one) must be recompiled. The default value +* is 100, and you only need to use this IOCTL if you use some +* other value. +* +* +* IXJCTL_RATE sets the number of times per second that the driver polls +* the DSP. This value cannot be larger than HZ. By +* increasing both of these values, you may be able to reduce +* latency because the max hang time that can exist between the +* driver and the DSP will be reduced. +* +******************************************************************************/ + +#define IXJCTL_HZ _IOW ('q', 0xE0, int) +#define IXJCTL_RATE _IOW ('q', 0xE1, int) +#define IXJCTL_FRAMES_READ _IOR ('q', 0xE2, unsigned long) +#define IXJCTL_FRAMES_WRITTEN _IOR ('q', 0xE3, unsigned long) +#define IXJCTL_READ_WAIT _IOR ('q', 0xE4, unsigned long) +#define IXJCTL_WRITE_WAIT _IOR ('q', 0xE5, unsigned long) +#define IXJCTL_DRYBUFFER_READ _IOR ('q', 0xE6, unsigned long) +#define IXJCTL_DRYBUFFER_CLEAR _IO ('q', 0xE7) +#define IXJCTL_DTMF_PRESCALE _IOW ('q', 0xE8, int) + +/****************************************************************************** +* +* This ioctl allows the user application to control what events the driver +* will send signals for, and what signals it will send for which event. +* By default, if signaling is enabled, all events will send SIGIO when +* they occur. 
To disable signals for an event set the signal to 0. +* +******************************************************************************/ +typedef enum { + SIG_DTMF_READY, + SIG_HOOKSTATE, + SIG_FLASH, + SIG_PSTN_RING, + SIG_CALLER_ID, + SIG_PSTN_WINK, + SIG_F0, SIG_F1, SIG_F2, SIG_F3, + SIG_FC0, SIG_FC1, SIG_FC2, SIG_FC3, + SIG_READ_READY = 33, + SIG_WRITE_READY = 34 +} IXJ_SIGEVENT; + +typedef struct { + unsigned int event; + int signal; +} IXJ_SIGDEF; + +#define IXJCTL_SIGCTL _IOW ('q', 0xE9, IXJ_SIGDEF *) + +/****************************************************************************** +* +* These ioctls allow the user application to change the gain in the +* Smart Cable of the Internet Phone Card. Sending -1 as a value will cause +* return value to be the current setting. Valid values to set are 0x00 - 0x1F +* +* 11111 = +12 dB +* 10111 = 0 dB +* 00000 = -34.5 dB +* +* IXJCTL_SC_RXG sets the Receive gain +* IXJCTL_SC_TXG sets the Transmit gain +* +******************************************************************************/ +#define IXJCTL_SC_RXG _IOW ('q', 0xEA, int) +#define IXJCTL_SC_TXG _IOW ('q', 0xEB, int) + +/****************************************************************************** +* +* The intercom IOCTL's short the output from one card to the input of the +* other and vice versa (actually done in the DSP read function). It is only +* necessary to execute the IOCTL on one card, but it is necessary to have +* both devices open to be able to detect hook switch changes. The record +* codec and rate of each card must match the playback codec and rate of +* the other card for this to work properly. +* +******************************************************************************/ + +#define IXJCTL_INTERCOM_START _IOW ('q', 0xFD, int) +#define IXJCTL_INTERCOM_STOP _IOW ('q', 0xFE, int) + +/****************************************************************************** + * + * new structure for accessing raw filter information + * + ******************************************************************************/ + +typedef struct { + unsigned int filter; + char enable; + unsigned int coeff[19]; +} IXJ_FILTER_RAW; + +#endif diff --git a/include/linux/jbd.h b/include/linux/jbd.h new file mode 100644 index 00000000..d211732b --- /dev/null +++ b/include/linux/jbd.h @@ -0,0 +1,1031 @@ +/* + * linux/include/linux/jbd.h + * + * Written by Stephen C. Tweedie <sct@redhat.com> + * + * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. + * + * Definitions for transaction data structures for the buffer cache + * filesystem journaling support. + */ + +#ifndef _LINUX_JBD_H +#define _LINUX_JBD_H + +/* Allow this file to be included directly into e2fsprogs */ +#ifndef __KERNEL__ +#include "jfs_compat.h" +#define JFS_DEBUG +#define jfs_debug jbd_debug +#else + +#include <linux/types.h> +#include <linux/buffer_head.h> +#include <linux/journal-head.h> +#include <linux/stddef.h> +#include <linux/bit_spinlock.h> +#include <linux/mutex.h> +#include <linux/timer.h> +#include <linux/lockdep.h> +#include <linux/slab.h> + +#define journal_oom_retry 1 + +/* + * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds + * certain classes of error which can occur due to failed IOs. 
Under + * normal use we want ext3 to continue after such errors, because + * hardware _can_ fail, but for debugging purposes when running tests on + * known-good hardware we may want to trap these errors. + */ +#undef JBD_PARANOID_IOFAIL + +/* + * The default maximum commit age, in seconds. + */ +#define JBD_DEFAULT_MAX_COMMIT_AGE 5 + +#ifdef CONFIG_JBD_DEBUG +/* + * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal + * consistency checks. By default we don't do this unless + * CONFIG_JBD_DEBUG is on. + */ +#define JBD_EXPENSIVE_CHECKING +extern u8 journal_enable_debug; + +#define jbd_debug(n, f, a...) \ + do { \ + if ((n) <= journal_enable_debug) { \ + printk (KERN_DEBUG "(%s, %d): %s: ", \ + __FILE__, __LINE__, __func__); \ + printk (f, ## a); \ + } \ + } while (0) +#else +#define jbd_debug(f, a...) /**/ +#endif + +static inline void *jbd_alloc(size_t size, gfp_t flags) +{ + return (void *)__get_free_pages(flags, get_order(size)); +} + +static inline void jbd_free(void *ptr, size_t size) +{ + free_pages((unsigned long)ptr, get_order(size)); +}; + +#define JFS_MIN_JOURNAL_BLOCKS 1024 + + +/** + * typedef handle_t - The handle_t type represents a single atomic update being performed by some process. + * + * All filesystem modifications made by the process go + * through this handle. Recursive operations (such as quota operations) + * are gathered into a single update. + * + * The buffer credits field is used to account for journaled buffers + * being modified by the running process. To ensure that there is + * enough log space for all outstanding operations, we need to limit the + * number of outstanding buffers possible at any time. When the + * operation completes, any buffer credits not used are credited back to + * the transaction, so that at all times we know how many buffers the + * outstanding updates on a transaction might possibly touch. + * + * This is an opaque datatype. + **/ +typedef struct handle_s handle_t; /* Atomic operation type */ + + +/** + * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem. + * + * journal_t is linked to from the fs superblock structure. + * + * We use the journal_t to keep track of all outstanding transaction + * activity on the filesystem, and to manage the state of the log + * writing process. + * + * This is an opaque datatype. + **/ +typedef struct journal_s journal_t; /* Journal control structure */ +#endif + +/* + * Internal structures used by the logging mechanism: + */ + +#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! 
*/ + +/* + * On-disk structures + */ + +/* + * Descriptor block types: + */ + +#define JFS_DESCRIPTOR_BLOCK 1 +#define JFS_COMMIT_BLOCK 2 +#define JFS_SUPERBLOCK_V1 3 +#define JFS_SUPERBLOCK_V2 4 +#define JFS_REVOKE_BLOCK 5 + +/* + * Standard header for all descriptor blocks: + */ +typedef struct journal_header_s +{ + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; +} journal_header_t; + + +/* + * The block tag: used to describe a single buffer in the journal + */ +typedef struct journal_block_tag_s +{ + __be32 t_blocknr; /* The on-disk block number */ + __be32 t_flags; /* See below */ +} journal_block_tag_t; + +/* + * The revoke descriptor: used on disk to describe a series of blocks to + * be revoked from the log + */ +typedef struct journal_revoke_header_s +{ + journal_header_t r_header; + __be32 r_count; /* Count of bytes used in the block */ +} journal_revoke_header_t; + + +/* Definitions for the journal tag flags word: */ +#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */ +#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */ +#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */ +#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */ + + +/* + * The journal superblock. All fields are in big-endian byte order. + */ +typedef struct journal_superblock_s +{ +/* 0x0000 */ + journal_header_t s_header; + +/* 0x000C */ + /* Static information describing the journal */ + __be32 s_blocksize; /* journal device blocksize */ + __be32 s_maxlen; /* total blocks in journal file */ + __be32 s_first; /* first block of log information */ + +/* 0x0018 */ + /* Dynamic information describing the current state of the log */ + __be32 s_sequence; /* first commit ID expected in log */ + __be32 s_start; /* blocknr of start of log */ + +/* 0x0020 */ + /* Error value, as set by journal_abort(). */ + __be32 s_errno; + +/* 0x0024 */ + /* Remaining fields are only valid in a version-2 superblock */ + __be32 s_feature_compat; /* compatible feature set */ + __be32 s_feature_incompat; /* incompatible feature set */ + __be32 s_feature_ro_compat; /* readonly-compatible feature set */ +/* 0x0030 */ + __u8 s_uuid[16]; /* 128-bit uuid for journal */ + +/* 0x0040 */ + __be32 s_nr_users; /* Nr of filesystems sharing log */ + + __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/ + +/* 0x0048 */ + __be32 s_max_transaction; /* Limit of journal blocks per trans.*/ + __be32 s_max_trans_data; /* Limit of data blocks per trans. 
*/ + +/* 0x0050 */ + __u32 s_padding[44]; + +/* 0x0100 */ + __u8 s_users[16*48]; /* ids of all fs'es sharing the log */ +/* 0x0400 */ +} journal_superblock_t; + +#define JFS_HAS_COMPAT_FEATURE(j,mask) \ + ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask)))) +#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \ + ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask)))) +#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \ + ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask)))) + +#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001 + +/* Features known to this kernel version: */ +#define JFS_KNOWN_COMPAT_FEATURES 0 +#define JFS_KNOWN_ROCOMPAT_FEATURES 0 +#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE + +#ifdef __KERNEL__ + +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/jbd_common.h> + +#define J_ASSERT(assert) BUG_ON(!(assert)) + +#define J_ASSERT_BH(bh, expr) J_ASSERT(expr) +#define J_ASSERT_JH(jh, expr) J_ASSERT(expr) + +#if defined(JBD_PARANOID_IOFAIL) +#define J_EXPECT(expr, why...) J_ASSERT(expr) +#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr) +#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr) +#else +#define __journal_expect(expr, why...) \ + ({ \ + int val = (expr); \ + if (!val) { \ + printk(KERN_ERR \ + "EXT3-fs unexpected failure: %s;\n",# expr); \ + printk(KERN_ERR why "\n"); \ + } \ + val; \ + }) +#define J_EXPECT(expr, why...) __journal_expect(expr, ## why) +#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why) +#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why) +#endif + +struct jbd_revoke_table_s; + +/** + * struct handle_s - this is the concrete type associated with handle_t. + * @h_transaction: Which compound transaction is this update a part of? + * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. + * @h_ref: Reference count on this handle + * @h_err: Field for caller's use to track errors through large fs operations + * @h_sync: flag for sync-on-close + * @h_jdata: flag to force data journaling + * @h_aborted: flag indicating fatal error on handle + * @h_lockdep_map: lockdep info for debugging lock problems + */ +struct handle_s +{ + /* Which compound transaction is this update a part of? */ + transaction_t *h_transaction; + + /* Number of remaining buffers we are allowed to dirty: */ + int h_buffer_credits; + + /* Reference count on this handle */ + int h_ref; + + /* Field for caller's use to track errors through large fs */ + /* operations */ + int h_err; + + /* Flags [no locking] */ + unsigned int h_sync: 1; /* sync-on-close */ + unsigned int h_jdata: 1; /* force data journaling */ + unsigned int h_aborted: 1; /* fatal error on handle */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map h_lockdep_map; +#endif +}; + + +/* The transaction_t type is the guts of the journaling mechanism. It + * tracks a compound transaction through its various states: + * + * RUNNING: accepting new updates + * LOCKED: Updates still running but we don't accept new ones + * RUNDOWN: Updates are tidying up but have finished requesting + * new buffers to modify (state not used for now) + * FLUSH: All updates complete, but we are still writing to disk + * COMMIT: All data on disk, writing commit record + * FINISHED: We still have to keep the transaction for checkpointing. 
+ * + * The transaction keeps track of all of the buffers modified by a + * running transaction, and all of the buffers committed but not yet + * flushed to home for finished transactions. + */ + +/* + * Lock ranking: + * + * j_list_lock + * ->jbd_lock_bh_journal_head() (This is "innermost") + * + * j_state_lock + * ->jbd_lock_bh_state() + * + * jbd_lock_bh_state() + * ->j_list_lock + * + * j_state_lock + * ->t_handle_lock + * + * j_state_lock + * ->j_list_lock (journal_unmap_buffer) + * + */ + +struct transaction_s +{ + /* Pointer to the journal for this transaction. [no locking] */ + journal_t *t_journal; + + /* Sequence number for this transaction [no locking] */ + tid_t t_tid; + + /* + * Transaction's current state + * [no locking - only kjournald alters this] + * [j_list_lock] guards transition of a transaction into T_FINISHED + * state and subsequent call of __journal_drop_transaction() + * FIXME: needs barriers + * KLUDGE: [use j_state_lock] + */ + enum { + T_RUNNING, + T_LOCKED, + T_FLUSH, + T_COMMIT, + T_COMMIT_RECORD, + T_FINISHED + } t_state; + + /* + * Where in the log does this transaction's commit start? [no locking] + */ + unsigned int t_log_start; + + /* Number of buffers on the t_buffers list [j_list_lock] */ + int t_nr_buffers; + + /* + * Doubly-linked circular list of all buffers reserved but not yet + * modified by this transaction [j_list_lock] + */ + struct journal_head *t_reserved_list; + + /* + * Doubly-linked circular list of all buffers under writeout during + * commit [j_list_lock] + */ + struct journal_head *t_locked_list; + + /* + * Doubly-linked circular list of all metadata buffers owned by this + * transaction [j_list_lock] + */ + struct journal_head *t_buffers; + + /* + * Doubly-linked circular list of all data buffers still to be + * flushed before this transaction can be committed [j_list_lock] + */ + struct journal_head *t_sync_datalist; + + /* + * Doubly-linked circular list of all forget buffers (superseded + * buffers which we can un-checkpoint once this transaction commits) + * [j_list_lock] + */ + struct journal_head *t_forget; + + /* + * Doubly-linked circular list of all buffers still to be flushed before + * this transaction can be checkpointed. [j_list_lock] + */ + struct journal_head *t_checkpoint_list; + + /* + * Doubly-linked circular list of all buffers submitted for IO while + * checkpointing. [j_list_lock] + */ + struct journal_head *t_checkpoint_io_list; + + /* + * Doubly-linked circular list of temporary buffers currently undergoing + * IO in the log [j_list_lock] + */ + struct journal_head *t_iobuf_list; + + /* + * Doubly-linked circular list of metadata buffers being shadowed by log + * IO. The IO buffers on the iobuf list and the shadow buffers on this + * list match each other one for one at all times. [j_list_lock] + */ + struct journal_head *t_shadow_list; + + /* + * Doubly-linked circular list of control buffers being written to the + * log. [j_list_lock] + */ + struct journal_head *t_log_list; + + /* + * Protects info related to handles + */ + spinlock_t t_handle_lock; + + /* + * Number of outstanding updates running on this transaction + * [t_handle_lock] + */ + int t_updates; + + /* + * Number of buffers reserved for use by all handles in this transaction + * handle but not yet modified. [t_handle_lock] + */ + int t_outstanding_credits; + + /* + * Forward and backward links for the circular list of all transactions + * awaiting checkpoint. 
[j_list_lock] + */ + transaction_t *t_cpnext, *t_cpprev; + + /* + * When will the transaction expire (become due for commit), in jiffies? + * [no locking] + */ + unsigned long t_expires; + + /* + * When this transaction started, in nanoseconds [no locking] + */ + ktime_t t_start_time; + + /* + * How many handles used this transaction? [t_handle_lock] + */ + int t_handle_count; + + /* + * This transaction is being forced and some process is + * waiting for it to finish. + */ + unsigned int t_synchronous_commit:1; +}; + +/** + * struct journal_s - this is the concrete type associated with journal_t. + * @j_flags: General journaling state flags + * @j_errno: Is there an outstanding uncleared error on the journal (from a + * prior abort)? + * @j_sb_buffer: First part of superblock buffer + * @j_superblock: Second part of superblock buffer + * @j_format_version: Version of the superblock format + * @j_state_lock: Protect the various scalars in the journal + * @j_barrier_count: Number of processes waiting to create a barrier lock + * @j_running_transaction: The current running transaction.. + * @j_committing_transaction: the transaction we are pushing to disk + * @j_checkpoint_transactions: a linked circular list of all transactions + * waiting for checkpointing + * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction + * to start committing, or for a barrier lock to be released + * @j_wait_logspace: Wait queue for waiting for checkpointing to complete + * @j_wait_done_commit: Wait queue for waiting for commit to complete + * @j_wait_checkpoint: Wait queue to trigger checkpointing + * @j_wait_commit: Wait queue to trigger commit + * @j_wait_updates: Wait queue to wait for updates to complete + * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints + * @j_head: Journal head - identifies the first unused block in the journal + * @j_tail: Journal tail - identifies the oldest still-used block in the + * journal. + * @j_free: Journal free - how many free blocks are there in the journal? + * @j_first: The block number of the first usable block + * @j_last: The block number one beyond the last usable block + * @j_dev: Device where we store the journal + * @j_blocksize: blocksize for the location where we store the journal. + * @j_blk_offset: starting block offset for into the device where we store the + * journal + * @j_fs_dev: Device which holds the client fs. For internal journal this will + * be equal to j_dev + * @j_maxlen: Total maximum capacity of the journal region on disk. + * @j_list_lock: Protects the buffer lists and internal buffer state. + * @j_inode: Optional inode where we store the journal. If present, all journal + * block numbers are mapped into this inode via bmap(). + * @j_tail_sequence: Sequence number of the oldest transaction in the log + * @j_transaction_sequence: Sequence number of the next transaction to grant + * @j_commit_sequence: Sequence number of the most recently committed + * transaction + * @j_commit_request: Sequence number of the most recent transaction wanting + * commit + * @j_uuid: Uuid of client object. + * @j_task: Pointer to the current commit thread for this journal + * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a + * single compound commit transaction + * @j_commit_interval: What is the maximum transaction lifetime before we begin + * a commit? 
+ * @j_commit_timer: The timer used to wakeup the commit thread + * @j_revoke_lock: Protect the revoke table + * @j_revoke: The revoke table - maintains the list of revoked blocks in the + * current transaction. + * @j_revoke_table: alternate revoke tables for j_revoke + * @j_wbuf: array of buffer_heads for journal_commit_transaction + * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the + * number that will fit in j_blocksize + * @j_last_sync_writer: most recent pid which did a synchronous write + * @j_average_commit_time: the average amount of time in nanoseconds it + * takes to commit a transaction to the disk. + * @j_private: An opaque pointer to fs-private information. + */ + +struct journal_s +{ + /* General journaling state flags [j_state_lock] */ + unsigned long j_flags; + + /* + * Is there an outstanding uncleared error on the journal (from a prior + * abort)? [j_state_lock] + */ + int j_errno; + + /* The superblock buffer */ + struct buffer_head *j_sb_buffer; + journal_superblock_t *j_superblock; + + /* Version of the superblock format */ + int j_format_version; + + /* + * Protect the various scalars in the journal + */ + spinlock_t j_state_lock; + + /* + * Number of processes waiting to create a barrier lock [j_state_lock] + */ + int j_barrier_count; + + /* + * Transactions: The current running transaction... + * [j_state_lock] [caller holding open handle] + */ + transaction_t *j_running_transaction; + + /* + * the transaction we are pushing to disk + * [j_state_lock] [caller holding open handle] + */ + transaction_t *j_committing_transaction; + + /* + * ... and a linked circular list of all transactions waiting for + * checkpointing. [j_list_lock] + */ + transaction_t *j_checkpoint_transactions; + + /* + * Wait queue for waiting for a locked transaction to start committing, + * or for a barrier lock to be released + */ + wait_queue_head_t j_wait_transaction_locked; + + /* Wait queue for waiting for checkpointing to complete */ + wait_queue_head_t j_wait_logspace; + + /* Wait queue for waiting for commit to complete */ + wait_queue_head_t j_wait_done_commit; + + /* Wait queue to trigger checkpointing */ + wait_queue_head_t j_wait_checkpoint; + + /* Wait queue to trigger commit */ + wait_queue_head_t j_wait_commit; + + /* Wait queue to wait for updates to complete */ + wait_queue_head_t j_wait_updates; + + /* Semaphore for locking against concurrent checkpoints */ + struct mutex j_checkpoint_mutex; + + /* + * Journal head: identifies the first unused block in the journal. + * [j_state_lock] + */ + unsigned int j_head; + + /* + * Journal tail: identifies the oldest still-used block in the journal. + * [j_state_lock] + */ + unsigned int j_tail; + + /* + * Journal free: how many free blocks are there in the journal? + * [j_state_lock] + */ + unsigned int j_free; + + /* + * Journal start and end: the block numbers of the first usable block + * and one beyond the last usable block in the journal. [j_state_lock] + */ + unsigned int j_first; + unsigned int j_last; + + /* + * Device, blocksize and starting block offset for the location where we + * store the journal. + */ + struct block_device *j_dev; + int j_blocksize; + unsigned int j_blk_offset; + + /* + * Device which holds the client fs. For internal journal this will be + * equal to j_dev. + */ + struct block_device *j_fs_dev; + + /* Total maximum capacity of the journal region on disk. */ + unsigned int j_maxlen; + + /* + * Protects the buffer lists and internal buffer state. 
+ */ + spinlock_t j_list_lock; + + /* Optional inode where we store the journal. If present, all */ + /* journal block numbers are mapped into this inode via */ + /* bmap(). */ + struct inode *j_inode; + + /* + * Sequence number of the oldest transaction in the log [j_state_lock] + */ + tid_t j_tail_sequence; + + /* + * Sequence number of the next transaction to grant [j_state_lock] + */ + tid_t j_transaction_sequence; + + /* + * Sequence number of the most recently committed transaction + * [j_state_lock]. + */ + tid_t j_commit_sequence; + + /* + * Sequence number of the most recent transaction wanting commit + * [j_state_lock] + */ + tid_t j_commit_request; + + /* + * Journal uuid: identifies the object (filesystem, LVM volume etc) + * backed by this journal. This will eventually be replaced by an array + * of uuids, allowing us to index multiple devices within a single + * journal and to perform atomic updates across them. + */ + __u8 j_uuid[16]; + + /* Pointer to the current commit thread for this journal */ + struct task_struct *j_task; + + /* + * Maximum number of metadata buffers to allow in a single compound + * commit transaction + */ + int j_max_transaction_buffers; + + /* + * What is the maximum transaction lifetime before we begin a commit? + */ + unsigned long j_commit_interval; + + /* The timer used to wakeup the commit thread: */ + struct timer_list j_commit_timer; + + /* + * The revoke table: maintains the list of revoked blocks in the + * current transaction. [j_revoke_lock] + */ + spinlock_t j_revoke_lock; + struct jbd_revoke_table_s *j_revoke; + struct jbd_revoke_table_s *j_revoke_table[2]; + + /* + * array of bhs for journal_commit_transaction + */ + struct buffer_head **j_wbuf; + int j_wbufsize; + + /* + * this is the pid of the last person to run a synchronous operation + * through the journal. + */ + pid_t j_last_sync_writer; + + /* + * the average amount of time in nanoseconds it takes to commit a + * transaction to the disk. [j_state_lock] + */ + u64 j_average_commit_time; + + /* + * An opaque pointer to fs-private information. ext3 puts its + * superblock pointer here + */ + void *j_private; +}; + +/* + * Journal flag definitions + */ +#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */ +#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. 
*/ +#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */ +#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */ +#define JFS_LOADED 0x010 /* The journal superblock has been loaded */ +#define JFS_BARRIER 0x020 /* Use IDE barriers */ +#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file + * data write error in ordered + * mode */ + +/* + * Function declarations for the journaling transaction and buffer + * management + */ + +/* Filing buffers */ +extern void journal_unfile_buffer(journal_t *, struct journal_head *); +extern void __journal_unfile_buffer(struct journal_head *); +extern void __journal_refile_buffer(struct journal_head *); +extern void journal_refile_buffer(journal_t *, struct journal_head *); +extern void __journal_file_buffer(struct journal_head *, transaction_t *, int); +extern void __journal_free_buffer(struct journal_head *bh); +extern void journal_file_buffer(struct journal_head *, transaction_t *, int); +extern void __journal_clean_data_list(transaction_t *transaction); + +/* Log buffer allocation */ +extern struct journal_head * journal_get_descriptor_buffer(journal_t *); +int journal_next_log_block(journal_t *, unsigned int *); + +/* Commit management */ +extern void journal_commit_transaction(journal_t *); + +/* Checkpoint list management */ +int __journal_clean_checkpoint_list(journal_t *journal); +int __journal_remove_checkpoint(struct journal_head *); +void __journal_insert_checkpoint(struct journal_head *, transaction_t *); + +/* Buffer IO */ +extern int +journal_write_metadata_buffer(transaction_t *transaction, + struct journal_head *jh_in, + struct journal_head **jh_out, + unsigned int blocknr); + +/* Transaction locking */ +extern void __wait_on_journal (journal_t *); + +/* + * Journal locking. + * + * We need to lock the journal during transaction state changes so that nobody + * ever tries to take a handle on the running transaction while we are in the + * middle of moving it to the commit phase. j_state_lock does this. + * + * Note that the locking is completely interrupt unsafe. We never touch + * journal structures from interrupts. + */ + +static inline handle_t *journal_current_handle(void) +{ + return current->journal_info; +} + +/* The journaling code user interface: + * + * Create and destroy handles + * Register buffer modifications against the current transaction. 
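 *
 * A minimal usage sketch (illustrative only; the error-handling and locking
 * policy belongs to the caller): wrap a single metadata update in a handle,
 * assuming "journal" and "bh" are supplied by the calling filesystem:
 *
 *	handle_t *handle = journal_start(journal, 1);
 *	int err;
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify the buffer while the handle is open ...
 *		err = journal_dirty_metadata(handle, bh);
 *	}
 *	journal_stop(handle);
 *	return err;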
+ */ + +extern handle_t *journal_start(journal_t *, int nblocks); +extern int journal_restart (handle_t *, int nblocks); +extern int journal_extend (handle_t *, int nblocks); +extern int journal_get_write_access(handle_t *, struct buffer_head *); +extern int journal_get_create_access (handle_t *, struct buffer_head *); +extern int journal_get_undo_access(handle_t *, struct buffer_head *); +extern int journal_dirty_data (handle_t *, struct buffer_head *); +extern int journal_dirty_metadata (handle_t *, struct buffer_head *); +extern void journal_release_buffer (handle_t *, struct buffer_head *); +extern int journal_forget (handle_t *, struct buffer_head *); +extern void journal_sync_buffer (struct buffer_head *); +extern void journal_invalidatepage(journal_t *, + struct page *, unsigned long); +extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); +extern int journal_stop(handle_t *); +extern int journal_flush (journal_t *); +extern void journal_lock_updates (journal_t *); +extern void journal_unlock_updates (journal_t *); + +extern journal_t * journal_init_dev(struct block_device *bdev, + struct block_device *fs_dev, + int start, int len, int bsize); +extern journal_t * journal_init_inode (struct inode *); +extern int journal_update_format (journal_t *); +extern int journal_check_used_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern int journal_check_available_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern int journal_set_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern int journal_create (journal_t *); +extern int journal_load (journal_t *journal); +extern int journal_destroy (journal_t *); +extern int journal_recover (journal_t *journal); +extern int journal_wipe (journal_t *, int); +extern int journal_skip_recovery (journal_t *); +extern void journal_update_superblock (journal_t *, int); +extern void journal_abort (journal_t *, int); +extern int journal_errno (journal_t *); +extern void journal_ack_err (journal_t *); +extern int journal_clear_err (journal_t *); +extern int journal_bmap(journal_t *, unsigned int, unsigned int *); +extern int journal_force_commit(journal_t *); + +/* + * journal_head management + */ +struct journal_head *journal_add_journal_head(struct buffer_head *bh); +struct journal_head *journal_grab_journal_head(struct buffer_head *bh); +void journal_put_journal_head(struct journal_head *jh); + +/* + * handle management + */ +extern struct kmem_cache *jbd_handle_cache; + +static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) +{ + return kmem_cache_alloc(jbd_handle_cache, gfp_flags); +} + +static inline void jbd_free_handle(handle_t *handle) +{ + kmem_cache_free(jbd_handle_cache, handle); +} + +/* Primary revoke support */ +#define JOURNAL_REVOKE_DEFAULT_HASH 256 +extern int journal_init_revoke(journal_t *, int); +extern void journal_destroy_revoke_caches(void); +extern int journal_init_revoke_caches(void); + +extern void journal_destroy_revoke(journal_t *); +extern int journal_revoke (handle_t *, + unsigned int, struct buffer_head *); +extern int journal_cancel_revoke(handle_t *, struct journal_head *); +extern void journal_write_revoke_records(journal_t *, + transaction_t *, int); + +/* Recovery revoke support */ +extern int journal_set_revoke(journal_t *, unsigned int, tid_t); +extern int journal_test_revoke(journal_t *, unsigned int, tid_t); +extern void journal_clear_revoke(journal_t *); +extern void journal_switch_revoke_table(journal_t *journal); 
+extern void journal_clear_buffer_revoked_flags(journal_t *journal);
+
+/*
+ * The log thread user interface:
+ *
+ * Request space in the current transaction, and force transaction commit
+ * transitions on demand.
+ */
+
+int __log_space_left(journal_t *); /* Called with journal locked */
+int log_start_commit(journal_t *journal, tid_t tid);
+int __log_start_commit(journal_t *journal, tid_t tid);
+int journal_start_commit(journal_t *journal, tid_t *tid);
+int journal_force_commit_nested(journal_t *journal);
+int log_wait_commit(journal_t *journal, tid_t tid);
+int log_do_checkpoint(journal_t *journal);
+int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
+
+void __log_wait_for_space(journal_t *journal);
+extern void __journal_drop_transaction(journal_t *, transaction_t *);
+extern int cleanup_journal_tail(journal_t *);
+
+/* Debugging code only: */
+
+#define jbd_ENOSYS() \
+do { \
+	printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
+	current->state = TASK_UNINTERRUPTIBLE; \
+	schedule(); \
+} while (1)
+
+/*
+ * is_journal_aborted
+ *
+ * Simple test wrapper function to test the JFS_ABORT state flag. This
+ * bit, when set, indicates that we have had a fatal error somewhere,
+ * either inside the journaling layer or indicated to us by the client
+ * (e.g. ext3), and that we should not commit any further
+ * transactions.
+ */
+
+static inline int is_journal_aborted(journal_t *journal)
+{
+	return journal->j_flags & JFS_ABORT;
+}
+
+static inline int is_handle_aborted(handle_t *handle)
+{
+	if (handle->h_aborted)
+		return 1;
+	return is_journal_aborted(handle->h_transaction->t_journal);
+}
+
+static inline void journal_abort_handle(handle_t *handle)
+{
+	handle->h_aborted = 1;
+}
+
+#endif /* __KERNEL__ */
+
+/* Comparison functions for transaction IDs: perform comparisons using
+ * modulo arithmetic so that they work over sequence number wraps. */
+
+static inline int tid_gt(tid_t x, tid_t y)
+{
+	int difference = (x - y);
+	return (difference > 0);
+}
+
+static inline int tid_geq(tid_t x, tid_t y)
+{
+	int difference = (x - y);
+	return (difference >= 0);
+}
+
+extern int journal_blocks_per_page(struct inode *inode);
+
+/*
+ * Return the minimum number of blocks which must be free in the journal
+ * before a new transaction may be started. Must be called under j_state_lock.
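 *
 * A typical caller (a sketch only; the lock drop/retry policy around the
 * wait is the caller's responsibility) checks the available log space and
 * then waits for checkpointing to free more:
 *
 *	if (__log_space_left(journal) < jbd_space_needed(journal))
 *		__log_wait_for_space(journal);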
+ */ +static inline int jbd_space_needed(journal_t *journal) +{ + int nblocks = journal->j_max_transaction_buffers; + if (journal->j_committing_transaction) + nblocks += journal->j_committing_transaction-> + t_outstanding_credits; + return nblocks; +} + +/* + * Definitions which augment the buffer_head layer + */ + +/* journaling buffer types */ +#define BJ_None 0 /* Not journaled */ +#define BJ_SyncData 1 /* Normal data: flush before commit */ +#define BJ_Metadata 2 /* Normal journaled metadata */ +#define BJ_Forget 3 /* Buffer superseded by this transaction */ +#define BJ_IO 4 /* Buffer is for temporary IO use */ +#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */ +#define BJ_LogCtl 6 /* Buffer contains log descriptors */ +#define BJ_Reserved 7 /* Buffer is reserved for access by journal */ +#define BJ_Locked 8 /* Locked for I/O during commit */ +#define BJ_Types 9 + +extern int jbd_blocks_per_page(struct inode *inode); + +#ifdef __KERNEL__ + +#define buffer_trace_init(bh) do {} while (0) +#define print_buffer_fields(bh) do {} while (0) +#define print_buffer_trace(bh) do {} while (0) +#define BUFFER_TRACE(bh, info) do {} while (0) +#define BUFFER_TRACE2(bh, bh2, info) do {} while (0) +#define JBUFFER_TRACE(jh, info) do {} while (0) + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_JBD_H */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h new file mode 100644 index 00000000..912c30a8 --- /dev/null +++ b/include/linux/jbd2.h @@ -0,0 +1,1282 @@ +/* + * linux/include/linux/jbd2.h + * + * Written by Stephen C. Tweedie <sct@redhat.com> + * + * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved + * + * This file is part of the Linux kernel and is made available under + * the terms of the GNU General Public License, version 2, or at your + * option, any later version, incorporated herein by reference. + * + * Definitions for transaction data structures for the buffer cache + * filesystem journaling support. + */ + +#ifndef _LINUX_JBD2_H +#define _LINUX_JBD2_H + +/* Allow this file to be included directly into e2fsprogs */ +#ifndef __KERNEL__ +#include "jfs_compat.h" +#define JBD2_DEBUG +#define jfs_debug jbd_debug +#else + +#include <linux/types.h> +#include <linux/buffer_head.h> +#include <linux/journal-head.h> +#include <linux/stddef.h> +#include <linux/bit_spinlock.h> +#include <linux/mutex.h> +#include <linux/timer.h> +#include <linux/slab.h> +#endif + +#define journal_oom_retry 1 + +/* + * Define JBD2_PARANIOD_IOFAIL to cause a kernel BUG() if ext4 finds + * certain classes of error which can occur due to failed IOs. Under + * normal use we want ext4 to continue after such errors, because + * hardware _can_ fail, but for debugging purposes when running tests on + * known-good hardware we may want to trap these errors. + */ +#undef JBD2_PARANOID_IOFAIL + +/* + * The default maximum commit age, in seconds. + */ +#define JBD2_DEFAULT_MAX_COMMIT_AGE 5 + +#ifdef CONFIG_JBD2_DEBUG +/* + * Define JBD2_EXPENSIVE_CHECKING to enable more expensive internal + * consistency checks. By default we don't do this unless + * CONFIG_JBD2_DEBUG is on. + */ +#define JBD2_EXPENSIVE_CHECKING +extern u8 jbd2_journal_enable_debug; + +#define jbd_debug(n, f, a...) \ + do { \ + if ((n) <= jbd2_journal_enable_debug) { \ + printk (KERN_DEBUG "(%s, %d): %s: ", \ + __FILE__, __LINE__, __func__); \ + printk (f, ## a); \ + } \ + } while (0) +#else +#define jbd_debug(f, a...) 
/**/ +#endif + +extern void *jbd2_alloc(size_t size, gfp_t flags); +extern void jbd2_free(void *ptr, size_t size); + +#define JBD2_MIN_JOURNAL_BLOCKS 1024 + +#ifdef __KERNEL__ + +/** + * typedef handle_t - The handle_t type represents a single atomic update being performed by some process. + * + * All filesystem modifications made by the process go + * through this handle. Recursive operations (such as quota operations) + * are gathered into a single update. + * + * The buffer credits field is used to account for journaled buffers + * being modified by the running process. To ensure that there is + * enough log space for all outstanding operations, we need to limit the + * number of outstanding buffers possible at any time. When the + * operation completes, any buffer credits not used are credited back to + * the transaction, so that at all times we know how many buffers the + * outstanding updates on a transaction might possibly touch. + * + * This is an opaque datatype. + **/ +typedef struct jbd2_journal_handle handle_t; /* Atomic operation type */ + + +/** + * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem. + * + * journal_t is linked to from the fs superblock structure. + * + * We use the journal_t to keep track of all outstanding transaction + * activity on the filesystem, and to manage the state of the log + * writing process. + * + * This is an opaque datatype. + **/ +typedef struct journal_s journal_t; /* Journal control structure */ +#endif + +/* + * Internal structures used by the logging mechanism: + */ + +#define JBD2_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */ + +/* + * On-disk structures + */ + +/* + * Descriptor block types: + */ + +#define JBD2_DESCRIPTOR_BLOCK 1 +#define JBD2_COMMIT_BLOCK 2 +#define JBD2_SUPERBLOCK_V1 3 +#define JBD2_SUPERBLOCK_V2 4 +#define JBD2_REVOKE_BLOCK 5 + +/* + * Standard header for all descriptor blocks: + */ +typedef struct journal_header_s +{ + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; +} journal_header_t; + +/* + * Checksum types. + */ +#define JBD2_CRC32_CHKSUM 1 +#define JBD2_MD5_CHKSUM 2 +#define JBD2_SHA1_CHKSUM 3 + +#define JBD2_CRC32_CHKSUM_SIZE 4 + +#define JBD2_CHECKSUM_BYTES (32 / sizeof(u32)) +/* + * Commit block header for storing transactional checksums: + */ +struct commit_header { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; + unsigned char h_chksum_type; + unsigned char h_chksum_size; + unsigned char h_padding[2]; + __be32 h_chksum[JBD2_CHECKSUM_BYTES]; + __be64 h_commit_sec; + __be32 h_commit_nsec; +}; + +/* + * The block tag: used to describe a single buffer in the journal. + * t_blocknr_high is only used if INCOMPAT_64BIT is set, so this + * raw struct shouldn't be used for pointer math or sizeof() - use + * journal_tag_bytes(journal) instead to compute this. + */ +typedef struct journal_block_tag_s +{ + __be32 t_blocknr; /* The on-disk block number */ + __be32 t_flags; /* See below */ + __be32 t_blocknr_high; /* most-significant high 32bits. 
*/ +} journal_block_tag_t; + +#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high)) +#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t)) + +/* + * The revoke descriptor: used on disk to describe a series of blocks to + * be revoked from the log + */ +typedef struct jbd2_journal_revoke_header_s +{ + journal_header_t r_header; + __be32 r_count; /* Count of bytes used in the block */ +} jbd2_journal_revoke_header_t; + + +/* Definitions for the journal tag flags word: */ +#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */ +#define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */ +#define JBD2_FLAG_DELETED 4 /* block deleted by this transaction */ +#define JBD2_FLAG_LAST_TAG 8 /* last tag in this descriptor block */ + + +/* + * The journal superblock. All fields are in big-endian byte order. + */ +typedef struct journal_superblock_s +{ +/* 0x0000 */ + journal_header_t s_header; + +/* 0x000C */ + /* Static information describing the journal */ + __be32 s_blocksize; /* journal device blocksize */ + __be32 s_maxlen; /* total blocks in journal file */ + __be32 s_first; /* first block of log information */ + +/* 0x0018 */ + /* Dynamic information describing the current state of the log */ + __be32 s_sequence; /* first commit ID expected in log */ + __be32 s_start; /* blocknr of start of log */ + +/* 0x0020 */ + /* Error value, as set by jbd2_journal_abort(). */ + __be32 s_errno; + +/* 0x0024 */ + /* Remaining fields are only valid in a version-2 superblock */ + __be32 s_feature_compat; /* compatible feature set */ + __be32 s_feature_incompat; /* incompatible feature set */ + __be32 s_feature_ro_compat; /* readonly-compatible feature set */ +/* 0x0030 */ + __u8 s_uuid[16]; /* 128-bit uuid for journal */ + +/* 0x0040 */ + __be32 s_nr_users; /* Nr of filesystems sharing log */ + + __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/ + +/* 0x0048 */ + __be32 s_max_transaction; /* Limit of journal blocks per trans.*/ + __be32 s_max_trans_data; /* Limit of data blocks per trans. */ + +/* 0x0050 */ + __u32 s_padding[44]; + +/* 0x0100 */ + __u8 s_users[16*48]; /* ids of all fs'es sharing the log */ +/* 0x0400 */ +} journal_superblock_t; + +#define JBD2_HAS_COMPAT_FEATURE(j,mask) \ + ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask)))) +#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \ + ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask)))) +#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \ + ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask)))) + +#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 + +#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 +#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 +#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 + +/* Features known to this kernel version: */ +#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM +#define JBD2_KNOWN_ROCOMPAT_FEATURES 0 +#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \ + JBD2_FEATURE_INCOMPAT_64BIT | \ + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) + +#ifdef __KERNEL__ + +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/jbd_common.h> + +#define J_ASSERT(assert) BUG_ON(!(assert)) + +#define J_ASSERT_BH(bh, expr) J_ASSERT(expr) +#define J_ASSERT_JH(jh, expr) J_ASSERT(expr) + +#if defined(JBD2_PARANOID_IOFAIL) +#define J_EXPECT(expr, why...) J_ASSERT(expr) +#define J_EXPECT_BH(bh, expr, why...) 
J_ASSERT_BH(bh, expr) +#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr) +#else +#define __journal_expect(expr, why...) \ + ({ \ + int val = (expr); \ + if (!val) { \ + printk(KERN_ERR \ + "JBD2 unexpected failure: %s: %s;\n", \ + __func__, #expr); \ + printk(KERN_ERR why "\n"); \ + } \ + val; \ + }) +#define J_EXPECT(expr, why...) __journal_expect(expr, ## why) +#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why) +#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why) +#endif + +/* Flags in jbd_inode->i_flags */ +#define __JI_COMMIT_RUNNING 0 +/* Commit of the inode data in progress. We use this flag to protect us from + * concurrent deletion of inode. We cannot use reference to inode for this + * since we cannot afford doing last iput() on behalf of kjournald + */ +#define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING) + +/** + * struct jbd_inode is the structure linking inodes in ordered mode + * present in a transaction so that we can sync them during commit. + */ +struct jbd2_inode { + /* Which transaction does this inode belong to? Either the running + * transaction or the committing one. [j_list_lock] */ + transaction_t *i_transaction; + + /* Pointer to the running transaction modifying inode's data in case + * there is already a committing transaction touching it. [j_list_lock] */ + transaction_t *i_next_transaction; + + /* List of inodes in the i_transaction [j_list_lock] */ + struct list_head i_list; + + /* VFS inode this inode belongs to [constant during the lifetime + * of the structure] */ + struct inode *i_vfs_inode; + + /* Flags of inode [j_list_lock] */ + unsigned long i_flags; +}; + +struct jbd2_revoke_table_s; + +/** + * struct handle_s - The handle_s type is the concrete type associated with + * handle_t. + * @h_transaction: Which compound transaction is this update a part of? + * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. + * @h_ref: Reference count on this handle + * @h_err: Field for caller's use to track errors through large fs operations + * @h_sync: flag for sync-on-close + * @h_jdata: flag to force data journaling + * @h_aborted: flag indicating fatal error on handle + **/ + +/* Docbook can't yet cope with the bit fields, but will leave the documentation + * in so it can be fixed later. + */ + +struct jbd2_journal_handle +{ + /* Which compound transaction is this update a part of? 
*/ + transaction_t *h_transaction; + + /* Number of remaining buffers we are allowed to dirty: */ + int h_buffer_credits; + + /* Reference count on this handle */ + int h_ref; + + /* Field for caller's use to track errors through large fs */ + /* operations */ + int h_err; + + /* Flags [no locking] */ + unsigned int h_sync:1; /* sync-on-close */ + unsigned int h_jdata:1; /* force data journaling */ + unsigned int h_aborted:1; /* fatal error on handle */ + unsigned int h_cowing:1; /* COWing block to snapshot */ + + /* Number of buffers requested by user: + * (before adding the COW credits factor) */ + unsigned int h_base_credits:14; + + /* Number of buffers the user is allowed to dirty: + * (counts only buffers dirtied when !h_cowing) */ + unsigned int h_user_credits:14; + + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map h_lockdep_map; +#endif + +#ifdef CONFIG_JBD2_DEBUG + /* COW debugging counters: */ + unsigned int h_cow_moved; /* blocks moved to snapshot */ + unsigned int h_cow_copied; /* blocks copied to snapshot */ + unsigned int h_cow_ok_jh; /* blocks already COWed during current + transaction */ + unsigned int h_cow_ok_bitmap; /* blocks not set in COW bitmap */ + unsigned int h_cow_ok_mapped;/* blocks already mapped in snapshot */ + unsigned int h_cow_bitmaps; /* COW bitmaps created */ + unsigned int h_cow_excluded; /* blocks set in exclude bitmap */ +#endif +}; + + +/* + * Some stats for checkpoint phase + */ +struct transaction_chp_stats_s { + unsigned long cs_chp_time; + __u32 cs_forced_to_close; + __u32 cs_written; + __u32 cs_dropped; +}; + +/* The transaction_t type is the guts of the journaling mechanism. It + * tracks a compound transaction through its various states: + * + * RUNNING: accepting new updates + * LOCKED: Updates still running but we don't accept new ones + * RUNDOWN: Updates are tidying up but have finished requesting + * new buffers to modify (state not used for now) + * FLUSH: All updates complete, but we are still writing to disk + * COMMIT: All data on disk, writing commit record + * FINISHED: We still have to keep the transaction for checkpointing. + * + * The transaction keeps track of all of the buffers modified by a + * running transaction, and all of the buffers committed but not yet + * flushed to home for finished transactions. + */ + +/* + * Lock ranking: + * + * j_list_lock + * ->jbd_lock_bh_journal_head() (This is "innermost") + * + * j_state_lock + * ->jbd_lock_bh_state() + * + * jbd_lock_bh_state() + * ->j_list_lock + * + * j_state_lock + * ->t_handle_lock + * + * j_state_lock + * ->j_list_lock (journal_unmap_buffer) + * + */ + +struct transaction_s +{ + /* Pointer to the journal for this transaction. [no locking] */ + journal_t *t_journal; + + /* Sequence number for this transaction [no locking] */ + tid_t t_tid; + + /* + * Transaction's current state + * [no locking - only kjournald2 alters this] + * [j_list_lock] guards transition of a transaction into T_FINISHED + * state and subsequent call of __jbd2_journal_drop_transaction() + * FIXME: needs barriers + * KLUDGE: [use j_state_lock] + */ + enum { + T_RUNNING, + T_LOCKED, + T_FLUSH, + T_COMMIT, + T_COMMIT_DFLUSH, + T_COMMIT_JFLUSH, + T_FINISHED + } t_state; + + /* + * Where in the log does this transaction's commit start? 
[no locking] + */ + unsigned long t_log_start; + + /* Number of buffers on the t_buffers list [j_list_lock] */ + int t_nr_buffers; + + /* + * Doubly-linked circular list of all buffers reserved but not yet + * modified by this transaction [j_list_lock] + */ + struct journal_head *t_reserved_list; + + /* + * Doubly-linked circular list of all metadata buffers owned by this + * transaction [j_list_lock] + */ + struct journal_head *t_buffers; + + /* + * Doubly-linked circular list of all forget buffers (superseded + * buffers which we can un-checkpoint once this transaction commits) + * [j_list_lock] + */ + struct journal_head *t_forget; + + /* + * Doubly-linked circular list of all buffers still to be flushed before + * this transaction can be checkpointed. [j_list_lock] + */ + struct journal_head *t_checkpoint_list; + + /* + * Doubly-linked circular list of all buffers submitted for IO while + * checkpointing. [j_list_lock] + */ + struct journal_head *t_checkpoint_io_list; + + /* + * Doubly-linked circular list of temporary buffers currently undergoing + * IO in the log [j_list_lock] + */ + struct journal_head *t_iobuf_list; + + /* + * Doubly-linked circular list of metadata buffers being shadowed by log + * IO. The IO buffers on the iobuf list and the shadow buffers on this + * list match each other one for one at all times. [j_list_lock] + */ + struct journal_head *t_shadow_list; + + /* + * Doubly-linked circular list of control buffers being written to the + * log. [j_list_lock] + */ + struct journal_head *t_log_list; + + /* + * List of inodes whose data we've modified in data=ordered mode. + * [j_list_lock] + */ + struct list_head t_inode_list; + + /* + * Protects info related to handles + */ + spinlock_t t_handle_lock; + + /* + * Longest time some handle had to wait for running transaction + */ + unsigned long t_max_wait; + + /* + * When transaction started + */ + unsigned long t_start; + + /* + * Checkpointing stats [j_checkpoint_sem] + */ + struct transaction_chp_stats_s t_chp_stats; + + /* + * Number of outstanding updates running on this transaction + * [t_handle_lock] + */ + atomic_t t_updates; + + /* + * Number of buffers reserved for use by all handles in this transaction + * handle but not yet modified. [t_handle_lock] + */ + atomic_t t_outstanding_credits; + + /* + * Forward and backward links for the circular list of all transactions + * awaiting checkpoint. [j_list_lock] + */ + transaction_t *t_cpnext, *t_cpprev; + + /* + * When will the transaction expire (become due for commit), in jiffies? + * [no locking] + */ + unsigned long t_expires; + + /* + * When this transaction started, in nanoseconds [no locking] + */ + ktime_t t_start_time; + + /* + * How many handles used this transaction? [t_handle_lock] + */ + atomic_t t_handle_count; + + /* + * This transaction is being forced and some process is + * waiting for it to finish. 
+ */ + unsigned int t_synchronous_commit:1; + + /* Disk flush needs to be sent to fs partition [no locking] */ + int t_need_data_flush; + + /* + * For use by the filesystem to store fs-specific data + * structures associated with the transaction + */ + struct list_head t_private_list; +}; + +struct transaction_run_stats_s { + unsigned long rs_wait; + unsigned long rs_running; + unsigned long rs_locked; + unsigned long rs_flushing; + unsigned long rs_logging; + + __u32 rs_handle_count; + __u32 rs_blocks; + __u32 rs_blocks_logged; +}; + +struct transaction_stats_s { + unsigned long ts_tid; + struct transaction_run_stats_s run; +}; + +static inline unsigned long +jbd2_time_diff(unsigned long start, unsigned long end) +{ + if (end >= start) + return end - start; + + return end + (MAX_JIFFY_OFFSET - start); +} + +#define JBD2_NR_BATCH 64 + +/** + * struct journal_s - The journal_s type is the concrete type associated with + * journal_t. + * @j_flags: General journaling state flags + * @j_errno: Is there an outstanding uncleared error on the journal (from a + * prior abort)? + * @j_sb_buffer: First part of superblock buffer + * @j_superblock: Second part of superblock buffer + * @j_format_version: Version of the superblock format + * @j_state_lock: Protect the various scalars in the journal + * @j_barrier_count: Number of processes waiting to create a barrier lock + * @j_barrier: The barrier lock itself + * @j_running_transaction: The current running transaction.. + * @j_committing_transaction: the transaction we are pushing to disk + * @j_checkpoint_transactions: a linked circular list of all transactions + * waiting for checkpointing + * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction + * to start committing, or for a barrier lock to be released + * @j_wait_logspace: Wait queue for waiting for checkpointing to complete + * @j_wait_done_commit: Wait queue for waiting for commit to complete + * @j_wait_checkpoint: Wait queue to trigger checkpointing + * @j_wait_commit: Wait queue to trigger commit + * @j_wait_updates: Wait queue to wait for updates to complete + * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints + * @j_head: Journal head - identifies the first unused block in the journal + * @j_tail: Journal tail - identifies the oldest still-used block in the + * journal. + * @j_free: Journal free - how many free blocks are there in the journal? + * @j_first: The block number of the first usable block + * @j_last: The block number one beyond the last usable block + * @j_dev: Device where we store the journal + * @j_blocksize: blocksize for the location where we store the journal. + * @j_blk_offset: starting block offset for into the device where we store the + * journal + * @j_fs_dev: Device which holds the client fs. For internal journal this will + * be equal to j_dev + * @j_maxlen: Total maximum capacity of the journal region on disk. + * @j_list_lock: Protects the buffer lists and internal buffer state. + * @j_inode: Optional inode where we store the journal. If present, all journal + * block numbers are mapped into this inode via bmap(). + * @j_tail_sequence: Sequence number of the oldest transaction in the log + * @j_transaction_sequence: Sequence number of the next transaction to grant + * @j_commit_sequence: Sequence number of the most recently committed + * transaction + * @j_commit_request: Sequence number of the most recent transaction wanting + * commit + * @j_uuid: Uuid of client object. 
+ * @j_task: Pointer to the current commit thread for this journal + * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a + * single compound commit transaction + * @j_commit_interval: What is the maximum transaction lifetime before we begin + * a commit? + * @j_commit_timer: The timer used to wakeup the commit thread + * @j_revoke_lock: Protect the revoke table + * @j_revoke: The revoke table - maintains the list of revoked blocks in the + * current transaction. + * @j_revoke_table: alternate revoke tables for j_revoke + * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction + * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the + * number that will fit in j_blocksize + * @j_last_sync_writer: most recent pid which did a synchronous write + * @j_history: Buffer storing the transactions statistics history + * @j_history_max: Maximum number of transactions in the statistics history + * @j_history_cur: Current number of transactions in the statistics history + * @j_history_lock: Protect the transactions statistics history + * @j_proc_entry: procfs entry for the jbd statistics directory + * @j_stats: Overall statistics + * @j_private: An opaque pointer to fs-private information. + */ + +struct journal_s +{ + /* General journaling state flags [j_state_lock] */ + unsigned long j_flags; + + /* + * Is there an outstanding uncleared error on the journal (from a prior + * abort)? [j_state_lock] + */ + int j_errno; + + /* The superblock buffer */ + struct buffer_head *j_sb_buffer; + journal_superblock_t *j_superblock; + + /* Version of the superblock format */ + int j_format_version; + + /* + * Protect the various scalars in the journal + */ + rwlock_t j_state_lock; + + /* + * Number of processes waiting to create a barrier lock [j_state_lock] + */ + int j_barrier_count; + + /* The barrier lock itself */ + struct mutex j_barrier; + + /* + * Transactions: The current running transaction... + * [j_state_lock] [caller holding open handle] + */ + transaction_t *j_running_transaction; + + /* + * the transaction we are pushing to disk + * [j_state_lock] [caller holding open handle] + */ + transaction_t *j_committing_transaction; + + /* + * ... and a linked circular list of all transactions waiting for + * checkpointing. [j_list_lock] + */ + transaction_t *j_checkpoint_transactions; + + /* + * Wait queue for waiting for a locked transaction to start committing, + * or for a barrier lock to be released + */ + wait_queue_head_t j_wait_transaction_locked; + + /* Wait queue for waiting for checkpointing to complete */ + wait_queue_head_t j_wait_logspace; + + /* Wait queue for waiting for commit to complete */ + wait_queue_head_t j_wait_done_commit; + + /* Wait queue to trigger checkpointing */ + wait_queue_head_t j_wait_checkpoint; + + /* Wait queue to trigger commit */ + wait_queue_head_t j_wait_commit; + + /* Wait queue to wait for updates to complete */ + wait_queue_head_t j_wait_updates; + + /* Semaphore for locking against concurrent checkpoints */ + struct mutex j_checkpoint_mutex; + + /* + * List of buffer heads used by the checkpoint routine. This + * was moved from jbd2_log_do_checkpoint() to reduce stack + * usage. Access to this array is controlled by the + * j_checkpoint_mutex. [j_checkpoint_mutex] + */ + struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; + + /* + * Journal head: identifies the first unused block in the journal. 
+ * [j_state_lock] + */ + unsigned long j_head; + + /* + * Journal tail: identifies the oldest still-used block in the journal. + * [j_state_lock] + */ + unsigned long j_tail; + + /* + * Journal free: how many free blocks are there in the journal? + * [j_state_lock] + */ + unsigned long j_free; + + /* + * Journal start and end: the block numbers of the first usable block + * and one beyond the last usable block in the journal. [j_state_lock] + */ + unsigned long j_first; + unsigned long j_last; + + /* + * Device, blocksize and starting block offset for the location where we + * store the journal. + */ + struct block_device *j_dev; + int j_blocksize; + unsigned long long j_blk_offset; + char j_devname[BDEVNAME_SIZE+24]; + + /* + * Device which holds the client fs. For internal journal this will be + * equal to j_dev. + */ + struct block_device *j_fs_dev; + + /* Total maximum capacity of the journal region on disk. */ + unsigned int j_maxlen; + + /* + * Protects the buffer lists and internal buffer state. + */ + spinlock_t j_list_lock; + + /* Optional inode where we store the journal. If present, all */ + /* journal block numbers are mapped into this inode via */ + /* bmap(). */ + struct inode *j_inode; + + /* + * Sequence number of the oldest transaction in the log [j_state_lock] + */ + tid_t j_tail_sequence; + + /* + * Sequence number of the next transaction to grant [j_state_lock] + */ + tid_t j_transaction_sequence; + + /* + * Sequence number of the most recently committed transaction + * [j_state_lock]. + */ + tid_t j_commit_sequence; + + /* + * Sequence number of the most recent transaction wanting commit + * [j_state_lock] + */ + tid_t j_commit_request; + + /* + * Journal uuid: identifies the object (filesystem, LVM volume etc) + * backed by this journal. This will eventually be replaced by an array + * of uuids, allowing us to index multiple devices within a single + * journal and to perform atomic updates across them. + */ + __u8 j_uuid[16]; + + /* Pointer to the current commit thread for this journal */ + struct task_struct *j_task; + + /* + * Maximum number of metadata buffers to allow in a single compound + * commit transaction + */ + int j_max_transaction_buffers; + + /* + * What is the maximum transaction lifetime before we begin a commit? + */ + unsigned long j_commit_interval; + + /* The timer used to wakeup the commit thread: */ + struct timer_list j_commit_timer; + + /* + * The revoke table: maintains the list of revoked blocks in the + * current transaction. [j_revoke_lock] + */ + spinlock_t j_revoke_lock; + struct jbd2_revoke_table_s *j_revoke; + struct jbd2_revoke_table_s *j_revoke_table[2]; + + /* + * array of bhs for jbd2_journal_commit_transaction + */ + struct buffer_head **j_wbuf; + int j_wbufsize; + + /* + * this is the pid of hte last person to run a synchronous operation + * through the journal + */ + pid_t j_last_sync_writer; + + /* + * the average amount of time in nanoseconds it takes to commit a + * transaction to disk. 
[j_state_lock] + */ + u64 j_average_commit_time; + + /* + * minimum and maximum times that we should wait for + * additional filesystem operations to get batched into a + * synchronous handle in microseconds + */ + u32 j_min_batch_time; + u32 j_max_batch_time; + + /* This function is called when a transaction is closed */ + void (*j_commit_callback)(journal_t *, + transaction_t *); + + /* + * Journal statistics + */ + spinlock_t j_history_lock; + struct proc_dir_entry *j_proc_entry; + struct transaction_stats_s j_stats; + + /* Failed journal commit ID */ + unsigned int j_failed_commit; + + /* + * An opaque pointer to fs-private information. ext3 puts its + * superblock pointer here + */ + void *j_private; +}; + +/* + * Journal flag definitions + */ +#define JBD2_UNMOUNT 0x001 /* Journal thread is being destroyed */ +#define JBD2_ABORT 0x002 /* Journaling has been aborted for errors. */ +#define JBD2_ACK_ERR 0x004 /* The errno in the sb has been acked */ +#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */ +#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */ +#define JBD2_BARRIER 0x020 /* Use IDE barriers */ +#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file + * data write error in ordered + * mode */ + +/* + * Function declarations for the journaling transaction and buffer + * management + */ + +/* Filing buffers */ +extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *); +extern void __jbd2_journal_refile_buffer(struct journal_head *); +extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *); +extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); +extern void __journal_free_buffer(struct journal_head *bh); +extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); +extern void __journal_clean_data_list(transaction_t *transaction); + +/* Log buffer allocation */ +extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *); +int jbd2_journal_next_log_block(journal_t *, unsigned long long *); +int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, + unsigned long *block); +void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); +void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); + +/* Commit management */ +extern void jbd2_journal_commit_transaction(journal_t *); + +/* Checkpoint list management */ +int __jbd2_journal_clean_checkpoint_list(journal_t *journal); +int __jbd2_journal_remove_checkpoint(struct journal_head *); +void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); + + +/* + * Triggers + */ + +struct jbd2_buffer_trigger_type { + /* + * Fired a the moment data to write to the journal are known to be + * stable - so either at the moment b_frozen_data is created or just + * before a buffer is written to the journal. mapped_data is a mapped + * buffer that is the frozen data for commit. + */ + void (*t_frozen)(struct jbd2_buffer_trigger_type *type, + struct buffer_head *bh, void *mapped_data, + size_t size); + + /* + * Fired during journal abort for dirty buffers that will not be + * committed. 
+ */ + void (*t_abort)(struct jbd2_buffer_trigger_type *type, + struct buffer_head *bh); +}; + +extern void jbd2_buffer_frozen_trigger(struct journal_head *jh, + void *mapped_data, + struct jbd2_buffer_trigger_type *triggers); +extern void jbd2_buffer_abort_trigger(struct journal_head *jh, + struct jbd2_buffer_trigger_type *triggers); + +/* Buffer IO */ +extern int +jbd2_journal_write_metadata_buffer(transaction_t *transaction, + struct journal_head *jh_in, + struct journal_head **jh_out, + unsigned long long blocknr); + +/* Transaction locking */ +extern void __wait_on_journal (journal_t *); + +/* Transaction cache support */ +extern void jbd2_journal_destroy_transaction_cache(void); +extern int jbd2_journal_init_transaction_cache(void); +extern void jbd2_journal_free_transaction(transaction_t *); + +/* + * Journal locking. + * + * We need to lock the journal during transaction state changes so that nobody + * ever tries to take a handle on the running transaction while we are in the + * middle of moving it to the commit phase. j_state_lock does this. + * + * Note that the locking is completely interrupt unsafe. We never touch + * journal structures from interrupts. + */ + +static inline handle_t *journal_current_handle(void) +{ + return current->journal_info; +} + +/* The journaling code user interface: + * + * Create and destroy handles + * Register buffer modifications against the current transaction. + */ + +extern handle_t *jbd2_journal_start(journal_t *, int nblocks); +extern handle_t *jbd2__journal_start(journal_t *, int nblocks, gfp_t gfp_mask); +extern int jbd2_journal_restart(handle_t *, int nblocks); +extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask); +extern int jbd2_journal_extend (handle_t *, int nblocks); +extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); +extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); +extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *); +void jbd2_journal_set_triggers(struct buffer_head *, + struct jbd2_buffer_trigger_type *type); +extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); +extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *); +extern int jbd2_journal_forget (handle_t *, struct buffer_head *); +extern void journal_sync_buffer (struct buffer_head *); +extern void jbd2_journal_invalidatepage(journal_t *, + struct page *, unsigned long); +extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); +extern int jbd2_journal_stop(handle_t *); +extern int jbd2_journal_flush (journal_t *); +extern void jbd2_journal_lock_updates (journal_t *); +extern void jbd2_journal_unlock_updates (journal_t *); + +extern journal_t * jbd2_journal_init_dev(struct block_device *bdev, + struct block_device *fs_dev, + unsigned long long start, int len, int bsize); +extern journal_t * jbd2_journal_init_inode (struct inode *); +extern int jbd2_journal_update_format (journal_t *); +extern int jbd2_journal_check_used_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern int jbd2_journal_check_available_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern int jbd2_journal_set_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern void jbd2_journal_clear_features + (journal_t *, unsigned long, unsigned long, unsigned long); +extern int jbd2_journal_load (journal_t *journal); +extern int jbd2_journal_destroy (journal_t *); +extern int 
jbd2_journal_recover (journal_t *journal); +extern int jbd2_journal_wipe (journal_t *, int); +extern int jbd2_journal_skip_recovery (journal_t *); +extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t, + unsigned long, int); +extern void __jbd2_journal_abort_hard (journal_t *); +extern void jbd2_journal_abort (journal_t *, int); +extern int jbd2_journal_errno (journal_t *); +extern void jbd2_journal_ack_err (journal_t *); +extern int jbd2_journal_clear_err (journal_t *); +extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); +extern int jbd2_journal_force_commit(journal_t *); +extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, + struct jbd2_inode *inode, loff_t new_size); +extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); +extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode); + +/* + * journal_head management + */ +struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh); +struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh); +void jbd2_journal_put_journal_head(struct journal_head *jh); + +/* + * handle management + */ +extern struct kmem_cache *jbd2_handle_cache; + +static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags) +{ + return kmem_cache_alloc(jbd2_handle_cache, gfp_flags); +} + +static inline void jbd2_free_handle(handle_t *handle) +{ + kmem_cache_free(jbd2_handle_cache, handle); +} + +/* + * jbd2_inode management (optional, for those file systems that want to use + * dynamically allocated jbd2_inode structures) + */ +extern struct kmem_cache *jbd2_inode_cache; + +static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags) +{ + return kmem_cache_alloc(jbd2_inode_cache, gfp_flags); +} + +static inline void jbd2_free_inode(struct jbd2_inode *jinode) +{ + kmem_cache_free(jbd2_inode_cache, jinode); +} + +/* Primary revoke support */ +#define JOURNAL_REVOKE_DEFAULT_HASH 256 +extern int jbd2_journal_init_revoke(journal_t *, int); +extern void jbd2_journal_destroy_revoke_caches(void); +extern int jbd2_journal_init_revoke_caches(void); + +extern void jbd2_journal_destroy_revoke(journal_t *); +extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); +extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *); +extern void jbd2_journal_write_revoke_records(journal_t *, + transaction_t *, int); + +/* Recovery revoke support */ +extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t); +extern int jbd2_journal_test_revoke(journal_t *, unsigned long long, tid_t); +extern void jbd2_journal_clear_revoke(journal_t *); +extern void jbd2_journal_switch_revoke_table(journal_t *journal); +extern void jbd2_clear_buffer_revoked_flags(journal_t *journal); + +/* + * The log thread user interface: + * + * Request space in the current transaction, and force transaction commit + * transitions on demand. 
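 *
 * For example, a caller that wants the running transaction committed and
 * then waits for that commit to finish might do (a sketch; "journal" is
 * assumed to come from the calling filesystem):
 *
 *	tid_t tid;
 *
 *	if (jbd2_journal_start_commit(journal, &tid))
 *		jbd2_log_wait_commit(journal, tid);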
+ */
+
+int __jbd2_log_space_left(journal_t *); /* Called with journal locked */
+int jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
+int jbd2_journal_force_commit_nested(journal_t *journal);
+int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
+int jbd2_log_do_checkpoint(journal_t *journal);
+int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
+
+void __jbd2_log_wait_for_space(journal_t *journal);
+extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
+extern int jbd2_cleanup_journal_tail(journal_t *);
+
+/* Debugging code only: */
+
+#define jbd_ENOSYS() \
+do { \
+	printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
+	current->state = TASK_UNINTERRUPTIBLE; \
+	schedule(); \
+} while (1)
+
+/*
+ * is_journal_aborted
+ *
+ * Simple test wrapper function to test the JBD2_ABORT state flag. This
+ * bit, when set, indicates that we have had a fatal error somewhere,
+ * either inside the journaling layer or indicated to us by the client
+ * (e.g. ext3), and that we should not commit any further
+ * transactions.
+ */
+
+static inline int is_journal_aborted(journal_t *journal)
+{
+	return journal->j_flags & JBD2_ABORT;
+}
+
+static inline int is_handle_aborted(handle_t *handle)
+{
+	if (handle->h_aborted)
+		return 1;
+	return is_journal_aborted(handle->h_transaction->t_journal);
+}
+
+static inline void jbd2_journal_abort_handle(handle_t *handle)
+{
+	handle->h_aborted = 1;
+}
+
+#endif /* __KERNEL__ */
+
+/* Comparison functions for transaction IDs: perform comparisons using
+ * modulo arithmetic so that they work over sequence number wraps. */
+
+static inline int tid_gt(tid_t x, tid_t y)
+{
+	int difference = (x - y);
+	return (difference > 0);
+}
+
+static inline int tid_geq(tid_t x, tid_t y)
+{
+	int difference = (x - y);
+	return (difference >= 0);
+}
+
+extern int jbd2_journal_blocks_per_page(struct inode *inode);
+extern size_t journal_tag_bytes(journal_t *journal);
+
+/*
+ * Return the minimum number of blocks which must be free in the journal
+ * before a new transaction may be started. Must be called under j_state_lock.
+ */ +static inline int jbd_space_needed(journal_t *journal) +{ + int nblocks = journal->j_max_transaction_buffers; + if (journal->j_committing_transaction) + nblocks += atomic_read(&journal->j_committing_transaction-> + t_outstanding_credits); + return nblocks; +} + +/* + * Definitions which augment the buffer_head layer + */ + +/* journaling buffer types */ +#define BJ_None 0 /* Not journaled */ +#define BJ_Metadata 1 /* Normal journaled metadata */ +#define BJ_Forget 2 /* Buffer superseded by this transaction */ +#define BJ_IO 3 /* Buffer is for temporary IO use */ +#define BJ_Shadow 4 /* Buffer contents being shadowed to the log */ +#define BJ_LogCtl 5 /* Buffer contains log descriptors */ +#define BJ_Reserved 6 /* Buffer is reserved for access by journal */ +#define BJ_Types 7 + +extern int jbd_blocks_per_page(struct inode *inode); + +#ifdef __KERNEL__ + +#define buffer_trace_init(bh) do {} while (0) +#define print_buffer_fields(bh) do {} while (0) +#define print_buffer_trace(bh) do {} while (0) +#define BUFFER_TRACE(bh, info) do {} while (0) +#define BUFFER_TRACE2(bh, bh2, info) do {} while (0) +#define JBUFFER_TRACE(jh, info) do {} while (0) + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_JBD2_H */ diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h new file mode 100644 index 00000000..6230f855 --- /dev/null +++ b/include/linux/jbd_common.h @@ -0,0 +1,68 @@ +#ifndef _LINUX_JBD_STATE_H +#define _LINUX_JBD_STATE_H + +enum jbd_state_bits { + BH_JBD /* Has an attached ext3 journal_head */ + = BH_PrivateStart, + BH_JWrite, /* Being written to log (@@@ DEBUGGING) */ + BH_Freed, /* Has been freed (truncated) */ + BH_Revoked, /* Has been revoked from the log */ + BH_RevokeValid, /* Revoked flag is valid */ + BH_JBDDirty, /* Is dirty but journaled */ + BH_State, /* Pins most journal_head state */ + BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ + BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */ + BH_JBDPrivateStart, /* First bit available for private use by FS */ +}; + +BUFFER_FNS(JBD, jbd) +BUFFER_FNS(JWrite, jwrite) +BUFFER_FNS(JBDDirty, jbddirty) +TAS_BUFFER_FNS(JBDDirty, jbddirty) +BUFFER_FNS(Revoked, revoked) +TAS_BUFFER_FNS(Revoked, revoked) +BUFFER_FNS(RevokeValid, revokevalid) +TAS_BUFFER_FNS(RevokeValid, revokevalid) +BUFFER_FNS(Freed, freed) + +static inline struct buffer_head *jh2bh(struct journal_head *jh) +{ + return jh->b_bh; +} + +static inline struct journal_head *bh2jh(struct buffer_head *bh) +{ + return bh->b_private; +} + +static inline void jbd_lock_bh_state(struct buffer_head *bh) +{ + bit_spin_lock(BH_State, &bh->b_state); +} + +static inline int jbd_trylock_bh_state(struct buffer_head *bh) +{ + return bit_spin_trylock(BH_State, &bh->b_state); +} + +static inline int jbd_is_locked_bh_state(struct buffer_head *bh) +{ + return bit_spin_is_locked(BH_State, &bh->b_state); +} + +static inline void jbd_unlock_bh_state(struct buffer_head *bh) +{ + bit_spin_unlock(BH_State, &bh->b_state); +} + +static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) +{ + bit_spin_lock(BH_JournalHead, &bh->b_state); +} + +static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) +{ + bit_spin_unlock(BH_JournalHead, &bh->b_state); +} + +#endif diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h new file mode 100644 index 00000000..a18b719f --- /dev/null +++ b/include/linux/jffs2.h @@ -0,0 +1,223 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2001-2007 Red Hat, Inc. 
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> + * + * Created by David Woodhouse <dwmw2@infradead.org> + * + * For licensing information, see the file 'LICENCE' in the + * jffs2 directory. + */ + +#ifndef __LINUX_JFFS2_H__ +#define __LINUX_JFFS2_H__ + +#include <linux/types.h> +#include <linux/magic.h> + +/* You must include something which defines the C99 uintXX_t types. + We don't do it from here because this file is used in too many + different environments. */ + +/* Values we may expect to find in the 'magic' field */ +#define JFFS2_OLD_MAGIC_BITMASK 0x1984 +#define JFFS2_MAGIC_BITMASK 0x1985 +#define KSAMTIB_CIGAM_2SFFJ 0x8519 /* For detecting wrong-endian fs */ +#define JFFS2_EMPTY_BITMASK 0xffff +#define JFFS2_DIRTY_BITMASK 0x0000 + +/* Summary node MAGIC marker */ +#define JFFS2_SUM_MAGIC 0x02851885 + +/* We only allow a single char for length, and 0xFF is empty flash so + we don't want it confused with a real length. Hence max 254. +*/ +#define JFFS2_MAX_NAME_LEN 254 + +/* How small can we sensibly write nodes? */ +#define JFFS2_MIN_DATA_LEN 128 + +#define JFFS2_COMPR_NONE 0x00 +#define JFFS2_COMPR_ZERO 0x01 +#define JFFS2_COMPR_RTIME 0x02 +#define JFFS2_COMPR_RUBINMIPS 0x03 +#define JFFS2_COMPR_COPY 0x04 +#define JFFS2_COMPR_DYNRUBIN 0x05 +#define JFFS2_COMPR_ZLIB 0x06 +#define JFFS2_COMPR_LZO 0x07 +/* Compatibility flags. */ +#define JFFS2_COMPAT_MASK 0xc000 /* What do to if an unknown nodetype is found */ +#define JFFS2_NODE_ACCURATE 0x2000 +/* INCOMPAT: Fail to mount the filesystem */ +#define JFFS2_FEATURE_INCOMPAT 0xc000 +/* ROCOMPAT: Mount read-only */ +#define JFFS2_FEATURE_ROCOMPAT 0x8000 +/* RWCOMPAT_COPY: Mount read/write, and copy the node when it's GC'd */ +#define JFFS2_FEATURE_RWCOMPAT_COPY 0x4000 +/* RWCOMPAT_DELETE: Mount read/write, and delete the node when it's GC'd */ +#define JFFS2_FEATURE_RWCOMPAT_DELETE 0x0000 + +#define JFFS2_NODETYPE_DIRENT (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 1) +#define JFFS2_NODETYPE_INODE (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 2) +#define JFFS2_NODETYPE_CLEANMARKER (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3) +#define JFFS2_NODETYPE_PADDING (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 4) + +#define JFFS2_NODETYPE_SUMMARY (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 6) + +#define JFFS2_NODETYPE_XATTR (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 8) +#define JFFS2_NODETYPE_XREF (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 9) + +/* XATTR Related */ +#define JFFS2_XPREFIX_USER 1 /* for "user." */ +#define JFFS2_XPREFIX_SECURITY 2 /* for "security." */ +#define JFFS2_XPREFIX_ACL_ACCESS 3 /* for "system.posix_acl_access" */ +#define JFFS2_XPREFIX_ACL_DEFAULT 4 /* for "system.posix_acl_default" */ +#define JFFS2_XPREFIX_TRUSTED 5 /* for "trusted.*" */ + +#define JFFS2_ACL_VERSION 0x0001 + +// Maybe later... 
+//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3) +//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4) + + +#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at + mount time, don't wait for it to + happen later */ +#define JFFS2_INO_FLAG_USERCOMPR 2 /* User has requested a specific + compression type */ + + +/* These can go once we've made sure we've caught all uses without + byteswapping */ + +typedef struct { + __u32 v32; +} __attribute__((packed)) jint32_t; + +typedef struct { + __u32 m; +} __attribute__((packed)) jmode_t; + +typedef struct { + __u16 v16; +} __attribute__((packed)) jint16_t; + +struct jffs2_unknown_node +{ + /* All start like this */ + jint16_t magic; + jint16_t nodetype; + jint32_t totlen; /* So we can skip over nodes we don't grok */ + jint32_t hdr_crc; +}; + +struct jffs2_raw_dirent +{ + jint16_t magic; + jint16_t nodetype; /* == JFFS2_NODETYPE_DIRENT */ + jint32_t totlen; + jint32_t hdr_crc; + jint32_t pino; + jint32_t version; + jint32_t ino; /* == zero for unlink */ + jint32_t mctime; + __u8 nsize; + __u8 type; + __u8 unused[2]; + jint32_t node_crc; + jint32_t name_crc; + __u8 name[0]; +}; + +/* The JFFS2 raw inode structure: Used for storage on physical media. */ +/* The uid, gid, atime, mtime and ctime members could be longer, but + are left like this for space efficiency. If and when people decide + they really need them extended, it's simple enough to add support for + a new type of raw node. +*/ +struct jffs2_raw_inode +{ + jint16_t magic; /* A constant magic number. */ + jint16_t nodetype; /* == JFFS2_NODETYPE_INODE */ + jint32_t totlen; /* Total length of this node (inc data, etc.) */ + jint32_t hdr_crc; + jint32_t ino; /* Inode number. */ + jint32_t version; /* Version number. */ + jmode_t mode; /* The file's type or mode. */ + jint16_t uid; /* The file's owner. */ + jint16_t gid; /* The file's group. */ + jint32_t isize; /* Total resultant size of this inode (used for truncations) */ + jint32_t atime; /* Last access time. */ + jint32_t mtime; /* Last modification time. */ + jint32_t ctime; /* Change time. */ + jint32_t offset; /* Where to begin to write. */ + jint32_t csize; /* (Compressed) data size */ + jint32_t dsize; /* Size of the node's data. (after decompression) */ + __u8 compr; /* Compression algorithm used */ + __u8 usercompr; /* Compression algorithm requested by the user */ + jint16_t flags; /* See JFFS2_INO_FLAG_* */ + jint32_t data_crc; /* CRC for the (compressed) data. 
*/ + jint32_t node_crc; /* CRC for the raw inode (excluding data) */ + __u8 data[0]; +}; + +struct jffs2_raw_xattr { + jint16_t magic; + jint16_t nodetype; /* = JFFS2_NODETYPE_XATTR */ + jint32_t totlen; + jint32_t hdr_crc; + jint32_t xid; /* XATTR identifier number */ + jint32_t version; + __u8 xprefix; + __u8 name_len; + jint16_t value_len; + jint32_t data_crc; + jint32_t node_crc; + __u8 data[0]; +} __attribute__((packed)); + +struct jffs2_raw_xref +{ + jint16_t magic; + jint16_t nodetype; /* = JFFS2_NODETYPE_XREF */ + jint32_t totlen; + jint32_t hdr_crc; + jint32_t ino; /* inode number */ + jint32_t xid; /* XATTR identifier number */ + jint32_t xseqno; /* xref sequential number */ + jint32_t node_crc; +} __attribute__((packed)); + +struct jffs2_raw_summary +{ + jint16_t magic; + jint16_t nodetype; /* = JFFS2_NODETYPE_SUMMARY */ + jint32_t totlen; + jint32_t hdr_crc; + jint32_t sum_num; /* number of sum entries*/ + jint32_t cln_mkr; /* clean marker size, 0 = no cleanmarker */ + jint32_t padded; /* sum of the size of padding nodes */ + jint32_t sum_crc; /* summary information crc */ + jint32_t node_crc; /* node crc */ + jint32_t sum[0]; /* inode summary info */ +}; + +union jffs2_node_union +{ + struct jffs2_raw_inode i; + struct jffs2_raw_dirent d; + struct jffs2_raw_xattr x; + struct jffs2_raw_xref r; + struct jffs2_raw_summary s; + struct jffs2_unknown_node u; +}; + +/* Data payload for device nodes. */ +union jffs2_device_node { + jint16_t old_id; + jint32_t new_id; +}; + +#endif /* __LINUX_JFFS2_H__ */ diff --git a/include/linux/jhash.h b/include/linux/jhash.h new file mode 100644 index 00000000..47cb09ed --- /dev/null +++ b/include/linux/jhash.h @@ -0,0 +1,170 @@ +#ifndef _LINUX_JHASH_H +#define _LINUX_JHASH_H + +/* jhash.h: Jenkins hash support. + * + * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net) + * + * http://burtleburtle.net/bob/hash/ + * + * These are the credits from Bob's sources: + * + * lookup3.c, by Bob Jenkins, May 2006, Public Domain. + * + * These are functions for producing 32-bit hashes for hash table lookup. + * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() + * are externally useful functions. Routines to test the hash are included + * if SELF_TEST is defined. You can use this free for any purpose. It's in + * the public domain. It has no warranty. + * + * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu) + * + * I've modified Bob's hash to be useful in the Linux kernel, and + * any bugs present are my fault. + * Jozsef + */ +#include <linux/bitops.h> +#include <linux/unaligned/packed_struct.h> + +/* Best hash sizes are of power of two */ +#define jhash_size(n) ((u32)1<<(n)) +/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */ +#define jhash_mask(n) (jhash_size(n)-1) + +/* __jhash_mix -- mix 3 32-bit values reversibly. 
*/ +#define __jhash_mix(a, b, c) \ +{ \ + a -= c; a ^= rol32(c, 4); c += b; \ + b -= a; b ^= rol32(a, 6); a += c; \ + c -= b; c ^= rol32(b, 8); b += a; \ + a -= c; a ^= rol32(c, 16); c += b; \ + b -= a; b ^= rol32(a, 19); a += c; \ + c -= b; c ^= rol32(b, 4); b += a; \ +} + +/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */ +#define __jhash_final(a, b, c) \ +{ \ + c ^= b; c -= rol32(b, 14); \ + a ^= c; a -= rol32(c, 11); \ + b ^= a; b -= rol32(a, 25); \ + c ^= b; c -= rol32(b, 16); \ + a ^= c; a -= rol32(c, 4); \ + b ^= a; b -= rol32(a, 14); \ + c ^= b; c -= rol32(b, 24); \ +} + +/* An arbitrary initial parameter */ +#define JHASH_INITVAL 0xdeadbeef + +/* jhash - hash an arbitrary key + * @k: sequence of bytes as key + * @length: the length of the key + * @initval: the previous hash, or an arbitray value + * + * The generic version, hashes an arbitrary sequence of bytes. + * No alignment or length assumptions are made about the input key. + * + * Returns the hash value of the key. The result depends on endianness. + */ +static inline u32 jhash(const void *key, u32 length, u32 initval) +{ + u32 a, b, c; + const u8 *k = key; + + /* Set up the internal state */ + a = b = c = JHASH_INITVAL + length + initval; + + /* All but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) { + a += __get_unaligned_cpu32(k); + b += __get_unaligned_cpu32(k + 4); + c += __get_unaligned_cpu32(k + 8); + __jhash_mix(a, b, c); + length -= 12; + k += 12; + } + /* Last block: affect all 32 bits of (c) */ + /* All the case statements fall through */ + switch (length) { + case 12: c += (u32)k[11]<<24; + case 11: c += (u32)k[10]<<16; + case 10: c += (u32)k[9]<<8; + case 9: c += k[8]; + case 8: b += (u32)k[7]<<24; + case 7: b += (u32)k[6]<<16; + case 6: b += (u32)k[5]<<8; + case 5: b += k[4]; + case 4: a += (u32)k[3]<<24; + case 3: a += (u32)k[2]<<16; + case 2: a += (u32)k[1]<<8; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + +/* jhash2 - hash an array of u32's + * @k: the key which must be an array of u32's + * @length: the number of u32's in the key + * @initval: the previous hash, or an arbitray value + * + * Returns the hash value of the key. 
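As a rough userspace sketch of how this mixing is typically used, the snippet below re-declares rol32() and the final-mix step locally (it runs outside the kernel), mirrors the jhash_3words() helper defined a little further below, and masks the result into a power-of-two bucket table in the spirit of jhash_mask(); the tuple values are arbitrary:

#include <stdio.h>
#include <stdint.h>

static uint32_t rol32(uint32_t w, unsigned s)
{
	return (w << s) | (w >> (32 - s));
}

#define JHASH_INITVAL 0xdeadbeef

/* Same final mixing as __jhash_final() above */
#define jhash_final(a, b, c)			\
{						\
	c ^= b; c -= rol32(b, 14);		\
	a ^= c; a -= rol32(c, 11);		\
	b ^= a; b -= rol32(a, 25);		\
	c ^= b; c -= rol32(b, 16);		\
	a ^= c; a -= rol32(c, 4);		\
	b ^= a; b -= rol32(a, 14);		\
	c ^= b; c -= rol32(b, 24);		\
}

static uint32_t hash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
{
	a += JHASH_INITVAL;
	b += JHASH_INITVAL;
	c += initval;
	jhash_final(a, b, c);
	return c;
}

int main(void)
{
	/* e.g. bucket a (saddr, daddr, ports) tuple into a 1024-entry table */
	uint32_t hash = hash_3words(0xc0a80001, 0xc0a80002, (80u << 16) | 12345, 0);
	unsigned bucket = hash & ((1u << 10) - 1);	/* jhash_mask(10) */

	printf("hash=%08x bucket=%u\n", hash, bucket);
	return 0;
}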
+ */ +static inline u32 jhash2(const u32 *k, u32 length, u32 initval) +{ + u32 a, b, c; + + /* Set up the internal state */ + a = b = c = JHASH_INITVAL + (length<<2) + initval; + + /* Handle most of the key */ + while (length > 3) { + a += k[0]; + b += k[1]; + c += k[2]; + __jhash_mix(a, b, c); + length -= 3; + k += 3; + } + + /* Handle the last 3 u32's: all the case statements fall through */ + switch (length) { + case 3: c += k[2]; + case 2: b += k[1]; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + + +/* jhash_3words - hash exactly 3, 2 or 1 word(s) */ +static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) +{ + a += JHASH_INITVAL; + b += JHASH_INITVAL; + c += initval; + + __jhash_final(a, b, c); + + return c; +} + +static inline u32 jhash_2words(u32 a, u32 b, u32 initval) +{ + return jhash_3words(a, b, 0, initval); +} + +static inline u32 jhash_1word(u32 a, u32 initval) +{ + return jhash_3words(a, 0, 0, initval); +} + +#endif /* _LINUX_JHASH_H */ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h new file mode 100644 index 00000000..265e2c3c --- /dev/null +++ b/include/linux/jiffies.h @@ -0,0 +1,315 @@ +#ifndef _LINUX_JIFFIES_H +#define _LINUX_JIFFIES_H + +#include <linux/math64.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <asm/param.h> /* for HZ */ + +/* + * The following defines establish the engineering parameters of the PLL + * model. The HZ variable establishes the timer interrupt frequency, 100 Hz + * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the + * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the + * nearest power of two in order to avoid hardware multiply operations. + */ +#if HZ >= 12 && HZ < 24 +# define SHIFT_HZ 4 +#elif HZ >= 24 && HZ < 48 +# define SHIFT_HZ 5 +#elif HZ >= 48 && HZ < 96 +# define SHIFT_HZ 6 +#elif HZ >= 96 && HZ < 192 +# define SHIFT_HZ 7 +#elif HZ >= 192 && HZ < 384 +# define SHIFT_HZ 8 +#elif HZ >= 384 && HZ < 768 +# define SHIFT_HZ 9 +#elif HZ >= 768 && HZ < 1536 +# define SHIFT_HZ 10 +#elif HZ >= 1536 && HZ < 3072 +# define SHIFT_HZ 11 +#elif HZ >= 3072 && HZ < 6144 +# define SHIFT_HZ 12 +#elif HZ >= 6144 && HZ < 12288 +# define SHIFT_HZ 13 +#else +# error Invalid value of HZ. +#endif + +/* LATCH is used in the interval timer and ftape setup. */ +#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ + +/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can + * improve accuracy by shifting LSH bits, hence calculating: + * (NOM << LSH) / DEN + * This however means trouble for large NOM, because (NOM << LSH) may no + * longer fit in 32 bits. The following way of calculating this gives us + * some slack, under the following conditions: + * - (NOM / DEN) fits in (32 - LSH) bits. + * - (NOM % DEN) fits in (32 - LSH) bits. + */ +#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \ + + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN)) + +/* HZ is the requested value. 
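A small userspace illustration of why SH_DIV() above is preferred over the naive (NOM << LSH) / DEN: with the values chosen below (purely for illustration) the naive shift no longer fits in 32 bits, while SH_DIV()'s intermediate terms stay in range and the result matches the exact answer:

#include <stdio.h>
#include <stdint.h>

/* Same rounding trick as the SH_DIV() macro above */
#define SH_DIV(NOM, DEN, LSH) ( (((NOM) / (DEN)) << (LSH))		\
	+ ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))

int main(void)
{
	uint32_t nom = 20000000, den = 1024;

	/* the naive form overflows 32 bits: 20000000 << 8 does not fit */
	uint32_t naive = (nom << 8) / den;
	uint32_t safe  = SH_DIV(nom, den, 8);

	printf("naive=%u  SH_DIV=%u  (exact answer is 5000000)\n", naive, safe);
	return 0;
}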
ACTHZ is actual HZ ("<< 8" is for accuracy) */ +#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8)) + +/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */ +#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8)) + +/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ +#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) + +/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */ +/* a value TUSEC for TICK_USEC (can be set bij adjtimex) */ +#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8)) + +/* some arch's have a small-data section that can be accessed register-relative + * but that can only take up to, say, 4-byte variables. jiffies being part of + * an 8-byte variable may not be correctly accessed unless we force the issue + */ +#define __jiffy_data __attribute__((section(".data"))) + +/* + * The 64-bit value is not atomic - you MUST NOT read it + * without sampling the sequence number in xtime_lock. + * get_jiffies_64() will do this for you as appropriate. + */ +extern u64 __jiffy_data jiffies_64; +extern unsigned long volatile __jiffy_data jiffies; + +#if (BITS_PER_LONG < 64) +u64 get_jiffies_64(void); +#else +static inline u64 get_jiffies_64(void) +{ + return (u64)jiffies; +} +#endif + +/* + * These inlines deal with timer wrapping correctly. You are + * strongly encouraged to use them + * 1. Because people otherwise forget + * 2. Because if the timer wrap changes in future you won't have to + * alter your driver code. + * + * time_after(a,b) returns true if the time a is after time b. + * + * Do this with "<0" and ">=0" to only test the sign of the result. A + * good compiler would generate better code (and a really good compiler + * wouldn't care). Gcc is currently neither. + */ +#define time_after(a,b) \ + (typecheck(unsigned long, a) && \ + typecheck(unsigned long, b) && \ + ((long)(b) - (long)(a) < 0)) +#define time_before(a,b) time_after(b,a) + +#define time_after_eq(a,b) \ + (typecheck(unsigned long, a) && \ + typecheck(unsigned long, b) && \ + ((long)(a) - (long)(b) >= 0)) +#define time_before_eq(a,b) time_after_eq(b,a) + +/* + * Calculate whether a is in the range of [b, c]. + */ +#define time_in_range(a,b,c) \ + (time_after_eq(a,b) && \ + time_before_eq(a,c)) + +/* + * Calculate whether a is in the range of [b, c). + */ +#define time_in_range_open(a,b,c) \ + (time_after_eq(a,b) && \ + time_before(a,c)) + +/* Same as above, but does so with platform independent 64bit types. + * These must be used when utilizing jiffies_64 (i.e. return value of + * get_jiffies_64() */ +#define time_after64(a,b) \ + (typecheck(__u64, a) && \ + typecheck(__u64, b) && \ + ((__s64)(b) - (__s64)(a) < 0)) +#define time_before64(a,b) time_after64(b,a) + +#define time_after_eq64(a,b) \ + (typecheck(__u64, a) && \ + typecheck(__u64, b) && \ + ((__s64)(a) - (__s64)(b) >= 0)) +#define time_before_eq64(a,b) time_after_eq64(b,a) + +/* + * These four macros compare jiffies and 'a' for convenience. 
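A minimal userspace sketch of the signed-difference trick behind time_after() above, written with an explicit 32-bit type so the wrap is easy to reproduce (the kernel macros use unsigned long and add typecheck(), which is omitted here):

#include <stdio.h>
#include <stdint.h>

/* Same idea as time_after(), with an explicit 32-bit type */
#define time_after32(a, b)	((int32_t)(b) - (int32_t)(a) < 0)

int main(void)
{
	uint32_t jiffies = 0x00000010;	/* counter has just wrapped */
	uint32_t timeout = 0xfffffff0;	/* deadline set shortly before the wrap */

	if (time_after32(jiffies, timeout))
		printf("timeout expired (wrap handled correctly)\n");

	/* a naive comparison gets it wrong: 0x10 < 0xfffffff0 */
	printf("naive jiffies > timeout: %d\n", jiffies > timeout);
	return 0;
}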
+ */ + +/* time_is_before_jiffies(a) return true if a is before jiffies */ +#define time_is_before_jiffies(a) time_after(jiffies, a) + +/* time_is_after_jiffies(a) return true if a is after jiffies */ +#define time_is_after_jiffies(a) time_before(jiffies, a) + +/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/ +#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a) + +/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/ +#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a) + +/* + * Have the 32 bit jiffies value wrap 5 minutes after boot + * so jiffies wrap bugs show up earlier. + */ +#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) + +/* + * Change timeval to jiffies, trying to avoid the + * most obvious overflows.. + * + * And some not so obvious. + * + * Note that we don't want to return LONG_MAX, because + * for various timeout reasons we often end up having + * to wait "jiffies+1" in order to guarantee that we wait + * at _least_ "jiffies" - so "jiffies+1" had better still + * be positive. + */ +#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1) + +extern unsigned long preset_lpj; + +/* + * We want to do realistic conversions of time so we need to use the same + * values the update wall clock code uses as the jiffies size. This value + * is: TICK_NSEC (which is defined in timex.h). This + * is a constant and is in nanoseconds. We will use scaled math + * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and + * NSEC_JIFFIE_SC. Note that these defines contain nothing but + * constants and so are computed at compile time. SHIFT_HZ (computed in + * timex.h) adjusts the scaling for different HZ values. + + * Scaled math??? What is that? + * + * Scaled math is a way to do integer math on values that would, + * otherwise, either overflow, underflow, or cause undesired div + * instructions to appear in the execution path. In short, we "scale" + * up the operands so they take more bits (more precision, less + * underflow), do the desired operation and then "scale" the result back + * by the same amount. If we do the scaling by shifting we avoid the + * costly mpy and the dastardly div instructions. + + * Suppose, for example, we want to convert from seconds to jiffies + * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The + * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We + * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we + * might calculate at compile time, however, the result will only have + * about 3-4 bits of precision (less for smaller values of HZ). + * + * So, we scale as follows: + * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE); + * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE; + * Then we make SCALE a power of two so: + * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE; + * Now we define: + * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) + * jiff = (sec * SEC_CONV) >> SCALE; + * + * Often the math we use will expand beyond 32-bits so we tell C how to + * do this and pass the 64-bit result of the mpy through the ">> SCALE" + * which should take the result back to 32-bits. We want this expansion + * to capture as much precision as possible. At the same time we don't + * want to overflow so we pick the SCALE to avoid this. In this file, + * that means using a different scale for each range of HZ values (as + * defined in timex.h). 
+ * + * For those who want to know, gcc will give a 64-bit result from a "*" + * operator if the result is a long long AND at least one of the + * operands is cast to long long (usually just prior to the "*" so as + * not to confuse it into thinking it really has a 64-bit operand, + * which, buy the way, it can do, but it takes more code and at least 2 + * mpys). + + * We also need to be aware that one second in nanoseconds is only a + * couple of bits away from overflowing a 32-bit word, so we MUST use + * 64-bits to get the full range time in nanoseconds. + + */ + +/* + * Here are the scales we will use. One for seconds, nanoseconds and + * microseconds. + * + * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and + * check if the sign bit is set. If not, we bump the shift count by 1. + * (Gets an extra bit of precision where we can use it.) + * We know it is set for HZ = 1024 and HZ = 100 not for 1000. + * Haven't tested others. + + * Limits of cpp (for #if expressions) only long (no long long), but + * then we only need the most signicant bit. + */ + +#define SEC_JIFFIE_SC (31 - SHIFT_HZ) +#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000) +#undef SEC_JIFFIE_SC +#define SEC_JIFFIE_SC (32 - SHIFT_HZ) +#endif +#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) +#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19) +#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ + TICK_NSEC -1) / (u64)TICK_NSEC)) + +#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ + TICK_NSEC -1) / (u64)TICK_NSEC)) +#define USEC_CONVERSION \ + ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\ + TICK_NSEC -1) / (u64)TICK_NSEC)) +/* + * USEC_ROUND is used in the timeval to jiffie conversion. See there + * for more details. It is the scaled resolution rounding value. Note + * that it is a 64-bit value. Since, when it is applied, we are already + * in jiffies (albit scaled), it is nothing but the bits we will shift + * off. + */ +#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1) +/* + * The maximum jiffie value is (MAX_INT >> 1). Here we translate that + * into seconds. The 64-bit case will overflow if we are not careful, + * so use the messy SH_DIV macro to do it. Still all constants. 
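A worked userspace example of the scaled-math recipe described above: choose a power-of-two scale, precompute one scaled conversion constant, then convert with a multiply and a shift instead of a divide. HZ, the simplified TICK_NSEC and the SEC_SCALE shift below are illustrative stand-ins for the real SHIFT_HZ-derived SEC_JIFFIE_SC/SEC_CONVERSION constants:

#include <stdio.h>
#include <stdint.h>

#define HZ		250
#define NSEC_PER_SEC	1000000000ULL
#define TICK_NSEC	(NSEC_PER_SEC / HZ)	/* 4,000,000 ns per tick */

/* scale factor: a stand-in for SEC_JIFFIE_SC, small enough to avoid overflow */
#define SEC_SCALE	24
#define SEC_CONV	(((NSEC_PER_SEC << SEC_SCALE) + TICK_NSEC - 1) / TICK_NSEC)

static uint64_t sec_to_jiffies(uint64_t sec)
{
	return (sec * SEC_CONV) >> SEC_SCALE;	/* one multiply, one shift */
}

int main(void)
{
	printf("5 s  -> %llu jiffies\n", (unsigned long long)sec_to_jiffies(5));	/* 1250 */
	printf("60 s -> %llu jiffies\n", (unsigned long long)sec_to_jiffies(60));	/* 15000 */
	return 0;
}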
+ */ +#if BITS_PER_LONG < 64 +# define MAX_SEC_IN_JIFFIES \ + (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC) +#else /* take care of overflow on 64 bits machines */ +# define MAX_SEC_IN_JIFFIES \ + (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1) + +#endif + +/* + * Convert various time units to each other: + */ +extern unsigned int jiffies_to_msecs(const unsigned long j); +extern unsigned int jiffies_to_usecs(const unsigned long j); +extern unsigned long msecs_to_jiffies(const unsigned int m); +extern unsigned long usecs_to_jiffies(const unsigned int u); +extern unsigned long timespec_to_jiffies(const struct timespec *value); +extern void jiffies_to_timespec(const unsigned long jiffies, + struct timespec *value); +extern unsigned long timeval_to_jiffies(const struct timeval *value); +extern void jiffies_to_timeval(const unsigned long jiffies, + struct timeval *value); +extern clock_t jiffies_to_clock_t(unsigned long x); +extern unsigned long clock_t_to_jiffies(unsigned long x); +extern u64 jiffies_64_to_clock_t(u64 x); +extern u64 nsec_to_clock_t(u64 x); +extern u64 nsecs_to_jiffies64(u64 n); +extern unsigned long nsecs_to_jiffies(u64 n); + +#define TIMESTAMP_SIZE 30 + +#endif diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h new file mode 100644 index 00000000..c18b46f8 --- /dev/null +++ b/include/linux/journal-head.h @@ -0,0 +1,109 @@ +/* + * include/linux/journal-head.h + * + * buffer_head fields for JBD + * + * 27 May 2001 Andrew Morton + * Created - pulled out of fs.h + */ + +#ifndef JOURNAL_HEAD_H_INCLUDED +#define JOURNAL_HEAD_H_INCLUDED + +typedef unsigned int tid_t; /* Unique transaction ID */ +typedef struct transaction_s transaction_t; /* Compound transaction type */ + + +struct buffer_head; + +struct journal_head { + /* + * Points back to our buffer_head. [jbd_lock_bh_journal_head()] + */ + struct buffer_head *b_bh; + + /* + * Reference count - see description in journal.c + * [jbd_lock_bh_journal_head()] + */ + int b_jcount; + + /* + * Journalling list for this buffer [jbd_lock_bh_state()] + */ + unsigned b_jlist; + + /* + * This flag signals the buffer has been modified by + * the currently running transaction + * [jbd_lock_bh_state()] + */ + unsigned b_modified; + + /* + * This feild tracks the last transaction id in which this buffer + * has been cowed + * [jbd_lock_bh_state()] + */ + tid_t b_cow_tid; + + /* + * Copy of the buffer data frozen for writing to the log. + * [jbd_lock_bh_state()] + */ + char *b_frozen_data; + + /* + * Pointer to a saved copy of the buffer containing no uncommitted + * deallocation references, so that allocations can avoid overwriting + * uncommitted deletes. [jbd_lock_bh_state()] + */ + char *b_committed_data; + + /* + * Pointer to the compound transaction which owns this buffer's + * metadata: either the running transaction or the committing + * transaction (if there is one). Only applies to buffers on a + * transaction's data or metadata journaling list. + * [j_list_lock] [jbd_lock_bh_state()] + * Either of these locks is enough for reading, both are needed for + * changes. + */ + transaction_t *b_transaction; + + /* + * Pointer to the running compound transaction which is currently + * modifying the buffer's metadata, if there was already a transaction + * committing it when the new transaction touched it. 
+ * [t_list_lock] [jbd_lock_bh_state()] + */ + transaction_t *b_next_transaction; + + /* + * Doubly-linked list of buffers on a transaction's data, metadata or + * forget queue. [t_list_lock] [jbd_lock_bh_state()] + */ + struct journal_head *b_tnext, *b_tprev; + + /* + * Pointer to the compound transaction against which this buffer + * is checkpointed. Only dirty buffers can be checkpointed. + * [j_list_lock] + */ + transaction_t *b_cp_transaction; + + /* + * Doubly-linked list of buffers still remaining to be flushed + * before an old transaction can be checkpointed. + * [j_list_lock] + */ + struct journal_head *b_cpnext, *b_cpprev; + + /* Trigger type */ + struct jbd2_buffer_trigger_type *b_triggers; + + /* Trigger type for the committing transaction's frozen data */ + struct jbd2_buffer_trigger_type *b_frozen_triggers; +}; + +#endif /* JOURNAL_HEAD_H_INCLUDED */ diff --git a/include/linux/joystick.h b/include/linux/joystick.h new file mode 100644 index 00000000..47199b13 --- /dev/null +++ b/include/linux/joystick.h @@ -0,0 +1,145 @@ +#ifndef _LINUX_JOYSTICK_H +#define _LINUX_JOYSTICK_H + +/* + * Copyright (C) 1996-2000 Vojtech Pavlik + * + * Sponsored by SuSE + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Should you need to contact me, the author, you can do so either by + * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: + * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic + */ + +#include <linux/types.h> +#include <linux/input.h> + +/* + * Version + */ + +#define JS_VERSION 0x020100 + +/* + * Types and constants for reading from /dev/js + */ + +#define JS_EVENT_BUTTON 0x01 /* button pressed/released */ +#define JS_EVENT_AXIS 0x02 /* joystick moved */ +#define JS_EVENT_INIT 0x80 /* initial state of device */ + +struct js_event { + __u32 time; /* event timestamp in milliseconds */ + __s16 value; /* value */ + __u8 type; /* event type */ + __u8 number; /* axis/button number */ +}; + +/* + * IOCTL commands for joystick driver + */ + +#define JSIOCGVERSION _IOR('j', 0x01, __u32) /* get driver version */ + +#define JSIOCGAXES _IOR('j', 0x11, __u8) /* get number of axes */ +#define JSIOCGBUTTONS _IOR('j', 0x12, __u8) /* get number of buttons */ +#define JSIOCGNAME(len) _IOC(_IOC_READ, 'j', 0x13, len) /* get identifier string */ + +#define JSIOCSCORR _IOW('j', 0x21, struct js_corr) /* set correction values */ +#define JSIOCGCORR _IOR('j', 0x22, struct js_corr) /* get correction values */ + +#define JSIOCSAXMAP _IOW('j', 0x31, __u8[ABS_CNT]) /* set axis mapping */ +#define JSIOCGAXMAP _IOR('j', 0x32, __u8[ABS_CNT]) /* get axis mapping */ +#define JSIOCSBTNMAP _IOW('j', 0x33, __u16[KEY_MAX - BTN_MISC + 1]) /* set button mapping */ +#define JSIOCGBTNMAP _IOR('j', 0x34, __u16[KEY_MAX - BTN_MISC + 1]) /* get button mapping */ + +/* + * Types and constants for get/set correction + */ 
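A short userspace sketch of the event interface declared above: read struct js_event records and query the axis/button counts with the JSIOC* ioctls. The device path /dev/input/js0 is an assumption and error handling is minimal:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/joystick.h>

int main(void)
{
	int fd = open("/dev/input/js0", O_RDONLY);	/* assumed device node */
	unsigned char axes = 0, buttons = 0;
	struct js_event e;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, JSIOCGAXES, &axes);
	ioctl(fd, JSIOCGBUTTONS, &buttons);
	printf("%u axes, %u buttons\n", axes, buttons);

	while (read(fd, &e, sizeof(e)) == sizeof(e)) {
		if (e.type & JS_EVENT_INIT)
			continue;	/* skip synthetic initial-state events */
		if (e.type & JS_EVENT_BUTTON)
			printf("button %u -> %d\n", e.number, e.value);
		else if (e.type & JS_EVENT_AXIS)
			printf("axis %u -> %d\n", e.number, e.value);
	}
	close(fd);
	return 0;
}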
+ +#define JS_CORR_NONE 0x00 /* returns raw values */ +#define JS_CORR_BROKEN 0x01 /* broken line */ + +struct js_corr { + __s32 coef[8]; + __s16 prec; + __u16 type; +}; + +/* + * v0.x compatibility definitions + */ + +#define JS_RETURN sizeof(struct JS_DATA_TYPE) +#define JS_TRUE 1 +#define JS_FALSE 0 +#define JS_X_0 0x01 +#define JS_Y_0 0x02 +#define JS_X_1 0x04 +#define JS_Y_1 0x08 +#define JS_MAX 2 + +#define JS_DEF_TIMEOUT 0x1300 +#define JS_DEF_CORR 0 +#define JS_DEF_TIMELIMIT 10L + +#define JS_SET_CAL 1 +#define JS_GET_CAL 2 +#define JS_SET_TIMEOUT 3 +#define JS_GET_TIMEOUT 4 +#define JS_SET_TIMELIMIT 5 +#define JS_GET_TIMELIMIT 6 +#define JS_GET_ALL 7 +#define JS_SET_ALL 8 + +struct JS_DATA_TYPE { + __s32 buttons; + __s32 x; + __s32 y; +}; + +struct JS_DATA_SAVE_TYPE_32 { + __s32 JS_TIMEOUT; + __s32 BUSY; + __s32 JS_EXPIRETIME; + __s32 JS_TIMELIMIT; + struct JS_DATA_TYPE JS_SAVE; + struct JS_DATA_TYPE JS_CORR; +}; + +struct JS_DATA_SAVE_TYPE_64 { + __s32 JS_TIMEOUT; + __s32 BUSY; + __s64 JS_EXPIRETIME; + __s64 JS_TIMELIMIT; + struct JS_DATA_TYPE JS_SAVE; + struct JS_DATA_TYPE JS_CORR; +}; + +#ifdef __KERNEL__ +#if BITS_PER_LONG == 64 +#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_64 +#elif BITS_PER_LONG == 32 +#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_32 +#else +#error Unexpected BITS_PER_LONG +#endif +#endif + +#endif /* _LINUX_JOYSTICK_H */ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h new file mode 100644 index 00000000..c513a405 --- /dev/null +++ b/include/linux/jump_label.h @@ -0,0 +1,226 @@ +#ifndef _LINUX_JUMP_LABEL_H +#define _LINUX_JUMP_LABEL_H + +/* + * Jump label support + * + * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com> + * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> + * + * Jump labels provide an interface to generate dynamic branches using + * self-modifying code. Assuming toolchain and architecture support the result + * of a "if (static_key_false(&key))" statement is a unconditional branch (which + * defaults to false - and the true block is placed out of line). + * + * However at runtime we can change the branch target using + * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key + * object and for as long as there are references all branches referring to + * that particular key will point to the (out of line) true block. + * + * Since this relies on modifying code the static_key_slow_{inc,dec}() functions + * must be considered absolute slow paths (machine wide synchronization etc.). + * OTOH, since the affected branches are unconditional their runtime overhead + * will be absolutely minimal, esp. in the default (off) case where the total + * effect is a single NOP of appropriate size. The on case will patch in a jump + * to the out-of-line block. + * + * When the control is directly exposed to userspace it is prudent to delay the + * decrement to avoid high frequency code modifications which can (and do) + * cause significant performance degradation. Struct static_key_deferred and + * static_key_slow_dec_deferred() provide for this. + * + * Lacking toolchain and or architecture support, it falls back to a simple + * conditional branch. + * + * struct static_key my_key = STATIC_KEY_INIT_TRUE; + * + * if (static_key_true(&my_key)) { + * } + * + * will result in the true case being in-line and starts the key with a single + * reference. Mixing static_key_true() and static_key_false() on the same key is not + * allowed. 
+ * + * Not initializing the key (static data is initialized to 0s anyway) is the + * same as using STATIC_KEY_INIT_FALSE and static_key_false() is + * equivalent with static_branch(). + * +*/ + +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/workqueue.h> + +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) + +struct static_key { + atomic_t enabled; +/* Set lsb bit to 1 if branch is default true, 0 ot */ + struct jump_entry *entries; +#ifdef CONFIG_MODULES + struct static_key_mod *next; +#endif +}; + +struct static_key_deferred { + struct static_key key; + unsigned long timeout; + struct delayed_work work; +}; + +# include <asm/jump_label.h> +# define HAVE_JUMP_LABEL +#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */ + +enum jump_label_type { + JUMP_LABEL_DISABLE = 0, + JUMP_LABEL_ENABLE, +}; + +struct module; + +#ifdef HAVE_JUMP_LABEL + +#define JUMP_LABEL_TRUE_BRANCH 1UL + +static +inline struct jump_entry *jump_label_get_entries(struct static_key *key) +{ + return (struct jump_entry *)((unsigned long)key->entries + & ~JUMP_LABEL_TRUE_BRANCH); +} + +static inline bool jump_label_get_branch_default(struct static_key *key) +{ + if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH) + return true; + return false; +} + +static __always_inline bool static_key_false(struct static_key *key) +{ + return arch_static_branch(key); +} + +static __always_inline bool static_key_true(struct static_key *key) +{ + return !static_key_false(key); +} + +/* Deprecated. Please use 'static_key_false() instead. */ +static __always_inline bool static_branch(struct static_key *key) +{ + return arch_static_branch(key); +} + +extern struct jump_entry __start___jump_table[]; +extern struct jump_entry __stop___jump_table[]; + +extern void jump_label_init(void); +extern void jump_label_lock(void); +extern void jump_label_unlock(void); +extern void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type); +extern void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type); +extern int jump_label_text_reserved(void *start, void *end); +extern void static_key_slow_inc(struct static_key *key); +extern void static_key_slow_dec(struct static_key *key); +extern void static_key_slow_dec_deferred(struct static_key_deferred *key); +extern void jump_label_apply_nops(struct module *mod); +extern void +jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); + +#define STATIC_KEY_INIT_TRUE ((struct static_key) \ + { .enabled = ATOMIC_INIT(1), .entries = (void *)1 }) +#define STATIC_KEY_INIT_FALSE ((struct static_key) \ + { .enabled = ATOMIC_INIT(0), .entries = (void *)0 }) + +#else /* !HAVE_JUMP_LABEL */ + +#include <linux/atomic.h> + +struct static_key { + atomic_t enabled; +}; + +static __always_inline void jump_label_init(void) +{ +} + +struct static_key_deferred { + struct static_key key; +}; + +static __always_inline bool static_key_false(struct static_key *key) +{ + if (unlikely(atomic_read(&key->enabled)) > 0) + return true; + return false; +} + +static __always_inline bool static_key_true(struct static_key *key) +{ + if (likely(atomic_read(&key->enabled)) > 0) + return true; + return false; +} + +/* Deprecated. Please use 'static_key_false() instead. 
*/ +static __always_inline bool static_branch(struct static_key *key) +{ + if (unlikely(atomic_read(&key->enabled)) > 0) + return true; + return false; +} + +static inline void static_key_slow_inc(struct static_key *key) +{ + atomic_inc(&key->enabled); +} + +static inline void static_key_slow_dec(struct static_key *key) +{ + atomic_dec(&key->enabled); +} + +static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) +{ + static_key_slow_dec(&key->key); +} + +static inline int jump_label_text_reserved(void *start, void *end) +{ + return 0; +} + +static inline void jump_label_lock(void) {} +static inline void jump_label_unlock(void) {} + +static inline int jump_label_apply_nops(struct module *mod) +{ + return 0; +} + +static inline void +jump_label_rate_limit(struct static_key_deferred *key, + unsigned long rl) +{ +} + +#define STATIC_KEY_INIT_TRUE ((struct static_key) \ + { .enabled = ATOMIC_INIT(1) }) +#define STATIC_KEY_INIT_FALSE ((struct static_key) \ + { .enabled = ATOMIC_INIT(0) }) + +#endif /* HAVE_JUMP_LABEL */ + +#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE +#define jump_label_enabled static_key_enabled + +static inline bool static_key_enabled(struct static_key *key) +{ + return (atomic_read(&key->enabled) > 0); +} + +#endif /* _LINUX_JUMP_LABEL_H */ diff --git a/include/linux/jz4740-adc.h b/include/linux/jz4740-adc.h new file mode 100644 index 00000000..8184578f --- /dev/null +++ b/include/linux/jz4740-adc.h @@ -0,0 +1,32 @@ + +#ifndef __LINUX_JZ4740_ADC +#define __LINUX_JZ4740_ADC + +struct device; + +/* + * jz4740_adc_set_config - Configure a JZ4740 adc device + * @dev: Pointer to a jz4740-adc device + * @mask: Mask for the config value to be set + * @val: Value to be set + * + * This function can be used by the JZ4740 ADC mfd cells to configure their + * options in the shared config register. +*/ +int jz4740_adc_set_config(struct device *dev, uint32_t mask, uint32_t val); + +#define JZ_ADC_CONFIG_SPZZ BIT(31) +#define JZ_ADC_CONFIG_EX_IN BIT(30) +#define JZ_ADC_CONFIG_DNUM_MASK (0x7 << 16) +#define JZ_ADC_CONFIG_DMA_ENABLE BIT(15) +#define JZ_ADC_CONFIG_XYZ_MASK (0x2 << 13) +#define JZ_ADC_CONFIG_SAMPLE_NUM_MASK (0x7 << 10) +#define JZ_ADC_CONFIG_CLKDIV_MASK (0xf << 5) +#define JZ_ADC_CONFIG_BAT_MB BIT(4) + +#define JZ_ADC_CONFIG_DNUM(dnum) ((dnum) << 16) +#define JZ_ADC_CONFIG_XYZ_OFFSET(dnum) ((xyz) << 13) +#define JZ_ADC_CONFIG_SAMPLE_NUM(x) ((x) << 10) +#define JZ_ADC_CONFIG_CLKDIV(div) ((div) << 5) + +#endif diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h new file mode 100644 index 00000000..38757195 --- /dev/null +++ b/include/linux/kallsyms.h @@ -0,0 +1,121 @@ +/* Rewritten and vastly simplified by Rusty Russell for in-kernel + * module loader: + * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation + */ +#ifndef _LINUX_KALLSYMS_H +#define _LINUX_KALLSYMS_H + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/stddef.h> + +#define KSYM_NAME_LEN 128 +#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ + 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) + +struct module; + +#ifdef CONFIG_KALLSYMS +/* Lookup the address for a symbol. Returns 0 if not found. 
*/ +unsigned long kallsyms_lookup_name(const char *name); + +/* Call a function on each kallsyms symbol in the core kernel */ +int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, + unsigned long), + void *data); + +extern int kallsyms_lookup_size_offset(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset); + +/* Lookup an address. modname is set to NULL if it's in the kernel. */ +const char *kallsyms_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, char *namebuf); + +/* Look up a kernel symbol and return it in a text buffer. */ +extern int sprint_symbol(char *buffer, unsigned long address); +extern int sprint_backtrace(char *buffer, unsigned long address); + +/* Look up a kernel symbol and print it to the kernel messages. */ +extern void __print_symbol(const char *fmt, unsigned long address); + +int lookup_symbol_name(unsigned long addr, char *symname); +int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); + +#else /* !CONFIG_KALLSYMS */ + +static inline unsigned long kallsyms_lookup_name(const char *name) +{ + return 0; +} + +static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + +static inline int kallsyms_lookup_size_offset(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset) +{ + return 0; +} + +static inline const char *kallsyms_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, char *namebuf) +{ + return NULL; +} + +static inline int sprint_symbol(char *buffer, unsigned long addr) +{ + *buffer = '\0'; + return 0; +} + +static inline int sprint_backtrace(char *buffer, unsigned long addr) +{ + *buffer = '\0'; + return 0; +} + +static inline int lookup_symbol_name(unsigned long addr, char *symname) +{ + return -ERANGE; +} + +static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name) +{ + return -ERANGE; +} + +/* Stupid that this does nothing, but I didn't create this mess. */ +#define __print_symbol(fmt, addr) +#endif /*CONFIG_KALLSYMS*/ + +/* This macro allows us to keep printk typechecking */ +static __printf(1, 2) +void __check_printsym_format(const char *fmt, ...) 
+{ +} + +static inline void print_symbol(const char *fmt, unsigned long addr) +{ + __check_printsym_format(fmt, ""); + __print_symbol(fmt, (unsigned long) + __builtin_extract_return_addr((void *)addr)); +} + +static inline void print_ip_sym(unsigned long ip) +{ + printk("[<%p>] %pS\n", (void *) ip, (void *) ip); +} + +#endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kbd_diacr.h b/include/linux/kbd_diacr.h new file mode 100644 index 00000000..7274ec68 --- /dev/null +++ b/include/linux/kbd_diacr.h @@ -0,0 +1,8 @@ +#ifndef _DIACR_H +#define _DIACR_H +#include <linux/kd.h> + +extern struct kbdiacruc accent_table[]; +extern unsigned int accent_table_size; + +#endif /* _DIACR_H */ diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h new file mode 100644 index 00000000..daf4a3a4 --- /dev/null +++ b/include/linux/kbd_kern.h @@ -0,0 +1,160 @@ +#ifndef _KBD_KERN_H +#define _KBD_KERN_H + +#include <linux/tty.h> +#include <linux/interrupt.h> +#include <linux/keyboard.h> + +extern struct tasklet_struct keyboard_tasklet; + +extern char *func_table[MAX_NR_FUNC]; +extern char func_buf[]; +extern char *funcbufptr; +extern int funcbufsize, funcbufleft; + +/* + * kbd->xxx contains the VC-local things (flag settings etc..) + * + * Note: externally visible are LED_SCR, LED_NUM, LED_CAP defined in kd.h + * The code in KDGETLED / KDSETLED depends on the internal and + * external order being the same. + * + * Note: lockstate is used as index in the array key_map. + */ +struct kbd_struct { + + unsigned char lockstate; +/* 8 modifiers - the names do not have any meaning at all; + they can be associated to arbitrarily chosen keys */ +#define VC_SHIFTLOCK KG_SHIFT /* shift lock mode */ +#define VC_ALTGRLOCK KG_ALTGR /* altgr lock mode */ +#define VC_CTRLLOCK KG_CTRL /* control lock mode */ +#define VC_ALTLOCK KG_ALT /* alt lock mode */ +#define VC_SHIFTLLOCK KG_SHIFTL /* shiftl lock mode */ +#define VC_SHIFTRLOCK KG_SHIFTR /* shiftr lock mode */ +#define VC_CTRLLLOCK KG_CTRLL /* ctrll lock mode */ +#define VC_CTRLRLOCK KG_CTRLR /* ctrlr lock mode */ + unsigned char slockstate; /* for `sticky' Shift, Ctrl, etc. 
*/ + + unsigned char ledmode:2; /* one 2-bit value */ +#define LED_SHOW_FLAGS 0 /* traditional state */ +#define LED_SHOW_IOCTL 1 /* only change leds upon ioctl */ +#define LED_SHOW_MEM 2 /* `heartbeat': peek into memory */ + + unsigned char ledflagstate:4; /* flags, not lights */ + unsigned char default_ledflagstate:4; +#define VC_SCROLLOCK 0 /* scroll-lock mode */ +#define VC_NUMLOCK 1 /* numeric lock mode */ +#define VC_CAPSLOCK 2 /* capslock mode */ +#define VC_KANALOCK 3 /* kanalock mode */ + + unsigned char kbdmode:3; /* one 3-bit value */ +#define VC_XLATE 0 /* translate keycodes using keymap */ +#define VC_MEDIUMRAW 1 /* medium raw (keycode) mode */ +#define VC_RAW 2 /* raw (scancode) mode */ +#define VC_UNICODE 3 /* Unicode mode */ +#define VC_OFF 4 /* disabled mode */ + + unsigned char modeflags:5; +#define VC_APPLIC 0 /* application key mode */ +#define VC_CKMODE 1 /* cursor key mode */ +#define VC_REPEAT 2 /* keyboard repeat */ +#define VC_CRLF 3 /* 0 - enter sends CR, 1 - enter sends CRLF */ +#define VC_META 4 /* 0 - meta, 1 - meta=prefix with ESC */ +}; + +extern int kbd_init(void); + +extern unsigned char getledstate(void); +extern void setledstate(struct kbd_struct *kbd, unsigned int led); + +extern int do_poke_blanked_console; + +extern void (*kbd_ledfunc)(unsigned int led); + +extern int set_console(int nr); +extern void schedule_console_callback(void); + +/* FIXME: review locking for vt.c callers */ +static inline void set_leds(void) +{ + tasklet_schedule(&keyboard_tasklet); +} + +static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag) +{ + return ((kbd->modeflags >> flag) & 1); +} + +static inline int vc_kbd_led(struct kbd_struct * kbd, int flag) +{ + return ((kbd->ledflagstate >> flag) & 1); +} + +static inline void set_vc_kbd_mode(struct kbd_struct * kbd, int flag) +{ + kbd->modeflags |= 1 << flag; +} + +static inline void set_vc_kbd_led(struct kbd_struct * kbd, int flag) +{ + kbd->ledflagstate |= 1 << flag; +} + +static inline void clr_vc_kbd_mode(struct kbd_struct * kbd, int flag) +{ + kbd->modeflags &= ~(1 << flag); +} + +static inline void clr_vc_kbd_led(struct kbd_struct * kbd, int flag) +{ + kbd->ledflagstate &= ~(1 << flag); +} + +static inline void chg_vc_kbd_lock(struct kbd_struct * kbd, int flag) +{ + kbd->lockstate ^= 1 << flag; +} + +static inline void chg_vc_kbd_slock(struct kbd_struct * kbd, int flag) +{ + kbd->slockstate ^= 1 << flag; +} + +static inline void chg_vc_kbd_mode(struct kbd_struct * kbd, int flag) +{ + kbd->modeflags ^= 1 << flag; +} + +static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag) +{ + kbd->ledflagstate ^= 1 << flag; +} + +#define U(x) ((x) ^ 0xf000) + +#define BRL_UC_ROW 0x2800 + +/* keyboard.c */ + +struct console; + +void compute_shiftstate(void); + +/* defkeymap.c */ + +extern unsigned int keymap_count; + +/* console.c */ + +static inline void con_schedule_flip(struct tty_struct *t) +{ + unsigned long flags; + spin_lock_irqsave(&t->buf.lock, flags); + if (t->buf.tail != NULL) + t->buf.tail->commit = t->buf.tail->used; + spin_unlock_irqrestore(&t->buf.lock, flags); + schedule_work(&t->buf.work); +} + +#endif diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h new file mode 100644 index 00000000..22a72198 --- /dev/null +++ b/include/linux/kbuild.h @@ -0,0 +1,15 @@ +#ifndef __LINUX_KBUILD_H +#define __LINUX_KBUILD_H + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +#define OFFSET(sym, str, mem) \ + DEFINE(sym, 
offsetof(struct str, mem)) + +#define COMMENT(x) \ + asm volatile("\n->#" x) + +#endif diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h new file mode 100644 index 00000000..be342b94 --- /dev/null +++ b/include/linux/kconfig.h @@ -0,0 +1,46 @@ +#ifndef __LINUX_KCONFIG_H +#define __LINUX_KCONFIG_H + +#include <generated/autoconf.h> + +/* + * Helper macros to use CONFIG_ options in C/CPP expressions. Note that + * these only work with boolean and tristate options. + */ + +/* + * Getting something that works in C and CPP for an arg that may or may + * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1" + * we match on the placeholder define, insert the "0," for arg1 and generate + * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one). + * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when + * the last step cherry picks the 2nd arg, we get a zero. + */ +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) val + +/* + * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', + * 0 otherwise. + * + */ +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +/* + * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 + * otherwise. For boolean options, this is equivalent to + * IS_ENABLED(CONFIG_FOO). + */ +#define IS_BUILTIN(option) config_enabled(option) + +/* + * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 + * otherwise. + */ +#define IS_MODULE(option) config_enabled(option##_MODULE) + +#endif /* __LINUX_KCONFIG_H */ diff --git a/include/linux/kd.h b/include/linux/kd.h new file mode 100644 index 00000000..c36d8476 --- /dev/null +++ b/include/linux/kd.h @@ -0,0 +1,186 @@ +#ifndef _LINUX_KD_H +#define _LINUX_KD_H +#include <linux/types.h> +#include <linux/compiler.h> + +/* 0x4B is 'K', to avoid collision with termios and vt */ + +#define GIO_FONT 0x4B60 /* gets font in expanded form */ +#define PIO_FONT 0x4B61 /* use font in expanded form */ + +#define GIO_FONTX 0x4B6B /* get font using struct consolefontdesc */ +#define PIO_FONTX 0x4B6C /* set font using struct consolefontdesc */ +struct consolefontdesc { + unsigned short charcount; /* characters in font (256 or 512) */ + unsigned short charheight; /* scan lines per character (1-32) */ + char __user *chardata; /* font data in expanded form */ +}; + +#define PIO_FONTRESET 0x4B6D /* reset to default font */ + +#define GIO_CMAP 0x4B70 /* gets colour palette on VGA+ */ +#define PIO_CMAP 0x4B71 /* sets colour palette on VGA+ */ + +#define KIOCSOUND 0x4B2F /* start sound generation (0 for off) */ +#define KDMKTONE 0x4B30 /* generate tone */ + +#define KDGETLED 0x4B31 /* return current led state */ +#define KDSETLED 0x4B32 /* set led state [lights, not flags] */ +#define LED_SCR 0x01 /* scroll lock led */ +#define LED_NUM 0x02 /* num lock led */ +#define LED_CAP 0x04 /* caps lock led */ + +#define KDGKBTYPE 0x4B33 /* get keyboard type */ +#define KB_84 0x01 +#define KB_101 0x02 /* this is what we always answer */ +#define KB_OTHER 0x03 + +#define KDADDIO 0x4B34 /* add i/o port as valid */ +#define KDDELIO 0x4B35 /* del i/o port as valid */ +#define KDENABIO 0x4B36 /* enable i/o to video board */ +#define KDDISABIO 0x4B37 /* disable i/o to video board */ + 
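The config_enabled()/IS_ENABLED() machinery in kconfig.h above works entirely in the preprocessor: when CONFIG_FOO is defined as 1, __ARG_PLACEHOLDER_1 injects an extra argument so the pick-the-second-argument step yields 1, otherwise it yields 0. A standalone program reproducing the same macros to show the three cases (CONFIG_FOO, CONFIG_BAR and CONFIG_BAZ are made-up option names):

#include <stdio.h>

#define CONFIG_FOO 1		/* pretend CONFIG_FOO=y */
#define CONFIG_BAR_MODULE 1	/* pretend CONFIG_BAR=m */
/* CONFIG_BAZ intentionally not defined (=n) */

#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg)		_config_enabled(cfg)
#define _config_enabled(value)		__config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk)	___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val

#define IS_ENABLED(option) \
	(config_enabled(option) || config_enabled(option##_MODULE))

int main(void)
{
	printf("FOO: %d\n", IS_ENABLED(CONFIG_FOO));	/* 1: built in */
	printf("BAR: %d\n", IS_ENABLED(CONFIG_BAR));	/* 1: modular */
	printf("BAZ: %d\n", IS_ENABLED(CONFIG_BAZ));	/* 0: disabled */
	return 0;
}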
+#define KDSETMODE 0x4B3A /* set text/graphics mode */ +#define KD_TEXT 0x00 +#define KD_GRAPHICS 0x01 +#define KD_TEXT0 0x02 /* obsolete */ +#define KD_TEXT1 0x03 /* obsolete */ +#define KDGETMODE 0x4B3B /* get current mode */ + +#define KDMAPDISP 0x4B3C /* map display into address space */ +#define KDUNMAPDISP 0x4B3D /* unmap display from address space */ + +typedef char scrnmap_t; +#define E_TABSZ 256 +#define GIO_SCRNMAP 0x4B40 /* get screen mapping from kernel */ +#define PIO_SCRNMAP 0x4B41 /* put screen mapping table in kernel */ +#define GIO_UNISCRNMAP 0x4B69 /* get full Unicode screen mapping */ +#define PIO_UNISCRNMAP 0x4B6A /* set full Unicode screen mapping */ + +#define GIO_UNIMAP 0x4B66 /* get unicode-to-font mapping from kernel */ +struct unipair { + unsigned short unicode; + unsigned short fontpos; +}; +struct unimapdesc { + unsigned short entry_ct; + struct unipair __user *entries; +}; +#define PIO_UNIMAP 0x4B67 /* put unicode-to-font mapping in kernel */ +#define PIO_UNIMAPCLR 0x4B68 /* clear table, possibly advise hash algorithm */ +struct unimapinit { + unsigned short advised_hashsize; /* 0 if no opinion */ + unsigned short advised_hashstep; /* 0 if no opinion */ + unsigned short advised_hashlevel; /* 0 if no opinion */ +}; + +#define UNI_DIRECT_BASE 0xF000 /* start of Direct Font Region */ +#define UNI_DIRECT_MASK 0x01FF /* Direct Font Region bitmask */ + +#define K_RAW 0x00 +#define K_XLATE 0x01 +#define K_MEDIUMRAW 0x02 +#define K_UNICODE 0x03 +#define K_OFF 0x04 +#define KDGKBMODE 0x4B44 /* gets current keyboard mode */ +#define KDSKBMODE 0x4B45 /* sets current keyboard mode */ + +#define K_METABIT 0x03 +#define K_ESCPREFIX 0x04 +#define KDGKBMETA 0x4B62 /* gets meta key handling mode */ +#define KDSKBMETA 0x4B63 /* sets meta key handling mode */ + +#define K_SCROLLLOCK 0x01 +#define K_NUMLOCK 0x02 +#define K_CAPSLOCK 0x04 +#define KDGKBLED 0x4B64 /* get led flags (not lights) */ +#define KDSKBLED 0x4B65 /* set led flags (not lights) */ + +struct kbentry { + unsigned char kb_table; + unsigned char kb_index; + unsigned short kb_value; +}; +#define K_NORMTAB 0x00 +#define K_SHIFTTAB 0x01 +#define K_ALTTAB 0x02 +#define K_ALTSHIFTTAB 0x03 + +#define KDGKBENT 0x4B46 /* gets one entry in translation table */ +#define KDSKBENT 0x4B47 /* sets one entry in translation table */ + +struct kbsentry { + unsigned char kb_func; + unsigned char kb_string[512]; +}; +#define KDGKBSENT 0x4B48 /* gets one function key string entry */ +#define KDSKBSENT 0x4B49 /* sets one function key string entry */ + +struct kbdiacr { + unsigned char diacr, base, result; +}; +struct kbdiacrs { + unsigned int kb_cnt; /* number of entries in following array */ + struct kbdiacr kbdiacr[256]; /* MAX_DIACR from keyboard.h */ +}; +#define KDGKBDIACR 0x4B4A /* read kernel accent table */ +#define KDSKBDIACR 0x4B4B /* write kernel accent table */ + +struct kbdiacruc { + unsigned int diacr, base, result; +}; +struct kbdiacrsuc { + unsigned int kb_cnt; /* number of entries in following array */ + struct kbdiacruc kbdiacruc[256]; /* MAX_DIACR from keyboard.h */ +}; +#define KDGKBDIACRUC 0x4BFA /* read kernel accent table - UCS */ +#define KDSKBDIACRUC 0x4BFB /* write kernel accent table - UCS */ + +struct kbkeycode { + unsigned int scancode, keycode; +}; +#define KDGETKEYCODE 0x4B4C /* read kernel keycode table entry */ +#define KDSETKEYCODE 0x4B4D /* write kernel keycode table entry */ + +#define KDSIGACCEPT 0x4B4E /* accept kbd generated signals */ + +struct kbd_repeat { + int delay; /* in msec; <= 0: don't 
change */ + int period; /* in msec; <= 0: don't change */ + /* earlier this field was misnamed "rate" */ +}; + +#define KDKBDREP 0x4B52 /* set keyboard delay/repeat rate; + * actually used values are returned */ + +#define KDFONTOP 0x4B72 /* font operations */ + +struct console_font_op { + unsigned int op; /* operation code KD_FONT_OP_* */ + unsigned int flags; /* KD_FONT_FLAG_* */ + unsigned int width, height; /* font size */ + unsigned int charcount; + unsigned char __user *data; /* font data with height fixed to 32 */ +}; + +struct console_font { + unsigned int width, height; /* font size */ + unsigned int charcount; + unsigned char *data; /* font data with height fixed to 32 */ +}; + +#define KD_FONT_OP_SET 0 /* Set font */ +#define KD_FONT_OP_GET 1 /* Get font */ +#define KD_FONT_OP_SET_DEFAULT 2 /* Set font to default, data points to name / NULL */ +#define KD_FONT_OP_COPY 3 /* Copy from another console */ + +#define KD_FONT_FLAG_DONT_RECALC 1 /* Don't recalculate hw charcell size [compat] */ +#ifdef __KERNEL__ +#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */ +#endif + +/* note: 0x4B00-0x4B4E all have had a value at some time; + don't reuse for the time being */ +/* note: 0x4B60-0x4B6D, 0x4B70-0x4B72 used above */ + +#endif /* _LINUX_KD_H */ diff --git a/include/linux/kdb.h b/include/linux/kdb.h new file mode 100644 index 00000000..06472585 --- /dev/null +++ b/include/linux/kdb.h @@ -0,0 +1,169 @@ +#ifndef _KDB_H +#define _KDB_H + +/* + * Kernel Debugger Architecture Independent Global Headers + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com> + * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> + */ + +#ifdef CONFIG_KGDB_KDB +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/atomic.h> + +#define KDB_POLL_FUNC_MAX 5 +extern int kdb_poll_idx; + +/* + * kdb_initial_cpu is initialized to -1, and is set to the cpu + * number whenever the kernel debugger is entered. + */ +extern int kdb_initial_cpu; +extern atomic_t kdb_event; + +/* Types and messages used for dynamically added kdb shell commands */ + +#define KDB_MAXARGS 16 /* Maximum number of arguments to a function */ + +typedef enum { + KDB_REPEAT_NONE = 0, /* Do not repeat this command */ + KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ + KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ +} kdb_repeat_t; + +typedef int (*kdb_func_t)(int, const char **); + +/* KDB return codes from a command or internal kdb function */ +#define KDB_NOTFOUND (-1) +#define KDB_ARGCOUNT (-2) +#define KDB_BADWIDTH (-3) +#define KDB_BADRADIX (-4) +#define KDB_NOTENV (-5) +#define KDB_NOENVVALUE (-6) +#define KDB_NOTIMP (-7) +#define KDB_ENVFULL (-8) +#define KDB_ENVBUFFULL (-9) +#define KDB_TOOMANYBPT (-10) +#define KDB_TOOMANYDBREGS (-11) +#define KDB_DUPBPT (-12) +#define KDB_BPTNOTFOUND (-13) +#define KDB_BADMODE (-14) +#define KDB_BADINT (-15) +#define KDB_INVADDRFMT (-16) +#define KDB_BADREG (-17) +#define KDB_BADCPUNUM (-18) +#define KDB_BADLENGTH (-19) +#define KDB_NOBP (-20) +#define KDB_BADADDR (-21) + +/* + * kdb_diemsg + * + * Contains a pointer to the last string supplied to the + * kernel 'die' panic function. 
+ */ +extern const char *kdb_diemsg; + +#define KDB_FLAG_EARLYKDB (1 << 0) /* set from boot parameter kdb=early */ +#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */ +#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */ +#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */ +#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when + * kdb is off */ +#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available, + * kdb is disabled */ +#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do + * not use keyboard */ +#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do + * not use keyboard */ + +extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */ + +extern void kdb_save_flags(void); +extern void kdb_restore_flags(void); + +#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag) +#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag)) +#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag)) + +/* + * External entry point for the kernel debugger. The pt_regs + * at the time of entry are supplied along with the reason for + * entry to the kernel debugger. + */ + +typedef enum { + KDB_REASON_ENTER = 1, /* KDB_ENTER() trap/fault - regs valid */ + KDB_REASON_ENTER_SLAVE, /* KDB_ENTER_SLAVE() trap/fault - regs valid */ + KDB_REASON_BREAK, /* Breakpoint inst. - regs valid */ + KDB_REASON_DEBUG, /* Debug Fault - regs valid */ + KDB_REASON_OOPS, /* Kernel Oops - regs valid */ + KDB_REASON_SWITCH, /* CPU switch - regs valid*/ + KDB_REASON_KEYBOARD, /* Keyboard entry - regs valid */ + KDB_REASON_NMI, /* Non-maskable interrupt; regs valid */ + KDB_REASON_RECURSE, /* Recursive entry to kdb; + * regs probably valid */ + KDB_REASON_SSTEP, /* Single Step trap. - regs valid */ +} kdb_reason_t; + +extern int kdb_trap_printk; +extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args); +extern __printf(1, 2) int kdb_printf(const char *, ...); +typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); + +extern void kdb_init(int level); + +/* Access to kdb specific polling devices */ +typedef int (*get_char_func)(void); +extern get_char_func kdb_poll_funcs[]; +extern int kdb_get_kbd_char(void); + +static inline +int kdb_process_cpu(const struct task_struct *p) +{ + unsigned int cpu = task_thread_info(p)->cpu; + if (cpu > num_possible_cpus()) + cpu = 0; + return cpu; +} + +/* kdb access to register set for stack dumping */ +extern struct pt_regs *kdb_current_regs; +#ifdef CONFIG_KALLSYMS +extern const char *kdb_walk_kallsyms(loff_t *pos); +#else /* ! CONFIG_KALLSYMS */ +static inline const char *kdb_walk_kallsyms(loff_t *pos) +{ + return NULL; +} +#endif /* ! CONFIG_KALLSYMS */ + +/* Dynamic kdb shell command registration */ +extern int kdb_register(char *, kdb_func_t, char *, char *, short); +extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, + short, kdb_repeat_t); +extern int kdb_unregister(char *); +#else /* ! CONFIG_KGDB_KDB */ +#define kdb_printf(...) +#define kdb_init(x) +#define kdb_register(...) +#define kdb_register_repeat(...) 
+#define kdb_uregister(x) +#endif /* CONFIG_KGDB_KDB */ +enum { + KDB_NOT_INITIALIZED, + KDB_INIT_EARLY, + KDB_INIT_FULL, +}; + +extern int kdbgetintenv(const char *, int *); +extern int kdb_set(int, const char **); + +#endif /* !_KDB_H */ diff --git a/include/linux/kdebug.h b/include/linux/kdebug.h new file mode 100644 index 00000000..ed815090 --- /dev/null +++ b/include/linux/kdebug.h @@ -0,0 +1,22 @@ +#ifndef _LINUX_KDEBUG_H +#define _LINUX_KDEBUG_H + +#include <asm/kdebug.h> + +struct notifier_block; + +struct die_args { + struct pt_regs *regs; + const char *str; + long err; + int trapnr; + int signr; +}; + +int register_die_notifier(struct notifier_block *nb); +int unregister_die_notifier(struct notifier_block *nb); + +int notify_die(enum die_val val, const char *str, + struct pt_regs *regs, long err, int trap, int sig); + +#endif /* _LINUX_KDEBUG_H */ diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h new file mode 100644 index 00000000..2dacab8b --- /dev/null +++ b/include/linux/kdev_t.h @@ -0,0 +1,100 @@ +#ifndef _LINUX_KDEV_T_H +#define _LINUX_KDEV_T_H +#ifdef __KERNEL__ +#define MINORBITS 20 +#define MINORMASK ((1U << MINORBITS) - 1) + +#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) +#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) +#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi)) + +#define print_dev_t(buffer, dev) \ + sprintf((buffer), "%u:%u\n", MAJOR(dev), MINOR(dev)) + +#define format_dev_t(buffer, dev) \ + ({ \ + sprintf(buffer, "%u:%u", MAJOR(dev), MINOR(dev)); \ + buffer; \ + }) + +/* acceptable for old filesystems */ +static inline int old_valid_dev(dev_t dev) +{ + return MAJOR(dev) < 256 && MINOR(dev) < 256; +} + +static inline u16 old_encode_dev(dev_t dev) +{ + return (MAJOR(dev) << 8) | MINOR(dev); +} + +static inline dev_t old_decode_dev(u16 val) +{ + return MKDEV((val >> 8) & 255, val & 255); +} + +static inline int new_valid_dev(dev_t dev) +{ + return 1; +} + +static inline u32 new_encode_dev(dev_t dev) +{ + unsigned major = MAJOR(dev); + unsigned minor = MINOR(dev); + return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); +} + +static inline dev_t new_decode_dev(u32 dev) +{ + unsigned major = (dev & 0xfff00) >> 8; + unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); + return MKDEV(major, minor); +} + +static inline int huge_valid_dev(dev_t dev) +{ + return 1; +} + +static inline u64 huge_encode_dev(dev_t dev) +{ + return new_encode_dev(dev); +} + +static inline dev_t huge_decode_dev(u64 dev) +{ + return new_decode_dev(dev); +} + +static inline int sysv_valid_dev(dev_t dev) +{ + return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18); +} + +static inline u32 sysv_encode_dev(dev_t dev) +{ + return MINOR(dev) | (MAJOR(dev) << 18); +} + +static inline unsigned sysv_major(u32 dev) +{ + return (dev >> 18) & 0x3fff; +} + +static inline unsigned sysv_minor(u32 dev) +{ + return dev & 0x3ffff; +} + +#else /* __KERNEL__ */ + +/* +Some programs want their definitions of MAJOR and MINOR and MKDEV +from the kernel sources. These must be the externally visible ones. 
+*/ +#define MAJOR(dev) ((dev)>>8) +#define MINOR(dev) ((dev) & 0xff) +#define MKDEV(ma,mi) ((ma)<<8 | (mi)) +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h new file mode 100644 index 00000000..a1bdf696 --- /dev/null +++ b/include/linux/kernel-page-flags.h @@ -0,0 +1,51 @@ +#ifndef LINUX_KERNEL_PAGE_FLAGS_H +#define LINUX_KERNEL_PAGE_FLAGS_H + +/* + * Stable page flag bits exported to user space + */ + +#define KPF_LOCKED 0 +#define KPF_ERROR 1 +#define KPF_REFERENCED 2 +#define KPF_UPTODATE 3 +#define KPF_DIRTY 4 +#define KPF_LRU 5 +#define KPF_ACTIVE 6 +#define KPF_SLAB 7 +#define KPF_WRITEBACK 8 +#define KPF_RECLAIM 9 +#define KPF_BUDDY 10 + +/* 11-20: new additions in 2.6.31 */ +#define KPF_MMAP 11 +#define KPF_ANON 12 +#define KPF_SWAPCACHE 13 +#define KPF_SWAPBACKED 14 +#define KPF_COMPOUND_HEAD 15 +#define KPF_COMPOUND_TAIL 16 +#define KPF_HUGE 17 +#define KPF_UNEVICTABLE 18 +#define KPF_HWPOISON 19 +#define KPF_NOPAGE 20 + +#define KPF_KSM 21 +#define KPF_THP 22 + +#ifdef __KERNEL__ + +/* kernel hacking assistances + * WARNING: subject to change, never rely on them! + */ +#define KPF_RESERVED 32 +#define KPF_MLOCKED 33 +#define KPF_MAPPEDTODISK 34 +#define KPF_PRIVATE 35 +#define KPF_PRIVATE_2 36 +#define KPF_OWNER_PRIVATE 37 +#define KPF_ARCH 38 +#define KPF_UNCACHED 39 + +#endif /* __KERNEL__ */ + +#endif /* LINUX_KERNEL_PAGE_FLAGS_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h new file mode 100644 index 00000000..747404ab --- /dev/null +++ b/include/linux/kernel.h @@ -0,0 +1,713 @@ +#ifndef _LINUX_KERNEL_H +#define _LINUX_KERNEL_H + +#include <linux/sysinfo.h> + +/* + * 'kernel.h' contains some often-used function prototypes etc + */ +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) + +#ifdef __KERNEL__ + +#include <stdarg.h> +#include <linux/linkage.h> +#include <linux/stddef.h> +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/bitops.h> +#include <linux/log2.h> +#include <linux/typecheck.h> +#include <linux/printk.h> +#include <linux/dynamic_debug.h> +#include <asm/byteorder.h> + +#define USHRT_MAX ((u16)(~0U)) +#define SHRT_MAX ((s16)(USHRT_MAX>>1)) +#define SHRT_MIN ((s16)(-SHRT_MAX - 1)) +#define INT_MAX ((int)(~0U>>1)) +#define INT_MIN (-INT_MAX - 1) +#define UINT_MAX (~0U) +#define LONG_MAX ((long)(~0UL>>1)) +#define LONG_MIN (-LONG_MAX - 1) +#define ULONG_MAX (~0UL) +#define LLONG_MAX ((long long)(~0ULL>>1)) +#define LLONG_MIN (-LLONG_MAX - 1) +#define ULLONG_MAX (~0ULL) + +#define STACK_MAGIC 0xdeadbeef + +#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) +#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) + +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) + +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. 
+ */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#define DIV_ROUND_UP_ULL(ll,d) \ + ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; }) + +#if BITS_PER_LONG == 32 +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) +#else +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) +#endif + +/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */ +#define roundup(x, y) ( \ +{ \ + const typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +#define rounddown(x, y) ( \ +{ \ + typeof(x) __x = (x); \ + __x - (__x % (y)); \ +} \ +) +#define DIV_ROUND_CLOSEST(x, divisor)( \ +{ \ + typeof(divisor) __divisor = divisor; \ + (((x) + ((__divisor) / 2)) / (__divisor)); \ +} \ +) + +/* + * Multiplies an integer by a fraction, while avoiding unnecessary + * overflow or loss of precision. + */ +#define mult_frac(x, numer, denom)( \ +{ \ + typeof(x) quot = (x) / (denom); \ + typeof(x) rem = (x) % (denom); \ + (quot * (numer)) + ((rem * (numer)) / (denom)); \ +} \ +) + + +#define _RET_IP_ (unsigned long)__builtin_return_address(0) +#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) + +#ifdef CONFIG_LBDAF +# include <asm/div64.h> +# define sector_div(a, b) do_div(a, b) +#else +# define sector_div(n, b)( \ +{ \ + int _res; \ + _res = (n) % (b); \ + (n) /= (b); \ + _res; \ +} \ +) +#endif + +/** + * upper_32_bits - return bits 32-63 of a number + * @n: the number we're accessing + * + * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress + * the "right shift count >= width of type" warning when that quantity is + * 32-bits. + */ +#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) + +/** + * lower_32_bits - return bits 0-31 of a number + * @n: the number we're accessing + */ +#define lower_32_bits(n) ((u32)(n)) + +struct completion; +struct pt_regs; +struct user; + +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int _cond_resched(void); +# define might_resched() _cond_resched() +#else +# define might_resched() do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + void __might_sleep(const char *file, int line, int preempt_offset); +/** + * might_sleep - annotation for functions that can sleep + * + * this macro will print a stack trace if it is executed in an atomic + * context (spinlock, irq-handler, ...). + * + * This is a useful debugging help to be able to catch problems early and not + * be bitten later when the calling function happens to sleep when it is not + * supposed to. + */ +# define might_sleep() \ + do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) +#else + static inline void __might_sleep(const char *file, int line, + int preempt_offset) { } +# define might_sleep() do { might_resched(); } while (0) +#endif + +#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) + +/* + * abs() handles unsigned and signed longs, ints, shorts and chars. For all + * input types abs() returns a signed long. + * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() + * for those. + */ +#define abs(x) ({ \ + long ret; \ + if (sizeof(x) == sizeof(long)) { \ + long __x = (x); \ + ret = (__x < 0) ? -__x : __x; \ + } else { \ + int __x = (x); \ + ret = (__x < 0) ? 
-__x : __x; \ + } \ + ret; \ + }) + +#define abs64(x) ({ \ + s64 __x = (x); \ + (__x < 0) ? -__x : __x; \ + }) + +#ifdef CONFIG_PROVE_LOCKING +void might_fault(void); +#else +static inline void might_fault(void) +{ + might_sleep(); +} +#endif + +extern struct atomic_notifier_head panic_notifier_list; +extern long (*panic_blink)(int state); +__printf(1, 2) +void panic(const char *fmt, ...) + __noreturn __cold; +extern void oops_enter(void); +extern void oops_exit(void); +void print_oops_end_marker(void); +extern int oops_may_print(void); +void do_exit(long error_code) + __noreturn; +void complete_and_exit(struct completion *, long) + __noreturn; + +/* Internal, do not use. */ +int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); +int __must_check _kstrtol(const char *s, unsigned int base, long *res); + +int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res); +int __must_check kstrtoll(const char *s, unsigned int base, long long *res); +static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0. + */ + if (sizeof(unsigned long) == sizeof(unsigned long long) && + __alignof__(unsigned long) == __alignof__(unsigned long long)) + return kstrtoull(s, base, (unsigned long long *)res); + else + return _kstrtoul(s, base, res); +} + +static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(long, long long) = 0. + */ + if (sizeof(long) == sizeof(long long) && + __alignof__(long) == __alignof__(long long)) + return kstrtoll(s, base, (long long *)res); + else + return _kstrtol(s, base, res); +} + +int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res); +int __must_check kstrtoint(const char *s, unsigned int base, int *res); + +static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res) +{ + return kstrtoull(s, base, res); +} + +static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res) +{ + return kstrtoll(s, base, res); +} + +static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res) +{ + return kstrtouint(s, base, res); +} + +static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res) +{ + return kstrtoint(s, base, res); +} + +int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); +int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); +int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); +int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); + +int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); +int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); +int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res); +int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res); +int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res); +int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res); +int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, 
u16 *res); +int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); +int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); +int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); + +static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) +{ + return kstrtoull_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res) +{ + return kstrtoll_from_user(s, count, base, res); +} + +static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res) +{ + return kstrtouint_from_user(s, count, base, res); +} + +static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res) +{ + return kstrtoint_from_user(s, count, base, res); +} + +/* Obsolete, do not use. Use kstrto<foo> instead */ + +extern unsigned long simple_strtoul(const char *,char **,unsigned int); +extern long simple_strtol(const char *,char **,unsigned int); +extern unsigned long long simple_strtoull(const char *,char **,unsigned int); +extern long long simple_strtoll(const char *,char **,unsigned int); +#define strict_strtoul kstrtoul +#define strict_strtol kstrtol +#define strict_strtoull kstrtoull +#define strict_strtoll kstrtoll + +extern int num_to_str(char *buf, int size, unsigned long long num); + +/* lib/printf utilities */ + +extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); +extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list); +extern __printf(3, 4) +int snprintf(char *buf, size_t size, const char *fmt, ...); +extern __printf(3, 0) +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +extern __printf(3, 4) +int scnprintf(char *buf, size_t size, const char *fmt, ...); +extern __printf(3, 0) +int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); +extern __printf(2, 3) +char *kasprintf(gfp_t gfp, const char *fmt, ...); +extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); + +extern __scanf(2, 3) +int sscanf(const char *, const char *, ...); +extern __scanf(2, 0) +int vsscanf(const char *, const char *, va_list); + +extern int get_option(char **str, int *pint); +extern char *get_options(const char *str, int nints, int *ints); +extern unsigned long long memparse(const char *ptr, char **retptr); + +extern int core_kernel_text(unsigned long addr); +extern int core_kernel_data(unsigned long addr); +extern int __kernel_text_address(unsigned long addr); +extern int kernel_text_address(unsigned long addr); +extern int func_ptr_is_kernel_text(void *ptr); + +struct pid; +extern struct pid *session_of_pgrp(struct pid *pgrp); + +unsigned long int_sqrt(unsigned long); + +extern void bust_spinlocks(int yes); +extern void wake_up_klogd(void); +extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ +extern int panic_timeout; +extern int panic_on_oops; +extern int panic_on_unrecovered_nmi; +extern int panic_on_io_nmi; +extern int sysctl_panic_on_stackoverflow; +extern const char *print_tainted(void); +extern void add_taint(unsigned flag); +extern int test_taint(unsigned flag); +extern unsigned long get_taint(void); +extern int root_mountflags; + +extern bool early_boot_irqs_disabled; + +/* Values used for system_state */ +extern enum 
system_states { + SYSTEM_BOOTING, + SYSTEM_RUNNING, + SYSTEM_HALT, + SYSTEM_POWER_OFF, + SYSTEM_RESTART, + SYSTEM_SUSPEND_DISK, +} system_state; + +#define TAINT_PROPRIETARY_MODULE 0 +#define TAINT_FORCED_MODULE 1 +#define TAINT_UNSAFE_SMP 2 +#define TAINT_FORCED_RMMOD 3 +#define TAINT_MACHINE_CHECK 4 +#define TAINT_BAD_PAGE 5 +#define TAINT_USER 6 +#define TAINT_DIE 7 +#define TAINT_OVERRIDDEN_ACPI_TABLE 8 +#define TAINT_WARN 9 +#define TAINT_CRAP 10 +#define TAINT_FIRMWARE_WORKAROUND 11 +#define TAINT_OOT_MODULE 12 + +extern const char hex_asc[]; +#define hex_asc_lo(x) hex_asc[((x) & 0x0f)] +#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] + +static inline char *hex_byte_pack(char *buf, u8 byte) +{ + *buf++ = hex_asc_hi(byte); + *buf++ = hex_asc_lo(byte); + return buf; +} + +static inline char * __deprecated pack_hex_byte(char *buf, u8 byte) +{ + return hex_byte_pack(buf, byte); +} + +extern int hex_to_bin(char ch); +extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); + +/* + * General tracing related utility functions - trace_printk(), + * tracing_on/tracing_off and tracing_start()/tracing_stop + * + * Use tracing_on/tracing_off when you want to quickly turn on or off + * tracing. It simply enables or disables the recording of the trace events. + * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on + * file, which gives a means for the kernel and userspace to interact. + * Place a tracing_off() in the kernel where you want tracing to end. + * From user space, examine the trace, and then echo 1 > tracing_on + * to continue tracing. + * + * tracing_stop/tracing_start has slightly more overhead. It is used + * by things like suspend to ram where disabling the recording of the + * trace is not enough, but tracing must actually stop because things + * like calling smp_processor_id() may crash the system. + * + * Most likely, you want to use tracing_on/tracing_off. + */ +#ifdef CONFIG_RING_BUFFER +/* trace_off_permanent stops recording with no way to bring it back */ +void tracing_off_permanent(void); +#else +static inline void tracing_off_permanent(void) { } +#endif + +enum ftrace_dump_mode { + DUMP_NONE, + DUMP_ALL, + DUMP_ORIG, +}; + +#ifdef CONFIG_TRACING +void tracing_on(void); +void tracing_off(void); +int tracing_is_on(void); + +extern void tracing_start(void); +extern void tracing_stop(void); +extern void ftrace_off_permanent(void); + +static inline __printf(1, 2) +void ____trace_printk_check_format(const char *fmt, ...) +{ +} +#define __trace_printk_check_format(fmt, args...) \ +do { \ + if (0) \ + ____trace_printk_check_format(fmt, ##args); \ +} while (0) + +/** + * trace_printk - printf formatting in the ftrace buffer + * @fmt: the printf format for printing + * + * Note: __trace_printk is an internal function for trace_printk and + * the @ip is passed in via the trace_printk macro. + * + * This function allows a kernel developer to debug fast path sections + * that printk is not appropriate for. By scattering in various + * printk like tracing in the code, a developer can quickly see + * where problems are occurring. + * + * This is intended as a debugging tool for the developer only. + * Please refrain from leaving trace_printks scattered around in + * your code. + */ + +#define trace_printk(fmt, args...) \ +do { \ + __trace_printk_check_format(fmt, ##args); \ + if (__builtin_constant_p(fmt)) { \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ? 
fmt : NULL; \ + \ + __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ + } else \ + __trace_printk(_THIS_IP_, fmt, ##args); \ +} while (0) + +extern __printf(2, 3) +int __trace_bprintk(unsigned long ip, const char *fmt, ...); + +extern __printf(2, 3) +int __trace_printk(unsigned long ip, const char *fmt, ...); + +extern void trace_dump_stack(void); + +/* + * The double __builtin_constant_p is because gcc will give us an error + * if we try to allocate the static variable to fmt if it is not a + * constant. Even with the outer if statement. + */ +#define ftrace_vprintk(fmt, vargs) \ +do { \ + if (__builtin_constant_p(fmt)) { \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ? fmt : NULL; \ + \ + __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ + } else \ + __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ +} while (0) + +extern int +__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); + +extern int +__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); + +extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); +#else +static inline __printf(1, 2) +int trace_printk(const char *fmt, ...); + +static inline void tracing_start(void) { } +static inline void tracing_stop(void) { } +static inline void ftrace_off_permanent(void) { } +static inline void trace_dump_stack(void) { } + +static inline void tracing_on(void) { } +static inline void tracing_off(void) { } +static inline int tracing_is_on(void) { return 0; } + +static inline int +trace_printk(const char *fmt, ...) +{ + return 0; +} +static inline int +ftrace_vprintk(const char *fmt, va_list ap) +{ + return 0; +} +static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } +#endif /* CONFIG_TRACING */ + +/* + * min()/max()/clamp() macros that also do + * strict type-checking.. See the + * "unnecessary" pointer comparison. + */ +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + +#define max(x, y) ({ \ + typeof(x) _max1 = (x); \ + typeof(y) _max2 = (y); \ + (void) (&_max1 == &_max2); \ + _max1 > _max2 ? _max1 : _max2; }) + +#define min3(x, y, z) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + typeof(z) _min3 = (z); \ + (void) (&_min1 == &_min2); \ + (void) (&_min1 == &_min3); \ + _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ + (_min2 < _min3 ? _min2 : _min3); }) + +#define max3(x, y, z) ({ \ + typeof(x) _max1 = (x); \ + typeof(y) _max2 = (y); \ + typeof(z) _max3 = (z); \ + (void) (&_max1 == &_max2); \ + (void) (&_max1 == &_max3); \ + _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \ + (_max2 > _max3 ? _max2 : _max3); }) + +/** + * min_not_zero - return the minimum that is _not_ zero, unless both are zero + * @x: value1 + * @y: value2 + */ +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +/** + * clamp - return a value clamped to a given range with strict typechecking + * @val: current value + * @min: minimum allowable value + * @max: maximum allowable value + * + * This macro does strict typechecking of min/max to make sure they are of the + * same type as val. See the unnecessary pointer comparisons. + */ +#define clamp(val, min, max) ({ \ + typeof(val) __val = (val); \ + typeof(min) __min = (min); \ + typeof(max) __max = (max); \ + (void) (&__val == &__min); \ + (void) (&__val == &__max); \ + __val = __val < __min ? 
__min: __val; \ + __val > __max ? __max: __val; }) + +/* + * ..and if you can't take the strict + * types, you can specify one yourself. + * + * Or not use min/max/clamp at all, of course. + */ +#define min_t(type, x, y) ({ \ + type __min1 = (x); \ + type __min2 = (y); \ + __min1 < __min2 ? __min1: __min2; }) + +#define max_t(type, x, y) ({ \ + type __max1 = (x); \ + type __max2 = (y); \ + __max1 > __max2 ? __max1: __max2; }) + +/** + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @min: minimum allowable value + * @max: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * 'type' to make all the comparisons. + */ +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min: __val; \ + __val > __max ? __max: __val; }) + +/** + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @min: minimum allowable value + * @max: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument 'val' is. This is useful when val is an unsigned + * type and min and max are literals that will otherwise be assigned a signed + * integer type. + */ +#define clamp_val(val, min, max) ({ \ + typeof(val) __val = (val); \ + typeof(val) __min = (min); \ + typeof(val) __max = (max); \ + __val = __val < __min ? __min: __val; \ + __val > __max ? __max: __val; }) + + +/* + * swap - swap value of @a and @b + */ +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. 
+ * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + +/* Trap pasters of __FUNCTION__ at compile-time */ +#define __FUNCTION__ (__func__) + +/* This helps us to avoid #ifdef CONFIG_NUMA */ +#ifdef CONFIG_NUMA +#define NUMA_BUILD 1 +#else +#define NUMA_BUILD 0 +#endif + +/* This helps us avoid #ifdef CONFIG_COMPACTION */ +#ifdef CONFIG_COMPACTION +#define COMPACTION_BUILD 1 +#else +#define COMPACTION_BUILD 0 +#endif + +/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ +#ifdef CONFIG_FTRACE_MCOUNT_RECORD +# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD +#endif + +extern int do_sysinfo(struct sysinfo *info); + +/* To identify board information in panic logs, set this */ +extern char *mach_panic_string; + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h new file mode 100644 index 00000000..2fbd9053 --- /dev/null +++ b/include/linux/kernel_stat.h @@ -0,0 +1,133 @@ +#ifndef _LINUX_KERNEL_STAT_H +#define _LINUX_KERNEL_STAT_H + +#include <linux/smp.h> +#include <linux/threads.h> +#include <linux/percpu.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <asm/irq.h> +#include <asm/cputime.h> + +/* + * 'kernel_stat.h' contains the definitions needed for doing + * some kernel statistics (CPU usage, context switches ...), + * used by rstatd/perfmeter + */ + +enum cpu_usage_stat { + CPUTIME_USER, + CPUTIME_NICE, + CPUTIME_SYSTEM, + CPUTIME_SOFTIRQ, + CPUTIME_IRQ, + CPUTIME_IDLE, + CPUTIME_IOWAIT, + CPUTIME_STEAL, + CPUTIME_GUEST, + CPUTIME_GUEST_NICE, + NR_STATS, +}; + +struct kernel_cpustat { + u64 cpustat[NR_STATS]; +}; + +struct kernel_stat { +#ifndef CONFIG_GENERIC_HARDIRQS + unsigned int irqs[NR_IRQS]; +#endif + unsigned long irqs_sum; + unsigned int softirqs[NR_SOFTIRQS]; +}; + +DECLARE_PER_CPU(struct kernel_stat, kstat); +DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); + +/* Must have preemption disabled for this to be meaningful. 
*/ +#define kstat_this_cpu (&__get_cpu_var(kstat)) +#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat)) +#define kstat_cpu(cpu) per_cpu(kstat, cpu) +#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) + +extern unsigned long long nr_context_switches(void); + +#ifndef CONFIG_GENERIC_HARDIRQS + +struct irq_desc; + +static inline void kstat_incr_irqs_this_cpu(unsigned int irq, + struct irq_desc *desc) +{ + __this_cpu_inc(kstat.irqs[irq]); + __this_cpu_inc(kstat.irqs_sum); +} + +static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) +{ + return kstat_cpu(cpu).irqs[irq]; +} +#else +#include <linux/irq.h> +extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); + +#define kstat_incr_irqs_this_cpu(irqno, DESC) \ +do { \ + __this_cpu_inc(*(DESC)->kstat_irqs); \ + __this_cpu_inc(kstat.irqs_sum); \ +} while (0) + +#endif + +static inline void kstat_incr_softirqs_this_cpu(unsigned int irq) +{ + __this_cpu_inc(kstat.softirqs[irq]); +} + +static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) +{ + return kstat_cpu(cpu).softirqs[irq]; +} + +/* + * Number of interrupts per specific IRQ source, since bootup + */ +#ifndef CONFIG_GENERIC_HARDIRQS +static inline unsigned int kstat_irqs(unsigned int irq) +{ + unsigned int sum = 0; + int cpu; + + for_each_possible_cpu(cpu) + sum += kstat_irqs_cpu(irq, cpu); + + return sum; +} +#else +extern unsigned int kstat_irqs(unsigned int irq); +#endif + +/* + * Number of interrupts per cpu, since bootup + */ +static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) +{ + return kstat_cpu(cpu).irqs_sum; +} + +/* + * Lock/unlock the current runqueue - to extract task statistics: + */ +extern unsigned long long task_delta_exec(struct task_struct *); + +extern void account_user_time(struct task_struct *, cputime_t, cputime_t); +extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); +extern void account_steal_time(cputime_t); +extern void account_idle_time(cputime_t); + +extern void account_process_tick(struct task_struct *, int user); +extern void account_steal_ticks(unsigned long ticks); +extern void account_idle_ticks(unsigned long ticks); + +#endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h new file mode 100644 index 00000000..9c268392 --- /dev/null +++ b/include/linux/kernelcapi.h @@ -0,0 +1,155 @@ +/* + * $Id: kernelcapi.h,v 1.8.6.2 2001/02/07 11:31:31 kai Exp $ + * + * Kernel CAPI 2.0 Interface for Linux + * + * (c) Copyright 1997 by Carsten Paeth (calle@calle.in-berlin.de) + * + */ + +#ifndef __KERNELCAPI_H__ +#define __KERNELCAPI_H__ + +#define CAPI_MAXAPPL 240 /* maximum number of applications */ +#define CAPI_MAXCONTR 32 /* maximum number of controller */ +#define CAPI_MAXDATAWINDOW 8 + + +typedef struct kcapi_flagdef { + int contr; + int flag; +} kcapi_flagdef; + +typedef struct kcapi_carddef { + char driver[32]; + unsigned int port; + unsigned irq; + unsigned int membase; + int cardnr; +} kcapi_carddef; + +/* new ioctls >= 10 */ +#define KCAPI_CMD_TRACE 10 +#define KCAPI_CMD_ADDCARD 11 /* OBSOLETE */ + +/* + * flag > 2 => trace also data + * flag & 1 => show trace + */ +#define KCAPI_TRACE_OFF 0 +#define KCAPI_TRACE_SHORT_NO_DATA 1 +#define KCAPI_TRACE_FULL_NO_DATA 2 +#define KCAPI_TRACE_SHORT 3 +#define KCAPI_TRACE_FULL 4 + + +#ifdef __KERNEL__ + +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <linux/notifier.h> + +struct capi20_appl { + u16 applid; + capi_register_params rparam; + void 
(*recv_message)(struct capi20_appl *ap, struct sk_buff *skb); + void *private; + + /* internal to kernelcapi.o */ + unsigned long nrecvctlpkt; + unsigned long nrecvdatapkt; + unsigned long nsentctlpkt; + unsigned long nsentdatapkt; + struct mutex recv_mtx; + struct sk_buff_head recv_queue; + struct work_struct recv_work; + int release_in_progress; +}; + +u16 capi20_isinstalled(void); +u16 capi20_register(struct capi20_appl *ap); +u16 capi20_release(struct capi20_appl *ap); +u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb); +u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]); +u16 capi20_get_version(u32 contr, struct capi_version *verp); +u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]); +u16 capi20_get_profile(u32 contr, struct capi_profile *profp); +int capi20_manufacturer(unsigned int cmd, void __user *data); + +#define CAPICTR_UP 0 +#define CAPICTR_DOWN 1 + +int register_capictr_notifier(struct notifier_block *nb); +int unregister_capictr_notifier(struct notifier_block *nb); + +#define CAPI_NOERROR 0x0000 + +#define CAPI_TOOMANYAPPLS 0x1001 +#define CAPI_LOGBLKSIZETOSMALL 0x1002 +#define CAPI_BUFFEXECEEDS64K 0x1003 +#define CAPI_MSGBUFSIZETOOSMALL 0x1004 +#define CAPI_ANZLOGCONNNOTSUPPORTED 0x1005 +#define CAPI_REGRESERVED 0x1006 +#define CAPI_REGBUSY 0x1007 +#define CAPI_REGOSRESOURCEERR 0x1008 +#define CAPI_REGNOTINSTALLED 0x1009 +#define CAPI_REGCTRLERNOTSUPPORTEXTEQUIP 0x100a +#define CAPI_REGCTRLERONLYSUPPORTEXTEQUIP 0x100b + +#define CAPI_ILLAPPNR 0x1101 +#define CAPI_ILLCMDORSUBCMDORMSGTOSMALL 0x1102 +#define CAPI_SENDQUEUEFULL 0x1103 +#define CAPI_RECEIVEQUEUEEMPTY 0x1104 +#define CAPI_RECEIVEOVERFLOW 0x1105 +#define CAPI_UNKNOWNNOTPAR 0x1106 +#define CAPI_MSGBUSY 0x1107 +#define CAPI_MSGOSRESOURCEERR 0x1108 +#define CAPI_MSGNOTINSTALLED 0x1109 +#define CAPI_MSGCTRLERNOTSUPPORTEXTEQUIP 0x110a +#define CAPI_MSGCTRLERONLYSUPPORTEXTEQUIP 0x110b + +typedef enum { + CapiMessageNotSupportedInCurrentState = 0x2001, + CapiIllContrPlciNcci = 0x2002, + CapiNoPlciAvailable = 0x2003, + CapiNoNcciAvailable = 0x2004, + CapiNoListenResourcesAvailable = 0x2005, + CapiNoFaxResourcesAvailable = 0x2006, + CapiIllMessageParmCoding = 0x2007, +} RESOURCE_CODING_PROBLEM; + +typedef enum { + CapiB1ProtocolNotSupported = 0x3001, + CapiB2ProtocolNotSupported = 0x3002, + CapiB3ProtocolNotSupported = 0x3003, + CapiB1ProtocolParameterNotSupported = 0x3004, + CapiB2ProtocolParameterNotSupported = 0x3005, + CapiB3ProtocolParameterNotSupported = 0x3006, + CapiBProtocolCombinationNotSupported = 0x3007, + CapiNcpiNotSupported = 0x3008, + CapiCipValueUnknown = 0x3009, + CapiFlagsNotSupported = 0x300a, + CapiFacilityNotSupported = 0x300b, + CapiDataLengthNotSupportedByCurrentProtocol = 0x300c, + CapiResetProcedureNotSupportedByCurrentProtocol = 0x300d, + CapiTeiAssignmentFailed = 0x300e, +} REQUESTED_SERVICES_PROBLEM; + +typedef enum { + CapiSuccess = 0x0000, + CapiSupplementaryServiceNotSupported = 0x300e, + CapiRequestNotAllowedInThisState = 0x3010, +} SUPPLEMENTARY_SERVICE_INFO; + +typedef enum { + CapiProtocolErrorLayer1 = 0x3301, + CapiProtocolErrorLayer2 = 0x3302, + CapiProtocolErrorLayer3 = 0x3303, + CapiTimeOut = 0x3303, // SuppServiceReason + CapiCallGivenToOtherApplication = 0x3304, +} CAPI_REASON; + +#endif /* __KERNEL__ */ + +#endif /* __KERNELCAPI_H__ */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h new file mode 100644 index 00000000..0d7d6a1b --- /dev/null +++ b/include/linux/kexec.h @@ -0,0 +1,231 @@ +#ifndef LINUX_KEXEC_H +#define 
LINUX_KEXEC_H + +#ifdef CONFIG_KEXEC +#include <linux/types.h> +#include <linux/list.h> +#include <linux/linkage.h> +#include <linux/compat.h> +#include <linux/ioport.h> +#include <linux/elfcore.h> +#include <linux/elf.h> +#include <asm/kexec.h> + +/* Verify architecture specific macros are defined */ + +#ifndef KEXEC_SOURCE_MEMORY_LIMIT +#error KEXEC_SOURCE_MEMORY_LIMIT not defined +#endif + +#ifndef KEXEC_DESTINATION_MEMORY_LIMIT +#error KEXEC_DESTINATION_MEMORY_LIMIT not defined +#endif + +#ifndef KEXEC_CONTROL_MEMORY_LIMIT +#error KEXEC_CONTROL_MEMORY_LIMIT not defined +#endif + +#ifndef KEXEC_CONTROL_PAGE_SIZE +#error KEXEC_CONTROL_PAGE_SIZE not defined +#endif + +#ifndef KEXEC_ARCH +#error KEXEC_ARCH not defined +#endif + +#ifndef KEXEC_CRASH_CONTROL_MEMORY_LIMIT +#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT KEXEC_CONTROL_MEMORY_LIMIT +#endif + +#ifndef KEXEC_CRASH_MEM_ALIGN +#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE +#endif + +#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4) +#define KEXEC_CORE_NOTE_NAME "CORE" +#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4) +#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4) +/* + * The per-cpu notes area is a list of notes terminated by a "NULL" + * note header. For kdump, the code in vmcore.c runs in the context + * of the second kernel to combine them into one note. + */ +#ifndef KEXEC_NOTE_BYTES +#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \ + KEXEC_CORE_NOTE_NAME_BYTES + \ + KEXEC_CORE_NOTE_DESC_BYTES ) +#endif + +/* + * This structure is used to hold the arguments that are used when loading + * kernel binaries. + */ + +typedef unsigned long kimage_entry_t; +#define IND_DESTINATION 0x1 +#define IND_INDIRECTION 0x2 +#define IND_DONE 0x4 +#define IND_SOURCE 0x8 + +#define KEXEC_SEGMENT_MAX 16 +struct kexec_segment { + void __user *buf; + size_t bufsz; + unsigned long mem; /* User space sees this as a (void *) ... */ + size_t memsz; +}; + +#ifdef CONFIG_COMPAT +struct compat_kexec_segment { + compat_uptr_t buf; + compat_size_t bufsz; + compat_ulong_t mem; /* User space sees this as a (void *) ... */ + compat_size_t memsz; +}; +#endif + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + + unsigned long destination; + + unsigned long start; + struct page *control_code_page; + struct page *swap_page; + + unsigned long nr_segments; + struct kexec_segment segment[KEXEC_SEGMENT_MAX]; + + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unuseable_pages; + + /* Address of next control page to allocate for crash kernels. 
*/ + unsigned long control_page; + + /* Flags to indicate special processing */ + unsigned int type : 1; +#define KEXEC_TYPE_DEFAULT 0 +#define KEXEC_TYPE_CRASH 1 + unsigned int preserve_context : 1; + +#ifdef ARCH_HAS_KIMAGE_ARCH + struct kimage_arch arch; +#endif +}; + + + +/* kexec interface functions */ +extern void machine_kexec(struct kimage *image); +extern int machine_kexec_prepare(struct kimage *image); +extern void machine_kexec_cleanup(struct kimage *image); +extern asmlinkage long sys_kexec_load(unsigned long entry, + unsigned long nr_segments, + struct kexec_segment __user *segments, + unsigned long flags); +extern int kernel_kexec(void); +#ifdef CONFIG_COMPAT +extern asmlinkage long compat_sys_kexec_load(unsigned long entry, + unsigned long nr_segments, + struct compat_kexec_segment __user *segments, + unsigned long flags); +#endif +extern struct page *kimage_alloc_control_pages(struct kimage *image, + unsigned int order); +extern void crash_kexec(struct pt_regs *); +int kexec_should_crash(struct task_struct *); +void crash_save_cpu(struct pt_regs *regs, int cpu); +void crash_save_vmcoreinfo(void); +void crash_map_reserved_pages(void); +void crash_unmap_reserved_pages(void); +void arch_crash_save_vmcoreinfo(void); +__printf(1, 2) +void vmcoreinfo_append_str(const char *fmt, ...); +unsigned long paddr_vmcoreinfo_note(void); + +#define VMCOREINFO_OSRELEASE(value) \ + vmcoreinfo_append_str("OSRELEASE=%s\n", value) +#define VMCOREINFO_PAGESIZE(value) \ + vmcoreinfo_append_str("PAGESIZE=%ld\n", value) +#define VMCOREINFO_SYMBOL(name) \ + vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) +#define VMCOREINFO_SIZE(name) \ + vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ + (unsigned long)sizeof(name)) +#define VMCOREINFO_STRUCT_SIZE(name) \ + vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ + (unsigned long)sizeof(struct name)) +#define VMCOREINFO_OFFSET(name, field) \ + vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \ + (unsigned long)offsetof(struct name, field)) +#define VMCOREINFO_LENGTH(name, value) \ + vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value) +#define VMCOREINFO_NUMBER(name) \ + vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) +#define VMCOREINFO_CONFIG(name) \ + vmcoreinfo_append_str("CONFIG_%s=y\n", #name) + +extern struct kimage *kexec_image; +extern struct kimage *kexec_crash_image; + +#ifndef kexec_flush_icache_page +#define kexec_flush_icache_page(page) +#endif + +#define KEXEC_ON_CRASH 0x00000001 +#define KEXEC_PRESERVE_CONTEXT 0x00000002 +#define KEXEC_ARCH_MASK 0xffff0000 + +/* These values match the ELF architecture values. + * Unless there is a good reason that should continue to be the case. 
+ */ +#define KEXEC_ARCH_DEFAULT ( 0 << 16) +#define KEXEC_ARCH_386 ( 3 << 16) +#define KEXEC_ARCH_X86_64 (62 << 16) +#define KEXEC_ARCH_PPC (20 << 16) +#define KEXEC_ARCH_PPC64 (21 << 16) +#define KEXEC_ARCH_IA_64 (50 << 16) +#define KEXEC_ARCH_ARM (40 << 16) +#define KEXEC_ARCH_S390 (22 << 16) +#define KEXEC_ARCH_SH (42 << 16) +#define KEXEC_ARCH_MIPS_LE (10 << 16) +#define KEXEC_ARCH_MIPS ( 8 << 16) + +/* List of defined/legal kexec flags */ +#ifndef CONFIG_KEXEC_JUMP +#define KEXEC_FLAGS KEXEC_ON_CRASH +#else +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) +#endif + +#define VMCOREINFO_BYTES (4096) +#define VMCOREINFO_NOTE_NAME "VMCOREINFO" +#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4) +#define VMCOREINFO_NOTE_SIZE (KEXEC_NOTE_HEAD_BYTES*2 + VMCOREINFO_BYTES \ + + VMCOREINFO_NOTE_NAME_BYTES) + +/* Location of a reserved region to hold the crash kernel. + */ +extern struct resource crashk_res; +typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4]; +extern note_buf_t __percpu *crash_notes; +extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; +extern size_t vmcoreinfo_size; +extern size_t vmcoreinfo_max_size; + +int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, + unsigned long long *crash_size, unsigned long long *crash_base); +int crash_shrink_memory(unsigned long new_size); +size_t crash_get_memory_size(void); +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); + +#else /* !CONFIG_KEXEC */ +struct pt_regs; +struct task_struct; +static inline void crash_kexec(struct pt_regs *regs) { } +static inline int kexec_should_crash(struct task_struct *p) { return 0; } +#endif /* CONFIG_KEXEC */ +#endif /* LINUX_KEXEC_H */ diff --git a/include/linux/key-type.h b/include/linux/key-type.h new file mode 100644 index 00000000..39e3c082 --- /dev/null +++ b/include/linux/key-type.h @@ -0,0 +1,125 @@ +/* Definitions for key type implementations + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_KEY_TYPE_H +#define _LINUX_KEY_TYPE_H + +#include <linux/key.h> + +#ifdef CONFIG_KEYS + +/* + * key under-construction record + * - passed to the request_key actor if supplied + */ +struct key_construction { + struct key *key; /* key being constructed */ + struct key *authkey;/* authorisation for key being constructed */ +}; + +typedef int (*request_key_actor_t)(struct key_construction *key, + const char *op, void *aux); + +/* + * kernel managed key type definition + */ +struct key_type { + /* name of the type */ + const char *name; + + /* default payload length for quota precalculation (optional) + * - this can be used instead of calling key_payload_reserve(), that + * function only needs to be called if the real datalen is different + */ + size_t def_datalen; + + /* vet a description */ + int (*vet_description)(const char *description); + + /* instantiate a key of this type + * - this method should call key_payload_reserve() to determine if the + * user's quota will hold the payload + */ + int (*instantiate)(struct key *key, const void *data, size_t datalen); + + /* update a key of this type (optional) + * - this method should call key_payload_reserve() to recalculate the + * quota consumption + * - the key must be locked against read when modifying + */ + int (*update)(struct key *key, const void *data, size_t datalen); + + /* match a key against a description */ + int (*match)(const struct key *key, const void *desc); + + /* clear some of the data from a key on revokation (optional) + * - the key's semaphore will be write-locked by the caller + */ + void (*revoke)(struct key *key); + + /* clear the data from a key (optional) */ + void (*destroy)(struct key *key); + + /* describe a key */ + void (*describe)(const struct key *key, struct seq_file *p); + + /* read a key's data (optional) + * - permission checks will be done by the caller + * - the key's semaphore will be readlocked by the caller + * - should return the amount of data that could be read, no matter how + * much is copied into the buffer + * - shouldn't do the copy if the buffer is NULL + */ + long (*read)(const struct key *key, char __user *buffer, size_t buflen); + + /* handle request_key() for this type instead of invoking + * /sbin/request-key (optional) + * - key is the key to instantiate + * - authkey is the authority to assume when instantiating this key + * - op is the operation to be done, usually "create" + * - the call must not return until the instantiation process has run + * its course + */ + request_key_actor_t request_key; + + /* internal fields */ + struct list_head link; /* link in types list */ + struct lock_class_key lock_class; /* key->sem lock class */ +}; + +extern struct key_type key_type_keyring; + +extern int register_key_type(struct key_type *ktype); +extern void unregister_key_type(struct key_type *ktype); + +extern int key_payload_reserve(struct key *key, size_t datalen); +extern int key_instantiate_and_link(struct key *key, + const void *data, + size_t datalen, + struct key *keyring, + struct key *instkey); +extern int key_reject_and_link(struct key *key, + unsigned timeout, + unsigned error, + struct key *keyring, + struct key *instkey); +extern void complete_request_key(struct key_construction *cons, int error); + +static inline int key_negate_and_link(struct key *key, + unsigned timeout, + struct key *keyring, + struct key *instkey) +{ + return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); +} + +#endif /* CONFIG_KEYS */ +#endif /* _LINUX_KEY_TYPE_H */ 
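
Illustrative sketch only: the callbacks and registration helpers declared in key-type.h above are typically wired together roughly as follows. The type name "example", the 1024-byte payload cap and the module wrapper are hypothetical and not part of this header; register_key_type(), unregister_key_type(), key_payload_reserve() and the callback signatures are taken from the declarations above, while kmemdup(), kfree() and seq_puts() are assumed from the usual kernel helpers.

#include <linux/errno.h>
#include <linux/key-type.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Copy the caller-supplied blob into the key's payload. */
static int example_instantiate(struct key *key, const void *data, size_t datalen)
{
	void *blob;
	int ret;

	if (datalen == 0 || datalen > 1024)	/* arbitrary cap for the sketch */
		return -EINVAL;

	/* Charge the payload against the owner's key quota first. */
	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		return ret;

	blob = kmemdup(data, datalen, GFP_KERNEL);
	if (!blob)
		return -ENOMEM;

	key->payload.data = blob;
	return 0;
}

/* Exact-match lookup on the description string. */
static int example_match(const struct key *key, const void *desc)
{
	return strcmp(key->description, desc) == 0;
}

/* Free the payload when the last reference to the key goes away. */
static void example_destroy(struct key *key)
{
	kfree(key->payload.data);
}

/* Describe the key, e.g. for /proc/keys. */
static void example_describe(const struct key *key, struct seq_file *m)
{
	seq_puts(m, key->description);
}

static struct key_type key_type_example = {
	.name		= "example",
	.instantiate	= example_instantiate,
	.match		= example_match,
	.destroy	= example_destroy,
	.describe	= example_describe,
};

static int __init example_key_init(void)
{
	return register_key_type(&key_type_example);
}

static void __exit example_key_exit(void)
{
	unregister_key_type(&key_type_example);
}

module_init(example_key_init);
module_exit(example_key_exit);
MODULE_LICENSE("GPL");

With the userspace keyutils tools, something like "keyctl add example foo secretdata @u" would then exercise the instantiate path above; this is only meant to show how the callbacks in this header fit together, not a definitive implementation.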
diff --git a/include/linux/key.h b/include/linux/key.h new file mode 100644 index 00000000..96933b1e --- /dev/null +++ b/include/linux/key.h @@ -0,0 +1,334 @@ +/* Authentication token and access key management + * + * Copyright (C) 2004, 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * + * See Documentation/security/keys.txt for information on keys/keyrings. + */ + +#ifndef _LINUX_KEY_H +#define _LINUX_KEY_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/rcupdate.h> +#include <linux/sysctl.h> +#include <linux/rwsem.h> +#include <linux/atomic.h> + +#ifdef __KERNEL__ + +/* key handle serial number */ +typedef int32_t key_serial_t; + +/* key handle permissions mask */ +typedef uint32_t key_perm_t; + +struct key; + +#ifdef CONFIG_KEYS + +#undef KEY_DEBUGGING + +#define KEY_POS_VIEW 0x01000000 /* possessor can view a key's attributes */ +#define KEY_POS_READ 0x02000000 /* possessor can read key payload / view keyring */ +#define KEY_POS_WRITE 0x04000000 /* possessor can update key payload / add link to keyring */ +#define KEY_POS_SEARCH 0x08000000 /* possessor can find a key in search / search a keyring */ +#define KEY_POS_LINK 0x10000000 /* possessor can create a link to a key/keyring */ +#define KEY_POS_SETATTR 0x20000000 /* possessor can set key attributes */ +#define KEY_POS_ALL 0x3f000000 + +#define KEY_USR_VIEW 0x00010000 /* user permissions... */ +#define KEY_USR_READ 0x00020000 +#define KEY_USR_WRITE 0x00040000 +#define KEY_USR_SEARCH 0x00080000 +#define KEY_USR_LINK 0x00100000 +#define KEY_USR_SETATTR 0x00200000 +#define KEY_USR_ALL 0x003f0000 + +#define KEY_GRP_VIEW 0x00000100 /* group permissions... */ +#define KEY_GRP_READ 0x00000200 +#define KEY_GRP_WRITE 0x00000400 +#define KEY_GRP_SEARCH 0x00000800 +#define KEY_GRP_LINK 0x00001000 +#define KEY_GRP_SETATTR 0x00002000 +#define KEY_GRP_ALL 0x00003f00 + +#define KEY_OTH_VIEW 0x00000001 /* third party permissions... */ +#define KEY_OTH_READ 0x00000002 +#define KEY_OTH_WRITE 0x00000004 +#define KEY_OTH_SEARCH 0x00000008 +#define KEY_OTH_LINK 0x00000010 +#define KEY_OTH_SETATTR 0x00000020 +#define KEY_OTH_ALL 0x0000003f + +#define KEY_PERM_UNDEF 0xffffffff + +struct seq_file; +struct user_struct; +struct signal_struct; +struct cred; + +struct key_type; +struct key_owner; +struct keyring_list; +struct keyring_name; + +/*****************************************************************************/ +/* + * key reference with possession attribute handling + * + * NOTE! key_ref_t is a typedef'd pointer to a type that is not actually + * defined. This is because we abuse the bottom bit of the reference to carry a + * flag to indicate whether the calling process possesses that key in one of + * its keyrings. + * + * the key_ref_t has been made a separate type so that the compiler can reject + * attempts to dereference it without proper conversion. 
+ * + * the three functions are used to assemble and disassemble references + */ +typedef struct __key_reference_with_attributes *key_ref_t; + +static inline key_ref_t make_key_ref(const struct key *key, + unsigned long possession) +{ + return (key_ref_t) ((unsigned long) key | possession); +} + +static inline struct key *key_ref_to_ptr(const key_ref_t key_ref) +{ + return (struct key *) ((unsigned long) key_ref & ~1UL); +} + +static inline unsigned long is_key_possessed(const key_ref_t key_ref) +{ + return (unsigned long) key_ref & 1UL; +} + +/*****************************************************************************/ +/* + * authentication token / access credential / keyring + * - types of key include: + * - keyrings + * - disk encryption IDs + * - Kerberos TGTs and tickets + */ +struct key { + atomic_t usage; /* number of references */ + key_serial_t serial; /* key serial number */ + struct rb_node serial_node; + struct key_type *type; /* type of key */ + struct rw_semaphore sem; /* change vs change sem */ + struct key_user *user; /* owner of this key */ + void *security; /* security data for this key */ + union { + time_t expiry; /* time at which key expires (or 0) */ + time_t revoked_at; /* time at which key was revoked */ + }; + uid_t uid; + gid_t gid; + key_perm_t perm; /* access permissions */ + unsigned short quotalen; /* length added to quota */ + unsigned short datalen; /* payload data length + * - may not match RCU dereferenced payload + * - payload should contain own length + */ + +#ifdef KEY_DEBUGGING + unsigned magic; +#define KEY_DEBUG_MAGIC 0x18273645u +#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu +#endif + + unsigned long flags; /* status flags (change with bitops) */ +#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */ +#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */ +#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */ +#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */ +#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */ +#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ +#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ + + /* the description string + * - this is used to match a key against search criteria + * - this should be a printable string + * - eg: for krb5 AFS, this might be "afs@REDHAT.COM" + */ + char *description; + + /* type specific data + * - this is used by the keyring type to index the name + */ + union { + struct list_head link; + unsigned long x[2]; + void *p[2]; + int reject_error; + } type_data; + + /* key data + * - this is used to hold the data actually used in cryptography or + * whatever + */ + union { + unsigned long value; + void __rcu *rcudata; + void *data; + struct keyring_list __rcu *subscriptions; + } payload; +}; + +extern struct key *key_alloc(struct key_type *type, + const char *desc, + uid_t uid, gid_t gid, + const struct cred *cred, + key_perm_t perm, + unsigned long flags); + + +#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ +#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ +#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ + +extern void key_revoke(struct key *key); +extern void key_put(struct key *key); + +static inline struct key *key_get(struct key *key) +{ + if (key) + atomic_inc(&key->usage); + return key; +} + +static inline void key_ref_put(key_ref_t key_ref) +{ + key_put(key_ref_to_ptr(key_ref)); +} + +extern struct key 
*request_key(struct key_type *type, + const char *description, + const char *callout_info); + +extern struct key *request_key_with_auxdata(struct key_type *type, + const char *description, + const void *callout_info, + size_t callout_len, + void *aux); + +extern struct key *request_key_async(struct key_type *type, + const char *description, + const void *callout_info, + size_t callout_len); + +extern struct key *request_key_async_with_auxdata(struct key_type *type, + const char *description, + const void *callout_info, + size_t callout_len, + void *aux); + +extern int wait_for_key_construction(struct key *key, bool intr); + +extern int key_validate(struct key *key); + +extern key_ref_t key_create_or_update(key_ref_t keyring, + const char *type, + const char *description, + const void *payload, + size_t plen, + key_perm_t perm, + unsigned long flags); + +extern int key_update(key_ref_t key, + const void *payload, + size_t plen); + +extern int key_link(struct key *keyring, + struct key *key); + +extern int key_unlink(struct key *keyring, + struct key *key); + +extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, + const struct cred *cred, + unsigned long flags, + struct key *dest); + +extern int keyring_clear(struct key *keyring); + +extern key_ref_t keyring_search(key_ref_t keyring, + struct key_type *type, + const char *description); + +extern int keyring_add_key(struct key *keyring, + struct key *key); + +extern struct key *key_lookup(key_serial_t id); + +static inline key_serial_t key_serial(const struct key *key) +{ + return key ? key->serial : 0; +} + +extern void key_set_timeout(struct key *, unsigned); + +/** + * key_is_instantiated - Determine if a key has been positively instantiated + * @key: The key to check. + * + * Return true if the specified key has been positively instantiated, false + * otherwise. 
+ */ +static inline bool key_is_instantiated(const struct key *key) +{ + return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && + !test_bit(KEY_FLAG_NEGATIVE, &key->flags); +} + +#define rcu_dereference_key(KEY) \ + (rcu_dereference_protected((KEY)->payload.rcudata, \ + rwsem_is_locked(&((struct key *)(KEY))->sem))) + +#define rcu_assign_keypointer(KEY, PAYLOAD) \ + (rcu_assign_pointer((KEY)->payload.rcudata, PAYLOAD)) + +#ifdef CONFIG_SYSCTL +extern ctl_table key_sysctls[]; +#endif + +extern void key_replace_session_keyring(void); + +/* + * the userspace interface + */ +extern int install_thread_keyring_to_cred(struct cred *cred); +extern void key_fsuid_changed(struct task_struct *tsk); +extern void key_fsgid_changed(struct task_struct *tsk); +extern void key_init(void); + +#else /* CONFIG_KEYS */ + +#define key_validate(k) 0 +#define key_serial(k) 0 +#define key_get(k) ({ NULL; }) +#define key_revoke(k) do { } while(0) +#define key_put(k) do { } while(0) +#define key_ref_put(k) do { } while(0) +#define make_key_ref(k, p) NULL +#define key_ref_to_ptr(k) NULL +#define is_key_possessed(k) 0 +#define key_fsuid_changed(t) do { } while(0) +#define key_fsgid_changed(t) do { } while(0) +#define key_init() do { } while(0) +#define key_replace_session_keyring() do { } while(0) + +#endif /* CONFIG_KEYS */ +#endif /* __KERNEL__ */ +#endif /* _LINUX_KEY_H */ diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h new file mode 100644 index 00000000..86e5214a --- /dev/null +++ b/include/linux/keyboard.h @@ -0,0 +1,459 @@ +#ifndef __LINUX_KEYBOARD_H +#define __LINUX_KEYBOARD_H + +#include <linux/wait.h> + +#define KG_SHIFT 0 +#define KG_CTRL 2 +#define KG_ALT 3 +#define KG_ALTGR 1 +#define KG_SHIFTL 4 +#define KG_KANASHIFT 4 +#define KG_SHIFTR 5 +#define KG_CTRLL 6 +#define KG_CTRLR 7 +#define KG_CAPSSHIFT 8 + +#define NR_SHIFT 9 + +#define NR_KEYS 256 +#define MAX_NR_KEYMAPS 256 +/* This means 128Kb if all keymaps are allocated. Only the superuser + may increase the number of keymaps beyond MAX_NR_OF_USER_KEYMAPS. */ +#define MAX_NR_OF_USER_KEYMAPS 256 /* should be at least 7 */ + +#ifdef __KERNEL__ +struct notifier_block; +extern unsigned short *key_maps[MAX_NR_KEYMAPS]; +extern unsigned short plain_map[NR_KEYS]; + +struct keyboard_notifier_param { + struct vc_data *vc; /* VC on which the keyboard press was done */ + int down; /* Pressure of the key? 
*/ + int shift; /* Current shift mask */ + int ledstate; /* Current led state */ + unsigned int value; /* keycode, unicode value or keysym */ +}; + +extern int register_keyboard_notifier(struct notifier_block *nb); +extern int unregister_keyboard_notifier(struct notifier_block *nb); +#endif + +#define MAX_NR_FUNC 256 /* max nr of strings assigned to keys */ + +#define KT_LATIN 0 /* we depend on this being zero */ +#define KT_LETTER 11 /* symbol that can be acted upon by CapsLock */ +#define KT_FN 1 +#define KT_SPEC 2 +#define KT_PAD 3 +#define KT_DEAD 4 +#define KT_CONS 5 +#define KT_CUR 6 +#define KT_SHIFT 7 +#define KT_META 8 +#define KT_ASCII 9 +#define KT_LOCK 10 +#define KT_SLOCK 12 +#define KT_DEAD2 13 +#define KT_BRL 14 + +#define K(t,v) (((t)<<8)|(v)) +#define KTYP(x) ((x) >> 8) +#define KVAL(x) ((x) & 0xff) + +#define K_F1 K(KT_FN,0) +#define K_F2 K(KT_FN,1) +#define K_F3 K(KT_FN,2) +#define K_F4 K(KT_FN,3) +#define K_F5 K(KT_FN,4) +#define K_F6 K(KT_FN,5) +#define K_F7 K(KT_FN,6) +#define K_F8 K(KT_FN,7) +#define K_F9 K(KT_FN,8) +#define K_F10 K(KT_FN,9) +#define K_F11 K(KT_FN,10) +#define K_F12 K(KT_FN,11) +#define K_F13 K(KT_FN,12) +#define K_F14 K(KT_FN,13) +#define K_F15 K(KT_FN,14) +#define K_F16 K(KT_FN,15) +#define K_F17 K(KT_FN,16) +#define K_F18 K(KT_FN,17) +#define K_F19 K(KT_FN,18) +#define K_F20 K(KT_FN,19) +#define K_FIND K(KT_FN,20) +#define K_INSERT K(KT_FN,21) +#define K_REMOVE K(KT_FN,22) +#define K_SELECT K(KT_FN,23) +#define K_PGUP K(KT_FN,24) /* PGUP is a synonym for PRIOR */ +#define K_PGDN K(KT_FN,25) /* PGDN is a synonym for NEXT */ +#define K_MACRO K(KT_FN,26) +#define K_HELP K(KT_FN,27) +#define K_DO K(KT_FN,28) +#define K_PAUSE K(KT_FN,29) +#define K_F21 K(KT_FN,30) +#define K_F22 K(KT_FN,31) +#define K_F23 K(KT_FN,32) +#define K_F24 K(KT_FN,33) +#define K_F25 K(KT_FN,34) +#define K_F26 K(KT_FN,35) +#define K_F27 K(KT_FN,36) +#define K_F28 K(KT_FN,37) +#define K_F29 K(KT_FN,38) +#define K_F30 K(KT_FN,39) +#define K_F31 K(KT_FN,40) +#define K_F32 K(KT_FN,41) +#define K_F33 K(KT_FN,42) +#define K_F34 K(KT_FN,43) +#define K_F35 K(KT_FN,44) +#define K_F36 K(KT_FN,45) +#define K_F37 K(KT_FN,46) +#define K_F38 K(KT_FN,47) +#define K_F39 K(KT_FN,48) +#define K_F40 K(KT_FN,49) +#define K_F41 K(KT_FN,50) +#define K_F42 K(KT_FN,51) +#define K_F43 K(KT_FN,52) +#define K_F44 K(KT_FN,53) +#define K_F45 K(KT_FN,54) +#define K_F46 K(KT_FN,55) +#define K_F47 K(KT_FN,56) +#define K_F48 K(KT_FN,57) +#define K_F49 K(KT_FN,58) +#define K_F50 K(KT_FN,59) +#define K_F51 K(KT_FN,60) +#define K_F52 K(KT_FN,61) +#define K_F53 K(KT_FN,62) +#define K_F54 K(KT_FN,63) +#define K_F55 K(KT_FN,64) +#define K_F56 K(KT_FN,65) +#define K_F57 K(KT_FN,66) +#define K_F58 K(KT_FN,67) +#define K_F59 K(KT_FN,68) +#define K_F60 K(KT_FN,69) +#define K_F61 K(KT_FN,70) +#define K_F62 K(KT_FN,71) +#define K_F63 K(KT_FN,72) +#define K_F64 K(KT_FN,73) +#define K_F65 K(KT_FN,74) +#define K_F66 K(KT_FN,75) +#define K_F67 K(KT_FN,76) +#define K_F68 K(KT_FN,77) +#define K_F69 K(KT_FN,78) +#define K_F70 K(KT_FN,79) +#define K_F71 K(KT_FN,80) +#define K_F72 K(KT_FN,81) +#define K_F73 K(KT_FN,82) +#define K_F74 K(KT_FN,83) +#define K_F75 K(KT_FN,84) +#define K_F76 K(KT_FN,85) +#define K_F77 K(KT_FN,86) +#define K_F78 K(KT_FN,87) +#define K_F79 K(KT_FN,88) +#define K_F80 K(KT_FN,89) +#define K_F81 K(KT_FN,90) +#define K_F82 K(KT_FN,91) +#define K_F83 K(KT_FN,92) +#define K_F84 K(KT_FN,93) +#define K_F85 K(KT_FN,94) +#define K_F86 K(KT_FN,95) +#define K_F87 K(KT_FN,96) +#define K_F88 K(KT_FN,97) +#define K_F89 
K(KT_FN,98) +#define K_F90 K(KT_FN,99) +#define K_F91 K(KT_FN,100) +#define K_F92 K(KT_FN,101) +#define K_F93 K(KT_FN,102) +#define K_F94 K(KT_FN,103) +#define K_F95 K(KT_FN,104) +#define K_F96 K(KT_FN,105) +#define K_F97 K(KT_FN,106) +#define K_F98 K(KT_FN,107) +#define K_F99 K(KT_FN,108) +#define K_F100 K(KT_FN,109) +#define K_F101 K(KT_FN,110) +#define K_F102 K(KT_FN,111) +#define K_F103 K(KT_FN,112) +#define K_F104 K(KT_FN,113) +#define K_F105 K(KT_FN,114) +#define K_F106 K(KT_FN,115) +#define K_F107 K(KT_FN,116) +#define K_F108 K(KT_FN,117) +#define K_F109 K(KT_FN,118) +#define K_F110 K(KT_FN,119) +#define K_F111 K(KT_FN,120) +#define K_F112 K(KT_FN,121) +#define K_F113 K(KT_FN,122) +#define K_F114 K(KT_FN,123) +#define K_F115 K(KT_FN,124) +#define K_F116 K(KT_FN,125) +#define K_F117 K(KT_FN,126) +#define K_F118 K(KT_FN,127) +#define K_F119 K(KT_FN,128) +#define K_F120 K(KT_FN,129) +#define K_F121 K(KT_FN,130) +#define K_F122 K(KT_FN,131) +#define K_F123 K(KT_FN,132) +#define K_F124 K(KT_FN,133) +#define K_F125 K(KT_FN,134) +#define K_F126 K(KT_FN,135) +#define K_F127 K(KT_FN,136) +#define K_F128 K(KT_FN,137) +#define K_F129 K(KT_FN,138) +#define K_F130 K(KT_FN,139) +#define K_F131 K(KT_FN,140) +#define K_F132 K(KT_FN,141) +#define K_F133 K(KT_FN,142) +#define K_F134 K(KT_FN,143) +#define K_F135 K(KT_FN,144) +#define K_F136 K(KT_FN,145) +#define K_F137 K(KT_FN,146) +#define K_F138 K(KT_FN,147) +#define K_F139 K(KT_FN,148) +#define K_F140 K(KT_FN,149) +#define K_F141 K(KT_FN,150) +#define K_F142 K(KT_FN,151) +#define K_F143 K(KT_FN,152) +#define K_F144 K(KT_FN,153) +#define K_F145 K(KT_FN,154) +#define K_F146 K(KT_FN,155) +#define K_F147 K(KT_FN,156) +#define K_F148 K(KT_FN,157) +#define K_F149 K(KT_FN,158) +#define K_F150 K(KT_FN,159) +#define K_F151 K(KT_FN,160) +#define K_F152 K(KT_FN,161) +#define K_F153 K(KT_FN,162) +#define K_F154 K(KT_FN,163) +#define K_F155 K(KT_FN,164) +#define K_F156 K(KT_FN,165) +#define K_F157 K(KT_FN,166) +#define K_F158 K(KT_FN,167) +#define K_F159 K(KT_FN,168) +#define K_F160 K(KT_FN,169) +#define K_F161 K(KT_FN,170) +#define K_F162 K(KT_FN,171) +#define K_F163 K(KT_FN,172) +#define K_F164 K(KT_FN,173) +#define K_F165 K(KT_FN,174) +#define K_F166 K(KT_FN,175) +#define K_F167 K(KT_FN,176) +#define K_F168 K(KT_FN,177) +#define K_F169 K(KT_FN,178) +#define K_F170 K(KT_FN,179) +#define K_F171 K(KT_FN,180) +#define K_F172 K(KT_FN,181) +#define K_F173 K(KT_FN,182) +#define K_F174 K(KT_FN,183) +#define K_F175 K(KT_FN,184) +#define K_F176 K(KT_FN,185) +#define K_F177 K(KT_FN,186) +#define K_F178 K(KT_FN,187) +#define K_F179 K(KT_FN,188) +#define K_F180 K(KT_FN,189) +#define K_F181 K(KT_FN,190) +#define K_F182 K(KT_FN,191) +#define K_F183 K(KT_FN,192) +#define K_F184 K(KT_FN,193) +#define K_F185 K(KT_FN,194) +#define K_F186 K(KT_FN,195) +#define K_F187 K(KT_FN,196) +#define K_F188 K(KT_FN,197) +#define K_F189 K(KT_FN,198) +#define K_F190 K(KT_FN,199) +#define K_F191 K(KT_FN,200) +#define K_F192 K(KT_FN,201) +#define K_F193 K(KT_FN,202) +#define K_F194 K(KT_FN,203) +#define K_F195 K(KT_FN,204) +#define K_F196 K(KT_FN,205) +#define K_F197 K(KT_FN,206) +#define K_F198 K(KT_FN,207) +#define K_F199 K(KT_FN,208) +#define K_F200 K(KT_FN,209) +#define K_F201 K(KT_FN,210) +#define K_F202 K(KT_FN,211) +#define K_F203 K(KT_FN,212) +#define K_F204 K(KT_FN,213) +#define K_F205 K(KT_FN,214) +#define K_F206 K(KT_FN,215) +#define K_F207 K(KT_FN,216) +#define K_F208 K(KT_FN,217) +#define K_F209 K(KT_FN,218) +#define K_F210 K(KT_FN,219) +#define K_F211 K(KT_FN,220) +#define K_F212 
K(KT_FN,221) +#define K_F213 K(KT_FN,222) +#define K_F214 K(KT_FN,223) +#define K_F215 K(KT_FN,224) +#define K_F216 K(KT_FN,225) +#define K_F217 K(KT_FN,226) +#define K_F218 K(KT_FN,227) +#define K_F219 K(KT_FN,228) +#define K_F220 K(KT_FN,229) +#define K_F221 K(KT_FN,230) +#define K_F222 K(KT_FN,231) +#define K_F223 K(KT_FN,232) +#define K_F224 K(KT_FN,233) +#define K_F225 K(KT_FN,234) +#define K_F226 K(KT_FN,235) +#define K_F227 K(KT_FN,236) +#define K_F228 K(KT_FN,237) +#define K_F229 K(KT_FN,238) +#define K_F230 K(KT_FN,239) +#define K_F231 K(KT_FN,240) +#define K_F232 K(KT_FN,241) +#define K_F233 K(KT_FN,242) +#define K_F234 K(KT_FN,243) +#define K_F235 K(KT_FN,244) +#define K_F236 K(KT_FN,245) +#define K_F237 K(KT_FN,246) +#define K_F238 K(KT_FN,247) +#define K_F239 K(KT_FN,248) +#define K_F240 K(KT_FN,249) +#define K_F241 K(KT_FN,250) +#define K_F242 K(KT_FN,251) +#define K_F243 K(KT_FN,252) +#define K_F244 K(KT_FN,253) +#define K_F245 K(KT_FN,254) +#define K_UNDO K(KT_FN,255) + + +#define K_HOLE K(KT_SPEC,0) +#define K_ENTER K(KT_SPEC,1) +#define K_SH_REGS K(KT_SPEC,2) +#define K_SH_MEM K(KT_SPEC,3) +#define K_SH_STAT K(KT_SPEC,4) +#define K_BREAK K(KT_SPEC,5) +#define K_CONS K(KT_SPEC,6) +#define K_CAPS K(KT_SPEC,7) +#define K_NUM K(KT_SPEC,8) +#define K_HOLD K(KT_SPEC,9) +#define K_SCROLLFORW K(KT_SPEC,10) +#define K_SCROLLBACK K(KT_SPEC,11) +#define K_BOOT K(KT_SPEC,12) +#define K_CAPSON K(KT_SPEC,13) +#define K_COMPOSE K(KT_SPEC,14) +#define K_SAK K(KT_SPEC,15) +#define K_DECRCONSOLE K(KT_SPEC,16) +#define K_INCRCONSOLE K(KT_SPEC,17) +#define K_SPAWNCONSOLE K(KT_SPEC,18) +#define K_BARENUMLOCK K(KT_SPEC,19) + +#define K_ALLOCATED K(KT_SPEC,126) /* dynamically allocated keymap */ +#define K_NOSUCHMAP K(KT_SPEC,127) /* returned by KDGKBENT */ + +#define K_P0 K(KT_PAD,0) +#define K_P1 K(KT_PAD,1) +#define K_P2 K(KT_PAD,2) +#define K_P3 K(KT_PAD,3) +#define K_P4 K(KT_PAD,4) +#define K_P5 K(KT_PAD,5) +#define K_P6 K(KT_PAD,6) +#define K_P7 K(KT_PAD,7) +#define K_P8 K(KT_PAD,8) +#define K_P9 K(KT_PAD,9) +#define K_PPLUS K(KT_PAD,10) /* key-pad plus */ +#define K_PMINUS K(KT_PAD,11) /* key-pad minus */ +#define K_PSTAR K(KT_PAD,12) /* key-pad asterisk (star) */ +#define K_PSLASH K(KT_PAD,13) /* key-pad slash */ +#define K_PENTER K(KT_PAD,14) /* key-pad enter */ +#define K_PCOMMA K(KT_PAD,15) /* key-pad comma: kludge... */ +#define K_PDOT K(KT_PAD,16) /* key-pad dot (period): kludge... 
*/ +#define K_PPLUSMINUS K(KT_PAD,17) /* key-pad plus/minus */ +#define K_PPARENL K(KT_PAD,18) /* key-pad left parenthesis */ +#define K_PPARENR K(KT_PAD,19) /* key-pad right parenthesis */ + +#define NR_PAD 20 + +#define K_DGRAVE K(KT_DEAD,0) +#define K_DACUTE K(KT_DEAD,1) +#define K_DCIRCM K(KT_DEAD,2) +#define K_DTILDE K(KT_DEAD,3) +#define K_DDIERE K(KT_DEAD,4) +#define K_DCEDIL K(KT_DEAD,5) + +#define NR_DEAD 6 + +#define K_DOWN K(KT_CUR,0) +#define K_LEFT K(KT_CUR,1) +#define K_RIGHT K(KT_CUR,2) +#define K_UP K(KT_CUR,3) + +#define K_SHIFT K(KT_SHIFT,KG_SHIFT) +#define K_CTRL K(KT_SHIFT,KG_CTRL) +#define K_ALT K(KT_SHIFT,KG_ALT) +#define K_ALTGR K(KT_SHIFT,KG_ALTGR) +#define K_SHIFTL K(KT_SHIFT,KG_SHIFTL) +#define K_SHIFTR K(KT_SHIFT,KG_SHIFTR) +#define K_CTRLL K(KT_SHIFT,KG_CTRLL) +#define K_CTRLR K(KT_SHIFT,KG_CTRLR) +#define K_CAPSSHIFT K(KT_SHIFT,KG_CAPSSHIFT) + +#define K_ASC0 K(KT_ASCII,0) +#define K_ASC1 K(KT_ASCII,1) +#define K_ASC2 K(KT_ASCII,2) +#define K_ASC3 K(KT_ASCII,3) +#define K_ASC4 K(KT_ASCII,4) +#define K_ASC5 K(KT_ASCII,5) +#define K_ASC6 K(KT_ASCII,6) +#define K_ASC7 K(KT_ASCII,7) +#define K_ASC8 K(KT_ASCII,8) +#define K_ASC9 K(KT_ASCII,9) +#define K_HEX0 K(KT_ASCII,10) +#define K_HEX1 K(KT_ASCII,11) +#define K_HEX2 K(KT_ASCII,12) +#define K_HEX3 K(KT_ASCII,13) +#define K_HEX4 K(KT_ASCII,14) +#define K_HEX5 K(KT_ASCII,15) +#define K_HEX6 K(KT_ASCII,16) +#define K_HEX7 K(KT_ASCII,17) +#define K_HEX8 K(KT_ASCII,18) +#define K_HEX9 K(KT_ASCII,19) +#define K_HEXa K(KT_ASCII,20) +#define K_HEXb K(KT_ASCII,21) +#define K_HEXc K(KT_ASCII,22) +#define K_HEXd K(KT_ASCII,23) +#define K_HEXe K(KT_ASCII,24) +#define K_HEXf K(KT_ASCII,25) + +#define NR_ASCII 26 + +#define K_SHIFTLOCK K(KT_LOCK,KG_SHIFT) +#define K_CTRLLOCK K(KT_LOCK,KG_CTRL) +#define K_ALTLOCK K(KT_LOCK,KG_ALT) +#define K_ALTGRLOCK K(KT_LOCK,KG_ALTGR) +#define K_SHIFTLLOCK K(KT_LOCK,KG_SHIFTL) +#define K_SHIFTRLOCK K(KT_LOCK,KG_SHIFTR) +#define K_CTRLLLOCK K(KT_LOCK,KG_CTRLL) +#define K_CTRLRLOCK K(KT_LOCK,KG_CTRLR) +#define K_CAPSSHIFTLOCK K(KT_LOCK,KG_CAPSSHIFT) + +#define K_SHIFT_SLOCK K(KT_SLOCK,KG_SHIFT) +#define K_CTRL_SLOCK K(KT_SLOCK,KG_CTRL) +#define K_ALT_SLOCK K(KT_SLOCK,KG_ALT) +#define K_ALTGR_SLOCK K(KT_SLOCK,KG_ALTGR) +#define K_SHIFTL_SLOCK K(KT_SLOCK,KG_SHIFTL) +#define K_SHIFTR_SLOCK K(KT_SLOCK,KG_SHIFTR) +#define K_CTRLL_SLOCK K(KT_SLOCK,KG_CTRLL) +#define K_CTRLR_SLOCK K(KT_SLOCK,KG_CTRLR) +#define K_CAPSSHIFT_SLOCK K(KT_SLOCK,KG_CAPSSHIFT) + +#define NR_LOCK 9 + +#define K_BRL_BLANK K(KT_BRL, 0) +#define K_BRL_DOT1 K(KT_BRL, 1) +#define K_BRL_DOT2 K(KT_BRL, 2) +#define K_BRL_DOT3 K(KT_BRL, 3) +#define K_BRL_DOT4 K(KT_BRL, 4) +#define K_BRL_DOT5 K(KT_BRL, 5) +#define K_BRL_DOT6 K(KT_BRL, 6) +#define K_BRL_DOT7 K(KT_BRL, 7) +#define K_BRL_DOT8 K(KT_BRL, 8) +#define K_BRL_DOT9 K(KT_BRL, 9) +#define K_BRL_DOT10 K(KT_BRL, 10) + +#define NR_BRL 11 + +#define MAX_DIACR 256 +#endif diff --git a/include/linux/keychord.h b/include/linux/keychord.h new file mode 100644 index 00000000..856a5850 --- /dev/null +++ b/include/linux/keychord.h @@ -0,0 +1,52 @@ +/* + * Key chord input driver + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
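A hedged sketch of how the K()/KTYP()/KVAL() encoding and the keyboard notifier declared in keyboard.h above are typically consumed; KBD_KEYSYM and NOTIFY_OK are assumed to come from linux/notifier.h, and the callback names are hypothetical.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/keyboard.h>
#include <linux/notifier.h>

static int example_kbd_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct keyboard_notifier_param *param = data;

	/* keysym events carry a K(type,value) pair packed into ->value */
	if (action == KBD_KEYSYM && param->down)
		pr_info("keysym type %u, value %u\n",
			KTYP(param->value), KVAL(param->value));

	return NOTIFY_OK;
}

static struct notifier_block example_kbd_nb = {
	.notifier_call = example_kbd_notify,
};

static int __init example_kbd_init(void)
{
	return register_keyboard_notifier(&example_kbd_nb);
}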
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + +#ifndef __LINUX_KEYCHORD_H_ +#define __LINUX_KEYCHORD_H_ + +#include <linux/input.h> + +#define KEYCHORD_VERSION 1 + +/* + * One or more input_keychord structs are written to /dev/keychord + * at once to specify the list of keychords to monitor. + * Reading /dev/keychord returns the id of a keychord when the + * keychord combination is pressed. A keychord is signalled when + * all of the keys in the keycode list are in the pressed state. + * The order in which the keys are pressed does not matter. + * The keychord will not be signalled if keys not in the keycode + * list are pressed. + * Keychords will not be signalled on key release events. + */ +struct input_keychord { + /* should be KEYCHORD_VERSION */ + __u16 version; + /* + * client specified ID, returned from read() + * when this keychord is pressed. + */ + __u16 id; + + /* number of keycodes in this keychord */ + __u16 count; + + /* variable length array of keycodes */ + __u16 keycodes[]; +}; + +#endif /* __LINUX_KEYCHORD_H_ */ diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h new file mode 100644 index 00000000..9b0b865c --- /dev/null +++ b/include/linux/keyctl.h @@ -0,0 +1,59 @@ +/* keyctl.h: keyctl command IDs + * + * Copyright (C) 2004, 2008 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
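A minimal userspace sketch of the /dev/keychord protocol described in keychord.h above, assuming the driver is present; KEY_VOLUMEDOWN and KEY_POWER from linux/input.h are used as an example combination.

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/input.h>
#include <linux/keychord.h>

int main(void)
{
	unsigned char buf[sizeof(struct input_keychord) + 2 * sizeof(__u16)];
	struct input_keychord *kc = (struct input_keychord *)buf;
	__u16 id;
	int fd = open("/dev/keychord", O_RDWR);

	if (fd < 0)
		return 1;

	kc->version = KEYCHORD_VERSION;
	kc->id = 1;			/* returned by read() when triggered */
	kc->count = 2;
	kc->keycodes[0] = KEY_VOLUMEDOWN;
	kc->keycodes[1] = KEY_POWER;

	if (write(fd, buf, sizeof(buf)) < 0)
		return 1;

	/* blocks until both keys are held down at once */
	if (read(fd, &id, sizeof(id)) == sizeof(id))
		printf("keychord %u triggered\n", id);

	close(fd);
	return 0;
}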
+ */ + +#ifndef _LINUX_KEYCTL_H +#define _LINUX_KEYCTL_H + +/* special process keyring shortcut IDs */ +#define KEY_SPEC_THREAD_KEYRING -1 /* - key ID for thread-specific keyring */ +#define KEY_SPEC_PROCESS_KEYRING -2 /* - key ID for process-specific keyring */ +#define KEY_SPEC_SESSION_KEYRING -3 /* - key ID for session-specific keyring */ +#define KEY_SPEC_USER_KEYRING -4 /* - key ID for UID-specific keyring */ +#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */ +#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */ +#define KEY_SPEC_REQKEY_AUTH_KEY -7 /* - key ID for assumed request_key auth key */ +#define KEY_SPEC_REQUESTOR_KEYRING -8 /* - key ID for request_key() dest keyring */ + +/* request-key default keyrings */ +#define KEY_REQKEY_DEFL_NO_CHANGE -1 +#define KEY_REQKEY_DEFL_DEFAULT 0 +#define KEY_REQKEY_DEFL_THREAD_KEYRING 1 +#define KEY_REQKEY_DEFL_PROCESS_KEYRING 2 +#define KEY_REQKEY_DEFL_SESSION_KEYRING 3 +#define KEY_REQKEY_DEFL_USER_KEYRING 4 +#define KEY_REQKEY_DEFL_USER_SESSION_KEYRING 5 +#define KEY_REQKEY_DEFL_GROUP_KEYRING 6 +#define KEY_REQKEY_DEFL_REQUESTOR_KEYRING 7 + +/* keyctl commands */ +#define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */ +#define KEYCTL_JOIN_SESSION_KEYRING 1 /* join or start named session keyring */ +#define KEYCTL_UPDATE 2 /* update a key */ +#define KEYCTL_REVOKE 3 /* revoke a key */ +#define KEYCTL_CHOWN 4 /* set ownership of a key */ +#define KEYCTL_SETPERM 5 /* set perms on a key */ +#define KEYCTL_DESCRIBE 6 /* describe a key */ +#define KEYCTL_CLEAR 7 /* clear contents of a keyring */ +#define KEYCTL_LINK 8 /* link a key into a keyring */ +#define KEYCTL_UNLINK 9 /* unlink a key from a keyring */ +#define KEYCTL_SEARCH 10 /* search for a key in a keyring */ +#define KEYCTL_READ 11 /* read a key or keyring's contents */ +#define KEYCTL_INSTANTIATE 12 /* instantiate a partially constructed key */ +#define KEYCTL_NEGATE 13 /* negate a partially constructed key */ +#define KEYCTL_SET_REQKEY_KEYRING 14 /* set default request-key keyring */ +#define KEYCTL_SET_TIMEOUT 15 /* set key timeout */ +#define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */ +#define KEYCTL_GET_SECURITY 17 /* get key security label */ +#define KEYCTL_SESSION_TO_PARENT 18 /* apply session keyring to parent process */ +#define KEYCTL_REJECT 19 /* reject a partially constructed key */ +#define KEYCTL_INSTANTIATE_IOV 20 /* instantiate a partially constructed key */ + +#endif /* _LINUX_KEYCTL_H */ diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h new file mode 100644 index 00000000..a2ac49e5 --- /dev/null +++ b/include/linux/keyreset.h @@ -0,0 +1,28 @@ +/* + * include/linux/keyreset.h - platform data structure for resetkeys driver + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
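A userspace sketch of driving the command IDs above through the raw keyctl and add_key system calls; real programs would normally use libkeyutils instead, and the "user" key type, description, and payload are example assumptions.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

int main(void)
{
	long keyring, key;

	/* resolve (and, with create=1, instantiate) the session keyring */
	keyring = syscall(SYS_keyctl, KEYCTL_GET_KEYRING_ID,
			  KEY_SPEC_SESSION_KEYRING, 1);
	if (keyring < 0)
		return 1;

	/* add_key(type, description, payload, plen, dest_keyring) */
	key = syscall(SYS_add_key, "user", "example:token", "secret", 6,
		      (int)keyring);
	if (key < 0)
		return 1;

	printf("keyring %ld holds key %ld\n", keyring, key);
	return 0;
}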
+ * + */ + +#ifndef _LINUX_KEYRESET_H +#define _LINUX_KEYRESET_H + +#define KEYRESET_NAME "keyreset" + +struct keyreset_platform_data { + int (*reset_fn)(void); + int *keys_up; + int keys_down[]; /* 0 terminated */ +}; + +#endif /* _LINUX_KEYRESET_H */ diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h new file mode 100644 index 00000000..10308c6a --- /dev/null +++ b/include/linux/kfifo.h @@ -0,0 +1,852 @@ +/* + * A generic kernel FIFO implementation + * + * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef _LINUX_KFIFO_H +#define _LINUX_KFIFO_H + +/* + * How to porting drivers to the new generic FIFO API: + * + * - Modify the declaration of the "struct kfifo *" object into a + * in-place "struct kfifo" object + * - Init the in-place object with kfifo_alloc() or kfifo_init() + * Note: The address of the in-place "struct kfifo" object must be + * passed as the first argument to this functions + * - Replace the use of __kfifo_put into kfifo_in and __kfifo_get + * into kfifo_out + * - Replace the use of kfifo_put into kfifo_in_spinlocked and kfifo_get + * into kfifo_out_spinlocked + * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc + * must be passed now to the kfifo_in_spinlocked and kfifo_out_spinlocked + * as the last parameter + * - The formerly __kfifo_* functions are renamed into kfifo_* + */ + +/* + * Note about locking : There is no locking required until only * one reader + * and one writer is using the fifo and no kfifo_reset() will be * called + * kfifo_reset_out() can be safely used, until it will be only called + * in the reader thread. + * For multiple writer and one reader there is only a need to lock the writer. + * And vice versa for only one writer and multiple reader there is only a need + * to lock the reader. + */ + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/stddef.h> +#include <linux/scatterlist.h> + +struct __kfifo { + unsigned int in; + unsigned int out; + unsigned int mask; + unsigned int esize; + void *data; +}; + +#define __STRUCT_KFIFO_COMMON(datatype, recsize, ptrtype) \ + union { \ + struct __kfifo kfifo; \ + datatype *type; \ + char (*rectype)[recsize]; \ + ptrtype *ptr; \ + const ptrtype *ptr_const; \ + } + +#define __STRUCT_KFIFO(type, size, recsize, ptrtype) \ +{ \ + __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \ + type buf[((size < 2) || (size & (size - 1))) ? 
-1 : size]; \ +} + +#define STRUCT_KFIFO(type, size) \ + struct __STRUCT_KFIFO(type, size, 0, type) + +#define __STRUCT_KFIFO_PTR(type, recsize, ptrtype) \ +{ \ + __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \ + type buf[0]; \ +} + +#define STRUCT_KFIFO_PTR(type) \ + struct __STRUCT_KFIFO_PTR(type, 0, type) + +/* + * define compatibility "struct kfifo" for dynamic allocated fifos + */ +struct kfifo __STRUCT_KFIFO_PTR(unsigned char, 0, void); + +#define STRUCT_KFIFO_REC_1(size) \ + struct __STRUCT_KFIFO(unsigned char, size, 1, void) + +#define STRUCT_KFIFO_REC_2(size) \ + struct __STRUCT_KFIFO(unsigned char, size, 2, void) + +/* + * define kfifo_rec types + */ +struct kfifo_rec_ptr_1 __STRUCT_KFIFO_PTR(unsigned char, 1, void); +struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void); + +/* + * helper macro to distinguish between real in place fifo where the fifo + * array is a part of the structure and the fifo type where the array is + * outside of the fifo structure. + */ +#define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo)) + +/** + * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object + * @fifo: name of the declared fifo + * @type: type of the fifo elements + */ +#define DECLARE_KFIFO_PTR(fifo, type) STRUCT_KFIFO_PTR(type) fifo + +/** + * DECLARE_KFIFO - macro to declare a fifo object + * @fifo: name of the declared fifo + * @type: type of the fifo elements + * @size: the number of elements in the fifo, this must be a power of 2 + */ +#define DECLARE_KFIFO(fifo, type, size) STRUCT_KFIFO(type, size) fifo + +/** + * INIT_KFIFO - Initialize a fifo declared by DECLARE_KFIFO + * @fifo: name of the declared fifo datatype + */ +#define INIT_KFIFO(fifo) \ +(void)({ \ + typeof(&(fifo)) __tmp = &(fifo); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + __kfifo->in = 0; \ + __kfifo->out = 0; \ + __kfifo->mask = __is_kfifo_ptr(__tmp) ? 0 : ARRAY_SIZE(__tmp->buf) - 1;\ + __kfifo->esize = sizeof(*__tmp->buf); \ + __kfifo->data = __is_kfifo_ptr(__tmp) ? NULL : __tmp->buf; \ +}) + +/** + * DEFINE_KFIFO - macro to define and initialize a fifo + * @fifo: name of the declared fifo datatype + * @type: type of the fifo elements + * @size: the number of elements in the fifo, this must be a power of 2 + * + * Note: the macro can be used for global and local fifo data type variables. + */ +#define DEFINE_KFIFO(fifo, type, size) \ + DECLARE_KFIFO(fifo, type, size) = \ + (typeof(fifo)) { \ + { \ + { \ + .in = 0, \ + .out = 0, \ + .mask = __is_kfifo_ptr(&(fifo)) ? \ + 0 : \ + ARRAY_SIZE((fifo).buf) - 1, \ + .esize = sizeof(*(fifo).buf), \ + .data = __is_kfifo_ptr(&(fifo)) ? \ + NULL : \ + (fifo).buf, \ + } \ + } \ + } + + +static inline unsigned int __must_check +__kfifo_uint_must_check_helper(unsigned int val) +{ + return val; +} + +static inline int __must_check +__kfifo_int_must_check_helper(int val) +{ + return val; +} + +/** + * kfifo_initialized - Check if the fifo is initialized + * @fifo: address of the fifo to check + * + * Return %true if fifo is initialized, otherwise %false. + * Assumes the fifo was 0 before. 
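A minimal sketch of the two allocation styles described above: a fifo with in-place storage sized at compile time, and a pointer-style fifo whose buffer is obtained with kfifo_alloc() (documented further down); the element type and sizes are arbitrary.

#include <linux/kfifo.h>
#include <linux/slab.h>

/* storage is part of the object; size must be a power of 2 */
static DEFINE_KFIFO(static_fifo, int, 32);

/* buffer allocated at runtime and released with kfifo_free() */
static DECLARE_KFIFO_PTR(dynamic_fifo, int);

static int example_fifo_setup(void)
{
	int err = kfifo_alloc(&dynamic_fifo, 128, GFP_KERNEL);

	if (err)
		return err;
	/* ... use the fifos ... */
	kfifo_free(&dynamic_fifo);
	return 0;
}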
+ */ +#define kfifo_initialized(fifo) ((fifo)->kfifo.mask) + +/** + * kfifo_esize - returns the size of the element managed by the fifo + * @fifo: address of the fifo to be used + */ +#define kfifo_esize(fifo) ((fifo)->kfifo.esize) + +/** + * kfifo_recsize - returns the size of the record length field + * @fifo: address of the fifo to be used + */ +#define kfifo_recsize(fifo) (sizeof(*(fifo)->rectype)) + +/** + * kfifo_size - returns the size of the fifo in elements + * @fifo: address of the fifo to be used + */ +#define kfifo_size(fifo) ((fifo)->kfifo.mask + 1) + +/** + * kfifo_reset - removes the entire fifo content + * @fifo: address of the fifo to be used + * + * Note: usage of kfifo_reset() is dangerous. It should be only called when the + * fifo is exclusived locked or when it is secured that no other thread is + * accessing the fifo. + */ +#define kfifo_reset(fifo) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + __tmp->kfifo.in = __tmp->kfifo.out = 0; \ +}) + +/** + * kfifo_reset_out - skip fifo content + * @fifo: address of the fifo to be used + * + * Note: The usage of kfifo_reset_out() is safe until it will be only called + * from the reader thread and there is only one concurrent reader. Otherwise + * it is dangerous and must be handled in the same way as kfifo_reset(). + */ +#define kfifo_reset_out(fifo) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + __tmp->kfifo.out = __tmp->kfifo.in; \ +}) + +/** + * kfifo_len - returns the number of used elements in the fifo + * @fifo: address of the fifo to be used + */ +#define kfifo_len(fifo) \ +({ \ + typeof((fifo) + 1) __tmpl = (fifo); \ + __tmpl->kfifo.in - __tmpl->kfifo.out; \ +}) + +/** + * kfifo_is_empty - returns true if the fifo is empty + * @fifo: address of the fifo to be used + */ +#define kfifo_is_empty(fifo) \ +({ \ + typeof((fifo) + 1) __tmpq = (fifo); \ + __tmpq->kfifo.in == __tmpq->kfifo.out; \ +}) + +/** + * kfifo_is_full - returns true if the fifo is full + * @fifo: address of the fifo to be used + */ +#define kfifo_is_full(fifo) \ +({ \ + typeof((fifo) + 1) __tmpq = (fifo); \ + kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ +}) + +/** + * kfifo_avail - returns the number of unused elements in the fifo + * @fifo: address of the fifo to be used + */ +#define kfifo_avail(fifo) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmpq = (fifo); \ + const size_t __recsize = sizeof(*__tmpq->rectype); \ + unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ + (__recsize) ? ((__avail <= __recsize) ? 0 : \ + __kfifo_max_r(__avail - __recsize, __recsize)) : \ + __avail; \ +}) \ +) + +/** + * kfifo_skip - skip output data + * @fifo: address of the fifo to be used + */ +#define kfifo_skip(fifo) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__recsize) \ + __kfifo_skip_r(__kfifo, __recsize); \ + else \ + __kfifo->out++; \ +}) + +/** + * kfifo_peek_len - gets the size of the next fifo record + * @fifo: address of the fifo to be used + * + * This function returns the size of the next fifo record in number of bytes. + */ +#define kfifo_peek_len(fifo) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (!__recsize) ? 
kfifo_len(__tmp) * sizeof(*__tmp->type) : \ + __kfifo_len_r(__kfifo, __recsize); \ +}) \ +) + +/** + * kfifo_alloc - dynamically allocates a new fifo buffer + * @fifo: pointer to the fifo + * @size: the number of elements in the fifo, this must be a power of 2 + * @gfp_mask: get_free_pages mask, passed to kmalloc() + * + * This macro dynamically allocates a new fifo buffer. + * + * The numer of elements will be rounded-up to a power of 2. + * The fifo will be release with kfifo_free(). + * Return 0 if no error, otherwise an error code. + */ +#define kfifo_alloc(fifo, size, gfp_mask) \ +__kfifo_int_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + __is_kfifo_ptr(__tmp) ? \ + __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ + -EINVAL; \ +}) \ +) + +/** + * kfifo_free - frees the fifo + * @fifo: the fifo to be freed + */ +#define kfifo_free(fifo) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__is_kfifo_ptr(__tmp)) \ + __kfifo_free(__kfifo); \ +}) + +/** + * kfifo_init - initialize a fifo using a preallocated buffer + * @fifo: the fifo to assign the buffer + * @buffer: the preallocated buffer to be used + * @size: the size of the internal buffer, this have to be a power of 2 + * + * This macro initialize a fifo using a preallocated buffer. + * + * The numer of elements will be rounded-up to a power of 2. + * Return 0 if no error, otherwise an error code. + */ +#define kfifo_init(fifo, buffer, size) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + __is_kfifo_ptr(__tmp) ? \ + __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ + -EINVAL; \ +}) + +/** + * kfifo_put - put data into the fifo + * @fifo: address of the fifo to be used + * @val: the data to be added + * + * This macro copies the given value into the fifo. + * It returns 0 if the fifo was full. Otherwise it returns the number + * processed elements. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_put(fifo, val) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((val) + 1) __val = (val); \ + unsigned int __ret; \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (0) { \ + typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \ + __dummy = (typeof(__val))NULL; \ + } \ + if (__recsize) \ + __ret = __kfifo_in_r(__kfifo, __val, sizeof(*__val), \ + __recsize); \ + else { \ + __ret = !kfifo_is_full(__tmp); \ + if (__ret) { \ + (__is_kfifo_ptr(__tmp) ? \ + ((typeof(__tmp->type))__kfifo->data) : \ + (__tmp->buf) \ + )[__kfifo->in & __tmp->kfifo.mask] = \ + *(typeof(__tmp->type))__val; \ + smp_wmb(); \ + __kfifo->in++; \ + } \ + } \ + __ret; \ +}) + +/** + * kfifo_get - get data from the fifo + * @fifo: address of the fifo to be used + * @val: the var where to store the data to be added + * + * This macro reads the data from the fifo. + * It returns 0 if the fifo was empty. Otherwise it returns the number + * processed elements. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. 
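A sketch of single-element use with one producer and one consumer, which per the locking notes above needs no extra locking; note that in the API version shown here both kfifo_put() and kfifo_get() take a pointer to the element.

#include <linux/kernel.h>
#include <linux/kfifo.h>

static DEFINE_KFIFO(event_fifo, int, 16);

static void example_producer(int event)
{
	if (!kfifo_put(&event_fifo, &event))
		pr_warn("event fifo full, dropping %d\n", event);
}

static void example_consumer(void)
{
	int event;

	while (kfifo_get(&event_fifo, &event))
		pr_info("handling event %d\n", event);
}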
+ */ +#define kfifo_get(fifo, val) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((val) + 1) __val = (val); \ + unsigned int __ret; \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (0) \ + __val = (typeof(__tmp->ptr))0; \ + if (__recsize) \ + __ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \ + __recsize); \ + else { \ + __ret = !kfifo_is_empty(__tmp); \ + if (__ret) { \ + *(typeof(__tmp->type))__val = \ + (__is_kfifo_ptr(__tmp) ? \ + ((typeof(__tmp->type))__kfifo->data) : \ + (__tmp->buf) \ + )[__kfifo->out & __tmp->kfifo.mask]; \ + smp_wmb(); \ + __kfifo->out++; \ + } \ + } \ + __ret; \ +}) \ +) + +/** + * kfifo_peek - get data from the fifo without removing + * @fifo: address of the fifo to be used + * @val: the var where to store the data to be added + * + * This reads the data from the fifo without removing it from the fifo. + * It returns 0 if the fifo was empty. Otherwise it returns the number + * processed elements. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_peek(fifo, val) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((val) + 1) __val = (val); \ + unsigned int __ret; \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (0) \ + __val = (typeof(__tmp->ptr))NULL; \ + if (__recsize) \ + __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \ + __recsize); \ + else { \ + __ret = !kfifo_is_empty(__tmp); \ + if (__ret) { \ + *(typeof(__tmp->type))__val = \ + (__is_kfifo_ptr(__tmp) ? \ + ((typeof(__tmp->type))__kfifo->data) : \ + (__tmp->buf) \ + )[__kfifo->out & __tmp->kfifo.mask]; \ + smp_wmb(); \ + } \ + } \ + __ret; \ +}) \ +) + +/** + * kfifo_in - put data into the fifo + * @fifo: address of the fifo to be used + * @buf: the data to be added + * @n: number of elements to be added + * + * This macro copies the given buffer into the fifo and returns the + * number of copied elements. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_in(fifo, buf, n) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((buf) + 1) __buf = (buf); \ + unsigned long __n = (n); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (0) { \ + typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \ + __dummy = (typeof(__buf))NULL; \ + } \ + (__recsize) ?\ + __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \ + __kfifo_in(__kfifo, __buf, __n); \ +}) + +/** + * kfifo_in_spinlocked - put data into the fifo using a spinlock for locking + * @fifo: address of the fifo to be used + * @buf: the data to be added + * @n: number of elements to be added + * @lock: pointer to the spinlock to use for locking + * + * This macro copies the given values buffer into the fifo and returns the + * number of copied elements. 
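A sketch of buffer-at-a-time copies following the locking rule above: several writers serialized with a spinlock, a single reader left unlocked; the names are illustrative.

#include <linux/kfifo.h>
#include <linux/spinlock.h>

static DEFINE_KFIFO(byte_fifo, unsigned char, 256);
static DEFINE_SPINLOCK(byte_fifo_wlock);

/* may be called from several producers concurrently */
static unsigned int example_push(const unsigned char *buf, unsigned int len)
{
	return kfifo_in_spinlocked(&byte_fifo, buf, len, &byte_fifo_wlock);
}

/* single consumer: no locking needed on the read side */
static unsigned int example_pop(unsigned char *buf, unsigned int len)
{
	return kfifo_out(&byte_fifo, buf, len);
}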
+ */ +#define kfifo_in_spinlocked(fifo, buf, n, lock) \ +({ \ + unsigned long __flags; \ + unsigned int __ret; \ + spin_lock_irqsave(lock, __flags); \ + __ret = kfifo_in(fifo, buf, n); \ + spin_unlock_irqrestore(lock, __flags); \ + __ret; \ +}) + +/* alias for kfifo_in_spinlocked, will be removed in a future release */ +#define kfifo_in_locked(fifo, buf, n, lock) \ + kfifo_in_spinlocked(fifo, buf, n, lock) + +/** + * kfifo_out - get data from the fifo + * @fifo: address of the fifo to be used + * @buf: pointer to the storage buffer + * @n: max. number of elements to get + * + * This macro get some data from the fifo and return the numbers of elements + * copied. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_out(fifo, buf, n) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((buf) + 1) __buf = (buf); \ + unsigned long __n = (n); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (0) { \ + typeof(__tmp->ptr) __dummy = NULL; \ + __buf = __dummy; \ + } \ + (__recsize) ?\ + __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \ + __kfifo_out(__kfifo, __buf, __n); \ +}) \ +) + +/** + * kfifo_out_spinlocked - get data from the fifo using a spinlock for locking + * @fifo: address of the fifo to be used + * @buf: pointer to the storage buffer + * @n: max. number of elements to get + * @lock: pointer to the spinlock to use for locking + * + * This macro get the data from the fifo and return the numbers of elements + * copied. + */ +#define kfifo_out_spinlocked(fifo, buf, n, lock) \ +__kfifo_uint_must_check_helper( \ +({ \ + unsigned long __flags; \ + unsigned int __ret; \ + spin_lock_irqsave(lock, __flags); \ + __ret = kfifo_out(fifo, buf, n); \ + spin_unlock_irqrestore(lock, __flags); \ + __ret; \ +}) \ +) + +/* alias for kfifo_out_spinlocked, will be removed in a future release */ +#define kfifo_out_locked(fifo, buf, n, lock) \ + kfifo_out_spinlocked(fifo, buf, n, lock) + +/** + * kfifo_from_user - puts some data from user space into the fifo + * @fifo: address of the fifo to be used + * @from: pointer to the data to be added + * @len: the length of the data to be added + * @copied: pointer to output variable to store the number of copied bytes + * + * This macro copies at most @len bytes from the @from into the + * fifo, depending of the available space and returns -EFAULT/0. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_from_user(fifo, from, len, copied) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + const void __user *__from = (from); \ + unsigned int __len = (len); \ + unsigned int *__copied = (copied); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? \ + __kfifo_from_user_r(__kfifo, __from, __len, __copied, __recsize) : \ + __kfifo_from_user(__kfifo, __from, __len, __copied); \ +}) \ +) + +/** + * kfifo_to_user - copies data from the fifo into user space + * @fifo: address of the fifo to be used + * @to: where the data must be copied + * @len: the size of the destination buffer + * @copied: pointer to output variable to store the number of copied bytes + * + * This macro copies at most @len bytes from the fifo into the + * @to buffer and returns -EFAULT/0. 
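A sketch of the usual pairing of kfifo_from_user()/kfifo_to_user() with a character device's write and read handlers (the kernel's samples/kfifo directory carries fuller versions of this pattern); the handler names here are hypothetical.

#include <linux/fs.h>
#include <linux/kfifo.h>

static DEFINE_KFIFO(chardev_fifo, unsigned char, 1024);

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret = kfifo_from_user(&chardev_fifo, buf, count, &copied);

	return ret ? ret : copied;
}

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret = kfifo_to_user(&chardev_fifo, buf, count, &copied);

	return ret ? ret : copied;
}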
+ * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_to_user(fifo, to, len, copied) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + void __user *__to = (to); \ + unsigned int __len = (len); \ + unsigned int *__copied = (copied); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? \ + __kfifo_to_user_r(__kfifo, __to, __len, __copied, __recsize) : \ + __kfifo_to_user(__kfifo, __to, __len, __copied); \ +}) \ +) + +/** + * kfifo_dma_in_prepare - setup a scatterlist for DMA input + * @fifo: address of the fifo to be used + * @sgl: pointer to the scatterlist array + * @nents: number of entries in the scatterlist array + * @len: number of elements to transfer + * + * This macro fills a scatterlist for DMA input. + * It returns the number entries in the scatterlist array. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macros. + */ +#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct scatterlist *__sgl = (sgl); \ + int __nents = (nents); \ + unsigned int __len = (len); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? \ + __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ + __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \ +}) + +/** + * kfifo_dma_in_finish - finish a DMA IN operation + * @fifo: address of the fifo to be used + * @len: number of bytes to received + * + * This macro finish a DMA IN operation. The in counter will be updated by + * the len parameter. No error checking will be done. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macros. + */ +#define kfifo_dma_in_finish(fifo, len) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + unsigned int __len = (len); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__recsize) \ + __kfifo_dma_in_finish_r(__kfifo, __len, __recsize); \ + else \ + __kfifo->in += __len / sizeof(*__tmp->type); \ +}) + +/** + * kfifo_dma_out_prepare - setup a scatterlist for DMA output + * @fifo: address of the fifo to be used + * @sgl: pointer to the scatterlist array + * @nents: number of entries in the scatterlist array + * @len: number of elements to transfer + * + * This macro fills a scatterlist for DMA output which at most @len bytes + * to transfer. + * It returns the number entries in the scatterlist array. + * A zero means there is no space available and the scatterlist is not filled. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macros. + */ +#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + struct scatterlist *__sgl = (sgl); \ + int __nents = (nents); \ + unsigned int __len = (len); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? 
\ + __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ + __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \ +}) + +/** + * kfifo_dma_out_finish - finish a DMA OUT operation + * @fifo: address of the fifo to be used + * @len: number of bytes transferd + * + * This macro finish a DMA OUT operation. The out counter will be updated by + * the len parameter. No error checking will be done. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macros. + */ +#define kfifo_dma_out_finish(fifo, len) \ +(void)({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + unsigned int __len = (len); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (__recsize) \ + __kfifo_dma_out_finish_r(__kfifo, __recsize); \ + else \ + __kfifo->out += __len / sizeof(*__tmp->type); \ +}) + +/** + * kfifo_out_peek - gets some data from the fifo + * @fifo: address of the fifo to be used + * @buf: pointer to the storage buffer + * @n: max. number of elements to get + * + * This macro get the data from the fifo and return the numbers of elements + * copied. The data is not removed from the fifo. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_out_peek(fifo, buf, n) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((buf) + 1) __buf = (buf); \ + unsigned long __n = (n); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + if (0) { \ + typeof(__tmp->ptr) __dummy __attribute__ ((unused)) = NULL; \ + __buf = __dummy; \ + } \ + (__recsize) ? \ + __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \ + __kfifo_out_peek(__kfifo, __buf, __n); \ +}) \ +) + +extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, + size_t esize, gfp_t gfp_mask); + +extern void __kfifo_free(struct __kfifo *fifo); + +extern int __kfifo_init(struct __kfifo *fifo, void *buffer, + unsigned int size, size_t esize); + +extern unsigned int __kfifo_in(struct __kfifo *fifo, + const void *buf, unsigned int len); + +extern unsigned int __kfifo_out(struct __kfifo *fifo, + void *buf, unsigned int len); + +extern int __kfifo_from_user(struct __kfifo *fifo, + const void __user *from, unsigned long len, unsigned int *copied); + +extern int __kfifo_to_user(struct __kfifo *fifo, + void __user *to, unsigned long len, unsigned int *copied); + +extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len); + +extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len); + +extern unsigned int __kfifo_out_peek(struct __kfifo *fifo, + void *buf, unsigned int len); + +extern unsigned int __kfifo_in_r(struct __kfifo *fifo, + const void *buf, unsigned int len, size_t recsize); + +extern unsigned int __kfifo_out_r(struct __kfifo *fifo, + void *buf, unsigned int len, size_t recsize); + +extern int __kfifo_from_user_r(struct __kfifo *fifo, + const void __user *from, unsigned long len, unsigned int *copied, + size_t recsize); + +extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, + unsigned long len, unsigned int *copied, size_t recsize); + +extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); + +extern void 
__kfifo_dma_in_finish_r(struct __kfifo *fifo, + unsigned int len, size_t recsize); + +extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); + +extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize); + +extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); + +extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize); + +extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, + void *buf, unsigned int len, size_t recsize); + +extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize); + +#endif diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h new file mode 100644 index 00000000..c4d2fc19 --- /dev/null +++ b/include/linux/kgdb.h @@ -0,0 +1,313 @@ +/* + * This provides the callbacks and functions that KGDB needs to share between + * the core, I/O and arch-specific portions. + * + * Author: Amit Kale <amitkale@linsyssoft.com> and + * Tom Rini <trini@kernel.crashing.org> + * + * 2001-2004 (c) Amit S. Kale and 2003-2005 (c) MontaVista Software, Inc. + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ +#ifndef _KGDB_H_ +#define _KGDB_H_ + +#include <linux/serial_8250.h> +#include <linux/linkage.h> +#include <linux/init.h> +#include <linux/atomic.h> +#ifdef CONFIG_HAVE_ARCH_KGDB +#include <asm/kgdb.h> +#endif + +#ifdef CONFIG_KGDB +struct pt_regs; + +/** + * kgdb_skipexception - (optional) exit kgdb_handle_exception early + * @exception: Exception vector number + * @regs: Current &struct pt_regs. + * + * On some architectures it is required to skip a breakpoint + * exception when it occurs after a breakpoint has been removed. + * This can be implemented in the architecture specific portion of kgdb. + */ +extern int kgdb_skipexception(int exception, struct pt_regs *regs); + +struct tasklet_struct; +struct task_struct; +struct uart_port; + +/** + * kgdb_breakpoint - compiled in breakpoint + * + * This will be implemented as a static inline per architecture. This + * function is called by the kgdb core to execute an architecture + * specific trap to cause kgdb to enter the exception processing. 
+ * + */ +void kgdb_breakpoint(void); + +extern int kgdb_connected; +extern int kgdb_io_module_registered; + +extern atomic_t kgdb_setting_breakpoint; +extern atomic_t kgdb_cpu_doing_single_step; + +extern struct task_struct *kgdb_usethread; +extern struct task_struct *kgdb_contthread; + +enum kgdb_bptype { + BP_BREAKPOINT = 0, + BP_HARDWARE_BREAKPOINT, + BP_WRITE_WATCHPOINT, + BP_READ_WATCHPOINT, + BP_ACCESS_WATCHPOINT, + BP_POKE_BREAKPOINT, +}; + +enum kgdb_bpstate { + BP_UNDEFINED = 0, + BP_REMOVED, + BP_SET, + BP_ACTIVE +}; + +struct kgdb_bkpt { + unsigned long bpt_addr; + unsigned char saved_instr[BREAK_INSTR_SIZE]; + enum kgdb_bptype type; + enum kgdb_bpstate state; +}; + +struct dbg_reg_def_t { + char *name; + int size; + int offset; +}; + +#ifndef DBG_MAX_REG_NUM +#define DBG_MAX_REG_NUM 0 +#else +extern struct dbg_reg_def_t dbg_reg_def[]; +extern char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs); +extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs); +#endif +#ifndef KGDB_MAX_BREAKPOINTS +# define KGDB_MAX_BREAKPOINTS 1000 +#endif + +#define KGDB_HW_BREAKPOINT 1 + +/* + * Functions each KGDB-supporting architecture must provide: + */ + +/** + * kgdb_arch_init - Perform any architecture specific initalization. + * + * This function will handle the initalization of any architecture + * specific callbacks. + */ +extern int kgdb_arch_init(void); + +/** + * kgdb_arch_exit - Perform any architecture specific uninitalization. + * + * This function will handle the uninitalization of any architecture + * specific callbacks, for dynamic registration and unregistration. + */ +extern void kgdb_arch_exit(void); + +/** + * pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs + * @gdb_regs: A pointer to hold the registers in the order GDB wants. + * @regs: The &struct pt_regs of the current process. + * + * Convert the pt_regs in @regs into the format for registers that + * GDB expects, stored in @gdb_regs. + */ +extern void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs); + +/** + * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs + * @gdb_regs: A pointer to hold the registers in the order GDB wants. + * @p: The &struct task_struct of the desired process. + * + * Convert the register values of the sleeping process in @p to + * the format that GDB expects. + * This function is called when kgdb does not have access to the + * &struct pt_regs and therefore it should fill the gdb registers + * @gdb_regs with what has been saved in &struct thread_struct + * thread field during switch_to. + */ +extern void +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p); + +/** + * gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs. + * @gdb_regs: A pointer to hold the registers we've received from GDB. + * @regs: A pointer to a &struct pt_regs to hold these values in. + * + * Convert the GDB regs in @gdb_regs into the pt_regs, and store them + * in @regs. + */ +extern void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs); + +/** + * kgdb_arch_handle_exception - Handle architecture specific GDB packets. + * @vector: The error vector of the exception that happened. + * @signo: The signal number of the exception that happened. + * @err_code: The error code of the exception that happened. + * @remcom_in_buffer: The buffer of the packet we have read. + * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. + * @regs: The &struct pt_regs of the current process. 
+ * + * This function MUST handle the 'c' and 's' command packets, + * as well packets to set / remove a hardware breakpoint, if used. + * If there are additional packets which the hardware needs to handle, + * they are handled here. The code should return -1 if it wants to + * process more packets, and a %0 or %1 if it wants to exit from the + * kgdb callback. + */ +extern int +kgdb_arch_handle_exception(int vector, int signo, int err_code, + char *remcom_in_buffer, + char *remcom_out_buffer, + struct pt_regs *regs); + +/** + * kgdb_roundup_cpus - Get other CPUs into a holding pattern + * @flags: Current IRQ state + * + * On SMP systems, we need to get the attention of the other CPUs + * and get them into a known state. This should do what is needed + * to get the other CPUs to call kgdb_wait(). Note that on some arches, + * the NMI approach is not used for rounding up all the CPUs. For example, + * in case of MIPS, smp_call_function() is used to roundup CPUs. In + * this case, we have to make sure that interrupts are enabled before + * calling smp_call_function(). The argument to this function is + * the flags that will be used when restoring the interrupts. There is + * local_irq_save() call before kgdb_roundup_cpus(). + * + * On non-SMP systems, this is not called. + */ +extern void kgdb_roundup_cpus(unsigned long flags); + +/** + * kgdb_arch_set_pc - Generic call back to the program counter + * @regs: Current &struct pt_regs. + * @pc: The new value for the program counter + * + * This function handles updating the program counter and requires an + * architecture specific implementation. + */ +extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc); + + +/* Optional functions. */ +extern int kgdb_validate_break_address(unsigned long addr); +extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt); +extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt); + +/** + * kgdb_arch_late - Perform any architecture specific initalization. + * + * This function will handle the late initalization of any + * architecture specific callbacks. This is an optional function for + * handling things like late initialization of hw breakpoints. The + * default implementation does nothing. + */ +extern void kgdb_arch_late(void); + + +/** + * struct kgdb_arch - Describe architecture specific values. + * @gdb_bpt_instr: The instruction to trigger a breakpoint. + * @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT. + * @set_breakpoint: Allow an architecture to specify how to set a software + * breakpoint. + * @remove_breakpoint: Allow an architecture to specify how to remove a + * software breakpoint. + * @set_hw_breakpoint: Allow an architecture to specify how to set a hardware + * breakpoint. + * @remove_hw_breakpoint: Allow an architecture to specify how to remove a + * hardware breakpoint. + * @disable_hw_break: Allow an architecture to specify how to disable + * hardware breakpoints for a single cpu. + * @remove_all_hw_break: Allow an architecture to specify how to remove all + * hardware breakpoints. + * @correct_hw_break: Allow an architecture to specify how to correct the + * hardware debug registers. 
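For orientation, a hedged sketch of the other side of this interface: a minimal polled I/O driver built on struct kgdb_io and kgdb_register_io_module(), both declared later in this header; the UART helpers are hypothetical placeholders.

#include <linux/kgdb.h>
#include <linux/init.h>
#include <linux/types.h>

/* hypothetical polled UART helpers assumed to exist for this sketch */
extern int example_uart_poll_getchar(void);
extern void example_uart_poll_putchar(u8 c);

static int example_dbg_read_char(void)
{
	return example_uart_poll_getchar();
}

static void example_dbg_write_char(u8 c)
{
	example_uart_poll_putchar(c);
}

static struct kgdb_io example_dbg_io_ops = {
	.name		= "example_dbg",
	.read_char	= example_dbg_read_char,
	.write_char	= example_dbg_write_char,
};

static int __init example_dbg_init(void)
{
	return kgdb_register_io_module(&example_dbg_io_ops);
}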
+ */ +struct kgdb_arch { + unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE]; + unsigned long flags; + + int (*set_breakpoint)(unsigned long, char *); + int (*remove_breakpoint)(unsigned long, char *); + int (*set_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); + int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); + void (*disable_hw_break)(struct pt_regs *regs); + void (*remove_all_hw_break)(void); + void (*correct_hw_break)(void); +}; + +/** + * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. + * @name: Name of the I/O driver. + * @read_char: Pointer to a function that will return one char. + * @write_char: Pointer to a function that will write one char. + * @flush: Pointer to a function that will flush any pending writes. + * @init: Pointer to a function that will initialize the device. + * @pre_exception: Pointer to a function that will do any prep work for + * the I/O driver. + * @post_exception: Pointer to a function that will do any cleanup work + * for the I/O driver. + * @is_console: 1 if the end device is a console 0 if the I/O device is + * not a console + */ +struct kgdb_io { + const char *name; + int (*read_char) (void); + void (*write_char) (u8); + void (*flush) (void); + int (*init) (void); + void (*pre_exception) (void); + void (*post_exception) (void); + int is_console; +}; + +extern struct kgdb_arch arch_kgdb_ops; + +extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); + +extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); +extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops); +extern struct kgdb_io *dbg_io_ops; + +extern int kgdb_hex2long(char **ptr, unsigned long *long_val); +extern char *kgdb_mem2hex(char *mem, char *buf, int count); +extern int kgdb_hex2mem(char *buf, char *mem, int count); + +extern int kgdb_isremovedbreak(unsigned long addr); +extern void kgdb_schedule_breakpoint(void); + +extern int +kgdb_handle_exception(int ex_vector, int signo, int err_code, + struct pt_regs *regs); +extern int kgdb_nmicallback(int cpu, void *regs); +extern void gdbstub_exit(int status); + +extern int kgdb_single_step; +extern atomic_t kgdb_active; +#define in_dbg_master() \ + (raw_smp_processor_id() == atomic_read(&kgdb_active)) +extern bool dbg_is_early; +extern void __init dbg_late_init(void); +#else /* ! CONFIG_KGDB */ +#define in_dbg_master() (0) +#define dbg_late_init() +#endif /* ! 
CONFIG_KGDB */ +#endif /* _KGDB_H_ */ diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h new file mode 100644 index 00000000..6b394f0b --- /dev/null +++ b/include/linux/khugepaged.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_KHUGEPAGED_H +#define _LINUX_KHUGEPAGED_H + +#include <linux/sched.h> /* MMF_VM_HUGEPAGE */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int __khugepaged_enter(struct mm_struct *mm); +extern void __khugepaged_exit(struct mm_struct *mm); +extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma); + +#define khugepaged_enabled() \ + (transparent_hugepage_flags & \ + ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \ + (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))) +#define khugepaged_always() \ + (transparent_hugepage_flags & \ + (1<<TRANSPARENT_HUGEPAGE_FLAG)) +#define khugepaged_req_madv() \ + (transparent_hugepage_flags & \ + (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) +#define khugepaged_defrag() \ + (transparent_hugepage_flags & \ + (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)) + +static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags)) + return __khugepaged_enter(mm); + return 0; +} + +static inline void khugepaged_exit(struct mm_struct *mm) +{ + if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) + __khugepaged_exit(mm); +} + +static inline int khugepaged_enter(struct vm_area_struct *vma) +{ + if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) + if ((khugepaged_always() || + (khugepaged_req_madv() && + vma->vm_flags & VM_HUGEPAGE)) && + !(vma->vm_flags & VM_NOHUGEPAGE)) + if (__khugepaged_enter(vma->vm_mm)) + return -ENOMEM; + return 0; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + return 0; +} +static inline void khugepaged_exit(struct mm_struct *mm) +{ +} +static inline int khugepaged_enter(struct vm_area_struct *vma) +{ + return 0; +} +static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma) +{ + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#endif /* _LINUX_KHUGEPAGED_H */ diff --git a/include/linux/klist.h b/include/linux/klist.h new file mode 100644 index 00000000..a370ce57 --- /dev/null +++ b/include/linux/klist.h @@ -0,0 +1,68 @@ +/* + * klist.h - Some generic list helpers, extending struct list_head a bit. + * + * Implementations are found in lib/klist.c + * + * + * Copyright (C) 2005 Patrick Mochel + * + * This file is rleased under the GPL v2. 
+ */ + +#ifndef _LINUX_KLIST_H +#define _LINUX_KLIST_H + +#include <linux/spinlock.h> +#include <linux/kref.h> +#include <linux/list.h> + +struct klist_node; +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +} __attribute__ ((aligned (sizeof(void *)))); + +#define KLIST_INIT(_name, _get, _put) \ + { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \ + .k_list = LIST_HEAD_INIT(_name.k_list), \ + .get = _get, \ + .put = _put, } + +#define DEFINE_KLIST(_name, _get, _put) \ + struct klist _name = KLIST_INIT(_name, _get, _put) + +extern void klist_init(struct klist *k, void (*get)(struct klist_node *), + void (*put)(struct klist_node *)); + +struct klist_node { + void *n_klist; /* never access directly */ + struct list_head n_node; + struct kref n_ref; +}; + +extern void klist_add_tail(struct klist_node *n, struct klist *k); +extern void klist_add_head(struct klist_node *n, struct klist *k); +extern void klist_add_after(struct klist_node *n, struct klist_node *pos); +extern void klist_add_before(struct klist_node *n, struct klist_node *pos); + +extern void klist_del(struct klist_node *n); +extern void klist_remove(struct klist_node *n); + +extern int klist_node_attached(struct klist_node *n); + + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + + +extern void klist_iter_init(struct klist *k, struct klist_iter *i); +extern void klist_iter_init_node(struct klist *k, struct klist_iter *i, + struct klist_node *n); +extern void klist_iter_exit(struct klist_iter *i); +extern struct klist_node *klist_next(struct klist_iter *i); + +#endif diff --git a/include/linux/kmalloc_sizes.h b/include/linux/kmalloc_sizes.h new file mode 100644 index 00000000..e576b848 --- /dev/null +++ b/include/linux/kmalloc_sizes.h @@ -0,0 +1,45 @@ +#if (PAGE_SIZE == 4096) + CACHE(32) +#endif + CACHE(64) +#if L1_CACHE_BYTES < 64 + CACHE(96) +#endif + CACHE(128) +#if L1_CACHE_BYTES < 128 + CACHE(192) +#endif + CACHE(256) + CACHE(512) + CACHE(1024) + CACHE(2048) + CACHE(4096) + CACHE(8192) + CACHE(16384) + CACHE(32768) + CACHE(65536) + CACHE(131072) +#if KMALLOC_MAX_SIZE >= 262144 + CACHE(262144) +#endif +#if KMALLOC_MAX_SIZE >= 524288 + CACHE(524288) +#endif +#if KMALLOC_MAX_SIZE >= 1048576 + CACHE(1048576) +#endif +#if KMALLOC_MAX_SIZE >= 2097152 + CACHE(2097152) +#endif +#if KMALLOC_MAX_SIZE >= 4194304 + CACHE(4194304) +#endif +#if KMALLOC_MAX_SIZE >= 8388608 + CACHE(8388608) +#endif +#if KMALLOC_MAX_SIZE >= 16777216 + CACHE(16777216) +#endif +#if KMALLOC_MAX_SIZE >= 33554432 + CACHE(33554432) +#endif diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h new file mode 100644 index 00000000..39f84532 --- /dev/null +++ b/include/linux/kmemcheck.h @@ -0,0 +1,171 @@ +#ifndef LINUX_KMEMCHECK_H +#define LINUX_KMEMCHECK_H + +#include <linux/mm_types.h> +#include <linux/types.h> + +#ifdef CONFIG_KMEMCHECK +extern int kmemcheck_enabled; + +/* The slab-related functions. 
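A minimal sketch of how the klist interface above might be used. struct toy_dev, toy_add() and toy_walk() are invented for illustration, and no get()/put() callbacks are passed because nothing here pins the embedding objects.

#include <linux/kernel.h>
#include <linux/klist.h>

/* Invented record type that embeds a klist_node. */
struct toy_dev {
	struct klist_node node;
	int id;
};

static DEFINE_KLIST(toy_list, NULL, NULL);

static void toy_add(struct toy_dev *d)
{
	klist_add_tail(&d->node, &toy_list);
}

static void toy_walk(void)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(&toy_list, &iter);
	while ((n = klist_next(&iter))) {
		struct toy_dev *d = container_of(n, struct toy_dev, node);

		pr_info("toy dev %d\n", d->id);
	}
	klist_iter_exit(&iter);
}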
*/ +void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node); +void kmemcheck_free_shadow(struct page *page, int order); +void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size); +void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size); + +void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order, + gfp_t gfpflags); + +void kmemcheck_show_pages(struct page *p, unsigned int n); +void kmemcheck_hide_pages(struct page *p, unsigned int n); + +bool kmemcheck_page_is_tracked(struct page *p); + +void kmemcheck_mark_unallocated(void *address, unsigned int n); +void kmemcheck_mark_uninitialized(void *address, unsigned int n); +void kmemcheck_mark_initialized(void *address, unsigned int n); +void kmemcheck_mark_freed(void *address, unsigned int n); + +void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n); +void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n); +void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n); + +int kmemcheck_show_addr(unsigned long address); +int kmemcheck_hide_addr(unsigned long address); + +bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size); + +/* + * Bitfield annotations + * + * How to use: If you have a struct using bitfields, for example + * + * struct a { + * int x:8, y:8; + * }; + * + * then this should be rewritten as + * + * struct a { + * kmemcheck_bitfield_begin(flags); + * int x:8, y:8; + * kmemcheck_bitfield_end(flags); + * }; + * + * Now the "flags_begin" and "flags_end" members may be used to refer to the + * beginning and end, respectively, of the bitfield (and things like + * &x.flags_begin is allowed). As soon as the struct is allocated, the bit- + * fields should be annotated: + * + * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL); + * kmemcheck_annotate_bitfield(a, flags); + */ +#define kmemcheck_bitfield_begin(name) \ + int name##_begin[0]; + +#define kmemcheck_bitfield_end(name) \ + int name##_end[0]; + +#define kmemcheck_annotate_bitfield(ptr, name) \ + do { \ + int _n; \ + \ + if (!ptr) \ + break; \ + \ + _n = (long) &((ptr)->name##_end) \ + - (long) &((ptr)->name##_begin); \ + BUILD_BUG_ON(_n < 0); \ + \ + kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \ + } while (0) + +#define kmemcheck_annotate_variable(var) \ + do { \ + kmemcheck_mark_initialized(&(var), sizeof(var)); \ + } while (0) \ + +#else +#define kmemcheck_enabled 0 + +static inline void +kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) +{ +} + +static inline void +kmemcheck_free_shadow(struct page *page, int order) +{ +} + +static inline void +kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size) +{ +} + +static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object, + size_t size) +{ +} + +static inline void kmemcheck_pagealloc_alloc(struct page *p, + unsigned int order, gfp_t gfpflags) +{ +} + +static inline bool kmemcheck_page_is_tracked(struct page *p) +{ + return false; +} + +static inline void kmemcheck_mark_unallocated(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_initialized(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_freed(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_unallocated_pages(struct page *p, + unsigned int n) +{ +} + +static inline void 
kmemcheck_mark_uninitialized_pages(struct page *p, + unsigned int n) +{ +} + +static inline void kmemcheck_mark_initialized_pages(struct page *p, + unsigned int n) +{ +} + +static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) +{ + return true; +} + +#define kmemcheck_bitfield_begin(name) +#define kmemcheck_bitfield_end(name) +#define kmemcheck_annotate_bitfield(ptr, name) \ + do { \ + } while (0) + +#define kmemcheck_annotate_variable(var) \ + do { \ + } while (0) + +#endif /* CONFIG_KMEMCHECK */ + +#endif /* LINUX_KMEMCHECK_H */ diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h new file mode 100644 index 00000000..2a5e5548 --- /dev/null +++ b/include/linux/kmemleak.h @@ -0,0 +1,106 @@ +/* + * include/linux/kmemleak.h + * + * Copyright (C) 2008 ARM Limited + * Written by Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __KMEMLEAK_H +#define __KMEMLEAK_H + +#ifdef CONFIG_DEBUG_KMEMLEAK + +extern void kmemleak_init(void) __ref; +extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, + gfp_t gfp) __ref; +extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref; +extern void kmemleak_free(const void *ptr) __ref; +extern void kmemleak_free_part(const void *ptr, size_t size) __ref; +extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; +extern void kmemleak_padding(const void *ptr, unsigned long offset, + size_t size) __ref; +extern void kmemleak_not_leak(const void *ptr) __ref; +extern void kmemleak_ignore(const void *ptr) __ref; +extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; +extern void kmemleak_no_scan(const void *ptr) __ref; + +static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, + int min_count, unsigned long flags, + gfp_t gfp) +{ + if (!(flags & SLAB_NOLEAKTRACE)) + kmemleak_alloc(ptr, size, min_count, gfp); +} + +static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) +{ + if (!(flags & SLAB_NOLEAKTRACE)) + kmemleak_free(ptr); +} + +static inline void kmemleak_erase(void **ptr) +{ + *ptr = NULL; +} + +#else + +static inline void kmemleak_init(void) +{ +} +static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, + gfp_t gfp) +{ +} +static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, + int min_count, unsigned long flags, + gfp_t gfp) +{ +} +static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) +{ +} +static inline void kmemleak_free(const void *ptr) +{ +} +static inline void kmemleak_free_part(const void *ptr, size_t size) +{ +} +static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) +{ +} +static inline void kmemleak_free_percpu(const void __percpu *ptr) +{ +} +static inline void kmemleak_not_leak(const void *ptr) +{ +} +static inline void kmemleak_ignore(const void 
*ptr) +{ +} +static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) +{ +} +static inline void kmemleak_erase(void **ptr) +{ +} +static inline void kmemleak_no_scan(const void *ptr) +{ +} + +#endif /* CONFIG_DEBUG_KMEMLEAK */ + +#endif /* __KMEMLEAK_H */ diff --git a/include/linux/kmod.h b/include/linux/kmod.h new file mode 100644 index 00000000..dd99c329 --- /dev/null +++ b/include/linux/kmod.h @@ -0,0 +1,138 @@ +#ifndef __LINUX_KMOD_H__ +#define __LINUX_KMOD_H__ + +/* + * include/linux/kmod.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/gfp.h> +#include <linux/stddef.h> +#include <linux/errno.h> +#include <linux/compiler.h> +#include <linux/workqueue.h> +#include <linux/sysctl.h> + +#define KMOD_PATH_LEN 256 + +#ifdef CONFIG_MODULES +extern char modprobe_path[]; /* for sysctl */ +/* modprobe exit status on success, -ve on error. Return value + * usually useless though. */ +extern __printf(2, 3) +int __request_module(bool wait, const char *name, ...); +#define request_module(mod...) __request_module(true, mod) +#define request_module_nowait(mod...) __request_module(false, mod) +#define try_then_request_module(x, mod...) \ + ((x) ?: (__request_module(true, mod), (x))) +#else +static inline int request_module(const char *name, ...) { return -ENOSYS; } +static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; } +#define try_then_request_module(x, mod...) (x) +#endif + + +struct cred; +struct file; + +#define UMH_NO_WAIT 0 /* don't wait at all */ +#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */ +#define UMH_WAIT_PROC 2 /* wait for the process to complete */ +#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */ + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + char *path; + char **argv; + char **envp; + int wait; + int retval; + int (*init)(struct subprocess_info *info, struct cred *new); + void (*cleanup)(struct subprocess_info *info); + void *data; +}; + +/* Allocate a subprocess_info structure */ +struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, + char **envp, gfp_t gfp_mask); + +/* Set various pieces of state into the subprocess_info structure */ +void call_usermodehelper_setfns(struct subprocess_info *info, + int (*init)(struct subprocess_info *info, struct cred *new), + void (*cleanup)(struct subprocess_info *info), + void *data); + +/* Actually execute the sub-process */ +int call_usermodehelper_exec(struct subprocess_info *info, int wait); + +/* Free the subprocess_info. 
This is only needed if you're not going + to call call_usermodehelper_exec */ +void call_usermodehelper_freeinfo(struct subprocess_info *info); + +static inline int +call_usermodehelper_fns(char *path, char **argv, char **envp, int wait, + int (*init)(struct subprocess_info *info, struct cred *new), + void (*cleanup)(struct subprocess_info *), void *data) +{ + struct subprocess_info *info; + gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; + + info = call_usermodehelper_setup(path, argv, envp, gfp_mask); + + if (info == NULL) + return -ENOMEM; + + call_usermodehelper_setfns(info, init, cleanup, data); + + return call_usermodehelper_exec(info, wait); +} + +static inline int +call_usermodehelper(char *path, char **argv, char **envp, int wait) +{ + return call_usermodehelper_fns(path, argv, envp, wait, + NULL, NULL, NULL); +} + +extern struct ctl_table usermodehelper_table[]; + +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING, + UMH_DISABLED, +}; + +extern void usermodehelper_init(void); + +extern int __usermodehelper_disable(enum umh_disable_depth depth); +extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth); + +static inline int usermodehelper_disable(void) +{ + return __usermodehelper_disable(UMH_DISABLED); +} + +static inline void usermodehelper_enable(void) +{ + __usermodehelper_set_disable_depth(UMH_ENABLED); +} + +extern int usermodehelper_read_trylock(void); +extern long usermodehelper_read_lock_wait(long timeout); +extern void usermodehelper_read_unlock(void); + +#endif /* __LINUX_KMOD_H__ */ diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h new file mode 100644 index 00000000..35f7237e --- /dev/null +++ b/include/linux/kmsg_dump.h @@ -0,0 +1,70 @@ +/* + * linux/include/kmsg_dump.h + * + * Copyright (C) 2009 Net Insight AB + * + * Author: Simon Kagstrom <simon.kagstrom@netinsight.net> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ +#ifndef _LINUX_KMSG_DUMP_H +#define _LINUX_KMSG_DUMP_H + +#include <linux/errno.h> +#include <linux/list.h> + +/* + * Keep this list arranged in rough order of priority. Anything listed after + * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump + * is passed to the kernel. + */ +enum kmsg_dump_reason { + KMSG_DUMP_PANIC, + KMSG_DUMP_OOPS, + KMSG_DUMP_EMERG, + KMSG_DUMP_RESTART, + KMSG_DUMP_HALT, + KMSG_DUMP_POWEROFF, +}; + +/** + * struct kmsg_dumper - kernel crash message dumper structure + * @dump: The callback which gets called on crashes. The buffer is passed + * as two sections, where s1 (length l1) contains the older + * messages and s2 (length l2) contains the newer. 
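A minimal sketch of the usual way the helper API above is consumed, via the call_usermodehelper() convenience wrapper; run_example_helper() and the /bin/true path are arbitrary choices for illustration.

#include <linux/kmod.h>

static int run_example_helper(void)
{
	char *argv[] = { "/bin/true", NULL };
	char *envp[] = { "HOME=/",
			 "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	/* Block until the helper process has exited. */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}

Using UMH_WAIT_EXEC or UMH_NO_WAIT instead would return as soon as the exec (or merely the queuing) has happened, per the UMH_* definitions above.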
+ * @list: Entry in the dumper list (private) + * @registered: Flag that specifies if this is already registered + */ +struct kmsg_dumper { + void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason, + const char *s1, unsigned long l1, + const char *s2, unsigned long l2); + struct list_head list; + int registered; +}; + +#ifdef CONFIG_PRINTK +void kmsg_dump(enum kmsg_dump_reason reason); + +int kmsg_dump_register(struct kmsg_dumper *dumper); + +int kmsg_dump_unregister(struct kmsg_dumper *dumper); +#else +static inline void kmsg_dump(enum kmsg_dump_reason reason) +{ +} + +static inline int kmsg_dump_register(struct kmsg_dumper *dumper) +{ + return -EINVAL; +} + +static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper) +{ + return -EINVAL; +} +#endif + +#endif /* _LINUX_KMSG_DUMP_H */ diff --git a/include/linux/kobj_map.h b/include/linux/kobj_map.h new file mode 100644 index 00000000..18ca75ff --- /dev/null +++ b/include/linux/kobj_map.h @@ -0,0 +1,19 @@ +/* + * kobj_map.h + */ + +#ifndef _KOBJ_MAP_H_ +#define _KOBJ_MAP_H_ + +#include <linux/mutex.h> + +typedef struct kobject *kobj_probe_t(dev_t, int *, void *); +struct kobj_map; + +int kobj_map(struct kobj_map *, dev_t, unsigned long, struct module *, + kobj_probe_t *, int (*)(dev_t, void *), void *); +void kobj_unmap(struct kobj_map *, dev_t, unsigned long); +struct kobject *kobj_lookup(struct kobj_map *, dev_t, int *); +struct kobj_map *kobj_map_init(kobj_probe_t *, struct mutex *); + +#endif /* _KOBJ_MAP_H_ */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h new file mode 100644 index 00000000..fc615a97 --- /dev/null +++ b/include/linux/kobject.h @@ -0,0 +1,234 @@ +/* + * kobject.h - generic kernel object infrastructure. + * + * Copyright (c) 2002-2003 Patrick Mochel + * Copyright (c) 2002-2003 Open Source Development Labs + * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (c) 2006-2008 Novell Inc. + * + * This file is released under the GPLv2. + * + * Please read Documentation/kobject.txt before using the kobject + * interface, ESPECIALLY the parts about reference counts and object + * destructors. + */ + +#ifndef _KOBJECT_H_ +#define _KOBJECT_H_ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/sysfs.h> +#include <linux/compiler.h> +#include <linux/spinlock.h> +#include <linux/kref.h> +#include <linux/kobject_ns.h> +#include <linux/kernel.h> +#include <linux/wait.h> +#include <linux/atomic.h> + +#define UEVENT_HELPER_PATH_LEN 256 +#define UEVENT_NUM_ENVP 32 /* number of env pointers */ +#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ + +/* path to the userspace helper executed on an event */ +extern char uevent_helper[]; + +/* counter to tag the uevent, read only except for the kobject core */ +extern u64 uevent_seqnum; + +/* + * The actions here must match the index to the string array + * in lib/kobject_uevent.c + * + * Do not add new actions here without checking with the driver-core + * maintainers. Action strings are not meant to express subsystem + * or device specific properties. In most cases you want to send a + * kobject_uevent_env(kobj, KOBJ_CHANGE, env) with additional event + * specific variables added to the event environment. 
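Returning to the kmsg_dumper interface above, a hypothetical client might register a dump callback roughly as sketched here; example_dump()/example_dumper are invented names, and a real dumper would copy the two log segments to persistent storage rather than printing a summary.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmsg_dump.h>

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	/* s1/l1 is the older part of the log buffer, s2/l2 the newer. */
	pr_info("kmsg dump, reason %d: %lu + %lu bytes available\n",
		reason, l1, l2);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_dumper_init(void)
{
	return kmsg_dump_register(&example_dumper);
}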
+ */ +enum kobject_action { + KOBJ_ADD, + KOBJ_REMOVE, + KOBJ_CHANGE, + KOBJ_MOVE, + KOBJ_ONLINE, + KOBJ_OFFLINE, + KOBJ_MAX +}; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + struct kobj_type *ktype; + struct sysfs_dirent *sd; + struct kref kref; + unsigned int state_initialized:1; + unsigned int state_in_sysfs:1; + unsigned int state_add_uevent_sent:1; + unsigned int state_remove_uevent_sent:1; + unsigned int uevent_suppress:1; +}; + +extern __printf(2, 3) +int kobject_set_name(struct kobject *kobj, const char *name, ...); +extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, + va_list vargs); + +static inline const char *kobject_name(const struct kobject *kobj) +{ + return kobj->name; +} + +extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype); +extern __printf(3, 4) __must_check +int kobject_add(struct kobject *kobj, struct kobject *parent, + const char *fmt, ...); +extern __printf(4, 5) __must_check +int kobject_init_and_add(struct kobject *kobj, + struct kobj_type *ktype, struct kobject *parent, + const char *fmt, ...); + +extern void kobject_del(struct kobject *kobj); + +extern struct kobject * __must_check kobject_create(void); +extern struct kobject * __must_check kobject_create_and_add(const char *name, + struct kobject *parent); + +extern int __must_check kobject_rename(struct kobject *, const char *new_name); +extern int __must_check kobject_move(struct kobject *, struct kobject *); + +extern struct kobject *kobject_get(struct kobject *kobj); +extern void kobject_put(struct kobject *kobj); + +extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); + +struct kobj_type { + void (*release)(struct kobject *kobj); + const struct sysfs_ops *sysfs_ops; + struct attribute **default_attrs; + const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); + const void *(*namespace)(struct kobject *kobj); +}; + +struct kobj_uevent_env { + char *envp[UEVENT_NUM_ENVP]; + int envp_idx; + char buf[UEVENT_BUFFER_SIZE]; + int buflen; +}; + +struct kset_uevent_ops { + int (* const filter)(struct kset *kset, struct kobject *kobj); + const char *(* const name)(struct kset *kset, struct kobject *kobj); + int (* const uevent)(struct kset *kset, struct kobject *kobj, + struct kobj_uevent_env *env); +}; + +struct kobj_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, + char *buf); + ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count); +}; + +extern const struct sysfs_ops kobj_sysfs_ops; + +struct sock; + +/** + * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. + * + * A kset defines a group of kobjects. They can be individually + * different "types" but overall these kobjects all want to be grouped + * together and operated on in the same manner. ksets are used to + * define the attribute callbacks and other common events that happen to + * a kobject. + * + * @list: the list of all kobjects for this kset + * @list_lock: a lock for iterating over the kobjects + * @kobj: the embedded kobject for this kset (recursion, isn't it fun...) + * @uevent_ops: the set of uevent operations for this kset. These are + * called whenever a kobject has something happen to it so that the kset + * can add new environment variables, or filter out the uevents if so + * desired. 
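The kobject and kobj_attribute pieces above combine in the familiar pattern sketched below: create a kobject, then expose an attribute file under it. The names foo and kobject_example are invented, and the sketch assumes the usual kernel_kobj parent so the directory appears under /sys/kernel/.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static int foo_value;

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", foo_value);
}

static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	sscanf(buf, "%d", &foo_value);
	return count;
}

static struct kobj_attribute foo_attribute =
	__ATTR(foo, 0644, foo_show, foo_store);

static struct kobject *example_kobj;

static int __init example_init(void)
{
	int error;

	/* Creates /sys/kernel/kobject_example/ */
	example_kobj = kobject_create_and_add("kobject_example", kernel_kobj);
	if (!example_kobj)
		return -ENOMEM;

	/* Creates /sys/kernel/kobject_example/foo */
	error = sysfs_create_file(example_kobj, &foo_attribute.attr);
	if (error)
		kobject_put(example_kobj);
	return error;
}

static void __exit example_exit(void)
{
	kobject_put(example_kobj);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The kobject_put() calls, both in the error path and on teardown, are what eventually drop the last reference and trigger the ktype's release() callback.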
+ */ +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +}; + +extern void kset_init(struct kset *kset); +extern int __must_check kset_register(struct kset *kset); +extern void kset_unregister(struct kset *kset); +extern struct kset * __must_check kset_create_and_add(const char *name, + const struct kset_uevent_ops *u, + struct kobject *parent_kobj); + +static inline struct kset *to_kset(struct kobject *kobj) +{ + return kobj ? container_of(kobj, struct kset, kobj) : NULL; +} + +static inline struct kset *kset_get(struct kset *k) +{ + return k ? to_kset(kobject_get(&k->kobj)) : NULL; +} + +static inline void kset_put(struct kset *k) +{ + kobject_put(&k->kobj); +} + +static inline struct kobj_type *get_ktype(struct kobject *kobj) +{ + return kobj->ktype; +} + +extern struct kobject *kset_find_obj(struct kset *, const char *); + +/* The global /sys/kernel/ kobject for people to chain off of */ +extern struct kobject *kernel_kobj; +/* The global /sys/kernel/mm/ kobject for people to chain off of */ +extern struct kobject *mm_kobj; +/* The global /sys/hypervisor/ kobject for people to chain off of */ +extern struct kobject *hypervisor_kobj; +/* The global /sys/power/ kobject for people to chain off of */ +extern struct kobject *power_kobj; +/* The global /sys/firmware/ kobject for people to chain off of */ +extern struct kobject *firmware_kobj; + +#if defined(CONFIG_HOTPLUG) +int kobject_uevent(struct kobject *kobj, enum kobject_action action); +int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, + char *envp[]); + +__printf(2, 3) +int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...); + +int kobject_action_type(const char *buf, size_t count, + enum kobject_action *type); +#else +static inline int kobject_uevent(struct kobject *kobj, + enum kobject_action action) +{ return 0; } +static inline int kobject_uevent_env(struct kobject *kobj, + enum kobject_action action, + char *envp[]) +{ return 0; } + +static inline __printf(2, 3) +int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) +{ return 0; } + +static inline int kobject_action_type(const char *buf, size_t count, + enum kobject_action *type) +{ return -EINVAL; } +#endif + +#endif /* _KOBJECT_H_ */ diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h new file mode 100644 index 00000000..f66b065a --- /dev/null +++ b/include/linux/kobject_ns.h @@ -0,0 +1,58 @@ +/* Kernel object name space definitions + * + * Copyright (c) 2002-2003 Patrick Mochel + * Copyright (c) 2002-2003 Open Source Development Labs + * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (c) 2006-2008 Novell Inc. + * + * Split from kobject.h by David Howells (dhowells@redhat.com) + * + * This file is released under the GPLv2. + * + * Please read Documentation/kobject.txt before using the kobject + * interface, ESPECIALLY the parts about reference counts and object + * destructors. + */ + +#ifndef _LINUX_KOBJECT_NS_H +#define _LINUX_KOBJECT_NS_H + +struct sock; +struct kobject; + +/* + * Namespace types which are used to tag kobjects and sysfs entries. + * Network namespace will likely be the first. + */ +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET, + KOBJ_NS_TYPES +}; + +/* + * Callbacks so sysfs can determine namespaces + * @grab_current_ns: return a new reference to calling task's namespace + * @netlink_ns: return namespace to which a sock belongs (right?) 
+ * @initial_ns: return the initial namespace (i.e. init_net_ns) + * @drop_ns: drops a reference to namespace + */ +struct kobj_ns_type_operations { + enum kobj_ns_type type; + void *(*grab_current_ns)(void); + const void *(*netlink_ns)(struct sock *sk); + const void *(*initial_ns)(void); + void (*drop_ns)(void *); +}; + +int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); +int kobj_ns_type_registered(enum kobj_ns_type type); +const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); +const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); + +void *kobj_ns_grab_current(enum kobj_ns_type type); +const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); +const void *kobj_ns_initial(enum kobj_ns_type type); +void kobj_ns_drop(enum kobj_ns_type type, void *ns); + +#endif /* _LINUX_KOBJECT_NS_H */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h new file mode 100644 index 00000000..b6e1f8c0 --- /dev/null +++ b/include/linux/kprobes.h @@ -0,0 +1,440 @@ +#ifndef _LINUX_KPROBES_H +#define _LINUX_KPROBES_H +/* + * Kernel Probes (KProbes) + * include/linux/kprobes.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2002, 2004 + * + * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel + * Probes initial implementation ( includes suggestions from + * Rusty Russell). + * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes + * interface to access function arguments. + * 2005-May Hien Nguyen <hien@us.ibm.com> and Jim Keniston + * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi + * <prasanna@in.ibm.com> added function-return probes. 
+ */ +#include <linux/linkage.h> +#include <linux/list.h> +#include <linux/notifier.h> +#include <linux/smp.h> +#include <linux/bug.h> +#include <linux/percpu.h> +#include <linux/spinlock.h> +#include <linux/rcupdate.h> +#include <linux/mutex.h> + +#ifdef CONFIG_KPROBES +#include <asm/kprobes.h> + +/* kprobe_status settings */ +#define KPROBE_HIT_ACTIVE 0x00000001 +#define KPROBE_HIT_SS 0x00000002 +#define KPROBE_REENTER 0x00000004 +#define KPROBE_HIT_SSDONE 0x00000008 + +/* Attach to insert probes on any functions which should be ignored*/ +#define __kprobes __attribute__((__section__(".kprobes.text"))) +#else /* CONFIG_KPROBES */ +typedef int kprobe_opcode_t; +struct arch_specific_insn { + int dummy; +}; +#define __kprobes +#endif /* CONFIG_KPROBES */ + +struct kprobe; +struct pt_regs; +struct kretprobe; +struct kretprobe_instance; +typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); +typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *); +typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, + unsigned long flags); +typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, + int trapnr); +typedef int (*kretprobe_handler_t) (struct kretprobe_instance *, + struct pt_regs *); + +struct kprobe { + struct hlist_node hlist; + + /* list of kprobes for multi-handler support */ + struct list_head list; + + /*count the number of times this probe was temporarily disarmed */ + unsigned long nmissed; + + /* location of the probe point */ + kprobe_opcode_t *addr; + + /* Allow user to indicate symbol name of the probe point */ + const char *symbol_name; + + /* Offset into the symbol */ + unsigned int offset; + + /* Called before addr is executed. */ + kprobe_pre_handler_t pre_handler; + + /* Called after addr is executed, unless... */ + kprobe_post_handler_t post_handler; + + /* + * ... called if executing addr causes a fault (eg. page fault). + * Return 1 if it handled fault, otherwise kernel will see it. + */ + kprobe_fault_handler_t fault_handler; + + /* + * ... called if breakpoint trap occurs in probe handler. + * Return 1 if it handled break, otherwise kernel will see it. + */ + kprobe_break_handler_t break_handler; + + /* Saved opcode (which has been replaced with breakpoint) */ + kprobe_opcode_t opcode; + + /* copy of the original instruction */ + struct arch_specific_insn ainsn; + + /* + * Indicates various status flags. + * Protected by kprobe_mutex after this kprobe is registered. + */ + u32 flags; +}; + +/* Kprobe status flags */ +#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ +#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */ +#define KPROBE_FLAG_OPTIMIZED 4 /* + * probe is really optimized. + * NOTE: + * this flag is only for optimized_kprobe. + */ + +/* Has this kprobe gone ? */ +static inline int kprobe_gone(struct kprobe *p) +{ + return p->flags & KPROBE_FLAG_GONE; +} + +/* Is this kprobe disabled ? */ +static inline int kprobe_disabled(struct kprobe *p) +{ + return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE); +} + +/* Is this kprobe really running optimized path ? */ +static inline int kprobe_optimized(struct kprobe *p) +{ + return p->flags & KPROBE_FLAG_OPTIMIZED; +} +/* + * Special probe type that uses setjmp-longjmp type tricks to resume + * execution at a specified entry with a matching prototype corresponding + * to the probed function - a trick to enable arguments to become + * accessible seamlessly by probe handling logic. 
+ * Note: + * Because of the way compilers allocate stack space for local variables + * etc upfront, regardless of sub-scopes within a function, this mirroring + * principle currently works only for probes placed on function entry points. + */ +struct jprobe { + struct kprobe kp; + void *entry; /* probe handling code to jump to */ +}; + +/* For backward compatibility with old code using JPROBE_ENTRY() */ +#define JPROBE_ENTRY(handler) (handler) + +/* + * Function-return probe - + * Note: + * User needs to provide a handler function, and initialize maxactive. + * maxactive - The maximum number of instances of the probed function that + * can be active concurrently. + * nmissed - tracks the number of times the probed function's return was + * ignored, due to maxactive being too low. + * + */ +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + kretprobe_handler_t entry_handler; + int maxactive; + int nmissed; + size_t data_size; + struct hlist_head free_instances; + raw_spinlock_t lock; +}; + +struct kretprobe_instance { + struct hlist_node hlist; + struct kretprobe *rp; + kprobe_opcode_t *ret_addr; + struct task_struct *task; + char data[0]; +}; + +struct kretprobe_blackpoint { + const char *name; + void *addr; +}; + +struct kprobe_blackpoint { + const char *name; + unsigned long start_addr; + unsigned long range; +}; + +#ifdef CONFIG_KPROBES +DECLARE_PER_CPU(struct kprobe *, current_kprobe); +DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +/* + * For #ifdef avoidance: + */ +static inline int kprobes_built_in(void) +{ + return 1; +} + +#ifdef CONFIG_KRETPROBES +extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs); +extern int arch_trampoline_kprobe(struct kprobe *p); +#else /* CONFIG_KRETPROBES */ +static inline void arch_prepare_kretprobe(struct kretprobe *rp, + struct pt_regs *regs) +{ +} +static inline int arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} +#endif /* CONFIG_KRETPROBES */ + +extern struct kretprobe_blackpoint kretprobe_blacklist[]; + +static inline void kretprobe_assert(struct kretprobe_instance *ri, + unsigned long orig_ret_address, unsigned long trampoline_address) +{ + if (!orig_ret_address || (orig_ret_address == trampoline_address)) { + printk("kretprobe BUG!: Processing kretprobe %p @ %p\n", + ri->rp, ri->rp->kp.addr); + BUG(); + } +} + +#ifdef CONFIG_KPROBES_SANITY_TEST +extern int init_test_probes(void); +#else +static inline int init_test_probes(void) +{ + return 0; +} +#endif /* CONFIG_KPROBES_SANITY_TEST */ + +extern int arch_prepare_kprobe(struct kprobe *p); +extern void arch_arm_kprobe(struct kprobe *p); +extern void arch_disarm_kprobe(struct kprobe *p); +extern int arch_init_kprobes(void); +extern void show_registers(struct pt_regs *regs); +extern kprobe_opcode_t *get_insn_slot(void); +extern void free_insn_slot(kprobe_opcode_t *slot, int dirty); +extern void kprobes_inc_nmissed_count(struct kprobe *p); + +#ifdef CONFIG_OPTPROBES +/* + * Internal structure for direct jump optimized probe + */ +struct optimized_kprobe { + struct kprobe kp; + struct list_head list; /* list for optimizing queue */ + struct arch_optimized_insn optinsn; +}; + +/* Architecture dependent functions for direct jump optimization */ +extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn); +extern int arch_check_optimized_kprobe(struct optimized_kprobe *op); +extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op); +extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op); 
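Tying the declarations above together, the usual pattern is a small module that fills in a struct kprobe and registers it. This is only a sketch: the probed symbol (do_fork here) and the example_* names are placeholders for illustration.

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* let the probed instruction run normally */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* assumed probe target, illustrative */
	.pre_handler	= example_pre,
};

static int __init example_kprobe_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_kprobe_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_kprobe_init);
module_exit(example_kprobe_exit);
MODULE_LICENSE("GPL");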
+extern void arch_optimize_kprobes(struct list_head *oplist); +extern void arch_unoptimize_kprobes(struct list_head *oplist, + struct list_head *done_list); +extern void arch_unoptimize_kprobe(struct optimized_kprobe *op); +extern kprobe_opcode_t *get_optinsn_slot(void); +extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty); +extern int arch_within_optimized_kprobe(struct optimized_kprobe *op, + unsigned long addr); + +extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs); + +#ifdef CONFIG_SYSCTL +extern int sysctl_kprobes_optimization; +extern int proc_kprobes_optimization_handler(struct ctl_table *table, + int write, void __user *buffer, + size_t *length, loff_t *ppos); +#endif + +#endif /* CONFIG_OPTPROBES */ + +/* Get the kprobe at this addr (if any) - called with preemption disabled */ +struct kprobe *get_kprobe(void *addr); +void kretprobe_hash_lock(struct task_struct *tsk, + struct hlist_head **head, unsigned long *flags); +void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags); +struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); + +/* kprobe_running() will just return the current_kprobe on this CPU */ +static inline struct kprobe *kprobe_running(void) +{ + return (__this_cpu_read(current_kprobe)); +} + +static inline void reset_current_kprobe(void) +{ + __this_cpu_write(current_kprobe, NULL); +} + +static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) +{ + return (&__get_cpu_var(kprobe_ctlblk)); +} + +int register_kprobe(struct kprobe *p); +void unregister_kprobe(struct kprobe *p); +int register_kprobes(struct kprobe **kps, int num); +void unregister_kprobes(struct kprobe **kps, int num); +int setjmp_pre_handler(struct kprobe *, struct pt_regs *); +int longjmp_break_handler(struct kprobe *, struct pt_regs *); +int register_jprobe(struct jprobe *p); +void unregister_jprobe(struct jprobe *p); +int register_jprobes(struct jprobe **jps, int num); +void unregister_jprobes(struct jprobe **jps, int num); +void jprobe_return(void); +unsigned long arch_deref_entry_point(void *); + +int register_kretprobe(struct kretprobe *rp); +void unregister_kretprobe(struct kretprobe *rp); +int register_kretprobes(struct kretprobe **rps, int num); +void unregister_kretprobes(struct kretprobe **rps, int num); + +void kprobe_flush_task(struct task_struct *tk); +void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); + +int disable_kprobe(struct kprobe *kp); +int enable_kprobe(struct kprobe *kp); + +void dump_kprobe(struct kprobe *kp); + +#else /* !CONFIG_KPROBES: */ + +static inline int kprobes_built_in(void) +{ + return 0; +} +static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + return 0; +} +static inline struct kprobe *get_kprobe(void *addr) +{ + return NULL; +} +static inline struct kprobe *kprobe_running(void) +{ + return NULL; +} +static inline int register_kprobe(struct kprobe *p) +{ + return -ENOSYS; +} +static inline int register_kprobes(struct kprobe **kps, int num) +{ + return -ENOSYS; +} +static inline void unregister_kprobe(struct kprobe *p) +{ +} +static inline void unregister_kprobes(struct kprobe **kps, int num) +{ +} +static inline int register_jprobe(struct jprobe *p) +{ + return -ENOSYS; +} +static inline int register_jprobes(struct jprobe **jps, int num) +{ + return -ENOSYS; +} +static inline void unregister_jprobe(struct jprobe *p) +{ +} +static inline void unregister_jprobes(struct jprobe **jps, int num) +{ +} +static inline void jprobe_return(void) +{ +} +static 
inline int register_kretprobe(struct kretprobe *rp) +{ + return -ENOSYS; +} +static inline int register_kretprobes(struct kretprobe **rps, int num) +{ + return -ENOSYS; +} +static inline void unregister_kretprobe(struct kretprobe *rp) +{ +} +static inline void unregister_kretprobes(struct kretprobe **rps, int num) +{ +} +static inline void kprobe_flush_task(struct task_struct *tk) +{ +} +static inline int disable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} +static inline int enable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} +#endif /* CONFIG_KPROBES */ +static inline int disable_kretprobe(struct kretprobe *rp) +{ + return disable_kprobe(&rp->kp); +} +static inline int enable_kretprobe(struct kretprobe *rp) +{ + return enable_kprobe(&rp->kp); +} +static inline int disable_jprobe(struct jprobe *jp) +{ + return disable_kprobe(&jp->kp); +} +static inline int enable_jprobe(struct jprobe *jp) +{ + return enable_kprobe(&jp->kp); +} + +#endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/kref.h b/include/linux/kref.h new file mode 100644 index 00000000..9c07dceb --- /dev/null +++ b/include/linux/kref.h @@ -0,0 +1,96 @@ +/* + * kref.h - library routines for handling generic reference counted objects + * + * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (C) 2004 IBM Corp. + * + * based on kobject.h which was: + * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org> + * Copyright (C) 2002-2003 Open Source Development Labs + * + * This file is released under the GPLv2. + * + */ + +#ifndef _KREF_H_ +#define _KREF_H_ + +#include <linux/bug.h> +#include <linux/atomic.h> +#include <linux/kernel.h> + +struct kref { + atomic_t refcount; +}; + +/** + * kref_init - initialize object. + * @kref: object in question. + */ +static inline void kref_init(struct kref *kref) +{ + atomic_set(&kref->refcount, 1); +} + +/** + * kref_get - increment refcount for object. + * @kref: object. + */ +static inline void kref_get(struct kref *kref) +{ + WARN_ON(!atomic_read(&kref->refcount)); + atomic_inc(&kref->refcount); +} + +/** + * kref_sub - subtract a number of refcounts for object. + * @kref: object. + * @count: Number of recounts to subtract. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. If the caller does pass kfree to this + * function, you will be publicly mocked mercilessly by the kref + * maintainer, and anyone else who happens to notice it. You have + * been warned. + * + * Subtract @count from the refcount, and if 0, call release(). + * Return 1 if the object was removed, otherwise return 0. Beware, if this + * function returns 0, you still can not count on the kref from remaining in + * memory. Only use the return value if you want to see if the kref is now + * gone, not present. + */ +static inline int kref_sub(struct kref *kref, unsigned int count, + void (*release)(struct kref *kref)) +{ + WARN_ON(release == NULL); + + if (atomic_sub_and_test((int) count, &kref->refcount)) { + release(kref); + return 1; + } + return 0; +} + +/** + * kref_put - decrement refcount for object. + * @kref: object. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. 
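A minimal sketch of the intended kref usage pattern: embed the kref in your object, start at one reference with kref_init(), and drop references through a wrapper so the release function (never plain kfree itself, as the comment above warns) runs exactly once. struct widget and the widget_* helpers are invented for illustration.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct widget {
	struct kref refcount;
	int payload;
};

static void widget_release(struct kref *kref)
{
	struct widget *w = container_of(kref, struct widget, refcount);

	kfree(w);
}

static struct widget *widget_create(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (w)
		kref_init(&w->refcount);	/* refcount starts at 1 */
	return w;
}

static void widget_get(struct widget *w)
{
	kref_get(&w->refcount);
}

static void widget_put(struct widget *w)
{
	kref_put(&w->refcount, widget_release);
}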
If the caller does pass kfree to this + * function, you will be publicly mocked mercilessly by the kref + * maintainer, and anyone else who happens to notice it. You have + * been warned. + * + * Decrement the refcount, and if 0, call release(). + * Return 1 if the object was removed, otherwise return 0. Beware, if this + * function returns 0, you still can not count on the kref from remaining in + * memory. Only use the return value if you want to see if the kref is now + * gone, not present. + */ +static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) +{ + return kref_sub(kref, 1, release); +} +#endif /* _KREF_H_ */ diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h new file mode 100644 index 00000000..cb311798 --- /dev/null +++ b/include/linux/ks0108.h @@ -0,0 +1,49 @@ +/* + * Filename: ks0108.h + * Version: 0.1.0 + * Description: ks0108 LCD Controller driver header + * License: GPLv2 + * + * Author: Copyright (C) Miguel Ojeda Sandonis + * Date: 2006-10-31 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _KS0108_H_ +#define _KS0108_H_ + +/* Write a byte to the data port */ +extern void ks0108_writedata(unsigned char byte); + +/* Write a byte to the control port */ +extern void ks0108_writecontrol(unsigned char byte); + +/* Set the controller's current display state (0..1) */ +extern void ks0108_displaystate(unsigned char state); + +/* Set the controller's current startline (0..63) */ +extern void ks0108_startline(unsigned char startline); + +/* Set the controller's current address (0..63) */ +extern void ks0108_address(unsigned char address); + +/* Set the controller's current page (0..7) */ +extern void ks0108_page(unsigned char page); + +/* Is the module inited? */ +extern unsigned char ks0108_isinited(void); + +#endif /* _KS0108_H_ */ diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h new file mode 100644 index 00000000..14ba4452 --- /dev/null +++ b/include/linux/ks8842.h @@ -0,0 +1,38 @@ +/* + * ks8842.h KS8842 platform data struct definition + * Copyright (c) 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef _LINUX_KS8842_H +#define _LINUX_KS8842_H + +#include <linux/if_ether.h> + +/** + * struct ks8842_platform_data - Platform data of the KS8842 network driver + * @macaddr: The MAC address of the device, set to all 0:s to use the on in + * the chip. + * @rx_dma_channel: The DMA channel to use for RX, -1 for none. + * @tx_dma_channel: The DMA channel to use for TX, -1 for none. + * + */ +struct ks8842_platform_data { + u8 macaddr[ETH_ALEN]; + int rx_dma_channel; + int tx_dma_channel; +}; + +#endif diff --git a/include/linux/ksm.h b/include/linux/ksm.h new file mode 100644 index 00000000..3319a696 --- /dev/null +++ b/include/linux/ksm.h @@ -0,0 +1,145 @@ +#ifndef __LINUX_KSM_H +#define __LINUX_KSM_H +/* + * Memory merging support. + * + * This code enables dynamic sharing of identical pages found in different + * memory areas, even if they are not shared by fork(). + */ + +#include <linux/bitops.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/rmap.h> +#include <linux/sched.h> + +struct stable_node; +struct mem_cgroup; + +struct page *ksm_does_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address); + +#ifdef CONFIG_KSM +int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags); +int __ksm_enter(struct mm_struct *mm); +void __ksm_exit(struct mm_struct *mm); + +static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) + return __ksm_enter(mm); + return 0; +} + +static inline void ksm_exit(struct mm_struct *mm) +{ + if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) + __ksm_exit(mm); +} + +/* + * A KSM page is one of those write-protected "shared pages" or "merged pages" + * which KSM maps into multiple mms, wherever identical anonymous page content + * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any + * anon_vma, but to that page's node of the stable tree. + */ +static inline int PageKsm(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); +} + +static inline struct stable_node *page_stable_node(struct page *page) +{ + return PageKsm(page) ? page_rmapping(page) : NULL; +} + +static inline void set_page_stable_node(struct page *page, + struct stable_node *stable_node) +{ + page->mapping = (void *)stable_node + + (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); +} + +/* + * When do_swap_page() first faults in from swap what used to be a KSM page, + * no problem, it will be assigned to this vma's anon_vma; but thereafter, + * it might be faulted into a different anon_vma (or perhaps to a different + * offset in the same anon_vma). do_swap_page() cannot do all the locking + * needed to reconstitute a cross-anon_vma KSM page: for now it has to make + * a copy, and leave remerging the pages to a later pass of ksmd. + * + * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, + * but what if the vma was unmerged while the page was swapped out? 
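As a rough, simplified sketch of how the madvise() path is expected to drive the KSM interface above (the real logic lives in mm/madvise.c and does more vma surgery than this): MADV_MERGEABLE / MADV_UNMERGEABLE are passed through to ksm_madvise(), which may set or clear VM_MERGEABLE in the flags word it is handed. example_madvise_mergeable() is an invented name.

#include <linux/ksm.h>
#include <linux/mm.h>

static int example_madvise_mergeable(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     int behavior)
{
	unsigned long new_flags = vma->vm_flags;
	int err;

	err = ksm_madvise(vma, start, end, behavior, &new_flags);
	if (err)
		return err;

	/* A real caller re-checks the vma and may split or merge it. */
	vma->vm_flags = new_flags;
	return 0;
}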
+ */ +static inline int ksm_might_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + struct anon_vma *anon_vma = page_anon_vma(page); + + return anon_vma && + (anon_vma->root != vma->anon_vma->root || + page->index != linear_page_index(vma, address)); +} + +int page_referenced_ksm(struct page *page, + struct mem_cgroup *memcg, unsigned long *vm_flags); +int try_to_unmap_ksm(struct page *page, enum ttu_flags flags); +int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg); +void ksm_migrate_page(struct page *newpage, struct page *oldpage); + +#else /* !CONFIG_KSM */ + +static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +{ + return 0; +} + +static inline void ksm_exit(struct mm_struct *mm) +{ +} + +static inline int PageKsm(struct page *page) +{ + return 0; +} + +#ifdef CONFIG_MMU +static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags) +{ + return 0; +} + +static inline int ksm_might_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address) +{ + return 0; +} + +static inline int page_referenced_ksm(struct page *page, + struct mem_cgroup *memcg, unsigned long *vm_flags) +{ + return 0; +} + +static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) +{ + return 0; +} + +static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*, + struct vm_area_struct *, unsigned long, void *), void *arg) +{ + return 0; +} + +static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) +{ +} +#endif /* CONFIG_MMU */ +#endif /* !CONFIG_KSM */ + +#endif /* __LINUX_KSM_H */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h new file mode 100644 index 00000000..22ccf9de --- /dev/null +++ b/include/linux/kthread.h @@ -0,0 +1,130 @@ +#ifndef _LINUX_KTHREAD_H +#define _LINUX_KTHREAD_H +/* Simple interface for creating and stopping kernel threads without mess. */ +#include <linux/err.h> +#include <linux/sched.h> + +__printf(4, 5) +struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), + void *data, + int node, + const char namefmt[], ...); + +#define kthread_create(threadfn, data, namefmt, arg...) \ + kthread_create_on_node(threadfn, data, -1, namefmt, ##arg) + + +/** + * kthread_run - create and wake a thread. + * @threadfn: the function to run until signal_pending(current). + * @data: data ptr for @threadfn. + * @namefmt: printf-style name for the thread. + * + * Description: Convenient wrapper for kthread_create() followed by + * wake_up_process(). Returns the kthread or ERR_PTR(-ENOMEM). + */ +#define kthread_run(threadfn, data, namefmt, ...) \ +({ \ + struct task_struct *__k \ + = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \ + if (!IS_ERR(__k)) \ + wake_up_process(__k); \ + __k; \ +}) + +void kthread_bind(struct task_struct *k, unsigned int cpu); +int kthread_stop(struct task_struct *k); +int kthread_should_stop(void); +bool kthread_freezable_should_stop(bool *was_frozen); +void *kthread_data(struct task_struct *k); + +int kthreadd(void *unused); +extern struct task_struct *kthreadd_task; +extern int tsk_fork_get_node(struct task_struct *tsk); + +/* + * Simple work processor based on kthread. + * + * This provides easier way to make use of kthreads. A kthread_work + * can be queued and flushed using queue/flush_kthread_work() + * respectively. 
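Before the worker infrastructure that the comment above goes on to describe, the plain kthread API is typically used as sketched here; example_thread_fn() and the thread name are invented, and the loop must poll kthread_should_stop() so that kthread_stop() can complete.

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *example_task;

static int example_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... periodic work would go here ... */
		msleep(1000);
	}
	return 0;
}

static int example_start(void)
{
	example_task = kthread_run(example_thread_fn, NULL, "example-kthread");
	return IS_ERR(example_task) ? PTR_ERR(example_task) : 0;
}

static void example_stop(void)
{
	/* Blocks until example_thread_fn() notices and returns. */
	kthread_stop(example_task);
}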
Queued kthread_works are processed by a kthread + * running kthread_worker_fn(). + */ +struct kthread_work; +typedef void (*kthread_work_func_t)(struct kthread_work *work); + +struct kthread_worker { + spinlock_t lock; + struct list_head work_list; + struct task_struct *task; + struct kthread_work *current_work; +}; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + wait_queue_head_t done; + struct kthread_worker *worker; +}; + +#define KTHREAD_WORKER_INIT(worker) { \ + .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ + .work_list = LIST_HEAD_INIT((worker).work_list), \ + } + +#define KTHREAD_WORK_INIT(work, fn) { \ + .node = LIST_HEAD_INIT((work).node), \ + .func = (fn), \ + .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \ + } + +#define DEFINE_KTHREAD_WORKER(worker) \ + struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) + +#define DEFINE_KTHREAD_WORK(work, fn) \ + struct kthread_work work = KTHREAD_WORK_INIT(work, fn) + +/* + * kthread_worker.lock and kthread_work.done need their own lockdep class + * keys if they are defined on stack with lockdep enabled. Use the + * following macros when defining them on stack. + */ +#ifdef CONFIG_LOCKDEP +# define KTHREAD_WORKER_INIT_ONSTACK(worker) \ + ({ init_kthread_worker(&worker); worker; }) +# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ + struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) +# define KTHREAD_WORK_INIT_ONSTACK(work, fn) \ + ({ init_kthread_work((&work), fn); work; }) +# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) \ + struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn) +#else +# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) +# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn) +#endif + +extern void __init_kthread_worker(struct kthread_worker *worker, + const char *name, struct lock_class_key *key); + +#define init_kthread_worker(worker) \ + do { \ + static struct lock_class_key __key; \ + __init_kthread_worker((worker), "("#worker")->lock", &__key); \ + } while (0) + +#define init_kthread_work(work, fn) \ + do { \ + memset((work), 0, sizeof(struct kthread_work)); \ + INIT_LIST_HEAD(&(work)->node); \ + (work)->func = (fn); \ + init_waitqueue_head(&(work)->done); \ + } while (0) + +int kthread_worker_fn(void *worker_ptr); + +bool queue_kthread_work(struct kthread_worker *worker, + struct kthread_work *work); +void flush_kthread_work(struct kthread_work *work); +void flush_kthread_worker(struct kthread_worker *worker); + +#endif /* _LINUX_KTHREAD_H */ diff --git a/include/linux/ktime.h b/include/linux/ktime.h new file mode 100644 index 00000000..603bec29 --- /dev/null +++ b/include/linux/ktime.h @@ -0,0 +1,342 @@ +/* + * include/linux/ktime.h + * + * ktime_t - nanosecond-resolution time format. + * + * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> + * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar + * + * data type definitions, declarations, prototypes and macros. + * + * Started by: Thomas Gleixner and Ingo Molnar + * + * Credits: + * + * Roman Zippel provided the ideas and primary code snippets of + * the ktime_t union and further simplifications of the original + * code. + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_KTIME_H +#define _LINUX_KTIME_H + +#include <linux/time.h> +#include <linux/jiffies.h> + +/* + * ktime_t: + * + * On 64-bit CPUs a single 64-bit variable is used to store the hrtimers + * internal representation of time values in scalar nanoseconds. 
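A minimal sketch of the kthread_worker pattern declared above: the worker itself is only a queue plus a lock, so the caller still has to spawn a kthread that runs kthread_worker_fn() on it before queueing work. The example_* names are invented.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>

static DEFINE_KTHREAD_WORKER(example_worker);

static void example_work_fn(struct kthread_work *work)
{
	pr_info("kthread_work ran\n");
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static int example_worker_start(void)
{
	struct task_struct *task;

	task = kthread_run(kthread_worker_fn, &example_worker,
			   "example-kworker");
	if (IS_ERR(task))
		return PTR_ERR(task);

	queue_kthread_work(&example_worker, &example_work);
	flush_kthread_work(&example_work);	/* wait for completion */
	return 0;
}

Teardown would be flush_kthread_worker() on the worker followed by kthread_stop() on the task, so no queued work is lost.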
The + * design plays out best on 64-bit CPUs, where most conversions are + * NOPs and most arithmetic ktime_t operations are plain arithmetic + * operations. + * + * On 32-bit CPUs an optimized representation of the timespec structure + * is used to avoid expensive conversions from and to timespecs. The + * endian-aware order of the tv struct members is chosen to allow + * mathematical operations on the tv64 member of the union too, which + * for certain operations produces better code. + * + * For architectures with efficient support for 64/32-bit conversions the + * plain scalar nanosecond based representation can be selected by the + * config switch CONFIG_KTIME_SCALAR. + */ +union ktime { + s64 tv64; +#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR) + struct { +# ifdef __BIG_ENDIAN + s32 sec, nsec; +# else + s32 nsec, sec; +# endif + } tv; +#endif +}; + +typedef union ktime ktime_t; /* Kill this */ + +#define KTIME_MAX ((s64)~((u64)1 << 63)) +#if (BITS_PER_LONG == 64) +# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +#else +# define KTIME_SEC_MAX LONG_MAX +#endif + +/* + * ktime_t definitions when using the 64-bit scalar representation: + */ + +#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) + +/** + * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value + * @secs: seconds to set + * @nsecs: nanoseconds to set + * + * Return the ktime_t representation of the value + */ +static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) +{ +#if (BITS_PER_LONG == 64) + if (unlikely(secs >= KTIME_SEC_MAX)) + return (ktime_t){ .tv64 = KTIME_MAX }; +#endif + return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs }; +} + +/* Subtract two ktime_t variables. rem = lhs -rhs: */ +#define ktime_sub(lhs, rhs) \ + ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; }) + +/* Add two ktime_t variables. res = lhs + rhs: */ +#define ktime_add(lhs, rhs) \ + ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) + +/* + * Add a ktime_t variable and a scalar nanosecond value. + * res = kt + nsval: + */ +#define ktime_add_ns(kt, nsval) \ + ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) + +/* + * Subtract a scalar nanosecod from a ktime_t variable + * res = kt - nsval: + */ +#define ktime_sub_ns(kt, nsval) \ + ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; }) + +/* convert a timespec to ktime_t format: */ +static inline ktime_t timespec_to_ktime(struct timespec ts) +{ + return ktime_set(ts.tv_sec, ts.tv_nsec); +} + +/* convert a timeval to ktime_t format: */ +static inline ktime_t timeval_to_ktime(struct timeval tv) +{ + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); +} + +/* Map the ktime_t to timespec conversion to ns_to_timespec function */ +#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) + +/* Map the ktime_t to timeval conversion to ns_to_timeval function */ +#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) + +/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ +#define ktime_to_ns(kt) ((kt).tv64) + +#else /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */ + +/* + * Helper macros/inlines to get the ktime_t math right in the timespec + * representation. The macros are sometimes ugly - their actual use is + * pretty okay-ish, given the circumstances. We do all this for + * performance reasons. The pure scalar nsec_t based code was nice and + * simple, but created too many 64-bit / 32-bit conversions and divisions. 
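A short worked example of the arithmetic helpers above; it behaves the same under either internal representation, and the 10 ms deadline is an arbitrary value chosen for illustration.

#include <linux/ktime.h>

static bool example_before_deadline(ktime_t now)
{
        ktime_t start    = ktime_set(0, 0);                        /* t = 0 */
        ktime_t deadline = ktime_add_ns(start, 10 * NSEC_PER_MSEC);

        /* a positive remainder means the deadline lies in the future */
        return ktime_to_ns(ktime_sub(deadline, now)) > 0;
}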
+ * + * Be especially aware that negative values are represented in a way + * that the tv.sec field is negative and the tv.nsec field is greater + * or equal to zero but less than nanoseconds per second. This is the + * same representation which is used by timespecs. + * + * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC + */ + +/* Set a ktime_t variable to a value in sec/nsec representation: */ +static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) +{ + return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } }; +} + +/** + * ktime_sub - subtract two ktime_t variables + * @lhs: minuend + * @rhs: subtrahend + * + * Returns the remainder of the subtraction + */ +static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs) +{ + ktime_t res; + + res.tv64 = lhs.tv64 - rhs.tv64; + if (res.tv.nsec < 0) + res.tv.nsec += NSEC_PER_SEC; + + return res; +} + +/** + * ktime_add - add two ktime_t variables + * @add1: addend1 + * @add2: addend2 + * + * Returns the sum of @add1 and @add2. + */ +static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2) +{ + ktime_t res; + + res.tv64 = add1.tv64 + add2.tv64; + /* + * performance trick: the (u32) -NSEC gives 0x00000000Fxxxxxxx + * so we subtract NSEC_PER_SEC and add 1 to the upper 32 bit. + * + * it's equivalent to: + * tv.nsec -= NSEC_PER_SEC + * tv.sec ++; + */ + if (res.tv.nsec >= NSEC_PER_SEC) + res.tv64 += (u32)-NSEC_PER_SEC; + + return res; +} + +/** + * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable + * @kt: addend + * @nsec: the scalar nsec value to add + * + * Returns the sum of @kt and @nsec in ktime_t format + */ +extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec); + +/** + * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable + * @kt: minuend + * @nsec: the scalar nsec value to subtract + * + * Returns the subtraction of @nsec from @kt in ktime_t format + */ +extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec); + +/** + * timespec_to_ktime - convert a timespec to ktime_t format + * @ts: the timespec variable to convert + * + * Returns a ktime_t variable with the converted timespec value + */ +static inline ktime_t timespec_to_ktime(const struct timespec ts) +{ + return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec, + .nsec = (s32)ts.tv_nsec } }; +} + +/** + * timeval_to_ktime - convert a timeval to ktime_t format + * @tv: the timeval variable to convert + * + * Returns a ktime_t variable with the converted timeval value + */ +static inline ktime_t timeval_to_ktime(const struct timeval tv) +{ + return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec, + .nsec = (s32)tv.tv_usec * 1000 } }; +} + +/** + * ktime_to_timespec - convert a ktime_t variable to timespec format + * @kt: the ktime_t variable to convert + * + * Returns the timespec representation of the ktime value + */ +static inline struct timespec ktime_to_timespec(const ktime_t kt) +{ + return (struct timespec) { .tv_sec = (time_t) kt.tv.sec, + .tv_nsec = (long) kt.tv.nsec }; +} + +/** + * ktime_to_timeval - convert a ktime_t variable to timeval format + * @kt: the ktime_t variable to convert + * + * Returns the timeval representation of the ktime value + */ +static inline struct timeval ktime_to_timeval(const ktime_t kt) +{ + return (struct timeval) { + .tv_sec = (time_t) kt.tv.sec, + .tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) }; +} + +/** + * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds + * @kt: the ktime_t variable to convert + * + * Returns the scalar nanoseconds 
representation of @kt + */ +static inline s64 ktime_to_ns(const ktime_t kt) +{ + return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec; +} + +#endif /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */ + +/** + * ktime_equal - Compares two ktime_t variables to see if they are equal + * @cmp1: comparable1 + * @cmp2: comparable2 + * + * Compare two ktime_t variables, returns 1 if equal + */ +static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) +{ + return cmp1.tv64 == cmp2.tv64; +} + +static inline s64 ktime_to_us(const ktime_t kt) +{ + struct timeval tv = ktime_to_timeval(kt); + return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec; +} + +static inline s64 ktime_to_ms(const ktime_t kt) +{ + struct timeval tv = ktime_to_timeval(kt); + return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC; +} + +static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) +{ + return ktime_to_us(ktime_sub(later, earlier)); +} + +static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) +{ + return ktime_add_ns(kt, usec * 1000); +} + +static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) +{ + return ktime_sub_ns(kt, usec * 1000); +} + +extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. + */ +#define LOW_RES_NSEC TICK_NSEC +#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } + +/* Get the monotonic time in timespec format: */ +extern void ktime_get_ts(struct timespec *ts); + +/* Get the real (wall-) time in timespec format: */ +#define ktime_get_real_ts(ts) getnstimeofday(ts) + +static inline ktime_t ns_to_ktime(u64 ns) +{ + static const ktime_t ktime_zero = { .tv64 = 0 }; + return ktime_add_ns(ktime_zero, ns); +} + +#endif diff --git a/include/linux/kvm.h b/include/linux/kvm.h new file mode 100644 index 00000000..6c322a90 --- /dev/null +++ b/include/linux/kvm.h @@ -0,0 +1,913 @@ +#ifndef __LINUX_KVM_H +#define __LINUX_KVM_H + +/* + * Userspace interface for /dev/kvm - kernel based virtual machine + * + * Note: you must update KVM_API_VERSION if you change this interface. 
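One more ktime illustration before the KVM interface that follows: measuring an elapsed interval in microseconds with ktime_get_ts(), timespec_to_ktime() and ktime_us_delta(). The timed operation is a placeholder.

#include <linux/ktime.h>

static s64 example_measure_us(void)
{
        struct timespec ts;
        ktime_t start, end;

        ktime_get_ts(&ts);
        start = timespec_to_ktime(ts);

        /* ... the operation being timed ... */

        ktime_get_ts(&ts);
        end = timespec_to_ktime(ts);

        return ktime_us_delta(end, start);      /* microseconds elapsed */
}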
+ */ + +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/ioctl.h> +#include <asm/kvm.h> + +#define KVM_API_VERSION 12 + +/* *** Deprecated interfaces *** */ + +#define KVM_TRC_SHIFT 16 + +#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT) +#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) + +#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01) +#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02) +#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01) + +#define KVM_TRC_HEAD_SIZE 12 +#define KVM_TRC_CYCLE_SIZE 8 +#define KVM_TRC_EXTRA_MAX 7 + +#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) +#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) +#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) +#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) +#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) +#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) +#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) +#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) +#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) +#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) +#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) +#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) +#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) +#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) +#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) +#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) +#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) +#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) +#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) +#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) +#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16) +#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17) +#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) +#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) + +struct kvm_user_trace_setup { + __u32 buf_size; + __u32 buf_nr; +}; + +#define __KVM_DEPRECATED_MAIN_W_0x06 \ + _IOW(KVMIO, 0x06, struct kvm_user_trace_setup) +#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07) +#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08) + +#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq) + +struct kvm_breakpoint { + __u32 enabled; + __u32 padding; + __u64 address; +}; + +struct kvm_debug_guest { + __u32 enabled; + __u32 pad; + struct kvm_breakpoint breakpoints[4]; + __u32 singlestep; +}; + +#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest) + +/* *** End of deprecated interfaces *** */ + + +/* for KVM_CREATE_MEMORY_REGION */ +struct kvm_memory_region { + __u32 slot; + __u32 flags; + __u64 guest_phys_addr; + __u64 memory_size; /* bytes */ +}; + +/* for KVM_SET_USER_MEMORY_REGION */ +struct kvm_userspace_memory_region { + __u32 slot; + __u32 flags; + __u64 guest_phys_addr; + __u64 memory_size; /* bytes */ + __u64 userspace_addr; /* start of the userspace allocated memory */ +}; + +/* for kvm_memory_region::flags */ +#define KVM_MEM_LOG_DIRTY_PAGES 1UL +#define KVM_MEMSLOT_INVALID (1UL << 1) + +/* for KVM_IRQ_LINE */ +struct kvm_irq_level { + /* + * ACPI gsi notion of irq. + * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. + * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. 
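A userspace sketch of filling in struct kvm_userspace_memory_region from above and handing it to the KVM_SET_USER_MEMORY_REGION ioctl defined further down in this header; the slot number and the 1 MiB size are arbitrary, and error handling is trimmed.

#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_add_memslot(int vm_fd)
{
        size_t size = 0x100000;                 /* 1 MiB of guest RAM */
        void *host = mmap(NULL, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct kvm_userspace_memory_region region = {
                .slot            = 0,
                .flags           = 0,           /* or KVM_MEM_LOG_DIRTY_PAGES */
                .guest_phys_addr = 0,
                .memory_size     = size,
                .userspace_addr  = (unsigned long)host,
        };

        if (host == MAP_FAILED)
                return -1;
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}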
+ */ + union { + __u32 irq; + __s32 status; + }; + __u32 level; +}; + + +struct kvm_irqchip { + __u32 chip_id; + __u32 pad; + union { + char dummy[512]; /* reserving space */ +#ifdef __KVM_HAVE_PIT + struct kvm_pic_state pic; +#endif +#ifdef __KVM_HAVE_IOAPIC + struct kvm_ioapic_state ioapic; +#endif + } chip; +}; + +/* for KVM_CREATE_PIT2 */ +struct kvm_pit_config { + __u32 flags; + __u32 pad[15]; +}; + +#define KVM_PIT_SPEAKER_DUMMY 1 + +#define KVM_EXIT_UNKNOWN 0 +#define KVM_EXIT_EXCEPTION 1 +#define KVM_EXIT_IO 2 +#define KVM_EXIT_HYPERCALL 3 +#define KVM_EXIT_DEBUG 4 +#define KVM_EXIT_HLT 5 +#define KVM_EXIT_MMIO 6 +#define KVM_EXIT_IRQ_WINDOW_OPEN 7 +#define KVM_EXIT_SHUTDOWN 8 +#define KVM_EXIT_FAIL_ENTRY 9 +#define KVM_EXIT_INTR 10 +#define KVM_EXIT_SET_TPR 11 +#define KVM_EXIT_TPR_ACCESS 12 +#define KVM_EXIT_S390_SIEIC 13 +#define KVM_EXIT_S390_RESET 14 +#define KVM_EXIT_DCR 15 +#define KVM_EXIT_NMI 16 +#define KVM_EXIT_INTERNAL_ERROR 17 +#define KVM_EXIT_OSI 18 +#define KVM_EXIT_PAPR_HCALL 19 +#define KVM_EXIT_S390_UCONTROL 20 + +/* For KVM_EXIT_INTERNAL_ERROR */ +#define KVM_INTERNAL_ERROR_EMULATION 1 +#define KVM_INTERNAL_ERROR_SIMUL_EX 2 + +/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ +struct kvm_run { + /* in */ + __u8 request_interrupt_window; + __u8 padding1[7]; + + /* out */ + __u32 exit_reason; + __u8 ready_for_interrupt_injection; + __u8 if_flag; + __u8 padding2[2]; + + /* in (pre_kvm_run), out (post_kvm_run) */ + __u64 cr8; + __u64 apic_base; + +#ifdef __KVM_S390 + /* the processor status word for s390 */ + __u64 psw_mask; /* psw upper half */ + __u64 psw_addr; /* psw lower half */ +#endif + union { + /* KVM_EXIT_UNKNOWN */ + struct { + __u64 hardware_exit_reason; + } hw; + /* KVM_EXIT_FAIL_ENTRY */ + struct { + __u64 hardware_entry_failure_reason; + } fail_entry; + /* KVM_EXIT_EXCEPTION */ + struct { + __u32 exception; + __u32 error_code; + } ex; + /* KVM_EXIT_IO */ + struct { +#define KVM_EXIT_IO_IN 0 +#define KVM_EXIT_IO_OUT 1 + __u8 direction; + __u8 size; /* bytes */ + __u16 port; + __u32 count; + __u64 data_offset; /* relative to kvm_run start */ + } io; + struct { + struct kvm_debug_exit_arch arch; + } debug; + /* KVM_EXIT_MMIO */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } mmio; + /* KVM_EXIT_HYPERCALL */ + struct { + __u64 nr; + __u64 args[6]; + __u64 ret; + __u32 longmode; + __u32 pad; + } hypercall; + /* KVM_EXIT_TPR_ACCESS */ + struct { + __u64 rip; + __u32 is_write; + __u32 pad; + } tpr_access; + /* KVM_EXIT_S390_SIEIC */ + struct { + __u8 icptcode; + __u16 ipa; + __u32 ipb; + } s390_sieic; + /* KVM_EXIT_S390_RESET */ +#define KVM_S390_RESET_POR 1 +#define KVM_S390_RESET_CLEAR 2 +#define KVM_S390_RESET_SUBSYSTEM 4 +#define KVM_S390_RESET_CPU_INIT 8 +#define KVM_S390_RESET_IPL 16 + __u64 s390_reset_flags; + /* KVM_EXIT_S390_UCONTROL */ + struct { + __u64 trans_exc_code; + __u32 pgm_code; + } s390_ucontrol; + /* KVM_EXIT_DCR */ + struct { + __u32 dcrn; + __u32 data; + __u8 is_write; + } dcr; + struct { + __u32 suberror; + /* Available with KVM_CAP_INTERNAL_ERROR_DATA: */ + __u32 ndata; + __u64 data[16]; + } internal; + /* KVM_EXIT_OSI */ + struct { + __u64 gprs[32]; + } osi; + struct { + __u64 nr; + __u64 ret; + __u64 args[9]; + } papr_hcall; + /* Fix the size of the union. */ + char padding[256]; + }; + + /* + * shared registers between kvm and userspace. 
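Given the exit-reason union above, a userspace sketch of decoding a port-I/O exit once KVM_RUN returns; run points at the vcpu's mmap()ed kvm_run page, and handle_port_write() is a hypothetical helper, not part of this interface.

#include <linux/kvm.h>

extern void handle_port_write(__u16 port, const __u8 *data,
                              __u8 size, __u32 count);  /* hypothetical */

static void example_handle_exit(struct kvm_run *run)
{
        if (run->exit_reason == KVM_EXIT_IO &&
            run->io.direction == KVM_EXIT_IO_OUT) {
                /* the data lives in the same page, at io.data_offset */
                const __u8 *data = (const __u8 *)run + run->io.data_offset;

                /* io.count back-to-back items of io.size bytes each */
                handle_port_write(run->io.port, data,
                                  run->io.size, run->io.count);
        }
}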
+ * kvm_valid_regs specifies the register classes set by the host + * kvm_dirty_regs specified the register classes dirtied by userspace + * struct kvm_sync_regs is architecture specific, as well as the + * bits for kvm_valid_regs and kvm_dirty_regs + */ + __u64 kvm_valid_regs; + __u64 kvm_dirty_regs; + union { + struct kvm_sync_regs regs; + char padding[1024]; + } s; +}; + +/* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */ + +struct kvm_coalesced_mmio_zone { + __u64 addr; + __u32 size; + __u32 pad; +}; + +struct kvm_coalesced_mmio { + __u64 phys_addr; + __u32 len; + __u32 pad; + __u8 data[8]; +}; + +struct kvm_coalesced_mmio_ring { + __u32 first, last; + struct kvm_coalesced_mmio coalesced_mmio[0]; +}; + +#define KVM_COALESCED_MMIO_MAX \ + ((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \ + sizeof(struct kvm_coalesced_mmio)) + +/* for KVM_TRANSLATE */ +struct kvm_translation { + /* in */ + __u64 linear_address; + + /* out */ + __u64 physical_address; + __u8 valid; + __u8 writeable; + __u8 usermode; + __u8 pad[5]; +}; + +/* for KVM_INTERRUPT */ +struct kvm_interrupt { + /* in */ + __u32 irq; +}; + +/* for KVM_GET_DIRTY_LOG */ +struct kvm_dirty_log { + __u32 slot; + __u32 padding1; + union { + void __user *dirty_bitmap; /* one bit per page */ + __u64 padding2; + }; +}; + +/* for KVM_SET_SIGNAL_MASK */ +struct kvm_signal_mask { + __u32 len; + __u8 sigset[0]; +}; + +/* for KVM_TPR_ACCESS_REPORTING */ +struct kvm_tpr_access_ctl { + __u32 enabled; + __u32 flags; + __u32 reserved[8]; +}; + +/* for KVM_SET_VAPIC_ADDR */ +struct kvm_vapic_addr { + __u64 vapic_addr; +}; + +/* for KVM_SET_MPSTATE */ + +#define KVM_MP_STATE_RUNNABLE 0 +#define KVM_MP_STATE_UNINITIALIZED 1 +#define KVM_MP_STATE_INIT_RECEIVED 2 +#define KVM_MP_STATE_HALTED 3 +#define KVM_MP_STATE_SIPI_RECEIVED 4 + +struct kvm_mp_state { + __u32 mp_state; +}; + +struct kvm_s390_psw { + __u64 mask; + __u64 addr; +}; + +/* valid values for type in kvm_s390_interrupt */ +#define KVM_S390_SIGP_STOP 0xfffe0000u +#define KVM_S390_PROGRAM_INT 0xfffe0001u +#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u +#define KVM_S390_RESTART 0xfffe0003u +#define KVM_S390_INT_VIRTIO 0xffff2603u +#define KVM_S390_INT_SERVICE 0xffff2401u +#define KVM_S390_INT_EMERGENCY 0xffff1201u +#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u + +struct kvm_s390_interrupt { + __u32 type; + __u32 parm; + __u64 parm64; +}; + +/* for KVM_SET_GUEST_DEBUG */ + +#define KVM_GUESTDBG_ENABLE 0x00000001 +#define KVM_GUESTDBG_SINGLESTEP 0x00000002 + +struct kvm_guest_debug { + __u32 control; + __u32 pad; + struct kvm_guest_debug_arch arch; +}; + +enum { + kvm_ioeventfd_flag_nr_datamatch, + kvm_ioeventfd_flag_nr_pio, + kvm_ioeventfd_flag_nr_deassign, + kvm_ioeventfd_flag_nr_max, +}; + +#define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch) +#define KVM_IOEVENTFD_FLAG_PIO (1 << kvm_ioeventfd_flag_nr_pio) +#define KVM_IOEVENTFD_FLAG_DEASSIGN (1 << kvm_ioeventfd_flag_nr_deassign) + +#define KVM_IOEVENTFD_VALID_FLAG_MASK ((1 << kvm_ioeventfd_flag_nr_max) - 1) + +struct kvm_ioeventfd { + __u64 datamatch; + __u64 addr; /* legal pio/mmio address */ + __u32 len; /* 1, 2, 4, or 8 bytes */ + __s32 fd; + __u32 flags; + __u8 pad[36]; +}; + +/* for KVM_ENABLE_CAP */ +struct kvm_enable_cap { + /* in */ + __u32 cap; + __u32 flags; + __u64 args[4]; + __u8 pad[64]; +}; + +/* for KVM_PPC_GET_PVINFO */ +struct kvm_ppc_pvinfo { + /* out */ + __u32 flags; + __u32 hcall[4]; + __u8 pad[108]; +}; + +#define KVMIO 0xAE + +/* machine type bits, to be used as argument 
to KVM_CREATE_VM */ +#define KVM_VM_S390_UCONTROL 1 + +#define KVM_S390_SIE_PAGE_OFFSET 1 + +/* + * ioctls for /dev/kvm fds: + */ +#define KVM_GET_API_VERSION _IO(KVMIO, 0x00) +#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */ +#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list) + +#define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06) +/* + * Check if a kvm extension is available. Argument is extension number, + * return is 1 (yes) or 0 (no, sorry). + */ +#define KVM_CHECK_EXTENSION _IO(KVMIO, 0x03) +/* + * Get size for mmap(vcpu_fd) + */ +#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ +#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) +#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06 +#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 +#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 + +/* + * Extension capability list. + */ +#define KVM_CAP_IRQCHIP 0 +#define KVM_CAP_HLT 1 +#define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2 +#define KVM_CAP_USER_MEMORY 3 +#define KVM_CAP_SET_TSS_ADDR 4 +#define KVM_CAP_VAPIC 6 +#define KVM_CAP_EXT_CPUID 7 +#define KVM_CAP_CLOCKSOURCE 8 +#define KVM_CAP_NR_VCPUS 9 /* returns recommended max vcpus per vm */ +#define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */ +#define KVM_CAP_PIT 11 +#define KVM_CAP_NOP_IO_DELAY 12 +#define KVM_CAP_PV_MMU 13 +#define KVM_CAP_MP_STATE 14 +#define KVM_CAP_COALESCED_MMIO 15 +#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ +#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT +#define KVM_CAP_DEVICE_ASSIGNMENT 17 +#endif +#define KVM_CAP_IOMMU 18 +#ifdef __KVM_HAVE_MSI +#define KVM_CAP_DEVICE_MSI 20 +#endif +/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ +#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 +#ifdef __KVM_HAVE_USER_NMI +#define KVM_CAP_USER_NMI 22 +#endif +#ifdef __KVM_HAVE_GUEST_DEBUG +#define KVM_CAP_SET_GUEST_DEBUG 23 +#endif +#ifdef __KVM_HAVE_PIT +#define KVM_CAP_REINJECT_CONTROL 24 +#endif +#ifdef __KVM_HAVE_IOAPIC +#define KVM_CAP_IRQ_ROUTING 25 +#endif +#define KVM_CAP_IRQ_INJECT_STATUS 26 +#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT +#define KVM_CAP_DEVICE_DEASSIGNMENT 27 +#endif +#ifdef __KVM_HAVE_MSIX +#define KVM_CAP_DEVICE_MSIX 28 +#endif +#define KVM_CAP_ASSIGN_DEV_IRQ 29 +/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ +#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 +#ifdef __KVM_HAVE_MCE +#define KVM_CAP_MCE 31 +#endif +#define KVM_CAP_IRQFD 32 +#ifdef __KVM_HAVE_PIT +#define KVM_CAP_PIT2 33 +#endif +#define KVM_CAP_SET_BOOT_CPU_ID 34 +#ifdef __KVM_HAVE_PIT_STATE2 +#define KVM_CAP_PIT_STATE2 35 +#endif +#define KVM_CAP_IOEVENTFD 36 +#define KVM_CAP_SET_IDENTITY_MAP_ADDR 37 +#ifdef __KVM_HAVE_XEN_HVM +#define KVM_CAP_XEN_HVM 38 +#endif +#define KVM_CAP_ADJUST_CLOCK 39 +#define KVM_CAP_INTERNAL_ERROR_DATA 40 +#ifdef __KVM_HAVE_VCPU_EVENTS +#define KVM_CAP_VCPU_EVENTS 41 +#endif +#define KVM_CAP_S390_PSW 42 +#define KVM_CAP_PPC_SEGSTATE 43 +#define KVM_CAP_HYPERV 44 +#define KVM_CAP_HYPERV_VAPIC 45 +#define KVM_CAP_HYPERV_SPIN 46 +#define KVM_CAP_PCI_SEGMENT 47 +#define KVM_CAP_PPC_PAIRED_SINGLES 48 +#define KVM_CAP_INTR_SHADOW 49 +#ifdef __KVM_HAVE_DEBUGREGS +#define KVM_CAP_DEBUGREGS 50 +#endif +#define KVM_CAP_X86_ROBUST_SINGLESTEP 51 +#define KVM_CAP_PPC_OSI 52 +#define KVM_CAP_PPC_UNSET_IRQ 53 +#define KVM_CAP_ENABLE_CAP 54 +#ifdef __KVM_HAVE_XSAVE +#define KVM_CAP_XSAVE 55 +#endif +#ifdef __KVM_HAVE_XCRS +#define KVM_CAP_XCRS 56 +#endif +#define KVM_CAP_PPC_GET_PVINFO 57 +#define KVM_CAP_PPC_IRQ_LEVEL 58 +#define 
KVM_CAP_ASYNC_PF 59 +#define KVM_CAP_TSC_CONTROL 60 +#define KVM_CAP_GET_TSC_KHZ 61 +#define KVM_CAP_PPC_BOOKE_SREGS 62 +#define KVM_CAP_SPAPR_TCE 63 +#define KVM_CAP_PPC_SMT 64 +#define KVM_CAP_PPC_RMA 65 +#define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ +#define KVM_CAP_PPC_HIOR 67 +#define KVM_CAP_PPC_PAPR 68 +#define KVM_CAP_SW_TLB 69 +#define KVM_CAP_ONE_REG 70 +#define KVM_CAP_S390_GMAP 71 +#define KVM_CAP_TSC_DEADLINE_TIMER 72 +#define KVM_CAP_S390_UCONTROL 73 +#define KVM_CAP_SYNC_REGS 74 +#define KVM_CAP_PCI_2_3 75 + +#ifdef KVM_CAP_IRQ_ROUTING + +struct kvm_irq_routing_irqchip { + __u32 irqchip; + __u32 pin; +}; + +struct kvm_irq_routing_msi { + __u32 address_lo; + __u32 address_hi; + __u32 data; + __u32 pad; +}; + +/* gsi routing entry types */ +#define KVM_IRQ_ROUTING_IRQCHIP 1 +#define KVM_IRQ_ROUTING_MSI 2 + +struct kvm_irq_routing_entry { + __u32 gsi; + __u32 type; + __u32 flags; + __u32 pad; + union { + struct kvm_irq_routing_irqchip irqchip; + struct kvm_irq_routing_msi msi; + __u32 pad[8]; + } u; +}; + +struct kvm_irq_routing { + __u32 nr; + __u32 flags; + struct kvm_irq_routing_entry entries[0]; +}; + +#endif + +#ifdef KVM_CAP_MCE +/* x86 MCE */ +struct kvm_x86_mce { + __u64 status; + __u64 addr; + __u64 misc; + __u64 mcg_status; + __u8 bank; + __u8 pad1[7]; + __u64 pad2[3]; +}; +#endif + +#ifdef KVM_CAP_XEN_HVM +struct kvm_xen_hvm_config { + __u32 flags; + __u32 msr; + __u64 blob_addr_32; + __u64 blob_addr_64; + __u8 blob_size_32; + __u8 blob_size_64; + __u8 pad2[30]; +}; +#endif + +#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0) + +struct kvm_irqfd { + __u32 fd; + __u32 gsi; + __u32 flags; + __u8 pad[20]; +}; + +struct kvm_clock_data { + __u64 clock; + __u32 flags; + __u32 pad[9]; +}; + +#define KVM_MMU_FSL_BOOKE_NOHV 0 +#define KVM_MMU_FSL_BOOKE_HV 1 + +struct kvm_config_tlb { + __u64 params; + __u64 array; + __u32 mmu_type; + __u32 array_len; +}; + +struct kvm_dirty_tlb { + __u64 bitmap; + __u32 num_dirty; +}; + +/* Available with KVM_CAP_ONE_REG */ + +#define KVM_REG_ARCH_MASK 0xff00000000000000ULL +#define KVM_REG_GENERIC 0x0000000000000000ULL + +/* + * Architecture specific registers are to be defined in arch headers and + * ORed with the arch identifier. + */ +#define KVM_REG_PPC 0x1000000000000000ULL +#define KVM_REG_X86 0x2000000000000000ULL +#define KVM_REG_IA64 0x3000000000000000ULL +#define KVM_REG_ARM 0x4000000000000000ULL +#define KVM_REG_S390 0x5000000000000000ULL + +#define KVM_REG_SIZE_SHIFT 52 +#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL +#define KVM_REG_SIZE_U8 0x0000000000000000ULL +#define KVM_REG_SIZE_U16 0x0010000000000000ULL +#define KVM_REG_SIZE_U32 0x0020000000000000ULL +#define KVM_REG_SIZE_U64 0x0030000000000000ULL +#define KVM_REG_SIZE_U128 0x0040000000000000ULL +#define KVM_REG_SIZE_U256 0x0050000000000000ULL +#define KVM_REG_SIZE_U512 0x0060000000000000ULL +#define KVM_REG_SIZE_U1024 0x0070000000000000ULL + +struct kvm_one_reg { + __u64 id; + __u64 addr; +}; + +/* + * ioctls for VM fds + */ +#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region) +/* + * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns + * a vcpu fd. 
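To make the system ioctls above concrete, a minimal userspace bring-up sketch; error handling is trimmed, and KVM_CAP_USER_MEMORY is probed only as an example of KVM_CHECK_EXTENSION.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_create_vm(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);

        if (kvm_fd < 0)
                return -1;
        /* the API version must match, otherwise the layouts below may differ */
        if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
                return -1;
        /* KVM_CHECK_EXTENSION returns 0 (absent) or a positive value */
        if (!ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY))
                return -1;
        return ioctl(kvm_fd, KVM_CREATE_VM, 0); /* 0 = default machine type */
}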
+ */ +#define KVM_CREATE_VCPU _IO(KVMIO, 0x41) +#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) +/* KVM_SET_MEMORY_ALIAS is obsolete: */ +#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) +#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) +#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) +#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \ + struct kvm_userspace_memory_region) +#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) +#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64) + +/* enable ucontrol for s390 */ +struct kvm_s390_ucas_mapping { + __u64 user_addr; + __u64 vcpu_addr; + __u64 length; +}; +#define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping) +#define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping) +#define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long) + +/* Device model IOC */ +#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60) +#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) +#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip) +#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip) +#define KVM_CREATE_PIT _IO(KVMIO, 0x64) +#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state) +#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state) +#define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level) +#define KVM_REGISTER_COALESCED_MMIO \ + _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) +#define KVM_UNREGISTER_COALESCED_MMIO \ + _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) +#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ + struct kvm_assigned_pci_dev) +#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) +/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ +#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70 +#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) +#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) +#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ + struct kvm_assigned_pci_dev) +#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \ + struct kvm_assigned_msix_nr) +#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \ + struct kvm_assigned_msix_entry) +#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) +#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd) +#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) +#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78) +#define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd) +#define KVM_XEN_HVM_CONFIG _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config) +#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data) +#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data) +/* Available with KVM_CAP_PIT_STATE2 */ +#define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2) +#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2) +/* Available with KVM_CAP_PPC_GET_PVINFO */ +#define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo) +/* Available with KVM_CAP_TSC_CONTROL */ +#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2) +#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3) +/* Available with KVM_CAP_PCI_2_3 */ +#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \ + struct kvm_assigned_pci_dev) + +/* + * ioctls for vcpu fds + */ +#define KVM_RUN _IO(KVMIO, 0x80) +#define KVM_GET_REGS _IOR(KVMIO, 0x81, struct kvm_regs) +#define KVM_SET_REGS _IOW(KVMIO, 0x82, struct kvm_regs) +#define KVM_GET_SREGS _IOR(KVMIO, 0x83, struct kvm_sregs) +#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs) +#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct 
kvm_translation) +#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) +/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */ +#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87 +#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs) +#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) +#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) +#define KVM_SET_SIGNAL_MASK _IOW(KVMIO, 0x8b, struct kvm_signal_mask) +#define KVM_GET_FPU _IOR(KVMIO, 0x8c, struct kvm_fpu) +#define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu) +#define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state) +#define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state) +#define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2) +#define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2) +/* Available with KVM_CAP_VAPIC */ +#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl) +/* Available with KVM_CAP_VAPIC */ +#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr) +/* valid for virtual machine (for floating interrupt)_and_ vcpu */ +#define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt) +/* store status for s390 */ +#define KVM_S390_STORE_STATUS_NOADDR (-1ul) +#define KVM_S390_STORE_STATUS_PREFIXED (-2ul) +#define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long) +/* initial ipl psw for s390 */ +#define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw) +/* initial reset for s390 */ +#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) +#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) +#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) +/* Available with KVM_CAP_NMI */ +#define KVM_NMI _IO(KVMIO, 0x9a) +/* Available with KVM_CAP_SET_GUEST_DEBUG */ +#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug) +/* MCE for x86 */ +#define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64) +#define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64) +#define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce) +/* IA64 stack access */ +#define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *) +#define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *) +/* Available with KVM_CAP_VCPU_EVENTS */ +#define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events) +#define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events) +/* Available with KVM_CAP_DEBUGREGS */ +#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs) +#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs) +#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap) +/* Available with KVM_CAP_XSAVE */ +#define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave) +#define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave) +/* Available with KVM_CAP_XCRS */ +#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs) +#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs) +#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce) +/* Available with KVM_CAP_RMA */ +#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma) +/* Available with KVM_CAP_SW_TLB */ +#define KVM_DIRTY_TLB _IOW(KVMIO, 0xaa, struct kvm_dirty_tlb) +/* Available with KVM_CAP_ONE_REG */ +#define KVM_GET_ONE_REG _IOW(KVMIO, 0xab, struct kvm_one_reg) +#define KVM_SET_ONE_REG _IOW(KVMIO, 0xac, struct kvm_one_reg) + +#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) +#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) +#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) + +struct kvm_assigned_pci_dev { + __u32 
assigned_dev_id; + __u32 busnr; + __u32 devfn; + __u32 flags; + __u32 segnr; + union { + __u32 reserved[11]; + }; +}; + +#define KVM_DEV_IRQ_HOST_INTX (1 << 0) +#define KVM_DEV_IRQ_HOST_MSI (1 << 1) +#define KVM_DEV_IRQ_HOST_MSIX (1 << 2) + +#define KVM_DEV_IRQ_GUEST_INTX (1 << 8) +#define KVM_DEV_IRQ_GUEST_MSI (1 << 9) +#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10) + +#define KVM_DEV_IRQ_HOST_MASK 0x00ff +#define KVM_DEV_IRQ_GUEST_MASK 0xff00 + +struct kvm_assigned_irq { + __u32 assigned_dev_id; + __u32 host_irq; /* ignored (legacy field) */ + __u32 guest_irq; + __u32 flags; + union { + __u32 reserved[12]; + }; +}; + +struct kvm_assigned_msix_nr { + __u32 assigned_dev_id; + __u16 entry_nr; + __u16 padding; +}; + +#define KVM_MAX_MSIX_PER_DEV 256 +struct kvm_assigned_msix_entry { + __u32 assigned_dev_id; + __u32 gsi; + __u16 entry; /* The index of entry in the MSI-X table */ + __u16 padding[3]; +}; + +#endif /* __LINUX_KVM_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h new file mode 100644 index 00000000..72cbf08d --- /dev/null +++ b/include/linux/kvm_host.h @@ -0,0 +1,857 @@ +#ifndef __KVM_HOST_H +#define __KVM_HOST_H + +/* + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include <linux/types.h> +#include <linux/hardirq.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/bug.h> +#include <linux/mm.h> +#include <linux/mmu_notifier.h> +#include <linux/preempt.h> +#include <linux/msi.h> +#include <linux/slab.h> +#include <linux/rcupdate.h> +#include <linux/ratelimit.h> +#include <asm/signal.h> + +#include <linux/kvm.h> +#include <linux/kvm_para.h> + +#include <linux/kvm_types.h> + +#include <asm/kvm_host.h> + +#ifndef KVM_MMIO_SIZE +#define KVM_MMIO_SIZE 8 +#endif + +/* + * vcpu->requests bit members + */ +#define KVM_REQ_TLB_FLUSH 0 +#define KVM_REQ_MIGRATE_TIMER 1 +#define KVM_REQ_REPORT_TPR_ACCESS 2 +#define KVM_REQ_MMU_RELOAD 3 +#define KVM_REQ_TRIPLE_FAULT 4 +#define KVM_REQ_PENDING_TIMER 5 +#define KVM_REQ_UNHALT 6 +#define KVM_REQ_MMU_SYNC 7 +#define KVM_REQ_CLOCK_UPDATE 8 +#define KVM_REQ_KICK 9 +#define KVM_REQ_DEACTIVATE_FPU 10 +#define KVM_REQ_EVENT 11 +#define KVM_REQ_APF_HALT 12 +#define KVM_REQ_STEAL_UPDATE 13 +#define KVM_REQ_NMI 14 +#define KVM_REQ_IMMEDIATE_EXIT 15 +#define KVM_REQ_PMU 16 +#define KVM_REQ_PMI 17 + +#define KVM_USERSPACE_IRQ_SOURCE_ID 0 + +struct kvm; +struct kvm_vcpu; +extern struct kmem_cache *kvm_vcpu_cache; + +struct kvm_io_range { + gpa_t addr; + int len; + struct kvm_io_device *dev; +}; + +struct kvm_io_bus { + int dev_count; +#define NR_IOBUS_DEVS 300 + struct kvm_io_range range[NR_IOBUS_DEVS]; +}; + +enum kvm_bus { + KVM_MMIO_BUS, + KVM_PIO_BUS, + KVM_NR_BUSES +}; + +int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + int len, const void *val); +int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, + void *val); +int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + int len, struct kvm_io_device *dev); +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); + +#ifdef CONFIG_KVM_ASYNC_PF +struct kvm_async_pf { + struct work_struct work; + struct list_head link; + struct list_head queue; + struct kvm_vcpu *vcpu; + struct mm_struct *mm; + gva_t gva; + unsigned long addr; + struct kvm_arch_async_pf arch; + struct page *page; + bool done; +}; 
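The KVM_REQ_* values above are bit numbers in vcpu->requests. A sketch of the usual raise-and-consume pattern, using kvm_make_request(), kvm_check_request() and kvm_vcpu_kick() declared later in this header; KVM_REQ_CLOCK_UPDATE is picked only as an example.

#include <linux/kvm_host.h>

/* producer: ask a vcpu to refresh its clock before the next guest entry */
static void example_request_clock_update(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);   /* set_bit in vcpu->requests */
        kvm_vcpu_kick(vcpu);                            /* push it out of guest mode */
}

/* consumer: typically run on the vcpu thread just before re-entering the guest */
static void example_service_requests(struct kvm_vcpu *vcpu)
{
        if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
                /* the bit was set and has now been cleared; do the work here */
        }
}

Because kvm_check_request() is a test-and-clear, repeated kvm_make_request() calls before the vcpu services the bit simply coalesce into one update.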
+ +void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); +void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); +int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, + struct kvm_arch_async_pf *arch); +int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); +#endif + +enum { + OUTSIDE_GUEST_MODE, + IN_GUEST_MODE, + EXITING_GUEST_MODE +}; + +struct kvm_vcpu { + struct kvm *kvm; +#ifdef CONFIG_PREEMPT_NOTIFIERS + struct preempt_notifier preempt_notifier; +#endif + int cpu; + int vcpu_id; + int srcu_idx; + int mode; + unsigned long requests; + unsigned long guest_debug; + + struct mutex mutex; + struct kvm_run *run; + + int fpu_active; + int guest_fpu_loaded, guest_xcr0_loaded; + wait_queue_head_t wq; + struct pid *pid; + int sigset_active; + sigset_t sigset; + struct kvm_vcpu_stat stat; + +#ifdef CONFIG_HAS_IOMEM + int mmio_needed; + int mmio_read_completed; + int mmio_is_write; + int mmio_size; + int mmio_index; + unsigned char mmio_data[KVM_MMIO_SIZE]; + gpa_t mmio_phys_addr; +#endif + +#ifdef CONFIG_KVM_ASYNC_PF + struct { + u32 queued; + struct list_head queue; + struct list_head done; + spinlock_t lock; + } async_pf; +#endif + + struct kvm_vcpu_arch arch; +}; + +static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) +{ + return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); +} + +/* + * Some of the bitops functions do not support too long bitmaps. + * This number must be determined not to exceed such limits. + */ +#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) + +struct kvm_memory_slot { + gfn_t base_gfn; + unsigned long npages; + unsigned long flags; + unsigned long *rmap; + unsigned long *dirty_bitmap; + unsigned long *dirty_bitmap_head; + unsigned long nr_dirty_pages; + struct kvm_arch_memory_slot arch; + unsigned long userspace_addr; + int user_alloc; + int id; +}; + +static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) +{ + return ALIGN(memslot->npages, BITS_PER_LONG) / 8; +} + +struct kvm_kernel_irq_routing_entry { + u32 gsi; + u32 type; + int (*set)(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level); + union { + struct { + unsigned irqchip; + unsigned pin; + } irqchip; + struct msi_msg msi; + }; + struct hlist_node link; +}; + +#ifdef __KVM_HAVE_IOAPIC + +struct kvm_irq_routing_table { + int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS]; + struct kvm_kernel_irq_routing_entry *rt_entries; + u32 nr_rt_entries; + /* + * Array indexed by gsi. Each entry contains list of irq chips + * the gsi is connected to. + */ + struct hlist_head map[0]; +}; + +#else + +struct kvm_irq_routing_table {}; + +#endif + +#ifndef KVM_MEM_SLOTS_NUM +#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) +#endif + +/* + * Note: + * memslots are not sorted by id anymore, please use id_to_memslot() + * to get the memslot by its id. + */ +struct kvm_memslots { + u64 generation; + struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; + /* The mapping table from slot id to the index in memslots[]. 
*/ + int id_to_index[KVM_MEM_SLOTS_NUM]; +}; + +struct kvm { + spinlock_t mmu_lock; + struct mutex slots_lock; + struct mm_struct *mm; /* userspace tied to this vm */ + struct kvm_memslots *memslots; + struct srcu_struct srcu; +#ifdef CONFIG_KVM_APIC_ARCHITECTURE + u32 bsp_vcpu_id; +#endif + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + atomic_t online_vcpus; + int last_boosted_vcpu; + struct list_head vm_list; + struct mutex lock; + struct kvm_io_bus *buses[KVM_NR_BUSES]; +#ifdef CONFIG_HAVE_KVM_EVENTFD + struct { + spinlock_t lock; + struct list_head items; + } irqfds; + struct list_head ioeventfds; +#endif + struct kvm_vm_stat stat; + struct kvm_arch arch; + atomic_t users_count; +#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + spinlock_t ring_lock; + struct list_head coalesced_zones; +#endif + + struct mutex irq_lock; +#ifdef CONFIG_HAVE_KVM_IRQCHIP + /* + * Update side is protected by irq_lock and, + * if configured, irqfds.lock. + */ + struct kvm_irq_routing_table __rcu *irq_routing; + struct hlist_head mask_notifier_list; + struct hlist_head irq_ack_notifier_list; +#endif + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + struct mmu_notifier mmu_notifier; + unsigned long mmu_notifier_seq; + long mmu_notifier_count; +#endif + long tlbs_dirty; +}; + +/* The guest did something we don't support. */ +#define pr_unimpl(vcpu, fmt, ...) \ + pr_err_ratelimited("kvm: %i: cpu%i " fmt, \ + current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__) + +#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) +#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) + +static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) +{ + smp_rmb(); + return kvm->vcpus[i]; +} + +#define kvm_for_each_vcpu(idx, vcpup, kvm) \ + for (idx = 0; \ + idx < atomic_read(&kvm->online_vcpus) && \ + (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ + idx++) + +#define kvm_for_each_memslot(memslot, slots) \ + for (memslot = &slots->memslots[0]; \ + memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ + memslot++) + +int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); +void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); + +void vcpu_load(struct kvm_vcpu *vcpu); +void vcpu_put(struct kvm_vcpu *vcpu); + +int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, + struct module *module); +void kvm_exit(void); + +void kvm_get_kvm(struct kvm *kvm); +void kvm_put_kvm(struct kvm *kvm); +void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new); + +static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) +{ + return rcu_dereference_check(kvm->memslots, + srcu_read_lock_held(&kvm->srcu) + || lockdep_is_held(&kvm->slots_lock)); +} + +static inline struct kvm_memory_slot * +id_to_memslot(struct kvm_memslots *slots, int id) +{ + int index = slots->id_to_index[id]; + struct kvm_memory_slot *slot; + + slot = &slots->memslots[index]; + + WARN_ON(slot->id != id); + return slot; +} + +#define HPA_MSB ((sizeof(hpa_t) * 8) - 1) +#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) +static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } + +extern struct page *bad_page; +extern struct page *fault_page; + +extern pfn_t bad_pfn; +extern pfn_t fault_pfn; + +int is_error_page(struct page *page); +int is_error_pfn(pfn_t pfn); +int is_hwpoison_pfn(pfn_t pfn); +int is_fault_pfn(pfn_t pfn); +int is_noslot_pfn(pfn_t pfn); +int is_invalid_pfn(pfn_t pfn); +int kvm_is_error_hva(unsigned long addr); +int kvm_set_memory_region(struct kvm *kvm, + struct 
kvm_userspace_memory_region *mem, + int user_alloc); +int __kvm_set_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + int user_alloc); +void kvm_arch_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont); +int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages); +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_memory_slot old, + struct kvm_userspace_memory_region *mem, + int user_alloc); +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old, + int user_alloc); +bool kvm_largepages_enabled(void); +void kvm_disable_largepages(void); +void kvm_arch_flush_shadow(struct kvm *kvm); + +int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, + int nr_pages); + +struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); +unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); +void kvm_release_page_clean(struct page *page); +void kvm_release_page_dirty(struct page *page); +void kvm_set_page_dirty(struct page *page); +void kvm_set_page_accessed(struct page *page); + +pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr); +pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); +pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, + bool write_fault, bool *writable); +pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); +pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, + bool *writable); +pfn_t gfn_to_pfn_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn); +void kvm_release_pfn_dirty(pfn_t); +void kvm_release_pfn_clean(pfn_t pfn); +void kvm_set_pfn_dirty(pfn_t pfn); +void kvm_set_pfn_accessed(pfn_t pfn); +void kvm_get_pfn(pfn_t pfn); + +int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, + int len); +int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, + unsigned long len); +int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); +int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); +int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, + int offset, int len); +int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, + unsigned long len); +int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); +int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + gpa_t gpa); +int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); +int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); +struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); +int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); +unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); +void mark_page_dirty(struct kvm *kvm, gfn_t gfn); +void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, + gfn_t gfn); + +void kvm_vcpu_block(struct kvm_vcpu *vcpu); +void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); +void kvm_resched(struct kvm_vcpu *vcpu); +void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); +void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); + +void kvm_flush_remote_tlbs(struct kvm *kvm); +void kvm_reload_remote_mmus(struct kvm *kvm); + +long kvm_arch_dev_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); +int 
kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); + +int kvm_dev_ioctl_check_extension(long ext); + +int kvm_get_dirty_log(struct kvm *kvm, + struct kvm_dirty_log *log, int *is_dirty); +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, + struct kvm_dirty_log *log); + +int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, + struct + kvm_userspace_memory_region *mem, + int user_alloc); +long kvm_arch_vm_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr); + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs); +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs); +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state); +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state); +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg); +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); + +int kvm_arch_init(void *opaque); +void kvm_arch_exit(void); + +int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); +struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); + +int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu); +int kvm_arch_hardware_enable(void *garbage); +void kvm_arch_hardware_disable(void *garbage); +int kvm_arch_hardware_setup(void); +void kvm_arch_hardware_unsetup(void); +void kvm_arch_check_processor_compat(void *rtn); +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); + +void kvm_free_physmem(struct kvm *kvm); + +#ifndef __KVM_HAVE_ARCH_VM_ALLOC +static inline struct kvm *kvm_arch_alloc_vm(void) +{ + return kzalloc(sizeof(struct kvm), GFP_KERNEL); +} + +static inline void kvm_arch_free_vm(struct kvm *kvm) +{ + kfree(kvm); +} +#endif + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); +void kvm_arch_destroy_vm(struct kvm *kvm); +void kvm_free_all_assigned_devices(struct kvm *kvm); +void kvm_arch_sync_events(struct kvm *kvm); + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); +void kvm_vcpu_kick(struct kvm_vcpu *vcpu); + +int kvm_is_mmio_pfn(pfn_t pfn); + +struct kvm_irq_ack_notifier { + struct hlist_node link; + unsigned gsi; + void (*irq_acked)(struct kvm_irq_ack_notifier *kian); +}; + +struct kvm_assigned_dev_kernel { + struct kvm_irq_ack_notifier ack_notifier; + struct list_head list; + int assigned_dev_id; + int host_segnr; + int host_busnr; + int host_devfn; + unsigned int entries_nr; + int host_irq; + bool host_irq_disabled; + bool pci_2_3; + struct msix_entry *host_msix_entries; + int guest_irq; + struct msix_entry *guest_msix_entries; + unsigned long irq_requested_type; + int irq_source_id; + int flags; + struct pci_dev *dev; + struct kvm *kvm; + spinlock_t intx_lock; + spinlock_t intx_mask_lock; + char irq_name[32]; + struct 
pci_saved_state *pci_saved_state; +}; + +struct kvm_irq_mask_notifier { + void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); + int irq; + struct hlist_node link; +}; + +void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, + struct kvm_irq_mask_notifier *kimn); +void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, + struct kvm_irq_mask_notifier *kimn); +void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, + bool mask); + +#ifdef __KVM_HAVE_IOAPIC +void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic, + union kvm_ioapic_redirect_entry *entry, + unsigned long *deliver_bitmask); +#endif +int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level); +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, + int irq_source_id, int level); +void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); +void kvm_register_irq_ack_notifier(struct kvm *kvm, + struct kvm_irq_ack_notifier *kian); +void kvm_unregister_irq_ack_notifier(struct kvm *kvm, + struct kvm_irq_ack_notifier *kian); +int kvm_request_irq_source_id(struct kvm *kvm); +void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); + +/* For vcpu->arch.iommu_flags */ +#define KVM_IOMMU_CACHE_COHERENCY 0x1 + +#ifdef CONFIG_IOMMU_API +int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); +void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); +int kvm_iommu_map_guest(struct kvm *kvm); +int kvm_iommu_unmap_guest(struct kvm *kvm); +int kvm_assign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev); +int kvm_deassign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev); +#else /* CONFIG_IOMMU_API */ +static inline int kvm_iommu_map_pages(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ + return 0; +} + +static inline void kvm_iommu_unmap_pages(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ +} + +static inline int kvm_iommu_map_guest(struct kvm *kvm) +{ + return -ENODEV; +} + +static inline int kvm_iommu_unmap_guest(struct kvm *kvm) +{ + return 0; +} + +static inline int kvm_assign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev) +{ + return 0; +} + +static inline int kvm_deassign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev) +{ + return 0; +} +#endif /* CONFIG_IOMMU_API */ + +static inline void kvm_guest_enter(void) +{ + BUG_ON(preemptible()); + account_system_vtime(current); + current->flags |= PF_VCPU; + /* KVM does not hold any references to rcu protected data when it + * switches CPU into a guest mode. In fact switching to a guest mode + * is very similar to exiting to userspase from rcu point of view. In + * addition CPU may stay in a guest mode for quite a long time (up to + * one time slice). Lets treat guest mode as quiescent state, just like + * we do with user-mode execution. + */ + rcu_virt_note_context_switch(smp_processor_id()); +} + +static inline void kvm_guest_exit(void) +{ + account_system_vtime(current); + current->flags &= ~PF_VCPU; +} + +/* + * search_memslots() and __gfn_to_memslot() are here because they are + * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. + * gfn_to_memslot() itself isn't here as an inline because that would + * bloat other code too much. 
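A sketch of how the memslot helpers defined just below combine to turn a guest frame number into a host virtual address; callers would normally hold the srcu read lock that kvm_memslots() checks for, and returning 0 on a miss is a simplification made for the sketch.

#include <linux/kvm_host.h>

static unsigned long example_gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        slot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        if (!slot)
                return 0;                       /* no memslot covers this gfn */

        return gfn_to_hva_memslot(slot, gfn);   /* userspace_addr + page offset */
}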
+ */ +static inline struct kvm_memory_slot * +search_memslots(struct kvm_memslots *slots, gfn_t gfn) +{ + struct kvm_memory_slot *memslot; + + kvm_for_each_memslot(memslot, slots) + if (gfn >= memslot->base_gfn && + gfn < memslot->base_gfn + memslot->npages) + return memslot; + + return NULL; +} + +static inline struct kvm_memory_slot * +__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) +{ + return search_memslots(slots, gfn); +} + +static inline int memslot_id(struct kvm *kvm, gfn_t gfn) +{ + return gfn_to_memslot(kvm, gfn)->id; +} + +static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) +{ + /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */ + return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - + (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); +} + +static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, + gfn_t gfn) +{ + return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; +} + +static inline gpa_t gfn_to_gpa(gfn_t gfn) +{ + return (gpa_t)gfn << PAGE_SHIFT; +} + +static inline gfn_t gpa_to_gfn(gpa_t gpa) +{ + return (gfn_t)(gpa >> PAGE_SHIFT); +} + +static inline hpa_t pfn_to_hpa(pfn_t pfn) +{ + return (hpa_t)pfn << PAGE_SHIFT; +} + +static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) +{ + set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); +} + +enum kvm_stat_kind { + KVM_STAT_VM, + KVM_STAT_VCPU, +}; + +struct kvm_stats_debugfs_item { + const char *name; + int offset; + enum kvm_stat_kind kind; + struct dentry *dentry; +}; +extern struct kvm_stats_debugfs_item debugfs_entries[]; +extern struct dentry *kvm_debugfs_dir; + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) +{ + if (unlikely(vcpu->kvm->mmu_notifier_count)) + return 1; + /* + * Ensure the read of mmu_notifier_count happens before the read + * of mmu_notifier_seq. This interacts with the smp_wmb() in + * mmu_notifier_invalidate_range_end to make sure that the caller + * either sees the old (non-zero) value of mmu_notifier_count or + * the new (incremented) value of mmu_notifier_seq. + * PowerPC Book3s HV KVM calls this under a per-page lock + * rather than under kvm->mmu_lock, for scalability, so + * can't rely on kvm->mmu_lock to keep things ordered. 
+ */ + smp_rmb(); + if (vcpu->kvm->mmu_notifier_seq != mmu_seq) + return 1; + return 0; +} +#endif + +#ifdef CONFIG_HAVE_KVM_IRQCHIP + +#define KVM_MAX_IRQ_ROUTES 1024 + +int kvm_setup_default_irq_routing(struct kvm *kvm); +int kvm_set_irq_routing(struct kvm *kvm, + const struct kvm_irq_routing_entry *entries, + unsigned nr, + unsigned flags); +void kvm_free_irq_routing(struct kvm *kvm); + +#else + +static inline void kvm_free_irq_routing(struct kvm *kvm) {} + +#endif + +#ifdef CONFIG_HAVE_KVM_EVENTFD + +void kvm_eventfd_init(struct kvm *kvm); +int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags); +void kvm_irqfd_release(struct kvm *kvm); +void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *); +int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); + +#else + +static inline void kvm_eventfd_init(struct kvm *kvm) {} + +static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags) +{ + return -EINVAL; +} + +static inline void kvm_irqfd_release(struct kvm *kvm) {} + +#ifdef CONFIG_HAVE_KVM_IRQCHIP +static inline void kvm_irq_routing_update(struct kvm *kvm, + struct kvm_irq_routing_table *irq_rt) +{ + rcu_assign_pointer(kvm->irq_routing, irq_rt); +} +#endif + +static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) +{ + return -ENOSYS; +} + +#endif /* CONFIG_HAVE_KVM_EVENTFD */ + +#ifdef CONFIG_KVM_APIC_ARCHITECTURE +static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) +{ + return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id; +} + +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu); + +#else + +static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } + +#endif + +#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT + +long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, + unsigned long arg); + +#else + +static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, + unsigned long arg) +{ + return -ENOTTY; +} + +#endif + +static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +{ + set_bit(req, &vcpu->requests); +} + +static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) +{ + if (test_bit(req, &vcpu->requests)) { + clear_bit(req, &vcpu->requests); + return true; + } else { + return false; + } +} + +#endif + diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h new file mode 100644 index 00000000..ff476dda --- /dev/null +++ b/include/linux/kvm_para.h @@ -0,0 +1,37 @@ +#ifndef __LINUX_KVM_PARA_H +#define __LINUX_KVM_PARA_H + +/* + * This header file provides a method for making a hypercall to the host + * Architectures should define: + * - kvm_hypercall0, kvm_hypercall1... 
+ * - kvm_arch_para_features + * - kvm_para_available + */ + +/* Return values for hypercalls */ +#define KVM_ENOSYS 1000 +#define KVM_EFAULT EFAULT +#define KVM_E2BIG E2BIG +#define KVM_EPERM EPERM + +#define KVM_HC_VAPIC_POLL_IRQ 1 +#define KVM_HC_MMU_OP 2 +#define KVM_HC_FEATURES 3 +#define KVM_HC_PPC_MAP_MAGIC_PAGE 4 + +/* + * hypercalls use architecture specific + */ +#include <asm/kvm_para.h> + +#ifdef __KERNEL__ + +static inline int kvm_para_has_feature(unsigned int feature) +{ + if (kvm_arch_para_features() & (1UL << feature)) + return 1; + return 0; +} +#endif /* __KERNEL__ */ +#endif /* __LINUX_KVM_PARA_H */ diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h new file mode 100644 index 00000000..fa7cc724 --- /dev/null +++ b/include/linux/kvm_types.h @@ -0,0 +1,77 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef __KVM_TYPES_H__ +#define __KVM_TYPES_H__ + +#include <asm/types.h> + +/* + * Address types: + * + * gva - guest virtual address + * gpa - guest physical address + * gfn - guest frame number + * hva - host virtual address + * hpa - host physical address + * hfn - host frame number + */ + +typedef unsigned long gva_t; +typedef u64 gpa_t; +typedef u64 gfn_t; + +typedef unsigned long hva_t; +typedef u64 hpa_t; +typedef u64 hfn_t; + +typedef hfn_t pfn_t; + +union kvm_ioapic_redirect_entry { + u64 bits; + struct { + u8 vector; + u8 delivery_mode:3; + u8 dest_mode:1; + u8 delivery_status:1; + u8 polarity:1; + u8 remote_irr:1; + u8 trig_mode:1; + u8 mask:1; + u8 reserve:7; + u8 reserved[4]; + u8 dest_id; + } fields; +}; + +struct kvm_lapic_irq { + u32 vector; + u32 delivery_mode; + u32 dest_mode; + u32 level; + u32 trig_mode; + u32 shorthand; + u32 dest_id; +}; + +struct gfn_to_hva_cache { + u64 generation; + gpa_t gpa; + unsigned long hva; + struct kvm_memory_slot *memslot; +}; + +#endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h new file mode 100644 index 00000000..e77d7f9b --- /dev/null +++ b/include/linux/l2tp.h @@ -0,0 +1,164 @@ +/* + * L2TP-over-IP socket for L2TPv3. + * + * Author: James Chapman <jchapman@katalix.com> + */ + +#ifndef _LINUX_L2TP_H_ +#define _LINUX_L2TP_H_ + +#include <linux/types.h> +#include <linux/socket.h> +#ifdef __KERNEL__ +#include <linux/in.h> +#else +#include <netinet/in.h> +#endif + +#define IPPROTO_L2TP 115 + +/** + * struct sockaddr_l2tpip - the sockaddr structure for L2TP-over-IP sockets + * @l2tp_family: address family number AF_L2TPIP. 
+ * @l2tp_addr: protocol specific address information + * @l2tp_conn_id: connection id of tunnel + */ +#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */ +struct sockaddr_l2tpip { + /* The first fields must match struct sockaddr_in */ + __kernel_sa_family_t l2tp_family; /* AF_INET */ + __be16 l2tp_unused; /* INET port number (unused) */ + struct in_addr l2tp_addr; /* Internet address */ + + __u32 l2tp_conn_id; /* Connection ID of tunnel */ + + /* Pad to size of `struct sockaddr'. */ + unsigned char __pad[sizeof(struct sockaddr) - + sizeof(__kernel_sa_family_t) - + sizeof(__be16) - sizeof(struct in_addr) - + sizeof(__u32)]; +}; + +/***************************************************************************** + * NETLINK_GENERIC netlink family. + *****************************************************************************/ + +/* + * Commands. + * Valid TLVs of each command are:- + * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum, vlanid + * TUNNEL_DELETE - CONN_ID + * TUNNEL_MODIFY - CONN_ID, udpcsum + * TUNNEL_GETSTATS - CONN_ID, (stats) + * TUNNEL_GET - CONN_ID, (...) + * SESSION_CREATE - SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, offset, l2spec + * SESSION_DELETE - SESSION_ID + * SESSION_MODIFY - SESSION_ID, data_seq + * SESSION_GET - SESSION_ID, (...) + * SESSION_GETSTATS - SESSION_ID, (stats) + * + */ +enum { + L2TP_CMD_NOOP, + L2TP_CMD_TUNNEL_CREATE, + L2TP_CMD_TUNNEL_DELETE, + L2TP_CMD_TUNNEL_MODIFY, + L2TP_CMD_TUNNEL_GET, + L2TP_CMD_SESSION_CREATE, + L2TP_CMD_SESSION_DELETE, + L2TP_CMD_SESSION_MODIFY, + L2TP_CMD_SESSION_GET, + __L2TP_CMD_MAX, +}; + +#define L2TP_CMD_MAX (__L2TP_CMD_MAX - 1) + +/* + * ATTR types defined for L2TP + */ +enum { + L2TP_ATTR_NONE, /* no data */ + L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */ + L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */ + L2TP_ATTR_OFFSET, /* u16 */ + L2TP_ATTR_DATA_SEQ, /* u16 */ + L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */ + L2TP_ATTR_L2SPEC_LEN, /* u8, enum l2tp_l2spec_type */ + L2TP_ATTR_PROTO_VERSION, /* u8 */ + L2TP_ATTR_IFNAME, /* string */ + L2TP_ATTR_CONN_ID, /* u32 */ + L2TP_ATTR_PEER_CONN_ID, /* u32 */ + L2TP_ATTR_SESSION_ID, /* u32 */ + L2TP_ATTR_PEER_SESSION_ID, /* u32 */ + L2TP_ATTR_UDP_CSUM, /* u8 */ + L2TP_ATTR_VLAN_ID, /* u16 */ + L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */ + L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */ + L2TP_ATTR_DEBUG, /* u32 */ + L2TP_ATTR_RECV_SEQ, /* u8 */ + L2TP_ATTR_SEND_SEQ, /* u8 */ + L2TP_ATTR_LNS_MODE, /* u8 */ + L2TP_ATTR_USING_IPSEC, /* u8 */ + L2TP_ATTR_RECV_TIMEOUT, /* msec */ + L2TP_ATTR_FD, /* int */ + L2TP_ATTR_IP_SADDR, /* u32 */ + L2TP_ATTR_IP_DADDR, /* u32 */ + L2TP_ATTR_UDP_SPORT, /* u16 */ + L2TP_ATTR_UDP_DPORT, /* u16 */ + L2TP_ATTR_MTU, /* u16 */ + L2TP_ATTR_MRU, /* u16 */ + L2TP_ATTR_STATS, /* nested */ + __L2TP_ATTR_MAX, +}; + +#define L2TP_ATTR_MAX (__L2TP_ATTR_MAX - 1) + +/* Nested in L2TP_ATTR_STATS */ +enum { + L2TP_ATTR_STATS_NONE, /* no data */ + L2TP_ATTR_TX_PACKETS, /* u64 */ + L2TP_ATTR_TX_BYTES, /* u64 */ + L2TP_ATTR_TX_ERRORS, /* u64 */ + L2TP_ATTR_RX_PACKETS, /* u64 */ + L2TP_ATTR_RX_BYTES, /* u64 */ + L2TP_ATTR_RX_SEQ_DISCARDS, /* u64 */ + L2TP_ATTR_RX_OOS_PACKETS, /* u64 */ + L2TP_ATTR_RX_ERRORS, /* u64 */ + __L2TP_ATTR_STATS_MAX, +}; + +#define L2TP_ATTR_STATS_MAX (__L2TP_ATTR_STATS_MAX - 1) + +enum l2tp_pwtype { + L2TP_PWTYPE_NONE = 0x0000, + L2TP_PWTYPE_ETH_VLAN = 0x0004, + L2TP_PWTYPE_ETH = 0x0005, + L2TP_PWTYPE_PPP = 0x0007, + L2TP_PWTYPE_PPP_AC = 0x0008, + L2TP_PWTYPE_IP = 0x000b, + 
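For context, user space reaches the sockaddr_l2tpip address defined above by opening an IP-encapsulated L2TPv3 tunnel socket and binding it. The sketch below follows my reading of the kernel's L2TP documentation (SOCK_DGRAM with protocol IPPROTO_L2TP, first fields matching struct sockaddr_in as the comment says); the address and connection id are made up.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/l2tp.h>		/* struct sockaddr_l2tpip, IPPROTO_L2TP */

int main(void)
{
	/* Hypothetical local tunnel endpoint: 192.0.2.1, connection id 42. */
	struct sockaddr_l2tpip local = {
		.l2tp_family  = AF_INET,
		.l2tp_conn_id = 42,
	};
	int fd;

	inet_pton(AF_INET, "192.0.2.1", &local.l2tp_addr);

	/* L2TPv3 IP-encapsulated tunnel socket, protocol 115 (IPPROTO_L2TP). */
	fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) {
		perror("bind");
		return 1;
	}
	printf("bound L2TP/IP socket for tunnel %u\n", local.l2tp_conn_id);
	return 0;
}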
__L2TP_PWTYPE_MAX +}; + +enum l2tp_l2spec_type { + L2TP_L2SPECTYPE_NONE, + L2TP_L2SPECTYPE_DEFAULT, +}; + +enum l2tp_encap_type { + L2TP_ENCAPTYPE_UDP, + L2TP_ENCAPTYPE_IP, +}; + +enum l2tp_seqmode { + L2TP_SEQ_NONE = 0, + L2TP_SEQ_IP = 1, + L2TP_SEQ_ALL = 2, +}; + +/* + * NETLINK_GENERIC related info + */ +#define L2TP_GENL_NAME "l2tp" +#define L2TP_GENL_VERSION 0x1 + +#endif diff --git a/include/linux/lapb.h b/include/linux/lapb.h new file mode 100644 index 00000000..873c1eb6 --- /dev/null +++ b/include/linux/lapb.h @@ -0,0 +1,57 @@ +/* + * These are the public elements of the Linux LAPB module. + */ + +#ifndef LAPB_KERNEL_H +#define LAPB_KERNEL_H + +#define LAPB_OK 0 +#define LAPB_BADTOKEN 1 +#define LAPB_INVALUE 2 +#define LAPB_CONNECTED 3 +#define LAPB_NOTCONNECTED 4 +#define LAPB_REFUSED 5 +#define LAPB_TIMEDOUT 6 +#define LAPB_NOMEM 7 + +#define LAPB_STANDARD 0x00 +#define LAPB_EXTENDED 0x01 + +#define LAPB_SLP 0x00 +#define LAPB_MLP 0x02 + +#define LAPB_DTE 0x00 +#define LAPB_DCE 0x04 + +struct lapb_register_struct { + void (*connect_confirmation)(struct net_device *dev, int reason); + void (*connect_indication)(struct net_device *dev, int reason); + void (*disconnect_confirmation)(struct net_device *dev, int reason); + void (*disconnect_indication)(struct net_device *dev, int reason); + int (*data_indication)(struct net_device *dev, struct sk_buff *skb); + void (*data_transmit)(struct net_device *dev, struct sk_buff *skb); +}; + +struct lapb_parms_struct { + unsigned int t1; + unsigned int t1timer; + unsigned int t2; + unsigned int t2timer; + unsigned int n2; + unsigned int n2count; + unsigned int window; + unsigned int state; + unsigned int mode; +}; + +extern int lapb_register(struct net_device *dev, + const struct lapb_register_struct *callbacks); +extern int lapb_unregister(struct net_device *dev); +extern int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms); +extern int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms); +extern int lapb_connect_request(struct net_device *dev); +extern int lapb_disconnect_request(struct net_device *dev); +extern int lapb_data_request(struct net_device *dev, struct sk_buff *skb); +extern int lapb_data_received(struct net_device *dev, struct sk_buff *skb); + +#endif diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h new file mode 100644 index 00000000..e23121f9 --- /dev/null +++ b/include/linux/latencytop.h @@ -0,0 +1,53 @@ +/* + * latencytop.h: Infrastructure for displaying latency + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + */ + +#ifndef _INCLUDE_GUARD_LATENCYTOP_H_ +#define _INCLUDE_GUARD_LATENCYTOP_H_ + +#include <linux/compiler.h> +struct task_struct; + +#ifdef CONFIG_LATENCYTOP + +#define LT_SAVECOUNT 32 +#define LT_BACKTRACEDEPTH 12 + +struct latency_record { + unsigned long backtrace[LT_BACKTRACEDEPTH]; + unsigned int count; + unsigned long time; + unsigned long max; +}; + + + +extern int latencytop_enabled; +void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); +static inline void +account_scheduler_latency(struct task_struct *task, int usecs, int inter) +{ + if (unlikely(latencytop_enabled)) + __account_scheduler_latency(task, usecs, inter); +} + +void clear_all_latency_tracing(struct task_struct *p); + +#else + +static inline void +account_scheduler_latency(struct task_struct *task, int usecs, int inter) +{ +} + +static inline void clear_all_latency_tracing(struct task_struct *p) +{ +} + 
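A minimal sketch of how a device driver might attach to the LAPB interface declared above: it fills in a lapb_register_struct and hands it to lapb_register() for its net_device. The demo_ names, callback bodies and error handling are placeholders, not code from any real driver; unused hooks are simply left NULL.

#include <linux/errno.h>
#include <linux/lapb.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_connect_ind(struct net_device *dev, int reason)
{
	netdev_info(dev, "LAPB connected (reason %d)\n", reason);
}

static void demo_disconnect_ind(struct net_device *dev, int reason)
{
	netdev_info(dev, "LAPB disconnected (reason %d)\n", reason);
}

static int demo_data_ind(struct net_device *dev, struct sk_buff *skb)
{
	/* A real driver would pass the frame up its protocol; we drop it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static void demo_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
	/* Queue the LAPB-framed skb on the underlying device. */
	dev_queue_xmit(skb);
}

static const struct lapb_register_struct demo_lapb_callbacks = {
	.connect_indication    = demo_connect_ind,
	.disconnect_indication = demo_disconnect_ind,
	.data_indication       = demo_data_ind,
	.data_transmit         = demo_data_transmit,
};

static int demo_lapb_attach(struct net_device *dev)
{
	if (lapb_register(dev, &demo_lapb_callbacks) != LAPB_OK)
		return -ENODEV;
	return 0;
}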
+#endif + +#endif diff --git a/include/linux/lcd.h b/include/linux/lcd.h new file mode 100644 index 00000000..8877123f --- /dev/null +++ b/include/linux/lcd.h @@ -0,0 +1,115 @@ +/* + * LCD Lowlevel Control Abstraction + * + * Copyright (C) 2003,2004 Hewlett-Packard Company + * + */ + +#ifndef _LINUX_LCD_H +#define _LINUX_LCD_H + +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/fb.h> + +/* Notes on locking: + * + * lcd_device->ops_lock is an internal backlight lock protecting the ops + * field and no code outside the core should need to touch it. + * + * Access to set_power() is serialised by the update_lock mutex since + * most drivers seem to need this and historically get it wrong. + * + * Most drivers don't need locking on their get_power() method. + * If yours does, you need to implement it in the driver. You can use the + * update_lock mutex if appropriate. + * + * Any other use of the locks below is probably wrong. + */ + +struct lcd_device; +struct fb_info; + +struct lcd_properties { + /* The maximum value for contrast (read-only) */ + int max_contrast; +}; + +struct lcd_ops { + /* Get the LCD panel power status (0: full on, 1..3: controller + power on, flat panel power off, 4: full off), see FB_BLANK_XXX */ + int (*get_power)(struct lcd_device *); + /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */ + int (*set_power)(struct lcd_device *, int power); + /* Get the current contrast setting (0-max_contrast) */ + int (*get_contrast)(struct lcd_device *); + /* Set LCD panel contrast */ + int (*set_contrast)(struct lcd_device *, int contrast); + /* Set LCD panel mode (resolutions ...) */ + int (*set_mode)(struct lcd_device *, struct fb_videomode *); + /* Check if given framebuffer device is the one LCD is bound to; + return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */ + int (*check_fb)(struct lcd_device *, struct fb_info *); +}; + +struct lcd_device { + struct lcd_properties props; + /* This protects the 'ops' field. If 'ops' is NULL, the driver that + registered this device has been unloaded, and if class_get_devdata() + points to something in the body of that driver, it is also invalid. */ + struct mutex ops_lock; + /* If this is NULL, the backing module is unloaded */ + struct lcd_ops *ops; + /* Serialise access to set_power method */ + struct mutex update_lock; + /* The framebuffer notifier block */ + struct notifier_block fb_notif; + + struct device dev; +}; + +struct lcd_platform_data { + /* reset lcd panel device. */ + int (*reset)(struct lcd_device *ld); + /* on or off to lcd panel. if 'enable' is 0 then + lcd power off and 1, lcd power on. */ + int (*power_on)(struct lcd_device *ld, int enable); + + /* it indicates whether lcd panel was enabled + from bootloader or not. */ + int lcd_enabled; + /* it means delay for stable time when it becomes low to high + or high to low that is dependent on whether reset gpio is + low active or high active. */ + unsigned int reset_delay; + /* stable time needing to become lcd power on. */ + unsigned int power_on_delay; + /* stable time needing to become lcd power off. */ + unsigned int power_off_delay; + + /* it could be used for any purpose. 
*/ + void *pdata; +}; + +static inline void lcd_set_power(struct lcd_device *ld, int power) +{ + mutex_lock(&ld->update_lock); + if (ld->ops && ld->ops->set_power) + ld->ops->set_power(ld, power); + mutex_unlock(&ld->update_lock); +} + +extern struct lcd_device *lcd_device_register(const char *name, + struct device *parent, void *devdata, struct lcd_ops *ops); +extern void lcd_device_unregister(struct lcd_device *ld); + +#define to_lcd_device(obj) container_of(obj, struct lcd_device, dev) + +static inline void * lcd_get_data(struct lcd_device *ld_dev) +{ + return dev_get_drvdata(&ld_dev->dev); +} + + +#endif diff --git a/include/linux/lcm.h b/include/linux/lcm.h new file mode 100644 index 00000000..7bf01d77 --- /dev/null +++ b/include/linux/lcm.h @@ -0,0 +1,8 @@ +#ifndef _LCM_H +#define _LCM_H + +#include <linux/compiler.h> + +unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; + +#endif /* _LCM_H */ diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h new file mode 100644 index 00000000..eeae6e74 --- /dev/null +++ b/include/linux/led-lm3530.h @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2011 ST-Ericsson SA. + * Copyright (C) 2009 Motorola, Inc. + * + * License Terms: GNU General Public License v2 + * + * Simple driver for National Semiconductor LM35330 Backlight driver chip + * + * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com> + * based on leds-lm3530.c by Dan Murphy <D.Murphy@motorola.com> + */ + +#ifndef _LINUX_LED_LM3530_H__ +#define _LINUX_LED_LM3530_H__ + +#define LM3530_FS_CURR_5mA (0) /* Full Scale Current */ +#define LM3530_FS_CURR_8mA (1) +#define LM3530_FS_CURR_12mA (2) +#define LM3530_FS_CURR_15mA (3) +#define LM3530_FS_CURR_19mA (4) +#define LM3530_FS_CURR_22mA (5) +#define LM3530_FS_CURR_26mA (6) +#define LM3530_FS_CURR_29mA (7) + +#define LM3530_ALS_AVRG_TIME_32ms (0) /* ALS Averaging Time */ +#define LM3530_ALS_AVRG_TIME_64ms (1) +#define LM3530_ALS_AVRG_TIME_128ms (2) +#define LM3530_ALS_AVRG_TIME_256ms (3) +#define LM3530_ALS_AVRG_TIME_512ms (4) +#define LM3530_ALS_AVRG_TIME_1024ms (5) +#define LM3530_ALS_AVRG_TIME_2048ms (6) +#define LM3530_ALS_AVRG_TIME_4096ms (7) + +#define LM3530_RAMP_TIME_1ms (0) /* Brigtness Ramp Time */ +#define LM3530_RAMP_TIME_130ms (1) /* Max to 0 and vice versa */ +#define LM3530_RAMP_TIME_260ms (2) +#define LM3530_RAMP_TIME_520ms (3) +#define LM3530_RAMP_TIME_1s (4) +#define LM3530_RAMP_TIME_2s (5) +#define LM3530_RAMP_TIME_4s (6) +#define LM3530_RAMP_TIME_8s (7) + +/* ALS Resistor Select */ +#define LM3530_ALS_IMPD_Z (0x00) /* ALS Impedance */ +#define LM3530_ALS_IMPD_13_53kOhm (0x01) +#define LM3530_ALS_IMPD_9_01kOhm (0x02) +#define LM3530_ALS_IMPD_5_41kOhm (0x03) +#define LM3530_ALS_IMPD_2_27kOhm (0x04) +#define LM3530_ALS_IMPD_1_94kOhm (0x05) +#define LM3530_ALS_IMPD_1_81kOhm (0x06) +#define LM3530_ALS_IMPD_1_6kOhm (0x07) +#define LM3530_ALS_IMPD_1_138kOhm (0x08) +#define LM3530_ALS_IMPD_1_05kOhm (0x09) +#define LM3530_ALS_IMPD_1_011kOhm (0x0A) +#define LM3530_ALS_IMPD_941Ohm (0x0B) +#define LM3530_ALS_IMPD_759Ohm (0x0C) +#define LM3530_ALS_IMPD_719Ohm (0x0D) +#define LM3530_ALS_IMPD_700Ohm (0x0E) +#define LM3530_ALS_IMPD_667Ohm (0x0F) + +enum lm3530_mode { + LM3530_BL_MODE_MANUAL = 0, /* "man" */ + LM3530_BL_MODE_ALS, /* "als" */ + LM3530_BL_MODE_PWM, /* "pwm" */ +}; + +/* ALS input select */ +enum lm3530_als_mode { + LM3530_INPUT_AVRG = 0, /* ALS1 and ALS2 input average */ + LM3530_INPUT_ALS1, /* ALS1 Input */ + LM3530_INPUT_ALS2, /* ALS2 Input */ + LM3530_INPUT_CEIL, /* Max of ALS1 and 
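A sketch of the driver side of the LCD abstraction above, assuming a hypothetical panel: it implements two lcd_ops hooks and registers through lcd_device_register() exactly as declared, retrieving its state via lcd_get_data(). Power values follow the FB_BLANK_* convention noted in the ops comments. Callers then go through lcd_set_power(), which serialises on update_lock as the locking notes above require.

#include <linux/fb.h>
#include <linux/lcd.h>

/* Hypothetical panel state, stored as the devdata of the lcd_device. */
struct demo_panel {
	int power;	/* FB_BLANK_* value */
};

static int demo_get_power(struct lcd_device *ld)
{
	struct demo_panel *panel = lcd_get_data(ld);

	return panel->power;
}

static int demo_set_power(struct lcd_device *ld, int power)
{
	struct demo_panel *panel = lcd_get_data(ld);

	/* A real driver would toggle regulators or GPIOs here. */
	panel->power = power;
	return 0;
}

static struct lcd_ops demo_lcd_ops = {
	.get_power = demo_get_power,
	.set_power = demo_set_power,
};

static struct lcd_device *demo_panel_register(struct device *parent,
					      struct demo_panel *panel)
{
	/* "demo-lcd" is an illustrative device name. */
	return lcd_device_register("demo-lcd", parent, panel, &demo_lcd_ops);
}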
ALS2 */ +}; + +/* PWM Platform Specific Data */ +struct lm3530_pwm_data { + void (*pwm_set_intensity) (int brightness, int max_brightness); + int (*pwm_get_intensity) (int max_brightness); +}; + +/** + * struct lm3530_platform_data + * @mode: mode of operation i.e. Manual, ALS or PWM + * @als_input_mode: select source of ALS input - ALS1/2 or average + * @max_current: full scale LED current + * @pwm_pol_hi: PWM input polarity - active high/active low + * @als_avrg_time: ALS input averaging time + * @brt_ramp_law: brightness mapping mode - exponential/linear + * @brt_ramp_fall: rate of fall of led current + * @brt_ramp_rise: rate of rise of led current + * @als1_resistor_sel: internal resistance from ALS1 input to ground + * @als2_resistor_sel: internal resistance from ALS2 input to ground + * @als_vmin: als input voltage calibrated for max brightness in mV + * @als_vmax: als input voltage calibrated for min brightness in mV + * @brt_val: brightness value (0-255) + * @pwm_data: PWM control functions (only valid when the mode is PWM) + */ +struct lm3530_platform_data { + enum lm3530_mode mode; + enum lm3530_als_mode als_input_mode; + + u8 max_current; + bool pwm_pol_hi; + u8 als_avrg_time; + + bool brt_ramp_law; + u8 brt_ramp_fall; + u8 brt_ramp_rise; + + u8 als1_resistor_sel; + u8 als2_resistor_sel; + + u32 als_vmin; + u32 als_vmax; + + u8 brt_val; + + struct lm3530_pwm_data pwm_data; +}; + +#endif /* _LINUX_LED_LM3530_H__ */ diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h new file mode 100644 index 00000000..42f854a1 --- /dev/null +++ b/include/linux/leds-bd2802.h @@ -0,0 +1,26 @@ +/* + * leds-bd2802.h - RGB LED Driver + * + * Copyright (C) 2009 Samsung Electronics + * Kim Kyuwon <q1.kim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf + * + */ +#ifndef _LEDS_BD2802_H_ +#define _LEDS_BD2802_H_ + +struct bd2802_led_platform_data{ + int reset_gpio; + u8 rgb_time; +}; + +#define RGB_TIME(slopedown, slopeup, waveform) \ + ((slopedown) << 6 | (slopeup) << 4 | (waveform)) + +#endif /* _LEDS_BD2802_H_ */ + diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h new file mode 100644 index 00000000..2618aa90 --- /dev/null +++ b/include/linux/leds-lp3944.h @@ -0,0 +1,50 @@ +/* + * leds-lp3944.h - platform data structure for lp3944 led controller + * + * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __LINUX_LEDS_LP3944_H +#define __LINUX_LEDS_LP3944_H + +#define LP3944_LED0 0 +#define LP3944_LED1 1 +#define LP3944_LED2 2 +#define LP3944_LED3 3 +#define LP3944_LED4 4 +#define LP3944_LED5 5 +#define LP3944_LED6 6 +#define LP3944_LED7 7 +#define LP3944_LEDS_MAX 8 + +#define LP3944_LED_STATUS_MASK 0x03 +enum lp3944_status { + LP3944_LED_STATUS_OFF = 0x0, + LP3944_LED_STATUS_ON = 0x1, + LP3944_LED_STATUS_DIM0 = 0x2, + LP3944_LED_STATUS_DIM1 = 0x3 +}; + +enum lp3944_type { + LP3944_LED_TYPE_NONE, + LP3944_LED_TYPE_LED, + LP3944_LED_TYPE_LED_INVERTED, +}; + +struct lp3944_led { + char *name; + enum lp3944_type type; + enum lp3944_status status; +}; + +struct lp3944_platform_data { + struct lp3944_led leds[LP3944_LEDS_MAX]; + u8 leds_size; +}; + +#endif /* __LINUX_LEDS_LP3944_H */ diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h new file mode 100644 index 00000000..3f071ec0 --- /dev/null +++ b/include/linux/leds-lp5521.h @@ -0,0 +1,73 @@ +/* + * LP5521 LED chip driver. + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_LP5521_H +#define __LINUX_LP5521_H + +/* See Documentation/leds/leds-lp5521.txt */ + +struct lp5521_led_config { + char *name; + u8 chan_nr; + u8 led_current; /* mA x10, 0 if led is not connected */ + u8 max_current; +}; + +struct lp5521_led_pattern { + u8 *r; + u8 *g; + u8 *b; + u8 size_r; + u8 size_g; + u8 size_b; +}; + +#define LP5521_CLOCK_AUTO 0 +#define LP5521_CLOCK_INT 1 +#define LP5521_CLOCK_EXT 2 + +/* Bits in CONFIG register */ +#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */ +#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */ +#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */ +#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */ +#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */ +#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */ +#define LP5521_R_TO_BATT 4 /* R out: 0 = CP, 1 = Vbat */ +#define LP5521_CLK_SRC_EXT 0 /* Ext-clk source (CLK_32K) */ +#define LP5521_CLK_INT 1 /* Internal clock */ +#define LP5521_CLK_AUTO 2 /* Automatic clock selection */ + +struct lp5521_platform_data { + struct lp5521_led_config *led_config; + u8 num_channels; + u8 clock_mode; + int (*setup_resources)(void); + void (*release_resources)(void); + void (*enable)(bool state); + const char *label; + u8 update_config; + struct lp5521_led_pattern *patterns; + int num_patterns; +}; + +#endif /* __LINUX_LP5521_H */ diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h new file mode 100644 index 00000000..2694289b --- /dev/null +++ b/include/linux/leds-lp5523.h @@ -0,0 +1,48 @@ +/* + * LP5523 LED Driver + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can 
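A board-file style sketch of how the lp3944 platform data above might be wired up: two outputs get names and initial states, the rest stay LP3944_LED_TYPE_NONE. The LED names, I2C bus number and 0x60 address are invented for illustration, and the "lp3944" device name is assumed to match the driver binding.

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds-lp3944.h>

static struct lp3944_platform_data demo_lp3944_pdata = {
	.leds = {
		[LP3944_LED0] = {
			.name   = "demo::power",
			.type   = LP3944_LED_TYPE_LED,
			.status = LP3944_LED_STATUS_ON,
		},
		[LP3944_LED1] = {
			.name   = "demo::status",
			.type   = LP3944_LED_TYPE_LED_INVERTED,
			.status = LP3944_LED_STATUS_OFF,
		},
	},
	.leds_size = LP3944_LEDS_MAX,	/* unused slots remain TYPE_NONE */
};

static struct i2c_board_info demo_i2c_board_info[] __initdata = {
	{
		I2C_BOARD_INFO("lp3944", 0x60),	/* assumed name, made-up address */
		.platform_data = &demo_lp3944_pdata,
	},
};

static int __init demo_board_leds_init(void)
{
	/* Bus number 0 is just a placeholder for this sketch. */
	return i2c_register_board_info(0, demo_i2c_board_info,
				       ARRAY_SIZE(demo_i2c_board_info));
}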
redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_LP5523_H +#define __LINUX_LP5523_H + +/* See Documentation/leds/leds-lp5523.txt */ + +struct lp5523_led_config { + u8 chan_nr; + u8 led_current; /* mA x10, 0 if led is not connected */ + u8 max_current; +}; + +#define LP5523_CLOCK_AUTO 0 +#define LP5523_CLOCK_INT 1 +#define LP5523_CLOCK_EXT 2 + +struct lp5523_platform_data { + struct lp5523_led_config *led_config; + u8 num_channels; + u8 clock_mode; + int (*setup_resources)(void); + void (*release_resources)(void); + void (*enable)(bool state); + const char *label; +}; + +#endif /* __LINUX_LP5523_H */ diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h new file mode 100644 index 00000000..b8d6fffe --- /dev/null +++ b/include/linux/leds-pca9532.h @@ -0,0 +1,48 @@ +/* + * pca9532.h - platform data structure for pca9532 led controller + * + * Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf + * + */ + +#ifndef __LINUX_PCA9532_H +#define __LINUX_PCA9532_H + +#include <linux/leds.h> +#include <linux/workqueue.h> + +enum pca9532_state { + PCA9532_OFF = 0x0, + PCA9532_ON = 0x1, + PCA9532_PWM0 = 0x2, + PCA9532_PWM1 = 0x3 +}; + +enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED, + PCA9532_TYPE_N2100_BEEP, PCA9532_TYPE_GPIO }; + +struct pca9532_led { + u8 id; + struct i2c_client *client; + char *name; + struct led_classdev ldev; + struct work_struct work; + enum pca9532_type type; + enum pca9532_state state; +}; + +struct pca9532_platform_data { + struct pca9532_led leds[16]; + u8 pwm[2]; + u8 psc[2]; + int gpio_base; +}; + +#endif /* __LINUX_PCA9532_H */ + diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h new file mode 100644 index 00000000..e2337a8c --- /dev/null +++ b/include/linux/leds-regulator.h @@ -0,0 +1,46 @@ +/* + * leds-regulator.h - platform data structure for regulator driven LEDs. + * + * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __LINUX_LEDS_REGULATOR_H +#define __LINUX_LEDS_REGULATOR_H + +/* + * Use "vled" as supply id when declaring the regulator consumer: + * + * static struct regulator_consumer_supply pcap_regulator_VVIB_consumers [] = { + * { .dev_name = "leds-regulator.0", .supply = "vled" }, + * }; + * + * If you have several regulator driven LEDs, you can append a numerical id to + * .dev_name as done above, and use the same id when declaring the platform + * device: + * + * static struct led_regulator_platform_data a780_vibrator_data = { + * .name = "a780::vibrator", + * }; + * + * static struct platform_device a780_vibrator = { + * .name = "leds-regulator", + * .id = 0, + * .dev = { + * .platform_data = &a780_vibrator_data, + * }, + * }; + */ + +#include <linux/leds.h> + +struct led_regulator_platform_data { + char *name; /* LED name as expected by LED class */ + enum led_brightness brightness; /* initial brightness value */ +}; + +#endif /* __LINUX_LEDS_REGULATOR_H */ diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h new file mode 100644 index 00000000..dcabf4fa --- /dev/null +++ b/include/linux/leds-tca6507.h @@ -0,0 +1,34 @@ +/* + * TCA6507 LED chip driver. + * + * Copyright (C) 2011 Neil Brown <neil@brown.name> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_TCA6507_H +#define __LINUX_TCA6507_H +#include <linux/leds.h> + +struct tca6507_platform_data { + struct led_platform_data leds; +#ifdef CONFIG_GPIOLIB + int gpio_base; + void (*setup)(unsigned gpio_base, unsigned ngpio); +#endif +}; + +#define TCA6507_MAKE_GPIO 1 +#endif /* __LINUX_TCA6507_H*/ diff --git a/include/linux/leds.h b/include/linux/leds.h new file mode 100644 index 00000000..5884def1 --- /dev/null +++ b/include/linux/leds.h @@ -0,0 +1,213 @@ +/* + * Driver model for leds and led triggers + * + * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu> + * Copyright (C) 2005 Richard Purdie <rpurdie@openedhand.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ +#ifndef __LINUX_LEDS_H_INCLUDED +#define __LINUX_LEDS_H_INCLUDED + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/rwsem.h> +#include <linux/timer.h> + +struct device; +/* + * LED Core + */ + +enum led_brightness { + LED_OFF = 0, + LED_HALF = 127, + LED_FULL = 255, +}; + +struct led_classdev { + const char *name; + int brightness; + int max_brightness; + int flags; + + /* Lower 16 bits reflect status */ +#define LED_SUSPENDED (1 << 0) + /* Upper 16 bits reflect control information */ +#define LED_CORE_SUSPENDRESUME (1 << 16) + + /* Set LED brightness level */ + /* Must not sleep, use a workqueue if needed */ + void (*brightness_set)(struct led_classdev *led_cdev, + enum led_brightness brightness); + /* Get LED brightness level */ + enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); + + /* + * Activate hardware accelerated blink, delays are in milliseconds + * and if both are zero then a sensible default should be chosen. + * The call should adjust the timings in that case and if it can't + * match the values specified exactly. + * Deactivate blinking again when the brightness is set to a fixed + * value via the brightness_set() callback. + */ + int (*blink_set)(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off); + + struct device *dev; + struct list_head node; /* LED Device list */ + const char *default_trigger; /* Trigger to use */ + + unsigned long blink_delay_on, blink_delay_off; + struct timer_list blink_timer; + int blink_brightness; + +#ifdef CONFIG_LEDS_TRIGGERS + /* Protects the trigger data below */ + struct rw_semaphore trigger_lock; + + struct led_trigger *trigger; + struct list_head trig_list; + void *trigger_data; +#endif +}; + +extern int led_classdev_register(struct device *parent, + struct led_classdev *led_cdev); +extern void led_classdev_unregister(struct led_classdev *led_cdev); +extern void led_classdev_suspend(struct led_classdev *led_cdev); +extern void led_classdev_resume(struct led_classdev *led_cdev); + +/** + * led_blink_set - set blinking with software fallback + * @led_cdev: the LED to start blinking + * @delay_on: the time it should be on (in ms) + * @delay_off: the time it should ble off (in ms) + * + * This function makes the LED blink, attempting to use the + * hardware acceleration if possible, but falling back to + * software blinking if there is no hardware blinking or if + * the LED refuses the passed values. + * + * Note that if software blinking is active, simply calling + * led_cdev->brightness_set() will not stop the blinking, + * use led_classdev_brightness_set() instead. + */ +extern void led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off); +/** + * led_brightness_set - set LED brightness + * @led_cdev: the LED to set + * @brightness: the brightness to set it to + * + * Set an LED's brightness, and, if necessary, cancel the + * software blink timer that implements blinking when the + * hardware doesn't. 
+ */ +extern void led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness); + +/* + * LED Triggers + */ +#ifdef CONFIG_LEDS_TRIGGERS + +#define TRIG_NAME_MAX 50 + +struct led_trigger { + /* Trigger Properties */ + const char *name; + void (*activate)(struct led_classdev *led_cdev); + void (*deactivate)(struct led_classdev *led_cdev); + + /* LEDs under control by this trigger (for simple triggers) */ + rwlock_t leddev_list_lock; + struct list_head led_cdevs; + + /* Link to next registered trigger */ + struct list_head next_trig; +}; + +/* Registration functions for complex triggers */ +extern int led_trigger_register(struct led_trigger *trigger); +extern void led_trigger_unregister(struct led_trigger *trigger); + +/* Registration functions for simple triggers */ +#define DEFINE_LED_TRIGGER(x) static struct led_trigger *x; +#define DEFINE_LED_TRIGGER_GLOBAL(x) struct led_trigger *x; +extern void led_trigger_register_simple(const char *name, + struct led_trigger **trigger); +extern void led_trigger_unregister_simple(struct led_trigger *trigger); +extern void led_trigger_event(struct led_trigger *trigger, + enum led_brightness event); +extern void led_trigger_blink(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off); + +#else + +/* Triggers aren't active - null macros */ +#define DEFINE_LED_TRIGGER(x) +#define DEFINE_LED_TRIGGER_GLOBAL(x) +#define led_trigger_register_simple(x, y) do {} while(0) +#define led_trigger_unregister_simple(x) do {} while(0) +#define led_trigger_event(x, y) do {} while(0) + +#endif + +/* Trigger specific functions */ +#ifdef CONFIG_LEDS_TRIGGER_IDE_DISK +extern void ledtrig_ide_activity(void); +#else +#define ledtrig_ide_activity() do {} while(0) +#endif + +/* + * Generic LED platform data for describing LED names and default triggers. 
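A minimal sketch of a driver built on the led_classdev interface above, assuming a hypothetical platform device: it supplies a non-sleeping brightness_set() hook and registers with led_classdev_register(). The demo_ names and the "heartbeat" default trigger are illustrative, and the hardware access is stubbed out.

#include <linux/leds.h>
#include <linux/platform_device.h>

/* Stand-in for a real register write on some hypothetical controller. */
static void demo_hw_set_level(u8 level)
{
}

/* Called with the new level; leds.h requires this hook not to sleep. */
static void demo_brightness_set(struct led_classdev *cdev,
				enum led_brightness value)
{
	demo_hw_set_level(value);
}

static struct led_classdev demo_led = {
	.name            = "demo::activity",
	.brightness_set  = demo_brightness_set,
	.max_brightness  = LED_FULL,
	.default_trigger = "heartbeat",		/* illustrative trigger name */
};

static int demo_led_probe(struct platform_device *pdev)
{
	return led_classdev_register(&pdev->dev, &demo_led);
}

static int demo_led_remove(struct platform_device *pdev)
{
	led_classdev_unregister(&demo_led);
	return 0;
}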
+ */ +struct led_info { + const char *name; + const char *default_trigger; + int flags; +}; + +struct led_platform_data { + int num_leds; + struct led_info *leds; +}; + +/* For the leds-gpio driver */ +struct gpio_led { + const char *name; + const char *default_trigger; + unsigned gpio; + unsigned active_low : 1; + unsigned retain_state_suspended : 1; + unsigned default_state : 2; + /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ +}; +#define LEDS_GPIO_DEFSTATE_OFF 0 +#define LEDS_GPIO_DEFSTATE_ON 1 +#define LEDS_GPIO_DEFSTATE_KEEP 2 + +struct gpio_led_platform_data { + int num_leds; + const struct gpio_led *leds; + +#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ +#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ +#define GPIO_LED_BLINK 2 /* Please, blink */ + int (*gpio_blink_set)(unsigned gpio, int state, + unsigned long *delay_on, + unsigned long *delay_off); +}; + +struct platform_device *gpio_led_register_device( + int id, const struct gpio_led_platform_data *pdata); + +#endif /* __LINUX_LEDS_H_INCLUDED */ diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h new file mode 100644 index 00000000..33a07116 --- /dev/null +++ b/include/linux/leds_pwm.h @@ -0,0 +1,21 @@ +/* + * PWM LED driver data - see drivers/leds/leds-pwm.c + */ +#ifndef __LINUX_LEDS_PWM_H +#define __LINUX_LEDS_PWM_H + +struct led_pwm { + const char *name; + const char *default_trigger; + unsigned pwm_id; + u8 active_low; + unsigned max_brightness; + unsigned pwm_period_ns; +}; + +struct led_pwm_platform_data { + int num_leds; + struct led_pwm *leds; +}; + +#endif diff --git a/include/linux/lglock.h b/include/linux/lglock.h new file mode 100644 index 00000000..87f402cc --- /dev/null +++ b/include/linux/lglock.h @@ -0,0 +1,200 @@ +/* + * Specialised local-global spinlock. Can only be declared as global variables + * to avoid overhead and keep things simple (and we don't want to start using + * these inside dynamically allocated structures). + * + * "local/global locks" (lglocks) can be used to: + * + * - Provide fast exclusive access to per-CPU data, with exclusive access to + * another CPU's data allowed but possibly subject to contention, and to + * provide very slow exclusive access to all per-CPU data. + * - Or to provide very fast and scalable read serialisation, and to provide + * very slow exclusive serialisation of data (not necessarily per-CPU data). + * + * Brlocks are also implemented as a short-hand notation for the latter use + * case. + * + * Copyright 2009, 2010, Nick Piggin, Novell Inc. 
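The gpio_led structures above are normally filled in by board code. A sketch with invented GPIO numbers and LED names, registering through gpio_led_register_device() as declared above, might look like this; the "ide-disk" trigger name is illustrative and pairs with the ledtrig_ide_activity() hook.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds.h>

static const struct gpio_led demo_gpio_leds[] = {
	{
		.name          = "demo::power",
		.gpio          = 130,
		.default_state = LEDS_GPIO_DEFSTATE_ON,
	},
	{
		.name            = "demo::disk",
		.gpio            = 131,
		.active_low      = 1,
		.default_trigger = "ide-disk",
		.default_state   = LEDS_GPIO_DEFSTATE_OFF,
	},
};

static const struct gpio_led_platform_data demo_gpio_led_pdata = {
	.num_leds = ARRAY_SIZE(demo_gpio_leds),
	.leds     = demo_gpio_leds,
};

static void __init demo_board_init_leds(void)
{
	/* id -1 requests a single, unnumbered "leds-gpio" instance. */
	gpio_led_register_device(-1, &demo_gpio_led_pdata);
}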
+ */ +#ifndef __LINUX_LGLOCK_H +#define __LINUX_LGLOCK_H + +#include <linux/spinlock.h> +#include <linux/lockdep.h> +#include <linux/percpu.h> +#include <linux/cpu.h> + +/* can make br locks by using local lock for read side, global lock for write */ +#define br_lock_init(name) name##_lock_init() +#define br_read_lock(name) name##_local_lock() +#define br_read_unlock(name) name##_local_unlock() +#define br_write_lock(name) name##_global_lock_online() +#define br_write_unlock(name) name##_global_unlock_online() + +#define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name) +#define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name) + + +#define lg_lock_init(name) name##_lock_init() +#define lg_local_lock(name) name##_local_lock() +#define lg_local_unlock(name) name##_local_unlock() +#define lg_local_lock_cpu(name, cpu) name##_local_lock_cpu(cpu) +#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu) +#define lg_global_lock(name) name##_global_lock() +#define lg_global_unlock(name) name##_global_unlock() +#define lg_global_lock_online(name) name##_global_lock_online() +#define lg_global_unlock_online(name) name##_global_unlock_online() + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define LOCKDEP_INIT_MAP lockdep_init_map + +#define DEFINE_LGLOCK_LOCKDEP(name) \ + struct lock_class_key name##_lock_key; \ + struct lockdep_map name##_lock_dep_map; \ + EXPORT_SYMBOL(name##_lock_dep_map) + +#else +#define LOCKDEP_INIT_MAP(a, b, c, d) + +#define DEFINE_LGLOCK_LOCKDEP(name) +#endif + + +#define DECLARE_LGLOCK(name) \ + extern void name##_lock_init(void); \ + extern void name##_local_lock(void); \ + extern void name##_local_unlock(void); \ + extern void name##_local_lock_cpu(int cpu); \ + extern void name##_local_unlock_cpu(int cpu); \ + extern void name##_global_lock(void); \ + extern void name##_global_unlock(void); \ + extern void name##_global_lock_online(void); \ + extern void name##_global_unlock_online(void); \ + +#define DEFINE_LGLOCK(name) \ + \ + DEFINE_SPINLOCK(name##_cpu_lock); \ + cpumask_t name##_cpus __read_mostly; \ + DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ + DEFINE_LGLOCK_LOCKDEP(name); \ + \ + static int \ + name##_lg_cpu_callback(struct notifier_block *nb, \ + unsigned long action, void *hcpu) \ + { \ + switch (action & ~CPU_TASKS_FROZEN) { \ + case CPU_UP_PREPARE: \ + spin_lock(&name##_cpu_lock); \ + cpu_set((unsigned long)hcpu, name##_cpus); \ + spin_unlock(&name##_cpu_lock); \ + break; \ + case CPU_UP_CANCELED: case CPU_DEAD: \ + spin_lock(&name##_cpu_lock); \ + cpu_clear((unsigned long)hcpu, name##_cpus); \ + spin_unlock(&name##_cpu_lock); \ + } \ + return NOTIFY_OK; \ + } \ + static struct notifier_block name##_lg_cpu_notifier = { \ + .notifier_call = name##_lg_cpu_callback, \ + }; \ + void name##_lock_init(void) { \ + int i; \ + LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ + for_each_possible_cpu(i) { \ + arch_spinlock_t *lock; \ + lock = &per_cpu(name##_lock, i); \ + *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ + } \ + register_hotcpu_notifier(&name##_lg_cpu_notifier); \ + get_online_cpus(); \ + for_each_online_cpu(i) \ + cpu_set(i, name##_cpus); \ + put_online_cpus(); \ + } \ + EXPORT_SYMBOL(name##_lock_init); \ + \ + void name##_local_lock(void) { \ + arch_spinlock_t *lock; \ + preempt_disable(); \ + rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ + lock = &__get_cpu_var(name##_lock); \ + arch_spin_lock(lock); \ + } \ + EXPORT_SYMBOL(name##_local_lock); \ + \ + void name##_local_unlock(void) { \ + arch_spinlock_t *lock; \ + 
rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ + lock = &__get_cpu_var(name##_lock); \ + arch_spin_unlock(lock); \ + preempt_enable(); \ + } \ + EXPORT_SYMBOL(name##_local_unlock); \ + \ + void name##_local_lock_cpu(int cpu) { \ + arch_spinlock_t *lock; \ + preempt_disable(); \ + rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ + lock = &per_cpu(name##_lock, cpu); \ + arch_spin_lock(lock); \ + } \ + EXPORT_SYMBOL(name##_local_lock_cpu); \ + \ + void name##_local_unlock_cpu(int cpu) { \ + arch_spinlock_t *lock; \ + rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ + lock = &per_cpu(name##_lock, cpu); \ + arch_spin_unlock(lock); \ + preempt_enable(); \ + } \ + EXPORT_SYMBOL(name##_local_unlock_cpu); \ + \ + void name##_global_lock_online(void) { \ + int i; \ + spin_lock(&name##_cpu_lock); \ + rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ + for_each_cpu(i, &name##_cpus) { \ + arch_spinlock_t *lock; \ + lock = &per_cpu(name##_lock, i); \ + arch_spin_lock(lock); \ + } \ + } \ + EXPORT_SYMBOL(name##_global_lock_online); \ + \ + void name##_global_unlock_online(void) { \ + int i; \ + rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ + for_each_cpu(i, &name##_cpus) { \ + arch_spinlock_t *lock; \ + lock = &per_cpu(name##_lock, i); \ + arch_spin_unlock(lock); \ + } \ + spin_unlock(&name##_cpu_lock); \ + } \ + EXPORT_SYMBOL(name##_global_unlock_online); \ + \ + void name##_global_lock(void) { \ + int i; \ + preempt_disable(); \ + rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ + for_each_possible_cpu(i) { \ + arch_spinlock_t *lock; \ + lock = &per_cpu(name##_lock, i); \ + arch_spin_lock(lock); \ + } \ + } \ + EXPORT_SYMBOL(name##_global_lock); \ + \ + void name##_global_unlock(void) { \ + int i; \ + rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ + for_each_possible_cpu(i) { \ + arch_spinlock_t *lock; \ + lock = &per_cpu(name##_lock, i); \ + arch_spin_unlock(lock); \ + } \ + preempt_enable(); \ + } \ + EXPORT_SYMBOL(name##_global_unlock); +#endif diff --git a/include/linux/lguest.h b/include/linux/lguest.h new file mode 100644 index 00000000..9962c6bb --- /dev/null +++ b/include/linux/lguest.h @@ -0,0 +1,73 @@ +/* + * Things the lguest guest needs to know. Note: like all lguest interfaces, + * this is subject to wild and random change between versions. + */ +#ifndef _LINUX_LGUEST_H +#define _LINUX_LGUEST_H + +#ifndef __ASSEMBLY__ +#include <linux/time.h> +#include <asm/irq.h> +#include <asm/lguest_hcall.h> + +#define LG_CLOCK_MIN_DELTA 100UL +#define LG_CLOCK_MAX_DELTA ULONG_MAX + +/*G:031 + * The second method of communicating with the Host is to via "struct + * lguest_data". Once the Guest's initialization hypercall tells the Host where + * this is, the Guest and Host both publish information in it. +:*/ +struct lguest_data { + /* + * 512 == enabled (same as eflags in normal hardware). The Guest + * changes interrupts so often that a hypercall is too slow. + */ + unsigned int irq_enabled; + /* Fine-grained interrupt disabling by the Guest */ + DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); + + /* + * The Host writes the virtual address of the last page fault here, + * which saves the Guest a hypercall. CR2 is the native register where + * this address would normally be found. + */ + unsigned long cr2; + + /* Wallclock time set by the Host. */ + struct timespec time; + + /* + * Interrupt pending set by the Host. The Guest should do a hypercall + * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). 
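A sketch of how the two usage patterns described in the lglock header comment map onto the macros above, assuming a hypothetical per-CPU data structure: the per-CPU (local) lock on the common single-CPU path, the global lock on the rare path that must exclude every CPU.

#include <linux/lglock.h>

/* One lglock guarding a hypothetical piece of per-CPU data. */
DEFINE_LGLOCK(demo_lock);

static void demo_init(void)
{
	lg_lock_init(demo_lock);
}

/* Common path: only this CPU's slice is touched, so the local lock suffices. */
static void demo_local_update(void)
{
	lg_local_lock(demo_lock);
	/* ... modify this CPU's data ... */
	lg_local_unlock(demo_lock);
}

/* Rare path: take every possible CPU's lock for an exclusive global walk. */
static void demo_global_walk(void)
{
	lg_global_lock(demo_lock);
	/* ... traverse all per-CPU data ... */
	lg_global_unlock(demo_lock);
}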
+ */ + int irq_pending; + + /* + * Async hypercall ring. Instead of directly making hypercalls, we can + * place them in here for processing the next time the Host wants. + * This batching can be quite efficient. + */ + + /* 0xFF == done (set by Host), 0 == pending (set by Guest). */ + u8 hcall_status[LHCALL_RING_SIZE]; + /* The actual registers for the hypercalls. */ + struct hcall_args hcalls[LHCALL_RING_SIZE]; + +/* Fields initialized by the Host at boot: */ + /* Memory not to try to access */ + unsigned long reserve_mem; + /* KHz for the TSC clock. */ + u32 tsc_khz; + +/* Fields initialized by the Guest at boot: */ + /* Instruction range to suppress interrupts even if enabled */ + unsigned long noirq_start, noirq_end; + /* Address above which page tables are all identical. */ + unsigned long kernel_address; + /* The vector to try to use for system calls (0x40 or 0x80). */ + unsigned int syscall_vec; +}; +extern struct lguest_data lguest_data; +#endif /* __ASSEMBLY__ */ +#endif /* _LINUX_LGUEST_H */ diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h new file mode 100644 index 00000000..495203ff --- /dev/null +++ b/include/linux/lguest_launcher.h @@ -0,0 +1,73 @@ +#ifndef _LINUX_LGUEST_LAUNCHER +#define _LINUX_LGUEST_LAUNCHER +/* Everything the "lguest" userspace program needs to know. */ +#include <linux/types.h> + +/*D:010 + * Drivers + * + * The Guest needs devices to do anything useful. Since we don't let it touch + * real devices (think of the damage it could do!) we provide virtual devices. + * We could emulate a PCI bus with various devices on it, but that is a fairly + * complex burden for the Host and suboptimal for the Guest, so we have our own + * simple lguest bus and we use "virtio" drivers. These drivers need a set of + * routines from us which will actually do the virtual I/O, but they handle all + * the net/block/console stuff themselves. This means that if we want to add + * a new device, we simply need to write a new virtio driver and create support + * for it in the Launcher: this code won't need to change. + * + * Virtio devices are also used by kvm, so we can simply reuse their optimized + * device drivers. And one day when everyone uses virtio, my plan will be + * complete. Bwahahahah! + * + * Devices are described by a simplified ID, a status byte, and some "config" + * bytes which describe this device's configuration. This is placed by the + * Launcher just above the top of physical memory: + */ +struct lguest_device_desc { + /* The device type: console, network, disk etc. Type 0 terminates. */ + __u8 type; + /* The number of virtqueues (first in config array) */ + __u8 num_vq; + /* + * The number of bytes of feature bits. Multiply by 2: one for host + * features and one for Guest acknowledgements. + */ + __u8 feature_len; + /* The number of bytes of the config array after virtqueues. */ + __u8 config_len; + /* A status byte, written by the Guest. */ + __u8 status; + __u8 config[0]; +}; + +/*D:135 + * This is how we expect the device configuration field for a virtqueue + * to be laid out in config space. + */ +struct lguest_vqconfig { + /* The number of entries in the virtio_ring */ + __u16 num; + /* The interrupt we get when something happens. */ + __u16 irq; + /* The page number of the virtio ring for this device. */ + __u32 pfn; +}; +/*:*/ + +/* Write command first word is a request. 
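Putting the lguest_device_desc layout comments together: each descriptor is followed by one lguest_vqconfig per virtqueue, two copies of the feature bitmap (host features plus guest acknowledgements), then config_len bytes of config space. A sketch of the resulting size computation, mirroring that description rather than quoting any lguest source:

#include <linux/lguest_launcher.h>

static unsigned int demo_desc_size(const struct lguest_device_desc *desc)
{
	return sizeof(*desc)
	       + desc->num_vq * sizeof(struct lguest_vqconfig)
	       + desc->feature_len * 2
	       + desc->config_len;
}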
*/ +enum lguest_req +{ + LHREQ_INITIALIZE, /* + base, pfnlimit, start */ + LHREQ_GETDMA, /* No longer used */ + LHREQ_IRQ, /* + irq */ + LHREQ_BREAK, /* No longer used */ + LHREQ_EVENTFD, /* + address, fd. */ +}; + +/* + * The alignment to use between consumer and producer parts of vring. + * x86 pagesize for historical reasons. + */ +#define LGUEST_VRING_ALIGN 4096 +#endif /* _LINUX_LGUEST_LAUNCHER */ diff --git a/include/linux/libata.h b/include/linux/libata.h new file mode 100644 index 00000000..6e887c74 --- /dev/null +++ b/include/linux/libata.h @@ -0,0 +1,1786 @@ +/* + * Copyright 2003-2005 Red Hat, Inc. All rights reserved. + * Copyright 2003-2005 Jeff Garzik + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/DocBook/libata.* + * + */ + +#ifndef __LINUX_LIBATA_H__ +#define __LINUX_LIBATA_H__ + +#include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> +#include <linux/io.h> +#include <linux/ata.h> +#include <linux/workqueue.h> +#include <scsi/scsi_host.h> +#include <linux/acpi.h> +#include <linux/cdrom.h> +#include <linux/sched.h> + +/* + * Define if arch has non-standard setup. This is a _PCI_ standard + * not a legacy or ISA standard. + */ +#ifdef CONFIG_ATA_NONSTANDARD +#include <asm/libata-portmap.h> +#else +#include <asm-generic/libata-portmap.h> +#endif + +/* + * compile-time options: to be removed as soon as all the drivers are + * converted to the new debugging mechanism + */ +#undef ATA_DEBUG /* debugging output */ +#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ +#undef ATA_IRQ_TRAP /* define to ack screaming irqs */ +#undef ATA_NDEBUG /* define to disable quick runtime checks */ + + +/* note: prints function name for you */ +#ifdef ATA_DEBUG +#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) +#ifdef ATA_VERBOSE_DEBUG +#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) +#else +#define VPRINTK(fmt, args...) +#endif /* ATA_VERBOSE_DEBUG */ +#else +#define DPRINTK(fmt, args...) +#define VPRINTK(fmt, args...) +#endif /* ATA_DEBUG */ + +#define BPRINTK(fmt, args...) 
if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) + +#define ata_print_version_once(dev, version) \ +({ \ + static bool __print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + ata_print_version(dev, version); \ + } \ +}) + +/* NEW: debug levels */ +#define HAVE_LIBATA_MSG 1 + +enum { + ATA_MSG_DRV = 0x0001, + ATA_MSG_INFO = 0x0002, + ATA_MSG_PROBE = 0x0004, + ATA_MSG_WARN = 0x0008, + ATA_MSG_MALLOC = 0x0010, + ATA_MSG_CTL = 0x0020, + ATA_MSG_INTR = 0x0040, + ATA_MSG_ERR = 0x0080, +}; + +#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV) +#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO) +#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE) +#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN) +#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC) +#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL) +#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR) +#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR) + +static inline u32 ata_msg_init(int dval, int default_msg_enable_bits) +{ + if (dval < 0 || dval >= (sizeof(u32) * 8)) + return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */ + if (!dval) + return 0; + return (1 << dval) - 1; +} + +/* defines only for the constants which don't work well as enums */ +#define ATA_TAG_POISON 0xfafbfcfdU + +enum { + /* various global constants */ + LIBATA_MAX_PRD = ATA_MAX_PRD / 2, + LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */ + ATA_DEF_QUEUE = 1, + /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */ + ATA_MAX_QUEUE = 32, + ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1, + ATA_SHORT_PAUSE = 16, + + ATAPI_MAX_DRAIN = 16 << 10, + + ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, + + ATA_SHT_EMULATED = 1, + ATA_SHT_CMD_PER_LUN = 1, + ATA_SHT_THIS_ID = -1, + ATA_SHT_USE_CLUSTERING = 1, + + /* struct ata_device stuff */ + ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ + ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ + ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */ + ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */ + ATA_DFLAG_FLUSH_EXT = (1 << 4), /* do FLUSH_EXT instead of FLUSH */ + ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ + ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ + ATA_DFLAG_AN = (1 << 7), /* AN configured */ + ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */ + ATA_DFLAG_CFG_MASK = (1 << 12) - 1, + + ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */ + ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ + ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ + ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ + ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ + ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ + ATA_DFLAG_INIT_MASK = (1 << 24) - 1, + + ATA_DFLAG_DETACH = (1 << 24), + ATA_DFLAG_DETACHED = (1 << 25), + + ATA_DEV_UNKNOWN = 0, /* unknown device */ + ATA_DEV_ATA = 1, /* ATA device */ + ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ + ATA_DEV_ATAPI = 3, /* ATAPI device */ + ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */ + ATA_DEV_PMP = 5, /* SATA port multiplier */ + ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ + ATA_DEV_SEMB = 7, /* SEMB */ + ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ + ATA_DEV_NONE = 9, /* no device */ + + /* struct ata_link flags */ + ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ + 
ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ + ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ + ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ + ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB, + ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */ + ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */ + ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ + ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ + + /* struct ata_port flags */ + ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ + /* (doesn't imply presence) */ + ATA_FLAG_SATA = (1 << 1), + ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ + ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ + ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ + ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD + * doesn't handle PIO interrupts */ + ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */ + ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */ + ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */ + ATA_FLAG_DEBUGMSG = (1 << 13), + ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */ + ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */ + ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */ + ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ + ATA_FLAG_AN = (1 << 18), /* controller supports AN */ + ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ + ATA_FLAG_EM = (1 << 21), /* driver supports enclosure + * management */ + ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity + * led */ + ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ + + /* bits 24:31 of ap->flags are reserved for LLD specific flags */ + + + /* struct ata_port pflags */ + ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ + ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ + ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ + ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ + ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ + ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ + ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */ + ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */ + ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */ + ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */ + + ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ + ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ + ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ + + ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ + ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ + + /* struct ata_queued_cmd flags */ + ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ + ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */ + ATA_QCFLAG_IO = (1 << 3), /* standard IO command */ + ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ + ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ + ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ + ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */ + + ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ + ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ + ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */ + + /* host set flags */ + ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA 
channel per host only */ + ATA_HOST_STARTED = (1 << 1), /* Host started */ + ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */ + ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */ + + /* bits 24:31 of host->flags are reserved for LLD specific flags */ + + /* various lengths of time */ + ATA_TMOUT_BOOT = 30000, /* heuristic */ + ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ + ATA_TMOUT_INTERNAL_QUICK = 5000, + ATA_TMOUT_MAX_PARK = 30000, + + /* + * GoVault needs 2s and iVDR disk HHD424020F7SV00 800ms. 2s + * is too much without parallel probing. Use 2s if parallel + * probing is available, 800ms otherwise. + */ + ATA_TMOUT_FF_WAIT_LONG = 2000, + ATA_TMOUT_FF_WAIT = 800, + + /* Spec mandates to wait for ">= 2ms" before checking status + * after reset. We wait 150ms, because that was the magic + * delay used for ATAPI devices in Hale Landis's ATADRVR, for + * the period of time between when the ATA command register is + * written, and then status is checked. Because waiting for + * "a while" before checking status is fine, post SRST, we + * perform this magic delay here as well. + * + * Old drivers/ide uses the 2mS rule and then waits for ready. + */ + ATA_WAIT_AFTER_RESET = 150, + + /* If PMP is supported, we have to do follow-up SRST. As some + * PMPs don't send D2H Reg FIS after hardreset, LLDs are + * advised to wait only for the following duration before + * doing SRST. + */ + ATA_TMOUT_PMP_SRST_WAIT = 5000, + + /* ATA bus states */ + BUS_UNKNOWN = 0, + BUS_DMA = 1, + BUS_IDLE = 2, + BUS_NOINTR = 3, + BUS_NODATA = 4, + BUS_TIMER = 5, + BUS_PIO = 6, + BUS_EDD = 7, + BUS_IDENTIFY = 8, + BUS_PACKET = 9, + + /* SATA port states */ + PORT_UNKNOWN = 0, + PORT_ENABLED = 1, + PORT_DISABLED = 2, + + /* encoding various smaller bitmaps into a single + * unsigned long bitmap + */ + ATA_NR_PIO_MODES = 7, + ATA_NR_MWDMA_MODES = 5, + ATA_NR_UDMA_MODES = 8, + + ATA_SHIFT_PIO = 0, + ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, + ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, + + /* size of buffer to pad xfers ending on unaligned boundaries */ + ATA_DMA_PAD_SZ = 4, + + /* ering size */ + ATA_ERING_SIZE = 32, + + /* return values for ->qc_defer */ + ATA_DEFER_LINK = 1, + ATA_DEFER_PORT = 2, + + /* desc_len for ata_eh_info and context */ + ATA_EH_DESC_LEN = 80, + + /* reset / recovery action types */ + ATA_EH_REVALIDATE = (1 << 0), + ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */ + ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ + ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, + ATA_EH_ENABLE_LINK = (1 << 3), + ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ + + ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, + ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | + ATA_EH_ENABLE_LINK, + + /* ata_eh_info->flags */ + ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ + ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ + ATA_EHI_QUIET = (1 << 3), /* be quiet */ + ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */ + + ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ + ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ + ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */ + ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */ + ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */ + + ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, + + /* mask of flags to transfer *to* the slave link */ + 
ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, + + /* max tries if error condition is still set after ->error_handler */ + ATA_EH_MAX_TRIES = 5, + + /* sometimes resuming a link requires several retries */ + ATA_LINK_RESUME_TRIES = 5, + + /* how hard are we gonna try to probe/recover devices */ + ATA_PROBE_MAX_TRIES = 3, + ATA_EH_DEV_TRIES = 3, + ATA_EH_PMP_TRIES = 5, + ATA_EH_PMP_LINK_TRIES = 3, + + SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */ + + /* This should match the actual table size of + * ata_eh_cmd_timeout_table in libata-eh.c. + */ + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6, + + /* Horkage types. May be set by libata or controller on drives + (some horkage may be drive/controller pair dependent */ + + ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */ + ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */ + ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */ + ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */ + ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ + ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */ + ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ + ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ + ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ + ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ + ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands + not multiple of 16 bytes */ + ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ + ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ + ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ + ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ + ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ + + /* DMA mask for user DMA control: User visible values; DO NOT + renumber */ + ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ + ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ + ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */ + + /* ATAPI command types */ + ATAPI_READ = 0, /* READs */ + ATAPI_WRITE = 1, /* WRITEs */ + ATAPI_READ_CD = 2, /* READ CD [MSF] */ + ATAPI_PASS_THRU = 3, /* SAT pass-thru */ + ATAPI_MISC = 4, /* the rest */ + + /* Timing constants */ + ATA_TIMING_SETUP = (1 << 0), + ATA_TIMING_ACT8B = (1 << 1), + ATA_TIMING_REC8B = (1 << 2), + ATA_TIMING_CYC8B = (1 << 3), + ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | + ATA_TIMING_CYC8B, + ATA_TIMING_ACTIVE = (1 << 4), + ATA_TIMING_RECOVER = (1 << 5), + ATA_TIMING_DMACK_HOLD = (1 << 6), + ATA_TIMING_CYCLE = (1 << 7), + ATA_TIMING_UDMA = (1 << 8), + ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | + ATA_TIMING_REC8B | ATA_TIMING_CYC8B | + ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | + ATA_TIMING_DMACK_HOLD | ATA_TIMING_CYCLE | + ATA_TIMING_UDMA, + + /* ACPI constants */ + ATA_ACPI_FILTER_SETXFER = 1 << 0, + ATA_ACPI_FILTER_LOCK = 1 << 1, + ATA_ACPI_FILTER_DIPM = 1 << 2, + ATA_ACPI_FILTER_FPDMA_OFFSET = 1 << 3, /* FPDMA non-zero offset */ + ATA_ACPI_FILTER_FPDMA_AA = 1 << 4, /* FPDMA auto activate */ + + ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER | + ATA_ACPI_FILTER_LOCK | + ATA_ACPI_FILTER_DIPM, +}; + +enum ata_xfer_mask { + ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1) + << ATA_SHIFT_PIO, + ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1) + << ATA_SHIFT_MWDMA, + ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1) + << ATA_SHIFT_UDMA, +}; + +enum hsm_task_states { + HSM_ST_IDLE, /* no command on going */ + HSM_ST_FIRST, /* (waiting the device to) + write CDB or first data block */ + HSM_ST, /* 
(waiting the device to) transfer data */ + HSM_ST_LAST, /* (waiting the device to) complete command */ + HSM_ST_ERR, /* error */ +}; + +enum ata_completion_errors { + AC_ERR_DEV = (1 << 0), /* device reported error */ + AC_ERR_HSM = (1 << 1), /* host state machine violation */ + AC_ERR_TIMEOUT = (1 << 2), /* timeout */ + AC_ERR_MEDIA = (1 << 3), /* media error */ + AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */ + AC_ERR_HOST_BUS = (1 << 5), /* host bus error */ + AC_ERR_SYSTEM = (1 << 6), /* system error */ + AC_ERR_INVALID = (1 << 7), /* invalid argument */ + AC_ERR_OTHER = (1 << 8), /* unknown */ + AC_ERR_NODEV_HINT = (1 << 9), /* polling device detection hint */ + AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */ +}; + +/* + * Link power management policy: If you alter this, you also need to + * alter libata-scsi.c (for the ascii descriptions) + */ +enum ata_lpm_policy { + ATA_LPM_UNKNOWN, + ATA_LPM_MAX_POWER, + ATA_LPM_MED_POWER, + ATA_LPM_MIN_POWER, +}; + +enum ata_lpm_hints { + ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */ + ATA_LPM_HIPM = (1 << 1), /* may use HIPM */ +}; + +/* forward declarations */ +struct scsi_device; +struct ata_port_operations; +struct ata_port; +struct ata_link; +struct ata_queued_cmd; + +/* typedefs */ +typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); +typedef int (*ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline); +typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes, + unsigned long deadline); +typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); + +extern struct device_attribute dev_attr_link_power_management_policy; +extern struct device_attribute dev_attr_unload_heads; +extern struct device_attribute dev_attr_em_message_type; +extern struct device_attribute dev_attr_em_message; +extern struct device_attribute dev_attr_sw_activity; + +enum sw_activity { + OFF, + BLINK_ON, + BLINK_OFF, +}; + +#ifdef CONFIG_ATA_SFF +struct ata_ioports { + void __iomem *cmd_addr; + void __iomem *data_addr; + void __iomem *error_addr; + void __iomem *feature_addr; + void __iomem *nsect_addr; + void __iomem *lbal_addr; + void __iomem *lbam_addr; + void __iomem *lbah_addr; + void __iomem *device_addr; + void __iomem *status_addr; + void __iomem *command_addr; + void __iomem *altstatus_addr; + void __iomem *ctl_addr; +#ifdef CONFIG_ATA_BMDMA + void __iomem *bmdma_addr; +#endif /* CONFIG_ATA_BMDMA */ + void __iomem *scr_addr; +}; +#endif /* CONFIG_ATA_SFF */ + +struct ata_host { + spinlock_t lock; + struct device *dev; + void __iomem * const *iomap; + unsigned int n_ports; + void *private_data; + struct ata_port_operations *ops; + unsigned long flags; + + struct mutex eh_mutex; + struct task_struct *eh_owner; + +#ifdef CONFIG_ATA_ACPI + acpi_handle acpi_handle; +#endif + struct ata_port *simplex_claimed; /* channel owning the DMA */ + struct ata_port *ports[0]; +}; + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + + struct scsi_cmnd *scsicmd; + void (*scsidone)(struct scsi_cmnd *); + + struct ata_taskfile tf; + u8 cdb[ATAPI_CDB_LEN]; + + unsigned long flags; /* ATA_QCFLAG_xxx */ + unsigned int tag; + unsigned int n_elem; + unsigned int orig_n_elem; + + int dma_dir; + + unsigned int sect_size; + + unsigned int nbytes; + unsigned int extrabytes; + unsigned int curbytes; + + struct scatterlist sgent; + + struct scatterlist *sg; + + struct scatterlist *cursg; + unsigned int cursg_ofs; + + unsigned int err_mask; + struct ata_taskfile result_tf; + ata_qc_cb_t 
complete_fn; + + void *private_data; + void *lldd_task; +}; + +struct ata_port_stats { + unsigned long unhandled_irq; + unsigned long idle_irq; + unsigned long rw_reqbuf; +}; + +struct ata_ering_entry { + unsigned int eflags; + unsigned int err_mask; + u64 timestamp; +}; + +struct ata_ering { + int cursor; + struct ata_ering_entry ring[ATA_ERING_SIZE]; +}; + +struct ata_device { + struct ata_link *link; + unsigned int devno; /* 0 or 1 */ + unsigned int horkage; /* List of broken features */ + unsigned long flags; /* ATA_DFLAG_xxx */ + struct scsi_device *sdev; /* attached SCSI device */ + void *private_data; +#ifdef CONFIG_ATA_ACPI + acpi_handle acpi_handle; + union acpi_object *gtf_cache; + unsigned int gtf_filter; +#endif + struct device tdev; + /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ + u64 n_sectors; /* size of device, if ATA */ + u64 n_native_sectors; /* native size, if ATA */ + unsigned int class; /* ATA_DEV_xxx */ + unsigned long unpark_deadline; + + u8 pio_mode; + u8 dma_mode; + u8 xfer_mode; + unsigned int xfer_shift; /* ATA_SHIFT_xxx */ + + unsigned int multi_count; /* sectors count for + READ/WRITE MULTIPLE */ + unsigned int max_sectors; /* per-device max sectors */ + unsigned int cdb_len; + + /* per-dev xfer mask */ + unsigned long pio_mask; + unsigned long mwdma_mask; + unsigned long udma_mask; + + /* for CHS addressing */ + u16 cylinders; /* Number of cylinders */ + u16 heads; /* Number of heads */ + u16 sectors; /* Number of sectors per track */ + + union { + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ + }; + + /* error history */ + int spdn_cnt; + /* ering is CLEAR_END, read comment above CLEAR_END */ + struct ata_ering ering; +}; + +/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are + * cleared to zero on ata_dev_init(). 
+ */ +#define ATA_DEVICE_CLEAR_BEGIN offsetof(struct ata_device, n_sectors) +#define ATA_DEVICE_CLEAR_END offsetof(struct ata_device, ering) + +struct ata_eh_info { + struct ata_device *dev; /* offending device */ + u32 serror; /* SError from LLDD */ + unsigned int err_mask; /* port-wide err_mask */ + unsigned int action; /* ATA_EH_* action mask */ + unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */ + unsigned int flags; /* ATA_EHI_* flags */ + + unsigned int probe_mask; + + char desc[ATA_EH_DESC_LEN]; + int desc_len; +}; + +struct ata_eh_context { + struct ata_eh_info i; + int tries[ATA_MAX_DEVICES]; + int cmd_timeout_idx[ATA_MAX_DEVICES] + [ATA_EH_CMD_TIMEOUT_TABLE_SIZE]; + unsigned int classes[ATA_MAX_DEVICES]; + unsigned int did_probe_mask; + unsigned int unloaded_mask; + unsigned int saved_ncq_enabled; + u8 saved_xfer_mode[ATA_MAX_DEVICES]; + /* timestamp for the last reset attempt or success */ + unsigned long last_reset; +}; + +struct ata_acpi_drive +{ + u32 pio; + u32 dma; +} __packed; + +struct ata_acpi_gtm { + struct ata_acpi_drive drive[2]; + u32 flags; +} __packed; + +struct ata_link { + struct ata_port *ap; + int pmp; /* port multiplier port # */ + + struct device tdev; + unsigned int active_tag; /* active tag on this link */ + u32 sactive; /* active NCQ commands */ + + unsigned int flags; /* ATA_LFLAG_xxx */ + + u32 saved_scontrol; /* SControl on probe */ + unsigned int hw_sata_spd_limit; + unsigned int sata_spd_limit; + unsigned int sata_spd; /* current SATA PHY speed */ + enum ata_lpm_policy lpm_policy; + + /* record runtime error info, protected by host_set lock */ + struct ata_eh_info eh_info; + /* EH context */ + struct ata_eh_context eh_context; + + struct ata_device device[ATA_MAX_DEVICES]; +}; +#define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) +#define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) + +struct ata_port { + struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ + struct ata_port_operations *ops; + spinlock_t *lock; + /* Flags owned by the EH context. Only EH should touch these once the + port is active */ + unsigned long flags; /* ATA_FLAG_xxx */ + /* Flags that change dynamically, protected by ap->lock */ + unsigned int pflags; /* ATA_PFLAG_xxx */ + unsigned int print_id; /* user visible unique port ID */ + unsigned int port_no; /* 0 based port no. 
inside the host */ + +#ifdef CONFIG_ATA_SFF + struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ + u8 ctl; /* cache of ATA control register */ + u8 last_ctl; /* Cache last written value */ + struct ata_link* sff_pio_task_link; /* link currently used */ + struct delayed_work sff_pio_task; +#ifdef CONFIG_ATA_BMDMA + struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ + dma_addr_t bmdma_prd_dma; /* and its DMA mapping */ +#endif /* CONFIG_ATA_BMDMA */ +#endif /* CONFIG_ATA_SFF */ + + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + unsigned int cbl; /* cable type; ATA_CBL_xxx */ + + struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; + unsigned long qc_allocated; + unsigned int qc_active; + int nr_active_links; /* #links with active qcs */ + + struct ata_link link; /* host default link */ + struct ata_link *slave_link; /* see ata_slave_link_init() */ + + int nr_pmp_links; /* nr of available PMP links */ + struct ata_link *pmp_link; /* array of PMP links */ + struct ata_link *excl_link; /* for PMP qc exclusion */ + + struct ata_port_stats stats; + struct ata_host *host; + struct device *dev; + struct device tdev; + + struct mutex scsi_scan_mutex; + struct delayed_work hotplug_task; + struct work_struct scsi_rescan_task; + + unsigned int hsm_task_state; + + u32 msg_enable; + struct list_head eh_done_q; + wait_queue_head_t eh_wait_q; + int eh_tries; + struct completion park_req_pending; + + pm_message_t pm_mesg; + int *pm_result; + enum ata_lpm_policy target_lpm_policy; + + struct timer_list fastdrain_timer; + unsigned long fastdrain_cnt; + + int em_message_type; + void *private_data; + +#ifdef CONFIG_ATA_ACPI + acpi_handle acpi_handle; + struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ +#endif + /* owned by EH */ + u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; +}; + +/* The following initializer overrides a method to NULL whether one of + * its parent has the method defined or not. This is equivalent to + * ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't render a constant + * expression and thus can't be used as an initializer. 
+ */ +#define ATA_OP_NULL (void *)(unsigned long)(-ENOENT) + +struct ata_port_operations { + /* + * Command execution + */ + int (*qc_defer)(struct ata_queued_cmd *qc); + int (*check_atapi_dma)(struct ata_queued_cmd *qc); + void (*qc_prep)(struct ata_queued_cmd *qc); + unsigned int (*qc_issue)(struct ata_queued_cmd *qc); + bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); + + /* + * Configuration and exception handling + */ + int (*cable_detect)(struct ata_port *ap); + unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask); + void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); + void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); + int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); + unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); + + void (*dev_config)(struct ata_device *dev); + + void (*freeze)(struct ata_port *ap); + void (*thaw)(struct ata_port *ap); + ata_prereset_fn_t prereset; + ata_reset_fn_t softreset; + ata_reset_fn_t hardreset; + ata_postreset_fn_t postreset; + ata_prereset_fn_t pmp_prereset; + ata_reset_fn_t pmp_softreset; + ata_reset_fn_t pmp_hardreset; + ata_postreset_fn_t pmp_postreset; + void (*error_handler)(struct ata_port *ap); + void (*lost_interrupt)(struct ata_port *ap); + void (*post_internal_cmd)(struct ata_queued_cmd *qc); + + /* + * Optional features + */ + int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val); + int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); + void (*pmp_attach)(struct ata_port *ap); + void (*pmp_detach)(struct ata_port *ap); + int (*set_lpm)(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints); + + /* + * Start, stop, suspend and resume + */ + int (*port_suspend)(struct ata_port *ap, pm_message_t mesg); + int (*port_resume)(struct ata_port *ap); + int (*port_start)(struct ata_port *ap); + void (*port_stop)(struct ata_port *ap); + void (*host_stop)(struct ata_host *host); + +#ifdef CONFIG_ATA_SFF + /* + * SFF / taskfile oriented ops + */ + void (*sff_dev_select)(struct ata_port *ap, unsigned int device); + void (*sff_set_devctl)(struct ata_port *ap, u8 ctl); + u8 (*sff_check_status)(struct ata_port *ap); + u8 (*sff_check_altstatus)(struct ata_port *ap); + void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf); + void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); + void (*sff_exec_command)(struct ata_port *ap, + const struct ata_taskfile *tf); + unsigned int (*sff_data_xfer)(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw); + void (*sff_irq_on)(struct ata_port *); + bool (*sff_irq_check)(struct ata_port *); + void (*sff_irq_clear)(struct ata_port *); + void (*sff_drain_fifo)(struct ata_queued_cmd *qc); + +#ifdef CONFIG_ATA_BMDMA + void (*bmdma_setup)(struct ata_queued_cmd *qc); + void (*bmdma_start)(struct ata_queued_cmd *qc); + void (*bmdma_stop)(struct ata_queued_cmd *qc); + u8 (*bmdma_status)(struct ata_port *ap); +#endif /* CONFIG_ATA_BMDMA */ +#endif /* CONFIG_ATA_SFF */ + + ssize_t (*em_show)(struct ata_port *ap, char *buf); + ssize_t (*em_store)(struct ata_port *ap, const char *message, + size_t size); + ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf); + ssize_t (*sw_activity_store)(struct ata_device *dev, + enum sw_activity val); + /* + * Obsolete + */ + void (*phy_reset)(struct ata_port *ap); + void (*eng_timeout)(struct ata_port *ap); + + /* + * ->inherits must be the last field and all the preceding + * 
fields must be pointers. + */ + const struct ata_port_operations *inherits; +}; + +struct ata_port_info { + unsigned long flags; + unsigned long link_flags; + unsigned long pio_mask; + unsigned long mwdma_mask; + unsigned long udma_mask; + struct ata_port_operations *port_ops; + void *private_data; +}; + +struct ata_timing { + unsigned short mode; /* ATA mode */ + unsigned short setup; /* t1 */ + unsigned short act8b; /* t2 for 8-bit I/O */ + unsigned short rec8b; /* t2i for 8-bit I/O */ + unsigned short cyc8b; /* t0 for 8-bit I/O */ + unsigned short active; /* t2 or tD */ + unsigned short recover; /* t2i or tK */ + unsigned short dmack_hold; /* tj */ + unsigned short cycle; /* t0 */ + unsigned short udma; /* t2CYCTYP/2 */ +}; + +/* + * Core layer - drivers/ata/libata-core.c + */ +extern const unsigned long sata_deb_timing_normal[]; +extern const unsigned long sata_deb_timing_hotplug[]; +extern const unsigned long sata_deb_timing_long[]; + +extern struct ata_port_operations ata_dummy_port_ops; +extern const struct ata_port_info ata_dummy_port_info; + +static inline const unsigned long * +sata_ehc_deb_timing(struct ata_eh_context *ehc) +{ + if (ehc->i.flags & ATA_EHI_HOTPLUGGED) + return sata_deb_timing_hotplug; + else + return sata_deb_timing_normal; +} + +static inline int ata_port_is_dummy(struct ata_port *ap) +{ + return ap->ops == &ata_dummy_port_ops; +} + +extern int sata_set_spd(struct ata_link *link); +extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); +extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, + int (*check_ready)(struct ata_link *link)); +extern int sata_link_debounce(struct ata_link *link, + const unsigned long *params, unsigned long deadline); +extern int sata_link_resume(struct ata_link *link, const unsigned long *params, + unsigned long deadline); +extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, + bool spm_wakeup); +extern int sata_link_hardreset(struct ata_link *link, + const unsigned long *timing, unsigned long deadline, + bool *online, int (*check_ready)(struct ata_link *)); +extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); + +extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); +extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, + const struct ata_port_info * const * ppi, int n_ports); +extern int ata_slave_link_init(struct ata_port *ap); +extern int ata_host_start(struct ata_host *host); +extern int ata_host_register(struct ata_host *host, + struct scsi_host_template *sht); +extern int ata_host_activate(struct ata_host *host, int irq, + irq_handler_t irq_handler, unsigned long irq_flags, + struct scsi_host_template *sht); +extern void ata_host_detach(struct ata_host *host); +extern void ata_host_init(struct ata_host *, struct device *, + unsigned long, struct ata_port_operations *); +extern int ata_scsi_detect(struct scsi_host_template *sht); +extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); +extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, + int cmd, void __user *arg); +extern void ata_sas_port_destroy(struct ata_port *); +extern struct ata_port *ata_sas_port_alloc(struct ata_host *, + struct ata_port_info *, struct Scsi_Host *); +extern void ata_sas_async_probe(struct ata_port *ap); 
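/*
 * Illustrative sketch only, not part of libata.h: roughly how a low-level
 * driver might use the ->inherits mechanism and ATA_OP_NULL described above.
 * The "my_*" names are hypothetical; sata_port_ops, sata_std_hardreset(),
 * struct ata_port_info and the ATA_FLAG_*, ATA_PIO4 and ATA_UDMA6 constants
 * are the ones declared in this header and <linux/ata.h>.  Kept compiled-out.
 */
#if 0
#include <linux/libata.h>

static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	/* controller-specific PHY setup would go here */
	return sata_std_hardreset(link, class, deadline);
}

static struct ata_port_operations my_port_ops = {
	.inherits	= &sata_port_ops,	/* start from the generic SATA ops */
	.hardreset	= my_hardreset,		/* override a single hook */
	.softreset	= ATA_OP_NULL,		/* force an inherited hook to "not present" */
};

static const struct ata_port_info my_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &my_port_ops,
};
#endif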
+extern int ata_sas_sync_probe(struct ata_port *ap); +extern int ata_sas_port_init(struct ata_port *); +extern int ata_sas_port_start(struct ata_port *ap); +extern void ata_sas_port_stop(struct ata_port *ap); +extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); +extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); +extern int sata_scr_valid(struct ata_link *link); +extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); +extern int sata_scr_write(struct ata_link *link, int reg, u32 val); +extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); +extern bool ata_link_online(struct ata_link *link); +extern bool ata_link_offline(struct ata_link *link); +#ifdef CONFIG_PM +extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); +extern void ata_host_resume(struct ata_host *host); +#endif +extern int ata_ratelimit(void); +extern void ata_msleep(struct ata_port *ap, unsigned int msecs); +extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, + u32 val, unsigned long interval, unsigned long timeout); +extern int atapi_cmd_type(u8 opcode); +extern void ata_tf_to_fis(const struct ata_taskfile *tf, + u8 pmp, int is_cmd, u8 *fis); +extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); +extern unsigned long ata_pack_xfermask(unsigned long pio_mask, + unsigned long mwdma_mask, unsigned long udma_mask); +extern void ata_unpack_xfermask(unsigned long xfer_mask, + unsigned long *pio_mask, unsigned long *mwdma_mask, + unsigned long *udma_mask); +extern u8 ata_xfer_mask2mode(unsigned long xfer_mask); +extern unsigned long ata_xfer_mode2mask(u8 xfer_mode); +extern int ata_xfer_mode2shift(unsigned long xfer_mode); +extern const char *ata_mode_string(unsigned long xfer_mask); +extern unsigned long ata_id_xfermask(const u16 *id); +extern int ata_std_qc_defer(struct ata_queued_cmd *qc); +extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); +extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, + unsigned int n_elem); +extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); +extern void ata_dev_disable(struct ata_device *adev); +extern void ata_id_string(const u16 *id, unsigned char *s, + unsigned int ofs, unsigned int len); +extern void ata_id_c_string(const u16 *id, unsigned char *s, + unsigned int ofs, unsigned int len); +extern unsigned int ata_do_dev_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id); +extern void ata_qc_complete(struct ata_queued_cmd *qc); +extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); +extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd); +extern int ata_std_bios_param(struct scsi_device *sdev, + struct block_device *bdev, + sector_t capacity, int geom[]); +extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); +extern int ata_scsi_slave_config(struct scsi_device *sdev); +extern void ata_scsi_slave_destroy(struct scsi_device *sdev); +extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, + int queue_depth, int reason); +extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, + int queue_depth, int reason); +extern struct ata_device *ata_dev_pair(struct ata_device *adev); +extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); +extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); +extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, 
struct ata_port *ap, struct list_head *eh_q); + +extern int ata_cable_40wire(struct ata_port *ap); +extern int ata_cable_80wire(struct ata_port *ap); +extern int ata_cable_sata(struct ata_port *ap); +extern int ata_cable_ignore(struct ata_port *ap); +extern int ata_cable_unknown(struct ata_port *ap); + +/* Timing helpers */ +extern unsigned int ata_pio_need_iordy(const struct ata_device *); +extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); +extern int ata_timing_compute(struct ata_device *, unsigned short, + struct ata_timing *, int, int); +extern void ata_timing_merge(const struct ata_timing *, + const struct ata_timing *, struct ata_timing *, + unsigned int); +extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); + +/* PCI */ +#ifdef CONFIG_PCI +struct pci_dev; + +struct pci_bits { + unsigned int reg; /* PCI config register to read */ + unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ + unsigned long mask; + unsigned long val; +}; + +extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); +extern void ata_pci_remove_one(struct pci_dev *pdev); + +#ifdef CONFIG_PM +extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); +extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); +extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); +extern int ata_pci_device_resume(struct pci_dev *pdev); +#endif /* CONFIG_PM */ +#endif /* CONFIG_PCI */ + +/* + * ACPI - drivers/ata/libata-acpi.c + */ +#ifdef CONFIG_ATA_ACPI +static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) +{ + if (ap->pflags & ATA_PFLAG_INIT_GTM_VALID) + return &ap->__acpi_init_gtm; + return NULL; +} +int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm); +int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm); +unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev, + const struct ata_acpi_gtm *gtm); +int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm); +#else +static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) +{ + return NULL; +} + +static inline int ata_acpi_stm(const struct ata_port *ap, + struct ata_acpi_gtm *stm) +{ + return -ENOSYS; +} + +static inline int ata_acpi_gtm(const struct ata_port *ap, + struct ata_acpi_gtm *stm) +{ + return -ENOSYS; +} + +static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, + const struct ata_acpi_gtm *gtm) +{ + return 0; +} + +static inline int ata_acpi_cbl_80wire(struct ata_port *ap, + const struct ata_acpi_gtm *gtm) +{ + return 0; +} +#endif + +/* + * EH - drivers/ata/libata-eh.c + */ +extern void ata_port_schedule_eh(struct ata_port *ap); +extern void ata_port_wait_eh(struct ata_port *ap); +extern int ata_link_abort(struct ata_link *link); +extern int ata_port_abort(struct ata_port *ap); +extern int ata_port_freeze(struct ata_port *ap); +extern int sata_async_notification(struct ata_port *ap); + +extern void ata_eh_freeze_port(struct ata_port *ap); +extern void ata_eh_thaw_port(struct ata_port *ap); + +extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); +extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); +extern void ata_eh_analyze_ncq_error(struct ata_link *link); + +extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, + ata_reset_fn_t softreset, ata_reset_fn_t hardreset, + ata_postreset_fn_t postreset); +extern void ata_std_error_handler(struct ata_port *ap); +extern int ata_link_nr_enabled(struct ata_link 
*link); + +/* + * Base operations to inherit from and initializers for sht + * + * Operations + * + * base : Common to all libata drivers. + * sata : SATA controllers w/ native interface. + * pmp : SATA controllers w/ PMP support. + * sff : SFF ATA controllers w/o BMDMA support. + * bmdma : SFF ATA controllers w/ BMDMA support. + * + * sht initializers + * + * BASE : Common to all libata drivers. The user must set + * sg_tablesize and dma_boundary. + * PIO : SFF ATA controllers w/ only PIO support. + * BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and + * dma_boundary are set to BMDMA limits. + * NCQ : SATA controllers supporting NCQ. The user must set + * sg_tablesize, dma_boundary and can_queue. + */ +extern const struct ata_port_operations ata_base_port_ops; +extern const struct ata_port_operations sata_port_ops; +extern struct device_attribute *ata_common_sdev_attrs[]; + +#define ATA_BASE_SHT(drv_name) \ + .module = THIS_MODULE, \ + .name = drv_name, \ + .ioctl = ata_scsi_ioctl, \ + .queuecommand = ata_scsi_queuecmd, \ + .can_queue = ATA_DEF_QUEUE, \ + .this_id = ATA_SHT_THIS_ID, \ + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \ + .emulated = ATA_SHT_EMULATED, \ + .use_clustering = ATA_SHT_USE_CLUSTERING, \ + .proc_name = drv_name, \ + .slave_configure = ata_scsi_slave_config, \ + .slave_destroy = ata_scsi_slave_destroy, \ + .bios_param = ata_std_bios_param, \ + .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ + .sdev_attrs = ata_common_sdev_attrs + +#define ATA_NCQ_SHT(drv_name) \ + ATA_BASE_SHT(drv_name), \ + .change_queue_depth = ata_scsi_change_queue_depth + +/* + * PMP helpers + */ +#ifdef CONFIG_SATA_PMP +static inline bool sata_pmp_supported(struct ata_port *ap) +{ + return ap->flags & ATA_FLAG_PMP; +} + +static inline bool sata_pmp_attached(struct ata_port *ap) +{ + return ap->nr_pmp_links != 0; +} + +static inline int ata_is_host_link(const struct ata_link *link) +{ + return link == &link->ap->link || link == link->ap->slave_link; +} +#else /* CONFIG_SATA_PMP */ +static inline bool sata_pmp_supported(struct ata_port *ap) +{ + return false; +} + +static inline bool sata_pmp_attached(struct ata_port *ap) +{ + return false; +} + +static inline int ata_is_host_link(const struct ata_link *link) +{ + return 1; +} +#endif /* CONFIG_SATA_PMP */ + +static inline int sata_srst_pmp(struct ata_link *link) +{ + if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) + return SATA_PMP_CTRL_PORT; + return link->pmp; +} + +/* + * printk helpers + */ +__printf(3, 4) +int ata_port_printk(const struct ata_port *ap, const char *level, + const char *fmt, ...); +__printf(3, 4) +int ata_link_printk(const struct ata_link *link, const char *level, + const char *fmt, ...); +__printf(3, 4) +int ata_dev_printk(const struct ata_device *dev, const char *level, + const char *fmt, ...); + +#define ata_port_err(ap, fmt, ...) \ + ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) +#define ata_port_warn(ap, fmt, ...) \ + ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__) +#define ata_port_notice(ap, fmt, ...) \ + ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__) +#define ata_port_info(ap, fmt, ...) \ + ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__) +#define ata_port_dbg(ap, fmt, ...) \ + ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__) + +#define ata_link_err(link, fmt, ...) \ + ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__) +#define ata_link_warn(link, fmt, ...) \ + ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__) +#define ata_link_notice(link, fmt, ...) 
\ + ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__) +#define ata_link_info(link, fmt, ...) \ + ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__) +#define ata_link_dbg(link, fmt, ...) \ + ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__) + +#define ata_dev_err(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__) +#define ata_dev_warn(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__) +#define ata_dev_notice(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__) +#define ata_dev_info(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) +#define ata_dev_dbg(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__) + +void ata_print_version(const struct device *dev, const char *version); + +/* + * ata_eh_info helpers + */ +extern __printf(2, 3) +void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...); +extern __printf(2, 3) +void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...); +extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); + +static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) +{ + ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; + ehi->flags |= ATA_EHI_HOTPLUGGED; + ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK; + ehi->err_mask |= AC_ERR_ATA_BUS; +} + +/* + * port description helpers + */ +extern __printf(2, 3) +void ata_port_desc(struct ata_port *ap, const char *fmt, ...); +#ifdef CONFIG_PCI +extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, + const char *name); +#endif + +static inline unsigned int ata_tag_valid(unsigned int tag) +{ + return (tag < ATA_MAX_QUEUE) ? 1 : 0; +} + +static inline unsigned int ata_tag_internal(unsigned int tag) +{ + return tag == ATA_TAG_INTERNAL; +} + +/* + * device helpers + */ +static inline unsigned int ata_class_enabled(unsigned int class) +{ + return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || + class == ATA_DEV_PMP || class == ATA_DEV_SEMB; +} + +static inline unsigned int ata_class_disabled(unsigned int class) +{ + return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP || + class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP; +} + +static inline unsigned int ata_class_absent(unsigned int class) +{ + return !ata_class_enabled(class) && !ata_class_disabled(class); +} + +static inline unsigned int ata_dev_enabled(const struct ata_device *dev) +{ + return ata_class_enabled(dev->class); +} + +static inline unsigned int ata_dev_disabled(const struct ata_device *dev) +{ + return ata_class_disabled(dev->class); +} + +static inline unsigned int ata_dev_absent(const struct ata_device *dev) +{ + return ata_class_absent(dev->class); +} + +/* + * link helpers + */ +static inline int ata_link_max_devices(const struct ata_link *link) +{ + if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) + return 2; + return 1; +} + +static inline int ata_link_active(struct ata_link *link) +{ + return ata_tag_valid(link->active_tag) || link->sactive; +} + +/* + * Iterators + * + * ATA_LITER_* constants are used to select link iteration mode and + * ATA_DITER_* device iteration mode. + * + * For a custom iteration directly using ata_{link|dev}_next(), if + * @link or @dev, respectively, is NULL, the first element is + * returned. @dev and @link can be any valid device or link and the + * next element according to the iteration mode will be returned. + * After the last element, NULL is returned. 
+ */ +enum ata_link_iter_mode { + ATA_LITER_EDGE, /* if present, PMP links only; otherwise, + * host link. no slave link */ + ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */ + ATA_LITER_PMP_FIRST, /* PMP links followed by host link, + * slave link still comes after host link */ +}; + +enum ata_dev_iter_mode { + ATA_DITER_ENABLED, + ATA_DITER_ENABLED_REVERSE, + ATA_DITER_ALL, + ATA_DITER_ALL_REVERSE, +}; + +extern struct ata_link *ata_link_next(struct ata_link *link, + struct ata_port *ap, + enum ata_link_iter_mode mode); + +extern struct ata_device *ata_dev_next(struct ata_device *dev, + struct ata_link *link, + enum ata_dev_iter_mode mode); + +/* + * Shortcut notation for iterations + * + * ata_for_each_link() iterates over each link of @ap according to + * @mode. @link points to the current link in the loop. @link is + * NULL after loop termination. ata_for_each_dev() works the same way + * except that it iterates over each device of @link. + * + * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be + * specified when using the following shorthand notations. Only the + * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be + * specified. This not only increases brevity but also makes it + * impossible to use ATA_LITER_* for device iteration or vice-versa. + */ +#define ata_for_each_link(link, ap, mode) \ + for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \ + (link) = ata_link_next((link), (ap), ATA_LITER_##mode)) + +#define ata_for_each_dev(dev, link, mode) \ + for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \ + (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode)) + +/** + * ata_ncq_enabled - Test whether NCQ is enabled + * @dev: ATA device to test for + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * 1 if NCQ is enabled for @dev, 0 otherwise. 
+ */ +static inline int ata_ncq_enabled(struct ata_device *dev) +{ + return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF | + ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; +} + +static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) +{ + qc->tf.ctl |= ATA_NIEN; +} + +static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap, + unsigned int tag) +{ + if (likely(ata_tag_valid(tag))) + return &ap->qcmd[tag]; + return NULL; +} + +static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap, + unsigned int tag) +{ + struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); + + if (unlikely(!qc) || !ap->ops->error_handler) + return qc; + + if ((qc->flags & (ATA_QCFLAG_ACTIVE | + ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE) + return qc; + + return NULL; +} + +static inline unsigned int ata_qc_raw_nbytes(struct ata_queued_cmd *qc) +{ + return qc->nbytes - min(qc->extrabytes, qc->nbytes); +} + +static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf) +{ + memset(tf, 0, sizeof(*tf)); + +#ifdef CONFIG_ATA_SFF + tf->ctl = dev->link->ap->ctl; +#else + tf->ctl = ATA_DEVCTL_OBS; +#endif + if (dev->devno == 0) + tf->device = ATA_DEVICE_OBS; + else + tf->device = ATA_DEVICE_OBS | ATA_DEV1; +} + +static inline void ata_qc_reinit(struct ata_queued_cmd *qc) +{ + qc->dma_dir = DMA_NONE; + qc->sg = NULL; + qc->flags = 0; + qc->cursg = NULL; + qc->cursg_ofs = 0; + qc->nbytes = qc->extrabytes = qc->curbytes = 0; + qc->n_elem = 0; + qc->err_mask = 0; + qc->sect_size = ATA_SECT_SIZE; + + ata_tf_init(qc->dev, &qc->tf); + + /* init result_tf such that it indicates normal completion */ + qc->result_tf.command = ATA_DRDY; + qc->result_tf.feature = 0; +} + +static inline int ata_try_flush_cache(const struct ata_device *dev) +{ + return ata_id_wcache_enabled(dev->id) || + ata_id_has_flush(dev->id) || + ata_id_has_flush_ext(dev->id); +} + +static inline unsigned int ac_err_mask(u8 status) +{ + if (status & (ATA_BUSY | ATA_DRQ)) + return AC_ERR_HSM; + if (status & (ATA_ERR | ATA_DF)) + return AC_ERR_DEV; + return 0; +} + +static inline unsigned int __ac_err_mask(u8 status) +{ + unsigned int mask = ac_err_mask(status); + if (mask == 0) + return AC_ERR_OTHER; + return mask; +} + +static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) +{ + return *(struct ata_port **)&host->hostdata[0]; +} + +static inline int ata_check_ready(u8 status) +{ + if (!(status & ATA_BUSY)) + return 1; + + /* 0xff indicates either no device or device not ready */ + if (status == 0xff) + return -ENODEV; + + return 0; +} + +static inline unsigned long ata_deadline(unsigned long from_jiffies, + unsigned long timeout_msecs) +{ + return from_jiffies + msecs_to_jiffies(timeout_msecs); +} + +/* Don't open code these in drivers as there are traps. Firstly the range may + change in future hardware and specs, secondly 0xFF means 'no DMA' but is + > UDMA_0. Dyma ddreigiau */ + +static inline int ata_using_mwdma(struct ata_device *adev) +{ + if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4) + return 1; + return 0; +} + +static inline int ata_using_udma(struct ata_device *adev) +{ + if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7) + return 1; + return 0; +} + +static inline int ata_dma_enabled(struct ata_device *adev) +{ + return (adev->dma_mode == 0xFF ? 
0 : 1); +} + +/************************************************************************** + * PMP - drivers/ata/libata-pmp.c + */ +#ifdef CONFIG_SATA_PMP + +extern const struct ata_port_operations sata_pmp_port_ops; + +extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); +extern void sata_pmp_error_handler(struct ata_port *ap); + +#else /* CONFIG_SATA_PMP */ + +#define sata_pmp_port_ops sata_port_ops +#define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer +#define sata_pmp_error_handler ata_std_error_handler + +#endif /* CONFIG_SATA_PMP */ + + +/************************************************************************** + * SFF - drivers/ata/libata-sff.c + */ +#ifdef CONFIG_ATA_SFF + +extern const struct ata_port_operations ata_sff_port_ops; +extern const struct ata_port_operations ata_bmdma32_port_ops; + +/* PIO only, sg_tablesize and dma_boundary limits can be removed */ +#define ATA_PIO_SHT(drv_name) \ + ATA_BASE_SHT(drv_name), \ + .sg_tablesize = LIBATA_MAX_PRD, \ + .dma_boundary = ATA_DMA_BOUNDARY + +extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device); +extern u8 ata_sff_check_status(struct ata_port *ap); +extern void ata_sff_pause(struct ata_port *ap); +extern void ata_sff_dma_pause(struct ata_port *ap); +extern int ata_sff_busy_sleep(struct ata_port *ap, + unsigned long timeout_pat, unsigned long timeout); +extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline); +extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); +extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_sff_exec_command(struct ata_port *ap, + const struct ata_taskfile *tf); +extern unsigned int ata_sff_data_xfer(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw); +extern unsigned int ata_sff_data_xfer32(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw); +extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw); +extern void ata_sff_irq_on(struct ata_port *ap); +extern void ata_sff_irq_clear(struct ata_port *ap); +extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, + u8 status, int in_wq); +extern void ata_sff_queue_work(struct work_struct *work); +extern void ata_sff_queue_delayed_work(struct delayed_work *dwork, + unsigned long delay); +extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay); +extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); +extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); +extern unsigned int ata_sff_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc); +extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance); +extern void ata_sff_lost_interrupt(struct ata_port *ap); +extern void ata_sff_freeze(struct ata_port *ap); +extern void ata_sff_thaw(struct ata_port *ap); +extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline); +extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, + u8 *r_err); +extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, + unsigned long deadline); +extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline); +extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes); +extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc); 
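/*
 * Illustrative sketch only, not part of libata.h: typical use of the
 * ata_for_each_link() / ata_for_each_dev() shorthands together with the
 * ata_link_*() / ata_dev_*() printk helpers declared above.  The function
 * name and the message text are hypothetical.  Kept compiled-out.
 */
#if 0
#include <linux/libata.h>

static void my_dump_port(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* host link first, then PMP/slave links; only the mode suffix is spelled out */
	ata_for_each_link(link, ap, HOST_FIRST) {
		if (!ata_link_online(link))
			ata_link_warn(link, "link is offline\n");

		/* visit only devices whose class is currently enabled */
		ata_for_each_dev(dev, link, ENABLED)
			ata_dev_info(dev, "enabled, class %u\n", dev->class);
	}
}
#endif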
+extern void ata_sff_error_handler(struct ata_port *ap); +extern void ata_sff_std_ports(struct ata_ioports *ioaddr); +#ifdef CONFIG_PCI +extern int ata_pci_sff_init_host(struct ata_host *host); +extern int ata_pci_sff_prepare_host(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct ata_host **r_host); +extern int ata_pci_sff_activate_host(struct ata_host *host, + irq_handler_t irq_handler, + struct scsi_host_template *sht); +extern int ata_pci_sff_init_one(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct scsi_host_template *sht, void *host_priv, int hflags); +#endif /* CONFIG_PCI */ + +#ifdef CONFIG_ATA_BMDMA + +extern const struct ata_port_operations ata_bmdma_port_ops; + +#define ATA_BMDMA_SHT(drv_name) \ + ATA_BASE_SHT(drv_name), \ + .sg_tablesize = LIBATA_MAX_PRD, \ + .dma_boundary = ATA_DMA_BOUNDARY + +extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); +extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); +extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); +extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc); +extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); +extern void ata_bmdma_error_handler(struct ata_port *ap); +extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); +extern void ata_bmdma_irq_clear(struct ata_port *ap); +extern void ata_bmdma_setup(struct ata_queued_cmd *qc); +extern void ata_bmdma_start(struct ata_queued_cmd *qc); +extern void ata_bmdma_stop(struct ata_queued_cmd *qc); +extern u8 ata_bmdma_status(struct ata_port *ap); +extern int ata_bmdma_port_start(struct ata_port *ap); +extern int ata_bmdma_port_start32(struct ata_port *ap); + +#ifdef CONFIG_PCI +extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev); +extern void ata_pci_bmdma_init(struct ata_host *host); +extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct ata_host **r_host); +extern int ata_pci_bmdma_init_one(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct scsi_host_template *sht, + void *host_priv, int hflags); +#endif /* CONFIG_PCI */ +#endif /* CONFIG_ATA_BMDMA */ + +/** + * ata_sff_busy_wait - Wait for a port status register + * @ap: Port to wait for. + * @bits: bits that must be clear + * @max: number of 10uS waits to perform + * + * Waits up to max*10 microseconds for the selected bits in the port's + * status register to be cleared. + * Returns final value of status register. + * + * LOCKING: + * Inherited from caller. + */ +static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits, + unsigned int max) +{ + u8 status; + + do { + udelay(10); + status = ap->ops->sff_check_status(ap); + max--; + } while (status != 0xff && (status & bits) && (max > 0)); + + return status; +} + +/** + * ata_wait_idle - Wait for a port to be idle. + * @ap: Port to wait for. + * + * Waits up to 10ms for port's BUSY and DRQ signals to clear. + * Returns final value of status register. + * + * LOCKING: + * Inherited from caller. 
+ */ +static inline u8 ata_wait_idle(struct ata_port *ap) +{ + u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + +#ifdef ATA_DEBUG + if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) + ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", + status); +#endif + + return status; +} +#endif /* CONFIG_ATA_SFF */ + +#endif /* __LINUX_LIBATA_H__ */ diff --git a/include/linux/libps2.h b/include/linux/libps2.h new file mode 100644 index 00000000..79603a6c --- /dev/null +++ b/include/linux/libps2.h @@ -0,0 +1,56 @@ +#ifndef _LIBPS2_H +#define _LIBPS2_H + +/* + * Copyright (C) 1999-2002 Vojtech Pavlik + * Copyright (C) 2004 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + + +#define PS2_CMD_GETID 0x02f2 +#define PS2_CMD_RESET_BAT 0x02ff + +#define PS2_RET_BAT 0xaa +#define PS2_RET_ID 0x00 +#define PS2_RET_ACK 0xfa +#define PS2_RET_NAK 0xfe +#define PS2_RET_ERR 0xfc + +#define PS2_FLAG_ACK 1 /* Waiting for ACK/NAK */ +#define PS2_FLAG_CMD 2 /* Waiting for command to finish */ +#define PS2_FLAG_CMD1 4 /* Waiting for the first byte of command response */ +#define PS2_FLAG_WAITID 8 /* Command execiting is GET ID */ +#define PS2_FLAG_NAK 16 /* Last transmission was NAKed */ + +struct ps2dev { + struct serio *serio; + + /* Ensures that only one command is executing at a time */ + struct mutex cmd_mutex; + + /* Used to signal completion from interrupt handler */ + wait_queue_head_t wait; + + unsigned long flags; + unsigned char cmdbuf[6]; + unsigned char cmdcnt; + unsigned char nak; +}; + +void ps2_init(struct ps2dev *ps2dev, struct serio *serio); +int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout); +void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout); +void ps2_begin_command(struct ps2dev *ps2dev); +void ps2_end_command(struct ps2dev *ps2dev); +int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); +int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command); +int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data); +int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data); +void ps2_cmd_aborted(struct ps2dev *ps2dev); +int ps2_is_keyboard_id(char id); + +#endif /* _LIBPS2_H */ diff --git a/include/linux/license.h b/include/linux/license.h new file mode 100644 index 00000000..decdbf43 --- /dev/null +++ b/include/linux/license.h @@ -0,0 +1,14 @@ +#ifndef __LICENSE_H +#define __LICENSE_H + +static inline int license_is_gpl_compatible(const char *license) +{ + return (strcmp(license, "GPL") == 0 + || strcmp(license, "GPL v2") == 0 + || strcmp(license, "GPL and additional rights") == 0 + || strcmp(license, "Dual BSD/GPL") == 0 + || strcmp(license, "Dual MIT/GPL") == 0 + || strcmp(license, "Dual MPL/GPL") == 0); +} + +#endif diff --git a/include/linux/limits.h b/include/linux/limits.h new file mode 100644 index 00000000..2d0f9416 --- /dev/null +++ b/include/linux/limits.h @@ -0,0 +1,20 @@ +#ifndef _LINUX_LIMITS_H +#define _LINUX_LIMITS_H + +#define NR_OPEN 1024 + +#define NGROUPS_MAX 65536 /* supplemental group IDs are available */ +#define ARG_MAX 131072 /* # bytes of args + environ for exec() */ +#define LINK_MAX 127 /* # links a file may have */ +#define MAX_CANON 255 /* size of the canonical input queue */ +#define MAX_INPUT 255 /* size of the type-ahead buffer */ +#define NAME_MAX 255 /* # chars in a file name */ +#define PATH_MAX 
4096 /* # chars in a path name including nul */ +#define PIPE_BUF 4096 /* # bytes in atomic write to a pipe */ +#define XATTR_NAME_MAX 255 /* # chars in an extended attribute name */ +#define XATTR_SIZE_MAX 65536 /* size of an extended attribute value (64k) */ +#define XATTR_LIST_MAX 65536 /* size of extended attribute namelist (64k) */ + +#define RTSIG_MAX 32 + +#endif diff --git a/include/linux/linkage.h b/include/linux/linkage.h new file mode 100644 index 00000000..807f1e53 --- /dev/null +++ b/include/linux/linkage.h @@ -0,0 +1,91 @@ +#ifndef _LINUX_LINKAGE_H +#define _LINUX_LINKAGE_H + +#include <linux/compiler.h> +#include <asm/linkage.h> + +#ifdef __cplusplus +#define CPP_ASMLINKAGE extern "C" +#else +#define CPP_ASMLINKAGE +#endif + +#ifndef asmlinkage +#define asmlinkage CPP_ASMLINKAGE +#endif + +#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) +#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) + +/* + * For assembly routines. + * + * Note when using these that you must specify the appropriate + * alignment directives yourself + */ +#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw" +#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw" + +/* + * This is used by architectures to keep arguments on the stack + * untouched by the compiler by keeping them live until the end. + * The argument stack may be owned by the assembly-language + * caller, not the callee, and gcc doesn't always understand + * that. + * + * We have the return value, and a maximum of six arguments. + * + * This should always be followed by a "return ret" for the + * protection to work (ie no more work that the compiler might + * end up needing stack temporaries for). + */ +/* Assembly files may be compiled with -traditional .. */ +#ifndef __ASSEMBLY__ +#ifndef asmlinkage_protect +# define asmlinkage_protect(n, ret, args...) do { } while (0) +#endif +#endif + +#ifndef __ALIGN +#define __ALIGN .align 4,0x90 +#define __ALIGN_STR ".align 4,0x90" +#endif + +#ifdef __ASSEMBLY__ + +#ifndef LINKER_SCRIPT +#define ALIGN __ALIGN +#define ALIGN_STR __ALIGN_STR + +#ifndef ENTRY +#define ENTRY(name) \ + .globl name; \ + ALIGN; \ + name: +#endif +#endif /* LINKER_SCRIPT */ + +#ifndef WEAK +#define WEAK(name) \ + .weak name; \ + name: +#endif + +#ifndef END +#define END(name) \ + .size name, .-name +#endif + +/* If symbol 'name' is treated as a subroutine (gets called, and returns) + * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of + * static analysis tools such as stack depth analyzer. 
+ */ +#ifndef ENDPROC +#define ENDPROC(name) \ + .type name, @function; \ + END(name) +#endif + +#endif + +#endif diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h new file mode 100644 index 00000000..ca5bd91d --- /dev/null +++ b/include/linux/linux_logo.h @@ -0,0 +1,61 @@ +#ifndef _LINUX_LINUX_LOGO_H +#define _LINUX_LINUX_LOGO_H + +/* + * Linux logo to be displayed on boot + * + * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu) + * Copyright (C) 1996,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 2001 Greg Banks <gnb@alphalink.com.au> + * Copyright (C) 2001 Jan-Benedict Glaw <jbglaw@lug-owl.de> + * Copyright (C) 2003 Geert Uytterhoeven <geert@linux-m68k.org> + * + * Serial_console ascii image can be any size, + * but should contain %s to display the version + */ + +#include <linux/init.h> + + +#define LINUX_LOGO_MONO 1 /* monochrome black/white */ +#define LINUX_LOGO_VGA16 2 /* 16 colors VGA text palette */ +#define LINUX_LOGO_CLUT224 3 /* 224 colors */ +#define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */ + + +struct linux_logo { + int type; /* one of LINUX_LOGO_* */ + unsigned int width; + unsigned int height; + unsigned int clutsize; /* LINUX_LOGO_CLUT224 only */ + const unsigned char *clut; /* LINUX_LOGO_CLUT224 only */ + const unsigned char *data; +}; + +extern const struct linux_logo logo_linux_mono; +extern const struct linux_logo logo_linux_vga16; +extern const struct linux_logo logo_linux_clut224; +extern const struct linux_logo logo_blackfin_vga16; +extern const struct linux_logo logo_blackfin_clut224; +extern const struct linux_logo logo_dec_clut224; +extern const struct linux_logo logo_mac_clut224; +extern const struct linux_logo logo_parisc_clut224; +extern const struct linux_logo logo_sgi_clut224; +extern const struct linux_logo logo_sun_clut224; +extern const struct linux_logo logo_superh_mono; +extern const struct linux_logo logo_superh_vga16; +extern const struct linux_logo logo_superh_clut224; +extern const struct linux_logo logo_m32r_clut224; +extern const struct linux_logo logo_spe_clut224; + +extern const struct linux_logo *fb_find_logo(int depth); +#ifdef CONFIG_FB_LOGO_EXTRA +extern void fb_append_extra_logo(const struct linux_logo *logo, + unsigned int n); +#else +static inline void fb_append_extra_logo(const struct linux_logo *logo, + unsigned int n) +{} +#endif + +#endif /* _LINUX_LINUX_LOGO_H */ diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h new file mode 100644 index 00000000..f1664c63 --- /dev/null +++ b/include/linux/lis3lv02d.h @@ -0,0 +1,127 @@ +#ifndef __LIS3LV02D_H_ +#define __LIS3LV02D_H_ + +/** + * struct lis3lv02d_platform_data - lis3 chip family platform data + * @click_flags: Click detection unit configuration + * @click_thresh_x: Click detection unit x axis threshold + * @click_thresh_y: Click detection unit y axis threshold + * @click_thresh_z: Click detection unit z axis threshold + * @click_time_limit: Click detection unit time parameter + * @click_latency: Click detection unit latency parameter + * @click_window: Click detection unit window parameter + * @irq_cfg: On chip irq source and type configuration (click / + * data available / wake up, open drain, polarity) + * @irq_flags1: Additional irq triggering flags for irq channel 0 + * @irq_flags2: Additional irq triggering flags for irq channel 1 + * @duration1: Wake up unit 1 duration parameter + * @duration2: Wake up unit 2 duration parameter + * @wakeup_flags: Wake up unit 1 flags + * @wakeup_thresh: Wake up unit 1 threshold value 
+ * @wakeup_flags2: Wake up unit 2 flags + * @wakeup_thresh2: Wake up unit 2 threshold value + * @hipass_ctrl: High pass filter control (enable / disable, cut off + * frequency) + * @axis_x: Sensor orientation remapping for x-axis + * @axis_y: Sensor orientation remapping for y-axis + * @axis_z: Sensor orientation remapping for z-axis + * @driver_features: Enable bits for different features. Disabled by default + * @default_rate: Default sampling rate. 0 means reset default + * @setup_resources: Interrupt line setup call back function + * @release_resources: Interrupt line release call back function + * @st_min_limits[3]: Selftest acceptance minimum values + * @st_max_limits[3]: Selftest acceptance maximum values + * @irq2: Irq line 2 number + * + * Platform data is used to setup the sensor chip. Meaning of the different + * chip features can be found from the data sheet. It is publicly available + * at www.st.com web pages. Currently the platform data is used + * only for the 8 bit device. The 8 bit device has two wake up / free fall + * detection units and click detection unit. There are plenty of ways to + * configure the chip which makes is quite hard to explain deeper meaning of + * the fields here. Behaviour of the detection blocks varies heavily depending + * on the configuration. For example, interrupt detection block can use high + * pass filtered data which makes it react to the changes in the acceleration. + * Irq_flags can be used to enable interrupt detection on the both edges. + * With proper chip configuration this produces interrupt when some trigger + * starts and when it goes away. + */ + +struct lis3lv02d_platform_data { + /* please note: the 'click' feature is only supported for + * LIS[32]02DL variants of the chip and will be ignored for + * others */ +#define LIS3_CLICK_SINGLE_X (1 << 0) +#define LIS3_CLICK_DOUBLE_X (1 << 1) +#define LIS3_CLICK_SINGLE_Y (1 << 2) +#define LIS3_CLICK_DOUBLE_Y (1 << 3) +#define LIS3_CLICK_SINGLE_Z (1 << 4) +#define LIS3_CLICK_DOUBLE_Z (1 << 5) + unsigned char click_flags; + unsigned char click_thresh_x; + unsigned char click_thresh_y; + unsigned char click_thresh_z; + unsigned char click_time_limit; + unsigned char click_latency; + unsigned char click_window; + +#define LIS3_IRQ1_DISABLE (0 << 0) +#define LIS3_IRQ1_FF_WU_1 (1 << 0) +#define LIS3_IRQ1_FF_WU_2 (2 << 0) +#define LIS3_IRQ1_FF_WU_12 (3 << 0) +#define LIS3_IRQ1_DATA_READY (4 << 0) +#define LIS3_IRQ1_CLICK (7 << 0) +#define LIS3_IRQ1_MASK (7 << 0) +#define LIS3_IRQ2_DISABLE (0 << 3) +#define LIS3_IRQ2_FF_WU_1 (1 << 3) +#define LIS3_IRQ2_FF_WU_2 (2 << 3) +#define LIS3_IRQ2_FF_WU_12 (3 << 3) +#define LIS3_IRQ2_DATA_READY (4 << 3) +#define LIS3_IRQ2_CLICK (7 << 3) +#define LIS3_IRQ2_MASK (7 << 3) +#define LIS3_IRQ_OPEN_DRAIN (1 << 6) +#define LIS3_IRQ_ACTIVE_LOW (1 << 7) + unsigned char irq_cfg; + unsigned char irq_flags1; /* Additional irq edge / level flags */ + unsigned char irq_flags2; /* Additional irq edge / level flags */ + unsigned char duration1; + unsigned char duration2; +#define LIS3_WAKEUP_X_LO (1 << 0) +#define LIS3_WAKEUP_X_HI (1 << 1) +#define LIS3_WAKEUP_Y_LO (1 << 2) +#define LIS3_WAKEUP_Y_HI (1 << 3) +#define LIS3_WAKEUP_Z_LO (1 << 4) +#define LIS3_WAKEUP_Z_HI (1 << 5) + unsigned char wakeup_flags; + unsigned char wakeup_thresh; + unsigned char wakeup_flags2; + unsigned char wakeup_thresh2; +#define LIS3_HIPASS_CUTFF_8HZ 0 +#define LIS3_HIPASS_CUTFF_4HZ 1 +#define LIS3_HIPASS_CUTFF_2HZ 2 +#define LIS3_HIPASS_CUTFF_1HZ 3 +#define LIS3_HIPASS1_DISABLE (1 << 2) 
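A board-code sketch (hypothetical name example_lis3_pdata and made-up threshold/duration values; real numbers come from the data sheet) showing how the flag macros above are typically combined into a platform data instance:

#include <linux/lis3lv02d.h>

/* hypothetical board: single click on X/Y, wake-up unit 1 routed to IRQ1,
 * interrupt line wired open-drain and active low */
static struct lis3lv02d_platform_data example_lis3_pdata = {
        .click_flags    = LIS3_CLICK_SINGLE_X | LIS3_CLICK_SINGLE_Y,
        .click_thresh_x = 10,
        .click_thresh_y = 10,
        .irq_cfg        = LIS3_IRQ1_FF_WU_1 | LIS3_IRQ_OPEN_DRAIN |
                          LIS3_IRQ_ACTIVE_LOW,
        .wakeup_flags   = LIS3_WAKEUP_X_HI | LIS3_WAKEUP_Y_HI,
        .wakeup_thresh  = 8,
        .duration1      = 2,
};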
+#define LIS3_HIPASS2_DISABLE (1 << 3) + unsigned char hipass_ctrl; +#define LIS3_NO_MAP 0 +#define LIS3_DEV_X 1 +#define LIS3_DEV_Y 2 +#define LIS3_DEV_Z 3 +#define LIS3_INV_DEV_X -1 +#define LIS3_INV_DEV_Y -2 +#define LIS3_INV_DEV_Z -3 + s8 axis_x; + s8 axis_y; + s8 axis_z; +#define LIS3_USE_BLOCK_READ 0x02 + u16 driver_features; + int default_rate; + int (*setup_resources)(void); + int (*release_resources)(void); + /* Limits for selftest are specified in chip data sheet */ + s16 st_min_limits[3]; /* min pass limit x, y, z */ + s16 st_max_limits[3]; /* max pass limit x, y, z */ + int irq2; +}; + +#endif /* __LIS3LV02D_H_ */ diff --git a/include/linux/list.h b/include/linux/list.h new file mode 100644 index 00000000..cc6d2aa6 --- /dev/null +++ b/include/linux/list.h @@ -0,0 +1,719 @@ +#ifndef _LINUX_LIST_H +#define _LINUX_LIST_H + +#include <linux/types.h> +#include <linux/stddef.h> +#include <linux/poison.h> +#include <linux/const.h> + +/* + * Simple doubly linked list implementation. + * + * Some of the internal functions ("__xxx") are useful when + * manipulating whole lists rather than single entries, as + * sometimes we already know the next/prev entries and we can + * generate better code by using them directly rather than + * using the generic single-entry routines. + */ + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +static inline void INIT_LIST_HEAD(struct list_head *list) +{ + list->next = list; + list->prev = list; +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +#ifndef CONFIG_DEBUG_LIST +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} +#else +extern void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next); +#endif + +/** + * list_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +static inline void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} + + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static inline void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} + +/* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_del(struct list_head * prev, struct list_head * next) +{ + next->prev = prev; + prev->next = next; +} + +/** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty() on entry does not return true after this, the entry is + * in an undefined state. 
+ */ +#ifndef CONFIG_DEBUG_LIST +static inline void __list_del_entry(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); +} + +static inline void list_del(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; +} +#else +extern void __list_del_entry(struct list_head *entry); +extern void list_del(struct list_head *entry); +#endif + +/** + * list_replace - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * If @old was empty, it will be overwritten. + */ +static inline void list_replace(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; +} + +static inline void list_replace_init(struct list_head *old, + struct list_head *new) +{ + list_replace(old, new); + INIT_LIST_HEAD(old); +} + +/** + * list_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. + */ +static inline void list_del_init(struct list_head *entry) +{ + __list_del_entry(entry); + INIT_LIST_HEAD(entry); +} + +/** + * list_move - delete from one list and add as another's head + * @list: the entry to move + * @head: the head that will precede our entry + */ +static inline void list_move(struct list_head *list, struct list_head *head) +{ + __list_del_entry(list); + list_add(list, head); +} + +/** + * list_move_tail - delete from one list and add as another's tail + * @list: the entry to move + * @head: the head that will follow our entry + */ +static inline void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del_entry(list); + list_add_tail(list, head); +} + +/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +/** + * list_empty_careful - tests whether a list is empty and not being modified + * @head: the list to test + * + * Description: + * tests whether a list is empty _and_ checks that no other CPU might be + * in the process of modifying either member (next or prev) + * + * NOTE: using list_empty_careful() without synchronization + * can only be safe if the only activity that can happen + * to the list entry is list_del_init(). Eg. it cannot be used + * if another CPU could re-list_add() it. + */ +static inline int list_empty_careful(const struct list_head *head) +{ + struct list_head *next = head->next; + return (next == head) && (next == head->prev); +} + +/** + * list_rotate_left - rotate the list to the left + * @head: the head of the list + */ +static inline void list_rotate_left(struct list_head *head) +{ + struct list_head *first; + + if (!list_empty(head)) { + first = head->next; + list_move_tail(first, head); + } +} + +/** + * list_is_singular - tests whether a list has just one entry. + * @head: the list to test. 
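A small sketch (hypothetical struct cache_entry and function names) of the delete/move primitives above, in the common LRU pattern where a "touch" sends the most recently used entry to the tail of its list:

struct cache_entry {                    /* hypothetical embedding struct */
        int                     key;
        struct list_head        lru;    /* linked into a per-cache LRU list */
};

static void example_touch(struct cache_entry *e, struct list_head *lru_list)
{
        /* unlink from wherever it is and re-add at the tail, so the least
         * recently used entry ends up nearest the list head */
        list_move_tail(&e->lru, lru_list);
}

static void example_unqueue(struct cache_entry *e)
{
        /* list_del_init() leaves e->lru as a valid empty list, so a later
         * list_empty(&e->lru) can tell whether the entry is still queued */
        list_del_init(&e->lru);
}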
+ */ +static inline int list_is_singular(const struct list_head *head) +{ + return !list_empty(head) && (head->next == head->prev); +} + +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. + * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + +static inline void __list_splice(const struct list_head *list, + struct list_head *prev, + struct list_head *next) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * list_splice - join two lists, this is designed for stacks + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice(const struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); +} + +/** + * list_splice_init - join two lists and reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * The list at @list is reinitialised + */ +static inline void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); + INIT_LIST_HEAD(list); + } +} + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_first_entry - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. 
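A sketch (hypothetical names example_pending/example_lock/example_drain; assumes a spinlock protects the shared list) of the usual list_splice_init() pattern: drain a shared "pending" list onto a private list under the lock, then process the private copy with the lock dropped:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(example_pending);              /* shared, lock-protected */
static DEFINE_SPINLOCK(example_lock);

static void example_drain(void)
{
        LIST_HEAD(todo);                        /* private, on-stack list */

        spin_lock(&example_lock);
        /* moves every entry of example_pending onto todo and re-initialises
         * example_pending to empty, all in O(1) */
        list_splice_init(&example_pending, &todo);
        spin_unlock(&example_lock);

        /* entries on todo can now be walked without holding the lock */
}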
+ * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +/** + * __list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * This variant doesn't differ from list_for_each() any more. + * We don't do prefetching in either case. + */ +#define __list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +/** + * list_for_each_prev - iterate over a list backwards + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each_prev(pos, head) \ + for (pos = (head)->prev; pos != (head); pos = pos->prev) + +/** + * list_for_each_safe - iterate over a list safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +/** + * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_prev_safe(pos, n, head) \ + for (pos = (head)->prev, n = pos->prev; \ + pos != (head); \ + pos = n, n = pos->prev) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_reverse - iterate backwards over list of given type. + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + +/** + * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() + * @pos: the type * to use as a start point + * @head: the head of the list + * @member: the name of the list_struct within the struct. + * + * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). + */ +#define list_prepare_entry(pos, head, member) \ + ((pos) ? : list_entry(head, typeof(*pos), member)) + +/** + * list_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position. 
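A short sketch (hypothetical struct item) of the typed iterator above; note that list_for_each_entry() must not be used to unlink or free the cursor, which is what the _safe variants further down are for:

struct item {                           /* hypothetical embedding struct */
        int                     value;
        struct list_head        node;
};

static int example_sum(struct list_head *head)
{
        struct item *it;
        int sum = 0;

        /* walks head->next, head->next->next, ... until it wraps back to head;
         * 'it' always points at the containing struct item, never at head */
        list_for_each_entry(it, head, node)
                sum += it->value;
        return sum;
}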
+ */ +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_continue_reverse - iterate backwards from the given point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Start to iterate over list of given type backwards, continuing after + * the current position. + */ +#define list_for_each_entry_continue_reverse(pos, head, member) \ + for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + +/** + * list_for_each_entry_from - iterate over list of given type from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing from current position. + */ +#define list_for_each_entry_from(pos, head, member) \ + for (; &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_continue - continue list iteration safe against removal + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing after current point, + * safe against removal of list entry. + */ +#define list_for_each_entry_safe_continue(pos, n, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_from - iterate over list from current point safe against removal + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type from current point, safe against + * removal of list entry. + */ +#define list_for_each_entry_safe_from(pos, n, head, member) \ + for (n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate backwards over list of given type, safe against removal + * of list entry. 
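A sketch (same hypothetical struct item as above; assumes <linux/slab.h> for kfree()) of why the _safe variants exist: the extra cursor caches the next element before the current one is unlinked and freed:

static void example_remove_negatives(struct list_head *head)
{
        struct item *it, *tmp;

        /* 'tmp' is read before 'it' is deleted, so freeing 'it' cannot
         * break the traversal */
        list_for_each_entry_safe(it, tmp, head, node) {
                if (it->value < 0) {
                        list_del(&it->node);
                        kfree(it);
                }
        }
}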
+ */ +#define list_for_each_entry_safe_reverse(pos, n, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member), \ + n = list_entry(pos->member.prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.prev, typeof(*n), member)) + +/** + * list_safe_reset_next - reset a stale list_for_each_entry_safe loop + * @pos: the loop cursor used in the list_for_each_entry_safe loop + * @n: temporary storage used in list_for_each_entry_safe + * @member: the name of the list_struct within the struct. + * + * list_safe_reset_next is not safe to use in general if the list may be + * modified concurrently (eg. the lock is dropped in the loop body). An + * exception to this is if the cursor element (pos) is pinned in the list, + * and list_safe_reset_next is called after re-taking the lock and before + * completing the current iteration of the loop body. + */ +#define list_safe_reset_next(pos, n, member) \ + n = list_entry(pos->member.next, typeof(*pos), member) + +/* + * Double linked lists with a single pointer list head. + * Mostly useful for hash tables where the two pointer list head is + * too wasteful. + * You lose the ability to access the tail in O(1). + */ + +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +static inline int hlist_unhashed(const struct hlist_node *h) +{ + return !h->pprev; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} + +static inline void hlist_del_init(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + INIT_HLIST_NODE(n); + } +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +/* next must be != NULL */ +static inline void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; + *(n->pprev) = n; +} + +static inline void hlist_add_after(struct hlist_node *n, + struct hlist_node *next) +{ + next->next = n->next; + n->next = next; + next->pprev = &n->next; + + if(next->next) + next->next->pprev = &next->next; +} + +/* after that we'll appear to be on some hlist and hlist_del will work */ +static inline void hlist_add_fake(struct hlist_node *n) +{ + n->pprev = &n->next; +} + +/* + * Move a list from one list head to another. Fixup the pprev + * reference of the first entry if it exists. 
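A sketch (hypothetical struct object and a fixed-size table) of the intended hlist use: one hlist_head per hash bucket, hlist_add_head() to insert, and the four-argument hlist_for_each_entry() defined just below to look entries up:

#define EXAMPLE_HASH_BITS       4
#define EXAMPLE_HASH_SIZE       (1 << EXAMPLE_HASH_BITS)

struct object {                         /* hypothetical hashed object */
        unsigned int            key;
        struct hlist_node       hash;
};

static struct hlist_head example_table[EXAMPLE_HASH_SIZE];

static void example_insert(struct object *obj)
{
        hlist_add_head(&obj->hash,
                       &example_table[obj->key % EXAMPLE_HASH_SIZE]);
}

static struct object *example_lookup(unsigned int key)
{
        struct object *obj;
        struct hlist_node *pos;

        /* in this kernel the iterator still takes a separate
         * struct hlist_node cursor as its second argument */
        hlist_for_each_entry(obj, pos,
                             &example_table[key % EXAMPLE_HASH_SIZE], hash)
                if (obj->key == key)
                        return obj;
        return NULL;
}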
+ */ +static inline void hlist_move_list(struct hlist_head *old, + struct hlist_head *new) +{ + new->first = old->first; + if (new->first) + new->first->pprev = &new->first; + old->first = NULL; +} + +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_for_each(pos, head) \ + for (pos = (head)->first; pos ; pos = pos->next) + +#define hlist_for_each_safe(pos, n, head) \ + for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ + pos = n) + +/** + * hlist_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_continue - iterate over a hlist continuing after current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_continue(tpos, pos, member) \ + for (pos = (pos)->next; \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_from - iterate over a hlist continuing from current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_from(tpos, pos, member) \ + for (; pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @n: another &struct hlist_node to use as temporary storage + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ + for (pos = (head)->first; \ + pos && ({ n = pos->next; 1; }) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = n) + +#endif diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h new file mode 100644 index 00000000..31f9d75a --- /dev/null +++ b/include/linux/list_bl.h @@ -0,0 +1,156 @@ +#ifndef _LINUX_LIST_BL_H +#define _LINUX_LIST_BL_H + +#include <linux/list.h> +#include <linux/bit_spinlock.h> + +/* + * Special version of lists, where head of the list has a lock in the lowest + * bit. This is useful for scalable hash tables without increasing memory + * footprint overhead. + * + * For modification operations, the 0 bit of hlist_bl_head->first + * pointer must be set. + * + * With some small modifications, this can easily be adapted to store several + * arbitrary bits (not just a single lock bit), if the need arises to store + * some fast and compact auxiliary data. 
+ */ + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +#define LIST_BL_LOCKMASK 1UL +#else +#define LIST_BL_LOCKMASK 0UL +#endif + +#ifdef CONFIG_DEBUG_LIST +#define LIST_BL_BUG_ON(x) BUG_ON(x) +#else +#define LIST_BL_BUG_ON(x) +#endif + + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; +}; +#define INIT_HLIST_BL_HEAD(ptr) \ + ((ptr)->first = NULL) + +static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member) + +static inline int hlist_bl_unhashed(const struct hlist_bl_node *h) +{ + return !h->pprev; +} + +static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h) +{ + return (struct hlist_bl_node *) + ((unsigned long)h->first & ~LIST_BL_LOCKMASK); +} + +static inline void hlist_bl_set_first(struct hlist_bl_head *h, + struct hlist_bl_node *n) +{ + LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); + LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != + LIST_BL_LOCKMASK); + h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); +} + +static inline int hlist_bl_empty(const struct hlist_bl_head *h) +{ + return !((unsigned long)h->first & ~LIST_BL_LOCKMASK); +} + +static inline void hlist_bl_add_head(struct hlist_bl_node *n, + struct hlist_bl_head *h) +{ + struct hlist_bl_node *first = hlist_bl_first(h); + + n->next = first; + if (first) + first->pprev = &n->next; + n->pprev = &h->first; + hlist_bl_set_first(h, n); +} + +static inline void __hlist_bl_del(struct hlist_bl_node *n) +{ + struct hlist_bl_node *next = n->next; + struct hlist_bl_node **pprev = n->pprev; + + LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); + + /* pprev may be `first`, so be careful not to lose the lock bit */ + *pprev = (struct hlist_bl_node *) + ((unsigned long)next | + ((unsigned long)*pprev & LIST_BL_LOCKMASK)); + if (next) + next->pprev = pprev; +} + +static inline void hlist_bl_del(struct hlist_bl_node *n) +{ + __hlist_bl_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} + +static inline void hlist_bl_del_init(struct hlist_bl_node *n) +{ + if (!hlist_bl_unhashed(n)) { + __hlist_bl_del(n); + INIT_HLIST_BL_NODE(n); + } +} + +static inline void hlist_bl_lock(struct hlist_bl_head *b) +{ + bit_spin_lock(0, (unsigned long *)b); +} + +static inline void hlist_bl_unlock(struct hlist_bl_head *b) +{ + __bit_spin_unlock(0, (unsigned long *)b); +} + +/** + * hlist_bl_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + */ +#define hlist_bl_for_each_entry(tpos, pos, head, member) \ + for (pos = hlist_bl_first(head); \ + pos && \ + ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @n: another &struct hlist_node to use as temporary storage + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. 
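A sketch (hypothetical struct blobj and function names) of the bit-locked bucket discipline: hlist_bl_lock() takes the lock kept in bit 0 of ->first, and hlist_bl_add_head() must only be called with that lock held (the debug BUG_ON in hlist_bl_set_first() checks exactly that):

struct blobj {                          /* hypothetical object kept in a bl bucket */
        unsigned long           key;
        struct hlist_bl_node    node;
};

static void example_bl_insert(struct hlist_bl_head *bucket, struct blobj *obj)
{
        hlist_bl_lock(bucket);          /* bit_spin_lock on bit 0 of ->first */
        hlist_bl_add_head(&obj->node, bucket);
        hlist_bl_unlock(bucket);
}

static struct blobj *example_bl_find(struct hlist_bl_head *bucket,
                                     unsigned long key)
{
        struct blobj *obj, *found = NULL;
        struct hlist_bl_node *pos;

        hlist_bl_lock(bucket);
        hlist_bl_for_each_entry(obj, pos, bucket, node)
                if (obj->key == key) {
                        found = obj;
                        break;
                }
        hlist_bl_unlock(bucket);
        return found;
}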
+ */ +#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \ + for (pos = hlist_bl_first(head); \ + pos && ({ n = pos->next; 1; }) && \ + ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \ + pos = n) + +#endif diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h new file mode 100644 index 00000000..5d10ae36 --- /dev/null +++ b/include/linux/list_nulls.h @@ -0,0 +1,112 @@ +#ifndef _LINUX_LIST_NULLS_H +#define _LINUX_LIST_NULLS_H + +/* + * Special version of lists, where end of list is not a NULL pointer, + * but a 'nulls' marker, which can have many different values. + * (up to 2^31 different values guaranteed on all platforms) + * + * In the standard hlist, termination of a list is the NULL pointer. + * In this special 'nulls' variant, we use the fact that objects stored in + * a list are aligned on a word (4 or 8 bytes alignment). + * We therefore use the last significant bit of 'ptr' : + * Set to 1 : This is a 'nulls' end-of-list marker (ptr >> 1) + * Set to 0 : This is a pointer to some object (ptr) + */ + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next, **pprev; +}; +#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \ + ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1))) + +#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) +/** + * ptr_is_a_nulls - Test if a ptr is a nulls + * @ptr: ptr to be tested + * + */ +static inline int is_a_nulls(const struct hlist_nulls_node *ptr) +{ + return ((unsigned long)ptr & 1); +} + +/** + * get_nulls_value - Get the 'nulls' value of the end of chain + * @ptr: end of chain + * + * Should be called only if is_a_nulls(ptr); + */ +static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr) +{ + return ((unsigned long)ptr) >> 1; +} + +static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h) +{ + return !h->pprev; +} + +static inline int hlist_nulls_empty(const struct hlist_nulls_head *h) +{ + return is_a_nulls(h->first); +} + +static inline void hlist_nulls_add_head(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *first = h->first; + + n->next = first; + n->pprev = &h->first; + h->first = n; + if (!is_a_nulls(first)) + first->pprev = &n->next; +} + +static inline void __hlist_nulls_del(struct hlist_nulls_node *n) +{ + struct hlist_nulls_node *next = n->next; + struct hlist_nulls_node **pprev = n->pprev; + *pprev = next; + if (!is_a_nulls(next)) + next->pprev = pprev; +} + +static inline void hlist_nulls_del(struct hlist_nulls_node *n) +{ + __hlist_nulls_del(n); + n->pprev = LIST_POISON2; +} + +/** + * hlist_nulls_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + */ +#define hlist_nulls_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_nulls_for_each_entry_from - iterate over a hlist continuing from current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. 
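A conceptual sketch (hypothetical struct nobj; the lockless/RCU iterators actually live in rculist_nulls.h) of why the 'nulls' end marker carries a value: a lookup can detect that it finished in the wrong chain and restart:

struct nobj {                           /* hypothetical object kept in a nulls hash */
        unsigned int            key;
        struct hlist_nulls_node node;
};

/* each bucket i is assumed initialised with INIT_HLIST_NULLS_HEAD(&table[i], i) */
static struct nobj *example_nulls_lookup(struct hlist_nulls_head *table,
                                         unsigned int hash, unsigned int key)
{
        struct nobj *obj;
        struct hlist_nulls_node *pos;

begin:
        hlist_nulls_for_each_entry(obj, pos, &table[hash], node)
                if (obj->key == key)
                        return obj;
        /*
         * The walk stopped on a nulls marker.  Under RCU an object can be
         * moved to another chain while we walk; if the marker's value is not
         * the slot we started from, we ended up in a different chain and
         * must restart the lookup.
         */
        if (get_nulls_value(pos) != hash)
                goto begin;
        return NULL;
}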
+ * + */ +#define hlist_nulls_for_each_entry_from(tpos, pos, member) \ + for (; (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +#endif diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h new file mode 100644 index 00000000..1a2df2ef --- /dev/null +++ b/include/linux/list_sort.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_LIST_SORT_H +#define _LINUX_LIST_SORT_H + +#include <linux/types.h> + +struct list_head; + +void list_sort(void *priv, struct list_head *head, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b)); +#endif diff --git a/include/linux/llc.h b/include/linux/llc.h new file mode 100644 index 00000000..a2418ae1 --- /dev/null +++ b/include/linux/llc.h @@ -0,0 +1,91 @@ +#ifndef __LINUX_LLC_H +#define __LINUX_LLC_H +/* + * IEEE 802.2 User Interface SAPs for Linux, data structures and indicators. + * + * Copyright (c) 2001 by Jay Schulist <jschlst@samba.org> + * + * This program can be redistributed or modified under the terms of the + * GNU General Public License as published by the Free Software Foundation. + * This program is distributed without any warranty or implied warranty + * of merchantability or fitness for a particular purpose. + * + * See the GNU General Public License for more details. + */ + +#include <linux/socket.h> + +#define __LLC_SOCK_SIZE__ 16 /* sizeof(sockaddr_llc), word align. */ +struct sockaddr_llc { + __kernel_sa_family_t sllc_family; /* AF_LLC */ + __kernel_sa_family_t sllc_arphrd; /* ARPHRD_ETHER */ + unsigned char sllc_test; + unsigned char sllc_xid; + unsigned char sllc_ua; /* UA data, only for SOCK_STREAM. */ + unsigned char sllc_sap; + unsigned char sllc_mac[IFHWADDRLEN]; + unsigned char __pad[__LLC_SOCK_SIZE__ - + sizeof(__kernel_sa_family_t) * 2 - + sizeof(unsigned char) * 4 - IFHWADDRLEN]; +}; + +/* sockopt definitions. */ +enum llc_sockopts { + LLC_OPT_UNKNOWN = 0, + LLC_OPT_RETRY, /* max retrans attempts. */ + LLC_OPT_SIZE, /* max PDU size (octets). */ + LLC_OPT_ACK_TMR_EXP, /* ack expire time (secs). */ + LLC_OPT_P_TMR_EXP, /* pf cycle expire time (secs). */ + LLC_OPT_REJ_TMR_EXP, /* rej sent expire time (secs). */ + LLC_OPT_BUSY_TMR_EXP, /* busy state expire time (secs). */ + LLC_OPT_TX_WIN, /* tx window size. */ + LLC_OPT_RX_WIN, /* rx window size. */ + LLC_OPT_PKTINFO, /* ancillary packet information. */ + LLC_OPT_MAX +}; + +#define LLC_OPT_MAX_RETRY 100 +#define LLC_OPT_MAX_SIZE 4196 +#define LLC_OPT_MAX_WIN 127 +#define LLC_OPT_MAX_ACK_TMR_EXP 60 +#define LLC_OPT_MAX_P_TMR_EXP 60 +#define LLC_OPT_MAX_REJ_TMR_EXP 60 +#define LLC_OPT_MAX_BUSY_TMR_EXP 60 + +/* LLC SAP types. */ +#define LLC_SAP_NULL 0x00 /* NULL SAP. */ +#define LLC_SAP_LLC 0x02 /* LLC Sublayer Management. */ +#define LLC_SAP_SNA 0x04 /* SNA Path Control. */ +#define LLC_SAP_PNM 0x0E /* Proway Network Management. */ +#define LLC_SAP_IP 0x06 /* TCP/IP. */ +#define LLC_SAP_BSPAN 0x42 /* Bridge Spanning Tree Proto */ +#define LLC_SAP_MMS 0x4E /* Manufacturing Message Srv. */ +#define LLC_SAP_8208 0x7E /* ISO 8208 */ +#define LLC_SAP_3COM 0x80 /* 3COM. */ +#define LLC_SAP_PRO 0x8E /* Proway Active Station List */ +#define LLC_SAP_SNAP 0xAA /* SNAP. */ +#define LLC_SAP_BANYAN 0xBC /* Banyan. */ +#define LLC_SAP_IPX 0xE0 /* IPX/SPX. */ +#define LLC_SAP_NETBEUI 0xF0 /* NetBEUI. */ +#define LLC_SAP_LANMGR 0xF4 /* LanManager. */ +#define LLC_SAP_IMPL 0xF8 /* IMPL */ +#define LLC_SAP_DISC 0xFC /* Discovery */ +#define LLC_SAP_OSI 0xFE /* OSI Network Layers. 
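A sketch (reusing the hypothetical struct item from the list.h examples above) of the list_sort() callback contract: cmp returns a negative, zero or positive value like memcmp(), and priv is passed straight through to it:

static int example_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        const struct item *ia = list_entry(a, struct item, node);
        const struct item *ib = list_entry(b, struct item, node);

        /* ascending by ->value; only the sign of the result matters */
        return (ia->value > ib->value) - (ia->value < ib->value);
}

static void example_sort_items(struct list_head *head)
{
        list_sort(NULL, head, example_cmp);     /* no private state needed */
}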
*/ +#define LLC_SAP_LAR 0xDC /* LAN Address Resolution */ +#define LLC_SAP_RM 0xD4 /* Resource Management */ +#define LLC_SAP_GLOBAL 0xFF /* Global SAP. */ + +struct llc_pktinfo { + int lpi_ifindex; + unsigned char lpi_sap; + unsigned char lpi_mac[IFHWADDRLEN]; +}; + +#ifdef __KERNEL__ +#define LLC_SAP_DYN_START 0xC0 +#define LLC_SAP_DYN_STOP 0xDE +#define LLC_SAP_DYN_TRIES 4 + +#define llc_ui_skb_cb(__skb) ((struct sockaddr_llc *)&((__skb)->cb[0])) +#endif /* __KERNEL__ */ +#endif /* __LINUX_LLC_H */ diff --git a/include/linux/llist.h b/include/linux/llist.h new file mode 100644 index 00000000..a5199f6d --- /dev/null +++ b/include/linux/llist.h @@ -0,0 +1,186 @@ +#ifndef LLIST_H +#define LLIST_H +/* + * Lock-less NULL terminated single linked list + * + * If there are multiple producers and multiple consumers, llist_add + * can be used in producers and llist_del_all can be used in + * consumers. They can work simultaneously without lock. But + * llist_del_first can not be used here. Because llist_del_first + * depends on list->first->next does not changed if list->first is not + * changed during its operation, but llist_del_first, llist_add, + * llist_add (or llist_del_all, llist_add, llist_add) sequence in + * another consumer may violate that. + * + * If there are multiple producers and one consumer, llist_add can be + * used in producers and llist_del_all or llist_del_first can be used + * in the consumer. + * + * This can be summarized as follow: + * + * | add | del_first | del_all + * add | - | - | - + * del_first | | L | L + * del_all | | | - + * + * Where "-" stands for no lock is needed, while "L" stands for lock + * is needed. + * + * The list entries deleted via llist_del_all can be traversed with + * traversing function such as llist_for_each etc. But the list + * entries can not be traversed safely before deleted from the list. + * The order of deleted entries is from the newest to the oldest added + * one. If you want to traverse from the oldest to the newest, you + * must reverse the order by yourself before traversing. + * + * The basic atomic operation of this list is cmpxchg on long. On + * architectures that don't have NMI-safe cmpxchg implementation, the + * list can NOT be used in NMI handlers. So code that uses the list in + * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. + * + * Copyright 2010,2011 Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <asm/cmpxchg.h> + +struct llist_head { + struct llist_node *first; +}; + +struct llist_node { + struct llist_node *next; +}; + +#define LLIST_HEAD_INIT(name) { NULL } +#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name) + +/** + * init_llist_head - initialize lock-less list head + * @head: the head for your lock-less list + */ +static inline void init_llist_head(struct llist_head *list) +{ + list->first = NULL; +} + +/** + * llist_entry - get the struct of this entry + * @ptr: the &struct llist_node pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the llist_node within the struct. + */ +#define llist_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * llist_for_each - iterate over some deleted entries of a lock-less list + * @pos: the &struct llist_node to use as a loop cursor + * @node: the first entry of deleted list entries + * + * In general, some entries of the lock-less list can be traversed + * safely only after being deleted from list, so start with an entry + * instead of list head. + * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry. If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each(pos, node) \ + for ((pos) = (node); pos; (pos) = (pos)->next) + +/** + * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type + * @pos: the type * to use as a loop cursor. + * @node: the fist entry of deleted list entries. + * @member: the name of the llist_node with the struct. + * + * In general, some entries of the lock-less list can be traversed + * safely only after being removed from list, so start with an entry + * instead of list head. + * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry. If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each_entry(pos, node, member) \ + for ((pos) = llist_entry((node), typeof(*(pos)), member); \ + &(pos)->member != NULL; \ + (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) + +/** + * llist_empty - tests whether a lock-less list is empty + * @head: the list to test + * + * Not guaranteed to be accurate or up to date. Just a quick way to + * test whether the list is empty without deleting something from the + * list. + */ +static inline bool llist_empty(const struct llist_head *head) +{ + return ACCESS_ONCE(head->first) == NULL; +} + +static inline struct llist_node *llist_next(struct llist_node *node) +{ + return node->next; +} + +/** + * llist_add - add a new entry + * @new: new entry to be added + * @head: the head for your lock-less list + * + * Returns true if the list was empty prior to adding this entry. 
+ */ +static inline bool llist_add(struct llist_node *new, struct llist_head *head) +{ + struct llist_node *entry, *old_entry; + + entry = head->first; + for (;;) { + old_entry = entry; + new->next = entry; + entry = cmpxchg(&head->first, old_entry, new); + if (entry == old_entry) + break; + } + + return old_entry == NULL; +} + +/** + * llist_del_all - delete all entries from lock-less list + * @head: the head of lock-less list to delete all entries + * + * If list is empty, return NULL, otherwise, delete all entries and + * return the pointer to the first entry. The order of entries + * deleted is from the newest to the oldest added one. + */ +static inline struct llist_node *llist_del_all(struct llist_head *head) +{ + return xchg(&head->first, NULL); +} + +extern bool llist_add_batch(struct llist_node *new_first, + struct llist_node *new_last, + struct llist_head *head); +extern struct llist_node *llist_del_first(struct llist_head *head); + +#endif /* LLIST_H */ diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h new file mode 100644 index 00000000..4d24d645 --- /dev/null +++ b/include/linux/lockd/bind.h @@ -0,0 +1,60 @@ +/* + * linux/include/linux/lockd/bind.h + * + * This is the part of lockd visible to nfsd and the nfs client. + * + * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef LINUX_LOCKD_BIND_H +#define LINUX_LOCKD_BIND_H + +#include <linux/lockd/nlm.h> +/* need xdr-encoded error codes too, so... */ +#include <linux/lockd/xdr.h> +#ifdef CONFIG_LOCKD_V4 +#include <linux/lockd/xdr4.h> +#endif + +/* Dummy declarations */ +struct svc_rqst; + +/* + * This is the set of functions for lockd->nfsd communication + */ +struct nlmsvc_binding { + __be32 (*fopen)(struct svc_rqst *, + struct nfs_fh *, + struct file **); + void (*fclose)(struct file *); +}; + +extern struct nlmsvc_binding * nlmsvc_ops; + +/* + * Similar to nfs_client_initdata, but without the NFS-specific + * rpc_ops field. + */ +struct nlmclnt_initdata { + const char *hostname; + const struct sockaddr *address; + size_t addrlen; + unsigned short protocol; + u32 nfs_version; + int noresvport; + struct net *net; +}; + +/* + * Functions exported by the lockd module + */ + +extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init); +extern void nlmclnt_done(struct nlm_host *host); + +extern int nlmclnt_proc(struct nlm_host *host, int cmd, + struct file_lock *fl); +extern int lockd_up(struct net *net); +extern void lockd_down(struct net *net); + +#endif /* LINUX_LOCKD_BIND_H */ diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h new file mode 100644 index 00000000..257d3779 --- /dev/null +++ b/include/linux/lockd/debug.h @@ -0,0 +1,47 @@ +/* + * linux/include/linux/lockd/debug.h + * + * Debugging stuff. + * + * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef LINUX_LOCKD_DEBUG_H +#define LINUX_LOCKD_DEBUG_H + +#ifdef __KERNEL__ + +#include <linux/sunrpc/debug.h> + +/* + * Enable lockd debugging. + * Requires RPC_DEBUG. 
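A sketch (hypothetical deferred-work struct and names) of the producer/consumer split described in the llist.h comment: any number of producers call llist_add(), and a single consumer drains the whole batch with llist_del_all():

struct deferred {                       /* hypothetical deferred-work record */
        int                     what;
        struct llist_node       node;
};

static LLIST_HEAD(example_deferred);

/* producer side: safe from several CPUs, and from IRQ context, without a lock */
static bool example_queue(struct deferred *d)
{
        /* returns true if the list was empty, i.e. the consumer may need a kick */
        return llist_add(&d->node, &example_deferred);
}

/* single consumer: grab the whole batch at once */
static void example_flush(void)
{
        struct llist_node *batch = llist_del_all(&example_deferred);
        struct deferred *d;

        /* entries come back newest-first; reverse the chain first if the
         * original queuing order matters */
        llist_for_each_entry(d, batch, node) {
                /* act on d->what here */
        }
}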
+ */ +#ifdef RPC_DEBUG +# define LOCKD_DEBUG 1 +#endif + +#undef ifdebug +#if defined(RPC_DEBUG) && defined(LOCKD_DEBUG) +# define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) +#else +# define ifdebug(flag) if (0) +#endif + +#endif /* __KERNEL__ */ + +/* + * Debug flags + */ +#define NLMDBG_SVC 0x0001 +#define NLMDBG_CLIENT 0x0002 +#define NLMDBG_CLNTLOCK 0x0004 +#define NLMDBG_SVCLOCK 0x0008 +#define NLMDBG_MONITOR 0x0010 +#define NLMDBG_CLNTSUBS 0x0020 +#define NLMDBG_SVCSUBS 0x0040 +#define NLMDBG_HOSTCACHE 0x0080 +#define NLMDBG_XDR 0x0100 +#define NLMDBG_ALL 0x7fff + +#endif /* LINUX_LOCKD_DEBUG_H */ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h new file mode 100644 index 00000000..f04ce6ac --- /dev/null +++ b/include/linux/lockd/lockd.h @@ -0,0 +1,365 @@ +/* + * linux/include/linux/lockd/lockd.h + * + * General-purpose lockd include file. + * + * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef LINUX_LOCKD_LOCKD_H +#define LINUX_LOCKD_LOCKD_H + +#ifdef __KERNEL__ + +#include <linux/in.h> +#include <linux/in6.h> +#include <net/ipv6.h> +#include <linux/fs.h> +#include <linux/kref.h> +#include <linux/utsname.h> +#include <linux/nfsd/nfsfh.h> +#include <linux/lockd/bind.h> +#include <linux/lockd/xdr.h> +#ifdef CONFIG_LOCKD_V4 +#include <linux/lockd/xdr4.h> +#endif +#include <linux/lockd/debug.h> + +/* + * Version string + */ +#define LOCKD_VERSION "0.5" + +/* + * Default timeout for RPC calls (seconds) + */ +#define LOCKD_DFLT_TIMEO 10 + +/* + * Lockd host handle (used both by the client and server personality). + */ +struct nlm_host { + struct hlist_node h_hash; /* doubly linked list */ + struct sockaddr_storage h_addr; /* peer address */ + size_t h_addrlen; + struct sockaddr_storage h_srcaddr; /* our address (optional) */ + size_t h_srcaddrlen; + struct rpc_clnt *h_rpcclnt; /* RPC client to talk to peer */ + char *h_name; /* remote hostname */ + u32 h_version; /* interface version */ + unsigned short h_proto; /* transport proto */ + unsigned short h_reclaiming : 1, + h_server : 1, /* server side, not client side */ + h_noresvport : 1, + h_inuse : 1; + wait_queue_head_t h_gracewait; /* wait while reclaiming */ + struct rw_semaphore h_rwsem; /* Reboot recovery lock */ + u32 h_state; /* pseudo-state counter */ + u32 h_nsmstate; /* true remote NSM state */ + u32 h_pidcount; /* Pseudopids */ + atomic_t h_count; /* reference count */ + struct mutex h_mutex; /* mutex for pmap binding */ + unsigned long h_nextrebind; /* next portmap call */ + unsigned long h_expires; /* eligible for GC */ + struct list_head h_lockowners; /* Lockowners for the client */ + spinlock_t h_lock; + struct list_head h_granted; /* Locks in GRANTED state */ + struct list_head h_reclaim; /* Locks in RECLAIM state */ + struct nsm_handle *h_nsmhandle; /* NSM status handle */ + char *h_addrbuf; /* address eyecatcher */ + struct net *net; /* host net */ +}; + +/* + * The largest string sm_addrbuf should hold is a full-size IPv6 address + * (no "::" anywhere) with a scope ID. The buffer size is computed to + * hold eight groups of colon-separated four-hex-digit numbers, a + * percent sign, a scope id (at most 32 bits, in decimal), and NUL. 
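 * (Worked out: eight groups of four hex digits are 8 * 4 = 32 characters plus
 *  7 separating colons, i.e. 39; the '%' sign plus a scope id of at most 10
 *  decimal digits adds 11; with the trailing NUL the define below comes to
 *  39 + 11 + 1 = 51 bytes.)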
+ */ +#define NSM_ADDRBUF ((8 * 4 + 7) + (1 + 10) + 1) + +struct nsm_handle { + struct list_head sm_link; + atomic_t sm_count; + char *sm_mon_name; + char *sm_name; + struct sockaddr_storage sm_addr; + size_t sm_addrlen; + unsigned int sm_monitored : 1, + sm_sticky : 1; /* don't unmonitor */ + struct nsm_private sm_priv; + char sm_addrbuf[NSM_ADDRBUF]; +}; + +/* + * Rigorous type checking on sockaddr type conversions + */ +static inline struct sockaddr_in *nlm_addr_in(const struct nlm_host *host) +{ + return (struct sockaddr_in *)&host->h_addr; +} + +static inline struct sockaddr *nlm_addr(const struct nlm_host *host) +{ + return (struct sockaddr *)&host->h_addr; +} + +static inline struct sockaddr_in *nlm_srcaddr_in(const struct nlm_host *host) +{ + return (struct sockaddr_in *)&host->h_srcaddr; +} + +static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host) +{ + return (struct sockaddr *)&host->h_srcaddr; +} + +/* + * Map an fl_owner_t into a unique 32-bit "pid" + */ +struct nlm_lockowner { + struct list_head list; + atomic_t count; + + struct nlm_host *host; + fl_owner_t owner; + uint32_t pid; +}; + +struct nlm_wait; + +/* + * Memory chunk for NLM client RPC request. + */ +#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) +struct nlm_rqst { + atomic_t a_count; + unsigned int a_flags; /* initial RPC task flags */ + struct nlm_host * a_host; /* host handle */ + struct nlm_args a_args; /* arguments */ + struct nlm_res a_res; /* result */ + struct nlm_block * a_block; + unsigned int a_retries; /* Retry count */ + u8 a_owner[NLMCLNT_OHSIZE]; +}; + +/* + * This struct describes a file held open by lockd on behalf of + * an NFS client. + */ +struct nlm_file { + struct hlist_node f_list; /* linked list */ + struct nfs_fh f_handle; /* NFS file handle */ + struct file * f_file; /* VFS file pointer */ + struct nlm_share * f_shares; /* DOS shares */ + struct list_head f_blocks; /* blocked locks */ + unsigned int f_locks; /* guesstimate # of locks */ + unsigned int f_count; /* reference count */ + struct mutex f_mutex; /* avoid concurrent access */ +}; + +/* + * This is a server block (i.e. a lock requested by some client which + * couldn't be granted because of a conflicting lock). 
+ */ +#define NLM_NEVER (~(unsigned long) 0) +/* timeout on non-blocking call: */ +#define NLM_TIMEOUT (7 * HZ) + +struct nlm_block { + struct kref b_count; /* Reference count */ + struct list_head b_list; /* linked list of all blocks */ + struct list_head b_flist; /* linked list (per file) */ + struct nlm_rqst * b_call; /* RPC args & callback info */ + struct svc_serv * b_daemon; /* NLM service */ + struct nlm_host * b_host; /* host handle for RPC clnt */ + unsigned long b_when; /* next re-xmit */ + unsigned int b_id; /* block id */ + unsigned char b_granted; /* VFS granted lock */ + struct nlm_file * b_file; /* file in question */ + struct cache_req * b_cache_req; /* deferred request handling */ + struct file_lock * b_fl; /* set for GETLK */ + struct cache_deferred_req * b_deferred_req; + unsigned int b_flags; /* block flags */ +#define B_QUEUED 1 /* lock queued */ +#define B_GOT_CALLBACK 2 /* got lock or conflicting lock */ +#define B_TIMED_OUT 4 /* filesystem too slow to respond */ +}; + +/* + * Global variables + */ +extern const struct rpc_program nlm_program; +extern struct svc_procedure nlmsvc_procedures[]; +#ifdef CONFIG_LOCKD_V4 +extern struct svc_procedure nlmsvc_procedures4[]; +#endif +extern int nlmsvc_grace_period; +extern unsigned long nlmsvc_timeout; +extern bool nsm_use_hostnames; +extern u32 nsm_local_state; + +/* + * Lockd client functions + */ +struct nlm_rqst * nlm_alloc_call(struct nlm_host *host); +int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *); +int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *); +void nlmclnt_release_call(struct nlm_rqst *); +struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl); +void nlmclnt_finish_block(struct nlm_wait *block); +int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout); +__be32 nlmclnt_grant(const struct sockaddr *addr, + const struct nlm_lock *lock); +void nlmclnt_recovery(struct nlm_host *); +int nlmclnt_reclaim(struct nlm_host *, struct file_lock *); +void nlmclnt_next_cookie(struct nlm_cookie *); + +/* + * Host cache + */ +struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, + const size_t salen, + const unsigned short protocol, + const u32 version, + const char *hostname, + int noresvport, + struct net *net); +void nlmclnt_release_host(struct nlm_host *); +struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, + const char *hostname, + const size_t hostname_len); +void nlmsvc_release_host(struct nlm_host *); +struct rpc_clnt * nlm_bind_host(struct nlm_host *); +void nlm_rebind_host(struct nlm_host *); +struct nlm_host * nlm_get_host(struct nlm_host *); +void nlm_shutdown_hosts(void); +void nlm_shutdown_hosts_net(struct net *net); +void nlm_host_rebooted(const struct nlm_reboot *); + +/* + * Host monitoring + */ +int nsm_monitor(const struct nlm_host *host); +void nsm_unmonitor(const struct nlm_host *host); + +struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, + const size_t salen, + const char *hostname, + const size_t hostname_len); +struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info); +void nsm_release(struct nsm_handle *nsm); + +/* + * This is used in garbage collection and resource reclaim + * A return value != 0 means destroy the lock/block/share + */ +typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref); + +/* + * Server-side lock handling + */ +__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, + struct nlm_host *, struct nlm_lock *, int, + struct 
nlm_cookie *, int); +__be32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *); +__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, + struct nlm_host *, struct nlm_lock *, + struct nlm_lock *, struct nlm_cookie *); +__be32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *); +unsigned long nlmsvc_retry_blocked(void); +void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, + nlm_host_match_fn_t match); +void nlmsvc_grant_reply(struct nlm_cookie *, __be32); +void nlmsvc_release_call(struct nlm_rqst *); + +/* + * File handling for the server personality + */ +__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, + struct nfs_fh *); +void nlm_release_file(struct nlm_file *); +void nlmsvc_mark_resources(void); +void nlmsvc_free_host_resources(struct nlm_host *); +void nlmsvc_invalidate_all(void); + +/* + * Cluster failover support + */ +int nlmsvc_unlock_all_by_sb(struct super_block *sb); +int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); + +static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) +{ + return file->f_file->f_path.dentry->d_inode; +} + +static inline int __nlm_privileged_request4(const struct sockaddr *sap) +{ + const struct sockaddr_in *sin = (struct sockaddr_in *)sap; + + if (ntohs(sin->sin_port) > 1023) + return 0; + + return ipv4_is_loopback(sin->sin_addr.s_addr); +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline int __nlm_privileged_request6(const struct sockaddr *sap) +{ + const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; + + if (ntohs(sin6->sin6_port) > 1023) + return 0; + + if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) + return ipv4_is_loopback(sin6->sin6_addr.s6_addr32[3]); + + return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; +} +#else /* IS_ENABLED(CONFIG_IPV6) */ +static inline int __nlm_privileged_request6(const struct sockaddr *sap) +{ + return 0; +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + +/* + * Ensure incoming requests are from local privileged callers. + * + * Return TRUE if sender is local and is connecting via a privileged port; + * otherwise return FALSE. + */ +static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) +{ + const struct sockaddr *sap = svc_addr(rqstp); + + switch (sap->sa_family) { + case AF_INET: + return __nlm_privileged_request4(sap); + case AF_INET6: + return __nlm_privileged_request6(sap); + default: + return 0; + } +} + +/* + * Compare two NLM locks. + * When the second lock is of type F_UNLCK, this acts like a wildcard. + */ +static inline int nlm_compare_locks(const struct file_lock *fl1, + const struct file_lock *fl2) +{ + return fl1->fl_pid == fl2->fl_pid + && fl1->fl_owner == fl2->fl_owner + && fl1->fl_start == fl2->fl_start + && fl1->fl_end == fl2->fl_end + &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK); +} + +extern const struct lock_manager_operations nlmsvc_lock_operations; + +#endif /* __KERNEL__ */ + +#endif /* LINUX_LOCKD_LOCKD_H */ diff --git a/include/linux/lockd/nlm.h b/include/linux/lockd/nlm.h new file mode 100644 index 00000000..d9d46e44 --- /dev/null +++ b/include/linux/lockd/nlm.h @@ -0,0 +1,57 @@ +/* + * linux/include/linux/lockd/nlm.h + * + * Declarations for the Network Lock Manager protocol. 
+ * + * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef LINUX_LOCKD_NLM_H +#define LINUX_LOCKD_NLM_H + + +/* Maximum file offset in file_lock.fl_end */ +# define NLM_OFFSET_MAX ((s32) 0x7fffffff) +# define NLM4_OFFSET_MAX ((s64) ((~(u64)0) >> 1)) + +/* Return states for NLM */ +enum { + NLM_LCK_GRANTED = 0, + NLM_LCK_DENIED = 1, + NLM_LCK_DENIED_NOLOCKS = 2, + NLM_LCK_BLOCKED = 3, + NLM_LCK_DENIED_GRACE_PERIOD = 4, +#ifdef CONFIG_LOCKD_V4 + NLM_DEADLCK = 5, + NLM_ROFS = 6, + NLM_STALE_FH = 7, + NLM_FBIG = 8, + NLM_FAILED = 9, +#endif +}; + +#define NLM_PROGRAM 100021 + +#define NLMPROC_NULL 0 +#define NLMPROC_TEST 1 +#define NLMPROC_LOCK 2 +#define NLMPROC_CANCEL 3 +#define NLMPROC_UNLOCK 4 +#define NLMPROC_GRANTED 5 +#define NLMPROC_TEST_MSG 6 +#define NLMPROC_LOCK_MSG 7 +#define NLMPROC_CANCEL_MSG 8 +#define NLMPROC_UNLOCK_MSG 9 +#define NLMPROC_GRANTED_MSG 10 +#define NLMPROC_TEST_RES 11 +#define NLMPROC_LOCK_RES 12 +#define NLMPROC_CANCEL_RES 13 +#define NLMPROC_UNLOCK_RES 14 +#define NLMPROC_GRANTED_RES 15 +#define NLMPROC_NSM_NOTIFY 16 /* statd callback */ +#define NLMPROC_SHARE 20 +#define NLMPROC_UNSHARE 21 +#define NLMPROC_NM_LOCK 22 +#define NLMPROC_FREE_ALL 23 + +#endif /* LINUX_LOCKD_NLM_H */ diff --git a/include/linux/lockd/share.h b/include/linux/lockd/share.h new file mode 100644 index 00000000..630c5bf6 --- /dev/null +++ b/include/linux/lockd/share.h @@ -0,0 +1,31 @@ +/* + * linux/include/linux/lockd/share.h + * + * DOS share management for lockd. + * + * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef LINUX_LOCKD_SHARE_H +#define LINUX_LOCKD_SHARE_H + +/* + * DOS share for a specific file + */ +struct nlm_share { + struct nlm_share * s_next; /* linked list */ + struct nlm_host * s_host; /* client host */ + struct nlm_file * s_file; /* shared file */ + struct xdr_netobj s_owner; /* owner handle */ + u32 s_access; /* access mode */ + u32 s_mode; /* deny mode */ +}; + +__be32 nlmsvc_share_file(struct nlm_host *, struct nlm_file *, + struct nlm_args *); +__be32 nlmsvc_unshare_file(struct nlm_host *, struct nlm_file *, + struct nlm_args *); +void nlmsvc_traverse_shares(struct nlm_host *, struct nlm_file *, + nlm_host_match_fn_t); + +#endif /* LINUX_LOCKD_SHARE_H */ diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h new file mode 100644 index 00000000..d39ed1cc --- /dev/null +++ b/include/linux/lockd/xdr.h @@ -0,0 +1,118 @@ +/* + * linux/include/linux/lockd/xdr.h + * + * XDR types for the NLM protocol + * + * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef LOCKD_XDR_H +#define LOCKD_XDR_H + +#include <linux/fs.h> +#include <linux/nfs.h> +#include <linux/sunrpc/xdr.h> + +#define SM_MAXSTRLEN 1024 +#define SM_PRIV_SIZE 16 + +struct nsm_private { + unsigned char data[SM_PRIV_SIZE]; +}; + +struct svc_rqst; + +#define NLM_MAXCOOKIELEN 32 +#define NLM_MAXSTRLEN 1024 + +#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED) +#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED) +#define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS) +#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED) +#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD) + +#define nlm_drop_reply cpu_to_be32(30000) + +/* Lock info passed via NLM */ +struct nlm_lock { + char * caller; + unsigned int len; /* length of "caller" */ + struct nfs_fh fh; + struct xdr_netobj oh; + u32 svid; + struct file_lock fl; +}; + +/* + * NLM cookies. Technically they can be 1K, but Linux only uses 8 bytes. 
+ * FreeBSD uses 16, Apple Mac OS X 10.3 uses 20. Therefore we set it to + * 32 bytes. + */ + +struct nlm_cookie +{ + unsigned char data[NLM_MAXCOOKIELEN]; + unsigned int len; +}; + +/* + * Generic lockd arguments for all but sm_notify + */ +struct nlm_args { + struct nlm_cookie cookie; + struct nlm_lock lock; + u32 block; + u32 reclaim; + u32 state; + u32 monitor; + u32 fsm_access; + u32 fsm_mode; +}; + +typedef struct nlm_args nlm_args; + +/* + * Generic lockd result + */ +struct nlm_res { + struct nlm_cookie cookie; + __be32 status; + struct nlm_lock lock; +}; + +/* + * statd callback when client has rebooted + */ +struct nlm_reboot { + char *mon; + unsigned int len; + u32 state; + struct nsm_private priv; +}; + +/* + * Contents of statd callback when monitored host rebooted + */ +#define NLMSVC_XDRSIZE sizeof(struct nlm_args) + +int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlmsvc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *); +int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlmsvc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *); +int nlmsvc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *); +int nlmsvc_encode_void(struct svc_rqst *, __be32 *, void *); +int nlmsvc_decode_void(struct svc_rqst *, __be32 *, void *); +int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *); +int nlmsvc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *); +/* +int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); + */ + +#endif /* LOCKD_XDR_H */ diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h new file mode 100644 index 00000000..e58c88b5 --- /dev/null +++ b/include/linux/lockd/xdr4.h @@ -0,0 +1,47 @@ +/* + * linux/include/linux/lockd/xdr4.h + * + * XDR types for the NLM protocol + * + * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef LOCKD_XDR4_H +#define LOCKD_XDR4_H + +#include <linux/fs.h> +#include <linux/nfs.h> +#include <linux/sunrpc/xdr.h> +#include <linux/lockd/xdr.h> + +/* error codes new to NLMv4 */ +#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK) +#define nlm4_rofs cpu_to_be32(NLM_ROFS) +#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH) +#define nlm4_fbig cpu_to_be32(NLM_FBIG) +#define nlm4_failed cpu_to_be32(NLM_FAILED) + + + +int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlm4svc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *); +int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlm4svc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *); +int nlm4svc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *); +int nlm4svc_encode_void(struct svc_rqst *, __be32 *, void *); +int nlm4svc_decode_void(struct svc_rqst *, __be32 *, void *); +int 
nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *); +int nlm4svc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *); +int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *); +/* +int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); +int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); + */ +extern const struct rpc_version nlm_version4; + +#endif /* LOCKD_XDR4_H */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h new file mode 100644 index 00000000..d36619ea --- /dev/null +++ b/include/linux/lockdep.h @@ -0,0 +1,558 @@ +/* + * Runtime locking correctness validator + * + * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + * + * see Documentation/lockdep-design.txt for more details. + */ +#ifndef __LINUX_LOCKDEP_H +#define __LINUX_LOCKDEP_H + +struct task_struct; +struct lockdep_map; + +/* for sysctl */ +extern int prove_locking; +extern int lock_stat; + +#ifdef CONFIG_LOCKDEP + +#include <linux/linkage.h> +#include <linux/list.h> +#include <linux/debug_locks.h> +#include <linux/stacktrace.h> + +/* + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need + * the total number of states... :-( + */ +#define XXX_LOCK_USAGE_STATES (1+3*4) + +#define MAX_LOCKDEP_SUBCLASSES 8UL + +/* + * NR_LOCKDEP_CACHING_CLASSES ... Number of classes + * cached in the instance of lockdep_map + * + * Currently main class (subclass == 0) and signle depth subclass + * are cached in lockdep_map. This optimization is mainly targeting + * on rq->lock. double_rq_lock() acquires this highly competitive with + * single depth. + */ +#define NR_LOCKDEP_CACHING_CLASSES 2 + +/* + * Lock-classes are keyed via unique addresses, by embedding the + * lockclass-key into the kernel (or module) .data section. (For + * static locks we use the lock address itself as the key.) + */ +struct lockdep_subclass_key { + char __one_byte; +} __attribute__ ((__packed__)); + +struct lock_class_key { + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; +}; + +extern struct lock_class_key __lockdep_no_validate__; + +#define LOCKSTAT_POINTS 4 + +/* + * The lock-class itself: + */ +struct lock_class { + /* + * class-hash: + */ + struct list_head hash_entry; + + /* + * global list of all lock-classes: + */ + struct list_head lock_entry; + + struct lockdep_subclass_key *key; + unsigned int subclass; + unsigned int dep_gen_id; + + /* + * IRQ/softirq usage tracking bits: + */ + unsigned long usage_mask; + struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; + + /* + * These fields represent a directed graph of lock dependencies, + * to every node we attach a list of "forward" and a list of + * "backward" graph nodes. 
+ */ + struct list_head locks_after, locks_before; + + /* + * Generation counter, when doing certain classes of graph walking, + * to ensure that we check one node only once: + */ + unsigned int version; + + /* + * Statistics counter: + */ + unsigned long ops; + + const char *name; + int name_version; + +#ifdef CONFIG_LOCK_STAT + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; +#endif +}; + +#ifdef CONFIG_LOCK_STAT +struct lock_time { + s64 min; + s64 max; + s64 total; + unsigned long nr; +}; + +enum bounce_type { + bounce_acquired_write, + bounce_acquired_read, + bounce_contended_write, + bounce_contended_read, + nr_bounce_types, + + bounce_acquired = bounce_acquired_write, + bounce_contended = bounce_contended_write, +}; + +struct lock_class_stats { + unsigned long contention_point[4]; + unsigned long contending_point[4]; + struct lock_time read_waittime; + struct lock_time write_waittime; + struct lock_time read_holdtime; + struct lock_time write_holdtime; + unsigned long bounces[nr_bounce_types]; +}; + +struct lock_class_stats lock_stats(struct lock_class *class); +void clear_lock_stats(struct lock_class *class); +#endif + +/* + * Map the lock object (the lock instance) to the lock-class object. + * This is embedded into specific lock instances: + */ +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; + const char *name; +#ifdef CONFIG_LOCK_STAT + int cpu; + unsigned long ip; +#endif +}; + +/* + * Every lock has a list of other locks that were taken after it. + * We only grow the list, never remove from it: + */ +struct lock_list { + struct list_head entry; + struct lock_class *class; + struct stack_trace trace; + int distance; + + /* + * The parent field is used to implement breadth-first search, and the + * bit 0 is reused to indicate if the lock has been accessed in BFS. + */ + struct lock_list *parent; +}; + +/* + * We record lock dependency chains, so that we can cache them: + */ +struct lock_chain { + u8 irq_context; + u8 depth; + u16 base; + struct list_head entry; + u64 chain_key; +}; + +#define MAX_LOCKDEP_KEYS_BITS 13 +/* + * Subtract one because we offset hlock->class_idx by 1 in order + * to make 0 mean no class. This avoids overflowing the class_idx + * bitfield and hitting the BUG in hlock_class(). + */ +#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) + +struct held_lock { + /* + * One-way hash of the dependency chain up to this point. We + * hash the hashes step by step as the dependency chain grows. + * + * We use it for dependency-caching and we skip detection + * passes and dependency-updates if there is a cache-hit, so + * it is absolutely critical for 100% coverage of the validator + * to have a unique key value for every unique dependency path + * that can occur in the system, to make a unique hash value + * as likely as possible - hence the 64-bit width. 
+ * + * The task struct holds the current hash value (initialized + * with zero), here we store the previous hash value: + */ + u64 prev_chain_key; + unsigned long acquire_ip; + struct lockdep_map *instance; + struct lockdep_map *nest_lock; +#ifdef CONFIG_LOCK_STAT + u64 waittime_stamp; + u64 holdtime_stamp; +#endif + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; + /* + * The lock-stack is unified in that the lock chains of interrupt + * contexts nest ontop of process context chains, but we 'separate' + * the hashes by starting with 0 if we cross into an interrupt + * context, and we also keep do not add cross-context lock + * dependencies - the lock usage graph walking covers that area + * anyway, and we'd just unnecessarily increase the number of + * dependencies otherwise. [Note: hardirq and softirq contexts + * are separated from each other too.] + * + * The following field is used to detect when we cross into an + * interrupt context: + */ + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; /* 16 bits */ + + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:2; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; + unsigned int references:11; /* 32 bits */ +}; + +/* + * Initialization, self-test and debugging-output methods: + */ +extern void lockdep_init(void); +extern void lockdep_info(void); +extern void lockdep_reset(void); +extern void lockdep_reset_lock(struct lockdep_map *lock); +extern void lockdep_free_key_range(void *start, unsigned long size); +extern void lockdep_sys_exit(void); + +extern void lockdep_off(void); +extern void lockdep_on(void); + +/* + * These methods are used by specific locking variants (spinlocks, + * rwlocks, mutexes and rwsems) to pass init/acquire/release events + * to lockdep: + */ + +extern void lockdep_init_map(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass); + +/* + * To initialize a lockdep_map statically use this macro. + * Note that _name must not be NULL. + */ +#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ + { .name = (_name), .key = (void *)(_key), } + +/* + * Reinitialize a lock key - for cases where there is special locking or + * special initialization of locks so that the validator gets the scope + * of dependencies wrong: they are either too broad (they need a class-split) + * or they are too narrow (they suffer from a false class-split): + */ +#define lockdep_set_class(lock, key) \ + lockdep_init_map(&(lock)->dep_map, #key, key, 0) +#define lockdep_set_class_and_name(lock, key, name) \ + lockdep_init_map(&(lock)->dep_map, name, key, 0) +#define lockdep_set_class_and_subclass(lock, key, sub) \ + lockdep_init_map(&(lock)->dep_map, #key, key, sub) +#define lockdep_set_subclass(lock, sub) \ + lockdep_init_map(&(lock)->dep_map, #lock, \ + (lock)->dep_map.key, sub) + +#define lockdep_set_novalidate_class(lock) \ + lockdep_set_class(lock, &__lockdep_no_validate__) +/* + * Compare locking classes + */ +#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key) + +static inline int lockdep_match_key(struct lockdep_map *lock, + struct lock_class_key *key) +{ + return lock->key == key; +} + +/* + * Acquire a lock. + * + * Values for "read": + * + * 0: exclusive (write) acquire + * 1: read-acquire (no recursion allowed) + * 2: read-acquire with same-instance recursion allowed + * + * Values for check: + * + * 0: disabled + * 1: simple checks (freeing, held-at-exit-time, etc.) 
+ * 2: full validation + */ +extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip); + +extern void lock_release(struct lockdep_map *lock, int nested, + unsigned long ip); + +#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) + +extern int lock_is_held(struct lockdep_map *lock); + +extern void lock_set_class(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, unsigned int subclass, + unsigned long ip); + +static inline void lock_set_subclass(struct lockdep_map *lock, + unsigned int subclass, unsigned long ip) +{ + lock_set_class(lock, lock->name, lock->key, subclass, ip); +} + +extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask); +extern void lockdep_clear_current_reclaim_state(void); +extern void lockdep_trace_alloc(gfp_t mask); + +# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, + +#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) + +#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) + +#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) + +#else /* !LOCKDEP */ + +static inline void lockdep_off(void) +{ +} + +static inline void lockdep_on(void) +{ +} + +# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) +# define lock_release(l, n, i) do { } while (0) +# define lock_set_class(l, n, k, s, i) do { } while (0) +# define lock_set_subclass(l, s, i) do { } while (0) +# define lockdep_set_current_reclaim_state(g) do { } while (0) +# define lockdep_clear_current_reclaim_state() do { } while (0) +# define lockdep_trace_alloc(g) do { } while (0) +# define lockdep_init() do { } while (0) +# define lockdep_info() do { } while (0) +# define lockdep_init_map(lock, name, key, sub) \ + do { (void)(name); (void)(key); } while (0) +# define lockdep_set_class(lock, key) do { (void)(key); } while (0) +# define lockdep_set_class_and_name(lock, key, name) \ + do { (void)(key); (void)(name); } while (0) +#define lockdep_set_class_and_subclass(lock, key, sub) \ + do { (void)(key); } while (0) +#define lockdep_set_subclass(lock, sub) do { } while (0) + +#define lockdep_set_novalidate_class(lock) do { } while (0) + +/* + * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP + * case since the result is not well defined and the caller should rather + * #ifdef the call himself. 
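As a minimal illustration of the annotation macros above (lockdep_set_class(), lockdep_assert_held()), a hypothetical driver might use them as follows; my_dev, my_dev_lock_key and both functions are invented for this sketch.

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct my_dev {
		spinlock_t	lock;
		int		count;
	};

	/* One key per lock class, placed in .data as described above. */
	static struct lock_class_key my_dev_lock_key;

	static void my_dev_init(struct my_dev *dev)
	{
		spin_lock_init(&dev->lock);
		/* Give my_dev locks their own class instead of the default one. */
		lockdep_set_class(&dev->lock, &my_dev_lock_key);
	}

	static void my_dev_account(struct my_dev *dev)
	{
		/* With CONFIG_LOCKDEP this warns if the caller forgot dev->lock. */
		lockdep_assert_held(&dev->lock);
		dev->count++;
	}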
+ */ + +# define INIT_LOCKDEP +# define lockdep_reset() do { debug_locks = 1; } while (0) +# define lockdep_free_key_range(start, size) do { } while (0) +# define lockdep_sys_exit() do { } while (0) +/* + * The class key takes no space if lockdep is disabled: + */ +struct lock_class_key { }; + +#define lockdep_depth(tsk) (0) + +#define lockdep_assert_held(l) do { } while (0) + +#define lockdep_recursing(tsk) (0) + +#endif /* !LOCKDEP */ + +#ifdef CONFIG_LOCK_STAT + +extern void lock_contended(struct lockdep_map *lock, unsigned long ip); +extern void lock_acquired(struct lockdep_map *lock, unsigned long ip); + +#define LOCK_CONTENDED(_lock, try, lock) \ +do { \ + if (!try(_lock)) { \ + lock_contended(&(_lock)->dep_map, _RET_IP_); \ + lock(_lock); \ + } \ + lock_acquired(&(_lock)->dep_map, _RET_IP_); \ +} while (0) + +#else /* CONFIG_LOCK_STAT */ + +#define lock_contended(lockdep_map, ip) do {} while (0) +#define lock_acquired(lockdep_map, ip) do {} while (0) + +#define LOCK_CONTENDED(_lock, try, lock) \ + lock(_lock) + +#endif /* CONFIG_LOCK_STAT */ + +#ifdef CONFIG_LOCKDEP + +/* + * On lockdep we dont want the hand-coded irq-enable of + * _raw_*_lock_flags() code, because lockdep assumes + * that interrupts are not re-enabled during lock-acquire: + */ +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ + LOCK_CONTENDED((_lock), (try), (lock)) + +#else /* CONFIG_LOCKDEP */ + +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ + lockfl((_lock), (flags)) + +#endif /* CONFIG_LOCKDEP */ + +#ifdef CONFIG_TRACE_IRQFLAGS +extern void print_irqtrace_events(struct task_struct *curr); +#else +static inline void print_irqtrace_events(struct task_struct *curr) +{ +} +#endif + +/* + * For trivial one-depth nesting of a lock-class, the following + * global define can be used. (Subsystems with multiple levels + * of nesting should define their own lock-nesting subclasses.) 
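A typical use of the SINGLE_DEPTH_NESTING constant defined just below is taking two locks of the same class in a fixed parent-then-child order; my_node and reparent() in this sketch are invented for illustration.

	#include <linux/mutex.h>
	#include <linux/lockdep.h>

	struct my_node {
		struct mutex	lock;
		struct my_node	*parent;
	};

	static void reparent(struct my_node *parent, struct my_node *child)
	{
		mutex_lock(&parent->lock);
		/* Same lock class, one level deeper: tell lockdep it is intended. */
		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

		child->parent = parent;

		mutex_unlock(&child->lock);
		mutex_unlock(&parent->lock);
	}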
+ */ +#define SINGLE_DEPTH_NESTING 1 + +/* + * Map the dependency ops to NOP or to real lockdep ops, depending + * on the per lock-class debug mode: + */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) +# else +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# endif +# define spin_release(l, n, i) lock_release(l, n, i) +#else +# define spin_acquire(l, s, t, i) do { } while (0) +# define spin_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) +# else +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) +# endif +# define rwlock_release(l, n, i) lock_release(l, n, i) +#else +# define rwlock_acquire(l, s, t, i) do { } while (0) +# define rwlock_acquire_read(l, s, t, i) do { } while (0) +# define rwlock_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) +# else +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) +# endif +# define mutex_release(l, n, i) lock_release(l, n, i) +#else +# define mutex_acquire(l, s, t, i) do { } while (0) +# define mutex_acquire_nest(l, s, t, n, i) do { } while (0) +# define mutex_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) +# else +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) +# endif +# define rwsem_release(l, n, i) lock_release(l, n, i) +#else +# define rwsem_acquire(l, s, t, i) do { } while (0) +# define rwsem_acquire_read(l, s, t, i) do { } while (0) +# define rwsem_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) +# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_) +# else +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) +# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_) +# endif +# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) +#else +# define lock_map_acquire(l) do { } while (0) +# define lock_map_acquire_read(l) do { } while (0) +# define lock_map_release(l) do { } while (0) +#endif + +#ifdef CONFIG_PROVE_LOCKING +# define might_lock(lock) \ +do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ +} while (0) +# define might_lock_read(lock) \ +do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 
0, 1, 2, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ +} while (0) +#else +# define might_lock(lock) do { } while (0) +# define might_lock_read(lock) do { } while (0) +#endif + +#ifdef CONFIG_PROVE_RCU +void lockdep_rcu_suspicious(const char *file, const int line, const char *s); +#endif + +#endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/log2.h b/include/linux/log2.h new file mode 100644 index 00000000..fd7ff3d9 --- /dev/null +++ b/include/linux/log2.h @@ -0,0 +1,208 @@ +/* Integer base 2 logarithm calculation + * + * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_LOG2_H +#define _LINUX_LOG2_H + +#include <linux/types.h> +#include <linux/bitops.h> + +/* + * deal with unrepresentable constant logarithms + */ +extern __attribute__((const, noreturn)) +int ____ilog2_NaN(void); + +/* + * non-constant log of base 2 calculators + * - the arch may override these in asm/bitops.h if they can be implemented + * more efficiently than using fls() and fls64() + * - the arch is not required to handle n==0 if implementing the fallback + */ +#ifndef CONFIG_ARCH_HAS_ILOG2_U32 +static inline __attribute__((const)) +int __ilog2_u32(u32 n) +{ + return fls(n) - 1; +} +#endif + +#ifndef CONFIG_ARCH_HAS_ILOG2_U64 +static inline __attribute__((const)) +int __ilog2_u64(u64 n) +{ + return fls64(n) - 1; +} +#endif + +/* + * Determine whether some value is a power of two, where zero is + * *not* considered a power of two. + */ + +static inline __attribute__((const)) +bool is_power_of_2(unsigned long n) +{ + return (n != 0 && ((n & (n - 1)) == 0)); +} + +/* + * round up to nearest power of two + */ +static inline __attribute__((const)) +unsigned long __roundup_pow_of_two(unsigned long n) +{ + return 1UL << fls_long(n - 1); +} + +/* + * round down to nearest power of two + */ +static inline __attribute__((const)) +unsigned long __rounddown_pow_of_two(unsigned long n) +{ + return 1UL << (fls_long(n) - 1); +} + +/** + * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value + * @n - parameter + * + * constant-capable log of base 2 calculation + * - this can be used to initialise global variables from constant data, hence + * the massive ternary operator construction + * + * selects the appropriately-sized optimised version depending on sizeof(n) + */ +#define ilog2(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (n) < 1 ? ____ilog2_NaN() : \ + (n) & (1ULL << 63) ? 63 : \ + (n) & (1ULL << 62) ? 62 : \ + (n) & (1ULL << 61) ? 61 : \ + (n) & (1ULL << 60) ? 60 : \ + (n) & (1ULL << 59) ? 59 : \ + (n) & (1ULL << 58) ? 58 : \ + (n) & (1ULL << 57) ? 57 : \ + (n) & (1ULL << 56) ? 56 : \ + (n) & (1ULL << 55) ? 55 : \ + (n) & (1ULL << 54) ? 54 : \ + (n) & (1ULL << 53) ? 53 : \ + (n) & (1ULL << 52) ? 52 : \ + (n) & (1ULL << 51) ? 51 : \ + (n) & (1ULL << 50) ? 50 : \ + (n) & (1ULL << 49) ? 49 : \ + (n) & (1ULL << 48) ? 48 : \ + (n) & (1ULL << 47) ? 47 : \ + (n) & (1ULL << 46) ? 46 : \ + (n) & (1ULL << 45) ? 45 : \ + (n) & (1ULL << 44) ? 44 : \ + (n) & (1ULL << 43) ? 43 : \ + (n) & (1ULL << 42) ? 42 : \ + (n) & (1ULL << 41) ? 41 : \ + (n) & (1ULL << 40) ? 40 : \ + (n) & (1ULL << 39) ? 39 : \ + (n) & (1ULL << 38) ? 38 : \ + (n) & (1ULL << 37) ? 
37 : \ + (n) & (1ULL << 36) ? 36 : \ + (n) & (1ULL << 35) ? 35 : \ + (n) & (1ULL << 34) ? 34 : \ + (n) & (1ULL << 33) ? 33 : \ + (n) & (1ULL << 32) ? 32 : \ + (n) & (1ULL << 31) ? 31 : \ + (n) & (1ULL << 30) ? 30 : \ + (n) & (1ULL << 29) ? 29 : \ + (n) & (1ULL << 28) ? 28 : \ + (n) & (1ULL << 27) ? 27 : \ + (n) & (1ULL << 26) ? 26 : \ + (n) & (1ULL << 25) ? 25 : \ + (n) & (1ULL << 24) ? 24 : \ + (n) & (1ULL << 23) ? 23 : \ + (n) & (1ULL << 22) ? 22 : \ + (n) & (1ULL << 21) ? 21 : \ + (n) & (1ULL << 20) ? 20 : \ + (n) & (1ULL << 19) ? 19 : \ + (n) & (1ULL << 18) ? 18 : \ + (n) & (1ULL << 17) ? 17 : \ + (n) & (1ULL << 16) ? 16 : \ + (n) & (1ULL << 15) ? 15 : \ + (n) & (1ULL << 14) ? 14 : \ + (n) & (1ULL << 13) ? 13 : \ + (n) & (1ULL << 12) ? 12 : \ + (n) & (1ULL << 11) ? 11 : \ + (n) & (1ULL << 10) ? 10 : \ + (n) & (1ULL << 9) ? 9 : \ + (n) & (1ULL << 8) ? 8 : \ + (n) & (1ULL << 7) ? 7 : \ + (n) & (1ULL << 6) ? 6 : \ + (n) & (1ULL << 5) ? 5 : \ + (n) & (1ULL << 4) ? 4 : \ + (n) & (1ULL << 3) ? 3 : \ + (n) & (1ULL << 2) ? 2 : \ + (n) & (1ULL << 1) ? 1 : \ + (n) & (1ULL << 0) ? 0 : \ + ____ilog2_NaN() \ + ) : \ + (sizeof(n) <= 4) ? \ + __ilog2_u32(n) : \ + __ilog2_u64(n) \ + ) + +/** + * roundup_pow_of_two - round the given value up to nearest power of two + * @n - parameter + * + * round the given value up to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define roundup_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 1 : \ + (1UL << (ilog2((n) - 1) + 1)) \ + ) : \ + __roundup_pow_of_two(n) \ + ) + +/** + * rounddown_pow_of_two - round the given value down to nearest power of two + * @n - parameter + * + * round the given value down to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define rounddown_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (1UL << ilog2(n))) : \ + __rounddown_pow_of_two(n) \ + ) + +/** + * order_base_2 - calculate the (rounded up) base 2 order of the argument + * @n: parameter + * + * The first few values calculated by this routine: + * ob2(0) = 0 + * ob2(1) = 0 + * ob2(2) = 1 + * ob2(3) = 2 + * ob2(4) = 2 + * ob2(5) = 3 + * ... and so on. + */ + +#define order_base_2(n) ilog2(roundup_pow_of_two(n)) + +#endif /* _LINUX_LOG2_H */ diff --git a/include/linux/loop.h b/include/linux/loop.h new file mode 100644 index 00000000..11a41a8f --- /dev/null +++ b/include/linux/loop.h @@ -0,0 +1,167 @@ +#ifndef _LINUX_LOOP_H +#define _LINUX_LOOP_H + +/* + * include/linux/loop.h + * + * Written by Theodore Ts'o, 3/29/93. + * + * Copyright 1993 by Theodore Ts'o. Redistribution of this file is + * permitted under the GNU General Public License. 
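Because ilog2() and roundup_pow_of_two() above are constant-capable, they are commonly used to size static tables at compile time. A small hypothetical sketch: MY_HASH_* and my_hash_bucket() are invented, and hash_long() from <linux/hash.h> is assumed.

	#include <linux/log2.h>
	#include <linux/hash.h>
	#include <linux/list.h>

	#define MY_HASH_ENTRIES	600					/* arbitrary requirement */
	#define MY_HASH_SIZE	roundup_pow_of_two(MY_HASH_ENTRIES)	/* -> 1024 */
	#define MY_HASH_BITS	ilog2(MY_HASH_SIZE)			/* -> 10 */

	static struct hlist_head my_hash_table[MY_HASH_SIZE];

	static inline struct hlist_head *my_hash_bucket(unsigned long key)
	{
		/* hash_long() wants the number of bits, not the table size. */
		return &my_hash_table[hash_long(key, MY_HASH_BITS)];
	}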
+ */ + +#define LO_NAME_SIZE 64 +#define LO_KEY_SIZE 32 + +#ifdef __KERNEL__ +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> + +/* Possible states of device */ +enum { + Lo_unbound, + Lo_bound, + Lo_rundown, +}; + +struct loop_func_table; + +struct loop_device { + int lo_number; + int lo_refcnt; + loff_t lo_offset; + loff_t lo_sizelimit; + int lo_flags; + int (*transfer)(struct loop_device *, int cmd, + struct page *raw_page, unsigned raw_off, + struct page *loop_page, unsigned loop_off, + int size, sector_t real_block); + char lo_file_name[LO_NAME_SIZE]; + char lo_crypt_name[LO_NAME_SIZE]; + char lo_encrypt_key[LO_KEY_SIZE]; + int lo_encrypt_key_size; + struct loop_func_table *lo_encryption; + __u32 lo_init[2]; + uid_t lo_key_owner; /* Who set the key */ + int (*ioctl)(struct loop_device *, int cmd, + unsigned long arg); + + struct file * lo_backing_file; + struct block_device *lo_device; + unsigned lo_blocksize; + void *key_data; + + gfp_t old_gfp_mask; + + spinlock_t lo_lock; + struct bio_list lo_bio_list; + int lo_state; + struct mutex lo_ctl_mutex; + struct task_struct *lo_thread; + wait_queue_head_t lo_event; + + struct request_queue *lo_queue; + struct gendisk *lo_disk; +}; + +#endif /* __KERNEL__ */ + +/* + * Loop flags + */ +enum { + LO_FLAGS_READ_ONLY = 1, + LO_FLAGS_AUTOCLEAR = 4, + LO_FLAGS_PARTSCAN = 8, +}; + +#include <asm/posix_types.h> /* for __kernel_old_dev_t */ +#include <linux/types.h> /* for __u64 */ + +/* Backwards compatibility version */ +struct loop_info { + int lo_number; /* ioctl r/o */ + __kernel_old_dev_t lo_device; /* ioctl r/o */ + unsigned long lo_inode; /* ioctl r/o */ + __kernel_old_dev_t lo_rdevice; /* ioctl r/o */ + int lo_offset; + int lo_encrypt_type; + int lo_encrypt_key_size; /* ioctl w/o */ + int lo_flags; /* ioctl r/o */ + char lo_name[LO_NAME_SIZE]; + unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ + unsigned long lo_init[2]; + char reserved[4]; +}; + +struct loop_info64 { + __u64 lo_device; /* ioctl r/o */ + __u64 lo_inode; /* ioctl r/o */ + __u64 lo_rdevice; /* ioctl r/o */ + __u64 lo_offset; + __u64 lo_sizelimit;/* bytes, 0 == max available */ + __u32 lo_number; /* ioctl r/o */ + __u32 lo_encrypt_type; + __u32 lo_encrypt_key_size; /* ioctl w/o */ + __u32 lo_flags; /* ioctl r/o */ + __u8 lo_file_name[LO_NAME_SIZE]; + __u8 lo_crypt_name[LO_NAME_SIZE]; + __u8 lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ + __u64 lo_init[2]; +}; + +/* + * Loop filter types + */ + +#define LO_CRYPT_NONE 0 +#define LO_CRYPT_XOR 1 +#define LO_CRYPT_DES 2 +#define LO_CRYPT_FISH2 3 /* Twofish encryption */ +#define LO_CRYPT_BLOW 4 +#define LO_CRYPT_CAST128 5 +#define LO_CRYPT_IDEA 6 +#define LO_CRYPT_DUMMY 9 +#define LO_CRYPT_SKIPJACK 10 +#define LO_CRYPT_CRYPTOAPI 18 +#define MAX_LO_CRYPT 20 + +#ifdef __KERNEL__ +/* Support for loadable transfer modules */ +struct loop_func_table { + int number; /* filter type */ + int (*transfer)(struct loop_device *lo, int cmd, + struct page *raw_page, unsigned raw_off, + struct page *loop_page, unsigned loop_off, + int size, sector_t real_block); + int (*init)(struct loop_device *, const struct loop_info64 *); + /* release is called from loop_unregister_transfer or clr_fd */ + int (*release)(struct loop_device *); + int (*ioctl)(struct loop_device *, int cmd, unsigned long arg); + struct module *owner; +}; + +int loop_register_transfer(struct loop_func_table *funcs); +int loop_unregister_transfer(int number); + +#endif +/* + * IOCTL commands --- we will commandeer 
0x4C ('L') + */ + +#define LOOP_SET_FD 0x4C00 +#define LOOP_CLR_FD 0x4C01 +#define LOOP_SET_STATUS 0x4C02 +#define LOOP_GET_STATUS 0x4C03 +#define LOOP_SET_STATUS64 0x4C04 +#define LOOP_GET_STATUS64 0x4C05 +#define LOOP_CHANGE_FD 0x4C06 +#define LOOP_SET_CAPACITY 0x4C07 + +/* /dev/loop-control interface */ +#define LOOP_CTL_ADD 0x4C80 +#define LOOP_CTL_REMOVE 0x4C81 +#define LOOP_CTL_GET_FREE 0x4C82 +#endif diff --git a/include/linux/lp.h b/include/linux/lp.h new file mode 100644 index 00000000..0df024bf --- /dev/null +++ b/include/linux/lp.h @@ -0,0 +1,191 @@ +#ifndef _LINUX_LP_H +#define _LINUX_LP_H + +/* + * usr/include/linux/lp.h c.1991-1992 James Wiegand + * many modifications copyright (C) 1992 Michael K. Johnson + * Interrupt support added 1993 Nigel Gamble + * Removed 8255 status defines from inside __KERNEL__ Marcelo Tosatti + */ + +/* + * Per POSIX guidelines, this module reserves the LP and lp prefixes + * These are the lp_table[minor].flags flags... + */ +#define LP_EXIST 0x0001 +#define LP_SELEC 0x0002 +#define LP_BUSY 0x0004 +#define LP_BUSY_BIT_POS 2 +#define LP_OFFL 0x0008 +#define LP_NOPA 0x0010 +#define LP_ERR 0x0020 +#define LP_ABORT 0x0040 +#define LP_CAREFUL 0x0080 /* obsoleted -arca */ +#define LP_ABORTOPEN 0x0100 + +#define LP_TRUST_IRQ_ 0x0200 /* obsolete */ +#define LP_NO_REVERSE 0x0400 /* No reverse mode available. */ +#define LP_DATA_AVAIL 0x0800 /* Data is available. */ + +/* + * bit defines for 8255 status port + * base + 1 + * accessed with LP_S(minor), which gets the byte... + */ +#define LP_PBUSY 0x80 /* inverted input, active high */ +#define LP_PACK 0x40 /* unchanged input, active low */ +#define LP_POUTPA 0x20 /* unchanged input, active high */ +#define LP_PSELECD 0x10 /* unchanged input, active high */ +#define LP_PERRORP 0x08 /* unchanged input, active low */ + +/* timeout for each character. This is relative to bus cycles -- it + * is the count in a busy loop. THIS IS THE VALUE TO CHANGE if you + * have extremely slow printing, or if the machine seems to slow down + * a lot when you print. If you have slow printing, increase this + * number and recompile, and if your system gets bogged down, decrease + * this number. This can be changed with the tunelp(8) command as well. + */ + +#define LP_INIT_CHAR 1000 + +/* The parallel port specs apparently say that there needs to be + * a .5usec wait before and after the strobe. + */ + +#define LP_INIT_WAIT 1 + +/* This is the amount of time that the driver waits for the printer to + * catch up when the printer's buffer appears to be filled. If you + * want to tune this and have a fast printer (i.e. HPIIIP), decrease + * this number, and if you have a slow printer, increase this number. + * This is in hundredths of a second, the default 2 being .05 second. + * Or use the tunelp(8) command, which is especially nice if you want + * change back and forth between character and graphics printing, which + * are wildly different... + */ + +#define LP_INIT_TIME 2 + +/* IOCTL numbers */ +#define LPCHAR 0x0601 /* corresponds to LP_INIT_CHAR */ +#define LPTIME 0x0602 /* corresponds to LP_INIT_TIME */ +#define LPABORT 0x0604 /* call with TRUE arg to abort on error, + FALSE to retry. Default is retry. 
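From user space, the loop ioctls above are normally used by first asking /dev/loop-control for a free device (LOOP_CTL_GET_FREE) and then binding a backing file to it (LOOP_SET_FD); attach_loop() below is a made-up helper with most error handling omitted.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/loop.h>

	static int attach_loop(const char *image_path)
	{
		char dev[32];
		int ctl, num, lo, img;

		ctl = open("/dev/loop-control", O_RDWR);
		if (ctl < 0)
			return -1;
		num = ioctl(ctl, LOOP_CTL_GET_FREE);	/* index of first unused loopN */
		if (num < 0)
			return -1;

		snprintf(dev, sizeof(dev), "/dev/loop%d", num);
		lo = open(dev, O_RDWR);
		img = open(image_path, O_RDWR);
		if (lo < 0 || img < 0 || ioctl(lo, LOOP_SET_FD, img) < 0)
			return -1;

		return lo;	/* loop device fd, now backed by image_path */
	}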
*/ +#define LPSETIRQ 0x0605 /* call with new IRQ number, + or 0 for polling (no IRQ) */ +#define LPGETIRQ 0x0606 /* get the current IRQ number */ +#define LPWAIT 0x0608 /* corresponds to LP_INIT_WAIT */ +/* NOTE: LPCAREFUL is obsoleted and it' s always the default right now -arca */ +#define LPCAREFUL 0x0609 /* call with TRUE arg to require out-of-paper, off- + line, and error indicators good on all writes, + FALSE to ignore them. Default is ignore. */ +#define LPABORTOPEN 0x060a /* call with TRUE arg to abort open() on error, + FALSE to ignore error. Default is ignore. */ +#define LPGETSTATUS 0x060b /* return LP_S(minor) */ +#define LPRESET 0x060c /* reset printer */ +#ifdef LP_STATS +#define LPGETSTATS 0x060d /* get statistics (struct lp_stats) */ +#endif +#define LPGETFLAGS 0x060e /* get status flags */ +#define LPSETTIMEOUT 0x060f /* set parport timeout */ + +/* timeout for printk'ing a timeout, in jiffies (100ths of a second). + This is also used for re-checking error conditions if LP_ABORT is + not set. This is the default behavior. */ + +#define LP_TIMEOUT_INTERRUPT (60 * HZ) +#define LP_TIMEOUT_POLLED (10 * HZ) + +#ifdef __KERNEL__ + +#include <linux/wait.h> +#include <linux/mutex.h> + +/* Magic numbers for defining port-device mappings */ +#define LP_PARPORT_UNSPEC -4 +#define LP_PARPORT_AUTO -3 +#define LP_PARPORT_OFF -2 +#define LP_PARPORT_NONE -1 + +#define LP_F(minor) lp_table[(minor)].flags /* flags for busy, etc. */ +#define LP_CHAR(minor) lp_table[(minor)].chars /* busy timeout */ +#define LP_TIME(minor) lp_table[(minor)].time /* wait time */ +#define LP_WAIT(minor) lp_table[(minor)].wait /* strobe wait */ +#define LP_IRQ(minor) lp_table[(minor)].dev->port->irq /* interrupt # */ + /* PARPORT_IRQ_NONE means polled */ +#ifdef LP_STATS +#define LP_STAT(minor) lp_table[(minor)].stats /* statistics area */ +#endif +#define LP_BUFFER_SIZE PAGE_SIZE + +#define LP_BASE(x) lp_table[(x)].dev->port->base + +#ifdef LP_STATS +struct lp_stats { + unsigned long chars; + unsigned long sleeps; + unsigned int maxrun; + unsigned int maxwait; + unsigned int meanwait; + unsigned int mdev; +}; +#endif + +struct lp_struct { + struct pardevice *dev; + unsigned long flags; + unsigned int chars; + unsigned int time; + unsigned int wait; + char *lp_buffer; +#ifdef LP_STATS + unsigned int lastcall; + unsigned int runchars; + struct lp_stats stats; +#endif + wait_queue_head_t waitq; + unsigned int last_error; + struct mutex port_mutex; + wait_queue_head_t dataq; + long timeout; + unsigned int best_mode; + unsigned int current_mode; + unsigned long bits; +}; + +/* + * The following constants describe the various signals of the printer port + * hardware. Note that the hardware inverts some signals and that some + * signals are active low. An example is LP_STROBE, which must be programmed + * with 1 for being active and 0 for being inactive, because the strobe signal + * gets inverted, but it is also active low. + */ + + +/* + * defines for 8255 control port + * base + 2 + * accessed with LP_C(minor) + */ +#define LP_PINTEN 0x10 /* high to read data in or-ed with data out */ +#define LP_PSELECP 0x08 /* inverted output, active low */ +#define LP_PINITP 0x04 /* unchanged output, active low */ +#define LP_PAUTOLF 0x02 /* inverted output, active low */ +#define LP_PSTROBE 0x01 /* short high output on raising edge */ + +/* + * the value written to ports to test existence. PC-style ports will + * return the value written. AT-style ports will return 0. so why not + * make them the same ? 
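For the lp ioctls above, a user-space status query against the first printer port might look like the sketch below; the open flags mirror what tunelp-style tools use and should be treated as an assumption rather than a reference.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/lp.h>

	int main(void)
	{
		int status = 0;
		int fd = open("/dev/lp0", O_WRONLY | O_NONBLOCK);

		if (fd < 0 || ioctl(fd, LPGETSTATUS, &status) < 0)
			return 1;

		/* Status bits follow the LP_P* defines above. */
		printf("online: %s, out of paper: %s\n",
		       (status & LP_PSELECD) ? "yes" : "no",
		       (status & LP_POUTPA) ? "yes" : "no");
		return 0;
	}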
+ */ +#define LP_DUMMY 0x00 + +/* + * This is the port delay time, in microseconds. + * It is used only in the lp_init() and lp_reset() routine. + */ +#define LP_DELAY 50 + +#endif + +#endif diff --git a/include/linux/lp855x.h b/include/linux/lp855x.h new file mode 100644 index 00000000..781a490a --- /dev/null +++ b/include/linux/lp855x.h @@ -0,0 +1,131 @@ +/* + * LP855x Backlight Driver + * + * Copyright (C) 2011 Texas Instruments + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef _LP855X_H +#define _LP855X_H + +#define BL_CTL_SHFT (0) +#define BRT_MODE_SHFT (1) +#define BRT_MODE_MASK (0x06) + +/* Enable backlight. Only valid when BRT_MODE=10(I2C only) */ +#define ENABLE_BL (1) +#define DISABLE_BL (0) + +#define I2C_CONFIG(id) id ## _I2C_CONFIG +#define PWM_CONFIG(id) id ## _PWM_CONFIG + +/* DEVICE CONTROL register - LP8550 */ +#define LP8550_PWM_CONFIG (LP8550_PWM_ONLY << BRT_MODE_SHFT) +#define LP8550_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \ + (LP8550_I2C_ONLY << BRT_MODE_SHFT)) + +/* DEVICE CONTROL register - LP8551 */ +#define LP8551_PWM_CONFIG LP8550_PWM_CONFIG +#define LP8551_I2C_CONFIG LP8550_I2C_CONFIG + +/* DEVICE CONTROL register - LP8552 */ +#define LP8552_PWM_CONFIG LP8550_PWM_CONFIG +#define LP8552_I2C_CONFIG LP8550_I2C_CONFIG + +/* DEVICE CONTROL register - LP8553 */ +#define LP8553_PWM_CONFIG LP8550_PWM_CONFIG +#define LP8553_I2C_CONFIG LP8550_I2C_CONFIG + +/* DEVICE CONTROL register - LP8556 */ +#define LP8556_PWM_CONFIG (LP8556_PWM_ONLY << BRT_MODE_SHFT) +#define LP8556_COMB1_CONFIG (LP8556_COMBINED1 << BRT_MODE_SHFT) +#define LP8556_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \ + (LP8556_I2C_ONLY << BRT_MODE_SHFT)) +#define LP8556_COMB2_CONFIG (LP8556_COMBINED2 << BRT_MODE_SHFT) + +/* ROM area boundary */ +#define EEPROM_START (0xA0) +#define EEPROM_END (0xA7) +#define EPROM_START (0xA0) +#define EPROM_END (0xAF) + +enum lp855x_chip_id { + LP8550, + LP8551, + LP8552, + LP8553, + LP8556, +}; + +enum lp855x_brightness_ctrl_mode { + PWM_BASED = 1, + REGISTER_BASED, +}; + +enum lp8550_brighntess_source { + LP8550_PWM_ONLY, + LP8550_I2C_ONLY = 2, +}; + +enum lp8551_brighntess_source { + LP8551_PWM_ONLY = LP8550_PWM_ONLY, + LP8551_I2C_ONLY = LP8550_I2C_ONLY, +}; + +enum lp8552_brighntess_source { + LP8552_PWM_ONLY = LP8550_PWM_ONLY, + LP8552_I2C_ONLY = LP8550_I2C_ONLY, +}; + +enum lp8553_brighntess_source { + LP8553_PWM_ONLY = LP8550_PWM_ONLY, + LP8553_I2C_ONLY = LP8550_I2C_ONLY, +}; + +enum lp8556_brightness_source { + LP8556_PWM_ONLY, + LP8556_COMBINED1, /* pwm + i2c before the shaper block */ + LP8556_I2C_ONLY, + LP8556_COMBINED2, /* pwm + i2c after the shaper block */ +}; + +struct lp855x_pwm_data { + void (*pwm_set_intensity) (int brightness, int max_brightness); + int (*pwm_get_intensity) (int max_brightness); +}; + +struct lp855x_rom_data { + u8 addr; + u8 val; +}; + +/** + * struct lp855x_platform_data + * @name : Backlight driver name. If it is not defined, default name is set. + * @mode : brightness control by pwm or lp855x register + * @device_control : value of DEVICE CONTROL register + * @initial_brightness : initial value of backlight brightness + * @pwm_data : platform specific pwm generation functions. + Only valid when mode is PWM_BASED. 
+ * @load_new_rom_data : + 0 : use default configuration data + 1 : update values of eeprom or eprom registers on loading driver + * @size_program : total size of lp855x_rom_data + * @rom_data : list of new eeprom/eprom registers + */ +struct lp855x_platform_data { + char *name; + enum lp855x_brightness_ctrl_mode mode; + u8 device_control; + int initial_brightness; + struct lp855x_pwm_data pwm_data; + u8 load_new_rom_data; + int size_program; + struct lp855x_rom_data *rom_data; +}; + +#endif diff --git a/include/linux/lp8727.h b/include/linux/lp8727.h new file mode 100644 index 00000000..ea98c613 --- /dev/null +++ b/include/linux/lp8727.h @@ -0,0 +1,65 @@ +/* + * LP8727 Micro/Mini USB IC with integrated charger + * + * Copyright (C) 2011 Texas Instruments + * Copyright (C) 2011 National Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LP8727_H +#define _LP8727_H + +enum lp8727_eoc_level { + EOC_5P, + EOC_10P, + EOC_16P, + EOC_20P, + EOC_25P, + EOC_33P, + EOC_50P, +}; + +enum lp8727_ichg { + ICHG_90mA, + ICHG_100mA, + ICHG_400mA, + ICHG_450mA, + ICHG_500mA, + ICHG_600mA, + ICHG_700mA, + ICHG_800mA, + ICHG_900mA, + ICHG_1000mA, +}; + +/** + * struct lp8727_chg_param + * @eoc_level : end of charge level setting + * @ichg : charging current + */ +struct lp8727_chg_param { + enum lp8727_eoc_level eoc_level; + enum lp8727_ichg ichg; +}; + +/** + * struct lp8727_platform_data + * @get_batt_present : check battery status - exists or not + * @get_batt_level : get battery voltage (mV) + * @get_batt_capacity : get battery capacity (%) + * @get_batt_temp : get battery temperature + * @ac, @usb : charging parameters each charger type + */ +struct lp8727_platform_data { + u8 (*get_batt_present)(void); + u16 (*get_batt_level)(void); + u8 (*get_batt_capacity)(void); + u8 (*get_batt_temp)(void); + struct lp8727_chg_param ac; + struct lp8727_chg_param usb; +}; + +#endif diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h new file mode 100644 index 00000000..7a71ffad --- /dev/null +++ b/include/linux/lru_cache.h @@ -0,0 +1,294 @@ +/* + lru_cache.c + + This file is part of DRBD by Philipp Reisner and Lars Ellenberg. + + Copyright (C) 2003-2008, LINBIT Information Technologies GmbH. + Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>. + Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. + + drbd is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + drbd is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with drbd; see the file COPYING. If not, write to + the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
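Going back to lp855x_platform_data above: a board file would normally fill it in before registering the backlight I2C device. Every value in example_bl_pdata below is purely illustrative, not a recommendation.

	#include <linux/lp855x.h>

	static struct lp855x_platform_data example_bl_pdata = {
		.name			= "example-backlight",
		.mode			= REGISTER_BASED,	/* brightness via I2C registers */
		.device_control		= LP8556_I2C_CONFIG,
		.initial_brightness	= 128,
		.load_new_rom_data	= 0,			/* keep factory EEPROM contents */
	};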
+ + */ + +#ifndef LRU_CACHE_H +#define LRU_CACHE_H + +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/bitops.h> +#include <linux/string.h> /* for memset */ +#include <linux/seq_file.h> + +/* +This header file (and its .c file; kernel-doc of functions see there) + define a helper framework to easily keep track of index:label associations, + and changes to an "active set" of objects, as well as pending transactions, + to persistently record those changes. + + We use an LRU policy if it is necessary to "cool down" a region currently in + the active set before we can "heat" a previously unused region. + + Because of this later property, it is called "lru_cache". + As it actually Tracks Objects in an Active SeT, we could also call it + toast (incidentally that is what may happen to the data on the + backend storage uppon next resync, if we don't get it right). + +What for? + +We replicate IO (more or less synchronously) to local and remote disk. + +For crash recovery after replication node failure, + we need to resync all regions that have been target of in-flight WRITE IO + (in use, or "hot", regions), as we don't know wether or not those WRITEs have + made it to stable storage. + + To avoid a "full resync", we need to persistently track these regions. + + This is known as "write intent log", and can be implemented as on-disk + (coarse or fine grained) bitmap, or other meta data. + + To avoid the overhead of frequent extra writes to this meta data area, + usually the condition is softened to regions that _may_ have been target of + in-flight WRITE IO, e.g. by only lazily clearing the on-disk write-intent + bitmap, trading frequency of meta data transactions against amount of + (possibly unnecessary) resync traffic. + + If we set a hard limit on the area that may be "hot" at any given time, we + limit the amount of resync traffic needed for crash recovery. + +For recovery after replication link failure, + we need to resync all blocks that have been changed on the other replica + in the mean time, or, if both replica have been changed independently [*], + all blocks that have been changed on either replica in the mean time. + [*] usually as a result of a cluster split-brain and insufficient protection. + but there are valid use cases to do this on purpose. + + Tracking those blocks can be implemented as "dirty bitmap". + Having it fine-grained reduces the amount of resync traffic. + It should also be persistent, to allow for reboots (or crashes) + while the replication link is down. + +There are various possible implementations for persistently storing +write intent log information, three of which are mentioned here. + +"Chunk dirtying" + The on-disk "dirty bitmap" may be re-used as "write-intent" bitmap as well. + To reduce the frequency of bitmap updates for write-intent log purposes, + one could dirty "chunks" (of some size) at a time of the (fine grained) + on-disk bitmap, while keeping the in-memory "dirty" bitmap as clean as + possible, flushing it to disk again when a previously "hot" (and on-disk + dirtied as full chunk) area "cools down" again (no IO in flight anymore, + and none expected in the near future either). + +"Explicit (coarse) write intent bitmap" + An other implementation could chose a (probably coarse) explicit bitmap, + for write-intent log purposes, additionally to the fine grained dirty bitmap. 
+ +"Activity log" + Yet an other implementation may keep track of the hot regions, by starting + with an empty set, and writing down a journal of region numbers that have + become "hot", or have "cooled down" again. + + To be able to use a ring buffer for this journal of changes to the active + set, we not only record the actual changes to that set, but also record the + not changing members of the set in a round robin fashion. To do so, we use a + fixed (but configurable) number of slots which we can identify by index, and + associate region numbers (labels) with these indices. + For each transaction recording a change to the active set, we record the + change itself (index: -old_label, +new_label), and which index is associated + with which label (index: current_label) within a certain sliding window that + is moved further over the available indices with each such transaction. + + Thus, for crash recovery, if the ringbuffer is sufficiently large, we can + accurately reconstruct the active set. + + Sufficiently large depends only on maximum number of active objects, and the + size of the sliding window recording "index: current_label" associations within + each transaction. + + This is what we call the "activity log". + + Currently we need one activity log transaction per single label change, which + does not give much benefit over the "dirty chunks of bitmap" approach, other + than potentially less seeks. + + We plan to change the transaction format to support multiple changes per + transaction, which then would reduce several (disjoint, "random") updates to + the bitmap into one transaction to the activity log ring buffer. +*/ + +/* this defines an element in a tracked set + * .colision is for hash table lookup. + * When we process a new IO request, we know its sector, thus can deduce the + * region number (label) easily. To do the label -> object lookup without a + * full list walk, we use a simple hash table. + * + * .list is on one of three lists: + * in_use: currently in use (refcnt > 0, lc_number != LC_FREE) + * lru: unused but ready to be reused or recycled + * (lc_refcnt == 0, lc_number != LC_FREE), + * free: unused but ready to be recycled + * (lc_refcnt == 0, lc_number == LC_FREE), + * + * an element is said to be "in the active set", + * if either on "in_use" or "lru", i.e. lc_number != LC_FREE. + * + * DRBD currently (May 2009) only uses 61 elements on the resync lru_cache + * (total memory usage 2 pages), and up to 3833 elements on the act_log + * lru_cache, totalling ~215 kB for 64bit architecture, ~53 pages. + * + * We usually do not actually free these objects again, but only "recycle" + * them, as the change "index: -old_label, +LC_FREE" would need a transaction + * as well. Which also means that using a kmem_cache to allocate the objects + * from wastes some resources. + * But it avoids high order page allocations in kmalloc. 
+ */ +struct lc_element { + struct hlist_node colision; + struct list_head list; /* LRU list or free list */ + unsigned refcnt; + /* back "pointer" into lc_cache->element[index], + * for paranoia, and for "lc_element_to_index" */ + unsigned lc_index; + /* if we want to track a larger set of objects, + * it needs to become arch independend u64 */ + unsigned lc_number; + + /* special label when on free list */ +#define LC_FREE (~0U) +}; + +struct lru_cache { + /* the least recently used item is kept at lru->prev */ + struct list_head lru; + struct list_head free; + struct list_head in_use; + + /* the pre-created kmem cache to allocate the objects from */ + struct kmem_cache *lc_cache; + + /* size of tracked objects, used to memset(,0,) them in lc_reset */ + size_t element_size; + /* offset of struct lc_element member in the tracked object */ + size_t element_off; + + /* number of elements (indices) */ + unsigned int nr_elements; + /* Arbitrary limit on maximum tracked objects. Practical limit is much + * lower due to allocation failures, probably. For typical use cases, + * nr_elements should be a few thousand at most. + * This also limits the maximum value of lc_element.lc_index, allowing the + * 8 high bits of .lc_index to be overloaded with flags in the future. */ +#define LC_MAX_ACTIVE (1<<24) + + /* statistics */ + unsigned used; /* number of lelements currently on in_use list */ + unsigned long hits, misses, starving, dirty, changed; + + /* see below: flag-bits for lru_cache */ + unsigned long flags; + + /* when changing the label of an index element */ + unsigned int new_number; + + /* for paranoia when changing the label of an index element */ + struct lc_element *changing_element; + + void *lc_private; + const char *name; + + /* nr_elements there */ + struct hlist_head *lc_slot; + struct lc_element **lc_element; +}; + + +/* flag-bits for lru_cache */ +enum { + /* debugging aid, to catch concurrent access early. + * user needs to guarantee exclusive access by proper locking! */ + __LC_PARANOIA, + /* if we need to change the set, but currently there is a changing + * transaction pending, we are "dirty", and must deferr further + * changing requests */ + __LC_DIRTY, + /* if we need to change the set, but currently there is no free nor + * unused element available, we are "starving", and must not give out + * further references, to guarantee that eventually some refcnt will + * drop to zero and we will be able to make progress again, changing + * the set, writing the transaction. + * if the statistics say we are frequently starving, + * nr_elements is too small. 
*/ + __LC_STARVING, +}; +#define LC_PARANOIA (1<<__LC_PARANOIA) +#define LC_DIRTY (1<<__LC_DIRTY) +#define LC_STARVING (1<<__LC_STARVING) + +extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache, + unsigned e_count, size_t e_size, size_t e_off); +extern void lc_reset(struct lru_cache *lc); +extern void lc_destroy(struct lru_cache *lc); +extern void lc_set(struct lru_cache *lc, unsigned int enr, int index); +extern void lc_del(struct lru_cache *lc, struct lc_element *element); + +extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr); +extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr); +extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr); +extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e); +extern void lc_changed(struct lru_cache *lc, struct lc_element *e); + +struct seq_file; +extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc); + +extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext, + void (*detail) (struct seq_file *, struct lc_element *)); + +/** + * lc_try_lock - can be used to stop lc_get() from changing the tracked set + * @lc: the lru cache to operate on + * + * Note that the reference counts and order on the active and lru lists may + * still change. Returns true if we acquired the lock. + */ +static inline int lc_try_lock(struct lru_cache *lc) +{ + return !test_and_set_bit(__LC_DIRTY, &lc->flags); +} + +/** + * lc_unlock - unlock @lc, allow lc_get() to change the set again + * @lc: the lru cache to operate on + */ +static inline void lc_unlock(struct lru_cache *lc) +{ + clear_bit(__LC_DIRTY, &lc->flags); + smp_mb__after_clear_bit(); +} + +static inline int lc_is_used(struct lru_cache *lc, unsigned int enr) +{ + struct lc_element *e = lc_find(lc, enr); + return e && e->refcnt; +} + +#define lc_entry(ptr, type, member) \ + container_of(ptr, type, member) + +extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i); +extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e); + +#endif diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h new file mode 100644 index 00000000..fad48aab --- /dev/null +++ b/include/linux/lsm_audit.h @@ -0,0 +1,105 @@ +/* + * Common LSM logging functions + * Heavily borrowed from selinux/avc.h + * + * Author : Etienne BASSET <etienne.basset@ensta.org> + * + * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil> + * All BUGS to : Etienne BASSET <etienne.basset@ensta.org> + */ +#ifndef _LSM_COMMON_LOGGING_ +#define _LSM_COMMON_LOGGING_ + +#include <linux/stddef.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kdev_t.h> +#include <linux/spinlock.h> +#include <linux/init.h> +#include <linux/audit.h> +#include <linux/in6.h> +#include <linux/path.h> +#include <linux/key.h> +#include <linux/skbuff.h> + +struct lsm_network_audit { + int netif; + struct sock *sk; + u16 family; + __be16 dport; + __be16 sport; + union { + struct { + __be32 daddr; + __be32 saddr; + } v4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } v6; + } fam; +}; + +/* Auxiliary data to use in generating the audit record. 
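Returning to the lru_cache API above, the lc_get()/lc_put() reference protocol can be sketched as follows. example_lc and example_use_extent() are invented, and the lc_changed() transaction step that a real user such as DRBD performs when lc_get() relabels a slot is deliberately glossed over.

	#include <linux/lru_cache.h>
	#include <linux/errno.h>

	static struct lru_cache *example_lc;	/* set up elsewhere with lc_create() */

	static int example_use_extent(unsigned int extent_nr)
	{
		struct lc_element *e;

		e = lc_get(example_lc, extent_nr);
		if (!e)
			return -EBUSY;	/* set is "starving" or "dirty": retry later */

		/*
		 * If this call re-used an LRU slot for a new label, real code must
		 * record that change persistently and call lc_changed() before
		 * relying on it.  Omitted here for brevity.
		 */

		/* ... do the work covered by this extent ... */

		lc_put(example_lc, e);	/* drop reference; element may return to the LRU */
		return 0;
	}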
*/ +struct common_audit_data { + char type; +#define LSM_AUDIT_DATA_PATH 1 +#define LSM_AUDIT_DATA_NET 2 +#define LSM_AUDIT_DATA_CAP 3 +#define LSM_AUDIT_DATA_IPC 4 +#define LSM_AUDIT_DATA_TASK 5 +#define LSM_AUDIT_DATA_KEY 6 +#define LSM_AUDIT_DATA_NONE 7 +#define LSM_AUDIT_DATA_KMOD 8 +#define LSM_AUDIT_DATA_INODE 9 +#define LSM_AUDIT_DATA_DENTRY 10 + struct task_struct *tsk; + union { + struct path path; + struct dentry *dentry; + struct inode *inode; + struct lsm_network_audit *net; + int cap; + int ipc_id; + struct task_struct *tsk; +#ifdef CONFIG_KEYS + struct { + key_serial_t key; + char *key_desc; + } key_struct; +#endif + char *kmod_name; + } u; + /* this union contains LSM specific data */ + union { +#ifdef CONFIG_SECURITY_SMACK + struct smack_audit_data *smack_audit_data; +#endif +#ifdef CONFIG_SECURITY_SELINUX + struct selinux_audit_data *selinux_audit_data; +#endif +#ifdef CONFIG_SECURITY_APPARMOR + struct apparmor_audit_data *apparmor_audit_data; +#endif + }; /* per LSM data pointer union */ +}; + +#define v4info fam.v4 +#define v6info fam.v6 + +int ipv4_skb_to_auditdata(struct sk_buff *skb, + struct common_audit_data *ad, u8 *proto); + +int ipv6_skb_to_auditdata(struct sk_buff *skb, + struct common_audit_data *ad, u8 *proto); + +/* Initialize an LSM audit data structure. */ +#define COMMON_AUDIT_DATA_INIT(_d, _t) \ + { memset((_d), 0, sizeof(struct common_audit_data)); \ + (_d)->type = LSM_AUDIT_DATA_##_t; } + +void common_lsm_audit(struct common_audit_data *a, + void (*pre_audit)(struct audit_buffer *, void *), + void (*post_audit)(struct audit_buffer *, void *)); + +#endif diff --git a/include/linux/lz4.h b/include/linux/lz4.h new file mode 100755 index 00000000..8bd8ad7e --- /dev/null +++ b/include/linux/lz4.h @@ -0,0 +1,89 @@ +#ifndef __LZ4_H__ +#define __LZ4_H__ +/* + * LZ4 Kernel Interface + * + * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#define LZ4_MEM_COMPRESS (4096 * sizeof(unsigned char *)) +#define LZ4HC_MEM_COMPRESS (65538 * sizeof(unsigned char *)) + +#define lz4_worst_compress(x) ((x) + ((x) / 255) + 16) + +/* + * lz4_compressbound() + * Provides the maximum size that LZ4 may output in a "worst case" scenario + * (input data not compressible) + */ +static inline size_t lz4_compressbound(size_t isize) +{ + return isize + (isize / 255) + 16; +} + +/* + * lz4_compress() + * src : source address of the original data + * src_len : size of the original data + * dst : output buffer address of the compressed data + * This requires 'dst' of size LZ4_COMPRESSBOUND. + * dst_len : is the output size, which is returned after compress done + * workmem : address of the working memory. + * This requires 'workmem' of size LZ4_MEM_COMPRESS. + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer and workmem must be already allocated with + * the defined size. + */ +int lz4_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + + /* + * lz4hc_compress() + * src : source address of the original data + * src_len : size of the original data + * dst : output buffer address of the compressed data + * This requires 'dst' of size LZ4_COMPRESSBOUND. + * dst_len : is the output size, which is returned after compress done + * workmem : address of the working memory. 
+ * This requires 'workmem' of size LZ4HC_MEM_COMPRESS. + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer and workmem must be already allocated with + * the defined size. + */ +int lz4hc_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + +/* + * lz4_decompress() + * src : source address of the compressed data + * src_len : is the input size, whcih is returned after decompress done + * dest : output buffer address of the decompressed data + * actual_dest_len: is the size of uncompressed data, supposing it's known + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer must be already allocated. + * slightly faster than lz4_decompress_unknownoutputsize() + */ +int lz4_decompress(const unsigned char *src, size_t *src_len, + unsigned char *dest, size_t actual_dest_len); + +/* + * lz4_decompress_unknownoutputsize() + * src : source address of the compressed data + * src_len : is the input size, therefore the compressed size + * dest : output buffer address of the decompressed data + * dest_len: is the max size of the destination buffer, which is + * returned with actual size of decompressed data after + * decompress done + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer must be already allocated. + */ +int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len, + unsigned char *dest, size_t *dest_len); +#endif diff --git a/include/linux/lzo.h b/include/linux/lzo.h new file mode 100644 index 00000000..d793497e --- /dev/null +++ b/include/linux/lzo.h @@ -0,0 +1,44 @@ +#ifndef __LZO_H__ +#define __LZO_H__ +/* + * LZO Public Kernel Interface + * A mini subset of the LZO real-time data compression library + * + * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for kernel use by: + * Nitin Gupta <nitingupta910@gmail.com> + * Richard Purdie <rpurdie@openedhand.com> + */ + +#define LZO1X_MEM_COMPRESS (16384 * sizeof(unsigned char *)) +#define LZO1X_1_MEM_COMPRESS LZO1X_MEM_COMPRESS + +#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3) + +/* This requires 'workmem' of size LZO1X_1_MEM_COMPRESS */ +int lzo1x_1_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + +/* safe decompression with overrun testing */ +int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len); + +/* + * Return values (< 0 = Error) + */ +#define LZO_E_OK 0 +#define LZO_E_ERROR (-1) +#define LZO_E_OUT_OF_MEMORY (-2) +#define LZO_E_NOT_COMPRESSIBLE (-3) +#define LZO_E_INPUT_OVERRUN (-4) +#define LZO_E_OUTPUT_OVERRUN (-5) +#define LZO_E_LOOKBEHIND_OVERRUN (-6) +#define LZO_E_EOF_NOT_FOUND (-7) +#define LZO_E_INPUT_NOT_CONSUMED (-8) +#define LZO_E_NOT_YET_IMPLEMENTED (-9) + +#endif diff --git a/include/linux/m48t86.h b/include/linux/m48t86.h new file mode 100644 index 00000000..915d6b4f --- /dev/null +++ b/include/linux/m48t86.h @@ -0,0 +1,16 @@ +/* + * ST M48T86 / Dallas DS12887 RTC driver + * Copyright (c) 2006 Tower Technologies + * + * Author: Alessandro Zummo <a.zummo@towertech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +struct m48t86_ops +{ + void (*writebyte)(unsigned char value, unsigned long addr); + unsigned char (*readbyte)(unsigned long addr); +}; diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h new file mode 100644 index 00000000..41d1eeb9 --- /dev/null +++ b/include/linux/mISDNdsp.h @@ -0,0 +1,39 @@ +#ifndef __mISDNdsp_H__ +#define __mISDNdsp_H__ + +struct mISDN_dsp_element_arg { + char *name; + char *def; + char *desc; +}; + +struct mISDN_dsp_element { + char *name; + void *(*new)(const char *arg); + void (*free)(void *p); + void (*process_tx)(void *p, unsigned char *data, int len); + void (*process_rx)(void *p, unsigned char *data, int len, + unsigned int txlen); + int num_args; + struct mISDN_dsp_element_arg + *args; +}; + +extern int mISDN_dsp_element_register(struct mISDN_dsp_element *elem); +extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem); + +struct dsp_features { + int hfc_id; /* unique id to identify the chip (or -1) */ + int hfc_dtmf; /* set if HFCmulti card supports dtmf */ + int hfc_conf; /* set if HFCmulti card supports conferences */ + int hfc_loops; /* set if card supports tone loops */ + int hfc_echocanhw; /* set if card supports echocancelation*/ + int pcm_id; /* unique id to identify the pcm bus (or -1) */ + int pcm_slots; /* number of slots on the pcm bus */ + int pcm_banks; /* number of IO banks of pcm bus */ + int unclocked; /* data is not clocked (has jitter/loss) */ + int unordered; /* data is unordered (packets have index) */ +}; + +#endif + diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h new file mode 100644 index 00000000..4af84140 --- /dev/null +++ b/include/linux/mISDNhw.h @@ -0,0 +1,186 @@ +/* + * + * Author Karsten Keil <kkeil@novell.com> + * + * Basic declarations for the mISDN HW channels + * + * Copyright 2008 by Karsten Keil <kkeil@novell.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef MISDNHW_H +#define MISDNHW_H +#include <linux/mISDNif.h> +#include <linux/timer.h> + +/* + * HW DEBUG 0xHHHHGGGG + * H - hardware driver specific bits + * G - for all drivers + */ + +#define DEBUG_HW 0x00000001 +#define DEBUG_HW_OPEN 0x00000002 +#define DEBUG_HW_DCHANNEL 0x00000100 +#define DEBUG_HW_DFIFO 0x00000200 +#define DEBUG_HW_BCHANNEL 0x00001000 +#define DEBUG_HW_BFIFO 0x00002000 + +#define MAX_DFRAME_LEN_L1 300 +#define MAX_MON_FRAME 32 +#define MAX_LOG_SPACE 2048 +#define MISDN_COPY_SIZE 32 + +/* channel->Flags bit field */ +#define FLG_TX_BUSY 0 /* tx_buf in use */ +#define FLG_TX_NEXT 1 /* next_skb in use */ +#define FLG_L1_BUSY 2 /* L1 is permanent busy */ +#define FLG_L2_ACTIVATED 3 /* activated from L2 */ +#define FLG_OPEN 5 /* channel is in use */ +#define FLG_ACTIVE 6 /* channel is activated */ +#define FLG_BUSY_TIMER 7 +/* channel type */ +#define FLG_DCHANNEL 8 /* channel is D-channel */ +#define FLG_BCHANNEL 9 /* channel is B-channel */ +#define FLG_ECHANNEL 10 /* channel is E-channel */ +#define FLG_TRANSPARENT 12 /* channel use transparent data */ +#define FLG_HDLC 13 /* channel use hdlc data */ +#define FLG_L2DATA 14 /* channel use L2 DATA primitivs */ +#define FLG_ORIGIN 15 /* channel is on origin site */ +/* channel specific stuff */ +#define FLG_FILLEMPTY 16 /* fill fifo on first frame (empty) */ +/* arcofi specific */ +#define FLG_ARCOFI_TIMER 17 +#define FLG_ARCOFI_ERROR 18 +/* isar specific */ +#define FLG_INITIALIZED 17 +#define FLG_DLEETX 18 +#define FLG_LASTDLE 19 +#define FLG_FIRST 20 +#define FLG_LASTDATA 21 +#define FLG_NMD_DATA 22 +#define FLG_FTI_RUN 23 +#define FLG_LL_OK 24 +#define FLG_LL_CONN 25 +#define FLG_DTMFSEND 26 + +/* workq events */ +#define FLG_RECVQUEUE 30 +#define FLG_PHCHANGE 31 + +#define schedule_event(s, ev) do { \ + test_and_set_bit(ev, &((s)->Flags)); \ + schedule_work(&((s)->workq)); \ + } while (0) + +struct dchannel { + struct mISDNdevice dev; + u_long Flags; + struct work_struct workq; + void (*phfunc) (struct dchannel *); + u_int state; + void *l1; + void *hw; + int slot; /* multiport card channel slot */ + struct timer_list timer; + /* receive data */ + struct sk_buff *rx_skb; + int maxlen; + /* send data */ + struct sk_buff_head squeue; + struct sk_buff_head rqueue; + struct sk_buff *tx_skb; + int tx_idx; + int debug; + /* statistics */ + int err_crc; + int err_tx; + int err_rx; +}; + +typedef int (dchannel_l1callback)(struct dchannel *, u_int); +extern int create_l1(struct dchannel *, dchannel_l1callback *); + +/* private L1 commands */ +#define INFO0 0x8002 +#define INFO1 0x8102 +#define INFO2 0x8202 +#define INFO3_P8 0x8302 +#define INFO3_P10 0x8402 +#define INFO4_P8 0x8502 +#define INFO4_P10 0x8602 +#define LOSTFRAMING 0x8702 +#define ANYSIGNAL 0x8802 +#define HW_POWERDOWN 0x8902 +#define HW_RESET_REQ 0x8a02 +#define HW_POWERUP_REQ 0x8b02 +#define HW_DEACT_REQ 0x8c02 +#define HW_ACTIVATE_REQ 0x8e02 +#define HW_D_NOBLOCKED 0x8f02 +#define HW_RESET_IND 0x9002 +#define HW_POWERUP_IND 0x9102 +#define HW_DEACT_IND 0x9202 +#define HW_ACTIVATE_IND 0x9302 +#define HW_DEACT_CNF 0x9402 +#define HW_TESTLOOP 0x9502 +#define HW_TESTRX_RAW 0x9602 +#define HW_TESTRX_HDLC 0x9702 +#define HW_TESTRX_OFF 0x9802 + +struct layer1; +extern int l1_event(struct layer1 *, u_int); + + +struct bchannel { + struct mISDNchannel ch; + int nr; + u_long Flags; + struct work_struct workq; + u_int state; + void *hw; + int slot; /* multiport card channel slot */ + struct timer_list timer; + /* receive data */ + struct sk_buff *rx_skb; + 
int maxlen; + /* send data */ + struct sk_buff *next_skb; + struct sk_buff *tx_skb; + struct sk_buff_head rqueue; + int rcount; + int tx_idx; + int debug; + /* statistics */ + int err_crc; + int err_tx; + int err_rx; +}; + +extern int mISDN_initdchannel(struct dchannel *, int, void *); +extern int mISDN_initbchannel(struct bchannel *, int); +extern int mISDN_freedchannel(struct dchannel *); +extern void mISDN_clear_bchannel(struct bchannel *); +extern int mISDN_freebchannel(struct bchannel *); +extern void queue_ch_frame(struct mISDNchannel *, u_int, + int, struct sk_buff *); +extern int dchannel_senddata(struct dchannel *, struct sk_buff *); +extern int bchannel_senddata(struct bchannel *, struct sk_buff *); +extern void recv_Dchannel(struct dchannel *); +extern void recv_Echannel(struct dchannel *, struct dchannel *); +extern void recv_Bchannel(struct bchannel *, unsigned int id); +extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); +extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); +extern void confirm_Bsend(struct bchannel *bch); +extern int get_next_bframe(struct bchannel *); +extern int get_next_dframe(struct dchannel *); + +#endif diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h new file mode 100644 index 00000000..b5e7f220 --- /dev/null +++ b/include/linux/mISDNif.h @@ -0,0 +1,590 @@ +/* + * + * Author Karsten Keil <kkeil@novell.com> + * + * Copyright 2008 by Karsten Keil <kkeil@novell.com> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE + * version 2.1 as published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU LESSER GENERAL PUBLIC LICENSE for more details. 
+ * + */ + +#ifndef mISDNIF_H +#define mISDNIF_H + +#include <stdarg.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/socket.h> + +/* + * ABI Version 32 bit + * + * <8 bit> Major version + * - changed if any interface become backwards incompatible + * + * <8 bit> Minor version + * - changed if any interface is extended but backwards compatible + * + * <16 bit> Release number + * - should be incremented on every checkin + */ +#define MISDN_MAJOR_VERSION 1 +#define MISDN_MINOR_VERSION 1 +#define MISDN_RELEASE 21 + +/* primitives for information exchange + * generell format + * <16 bit 0 > + * <8 bit command> + * BIT 8 = 1 LAYER private + * BIT 7 = 1 answer + * BIT 6 = 1 DATA + * <8 bit target layer mask> + * + * Layer = 00 is reserved for general commands + Layer = 01 L2 -> HW + Layer = 02 HW -> L2 + Layer = 04 L3 -> L2 + Layer = 08 L2 -> L3 + * Layer = FF is reserved for broadcast commands + */ + +#define MISDN_CMDMASK 0xff00 +#define MISDN_LAYERMASK 0x00ff + +/* generell commands */ +#define OPEN_CHANNEL 0x0100 +#define CLOSE_CHANNEL 0x0200 +#define CONTROL_CHANNEL 0x0300 +#define CHECK_DATA 0x0400 + +/* layer 2 -> layer 1 */ +#define PH_ACTIVATE_REQ 0x0101 +#define PH_DEACTIVATE_REQ 0x0201 +#define PH_DATA_REQ 0x2001 +#define MPH_ACTIVATE_REQ 0x0501 +#define MPH_DEACTIVATE_REQ 0x0601 +#define MPH_INFORMATION_REQ 0x0701 +#define PH_CONTROL_REQ 0x0801 + +/* layer 1 -> layer 2 */ +#define PH_ACTIVATE_IND 0x0102 +#define PH_ACTIVATE_CNF 0x4102 +#define PH_DEACTIVATE_IND 0x0202 +#define PH_DEACTIVATE_CNF 0x4202 +#define PH_DATA_IND 0x2002 +#define PH_DATA_E_IND 0x3002 +#define MPH_ACTIVATE_IND 0x0502 +#define MPH_DEACTIVATE_IND 0x0602 +#define MPH_INFORMATION_IND 0x0702 +#define PH_DATA_CNF 0x6002 +#define PH_CONTROL_IND 0x0802 +#define PH_CONTROL_CNF 0x4802 + +/* layer 3 -> layer 2 */ +#define DL_ESTABLISH_REQ 0x1004 +#define DL_RELEASE_REQ 0x1104 +#define DL_DATA_REQ 0x3004 +#define DL_UNITDATA_REQ 0x3104 +#define DL_INFORMATION_REQ 0x0004 + +/* layer 2 -> layer 3 */ +#define DL_ESTABLISH_IND 0x1008 +#define DL_ESTABLISH_CNF 0x5008 +#define DL_RELEASE_IND 0x1108 +#define DL_RELEASE_CNF 0x5108 +#define DL_DATA_IND 0x3008 +#define DL_UNITDATA_IND 0x3108 +#define DL_INFORMATION_IND 0x0008 + +/* intern layer 2 management */ +#define MDL_ASSIGN_REQ 0x1804 +#define MDL_ASSIGN_IND 0x1904 +#define MDL_REMOVE_REQ 0x1A04 +#define MDL_REMOVE_IND 0x1B04 +#define MDL_STATUS_UP_IND 0x1C04 +#define MDL_STATUS_DOWN_IND 0x1D04 +#define MDL_STATUS_UI_IND 0x1E04 +#define MDL_ERROR_IND 0x1F04 +#define MDL_ERROR_RSP 0x5F04 + +/* DL_INFORMATION_IND types */ +#define DL_INFO_L2_CONNECT 0x0001 +#define DL_INFO_L2_REMOVED 0x0002 + +/* PH_CONTROL types */ +/* TOUCH TONE IS 0x20XX XX "0"..."9", "A","B","C","D","*","#" */ +#define DTMF_TONE_VAL 0x2000 +#define DTMF_TONE_MASK 0x007F +#define DTMF_TONE_START 0x2100 +#define DTMF_TONE_STOP 0x2200 +#define DTMF_HFC_COEF 0x4000 +#define DSP_CONF_JOIN 0x2403 +#define DSP_CONF_SPLIT 0x2404 +#define DSP_RECEIVE_OFF 0x2405 +#define DSP_RECEIVE_ON 0x2406 +#define DSP_ECHO_ON 0x2407 +#define DSP_ECHO_OFF 0x2408 +#define DSP_MIX_ON 0x2409 +#define DSP_MIX_OFF 0x240a +#define DSP_DELAY 0x240b +#define DSP_JITTER 0x240c +#define DSP_TXDATA_ON 0x240d +#define DSP_TXDATA_OFF 0x240e +#define DSP_TX_DEJITTER 0x240f +#define DSP_TX_DEJ_OFF 0x2410 +#define DSP_TONE_PATT_ON 0x2411 +#define DSP_TONE_PATT_OFF 0x2412 +#define DSP_VOL_CHANGE_TX 0x2413 +#define DSP_VOL_CHANGE_RX 0x2414 +#define DSP_BF_ENABLE_KEY 0x2415 +#define DSP_BF_DISABLE 0x2416 +#define DSP_BF_ACCEPT 
0x2416 +#define DSP_BF_REJECT 0x2417 +#define DSP_PIPELINE_CFG 0x2418 +#define HFC_VOL_CHANGE_TX 0x2601 +#define HFC_VOL_CHANGE_RX 0x2602 +#define HFC_SPL_LOOP_ON 0x2603 +#define HFC_SPL_LOOP_OFF 0x2604 +/* for T30 FAX and analog modem */ +#define HW_MOD_FRM 0x4000 +#define HW_MOD_FRH 0x4001 +#define HW_MOD_FTM 0x4002 +#define HW_MOD_FTH 0x4003 +#define HW_MOD_FTS 0x4004 +#define HW_MOD_CONNECT 0x4010 +#define HW_MOD_OK 0x4011 +#define HW_MOD_NOCARR 0x4012 +#define HW_MOD_FCERROR 0x4013 +#define HW_MOD_READY 0x4014 +#define HW_MOD_LASTDATA 0x4015 + +/* DSP_TONE_PATT_ON parameter */ +#define TONE_OFF 0x0000 +#define TONE_GERMAN_DIALTONE 0x0001 +#define TONE_GERMAN_OLDDIALTONE 0x0002 +#define TONE_AMERICAN_DIALTONE 0x0003 +#define TONE_GERMAN_DIALPBX 0x0004 +#define TONE_GERMAN_OLDDIALPBX 0x0005 +#define TONE_AMERICAN_DIALPBX 0x0006 +#define TONE_GERMAN_RINGING 0x0007 +#define TONE_GERMAN_OLDRINGING 0x0008 +#define TONE_AMERICAN_RINGPBX 0x000b +#define TONE_GERMAN_RINGPBX 0x000c +#define TONE_GERMAN_OLDRINGPBX 0x000d +#define TONE_AMERICAN_RINGING 0x000e +#define TONE_GERMAN_BUSY 0x000f +#define TONE_GERMAN_OLDBUSY 0x0010 +#define TONE_AMERICAN_BUSY 0x0011 +#define TONE_GERMAN_HANGUP 0x0012 +#define TONE_GERMAN_OLDHANGUP 0x0013 +#define TONE_AMERICAN_HANGUP 0x0014 +#define TONE_SPECIAL_INFO 0x0015 +#define TONE_GERMAN_GASSENBESETZT 0x0016 +#define TONE_GERMAN_AUFSCHALTTON 0x0016 + +/* MPH_INFORMATION_IND */ +#define L1_SIGNAL_LOS_OFF 0x0010 +#define L1_SIGNAL_LOS_ON 0x0011 +#define L1_SIGNAL_AIS_OFF 0x0012 +#define L1_SIGNAL_AIS_ON 0x0013 +#define L1_SIGNAL_RDI_OFF 0x0014 +#define L1_SIGNAL_RDI_ON 0x0015 +#define L1_SIGNAL_SLIP_RX 0x0020 +#define L1_SIGNAL_SLIP_TX 0x0021 + +/* + * protocol ids + * D channel 1-31 + * B channel 33 - 63 + */ + +#define ISDN_P_NONE 0 +#define ISDN_P_BASE 0 +#define ISDN_P_TE_S0 0x01 +#define ISDN_P_NT_S0 0x02 +#define ISDN_P_TE_E1 0x03 +#define ISDN_P_NT_E1 0x04 +#define ISDN_P_TE_UP0 0x05 +#define ISDN_P_NT_UP0 0x06 + +#define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \ + (p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE)) +#define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \ + (p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT)) +#define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0)) +#define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1)) +#define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0)) + + +#define ISDN_P_LAPD_TE 0x10 +#define ISDN_P_LAPD_NT 0x11 + +#define ISDN_P_B_MASK 0x1f +#define ISDN_P_B_START 0x20 + +#define ISDN_P_B_RAW 0x21 +#define ISDN_P_B_HDLC 0x22 +#define ISDN_P_B_X75SLP 0x23 +#define ISDN_P_B_L2DTMF 0x24 +#define ISDN_P_B_L2DSP 0x25 +#define ISDN_P_B_L2DSPHDLC 0x26 +#define ISDN_P_B_T30_FAX 0x27 +#define ISDN_P_B_MODEM_ASYNC 0x28 + +#define OPTION_L2_PMX 1 +#define OPTION_L2_PTP 2 +#define OPTION_L2_FIXEDTEI 3 +#define OPTION_L2_CLEANUP 4 +#define OPTION_L1_HOLD 5 + +/* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */ +#define MISDN_MAX_IDLEN 20 + +struct mISDNhead { + unsigned int prim; + unsigned int id; +} __packed; + +#define MISDN_HEADER_LEN sizeof(struct mISDNhead) +#define MAX_DATA_SIZE 2048 +#define MAX_DATA_MEM (MAX_DATA_SIZE + MISDN_HEADER_LEN) +#define MAX_DFRAME_LEN 260 + +#define MISDN_ID_ADDR_MASK 0xFFFF +#define MISDN_ID_TEI_MASK 0xFF00 +#define MISDN_ID_SAPI_MASK 0x00FF +#define MISDN_ID_TEI_ANY 0x7F00 + +#define MISDN_ID_ANY 0xFFFF +#define MISDN_ID_NONE 0xFFFE + +#define GROUP_TEI 127 +#define TEI_SAPI 63 +#define CTRL_SAPI 0 + +#define 
MISDN_MAX_CHANNEL 127 +#define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3) + +#define SOL_MISDN 0 + +struct sockaddr_mISDN { + sa_family_t family; + unsigned char dev; + unsigned char channel; + unsigned char sapi; + unsigned char tei; +}; + +struct mISDNversion { + unsigned char major; + unsigned char minor; + unsigned short release; +}; + +struct mISDN_devinfo { + u_int id; + u_int Dprotocols; + u_int Bprotocols; + u_int protocol; + u_char channelmap[MISDN_CHMAP_SIZE]; + u_int nrbchan; + char name[MISDN_MAX_IDLEN]; +}; + +struct mISDN_devrename { + u_int id; + char name[MISDN_MAX_IDLEN]; /* new name */ +}; + +/* MPH_INFORMATION_REQ payload */ +struct ph_info_ch { + __u32 protocol; + __u64 Flags; +}; + +struct ph_info_dch { + struct ph_info_ch ch; + __u16 state; + __u16 num_bch; +}; + +struct ph_info { + struct ph_info_dch dch; + struct ph_info_ch bch[]; +}; + +/* timer device ioctl */ +#define IMADDTIMER _IOR('I', 64, int) +#define IMDELTIMER _IOR('I', 65, int) + +/* socket ioctls */ +#define IMGETVERSION _IOR('I', 66, int) +#define IMGETCOUNT _IOR('I', 67, int) +#define IMGETDEVINFO _IOR('I', 68, int) +#define IMCTRLREQ _IOR('I', 69, int) +#define IMCLEAR_L2 _IOR('I', 70, int) +#define IMSETDEVNAME _IOR('I', 71, struct mISDN_devrename) +#define IMHOLD_L1 _IOR('I', 72, int) + +static inline int +test_channelmap(u_int nr, u_char *map) +{ + if (nr <= MISDN_MAX_CHANNEL) + return map[nr >> 3] & (1 << (nr & 7)); + else + return 0; +} + +static inline void +set_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] |= (1 << (nr & 7)); +} + +static inline void +clear_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] &= ~(1 << (nr & 7)); +} + +/* CONTROL_CHANNEL parameters */ +#define MISDN_CTRL_GETOP 0x0000 +#define MISDN_CTRL_LOOP 0x0001 +#define MISDN_CTRL_CONNECT 0x0002 +#define MISDN_CTRL_DISCONNECT 0x0004 +#define MISDN_CTRL_PCMCONNECT 0x0010 +#define MISDN_CTRL_PCMDISCONNECT 0x0020 +#define MISDN_CTRL_SETPEER 0x0040 +#define MISDN_CTRL_UNSETPEER 0x0080 +#define MISDN_CTRL_RX_OFF 0x0100 +#define MISDN_CTRL_FILL_EMPTY 0x0200 +#define MISDN_CTRL_GETPEER 0x0400 +#define MISDN_CTRL_HW_FEATURES_OP 0x2000 +#define MISDN_CTRL_HW_FEATURES 0x2001 +#define MISDN_CTRL_HFC_OP 0x4000 +#define MISDN_CTRL_HFC_PCM_CONN 0x4001 +#define MISDN_CTRL_HFC_PCM_DISC 0x4002 +#define MISDN_CTRL_HFC_CONF_JOIN 0x4003 +#define MISDN_CTRL_HFC_CONF_SPLIT 0x4004 +#define MISDN_CTRL_HFC_RECEIVE_OFF 0x4005 +#define MISDN_CTRL_HFC_RECEIVE_ON 0x4006 +#define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007 +#define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008 +#define MISDN_CTRL_HFC_WD_INIT 0x4009 +#define MISDN_CTRL_HFC_WD_RESET 0x400A + +/* socket options */ +#define MISDN_TIME_STAMP 0x0001 + +struct mISDN_ctrl_req { + int op; + int channel; + int p1; + int p2; +}; + +/* muxer options */ +#define MISDN_OPT_ALL 1 +#define MISDN_OPT_TEIMGR 2 + +#ifdef __KERNEL__ +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/net.h> +#include <net/sock.h> +#include <linux/completion.h> + +#define DEBUG_CORE 0x000000ff +#define DEBUG_CORE_FUNC 0x00000002 +#define DEBUG_SOCKET 0x00000004 +#define DEBUG_MANAGER 0x00000008 +#define DEBUG_SEND_ERR 0x00000010 +#define DEBUG_MSG_THREAD 0x00000020 +#define DEBUG_QUEUE_FUNC 0x00000040 +#define DEBUG_L1 0x0000ff00 +#define DEBUG_L1_FSM 0x00000200 +#define DEBUG_L2 0x00ff0000 +#define DEBUG_L2_FSM 0x00020000 +#define DEBUG_L2_CTRL 0x00040000 +#define DEBUG_L2_RECV 0x00080000 +#define DEBUG_L2_TEI 0x00100000 +#define DEBUG_L2_TEIFSM 0x00200000 +#define DEBUG_TIMER 0x01000000 +#define DEBUG_CLOCK 0x02000000 
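+
+/*
+ * The DEBUG_* values above are bit masks; a driver's module-level "debug"
+ * parameter is typically tested against them before any output is printed
+ * (illustrative sketch only, the 'debug' variable is a placeholder):
+ *
+ *	if (debug & DEBUG_L2_TEI)
+ *		printk(KERN_DEBUG "%s: TEI message\n", __func__);
+ */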
+ +#define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0]) +#define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim) +#define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id) + +/* socket states */ +#define MISDN_OPEN 1 +#define MISDN_BOUND 2 +#define MISDN_CLOSED 3 + +struct mISDNchannel; +struct mISDNdevice; +struct mISDNstack; +struct mISDNclock; + +struct channel_req { + u_int protocol; + struct sockaddr_mISDN adr; + struct mISDNchannel *ch; +}; + +typedef int (ctrl_func_t)(struct mISDNchannel *, u_int, void *); +typedef int (send_func_t)(struct mISDNchannel *, struct sk_buff *); +typedef int (create_func_t)(struct channel_req *); + +struct Bprotocol { + struct list_head list; + char *name; + u_int Bprotocols; + create_func_t *create; +}; + +struct mISDNchannel { + struct list_head list; + u_int protocol; + u_int nr; + u_long opt; + u_int addr; + struct mISDNstack *st; + struct mISDNchannel *peer; + send_func_t *send; + send_func_t *recv; + ctrl_func_t *ctrl; +}; + +struct mISDN_sock_list { + struct hlist_head head; + rwlock_t lock; +}; + +struct mISDN_sock { + struct sock sk; + struct mISDNchannel ch; + u_int cmask; + struct mISDNdevice *dev; +}; + + + +struct mISDNdevice { + struct mISDNchannel D; + u_int id; + u_int Dprotocols; + u_int Bprotocols; + u_int nrbchan; + u_char channelmap[MISDN_CHMAP_SIZE]; + struct list_head bchannels; + struct mISDNchannel *teimgr; + struct device dev; +}; + +struct mISDNstack { + u_long status; + struct mISDNdevice *dev; + struct task_struct *thread; + struct completion *notify; + wait_queue_head_t workq; + struct sk_buff_head msgq; + struct list_head layer2; + struct mISDNchannel *layer1; + struct mISDNchannel own; + struct mutex lmutex; /* protect lists */ + struct mISDN_sock_list l1sock; +#ifdef MISDN_MSG_STATS + u_int msg_cnt; + u_int sleep_cnt; + u_int stopped_cnt; +#endif +}; + +typedef int (clockctl_func_t)(void *, int); + +struct mISDNclock { + struct list_head list; + char name[64]; + int pri; + clockctl_func_t *ctl; + void *priv; +}; + +/* global alloc/queue functions */ + +static inline struct sk_buff * +mI_alloc_skb(unsigned int len, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); + if (likely(skb)) + skb_reserve(skb, MISDN_HEADER_LEN); + return skb; +} + +static inline struct sk_buff * +_alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) +{ + struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); + struct mISDNhead *hh; + + if (!skb) + return NULL; + if (len) + memcpy(skb_put(skb, len), dp, len); + hh = mISDN_HEAD_P(skb); + hh->prim = prim; + hh->id = id; + return skb; +} + +static inline void +_queue_data(struct mISDNchannel *ch, u_int prim, + u_int id, u_int len, void *dp, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + if (!ch->peer) + return; + skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask); + if (!skb) + return; + if (ch->recv(ch->peer, skb)) + dev_kfree_skb(skb); +} + +/* global register/unregister functions */ + +extern int mISDN_register_device(struct mISDNdevice *, + struct device *parent, char *name); +extern void mISDN_unregister_device(struct mISDNdevice *); +extern int mISDN_register_Bprotocol(struct Bprotocol *); +extern void mISDN_unregister_Bprotocol(struct Bprotocol *); +extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *, + void *); +extern void mISDN_unregister_clock(struct mISDNclock *); + +static inline struct mISDNdevice *dev_to_mISDN(struct device *dev) +{ + if (dev) + return dev_get_drvdata(dev); + else 
+ return NULL; +} + +extern void set_channel_address(struct mISDNchannel *, u_int, u_int); +extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *); +extern unsigned short mISDN_clock_get(void); + +#endif /* __KERNEL__ */ +#endif /* mISDNIF_H */ diff --git a/include/linux/magic.h b/include/linux/magic.h new file mode 100644 index 00000000..e15192cb --- /dev/null +++ b/include/linux/magic.h @@ -0,0 +1,72 @@ +#ifndef __LINUX_MAGIC_H__ +#define __LINUX_MAGIC_H__ + +#define ADFS_SUPER_MAGIC 0xadf5 +#define AFFS_SUPER_MAGIC 0xadff +#define AFS_SUPER_MAGIC 0x5346414F +#define AUTOFS_SUPER_MAGIC 0x0187 +#define CODA_SUPER_MAGIC 0x73757245 +#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */ +#define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */ +#define DEBUGFS_MAGIC 0x64626720 +#define SECURITYFS_MAGIC 0x73636673 +#define SELINUX_MAGIC 0xf97cff8c +#define RAMFS_MAGIC 0x858458f6 /* some random number */ +#define TMPFS_MAGIC 0x01021994 +#define HUGETLBFS_MAGIC 0x958458f6 /* some random number */ +#define SQUASHFS_MAGIC 0x73717368 +#define ECRYPTFS_SUPER_MAGIC 0xf15f +#define EFS_SUPER_MAGIC 0x414A53 +#define EXT2_SUPER_MAGIC 0xEF53 +#define EXT3_SUPER_MAGIC 0xEF53 +#define XENFS_SUPER_MAGIC 0xabba1974 +#define EXT4_SUPER_MAGIC 0xEF53 +#define BTRFS_SUPER_MAGIC 0x9123683E +#define NILFS_SUPER_MAGIC 0x3434 +#define HPFS_SUPER_MAGIC 0xf995e849 +#define ISOFS_SUPER_MAGIC 0x9660 +#define JFFS2_SUPER_MAGIC 0x72b6 +#define PSTOREFS_MAGIC 0x6165676C + +#define MINIX_SUPER_MAGIC 0x137F /* minix v1 fs, 14 char names */ +#define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */ +#define MINIX2_SUPER_MAGIC 0x2468 /* minix v2 fs, 14 char names */ +#define MINIX2_SUPER_MAGIC2 0x2478 /* minix v2 fs, 30 char names */ +#define MINIX3_SUPER_MAGIC 0x4d5a /* minix v3 fs, 60 char names */ + +#define MSDOS_SUPER_MAGIC 0x4d44 /* MD */ +#define NCP_SUPER_MAGIC 0x564c /* Guess, what 0x564c is :-) */ +#define NFS_SUPER_MAGIC 0x6969 +#define OPENPROM_SUPER_MAGIC 0x9fa1 +#define QNX4_SUPER_MAGIC 0x002f /* qnx4 fs detection */ +#define QNX6_SUPER_MAGIC 0x68191122 /* qnx6 fs detection */ + +#define REISERFS_SUPER_MAGIC 0x52654973 /* used by gcc */ + /* used by file system utilities that + look at the superblock, etc. */ +#define REISERFS_SUPER_MAGIC_STRING "ReIsErFs" +#define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs" +#define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs" + +#define SMB_SUPER_MAGIC 0x517B +#define CGROUP_SUPER_MAGIC 0x27e0eb + + +#define STACK_END_MAGIC 0x57AC6E9D + +#define V9FS_MAGIC 0x01021997 + +#define BDEVFS_MAGIC 0x62646576 +#define BINFMTFS_MAGIC 0x42494e4d +#define DEVPTS_SUPER_MAGIC 0x1cd1 +#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA +#define PIPEFS_MAGIC 0x50495045 +#define PROC_SUPER_MAGIC 0x9fa0 +#define SOCKFS_MAGIC 0x534F434B +#define SYSFS_MAGIC 0x62656572 +#define USBDEVICE_SUPER_MAGIC 0x9fa2 +#define MTD_INODE_FS_MAGIC 0x11307854 +#define ANON_INODE_FS_MAGIC 0x09041934 + + +#endif /* __LINUX_MAGIC_H__ */ diff --git a/include/linux/major.h b/include/linux/major.h new file mode 100644 index 00000000..14706398 --- /dev/null +++ b/include/linux/major.h @@ -0,0 +1,184 @@ +#ifndef _LINUX_MAJOR_H +#define _LINUX_MAJOR_H + +/* + * This file has definitions for major device numbers. + * For the device number assignments, see Documentation/devices.txt. 
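+ *
+ * These majors are normally combined with a minor number via MKDEV()
+ * from <linux/kdev_t.h>; MKDEV(MEM_MAJOR, 3), for example, names the
+ * character device 1:3, i.e. /dev/null.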
+ */ + +#define UNNAMED_MAJOR 0 +#define MEM_MAJOR 1 +#define RAMDISK_MAJOR 1 +#define FLOPPY_MAJOR 2 +#define PTY_MASTER_MAJOR 2 +#define IDE0_MAJOR 3 +#define HD_MAJOR IDE0_MAJOR +#define PTY_SLAVE_MAJOR 3 +#define TTY_MAJOR 4 +#define TTYAUX_MAJOR 5 +#define LP_MAJOR 6 +#define VCS_MAJOR 7 +#define LOOP_MAJOR 7 +#define SCSI_DISK0_MAJOR 8 +#define SCSI_TAPE_MAJOR 9 +#define MD_MAJOR 9 +#define MISC_MAJOR 10 +#define SCSI_CDROM_MAJOR 11 +#define MUX_MAJOR 11 /* PA-RISC only */ +#define XT_DISK_MAJOR 13 +#define INPUT_MAJOR 13 +#define SOUND_MAJOR 14 +#define CDU31A_CDROM_MAJOR 15 +#define JOYSTICK_MAJOR 15 +#define GOLDSTAR_CDROM_MAJOR 16 +#define OPTICS_CDROM_MAJOR 17 +#define SANYO_CDROM_MAJOR 18 +#define CYCLADES_MAJOR 19 +#define CYCLADESAUX_MAJOR 20 +#define MITSUMI_X_CDROM_MAJOR 20 +#define MFM_ACORN_MAJOR 21 /* ARM Linux /dev/mfm */ +#define SCSI_GENERIC_MAJOR 21 +#define IDE1_MAJOR 22 +#define DIGICU_MAJOR 22 +#define DIGI_MAJOR 23 +#define MITSUMI_CDROM_MAJOR 23 +#define CDU535_CDROM_MAJOR 24 +#define STL_SERIALMAJOR 24 +#define MATSUSHITA_CDROM_MAJOR 25 +#define STL_CALLOUTMAJOR 25 +#define MATSUSHITA_CDROM2_MAJOR 26 +#define QIC117_TAPE_MAJOR 27 +#define MATSUSHITA_CDROM3_MAJOR 27 +#define MATSUSHITA_CDROM4_MAJOR 28 +#define STL_SIOMEMMAJOR 28 +#define ACSI_MAJOR 28 +#define AZTECH_CDROM_MAJOR 29 +#define FB_MAJOR 29 /* /dev/fb* framebuffers */ +#define CM206_CDROM_MAJOR 32 +#define IDE2_MAJOR 33 +#define IDE3_MAJOR 34 +#define Z8530_MAJOR 34 +#define XPRAM_MAJOR 35 /* Expanded storage on S/390: "slow ram"*/ +#define NETLINK_MAJOR 36 +#define PS2ESDI_MAJOR 36 +#define IDETAPE_MAJOR 37 +#define Z2RAM_MAJOR 37 +#define APBLOCK_MAJOR 38 /* AP1000 Block device */ +#define DDV_MAJOR 39 /* AP1000 DDV block device */ +#define NBD_MAJOR 43 /* Network block device */ +#define RISCOM8_NORMAL_MAJOR 48 +#define DAC960_MAJOR 48 /* 48..55 */ +#define RISCOM8_CALLOUT_MAJOR 49 +#define MKISS_MAJOR 55 +#define DSP56K_MAJOR 55 /* DSP56001 processor device */ + +#define IDE4_MAJOR 56 +#define IDE5_MAJOR 57 + +#define SCSI_DISK1_MAJOR 65 +#define SCSI_DISK2_MAJOR 66 +#define SCSI_DISK3_MAJOR 67 +#define SCSI_DISK4_MAJOR 68 +#define SCSI_DISK5_MAJOR 69 +#define SCSI_DISK6_MAJOR 70 +#define SCSI_DISK7_MAJOR 71 + +#define COMPAQ_SMART2_MAJOR 72 +#define COMPAQ_SMART2_MAJOR1 73 +#define COMPAQ_SMART2_MAJOR2 74 +#define COMPAQ_SMART2_MAJOR3 75 +#define COMPAQ_SMART2_MAJOR4 76 +#define COMPAQ_SMART2_MAJOR5 77 +#define COMPAQ_SMART2_MAJOR6 78 +#define COMPAQ_SMART2_MAJOR7 79 + +#define SPECIALIX_NORMAL_MAJOR 75 +#define SPECIALIX_CALLOUT_MAJOR 76 + +#define AURORA_MAJOR 79 + +#define I2O_MAJOR 80 /* 80->87 */ + +#define SHMIQ_MAJOR 85 /* Linux/mips, SGI /dev/shmiq */ +#define SCSI_CHANGER_MAJOR 86 + +#define IDE6_MAJOR 88 +#define IDE7_MAJOR 89 +#define IDE8_MAJOR 90 +#define IDE9_MAJOR 91 + +#define DASD_MAJOR 94 + +#define MDISK_MAJOR 95 + +#define UBD_MAJOR 98 + +#define PP_MAJOR 99 +#define JSFD_MAJOR 99 + +#define PHONE_MAJOR 100 + +#define COMPAQ_CISS_MAJOR 104 +#define COMPAQ_CISS_MAJOR1 105 +#define COMPAQ_CISS_MAJOR2 106 +#define COMPAQ_CISS_MAJOR3 107 +#define COMPAQ_CISS_MAJOR4 108 +#define COMPAQ_CISS_MAJOR5 109 +#define COMPAQ_CISS_MAJOR6 110 +#define COMPAQ_CISS_MAJOR7 111 + +#define VIODASD_MAJOR 112 +#define VIOCD_MAJOR 113 + +#define ATARAID_MAJOR 114 + +#define SCSI_DISK8_MAJOR 128 +#define SCSI_DISK9_MAJOR 129 +#define SCSI_DISK10_MAJOR 130 +#define SCSI_DISK11_MAJOR 131 +#define SCSI_DISK12_MAJOR 132 +#define SCSI_DISK13_MAJOR 133 +#define SCSI_DISK14_MAJOR 134 +#define 
SCSI_DISK15_MAJOR 135 + +#define UNIX98_PTY_MASTER_MAJOR 128 +#define UNIX98_PTY_MAJOR_COUNT 8 +#define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT) + +#define DRBD_MAJOR 147 +#define RTF_MAJOR 150 +#define RAW_MAJOR 162 + +#define USB_ACM_MAJOR 166 +#define USB_ACM_AUX_MAJOR 167 +#define USB_CHAR_MAJOR 180 + +#define MMC_BLOCK_MAJOR 179 + +#define VXVM_MAJOR 199 /* VERITAS volume i/o driver */ +#define VXSPEC_MAJOR 200 /* VERITAS volume config driver */ +#define VXDMP_MAJOR 201 /* VERITAS volume multipath driver */ + +#define XENVBD_MAJOR 202 /* Xen virtual block device */ + +#define MSR_MAJOR 202 +#define CPUID_MAJOR 203 + +#define OSST_MAJOR 206 /* OnStream-SCx0 SCSI tape */ + +#define IBM_TTY3270_MAJOR 227 +#define IBM_FS3270_MAJOR 228 + +#define VIOTAPE_MAJOR 230 + +#define VD_MAJOR 236 /* For all Video Decoders & Encoders */ +#define CIPHER_MAJOR 238 /* For security engine */ + +#define VID_MAJOR 241 /* For CMOS driver */ +#define MB_MAJOR 242 /* For WMT Memory Block driver */ + +#define BLOCK_EXT_MAJOR 259 +#define SCSI_OSD_MAJOR 260 /* open-osd's OSD scsi device */ +#define CEC_MAJOR 261 /* for WMT HDMI CEC driver*/ + +#endif diff --git a/include/linux/map_to_7segment.h b/include/linux/map_to_7segment.h new file mode 100644 index 00000000..12d62a54 --- /dev/null +++ b/include/linux/map_to_7segment.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef MAP_TO_7SEGMENT_H +#define MAP_TO_7SEGMENT_H + +/* This file provides translation primitives and tables for the conversion + * of (ASCII) characters to a 7-segments notation. + * + * The 7 segment's wikipedia notation below is used as standard. + * See: http://en.wikipedia.org/wiki/Seven_segment_display + * + * Notation: +-a-+ + * f b + * +-g-+ + * e c + * +-d-+ + * + * Usage: + * + * Register a map variable, and fill it with a character set: + * static SEG7_DEFAULT_MAP(map_seg7); + * + * + * Then use for conversion: + * seg7 = map_to_seg7(&map_seg7, some_char); + * ... 
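+ *
+ * map_to_seg7() returns -EINVAL for characters outside its 7-bit ASCII
+ * table, so the result is usually range-checked before being written to
+ * the display (sketch only, display_write() is a placeholder):
+ *
+ *	if (seg7 >= 0)
+ *		display_write(seg7);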
+ * + * In device drivers it is recommended, if required, to make the char map + * accessible via the sysfs interface using the following scheme: + * + * static ssize_t show_map(struct device *dev, char *buf) { + * memcpy(buf, &map_seg7, sizeof(map_seg7)); + * return sizeof(map_seg7); + * } + * static ssize_t store_map(struct device *dev, const char *buf, size_t cnt) { + * if(cnt != sizeof(map_seg7)) + * return -EINVAL; + * memcpy(&map_seg7, buf, cnt); + * return cnt; + * } + * static DEVICE_ATTR(map_seg7, PERMS_RW, show_map, store_map); + * + * History: + * 2005-05-31 RFC linux-kernel@vger.kernel.org + */ +#include <linux/errno.h> + + +#define BIT_SEG7_A 0 +#define BIT_SEG7_B 1 +#define BIT_SEG7_C 2 +#define BIT_SEG7_D 3 +#define BIT_SEG7_E 4 +#define BIT_SEG7_F 5 +#define BIT_SEG7_G 6 +#define BIT_SEG7_RESERVED 7 + +struct seg7_conversion_map { + unsigned char table[128]; +}; + +static __inline__ int map_to_seg7(struct seg7_conversion_map *map, int c) +{ + return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL; +} + +#define SEG7_CONVERSION_MAP(_name, _map) \ + struct seg7_conversion_map _name = { .table = { _map } } + +/* + * It is recommended to use a facility that allows user space to redefine + * custom character sets for LCD devices. Please use a sysfs interface + * as described above. + */ +#define MAP_TO_SEG7_SYSFS_FILE "map_seg7" + +/******************************************************************************* + * ASCII conversion table + ******************************************************************************/ + +#define _SEG7(l,a,b,c,d,e,f,g) \ + ( a<<BIT_SEG7_A | b<<BIT_SEG7_B | c<<BIT_SEG7_C | d<<BIT_SEG7_D | \ + e<<BIT_SEG7_E | f<<BIT_SEG7_F | g<<BIT_SEG7_G ) + +#define _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \ + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + +#define _MAP_33_47_ASCII_SEG7_SYMBOL \ + _SEG7('!',0,0,0,0,1,1,0), _SEG7('"',0,1,0,0,0,1,0), _SEG7('#',0,1,1,0,1,1,0),\ + _SEG7('$',1,0,1,1,0,1,1), _SEG7('%',0,0,1,0,0,1,0), _SEG7('&',1,0,1,1,1,1,1),\ + _SEG7('\'',0,0,0,0,0,1,0),_SEG7('(',1,0,0,1,1,1,0), _SEG7(')',1,1,1,1,0,0,0),\ + _SEG7('*',0,1,1,0,1,1,1), _SEG7('+',0,1,1,0,0,0,1), _SEG7(',',0,0,0,0,1,0,0),\ + _SEG7('-',0,0,0,0,0,0,1), _SEG7('.',0,0,0,0,1,0,0), _SEG7('/',0,1,0,0,1,0,1), + +#define _MAP_48_57_ASCII_SEG7_NUMERIC \ + _SEG7('0',1,1,1,1,1,1,0), _SEG7('1',0,1,1,0,0,0,0), _SEG7('2',1,1,0,1,1,0,1),\ + _SEG7('3',1,1,1,1,0,0,1), _SEG7('4',0,1,1,0,0,1,1), _SEG7('5',1,0,1,1,0,1,1),\ + _SEG7('6',1,0,1,1,1,1,1), _SEG7('7',1,1,1,0,0,0,0), _SEG7('8',1,1,1,1,1,1,1),\ + _SEG7('9',1,1,1,1,0,1,1), + +#define _MAP_58_64_ASCII_SEG7_SYMBOL \ + _SEG7(':',0,0,0,1,0,0,1), _SEG7(';',0,0,0,1,0,0,1), _SEG7('<',1,0,0,0,0,1,1),\ + _SEG7('=',0,0,0,1,0,0,1), _SEG7('>',1,1,0,0,0,0,1), _SEG7('?',1,1,1,0,0,1,0),\ + _SEG7('@',1,1,0,1,1,1,1), + +#define _MAP_65_90_ASCII_SEG7_ALPHA_UPPR \ + _SEG7('A',1,1,1,0,1,1,1), _SEG7('B',1,1,1,1,1,1,1), _SEG7('C',1,0,0,1,1,1,0),\ + _SEG7('D',1,1,1,1,1,1,0), _SEG7('E',1,0,0,1,1,1,1), _SEG7('F',1,0,0,0,1,1,1),\ + _SEG7('G',1,1,1,1,0,1,1), _SEG7('H',0,1,1,0,1,1,1), _SEG7('I',0,1,1,0,0,0,0),\ + _SEG7('J',0,1,1,1,0,0,0), _SEG7('K',0,1,1,0,1,1,1), _SEG7('L',0,0,0,1,1,1,0),\ + _SEG7('M',1,1,1,0,1,1,0), _SEG7('N',1,1,1,0,1,1,0), _SEG7('O',1,1,1,1,1,1,0),\ + _SEG7('P',1,1,0,0,1,1,1), _SEG7('Q',1,1,1,1,1,1,0), _SEG7('R',1,1,1,0,1,1,1),\ + _SEG7('S',1,0,1,1,0,1,1), _SEG7('T',0,0,0,1,1,1,1), _SEG7('U',0,1,1,1,1,1,0),\ + _SEG7('V',0,1,1,1,1,1,0), _SEG7('W',0,1,1,1,1,1,1), _SEG7('X',0,1,1,0,1,1,1),\ + _SEG7('Y',0,1,1,0,0,1,1), 
_SEG7('Z',1,1,0,1,1,0,1), + +#define _MAP_91_96_ASCII_SEG7_SYMBOL \ + _SEG7('[',1,0,0,1,1,1,0), _SEG7('\\',0,0,1,0,0,1,1),_SEG7(']',1,1,1,1,0,0,0),\ + _SEG7('^',1,1,0,0,0,1,0), _SEG7('_',0,0,0,1,0,0,0), _SEG7('`',0,1,0,0,0,0,0), + +#define _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \ + _SEG7('A',1,1,1,0,1,1,1), _SEG7('b',0,0,1,1,1,1,1), _SEG7('c',0,0,0,1,1,0,1),\ + _SEG7('d',0,1,1,1,1,0,1), _SEG7('E',1,0,0,1,1,1,1), _SEG7('F',1,0,0,0,1,1,1),\ + _SEG7('G',1,1,1,1,0,1,1), _SEG7('h',0,0,1,0,1,1,1), _SEG7('i',0,0,1,0,0,0,0),\ + _SEG7('j',0,0,1,1,0,0,0), _SEG7('k',0,0,1,0,1,1,1), _SEG7('L',0,0,0,1,1,1,0),\ + _SEG7('M',1,1,1,0,1,1,0), _SEG7('n',0,0,1,0,1,0,1), _SEG7('o',0,0,1,1,1,0,1),\ + _SEG7('P',1,1,0,0,1,1,1), _SEG7('q',1,1,1,0,0,1,1), _SEG7('r',0,0,0,0,1,0,1),\ + _SEG7('S',1,0,1,1,0,1,1), _SEG7('T',0,0,0,1,1,1,1), _SEG7('u',0,0,1,1,1,0,0),\ + _SEG7('v',0,0,1,1,1,0,0), _SEG7('W',0,1,1,1,1,1,1), _SEG7('X',0,1,1,0,1,1,1),\ + _SEG7('y',0,1,1,1,0,1,1), _SEG7('Z',1,1,0,1,1,0,1), + +#define _MAP_123_126_ASCII_SEG7_SYMBOL \ + _SEG7('{',1,0,0,1,1,1,0), _SEG7('|',0,0,0,0,1,1,0), _SEG7('}',1,1,1,1,0,0,0),\ + _SEG7('~',1,0,0,0,0,0,0), + +/* Maps */ + +/* This set tries to map as close as possible to the visible characteristics + * of the ASCII symbol, lowercase and uppercase letters may differ in + * presentation on the display. + */ +#define MAP_ASCII7SEG_ALPHANUM \ + _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \ + _MAP_33_47_ASCII_SEG7_SYMBOL \ + _MAP_48_57_ASCII_SEG7_NUMERIC \ + _MAP_58_64_ASCII_SEG7_SYMBOL \ + _MAP_65_90_ASCII_SEG7_ALPHA_UPPR \ + _MAP_91_96_ASCII_SEG7_SYMBOL \ + _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \ + _MAP_123_126_ASCII_SEG7_SYMBOL + +/* This set tries to map as close as possible to the symbolic characteristics + * of the ASCII character for maximum discrimination. + * For now this means all alpha chars are in lower case representations. + * (This for example facilitates the use of hex numbers with uppercase input.) 
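+ *
+ * A conversion map using this table can be declared directly, e.g.:
+ *	static SEG7_CONVERSION_MAP(map_seg7_lc, MAP_ASCII7SEG_ALPHANUM_LC);
+ * (map_seg7_lc is a placeholder name; the SEG7_DEFAULT_MAP() helper below
+ * always selects MAP_ASCII7SEG_ALPHANUM.)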
+ */ +#define MAP_ASCII7SEG_ALPHANUM_LC \ + _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \ + _MAP_33_47_ASCII_SEG7_SYMBOL \ + _MAP_48_57_ASCII_SEG7_NUMERIC \ + _MAP_58_64_ASCII_SEG7_SYMBOL \ + _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \ + _MAP_91_96_ASCII_SEG7_SYMBOL \ + _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \ + _MAP_123_126_ASCII_SEG7_SYMBOL + +#define SEG7_DEFAULT_MAP(_name) \ + SEG7_CONVERSION_MAP(_name,MAP_ASCII7SEG_ALPHANUM) + +#endif /* MAP_TO_7SEGMENT_H */ + diff --git a/include/linux/maple.h b/include/linux/maple.h new file mode 100644 index 00000000..c37288b2 --- /dev/null +++ b/include/linux/maple.h @@ -0,0 +1,105 @@ +#ifndef __LINUX_MAPLE_H +#define __LINUX_MAPLE_H + +#include <mach/maple.h> + +struct device; +extern struct bus_type maple_bus_type; + +/* Maple Bus command and response codes */ +enum maple_code { + MAPLE_RESPONSE_FILEERR = -5, + MAPLE_RESPONSE_AGAIN, /* retransmit */ + MAPLE_RESPONSE_BADCMD, + MAPLE_RESPONSE_BADFUNC, + MAPLE_RESPONSE_NONE, /* unit didn't respond*/ + MAPLE_COMMAND_DEVINFO = 1, + MAPLE_COMMAND_ALLINFO, + MAPLE_COMMAND_RESET, + MAPLE_COMMAND_KILL, + MAPLE_RESPONSE_DEVINFO, + MAPLE_RESPONSE_ALLINFO, + MAPLE_RESPONSE_OK, + MAPLE_RESPONSE_DATATRF, + MAPLE_COMMAND_GETCOND, + MAPLE_COMMAND_GETMINFO, + MAPLE_COMMAND_BREAD, + MAPLE_COMMAND_BWRITE, + MAPLE_COMMAND_BSYNC, + MAPLE_COMMAND_SETCOND, + MAPLE_COMMAND_MICCONTROL +}; + +enum maple_file_errors { + MAPLE_FILEERR_INVALID_PARTITION = 0x01000000, + MAPLE_FILEERR_PHASE_ERROR = 0x02000000, + MAPLE_FILEERR_INVALID_BLOCK = 0x04000000, + MAPLE_FILEERR_WRITE_ERROR = 0x08000000, + MAPLE_FILEERR_INVALID_WRITE_LENGTH = 0x10000000, + MAPLE_FILEERR_BAD_CRC = 0x20000000 +}; + +struct maple_buffer { + char bufx[0x400]; + void *buf; +}; + +struct mapleq { + struct list_head list; + struct maple_device *dev; + struct maple_buffer *recvbuf; + void *sendbuf, *recvbuf_p2; + unsigned char length; + enum maple_code command; +}; + +struct maple_devinfo { + unsigned long function; + unsigned long function_data[3]; + unsigned char area_code; + unsigned char connector_direction; + char product_name[31]; + char product_licence[61]; + unsigned short standby_power; + unsigned short max_power; +}; + +struct maple_device { + struct maple_driver *driver; + struct mapleq *mq; + void (*callback) (struct mapleq * mq); + void (*fileerr_handler)(struct maple_device *mdev, void *recvbuf); + int (*can_unload)(struct maple_device *mdev); + unsigned long when, interval, function; + struct maple_devinfo devinfo; + unsigned char port, unit; + char product_name[32]; + char product_licence[64]; + atomic_t busy; + wait_queue_head_t maple_wait; + struct device dev; +}; + +struct maple_driver { + unsigned long function; + struct device_driver drv; +}; + +void maple_getcond_callback(struct maple_device *dev, + void (*callback) (struct mapleq * mq), + unsigned long interval, + unsigned long function); +int maple_driver_register(struct maple_driver *); +void maple_driver_unregister(struct maple_driver *); + +int maple_add_packet(struct maple_device *mdev, u32 function, + u32 command, u32 length, void *data); +void maple_clear_dev(struct maple_device *mdev); + +#define to_maple_dev(n) container_of(n, struct maple_device, dev) +#define to_maple_driver(n) container_of(n, struct maple_driver, drv) + +#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev) +#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) + +#endif /* __LINUX_MAPLE_H */ diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h new file mode 100644 index 00000000..dd3c34eb --- 
/dev/null +++ b/include/linux/marvell_phy.h @@ -0,0 +1,22 @@ +#ifndef _MARVELL_PHY_H +#define _MARVELL_PHY_H + +/* Mask used for ID comparisons */ +#define MARVELL_PHY_ID_MASK 0xfffffff0 + +/* Known PHY IDs */ +#define MARVELL_PHY_ID_88E1101 0x01410c60 +#define MARVELL_PHY_ID_88E1112 0x01410c90 +#define MARVELL_PHY_ID_88E1111 0x01410cc0 +#define MARVELL_PHY_ID_88E1118 0x01410e10 +#define MARVELL_PHY_ID_88E1121R 0x01410cb0 +#define MARVELL_PHY_ID_88E1145 0x01410cd0 +#define MARVELL_PHY_ID_88E1149R 0x01410e50 +#define MARVELL_PHY_ID_88E1240 0x01410e30 +#define MARVELL_PHY_ID_88E1318S 0x01410e90 + +/* struct phy_device dev_flags definitions */ +#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 +#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 + +#endif /* _MARVELL_PHY_H */ diff --git a/include/linux/math64.h b/include/linux/math64.h new file mode 100644 index 00000000..b8ba8554 --- /dev/null +++ b/include/linux/math64.h @@ -0,0 +1,121 @@ +#ifndef _LINUX_MATH64_H +#define _LINUX_MATH64_H + +#include <linux/types.h> +#include <asm/div64.h> + +#if BITS_PER_LONG == 64 + +#define div64_long(x,y) div64_s64((x),(y)) + +/** + * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder + * + * This is commonly provided by 32bit archs to provide an optimized 64bit + * divide. + */ +static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +/** + * div_s64_rem - signed 64bit divide with 32bit divisor with remainder + */ +static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +/** + * div64_u64 - unsigned 64bit divide with 64bit divisor + */ +static inline u64 div64_u64(u64 dividend, u64 divisor) +{ + return dividend / divisor; +} + +/** + * div64_s64 - signed 64bit divide with 64bit divisor + */ +static inline s64 div64_s64(s64 dividend, s64 divisor) +{ + return dividend / divisor; +} + +#elif BITS_PER_LONG == 32 + +#define div64_long(x,y) div_s64((x),(y)) + +#ifndef div_u64_rem +static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +{ + *remainder = do_div(dividend, divisor); + return dividend; +} +#endif + +#ifndef div_s64_rem +extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder); +#endif + +#ifndef div64_u64 +extern u64 div64_u64(u64 dividend, u64 divisor); +#endif + +#ifndef div64_s64 +extern s64 div64_s64(s64 dividend, s64 divisor); +#endif + +#endif /* BITS_PER_LONG */ + +/** + * div_u64 - unsigned 64bit divide with 32bit divisor + * + * This is the most common 64bit divide and should be used if possible, + * as many 32bit archs can optimize this variant better than a full 64bit + * divide. + */ +#ifndef div_u64 +static inline u64 div_u64(u64 dividend, u32 divisor) +{ + u32 remainder; + return div_u64_rem(dividend, divisor, &remainder); +} +#endif + +/** + * div_s64 - signed 64bit divide with 32bit divisor + */ +#ifndef div_s64 +static inline s64 div_s64(s64 dividend, s32 divisor) +{ + s32 remainder; + return div_s64_rem(dividend, divisor, &remainder); +} +#endif + +u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); + +static __always_inline u32 +__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) +{ + u32 ret = 0; + + while (dividend >= divisor) { + /* The following asm() prevents the compiler from + optimising this loop into a modulo operation. 
*/ + asm("" : "+rm"(dividend)); + + dividend -= divisor; + ret++; + } + + *remainder = dividend; + + return ret; +} + +#endif /* _LINUX_MATH64_H */ diff --git a/include/linux/matroxfb.h b/include/linux/matroxfb.h new file mode 100644 index 00000000..8c22a893 --- /dev/null +++ b/include/linux/matroxfb.h @@ -0,0 +1,42 @@ +#ifndef __LINUX_MATROXFB_H__ +#define __LINUX_MATROXFB_H__ + +#include <asm/ioctl.h> +#include <linux/types.h> +#include <linux/videodev2.h> +#include <linux/fb.h> + +struct matroxioc_output_mode { + __u32 output; /* which output */ +#define MATROXFB_OUTPUT_PRIMARY 0x0000 +#define MATROXFB_OUTPUT_SECONDARY 0x0001 +#define MATROXFB_OUTPUT_DFP 0x0002 + __u32 mode; /* which mode */ +#define MATROXFB_OUTPUT_MODE_PAL 0x0001 +#define MATROXFB_OUTPUT_MODE_NTSC 0x0002 +#define MATROXFB_OUTPUT_MODE_MONITOR 0x0080 +}; +#define MATROXFB_SET_OUTPUT_MODE _IOW('n',0xFA,size_t) +#define MATROXFB_GET_OUTPUT_MODE _IOWR('n',0xFA,size_t) + +/* bitfield */ +#define MATROXFB_OUTPUT_CONN_PRIMARY (1 << MATROXFB_OUTPUT_PRIMARY) +#define MATROXFB_OUTPUT_CONN_SECONDARY (1 << MATROXFB_OUTPUT_SECONDARY) +#define MATROXFB_OUTPUT_CONN_DFP (1 << MATROXFB_OUTPUT_DFP) +/* connect these outputs to this framebuffer */ +#define MATROXFB_SET_OUTPUT_CONNECTION _IOW('n',0xF8,size_t) +/* which outputs are connected to this framebuffer */ +#define MATROXFB_GET_OUTPUT_CONNECTION _IOR('n',0xF8,size_t) +/* which outputs are available for this framebuffer */ +#define MATROXFB_GET_AVAILABLE_OUTPUTS _IOR('n',0xF9,size_t) +/* which outputs exist on this framebuffer */ +#define MATROXFB_GET_ALL_OUTPUTS _IOR('n',0xFB,size_t) + +enum matroxfb_ctrl_id { + MATROXFB_CID_TESTOUT = V4L2_CID_PRIVATE_BASE, + MATROXFB_CID_DEFLICKER, + MATROXFB_CID_LAST +}; + +#endif + diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h new file mode 100644 index 00000000..ad97b06c --- /dev/null +++ b/include/linux/max17040_battery.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2009 Samsung Electronics + * Minkyu Kang <mk7.kang@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __MAX17040_BATTERY_H_ +#define __MAX17040_BATTERY_H_ + +struct max17040_platform_data { + int (*battery_online)(void); + int (*charger_online)(void); + int (*charger_enable)(void); +}; + +#endif diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h new file mode 100644 index 00000000..5525d370 --- /dev/null +++ b/include/linux/mbcache.h @@ -0,0 +1,53 @@ +/* + File: linux/mbcache.h + + (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org> +*/ + +struct mb_cache_entry { + struct list_head e_lru_list; + struct mb_cache *e_cache; + unsigned short e_used; + unsigned short e_queued; + struct block_device *e_bdev; + sector_t e_block; + struct list_head e_block_list; + struct { + struct list_head o_list; + unsigned int o_key; + } e_index; +}; + +struct mb_cache { + struct list_head c_cache_list; + const char *c_name; + atomic_t c_entry_count; + int c_max_entries; + int c_bucket_bits; + struct kmem_cache *c_entry_cache; + struct list_head *c_block_hash; + struct list_head *c_index_hash; +}; + +/* Functions on caches */ + +struct mb_cache *mb_cache_create(const char *, int); +void mb_cache_shrink(struct block_device *); +void mb_cache_destroy(struct mb_cache *); + +/* Functions on cache entries */ + +struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t); +int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *, + sector_t, unsigned int); +void mb_cache_entry_release(struct mb_cache_entry *); +void mb_cache_entry_free(struct mb_cache_entry *); +struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *, + struct block_device *, + sector_t); +struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, + struct block_device *, + unsigned int); +struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *, + struct block_device *, + unsigned int); diff --git a/include/linux/mbus.h b/include/linux/mbus.h new file mode 100644 index 00000000..efa1a6d7 --- /dev/null +++ b/include/linux/mbus.h @@ -0,0 +1,47 @@ +/* + * Marvell MBUS common definitions. + * + * Copyright (C) 2008 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LINUX_MBUS_H +#define __LINUX_MBUS_H + +struct mbus_dram_target_info +{ + /* + * The 4-bit MBUS target ID of the DRAM controller. + */ + u8 mbus_dram_target_id; + + /* + * The base address, size, and MBUS attribute ID for each + * of the possible DRAM chip selects. Peripherals are + * required to support at least 4 decode windows. + */ + int num_cs; + struct mbus_dram_window { + u8 cs_index; + u8 mbus_attr; + u32 base; + u32 size; + } cs[4]; +}; + +/* + * The Marvell mbus is to be found only on SOCs from the Orion family + * at the moment. Provide a dummy stub for other architectures. + */ +#ifdef CONFIG_PLAT_ORION +extern const struct mbus_dram_target_info *mv_mbus_dram_info(void); +#else +static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) +{ + return NULL; +} +#endif +#endif diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h new file mode 100644 index 00000000..2f4e957a --- /dev/null +++ b/include/linux/mc146818rtc.h @@ -0,0 +1,119 @@ +/* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM + * Copyright Torsten Duwe <duwe@informatik.uni-erlangen.de> 1993 + * derived from Data Sheet, Copyright Motorola 1984 (!). 
+ * It was written to be part of the Linux operating system. + */ +/* permission is hereby granted to copy, modify and redistribute this code + * in terms of the GNU Library General Public License, Version 2 or later, + * at your option. + */ + +#ifndef _MC146818RTC_H +#define _MC146818RTC_H + +#include <asm/io.h> +#include <linux/rtc.h> /* get the user-level API */ +#include <asm/mc146818rtc.h> /* register access macros */ + +#ifdef __KERNEL__ +#include <linux/spinlock.h> /* spinlock_t */ +extern spinlock_t rtc_lock; /* serialize CMOS RAM access */ + +/* Some RTCs extend the mc146818 register set to support alarms of more + * than 24 hours in the future; or dates that include a century code. + * This platform_data structure can pass this information to the driver. + * + * Also, some platforms need suspend()/resume() hooks to kick in special + * handling of wake alarms, e.g. activating ACPI BIOS hooks or setting up + * a separate wakeup alarm used by some almost-clone chips. + */ +struct cmos_rtc_board_info { + void (*wake_on)(struct device *dev); + void (*wake_off)(struct device *dev); + + u8 rtc_day_alarm; /* zero, or register index */ + u8 rtc_mon_alarm; /* zero, or register index */ + u8 rtc_century; /* zero, or register index */ +}; +#endif + +/********************************************************************** + * register summary + **********************************************************************/ +#define RTC_SECONDS 0 +#define RTC_SECONDS_ALARM 1 +#define RTC_MINUTES 2 +#define RTC_MINUTES_ALARM 3 +#define RTC_HOURS 4 +#define RTC_HOURS_ALARM 5 +/* RTC_*_alarm is always true if 2 MSBs are set */ +# define RTC_ALARM_DONT_CARE 0xC0 + +#define RTC_DAY_OF_WEEK 6 +#define RTC_DAY_OF_MONTH 7 +#define RTC_MONTH 8 +#define RTC_YEAR 9 + +/* control registers - Moto names + */ +#define RTC_REG_A 10 +#define RTC_REG_B 11 +#define RTC_REG_C 12 +#define RTC_REG_D 13 + +/********************************************************************** + * register details + **********************************************************************/ +#define RTC_FREQ_SELECT RTC_REG_A + +/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus, + * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete, + * totalling to a max high interval of 2.228 ms. + */ +# define RTC_UIP 0x80 +# define RTC_DIV_CTL 0x70 + /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */ +# define RTC_REF_CLCK_4MHZ 0x00 +# define RTC_REF_CLCK_1MHZ 0x10 +# define RTC_REF_CLCK_32KHZ 0x20 + /* 2 values for divider stage reset, others for "testing purposes only" */ +# define RTC_DIV_RESET1 0x60 +# define RTC_DIV_RESET2 0x70 + /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */ +# define RTC_RATE_SELECT 0x0F + +/**********************************************************************/ +#define RTC_CONTROL RTC_REG_B +# define RTC_SET 0x80 /* disable updates for clock setting */ +# define RTC_PIE 0x40 /* periodic interrupt enable */ +# define RTC_AIE 0x20 /* alarm interrupt enable */ +# define RTC_UIE 0x10 /* update-finished interrupt enable */ +# define RTC_SQWE 0x08 /* enable square-wave output */ +# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ +# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ +# define RTC_DST_EN 0x01 /* auto switch DST - works f. 
USA only */ + +/**********************************************************************/ +#define RTC_INTR_FLAGS RTC_REG_C +/* caution - cleared by read */ +# define RTC_IRQF 0x80 /* any of the following 3 is active */ +# define RTC_PF 0x40 +# define RTC_AF 0x20 +# define RTC_UF 0x10 + +/**********************************************************************/ +#define RTC_VALID RTC_REG_D +# define RTC_VRT 0x80 /* valid RAM and time */ +/**********************************************************************/ + +#ifndef ARCH_RTC_LOCATION /* Override by <asm/mc146818rtc.h>? */ + +#define RTC_IO_EXTENT 0x8 +#define RTC_IO_EXTENT_USED 0x2 +#define RTC_IOMAPPED 1 /* Default to I/O mapping. */ + +#else +#define RTC_IO_EXTENT_USED RTC_IO_EXTENT +#endif /* ARCH_RTC_LOCATION */ + +#endif /* _MC146818RTC_H */ diff --git a/include/linux/mc6821.h b/include/linux/mc6821.h new file mode 100644 index 00000000..28e301e2 --- /dev/null +++ b/include/linux/mc6821.h @@ -0,0 +1,51 @@ +#ifndef _MC6821_H_ +#define _MC6821_H_ + +/* + * This file describes the memory mapping of the MC6821 PIA. + * The unions describe overlaid registers. Which of them is used is + * determined by bit 2 of the corresponding control register. + * This file expects PIA_REG_PADWIDTH to be defined as the numeric + * value of the register spacing. + * + * Data came from MFC-31-Developer Kit (from Ralph Seidel, + * zodiac@darkness.gun.de) and Motorola Data Sheet (from + * Richard Hirst, srh@gpt.co.uk) + * + * 6.11.95 copyright Joerg Dorchain (dorchain@mpi-sb.mpg.de) + * + */ + +#ifndef PIA_REG_PADWIDTH +#define PIA_REG_PADWIDTH 255 +#endif + +struct pia { + union { + volatile u_char pra; + volatile u_char ddra; + } ua; + u_char pad1[PIA_REG_PADWIDTH]; + volatile u_char cra; + u_char pad2[PIA_REG_PADWIDTH]; + union { + volatile u_char prb; + volatile u_char ddrb; + } ub; + u_char pad3[PIA_REG_PADWIDTH]; + volatile u_char crb; + u_char pad4[PIA_REG_PADWIDTH]; +}; + +#define ppra ua.pra +#define pddra ua.ddra +#define pprb ub.prb +#define pddrb ub.ddrb + +#define PIA_C1_ENABLE_IRQ (1<<0) +#define PIA_C1_LOW_TO_HIGH (1<<1) +#define PIA_DDR (1<<2) +#define PIA_IRQ2 (1<<6) +#define PIA_IRQ1 (1<<7) + +#endif diff --git a/include/linux/mca-legacy.h b/include/linux/mca-legacy.h new file mode 100644 index 00000000..7a3aea84 --- /dev/null +++ b/include/linux/mca-legacy.h @@ -0,0 +1,66 @@ +/* -*- mode: c; c-basic-offset: 8 -*- */ + +/* These are the function prototypes for the old legacy MCA interface + * + * Please move your driver to the new sysfs based one instead */ + +#ifndef _LINUX_MCA_LEGACY_H +#define _LINUX_MCA_LEGACY_H + +#include <linux/mca.h> + +#warning "MCA legacy - please move your driver to the new sysfs api" + +/* MCA_NOTFOUND is an error condition. The other two indicate + * motherboard POS registers contain the adapter. They might be + * returned by the mca_find_adapter() function, and can be used as + * arguments to mca_read_stored_pos(). I'm not going to allow direct + * access to the motherboard registers until we run across an adapter + * that requires it. We don't know enough about them to know if it's + * safe. + * + * See Documentation/mca.txt or one of the existing drivers for + * more information. + */ +#define MCA_NOTFOUND (-1) + + + +/* Returns the slot of the first enabled adapter matching id. User can + * specify a starting slot beyond zero, to deal with detecting multiple + * devices. Returns MCA_NOTFOUND if id not found. Also checks the + * integrated adapters. 
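+ *
+ * Illustrative sketch (the id 0x6fc0 and setup_my_device() are made
+ * up for the example):
+ *
+ *	int slot = mca_find_adapter(0x6fc0, 0);
+ *
+ *	while (slot != MCA_NOTFOUND) {
+ *		unsigned char pos2 = mca_read_stored_pos(slot, 2);
+ *
+ *		setup_my_device(slot, pos2);
+ *		slot = mca_find_adapter(0x6fc0, slot + 1);
+ *	}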
+ */ +extern int mca_find_adapter(int id, int start); +extern int mca_find_unused_adapter(int id, int start); + +extern int mca_mark_as_used(int slot); +extern void mca_mark_as_unused(int slot); + +/* gets a byte out of POS register (stored in memory) */ +extern unsigned char mca_read_stored_pos(int slot, int reg); + +/* This can be expanded later. Right now, it gives us a way of + * getting meaningful information into the MCA_info structure, + * so we can have a more interesting /proc/mca. + */ +extern void mca_set_adapter_name(int slot, char* name); + +/* These routines actually mess with the hardware POS registers. They + * temporarily disable the device (and interrupts), so make sure you know + * what you're doing if you use them. Furthermore, writing to a POS may + * result in two devices trying to share a resource, which in turn can + * result in multiple devices sharing memory spaces, IRQs, or even trashing + * hardware. YOU HAVE BEEN WARNED. + * + * You can only access slots with this. Motherboard registers are off + * limits. + */ + +/* read a byte from the specified POS register. */ +extern unsigned char mca_read_pos(int slot, int reg); + +/* write a byte to the specified POS register. */ +extern void mca_write_pos(int slot, int reg, unsigned char byte); + +#endif diff --git a/include/linux/mca.h b/include/linux/mca.h new file mode 100644 index 00000000..37972704 --- /dev/null +++ b/include/linux/mca.h @@ -0,0 +1,148 @@ +/* + * Header for Microchannel Architecture Bus + * Written by Martin Kolinek, February 1996 + */ + +#ifndef _LINUX_MCA_H +#define _LINUX_MCA_H + +#include <linux/device.h> + +#ifdef CONFIG_MCA +#include <asm/mca.h> + +extern int MCA_bus; +#else +#define MCA_bus 0 +#endif + +/* This sets up an information callback for /proc/mca/slot?. The + * function is called with the buffer, slot, and device pointer (or + * some equally informative context information, or nothing, if you + * prefer), and is expected to put useful information into the + * buffer. The adapter name, id, and POS registers get printed + * before this is called though, so don't do it again. + * + * This should be called with a NULL procfn when a module + * unregisters, thus preventing kernel crashes and other such + * nastiness. + */ +typedef int (*MCA_ProcFn)(char* buf, int slot, void* dev); + +/* Should only be called by the NMI interrupt handler, this will do some + * fancy stuff to figure out what might have generated a NMI. + */ +extern void mca_handle_nmi(void); + +enum MCA_AdapterStatus { + MCA_ADAPTER_NORMAL = 0, + MCA_ADAPTER_NONE = 1, + MCA_ADAPTER_DISABLED = 2, + MCA_ADAPTER_ERROR = 3 +}; + +struct mca_device { + u64 dma_mask; + int pos_id; + int slot; + + /* index into id_table, set by the bus match routine */ + int index; + + /* is there a driver installed? 
0 - No, 1 - Yes */ + int driver_loaded; + /* POS registers */ + unsigned char pos[8]; + /* if a pseudo adapter of the motherboard, this is the motherboard + * register value to use for setup cycles */ + short pos_register; + + enum MCA_AdapterStatus status; +#ifdef CONFIG_MCA_PROC_FS + /* name of the proc/mca file */ + char procname[8]; + /* /proc info callback */ + MCA_ProcFn procfn; + /* device/context info for proc callback */ + void *proc_dev; +#endif + struct device dev; + char name[32]; +}; +#define to_mca_device(mdev) container_of(mdev, struct mca_device, dev) + +struct mca_bus_accessor_functions { + unsigned char (*mca_read_pos)(struct mca_device *, int reg); + void (*mca_write_pos)(struct mca_device *, int reg, + unsigned char byte); + int (*mca_transform_irq)(struct mca_device *, int irq); + int (*mca_transform_ioport)(struct mca_device *, + int region); + void * (*mca_transform_memory)(struct mca_device *, + void *memory); +}; + +struct mca_bus { + u64 default_dma_mask; + int number; + struct mca_bus_accessor_functions f; + struct device dev; + char name[32]; +}; +#define to_mca_bus(mdev) container_of(mdev, struct mca_bus, dev) + +struct mca_driver { + const short *id_table; + void *driver_data; + int integrated_id; + struct device_driver driver; +}; +#define to_mca_driver(mdriver) container_of(mdriver, struct mca_driver, driver) + +/* Ongoing supported API functions */ +extern struct mca_device *mca_find_device_by_slot(int slot); +extern int mca_system_init(void); +extern struct mca_bus *mca_attach_bus(int); + +extern unsigned char mca_device_read_stored_pos(struct mca_device *mca_dev, + int reg); +extern unsigned char mca_device_read_pos(struct mca_device *mca_dev, int reg); +extern void mca_device_write_pos(struct mca_device *mca_dev, int reg, + unsigned char byte); +extern int mca_device_transform_irq(struct mca_device *mca_dev, int irq); +extern int mca_device_transform_ioport(struct mca_device *mca_dev, int port); +extern void *mca_device_transform_memory(struct mca_device *mca_dev, + void *mem); +extern int mca_device_claimed(struct mca_device *mca_dev); +extern void mca_device_set_claim(struct mca_device *mca_dev, int val); +extern void mca_device_set_name(struct mca_device *mca_dev, const char *name); +static inline char *mca_device_get_name(struct mca_device *mca_dev) +{ + return mca_dev ? 
mca_dev->name : NULL; +} + +extern enum MCA_AdapterStatus mca_device_status(struct mca_device *mca_dev); + +extern struct bus_type mca_bus_type; + +extern int mca_register_driver(struct mca_driver *drv); +extern int mca_register_driver_integrated(struct mca_driver *, int); +extern void mca_unregister_driver(struct mca_driver *drv); + +/* WARNING: only called by the boot time device setup */ +extern int mca_register_device(int bus, struct mca_device *mca_dev); + +#ifdef CONFIG_MCA_PROC_FS +extern void mca_do_proc_init(void); +extern void mca_set_adapter_procfn(int slot, MCA_ProcFn, void* dev); +#else +static inline void mca_do_proc_init(void) +{ +} + +static inline void mca_set_adapter_procfn(int slot, MCA_ProcFn fn, void* dev) +{ +} +#endif + +#endif /* _LINUX_MCA_H */ diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h new file mode 100644 index 00000000..76f52bbb --- /dev/null +++ b/include/linux/mdio-bitbang.h @@ -0,0 +1,45 @@ +#ifndef __LINUX_MDIO_BITBANG_H +#define __LINUX_MDIO_BITBANG_H + +#include <linux/phy.h> + +struct module; + +struct mdiobb_ctrl; + +struct mdiobb_ops { + struct module *owner; + + /* Set the Management Data Clock high if level is one, + * low if level is zero. + */ + void (*set_mdc)(struct mdiobb_ctrl *ctrl, int level); + + /* Configure the Management Data I/O pin as an input if + * "output" is zero, or an output if "output" is one. + */ + void (*set_mdio_dir)(struct mdiobb_ctrl *ctrl, int output); + + /* Set the Management Data I/O pin high if value is one, + * low if "value" is zero. This may only be called + * when the MDIO pin is configured as an output. + */ + void (*set_mdio_data)(struct mdiobb_ctrl *ctrl, int value); + + /* Retrieve the state Management Data I/O pin. */ + int (*get_mdio_data)(struct mdiobb_ctrl *ctrl); +}; + +struct mdiobb_ctrl { + const struct mdiobb_ops *ops; + /* reset callback */ + int (*reset)(struct mii_bus *bus); +}; + +/* The returned bus is not yet registered with the phy layer. */ +struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl); + +/* The bus must already have been unregistered. */ +void free_mdio_bitbang(struct mii_bus *bus); + +#endif diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h new file mode 100644 index 00000000..7c9fe3c2 --- /dev/null +++ b/include/linux/mdio-gpio.h @@ -0,0 +1,27 @@ +/* + * MDIO-GPIO bus platform data structures + * + * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __LINUX_MDIO_GPIO_H +#define __LINUX_MDIO_GPIO_H + +#include <linux/mdio-bitbang.h> + +struct mdio_gpio_platform_data { + /* GPIO numbers for bus pins */ + unsigned int mdc; + unsigned int mdio; + + unsigned int phy_mask; + int irqs[PHY_MAX_ADDR]; + /* reset callback */ + int (*reset)(struct mii_bus *bus); +}; + +#endif /* __LINUX_MDIO_GPIO_H */ diff --git a/include/linux/mdio.h b/include/linux/mdio.h new file mode 100644 index 00000000..dfb94795 --- /dev/null +++ b/include/linux/mdio.h @@ -0,0 +1,361 @@ +/* + * linux/mdio.h: definitions for MDIO (clause 45) transceivers + * Copyright 2006-2009 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef __LINUX_MDIO_H__ +#define __LINUX_MDIO_H__ + +#include <linux/types.h> +#include <linux/mii.h> + +/* MDIO Manageable Devices (MMDs). */ +#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment/ + * Physical Medium Dependent */ +#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */ +#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */ +#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */ +#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */ +#define MDIO_MMD_TC 6 /* Transmission Convergence */ +#define MDIO_MMD_AN 7 /* Auto-Negotiation */ +#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */ +#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */ +#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */ + +/* Generic MDIO registers. */ +#define MDIO_CTRL1 MII_BMCR +#define MDIO_STAT1 MII_BMSR +#define MDIO_DEVID1 MII_PHYSID1 +#define MDIO_DEVID2 MII_PHYSID2 +#define MDIO_SPEED 4 /* Speed ability */ +#define MDIO_DEVS1 5 /* Devices in package */ +#define MDIO_DEVS2 6 +#define MDIO_CTRL2 7 /* 10G control 2 */ +#define MDIO_STAT2 8 /* 10G status 2 */ +#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */ +#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */ +#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */ +#define MDIO_PKGID1 14 /* Package identifier */ +#define MDIO_PKGID2 15 +#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */ +#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */ +#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */ + +/* Media-dependent registers. */ +#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ +#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */ +#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A. + * Lanes B-D are numbered 134-136. */ +#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */ +#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ +#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ +#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ +#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */ + +/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */ +#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ +#define MDIO_PMA_LASI_TXCTRL 0x9001 /* TX_ALARM control */ +#define MDIO_PMA_LASI_CTRL 0x9002 /* LASI control */ +#define MDIO_PMA_LASI_RXSTAT 0x9003 /* RX_ALARM status */ +#define MDIO_PMA_LASI_TXSTAT 0x9004 /* TX_ALARM status */ +#define MDIO_PMA_LASI_STAT 0x9005 /* LASI status */ + +/* Control register 1. 
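+ * These CTRL1 bits are shared by the control register of every MMD;
+ * e.g. a PMA/PMD reset is requested by writing MDIO_CTRL1_RESET to
+ * register MDIO_CTRL1 of MDIO_MMD_PMAPMD (illustrative note; the
+ * actual register-write accessor is driver-specific).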
*/ +/* Enable extended speed selection */ +#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100) +/* All speed selection bits */ +#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c) +#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX +#define MDIO_CTRL1_LPOWER BMCR_PDOWN +#define MDIO_CTRL1_RESET BMCR_RESET +#define MDIO_PMA_CTRL1_LOOPBACK 0x0001 +#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000 +#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100 +#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART +#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE +#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */ + +/* 10 Gb/s */ +#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00) +/* 10PASS-TS/2BASE-TL */ +#define MDIO_CTRL1_SPEED10P2B (MDIO_CTRL1_SPEEDSELEXT | 0x04) + +/* Status register 1. */ +#define MDIO_STAT1_LPOWERABLE 0x0002 /* Low-power ability */ +#define MDIO_STAT1_LSTATUS BMSR_LSTATUS +#define MDIO_STAT1_FAULT 0x0080 /* Fault */ +#define MDIO_AN_STAT1_LPABLE 0x0001 /* Link partner AN ability */ +#define MDIO_AN_STAT1_ABLE BMSR_ANEGCAPABLE +#define MDIO_AN_STAT1_RFAULT BMSR_RFAULT +#define MDIO_AN_STAT1_COMPLETE BMSR_ANEGCOMPLETE +#define MDIO_AN_STAT1_PAGE 0x0040 /* Page received */ +#define MDIO_AN_STAT1_XNP 0x0080 /* Extended next page status */ + +/* Speed register. */ +#define MDIO_SPEED_10G 0x0001 /* 10G capable */ +#define MDIO_PMA_SPEED_2B 0x0002 /* 2BASE-TL capable */ +#define MDIO_PMA_SPEED_10P 0x0004 /* 10PASS-TS capable */ +#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */ +#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */ +#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */ +#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */ + +/* Device present registers. */ +#define MDIO_DEVS_PRESENT(devad) (1 << (devad)) +#define MDIO_DEVS_PMAPMD MDIO_DEVS_PRESENT(MDIO_MMD_PMAPMD) +#define MDIO_DEVS_WIS MDIO_DEVS_PRESENT(MDIO_MMD_WIS) +#define MDIO_DEVS_PCS MDIO_DEVS_PRESENT(MDIO_MMD_PCS) +#define MDIO_DEVS_PHYXS MDIO_DEVS_PRESENT(MDIO_MMD_PHYXS) +#define MDIO_DEVS_DTEXS MDIO_DEVS_PRESENT(MDIO_MMD_DTEXS) +#define MDIO_DEVS_TC MDIO_DEVS_PRESENT(MDIO_MMD_TC) +#define MDIO_DEVS_AN MDIO_DEVS_PRESENT(MDIO_MMD_AN) +#define MDIO_DEVS_C22EXT MDIO_DEVS_PRESENT(MDIO_MMD_C22EXT) + +/* Control register 2. 
*/ +#define MDIO_PMA_CTRL2_TYPE 0x000f /* PMA/PMD type selection */ +#define MDIO_PMA_CTRL2_10GBCX4 0x0000 /* 10GBASE-CX4 type */ +#define MDIO_PMA_CTRL2_10GBEW 0x0001 /* 10GBASE-EW type */ +#define MDIO_PMA_CTRL2_10GBLW 0x0002 /* 10GBASE-LW type */ +#define MDIO_PMA_CTRL2_10GBSW 0x0003 /* 10GBASE-SW type */ +#define MDIO_PMA_CTRL2_10GBLX4 0x0004 /* 10GBASE-LX4 type */ +#define MDIO_PMA_CTRL2_10GBER 0x0005 /* 10GBASE-ER type */ +#define MDIO_PMA_CTRL2_10GBLR 0x0006 /* 10GBASE-LR type */ +#define MDIO_PMA_CTRL2_10GBSR 0x0007 /* 10GBASE-SR type */ +#define MDIO_PMA_CTRL2_10GBLRM 0x0008 /* 10GBASE-LRM type */ +#define MDIO_PMA_CTRL2_10GBT 0x0009 /* 10GBASE-T type */ +#define MDIO_PMA_CTRL2_10GBKX4 0x000a /* 10GBASE-KX4 type */ +#define MDIO_PMA_CTRL2_10GBKR 0x000b /* 10GBASE-KR type */ +#define MDIO_PMA_CTRL2_1000BT 0x000c /* 1000BASE-T type */ +#define MDIO_PMA_CTRL2_1000BKX 0x000d /* 1000BASE-KX type */ +#define MDIO_PMA_CTRL2_100BTX 0x000e /* 100BASE-TX type */ +#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */ +#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */ +#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */ +#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */ +#define MDIO_PCS_CTRL2_10GBW 0x0002 /* 10GBASE-W type */ +#define MDIO_PCS_CTRL2_10GBT 0x0003 /* 10GBASE-T type */ + +/* Status register 2. */ +#define MDIO_STAT2_RXFAULT 0x0400 /* Receive fault */ +#define MDIO_STAT2_TXFAULT 0x0800 /* Transmit fault */ +#define MDIO_STAT2_DEVPRST 0xc000 /* Device present */ +#define MDIO_STAT2_DEVPRST_VAL 0x8000 /* Device present value */ +#define MDIO_PMA_STAT2_LBABLE 0x0001 /* PMA loopback ability */ +#define MDIO_PMA_STAT2_10GBEW 0x0002 /* 10GBASE-EW ability */ +#define MDIO_PMA_STAT2_10GBLW 0x0004 /* 10GBASE-LW ability */ +#define MDIO_PMA_STAT2_10GBSW 0x0008 /* 10GBASE-SW ability */ +#define MDIO_PMA_STAT2_10GBLX4 0x0010 /* 10GBASE-LX4 ability */ +#define MDIO_PMA_STAT2_10GBER 0x0020 /* 10GBASE-ER ability */ +#define MDIO_PMA_STAT2_10GBLR 0x0040 /* 10GBASE-LR ability */ +#define MDIO_PMA_STAT2_10GBSR 0x0080 /* 10GBASE-SR ability */ +#define MDIO_PMD_STAT2_TXDISAB 0x0100 /* PMD TX disable ability */ +#define MDIO_PMA_STAT2_EXTABLE 0x0200 /* Extended abilities */ +#define MDIO_PMA_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */ +#define MDIO_PMA_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */ +#define MDIO_PCS_STAT2_10GBR 0x0001 /* 10GBASE-R capable */ +#define MDIO_PCS_STAT2_10GBX 0x0002 /* 10GBASE-X capable */ +#define MDIO_PCS_STAT2_10GBW 0x0004 /* 10GBASE-W capable */ +#define MDIO_PCS_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */ +#define MDIO_PCS_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */ + +/* Transmit disable register. */ +#define MDIO_PMD_TXDIS_GLOBAL 0x0001 /* Global PMD TX disable */ +#define MDIO_PMD_TXDIS_0 0x0002 /* PMD TX disable 0 */ +#define MDIO_PMD_TXDIS_1 0x0004 /* PMD TX disable 1 */ +#define MDIO_PMD_TXDIS_2 0x0008 /* PMD TX disable 2 */ +#define MDIO_PMD_TXDIS_3 0x0010 /* PMD TX disable 3 */ + +/* Receive signal detect register. */ +#define MDIO_PMD_RXDET_GLOBAL 0x0001 /* Global PMD RX signal detect */ +#define MDIO_PMD_RXDET_0 0x0002 /* PMD RX signal detect 0 */ +#define MDIO_PMD_RXDET_1 0x0004 /* PMD RX signal detect 1 */ +#define MDIO_PMD_RXDET_2 0x0008 /* PMD RX signal detect 2 */ +#define MDIO_PMD_RXDET_3 0x0010 /* PMD RX signal detect 3 */ + +/* Extended abilities register. 
*/ +#define MDIO_PMA_EXTABLE_10GCX4 0x0001 /* 10GBASE-CX4 ability */ +#define MDIO_PMA_EXTABLE_10GBLRM 0x0002 /* 10GBASE-LRM ability */ +#define MDIO_PMA_EXTABLE_10GBT 0x0004 /* 10GBASE-T ability */ +#define MDIO_PMA_EXTABLE_10GBKX4 0x0008 /* 10GBASE-KX4 ability */ +#define MDIO_PMA_EXTABLE_10GBKR 0x0010 /* 10GBASE-KR ability */ +#define MDIO_PMA_EXTABLE_1000BT 0x0020 /* 1000BASE-T ability */ +#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */ +#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */ +#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */ + +/* PHY XGXS lane state register. */ +#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 +#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 +#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 +#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 +#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 + +/* PMA 10GBASE-T pair swap & polarity */ +#define MDIO_PMA_10GBT_SWAPPOL_ABNX 0x0001 /* Pair A/B uncrossed */ +#define MDIO_PMA_10GBT_SWAPPOL_CDNX 0x0002 /* Pair C/D uncrossed */ +#define MDIO_PMA_10GBT_SWAPPOL_AREV 0x0100 /* Pair A polarity reversed */ +#define MDIO_PMA_10GBT_SWAPPOL_BREV 0x0200 /* Pair B polarity reversed */ +#define MDIO_PMA_10GBT_SWAPPOL_CREV 0x0400 /* Pair C polarity reversed */ +#define MDIO_PMA_10GBT_SWAPPOL_DREV 0x0800 /* Pair D polarity reversed */ + +/* PMA 10GBASE-T TX power register. */ +#define MDIO_PMA_10GBT_TXPWR_SHORT 0x0001 /* Short-reach mode */ + +/* PMA 10GBASE-T SNR registers. */ +/* Value is SNR margin in dB, clamped to range [-127, 127], plus 0x8000. */ +#define MDIO_PMA_10GBT_SNR_BIAS 0x8000 +#define MDIO_PMA_10GBT_SNR_MAX 127 + +/* PMA 10GBASE-R FEC ability register. */ +#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */ +#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */ + +/* PCS 10GBASE-R/-T status register 1. */ +#define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */ + +/* PCS 10GBASE-R/-T status register 2. */ +#define MDIO_PCS_10GBRT_STAT2_ERR 0x00ff +#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00 + +/* AN 10GBASE-T control register. */ +#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */ + +/* AN 10GBASE-T status register. */ +#define MDIO_AN_10GBT_STAT_LPTRR 0x0200 /* LP training reset req. */ +#define MDIO_AN_10GBT_STAT_LPLTABLE 0x0400 /* LP loop timing ability */ +#define MDIO_AN_10GBT_STAT_LP10G 0x0800 /* LP is 10GBT capable */ +#define MDIO_AN_10GBT_STAT_REMOK 0x1000 /* Remote OK */ +#define MDIO_AN_10GBT_STAT_LOCOK 0x2000 /* Local OK */ +#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */ +#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */ + +/* AN EEE Advertisement register. */ +#define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */ +#define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */ + +/* LASI RX_ALARM control/status registers. */ +#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */ +#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */ +#define MDIO_PMA_LASI_RX_PMALFLT 0x0010 /* PMA/PMD RX local fault */ +#define MDIO_PMA_LASI_RX_OPTICPOWERFLT 0x0020 /* RX optical power fault */ +#define MDIO_PMA_LASI_RX_WISLFLT 0x0200 /* WIS local fault */ + +/* LASI TX_ALARM control/status registers. 
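+ * The same bit layout applies to both MDIO_PMA_LASI_TXCTRL (the
+ * interrupt enable mask) and MDIO_PMA_LASI_TXSTAT (the latched
+ * status), as defined by the XENPAK MSA.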
*/ +#define MDIO_PMA_LASI_TX_PHYXSLFLT 0x0001 /* PHY XS TX local fault */ +#define MDIO_PMA_LASI_TX_PCSLFLT 0x0008 /* PCS TX local fault */ +#define MDIO_PMA_LASI_TX_PMALFLT 0x0010 /* PMA/PMD TX local fault */ +#define MDIO_PMA_LASI_TX_LASERPOWERFLT 0x0080 /* Laser output power fault */ +#define MDIO_PMA_LASI_TX_LASERTEMPFLT 0x0100 /* Laser temperature fault */ +#define MDIO_PMA_LASI_TX_LASERBICURRFLT 0x0200 /* Laser bias current fault */ + +/* LASI control/status registers. */ +#define MDIO_PMA_LASI_LSALARM 0x0001 /* LS_ALARM enable/status */ +#define MDIO_PMA_LASI_TXALARM 0x0002 /* TX_ALARM enable/status */ +#define MDIO_PMA_LASI_RXALARM 0x0004 /* RX_ALARM enable/status */ + +/* Mapping between MDIO PRTAD/DEVAD and mii_ioctl_data::phy_id */ + +#define MDIO_PHY_ID_C45 0x8000 +#define MDIO_PHY_ID_PRTAD 0x03e0 +#define MDIO_PHY_ID_DEVAD 0x001f +#define MDIO_PHY_ID_C45_MASK \ + (MDIO_PHY_ID_C45 | MDIO_PHY_ID_PRTAD | MDIO_PHY_ID_DEVAD) + +static inline __u16 mdio_phy_id_c45(int prtad, int devad) +{ + return MDIO_PHY_ID_C45 | (prtad << 5) | devad; +} + +#ifdef __KERNEL__ + +static inline bool mdio_phy_id_is_c45(int phy_id) +{ + return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK); +} + +static inline __u16 mdio_phy_id_prtad(int phy_id) +{ + return (phy_id & MDIO_PHY_ID_PRTAD) >> 5; +} + +static inline __u16 mdio_phy_id_devad(int phy_id) +{ + return phy_id & MDIO_PHY_ID_DEVAD; +} + +/** + * struct mdio_if_info - Ethernet controller MDIO interface + * @prtad: PRTAD of the PHY (%MDIO_PRTAD_NONE if not present/unknown) + * @mmds: Mask of MMDs expected to be present in the PHY. This must be + * non-zero unless @prtad = %MDIO_PRTAD_NONE. + * @mode_support: MDIO modes supported. If %MDIO_SUPPORTS_C22 is set then + * MII register access will be passed through with @devad = + * %MDIO_DEVAD_NONE. If %MDIO_EMULATE_C22 is set then access to + * commonly used clause 22 registers will be translated into + * clause 45 registers. + * @dev: Net device structure + * @mdio_read: Register read function; returns value or negative error code + * @mdio_write: Register write function; returns 0 or negative error code + */ +struct mdio_if_info { + int prtad; + u32 mmds; + unsigned mode_support; + + struct net_device *dev; + int (*mdio_read)(struct net_device *dev, int prtad, int devad, + u16 addr); + int (*mdio_write)(struct net_device *dev, int prtad, int devad, + u16 addr, u16 val); +}; + +#define MDIO_PRTAD_NONE (-1) +#define MDIO_DEVAD_NONE (-1) +#define MDIO_SUPPORTS_C22 1 +#define MDIO_SUPPORTS_C45 2 +#define MDIO_EMULATE_C22 4 + +struct ethtool_cmd; +struct ethtool_pauseparam; +extern int mdio45_probe(struct mdio_if_info *mdio, int prtad); +extern int mdio_set_flag(const struct mdio_if_info *mdio, + int prtad, int devad, u16 addr, int mask, + bool sense); +extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds); +extern int mdio45_nway_restart(const struct mdio_if_info *mdio); +extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, + struct ethtool_cmd *ecmd, + u32 npage_adv, u32 npage_lpa); +extern void +mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio, + const struct ethtool_pauseparam *ecmd); + +/** + * mdio45_ethtool_gset - get settings for ETHTOOL_GSET + * @mdio: MDIO interface + * @ecmd: Ethtool request structure + * + * Since the CSRs for auto-negotiation using next pages are not fully + * standardised, this function does not attempt to decode them. Use + * mdio45_ethtool_gset_npage() to specify advertisement bits from next + * pages. 
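+ *
+ * Illustrative use from a driver's ethtool get_settings() method
+ * ("foo_priv" and its "mdio" member are hypothetical driver state,
+ * set up earlier with mdio45_probe()):
+ *
+ *	static int foo_get_settings(struct net_device *dev,
+ *				    struct ethtool_cmd *ecmd)
+ *	{
+ *		struct foo_priv *priv = netdev_priv(dev);
+ *
+ *		mdio45_ethtool_gset(&priv->mdio, ecmd);
+ *		return 0;
+ *	}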
+ */ +static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio, + struct ethtool_cmd *ecmd) +{ + mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); +} + +extern int mdio_mii_ioctl(const struct mdio_if_info *mdio, + struct mii_ioctl_data *mii_data, int cmd); + +#endif /* __KERNEL__ */ +#endif /* __LINUX_MDIO_H__ */ diff --git a/include/linux/media.h b/include/linux/media.h new file mode 100644 index 00000000..0ef88332 --- /dev/null +++ b/include/linux/media.h @@ -0,0 +1,132 @@ +/* + * Multimedia device API + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * Sakari Ailus <sakari.ailus@iki.fi> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MEDIA_H +#define __LINUX_MEDIA_H + +#include <linux/ioctl.h> +#include <linux/types.h> +#include <linux/version.h> + +#define MEDIA_API_VERSION KERNEL_VERSION(0, 1, 0) + +struct media_device_info { + char driver[16]; + char model[32]; + char serial[40]; + char bus_info[32]; + __u32 media_version; + __u32 hw_revision; + __u32 driver_version; + __u32 reserved[31]; +}; + +#define MEDIA_ENT_ID_FLAG_NEXT (1 << 31) + +#define MEDIA_ENT_TYPE_SHIFT 16 +#define MEDIA_ENT_TYPE_MASK 0x00ff0000 +#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff + +#define MEDIA_ENT_T_DEVNODE (1 << MEDIA_ENT_TYPE_SHIFT) +#define MEDIA_ENT_T_DEVNODE_V4L (MEDIA_ENT_T_DEVNODE + 1) +#define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_T_DEVNODE + 2) +#define MEDIA_ENT_T_DEVNODE_ALSA (MEDIA_ENT_T_DEVNODE + 3) +#define MEDIA_ENT_T_DEVNODE_DVB (MEDIA_ENT_T_DEVNODE + 4) + +#define MEDIA_ENT_T_V4L2_SUBDEV (2 << MEDIA_ENT_TYPE_SHIFT) +#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR (MEDIA_ENT_T_V4L2_SUBDEV + 1) +#define MEDIA_ENT_T_V4L2_SUBDEV_FLASH (MEDIA_ENT_T_V4L2_SUBDEV + 2) +#define MEDIA_ENT_T_V4L2_SUBDEV_LENS (MEDIA_ENT_T_V4L2_SUBDEV + 3) + +#define MEDIA_ENT_FL_DEFAULT (1 << 0) + +struct media_entity_desc { + __u32 id; + char name[32]; + __u32 type; + __u32 revision; + __u32 flags; + __u32 group_id; + __u16 pads; + __u16 links; + + __u32 reserved[4]; + + union { + /* Node specifications */ + struct { + __u32 major; + __u32 minor; + } v4l; + struct { + __u32 major; + __u32 minor; + } fb; + struct { + __u32 card; + __u32 device; + __u32 subdevice; + } alsa; + int dvb; + + /* Sub-device specifications */ + /* Nothing needed yet */ + __u8 raw[184]; + }; +}; + +#define MEDIA_PAD_FL_SINK (1 << 0) +#define MEDIA_PAD_FL_SOURCE (1 << 1) + +struct media_pad_desc { + __u32 entity; /* entity ID */ + __u16 index; /* pad index */ + __u32 flags; /* pad flags */ + __u32 reserved[2]; +}; + +#define MEDIA_LNK_FL_ENABLED (1 << 0) +#define MEDIA_LNK_FL_IMMUTABLE (1 << 1) +#define MEDIA_LNK_FL_DYNAMIC (1 << 2) + +struct media_link_desc { + struct media_pad_desc source; + struct media_pad_desc sink; + __u32 flags; + __u32 reserved[2]; +}; + +struct media_links_enum { + __u32 entity; + /* Should have enough room for pads elements */ + 
struct media_pad_desc __user *pads; + /* Should have enough room for links elements */ + struct media_link_desc __user *links; + __u32 reserved[4]; +}; + +#define MEDIA_IOC_DEVICE_INFO _IOWR('|', 0x00, struct media_device_info) +#define MEDIA_IOC_ENUM_ENTITIES _IOWR('|', 0x01, struct media_entity_desc) +#define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum) +#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc) + +#endif /* __LINUX_MEDIA_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h new file mode 100644 index 00000000..19dc455b --- /dev/null +++ b/include/linux/memblock.h @@ -0,0 +1,251 @@ +#ifndef _LINUX_MEMBLOCK_H +#define _LINUX_MEMBLOCK_H +#ifdef __KERNEL__ + +#ifdef CONFIG_HAVE_MEMBLOCK +/* + * Logical memory blocks. + * + * Copyright (C) 2001 Peter Bergner, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/init.h> +#include <linux/mm.h> + +#define INIT_MEMBLOCK_REGIONS 128 + +struct memblock_region { + phys_addr_t base; + phys_addr_t size; +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP + int nid; +#endif +}; + +struct memblock_type { + unsigned long cnt; /* number of regions */ + unsigned long max; /* size of the allocated array */ + phys_addr_t total_size; /* size of all regions */ + struct memblock_region *regions; +}; + +struct memblock { + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +}; + +extern struct memblock memblock; +extern int memblock_debug; + +#define memblock_dbg(fmt, ...) \ + if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) + +phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end, + phys_addr_t size, phys_addr_t align, int nid); +phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, + phys_addr_t size, phys_addr_t align); +phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); +void memblock_allow_resize(void); +int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); +int memblock_add(phys_addr_t base, phys_addr_t size); +int memblock_remove(phys_addr_t base, phys_addr_t size); +int memblock_free(phys_addr_t base, phys_addr_t size); +int memblock_reserve(phys_addr_t base, phys_addr_t size); + +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, + unsigned long *out_end_pfn, int *out_nid); + +/** + * for_each_mem_pfn_range - early memory pfn range iterator + * @i: an integer used as loop variable + * @nid: node selector, %MAX_NUMNODES for all nodes + * @p_start: ptr to ulong for start pfn of the range, can be %NULL + * @p_end: ptr to ulong for end pfn of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + * + * Walks over configured memory ranges. Available after early_node_map is + * populated. 
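+ *
+ * Illustrative use, dumping every early memory range on every node:
+ *
+ *	unsigned long start_pfn, end_pfn;
+ *	int i, nid;
+ *
+ *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+ *		pr_info("node %d: pfn %lu-%lu\n", nid, start_pfn, end_pfn);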
+ */ +#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ + for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ + i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid); + +/** + * for_each_free_mem_range - iterate through free memblock areas + * @i: u64 used as loop variable + * @nid: node selector, %MAX_NUMNODES for all nodes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + * + * Walks over free (memory && !reserved) areas of memblock. Available as + * soon as memblock is initialized. + */ +#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ + for (i = 0, \ + __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \ + i != (u64)ULLONG_MAX; \ + __next_free_mem_range(&i, nid, p_start, p_end, p_nid)) + +void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid); + +/** + * for_each_free_mem_range_reverse - rev-iterate through free memblock areas + * @i: u64 used as loop variable + * @nid: node selector, %MAX_NUMNODES for all nodes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + * + * Walks over free (memory && !reserved) areas of memblock in reverse + * order. Available as soon as memblock is initialized. + */ +#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ + for (i = (u64)ULLONG_MAX, \ + __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \ + i != (u64)ULLONG_MAX; \ + __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid)) + +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid); + +static inline void memblock_set_region_node(struct memblock_region *r, int nid) +{ + r->nid = nid; +} + +static inline int memblock_get_region_node(const struct memblock_region *r) +{ + return r->nid; +} +#else +static inline void memblock_set_region_node(struct memblock_region *r, int nid) +{ +} + +static inline int memblock_get_region_node(const struct memblock_region *r) +{ + return 0; +} +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); +phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); + +phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); + +/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */ +#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) +#define MEMBLOCK_ALLOC_ACCESSIBLE 0 + +phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, + phys_addr_t max_addr); +phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, + phys_addr_t max_addr); +phys_addr_t memblock_phys_mem_size(void); +phys_addr_t memblock_start_of_DRAM(void); +phys_addr_t memblock_end_of_DRAM(void); +void memblock_enforce_memory_limit(phys_addr_t memory_limit); +int memblock_is_memory(phys_addr_t addr); +int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); +int memblock_is_reserved(phys_addr_t addr); +int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); + +extern void 
__memblock_dump_all(void); + +static inline void memblock_dump_all(void) +{ + if (memblock_debug) + __memblock_dump_all(); +} + +/** + * memblock_set_current_limit - Set the current allocation limit to allow + * limiting allocations to what is currently + * accessible during boot + * @limit: New limit value (physical address) + */ +void memblock_set_current_limit(phys_addr_t limit); + + +/* + * pfn conversion functions + * + * While the memory MEMBLOCKs should always be page aligned, the reserved + * MEMBLOCKs may not be. This accessor attempt to provide a very clear + * idea of what they return for such non aligned MEMBLOCKs. + */ + +/** + * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region + * @reg: memblock_region structure + */ +static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) +{ + return PFN_UP(reg->base); +} + +/** + * memblock_region_memory_end_pfn - Return the end_pfn this region + * @reg: memblock_region structure + */ +static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) +{ + return PFN_DOWN(reg->base + reg->size); +} + +/** + * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region + * @reg: memblock_region structure + */ +static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) +{ + return PFN_DOWN(reg->base); +} + +/** + * memblock_region_reserved_end_pfn - Return the end_pfn this region + * @reg: memblock_region structure + */ +static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) +{ + return PFN_UP(reg->base + reg->size); +} + +#define for_each_memblock(memblock_type, region) \ + for (region = memblock.memblock_type.regions; \ + region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ + region++) + + +#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK +#define __init_memblock __meminit +#define __initdata_memblock __meminitdata +#else +#define __init_memblock +#define __initdata_memblock +#endif + +#else +static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) +{ + return 0; +} + +#endif /* CONFIG_HAVE_MEMBLOCK */ + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_MEMBLOCK_H */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h new file mode 100644 index 00000000..f94efd2f --- /dev/null +++ b/include/linux/memcontrol.h @@ -0,0 +1,452 @@ +/* memcontrol.h - Memory Controller + * + * Copyright IBM Corporation, 2007 + * Author Balbir Singh <balbir@linux.vnet.ibm.com> + * + * Copyright 2007 OpenVZ SWsoft Inc + * Author: Pavel Emelianov <xemul@openvz.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_MEMCONTROL_H +#define _LINUX_MEMCONTROL_H +#include <linux/cgroup.h> +#include <linux/vm_event_item.h> + +struct mem_cgroup; +struct page_cgroup; +struct page; +struct mm_struct; + +/* Stats that can be updated by kernel. 
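+ * They are adjusted through mem_cgroup_{inc,dec}_page_stat(),
+ * declared further down in this header.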
*/ +enum mem_cgroup_page_stat_item { + MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */ +}; + +struct mem_cgroup_reclaim_cookie { + struct zone *zone; + int priority; + unsigned int generation; +}; + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +/* + * All "charge" functions with gfp_mask should use GFP_KERNEL or + * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't + * alloc memory but reclaims memory from all available zones. So, "where I want + * memory from" bits of gfp_mask has no meaning. So any bits of that field is + * available but adding a rule is better. charge functions' gfp_mask should + * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous + * codes. + * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.) + */ + +extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask); +/* for swap handling */ +extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm, + struct page *page, gfp_t mask, struct mem_cgroup **memcgp); +extern void mem_cgroup_commit_charge_swapin(struct page *page, + struct mem_cgroup *memcg); +extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg); + +extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask); + +struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); +struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *, + enum lru_list); +void mem_cgroup_lru_del_list(struct page *, enum lru_list); +void mem_cgroup_lru_del(struct page *); +struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *, + enum lru_list, enum lru_list); + +/* For coalescing uncharge for reducing memcg' overhead*/ +extern void mem_cgroup_uncharge_start(void); +extern void mem_cgroup_uncharge_end(void); + +extern void mem_cgroup_uncharge_page(struct page *page); +extern void mem_cgroup_uncharge_cache_page(struct page *page); + +extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, + int order); +int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg); + +extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); +extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); +extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); + +extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); +extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont); + +static inline +int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) +{ + struct mem_cgroup *memcg; + rcu_read_lock(); + memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner)); + rcu_read_unlock(); + return cgroup == memcg; +} + +extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); + +extern int +mem_cgroup_prepare_migration(struct page *page, + struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask); +extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, + struct page *oldpage, struct page *newpage, bool migration_ok); + +struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, + struct mem_cgroup *, + struct mem_cgroup_reclaim_cookie *); +void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); + +/* + * For memory reclaim. 
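+ *
+ * A hierarchy walk with mem_cgroup_iter() (declared above) typically
+ * looks like the following sketch, where shrink_one() stands in for
+ * the caller's per-memcg work:
+ *
+ *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, &reclaim);
+ *
+ *	while (iter) {
+ *		shrink_one(iter);
+ *		iter = mem_cgroup_iter(root, iter, &reclaim);
+ *	}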
+ */ +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, + struct zone *zone); +int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, + struct zone *zone); +int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); +unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, + int nid, int zid, unsigned int lrumask); +struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, + struct zone *zone); +struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat_from_page(struct page *page); +extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, + struct task_struct *p); +extern void mem_cgroup_replace_page_cache(struct page *oldpage, + struct page *newpage); + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +extern int do_swap_account; +#endif + +static inline bool mem_cgroup_disabled(void) +{ + if (mem_cgroup_subsys.disabled) + return true; + return false; +} + +void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked, + unsigned long *flags); + +extern atomic_t memcg_moving; + +static inline void mem_cgroup_begin_update_page_stat(struct page *page, + bool *locked, unsigned long *flags) +{ + if (mem_cgroup_disabled()) + return; + rcu_read_lock(); + *locked = false; + if (atomic_read(&memcg_moving)) + __mem_cgroup_begin_update_page_stat(page, locked, flags); +} + +void __mem_cgroup_end_update_page_stat(struct page *page, + unsigned long *flags); +static inline void mem_cgroup_end_update_page_stat(struct page *page, + bool *locked, unsigned long *flags) +{ + if (mem_cgroup_disabled()) + return; + if (*locked) + __mem_cgroup_end_update_page_stat(page, flags); + rcu_read_unlock(); +} + +void mem_cgroup_update_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx, + int val); + +static inline void mem_cgroup_inc_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx) +{ + mem_cgroup_update_page_stat(page, idx, 1); +} + +static inline void mem_cgroup_dec_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx) +{ + mem_cgroup_update_page_stat(page, idx, -1); +} + +unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, + gfp_t gfp_mask, + unsigned long *total_scanned); +u64 mem_cgroup_get_limit(struct mem_cgroup *memcg); + +void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +void mem_cgroup_split_huge_fixup(struct page *head); +#endif + +#ifdef CONFIG_DEBUG_VM +bool mem_cgroup_bad_page_check(struct page *page); +void mem_cgroup_print_bad_page(struct page *page); +#endif +#else /* CONFIG_CGROUP_MEM_RES_CTLR */ +struct mem_cgroup; + +static inline int mem_cgroup_newpage_charge(struct page *page, + struct mm_struct *mm, gfp_t gfp_mask) +{ + return 0; +} + +static inline int mem_cgroup_cache_charge(struct page *page, + struct mm_struct *mm, gfp_t gfp_mask) +{ + return 0; +} + +static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm, + struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp) +{ + return 0; +} + +static inline void mem_cgroup_commit_charge_swapin(struct page *page, + struct mem_cgroup *memcg) +{ +} + +static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg) +{ +} + +static inline void mem_cgroup_uncharge_start(void) +{ +} + +static inline void mem_cgroup_uncharge_end(void) +{ +} + +static inline void mem_cgroup_uncharge_page(struct page *page) +{ +} + +static inline void mem_cgroup_uncharge_cache_page(struct page *page) +{ +} + +static inline struct lruvec 
*mem_cgroup_zone_lruvec(struct zone *zone, + struct mem_cgroup *memcg) +{ + return &zone->lruvec; +} + +static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, + struct page *page, + enum lru_list lru) +{ + return &zone->lruvec; +} + +static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru) +{ +} + +static inline void mem_cgroup_lru_del(struct page *page) +{ +} + +static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone, + struct page *page, + enum lru_list from, + enum lru_list to) +{ + return &zone->lruvec; +} + +static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) +{ + return NULL; +} + +static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) +{ + return NULL; +} + +static inline int mm_match_cgroup(struct mm_struct *mm, + struct mem_cgroup *memcg) +{ + return 1; +} + +static inline int task_in_mem_cgroup(struct task_struct *task, + const struct mem_cgroup *memcg) +{ + return 1; +} + +static inline struct cgroup_subsys_state + *mem_cgroup_css(struct mem_cgroup *memcg) +{ + return NULL; +} + +static inline int +mem_cgroup_prepare_migration(struct page *page, struct page *newpage, + struct mem_cgroup **memcgp, gfp_t gfp_mask) +{ + return 0; +} + +static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg, + struct page *oldpage, struct page *newpage, bool migration_ok) +{ +} + +static inline struct mem_cgroup * +mem_cgroup_iter(struct mem_cgroup *root, + struct mem_cgroup *prev, + struct mem_cgroup_reclaim_cookie *reclaim) +{ + return NULL; +} + +static inline void mem_cgroup_iter_break(struct mem_cgroup *root, + struct mem_cgroup *prev) +{ +} + +static inline bool mem_cgroup_disabled(void) +{ + return true; +} + +static inline int +mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) +{ + return 1; +} + +static inline int +mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone) +{ + return 1; +} + +static inline unsigned long +mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid, + unsigned int lru_mask) +{ + return 0; +} + + +static inline struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) +{ + return NULL; +} + +static inline struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat_from_page(struct page *page) +{ + return NULL; +} + +static inline void +mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) +{ +} + +static inline void mem_cgroup_begin_update_page_stat(struct page *page, + bool *locked, unsigned long *flags) +{ +} + +static inline void mem_cgroup_end_update_page_stat(struct page *page, + bool *locked, unsigned long *flags) +{ +} + +static inline void mem_cgroup_inc_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx) +{ +} + +static inline void mem_cgroup_dec_page_stat(struct page *page, + enum mem_cgroup_page_stat_item idx) +{ +} + +static inline +unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, + gfp_t gfp_mask, + unsigned long *total_scanned) +{ + return 0; +} + +static inline +u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) +{ + return 0; +} + +static inline void mem_cgroup_split_huge_fixup(struct page *head) +{ +} + +static inline +void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) +{ +} +static inline void mem_cgroup_replace_page_cache(struct page *oldpage, + struct page *newpage) +{ +} +#endif /* CONFIG_CGROUP_MEM_RES_CTLR */ + +#if 
!defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM) +static inline bool +mem_cgroup_bad_page_check(struct page *page) +{ + return false; +} + +static inline void +mem_cgroup_print_bad_page(struct page *page) +{ +} +#endif + +enum { + UNDER_LIMIT, + SOFT_LIMIT, + OVER_LIMIT, +}; + +struct sock; +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM +void sock_update_memcg(struct sock *sk); +void sock_release_memcg(struct sock *sk); +#else +static inline void sock_update_memcg(struct sock *sk) +{ +} +static inline void sock_release_memcg(struct sock *sk) +{ +} +#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ +#endif /* _LINUX_MEMCONTROL_H */ + diff --git a/include/linux/memory.h b/include/linux/memory.h new file mode 100644 index 00000000..1ac7f6e4 --- /dev/null +++ b/include/linux/memory.h @@ -0,0 +1,154 @@ +/* + * include/linux/memory.h - generic memory definition + * + * This is mainly for topological representation. We define the + * basic "struct memory_block" here, which can be embedded in per-arch + * definitions or NUMA information. + * + * Basic handling of the devices is done in drivers/base/memory.c + * and system devices are handled in drivers/base/sys.c. + * + * Memory block are exported via sysfs in the class/memory/devices/ + * directory. + * + */ +#ifndef _LINUX_MEMORY_H_ +#define _LINUX_MEMORY_H_ + +#include <linux/node.h> +#include <linux/compiler.h> +#include <linux/mutex.h> + +#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS) + +struct memory_block { + unsigned long start_section_nr; + unsigned long end_section_nr; + unsigned long state; + int section_count; + + /* + * This serializes all state change requests. It isn't + * held during creation because the control files are + * created long after the critical areas during + * initialization. + */ + struct mutex state_mutex; + int phys_device; /* to which fru does this belong? */ + void *hw; /* optional pointer to fw/hw data */ + int (*phys_callback)(struct memory_block *); + struct device dev; +}; + +int arch_get_memory_phys_device(unsigned long start_pfn); + +/* These states are exposed to userspace as text strings in sysfs */ +#define MEM_ONLINE (1<<0) /* exposed to userspace */ +#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */ +#define MEM_OFFLINE (1<<2) /* exposed to userspace */ +#define MEM_GOING_ONLINE (1<<3) +#define MEM_CANCEL_ONLINE (1<<4) +#define MEM_CANCEL_OFFLINE (1<<5) + +struct memory_notify { + unsigned long start_pfn; + unsigned long nr_pages; + int status_change_nid; +}; + +/* + * During pageblock isolation, count the number of pages within the + * range [start_pfn, start_pfn + nr_pages) which are owned by code + * in the notifier chain. 
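+ * A notifier callback that owns pages in the range reports them by
+ * adding to the pages_found field of the struct memory_isolate_notify
+ * it is passed.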
+ */ +#define MEM_ISOLATE_COUNT (1<<0) + +struct memory_isolate_notify { + unsigned long start_pfn; /* Start of range to check */ + unsigned int nr_pages; /* # pages in range to check */ + unsigned int pages_found; /* # pages owned found by callbacks */ +}; + +struct notifier_block; +struct mem_section; + +/* + * Priorities for the hotplug memory callback routines (stored in decreasing + * order in the callback chain) + */ +#define SLAB_CALLBACK_PRI 1 +#define IPC_CALLBACK_PRI 10 + +#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE +static inline int memory_dev_init(void) +{ + return 0; +} +static inline int register_memory_notifier(struct notifier_block *nb) +{ + return 0; +} +static inline void unregister_memory_notifier(struct notifier_block *nb) +{ +} +static inline int memory_notify(unsigned long val, void *v) +{ + return 0; +} +static inline int register_memory_isolate_notifier(struct notifier_block *nb) +{ + return 0; +} +static inline void unregister_memory_isolate_notifier(struct notifier_block *nb) +{ +} +static inline int memory_isolate_notify(unsigned long val, void *v) +{ + return 0; +} +#else +extern int register_memory_notifier(struct notifier_block *nb); +extern void unregister_memory_notifier(struct notifier_block *nb); +extern int register_memory_isolate_notifier(struct notifier_block *nb); +extern void unregister_memory_isolate_notifier(struct notifier_block *nb); +extern int register_new_memory(int, struct mem_section *); +extern int unregister_memory_section(struct mem_section *); +extern int memory_dev_init(void); +extern int remove_memory_block(unsigned long, struct mem_section *, int); +extern int memory_notify(unsigned long val, void *v); +extern int memory_isolate_notify(unsigned long val, void *v); +extern struct memory_block *find_memory_block_hinted(struct mem_section *, + struct memory_block *); +extern struct memory_block *find_memory_block(struct mem_section *); +#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) +enum mem_add_context { BOOT, HOTPLUG }; +#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ + +#ifdef CONFIG_MEMORY_HOTPLUG +#define hotplug_memory_notifier(fn, pri) { \ + static __meminitdata struct notifier_block fn##_mem_nb =\ + { .notifier_call = fn, .priority = pri }; \ + register_memory_notifier(&fn##_mem_nb); \ +} +#else +#define hotplug_memory_notifier(fn, pri) do { } while (0) +#endif + +/* + * 'struct memory_accessor' is a generic interface to provide + * in-kernel access to persistent memory such as i2c or SPI EEPROMs + */ +struct memory_accessor { + ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset, + size_t count); + ssize_t (*write)(struct memory_accessor *, const char *buf, + off_t offset, size_t count); +}; + +/* + * Kernel text modification mutex, used for code patching. Users of this lock + * can sleep. + */ +extern struct mutex text_mutex; + +#endif /* _LINUX_MEMORY_H_ */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h new file mode 100644 index 00000000..910550f3 --- /dev/null +++ b/include/linux/memory_hotplug.h @@ -0,0 +1,243 @@ +#ifndef __LINUX_MEMORY_HOTPLUG_H +#define __LINUX_MEMORY_HOTPLUG_H + +#include <linux/mmzone.h> +#include <linux/spinlock.h> +#include <linux/notifier.h> +#include <linux/bug.h> + +struct page; +struct zone; +struct pglist_data; +struct mem_section; + +#ifdef CONFIG_MEMORY_HOTPLUG + +/* + * Types for free bootmem stored in page->lru.next. These have to be in + * some random range in unsigned long space for debugging purposes. 
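The hotplug_memory_notifier() wrapper and the MEM_* actions declared in memory.h above are normally consumed by a callback on the memory notifier chain. A minimal sketch of such a client follows; the callback and function names are hypothetical, and the priority 0 is only a placeholder (real users pass values such as SLAB_CALLBACK_PRI).

#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int example_mem_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_ONLINE:
		/* prepare per-node data for mn->status_change_nid, covering
		 * pfns mn->start_pfn .. mn->start_pfn + mn->nr_pages */
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		/* tear the per-node data down again */
		break;
	}
	return NOTIFY_OK;
}

static int __init example_init(void)
{
	/* expands to a static notifier_block plus register_memory_notifier() */
	hotplug_memory_notifier(example_mem_callback, 0);
	return 0;
}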
+ */ +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, + MIX_SECTION_INFO, + NODE_INFO, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, +}; + +/* + * pgdat resizing functions + */ +static inline +void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_lock_irqsave(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_init(struct pglist_data *pgdat) +{ + spin_lock_init(&pgdat->node_size_lock); +} +/* + * Zone resizing functions + */ +static inline unsigned zone_span_seqbegin(struct zone *zone) +{ + return read_seqbegin(&zone->span_seqlock); +} +static inline int zone_span_seqretry(struct zone *zone, unsigned iv) +{ + return read_seqretry(&zone->span_seqlock, iv); +} +static inline void zone_span_writelock(struct zone *zone) +{ + write_seqlock(&zone->span_seqlock); +} +static inline void zone_span_writeunlock(struct zone *zone) +{ + write_sequnlock(&zone->span_seqlock); +} +static inline void zone_seqlock_init(struct zone *zone) +{ + seqlock_init(&zone->span_seqlock); +} +extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); +extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); +extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); +/* VM interface that may be used by firmware interface */ +extern int online_pages(unsigned long, unsigned long); +extern void __offline_isolated_pages(unsigned long, unsigned long); + +typedef void (*online_page_callback_t)(struct page *page); + +extern int set_online_page_callback(online_page_callback_t callback); +extern int restore_online_page_callback(online_page_callback_t callback); + +extern void __online_page_set_limits(struct page *page); +extern void __online_page_increment_counters(struct page *page); +extern void __online_page_free(struct page *page); + +#ifdef CONFIG_MEMORY_HOTREMOVE +extern bool is_pageblock_removable_nolock(struct page *page); +#endif /* CONFIG_MEMORY_HOTREMOVE */ + +/* reasonably generic interface to expand the physical pages in a zone */ +extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, + unsigned long nr_pages); +extern int __remove_pages(struct zone *zone, unsigned long start_pfn, + unsigned long nr_pages); + +#ifdef CONFIG_NUMA +extern int memory_add_physaddr_to_nid(u64 start); +#else +static inline int memory_add_physaddr_to_nid(u64 start) +{ + return 0; +} +#endif + +#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION +/* + * For supporting node-hotadd, we have to allocate a new pgdat. + * + * If an arch has generic style NODE_DATA(), + * node_data[nid] = kzalloc() works well. But it depends on the architecture. + * + * In general, generic_alloc_nodedata() is used. + * Now, arch_free_nodedata() is just defined for error path of node_hot_add. + * + */ +extern pg_data_t *arch_alloc_nodedata(int nid); +extern void arch_free_nodedata(pg_data_t *pgdat); +extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); + +#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + +#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) +#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) + +#ifdef CONFIG_NUMA +/* + * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat. + * XXX: kmalloc_node() can't work well to get new node's memory at this time. 
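The zone_span_seqbegin()/zone_span_seqretry() pair above is the usual seqlock reader pattern against a concurrent resize performed under zone_span_writelock(). A reader-side sketch, assuming the conventional zone fields zone_start_pfn and spanned_pages and a hypothetical helper name:

#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

static int example_pfn_in_zone_span(struct zone *zone, unsigned long pfn)
{
	unsigned long start, end;
	unsigned seq;

	do {
		seq = zone_span_seqbegin(zone);
		start = zone->zone_start_pfn;
		end = start + zone->spanned_pages;
	} while (zone_span_seqretry(zone, seq));	/* retry if a resize raced */

	return pfn >= start && pfn < end;
}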
+ * Because, pgdat for the new node is not allocated/initialized yet itself. + * To use new node's memory, more consideration will be necessary. + */ +#define generic_alloc_nodedata(nid) \ +({ \ + kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ +}) +/* + * This definition is just for error path in node hotadd. + * For node hotremove, we have to replace this. + */ +#define generic_free_nodedata(pgdat) kfree(pgdat) + +extern pg_data_t *node_data[]; +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ + node_data[nid] = pgdat; +} + +#else /* !CONFIG_NUMA */ + +/* never called */ +static inline pg_data_t *generic_alloc_nodedata(int nid) +{ + BUG(); + return NULL; +} +static inline void generic_free_nodedata(pg_data_t *pgdat) +{ +} +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ +} +#endif /* CONFIG_NUMA */ +#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} +static inline void put_page_bootmem(struct page *page) +{ +} +#else +extern void register_page_bootmem_info_node(struct pglist_data *pgdat); +extern void put_page_bootmem(struct page *page); +#endif + +/* + * Lock for memory hotplug guarantees 1) all callbacks for memory hotplug + * notifier will be called under this. 2) offline/online/add/remove memory + * will not run simultaneously. + */ + +void lock_memory_hotplug(void); +void unlock_memory_hotplug(void); + +#else /* ! CONFIG_MEMORY_HOTPLUG */ +/* + * Stub functions for when hotplug is off + */ +static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_init(struct pglist_data *pgdat) {} + +static inline unsigned zone_span_seqbegin(struct zone *zone) +{ + return 0; +} +static inline int zone_span_seqretry(struct zone *zone, unsigned iv) +{ + return 0; +} +static inline void zone_span_writelock(struct zone *zone) {} +static inline void zone_span_writeunlock(struct zone *zone) {} +static inline void zone_seqlock_init(struct zone *zone) {} + +static inline int mhp_notimplemented(const char *func) +{ + printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func); + dump_stack(); + return -ENOSYS; +} + +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} + +static inline void lock_memory_hotplug(void) {} +static inline void unlock_memory_hotplug(void) {} + +#endif /* ! 
CONFIG_MEMORY_HOTPLUG */ + +#ifdef CONFIG_MEMORY_HOTREMOVE + +extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); + +#else +static inline int is_mem_section_removable(unsigned long pfn, + unsigned long nr_pages) +{ + return 0; +} +#endif /* CONFIG_MEMORY_HOTREMOVE */ + +extern int mem_online_node(int nid); +extern int add_memory(int nid, u64 start, u64 size); +extern int arch_add_memory(int nid, u64 start, u64 size); +extern int remove_memory(u64 start, u64 size); +extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, + int nr_pages); +extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms); +extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, + unsigned long pnum); + +#endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h new file mode 100644 index 00000000..7c727a90 --- /dev/null +++ b/include/linux/mempolicy.h @@ -0,0 +1,385 @@ +#ifndef _LINUX_MEMPOLICY_H +#define _LINUX_MEMPOLICY_H 1 + +#include <linux/errno.h> + +/* + * NUMA memory policies for Linux. + * Copyright 2003,2004 Andi Kleen SuSE Labs + */ + +/* + * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are + * passed by the user to either set_mempolicy() or mbind() in an 'int' actual. + * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags. + */ + +/* Policies */ +enum { + MPOL_DEFAULT, + MPOL_PREFERRED, + MPOL_BIND, + MPOL_INTERLEAVE, + MPOL_MAX, /* always last member of enum */ +}; + +enum mpol_rebind_step { + MPOL_REBIND_ONCE, /* do rebind work at once(not by two step) */ + MPOL_REBIND_STEP1, /* first step(set all the newly nodes) */ + MPOL_REBIND_STEP2, /* second step(clean all the disallowed nodes)*/ + MPOL_REBIND_NSTEP, +}; + +/* Flags for set_mempolicy */ +#define MPOL_F_STATIC_NODES (1 << 15) +#define MPOL_F_RELATIVE_NODES (1 << 14) + +/* + * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to + * either set_mempolicy() or mbind(). + */ +#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES) + +/* Flags for get_mempolicy */ +#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */ +#define MPOL_F_ADDR (1<<1) /* look up vma using address */ +#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */ + +/* Flags for mbind */ +#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */ +#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to mapping */ +#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */ +#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */ + +/* + * Internal flags that share the struct mempolicy flags word with + * "mode flags". These flags are allocated from bit 0 up, as they + * are never OR'ed into the mode in mempolicy API arguments. + */ +#define MPOL_F_SHARED (1 << 0) /* identify shared policies */ +#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */ +#define MPOL_F_REBINDING (1 << 2) /* identify policies in rebinding */ + +#ifdef __KERNEL__ + +#include <linux/mmzone.h> +#include <linux/slab.h> +#include <linux/rbtree.h> +#include <linux/spinlock.h> +#include <linux/nodemask.h> +#include <linux/pagemap.h> + +struct mm_struct; + +#ifdef CONFIG_NUMA + +/* + * Describe a memory policy. + * + * A mempolicy can be either associated with a process or with a VMA. + * For VMA related allocations the VMA policy is preferred, otherwise + * the process policy is used. 
Interrupts ignore the memory policy + * of the current process. + * + * Locking policy for interlave: + * In process context there is no locking because only the process accesses + * its own state. All vma manipulation is somewhat protected by a down_read on + * mmap_sem. + * + * Freeing policy: + * Mempolicy objects are reference counted. A mempolicy will be freed when + * mpol_put() decrements the reference count to zero. + * + * Duplicating policy objects: + * mpol_dup() allocates a new mempolicy and copies the specified mempolicy + * to the new storage. The reference count of the new object is initialized + * to 1, representing the caller of mpol_dup(). + */ +struct mempolicy { + atomic_t refcnt; + unsigned short mode; /* See MPOL_* above */ + unsigned short flags; /* See set_mempolicy() MPOL_F_* above */ + union { + short preferred_node; /* preferred */ + nodemask_t nodes; /* interleave/bind */ + /* undefined for default */ + } v; + union { + nodemask_t cpuset_mems_allowed; /* relative to these nodes */ + nodemask_t user_nodemask; /* nodemask passed by user */ + } w; +}; + +/* + * Support for managing mempolicy data objects (clone, copy, destroy) + * The default fast path of a NULL MPOL_DEFAULT policy is always inlined. + */ + +extern void __mpol_put(struct mempolicy *pol); +static inline void mpol_put(struct mempolicy *pol) +{ + if (pol) + __mpol_put(pol); +} + +/* + * Does mempolicy pol need explicit unref after use? + * Currently only needed for shared policies. + */ +static inline int mpol_needs_cond_ref(struct mempolicy *pol) +{ + return (pol && (pol->flags & MPOL_F_SHARED)); +} + +static inline void mpol_cond_put(struct mempolicy *pol) +{ + if (mpol_needs_cond_ref(pol)) + __mpol_put(pol); +} + +extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, + struct mempolicy *frompol); +static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol, + struct mempolicy *frompol) +{ + if (!frompol) + return frompol; + return __mpol_cond_copy(tompol, frompol); +} + +extern struct mempolicy *__mpol_dup(struct mempolicy *pol); +static inline struct mempolicy *mpol_dup(struct mempolicy *pol) +{ + if (pol) + pol = __mpol_dup(pol); + return pol; +} + +#define vma_policy(vma) ((vma)->vm_policy) +#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol)) + +static inline void mpol_get(struct mempolicy *pol) +{ + if (pol) + atomic_inc(&pol->refcnt); +} + +extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b); +static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) +{ + if (a == b) + return true; + return __mpol_equal(a, b); +} + +/* + * Tree of shared policies for a shared memory region. + * Maintain the policies in a pseudo mm that contains vmas. The vmas + * carry the policy. As a special twist the pseudo mm is indexed in pages, not + * bytes, so that we can work with shared memory segments bigger than + * unsigned long. 
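The mpol_get()/mpol_put() pair above is plain reference counting, and both helpers tolerate a NULL policy. A sketch of the ownership rule for a structure that caches a policy pointer (the struct and function names are hypothetical):

#include <linux/mempolicy.h>

struct example_cache {
	struct mempolicy *policy;
};

static void example_cache_set(struct example_cache *c, struct mempolicy *pol)
{
	mpol_get(pol);		/* take our own reference (no-op for NULL) */
	mpol_put(c->policy);	/* drop the old one; freed at refcount zero */
	c->policy = pol;
}

Policies flagged MPOL_F_SHARED additionally require the conditional unref, so callers that may see either kind release through mpol_cond_put() instead.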
+ */ + +struct sp_node { + struct rb_node nd; + unsigned long start, end; + struct mempolicy *policy; +}; + +struct shared_policy { + struct rb_root root; + spinlock_t lock; +}; + +void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); +int mpol_set_shared_policy(struct shared_policy *info, + struct vm_area_struct *vma, + struct mempolicy *new); +void mpol_free_shared_policy(struct shared_policy *p); +struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, + unsigned long idx); + +struct mempolicy *get_vma_policy(struct task_struct *tsk, + struct vm_area_struct *vma, unsigned long addr); + +extern void numa_default_policy(void); +extern void numa_policy_init(void); +extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, + enum mpol_rebind_step step); +extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new); +extern void mpol_fix_fork_child_flag(struct task_struct *p); + +extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, + unsigned long addr, gfp_t gfp_flags, + struct mempolicy **mpol, nodemask_t **nodemask); +extern bool init_nodemask_of_mempolicy(nodemask_t *mask); +extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, + const nodemask_t *mask); +extern unsigned slab_node(struct mempolicy *policy); + +extern enum zone_type policy_zone; + +static inline void check_highest_zone(enum zone_type k) +{ + if (k > policy_zone && k != ZONE_MOVABLE) + policy_zone = k; +} + +int do_migrate_pages(struct mm_struct *mm, + const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags); + + +#ifdef CONFIG_TMPFS +extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context); +#endif + +extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, + int no_context); + +/* Check if a vma is migratable */ +static inline int vma_migratable(struct vm_area_struct *vma) +{ + if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) + return 0; + /* + * Migration allocates pages in the highest zone. If we cannot + * do so then migration (at least from node to node) is not + * possible. 
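huge_zonelist() above resolves a VMA's policy into a zonelist plus an optional nodemask, and the returned mempolicy reference is dropped with mpol_cond_put() once the allocation decision is made, as the hugetlb allocator does. A sketch with a hypothetical caller:

#include <linux/mempolicy.h>
#include <linux/mm.h>

static struct zonelist *example_pick_zonelist(struct vm_area_struct *vma,
					      unsigned long addr, gfp_t gfp)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zl;

	zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
	/* ... walk zl, restricting to *nodemask when it is non-NULL ... */
	mpol_cond_put(mpol);	/* only shared policies carry an extra ref */
	return zl;
}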
+ */ + if (vma->vm_file && + gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) + < policy_zone) + return 0; + return 1; +} + +#else + +struct mempolicy {}; + +static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) +{ + return true; +} + +static inline void mpol_put(struct mempolicy *p) +{ +} + +static inline void mpol_cond_put(struct mempolicy *pol) +{ +} + +static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to, + struct mempolicy *from) +{ + return from; +} + +static inline void mpol_get(struct mempolicy *pol) +{ +} + +static inline struct mempolicy *mpol_dup(struct mempolicy *old) +{ + return NULL; +} + +struct shared_policy {}; + +static inline int mpol_set_shared_policy(struct shared_policy *info, + struct vm_area_struct *vma, + struct mempolicy *new) +{ + return -EINVAL; +} + +static inline void mpol_shared_policy_init(struct shared_policy *sp, + struct mempolicy *mpol) +{ +} + +static inline void mpol_free_shared_policy(struct shared_policy *p) +{ +} + +static inline struct mempolicy * +mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) +{ + return NULL; +} + +#define vma_policy(vma) NULL +#define vma_set_policy(vma, pol) do {} while(0) + +static inline void numa_policy_init(void) +{ +} + +static inline void numa_default_policy(void) +{ +} + +static inline void mpol_rebind_task(struct task_struct *tsk, + const nodemask_t *new, + enum mpol_rebind_step step) +{ +} + +static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) +{ +} + +static inline void mpol_fix_fork_child_flag(struct task_struct *p) +{ +} + +static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, + unsigned long addr, gfp_t gfp_flags, + struct mempolicy **mpol, nodemask_t **nodemask) +{ + *mpol = NULL; + *nodemask = NULL; + return node_zonelist(0, gfp_flags); +} + +static inline bool init_nodemask_of_mempolicy(nodemask_t *m) +{ + return false; +} + +static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk, + const nodemask_t *mask) +{ + return false; +} + +static inline int do_migrate_pages(struct mm_struct *mm, + const nodemask_t *from_nodes, + const nodemask_t *to_nodes, int flags) +{ + return 0; +} + +static inline void check_highest_zone(int k) +{ +} + +#ifdef CONFIG_TMPFS +static inline int mpol_parse_str(char *str, struct mempolicy **mpol, + int no_context) +{ + return 1; /* error */ +} +#endif + +static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, + int no_context) +{ + return 0; +} + +#endif /* CONFIG_NUMA */ +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/mempool.h b/include/linux/mempool.h new file mode 100644 index 00000000..7c08052e --- /dev/null +++ b/include/linux/mempool.h @@ -0,0 +1,73 @@ +/* + * memory buffer pool support + */ +#ifndef _LINUX_MEMPOOL_H +#define _LINUX_MEMPOOL_H + +#include <linux/wait.h> + +struct kmem_cache; + +typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); +typedef void (mempool_free_t)(void *element, void *pool_data); + +typedef struct mempool_s { + spinlock_t lock; + int min_nr; /* nr of elements at *elements */ + int curr_nr; /* Current nr of elements at *elements */ + void **elements; + + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +} mempool_t; + +extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); +extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void 
*pool_data, int nid); + +extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask); +extern void mempool_destroy(mempool_t *pool); +extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask); +extern void mempool_free(void *element, mempool_t *pool); + +/* + * A mempool_alloc_t and mempool_free_t that get the memory from + * a slab that is passed in through pool_data. + */ +void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); +void mempool_free_slab(void *element, void *pool_data); +static inline mempool_t * +mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) +{ + return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, + (void *) kc); +} + +/* + * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the + * amount of memory specified by pool_data + */ +void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); +void mempool_kfree(void *element, void *pool_data); +static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) +{ + return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, + (void *) size); +} + +/* + * A mempool_alloc_t and mempool_free_t for a simple page allocator that + * allocates pages of the order specified by pool_data + */ +void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); +void mempool_free_pages(void *element, void *pool_data); +static inline mempool_t *mempool_create_page_pool(int min_nr, int order) +{ + return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, + (void *)(long)order); +} + +#endif /* _LINUX_MEMPOOL_H */ diff --git a/include/linux/memstick.h b/include/linux/memstick.h new file mode 100644 index 00000000..690c35a9 --- /dev/null +++ b/include/linux/memstick.h @@ -0,0 +1,347 @@ +/* + * Sony MemoryStick support + * + * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
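Stepping back to the mempool interface declared above: the point of the reserve is that mempool_alloc() dips into the pre-allocated elements under memory pressure and, when the gfp mask allows sleeping, waits for a freed element rather than failing. A usage sketch with hypothetical names and an arbitrarily chosen element size:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>

#define EXAMPLE_POOL_MIN 4	/* reserve depth, chosen arbitrarily here */

static mempool_t *example_pool;

static int __init example_pool_init(void)
{
	example_pool = mempool_create_kmalloc_pool(EXAMPLE_POOL_MIN, 256);
	return example_pool ? 0 : -ENOMEM;
}

static void example_io_path(void)
{
	/* GFP_NOIO: may sleep, but will not recurse into the IO path */
	void *buf = mempool_alloc(example_pool, GFP_NOIO);

	/* ... fill buf and submit it ... */
	mempool_free(buf, example_pool);	/* tops up the reserve if depleted */
}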
+ * + */ + +#ifndef _MEMSTICK_H +#define _MEMSTICK_H + +#include <linux/workqueue.h> +#include <linux/scatterlist.h> +#include <linux/device.h> + +/*** Hardware based structures ***/ + +struct ms_status_register { + unsigned char reserved; + unsigned char interrupt; +#define MEMSTICK_INT_CMDNAK 0x01 +#define MEMSTICK_INT_IOREQ 0x08 +#define MEMSTICK_INT_IOBREQ 0x10 +#define MEMSTICK_INT_BREQ 0x20 +#define MEMSTICK_INT_ERR 0x40 +#define MEMSTICK_INT_CED 0x80 + + unsigned char status0; +#define MEMSTICK_STATUS0_WP 0x01 +#define MEMSTICK_STATUS0_SL 0x02 +#define MEMSTICK_STATUS0_BF 0x10 +#define MEMSTICK_STATUS0_BE 0x20 +#define MEMSTICK_STATUS0_FB0 0x40 +#define MEMSTICK_STATUS0_MB 0x80 + + unsigned char status1; +#define MEMSTICK_STATUS1_UCFG 0x01 +#define MEMSTICK_STATUS1_FGER 0x02 +#define MEMSTICK_STATUS1_UCEX 0x04 +#define MEMSTICK_STATUS1_EXER 0x08 +#define MEMSTICK_STATUS1_UCDT 0x10 +#define MEMSTICK_STATUS1_DTER 0x20 +#define MEMSTICK_STATUS1_FB1 0x40 +#define MEMSTICK_STATUS1_MB 0x80 +} __attribute__((packed)); + +struct ms_id_register { + unsigned char type; + unsigned char if_mode; + unsigned char category; + unsigned char class; +} __attribute__((packed)); + +struct ms_param_register { + unsigned char system; +#define MEMSTICK_SYS_PAM 0x08 +#define MEMSTICK_SYS_BAMD 0x80 + + unsigned char block_address_msb; + unsigned short block_address; + unsigned char cp; +#define MEMSTICK_CP_BLOCK 0x00 +#define MEMSTICK_CP_PAGE 0x20 +#define MEMSTICK_CP_EXTRA 0x40 +#define MEMSTICK_CP_OVERWRITE 0x80 + + unsigned char page_address; +} __attribute__((packed)); + +struct ms_extra_data_register { + unsigned char overwrite_flag; +#define MEMSTICK_OVERWRITE_UDST 0x10 +#define MEMSTICK_OVERWRITE_PGST1 0x20 +#define MEMSTICK_OVERWRITE_PGST0 0x40 +#define MEMSTICK_OVERWRITE_BKST 0x80 + + unsigned char management_flag; +#define MEMSTICK_MANAGEMENT_SYSFLG 0x04 +#define MEMSTICK_MANAGEMENT_ATFLG 0x08 +#define MEMSTICK_MANAGEMENT_SCMS1 0x10 +#define MEMSTICK_MANAGEMENT_SCMS0 0x20 + + unsigned short logical_address; +} __attribute__((packed)); + +struct ms_register { + struct ms_status_register status; + struct ms_id_register id; + unsigned char reserved[8]; + struct ms_param_register param; + struct ms_extra_data_register extra_data; +} __attribute__((packed)); + +struct mspro_param_register { + unsigned char system; +#define MEMSTICK_SYS_PAR4 0x00 +#define MEMSTICK_SYS_PAR8 0x40 +#define MEMSTICK_SYS_SERIAL 0x80 + + __be16 data_count; + __be32 data_address; + unsigned char tpc_param; +} __attribute__((packed)); + +struct mspro_io_info_register { + unsigned char version; + unsigned char io_category; + unsigned char current_req; + unsigned char card_opt_info; + unsigned char rdy_wait_time; +} __attribute__((packed)); + +struct mspro_io_func_register { + unsigned char func_enable; + unsigned char func_select; + unsigned char func_intmask; + unsigned char transfer_mode; +} __attribute__((packed)); + +struct mspro_io_cmd_register { + unsigned short tpc_param; + unsigned short data_count; + unsigned int data_address; +} __attribute__((packed)); + +struct mspro_register { + struct ms_status_register status; + struct ms_id_register id; + unsigned char reserved0[8]; + struct mspro_param_register param; + unsigned char reserved1[8]; + struct mspro_io_info_register io_info; + struct mspro_io_func_register io_func; + unsigned char reserved2[7]; + struct mspro_io_cmd_register io_cmd; + unsigned char io_int; + unsigned char io_int_func; +} __attribute__((packed)); + +struct ms_register_addr { + unsigned char r_offset; 
+ unsigned char r_length; + unsigned char w_offset; + unsigned char w_length; +} __attribute__((packed)); + +enum memstick_tpc { + MS_TPC_READ_MG_STATUS = 0x01, + MS_TPC_READ_LONG_DATA = 0x02, + MS_TPC_READ_SHORT_DATA = 0x03, + MS_TPC_READ_MG_DATA = 0x03, + MS_TPC_READ_REG = 0x04, + MS_TPC_READ_QUAD_DATA = 0x05, + MS_TPC_READ_IO_DATA = 0x05, + MS_TPC_GET_INT = 0x07, + MS_TPC_SET_RW_REG_ADRS = 0x08, + MS_TPC_EX_SET_CMD = 0x09, + MS_TPC_WRITE_QUAD_DATA = 0x0a, + MS_TPC_WRITE_IO_DATA = 0x0a, + MS_TPC_WRITE_REG = 0x0b, + MS_TPC_WRITE_SHORT_DATA = 0x0c, + MS_TPC_WRITE_MG_DATA = 0x0c, + MS_TPC_WRITE_LONG_DATA = 0x0d, + MS_TPC_SET_CMD = 0x0e +}; + +enum memstick_command { + MS_CMD_BLOCK_END = 0x33, + MS_CMD_RESET = 0x3c, + MS_CMD_BLOCK_WRITE = 0x55, + MS_CMD_SLEEP = 0x5a, + MS_CMD_BLOCK_ERASE = 0x99, + MS_CMD_BLOCK_READ = 0xaa, + MS_CMD_CLEAR_BUF = 0xc3, + MS_CMD_FLASH_STOP = 0xcc, + MS_CMD_LOAD_ID = 0x60, + MS_CMD_CMP_ICV = 0x7f, + MSPRO_CMD_FORMAT = 0x10, + MSPRO_CMD_SLEEP = 0x11, + MSPRO_CMD_WAKEUP = 0x12, + MSPRO_CMD_READ_DATA = 0x20, + MSPRO_CMD_WRITE_DATA = 0x21, + MSPRO_CMD_READ_ATRB = 0x24, + MSPRO_CMD_STOP = 0x25, + MSPRO_CMD_ERASE = 0x26, + MSPRO_CMD_READ_QUAD = 0x27, + MSPRO_CMD_WRITE_QUAD = 0x28, + MSPRO_CMD_SET_IBD = 0x46, + MSPRO_CMD_GET_IBD = 0x47, + MSPRO_CMD_IN_IO_DATA = 0xb0, + MSPRO_CMD_OUT_IO_DATA = 0xb1, + MSPRO_CMD_READ_IO_ATRB = 0xb2, + MSPRO_CMD_IN_IO_FIFO = 0xb3, + MSPRO_CMD_OUT_IO_FIFO = 0xb4, + MSPRO_CMD_IN_IOM = 0xb5, + MSPRO_CMD_OUT_IOM = 0xb6, +}; + +/*** Driver structures and functions ***/ + +enum memstick_param { MEMSTICK_POWER = 1, MEMSTICK_INTERFACE }; + +#define MEMSTICK_POWER_OFF 0 +#define MEMSTICK_POWER_ON 1 + +#define MEMSTICK_SERIAL 0 +#define MEMSTICK_PAR4 1 +#define MEMSTICK_PAR8 2 + +struct memstick_host; +struct memstick_driver; + +struct memstick_device_id { + unsigned char match_flags; +#define MEMSTICK_MATCH_ALL 0x01 + + unsigned char type; +#define MEMSTICK_TYPE_LEGACY 0xff +#define MEMSTICK_TYPE_DUO 0x00 +#define MEMSTICK_TYPE_PRO 0x01 + + unsigned char category; +#define MEMSTICK_CATEGORY_STORAGE 0xff +#define MEMSTICK_CATEGORY_STORAGE_DUO 0x00 +#define MEMSTICK_CATEGORY_IO 0x01 +#define MEMSTICK_CATEGORY_IO_PRO 0x10 + + unsigned char class; +#define MEMSTICK_CLASS_FLASH 0xff +#define MEMSTICK_CLASS_DUO 0x00 +#define MEMSTICK_CLASS_ROM 0x01 +#define MEMSTICK_CLASS_RO 0x02 +#define MEMSTICK_CLASS_WP 0x03 +}; + +struct memstick_request { + unsigned char tpc; + unsigned char data_dir:1, + need_card_int:1, + long_data:1; + unsigned char int_reg; + int error; + union { + struct scatterlist sg; + struct { + unsigned char data_len; + unsigned char data[15]; + }; + }; +}; + +struct memstick_dev { + struct memstick_device_id id; + struct memstick_host *host; + struct ms_register_addr reg_addr; + struct completion mrq_complete; + struct memstick_request current_mrq; + + /* Check that media driver is still willing to operate the device. */ + int (*check)(struct memstick_dev *card); + /* Get next request from the media driver. 
*/ + int (*next_request)(struct memstick_dev *card, + struct memstick_request **mrq); + /* Tell the media driver to stop doing things */ + void (*stop)(struct memstick_dev *card); + /* Allow the media driver to continue */ + void (*start)(struct memstick_dev *card); + + struct device dev; +}; + +struct memstick_host { + struct mutex lock; + unsigned int id; + unsigned int caps; +#define MEMSTICK_CAP_AUTO_GET_INT 1 +#define MEMSTICK_CAP_PAR4 2 +#define MEMSTICK_CAP_PAR8 4 + + struct work_struct media_checker; + struct device dev; + + struct memstick_dev *card; + unsigned int retries; + + /* Notify the host that some requests are pending. */ + void (*request)(struct memstick_host *host); + /* Set host IO parameters (power, clock, etc). */ + int (*set_param)(struct memstick_host *host, + enum memstick_param param, + int value); + unsigned long private[0] ____cacheline_aligned; +}; + +struct memstick_driver { + struct memstick_device_id *id_table; + int (*probe)(struct memstick_dev *card); + void (*remove)(struct memstick_dev *card); + int (*suspend)(struct memstick_dev *card, + pm_message_t state); + int (*resume)(struct memstick_dev *card); + + struct device_driver driver; +}; + +int memstick_register_driver(struct memstick_driver *drv); +void memstick_unregister_driver(struct memstick_driver *drv); + +struct memstick_host *memstick_alloc_host(unsigned int extra, + struct device *dev); + +int memstick_add_host(struct memstick_host *host); +void memstick_remove_host(struct memstick_host *host); +void memstick_free_host(struct memstick_host *host); +void memstick_detect_change(struct memstick_host *host); +void memstick_suspend_host(struct memstick_host *host); +void memstick_resume_host(struct memstick_host *host); + +void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc, + const struct scatterlist *sg); +void memstick_init_req(struct memstick_request *mrq, unsigned char tpc, + const void *buf, size_t length); +int memstick_next_req(struct memstick_host *host, + struct memstick_request **mrq); +void memstick_new_req(struct memstick_host *host); + +int memstick_set_rw_addr(struct memstick_dev *card); + +static inline void *memstick_priv(struct memstick_host *host) +{ + return (void *)host->private; +} + +static inline void *memstick_get_drvdata(struct memstick_dev *card) +{ + return dev_get_drvdata(&card->dev); +} + +static inline void memstick_set_drvdata(struct memstick_dev *card, void *data) +{ + dev_set_drvdata(&card->dev, data); +} + +#endif diff --git a/include/linux/meye.h b/include/linux/meye.h new file mode 100644 index 00000000..0dd49954 --- /dev/null +++ b/include/linux/meye.h @@ -0,0 +1,66 @@ +/* + * Motion Eye video4linux driver for Sony Vaio PictureBook + * + * Copyright (C) 2001-2003 Stelian Pop <stelian@popies.net> + * + * Copyright (C) 2001-2002 Alcôve <www.alcove.com> + * + * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> + * + * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. + * + * Some parts borrowed from various video4linux drivers, especially + * bttv-driver.c and zoran.c, see original files for credits. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
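The memstick_driver/memstick_device_id interface above follows the usual bus-driver pattern. A sketch of a media driver that binds to legacy flash cards; all names are hypothetical and error handling is elided:

#include <linux/memstick.h>
#include <linux/module.h>

static struct memstick_device_id example_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},
	{}
};

static int example_probe(struct memstick_dev *card)
{
	/* set up per-card state and the next_request/stop/start callbacks */
	memstick_set_drvdata(card, NULL);
	return 0;
}

static void example_remove(struct memstick_dev *card)
{
	memstick_set_drvdata(card, NULL);
}

static struct memstick_driver example_driver = {
	.driver   = { .name = "example_ms", .owner = THIS_MODULE },
	.id_table = example_id_tbl,
	.probe    = example_probe,
	.remove   = example_remove,
};

/* module init/exit would call memstick_register_driver(&example_driver)
 * and memstick_unregister_driver(&example_driver) respectively */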
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _MEYE_H_ +#define _MEYE_H_ + +/****************************************************************************/ +/* Private API for handling mjpeg capture / playback. */ +/****************************************************************************/ + +struct meye_params { + unsigned char subsample; + unsigned char quality; + unsigned char sharpness; + unsigned char agc; + unsigned char picture; + unsigned char framerate; +}; + +/* query the extended parameters */ +#define MEYEIOC_G_PARAMS _IOR ('v', BASE_VIDIOC_PRIVATE+0, struct meye_params) +/* set the extended parameters */ +#define MEYEIOC_S_PARAMS _IOW ('v', BASE_VIDIOC_PRIVATE+1, struct meye_params) +/* queue a buffer for mjpeg capture */ +#define MEYEIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOC_PRIVATE+2, int) +/* sync a previously queued mjpeg buffer */ +#define MEYEIOC_SYNC _IOWR('v', BASE_VIDIOC_PRIVATE+3, int) +/* get a still uncompressed snapshot */ +#define MEYEIOC_STILLCAPT _IO ('v', BASE_VIDIOC_PRIVATE+4) +/* get a jpeg compressed snapshot */ +#define MEYEIOC_STILLJCAPT _IOR ('v', BASE_VIDIOC_PRIVATE+5, int) + +/* V4L2 private controls */ +#define V4L2_CID_AGC V4L2_CID_PRIVATE_BASE +#define V4L2_CID_MEYE_SHARPNESS (V4L2_CID_PRIVATE_BASE + 1) +#define V4L2_CID_PICTURE (V4L2_CID_PRIVATE_BASE + 2) +#define V4L2_CID_JPEGQUAL (V4L2_CID_PRIVATE_BASE + 3) +#define V4L2_CID_FRAMERATE (V4L2_CID_PRIVATE_BASE + 4) + +#endif diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h new file mode 100644 index 00000000..84d071ad --- /dev/null +++ b/include/linux/mfd/88pm860x.h @@ -0,0 +1,414 @@ +/* + * Marvell 88PM860x Interface + * + * Copyright (C) 2009 Marvell International Ltd. + * Haojian Zhuang <haojian.zhuang@marvell.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
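The MEYEIOC_* ioctls above form a private userspace API layered on V4L2 (BASE_VIDIOC_PRIVATE comes from linux/videodev2.h). A user-space sketch that reads the extended parameters, adjusts one field and writes them back; the device path is only an example:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/meye.h>

int example_set_quality(unsigned char quality)
{
	struct meye_params p;
	int ret = -1;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, MEYEIOC_G_PARAMS, &p) == 0) {
		p.quality = quality;
		ret = ioctl(fd, MEYEIOC_S_PARAMS, &p);
	}
	close(fd);
	return ret;
}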
+ */ + +#ifndef __LINUX_MFD_88PM860X_H +#define __LINUX_MFD_88PM860X_H + +#include <linux/interrupt.h> + +#define MFD_NAME_SIZE (40) + +enum { + CHIP_INVALID = 0, + CHIP_PM8606, + CHIP_PM8607, + CHIP_MAX, +}; + +enum { + PM8606_ID_INVALID, + PM8606_ID_BACKLIGHT, + PM8606_ID_LED, + PM8606_ID_VIBRATOR, + PM8606_ID_TOUCH, + PM8606_ID_SOUND, + PM8606_ID_CHARGER, + PM8606_ID_MAX, +}; + +enum { + PM8606_BACKLIGHT1 = 0, + PM8606_BACKLIGHT2, + PM8606_BACKLIGHT3, +}; + +enum { + PM8606_LED1_RED = 0, + PM8606_LED1_GREEN, + PM8606_LED1_BLUE, + PM8606_LED2_RED, + PM8606_LED2_GREEN, + PM8606_LED2_BLUE, + PM8607_LED_VIBRATOR, +}; + + +/* 8606 Registers */ +#define PM8606_DCM_BOOST (0x00) +#define PM8606_PWM (0x01) + +/* Backlight Registers */ +#define PM8606_WLED1A (0x02) +#define PM8606_WLED1B (0x03) +#define PM8606_WLED2A (0x04) +#define PM8606_WLED2B (0x05) +#define PM8606_WLED3A (0x06) +#define PM8606_WLED3B (0x07) + +/* LED Registers */ +#define PM8606_RGB2A (0x08) +#define PM8606_RGB2B (0x09) +#define PM8606_RGB2C (0x0A) +#define PM8606_RGB2D (0x0B) +#define PM8606_RGB1A (0x0C) +#define PM8606_RGB1B (0x0D) +#define PM8606_RGB1C (0x0E) +#define PM8606_RGB1D (0x0F) + +#define PM8606_PREREGULATORA (0x10) +#define PM8606_PREREGULATORB (0x11) +#define PM8606_VIBRATORA (0x12) +#define PM8606_VIBRATORB (0x13) +#define PM8606_VCHG (0x14) +#define PM8606_VSYS (0x15) +#define PM8606_MISC (0x16) +#define PM8606_CHIP_ID (0x17) +#define PM8606_STATUS (0x18) +#define PM8606_FLAGS (0x19) +#define PM8606_PROTECTA (0x1A) +#define PM8606_PROTECTB (0x1B) +#define PM8606_PROTECTC (0x1C) + +/* Bit definitions of PM8606 registers */ +#define PM8606_DCM_500MA (0x0) /* current limit */ +#define PM8606_DCM_750MA (0x1) +#define PM8606_DCM_1000MA (0x2) +#define PM8606_DCM_1250MA (0x3) +#define PM8606_DCM_250MV (0x0 << 2) +#define PM8606_DCM_300MV (0x1 << 2) +#define PM8606_DCM_350MV (0x2 << 2) +#define PM8606_DCM_400MV (0x3 << 2) + +#define PM8606_PWM_31200HZ (0x0) +#define PM8606_PWM_15600HZ (0x1) +#define PM8606_PWM_7800HZ (0x2) +#define PM8606_PWM_3900HZ (0x3) +#define PM8606_PWM_1950HZ (0x4) +#define PM8606_PWM_976HZ (0x5) +#define PM8606_PWM_488HZ (0x6) +#define PM8606_PWM_244HZ (0x7) +#define PM8606_PWM_FREQ_MASK (0x7) + +#define PM8606_WLED_ON (1 << 0) +#define PM8606_WLED_CURRENT(x) ((x & 0x1F) << 1) + +#define PM8606_LED_CURRENT(x) (((x >> 2) & 0x07) << 5) + +#define PM8606_VSYS_EN (1 << 1) + +#define PM8606_MISC_OSC_EN (1 << 4) + +enum { + PM8607_ID_BUCK1 = 0, + PM8607_ID_BUCK2, + PM8607_ID_BUCK3, + + PM8607_ID_LDO1, + PM8607_ID_LDO2, + PM8607_ID_LDO3, + PM8607_ID_LDO4, + PM8607_ID_LDO5, + PM8607_ID_LDO6, + PM8607_ID_LDO7, + PM8607_ID_LDO8, + PM8607_ID_LDO9, + PM8607_ID_LDO10, + PM8607_ID_LDO11, + PM8607_ID_LDO12, + PM8607_ID_LDO13, + PM8607_ID_LDO14, + PM8607_ID_LDO15, + + PM8607_ID_RG_MAX, +}; + +/* 8607 chip ID is 0x40 or 0x50 */ +#define PM8607_VERSION_MASK (0xF0) /* 8607 chip ID mask */ + +/* Interrupt Registers */ +#define PM8607_STATUS_1 (0x01) +#define PM8607_STATUS_2 (0x02) +#define PM8607_INT_STATUS1 (0x03) +#define PM8607_INT_STATUS2 (0x04) +#define PM8607_INT_STATUS3 (0x05) +#define PM8607_INT_MASK_1 (0x06) +#define PM8607_INT_MASK_2 (0x07) +#define PM8607_INT_MASK_3 (0x08) + +/* Regulator Control Registers */ +#define PM8607_LDO1 (0x10) +#define PM8607_LDO2 (0x11) +#define PM8607_LDO3 (0x12) +#define PM8607_LDO4 (0x13) +#define PM8607_LDO5 (0x14) +#define PM8607_LDO6 (0x15) +#define PM8607_LDO7 (0x16) +#define PM8607_LDO8 (0x17) +#define PM8607_LDO9 (0x18) +#define PM8607_LDO10 (0x19) +#define 
PM8607_LDO12 (0x1A) +#define PM8607_LDO14 (0x1B) +#define PM8607_SLEEP_MODE1 (0x1C) +#define PM8607_SLEEP_MODE2 (0x1D) +#define PM8607_SLEEP_MODE3 (0x1E) +#define PM8607_SLEEP_MODE4 (0x1F) +#define PM8607_GO (0x20) +#define PM8607_SLEEP_BUCK1 (0x21) +#define PM8607_SLEEP_BUCK2 (0x22) +#define PM8607_SLEEP_BUCK3 (0x23) +#define PM8607_BUCK1 (0x24) +#define PM8607_BUCK2 (0x25) +#define PM8607_BUCK3 (0x26) +#define PM8607_BUCK_CONTROLS (0x27) +#define PM8607_SUPPLIES_EN11 (0x2B) +#define PM8607_SUPPLIES_EN12 (0x2C) +#define PM8607_GROUP1 (0x2D) +#define PM8607_GROUP2 (0x2E) +#define PM8607_GROUP3 (0x2F) +#define PM8607_GROUP4 (0x30) +#define PM8607_GROUP5 (0x31) +#define PM8607_GROUP6 (0x32) +#define PM8607_SUPPLIES_EN21 (0x33) +#define PM8607_SUPPLIES_EN22 (0x34) + +/* Vibrator Control Registers */ +#define PM8607_VIBRATOR_SET (0x28) +#define PM8607_VIBRATOR_PWM (0x29) + +/* GPADC Registers */ +#define PM8607_GP_BIAS1 (0x4F) +#define PM8607_MEAS_EN1 (0x50) +#define PM8607_MEAS_EN2 (0x51) +#define PM8607_MEAS_EN3 (0x52) +#define PM8607_MEAS_OFF_TIME1 (0x53) +#define PM8607_MEAS_OFF_TIME2 (0x54) +#define PM8607_TSI_PREBIAS (0x55) /* prebias time */ +#define PM8607_PD_PREBIAS (0x56) /* prebias time */ +#define PM8607_GPADC_MISC1 (0x57) + +/* RTC Control Registers */ +#define PM8607_RTC1 (0xA0) +#define PM8607_RTC_COUNTER1 (0xA1) +#define PM8607_RTC_COUNTER2 (0xA2) +#define PM8607_RTC_COUNTER3 (0xA3) +#define PM8607_RTC_COUNTER4 (0xA4) +#define PM8607_RTC_EXPIRE1 (0xA5) +#define PM8607_RTC_EXPIRE2 (0xA6) +#define PM8607_RTC_EXPIRE3 (0xA7) +#define PM8607_RTC_EXPIRE4 (0xA8) +#define PM8607_RTC_TRIM1 (0xA9) +#define PM8607_RTC_TRIM2 (0xAA) +#define PM8607_RTC_TRIM3 (0xAB) +#define PM8607_RTC_TRIM4 (0xAC) +#define PM8607_RTC_MISC1 (0xAD) +#define PM8607_RTC_MISC2 (0xAE) +#define PM8607_RTC_MISC3 (0xAF) + +/* Misc Registers */ +#define PM8607_CHIP_ID (0x00) +#define PM8607_B0_MISC1 (0x0C) +#define PM8607_LDO1 (0x10) +#define PM8607_DVC3 (0x26) +#define PM8607_A1_MISC1 (0x40) + +/* bit definitions of Status Query Interface */ +#define PM8607_STATUS_CC (1 << 3) +#define PM8607_STATUS_PEN (1 << 4) +#define PM8607_STATUS_HEADSET (1 << 5) +#define PM8607_STATUS_HOOK (1 << 6) +#define PM8607_STATUS_MICIN (1 << 7) +#define PM8607_STATUS_ONKEY (1 << 8) +#define PM8607_STATUS_EXTON (1 << 9) +#define PM8607_STATUS_CHG (1 << 10) +#define PM8607_STATUS_BAT (1 << 11) +#define PM8607_STATUS_VBUS (1 << 12) +#define PM8607_STATUS_OV (1 << 13) + +/* bit definitions of BUCK3 */ +#define PM8607_BUCK3_DOUBLE (1 << 6) + +/* bit definitions of Misc1 */ +#define PM8607_A1_MISC1_PI2C (1 << 0) +#define PM8607_B0_MISC1_INV_INT (1 << 0) +#define PM8607_B0_MISC1_INT_CLEAR (1 << 1) +#define PM8607_B0_MISC1_INT_MASK (1 << 2) +#define PM8607_B0_MISC1_PI2C (1 << 3) +#define PM8607_B0_MISC1_RESET (1 << 6) + +/* bits definitions of GPADC */ +#define PM8607_GPADC_EN (1 << 0) +#define PM8607_GPADC_PREBIAS_MASK (3 << 1) +#define PM8607_GPADC_SLOT_CYCLE_MASK (3 << 3) /* slow mode */ +#define PM8607_GPADC_OFF_SCALE_MASK (3 << 5) /* GP sleep mode */ +#define PM8607_GPADC_SW_CAL_MASK (1 << 7) + +#define PM8607_PD_PREBIAS_MASK (0x1F << 0) +#define PM8607_PD_PRECHG_MASK (7 << 5) + +#define PM8606_REF_GP_OSC_OFF 0 +#define PM8606_REF_GP_OSC_ON 1 +#define PM8606_REF_GP_OSC_UNKNOWN 2 + +/* Clients of reference group and 8MHz oscillator in 88PM8606 */ +enum pm8606_ref_gp_and_osc_clients { + REF_GP_NO_CLIENTS = 0, + WLED1_DUTY = (1<<0), /*PF 0x02.7:0*/ + WLED2_DUTY = (1<<1), /*PF 0x04.7:0*/ + WLED3_DUTY = (1<<2), /*PF 0x06.7:0*/ + RGB1_ENABLE = 
(1<<3), /*PF 0x07.1*/ + RGB2_ENABLE = (1<<4), /*PF 0x07.2*/ + LDO_VBR_EN = (1<<5), /*PF 0x12.0*/ + REF_GP_MAX_CLIENT = 0xFFFF +}; + +/* Interrupt Number in 88PM8607 */ +enum { + PM8607_IRQ_ONKEY, + PM8607_IRQ_EXTON, + PM8607_IRQ_CHG, + PM8607_IRQ_BAT, + PM8607_IRQ_RTC, + PM8607_IRQ_CC, + PM8607_IRQ_VBAT, + PM8607_IRQ_VCHG, + PM8607_IRQ_VSYS, + PM8607_IRQ_TINT, + PM8607_IRQ_GPADC0, + PM8607_IRQ_GPADC1, + PM8607_IRQ_GPADC2, + PM8607_IRQ_GPADC3, + PM8607_IRQ_AUDIO_SHORT, + PM8607_IRQ_PEN, + PM8607_IRQ_HEADSET, + PM8607_IRQ_HOOK, + PM8607_IRQ_MICIN, + PM8607_IRQ_CHG_FAIL, + PM8607_IRQ_CHG_DONE, + PM8607_IRQ_CHG_FAULT, +}; + +enum { + PM8607_CHIP_A0 = 0x40, + PM8607_CHIP_A1 = 0x41, + PM8607_CHIP_B0 = 0x48, +}; + +struct pm860x_chip { + struct device *dev; + struct mutex irq_lock; + struct mutex osc_lock; + struct i2c_client *client; + struct i2c_client *companion; /* companion chip client */ + struct regmap *regmap; + struct regmap *regmap_companion; + + int buck3_double; /* DVC ramp slope double */ + unsigned short companion_addr; + unsigned short osc_vote; + int id; + int irq_mode; + int irq_base; + int core_irq; + unsigned char chip_version; + unsigned char osc_status; + + unsigned int wakeup_flag; +}; + +enum { + GI2C_PORT = 0, + PI2C_PORT, +}; + +struct pm860x_backlight_pdata { + int id; + int pwm; + int iset; + unsigned long flags; +}; + +struct pm860x_led_pdata { + int id; + int iset; + unsigned long flags; +}; + +struct pm860x_rtc_pdata { + int (*sync)(unsigned int ticks); + int vrtc; +}; + +struct pm860x_touch_pdata { + int gpadc_prebias; + int slot_cycle; + int off_scale; + int sw_cal; + int tsi_prebias; /* time, slot */ + int pen_prebias; /* time, slot */ + int pen_prechg; /* time, slot */ + int res_x; /* resistor of Xplate */ + unsigned long flags; +}; + +struct pm860x_power_pdata { + unsigned fast_charge; /* charge current */ +}; + +struct pm860x_platform_data { + struct pm860x_backlight_pdata *backlight; + struct pm860x_led_pdata *led; + struct pm860x_rtc_pdata *rtc; + struct pm860x_touch_pdata *touch; + struct pm860x_power_pdata *power; + struct regulator_init_data *regulator; + + unsigned short companion_addr; /* I2C address of companion chip */ + int i2c_port; /* Controlled by GI2C or PI2C */ + int irq_mode; /* Clear interrupt by read/write(0/1) */ + int irq_base; /* IRQ base number of 88pm860x */ + int num_leds; + int num_backlights; + int num_regulators; +}; + +extern int pm8606_osc_enable(struct pm860x_chip *, unsigned short); +extern int pm8606_osc_disable(struct pm860x_chip *, unsigned short); + +extern int pm860x_reg_read(struct i2c_client *, int); +extern int pm860x_reg_write(struct i2c_client *, int, unsigned char); +extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *); +extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *); +extern int pm860x_set_bits(struct i2c_client *, int, unsigned char, + unsigned char); +extern int pm860x_page_reg_read(struct i2c_client *, int); +extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char); +extern int pm860x_page_bulk_read(struct i2c_client *, int, int, + unsigned char *); +extern int pm860x_page_bulk_write(struct i2c_client *, int, int, + unsigned char *); +extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char, + unsigned char); + +extern int pm860x_device_init(struct pm860x_chip *chip, + struct pm860x_platform_data *pdata) __devinit ; +extern void pm860x_device_exit(struct pm860x_chip *chip) __devexit ; + +#endif /* __LINUX_MFD_88PM860X_H */ diff 
--git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h new file mode 100644 index 00000000..f7316c29 --- /dev/null +++ b/include/linux/mfd/aat2870.h @@ -0,0 +1,181 @@ +/* + * linux/include/linux/mfd/aat2870.h + * + * Copyright (c) 2011, NVIDIA Corporation. + * Author: Jin Park <jinyoungp@nvidia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_MFD_AAT2870_H +#define __LINUX_MFD_AAT2870_H + +#include <linux/debugfs.h> +#include <linux/i2c.h> + +/* Register offsets */ +#define AAT2870_BL_CH_EN 0x00 +#define AAT2870_BLM 0x01 +#define AAT2870_BLS 0x02 +#define AAT2870_BL1 0x03 +#define AAT2870_BL2 0x04 +#define AAT2870_BL3 0x05 +#define AAT2870_BL4 0x06 +#define AAT2870_BL5 0x07 +#define AAT2870_BL6 0x08 +#define AAT2870_BL7 0x09 +#define AAT2870_BL8 0x0A +#define AAT2870_FLR 0x0B +#define AAT2870_FM 0x0C +#define AAT2870_FS 0x0D +#define AAT2870_ALS_CFG0 0x0E +#define AAT2870_ALS_CFG1 0x0F +#define AAT2870_ALS_CFG2 0x10 +#define AAT2870_AMB 0x11 +#define AAT2870_ALS0 0x12 +#define AAT2870_ALS1 0x13 +#define AAT2870_ALS2 0x14 +#define AAT2870_ALS3 0x15 +#define AAT2870_ALS4 0x16 +#define AAT2870_ALS5 0x17 +#define AAT2870_ALS6 0x18 +#define AAT2870_ALS7 0x19 +#define AAT2870_ALS8 0x1A +#define AAT2870_ALS9 0x1B +#define AAT2870_ALSA 0x1C +#define AAT2870_ALSB 0x1D +#define AAT2870_ALSC 0x1E +#define AAT2870_ALSD 0x1F +#define AAT2870_ALSE 0x20 +#define AAT2870_ALSF 0x21 +#define AAT2870_SUB_SET 0x22 +#define AAT2870_SUB_CTRL 0x23 +#define AAT2870_LDO_AB 0x24 +#define AAT2870_LDO_CD 0x25 +#define AAT2870_LDO_EN 0x26 +#define AAT2870_REG_NUM 0x27 + +/* Device IDs */ +enum aat2870_id { + AAT2870_ID_BL, + AAT2870_ID_LDOA, + AAT2870_ID_LDOB, + AAT2870_ID_LDOC, + AAT2870_ID_LDOD +}; + +/* Backlight channels */ +#define AAT2870_BL_CH1 0x01 +#define AAT2870_BL_CH2 0x02 +#define AAT2870_BL_CH3 0x04 +#define AAT2870_BL_CH4 0x08 +#define AAT2870_BL_CH5 0x10 +#define AAT2870_BL_CH6 0x20 +#define AAT2870_BL_CH7 0x40 +#define AAT2870_BL_CH8 0x80 +#define AAT2870_BL_CH_ALL 0xFF + +/* Backlight current magnitude (mA) */ +enum aat2870_current { + AAT2870_CURRENT_0_45 = 1, + AAT2870_CURRENT_0_90, + AAT2870_CURRENT_1_80, + AAT2870_CURRENT_2_70, + AAT2870_CURRENT_3_60, + AAT2870_CURRENT_4_50, + AAT2870_CURRENT_5_40, + AAT2870_CURRENT_6_30, + AAT2870_CURRENT_7_20, + AAT2870_CURRENT_8_10, + AAT2870_CURRENT_9_00, + AAT2870_CURRENT_9_90, + AAT2870_CURRENT_10_8, + AAT2870_CURRENT_11_7, + AAT2870_CURRENT_12_6, + AAT2870_CURRENT_13_5, + AAT2870_CURRENT_14_4, + AAT2870_CURRENT_15_3, + AAT2870_CURRENT_16_2, + AAT2870_CURRENT_17_1, + AAT2870_CURRENT_18_0, + AAT2870_CURRENT_18_9, + AAT2870_CURRENT_19_8, + AAT2870_CURRENT_20_7, + AAT2870_CURRENT_21_6, + AAT2870_CURRENT_22_5, + AAT2870_CURRENT_23_4, + AAT2870_CURRENT_24_3, + AAT2870_CURRENT_25_2, + AAT2870_CURRENT_26_1, + AAT2870_CURRENT_27_0, + AAT2870_CURRENT_27_9 +}; + +struct aat2870_register { + bool readable; + bool writeable; 
+ u8 value; +}; + +struct aat2870_data { + struct device *dev; + struct i2c_client *client; + + struct mutex io_lock; + struct aat2870_register *reg_cache; /* register cache */ + int en_pin; /* enable GPIO pin (if < 0, ignore this value) */ + bool is_enable; + + /* init and uninit for platform specified */ + int (*init)(struct aat2870_data *aat2870); + void (*uninit)(struct aat2870_data *aat2870); + + /* i2c io funcntions */ + int (*read)(struct aat2870_data *aat2870, u8 addr, u8 *val); + int (*write)(struct aat2870_data *aat2870, u8 addr, u8 val); + int (*update)(struct aat2870_data *aat2870, u8 addr, u8 mask, u8 val); + + /* for debugfs */ + struct dentry *dentry_root; + struct dentry *dentry_reg; +}; + +struct aat2870_subdev_info { + int id; + const char *name; + void *platform_data; +}; + +struct aat2870_platform_data { + int en_pin; /* enable GPIO pin (if < 0, ignore this value) */ + + struct aat2870_subdev_info *subdevs; + int num_subdevs; + + /* init and uninit for platform specified */ + int (*init)(struct aat2870_data *aat2870); + void (*uninit)(struct aat2870_data *aat2870); +}; + +struct aat2870_bl_platform_data { + /* backlight channels, default is AAT2870_BL_CH_ALL */ + int channels; + /* backlight current magnitude, default is AAT2870_CURRENT_27_9 */ + int max_current; + /* maximum brightness, default is 255 */ + int max_brightness; +}; + +#endif /* __LINUX_MFD_AAT2870_H */ diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h new file mode 100644 index 00000000..ee96cd51 --- /dev/null +++ b/include/linux/mfd/abx500.h @@ -0,0 +1,461 @@ +/* + * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * AB3100 core access functions + * Author: Linus Walleij <linus.walleij@stericsson.com> + * + * ABX500 core access functions. + * The abx500 interface is used for the Analog Baseband chip + * ab3100, ab5500, and ab8500. + * + * Author: Mattias Wallin <mattias.wallin@stericsson.com> + * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> + * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> + * Author: Rickard Andersson <rickard.andersson@stericsson.com> + */ + +#include <linux/regulator/machine.h> + +struct device; + +#ifndef MFD_ABX500_H +#define MFD_ABX500_H + +#define AB3100_P1A 0xc0 +#define AB3100_P1B 0xc1 +#define AB3100_P1C 0xc2 +#define AB3100_P1D 0xc3 +#define AB3100_P1E 0xc4 +#define AB3100_P1F 0xc5 +#define AB3100_P1G 0xc6 +#define AB3100_R2A 0xc7 +#define AB3100_R2B 0xc8 +#define AB5500_1_0 0x20 +#define AB5500_1_1 0x21 +#define AB5500_2_0 0x24 + +/* + * AB3100, EVENTA1, A2 and A3 event register flags + * these are catenated into a single 32-bit flag in the code + * for event notification broadcasts. 
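The read/write/update hooks in struct aat2870_data above are how the MFD's sub-devices reach the shared I2C registers; a cell driver typically recovers the parent's aat2870_data with dev_get_drvdata() on its parent device. A minimal sketch (function name hypothetical):

#include <linux/mfd/aat2870.h>

static int example_backlight_on(struct aat2870_data *aat2870)
{
	/* enable all eight backlight channels through the parent's I/O op */
	return aat2870->write(aat2870, AAT2870_BL_CH_EN, AAT2870_BL_CH_ALL);
}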
+ */ +#define AB3100_EVENTA1_ONSWA (0x01<<16) +#define AB3100_EVENTA1_ONSWB (0x02<<16) +#define AB3100_EVENTA1_ONSWC (0x04<<16) +#define AB3100_EVENTA1_DCIO (0x08<<16) +#define AB3100_EVENTA1_OVER_TEMP (0x10<<16) +#define AB3100_EVENTA1_SIM_OFF (0x20<<16) +#define AB3100_EVENTA1_VBUS (0x40<<16) +#define AB3100_EVENTA1_VSET_USB (0x80<<16) + +#define AB3100_EVENTA2_READY_TX (0x01<<8) +#define AB3100_EVENTA2_READY_RX (0x02<<8) +#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8) +#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8) +#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8) +#define AB3100_EVENTA2_MIDR (0x20<<8) +#define AB3100_EVENTA2_BATTERY_REM (0x40<<8) +#define AB3100_EVENTA2_ALARM (0x80<<8) + +#define AB3100_EVENTA3_ADC_TRIG5 (0x01) +#define AB3100_EVENTA3_ADC_TRIG4 (0x02) +#define AB3100_EVENTA3_ADC_TRIG3 (0x04) +#define AB3100_EVENTA3_ADC_TRIG2 (0x08) +#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10) +#define AB3100_EVENTA3_ADC_TRIGVTX (0x20) +#define AB3100_EVENTA3_ADC_TRIG1 (0x40) +#define AB3100_EVENTA3_ADC_TRIG0 (0x80) + +/* AB3100, STR register flags */ +#define AB3100_STR_ONSWA (0x01) +#define AB3100_STR_ONSWB (0x02) +#define AB3100_STR_ONSWC (0x04) +#define AB3100_STR_DCIO (0x08) +#define AB3100_STR_BOOT_MODE (0x10) +#define AB3100_STR_SIM_OFF (0x20) +#define AB3100_STR_BATT_REMOVAL (0x40) +#define AB3100_STR_VBUS (0x80) + +/* + * AB3100 contains 8 regulators, one external regulator controller + * and a buck converter, further the LDO E and buck converter can + * have separate settings if they are in sleep mode, this is + * modeled as a separate regulator. + */ +#define AB3100_NUM_REGULATORS 10 + +/** + * struct ab3100 + * @access_mutex: lock out concurrent accesses to the AB3100 registers + * @dev: pointer to the containing device + * @i2c_client: I2C client for this chip + * @testreg_client: secondary client for test registers + * @chip_name: name of this chip variant + * @chip_id: 8 bit chip ID for this chip variant + * @event_subscribers: event subscribers are listed here + * @startup_events: a copy of the first reading of the event registers + * @startup_events_read: whether the first events have been read + * + * This struct is PRIVATE and devices using it should NOT + * access ANY fields. It is used as a token for calling the + * AB3100 functions. + */ +struct ab3100 { + struct mutex access_mutex; + struct device *dev; + struct i2c_client *i2c_client; + struct i2c_client *testreg_client; + char chip_name[32]; + u8 chip_id; + struct blocking_notifier_head event_subscribers; + u8 startup_events[3]; + bool startup_events_read; +}; + +/** + * struct ab3100_platform_data + * Data supplied to initialize board connections to the AB3100 + * @reg_constraints: regulator constraints for target board + * the order of these constraints are: LDO A, C, D, E, + * F, G, H, K, EXT and BUCK. + * @reg_initvals: initial values for the regulator registers + * plus two sleep settings for LDO E and the BUCK converter. + * exactly AB3100_NUM_REGULATORS+2 values must be sent in. + * Order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK, + * BUCK sleep, LDO D. (LDO D need to be initialized last.) + * @external_voltage: voltage level of the external regulator. 
+ */ +struct ab3100_platform_data { + struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS]; + u8 reg_initvals[AB3100_NUM_REGULATORS+2]; + int external_voltage; +}; + +int ab3100_event_register(struct ab3100 *ab3100, + struct notifier_block *nb); +int ab3100_event_unregister(struct ab3100 *ab3100, + struct notifier_block *nb); + +/** + * struct abx500_init_setting + * Initial value of the registers for driver to use during setup. + */ +struct abx500_init_settings { + u8 bank; + u8 reg; + u8 setting; +}; + +/* Battery driver related data */ +/* + * ADC for the battery thermistor. + * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined + * with a NTC resistor to both identify the battery and to measure its + * temperature. Different phone manufactures uses different techniques to both + * identify the battery and to read its temperature. + */ +enum abx500_adc_therm { + ABx500_ADC_THERM_BATCTRL, + ABx500_ADC_THERM_BATTEMP, +}; + +/** + * struct abx500_res_to_temp - defines one point in a temp to res curve. To + * be used in battery packs that combines the identification resistor with a + * NTC resistor. + * @temp: battery pack temperature in Celcius + * @resist: NTC resistor net total resistance + */ +struct abx500_res_to_temp { + int temp; + int resist; +}; + +/** + * struct abx500_v_to_cap - Table for translating voltage to capacity + * @voltage: Voltage in mV + * @capacity: Capacity in percent + */ +struct abx500_v_to_cap { + int voltage; + int capacity; +}; + +/* Forward declaration */ +struct abx500_fg; + +/** + * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds + * if not specified + * @recovery_sleep_timer: Time between measurements while recovering + * @recovery_total_time: Total recovery time + * @init_timer: Measurement interval during startup + * @init_discard_time: Time we discard voltage measurement at startup + * @init_total_time: Total init time during startup + * @high_curr_time: Time current has to be high to go to recovery + * @accu_charging: FG accumulation time while charging + * @accu_high_curr: FG accumulation time in high current mode + * @high_curr_threshold: High current threshold, in mA + * @lowbat_threshold: Low battery threshold, in mV + * @overbat_threshold: Over battery threshold, in mV + * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0 + * Resolution in 50 mV step. + * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1 + * Resolution in 50 mV step. + * @user_cap_limit Capacity reported from user must be within this + * limit to be considered as sane, in percentage + * points. + * @maint_thres This is the threshold where we stop reporting + * battery full while in maintenance, in per cent + */ +struct abx500_fg_parameters { + int recovery_sleep_timer; + int recovery_total_time; + int init_timer; + int init_discard_time; + int init_total_time; + int high_curr_time; + int accu_charging; + int accu_high_curr; + int high_curr_threshold; + int lowbat_threshold; + int overbat_threshold; + int battok_falling_th_sel0; + int battok_raising_th_sel1; + int user_cap_limit; + int maint_thres; +}; + +/** + * struct abx500_charger_maximization - struct used by the board config. 
+ * @use_maxi: Enable maximization for this battery type + * @maxi_chg_curr: Maximum charger current allowed + * @maxi_wait_cycles: cycles to wait before setting charger current + * @charger_curr_step delta between two charger current settings (mA) + */ +struct abx500_maxim_parameters { + bool ena_maxi; + int chg_curr; + int wait_cycles; + int charger_curr_step; +}; + +/** + * struct abx500_battery_type - different batteries supported + * @name: battery technology + * @resis_high: battery upper resistance limit + * @resis_low: battery lower resistance limit + * @charge_full_design: Maximum battery capacity in mAh + * @nominal_voltage: Nominal voltage of the battery in mV + * @termination_vol: max voltage upto which battery can be charged + * @termination_curr battery charging termination current in mA + * @recharge_vol battery voltage limit that will trigger a new + * full charging cycle in the case where maintenan- + * -ce charging has been disabled + * @normal_cur_lvl: charger current in normal state in mA + * @normal_vol_lvl: charger voltage in normal state in mV + * @maint_a_cur_lvl: charger current in maintenance A state in mA + * @maint_a_vol_lvl: charger voltage in maintenance A state in mV + * @maint_a_chg_timer_h: charge time in maintenance A state + * @maint_b_cur_lvl: charger current in maintenance B state in mA + * @maint_b_vol_lvl: charger voltage in maintenance B state in mV + * @maint_b_chg_timer_h: charge time in maintenance B state + * @low_high_cur_lvl: charger current in temp low/high state in mA + * @low_high_vol_lvl: charger voltage in temp low/high state in mV' + * @battery_resistance: battery inner resistance in mOhm. + * @n_r_t_tbl_elements: number of elements in r_to_t_tbl + * @r_to_t_tbl: table containing resistance to temp points + * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl + * @v_to_cap_tbl: Voltage to capacity (in %) table + * @n_batres_tbl_elements number of elements in the batres_tbl + * @batres_tbl battery internal resistance vs temperature table + */ +struct abx500_battery_type { + int name; + int resis_high; + int resis_low; + int charge_full_design; + int nominal_voltage; + int termination_vol; + int termination_curr; + int recharge_vol; + int normal_cur_lvl; + int normal_vol_lvl; + int maint_a_cur_lvl; + int maint_a_vol_lvl; + int maint_a_chg_timer_h; + int maint_b_cur_lvl; + int maint_b_vol_lvl; + int maint_b_chg_timer_h; + int low_high_cur_lvl; + int low_high_vol_lvl; + int battery_resistance; + int n_temp_tbl_elements; + struct abx500_res_to_temp *r_to_t_tbl; + int n_v_cap_tbl_elements; + struct abx500_v_to_cap *v_to_cap_tbl; + int n_batres_tbl_elements; + struct batres_vs_temp *batres_tbl; +}; + +/** + * struct abx500_bm_capacity_levels - abx500 capacity level data + * @critical: critical capacity level in percent + * @low: low capacity level in percent + * @normal: normal capacity level in percent + * @high: high capacity level in percent + * @full: full capacity level in percent + */ +struct abx500_bm_capacity_levels { + int critical; + int low; + int normal; + int high; + int full; +}; + +/** + * struct abx500_bm_charger_parameters - Charger specific parameters + * @usb_volt_max: maximum allowed USB charger voltage in mV + * @usb_curr_max: maximum allowed USB charger current in mA + * @ac_volt_max: maximum allowed AC charger voltage in mV + * @ac_curr_max: maximum allowed AC charger current in mA + */ +struct abx500_bm_charger_parameters { + int usb_volt_max; + int usb_curr_max; + int ac_volt_max; + int ac_curr_max; +}; + +/** + * 
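For illustration, a board file might describe one supported battery roughly as below; every numeric value is a made-up placeholder, not data from any battery datasheet, and the identifiers are hypothetical.

static struct abx500_res_to_temp example_temp_tbl[] = {
	/* Placeholder NTC curve: temperature (C) vs. resistance (Ohm) */
	{ -5, 53407 },
	{ 25, 10000 },
	{ 65,  2417 },
};

static struct abx500_battery_type example_bat_type = {
	.resis_high		= 53407,
	.resis_low		= 2417,
	.charge_full_design	= 1500,	/* mAh */
	.nominal_voltage	= 3700,	/* mV */
	.normal_cur_lvl		= 400,	/* mA */
	.normal_vol_lvl		= 4200,	/* mV */
	.n_temp_tbl_elements	= ARRAY_SIZE(example_temp_tbl),
	.r_to_t_tbl		= example_temp_tbl,
};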
struct abx500_bm_data - abx500 battery management data + * @temp_under under this temp, charging is stopped + * @temp_low between this temp and temp_under charging is reduced + * @temp_high between this temp and temp_over charging is reduced + * @temp_over over this temp, charging is stopped + * @temp_now present battery temperature + * @temp_interval_chg temperature measurement interval in s when charging + * @temp_interval_nochg temperature measurement interval in s when not charging + * @main_safety_tmr_h safety timer for main charger + * @usb_safety_tmr_h safety timer for usb charger + * @bkup_bat_v voltage which we charge the backup battery with + * @bkup_bat_i current which we charge the backup battery with + * @no_maintenance indicates that maintenance charging is disabled + * @abx500_adc_therm placement of thermistor, batctrl or battemp adc + * @chg_unknown_bat flag to enable charging of unknown batteries + * @enable_overshoot flag to enable VBAT overshoot control + * @auto_trig flag to enable auto adc trigger + * @fg_res resistance of FG resistor in 0.1mOhm + * @n_btypes number of elements in array bat_type + * @batt_id index of the identified battery in array bat_type + * @interval_charging charge alg cycle period time when charging (sec) + * @interval_not_charging charge alg cycle period time when not charging (sec) + * @temp_hysteresis temperature hysteresis + * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm) + * @maxi: maximization parameters + * @cap_levels capacity in percent for the different capacity levels + * @bat_type table of supported battery types + * @chg_params charger parameters + * @fg_params fuel gauge parameters + */ +struct abx500_bm_data { + int temp_under; + int temp_low; + int temp_high; + int temp_over; + int temp_now; + int temp_interval_chg; + int temp_interval_nochg; + int main_safety_tmr_h; + int usb_safety_tmr_h; + int bkup_bat_v; + int bkup_bat_i; + bool no_maintenance; + bool chg_unknown_bat; + bool enable_overshoot; + bool auto_trig; + enum abx500_adc_therm adc_therm; + int fg_res; + int n_btypes; + int batt_id; + int interval_charging; + int interval_not_charging; + int temp_hysteresis; + int gnd_lift_resistance; + const struct abx500_maxim_parameters *maxi; + const struct abx500_bm_capacity_levels *cap_levels; + const struct abx500_battery_type *bat_type; + const struct abx500_bm_charger_parameters *chg_params; + const struct abx500_fg_parameters *fg_params; +}; + +struct abx500_chargalg_platform_data { + char **supplied_to; + size_t num_supplicants; +}; + +struct abx500_charger_platform_data { + char **supplied_to; + size_t num_supplicants; + bool autopower_cfg; +}; + +struct abx500_btemp_platform_data { + char **supplied_to; + size_t num_supplicants; +}; + +struct abx500_fg_platform_data { + char **supplied_to; + size_t num_supplicants; +}; + +struct abx500_bm_plat_data { + struct abx500_bm_data *battery; + struct abx500_charger_platform_data *charger; + struct abx500_btemp_platform_data *btemp; + struct abx500_fg_platform_data *fg; + struct abx500_chargalg_platform_data *chargalg; +}; + +int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 value); +int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 *value); +int abx500_get_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs); +int abx500_set_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs); +/** + * 
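A sketch of the typical call pattern for the register accessors declared above (the abx500_mask_and_set_register_interruptible() variant is documented just below); the bank 0x0F, register 0x0B and bit 0 used here are placeholders, not meaningful addresses.

static int example_toggle_bit(struct device *dev)
{
	u8 val;
	int ret;

	ret = abx500_get_register_interruptible(dev, 0x0F, 0x0B, &val);
	if (ret < 0)
		return ret;
	dev_info(dev, "register reads 0x%02x\n", val);

	/* Set bit 0, leaving the remaining bits untouched. */
	return abx500_mask_and_set_register_interruptible(dev, 0x0F, 0x0B,
							  0x01, 0x01);
}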
abx500_mask_and_set_register_inerruptible() - Modifies selected bits of a + * target register + * + * @dev: The AB sub device. + * @bank: The i2c bank number. + * @bitmask: The bit mask to use. + * @bitvalues: The new bit values. + * + * Updates the value of an AB register: + * value -> ((value & ~bitmask) | (bitvalues & bitmask)) + */ +int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues); +int abx500_get_chip_id(struct device *dev); +int abx500_event_registers_startup_state_get(struct device *dev, u8 *event); +int abx500_startup_irq_enabled(struct device *dev, unsigned int irq); + +struct abx500_ops { + int (*get_chip_id) (struct device *); + int (*get_register) (struct device *, u8, u8, u8 *); + int (*set_register) (struct device *, u8, u8, u8); + int (*get_register_page) (struct device *, u8, u8, u8 *, u8); + int (*set_register_page) (struct device *, u8, u8, u8 *, u8); + int (*mask_and_set_register) (struct device *, u8, u8, u8, u8); + int (*event_registers_startup_state_get) (struct device *, u8 *); + int (*startup_irq_enabled) (struct device *, unsigned int); +}; + +int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops); +void abx500_remove_ops(struct device *dev); +#endif diff --git a/include/linux/mfd/abx500/ab5500.h b/include/linux/mfd/abx500/ab5500.h new file mode 100644 index 00000000..54f820ed --- /dev/null +++ b/include/linux/mfd/abx500/ab5500.h @@ -0,0 +1,140 @@ +/* + * Copyright (C) ST-Ericsson 2011 + * + * License Terms: GNU General Public License v2 + */ +#ifndef MFD_AB5500_H +#define MFD_AB5500_H + +struct device; + +enum ab5500_devid { + AB5500_DEVID_ADC, + AB5500_DEVID_LEDS, + AB5500_DEVID_POWER, + AB5500_DEVID_REGULATORS, + AB5500_DEVID_SIM, + AB5500_DEVID_RTC, + AB5500_DEVID_CHARGER, + AB5500_DEVID_FUELGAUGE, + AB5500_DEVID_VIBRATOR, + AB5500_DEVID_CODEC, + AB5500_DEVID_USB, + AB5500_DEVID_OTP, + AB5500_DEVID_VIDEO, + AB5500_DEVID_DBIECI, + AB5500_DEVID_ONSWA, + AB5500_NUM_DEVICES, +}; + +enum ab5500_banks { + AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP = 0, + AB5500_BANK_VDDDIG_IO_I2C_CLK_TST = 1, + AB5500_BANK_VDENC = 2, + AB5500_BANK_SIM_USBSIM = 3, + AB5500_BANK_LED = 4, + AB5500_BANK_ADC = 5, + AB5500_BANK_RTC = 6, + AB5500_BANK_STARTUP = 7, + AB5500_BANK_DBI_ECI = 8, + AB5500_BANK_CHG = 9, + AB5500_BANK_FG_BATTCOM_ACC = 10, + AB5500_BANK_USB = 11, + AB5500_BANK_IT = 12, + AB5500_BANK_VIBRA = 13, + AB5500_BANK_AUDIO_HEADSETUSB = 14, + AB5500_NUM_BANKS = 15, +}; + +enum ab5500_banks_addr { + AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP = 0x4A, + AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST = 0x4B, + AB5500_ADDR_VDENC = 0x06, + AB5500_ADDR_SIM_USBSIM = 0x04, + AB5500_ADDR_LED = 0x10, + AB5500_ADDR_ADC = 0x0A, + AB5500_ADDR_RTC = 0x0F, + AB5500_ADDR_STARTUP = 0x03, + AB5500_ADDR_DBI_ECI = 0x07, + AB5500_ADDR_CHG = 0x0B, + AB5500_ADDR_FG_BATTCOM_ACC = 0x0C, + AB5500_ADDR_USB = 0x05, + AB5500_ADDR_IT = 0x0E, + AB5500_ADDR_VIBRA = 0x02, + AB5500_ADDR_AUDIO_HEADSETUSB = 0x0D, +}; + +/* + * Interrupt register offsets + * Bank : 0x0E + */ +#define AB5500_IT_SOURCE0_REG 0x20 +#define AB5500_IT_SOURCE1_REG 0x21 +#define AB5500_IT_SOURCE2_REG 0x22 +#define AB5500_IT_SOURCE3_REG 0x23 +#define AB5500_IT_SOURCE4_REG 0x24 +#define AB5500_IT_SOURCE5_REG 0x25 +#define AB5500_IT_SOURCE6_REG 0x26 +#define AB5500_IT_SOURCE7_REG 0x27 +#define AB5500_IT_SOURCE8_REG 0x28 +#define AB5500_IT_SOURCE9_REG 0x29 +#define AB5500_IT_SOURCE10_REG 0x2A +#define AB5500_IT_SOURCE11_REG 0x2B +#define AB5500_IT_SOURCE12_REG 0x2C +#define 
AB5500_IT_SOURCE13_REG 0x2D +#define AB5500_IT_SOURCE14_REG 0x2E +#define AB5500_IT_SOURCE15_REG 0x2F +#define AB5500_IT_SOURCE16_REG 0x30 +#define AB5500_IT_SOURCE17_REG 0x31 +#define AB5500_IT_SOURCE18_REG 0x32 +#define AB5500_IT_SOURCE19_REG 0x33 +#define AB5500_IT_SOURCE20_REG 0x34 +#define AB5500_IT_SOURCE21_REG 0x35 +#define AB5500_IT_SOURCE22_REG 0x36 +#define AB5500_IT_SOURCE23_REG 0x37 + +#define AB5500_NUM_IRQ_REGS 23 + +/** + * struct ab5500 + * @access_mutex: lock out concurrent accesses to the AB registers + * @dev: a pointer to the device struct for this chip driver + * @ab5500_irq: the analog baseband irq + * @irq_base: the platform configuration irq base for subdevices + * @chip_name: name of this chip variant + * @chip_id: 8 bit chip ID for this chip variant + * @irq_lock: a lock to protect the mask + * @abb_events: a local bit mask of the prcmu wakeup events + * @event_mask: a local copy of the mask event registers + * @last_event_mask: a copy of the last event_mask written to hardware + * @startup_events: a copy of the first reading of the event registers + * @startup_events_read: whether the first events have been read + */ +struct ab5500 { + struct mutex access_mutex; + struct device *dev; + unsigned int ab5500_irq; + unsigned int irq_base; + char chip_name[32]; + u8 chip_id; + struct mutex irq_lock; + u32 abb_events; + u8 mask[AB5500_NUM_IRQ_REGS]; + u8 oldmask[AB5500_NUM_IRQ_REGS]; + u8 startup_events[AB5500_NUM_IRQ_REGS]; + bool startup_events_read; +#ifdef CONFIG_DEBUG_FS + unsigned int debug_bank; + unsigned int debug_address; +#endif +}; + +struct ab5500_platform_data { + struct {unsigned int base; unsigned int count; } irq; + void *dev_data[AB5500_NUM_DEVICES]; + struct abx500_init_settings *init_settings; + unsigned int init_settings_sz; + bool pm_power_off; +}; + +#endif /* MFD_AB5500_H */ diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h new file mode 100644 index 00000000..44310c98 --- /dev/null +++ b/include/linux/mfd/abx500/ab8500-bm.h @@ -0,0 +1,474 @@ +/* + * Copyright ST-Ericsson 2012. + * + * Author: Arun Murthy <arun.murthy@stericsson.com> + * Licensed under GPLv2. + */ + +#ifndef _AB8500_BM_H +#define _AB8500_BM_H + +#include <linux/kernel.h> +#include <linux/mfd/abx500.h> + +/* + * System control 2 register offsets. 
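An illustrative machine-level sketch of the AB5500 platform data above: a couple of one-time register writes plus an IRQ window handed to the core driver. The IRQ base/count, register offsets and settings are placeholders invented for the example.

static struct abx500_init_settings example_ab5500_init[] = {
	{ .bank = AB5500_BANK_RTC,     .reg = 0x00, .setting = 0x01 },
	{ .bank = AB5500_BANK_STARTUP, .reg = 0x01, .setting = 0x00 },
};

static struct ab5500_platform_data example_ab5500_pdata = {
	.irq			= { .base = 400, .count = 96 },
	.init_settings		= example_ab5500_init,
	.init_settings_sz	= ARRAY_SIZE(example_ab5500_init),
	.pm_power_off		= true,
};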
+ * bank = 0x02 + */ +#define AB8500_MAIN_WDOG_CTRL_REG 0x01 +#define AB8500_LOW_BAT_REG 0x03 +#define AB8500_BATT_OK_REG 0x04 +/* + * USB/ULPI register offsets + * Bank : 0x5 + */ +#define AB8500_USB_LINE_STAT_REG 0x80 + +/* + * Charger / status register offfsets + * Bank : 0x0B + */ +#define AB8500_CH_STATUS1_REG 0x00 +#define AB8500_CH_STATUS2_REG 0x01 +#define AB8500_CH_USBCH_STAT1_REG 0x02 +#define AB8500_CH_USBCH_STAT2_REG 0x03 +#define AB8500_CH_FSM_STAT_REG 0x04 +#define AB8500_CH_STAT_REG 0x05 + +/* + * Charger / control register offfsets + * Bank : 0x0B + */ +#define AB8500_CH_VOLT_LVL_REG 0x40 +#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/ +#define AB8500_CH_OPT_CRNTLVL_REG 0x42 +#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/ +#define AB8500_CH_WD_TIMER_REG 0x50 +#define AB8500_CHARG_WD_CTRL 0x51 +#define AB8500_BTEMP_HIGH_TH 0x52 +#define AB8500_LED_INDICATOR_PWM_CTRL 0x53 +#define AB8500_LED_INDICATOR_PWM_DUTY 0x54 +#define AB8500_BATT_OVV 0x55 +#define AB8500_CHARGER_CTRL 0x56 +#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/ + +/* + * Charger / main control register offsets + * Bank : 0x0B + */ +#define AB8500_MCH_CTRL1 0x80 +#define AB8500_MCH_CTRL2 0x81 +#define AB8500_MCH_IPT_CURLVL_REG 0x82 +#define AB8500_CH_WD_REG 0x83 + +/* + * Charger / USB control register offsets + * Bank : 0x0B + */ +#define AB8500_USBCH_CTRL1_REG 0xC0 +#define AB8500_USBCH_CTRL2_REG 0xC1 +#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2 + +/* + * Gas Gauge register offsets + * Bank : 0x0C + */ +#define AB8500_GASG_CC_CTRL_REG 0x00 +#define AB8500_GASG_CC_ACCU1_REG 0x01 +#define AB8500_GASG_CC_ACCU2_REG 0x02 +#define AB8500_GASG_CC_ACCU3_REG 0x03 +#define AB8500_GASG_CC_ACCU4_REG 0x04 +#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05 +#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06 +#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07 +#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08 +#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09 +#define AB8500_GASG_CC_OFFSET_REG 0x0A +#define AB8500_GASG_CC_NCOV_ACCU 0x10 +#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11 +#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12 +#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13 +#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14 + +/* + * Interrupt register offsets + * Bank : 0x0E + */ +#define AB8500_IT_SOURCE2_REG 0x01 +#define AB8500_IT_SOURCE21_REG 0x14 + +/* + * RTC register offsets + * Bank: 0x0F + */ +#define AB8500_RTC_BACKUP_CHG_REG 0x0C +#define AB8500_RTC_CC_CONF_REG 0x01 +#define AB8500_RTC_CTRL_REG 0x0B + +/* + * OTP register offsets + * Bank : 0x15 + */ +#define AB8500_OTP_CONF_15 0x0E + +/* GPADC constants from AB8500 spec, UM0836 */ +#define ADC_RESOLUTION 1024 +#define ADC_CH_MAIN_MIN 0 +#define ADC_CH_MAIN_MAX 20030 +#define ADC_CH_VBUS_MIN 0 +#define ADC_CH_VBUS_MAX 20030 +#define ADC_CH_VBAT_MIN 2300 +#define ADC_CH_VBAT_MAX 4800 +#define ADC_CH_BKBAT_MIN 0 +#define ADC_CH_BKBAT_MAX 3200 + +/* Main charge i/p current */ +#define MAIN_CH_IP_CUR_0P9A 0x80 +#define MAIN_CH_IP_CUR_1P0A 0x90 +#define MAIN_CH_IP_CUR_1P1A 0xA0 +#define MAIN_CH_IP_CUR_1P2A 0xB0 +#define MAIN_CH_IP_CUR_1P3A 0xC0 +#define MAIN_CH_IP_CUR_1P4A 0xD0 +#define MAIN_CH_IP_CUR_1P5A 0xE0 + +/* ChVoltLevel */ +#define CH_VOL_LVL_3P5 0x00 +#define CH_VOL_LVL_4P0 0x14 +#define CH_VOL_LVL_4P05 0x16 +#define CH_VOL_LVL_4P1 0x1B +#define CH_VOL_LVL_4P15 0x20 +#define CH_VOL_LVL_4P2 0x25 +#define CH_VOL_LVL_4P6 0x4D + +/* ChOutputCurrentLevel */ +#define CH_OP_CUR_LVL_0P1 0x00 +#define CH_OP_CUR_LVL_0P2 0x01 +#define CH_OP_CUR_LVL_0P3 0x02 +#define CH_OP_CUR_LVL_0P4 
0x03 +#define CH_OP_CUR_LVL_0P5 0x04 +#define CH_OP_CUR_LVL_0P6 0x05 +#define CH_OP_CUR_LVL_0P7 0x06 +#define CH_OP_CUR_LVL_0P8 0x07 +#define CH_OP_CUR_LVL_0P9 0x08 +#define CH_OP_CUR_LVL_1P4 0x0D +#define CH_OP_CUR_LVL_1P5 0x0E +#define CH_OP_CUR_LVL_1P6 0x0F + +/* BTEMP High thermal limits */ +#define BTEMP_HIGH_TH_57_0 0x00 +#define BTEMP_HIGH_TH_52 0x01 +#define BTEMP_HIGH_TH_57_1 0x02 +#define BTEMP_HIGH_TH_62 0x03 + +/* current is mA */ +#define USB_0P1A 100 +#define USB_0P2A 200 +#define USB_0P3A 300 +#define USB_0P4A 400 +#define USB_0P5A 500 + +#define LOW_BAT_3P1V 0x20 +#define LOW_BAT_2P3V 0x00 +#define LOW_BAT_RESET 0x01 +#define LOW_BAT_ENABLE 0x01 + +/* Backup battery constants */ +#define BUP_ICH_SEL_50UA 0x00 +#define BUP_ICH_SEL_150UA 0x04 +#define BUP_ICH_SEL_300UA 0x08 +#define BUP_ICH_SEL_700UA 0x0C + +#define BUP_VCH_SEL_2P5V 0x00 +#define BUP_VCH_SEL_2P6V 0x01 +#define BUP_VCH_SEL_2P8V 0x02 +#define BUP_VCH_SEL_3P1V 0x03 + +/* Battery OVV constants */ +#define BATT_OVV_ENA 0x02 +#define BATT_OVV_TH_3P7 0x00 +#define BATT_OVV_TH_4P75 0x01 + +/* A value to indicate over voltage */ +#define BATT_OVV_VALUE 4750 + +/* VBUS OVV constants */ +#define VBUS_OVV_SELECT_MASK 0x78 +#define VBUS_OVV_SELECT_5P6V 0x00 +#define VBUS_OVV_SELECT_5P7V 0x08 +#define VBUS_OVV_SELECT_5P8V 0x10 +#define VBUS_OVV_SELECT_5P9V 0x18 +#define VBUS_OVV_SELECT_6P0V 0x20 +#define VBUS_OVV_SELECT_6P1V 0x28 +#define VBUS_OVV_SELECT_6P2V 0x30 +#define VBUS_OVV_SELECT_6P3V 0x38 + +#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04 + +/* Fuel Gauge constants */ +#define RESET_ACCU 0x02 +#define READ_REQ 0x01 +#define CC_DEEP_SLEEP_ENA 0x02 +#define CC_PWR_UP_ENA 0x01 +#define CC_SAMPLES_40 0x28 +#define RD_NCONV_ACCU_REQ 0x01 +#define CC_CALIB 0x08 +#define CC_INTAVGOFFSET_ENA 0x10 +#define CC_MUXOFFSET 0x80 +#define CC_INT_CAL_N_AVG_MASK 0x60 +#define CC_INT_CAL_SAMPLES_16 0x40 +#define CC_INT_CAL_SAMPLES_8 0x20 +#define CC_INT_CAL_SAMPLES_4 0x00 + +/* RTC constants */ +#define RTC_BUP_CH_ENA 0x10 + +/* BatCtrl Current Source Constants */ +#define BAT_CTRL_7U_ENA 0x01 +#define BAT_CTRL_20U_ENA 0x02 +#define BAT_CTRL_CMP_ENA 0x04 +#define FORCE_BAT_CTRL_CMP_HIGH 0x08 +#define BAT_CTRL_PULL_UP_ENA 0x10 + +/* Battery type */ +#define BATTERY_UNKNOWN 00 + +/** + * struct res_to_temp - defines one point in a temp to res curve. To + * be used in battery packs that combines the identification resistor with a + * NTC resistor. + * @temp: battery pack temperature in Celcius + * @resist: NTC resistor net total resistance + */ +struct res_to_temp { + int temp; + int resist; +}; + +/** + * struct batres_vs_temp - defines one point in a temp vs battery internal + * resistance curve. 
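A sketch of how the charger constants above are meant to be combined with the abx500 accessors (bank 0x0B is the charger bank per the comments above); 4.2 V / 0.9 A are arbitrary example levels and error handling is kept minimal.

static int example_set_charge_levels(struct device *dev)
{
	int ret;

	ret = abx500_set_register_interruptible(dev, 0x0B,
			AB8500_CH_VOLT_LVL_REG, CH_VOL_LVL_4P2);
	if (ret)
		return ret;

	return abx500_set_register_interruptible(dev, 0x0B,
			AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P9);
}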
+ * @temp: battery pack temperature in Celcius + * @resist: battery internal reistance in mOhm + */ +struct batres_vs_temp { + int temp; + int resist; +}; + +/* Forward declaration */ +struct ab8500_fg; + +/** + * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds + * if not specified + * @recovery_sleep_timer: Time between measurements while recovering + * @recovery_total_time: Total recovery time + * @init_timer: Measurement interval during startup + * @init_discard_time: Time we discard voltage measurement at startup + * @init_total_time: Total init time during startup + * @high_curr_time: Time current has to be high to go to recovery + * @accu_charging: FG accumulation time while charging + * @accu_high_curr: FG accumulation time in high current mode + * @high_curr_threshold: High current threshold, in mA + * @lowbat_threshold: Low battery threshold, in mV + * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0 + * Resolution in 50 mV step. + * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1 + * Resolution in 50 mV step. + * @user_cap_limit Capacity reported from user must be within this + * limit to be considered as sane, in percentage + * points. + * @maint_thres This is the threshold where we stop reporting + * battery full while in maintenance, in per cent + */ +struct ab8500_fg_parameters { + int recovery_sleep_timer; + int recovery_total_time; + int init_timer; + int init_discard_time; + int init_total_time; + int high_curr_time; + int accu_charging; + int accu_high_curr; + int high_curr_threshold; + int lowbat_threshold; + int battok_falling_th_sel0; + int battok_raising_th_sel1; + int user_cap_limit; + int maint_thres; +}; + +/** + * struct ab8500_charger_maximization - struct used by the board config. 
+ * @use_maxi: Enable maximization for this battery type + * @maxi_chg_curr: Maximum charger current allowed + * @maxi_wait_cycles: cycles to wait before setting charger current + * @charger_curr_step delta between two charger current settings (mA) + */ +struct ab8500_maxim_parameters { + bool ena_maxi; + int chg_curr; + int wait_cycles; + int charger_curr_step; +}; + +/** + * struct ab8500_bm_capacity_levels - ab8500 capacity level data + * @critical: critical capacity level in percent + * @low: low capacity level in percent + * @normal: normal capacity level in percent + * @high: high capacity level in percent + * @full: full capacity level in percent + */ +struct ab8500_bm_capacity_levels { + int critical; + int low; + int normal; + int high; + int full; +}; + +/** + * struct ab8500_bm_charger_parameters - Charger specific parameters + * @usb_volt_max: maximum allowed USB charger voltage in mV + * @usb_curr_max: maximum allowed USB charger current in mA + * @ac_volt_max: maximum allowed AC charger voltage in mV + * @ac_curr_max: maximum allowed AC charger current in mA + */ +struct ab8500_bm_charger_parameters { + int usb_volt_max; + int usb_curr_max; + int ac_volt_max; + int ac_curr_max; +}; + +/** + * struct ab8500_bm_data - ab8500 battery management data + * @temp_under under this temp, charging is stopped + * @temp_low between this temp and temp_under charging is reduced + * @temp_high between this temp and temp_over charging is reduced + * @temp_over over this temp, charging is stopped + * @temp_interval_chg temperature measurement interval in s when charging + * @temp_interval_nochg temperature measurement interval in s when not charging + * @main_safety_tmr_h safety timer for main charger + * @usb_safety_tmr_h safety timer for usb charger + * @bkup_bat_v voltage which we charge the backup battery with + * @bkup_bat_i current which we charge the backup battery with + * @no_maintenance indicates that maintenance charging is disabled + * @adc_therm placement of thermistor, batctrl or battemp adc + * @chg_unknown_bat flag to enable charging of unknown batteries + * @enable_overshoot flag to enable VBAT overshoot control + * @fg_res resistance of FG resistor in 0.1mOhm + * @n_btypes number of elements in array bat_type + * @batt_id index of the identified battery in array bat_type + * @interval_charging charge alg cycle period time when charging (sec) + * @interval_not_charging charge alg cycle period time when not charging (sec) + * @temp_hysteresis temperature hysteresis + * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm) + * @maxi: maximization parameters + * @cap_levels capacity in percent for the different capacity levels + * @bat_type table of supported battery types + * @chg_params charger parameters + * @fg_params fuel gauge parameters + */ +struct ab8500_bm_data { + int temp_under; + int temp_low; + int temp_high; + int temp_over; + int temp_interval_chg; + int temp_interval_nochg; + int main_safety_tmr_h; + int usb_safety_tmr_h; + int bkup_bat_v; + int bkup_bat_i; + bool no_maintenance; + bool chg_unknown_bat; + bool enable_overshoot; + enum abx500_adc_therm adc_therm; + int fg_res; + int n_btypes; + int batt_id; + int interval_charging; + int interval_not_charging; + int temp_hysteresis; + int gnd_lift_resistance; + const struct ab8500_maxim_parameters *maxi; + const struct ab8500_bm_capacity_levels *cap_levels; + const struct ab8500_bm_charger_parameters *chg_params; + const struct ab8500_fg_parameters *fg_params; +}; + +struct 
ab8500_charger_platform_data { + char **supplied_to; + size_t num_supplicants; + bool autopower_cfg; +}; + +struct ab8500_btemp_platform_data { + char **supplied_to; + size_t num_supplicants; +}; + +struct ab8500_fg_platform_data { + char **supplied_to; + size_t num_supplicants; +}; + +struct ab8500_chargalg_platform_data { + char **supplied_to; + size_t num_supplicants; +}; +struct ab8500_btemp; +struct ab8500_gpadc; +struct ab8500_fg; +#ifdef CONFIG_AB8500_BM +void ab8500_fg_reinit(void); +void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA); +struct ab8500_btemp *ab8500_btemp_get(void); +int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp); +struct ab8500_fg *ab8500_fg_get(void); +int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev); +int ab8500_fg_inst_curr_start(struct ab8500_fg *di); +int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res); +int ab8500_fg_inst_curr_done(struct ab8500_fg *di); + +#else +static inline int ab8500_fg_inst_curr_done(struct ab8500_fg *di) +{ + return -ENODEV; +} +static inline void ab8500_fg_reinit(void) +{ +} +static inline void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA) +{ +} +static inline struct ab8500_btemp *ab8500_btemp_get(void) +{ + return NULL; +} +static inline int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp) +{ + return 0; +} +static inline struct ab8500_fg *ab8500_fg_get(void) +{ + return NULL; +} +static inline int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev) +{ + return -ENODEV; +} + +static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di) +{ + return -ENODEV; +} + +static inline int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res) +{ + return -ENODEV; +} + +#endif +#endif /* _AB8500_BM_H */ diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h new file mode 100644 index 00000000..25296676 --- /dev/null +++ b/include/linux/mfd/abx500/ab8500-gpadc.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2010 ST-Ericsson SA + * Licensed under GPLv2. + * + * Author: Arun R Murthy <arun.murthy@stericsson.com> + * Author: Daniel Willerud <daniel.willerud@stericsson.com> + */ + +#ifndef _AB8500_GPADC_H +#define _AB8500_GPADC_H + +/* GPADC source: From datasheet(ADCSwSel[4:0] in GPADCCtrl2) */ +#define BAT_CTRL 0x01 +#define BTEMP_BALL 0x02 +#define MAIN_CHARGER_V 0x03 +#define ACC_DETECT1 0x04 +#define ACC_DETECT2 0x05 +#define ADC_AUX1 0x06 +#define ADC_AUX2 0x07 +#define MAIN_BAT_V 0x08 +#define VBUS_V 0x09 +#define MAIN_CHARGER_C 0x0A +#define USB_CHARGER_C 0x0B +#define BK_BAT_V 0x0C +#define DIE_TEMP 0x0D + +struct ab8500_gpadc; + +struct ab8500_gpadc *ab8500_gpadc_get(char *name); +int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel); +int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel); +int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, + u8 channel, int ad_value); + +#endif /* _AB8500_GPADC_H */ diff --git a/include/linux/mfd/abx500/ab8500-gpio.h b/include/linux/mfd/abx500/ab8500-gpio.h new file mode 100644 index 00000000..2387c207 --- /dev/null +++ b/include/linux/mfd/abx500/ab8500-gpio.h @@ -0,0 +1,23 @@ +/* + * Copyright ST-Ericsson 2010. + * + * Author: Bibek Basu <bibek.basu@stericsson.com> + * Licensed under GPLv2. + */ + +#ifndef _AB8500_GPIO_H +#define _AB8500_GPIO_H + +/* + * Platform data to register a block: only the initial gpio/irq number. + * Array sizes are large enough to contain all AB8500 and AB9540 GPIO + * registers.
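A sketch of reading the main battery voltage through the GPADC interface above. The instance name "ab8500-gpadc.0" is the conventional one but is an assumption here, and ab8500_gpadc_convert() is expected to return a value already scaled by the driver (mV for MAIN_BAT_V).

static int example_read_vbat_mv(void)
{
	struct ab8500_gpadc *gpadc = ab8500_gpadc_get("ab8500-gpadc.0");

	if (IS_ERR_OR_NULL(gpadc))
		return -ENODEV;

	return ab8500_gpadc_convert(gpadc, MAIN_BAT_V);
}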
+ */ + +struct ab8500_gpio_platform_data { + int gpio_base; + u32 irq_base; + u8 config_reg[8]; +}; + +#endif /* _AB8500_GPIO_H */ diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h new file mode 100644 index 00000000..10eb5097 --- /dev/null +++ b/include/linux/mfd/abx500/ab8500-sysctrl.h @@ -0,0 +1,297 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson. + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __AB8500_SYSCTRL_H +#define __AB8500_SYSCTRL_H + +#include <linux/bitops.h> + +#ifdef CONFIG_AB8500_CORE + +int ab8500_sysctrl_read(u16 reg, u8 *value); +int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value); + +#else + +static inline int ab8500_sysctrl_read(u16 reg, u8 *value) +{ + return 0; +} + +static inline int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value) +{ + return 0; +} + +#endif /* CONFIG_AB8500_CORE */ + +static inline int ab8500_sysctrl_set(u16 reg, u8 bits) +{ + return ab8500_sysctrl_write(reg, bits, bits); +} + +static inline int ab8500_sysctrl_clear(u16 reg, u8 bits) +{ + return ab8500_sysctrl_write(reg, bits, 0); +} + +/* Registers */ +#define AB8500_TURNONSTATUS 0x100 +#define AB8500_RESETSTATUS 0x101 +#define AB8500_PONKEY1PRESSSTATUS 0x102 +#define AB8500_SYSCLKREQSTATUS 0x142 +#define AB8500_STW4500CTRL1 0x180 +#define AB8500_STW4500CTRL2 0x181 +#define AB8500_STW4500CTRL3 0x200 +#define AB8500_MAINWDOGCTRL 0x201 +#define AB8500_MAINWDOGTIMER 0x202 +#define AB8500_LOWBAT 0x203 +#define AB8500_BATTOK 0x204 +#define AB8500_SYSCLKTIMER 0x205 +#define AB8500_SMPSCLKCTRL 0x206 +#define AB8500_SMPSCLKSEL1 0x207 +#define AB8500_SMPSCLKSEL2 0x208 +#define AB8500_SMPSCLKSEL3 0x209 +#define AB8500_SYSULPCLKCONF 0x20A +#define AB8500_SYSULPCLKCTRL1 0x20B +#define AB8500_SYSCLKCTRL 0x20C +#define AB8500_SYSCLKREQ1VALID 0x20D +#define AB8500_SYSTEMCTRLSUP 0x20F +#define AB8500_SYSCLKREQ1RFCLKBUF 0x210 +#define AB8500_SYSCLKREQ2RFCLKBUF 0x211 +#define AB8500_SYSCLKREQ3RFCLKBUF 0x212 +#define AB8500_SYSCLKREQ4RFCLKBUF 0x213 +#define AB8500_SYSCLKREQ5RFCLKBUF 0x214 +#define AB8500_SYSCLKREQ6RFCLKBUF 0x215 +#define AB8500_SYSCLKREQ7RFCLKBUF 0x216 +#define AB8500_SYSCLKREQ8RFCLKBUF 0x217 +#define AB8500_DITHERCLKCTRL 0x220 +#define AB8500_SWATCTRL 0x230 +#define AB8500_HIQCLKCTRL 0x232 +#define AB8500_VSIMSYSCLKCTRL 0x233 +#define AB9540_SYSCLK12BUFCTRL 0x234 +#define AB9540_SYSCLK12CONFCTRL 0x235 +#define AB9540_SYSCLK12BUFCTRL2 0x236 +#define AB9540_SYSCLK12BUF1VALID 0x237 +#define AB9540_SYSCLK12BUF2VALID 0x238 +#define AB9540_SYSCLK12BUF3VALID 0x239 +#define AB9540_SYSCLK12BUF4VALID 0x23A + +/* Bits */ +#define AB8500_TURNONSTATUS_PORNVBAT BIT(0) +#define AB8500_TURNONSTATUS_PONKEY1DBF BIT(1) +#define AB8500_TURNONSTATUS_PONKEY2DBF BIT(2) +#define AB8500_TURNONSTATUS_RTCALARM BIT(3) +#define AB8500_TURNONSTATUS_MAINCHDET BIT(4) +#define AB8500_TURNONSTATUS_VBUSDET BIT(5) +#define AB8500_TURNONSTATUS_USBIDDETECT BIT(6) + +#define AB8500_RESETSTATUS_RESETN4500NSTATUS BIT(0) +#define AB8500_RESETSTATUS_SWRESETN4500NSTATUS BIT(2) + +#define AB8500_PONKEY1PRESSSTATUS_PONKEY1PRESSTIME_MASK 0x7F +#define AB8500_PONKEY1PRESSSTATUS_PONKEY1PRESSTIME_SHIFT 0 + +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ1STATUS BIT(0) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ2STATUS BIT(1) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ3STATUS BIT(2) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ4STATUS BIT(3) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ5STATUS BIT(4) +#define 
AB8500_SYSCLKREQSTATUS_SYSCLKREQ6STATUS BIT(5) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ7STATUS BIT(6) +#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ8STATUS BIT(7) + +#define AB8500_STW4500CTRL1_SWOFF BIT(0) +#define AB8500_STW4500CTRL1_SWRESET4500N BIT(1) +#define AB8500_STW4500CTRL1_THDB8500SWOFF BIT(2) + +#define AB8500_STW4500CTRL2_RESETNVAUX1VALID BIT(0) +#define AB8500_STW4500CTRL2_RESETNVAUX2VALID BIT(1) +#define AB8500_STW4500CTRL2_RESETNVAUX3VALID BIT(2) +#define AB8500_STW4500CTRL2_RESETNVMODVALID BIT(3) +#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY1VALID BIT(4) +#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY2VALID BIT(5) +#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY3VALID BIT(6) +#define AB8500_STW4500CTRL2_RESETNVSMPS1VALID BIT(7) + +#define AB8500_STW4500CTRL3_CLK32KOUT2DIS BIT(0) +#define AB8500_STW4500CTRL3_RESETAUDN BIT(1) +#define AB8500_STW4500CTRL3_RESETDENCN BIT(2) +#define AB8500_STW4500CTRL3_THSDENA BIT(3) + +#define AB8500_MAINWDOGCTRL_MAINWDOGENA BIT(0) +#define AB8500_MAINWDOGCTRL_MAINWDOGKICK BIT(1) +#define AB8500_MAINWDOGCTRL_WDEXPTURNONVALID BIT(4) + +#define AB8500_MAINWDOGTIMER_MAINWDOGTIMER_MASK 0x7F +#define AB8500_MAINWDOGTIMER_MAINWDOGTIMER_SHIFT 0 + +#define AB8500_LOWBAT_LOWBATENA BIT(0) +#define AB8500_LOWBAT_LOWBAT_MASK 0x7E +#define AB8500_LOWBAT_LOWBAT_SHIFT 1 + +#define AB8500_BATTOK_BATTOKSEL0THF_MASK 0x0F +#define AB8500_BATTOK_BATTOKSEL0THF_SHIFT 0 +#define AB8500_BATTOK_BATTOKSEL1THF_MASK 0xF0 +#define AB8500_BATTOK_BATTOKSEL1THF_SHIFT 4 + +#define AB8500_SYSCLKTIMER_SYSCLKTIMER_MASK 0x0F +#define AB8500_SYSCLKTIMER_SYSCLKTIMER_SHIFT 0 +#define AB8500_SYSCLKTIMER_SYSCLKTIMERADJ_MASK 0xF0 +#define AB8500_SYSCLKTIMER_SYSCLKTIMERADJ_SHIFT 4 + +#define AB8500_SMPSCLKCTRL_SMPSCLKINTSEL_MASK 0x03 +#define AB8500_SMPSCLKCTRL_SMPSCLKINTSEL_SHIFT 0 +#define AB8500_SMPSCLKCTRL_3M2CLKINTENA BIT(2) + +#define AB8500_SMPSCLKSEL1_VARMCLKSEL_MASK 0x07 +#define AB8500_SMPSCLKSEL1_VARMCLKSEL_SHIFT 0 +#define AB8500_SMPSCLKSEL1_VAPECLKSEL_MASK 0x38 +#define AB8500_SMPSCLKSEL1_VAPECLKSEL_SHIFT 3 + +#define AB8500_SMPSCLKSEL2_VMODCLKSEL_MASK 0x07 +#define AB8500_SMPSCLKSEL2_VMODCLKSEL_SHIFT 0 +#define AB8500_SMPSCLKSEL2_VSMPS1CLKSEL_MASK 0x38 +#define AB8500_SMPSCLKSEL2_VSMPS1CLKSEL_SHIFT 3 + +#define AB8500_SMPSCLKSEL3_VSMPS2CLKSEL_MASK 0x07 +#define AB8500_SMPSCLKSEL3_VSMPS2CLKSEL_SHIFT 0 +#define AB8500_SMPSCLKSEL3_VSMPS3CLKSEL_MASK 0x38 +#define AB8500_SMPSCLKSEL3_VSMPS3CLKSEL_SHIFT 3 + +#define AB8500_SYSULPCLKCONF_ULPCLKCONF_MASK 0x03 +#define AB8500_SYSULPCLKCONF_ULPCLKCONF_SHIFT 0 +#define AB8500_SYSULPCLKCONF_CLK27MHZSTRE BIT(2) +#define AB8500_SYSULPCLKCONF_TVOUTCLKDELN BIT(3) +#define AB8500_SYSULPCLKCONF_TVOUTCLKINV BIT(4) +#define AB8500_SYSULPCLKCONF_ULPCLKSTRE BIT(5) +#define AB8500_SYSULPCLKCONF_CLK27MHZBUFENA BIT(6) +#define AB8500_SYSULPCLKCONF_CLK27MHZPDENA BIT(7) + +#define AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK 0x03 +#define AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT 0 +#define AB8500_SYSULPCLKCTRL1_ULPCLKREQ BIT(2) +#define AB8500_SYSULPCLKCTRL1_4500SYSCLKREQ BIT(3) +#define AB8500_SYSULPCLKCTRL1_AUDIOCLKENA BIT(4) +#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ BIT(5) +#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ BIT(6) +#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ BIT(7) + +#define AB8500_SYSCLKCTRL_TVOUTPLLENA BIT(0) +#define AB8500_SYSCLKCTRL_TVOUTCLKENA BIT(1) +#define AB8500_SYSCLKCTRL_USBCLKENA BIT(2) + +#define AB8500_SYSCLKREQ1VALID_SYSCLKREQ1VALID BIT(0) +#define AB8500_SYSCLKREQ1VALID_ULPCLKREQ1VALID BIT(1) +#define 
AB8500_SYSCLKREQ1VALID_USBSYSCLKREQ1VALID BIT(2) + +#define AB8500_SYSTEMCTRLSUP_EXTSUP12LPNCLKSEL_MASK 0x03 +#define AB8500_SYSTEMCTRLSUP_EXTSUP12LPNCLKSEL_SHIFT 0 +#define AB8500_SYSTEMCTRLSUP_EXTSUP3LPNCLKSEL_MASK 0x0C +#define AB8500_SYSTEMCTRLSUP_EXTSUP3LPNCLKSEL_SHIFT 2 +#define AB8500_SYSTEMCTRLSUP_INTDB8500NOD BIT(4) + +#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF4 BIT(4) + +#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF2 BIT(2) +#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF3 BIT(3) +#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF4 BIT(4) + +#define AB8500_DITHERCLKCTRL_VARMDITHERENA BIT(0) +#define AB8500_DITHERCLKCTRL_VSMPS3DITHERENA BIT(1) +#define AB8500_DITHERCLKCTRL_VSMPS1DITHERENA BIT(2) +#define AB8500_DITHERCLKCTRL_VSMPS2DITHERENA BIT(3) +#define AB8500_DITHERCLKCTRL_VMODDITHERENA BIT(4) +#define AB8500_DITHERCLKCTRL_VAPEDITHERENA BIT(5) +#define AB8500_DITHERCLKCTRL_DITHERDEL_MASK 0xC0 +#define AB8500_DITHERCLKCTRL_DITHERDEL_SHIFT 6 + +#define AB8500_SWATCTRL_UPDATERF BIT(0) +#define AB8500_SWATCTRL_SWATENABLE BIT(1) +#define AB8500_SWATCTRL_RFOFFTIMER_MASK 0x1C +#define AB8500_SWATCTRL_RFOFFTIMER_SHIFT 2 +#define AB8500_SWATCTRL_SWATBIT5 BIT(6) + +#define AB8500_HIQCLKCTRL_SYSCLKREQ1HIQENAVALID BIT(0) +#define AB8500_HIQCLKCTRL_SYSCLKREQ2HIQENAVALID BIT(1) +#define AB8500_HIQCLKCTRL_SYSCLKREQ3HIQENAVALID BIT(2) +#define AB8500_HIQCLKCTRL_SYSCLKREQ4HIQENAVALID BIT(3) +#define AB8500_HIQCLKCTRL_SYSCLKREQ5HIQENAVALID BIT(4) +#define AB8500_HIQCLKCTRL_SYSCLKREQ6HIQENAVALID BIT(5) +#define AB8500_HIQCLKCTRL_SYSCLKREQ7HIQENAVALID BIT(6) +#define AB8500_HIQCLKCTRL_SYSCLKREQ8HIQENAVALID BIT(7) + +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ1VALID BIT(0) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ2VALID BIT(1) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ3VALID BIT(2) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ4VALID BIT(3) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ5VALID BIT(4) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ6VALID BIT(5) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ7VALID BIT(6) +#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ8VALID BIT(7) + +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF1ENA BIT(0) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF2ENA BIT(1) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF3ENA BIT(2) +#define 
AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF4ENA BIT(3) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUFENA_MASK 0x0F +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF1STRE BIT(4) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF2STRE BIT(5) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF3STRE BIT(6) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF4STRE BIT(7) +#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUFSTRE_MASK 0xF0 + +#define AB9540_SYSCLK12CONFCTRL_PLL26TO38ENA BIT(0) +#define AB9540_SYSCLK12CONFCTRL_SYSCLK12USBMUXSEL BIT(1) +#define AB9540_SYSCLK12CONFCTRL_INT384MHZMUXSEL_MASK 0x0C +#define AB9540_SYSCLK12CONFCTRL_INT384MHZMUXSEL_SHIFT 2 +#define AB9540_SYSCLK12CONFCTRL_SYSCLK12BUFMUX BIT(4) +#define AB9540_SYSCLK12CONFCTRL_SYSCLK12PLLMUX BIT(5) +#define AB9540_SYSCLK12CONFCTRL_SYSCLK2MUXVALID BIT(6) + +#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF1PDENA BIT(0) +#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF2PDENA BIT(1) +#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF3PDENA BIT(2) +#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF4PDENA BIT(3) + +#define AB9540_SYSCLK12BUF1VALID_SYSCLK12BUF1VALID_MASK 0xFF +#define AB9540_SYSCLK12BUF1VALID_SYSCLK12BUF1VALID_SHIFT 0 + +#define AB9540_SYSCLK12BUF2VALID_SYSCLK12BUF2VALID_MASK 0xFF +#define AB9540_SYSCLK12BUF2VALID_SYSCLK12BUF2VALID_SHIFT 0 + +#define AB9540_SYSCLK12BUF3VALID_SYSCLK12BUF3VALID_MASK 0xFF +#define AB9540_SYSCLK12BUF3VALID_SYSCLK12BUF3VALID_SHIFT 0 + +#define AB9540_SYSCLK12BUF4VALID_SYSCLK12BUF4VALID_MASK 0xFF +#define AB9540_SYSCLK12BUF4VALID_SYSCLK12BUF4VALID_SHIFT 0 + +#endif /* __AB8500_SYSCTRL_H */ diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h new file mode 100644 index 00000000..fccc3002 --- /dev/null +++ b/include/linux/mfd/abx500/ab8500.h @@ -0,0 +1,327 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> + */ +#ifndef MFD_AB8500_H +#define MFD_AB8500_H + +#include <linux/mutex.h> + +struct device; + +/* + * AB IC versions + * + * AB8500_VERSION_AB8500 should be 0xFF but will never be read as need a + * non-supported multi-byte I2C access via PRCMU. Set to 0x00 to ease the + * print of version string. 
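A sketch of the sysctrl helpers above used to enable and kick the main watchdog; the register and bit names come from the defines above, while whether a board actually wants the watchdog running is a policy decision outside this example.

static int example_kick_main_wdog(void)
{
	int ret;

	ret = ab8500_sysctrl_set(AB8500_MAINWDOGCTRL,
				 AB8500_MAINWDOGCTRL_MAINWDOGENA);
	if (ret)
		return ret;

	return ab8500_sysctrl_set(AB8500_MAINWDOGCTRL,
				  AB8500_MAINWDOGCTRL_MAINWDOGKICK);
}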
+ */ +enum ab8500_version { + AB8500_VERSION_AB8500 = 0x0, + AB8500_VERSION_AB8505 = 0x1, + AB8500_VERSION_AB9540 = 0x2, + AB8500_VERSION_AB8540 = 0x3, + AB8500_VERSION_UNDEFINED, +}; + +/* AB8500 CIDs*/ +#define AB8500_CUTEARLY 0x00 +#define AB8500_CUT1P0 0x10 +#define AB8500_CUT1P1 0x11 +#define AB8500_CUT2P0 0x20 +#define AB8500_CUT3P0 0x30 +#define AB8500_CUT3P3 0x33 + +/* + * AB8500 bank addresses + */ +#define AB8500_SYS_CTRL1_BLOCK 0x1 +#define AB8500_SYS_CTRL2_BLOCK 0x2 +#define AB8500_REGU_CTRL1 0x3 +#define AB8500_REGU_CTRL2 0x4 +#define AB8500_USB 0x5 +#define AB8500_TVOUT 0x6 +#define AB8500_DBI 0x7 +#define AB8500_ECI_AV_ACC 0x8 +#define AB8500_RESERVED 0x9 +#define AB8500_GPADC 0xA +#define AB8500_CHARGER 0xB +#define AB8500_GAS_GAUGE 0xC +#define AB8500_AUDIO 0xD +#define AB8500_INTERRUPT 0xE +#define AB8500_RTC 0xF +#define AB8500_MISC 0x10 +#define AB8500_DEVELOPMENT 0x11 +#define AB8500_DEBUG 0x12 +#define AB8500_PROD_TEST 0x13 +#define AB8500_OTP_EMUL 0x15 + +/* + * Interrupts + * Values used to index into array ab8500_irq_regoffset[] defined in + * drivers/mdf/ab8500-core.c + */ +/* Definitions for AB8500 and AB9540 */ +/* ab8500_irq_regoffset[0] -> IT[Source|Latch|Mask]1 */ +#define AB8500_INT_MAIN_EXT_CH_NOT_OK 0 /* not 8505/9540 */ +#define AB8500_INT_UN_PLUG_TV_DET 1 /* not 8505/9540 */ +#define AB8500_INT_PLUG_TV_DET 2 /* not 8505/9540 */ +#define AB8500_INT_TEMP_WARM 3 +#define AB8500_INT_PON_KEY2DB_F 4 +#define AB8500_INT_PON_KEY2DB_R 5 +#define AB8500_INT_PON_KEY1DB_F 6 +#define AB8500_INT_PON_KEY1DB_R 7 +/* ab8500_irq_regoffset[1] -> IT[Source|Latch|Mask]2 */ +#define AB8500_INT_BATT_OVV 8 +#define AB8500_INT_MAIN_CH_UNPLUG_DET 10 /* not 8505 */ +#define AB8500_INT_MAIN_CH_PLUG_DET 11 /* not 8505 */ +#define AB8500_INT_VBUS_DET_F 14 +#define AB8500_INT_VBUS_DET_R 15 +/* ab8500_irq_regoffset[2] -> IT[Source|Latch|Mask]3 */ +#define AB8500_INT_VBUS_CH_DROP_END 16 +#define AB8500_INT_RTC_60S 17 +#define AB8500_INT_RTC_ALARM 18 +#define AB8500_INT_BAT_CTRL_INDB 20 +#define AB8500_INT_CH_WD_EXP 21 +#define AB8500_INT_VBUS_OVV 22 +#define AB8500_INT_MAIN_CH_DROP_END 23 /* not 8505/9540 */ +/* ab8500_irq_regoffset[3] -> IT[Source|Latch|Mask]4 */ +#define AB8500_INT_CCN_CONV_ACC 24 +#define AB8500_INT_INT_AUD 25 +#define AB8500_INT_CCEOC 26 +#define AB8500_INT_CC_INT_CALIB 27 +#define AB8500_INT_LOW_BAT_F 28 +#define AB8500_INT_LOW_BAT_R 29 +#define AB8500_INT_BUP_CHG_NOT_OK 30 +#define AB8500_INT_BUP_CHG_OK 31 +/* ab8500_irq_regoffset[4] -> IT[Source|Latch|Mask]5 */ +#define AB8500_INT_GP_HW_ADC_CONV_END 32 /* not 8505 */ +#define AB8500_INT_ACC_DETECT_1DB_F 33 +#define AB8500_INT_ACC_DETECT_1DB_R 34 +#define AB8500_INT_ACC_DETECT_22DB_F 35 +#define AB8500_INT_ACC_DETECT_22DB_R 36 +#define AB8500_INT_ACC_DETECT_21DB_F 37 +#define AB8500_INT_ACC_DETECT_21DB_R 38 +#define AB8500_INT_GP_SW_ADC_CONV_END 39 +/* ab8500_irq_regoffset[5] -> IT[Source|Latch|Mask]7 */ +#define AB8500_INT_GPIO6R 40 /* not 8505/9540 */ +#define AB8500_INT_GPIO7R 41 /* not 8505/9540 */ +#define AB8500_INT_GPIO8R 42 /* not 8505/9540 */ +#define AB8500_INT_GPIO9R 43 /* not 8505/9540 */ +#define AB8500_INT_GPIO10R 44 +#define AB8500_INT_GPIO11R 45 +#define AB8500_INT_GPIO12R 46 /* not 8505 */ +#define AB8500_INT_GPIO13R 47 +/* ab8500_irq_regoffset[6] -> IT[Source|Latch|Mask]8 */ +#define AB8500_INT_GPIO24R 48 /* not 8505 */ +#define AB8500_INT_GPIO25R 49 /* not 8505 */ +#define AB8500_INT_GPIO36R 50 /* not 8505/9540 */ +#define AB8500_INT_GPIO37R 51 /* not 8505/9540 */ +#define AB8500_INT_GPIO38R 52 
/* not 8505/9540 */ +#define AB8500_INT_GPIO39R 53 /* not 8505/9540 */ +#define AB8500_INT_GPIO40R 54 +#define AB8500_INT_GPIO41R 55 +/* ab8500_irq_regoffset[7] -> IT[Source|Latch|Mask]9 */ +#define AB8500_INT_GPIO6F 56 /* not 8505/9540 */ +#define AB8500_INT_GPIO7F 57 /* not 8505/9540 */ +#define AB8500_INT_GPIO8F 58 /* not 8505/9540 */ +#define AB8500_INT_GPIO9F 59 /* not 8505/9540 */ +#define AB8500_INT_GPIO10F 60 +#define AB8500_INT_GPIO11F 61 +#define AB8500_INT_GPIO12F 62 /* not 8505 */ +#define AB8500_INT_GPIO13F 63 +/* ab8500_irq_regoffset[8] -> IT[Source|Latch|Mask]10 */ +#define AB8500_INT_GPIO24F 64 /* not 8505 */ +#define AB8500_INT_GPIO25F 65 /* not 8505 */ +#define AB8500_INT_GPIO36F 66 /* not 8505/9540 */ +#define AB8500_INT_GPIO37F 67 /* not 8505/9540 */ +#define AB8500_INT_GPIO38F 68 /* not 8505/9540 */ +#define AB8500_INT_GPIO39F 69 /* not 8505/9540 */ +#define AB8500_INT_GPIO40F 70 +#define AB8500_INT_GPIO41F 71 +/* ab8500_irq_regoffset[9] -> IT[Source|Latch|Mask]12 */ +#define AB8500_INT_ADP_SOURCE_ERROR 72 +#define AB8500_INT_ADP_SINK_ERROR 73 +#define AB8500_INT_ADP_PROBE_PLUG 74 +#define AB8500_INT_ADP_PROBE_UNPLUG 75 +#define AB8500_INT_ADP_SENSE_OFF 76 +#define AB8500_INT_USB_PHY_POWER_ERR 78 +#define AB8500_INT_USB_LINK_STATUS 79 +/* ab8500_irq_regoffset[10] -> IT[Source|Latch|Mask]19 */ +#define AB8500_INT_BTEMP_LOW 80 +#define AB8500_INT_BTEMP_LOW_MEDIUM 81 +#define AB8500_INT_BTEMP_MEDIUM_HIGH 82 +#define AB8500_INT_BTEMP_HIGH 83 +/* ab8500_irq_regoffset[11] -> IT[Source|Latch|Mask]20 */ +#define AB8500_INT_SRP_DETECT 88 +#define AB8500_INT_USB_CHARGER_NOT_OKR 89 +#define AB8500_INT_ID_WAKEUP_R 90 +#define AB8500_INT_ID_DET_R1R 92 +#define AB8500_INT_ID_DET_R2R 93 +#define AB8500_INT_ID_DET_R3R 94 +#define AB8500_INT_ID_DET_R4R 95 +/* ab8500_irq_regoffset[12] -> IT[Source|Latch|Mask]21 */ +#define AB8500_INT_ID_WAKEUP_F 96 +#define AB8500_INT_ID_DET_R1F 98 +#define AB8500_INT_ID_DET_R2F 99 +#define AB8500_INT_ID_DET_R3F 100 +#define AB8500_INT_ID_DET_R4F 101 +#define AB8500_INT_CHAUTORESTARTAFTSEC 102 +#define AB8500_INT_CHSTOPBYSEC 103 +/* ab8500_irq_regoffset[13] -> IT[Source|Latch|Mask]22 */ +#define AB8500_INT_USB_CH_TH_PROT_F 104 +#define AB8500_INT_USB_CH_TH_PROT_R 105 +#define AB8500_INT_MAIN_CH_TH_PROT_F 106 /* not 8505/9540 */ +#define AB8500_INT_MAIN_CH_TH_PROT_R 107 /* not 8505/9540 */ +#define AB8500_INT_CHCURLIMNOHSCHIRP 109 +#define AB8500_INT_CHCURLIMHSCHIRP 110 +#define AB8500_INT_XTAL32K_KO 111 + +/* Definitions for AB9540 */ +/* ab8500_irq_regoffset[14] -> IT[Source|Latch|Mask]13 */ +#define AB9540_INT_GPIO50R 113 +#define AB9540_INT_GPIO51R 114 /* not 8505 */ +#define AB9540_INT_GPIO52R 115 +#define AB9540_INT_GPIO53R 116 +#define AB9540_INT_GPIO54R 117 /* not 8505 */ +#define AB9540_INT_IEXT_CH_RF_BFN_R 118 +#define AB9540_INT_IEXT_CH_RF_BFN_F 119 +/* ab8500_irq_regoffset[15] -> IT[Source|Latch|Mask]14 */ +#define AB9540_INT_GPIO50F 121 +#define AB9540_INT_GPIO51F 122 /* not 8505 */ +#define AB9540_INT_GPIO52F 123 +#define AB9540_INT_GPIO53F 124 +#define AB9540_INT_GPIO54F 125 /* not 8505 */ + +/* + * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the + * entire platform. This is a "compile time" constant so this must be set to + * the largest possible value that may be encountered with different AB SOCs. + * Of the currently supported AB devices, AB8500 and AB9540, it is the AB9540 + * which is larger. 
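For illustration, a subdevice driver requests one of the interrupts listed above roughly as follows; how the absolute IRQ number is obtained (irq_base + AB8500_INT_VBUS_DET_R, or resolved by the MFD core) is platform dependent and assumed here, and the handler/name are hypothetical.

static irqreturn_t example_vbus_irq(int irq, void *data)
{
	/* VBUS rising edge reported by the AB8500. */
	return IRQ_HANDLED;
}

static int example_request_vbus_irq(int irq, void *priv)
{
	return request_threaded_irq(irq, NULL, example_vbus_irq,
				    IRQF_ONESHOT, "ab8500-vbus-det-r", priv);
}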
+ */ +#define AB8500_NR_IRQS 112 +#define AB8505_NR_IRQS 128 +#define AB9540_NR_IRQS 128 +/* This is set to the roof of any AB8500 chip variant IRQ counts */ +#define AB8500_MAX_NR_IRQS AB9540_NR_IRQS + +#define AB8500_NUM_IRQ_REGS 14 +#define AB9540_NUM_IRQ_REGS 17 + +/** + * struct ab8500 - ab8500 internal structure + * @dev: parent device + * @lock: read/write operations lock + * @irq_lock: genirq bus lock + * @irq: irq line + * @version: chip version id (e.g. ab8500 or ab9540) + * @chip_id: chip revision id + * @write: register write + * @write_masked: masked register write + * @read: register read + * @rx_buf: rx buf for SPI + * @tx_buf: tx buf for SPI + * @mask: cache of IRQ regs for bus lock + * @oldmask: cache of previous IRQ regs for bus lock + * @mask_size: Actual number of valid entries in mask[], oldmask[] and + * irq_reg_offset + * @irq_reg_offset: Array of offsets into IRQ registers + */ +struct ab8500 { + struct device *dev; + struct mutex lock; + struct mutex irq_lock; + + int irq_base; + int irq; + enum ab8500_version version; + u8 chip_id; + + int (*write)(struct ab8500 *ab8500, u16 addr, u8 data); + int (*write_masked)(struct ab8500 *ab8500, u16 addr, u8 mask, u8 data); + int (*read)(struct ab8500 *ab8500, u16 addr); + + unsigned long tx_buf[4]; + unsigned long rx_buf[4]; + + u8 *mask; + u8 *oldmask; + int mask_size; + const int *irq_reg_offset; +}; + +struct regulator_reg_init; +struct regulator_init_data; +struct ab8500_gpio_platform_data; + +/** + * struct ab8500_platform_data - AB8500 platform data + * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used + * @init: board-specific initialization after detection of ab8500 + * @num_regulator_reg_init: number of regulator init registers + * @regulator_reg_init: regulator init registers + * @num_regulator: number of regulators + * @regulator: machine-specific constraints for regulators + */ +struct ab8500_platform_data { + int irq_base; + void (*init) (struct ab8500 *); + int num_regulator_reg_init; + struct ab8500_regulator_reg_init *regulator_reg_init; + int num_regulator; + struct regulator_init_data *regulator; + struct ab8500_gpio_platform_data *gpio; +}; + +extern int __devinit ab8500_init(struct ab8500 *ab8500, + enum ab8500_version version); +extern int __devexit ab8500_exit(struct ab8500 *ab8500); + +static inline int is_ab8500(struct ab8500 *ab) +{ + return ab->version == AB8500_VERSION_AB8500; +} + +static inline int is_ab8505(struct ab8500 *ab) +{ + return ab->version == AB8500_VERSION_AB8505; +} + +static inline int is_ab9540(struct ab8500 *ab) +{ + return ab->version == AB8500_VERSION_AB9540; +} + +static inline int is_ab8540(struct ab8500 *ab) +{ + return ab->version == AB8500_VERSION_AB8540; +} + +/* exclude also ab8505, ab9540... */ +static inline int is_ab8500_1p0_or_earlier(struct ab8500 *ab) +{ + return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT1P0)); +} + +/* exclude also ab8505, ab9540... */ +static inline int is_ab8500_1p1_or_earlier(struct ab8500 *ab) +{ + return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT1P1)); +} + +/* exclude also ab8505, ab9540... */ +static inline int is_ab8500_2p0_or_earlier(struct ab8500 *ab) +{ + return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT2P0)); +} + +/* exclude also ab8505, ab9540... 
*/ +static inline int is_ab8500_2p0(struct ab8500 *ab) +{ + return (is_ab8500(ab) && (ab->chip_id == AB8500_CUT2P0)); +} + +#endif /* MFD_AB8500_H */ diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h new file mode 100644 index 00000000..9b077257 --- /dev/null +++ b/include/linux/mfd/abx500/ux500_chargalg.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) ST-Ericsson SA 2012 + * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _UX500_CHARGALG_H +#define _UX500_CHARGALG_H + +#include <linux/power_supply.h> + +#define psy_to_ux500_charger(x) container_of((x), \ + struct ux500_charger, psy) + +/* Forward declaration */ +struct ux500_charger; + +struct ux500_charger_ops { + int (*enable) (struct ux500_charger *, int, int, int); + int (*kick_wd) (struct ux500_charger *); + int (*update_curr) (struct ux500_charger *, int); +}; + +/** + * struct ux500_charger - power supply ux500 charger sub class + * @psy power supply base class + * @ops ux500 charger operations + * @max_out_volt maximum output charger voltage in mV + * @max_out_curr maximum output charger current in mA + */ +struct ux500_charger { + struct power_supply psy; + struct ux500_charger_ops ops; + int max_out_volt; + int max_out_curr; +}; + +#endif diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h new file mode 100644 index 00000000..ac37558a --- /dev/null +++ b/include/linux/mfd/adp5520.h @@ -0,0 +1,299 @@ +/* + * Definitions and platform data for Analog Devices + * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys) + * + * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + + +#ifndef __LINUX_MFD_ADP5520_H +#define __LINUX_MFD_ADP5520_H + +#define ID_ADP5520 5520 +#define ID_ADP5501 5501 + +/* + * ADP5520/ADP5501 Register Map + */ + +#define ADP5520_MODE_STATUS 0x00 +#define ADP5520_INTERRUPT_ENABLE 0x01 +#define ADP5520_BL_CONTROL 0x02 +#define ADP5520_BL_TIME 0x03 +#define ADP5520_BL_FADE 0x04 +#define ADP5520_DAYLIGHT_MAX 0x05 +#define ADP5520_DAYLIGHT_DIM 0x06 +#define ADP5520_OFFICE_MAX 0x07 +#define ADP5520_OFFICE_DIM 0x08 +#define ADP5520_DARK_MAX 0x09 +#define ADP5520_DARK_DIM 0x0A +#define ADP5520_BL_VALUE 0x0B +#define ADP5520_ALS_CMPR_CFG 0x0C +#define ADP5520_L2_TRIP 0x0D +#define ADP5520_L2_HYS 0x0E +#define ADP5520_L3_TRIP 0x0F +#define ADP5520_L3_HYS 0x10 +#define ADP5520_LED_CONTROL 0x11 +#define ADP5520_LED_TIME 0x12 +#define ADP5520_LED_FADE 0x13 +#define ADP5520_LED1_CURRENT 0x14 +#define ADP5520_LED2_CURRENT 0x15 +#define ADP5520_LED3_CURRENT 0x16 + +/* + * ADP5520 Register Map + */ + +#define ADP5520_GPIO_CFG_1 0x17 +#define ADP5520_GPIO_CFG_2 0x18 +#define ADP5520_GPIO_IN 0x19 +#define ADP5520_GPIO_OUT 0x1A +#define ADP5520_GPIO_INT_EN 0x1B +#define ADP5520_GPIO_INT_STAT 0x1C +#define ADP5520_GPIO_INT_LVL 0x1D +#define ADP5520_GPIO_DEBOUNCE 0x1E +#define ADP5520_GPIO_PULLUP 0x1F +#define ADP5520_KP_INT_STAT_1 0x20 +#define ADP5520_KP_INT_STAT_2 0x21 +#define ADP5520_KR_INT_STAT_1 0x22 +#define ADP5520_KR_INT_STAT_2 0x23 +#define ADP5520_KEY_STAT_1 0x24 +#define ADP5520_KEY_STAT_2 0x25 + +/* + * MODE_STATUS bits + */ + +#define ADP5520_nSTNBY (1 << 7) +#define ADP5520_BL_EN (1 << 6) +#define ADP5520_DIM_EN (1 << 5) +#define ADP5520_OVP_INT (1 << 4) +#define ADP5520_CMPR_INT (1 << 3) +#define ADP5520_GPI_INT (1 << 2) +#define ADP5520_KR_INT (1 << 1) +#define ADP5520_KP_INT (1 << 0) + +/* + * INTERRUPT_ENABLE bits 
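A sketch of how a charger driver hooks into the ux500 charging-algorithm interface above: embed struct ux500_charger and fill in ux500_charger_ops. The meaning of the enable() integer arguments (enable flag, voltage in mV, current in mA) is an assumption drawn from the struct documentation, not spelled out by the header, and all identifiers here are hypothetical.

struct example_charger {
	struct ux500_charger ac_chg;
	/* hardware specific state would live here */
};

static int example_charger_enable(struct ux500_charger *charger,
				  int enable, int vset, int iset)
{
	struct example_charger *di =
		container_of(charger, struct example_charger, ac_chg);

	/* Program the hardware for vset mV / iset mA when enable != 0. */
	(void)di;
	return 0;
}

static struct ux500_charger_ops example_charger_ops = {
	.enable = example_charger_enable,
};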
+ */ + +#define ADP5520_AUTO_LD_EN (1 << 4) +#define ADP5520_CMPR_IEN (1 << 3) +#define ADP5520_OVP_IEN (1 << 2) +#define ADP5520_KR_IEN (1 << 1) +#define ADP5520_KP_IEN (1 << 0) + +/* + * BL_CONTROL bits + */ + +#define ADP5520_BL_LVL ((x) << 5) +#define ADP5520_BL_LAW ((x) << 4) +#define ADP5520_BL_AUTO_ADJ (1 << 3) +#define ADP5520_OVP_EN (1 << 2) +#define ADP5520_FOVR (1 << 1) +#define ADP5520_KP_BL_EN (1 << 0) + +/* + * ALS_CMPR_CFG bits + */ + +#define ADP5520_L3_OUT (1 << 3) +#define ADP5520_L2_OUT (1 << 2) +#define ADP5520_L3_EN (1 << 1) + +#define ADP5020_MAX_BRIGHTNESS 0x7F + +#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4)) +#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4)) +#define ALS_CMPR_CFG_VAL(filt, l3_en) (((0x7 & filt) << 5) | l3_en) + +/* + * LEDs subdevice bits and masks + */ + +#define ADP5520_01_MAXLEDS 3 + +#define ADP5520_FLAG_LED_MASK 0x3 +#define ADP5520_FLAG_OFFT_SHIFT 8 +#define ADP5520_FLAG_OFFT_MASK 0x3 + +#define ADP5520_R3_MODE (1 << 5) +#define ADP5520_C3_MODE (1 << 4) +#define ADP5520_LED_LAW (1 << 3) +#define ADP5520_LED3_EN (1 << 2) +#define ADP5520_LED2_EN (1 << 1) +#define ADP5520_LED1_EN (1 << 0) + +/* + * GPIO subdevice bits and masks + */ + +#define ADP5520_MAXGPIOS 8 + +#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */ +#define ADP5520_GPIO_C2 (1 << 6) +#define ADP5520_GPIO_C1 (1 << 5) +#define ADP5520_GPIO_C0 (1 << 4) +#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */ +#define ADP5520_GPIO_R2 (1 << 2) +#define ADP5520_GPIO_R1 (1 << 1) +#define ADP5520_GPIO_R0 (1 << 0) + +struct adp5520_gpio_platform_data { + unsigned gpio_start; + u8 gpio_en_mask; + u8 gpio_pullup_mask; +}; + +/* + * Keypad subdevice bits and masks + */ + +#define ADP5520_MAXKEYS 16 + +#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */ +#define ADP5520_COL_C2 (1 << 6) +#define ADP5520_COL_C1 (1 << 5) +#define ADP5520_COL_C0 (1 << 4) +#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */ +#define ADP5520_ROW_R2 (1 << 2) +#define ADP5520_ROW_R1 (1 << 1) +#define ADP5520_ROW_R0 (1 << 0) + +#define ADP5520_KEY(row, col) (col + row * 4) +#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS + +struct adp5520_keys_platform_data { + int rows_en_mask; /* Number of rows */ + int cols_en_mask; /* Number of columns */ + const unsigned short *keymap; /* Pointer to keymap */ + unsigned short keymapsize; /* Keymap size */ + unsigned repeat:1; /* Enable key repeat */ +}; + + +/* + * LEDs subdevice platform data + */ + +#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */ +#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */ +#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */ + +#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT) +#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT) +#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT) +#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT) + +#define ADP5520_LED_ONT_200ms 0 +#define ADP5520_LED_ONT_600ms 1 +#define ADP5520_LED_ONT_800ms 2 +#define ADP5520_LED_ONT_1200ms 3 + +struct adp5520_leds_platform_data { + int num_leds; + struct led_info *leds; + u8 fade_in; /* Backlight Fade-In Timer */ + u8 fade_out; /* Backlight Fade-Out Timer */ + u8 led_on_time; +}; + +/* + * Backlight subdevice platform data + */ + +#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP5520_FADE_T_600ms 2 +#define ADP5520_FADE_T_900ms 3 +#define ADP5520_FADE_T_1200ms 4 
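An illustrative board sketch for the ADP5520 keypad subdevice above: a 2x2 matrix on rows R0/R1 and columns C0/C1. The key codes (from <linux/input.h>) and the layout are examples only.

static const unsigned short example_adp5520_keymap[ADP5520_KEYMAPSIZE] = {
	[ADP5520_KEY(0, 0)]	= KEY_UP,
	[ADP5520_KEY(0, 1)]	= KEY_DOWN,
	[ADP5520_KEY(1, 0)]	= KEY_LEFT,
	[ADP5520_KEY(1, 1)]	= KEY_RIGHT,
};

static struct adp5520_keys_platform_data example_adp5520_keys = {
	.rows_en_mask	= ADP5520_ROW_R0 | ADP5520_ROW_R1,
	.cols_en_mask	= ADP5520_COL_C0 | ADP5520_COL_C1,
	.keymap		= example_adp5520_keymap,
	.keymapsize	= ARRAY_SIZE(example_adp5520_keymap),
	.repeat		= 0,
};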
+#define ADP5520_FADE_T_1500ms 5 +#define ADP5520_FADE_T_1800ms 6 +#define ADP5520_FADE_T_2100ms 7 +#define ADP5520_FADE_T_2400ms 8 +#define ADP5520_FADE_T_2700ms 9 +#define ADP5520_FADE_T_3000ms 10 +#define ADP5520_FADE_T_3500ms 11 +#define ADP5520_FADE_T_4000ms 12 +#define ADP5520_FADE_T_4500ms 13 +#define ADP5520_FADE_T_5000ms 14 +#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP5520_BL_LAW_LINEAR 0 +#define ADP5520_BL_LAW_SQUARE 1 +#define ADP5520_BL_LAW_CUBIC1 2 +#define ADP5520_BL_LAW_CUBIC2 3 + +#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP5520_BL_AMBL_FILT_160ms 1 +#define ADP5520_BL_AMBL_FILT_320ms 2 +#define ADP5520_BL_AMBL_FILT_640ms 3 +#define ADP5520_BL_AMBL_FILT_1280ms 4 +#define ADP5520_BL_AMBL_FILT_2560ms 5 +#define ADP5520_BL_AMBL_FILT_5120ms 6 +#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ + + /* + * Blacklight current 0..30mA + */ +#define ADP5520_BL_CUR_mA(I) ((I * 127) / 30) + + /* + * L2 comparator current 0..1000uA + */ +#define ADP5520_L2_COMP_CURR_uA(I) ((I * 255) / 1000) + + /* + * L3 comparator current 0..127uA + */ +#define ADP5520_L3_COMP_CURR_uA(I) ((I * 255) / 127) + +struct adp5520_backlight_platform_data { + u8 fade_in; /* Backlight Fade-In Timer */ + u8 fade_out; /* Backlight Fade-Out Timer */ + u8 fade_led_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */ +}; + +/* + * MFD chip platform data + */ + +struct adp5520_platform_data { + struct adp5520_keys_platform_data *keys; + struct adp5520_gpio_platform_data *gpio; + struct adp5520_leds_platform_data *leds; + struct adp5520_backlight_platform_data *backlight; +}; + +/* + * MFD chip functions + */ + +extern int adp5520_read(struct device *dev, int reg, uint8_t *val); +extern int adp5520_write(struct device *dev, int reg, u8 val); +extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask); +extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask); + +extern int adp5520_register_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); + +extern int adp5520_unregister_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); + +#endif /* __LINUX_MFD_ADP5520_H */ diff --git a/include/linux/mfd/anatop.h b/include/linux/mfd/anatop.h new file mode 100644 index 00000000..22c1007d --- /dev/null +++ b/include/linux/mfd/anatop.h @@ -0,0 +1,40 @@ +/* + * anatop.h - Anatop MFD driver + * + * Copyright (C) 2012 Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org> + * Copyright (C) 2012 Linaro + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_ANATOP_H +#define __LINUX_MFD_ANATOP_H + +#include <linux/spinlock.h> + +/** + * anatop - MFD data + * @ioreg: ioremap register + * @reglock: spinlock for register read/write + */ +struct anatop { + void *ioreg; + spinlock_t reglock; +}; + +extern u32 anatop_get_bits(struct anatop *, u32, int, int); +extern void anatop_set_bits(struct anatop *, u32, int, int, u32); + +#endif /* __LINUX_MFD_ANATOP_H */ diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h new file mode 100644 index 00000000..ed793b77 --- /dev/null +++ b/include/linux/mfd/asic3.h @@ -0,0 +1,313 @@ +/* + * include/linux/mfd/asic3.h + * + * Compaq ASIC3 headers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Copyright 2001 Compaq Computer Corporation. + * Copyright 2007-2008 OpenedHand Ltd. + */ + +#ifndef __ASIC3_H__ +#define __ASIC3_H__ + +#include <linux/types.h> + +struct led_classdev; +struct asic3_led { + const char *name; + const char *default_trigger; + struct led_classdev *cdev; +}; + +struct asic3_platform_data { + u16 *gpio_config; + unsigned int gpio_config_num; + + unsigned int irq_base; + + unsigned int gpio_base; + + struct asic3_led *leds; +}; + +#define ASIC3_NUM_GPIO_BANKS 4 +#define ASIC3_GPIOS_PER_BANK 16 +#define ASIC3_NUM_GPIOS 64 +#define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6 + +#define ASIC3_IRQ_LED0 64 +#define ASIC3_IRQ_LED1 65 +#define ASIC3_IRQ_LED2 66 +#define ASIC3_IRQ_SPI 67 +#define ASIC3_IRQ_SMBUS 68 +#define ASIC3_IRQ_OWM 69 + +#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio)) + +#define ASIC3_GPIO_BANK_A 0 +#define ASIC3_GPIO_BANK_B 1 +#define ASIC3_GPIO_BANK_C 2 +#define ASIC3_GPIO_BANK_D 3 + +#define ASIC3_GPIO(bank, gpio) \ + ((ASIC3_GPIOS_PER_BANK * ASIC3_GPIO_BANK_##bank) + (gpio)) +#define ASIC3_GPIO_bit(gpio) (1 << (gpio & 0xf)) +/* All offsets below are specified with this address bus shift */ +#define ASIC3_DEFAULT_ADDR_SHIFT 2 + +#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg) +#define ASIC3_GPIO_OFFSET(base, reg) \ + (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg) + +#define ASIC3_GPIO_A_BASE 0x0000 +#define ASIC3_GPIO_B_BASE 0x0100 +#define ASIC3_GPIO_C_BASE 0x0200 +#define ASIC3_GPIO_D_BASE 0x0300 + +#define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4) +#define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \ + (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4))) +#define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio)) +#define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100)) +#define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100)) + +#define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */ +#define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */ +#define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */ +#define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */ +#define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */ +#define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */ +#define 
ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */ +#define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */ +#define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */ +#define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */ +#define ASIC3_GPIO_ALT_FUNCTION 0x28 /* R/W 1:LED register control */ +#define ASIC3_GPIO_SLEEP_CONF 0x2c /* + * R/W bit 1: autosleep + * 0: disable gposlpout in normal mode, + * enable gposlpout in sleep mode. + */ +#define ASIC3_GPIO_STATUS 0x30 /* R Pin status */ + +/* + * ASIC3 GPIO config + * + * Bits 0..6 gpio number + * Bits 7..13 Alternate function + * Bit 14 Direction + * Bit 15 Initial value + * + */ +#define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f) +#define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7) +#define ASIC3_CONFIG_GPIO_DIR(config) ((config & (1 << 14)) >> 14) +#define ASIC3_CONFIG_GPIO_INIT(config) ((config & (1 << 15)) >> 15) +#define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \ + | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \ + | (((init) & 0x1) << 15)) +#define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \ + ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init)) +#define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \ + ASIC3_CONFIG_GPIO((gpio), 0, 1, (init)) + +/* + * Alternate functions + */ +#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0) +#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0) +#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0) +#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0) +#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0) +#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0) +#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0) +#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0) +#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0) +#define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0) +#define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0) +#define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0) +#define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0) +#define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0) +#define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0) +#define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0) +#define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0) +#define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0) +#define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0) +#define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0) +#define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0) +#define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0) +#define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0) +#define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0) + + +#define ASIC3_SPI_Base 0x0400 +#define ASIC3_SPI_Control 0x0000 +#define ASIC3_SPI_TxData 0x0004 +#define ASIC3_SPI_RxData 0x0008 +#define ASIC3_SPI_Int 0x000c +#define ASIC3_SPI_Status 0x0010 + +#define SPI_CONTROL_SPR(clk) ((clk) & 0x0f) /* Clock rate */ + +#define ASIC3_PWM_0_Base 0x0500 +#define ASIC3_PWM_1_Base 0x0600 +#define ASIC3_PWM_TimeBase 0x0000 +#define ASIC3_PWM_PeriodTime 0x0004 +#define ASIC3_PWM_DutyTime 0x0008 + +#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */ +#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */ + +#define ASIC3_NUM_LEDS 3 +#define ASIC3_LED_0_Base 0x0700 +#define ASIC3_LED_1_Base 0x0800 +#define ASIC3_LED_2_Base 0x0900 +#define ASIC3_LED_TimeBase 0x0000 /* R/W 7 bits */ +#define ASIC3_LED_PeriodTime 
0x0004 /* R/W 12 bits */ +#define ASIC3_LED_DutyTime 0x0008 /* R/W 12 bits */ +#define ASIC3_LED_AutoStopCount 0x000c /* R/W 16 bits */ + +/* LED TimeBase bits - match ASIC2 */ +#define LED_TBS 0x0f /* Low 4 bits sets time base, max = 13 */ + /* Note: max = 5 on hx4700 */ + /* 0: maximum time base */ + /* 1: maximum time base / 2 */ + /* n: maximum time base / 2^n */ + +#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */ +#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */ +#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */ + +#define ASIC3_CLOCK_BASE 0x0A00 +#define ASIC3_CLOCK_CDEX 0x00 +#define ASIC3_CLOCK_SEL 0x04 + +#define CLOCK_CDEX_SOURCE (1 << 0) /* 2 bits */ +#define CLOCK_CDEX_SOURCE0 (1 << 0) +#define CLOCK_CDEX_SOURCE1 (1 << 1) +#define CLOCK_CDEX_SPI (1 << 2) +#define CLOCK_CDEX_OWM (1 << 3) +#define CLOCK_CDEX_PWM0 (1 << 4) +#define CLOCK_CDEX_PWM1 (1 << 5) +#define CLOCK_CDEX_LED0 (1 << 6) +#define CLOCK_CDEX_LED1 (1 << 7) +#define CLOCK_CDEX_LED2 (1 << 8) + +/* Clocks settings: 1 for 24.576 MHz, 0 for 12.288Mhz */ +#define CLOCK_CDEX_SD_HOST (1 << 9) /* R/W: SD host clock source */ +#define CLOCK_CDEX_SD_BUS (1 << 10) /* R/W: SD bus clock source ctrl */ +#define CLOCK_CDEX_SMBUS (1 << 11) +#define CLOCK_CDEX_CONTROL_CX (1 << 12) + +#define CLOCK_CDEX_EX0 (1 << 13) /* R/W: 32.768 kHz crystal */ +#define CLOCK_CDEX_EX1 (1 << 14) /* R/W: 24.576 MHz crystal */ + +#define CLOCK_SEL_SD_HCLK_SEL (1 << 0) /* R/W: SDIO host clock select */ +#define CLOCK_SEL_SD_BCLK_SEL (1 << 1) /* R/W: SDIO bus clock select */ + +/* R/W: INT clock source control (32.768 kHz) */ +#define CLOCK_SEL_CX (1 << 2) + + +#define ASIC3_INTR_BASE 0x0B00 + +#define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */ +#define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */ +#define ASIC3_INTR_INT_CPS 0x08 /* Interrupt timer clock pre-scale */ +#define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */ + +#define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */ +#define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */ +#define ASIC3_INTMASK_MASK0 (1 << 2) +#define ASIC3_INTMASK_MASK1 (1 << 3) +#define ASIC3_INTMASK_MASK2 (1 << 4) +#define ASIC3_INTMASK_MASK3 (1 << 5) +#define ASIC3_INTMASK_MASK4 (1 << 6) +#define ASIC3_INTMASK_MASK5 (1 << 7) + +#define ASIC3_INTR_PERIPHERAL_A (1 << 0) +#define ASIC3_INTR_PERIPHERAL_B (1 << 1) +#define ASIC3_INTR_PERIPHERAL_C (1 << 2) +#define ASIC3_INTR_PERIPHERAL_D (1 << 3) +#define ASIC3_INTR_LED0 (1 << 4) +#define ASIC3_INTR_LED1 (1 << 5) +#define ASIC3_INTR_LED2 (1 << 6) +#define ASIC3_INTR_SPI (1 << 7) +#define ASIC3_INTR_SMBUS (1 << 8) +#define ASIC3_INTR_OWM (1 << 9) + +#define ASIC3_INTR_CPS(x) ((x)&0x0f) /* 4 bits, max 14 */ +#define ASIC3_INTR_CPS_SET (1 << 4) /* Time base enable */ + + +/* Basic control of the SD ASIC */ +#define ASIC3_SDHWCTRL_BASE 0x0E00 +#define ASIC3_SDHWCTRL_SDCONF 0x00 + +#define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */ +#define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */ +#define ASIC3_SDHWCTRL_PCLR (1 << 2) /* All registers of SDIO cleared */ +#define ASIC3_SDHWCTRL_LEVCD (1 << 3) /* SD card detection: 0:low */ + +/* SD card write protection: 0=high */ +#define ASIC3_SDHWCTRL_LEVWP (1 << 4) +#define ASIC3_SDHWCTRL_SDLED (1 << 5) /* SD card LED signal 0=disable */ + +/* SD card power supply ctrl 1=enable */ +#define ASIC3_SDHWCTRL_SDPWR (1 << 6) + +#define ASIC3_EXTCF_BASE 0x1100 + +#define ASIC3_EXTCF_SELECT 0x00 
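/*
 * Editorial sketch, not part of the original patch: a hypothetical machine
 * file describing ASIC3 pin setup with the config macros above. The pin
 * choices, IRQ_BOARD_START and the "example_" names are illustrative
 * assumptions; ASIC3_GPIOC0_LED0 selects the LED0 alternate function and
 * ASIC3_CONFIG_GPIO_DEFAULT_OUT() claims a pin as a plain output with a
 * given initial level.
 */
static u16 example_asic3_gpio_config[] = {
	ASIC3_GPIOC0_LED0,					/* C0 drives LED0 */
	ASIC3_GPIOA11_PWM0,					/* A11 drives PWM0 */
	ASIC3_CONFIG_GPIO_DEFAULT_OUT(ASIC3_GPIO(D, 15), 1),	/* D15: output, high */
};

static struct asic3_platform_data example_asic3_pdata = {
	.gpio_config		= example_asic3_gpio_config,
	.gpio_config_num	= ARRAY_SIZE(example_asic3_gpio_config),
	.irq_base		= IRQ_BOARD_START,	/* platform specific */
	.gpio_base		= NR_BUILTIN_GPIO,	/* platform specific */
};
/* (The extended configuration (EXTCF) register definitions continue below.) */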
+#define ASIC3_EXTCF_RESET 0x04 + +#define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */ +#define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */ +#define ASIC3_EXTCF_SMOD2 (1 << 2) /* slot number of mode 2 */ +#define ASIC3_EXTCF_OWM_EN (1 << 4) /* enable onewire module */ +#define ASIC3_EXTCF_OWM_SMB (1 << 5) /* OWM bus selection */ +#define ASIC3_EXTCF_OWM_RESET (1 << 6) /* ?? used by OWM and CF */ +#define ASIC3_EXTCF_CF0_SLEEP_MODE (1 << 7) /* CF0 sleep state */ +#define ASIC3_EXTCF_CF1_SLEEP_MODE (1 << 8) /* CF1 sleep state */ +#define ASIC3_EXTCF_CF0_PWAIT_EN (1 << 10) /* CF0 PWAIT_n control */ +#define ASIC3_EXTCF_CF1_PWAIT_EN (1 << 11) /* CF1 PWAIT_n control */ +#define ASIC3_EXTCF_CF0_BUF_EN (1 << 12) /* CF0 buffer control */ +#define ASIC3_EXTCF_CF1_BUF_EN (1 << 13) /* CF1 buffer control */ +#define ASIC3_EXTCF_SD_MEM_ENABLE (1 << 14) +#define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */ + +/********************************************* + * The Onewire interface (DS1WM) is handled + * by the ds1wm driver. + * + *********************************************/ + +#define ASIC3_OWM_BASE 0xC00 + +/***************************************************************************** + * The SD configuration registers are at a completely different location + * in memory. They are divided into three sets of registers: + * + * SD_CONFIG Core configuration register + * SD_CTRL Control registers for SD operations + * SDIO_CTRL Control registers for SDIO operations + * + *****************************************************************************/ +#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */ +#define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */ +#define ASIC3_SD_CTRL_BASE 0x1000 +#define ASIC3_SDIO_CTRL_BASE 0x1200 + +#define ASIC3_MAP_SIZE_32BIT 0x2000 +#define ASIC3_MAP_SIZE_16BIT 0x1000 + +/* Functions needed by leds-asic3 */ + +struct asic3; +extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val); +extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg); + +#endif /* __ASIC3_H__ */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h new file mode 100644 index 00000000..4e76163d --- /dev/null +++ b/include/linux/mfd/core.h @@ -0,0 +1,100 @@ +/* + * drivers/mfd/mfd-core.h + * + * core MFD support + * Copyright (c) 2006 Ian Molton + * Copyright (c) 2007 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef MFD_CORE_H +#define MFD_CORE_H + +#include <linux/platform_device.h> + +/* + * This struct describes the MFD part ("cell"). + * After registration the copy of this structure will become the platform data + * of the resulting platform_device + */ +struct mfd_cell { + const char *name; + int id; + + /* refcounting for multiple drivers to use a single cell */ + atomic_t *usage_count; + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + /* platform data passed to the sub devices drivers */ + void *platform_data; + size_t pdata_size; + + /* + * These resources can be specified relative to the parent device. 
+ * For accessing hardware you should use resources from the platform dev + */ + int num_resources; + const struct resource *resources; + + /* don't check for resource conflicts */ + bool ignore_resource_conflicts; + + /* + * Disable runtime PM callbacks for this subdevice - see + * pm_runtime_no_callbacks(). + */ + bool pm_runtime_no_callbacks; +}; + +/* + * Convenience functions for clients using shared cells. Refcounting + * happens automatically, with the cell's enable/disable callbacks + * being called only when a device is first being enabled or no other + * clients are making use of it. + */ +extern int mfd_cell_enable(struct platform_device *pdev); +extern int mfd_cell_disable(struct platform_device *pdev); + +/* + * "Clone" multiple platform devices for a single cell. This is to be used + * for devices that have multiple users of a cell. For example, if an mfd + * driver wants the cell "foo" to be used by a GPIO driver, an MTD driver, + * and a platform driver, the following bit of code would be use after first + * calling mfd_add_devices(): + * + * const char *fclones[] = { "foo-gpio", "foo-mtd" }; + * err = mfd_clone_cells("foo", fclones, ARRAY_SIZE(fclones)); + * + * Each driver (MTD, GPIO, and platform driver) would then register + * platform_drivers for "foo-mtd", "foo-gpio", and "foo", respectively. + * The cell's .enable/.disable hooks should be used to deal with hardware + * resource contention. + */ +extern int mfd_clone_cell(const char *cell, const char **clones, + size_t n_clones); + +/* + * Given a platform device that's been created by mfd_add_devices(), fetch + * the mfd_cell that created it. + */ +static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) +{ + return pdev->mfd_cell; +} + +extern int mfd_add_devices(struct device *parent, int id, + struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base); + +extern void mfd_remove_devices(struct device *parent); + +#endif diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h new file mode 100644 index 00000000..0aa3a1a4 --- /dev/null +++ b/include/linux/mfd/da903x.h @@ -0,0 +1,247 @@ +#ifndef __LINUX_PMIC_DA903X_H +#define __LINUX_PMIC_DA903X_H + +/* Unified sub device IDs for DA9030/DA9034/DA9035 */ +enum { + DA9030_ID_LED_1, + DA9030_ID_LED_2, + DA9030_ID_LED_3, + DA9030_ID_LED_4, + DA9030_ID_LED_PC, + DA9030_ID_VIBRA, + DA9030_ID_WLED, + DA9030_ID_BUCK1, + DA9030_ID_BUCK2, + DA9030_ID_LDO1, + DA9030_ID_LDO2, + DA9030_ID_LDO3, + DA9030_ID_LDO4, + DA9030_ID_LDO5, + DA9030_ID_LDO6, + DA9030_ID_LDO7, + DA9030_ID_LDO8, + DA9030_ID_LDO9, + DA9030_ID_LDO10, + DA9030_ID_LDO11, + DA9030_ID_LDO12, + DA9030_ID_LDO13, + DA9030_ID_LDO14, + DA9030_ID_LDO15, + DA9030_ID_LDO16, + DA9030_ID_LDO17, + DA9030_ID_LDO18, + DA9030_ID_LDO19, + DA9030_ID_LDO_INT, /* LDO Internal */ + DA9030_ID_BAT, /* battery charger */ + + DA9034_ID_LED_1, + DA9034_ID_LED_2, + DA9034_ID_VIBRA, + DA9034_ID_WLED, + DA9034_ID_TOUCH, + + DA9034_ID_BUCK1, + DA9034_ID_BUCK2, + DA9034_ID_LDO1, + DA9034_ID_LDO2, + DA9034_ID_LDO3, + DA9034_ID_LDO4, + DA9034_ID_LDO5, + DA9034_ID_LDO6, + DA9034_ID_LDO7, + DA9034_ID_LDO8, + DA9034_ID_LDO9, + DA9034_ID_LDO10, + DA9034_ID_LDO11, + DA9034_ID_LDO12, + DA9034_ID_LDO13, + DA9034_ID_LDO14, + DA9034_ID_LDO15, + + DA9035_ID_BUCK3, +}; + +/* + * DA9030/DA9034 LEDs sub-devices uses generic "struct led_info" + * as the platform_data + */ + +/* DA9030 flags for "struct led_info" + */ +#define DA9030_LED_RATE_ON (0 << 5) +#define DA9030_LED_RATE_052S (1 << 5) +#define 
DA9030_LED_DUTY_1_16 (0 << 3) +#define DA9030_LED_DUTY_1_8 (1 << 3) +#define DA9030_LED_DUTY_1_4 (2 << 3) +#define DA9030_LED_DUTY_1_2 (3 << 3) + +#define DA9030_VIBRA_MODE_1P3V (0 << 1) +#define DA9030_VIBRA_MODE_2P7V (1 << 1) +#define DA9030_VIBRA_FREQ_1HZ (0 << 2) +#define DA9030_VIBRA_FREQ_2HZ (1 << 2) +#define DA9030_VIBRA_FREQ_4HZ (2 << 2) +#define DA9030_VIBRA_FREQ_8HZ (3 << 2) +#define DA9030_VIBRA_DUTY_ON (0 << 4) +#define DA9030_VIBRA_DUTY_75P (1 << 4) +#define DA9030_VIBRA_DUTY_50P (2 << 4) +#define DA9030_VIBRA_DUTY_25P (3 << 4) + +/* DA9034 flags for "struct led_info" */ +#define DA9034_LED_RAMP (1 << 7) + +/* DA9034 touch screen platform data */ +struct da9034_touch_pdata { + int interval_ms; /* sampling interval while pen down */ + int x_inverted; + int y_inverted; +}; + +struct da9034_backlight_pdata { + int output_current; /* output current of WLED, from 0-31 (in mA) */ +}; + +/* DA9030 battery charger data */ +struct power_supply_info; + +struct da9030_battery_info { + /* battery parameters */ + struct power_supply_info *battery_info; + + /* current and voltage to use for battery charging */ + unsigned int charge_milliamp; + unsigned int charge_millivolt; + + /* voltage thresholds (in millivolts) */ + int vbat_low; + int vbat_crit; + int vbat_charge_start; + int vbat_charge_stop; + int vbat_charge_restart; + + /* battery nominal minimal and maximal voltages in millivolts */ + int vcharge_min; + int vcharge_max; + + /* Temperature thresholds. These are DA9030 register values + "as is" and should be measured for each battery type */ + int tbat_low; + int tbat_high; + int tbat_restart; + + + /* battery monitor interval (seconds) */ + unsigned int batmon_interval; + + /* platform callbacks for battery low and critical events */ + void (*battery_low)(void); + void (*battery_critical)(void); +}; + +struct da903x_subdev_info { + int id; + const char *name; + void *platform_data; +}; + +struct da903x_platform_data { + int num_subdevs; + struct da903x_subdev_info *subdevs; +}; + +/* bit definitions for DA9030 events */ +#define DA9030_EVENT_ONKEY (1 << 0) +#define DA9030_EVENT_PWREN (1 << 1) +#define DA9030_EVENT_EXTON (1 << 2) +#define DA9030_EVENT_CHDET (1 << 3) +#define DA9030_EVENT_TBAT (1 << 4) +#define DA9030_EVENT_VBATMON (1 << 5) +#define DA9030_EVENT_VBATMON_TXON (1 << 6) +#define DA9030_EVENT_CHIOVER (1 << 7) +#define DA9030_EVENT_TCTO (1 << 8) +#define DA9030_EVENT_CCTO (1 << 9) +#define DA9030_EVENT_ADC_READY (1 << 10) +#define DA9030_EVENT_VBUS_4P4 (1 << 11) +#define DA9030_EVENT_VBUS_4P0 (1 << 12) +#define DA9030_EVENT_SESS_VALID (1 << 13) +#define DA9030_EVENT_SRP_DETECT (1 << 14) +#define DA9030_EVENT_WATCHDOG (1 << 15) +#define DA9030_EVENT_LDO15 (1 << 16) +#define DA9030_EVENT_LDO16 (1 << 17) +#define DA9030_EVENT_LDO17 (1 << 18) +#define DA9030_EVENT_LDO18 (1 << 19) +#define DA9030_EVENT_LDO19 (1 << 20) +#define DA9030_EVENT_BUCK2 (1 << 21) + +/* bit definitions for DA9034 events */ +#define DA9034_EVENT_ONKEY (1 << 0) +#define DA9034_EVENT_EXTON (1 << 2) +#define DA9034_EVENT_CHDET (1 << 3) +#define DA9034_EVENT_TBAT (1 << 4) +#define DA9034_EVENT_VBATMON (1 << 5) +#define DA9034_EVENT_REV_IOVER (1 << 6) +#define DA9034_EVENT_CH_IOVER (1 << 7) +#define DA9034_EVENT_CH_TCTO (1 << 8) +#define DA9034_EVENT_CH_CCTO (1 << 9) +#define DA9034_EVENT_USB_DEV (1 << 10) +#define DA9034_EVENT_OTGCP_IOVER (1 << 11) +#define DA9034_EVENT_VBUS_4P55 (1 << 12) +#define DA9034_EVENT_VBUS_3P8 (1 << 13) +#define DA9034_EVENT_SESS_1P8 (1 << 14) +#define DA9034_EVENT_SRP_READY (1 << 
15) +#define DA9034_EVENT_ADC_MAN (1 << 16) +#define DA9034_EVENT_ADC_AUTO4 (1 << 17) +#define DA9034_EVENT_ADC_AUTO5 (1 << 18) +#define DA9034_EVENT_ADC_AUTO6 (1 << 19) +#define DA9034_EVENT_PEN_DOWN (1 << 20) +#define DA9034_EVENT_TSI_READY (1 << 21) +#define DA9034_EVENT_UART_TX (1 << 22) +#define DA9034_EVENT_UART_RX (1 << 23) +#define DA9034_EVENT_HEADSET (1 << 25) +#define DA9034_EVENT_HOOKSWITCH (1 << 26) +#define DA9034_EVENT_WATCHDOG (1 << 27) + +extern int da903x_register_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); +extern int da903x_unregister_notifier(struct device *dev, + struct notifier_block *nb, unsigned int events); + +/* Status Query Interface */ +#define DA9030_STATUS_ONKEY (1 << 0) +#define DA9030_STATUS_PWREN1 (1 << 1) +#define DA9030_STATUS_EXTON (1 << 2) +#define DA9030_STATUS_CHDET (1 << 3) +#define DA9030_STATUS_TBAT (1 << 4) +#define DA9030_STATUS_VBATMON (1 << 5) +#define DA9030_STATUS_VBATMON_TXON (1 << 6) +#define DA9030_STATUS_MCLKDET (1 << 7) + +#define DA9034_STATUS_ONKEY (1 << 0) +#define DA9034_STATUS_EXTON (1 << 2) +#define DA9034_STATUS_CHDET (1 << 3) +#define DA9034_STATUS_TBAT (1 << 4) +#define DA9034_STATUS_VBATMON (1 << 5) +#define DA9034_STATUS_PEN_DOWN (1 << 6) +#define DA9034_STATUS_MCLKDET (1 << 7) +#define DA9034_STATUS_USB_DEV (1 << 8) +#define DA9034_STATUS_HEADSET (1 << 9) +#define DA9034_STATUS_HOOKSWITCH (1 << 10) +#define DA9034_STATUS_REMCON (1 << 11) +#define DA9034_STATUS_VBUS_VALID_4P55 (1 << 12) +#define DA9034_STATUS_VBUS_VALID_3P8 (1 << 13) +#define DA9034_STATUS_SESS_VALID_1P8 (1 << 14) +#define DA9034_STATUS_SRP_READY (1 << 15) + +extern int da903x_query_status(struct device *dev, unsigned int status); + + +/* NOTE: the functions below are not intended for use outside + * of the DA903x sub-device drivers + */ +extern int da903x_write(struct device *dev, int reg, uint8_t val); +extern int da903x_writes(struct device *dev, int reg, int len, uint8_t *val); +extern int da903x_read(struct device *dev, int reg, uint8_t *val); +extern int da903x_reads(struct device *dev, int reg, int len, uint8_t *val); +extern int da903x_update(struct device *dev, int reg, uint8_t val, uint8_t mask); +extern int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask); +extern int da903x_clr_bits(struct device *dev, int reg, uint8_t bit_mask); +#endif /* __LINUX_PMIC_DA903X_H */ diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h new file mode 100644 index 00000000..7ffbd6e9 --- /dev/null +++ b/include/linux/mfd/da9052/da9052.h @@ -0,0 +1,129 @@ +/* + * da9052 declarations for DA9052 PMICs. + * + * Copyright(c) 2011 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen <dchen@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#ifndef __MFD_DA9052_DA9052_H +#define __MFD_DA9052_DA9052_H + +#include <linux/interrupt.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/completion.h> +#include <linux/list.h> +#include <linux/mfd/core.h> + +#include <linux/mfd/da9052/reg.h> + +#define DA9052_IRQ_DCIN 0 +#define DA9052_IRQ_VBUS 1 +#define DA9052_IRQ_DCINREM 2 +#define DA9052_IRQ_VBUSREM 3 +#define DA9052_IRQ_VDDLOW 4 +#define DA9052_IRQ_ALARM 5 +#define DA9052_IRQ_SEQRDY 6 +#define DA9052_IRQ_COMP1V2 7 +#define DA9052_IRQ_NONKEY 8 +#define DA9052_IRQ_IDFLOAT 9 +#define DA9052_IRQ_IDGND 10 +#define DA9052_IRQ_CHGEND 11 +#define DA9052_IRQ_TBAT 12 +#define DA9052_IRQ_ADC_EOM 13 +#define DA9052_IRQ_PENDOWN 14 +#define DA9052_IRQ_TSIREADY 15 +#define DA9052_IRQ_GPI0 16 +#define DA9052_IRQ_GPI1 17 +#define DA9052_IRQ_GPI2 18 +#define DA9052_IRQ_GPI3 19 +#define DA9052_IRQ_GPI4 20 +#define DA9052_IRQ_GPI5 21 +#define DA9052_IRQ_GPI6 22 +#define DA9052_IRQ_GPI7 23 +#define DA9052_IRQ_GPI8 24 +#define DA9052_IRQ_GPI9 25 +#define DA9052_IRQ_GPI10 26 +#define DA9052_IRQ_GPI11 27 +#define DA9052_IRQ_GPI12 28 +#define DA9052_IRQ_GPI13 29 +#define DA9052_IRQ_GPI14 30 +#define DA9052_IRQ_GPI15 31 + +enum da9052_chip_id { + DA9052, + DA9053_AA, + DA9053_BA, + DA9053_BB, +}; + +struct da9052_pdata; + +struct da9052 { + struct device *dev; + struct regmap *regmap; + + int irq_base; + u8 chip_id; + + int chip_irq; +}; + +/* Device I/O API */ +static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg) +{ + int val, ret; + + ret = regmap_read(da9052->regmap, reg, &val); + if (ret < 0) + return ret; + return val; +} + +static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg, + unsigned char val) +{ + return regmap_write(da9052->regmap, reg, val); +} + +static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg, + unsigned reg_cnt, unsigned char *val) +{ + return regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); +} + +static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, + unsigned reg_cnt, unsigned char *val) +{ + return regmap_raw_write(da9052->regmap, reg, val, reg_cnt); +} + +static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg, + unsigned char bit_mask, + unsigned char reg_val) +{ + return regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); +} + +int da9052_device_init(struct da9052 *da9052, u8 chip_id); +void da9052_device_exit(struct da9052 *da9052); + +extern struct regmap_config da9052_regmap_config; + +#endif /* __MFD_DA9052_DA9052_H */ diff --git a/include/linux/mfd/da9052/pdata.h b/include/linux/mfd/da9052/pdata.h new file mode 100644 index 00000000..62c5c3c2 --- /dev/null +++ b/include/linux/mfd/da9052/pdata.h @@ -0,0 +1,40 @@ +/* + * Platform data declarations for DA9052 PMICs. + * + * Copyright(c) 2011 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen <dchen@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __MFD_DA9052_PDATA_H__ +#define __MFD_DA9052_PDATA_H__ + +#define DA9052_MAX_REGULATORS 14 + +struct da9052; + +struct da9052_pdata { + struct led_platform_data *pled; + int (*init) (struct da9052 *da9052); + int irq_base; + int gpio_base; + int use_for_apm; + struct regulator_init_data *regulators[DA9052_MAX_REGULATORS]; +}; + +#endif diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h new file mode 100644 index 00000000..b97f7309 --- /dev/null +++ b/include/linux/mfd/da9052/reg.h @@ -0,0 +1,749 @@ +/* + * Register declarations for DA9052 PMICs. + * + * Copyright(c) 2011 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen <dchen@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __LINUX_MFD_DA9052_REG_H +#define __LINUX_MFD_DA9052_REG_H + +/* PAGE REGISTERS */ +#define DA9052_PAGE0_CON_REG 0 +#define DA9052_PAGE1_CON_REG 128 + +/* STATUS REGISTERS */ +#define DA9052_STATUS_A_REG 1 +#define DA9052_STATUS_B_REG 2 +#define DA9052_STATUS_C_REG 3 +#define DA9052_STATUS_D_REG 4 + +/* EVENT REGISTERS */ +#define DA9052_EVENT_A_REG 5 +#define DA9052_EVENT_B_REG 6 +#define DA9052_EVENT_C_REG 7 +#define DA9052_EVENT_D_REG 8 +#define DA9052_FAULTLOG_REG 9 + +/* IRQ REGISTERS */ +#define DA9052_IRQ_MASK_A_REG 10 +#define DA9052_IRQ_MASK_B_REG 11 +#define DA9052_IRQ_MASK_C_REG 12 +#define DA9052_IRQ_MASK_D_REG 13 + +/* CONTROL REGISTERS */ +#define DA9052_CONTROL_A_REG 14 +#define DA9052_CONTROL_B_REG 15 +#define DA9052_CONTROL_C_REG 16 +#define DA9052_CONTROL_D_REG 17 + +#define DA9052_PDDIS_REG 18 +#define DA9052_INTERFACE_REG 19 +#define DA9052_RESET_REG 20 + +/* GPIO REGISTERS */ +#define DA9052_GPIO_0_1_REG 21 +#define DA9052_GPIO_2_3_REG 22 +#define DA9052_GPIO_4_5_REG 23 +#define DA9052_GPIO_6_7_REG 24 +#define DA9052_GPIO_14_15_REG 28 + +/* POWER SEQUENCER CONTROL REGISTERS */ +#define DA9052_ID_0_1_REG 29 +#define DA9052_ID_2_3_REG 30 +#define DA9052_ID_4_5_REG 31 +#define DA9052_ID_6_7_REG 32 +#define DA9052_ID_8_9_REG 33 +#define DA9052_ID_10_11_REG 34 +#define DA9052_ID_12_13_REG 35 +#define DA9052_ID_14_15_REG 36 +#define DA9052_ID_16_17_REG 37 +#define DA9052_ID_18_19_REG 38 +#define DA9052_ID_20_21_REG 39 +#define DA9052_SEQ_STATUS_REG 40 +#define DA9052_SEQ_A_REG 41 +#define DA9052_SEQ_B_REG 42 +#define DA9052_SEQ_TIMER_REG 43 + +/* LDO AND BUCK REGISTERS */ +#define DA9052_BUCKA_REG 44 +#define DA9052_BUCKB_REG 45 +#define DA9052_BUCKCORE_REG 46 +#define DA9052_BUCKPRO_REG 47 +#define DA9052_BUCKMEM_REG 48 +#define DA9052_BUCKPERI_REG 49 +#define DA9052_LDO1_REG 50 +#define DA9052_LDO2_REG 51 +#define DA9052_LDO3_REG 52 +#define DA9052_LDO4_REG 53 +#define 
DA9052_LDO5_REG 54 +#define DA9052_LDO6_REG 55 +#define DA9052_LDO7_REG 56 +#define DA9052_LDO8_REG 57 +#define DA9052_LDO9_REG 58 +#define DA9052_LDO10_REG 59 +#define DA9052_SUPPLY_REG 60 +#define DA9052_PULLDOWN_REG 61 +#define DA9052_CHGBUCK_REG 62 +#define DA9052_WAITCONT_REG 63 +#define DA9052_ISET_REG 64 +#define DA9052_BATCHG_REG 65 + +/* BATTERY CONTROL REGISTRS */ +#define DA9052_CHG_CONT_REG 66 +#define DA9052_INPUT_CONT_REG 67 +#define DA9052_CHG_TIME_REG 68 +#define DA9052_BBAT_CONT_REG 69 + +/* LED CONTROL REGISTERS */ +#define DA9052_BOOST_REG 70 +#define DA9052_LED_CONT_REG 71 +#define DA9052_LEDMIN123_REG 72 +#define DA9052_LED1_CONF_REG 73 +#define DA9052_LED2_CONF_REG 74 +#define DA9052_LED3_CONF_REG 75 +#define DA9052_LED1CONT_REG 76 +#define DA9052_LED2CONT_REG 77 +#define DA9052_LED3CONT_REG 78 +#define DA9052_LED_CONT_4_REG 79 +#define DA9052_LED_CONT_5_REG 80 + +/* ADC CONTROL REGISTERS */ +#define DA9052_ADC_MAN_REG 81 +#define DA9052_ADC_CONT_REG 82 +#define DA9052_ADC_RES_L_REG 83 +#define DA9052_ADC_RES_H_REG 84 +#define DA9052_VDD_RES_REG 85 +#define DA9052_VDD_MON_REG 86 + +#define DA9052_ICHG_AV_REG 87 +#define DA9052_ICHG_THD_REG 88 +#define DA9052_ICHG_END_REG 89 +#define DA9052_TBAT_RES_REG 90 +#define DA9052_TBAT_HIGHP_REG 91 +#define DA9052_TBAT_HIGHN_REG 92 +#define DA9052_TBAT_LOW_REG 93 +#define DA9052_T_OFFSET_REG 94 + +#define DA9052_ADCIN4_RES_REG 95 +#define DA9052_AUTO4_HIGH_REG 96 +#define DA9052_AUTO4_LOW_REG 97 +#define DA9052_ADCIN5_RES_REG 98 +#define DA9052_AUTO5_HIGH_REG 99 +#define DA9052_AUTO5_LOW_REG 100 +#define DA9052_ADCIN6_RES_REG 101 +#define DA9052_AUTO6_HIGH_REG 102 +#define DA9052_AUTO6_LOW_REG 103 + +#define DA9052_TJUNC_RES_REG 104 + +/* TSI CONTROL REGISTERS */ +#define DA9052_TSI_CONT_A_REG 105 +#define DA9052_TSI_CONT_B_REG 106 +#define DA9052_TSI_X_MSB_REG 107 +#define DA9052_TSI_Y_MSB_REG 108 +#define DA9052_TSI_LSB_REG 109 +#define DA9052_TSI_Z_MSB_REG 110 + +/* RTC COUNT REGISTERS */ +#define DA9052_COUNT_S_REG 111 +#define DA9052_COUNT_MI_REG 112 +#define DA9052_COUNT_H_REG 113 +#define DA9052_COUNT_D_REG 114 +#define DA9052_COUNT_MO_REG 115 +#define DA9052_COUNT_Y_REG 116 + +/* RTC CONTROL REGISTERS */ +#define DA9052_ALARM_MI_REG 117 +#define DA9052_ALARM_H_REG 118 +#define DA9052_ALARM_D_REG 119 +#define DA9052_ALARM_MO_REG 120 +#define DA9052_ALARM_Y_REG 121 +#define DA9052_SECOND_A_REG 122 +#define DA9052_SECOND_B_REG 123 +#define DA9052_SECOND_C_REG 124 +#define DA9052_SECOND_D_REG 125 + +/* PAGE CONFIGURATION BIT */ +#define DA9052_PAGE_CONF 0X80 + +/* STATUS REGISTER A BITS */ +#define DA9052_STATUSA_VDATDET 0X80 +#define DA9052_STATUSA_VBUSSEL 0X40 +#define DA9052_STATUSA_DCINSEL 0X20 +#define DA9052_STATUSA_VBUSDET 0X10 +#define DA9052_STATUSA_DCINDET 0X08 +#define DA9052_STATUSA_IDGND 0X04 +#define DA9052_STATUSA_IDFLOAT 0X02 +#define DA9052_STATUSA_NONKEY 0X01 + +/* STATUS REGISTER B BITS */ +#define DA9052_STATUSB_COMPDET 0X80 +#define DA9052_STATUSB_SEQUENCING 0X40 +#define DA9052_STATUSB_GPFB2 0X20 +#define DA9052_STATUSB_CHGTO 0X10 +#define DA9052_STATUSB_CHGEND 0X08 +#define DA9052_STATUSB_CHGLIM 0X04 +#define DA9052_STATUSB_CHGPRE 0X02 +#define DA9052_STATUSB_CHGATT 0X01 + +/* STATUS REGISTER C BITS */ +#define DA9052_STATUSC_GPI7 0X80 +#define DA9052_STATUSC_GPI6 0X40 +#define DA9052_STATUSC_GPI5 0X20 +#define DA9052_STATUSC_GPI4 0X10 +#define DA9052_STATUSC_GPI3 0X08 +#define DA9052_STATUSC_GPI2 0X04 +#define DA9052_STATUSC_GPI1 0X02 +#define DA9052_STATUSC_GPI0 0X01 + +/* STATUS REGISTER D BITS */ 
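/*
 * Editorial sketch, not part of the original patch: a hypothetical helper
 * showing how the register I/O wrappers from da9052.h combine with the
 * status masks above, here to test whether a DC charger is attached.
 * The "example_" name is an assumption; da9052_reg_read() returns the
 * register value on success or a negative errno on failure.
 */
static inline bool example_da9052_dcin_present(struct da9052 *da9052)
{
	int status = da9052_reg_read(da9052, DA9052_STATUS_A_REG);

	return status >= 0 && (status & DA9052_STATUSA_DCINDET);
}
/* (The STATUS REGISTER D bit masks continue below.) */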
+#define DA9052_STATUSD_GPI15 0X80 +#define DA9052_STATUSD_GPI14 0X40 +#define DA9052_STATUSD_GPI13 0X20 +#define DA9052_STATUSD_GPI12 0X10 +#define DA9052_STATUSD_GPI11 0X08 +#define DA9052_STATUSD_GPI10 0X04 +#define DA9052_STATUSD_GPI9 0X02 +#define DA9052_STATUSD_GPI8 0X01 + +/* EVENT REGISTER A BITS */ +#define DA9052_EVENTA_ECOMP1V2 0X80 +#define DA9052_EVENTA_ESEQRDY 0X40 +#define DA9052_EVENTA_EALRAM 0X20 +#define DA9052_EVENTA_EVDDLOW 0X10 +#define DA9052_EVENTA_EVBUSREM 0X08 +#define DA9052_EVENTA_EDCINREM 0X04 +#define DA9052_EVENTA_EVBUSDET 0X02 +#define DA9052_EVENTA_EDCINDET 0X01 + +/* EVENT REGISTER B BITS */ +#define DA9052_EVENTB_ETSIREADY 0X80 +#define DA9052_EVENTB_EPENDOWN 0X40 +#define DA9052_EVENTB_EADCEOM 0X20 +#define DA9052_EVENTB_ETBAT 0X10 +#define DA9052_EVENTB_ECHGEND 0X08 +#define DA9052_EVENTB_EIDGND 0X04 +#define DA9052_EVENTB_EIDFLOAT 0X02 +#define DA9052_EVENTB_ENONKEY 0X01 + +/* EVENT REGISTER C BITS */ +#define DA9052_EVENTC_EGPI7 0X80 +#define DA9052_EVENTC_EGPI6 0X40 +#define DA9052_EVENTC_EGPI5 0X20 +#define DA9052_EVENTC_EGPI4 0X10 +#define DA9052_EVENTC_EGPI3 0X08 +#define DA9052_EVENTC_EGPI2 0X04 +#define DA9052_EVENTC_EGPI1 0X02 +#define DA9052_EVENTC_EGPI0 0X01 + +/* EVENT REGISTER D BITS */ +#define DA9052_EVENTD_EGPI15 0X80 +#define DA9052_EVENTD_EGPI14 0X40 +#define DA9052_EVENTD_EGPI13 0X20 +#define DA9052_EVENTD_EGPI12 0X10 +#define DA9052_EVENTD_EGPI11 0X08 +#define DA9052_EVENTD_EGPI10 0X04 +#define DA9052_EVENTD_EGPI9 0X02 +#define DA9052_EVENTD_EGPI8 0X01 + +/* IRQ MASK REGISTERS BITS */ +#define DA9052_M_NONKEY 0X0100 + +/* TSI EVENT REGISTERS BITS */ +#define DA9052_E_PEN_DOWN 0X4000 +#define DA9052_E_TSI_READY 0X8000 + +/* FAULT LOG REGISTER BITS */ +#define DA9052_FAULTLOG_WAITSET 0X80 +#define DA9052_FAULTLOG_NSDSET 0X40 +#define DA9052_FAULTLOG_KEYSHUT 0X20 +#define DA9052_FAULTLOG_TEMPOVER 0X08 +#define DA9052_FAULTLOG_VDDSTART 0X04 +#define DA9052_FAULTLOG_VDDFAULT 0X02 +#define DA9052_FAULTLOG_TWDERROR 0X01 + +/* CONTROL REGISTER A BITS */ +#define DA9052_CONTROLA_GPIV 0X80 +#define DA9052_CONTROLA_PMOTYPE 0X20 +#define DA9052_CONTROLA_PMOV 0X10 +#define DA9052_CONTROLA_PMIV 0X08 +#define DA9052_CONTROLA_PMIFV 0X08 +#define DA9052_CONTROLA_PWR1EN 0X04 +#define DA9052_CONTROLA_PWREN 0X02 +#define DA9052_CONTROLA_SYSEN 0X01 + +/* CONTROL REGISTER B BITS */ +#define DA9052_CONTROLB_SHUTDOWN 0X80 +#define DA9052_CONTROLB_DEEPSLEEP 0X40 +#define DA9052_CONTROL_B_WRITEMODE 0X20 +#define DA9052_CONTROLB_BBATEN 0X10 +#define DA9052_CONTROLB_OTPREADEN 0X08 +#define DA9052_CONTROLB_AUTOBOOT 0X04 +#define DA9052_CONTROLB_ACTDIODE 0X02 +#define DA9052_CONTROLB_BUCKMERGE 0X01 + +/* CONTROL REGISTER C BITS */ +#define DA9052_CONTROLC_BLINKDUR 0X80 +#define DA9052_CONTROLC_BLINKFRQ 0X60 +#define DA9052_CONTROLC_DEBOUNCING 0X1C +#define DA9052_CONTROLC_PMFB2PIN 0X02 +#define DA9052_CONTROLC_PMFB1PIN 0X01 + +/* CONTROL REGISTER D BITS */ +#define DA9052_CONTROLD_WATCHDOG 0X80 +#define DA9052_CONTROLD_ACCDETEN 0X40 +#define DA9052_CONTROLD_GPI1415SD 0X20 +#define DA9052_CONTROLD_NONKEYSD 0X10 +#define DA9052_CONTROLD_KEEPACTEN 0X08 +#define DA9052_CONTROLD_TWDSCALE 0X07 + +/* POWER DOWN DISABLE REGISTER BITS */ +#define DA9052_PDDIS_PMCONTPD 0X80 +#define DA9052_PDDIS_OUT32KPD 0X40 +#define DA9052_PDDIS_CHGBBATPD 0X20 +#define DA9052_PDDIS_CHGPD 0X10 +#define DA9052_PDDIS_HS2WIREPD 0X08 +#define DA9052_PDDIS_PMIFPD 0X04 +#define DA9052_PDDIS_GPADCPD 0X02 +#define DA9052_PDDIS_GPIOPD 0X01 + +/* CONTROL REGISTER D BITS */ +#define 
DA9052_INTERFACE_IFBASEADDR 0XE0 +#define DA9052_INTERFACE_NCSPOL 0X10 +#define DA9052_INTERFACE_RWPOL 0X08 +#define DA9052_INTERFACE_CPHA 0X04 +#define DA9052_INTERFACE_CPOL 0X02 +#define DA9052_INTERFACE_IFTYPE 0X01 + +/* CONTROL REGISTER D BITS */ +#define DA9052_RESET_RESETEVENT 0XC0 +#define DA9052_RESET_RESETTIMER 0X3F + +/* GPIO REGISTERS */ +/* GPIO CONTROL REGISTER BITS */ +#define DA9052_GPIO_EVEN_PORT_PIN 0X03 +#define DA9052_GPIO_EVEN_PORT_TYPE 0X04 +#define DA9052_GPIO_EVEN_PORT_MODE 0X08 + +#define DA9052_GPIO_ODD_PORT_PIN 0X30 +#define DA9052_GPIO_ODD_PORT_TYPE 0X40 +#define DA9052_GPIO_ODD_PORT_MODE 0X80 + +/*POWER SEQUENCER REGISTER BITS */ +/* SEQ CONTROL REGISTER BITS FOR ID 0 AND 1 */ +#define DA9052_ID01_LDO1STEP 0XF0 +#define DA9052_ID01_SYSPRE 0X04 +#define DA9052_ID01_DEFSUPPLY 0X02 +#define DA9052_ID01_NRESMODE 0X01 + +/* SEQ CONTROL REGISTER BITS FOR ID 2 AND 3 */ +#define DA9052_ID23_LDO3STEP 0XF0 +#define DA9052_ID23_LDO2STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 4 AND 5 */ +#define DA9052_ID45_LDO5STEP 0XF0 +#define DA9052_ID45_LDO4STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 6 AND 7 */ +#define DA9052_ID67_LDO7STEP 0XF0 +#define DA9052_ID67_LDO6STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 8 AND 9 */ +#define DA9052_ID89_LDO9STEP 0XF0 +#define DA9052_ID89_LDO8STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 10 AND 11 */ +#define DA9052_ID1011_PDDISSTEP 0XF0 +#define DA9052_ID1011_LDO10STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 12 AND 13 */ +#define DA9052_ID1213_VMEMSWSTEP 0XF0 +#define DA9052_ID1213_VPERISWSTEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 14 AND 15 */ +#define DA9052_ID1415_BUCKPROSTEP 0XF0 +#define DA9052_ID1415_BUCKCORESTEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 16 AND 17 */ +#define DA9052_ID1617_BUCKPERISTEP 0XF0 +#define DA9052_ID1617_BUCKMEMSTEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 18 AND 19 */ +#define DA9052_ID1819_GPRISE2STEP 0XF0 +#define DA9052_ID1819_GPRISE1STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 20 AND 21 */ +#define DA9052_ID2021_GPFALL2STEP 0XF0 +#define DA9052_ID2021_GPFALL1STEP 0X0F + +/* POWER SEQ STATUS REGISTER BITS */ +#define DA9052_SEQSTATUS_SEQPOINTER 0XF0 +#define DA9052_SEQSTATUS_WAITSTEP 0X0F + +/* POWER SEQ A REGISTER BITS */ +#define DA9052_SEQA_POWEREND 0XF0 +#define DA9052_SEQA_SYSTEMEND 0X0F + +/* POWER SEQ B REGISTER BITS */ +#define DA9052_SEQB_PARTDOWN 0XF0 +#define DA9052_SEQB_MAXCOUNT 0X0F + +/* POWER SEQ TIMER REGISTER BITS */ +#define DA9052_SEQTIMER_SEQDUMMY 0XF0 +#define DA9052_SEQTIMER_SEQTIME 0X0F + +/*POWER SUPPLY CONTROL REGISTER BITS */ +/* BUCK REGISTER A BITS */ +#define DA9052_BUCKA_BPROILIM 0XC0 +#define DA9052_BUCKA_BPROMODE 0X30 +#define DA9052_BUCKA_BCOREILIM 0X0C +#define DA9052_BUCKA_BCOREMODE 0X03 + +/* BUCK REGISTER B BITS */ +#define DA9052_BUCKB_BERIILIM 0XC0 +#define DA9052_BUCKB_BPERIMODE 0X30 +#define DA9052_BUCKB_BMEMILIM 0X0C +#define DA9052_BUCKB_BMEMMODE 0X03 + +/* BUCKCORE REGISTER BITS */ +#define DA9052_BUCKCORE_BCORECONF 0X80 +#define DA9052_BUCKCORE_BCOREEN 0X40 +#define DA9052_BUCKCORE_VBCORE 0X3F + +/* BUCKPRO REGISTER BITS */ +#define DA9052_BUCKPRO_BPROCONF 0X80 +#define DA9052_BUCKPRO_BPROEN 0X40 +#define DA9052_BUCKPRO_VBPRO 0X3F + +/* BUCKMEM REGISTER BITS */ +#define DA9052_BUCKMEM_BMEMCONF 0X80 +#define DA9052_BUCKMEM_BMEMEN 0X40 +#define DA9052_BUCKMEM_VBMEM 0X3F + +/* BUCKPERI REGISTER BITS */ +#define DA9052_BUCKPERI_BPERICONF 0X80 +#define DA9052_BUCKPERI_BPERIEN 0X40 +#define DA9052_BUCKPERI_BPERIHS 0X20 +#define 
DA9052_BUCKPERI_VBPERI 0X1F + +/* LDO1 REGISTER BITS */ +#define DA9052_LDO1_LDO1CONF 0X80 +#define DA9052_LDO1_LDO1EN 0X40 +#define DA9052_LDO1_VLDO1 0X1F + +/* LDO2 REGISTER BITS */ +#define DA9052_LDO2_LDO2CONF 0X80 +#define DA9052_LDO2_LDO2EN 0X40 +#define DA9052_LDO2_VLDO2 0X3F + +/* LDO3 REGISTER BITS */ +#define DA9052_LDO3_LDO3CONF 0X80 +#define DA9052_LDO3_LDO3EN 0X40 +#define DA9052_LDO3_VLDO3 0X3F + +/* LDO4 REGISTER BITS */ +#define DA9052_LDO4_LDO4CONF 0X80 +#define DA9052_LDO4_LDO4EN 0X40 +#define DA9052_LDO4_VLDO4 0X3F + +/* LDO5 REGISTER BITS */ +#define DA9052_LDO5_LDO5CONF 0X80 +#define DA9052_LDO5_LDO5EN 0X40 +#define DA9052_LDO5_VLDO5 0X3F + +/* LDO6 REGISTER BITS */ +#define DA9052_LDO6_LDO6CONF 0X80 +#define DA9052_LDO6_LDO6EN 0X40 +#define DA9052_LDO6_VLDO6 0X3F + +/* LDO7 REGISTER BITS */ +#define DA9052_LDO7_LDO7CONF 0X80 +#define DA9052_LDO7_LDO7EN 0X40 +#define DA9052_LDO7_VLDO7 0X3F + +/* LDO8 REGISTER BITS */ +#define DA9052_LDO8_LDO8CONF 0X80 +#define DA9052_LDO8_LDO8EN 0X40 +#define DA9052_LDO8_VLDO8 0X3F + +/* LDO9 REGISTER BITS */ +#define DA9052_LDO9_LDO9CONF 0X80 +#define DA9052_LDO9_LDO9EN 0X40 +#define DA9052_LDO9_VLDO9 0X3F + +/* LDO10 REGISTER BITS */ +#define DA9052_LDO10_LDO10CONF 0X80 +#define DA9052_LDO10_LDO10EN 0X40 +#define DA9052_LDO10_VLDO10 0X3F + +/* SUPPLY REGISTER BITS */ +#define DA9052_SUPPLY_VLOCK 0X80 +#define DA9052_SUPPLY_VMEMSWEN 0X40 +#define DA9052_SUPPLY_VPERISWEN 0X20 +#define DA9052_SUPPLY_VLDO3GO 0X10 +#define DA9052_SUPPLY_VLDO2GO 0X08 +#define DA9052_SUPPLY_VBMEMGO 0X04 +#define DA9052_SUPPLY_VBPROGO 0X02 +#define DA9052_SUPPLY_VBCOREGO 0X01 + +/* PULLDOWN REGISTER BITS */ +#define DA9052_PULLDOWN_LDO5PDDIS 0X20 +#define DA9052_PULLDOWN_LDO2PDDIS 0X10 +#define DA9052_PULLDOWN_LDO1PDDIS 0X08 +#define DA9052_PULLDOWN_MEMPDDIS 0X04 +#define DA9052_PULLDOWN_PROPDDIS 0X02 +#define DA9052_PULLDOWN_COREPDDIS 0X01 + +/* BAT CHARGER REGISTER BITS */ +/* CHARGER BUCK REGISTER BITS */ +#define DA9052_CHGBUCK_CHGTEMP 0X80 +#define DA9052_CHGBUCK_CHGUSBILIM 0X40 +#define DA9052_CHGBUCK_CHGBUCKLP 0X20 +#define DA9052_CHGBUCK_CHGBUCKEN 0X10 +#define DA9052_CHGBUCK_ISETBUCK 0X0F + +/* WAIT COUNTER REGISTER BITS */ +#define DA9052_WAITCONT_WAITDIR 0X80 +#define DA9052_WAITCONT_RTCCLOCK 0X40 +#define DA9052_WAITCONT_WAITMODE 0X20 +#define DA9052_WAITCONT_EN32KOUT 0X10 +#define DA9052_WAITCONT_DELAYTIME 0X0F + +/* ISET CONTROL REGISTER BITS */ +#define DA9052_ISET_ISETDCIN 0XF0 +#define DA9052_ISET_ISETVBUS 0X0F + +/* BATTERY CHARGER CONTROL REGISTER BITS */ +#define DA9052_BATCHG_ICHGPRE 0XC0 +#define DA9052_BATCHG_ICHGBAT 0X3F + +/* CHARGER COUNTER REGISTER BITS */ +#define DA9052_CHG_CONT_VCHG_BAT 0XF8 +#define DA9052_CHG_CONT_TCTR 0X07 + +/* INPUT CONTROL REGISTER BITS */ +#define DA9052_INPUT_CONT_TCTR_MODE 0X80 +#define DA9052_INPUT_CONT_VBUS_SUSP 0X10 +#define DA9052_INPUT_CONT_DCIN_SUSP 0X08 + +/* CHARGING TIME REGISTER BITS */ +#define DA9052_CHGTIME_CHGTIME 0XFF + +/* BACKUP BATTERY CONTROL REGISTER BITS */ +#define DA9052_BBATCONT_BCHARGERISET 0XF0 +#define DA9052_BBATCONT_BCHARGERVSET 0X0F + +/* LED REGISTERS BITS */ +/* LED BOOST REGISTER BITS */ +#define DA9052_BOOST_EBFAULT 0X80 +#define DA9052_BOOST_MBFAULT 0X40 +#define DA9052_BOOST_BOOSTFRQ 0X20 +#define DA9052_BOOST_BOOSTILIM 0X10 +#define DA9052_BOOST_LED3INEN 0X08 +#define DA9052_BOOST_LED2INEN 0X04 +#define DA9052_BOOST_LED1INEN 0X02 +#define DA9052_BOOST_BOOSTEN 0X01 + +/* LED CONTROL REGISTER BITS */ +#define DA9052_LEDCONT_SELLEDMODE 0X80 +#define 
DA9052_LEDCONT_LED3ICONT 0X40 +#define DA9052_LEDCONT_LED3RAMP 0X20 +#define DA9052_LEDCONT_LED3EN 0X10 +#define DA9052_LEDCONT_LED2RAMP 0X08 +#define DA9052_LEDCONT_LED2EN 0X04 +#define DA9052_LEDCONT_LED1RAMP 0X02 +#define DA9052_LEDCONT_LED1EN 0X01 + +/* LEDMIN123 REGISTER BIT */ +#define DA9052_LEDMIN123_LEDMINCURRENT 0XFF + +/* LED1CONF REGISTER BIT */ +#define DA9052_LED1CONF_LED1CURRENT 0XFF + +/* LED2CONF REGISTER BIT */ +#define DA9052_LED2CONF_LED2CURRENT 0XFF + +/* LED3CONF REGISTER BIT */ +#define DA9052_LED3CONF_LED3CURRENT 0XFF + +/* LED COUNT REGISTER BIT */ +#define DA9052_LED_CONT_DIM 0X80 + +/* ADC MAN REGISTERS BITS */ +#define DA9052_ADC_MAN_MAN_CONV 0X10 +#define DA9052_ADC_MAN_MUXSEL_VDDOUT 0X00 +#define DA9052_ADC_MAN_MUXSEL_ICH 0X01 +#define DA9052_ADC_MAN_MUXSEL_TBAT 0X02 +#define DA9052_ADC_MAN_MUXSEL_VBAT 0X03 +#define DA9052_ADC_MAN_MUXSEL_AD4 0X04 +#define DA9052_ADC_MAN_MUXSEL_AD5 0X05 +#define DA9052_ADC_MAN_MUXSEL_AD6 0X06 +#define DA9052_ADC_MAN_MUXSEL_VBBAT 0X09 + +/* ADC CONTROL REGSISTERS BITS */ +#define DA9052_ADCCONT_COMP1V2EN 0X80 +#define DA9052_ADCCONT_ADCMODE 0X40 +#define DA9052_ADCCONT_TBATISRCEN 0X20 +#define DA9052_ADCCONT_AD4ISRCEN 0X10 +#define DA9052_ADCCONT_AUTOAD6EN 0X08 +#define DA9052_ADCCONT_AUTOAD5EN 0X04 +#define DA9052_ADCCONT_AUTOAD4EN 0X02 +#define DA9052_ADCCONT_AUTOVDDEN 0X01 + +/* ADC 10 BIT MANUAL CONVERSION RESULT LOW REGISTER */ +#define DA9052_ADC_RES_LSB 0X03 + +/* ADC 10 BIT MANUAL CONVERSION RESULT HIGH REGISTER */ +#define DA9052_ADCRESH_ADCRESMSB 0XFF + +/* VDD RES REGSISTER BIT*/ +#define DA9052_VDDRES_VDDOUTRES 0XFF + +/* VDD MON REGSISTER BIT */ +#define DA9052_VDDMON_VDDOUTMON 0XFF + +/* ICHG_AV REGSISTER BIT */ +#define DA9052_ICHGAV_ICHGAV 0XFF + +/* ICHG_THD REGSISTER BIT */ +#define DA9052_ICHGTHD_ICHGTHD 0XFF + +/* ICHG_END REGSISTER BIT */ +#define DA9052_ICHGEND_ICHGEND 0XFF + +/* TBAT_RES REGSISTER BIT */ +#define DA9052_TBATRES_TBATRES 0XFF + +/* TBAT_HIGHP REGSISTER BIT */ +#define DA9052_TBATHIGHP_TBATHIGHP 0XFF + +/* TBAT_HIGHN REGSISTER BIT */ +#define DA9052_TBATHIGHN_TBATHIGHN 0XFF + +/* TBAT_LOW REGSISTER BIT */ +#define DA9052_TBATLOW_TBATLOW 0XFF + +/* T_OFFSET REGSISTER BIT */ +#define DA9052_TOFFSET_TOFFSET 0XFF + +/* ADCIN4_RES REGSISTER BIT */ +#define DA9052_ADCIN4RES_ADCIN4RES 0XFF + +/* ADCIN4_HIGH REGSISTER BIT */ +#define DA9052_AUTO4HIGH_AUTO4HIGH 0XFF + +/* ADCIN4_LOW REGSISTER BIT */ +#define DA9052_AUTO4LOW_AUTO4LOW 0XFF + +/* ADCIN5_RES REGSISTER BIT */ +#define DA9052_ADCIN5RES_ADCIN5RES 0XFF + +/* ADCIN5_HIGH REGSISTER BIT */ +#define DA9052_AUTO5HIGH_AUTOHIGH 0XFF + +/* ADCIN5_LOW REGSISTER BIT */ +#define DA9052_AUTO5LOW_AUTO5LOW 0XFF + +/* ADCIN6_RES REGSISTER BIT */ +#define DA9052_ADCIN6RES_ADCIN6RES 0XFF + +/* ADCIN6_HIGH REGSISTER BIT */ +#define DA9052_AUTO6HIGH_AUTO6HIGH 0XFF + +/* ADCIN6_LOW REGSISTER BIT */ +#define DA9052_AUTO6LOW_AUTO6LOW 0XFF + +/* TJUNC_RES REGSISTER BIT*/ +#define DA9052_TJUNCRES_TJUNCRES 0XFF + +/* TSI REGISTER */ +/* TSI CONTROL REGISTER A BITS */ +#define DA9052_TSICONTA_TSIDELAY 0XC0 +#define DA9052_TSICONTA_TSISKIP 0X38 +#define DA9052_TSICONTA_TSIMODE 0X04 +#define DA9052_TSICONTA_PENDETEN 0X02 +#define DA9052_TSICONTA_AUTOTSIEN 0X01 + +/* TSI CONTROL REGISTER B BITS */ +#define DA9052_TSICONTB_ADCREF 0X80 +#define DA9052_TSICONTB_TSIMAN 0X40 +#define DA9052_TSICONTB_TSIMUX 0X30 +#define DA9052_TSICONTB_TSISEL3 0X08 +#define DA9052_TSICONTB_TSISEL2 0X04 +#define DA9052_TSICONTB_TSISEL1 0X02 +#define DA9052_TSICONTB_TSISEL0 0X01 + +/* TSI X 
CO-ORDINATE MSB RESULT REGISTER BITS */ +#define DA9052_TSIXMSB_TSIXM 0XFF + +/* TSI Y CO-ORDINATE MSB RESULT REGISTER BITS */ +#define DA9052_TSIYMSB_TSIYM 0XFF + +/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */ +#define DA9052_TSILSB_PENDOWN 0X40 +#define DA9052_TSILSB_TSIZL 0X30 +#define DA9052_TSILSB_TSIYL 0X0C +#define DA9052_TSILSB_TSIXL 0X03 + +/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */ +#define DA9052_TSIZMSB_TSIZM 0XFF + +/* RTC REGISTER */ +/* RTC TIMER SECONDS REGISTER BITS */ +#define DA9052_COUNTS_MONITOR 0X40 +#define DA9052_RTC_SEC 0X3F + +/* RTC TIMER MINUTES REGISTER BIT */ +#define DA9052_RTC_MIN 0X3F + +/* RTC TIMER HOUR REGISTER BIT */ +#define DA9052_RTC_HOUR 0X1F + +/* RTC TIMER DAYS REGISTER BIT */ +#define DA9052_RTC_DAY 0X1F + +/* RTC TIMER MONTHS REGISTER BIT */ +#define DA9052_RTC_MONTH 0X0F + +/* RTC TIMER YEARS REGISTER BIT */ +#define DA9052_RTC_YEAR 0X3F + +/* RTC ALARM MINUTES REGISTER BITS */ +#define DA9052_ALARMM_I_TICK_TYPE 0X80 +#define DA9052_ALARMMI_ALARMTYPE 0X40 + +/* RTC ALARM YEARS REGISTER BITS */ +#define DA9052_ALARM_Y_TICK_ON 0X80 +#define DA9052_ALARM_Y_ALARM_ON 0X40 + +/* RTC SECONDS REGISTER A BITS */ +#define DA9052_SECONDA_SECONDSA 0XFF + +/* RTC SECONDS REGISTER B BITS */ +#define DA9052_SECONDB_SECONDSB 0XFF + +/* RTC SECONDS REGISTER C BITS */ +#define DA9052_SECONDC_SECONDSC 0XFF + +/* RTC SECONDS REGISTER D BITS */ +#define DA9052_SECONDD_SECONDSD 0XFF + +#endif +/* __LINUX_MFD_DA9052_REG_H */ diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h new file mode 100644 index 00000000..0ab61320 --- /dev/null +++ b/include/linux/mfd/davinci_voicecodec.h @@ -0,0 +1,126 @@ +/* + * DaVinci Voice Codec Core Interface for TI platforms + * + * Copyright (C) 2010 Texas Instruments, Inc + * + * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_ +#define __LINUX_MFD_DAVINIC_VOICECODEC_H_ + +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/mfd/core.h> + +#include <mach/edma.h> + +/* + * Register values. 
+ */ +#define DAVINCI_VC_PID 0x00 +#define DAVINCI_VC_CTRL 0x04 +#define DAVINCI_VC_INTEN 0x08 +#define DAVINCI_VC_INTSTATUS 0x0c +#define DAVINCI_VC_INTCLR 0x10 +#define DAVINCI_VC_EMUL_CTRL 0x14 +#define DAVINCI_VC_RFIFO 0x20 +#define DAVINCI_VC_WFIFO 0x24 +#define DAVINCI_VC_FIFOSTAT 0x28 +#define DAVINCI_VC_TST_CTRL 0x2C +#define DAVINCI_VC_REG05 0x94 +#define DAVINCI_VC_REG09 0xA4 +#define DAVINCI_VC_REG12 0xB0 + +/* DAVINCI_VC_CTRL bit fields */ +#define DAVINCI_VC_CTRL_MASK 0x5500 +#define DAVINCI_VC_CTRL_RSTADC BIT(0) +#define DAVINCI_VC_CTRL_RSTDAC BIT(1) +#define DAVINCI_VC_CTRL_RD_BITS_8 BIT(4) +#define DAVINCI_VC_CTRL_RD_UNSIGNED BIT(5) +#define DAVINCI_VC_CTRL_WD_BITS_8 BIT(6) +#define DAVINCI_VC_CTRL_WD_UNSIGNED BIT(7) +#define DAVINCI_VC_CTRL_RFIFOEN BIT(8) +#define DAVINCI_VC_CTRL_RFIFOCL BIT(9) +#define DAVINCI_VC_CTRL_RFIFOMD_WORD_1 BIT(10) +#define DAVINCI_VC_CTRL_WFIFOEN BIT(12) +#define DAVINCI_VC_CTRL_WFIFOCL BIT(13) +#define DAVINCI_VC_CTRL_WFIFOMD_WORD_1 BIT(14) + +/* DAVINCI_VC_INT bit fields */ +#define DAVINCI_VC_INT_MASK 0x3F +#define DAVINCI_VC_INT_RDRDY_MASK BIT(0) +#define DAVINCI_VC_INT_RERROVF_MASK BIT(1) +#define DAVINCI_VC_INT_RERRUDR_MASK BIT(2) +#define DAVINCI_VC_INT_WDREQ_MASK BIT(3) +#define DAVINCI_VC_INT_WERROVF_MASKBIT BIT(4) +#define DAVINCI_VC_INT_WERRUDR_MASK BIT(5) + +/* DAVINCI_VC_REG05 bit fields */ +#define DAVINCI_VC_REG05_PGA_GAIN 0x07 + +/* DAVINCI_VC_REG09 bit fields */ +#define DAVINCI_VC_REG09_MUTE 0x40 +#define DAVINCI_VC_REG09_DIG_ATTEN 0x3F + +/* DAVINCI_VC_REG12 bit fields */ +#define DAVINCI_VC_REG12_POWER_ALL_ON 0xFD +#define DAVINCI_VC_REG12_POWER_ALL_OFF 0x00 + +#define DAVINCI_VC_CELLS 2 + +enum davinci_vc_cells { + DAVINCI_VC_VCIF_CELL, + DAVINCI_VC_CQ93VC_CELL, +}; + +struct davinci_vcif { + struct platform_device *pdev; + u32 dma_tx_channel; + u32 dma_rx_channel; + dma_addr_t dma_tx_addr; + dma_addr_t dma_rx_addr; +}; + +struct cq93vc { + struct platform_device *pdev; + struct snd_soc_codec *codec; + u32 sysclk; +}; + +struct davinci_vc; + +struct davinci_vc { + /* Device data */ + struct device *dev; + struct platform_device *pdev; + struct clk *clk; + + /* Memory resources */ + void __iomem *base; + resource_size_t pbase; + size_t base_size; + + /* MFD cells */ + struct mfd_cell cells[DAVINCI_VC_CELLS]; + + /* Client devices */ + struct davinci_vcif davinci_vcif; + struct cq93vc cq93vc; +}; + +#endif diff --git a/include/linux/mfd/db5500-prcmu.h b/include/linux/mfd/db5500-prcmu.h new file mode 100644 index 00000000..5a049dfa --- /dev/null +++ b/include/linux/mfd/db5500-prcmu.h @@ -0,0 +1,105 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * + * U5500 PRCMU API. 
+ */ +#ifndef __MFD_DB5500_PRCMU_H +#define __MFD_DB5500_PRCMU_H + +static inline int prcmu_resetout(u8 resoutn, u8 state) +{ + return 0; +} + +static inline int db5500_prcmu_set_epod(u16 epod_id, u8 epod_state) +{ + return 0; +} + +static inline int db5500_prcmu_request_clock(u8 clock, bool enable) +{ + return 0; +} + +static inline int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, + bool keep_ap_pll) +{ + return 0; +} + +static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state) +{ + return 0; +} + +static inline u16 db5500_prcmu_get_reset_code(void) +{ + return 0; +} + +static inline bool db5500_prcmu_is_ac_wake_requested(void) +{ + return 0; +} + +static inline int db5500_prcmu_set_arm_opp(u8 opp) +{ + return 0; +} + +static inline int db5500_prcmu_get_arm_opp(void) +{ + return 0; +} + +static inline void db5500_prcmu_config_abb_event_readout(u32 abb_events) {} + +static inline void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) {} + +static inline void db5500_prcmu_system_reset(u16 reset_code) {} + +static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {} + +#ifdef CONFIG_MFD_DB5500_PRCMU + +void db5500_prcmu_early_init(void); +int db5500_prcmu_set_display_clocks(void); +int db5500_prcmu_disable_dsipll(void); +int db5500_prcmu_enable_dsipll(void); +int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); +int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size); + +#else /* !CONFIG_UX500_SOC_DB5500 */ + +static inline void db5500_prcmu_early_init(void) {} + +static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int db5500_prcmu_set_display_clocks(void) +{ + return 0; +} + +static inline int db5500_prcmu_disable_dsipll(void) +{ + return 0; +} + +static inline int db5500_prcmu_enable_dsipll(void) +{ + return 0; +} + +#endif /* CONFIG_MFD_DB5500_PRCMU */ + +#endif /* __MFD_DB5500_PRCMU_H */ diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h new file mode 100644 index 00000000..b3a43b12 --- /dev/null +++ b/include/linux/mfd/db8500-prcmu.h @@ -0,0 +1,795 @@ +/* + * Copyright (C) STMicroelectronics 2009 + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> + * + * PRCMU f/w APIs + */ +#ifndef __MFD_DB8500_PRCMU_H +#define __MFD_DB8500_PRCMU_H + +#include <linux/interrupt.h> +#include <linux/bitops.h> + +/* + * Registers + */ +#define DB8500_PRCM_GPIOCR 0x138 +#define DB8500_PRCM_GPIOCR_DBG_UARTMOD_CMD0 BIT(0) +#define DB8500_PRCM_GPIOCR_DBG_STM_APE_CMD BIT(9) +#define DB8500_PRCM_GPIOCR_DBG_STM_MOD_CMD1 BIT(11) +#define DB8500_PRCM_GPIOCR_SPI2_SELECT BIT(23) + +#define DB8500_PRCM_LINE_VALUE 0x170 +#define DB8500_PRCM_LINE_VALUE_HSI_CAWAKE0 BIT(3) + +#define DB8500_PRCM_DSI_SW_RESET 0x324 +#define DB8500_PRCM_DSI_SW_RESET_DSI0_SW_RESETN BIT(0) +#define DB8500_PRCM_DSI_SW_RESET_DSI1_SW_RESETN BIT(1) +#define DB8500_PRCM_DSI_SW_RESET_DSI2_SW_RESETN BIT(2) + +/* This portion previously known as <mach/prcmu-fw-defs_v1.h> */ + +/** + * enum state - ON/OFF state definition + * @OFF: State is ON + * @ON: State is OFF + * + */ +enum state { + OFF = 0x0, + ON = 0x1, +}; + +/** + * enum ret_state - general purpose On/Off/Retention states + * + */ +enum ret_state { + OFFST = 0, + ONST = 1, + RETST = 2 +}; + +/** + * enum clk_arm - ARM Cortex A9 clock schemes + * 
@A9_OFF: + * @A9_BOOT: + * @A9_OPPT1: + * @A9_OPPT2: + * @A9_EXTCLK: + */ +enum clk_arm { + A9_OFF, + A9_BOOT, + A9_OPPT1, + A9_OPPT2, + A9_EXTCLK +}; + +/** + * enum clk_gen - GEN#0/GEN#1 clock schemes + * @GEN_OFF: + * @GEN_BOOT: + * @GEN_OPPT1: + */ +enum clk_gen { + GEN_OFF, + GEN_BOOT, + GEN_OPPT1, +}; + +/* some information between arm and xp70 */ + +/** + * enum romcode_write - Romcode message written by A9 AND read by XP70 + * @RDY_2_DS: Value set when ApDeepSleep state can be executed by XP70 + * @RDY_2_XP70_RST: Value set when 0x0F has been successfully polled by the + * romcode. The xp70 will go into self-reset + */ +enum romcode_write { + RDY_2_DS = 0x09, + RDY_2_XP70_RST = 0x10 +}; + +/** + * enum romcode_read - Romcode message written by XP70 and read by A9 + * @INIT: Init value when romcode field is not used + * @FS_2_DS: Value set when power state is going from ApExecute to + * ApDeepSleep + * @END_DS: Value set when ApDeepSleep power state is reached coming from + * ApExecute state + * @DS_TO_FS: Value set when power state is going from ApDeepSleep to + * ApExecute + * @END_FS: Value set when ApExecute power state is reached coming from + * ApDeepSleep state + * @SWR: Value set when power state is going to ApReset + * @END_SWR: Value set when the xp70 finished executing ApReset actions and + * waits for romcode acknowledgment to go to self-reset + */ +enum romcode_read { + INIT = 0x00, + FS_2_DS = 0x0A, + END_DS = 0x0B, + DS_TO_FS = 0x0C, + END_FS = 0x0D, + SWR = 0x0E, + END_SWR = 0x0F +}; + +/** + * enum ap_pwrst - current power states defined in PRCMU firmware + * @NO_PWRST: Current power state init + * @AP_BOOT: Current power state is apBoot + * @AP_EXECUTE: Current power state is apExecute + * @AP_DEEP_SLEEP: Current power state is apDeepSleep + * @AP_SLEEP: Current power state is apSleep + * @AP_IDLE: Current power state is apIdle + * @AP_RESET: Current power state is apReset + */ +enum ap_pwrst { + NO_PWRST = 0x00, + AP_BOOT = 0x01, + AP_EXECUTE = 0x02, + AP_DEEP_SLEEP = 0x03, + AP_SLEEP = 0x04, + AP_IDLE = 0x05, + AP_RESET = 0x06 +}; + +/** + * enum ap_pwrst_trans - Transition states defined in PRCMU firmware + * @NO_TRANSITION: No power state transition + * @APEXECUTE_TO_APSLEEP: Power state transition from ApExecute to ApSleep + * @APIDLE_TO_APSLEEP: Power state transition from ApIdle to ApSleep + * @APBOOT_TO_APEXECUTE: Power state transition from ApBoot to ApExecute + * @APEXECUTE_TO_APDEEPSLEEP: Power state transition from ApExecute to + * ApDeepSleep + * @APEXECUTE_TO_APIDLE: Power state transition from ApExecute to ApIdle + */ +enum ap_pwrst_trans { + PRCMU_AP_NO_CHANGE = 0x00, + APEXECUTE_TO_APSLEEP = 0x01, + APIDLE_TO_APSLEEP = 0x02, /* To be removed */ + PRCMU_AP_SLEEP = 0x01, + APBOOT_TO_APEXECUTE = 0x03, + APEXECUTE_TO_APDEEPSLEEP = 0x04, /* To be removed */ + PRCMU_AP_DEEP_SLEEP = 0x04, + APEXECUTE_TO_APIDLE = 0x05, /* To be removed */ + PRCMU_AP_IDLE = 0x05, + PRCMU_AP_DEEP_IDLE = 0x07, +}; + +/** + * enum hw_acc_state - State definition for hardware accelerator + * @HW_NO_CHANGE: The hardware accelerator state must remain unchanged + * @HW_OFF: The hardware accelerator must be switched off + * @HW_OFF_RAMRET: The hardware accelerator must be switched off with its + * internal RAM in retention + * @HW_ON: The hwa hardware accelerator hwa must be switched on + * + * NOTE! Deprecated, to be removed when all users switched over to use the + * regulator API. 
+ */ +enum hw_acc_state { + HW_NO_CHANGE = 0x00, + HW_OFF = 0x01, + HW_OFF_RAMRET = 0x02, + HW_ON = 0x04 +}; + +/** + * enum mbox_2_arm_stat - Status messages definition for mbox_arm + * @BOOT_TO_EXECUTEOK: The apBoot to apExecute state transition has been + * completed + * @DEEPSLEEPOK: The apExecute to apDeepSleep state transition has been + * completed + * @SLEEPOK: The apExecute to apSleep state transition has been completed + * @IDLEOK: The apExecute to apIdle state transition has been completed + * @SOFTRESETOK: The A9 watchdog/ SoftReset state has been completed + * @SOFTRESETGO : The A9 watchdog/SoftReset state is on going + * @BOOT_TO_EXECUTE: The apBoot to apExecute state transition is on going + * @EXECUTE_TO_DEEPSLEEP: The apExecute to apDeepSleep state transition is on + * going + * @DEEPSLEEP_TO_EXECUTE: The apDeepSleep to apExecute state transition is on + * going + * @DEEPSLEEP_TO_EXECUTEOK: The apDeepSleep to apExecute state transition has + * been completed + * @EXECUTE_TO_SLEEP: The apExecute to apSleep state transition is on going + * @SLEEP_TO_EXECUTE: The apSleep to apExecute state transition is on going + * @SLEEP_TO_EXECUTEOK: The apSleep to apExecute state transition has been + * completed + * @EXECUTE_TO_IDLE: The apExecute to apIdle state transition is on going + * @IDLE_TO_EXECUTE: The apIdle to apExecute state transition is on going + * @IDLE_TO_EXECUTEOK: The apIdle to apExecute state transition has been + * completed + * @INIT_STATUS: Status init + */ +enum ap_pwrsttr_status { + BOOT_TO_EXECUTEOK = 0xFF, + DEEPSLEEPOK = 0xFE, + SLEEPOK = 0xFD, + IDLEOK = 0xFC, + SOFTRESETOK = 0xFB, + SOFTRESETGO = 0xFA, + BOOT_TO_EXECUTE = 0xF9, + EXECUTE_TO_DEEPSLEEP = 0xF8, + DEEPSLEEP_TO_EXECUTE = 0xF7, + DEEPSLEEP_TO_EXECUTEOK = 0xF6, + EXECUTE_TO_SLEEP = 0xF5, + SLEEP_TO_EXECUTE = 0xF4, + SLEEP_TO_EXECUTEOK = 0xF3, + EXECUTE_TO_IDLE = 0xF2, + IDLE_TO_EXECUTE = 0xF1, + IDLE_TO_EXECUTEOK = 0xF0, + RDYTODS_RETURNTOEXE = 0xEF, + NORDYTODS_RETURNTOEXE = 0xEE, + EXETOSLEEP_RETURNTOEXE = 0xED, + EXETOIDLE_RETURNTOEXE = 0xEC, + INIT_STATUS = 0xEB, + + /*error messages */ + INITERROR = 0x00, + PLLARMLOCKP_ER = 0x01, + PLLDDRLOCKP_ER = 0x02, + PLLSOCLOCKP_ER = 0x03, + PLLSOCK1LOCKP_ER = 0x04, + ARMWFI_ER = 0x05, + SYSCLKOK_ER = 0x06, + I2C_NACK_DATA_ER = 0x07, + BOOT_ER = 0x08, + I2C_STATUS_ALWAYS_1 = 0x0A, + I2C_NACK_REG_ADDR_ER = 0x0B, + I2C_NACK_DATA0123_ER = 0x1B, + I2C_NACK_ADDR_ER = 0x1F, + CURAPPWRSTISNOT_BOOT = 0x20, + CURAPPWRSTISNOT_EXECUTE = 0x21, + CURAPPWRSTISNOT_SLEEPMODE = 0x22, + CURAPPWRSTISNOT_CORRECTFORIT10 = 0x23, + FIFO4500WUISNOT_WUPEVENT = 0x24, + PLL32KLOCKP_ER = 0x29, + DDRDEEPSLEEPOK_ER = 0x2A, + ROMCODEREADY_ER = 0x50, + WUPBEFOREDS = 0x51, + DDRCONFIG_ER = 0x52, + WUPBEFORESLEEP = 0x53, + WUPBEFOREIDLE = 0x54 +}; /* earlier called as mbox_2_arm_stat */ + +/** + * enum dvfs_stat - DVFS status messages definition + * @DVFS_GO: A state transition DVFS is on going + * @DVFS_ARM100OPPOK: The state transition DVFS has been completed for 100OPP + * @DVFS_ARM50OPPOK: The state transition DVFS has been completed for 50OPP + * @DVFS_ARMEXTCLKOK: The state transition DVFS has been completed for EXTCLK + * @DVFS_NOCHGTCLKOK: The state transition DVFS has been completed for + * NOCHGCLK + * @DVFS_INITSTATUS: Value init + */ +enum dvfs_stat { + DVFS_GO = 0xFF, + DVFS_ARM100OPPOK = 0xFE, + DVFS_ARM50OPPOK = 0xFD, + DVFS_ARMEXTCLKOK = 0xFC, + DVFS_NOCHGTCLKOK = 0xFB, + DVFS_INITSTATUS = 0x00 +}; + +/** + * enum sva_mmdsp_stat - SVA MMDSP status messages + * 
@SVA_MMDSP_GO: SVAMMDSP interrupt has happened + * @SVA_MMDSP_INIT: Status init + */ +enum sva_mmdsp_stat { + SVA_MMDSP_GO = 0xFF, + SVA_MMDSP_INIT = 0x00 +}; + +/** + * enum sia_mmdsp_stat - SIA MMDSP status messages + * @SIA_MMDSP_GO: SIAMMDSP interrupt has happened + * @SIA_MMDSP_INIT: Status init + */ +enum sia_mmdsp_stat { + SIA_MMDSP_GO = 0xFF, + SIA_MMDSP_INIT = 0x00 +}; + +/** + * enum mbox_to_arm_err - Error messages definition + * @INIT_ERR: Init value + * @PLLARMLOCKP_ERR: PLLARM has not been correctly locked in given time + * @PLLDDRLOCKP_ERR: PLLDDR has not been correctly locked in the given time + * @PLLSOC0LOCKP_ERR: PLLSOC0 has not been correctly locked in the given time + * @PLLSOC1LOCKP_ERR: PLLSOC1 has not been correctly locked in the given time + * @ARMWFI_ERR: The ARM WFI has not been correctly executed in the given time + * @SYSCLKOK_ERR: The SYSCLK is not available in the given time + * @BOOT_ERR: Romcode has not validated the XP70 self reset in the given time + * @ROMCODESAVECONTEXT: The Romcode didn.t correctly save it secure context + * @VARMHIGHSPEEDVALTO_ERR: The ARM high speed supply value transfered + * through I2C has not been correctly executed in the given time + * @VARMHIGHSPEEDACCESS_ERR: The command value of VarmHighSpeedVal transfered + * through I2C has not been correctly executed in the given time + * @VARMLOWSPEEDVALTO_ERR:The ARM low speed supply value transfered through + * I2C has not been correctly executed in the given time + * @VARMLOWSPEEDACCESS_ERR: The command value of VarmLowSpeedVal transfered + * through I2C has not been correctly executed in the given time + * @VARMRETENTIONVALTO_ERR: The ARM retention supply value transfered through + * I2C has not been correctly executed in the given time + * @VARMRETENTIONACCESS_ERR: The command value of VarmRetentionVal transfered + * through I2C has not been correctly executed in the given time + * @VAPEHIGHSPEEDVALTO_ERR: The APE highspeed supply value transfered through + * I2C has not been correctly executed in the given time + * @VSAFEHPVALTO_ERR: The SAFE high power supply value transfered through I2C + * has not been correctly executed in the given time + * @VMODSEL1VALTO_ERR: The MODEM sel1 supply value transfered through I2C has + * not been correctly executed in the given time + * @VMODSEL2VALTO_ERR: The MODEM sel2 supply value transfered through I2C has + * not been correctly executed in the given time + * @VARMOFFACCESS_ERR: The command value of Varm ON/OFF transfered through + * I2C has not been correctly executed in the given time + * @VAPEOFFACCESS_ERR: The command value of Vape ON/OFF transfered through + * I2C has not been correctly executed in the given time + * @VARMRETACCES_ERR: The command value of Varm retention ON/OFF transfered + * through I2C has not been correctly executed in the given time + * @CURAPPWRSTISNOTBOOT:Generated when Arm want to do power state transition + * ApBoot to ApExecute but the power current state is not Apboot + * @CURAPPWRSTISNOTEXECUTE: Generated when Arm want to do power state + * transition from ApExecute to others power state but the + * power current state is not ApExecute + * @CURAPPWRSTISNOTSLEEPMODE: Generated when wake up events are transmitted + * but the power current state is not ApDeepSleep/ApSleep/ApIdle + * @CURAPPWRSTISNOTCORRECTDBG: Generated when wake up events are transmitted + * but the power current state is not correct + * @ARMREGU1VALTO_ERR:The ArmRegu1 value transferred through I2C has not + * been correctly executed in the 
given time + * @ARMREGU2VALTO_ERR: The ArmRegu2 value transferred through I2C has not + * been correctly executed in the given time + * @VAPEREGUVALTO_ERR: The VApeRegu value transfered through I2C has not + * been correctly executed in the given time + * @VSMPS3REGUVALTO_ERR: The VSmps3Regu value transfered through I2C has not + * been correctly executed in the given time + * @VMODREGUVALTO_ERR: The VModemRegu value transfered through I2C has not + * been correctly executed in the given time + */ +enum mbox_to_arm_err { + INIT_ERR = 0x00, + PLLARMLOCKP_ERR = 0x01, + PLLDDRLOCKP_ERR = 0x02, + PLLSOC0LOCKP_ERR = 0x03, + PLLSOC1LOCKP_ERR = 0x04, + ARMWFI_ERR = 0x05, + SYSCLKOK_ERR = 0x06, + BOOT_ERR = 0x07, + ROMCODESAVECONTEXT = 0x08, + VARMHIGHSPEEDVALTO_ERR = 0x10, + VARMHIGHSPEEDACCESS_ERR = 0x11, + VARMLOWSPEEDVALTO_ERR = 0x12, + VARMLOWSPEEDACCESS_ERR = 0x13, + VARMRETENTIONVALTO_ERR = 0x14, + VARMRETENTIONACCESS_ERR = 0x15, + VAPEHIGHSPEEDVALTO_ERR = 0x16, + VSAFEHPVALTO_ERR = 0x17, + VMODSEL1VALTO_ERR = 0x18, + VMODSEL2VALTO_ERR = 0x19, + VARMOFFACCESS_ERR = 0x1A, + VAPEOFFACCESS_ERR = 0x1B, + VARMRETACCES_ERR = 0x1C, + CURAPPWRSTISNOTBOOT = 0x20, + CURAPPWRSTISNOTEXECUTE = 0x21, + CURAPPWRSTISNOTSLEEPMODE = 0x22, + CURAPPWRSTISNOTCORRECTDBG = 0x23, + ARMREGU1VALTO_ERR = 0x24, + ARMREGU2VALTO_ERR = 0x25, + VAPEREGUVALTO_ERR = 0x26, + VSMPS3REGUVALTO_ERR = 0x27, + VMODREGUVALTO_ERR = 0x28 +}; + +enum hw_acc { + SVAMMDSP = 0, + SVAPIPE = 1, + SIAMMDSP = 2, + SIAPIPE = 3, + SGA = 4, + B2R2MCDE = 5, + ESRAM12 = 6, + ESRAM34 = 7, +}; + +enum cs_pwrmgt { + PWRDNCS0 = 0, + WKUPCS0 = 1, + PWRDNCS1 = 2, + WKUPCS1 = 3 +}; + +/* Defs related to autonomous power management */ + +/** + * enum sia_sva_pwr_policy - Power policy + * @NO_CHGT: No change + * @DSPOFF_HWPOFF: + * @DSPOFFRAMRET_HWPOFF: + * @DSPCLKOFF_HWPOFF: + * @DSPCLKOFF_HWPCLKOFF: + * + */ +enum sia_sva_pwr_policy { + NO_CHGT = 0x0, + DSPOFF_HWPOFF = 0x1, + DSPOFFRAMRET_HWPOFF = 0x2, + DSPCLKOFF_HWPOFF = 0x3, + DSPCLKOFF_HWPCLKOFF = 0x4, +}; + +/** + * enum auto_enable - Auto Power enable + * @AUTO_OFF: + * @AUTO_ON: + * + */ +enum auto_enable { + AUTO_OFF = 0x0, + AUTO_ON = 0x1, +}; + +/* End of file previously known as prcmu-fw-defs_v1.h */ + +/** + * enum prcmu_power_status - results from set_power_state + * @PRCMU_SLEEP_OK: Sleep went ok + * @PRCMU_DEEP_SLEEP_OK: DeepSleep went ok + * @PRCMU_IDLE_OK: Idle went ok + * @PRCMU_DEEPIDLE_OK: DeepIdle went ok + * @PRCMU_PRCMU2ARMPENDINGIT_ER: Pending interrupt detected + * @PRCMU_ARMPENDINGIT_ER: Pending interrupt detected + * + */ +enum prcmu_power_status { + PRCMU_SLEEP_OK = 0xf3, + PRCMU_DEEP_SLEEP_OK = 0xf6, + PRCMU_IDLE_OK = 0xf0, + PRCMU_DEEPIDLE_OK = 0xe3, + PRCMU_PRCMU2ARMPENDINGIT_ER = 0x91, + PRCMU_ARMPENDINGIT_ER = 0x93, +}; + +/* + * Definitions for autonomous power management configuration. + */ + +#define PRCMU_AUTO_PM_OFF 0 +#define PRCMU_AUTO_PM_ON 1 + +#define PRCMU_AUTO_PM_POWER_ON_HSEM BIT(0) +#define PRCMU_AUTO_PM_POWER_ON_ABB_FIFO_IT BIT(1) + +enum prcmu_auto_pm_policy { + PRCMU_AUTO_PM_POLICY_NO_CHANGE, + PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF, + PRCMU_AUTO_PM_POLICY_DSP_OFF_RAMRET_HWP_OFF, + PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_OFF, + PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_CLK_OFF, +}; + +/** + * struct prcmu_auto_pm_config - Autonomous power management configuration. + * @sia_auto_pm_enable: SIA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON}) + * @sia_power_on: SIA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask) + * @sia_policy: SIA power policy. 
(enum prcmu_auto_pm_policy) + * @sva_auto_pm_enable: SVA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON}) + * @sva_power_on: SVA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask) + * @sva_policy: SVA power policy. (enum prcmu_auto_pm_policy) + */ +struct prcmu_auto_pm_config { + u8 sia_auto_pm_enable; + u8 sia_power_on; + u8 sia_policy; + u8 sva_auto_pm_enable; + u8 sva_power_on; + u8 sva_policy; +}; + +#define PRCMU_FW_PROJECT_U8500 2 +#define PRCMU_FW_PROJECT_U9500 4 +#define PRCMU_FW_PROJECT_U8500_C2 7 +#define PRCMU_FW_PROJECT_U9500_C2 11 +#define PRCMU_FW_PROJECT_U8520 13 +#define PRCMU_FW_PROJECT_U8420 14 + +struct prcmu_fw_version { + u8 project; + u8 api_version; + u8 func_version; + u8 errata; +}; + +#ifdef CONFIG_MFD_DB8500_PRCMU + +void db8500_prcmu_early_init(void); +int prcmu_set_rc_a2p(enum romcode_write); +enum romcode_read prcmu_get_rc_p2a(void); +enum ap_pwrst prcmu_get_xp70_current_state(void); +bool prcmu_has_arm_maxopp(void); +struct prcmu_fw_version *prcmu_get_fw_version(void); +int prcmu_request_ape_opp_100_voltage(bool enable); +int prcmu_release_usb_wakeup_state(void); +void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep, + struct prcmu_auto_pm_config *idle); +bool prcmu_is_auto_pm_enabled(void); + +int prcmu_config_clkout(u8 clkout, u8 source, u8 div); +int prcmu_set_clock_divider(u8 clock, u8 divider); +int db8500_prcmu_config_hotdog(u8 threshold); +int db8500_prcmu_config_hotmon(u8 low, u8 high); +int db8500_prcmu_start_temp_sense(u16 cycles32k); +int db8500_prcmu_stop_temp_sense(void); +int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); +int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size); + +void prcmu_ac_wake_req(void); +void prcmu_ac_sleep_req(void); +void db8500_prcmu_modem_reset(void); + +int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off); +int db8500_prcmu_enable_a9wdog(u8 id); +int db8500_prcmu_disable_a9wdog(u8 id); +int db8500_prcmu_kick_a9wdog(u8 id); +int db8500_prcmu_load_a9wdog(u8 id, u32 val); + +void db8500_prcmu_system_reset(u16 reset_code); +int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll); +u8 db8500_prcmu_get_power_state_result(void); +int db8500_prcmu_gic_decouple(void); +int db8500_prcmu_gic_recouple(void); +int db8500_prcmu_copy_gic_settings(void); +bool db8500_prcmu_gic_pending_irq(void); +bool db8500_prcmu_pending_irq(void); +bool db8500_prcmu_is_cpu_in_wfi(int cpu); +void db8500_prcmu_enable_wakeups(u32 wakeups); +int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state); +int db8500_prcmu_request_clock(u8 clock, bool enable); +int db8500_prcmu_set_display_clocks(void); +int db8500_prcmu_disable_dsipll(void); +int db8500_prcmu_enable_dsipll(void); +void db8500_prcmu_config_abb_event_readout(u32 abb_events); +void db8500_prcmu_get_abb_event_buffer(void __iomem **buf); +int db8500_prcmu_config_esram0_deep_sleep(u8 state); +u16 db8500_prcmu_get_reset_code(void); +bool db8500_prcmu_is_ac_wake_requested(void); +int db8500_prcmu_set_arm_opp(u8 opp); +int db8500_prcmu_get_arm_opp(void); +int db8500_prcmu_set_ape_opp(u8 opp); +int db8500_prcmu_get_ape_opp(void); +int db8500_prcmu_set_ddr_opp(u8 opp); +int db8500_prcmu_get_ddr_opp(void); + +u32 db8500_prcmu_read(unsigned int reg); +void db8500_prcmu_write(unsigned int reg, u32 value); +void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value); + +#else /* !CONFIG_MFD_DB8500_PRCMU */ + +static inline void db8500_prcmu_early_init(void) {} + +static inline int prcmu_set_rc_a2p(enum romcode_write code) +{ + return 0; +} + 
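Editorial note, not part of the diff: the kernel-doc above describes struct prcmu_auto_pm_config and the prcmu_configure_auto_pm() prototype but shows no usage. Below is a minimal, hedged sketch of how a caller might fill in the sleep and idle configurations; the particular policy and wakeup choices are arbitrary examples, not recommended settings, and only the structure layout, the PRCMU_AUTO_PM_* macros and the function prototype come from the header itself.

#include <linux/mfd/db8500-prcmu.h>

static void example_configure_auto_pm(void)
{
	/* Let SIA/SVA manage their own power while the AP sleeps. */
	struct prcmu_auto_pm_config sleep_cfg = {
		.sia_auto_pm_enable = PRCMU_AUTO_PM_ON,
		.sia_power_on       = PRCMU_AUTO_PM_POWER_ON_HSEM,
		.sia_policy         = PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
		.sva_auto_pm_enable = PRCMU_AUTO_PM_ON,
		.sva_power_on       = PRCMU_AUTO_PM_POWER_ON_ABB_FIFO_IT,
		.sva_policy         = PRCMU_AUTO_PM_POLICY_DSP_OFF_RAMRET_HWP_OFF,
	};
	/* Leave everything unchanged while the AP is merely idle. */
	struct prcmu_auto_pm_config idle_cfg = {
		.sia_auto_pm_enable = PRCMU_AUTO_PM_OFF,
		.sia_policy         = PRCMU_AUTO_PM_POLICY_NO_CHANGE,
		.sva_auto_pm_enable = PRCMU_AUTO_PM_OFF,
		.sva_policy         = PRCMU_AUTO_PM_POLICY_NO_CHANGE,
	};

	prcmu_configure_auto_pm(&sleep_cfg, &idle_cfg);
}

When CONFIG_MFD_DB8500_PRCMU is not set, the stub version of prcmu_configure_auto_pm() further down in this header makes the call a no-op, so such a caller compiles either way.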
+static inline enum romcode_read prcmu_get_rc_p2a(void) +{ + return INIT; +} + +static inline enum ap_pwrst prcmu_get_xp70_current_state(void) +{ + return AP_EXECUTE; +} + +static inline bool prcmu_has_arm_maxopp(void) +{ + return false; +} + +static inline struct prcmu_fw_version *prcmu_get_fw_version(void) +{ + return NULL; +} + +static inline int db8500_prcmu_set_ape_opp(u8 opp) +{ + return 0; +} + +static inline int db8500_prcmu_get_ape_opp(void) +{ + return APE_100_OPP; +} + +static inline int prcmu_request_ape_opp_100_voltage(bool enable) +{ + return 0; +} + +static inline int prcmu_release_usb_wakeup_state(void) +{ + return 0; +} + +static inline int db8500_prcmu_set_ddr_opp(u8 opp) +{ + return 0; +} + +static inline int db8500_prcmu_get_ddr_opp(void) +{ + return DDR_100_OPP; +} + +static inline void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep, + struct prcmu_auto_pm_config *idle) +{ +} + +static inline bool prcmu_is_auto_pm_enabled(void) +{ + return false; +} + +static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div) +{ + return 0; +} + +static inline int prcmu_set_clock_divider(u8 clock, u8 divider) +{ + return 0; +} + +static inline int db8500_prcmu_config_hotdog(u8 threshold) +{ + return 0; +} + +static inline int db8500_prcmu_config_hotmon(u8 low, u8 high) +{ + return 0; +} + +static inline int db8500_prcmu_start_temp_sense(u16 cycles32k) +{ + return 0; +} + +static inline int db8500_prcmu_stop_temp_sense(void) +{ + return 0; +} + +static inline int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline void prcmu_ac_wake_req(void) {} + +static inline void prcmu_ac_sleep_req(void) {} + +static inline void db8500_prcmu_modem_reset(void) {} + +static inline void db8500_prcmu_system_reset(u16 reset_code) {} + +static inline int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, + bool keep_ap_pll) +{ + return 0; +} + +static inline u8 db8500_prcmu_get_power_state_result(void) +{ + return 0; +} + +static inline void db8500_prcmu_enable_wakeups(u32 wakeups) {} + +static inline int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state) +{ + return 0; +} + +static inline int db8500_prcmu_request_clock(u8 clock, bool enable) +{ + return 0; +} + +static inline int db8500_prcmu_set_display_clocks(void) +{ + return 0; +} + +static inline int db8500_prcmu_disable_dsipll(void) +{ + return 0; +} + +static inline int db8500_prcmu_enable_dsipll(void) +{ + return 0; +} + +static inline int db8500_prcmu_config_esram0_deep_sleep(u8 state) +{ + return 0; +} + +static inline void db8500_prcmu_config_abb_event_readout(u32 abb_events) {} + +static inline void db8500_prcmu_get_abb_event_buffer(void __iomem **buf) {} + +static inline u16 db8500_prcmu_get_reset_code(void) +{ + return 0; +} + +static inline int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off) +{ + return 0; +} + +static inline int db8500_prcmu_enable_a9wdog(u8 id) +{ + return 0; +} + +static inline int db8500_prcmu_disable_a9wdog(u8 id) +{ + return 0; +} + +static inline int db8500_prcmu_kick_a9wdog(u8 id) +{ + return 0; +} + +static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val) +{ + return 0; +} + +static inline bool db8500_prcmu_is_ac_wake_requested(void) +{ + return 0; +} + +static inline int db8500_prcmu_set_arm_opp(u8 opp) +{ + return 0; +} + +static inline int db8500_prcmu_get_arm_opp(void) +{ + return 0; +} + +static inline u32 db8500_prcmu_read(unsigned 
int reg) +{ + return 0; +} + +static inline void db8500_prcmu_write(unsigned int reg, u32 value) {} + +static inline void db8500_prcmu_write_masked(unsigned int reg, u32 mask, + u32 value) {} + +#endif /* !CONFIG_MFD_DB8500_PRCMU */ + +#endif /* __MFD_DB8500_PRCMU_H */ diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h new file mode 100644 index 00000000..d7674eb7 --- /dev/null +++ b/include/linux/mfd/dbx500-prcmu.h @@ -0,0 +1,915 @@ +/* + * Copyright (C) ST Ericsson SA 2011 + * + * License Terms: GNU General Public License v2 + * + * STE Ux500 PRCMU API + */ +#ifndef __MACH_PRCMU_H +#define __MACH_PRCMU_H + +#include <linux/interrupt.h> +#include <linux/notifier.h> +#include <linux/err.h> + +/* PRCMU Wakeup defines */ +enum prcmu_wakeup_index { + PRCMU_WAKEUP_INDEX_RTC, + PRCMU_WAKEUP_INDEX_RTT0, + PRCMU_WAKEUP_INDEX_RTT1, + PRCMU_WAKEUP_INDEX_HSI0, + PRCMU_WAKEUP_INDEX_HSI1, + PRCMU_WAKEUP_INDEX_USB, + PRCMU_WAKEUP_INDEX_ABB, + PRCMU_WAKEUP_INDEX_ABB_FIFO, + PRCMU_WAKEUP_INDEX_ARM, + PRCMU_WAKEUP_INDEX_CD_IRQ, + NUM_PRCMU_WAKEUP_INDICES +}; +#define PRCMU_WAKEUP(_name) (BIT(PRCMU_WAKEUP_INDEX_##_name)) + +/* EPOD (power domain) IDs */ + +/* + * DB8500 EPODs + * - EPOD_ID_SVAMMDSP: power domain for SVA MMDSP + * - EPOD_ID_SVAPIPE: power domain for SVA pipe + * - EPOD_ID_SIAMMDSP: power domain for SIA MMDSP + * - EPOD_ID_SIAPIPE: power domain for SIA pipe + * - EPOD_ID_SGA: power domain for SGA + * - EPOD_ID_B2R2_MCDE: power domain for B2R2 and MCDE + * - EPOD_ID_ESRAM12: power domain for ESRAM 1 and 2 + * - EPOD_ID_ESRAM34: power domain for ESRAM 3 and 4 + * - NUM_EPOD_ID: number of power domains + * + * TODO: These should be prefixed. + */ +#define EPOD_ID_SVAMMDSP 0 +#define EPOD_ID_SVAPIPE 1 +#define EPOD_ID_SIAMMDSP 2 +#define EPOD_ID_SIAPIPE 3 +#define EPOD_ID_SGA 4 +#define EPOD_ID_B2R2_MCDE 5 +#define EPOD_ID_ESRAM12 6 +#define EPOD_ID_ESRAM34 7 +#define NUM_EPOD_ID 8 + +/* + * DB5500 EPODs + */ +#define DB5500_EPOD_ID_BASE 0x0100 +#define DB5500_EPOD_ID_SGA (DB5500_EPOD_ID_BASE + 0) +#define DB5500_EPOD_ID_HVA (DB5500_EPOD_ID_BASE + 1) +#define DB5500_EPOD_ID_SIA (DB5500_EPOD_ID_BASE + 2) +#define DB5500_EPOD_ID_DISP (DB5500_EPOD_ID_BASE + 3) +#define DB5500_EPOD_ID_ESRAM12 (DB5500_EPOD_ID_BASE + 6) +#define DB5500_NUM_EPOD_ID 7 + +/* + * state definition for EPOD (power domain) + * - EPOD_STATE_NO_CHANGE: The EPOD should remain unchanged + * - EPOD_STATE_OFF: The EPOD is switched off + * - EPOD_STATE_RAMRET: The EPOD is switched off with its internal RAM in + * retention + * - EPOD_STATE_ON_CLK_OFF: The EPOD is switched on, clock is still off + * - EPOD_STATE_ON: Same as above, but with clock enabled + */ +#define EPOD_STATE_NO_CHANGE 0x00 +#define EPOD_STATE_OFF 0x01 +#define EPOD_STATE_RAMRET 0x02 +#define EPOD_STATE_ON_CLK_OFF 0x03 +#define EPOD_STATE_ON 0x04 + +/* DB5500 CLKOUT IDs */ +enum { + DB5500_CLKOUT0 = 0, + DB5500_CLKOUT1, +}; + +/* DB5500 CLKOUTx sources */ +enum { + DB5500_CLKOUT_REF_CLK_SEL0, + DB5500_CLKOUT_RTC_CLK0_SEL0, + DB5500_CLKOUT_ULP_CLK_SEL0, + DB5500_CLKOUT_STATIC0, + DB5500_CLKOUT_REFCLK, + DB5500_CLKOUT_ULPCLK, + DB5500_CLKOUT_ARMCLK, + DB5500_CLKOUT_SYSACC0CLK, + DB5500_CLKOUT_SOC0PLLCLK, + DB5500_CLKOUT_SOC1PLLCLK, + DB5500_CLKOUT_DDRPLLCLK, + DB5500_CLKOUT_TVCLK, + DB5500_CLKOUT_IRDACLK, +}; + +/* + * CLKOUT sources + */ +#define PRCMU_CLKSRC_CLK38M 0x00 +#define PRCMU_CLKSRC_ACLK 0x01 +#define PRCMU_CLKSRC_SYSCLK 0x02 +#define PRCMU_CLKSRC_LCDCLK 0x03 +#define PRCMU_CLKSRC_SDMMCCLK 0x04 +#define PRCMU_CLKSRC_TVCLK 
0x05 +#define PRCMU_CLKSRC_TIMCLK 0x06 +#define PRCMU_CLKSRC_CLK009 0x07 +/* These are only valid for CLKOUT1: */ +#define PRCMU_CLKSRC_SIAMMDSPCLK 0x40 +#define PRCMU_CLKSRC_I2CCLK 0x41 +#define PRCMU_CLKSRC_MSP02CLK 0x42 +#define PRCMU_CLKSRC_ARMPLL_OBSCLK 0x43 +#define PRCMU_CLKSRC_HSIRXCLK 0x44 +#define PRCMU_CLKSRC_HSITXCLK 0x45 +#define PRCMU_CLKSRC_ARMCLKFIX 0x46 +#define PRCMU_CLKSRC_HDMICLK 0x47 + +/* + * Clock identifiers. + */ +enum prcmu_clock { + PRCMU_SGACLK, + PRCMU_UARTCLK, + PRCMU_MSP02CLK, + PRCMU_MSP1CLK, + PRCMU_I2CCLK, + PRCMU_SDMMCCLK, + PRCMU_SPARE1CLK, + PRCMU_SLIMCLK, + PRCMU_PER1CLK, + PRCMU_PER2CLK, + PRCMU_PER3CLK, + PRCMU_PER5CLK, + PRCMU_PER6CLK, + PRCMU_PER7CLK, + PRCMU_LCDCLK, + PRCMU_BMLCLK, + PRCMU_HSITXCLK, + PRCMU_HSIRXCLK, + PRCMU_HDMICLK, + PRCMU_APEATCLK, + PRCMU_APETRACECLK, + PRCMU_MCDECLK, + PRCMU_IPI2CCLK, + PRCMU_DSIALTCLK, + PRCMU_DMACLK, + PRCMU_B2R2CLK, + PRCMU_TVCLK, + PRCMU_SSPCLK, + PRCMU_RNGCLK, + PRCMU_UICCCLK, + PRCMU_PWMCLK, + PRCMU_IRDACLK, + PRCMU_IRRCCLK, + PRCMU_SIACLK, + PRCMU_SVACLK, + PRCMU_ACLK, + PRCMU_NUM_REG_CLOCKS, + PRCMU_SYSCLK = PRCMU_NUM_REG_CLOCKS, + PRCMU_CDCLK, + PRCMU_TIMCLK, + PRCMU_PLLSOC0, + PRCMU_PLLSOC1, + PRCMU_PLLDDR, + PRCMU_PLLDSI, + PRCMU_DSI0CLK, + PRCMU_DSI1CLK, + PRCMU_DSI0ESCCLK, + PRCMU_DSI1ESCCLK, + PRCMU_DSI2ESCCLK, +}; + +/** + * enum ape_opp - APE OPP states definition + * @APE_OPP_INIT: + * @APE_NO_CHANGE: The APE operating point is unchanged + * @APE_100_OPP: The new APE operating point is ape100opp + * @APE_50_OPP: 50% + * @APE_50_PARTLY_25_OPP: 50%, except some clocks at 25%. + */ +enum ape_opp { + APE_OPP_INIT = 0x00, + APE_NO_CHANGE = 0x01, + APE_100_OPP = 0x02, + APE_50_OPP = 0x03, + APE_50_PARTLY_25_OPP = 0xFF, +}; + +/** + * enum arm_opp - ARM OPP states definition + * @ARM_OPP_INIT: + * @ARM_NO_CHANGE: The ARM operating point is unchanged + * @ARM_100_OPP: The new ARM operating point is arm100opp + * @ARM_50_OPP: The new ARM operating point is arm50opp + * @ARM_MAX_OPP: Operating point is "max" (more than 100) + * @ARM_MAX_FREQ100OPP: Set max opp if available, else 100 + * @ARM_EXTCLK: The new ARM operating point is armExtClk + */ +enum arm_opp { + ARM_OPP_INIT = 0x00, + ARM_NO_CHANGE = 0x01, + ARM_100_OPP = 0x02, + ARM_50_OPP = 0x03, + ARM_MAX_OPP = 0x04, + ARM_MAX_FREQ100OPP = 0x05, + ARM_EXTCLK = 0x07 +}; + +/** + * enum ddr_opp - DDR OPP states definition + * @DDR_100_OPP: The new DDR operating point is ddr100opp + * @DDR_50_OPP: The new DDR operating point is ddr50opp + * @DDR_25_OPP: The new DDR operating point is ddr25opp + */ +enum ddr_opp { + DDR_100_OPP = 0x00, + DDR_50_OPP = 0x01, + DDR_25_OPP = 0x02, +}; + +/* + * Definitions for controlling ESRAM0 in deep sleep. 
+ */ +#define ESRAM0_DEEP_SLEEP_STATE_OFF 1 +#define ESRAM0_DEEP_SLEEP_STATE_RET 2 + +/** + * enum ddr_pwrst - DDR power states definition + * @DDR_PWR_STATE_UNCHANGED: SDRAM and DDR controller state is unchanged + * @DDR_PWR_STATE_ON: + * @DDR_PWR_STATE_OFFLOWLAT: + * @DDR_PWR_STATE_OFFHIGHLAT: + */ +enum ddr_pwrst { + DDR_PWR_STATE_UNCHANGED = 0x00, + DDR_PWR_STATE_ON = 0x01, + DDR_PWR_STATE_OFFLOWLAT = 0x02, + DDR_PWR_STATE_OFFHIGHLAT = 0x03 +}; + +#include <linux/mfd/db8500-prcmu.h> +#include <linux/mfd/db5500-prcmu.h> + +#if defined(CONFIG_UX500_SOC_DB8500) || defined(CONFIG_UX500_SOC_DB5500) + +#include <mach/id.h> + +static inline void __init prcmu_early_init(void) +{ + if (cpu_is_u5500()) + return db5500_prcmu_early_init(); + else + return db8500_prcmu_early_init(); +} + +static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk, + bool keep_ap_pll) +{ + if (cpu_is_u5500()) + return db5500_prcmu_set_power_state(state, keep_ulp_clk, + keep_ap_pll); + else + return db8500_prcmu_set_power_state(state, keep_ulp_clk, + keep_ap_pll); +} + +static inline u8 prcmu_get_power_state_result(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_get_power_state_result(); +} + +static inline int prcmu_gic_decouple(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_gic_decouple(); +} + +static inline int prcmu_gic_recouple(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_gic_recouple(); +} + +static inline bool prcmu_gic_pending_irq(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_gic_pending_irq(); +} + +static inline bool prcmu_is_cpu_in_wfi(int cpu) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_is_cpu_in_wfi(cpu); +} + +static inline int prcmu_copy_gic_settings(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_copy_gic_settings(); +} + +static inline bool prcmu_pending_irq(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_pending_irq(); +} + +static inline int prcmu_set_epod(u16 epod_id, u8 epod_state) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_set_epod(epod_id, epod_state); +} + +static inline void prcmu_enable_wakeups(u32 wakeups) +{ + if (cpu_is_u5500()) + db5500_prcmu_enable_wakeups(wakeups); + else + db8500_prcmu_enable_wakeups(wakeups); +} + +static inline void prcmu_disable_wakeups(void) +{ + prcmu_enable_wakeups(0); +} + +static inline void prcmu_config_abb_event_readout(u32 abb_events) +{ + if (cpu_is_u5500()) + db5500_prcmu_config_abb_event_readout(abb_events); + else + db8500_prcmu_config_abb_event_readout(abb_events); +} + +static inline void prcmu_get_abb_event_buffer(void __iomem **buf) +{ + if (cpu_is_u5500()) + db5500_prcmu_get_abb_event_buffer(buf); + else + db8500_prcmu_get_abb_event_buffer(buf); +} + +int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); +int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size); +int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size); + +int prcmu_config_clkout(u8 clkout, u8 source, u8 div); + +static inline int prcmu_request_clock(u8 clock, bool enable) +{ + if (cpu_is_u5500()) + return db5500_prcmu_request_clock(clock, enable); + else + return db8500_prcmu_request_clock(clock, enable); +} + +unsigned long prcmu_clock_rate(u8 clock); +long prcmu_round_clock_rate(u8 clock, unsigned long rate); +int prcmu_set_clock_rate(u8 clock, unsigned long rate); + +static inline int prcmu_set_ddr_opp(u8 
opp) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_set_ddr_opp(opp); +} +static inline int prcmu_get_ddr_opp(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_get_ddr_opp(); +} + +static inline int prcmu_set_arm_opp(u8 opp) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_set_arm_opp(opp); +} + +static inline int prcmu_get_arm_opp(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_get_arm_opp(); +} + +static inline int prcmu_set_ape_opp(u8 opp) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_set_ape_opp(opp); +} + +static inline int prcmu_get_ape_opp(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_get_ape_opp(); +} + +static inline void prcmu_system_reset(u16 reset_code) +{ + if (cpu_is_u5500()) + return db5500_prcmu_system_reset(reset_code); + else + return db8500_prcmu_system_reset(reset_code); +} + +static inline u16 prcmu_get_reset_code(void) +{ + if (cpu_is_u5500()) + return db5500_prcmu_get_reset_code(); + else + return db8500_prcmu_get_reset_code(); +} + +void prcmu_ac_wake_req(void); +void prcmu_ac_sleep_req(void); +static inline void prcmu_modem_reset(void) +{ + if (cpu_is_u5500()) + return; + else + return db8500_prcmu_modem_reset(); +} + +static inline bool prcmu_is_ac_wake_requested(void) +{ + if (cpu_is_u5500()) + return db5500_prcmu_is_ac_wake_requested(); + else + return db8500_prcmu_is_ac_wake_requested(); +} + +static inline int prcmu_set_display_clocks(void) +{ + if (cpu_is_u5500()) + return db5500_prcmu_set_display_clocks(); + else + return db8500_prcmu_set_display_clocks(); +} + +static inline int prcmu_disable_dsipll(void) +{ + if (cpu_is_u5500()) + return db5500_prcmu_disable_dsipll(); + else + return db8500_prcmu_disable_dsipll(); +} + +static inline int prcmu_enable_dsipll(void) +{ + if (cpu_is_u5500()) + return db5500_prcmu_enable_dsipll(); + else + return db8500_prcmu_enable_dsipll(); +} + +static inline int prcmu_config_esram0_deep_sleep(u8 state) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_config_esram0_deep_sleep(state); +} + +static inline int prcmu_config_hotdog(u8 threshold) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_config_hotdog(threshold); +} + +static inline int prcmu_config_hotmon(u8 low, u8 high) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_config_hotmon(low, high); +} + +static inline int prcmu_start_temp_sense(u16 cycles32k) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_start_temp_sense(cycles32k); +} + +static inline int prcmu_stop_temp_sense(void) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_stop_temp_sense(); +} + +static inline u32 prcmu_read(unsigned int reg) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_read(reg); +} + +static inline void prcmu_write(unsigned int reg, u32 value) +{ + if (cpu_is_u5500()) + return; + else + db8500_prcmu_write(reg, value); +} + +static inline void prcmu_write_masked(unsigned int reg, u32 mask, u32 value) +{ + if (cpu_is_u5500()) + return; + else + db8500_prcmu_write_masked(reg, mask, value); +} + +static inline int prcmu_enable_a9wdog(u8 id) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_enable_a9wdog(id); +} + +static inline int prcmu_disable_a9wdog(u8 id) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_disable_a9wdog(id); +} + +static inline int 
prcmu_kick_a9wdog(u8 id) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_kick_a9wdog(id); +} + +static inline int prcmu_load_a9wdog(u8 id, u32 timeout) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_load_a9wdog(id, timeout); +} + +static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off) +{ + if (cpu_is_u5500()) + return -EINVAL; + else + return db8500_prcmu_config_a9wdog(num, sleep_auto_off); +} +#else + +static inline void __init prcmu_early_init(void) {} + +static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk, + bool keep_ap_pll) +{ + return 0; +} + +static inline int prcmu_set_epod(u16 epod_id, u8 epod_state) +{ + return 0; +} + +static inline void prcmu_enable_wakeups(u32 wakeups) {} + +static inline void prcmu_disable_wakeups(void) {} + +static inline int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, + u8 size) +{ + return -ENOSYS; +} + +static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div) +{ + return 0; +} + +static inline int prcmu_request_clock(u8 clock, bool enable) +{ + return 0; +} + +static inline long prcmu_round_clock_rate(u8 clock, unsigned long rate) +{ + return 0; +} + +static inline int prcmu_set_clock_rate(u8 clock, unsigned long rate) +{ + return 0; +} + +static inline unsigned long prcmu_clock_rate(u8 clock) +{ + return 0; +} + +static inline int prcmu_set_ape_opp(u8 opp) +{ + return 0; +} + +static inline int prcmu_get_ape_opp(void) +{ + return APE_100_OPP; +} + +static inline int prcmu_set_arm_opp(u8 opp) +{ + return 0; +} + +static inline int prcmu_get_arm_opp(void) +{ + return ARM_100_OPP; +} + +static inline int prcmu_set_ddr_opp(u8 opp) +{ + return 0; +} + +static inline int prcmu_get_ddr_opp(void) +{ + return DDR_100_OPP; +} + +static inline void prcmu_system_reset(u16 reset_code) {} + +static inline u16 prcmu_get_reset_code(void) +{ + return 0; +} + +static inline void prcmu_ac_wake_req(void) {} + +static inline void prcmu_ac_sleep_req(void) {} + +static inline void prcmu_modem_reset(void) {} + +static inline bool prcmu_is_ac_wake_requested(void) +{ + return false; +} + +static inline int prcmu_set_display_clocks(void) +{ + return 0; +} + +static inline int prcmu_disable_dsipll(void) +{ + return 0; +} + +static inline int prcmu_enable_dsipll(void) +{ + return 0; +} + +static inline int prcmu_config_esram0_deep_sleep(u8 state) +{ + return 0; +} + +static inline void prcmu_config_abb_event_readout(u32 abb_events) {} + +static inline void prcmu_get_abb_event_buffer(void __iomem **buf) +{ + *buf = NULL; +} + +static inline int prcmu_config_hotdog(u8 threshold) +{ + return 0; +} + +static inline int prcmu_config_hotmon(u8 low, u8 high) +{ + return 0; +} + +static inline int prcmu_start_temp_sense(u16 cycles32k) +{ + return 0; +} + +static inline int prcmu_stop_temp_sense(void) +{ + return 0; +} + +static inline u32 prcmu_read(unsigned int reg) +{ + return 0; +} + +static inline void prcmu_write(unsigned int reg, u32 value) {} + +static inline void prcmu_write_masked(unsigned int reg, u32 mask, u32 value) {} + +#endif + +static inline void prcmu_set(unsigned int reg, u32 bits) +{ + prcmu_write_masked(reg, bits, bits); +} + +static inline void prcmu_clear(unsigned int reg, u32 bits) +{ + prcmu_write_masked(reg, bits, 0); +} + +#if defined(CONFIG_UX500_SOC_DB8500) || 
defined(CONFIG_UX500_SOC_DB5500) + +/** + * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1. + */ +static inline void prcmu_enable_spi2(void) +{ + if (cpu_is_u8500()) + prcmu_set(DB8500_PRCM_GPIOCR, DB8500_PRCM_GPIOCR_SPI2_SELECT); +} + +/** + * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1. + */ +static inline void prcmu_disable_spi2(void) +{ + if (cpu_is_u8500()) + prcmu_clear(DB8500_PRCM_GPIOCR, DB8500_PRCM_GPIOCR_SPI2_SELECT); +} + +/** + * prcmu_enable_stm_mod_uart - Enables pin muxing for STMMOD + * and UARTMOD on OtherAlternateC3. + */ +static inline void prcmu_enable_stm_mod_uart(void) +{ + if (cpu_is_u8500()) { + prcmu_set(DB8500_PRCM_GPIOCR, + (DB8500_PRCM_GPIOCR_DBG_STM_MOD_CMD1 | + DB8500_PRCM_GPIOCR_DBG_UARTMOD_CMD0)); + } +} + +/** + * prcmu_disable_stm_mod_uart - Disables pin muxing for STMMOD + * and UARTMOD on OtherAlternateC3. + */ +static inline void prcmu_disable_stm_mod_uart(void) +{ + if (cpu_is_u8500()) { + prcmu_clear(DB8500_PRCM_GPIOCR, + (DB8500_PRCM_GPIOCR_DBG_STM_MOD_CMD1 | + DB8500_PRCM_GPIOCR_DBG_UARTMOD_CMD0)); + } +} + +/** + * prcmu_enable_stm_ape - Enables pin muxing for STM APE on OtherAlternateC1. + */ +static inline void prcmu_enable_stm_ape(void) +{ + if (cpu_is_u8500()) { + prcmu_set(DB8500_PRCM_GPIOCR, + DB8500_PRCM_GPIOCR_DBG_STM_APE_CMD); + } +} + +/** + * prcmu_disable_stm_ape - Disables pin muxing for STM APE on OtherAlternateC1. + */ +static inline void prcmu_disable_stm_ape(void) +{ + if (cpu_is_u8500()) { + prcmu_clear(DB8500_PRCM_GPIOCR, + DB8500_PRCM_GPIOCR_DBG_STM_APE_CMD); + } +} + +#else + +static inline void prcmu_enable_spi2(void) {} +static inline void prcmu_disable_spi2(void) {} +static inline void prcmu_enable_stm_mod_uart(void) {} +static inline void prcmu_disable_stm_mod_uart(void) {} +static inline void prcmu_enable_stm_ape(void) {} +static inline void prcmu_disable_stm_ape(void) {} + +#endif + +/* PRCMU QoS APE OPP class */ +#define PRCMU_QOS_APE_OPP 1 +#define PRCMU_QOS_DDR_OPP 2 +#define PRCMU_QOS_ARM_OPP 3 +#define PRCMU_QOS_DEFAULT_VALUE -1 + +#ifdef CONFIG_DBX500_PRCMU_QOS_POWER + +unsigned long prcmu_qos_get_cpufreq_opp_delay(void); +void prcmu_qos_set_cpufreq_opp_delay(unsigned long); +void prcmu_qos_force_opp(int, s32); +int prcmu_qos_requirement(int pm_qos_class); +int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value); +int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value); +void prcmu_qos_remove_requirement(int pm_qos_class, char *name); +int prcmu_qos_add_notifier(int prcmu_qos_class, + struct notifier_block *notifier); +int prcmu_qos_remove_notifier(int prcmu_qos_class, + struct notifier_block *notifier); + +#else + +static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void) +{ + return 0; +} + +static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {} + +static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {} + +static inline int prcmu_qos_requirement(int prcmu_qos_class) +{ + return 0; +} + +static inline int prcmu_qos_add_requirement(int prcmu_qos_class, + char *name, s32 value) +{ + return 0; +} + +static inline int prcmu_qos_update_requirement(int prcmu_qos_class, + char *name, s32 new_value) +{ + return 0; +} + +static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name) +{ +} + +static inline int prcmu_qos_add_notifier(int prcmu_qos_class, + struct notifier_block *notifier) +{ + return 0; +} +static inline int prcmu_qos_remove_notifier(int prcmu_qos_class, + struct 
notifier_block *notifier) +{ + return 0; +} + +#endif + +#endif /* __MACH_PRCMU_H */ diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h new file mode 100644 index 00000000..38a372a0 --- /dev/null +++ b/include/linux/mfd/ds1wm.h @@ -0,0 +1,13 @@ +/* MFD cell driver data for the DS1WM driver */ + +struct ds1wm_driver_data { + int active_high; + int clock_rate; + /* in milliseconds, the amount of time to */ + /* sleep following a reset pulse. Zero */ + /* should work if your bus devices recover*/ + /* time respects the 1-wire spec since the*/ + /* ds1wm implements the precise timings of*/ + /* a reset pulse/presence detect sequence.*/ + unsigned int reset_recover_delay; +}; diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h new file mode 100644 index 00000000..40c37216 --- /dev/null +++ b/include/linux/mfd/ezx-pcap.h @@ -0,0 +1,252 @@ +/* + * Copyright 2009 Daniel Ribeiro <drwyrm@gmail.com> + * + * For further information, please see http://wiki.openezx.org/PCAP2 + */ + +#ifndef EZX_PCAP_H +#define EZX_PCAP_H + +struct pcap_subdev { + int id; + const char *name; + void *platform_data; +}; + +struct pcap_platform_data { + unsigned int irq_base; + unsigned int config; + void (*init) (void *); /* board specific init */ + int num_subdevs; + struct pcap_subdev *subdevs; +}; + +struct pcap_chip; + +int ezx_pcap_write(struct pcap_chip *, u8, u32); +int ezx_pcap_read(struct pcap_chip *, u8, u32 *); +int ezx_pcap_set_bits(struct pcap_chip *, u8, u32, u32); +int pcap_to_irq(struct pcap_chip *, int); +int irq_to_pcap(struct pcap_chip *, int); +int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *); +int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]); +void pcap_set_ts_bits(struct pcap_chip *, u32); + +#define PCAP_SECOND_PORT 1 +#define PCAP_CS_AH 2 + +#define PCAP_REGISTER_WRITE_OP_BIT 0x80000000 +#define PCAP_REGISTER_READ_OP_BIT 0x00000000 + +#define PCAP_REGISTER_VALUE_MASK 0x01ffffff +#define PCAP_REGISTER_ADDRESS_MASK 0x7c000000 +#define PCAP_REGISTER_ADDRESS_SHIFT 26 +#define PCAP_REGISTER_NUMBER 32 +#define PCAP_CLEAR_INTERRUPT_REGISTER 0x01ffffff +#define PCAP_MASK_ALL_INTERRUPT 0x01ffffff + +/* registers accessible by both pcap ports */ +#define PCAP_REG_ISR 0x0 /* Interrupt Status */ +#define PCAP_REG_MSR 0x1 /* Interrupt Mask */ +#define PCAP_REG_PSTAT 0x2 /* Processor Status */ +#define PCAP_REG_VREG2 0x6 /* Regulator Bank 2 Control */ +#define PCAP_REG_AUXVREG 0x7 /* Auxiliary Regulator Control */ +#define PCAP_REG_BATT 0x8 /* Battery Control */ +#define PCAP_REG_ADC 0x9 /* AD Control */ +#define PCAP_REG_ADR 0xa /* AD Result */ +#define PCAP_REG_CODEC 0xb /* Audio Codec Control */ +#define PCAP_REG_RX_AMPS 0xc /* RX Audio Amplifiers Control */ +#define PCAP_REG_ST_DAC 0xd /* Stereo DAC Control */ +#define PCAP_REG_BUSCTRL 0x14 /* Connectivity Control */ +#define PCAP_REG_PERIPH 0x15 /* Peripheral Control */ +#define PCAP_REG_LOWPWR 0x18 /* Regulator Low Power Control */ +#define PCAP_REG_TX_AMPS 0x1a /* TX Audio Amplifiers Control */ +#define PCAP_REG_GP 0x1b /* General Purpose */ +#define PCAP_REG_TEST1 0x1c +#define PCAP_REG_TEST2 0x1d +#define PCAP_REG_VENDOR_TEST1 0x1e +#define PCAP_REG_VENDOR_TEST2 0x1f + +/* registers accessible by pcap port 1 only (a1200, e2 & e6) */ +#define PCAP_REG_INT_SEL 0x3 /* Interrupt Select */ +#define PCAP_REG_SWCTRL 0x4 /* Switching Regulator Control */ +#define PCAP_REG_VREG1 0x5 /* Regulator Bank 1 Control */ +#define PCAP_REG_RTC_TOD 0xe /* RTC Time of Day */ +#define PCAP_REG_RTC_TODA 0xf 
/* RTC Time of Day Alarm */ +#define PCAP_REG_RTC_DAY 0x10 /* RTC Day */ +#define PCAP_REG_RTC_DAYA 0x11 /* RTC Day Alarm */ +#define PCAP_REG_MTRTMR 0x12 /* AD Monitor Timer */ +#define PCAP_REG_PWR 0x13 /* Power Control */ +#define PCAP_REG_AUXVREG_MASK 0x16 /* Auxiliary Regulator Mask */ +#define PCAP_REG_VENDOR_REV 0x17 +#define PCAP_REG_PERIPH_MASK 0x19 /* Peripheral Mask */ + +/* PCAP2 Interrupts */ +#define PCAP_NIRQS 23 +#define PCAP_IRQ_ADCDONE 0 /* ADC done port 1 */ +#define PCAP_IRQ_TS 1 /* Touch Screen */ +#define PCAP_IRQ_1HZ 2 /* 1HZ timer */ +#define PCAP_IRQ_WH 3 /* ADC above high limit */ +#define PCAP_IRQ_WL 4 /* ADC below low limit */ +#define PCAP_IRQ_TODA 5 /* Time of day alarm */ +#define PCAP_IRQ_USB4V 6 /* USB above 4V */ +#define PCAP_IRQ_ONOFF 7 /* On/Off button */ +#define PCAP_IRQ_ONOFF2 8 /* On/Off button 2 */ +#define PCAP_IRQ_USB1V 9 /* USB above 1V */ +#define PCAP_IRQ_MOBPORT 10 +#define PCAP_IRQ_MIC 11 /* Mic attach/HS button */ +#define PCAP_IRQ_HS 12 /* Headset attach */ +#define PCAP_IRQ_ST 13 +#define PCAP_IRQ_PC 14 /* Power Cut */ +#define PCAP_IRQ_WARM 15 +#define PCAP_IRQ_EOL 16 /* Battery End Of Life */ +#define PCAP_IRQ_CLK 17 +#define PCAP_IRQ_SYSRST 18 /* System Reset */ +#define PCAP_IRQ_DUMMY 19 +#define PCAP_IRQ_ADCDONE2 20 /* ADC done port 2 */ +#define PCAP_IRQ_SOFTRESET 21 +#define PCAP_IRQ_MNEXB 22 + +/* voltage regulators */ +#define V1 0 +#define V2 1 +#define V3 2 +#define V4 3 +#define V5 4 +#define V6 5 +#define V7 6 +#define V8 7 +#define V9 8 +#define V10 9 +#define VAUX1 10 +#define VAUX2 11 +#define VAUX3 12 +#define VAUX4 13 +#define VSIM 14 +#define VSIM2 15 +#define VVIB 16 +#define SW1 17 +#define SW2 18 +#define SW3 19 +#define SW1S 20 +#define SW2S 21 + +#define PCAP_BATT_DAC_MASK 0x000000ff +#define PCAP_BATT_DAC_SHIFT 0 +#define PCAP_BATT_B_FDBK (1 << 8) +#define PCAP_BATT_EXT_ISENSE (1 << 9) +#define PCAP_BATT_V_COIN_MASK 0x00003c00 +#define PCAP_BATT_V_COIN_SHIFT 10 +#define PCAP_BATT_I_COIN (1 << 14) +#define PCAP_BATT_COIN_CH_EN (1 << 15) +#define PCAP_BATT_EOL_SEL_MASK 0x000e0000 +#define PCAP_BATT_EOL_SEL_SHIFT 17 +#define PCAP_BATT_EOL_CMP_EN (1 << 20) +#define PCAP_BATT_BATT_DET_EN (1 << 21) +#define PCAP_BATT_THERMBIAS_CTRL (1 << 22) + +#define PCAP_ADC_ADEN (1 << 0) +#define PCAP_ADC_RAND (1 << 1) +#define PCAP_ADC_AD_SEL1 (1 << 2) +#define PCAP_ADC_AD_SEL2 (1 << 3) +#define PCAP_ADC_ADA1_MASK 0x00000070 +#define PCAP_ADC_ADA1_SHIFT 4 +#define PCAP_ADC_ADA2_MASK 0x00000380 +#define PCAP_ADC_ADA2_SHIFT 7 +#define PCAP_ADC_ATO_MASK 0x00003c00 +#define PCAP_ADC_ATO_SHIFT 10 +#define PCAP_ADC_ATOX (1 << 14) +#define PCAP_ADC_MTR1 (1 << 15) +#define PCAP_ADC_MTR2 (1 << 16) +#define PCAP_ADC_TS_M_MASK 0x000e0000 +#define PCAP_ADC_TS_M_SHIFT 17 +#define PCAP_ADC_TS_REF_LOWPWR (1 << 20) +#define PCAP_ADC_TS_REFENB (1 << 21) +#define PCAP_ADC_BATT_I_POLARITY (1 << 22) +#define PCAP_ADC_BATT_I_ADC (1 << 23) + +#define PCAP_ADC_BANK_0 0 +#define PCAP_ADC_BANK_1 1 +/* ADC bank 0 */ +#define PCAP_ADC_CH_COIN 0 +#define PCAP_ADC_CH_BATT 1 +#define PCAP_ADC_CH_BPLUS 2 +#define PCAP_ADC_CH_MOBPORTB 3 +#define PCAP_ADC_CH_TEMPERATURE 4 +#define PCAP_ADC_CH_CHARGER_ID 5 +#define PCAP_ADC_CH_AD6 6 +/* ADC bank 1 */ +#define PCAP_ADC_CH_AD7 0 +#define PCAP_ADC_CH_AD8 1 +#define PCAP_ADC_CH_AD9 2 +#define PCAP_ADC_CH_TS_X1 3 +#define PCAP_ADC_CH_TS_X2 4 +#define PCAP_ADC_CH_TS_Y1 5 +#define PCAP_ADC_CH_TS_Y2 6 + +#define PCAP_ADC_T_NOW 0 +#define PCAP_ADC_T_IN_BURST 1 +#define PCAP_ADC_T_OUT_BURST 2 + +#define PCAP_ADC_ATO_IN_BURST 
6 +#define PCAP_ADC_ATO_OUT_BURST 0 + +#define PCAP_ADC_TS_M_XY 1 +#define PCAP_ADC_TS_M_PRESSURE 2 +#define PCAP_ADC_TS_M_PLATE_X 3 +#define PCAP_ADC_TS_M_PLATE_Y 4 +#define PCAP_ADC_TS_M_STANDBY 5 +#define PCAP_ADC_TS_M_NONTS 6 + +#define PCAP_ADR_ADD1_MASK 0x000003ff +#define PCAP_ADR_ADD1_SHIFT 0 +#define PCAP_ADR_ADD2_MASK 0x000ffc00 +#define PCAP_ADR_ADD2_SHIFT 10 +#define PCAP_ADR_ADINC1 (1 << 20) +#define PCAP_ADR_ADINC2 (1 << 21) +#define PCAP_ADR_ASC (1 << 22) +#define PCAP_ADR_ONESHOT (1 << 23) + +#define PCAP_BUSCTRL_FSENB (1 << 0) +#define PCAP_BUSCTRL_USB_SUSPEND (1 << 1) +#define PCAP_BUSCTRL_USB_PU (1 << 2) +#define PCAP_BUSCTRL_USB_PD (1 << 3) +#define PCAP_BUSCTRL_VUSB_EN (1 << 4) +#define PCAP_BUSCTRL_USB_PS (1 << 5) +#define PCAP_BUSCTRL_VUSB_MSTR_EN (1 << 6) +#define PCAP_BUSCTRL_VBUS_PD_ENB (1 << 7) +#define PCAP_BUSCTRL_CURRLIM (1 << 8) +#define PCAP_BUSCTRL_RS232ENB (1 << 9) +#define PCAP_BUSCTRL_RS232_DIR (1 << 10) +#define PCAP_BUSCTRL_SE0_CONN (1 << 11) +#define PCAP_BUSCTRL_USB_PDM (1 << 12) +#define PCAP_BUSCTRL_BUS_PRI_ADJ (1 << 24) + +/* leds */ +#define PCAP_LED0 0 +#define PCAP_LED1 1 +#define PCAP_BL0 2 +#define PCAP_BL1 3 +#define PCAP_LED_3MA 0 +#define PCAP_LED_4MA 1 +#define PCAP_LED_5MA 2 +#define PCAP_LED_9MA 3 +#define PCAP_LED_T_MASK 0xf +#define PCAP_LED_C_MASK 0x3 +#define PCAP_BL_MASK 0x1f +#define PCAP_BL0_SHIFT 0 +#define PCAP_LED0_EN (1 << 5) +#define PCAP_LED1_EN (1 << 6) +#define PCAP_LED0_T_SHIFT 7 +#define PCAP_LED1_T_SHIFT 11 +#define PCAP_LED0_C_SHIFT 15 +#define PCAP_LED1_C_SHIFT 17 +#define PCAP_BL1_SHIFT 20 + +/* RTC */ +#define PCAP_RTC_DAY_MASK 0x3fff +#define PCAP_RTC_TOD_MASK 0xffff +#define PCAP_RTC_PC_MASK 0x7 +#define SEC_PER_DAY 86400 + +#endif diff --git a/include/linux/mfd/htc-egpio.h b/include/linux/mfd/htc-egpio.h new file mode 100644 index 00000000..b4201c97 --- /dev/null +++ b/include/linux/mfd/htc-egpio.h @@ -0,0 +1,57 @@ +/* + * HTC simple EGPIO irq and gpio extender + */ + +#ifndef __HTC_EGPIO_H__ +#define __HTC_EGPIO_H__ + +#include <linux/gpio.h> + +/* Descriptive values for all-in or all-out htc_egpio_chip descriptors. 
*/ +#define HTC_EGPIO_OUTPUT (~0) +#define HTC_EGPIO_INPUT 0 + +/** + * struct htc_egpio_chip - descriptor to create gpio_chip for register range + * @reg_start: index of first register + * @gpio_base: gpio number of first pin in this register range + * @num_gpios: number of gpios in this register range, max BITS_PER_LONG + * (number of registers = DIV_ROUND_UP(num_gpios, reg_width)) + * @direction: bitfield, '0' = input, '1' = output, + */ +struct htc_egpio_chip { + int reg_start; + int gpio_base; + int num_gpios; + unsigned long direction; + unsigned long initial_values; +}; + +/** + * struct htc_egpio_platform_data - description provided by the arch + * @irq_base: beginning of available IRQs (eg, IRQ_BOARD_START) + * @num_irqs: number of irqs + * @reg_width: number of bits per register, either 8 or 16 bit + * @bus_width: alignment of the registers, either 16 or 32 bit + * @invert_acks: set if chip requires writing '0' to ack an irq, instead of '1' + * @ack_register: location of the irq/ack register + * @chip: pointer to array of htc_egpio_chip descriptors + * @num_chips: number of egpio chip descriptors + */ +struct htc_egpio_platform_data { + int bus_width; + int reg_width; + + int irq_base; + int num_irqs; + int invert_acks; + int ack_register; + + struct htc_egpio_chip *chip; + int num_chips; +}; + +/* Determine the wakeup irq, to be called during early resume */ +extern int htc_egpio_get_wakeup_irq(struct device *dev); + +#endif diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h new file mode 100644 index 00000000..3d3ed67b --- /dev/null +++ b/include/linux/mfd/htc-pasic3.h @@ -0,0 +1,54 @@ +/* + * HTC PASIC3 driver - LEDs and DS1WM + * + * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + * + */ + +#ifndef __PASIC3_H +#define __PASIC3_H + +#include <linux/platform_device.h> +#include <linux/leds.h> + +extern void pasic3_write_register(struct device *dev, u32 reg, u8 val); +extern u8 pasic3_read_register(struct device *dev, u32 reg); + +/* + * mask for registers 0x20,0x21,0x22 + */ +#define PASIC3_MASK_LED0 0x04 +#define PASIC3_MASK_LED1 0x08 +#define PASIC3_MASK_LED2 0x40 + +/* + * bits in register 0x06 + */ +#define PASIC3_BIT2_LED0 0x08 +#define PASIC3_BIT2_LED1 0x10 +#define PASIC3_BIT2_LED2 0x20 + +struct pasic3_led { + struct led_classdev led; + unsigned int hw_num; + unsigned int bit2; + unsigned int mask; + struct pasic3_leds_machinfo *pdata; +}; + +struct pasic3_leds_machinfo { + unsigned int num_leds; + unsigned int power_gpio; + struct pasic3_led *leds; +}; + +struct pasic3_platform_data { + struct pasic3_leds_machinfo *led_pdata; + unsigned int clock_rate; +}; + +#endif diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h new file mode 100644 index 00000000..439a7a61 --- /dev/null +++ b/include/linux/mfd/intel_msic.h @@ -0,0 +1,456 @@ +/* + * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC + * + * Copyright (C) 2011, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_INTEL_MSIC_H__ +#define __LINUX_MFD_INTEL_MSIC_H__ + +/* ID */ +#define INTEL_MSIC_ID0 0x000 /* RO */ +#define INTEL_MSIC_ID1 0x001 /* RO */ + +/* IRQ */ +#define INTEL_MSIC_IRQLVL1 0x002 +#define INTEL_MSIC_ADC1INT 0x003 +#define INTEL_MSIC_CCINT 0x004 +#define INTEL_MSIC_PWRSRCINT 0x005 +#define INTEL_MSIC_PWRSRCINT1 0x006 +#define INTEL_MSIC_CHRINT 0x007 +#define INTEL_MSIC_CHRINT1 0x008 +#define INTEL_MSIC_RTCIRQ 0x009 +#define INTEL_MSIC_GPIO0LVIRQ 0x00a +#define INTEL_MSIC_GPIO1LVIRQ 0x00b +#define INTEL_MSIC_GPIOHVIRQ 0x00c +#define INTEL_MSIC_VRINT 0x00d +#define INTEL_MSIC_OCAUDIO 0x00e +#define INTEL_MSIC_ACCDET 0x00f +#define INTEL_MSIC_RESETIRQ1 0x010 +#define INTEL_MSIC_RESETIRQ2 0x011 +#define INTEL_MSIC_MADC1INT 0x012 +#define INTEL_MSIC_MCCINT 0x013 +#define INTEL_MSIC_MPWRSRCINT 0x014 +#define INTEL_MSIC_MPWRSRCINT1 0x015 +#define INTEL_MSIC_MCHRINT 0x016 +#define INTEL_MSIC_MCHRINT1 0x017 +#define INTEL_MSIC_RTCIRQMASK 0x018 +#define INTEL_MSIC_GPIO0LVIRQMASK 0x019 +#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a +#define INTEL_MSIC_GPIOHVIRQMASK 0x01b +#define INTEL_MSIC_VRINTMASK 0x01c +#define INTEL_MSIC_OCAUDIOMASK 0x01d +#define INTEL_MSIC_ACCDETMASK 0x01e +#define INTEL_MSIC_RESETIRQ1MASK 0x01f +#define INTEL_MSIC_RESETIRQ2MASK 0x020 +#define INTEL_MSIC_IRQLVL1MSK 0x021 +#define INTEL_MSIC_PBCONFIG 0x03e +#define INTEL_MSIC_PBSTATUS 0x03f /* RO */ + +/* GPIO */ +#define INTEL_MSIC_GPIO0LV7CTLO 0x040 +#define INTEL_MSIC_GPIO0LV6CTLO 0x041 +#define INTEL_MSIC_GPIO0LV5CTLO 0x042 +#define INTEL_MSIC_GPIO0LV4CTLO 0x043 +#define INTEL_MSIC_GPIO0LV3CTLO 0x044 +#define INTEL_MSIC_GPIO0LV2CTLO 0x045 +#define INTEL_MSIC_GPIO0LV1CTLO 0x046 +#define INTEL_MSIC_GPIO0LV0CTLO 0x047 +#define INTEL_MSIC_GPIO1LV7CTLOS 0x048 +#define INTEL_MSIC_GPIO1LV6CTLO 0x049 +#define INTEL_MSIC_GPIO1LV5CTLO 0x04a +#define INTEL_MSIC_GPIO1LV4CTLO 0x04b +#define INTEL_MSIC_GPIO1LV3CTLO 0x04c +#define INTEL_MSIC_GPIO1LV2CTLO 0x04d +#define INTEL_MSIC_GPIO1LV1CTLO 0x04e +#define INTEL_MSIC_GPIO1LV0CTLO 0x04f +#define INTEL_MSIC_GPIO0LV7CTLI 0x050 +#define INTEL_MSIC_GPIO0LV6CTLI 0x051 +#define INTEL_MSIC_GPIO0LV5CTLI 0x052 +#define INTEL_MSIC_GPIO0LV4CTLI 0x053 +#define INTEL_MSIC_GPIO0LV3CTLI 0x054 +#define INTEL_MSIC_GPIO0LV2CTLI 0x055 +#define INTEL_MSIC_GPIO0LV1CTLI 0x056 +#define INTEL_MSIC_GPIO0LV0CTLI 0x057 +#define INTEL_MSIC_GPIO1LV7CTLIS 0x058 +#define INTEL_MSIC_GPIO1LV6CTLI 0x059 +#define INTEL_MSIC_GPIO1LV5CTLI 0x05a +#define INTEL_MSIC_GPIO1LV4CTLI 0x05b +#define INTEL_MSIC_GPIO1LV3CTLI 0x05c +#define INTEL_MSIC_GPIO1LV2CTLI 0x05d +#define INTEL_MSIC_GPIO1LV1CTLI 0x05e +#define INTEL_MSIC_GPIO1LV0CTLI 0x05f +#define INTEL_MSIC_PWM0CLKDIV1 0x061 +#define INTEL_MSIC_PWM0CLKDIV0 0x062 +#define INTEL_MSIC_PWM1CLKDIV1 0x063 +#define INTEL_MSIC_PWM1CLKDIV0 0x064 +#define INTEL_MSIC_PWM2CLKDIV1 0x065 +#define INTEL_MSIC_PWM2CLKDIV0 0x066 +#define INTEL_MSIC_PWM0DUTYCYCLE 0x067 +#define INTEL_MSIC_PWM1DUTYCYCLE 0x068 +#define INTEL_MSIC_PWM2DUTYCYCLE 0x069 +#define INTEL_MSIC_GPIO0HV3CTLO 0x06d +#define INTEL_MSIC_GPIO0HV2CTLO 0x06e +#define INTEL_MSIC_GPIO0HV1CTLO 0x06f +#define INTEL_MSIC_GPIO0HV0CTLO 0x070 +#define INTEL_MSIC_GPIO1HV3CTLO 0x071 +#define INTEL_MSIC_GPIO1HV2CTLO 0x072 +#define INTEL_MSIC_GPIO1HV1CTLO 0x073 +#define INTEL_MSIC_GPIO1HV0CTLO 0x074 +#define INTEL_MSIC_GPIO0HV3CTLI 0x075 +#define INTEL_MSIC_GPIO0HV2CTLI 0x076 +#define INTEL_MSIC_GPIO0HV1CTLI 0x077 +#define INTEL_MSIC_GPIO0HV0CTLI 0x078 +#define INTEL_MSIC_GPIO1HV3CTLI 0x079 +#define 
INTEL_MSIC_GPIO1HV2CTLI 0x07a +#define INTEL_MSIC_GPIO1HV1CTLI 0x07b +#define INTEL_MSIC_GPIO1HV0CTLI 0x07c + +/* SVID */ +#define INTEL_MSIC_SVIDCTRL0 0x080 +#define INTEL_MSIC_SVIDCTRL1 0x081 +#define INTEL_MSIC_SVIDCTRL2 0x082 +#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */ +#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */ +#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */ +#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */ +#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087 +#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088 +#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089 +#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a +#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b +#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c +#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */ +#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */ +#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */ +#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */ +#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */ +#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */ +#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */ +#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */ + +/* VREG */ +#define INTEL_MSIC_VCCLATCH 0x0c0 +#define INTEL_MSIC_VNNLATCH 0x0c1 +#define INTEL_MSIC_VCCCNT 0x0c2 +#define INTEL_MSIC_SMPSRAMP 0x0c3 +#define INTEL_MSIC_VNNCNT 0x0c4 +#define INTEL_MSIC_VNNAONCNT 0x0c5 +#define INTEL_MSIC_VCC122AONCNT 0x0c6 +#define INTEL_MSIC_V180AONCNT 0x0c7 +#define INTEL_MSIC_V500CNT 0x0c8 +#define INTEL_MSIC_VIHFCNT 0x0c9 +#define INTEL_MSIC_LDORAMP1 0x0ca +#define INTEL_MSIC_LDORAMP2 0x0cb +#define INTEL_MSIC_VCC108AONCNT 0x0cc +#define INTEL_MSIC_VCC108ASCNT 0x0cd +#define INTEL_MSIC_VCC108CNT 0x0ce +#define INTEL_MSIC_VCCA100ASCNT 0x0cf +#define INTEL_MSIC_VCCA100CNT 0x0d0 +#define INTEL_MSIC_VCC180AONCNT 0x0d1 +#define INTEL_MSIC_VCC180CNT 0x0d2 +#define INTEL_MSIC_VCC330CNT 0x0d3 +#define INTEL_MSIC_VUSB330CNT 0x0d4 +#define INTEL_MSIC_VCCSDIOCNT 0x0d5 +#define INTEL_MSIC_VPROG1CNT 0x0d6 +#define INTEL_MSIC_VPROG2CNT 0x0d7 +#define INTEL_MSIC_VEMMCSCNT 0x0d8 +#define INTEL_MSIC_VEMMC1CNT 0x0d9 +#define INTEL_MSIC_VEMMC2CNT 0x0da +#define INTEL_MSIC_VAUDACNT 0x0db +#define INTEL_MSIC_VHSPCNT 0x0dc +#define INTEL_MSIC_VHSNCNT 0x0dd +#define INTEL_MSIC_VHDMICNT 0x0de +#define INTEL_MSIC_VOTGCNT 0x0df +#define INTEL_MSIC_V1P35CNT 0x0e0 +#define INTEL_MSIC_V330AONCNT 0x0e1 + +/* RESET */ +#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */ +#define INTEL_MSIC_ERCONFIG 0x101 + +/* BURST */ +#define INTEL_MSIC_BATCURRENTLIMIT12 0x102 +#define INTEL_MSIC_BATTIMELIMIT12 0x103 +#define INTEL_MSIC_BATTIMELIMIT3 0x104 +#define INTEL_MSIC_BATTIMEDB 0x105 +#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106 +#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107 +#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108 + +/* RTC */ +#define INTEL_MSIC_RTCB1 0x140 /* RO */ +#define INTEL_MSIC_RTCB2 0x141 /* RO */ +#define INTEL_MSIC_RTCB3 0x142 /* RO */ +#define INTEL_MSIC_RTCB4 0x143 /* RO */ +#define INTEL_MSIC_RTCOB1 0x144 +#define INTEL_MSIC_RTCOB2 0x145 +#define INTEL_MSIC_RTCOB3 0x146 +#define INTEL_MSIC_RTCOB4 0x147 +#define INTEL_MSIC_RTCAB1 0x148 +#define INTEL_MSIC_RTCAB2 0x149 +#define INTEL_MSIC_RTCAB3 0x14a +#define INTEL_MSIC_RTCAB4 0x14b +#define INTEL_MSIC_RTCWAB1 0x14c +#define INTEL_MSIC_RTCWAB2 0x14d +#define INTEL_MSIC_RTCWAB3 0x14e +#define INTEL_MSIC_RTCWAB4 0x14f +#define INTEL_MSIC_RTCSC1 0x150 +#define INTEL_MSIC_RTCSC2 0x151 +#define INTEL_MSIC_RTCSC3 0x152 +#define INTEL_MSIC_RTCSC4 0x153 +#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */ +#define INTEL_MSIC_RTCCONFIG1 0x155 +#define INTEL_MSIC_RTCCONFIG2 0x156 + 
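Editor's aside, not part of the patch above or below: the extern declarations near the end of this header expose a small register API (intel_msic_reg_read(), intel_msic_bulk_read() and friends). The sketch that follows is a hedged illustration of how a client might fetch the four read-only RTC counter bytes defined just above through that API; the function name and the #if 0 guard (needed here anyway, since the API is only declared further down the file) are assumptions made for this example.

#if 0	/* illustrative sketch only, not part of the original header */
static int example_msic_read_rtc_counter(u8 buf[4])
{
	/* The four read-only RTC counter bytes defined above. */
	unsigned short regs[4] = {
		INTEL_MSIC_RTCB1, INTEL_MSIC_RTCB2,
		INTEL_MSIC_RTCB3, INTEL_MSIC_RTCB4,
	};

	/* One bulk read fetches all four bytes; its return value is passed on. */
	return intel_msic_bulk_read(regs, buf, 4);
}
#endif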
+/* CHARGER */ +#define INTEL_MSIC_BDTIMER 0x180 +#define INTEL_MSIC_BATTRMV 0x181 +#define INTEL_MSIC_VBUSDET 0x182 +#define INTEL_MSIC_VBUSDET1 0x183 +#define INTEL_MSIC_ADPHVDET 0x184 +#define INTEL_MSIC_ADPLVDET 0x185 +#define INTEL_MSIC_ADPDETDBDM 0x186 +#define INTEL_MSIC_LOWBATTDET 0x187 +#define INTEL_MSIC_CHRCTRL 0x188 +#define INTEL_MSIC_CHRCVOLTAGE 0x189 +#define INTEL_MSIC_CHRCCURRENT 0x18a +#define INTEL_MSIC_SPCHARGER 0x18b +#define INTEL_MSIC_CHRTTIME 0x18c +#define INTEL_MSIC_CHRCTRL1 0x18d +#define INTEL_MSIC_PWRSRCLMT 0x18e +#define INTEL_MSIC_CHRSTWDT 0x18f +#define INTEL_MSIC_WDTWRITE 0x190 /* WO */ +#define INTEL_MSIC_CHRSAFELMT 0x191 +#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */ +#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */ +#define INTEL_MSIC_CHRLEDPWM 0x194 +#define INTEL_MSIC_CHRLEDCTRL 0x195 + +/* ADC */ +#define INTEL_MSIC_ADC1CNTL1 0x1c0 +#define INTEL_MSIC_ADC1CNTL2 0x1c1 +#define INTEL_MSIC_ADC1CNTL3 0x1c2 +#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */ +#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */ +#define INTEL_MSIC_ADC1ADDR0 0x1c5 +#define INTEL_MSIC_ADC1ADDR1 0x1c6 +#define INTEL_MSIC_ADC1ADDR2 0x1c7 +#define INTEL_MSIC_ADC1ADDR3 0x1c8 +#define INTEL_MSIC_ADC1ADDR4 0x1c9 +#define INTEL_MSIC_ADC1ADDR5 0x1ca +#define INTEL_MSIC_ADC1ADDR6 0x1cb +#define INTEL_MSIC_ADC1ADDR7 0x1cc +#define INTEL_MSIC_ADC1ADDR8 0x1cd +#define INTEL_MSIC_ADC1ADDR9 0x1ce +#define INTEL_MSIC_ADC1ADDR10 0x1cf +#define INTEL_MSIC_ADC1ADDR11 0x1d0 +#define INTEL_MSIC_ADC1ADDR12 0x1d1 +#define INTEL_MSIC_ADC1ADDR13 0x1d2 +#define INTEL_MSIC_ADC1ADDR14 0x1d3 +#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */ +#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */ +#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */ +#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */ +#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */ +#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */ +#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */ +#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */ +#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */ +#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */ +#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */ +#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */ +#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */ +#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */ +#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */ +#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */ +#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */ +#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */ +#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */ +#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */ +#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */ +#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */ +#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */ +#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */ +#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */ +#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */ +#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */ +#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */ +#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */ +#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */ +#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */ +#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */ +#define INTEL_MSIC_ADC1BV1H 0x1f4 /* RO */ +#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */ +#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */ +#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */ +#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */ +#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */ +#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */ +#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */ +#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */ +#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */ +#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */ +#define INTEL_MSIC_ADC1BI2L 
0x1ff /* RO */ +#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */ +#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */ +#define INTEL_MSIC_CCCNTL 0x202 +#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */ +#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */ +#define INTEL_MSIC_CCADCHA 0x205 /* RO */ +#define INTEL_MSIC_CCADCLA 0x206 /* RO */ + +/* AUDIO */ +#define INTEL_MSIC_AUDPLLCTRL 0x240 +#define INTEL_MSIC_DMICBUF0123 0x241 +#define INTEL_MSIC_DMICBUF45 0x242 +#define INTEL_MSIC_DMICGPO 0x244 +#define INTEL_MSIC_DMICMUX 0x245 +#define INTEL_MSIC_DMICCLK 0x246 +#define INTEL_MSIC_MICBIAS 0x247 +#define INTEL_MSIC_ADCCONFIG 0x248 +#define INTEL_MSIC_MICAMP1 0x249 +#define INTEL_MSIC_MICAMP2 0x24a +#define INTEL_MSIC_NOISEMUX 0x24b +#define INTEL_MSIC_AUDIOMUX12 0x24c +#define INTEL_MSIC_AUDIOMUX34 0x24d +#define INTEL_MSIC_AUDIOSINC 0x24e +#define INTEL_MSIC_AUDIOTXEN 0x24f +#define INTEL_MSIC_HSEPRXCTRL 0x250 +#define INTEL_MSIC_IHFRXCTRL 0x251 +#define INTEL_MSIC_VOICETXVOL 0x252 +#define INTEL_MSIC_SIDETONEVOL 0x253 +#define INTEL_MSIC_MUSICSHARVOL 0x254 +#define INTEL_MSIC_VOICETXCTRL 0x255 +#define INTEL_MSIC_HSMIXER 0x256 +#define INTEL_MSIC_DACCONFIG 0x257 +#define INTEL_MSIC_SOFTMUTE 0x258 +#define INTEL_MSIC_HSLVOLCTRL 0x259 +#define INTEL_MSIC_HSRVOLCTRL 0x25a +#define INTEL_MSIC_IHFLVOLCTRL 0x25b +#define INTEL_MSIC_IHFRVOLCTRL 0x25c +#define INTEL_MSIC_DRIVEREN 0x25d +#define INTEL_MSIC_LINEOUTCTRL 0x25e +#define INTEL_MSIC_VIB1CTRL1 0x25f +#define INTEL_MSIC_VIB1CTRL2 0x260 +#define INTEL_MSIC_VIB1CTRL3 0x261 +#define INTEL_MSIC_VIB1SPIPCM_1 0x262 +#define INTEL_MSIC_VIB1SPIPCM_2 0x263 +#define INTEL_MSIC_VIB1CTRL5 0x264 +#define INTEL_MSIC_VIB2CTRL1 0x265 +#define INTEL_MSIC_VIB2CTRL2 0x266 +#define INTEL_MSIC_VIB2CTRL3 0x267 +#define INTEL_MSIC_VIB2SPIPCM_1 0x268 +#define INTEL_MSIC_VIB2SPIPCM_2 0x269 +#define INTEL_MSIC_VIB2CTRL5 0x26a +#define INTEL_MSIC_BTNCTRL1 0x26b +#define INTEL_MSIC_BTNCTRL2 0x26c +#define INTEL_MSIC_PCM1TXSLOT01 0x26d +#define INTEL_MSIC_PCM1TXSLOT23 0x26e +#define INTEL_MSIC_PCM1TXSLOT45 0x26f +#define INTEL_MSIC_PCM1RXSLOT0123 0x270 +#define INTEL_MSIC_PCM1RXSLOT045 0x271 +#define INTEL_MSIC_PCM2TXSLOT01 0x272 +#define INTEL_MSIC_PCM2TXSLOT23 0x273 +#define INTEL_MSIC_PCM2TXSLOT45 0x274 +#define INTEL_MSIC_PCM2RXSLOT01 0x275 +#define INTEL_MSIC_PCM2RXSLOT23 0x276 +#define INTEL_MSIC_PCM2RXSLOT45 0x277 +#define INTEL_MSIC_PCM1CTRL1 0x278 +#define INTEL_MSIC_PCM1CTRL2 0x279 +#define INTEL_MSIC_PCM1CTRL3 0x27a +#define INTEL_MSIC_PCM2CTRL1 0x27b +#define INTEL_MSIC_PCM2CTRL2 0x27c + +/* HDMI */ +#define INTEL_MSIC_HDMIPUEN 0x280 +#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */ + +/* Physical address of the start of the MSIC interrupt tree in SRAM */ +#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0 + +/** + * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver + * @gpio_base: base number for the GPIOs + */ +struct intel_msic_gpio_pdata { + unsigned gpio_base; +}; + +/** + * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver + * @gpio: GPIO number used for OCD interrupts + * + * The MSIC MFD driver converts @gpio into an IRQ number and passes it to + * the OCD driver as %IORESOURCE_IRQ. 
+ */ +struct intel_msic_ocd_pdata { + unsigned gpio; +}; + +/* MSIC embedded blocks (subdevices) */ +enum intel_msic_block { + INTEL_MSIC_BLOCK_TOUCH, + INTEL_MSIC_BLOCK_ADC, + INTEL_MSIC_BLOCK_BATTERY, + INTEL_MSIC_BLOCK_GPIO, + INTEL_MSIC_BLOCK_AUDIO, + INTEL_MSIC_BLOCK_HDMI, + INTEL_MSIC_BLOCK_THERMAL, + INTEL_MSIC_BLOCK_POWER_BTN, + INTEL_MSIC_BLOCK_OCD, + + INTEL_MSIC_BLOCK_LAST, +}; + +/** + * struct intel_msic_platform_data - platform data for the MSIC driver + * @irq: array of interrupt numbers, one per device. If @irq is set to %0 + * for a given block, the corresponding platform device is not + * created. For devices which don't have an interrupt, use %0xff + * (this is same as in SFI spec). + * @gpio: platform data for the MSIC GPIO driver + * @ocd: platform data for the MSIC OCD driver + * + * Once the MSIC driver is initialized, the register interface is ready to + * use. All the platform devices for subdevices are created after the + * register interface is ready so that we can guarantee its availability to + * the subdevice drivers. + * + * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ + * resources of the created platform device. + */ +struct intel_msic_platform_data { + int irq[INTEL_MSIC_BLOCK_LAST]; + struct intel_msic_gpio_pdata *gpio; + struct intel_msic_ocd_pdata *ocd; +}; + +struct intel_msic; + +extern int intel_msic_reg_read(unsigned short reg, u8 *val); +extern int intel_msic_reg_write(unsigned short reg, u8 val); +extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask); +extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count); +extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count); + +/* + * pdev_to_intel_msic - gets an MSIC instance from the platform device + * @pdev: platform device pointer + * + * The client drivers need to have pointer to the MSIC instance if they + * want to call intel_msic_irq_read(). This macro can be used for + * convenience to get the MSIC pointer from @pdev where needed. This is + * _only_ valid for devices which are managed by the MSIC. + */ +#define pdev_to_intel_msic(pdev) (dev_get_drvdata(pdev->dev.parent)) + +extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg, + u8 *val); + +#endif /* __LINUX_MFD_INTEL_MSIC_H__ */ diff --git a/include/linux/mfd/janz.h b/include/linux/mfd/janz.h new file mode 100644 index 00000000..e9994c46 --- /dev/null +++ b/include/linux/mfd/janz.h @@ -0,0 +1,54 @@ +/* + * Common Definitions for Janz MODULbus devices + * + * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef JANZ_H +#define JANZ_H + +struct janz_platform_data { + /* MODULbus Module Number */ + unsigned int modno; +}; + +/* PLX bridge chip onboard registers */ +struct janz_cmodio_onboard_regs { + u8 unused1; + + /* + * Read access: interrupt status + * Write access: interrupt disable + */ + u8 int_disable; + u8 unused2; + + /* + * Read access: MODULbus number (hex switch) + * Write access: interrupt enable + */ + u8 int_enable; + u8 unused3; + + /* write-only */ + u8 reset_assert; + u8 unused4; + + /* write-only */ + u8 reset_deassert; + u8 unused5; + + /* read-write access to serial EEPROM */ + u8 eep; + u8 unused6; + + /* write-only access to EEPROM chip select */ + u8 enid; +}; + +#endif /* JANZ_H */ diff --git a/include/linux/mfd/max8925.h b/include/linux/mfd/max8925.h new file mode 100644 index 00000000..15b2392a --- /dev/null +++ b/include/linux/mfd/max8925.h @@ -0,0 +1,256 @@ +/* + * Maxim8925 Interface + * + * Copyright (C) 2009 Marvell International Ltd. + * Haojian Zhuang <haojian.zhuang@marvell.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MFD_MAX8925_H +#define __LINUX_MFD_MAX8925_H + +#include <linux/mutex.h> +#include <linux/interrupt.h> + +/* Unified sub device IDs for MAX8925 */ +enum { + MAX8925_ID_SD1, + MAX8925_ID_SD2, + MAX8925_ID_SD3, + MAX8925_ID_LDO1, + MAX8925_ID_LDO2, + MAX8925_ID_LDO3, + MAX8925_ID_LDO4, + MAX8925_ID_LDO5, + MAX8925_ID_LDO6, + MAX8925_ID_LDO7, + MAX8925_ID_LDO8, + MAX8925_ID_LDO9, + MAX8925_ID_LDO10, + MAX8925_ID_LDO11, + MAX8925_ID_LDO12, + MAX8925_ID_LDO13, + MAX8925_ID_LDO14, + MAX8925_ID_LDO15, + MAX8925_ID_LDO16, + MAX8925_ID_LDO17, + MAX8925_ID_LDO18, + MAX8925_ID_LDO19, + MAX8925_ID_LDO20, + MAX8925_ID_MAX, +}; + +enum { + /* + * Charging current threshold trigger going from fast charge + * to TOPOFF charge. From 5% to 20% of fasting charging current. 
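	 * (Editor's illustrative note, not part of the original header: with the
	 * fast-charge current programmed to MAX8925_FCHG_600MA, for example,
	 * MAX8925_TOPOFF_THR_10PER corresponds to roughly 60 mA.)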
+ */ + MAX8925_TOPOFF_THR_5PER, + MAX8925_TOPOFF_THR_10PER, + MAX8925_TOPOFF_THR_15PER, + MAX8925_TOPOFF_THR_20PER, +}; + +enum { + /* Fast charging current */ + MAX8925_FCHG_85MA, + MAX8925_FCHG_300MA, + MAX8925_FCHG_460MA, + MAX8925_FCHG_600MA, + MAX8925_FCHG_700MA, + MAX8925_FCHG_800MA, + MAX8925_FCHG_900MA, + MAX8925_FCHG_1000MA, +}; + +/* Charger registers */ +#define MAX8925_CHG_IRQ1 (0x7e) +#define MAX8925_CHG_IRQ2 (0x7f) +#define MAX8925_CHG_IRQ1_MASK (0x80) +#define MAX8925_CHG_IRQ2_MASK (0x81) +#define MAX8925_CHG_STATUS (0x82) + +/* GPM registers */ +#define MAX8925_SYSENSEL (0x00) +#define MAX8925_ON_OFF_IRQ1 (0x01) +#define MAX8925_ON_OFF_IRQ1_MASK (0x02) +#define MAX8925_ON_OFF_STATUS (0x03) +#define MAX8925_ON_OFF_IRQ2 (0x0d) +#define MAX8925_ON_OFF_IRQ2_MASK (0x0e) +#define MAX8925_RESET_CNFG (0x0f) + +/* Touch registers */ +#define MAX8925_TSC_IRQ (0x00) +#define MAX8925_TSC_IRQ_MASK (0x01) +#define MAX8925_TSC_CNFG1 (0x02) +#define MAX8925_ADC_SCHED (0x10) +#define MAX8925_ADC_RES_END (0x6f) + +#define MAX8925_NREF_OK (1 << 4) + +/* RTC registers */ +#define MAX8925_ALARM0_CNTL (0x18) +#define MAX8925_ALARM1_CNTL (0x19) +#define MAX8925_RTC_IRQ (0x1c) +#define MAX8925_RTC_IRQ_MASK (0x1d) +#define MAX8925_MPL_CNTL (0x1e) + +/* WLED registers */ +#define MAX8925_WLED_MODE_CNTL (0x84) +#define MAX8925_WLED_CNTL (0x85) + +/* MAX8925 Registers */ +#define MAX8925_SDCTL1 (0x04) +#define MAX8925_SDCTL2 (0x07) +#define MAX8925_SDCTL3 (0x0A) +#define MAX8925_SDV1 (0x06) +#define MAX8925_SDV2 (0x09) +#define MAX8925_SDV3 (0x0C) +#define MAX8925_LDOCTL1 (0x18) +#define MAX8925_LDOCTL2 (0x1C) +#define MAX8925_LDOCTL3 (0x20) +#define MAX8925_LDOCTL4 (0x24) +#define MAX8925_LDOCTL5 (0x28) +#define MAX8925_LDOCTL6 (0x2C) +#define MAX8925_LDOCTL7 (0x30) +#define MAX8925_LDOCTL8 (0x34) +#define MAX8925_LDOCTL9 (0x38) +#define MAX8925_LDOCTL10 (0x3C) +#define MAX8925_LDOCTL11 (0x40) +#define MAX8925_LDOCTL12 (0x44) +#define MAX8925_LDOCTL13 (0x48) +#define MAX8925_LDOCTL14 (0x4C) +#define MAX8925_LDOCTL15 (0x50) +#define MAX8925_LDOCTL16 (0x10) +#define MAX8925_LDOCTL17 (0x14) +#define MAX8925_LDOCTL18 (0x72) +#define MAX8925_LDOCTL19 (0x5C) +#define MAX8925_LDOCTL20 (0x9C) +#define MAX8925_LDOVOUT1 (0x1A) +#define MAX8925_LDOVOUT2 (0x1E) +#define MAX8925_LDOVOUT3 (0x22) +#define MAX8925_LDOVOUT4 (0x26) +#define MAX8925_LDOVOUT5 (0x2A) +#define MAX8925_LDOVOUT6 (0x2E) +#define MAX8925_LDOVOUT7 (0x32) +#define MAX8925_LDOVOUT8 (0x36) +#define MAX8925_LDOVOUT9 (0x3A) +#define MAX8925_LDOVOUT10 (0x3E) +#define MAX8925_LDOVOUT11 (0x42) +#define MAX8925_LDOVOUT12 (0x46) +#define MAX8925_LDOVOUT13 (0x4A) +#define MAX8925_LDOVOUT14 (0x4E) +#define MAX8925_LDOVOUT15 (0x52) +#define MAX8925_LDOVOUT16 (0x12) +#define MAX8925_LDOVOUT17 (0x16) +#define MAX8925_LDOVOUT18 (0x74) +#define MAX8925_LDOVOUT19 (0x5E) +#define MAX8925_LDOVOUT20 (0x9E) + +/* bit definitions */ +#define CHG_IRQ1_MASK (0x07) +#define CHG_IRQ2_MASK (0xff) +#define ON_OFF_IRQ1_MASK (0xff) +#define ON_OFF_IRQ2_MASK (0x03) +#define TSC_IRQ_MASK (0x03) +#define RTC_IRQ_MASK (0x0c) + +#define MAX8925_MAX_REGULATOR (23) + +#define MAX8925_NAME_SIZE (32) + +/* IRQ definitions */ +enum { + MAX8925_IRQ_VCHG_DC_OVP, + MAX8925_IRQ_VCHG_DC_F, + MAX8925_IRQ_VCHG_DC_R, + MAX8925_IRQ_VCHG_THM_OK_R, + MAX8925_IRQ_VCHG_THM_OK_F, + MAX8925_IRQ_VCHG_SYSLOW_F, + MAX8925_IRQ_VCHG_SYSLOW_R, + MAX8925_IRQ_VCHG_RST, + MAX8925_IRQ_VCHG_DONE, + MAX8925_IRQ_VCHG_TOPOFF, + MAX8925_IRQ_VCHG_TMR_FAULT, + MAX8925_IRQ_GPM_RSTIN, + MAX8925_IRQ_GPM_MPL, + 
MAX8925_IRQ_GPM_SW_3SEC, + MAX8925_IRQ_GPM_EXTON_F, + MAX8925_IRQ_GPM_EXTON_R, + MAX8925_IRQ_GPM_SW_1SEC, + MAX8925_IRQ_GPM_SW_F, + MAX8925_IRQ_GPM_SW_R, + MAX8925_IRQ_GPM_SYSCKEN_F, + MAX8925_IRQ_GPM_SYSCKEN_R, + MAX8925_IRQ_RTC_ALARM1, + MAX8925_IRQ_RTC_ALARM0, + MAX8925_IRQ_TSC_STICK, + MAX8925_IRQ_TSC_NSTICK, + MAX8925_NR_IRQS, +}; + +struct max8925_chip { + struct device *dev; + struct i2c_client *i2c; + struct i2c_client *adc; + struct i2c_client *rtc; + struct mutex io_lock; + struct mutex irq_lock; + + int irq_base; + int core_irq; + int tsc_irq; + + unsigned int wakeup_flag; +}; + +struct max8925_backlight_pdata { + int lxw_scl; /* 0/1 -- 0.8Ohm/0.4Ohm */ + int lxw_freq; /* 700KHz ~ 1400KHz */ + int dual_string; /* 0/1 -- single/dual string */ +}; + +struct max8925_touch_pdata { + unsigned int flags; +}; + +struct max8925_power_pdata { + int (*set_charger)(int); + unsigned batt_detect:1; + unsigned topoff_threshold:2; + unsigned fast_charge:3; /* charge current */ + unsigned no_temp_support:1; /* set if no temperature detect */ + unsigned no_insert_detect:1; /* set if no ac insert detect */ + char **supplied_to; + int num_supplicants; +}; + +/* + * irq_base: stores IRQ base number of MAX8925 in platform + * tsc_irq: stores IRQ number of MAX8925 TSC + */ +struct max8925_platform_data { + struct max8925_backlight_pdata *backlight; + struct max8925_touch_pdata *touch; + struct max8925_power_pdata *power; + struct regulator_init_data *regulator[MAX8925_MAX_REGULATOR]; + + int irq_base; + int tsc_irq; +}; + +extern int max8925_reg_read(struct i2c_client *, int); +extern int max8925_reg_write(struct i2c_client *, int, unsigned char); +extern int max8925_bulk_read(struct i2c_client *, int, int, unsigned char *); +extern int max8925_bulk_write(struct i2c_client *, int, int, unsigned char *); +extern int max8925_set_bits(struct i2c_client *, int, unsigned char, + unsigned char); + +extern int max8925_device_init(struct max8925_chip *, + struct max8925_platform_data *); +extern void max8925_device_exit(struct max8925_chip *); +#endif /* __LINUX_MFD_MAX8925_H */ + diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h new file mode 100644 index 00000000..3f4deb62 --- /dev/null +++ b/include/linux/mfd/max8997-private.h @@ -0,0 +1,363 @@ +/* + * max8997.h - Voltage regulator driver for the Maxim 8997 + * + * Copyright (C) 2010 Samsung Electrnoics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_MAX8997_PRIV_H +#define __LINUX_MFD_MAX8997_PRIV_H + +#include <linux/i2c.h> + +#define MAX8997_REG_INVALID (0xff) + +enum max8997_pmic_reg { + MAX8997_REG_PMIC_ID0 = 0x00, + MAX8997_REG_PMIC_ID1 = 0x01, + MAX8997_REG_INTSRC = 0x02, + MAX8997_REG_INT1 = 0x03, + MAX8997_REG_INT2 = 0x04, + MAX8997_REG_INT3 = 0x05, + MAX8997_REG_INT4 = 0x06, + + MAX8997_REG_INT1MSK = 0x08, + MAX8997_REG_INT2MSK = 0x09, + MAX8997_REG_INT3MSK = 0x0a, + MAX8997_REG_INT4MSK = 0x0b, + + MAX8997_REG_STATUS1 = 0x0d, + MAX8997_REG_STATUS2 = 0x0e, + MAX8997_REG_STATUS3 = 0x0f, + MAX8997_REG_STATUS4 = 0x10, + + MAX8997_REG_MAINCON1 = 0x13, + MAX8997_REG_MAINCON2 = 0x14, + MAX8997_REG_BUCKRAMP = 0x15, + + MAX8997_REG_BUCK1CTRL = 0x18, + MAX8997_REG_BUCK1DVS1 = 0x19, + MAX8997_REG_BUCK1DVS2 = 0x1a, + MAX8997_REG_BUCK1DVS3 = 0x1b, + MAX8997_REG_BUCK1DVS4 = 0x1c, + MAX8997_REG_BUCK1DVS5 = 0x1d, + MAX8997_REG_BUCK1DVS6 = 0x1e, + MAX8997_REG_BUCK1DVS7 = 0x1f, + MAX8997_REG_BUCK1DVS8 = 0x20, + MAX8997_REG_BUCK2CTRL = 0x21, + MAX8997_REG_BUCK2DVS1 = 0x22, + MAX8997_REG_BUCK2DVS2 = 0x23, + MAX8997_REG_BUCK2DVS3 = 0x24, + MAX8997_REG_BUCK2DVS4 = 0x25, + MAX8997_REG_BUCK2DVS5 = 0x26, + MAX8997_REG_BUCK2DVS6 = 0x27, + MAX8997_REG_BUCK2DVS7 = 0x28, + MAX8997_REG_BUCK2DVS8 = 0x29, + MAX8997_REG_BUCK3CTRL = 0x2a, + MAX8997_REG_BUCK3DVS = 0x2b, + MAX8997_REG_BUCK4CTRL = 0x2c, + MAX8997_REG_BUCK4DVS = 0x2d, + MAX8997_REG_BUCK5CTRL = 0x2e, + MAX8997_REG_BUCK5DVS1 = 0x2f, + MAX8997_REG_BUCK5DVS2 = 0x30, + MAX8997_REG_BUCK5DVS3 = 0x31, + MAX8997_REG_BUCK5DVS4 = 0x32, + MAX8997_REG_BUCK5DVS5 = 0x33, + MAX8997_REG_BUCK5DVS6 = 0x34, + MAX8997_REG_BUCK5DVS7 = 0x35, + MAX8997_REG_BUCK5DVS8 = 0x36, + MAX8997_REG_BUCK6CTRL = 0x37, + MAX8997_REG_BUCK6BPSKIPCTRL = 0x38, + MAX8997_REG_BUCK7CTRL = 0x39, + MAX8997_REG_BUCK7DVS = 0x3a, + MAX8997_REG_LDO1CTRL = 0x3b, + MAX8997_REG_LDO2CTRL = 0x3c, + MAX8997_REG_LDO3CTRL = 0x3d, + MAX8997_REG_LDO4CTRL = 0x3e, + MAX8997_REG_LDO5CTRL = 0x3f, + MAX8997_REG_LDO6CTRL = 0x40, + MAX8997_REG_LDO7CTRL = 0x41, + MAX8997_REG_LDO8CTRL = 0x42, + MAX8997_REG_LDO9CTRL = 0x43, + MAX8997_REG_LDO10CTRL = 0x44, + MAX8997_REG_LDO11CTRL = 0x45, + MAX8997_REG_LDO12CTRL = 0x46, + MAX8997_REG_LDO13CTRL = 0x47, + MAX8997_REG_LDO14CTRL = 0x48, + MAX8997_REG_LDO15CTRL = 0x49, + MAX8997_REG_LDO16CTRL = 0x4a, + MAX8997_REG_LDO17CTRL = 0x4b, + MAX8997_REG_LDO18CTRL = 0x4c, + MAX8997_REG_LDO21CTRL = 0x4d, + + MAX8997_REG_MBCCTRL1 = 0x50, + MAX8997_REG_MBCCTRL2 = 0x51, + MAX8997_REG_MBCCTRL3 = 0x52, + MAX8997_REG_MBCCTRL4 = 0x53, + MAX8997_REG_MBCCTRL5 = 0x54, + MAX8997_REG_MBCCTRL6 = 0x55, + MAX8997_REG_OTPCGHCVS = 0x56, + + MAX8997_REG_SAFEOUTCTRL = 0x5a, + + MAX8997_REG_LBCNFG1 = 0x5e, + MAX8997_REG_LBCNFG2 = 0x5f, + MAX8997_REG_BBCCTRL = 0x60, + + MAX8997_REG_FLASH1_CUR = 0x63, /* 0x63 ~ 0x6e for FLASH */ + MAX8997_REG_FLASH2_CUR = 0x64, + MAX8997_REG_MOVIE_CUR = 0x65, + MAX8997_REG_GSMB_CUR = 0x66, + MAX8997_REG_BOOST_CNTL = 0x67, + MAX8997_REG_LEN_CNTL = 0x68, + MAX8997_REG_FLASH_CNTL = 0x69, + MAX8997_REG_WDT_CNTL = 0x6a, + MAX8997_REG_MAXFLASH1 = 0x6b, + MAX8997_REG_MAXFLASH2 = 0x6c, + MAX8997_REG_FLASHSTATUS = 0x6d, + MAX8997_REG_FLASHSTATUSMASK = 0x6e, + + MAX8997_REG_GPIOCNTL1 = 0x70, + MAX8997_REG_GPIOCNTL2 = 0x71, + MAX8997_REG_GPIOCNTL3 = 0x72, + MAX8997_REG_GPIOCNTL4 = 0x73, + 
MAX8997_REG_GPIOCNTL5 = 0x74, + MAX8997_REG_GPIOCNTL6 = 0x75, + MAX8997_REG_GPIOCNTL7 = 0x76, + MAX8997_REG_GPIOCNTL8 = 0x77, + MAX8997_REG_GPIOCNTL9 = 0x78, + MAX8997_REG_GPIOCNTL10 = 0x79, + MAX8997_REG_GPIOCNTL11 = 0x7a, + MAX8997_REG_GPIOCNTL12 = 0x7b, + + MAX8997_REG_LDO1CONFIG = 0x80, + MAX8997_REG_LDO2CONFIG = 0x81, + MAX8997_REG_LDO3CONFIG = 0x82, + MAX8997_REG_LDO4CONFIG = 0x83, + MAX8997_REG_LDO5CONFIG = 0x84, + MAX8997_REG_LDO6CONFIG = 0x85, + MAX8997_REG_LDO7CONFIG = 0x86, + MAX8997_REG_LDO8CONFIG = 0x87, + MAX8997_REG_LDO9CONFIG = 0x88, + MAX8997_REG_LDO10CONFIG = 0x89, + MAX8997_REG_LDO11CONFIG = 0x8a, + MAX8997_REG_LDO12CONFIG = 0x8b, + MAX8997_REG_LDO13CONFIG = 0x8c, + MAX8997_REG_LDO14CONFIG = 0x8d, + MAX8997_REG_LDO15CONFIG = 0x8e, + MAX8997_REG_LDO16CONFIG = 0x8f, + MAX8997_REG_LDO17CONFIG = 0x90, + MAX8997_REG_LDO18CONFIG = 0x91, + MAX8997_REG_LDO21CONFIG = 0x92, + + MAX8997_REG_DVSOKTIMER1 = 0x97, + MAX8997_REG_DVSOKTIMER2 = 0x98, + MAX8997_REG_DVSOKTIMER4 = 0x99, + MAX8997_REG_DVSOKTIMER5 = 0x9a, + + MAX8997_REG_PMIC_END = 0x9b, +}; + +enum max8997_muic_reg { + MAX8997_MUIC_REG_ID = 0x0, + MAX8997_MUIC_REG_INT1 = 0x1, + MAX8997_MUIC_REG_INT2 = 0x2, + MAX8997_MUIC_REG_INT3 = 0x3, + MAX8997_MUIC_REG_STATUS1 = 0x4, + MAX8997_MUIC_REG_STATUS2 = 0x5, + MAX8997_MUIC_REG_STATUS3 = 0x6, + MAX8997_MUIC_REG_INTMASK1 = 0x7, + MAX8997_MUIC_REG_INTMASK2 = 0x8, + MAX8997_MUIC_REG_INTMASK3 = 0x9, + MAX8997_MUIC_REG_CDETCTRL = 0xa, + + MAX8997_MUIC_REG_CONTROL1 = 0xc, + MAX8997_MUIC_REG_CONTROL2 = 0xd, + MAX8997_MUIC_REG_CONTROL3 = 0xe, + + MAX8997_MUIC_REG_END = 0xf, +}; + +enum max8997_haptic_reg { + MAX8997_HAPTIC_REG_GENERAL = 0x00, + MAX8997_HAPTIC_REG_CONF1 = 0x01, + MAX8997_HAPTIC_REG_CONF2 = 0x02, + MAX8997_HAPTIC_REG_DRVCONF = 0x03, + MAX8997_HAPTIC_REG_CYCLECONF1 = 0x04, + MAX8997_HAPTIC_REG_CYCLECONF2 = 0x05, + MAX8997_HAPTIC_REG_SIGCONF1 = 0x06, + MAX8997_HAPTIC_REG_SIGCONF2 = 0x07, + MAX8997_HAPTIC_REG_SIGCONF3 = 0x08, + MAX8997_HAPTIC_REG_SIGCONF4 = 0x09, + MAX8997_HAPTIC_REG_SIGDC1 = 0x0a, + MAX8997_HAPTIC_REG_SIGDC2 = 0x0b, + MAX8997_HAPTIC_REG_SIGPWMDC1 = 0x0c, + MAX8997_HAPTIC_REG_SIGPWMDC2 = 0x0d, + MAX8997_HAPTIC_REG_SIGPWMDC3 = 0x0e, + MAX8997_HAPTIC_REG_SIGPWMDC4 = 0x0f, + MAX8997_HAPTIC_REG_MTR_REV = 0x10, + + MAX8997_HAPTIC_REG_END = 0x11, +}; + +/* slave addr = 0x0c: using "2nd part" of rev4 datasheet */ +enum max8997_rtc_reg { + MAX8997_RTC_CTRLMASK = 0x02, + MAX8997_RTC_CTRL = 0x03, + MAX8997_RTC_UPDATE1 = 0x04, + MAX8997_RTC_UPDATE2 = 0x05, + MAX8997_RTC_WTSR_SMPL = 0x06, + + MAX8997_RTC_SEC = 0x10, + MAX8997_RTC_MIN = 0x11, + MAX8997_RTC_HOUR = 0x12, + MAX8997_RTC_DAY_OF_WEEK = 0x13, + MAX8997_RTC_MONTH = 0x14, + MAX8997_RTC_YEAR = 0x15, + MAX8997_RTC_DAY_OF_MONTH = 0x16, + MAX8997_RTC_ALARM1_SEC = 0x17, + MAX8997_RTC_ALARM1_MIN = 0x18, + MAX8997_RTC_ALARM1_HOUR = 0x19, + MAX8997_RTC_ALARM1_DAY_OF_WEEK = 0x1a, + MAX8997_RTC_ALARM1_MONTH = 0x1b, + MAX8997_RTC_ALARM1_YEAR = 0x1c, + MAX8997_RTC_ALARM1_DAY_OF_MONTH = 0x1d, + MAX8997_RTC_ALARM2_SEC = 0x1e, + MAX8997_RTC_ALARM2_MIN = 0x1f, + MAX8997_RTC_ALARM2_HOUR = 0x20, + MAX8997_RTC_ALARM2_DAY_OF_WEEK = 0x21, + MAX8997_RTC_ALARM2_MONTH = 0x22, + MAX8997_RTC_ALARM2_YEAR = 0x23, + MAX8997_RTC_ALARM2_DAY_OF_MONTH = 0x24, +}; + +enum max8997_irq_source { + PMIC_INT1 = 0, + PMIC_INT2, + PMIC_INT3, + PMIC_INT4, + + FUEL_GAUGE, /* Ignored (MAX17042 driver handles) */ + + MUIC_INT1, + MUIC_INT2, + MUIC_INT3, + + GPIO_LOW, /* Not implemented */ + GPIO_HI, /* Not implemented */ + + FLASH_STATUS, /* Not implemented */ 
+ + MAX8997_IRQ_GROUP_NR, +}; + +enum max8997_irq { + MAX8997_PMICIRQ_PWRONR, + MAX8997_PMICIRQ_PWRONF, + MAX8997_PMICIRQ_PWRON1SEC, + MAX8997_PMICIRQ_JIGONR, + MAX8997_PMICIRQ_JIGONF, + MAX8997_PMICIRQ_LOWBAT2, + MAX8997_PMICIRQ_LOWBAT1, + + MAX8997_PMICIRQ_JIGR, + MAX8997_PMICIRQ_JIGF, + MAX8997_PMICIRQ_MR, + MAX8997_PMICIRQ_DVS1OK, + MAX8997_PMICIRQ_DVS2OK, + MAX8997_PMICIRQ_DVS3OK, + MAX8997_PMICIRQ_DVS4OK, + + MAX8997_PMICIRQ_CHGINS, + MAX8997_PMICIRQ_CHGRM, + MAX8997_PMICIRQ_DCINOVP, + MAX8997_PMICIRQ_TOPOFFR, + MAX8997_PMICIRQ_CHGRSTF, + MAX8997_PMICIRQ_MBCHGTMEXPD, + + MAX8997_PMICIRQ_RTC60S, + MAX8997_PMICIRQ_RTCA1, + MAX8997_PMICIRQ_RTCA2, + MAX8997_PMICIRQ_SMPL_INT, + MAX8997_PMICIRQ_RTC1S, + MAX8997_PMICIRQ_WTSR, + + MAX8997_MUICIRQ_ADCError, + MAX8997_MUICIRQ_ADCLow, + MAX8997_MUICIRQ_ADC, + + MAX8997_MUICIRQ_VBVolt, + MAX8997_MUICIRQ_DBChg, + MAX8997_MUICIRQ_DCDTmr, + MAX8997_MUICIRQ_ChgDetRun, + MAX8997_MUICIRQ_ChgTyp, + + MAX8997_MUICIRQ_OVP, + + MAX8997_IRQ_NR, +}; + +#define MAX8997_NUM_GPIO 12 +struct max8997_dev { + struct device *dev; + struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */ + struct i2c_client *rtc; /* slave addr 0x0c */ + struct i2c_client *haptic; /* slave addr 0x90 */ + struct i2c_client *muic; /* slave addr 0x4a */ + struct mutex iolock; + + int type; + struct platform_device *battery; /* battery control (not fuel gauge) */ + + int irq; + int ono; + int irq_base; + struct mutex irqlock; + int irq_masks_cur[MAX8997_IRQ_GROUP_NR]; + int irq_masks_cache[MAX8997_IRQ_GROUP_NR]; + + /* For hibernation */ + u8 reg_dump[MAX8997_REG_PMIC_END + MAX8997_MUIC_REG_END + + MAX8997_HAPTIC_REG_END]; + + bool gpio_status[MAX8997_NUM_GPIO]; +}; + +enum max8997_types { + TYPE_MAX8997, + TYPE_MAX8966, +}; + +extern int max8997_irq_init(struct max8997_dev *max8997); +extern void max8997_irq_exit(struct max8997_dev *max8997); +extern int max8997_irq_resume(struct max8997_dev *max8997); + +extern int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest); +extern int max8997_bulk_read(struct i2c_client *i2c, u8 reg, int count, + u8 *buf); +extern int max8997_write_reg(struct i2c_client *i2c, u8 reg, u8 value); +extern int max8997_bulk_write(struct i2c_client *i2c, u8 reg, int count, + u8 *buf); +extern int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask); + +#define MAX8997_GPIO_INT_BOTH (0x3 << 4) +#define MAX8997_GPIO_INT_RISE (0x2 << 4) +#define MAX8997_GPIO_INT_FALL (0x1 << 4) + +#define MAX8997_GPIO_INT_MASK (0x3 << 4) +#define MAX8997_GPIO_DATA_MASK (0x1 << 2) +#endif /* __LINUX_MFD_MAX8997_PRIV_H */ diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h new file mode 100644 index 00000000..28726dd5 --- /dev/null +++ b/include/linux/mfd/max8997.h @@ -0,0 +1,252 @@ +/* + * max8997.h - Driver for the Maxim 8997/8966 + * + * Copyright (C) 2009-2010 Samsung Electrnoics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * This driver is based on max8998.h + * + * MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices. + * Except Fuel Gauge, every device shares the same I2C bus and included in + * this mfd driver. Although the fuel gauge is included in the chip, it is + * excluded from the driver because a) it has a different I2C bus from + * others and b) it can be enabled simply by using MAX17042 driver. + */ + +#ifndef __LINUX_MFD_MAX8998_H +#define __LINUX_MFD_MAX8998_H + +#include <linux/regulator/consumer.h> + +/* MAX8997/8966 regulator IDs */ +enum max8998_regulators { + MAX8997_LDO1 = 0, + MAX8997_LDO2, + MAX8997_LDO3, + MAX8997_LDO4, + MAX8997_LDO5, + MAX8997_LDO6, + MAX8997_LDO7, + MAX8997_LDO8, + MAX8997_LDO9, + MAX8997_LDO10, + MAX8997_LDO11, + MAX8997_LDO12, + MAX8997_LDO13, + MAX8997_LDO14, + MAX8997_LDO15, + MAX8997_LDO16, + MAX8997_LDO17, + MAX8997_LDO18, + MAX8997_LDO21, + MAX8997_BUCK1, + MAX8997_BUCK2, + MAX8997_BUCK3, + MAX8997_BUCK4, + MAX8997_BUCK5, + MAX8997_BUCK6, + MAX8997_BUCK7, + MAX8997_EN32KHZ_AP, + MAX8997_EN32KHZ_CP, + MAX8997_ENVICHG, + MAX8997_ESAFEOUT1, + MAX8997_ESAFEOUT2, + MAX8997_CHARGER_CV, /* control MBCCV of MBCCTRL3 */ + MAX8997_CHARGER, /* charger current, MBCCTRL4 */ + MAX8997_CHARGER_TOPOFF, /* MBCCTRL5 */ + + MAX8997_REG_MAX, +}; + +struct max8997_regulator_data { + int id; + struct regulator_init_data *initdata; +}; + +enum max8997_muic_usb_type { + MAX8997_USB_HOST, + MAX8997_USB_DEVICE, +}; + +enum max8997_muic_charger_type { + MAX8997_CHARGER_TYPE_NONE = 0, + MAX8997_CHARGER_TYPE_USB, + MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT, + MAX8997_CHARGER_TYPE_DEDICATED_CHG, + MAX8997_CHARGER_TYPE_500MA, + MAX8997_CHARGER_TYPE_1A, + MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7, +}; + +struct max8997_muic_reg_data { + u8 addr; + u8 data; +}; + +/** + * struct max8997_muic_platform_data + * @usb_callback: callback function for USB + * inform callee of USB type (HOST or DEVICE) + * and attached state(true or false) + * @charger_callback: callback function for charger + * inform callee of charger_type + * and attached state(true or false) + * @deskdock_callback: callback function for desk dock + * inform callee of attached state(true or false) + * @cardock_callback: callback function for car dock + * inform callee of attached state(true or false) + * @mhl_callback: callback function for MHL (Mobile High-definition Link) + * inform callee of attached state(true or false) + * @uart_callback: callback function for JIG UART + * inform callee of attached state(true or false) + * @init_data: array of max8997_muic_reg_data + * used for initializing registers of MAX8997 MUIC device + * @num_init_data: array size of init_data + */ +struct max8997_muic_platform_data { + void (*usb_callback)(enum max8997_muic_usb_type usb_type, + bool attached); + void (*charger_callback)(bool attached, + enum max8997_muic_charger_type charger_type); + void (*deskdock_callback) (bool attached); + void (*cardock_callback) (bool attached); + void (*mhl_callback) (bool attached); + void (*uart_callback) (bool attached); + + struct max8997_muic_reg_data *init_data; + int num_init_data; +}; + +enum max8997_haptic_motor_type { + MAX8997_HAPTIC_ERM, + MAX8997_HAPTIC_LRA, +}; + +enum max8997_haptic_pulse_mode { + MAX8997_EXTERNAL_MODE, + MAX8997_INTERNAL_MODE, +}; + +enum 
max8997_haptic_pwm_divisor { + MAX8997_PWM_DIVISOR_32, + MAX8997_PWM_DIVISOR_64, + MAX8997_PWM_DIVISOR_128, + MAX8997_PWM_DIVISOR_256, +}; + +/** + * max8997_haptic_platform_data + * @pwm_channel_id: channel number of PWM device + * valid for MAX8997_EXTERNAL_MODE + * @pwm_period: period in nano second for PWM device + * valid for MAX8997_EXTERNAL_MODE + * @type: motor type + * @mode: pulse mode + * MAX8997_EXTERNAL_MODE: external PWM device is used to control motor + * MAX8997_INTERNAL_MODE: internal pulse generator is used to control motor + * @pwm_divisor: divisor for external PWM device + * @internal_mode_pattern: internal mode pattern for internal mode + * [0 - 3]: valid pattern number + * @pattern_cycle: the number of cycles of the waveform + * for the internal mode pattern + * [0 - 15]: available cycles + * @pattern_signal_period: period of the waveform for the internal mode pattern + * [0 - 255]: available period + */ +struct max8997_haptic_platform_data { + unsigned int pwm_channel_id; + unsigned int pwm_period; + + enum max8997_haptic_motor_type type; + enum max8997_haptic_pulse_mode mode; + enum max8997_haptic_pwm_divisor pwm_divisor; + + unsigned int internal_mode_pattern; + unsigned int pattern_cycle; + unsigned int pattern_signal_period; +}; + +enum max8997_led_mode { + MAX8997_NONE, + MAX8997_FLASH_MODE, + MAX8997_MOVIE_MODE, + MAX8997_FLASH_PIN_CONTROL_MODE, + MAX8997_MOVIE_PIN_CONTROL_MODE, +}; + +/** + * struct max8997_led_platform_data + * The number of LED devices for MAX8997 is two + * @mode: LED mode for each LED device + * @brightness: initial brightness for each LED device + * range: + * [0 - 31]: MAX8997_FLASH_MODE and MAX8997_FLASH_PIN_CONTROL_MODE + * [0 - 15]: MAX8997_MOVIE_MODE and MAX8997_MOVIE_PIN_CONTROL_MODE + */ +struct max8997_led_platform_data { + enum max8997_led_mode mode[2]; + u8 brightness[2]; +}; + +struct max8997_platform_data { + /* IRQ */ + int irq_base; + int ono; + int wakeup; + + /* ---- PMIC ---- */ + struct max8997_regulator_data *regulators; + int num_regulators; + + /* + * SET1~3 DVS GPIOs control Buck1, 2, and 5 simultaneously. Therefore, + * With buckx_gpiodvs enabled, the buckx cannot be controlled + * independently. To control buckx (of 1, 2, and 5) independently, + * disable buckx_gpiodvs and control with BUCKxDVS1 register. + * + * When buckx_gpiodvs and bucky_gpiodvs are both enabled, set_voltage + * on buckx will change the voltage of bucky at the same time. 
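	 * (Editor's illustrative note, not part of the original header: a board
	 * that wants GPIO-driven DVS only on BUCK1 would typically set
	 * buck1_gpiodvs, leave buck2_gpiodvs and buck5_gpiodvs unset, fill
	 * buck125_gpios[] with the three SET pin numbers, and pre-program the
	 * eight BUCK1 levels, in microvolts, in buck1_voltage[]; the concrete
	 * values are board-specific assumptions.)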
+ * + */ + bool ignore_gpiodvs_side_effect; + int buck125_gpios[3]; /* GPIO of [0]SET1, [1]SET2, [2]SET3 */ + int buck125_default_idx; /* Default value of SET1, 2, 3 */ + unsigned int buck1_voltage[8]; /* buckx_voltage in uV */ + bool buck1_gpiodvs; + unsigned int buck2_voltage[8]; + bool buck2_gpiodvs; + unsigned int buck5_voltage[8]; + bool buck5_gpiodvs; + + /* ---- Charger control ---- */ + /* eoc stands for 'end of charge' */ + int eoc_mA; /* 50 ~ 200mA by 10mA step */ + /* charge Full Timeout */ + int timeout; /* 0 (no timeout), 5, 6, 7 hours */ + + /* ---- MUIC ---- */ + struct max8997_muic_platform_data *muic_pdata; + + /* ---- HAPTIC ---- */ + struct max8997_haptic_platform_data *haptic_pdata; + + /* RTC: Not implemented */ + /* ---- LED ---- */ + struct max8997_led_platform_data *led_pdata; +}; + +#endif /* __LINUX_MFD_MAX8998_H */ diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h new file mode 100644 index 00000000..effa5d3b --- /dev/null +++ b/include/linux/mfd/max8998-private.h @@ -0,0 +1,177 @@ +/* + * max8998.h - Voltage regulator driver for the Maxim 8998 + * + * Copyright (C) 2009-2010 Samsung Electrnoics + * Kyungmin Park <kyungmin.park@samsung.com> + * Marek Szyprowski <m.szyprowski@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_MAX8998_PRIV_H +#define __LINUX_MFD_MAX8998_PRIV_H + +#define MAX8998_NUM_IRQ_REGS 4 + +/* MAX 8998 registers */ +enum { + MAX8998_REG_IRQ1, + MAX8998_REG_IRQ2, + MAX8998_REG_IRQ3, + MAX8998_REG_IRQ4, + MAX8998_REG_IRQM1, + MAX8998_REG_IRQM2, + MAX8998_REG_IRQM3, + MAX8998_REG_IRQM4, + MAX8998_REG_STATUS1, + MAX8998_REG_STATUS2, + MAX8998_REG_STATUSM1, + MAX8998_REG_STATUSM2, + MAX8998_REG_CHGR1, + MAX8998_REG_CHGR2, + MAX8998_REG_LDO_ACTIVE_DISCHARGE1, + MAX8998_REG_LDO_ACTIVE_DISCHARGE2, + MAX8998_REG_BUCK_ACTIVE_DISCHARGE3, + MAX8998_REG_ONOFF1, + MAX8998_REG_ONOFF2, + MAX8998_REG_ONOFF3, + MAX8998_REG_ONOFF4, + MAX8998_REG_BUCK1_VOLTAGE1, + MAX8998_REG_BUCK1_VOLTAGE2, + MAX8998_REG_BUCK1_VOLTAGE3, + MAX8998_REG_BUCK1_VOLTAGE4, + MAX8998_REG_BUCK2_VOLTAGE1, + MAX8998_REG_BUCK2_VOLTAGE2, + MAX8998_REG_BUCK3, + MAX8998_REG_BUCK4, + MAX8998_REG_LDO2_LDO3, + MAX8998_REG_LDO4, + MAX8998_REG_LDO5, + MAX8998_REG_LDO6, + MAX8998_REG_LDO7, + MAX8998_REG_LDO8_LDO9, + MAX8998_REG_LDO10_LDO11, + MAX8998_REG_LDO12, + MAX8998_REG_LDO13, + MAX8998_REG_LDO14, + MAX8998_REG_LDO15, + MAX8998_REG_LDO16, + MAX8998_REG_LDO17, + MAX8998_REG_BKCHR, + MAX8998_REG_LBCNFG1, + MAX8998_REG_LBCNFG2, +}; + +/* IRQ definitions */ +enum { + MAX8998_IRQ_DCINF, + MAX8998_IRQ_DCINR, + MAX8998_IRQ_JIGF, + MAX8998_IRQ_JIGR, + MAX8998_IRQ_PWRONF, + MAX8998_IRQ_PWRONR, + + MAX8998_IRQ_WTSREVNT, + MAX8998_IRQ_SMPLEVNT, + MAX8998_IRQ_ALARM1, + MAX8998_IRQ_ALARM0, + + MAX8998_IRQ_ONKEY1S, + MAX8998_IRQ_TOPOFFR, + MAX8998_IRQ_DCINOVPR, + MAX8998_IRQ_CHGRSTF, + MAX8998_IRQ_DONER, + MAX8998_IRQ_CHGFAULT, + + MAX8998_IRQ_LOBAT1, + MAX8998_IRQ_LOBAT2, + + MAX8998_IRQ_NR, +}; + +/* MAX8998 various variants */ +enum { + TYPE_MAX8998 = 0, /* Default */ + TYPE_LP3974, /* National version of MAX8998 */ + TYPE_LP3979, /* Added AVS */ +}; + +#define MAX8998_IRQ_DCINF_MASK (1 << 2) +#define MAX8998_IRQ_DCINR_MASK (1 << 3) +#define MAX8998_IRQ_JIGF_MASK (1 << 4) +#define MAX8998_IRQ_JIGR_MASK (1 << 5) +#define MAX8998_IRQ_PWRONF_MASK (1 << 6) +#define MAX8998_IRQ_PWRONR_MASK (1 << 7) + +#define MAX8998_IRQ_WTSREVNT_MASK (1 << 0) +#define MAX8998_IRQ_SMPLEVNT_MASK (1 << 1) +#define MAX8998_IRQ_ALARM1_MASK (1 << 2) +#define MAX8998_IRQ_ALARM0_MASK (1 << 3) + +#define MAX8998_IRQ_ONKEY1S_MASK (1 << 0) +#define MAX8998_IRQ_TOPOFFR_MASK (1 << 2) +#define MAX8998_IRQ_DCINOVPR_MASK (1 << 3) +#define MAX8998_IRQ_CHGRSTF_MASK (1 << 4) +#define MAX8998_IRQ_DONER_MASK (1 << 5) +#define MAX8998_IRQ_CHGFAULT_MASK (1 << 7) + +#define MAX8998_IRQ_LOBAT1_MASK (1 << 0) +#define MAX8998_IRQ_LOBAT2_MASK (1 << 1) + +#define MAX8998_ENRAMP (1 << 4) + +/** + * struct max8998_dev - max8998 master device for sub-drivers + * @dev: master device of the chip (can be used to access platform data) + * @i2c: i2c client private data for regulator + * @rtc: i2c client private data for rtc + * @iolock: mutex for serializing io access + * @irqlock: mutex for buslock + * @irq_base: base IRQ number for max8998, required for IRQs + * @irq: generic IRQ number for max8998 + * @ono: power onoff IRQ number for max8998 + * @irq_masks_cur: currently active value + * @irq_masks_cache: cached hardware value + * @type: indicate which max8998 "variant" is used + */ +struct max8998_dev { + struct device *dev; + struct i2c_client *i2c; + struct 
i2c_client *rtc; + struct mutex iolock; + struct mutex irqlock; + + int irq_base; + int irq; + int ono; + u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS]; + u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS]; + int type; + bool wakeup; +}; + +int max8998_irq_init(struct max8998_dev *max8998); +void max8998_irq_exit(struct max8998_dev *max8998); +int max8998_irq_resume(struct max8998_dev *max8998); + +extern int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest); +extern int max8998_bulk_read(struct i2c_client *i2c, u8 reg, int count, + u8 *buf); +extern int max8998_write_reg(struct i2c_client *i2c, u8 reg, u8 value); +extern int max8998_bulk_write(struct i2c_client *i2c, u8 reg, int count, + u8 *buf); +extern int max8998_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask); + +#endif /* __LINUX_MFD_MAX8998_PRIV_H */ diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h new file mode 100644 index 00000000..f4f0dfa4 --- /dev/null +++ b/include/linux/mfd/max8998.h @@ -0,0 +1,124 @@ +/* + * max8998.h - Voltage regulator driver for the Maxim 8998 + * + * Copyright (C) 2009-2010 Samsung Electronics + * Kyungmin Park <kyungmin.park@samsung.com> + * Marek Szyprowski <m.szyprowski@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_MFD_MAX8998_H +#define __LINUX_MFD_MAX8998_H + +#include <linux/regulator/machine.h> + +/* MAX 8998 regulator ids */ +enum { + MAX8998_LDO2 = 2, + MAX8998_LDO3, + MAX8998_LDO4, + MAX8998_LDO5, + MAX8998_LDO6, + MAX8998_LDO7, + MAX8998_LDO8, + MAX8998_LDO9, + MAX8998_LDO10, + MAX8998_LDO11, + MAX8998_LDO12, + MAX8998_LDO13, + MAX8998_LDO14, + MAX8998_LDO15, + MAX8998_LDO16, + MAX8998_LDO17, + MAX8998_BUCK1, + MAX8998_BUCK2, + MAX8998_BUCK3, + MAX8998_BUCK4, + MAX8998_EN32KHZ_AP, + MAX8998_EN32KHZ_CP, + MAX8998_ENVICHG, + MAX8998_ESAFEOUT1, + MAX8998_ESAFEOUT2, +}; + +/** + * max8998_regulator_data - regulator data + * @id: regulator id + * @initdata: regulator init data (constraints, supplies, ...) + */ +struct max8998_regulator_data { + int id; + struct regulator_init_data *initdata; +}; + +/** + * struct max8998_platform_data - packages regulator init data + * @regulators: array of defined regulators + * @num_regulators: number of regulators used + * @irq_base: base IRQ number for max8998, required for IRQs + * @ono: power onoff IRQ number for max8998 + * @buck_voltage_lock: Do NOT change the values of the following six + * registers set by buck?_voltage?. The voltage of BUCK1/2 cannot + * be other than the preset values.
+ * @buck1_voltage1: BUCK1 DVS mode 1 voltage register + * @buck1_voltage2: BUCK1 DVS mode 2 voltage register + * @buck1_voltage3: BUCK1 DVS mode 3 voltage register + * @buck1_voltage4: BUCK1 DVS mode 4 voltage register + * @buck2_voltage1: BUCK2 DVS mode 1 voltage register + * @buck2_voltage2: BUCK2 DVS mode 2 voltage register + * @buck1_set1: BUCK1 gpio pin 1 to set output voltage + * @buck1_set2: BUCK1 gpio pin 2 to set output voltage + * @buck1_default_idx: Default for BUCK1 gpio pin 1, 2 + * @buck2_set3: BUCK2 gpio pin to set output voltage + * @buck2_default_idx: Default for BUCK2 gpio pin. + * @wakeup: Allow waking up from suspend + * @rtc_delay: The LP3974 RTC chip has a bug that requires a delay after a + * register write before it is read back. + * @eoc: End of Charge Level in percent: 10% ~ 45% by 5% step + * If it equals 0, leave it unchanged. + * Otherwise, it is an invalid value. + * @restart: Restart Level in mV: 100, 150, 200, and -1 for disable. + * If it equals 0, leave it unchanged. + * Otherwise, it is an invalid value. + * @timeout: Full Timeout in hours: 5, 6, 7, and -1 for disable. + * If it equals 0, leave it unchanged. + * Otherwise, it is an invalid value. + */ +struct max8998_platform_data { + struct max8998_regulator_data *regulators; + int num_regulators; + int irq_base; + int ono; + bool buck_voltage_lock; + int buck1_voltage1; + int buck1_voltage2; + int buck1_voltage3; + int buck1_voltage4; + int buck2_voltage1; + int buck2_voltage2; + int buck1_set1; + int buck1_set2; + int buck1_default_idx; + int buck2_set3; + int buck2_default_idx; + bool wakeup; + bool rtc_delay; + int eoc; + int restart; + int timeout; +}; + +#endif /* __LINUX_MFD_MAX8998_H */ diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h new file mode 100644 index 00000000..a8eeda77 --- /dev/null +++ b/include/linux/mfd/mc13783.h @@ -0,0 +1,91 @@ +/* + * Copyright 2010 Yong Shen <yong.shen@linaro.org> + * Copyright 2009-2010 Pengutronix + * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
+ */ +#ifndef __LINUX_MFD_MC13783_H +#define __LINUX_MFD_MC13783_H + +#include <linux/mfd/mc13xxx.h> + +#define MC13783_REG_SW1A 0 +#define MC13783_REG_SW1B 1 +#define MC13783_REG_SW2A 2 +#define MC13783_REG_SW2B 3 +#define MC13783_REG_SW3 4 +#define MC13783_REG_PLL 5 +#define MC13783_REG_VAUDIO 6 +#define MC13783_REG_VIOHI 7 +#define MC13783_REG_VIOLO 8 +#define MC13783_REG_VDIG 9 +#define MC13783_REG_VGEN 10 +#define MC13783_REG_VRFDIG 11 +#define MC13783_REG_VRFREF 12 +#define MC13783_REG_VRFCP 13 +#define MC13783_REG_VSIM 14 +#define MC13783_REG_VESIM 15 +#define MC13783_REG_VCAM 16 +#define MC13783_REG_VRFBG 17 +#define MC13783_REG_VVIB 18 +#define MC13783_REG_VRF1 19 +#define MC13783_REG_VRF2 20 +#define MC13783_REG_VMMC1 21 +#define MC13783_REG_VMMC2 22 +#define MC13783_REG_GPO1 23 +#define MC13783_REG_GPO2 24 +#define MC13783_REG_GPO3 25 +#define MC13783_REG_GPO4 26 +#define MC13783_REG_V1 27 +#define MC13783_REG_V2 28 +#define MC13783_REG_V3 29 +#define MC13783_REG_V4 30 +#define MC13783_REG_PWGT1SPI 31 +#define MC13783_REG_PWGT2SPI 32 + +#define MC13783_IRQ_ADCDONE MC13XXX_IRQ_ADCDONE +#define MC13783_IRQ_ADCBISDONE MC13XXX_IRQ_ADCBISDONE +#define MC13783_IRQ_TS MC13XXX_IRQ_TS +#define MC13783_IRQ_WHIGH 3 +#define MC13783_IRQ_WLOW 4 +#define MC13783_IRQ_CHGDET MC13XXX_IRQ_CHGDET +#define MC13783_IRQ_CHGOV 7 +#define MC13783_IRQ_CHGREV MC13XXX_IRQ_CHGREV +#define MC13783_IRQ_CHGSHORT MC13XXX_IRQ_CHGSHORT +#define MC13783_IRQ_CCCV MC13XXX_IRQ_CCCV +#define MC13783_IRQ_CHGCURR MC13XXX_IRQ_CHGCURR +#define MC13783_IRQ_BPON MC13XXX_IRQ_BPON +#define MC13783_IRQ_LOBATL MC13XXX_IRQ_LOBATL +#define MC13783_IRQ_LOBATH MC13XXX_IRQ_LOBATH +#define MC13783_IRQ_UDP 15 +#define MC13783_IRQ_USB 16 +#define MC13783_IRQ_ID 19 +#define MC13783_IRQ_SE1 21 +#define MC13783_IRQ_CKDET 22 +#define MC13783_IRQ_UDM 23 +#define MC13783_IRQ_1HZ MC13XXX_IRQ_1HZ +#define MC13783_IRQ_TODA MC13XXX_IRQ_TODA +#define MC13783_IRQ_ONOFD1 27 +#define MC13783_IRQ_ONOFD2 28 +#define MC13783_IRQ_ONOFD3 29 +#define MC13783_IRQ_SYSRST MC13XXX_IRQ_SYSRST +#define MC13783_IRQ_RTCRST MC13XXX_IRQ_RTCRST +#define MC13783_IRQ_PC MC13XXX_IRQ_PC +#define MC13783_IRQ_WARM MC13XXX_IRQ_WARM +#define MC13783_IRQ_MEMHLD MC13XXX_IRQ_MEMHLD +#define MC13783_IRQ_PWRRDY 35 +#define MC13783_IRQ_THWARNL MC13XXX_IRQ_THWARNL +#define MC13783_IRQ_THWARNH MC13XXX_IRQ_THWARNH +#define MC13783_IRQ_CLK MC13XXX_IRQ_CLK +#define MC13783_IRQ_SEMAF 39 +#define MC13783_IRQ_MC2B 41 +#define MC13783_IRQ_HSDET 42 +#define MC13783_IRQ_HSL 43 +#define MC13783_IRQ_ALSPTH 44 +#define MC13783_IRQ_AHSSHORT 45 +#define MC13783_NUM_IRQ MC13XXX_NUM_IRQ + +#endif /* ifndef __LINUX_MFD_MC13783_H */ diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h new file mode 100644 index 00000000..a00f2bec --- /dev/null +++ b/include/linux/mfd/mc13892.h @@ -0,0 +1,39 @@ +/* + * Copyright 2010 Yong Shen <yong.shen@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
+ */ + +#ifndef __LINUX_MFD_MC13892_H +#define __LINUX_MFD_MC13892_H + +#include <linux/mfd/mc13xxx.h> + +#define MC13892_SW1 0 +#define MC13892_SW2 1 +#define MC13892_SW3 2 +#define MC13892_SW4 3 +#define MC13892_SWBST 4 +#define MC13892_VIOHI 5 +#define MC13892_VPLL 6 +#define MC13892_VDIG 7 +#define MC13892_VSD 8 +#define MC13892_VUSB2 9 +#define MC13892_VVIDEO 10 +#define MC13892_VAUDIO 11 +#define MC13892_VCAM 12 +#define MC13892_VGEN1 13 +#define MC13892_VGEN2 14 +#define MC13892_VGEN3 15 +#define MC13892_VUSB 16 +#define MC13892_GPO1 17 +#define MC13892_GPO2 18 +#define MC13892_GPO3 19 +#define MC13892_GPO4 20 +#define MC13892_PWGT1SPI 21 +#define MC13892_PWGT2SPI 22 +#define MC13892_VCOINCELL 23 + +#endif diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h new file mode 100644 index 00000000..10e038ba --- /dev/null +++ b/include/linux/mfd/mc13xxx.h @@ -0,0 +1,210 @@ +/* + * Copyright 2009-2010 Pengutronix + * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. + */ +#ifndef __LINUX_MFD_MC13XXX_H +#define __LINUX_MFD_MC13XXX_H + +#include <linux/interrupt.h> + +struct mc13xxx; + +void mc13xxx_lock(struct mc13xxx *mc13xxx); +void mc13xxx_unlock(struct mc13xxx *mc13xxx); + +int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val); +int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val); +int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset, + u32 mask, u32 val); + +int mc13xxx_get_flags(struct mc13xxx *mc13xxx); + +int mc13xxx_irq_request(struct mc13xxx *mc13xxx, int irq, + irq_handler_t handler, const char *name, void *dev); +int mc13xxx_irq_request_nounmask(struct mc13xxx *mc13xxx, int irq, + irq_handler_t handler, const char *name, void *dev); +int mc13xxx_irq_free(struct mc13xxx *mc13xxx, int irq, void *dev); + +int mc13xxx_irq_mask(struct mc13xxx *mc13xxx, int irq); +int mc13xxx_irq_unmask(struct mc13xxx *mc13xxx, int irq); +int mc13xxx_irq_status(struct mc13xxx *mc13xxx, int irq, + int *enabled, int *pending); +int mc13xxx_irq_ack(struct mc13xxx *mc13xxx, int irq); + +int mc13xxx_get_flags(struct mc13xxx *mc13xxx); + +int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, + unsigned int mode, unsigned int channel, + u8 ato, bool atox, unsigned int *sample); + +#define MC13XXX_IRQ_ADCDONE 0 +#define MC13XXX_IRQ_ADCBISDONE 1 +#define MC13XXX_IRQ_TS 2 +#define MC13XXX_IRQ_CHGDET 6 +#define MC13XXX_IRQ_CHGREV 8 +#define MC13XXX_IRQ_CHGSHORT 9 +#define MC13XXX_IRQ_CCCV 10 +#define MC13XXX_IRQ_CHGCURR 11 +#define MC13XXX_IRQ_BPON 12 +#define MC13XXX_IRQ_LOBATL 13 +#define MC13XXX_IRQ_LOBATH 14 +#define MC13XXX_IRQ_1HZ 24 +#define MC13XXX_IRQ_TODA 25 +#define MC13XXX_IRQ_SYSRST 30 +#define MC13XXX_IRQ_RTCRST 31 +#define MC13XXX_IRQ_PC 32 +#define MC13XXX_IRQ_WARM 33 +#define MC13XXX_IRQ_MEMHLD 34 +#define MC13XXX_IRQ_THWARNL 36 +#define MC13XXX_IRQ_THWARNH 37 +#define MC13XXX_IRQ_CLK 38 + +#define MC13XXX_NUM_IRQ 46 + +struct regulator_init_data; + +struct mc13xxx_regulator_init_data { + int id; + struct regulator_init_data *init_data; + struct device_node *node; +}; + +struct mc13xxx_regulator_platform_data { + int num_regulators; + struct mc13xxx_regulator_init_data *regulators; +}; + +struct mc13xxx_led_platform_data { +#define MC13783_LED_MD 0 +#define MC13783_LED_AD 1 +#define MC13783_LED_KP 2 +#define MC13783_LED_R1 3 
+#define MC13783_LED_G1 4 +#define MC13783_LED_B1 5 +#define MC13783_LED_R2 6 +#define MC13783_LED_G2 7 +#define MC13783_LED_B2 8 +#define MC13783_LED_R3 9 +#define MC13783_LED_G3 10 +#define MC13783_LED_B3 11 +#define MC13783_LED_MAX MC13783_LED_B3 + int id; + const char *name; + const char *default_trigger; + +/* Three or two bits current selection depending on the led */ + char max_current; +}; + +struct mc13xxx_leds_platform_data { + int num_leds; + struct mc13xxx_led_platform_data *led; + +#define MC13783_LED_TRIODE_MD (1 << 0) +#define MC13783_LED_TRIODE_AD (1 << 1) +#define MC13783_LED_TRIODE_KP (1 << 2) +#define MC13783_LED_BOOST_EN (1 << 3) +#define MC13783_LED_TC1HALF (1 << 4) +#define MC13783_LED_SLEWLIMTC (1 << 5) +#define MC13783_LED_SLEWLIMBL (1 << 6) +#define MC13783_LED_TRIODE_TC1 (1 << 7) +#define MC13783_LED_TRIODE_TC2 (1 << 8) +#define MC13783_LED_TRIODE_TC3 (1 << 9) + int flags; + +#define MC13783_LED_AB_DISABLED 0 +#define MC13783_LED_AB_MD1 1 +#define MC13783_LED_AB_MD12 2 +#define MC13783_LED_AB_MD123 3 +#define MC13783_LED_AB_MD1234 4 +#define MC13783_LED_AB_MD1234_AD1 5 +#define MC13783_LED_AB_MD1234_AD12 6 +#define MC13783_LED_AB_MD1_AD 7 + char abmode; + +#define MC13783_LED_ABREF_200MV 0 +#define MC13783_LED_ABREF_400MV 1 +#define MC13783_LED_ABREF_600MV 2 +#define MC13783_LED_ABREF_800MV 3 + char abref; + +#define MC13783_LED_PERIOD_10MS 0 +#define MC13783_LED_PERIOD_100MS 1 +#define MC13783_LED_PERIOD_500MS 2 +#define MC13783_LED_PERIOD_2S 3 + char bl_period; + char tc1_period; + char tc2_period; + char tc3_period; +}; + +struct mc13xxx_buttons_platform_data { +#define MC13783_BUTTON_DBNC_0MS 0 +#define MC13783_BUTTON_DBNC_30MS 1 +#define MC13783_BUTTON_DBNC_150MS 2 +#define MC13783_BUTTON_DBNC_750MS 3 +#define MC13783_BUTTON_ENABLE (1 << 2) +#define MC13783_BUTTON_POL_INVERT (1 << 3) +#define MC13783_BUTTON_RESET_EN (1 << 4) + int b1on_flags; + unsigned short b1on_key; + int b2on_flags; + unsigned short b2on_key; + int b3on_flags; + unsigned short b3on_key; +}; + +struct mc13xxx_ts_platform_data { + /* Delay between Touchscreen polarization and ADC Conversion. 
+ * Given in clock ticks of a 32 kHz clock which gives a granularity of + * about 30.5ms */ + u8 ato; + +#define MC13783_TS_ATO_FIRST false +#define MC13783_TS_ATO_EACH true + /* Use the ATO delay only for the first conversion or for each one */ + bool atox; +}; + +struct mc13xxx_platform_data { +#define MC13XXX_USE_TOUCHSCREEN (1 << 0) +#define MC13XXX_USE_CODEC (1 << 1) +#define MC13XXX_USE_ADC (1 << 2) +#define MC13XXX_USE_RTC (1 << 3) + unsigned int flags; + + struct mc13xxx_regulator_platform_data regulators; + struct mc13xxx_leds_platform_data *leds; + struct mc13xxx_buttons_platform_data *buttons; + struct mc13xxx_ts_platform_data touch; +}; + +#define MC13XXX_ADC_MODE_TS 1 +#define MC13XXX_ADC_MODE_SINGLE_CHAN 2 +#define MC13XXX_ADC_MODE_MULT_CHAN 3 + +#define MC13XXX_ADC0 43 +#define MC13XXX_ADC0_LICELLCON (1 << 0) +#define MC13XXX_ADC0_CHRGICON (1 << 1) +#define MC13XXX_ADC0_BATICON (1 << 2) +#define MC13XXX_ADC0_ADREFEN (1 << 10) +#define MC13XXX_ADC0_TSMOD0 (1 << 12) +#define MC13XXX_ADC0_TSMOD1 (1 << 13) +#define MC13XXX_ADC0_TSMOD2 (1 << 14) +#define MC13XXX_ADC0_ADINC1 (1 << 16) +#define MC13XXX_ADC0_ADINC2 (1 << 17) + +#define MC13XXX_ADC0_TSMOD_MASK (MC13XXX_ADC0_TSMOD0 | \ + MC13XXX_ADC0_TSMOD1 | \ + MC13XXX_ADC0_TSMOD2) + +#define MC13XXX_ADC0_CONFIG_MASK (MC13XXX_ADC0_TSMOD_MASK | \ + MC13XXX_ADC0_LICELLCON | \ + MC13XXX_ADC0_CHRGICON | \ + MC13XXX_ADC0_BATICON) + +#endif /* ifndef __LINUX_MFD_MC13XXX_H */ diff --git a/include/linux/mfd/mcp.h b/include/linux/mfd/mcp.h new file mode 100644 index 00000000..a9e8bd15 --- /dev/null +++ b/include/linux/mfd/mcp.h @@ -0,0 +1,64 @@ +/* + * linux/drivers/mfd/mcp.h + * + * Copyright (C) 2001 Russell King, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
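+ *
+ * Usage sketch (illustrative; reg and val are placeholders): a function
+ * driver bound through mcp_driver_register(), whose probe received a
+ * struct mcp *mcp, accesses the device via the host callbacks declared
+ * below, e.g.
+ *
+ *	mcp_enable(mcp);
+ *	mcp_reg_write(mcp, reg, val);
+ *	val = mcp_reg_read(mcp, reg);
+ *	mcp_disable(mcp);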
+ */ +#ifndef MCP_H +#define MCP_H + +struct mcp_ops; + +struct mcp { + struct module *owner; + struct mcp_ops *ops; + spinlock_t lock; + int use_count; + unsigned int sclk_rate; + unsigned int rw_timeout; + struct device attached_device; +}; + +struct mcp_ops { + void (*set_telecom_divisor)(struct mcp *, unsigned int); + void (*set_audio_divisor)(struct mcp *, unsigned int); + void (*reg_write)(struct mcp *, unsigned int, unsigned int); + unsigned int (*reg_read)(struct mcp *, unsigned int); + void (*enable)(struct mcp *); + void (*disable)(struct mcp *); +}; + +void mcp_set_telecom_divisor(struct mcp *, unsigned int); +void mcp_set_audio_divisor(struct mcp *, unsigned int); +void mcp_reg_write(struct mcp *, unsigned int, unsigned int); +unsigned int mcp_reg_read(struct mcp *, unsigned int); +void mcp_enable(struct mcp *); +void mcp_disable(struct mcp *); +#define mcp_get_sclk_rate(mcp) ((mcp)->sclk_rate) + +struct mcp *mcp_host_alloc(struct device *, size_t); +int mcp_host_add(struct mcp *, void *); +void mcp_host_del(struct mcp *); +void mcp_host_free(struct mcp *); + +struct mcp_driver { + struct device_driver drv; + int (*probe)(struct mcp *); + void (*remove)(struct mcp *); +}; + +int mcp_driver_register(struct mcp_driver *); +void mcp_driver_unregister(struct mcp_driver *); + +#define mcp_get_drvdata(mcp) dev_get_drvdata(&(mcp)->attached_device) +#define mcp_set_drvdata(mcp,d) dev_set_drvdata(&(mcp)->attached_device, d) + +static inline void *mcp_priv(struct mcp *mcp) +{ + return mcp + 1; +} + +#endif diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h new file mode 100644 index 00000000..b35e6280 --- /dev/null +++ b/include/linux/mfd/pcf50633/adc.h @@ -0,0 +1,73 @@ +/* + * adc.h -- Driver for NXP PCF50633 ADC + * + * (C) 2006-2008 by Openmoko, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
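+ *
+ * Usage sketch (illustrative): a consumer such as a battery driver might
+ * read the battery sense channel synchronously, using the MUX/AVERAGE
+ * values defined below; the return value is expected to be the conversion
+ * result or a negative error code, e.g.
+ *
+ *	ret = pcf50633_adc_sync_read(pcf, PCF50633_ADCC1_MUX_BATSNS_RES,
+ *				     PCF50633_ADCC1_AVERAGE_16);
+ *	if (ret < 0)
+ *		return ret;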
+ */ + +#ifndef __LINUX_MFD_PCF50633_ADC_H +#define __LINUX_MFD_PCF50633_ADC_H + +#include <linux/mfd/pcf50633/core.h> +#include <linux/platform_device.h> + +/* ADC Registers */ +#define PCF50633_REG_ADCC3 0x52 +#define PCF50633_REG_ADCC2 0x53 +#define PCF50633_REG_ADCC1 0x54 +#define PCF50633_REG_ADCS1 0x55 +#define PCF50633_REG_ADCS2 0x56 +#define PCF50633_REG_ADCS3 0x57 + +#define PCF50633_ADCC1_ADCSTART 0x01 +#define PCF50633_ADCC1_RES_8BIT 0x02 +#define PCF50633_ADCC1_RES_10BIT 0x00 +#define PCF50633_ADCC1_AVERAGE_NO 0x00 +#define PCF50633_ADCC1_AVERAGE_4 0x04 +#define PCF50633_ADCC1_AVERAGE_8 0x08 +#define PCF50633_ADCC1_AVERAGE_16 0x0c +#define PCF50633_ADCC1_MUX_BATSNS_RES 0x00 +#define PCF50633_ADCC1_MUX_BATSNS_SUBTR 0x10 +#define PCF50633_ADCC1_MUX_ADCIN2_RES 0x20 +#define PCF50633_ADCC1_MUX_ADCIN2_SUBTR 0x30 +#define PCF50633_ADCC1_MUX_BATTEMP 0x60 +#define PCF50633_ADCC1_MUX_ADCIN1 0x70 +#define PCF50633_ADCC1_AVERAGE_MASK 0x0c +#define PCF50633_ADCC1_ADCMUX_MASK 0xf0 + +#define PCF50633_ADCC2_RATIO_NONE 0x00 +#define PCF50633_ADCC2_RATIO_BATTEMP 0x01 +#define PCF50633_ADCC2_RATIO_ADCIN1 0x02 +#define PCF50633_ADCC2_RATIO_BOTH 0x03 +#define PCF50633_ADCC2_RATIOSETTL_100US 0x04 + +#define PCF50633_ADCC3_ACCSW_EN 0x01 +#define PCF50633_ADCC3_NTCSW_EN 0x04 +#define PCF50633_ADCC3_RES_DIV_TWO 0x10 +#define PCF50633_ADCC3_RES_DIV_THREE 0x00 + +#define PCF50633_ADCS3_REF_NTCSW 0x00 +#define PCF50633_ADCS3_REF_ACCSW 0x10 +#define PCF50633_ADCS3_REF_2V0 0x20 +#define PCF50633_ADCS3_REF_VISA 0x30 +#define PCF50633_ADCS3_REF_2V0_2 0x70 +#define PCF50633_ADCS3_ADCRDY 0x80 + +#define PCF50633_ADCS3_ADCDAT1L_MASK 0x03 +#define PCF50633_ADCS3_ADCDAT2L_MASK 0x0c +#define PCF50633_ADCS3_ADCDAT2L_SHIFT 2 +#define PCF50633_ASCS3_REF_MASK 0x70 + +extern int +pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg, + void (*callback)(struct pcf50633 *, void *, int), + void *callback_param); +extern int +pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg); + +#endif /* __LINUX_PCF50633_ADC_H */ diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h new file mode 100644 index 00000000..83747e21 --- /dev/null +++ b/include/linux/mfd/pcf50633/backlight.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> + * PCF50633 backlight device driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __LINUX_MFD_PCF50633_BACKLIGHT +#define __LINUX_MFD_PCF50633_BACKLIGHT + +/* +* @default_brightness: Backlight brightness is initialized to this value +* +* Brightness to be used after the driver has been probed. +* Valid range 0-63. +* +* @default_brightness_limit: The actual brightness is limited by this value +* +* Brightness limit to be used after the driver has been probed. This is useful +* when it is not known how much power is available for the backlight during +* probe. +* Valid range 0-63. Can be changed later with pcf50633_bl_set_brightness_limit. 
+* +* @ramp_time: Display ramp time when changing brightness +* +* When changing the backlights brightness the change is not instant, instead +* it fades smooth from one state to another. This value specifies how long +* the fade should take. The lower the value the higher the fade time. +* Valid range 0-255 +*/ +struct pcf50633_bl_platform_data { + unsigned int default_brightness; + unsigned int default_brightness_limit; + uint8_t ramp_time; +}; + + +struct pcf50633; + +int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit); + +#endif + diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h new file mode 100644 index 00000000..a8084075 --- /dev/null +++ b/include/linux/mfd/pcf50633/core.h @@ -0,0 +1,238 @@ +/* + * core.h -- Core driver for NXP PCF50633 + * + * (C) 2006-2008 by Openmoko, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_MFD_PCF50633_CORE_H +#define __LINUX_MFD_PCF50633_CORE_H + +#include <linux/i2c.h> +#include <linux/workqueue.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/power_supply.h> +#include <linux/mfd/pcf50633/backlight.h> + +struct pcf50633; +struct regmap; + +#define PCF50633_NUM_REGULATORS 11 + +struct pcf50633_platform_data { + struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS]; + + char **batteries; + int num_batteries; + + /* + * Should be set accordingly to the reference resistor used, see + * I_{ch(ref)} charger reference current in the pcf50633 User + * Manual. 
+ */ + int charger_reference_current_ma; + + /* Callbacks */ + void (*probe_done)(struct pcf50633 *); + void (*mbc_event_callback)(struct pcf50633 *, int); + void (*regulator_registered)(struct pcf50633 *, int); + void (*force_shutdown)(struct pcf50633 *); + + u8 resumers[5]; + + struct pcf50633_bl_platform_data *backlight_data; +}; + +struct pcf50633_irq { + void (*handler) (int, void *); + void *data; +}; + +int pcf50633_register_irq(struct pcf50633 *pcf, int irq, + void (*handler) (int, void *), void *data); +int pcf50633_free_irq(struct pcf50633 *pcf, int irq); + +int pcf50633_irq_mask(struct pcf50633 *pcf, int irq); +int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq); +int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq); + +int pcf50633_read_block(struct pcf50633 *, u8 reg, + int nr_regs, u8 *data); +int pcf50633_write_block(struct pcf50633 *pcf, u8 reg, + int nr_regs, u8 *data); +u8 pcf50633_reg_read(struct pcf50633 *, u8 reg); +int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val); + +int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val); +int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits); + +/* Interrupt registers */ + +#define PCF50633_REG_INT1 0x02 +#define PCF50633_REG_INT2 0x03 +#define PCF50633_REG_INT3 0x04 +#define PCF50633_REG_INT4 0x05 +#define PCF50633_REG_INT5 0x06 + +#define PCF50633_REG_INT1M 0x07 +#define PCF50633_REG_INT2M 0x08 +#define PCF50633_REG_INT3M 0x09 +#define PCF50633_REG_INT4M 0x0a +#define PCF50633_REG_INT5M 0x0b + +enum { + /* Chip IRQs */ + PCF50633_IRQ_ADPINS, + PCF50633_IRQ_ADPREM, + PCF50633_IRQ_USBINS, + PCF50633_IRQ_USBREM, + PCF50633_IRQ_RESERVED1, + PCF50633_IRQ_RESERVED2, + PCF50633_IRQ_ALARM, + PCF50633_IRQ_SECOND, + PCF50633_IRQ_ONKEYR, + PCF50633_IRQ_ONKEYF, + PCF50633_IRQ_EXTON1R, + PCF50633_IRQ_EXTON1F, + PCF50633_IRQ_EXTON2R, + PCF50633_IRQ_EXTON2F, + PCF50633_IRQ_EXTON3R, + PCF50633_IRQ_EXTON3F, + PCF50633_IRQ_BATFULL, + PCF50633_IRQ_CHGHALT, + PCF50633_IRQ_THLIMON, + PCF50633_IRQ_THLIMOFF, + PCF50633_IRQ_USBLIMON, + PCF50633_IRQ_USBLIMOFF, + PCF50633_IRQ_ADCRDY, + PCF50633_IRQ_ONKEY1S, + PCF50633_IRQ_LOWSYS, + PCF50633_IRQ_LOWBAT, + PCF50633_IRQ_HIGHTMP, + PCF50633_IRQ_AUTOPWRFAIL, + PCF50633_IRQ_DWN1PWRFAIL, + PCF50633_IRQ_DWN2PWRFAIL, + PCF50633_IRQ_LEDPWRFAIL, + PCF50633_IRQ_LEDOVP, + PCF50633_IRQ_LDO1PWRFAIL, + PCF50633_IRQ_LDO2PWRFAIL, + PCF50633_IRQ_LDO3PWRFAIL, + PCF50633_IRQ_LDO4PWRFAIL, + PCF50633_IRQ_LDO5PWRFAIL, + PCF50633_IRQ_LDO6PWRFAIL, + PCF50633_IRQ_HCLDOPWRFAIL, + PCF50633_IRQ_HCLDOOVL, + + /* Always last */ + PCF50633_NUM_IRQ, +}; + +struct pcf50633 { + struct device *dev; + struct regmap *regmap; + + struct pcf50633_platform_data *pdata; + int irq; + struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ]; + struct work_struct irq_work; + struct workqueue_struct *work_queue; + struct mutex lock; + + u8 mask_regs[5]; + + u8 suspend_irq_masks[5]; + u8 resume_reason[5]; + int is_suspended; + + int onkey1s_held; + + struct platform_device *rtc_pdev; + struct platform_device *mbc_pdev; + struct platform_device *adc_pdev; + struct platform_device *input_pdev; + struct platform_device *bl_pdev; + struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS]; +}; + +enum pcf50633_reg_int1 { + PCF50633_INT1_ADPINS = 0x01, /* Adapter inserted */ + PCF50633_INT1_ADPREM = 0x02, /* Adapter removed */ + PCF50633_INT1_USBINS = 0x04, /* USB inserted */ + PCF50633_INT1_USBREM = 0x08, /* USB removed */ + /* reserved */ + PCF50633_INT1_ALARM = 0x40, /* RTC alarm time is reached */ + 
PCF50633_INT1_SECOND = 0x80, /* RTC periodic second interrupt */ +}; + +enum pcf50633_reg_int2 { + PCF50633_INT2_ONKEYR = 0x01, /* ONKEY rising edge */ + PCF50633_INT2_ONKEYF = 0x02, /* ONKEY falling edge */ + PCF50633_INT2_EXTON1R = 0x04, /* EXTON1 rising edge */ + PCF50633_INT2_EXTON1F = 0x08, /* EXTON1 falling edge */ + PCF50633_INT2_EXTON2R = 0x10, /* EXTON2 rising edge */ + PCF50633_INT2_EXTON2F = 0x20, /* EXTON2 falling edge */ + PCF50633_INT2_EXTON3R = 0x40, /* EXTON3 rising edge */ + PCF50633_INT2_EXTON3F = 0x80, /* EXTON3 falling edge */ +}; + +enum pcf50633_reg_int3 { + PCF50633_INT3_BATFULL = 0x01, /* Battery full */ + PCF50633_INT3_CHGHALT = 0x02, /* Charger halt */ + PCF50633_INT3_THLIMON = 0x04, + PCF50633_INT3_THLIMOFF = 0x08, + PCF50633_INT3_USBLIMON = 0x10, + PCF50633_INT3_USBLIMOFF = 0x20, + PCF50633_INT3_ADCRDY = 0x40, /* ADC result ready */ + PCF50633_INT3_ONKEY1S = 0x80, /* ONKEY pressed 1 second */ +}; + +enum pcf50633_reg_int4 { + PCF50633_INT4_LOWSYS = 0x01, + PCF50633_INT4_LOWBAT = 0x02, + PCF50633_INT4_HIGHTMP = 0x04, + PCF50633_INT4_AUTOPWRFAIL = 0x08, + PCF50633_INT4_DWN1PWRFAIL = 0x10, + PCF50633_INT4_DWN2PWRFAIL = 0x20, + PCF50633_INT4_LEDPWRFAIL = 0x40, + PCF50633_INT4_LEDOVP = 0x80, +}; + +enum pcf50633_reg_int5 { + PCF50633_INT5_LDO1PWRFAIL = 0x01, + PCF50633_INT5_LDO2PWRFAIL = 0x02, + PCF50633_INT5_LDO3PWRFAIL = 0x04, + PCF50633_INT5_LDO4PWRFAIL = 0x08, + PCF50633_INT5_LDO5PWRFAIL = 0x10, + PCF50633_INT5_LDO6PWRFAIL = 0x20, + PCF50633_INT5_HCLDOPWRFAIL = 0x40, + PCF50633_INT5_HCLDOOVL = 0x80, +}; + +/* misc. registers */ +#define PCF50633_REG_OOCSHDWN 0x0c + +/* LED registers */ +#define PCF50633_REG_LEDOUT 0x28 +#define PCF50633_REG_LEDENA 0x29 +#define PCF50633_REG_LEDCTL 0x2a +#define PCF50633_REG_LEDDIM 0x2b + +static inline struct pcf50633 *dev_to_pcf50633(struct device *dev) +{ + return dev_get_drvdata(dev); +} + +int pcf50633_irq_init(struct pcf50633 *pcf, int irq); +void pcf50633_irq_free(struct pcf50633 *pcf); +#ifdef CONFIG_PM +int pcf50633_irq_suspend(struct pcf50633 *pcf); +int pcf50633_irq_resume(struct pcf50633 *pcf); +#endif + +#endif diff --git a/include/linux/mfd/pcf50633/gpio.h b/include/linux/mfd/pcf50633/gpio.h new file mode 100644 index 00000000..a42b845e --- /dev/null +++ b/include/linux/mfd/pcf50633/gpio.h @@ -0,0 +1,52 @@ +/* + * gpio.h -- GPIO driver for NXP PCF50633 + * + * (C) 2006-2008 by Openmoko, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
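+ *
+ * Usage sketch (illustrative): board or machine code can drive one of the
+ * pins listed below through these helpers, e.g.
+ *
+ *	pcf50633_gpio_invert_set(pcf, PCF50633_GPO, 0);
+ *	pcf50633_gpio_set(pcf, PCF50633_GPO, 1);
+ *	level = pcf50633_gpio_get(pcf, PCF50633_GPO);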
+ */ + +#ifndef __LINUX_MFD_PCF50633_GPIO_H +#define __LINUX_MFD_PCF50633_GPIO_H + +#include <linux/mfd/pcf50633/core.h> + +#define PCF50633_GPIO1 1 +#define PCF50633_GPIO2 2 +#define PCF50633_GPIO3 3 +#define PCF50633_GPO 4 + +#define PCF50633_REG_GPIO1CFG 0x14 +#define PCF50633_REG_GPIO2CFG 0x15 +#define PCF50633_REG_GPIO3CFG 0x16 +#define PCF50633_REG_GPOCFG 0x17 + +#define PCF50633_GPOCFG_GPOSEL_MASK 0x07 + +enum pcf50633_reg_gpocfg { + PCF50633_GPOCFG_GPOSEL_0 = 0x00, + PCF50633_GPOCFG_GPOSEL_LED_NFET = 0x01, + PCF50633_GPOCFG_GPOSEL_SYSxOK = 0x02, + PCF50633_GPOCFG_GPOSEL_CLK32K = 0x03, + PCF50633_GPOCFG_GPOSEL_ADAPUSB = 0x04, + PCF50633_GPOCFG_GPOSEL_USBxOK = 0x05, + PCF50633_GPOCFG_GPOSEL_ACTPH4 = 0x06, + PCF50633_GPOCFG_GPOSEL_1 = 0x07, + PCF50633_GPOCFG_GPOSEL_INVERSE = 0x08, +}; + +int pcf50633_gpio_set(struct pcf50633 *pcf, int gpio, u8 val); +u8 pcf50633_gpio_get(struct pcf50633 *pcf, int gpio); + +int pcf50633_gpio_invert_set(struct pcf50633 *, int gpio, int invert); +int pcf50633_gpio_invert_get(struct pcf50633 *pcf, int gpio); + +int pcf50633_gpio_power_supply_set(struct pcf50633 *, + int gpio, int regulator, int on); +#endif /* __LINUX_MFD_PCF50633_GPIO_H */ + + diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h new file mode 100644 index 00000000..df4f5fa8 --- /dev/null +++ b/include/linux/mfd/pcf50633/mbc.h @@ -0,0 +1,134 @@ +/* + * mbc.h -- Driver for NXP PCF50633 Main Battery Charger + * + * (C) 2006-2008 by Openmoko, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_MFD_PCF50633_MBC_H +#define __LINUX_MFD_PCF50633_MBC_H + +#include <linux/mfd/pcf50633/core.h> +#include <linux/platform_device.h> + +#define PCF50633_REG_MBCC1 0x43 +#define PCF50633_REG_MBCC2 0x44 +#define PCF50633_REG_MBCC3 0x45 +#define PCF50633_REG_MBCC4 0x46 +#define PCF50633_REG_MBCC5 0x47 +#define PCF50633_REG_MBCC6 0x48 +#define PCF50633_REG_MBCC7 0x49 +#define PCF50633_REG_MBCC8 0x4a +#define PCF50633_REG_MBCS1 0x4b +#define PCF50633_REG_MBCS2 0x4c +#define PCF50633_REG_MBCS3 0x4d + +enum pcf50633_reg_mbcc1 { + PCF50633_MBCC1_CHGENA = 0x01, /* Charger enable */ + PCF50633_MBCC1_AUTOSTOP = 0x02, + PCF50633_MBCC1_AUTORES = 0x04, /* automatic resume */ + PCF50633_MBCC1_RESUME = 0x08, /* explicit resume cmd */ + PCF50633_MBCC1_RESTART = 0x10, /* restart charging */ + PCF50633_MBCC1_PREWDTIME_60M = 0x20, /* max. 
precharging time */ + PCF50633_MBCC1_WDTIME_1H = 0x00, + PCF50633_MBCC1_WDTIME_2H = 0x40, + PCF50633_MBCC1_WDTIME_4H = 0x80, + PCF50633_MBCC1_WDTIME_6H = 0xc0, +}; +#define PCF50633_MBCC1_WDTIME_MASK 0xc0 + +enum pcf50633_reg_mbcc2 { + PCF50633_MBCC2_VBATCOND_2V7 = 0x00, + PCF50633_MBCC2_VBATCOND_2V85 = 0x01, + PCF50633_MBCC2_VBATCOND_3V0 = 0x02, + PCF50633_MBCC2_VBATCOND_3V15 = 0x03, + PCF50633_MBCC2_VMAX_4V = 0x00, + PCF50633_MBCC2_VMAX_4V20 = 0x28, + PCF50633_MBCC2_VRESDEBTIME_64S = 0x80, /* debounce time (32/64sec) */ +}; + +enum pcf50633_reg_mbcc7 { + PCF50633_MBCC7_USB_100mA = 0x00, + PCF50633_MBCC7_USB_500mA = 0x01, + PCF50633_MBCC7_USB_1000mA = 0x02, + PCF50633_MBCC7_USB_SUSPEND = 0x03, + PCF50633_MBCC7_BATTEMP_EN = 0x04, + PCF50633_MBCC7_BATSYSIMAX_1A6 = 0x00, + PCF50633_MBCC7_BATSYSIMAX_1A8 = 0x40, + PCF50633_MBCC7_BATSYSIMAX_2A0 = 0x80, + PCF50633_MBCC7_BATSYSIMAX_2A2 = 0xc0, +}; +#define PCF50633_MBCC7_USB_MASK 0x03 + +enum pcf50633_reg_mbcc8 { + PCF50633_MBCC8_USBENASUS = 0x10, +}; + +enum pcf50633_reg_mbcs1 { + PCF50633_MBCS1_USBPRES = 0x01, + PCF50633_MBCS1_USBOK = 0x02, + PCF50633_MBCS1_ADAPTPRES = 0x04, + PCF50633_MBCS1_ADAPTOK = 0x08, + PCF50633_MBCS1_TBAT_OK = 0x00, + PCF50633_MBCS1_TBAT_ABOVE = 0x10, + PCF50633_MBCS1_TBAT_BELOW = 0x20, + PCF50633_MBCS1_TBAT_UNDEF = 0x30, + PCF50633_MBCS1_PREWDTEXP = 0x40, + PCF50633_MBCS1_WDTEXP = 0x80, +}; + +enum pcf50633_reg_mbcs2_mbcmod { + PCF50633_MBCS2_MBC_PLAY = 0x00, + PCF50633_MBCS2_MBC_USB_PRE = 0x01, + PCF50633_MBCS2_MBC_USB_PRE_WAIT = 0x02, + PCF50633_MBCS2_MBC_USB_FAST = 0x03, + PCF50633_MBCS2_MBC_USB_FAST_WAIT = 0x04, + PCF50633_MBCS2_MBC_USB_SUSPEND = 0x05, + PCF50633_MBCS2_MBC_ADP_PRE = 0x06, + PCF50633_MBCS2_MBC_ADP_PRE_WAIT = 0x07, + PCF50633_MBCS2_MBC_ADP_FAST = 0x08, + PCF50633_MBCS2_MBC_ADP_FAST_WAIT = 0x09, + PCF50633_MBCS2_MBC_BAT_FULL = 0x0a, + PCF50633_MBCS2_MBC_HALT = 0x0b, +}; +#define PCF50633_MBCS2_MBC_MASK 0x0f +enum pcf50633_reg_mbcs2_chgstat { + PCF50633_MBCS2_CHGS_NONE = 0x00, + PCF50633_MBCS2_CHGS_ADAPTER = 0x10, + PCF50633_MBCS2_CHGS_USB = 0x20, + PCF50633_MBCS2_CHGS_BOTH = 0x30, +}; +#define PCF50633_MBCS2_RESSTAT_AUTO 0x40 + +enum pcf50633_reg_mbcs3 { + PCF50633_MBCS3_USBLIM_PLAY = 0x01, + PCF50633_MBCS3_USBLIM_CGH = 0x02, + PCF50633_MBCS3_TLIM_PLAY = 0x04, + PCF50633_MBCS3_TLIM_CHG = 0x08, + PCF50633_MBCS3_ILIM = 0x10, /* 1: Ibat > Icutoff */ + PCF50633_MBCS3_VLIM = 0x20, /* 1: Vbat == Vmax */ + PCF50633_MBCS3_VBATSTAT = 0x40, /* 1: Vbat > Vbatcond */ + PCF50633_MBCS3_VRES = 0x80, /* 1: Vbat > Vth(RES) */ +}; + +#define PCF50633_MBCC2_VBATCOND_MASK 0x03 +#define PCF50633_MBCC2_VMAX_MASK 0x3c + +/* Charger status */ +#define PCF50633_MBC_USB_ONLINE 0x01 +#define PCF50633_MBC_USB_ACTIVE 0x02 +#define PCF50633_MBC_ADAPTER_ONLINE 0x04 +#define PCF50633_MBC_ADAPTER_ACTIVE 0x08 + +int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); + +int pcf50633_mbc_get_status(struct pcf50633 *); +int pcf50633_mbc_get_usb_online_status(struct pcf50633 *); + +#endif + diff --git a/include/linux/mfd/pcf50633/pmic.h b/include/linux/mfd/pcf50633/pmic.h new file mode 100644 index 00000000..2d3dbe53 --- /dev/null +++ b/include/linux/mfd/pcf50633/pmic.h @@ -0,0 +1,67 @@ +#ifndef __LINUX_MFD_PCF50633_PMIC_H +#define __LINUX_MFD_PCF50633_PMIC_H + +#include <linux/mfd/pcf50633/core.h> +#include <linux/platform_device.h> + +#define PCF50633_REG_AUTOOUT 0x1a +#define PCF50633_REG_AUTOENA 0x1b +#define PCF50633_REG_AUTOCTL 0x1c +#define PCF50633_REG_AUTOMXC 0x1d +#define PCF50633_REG_DOWN1OUT 0x1e +#define 
PCF50633_REG_DOWN1ENA 0x1f +#define PCF50633_REG_DOWN1CTL 0x20 +#define PCF50633_REG_DOWN1MXC 0x21 +#define PCF50633_REG_DOWN2OUT 0x22 +#define PCF50633_REG_DOWN2ENA 0x23 +#define PCF50633_REG_DOWN2CTL 0x24 +#define PCF50633_REG_DOWN2MXC 0x25 +#define PCF50633_REG_MEMLDOOUT 0x26 +#define PCF50633_REG_MEMLDOENA 0x27 +#define PCF50633_REG_LDO1OUT 0x2d +#define PCF50633_REG_LDO1ENA 0x2e +#define PCF50633_REG_LDO2OUT 0x2f +#define PCF50633_REG_LDO2ENA 0x30 +#define PCF50633_REG_LDO3OUT 0x31 +#define PCF50633_REG_LDO3ENA 0x32 +#define PCF50633_REG_LDO4OUT 0x33 +#define PCF50633_REG_LDO4ENA 0x34 +#define PCF50633_REG_LDO5OUT 0x35 +#define PCF50633_REG_LDO5ENA 0x36 +#define PCF50633_REG_LDO6OUT 0x37 +#define PCF50633_REG_LDO6ENA 0x38 +#define PCF50633_REG_HCLDOOUT 0x39 +#define PCF50633_REG_HCLDOENA 0x3a +#define PCF50633_REG_HCLDOOVL 0x40 + +enum pcf50633_regulator_enable { + PCF50633_REGULATOR_ON = 0x01, + PCF50633_REGULATOR_ON_GPIO1 = 0x02, + PCF50633_REGULATOR_ON_GPIO2 = 0x04, + PCF50633_REGULATOR_ON_GPIO3 = 0x08, +}; +#define PCF50633_REGULATOR_ON_MASK 0x0f + +enum pcf50633_regulator_phase { + PCF50633_REGULATOR_ACTPH1 = 0x00, + PCF50633_REGULATOR_ACTPH2 = 0x10, + PCF50633_REGULATOR_ACTPH3 = 0x20, + PCF50633_REGULATOR_ACTPH4 = 0x30, +}; +#define PCF50633_REGULATOR_ACTPH_MASK 0x30 + +enum pcf50633_regulator_id { + PCF50633_REGULATOR_AUTO, + PCF50633_REGULATOR_DOWN1, + PCF50633_REGULATOR_DOWN2, + PCF50633_REGULATOR_LDO1, + PCF50633_REGULATOR_LDO2, + PCF50633_REGULATOR_LDO3, + PCF50633_REGULATOR_LDO4, + PCF50633_REGULATOR_LDO5, + PCF50633_REGULATOR_LDO6, + PCF50633_REGULATOR_HCLDO, + PCF50633_REGULATOR_MEMLDO, +}; +#endif + diff --git a/include/linux/mfd/pm8xxx/core.h b/include/linux/mfd/pm8xxx/core.h new file mode 100644 index 00000000..bd2f4f64 --- /dev/null +++ b/include/linux/mfd/pm8xxx/core.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
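+ *
+ * Usage sketch (illustrative; addr and bit are placeholders): sub-devices
+ * reach the PMIC through their parent device and the inline wrappers
+ * declared below, e.g.
+ *
+ *	u8 val;
+ *	rc = pm8xxx_readb(dev->parent, addr, &val);
+ *	if (!rc)
+ *		rc = pm8xxx_writeb(dev->parent, addr, val | bit);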
+ */ +/* + * Qualcomm PMIC 8xxx driver header file + * + */ + +#ifndef __MFD_PM8XXX_CORE_H +#define __MFD_PM8XXX_CORE_H + +#include <linux/mfd/core.h> + +struct pm8xxx_drvdata { + int (*pmic_readb) (const struct device *dev, u16 addr, u8 *val); + int (*pmic_writeb) (const struct device *dev, u16 addr, u8 val); + int (*pmic_read_buf) (const struct device *dev, u16 addr, u8 *buf, + int n); + int (*pmic_write_buf) (const struct device *dev, u16 addr, u8 *buf, + int n); + int (*pmic_read_irq_stat) (const struct device *dev, int irq); + void *pm_chip_data; +}; + +static inline int pm8xxx_readb(const struct device *dev, u16 addr, u8 *val) +{ + struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); + + if (!dd) + return -EINVAL; + return dd->pmic_readb(dev, addr, val); +} + +static inline int pm8xxx_writeb(const struct device *dev, u16 addr, u8 val) +{ + struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); + + if (!dd) + return -EINVAL; + return dd->pmic_writeb(dev, addr, val); +} + +static inline int pm8xxx_read_buf(const struct device *dev, u16 addr, u8 *buf, + int n) +{ + struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); + + if (!dd) + return -EINVAL; + return dd->pmic_read_buf(dev, addr, buf, n); +} + +static inline int pm8xxx_write_buf(const struct device *dev, u16 addr, u8 *buf, + int n) +{ + struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); + + if (!dd) + return -EINVAL; + return dd->pmic_write_buf(dev, addr, buf, n); +} + +static inline int pm8xxx_read_irq_stat(const struct device *dev, int irq) +{ + struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); + + if (!dd) + return -EINVAL; + return dd->pmic_read_irq_stat(dev, irq); +} + +#endif diff --git a/include/linux/mfd/pm8xxx/irq.h b/include/linux/mfd/pm8xxx/irq.h new file mode 100644 index 00000000..4b21769f --- /dev/null +++ b/include/linux/mfd/pm8xxx/irq.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
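+ *
+ * Usage sketch (illustrative; irq_pdata is a placeholder for a
+ * struct pm8xxx_irq_platform_data pointer provided by the core driver):
+ *
+ *	chip = pm8xxx_irq_init(dev, irq_pdata);
+ *	if (IS_ERR(chip))
+ *		return PTR_ERR(chip);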
+ */ +/* + * Qualcomm PMIC irq 8xxx driver header file + * + */ + +#ifndef __MFD_PM8XXX_IRQ_H +#define __MFD_PM8XXX_IRQ_H + +#include <linux/errno.h> +#include <linux/err.h> + +struct pm8xxx_irq_core_data { + u32 rev; + int nirqs; +}; + +struct pm8xxx_irq_platform_data { + int irq_base; + struct pm8xxx_irq_core_data irq_cdata; + int devirq; + int irq_trigger_flag; +}; + +struct pm_irq_chip; + +#ifdef CONFIG_MFD_PM8XXX_IRQ +int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq); +struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev, + const struct pm8xxx_irq_platform_data *pdata); +int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip); +#else +static inline int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq) +{ + return -ENXIO; +} +static inline struct pm_irq_chip * __devinit pm8xxx_irq_init( + const struct device *dev, + const struct pm8xxx_irq_platform_data *pdata) +{ + return ERR_PTR(-ENXIO); +} +static inline int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip) +{ + return -ENXIO; +} +#endif /* CONFIG_MFD_PM8XXX_IRQ */ +#endif /* __MFD_PM8XXX_IRQ_H */ diff --git a/include/linux/mfd/pm8xxx/pm8921.h b/include/linux/mfd/pm8xxx/pm8921.h new file mode 100644 index 00000000..00fa3de7 --- /dev/null +++ b/include/linux/mfd/pm8xxx/pm8921.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +/* + * Qualcomm PMIC 8921 driver header file + * + */ + +#ifndef __MFD_PM8921_H +#define __MFD_PM8921_H + +#include <linux/mfd/pm8xxx/irq.h> + +#define PM8921_NR_IRQS 256 + +struct pm8921_platform_data { + int irq_base; + struct pm8xxx_irq_platform_data *irq_pdata; +}; + +#endif diff --git a/include/linux/mfd/pm8xxx/rtc.h b/include/linux/mfd/pm8xxx/rtc.h new file mode 100644 index 00000000..14f1983e --- /dev/null +++ b/include/linux/mfd/pm8xxx/rtc.h @@ -0,0 +1,25 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __RTC_PM8XXX_H__ +#define __RTC_PM8XXX_H__ + +#define PM8XXX_RTC_DEV_NAME "rtc-pm8xxx" +/** + * struct pm8xxx_rtc_pdata - RTC driver platform data + * @rtc_write_enable: variable stating RTC write capability + */ +struct pm8xxx_rtc_platform_data { + bool rtc_write_enable; +}; + +#endif /* __RTC_PM8XXX_H__ */ diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h new file mode 100644 index 00000000..0b64b19d --- /dev/null +++ b/include/linux/mfd/rc5t583.h @@ -0,0 +1,326 @@ +/* + * Core driver interface to access RICOH_RC5T583 power management chip. + * + * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved. 
+ * Author: Laxman dewangan <ldewangan@nvidia.com> + * + * Based on code + * Copyright (C) 2011 RICOH COMPANY,LTD + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#ifndef __LINUX_MFD_RC5T583_H +#define __LINUX_MFD_RC5T583_H + +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/regmap.h> + +#define RC5T583_MAX_REGS 0xF8 + +/* Maximum number of main interrupts */ +#define MAX_MAIN_INTERRUPT 5 +#define RC5T583_MAX_GPEDGE_REG 2 +#define RC5T583_MAX_INTERRUPT_MASK_REGS 9 + +/* Interrupt enable register */ +#define RC5T583_INT_EN_SYS1 0x19 +#define RC5T583_INT_EN_SYS2 0x1D +#define RC5T583_INT_EN_DCDC 0x41 +#define RC5T583_INT_EN_RTC 0xED +#define RC5T583_INT_EN_ADC1 0x90 +#define RC5T583_INT_EN_ADC2 0x91 +#define RC5T583_INT_EN_ADC3 0x92 + +/* Interrupt status registers (monitor regs in Ricoh)*/ +#define RC5T583_INTC_INTPOL 0xAD +#define RC5T583_INTC_INTEN 0xAE +#define RC5T583_INTC_INTMON 0xAF + +#define RC5T583_INT_MON_GRP 0xAF +#define RC5T583_INT_MON_SYS1 0x1B +#define RC5T583_INT_MON_SYS2 0x1F +#define RC5T583_INT_MON_DCDC 0x43 +#define RC5T583_INT_MON_RTC 0xEE + +/* Interrupt clearing registers */ +#define RC5T583_INT_IR_SYS1 0x1A +#define RC5T583_INT_IR_SYS2 0x1E +#define RC5T583_INT_IR_DCDC 0x42 +#define RC5T583_INT_IR_RTC 0xEE +#define RC5T583_INT_IR_ADCL 0x94 +#define RC5T583_INT_IR_ADCH 0x95 +#define RC5T583_INT_IR_ADCEND 0x96 +#define RC5T583_INT_IR_GPIOR 0xA9 +#define RC5T583_INT_IR_GPIOF 0xAA + +/* Sleep sequence registers */ +#define RC5T583_SLPSEQ1 0x21 +#define RC5T583_SLPSEQ2 0x22 +#define RC5T583_SLPSEQ3 0x23 +#define RC5T583_SLPSEQ4 0x24 +#define RC5T583_SLPSEQ5 0x25 +#define RC5T583_SLPSEQ6 0x26 +#define RC5T583_SLPSEQ7 0x27 +#define RC5T583_SLPSEQ8 0x28 +#define RC5T583_SLPSEQ9 0x29 +#define RC5T583_SLPSEQ10 0x2A +#define RC5T583_SLPSEQ11 0x2B + +/* Regulator registers */ +#define RC5T583_REG_DC0CTL 0x30 +#define RC5T583_REG_DC0DAC 0x31 +#define RC5T583_REG_DC0LATCTL 0x32 +#define RC5T583_REG_SR0CTL 0x33 + +#define RC5T583_REG_DC1CTL 0x34 +#define RC5T583_REG_DC1DAC 0x35 +#define RC5T583_REG_DC1LATCTL 0x36 +#define RC5T583_REG_SR1CTL 0x37 + +#define RC5T583_REG_DC2CTL 0x38 +#define RC5T583_REG_DC2DAC 0x39 +#define RC5T583_REG_DC2LATCTL 0x3A +#define RC5T583_REG_SR2CTL 0x3B + +#define RC5T583_REG_DC3CTL 0x3C +#define RC5T583_REG_DC3DAC 0x3D +#define RC5T583_REG_DC3LATCTL 0x3E +#define RC5T583_REG_SR3CTL 0x3F + + +#define RC5T583_REG_LDOEN1 0x50 +#define RC5T583_REG_LDOEN2 0x51 +#define RC5T583_REG_LDODIS1 0x52 +#define RC5T583_REG_LDODIS2 0x53 + +#define RC5T583_REG_LDO0DAC 0x54 +#define RC5T583_REG_LDO1DAC 0x55 +#define RC5T583_REG_LDO2DAC 0x56 +#define RC5T583_REG_LDO3DAC 0x57 +#define RC5T583_REG_LDO4DAC 0x58 +#define RC5T583_REG_LDO5DAC 0x59 +#define RC5T583_REG_LDO6DAC 0x5A +#define RC5T583_REG_LDO7DAC 0x5B +#define RC5T583_REG_LDO8DAC 0x5C +#define RC5T583_REG_LDO9DAC 0x5D + +#define RC5T583_REG_DC0DAC_DS 0x60 +#define RC5T583_REG_DC1DAC_DS 0x61 +#define RC5T583_REG_DC2DAC_DS 0x62 +#define 
RC5T583_REG_DC3DAC_DS 0x63 + +#define RC5T583_REG_LDO0DAC_DS 0x64 +#define RC5T583_REG_LDO1DAC_DS 0x65 +#define RC5T583_REG_LDO2DAC_DS 0x66 +#define RC5T583_REG_LDO3DAC_DS 0x67 +#define RC5T583_REG_LDO4DAC_DS 0x68 +#define RC5T583_REG_LDO5DAC_DS 0x69 +#define RC5T583_REG_LDO6DAC_DS 0x6A +#define RC5T583_REG_LDO7DAC_DS 0x6B +#define RC5T583_REG_LDO8DAC_DS 0x6C +#define RC5T583_REG_LDO9DAC_DS 0x6D + +/* GPIO register base address */ +#define RC5T583_GPIO_IOSEL 0xA0 +#define RC5T583_GPIO_PDEN 0xA1 +#define RC5T583_GPIO_IOOUT 0xA2 +#define RC5T583_GPIO_PGSEL 0xA3 +#define RC5T583_GPIO_GPINV 0xA4 +#define RC5T583_GPIO_GPDEB 0xA5 +#define RC5T583_GPIO_GPEDGE1 0xA6 +#define RC5T583_GPIO_GPEDGE2 0xA7 +#define RC5T583_GPIO_EN_INT 0xA8 +#define RC5T583_GPIO_MON_IOIN 0xAB +#define RC5T583_GPIO_GPOFUNC 0xAC + +/* RICOH_RC5T583 IRQ definitions */ +enum { + RC5T583_IRQ_ONKEY, + RC5T583_IRQ_ACOK, + RC5T583_IRQ_LIDOPEN, + RC5T583_IRQ_PREOT, + RC5T583_IRQ_CLKSTP, + RC5T583_IRQ_ONKEY_OFF, + RC5T583_IRQ_WD, + RC5T583_IRQ_EN_PWRREQ1, + RC5T583_IRQ_EN_PWRREQ2, + RC5T583_IRQ_PRE_VINDET, + + RC5T583_IRQ_DC0LIM, + RC5T583_IRQ_DC1LIM, + RC5T583_IRQ_DC2LIM, + RC5T583_IRQ_DC3LIM, + + RC5T583_IRQ_CTC, + RC5T583_IRQ_YALE, + RC5T583_IRQ_DALE, + RC5T583_IRQ_WALE, + + RC5T583_IRQ_AIN1L, + RC5T583_IRQ_AIN2L, + RC5T583_IRQ_AIN3L, + RC5T583_IRQ_VBATL, + RC5T583_IRQ_VIN3L, + RC5T583_IRQ_VIN8L, + RC5T583_IRQ_AIN1H, + RC5T583_IRQ_AIN2H, + RC5T583_IRQ_AIN3H, + RC5T583_IRQ_VBATH, + RC5T583_IRQ_VIN3H, + RC5T583_IRQ_VIN8H, + RC5T583_IRQ_ADCEND, + + RC5T583_IRQ_GPIO0, + RC5T583_IRQ_GPIO1, + RC5T583_IRQ_GPIO2, + RC5T583_IRQ_GPIO3, + RC5T583_IRQ_GPIO4, + RC5T583_IRQ_GPIO5, + RC5T583_IRQ_GPIO6, + RC5T583_IRQ_GPIO7, + + /* Should be last entry */ + RC5T583_MAX_IRQS, +}; + +/* Ricoh583 gpio definitions */ +enum { + RC5T583_GPIO0, + RC5T583_GPIO1, + RC5T583_GPIO2, + RC5T583_GPIO3, + RC5T583_GPIO4, + RC5T583_GPIO5, + RC5T583_GPIO6, + RC5T583_GPIO7, + + /* Should be last entry */ + RC5T583_MAX_GPIO, +}; + +enum { + RC5T583_DS_NONE, + RC5T583_DS_DC0, + RC5T583_DS_DC1, + RC5T583_DS_DC2, + RC5T583_DS_DC3, + RC5T583_DS_LDO0, + RC5T583_DS_LDO1, + RC5T583_DS_LDO2, + RC5T583_DS_LDO3, + RC5T583_DS_LDO4, + RC5T583_DS_LDO5, + RC5T583_DS_LDO6, + RC5T583_DS_LDO7, + RC5T583_DS_LDO8, + RC5T583_DS_LDO9, + RC5T583_DS_PSO0, + RC5T583_DS_PSO1, + RC5T583_DS_PSO2, + RC5T583_DS_PSO3, + RC5T583_DS_PSO4, + RC5T583_DS_PSO5, + RC5T583_DS_PSO6, + RC5T583_DS_PSO7, + + /* Should be last entry */ + RC5T583_DS_MAX, +}; + +/* + * Ricoh pmic RC5T583 supports sleep through two external controls. + * The output of gpios and regulator can be enable/disable through + * this external signals. + */ +enum { + RC5T583_EXT_PWRREQ1_CONTROL = 0x1, + RC5T583_EXT_PWRREQ2_CONTROL = 0x2, +}; + +struct rc5t583 { + struct device *dev; + struct regmap *regmap; + int chip_irq; + int irq_base; + struct mutex irq_lock; + unsigned long group_irq_en[MAX_MAIN_INTERRUPT]; + + /* For main interrupt bits in INTC */ + uint8_t intc_inten_reg; + + /* For group interrupt bits and address */ + uint8_t irq_en_reg[RC5T583_MAX_INTERRUPT_MASK_REGS]; + + /* For gpio edge */ + uint8_t gpedge_reg[RC5T583_MAX_GPEDGE_REG]; +}; + +/* + * rc5t583_platform_data: Platform data for ricoh rc5t583 pmu. + * The board specific data is provided through this structure. + * @irq_base: Irq base number on which this device registers their interrupts. + * @enable_shutdown: Enable shutdown through the input pin "shutdown". 
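+ *
+ * A board file might fill this in as, for example (the irq base shown is a
+ * board-specific placeholder):
+ *
+ *	static struct rc5t583_platform_data rc5t583_pdata = {
+ *		.irq_base	 = IRQ_BOARD_PMIC_BASE,
+ *		.enable_shutdown = true,
+ *	};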
+ */ + +struct rc5t583_platform_data { + int irq_base; + bool enable_shutdown; +}; + +static inline int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_write(rc5t583->regmap, reg, val); +} + +static inline int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + unsigned int ival; + int ret; + ret = regmap_read(rc5t583->regmap, reg, &ival); + if (!ret) + *val = (uint8_t)ival; + return ret; +} + +static inline int rc5t583_set_bits(struct device *dev, unsigned int reg, + unsigned int bit_mask) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask); +} + +static inline int rc5t583_clear_bits(struct device *dev, unsigned int reg, + unsigned int bit_mask) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0); +} + +static inline int rc5t583_update(struct device *dev, unsigned int reg, + unsigned int val, unsigned int mask) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_update_bits(rc5t583->regmap, reg, mask, val); +} + +int rc5t583_ext_power_req_config(struct device *dev, int deepsleep_id, + int ext_pwr_req, int deepsleep_slot_nr); +int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base); +int rc5t583_irq_exit(struct rc5t583 *rc5t583); + +#endif diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h new file mode 100644 index 00000000..4bdf19c8 --- /dev/null +++ b/include/linux/mfd/rdc321x.h @@ -0,0 +1,26 @@ +#ifndef __RDC321X_MFD_H +#define __RDC321X_MFD_H + +#include <linux/types.h> +#include <linux/pci.h> + +/* Offsets to be accessed in the southbridge PCI + * device configuration register */ +#define RDC321X_WDT_CTRL 0x44 +#define RDC321X_GPIO_CTRL_REG1 0x48 +#define RDC321X_GPIO_DATA_REG1 0x4c +#define RDC321X_GPIO_CTRL_REG2 0x84 +#define RDC321X_GPIO_DATA_REG2 0x88 + +#define RDC321X_MAX_GPIO 58 + +struct rdc321x_gpio_pdata { + struct pci_dev *sb_pdev; + unsigned max_gpios; +}; + +struct rdc321x_wdt_pdata { + struct pci_dev *sb_pdev; +}; + +#endif /* __RDC321X_MFD_H */ diff --git a/include/linux/mfd/s5m87xx/s5m-core.h b/include/linux/mfd/s5m87xx/s5m-core.h new file mode 100644 index 00000000..a7480b57 --- /dev/null +++ b/include/linux/mfd/s5m87xx/s5m-core.h @@ -0,0 +1,373 @@ +/* + * s5m-core.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
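+ *
+ * Usage sketch (illustrative; mask is a placeholder): sub-drivers go through
+ * the access helpers declared further down in this header, e.g.
+ *
+ *	u8 val;
+ *	ret = s5m_reg_read(s5m87xx, S5M8767_REG_STATUS1, &val);
+ *	if (!ret)
+ *		ret = s5m_reg_update(s5m87xx, S5M8767_REG_CTRL1, val, mask);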
+ * + */ + +#ifndef __LINUX_MFD_S5M_CORE_H +#define __LINUX_MFD_S5M_CORE_H + +#define NUM_IRQ_REGS 4 + +enum s5m_device_type { + S5M8751X, + S5M8763X, + S5M8767X, +}; + +/* S5M8767 registers */ +enum s5m8767_reg { + S5M8767_REG_ID, + S5M8767_REG_INT1, + S5M8767_REG_INT2, + S5M8767_REG_INT3, + S5M8767_REG_INT1M, + S5M8767_REG_INT2M, + S5M8767_REG_INT3M, + S5M8767_REG_STATUS1, + S5M8767_REG_STATUS2, + S5M8767_REG_STATUS3, + S5M8767_REG_CTRL1, + S5M8767_REG_CTRL2, + S5M8767_REG_LOWBAT1, + S5M8767_REG_LOWBAT2, + S5M8767_REG_BUCHG, + S5M8767_REG_DVSRAMP, + S5M8767_REG_DVSTIMER2 = 0x10, + S5M8767_REG_DVSTIMER3, + S5M8767_REG_DVSTIMER4, + S5M8767_REG_LDO1, + S5M8767_REG_LDO2, + S5M8767_REG_LDO3, + S5M8767_REG_LDO4, + S5M8767_REG_LDO5, + S5M8767_REG_LDO6, + S5M8767_REG_LDO7, + S5M8767_REG_LDO8, + S5M8767_REG_LDO9, + S5M8767_REG_LDO10, + S5M8767_REG_LDO11, + S5M8767_REG_LDO12, + S5M8767_REG_LDO13, + S5M8767_REG_LDO14 = 0x20, + S5M8767_REG_LDO15, + S5M8767_REG_LDO16, + S5M8767_REG_LDO17, + S5M8767_REG_LDO18, + S5M8767_REG_LDO19, + S5M8767_REG_LDO20, + S5M8767_REG_LDO21, + S5M8767_REG_LDO22, + S5M8767_REG_LDO23, + S5M8767_REG_LDO24, + S5M8767_REG_LDO25, + S5M8767_REG_LDO26, + S5M8767_REG_LDO27, + S5M8767_REG_LDO28, + S5M8767_REG_UVLO = 0x31, + S5M8767_REG_BUCK1CTRL1, + S5M8767_REG_BUCK1CTRL2, + S5M8767_REG_BUCK2CTRL, + S5M8767_REG_BUCK2DVS1, + S5M8767_REG_BUCK2DVS2, + S5M8767_REG_BUCK2DVS3, + S5M8767_REG_BUCK2DVS4, + S5M8767_REG_BUCK2DVS5, + S5M8767_REG_BUCK2DVS6, + S5M8767_REG_BUCK2DVS7, + S5M8767_REG_BUCK2DVS8, + S5M8767_REG_BUCK3CTRL, + S5M8767_REG_BUCK3DVS1, + S5M8767_REG_BUCK3DVS2, + S5M8767_REG_BUCK3DVS3, + S5M8767_REG_BUCK3DVS4, + S5M8767_REG_BUCK3DVS5, + S5M8767_REG_BUCK3DVS6, + S5M8767_REG_BUCK3DVS7, + S5M8767_REG_BUCK3DVS8, + S5M8767_REG_BUCK4CTRL, + S5M8767_REG_BUCK4DVS1, + S5M8767_REG_BUCK4DVS2, + S5M8767_REG_BUCK4DVS3, + S5M8767_REG_BUCK4DVS4, + S5M8767_REG_BUCK4DVS5, + S5M8767_REG_BUCK4DVS6, + S5M8767_REG_BUCK4DVS7, + S5M8767_REG_BUCK4DVS8, + S5M8767_REG_BUCK5CTRL1, + S5M8767_REG_BUCK5CTRL2, + S5M8767_REG_BUCK5CTRL3, + S5M8767_REG_BUCK5CTRL4, + S5M8767_REG_BUCK5CTRL5, + S5M8767_REG_BUCK6CTRL1, + S5M8767_REG_BUCK6CTRL2, + S5M8767_REG_BUCK7CTRL1, + S5M8767_REG_BUCK7CTRL2, + S5M8767_REG_BUCK8CTRL1, + S5M8767_REG_BUCK8CTRL2, + S5M8767_REG_BUCK9CTRL1, + S5M8767_REG_BUCK9CTRL2, + S5M8767_REG_LDO1CTRL, + S5M8767_REG_LDO2_1CTRL, + S5M8767_REG_LDO2_2CTRL, + S5M8767_REG_LDO2_3CTRL, + S5M8767_REG_LDO2_4CTRL, + S5M8767_REG_LDO3CTRL, + S5M8767_REG_LDO4CTRL, + S5M8767_REG_LDO5CTRL, + S5M8767_REG_LDO6CTRL, + S5M8767_REG_LDO7CTRL, + S5M8767_REG_LDO8CTRL, + S5M8767_REG_LDO9CTRL, + S5M8767_REG_LDO10CTRL, + S5M8767_REG_LDO11CTRL, + S5M8767_REG_LDO12CTRL, + S5M8767_REG_LDO13CTRL, + S5M8767_REG_LDO14CTRL, + S5M8767_REG_LDO15CTRL, + S5M8767_REG_LDO16CTRL, + S5M8767_REG_LDO17CTRL, + S5M8767_REG_LDO18CTRL, + S5M8767_REG_LDO19CTRL, + S5M8767_REG_LDO20CTRL, + S5M8767_REG_LDO21CTRL, + S5M8767_REG_LDO22CTRL, + S5M8767_REG_LDO23CTRL, + S5M8767_REG_LDO24CTRL, + S5M8767_REG_LDO25CTRL, + S5M8767_REG_LDO26CTRL, + S5M8767_REG_LDO27CTRL, + S5M8767_REG_LDO28CTRL, +}; + +/* S5M8763 registers */ +enum s5m8763_reg { + S5M8763_REG_IRQ1, + S5M8763_REG_IRQ2, + S5M8763_REG_IRQ3, + S5M8763_REG_IRQ4, + S5M8763_REG_IRQM1, + S5M8763_REG_IRQM2, + S5M8763_REG_IRQM3, + S5M8763_REG_IRQM4, + S5M8763_REG_STATUS1, + S5M8763_REG_STATUS2, + S5M8763_REG_STATUSM1, + S5M8763_REG_STATUSM2, + S5M8763_REG_CHGR1, + S5M8763_REG_CHGR2, + S5M8763_REG_LDO_ACTIVE_DISCHARGE1, + S5M8763_REG_LDO_ACTIVE_DISCHARGE2, + S5M8763_REG_BUCK_ACTIVE_DISCHARGE3, + 
S5M8763_REG_ONOFF1, + S5M8763_REG_ONOFF2, + S5M8763_REG_ONOFF3, + S5M8763_REG_ONOFF4, + S5M8763_REG_BUCK1_VOLTAGE1, + S5M8763_REG_BUCK1_VOLTAGE2, + S5M8763_REG_BUCK1_VOLTAGE3, + S5M8763_REG_BUCK1_VOLTAGE4, + S5M8763_REG_BUCK2_VOLTAGE1, + S5M8763_REG_BUCK2_VOLTAGE2, + S5M8763_REG_BUCK3, + S5M8763_REG_BUCK4, + S5M8763_REG_LDO1_LDO2, + S5M8763_REG_LDO3, + S5M8763_REG_LDO4, + S5M8763_REG_LDO5, + S5M8763_REG_LDO6, + S5M8763_REG_LDO7, + S5M8763_REG_LDO7_LDO8, + S5M8763_REG_LDO9_LDO10, + S5M8763_REG_LDO11, + S5M8763_REG_LDO12, + S5M8763_REG_LDO13, + S5M8763_REG_LDO14, + S5M8763_REG_LDO15, + S5M8763_REG_LDO16, + S5M8763_REG_BKCHR, + S5M8763_REG_LBCNFG1, + S5M8763_REG_LBCNFG2, +}; + +enum s5m8767_irq { + S5M8767_IRQ_PWRR, + S5M8767_IRQ_PWRF, + S5M8767_IRQ_PWR1S, + S5M8767_IRQ_JIGR, + S5M8767_IRQ_JIGF, + S5M8767_IRQ_LOWBAT2, + S5M8767_IRQ_LOWBAT1, + + S5M8767_IRQ_MRB, + S5M8767_IRQ_DVSOK2, + S5M8767_IRQ_DVSOK3, + S5M8767_IRQ_DVSOK4, + + S5M8767_IRQ_RTC60S, + S5M8767_IRQ_RTCA1, + S5M8767_IRQ_RTCA2, + S5M8767_IRQ_SMPL, + S5M8767_IRQ_RTC1S, + S5M8767_IRQ_WTSR, + + S5M8767_IRQ_NR, +}; + +#define S5M8767_IRQ_PWRR_MASK (1 << 0) +#define S5M8767_IRQ_PWRF_MASK (1 << 1) +#define S5M8767_IRQ_PWR1S_MASK (1 << 3) +#define S5M8767_IRQ_JIGR_MASK (1 << 4) +#define S5M8767_IRQ_JIGF_MASK (1 << 5) +#define S5M8767_IRQ_LOWBAT2_MASK (1 << 6) +#define S5M8767_IRQ_LOWBAT1_MASK (1 << 7) + +#define S5M8767_IRQ_MRB_MASK (1 << 2) +#define S5M8767_IRQ_DVSOK2_MASK (1 << 3) +#define S5M8767_IRQ_DVSOK3_MASK (1 << 4) +#define S5M8767_IRQ_DVSOK4_MASK (1 << 5) + +#define S5M8767_IRQ_RTC60S_MASK (1 << 0) +#define S5M8767_IRQ_RTCA1_MASK (1 << 1) +#define S5M8767_IRQ_RTCA2_MASK (1 << 2) +#define S5M8767_IRQ_SMPL_MASK (1 << 3) +#define S5M8767_IRQ_RTC1S_MASK (1 << 4) +#define S5M8767_IRQ_WTSR_MASK (1 << 5) + +enum s5m8763_irq { + S5M8763_IRQ_DCINF, + S5M8763_IRQ_DCINR, + S5M8763_IRQ_JIGF, + S5M8763_IRQ_JIGR, + S5M8763_IRQ_PWRONF, + S5M8763_IRQ_PWRONR, + + S5M8763_IRQ_WTSREVNT, + S5M8763_IRQ_SMPLEVNT, + S5M8763_IRQ_ALARM1, + S5M8763_IRQ_ALARM0, + + S5M8763_IRQ_ONKEY1S, + S5M8763_IRQ_TOPOFFR, + S5M8763_IRQ_DCINOVPR, + S5M8763_IRQ_CHGRSTF, + S5M8763_IRQ_DONER, + S5M8763_IRQ_CHGFAULT, + + S5M8763_IRQ_LOBAT1, + S5M8763_IRQ_LOBAT2, + + S5M8763_IRQ_NR, +}; + +#define S5M8763_IRQ_DCINF_MASK (1 << 2) +#define S5M8763_IRQ_DCINR_MASK (1 << 3) +#define S5M8763_IRQ_JIGF_MASK (1 << 4) +#define S5M8763_IRQ_JIGR_MASK (1 << 5) +#define S5M8763_IRQ_PWRONF_MASK (1 << 6) +#define S5M8763_IRQ_PWRONR_MASK (1 << 7) + +#define S5M8763_IRQ_WTSREVNT_MASK (1 << 0) +#define S5M8763_IRQ_SMPLEVNT_MASK (1 << 1) +#define S5M8763_IRQ_ALARM1_MASK (1 << 2) +#define S5M8763_IRQ_ALARM0_MASK (1 << 3) + +#define S5M8763_IRQ_ONKEY1S_MASK (1 << 0) +#define S5M8763_IRQ_TOPOFFR_MASK (1 << 2) +#define S5M8763_IRQ_DCINOVPR_MASK (1 << 3) +#define S5M8763_IRQ_CHGRSTF_MASK (1 << 4) +#define S5M8763_IRQ_DONER_MASK (1 << 5) +#define S5M8763_IRQ_CHGFAULT_MASK (1 << 7) + +#define S5M8763_IRQ_LOBAT1_MASK (1 << 0) +#define S5M8763_IRQ_LOBAT2_MASK (1 << 1) + +#define S5M8763_ENRAMP (1 << 4) + +/** + * struct s5m87xx_dev - s5m87xx master device for sub-drivers + * @dev: master device of the chip (can be used to access platform data) + * @i2c: i2c client private data for regulator + * @rtc: i2c client private data for rtc + * @iolock: mutex for serializing io access + * @irqlock: mutex for buslock + * @irq_base: base IRQ number for s5m87xx, required for IRQs + * @irq: generic IRQ number for s5m87xx + * @ono: power onoff IRQ number for s5m87xx + * @irq_masks_cur: currently active value + * 
@irq_masks_cache: cached hardware value + * @type: indicate which s5m87xx "variant" is used + */ +struct s5m87xx_dev { + struct device *dev; + struct regmap *regmap; + struct i2c_client *i2c; + struct i2c_client *rtc; + struct mutex iolock; + struct mutex irqlock; + + int device_type; + int irq_base; + int irq; + int ono; + u8 irq_masks_cur[NUM_IRQ_REGS]; + u8 irq_masks_cache[NUM_IRQ_REGS]; + int type; + bool wakeup; +}; + +int s5m_irq_init(struct s5m87xx_dev *s5m87xx); +void s5m_irq_exit(struct s5m87xx_dev *s5m87xx); +int s5m_irq_resume(struct s5m87xx_dev *s5m87xx); + +extern int s5m_reg_read(struct s5m87xx_dev *s5m87xx, u8 reg, void *dest); +extern int s5m_bulk_read(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf); +extern int s5m_reg_write(struct s5m87xx_dev *s5m87xx, u8 reg, u8 value); +extern int s5m_bulk_write(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf); +extern int s5m_reg_update(struct s5m87xx_dev *s5m87xx, u8 reg, u8 val, u8 mask); + +struct s5m_platform_data { + struct s5m_regulator_data *regulators; + int device_type; + int num_regulators; + + int irq_base; + int (*cfg_pmic_irq)(void); + + int ono; + bool wakeup; + bool buck_voltage_lock; + + int buck_gpios[3]; + int buck2_voltage[8]; + bool buck2_gpiodvs; + int buck3_voltage[8]; + bool buck3_gpiodvs; + int buck4_voltage[8]; + bool buck4_gpiodvs; + + int buck_set1; + int buck_set2; + int buck_set3; + int buck2_enable; + int buck3_enable; + int buck4_enable; + int buck_default_idx; + int buck2_default_idx; + int buck3_default_idx; + int buck4_default_idx; + + int buck_ramp_delay; + bool buck2_ramp_enable; + bool buck3_ramp_enable; + bool buck4_ramp_enable; +}; + +#endif /* __LINUX_MFD_S5M_CORE_H */ diff --git a/include/linux/mfd/s5m87xx/s5m-pmic.h b/include/linux/mfd/s5m87xx/s5m-pmic.h new file mode 100644 index 00000000..a72a5d27 --- /dev/null +++ b/include/linux/mfd/s5m87xx/s5m-pmic.h @@ -0,0 +1,100 @@ +/* s5m87xx.h + * + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
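+ *
+ * Usage sketch (illustrative, with made-up board values): regulators are
+ * described through the s5m_regulator_data entries defined below, e.g.
+ *
+ *	static struct regulator_init_data vdd_arm_data = {
+ *		.constraints = {
+ *			.name		= "VDD_ARM",
+ *			.always_on	= 1,
+ *		},
+ *	};
+ *
+ *	static struct s5m_regulator_data s5m8767_regulators[] = {
+ *		{ .id = S5M8767_BUCK2, .initdata = &vdd_arm_data },
+ *	};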
+*/ + +#ifndef __LINUX_MFD_S5M_PMIC_H +#define __LINUX_MFD_S5M_PMIC_H + +#include <linux/regulator/machine.h> + +/* S5M8767 regulator ids */ +enum s5m8767_regulators { + S5M8767_LDO1, + S5M8767_LDO2, + S5M8767_LDO3, + S5M8767_LDO4, + S5M8767_LDO5, + S5M8767_LDO6, + S5M8767_LDO7, + S5M8767_LDO8, + S5M8767_LDO9, + S5M8767_LDO10, + S5M8767_LDO11, + S5M8767_LDO12, + S5M8767_LDO13, + S5M8767_LDO14, + S5M8767_LDO15, + S5M8767_LDO16, + S5M8767_LDO17, + S5M8767_LDO18, + S5M8767_LDO19, + S5M8767_LDO20, + S5M8767_LDO21, + S5M8767_LDO22, + S5M8767_LDO23, + S5M8767_LDO24, + S5M8767_LDO25, + S5M8767_LDO26, + S5M8767_LDO27, + S5M8767_LDO28, + S5M8767_BUCK1, + S5M8767_BUCK2, + S5M8767_BUCK3, + S5M8767_BUCK4, + S5M8767_BUCK5, + S5M8767_BUCK6, + S5M8767_BUCK7, + S5M8767_BUCK8, + S5M8767_BUCK9, + S5M8767_AP_EN32KHZ, + S5M8767_CP_EN32KHZ, + + S5M8767_REG_MAX, +}; + +/* S5M8763 regulator ids */ +enum s5m8763_regulators { + S5M8763_LDO1, + S5M8763_LDO2, + S5M8763_LDO3, + S5M8763_LDO4, + S5M8763_LDO5, + S5M8763_LDO6, + S5M8763_LDO7, + S5M8763_LDO8, + S5M8763_LDO9, + S5M8763_LDO10, + S5M8763_LDO11, + S5M8763_LDO12, + S5M8763_LDO13, + S5M8763_LDO14, + S5M8763_LDO15, + S5M8763_LDO16, + S5M8763_BUCK1, + S5M8763_BUCK2, + S5M8763_BUCK3, + S5M8763_BUCK4, + S5M8763_AP_EN32KHZ, + S5M8763_CP_EN32KHZ, + S5M8763_ENCHGVI, + S5M8763_ESAFEUSB1, + S5M8763_ESAFEUSB2, +}; + +/** + * s5m87xx_regulator_data - regulator data + * @id: regulator id + * @initdata: regulator init data (contraints, supplies, ...) + */ +struct s5m_regulator_data { + int id; + struct regulator_init_data *initdata; +}; + +#endif /* __LINUX_MFD_S5M_PMIC_H */ diff --git a/include/linux/mfd/s5m87xx/s5m-rtc.h b/include/linux/mfd/s5m87xx/s5m-rtc.h new file mode 100644 index 00000000..6ce8da26 --- /dev/null +++ b/include/linux/mfd/s5m87xx/s5m-rtc.h @@ -0,0 +1,84 @@ +/* + * s5m-rtc.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
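+ *
+ * Usage sketch (illustrative; assumes the time registers are read through
+ * the bulk helper from s5m-core.h): the register layout below is indexed
+ * with the RTC_* offsets, e.g.
+ *
+ *	u8 data[8];
+ *	ret = s5m_bulk_read(s5m87xx, S5M87XX_RTC_SEC, 8, data);
+ *	sec = data[RTC_SEC] & 0x7f;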
+ * + */ + +#ifndef __LINUX_MFD_S5M_RTC_H +#define __LINUX_MFD_S5M_RTC_H + +enum s5m87xx_rtc_reg { + S5M87XX_RTC_SEC, + S5M87XX_RTC_MIN, + S5M87XX_RTC_HOUR, + S5M87XX_RTC_WEEKDAY, + S5M87XX_RTC_DATE, + S5M87XX_RTC_MONTH, + S5M87XX_RTC_YEAR1, + S5M87XX_RTC_YEAR2, + S5M87XX_ALARM0_SEC, + S5M87XX_ALARM0_MIN, + S5M87XX_ALARM0_HOUR, + S5M87XX_ALARM0_WEEKDAY, + S5M87XX_ALARM0_DATE, + S5M87XX_ALARM0_MONTH, + S5M87XX_ALARM0_YEAR1, + S5M87XX_ALARM0_YEAR2, + S5M87XX_ALARM1_SEC, + S5M87XX_ALARM1_MIN, + S5M87XX_ALARM1_HOUR, + S5M87XX_ALARM1_WEEKDAY, + S5M87XX_ALARM1_DATE, + S5M87XX_ALARM1_MONTH, + S5M87XX_ALARM1_YEAR1, + S5M87XX_ALARM1_YEAR2, + S5M87XX_ALARM0_CONF, + S5M87XX_ALARM1_CONF, + S5M87XX_RTC_STATUS, + S5M87XX_WTSR_SMPL_CNTL, + S5M87XX_RTC_UDR_CON, +}; + +#define RTC_I2C_ADDR (0x0C >> 1) + +#define HOUR_12 (1 << 7) +#define HOUR_AMPM (1 << 6) +#define HOUR_PM (1 << 5) +#define ALARM0_STATUS (1 << 1) +#define ALARM1_STATUS (1 << 2) +#define UPDATE_AD (1 << 0) + +/* RTC Control Register */ +#define BCD_EN_SHIFT 0 +#define BCD_EN_MASK (1 << BCD_EN_SHIFT) +#define MODEL24_SHIFT 1 +#define MODEL24_MASK (1 << MODEL24_SHIFT) +/* RTC Update Register1 */ +#define RTC_UDR_SHIFT 0 +#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT) +/* RTC Hour register */ +#define HOUR_PM_SHIFT 6 +#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT) +/* RTC Alarm Enable */ +#define ALARM_ENABLE_SHIFT 7 +#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT) + +enum { + RTC_SEC = 0, + RTC_MIN, + RTC_HOUR, + RTC_WEEKDAY, + RTC_DATE, + RTC_MONTH, + RTC_YEAR1, + RTC_YEAR2, +}; + +#endif /* __LINUX_MFD_S5M_RTC_H */ diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h new file mode 100644 index 00000000..8516fd1e --- /dev/null +++ b/include/linux/mfd/stmpe.h @@ -0,0 +1,225 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson + */ + +#ifndef __LINUX_MFD_STMPE_H +#define __LINUX_MFD_STMPE_H + +#include <linux/mutex.h> + +struct device; + +enum stmpe_block { + STMPE_BLOCK_GPIO = 1 << 0, + STMPE_BLOCK_KEYPAD = 1 << 1, + STMPE_BLOCK_TOUCHSCREEN = 1 << 2, + STMPE_BLOCK_ADC = 1 << 3, + STMPE_BLOCK_PWM = 1 << 4, + STMPE_BLOCK_ROTATOR = 1 << 5, +}; + +enum stmpe_partnum { + STMPE610, + STMPE801, + STMPE811, + STMPE1601, + STMPE2401, + STMPE2403, + STMPE_NBR_PARTS +}; + +/* + * For registers whose locations differ on variants, the correct address is + * obtained by indexing stmpe->regs with one of the following. + */ +enum { + STMPE_IDX_CHIP_ID, + STMPE_IDX_ICR_LSB, + STMPE_IDX_IER_LSB, + STMPE_IDX_ISR_MSB, + STMPE_IDX_GPMR_LSB, + STMPE_IDX_GPSR_LSB, + STMPE_IDX_GPCR_LSB, + STMPE_IDX_GPDR_LSB, + STMPE_IDX_GPEDR_MSB, + STMPE_IDX_GPRER_LSB, + STMPE_IDX_GPFER_LSB, + STMPE_IDX_GPAFR_U_MSB, + STMPE_IDX_IEGPIOR_LSB, + STMPE_IDX_ISGPIOR_MSB, + STMPE_IDX_MAX, +}; + + +struct stmpe_variant_info; +struct stmpe_client_info; + +/** + * struct stmpe - STMPE MFD structure + * @lock: lock protecting I/O operations + * @irq_lock: IRQ bus lock + * @dev: device, mostly for dev_dbg() + * @client: client - i2c or spi + * @ci: client specific information + * @partnum: part number + * @variant: the detected STMPE model number + * @regs: list of addresses of registers which are at different addresses on + * different variants. Indexed by one of STMPE_IDX_*. 
+ * @irq: irq number for stmpe + * @irq_base: starting IRQ number for internal IRQs + * @num_gpios: number of gpios, differs for variants + * @ier: cache of IER registers for bus_lock + * @oldier: cache of IER registers for bus_lock + * @pdata: platform data + */ +struct stmpe { + struct mutex lock; + struct mutex irq_lock; + struct device *dev; + void *client; + struct stmpe_client_info *ci; + enum stmpe_partnum partnum; + struct stmpe_variant_info *variant; + const u8 *regs; + + int irq; + int irq_base; + int num_gpios; + u8 ier[2]; + u8 oldier[2]; + struct stmpe_platform_data *pdata; +}; + +extern int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 data); +extern int stmpe_reg_read(struct stmpe *stmpe, u8 reg); +extern int stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length, + u8 *values); +extern int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length, + const u8 *values); +extern int stmpe_set_bits(struct stmpe *stmpe, u8 reg, u8 mask, u8 val); +extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, + enum stmpe_block block); +extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks); +extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); + +struct matrix_keymap_data; + +/** + * struct stmpe_keypad_platform_data - STMPE keypad platform data + * @keymap_data: key map table and size + * @debounce_ms: debounce interval, in ms. Maximum is + * %STMPE_KEYPAD_MAX_DEBOUNCE. + * @scan_count: number of key scanning cycles to confirm key data. + * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT. + * @no_autorepeat: disable key autorepeat + */ +struct stmpe_keypad_platform_data { + struct matrix_keymap_data *keymap_data; + unsigned int debounce_ms; + unsigned int scan_count; + bool no_autorepeat; +}; + +#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) + +/** + * struct stmpe_gpio_platform_data - STMPE GPIO platform data + * @gpio_base: first gpio number assigned. A maximum of + * %STMPE_NR_GPIOS GPIOs will be allocated. + * @norequest_mask: bitmask specifying which GPIOs should _not_ be + * requestable due to different usage (e.g. touch, keypad) + * STMPE_GPIO_NOREQ_* macros can be used here. + * @setup: board specific setup callback. + * @remove: board specific remove callback + */ +struct stmpe_gpio_platform_data { + int gpio_base; + unsigned norequest_mask; + void (*setup)(struct stmpe *stmpe, unsigned gpio_base); + void (*remove)(struct stmpe *stmpe, unsigned gpio_base); +}; + +/** + * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform + * data + * @sample_time: ADC converstion time in number of clock. + * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks, + * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks), + * recommended is 4. 
+ * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC) + * @ref_sel: ADC reference source + * (0 -> internal reference, 1 -> external reference) + * @adc_freq: ADC Clock speed + * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 || 3 -> 6.5 MHz) + * @ave_ctrl: Sample average control + * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples) + * @touch_det_delay: Touch detect interrupt delay + * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us, + * 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms) + * recommended is 3 + * @settling: Panel driver settling time + * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms, + * 4 -> 5 ms, 5 -> 10 ms, 6 for 50 ms, 7 -> 100 ms) + * recommended is 2 + * @fraction_z: Length of the fractional part in z + * (fraction_z ([0..7]) = Count of the fractional part) + * recommended is 7 + * @i_drive: current limit value of the touchscreen drivers + * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max) + * + * */ +struct stmpe_ts_platform_data { + u8 sample_time; + u8 mod_12b; + u8 ref_sel; + u8 adc_freq; + u8 ave_ctrl; + u8 touch_det_delay; + u8 settling; + u8 fraction_z; + u8 i_drive; +}; + +/** + * struct stmpe_platform_data - STMPE platform data + * @id: device id to distinguish between multiple STMPEs on the same board + * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*) + * @irq_trigger: IRQ trigger to use for the interrupt to the host + * @irq_invert_polarity: IRQ line is connected with reversed polarity + * @autosleep: bool to enable/disable stmpe autosleep + * @autosleep_timeout: inactivity timeout in milliseconds for autosleep + * @irq_base: base IRQ number. %STMPE_NR_IRQS irqs will be used, or + * %STMPE_NR_INTERNAL_IRQS if the GPIO driver is not used. + * @irq_over_gpio: true if gpio is used to get irq + * @irq_gpio: gpio number over which irq will be requested (significant only if + * irq_over_gpio is true) + * @gpio: GPIO-specific platform data + * @keypad: keypad-specific platform data + * @ts: touchscreen-specific platform data + */ +struct stmpe_platform_data { + int id; + unsigned int blocks; + int irq_base; + unsigned int irq_trigger; + bool irq_invert_polarity; + bool autosleep; + bool irq_over_gpio; + int irq_gpio; + int autosleep_timeout; + + struct stmpe_gpio_platform_data *gpio; + struct stmpe_keypad_platform_data *keypad; + struct stmpe_ts_platform_data *ts; +}; + +#define STMPE_NR_INTERNAL_IRQS 9 +#define STMPE_INT_GPIO(x) (STMPE_NR_INTERNAL_IRQS + (x)) + +#define STMPE_NR_GPIOS 24 +#define STMPE_NR_IRQS STMPE_INT_GPIO(STMPE_NR_GPIOS) + +#endif diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h new file mode 100644 index 00000000..b4629818 --- /dev/null +++ b/include/linux/mfd/t7l66xb.h @@ -0,0 +1,34 @@ +/* + * This file contains the definitions for the T7L66XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ +#ifndef MFD_T7L66XB_H +#define MFD_T7L66XB_H + +#include <linux/mfd/core.h> +#include <linux/mfd/tmio.h> + +struct t7l66xb_platform_data { + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* The base for subdevice irqs */ + + struct tmio_nand_data *nand_data; +}; + + +#define IRQ_T7L66XB_MMC (1) +#define IRQ_T7L66XB_NAND (3) + +#define T7L66XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h new file mode 100644 index 00000000..3acb3a8e --- /dev/null +++ b/include/linux/mfd/tc3589x.h @@ -0,0 +1,195 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + */ + +#ifndef __LINUX_MFD_TC3589x_H +#define __LINUX_MFD_TC3589x_H + +struct device; + +enum tx3589x_block { + TC3589x_BLOCK_GPIO = 1 << 0, + TC3589x_BLOCK_KEYPAD = 1 << 1, +}; + +#define TC3589x_RSTCTRL_IRQRST (1 << 4) +#define TC3589x_RSTCTRL_TIMRST (1 << 3) +#define TC3589x_RSTCTRL_ROTRST (1 << 2) +#define TC3589x_RSTCTRL_KBDRST (1 << 1) +#define TC3589x_RSTCTRL_GPIRST (1 << 0) + +/* Keyboard Configuration Registers */ +#define TC3589x_KBDSETTLE_REG 0x01 +#define TC3589x_KBDBOUNCE 0x02 +#define TC3589x_KBDSIZE 0x03 +#define TC3589x_KBCFG_LSB 0x04 +#define TC3589x_KBCFG_MSB 0x05 +#define TC3589x_KBDIC 0x08 +#define TC3589x_KBDMSK 0x09 +#define TC3589x_EVTCODE_FIFO 0x10 +#define TC3589x_KBDMFS 0x8F + +#define TC3589x_IRQST 0x91 + +#define TC3589x_MANFCODE_MAGIC 0x03 +#define TC3589x_MANFCODE 0x80 +#define TC3589x_VERSION 0x81 +#define TC3589x_IOCFG 0xA7 + +#define TC3589x_CLKMODE 0x88 +#define TC3589x_CLKCFG 0x89 +#define TC3589x_CLKEN 0x8A + +#define TC3589x_RSTCTRL 0x82 +#define TC3589x_EXTRSTN 0x83 +#define TC3589x_RSTINTCLR 0x84 + +/* Pull up/down configuration registers */ +#define TC3589x_IOCFG 0xA7 +#define TC3589x_IOPULLCFG0_LSB 0xAA +#define TC3589x_IOPULLCFG0_MSB 0xAB +#define TC3589x_IOPULLCFG1_LSB 0xAC +#define TC3589x_IOPULLCFG1_MSB 0xAD +#define TC3589x_IOPULLCFG2_LSB 0xAE + +#define TC3589x_GPIOIS0 0xC9 +#define TC3589x_GPIOIS1 0xCA +#define TC3589x_GPIOIS2 0xCB +#define TC3589x_GPIOIBE0 0xCC +#define TC3589x_GPIOIBE1 0xCD +#define TC3589x_GPIOIBE2 0xCE +#define TC3589x_GPIOIEV0 0xCF +#define TC3589x_GPIOIEV1 0xD0 +#define TC3589x_GPIOIEV2 0xD1 +#define TC3589x_GPIOIE0 0xD2 +#define TC3589x_GPIOIE1 0xD3 +#define TC3589x_GPIOIE2 0xD4 +#define TC3589x_GPIORIS0 0xD6 +#define TC3589x_GPIORIS1 0xD7 +#define TC3589x_GPIORIS2 0xD8 +#define TC3589x_GPIOMIS0 0xD9 +#define TC3589x_GPIOMIS1 0xDA +#define TC3589x_GPIOMIS2 0xDB +#define TC3589x_GPIOIC0 0xDC +#define TC3589x_GPIOIC1 0xDD +#define TC3589x_GPIOIC2 0xDE + +#define TC3589x_GPIODATA0 0xC0 +#define TC3589x_GPIOMASK0 0xc1 +#define TC3589x_GPIODATA1 0xC2 +#define TC3589x_GPIOMASK1 0xc3 +#define TC3589x_GPIODATA2 0xC4 +#define TC3589x_GPIOMASK2 0xC5 + +#define TC3589x_GPIODIR0 0xC6 +#define TC3589x_GPIODIR1 0xC7 +#define TC3589x_GPIODIR2 0xC8 + +#define TC3589x_GPIOSYNC0 0xE6 +#define TC3589x_GPIOSYNC1 0xE7 +#define TC3589x_GPIOSYNC2 0xE8 + +#define TC3589x_GPIOWAKE0 0xE9 +#define TC3589x_GPIOWAKE1 0xEA +#define TC3589x_GPIOWAKE2 0xEB + +#define TC3589x_GPIOODM0 0xE0 +#define TC3589x_GPIOODE0 0xE1 +#define TC3589x_GPIOODM1 0xE2 +#define TC3589x_GPIOODE1 0xE3 +#define TC3589x_GPIOODM2 0xE4 +#define TC3589x_GPIOODE2 0xE5 + +#define TC3589x_INT_GPIIRQ 0 +#define TC3589x_INT_TI0IRQ 1 +#define TC3589x_INT_TI1IRQ 2 +#define 
TC3589x_INT_TI2IRQ 3 +#define TC3589x_INT_ROTIRQ 5 +#define TC3589x_INT_KBDIRQ 6 +#define TC3589x_INT_PORIRQ 7 + +#define TC3589x_NR_INTERNAL_IRQS 8 +#define TC3589x_INT_GPIO(x) (TC3589x_NR_INTERNAL_IRQS + (x)) + +struct tc3589x { + struct mutex lock; + struct device *dev; + struct i2c_client *i2c; + + int irq_base; + int num_gpio; + struct tc3589x_platform_data *pdata; +}; + +extern int tc3589x_reg_write(struct tc3589x *tc3589x, u8 reg, u8 data); +extern int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg); +extern int tc3589x_block_read(struct tc3589x *tc3589x, u8 reg, u8 length, + u8 *values); +extern int tc3589x_block_write(struct tc3589x *tc3589x, u8 reg, u8 length, + const u8 *values); +extern int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val); + +/* + * Keypad related platform specific constants + * These values may be modified for fine tuning + */ +#define TC_KPD_ROWS 0x8 +#define TC_KPD_COLUMNS 0x8 +#define TC_KPD_DEBOUNCE_PERIOD 0xA3 +#define TC_KPD_SETTLE_TIME 0xA3 + +/** + * struct tc35893_platform_data - data structure for platform specific data + * @keymap_data: matrix scan code table for keycodes + * @krow: mask for available rows, value is 0xFF + * @kcol: mask for available columns, value is 0xFF + * @debounce_period: platform specific debounce time + * @settle_time: platform specific settle down time + * @irqtype: type of interrupt, falling or rising edge + * @enable_wakeup: specifies if keypad event can wake up system from sleep + * @no_autorepeat: flag for auto repetition + */ +struct tc3589x_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + u8 krow; + u8 kcol; + u8 debounce_period; + u8 settle_time; + unsigned long irqtype; + bool enable_wakeup; + bool no_autorepeat; +}; + +/** + * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data + * @gpio_base: first gpio number assigned to TC3589x. A maximum of + * %TC3589x_NR_GPIOS GPIOs will be allocated. + * @setup: callback for board-specific initialization + * @remove: callback for board-specific teardown + */ +struct tc3589x_gpio_platform_data { + int gpio_base; + void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base); + void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base); +}; + +/** + * struct tc3589x_platform_data - TC3589x platform data + * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) + * @irq_base: base IRQ number. %TC3589x_NR_IRQS irqs will be used. + * @gpio: GPIO-specific platform data + * @keypad: keypad-specific platform data + */ +struct tc3589x_platform_data { + unsigned int block; + int irq_base; + struct tc3589x_gpio_platform_data *gpio; + const struct tc3589x_keypad_platform_data *keypad; +}; + +#define TC3589x_NR_GPIOS 24 +#define TC3589x_NR_IRQS TC3589x_INT_GPIO(TC3589x_NR_GPIOS) + +#endif diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h new file mode 100644 index 00000000..b4888209 --- /dev/null +++ b/include/linux/mfd/tc6387xb.h @@ -0,0 +1,20 @@ +/* + * This file contains the definitions for the TC6387XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. 
+ * + */ +#ifndef MFD_TC6387XB_H +#define MFD_TC6387XB_H + +struct tc6387xb_platform_data { + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); +}; + +#endif diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h new file mode 100644 index 00000000..626e4482 --- /dev/null +++ b/include/linux/mfd/tc6393xb.h @@ -0,0 +1,59 @@ +/* + * Toshiba TC6393XB SoC support + * + * Copyright(c) 2005-2006 Chris Humbert + * Copyright(c) 2005 Dirk Opfer + * Copyright(c) 2005 Ian Molton <spyro@f2s.com> + * Copyright(c) 2007 Dmitry Baryshkov + * + * Based on code written by Sharp/Lineo for 2.4 kernels + * Based on locomo.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MFD_TC6393XB_H +#define MFD_TC6393XB_H + +#include <linux/fb.h> + +/* Also one should provide the CK3P6MI clock */ +struct tc6393xb_platform_data { + u16 scr_pll2cr; /* PLL2 Control */ + u16 scr_gper; /* GP Enable */ + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* base for subdevice irqs */ + int gpio_base; + int (*setup)(struct platform_device *dev); + void (*teardown)(struct platform_device *dev); + + struct tmio_nand_data *nand_data; + struct tmio_fb_data *fb_data; + + unsigned resume_restore : 1; /* make special actions + to preserve the state + on suspend/resume */ +}; + +extern int tc6393xb_lcd_mode(struct platform_device *fb, + const struct fb_videomode *mode); +extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on); + +/* + * Relative to irq_base + */ +#define IRQ_TC6393_NAND 0 +#define IRQ_TC6393_MMC 1 +#define IRQ_TC6393_OHCI 2 +#define IRQ_TC6393_FB 4 + +#define TC6393XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/ti_ssp.h b/include/linux/mfd/ti_ssp.h new file mode 100644 index 00000000..dbb4b43b --- /dev/null +++ b/include/linux/mfd/ti_ssp.h @@ -0,0 +1,93 @@ +/* + * Sequencer Serial Port (SSP) driver for Texas Instruments' SoCs + * + * Copyright (C) 2010 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __TI_SSP_H__ +#define __TI_SSP_H__ + +struct ti_ssp_dev_data { + const char *dev_name; + void *pdata; + size_t pdata_size; +}; + +struct ti_ssp_data { + unsigned long out_clock; + struct ti_ssp_dev_data dev_data[2]; +}; + +struct ti_ssp_spi_data { + unsigned long iosel; + int num_cs; + void (*select)(int cs); +}; + +/* + * Sequencer port IO pin configuration bits. These do not correlate 1-1 with + * the hardware. 
The iosel field in the port data combines iosel1 and iosel2, + * and is therefore not a direct map to register space. It is best to use the + * macros below to construct iosel values. + * + * least significant 16 bits --> iosel1 + * most significant 16 bits --> iosel2 + */ + +#define SSP_IN 0x0000 +#define SSP_DATA 0x0001 +#define SSP_CLOCK 0x0002 +#define SSP_CHIPSEL 0x0003 +#define SSP_OUT 0x0004 +#define SSP_PIN_SEL(pin, v) ((v) << ((pin) * 3)) +#define SSP_PIN_MASK(pin) SSP_PIN_SEL(pin, 0x7) +#define SSP_INPUT_SEL(pin) ((pin) << 16) + +/* Sequencer port config bits */ +#define SSP_EARLY_DIN BIT(8) +#define SSP_DELAY_DOUT BIT(9) + +/* Sequence map definitions */ +#define SSP_CLK_HIGH BIT(0) +#define SSP_CLK_LOW 0 +#define SSP_DATA_HIGH BIT(1) +#define SSP_DATA_LOW 0 +#define SSP_CS_HIGH BIT(2) +#define SSP_CS_LOW 0 +#define SSP_OUT_MODE BIT(3) +#define SSP_IN_MODE 0 +#define SSP_DATA_REG BIT(4) +#define SSP_ADDR_REG 0 + +#define SSP_OPCODE_DIRECT ((0x0) << 5) +#define SSP_OPCODE_TOGGLE ((0x1) << 5) +#define SSP_OPCODE_SHIFT ((0x2) << 5) +#define SSP_OPCODE_BRANCH0 ((0x4) << 5) +#define SSP_OPCODE_BRANCH1 ((0x5) << 5) +#define SSP_OPCODE_BRANCH ((0x6) << 5) +#define SSP_OPCODE_STOP ((0x7) << 5) +#define SSP_BRANCH(addr) ((addr) << 8) +#define SSP_COUNT(cycles) ((cycles) << 8) + +int ti_ssp_raw_read(struct device *dev); +int ti_ssp_raw_write(struct device *dev, u32 val); +int ti_ssp_load(struct device *dev, int offs, u32* prog, int len); +int ti_ssp_run(struct device *dev, u32 pc, u32 input, u32 *output); +int ti_ssp_set_mode(struct device *dev, int mode); +int ti_ssp_set_iosel(struct device *dev, u32 iosel); + +#endif /* __TI_SSP_H__ */ diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h new file mode 100644 index 00000000..f5171dbf --- /dev/null +++ b/include/linux/mfd/tmio.h @@ -0,0 +1,153 @@ +#ifndef MFD_TMIO_H +#define MFD_TMIO_H + +#include <linux/device.h> +#include <linux/fb.h> +#include <linux/io.h> +#include <linux/jiffies.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#define tmio_ioread8(addr) readb(addr) +#define tmio_ioread16(addr) readw(addr) +#define tmio_ioread16_rep(r, b, l) readsw(r, b, l) +#define tmio_ioread32(addr) \ + (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16)) + +#define tmio_iowrite8(val, addr) writeb((val), (addr)) +#define tmio_iowrite16(val, addr) writew((val), (addr)) +#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) +#define tmio_iowrite32(val, addr) \ + do { \ + writew((val), (addr)); \ + writew((val) >> 16, (addr) + 2); \ + } while (0) + +#define CNF_CMD 0x04 +#define CNF_CTL_BASE 0x10 +#define CNF_INT_PIN 0x3d +#define CNF_STOP_CLK_CTL 0x40 +#define CNF_GCLK_CTL 0x41 +#define CNF_SD_CLK_MODE 0x42 +#define CNF_PIN_STATUS 0x44 +#define CNF_PWR_CTL_1 0x48 +#define CNF_PWR_CTL_2 0x49 +#define CNF_PWR_CTL_3 0x4a +#define CNF_CARD_DETECT_MODE 0x4c +#define CNF_SD_SLOT 0x50 +#define CNF_EXT_GCLK_CTL_1 0xf0 +#define CNF_EXT_GCLK_CTL_2 0xf1 +#define CNF_EXT_GCLK_CTL_3 0xf9 +#define CNF_SD_LED_EN_1 0xfa +#define CNF_SD_LED_EN_2 0xfe + +#define SDCREN 0x2 /* Enable access to MMC CTL regs. 
(flag in COMMAND_REG)*/ + +#define sd_config_write8(base, shift, reg, val) \ + tmio_iowrite8((val), (base) + ((reg) << (shift))) +#define sd_config_write16(base, shift, reg, val) \ + tmio_iowrite16((val), (base) + ((reg) << (shift))) +#define sd_config_write32(base, shift, reg, val) \ + do { \ + tmio_iowrite16((val), (base) + ((reg) << (shift))); \ + tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \ + } while (0) + +/* tmio MMC platform flags */ +#define TMIO_MMC_WRPROTECT_DISABLE (1 << 0) +/* + * Some controllers can support a 2-byte block size when the bus width + * is configured in 4-bit mode. + */ +#define TMIO_MMC_BLKSZ_2BYTES (1 << 1) +/* + * Some controllers can support SDIO IRQ signalling. + */ +#define TMIO_MMC_SDIO_IRQ (1 << 2) +/* + * Some platforms can detect card insertion events with controller powered + * down, using a GPIO IRQ, in which case they have to fill in cd_irq, cd_gpio, + * and cd_flags fields of struct tmio_mmc_data. + */ +#define TMIO_MMC_HAS_COLD_CD (1 << 3) +/* + * Some controllers require waiting for the SD bus to become + * idle before writing to some registers. + */ +#define TMIO_MMC_HAS_IDLE_WAIT (1 << 4) +/* + * A GPIO is used for card hotplug detection. We need an extra flag for this, + * because 0 is a valid GPIO number too, and requiring users to specify + * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility. + */ +#define TMIO_MMC_USE_GPIO_CD (1 << 5) + +int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); +int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); +void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); +void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state); + +struct tmio_mmc_dma { + void *chan_priv_tx; + void *chan_priv_rx; + int alignment_shift; +}; + +struct tmio_mmc_host; + +/* + * data for the MMC controller + */ +struct tmio_mmc_data { + unsigned int hclk; + unsigned long capabilities; + unsigned long flags; + u32 ocr_mask; /* available voltages */ + struct tmio_mmc_dma *dma; + struct device *dev; + unsigned int cd_gpio; + void (*set_pwr)(struct platform_device *host, int state); + void (*set_clk_div)(struct platform_device *host, int state); + int (*get_cd)(struct platform_device *host); + int (*write16_hook)(struct tmio_mmc_host *host, int addr); +}; + +/* + * This function is deprecated and will be removed soon. 
Please convert your + * platform to use drivers/mmc/core/cd-gpio.c + */ +#include <linux/mmc/host.h> +static inline void tmio_mmc_cd_wakeup(struct tmio_mmc_data *pdata) +{ + if (pdata) + mmc_detect_change(dev_get_drvdata(pdata->dev), + msecs_to_jiffies(100)); +} + +/* + * data for the NAND controller + */ +struct tmio_nand_data { + struct nand_bbt_descr *badblock_pattern; + struct mtd_partition *partition; + unsigned int num_partitions; +}; + +#define FBIO_TMIO_ACC_WRITE 0x7C639300 +#define FBIO_TMIO_ACC_SYNC 0x7C639301 + +struct tmio_fb_data { + int (*lcd_set_power)(struct platform_device *fb_dev, + bool on); + int (*lcd_mode)(struct platform_device *fb_dev, + const struct fb_videomode *mode); + int num_modes; + struct fb_videomode *modes; + + /* in mm: size of screen */ + int height; + int width; +}; + + +#endif diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h new file mode 100644 index 00000000..386743dd --- /dev/null +++ b/include/linux/mfd/tps6105x.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef MFD_TPS6105X_H +#define MFD_TPS6105X_H + +#include <linux/i2c.h> +#include <linux/regulator/machine.h> + +/* + * Register definitions to all subdrivers + */ +#define TPS6105X_REG_0 0x00 +#define TPS6105X_REG0_MODE_SHIFT 6 +#define TPS6105X_REG0_MODE_MASK (0x03<<6) +/* These defines are for both reg0 and reg1 */ +#define TPS6105X_REG0_MODE_SHUTDOWN 0x00 +#define TPS6105X_REG0_MODE_TORCH 0x01 +#define TPS6105X_REG0_MODE_TORCH_FLASH 0x02 +#define TPS6105X_REG0_MODE_VOLTAGE 0x03 +#define TPS6105X_REG0_VOLTAGE_SHIFT 4 +#define TPS6105X_REG0_VOLTAGE_MASK (3<<4) +#define TPS6105X_REG0_VOLTAGE_450 0 +#define TPS6105X_REG0_VOLTAGE_500 1 +#define TPS6105X_REG0_VOLTAGE_525 2 +#define TPS6105X_REG0_VOLTAGE_500_2 3 +#define TPS6105X_REG0_DIMMING_SHIFT 3 +#define TPS6105X_REG0_TORCHC_SHIFT 0 +#define TPS6105X_REG0_TORCHC_MASK (7<<0) +#define TPS6105X_REG0_TORCHC_0 0x00 +#define TPS6105X_REG0_TORCHC_50 0x01 +#define TPS6105X_REG0_TORCHC_75 0x02 +#define TPS6105X_REG0_TORCHC_100 0x03 +#define TPS6105X_REG0_TORCHC_150 0x04 +#define TPS6105X_REG0_TORCHC_200 0x05 +#define TPS6105X_REG0_TORCHC_250_400 0x06 +#define TPS6105X_REG0_TORCHC_250_500 0x07 +#define TPS6105X_REG_1 0x01 +#define TPS6105X_REG1_MODE_SHIFT 6 +#define TPS6105X_REG1_MODE_MASK (0x03<<6) +#define TPS6105X_REG1_MODE_SHUTDOWN 0x00 +#define TPS6105X_REG1_MODE_TORCH 0x01 +#define TPS6105X_REG1_MODE_TORCH_FLASH 0x02 +#define TPS6105X_REG1_MODE_VOLTAGE 0x03 +#define TPS6105X_REG_2 0x02 +#define TPS6105X_REG_3 0x03 + +/** + * enum tps6105x_mode - desired mode for the TPS6105x + * @TPS6105X_MODE_SHUTDOWN: this instance is inactive, not used for anything + * @TPS6105X_MODE_TORCH: this instance is used as a LED, usually a white + * LED, for example as backlight or flashlight.
If this is set, the + * TPS6105X will register to the LED framework + * @TPS6105X_MODE_TORCH_FLASH: this instance is used as a flashgun, usually + * in a camera + * @TPS6105X_MODE_VOLTAGE: this instance is used as a voltage regulator and + * will register to the regulator framework + */ +enum tps6105x_mode { + TPS6105X_MODE_SHUTDOWN, + TPS6105X_MODE_TORCH, + TPS6105X_MODE_TORCH_FLASH, + TPS6105X_MODE_VOLTAGE, +}; + +/** + * struct tps6105x_platform_data - TPS61905x platform data + * @mode: what mode this instance shall be operated in, + * this is not selectable at runtime + * @regulator_data: initialization data for the voltage + * regulator if used as a voltage source + */ +struct tps6105x_platform_data { + enum tps6105x_mode mode; + struct regulator_init_data *regulator_data; +}; + +/** + * struct tps6105x - state holder for the TPS6105x drivers + * @mutex: mutex to serialize I2C accesses + * @i2c_client: corresponding I2C client + * @regulator: regulator device if used in voltage mode + */ +struct tps6105x { + struct tps6105x_platform_data *pdata; + struct mutex lock; + struct i2c_client *client; + struct regulator_dev *regulator; +}; + +extern int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value); +extern int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf); +extern int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg, + u8 bitmask, u8 bitvalues); + +#endif diff --git a/include/linux/mfd/tps6507x.h b/include/linux/mfd/tps6507x.h new file mode 100644 index 00000000..c923e486 --- /dev/null +++ b/include/linux/mfd/tps6507x.h @@ -0,0 +1,169 @@ +/* linux/mfd/tps6507x.h + * + * Functions to access TPS65070 power management chip. + * + * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) + * + * + * For licencing details see kernel-base/COPYING + */ + +#ifndef __LINUX_MFD_TPS6507X_H +#define __LINUX_MFD_TPS6507X_H + +/* + * ---------------------------------------------------------------------------- + * Registers, all 8 bits + * ---------------------------------------------------------------------------- + */ + + +/* Register definitions */ +#define TPS6507X_REG_PPATH1 0X01 +#define TPS6507X_CHG_USB BIT(7) +#define TPS6507X_CHG_AC BIT(6) +#define TPS6507X_CHG_USB_PW_ENABLE BIT(5) +#define TPS6507X_CHG_AC_PW_ENABLE BIT(4) +#define TPS6507X_CHG_AC_CURRENT BIT(2) +#define TPS6507X_CHG_USB_CURRENT BIT(0) + +#define TPS6507X_REG_INT 0X02 +#define TPS6507X_REG_MASK_AC_USB BIT(7) +#define TPS6507X_REG_MASK_TSC BIT(6) +#define TPS6507X_REG_MASK_PB_IN BIT(5) +#define TPS6507X_REG_TSC_INT BIT(3) +#define TPS6507X_REG_PB_IN_INT BIT(2) +#define TPS6507X_REG_AC_USB_APPLIED BIT(1) +#define TPS6507X_REG_AC_USB_REMOVED BIT(0) + +#define TPS6507X_REG_CHGCONFIG0 0X03 + +#define TPS6507X_REG_CHGCONFIG1 0X04 +#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4) +#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3) +#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2) +#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1) +#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0) + +#define TPS6507X_REG_CHGCONFIG2 0X05 + +#define TPS6507X_REG_CHGCONFIG3 0X06 + +#define TPS6507X_REG_ADCONFIG 0X07 +#define TPS6507X_ADCONFIG_AD_ENABLE BIT(7) +#define TPS6507X_ADCONFIG_START_CONVERSION BIT(6) +#define TPS6507X_ADCONFIG_CONVERSION_DONE BIT(5) +#define TPS6507X_ADCONFIG_VREF_ENABLE BIT(4) +#define TPS6507X_ADCONFIG_INPUT_AD_IN1 0 +#define TPS6507X_ADCONFIG_INPUT_AD_IN2 1 +#define TPS6507X_ADCONFIG_INPUT_AD_IN3 2 +#define TPS6507X_ADCONFIG_INPUT_AD_IN4 3 +#define TPS6507X_ADCONFIG_INPUT_TS_PIN 4 +#define 
TPS6507X_ADCONFIG_INPUT_BAT_CURRENT 5 +#define TPS6507X_ADCONFIG_INPUT_AC_VOLTAGE 6 +#define TPS6507X_ADCONFIG_INPUT_SYS_VOLTAGE 7 +#define TPS6507X_ADCONFIG_INPUT_CHARGER_VOLTAGE 8 +#define TPS6507X_ADCONFIG_INPUT_BAT_VOLTAGE 9 +#define TPS6507X_ADCONFIG_INPUT_THRESHOLD_VOLTAGE 10 +#define TPS6507X_ADCONFIG_INPUT_ISET1_VOLTAGE 11 +#define TPS6507X_ADCONFIG_INPUT_ISET2_VOLTAGE 12 +#define TPS6507X_ADCONFIG_INPUT_REAL_TSC 14 +#define TPS6507X_ADCONFIG_INPUT_TSC 15 + +#define TPS6507X_REG_TSCMODE 0X08 +#define TPS6507X_TSCMODE_X_POSITION 0 +#define TPS6507X_TSCMODE_Y_POSITION 1 +#define TPS6507X_TSCMODE_PRESSURE 2 +#define TPS6507X_TSCMODE_X_PLATE 3 +#define TPS6507X_TSCMODE_Y_PLATE 4 +#define TPS6507X_TSCMODE_STANDBY 5 +#define TPS6507X_TSCMODE_ADC_INPUT 6 +#define TPS6507X_TSCMODE_DISABLE 7 + +#define TPS6507X_REG_ADRESULT_1 0X09 + +#define TPS6507X_REG_ADRESULT_2 0X0A +#define TPS6507X_REG_ADRESULT_2_MASK (BIT(1) | BIT(0)) + +#define TPS6507X_REG_PGOOD 0X0B + +#define TPS6507X_REG_PGOODMASK 0X0C + +#define TPS6507X_REG_CON_CTRL1 0X0D +#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4) +#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3) +#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2) +#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1) +#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0) + +#define TPS6507X_REG_CON_CTRL2 0X0E + +#define TPS6507X_REG_CON_CTRL3 0X0F + +#define TPS6507X_REG_DEFDCDC1 0X10 +#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7) +#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC2_LOW 0X11 +#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC2_HIGH 0X12 +#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC3_LOW 0X13 +#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC3_HIGH 0X14 +#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F + +#define TPS6507X_REG_DEFSLEW 0X15 + +#define TPS6507X_REG_LDO_CTRL1 0X16 +#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F + +#define TPS6507X_REG_DEFLDO2 0X17 +#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F + +#define TPS6507X_REG_WLED_CTRL1 0X18 + +#define TPS6507X_REG_WLED_CTRL2 0X19 + +/* VDCDC MASK */ +#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F + +#define TPS6507X_MAX_REGISTER 0X19 + +/** + * struct tps6507x_board - packages regulator and touchscreen init data + * @tps6507x_regulator_data: regulator initialization values + * + * Board data may be used to initialize regulator and touchscreen. 
+ */ + +struct tps6507x_board { + struct regulator_init_data *tps6507x_pmic_init_data; + struct touchscreen_init_data *tps6507x_ts_init_data; +}; + +/** + * struct tps6507x_dev - tps6507x sub-driver chip access routines + * @read_dev() - I2C register read function + * @write_dev() - I2C register write function + * + * Device data may be used to access the TPS6507x chip + */ + +struct tps6507x_dev { + struct device *dev; + struct i2c_client *i2c_client; + int (*read_dev)(struct tps6507x_dev *tps6507x, char reg, int size, + void *dest); + int (*write_dev)(struct tps6507x_dev *tps6507x, char reg, int size, + void *src); + + /* Client devices */ + struct tps6507x_pmic *pmic; + struct tps6507x_ts *ts; +}; + +#endif /* __LINUX_MFD_TPS6507X_H */ diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h new file mode 100644 index 00000000..38e31c55 --- /dev/null +++ b/include/linux/mfd/tps65090.h @@ -0,0 +1,46 @@ +/* + * Core driver interface for TI TPS65090 PMIC family + * + * Copyright (C) 2012 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef __LINUX_MFD_TPS65090_H +#define __LINUX_MFD_TPS65090_H + +struct tps65090_subdev_info { + int id; + const char *name; + void *platform_data; +}; + +struct tps65090_platform_data { + int irq_base; + int num_subdevs; + struct tps65090_subdev_info *subdevs; +}; + +/* + * NOTE: the functions below are not intended for use outside + * of the TPS65090 sub-device drivers + */ +extern int tps65090_write(struct device *dev, int reg, uint8_t val); +extern int tps65090_read(struct device *dev, int reg, uint8_t *val); +extern int tps65090_set_bits(struct device *dev, int reg, uint8_t bit_num); +extern int tps65090_clr_bits(struct device *dev, int reg, uint8_t bit_num); + +#endif /*__LINUX_MFD_TPS65090_H */ diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h new file mode 100644 index 00000000..e030ef9a --- /dev/null +++ b/include/linux/mfd/tps65217.h @@ -0,0 +1,283 @@ +/* + * linux/mfd/tps65217.h + * + * Functions to access TPS65217 power management chip. + * + * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __LINUX_MFD_TPS65217_H +#define __LINUX_MFD_TPS65217_H + +#include <linux/i2c.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> + +/* I2C ID for TPS65217 part */ +#define TPS65217_I2C_ID 0x24 + +/* All register addresses */ +#define TPS65217_REG_CHIPID 0X00 +#define TPS65217_REG_PPATH 0X01 +#define TPS65217_REG_INT 0X02 +#define TPS65217_REG_CHGCONFIG0 0X03 +#define TPS65217_REG_CHGCONFIG1 0X04 +#define TPS65217_REG_CHGCONFIG2 0X05 +#define TPS65217_REG_CHGCONFIG3 0X06 +#define TPS65217_REG_WLEDCTRL1 0X07 +#define TPS65217_REG_WLEDCTRL2 0X08 +#define TPS65217_REG_MUXCTRL 0X09 +#define TPS65217_REG_STATUS 0X0A +#define TPS65217_REG_PASSWORD 0X0B +#define TPS65217_REG_PGOOD 0X0C +#define TPS65217_REG_DEFPG 0X0D +#define TPS65217_REG_DEFDCDC1 0X0E +#define TPS65217_REG_DEFDCDC2 0X0F +#define TPS65217_REG_DEFDCDC3 0X10 +#define TPS65217_REG_DEFSLEW 0X11 +#define TPS65217_REG_DEFLDO1 0X12 +#define TPS65217_REG_DEFLDO2 0X13 +#define TPS65217_REG_DEFLS1 0X14 +#define TPS65217_REG_DEFLS2 0X15 +#define TPS65217_REG_ENABLE 0X16 +#define TPS65217_REG_DEFUVLO 0X18 +#define TPS65217_REG_SEQ1 0X19 +#define TPS65217_REG_SEQ2 0X1A +#define TPS65217_REG_SEQ3 0X1B +#define TPS65217_REG_SEQ4 0X1C +#define TPS65217_REG_SEQ5 0X1D +#define TPS65217_REG_SEQ6 0X1E + +/* Register field definitions */ +#define TPS65217_CHIPID_CHIP_MASK 0xF0 +#define TPS65217_CHIPID_REV_MASK 0x0F + +#define TPS65217_PPATH_ACSINK_ENABLE BIT(7) +#define TPS65217_PPATH_USBSINK_ENABLE BIT(6) +#define TPS65217_PPATH_AC_PW_ENABLE BIT(5) +#define TPS65217_PPATH_USB_PW_ENABLE BIT(4) +#define TPS65217_PPATH_AC_CURRENT_MASK 0x0C +#define TPS65217_PPATH_USB_CURRENT_MASK 0x03 + +#define TPS65217_INT_PBM BIT(6) +#define TPS65217_INT_ACM BIT(5) +#define TPS65217_INT_USBM BIT(4) +#define TPS65217_INT_PBI BIT(2) +#define TPS65217_INT_ACI BIT(1) +#define TPS65217_INT_USBI BIT(0) + +#define TPS65217_CHGCONFIG0_TREG BIT(7) +#define TPS65217_CHGCONFIG0_DPPM BIT(6) +#define TPS65217_CHGCONFIG0_TSUSP BIT(5) +#define TPS65217_CHGCONFIG0_TERMI BIT(4) +#define TPS65217_CHGCONFIG0_ACTIVE BIT(3) +#define TPS65217_CHGCONFIG0_CHGTOUT BIT(2) +#define TPS65217_CHGCONFIG0_PCHGTOUT BIT(1) +#define TPS65217_CHGCONFIG0_BATTEMP BIT(0) + +#define TPS65217_CHGCONFIG1_TMR_MASK 0xC0 +#define TPS65217_CHGCONFIG1_TMR_ENABLE BIT(5) +#define TPS65217_CHGCONFIG1_NTC_TYPE BIT(4) +#define TPS65217_CHGCONFIG1_RESET BIT(3) +#define TPS65217_CHGCONFIG1_TERM BIT(2) +#define TPS65217_CHGCONFIG1_SUSP BIT(1) +#define TPS65217_CHGCONFIG1_CHG_EN BIT(0) + +#define TPS65217_CHGCONFIG2_DYNTMR BIT(7) +#define TPS65217_CHGCONFIG2_VPREGHG BIT(6) +#define TPS65217_CHGCONFIG2_VOREG_MASK 0x30 + +#define TPS65217_CHGCONFIG3_ICHRG_MASK 0xC0 +#define TPS65217_CHGCONFIG3_DPPMTH_MASK 0x30 +#define TPS65217_CHGCONFIG2_PCHRGT BIT(3) +#define TPS65217_CHGCONFIG2_TERMIF 0x06 +#define TPS65217_CHGCONFIG2_TRANGE BIT(0) + +#define TPS65217_WLEDCTRL1_ISINK_ENABLE BIT(3) +#define TPS65217_WLEDCTRL1_ISEL BIT(2) +#define TPS65217_WLEDCTRL1_FDIM_MASK 0x03 + +#define TPS65217_WLEDCTRL2_DUTY_MASK 0x7F + +#define TPS65217_MUXCTRL_MUX_MASK 0x07 + +#define TPS65217_STATUS_OFF BIT(7) +#define TPS65217_STATUS_ACPWR BIT(3) +#define TPS65217_STATUS_USBPWR BIT(2) +#define TPS65217_STATUS_PB BIT(0) + +#define TPS65217_PASSWORD_REGS_UNLOCK 0x7D + +#define TPS65217_PGOOD_LDO3_PG BIT(6) +#define TPS65217_PGOOD_LDO4_PG BIT(5) +#define TPS65217_PGOOD_DC1_PG BIT(4) +#define TPS65217_PGOOD_DC2_PG BIT(3) +#define TPS65217_PGOOD_DC3_PG BIT(2) +#define TPS65217_PGOOD_LDO1_PG BIT(1) +#define 
TPS65217_PGOOD_LDO2_PG BIT(0) + +#define TPS65217_DEFPG_LDO1PGM BIT(3) +#define TPS65217_DEFPG_LDO2PGM BIT(2) +#define TPS65217_DEFPG_PGDLY_MASK 0x03 + +#define TPS65217_DEFDCDCX_XADJX BIT(7) +#define TPS65217_DEFDCDCX_DCDC_MASK 0x3F + +#define TPS65217_DEFSLEW_GO BIT(7) +#define TPS65217_DEFSLEW_GODSBL BIT(6) +#define TPS65217_DEFSLEW_PFM_EN1 BIT(5) +#define TPS65217_DEFSLEW_PFM_EN2 BIT(4) +#define TPS65217_DEFSLEW_PFM_EN3 BIT(3) +#define TPS65217_DEFSLEW_SLEW_MASK 0x07 + +#define TPS65217_DEFLDO1_LDO1_MASK 0x0F + +#define TPS65217_DEFLDO2_TRACK BIT(6) +#define TPS65217_DEFLDO2_LDO2_MASK 0x3F + +#define TPS65217_DEFLDO3_LDO3_EN BIT(5) +#define TPS65217_DEFLDO3_LDO3_MASK 0x1F + +#define TPS65217_DEFLDO4_LDO4_EN BIT(5) +#define TPS65217_DEFLDO4_LDO4_MASK 0x1F + +#define TPS65217_ENABLE_LS1_EN BIT(6) +#define TPS65217_ENABLE_LS2_EN BIT(5) +#define TPS65217_ENABLE_DC1_EN BIT(4) +#define TPS65217_ENABLE_DC2_EN BIT(3) +#define TPS65217_ENABLE_DC3_EN BIT(2) +#define TPS65217_ENABLE_LDO1_EN BIT(1) +#define TPS65217_ENABLE_LDO2_EN BIT(0) + +#define TPS65217_DEFUVLO_UVLOHYS BIT(2) +#define TPS65217_DEFUVLO_UVLO_MASK 0x03 + +#define TPS65217_SEQ1_DC1_SEQ_MASK 0xF0 +#define TPS65217_SEQ1_DC2_SEQ_MASK 0x0F + +#define TPS65217_SEQ2_DC3_SEQ_MASK 0xF0 +#define TPS65217_SEQ2_LDO1_SEQ_MASK 0x0F + +#define TPS65217_SEQ3_LDO2_SEQ_MASK 0xF0 +#define TPS65217_SEQ3_LDO3_SEQ_MASK 0x0F + +#define TPS65217_SEQ4_LDO4_SEQ_MASK 0xF0 + +#define TPS65217_SEQ5_DLY1_MASK 0xC0 +#define TPS65217_SEQ5_DLY2_MASK 0x30 +#define TPS65217_SEQ5_DLY3_MASK 0x0C +#define TPS65217_SEQ5_DLY4_MASK 0x03 + +#define TPS65217_SEQ6_DLY5_MASK 0xC0 +#define TPS65217_SEQ6_DLY6_MASK 0x30 +#define TPS65217_SEQ6_SEQUP BIT(2) +#define TPS65217_SEQ6_SEQDWN BIT(1) +#define TPS65217_SEQ6_INSTDWN BIT(0) + +#define TPS65217_MAX_REGISTER 0x1E +#define TPS65217_PROTECT_NONE 0 +#define TPS65217_PROTECT_L1 1 +#define TPS65217_PROTECT_L2 2 + + +enum tps65217_regulator_id { + /* DCDC's */ + TPS65217_DCDC_1, + TPS65217_DCDC_2, + TPS65217_DCDC_3, + /* LDOs */ + TPS65217_LDO_1, + TPS65217_LDO_2, + TPS65217_LDO_3, + TPS65217_LDO_4, +}; + +#define TPS65217_MAX_REG_ID TPS65217_LDO_4 + +/* Number of step-down converters available */ +#define TPS65217_NUM_DCDC 3 +/* Number of LDO voltage regulators available */ +#define TPS65217_NUM_LDO 4 +/* Number of total regulators available */ +#define TPS65217_NUM_REGULATOR (TPS65217_NUM_DCDC + TPS65217_NUM_LDO) + +/** + * struct tps65217_board - packages regulator init data + * @tps65217_init_data: regulator initialization values + * + * Board data may be used to initialize the regulators. + */ +struct tps65217_board { + struct regulator_init_data *tps65217_init_data; +}; + +/** + * struct tps_info - packages regulator constraints + * @name: Voltage regulator name + * @min_uV: minimum micro volts + * @max_uV: maximum micro volts + * @vsel_to_uv: Function pointer to get voltage from selector + * @uv_to_vsel: Function pointer to get selector from voltage + * @table: Table for non-uniform voltage step-size + * @table_len: Length of the voltage table + * @enable_mask: Regulator enable mask bits + * @set_vout_reg: Regulator output voltage set register + * @set_vout_mask: Regulator output voltage set mask + * + * This data is used to check the regulator voltage limits while setting.
+ */ +struct tps_info { + const char *name; + int min_uV; + int max_uV; + int (*vsel_to_uv)(unsigned int vsel); + int (*uv_to_vsel)(int uV, unsigned int *vsel); + const int *table; + unsigned int table_len; + unsigned int enable_mask; + unsigned int set_vout_reg; + unsigned int set_vout_mask; +}; + +/** + * struct tps65217 - tps65217 sub-driver chip access routines + * + * Device data may be used to access the TPS65217 chip + */ + +struct tps65217 { + struct device *dev; + struct tps65217_board *pdata; + struct regulator_desc desc[TPS65217_NUM_REGULATOR]; + struct regulator_dev *rdev[TPS65217_NUM_REGULATOR]; + struct tps_info *info[TPS65217_NUM_REGULATOR]; + struct regmap *regmap; + + /* Client devices */ + struct platform_device *regulator_pdev[TPS65217_NUM_REGULATOR]; +}; + +static inline struct tps65217 *dev_to_tps65217(struct device *dev) +{ + return dev_get_drvdata(dev); +} + +int tps65217_reg_read(struct tps65217 *tps, unsigned int reg, + unsigned int *val); +int tps65217_reg_write(struct tps65217 *tps, unsigned int reg, + unsigned int val, unsigned int level); +int tps65217_set_bits(struct tps65217 *tps, unsigned int reg, + unsigned int mask, unsigned int val, unsigned int level); +int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg, + unsigned int mask, unsigned int level); + +#endif /* __LINUX_MFD_TPS65217_H */ diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h new file mode 100644 index 00000000..b19176ea --- /dev/null +++ b/include/linux/mfd/tps6586x.h @@ -0,0 +1,94 @@ +#ifndef __LINUX_MFD_TPS6586X_H +#define __LINUX_MFD_TPS6586X_H + +#define TPS6586X_SLEW_RATE_INSTANTLY 0x00 +#define TPS6586X_SLEW_RATE_110UV 0x01 +#define TPS6586X_SLEW_RATE_220UV 0x02 +#define TPS6586X_SLEW_RATE_440UV 0x03 +#define TPS6586X_SLEW_RATE_880UV 0x04 +#define TPS6586X_SLEW_RATE_1760UV 0x05 +#define TPS6586X_SLEW_RATE_3520UV 0x06 +#define TPS6586X_SLEW_RATE_7040UV 0x07 + +#define TPS6586X_SLEW_RATE_SET 0x08 +#define TPS6586X_SLEW_RATE_MASK 0x07 + +enum { + TPS6586X_ID_SM_0, + TPS6586X_ID_SM_1, + TPS6586X_ID_SM_2, + TPS6586X_ID_LDO_0, + TPS6586X_ID_LDO_1, + TPS6586X_ID_LDO_2, + TPS6586X_ID_LDO_3, + TPS6586X_ID_LDO_4, + TPS6586X_ID_LDO_5, + TPS6586X_ID_LDO_6, + TPS6586X_ID_LDO_7, + TPS6586X_ID_LDO_8, + TPS6586X_ID_LDO_9, + TPS6586X_ID_LDO_RTC, +}; + +enum { + TPS6586X_INT_PLDO_0, + TPS6586X_INT_PLDO_1, + TPS6586X_INT_PLDO_2, + TPS6586X_INT_PLDO_3, + TPS6586X_INT_PLDO_4, + TPS6586X_INT_PLDO_5, + TPS6586X_INT_PLDO_6, + TPS6586X_INT_PLDO_7, + TPS6586X_INT_COMP_DET, + TPS6586X_INT_ADC, + TPS6586X_INT_PLDO_8, + TPS6586X_INT_PLDO_9, + TPS6586X_INT_PSM_0, + TPS6586X_INT_PSM_1, + TPS6586X_INT_PSM_2, + TPS6586X_INT_PSM_3, + TPS6586X_INT_RTC_ALM1, + TPS6586X_INT_ACUSB_OVP, + TPS6586X_INT_USB_DET, + TPS6586X_INT_AC_DET, + TPS6586X_INT_BAT_DET, + TPS6586X_INT_CHG_STAT, + TPS6586X_INT_CHG_TEMP, + TPS6586X_INT_PP, + TPS6586X_INT_RESUME, + TPS6586X_INT_LOW_SYS, + TPS6586X_INT_RTC_ALM2, +}; + +struct tps6586x_settings { + int slew_rate; +}; + +struct tps6586x_subdev_info { + int id; + const char *name; + void *platform_data; +}; + +struct tps6586x_platform_data { + int num_subdevs; + struct tps6586x_subdev_info *subdevs; + + int gpio_base; + int irq_base; +}; + +/* + * NOTE: the functions below are not intended for use outside + * of the TPS6586X sub-device drivers + */ +extern int tps6586x_write(struct device *dev, int reg, uint8_t val); +extern int tps6586x_writes(struct device *dev, int reg, int len, uint8_t *val); +extern int tps6586x_read(struct device *dev, int reg, uint8_t *val); 
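/*
 * [Editorial sketch, not part of the original header diff.] A minimal
 * illustration of how a TPS6586X sub-device driver might call the accessor
 * helpers declared in this block, assuming "parent" is the MFD parent
 * device (typically dev->parent of the sub-device). The register offset
 * and bit mask below are hypothetical placeholders, not datasheet values.
 */
static int example_tps6586x_toggle(struct device *parent, int enable)
{
	const int example_reg = 0x20;		/* hypothetical register offset */
	const uint8_t example_mask = 0x01;	/* hypothetical enable bit */

	/* Set or clear a single bit through the core helpers declared here. */
	return enable ? tps6586x_set_bits(parent, example_reg, example_mask)
		      : tps6586x_clr_bits(parent, example_reg, example_mask);
}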
+extern int tps6586x_reads(struct device *dev, int reg, int len, uint8_t *val); +extern int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask); +extern int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask); +extern int tps6586x_update(struct device *dev, int reg, uint8_t val, + uint8_t mask); + +#endif /*__LINUX_MFD_TPS6586X_H */ diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h new file mode 100644 index 00000000..1c6c2860 --- /dev/null +++ b/include/linux/mfd/tps65910.h @@ -0,0 +1,848 @@ +/* + * tps65910.h -- TI TPS6591x + * + * Copyright 2010-2011 Texas Instruments Inc. + * + * Author: Graeme Gregory <gg@slimlogic.co.uk> + * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> + * Author: Arnaud Deconinck <a-deconinck@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_TPS65910_H +#define __LINUX_MFD_TPS65910_H + +#include <linux/gpio.h> + +/* TPS chip id list */ +#define TPS65910 0 +#define TPS65911 1 + +/* TPS regulator type list */ +#define REGULATOR_LDO 0 +#define REGULATOR_DCDC 1 + +/* + * List of registers for component TPS65910 + * + */ + +#define TPS65910_SECONDS 0x0 +#define TPS65910_MINUTES 0x1 +#define TPS65910_HOURS 0x2 +#define TPS65910_DAYS 0x3 +#define TPS65910_MONTHS 0x4 +#define TPS65910_YEARS 0x5 +#define TPS65910_WEEKS 0x6 +#define TPS65910_ALARM_SECONDS 0x8 +#define TPS65910_ALARM_MINUTES 0x9 +#define TPS65910_ALARM_HOURS 0xA +#define TPS65910_ALARM_DAYS 0xB +#define TPS65910_ALARM_MONTHS 0xC +#define TPS65910_ALARM_YEARS 0xD +#define TPS65910_RTC_CTRL 0x10 +#define TPS65910_RTC_STATUS 0x11 +#define TPS65910_RTC_INTERRUPTS 0x12 +#define TPS65910_RTC_COMP_LSB 0x13 +#define TPS65910_RTC_COMP_MSB 0x14 +#define TPS65910_RTC_RES_PROG 0x15 +#define TPS65910_RTC_RESET_STATUS 0x16 +#define TPS65910_BCK1 0x17 +#define TPS65910_BCK2 0x18 +#define TPS65910_BCK3 0x19 +#define TPS65910_BCK4 0x1A +#define TPS65910_BCK5 0x1B +#define TPS65910_PUADEN 0x1C +#define TPS65910_REF 0x1D +#define TPS65910_VRTC 0x1E +#define TPS65910_VIO 0x20 +#define TPS65910_VDD1 0x21 +#define TPS65910_VDD1_OP 0x22 +#define TPS65910_VDD1_SR 0x23 +#define TPS65910_VDD2 0x24 +#define TPS65910_VDD2_OP 0x25 +#define TPS65910_VDD2_SR 0x26 +#define TPS65910_VDD3 0x27 +#define TPS65910_VDIG1 0x30 +#define TPS65910_VDIG2 0x31 +#define TPS65910_VAUX1 0x32 +#define TPS65910_VAUX2 0x33 +#define TPS65910_VAUX33 0x34 +#define TPS65910_VMMC 0x35 +#define TPS65910_VPLL 0x36 +#define TPS65910_VDAC 0x37 +#define TPS65910_THERM 0x38 +#define TPS65910_BBCH 0x39 +#define TPS65910_DCDCCTRL 0x3E +#define TPS65910_DEVCTRL 0x3F +#define TPS65910_DEVCTRL2 0x40 +#define TPS65910_SLEEP_KEEP_LDO_ON 0x41 +#define TPS65910_SLEEP_KEEP_RES_ON 0x42 +#define TPS65910_SLEEP_SET_LDO_OFF 0x43 +#define TPS65910_SLEEP_SET_RES_OFF 0x44 +#define TPS65910_EN1_LDO_ASS 0x45 +#define TPS65910_EN1_SMPS_ASS 0x46 +#define TPS65910_EN2_LDO_ASS 0x47 +#define TPS65910_EN2_SMPS_ASS 0x48 +#define TPS65910_EN3_LDO_ASS 0x49 +#define TPS65910_SPARE 0x4A +#define TPS65910_INT_STS 0x50 +#define TPS65910_INT_MSK 0x51 +#define TPS65910_INT_STS2 0x52 +#define TPS65910_INT_MSK2 0x53 +#define TPS65910_INT_STS3 0x54 +#define TPS65910_INT_MSK3 0x55 +#define TPS65910_GPIO0 0x60 +#define TPS65910_GPIO1 0x61 +#define TPS65910_GPIO2 0x62 +#define TPS65910_GPIO3 0x63 +#define 
TPS65910_GPIO4 0x64 +#define TPS65910_GPIO5 0x65 +#define TPS65910_GPIO6 0x66 +#define TPS65910_GPIO7 0x67 +#define TPS65910_GPIO8 0x68 +#define TPS65910_JTAGVERNUM 0x80 +#define TPS65910_MAX_REGISTER 0x80 + +/* + * List of registers specific to TPS65911 + */ +#define TPS65911_VDDCTRL 0x27 +#define TPS65911_VDDCTRL_OP 0x28 +#define TPS65911_VDDCTRL_SR 0x29 +#define TPS65911_LDO1 0x30 +#define TPS65911_LDO2 0x31 +#define TPS65911_LDO5 0x32 +#define TPS65911_LDO8 0x33 +#define TPS65911_LDO7 0x34 +#define TPS65911_LDO6 0x35 +#define TPS65911_LDO4 0x36 +#define TPS65911_LDO3 0x37 +#define TPS65911_VMBCH 0x6A +#define TPS65911_VMBCH2 0x6B + +/* + * List of register bitfields for component TPS65910 + * + */ + + +/*Register BCK1 (0x80) register.RegisterDescription */ +#define BCK1_BCKUP_MASK 0xFF +#define BCK1_BCKUP_SHIFT 0 + + +/*Register BCK2 (0x80) register.RegisterDescription */ +#define BCK2_BCKUP_MASK 0xFF +#define BCK2_BCKUP_SHIFT 0 + + +/*Register BCK3 (0x80) register.RegisterDescription */ +#define BCK3_BCKUP_MASK 0xFF +#define BCK3_BCKUP_SHIFT 0 + + +/*Register BCK4 (0x80) register.RegisterDescription */ +#define BCK4_BCKUP_MASK 0xFF +#define BCK4_BCKUP_SHIFT 0 + + +/*Register BCK5 (0x80) register.RegisterDescription */ +#define BCK5_BCKUP_MASK 0xFF +#define BCK5_BCKUP_SHIFT 0 + + +/*Register PUADEN (0x80) register.RegisterDescription */ +#define PUADEN_EN3P_MASK 0x80 +#define PUADEN_EN3P_SHIFT 7 +#define PUADEN_I2CCTLP_MASK 0x40 +#define PUADEN_I2CCTLP_SHIFT 6 +#define PUADEN_I2CSRP_MASK 0x20 +#define PUADEN_I2CSRP_SHIFT 5 +#define PUADEN_PWRONP_MASK 0x10 +#define PUADEN_PWRONP_SHIFT 4 +#define PUADEN_SLEEPP_MASK 0x08 +#define PUADEN_SLEEPP_SHIFT 3 +#define PUADEN_PWRHOLDP_MASK 0x04 +#define PUADEN_PWRHOLDP_SHIFT 2 +#define PUADEN_BOOT1P_MASK 0x02 +#define PUADEN_BOOT1P_SHIFT 1 +#define PUADEN_BOOT0P_MASK 0x01 +#define PUADEN_BOOT0P_SHIFT 0 + + +/*Register REF (0x80) register.RegisterDescription */ +#define REF_VMBCH_SEL_MASK 0x0C +#define REF_VMBCH_SEL_SHIFT 2 +#define REF_ST_MASK 0x03 +#define REF_ST_SHIFT 0 + + +/*Register VRTC (0x80) register.RegisterDescription */ +#define VRTC_VRTC_OFFMASK_MASK 0x08 +#define VRTC_VRTC_OFFMASK_SHIFT 3 +#define VRTC_ST_MASK 0x03 +#define VRTC_ST_SHIFT 0 + + +/*Register VIO (0x80) register.RegisterDescription */ +#define VIO_ILMAX_MASK 0xC0 +#define VIO_ILMAX_SHIFT 6 +#define VIO_SEL_MASK 0x0C +#define VIO_SEL_SHIFT 2 +#define VIO_ST_MASK 0x03 +#define VIO_ST_SHIFT 0 + + +/*Register VDD1 (0x80) register.RegisterDescription */ +#define VDD1_VGAIN_SEL_MASK 0xC0 +#define VDD1_VGAIN_SEL_SHIFT 6 +#define VDD1_ILMAX_MASK 0x20 +#define VDD1_ILMAX_SHIFT 5 +#define VDD1_TSTEP_MASK 0x1C +#define VDD1_TSTEP_SHIFT 2 +#define VDD1_ST_MASK 0x03 +#define VDD1_ST_SHIFT 0 + + +/*Register VDD1_OP (0x80) register.RegisterDescription */ +#define VDD1_OP_CMD_MASK 0x80 +#define VDD1_OP_CMD_SHIFT 7 +#define VDD1_OP_SEL_MASK 0x7F +#define VDD1_OP_SEL_SHIFT 0 + + +/*Register VDD1_SR (0x80) register.RegisterDescription */ +#define VDD1_SR_SEL_MASK 0x7F +#define VDD1_SR_SEL_SHIFT 0 + + +/*Register VDD2 (0x80) register.RegisterDescription */ +#define VDD2_VGAIN_SEL_MASK 0xC0 +#define VDD2_VGAIN_SEL_SHIFT 6 +#define VDD2_ILMAX_MASK 0x20 +#define VDD2_ILMAX_SHIFT 5 +#define VDD2_TSTEP_MASK 0x1C +#define VDD2_TSTEP_SHIFT 2 +#define VDD2_ST_MASK 0x03 +#define VDD2_ST_SHIFT 0 + + +/*Register VDD2_OP (0x80) register.RegisterDescription */ +#define VDD2_OP_CMD_MASK 0x80 +#define VDD2_OP_CMD_SHIFT 7 +#define VDD2_OP_SEL_MASK 0x7F +#define VDD2_OP_SEL_SHIFT 0 + +/*Register VDD2_SR 
(0x80) register.RegisterDescription */ +#define VDD2_SR_SEL_MASK 0x7F +#define VDD2_SR_SEL_SHIFT 0 + + +/*Registers VDD1, VDD2 voltage values definitions */ +#define VDD1_2_NUM_VOLT_FINE 73 +#define VDD1_2_NUM_VOLT_COARSE 3 +#define VDD1_2_MIN_VOLT 6000 +#define VDD1_2_OFFSET 125 + + +/*Register VDD3 (0x80) register.RegisterDescription */ +#define VDD3_CKINEN_MASK 0x04 +#define VDD3_CKINEN_SHIFT 2 +#define VDD3_ST_MASK 0x03 +#define VDD3_ST_SHIFT 0 +#define VDDCTRL_MIN_VOLT 6000 +#define VDDCTRL_OFFSET 125 + +/*Registers VDIG (0x80) to VDAC register.RegisterDescription */ +#define LDO_SEL_MASK 0x0C +#define LDO_SEL_SHIFT 2 +#define LDO_ST_MASK 0x03 +#define LDO_ST_SHIFT 0 +#define LDO_ST_ON_BIT 0x01 +#define LDO_ST_MODE_BIT 0x02 + + +/* Registers LDO1 to LDO8 in tps65910 */ +#define LDO1_SEL_MASK 0xFC +#define LDO3_SEL_MASK 0x7C +#define LDO_MIN_VOLT 1000 +#define LDO_MAX_VOLT 3300 + + +/*Register VDIG1 (0x80) register.RegisterDescription */ +#define VDIG1_SEL_MASK 0x0C +#define VDIG1_SEL_SHIFT 2 +#define VDIG1_ST_MASK 0x03 +#define VDIG1_ST_SHIFT 0 + + +/*Register VDIG2 (0x80) register.RegisterDescription */ +#define VDIG2_SEL_MASK 0x0C +#define VDIG2_SEL_SHIFT 2 +#define VDIG2_ST_MASK 0x03 +#define VDIG2_ST_SHIFT 0 + + +/*Register VAUX1 (0x80) register.RegisterDescription */ +#define VAUX1_SEL_MASK 0x0C +#define VAUX1_SEL_SHIFT 2 +#define VAUX1_ST_MASK 0x03 +#define VAUX1_ST_SHIFT 0 + + +/*Register VAUX2 (0x80) register.RegisterDescription */ +#define VAUX2_SEL_MASK 0x0C +#define VAUX2_SEL_SHIFT 2 +#define VAUX2_ST_MASK 0x03 +#define VAUX2_ST_SHIFT 0 + + +/*Register VAUX33 (0x80) register.RegisterDescription */ +#define VAUX33_SEL_MASK 0x0C +#define VAUX33_SEL_SHIFT 2 +#define VAUX33_ST_MASK 0x03 +#define VAUX33_ST_SHIFT 0 + + +/*Register VMMC (0x80) register.RegisterDescription */ +#define VMMC_SEL_MASK 0x0C +#define VMMC_SEL_SHIFT 2 +#define VMMC_ST_MASK 0x03 +#define VMMC_ST_SHIFT 0 + + +/*Register VPLL (0x80) register.RegisterDescription */ +#define VPLL_SEL_MASK 0x0C +#define VPLL_SEL_SHIFT 2 +#define VPLL_ST_MASK 0x03 +#define VPLL_ST_SHIFT 0 + + +/*Register VDAC (0x80) register.RegisterDescription */ +#define VDAC_SEL_MASK 0x0C +#define VDAC_SEL_SHIFT 2 +#define VDAC_ST_MASK 0x03 +#define VDAC_ST_SHIFT 0 + + +/*Register THERM (0x80) register.RegisterDescription */ +#define THERM_THERM_HD_MASK 0x20 +#define THERM_THERM_HD_SHIFT 5 +#define THERM_THERM_TS_MASK 0x10 +#define THERM_THERM_TS_SHIFT 4 +#define THERM_THERM_HDSEL_MASK 0x0C +#define THERM_THERM_HDSEL_SHIFT 2 +#define THERM_RSVD1_MASK 0x02 +#define THERM_RSVD1_SHIFT 1 +#define THERM_THERM_STATE_MASK 0x01 +#define THERM_THERM_STATE_SHIFT 0 + + +/*Register BBCH (0x80) register.RegisterDescription */ +#define BBCH_BBSEL_MASK 0x06 +#define BBCH_BBSEL_SHIFT 1 +#define BBCH_BBCHEN_MASK 0x01 +#define BBCH_BBCHEN_SHIFT 0 + + +/*Register DCDCCTRL (0x80) register.RegisterDescription */ +#define DCDCCTRL_VDD2_PSKIP_MASK 0x20 +#define DCDCCTRL_VDD2_PSKIP_SHIFT 5 +#define DCDCCTRL_VDD1_PSKIP_MASK 0x10 +#define DCDCCTRL_VDD1_PSKIP_SHIFT 4 +#define DCDCCTRL_VIO_PSKIP_MASK 0x08 +#define DCDCCTRL_VIO_PSKIP_SHIFT 3 +#define DCDCCTRL_DCDCCKEXT_MASK 0x04 +#define DCDCCTRL_DCDCCKEXT_SHIFT 2 +#define DCDCCTRL_DCDCCKSYNC_MASK 0x03 +#define DCDCCTRL_DCDCCKSYNC_SHIFT 0 + + +/*Register DEVCTRL (0x80) register.RegisterDescription */ +#define DEVCTRL_RTC_PWDN_MASK 0x40 +#define DEVCTRL_RTC_PWDN_SHIFT 6 +#define DEVCTRL_CK32K_CTRL_MASK 0x20 +#define DEVCTRL_CK32K_CTRL_SHIFT 5 +#define DEVCTRL_SR_CTL_I2C_SEL_MASK 0x10 +#define DEVCTRL_SR_CTL_I2C_SEL_SHIFT 
4 +#define DEVCTRL_DEV_OFF_RST_MASK 0x08 +#define DEVCTRL_DEV_OFF_RST_SHIFT 3 +#define DEVCTRL_DEV_ON_MASK 0x04 +#define DEVCTRL_DEV_ON_SHIFT 2 +#define DEVCTRL_DEV_SLP_MASK 0x02 +#define DEVCTRL_DEV_SLP_SHIFT 1 +#define DEVCTRL_DEV_OFF_MASK 0x01 +#define DEVCTRL_DEV_OFF_SHIFT 0 + + +/*Register DEVCTRL2 (0x80) register.RegisterDescription */ +#define DEVCTRL2_TSLOT_LENGTH_MASK 0x30 +#define DEVCTRL2_TSLOT_LENGTH_SHIFT 4 +#define DEVCTRL2_SLEEPSIG_POL_MASK 0x08 +#define DEVCTRL2_SLEEPSIG_POL_SHIFT 3 +#define DEVCTRL2_PWON_LP_OFF_MASK 0x04 +#define DEVCTRL2_PWON_LP_OFF_SHIFT 2 +#define DEVCTRL2_PWON_LP_RST_MASK 0x02 +#define DEVCTRL2_PWON_LP_RST_SHIFT 1 +#define DEVCTRL2_IT_POL_MASK 0x01 +#define DEVCTRL2_IT_POL_SHIFT 0 + + +/*Register SLEEP_KEEP_LDO_ON (0x80) register.RegisterDescription */ +#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_MASK 0x80 +#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_SHIFT 7 +#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_MASK 0x40 +#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_SHIFT 6 +#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_MASK 0x20 +#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_SHIFT 5 +#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_MASK 0x10 +#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_SHIFT 4 +#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_MASK 0x08 +#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_SHIFT 3 +#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_MASK 0x04 +#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_SHIFT 2 +#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_MASK 0x02 +#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_SHIFT 1 +#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_MASK 0x01 +#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_SHIFT 0 + + +/*Register SLEEP_KEEP_RES_ON (0x80) register.RegisterDescription */ +#define SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK 0x80 +#define SLEEP_KEEP_RES_ON_THERM_KEEPON_SHIFT 7 +#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK 0x40 +#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_SHIFT 6 +#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_MASK 0x20 +#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_SHIFT 5 +#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK 0x10 +#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_SHIFT 4 +#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_MASK 0x08 +#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_SHIFT 3 +#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_MASK 0x04 +#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_SHIFT 2 +#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_MASK 0x02 +#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_SHIFT 1 +#define SLEEP_KEEP_RES_ON_VIO_KEEPON_MASK 0x01 +#define SLEEP_KEEP_RES_ON_VIO_KEEPON_SHIFT 0 + + +/*Register SLEEP_SET_LDO_OFF (0x80) register.RegisterDescription */ +#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_MASK 0x80 +#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_SHIFT 7 +#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_MASK 0x40 +#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_SHIFT 6 +#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_MASK 0x20 +#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_SHIFT 5 +#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_MASK 0x10 +#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_SHIFT 4 +#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_MASK 0x08 +#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_SHIFT 3 +#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_MASK 0x04 +#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_SHIFT 2 +#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_MASK 0x02 +#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_SHIFT 1 +#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_MASK 0x01 +#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_SHIFT 0 + + +/*Register SLEEP_SET_RES_OFF (0x80) register.RegisterDescription */ +#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_MASK 0x80 +#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_SHIFT 7 +#define SLEEP_SET_RES_OFF_RSVD_MASK 0x60 +#define SLEEP_SET_RES_OFF_RSVD_SHIFT 5 +#define 
SLEEP_SET_RES_OFF_SPARE_SETOFF_MASK 0x10 +#define SLEEP_SET_RES_OFF_SPARE_SETOFF_SHIFT 4 +#define SLEEP_SET_RES_OFF_VDD3_SETOFF_MASK 0x08 +#define SLEEP_SET_RES_OFF_VDD3_SETOFF_SHIFT 3 +#define SLEEP_SET_RES_OFF_VDD2_SETOFF_MASK 0x04 +#define SLEEP_SET_RES_OFF_VDD2_SETOFF_SHIFT 2 +#define SLEEP_SET_RES_OFF_VDD1_SETOFF_MASK 0x02 +#define SLEEP_SET_RES_OFF_VDD1_SETOFF_SHIFT 1 +#define SLEEP_SET_RES_OFF_VIO_SETOFF_MASK 0x01 +#define SLEEP_SET_RES_OFF_VIO_SETOFF_SHIFT 0 + + +/*Register EN1_LDO_ASS (0x80) register.RegisterDescription */ +#define EN1_LDO_ASS_VDAC_EN1_MASK 0x80 +#define EN1_LDO_ASS_VDAC_EN1_SHIFT 7 +#define EN1_LDO_ASS_VPLL_EN1_MASK 0x40 +#define EN1_LDO_ASS_VPLL_EN1_SHIFT 6 +#define EN1_LDO_ASS_VAUX33_EN1_MASK 0x20 +#define EN1_LDO_ASS_VAUX33_EN1_SHIFT 5 +#define EN1_LDO_ASS_VAUX2_EN1_MASK 0x10 +#define EN1_LDO_ASS_VAUX2_EN1_SHIFT 4 +#define EN1_LDO_ASS_VAUX1_EN1_MASK 0x08 +#define EN1_LDO_ASS_VAUX1_EN1_SHIFT 3 +#define EN1_LDO_ASS_VDIG2_EN1_MASK 0x04 +#define EN1_LDO_ASS_VDIG2_EN1_SHIFT 2 +#define EN1_LDO_ASS_VDIG1_EN1_MASK 0x02 +#define EN1_LDO_ASS_VDIG1_EN1_SHIFT 1 +#define EN1_LDO_ASS_VMMC_EN1_MASK 0x01 +#define EN1_LDO_ASS_VMMC_EN1_SHIFT 0 + + +/*Register EN1_SMPS_ASS (0x80) register.RegisterDescription */ +#define EN1_SMPS_ASS_RSVD_MASK 0xE0 +#define EN1_SMPS_ASS_RSVD_SHIFT 5 +#define EN1_SMPS_ASS_SPARE_EN1_MASK 0x10 +#define EN1_SMPS_ASS_SPARE_EN1_SHIFT 4 +#define EN1_SMPS_ASS_VDD3_EN1_MASK 0x08 +#define EN1_SMPS_ASS_VDD3_EN1_SHIFT 3 +#define EN1_SMPS_ASS_VDD2_EN1_MASK 0x04 +#define EN1_SMPS_ASS_VDD2_EN1_SHIFT 2 +#define EN1_SMPS_ASS_VDD1_EN1_MASK 0x02 +#define EN1_SMPS_ASS_VDD1_EN1_SHIFT 1 +#define EN1_SMPS_ASS_VIO_EN1_MASK 0x01 +#define EN1_SMPS_ASS_VIO_EN1_SHIFT 0 + + +/*Register EN2_LDO_ASS (0x80) register.RegisterDescription */ +#define EN2_LDO_ASS_VDAC_EN2_MASK 0x80 +#define EN2_LDO_ASS_VDAC_EN2_SHIFT 7 +#define EN2_LDO_ASS_VPLL_EN2_MASK 0x40 +#define EN2_LDO_ASS_VPLL_EN2_SHIFT 6 +#define EN2_LDO_ASS_VAUX33_EN2_MASK 0x20 +#define EN2_LDO_ASS_VAUX33_EN2_SHIFT 5 +#define EN2_LDO_ASS_VAUX2_EN2_MASK 0x10 +#define EN2_LDO_ASS_VAUX2_EN2_SHIFT 4 +#define EN2_LDO_ASS_VAUX1_EN2_MASK 0x08 +#define EN2_LDO_ASS_VAUX1_EN2_SHIFT 3 +#define EN2_LDO_ASS_VDIG2_EN2_MASK 0x04 +#define EN2_LDO_ASS_VDIG2_EN2_SHIFT 2 +#define EN2_LDO_ASS_VDIG1_EN2_MASK 0x02 +#define EN2_LDO_ASS_VDIG1_EN2_SHIFT 1 +#define EN2_LDO_ASS_VMMC_EN2_MASK 0x01 +#define EN2_LDO_ASS_VMMC_EN2_SHIFT 0 + + +/*Register EN2_SMPS_ASS (0x80) register.RegisterDescription */ +#define EN2_SMPS_ASS_RSVD_MASK 0xE0 +#define EN2_SMPS_ASS_RSVD_SHIFT 5 +#define EN2_SMPS_ASS_SPARE_EN2_MASK 0x10 +#define EN2_SMPS_ASS_SPARE_EN2_SHIFT 4 +#define EN2_SMPS_ASS_VDD3_EN2_MASK 0x08 +#define EN2_SMPS_ASS_VDD3_EN2_SHIFT 3 +#define EN2_SMPS_ASS_VDD2_EN2_MASK 0x04 +#define EN2_SMPS_ASS_VDD2_EN2_SHIFT 2 +#define EN2_SMPS_ASS_VDD1_EN2_MASK 0x02 +#define EN2_SMPS_ASS_VDD1_EN2_SHIFT 1 +#define EN2_SMPS_ASS_VIO_EN2_MASK 0x01 +#define EN2_SMPS_ASS_VIO_EN2_SHIFT 0 + + +/*Register EN3_LDO_ASS (0x80) register.RegisterDescription */ +#define EN3_LDO_ASS_VDAC_EN3_MASK 0x80 +#define EN3_LDO_ASS_VDAC_EN3_SHIFT 7 +#define EN3_LDO_ASS_VPLL_EN3_MASK 0x40 +#define EN3_LDO_ASS_VPLL_EN3_SHIFT 6 +#define EN3_LDO_ASS_VAUX33_EN3_MASK 0x20 +#define EN3_LDO_ASS_VAUX33_EN3_SHIFT 5 +#define EN3_LDO_ASS_VAUX2_EN3_MASK 0x10 +#define EN3_LDO_ASS_VAUX2_EN3_SHIFT 4 +#define EN3_LDO_ASS_VAUX1_EN3_MASK 0x08 +#define EN3_LDO_ASS_VAUX1_EN3_SHIFT 3 +#define EN3_LDO_ASS_VDIG2_EN3_MASK 0x04 +#define EN3_LDO_ASS_VDIG2_EN3_SHIFT 2 +#define EN3_LDO_ASS_VDIG1_EN3_MASK 0x02 +#define 
EN3_LDO_ASS_VDIG1_EN3_SHIFT 1 +#define EN3_LDO_ASS_VMMC_EN3_MASK 0x01 +#define EN3_LDO_ASS_VMMC_EN3_SHIFT 0 + + +/*Register SPARE (0x80) register.RegisterDescription */ +#define SPARE_SPARE_MASK 0xFF +#define SPARE_SPARE_SHIFT 0 + + +/*Register INT_STS (0x80) register.RegisterDescription */ +#define INT_STS_RTC_PERIOD_IT_MASK 0x80 +#define INT_STS_RTC_PERIOD_IT_SHIFT 7 +#define INT_STS_RTC_ALARM_IT_MASK 0x40 +#define INT_STS_RTC_ALARM_IT_SHIFT 6 +#define INT_STS_HOTDIE_IT_MASK 0x20 +#define INT_STS_HOTDIE_IT_SHIFT 5 +#define INT_STS_PWRHOLD_IT_MASK 0x10 +#define INT_STS_PWRHOLD_IT_SHIFT 4 +#define INT_STS_PWRON_LP_IT_MASK 0x08 +#define INT_STS_PWRON_LP_IT_SHIFT 3 +#define INT_STS_PWRON_IT_MASK 0x04 +#define INT_STS_PWRON_IT_SHIFT 2 +#define INT_STS_VMBHI_IT_MASK 0x02 +#define INT_STS_VMBHI_IT_SHIFT 1 +#define INT_STS_VMBDCH_IT_MASK 0x01 +#define INT_STS_VMBDCH_IT_SHIFT 0 + + +/*Register INT_MSK (0x80) register.RegisterDescription */ +#define INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80 +#define INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7 +#define INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40 +#define INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6 +#define INT_MSK_HOTDIE_IT_MSK_MASK 0x20 +#define INT_MSK_HOTDIE_IT_MSK_SHIFT 5 +#define INT_MSK_PWRHOLD_IT_MSK_MASK 0x10 +#define INT_MSK_PWRHOLD_IT_MSK_SHIFT 4 +#define INT_MSK_PWRON_LP_IT_MSK_MASK 0x08 +#define INT_MSK_PWRON_LP_IT_MSK_SHIFT 3 +#define INT_MSK_PWRON_IT_MSK_MASK 0x04 +#define INT_MSK_PWRON_IT_MSK_SHIFT 2 +#define INT_MSK_VMBHI_IT_MSK_MASK 0x02 +#define INT_MSK_VMBHI_IT_MSK_SHIFT 1 +#define INT_MSK_VMBDCH_IT_MSK_MASK 0x01 +#define INT_MSK_VMBDCH_IT_MSK_SHIFT 0 + + +/*Register INT_STS2 (0x80) register.RegisterDescription */ +#define INT_STS2_GPIO3_F_IT_MASK 0x80 +#define INT_STS2_GPIO3_F_IT_SHIFT 7 +#define INT_STS2_GPIO3_R_IT_MASK 0x40 +#define INT_STS2_GPIO3_R_IT_SHIFT 6 +#define INT_STS2_GPIO2_F_IT_MASK 0x20 +#define INT_STS2_GPIO2_F_IT_SHIFT 5 +#define INT_STS2_GPIO2_R_IT_MASK 0x10 +#define INT_STS2_GPIO2_R_IT_SHIFT 4 +#define INT_STS2_GPIO1_F_IT_MASK 0x08 +#define INT_STS2_GPIO1_F_IT_SHIFT 3 +#define INT_STS2_GPIO1_R_IT_MASK 0x04 +#define INT_STS2_GPIO1_R_IT_SHIFT 2 +#define INT_STS2_GPIO0_F_IT_MASK 0x02 +#define INT_STS2_GPIO0_F_IT_SHIFT 1 +#define INT_STS2_GPIO0_R_IT_MASK 0x01 +#define INT_STS2_GPIO0_R_IT_SHIFT 0 + + +/*Register INT_MSK2 (0x80) register.RegisterDescription */ +#define INT_MSK2_GPIO3_F_IT_MSK_MASK 0x80 +#define INT_MSK2_GPIO3_F_IT_MSK_SHIFT 7 +#define INT_MSK2_GPIO3_R_IT_MSK_MASK 0x40 +#define INT_MSK2_GPIO3_R_IT_MSK_SHIFT 6 +#define INT_MSK2_GPIO2_F_IT_MSK_MASK 0x20 +#define INT_MSK2_GPIO2_F_IT_MSK_SHIFT 5 +#define INT_MSK2_GPIO2_R_IT_MSK_MASK 0x10 +#define INT_MSK2_GPIO2_R_IT_MSK_SHIFT 4 +#define INT_MSK2_GPIO1_F_IT_MSK_MASK 0x08 +#define INT_MSK2_GPIO1_F_IT_MSK_SHIFT 3 +#define INT_MSK2_GPIO1_R_IT_MSK_MASK 0x04 +#define INT_MSK2_GPIO1_R_IT_MSK_SHIFT 2 +#define INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02 +#define INT_MSK2_GPIO0_F_IT_MSK_SHIFT 1 +#define INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01 +#define INT_MSK2_GPIO0_R_IT_MSK_SHIFT 0 + + +/*Register INT_STS3 (0x80) register.RegisterDescription */ +#define INT_STS3_GPIO5_F_IT_MASK 0x08 +#define INT_STS3_GPIO5_F_IT_SHIFT 3 +#define INT_STS3_GPIO5_R_IT_MASK 0x04 +#define INT_STS3_GPIO5_R_IT_SHIFT 2 +#define INT_STS3_GPIO4_F_IT_MASK 0x02 +#define INT_STS3_GPIO4_F_IT_SHIFT 1 +#define INT_STS3_GPIO4_R_IT_MASK 0x01 +#define INT_STS3_GPIO4_R_IT_SHIFT 0 + + +/*Register INT_MSK3 (0x80) register.RegisterDescription */ +#define INT_MSK3_GPIO5_F_IT_MSK_MASK 0x08 +#define INT_MSK3_GPIO5_F_IT_MSK_SHIFT 3 +#define 
INT_MSK3_GPIO5_R_IT_MSK_MASK 0x04 +#define INT_MSK3_GPIO5_R_IT_MSK_SHIFT 2 +#define INT_MSK3_GPIO4_F_IT_MSK_MASK 0x02 +#define INT_MSK3_GPIO4_F_IT_MSK_SHIFT 1 +#define INT_MSK3_GPIO4_R_IT_MSK_MASK 0x01 +#define INT_MSK3_GPIO4_R_IT_MSK_SHIFT 0 + + +/*Register GPIO (0x80) register.RegisterDescription */ +#define GPIO_SLEEP_MASK 0x80 +#define GPIO_SLEEP_SHIFT 7 +#define GPIO_DEB_MASK 0x10 +#define GPIO_DEB_SHIFT 4 +#define GPIO_PUEN_MASK 0x08 +#define GPIO_PUEN_SHIFT 3 +#define GPIO_CFG_MASK 0x04 +#define GPIO_CFG_SHIFT 2 +#define GPIO_STS_MASK 0x02 +#define GPIO_STS_SHIFT 1 +#define GPIO_SET_MASK 0x01 +#define GPIO_SET_SHIFT 0 + + +/*Register JTAGVERNUM (0x80) register.RegisterDescription */ +#define JTAGVERNUM_VERNUM_MASK 0x0F +#define JTAGVERNUM_VERNUM_SHIFT 0 + + +/* Register VDDCTRL (0x27) bit definitions */ +#define VDDCTRL_ST_MASK 0x03 +#define VDDCTRL_ST_SHIFT 0 + + +/*Register VDDCTRL_OP (0x28) bit definitios */ +#define VDDCTRL_OP_CMD_MASK 0x80 +#define VDDCTRL_OP_CMD_SHIFT 7 +#define VDDCTRL_OP_SEL_MASK 0x7F +#define VDDCTRL_OP_SEL_SHIFT 0 + + +/*Register VDDCTRL_SR (0x29) bit definitions */ +#define VDDCTRL_SR_SEL_MASK 0x7F +#define VDDCTRL_SR_SEL_SHIFT 0 + + +/* IRQ Definitions */ +#define TPS65910_IRQ_VBAT_VMBDCH 0 +#define TPS65910_IRQ_VBAT_VMHI 1 +#define TPS65910_IRQ_PWRON 2 +#define TPS65910_IRQ_PWRON_LP 3 +#define TPS65910_IRQ_PWRHOLD 4 +#define TPS65910_IRQ_HOTDIE 5 +#define TPS65910_IRQ_RTC_ALARM 6 +#define TPS65910_IRQ_RTC_PERIOD 7 +#define TPS65910_IRQ_GPIO_R 8 +#define TPS65910_IRQ_GPIO_F 9 +#define TPS65910_NUM_IRQ 10 + +#define TPS65911_IRQ_VBAT_VMBDCH 0 +#define TPS65911_IRQ_VBAT_VMBDCH2L 1 +#define TPS65911_IRQ_VBAT_VMBDCH2H 2 +#define TPS65911_IRQ_VBAT_VMHI 3 +#define TPS65911_IRQ_PWRON 4 +#define TPS65911_IRQ_PWRON_LP 5 +#define TPS65911_IRQ_PWRHOLD_F 6 +#define TPS65911_IRQ_PWRHOLD_R 7 +#define TPS65911_IRQ_HOTDIE 8 +#define TPS65911_IRQ_RTC_ALARM 9 +#define TPS65911_IRQ_RTC_PERIOD 10 +#define TPS65911_IRQ_GPIO0_R 11 +#define TPS65911_IRQ_GPIO0_F 12 +#define TPS65911_IRQ_GPIO1_R 13 +#define TPS65911_IRQ_GPIO1_F 14 +#define TPS65911_IRQ_GPIO2_R 15 +#define TPS65911_IRQ_GPIO2_F 16 +#define TPS65911_IRQ_GPIO3_R 17 +#define TPS65911_IRQ_GPIO3_F 18 +#define TPS65911_IRQ_GPIO4_R 19 +#define TPS65911_IRQ_GPIO4_F 20 +#define TPS65911_IRQ_GPIO5_R 21 +#define TPS65911_IRQ_GPIO5_F 22 +#define TPS65911_IRQ_WTCHDG 23 +#define TPS65911_IRQ_PWRDN 24 + +#define TPS65911_NUM_IRQ 25 + + +/* GPIO Register Definitions */ +#define TPS65910_GPIO_DEB BIT(2) +#define TPS65910_GPIO_PUEN BIT(3) +#define TPS65910_GPIO_CFG BIT(2) +#define TPS65910_GPIO_STS BIT(1) +#define TPS65910_GPIO_SET BIT(0) + +/* Max number of TPS65910/11 GPIOs */ +#define TPS65910_NUM_GPIO 6 +#define TPS65911_NUM_GPIO 9 +#define TPS6591X_MAX_NUM_GPIO 9 + +/* Regulator Index Definitions */ +#define TPS65910_REG_VRTC 0 +#define TPS65910_REG_VIO 1 +#define TPS65910_REG_VDD1 2 +#define TPS65910_REG_VDD2 3 +#define TPS65910_REG_VDD3 4 +#define TPS65910_REG_VDIG1 5 +#define TPS65910_REG_VDIG2 6 +#define TPS65910_REG_VPLL 7 +#define TPS65910_REG_VDAC 8 +#define TPS65910_REG_VAUX1 9 +#define TPS65910_REG_VAUX2 10 +#define TPS65910_REG_VAUX33 11 +#define TPS65910_REG_VMMC 12 + +#define TPS65911_REG_VDDCTRL 4 +#define TPS65911_REG_LDO1 5 +#define TPS65911_REG_LDO2 6 +#define TPS65911_REG_LDO3 7 +#define TPS65911_REG_LDO4 8 +#define TPS65911_REG_LDO5 9 +#define TPS65911_REG_LDO6 10 +#define TPS65911_REG_LDO7 11 +#define TPS65911_REG_LDO8 12 + +/* Max number of TPS65910/11 regulators */ +#define TPS65910_NUM_REGS 13 + +/* 
External sleep controls through EN1/EN2/EN3/SLEEP inputs */ +#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1 0x1 +#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2 0x2 +#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 0x4 +#define TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP 0x8 + +/** + * struct tps65910_board + * Board platform data may be used to initialize regulators. + */ + +struct tps65910_board { + int gpio_base; + int irq; + int irq_base; + int vmbch_threshold; + int vmbch2_threshold; + bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO]; + unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS]; + struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS]; +}; + +/** + * struct tps65910 - tps65910 sub-driver chip access routines + */ + +struct tps65910 { + struct device *dev; + struct i2c_client *i2c_client; + struct regmap *regmap; + struct mutex io_mutex; + unsigned int id; + int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest); + int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src); + + /* Client devices */ + struct tps65910_pmic *pmic; + struct tps65910_rtc *rtc; + struct tps65910_power *power; + + /* GPIO Handling */ + struct gpio_chip gpio; + + /* IRQ Handling */ + struct mutex irq_lock; + int chip_irq; + int irq_base; + int irq_num; + u32 irq_mask; +}; + +struct tps65910_platform_data { + int irq; + int irq_base; +}; + +int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask); +int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask); +void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base); +int tps65910_irq_init(struct tps65910 *tps65910, int irq, + struct tps65910_platform_data *pdata); +int tps65910_irq_exit(struct tps65910 *tps65910); + +static inline int tps65910_chip_id(struct tps65910 *tps65910) +{ + return tps65910->id; +} + +#endif /* __LINUX_MFD_TPS65910_H */ diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h new file mode 100644 index 00000000..aaceab40 --- /dev/null +++ b/include/linux/mfd/tps65912.h @@ -0,0 +1,327 @@ +/* + * tps65912.h -- TI TPS6591x + * + * Copyright 2011 Texas Instruments Inc. + * + * Author: Margarita Olaya <magi@slimlogic.co.uk> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __LINUX_MFD_TPS65912_H +#define __LINUX_MFD_TPS65912_H + +/* TPS regulator type list */ +#define REGULATOR_LDO 0 +#define REGULATOR_DCDC 1 + +/* + * List of registers for TPS65912 + */ + +#define TPS65912_DCDC1_CTRL 0x00 +#define TPS65912_DCDC2_CTRL 0x01 +#define TPS65912_DCDC3_CTRL 0x02 +#define TPS65912_DCDC4_CTRL 0x03 +#define TPS65912_DCDC1_OP 0x04 +#define TPS65912_DCDC1_AVS 0x05 +#define TPS65912_DCDC1_LIMIT 0x06 +#define TPS65912_DCDC2_OP 0x07 +#define TPS65912_DCDC2_AVS 0x08 +#define TPS65912_DCDC2_LIMIT 0x09 +#define TPS65912_DCDC3_OP 0x0A +#define TPS65912_DCDC3_AVS 0x0B +#define TPS65912_DCDC3_LIMIT 0x0C +#define TPS65912_DCDC4_OP 0x0D +#define TPS65912_DCDC4_AVS 0x0E +#define TPS65912_DCDC4_LIMIT 0x0F +#define TPS65912_LDO1_OP 0x10 +#define TPS65912_LDO1_AVS 0x11 +#define TPS65912_LDO1_LIMIT 0x12 +#define TPS65912_LDO2_OP 0x13 +#define TPS65912_LDO2_AVS 0x14 +#define TPS65912_LDO2_LIMIT 0x15 +#define TPS65912_LDO3_OP 0x16 +#define TPS65912_LDO3_AVS 0x17 +#define TPS65912_LDO3_LIMIT 0x18 +#define TPS65912_LDO4_OP 0x19 +#define TPS65912_LDO4_AVS 0x1A +#define TPS65912_LDO4_LIMIT 0x1B +#define TPS65912_LDO5 0x1C +#define TPS65912_LDO6 0x1D +#define TPS65912_LDO7 0x1E +#define TPS65912_LDO8 0x1F +#define TPS65912_LDO9 0x20 +#define TPS65912_LDO10 0x21 +#define TPS65912_THRM 0x22 +#define TPS65912_CLK32OUT 0x23 +#define TPS65912_DEVCTRL 0x24 +#define TPS65912_DEVCTRL2 0x25 +#define TPS65912_I2C_SPI_CFG 0x26 +#define TPS65912_KEEP_ON 0x27 +#define TPS65912_KEEP_ON2 0x28 +#define TPS65912_SET_OFF1 0x29 +#define TPS65912_SET_OFF2 0x2A +#define TPS65912_DEF_VOLT 0x2B +#define TPS65912_DEF_VOLT_MAPPING 0x2C +#define TPS65912_DISCHARGE 0x2D +#define TPS65912_DISCHARGE2 0x2E +#define TPS65912_EN1_SET1 0x2F +#define TPS65912_EN1_SET2 0x30 +#define TPS65912_EN2_SET1 0x31 +#define TPS65912_EN2_SET2 0x32 +#define TPS65912_EN3_SET1 0x33 +#define TPS65912_EN3_SET2 0x34 +#define TPS65912_EN4_SET1 0x35 +#define TPS65912_EN4_SET2 0x36 +#define TPS65912_PGOOD 0x37 +#define TPS65912_PGOOD2 0x38 +#define TPS65912_INT_STS 0x39 +#define TPS65912_INT_MSK 0x3A +#define TPS65912_INT_STS2 0x3B +#define TPS65912_INT_MSK2 0x3C +#define TPS65912_INT_STS3 0x3D +#define TPS65912_INT_MSK3 0x3E +#define TPS65912_INT_STS4 0x3F +#define TPS65912_INT_MSK4 0x40 +#define TPS65912_GPIO1 0x41 +#define TPS65912_GPIO2 0x42 +#define TPS65912_GPIO3 0x43 +#define TPS65912_GPIO4 0x44 +#define TPS65912_GPIO5 0x45 +#define TPS65912_VMON 0x46 +#define TPS65912_LEDA_CTRL1 0x47 +#define TPS65912_LEDA_CTRL2 0x48 +#define TPS65912_LEDA_CTRL3 0x49 +#define TPS65912_LEDA_CTRL4 0x4A +#define TPS65912_LEDA_CTRL5 0x4B +#define TPS65912_LEDA_CTRL6 0x4C +#define TPS65912_LEDA_CTRL7 0x4D +#define TPS65912_LEDA_CTRL8 0x4E +#define TPS65912_LEDB_CTRL1 0x4F +#define TPS65912_LEDB_CTRL2 0x50 +#define TPS65912_LEDB_CTRL3 0x51 +#define TPS65912_LEDB_CTRL4 0x52 +#define TPS65912_LEDB_CTRL5 0x53 +#define TPS65912_LEDB_CTRL6 0x54 +#define TPS65912_LEDB_CTRL7 0x55 +#define TPS65912_LEDB_CTRL8 0x56 +#define TPS65912_LEDC_CTRL1 0x57 +#define TPS65912_LEDC_CTRL2 0x58 +#define TPS65912_LEDC_CTRL3 0x59 +#define TPS65912_LEDC_CTRL4 0x5A +#define TPS65912_LEDC_CTRL5 0x5B +#define TPS65912_LEDC_CTRL6 0x5C +#define TPS65912_LEDC_CTRL7 0x5D +#define TPS65912_LEDC_CTRL8 0x5E +#define TPS65912_LED_RAMP_UP_TIME 0x5F +#define TPS65912_LED_RAMP_DOWN_TIME 0x60 +#define TPS65912_LED_SEQ_EN 0x61 +#define TPS65912_LOADSWITCH 0x62 +#define TPS65912_SPARE 0x63 +#define TPS65912_VERNUM 0x64 +#define TPS6591X_MAX_REGISTER 0x64 + +/* IRQ Definitions */ 
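/*
 * Editor's note -- illustrative sketch only, not taken from the upstream
 * header: the IRQ numbers that follow are indices local to the TPS65912.
 * A board would typically add them to the irq_base it supplies through
 * struct tps65912_platform_data (declared further down in this file) to
 * obtain a Linux interrupt number, e.g.:
 *
 *	int tps65912_hotdie_irq(const struct tps65912_platform_data *pdata)
 *	{
 *		// Linux IRQ = board-chosen base + chip-local index
 *		return pdata->irq_base + TPS65912_IRQ_HOTDIE;
 *	}
 *
 * The helper name above is hypothetical; only irq_base and the
 * TPS65912_IRQ_* indices come from this header.  The example is kept
 * inside a comment because struct tps65912_platform_data is only
 * defined later in the file.
 */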
+#define TPS65912_IRQ_PWRHOLD_F 0 +#define TPS65912_IRQ_VMON 1 +#define TPS65912_IRQ_PWRON 2 +#define TPS65912_IRQ_PWRON_LP 3 +#define TPS65912_IRQ_PWRHOLD_R 4 +#define TPS65912_IRQ_HOTDIE 5 +#define TPS65912_IRQ_GPIO1_R 6 +#define TPS65912_IRQ_GPIO1_F 7 +#define TPS65912_IRQ_GPIO2_R 8 +#define TPS65912_IRQ_GPIO2_F 9 +#define TPS65912_IRQ_GPIO3_R 10 +#define TPS65912_IRQ_GPIO3_F 11 +#define TPS65912_IRQ_GPIO4_R 12 +#define TPS65912_IRQ_GPIO4_F 13 +#define TPS65912_IRQ_GPIO5_R 14 +#define TPS65912_IRQ_GPIO5_F 15 +#define TPS65912_IRQ_PGOOD_DCDC1 16 +#define TPS65912_IRQ_PGOOD_DCDC2 17 +#define TPS65912_IRQ_PGOOD_DCDC3 18 +#define TPS65912_IRQ_PGOOD_DCDC4 19 +#define TPS65912_IRQ_PGOOD_LDO1 20 +#define TPS65912_IRQ_PGOOD_LDO2 21 +#define TPS65912_IRQ_PGOOD_LDO3 22 +#define TPS65912_IRQ_PGOOD_LDO4 23 +#define TPS65912_IRQ_PGOOD_LDO5 24 +#define TPS65912_IRQ_PGOOD_LDO6 25 +#define TPS65912_IRQ_PGOOD_LDO7 26 +#define TPS65912_IRQ_PGOOD_LD08 27 +#define TPS65912_IRQ_PGOOD_LDO9 28 +#define TPS65912_IRQ_PGOOD_LDO10 29 + +#define TPS65912_NUM_IRQ 30 + +/* GPIO 1 and 2 Register Definitions */ +#define GPIO_SLEEP_MASK 0x80 +#define GPIO_SLEEP_SHIFT 7 +#define GPIO_DEB_MASK 0x10 +#define GPIO_DEB_SHIFT 4 +#define GPIO_CFG_MASK 0x04 +#define GPIO_CFG_SHIFT 2 +#define GPIO_STS_MASK 0x02 +#define GPIO_STS_SHIFT 1 +#define GPIO_SET_MASK 0x01 +#define GPIO_SET_SHIFT 0 + +/* GPIO 3 Register Definitions */ +#define GPIO3_SLEEP_MASK 0x80 +#define GPIO3_SLEEP_SHIFT 7 +#define GPIO3_SEL_MASK 0x40 +#define GPIO3_SEL_SHIFT 6 +#define GPIO3_ODEN_MASK 0x20 +#define GPIO3_ODEN_SHIFT 5 +#define GPIO3_DEB_MASK 0x10 +#define GPIO3_DEB_SHIFT 4 +#define GPIO3_PDEN_MASK 0x08 +#define GPIO3_PDEN_SHIFT 3 +#define GPIO3_CFG_MASK 0x04 +#define GPIO3_CFG_SHIFT 2 +#define GPIO3_STS_MASK 0x02 +#define GPIO3_STS_SHIFT 1 +#define GPIO3_SET_MASK 0x01 +#define GPIO3_SET_SHIFT 0 + +/* GPIO 4 Register Definitions */ +#define GPIO4_SLEEP_MASK 0x80 +#define GPIO4_SLEEP_SHIFT 7 +#define GPIO4_SEL_MASK 0x40 +#define GPIO4_SEL_SHIFT 6 +#define GPIO4_ODEN_MASK 0x20 +#define GPIO4_ODEN_SHIFT 5 +#define GPIO4_DEB_MASK 0x10 +#define GPIO4_DEB_SHIFT 4 +#define GPIO4_PDEN_MASK 0x08 +#define GPIO4_PDEN_SHIFT 3 +#define GPIO4_CFG_MASK 0x04 +#define GPIO4_CFG_SHIFT 2 +#define GPIO4_STS_MASK 0x02 +#define GPIO4_STS_SHIFT 1 +#define GPIO4_SET_MASK 0x01 +#define GPIO4_SET_SHIFT 0 + +/* Register THERM (0x80) register.RegisterDescription */ +#define THERM_THERM_HD_MASK 0x20 +#define THERM_THERM_HD_SHIFT 5 +#define THERM_THERM_TS_MASK 0x10 +#define THERM_THERM_TS_SHIFT 4 +#define THERM_THERM_HDSEL_MASK 0x0C +#define THERM_THERM_HDSEL_SHIFT 2 +#define THERM_RSVD1_MASK 0x02 +#define THERM_RSVD1_SHIFT 1 +#define THERM_THERM_STATE_MASK 0x01 +#define THERM_THERM_STATE_SHIFT 0 + +/* Register DCDCCTRL1 register.RegisterDescription */ +#define DCDCCTRL_VCON_ENABLE_MASK 0x80 +#define DCDCCTRL_VCON_ENABLE_SHIFT 7 +#define DCDCCTRL_VCON_RANGE1_MASK 0x40 +#define DCDCCTRL_VCON_RANGE1_SHIFT 6 +#define DCDCCTRL_VCON_RANGE0_MASK 0x20 +#define DCDCCTRL_VCON_RANGE0_SHIFT 5 +#define DCDCCTRL_TSTEP2_MASK 0x10 +#define DCDCCTRL_TSTEP2_SHIFT 4 +#define DCDCCTRL_TSTEP1_MASK 0x08 +#define DCDCCTRL_TSTEP1_SHIFT 3 +#define DCDCCTRL_TSTEP0_MASK 0x04 +#define DCDCCTRL_TSTEP0_SHIFT 2 +#define DCDCCTRL_DCDC1_MODE_MASK 0x02 +#define DCDCCTRL_DCDC1_MODE_SHIFT 1 + +/* Register DCDCCTRL2 and DCDCCTRL3 register.RegisterDescription */ +#define DCDCCTRL_TSTEP2_MASK 0x10 +#define DCDCCTRL_TSTEP2_SHIFT 4 +#define DCDCCTRL_TSTEP1_MASK 0x08 +#define DCDCCTRL_TSTEP1_SHIFT 3 +#define 
DCDCCTRL_TSTEP0_MASK 0x04 +#define DCDCCTRL_TSTEP0_SHIFT 2 +#define DCDCCTRL_DCDC_MODE_MASK 0x02 +#define DCDCCTRL_DCDC_MODE_SHIFT 1 +#define DCDCCTRL_RSVD0_MASK 0x01 +#define DCDCCTRL_RSVD0_SHIFT 0 + +/* Register DCDCCTRL4 register.RegisterDescription */ +#define DCDCCTRL_RAMP_TIME_MASK 0x01 +#define DCDCCTRL_RAMP_TIME_SHIFT 0 + +/* Register DCDCx_AVS */ +#define DCDC_AVS_ENABLE_MASK 0x80 +#define DCDC_AVS_ENABLE_SHIFT 7 +#define DCDC_AVS_ECO_MASK 0x40 +#define DCDC_AVS_ECO_SHIFT 6 + +/* Register DCDCx_LIMIT */ +#define DCDC_LIMIT_RANGE_MASK 0xC0 +#define DCDC_LIMIT_RANGE_SHIFT 6 +#define DCDC_LIMIT_MAX_SEL_MASK 0x3F +#define DCDC_LIMIT_MAX_SEL_SHIFT 0 + +/** + * struct tps65912_board + * Board platform dat may be used to initialize regulators. + */ +struct tps65912_board { + int is_dcdc1_avs; + int is_dcdc2_avs; + int is_dcdc3_avs; + int is_dcdc4_avs; + int irq; + int irq_base; + int gpio_base; + struct regulator_init_data *tps65912_pmic_init_data; +}; + +/** + * struct tps65912 - tps65912 sub-driver chip access routines + */ + +struct tps65912 { + struct device *dev; + /* for read/write acces */ + struct mutex io_mutex; + + /* For device IO interfaces: I2C or SPI */ + void *control_data; + + int (*read)(struct tps65912 *tps65912, u8 reg, int size, void *dest); + int (*write)(struct tps65912 *tps65912, u8 reg, int size, void *src); + + /* Client devices */ + struct tps65912_pmic *pmic; + + /* GPIO Handling */ + struct gpio_chip gpio; + + /* IRQ Handling */ + struct mutex irq_lock; + int chip_irq; + int irq_base; + int irq_num; + u32 irq_mask; +}; + +struct tps65912_platform_data { + int irq; + int irq_base; +}; + +unsigned int tps_chip(void); + +int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask); +int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask); +int tps65912_reg_read(struct tps65912 *tps65912, u8 reg); +int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val); +int tps65912_device_init(struct tps65912 *tps65912); +void tps65912_device_exit(struct tps65912 *tps65912); +int tps65912_irq_init(struct tps65912 *tps65912, int irq, + struct tps65912_platform_data *pdata); + +#endif /* __LINUX_MFD_TPS65912_H */ diff --git a/include/linux/mfd/twl4030-audio.h b/include/linux/mfd/twl4030-audio.h new file mode 100644 index 00000000..3d22b72d --- /dev/null +++ b/include/linux/mfd/twl4030-audio.h @@ -0,0 +1,272 @@ +/* + * MFD driver for twl4030 audio submodule + * + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + * + * Copyright: (C) 2009 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __TWL4030_CODEC_H__ +#define __TWL4030_CODEC_H__ + +/* Codec registers */ +#define TWL4030_REG_CODEC_MODE 0x01 +#define TWL4030_REG_OPTION 0x02 +#define TWL4030_REG_UNKNOWN 0x03 +#define TWL4030_REG_MICBIAS_CTL 0x04 +#define TWL4030_REG_ANAMICL 0x05 +#define TWL4030_REG_ANAMICR 0x06 +#define TWL4030_REG_AVADC_CTL 0x07 +#define TWL4030_REG_ADCMICSEL 0x08 +#define TWL4030_REG_DIGMIXING 0x09 +#define TWL4030_REG_ATXL1PGA 0x0A +#define TWL4030_REG_ATXR1PGA 0x0B +#define TWL4030_REG_AVTXL2PGA 0x0C +#define TWL4030_REG_AVTXR2PGA 0x0D +#define TWL4030_REG_AUDIO_IF 0x0E +#define TWL4030_REG_VOICE_IF 0x0F +#define TWL4030_REG_ARXR1PGA 0x10 +#define TWL4030_REG_ARXL1PGA 0x11 +#define TWL4030_REG_ARXR2PGA 0x12 +#define TWL4030_REG_ARXL2PGA 0x13 +#define TWL4030_REG_VRXPGA 0x14 +#define TWL4030_REG_VSTPGA 0x15 +#define TWL4030_REG_VRX2ARXPGA 0x16 +#define TWL4030_REG_AVDAC_CTL 0x17 +#define TWL4030_REG_ARX2VTXPGA 0x18 +#define TWL4030_REG_ARXL1_APGA_CTL 0x19 +#define TWL4030_REG_ARXR1_APGA_CTL 0x1A +#define TWL4030_REG_ARXL2_APGA_CTL 0x1B +#define TWL4030_REG_ARXR2_APGA_CTL 0x1C +#define TWL4030_REG_ATX2ARXPGA 0x1D +#define TWL4030_REG_BT_IF 0x1E +#define TWL4030_REG_BTPGA 0x1F +#define TWL4030_REG_BTSTPGA 0x20 +#define TWL4030_REG_EAR_CTL 0x21 +#define TWL4030_REG_HS_SEL 0x22 +#define TWL4030_REG_HS_GAIN_SET 0x23 +#define TWL4030_REG_HS_POPN_SET 0x24 +#define TWL4030_REG_PREDL_CTL 0x25 +#define TWL4030_REG_PREDR_CTL 0x26 +#define TWL4030_REG_PRECKL_CTL 0x27 +#define TWL4030_REG_PRECKR_CTL 0x28 +#define TWL4030_REG_HFL_CTL 0x29 +#define TWL4030_REG_HFR_CTL 0x2A +#define TWL4030_REG_ALC_CTL 0x2B +#define TWL4030_REG_ALC_SET1 0x2C +#define TWL4030_REG_ALC_SET2 0x2D +#define TWL4030_REG_BOOST_CTL 0x2E +#define TWL4030_REG_SOFTVOL_CTL 0x2F +#define TWL4030_REG_DTMF_FREQSEL 0x30 +#define TWL4030_REG_DTMF_TONEXT1H 0x31 +#define TWL4030_REG_DTMF_TONEXT1L 0x32 +#define TWL4030_REG_DTMF_TONEXT2H 0x33 +#define TWL4030_REG_DTMF_TONEXT2L 0x34 +#define TWL4030_REG_DTMF_TONOFF 0x35 +#define TWL4030_REG_DTMF_WANONOFF 0x36 +#define TWL4030_REG_I2S_RX_SCRAMBLE_H 0x37 +#define TWL4030_REG_I2S_RX_SCRAMBLE_M 0x38 +#define TWL4030_REG_I2S_RX_SCRAMBLE_L 0x39 +#define TWL4030_REG_APLL_CTL 0x3A +#define TWL4030_REG_DTMF_CTL 0x3B +#define TWL4030_REG_DTMF_PGA_CTL2 0x3C +#define TWL4030_REG_DTMF_PGA_CTL1 0x3D +#define TWL4030_REG_MISC_SET_1 0x3E +#define TWL4030_REG_PCMBTMUX 0x3F +#define TWL4030_REG_RX_PATH_SEL 0x43 +#define TWL4030_REG_VDL_APGA_CTL 0x44 +#define TWL4030_REG_VIBRA_CTL 0x45 +#define TWL4030_REG_VIBRA_SET 0x46 +#define TWL4030_REG_VIBRA_PWM_SET 0x47 +#define TWL4030_REG_ANAMIC_GAIN 0x48 +#define TWL4030_REG_MISC_SET_2 0x49 + +/* Bitfield Definitions */ + +/* TWL4030_CODEC_MODE (0x01) Fields */ +#define TWL4030_APLL_RATE 0xF0 +#define TWL4030_APLL_RATE_8000 0x00 +#define TWL4030_APLL_RATE_11025 0x10 +#define TWL4030_APLL_RATE_12000 0x20 +#define TWL4030_APLL_RATE_16000 0x40 +#define TWL4030_APLL_RATE_22050 0x50 +#define TWL4030_APLL_RATE_24000 0x60 +#define TWL4030_APLL_RATE_32000 0x80 +#define TWL4030_APLL_RATE_44100 0x90 +#define TWL4030_APLL_RATE_48000 0xA0 +#define TWL4030_APLL_RATE_96000 0xE0 +#define TWL4030_SEL_16K 0x08 +#define TWL4030_CODECPDZ 0x02 +#define TWL4030_OPT_MODE 0x01 +#define TWL4030_OPTION_1 (1 << 0) +#define TWL4030_OPTION_2 (0 << 0) + +/* TWL4030_OPTION 
(0x02) Fields */ +#define TWL4030_ATXL1_EN (1 << 0) +#define TWL4030_ATXR1_EN (1 << 1) +#define TWL4030_ATXL2_VTXL_EN (1 << 2) +#define TWL4030_ATXR2_VTXR_EN (1 << 3) +#define TWL4030_ARXL1_VRX_EN (1 << 4) +#define TWL4030_ARXR1_EN (1 << 5) +#define TWL4030_ARXL2_EN (1 << 6) +#define TWL4030_ARXR2_EN (1 << 7) + +/* TWL4030_REG_MICBIAS_CTL (0x04) Fields */ +#define TWL4030_MICBIAS2_CTL 0x40 +#define TWL4030_MICBIAS1_CTL 0x20 +#define TWL4030_HSMICBIAS_EN 0x04 +#define TWL4030_MICBIAS2_EN 0x02 +#define TWL4030_MICBIAS1_EN 0x01 + +/* ANAMICL (0x05) Fields */ +#define TWL4030_CNCL_OFFSET_START 0x80 +#define TWL4030_OFFSET_CNCL_SEL 0x60 +#define TWL4030_OFFSET_CNCL_SEL_ARX1 0x00 +#define TWL4030_OFFSET_CNCL_SEL_ARX2 0x20 +#define TWL4030_OFFSET_CNCL_SEL_VRX 0x40 +#define TWL4030_OFFSET_CNCL_SEL_ALL 0x60 +#define TWL4030_MICAMPL_EN 0x10 +#define TWL4030_CKMIC_EN 0x08 +#define TWL4030_AUXL_EN 0x04 +#define TWL4030_HSMIC_EN 0x02 +#define TWL4030_MAINMIC_EN 0x01 + +/* ANAMICR (0x06) Fields */ +#define TWL4030_MICAMPR_EN 0x10 +#define TWL4030_AUXR_EN 0x04 +#define TWL4030_SUBMIC_EN 0x01 + +/* AVADC_CTL (0x07) Fields */ +#define TWL4030_ADCL_EN 0x08 +#define TWL4030_AVADC_CLK_PRIORITY 0x04 +#define TWL4030_ADCR_EN 0x02 + +/* TWL4030_REG_ADCMICSEL (0x08) Fields */ +#define TWL4030_DIGMIC1_EN 0x08 +#define TWL4030_TX2IN_SEL 0x04 +#define TWL4030_DIGMIC0_EN 0x02 +#define TWL4030_TX1IN_SEL 0x01 + +/* AUDIO_IF (0x0E) Fields */ +#define TWL4030_AIF_SLAVE_EN 0x80 +#define TWL4030_DATA_WIDTH 0x60 +#define TWL4030_DATA_WIDTH_16S_16W 0x00 +#define TWL4030_DATA_WIDTH_32S_16W 0x40 +#define TWL4030_DATA_WIDTH_32S_24W 0x60 +#define TWL4030_AIF_FORMAT 0x18 +#define TWL4030_AIF_FORMAT_CODEC 0x00 +#define TWL4030_AIF_FORMAT_LEFT 0x08 +#define TWL4030_AIF_FORMAT_RIGHT 0x10 +#define TWL4030_AIF_FORMAT_TDM 0x18 +#define TWL4030_AIF_TRI_EN 0x04 +#define TWL4030_CLK256FS_EN 0x02 +#define TWL4030_AIF_EN 0x01 + +/* VOICE_IF (0x0F) Fields */ +#define TWL4030_VIF_SLAVE_EN 0x80 +#define TWL4030_VIF_DIN_EN 0x40 +#define TWL4030_VIF_DOUT_EN 0x20 +#define TWL4030_VIF_SWAP 0x10 +#define TWL4030_VIF_FORMAT 0x08 +#define TWL4030_VIF_TRI_EN 0x04 +#define TWL4030_VIF_SUB_EN 0x02 +#define TWL4030_VIF_EN 0x01 + +/* EAR_CTL (0x21) */ +#define TWL4030_EAR_GAIN 0x30 + +/* HS_GAIN_SET (0x23) Fields */ +#define TWL4030_HSR_GAIN 0x0C +#define TWL4030_HSR_GAIN_PWR_DOWN 0x00 +#define TWL4030_HSR_GAIN_PLUS_6DB 0x04 +#define TWL4030_HSR_GAIN_0DB 0x08 +#define TWL4030_HSR_GAIN_MINUS_6DB 0x0C +#define TWL4030_HSL_GAIN 0x03 +#define TWL4030_HSL_GAIN_PWR_DOWN 0x00 +#define TWL4030_HSL_GAIN_PLUS_6DB 0x01 +#define TWL4030_HSL_GAIN_0DB 0x02 +#define TWL4030_HSL_GAIN_MINUS_6DB 0x03 + +/* HS_POPN_SET (0x24) Fields */ +#define TWL4030_VMID_EN 0x40 +#define TWL4030_EXTMUTE 0x20 +#define TWL4030_RAMP_DELAY 0x1C +#define TWL4030_RAMP_DELAY_20MS 0x00 +#define TWL4030_RAMP_DELAY_40MS 0x04 +#define TWL4030_RAMP_DELAY_81MS 0x08 +#define TWL4030_RAMP_DELAY_161MS 0x0C +#define TWL4030_RAMP_DELAY_323MS 0x10 +#define TWL4030_RAMP_DELAY_645MS 0x14 +#define TWL4030_RAMP_DELAY_1291MS 0x18 +#define TWL4030_RAMP_DELAY_2581MS 0x1C +#define TWL4030_RAMP_EN 0x02 + +/* PREDL_CTL (0x25) */ +#define TWL4030_PREDL_GAIN 0x30 + +/* PREDR_CTL (0x26) */ +#define TWL4030_PREDR_GAIN 0x30 + +/* PRECKL_CTL (0x27) */ +#define TWL4030_PRECKL_GAIN 0x30 + +/* PRECKR_CTL (0x28) */ +#define TWL4030_PRECKR_GAIN 0x30 + +/* HFL_CTL (0x29, 0x2A) Fields */ +#define TWL4030_HF_CTL_HB_EN 0x04 +#define TWL4030_HF_CTL_LOOP_EN 0x08 +#define TWL4030_HF_CTL_RAMP_EN 0x10 +#define TWL4030_HF_CTL_REF_EN 
0x20 + +/* APLL_CTL (0x3A) Fields */ +#define TWL4030_APLL_EN 0x10 +#define TWL4030_APLL_INFREQ 0x0F +#define TWL4030_APLL_INFREQ_19200KHZ 0x05 +#define TWL4030_APLL_INFREQ_26000KHZ 0x06 +#define TWL4030_APLL_INFREQ_38400KHZ 0x0F + +/* REG_MISC_SET_1 (0x3E) Fields */ +#define TWL4030_CLK64_EN 0x80 +#define TWL4030_SCRAMBLE_EN 0x40 +#define TWL4030_FMLOOP_EN 0x20 +#define TWL4030_SMOOTH_ANAVOL_EN 0x02 +#define TWL4030_DIGMIC_LR_SWAP_EN 0x01 + +/* VIBRA_CTL (0x45) */ +#define TWL4030_VIBRA_EN 0x01 +#define TWL4030_VIBRA_DIR 0x02 +#define TWL4030_VIBRA_AUDIO_SEL_L1 (0x00 << 2) +#define TWL4030_VIBRA_AUDIO_SEL_R1 (0x01 << 2) +#define TWL4030_VIBRA_AUDIO_SEL_L2 (0x02 << 2) +#define TWL4030_VIBRA_AUDIO_SEL_R2 (0x03 << 2) +#define TWL4030_VIBRA_SEL 0x10 +#define TWL4030_VIBRA_DIR_SEL 0x20 + +/* TWL4030 codec resource IDs */ +enum twl4030_audio_res { + TWL4030_AUDIO_RES_POWER = 0, + TWL4030_AUDIO_RES_APLL, + TWL4030_AUDIO_RES_MAX, +}; + +int twl4030_audio_disable_resource(enum twl4030_audio_res id); +int twl4030_audio_enable_resource(enum twl4030_audio_res id); +unsigned int twl4030_audio_get_mclk(void); + +#endif /* End of __TWL4030_CODEC_H__ */ diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h new file mode 100644 index 00000000..b15b5f03 --- /dev/null +++ b/include/linux/mfd/twl6040.h @@ -0,0 +1,251 @@ +/* + * MFD driver for twl6040 + * + * Authors: Jorge Eduardo Candelaria <jorge.candelaria@ti.com> + * Misael Lopez Cruz <misael.lopez@ti.com> + * + * Copyright: (C) 2011 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __TWL6040_CODEC_H__ +#define __TWL6040_CODEC_H__ + +#include <linux/interrupt.h> +#include <linux/mfd/core.h> + +#define TWL6040_REG_ASICID 0x01 +#define TWL6040_REG_ASICREV 0x02 +#define TWL6040_REG_INTID 0x03 +#define TWL6040_REG_INTMR 0x04 +#define TWL6040_REG_NCPCTL 0x05 +#define TWL6040_REG_LDOCTL 0x06 +#define TWL6040_REG_HPPLLCTL 0x07 +#define TWL6040_REG_LPPLLCTL 0x08 +#define TWL6040_REG_LPPLLDIV 0x09 +#define TWL6040_REG_AMICBCTL 0x0A +#define TWL6040_REG_DMICBCTL 0x0B +#define TWL6040_REG_MICLCTL 0x0C +#define TWL6040_REG_MICRCTL 0x0D +#define TWL6040_REG_MICGAIN 0x0E +#define TWL6040_REG_LINEGAIN 0x0F +#define TWL6040_REG_HSLCTL 0x10 +#define TWL6040_REG_HSRCTL 0x11 +#define TWL6040_REG_HSGAIN 0x12 +#define TWL6040_REG_EARCTL 0x13 +#define TWL6040_REG_HFLCTL 0x14 +#define TWL6040_REG_HFLGAIN 0x15 +#define TWL6040_REG_HFRCTL 0x16 +#define TWL6040_REG_HFRGAIN 0x17 +#define TWL6040_REG_VIBCTLL 0x18 +#define TWL6040_REG_VIBDATL 0x19 +#define TWL6040_REG_VIBCTLR 0x1A +#define TWL6040_REG_VIBDATR 0x1B +#define TWL6040_REG_HKCTL1 0x1C +#define TWL6040_REG_HKCTL2 0x1D +#define TWL6040_REG_GPOCTL 0x1E +#define TWL6040_REG_ALB 0x1F +#define TWL6040_REG_DLB 0x20 +#define TWL6040_REG_TRIM1 0x28 +#define TWL6040_REG_TRIM2 0x29 +#define TWL6040_REG_TRIM3 0x2A +#define TWL6040_REG_HSOTRIM 0x2B +#define TWL6040_REG_HFOTRIM 0x2C +#define TWL6040_REG_ACCCTL 0x2D +#define TWL6040_REG_STATUS 0x2E + +/* INTID (0x03) fields */ + +#define TWL6040_THINT 0x01 +#define TWL6040_PLUGINT 0x02 +#define TWL6040_UNPLUGINT 0x04 +#define TWL6040_HOOKINT 0x08 +#define TWL6040_HFINT 0x10 +#define TWL6040_VIBINT 0x20 +#define TWL6040_READYINT 0x40 + +/* INTMR (0x04) fields */ + +#define TWL6040_THMSK 0x01 +#define TWL6040_PLUGMSK 0x02 +#define TWL6040_HOOKMSK 0x08 +#define TWL6040_HFMSK 0x10 +#define TWL6040_VIBMSK 0x20 +#define TWL6040_READYMSK 0x40 +#define TWL6040_ALLINT_MSK 0x7B + +/* NCPCTL (0x05) fields */ + +#define TWL6040_NCPENA 0x01 +#define TWL6040_NCPOPEN 0x40 + +/* LDOCTL (0x06) fields */ + +#define TWL6040_LSLDOENA 0x01 +#define TWL6040_HSLDOENA 0x04 +#define TWL6040_REFENA 0x40 +#define TWL6040_OSCENA 0x80 + +/* HPPLLCTL (0x07) fields */ + +#define TWL6040_HPLLENA 0x01 +#define TWL6040_HPLLRST 0x02 +#define TWL6040_HPLLBP 0x04 +#define TWL6040_HPLLSQRENA 0x08 +#define TWL6040_MCLK_12000KHZ (0 << 5) +#define TWL6040_MCLK_19200KHZ (1 << 5) +#define TWL6040_MCLK_26000KHZ (2 << 5) +#define TWL6040_MCLK_38400KHZ (3 << 5) +#define TWL6040_MCLK_MSK 0x60 + +/* LPPLLCTL (0x08) fields */ + +#define TWL6040_LPLLENA 0x01 +#define TWL6040_LPLLRST 0x02 +#define TWL6040_LPLLSEL 0x04 +#define TWL6040_LPLLFIN 0x08 +#define TWL6040_HPLLSEL 0x10 + +/* HSLCTL/R (0x10/0x11) fields */ + +#define TWL6040_HSDACENA (1 << 0) +#define TWL6040_HSDACMODE (1 << 1) +#define TWL6040_HSDRVMODE (1 << 3) + +/* VIBCTLL/R (0x18/0x1A) fields */ + +#define TWL6040_VIBENA (1 << 0) +#define TWL6040_VIBSEL (1 << 1) +#define TWL6040_VIBCTRL (1 << 2) +#define TWL6040_VIBCTRL_P (1 << 3) +#define TWL6040_VIBCTRL_N (1 << 4) + +/* VIBDATL/R (0x19/0x1B) fields */ + +#define TWL6040_VIBDAT_MAX 0x64 + +/* GPOCTL (0x1E) fields */ + +#define TWL6040_GPO1 0x01 +#define TWL6040_GPO2 0x02 +#define TWL6040_GPO3 0x03 + +/* ACCCTL (0x2D) fields */ + +#define TWL6040_I2CSEL 0x01 +#define TWL6040_RESETSPLIT 0x04 +#define 
TWL6040_INTCLRMODE 0x08 + +/* STATUS (0x2E) fields */ + +#define TWL6040_PLUGCOMP 0x02 +#define TWL6040_VIBLOCDET 0x10 +#define TWL6040_VIBROCDET 0x20 +#define TWL6040_TSHUTDET 0x40 + +#define TWL6040_CELLS 2 + +#define TWL6040_REV_ES1_0 0x00 +#define TWL6040_REV_ES1_1 0x01 +#define TWL6040_REV_ES1_2 0x02 + +#define TWL6040_IRQ_TH 0 +#define TWL6040_IRQ_PLUG 1 +#define TWL6040_IRQ_HOOK 2 +#define TWL6040_IRQ_HF 3 +#define TWL6040_IRQ_VIB 4 +#define TWL6040_IRQ_READY 5 + +/* PLL selection */ +#define TWL6040_SYSCLK_SEL_LPPLL 0 +#define TWL6040_SYSCLK_SEL_HPPLL 1 + +struct twl6040_codec_data { + u16 hs_left_step; + u16 hs_right_step; + u16 hf_left_step; + u16 hf_right_step; +}; + +struct twl6040_vibra_data { + unsigned int vibldrv_res; /* left driver resistance */ + unsigned int vibrdrv_res; /* right driver resistance */ + unsigned int viblmotor_res; /* left motor resistance */ + unsigned int vibrmotor_res; /* right motor resistance */ + int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */ + int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */ +}; + +struct twl6040_platform_data { + int audpwron_gpio; /* audio power-on gpio */ + unsigned int irq_base; + + struct twl6040_codec_data *codec; + struct twl6040_vibra_data *vibra; +}; + +struct regmap; + +struct twl6040 { + struct device *dev; + struct regmap *regmap; + struct mutex mutex; + struct mutex io_mutex; + struct mutex irq_mutex; + struct mfd_cell cells[TWL6040_CELLS]; + struct completion ready; + + int audpwron; + int power_count; + int rev; + u8 vibra_ctrl_cache[2]; + + /* PLL configuration */ + int pll; + unsigned int sysclk; + unsigned int mclk; + + unsigned int irq; + unsigned int irq_base; + u8 irq_masks_cur; + u8 irq_masks_cache; +}; + +int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg); +int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, + u8 val); +int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, + u8 mask); +int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, + u8 mask); +int twl6040_power(struct twl6040 *twl6040, int on); +int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, + unsigned int freq_in, unsigned int freq_out); +int twl6040_get_pll(struct twl6040 *twl6040); +unsigned int twl6040_get_sysclk(struct twl6040 *twl6040); +int twl6040_irq_init(struct twl6040 *twl6040); +void twl6040_irq_exit(struct twl6040 *twl6040); +/* Get the combined status of the vibra control register */ +int twl6040_get_vibralr_status(struct twl6040 *twl6040); + +static inline int twl6040_get_revid(struct twl6040 *twl6040) +{ + return twl6040->rev; +} + + +#endif /* End of __TWL6040_CODEC_H__ */ diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h new file mode 100644 index 00000000..28af4175 --- /dev/null +++ b/include/linux/mfd/ucb1x00.h @@ -0,0 +1,259 @@ +/* + * linux/include/mfd/ucb1x00.h + * + * Copyright (C) 2001 Russell King, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ */ +#ifndef UCB1200_H +#define UCB1200_H + +#include <linux/mfd/mcp.h> +#include <linux/gpio.h> +#include <linux/mutex.h> + +#define UCB_IO_DATA 0x00 +#define UCB_IO_DIR 0x01 + +#define UCB_IO_0 (1 << 0) +#define UCB_IO_1 (1 << 1) +#define UCB_IO_2 (1 << 2) +#define UCB_IO_3 (1 << 3) +#define UCB_IO_4 (1 << 4) +#define UCB_IO_5 (1 << 5) +#define UCB_IO_6 (1 << 6) +#define UCB_IO_7 (1 << 7) +#define UCB_IO_8 (1 << 8) +#define UCB_IO_9 (1 << 9) + +#define UCB_IE_RIS 0x02 +#define UCB_IE_FAL 0x03 +#define UCB_IE_STATUS 0x04 +#define UCB_IE_CLEAR 0x04 +#define UCB_IE_ADC (1 << 11) +#define UCB_IE_TSPX (1 << 12) +#define UCB_IE_TSMX (1 << 13) +#define UCB_IE_TCLIP (1 << 14) +#define UCB_IE_ACLIP (1 << 15) + +#define UCB_IRQ_TSPX 12 + +#define UCB_TC_A 0x05 +#define UCB_TC_A_LOOP (1 << 7) /* UCB1200 */ +#define UCB_TC_A_AMPL (1 << 7) /* UCB1300 */ + +#define UCB_TC_B 0x06 +#define UCB_TC_B_VOICE_ENA (1 << 3) +#define UCB_TC_B_CLIP (1 << 4) +#define UCB_TC_B_ATT (1 << 6) +#define UCB_TC_B_SIDE_ENA (1 << 11) +#define UCB_TC_B_MUTE (1 << 13) +#define UCB_TC_B_IN_ENA (1 << 14) +#define UCB_TC_B_OUT_ENA (1 << 15) + +#define UCB_AC_A 0x07 +#define UCB_AC_B 0x08 +#define UCB_AC_B_LOOP (1 << 8) +#define UCB_AC_B_MUTE (1 << 13) +#define UCB_AC_B_IN_ENA (1 << 14) +#define UCB_AC_B_OUT_ENA (1 << 15) + +#define UCB_TS_CR 0x09 +#define UCB_TS_CR_TSMX_POW (1 << 0) +#define UCB_TS_CR_TSPX_POW (1 << 1) +#define UCB_TS_CR_TSMY_POW (1 << 2) +#define UCB_TS_CR_TSPY_POW (1 << 3) +#define UCB_TS_CR_TSMX_GND (1 << 4) +#define UCB_TS_CR_TSPX_GND (1 << 5) +#define UCB_TS_CR_TSMY_GND (1 << 6) +#define UCB_TS_CR_TSPY_GND (1 << 7) +#define UCB_TS_CR_MODE_INT (0 << 8) +#define UCB_TS_CR_MODE_PRES (1 << 8) +#define UCB_TS_CR_MODE_POS (2 << 8) +#define UCB_TS_CR_BIAS_ENA (1 << 11) +#define UCB_TS_CR_TSPX_LOW (1 << 12) +#define UCB_TS_CR_TSMX_LOW (1 << 13) + +#define UCB_ADC_CR 0x0a +#define UCB_ADC_SYNC_ENA (1 << 0) +#define UCB_ADC_VREFBYP_CON (1 << 1) +#define UCB_ADC_INP_TSPX (0 << 2) +#define UCB_ADC_INP_TSMX (1 << 2) +#define UCB_ADC_INP_TSPY (2 << 2) +#define UCB_ADC_INP_TSMY (3 << 2) +#define UCB_ADC_INP_AD0 (4 << 2) +#define UCB_ADC_INP_AD1 (5 << 2) +#define UCB_ADC_INP_AD2 (6 << 2) +#define UCB_ADC_INP_AD3 (7 << 2) +#define UCB_ADC_EXT_REF (1 << 5) +#define UCB_ADC_START (1 << 7) +#define UCB_ADC_ENA (1 << 15) + +#define UCB_ADC_DATA 0x0b +#define UCB_ADC_DAT_VAL (1 << 15) +#define UCB_ADC_DAT(x) (((x) & 0x7fe0) >> 5) + +#define UCB_ID 0x0c +#define UCB_ID_1200 0x1004 +#define UCB_ID_1300 0x1005 +#define UCB_ID_TC35143 0x9712 + +#define UCB_MODE 0x0d +#define UCB_MODE_DYN_VFLAG_ENA (1 << 12) +#define UCB_MODE_AUD_OFF_CAN (1 << 13) + +enum ucb1x00_reset { + UCB_RST_PROBE, + UCB_RST_RESUME, + UCB_RST_SUSPEND, + UCB_RST_REMOVE, + UCB_RST_PROBE_FAIL, +}; + +struct ucb1x00_plat_data { + void (*reset)(enum ucb1x00_reset); + unsigned irq_base; + int gpio_base; + unsigned can_wakeup; +}; + +struct ucb1x00 { + raw_spinlock_t irq_lock; + struct mcp *mcp; + unsigned int irq; + int irq_base; + struct mutex adc_mutex; + spinlock_t io_lock; + u16 id; + u16 io_dir; + u16 io_out; + u16 adc_cr; + u16 irq_fal_enbl; + u16 irq_ris_enbl; + u16 irq_mask; + u16 irq_wake; + struct device dev; + struct list_head node; + struct list_head devs; + struct gpio_chip gpio; +}; + +struct ucb1x00_driver; + +struct ucb1x00_dev { + struct list_head dev_node; + struct list_head drv_node; + struct ucb1x00 *ucb; + struct ucb1x00_driver *drv; + void *priv; +}; + +struct ucb1x00_driver { + struct list_head node; + struct list_head devs; + int 
(*add)(struct ucb1x00_dev *dev); + void (*remove)(struct ucb1x00_dev *dev); + int (*suspend)(struct ucb1x00_dev *dev); + int (*resume)(struct ucb1x00_dev *dev); +}; + +#define classdev_to_ucb1x00(cd) container_of(cd, struct ucb1x00, dev) + +int ucb1x00_register_driver(struct ucb1x00_driver *); +void ucb1x00_unregister_driver(struct ucb1x00_driver *); + +/** + * ucb1x00_clkrate - return the UCB1x00 SIB clock rate + * @ucb: UCB1x00 structure describing chip + * + * Return the SIB clock rate in Hz. + */ +static inline unsigned int ucb1x00_clkrate(struct ucb1x00 *ucb) +{ + return mcp_get_sclk_rate(ucb->mcp); +} + +/** + * ucb1x00_enable - enable the UCB1x00 SIB clock + * @ucb: UCB1x00 structure describing chip + * + * Enable the SIB clock. This can be called multiple times. + */ +static inline void ucb1x00_enable(struct ucb1x00 *ucb) +{ + mcp_enable(ucb->mcp); +} + +/** + * ucb1x00_disable - disable the UCB1x00 SIB clock + * @ucb: UCB1x00 structure describing chip + * + * Disable the SIB clock. The SIB clock will only be disabled + * when the number of ucb1x00_enable calls match the number of + * ucb1x00_disable calls. + */ +static inline void ucb1x00_disable(struct ucb1x00 *ucb) +{ + mcp_disable(ucb->mcp); +} + +/** + * ucb1x00_reg_write - write a UCB1x00 register + * @ucb: UCB1x00 structure describing chip + * @reg: UCB1x00 4-bit register index to write + * @val: UCB1x00 16-bit value to write + * + * Write the UCB1x00 register @reg with value @val. The SIB + * clock must be running for this function to return. + */ +static inline void ucb1x00_reg_write(struct ucb1x00 *ucb, unsigned int reg, unsigned int val) +{ + mcp_reg_write(ucb->mcp, reg, val); +} + +/** + * ucb1x00_reg_read - read a UCB1x00 register + * @ucb: UCB1x00 structure describing chip + * @reg: UCB1x00 4-bit register index to write + * + * Read the UCB1x00 register @reg and return its value. The SIB + * clock must be running for this function to return. + */ +static inline unsigned int ucb1x00_reg_read(struct ucb1x00 *ucb, unsigned int reg) +{ + return mcp_reg_read(ucb->mcp, reg); +} +/** + * ucb1x00_set_audio_divisor - + * @ucb: UCB1x00 structure describing chip + * @div: SIB clock divisor + */ +static inline void ucb1x00_set_audio_divisor(struct ucb1x00 *ucb, unsigned int div) +{ + mcp_set_audio_divisor(ucb->mcp, div); +} + +/** + * ucb1x00_set_telecom_divisor - + * @ucb: UCB1x00 structure describing chip + * @div: SIB clock divisor + */ +static inline void ucb1x00_set_telecom_divisor(struct ucb1x00 *ucb, unsigned int div) +{ + mcp_set_telecom_divisor(ucb->mcp, div); +} + +void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int, unsigned int); +void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int, unsigned int); +unsigned int ucb1x00_io_read(struct ucb1x00 *ucb); + +#define UCB_NOSYNC (0) +#define UCB_SYNC (1) + +unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync); +void ucb1x00_adc_enable(struct ucb1x00 *ucb); +void ucb1x00_adc_disable(struct ucb1x00 *ucb); + +#endif diff --git a/include/linux/mfd/vt1603/core.h b/include/linux/mfd/vt1603/core.h new file mode 100755 index 00000000..3dc639c2 --- /dev/null +++ b/include/linux/mfd/vt1603/core.h @@ -0,0 +1,44 @@ +/*++ + * WonderMedia core driver for VT1603/VT1609 + * + * Copyright c 2010 WonderMedia Technologies, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * WonderMedia Technologies, Inc. + * 4F, 533, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C +--*/ + +#ifndef __MFD_VT1603_CORE_H__ +#define __MFD_VT1603_CORE_H__ + +enum vt1603_type { + VT1603 = 0, + VT1609 = 1, +}; + +struct vt1603 { + int (*reg_read)(struct vt1603 *vt1603, u8 reg, u8 *val); + int (*reg_write)(struct vt1603 *vt1603, u8 reg, u8 val); + void *control_data; + int type; + struct device *dev; +}; + +/* Device I/O API */ +int vt1603_reg_write(struct vt1603 *vt1603, u8 reg, u8 val); +int vt1603_reg_read(struct vt1603 *vt1603, u8 reg, u8 *val); +void vt1603_regs_dump(struct vt1603 *vt1603); + +#endif diff --git a/include/linux/mfd/wl1273-core.h b/include/linux/mfd/wl1273-core.h new file mode 100644 index 00000000..db2f3f45 --- /dev/null +++ b/include/linux/mfd/wl1273-core.h @@ -0,0 +1,290 @@ +/* + * include/linux/mfd/wl1273-core.h + * + * Some definitions for the wl1273 radio receiver/transmitter chip. + * + * Copyright (C) 2010 Nokia Corporation + * Author: Matti J. Aaltonen <matti.j.aaltonen@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef WL1273_CORE_H +#define WL1273_CORE_H + +#include <linux/i2c.h> +#include <linux/mfd/core.h> + +#define WL1273_FM_DRIVER_NAME "wl1273-fm" +#define RX71_FM_I2C_ADDR 0x22 + +#define WL1273_STEREO_GET 0 +#define WL1273_RSSI_LVL_GET 1 +#define WL1273_IF_COUNT_GET 2 +#define WL1273_FLAG_GET 3 +#define WL1273_RDS_SYNC_GET 4 +#define WL1273_RDS_DATA_GET 5 +#define WL1273_FREQ_SET 10 +#define WL1273_AF_FREQ_SET 11 +#define WL1273_MOST_MODE_SET 12 +#define WL1273_MOST_BLEND_SET 13 +#define WL1273_DEMPH_MODE_SET 14 +#define WL1273_SEARCH_LVL_SET 15 +#define WL1273_BAND_SET 16 +#define WL1273_MUTE_STATUS_SET 17 +#define WL1273_RDS_PAUSE_LVL_SET 18 +#define WL1273_RDS_PAUSE_DUR_SET 19 +#define WL1273_RDS_MEM_SET 20 +#define WL1273_RDS_BLK_B_SET 21 +#define WL1273_RDS_MSK_B_SET 22 +#define WL1273_RDS_PI_MASK_SET 23 +#define WL1273_RDS_PI_SET 24 +#define WL1273_RDS_SYSTEM_SET 25 +#define WL1273_INT_MASK_SET 26 +#define WL1273_SEARCH_DIR_SET 27 +#define WL1273_VOLUME_SET 28 +#define WL1273_AUDIO_ENABLE 29 +#define WL1273_PCM_MODE_SET 30 +#define WL1273_I2S_MODE_CONFIG_SET 31 +#define WL1273_POWER_SET 32 +#define WL1273_INTX_CONFIG_SET 33 +#define WL1273_PULL_EN_SET 34 +#define WL1273_HILO_SET 35 +#define WL1273_SWITCH2FREF 36 +#define WL1273_FREQ_DRIFT_REPORT 37 + +#define WL1273_PCE_GET 40 +#define WL1273_FIRM_VER_GET 41 +#define WL1273_ASIC_VER_GET 42 +#define WL1273_ASIC_ID_GET 43 +#define WL1273_MAN_ID_GET 44 +#define WL1273_TUNER_MODE_SET 45 +#define WL1273_STOP_SEARCH 46 +#define WL1273_RDS_CNTRL_SET 47 + +#define WL1273_WRITE_HARDWARE_REG 100 +#define WL1273_CODE_DOWNLOAD 101 +#define WL1273_RESET 102 + +#define WL1273_FM_POWER_MODE 254 +#define WL1273_FM_INTERRUPT 255 + +/* Transmitter API */ + +#define WL1273_CHANL_SET 55 +#define WL1273_SCAN_SPACING_SET 56 +#define WL1273_REF_SET 57 +#define WL1273_POWER_ENB_SET 90 +#define WL1273_POWER_ATT_SET 58 +#define WL1273_POWER_LEV_SET 59 +#define WL1273_AUDIO_DEV_SET 60 +#define WL1273_PILOT_DEV_SET 61 +#define WL1273_RDS_DEV_SET 62 +#define WL1273_PUPD_SET 91 +#define WL1273_AUDIO_IO_SET 63 +#define WL1273_PREMPH_SET 64 +#define WL1273_MONO_SET 66 +#define WL1273_MUTE 92 +#define WL1273_MPX_LMT_ENABLE 67 +#define WL1273_PI_SET 93 +#define WL1273_ECC_SET 69 +#define WL1273_PTY 70 +#define WL1273_AF 71 +#define WL1273_DISPLAY_MODE 74 +#define WL1273_RDS_REP_SET 77 +#define WL1273_RDS_CONFIG_DATA_SET 98 +#define WL1273_RDS_DATA_SET 99 +#define WL1273_RDS_DATA_ENB 94 +#define WL1273_TA_SET 78 +#define WL1273_TP_SET 79 +#define WL1273_DI_SET 80 +#define WL1273_MS_SET 81 +#define WL1273_PS_SCROLL_SPEED 82 +#define WL1273_TX_AUDIO_LEVEL_TEST 96 +#define WL1273_TX_AUDIO_LEVEL_TEST_THRESHOLD 73 +#define WL1273_TX_AUDIO_INPUT_LEVEL_RANGE_SET 54 +#define WL1273_RX_ANTENNA_SELECT 87 +#define WL1273_I2C_DEV_ADDR_SET 86 +#define WL1273_REF_ERR_CALIB_PARAM_SET 88 +#define WL1273_REF_ERR_CALIB_PERIODICITY_SET 89 +#define WL1273_SOC_INT_TRIGGER 52 +#define WL1273_SOC_AUDIO_PATH_SET 83 +#define WL1273_SOC_PCMI_OVERRIDE 84 +#define WL1273_SOC_I2S_OVERRIDE 85 +#define WL1273_RSSI_BLOCK_SCAN_FREQ_SET 95 +#define WL1273_RSSI_BLOCK_SCAN_START 97 +#define WL1273_RSSI_BLOCK_SCAN_DATA_GET 5 +#define WL1273_READ_FMANT_TUNE_VALUE 104 + +#define WL1273_RDS_OFF 0 +#define WL1273_RDS_ON 1 +#define WL1273_RDS_RESET 2 + +#define WL1273_AUDIO_DIGITAL 0 
+#define WL1273_AUDIO_ANALOG 1 + +#define WL1273_MODE_RX BIT(0) +#define WL1273_MODE_TX BIT(1) +#define WL1273_MODE_OFF BIT(2) +#define WL1273_MODE_SUSPENDED BIT(3) + +#define WL1273_RADIO_CHILD BIT(0) +#define WL1273_CODEC_CHILD BIT(1) + +#define WL1273_RX_MONO 1 +#define WL1273_RX_STEREO 0 +#define WL1273_TX_MONO 0 +#define WL1273_TX_STEREO 1 + +#define WL1273_MAX_VOLUME 0xffff +#define WL1273_DEFAULT_VOLUME 0x78b8 + +/* I2S protocol, left channel first, data width 16 bits */ +#define WL1273_PCM_DEF_MODE 0x00 + +/* Rx */ +#define WL1273_AUDIO_ENABLE_I2S BIT(0) +#define WL1273_AUDIO_ENABLE_ANALOG BIT(1) + +/* Tx */ +#define WL1273_AUDIO_IO_SET_ANALOG 0 +#define WL1273_AUDIO_IO_SET_I2S 1 + +#define WL1273_PUPD_SET_OFF 0x00 +#define WL1273_PUPD_SET_ON 0x01 +#define WL1273_PUPD_SET_RETENTION 0x10 + +/* I2S mode */ +#define WL1273_IS2_WIDTH_32 0x0 +#define WL1273_IS2_WIDTH_40 0x1 +#define WL1273_IS2_WIDTH_22_23 0x2 +#define WL1273_IS2_WIDTH_23_22 0x3 +#define WL1273_IS2_WIDTH_48 0x4 +#define WL1273_IS2_WIDTH_50 0x5 +#define WL1273_IS2_WIDTH_60 0x6 +#define WL1273_IS2_WIDTH_64 0x7 +#define WL1273_IS2_WIDTH_80 0x8 +#define WL1273_IS2_WIDTH_96 0x9 +#define WL1273_IS2_WIDTH_128 0xa +#define WL1273_IS2_WIDTH 0xf + +#define WL1273_IS2_FORMAT_STD (0x0 << 4) +#define WL1273_IS2_FORMAT_LEFT (0x1 << 4) +#define WL1273_IS2_FORMAT_RIGHT (0x2 << 4) +#define WL1273_IS2_FORMAT_USER (0x3 << 4) + +#define WL1273_IS2_MASTER (0x0 << 6) +#define WL1273_IS2_SLAVEW (0x1 << 6) + +#define WL1273_IS2_TRI_AFTER_SENDING (0x0 << 7) +#define WL1273_IS2_TRI_ALWAYS_ACTIVE (0x1 << 7) + +#define WL1273_IS2_SDOWS_RR (0x0 << 8) +#define WL1273_IS2_SDOWS_RF (0x1 << 8) +#define WL1273_IS2_SDOWS_FR (0x2 << 8) +#define WL1273_IS2_SDOWS_FF (0x3 << 8) + +#define WL1273_IS2_TRI_OPT (0x0 << 10) +#define WL1273_IS2_TRI_ALWAYS (0x1 << 10) + +#define WL1273_IS2_RATE_48K (0x0 << 12) +#define WL1273_IS2_RATE_44_1K (0x1 << 12) +#define WL1273_IS2_RATE_32K (0x2 << 12) +#define WL1273_IS2_RATE_22_05K (0x4 << 12) +#define WL1273_IS2_RATE_16K (0x5 << 12) +#define WL1273_IS2_RATE_12K (0x8 << 12) +#define WL1273_IS2_RATE_11_025 (0x9 << 12) +#define WL1273_IS2_RATE_8K (0xa << 12) +#define WL1273_IS2_RATE (0xf << 12) + +#define WL1273_I2S_DEF_MODE (WL1273_IS2_WIDTH_32 | \ + WL1273_IS2_FORMAT_STD | \ + WL1273_IS2_MASTER | \ + WL1273_IS2_TRI_AFTER_SENDING | \ + WL1273_IS2_SDOWS_RR | \ + WL1273_IS2_TRI_OPT | \ + WL1273_IS2_RATE_48K) + +#define SCHAR_MIN (-128) +#define SCHAR_MAX 127 + +#define WL1273_FR_EVENT BIT(0) +#define WL1273_BL_EVENT BIT(1) +#define WL1273_RDS_EVENT BIT(2) +#define WL1273_BBLK_EVENT BIT(3) +#define WL1273_LSYNC_EVENT BIT(4) +#define WL1273_LEV_EVENT BIT(5) +#define WL1273_IFFR_EVENT BIT(6) +#define WL1273_PI_EVENT BIT(7) +#define WL1273_PD_EVENT BIT(8) +#define WL1273_STIC_EVENT BIT(9) +#define WL1273_MAL_EVENT BIT(10) +#define WL1273_POW_ENB_EVENT BIT(11) +#define WL1273_SCAN_OVER_EVENT BIT(12) +#define WL1273_ERROR_EVENT BIT(13) + +#define TUNER_MODE_STOP_SEARCH 0 +#define TUNER_MODE_PRESET 1 +#define TUNER_MODE_AUTO_SEEK 2 +#define TUNER_MODE_AF 3 +#define TUNER_MODE_AUTO_SEEK_PI 4 +#define TUNER_MODE_AUTO_SEEK_BULK 5 + +#define RDS_BLOCK_SIZE 3 + +struct wl1273_fm_platform_data { + int (*request_resources) (struct i2c_client *client); + void (*free_resources) (void); + void (*enable) (void); + void (*disable) (void); + + u8 forbidden_modes; + unsigned int children; +}; + +#define WL1273_FM_CORE_CELLS 2 + +#define WL1273_BAND_OTHER 0 +#define WL1273_BAND_JAPAN 1 + +#define WL1273_BAND_JAPAN_LOW 76000 +#define 
WL1273_BAND_JAPAN_HIGH 90000 +#define WL1273_BAND_OTHER_LOW 87500 +#define WL1273_BAND_OTHER_HIGH 108000 + +#define WL1273_BAND_TX_LOW 76000 +#define WL1273_BAND_TX_HIGH 108000 + +struct wl1273_core { + struct mfd_cell cells[WL1273_FM_CORE_CELLS]; + struct wl1273_fm_platform_data *pdata; + + unsigned int mode; + unsigned int i2s_mode; + unsigned int volume; + unsigned int audio_mode; + unsigned int channel_number; + struct mutex lock; /* for serializing fm radio operations */ + + struct i2c_client *client; + + int (*read)(struct wl1273_core *core, u8, u16 *); + int (*write)(struct wl1273_core *core, u8, u16); + int (*write_data)(struct wl1273_core *core, u8 *, u16); + int (*set_audio)(struct wl1273_core *core, unsigned int); + int (*set_volume)(struct wl1273_core *core, unsigned int); +}; + +#endif /* ifndef WL1273_CORE_H */ diff --git a/include/linux/mfd/wm831x/auxadc.h b/include/linux/mfd/wm831x/auxadc.h new file mode 100644 index 00000000..b132067e --- /dev/null +++ b/include/linux/mfd/wm831x/auxadc.h @@ -0,0 +1,216 @@ +/* + * include/linux/mfd/wm831x/auxadc.h -- Auxiliary ADC interface for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_AUXADC_H__ +#define __MFD_WM831X_AUXADC_H__ + +/* + * R16429 (0x402D) - AuxADC Data + */ +#define WM831X_AUX_DATA_SRC_MASK 0xF000 /* AUX_DATA_SRC - [15:12] */ +#define WM831X_AUX_DATA_SRC_SHIFT 12 /* AUX_DATA_SRC - [15:12] */ +#define WM831X_AUX_DATA_SRC_WIDTH 4 /* AUX_DATA_SRC - [15:12] */ +#define WM831X_AUX_DATA_MASK 0x0FFF /* AUX_DATA - [11:0] */ +#define WM831X_AUX_DATA_SHIFT 0 /* AUX_DATA - [11:0] */ +#define WM831X_AUX_DATA_WIDTH 12 /* AUX_DATA - [11:0] */ + +/* + * R16430 (0x402E) - AuxADC Control + */ +#define WM831X_AUX_ENA 0x8000 /* AUX_ENA */ +#define WM831X_AUX_ENA_MASK 0x8000 /* AUX_ENA */ +#define WM831X_AUX_ENA_SHIFT 15 /* AUX_ENA */ +#define WM831X_AUX_ENA_WIDTH 1 /* AUX_ENA */ +#define WM831X_AUX_CVT_ENA 0x4000 /* AUX_CVT_ENA */ +#define WM831X_AUX_CVT_ENA_MASK 0x4000 /* AUX_CVT_ENA */ +#define WM831X_AUX_CVT_ENA_SHIFT 14 /* AUX_CVT_ENA */ +#define WM831X_AUX_CVT_ENA_WIDTH 1 /* AUX_CVT_ENA */ +#define WM831X_AUX_SLPENA 0x1000 /* AUX_SLPENA */ +#define WM831X_AUX_SLPENA_MASK 0x1000 /* AUX_SLPENA */ +#define WM831X_AUX_SLPENA_SHIFT 12 /* AUX_SLPENA */ +#define WM831X_AUX_SLPENA_WIDTH 1 /* AUX_SLPENA */ +#define WM831X_AUX_FRC_ENA 0x0800 /* AUX_FRC_ENA */ +#define WM831X_AUX_FRC_ENA_MASK 0x0800 /* AUX_FRC_ENA */ +#define WM831X_AUX_FRC_ENA_SHIFT 11 /* AUX_FRC_ENA */ +#define WM831X_AUX_FRC_ENA_WIDTH 1 /* AUX_FRC_ENA */ +#define WM831X_AUX_RATE_MASK 0x003F /* AUX_RATE - [5:0] */ +#define WM831X_AUX_RATE_SHIFT 0 /* AUX_RATE - [5:0] */ +#define WM831X_AUX_RATE_WIDTH 6 /* AUX_RATE - [5:0] */ + +/* + * R16431 (0x402F) - AuxADC Source + */ +#define WM831X_AUX_CAL_SEL 0x8000 /* AUX_CAL_SEL */ +#define WM831X_AUX_CAL_SEL_MASK 0x8000 /* AUX_CAL_SEL */ +#define WM831X_AUX_CAL_SEL_SHIFT 15 /* AUX_CAL_SEL */ +#define WM831X_AUX_CAL_SEL_WIDTH 1 /* AUX_CAL_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL 0x0400 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL_MASK 0x0400 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_BKUP_BATT_SEL_SHIFT 10 /* AUX_BKUP_BATT_SEL */ +#define 
WM831X_AUX_BKUP_BATT_SEL_WIDTH 1 /* AUX_BKUP_BATT_SEL */ +#define WM831X_AUX_WALL_SEL 0x0200 /* AUX_WALL_SEL */ +#define WM831X_AUX_WALL_SEL_MASK 0x0200 /* AUX_WALL_SEL */ +#define WM831X_AUX_WALL_SEL_SHIFT 9 /* AUX_WALL_SEL */ +#define WM831X_AUX_WALL_SEL_WIDTH 1 /* AUX_WALL_SEL */ +#define WM831X_AUX_BATT_SEL 0x0100 /* AUX_BATT_SEL */ +#define WM831X_AUX_BATT_SEL_MASK 0x0100 /* AUX_BATT_SEL */ +#define WM831X_AUX_BATT_SEL_SHIFT 8 /* AUX_BATT_SEL */ +#define WM831X_AUX_BATT_SEL_WIDTH 1 /* AUX_BATT_SEL */ +#define WM831X_AUX_USB_SEL 0x0080 /* AUX_USB_SEL */ +#define WM831X_AUX_USB_SEL_MASK 0x0080 /* AUX_USB_SEL */ +#define WM831X_AUX_USB_SEL_SHIFT 7 /* AUX_USB_SEL */ +#define WM831X_AUX_USB_SEL_WIDTH 1 /* AUX_USB_SEL */ +#define WM831X_AUX_SYSVDD_SEL 0x0040 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_SYSVDD_SEL_MASK 0x0040 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_SYSVDD_SEL_SHIFT 6 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_SYSVDD_SEL_WIDTH 1 /* AUX_SYSVDD_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL 0x0020 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL_MASK 0x0020 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL_SHIFT 5 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_BATT_TEMP_SEL_WIDTH 1 /* AUX_BATT_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL 0x0010 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL_MASK 0x0010 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL_SHIFT 4 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_CHIP_TEMP_SEL_WIDTH 1 /* AUX_CHIP_TEMP_SEL */ +#define WM831X_AUX_AUX4_SEL 0x0008 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX4_SEL_MASK 0x0008 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX4_SEL_SHIFT 3 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX4_SEL_WIDTH 1 /* AUX_AUX4_SEL */ +#define WM831X_AUX_AUX3_SEL 0x0004 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX3_SEL_MASK 0x0004 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX3_SEL_SHIFT 2 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX3_SEL_WIDTH 1 /* AUX_AUX3_SEL */ +#define WM831X_AUX_AUX2_SEL 0x0002 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX2_SEL_MASK 0x0002 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX2_SEL_SHIFT 1 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX2_SEL_WIDTH 1 /* AUX_AUX2_SEL */ +#define WM831X_AUX_AUX1_SEL 0x0001 /* AUX_AUX1_SEL */ +#define WM831X_AUX_AUX1_SEL_MASK 0x0001 /* AUX_AUX1_SEL */ +#define WM831X_AUX_AUX1_SEL_SHIFT 0 /* AUX_AUX1_SEL */ +#define WM831X_AUX_AUX1_SEL_WIDTH 1 /* AUX_AUX1_SEL */ + +/* + * R16432 (0x4030) - Comparator Control + */ +#define WM831X_DCOMP4_STS 0x0800 /* DCOMP4_STS */ +#define WM831X_DCOMP4_STS_MASK 0x0800 /* DCOMP4_STS */ +#define WM831X_DCOMP4_STS_SHIFT 11 /* DCOMP4_STS */ +#define WM831X_DCOMP4_STS_WIDTH 1 /* DCOMP4_STS */ +#define WM831X_DCOMP3_STS 0x0400 /* DCOMP3_STS */ +#define WM831X_DCOMP3_STS_MASK 0x0400 /* DCOMP3_STS */ +#define WM831X_DCOMP3_STS_SHIFT 10 /* DCOMP3_STS */ +#define WM831X_DCOMP3_STS_WIDTH 1 /* DCOMP3_STS */ +#define WM831X_DCOMP2_STS 0x0200 /* DCOMP2_STS */ +#define WM831X_DCOMP2_STS_MASK 0x0200 /* DCOMP2_STS */ +#define WM831X_DCOMP2_STS_SHIFT 9 /* DCOMP2_STS */ +#define WM831X_DCOMP2_STS_WIDTH 1 /* DCOMP2_STS */ +#define WM831X_DCOMP1_STS 0x0100 /* DCOMP1_STS */ +#define WM831X_DCOMP1_STS_MASK 0x0100 /* DCOMP1_STS */ +#define WM831X_DCOMP1_STS_SHIFT 8 /* DCOMP1_STS */ +#define WM831X_DCOMP1_STS_WIDTH 1 /* DCOMP1_STS */ +#define WM831X_DCMP4_ENA 0x0008 /* DCMP4_ENA */ +#define WM831X_DCMP4_ENA_MASK 0x0008 /* DCMP4_ENA */ +#define WM831X_DCMP4_ENA_SHIFT 3 /* DCMP4_ENA */ +#define WM831X_DCMP4_ENA_WIDTH 1 /* DCMP4_ENA */ +#define WM831X_DCMP3_ENA 0x0004 /* 
DCMP3_ENA */ +#define WM831X_DCMP3_ENA_MASK 0x0004 /* DCMP3_ENA */ +#define WM831X_DCMP3_ENA_SHIFT 2 /* DCMP3_ENA */ +#define WM831X_DCMP3_ENA_WIDTH 1 /* DCMP3_ENA */ +#define WM831X_DCMP2_ENA 0x0002 /* DCMP2_ENA */ +#define WM831X_DCMP2_ENA_MASK 0x0002 /* DCMP2_ENA */ +#define WM831X_DCMP2_ENA_SHIFT 1 /* DCMP2_ENA */ +#define WM831X_DCMP2_ENA_WIDTH 1 /* DCMP2_ENA */ +#define WM831X_DCMP1_ENA 0x0001 /* DCMP1_ENA */ +#define WM831X_DCMP1_ENA_MASK 0x0001 /* DCMP1_ENA */ +#define WM831X_DCMP1_ENA_SHIFT 0 /* DCMP1_ENA */ +#define WM831X_DCMP1_ENA_WIDTH 1 /* DCMP1_ENA */ + +/* + * R16433 (0x4031) - Comparator 1 + */ +#define WM831X_DCMP1_SRC_MASK 0xE000 /* DCMP1_SRC - [15:13] */ +#define WM831X_DCMP1_SRC_SHIFT 13 /* DCMP1_SRC - [15:13] */ +#define WM831X_DCMP1_SRC_WIDTH 3 /* DCMP1_SRC - [15:13] */ +#define WM831X_DCMP1_GT 0x1000 /* DCMP1_GT */ +#define WM831X_DCMP1_GT_MASK 0x1000 /* DCMP1_GT */ +#define WM831X_DCMP1_GT_SHIFT 12 /* DCMP1_GT */ +#define WM831X_DCMP1_GT_WIDTH 1 /* DCMP1_GT */ +#define WM831X_DCMP1_THR_MASK 0x0FFF /* DCMP1_THR - [11:0] */ +#define WM831X_DCMP1_THR_SHIFT 0 /* DCMP1_THR - [11:0] */ +#define WM831X_DCMP1_THR_WIDTH 12 /* DCMP1_THR - [11:0] */ + +/* + * R16434 (0x4032) - Comparator 2 + */ +#define WM831X_DCMP2_SRC_MASK 0xE000 /* DCMP2_SRC - [15:13] */ +#define WM831X_DCMP2_SRC_SHIFT 13 /* DCMP2_SRC - [15:13] */ +#define WM831X_DCMP2_SRC_WIDTH 3 /* DCMP2_SRC - [15:13] */ +#define WM831X_DCMP2_GT 0x1000 /* DCMP2_GT */ +#define WM831X_DCMP2_GT_MASK 0x1000 /* DCMP2_GT */ +#define WM831X_DCMP2_GT_SHIFT 12 /* DCMP2_GT */ +#define WM831X_DCMP2_GT_WIDTH 1 /* DCMP2_GT */ +#define WM831X_DCMP2_THR_MASK 0x0FFF /* DCMP2_THR - [11:0] */ +#define WM831X_DCMP2_THR_SHIFT 0 /* DCMP2_THR - [11:0] */ +#define WM831X_DCMP2_THR_WIDTH 12 /* DCMP2_THR - [11:0] */ + +/* + * R16435 (0x4033) - Comparator 3 + */ +#define WM831X_DCMP3_SRC_MASK 0xE000 /* DCMP3_SRC - [15:13] */ +#define WM831X_DCMP3_SRC_SHIFT 13 /* DCMP3_SRC - [15:13] */ +#define WM831X_DCMP3_SRC_WIDTH 3 /* DCMP3_SRC - [15:13] */ +#define WM831X_DCMP3_GT 0x1000 /* DCMP3_GT */ +#define WM831X_DCMP3_GT_MASK 0x1000 /* DCMP3_GT */ +#define WM831X_DCMP3_GT_SHIFT 12 /* DCMP3_GT */ +#define WM831X_DCMP3_GT_WIDTH 1 /* DCMP3_GT */ +#define WM831X_DCMP3_THR_MASK 0x0FFF /* DCMP3_THR - [11:0] */ +#define WM831X_DCMP3_THR_SHIFT 0 /* DCMP3_THR - [11:0] */ +#define WM831X_DCMP3_THR_WIDTH 12 /* DCMP3_THR - [11:0] */ + +/* + * R16436 (0x4034) - Comparator 4 + */ +#define WM831X_DCMP4_SRC_MASK 0xE000 /* DCMP4_SRC - [15:13] */ +#define WM831X_DCMP4_SRC_SHIFT 13 /* DCMP4_SRC - [15:13] */ +#define WM831X_DCMP4_SRC_WIDTH 3 /* DCMP4_SRC - [15:13] */ +#define WM831X_DCMP4_GT 0x1000 /* DCMP4_GT */ +#define WM831X_DCMP4_GT_MASK 0x1000 /* DCMP4_GT */ +#define WM831X_DCMP4_GT_SHIFT 12 /* DCMP4_GT */ +#define WM831X_DCMP4_GT_WIDTH 1 /* DCMP4_GT */ +#define WM831X_DCMP4_THR_MASK 0x0FFF /* DCMP4_THR - [11:0] */ +#define WM831X_DCMP4_THR_SHIFT 0 /* DCMP4_THR - [11:0] */ +#define WM831X_DCMP4_THR_WIDTH 12 /* DCMP4_THR - [11:0] */ + +#define WM831X_AUX_CAL_FACTOR 0xfff +#define WM831X_AUX_CAL_NOMINAL 0x222 + +enum wm831x_auxadc { + WM831X_AUX_CAL = 15, + WM831X_AUX_BKUP_BATT = 10, + WM831X_AUX_WALL = 9, + WM831X_AUX_BATT = 8, + WM831X_AUX_USB = 7, + WM831X_AUX_SYSVDD = 6, + WM831X_AUX_BATT_TEMP = 5, + WM831X_AUX_CHIP_TEMP = 4, + WM831X_AUX_AUX4 = 3, + WM831X_AUX_AUX3 = 2, + WM831X_AUX_AUX2 = 1, + WM831X_AUX_AUX1 = 0, +}; + +int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input); +int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc 
input); + +#endif diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h new file mode 100644 index 00000000..4b121185 --- /dev/null +++ b/include/linux/mfd/wm831x/core.h @@ -0,0 +1,422 @@ +/* + * include/linux/mfd/wm831x/core.h -- Core interface for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_CORE_H__ +#define __MFD_WM831X_CORE_H__ + +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/regmap.h> + +/* + * Register values. + */ +#define WM831X_RESET_ID 0x00 +#define WM831X_REVISION 0x01 +#define WM831X_PARENT_ID 0x4000 +#define WM831X_SYSVDD_CONTROL 0x4001 +#define WM831X_THERMAL_MONITORING 0x4002 +#define WM831X_POWER_STATE 0x4003 +#define WM831X_WATCHDOG 0x4004 +#define WM831X_ON_PIN_CONTROL 0x4005 +#define WM831X_RESET_CONTROL 0x4006 +#define WM831X_CONTROL_INTERFACE 0x4007 +#define WM831X_SECURITY_KEY 0x4008 +#define WM831X_SOFTWARE_SCRATCH 0x4009 +#define WM831X_OTP_CONTROL 0x400A +#define WM831X_GPIO_LEVEL 0x400C +#define WM831X_SYSTEM_STATUS 0x400D +#define WM831X_ON_SOURCE 0x400E +#define WM831X_OFF_SOURCE 0x400F +#define WM831X_SYSTEM_INTERRUPTS 0x4010 +#define WM831X_INTERRUPT_STATUS_1 0x4011 +#define WM831X_INTERRUPT_STATUS_2 0x4012 +#define WM831X_INTERRUPT_STATUS_3 0x4013 +#define WM831X_INTERRUPT_STATUS_4 0x4014 +#define WM831X_INTERRUPT_STATUS_5 0x4015 +#define WM831X_IRQ_CONFIG 0x4017 +#define WM831X_SYSTEM_INTERRUPTS_MASK 0x4018 +#define WM831X_INTERRUPT_STATUS_1_MASK 0x4019 +#define WM831X_INTERRUPT_STATUS_2_MASK 0x401A +#define WM831X_INTERRUPT_STATUS_3_MASK 0x401B +#define WM831X_INTERRUPT_STATUS_4_MASK 0x401C +#define WM831X_INTERRUPT_STATUS_5_MASK 0x401D +#define WM831X_RTC_WRITE_COUNTER 0x4020 +#define WM831X_RTC_TIME_1 0x4021 +#define WM831X_RTC_TIME_2 0x4022 +#define WM831X_RTC_ALARM_1 0x4023 +#define WM831X_RTC_ALARM_2 0x4024 +#define WM831X_RTC_CONTROL 0x4025 +#define WM831X_RTC_TRIM 0x4026 +#define WM831X_TOUCH_CONTROL_1 0x4028 +#define WM831X_TOUCH_CONTROL_2 0x4029 +#define WM831X_TOUCH_DATA_X 0x402A +#define WM831X_TOUCH_DATA_Y 0x402B +#define WM831X_TOUCH_DATA_Z 0x402C +#define WM831X_AUXADC_DATA 0x402D +#define WM831X_AUXADC_CONTROL 0x402E +#define WM831X_AUXADC_SOURCE 0x402F +#define WM831X_COMPARATOR_CONTROL 0x4030 +#define WM831X_COMPARATOR_1 0x4031 +#define WM831X_COMPARATOR_2 0x4032 +#define WM831X_COMPARATOR_3 0x4033 +#define WM831X_COMPARATOR_4 0x4034 +#define WM831X_GPIO1_CONTROL 0x4038 +#define WM831X_GPIO2_CONTROL 0x4039 +#define WM831X_GPIO3_CONTROL 0x403A +#define WM831X_GPIO4_CONTROL 0x403B +#define WM831X_GPIO5_CONTROL 0x403C +#define WM831X_GPIO6_CONTROL 0x403D +#define WM831X_GPIO7_CONTROL 0x403E +#define WM831X_GPIO8_CONTROL 0x403F +#define WM831X_GPIO9_CONTROL 0x4040 +#define WM831X_GPIO10_CONTROL 0x4041 +#define WM831X_GPIO11_CONTROL 0x4042 +#define WM831X_GPIO12_CONTROL 0x4043 +#define WM831X_GPIO13_CONTROL 0x4044 +#define WM831X_GPIO14_CONTROL 0x4045 +#define WM831X_GPIO15_CONTROL 0x4046 +#define WM831X_GPIO16_CONTROL 0x4047 +#define WM831X_CHARGER_CONTROL_1 0x4048 +#define WM831X_CHARGER_CONTROL_2 0x4049 +#define WM831X_CHARGER_STATUS 0x404A +#define WM831X_BACKUP_CHARGER_CONTROL 0x404B +#define 
WM831X_STATUS_LED_1 0x404C +#define WM831X_STATUS_LED_2 0x404D +#define WM831X_CURRENT_SINK_1 0x404E +#define WM831X_CURRENT_SINK_2 0x404F +#define WM831X_DCDC_ENABLE 0x4050 +#define WM831X_LDO_ENABLE 0x4051 +#define WM831X_DCDC_STATUS 0x4052 +#define WM831X_LDO_STATUS 0x4053 +#define WM831X_DCDC_UV_STATUS 0x4054 +#define WM831X_LDO_UV_STATUS 0x4055 +#define WM831X_DC1_CONTROL_1 0x4056 +#define WM831X_DC1_CONTROL_2 0x4057 +#define WM831X_DC1_ON_CONFIG 0x4058 +#define WM831X_DC1_SLEEP_CONTROL 0x4059 +#define WM831X_DC1_DVS_CONTROL 0x405A +#define WM831X_DC2_CONTROL_1 0x405B +#define WM831X_DC2_CONTROL_2 0x405C +#define WM831X_DC2_ON_CONFIG 0x405D +#define WM831X_DC2_SLEEP_CONTROL 0x405E +#define WM831X_DC2_DVS_CONTROL 0x405F +#define WM831X_DC3_CONTROL_1 0x4060 +#define WM831X_DC3_CONTROL_2 0x4061 +#define WM831X_DC3_ON_CONFIG 0x4062 +#define WM831X_DC3_SLEEP_CONTROL 0x4063 +#define WM831X_DC4_CONTROL 0x4064 +#define WM831X_DC4_SLEEP_CONTROL 0x4065 +#define WM832X_DC4_SLEEP_CONTROL 0x4067 +#define WM831X_EPE1_CONTROL 0x4066 +#define WM831X_EPE2_CONTROL 0x4067 +#define WM831X_LDO1_CONTROL 0x4068 +#define WM831X_LDO1_ON_CONTROL 0x4069 +#define WM831X_LDO1_SLEEP_CONTROL 0x406A +#define WM831X_LDO2_CONTROL 0x406B +#define WM831X_LDO2_ON_CONTROL 0x406C +#define WM831X_LDO2_SLEEP_CONTROL 0x406D +#define WM831X_LDO3_CONTROL 0x406E +#define WM831X_LDO3_ON_CONTROL 0x406F +#define WM831X_LDO3_SLEEP_CONTROL 0x4070 +#define WM831X_LDO4_CONTROL 0x4071 +#define WM831X_LDO4_ON_CONTROL 0x4072 +#define WM831X_LDO4_SLEEP_CONTROL 0x4073 +#define WM831X_LDO5_CONTROL 0x4074 +#define WM831X_LDO5_ON_CONTROL 0x4075 +#define WM831X_LDO5_SLEEP_CONTROL 0x4076 +#define WM831X_LDO6_CONTROL 0x4077 +#define WM831X_LDO6_ON_CONTROL 0x4078 +#define WM831X_LDO6_SLEEP_CONTROL 0x4079 +#define WM831X_LDO7_CONTROL 0x407A +#define WM831X_LDO7_ON_CONTROL 0x407B +#define WM831X_LDO7_SLEEP_CONTROL 0x407C +#define WM831X_LDO8_CONTROL 0x407D +#define WM831X_LDO8_ON_CONTROL 0x407E +#define WM831X_LDO8_SLEEP_CONTROL 0x407F +#define WM831X_LDO9_CONTROL 0x4080 +#define WM831X_LDO9_ON_CONTROL 0x4081 +#define WM831X_LDO9_SLEEP_CONTROL 0x4082 +#define WM831X_LDO10_CONTROL 0x4083 +#define WM831X_LDO10_ON_CONTROL 0x4084 +#define WM831X_LDO10_SLEEP_CONTROL 0x4085 +#define WM831X_LDO11_ON_CONTROL 0x4087 +#define WM831X_LDO11_SLEEP_CONTROL 0x4088 +#define WM831X_POWER_GOOD_SOURCE_1 0x408E +#define WM831X_POWER_GOOD_SOURCE_2 0x408F +#define WM831X_CLOCK_CONTROL_1 0x4090 +#define WM831X_CLOCK_CONTROL_2 0x4091 +#define WM831X_FLL_CONTROL_1 0x4092 +#define WM831X_FLL_CONTROL_2 0x4093 +#define WM831X_FLL_CONTROL_3 0x4094 +#define WM831X_FLL_CONTROL_4 0x4095 +#define WM831X_FLL_CONTROL_5 0x4096 +#define WM831X_UNIQUE_ID_1 0x7800 +#define WM831X_UNIQUE_ID_2 0x7801 +#define WM831X_UNIQUE_ID_3 0x7802 +#define WM831X_UNIQUE_ID_4 0x7803 +#define WM831X_UNIQUE_ID_5 0x7804 +#define WM831X_UNIQUE_ID_6 0x7805 +#define WM831X_UNIQUE_ID_7 0x7806 +#define WM831X_UNIQUE_ID_8 0x7807 +#define WM831X_FACTORY_OTP_ID 0x7808 +#define WM831X_FACTORY_OTP_1 0x7809 +#define WM831X_FACTORY_OTP_2 0x780A +#define WM831X_FACTORY_OTP_3 0x780B +#define WM831X_FACTORY_OTP_4 0x780C +#define WM831X_FACTORY_OTP_5 0x780D +#define WM831X_CUSTOMER_OTP_ID 0x7810 +#define WM831X_DC1_OTP_CONTROL 0x7811 +#define WM831X_DC2_OTP_CONTROL 0x7812 +#define WM831X_DC3_OTP_CONTROL 0x7813 +#define WM831X_LDO1_2_OTP_CONTROL 0x7814 +#define WM831X_LDO3_4_OTP_CONTROL 0x7815 +#define WM831X_LDO5_6_OTP_CONTROL 0x7816 +#define WM831X_LDO7_8_OTP_CONTROL 0x7817 +#define WM831X_LDO9_10_OTP_CONTROL 0x7818 
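
The register addresses in this block are normally used together with the per-field _MASK/_SHIFT/_WIDTH macros defined later in core.h and the wm831x_reg_read() helper it declares. The following is a minimal, purely illustrative sketch of that pattern; the wrapper function name is hypothetical and is not part of the header.

/*
 * Illustrative sketch only -- not part of core.h.  Assumes the
 * wm831x_reg_read() helper and the WM831X_PARENT_REV_* field macros
 * declared elsewhere in this header.
 */
#include <linux/mfd/wm831x/core.h>

static int example_wm831x_parent_rev(struct wm831x *wm831x)
{
	int ret = wm831x_reg_read(wm831x, WM831X_REVISION);

	if (ret < 0)
		return ret;	/* register read failed */

	/* Extract the PARENT_REV field ([15:8]) from the Revision register */
	return (ret & WM831X_PARENT_REV_MASK) >> WM831X_PARENT_REV_SHIFT;
}
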
+#define WM831X_LDO11_EPE_CONTROL 0x7819 +#define WM831X_GPIO1_OTP_CONTROL 0x781A +#define WM831X_GPIO2_OTP_CONTROL 0x781B +#define WM831X_GPIO3_OTP_CONTROL 0x781C +#define WM831X_GPIO4_OTP_CONTROL 0x781D +#define WM831X_GPIO5_OTP_CONTROL 0x781E +#define WM831X_GPIO6_OTP_CONTROL 0x781F +#define WM831X_DBE_CHECK_DATA 0x7827 + +/* + * R0 (0x00) - Reset ID + */ +#define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */ + +/* + * R1 (0x01) - Revision + */ +#define WM831X_PARENT_REV_MASK 0xFF00 /* PARENT_REV - [15:8] */ +#define WM831X_PARENT_REV_SHIFT 8 /* PARENT_REV - [15:8] */ +#define WM831X_PARENT_REV_WIDTH 8 /* PARENT_REV - [15:8] */ +#define WM831X_CHILD_REV_MASK 0x00FF /* CHILD_REV - [7:0] */ +#define WM831X_CHILD_REV_SHIFT 0 /* CHILD_REV - [7:0] */ +#define WM831X_CHILD_REV_WIDTH 8 /* CHILD_REV - [7:0] */ + +/* + * R16384 (0x4000) - Parent ID + */ +#define WM831X_PARENT_ID_MASK 0xFFFF /* PARENT_ID - [15:0] */ +#define WM831X_PARENT_ID_SHIFT 0 /* PARENT_ID - [15:0] */ +#define WM831X_PARENT_ID_WIDTH 16 /* PARENT_ID - [15:0] */ + +/* + * R16389 (0x4005) - ON Pin Control + */ +#define WM831X_ON_PIN_SECACT_MASK 0x0300 /* ON_PIN_SECACT - [9:8] */ +#define WM831X_ON_PIN_SECACT_SHIFT 8 /* ON_PIN_SECACT - [9:8] */ +#define WM831X_ON_PIN_SECACT_WIDTH 2 /* ON_PIN_SECACT - [9:8] */ +#define WM831X_ON_PIN_PRIMACT_MASK 0x0030 /* ON_PIN_PRIMACT - [5:4] */ +#define WM831X_ON_PIN_PRIMACT_SHIFT 4 /* ON_PIN_PRIMACT - [5:4] */ +#define WM831X_ON_PIN_PRIMACT_WIDTH 2 /* ON_PIN_PRIMACT - [5:4] */ +#define WM831X_ON_PIN_STS 0x0008 /* ON_PIN_STS */ +#define WM831X_ON_PIN_STS_MASK 0x0008 /* ON_PIN_STS */ +#define WM831X_ON_PIN_STS_SHIFT 3 /* ON_PIN_STS */ +#define WM831X_ON_PIN_STS_WIDTH 1 /* ON_PIN_STS */ +#define WM831X_ON_PIN_TO_MASK 0x0003 /* ON_PIN_TO - [1:0] */ +#define WM831X_ON_PIN_TO_SHIFT 0 /* ON_PIN_TO - [1:0] */ +#define WM831X_ON_PIN_TO_WIDTH 2 /* ON_PIN_TO - [1:0] */ + +/* + * R16528 (0x4090) - Clock Control 1 + */ +#define WM831X_CLKOUT_ENA 0x8000 /* CLKOUT_ENA */ +#define WM831X_CLKOUT_ENA_MASK 0x8000 /* CLKOUT_ENA */ +#define WM831X_CLKOUT_ENA_SHIFT 15 /* CLKOUT_ENA */ +#define WM831X_CLKOUT_ENA_WIDTH 1 /* CLKOUT_ENA */ +#define WM831X_CLKOUT_OD 0x2000 /* CLKOUT_OD */ +#define WM831X_CLKOUT_OD_MASK 0x2000 /* CLKOUT_OD */ +#define WM831X_CLKOUT_OD_SHIFT 13 /* CLKOUT_OD */ +#define WM831X_CLKOUT_OD_WIDTH 1 /* CLKOUT_OD */ +#define WM831X_CLKOUT_SLOT_MASK 0x0700 /* CLKOUT_SLOT - [10:8] */ +#define WM831X_CLKOUT_SLOT_SHIFT 8 /* CLKOUT_SLOT - [10:8] */ +#define WM831X_CLKOUT_SLOT_WIDTH 3 /* CLKOUT_SLOT - [10:8] */ +#define WM831X_CLKOUT_SLPSLOT_MASK 0x0070 /* CLKOUT_SLPSLOT - [6:4] */ +#define WM831X_CLKOUT_SLPSLOT_SHIFT 4 /* CLKOUT_SLPSLOT - [6:4] */ +#define WM831X_CLKOUT_SLPSLOT_WIDTH 3 /* CLKOUT_SLPSLOT - [6:4] */ +#define WM831X_CLKOUT_SRC 0x0001 /* CLKOUT_SRC */ +#define WM831X_CLKOUT_SRC_MASK 0x0001 /* CLKOUT_SRC */ +#define WM831X_CLKOUT_SRC_SHIFT 0 /* CLKOUT_SRC */ +#define WM831X_CLKOUT_SRC_WIDTH 1 /* CLKOUT_SRC */ + +/* + * R16529 (0x4091) - Clock Control 2 + */ +#define WM831X_XTAL_INH 0x8000 /* XTAL_INH */ +#define WM831X_XTAL_INH_MASK 0x8000 /* XTAL_INH */ +#define WM831X_XTAL_INH_SHIFT 15 /* XTAL_INH */ +#define WM831X_XTAL_INH_WIDTH 1 /* XTAL_INH */ +#define WM831X_XTAL_ENA 0x2000 /* XTAL_ENA */ +#define WM831X_XTAL_ENA_MASK 0x2000 /* XTAL_ENA */ +#define WM831X_XTAL_ENA_SHIFT 13 /* XTAL_ENA */ +#define WM831X_XTAL_ENA_WIDTH 1 /* XTAL_ENA */ +#define WM831X_XTAL_BKUPENA 
0x1000 /* XTAL_BKUPENA */ +#define WM831X_XTAL_BKUPENA_MASK 0x1000 /* XTAL_BKUPENA */ +#define WM831X_XTAL_BKUPENA_SHIFT 12 /* XTAL_BKUPENA */ +#define WM831X_XTAL_BKUPENA_WIDTH 1 /* XTAL_BKUPENA */ +#define WM831X_FLL_AUTO 0x0080 /* FLL_AUTO */ +#define WM831X_FLL_AUTO_MASK 0x0080 /* FLL_AUTO */ +#define WM831X_FLL_AUTO_SHIFT 7 /* FLL_AUTO */ +#define WM831X_FLL_AUTO_WIDTH 1 /* FLL_AUTO */ +#define WM831X_FLL_AUTO_FREQ_MASK 0x0007 /* FLL_AUTO_FREQ - [2:0] */ +#define WM831X_FLL_AUTO_FREQ_SHIFT 0 /* FLL_AUTO_FREQ - [2:0] */ +#define WM831X_FLL_AUTO_FREQ_WIDTH 3 /* FLL_AUTO_FREQ - [2:0] */ + +/* + * R16530 (0x4092) - FLL Control 1 + */ +#define WM831X_FLL_FRAC 0x0004 /* FLL_FRAC */ +#define WM831X_FLL_FRAC_MASK 0x0004 /* FLL_FRAC */ +#define WM831X_FLL_FRAC_SHIFT 2 /* FLL_FRAC */ +#define WM831X_FLL_FRAC_WIDTH 1 /* FLL_FRAC */ +#define WM831X_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */ +#define WM831X_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */ +#define WM831X_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */ +#define WM831X_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */ +#define WM831X_FLL_ENA 0x0001 /* FLL_ENA */ +#define WM831X_FLL_ENA_MASK 0x0001 /* FLL_ENA */ +#define WM831X_FLL_ENA_SHIFT 0 /* FLL_ENA */ +#define WM831X_FLL_ENA_WIDTH 1 /* FLL_ENA */ + +/* + * R16531 (0x4093) - FLL Control 2 + */ +#define WM831X_FLL_OUTDIV_MASK 0x3F00 /* FLL_OUTDIV - [13:8] */ +#define WM831X_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [13:8] */ +#define WM831X_FLL_OUTDIV_WIDTH 6 /* FLL_OUTDIV - [13:8] */ +#define WM831X_FLL_CTRL_RATE_MASK 0x0070 /* FLL_CTRL_RATE - [6:4] */ +#define WM831X_FLL_CTRL_RATE_SHIFT 4 /* FLL_CTRL_RATE - [6:4] */ +#define WM831X_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [6:4] */ +#define WM831X_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */ +#define WM831X_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */ +#define WM831X_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */ + +/* + * R16532 (0x4094) - FLL Control 3 + */ +#define WM831X_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */ +#define WM831X_FLL_K_SHIFT 0 /* FLL_K - [15:0] */ +#define WM831X_FLL_K_WIDTH 16 /* FLL_K - [15:0] */ + +/* + * R16533 (0x4095) - FLL Control 4 + */ +#define WM831X_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */ +#define WM831X_FLL_N_SHIFT 5 /* FLL_N - [14:5] */ +#define WM831X_FLL_N_WIDTH 10 /* FLL_N - [14:5] */ +#define WM831X_FLL_GAIN_MASK 0x000F /* FLL_GAIN - [3:0] */ +#define WM831X_FLL_GAIN_SHIFT 0 /* FLL_GAIN - [3:0] */ +#define WM831X_FLL_GAIN_WIDTH 4 /* FLL_GAIN - [3:0] */ + +/* + * R16534 (0x4096) - FLL Control 5 + */ +#define WM831X_FLL_CLK_REF_DIV_MASK 0x0018 /* FLL_CLK_REF_DIV - [4:3] */ +#define WM831X_FLL_CLK_REF_DIV_SHIFT 3 /* FLL_CLK_REF_DIV - [4:3] */ +#define WM831X_FLL_CLK_REF_DIV_WIDTH 2 /* FLL_CLK_REF_DIV - [4:3] */ +#define WM831X_FLL_CLK_SRC_MASK 0x0003 /* FLL_CLK_SRC - [1:0] */ +#define WM831X_FLL_CLK_SRC_SHIFT 0 /* FLL_CLK_SRC - [1:0] */ +#define WM831X_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */ + +struct regulator_dev; + +#define WM831X_NUM_IRQ_REGS 5 +#define WM831X_NUM_GPIO_REGS 16 + +enum wm831x_parent { + WM8310 = 0x8310, + WM8311 = 0x8311, + WM8312 = 0x8312, + WM8320 = 0x8320, + WM8321 = 0x8321, + WM8325 = 0x8325, + WM8326 = 0x8326, +}; + +struct wm831x; +enum wm831x_auxadc; + +typedef int (*wm831x_auxadc_read_fn)(struct wm831x *wm831x, + enum wm831x_auxadc input); + +struct wm831x { + struct mutex io_lock; + + struct device *dev; + + struct regmap *regmap; + + int irq; /* Our chip IRQ */ + struct mutex irq_lock; + int irq_base; + int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */ + int 
irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */ + + bool soft_shutdown; + + /* Chip revision based flags */ + unsigned has_gpio_ena:1; /* Has GPIO enable bit */ + unsigned has_cs_sts:1; /* Has current sink status bit */ + unsigned charger_irq_wake:1; /* Are charger IRQs a wake source? */ + + int num_gpio; + + /* Used by the interrupt controller code to post writes */ + int gpio_update[WM831X_NUM_GPIO_REGS]; + bool gpio_level[WM831X_NUM_GPIO_REGS]; + + struct mutex auxadc_lock; + struct list_head auxadc_pending; + u16 auxadc_active; + wm831x_auxadc_read_fn auxadc_read; + + /* The WM831x has a security key blocking access to certain + * registers. The mutex is taken by the accessors for locking + * and unlocking the security key, locked is used to fail + * writes if the lock is held. + */ + struct mutex key_lock; + unsigned int locked:1; +}; + +/* Device I/O API */ +int wm831x_reg_read(struct wm831x *wm831x, unsigned short reg); +int wm831x_reg_write(struct wm831x *wm831x, unsigned short reg, + unsigned short val); +void wm831x_reg_lock(struct wm831x *wm831x); +int wm831x_reg_unlock(struct wm831x *wm831x); +int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg, + unsigned short mask, unsigned short val); +int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, + int count, u16 *buf); + +int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq); +void wm831x_device_exit(struct wm831x *wm831x); +int wm831x_device_suspend(struct wm831x *wm831x); +void wm831x_device_shutdown(struct wm831x *wm831x); +int wm831x_irq_init(struct wm831x *wm831x, int irq); +void wm831x_irq_exit(struct wm831x *wm831x); +void wm831x_auxadc_init(struct wm831x *wm831x); + +extern struct regmap_config wm831x_regmap_config; + +#endif diff --git a/include/linux/mfd/wm831x/gpio.h b/include/linux/mfd/wm831x/gpio.h new file mode 100644 index 00000000..9b163c58 --- /dev/null +++ b/include/linux/mfd/wm831x/gpio.h @@ -0,0 +1,59 @@ +/* + * include/linux/mfd/wm831x/gpio.h -- GPIO for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_GPIO_H__ +#define __MFD_WM831X_GPIO_H__ + +/* + * R16440-16455 (0x4038-0x4047) - GPIOx Control + */ +#define WM831X_GPN_DIR 0x8000 /* GPN_DIR */ +#define WM831X_GPN_DIR_MASK 0x8000 /* GPN_DIR */ +#define WM831X_GPN_DIR_SHIFT 15 /* GPN_DIR */ +#define WM831X_GPN_DIR_WIDTH 1 /* GPN_DIR */ +#define WM831X_GPN_PULL_MASK 0x6000 /* GPN_PULL - [14:13] */ +#define WM831X_GPN_PULL_SHIFT 13 /* GPN_PULL - [14:13] */ +#define WM831X_GPN_PULL_WIDTH 2 /* GPN_PULL - [14:13] */ +#define WM831X_GPN_INT_MODE 0x1000 /* GPN_INT_MODE */ +#define WM831X_GPN_INT_MODE_MASK 0x1000 /* GPN_INT_MODE */ +#define WM831X_GPN_INT_MODE_SHIFT 12 /* GPN_INT_MODE */ +#define WM831X_GPN_INT_MODE_WIDTH 1 /* GPN_INT_MODE */ +#define WM831X_GPN_PWR_DOM 0x0800 /* GPN_PWR_DOM */ +#define WM831X_GPN_PWR_DOM_MASK 0x0800 /* GPN_PWR_DOM */ +#define WM831X_GPN_PWR_DOM_SHIFT 11 /* GPN_PWR_DOM */ +#define WM831X_GPN_PWR_DOM_WIDTH 1 /* GPN_PWR_DOM */ +#define WM831X_GPN_POL 0x0400 /* GPN_POL */ +#define WM831X_GPN_POL_MASK 0x0400 /* GPN_POL */ +#define WM831X_GPN_POL_SHIFT 10 /* GPN_POL */ +#define WM831X_GPN_POL_WIDTH 1 /* GPN_POL */ +#define WM831X_GPN_OD 0x0200 /* GPN_OD */ +#define WM831X_GPN_OD_MASK 0x0200 /* GPN_OD */ +#define WM831X_GPN_OD_SHIFT 9 /* GPN_OD */ +#define WM831X_GPN_OD_WIDTH 1 /* GPN_OD */ +#define WM831X_GPN_ENA 0x0080 /* GPN_ENA */ +#define WM831X_GPN_ENA_MASK 0x0080 /* GPN_ENA */ +#define WM831X_GPN_ENA_SHIFT 7 /* GPN_ENA */ +#define WM831X_GPN_ENA_WIDTH 1 /* GPN_ENA */ +#define WM831X_GPN_TRI 0x0080 /* GPN_TRI */ +#define WM831X_GPN_TRI_MASK 0x0080 /* GPN_TRI */ +#define WM831X_GPN_TRI_SHIFT 7 /* GPN_TRI */ +#define WM831X_GPN_TRI_WIDTH 1 /* GPN_TRI */ +#define WM831X_GPN_FN_MASK 0x000F /* GPN_FN - [3:0] */ +#define WM831X_GPN_FN_SHIFT 0 /* GPN_FN - [3:0] */ +#define WM831X_GPN_FN_WIDTH 4 /* GPN_FN - [3:0] */ + +#define WM831X_GPIO_PULL_NONE (0 << WM831X_GPN_PULL_SHIFT) +#define WM831X_GPIO_PULL_DOWN (1 << WM831X_GPN_PULL_SHIFT) +#define WM831X_GPIO_PULL_UP (2 << WM831X_GPN_PULL_SHIFT) +#endif diff --git a/include/linux/mfd/wm831x/irq.h b/include/linux/mfd/wm831x/irq.h new file mode 100644 index 00000000..3a8c9765 --- /dev/null +++ b/include/linux/mfd/wm831x/irq.h @@ -0,0 +1,764 @@ +/* + * include/linux/mfd/wm831x/irq.h -- Interrupt controller for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_IRQ_H__ +#define __MFD_WM831X_IRQ_H__ + +/* Interrupt number assignments within Linux */ +#define WM831X_IRQ_TEMP_THW 0 +#define WM831X_IRQ_GPIO_1 1 +#define WM831X_IRQ_GPIO_2 2 +#define WM831X_IRQ_GPIO_3 3 +#define WM831X_IRQ_GPIO_4 4 +#define WM831X_IRQ_GPIO_5 5 +#define WM831X_IRQ_GPIO_6 6 +#define WM831X_IRQ_GPIO_7 7 +#define WM831X_IRQ_GPIO_8 8 +#define WM831X_IRQ_GPIO_9 9 +#define WM831X_IRQ_GPIO_10 10 +#define WM831X_IRQ_GPIO_11 11 +#define WM831X_IRQ_GPIO_12 12 +#define WM831X_IRQ_GPIO_13 13 +#define WM831X_IRQ_GPIO_14 14 +#define WM831X_IRQ_GPIO_15 15 +#define WM831X_IRQ_GPIO_16 16 +#define WM831X_IRQ_ON 17 +#define WM831X_IRQ_PPM_SYSLO 18 +#define WM831X_IRQ_PPM_PWR_SRC 19 +#define WM831X_IRQ_PPM_USB_CURR 20 +#define WM831X_IRQ_WDOG_TO 21 +#define WM831X_IRQ_RTC_PER 22 +#define WM831X_IRQ_RTC_ALM 23 +#define WM831X_IRQ_CHG_BATT_HOT 24 +#define WM831X_IRQ_CHG_BATT_COLD 25 +#define WM831X_IRQ_CHG_BATT_FAIL 26 +#define WM831X_IRQ_CHG_OV 27 +#define WM831X_IRQ_CHG_END 29 +#define WM831X_IRQ_CHG_TO 30 +#define WM831X_IRQ_CHG_MODE 31 +#define WM831X_IRQ_CHG_START 32 +#define WM831X_IRQ_TCHDATA 33 +#define WM831X_IRQ_TCHPD 34 +#define WM831X_IRQ_AUXADC_DATA 35 +#define WM831X_IRQ_AUXADC_DCOMP1 36 +#define WM831X_IRQ_AUXADC_DCOMP2 37 +#define WM831X_IRQ_AUXADC_DCOMP3 38 +#define WM831X_IRQ_AUXADC_DCOMP4 39 +#define WM831X_IRQ_CS1 40 +#define WM831X_IRQ_CS2 41 +#define WM831X_IRQ_HC_DC1 42 +#define WM831X_IRQ_HC_DC2 43 +#define WM831X_IRQ_UV_LDO1 44 +#define WM831X_IRQ_UV_LDO2 45 +#define WM831X_IRQ_UV_LDO3 46 +#define WM831X_IRQ_UV_LDO4 47 +#define WM831X_IRQ_UV_LDO5 48 +#define WM831X_IRQ_UV_LDO6 49 +#define WM831X_IRQ_UV_LDO7 50 +#define WM831X_IRQ_UV_LDO8 51 +#define WM831X_IRQ_UV_LDO9 52 +#define WM831X_IRQ_UV_LDO10 53 +#define WM831X_IRQ_UV_DC1 54 +#define WM831X_IRQ_UV_DC2 55 +#define WM831X_IRQ_UV_DC3 56 +#define WM831X_IRQ_UV_DC4 57 + +#define WM831X_NUM_IRQS 58 + +/* + * R16400 (0x4010) - System Interrupts + */ +#define WM831X_PS_INT 0x8000 /* PS_INT */ +#define WM831X_PS_INT_MASK 0x8000 /* PS_INT */ +#define WM831X_PS_INT_SHIFT 15 /* PS_INT */ +#define WM831X_PS_INT_WIDTH 1 /* PS_INT */ +#define WM831X_TEMP_INT 0x4000 /* TEMP_INT */ +#define WM831X_TEMP_INT_MASK 0x4000 /* TEMP_INT */ +#define WM831X_TEMP_INT_SHIFT 14 /* TEMP_INT */ +#define WM831X_TEMP_INT_WIDTH 1 /* TEMP_INT */ +#define WM831X_GP_INT 0x2000 /* GP_INT */ +#define WM831X_GP_INT_MASK 0x2000 /* GP_INT */ +#define WM831X_GP_INT_SHIFT 13 /* GP_INT */ +#define WM831X_GP_INT_WIDTH 1 /* GP_INT */ +#define WM831X_ON_PIN_INT 0x1000 /* ON_PIN_INT */ +#define WM831X_ON_PIN_INT_MASK 0x1000 /* ON_PIN_INT */ +#define WM831X_ON_PIN_INT_SHIFT 12 /* ON_PIN_INT */ +#define WM831X_ON_PIN_INT_WIDTH 1 /* ON_PIN_INT */ +#define WM831X_WDOG_INT 0x0800 /* WDOG_INT */ +#define WM831X_WDOG_INT_MASK 0x0800 /* WDOG_INT */ +#define WM831X_WDOG_INT_SHIFT 11 /* WDOG_INT */ +#define WM831X_WDOG_INT_WIDTH 1 /* WDOG_INT */ +#define WM831X_TCHDATA_INT 0x0400 /* TCHDATA_INT */ +#define WM831X_TCHDATA_INT_MASK 0x0400 /* TCHDATA_INT */ +#define WM831X_TCHDATA_INT_SHIFT 10 /* TCHDATA_INT */ +#define WM831X_TCHDATA_INT_WIDTH 1 /* TCHDATA_INT */ +#define WM831X_TCHPD_INT 0x0200 /* TCHPD_INT */ +#define WM831X_TCHPD_INT_MASK 0x0200 /* TCHPD_INT */ +#define WM831X_TCHPD_INT_SHIFT 9 /* TCHPD_INT */ +#define WM831X_TCHPD_INT_WIDTH 1 /* TCHPD_INT */ +#define WM831X_AUXADC_INT 0x0100 /* AUXADC_INT */ +#define WM831X_AUXADC_INT_MASK 0x0100 /* AUXADC_INT */ +#define WM831X_AUXADC_INT_SHIFT 8 /* AUXADC_INT */ +#define 
WM831X_AUXADC_INT_WIDTH 1 /* AUXADC_INT */ +#define WM831X_PPM_INT 0x0080 /* PPM_INT */ +#define WM831X_PPM_INT_MASK 0x0080 /* PPM_INT */ +#define WM831X_PPM_INT_SHIFT 7 /* PPM_INT */ +#define WM831X_PPM_INT_WIDTH 1 /* PPM_INT */ +#define WM831X_CS_INT 0x0040 /* CS_INT */ +#define WM831X_CS_INT_MASK 0x0040 /* CS_INT */ +#define WM831X_CS_INT_SHIFT 6 /* CS_INT */ +#define WM831X_CS_INT_WIDTH 1 /* CS_INT */ +#define WM831X_RTC_INT 0x0020 /* RTC_INT */ +#define WM831X_RTC_INT_MASK 0x0020 /* RTC_INT */ +#define WM831X_RTC_INT_SHIFT 5 /* RTC_INT */ +#define WM831X_RTC_INT_WIDTH 1 /* RTC_INT */ +#define WM831X_OTP_INT 0x0010 /* OTP_INT */ +#define WM831X_OTP_INT_MASK 0x0010 /* OTP_INT */ +#define WM831X_OTP_INT_SHIFT 4 /* OTP_INT */ +#define WM831X_OTP_INT_WIDTH 1 /* OTP_INT */ +#define WM831X_CHILD_INT 0x0008 /* CHILD_INT */ +#define WM831X_CHILD_INT_MASK 0x0008 /* CHILD_INT */ +#define WM831X_CHILD_INT_SHIFT 3 /* CHILD_INT */ +#define WM831X_CHILD_INT_WIDTH 1 /* CHILD_INT */ +#define WM831X_CHG_INT 0x0004 /* CHG_INT */ +#define WM831X_CHG_INT_MASK 0x0004 /* CHG_INT */ +#define WM831X_CHG_INT_SHIFT 2 /* CHG_INT */ +#define WM831X_CHG_INT_WIDTH 1 /* CHG_INT */ +#define WM831X_HC_INT 0x0002 /* HC_INT */ +#define WM831X_HC_INT_MASK 0x0002 /* HC_INT */ +#define WM831X_HC_INT_SHIFT 1 /* HC_INT */ +#define WM831X_HC_INT_WIDTH 1 /* HC_INT */ +#define WM831X_UV_INT 0x0001 /* UV_INT */ +#define WM831X_UV_INT_MASK 0x0001 /* UV_INT */ +#define WM831X_UV_INT_SHIFT 0 /* UV_INT */ +#define WM831X_UV_INT_WIDTH 1 /* UV_INT */ + +/* + * R16401 (0x4011) - Interrupt Status 1 + */ +#define WM831X_PPM_SYSLO_EINT 0x8000 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_SYSLO_EINT_MASK 0x8000 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_SYSLO_EINT_SHIFT 15 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_SYSLO_EINT_WIDTH 1 /* PPM_SYSLO_EINT */ +#define WM831X_PPM_PWR_SRC_EINT 0x4000 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_PWR_SRC_EINT_MASK 0x4000 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_PWR_SRC_EINT_SHIFT 14 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_PWR_SRC_EINT_WIDTH 1 /* PPM_PWR_SRC_EINT */ +#define WM831X_PPM_USB_CURR_EINT 0x2000 /* PPM_USB_CURR_EINT */ +#define WM831X_PPM_USB_CURR_EINT_MASK 0x2000 /* PPM_USB_CURR_EINT */ +#define WM831X_PPM_USB_CURR_EINT_SHIFT 13 /* PPM_USB_CURR_EINT */ +#define WM831X_PPM_USB_CURR_EINT_WIDTH 1 /* PPM_USB_CURR_EINT */ +#define WM831X_ON_PIN_EINT 0x1000 /* ON_PIN_EINT */ +#define WM831X_ON_PIN_EINT_MASK 0x1000 /* ON_PIN_EINT */ +#define WM831X_ON_PIN_EINT_SHIFT 12 /* ON_PIN_EINT */ +#define WM831X_ON_PIN_EINT_WIDTH 1 /* ON_PIN_EINT */ +#define WM831X_WDOG_TO_EINT 0x0800 /* WDOG_TO_EINT */ +#define WM831X_WDOG_TO_EINT_MASK 0x0800 /* WDOG_TO_EINT */ +#define WM831X_WDOG_TO_EINT_SHIFT 11 /* WDOG_TO_EINT */ +#define WM831X_WDOG_TO_EINT_WIDTH 1 /* WDOG_TO_EINT */ +#define WM831X_TCHDATA_EINT 0x0400 /* TCHDATA_EINT */ +#define WM831X_TCHDATA_EINT_MASK 0x0400 /* TCHDATA_EINT */ +#define WM831X_TCHDATA_EINT_SHIFT 10 /* TCHDATA_EINT */ +#define WM831X_TCHDATA_EINT_WIDTH 1 /* TCHDATA_EINT */ +#define WM831X_TCHPD_EINT 0x0200 /* TCHPD_EINT */ +#define WM831X_TCHPD_EINT_MASK 0x0200 /* TCHPD_EINT */ +#define WM831X_TCHPD_EINT_SHIFT 9 /* TCHPD_EINT */ +#define WM831X_TCHPD_EINT_WIDTH 1 /* TCHPD_EINT */ +#define WM831X_AUXADC_DATA_EINT 0x0100 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DATA_EINT_MASK 0x0100 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DATA_EINT_SHIFT 8 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DATA_EINT_WIDTH 1 /* AUXADC_DATA_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT 0x0080 /* 
AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT_MASK 0x0080 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT_SHIFT 7 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP4_EINT_WIDTH 1 /* AUXADC_DCOMP4_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT 0x0040 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT_MASK 0x0040 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT_SHIFT 6 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP3_EINT_WIDTH 1 /* AUXADC_DCOMP3_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT 0x0020 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT_MASK 0x0020 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT_SHIFT 5 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP2_EINT_WIDTH 1 /* AUXADC_DCOMP2_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT 0x0010 /* AUXADC_DCOMP1_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT_MASK 0x0010 /* AUXADC_DCOMP1_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT_SHIFT 4 /* AUXADC_DCOMP1_EINT */ +#define WM831X_AUXADC_DCOMP1_EINT_WIDTH 1 /* AUXADC_DCOMP1_EINT */ +#define WM831X_RTC_PER_EINT 0x0008 /* RTC_PER_EINT */ +#define WM831X_RTC_PER_EINT_MASK 0x0008 /* RTC_PER_EINT */ +#define WM831X_RTC_PER_EINT_SHIFT 3 /* RTC_PER_EINT */ +#define WM831X_RTC_PER_EINT_WIDTH 1 /* RTC_PER_EINT */ +#define WM831X_RTC_ALM_EINT 0x0004 /* RTC_ALM_EINT */ +#define WM831X_RTC_ALM_EINT_MASK 0x0004 /* RTC_ALM_EINT */ +#define WM831X_RTC_ALM_EINT_SHIFT 2 /* RTC_ALM_EINT */ +#define WM831X_RTC_ALM_EINT_WIDTH 1 /* RTC_ALM_EINT */ +#define WM831X_TEMP_THW_EINT 0x0002 /* TEMP_THW_EINT */ +#define WM831X_TEMP_THW_EINT_MASK 0x0002 /* TEMP_THW_EINT */ +#define WM831X_TEMP_THW_EINT_SHIFT 1 /* TEMP_THW_EINT */ +#define WM831X_TEMP_THW_EINT_WIDTH 1 /* TEMP_THW_EINT */ + +/* + * R16402 (0x4012) - Interrupt Status 2 + */ +#define WM831X_CHG_BATT_HOT_EINT 0x8000 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_HOT_EINT_MASK 0x8000 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_HOT_EINT_SHIFT 15 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_HOT_EINT_WIDTH 1 /* CHG_BATT_HOT_EINT */ +#define WM831X_CHG_BATT_COLD_EINT 0x4000 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_COLD_EINT_MASK 0x4000 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_COLD_EINT_SHIFT 14 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_COLD_EINT_WIDTH 1 /* CHG_BATT_COLD_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT 0x2000 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT_MASK 0x2000 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT_SHIFT 13 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_BATT_FAIL_EINT_WIDTH 1 /* CHG_BATT_FAIL_EINT */ +#define WM831X_CHG_OV_EINT 0x1000 /* CHG_OV_EINT */ +#define WM831X_CHG_OV_EINT_MASK 0x1000 /* CHG_OV_EINT */ +#define WM831X_CHG_OV_EINT_SHIFT 12 /* CHG_OV_EINT */ +#define WM831X_CHG_OV_EINT_WIDTH 1 /* CHG_OV_EINT */ +#define WM831X_CHG_END_EINT 0x0800 /* CHG_END_EINT */ +#define WM831X_CHG_END_EINT_MASK 0x0800 /* CHG_END_EINT */ +#define WM831X_CHG_END_EINT_SHIFT 11 /* CHG_END_EINT */ +#define WM831X_CHG_END_EINT_WIDTH 1 /* CHG_END_EINT */ +#define WM831X_CHG_TO_EINT 0x0400 /* CHG_TO_EINT */ +#define WM831X_CHG_TO_EINT_MASK 0x0400 /* CHG_TO_EINT */ +#define WM831X_CHG_TO_EINT_SHIFT 10 /* CHG_TO_EINT */ +#define WM831X_CHG_TO_EINT_WIDTH 1 /* CHG_TO_EINT */ +#define WM831X_CHG_MODE_EINT 0x0200 /* CHG_MODE_EINT */ +#define WM831X_CHG_MODE_EINT_MASK 0x0200 /* CHG_MODE_EINT */ +#define WM831X_CHG_MODE_EINT_SHIFT 9 /* CHG_MODE_EINT */ +#define WM831X_CHG_MODE_EINT_WIDTH 1 /* CHG_MODE_EINT */ +#define WM831X_CHG_START_EINT 
0x0100 /* CHG_START_EINT */ +#define WM831X_CHG_START_EINT_MASK 0x0100 /* CHG_START_EINT */ +#define WM831X_CHG_START_EINT_SHIFT 8 /* CHG_START_EINT */ +#define WM831X_CHG_START_EINT_WIDTH 1 /* CHG_START_EINT */ +#define WM831X_CS2_EINT 0x0080 /* CS2_EINT */ +#define WM831X_CS2_EINT_MASK 0x0080 /* CS2_EINT */ +#define WM831X_CS2_EINT_SHIFT 7 /* CS2_EINT */ +#define WM831X_CS2_EINT_WIDTH 1 /* CS2_EINT */ +#define WM831X_CS1_EINT 0x0040 /* CS1_EINT */ +#define WM831X_CS1_EINT_MASK 0x0040 /* CS1_EINT */ +#define WM831X_CS1_EINT_SHIFT 6 /* CS1_EINT */ +#define WM831X_CS1_EINT_WIDTH 1 /* CS1_EINT */ +#define WM831X_OTP_CMD_END_EINT 0x0020 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_CMD_END_EINT_MASK 0x0020 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_CMD_END_EINT_SHIFT 5 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_CMD_END_EINT_WIDTH 1 /* OTP_CMD_END_EINT */ +#define WM831X_OTP_ERR_EINT 0x0010 /* OTP_ERR_EINT */ +#define WM831X_OTP_ERR_EINT_MASK 0x0010 /* OTP_ERR_EINT */ +#define WM831X_OTP_ERR_EINT_SHIFT 4 /* OTP_ERR_EINT */ +#define WM831X_OTP_ERR_EINT_WIDTH 1 /* OTP_ERR_EINT */ +#define WM831X_PS_POR_EINT 0x0004 /* PS_POR_EINT */ +#define WM831X_PS_POR_EINT_MASK 0x0004 /* PS_POR_EINT */ +#define WM831X_PS_POR_EINT_SHIFT 2 /* PS_POR_EINT */ +#define WM831X_PS_POR_EINT_WIDTH 1 /* PS_POR_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT 0x0002 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT_MASK 0x0002 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT_SHIFT 1 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_SLEEP_OFF_EINT_WIDTH 1 /* PS_SLEEP_OFF_EINT */ +#define WM831X_PS_ON_WAKE_EINT 0x0001 /* PS_ON_WAKE_EINT */ +#define WM831X_PS_ON_WAKE_EINT_MASK 0x0001 /* PS_ON_WAKE_EINT */ +#define WM831X_PS_ON_WAKE_EINT_SHIFT 0 /* PS_ON_WAKE_EINT */ +#define WM831X_PS_ON_WAKE_EINT_WIDTH 1 /* PS_ON_WAKE_EINT */ + +/* + * R16403 (0x4013) - Interrupt Status 3 + */ +#define WM831X_UV_LDO10_EINT 0x0200 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO10_EINT_MASK 0x0200 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO10_EINT_SHIFT 9 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO10_EINT_WIDTH 1 /* UV_LDO10_EINT */ +#define WM831X_UV_LDO9_EINT 0x0100 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO9_EINT_MASK 0x0100 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO9_EINT_SHIFT 8 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO9_EINT_WIDTH 1 /* UV_LDO9_EINT */ +#define WM831X_UV_LDO8_EINT 0x0080 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO8_EINT_MASK 0x0080 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO8_EINT_SHIFT 7 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO8_EINT_WIDTH 1 /* UV_LDO8_EINT */ +#define WM831X_UV_LDO7_EINT 0x0040 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO7_EINT_MASK 0x0040 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO7_EINT_SHIFT 6 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO7_EINT_WIDTH 1 /* UV_LDO7_EINT */ +#define WM831X_UV_LDO6_EINT 0x0020 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO6_EINT_MASK 0x0020 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO6_EINT_SHIFT 5 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO6_EINT_WIDTH 1 /* UV_LDO6_EINT */ +#define WM831X_UV_LDO5_EINT 0x0010 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO5_EINT_MASK 0x0010 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO5_EINT_SHIFT 4 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO5_EINT_WIDTH 1 /* UV_LDO5_EINT */ +#define WM831X_UV_LDO4_EINT 0x0008 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO4_EINT_MASK 0x0008 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO4_EINT_SHIFT 3 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO4_EINT_WIDTH 1 /* UV_LDO4_EINT */ +#define WM831X_UV_LDO3_EINT 0x0004 /* UV_LDO3_EINT */ +#define 
WM831X_UV_LDO3_EINT_MASK 0x0004 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO3_EINT_SHIFT 2 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO3_EINT_WIDTH 1 /* UV_LDO3_EINT */ +#define WM831X_UV_LDO2_EINT 0x0002 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO2_EINT_MASK 0x0002 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO2_EINT_SHIFT 1 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO2_EINT_WIDTH 1 /* UV_LDO2_EINT */ +#define WM831X_UV_LDO1_EINT 0x0001 /* UV_LDO1_EINT */ +#define WM831X_UV_LDO1_EINT_MASK 0x0001 /* UV_LDO1_EINT */ +#define WM831X_UV_LDO1_EINT_SHIFT 0 /* UV_LDO1_EINT */ +#define WM831X_UV_LDO1_EINT_WIDTH 1 /* UV_LDO1_EINT */ + +/* + * R16404 (0x4014) - Interrupt Status 4 + */ +#define WM831X_HC_DC2_EINT 0x0200 /* HC_DC2_EINT */ +#define WM831X_HC_DC2_EINT_MASK 0x0200 /* HC_DC2_EINT */ +#define WM831X_HC_DC2_EINT_SHIFT 9 /* HC_DC2_EINT */ +#define WM831X_HC_DC2_EINT_WIDTH 1 /* HC_DC2_EINT */ +#define WM831X_HC_DC1_EINT 0x0100 /* HC_DC1_EINT */ +#define WM831X_HC_DC1_EINT_MASK 0x0100 /* HC_DC1_EINT */ +#define WM831X_HC_DC1_EINT_SHIFT 8 /* HC_DC1_EINT */ +#define WM831X_HC_DC1_EINT_WIDTH 1 /* HC_DC1_EINT */ +#define WM831X_UV_DC4_EINT 0x0008 /* UV_DC4_EINT */ +#define WM831X_UV_DC4_EINT_MASK 0x0008 /* UV_DC4_EINT */ +#define WM831X_UV_DC4_EINT_SHIFT 3 /* UV_DC4_EINT */ +#define WM831X_UV_DC4_EINT_WIDTH 1 /* UV_DC4_EINT */ +#define WM831X_UV_DC3_EINT 0x0004 /* UV_DC3_EINT */ +#define WM831X_UV_DC3_EINT_MASK 0x0004 /* UV_DC3_EINT */ +#define WM831X_UV_DC3_EINT_SHIFT 2 /* UV_DC3_EINT */ +#define WM831X_UV_DC3_EINT_WIDTH 1 /* UV_DC3_EINT */ +#define WM831X_UV_DC2_EINT 0x0002 /* UV_DC2_EINT */ +#define WM831X_UV_DC2_EINT_MASK 0x0002 /* UV_DC2_EINT */ +#define WM831X_UV_DC2_EINT_SHIFT 1 /* UV_DC2_EINT */ +#define WM831X_UV_DC2_EINT_WIDTH 1 /* UV_DC2_EINT */ +#define WM831X_UV_DC1_EINT 0x0001 /* UV_DC1_EINT */ +#define WM831X_UV_DC1_EINT_MASK 0x0001 /* UV_DC1_EINT */ +#define WM831X_UV_DC1_EINT_SHIFT 0 /* UV_DC1_EINT */ +#define WM831X_UV_DC1_EINT_WIDTH 1 /* UV_DC1_EINT */ + +/* + * R16405 (0x4015) - Interrupt Status 5 + */ +#define WM831X_GP16_EINT 0x8000 /* GP16_EINT */ +#define WM831X_GP16_EINT_MASK 0x8000 /* GP16_EINT */ +#define WM831X_GP16_EINT_SHIFT 15 /* GP16_EINT */ +#define WM831X_GP16_EINT_WIDTH 1 /* GP16_EINT */ +#define WM831X_GP15_EINT 0x4000 /* GP15_EINT */ +#define WM831X_GP15_EINT_MASK 0x4000 /* GP15_EINT */ +#define WM831X_GP15_EINT_SHIFT 14 /* GP15_EINT */ +#define WM831X_GP15_EINT_WIDTH 1 /* GP15_EINT */ +#define WM831X_GP14_EINT 0x2000 /* GP14_EINT */ +#define WM831X_GP14_EINT_MASK 0x2000 /* GP14_EINT */ +#define WM831X_GP14_EINT_SHIFT 13 /* GP14_EINT */ +#define WM831X_GP14_EINT_WIDTH 1 /* GP14_EINT */ +#define WM831X_GP13_EINT 0x1000 /* GP13_EINT */ +#define WM831X_GP13_EINT_MASK 0x1000 /* GP13_EINT */ +#define WM831X_GP13_EINT_SHIFT 12 /* GP13_EINT */ +#define WM831X_GP13_EINT_WIDTH 1 /* GP13_EINT */ +#define WM831X_GP12_EINT 0x0800 /* GP12_EINT */ +#define WM831X_GP12_EINT_MASK 0x0800 /* GP12_EINT */ +#define WM831X_GP12_EINT_SHIFT 11 /* GP12_EINT */ +#define WM831X_GP12_EINT_WIDTH 1 /* GP12_EINT */ +#define WM831X_GP11_EINT 0x0400 /* GP11_EINT */ +#define WM831X_GP11_EINT_MASK 0x0400 /* GP11_EINT */ +#define WM831X_GP11_EINT_SHIFT 10 /* GP11_EINT */ +#define WM831X_GP11_EINT_WIDTH 1 /* GP11_EINT */ +#define WM831X_GP10_EINT 0x0200 /* GP10_EINT */ +#define WM831X_GP10_EINT_MASK 0x0200 /* GP10_EINT */ +#define WM831X_GP10_EINT_SHIFT 9 /* GP10_EINT */ +#define WM831X_GP10_EINT_WIDTH 1 /* GP10_EINT */ +#define WM831X_GP9_EINT 0x0100 /* GP9_EINT */ +#define WM831X_GP9_EINT_MASK 0x0100 /* 
GP9_EINT */ +#define WM831X_GP9_EINT_SHIFT 8 /* GP9_EINT */ +#define WM831X_GP9_EINT_WIDTH 1 /* GP9_EINT */ +#define WM831X_GP8_EINT 0x0080 /* GP8_EINT */ +#define WM831X_GP8_EINT_MASK 0x0080 /* GP8_EINT */ +#define WM831X_GP8_EINT_SHIFT 7 /* GP8_EINT */ +#define WM831X_GP8_EINT_WIDTH 1 /* GP8_EINT */ +#define WM831X_GP7_EINT 0x0040 /* GP7_EINT */ +#define WM831X_GP7_EINT_MASK 0x0040 /* GP7_EINT */ +#define WM831X_GP7_EINT_SHIFT 6 /* GP7_EINT */ +#define WM831X_GP7_EINT_WIDTH 1 /* GP7_EINT */ +#define WM831X_GP6_EINT 0x0020 /* GP6_EINT */ +#define WM831X_GP6_EINT_MASK 0x0020 /* GP6_EINT */ +#define WM831X_GP6_EINT_SHIFT 5 /* GP6_EINT */ +#define WM831X_GP6_EINT_WIDTH 1 /* GP6_EINT */ +#define WM831X_GP5_EINT 0x0010 /* GP5_EINT */ +#define WM831X_GP5_EINT_MASK 0x0010 /* GP5_EINT */ +#define WM831X_GP5_EINT_SHIFT 4 /* GP5_EINT */ +#define WM831X_GP5_EINT_WIDTH 1 /* GP5_EINT */ +#define WM831X_GP4_EINT 0x0008 /* GP4_EINT */ +#define WM831X_GP4_EINT_MASK 0x0008 /* GP4_EINT */ +#define WM831X_GP4_EINT_SHIFT 3 /* GP4_EINT */ +#define WM831X_GP4_EINT_WIDTH 1 /* GP4_EINT */ +#define WM831X_GP3_EINT 0x0004 /* GP3_EINT */ +#define WM831X_GP3_EINT_MASK 0x0004 /* GP3_EINT */ +#define WM831X_GP3_EINT_SHIFT 2 /* GP3_EINT */ +#define WM831X_GP3_EINT_WIDTH 1 /* GP3_EINT */ +#define WM831X_GP2_EINT 0x0002 /* GP2_EINT */ +#define WM831X_GP2_EINT_MASK 0x0002 /* GP2_EINT */ +#define WM831X_GP2_EINT_SHIFT 1 /* GP2_EINT */ +#define WM831X_GP2_EINT_WIDTH 1 /* GP2_EINT */ +#define WM831X_GP1_EINT 0x0001 /* GP1_EINT */ +#define WM831X_GP1_EINT_MASK 0x0001 /* GP1_EINT */ +#define WM831X_GP1_EINT_SHIFT 0 /* GP1_EINT */ +#define WM831X_GP1_EINT_WIDTH 1 /* GP1_EINT */ + +/* + * R16407 (0x4017) - IRQ Config + */ +#define WM831X_IRQ_OD 0x0002 /* IRQ_OD */ +#define WM831X_IRQ_OD_MASK 0x0002 /* IRQ_OD */ +#define WM831X_IRQ_OD_SHIFT 1 /* IRQ_OD */ +#define WM831X_IRQ_OD_WIDTH 1 /* IRQ_OD */ +#define WM831X_IM_IRQ 0x0001 /* IM_IRQ */ +#define WM831X_IM_IRQ_MASK 0x0001 /* IM_IRQ */ +#define WM831X_IM_IRQ_SHIFT 0 /* IM_IRQ */ +#define WM831X_IM_IRQ_WIDTH 1 /* IM_IRQ */ + +/* + * R16408 (0x4018) - System Interrupts Mask + */ +#define WM831X_IM_PS_INT 0x8000 /* IM_PS_INT */ +#define WM831X_IM_PS_INT_MASK 0x8000 /* IM_PS_INT */ +#define WM831X_IM_PS_INT_SHIFT 15 /* IM_PS_INT */ +#define WM831X_IM_PS_INT_WIDTH 1 /* IM_PS_INT */ +#define WM831X_IM_TEMP_INT 0x4000 /* IM_TEMP_INT */ +#define WM831X_IM_TEMP_INT_MASK 0x4000 /* IM_TEMP_INT */ +#define WM831X_IM_TEMP_INT_SHIFT 14 /* IM_TEMP_INT */ +#define WM831X_IM_TEMP_INT_WIDTH 1 /* IM_TEMP_INT */ +#define WM831X_IM_GP_INT 0x2000 /* IM_GP_INT */ +#define WM831X_IM_GP_INT_MASK 0x2000 /* IM_GP_INT */ +#define WM831X_IM_GP_INT_SHIFT 13 /* IM_GP_INT */ +#define WM831X_IM_GP_INT_WIDTH 1 /* IM_GP_INT */ +#define WM831X_IM_ON_PIN_INT 0x1000 /* IM_ON_PIN_INT */ +#define WM831X_IM_ON_PIN_INT_MASK 0x1000 /* IM_ON_PIN_INT */ +#define WM831X_IM_ON_PIN_INT_SHIFT 12 /* IM_ON_PIN_INT */ +#define WM831X_IM_ON_PIN_INT_WIDTH 1 /* IM_ON_PIN_INT */ +#define WM831X_IM_WDOG_INT 0x0800 /* IM_WDOG_INT */ +#define WM831X_IM_WDOG_INT_MASK 0x0800 /* IM_WDOG_INT */ +#define WM831X_IM_WDOG_INT_SHIFT 11 /* IM_WDOG_INT */ +#define WM831X_IM_WDOG_INT_WIDTH 1 /* IM_WDOG_INT */ +#define WM831X_IM_TCHDATA_INT 0x0400 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHDATA_INT_MASK 0x0400 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHDATA_INT_SHIFT 10 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHDATA_INT_WIDTH 1 /* IM_TCHDATA_INT */ +#define WM831X_IM_TCHPD_INT 0x0200 /* IM_TCHPD_INT */ +#define WM831X_IM_TCHPD_INT_MASK 0x0200 /* 
IM_TCHPD_INT */ +#define WM831X_IM_TCHPD_INT_SHIFT 9 /* IM_TCHPD_INT */ +#define WM831X_IM_TCHPD_INT_WIDTH 1 /* IM_TCHPD_INT */ +#define WM831X_IM_AUXADC_INT 0x0100 /* IM_AUXADC_INT */ +#define WM831X_IM_AUXADC_INT_MASK 0x0100 /* IM_AUXADC_INT */ +#define WM831X_IM_AUXADC_INT_SHIFT 8 /* IM_AUXADC_INT */ +#define WM831X_IM_AUXADC_INT_WIDTH 1 /* IM_AUXADC_INT */ +#define WM831X_IM_PPM_INT 0x0080 /* IM_PPM_INT */ +#define WM831X_IM_PPM_INT_MASK 0x0080 /* IM_PPM_INT */ +#define WM831X_IM_PPM_INT_SHIFT 7 /* IM_PPM_INT */ +#define WM831X_IM_PPM_INT_WIDTH 1 /* IM_PPM_INT */ +#define WM831X_IM_CS_INT 0x0040 /* IM_CS_INT */ +#define WM831X_IM_CS_INT_MASK 0x0040 /* IM_CS_INT */ +#define WM831X_IM_CS_INT_SHIFT 6 /* IM_CS_INT */ +#define WM831X_IM_CS_INT_WIDTH 1 /* IM_CS_INT */ +#define WM831X_IM_RTC_INT 0x0020 /* IM_RTC_INT */ +#define WM831X_IM_RTC_INT_MASK 0x0020 /* IM_RTC_INT */ +#define WM831X_IM_RTC_INT_SHIFT 5 /* IM_RTC_INT */ +#define WM831X_IM_RTC_INT_WIDTH 1 /* IM_RTC_INT */ +#define WM831X_IM_OTP_INT 0x0010 /* IM_OTP_INT */ +#define WM831X_IM_OTP_INT_MASK 0x0010 /* IM_OTP_INT */ +#define WM831X_IM_OTP_INT_SHIFT 4 /* IM_OTP_INT */ +#define WM831X_IM_OTP_INT_WIDTH 1 /* IM_OTP_INT */ +#define WM831X_IM_CHILD_INT 0x0008 /* IM_CHILD_INT */ +#define WM831X_IM_CHILD_INT_MASK 0x0008 /* IM_CHILD_INT */ +#define WM831X_IM_CHILD_INT_SHIFT 3 /* IM_CHILD_INT */ +#define WM831X_IM_CHILD_INT_WIDTH 1 /* IM_CHILD_INT */ +#define WM831X_IM_CHG_INT 0x0004 /* IM_CHG_INT */ +#define WM831X_IM_CHG_INT_MASK 0x0004 /* IM_CHG_INT */ +#define WM831X_IM_CHG_INT_SHIFT 2 /* IM_CHG_INT */ +#define WM831X_IM_CHG_INT_WIDTH 1 /* IM_CHG_INT */ +#define WM831X_IM_HC_INT 0x0002 /* IM_HC_INT */ +#define WM831X_IM_HC_INT_MASK 0x0002 /* IM_HC_INT */ +#define WM831X_IM_HC_INT_SHIFT 1 /* IM_HC_INT */ +#define WM831X_IM_HC_INT_WIDTH 1 /* IM_HC_INT */ +#define WM831X_IM_UV_INT 0x0001 /* IM_UV_INT */ +#define WM831X_IM_UV_INT_MASK 0x0001 /* IM_UV_INT */ +#define WM831X_IM_UV_INT_SHIFT 0 /* IM_UV_INT */ +#define WM831X_IM_UV_INT_WIDTH 1 /* IM_UV_INT */ + +/* + * R16409 (0x4019) - Interrupt Status 1 Mask + */ +#define WM831X_IM_PPM_SYSLO_EINT 0x8000 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_SYSLO_EINT_MASK 0x8000 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_SYSLO_EINT_SHIFT 15 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_SYSLO_EINT_WIDTH 1 /* IM_PPM_SYSLO_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT 0x4000 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT_MASK 0x4000 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT_SHIFT 14 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_PWR_SRC_EINT_WIDTH 1 /* IM_PPM_PWR_SRC_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT 0x2000 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT_MASK 0x2000 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT_SHIFT 13 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_PPM_USB_CURR_EINT_WIDTH 1 /* IM_PPM_USB_CURR_EINT */ +#define WM831X_IM_ON_PIN_EINT 0x1000 /* IM_ON_PIN_EINT */ +#define WM831X_IM_ON_PIN_EINT_MASK 0x1000 /* IM_ON_PIN_EINT */ +#define WM831X_IM_ON_PIN_EINT_SHIFT 12 /* IM_ON_PIN_EINT */ +#define WM831X_IM_ON_PIN_EINT_WIDTH 1 /* IM_ON_PIN_EINT */ +#define WM831X_IM_WDOG_TO_EINT 0x0800 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_WDOG_TO_EINT_MASK 0x0800 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_WDOG_TO_EINT_SHIFT 11 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_WDOG_TO_EINT_WIDTH 1 /* IM_WDOG_TO_EINT */ +#define WM831X_IM_TCHDATA_EINT 0x0400 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHDATA_EINT_MASK 0x0400 /* 
IM_TCHDATA_EINT */ +#define WM831X_IM_TCHDATA_EINT_SHIFT 10 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHDATA_EINT_WIDTH 1 /* IM_TCHDATA_EINT */ +#define WM831X_IM_TCHPD_EINT 0x0200 /* IM_TCHPD_EINT */ +#define WM831X_IM_TCHPD_EINT_MASK 0x0200 /* IM_TCHPD_EINT */ +#define WM831X_IM_TCHPD_EINT_SHIFT 9 /* IM_TCHPD_EINT */ +#define WM831X_IM_TCHPD_EINT_WIDTH 1 /* IM_TCHPD_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT 0x0100 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT_MASK 0x0100 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT_SHIFT 8 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DATA_EINT_WIDTH 1 /* IM_AUXADC_DATA_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT 0x0080 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT_MASK 0x0080 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT_SHIFT 7 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP4_EINT_WIDTH 1 /* IM_AUXADC_DCOMP4_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT 0x0040 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT_MASK 0x0040 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT_SHIFT 6 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP3_EINT_WIDTH 1 /* IM_AUXADC_DCOMP3_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT 0x0020 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT_MASK 0x0020 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT_SHIFT 5 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP2_EINT_WIDTH 1 /* IM_AUXADC_DCOMP2_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT 0x0010 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT_MASK 0x0010 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT_SHIFT 4 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_AUXADC_DCOMP1_EINT_WIDTH 1 /* IM_AUXADC_DCOMP1_EINT */ +#define WM831X_IM_RTC_PER_EINT 0x0008 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_PER_EINT_MASK 0x0008 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_PER_EINT_SHIFT 3 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_PER_EINT_WIDTH 1 /* IM_RTC_PER_EINT */ +#define WM831X_IM_RTC_ALM_EINT 0x0004 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_RTC_ALM_EINT_MASK 0x0004 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_RTC_ALM_EINT_SHIFT 2 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_RTC_ALM_EINT_WIDTH 1 /* IM_RTC_ALM_EINT */ +#define WM831X_IM_TEMP_THW_EINT 0x0002 /* IM_TEMP_THW_EINT */ +#define WM831X_IM_TEMP_THW_EINT_MASK 0x0002 /* IM_TEMP_THW_EINT */ +#define WM831X_IM_TEMP_THW_EINT_SHIFT 1 /* IM_TEMP_THW_EINT */ +#define WM831X_IM_TEMP_THW_EINT_WIDTH 1 /* IM_TEMP_THW_EINT */ + +/* + * R16410 (0x401A) - Interrupt Status 2 Mask + */ +#define WM831X_IM_CHG_BATT_HOT_EINT 0x8000 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_HOT_EINT_MASK 0x8000 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_HOT_EINT_SHIFT 15 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_HOT_EINT_WIDTH 1 /* IM_CHG_BATT_HOT_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT 0x4000 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT_MASK 0x4000 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT_SHIFT 14 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_COLD_EINT_WIDTH 1 /* IM_CHG_BATT_COLD_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT 0x2000 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT_MASK 0x2000 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT_SHIFT 13 /* IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_BATT_FAIL_EINT_WIDTH 1 /* 
IM_CHG_BATT_FAIL_EINT */ +#define WM831X_IM_CHG_OV_EINT 0x1000 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_OV_EINT_MASK 0x1000 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_OV_EINT_SHIFT 12 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_OV_EINT_WIDTH 1 /* IM_CHG_OV_EINT */ +#define WM831X_IM_CHG_END_EINT 0x0800 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_END_EINT_MASK 0x0800 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_END_EINT_SHIFT 11 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_END_EINT_WIDTH 1 /* IM_CHG_END_EINT */ +#define WM831X_IM_CHG_TO_EINT 0x0400 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_TO_EINT_MASK 0x0400 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_TO_EINT_SHIFT 10 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_TO_EINT_WIDTH 1 /* IM_CHG_TO_EINT */ +#define WM831X_IM_CHG_MODE_EINT 0x0200 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_MODE_EINT_MASK 0x0200 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_MODE_EINT_SHIFT 9 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_MODE_EINT_WIDTH 1 /* IM_CHG_MODE_EINT */ +#define WM831X_IM_CHG_START_EINT 0x0100 /* IM_CHG_START_EINT */ +#define WM831X_IM_CHG_START_EINT_MASK 0x0100 /* IM_CHG_START_EINT */ +#define WM831X_IM_CHG_START_EINT_SHIFT 8 /* IM_CHG_START_EINT */ +#define WM831X_IM_CHG_START_EINT_WIDTH 1 /* IM_CHG_START_EINT */ +#define WM831X_IM_CS2_EINT 0x0080 /* IM_CS2_EINT */ +#define WM831X_IM_CS2_EINT_MASK 0x0080 /* IM_CS2_EINT */ +#define WM831X_IM_CS2_EINT_SHIFT 7 /* IM_CS2_EINT */ +#define WM831X_IM_CS2_EINT_WIDTH 1 /* IM_CS2_EINT */ +#define WM831X_IM_CS1_EINT 0x0040 /* IM_CS1_EINT */ +#define WM831X_IM_CS1_EINT_MASK 0x0040 /* IM_CS1_EINT */ +#define WM831X_IM_CS1_EINT_SHIFT 6 /* IM_CS1_EINT */ +#define WM831X_IM_CS1_EINT_WIDTH 1 /* IM_CS1_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT 0x0020 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT_MASK 0x0020 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT_SHIFT 5 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_CMD_END_EINT_WIDTH 1 /* IM_OTP_CMD_END_EINT */ +#define WM831X_IM_OTP_ERR_EINT 0x0010 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_OTP_ERR_EINT_MASK 0x0010 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_OTP_ERR_EINT_SHIFT 4 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_OTP_ERR_EINT_WIDTH 1 /* IM_OTP_ERR_EINT */ +#define WM831X_IM_PS_POR_EINT 0x0004 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_POR_EINT_MASK 0x0004 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_POR_EINT_SHIFT 2 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_POR_EINT_WIDTH 1 /* IM_PS_POR_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT 0x0002 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT_MASK 0x0002 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT_SHIFT 1 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_SLEEP_OFF_EINT_WIDTH 1 /* IM_PS_SLEEP_OFF_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT 0x0001 /* IM_PS_ON_WAKE_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT_MASK 0x0001 /* IM_PS_ON_WAKE_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT_SHIFT 0 /* IM_PS_ON_WAKE_EINT */ +#define WM831X_IM_PS_ON_WAKE_EINT_WIDTH 1 /* IM_PS_ON_WAKE_EINT */ + +/* + * R16411 (0x401B) - Interrupt Status 3 Mask + */ +#define WM831X_IM_UV_LDO10_EINT 0x0200 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO10_EINT_MASK 0x0200 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO10_EINT_SHIFT 9 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO10_EINT_WIDTH 1 /* IM_UV_LDO10_EINT */ +#define WM831X_IM_UV_LDO9_EINT 0x0100 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO9_EINT_MASK 0x0100 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO9_EINT_SHIFT 8 
/* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO9_EINT_WIDTH 1 /* IM_UV_LDO9_EINT */ +#define WM831X_IM_UV_LDO8_EINT 0x0080 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO8_EINT_MASK 0x0080 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO8_EINT_SHIFT 7 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO8_EINT_WIDTH 1 /* IM_UV_LDO8_EINT */ +#define WM831X_IM_UV_LDO7_EINT 0x0040 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO7_EINT_MASK 0x0040 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO7_EINT_SHIFT 6 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO7_EINT_WIDTH 1 /* IM_UV_LDO7_EINT */ +#define WM831X_IM_UV_LDO6_EINT 0x0020 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO6_EINT_MASK 0x0020 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO6_EINT_SHIFT 5 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO6_EINT_WIDTH 1 /* IM_UV_LDO6_EINT */ +#define WM831X_IM_UV_LDO5_EINT 0x0010 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO5_EINT_MASK 0x0010 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO5_EINT_SHIFT 4 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO5_EINT_WIDTH 1 /* IM_UV_LDO5_EINT */ +#define WM831X_IM_UV_LDO4_EINT 0x0008 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO4_EINT_MASK 0x0008 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO4_EINT_SHIFT 3 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO4_EINT_WIDTH 1 /* IM_UV_LDO4_EINT */ +#define WM831X_IM_UV_LDO3_EINT 0x0004 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO3_EINT_MASK 0x0004 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO3_EINT_SHIFT 2 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO3_EINT_WIDTH 1 /* IM_UV_LDO3_EINT */ +#define WM831X_IM_UV_LDO2_EINT 0x0002 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO2_EINT_MASK 0x0002 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO2_EINT_SHIFT 1 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO2_EINT_WIDTH 1 /* IM_UV_LDO2_EINT */ +#define WM831X_IM_UV_LDO1_EINT 0x0001 /* IM_UV_LDO1_EINT */ +#define WM831X_IM_UV_LDO1_EINT_MASK 0x0001 /* IM_UV_LDO1_EINT */ +#define WM831X_IM_UV_LDO1_EINT_SHIFT 0 /* IM_UV_LDO1_EINT */ +#define WM831X_IM_UV_LDO1_EINT_WIDTH 1 /* IM_UV_LDO1_EINT */ + +/* + * R16412 (0x401C) - Interrupt Status 4 Mask + */ +#define WM831X_IM_HC_DC2_EINT 0x0200 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC2_EINT_MASK 0x0200 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC2_EINT_SHIFT 9 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC2_EINT_WIDTH 1 /* IM_HC_DC2_EINT */ +#define WM831X_IM_HC_DC1_EINT 0x0100 /* IM_HC_DC1_EINT */ +#define WM831X_IM_HC_DC1_EINT_MASK 0x0100 /* IM_HC_DC1_EINT */ +#define WM831X_IM_HC_DC1_EINT_SHIFT 8 /* IM_HC_DC1_EINT */ +#define WM831X_IM_HC_DC1_EINT_WIDTH 1 /* IM_HC_DC1_EINT */ +#define WM831X_IM_UV_DC4_EINT 0x0008 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC4_EINT_MASK 0x0008 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC4_EINT_SHIFT 3 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC4_EINT_WIDTH 1 /* IM_UV_DC4_EINT */ +#define WM831X_IM_UV_DC3_EINT 0x0004 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC3_EINT_MASK 0x0004 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC3_EINT_SHIFT 2 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC3_EINT_WIDTH 1 /* IM_UV_DC3_EINT */ +#define WM831X_IM_UV_DC2_EINT 0x0002 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC2_EINT_MASK 0x0002 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC2_EINT_SHIFT 1 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC2_EINT_WIDTH 1 /* IM_UV_DC2_EINT */ +#define WM831X_IM_UV_DC1_EINT 0x0001 /* IM_UV_DC1_EINT */ +#define WM831X_IM_UV_DC1_EINT_MASK 0x0001 /* IM_UV_DC1_EINT */ +#define WM831X_IM_UV_DC1_EINT_SHIFT 0 /* IM_UV_DC1_EINT */ 
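
These IM_* bits gate the corresponding interrupt sources; clearing a bit typically lets the event through to the IRQ line. A minimal sketch, assuming the wm831x_set_bits() helper declared in core.h (the wrapper function name is hypothetical):

/*
 * Illustrative sketch only -- not part of irq.h.  Unmasks the DC1
 * undervoltage interrupt by clearing its IM_* bit in the Interrupt
 * Status 4 Mask register.
 */
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/irq.h>

static int example_unmask_uv_dc1(struct wm831x *wm831x)
{
	/* val = 0 clears the bit selected by the mask argument */
	return wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_4_MASK,
			       WM831X_IM_UV_DC1_EINT_MASK, 0);
}
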
+#define WM831X_IM_UV_DC1_EINT_WIDTH 1 /* IM_UV_DC1_EINT */ + +/* + * R16413 (0x401D) - Interrupt Status 5 Mask + */ +#define WM831X_IM_GP16_EINT 0x8000 /* IM_GP16_EINT */ +#define WM831X_IM_GP16_EINT_MASK 0x8000 /* IM_GP16_EINT */ +#define WM831X_IM_GP16_EINT_SHIFT 15 /* IM_GP16_EINT */ +#define WM831X_IM_GP16_EINT_WIDTH 1 /* IM_GP16_EINT */ +#define WM831X_IM_GP15_EINT 0x4000 /* IM_GP15_EINT */ +#define WM831X_IM_GP15_EINT_MASK 0x4000 /* IM_GP15_EINT */ +#define WM831X_IM_GP15_EINT_SHIFT 14 /* IM_GP15_EINT */ +#define WM831X_IM_GP15_EINT_WIDTH 1 /* IM_GP15_EINT */ +#define WM831X_IM_GP14_EINT 0x2000 /* IM_GP14_EINT */ +#define WM831X_IM_GP14_EINT_MASK 0x2000 /* IM_GP14_EINT */ +#define WM831X_IM_GP14_EINT_SHIFT 13 /* IM_GP14_EINT */ +#define WM831X_IM_GP14_EINT_WIDTH 1 /* IM_GP14_EINT */ +#define WM831X_IM_GP13_EINT 0x1000 /* IM_GP13_EINT */ +#define WM831X_IM_GP13_EINT_MASK 0x1000 /* IM_GP13_EINT */ +#define WM831X_IM_GP13_EINT_SHIFT 12 /* IM_GP13_EINT */ +#define WM831X_IM_GP13_EINT_WIDTH 1 /* IM_GP13_EINT */ +#define WM831X_IM_GP12_EINT 0x0800 /* IM_GP12_EINT */ +#define WM831X_IM_GP12_EINT_MASK 0x0800 /* IM_GP12_EINT */ +#define WM831X_IM_GP12_EINT_SHIFT 11 /* IM_GP12_EINT */ +#define WM831X_IM_GP12_EINT_WIDTH 1 /* IM_GP12_EINT */ +#define WM831X_IM_GP11_EINT 0x0400 /* IM_GP11_EINT */ +#define WM831X_IM_GP11_EINT_MASK 0x0400 /* IM_GP11_EINT */ +#define WM831X_IM_GP11_EINT_SHIFT 10 /* IM_GP11_EINT */ +#define WM831X_IM_GP11_EINT_WIDTH 1 /* IM_GP11_EINT */ +#define WM831X_IM_GP10_EINT 0x0200 /* IM_GP10_EINT */ +#define WM831X_IM_GP10_EINT_MASK 0x0200 /* IM_GP10_EINT */ +#define WM831X_IM_GP10_EINT_SHIFT 9 /* IM_GP10_EINT */ +#define WM831X_IM_GP10_EINT_WIDTH 1 /* IM_GP10_EINT */ +#define WM831X_IM_GP9_EINT 0x0100 /* IM_GP9_EINT */ +#define WM831X_IM_GP9_EINT_MASK 0x0100 /* IM_GP9_EINT */ +#define WM831X_IM_GP9_EINT_SHIFT 8 /* IM_GP9_EINT */ +#define WM831X_IM_GP9_EINT_WIDTH 1 /* IM_GP9_EINT */ +#define WM831X_IM_GP8_EINT 0x0080 /* IM_GP8_EINT */ +#define WM831X_IM_GP8_EINT_MASK 0x0080 /* IM_GP8_EINT */ +#define WM831X_IM_GP8_EINT_SHIFT 7 /* IM_GP8_EINT */ +#define WM831X_IM_GP8_EINT_WIDTH 1 /* IM_GP8_EINT */ +#define WM831X_IM_GP7_EINT 0x0040 /* IM_GP7_EINT */ +#define WM831X_IM_GP7_EINT_MASK 0x0040 /* IM_GP7_EINT */ +#define WM831X_IM_GP7_EINT_SHIFT 6 /* IM_GP7_EINT */ +#define WM831X_IM_GP7_EINT_WIDTH 1 /* IM_GP7_EINT */ +#define WM831X_IM_GP6_EINT 0x0020 /* IM_GP6_EINT */ +#define WM831X_IM_GP6_EINT_MASK 0x0020 /* IM_GP6_EINT */ +#define WM831X_IM_GP6_EINT_SHIFT 5 /* IM_GP6_EINT */ +#define WM831X_IM_GP6_EINT_WIDTH 1 /* IM_GP6_EINT */ +#define WM831X_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */ +#define WM831X_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */ +#define WM831X_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */ +#define WM831X_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */ +#define WM831X_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */ +#define WM831X_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */ +#define WM831X_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */ +#define WM831X_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */ +#define WM831X_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */ +#define WM831X_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */ +#define WM831X_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */ +#define WM831X_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */ +#define WM831X_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */ +#define WM831X_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */ +#define WM831X_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */ +#define WM831X_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */ +#define WM831X_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */ +#define WM831X_IM_GP1_EINT_MASK 
0x0001 /* IM_GP1_EINT */ +#define WM831X_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */ +#define WM831X_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */ + + +#endif diff --git a/include/linux/mfd/wm831x/otp.h b/include/linux/mfd/wm831x/otp.h new file mode 100644 index 00000000..ce1f81a3 --- /dev/null +++ b/include/linux/mfd/wm831x/otp.h @@ -0,0 +1,162 @@ +/* + * include/linux/mfd/wm831x/otp.h -- OTP interface for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_OTP_H__ +#define __MFD_WM831X_OTP_H__ + +int wm831x_otp_init(struct wm831x *wm831x); +void wm831x_otp_exit(struct wm831x *wm831x); + +/* + * R30720 (0x7800) - Unique ID 1 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30721 (0x7801) - Unique ID 2 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30722 (0x7802) - Unique ID 3 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30723 (0x7803) - Unique ID 4 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30724 (0x7804) - Unique ID 5 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30725 (0x7805) - Unique ID 6 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30726 (0x7806) - Unique ID 7 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30727 (0x7807) - Unique ID 8 + */ +#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ +#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ + +/* + * R30728 (0x7808) - Factory OTP ID + */ +#define WM831X_OTP_FACT_ID_MASK 0xFFFE /* OTP_FACT_ID - [15:1] */ +#define WM831X_OTP_FACT_ID_SHIFT 1 /* OTP_FACT_ID - [15:1] */ +#define WM831X_OTP_FACT_ID_WIDTH 15 /* OTP_FACT_ID - [15:1] */ +#define WM831X_OTP_FACT_FINAL 0x0001 /* OTP_FACT_FINAL */ +#define WM831X_OTP_FACT_FINAL_MASK 0x0001 /* OTP_FACT_FINAL */ +#define WM831X_OTP_FACT_FINAL_SHIFT 0 /* OTP_FACT_FINAL */ +#define WM831X_OTP_FACT_FINAL_WIDTH 1 /* OTP_FACT_FINAL */ + +/* + * R30729 (0x7809) - Factory OTP 1 + */ +#define WM831X_DC3_TRIM_MASK 0xF000 /* DC3_TRIM - [15:12] */ +#define WM831X_DC3_TRIM_SHIFT 12 /* DC3_TRIM - [15:12] */ +#define WM831X_DC3_TRIM_WIDTH 4 /* DC3_TRIM - [15:12] */ +#define WM831X_DC2_TRIM_MASK 0x0FC0 /* DC2_TRIM - [11:6] */ +#define 
WM831X_DC2_TRIM_SHIFT 6 /* DC2_TRIM - [11:6] */ +#define WM831X_DC2_TRIM_WIDTH 6 /* DC2_TRIM - [11:6] */ +#define WM831X_DC1_TRIM_MASK 0x003F /* DC1_TRIM - [5:0] */ +#define WM831X_DC1_TRIM_SHIFT 0 /* DC1_TRIM - [5:0] */ +#define WM831X_DC1_TRIM_WIDTH 6 /* DC1_TRIM - [5:0] */ + +/* + * R30730 (0x780A) - Factory OTP 2 + */ +#define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */ +#define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */ + +/* + * R30731 (0x780B) - Factory OTP 3 + */ +#define WM831X_OSC_TRIM_MASK 0x0780 /* OSC_TRIM - [10:7] */ +#define WM831X_OSC_TRIM_SHIFT 7 /* OSC_TRIM - [10:7] */ +#define WM831X_OSC_TRIM_WIDTH 4 /* OSC_TRIM - [10:7] */ +#define WM831X_BG_TRIM_MASK 0x0078 /* BG_TRIM - [6:3] */ +#define WM831X_BG_TRIM_SHIFT 3 /* BG_TRIM - [6:3] */ +#define WM831X_BG_TRIM_WIDTH 4 /* BG_TRIM - [6:3] */ +#define WM831X_LPBG_TRIM_MASK 0x0007 /* LPBG_TRIM - [2:0] */ +#define WM831X_LPBG_TRIM_SHIFT 0 /* LPBG_TRIM - [2:0] */ +#define WM831X_LPBG_TRIM_WIDTH 3 /* LPBG_TRIM - [2:0] */ + +/* + * R30732 (0x780C) - Factory OTP 4 + */ +#define WM831X_CHILD_I2C_ADDR_MASK 0x00FE /* CHILD_I2C_ADDR - [7:1] */ +#define WM831X_CHILD_I2C_ADDR_SHIFT 1 /* CHILD_I2C_ADDR - [7:1] */ +#define WM831X_CHILD_I2C_ADDR_WIDTH 7 /* CHILD_I2C_ADDR - [7:1] */ +#define WM831X_CH_AW 0x0001 /* CH_AW */ +#define WM831X_CH_AW_MASK 0x0001 /* CH_AW */ +#define WM831X_CH_AW_SHIFT 0 /* CH_AW */ +#define WM831X_CH_AW_WIDTH 1 /* CH_AW */ + +/* + * R30733 (0x780D) - Factory OTP 5 + */ +#define WM831X_CHARGE_TRIM_MASK 0x003F /* CHARGE_TRIM - [5:0] */ +#define WM831X_CHARGE_TRIM_SHIFT 0 /* CHARGE_TRIM - [5:0] */ +#define WM831X_CHARGE_TRIM_WIDTH 6 /* CHARGE_TRIM - [5:0] */ + +/* + * R30736 (0x7810) - Customer OTP ID + */ +#define WM831X_OTP_AUTO_PROG 0x8000 /* OTP_AUTO_PROG */ +#define WM831X_OTP_AUTO_PROG_MASK 0x8000 /* OTP_AUTO_PROG */ +#define WM831X_OTP_AUTO_PROG_SHIFT 15 /* OTP_AUTO_PROG */ +#define WM831X_OTP_AUTO_PROG_WIDTH 1 /* OTP_AUTO_PROG */ +#define WM831X_OTP_CUST_ID_MASK 0x7FFE /* OTP_CUST_ID - [14:1] */ +#define WM831X_OTP_CUST_ID_SHIFT 1 /* OTP_CUST_ID - [14:1] */ +#define WM831X_OTP_CUST_ID_WIDTH 14 /* OTP_CUST_ID - [14:1] */ +#define WM831X_OTP_CUST_FINAL 0x0001 /* OTP_CUST_FINAL */ +#define WM831X_OTP_CUST_FINAL_MASK 0x0001 /* OTP_CUST_FINAL */ +#define WM831X_OTP_CUST_FINAL_SHIFT 0 /* OTP_CUST_FINAL */ +#define WM831X_OTP_CUST_FINAL_WIDTH 1 /* OTP_CUST_FINAL */ + +/* + * R30759 (0x7827) - DBE CHECK DATA + */ +#define WM831X_DBE_VALID_DATA_MASK 0xFFFF /* DBE_VALID_DATA - [15:0] */ +#define WM831X_DBE_VALID_DATA_SHIFT 0 /* DBE_VALID_DATA - [15:0] */ +#define WM831X_DBE_VALID_DATA_WIDTH 16 /* DBE_VALID_DATA - [15:0] */ + + +#endif diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h new file mode 100644 index 00000000..1d7a3f7b --- /dev/null +++ b/include/linux/mfd/wm831x/pdata.h @@ -0,0 +1,150 @@ +/* + * include/linux/mfd/wm831x/pdata.h -- Platform data for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_PDATA_H__ +#define __MFD_WM831X_PDATA_H__ + +struct wm831x; +struct regulator_init_data; + +struct wm831x_backlight_pdata { + int isink; /** ISINK to use, 1 or 2 */ + int max_uA; /** Maximum current to allow */ +}; + +struct wm831x_backup_pdata { + int charger_enable; + int no_constant_voltage; /** Disable constant voltage charging */ + int vlim; /** Voltage limit in milivolts */ + int ilim; /** Current limit in microamps */ +}; + +struct wm831x_battery_pdata { + int enable; /** Enable charging */ + int fast_enable; /** Enable fast charging */ + int off_mask; /** Mask OFF while charging */ + int trickle_ilim; /** Trickle charge current limit, in mA */ + int vsel; /** Target voltage, in mV */ + int eoc_iterm; /** End of trickle charge current, in mA */ + int fast_ilim; /** Fast charge current limit, in mA */ + int timeout; /** Charge cycle timeout, in minutes */ +}; + +/** + * Configuration for the WM831x DC-DC BuckWise convertors. This + * should be passed as driver_data in the regulator_init_data. + * + * Currently all the configuration is for the fast DVS switching + * support of the devices. This allows MFPs on the device to be + * configured as an input to switch between two output voltages, + * allowing voltage transitions without the expense of an access over + * I2C or SPI buses. + */ +struct wm831x_buckv_pdata { + int dvs_gpio; /** CPU GPIO to use for DVS switching */ + int dvs_control_src; /** Hardware DVS source to use (1 or 2) */ + int dvs_init_state; /** DVS state to expect on startup */ + int dvs_state_gpio; /** CPU GPIO to use for monitoring status */ +}; + +/* Sources for status LED configuration. Values are register values + * plus 1 to allow for a zero default for preserve. + */ +enum wm831x_status_src { + WM831X_STATUS_PRESERVE = 0, /* Keep the current hardware setting */ + WM831X_STATUS_OTP = 1, + WM831X_STATUS_POWER = 2, + WM831X_STATUS_CHARGER = 3, + WM831X_STATUS_MANUAL = 4, +}; + +struct wm831x_status_pdata { + enum wm831x_status_src default_src; + const char *name; + const char *default_trigger; +}; + +struct wm831x_touch_pdata { + int fivewire; /** 1 for five wire mode, 0 for 4 wire */ + int isel; /** Current for pen down (uA) */ + int rpu; /** Pen down sensitivity resistor divider */ + int pressure; /** Report pressure (boolean) */ + unsigned int data_irq; /** Touch data ready IRQ */ + int data_irqf; /** IRQ flags for data ready IRQ */ + unsigned int pd_irq; /** Touch pendown detect IRQ */ + int pd_irqf; /** IRQ flags for pen down IRQ */ +}; + +enum wm831x_watchdog_action { + WM831X_WDOG_NONE = 0, + WM831X_WDOG_INTERRUPT = 1, + WM831X_WDOG_RESET = 2, + WM831X_WDOG_WAKE = 3, +}; + +struct wm831x_watchdog_pdata { + enum wm831x_watchdog_action primary, secondary; + int update_gpio; + unsigned int software:1; +}; + +#define WM831X_MAX_STATUS 2 +#define WM831X_MAX_DCDC 4 +#define WM831X_MAX_EPE 2 +#define WM831X_MAX_LDO 11 +#define WM831X_MAX_ISINK 2 + +#define WM831X_GPIO_CONFIGURE 0x10000 +#define WM831X_GPIO_NUM 16 + +struct wm831x_pdata { + /** Used to distinguish multiple WM831x chips */ + int wm831x_num; + + /** Called before subdevices are set up */ + int (*pre_init)(struct wm831x *wm831x); + /** Called after subdevices are set up */ + int (*post_init)(struct wm831x *wm831x); + + /** Put the /IRQ line into CMOS mode */ + bool irq_cmos; + + /** Disable the touchscreen */ + bool disable_touch; + + /** The driver should initiate a power off sequence during shutdown */ + bool soft_shutdown; + + int irq_base; + int gpio_base; + int 
gpio_defaults[WM831X_GPIO_NUM]; + struct wm831x_backlight_pdata *backlight; + struct wm831x_backup_pdata *backup; + struct wm831x_battery_pdata *battery; + struct wm831x_touch_pdata *touch; + struct wm831x_watchdog_pdata *watchdog; + + /** LED1 = 0 and so on */ + struct wm831x_status_pdata *status[WM831X_MAX_STATUS]; + /** DCDC1 = 0 and so on */ + struct regulator_init_data *dcdc[WM831X_MAX_DCDC]; + /** EPE1 = 0 and so on */ + struct regulator_init_data *epe[WM831X_MAX_EPE]; + /** LDO1 = 0 and so on */ + struct regulator_init_data *ldo[WM831X_MAX_LDO]; + /** ISINK1 = 0 and so on*/ + struct regulator_init_data *isink[WM831X_MAX_ISINK]; +}; + +#endif diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h new file mode 100644 index 00000000..b18cbb02 --- /dev/null +++ b/include/linux/mfd/wm831x/pmu.h @@ -0,0 +1,189 @@ +/* + * include/linux/mfd/wm831x/pmu.h -- PMU for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_PMU_H__ +#define __MFD_WM831X_PMU_H__ + +/* + * R16387 (0x4003) - Power State + */ +#define WM831X_CHIP_ON 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_MASK 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_SHIFT 15 /* CHIP_ON */ +#define WM831X_CHIP_ON_WIDTH 1 /* CHIP_ON */ +#define WM831X_CHIP_SLP 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_MASK 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_SHIFT 14 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_WIDTH 1 /* CHIP_SLP */ +#define WM831X_REF_LP 0x1000 /* REF_LP */ +#define WM831X_REF_LP_MASK 0x1000 /* REF_LP */ +#define WM831X_REF_LP_SHIFT 12 /* REF_LP */ +#define WM831X_REF_LP_WIDTH 1 /* REF_LP */ +#define WM831X_PWRSTATE_DLY_MASK 0x0C00 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_SHIFT 10 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_WIDTH 2 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_SWRST_DLY 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_MASK 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_SHIFT 9 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_WIDTH 1 /* SWRST_DLY */ +#define WM831X_USB100MA_STARTUP_MASK 0x0030 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_SHIFT 4 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_WIDTH 2 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB_CURR_STS 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_MASK 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_SHIFT 3 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_WIDTH 1 /* USB_CURR_STS */ +#define WM831X_USB_ILIM_MASK 0x0007 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_SHIFT 0 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_WIDTH 3 /* USB_ILIM - [2:0] */ + +/* + * R16397 (0x400D) - System Status + */ +#define WM831X_THW_STS 0x8000 /* THW_STS */ +#define WM831X_THW_STS_MASK 0x8000 /* THW_STS */ +#define WM831X_THW_STS_SHIFT 15 /* THW_STS */ +#define WM831X_THW_STS_WIDTH 1 /* THW_STS */ +#define WM831X_PWR_SRC_BATT 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_MASK 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_SHIFT 10 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_WIDTH 1 /* PWR_SRC_BATT */ +#define WM831X_PWR_WALL 0x0200 /* PWR_WALL */ +#define WM831X_PWR_WALL_MASK 0x0200 /* PWR_WALL */ 
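
The System Status register (R16397, 0x400D) packs several independent status bits, so decoding it is mostly a matter of testing the masks above. A minimal sketch follows, taking the bit names at face value (e.g. reading THW_STS as a thermal warning flag); the raw value would normally come from the wm831x core driver's register-read helper, but here it is simply passed in as a plain value.

#include <stdint.h>
#include <stdio.h>

/* Sketch: report supply/status bits found in a System Status value.
 * 'status' is a raw 16-bit read of R16397 (0x400D). */
static void report_system_status(uint16_t status)
{
        if (status & WM831X_PWR_WALL_MASK)
                printf("wall supply present\n");
        if (status & WM831X_PWR_SRC_BATT_MASK)
                printf("running from battery\n");
        if (status & WM831X_THW_STS_MASK)
                printf("thermal warning asserted\n");
}
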
+#define WM831X_PWR_WALL_SHIFT 9 /* PWR_WALL */ +#define WM831X_PWR_WALL_WIDTH 1 /* PWR_WALL */ +#define WM831X_PWR_USB 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_MASK 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_SHIFT 8 /* PWR_USB */ +#define WM831X_PWR_USB_WIDTH 1 /* PWR_USB */ +#define WM831X_MAIN_STATE_MASK 0x001F /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_SHIFT 0 /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_WIDTH 5 /* MAIN_STATE - [4:0] */ + +/* + * R16456 (0x4048) - Charger Control 1 + */ +#define WM831X_CHG_ENA 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_MASK 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_SHIFT 15 /* CHG_ENA */ +#define WM831X_CHG_ENA_WIDTH 1 /* CHG_ENA */ +#define WM831X_CHG_FRC 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_MASK 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_SHIFT 14 /* CHG_FRC */ +#define WM831X_CHG_FRC_WIDTH 1 /* CHG_FRC */ +#define WM831X_CHG_ITERM_MASK 0x1C00 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_SHIFT 10 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_WIDTH 3 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_FAST 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_MASK 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_SHIFT 5 /* CHG_FAST */ +#define WM831X_CHG_FAST_WIDTH 1 /* CHG_FAST */ +#define WM831X_CHG_IMON_ENA 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_MASK 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_SHIFT 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_WIDTH 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_CHIP_TEMP_MON 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_MASK 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_SHIFT 0 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_WIDTH 1 /* CHG_CHIP_TEMP_MON */ + +/* + * R16457 (0x4049) - Charger Control 2 + */ +#define WM831X_CHG_OFF_MSK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_MASK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_SHIFT 14 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_WIDTH 1 /* CHG_OFF_MSK */ +#define WM831X_CHG_TIME_MASK 0x0F00 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_SHIFT 8 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_WIDTH 4 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TRKL_ILIM_MASK 0x00C0 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_SHIFT 6 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_WIDTH 2 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_VSEL_MASK 0x0030 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_SHIFT 4 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_WIDTH 2 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_FAST_ILIM_MASK 0x000F /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_SHIFT 0 /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_WIDTH 4 /* CHG_FAST_ILIM - [3:0] */ + +/* + * R16458 (0x404A) - Charger Status + */ +#define WM831X_BATT_OV_STS 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_MASK 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_SHIFT 15 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_WIDTH 1 /* BATT_OV_STS */ +#define WM831X_CHG_STATE_MASK 0x7000 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_SHIFT 12 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_WIDTH 3 /* CHG_STATE - [14:12] */ +#define WM831X_BATT_HOT_STS 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_MASK 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_SHIFT 11 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_WIDTH 1 /* BATT_HOT_STS */ +#define WM831X_BATT_COLD_STS 0x0400 /* BATT_COLD_STS */ +#define 
WM831X_BATT_COLD_STS_MASK 0x0400 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_SHIFT 10 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_WIDTH 1 /* BATT_COLD_STS */ +#define WM831X_CHG_TOPOFF 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_MASK 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_SHIFT 9 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_WIDTH 1 /* CHG_TOPOFF */ +#define WM831X_CHG_ACTIVE 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_MASK 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_SHIFT 8 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_WIDTH 1 /* CHG_ACTIVE */ +#define WM831X_CHG_TIME_ELAPSED_MASK 0x00FF /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_SHIFT 0 /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_WIDTH 8 /* CHG_TIME_ELAPSED - [7:0] */ + +#define WM831X_CHG_STATE_OFF (0 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE (1 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST (2 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE_OT (3 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST_OT (4 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_DEFECTIVE (5 << WM831X_CHG_STATE_SHIFT) + +/* + * R16459 (0x404B) - Backup Charger Control + */ +#define WM831X_BKUP_CHG_ENA 0x8000 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_MASK 0x8000 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_SHIFT 15 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_WIDTH 1 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_STS 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_MASK 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_SHIFT 14 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_WIDTH 1 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_MODE 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_MASK 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_SHIFT 12 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_WIDTH 1 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_BATT_DET_ENA 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_MASK 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_SHIFT 11 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_WIDTH 1 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_STS 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_MASK 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_SHIFT 10 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_WIDTH 1 /* BKUP_BATT_STS */ +#define WM831X_BKUP_CHG_VLIM 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_MASK 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_SHIFT 4 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_WIDTH 1 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_ILIM_MASK 0x0003 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_SHIFT 0 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_WIDTH 2 /* BKUP_CHG_ILIM - [1:0] */ + +#endif diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h new file mode 100644 index 00000000..955d30fc --- /dev/null +++ b/include/linux/mfd/wm831x/regulator.h @@ -0,0 +1,1218 @@ +/* + * linux/mfd/wm831x/regulator.h -- Regulator definitons for wm831x + * + * Copyright 2009 Wolfson Microelectronics PLC. 
+ * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_REGULATOR_H__ +#define __MFD_WM831X_REGULATOR_H__ + +/* + * R16462 (0x404E) - Current Sink 1 + */ +#define WM831X_CS1_ENA 0x8000 /* CS1_ENA */ +#define WM831X_CS1_ENA_MASK 0x8000 /* CS1_ENA */ +#define WM831X_CS1_ENA_SHIFT 15 /* CS1_ENA */ +#define WM831X_CS1_ENA_WIDTH 1 /* CS1_ENA */ +#define WM831X_CS1_DRIVE 0x4000 /* CS1_DRIVE */ +#define WM831X_CS1_DRIVE_MASK 0x4000 /* CS1_DRIVE */ +#define WM831X_CS1_DRIVE_SHIFT 14 /* CS1_DRIVE */ +#define WM831X_CS1_DRIVE_WIDTH 1 /* CS1_DRIVE */ +#define WM831X_CS1_SLPENA 0x1000 /* CS1_SLPENA */ +#define WM831X_CS1_SLPENA_MASK 0x1000 /* CS1_SLPENA */ +#define WM831X_CS1_SLPENA_SHIFT 12 /* CS1_SLPENA */ +#define WM831X_CS1_SLPENA_WIDTH 1 /* CS1_SLPENA */ +#define WM831X_CS1_OFF_RAMP_MASK 0x0C00 /* CS1_OFF_RAMP - [11:10] */ +#define WM831X_CS1_OFF_RAMP_SHIFT 10 /* CS1_OFF_RAMP - [11:10] */ +#define WM831X_CS1_OFF_RAMP_WIDTH 2 /* CS1_OFF_RAMP - [11:10] */ +#define WM831X_CS1_ON_RAMP_MASK 0x0300 /* CS1_ON_RAMP - [9:8] */ +#define WM831X_CS1_ON_RAMP_SHIFT 8 /* CS1_ON_RAMP - [9:8] */ +#define WM831X_CS1_ON_RAMP_WIDTH 2 /* CS1_ON_RAMP - [9:8] */ +#define WM831X_CS1_ISEL_MASK 0x003F /* CS1_ISEL - [5:0] */ +#define WM831X_CS1_ISEL_SHIFT 0 /* CS1_ISEL - [5:0] */ +#define WM831X_CS1_ISEL_WIDTH 6 /* CS1_ISEL - [5:0] */ + +/* + * R16463 (0x404F) - Current Sink 2 + */ +#define WM831X_CS2_ENA 0x8000 /* CS2_ENA */ +#define WM831X_CS2_ENA_MASK 0x8000 /* CS2_ENA */ +#define WM831X_CS2_ENA_SHIFT 15 /* CS2_ENA */ +#define WM831X_CS2_ENA_WIDTH 1 /* CS2_ENA */ +#define WM831X_CS2_DRIVE 0x4000 /* CS2_DRIVE */ +#define WM831X_CS2_DRIVE_MASK 0x4000 /* CS2_DRIVE */ +#define WM831X_CS2_DRIVE_SHIFT 14 /* CS2_DRIVE */ +#define WM831X_CS2_DRIVE_WIDTH 1 /* CS2_DRIVE */ +#define WM831X_CS2_SLPENA 0x1000 /* CS2_SLPENA */ +#define WM831X_CS2_SLPENA_MASK 0x1000 /* CS2_SLPENA */ +#define WM831X_CS2_SLPENA_SHIFT 12 /* CS2_SLPENA */ +#define WM831X_CS2_SLPENA_WIDTH 1 /* CS2_SLPENA */ +#define WM831X_CS2_OFF_RAMP_MASK 0x0C00 /* CS2_OFF_RAMP - [11:10] */ +#define WM831X_CS2_OFF_RAMP_SHIFT 10 /* CS2_OFF_RAMP - [11:10] */ +#define WM831X_CS2_OFF_RAMP_WIDTH 2 /* CS2_OFF_RAMP - [11:10] */ +#define WM831X_CS2_ON_RAMP_MASK 0x0300 /* CS2_ON_RAMP - [9:8] */ +#define WM831X_CS2_ON_RAMP_SHIFT 8 /* CS2_ON_RAMP - [9:8] */ +#define WM831X_CS2_ON_RAMP_WIDTH 2 /* CS2_ON_RAMP - [9:8] */ +#define WM831X_CS2_ISEL_MASK 0x003F /* CS2_ISEL - [5:0] */ +#define WM831X_CS2_ISEL_SHIFT 0 /* CS2_ISEL - [5:0] */ +#define WM831X_CS2_ISEL_WIDTH 6 /* CS2_ISEL - [5:0] */ + +/* + * R16464 (0x4050) - DCDC Enable + */ +#define WM831X_EPE2_ENA 0x0080 /* EPE2_ENA */ +#define WM831X_EPE2_ENA_MASK 0x0080 /* EPE2_ENA */ +#define WM831X_EPE2_ENA_SHIFT 7 /* EPE2_ENA */ +#define WM831X_EPE2_ENA_WIDTH 1 /* EPE2_ENA */ +#define WM831X_EPE1_ENA 0x0040 /* EPE1_ENA */ +#define WM831X_EPE1_ENA_MASK 0x0040 /* EPE1_ENA */ +#define WM831X_EPE1_ENA_SHIFT 6 /* EPE1_ENA */ +#define WM831X_EPE1_ENA_WIDTH 1 /* EPE1_ENA */ +#define WM831X_DC4_ENA 0x0008 /* DC4_ENA */ +#define WM831X_DC4_ENA_MASK 0x0008 /* DC4_ENA */ +#define WM831X_DC4_ENA_SHIFT 3 /* DC4_ENA */ +#define WM831X_DC4_ENA_WIDTH 1 /* DC4_ENA */ +#define WM831X_DC3_ENA 0x0004 /* DC3_ENA */ +#define WM831X_DC3_ENA_MASK 0x0004 /* 
DC3_ENA */ +#define WM831X_DC3_ENA_SHIFT 2 /* DC3_ENA */ +#define WM831X_DC3_ENA_WIDTH 1 /* DC3_ENA */ +#define WM831X_DC2_ENA 0x0002 /* DC2_ENA */ +#define WM831X_DC2_ENA_MASK 0x0002 /* DC2_ENA */ +#define WM831X_DC2_ENA_SHIFT 1 /* DC2_ENA */ +#define WM831X_DC2_ENA_WIDTH 1 /* DC2_ENA */ +#define WM831X_DC1_ENA 0x0001 /* DC1_ENA */ +#define WM831X_DC1_ENA_MASK 0x0001 /* DC1_ENA */ +#define WM831X_DC1_ENA_SHIFT 0 /* DC1_ENA */ +#define WM831X_DC1_ENA_WIDTH 1 /* DC1_ENA */ + +/* + * R16465 (0x4051) - LDO Enable + */ +#define WM831X_LDO11_ENA 0x0400 /* LDO11_ENA */ +#define WM831X_LDO11_ENA_MASK 0x0400 /* LDO11_ENA */ +#define WM831X_LDO11_ENA_SHIFT 10 /* LDO11_ENA */ +#define WM831X_LDO11_ENA_WIDTH 1 /* LDO11_ENA */ +#define WM831X_LDO10_ENA 0x0200 /* LDO10_ENA */ +#define WM831X_LDO10_ENA_MASK 0x0200 /* LDO10_ENA */ +#define WM831X_LDO10_ENA_SHIFT 9 /* LDO10_ENA */ +#define WM831X_LDO10_ENA_WIDTH 1 /* LDO10_ENA */ +#define WM831X_LDO9_ENA 0x0100 /* LDO9_ENA */ +#define WM831X_LDO9_ENA_MASK 0x0100 /* LDO9_ENA */ +#define WM831X_LDO9_ENA_SHIFT 8 /* LDO9_ENA */ +#define WM831X_LDO9_ENA_WIDTH 1 /* LDO9_ENA */ +#define WM831X_LDO8_ENA 0x0080 /* LDO8_ENA */ +#define WM831X_LDO8_ENA_MASK 0x0080 /* LDO8_ENA */ +#define WM831X_LDO8_ENA_SHIFT 7 /* LDO8_ENA */ +#define WM831X_LDO8_ENA_WIDTH 1 /* LDO8_ENA */ +#define WM831X_LDO7_ENA 0x0040 /* LDO7_ENA */ +#define WM831X_LDO7_ENA_MASK 0x0040 /* LDO7_ENA */ +#define WM831X_LDO7_ENA_SHIFT 6 /* LDO7_ENA */ +#define WM831X_LDO7_ENA_WIDTH 1 /* LDO7_ENA */ +#define WM831X_LDO6_ENA 0x0020 /* LDO6_ENA */ +#define WM831X_LDO6_ENA_MASK 0x0020 /* LDO6_ENA */ +#define WM831X_LDO6_ENA_SHIFT 5 /* LDO6_ENA */ +#define WM831X_LDO6_ENA_WIDTH 1 /* LDO6_ENA */ +#define WM831X_LDO5_ENA 0x0010 /* LDO5_ENA */ +#define WM831X_LDO5_ENA_MASK 0x0010 /* LDO5_ENA */ +#define WM831X_LDO5_ENA_SHIFT 4 /* LDO5_ENA */ +#define WM831X_LDO5_ENA_WIDTH 1 /* LDO5_ENA */ +#define WM831X_LDO4_ENA 0x0008 /* LDO4_ENA */ +#define WM831X_LDO4_ENA_MASK 0x0008 /* LDO4_ENA */ +#define WM831X_LDO4_ENA_SHIFT 3 /* LDO4_ENA */ +#define WM831X_LDO4_ENA_WIDTH 1 /* LDO4_ENA */ +#define WM831X_LDO3_ENA 0x0004 /* LDO3_ENA */ +#define WM831X_LDO3_ENA_MASK 0x0004 /* LDO3_ENA */ +#define WM831X_LDO3_ENA_SHIFT 2 /* LDO3_ENA */ +#define WM831X_LDO3_ENA_WIDTH 1 /* LDO3_ENA */ +#define WM831X_LDO2_ENA 0x0002 /* LDO2_ENA */ +#define WM831X_LDO2_ENA_MASK 0x0002 /* LDO2_ENA */ +#define WM831X_LDO2_ENA_SHIFT 1 /* LDO2_ENA */ +#define WM831X_LDO2_ENA_WIDTH 1 /* LDO2_ENA */ +#define WM831X_LDO1_ENA 0x0001 /* LDO1_ENA */ +#define WM831X_LDO1_ENA_MASK 0x0001 /* LDO1_ENA */ +#define WM831X_LDO1_ENA_SHIFT 0 /* LDO1_ENA */ +#define WM831X_LDO1_ENA_WIDTH 1 /* LDO1_ENA */ + +/* + * R16466 (0x4052) - DCDC Status + */ +#define WM831X_EPE2_STS 0x0080 /* EPE2_STS */ +#define WM831X_EPE2_STS_MASK 0x0080 /* EPE2_STS */ +#define WM831X_EPE2_STS_SHIFT 7 /* EPE2_STS */ +#define WM831X_EPE2_STS_WIDTH 1 /* EPE2_STS */ +#define WM831X_EPE1_STS 0x0040 /* EPE1_STS */ +#define WM831X_EPE1_STS_MASK 0x0040 /* EPE1_STS */ +#define WM831X_EPE1_STS_SHIFT 6 /* EPE1_STS */ +#define WM831X_EPE1_STS_WIDTH 1 /* EPE1_STS */ +#define WM831X_DC4_STS 0x0008 /* DC4_STS */ +#define WM831X_DC4_STS_MASK 0x0008 /* DC4_STS */ +#define WM831X_DC4_STS_SHIFT 3 /* DC4_STS */ +#define WM831X_DC4_STS_WIDTH 1 /* DC4_STS */ +#define WM831X_DC3_STS 0x0004 /* DC3_STS */ +#define WM831X_DC3_STS_MASK 0x0004 /* DC3_STS */ +#define WM831X_DC3_STS_SHIFT 2 /* DC3_STS */ +#define WM831X_DC3_STS_WIDTH 1 /* DC3_STS */ +#define WM831X_DC2_STS 0x0002 /* DC2_STS */ +#define 
WM831X_DC2_STS_MASK 0x0002 /* DC2_STS */ +#define WM831X_DC2_STS_SHIFT 1 /* DC2_STS */ +#define WM831X_DC2_STS_WIDTH 1 /* DC2_STS */ +#define WM831X_DC1_STS 0x0001 /* DC1_STS */ +#define WM831X_DC1_STS_MASK 0x0001 /* DC1_STS */ +#define WM831X_DC1_STS_SHIFT 0 /* DC1_STS */ +#define WM831X_DC1_STS_WIDTH 1 /* DC1_STS */ + +/* + * R16467 (0x4053) - LDO Status + */ +#define WM831X_LDO11_STS 0x0400 /* LDO11_STS */ +#define WM831X_LDO11_STS_MASK 0x0400 /* LDO11_STS */ +#define WM831X_LDO11_STS_SHIFT 10 /* LDO11_STS */ +#define WM831X_LDO11_STS_WIDTH 1 /* LDO11_STS */ +#define WM831X_LDO10_STS 0x0200 /* LDO10_STS */ +#define WM831X_LDO10_STS_MASK 0x0200 /* LDO10_STS */ +#define WM831X_LDO10_STS_SHIFT 9 /* LDO10_STS */ +#define WM831X_LDO10_STS_WIDTH 1 /* LDO10_STS */ +#define WM831X_LDO9_STS 0x0100 /* LDO9_STS */ +#define WM831X_LDO9_STS_MASK 0x0100 /* LDO9_STS */ +#define WM831X_LDO9_STS_SHIFT 8 /* LDO9_STS */ +#define WM831X_LDO9_STS_WIDTH 1 /* LDO9_STS */ +#define WM831X_LDO8_STS 0x0080 /* LDO8_STS */ +#define WM831X_LDO8_STS_MASK 0x0080 /* LDO8_STS */ +#define WM831X_LDO8_STS_SHIFT 7 /* LDO8_STS */ +#define WM831X_LDO8_STS_WIDTH 1 /* LDO8_STS */ +#define WM831X_LDO7_STS 0x0040 /* LDO7_STS */ +#define WM831X_LDO7_STS_MASK 0x0040 /* LDO7_STS */ +#define WM831X_LDO7_STS_SHIFT 6 /* LDO7_STS */ +#define WM831X_LDO7_STS_WIDTH 1 /* LDO7_STS */ +#define WM831X_LDO6_STS 0x0020 /* LDO6_STS */ +#define WM831X_LDO6_STS_MASK 0x0020 /* LDO6_STS */ +#define WM831X_LDO6_STS_SHIFT 5 /* LDO6_STS */ +#define WM831X_LDO6_STS_WIDTH 1 /* LDO6_STS */ +#define WM831X_LDO5_STS 0x0010 /* LDO5_STS */ +#define WM831X_LDO5_STS_MASK 0x0010 /* LDO5_STS */ +#define WM831X_LDO5_STS_SHIFT 4 /* LDO5_STS */ +#define WM831X_LDO5_STS_WIDTH 1 /* LDO5_STS */ +#define WM831X_LDO4_STS 0x0008 /* LDO4_STS */ +#define WM831X_LDO4_STS_MASK 0x0008 /* LDO4_STS */ +#define WM831X_LDO4_STS_SHIFT 3 /* LDO4_STS */ +#define WM831X_LDO4_STS_WIDTH 1 /* LDO4_STS */ +#define WM831X_LDO3_STS 0x0004 /* LDO3_STS */ +#define WM831X_LDO3_STS_MASK 0x0004 /* LDO3_STS */ +#define WM831X_LDO3_STS_SHIFT 2 /* LDO3_STS */ +#define WM831X_LDO3_STS_WIDTH 1 /* LDO3_STS */ +#define WM831X_LDO2_STS 0x0002 /* LDO2_STS */ +#define WM831X_LDO2_STS_MASK 0x0002 /* LDO2_STS */ +#define WM831X_LDO2_STS_SHIFT 1 /* LDO2_STS */ +#define WM831X_LDO2_STS_WIDTH 1 /* LDO2_STS */ +#define WM831X_LDO1_STS 0x0001 /* LDO1_STS */ +#define WM831X_LDO1_STS_MASK 0x0001 /* LDO1_STS */ +#define WM831X_LDO1_STS_SHIFT 0 /* LDO1_STS */ +#define WM831X_LDO1_STS_WIDTH 1 /* LDO1_STS */ + +/* + * R16468 (0x4054) - DCDC UV Status + */ +#define WM831X_DC2_OV_STS 0x2000 /* DC2_OV_STS */ +#define WM831X_DC2_OV_STS_MASK 0x2000 /* DC2_OV_STS */ +#define WM831X_DC2_OV_STS_SHIFT 13 /* DC2_OV_STS */ +#define WM831X_DC2_OV_STS_WIDTH 1 /* DC2_OV_STS */ +#define WM831X_DC1_OV_STS 0x1000 /* DC1_OV_STS */ +#define WM831X_DC1_OV_STS_MASK 0x1000 /* DC1_OV_STS */ +#define WM831X_DC1_OV_STS_SHIFT 12 /* DC1_OV_STS */ +#define WM831X_DC1_OV_STS_WIDTH 1 /* DC1_OV_STS */ +#define WM831X_DC2_HC_STS 0x0200 /* DC2_HC_STS */ +#define WM831X_DC2_HC_STS_MASK 0x0200 /* DC2_HC_STS */ +#define WM831X_DC2_HC_STS_SHIFT 9 /* DC2_HC_STS */ +#define WM831X_DC2_HC_STS_WIDTH 1 /* DC2_HC_STS */ +#define WM831X_DC1_HC_STS 0x0100 /* DC1_HC_STS */ +#define WM831X_DC1_HC_STS_MASK 0x0100 /* DC1_HC_STS */ +#define WM831X_DC1_HC_STS_SHIFT 8 /* DC1_HC_STS */ +#define WM831X_DC1_HC_STS_WIDTH 1 /* DC1_HC_STS */ +#define WM831X_DC4_UV_STS 0x0008 /* DC4_UV_STS */ +#define WM831X_DC4_UV_STS_MASK 0x0008 /* DC4_UV_STS */ +#define 
WM831X_DC4_UV_STS_SHIFT 3 /* DC4_UV_STS */ +#define WM831X_DC4_UV_STS_WIDTH 1 /* DC4_UV_STS */ +#define WM831X_DC3_UV_STS 0x0004 /* DC3_UV_STS */ +#define WM831X_DC3_UV_STS_MASK 0x0004 /* DC3_UV_STS */ +#define WM831X_DC3_UV_STS_SHIFT 2 /* DC3_UV_STS */ +#define WM831X_DC3_UV_STS_WIDTH 1 /* DC3_UV_STS */ +#define WM831X_DC2_UV_STS 0x0002 /* DC2_UV_STS */ +#define WM831X_DC2_UV_STS_MASK 0x0002 /* DC2_UV_STS */ +#define WM831X_DC2_UV_STS_SHIFT 1 /* DC2_UV_STS */ +#define WM831X_DC2_UV_STS_WIDTH 1 /* DC2_UV_STS */ +#define WM831X_DC1_UV_STS 0x0001 /* DC1_UV_STS */ +#define WM831X_DC1_UV_STS_MASK 0x0001 /* DC1_UV_STS */ +#define WM831X_DC1_UV_STS_SHIFT 0 /* DC1_UV_STS */ +#define WM831X_DC1_UV_STS_WIDTH 1 /* DC1_UV_STS */ + +/* + * R16469 (0x4055) - LDO UV Status + */ +#define WM831X_INTLDO_UV_STS 0x8000 /* INTLDO_UV_STS */ +#define WM831X_INTLDO_UV_STS_MASK 0x8000 /* INTLDO_UV_STS */ +#define WM831X_INTLDO_UV_STS_SHIFT 15 /* INTLDO_UV_STS */ +#define WM831X_INTLDO_UV_STS_WIDTH 1 /* INTLDO_UV_STS */ +#define WM831X_LDO10_UV_STS 0x0200 /* LDO10_UV_STS */ +#define WM831X_LDO10_UV_STS_MASK 0x0200 /* LDO10_UV_STS */ +#define WM831X_LDO10_UV_STS_SHIFT 9 /* LDO10_UV_STS */ +#define WM831X_LDO10_UV_STS_WIDTH 1 /* LDO10_UV_STS */ +#define WM831X_LDO9_UV_STS 0x0100 /* LDO9_UV_STS */ +#define WM831X_LDO9_UV_STS_MASK 0x0100 /* LDO9_UV_STS */ +#define WM831X_LDO9_UV_STS_SHIFT 8 /* LDO9_UV_STS */ +#define WM831X_LDO9_UV_STS_WIDTH 1 /* LDO9_UV_STS */ +#define WM831X_LDO8_UV_STS 0x0080 /* LDO8_UV_STS */ +#define WM831X_LDO8_UV_STS_MASK 0x0080 /* LDO8_UV_STS */ +#define WM831X_LDO8_UV_STS_SHIFT 7 /* LDO8_UV_STS */ +#define WM831X_LDO8_UV_STS_WIDTH 1 /* LDO8_UV_STS */ +#define WM831X_LDO7_UV_STS 0x0040 /* LDO7_UV_STS */ +#define WM831X_LDO7_UV_STS_MASK 0x0040 /* LDO7_UV_STS */ +#define WM831X_LDO7_UV_STS_SHIFT 6 /* LDO7_UV_STS */ +#define WM831X_LDO7_UV_STS_WIDTH 1 /* LDO7_UV_STS */ +#define WM831X_LDO6_UV_STS 0x0020 /* LDO6_UV_STS */ +#define WM831X_LDO6_UV_STS_MASK 0x0020 /* LDO6_UV_STS */ +#define WM831X_LDO6_UV_STS_SHIFT 5 /* LDO6_UV_STS */ +#define WM831X_LDO6_UV_STS_WIDTH 1 /* LDO6_UV_STS */ +#define WM831X_LDO5_UV_STS 0x0010 /* LDO5_UV_STS */ +#define WM831X_LDO5_UV_STS_MASK 0x0010 /* LDO5_UV_STS */ +#define WM831X_LDO5_UV_STS_SHIFT 4 /* LDO5_UV_STS */ +#define WM831X_LDO5_UV_STS_WIDTH 1 /* LDO5_UV_STS */ +#define WM831X_LDO4_UV_STS 0x0008 /* LDO4_UV_STS */ +#define WM831X_LDO4_UV_STS_MASK 0x0008 /* LDO4_UV_STS */ +#define WM831X_LDO4_UV_STS_SHIFT 3 /* LDO4_UV_STS */ +#define WM831X_LDO4_UV_STS_WIDTH 1 /* LDO4_UV_STS */ +#define WM831X_LDO3_UV_STS 0x0004 /* LDO3_UV_STS */ +#define WM831X_LDO3_UV_STS_MASK 0x0004 /* LDO3_UV_STS */ +#define WM831X_LDO3_UV_STS_SHIFT 2 /* LDO3_UV_STS */ +#define WM831X_LDO3_UV_STS_WIDTH 1 /* LDO3_UV_STS */ +#define WM831X_LDO2_UV_STS 0x0002 /* LDO2_UV_STS */ +#define WM831X_LDO2_UV_STS_MASK 0x0002 /* LDO2_UV_STS */ +#define WM831X_LDO2_UV_STS_SHIFT 1 /* LDO2_UV_STS */ +#define WM831X_LDO2_UV_STS_WIDTH 1 /* LDO2_UV_STS */ +#define WM831X_LDO1_UV_STS 0x0001 /* LDO1_UV_STS */ +#define WM831X_LDO1_UV_STS_MASK 0x0001 /* LDO1_UV_STS */ +#define WM831X_LDO1_UV_STS_SHIFT 0 /* LDO1_UV_STS */ +#define WM831X_LDO1_UV_STS_WIDTH 1 /* LDO1_UV_STS */ + +/* + * R16470 (0x4056) - DC1 Control 1 + */ +#define WM831X_DC1_RATE_MASK 0xC000 /* DC1_RATE - [15:14] */ +#define WM831X_DC1_RATE_SHIFT 14 /* DC1_RATE - [15:14] */ +#define WM831X_DC1_RATE_WIDTH 2 /* DC1_RATE - [15:14] */ +#define WM831X_DC1_PHASE 0x1000 /* DC1_PHASE */ +#define WM831X_DC1_PHASE_MASK 0x1000 /* DC1_PHASE */ +#define 
WM831X_DC1_PHASE_SHIFT 12 /* DC1_PHASE */ +#define WM831X_DC1_PHASE_WIDTH 1 /* DC1_PHASE */ +#define WM831X_DC1_FREQ_MASK 0x0300 /* DC1_FREQ - [9:8] */ +#define WM831X_DC1_FREQ_SHIFT 8 /* DC1_FREQ - [9:8] */ +#define WM831X_DC1_FREQ_WIDTH 2 /* DC1_FREQ - [9:8] */ +#define WM831X_DC1_FLT 0x0080 /* DC1_FLT */ +#define WM831X_DC1_FLT_MASK 0x0080 /* DC1_FLT */ +#define WM831X_DC1_FLT_SHIFT 7 /* DC1_FLT */ +#define WM831X_DC1_FLT_WIDTH 1 /* DC1_FLT */ +#define WM831X_DC1_SOFT_START_MASK 0x0030 /* DC1_SOFT_START - [5:4] */ +#define WM831X_DC1_SOFT_START_SHIFT 4 /* DC1_SOFT_START - [5:4] */ +#define WM831X_DC1_SOFT_START_WIDTH 2 /* DC1_SOFT_START - [5:4] */ +#define WM831X_DC1_CAP_MASK 0x0003 /* DC1_CAP - [1:0] */ +#define WM831X_DC1_CAP_SHIFT 0 /* DC1_CAP - [1:0] */ +#define WM831X_DC1_CAP_WIDTH 2 /* DC1_CAP - [1:0] */ + +/* + * R16471 (0x4057) - DC1 Control 2 + */ +#define WM831X_DC1_ERR_ACT_MASK 0xC000 /* DC1_ERR_ACT - [15:14] */ +#define WM831X_DC1_ERR_ACT_SHIFT 14 /* DC1_ERR_ACT - [15:14] */ +#define WM831X_DC1_ERR_ACT_WIDTH 2 /* DC1_ERR_ACT - [15:14] */ +#define WM831X_DC1_HWC_SRC_MASK 0x1800 /* DC1_HWC_SRC - [12:11] */ +#define WM831X_DC1_HWC_SRC_SHIFT 11 /* DC1_HWC_SRC - [12:11] */ +#define WM831X_DC1_HWC_SRC_WIDTH 2 /* DC1_HWC_SRC - [12:11] */ +#define WM831X_DC1_HWC_VSEL 0x0400 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_VSEL_MASK 0x0400 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_VSEL_SHIFT 10 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_VSEL_WIDTH 1 /* DC1_HWC_VSEL */ +#define WM831X_DC1_HWC_MODE_MASK 0x0300 /* DC1_HWC_MODE - [9:8] */ +#define WM831X_DC1_HWC_MODE_SHIFT 8 /* DC1_HWC_MODE - [9:8] */ +#define WM831X_DC1_HWC_MODE_WIDTH 2 /* DC1_HWC_MODE - [9:8] */ +#define WM831X_DC1_HC_THR_MASK 0x0070 /* DC1_HC_THR - [6:4] */ +#define WM831X_DC1_HC_THR_SHIFT 4 /* DC1_HC_THR - [6:4] */ +#define WM831X_DC1_HC_THR_WIDTH 3 /* DC1_HC_THR - [6:4] */ +#define WM831X_DC1_HC_IND_ENA 0x0001 /* DC1_HC_IND_ENA */ +#define WM831X_DC1_HC_IND_ENA_MASK 0x0001 /* DC1_HC_IND_ENA */ +#define WM831X_DC1_HC_IND_ENA_SHIFT 0 /* DC1_HC_IND_ENA */ +#define WM831X_DC1_HC_IND_ENA_WIDTH 1 /* DC1_HC_IND_ENA */ + +/* + * R16472 (0x4058) - DC1 ON Config + */ +#define WM831X_DC1_ON_SLOT_MASK 0xE000 /* DC1_ON_SLOT - [15:13] */ +#define WM831X_DC1_ON_SLOT_SHIFT 13 /* DC1_ON_SLOT - [15:13] */ +#define WM831X_DC1_ON_SLOT_WIDTH 3 /* DC1_ON_SLOT - [15:13] */ +#define WM831X_DC1_ON_MODE_MASK 0x0300 /* DC1_ON_MODE - [9:8] */ +#define WM831X_DC1_ON_MODE_SHIFT 8 /* DC1_ON_MODE - [9:8] */ +#define WM831X_DC1_ON_MODE_WIDTH 2 /* DC1_ON_MODE - [9:8] */ +#define WM831X_DC1_ON_VSEL_MASK 0x007F /* DC1_ON_VSEL - [6:0] */ +#define WM831X_DC1_ON_VSEL_SHIFT 0 /* DC1_ON_VSEL - [6:0] */ +#define WM831X_DC1_ON_VSEL_WIDTH 7 /* DC1_ON_VSEL - [6:0] */ + +/* + * R16473 (0x4059) - DC1 SLEEP Control + */ +#define WM831X_DC1_SLP_SLOT_MASK 0xE000 /* DC1_SLP_SLOT - [15:13] */ +#define WM831X_DC1_SLP_SLOT_SHIFT 13 /* DC1_SLP_SLOT - [15:13] */ +#define WM831X_DC1_SLP_SLOT_WIDTH 3 /* DC1_SLP_SLOT - [15:13] */ +#define WM831X_DC1_SLP_MODE_MASK 0x0300 /* DC1_SLP_MODE - [9:8] */ +#define WM831X_DC1_SLP_MODE_SHIFT 8 /* DC1_SLP_MODE - [9:8] */ +#define WM831X_DC1_SLP_MODE_WIDTH 2 /* DC1_SLP_MODE - [9:8] */ +#define WM831X_DC1_SLP_VSEL_MASK 0x007F /* DC1_SLP_VSEL - [6:0] */ +#define WM831X_DC1_SLP_VSEL_SHIFT 0 /* DC1_SLP_VSEL - [6:0] */ +#define WM831X_DC1_SLP_VSEL_WIDTH 7 /* DC1_SLP_VSEL - [6:0] */ + +/* + * R16474 (0x405A) - DC1 DVS Control + */ +#define WM831X_DC1_DVS_SRC_MASK 0x1800 /* DC1_DVS_SRC - [12:11] */ +#define WM831X_DC1_DVS_SRC_SHIFT 11 /* 
DC1_DVS_SRC - [12:11] */ +#define WM831X_DC1_DVS_SRC_WIDTH 2 /* DC1_DVS_SRC - [12:11] */ +#define WM831X_DC1_DVS_VSEL_MASK 0x007F /* DC1_DVS_VSEL - [6:0] */ +#define WM831X_DC1_DVS_VSEL_SHIFT 0 /* DC1_DVS_VSEL - [6:0] */ +#define WM831X_DC1_DVS_VSEL_WIDTH 7 /* DC1_DVS_VSEL - [6:0] */ + +/* + * R16475 (0x405B) - DC2 Control 1 + */ +#define WM831X_DC2_RATE_MASK 0xC000 /* DC2_RATE - [15:14] */ +#define WM831X_DC2_RATE_SHIFT 14 /* DC2_RATE - [15:14] */ +#define WM831X_DC2_RATE_WIDTH 2 /* DC2_RATE - [15:14] */ +#define WM831X_DC2_PHASE 0x1000 /* DC2_PHASE */ +#define WM831X_DC2_PHASE_MASK 0x1000 /* DC2_PHASE */ +#define WM831X_DC2_PHASE_SHIFT 12 /* DC2_PHASE */ +#define WM831X_DC2_PHASE_WIDTH 1 /* DC2_PHASE */ +#define WM831X_DC2_FREQ_MASK 0x0300 /* DC2_FREQ - [9:8] */ +#define WM831X_DC2_FREQ_SHIFT 8 /* DC2_FREQ - [9:8] */ +#define WM831X_DC2_FREQ_WIDTH 2 /* DC2_FREQ - [9:8] */ +#define WM831X_DC2_FLT 0x0080 /* DC2_FLT */ +#define WM831X_DC2_FLT_MASK 0x0080 /* DC2_FLT */ +#define WM831X_DC2_FLT_SHIFT 7 /* DC2_FLT */ +#define WM831X_DC2_FLT_WIDTH 1 /* DC2_FLT */ +#define WM831X_DC2_SOFT_START_MASK 0x0030 /* DC2_SOFT_START - [5:4] */ +#define WM831X_DC2_SOFT_START_SHIFT 4 /* DC2_SOFT_START - [5:4] */ +#define WM831X_DC2_SOFT_START_WIDTH 2 /* DC2_SOFT_START - [5:4] */ +#define WM831X_DC2_CAP_MASK 0x0003 /* DC2_CAP - [1:0] */ +#define WM831X_DC2_CAP_SHIFT 0 /* DC2_CAP - [1:0] */ +#define WM831X_DC2_CAP_WIDTH 2 /* DC2_CAP - [1:0] */ + +/* + * R16476 (0x405C) - DC2 Control 2 + */ +#define WM831X_DC2_ERR_ACT_MASK 0xC000 /* DC2_ERR_ACT - [15:14] */ +#define WM831X_DC2_ERR_ACT_SHIFT 14 /* DC2_ERR_ACT - [15:14] */ +#define WM831X_DC2_ERR_ACT_WIDTH 2 /* DC2_ERR_ACT - [15:14] */ +#define WM831X_DC2_HWC_SRC_MASK 0x1800 /* DC2_HWC_SRC - [12:11] */ +#define WM831X_DC2_HWC_SRC_SHIFT 11 /* DC2_HWC_SRC - [12:11] */ +#define WM831X_DC2_HWC_SRC_WIDTH 2 /* DC2_HWC_SRC - [12:11] */ +#define WM831X_DC2_HWC_VSEL 0x0400 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_VSEL_MASK 0x0400 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_VSEL_SHIFT 10 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_VSEL_WIDTH 1 /* DC2_HWC_VSEL */ +#define WM831X_DC2_HWC_MODE_MASK 0x0300 /* DC2_HWC_MODE - [9:8] */ +#define WM831X_DC2_HWC_MODE_SHIFT 8 /* DC2_HWC_MODE - [9:8] */ +#define WM831X_DC2_HWC_MODE_WIDTH 2 /* DC2_HWC_MODE - [9:8] */ +#define WM831X_DC2_HC_THR_MASK 0x0070 /* DC2_HC_THR - [6:4] */ +#define WM831X_DC2_HC_THR_SHIFT 4 /* DC2_HC_THR - [6:4] */ +#define WM831X_DC2_HC_THR_WIDTH 3 /* DC2_HC_THR - [6:4] */ +#define WM831X_DC2_HC_IND_ENA 0x0001 /* DC2_HC_IND_ENA */ +#define WM831X_DC2_HC_IND_ENA_MASK 0x0001 /* DC2_HC_IND_ENA */ +#define WM831X_DC2_HC_IND_ENA_SHIFT 0 /* DC2_HC_IND_ENA */ +#define WM831X_DC2_HC_IND_ENA_WIDTH 1 /* DC2_HC_IND_ENA */ + +/* + * R16477 (0x405D) - DC2 ON Config + */ +#define WM831X_DC2_ON_SLOT_MASK 0xE000 /* DC2_ON_SLOT - [15:13] */ +#define WM831X_DC2_ON_SLOT_SHIFT 13 /* DC2_ON_SLOT - [15:13] */ +#define WM831X_DC2_ON_SLOT_WIDTH 3 /* DC2_ON_SLOT - [15:13] */ +#define WM831X_DC2_ON_MODE_MASK 0x0300 /* DC2_ON_MODE - [9:8] */ +#define WM831X_DC2_ON_MODE_SHIFT 8 /* DC2_ON_MODE - [9:8] */ +#define WM831X_DC2_ON_MODE_WIDTH 2 /* DC2_ON_MODE - [9:8] */ +#define WM831X_DC2_ON_VSEL_MASK 0x007F /* DC2_ON_VSEL - [6:0] */ +#define WM831X_DC2_ON_VSEL_SHIFT 0 /* DC2_ON_VSEL - [6:0] */ +#define WM831X_DC2_ON_VSEL_WIDTH 7 /* DC2_ON_VSEL - [6:0] */ + +/* + * R16478 (0x405E) - DC2 SLEEP Control + */ +#define WM831X_DC2_SLP_SLOT_MASK 0xE000 /* DC2_SLP_SLOT - [15:13] */ +#define WM831X_DC2_SLP_SLOT_SHIFT 13 /* DC2_SLP_SLOT - [15:13] */ 
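
These ON, SLEEP and DVS voltage-select fields are what the fast DVS support described for struct wm831x_buckv_pdata (in pdata.h above) appears to switch between: a hardware control input toggles the converter between two stored selector codes (the ON_VSEL and DVS_VSEL fields) without any I2C/SPI traffic. Below is a sketch of composing new DC1 ON Config (R16472, 0x4058) and DC1 DVS Control (R16474, 0x405A) values with the macros above. The selector-to-voltage mapping is not specified in this header and is deliberately not assumed here, and the actual read-modify-write would normally go through the core driver's register helpers rather than these plain functions.

#include <stdint.h>

/* 'on' and 'dvs' are existing register values read back from the device;
 * 'on_sel' and 'dvs_sel' are 7-bit voltage selector codes.  Only the VSEL
 * field is replaced, so mode, slot and DVS source settings are preserved. */
static uint16_t dc1_on_with_vsel(uint16_t on, uint16_t on_sel)
{
        return (uint16_t)((on & ~WM831X_DC1_ON_VSEL_MASK) |
                          ((on_sel << WM831X_DC1_ON_VSEL_SHIFT) &
                           WM831X_DC1_ON_VSEL_MASK));
}

static uint16_t dc1_dvs_with_vsel(uint16_t dvs, uint16_t dvs_sel)
{
        return (uint16_t)((dvs & ~WM831X_DC1_DVS_VSEL_MASK) |
                          ((dvs_sel << WM831X_DC1_DVS_VSEL_SHIFT) &
                           WM831X_DC1_DVS_VSEL_MASK));
}
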
+#define WM831X_DC2_SLP_SLOT_WIDTH 3 /* DC2_SLP_SLOT - [15:13] */ +#define WM831X_DC2_SLP_MODE_MASK 0x0300 /* DC2_SLP_MODE - [9:8] */ +#define WM831X_DC2_SLP_MODE_SHIFT 8 /* DC2_SLP_MODE - [9:8] */ +#define WM831X_DC2_SLP_MODE_WIDTH 2 /* DC2_SLP_MODE - [9:8] */ +#define WM831X_DC2_SLP_VSEL_MASK 0x007F /* DC2_SLP_VSEL - [6:0] */ +#define WM831X_DC2_SLP_VSEL_SHIFT 0 /* DC2_SLP_VSEL - [6:0] */ +#define WM831X_DC2_SLP_VSEL_WIDTH 7 /* DC2_SLP_VSEL - [6:0] */ + +/* + * R16479 (0x405F) - DC2 DVS Control + */ +#define WM831X_DC2_DVS_SRC_MASK 0x1800 /* DC2_DVS_SRC - [12:11] */ +#define WM831X_DC2_DVS_SRC_SHIFT 11 /* DC2_DVS_SRC - [12:11] */ +#define WM831X_DC2_DVS_SRC_WIDTH 2 /* DC2_DVS_SRC - [12:11] */ +#define WM831X_DC2_DVS_VSEL_MASK 0x007F /* DC2_DVS_VSEL - [6:0] */ +#define WM831X_DC2_DVS_VSEL_SHIFT 0 /* DC2_DVS_VSEL - [6:0] */ +#define WM831X_DC2_DVS_VSEL_WIDTH 7 /* DC2_DVS_VSEL - [6:0] */ + +/* + * R16480 (0x4060) - DC3 Control 1 + */ +#define WM831X_DC3_PHASE 0x1000 /* DC3_PHASE */ +#define WM831X_DC3_PHASE_MASK 0x1000 /* DC3_PHASE */ +#define WM831X_DC3_PHASE_SHIFT 12 /* DC3_PHASE */ +#define WM831X_DC3_PHASE_WIDTH 1 /* DC3_PHASE */ +#define WM831X_DC3_FLT 0x0080 /* DC3_FLT */ +#define WM831X_DC3_FLT_MASK 0x0080 /* DC3_FLT */ +#define WM831X_DC3_FLT_SHIFT 7 /* DC3_FLT */ +#define WM831X_DC3_FLT_WIDTH 1 /* DC3_FLT */ +#define WM831X_DC3_SOFT_START_MASK 0x0030 /* DC3_SOFT_START - [5:4] */ +#define WM831X_DC3_SOFT_START_SHIFT 4 /* DC3_SOFT_START - [5:4] */ +#define WM831X_DC3_SOFT_START_WIDTH 2 /* DC3_SOFT_START - [5:4] */ +#define WM831X_DC3_STNBY_LIM_MASK 0x000C /* DC3_STNBY_LIM - [3:2] */ +#define WM831X_DC3_STNBY_LIM_SHIFT 2 /* DC3_STNBY_LIM - [3:2] */ +#define WM831X_DC3_STNBY_LIM_WIDTH 2 /* DC3_STNBY_LIM - [3:2] */ +#define WM831X_DC3_CAP_MASK 0x0003 /* DC3_CAP - [1:0] */ +#define WM831X_DC3_CAP_SHIFT 0 /* DC3_CAP - [1:0] */ +#define WM831X_DC3_CAP_WIDTH 2 /* DC3_CAP - [1:0] */ + +/* + * R16481 (0x4061) - DC3 Control 2 + */ +#define WM831X_DC3_ERR_ACT_MASK 0xC000 /* DC3_ERR_ACT - [15:14] */ +#define WM831X_DC3_ERR_ACT_SHIFT 14 /* DC3_ERR_ACT - [15:14] */ +#define WM831X_DC3_ERR_ACT_WIDTH 2 /* DC3_ERR_ACT - [15:14] */ +#define WM831X_DC3_HWC_SRC_MASK 0x1800 /* DC3_HWC_SRC - [12:11] */ +#define WM831X_DC3_HWC_SRC_SHIFT 11 /* DC3_HWC_SRC - [12:11] */ +#define WM831X_DC3_HWC_SRC_WIDTH 2 /* DC3_HWC_SRC - [12:11] */ +#define WM831X_DC3_HWC_VSEL 0x0400 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_VSEL_MASK 0x0400 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_VSEL_SHIFT 10 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_VSEL_WIDTH 1 /* DC3_HWC_VSEL */ +#define WM831X_DC3_HWC_MODE_MASK 0x0300 /* DC3_HWC_MODE - [9:8] */ +#define WM831X_DC3_HWC_MODE_SHIFT 8 /* DC3_HWC_MODE - [9:8] */ +#define WM831X_DC3_HWC_MODE_WIDTH 2 /* DC3_HWC_MODE - [9:8] */ +#define WM831X_DC3_OVP 0x0080 /* DC3_OVP */ +#define WM831X_DC3_OVP_MASK 0x0080 /* DC3_OVP */ +#define WM831X_DC3_OVP_SHIFT 7 /* DC3_OVP */ +#define WM831X_DC3_OVP_WIDTH 1 /* DC3_OVP */ + +/* + * R16482 (0x4062) - DC3 ON Config + */ +#define WM831X_DC3_ON_SLOT_MASK 0xE000 /* DC3_ON_SLOT - [15:13] */ +#define WM831X_DC3_ON_SLOT_SHIFT 13 /* DC3_ON_SLOT - [15:13] */ +#define WM831X_DC3_ON_SLOT_WIDTH 3 /* DC3_ON_SLOT - [15:13] */ +#define WM831X_DC3_ON_MODE_MASK 0x0300 /* DC3_ON_MODE - [9:8] */ +#define WM831X_DC3_ON_MODE_SHIFT 8 /* DC3_ON_MODE - [9:8] */ +#define WM831X_DC3_ON_MODE_WIDTH 2 /* DC3_ON_MODE - [9:8] */ +#define WM831X_DC3_ON_VSEL_MASK 0x007F /* DC3_ON_VSEL - [6:0] */ +#define WM831X_DC3_ON_VSEL_SHIFT 0 /* DC3_ON_VSEL - [6:0] */ +#define 
WM831X_DC3_ON_VSEL_WIDTH 7 /* DC3_ON_VSEL - [6:0] */ + +/* + * R16483 (0x4063) - DC3 SLEEP Control + */ +#define WM831X_DC3_SLP_SLOT_MASK 0xE000 /* DC3_SLP_SLOT - [15:13] */ +#define WM831X_DC3_SLP_SLOT_SHIFT 13 /* DC3_SLP_SLOT - [15:13] */ +#define WM831X_DC3_SLP_SLOT_WIDTH 3 /* DC3_SLP_SLOT - [15:13] */ +#define WM831X_DC3_SLP_MODE_MASK 0x0300 /* DC3_SLP_MODE - [9:8] */ +#define WM831X_DC3_SLP_MODE_SHIFT 8 /* DC3_SLP_MODE - [9:8] */ +#define WM831X_DC3_SLP_MODE_WIDTH 2 /* DC3_SLP_MODE - [9:8] */ +#define WM831X_DC3_SLP_VSEL_MASK 0x007F /* DC3_SLP_VSEL - [6:0] */ +#define WM831X_DC3_SLP_VSEL_SHIFT 0 /* DC3_SLP_VSEL - [6:0] */ +#define WM831X_DC3_SLP_VSEL_WIDTH 7 /* DC3_SLP_VSEL - [6:0] */ + +/* + * R16484 (0x4064) - DC4 Control + */ +#define WM831X_DC4_ERR_ACT_MASK 0xC000 /* DC4_ERR_ACT - [15:14] */ +#define WM831X_DC4_ERR_ACT_SHIFT 14 /* DC4_ERR_ACT - [15:14] */ +#define WM831X_DC4_ERR_ACT_WIDTH 2 /* DC4_ERR_ACT - [15:14] */ +#define WM831X_DC4_HWC_SRC_MASK 0x1800 /* DC4_HWC_SRC - [12:11] */ +#define WM831X_DC4_HWC_SRC_SHIFT 11 /* DC4_HWC_SRC - [12:11] */ +#define WM831X_DC4_HWC_SRC_WIDTH 2 /* DC4_HWC_SRC - [12:11] */ +#define WM831X_DC4_HWC_MODE 0x0100 /* DC4_HWC_MODE */ +#define WM831X_DC4_HWC_MODE_MASK 0x0100 /* DC4_HWC_MODE */ +#define WM831X_DC4_HWC_MODE_SHIFT 8 /* DC4_HWC_MODE */ +#define WM831X_DC4_HWC_MODE_WIDTH 1 /* DC4_HWC_MODE */ +#define WM831X_DC4_RANGE_MASK 0x000C /* DC4_RANGE - [3:2] */ +#define WM831X_DC4_RANGE_SHIFT 2 /* DC4_RANGE - [3:2] */ +#define WM831X_DC4_RANGE_WIDTH 2 /* DC4_RANGE - [3:2] */ +#define WM831X_DC4_FBSRC 0x0001 /* DC4_FBSRC */ +#define WM831X_DC4_FBSRC_MASK 0x0001 /* DC4_FBSRC */ +#define WM831X_DC4_FBSRC_SHIFT 0 /* DC4_FBSRC */ +#define WM831X_DC4_FBSRC_WIDTH 1 /* DC4_FBSRC */ + +/* + * R16485 (0x4065) - DC4 SLEEP Control + */ +#define WM831X_DC4_SLPENA 0x0100 /* DC4_SLPENA */ +#define WM831X_DC4_SLPENA_MASK 0x0100 /* DC4_SLPENA */ +#define WM831X_DC4_SLPENA_SHIFT 8 /* DC4_SLPENA */ +#define WM831X_DC4_SLPENA_WIDTH 1 /* DC4_SLPENA */ + +/* + * R16488 (0x4068) - LDO1 Control + */ +#define WM831X_LDO1_ERR_ACT_MASK 0xC000 /* LDO1_ERR_ACT - [15:14] */ +#define WM831X_LDO1_ERR_ACT_SHIFT 14 /* LDO1_ERR_ACT - [15:14] */ +#define WM831X_LDO1_ERR_ACT_WIDTH 2 /* LDO1_ERR_ACT - [15:14] */ +#define WM831X_LDO1_HWC_SRC_MASK 0x1800 /* LDO1_HWC_SRC - [12:11] */ +#define WM831X_LDO1_HWC_SRC_SHIFT 11 /* LDO1_HWC_SRC - [12:11] */ +#define WM831X_LDO1_HWC_SRC_WIDTH 2 /* LDO1_HWC_SRC - [12:11] */ +#define WM831X_LDO1_HWC_VSEL 0x0400 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_VSEL_MASK 0x0400 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_VSEL_SHIFT 10 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_VSEL_WIDTH 1 /* LDO1_HWC_VSEL */ +#define WM831X_LDO1_HWC_MODE_MASK 0x0300 /* LDO1_HWC_MODE - [9:8] */ +#define WM831X_LDO1_HWC_MODE_SHIFT 8 /* LDO1_HWC_MODE - [9:8] */ +#define WM831X_LDO1_HWC_MODE_WIDTH 2 /* LDO1_HWC_MODE - [9:8] */ +#define WM831X_LDO1_FLT 0x0080 /* LDO1_FLT */ +#define WM831X_LDO1_FLT_MASK 0x0080 /* LDO1_FLT */ +#define WM831X_LDO1_FLT_SHIFT 7 /* LDO1_FLT */ +#define WM831X_LDO1_FLT_WIDTH 1 /* LDO1_FLT */ +#define WM831X_LDO1_SWI 0x0040 /* LDO1_SWI */ +#define WM831X_LDO1_SWI_MASK 0x0040 /* LDO1_SWI */ +#define WM831X_LDO1_SWI_SHIFT 6 /* LDO1_SWI */ +#define WM831X_LDO1_SWI_WIDTH 1 /* LDO1_SWI */ +#define WM831X_LDO1_LP_MODE 0x0001 /* LDO1_LP_MODE */ +#define WM831X_LDO1_LP_MODE_MASK 0x0001 /* LDO1_LP_MODE */ +#define WM831X_LDO1_LP_MODE_SHIFT 0 /* LDO1_LP_MODE */ +#define WM831X_LDO1_LP_MODE_WIDTH 1 /* LDO1_LP_MODE */ + +/* + * R16489 (0x4069) - LDO1 ON 
Control + */ +#define WM831X_LDO1_ON_SLOT_MASK 0xE000 /* LDO1_ON_SLOT - [15:13] */ +#define WM831X_LDO1_ON_SLOT_SHIFT 13 /* LDO1_ON_SLOT - [15:13] */ +#define WM831X_LDO1_ON_SLOT_WIDTH 3 /* LDO1_ON_SLOT - [15:13] */ +#define WM831X_LDO1_ON_MODE 0x0100 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_MODE_MASK 0x0100 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_MODE_SHIFT 8 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_MODE_WIDTH 1 /* LDO1_ON_MODE */ +#define WM831X_LDO1_ON_VSEL_MASK 0x001F /* LDO1_ON_VSEL - [4:0] */ +#define WM831X_LDO1_ON_VSEL_SHIFT 0 /* LDO1_ON_VSEL - [4:0] */ +#define WM831X_LDO1_ON_VSEL_WIDTH 5 /* LDO1_ON_VSEL - [4:0] */ + +/* + * R16490 (0x406A) - LDO1 SLEEP Control + */ +#define WM831X_LDO1_SLP_SLOT_MASK 0xE000 /* LDO1_SLP_SLOT - [15:13] */ +#define WM831X_LDO1_SLP_SLOT_SHIFT 13 /* LDO1_SLP_SLOT - [15:13] */ +#define WM831X_LDO1_SLP_SLOT_WIDTH 3 /* LDO1_SLP_SLOT - [15:13] */ +#define WM831X_LDO1_SLP_MODE 0x0100 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_MODE_MASK 0x0100 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_MODE_SHIFT 8 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_MODE_WIDTH 1 /* LDO1_SLP_MODE */ +#define WM831X_LDO1_SLP_VSEL_MASK 0x001F /* LDO1_SLP_VSEL - [4:0] */ +#define WM831X_LDO1_SLP_VSEL_SHIFT 0 /* LDO1_SLP_VSEL - [4:0] */ +#define WM831X_LDO1_SLP_VSEL_WIDTH 5 /* LDO1_SLP_VSEL - [4:0] */ + +/* + * R16491 (0x406B) - LDO2 Control + */ +#define WM831X_LDO2_ERR_ACT_MASK 0xC000 /* LDO2_ERR_ACT - [15:14] */ +#define WM831X_LDO2_ERR_ACT_SHIFT 14 /* LDO2_ERR_ACT - [15:14] */ +#define WM831X_LDO2_ERR_ACT_WIDTH 2 /* LDO2_ERR_ACT - [15:14] */ +#define WM831X_LDO2_HWC_SRC_MASK 0x1800 /* LDO2_HWC_SRC - [12:11] */ +#define WM831X_LDO2_HWC_SRC_SHIFT 11 /* LDO2_HWC_SRC - [12:11] */ +#define WM831X_LDO2_HWC_SRC_WIDTH 2 /* LDO2_HWC_SRC - [12:11] */ +#define WM831X_LDO2_HWC_VSEL 0x0400 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_VSEL_MASK 0x0400 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_VSEL_SHIFT 10 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_VSEL_WIDTH 1 /* LDO2_HWC_VSEL */ +#define WM831X_LDO2_HWC_MODE_MASK 0x0300 /* LDO2_HWC_MODE - [9:8] */ +#define WM831X_LDO2_HWC_MODE_SHIFT 8 /* LDO2_HWC_MODE - [9:8] */ +#define WM831X_LDO2_HWC_MODE_WIDTH 2 /* LDO2_HWC_MODE - [9:8] */ +#define WM831X_LDO2_FLT 0x0080 /* LDO2_FLT */ +#define WM831X_LDO2_FLT_MASK 0x0080 /* LDO2_FLT */ +#define WM831X_LDO2_FLT_SHIFT 7 /* LDO2_FLT */ +#define WM831X_LDO2_FLT_WIDTH 1 /* LDO2_FLT */ +#define WM831X_LDO2_SWI 0x0040 /* LDO2_SWI */ +#define WM831X_LDO2_SWI_MASK 0x0040 /* LDO2_SWI */ +#define WM831X_LDO2_SWI_SHIFT 6 /* LDO2_SWI */ +#define WM831X_LDO2_SWI_WIDTH 1 /* LDO2_SWI */ +#define WM831X_LDO2_LP_MODE 0x0001 /* LDO2_LP_MODE */ +#define WM831X_LDO2_LP_MODE_MASK 0x0001 /* LDO2_LP_MODE */ +#define WM831X_LDO2_LP_MODE_SHIFT 0 /* LDO2_LP_MODE */ +#define WM831X_LDO2_LP_MODE_WIDTH 1 /* LDO2_LP_MODE */ + +/* + * R16492 (0x406C) - LDO2 ON Control + */ +#define WM831X_LDO2_ON_SLOT_MASK 0xE000 /* LDO2_ON_SLOT - [15:13] */ +#define WM831X_LDO2_ON_SLOT_SHIFT 13 /* LDO2_ON_SLOT - [15:13] */ +#define WM831X_LDO2_ON_SLOT_WIDTH 3 /* LDO2_ON_SLOT - [15:13] */ +#define WM831X_LDO2_ON_MODE 0x0100 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_MODE_MASK 0x0100 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_MODE_SHIFT 8 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_MODE_WIDTH 1 /* LDO2_ON_MODE */ +#define WM831X_LDO2_ON_VSEL_MASK 0x001F /* LDO2_ON_VSEL - [4:0] */ +#define WM831X_LDO2_ON_VSEL_SHIFT 0 /* LDO2_ON_VSEL - [4:0] */ +#define WM831X_LDO2_ON_VSEL_WIDTH 5 /* LDO2_ON_VSEL - [4:0] */ + +/* + * R16493 (0x406D) - LDO2 
SLEEP Control + */ +#define WM831X_LDO2_SLP_SLOT_MASK 0xE000 /* LDO2_SLP_SLOT - [15:13] */ +#define WM831X_LDO2_SLP_SLOT_SHIFT 13 /* LDO2_SLP_SLOT - [15:13] */ +#define WM831X_LDO2_SLP_SLOT_WIDTH 3 /* LDO2_SLP_SLOT - [15:13] */ +#define WM831X_LDO2_SLP_MODE 0x0100 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_MODE_MASK 0x0100 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_MODE_SHIFT 8 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_MODE_WIDTH 1 /* LDO2_SLP_MODE */ +#define WM831X_LDO2_SLP_VSEL_MASK 0x001F /* LDO2_SLP_VSEL - [4:0] */ +#define WM831X_LDO2_SLP_VSEL_SHIFT 0 /* LDO2_SLP_VSEL - [4:0] */ +#define WM831X_LDO2_SLP_VSEL_WIDTH 5 /* LDO2_SLP_VSEL - [4:0] */ + +/* + * R16494 (0x406E) - LDO3 Control + */ +#define WM831X_LDO3_ERR_ACT_MASK 0xC000 /* LDO3_ERR_ACT - [15:14] */ +#define WM831X_LDO3_ERR_ACT_SHIFT 14 /* LDO3_ERR_ACT - [15:14] */ +#define WM831X_LDO3_ERR_ACT_WIDTH 2 /* LDO3_ERR_ACT - [15:14] */ +#define WM831X_LDO3_HWC_SRC_MASK 0x1800 /* LDO3_HWC_SRC - [12:11] */ +#define WM831X_LDO3_HWC_SRC_SHIFT 11 /* LDO3_HWC_SRC - [12:11] */ +#define WM831X_LDO3_HWC_SRC_WIDTH 2 /* LDO3_HWC_SRC - [12:11] */ +#define WM831X_LDO3_HWC_VSEL 0x0400 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_VSEL_MASK 0x0400 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_VSEL_SHIFT 10 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_VSEL_WIDTH 1 /* LDO3_HWC_VSEL */ +#define WM831X_LDO3_HWC_MODE_MASK 0x0300 /* LDO3_HWC_MODE - [9:8] */ +#define WM831X_LDO3_HWC_MODE_SHIFT 8 /* LDO3_HWC_MODE - [9:8] */ +#define WM831X_LDO3_HWC_MODE_WIDTH 2 /* LDO3_HWC_MODE - [9:8] */ +#define WM831X_LDO3_FLT 0x0080 /* LDO3_FLT */ +#define WM831X_LDO3_FLT_MASK 0x0080 /* LDO3_FLT */ +#define WM831X_LDO3_FLT_SHIFT 7 /* LDO3_FLT */ +#define WM831X_LDO3_FLT_WIDTH 1 /* LDO3_FLT */ +#define WM831X_LDO3_SWI 0x0040 /* LDO3_SWI */ +#define WM831X_LDO3_SWI_MASK 0x0040 /* LDO3_SWI */ +#define WM831X_LDO3_SWI_SHIFT 6 /* LDO3_SWI */ +#define WM831X_LDO3_SWI_WIDTH 1 /* LDO3_SWI */ +#define WM831X_LDO3_LP_MODE 0x0001 /* LDO3_LP_MODE */ +#define WM831X_LDO3_LP_MODE_MASK 0x0001 /* LDO3_LP_MODE */ +#define WM831X_LDO3_LP_MODE_SHIFT 0 /* LDO3_LP_MODE */ +#define WM831X_LDO3_LP_MODE_WIDTH 1 /* LDO3_LP_MODE */ + +/* + * R16495 (0x406F) - LDO3 ON Control + */ +#define WM831X_LDO3_ON_SLOT_MASK 0xE000 /* LDO3_ON_SLOT - [15:13] */ +#define WM831X_LDO3_ON_SLOT_SHIFT 13 /* LDO3_ON_SLOT - [15:13] */ +#define WM831X_LDO3_ON_SLOT_WIDTH 3 /* LDO3_ON_SLOT - [15:13] */ +#define WM831X_LDO3_ON_MODE 0x0100 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_MODE_MASK 0x0100 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_MODE_SHIFT 8 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_MODE_WIDTH 1 /* LDO3_ON_MODE */ +#define WM831X_LDO3_ON_VSEL_MASK 0x001F /* LDO3_ON_VSEL - [4:0] */ +#define WM831X_LDO3_ON_VSEL_SHIFT 0 /* LDO3_ON_VSEL - [4:0] */ +#define WM831X_LDO3_ON_VSEL_WIDTH 5 /* LDO3_ON_VSEL - [4:0] */ + +/* + * R16496 (0x4070) - LDO3 SLEEP Control + */ +#define WM831X_LDO3_SLP_SLOT_MASK 0xE000 /* LDO3_SLP_SLOT - [15:13] */ +#define WM831X_LDO3_SLP_SLOT_SHIFT 13 /* LDO3_SLP_SLOT - [15:13] */ +#define WM831X_LDO3_SLP_SLOT_WIDTH 3 /* LDO3_SLP_SLOT - [15:13] */ +#define WM831X_LDO3_SLP_MODE 0x0100 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_MODE_MASK 0x0100 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_MODE_SHIFT 8 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_MODE_WIDTH 1 /* LDO3_SLP_MODE */ +#define WM831X_LDO3_SLP_VSEL_MASK 0x001F /* LDO3_SLP_VSEL - [4:0] */ +#define WM831X_LDO3_SLP_VSEL_SHIFT 0 /* LDO3_SLP_VSEL - [4:0] */ +#define WM831X_LDO3_SLP_VSEL_WIDTH 5 /* LDO3_SLP_VSEL - [4:0] */ + +/* + * 
R16497 (0x4071) - LDO4 Control + */ +#define WM831X_LDO4_ERR_ACT_MASK 0xC000 /* LDO4_ERR_ACT - [15:14] */ +#define WM831X_LDO4_ERR_ACT_SHIFT 14 /* LDO4_ERR_ACT - [15:14] */ +#define WM831X_LDO4_ERR_ACT_WIDTH 2 /* LDO4_ERR_ACT - [15:14] */ +#define WM831X_LDO4_HWC_SRC_MASK 0x1800 /* LDO4_HWC_SRC - [12:11] */ +#define WM831X_LDO4_HWC_SRC_SHIFT 11 /* LDO4_HWC_SRC - [12:11] */ +#define WM831X_LDO4_HWC_SRC_WIDTH 2 /* LDO4_HWC_SRC - [12:11] */ +#define WM831X_LDO4_HWC_VSEL 0x0400 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_VSEL_MASK 0x0400 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_VSEL_SHIFT 10 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_VSEL_WIDTH 1 /* LDO4_HWC_VSEL */ +#define WM831X_LDO4_HWC_MODE_MASK 0x0300 /* LDO4_HWC_MODE - [9:8] */ +#define WM831X_LDO4_HWC_MODE_SHIFT 8 /* LDO4_HWC_MODE - [9:8] */ +#define WM831X_LDO4_HWC_MODE_WIDTH 2 /* LDO4_HWC_MODE - [9:8] */ +#define WM831X_LDO4_FLT 0x0080 /* LDO4_FLT */ +#define WM831X_LDO4_FLT_MASK 0x0080 /* LDO4_FLT */ +#define WM831X_LDO4_FLT_SHIFT 7 /* LDO4_FLT */ +#define WM831X_LDO4_FLT_WIDTH 1 /* LDO4_FLT */ +#define WM831X_LDO4_SWI 0x0040 /* LDO4_SWI */ +#define WM831X_LDO4_SWI_MASK 0x0040 /* LDO4_SWI */ +#define WM831X_LDO4_SWI_SHIFT 6 /* LDO4_SWI */ +#define WM831X_LDO4_SWI_WIDTH 1 /* LDO4_SWI */ +#define WM831X_LDO4_LP_MODE 0x0001 /* LDO4_LP_MODE */ +#define WM831X_LDO4_LP_MODE_MASK 0x0001 /* LDO4_LP_MODE */ +#define WM831X_LDO4_LP_MODE_SHIFT 0 /* LDO4_LP_MODE */ +#define WM831X_LDO4_LP_MODE_WIDTH 1 /* LDO4_LP_MODE */ + +/* + * R16498 (0x4072) - LDO4 ON Control + */ +#define WM831X_LDO4_ON_SLOT_MASK 0xE000 /* LDO4_ON_SLOT - [15:13] */ +#define WM831X_LDO4_ON_SLOT_SHIFT 13 /* LDO4_ON_SLOT - [15:13] */ +#define WM831X_LDO4_ON_SLOT_WIDTH 3 /* LDO4_ON_SLOT - [15:13] */ +#define WM831X_LDO4_ON_MODE 0x0100 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_MODE_MASK 0x0100 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_MODE_SHIFT 8 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_MODE_WIDTH 1 /* LDO4_ON_MODE */ +#define WM831X_LDO4_ON_VSEL_MASK 0x001F /* LDO4_ON_VSEL - [4:0] */ +#define WM831X_LDO4_ON_VSEL_SHIFT 0 /* LDO4_ON_VSEL - [4:0] */ +#define WM831X_LDO4_ON_VSEL_WIDTH 5 /* LDO4_ON_VSEL - [4:0] */ + +/* + * R16499 (0x4073) - LDO4 SLEEP Control + */ +#define WM831X_LDO4_SLP_SLOT_MASK 0xE000 /* LDO4_SLP_SLOT - [15:13] */ +#define WM831X_LDO4_SLP_SLOT_SHIFT 13 /* LDO4_SLP_SLOT - [15:13] */ +#define WM831X_LDO4_SLP_SLOT_WIDTH 3 /* LDO4_SLP_SLOT - [15:13] */ +#define WM831X_LDO4_SLP_MODE 0x0100 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_MODE_MASK 0x0100 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_MODE_SHIFT 8 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_MODE_WIDTH 1 /* LDO4_SLP_MODE */ +#define WM831X_LDO4_SLP_VSEL_MASK 0x001F /* LDO4_SLP_VSEL - [4:0] */ +#define WM831X_LDO4_SLP_VSEL_SHIFT 0 /* LDO4_SLP_VSEL - [4:0] */ +#define WM831X_LDO4_SLP_VSEL_WIDTH 5 /* LDO4_SLP_VSEL - [4:0] */ + +/* + * R16500 (0x4074) - LDO5 Control + */ +#define WM831X_LDO5_ERR_ACT_MASK 0xC000 /* LDO5_ERR_ACT - [15:14] */ +#define WM831X_LDO5_ERR_ACT_SHIFT 14 /* LDO5_ERR_ACT - [15:14] */ +#define WM831X_LDO5_ERR_ACT_WIDTH 2 /* LDO5_ERR_ACT - [15:14] */ +#define WM831X_LDO5_HWC_SRC_MASK 0x1800 /* LDO5_HWC_SRC - [12:11] */ +#define WM831X_LDO5_HWC_SRC_SHIFT 11 /* LDO5_HWC_SRC - [12:11] */ +#define WM831X_LDO5_HWC_SRC_WIDTH 2 /* LDO5_HWC_SRC - [12:11] */ +#define WM831X_LDO5_HWC_VSEL 0x0400 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_VSEL_MASK 0x0400 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_VSEL_SHIFT 10 /* LDO5_HWC_VSEL */ +#define WM831X_LDO5_HWC_VSEL_WIDTH 1 /* LDO5_HWC_VSEL 
*/ +#define WM831X_LDO5_HWC_MODE_MASK 0x0300 /* LDO5_HWC_MODE - [9:8] */ +#define WM831X_LDO5_HWC_MODE_SHIFT 8 /* LDO5_HWC_MODE - [9:8] */ +#define WM831X_LDO5_HWC_MODE_WIDTH 2 /* LDO5_HWC_MODE - [9:8] */ +#define WM831X_LDO5_FLT 0x0080 /* LDO5_FLT */ +#define WM831X_LDO5_FLT_MASK 0x0080 /* LDO5_FLT */ +#define WM831X_LDO5_FLT_SHIFT 7 /* LDO5_FLT */ +#define WM831X_LDO5_FLT_WIDTH 1 /* LDO5_FLT */ +#define WM831X_LDO5_SWI 0x0040 /* LDO5_SWI */ +#define WM831X_LDO5_SWI_MASK 0x0040 /* LDO5_SWI */ +#define WM831X_LDO5_SWI_SHIFT 6 /* LDO5_SWI */ +#define WM831X_LDO5_SWI_WIDTH 1 /* LDO5_SWI */ +#define WM831X_LDO5_LP_MODE 0x0001 /* LDO5_LP_MODE */ +#define WM831X_LDO5_LP_MODE_MASK 0x0001 /* LDO5_LP_MODE */ +#define WM831X_LDO5_LP_MODE_SHIFT 0 /* LDO5_LP_MODE */ +#define WM831X_LDO5_LP_MODE_WIDTH 1 /* LDO5_LP_MODE */ + +/* + * R16501 (0x4075) - LDO5 ON Control + */ +#define WM831X_LDO5_ON_SLOT_MASK 0xE000 /* LDO5_ON_SLOT - [15:13] */ +#define WM831X_LDO5_ON_SLOT_SHIFT 13 /* LDO5_ON_SLOT - [15:13] */ +#define WM831X_LDO5_ON_SLOT_WIDTH 3 /* LDO5_ON_SLOT - [15:13] */ +#define WM831X_LDO5_ON_MODE 0x0100 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_MODE_MASK 0x0100 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_MODE_SHIFT 8 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_MODE_WIDTH 1 /* LDO5_ON_MODE */ +#define WM831X_LDO5_ON_VSEL_MASK 0x001F /* LDO5_ON_VSEL - [4:0] */ +#define WM831X_LDO5_ON_VSEL_SHIFT 0 /* LDO5_ON_VSEL - [4:0] */ +#define WM831X_LDO5_ON_VSEL_WIDTH 5 /* LDO5_ON_VSEL - [4:0] */ + +/* + * R16502 (0x4076) - LDO5 SLEEP Control + */ +#define WM831X_LDO5_SLP_SLOT_MASK 0xE000 /* LDO5_SLP_SLOT - [15:13] */ +#define WM831X_LDO5_SLP_SLOT_SHIFT 13 /* LDO5_SLP_SLOT - [15:13] */ +#define WM831X_LDO5_SLP_SLOT_WIDTH 3 /* LDO5_SLP_SLOT - [15:13] */ +#define WM831X_LDO5_SLP_MODE 0x0100 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_MODE_MASK 0x0100 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_MODE_SHIFT 8 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_MODE_WIDTH 1 /* LDO5_SLP_MODE */ +#define WM831X_LDO5_SLP_VSEL_MASK 0x001F /* LDO5_SLP_VSEL - [4:0] */ +#define WM831X_LDO5_SLP_VSEL_SHIFT 0 /* LDO5_SLP_VSEL - [4:0] */ +#define WM831X_LDO5_SLP_VSEL_WIDTH 5 /* LDO5_SLP_VSEL - [4:0] */ + +/* + * R16503 (0x4077) - LDO6 Control + */ +#define WM831X_LDO6_ERR_ACT_MASK 0xC000 /* LDO6_ERR_ACT - [15:14] */ +#define WM831X_LDO6_ERR_ACT_SHIFT 14 /* LDO6_ERR_ACT - [15:14] */ +#define WM831X_LDO6_ERR_ACT_WIDTH 2 /* LDO6_ERR_ACT - [15:14] */ +#define WM831X_LDO6_HWC_SRC_MASK 0x1800 /* LDO6_HWC_SRC - [12:11] */ +#define WM831X_LDO6_HWC_SRC_SHIFT 11 /* LDO6_HWC_SRC - [12:11] */ +#define WM831X_LDO6_HWC_SRC_WIDTH 2 /* LDO6_HWC_SRC - [12:11] */ +#define WM831X_LDO6_HWC_VSEL 0x0400 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_VSEL_MASK 0x0400 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_VSEL_SHIFT 10 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_VSEL_WIDTH 1 /* LDO6_HWC_VSEL */ +#define WM831X_LDO6_HWC_MODE_MASK 0x0300 /* LDO6_HWC_MODE - [9:8] */ +#define WM831X_LDO6_HWC_MODE_SHIFT 8 /* LDO6_HWC_MODE - [9:8] */ +#define WM831X_LDO6_HWC_MODE_WIDTH 2 /* LDO6_HWC_MODE - [9:8] */ +#define WM831X_LDO6_FLT 0x0080 /* LDO6_FLT */ +#define WM831X_LDO6_FLT_MASK 0x0080 /* LDO6_FLT */ +#define WM831X_LDO6_FLT_SHIFT 7 /* LDO6_FLT */ +#define WM831X_LDO6_FLT_WIDTH 1 /* LDO6_FLT */ +#define WM831X_LDO6_SWI 0x0040 /* LDO6_SWI */ +#define WM831X_LDO6_SWI_MASK 0x0040 /* LDO6_SWI */ +#define WM831X_LDO6_SWI_SHIFT 6 /* LDO6_SWI */ +#define WM831X_LDO6_SWI_WIDTH 1 /* LDO6_SWI */ +#define WM831X_LDO6_LP_MODE 0x0001 /* LDO6_LP_MODE */ +#define 
WM831X_LDO6_LP_MODE_MASK 0x0001 /* LDO6_LP_MODE */ +#define WM831X_LDO6_LP_MODE_SHIFT 0 /* LDO6_LP_MODE */ +#define WM831X_LDO6_LP_MODE_WIDTH 1 /* LDO6_LP_MODE */ + +/* + * R16504 (0x4078) - LDO6 ON Control + */ +#define WM831X_LDO6_ON_SLOT_MASK 0xE000 /* LDO6_ON_SLOT - [15:13] */ +#define WM831X_LDO6_ON_SLOT_SHIFT 13 /* LDO6_ON_SLOT - [15:13] */ +#define WM831X_LDO6_ON_SLOT_WIDTH 3 /* LDO6_ON_SLOT - [15:13] */ +#define WM831X_LDO6_ON_MODE 0x0100 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_MODE_MASK 0x0100 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_MODE_SHIFT 8 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_MODE_WIDTH 1 /* LDO6_ON_MODE */ +#define WM831X_LDO6_ON_VSEL_MASK 0x001F /* LDO6_ON_VSEL - [4:0] */ +#define WM831X_LDO6_ON_VSEL_SHIFT 0 /* LDO6_ON_VSEL - [4:0] */ +#define WM831X_LDO6_ON_VSEL_WIDTH 5 /* LDO6_ON_VSEL - [4:0] */ + +/* + * R16505 (0x4079) - LDO6 SLEEP Control + */ +#define WM831X_LDO6_SLP_SLOT_MASK 0xE000 /* LDO6_SLP_SLOT - [15:13] */ +#define WM831X_LDO6_SLP_SLOT_SHIFT 13 /* LDO6_SLP_SLOT - [15:13] */ +#define WM831X_LDO6_SLP_SLOT_WIDTH 3 /* LDO6_SLP_SLOT - [15:13] */ +#define WM831X_LDO6_SLP_MODE 0x0100 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_MODE_MASK 0x0100 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_MODE_SHIFT 8 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_MODE_WIDTH 1 /* LDO6_SLP_MODE */ +#define WM831X_LDO6_SLP_VSEL_MASK 0x001F /* LDO6_SLP_VSEL - [4:0] */ +#define WM831X_LDO6_SLP_VSEL_SHIFT 0 /* LDO6_SLP_VSEL - [4:0] */ +#define WM831X_LDO6_SLP_VSEL_WIDTH 5 /* LDO6_SLP_VSEL - [4:0] */ + +/* + * R16506 (0x407A) - LDO7 Control + */ +#define WM831X_LDO7_ERR_ACT_MASK 0xC000 /* LDO7_ERR_ACT - [15:14] */ +#define WM831X_LDO7_ERR_ACT_SHIFT 14 /* LDO7_ERR_ACT - [15:14] */ +#define WM831X_LDO7_ERR_ACT_WIDTH 2 /* LDO7_ERR_ACT - [15:14] */ +#define WM831X_LDO7_HWC_SRC_MASK 0x1800 /* LDO7_HWC_SRC - [12:11] */ +#define WM831X_LDO7_HWC_SRC_SHIFT 11 /* LDO7_HWC_SRC - [12:11] */ +#define WM831X_LDO7_HWC_SRC_WIDTH 2 /* LDO7_HWC_SRC - [12:11] */ +#define WM831X_LDO7_HWC_VSEL 0x0400 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_VSEL_MASK 0x0400 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_VSEL_SHIFT 10 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_VSEL_WIDTH 1 /* LDO7_HWC_VSEL */ +#define WM831X_LDO7_HWC_MODE_MASK 0x0300 /* LDO7_HWC_MODE - [9:8] */ +#define WM831X_LDO7_HWC_MODE_SHIFT 8 /* LDO7_HWC_MODE - [9:8] */ +#define WM831X_LDO7_HWC_MODE_WIDTH 2 /* LDO7_HWC_MODE - [9:8] */ +#define WM831X_LDO7_FLT 0x0080 /* LDO7_FLT */ +#define WM831X_LDO7_FLT_MASK 0x0080 /* LDO7_FLT */ +#define WM831X_LDO7_FLT_SHIFT 7 /* LDO7_FLT */ +#define WM831X_LDO7_FLT_WIDTH 1 /* LDO7_FLT */ +#define WM831X_LDO7_SWI 0x0040 /* LDO7_SWI */ +#define WM831X_LDO7_SWI_MASK 0x0040 /* LDO7_SWI */ +#define WM831X_LDO7_SWI_SHIFT 6 /* LDO7_SWI */ +#define WM831X_LDO7_SWI_WIDTH 1 /* LDO7_SWI */ + +/* + * R16507 (0x407B) - LDO7 ON Control + */ +#define WM831X_LDO7_ON_SLOT_MASK 0xE000 /* LDO7_ON_SLOT - [15:13] */ +#define WM831X_LDO7_ON_SLOT_SHIFT 13 /* LDO7_ON_SLOT - [15:13] */ +#define WM831X_LDO7_ON_SLOT_WIDTH 3 /* LDO7_ON_SLOT - [15:13] */ +#define WM831X_LDO7_ON_MODE 0x0100 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_MODE_MASK 0x0100 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_MODE_SHIFT 8 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_MODE_WIDTH 1 /* LDO7_ON_MODE */ +#define WM831X_LDO7_ON_VSEL_MASK 0x001F /* LDO7_ON_VSEL - [4:0] */ +#define WM831X_LDO7_ON_VSEL_SHIFT 0 /* LDO7_ON_VSEL - [4:0] */ +#define WM831X_LDO7_ON_VSEL_WIDTH 5 /* LDO7_ON_VSEL - [4:0] */ + +/* + * R16508 (0x407C) - LDO7 SLEEP Control + */ +#define 
WM831X_LDO7_SLP_SLOT_MASK 0xE000 /* LDO7_SLP_SLOT - [15:13] */ +#define WM831X_LDO7_SLP_SLOT_SHIFT 13 /* LDO7_SLP_SLOT - [15:13] */ +#define WM831X_LDO7_SLP_SLOT_WIDTH 3 /* LDO7_SLP_SLOT - [15:13] */ +#define WM831X_LDO7_SLP_MODE 0x0100 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_MODE_MASK 0x0100 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_MODE_SHIFT 8 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_MODE_WIDTH 1 /* LDO7_SLP_MODE */ +#define WM831X_LDO7_SLP_VSEL_MASK 0x001F /* LDO7_SLP_VSEL - [4:0] */ +#define WM831X_LDO7_SLP_VSEL_SHIFT 0 /* LDO7_SLP_VSEL - [4:0] */ +#define WM831X_LDO7_SLP_VSEL_WIDTH 5 /* LDO7_SLP_VSEL - [4:0] */ + +/* + * R16509 (0x407D) - LDO8 Control + */ +#define WM831X_LDO8_ERR_ACT_MASK 0xC000 /* LDO8_ERR_ACT - [15:14] */ +#define WM831X_LDO8_ERR_ACT_SHIFT 14 /* LDO8_ERR_ACT - [15:14] */ +#define WM831X_LDO8_ERR_ACT_WIDTH 2 /* LDO8_ERR_ACT - [15:14] */ +#define WM831X_LDO8_HWC_SRC_MASK 0x1800 /* LDO8_HWC_SRC - [12:11] */ +#define WM831X_LDO8_HWC_SRC_SHIFT 11 /* LDO8_HWC_SRC - [12:11] */ +#define WM831X_LDO8_HWC_SRC_WIDTH 2 /* LDO8_HWC_SRC - [12:11] */ +#define WM831X_LDO8_HWC_VSEL 0x0400 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_VSEL_MASK 0x0400 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_VSEL_SHIFT 10 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_VSEL_WIDTH 1 /* LDO8_HWC_VSEL */ +#define WM831X_LDO8_HWC_MODE_MASK 0x0300 /* LDO8_HWC_MODE - [9:8] */ +#define WM831X_LDO8_HWC_MODE_SHIFT 8 /* LDO8_HWC_MODE - [9:8] */ +#define WM831X_LDO8_HWC_MODE_WIDTH 2 /* LDO8_HWC_MODE - [9:8] */ +#define WM831X_LDO8_FLT 0x0080 /* LDO8_FLT */ +#define WM831X_LDO8_FLT_MASK 0x0080 /* LDO8_FLT */ +#define WM831X_LDO8_FLT_SHIFT 7 /* LDO8_FLT */ +#define WM831X_LDO8_FLT_WIDTH 1 /* LDO8_FLT */ +#define WM831X_LDO8_SWI 0x0040 /* LDO8_SWI */ +#define WM831X_LDO8_SWI_MASK 0x0040 /* LDO8_SWI */ +#define WM831X_LDO8_SWI_SHIFT 6 /* LDO8_SWI */ +#define WM831X_LDO8_SWI_WIDTH 1 /* LDO8_SWI */ + +/* + * R16510 (0x407E) - LDO8 ON Control + */ +#define WM831X_LDO8_ON_SLOT_MASK 0xE000 /* LDO8_ON_SLOT - [15:13] */ +#define WM831X_LDO8_ON_SLOT_SHIFT 13 /* LDO8_ON_SLOT - [15:13] */ +#define WM831X_LDO8_ON_SLOT_WIDTH 3 /* LDO8_ON_SLOT - [15:13] */ +#define WM831X_LDO8_ON_MODE 0x0100 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_MODE_MASK 0x0100 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_MODE_SHIFT 8 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_MODE_WIDTH 1 /* LDO8_ON_MODE */ +#define WM831X_LDO8_ON_VSEL_MASK 0x001F /* LDO8_ON_VSEL - [4:0] */ +#define WM831X_LDO8_ON_VSEL_SHIFT 0 /* LDO8_ON_VSEL - [4:0] */ +#define WM831X_LDO8_ON_VSEL_WIDTH 5 /* LDO8_ON_VSEL - [4:0] */ + +/* + * R16511 (0x407F) - LDO8 SLEEP Control + */ +#define WM831X_LDO8_SLP_SLOT_MASK 0xE000 /* LDO8_SLP_SLOT - [15:13] */ +#define WM831X_LDO8_SLP_SLOT_SHIFT 13 /* LDO8_SLP_SLOT - [15:13] */ +#define WM831X_LDO8_SLP_SLOT_WIDTH 3 /* LDO8_SLP_SLOT - [15:13] */ +#define WM831X_LDO8_SLP_MODE 0x0100 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_MODE_MASK 0x0100 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_MODE_SHIFT 8 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_MODE_WIDTH 1 /* LDO8_SLP_MODE */ +#define WM831X_LDO8_SLP_VSEL_MASK 0x001F /* LDO8_SLP_VSEL - [4:0] */ +#define WM831X_LDO8_SLP_VSEL_SHIFT 0 /* LDO8_SLP_VSEL - [4:0] */ +#define WM831X_LDO8_SLP_VSEL_WIDTH 5 /* LDO8_SLP_VSEL - [4:0] */ + +/* + * R16512 (0x4080) - LDO9 Control + */ +#define WM831X_LDO9_ERR_ACT_MASK 0xC000 /* LDO9_ERR_ACT - [15:14] */ +#define WM831X_LDO9_ERR_ACT_SHIFT 14 /* LDO9_ERR_ACT - [15:14] */ +#define WM831X_LDO9_ERR_ACT_WIDTH 2 /* LDO9_ERR_ACT - [15:14] */ +#define 
WM831X_LDO9_HWC_SRC_MASK 0x1800 /* LDO9_HWC_SRC - [12:11] */ +#define WM831X_LDO9_HWC_SRC_SHIFT 11 /* LDO9_HWC_SRC - [12:11] */ +#define WM831X_LDO9_HWC_SRC_WIDTH 2 /* LDO9_HWC_SRC - [12:11] */ +#define WM831X_LDO9_HWC_VSEL 0x0400 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_VSEL_MASK 0x0400 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_VSEL_SHIFT 10 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_VSEL_WIDTH 1 /* LDO9_HWC_VSEL */ +#define WM831X_LDO9_HWC_MODE_MASK 0x0300 /* LDO9_HWC_MODE - [9:8] */ +#define WM831X_LDO9_HWC_MODE_SHIFT 8 /* LDO9_HWC_MODE - [9:8] */ +#define WM831X_LDO9_HWC_MODE_WIDTH 2 /* LDO9_HWC_MODE - [9:8] */ +#define WM831X_LDO9_FLT 0x0080 /* LDO9_FLT */ +#define WM831X_LDO9_FLT_MASK 0x0080 /* LDO9_FLT */ +#define WM831X_LDO9_FLT_SHIFT 7 /* LDO9_FLT */ +#define WM831X_LDO9_FLT_WIDTH 1 /* LDO9_FLT */ +#define WM831X_LDO9_SWI 0x0040 /* LDO9_SWI */ +#define WM831X_LDO9_SWI_MASK 0x0040 /* LDO9_SWI */ +#define WM831X_LDO9_SWI_SHIFT 6 /* LDO9_SWI */ +#define WM831X_LDO9_SWI_WIDTH 1 /* LDO9_SWI */ + +/* + * R16513 (0x4081) - LDO9 ON Control + */ +#define WM831X_LDO9_ON_SLOT_MASK 0xE000 /* LDO9_ON_SLOT - [15:13] */ +#define WM831X_LDO9_ON_SLOT_SHIFT 13 /* LDO9_ON_SLOT - [15:13] */ +#define WM831X_LDO9_ON_SLOT_WIDTH 3 /* LDO9_ON_SLOT - [15:13] */ +#define WM831X_LDO9_ON_MODE 0x0100 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_MODE_MASK 0x0100 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_MODE_SHIFT 8 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_MODE_WIDTH 1 /* LDO9_ON_MODE */ +#define WM831X_LDO9_ON_VSEL_MASK 0x001F /* LDO9_ON_VSEL - [4:0] */ +#define WM831X_LDO9_ON_VSEL_SHIFT 0 /* LDO9_ON_VSEL - [4:0] */ +#define WM831X_LDO9_ON_VSEL_WIDTH 5 /* LDO9_ON_VSEL - [4:0] */ + +/* + * R16514 (0x4082) - LDO9 SLEEP Control + */ +#define WM831X_LDO9_SLP_SLOT_MASK 0xE000 /* LDO9_SLP_SLOT - [15:13] */ +#define WM831X_LDO9_SLP_SLOT_SHIFT 13 /* LDO9_SLP_SLOT - [15:13] */ +#define WM831X_LDO9_SLP_SLOT_WIDTH 3 /* LDO9_SLP_SLOT - [15:13] */ +#define WM831X_LDO9_SLP_MODE 0x0100 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_MODE_MASK 0x0100 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_MODE_SHIFT 8 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_MODE_WIDTH 1 /* LDO9_SLP_MODE */ +#define WM831X_LDO9_SLP_VSEL_MASK 0x001F /* LDO9_SLP_VSEL - [4:0] */ +#define WM831X_LDO9_SLP_VSEL_SHIFT 0 /* LDO9_SLP_VSEL - [4:0] */ +#define WM831X_LDO9_SLP_VSEL_WIDTH 5 /* LDO9_SLP_VSEL - [4:0] */ + +/* + * R16515 (0x4083) - LDO10 Control + */ +#define WM831X_LDO10_ERR_ACT_MASK 0xC000 /* LDO10_ERR_ACT - [15:14] */ +#define WM831X_LDO10_ERR_ACT_SHIFT 14 /* LDO10_ERR_ACT - [15:14] */ +#define WM831X_LDO10_ERR_ACT_WIDTH 2 /* LDO10_ERR_ACT - [15:14] */ +#define WM831X_LDO10_HWC_SRC_MASK 0x1800 /* LDO10_HWC_SRC - [12:11] */ +#define WM831X_LDO10_HWC_SRC_SHIFT 11 /* LDO10_HWC_SRC - [12:11] */ +#define WM831X_LDO10_HWC_SRC_WIDTH 2 /* LDO10_HWC_SRC - [12:11] */ +#define WM831X_LDO10_HWC_VSEL 0x0400 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_VSEL_MASK 0x0400 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_VSEL_SHIFT 10 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_VSEL_WIDTH 1 /* LDO10_HWC_VSEL */ +#define WM831X_LDO10_HWC_MODE_MASK 0x0300 /* LDO10_HWC_MODE - [9:8] */ +#define WM831X_LDO10_HWC_MODE_SHIFT 8 /* LDO10_HWC_MODE - [9:8] */ +#define WM831X_LDO10_HWC_MODE_WIDTH 2 /* LDO10_HWC_MODE - [9:8] */ +#define WM831X_LDO10_FLT 0x0080 /* LDO10_FLT */ +#define WM831X_LDO10_FLT_MASK 0x0080 /* LDO10_FLT */ +#define WM831X_LDO10_FLT_SHIFT 7 /* LDO10_FLT */ +#define WM831X_LDO10_FLT_WIDTH 1 /* LDO10_FLT */ +#define WM831X_LDO10_SWI 0x0040 /* 
LDO10_SWI */ +#define WM831X_LDO10_SWI_MASK 0x0040 /* LDO10_SWI */ +#define WM831X_LDO10_SWI_SHIFT 6 /* LDO10_SWI */ +#define WM831X_LDO10_SWI_WIDTH 1 /* LDO10_SWI */ + +/* + * R16516 (0x4084) - LDO10 ON Control + */ +#define WM831X_LDO10_ON_SLOT_MASK 0xE000 /* LDO10_ON_SLOT - [15:13] */ +#define WM831X_LDO10_ON_SLOT_SHIFT 13 /* LDO10_ON_SLOT - [15:13] */ +#define WM831X_LDO10_ON_SLOT_WIDTH 3 /* LDO10_ON_SLOT - [15:13] */ +#define WM831X_LDO10_ON_MODE 0x0100 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_MODE_MASK 0x0100 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_MODE_SHIFT 8 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_MODE_WIDTH 1 /* LDO10_ON_MODE */ +#define WM831X_LDO10_ON_VSEL_MASK 0x001F /* LDO10_ON_VSEL - [4:0] */ +#define WM831X_LDO10_ON_VSEL_SHIFT 0 /* LDO10_ON_VSEL - [4:0] */ +#define WM831X_LDO10_ON_VSEL_WIDTH 5 /* LDO10_ON_VSEL - [4:0] */ + +/* + * R16517 (0x4085) - LDO10 SLEEP Control + */ +#define WM831X_LDO10_SLP_SLOT_MASK 0xE000 /* LDO10_SLP_SLOT - [15:13] */ +#define WM831X_LDO10_SLP_SLOT_SHIFT 13 /* LDO10_SLP_SLOT - [15:13] */ +#define WM831X_LDO10_SLP_SLOT_WIDTH 3 /* LDO10_SLP_SLOT - [15:13] */ +#define WM831X_LDO10_SLP_MODE 0x0100 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_MODE_MASK 0x0100 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_MODE_SHIFT 8 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_MODE_WIDTH 1 /* LDO10_SLP_MODE */ +#define WM831X_LDO10_SLP_VSEL_MASK 0x001F /* LDO10_SLP_VSEL - [4:0] */ +#define WM831X_LDO10_SLP_VSEL_SHIFT 0 /* LDO10_SLP_VSEL - [4:0] */ +#define WM831X_LDO10_SLP_VSEL_WIDTH 5 /* LDO10_SLP_VSEL - [4:0] */ + +/* + * R16519 (0x4087) - LDO11 ON Control + */ +#define WM831X_LDO11_ON_SLOT_MASK 0xE000 /* LDO11_ON_SLOT - [15:13] */ +#define WM831X_LDO11_ON_SLOT_SHIFT 13 /* LDO11_ON_SLOT - [15:13] */ +#define WM831X_LDO11_ON_SLOT_WIDTH 3 /* LDO11_ON_SLOT - [15:13] */ +#define WM831X_LDO11_OFFENA 0x1000 /* LDO11_OFFENA */ +#define WM831X_LDO11_OFFENA_MASK 0x1000 /* LDO11_OFFENA */ +#define WM831X_LDO11_OFFENA_SHIFT 12 /* LDO11_OFFENA */ +#define WM831X_LDO11_OFFENA_WIDTH 1 /* LDO11_OFFENA */ +#define WM831X_LDO11_VSEL_SRC 0x0080 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_VSEL_SRC_MASK 0x0080 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_VSEL_SRC_SHIFT 7 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_VSEL_SRC_WIDTH 1 /* LDO11_VSEL_SRC */ +#define WM831X_LDO11_ON_VSEL_MASK 0x000F /* LDO11_ON_VSEL - [3:0] */ +#define WM831X_LDO11_ON_VSEL_SHIFT 0 /* LDO11_ON_VSEL - [3:0] */ +#define WM831X_LDO11_ON_VSEL_WIDTH 4 /* LDO11_ON_VSEL - [3:0] */ + +/* + * R16520 (0x4088) - LDO11 SLEEP Control + */ +#define WM831X_LDO11_SLP_SLOT_MASK 0xE000 /* LDO11_SLP_SLOT - [15:13] */ +#define WM831X_LDO11_SLP_SLOT_SHIFT 13 /* LDO11_SLP_SLOT - [15:13] */ +#define WM831X_LDO11_SLP_SLOT_WIDTH 3 /* LDO11_SLP_SLOT - [15:13] */ +#define WM831X_LDO11_SLP_VSEL_MASK 0x000F /* LDO11_SLP_VSEL - [3:0] */ +#define WM831X_LDO11_SLP_VSEL_SHIFT 0 /* LDO11_SLP_VSEL - [3:0] */ +#define WM831X_LDO11_SLP_VSEL_WIDTH 4 /* LDO11_SLP_VSEL - [3:0] */ + +/* + * R16526 (0x408E) - Power Good Source 1 + */ +#define WM831X_DC4_OK 0x0008 /* DC4_OK */ +#define WM831X_DC4_OK_MASK 0x0008 /* DC4_OK */ +#define WM831X_DC4_OK_SHIFT 3 /* DC4_OK */ +#define WM831X_DC4_OK_WIDTH 1 /* DC4_OK */ +#define WM831X_DC3_OK 0x0004 /* DC3_OK */ +#define WM831X_DC3_OK_MASK 0x0004 /* DC3_OK */ +#define WM831X_DC3_OK_SHIFT 2 /* DC3_OK */ +#define WM831X_DC3_OK_WIDTH 1 /* DC3_OK */ +#define WM831X_DC2_OK 0x0002 /* DC2_OK */ +#define WM831X_DC2_OK_MASK 0x0002 /* DC2_OK */ +#define WM831X_DC2_OK_SHIFT 1 /* DC2_OK */ +#define 
WM831X_DC2_OK_WIDTH 1 /* DC2_OK */ +#define WM831X_DC1_OK 0x0001 /* DC1_OK */ +#define WM831X_DC1_OK_MASK 0x0001 /* DC1_OK */ +#define WM831X_DC1_OK_SHIFT 0 /* DC1_OK */ +#define WM831X_DC1_OK_WIDTH 1 /* DC1_OK */ + +/* + * R16527 (0x408F) - Power Good Source 2 + */ +#define WM831X_LDO10_OK 0x0200 /* LDO10_OK */ +#define WM831X_LDO10_OK_MASK 0x0200 /* LDO10_OK */ +#define WM831X_LDO10_OK_SHIFT 9 /* LDO10_OK */ +#define WM831X_LDO10_OK_WIDTH 1 /* LDO10_OK */ +#define WM831X_LDO9_OK 0x0100 /* LDO9_OK */ +#define WM831X_LDO9_OK_MASK 0x0100 /* LDO9_OK */ +#define WM831X_LDO9_OK_SHIFT 8 /* LDO9_OK */ +#define WM831X_LDO9_OK_WIDTH 1 /* LDO9_OK */ +#define WM831X_LDO8_OK 0x0080 /* LDO8_OK */ +#define WM831X_LDO8_OK_MASK 0x0080 /* LDO8_OK */ +#define WM831X_LDO8_OK_SHIFT 7 /* LDO8_OK */ +#define WM831X_LDO8_OK_WIDTH 1 /* LDO8_OK */ +#define WM831X_LDO7_OK 0x0040 /* LDO7_OK */ +#define WM831X_LDO7_OK_MASK 0x0040 /* LDO7_OK */ +#define WM831X_LDO7_OK_SHIFT 6 /* LDO7_OK */ +#define WM831X_LDO7_OK_WIDTH 1 /* LDO7_OK */ +#define WM831X_LDO6_OK 0x0020 /* LDO6_OK */ +#define WM831X_LDO6_OK_MASK 0x0020 /* LDO6_OK */ +#define WM831X_LDO6_OK_SHIFT 5 /* LDO6_OK */ +#define WM831X_LDO6_OK_WIDTH 1 /* LDO6_OK */ +#define WM831X_LDO5_OK 0x0010 /* LDO5_OK */ +#define WM831X_LDO5_OK_MASK 0x0010 /* LDO5_OK */ +#define WM831X_LDO5_OK_SHIFT 4 /* LDO5_OK */ +#define WM831X_LDO5_OK_WIDTH 1 /* LDO5_OK */ +#define WM831X_LDO4_OK 0x0008 /* LDO4_OK */ +#define WM831X_LDO4_OK_MASK 0x0008 /* LDO4_OK */ +#define WM831X_LDO4_OK_SHIFT 3 /* LDO4_OK */ +#define WM831X_LDO4_OK_WIDTH 1 /* LDO4_OK */ +#define WM831X_LDO3_OK 0x0004 /* LDO3_OK */ +#define WM831X_LDO3_OK_MASK 0x0004 /* LDO3_OK */ +#define WM831X_LDO3_OK_SHIFT 2 /* LDO3_OK */ +#define WM831X_LDO3_OK_WIDTH 1 /* LDO3_OK */ +#define WM831X_LDO2_OK 0x0002 /* LDO2_OK */ +#define WM831X_LDO2_OK_MASK 0x0002 /* LDO2_OK */ +#define WM831X_LDO2_OK_SHIFT 1 /* LDO2_OK */ +#define WM831X_LDO2_OK_WIDTH 1 /* LDO2_OK */ +#define WM831X_LDO1_OK 0x0001 /* LDO1_OK */ +#define WM831X_LDO1_OK_MASK 0x0001 /* LDO1_OK */ +#define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ +#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ + +#define WM831X_ISINK_MAX_ISEL 55 +extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; + +#endif diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h new file mode 100644 index 00000000..6bc090d0 --- /dev/null +++ b/include/linux/mfd/wm831x/status.h @@ -0,0 +1,34 @@ +/* + * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_STATUS_H__ +#define __MFD_WM831X_STATUS_H__ + +#define WM831X_LED_SRC_MASK 0xC000 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_SHIFT 14 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_WIDTH 2 /* LED_SRC - [15:14] */ +#define WM831X_LED_MODE_MASK 0x0300 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_SHIFT 8 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_WIDTH 2 /* LED_MODE - [9:8] */ +#define WM831X_LED_SEQ_LEN_MASK 0x0030 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_SHIFT 4 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_WIDTH 2 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_DUR_MASK 0x000C /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_SHIFT 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_WIDTH 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUTY_CYC_MASK 0x0003 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_SHIFT 0 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_WIDTH 2 /* LED_DUTY_CYC - [1:0] */ + +#endif diff --git a/include/linux/mfd/wm831x/watchdog.h b/include/linux/mfd/wm831x/watchdog.h new file mode 100644 index 00000000..97a99b52 --- /dev/null +++ b/include/linux/mfd/wm831x/watchdog.h @@ -0,0 +1,52 @@ +/* + * include/linux/mfd/wm831x/watchdog.h -- Watchdog for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_WATCHDOG_H__ +#define __MFD_WM831X_WATCHDOG_H__ + + +/* + * R16388 (0x4004) - Watchdog + */ +#define WM831X_WDOG_ENA 0x8000 /* WDOG_ENA */ +#define WM831X_WDOG_ENA_MASK 0x8000 /* WDOG_ENA */ +#define WM831X_WDOG_ENA_SHIFT 15 /* WDOG_ENA */ +#define WM831X_WDOG_ENA_WIDTH 1 /* WDOG_ENA */ +#define WM831X_WDOG_DEBUG 0x4000 /* WDOG_DEBUG */ +#define WM831X_WDOG_DEBUG_MASK 0x4000 /* WDOG_DEBUG */ +#define WM831X_WDOG_DEBUG_SHIFT 14 /* WDOG_DEBUG */ +#define WM831X_WDOG_DEBUG_WIDTH 1 /* WDOG_DEBUG */ +#define WM831X_WDOG_RST_SRC 0x2000 /* WDOG_RST_SRC */ +#define WM831X_WDOG_RST_SRC_MASK 0x2000 /* WDOG_RST_SRC */ +#define WM831X_WDOG_RST_SRC_SHIFT 13 /* WDOG_RST_SRC */ +#define WM831X_WDOG_RST_SRC_WIDTH 1 /* WDOG_RST_SRC */ +#define WM831X_WDOG_SLPENA 0x1000 /* WDOG_SLPENA */ +#define WM831X_WDOG_SLPENA_MASK 0x1000 /* WDOG_SLPENA */ +#define WM831X_WDOG_SLPENA_SHIFT 12 /* WDOG_SLPENA */ +#define WM831X_WDOG_SLPENA_WIDTH 1 /* WDOG_SLPENA */ +#define WM831X_WDOG_RESET 0x0800 /* WDOG_RESET */ +#define WM831X_WDOG_RESET_MASK 0x0800 /* WDOG_RESET */ +#define WM831X_WDOG_RESET_SHIFT 11 /* WDOG_RESET */ +#define WM831X_WDOG_RESET_WIDTH 1 /* WDOG_RESET */ +#define WM831X_WDOG_SECACT_MASK 0x0300 /* WDOG_SECACT - [9:8] */ +#define WM831X_WDOG_SECACT_SHIFT 8 /* WDOG_SECACT - [9:8] */ +#define WM831X_WDOG_SECACT_WIDTH 2 /* WDOG_SECACT - [9:8] */ +#define WM831X_WDOG_PRIMACT_MASK 0x0030 /* WDOG_PRIMACT - [5:4] */ +#define WM831X_WDOG_PRIMACT_SHIFT 4 /* WDOG_PRIMACT - [5:4] */ +#define WM831X_WDOG_PRIMACT_WIDTH 2 /* WDOG_PRIMACT - [5:4] */ +#define WM831X_WDOG_TO_MASK 0x0007 /* WDOG_TO - [2:0] */ +#define WM831X_WDOG_TO_SHIFT 0 /* WDOG_TO - [2:0] */ +#define WM831X_WDOG_TO_WIDTH 3 /* WDOG_TO - [2:0] */ + +#endif diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h new file mode 100644 index 00000000..bd581c6f --- /dev/null +++ 
b/include/linux/mfd/wm8350/audio.h @@ -0,0 +1,628 @@ +/* + * audio.h -- Audio Driver for Wolfson WM8350 PMIC + * + * Copyright 2007, 2008 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_WM8350_AUDIO_H_ +#define __LINUX_MFD_WM8350_AUDIO_H_ + +#include <linux/platform_device.h> + +#define WM8350_CLOCK_CONTROL_1 0x28 +#define WM8350_CLOCK_CONTROL_2 0x29 +#define WM8350_FLL_CONTROL_1 0x2A +#define WM8350_FLL_CONTROL_2 0x2B +#define WM8350_FLL_CONTROL_3 0x2C +#define WM8350_FLL_CONTROL_4 0x2D +#define WM8350_DAC_CONTROL 0x30 +#define WM8350_DAC_DIGITAL_VOLUME_L 0x32 +#define WM8350_DAC_DIGITAL_VOLUME_R 0x33 +#define WM8350_DAC_LR_RATE 0x35 +#define WM8350_DAC_CLOCK_CONTROL 0x36 +#define WM8350_DAC_MUTE 0x3A +#define WM8350_DAC_MUTE_VOLUME 0x3B +#define WM8350_DAC_SIDE 0x3C +#define WM8350_ADC_CONTROL 0x40 +#define WM8350_ADC_DIGITAL_VOLUME_L 0x42 +#define WM8350_ADC_DIGITAL_VOLUME_R 0x43 +#define WM8350_ADC_DIVIDER 0x44 +#define WM8350_ADC_LR_RATE 0x46 +#define WM8350_INPUT_CONTROL 0x48 +#define WM8350_IN3_INPUT_CONTROL 0x49 +#define WM8350_MIC_BIAS_CONTROL 0x4A +#define WM8350_OUTPUT_CONTROL 0x4C +#define WM8350_JACK_DETECT 0x4D +#define WM8350_ANTI_POP_CONTROL 0x4E +#define WM8350_LEFT_INPUT_VOLUME 0x50 +#define WM8350_RIGHT_INPUT_VOLUME 0x51 +#define WM8350_LEFT_MIXER_CONTROL 0x58 +#define WM8350_RIGHT_MIXER_CONTROL 0x59 +#define WM8350_OUT3_MIXER_CONTROL 0x5C +#define WM8350_OUT4_MIXER_CONTROL 0x5D +#define WM8350_OUTPUT_LEFT_MIXER_VOLUME 0x60 +#define WM8350_OUTPUT_RIGHT_MIXER_VOLUME 0x61 +#define WM8350_INPUT_MIXER_VOLUME_L 0x62 +#define WM8350_INPUT_MIXER_VOLUME_R 0x63 +#define WM8350_INPUT_MIXER_VOLUME 0x64 +#define WM8350_LOUT1_VOLUME 0x68 +#define WM8350_ROUT1_VOLUME 0x69 +#define WM8350_LOUT2_VOLUME 0x6A +#define WM8350_ROUT2_VOLUME 0x6B +#define WM8350_BEEP_VOLUME 0x6F +#define WM8350_AI_FORMATING 0x70 +#define WM8350_ADC_DAC_COMP 0x71 +#define WM8350_AI_ADC_CONTROL 0x72 +#define WM8350_AI_DAC_CONTROL 0x73 +#define WM8350_AIF_TEST 0x74 +#define WM8350_JACK_PIN_STATUS 0xE7 + +/* Bit values for R08 (0x08) */ +#define WM8350_CODEC_ISEL_1_5 0 /* x1.5 */ +#define WM8350_CODEC_ISEL_1_0 1 /* x1.0 */ +#define WM8350_CODEC_ISEL_0_75 2 /* x0.75 */ +#define WM8350_CODEC_ISEL_0_5 3 /* x0.5 */ + +#define WM8350_VMID_OFF 0 +#define WM8350_VMID_300K 1 +#define WM8350_VMID_50K 2 +#define WM8350_VMID_5K 3 + +/* + * R40 (0x28) - Clock Control 1 + */ +#define WM8350_TOCLK_RATE 0x4000 +#define WM8350_MCLK_SEL 0x0800 +#define WM8350_MCLK_DIV_MASK 0x0100 +#define WM8350_BCLK_DIV_MASK 0x00F0 +#define WM8350_OPCLK_DIV_MASK 0x0007 + +/* + * R41 (0x29) - Clock Control 2 + */ +#define WM8350_LRC_ADC_SEL 0x8000 +#define WM8350_MCLK_DIR 0x0001 + +/* + * R42 (0x2A) - FLL Control 1 + */ +#define WM8350_FLL_DITHER_WIDTH_MASK 0x3000 +#define WM8350_FLL_DITHER_HP 0x0800 +#define WM8350_FLL_OUTDIV_MASK 0x0700 +#define WM8350_FLL_RSP_RATE_MASK 0x00F0 +#define WM8350_FLL_RATE_MASK 0x0007 + +/* + * R43 (0x2B) - FLL Control 2 + */ +#define WM8350_FLL_RATIO_MASK 0xF800 +#define WM8350_FLL_N_MASK 0x03FF + +/* + * R44 (0x2C) - FLL Control 3 + */ +#define WM8350_FLL_K_MASK 0xFFFF + +/* + * R45 (0x2D) - FLL Control 4 + */ +#define WM8350_FLL_FRAC 0x0020 +#define WM8350_FLL_SLOW_LOCK_REF 0x0010 +#define WM8350_FLL_CLK_SRC_MASK 0x0003 + +/* + * R48 (0x30) - 
DAC Control + */ +#define WM8350_DAC_MONO 0x2000 +#define WM8350_AIF_LRCLKRATE 0x1000 +#define WM8350_DEEMP_MASK 0x0030 +#define WM8350_DACL_DATINV 0x0002 +#define WM8350_DACR_DATINV 0x0001 + +/* + * R50 (0x32) - DAC Digital Volume L + */ +#define WM8350_DAC_VU 0x0100 +#define WM8350_DACL_VOL_MASK 0x00FF + +/* + * R51 (0x33) - DAC Digital Volume R + */ +#define WM8350_DAC_VU 0x0100 +#define WM8350_DACR_VOL_MASK 0x00FF + +/* + * R53 (0x35) - DAC LR Rate + */ +#define WM8350_DACLRC_ENA 0x0800 +#define WM8350_DACLRC_RATE_MASK 0x07FF + +/* + * R54 (0x36) - DAC Clock Control + */ +#define WM8350_DACCLK_POL 0x0010 +#define WM8350_DAC_CLKDIV_MASK 0x0007 + +/* + * R58 (0x3A) - DAC Mute + */ +#define WM8350_DAC_MUTE_ENA 0x4000 + +/* + * R59 (0x3B) - DAC Mute Volume + */ +#define WM8350_DAC_MUTEMODE 0x4000 +#define WM8350_DAC_MUTERATE 0x2000 +#define WM8350_DAC_SB_FILT 0x1000 + +/* + * R60 (0x3C) - DAC Side + */ +#define WM8350_ADC_TO_DACL_MASK 0x3000 +#define WM8350_ADC_TO_DACR_MASK 0x0C00 + +/* + * R64 (0x40) - ADC Control + */ +#define WM8350_ADC_HPF_CUT_MASK 0x0300 +#define WM8350_ADCL_DATINV 0x0002 +#define WM8350_ADCR_DATINV 0x0001 + +/* + * R66 (0x42) - ADC Digital Volume L + */ +#define WM8350_ADC_VU 0x0100 +#define WM8350_ADCL_VOL_MASK 0x00FF + +/* + * R67 (0x43) - ADC Digital Volume R + */ +#define WM8350_ADC_VU 0x0100 +#define WM8350_ADCR_VOL_MASK 0x00FF + +/* + * R68 (0x44) - ADC Divider + */ +#define WM8350_ADCL_DAC_SVOL_MASK 0x0F00 +#define WM8350_ADCR_DAC_SVOL_MASK 0x00F0 +#define WM8350_ADCCLK_POL 0x0008 +#define WM8350_ADC_CLKDIV_MASK 0x0007 + +/* + * R70 (0x46) - ADC LR Rate + */ +#define WM8350_ADCLRC_ENA 0x0800 +#define WM8350_ADCLRC_RATE_MASK 0x07FF + +/* + * R72 (0x48) - Input Control + */ +#define WM8350_IN2R_ENA 0x0400 +#define WM8350_IN1RN_ENA 0x0200 +#define WM8350_IN1RP_ENA 0x0100 +#define WM8350_IN2L_ENA 0x0004 +#define WM8350_IN1LN_ENA 0x0002 +#define WM8350_IN1LP_ENA 0x0001 + +/* + * R73 (0x49) - IN3 Input Control + */ +#define WM8350_IN3R_SHORT 0x4000 +#define WM8350_IN3L_SHORT 0x0040 + +/* + * R74 (0x4A) - Mic Bias Control + */ +#define WM8350_MICBSEL 0x4000 +#define WM8350_MCDTHR_MASK 0x001C +#define WM8350_MCDSCTHR_MASK 0x0003 + +/* + * R76 (0x4C) - Output Control + */ +#define WM8350_OUT4_VROI 0x0800 +#define WM8350_OUT3_VROI 0x0400 +#define WM8350_OUT2_VROI 0x0200 +#define WM8350_OUT1_VROI 0x0100 +#define WM8350_OUT2_FB 0x0004 +#define WM8350_OUT1_FB 0x0001 + +/* + * R77 (0x4D) - Jack Detect + */ +#define WM8350_JDL_ENA 0x8000 +#define WM8350_JDR_ENA 0x4000 + +/* + * R78 (0x4E) - Anti Pop Control + */ +#define WM8350_ANTI_POP_MASK 0x0300 +#define WM8350_DIS_OP_LN4_MASK 0x00C0 +#define WM8350_DIS_OP_LN3_MASK 0x0030 +#define WM8350_DIS_OP_OUT2_MASK 0x000C +#define WM8350_DIS_OP_OUT1_MASK 0x0003 + +/* + * R80 (0x50) - Left Input Volume + */ +#define WM8350_INL_MUTE 0x4000 +#define WM8350_INL_ZC 0x2000 +#define WM8350_IN_VU 0x0100 +#define WM8350_INL_VOL_MASK 0x00FC + +/* + * R81 (0x51) - Right Input Volume + */ +#define WM8350_INR_MUTE 0x4000 +#define WM8350_INR_ZC 0x2000 +#define WM8350_IN_VU 0x0100 +#define WM8350_INR_VOL_MASK 0x00FC + +/* + * R88 (0x58) - Left Mixer Control + */ +#define WM8350_DACR_TO_MIXOUTL 0x1000 +#define WM8350_DACL_TO_MIXOUTL 0x0800 +#define WM8350_IN3L_TO_MIXOUTL 0x0004 +#define WM8350_INR_TO_MIXOUTL 0x0002 +#define WM8350_INL_TO_MIXOUTL 0x0001 + +/* + * R89 (0x59) - Right Mixer Control + */ +#define WM8350_DACR_TO_MIXOUTR 0x1000 +#define WM8350_DACL_TO_MIXOUTR 0x0800 +#define WM8350_IN3R_TO_MIXOUTR 0x0008 +#define WM8350_INR_TO_MIXOUTR 
0x0002 +#define WM8350_INL_TO_MIXOUTR 0x0001 + +/* + * R92 (0x5C) - OUT3 Mixer Control + */ +#define WM8350_DACL_TO_OUT3 0x0800 +#define WM8350_MIXINL_TO_OUT3 0x0100 +#define WM8350_OUT4_TO_OUT3 0x0008 +#define WM8350_MIXOUTL_TO_OUT3 0x0001 + +/* + * R93 (0x5D) - OUT4 Mixer Control + */ +#define WM8350_DACR_TO_OUT4 0x1000 +#define WM8350_DACL_TO_OUT4 0x0800 +#define WM8350_OUT4_ATTN 0x0400 +#define WM8350_MIXINR_TO_OUT4 0x0200 +#define WM8350_OUT3_TO_OUT4 0x0004 +#define WM8350_MIXOUTR_TO_OUT4 0x0002 +#define WM8350_MIXOUTL_TO_OUT4 0x0001 + +/* + * R96 (0x60) - Output Left Mixer Volume + */ +#define WM8350_IN3L_MIXOUTL_VOL_MASK 0x0E00 +#define WM8350_IN3L_MIXOUTL_VOL_SHIFT 9 +#define WM8350_INR_MIXOUTL_VOL_MASK 0x00E0 +#define WM8350_INR_MIXOUTL_VOL_SHIFT 5 +#define WM8350_INL_MIXOUTL_VOL_MASK 0x000E +#define WM8350_INL_MIXOUTL_VOL_SHIFT 1 + +/* Bit values for R96 (0x60) */ +#define WM8350_IN3L_MIXOUTL_VOL_OFF 0 +#define WM8350_IN3L_MIXOUTL_VOL_M12DB 1 +#define WM8350_IN3L_MIXOUTL_VOL_M9DB 2 +#define WM8350_IN3L_MIXOUTL_VOL_M6DB 3 +#define WM8350_IN3L_MIXOUTL_VOL_M3DB 4 +#define WM8350_IN3L_MIXOUTL_VOL_0DB 5 +#define WM8350_IN3L_MIXOUTL_VOL_3DB 6 +#define WM8350_IN3L_MIXOUTL_VOL_6DB 7 + +#define WM8350_INR_MIXOUTL_VOL_OFF 0 +#define WM8350_INR_MIXOUTL_VOL_M12DB 1 +#define WM8350_INR_MIXOUTL_VOL_M9DB 2 +#define WM8350_INR_MIXOUTL_VOL_M6DB 3 +#define WM8350_INR_MIXOUTL_VOL_M3DB 4 +#define WM8350_INR_MIXOUTL_VOL_0DB 5 +#define WM8350_INR_MIXOUTL_VOL_3DB 6 +#define WM8350_INR_MIXOUTL_VOL_6DB 7 + +#define WM8350_INL_MIXOUTL_VOL_OFF 0 +#define WM8350_INL_MIXOUTL_VOL_M12DB 1 +#define WM8350_INL_MIXOUTL_VOL_M9DB 2 +#define WM8350_INL_MIXOUTL_VOL_M6DB 3 +#define WM8350_INL_MIXOUTL_VOL_M3DB 4 +#define WM8350_INL_MIXOUTL_VOL_0DB 5 +#define WM8350_INL_MIXOUTL_VOL_3DB 6 +#define WM8350_INL_MIXOUTL_VOL_6DB 7 + +/* + * R97 (0x61) - Output Right Mixer Volume + */ +#define WM8350_IN3R_MIXOUTR_VOL_MASK 0xE000 +#define WM8350_IN3R_MIXOUTR_VOL_SHIFT 13 +#define WM8350_INR_MIXOUTR_VOL_MASK 0x00E0 +#define WM8350_INR_MIXOUTR_VOL_SHIFT 5 +#define WM8350_INL_MIXOUTR_VOL_MASK 0x000E +#define WM8350_INL_MIXOUTR_VOL_SHIFT 1 + +/* Bit values for R97 (0x61) */ +#define WM8350_IN3R_MIXOUTR_VOL_OFF 0 +#define WM8350_IN3R_MIXOUTR_VOL_M12DB 1 +#define WM8350_IN3R_MIXOUTR_VOL_M9DB 2 +#define WM8350_IN3R_MIXOUTR_VOL_M6DB 3 +#define WM8350_IN3R_MIXOUTR_VOL_M3DB 4 +#define WM8350_IN3R_MIXOUTR_VOL_0DB 5 +#define WM8350_IN3R_MIXOUTR_VOL_3DB 6 +#define WM8350_IN3R_MIXOUTR_VOL_6DB 7 + +#define WM8350_INR_MIXOUTR_VOL_OFF 0 +#define WM8350_INR_MIXOUTR_VOL_M12DB 1 +#define WM8350_INR_MIXOUTR_VOL_M9DB 2 +#define WM8350_INR_MIXOUTR_VOL_M6DB 3 +#define WM8350_INR_MIXOUTR_VOL_M3DB 4 +#define WM8350_INR_MIXOUTR_VOL_0DB 5 +#define WM8350_INR_MIXOUTR_VOL_3DB 6 +#define WM8350_INR_MIXOUTR_VOL_6DB 7 + +#define WM8350_INL_MIXOUTR_VOL_OFF 0 +#define WM8350_INL_MIXOUTR_VOL_M12DB 1 +#define WM8350_INL_MIXOUTR_VOL_M9DB 2 +#define WM8350_INL_MIXOUTR_VOL_M6DB 3 +#define WM8350_INL_MIXOUTR_VOL_M3DB 4 +#define WM8350_INL_MIXOUTR_VOL_0DB 5 +#define WM8350_INL_MIXOUTR_VOL_3DB 6 +#define WM8350_INL_MIXOUTR_VOL_6DB 7 + +/* + * R98 (0x62) - Input Mixer Volume L + */ +#define WM8350_IN3L_MIXINL_VOL_MASK 0x0E00 +#define WM8350_IN2L_MIXINL_VOL_MASK 0x000E +#define WM8350_INL_MIXINL_VOL 0x0001 + +/* + * R99 (0x63) - Input Mixer Volume R + */ +#define WM8350_IN3R_MIXINR_VOL_MASK 0xE000 +#define WM8350_IN2R_MIXINR_VOL_MASK 0x00E0 +#define WM8350_INR_MIXINR_VOL 0x0001 + +/* + * R100 (0x64) - Input Mixer Volume + */ +#define WM8350_OUT4_MIXIN_DST 0x8000 +#define
WM8350_OUT4_MIXIN_VOL_MASK 0x000E + +/* + * R104 (0x68) - LOUT1 Volume + */ +#define WM8350_OUT1L_MUTE 0x4000 +#define WM8350_OUT1L_ZC 0x2000 +#define WM8350_OUT1_VU 0x0100 +#define WM8350_OUT1L_VOL_MASK 0x00FC +#define WM8350_OUT1L_VOL_SHIFT 2 + +/* + * R105 (0x69) - ROUT1 Volume + */ +#define WM8350_OUT1R_MUTE 0x4000 +#define WM8350_OUT1R_ZC 0x2000 +#define WM8350_OUT1_VU 0x0100 +#define WM8350_OUT1R_VOL_MASK 0x00FC +#define WM8350_OUT1R_VOL_SHIFT 2 + +/* + * R106 (0x6A) - LOUT2 Volume + */ +#define WM8350_OUT2L_MUTE 0x4000 +#define WM8350_OUT2L_ZC 0x2000 +#define WM8350_OUT2_VU 0x0100 +#define WM8350_OUT2L_VOL_MASK 0x00FC + +/* + * R107 (0x6B) - ROUT2 Volume + */ +#define WM8350_OUT2R_MUTE 0x4000 +#define WM8350_OUT2R_ZC 0x2000 +#define WM8350_OUT2R_INV 0x0400 +#define WM8350_OUT2R_INV_MUTE 0x0200 +#define WM8350_OUT2_VU 0x0100 +#define WM8350_OUT2R_VOL_MASK 0x00FC + +/* + * R111 (0x6F) - BEEP Volume + */ +#define WM8350_IN3R_OUT2R_VOL_MASK 0x00E0 + +/* + * R112 (0x70) - AI Formating + */ +#define WM8350_AIF_BCLK_INV 0x8000 +#define WM8350_AIF_TRI 0x2000 +#define WM8350_AIF_LRCLK_INV 0x1000 +#define WM8350_AIF_WL_MASK 0x0C00 +#define WM8350_AIF_FMT_MASK 0x0300 + +/* + * R113 (0x71) - ADC DAC COMP + */ +#define WM8350_DAC_COMP 0x0080 +#define WM8350_DAC_COMPMODE 0x0040 +#define WM8350_ADC_COMP 0x0020 +#define WM8350_ADC_COMPMODE 0x0010 +#define WM8350_LOOPBACK 0x0001 + +/* + * R114 (0x72) - AI ADC Control + */ +#define WM8350_AIFADC_PD 0x0080 +#define WM8350_AIFADCL_SRC 0x0040 +#define WM8350_AIFADCR_SRC 0x0020 +#define WM8350_AIFADC_TDM_CHAN 0x0010 +#define WM8350_AIFADC_TDM 0x0008 + +/* + * R115 (0x73) - AI DAC Control + */ +#define WM8350_BCLK_MSTR 0x4000 +#define WM8350_AIFDAC_PD 0x0080 +#define WM8350_DACL_SRC 0x0040 +#define WM8350_DACR_SRC 0x0020 +#define WM8350_AIFDAC_TDM_CHAN 0x0010 +#define WM8350_AIFDAC_TDM 0x0008 +#define WM8350_DAC_BOOST_MASK 0x0003 + +/* + * R116 (0x74) - AIF Test + */ +#define WM8350_CODEC_BYP 0x4000 +#define WM8350_AIFADC_WR_TST 0x2000 +#define WM8350_AIFADC_RD_TST 0x1000 +#define WM8350_AIFDAC_WR_TST 0x0800 +#define WM8350_AIFDAC_RD_TST 0x0400 +#define WM8350_AIFADC_ASYN 0x0020 +#define WM8350_AIFDAC_ASYN 0x0010 + +/* + * R231 (0xE7) - Jack Status + */ +#define WM8350_JACK_L_LVL 0x0800 +#define WM8350_JACK_R_LVL 0x0400 +#define WM8350_JACK_MICSCD_LVL 0x0200 +#define WM8350_JACK_MICSD_LVL 0x0100 + +/* + * WM8350 Platform setup + */ +#define WM8350_S_CURVE_NONE 0x0 +#define WM8350_S_CURVE_FAST 0x1 +#define WM8350_S_CURVE_MEDIUM 0x2 +#define WM8350_S_CURVE_SLOW 0x3 + +#define WM8350_DISCHARGE_OFF 0x0 +#define WM8350_DISCHARGE_FAST 0x1 +#define WM8350_DISCHARGE_MEDIUM 0x2 +#define WM8350_DISCHARGE_SLOW 0x3 + +#define WM8350_TIE_OFF_500R 0x0 +#define WM8350_TIE_OFF_30K 0x1 + +/* + * Clock sources & directions + */ +#define WM8350_SYSCLK 0 + +#define WM8350_MCLK_SEL_PLL_MCLK 0 +#define WM8350_MCLK_SEL_PLL_DAC 1 +#define WM8350_MCLK_SEL_PLL_ADC 2 +#define WM8350_MCLK_SEL_PLL_32K 3 +#define WM8350_MCLK_SEL_MCLK 5 + +/* clock divider IDs */ +#define WM8350_ADC_CLKDIV 0 +#define WM8350_DAC_CLKDIV 1 +#define WM8350_BCLK_CLKDIV 2 +#define WM8350_OPCLK_CLKDIV 3 +#define WM8350_TO_CLKDIV 4 +#define WM8350_SYS_CLKDIV 5 +#define WM8350_DACLR_CLKDIV 6 +#define WM8350_ADCLR_CLKDIV 7 + +/* ADC clock dividers */ +#define WM8350_ADCDIV_1 0x0 +#define WM8350_ADCDIV_1_5 0x1 +#define WM8350_ADCDIV_2 0x2 +#define WM8350_ADCDIV_3 0x3 +#define WM8350_ADCDIV_4 0x4 +#define WM8350_ADCDIV_5_5 0x5 +#define WM8350_ADCDIV_6 0x6 + +/* DAC clock dividers */ +#define WM8350_DACDIV_1 0x0
+#define WM8350_DACDIV_1_5 0x1 +#define WM8350_DACDIV_2 0x2 +#define WM8350_DACDIV_3 0x3 +#define WM8350_DACDIV_4 0x4 +#define WM8350_DACDIV_5_5 0x5 +#define WM8350_DACDIV_6 0x6 + +/* BCLK clock dividers */ +#define WM8350_BCLK_DIV_1 (0x0 << 4) +#define WM8350_BCLK_DIV_1_5 (0x1 << 4) +#define WM8350_BCLK_DIV_2 (0x2 << 4) +#define WM8350_BCLK_DIV_3 (0x3 << 4) +#define WM8350_BCLK_DIV_4 (0x4 << 4) +#define WM8350_BCLK_DIV_5_5 (0x5 << 4) +#define WM8350_BCLK_DIV_6 (0x6 << 4) +#define WM8350_BCLK_DIV_8 (0x7 << 4) +#define WM8350_BCLK_DIV_11 (0x8 << 4) +#define WM8350_BCLK_DIV_12 (0x9 << 4) +#define WM8350_BCLK_DIV_16 (0xa << 4) +#define WM8350_BCLK_DIV_22 (0xb << 4) +#define WM8350_BCLK_DIV_24 (0xc << 4) +#define WM8350_BCLK_DIV_32 (0xd << 4) +#define WM8350_BCLK_DIV_44 (0xe << 4) +#define WM8350_BCLK_DIV_48 (0xf << 4) + +/* Sys (MCLK) clock dividers */ +#define WM8350_MCLK_DIV_1 (0x0 << 8) +#define WM8350_MCLK_DIV_2 (0x1 << 8) + +/* OP clock dividers */ +#define WM8350_OPCLK_DIV_1 0x0 +#define WM8350_OPCLK_DIV_2 0x1 +#define WM8350_OPCLK_DIV_3 0x2 +#define WM8350_OPCLK_DIV_4 0x3 +#define WM8350_OPCLK_DIV_5_5 0x4 +#define WM8350_OPCLK_DIV_6 0x5 + +/* DAI ID */ +#define WM8350_HIFI_DAI 0 + +/* + * Audio interrupts. + */ +#define WM8350_IRQ_CODEC_JCK_DET_L 39 +#define WM8350_IRQ_CODEC_JCK_DET_R 40 +#define WM8350_IRQ_CODEC_MICSCD 41 +#define WM8350_IRQ_CODEC_MICD 42 + +/* + * WM8350 Platform data. + * + * This must be initialised per platform for best audio performance. + * Please see WM8350 datasheet for information. + */ +struct wm8350_audio_platform_data { + int vmid_discharge_msecs; /* VMID --> OFF discharge time */ + int drain_msecs; /* OFF drain time */ + int cap_discharge_msecs; /* Cap ON (from OFF) discharge time */ + int vmid_charge_msecs; /* vmid power up time */ + u32 vmid_s_curve:2; /* vmid enable s curve speed */ + u32 dis_out4:2; /* out4 discharge speed */ + u32 dis_out3:2; /* out3 discharge speed */ + u32 dis_out2:2; /* out2 discharge speed */ + u32 dis_out1:2; /* out1 discharge speed */ + u32 vroi_out4:1; /* out4 tie off */ + u32 vroi_out3:1; /* out3 tie off */ + u32 vroi_out2:1; /* out2 tie off */ + u32 vroi_out1:1; /* out1 tie off */ + u32 vroi_enable:1; /* enable tie off */ + u32 codec_current_on:2; /* current level ON */ + u32 codec_current_standby:2; /* current level STANDBY */ + u32 codec_current_charge:2; /* codec current @ vmid charge */ +}; + +struct snd_soc_codec; + +struct wm8350_codec { + struct platform_device *pdev; + struct snd_soc_codec *codec; + struct wm8350_audio_platform_data *platform_data; +}; + +#endif diff --git a/include/linux/mfd/wm8350/comparator.h b/include/linux/mfd/wm8350/comparator.h new file mode 100644 index 00000000..54bc5d0f --- /dev/null +++ b/include/linux/mfd/wm8350/comparator.h @@ -0,0 +1,175 @@ +/* + * comparator.h -- Comparator Aux ADC for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __LINUX_MFD_WM8350_COMPARATOR_H_ +#define __LINUX_MFD_WM8350_COMPARATOR_H_ + +/* + * Registers + */ + +#define WM8350_DIGITISER_CONTROL_1 0x90 +#define WM8350_DIGITISER_CONTROL_2 0x91 +#define WM8350_AUX1_READBACK 0x98 +#define WM8350_AUX2_READBACK 0x99 +#define WM8350_AUX3_READBACK 0x9A +#define WM8350_AUX4_READBACK 0x9B +#define WM8350_CHIP_TEMP_READBACK 0x9F +#define WM8350_GENERIC_COMPARATOR_CONTROL 0xA3 +#define WM8350_GENERIC_COMPARATOR_1 0xA4 +#define WM8350_GENERIC_COMPARATOR_2 0xA5 +#define WM8350_GENERIC_COMPARATOR_3 0xA6 +#define WM8350_GENERIC_COMPARATOR_4 0xA7 + +/* + * R144 (0x90) - Digitiser Control (1) + */ +#define WM8350_AUXADC_CTC 0x4000 +#define WM8350_AUXADC_POLL 0x2000 +#define WM8350_AUXADC_HIB_MODE 0x1000 +#define WM8350_AUXADC_SEL8 0x0080 +#define WM8350_AUXADC_SEL7 0x0040 +#define WM8350_AUXADC_SEL6 0x0020 +#define WM8350_AUXADC_SEL5 0x0010 +#define WM8350_AUXADC_SEL4 0x0008 +#define WM8350_AUXADC_SEL3 0x0004 +#define WM8350_AUXADC_SEL2 0x0002 +#define WM8350_AUXADC_SEL1 0x0001 + +/* + * R145 (0x91) - Digitiser Control (2) + */ +#define WM8350_AUXADC_MASKMODE_MASK 0x3000 +#define WM8350_AUXADC_CRATE_MASK 0x0700 +#define WM8350_AUXADC_CAL 0x0004 +#define WM8350_AUX_RBMODE 0x0002 +#define WM8350_AUXADC_WAIT 0x0001 + +/* + * R152 (0x98) - AUX1 Readback + */ +#define WM8350_AUXADC_SCALE1_MASK 0x6000 +#define WM8350_AUXADC_REF1 0x1000 +#define WM8350_AUXADC_DATA1_MASK 0x0FFF + +/* + * R153 (0x99) - AUX2 Readback + */ +#define WM8350_AUXADC_SCALE2_MASK 0x6000 +#define WM8350_AUXADC_REF2 0x1000 +#define WM8350_AUXADC_DATA2_MASK 0x0FFF + +/* + * R154 (0x9A) - AUX3 Readback + */ +#define WM8350_AUXADC_SCALE3_MASK 0x6000 +#define WM8350_AUXADC_REF3 0x1000 +#define WM8350_AUXADC_DATA3_MASK 0x0FFF + +/* + * R155 (0x9B) - AUX4 Readback + */ +#define WM8350_AUXADC_SCALE4_MASK 0x6000 +#define WM8350_AUXADC_REF4 0x1000 +#define WM8350_AUXADC_DATA4_MASK 0x0FFF + +/* + * R156 (0x9C) - USB Voltage Readback + */ +#define WM8350_AUXADC_DATA_USB_MASK 0x0FFF + +/* + * R157 (0x9D) - LINE Voltage Readback + */ +#define WM8350_AUXADC_DATA_LINE_MASK 0x0FFF + +/* + * R158 (0x9E) - BATT Voltage Readback + */ +#define WM8350_AUXADC_DATA_BATT_MASK 0x0FFF + +/* + * R159 (0x9F) - Chip Temp Readback + */ +#define WM8350_AUXADC_DATA_CHIPTEMP_MASK 0x0FFF + +/* + * R163 (0xA3) - Generic Comparator Control + */ +#define WM8350_DCMP4_ENA 0x0008 +#define WM8350_DCMP3_ENA 0x0004 +#define WM8350_DCMP2_ENA 0x0002 +#define WM8350_DCMP1_ENA 0x0001 + +/* + * R164 (0xA4) - Generic comparator 1 + */ +#define WM8350_DCMP1_SRCSEL_MASK 0xE000 +#define WM8350_DCMP1_GT 0x1000 +#define WM8350_DCMP1_THR_MASK 0x0FFF + +/* + * R165 (0xA5) - Generic comparator 2 + */ +#define WM8350_DCMP2_SRCSEL_MASK 0xE000 +#define WM8350_DCMP2_GT 0x1000 +#define WM8350_DCMP2_THR_MASK 0x0FFF + +/* + * R166 (0xA6) - Generic comparator 3 + */ +#define WM8350_DCMP3_SRCSEL_MASK 0xE000 +#define WM8350_DCMP3_GT 0x1000 +#define WM8350_DCMP3_THR_MASK 0x0FFF + +/* + * R167 (0xA7) - Generic comparator 4 + */ +#define WM8350_DCMP4_SRCSEL_MASK 0xE000 +#define WM8350_DCMP4_GT 0x1000 +#define WM8350_DCMP4_THR_MASK 0x0FFF + +/* + * Interrupts. 
+ */ +#define WM8350_IRQ_AUXADC_DATARDY 16 +#define WM8350_IRQ_AUXADC_DCOMP4 17 +#define WM8350_IRQ_AUXADC_DCOMP3 18 +#define WM8350_IRQ_AUXADC_DCOMP2 19 +#define WM8350_IRQ_AUXADC_DCOMP1 20 +#define WM8350_IRQ_SYS_HYST_COMP_FAIL 21 +#define WM8350_IRQ_SYS_CHIP_GT115 22 +#define WM8350_IRQ_SYS_CHIP_GT140 23 + +/* + * USB/2, LINE & BATT = ((VRTC * 2) / 4095) * 1e6 uV + * Where VRTC = 2.7 V, giving approximately 1319 uV per LSB (WM8350_AUX_COEFF) + */ +#define WM8350_AUX_COEFF 1319 + +#define WM8350_AUXADC_AUX1 0 +#define WM8350_AUXADC_AUX2 1 +#define WM8350_AUXADC_AUX3 2 +#define WM8350_AUXADC_AUX4 3 +#define WM8350_AUXADC_USB 4 +#define WM8350_AUXADC_LINE 5 +#define WM8350_AUXADC_BATT 6 +#define WM8350_AUXADC_TEMP 7 + +struct wm8350; + +/* + * AUX ADC Readback + */ +int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, + int vref); + +#endif diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h new file mode 100644 index 00000000..98fcc977 --- /dev/null +++ b/include/linux/mfd/wm8350/core.h @@ -0,0 +1,713 @@ +/* + * core.h -- Core Driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_WM8350_CORE_H_ +#define __LINUX_MFD_WM8350_CORE_H_ + +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/interrupt.h> +#include <linux/completion.h> + +#include <linux/mfd/wm8350/audio.h> +#include <linux/mfd/wm8350/gpio.h> +#include <linux/mfd/wm8350/pmic.h> +#include <linux/mfd/wm8350/rtc.h> +#include <linux/mfd/wm8350/supply.h> +#include <linux/mfd/wm8350/wdt.h> + +/* + * Register values. + */ +#define WM8350_RESET_ID 0x00 +#define WM8350_ID 0x01 +#define WM8350_REVISION 0x02 +#define WM8350_SYSTEM_CONTROL_1 0x03 +#define WM8350_SYSTEM_CONTROL_2 0x04 +#define WM8350_SYSTEM_HIBERNATE 0x05 +#define WM8350_INTERFACE_CONTROL 0x06 +#define WM8350_POWER_MGMT_1 0x08 +#define WM8350_POWER_MGMT_2 0x09 +#define WM8350_POWER_MGMT_3 0x0A +#define WM8350_POWER_MGMT_4 0x0B +#define WM8350_POWER_MGMT_5 0x0C +#define WM8350_POWER_MGMT_6 0x0D +#define WM8350_POWER_MGMT_7 0x0E + +#define WM8350_SYSTEM_INTERRUPTS 0x18 +#define WM8350_INT_STATUS_1 0x19 +#define WM8350_INT_STATUS_2 0x1A +#define WM8350_POWER_UP_INT_STATUS 0x1B +#define WM8350_UNDER_VOLTAGE_INT_STATUS 0x1C +#define WM8350_OVER_CURRENT_INT_STATUS 0x1D +#define WM8350_GPIO_INT_STATUS 0x1E +#define WM8350_COMPARATOR_INT_STATUS 0x1F +#define WM8350_SYSTEM_INTERRUPTS_MASK 0x20 +#define WM8350_INT_STATUS_1_MASK 0x21 +#define WM8350_INT_STATUS_2_MASK 0x22 +#define WM8350_POWER_UP_INT_STATUS_MASK 0x23 +#define WM8350_UNDER_VOLTAGE_INT_STATUS_MASK 0x24 +#define WM8350_OVER_CURRENT_INT_STATUS_MASK 0x25 +#define WM8350_GPIO_INT_STATUS_MASK 0x26 +#define WM8350_COMPARATOR_INT_STATUS_MASK 0x27 +#define WM8350_CHARGER_OVERRIDES 0xE2 +#define WM8350_MISC_OVERRIDES 0xE3 +#define WM8350_COMPARATOR_OVERRIDES 0xE7 +#define WM8350_STATE_MACHINE_STATUS 0xE9 + +#define WM8350_MAX_REGISTER 0xFF + +/* + * Field Definitions.
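+ *
+ * As a rough usage sketch (assuming a local variable r6 holding a cached
+ * copy of R6 (0x06) - Interface Control), a field is read by masking and
+ * shifting:
+ *
+ *	dev_addr = (r6 & WM8350_DEV_ADDR_MASK) >> WM8350_DEV_ADDR_SHIFT;
+ *
+ * Each field below has a _MASK covering its bit span and, where needed, a
+ * _SHIFT giving the position of its least significant bit; writing a field
+ * is the reverse: clear the mask bits, then OR in the new value shifted
+ * left by _SHIFT.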
+ */ + +/* + * R0 (0x00) - Reset/ID + */ +#define WM8350_SW_RESET_CHIP_ID_MASK 0xFFFF + +/* + * R1 (0x01) - ID + */ +#define WM8350_CHIP_REV_MASK 0x7000 +#define WM8350_CONF_STS_MASK 0x0C00 +#define WM8350_CUST_ID_MASK 0x00FF + +/* + * R2 (0x02) - Revision + */ +#define WM8350_MASK_REV_MASK 0x00FF + +/* + * R3 (0x03) - System Control 1 + */ +#define WM8350_CHIP_ON 0x8000 +#define WM8350_POWERCYCLE 0x2000 +#define WM8350_VCC_FAULT_OV 0x1000 +#define WM8350_REG_RSTB_TIME_MASK 0x0C00 +#define WM8350_BG_SLEEP 0x0200 +#define WM8350_MEM_VALID 0x0020 +#define WM8350_CHIP_SET_UP 0x0010 +#define WM8350_ON_DEB_T 0x0008 +#define WM8350_ON_POL 0x0002 +#define WM8350_IRQ_POL 0x0001 + +/* + * R4 (0x04) - System Control 2 + */ +#define WM8350_USB_SUSPEND_8MA 0x8000 +#define WM8350_USB_SUSPEND 0x4000 +#define WM8350_USB_MSTR 0x2000 +#define WM8350_USB_MSTR_SRC 0x1000 +#define WM8350_USB_500MA 0x0800 +#define WM8350_USB_NOLIM 0x0400 + +/* + * R5 (0x05) - System Hibernate + */ +#define WM8350_HIBERNATE 0x8000 +#define WM8350_WDOG_HIB_MODE 0x0080 +#define WM8350_REG_HIB_STARTUP_SEQ 0x0040 +#define WM8350_REG_RESET_HIB_MODE 0x0020 +#define WM8350_RST_HIB_MODE 0x0010 +#define WM8350_IRQ_HIB_MODE 0x0008 +#define WM8350_MEMRST_HIB_MODE 0x0004 +#define WM8350_PCCOMP_HIB_MODE 0x0002 +#define WM8350_TEMPMON_HIB_MODE 0x0001 + +/* + * R6 (0x06) - Interface Control + */ +#define WM8350_USE_DEV_PINS 0x8000 +#define WM8350_USE_DEV_PINS_MASK 0x8000 +#define WM8350_USE_DEV_PINS_SHIFT 15 +#define WM8350_DEV_ADDR_MASK 0x6000 +#define WM8350_DEV_ADDR_SHIFT 13 +#define WM8350_CONFIG_DONE 0x1000 +#define WM8350_CONFIG_DONE_MASK 0x1000 +#define WM8350_CONFIG_DONE_SHIFT 12 +#define WM8350_RECONFIG_AT_ON 0x0800 +#define WM8350_RECONFIG_AT_ON_MASK 0x0800 +#define WM8350_RECONFIG_AT_ON_SHIFT 11 +#define WM8350_AUTOINC 0x0200 +#define WM8350_AUTOINC_MASK 0x0200 +#define WM8350_AUTOINC_SHIFT 9 +#define WM8350_ARA 0x0100 +#define WM8350_ARA_MASK 0x0100 +#define WM8350_ARA_SHIFT 8 +#define WM8350_SPI_CFG 0x0008 +#define WM8350_SPI_CFG_MASK 0x0008 +#define WM8350_SPI_CFG_SHIFT 3 +#define WM8350_SPI_4WIRE 0x0004 +#define WM8350_SPI_4WIRE_MASK 0x0004 +#define WM8350_SPI_4WIRE_SHIFT 2 +#define WM8350_SPI_3WIRE 0x0002 +#define WM8350_SPI_3WIRE_MASK 0x0002 +#define WM8350_SPI_3WIRE_SHIFT 1 + +/* Bit values for R06 (0x06) */ +#define WM8350_USE_DEV_PINS_PRIMARY 0 +#define WM8350_USE_DEV_PINS_DEV 1 + +#define WM8350_DEV_ADDR_34 0 +#define WM8350_DEV_ADDR_36 1 +#define WM8350_DEV_ADDR_3C 2 +#define WM8350_DEV_ADDR_3E 3 + +#define WM8350_CONFIG_DONE_OFF 0 +#define WM8350_CONFIG_DONE_DONE 1 + +#define WM8350_RECONFIG_AT_ON_OFF 0 +#define WM8350_RECONFIG_AT_ON_ON 1 + +#define WM8350_AUTOINC_OFF 0 +#define WM8350_AUTOINC_ON 1 + +#define WM8350_ARA_OFF 0 +#define WM8350_ARA_ON 1 + +#define WM8350_SPI_CFG_CMOS 0 +#define WM8350_SPI_CFG_OD 1 + +#define WM8350_SPI_4WIRE_3WIRE 0 +#define WM8350_SPI_4WIRE_4WIRE 1 + +#define WM8350_SPI_3WIRE_I2C 0 +#define WM8350_SPI_3WIRE_SPI 1 + +/* + * R8 (0x08) - Power mgmt (1) + */ +#define WM8350_CODEC_ISEL_MASK 0xC000 +#define WM8350_VBUFEN 0x2000 +#define WM8350_OUTPUT_DRAIN_EN 0x0400 +#define WM8350_MIC_DET_ENA 0x0100 +#define WM8350_BIASEN 0x0020 +#define WM8350_MICBEN 0x0010 +#define WM8350_VMIDEN 0x0004 +#define WM8350_VMID_MASK 0x0003 +#define WM8350_VMID_SHIFT 0 + +/* + * R9 (0x09) - Power mgmt (2) + */ +#define WM8350_IN3R_ENA 0x0800 +#define WM8350_IN3L_ENA 0x0400 +#define WM8350_INR_ENA 0x0200 +#define WM8350_INL_ENA 0x0100 +#define WM8350_MIXINR_ENA 0x0080 +#define WM8350_MIXINL_ENA 0x0040 +#define 
WM8350_OUT4_ENA 0x0020 +#define WM8350_OUT3_ENA 0x0010 +#define WM8350_MIXOUTR_ENA 0x0002 +#define WM8350_MIXOUTL_ENA 0x0001 + +/* + * R10 (0x0A) - Power mgmt (3) + */ +#define WM8350_IN3R_TO_OUT2R 0x0080 +#define WM8350_OUT2R_ENA 0x0008 +#define WM8350_OUT2L_ENA 0x0004 +#define WM8350_OUT1R_ENA 0x0002 +#define WM8350_OUT1L_ENA 0x0001 + +/* + * R11 (0x0B) - Power mgmt (4) + */ +#define WM8350_SYSCLK_ENA 0x4000 +#define WM8350_ADC_HPF_ENA 0x2000 +#define WM8350_FLL_ENA 0x0800 +#define WM8350_FLL_OSC_ENA 0x0400 +#define WM8350_TOCLK_ENA 0x0100 +#define WM8350_DACR_ENA 0x0020 +#define WM8350_DACL_ENA 0x0010 +#define WM8350_ADCR_ENA 0x0008 +#define WM8350_ADCL_ENA 0x0004 + +/* + * R12 (0x0C) - Power mgmt (5) + */ +#define WM8350_CODEC_ENA 0x1000 +#define WM8350_RTC_TICK_ENA 0x0800 +#define WM8350_OSC32K_ENA 0x0400 +#define WM8350_CHG_ENA 0x0200 +#define WM8350_ACC_DET_ENA 0x0100 +#define WM8350_AUXADC_ENA 0x0080 +#define WM8350_DCMP4_ENA 0x0008 +#define WM8350_DCMP3_ENA 0x0004 +#define WM8350_DCMP2_ENA 0x0002 +#define WM8350_DCMP1_ENA 0x0001 + +/* + * R13 (0x0D) - Power mgmt (6) + */ +#define WM8350_LS_ENA 0x8000 +#define WM8350_LDO4_ENA 0x0800 +#define WM8350_LDO3_ENA 0x0400 +#define WM8350_LDO2_ENA 0x0200 +#define WM8350_LDO1_ENA 0x0100 +#define WM8350_DC6_ENA 0x0020 +#define WM8350_DC5_ENA 0x0010 +#define WM8350_DC4_ENA 0x0008 +#define WM8350_DC3_ENA 0x0004 +#define WM8350_DC2_ENA 0x0002 +#define WM8350_DC1_ENA 0x0001 + +/* + * R14 (0x0E) - Power mgmt (7) + */ +#define WM8350_CS2_ENA 0x0002 +#define WM8350_CS1_ENA 0x0001 + +/* + * R24 (0x18) - System Interrupts + */ +#define WM8350_OC_INT 0x2000 +#define WM8350_UV_INT 0x1000 +#define WM8350_PUTO_INT 0x0800 +#define WM8350_CS_INT 0x0200 +#define WM8350_EXT_INT 0x0100 +#define WM8350_CODEC_INT 0x0080 +#define WM8350_GP_INT 0x0040 +#define WM8350_AUXADC_INT 0x0020 +#define WM8350_RTC_INT 0x0010 +#define WM8350_SYS_INT 0x0008 +#define WM8350_CHG_INT 0x0004 +#define WM8350_USB_INT 0x0002 +#define WM8350_WKUP_INT 0x0001 + +/* + * R25 (0x19) - Interrupt Status 1 + */ +#define WM8350_CHG_BAT_HOT_EINT 0x8000 +#define WM8350_CHG_BAT_COLD_EINT 0x4000 +#define WM8350_CHG_BAT_FAIL_EINT 0x2000 +#define WM8350_CHG_TO_EINT 0x1000 +#define WM8350_CHG_END_EINT 0x0800 +#define WM8350_CHG_START_EINT 0x0400 +#define WM8350_CHG_FAST_RDY_EINT 0x0200 +#define WM8350_RTC_PER_EINT 0x0080 +#define WM8350_RTC_SEC_EINT 0x0040 +#define WM8350_RTC_ALM_EINT 0x0020 +#define WM8350_CHG_VBATT_LT_3P9_EINT 0x0004 +#define WM8350_CHG_VBATT_LT_3P1_EINT 0x0002 +#define WM8350_CHG_VBATT_LT_2P85_EINT 0x0001 + +/* + * R26 (0x1A) - Interrupt Status 2 + */ +#define WM8350_CS1_EINT 0x2000 +#define WM8350_CS2_EINT 0x1000 +#define WM8350_USB_LIMIT_EINT 0x0400 +#define WM8350_AUXADC_DATARDY_EINT 0x0100 +#define WM8350_AUXADC_DCOMP4_EINT 0x0080 +#define WM8350_AUXADC_DCOMP3_EINT 0x0040 +#define WM8350_AUXADC_DCOMP2_EINT 0x0020 +#define WM8350_AUXADC_DCOMP1_EINT 0x0010 +#define WM8350_SYS_HYST_COMP_FAIL_EINT 0x0008 +#define WM8350_SYS_CHIP_GT115_EINT 0x0004 +#define WM8350_SYS_CHIP_GT140_EINT 0x0002 +#define WM8350_SYS_WDOG_TO_EINT 0x0001 + +/* + * R27 (0x1B) - Power Up Interrupt Status + */ +#define WM8350_PUTO_LDO4_EINT 0x0800 +#define WM8350_PUTO_LDO3_EINT 0x0400 +#define WM8350_PUTO_LDO2_EINT 0x0200 +#define WM8350_PUTO_LDO1_EINT 0x0100 +#define WM8350_PUTO_DC6_EINT 0x0020 +#define WM8350_PUTO_DC5_EINT 0x0010 +#define WM8350_PUTO_DC4_EINT 0x0008 +#define WM8350_PUTO_DC3_EINT 0x0004 +#define WM8350_PUTO_DC2_EINT 0x0002 +#define WM8350_PUTO_DC1_EINT 0x0001 + +/* + * R28 (0x1C) - Under 
Voltage Interrupt status + */ +#define WM8350_UV_LDO4_EINT 0x0800 +#define WM8350_UV_LDO3_EINT 0x0400 +#define WM8350_UV_LDO2_EINT 0x0200 +#define WM8350_UV_LDO1_EINT 0x0100 +#define WM8350_UV_DC6_EINT 0x0020 +#define WM8350_UV_DC5_EINT 0x0010 +#define WM8350_UV_DC4_EINT 0x0008 +#define WM8350_UV_DC3_EINT 0x0004 +#define WM8350_UV_DC2_EINT 0x0002 +#define WM8350_UV_DC1_EINT 0x0001 + +/* + * R29 (0x1D) - Over Current Interrupt status + */ +#define WM8350_OC_LS_EINT 0x8000 + +/* + * R30 (0x1E) - GPIO Interrupt Status + */ +#define WM8350_GP12_EINT 0x1000 +#define WM8350_GP11_EINT 0x0800 +#define WM8350_GP10_EINT 0x0400 +#define WM8350_GP9_EINT 0x0200 +#define WM8350_GP8_EINT 0x0100 +#define WM8350_GP7_EINT 0x0080 +#define WM8350_GP6_EINT 0x0040 +#define WM8350_GP5_EINT 0x0020 +#define WM8350_GP4_EINT 0x0010 +#define WM8350_GP3_EINT 0x0008 +#define WM8350_GP2_EINT 0x0004 +#define WM8350_GP1_EINT 0x0002 +#define WM8350_GP0_EINT 0x0001 + +/* + * R31 (0x1F) - Comparator Interrupt Status + */ +#define WM8350_EXT_USB_FB_EINT 0x8000 +#define WM8350_EXT_WALL_FB_EINT 0x4000 +#define WM8350_EXT_BAT_FB_EINT 0x2000 +#define WM8350_CODEC_JCK_DET_L_EINT 0x0800 +#define WM8350_CODEC_JCK_DET_R_EINT 0x0400 +#define WM8350_CODEC_MICSCD_EINT 0x0200 +#define WM8350_CODEC_MICD_EINT 0x0100 +#define WM8350_WKUP_OFF_STATE_EINT 0x0040 +#define WM8350_WKUP_HIB_STATE_EINT 0x0020 +#define WM8350_WKUP_CONV_FAULT_EINT 0x0010 +#define WM8350_WKUP_WDOG_RST_EINT 0x0008 +#define WM8350_WKUP_GP_PWR_ON_EINT 0x0004 +#define WM8350_WKUP_ONKEY_EINT 0x0002 +#define WM8350_WKUP_GP_WAKEUP_EINT 0x0001 + +/* + * R32 (0x20) - System Interrupts Mask + */ +#define WM8350_IM_OC_INT 0x2000 +#define WM8350_IM_UV_INT 0x1000 +#define WM8350_IM_PUTO_INT 0x0800 +#define WM8350_IM_SPARE_INT 0x0400 +#define WM8350_IM_CS_INT 0x0200 +#define WM8350_IM_EXT_INT 0x0100 +#define WM8350_IM_CODEC_INT 0x0080 +#define WM8350_IM_GP_INT 0x0040 +#define WM8350_IM_AUXADC_INT 0x0020 +#define WM8350_IM_RTC_INT 0x0010 +#define WM8350_IM_SYS_INT 0x0008 +#define WM8350_IM_CHG_INT 0x0004 +#define WM8350_IM_USB_INT 0x0002 +#define WM8350_IM_WKUP_INT 0x0001 + +/* + * R33 (0x21) - Interrupt Status 1 Mask + */ +#define WM8350_IM_CHG_BAT_HOT_EINT 0x8000 +#define WM8350_IM_CHG_BAT_COLD_EINT 0x4000 +#define WM8350_IM_CHG_BAT_FAIL_EINT 0x2000 +#define WM8350_IM_CHG_TO_EINT 0x1000 +#define WM8350_IM_CHG_END_EINT 0x0800 +#define WM8350_IM_CHG_START_EINT 0x0400 +#define WM8350_IM_CHG_FAST_RDY_EINT 0x0200 +#define WM8350_IM_RTC_PER_EINT 0x0080 +#define WM8350_IM_RTC_SEC_EINT 0x0040 +#define WM8350_IM_RTC_ALM_EINT 0x0020 +#define WM8350_IM_CHG_VBATT_LT_3P9_EINT 0x0004 +#define WM8350_IM_CHG_VBATT_LT_3P1_EINT 0x0002 +#define WM8350_IM_CHG_VBATT_LT_2P85_EINT 0x0001 + +/* + * R34 (0x22) - Interrupt Status 2 Mask + */ +#define WM8350_IM_SPARE2_EINT 0x8000 +#define WM8350_IM_SPARE1_EINT 0x4000 +#define WM8350_IM_CS1_EINT 0x2000 +#define WM8350_IM_CS2_EINT 0x1000 +#define WM8350_IM_USB_LIMIT_EINT 0x0400 +#define WM8350_IM_AUXADC_DATARDY_EINT 0x0100 +#define WM8350_IM_AUXADC_DCOMP4_EINT 0x0080 +#define WM8350_IM_AUXADC_DCOMP3_EINT 0x0040 +#define WM8350_IM_AUXADC_DCOMP2_EINT 0x0020 +#define WM8350_IM_AUXADC_DCOMP1_EINT 0x0010 +#define WM8350_IM_SYS_HYST_COMP_FAIL_EINT 0x0008 +#define WM8350_IM_SYS_CHIP_GT115_EINT 0x0004 +#define WM8350_IM_SYS_CHIP_GT140_EINT 0x0002 +#define WM8350_IM_SYS_WDOG_TO_EINT 0x0001 + +/* + * R35 (0x23) - Power Up Interrupt Status Mask + */ +#define WM8350_IM_PUTO_LDO4_EINT 0x0800 +#define WM8350_IM_PUTO_LDO3_EINT 0x0400 +#define WM8350_IM_PUTO_LDO2_EINT 0x0200 
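/*
 * Illustrative sketch, not part of the upstream header: how the IM_* bits
 * defined above are typically combined with the wm8350_set_bits() /
 * wm8350_clear_bits() helpers declared further down in this header,
 * assuming the usual convention that a set IM_* bit masks the corresponding
 * interrupt.  The local register macro and function names here are
 * hypothetical; the address 0x21 is taken from the R33 (0x21) -
 * Interrupt Status 1 Mask comment above.
 */
#define EXAMPLE_WM8350_INT_STATUS_1_MASK_REG	0x21	/* R33, hypothetical local name */

static int example_wm8350_unmask_rtc_alarm(struct wm8350 *wm8350)
{
	/* Clear IM_RTC_ALM_EINT so the RTC alarm interrupt is delivered */
	return wm8350_clear_bits(wm8350, EXAMPLE_WM8350_INT_STATUS_1_MASK_REG,
				 WM8350_IM_RTC_ALM_EINT);
}

static int example_wm8350_mask_rtc_alarm(struct wm8350 *wm8350)
{
	/* Set IM_RTC_ALM_EINT to suppress the RTC alarm interrupt */
	return wm8350_set_bits(wm8350, EXAMPLE_WM8350_INT_STATUS_1_MASK_REG,
			       WM8350_IM_RTC_ALM_EINT);
}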
+#define WM8350_IM_PUTO_LDO1_EINT 0x0100 +#define WM8350_IM_PUTO_DC6_EINT 0x0020 +#define WM8350_IM_PUTO_DC5_EINT 0x0010 +#define WM8350_IM_PUTO_DC4_EINT 0x0008 +#define WM8350_IM_PUTO_DC3_EINT 0x0004 +#define WM8350_IM_PUTO_DC2_EINT 0x0002 +#define WM8350_IM_PUTO_DC1_EINT 0x0001 + +/* + * R36 (0x24) - Under Voltage Interrupt status Mask + */ +#define WM8350_IM_UV_LDO4_EINT 0x0800 +#define WM8350_IM_UV_LDO3_EINT 0x0400 +#define WM8350_IM_UV_LDO2_EINT 0x0200 +#define WM8350_IM_UV_LDO1_EINT 0x0100 +#define WM8350_IM_UV_DC6_EINT 0x0020 +#define WM8350_IM_UV_DC5_EINT 0x0010 +#define WM8350_IM_UV_DC4_EINT 0x0008 +#define WM8350_IM_UV_DC3_EINT 0x0004 +#define WM8350_IM_UV_DC2_EINT 0x0002 +#define WM8350_IM_UV_DC1_EINT 0x0001 + +/* + * R37 (0x25) - Over Current Interrupt status Mask + */ +#define WM8350_IM_OC_LS_EINT 0x8000 + +/* + * R38 (0x26) - GPIO Interrupt Status Mask + */ +#define WM8350_IM_GP12_EINT 0x1000 +#define WM8350_IM_GP11_EINT 0x0800 +#define WM8350_IM_GP10_EINT 0x0400 +#define WM8350_IM_GP9_EINT 0x0200 +#define WM8350_IM_GP8_EINT 0x0100 +#define WM8350_IM_GP7_EINT 0x0080 +#define WM8350_IM_GP6_EINT 0x0040 +#define WM8350_IM_GP5_EINT 0x0020 +#define WM8350_IM_GP4_EINT 0x0010 +#define WM8350_IM_GP3_EINT 0x0008 +#define WM8350_IM_GP2_EINT 0x0004 +#define WM8350_IM_GP1_EINT 0x0002 +#define WM8350_IM_GP0_EINT 0x0001 + +/* + * R39 (0x27) - Comparator Interrupt Status Mask + */ +#define WM8350_IM_EXT_USB_FB_EINT 0x8000 +#define WM8350_IM_EXT_WALL_FB_EINT 0x4000 +#define WM8350_IM_EXT_BAT_FB_EINT 0x2000 +#define WM8350_IM_CODEC_JCK_DET_L_EINT 0x0800 +#define WM8350_IM_CODEC_JCK_DET_R_EINT 0x0400 +#define WM8350_IM_CODEC_MICSCD_EINT 0x0200 +#define WM8350_IM_CODEC_MICD_EINT 0x0100 +#define WM8350_IM_WKUP_OFF_STATE_EINT 0x0040 +#define WM8350_IM_WKUP_HIB_STATE_EINT 0x0020 +#define WM8350_IM_WKUP_CONV_FAULT_EINT 0x0010 +#define WM8350_IM_WKUP_WDOG_RST_EINT 0x0008 +#define WM8350_IM_WKUP_GP_PWR_ON_EINT 0x0004 +#define WM8350_IM_WKUP_ONKEY_EINT 0x0002 +#define WM8350_IM_WKUP_GP_WAKEUP_EINT 0x0001 + +/* + * R220 (0xDC) - RAM BIST 1 + */ +#define WM8350_READ_STATUS 0x0800 +#define WM8350_TSTRAM_CLK 0x0100 +#define WM8350_TSTRAM_CLK_ENA 0x0080 +#define WM8350_STARTSEQ 0x0040 +#define WM8350_READ_SRC 0x0020 +#define WM8350_COUNT_DIR 0x0010 +#define WM8350_TSTRAM_MODE_MASK 0x000E +#define WM8350_TSTRAM_ENA 0x0001 + +/* + * R225 (0xE1) - DCDC/LDO status + */ +#define WM8350_LS_STS 0x8000 +#define WM8350_LDO4_STS 0x0800 +#define WM8350_LDO3_STS 0x0400 +#define WM8350_LDO2_STS 0x0200 +#define WM8350_LDO1_STS 0x0100 +#define WM8350_DC6_STS 0x0020 +#define WM8350_DC5_STS 0x0010 +#define WM8350_DC4_STS 0x0008 +#define WM8350_DC3_STS 0x0004 +#define WM8350_DC2_STS 0x0002 +#define WM8350_DC1_STS 0x0001 + +/* + * R226 (0xE2) - Charger status + */ +#define WM8350_CHG_BATT_HOT_OVRDE 0x8000 +#define WM8350_CHG_BATT_COLD_OVRDE 0x4000 + +/* + * R227 (0xE3) - Misc Overrides + */ +#define WM8350_USB_LIMIT_OVRDE 0x0400 + +/* + * R227 (0xE7) - Comparator Overrides + */ +#define WM8350_USB_FB_OVRDE 0x8000 +#define WM8350_WALL_FB_OVRDE 0x4000 +#define WM8350_BATT_FB_OVRDE 0x2000 + + +/* + * R233 (0xE9) - State Machinine Status + */ +#define WM8350_USB_SM_MASK 0x0700 +#define WM8350_USB_SM_SHIFT 8 + +#define WM8350_USB_SM_100_SLV 1 +#define WM8350_USB_SM_500_SLV 5 +#define WM8350_USB_SM_STDBY_SLV 7 + +/* WM8350 wake up conditions */ +#define WM8350_IRQ_WKUP_OFF_STATE 43 +#define WM8350_IRQ_WKUP_HIB_STATE 44 +#define WM8350_IRQ_WKUP_CONV_FAULT 45 +#define WM8350_IRQ_WKUP_WDOG_RST 46 +#define WM8350_IRQ_WKUP_GP_PWR_ON 47 
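/*
 * Illustrative sketch, not part of the upstream header: registering a
 * handler for one of the wake-up interrupt numbers above via the
 * wm8350_register_irq() helper declared further down in this header.
 * The handler and the "example wakeup" name are hypothetical, and
 * <linux/interrupt.h> is assumed to be available (the IRQ helpers below
 * already rely on irq_handler_t and request_threaded_irq()).
 */
static irqreturn_t example_wkup_irq(int irq, void *data)
{
	/* A real driver would act on its own device state here */
	return IRQ_HANDLED;
}

static int example_hook_wkup_irq(struct wm8350 *wm8350, void *drvdata)
{
	/* The chip IRQ number is offset by wm8350->irq_base inside the helper */
	return wm8350_register_irq(wm8350, WM8350_IRQ_WKUP_GP_PWR_ON,
				   example_wkup_irq, 0, "example wakeup",
				   drvdata);
}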
+#define WM8350_IRQ_WKUP_ONKEY 48 +#define WM8350_IRQ_WKUP_GP_WAKEUP 49 + +/* wm8350 chip revisions */ +#define WM8350_REV_E 0x4 +#define WM8350_REV_F 0x5 +#define WM8350_REV_G 0x6 +#define WM8350_REV_H 0x7 + +#define WM8350_NUM_IRQ 63 + +#define WM8350_NUM_IRQ_REGS 7 + +struct wm8350_reg_access { + u16 readable; /* Mask of readable bits */ + u16 writable; /* Mask of writable bits */ + u16 vol; /* Mask of volatile bits */ +}; +extern const struct wm8350_reg_access wm8350_reg_io_map[]; +extern const u16 wm8350_mode0_defaults[]; +extern const u16 wm8350_mode1_defaults[]; +extern const u16 wm8350_mode2_defaults[]; +extern const u16 wm8350_mode3_defaults[]; +extern const u16 wm8351_mode0_defaults[]; +extern const u16 wm8351_mode1_defaults[]; +extern const u16 wm8351_mode2_defaults[]; +extern const u16 wm8351_mode3_defaults[]; +extern const u16 wm8352_mode0_defaults[]; +extern const u16 wm8352_mode1_defaults[]; +extern const u16 wm8352_mode2_defaults[]; +extern const u16 wm8352_mode3_defaults[]; + +struct wm8350; + +struct wm8350_hwmon { + struct platform_device *pdev; + struct device *classdev; +}; + +struct wm8350 { + struct device *dev; + + /* device IO */ + union { + struct i2c_client *i2c_client; + struct spi_device *spi_device; + }; + int (*read_dev)(struct wm8350 *wm8350, char reg, int size, void *dest); + int (*write_dev)(struct wm8350 *wm8350, char reg, int size, + void *src); + u16 *reg_cache; + + struct mutex auxadc_mutex; + struct completion auxadc_done; + + /* Interrupt handling */ + struct mutex irq_lock; + int chip_irq; + int irq_base; + u16 irq_masks[WM8350_NUM_IRQ_REGS]; + + /* Client devices */ + struct wm8350_codec codec; + struct wm8350_gpio gpio; + struct wm8350_hwmon hwmon; + struct wm8350_pmic pmic; + struct wm8350_power power; + struct wm8350_rtc rtc; + struct wm8350_wdt wdt; +}; + +/** + * Data to be supplied by the platform to initialise the WM8350. + * + * @init: Function called during driver initialisation. Should be + * used by the platform to configure GPIO functions and similar. + * @irq_high: Set if WM8350 IRQ is active high. + * @irq_base: Base IRQ for genirq (not currently used). + * @gpio_base: Base for gpiolib. + */ +struct wm8350_platform_data { + int (*init)(struct wm8350 *wm8350); + int irq_high; + int irq_base; + int gpio_base; +}; + + +/* + * WM8350 device initialisation and exit. 
+ */ +int wm8350_device_init(struct wm8350 *wm8350, int irq, + struct wm8350_platform_data *pdata); +void wm8350_device_exit(struct wm8350 *wm8350); + +/* + * WM8350 device IO + */ +int wm8350_clear_bits(struct wm8350 *wm8350, u16 reg, u16 mask); +int wm8350_set_bits(struct wm8350 *wm8350, u16 reg, u16 mask); +u16 wm8350_reg_read(struct wm8350 *wm8350, int reg); +int wm8350_reg_write(struct wm8350 *wm8350, int reg, u16 val); +int wm8350_reg_lock(struct wm8350 *wm8350); +int wm8350_reg_unlock(struct wm8350 *wm8350); +int wm8350_block_read(struct wm8350 *wm8350, int reg, int size, u16 *dest); +int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src); + +/* + * WM8350 internal interrupts + */ +static inline int wm8350_register_irq(struct wm8350 *wm8350, int irq, + irq_handler_t handler, + unsigned long flags, + const char *name, void *data) +{ + if (!wm8350->irq_base) + return -ENODEV; + + return request_threaded_irq(irq + wm8350->irq_base, NULL, + handler, flags, name, data); +} + +static inline void wm8350_free_irq(struct wm8350 *wm8350, int irq, void *data) +{ + free_irq(irq + wm8350->irq_base, data); +} + +static inline void wm8350_mask_irq(struct wm8350 *wm8350, int irq) +{ + disable_irq(irq + wm8350->irq_base); +} + +static inline void wm8350_unmask_irq(struct wm8350 *wm8350, int irq) +{ + enable_irq(irq + wm8350->irq_base); +} + +int wm8350_irq_init(struct wm8350 *wm8350, int irq, + struct wm8350_platform_data *pdata); +int wm8350_irq_exit(struct wm8350 *wm8350); + +#endif diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h new file mode 100644 index 00000000..d657bcd6 --- /dev/null +++ b/include/linux/mfd/wm8350/gpio.h @@ -0,0 +1,361 @@ +/* + * gpio.h -- GPIO Driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_WM8350_GPIO_H_ +#define __LINUX_MFD_WM8350_GPIO_H_ + +#include <linux/platform_device.h> + +/* + * GPIO Registers. 
+ */ +#define WM8350_GPIO_DEBOUNCE 0x80 +#define WM8350_GPIO_PIN_PULL_UP_CONTROL 0x81 +#define WM8350_GPIO_PULL_DOWN_CONTROL 0x82 +#define WM8350_GPIO_INT_MODE 0x83 +#define WM8350_GPIO_CONTROL 0x85 +#define WM8350_GPIO_CONFIGURATION_I_O 0x86 +#define WM8350_GPIO_PIN_POLARITY_TYPE 0x87 +#define WM8350_GPIO_FUNCTION_SELECT_1 0x8C +#define WM8350_GPIO_FUNCTION_SELECT_2 0x8D +#define WM8350_GPIO_FUNCTION_SELECT_3 0x8E +#define WM8350_GPIO_FUNCTION_SELECT_4 0x8F +#define WM8350_GPIO_LEVEL 0xE6 + +/* + * GPIO Functions + */ +#define WM8350_GPIO0_GPIO_IN 0x0 +#define WM8350_GPIO0_GPIO_OUT 0x0 +#define WM8350_GPIO0_PWR_ON_IN 0x1 +#define WM8350_GPIO0_PWR_ON_OUT 0x1 +#define WM8350_GPIO0_LDO_EN_IN 0x2 +#define WM8350_GPIO0_VRTC_OUT 0x2 +#define WM8350_GPIO0_LPWR1_IN 0x3 +#define WM8350_GPIO0_POR_B_OUT 0x3 + +#define WM8350_GPIO1_GPIO_IN 0x0 +#define WM8350_GPIO1_GPIO_OUT 0x0 +#define WM8350_GPIO1_PWR_ON_IN 0x1 +#define WM8350_GPIO1_DO_CONF_OUT 0x1 +#define WM8350_GPIO1_LDO_EN_IN 0x2 +#define WM8350_GPIO1_RESET_OUT 0x2 +#define WM8350_GPIO1_LPWR2_IN 0x3 +#define WM8350_GPIO1_MEMRST_OUT 0x3 + +#define WM8350_GPIO2_GPIO_IN 0x0 +#define WM8350_GPIO2_GPIO_OUT 0x0 +#define WM8350_GPIO2_PWR_ON_IN 0x1 +#define WM8350_GPIO2_PWR_ON_OUT 0x1 +#define WM8350_GPIO2_WAKE_UP_IN 0x2 +#define WM8350_GPIO2_VRTC_OUT 0x2 +#define WM8350_GPIO2_32KHZ_IN 0x3 +#define WM8350_GPIO2_32KHZ_OUT 0x3 + +#define WM8350_GPIO3_GPIO_IN 0x0 +#define WM8350_GPIO3_GPIO_OUT 0x0 +#define WM8350_GPIO3_PWR_ON_IN 0x1 +#define WM8350_GPIO3_P_CLK_OUT 0x1 +#define WM8350_GPIO3_LDO_EN_IN 0x2 +#define WM8350_GPIO3_VRTC_OUT 0x2 +#define WM8350_GPIO3_PWR_OFF_IN 0x3 +#define WM8350_GPIO3_32KHZ_OUT 0x3 + +#define WM8350_GPIO4_GPIO_IN 0x0 +#define WM8350_GPIO4_GPIO_OUT 0x0 +#define WM8350_GPIO4_MR_IN 0x1 +#define WM8350_GPIO4_MEM_RST_OUT 0x1 +#define WM8350_GPIO4_FLASH_IN 0x2 +#define WM8350_GPIO4_ADA_OUT 0x2 +#define WM8350_GPIO4_HIBERNATE_IN 0x3 +#define WM8350_GPIO4_FLASH_OUT 0x3 +#define WM8350_GPIO4_MICDET_OUT 0x4 +#define WM8350_GPIO4_MICSHT_OUT 0x5 + +#define WM8350_GPIO5_GPIO_IN 0x0 +#define WM8350_GPIO5_GPIO_OUT 0x0 +#define WM8350_GPIO5_LPWR1_IN 0x1 +#define WM8350_GPIO5_P_CLK_OUT 0x1 +#define WM8350_GPIO5_ADCLRCLK_IN 0x2 +#define WM8350_GPIO5_ADCLRCLK_OUT 0x2 +#define WM8350_GPIO5_HIBERNATE_IN 0x3 +#define WM8350_GPIO5_32KHZ_OUT 0x3 +#define WM8350_GPIO5_MICDET_OUT 0x4 +#define WM8350_GPIO5_MICSHT_OUT 0x5 +#define WM8350_GPIO5_ADA_OUT 0x6 +#define WM8350_GPIO5_OPCLK_OUT 0x7 + +#define WM8350_GPIO6_GPIO_IN 0x0 +#define WM8350_GPIO6_GPIO_OUT 0x0 +#define WM8350_GPIO6_LPWR2_IN 0x1 +#define WM8350_GPIO6_MEMRST_OUT 0x1 +#define WM8350_GPIO6_FLASH_IN 0x2 +#define WM8350_GPIO6_ADA_OUT 0x2 +#define WM8350_GPIO6_HIBERNATE_IN 0x3 +#define WM8350_GPIO6_RTC_OUT 0x3 +#define WM8350_GPIO6_MICDET_OUT 0x4 +#define WM8350_GPIO6_MICSHT_OUT 0x5 +#define WM8350_GPIO6_ADCLRCLKB_OUT 0x6 +#define WM8350_GPIO6_SDOUT_OUT 0x7 + +#define WM8350_GPIO7_GPIO_IN 0x0 +#define WM8350_GPIO7_GPIO_OUT 0x0 +#define WM8350_GPIO7_LPWR3_IN 0x1 +#define WM8350_GPIO7_P_CLK_OUT 0x1 +#define WM8350_GPIO7_MASK_IN 0x2 +#define WM8350_GPIO7_VCC_FAULT_OUT 0x2 +#define WM8350_GPIO7_HIBERNATE_IN 0x3 +#define WM8350_GPIO7_BATT_FAULT_OUT 0x3 +#define WM8350_GPIO7_MICDET_OUT 0x4 +#define WM8350_GPIO7_MICSHT_OUT 0x5 +#define WM8350_GPIO7_ADA_OUT 0x6 +#define WM8350_GPIO7_CSB_IN 0x7 + +#define WM8350_GPIO8_GPIO_IN 0x0 +#define WM8350_GPIO8_GPIO_OUT 0x0 +#define WM8350_GPIO8_MR_IN 0x1 +#define WM8350_GPIO8_VCC_FAULT_OUT 0x1 +#define WM8350_GPIO8_ADCBCLK_IN 0x2 +#define WM8350_GPIO8_ADCBCLK_OUT 
0x2 +#define WM8350_GPIO8_PWR_OFF_IN 0x3 +#define WM8350_GPIO8_BATT_FAULT_OUT 0x3 +#define WM8350_GPIO8_ALTSCL_IN 0xf + +#define WM8350_GPIO9_GPIO_IN 0x0 +#define WM8350_GPIO9_GPIO_OUT 0x0 +#define WM8350_GPIO9_HEARTBEAT_IN 0x1 +#define WM8350_GPIO9_VCC_FAULT_OUT 0x1 +#define WM8350_GPIO9_MASK_IN 0x2 +#define WM8350_GPIO9_LINE_GT_BATT_OUT 0x2 +#define WM8350_GPIO9_PWR_OFF_IN 0x3 +#define WM8350_GPIO9_BATT_FAULT_OUT 0x3 +#define WM8350_GPIO9_ALTSDA_OUT 0xf + +#define WM8350_GPIO10_GPIO_IN 0x0 +#define WM8350_GPIO10_GPIO_OUT 0x0 +#define WM8350_GPIO10_ISINKC_OUT 0x1 +#define WM8350_GPIO10_PWR_OFF_IN 0x2 +#define WM8350_GPIO10_LINE_GT_BATT_OUT 0x2 +#define WM8350_GPIO10_CHD_IND_IN 0x3 + +#define WM8350_GPIO11_GPIO_IN 0x0 +#define WM8350_GPIO11_GPIO_OUT 0x0 +#define WM8350_GPIO11_ISINKD_OUT 0x1 +#define WM8350_GPIO11_WAKEUP_IN 0x2 +#define WM8350_GPIO11_LINE_GT_BATT_OUT 0x2 +#define WM8350_GPIO11_CHD_IND_IN 0x3 + +#define WM8350_GPIO12_GPIO_IN 0x0 +#define WM8350_GPIO12_GPIO_OUT 0x0 +#define WM8350_GPIO12_ISINKE_OUT 0x1 +#define WM8350_GPIO12_LINE_GT_BATT_OUT 0x2 +#define WM8350_GPIO12_LINE_EN_OUT 0x3 +#define WM8350_GPIO12_32KHZ_OUT 0x4 + +#define WM8350_GPIO_DIR_IN 0 +#define WM8350_GPIO_DIR_OUT 1 +#define WM8350_GPIO_ACTIVE_LOW 0 +#define WM8350_GPIO_ACTIVE_HIGH 1 +#define WM8350_GPIO_PULL_NONE 0 +#define WM8350_GPIO_PULL_UP 1 +#define WM8350_GPIO_PULL_DOWN 2 +#define WM8350_GPIO_INVERT_OFF 0 +#define WM8350_GPIO_INVERT_ON 1 +#define WM8350_GPIO_DEBOUNCE_OFF 0 +#define WM8350_GPIO_DEBOUNCE_ON 1 + +/* + * R30 (0x1E) - GPIO Interrupt Status + */ +#define WM8350_GP12_EINT 0x1000 +#define WM8350_GP11_EINT 0x0800 +#define WM8350_GP10_EINT 0x0400 +#define WM8350_GP9_EINT 0x0200 +#define WM8350_GP8_EINT 0x0100 +#define WM8350_GP7_EINT 0x0080 +#define WM8350_GP6_EINT 0x0040 +#define WM8350_GP5_EINT 0x0020 +#define WM8350_GP4_EINT 0x0010 +#define WM8350_GP3_EINT 0x0008 +#define WM8350_GP2_EINT 0x0004 +#define WM8350_GP1_EINT 0x0002 +#define WM8350_GP0_EINT 0x0001 + + +/* + * R128 (0x80) - GPIO Debounce + */ +#define WM8350_GP12_DB 0x1000 +#define WM8350_GP11_DB 0x0800 +#define WM8350_GP10_DB 0x0400 +#define WM8350_GP9_DB 0x0200 +#define WM8350_GP8_DB 0x0100 +#define WM8350_GP7_DB 0x0080 +#define WM8350_GP6_DB 0x0040 +#define WM8350_GP5_DB 0x0020 +#define WM8350_GP4_DB 0x0010 +#define WM8350_GP3_DB 0x0008 +#define WM8350_GP2_DB 0x0004 +#define WM8350_GP1_DB 0x0002 +#define WM8350_GP0_DB 0x0001 + +/* + * R129 (0x81) - GPIO Pin pull up Control + */ +#define WM8350_GP12_PU 0x1000 +#define WM8350_GP11_PU 0x0800 +#define WM8350_GP10_PU 0x0400 +#define WM8350_GP9_PU 0x0200 +#define WM8350_GP8_PU 0x0100 +#define WM8350_GP7_PU 0x0080 +#define WM8350_GP6_PU 0x0040 +#define WM8350_GP5_PU 0x0020 +#define WM8350_GP4_PU 0x0010 +#define WM8350_GP3_PU 0x0008 +#define WM8350_GP2_PU 0x0004 +#define WM8350_GP1_PU 0x0002 +#define WM8350_GP0_PU 0x0001 + +/* + * R130 (0x82) - GPIO Pull down Control + */ +#define WM8350_GP12_PD 0x1000 +#define WM8350_GP11_PD 0x0800 +#define WM8350_GP10_PD 0x0400 +#define WM8350_GP9_PD 0x0200 +#define WM8350_GP8_PD 0x0100 +#define WM8350_GP7_PD 0x0080 +#define WM8350_GP6_PD 0x0040 +#define WM8350_GP5_PD 0x0020 +#define WM8350_GP4_PD 0x0010 +#define WM8350_GP3_PD 0x0008 +#define WM8350_GP2_PD 0x0004 +#define WM8350_GP1_PD 0x0002 +#define WM8350_GP0_PD 0x0001 + +/* + * R131 (0x83) - GPIO Interrupt Mode + */ +#define WM8350_GP12_INTMODE 0x1000 +#define WM8350_GP11_INTMODE 0x0800 +#define WM8350_GP10_INTMODE 0x0400 +#define WM8350_GP9_INTMODE 0x0200 +#define WM8350_GP8_INTMODE 0x0100 +#define 
WM8350_GP7_INTMODE 0x0080 +#define WM8350_GP6_INTMODE 0x0040 +#define WM8350_GP5_INTMODE 0x0020 +#define WM8350_GP4_INTMODE 0x0010 +#define WM8350_GP3_INTMODE 0x0008 +#define WM8350_GP2_INTMODE 0x0004 +#define WM8350_GP1_INTMODE 0x0002 +#define WM8350_GP0_INTMODE 0x0001 + +/* + * R133 (0x85) - GPIO Control + */ +#define WM8350_GP_DBTIME_MASK 0x00C0 + +/* + * R134 (0x86) - GPIO Configuration (i/o) + */ +#define WM8350_GP12_DIR 0x1000 +#define WM8350_GP11_DIR 0x0800 +#define WM8350_GP10_DIR 0x0400 +#define WM8350_GP9_DIR 0x0200 +#define WM8350_GP8_DIR 0x0100 +#define WM8350_GP7_DIR 0x0080 +#define WM8350_GP6_DIR 0x0040 +#define WM8350_GP5_DIR 0x0020 +#define WM8350_GP4_DIR 0x0010 +#define WM8350_GP3_DIR 0x0008 +#define WM8350_GP2_DIR 0x0004 +#define WM8350_GP1_DIR 0x0002 +#define WM8350_GP0_DIR 0x0001 + +/* + * R135 (0x87) - GPIO Pin Polarity / Type + */ +#define WM8350_GP12_CFG 0x1000 +#define WM8350_GP11_CFG 0x0800 +#define WM8350_GP10_CFG 0x0400 +#define WM8350_GP9_CFG 0x0200 +#define WM8350_GP8_CFG 0x0100 +#define WM8350_GP7_CFG 0x0080 +#define WM8350_GP6_CFG 0x0040 +#define WM8350_GP5_CFG 0x0020 +#define WM8350_GP4_CFG 0x0010 +#define WM8350_GP3_CFG 0x0008 +#define WM8350_GP2_CFG 0x0004 +#define WM8350_GP1_CFG 0x0002 +#define WM8350_GP0_CFG 0x0001 + +/* + * R140 (0x8C) - GPIO Function Select 1 + */ +#define WM8350_GP3_FN_MASK 0xF000 +#define WM8350_GP2_FN_MASK 0x0F00 +#define WM8350_GP1_FN_MASK 0x00F0 +#define WM8350_GP0_FN_MASK 0x000F + +/* + * R141 (0x8D) - GPIO Function Select 2 + */ +#define WM8350_GP7_FN_MASK 0xF000 +#define WM8350_GP6_FN_MASK 0x0F00 +#define WM8350_GP5_FN_MASK 0x00F0 +#define WM8350_GP4_FN_MASK 0x000F + +/* + * R142 (0x8E) - GPIO Function Select 3 + */ +#define WM8350_GP11_FN_MASK 0xF000 +#define WM8350_GP10_FN_MASK 0x0F00 +#define WM8350_GP9_FN_MASK 0x00F0 +#define WM8350_GP8_FN_MASK 0x000F + +/* + * R143 (0x8F) - GPIO Function Select 4 + */ +#define WM8350_GP12_FN_MASK 0x000F + +/* + * R230 (0xE6) - GPIO Pin Status + */ +#define WM8350_GP12_LVL 0x1000 +#define WM8350_GP11_LVL 0x0800 +#define WM8350_GP10_LVL 0x0400 +#define WM8350_GP9_LVL 0x0200 +#define WM8350_GP8_LVL 0x0100 +#define WM8350_GP7_LVL 0x0080 +#define WM8350_GP6_LVL 0x0040 +#define WM8350_GP5_LVL 0x0020 +#define WM8350_GP4_LVL 0x0010 +#define WM8350_GP3_LVL 0x0008 +#define WM8350_GP2_LVL 0x0004 +#define WM8350_GP1_LVL 0x0002 +#define WM8350_GP0_LVL 0x0001 + +struct wm8350; + +int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func, + int pol, int pull, int invert, int debounce); + +struct wm8350_gpio { + struct platform_device *pdev; +}; + +/* + * GPIO Interrupts + */ +#define WM8350_IRQ_GPIO(x) (50 + x) + +#endif diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h new file mode 100644 index 00000000..579b50ca --- /dev/null +++ b/include/linux/mfd/wm8350/pmic.h @@ -0,0 +1,781 @@ +/* + * pmic.h -- Power Management Driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_WM8350_PMIC_H +#define __LINUX_MFD_WM8350_PMIC_H + +#include <linux/platform_device.h> +#include <linux/leds.h> +#include <linux/regulator/machine.h> + +/* + * Register values. 
+ */ + +#define WM8350_CURRENT_SINK_DRIVER_A 0xAC +#define WM8350_CSA_FLASH_CONTROL 0xAD +#define WM8350_CURRENT_SINK_DRIVER_B 0xAE +#define WM8350_CSB_FLASH_CONTROL 0xAF +#define WM8350_DCDC_LDO_REQUESTED 0xB0 +#define WM8350_DCDC_ACTIVE_OPTIONS 0xB1 +#define WM8350_DCDC_SLEEP_OPTIONS 0xB2 +#define WM8350_POWER_CHECK_COMPARATOR 0xB3 +#define WM8350_DCDC1_CONTROL 0xB4 +#define WM8350_DCDC1_TIMEOUTS 0xB5 +#define WM8350_DCDC1_LOW_POWER 0xB6 +#define WM8350_DCDC2_CONTROL 0xB7 +#define WM8350_DCDC2_TIMEOUTS 0xB8 +#define WM8350_DCDC3_CONTROL 0xBA +#define WM8350_DCDC3_TIMEOUTS 0xBB +#define WM8350_DCDC3_LOW_POWER 0xBC +#define WM8350_DCDC4_CONTROL 0xBD +#define WM8350_DCDC4_TIMEOUTS 0xBE +#define WM8350_DCDC4_LOW_POWER 0xBF +#define WM8350_DCDC5_CONTROL 0xC0 +#define WM8350_DCDC5_TIMEOUTS 0xC1 +#define WM8350_DCDC6_CONTROL 0xC3 +#define WM8350_DCDC6_TIMEOUTS 0xC4 +#define WM8350_DCDC6_LOW_POWER 0xC5 +#define WM8350_LIMIT_SWITCH_CONTROL 0xC7 +#define WM8350_LDO1_CONTROL 0xC8 +#define WM8350_LDO1_TIMEOUTS 0xC9 +#define WM8350_LDO1_LOW_POWER 0xCA +#define WM8350_LDO2_CONTROL 0xCB +#define WM8350_LDO2_TIMEOUTS 0xCC +#define WM8350_LDO2_LOW_POWER 0xCD +#define WM8350_LDO3_CONTROL 0xCE +#define WM8350_LDO3_TIMEOUTS 0xCF +#define WM8350_LDO3_LOW_POWER 0xD0 +#define WM8350_LDO4_CONTROL 0xD1 +#define WM8350_LDO4_TIMEOUTS 0xD2 +#define WM8350_LDO4_LOW_POWER 0xD3 +#define WM8350_VCC_FAULT_MASKS 0xD7 +#define WM8350_MAIN_BANDGAP_CONTROL 0xD8 +#define WM8350_OSC_CONTROL 0xD9 +#define WM8350_RTC_TICK_CONTROL 0xDA +#define WM8350_SECURITY 0xDB +#define WM8350_RAM_BIST_1 0xDC +#define WM8350_DCDC_LDO_STATUS 0xE1 +#define WM8350_GPIO_PIN_STATUS 0xE6 + +#define WM8350_DCDC1_FORCE_PWM 0xF8 +#define WM8350_DCDC3_FORCE_PWM 0xFA +#define WM8350_DCDC4_FORCE_PWM 0xFB +#define WM8350_DCDC6_FORCE_PWM 0xFD + +/* + * R172 (0xAC) - Current Sink Driver A + */ +#define WM8350_CS1_HIB_MODE 0x1000 +#define WM8350_CS1_HIB_MODE_MASK 0x1000 +#define WM8350_CS1_HIB_MODE_SHIFT 12 +#define WM8350_CS1_ISEL_MASK 0x003F +#define WM8350_CS1_ISEL_SHIFT 0 + +/* Bit values for R172 (0xAC) */ +#define WM8350_CS1_HIB_MODE_DISABLE 0 +#define WM8350_CS1_HIB_MODE_LEAVE 1 + +#define WM8350_CS1_ISEL_220M 0x3F + +/* + * R173 (0xAD) - CSA Flash control + */ +#define WM8350_CS1_FLASH_MODE 0x8000 +#define WM8350_CS1_TRIGSRC 0x4000 +#define WM8350_CS1_DRIVE 0x2000 +#define WM8350_CS1_FLASH_DUR_MASK 0x0300 +#define WM8350_CS1_OFF_RAMP_MASK 0x0030 +#define WM8350_CS1_ON_RAMP_MASK 0x0003 + +/* + * R174 (0xAE) - Current Sink Driver B + */ +#define WM8350_CS2_HIB_MODE 0x1000 +#define WM8350_CS2_ISEL_MASK 0x003F + +/* + * R175 (0xAF) - CSB Flash control + */ +#define WM8350_CS2_FLASH_MODE 0x8000 +#define WM8350_CS2_TRIGSRC 0x4000 +#define WM8350_CS2_DRIVE 0x2000 +#define WM8350_CS2_FLASH_DUR_MASK 0x0300 +#define WM8350_CS2_OFF_RAMP_MASK 0x0030 +#define WM8350_CS2_ON_RAMP_MASK 0x0003 + +/* + * R176 (0xB0) - DCDC/LDO requested + */ +#define WM8350_LS_ENA 0x8000 +#define WM8350_LDO4_ENA 0x0800 +#define WM8350_LDO3_ENA 0x0400 +#define WM8350_LDO2_ENA 0x0200 +#define WM8350_LDO1_ENA 0x0100 +#define WM8350_DC6_ENA 0x0020 +#define WM8350_DC5_ENA 0x0010 +#define WM8350_DC4_ENA 0x0008 +#define WM8350_DC3_ENA 0x0004 +#define WM8350_DC2_ENA 0x0002 +#define WM8350_DC1_ENA 0x0001 + +/* + * R177 (0xB1) - DCDC Active options + */ +#define WM8350_PUTO_MASK 0x3000 +#define WM8350_PWRUP_DELAY_MASK 0x0300 +#define WM8350_DC6_ACTIVE 0x0020 +#define WM8350_DC4_ACTIVE 0x0008 +#define WM8350_DC3_ACTIVE 0x0004 +#define WM8350_DC1_ACTIVE 0x0001 + +/* + * R178 (0xB2) - DCDC Sleep 
options + */ +#define WM8350_DC6_SLEEP 0x0020 +#define WM8350_DC4_SLEEP 0x0008 +#define WM8350_DC3_SLEEP 0x0004 +#define WM8350_DC1_SLEEP 0x0001 + +/* + * R179 (0xB3) - Power-check comparator + */ +#define WM8350_PCCMP_ERRACT 0x4000 +#define WM8350_PCCMP_RAIL 0x0100 +#define WM8350_PCCMP_OFF_THR_MASK 0x0070 +#define WM8350_PCCMP_ON_THR_MASK 0x0007 + +/* + * R180 (0xB4) - DCDC1 Control + */ +#define WM8350_DC1_OPFLT 0x0400 +#define WM8350_DC1_VSEL_MASK 0x007F +#define WM8350_DC1_VSEL_SHIFT 0 + +/* + * R181 (0xB5) - DCDC1 Timeouts + */ +#define WM8350_DC1_ERRACT_MASK 0xC000 +#define WM8350_DC1_ERRACT_SHIFT 14 +#define WM8350_DC1_ENSLOT_MASK 0x3C00 +#define WM8350_DC1_ENSLOT_SHIFT 10 +#define WM8350_DC1_SDSLOT_MASK 0x03C0 +#define WM8350_DC1_UVTO_MASK 0x0030 +#define WM8350_DC1_SDSLOT_SHIFT 6 + +/* Bit values for R181 (0xB5) */ +#define WM8350_DC1_ERRACT_NONE 0 +#define WM8350_DC1_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC1_ERRACT_SHUTDOWN_SYS 2 + +/* + * R182 (0xB6) - DCDC1 Low Power + */ +#define WM8350_DC1_HIB_MODE_MASK 0x7000 +#define WM8350_DC1_HIB_TRIG_MASK 0x0300 +#define WM8350_DC1_VIMG_MASK 0x007F + +/* + * R183 (0xB7) - DCDC2 Control + */ +#define WM8350_DC2_MODE 0x4000 +#define WM8350_DC2_MODE_MASK 0x4000 +#define WM8350_DC2_MODE_SHIFT 14 +#define WM8350_DC2_HIB_MODE 0x1000 +#define WM8350_DC2_HIB_MODE_MASK 0x1000 +#define WM8350_DC2_HIB_MODE_SHIFT 12 +#define WM8350_DC2_HIB_TRIG_MASK 0x0300 +#define WM8350_DC2_HIB_TRIG_SHIFT 8 +#define WM8350_DC2_ILIM 0x0040 +#define WM8350_DC2_ILIM_MASK 0x0040 +#define WM8350_DC2_ILIM_SHIFT 6 +#define WM8350_DC2_RMP_MASK 0x0018 +#define WM8350_DC2_RMP_SHIFT 3 +#define WM8350_DC2_FBSRC_MASK 0x0003 +#define WM8350_DC2_FBSRC_SHIFT 0 + +/* Bit values for R183 (0xB7) */ +#define WM8350_DC2_MODE_BOOST 0 +#define WM8350_DC2_MODE_SWITCH 1 + +#define WM8350_DC2_HIB_MODE_ACTIVE 1 +#define WM8350_DC2_HIB_MODE_DISABLE 0 + +#define WM8350_DC2_HIB_TRIG_NONE 0 +#define WM8350_DC2_HIB_TRIG_LPWR1 1 +#define WM8350_DC2_HIB_TRIG_LPWR2 2 +#define WM8350_DC2_HIB_TRIG_LPWR3 3 + +#define WM8350_DC2_ILIM_HIGH 0 +#define WM8350_DC2_ILIM_LOW 1 + +#define WM8350_DC2_RMP_30V 0 +#define WM8350_DC2_RMP_20V 1 +#define WM8350_DC2_RMP_10V 2 +#define WM8350_DC2_RMP_5V 3 + +#define WM8350_DC2_FBSRC_FB2 0 +#define WM8350_DC2_FBSRC_ISINKA 1 +#define WM8350_DC2_FBSRC_ISINKB 2 +#define WM8350_DC2_FBSRC_USB 3 + +/* + * R184 (0xB8) - DCDC2 Timeouts + */ +#define WM8350_DC2_ERRACT_MASK 0xC000 +#define WM8350_DC2_ERRACT_SHIFT 14 +#define WM8350_DC2_ENSLOT_MASK 0x3C00 +#define WM8350_DC2_ENSLOT_SHIFT 10 +#define WM8350_DC2_SDSLOT_MASK 0x03C0 +#define WM8350_DC2_UVTO_MASK 0x0030 + +/* Bit values for R184 (0xB8) */ +#define WM8350_DC2_ERRACT_NONE 0 +#define WM8350_DC2_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC2_ERRACT_SHUTDOWN_SYS 2 + +/* + * R186 (0xBA) - DCDC3 Control + */ +#define WM8350_DC3_OPFLT 0x0400 +#define WM8350_DC3_VSEL_MASK 0x007F +#define WM8350_DC3_VSEL_SHIFT 0 + +/* + * R187 (0xBB) - DCDC3 Timeouts + */ +#define WM8350_DC3_ERRACT_MASK 0xC000 +#define WM8350_DC3_ERRACT_SHIFT 14 +#define WM8350_DC3_ENSLOT_MASK 0x3C00 +#define WM8350_DC3_ENSLOT_SHIFT 10 +#define WM8350_DC3_SDSLOT_MASK 0x03C0 +#define WM8350_DC3_UVTO_MASK 0x0030 +#define WM8350_DC3_SDSLOT_SHIFT 6 + +/* Bit values for R187 (0xBB) */ +#define WM8350_DC3_ERRACT_NONE 0 +#define WM8350_DC3_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC3_ERRACT_SHUTDOWN_SYS 2 +/* + * R188 (0xBC) - DCDC3 Low Power + */ +#define WM8350_DC3_HIB_MODE_MASK 0x7000 +#define WM8350_DC3_HIB_TRIG_MASK 0x0300 +#define WM8350_DC3_VIMG_MASK 0x007F + +/* + 
* R189 (0xBD) - DCDC4 Control + */ +#define WM8350_DC4_OPFLT 0x0400 +#define WM8350_DC4_VSEL_MASK 0x007F +#define WM8350_DC4_VSEL_SHIFT 0 + +/* + * R190 (0xBE) - DCDC4 Timeouts + */ +#define WM8350_DC4_ERRACT_MASK 0xC000 +#define WM8350_DC4_ERRACT_SHIFT 14 +#define WM8350_DC4_ENSLOT_MASK 0x3C00 +#define WM8350_DC4_ENSLOT_SHIFT 10 +#define WM8350_DC4_SDSLOT_MASK 0x03C0 +#define WM8350_DC4_UVTO_MASK 0x0030 +#define WM8350_DC4_SDSLOT_SHIFT 6 + +/* Bit values for R190 (0xBE) */ +#define WM8350_DC4_ERRACT_NONE 0 +#define WM8350_DC4_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC4_ERRACT_SHUTDOWN_SYS 2 + +/* + * R191 (0xBF) - DCDC4 Low Power + */ +#define WM8350_DC4_HIB_MODE_MASK 0x7000 +#define WM8350_DC4_HIB_TRIG_MASK 0x0300 +#define WM8350_DC4_VIMG_MASK 0x007F + +/* + * R192 (0xC0) - DCDC5 Control + */ +#define WM8350_DC5_MODE 0x4000 +#define WM8350_DC5_MODE_MASK 0x4000 +#define WM8350_DC5_MODE_SHIFT 14 +#define WM8350_DC5_HIB_MODE 0x1000 +#define WM8350_DC5_HIB_MODE_MASK 0x1000 +#define WM8350_DC5_HIB_MODE_SHIFT 12 +#define WM8350_DC5_HIB_TRIG_MASK 0x0300 +#define WM8350_DC5_HIB_TRIG_SHIFT 8 +#define WM8350_DC5_ILIM 0x0040 +#define WM8350_DC5_ILIM_MASK 0x0040 +#define WM8350_DC5_ILIM_SHIFT 6 +#define WM8350_DC5_RMP_MASK 0x0018 +#define WM8350_DC5_RMP_SHIFT 3 +#define WM8350_DC5_FBSRC_MASK 0x0003 +#define WM8350_DC5_FBSRC_SHIFT 0 + +/* Bit values for R192 (0xC0) */ +#define WM8350_DC5_MODE_BOOST 0 +#define WM8350_DC5_MODE_SWITCH 1 + +#define WM8350_DC5_HIB_MODE_ACTIVE 1 +#define WM8350_DC5_HIB_MODE_DISABLE 0 + +#define WM8350_DC5_HIB_TRIG_NONE 0 +#define WM8350_DC5_HIB_TRIG_LPWR1 1 +#define WM8350_DC5_HIB_TRIG_LPWR2 2 +#define WM8350_DC5_HIB_TRIG_LPWR3 3 + +#define WM8350_DC5_ILIM_HIGH 0 +#define WM8350_DC5_ILIM_LOW 1 + +#define WM8350_DC5_RMP_30V 0 +#define WM8350_DC5_RMP_20V 1 +#define WM8350_DC5_RMP_10V 2 +#define WM8350_DC5_RMP_5V 3 + +#define WM8350_DC5_FBSRC_FB2 0 +#define WM8350_DC5_FBSRC_ISINKA 1 +#define WM8350_DC5_FBSRC_ISINKB 2 +#define WM8350_DC5_FBSRC_USB 3 + +/* + * R193 (0xC1) - DCDC5 Timeouts + */ +#define WM8350_DC5_ERRACT_MASK 0xC000 +#define WM8350_DC5_ERRACT_SHIFT 14 +#define WM8350_DC5_ENSLOT_MASK 0x3C00 +#define WM8350_DC5_ENSLOT_SHIFT 10 +#define WM8350_DC5_SDSLOT_MASK 0x03C0 +#define WM8350_DC5_UVTO_MASK 0x0030 +#define WM8350_DC5_SDSLOT_SHIFT 6 + +/* Bit values for R193 (0xC1) */ +#define WM8350_DC5_ERRACT_NONE 0 +#define WM8350_DC5_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC5_ERRACT_SHUTDOWN_SYS 2 + +/* + * R195 (0xC3) - DCDC6 Control + */ +#define WM8350_DC6_OPFLT 0x0400 +#define WM8350_DC6_VSEL_MASK 0x007F +#define WM8350_DC6_VSEL_SHIFT 0 + +/* + * R196 (0xC4) - DCDC6 Timeouts + */ +#define WM8350_DC6_ERRACT_MASK 0xC000 +#define WM8350_DC6_ERRACT_SHIFT 14 +#define WM8350_DC6_ENSLOT_MASK 0x3C00 +#define WM8350_DC6_ENSLOT_SHIFT 10 +#define WM8350_DC6_SDSLOT_MASK 0x03C0 +#define WM8350_DC6_UVTO_MASK 0x0030 +#define WM8350_DC6_SDSLOT_SHIFT 6 + +/* Bit values for R196 (0xC4) */ +#define WM8350_DC6_ERRACT_NONE 0 +#define WM8350_DC6_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_DC6_ERRACT_SHUTDOWN_SYS 2 + +/* + * R197 (0xC5) - DCDC6 Low Power + */ +#define WM8350_DC6_HIB_MODE_MASK 0x7000 +#define WM8350_DC6_HIB_TRIG_MASK 0x0300 +#define WM8350_DC6_VIMG_MASK 0x007F + +/* + * R199 (0xC7) - Limit Switch Control + */ +#define WM8350_LS_ERRACT_MASK 0xC000 +#define WM8350_LS_ERRACT_SHIFT 14 +#define WM8350_LS_ENSLOT_MASK 0x3C00 +#define WM8350_LS_ENSLOT_SHIFT 10 +#define WM8350_LS_SDSLOT_MASK 0x03C0 +#define WM8350_LS_SDSLOT_SHIFT 6 +#define WM8350_LS_HIB_MODE 0x0010 +#define 
WM8350_LS_HIB_MODE_MASK 0x0010 +#define WM8350_LS_HIB_MODE_SHIFT 4 +#define WM8350_LS_HIB_PROT 0x0002 +#define WM8350_LS_HIB_PROT_MASK 0x0002 +#define WM8350_LS_HIB_PROT_SHIFT 1 +#define WM8350_LS_PROT 0x0001 +#define WM8350_LS_PROT_MASK 0x0001 +#define WM8350_LS_PROT_SHIFT 0 + +/* Bit values for R199 (0xC7) */ +#define WM8350_LS_ERRACT_NONE 0 +#define WM8350_LS_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LS_ERRACT_SHUTDOWN_SYS 2 + +/* + * R200 (0xC8) - LDO1 Control + */ +#define WM8350_LDO1_SWI 0x4000 +#define WM8350_LDO1_OPFLT 0x0400 +#define WM8350_LDO1_VSEL_MASK 0x001F +#define WM8350_LDO1_VSEL_SHIFT 0 + +/* + * R201 (0xC9) - LDO1 Timeouts + */ +#define WM8350_LDO1_ERRACT_MASK 0xC000 +#define WM8350_LDO1_ERRACT_SHIFT 14 +#define WM8350_LDO1_ENSLOT_MASK 0x3C00 +#define WM8350_LDO1_ENSLOT_SHIFT 10 +#define WM8350_LDO1_SDSLOT_MASK 0x03C0 +#define WM8350_LDO1_UVTO_MASK 0x0030 +#define WM8350_LDO1_SDSLOT_SHIFT 6 + +/* Bit values for R201 (0xC9) */ +#define WM8350_LDO1_ERRACT_NONE 0 +#define WM8350_LDO1_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LDO1_ERRACT_SHUTDOWN_SYS 2 + +/* + * R202 (0xCA) - LDO1 Low Power + */ +#define WM8350_LDO1_HIB_MODE_MASK 0x3000 +#define WM8350_LDO1_HIB_TRIG_MASK 0x0300 +#define WM8350_LDO1_VIMG_MASK 0x001F +#define WM8350_LDO1_HIB_MODE_DIS (0x1 << 12) + + +/* + * R203 (0xCB) - LDO2 Control + */ +#define WM8350_LDO2_SWI 0x4000 +#define WM8350_LDO2_OPFLT 0x0400 +#define WM8350_LDO2_VSEL_MASK 0x001F +#define WM8350_LDO2_VSEL_SHIFT 0 + +/* + * R204 (0xCC) - LDO2 Timeouts + */ +#define WM8350_LDO2_ERRACT_MASK 0xC000 +#define WM8350_LDO2_ERRACT_SHIFT 14 +#define WM8350_LDO2_ENSLOT_MASK 0x3C00 +#define WM8350_LDO2_ENSLOT_SHIFT 10 +#define WM8350_LDO2_SDSLOT_MASK 0x03C0 +#define WM8350_LDO2_SDSLOT_SHIFT 6 + +/* Bit values for R204 (0xCC) */ +#define WM8350_LDO2_ERRACT_NONE 0 +#define WM8350_LDO2_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LDO2_ERRACT_SHUTDOWN_SYS 2 + +/* + * R205 (0xCD) - LDO2 Low Power + */ +#define WM8350_LDO2_HIB_MODE_MASK 0x3000 +#define WM8350_LDO2_HIB_TRIG_MASK 0x0300 +#define WM8350_LDO2_VIMG_MASK 0x001F + +/* + * R206 (0xCE) - LDO3 Control + */ +#define WM8350_LDO3_SWI 0x4000 +#define WM8350_LDO3_OPFLT 0x0400 +#define WM8350_LDO3_VSEL_MASK 0x001F +#define WM8350_LDO3_VSEL_SHIFT 0 + +/* + * R207 (0xCF) - LDO3 Timeouts + */ +#define WM8350_LDO3_ERRACT_MASK 0xC000 +#define WM8350_LDO3_ERRACT_SHIFT 14 +#define WM8350_LDO3_ENSLOT_MASK 0x3C00 +#define WM8350_LDO3_ENSLOT_SHIFT 10 +#define WM8350_LDO3_SDSLOT_MASK 0x03C0 +#define WM8350_LDO3_UVTO_MASK 0x0030 +#define WM8350_LDO3_SDSLOT_SHIFT 6 + +/* Bit values for R207 (0xCF) */ +#define WM8350_LDO3_ERRACT_NONE 0 +#define WM8350_LDO3_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LDO3_ERRACT_SHUTDOWN_SYS 2 + +/* + * R208 (0xD0) - LDO3 Low Power + */ +#define WM8350_LDO3_HIB_MODE_MASK 0x3000 +#define WM8350_LDO3_HIB_TRIG_MASK 0x0300 +#define WM8350_LDO3_VIMG_MASK 0x001F + +/* + * R209 (0xD1) - LDO4 Control + */ +#define WM8350_LDO4_SWI 0x4000 +#define WM8350_LDO4_OPFLT 0x0400 +#define WM8350_LDO4_VSEL_MASK 0x001F +#define WM8350_LDO4_VSEL_SHIFT 0 + +/* + * R210 (0xD2) - LDO4 Timeouts + */ +#define WM8350_LDO4_ERRACT_MASK 0xC000 +#define WM8350_LDO4_ERRACT_SHIFT 14 +#define WM8350_LDO4_ENSLOT_MASK 0x3C00 +#define WM8350_LDO4_ENSLOT_SHIFT 10 +#define WM8350_LDO4_SDSLOT_MASK 0x03C0 +#define WM8350_LDO4_UVTO_MASK 0x0030 +#define WM8350_LDO4_SDSLOT_SHIFT 6 + +/* Bit values for R210 (0xD2) */ +#define WM8350_LDO4_ERRACT_NONE 0 +#define WM8350_LDO4_ERRACT_SHUTDOWN_CONV 1 +#define WM8350_LDO4_ERRACT_SHUTDOWN_SYS 2 + +/* + * R211 (0xD3) 
- LDO4 Low Power + */ +#define WM8350_LDO4_HIB_MODE_MASK 0x3000 +#define WM8350_LDO4_HIB_TRIG_MASK 0x0300 +#define WM8350_LDO4_VIMG_MASK 0x001F + +/* + * R215 (0xD7) - VCC_FAULT Masks + */ +#define WM8350_LS_FAULT 0x8000 +#define WM8350_LDO4_FAULT 0x0800 +#define WM8350_LDO3_FAULT 0x0400 +#define WM8350_LDO2_FAULT 0x0200 +#define WM8350_LDO1_FAULT 0x0100 +#define WM8350_DC6_FAULT 0x0020 +#define WM8350_DC5_FAULT 0x0010 +#define WM8350_DC4_FAULT 0x0008 +#define WM8350_DC3_FAULT 0x0004 +#define WM8350_DC2_FAULT 0x0002 +#define WM8350_DC1_FAULT 0x0001 + +/* + * R216 (0xD8) - Main Bandgap Control + */ +#define WM8350_MBG_LOAD_FUSES 0x8000 +#define WM8350_MBG_FUSE_WPREP 0x4000 +#define WM8350_MBG_FUSE_WRITE 0x2000 +#define WM8350_MBG_FUSE_TRIM_MASK 0x1F00 +#define WM8350_MBG_TRIM_SRC 0x0020 +#define WM8350_MBG_USER_TRIM_MASK 0x001F + +/* + * R217 (0xD9) - OSC Control + */ +#define WM8350_OSC_LOAD_FUSES 0x8000 +#define WM8350_OSC_FUSE_WPREP 0x4000 +#define WM8350_OSC_FUSE_WRITE 0x2000 +#define WM8350_OSC_FUSE_TRIM_MASK 0x0F00 +#define WM8350_OSC_TRIM_SRC 0x0020 +#define WM8350_OSC_USER_TRIM_MASK 0x000F + +/* + * R248 (0xF8) - DCDC1 Force PWM + */ +#define WM8350_DCDC1_FORCE_PWM_ENA 0x0010 + +/* + * R250 (0xFA) - DCDC3 Force PWM + */ +#define WM8350_DCDC3_FORCE_PWM_ENA 0x0010 + +/* + * R251 (0xFB) - DCDC4 Force PWM + */ +#define WM8350_DCDC4_FORCE_PWM_ENA 0x0010 + +/* + * R253 (0xFD) - DCDC1 Force PWM + */ +#define WM8350_DCDC6_FORCE_PWM_ENA 0x0010 + +/* + * DCDC's + */ +#define WM8350_DCDC_1 0 +#define WM8350_DCDC_2 1 +#define WM8350_DCDC_3 2 +#define WM8350_DCDC_4 3 +#define WM8350_DCDC_5 4 +#define WM8350_DCDC_6 5 + +/* DCDC modes */ +#define WM8350_DCDC_ACTIVE_STANDBY 0 +#define WM8350_DCDC_ACTIVE_PULSE 1 +#define WM8350_DCDC_SLEEP_NORMAL 0 +#define WM8350_DCDC_SLEEP_LOW 1 + +/* DCDC Low power (Hibernate) mode */ +#define WM8350_DCDC_HIB_MODE_CUR (0 << 12) +#define WM8350_DCDC_HIB_MODE_IMAGE (1 << 12) +#define WM8350_DCDC_HIB_MODE_STANDBY (2 << 12) +#define WM8350_DCDC_HIB_MODE_LDO (4 << 12) +#define WM8350_DCDC_HIB_MODE_LDO_IM (5 << 12) +#define WM8350_DCDC_HIB_MODE_DIS (7 << 12) +#define WM8350_DCDC_HIB_MODE_MASK (7 << 12) + +/* DCDC Low Power (Hibernate) signal */ +#define WM8350_DCDC_HIB_SIG_REG (0 << 8) +#define WM8350_DCDC_HIB_SIG_LPWR1 (1 << 8) +#define WM8350_DCDC_HIB_SIG_LPWR2 (2 << 8) +#define WM8350_DCDC_HIB_SIG_LPWR3 (3 << 8) + +/* LDO Low power (Hibernate) mode */ +#define WM8350_LDO_HIB_MODE_IMAGE (0 << 0) +#define WM8350_LDO_HIB_MODE_DIS (1 << 0) + +/* LDO Low Power (Hibernate) signal */ +#define WM8350_LDO_HIB_SIG_REG (0 << 8) +#define WM8350_LDO_HIB_SIG_LPWR1 (1 << 8) +#define WM8350_LDO_HIB_SIG_LPWR2 (2 << 8) +#define WM8350_LDO_HIB_SIG_LPWR3 (3 << 8) + +/* + * LDOs + */ +#define WM8350_LDO_1 6 +#define WM8350_LDO_2 7 +#define WM8350_LDO_3 8 +#define WM8350_LDO_4 9 + +/* + * ISINKs + */ +#define WM8350_ISINK_A 10 +#define WM8350_ISINK_B 11 + +#define WM8350_ISINK_MODE_BOOST 0 +#define WM8350_ISINK_MODE_SWITCH 1 +#define WM8350_ISINK_ILIM_NORMAL 0 +#define WM8350_ISINK_ILIM_LOW 1 + +#define WM8350_ISINK_FLASH_DISABLE 0 +#define WM8350_ISINK_FLASH_ENABLE 1 +#define WM8350_ISINK_FLASH_TRIG_BIT 0 +#define WM8350_ISINK_FLASH_TRIG_GPIO 1 +#define WM8350_ISINK_FLASH_MODE_EN (1 << 13) +#define WM8350_ISINK_FLASH_MODE_DIS (0 << 13) +#define WM8350_ISINK_FLASH_DUR_32MS (0 << 8) +#define WM8350_ISINK_FLASH_DUR_64MS (1 << 8) +#define WM8350_ISINK_FLASH_DUR_96MS (2 << 8) +#define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8) +#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0) +#define 
WM8350_ISINK_FLASH_ON_0_25S (1 << 0) +#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0) +#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0) +#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0) +#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0) +#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0) +#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4) +#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4) +#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4) +#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4) +#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4) +#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4) +#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4) + +/* + * Regulator Interrupts. + */ +#define WM8350_IRQ_CS1 13 +#define WM8350_IRQ_CS2 14 +#define WM8350_IRQ_UV_LDO4 25 +#define WM8350_IRQ_UV_LDO3 26 +#define WM8350_IRQ_UV_LDO2 27 +#define WM8350_IRQ_UV_LDO1 28 +#define WM8350_IRQ_UV_DC6 29 +#define WM8350_IRQ_UV_DC5 30 +#define WM8350_IRQ_UV_DC4 31 +#define WM8350_IRQ_UV_DC3 32 +#define WM8350_IRQ_UV_DC2 33 +#define WM8350_IRQ_UV_DC1 34 +#define WM8350_IRQ_OC_LS 35 + +#define NUM_WM8350_REGULATORS 12 + +struct wm8350; +struct platform_device; +struct regulator_init_data; + +/* + * WM8350 LED platform data + */ +struct wm8350_led_platform_data { + const char *name; + const char *default_trigger; + int max_uA; +}; + +struct wm8350_led { + struct platform_device *pdev; + struct mutex mutex; + struct work_struct work; + spinlock_t value_lock; + enum led_brightness value; + struct led_classdev cdev; + int max_uA_index; + int enabled; + + struct regulator *isink; + struct regulator_consumer_supply isink_consumer; + struct regulator_init_data isink_init; + struct regulator *dcdc; + struct regulator_consumer_supply dcdc_consumer; + struct regulator_init_data dcdc_init; +}; + +struct wm8350_pmic { + /* Number of regulators of each type on this device */ + int max_dcdc; + int max_isink; + + /* ISINK to DCDC mapping */ + int isink_A_dcdc; + int isink_B_dcdc; + + /* hibernate configs */ + u16 dcdc1_hib_mode; + u16 dcdc3_hib_mode; + u16 dcdc4_hib_mode; + u16 dcdc6_hib_mode; + + /* regulator devices */ + struct platform_device *pdev[NUM_WM8350_REGULATORS]; + + /* LED devices */ + struct wm8350_led led[2]; +}; + +int wm8350_register_regulator(struct wm8350 *wm8350, int reg, + struct regulator_init_data *initdata); +int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, + struct wm8350_led_platform_data *pdata); + +/* + * Additional DCDC control not supported via regulator API + */ +int wm8350_dcdc_set_slot(struct wm8350 *wm8350, int dcdc, u16 start, + u16 stop, u16 fault); +int wm8350_dcdc25_set_mode(struct wm8350 *wm8350, int dcdc, u16 mode, + u16 ilim, u16 ramp, u16 feedback); + +/* + * Additional LDO control not supported via regulator API + */ +int wm8350_ldo_set_slot(struct wm8350 *wm8350, int ldo, u16 start, u16 stop); + +/* + * Additional ISINK control not supported via regulator API + */ +int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode, + u16 trigger, u16 duration, u16 on_ramp, + u16 off_ramp, u16 drive); + +#endif diff --git a/include/linux/mfd/wm8350/rtc.h b/include/linux/mfd/wm8350/rtc.h new file mode 100644 index 00000000..ebd72ffc --- /dev/null +++ b/include/linux/mfd/wm8350/rtc.h @@ -0,0 +1,269 @@ +/* + * rtc.h -- RTC driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the 
License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_MFD_WM8350_RTC_H +#define __LINUX_MFD_WM8350_RTC_H + +#include <linux/platform_device.h> + +/* + * Register values. + */ +#define WM8350_RTC_SECONDS_MINUTES 0x10 +#define WM8350_RTC_HOURS_DAY 0x11 +#define WM8350_RTC_DATE_MONTH 0x12 +#define WM8350_RTC_YEAR 0x13 +#define WM8350_ALARM_SECONDS_MINUTES 0x14 +#define WM8350_ALARM_HOURS_DAY 0x15 +#define WM8350_ALARM_DATE_MONTH 0x16 +#define WM8350_RTC_TIME_CONTROL 0x17 + +/* + * R16 (0x10) - RTC Seconds/Minutes + */ +#define WM8350_RTC_MINS_MASK 0x7F00 +#define WM8350_RTC_MINS_SHIFT 8 +#define WM8350_RTC_SECS_MASK 0x007F +#define WM8350_RTC_SECS_SHIFT 0 + +/* + * R17 (0x11) - RTC Hours/Day + */ +#define WM8350_RTC_DAY_MASK 0x0700 +#define WM8350_RTC_DAY_SHIFT 8 +#define WM8350_RTC_HPM_MASK 0x0020 +#define WM8350_RTC_HPM_SHIFT 5 +#define WM8350_RTC_HRS_MASK 0x001F +#define WM8350_RTC_HRS_SHIFT 0 + +/* Bit values for R21 (0x15) */ +#define WM8350_RTC_DAY_SUN 1 +#define WM8350_RTC_DAY_MON 2 +#define WM8350_RTC_DAY_TUE 3 +#define WM8350_RTC_DAY_WED 4 +#define WM8350_RTC_DAY_THU 5 +#define WM8350_RTC_DAY_FRI 6 +#define WM8350_RTC_DAY_SAT 7 + +#define WM8350_RTC_HPM_AM 0 +#define WM8350_RTC_HPM_PM 1 + +/* + * R18 (0x12) - RTC Date/Month + */ +#define WM8350_RTC_MTH_MASK 0x1F00 +#define WM8350_RTC_MTH_SHIFT 8 +#define WM8350_RTC_DATE_MASK 0x003F +#define WM8350_RTC_DATE_SHIFT 0 + +/* Bit values for R22 (0x16) */ +#define WM8350_RTC_MTH_JAN 1 +#define WM8350_RTC_MTH_FEB 2 +#define WM8350_RTC_MTH_MAR 3 +#define WM8350_RTC_MTH_APR 4 +#define WM8350_RTC_MTH_MAY 5 +#define WM8350_RTC_MTH_JUN 6 +#define WM8350_RTC_MTH_JUL 7 +#define WM8350_RTC_MTH_AUG 8 +#define WM8350_RTC_MTH_SEP 9 +#define WM8350_RTC_MTH_OCT 10 +#define WM8350_RTC_MTH_NOV 11 +#define WM8350_RTC_MTH_DEC 12 +#define WM8350_RTC_MTH_JAN_BCD 0x01 +#define WM8350_RTC_MTH_FEB_BCD 0x02 +#define WM8350_RTC_MTH_MAR_BCD 0x03 +#define WM8350_RTC_MTH_APR_BCD 0x04 +#define WM8350_RTC_MTH_MAY_BCD 0x05 +#define WM8350_RTC_MTH_JUN_BCD 0x06 +#define WM8350_RTC_MTH_JUL_BCD 0x07 +#define WM8350_RTC_MTH_AUG_BCD 0x08 +#define WM8350_RTC_MTH_SEP_BCD 0x09 +#define WM8350_RTC_MTH_OCT_BCD 0x10 +#define WM8350_RTC_MTH_NOV_BCD 0x11 +#define WM8350_RTC_MTH_DEC_BCD 0x12 + +/* + * R19 (0x13) - RTC Year + */ +#define WM8350_RTC_YHUNDREDS_MASK 0x3F00 +#define WM8350_RTC_YHUNDREDS_SHIFT 8 +#define WM8350_RTC_YUNITS_MASK 0x00FF +#define WM8350_RTC_YUNITS_SHIFT 0 + +/* + * R20 (0x14) - Alarm Seconds/Minutes + */ +#define WM8350_RTC_ALMMINS_MASK 0x7F00 +#define WM8350_RTC_ALMMINS_SHIFT 8 +#define WM8350_RTC_ALMSECS_MASK 0x007F +#define WM8350_RTC_ALMSECS_SHIFT 0 + +/* Bit values for R20 (0x14) */ +#define WM8350_RTC_ALMMINS_DONT_CARE -1 +#define WM8350_RTC_ALMSECS_DONT_CARE -1 + +/* + * R21 (0x15) - Alarm Hours/Day + */ +#define WM8350_RTC_ALMDAY_MASK 0x0F00 +#define WM8350_RTC_ALMDAY_SHIFT 8 +#define WM8350_RTC_ALMHPM_MASK 0x0020 +#define WM8350_RTC_ALMHPM_SHIFT 5 +#define WM8350_RTC_ALMHRS_MASK 0x001F +#define WM8350_RTC_ALMHRS_SHIFT 0 + +/* Bit values for R21 (0x15) */ +#define WM8350_RTC_ALMDAY_DONT_CARE -1 +#define WM8350_RTC_ALMDAY_SUN 1 +#define WM8350_RTC_ALMDAY_MON 2 +#define WM8350_RTC_ALMDAY_TUE 3 +#define WM8350_RTC_ALMDAY_WED 4 +#define WM8350_RTC_ALMDAY_THU 5 +#define WM8350_RTC_ALMDAY_FRI 6 +#define WM8350_RTC_ALMDAY_SAT 7 + +#define WM8350_RTC_ALMHPM_AM 0 +#define WM8350_RTC_ALMHPM_PM 1 + +#define WM8350_RTC_ALMHRS_DONT_CARE -1 + +/* + * R22 (0x16) - Alarm Date/Month + */ +#define WM8350_RTC_ALMMTH_MASK 0x1F00 +#define 
WM8350_RTC_ALMMTH_SHIFT 8 +#define WM8350_RTC_ALMDATE_MASK 0x003F +#define WM8350_RTC_ALMDATE_SHIFT 0 + +/* Bit values for R22 (0x16) */ +#define WM8350_RTC_ALMDATE_DONT_CARE -1 + +#define WM8350_RTC_ALMMTH_DONT_CARE -1 +#define WM8350_RTC_ALMMTH_JAN 1 +#define WM8350_RTC_ALMMTH_FEB 2 +#define WM8350_RTC_ALMMTH_MAR 3 +#define WM8350_RTC_ALMMTH_APR 4 +#define WM8350_RTC_ALMMTH_MAY 5 +#define WM8350_RTC_ALMMTH_JUN 6 +#define WM8350_RTC_ALMMTH_JUL 7 +#define WM8350_RTC_ALMMTH_AUG 8 +#define WM8350_RTC_ALMMTH_SEP 9 +#define WM8350_RTC_ALMMTH_OCT 10 +#define WM8350_RTC_ALMMTH_NOV 11 +#define WM8350_RTC_ALMMTH_DEC 12 +#define WM8350_RTC_ALMMTH_JAN_BCD 0x01 +#define WM8350_RTC_ALMMTH_FEB_BCD 0x02 +#define WM8350_RTC_ALMMTH_MAR_BCD 0x03 +#define WM8350_RTC_ALMMTH_APR_BCD 0x04 +#define WM8350_RTC_ALMMTH_MAY_BCD 0x05 +#define WM8350_RTC_ALMMTH_JUN_BCD 0x06 +#define WM8350_RTC_ALMMTH_JUL_BCD 0x07 +#define WM8350_RTC_ALMMTH_AUG_BCD 0x08 +#define WM8350_RTC_ALMMTH_SEP_BCD 0x09 +#define WM8350_RTC_ALMMTH_OCT_BCD 0x10 +#define WM8350_RTC_ALMMTH_NOV_BCD 0x11 +#define WM8350_RTC_ALMMTH_DEC_BCD 0x12 + +/* + * R23 (0x17) - RTC Time Control + */ +#define WM8350_RTC_BCD 0x8000 +#define WM8350_RTC_BCD_MASK 0x8000 +#define WM8350_RTC_BCD_SHIFT 15 +#define WM8350_RTC_12HR 0x4000 +#define WM8350_RTC_12HR_MASK 0x4000 +#define WM8350_RTC_12HR_SHIFT 14 +#define WM8350_RTC_DST 0x2000 +#define WM8350_RTC_DST_MASK 0x2000 +#define WM8350_RTC_DST_SHIFT 13 +#define WM8350_RTC_SET 0x0800 +#define WM8350_RTC_SET_MASK 0x0800 +#define WM8350_RTC_SET_SHIFT 11 +#define WM8350_RTC_STS 0x0400 +#define WM8350_RTC_STS_MASK 0x0400 +#define WM8350_RTC_STS_SHIFT 10 +#define WM8350_RTC_ALMSET 0x0200 +#define WM8350_RTC_ALMSET_MASK 0x0200 +#define WM8350_RTC_ALMSET_SHIFT 9 +#define WM8350_RTC_ALMSTS 0x0100 +#define WM8350_RTC_ALMSTS_MASK 0x0100 +#define WM8350_RTC_ALMSTS_SHIFT 8 +#define WM8350_RTC_PINT 0x0070 +#define WM8350_RTC_PINT_MASK 0x0070 +#define WM8350_RTC_PINT_SHIFT 4 +#define WM8350_RTC_DSW 0x000F +#define WM8350_RTC_DSW_MASK 0x000F +#define WM8350_RTC_DSW_SHIFT 0 + +/* Bit values for R23 (0x17) */ +#define WM8350_RTC_BCD_BINARY 0 +#define WM8350_RTC_BCD_BCD 1 + +#define WM8350_RTC_12HR_24HR 0 +#define WM8350_RTC_12HR_12HR 1 + +#define WM8350_RTC_DST_DISABLED 0 +#define WM8350_RTC_DST_ENABLED 1 + +#define WM8350_RTC_SET_RUN 0 +#define WM8350_RTC_SET_SET 1 + +#define WM8350_RTC_STS_RUNNING 0 +#define WM8350_RTC_STS_STOPPED 1 + +#define WM8350_RTC_ALMSET_RUN 0 +#define WM8350_RTC_ALMSET_SET 1 + +#define WM8350_RTC_ALMSTS_RUNNING 0 +#define WM8350_RTC_ALMSTS_STOPPED 1 + +#define WM8350_RTC_PINT_DISABLED 0 +#define WM8350_RTC_PINT_SECS 1 +#define WM8350_RTC_PINT_MINS 2 +#define WM8350_RTC_PINT_HRS 3 +#define WM8350_RTC_PINT_DAYS 4 +#define WM8350_RTC_PINT_MTHS 5 + +#define WM8350_RTC_DSW_DISABLED 0 +#define WM8350_RTC_DSW_1HZ 1 +#define WM8350_RTC_DSW_2HZ 2 +#define WM8350_RTC_DSW_4HZ 3 +#define WM8350_RTC_DSW_8HZ 4 +#define WM8350_RTC_DSW_16HZ 5 +#define WM8350_RTC_DSW_32HZ 6 +#define WM8350_RTC_DSW_64HZ 7 +#define WM8350_RTC_DSW_128HZ 8 +#define WM8350_RTC_DSW_256HZ 9 +#define WM8350_RTC_DSW_512HZ 10 +#define WM8350_RTC_DSW_1024HZ 11 + +/* + * R218 (0xDA) - RTC Tick Control + */ +#define WM8350_RTC_TICKSTS 0x4000 +#define WM8350_RTC_CLKSRC 0x2000 +#define WM8350_RTC_TRIM_MASK 0x03FF + +/* + * RTC Interrupts. 
+ */ +#define WM8350_IRQ_RTC_PER 7 +#define WM8350_IRQ_RTC_SEC 8 +#define WM8350_IRQ_RTC_ALM 9 + +struct wm8350_rtc { + struct platform_device *pdev; + struct rtc_device *rtc; + int alarm_enabled; /* used over suspend/resume */ + int update_enabled; +}; + +#endif diff --git a/include/linux/mfd/wm8350/supply.h b/include/linux/mfd/wm8350/supply.h new file mode 100644 index 00000000..2b947931 --- /dev/null +++ b/include/linux/mfd/wm8350/supply.h @@ -0,0 +1,134 @@ +/* + * supply.h -- Power Supply Driver for Wolfson WM8350 PMIC + * + * Copyright 2007 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MFD_WM8350_SUPPLY_H_ +#define __LINUX_MFD_WM8350_SUPPLY_H_ + +#include <linux/mutex.h> +#include <linux/power_supply.h> + +/* + * Charger registers + */ +#define WM8350_BATTERY_CHARGER_CONTROL_1 0xA8 +#define WM8350_BATTERY_CHARGER_CONTROL_2 0xA9 +#define WM8350_BATTERY_CHARGER_CONTROL_3 0xAA + +/* + * R168 (0xA8) - Battery Charger Control 1 + */ +#define WM8350_CHG_ENA_R168 0x8000 +#define WM8350_CHG_THR 0x2000 +#define WM8350_CHG_EOC_SEL_MASK 0x1C00 +#define WM8350_CHG_TRICKLE_TEMP_CHOKE 0x0200 +#define WM8350_CHG_TRICKLE_USB_CHOKE 0x0100 +#define WM8350_CHG_RECOVER_T 0x0080 +#define WM8350_CHG_END_ACT 0x0040 +#define WM8350_CHG_FAST 0x0020 +#define WM8350_CHG_FAST_USB_THROTTLE 0x0010 +#define WM8350_CHG_NTC_MON 0x0008 +#define WM8350_CHG_BATT_HOT_MON 0x0004 +#define WM8350_CHG_BATT_COLD_MON 0x0002 +#define WM8350_CHG_CHIP_TEMP_MON 0x0001 + +/* + * R169 (0xA9) - Battery Charger Control 2 + */ +#define WM8350_CHG_ACTIVE 0x8000 +#define WM8350_CHG_PAUSE 0x4000 +#define WM8350_CHG_STS_MASK 0x3000 +#define WM8350_CHG_TIME_MASK 0x0F00 +#define WM8350_CHG_MASK_WALL_FB 0x0080 +#define WM8350_CHG_TRICKLE_SEL 0x0040 +#define WM8350_CHG_VSEL_MASK 0x0030 +#define WM8350_CHG_ISEL_MASK 0x000F +#define WM8350_CHG_STS_OFF 0x0000 +#define WM8350_CHG_STS_TRICKLE 0x1000 +#define WM8350_CHG_STS_FAST 0x2000 + +/* + * R170 (0xAA) - Battery Charger Control 3 + */ +#define WM8350_CHG_THROTTLE_T_MASK 0x0060 +#define WM8350_CHG_SMART 0x0010 +#define WM8350_CHG_TIMER_ADJT_MASK 0x000F + +/* + * Charger Interrupts + */ +#define WM8350_IRQ_CHG_BAT_HOT 0 +#define WM8350_IRQ_CHG_BAT_COLD 1 +#define WM8350_IRQ_CHG_BAT_FAIL 2 +#define WM8350_IRQ_CHG_TO 3 +#define WM8350_IRQ_CHG_END 4 +#define WM8350_IRQ_CHG_START 5 +#define WM8350_IRQ_CHG_FAST_RDY 6 +#define WM8350_IRQ_CHG_VBATT_LT_3P9 10 +#define WM8350_IRQ_CHG_VBATT_LT_3P1 11 +#define WM8350_IRQ_CHG_VBATT_LT_2P85 12 + +/* + * Charger Policy + */ +#define WM8350_CHG_TRICKLE_50mA (0 << 6) +#define WM8350_CHG_TRICKLE_100mA (1 << 6) +#define WM8350_CHG_4_05V (0 << 4) +#define WM8350_CHG_4_10V (1 << 4) +#define WM8350_CHG_4_15V (2 << 4) +#define WM8350_CHG_4_20V (3 << 4) +#define WM8350_CHG_FAST_LIMIT_mA(x) ((x / 50) & 0xf) +#define WM8350_CHG_EOC_mA(x) (((x - 10) & 0x7) << 10) +#define WM8350_CHG_TRICKLE_3_1V (0 << 13) +#define WM8350_CHG_TRICKLE_3_9V (1 << 13) + +/* + * Supply Registers. + */ +#define WM8350_USB_VOLTAGE_READBACK 0x9C +#define WM8350_LINE_VOLTAGE_READBACK 0x9D +#define WM8350_BATT_VOLTAGE_READBACK 0x9E + +/* + * Supply Interrupts. 
+ */ +#define WM8350_IRQ_USB_LIMIT 15 +#define WM8350_IRQ_EXT_USB_FB 36 +#define WM8350_IRQ_EXT_WALL_FB 37 +#define WM8350_IRQ_EXT_BAT_FB 38 + +/* + * Policy to control charger state machine. + */ +struct wm8350_charger_policy { + + /* charger state machine policy - set in machine driver */ + int eoc_mA; /* end of charge current (mA) */ + int charge_mV; /* charge voltage */ + int fast_limit_mA; /* fast charge current limit */ + int fast_limit_USB_mA; /* USB fast charge current limit */ + int charge_timeout; /* charge timeout (mins) */ + int trickle_start_mV; /* trickle charge starts at mV */ + int trickle_charge_mA; /* trickle charge current */ + int trickle_charge_USB_mA; /* USB trickle charge current */ +}; + +struct wm8350_power { + struct platform_device *pdev; + struct power_supply battery; + struct power_supply usb; + struct power_supply ac; + struct wm8350_charger_policy *policy; + + int rev_g_coeff; +}; + +#endif diff --git a/include/linux/mfd/wm8350/wdt.h b/include/linux/mfd/wm8350/wdt.h new file mode 100644 index 00000000..f6135b5e --- /dev/null +++ b/include/linux/mfd/wm8350/wdt.h @@ -0,0 +1,28 @@ +/* + * wdt.h -- Watchdog Driver for Wolfson WM8350 PMIC + * + * Copyright 2007, 2008 Wolfson Microelectronics PLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_MFD_WM8350_WDT_H_ +#define __LINUX_MFD_WM8350_WDT_H_ + +#include <linux/platform_device.h> + +#define WM8350_WDOG_HIB_MODE 0x0080 +#define WM8350_WDOG_DEBUG 0x0040 +#define WM8350_WDOG_MODE_MASK 0x0030 +#define WM8350_WDOG_TO_MASK 0x0007 + +#define WM8350_IRQ_SYS_WDOG_TO 24 + +struct wm8350_wdt { + struct platform_device *pdev; +}; + +#endif diff --git a/include/linux/mfd/wm8400-audio.h b/include/linux/mfd/wm8400-audio.h new file mode 100644 index 00000000..e06ed3eb --- /dev/null +++ b/include/linux/mfd/wm8400-audio.h @@ -0,0 +1,1187 @@ +/* + * wm8400 private definitions for audio + * + * Copyright 2008 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __LINUX_MFD_WM8400_AUDIO_H +#define __LINUX_MFD_WM8400_AUDIO_H + +#include <linux/mfd/wm8400-audio.h> + +/* + * R2 (0x02) - Power Management (1) + */ +#define WM8400_CODEC_ENA 0x8000 /* CODEC_ENA */ +#define WM8400_CODEC_ENA_MASK 0x8000 /* CODEC_ENA */ +#define WM8400_CODEC_ENA_SHIFT 15 /* CODEC_ENA */ +#define WM8400_CODEC_ENA_WIDTH 1 /* CODEC_ENA */ +#define WM8400_SYSCLK_ENA 0x4000 /* SYSCLK_ENA */ +#define WM8400_SYSCLK_ENA_MASK 0x4000 /* SYSCLK_ENA */ +#define WM8400_SYSCLK_ENA_SHIFT 14 /* SYSCLK_ENA */ +#define WM8400_SYSCLK_ENA_WIDTH 1 /* SYSCLK_ENA */ +#define WM8400_SPK_MIX_ENA 0x2000 /* SPK_MIX_ENA */ +#define WM8400_SPK_MIX_ENA_MASK 0x2000 /* SPK_MIX_ENA */ +#define WM8400_SPK_MIX_ENA_SHIFT 13 /* SPK_MIX_ENA */ +#define WM8400_SPK_MIX_ENA_WIDTH 1 /* SPK_MIX_ENA */ +#define WM8400_SPK_ENA 0x1000 /* SPK_ENA */ +#define WM8400_SPK_ENA_MASK 0x1000 /* SPK_ENA */ +#define WM8400_SPK_ENA_SHIFT 12 /* SPK_ENA */ +#define WM8400_SPK_ENA_WIDTH 1 /* SPK_ENA */ +#define WM8400_OUT3_ENA 0x0800 /* OUT3_ENA */ +#define WM8400_OUT3_ENA_MASK 0x0800 /* OUT3_ENA */ +#define WM8400_OUT3_ENA_SHIFT 11 /* OUT3_ENA */ +#define WM8400_OUT3_ENA_WIDTH 1 /* OUT3_ENA */ +#define WM8400_OUT4_ENA 0x0400 /* OUT4_ENA */ +#define WM8400_OUT4_ENA_MASK 0x0400 /* OUT4_ENA */ +#define WM8400_OUT4_ENA_SHIFT 10 /* OUT4_ENA */ +#define WM8400_OUT4_ENA_WIDTH 1 /* OUT4_ENA */ +#define WM8400_LOUT_ENA 0x0200 /* LOUT_ENA */ +#define WM8400_LOUT_ENA_MASK 0x0200 /* LOUT_ENA */ +#define WM8400_LOUT_ENA_SHIFT 9 /* LOUT_ENA */ +#define WM8400_LOUT_ENA_WIDTH 1 /* LOUT_ENA */ +#define WM8400_ROUT_ENA 0x0100 /* ROUT_ENA */ +#define WM8400_ROUT_ENA_MASK 0x0100 /* ROUT_ENA */ +#define WM8400_ROUT_ENA_SHIFT 8 /* ROUT_ENA */ +#define WM8400_ROUT_ENA_WIDTH 1 /* ROUT_ENA */ +#define WM8400_MIC1BIAS_ENA 0x0010 /* MIC1BIAS_ENA */ +#define WM8400_MIC1BIAS_ENA_MASK 0x0010 /* MIC1BIAS_ENA */ +#define WM8400_MIC1BIAS_ENA_SHIFT 4 /* MIC1BIAS_ENA */ +#define WM8400_MIC1BIAS_ENA_WIDTH 1 /* MIC1BIAS_ENA */ +#define WM8400_VMID_MODE_MASK 0x0006 /* VMID_MODE - [2:1] */ +#define WM8400_VMID_MODE_SHIFT 1 /* VMID_MODE - [2:1] */ +#define WM8400_VMID_MODE_WIDTH 2 /* VMID_MODE - [2:1] */ +#define WM8400_VREF_ENA 0x0001 /* VREF_ENA */ +#define WM8400_VREF_ENA_MASK 0x0001 /* VREF_ENA */ +#define WM8400_VREF_ENA_SHIFT 0 /* VREF_ENA */ +#define WM8400_VREF_ENA_WIDTH 1 /* VREF_ENA */ + +/* + * R3 (0x03) - Power Management (2) + */ +#define WM8400_FLL_ENA 0x8000 /* FLL_ENA */ +#define WM8400_FLL_ENA_MASK 0x8000 /* FLL_ENA */ +#define WM8400_FLL_ENA_SHIFT 15 /* FLL_ENA */ +#define WM8400_FLL_ENA_WIDTH 1 /* FLL_ENA */ +#define WM8400_TSHUT_ENA 0x4000 /* TSHUT_ENA */ +#define WM8400_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */ +#define WM8400_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */ +#define WM8400_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */ +#define WM8400_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */ +#define WM8400_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */ +#define WM8400_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */ +#define WM8400_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */ +#define WM8400_OPCLK_ENA 0x0800 /* OPCLK_ENA */ +#define WM8400_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */ +#define WM8400_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */ +#define WM8400_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */ +#define WM8400_AINL_ENA 0x0200 /* AINL_ENA */ +#define WM8400_AINL_ENA_MASK 0x0200 /* AINL_ENA */ +#define WM8400_AINL_ENA_SHIFT 9 /* AINL_ENA */ +#define WM8400_AINL_ENA_WIDTH 1 /* AINL_ENA */ +#define WM8400_AINR_ENA 0x0100 /* AINR_ENA */ +#define WM8400_AINR_ENA_MASK 0x0100 /* AINR_ENA */ +#define 
WM8400_AINR_ENA_SHIFT 8 /* AINR_ENA */ +#define WM8400_AINR_ENA_WIDTH 1 /* AINR_ENA */ +#define WM8400_LIN34_ENA 0x0080 /* LIN34_ENA */ +#define WM8400_LIN34_ENA_MASK 0x0080 /* LIN34_ENA */ +#define WM8400_LIN34_ENA_SHIFT 7 /* LIN34_ENA */ +#define WM8400_LIN34_ENA_WIDTH 1 /* LIN34_ENA */ +#define WM8400_LIN12_ENA 0x0040 /* LIN12_ENA */ +#define WM8400_LIN12_ENA_MASK 0x0040 /* LIN12_ENA */ +#define WM8400_LIN12_ENA_SHIFT 6 /* LIN12_ENA */ +#define WM8400_LIN12_ENA_WIDTH 1 /* LIN12_ENA */ +#define WM8400_RIN34_ENA 0x0020 /* RIN34_ENA */ +#define WM8400_RIN34_ENA_MASK 0x0020 /* RIN34_ENA */ +#define WM8400_RIN34_ENA_SHIFT 5 /* RIN34_ENA */ +#define WM8400_RIN34_ENA_WIDTH 1 /* RIN34_ENA */ +#define WM8400_RIN12_ENA 0x0010 /* RIN12_ENA */ +#define WM8400_RIN12_ENA_MASK 0x0010 /* RIN12_ENA */ +#define WM8400_RIN12_ENA_SHIFT 4 /* RIN12_ENA */ +#define WM8400_RIN12_ENA_WIDTH 1 /* RIN12_ENA */ +#define WM8400_ADCL_ENA 0x0002 /* ADCL_ENA */ +#define WM8400_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */ +#define WM8400_ADCL_ENA_SHIFT 1 /* ADCL_ENA */ +#define WM8400_ADCL_ENA_WIDTH 1 /* ADCL_ENA */ +#define WM8400_ADCR_ENA 0x0001 /* ADCR_ENA */ +#define WM8400_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */ +#define WM8400_ADCR_ENA_SHIFT 0 /* ADCR_ENA */ +#define WM8400_ADCR_ENA_WIDTH 1 /* ADCR_ENA */ + +/* + * R4 (0x04) - Power Management (3) + */ +#define WM8400_LON_ENA 0x2000 /* LON_ENA */ +#define WM8400_LON_ENA_MASK 0x2000 /* LON_ENA */ +#define WM8400_LON_ENA_SHIFT 13 /* LON_ENA */ +#define WM8400_LON_ENA_WIDTH 1 /* LON_ENA */ +#define WM8400_LOP_ENA 0x1000 /* LOP_ENA */ +#define WM8400_LOP_ENA_MASK 0x1000 /* LOP_ENA */ +#define WM8400_LOP_ENA_SHIFT 12 /* LOP_ENA */ +#define WM8400_LOP_ENA_WIDTH 1 /* LOP_ENA */ +#define WM8400_RON_ENA 0x0800 /* RON_ENA */ +#define WM8400_RON_ENA_MASK 0x0800 /* RON_ENA */ +#define WM8400_RON_ENA_SHIFT 11 /* RON_ENA */ +#define WM8400_RON_ENA_WIDTH 1 /* RON_ENA */ +#define WM8400_ROP_ENA 0x0400 /* ROP_ENA */ +#define WM8400_ROP_ENA_MASK 0x0400 /* ROP_ENA */ +#define WM8400_ROP_ENA_SHIFT 10 /* ROP_ENA */ +#define WM8400_ROP_ENA_WIDTH 1 /* ROP_ENA */ +#define WM8400_LOPGA_ENA 0x0080 /* LOPGA_ENA */ +#define WM8400_LOPGA_ENA_MASK 0x0080 /* LOPGA_ENA */ +#define WM8400_LOPGA_ENA_SHIFT 7 /* LOPGA_ENA */ +#define WM8400_LOPGA_ENA_WIDTH 1 /* LOPGA_ENA */ +#define WM8400_ROPGA_ENA 0x0040 /* ROPGA_ENA */ +#define WM8400_ROPGA_ENA_MASK 0x0040 /* ROPGA_ENA */ +#define WM8400_ROPGA_ENA_SHIFT 6 /* ROPGA_ENA */ +#define WM8400_ROPGA_ENA_WIDTH 1 /* ROPGA_ENA */ +#define WM8400_LOMIX_ENA 0x0020 /* LOMIX_ENA */ +#define WM8400_LOMIX_ENA_MASK 0x0020 /* LOMIX_ENA */ +#define WM8400_LOMIX_ENA_SHIFT 5 /* LOMIX_ENA */ +#define WM8400_LOMIX_ENA_WIDTH 1 /* LOMIX_ENA */ +#define WM8400_ROMIX_ENA 0x0010 /* ROMIX_ENA */ +#define WM8400_ROMIX_ENA_MASK 0x0010 /* ROMIX_ENA */ +#define WM8400_ROMIX_ENA_SHIFT 4 /* ROMIX_ENA */ +#define WM8400_ROMIX_ENA_WIDTH 1 /* ROMIX_ENA */ +#define WM8400_DACL_ENA 0x0002 /* DACL_ENA */ +#define WM8400_DACL_ENA_MASK 0x0002 /* DACL_ENA */ +#define WM8400_DACL_ENA_SHIFT 1 /* DACL_ENA */ +#define WM8400_DACL_ENA_WIDTH 1 /* DACL_ENA */ +#define WM8400_DACR_ENA 0x0001 /* DACR_ENA */ +#define WM8400_DACR_ENA_MASK 0x0001 /* DACR_ENA */ +#define WM8400_DACR_ENA_SHIFT 0 /* DACR_ENA */ +#define WM8400_DACR_ENA_WIDTH 1 /* DACR_ENA */ + +/* + * R5 (0x05) - Audio Interface (1) + */ +#define WM8400_AIFADCL_SRC 0x8000 /* AIFADCL_SRC */ +#define WM8400_AIFADCL_SRC_MASK 0x8000 /* AIFADCL_SRC */ +#define WM8400_AIFADCL_SRC_SHIFT 15 /* AIFADCL_SRC */ +#define WM8400_AIFADCL_SRC_WIDTH 1 /* AIFADCL_SRC 
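As a usage sketch (not code from the kernel itself): every register field in this header follows the same _MASK/_SHIFT/_WIDTH pattern, so an update is a mask-out followed by a shifted OR. The helper and the VMID_MODE example below are purely illustrative.

#include <linux/types.h>

/* Replace one field in a 16-bit register image; illustrative helper only. */
static inline u16 wm8400_field_replace(u16 reg, u16 mask, unsigned int shift,
				       u16 val)
{
	return (reg & ~mask) | ((u16)(val << shift) & mask);
}

/*
 * Example: set VMID_MODE to 2 in a cached copy of Power Management (1)
 * before writing it back through whatever register I/O the driver uses:
 *
 *	pm1 = wm8400_field_replace(pm1, WM8400_VMID_MODE_MASK,
 *				   WM8400_VMID_MODE_SHIFT, 2);
 */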
*/ +#define WM8400_AIFADCR_SRC 0x4000 /* AIFADCR_SRC */ +#define WM8400_AIFADCR_SRC_MASK 0x4000 /* AIFADCR_SRC */ +#define WM8400_AIFADCR_SRC_SHIFT 14 /* AIFADCR_SRC */ +#define WM8400_AIFADCR_SRC_WIDTH 1 /* AIFADCR_SRC */ +#define WM8400_AIFADC_TDM 0x2000 /* AIFADC_TDM */ +#define WM8400_AIFADC_TDM_MASK 0x2000 /* AIFADC_TDM */ +#define WM8400_AIFADC_TDM_SHIFT 13 /* AIFADC_TDM */ +#define WM8400_AIFADC_TDM_WIDTH 1 /* AIFADC_TDM */ +#define WM8400_AIFADC_TDM_CHAN 0x1000 /* AIFADC_TDM_CHAN */ +#define WM8400_AIFADC_TDM_CHAN_MASK 0x1000 /* AIFADC_TDM_CHAN */ +#define WM8400_AIFADC_TDM_CHAN_SHIFT 12 /* AIFADC_TDM_CHAN */ +#define WM8400_AIFADC_TDM_CHAN_WIDTH 1 /* AIFADC_TDM_CHAN */ +#define WM8400_AIF_BCLK_INV 0x0100 /* AIF_BCLK_INV */ +#define WM8400_AIF_BCLK_INV_MASK 0x0100 /* AIF_BCLK_INV */ +#define WM8400_AIF_BCLK_INV_SHIFT 8 /* AIF_BCLK_INV */ +#define WM8400_AIF_BCLK_INV_WIDTH 1 /* AIF_BCLK_INV */ +#define WM8400_AIF_LRCLK_INV 0x0080 /* AIF_LRCLK_INV */ +#define WM8400_AIF_LRCLK_INV_MASK 0x0080 /* AIF_LRCLK_INV */ +#define WM8400_AIF_LRCLK_INV_SHIFT 7 /* AIF_LRCLK_INV */ +#define WM8400_AIF_LRCLK_INV_WIDTH 1 /* AIF_LRCLK_INV */ +#define WM8400_AIF_WL_MASK 0x0060 /* AIF_WL - [6:5] */ +#define WM8400_AIF_WL_SHIFT 5 /* AIF_WL - [6:5] */ +#define WM8400_AIF_WL_WIDTH 2 /* AIF_WL - [6:5] */ +#define WM8400_AIF_WL_16BITS (0 << 5) +#define WM8400_AIF_WL_20BITS (1 << 5) +#define WM8400_AIF_WL_24BITS (2 << 5) +#define WM8400_AIF_WL_32BITS (3 << 5) +#define WM8400_AIF_FMT_MASK 0x0018 /* AIF_FMT - [4:3] */ +#define WM8400_AIF_FMT_SHIFT 3 /* AIF_FMT - [4:3] */ +#define WM8400_AIF_FMT_WIDTH 2 /* AIF_FMT - [4:3] */ +#define WM8400_AIF_FMT_RIGHTJ (0 << 3) +#define WM8400_AIF_FMT_LEFTJ (1 << 3) +#define WM8400_AIF_FMT_I2S (2 << 3) +#define WM8400_AIF_FMT_DSP (3 << 3) + +/* + * R6 (0x06) - Audio Interface (2) + */ +#define WM8400_DACL_SRC 0x8000 /* DACL_SRC */ +#define WM8400_DACL_SRC_MASK 0x8000 /* DACL_SRC */ +#define WM8400_DACL_SRC_SHIFT 15 /* DACL_SRC */ +#define WM8400_DACL_SRC_WIDTH 1 /* DACL_SRC */ +#define WM8400_DACR_SRC 0x4000 /* DACR_SRC */ +#define WM8400_DACR_SRC_MASK 0x4000 /* DACR_SRC */ +#define WM8400_DACR_SRC_SHIFT 14 /* DACR_SRC */ +#define WM8400_DACR_SRC_WIDTH 1 /* DACR_SRC */ +#define WM8400_AIFDAC_TDM 0x2000 /* AIFDAC_TDM */ +#define WM8400_AIFDAC_TDM_MASK 0x2000 /* AIFDAC_TDM */ +#define WM8400_AIFDAC_TDM_SHIFT 13 /* AIFDAC_TDM */ +#define WM8400_AIFDAC_TDM_WIDTH 1 /* AIFDAC_TDM */ +#define WM8400_AIFDAC_TDM_CHAN 0x1000 /* AIFDAC_TDM_CHAN */ +#define WM8400_AIFDAC_TDM_CHAN_MASK 0x1000 /* AIFDAC_TDM_CHAN */ +#define WM8400_AIFDAC_TDM_CHAN_SHIFT 12 /* AIFDAC_TDM_CHAN */ +#define WM8400_AIFDAC_TDM_CHAN_WIDTH 1 /* AIFDAC_TDM_CHAN */ +#define WM8400_DAC_BOOST_MASK 0x0C00 /* DAC_BOOST - [11:10] */ +#define WM8400_DAC_BOOST_SHIFT 10 /* DAC_BOOST - [11:10] */ +#define WM8400_DAC_BOOST_WIDTH 2 /* DAC_BOOST - [11:10] */ +#define WM8400_DAC_COMP 0x0010 /* DAC_COMP */ +#define WM8400_DAC_COMP_MASK 0x0010 /* DAC_COMP */ +#define WM8400_DAC_COMP_SHIFT 4 /* DAC_COMP */ +#define WM8400_DAC_COMP_WIDTH 1 /* DAC_COMP */ +#define WM8400_DAC_COMPMODE 0x0008 /* DAC_COMPMODE */ +#define WM8400_DAC_COMPMODE_MASK 0x0008 /* DAC_COMPMODE */ +#define WM8400_DAC_COMPMODE_SHIFT 3 /* DAC_COMPMODE */ +#define WM8400_DAC_COMPMODE_WIDTH 1 /* DAC_COMPMODE */ +#define WM8400_ADC_COMP 0x0004 /* ADC_COMP */ +#define WM8400_ADC_COMP_MASK 0x0004 /* ADC_COMP */ +#define WM8400_ADC_COMP_SHIFT 2 /* ADC_COMP */ +#define WM8400_ADC_COMP_WIDTH 1 /* ADC_COMP */ +#define WM8400_ADC_COMPMODE 0x0002 /* ADC_COMPMODE */ +#define 
WM8400_ADC_COMPMODE_MASK 0x0002 /* ADC_COMPMODE */ +#define WM8400_ADC_COMPMODE_SHIFT 1 /* ADC_COMPMODE */ +#define WM8400_ADC_COMPMODE_WIDTH 1 /* ADC_COMPMODE */ +#define WM8400_LOOPBACK 0x0001 /* LOOPBACK */ +#define WM8400_LOOPBACK_MASK 0x0001 /* LOOPBACK */ +#define WM8400_LOOPBACK_SHIFT 0 /* LOOPBACK */ +#define WM8400_LOOPBACK_WIDTH 1 /* LOOPBACK */ + +/* + * R7 (0x07) - Clocking (1) + */ +#define WM8400_TOCLK_RATE 0x8000 /* TOCLK_RATE */ +#define WM8400_TOCLK_RATE_MASK 0x8000 /* TOCLK_RATE */ +#define WM8400_TOCLK_RATE_SHIFT 15 /* TOCLK_RATE */ +#define WM8400_TOCLK_RATE_WIDTH 1 /* TOCLK_RATE */ +#define WM8400_TOCLK_ENA 0x4000 /* TOCLK_ENA */ +#define WM8400_TOCLK_ENA_MASK 0x4000 /* TOCLK_ENA */ +#define WM8400_TOCLK_ENA_SHIFT 14 /* TOCLK_ENA */ +#define WM8400_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */ +#define WM8400_OPCLKDIV_MASK 0x1E00 /* OPCLKDIV - [12:9] */ +#define WM8400_OPCLKDIV_SHIFT 9 /* OPCLKDIV - [12:9] */ +#define WM8400_OPCLKDIV_WIDTH 4 /* OPCLKDIV - [12:9] */ +#define WM8400_DCLKDIV_MASK 0x01C0 /* DCLKDIV - [8:6] */ +#define WM8400_DCLKDIV_SHIFT 6 /* DCLKDIV - [8:6] */ +#define WM8400_DCLKDIV_WIDTH 3 /* DCLKDIV - [8:6] */ +#define WM8400_BCLK_DIV_MASK 0x001E /* BCLK_DIV - [4:1] */ +#define WM8400_BCLK_DIV_SHIFT 1 /* BCLK_DIV - [4:1] */ +#define WM8400_BCLK_DIV_WIDTH 4 /* BCLK_DIV - [4:1] */ + +/* + * R8 (0x08) - Clocking (2) + */ +#define WM8400_MCLK_SRC 0x8000 /* MCLK_SRC */ +#define WM8400_MCLK_SRC_MASK 0x8000 /* MCLK_SRC */ +#define WM8400_MCLK_SRC_SHIFT 15 /* MCLK_SRC */ +#define WM8400_MCLK_SRC_WIDTH 1 /* MCLK_SRC */ +#define WM8400_SYSCLK_SRC 0x4000 /* SYSCLK_SRC */ +#define WM8400_SYSCLK_SRC_MASK 0x4000 /* SYSCLK_SRC */ +#define WM8400_SYSCLK_SRC_SHIFT 14 /* SYSCLK_SRC */ +#define WM8400_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */ +#define WM8400_CLK_FORCE 0x2000 /* CLK_FORCE */ +#define WM8400_CLK_FORCE_MASK 0x2000 /* CLK_FORCE */ +#define WM8400_CLK_FORCE_SHIFT 13 /* CLK_FORCE */ +#define WM8400_CLK_FORCE_WIDTH 1 /* CLK_FORCE */ +#define WM8400_MCLK_DIV_MASK 0x1800 /* MCLK_DIV - [12:11] */ +#define WM8400_MCLK_DIV_SHIFT 11 /* MCLK_DIV - [12:11] */ +#define WM8400_MCLK_DIV_WIDTH 2 /* MCLK_DIV - [12:11] */ +#define WM8400_MCLK_INV 0x0400 /* MCLK_INV */ +#define WM8400_MCLK_INV_MASK 0x0400 /* MCLK_INV */ +#define WM8400_MCLK_INV_SHIFT 10 /* MCLK_INV */ +#define WM8400_MCLK_INV_WIDTH 1 /* MCLK_INV */ +#define WM8400_ADC_CLKDIV_MASK 0x00E0 /* ADC_CLKDIV - [7:5] */ +#define WM8400_ADC_CLKDIV_SHIFT 5 /* ADC_CLKDIV - [7:5] */ +#define WM8400_ADC_CLKDIV_WIDTH 3 /* ADC_CLKDIV - [7:5] */ +#define WM8400_DAC_CLKDIV_MASK 0x001C /* DAC_CLKDIV - [4:2] */ +#define WM8400_DAC_CLKDIV_SHIFT 2 /* DAC_CLKDIV - [4:2] */ +#define WM8400_DAC_CLKDIV_WIDTH 3 /* DAC_CLKDIV - [4:2] */ + +/* + * R9 (0x09) - Audio Interface (3) + */ +#define WM8400_AIF_MSTR1 0x8000 /* AIF_MSTR1 */ +#define WM8400_AIF_MSTR1_MASK 0x8000 /* AIF_MSTR1 */ +#define WM8400_AIF_MSTR1_SHIFT 15 /* AIF_MSTR1 */ +#define WM8400_AIF_MSTR1_WIDTH 1 /* AIF_MSTR1 */ +#define WM8400_AIF_MSTR2 0x4000 /* AIF_MSTR2 */ +#define WM8400_AIF_MSTR2_MASK 0x4000 /* AIF_MSTR2 */ +#define WM8400_AIF_MSTR2_SHIFT 14 /* AIF_MSTR2 */ +#define WM8400_AIF_MSTR2_WIDTH 1 /* AIF_MSTR2 */ +#define WM8400_AIF_SEL 0x2000 /* AIF_SEL */ +#define WM8400_AIF_SEL_MASK 0x2000 /* AIF_SEL */ +#define WM8400_AIF_SEL_SHIFT 13 /* AIF_SEL */ +#define WM8400_AIF_SEL_WIDTH 1 /* AIF_SEL */ +#define WM8400_ADCLRC_DIR 0x0800 /* ADCLRC_DIR */ +#define WM8400_ADCLRC_DIR_MASK 0x0800 /* ADCLRC_DIR */ +#define WM8400_ADCLRC_DIR_SHIFT 11 /* ADCLRC_DIR */ +#define 
WM8400_ADCLRC_DIR_WIDTH 1 /* ADCLRC_DIR */ +#define WM8400_ADCLRC_RATE_MASK 0x07FF /* ADCLRC_RATE - [10:0] */ +#define WM8400_ADCLRC_RATE_SHIFT 0 /* ADCLRC_RATE - [10:0] */ +#define WM8400_ADCLRC_RATE_WIDTH 11 /* ADCLRC_RATE - [10:0] */ + +/* + * R10 (0x0A) - Audio Interface (4) + */ +#define WM8400_ALRCGPIO1 0x8000 /* ALRCGPIO1 */ +#define WM8400_ALRCGPIO1_MASK 0x8000 /* ALRCGPIO1 */ +#define WM8400_ALRCGPIO1_SHIFT 15 /* ALRCGPIO1 */ +#define WM8400_ALRCGPIO1_WIDTH 1 /* ALRCGPIO1 */ +#define WM8400_ALRCBGPIO6 0x4000 /* ALRCBGPIO6 */ +#define WM8400_ALRCBGPIO6_MASK 0x4000 /* ALRCBGPIO6 */ +#define WM8400_ALRCBGPIO6_SHIFT 14 /* ALRCBGPIO6 */ +#define WM8400_ALRCBGPIO6_WIDTH 1 /* ALRCBGPIO6 */ +#define WM8400_AIF_TRIS 0x2000 /* AIF_TRIS */ +#define WM8400_AIF_TRIS_MASK 0x2000 /* AIF_TRIS */ +#define WM8400_AIF_TRIS_SHIFT 13 /* AIF_TRIS */ +#define WM8400_AIF_TRIS_WIDTH 1 /* AIF_TRIS */ +#define WM8400_DACLRC_DIR 0x0800 /* DACLRC_DIR */ +#define WM8400_DACLRC_DIR_MASK 0x0800 /* DACLRC_DIR */ +#define WM8400_DACLRC_DIR_SHIFT 11 /* DACLRC_DIR */ +#define WM8400_DACLRC_DIR_WIDTH 1 /* DACLRC_DIR */ +#define WM8400_DACLRC_RATE_MASK 0x07FF /* DACLRC_RATE - [10:0] */ +#define WM8400_DACLRC_RATE_SHIFT 0 /* DACLRC_RATE - [10:0] */ +#define WM8400_DACLRC_RATE_WIDTH 11 /* DACLRC_RATE - [10:0] */ + +/* + * R11 (0x0B) - DAC CTRL + */ +#define WM8400_DAC_SDMCLK_RATE 0x2000 /* DAC_SDMCLK_RATE */ +#define WM8400_DAC_SDMCLK_RATE_MASK 0x2000 /* DAC_SDMCLK_RATE */ +#define WM8400_DAC_SDMCLK_RATE_SHIFT 13 /* DAC_SDMCLK_RATE */ +#define WM8400_DAC_SDMCLK_RATE_WIDTH 1 /* DAC_SDMCLK_RATE */ +#define WM8400_AIF_LRCLKRATE 0x0400 /* AIF_LRCLKRATE */ +#define WM8400_AIF_LRCLKRATE_MASK 0x0400 /* AIF_LRCLKRATE */ +#define WM8400_AIF_LRCLKRATE_SHIFT 10 /* AIF_LRCLKRATE */ +#define WM8400_AIF_LRCLKRATE_WIDTH 1 /* AIF_LRCLKRATE */ +#define WM8400_DAC_MONO 0x0200 /* DAC_MONO */ +#define WM8400_DAC_MONO_MASK 0x0200 /* DAC_MONO */ +#define WM8400_DAC_MONO_SHIFT 9 /* DAC_MONO */ +#define WM8400_DAC_MONO_WIDTH 1 /* DAC_MONO */ +#define WM8400_DAC_SB_FILT 0x0100 /* DAC_SB_FILT */ +#define WM8400_DAC_SB_FILT_MASK 0x0100 /* DAC_SB_FILT */ +#define WM8400_DAC_SB_FILT_SHIFT 8 /* DAC_SB_FILT */ +#define WM8400_DAC_SB_FILT_WIDTH 1 /* DAC_SB_FILT */ +#define WM8400_DAC_MUTERATE 0x0080 /* DAC_MUTERATE */ +#define WM8400_DAC_MUTERATE_MASK 0x0080 /* DAC_MUTERATE */ +#define WM8400_DAC_MUTERATE_SHIFT 7 /* DAC_MUTERATE */ +#define WM8400_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */ +#define WM8400_DAC_MUTEMODE 0x0040 /* DAC_MUTEMODE */ +#define WM8400_DAC_MUTEMODE_MASK 0x0040 /* DAC_MUTEMODE */ +#define WM8400_DAC_MUTEMODE_SHIFT 6 /* DAC_MUTEMODE */ +#define WM8400_DAC_MUTEMODE_WIDTH 1 /* DAC_MUTEMODE */ +#define WM8400_DEEMP_MASK 0x0030 /* DEEMP - [5:4] */ +#define WM8400_DEEMP_SHIFT 4 /* DEEMP - [5:4] */ +#define WM8400_DEEMP_WIDTH 2 /* DEEMP - [5:4] */ +#define WM8400_DAC_MUTE 0x0004 /* DAC_MUTE */ +#define WM8400_DAC_MUTE_MASK 0x0004 /* DAC_MUTE */ +#define WM8400_DAC_MUTE_SHIFT 2 /* DAC_MUTE */ +#define WM8400_DAC_MUTE_WIDTH 1 /* DAC_MUTE */ +#define WM8400_DACL_DATINV 0x0002 /* DACL_DATINV */ +#define WM8400_DACL_DATINV_MASK 0x0002 /* DACL_DATINV */ +#define WM8400_DACL_DATINV_SHIFT 1 /* DACL_DATINV */ +#define WM8400_DACL_DATINV_WIDTH 1 /* DACL_DATINV */ +#define WM8400_DACR_DATINV 0x0001 /* DACR_DATINV */ +#define WM8400_DACR_DATINV_MASK 0x0001 /* DACR_DATINV */ +#define WM8400_DACR_DATINV_SHIFT 0 /* DACR_DATINV */ +#define WM8400_DACR_DATINV_WIDTH 1 /* DACR_DATINV */ + +/* + * R12 (0x0C) - Left DAC Digital Volume + */ +#define 
WM8400_DAC_VU 0x0100 /* DAC_VU */ +#define WM8400_DAC_VU_MASK 0x0100 /* DAC_VU */ +#define WM8400_DAC_VU_SHIFT 8 /* DAC_VU */ +#define WM8400_DAC_VU_WIDTH 1 /* DAC_VU */ +#define WM8400_DACL_VOL_MASK 0x00FF /* DACL_VOL - [7:0] */ +#define WM8400_DACL_VOL_SHIFT 0 /* DACL_VOL - [7:0] */ +#define WM8400_DACL_VOL_WIDTH 8 /* DACL_VOL - [7:0] */ + +/* + * R13 (0x0D) - Right DAC Digital Volume + */ +#define WM8400_DAC_VU 0x0100 /* DAC_VU */ +#define WM8400_DAC_VU_MASK 0x0100 /* DAC_VU */ +#define WM8400_DAC_VU_SHIFT 8 /* DAC_VU */ +#define WM8400_DAC_VU_WIDTH 1 /* DAC_VU */ +#define WM8400_DACR_VOL_MASK 0x00FF /* DACR_VOL - [7:0] */ +#define WM8400_DACR_VOL_SHIFT 0 /* DACR_VOL - [7:0] */ +#define WM8400_DACR_VOL_WIDTH 8 /* DACR_VOL - [7:0] */ + +/* + * R14 (0x0E) - Digital Side Tone + */ +#define WM8400_ADCL_DAC_SVOL_MASK 0x1E00 /* ADCL_DAC_SVOL - [12:9] */ +#define WM8400_ADCL_DAC_SVOL_SHIFT 9 /* ADCL_DAC_SVOL - [12:9] */ +#define WM8400_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [12:9] */ +#define WM8400_ADCR_DAC_SVOL_MASK 0x01E0 /* ADCR_DAC_SVOL - [8:5] */ +#define WM8400_ADCR_DAC_SVOL_SHIFT 5 /* ADCR_DAC_SVOL - [8:5] */ +#define WM8400_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [8:5] */ +#define WM8400_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */ +#define WM8400_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */ +#define WM8400_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */ +#define WM8400_ADC_TO_DACR_MASK 0x0003 /* ADC_TO_DACR - [1:0] */ +#define WM8400_ADC_TO_DACR_SHIFT 0 /* ADC_TO_DACR - [1:0] */ +#define WM8400_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [1:0] */ + +/* + * R15 (0x0F) - ADC CTRL + */ +#define WM8400_ADC_HPF_ENA 0x0100 /* ADC_HPF_ENA */ +#define WM8400_ADC_HPF_ENA_MASK 0x0100 /* ADC_HPF_ENA */ +#define WM8400_ADC_HPF_ENA_SHIFT 8 /* ADC_HPF_ENA */ +#define WM8400_ADC_HPF_ENA_WIDTH 1 /* ADC_HPF_ENA */ +#define WM8400_ADC_HPF_CUT_MASK 0x0060 /* ADC_HPF_CUT - [6:5] */ +#define WM8400_ADC_HPF_CUT_SHIFT 5 /* ADC_HPF_CUT - [6:5] */ +#define WM8400_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [6:5] */ +#define WM8400_ADCL_DATINV 0x0002 /* ADCL_DATINV */ +#define WM8400_ADCL_DATINV_MASK 0x0002 /* ADCL_DATINV */ +#define WM8400_ADCL_DATINV_SHIFT 1 /* ADCL_DATINV */ +#define WM8400_ADCL_DATINV_WIDTH 1 /* ADCL_DATINV */ +#define WM8400_ADCR_DATINV 0x0001 /* ADCR_DATINV */ +#define WM8400_ADCR_DATINV_MASK 0x0001 /* ADCR_DATINV */ +#define WM8400_ADCR_DATINV_SHIFT 0 /* ADCR_DATINV */ +#define WM8400_ADCR_DATINV_WIDTH 1 /* ADCR_DATINV */ + +/* + * R16 (0x10) - Left ADC Digital Volume + */ +#define WM8400_ADC_VU 0x0100 /* ADC_VU */ +#define WM8400_ADC_VU_MASK 0x0100 /* ADC_VU */ +#define WM8400_ADC_VU_SHIFT 8 /* ADC_VU */ +#define WM8400_ADC_VU_WIDTH 1 /* ADC_VU */ +#define WM8400_ADCL_VOL_MASK 0x00FF /* ADCL_VOL - [7:0] */ +#define WM8400_ADCL_VOL_SHIFT 0 /* ADCL_VOL - [7:0] */ +#define WM8400_ADCL_VOL_WIDTH 8 /* ADCL_VOL - [7:0] */ + +/* + * R17 (0x11) - Right ADC Digital Volume + */ +#define WM8400_ADC_VU 0x0100 /* ADC_VU */ +#define WM8400_ADC_VU_MASK 0x0100 /* ADC_VU */ +#define WM8400_ADC_VU_SHIFT 8 /* ADC_VU */ +#define WM8400_ADC_VU_WIDTH 1 /* ADC_VU */ +#define WM8400_ADCR_VOL_MASK 0x00FF /* ADCR_VOL - [7:0] */ +#define WM8400_ADCR_VOL_SHIFT 0 /* ADCR_VOL - [7:0] */ +#define WM8400_ADCR_VOL_WIDTH 8 /* ADCR_VOL - [7:0] */ + +/* + * R24 (0x18) - Left Line Input 1&2 Volume + */ +#define WM8400_IPVU 0x0100 /* IPVU */ +#define WM8400_IPVU_MASK 0x0100 /* IPVU */ +#define WM8400_IPVU_SHIFT 8 /* IPVU */ +#define WM8400_IPVU_WIDTH 1 /* IPVU */ +#define WM8400_LI12MUTE 0x0080 /* LI12MUTE */ +#define 
WM8400_LI12MUTE_MASK 0x0080 /* LI12MUTE */ +#define WM8400_LI12MUTE_SHIFT 7 /* LI12MUTE */ +#define WM8400_LI12MUTE_WIDTH 1 /* LI12MUTE */ +#define WM8400_LI12ZC 0x0040 /* LI12ZC */ +#define WM8400_LI12ZC_MASK 0x0040 /* LI12ZC */ +#define WM8400_LI12ZC_SHIFT 6 /* LI12ZC */ +#define WM8400_LI12ZC_WIDTH 1 /* LI12ZC */ +#define WM8400_LIN12VOL_MASK 0x001F /* LIN12VOL - [4:0] */ +#define WM8400_LIN12VOL_SHIFT 0 /* LIN12VOL - [4:0] */ +#define WM8400_LIN12VOL_WIDTH 5 /* LIN12VOL - [4:0] */ + +/* + * R25 (0x19) - Left Line Input 3&4 Volume + */ +#define WM8400_IPVU 0x0100 /* IPVU */ +#define WM8400_IPVU_MASK 0x0100 /* IPVU */ +#define WM8400_IPVU_SHIFT 8 /* IPVU */ +#define WM8400_IPVU_WIDTH 1 /* IPVU */ +#define WM8400_LI34MUTE 0x0080 /* LI34MUTE */ +#define WM8400_LI34MUTE_MASK 0x0080 /* LI34MUTE */ +#define WM8400_LI34MUTE_SHIFT 7 /* LI34MUTE */ +#define WM8400_LI34MUTE_WIDTH 1 /* LI34MUTE */ +#define WM8400_LI34ZC 0x0040 /* LI34ZC */ +#define WM8400_LI34ZC_MASK 0x0040 /* LI34ZC */ +#define WM8400_LI34ZC_SHIFT 6 /* LI34ZC */ +#define WM8400_LI34ZC_WIDTH 1 /* LI34ZC */ +#define WM8400_LIN34VOL_MASK 0x001F /* LIN34VOL - [4:0] */ +#define WM8400_LIN34VOL_SHIFT 0 /* LIN34VOL - [4:0] */ +#define WM8400_LIN34VOL_WIDTH 5 /* LIN34VOL - [4:0] */ + +/* + * R26 (0x1A) - Right Line Input 1&2 Volume + */ +#define WM8400_IPVU 0x0100 /* IPVU */ +#define WM8400_IPVU_MASK 0x0100 /* IPVU */ +#define WM8400_IPVU_SHIFT 8 /* IPVU */ +#define WM8400_IPVU_WIDTH 1 /* IPVU */ +#define WM8400_RI12MUTE 0x0080 /* RI12MUTE */ +#define WM8400_RI12MUTE_MASK 0x0080 /* RI12MUTE */ +#define WM8400_RI12MUTE_SHIFT 7 /* RI12MUTE */ +#define WM8400_RI12MUTE_WIDTH 1 /* RI12MUTE */ +#define WM8400_RI12ZC 0x0040 /* RI12ZC */ +#define WM8400_RI12ZC_MASK 0x0040 /* RI12ZC */ +#define WM8400_RI12ZC_SHIFT 6 /* RI12ZC */ +#define WM8400_RI12ZC_WIDTH 1 /* RI12ZC */ +#define WM8400_RIN12VOL_MASK 0x001F /* RIN12VOL - [4:0] */ +#define WM8400_RIN12VOL_SHIFT 0 /* RIN12VOL - [4:0] */ +#define WM8400_RIN12VOL_WIDTH 5 /* RIN12VOL - [4:0] */ + +/* + * R27 (0x1B) - Right Line Input 3&4 Volume + */ +#define WM8400_IPVU 0x0100 /* IPVU */ +#define WM8400_IPVU_MASK 0x0100 /* IPVU */ +#define WM8400_IPVU_SHIFT 8 /* IPVU */ +#define WM8400_IPVU_WIDTH 1 /* IPVU */ +#define WM8400_RI34MUTE 0x0080 /* RI34MUTE */ +#define WM8400_RI34MUTE_MASK 0x0080 /* RI34MUTE */ +#define WM8400_RI34MUTE_SHIFT 7 /* RI34MUTE */ +#define WM8400_RI34MUTE_WIDTH 1 /* RI34MUTE */ +#define WM8400_RI34ZC 0x0040 /* RI34ZC */ +#define WM8400_RI34ZC_MASK 0x0040 /* RI34ZC */ +#define WM8400_RI34ZC_SHIFT 6 /* RI34ZC */ +#define WM8400_RI34ZC_WIDTH 1 /* RI34ZC */ +#define WM8400_RIN34VOL_MASK 0x001F /* RIN34VOL - [4:0] */ +#define WM8400_RIN34VOL_SHIFT 0 /* RIN34VOL - [4:0] */ +#define WM8400_RIN34VOL_WIDTH 5 /* RIN34VOL - [4:0] */ + +/* + * R28 (0x1C) - Left Output Volume + */ +#define WM8400_OPVU 0x0100 /* OPVU */ +#define WM8400_OPVU_MASK 0x0100 /* OPVU */ +#define WM8400_OPVU_SHIFT 8 /* OPVU */ +#define WM8400_OPVU_WIDTH 1 /* OPVU */ +#define WM8400_LOZC 0x0080 /* LOZC */ +#define WM8400_LOZC_MASK 0x0080 /* LOZC */ +#define WM8400_LOZC_SHIFT 7 /* LOZC */ +#define WM8400_LOZC_WIDTH 1 /* LOZC */ +#define WM8400_LOUTVOL_MASK 0x007F /* LOUTVOL - [6:0] */ +#define WM8400_LOUTVOL_SHIFT 0 /* LOUTVOL - [6:0] */ +#define WM8400_LOUTVOL_WIDTH 7 /* LOUTVOL - [6:0] */ + +/* + * R29 (0x1D) - Right Output Volume + */ +#define WM8400_OPVU 0x0100 /* OPVU */ +#define WM8400_OPVU_MASK 0x0100 /* OPVU */ +#define WM8400_OPVU_SHIFT 8 /* OPVU */ +#define WM8400_OPVU_WIDTH 1 /* OPVU */ +#define WM8400_ROZC 
0x0080 /* ROZC */ +#define WM8400_ROZC_MASK 0x0080 /* ROZC */ +#define WM8400_ROZC_SHIFT 7 /* ROZC */ +#define WM8400_ROZC_WIDTH 1 /* ROZC */ +#define WM8400_ROUTVOL_MASK 0x007F /* ROUTVOL - [6:0] */ +#define WM8400_ROUTVOL_SHIFT 0 /* ROUTVOL - [6:0] */ +#define WM8400_ROUTVOL_WIDTH 7 /* ROUTVOL - [6:0] */ + +/* + * R30 (0x1E) - Line Outputs Volume + */ +#define WM8400_LONMUTE 0x0040 /* LONMUTE */ +#define WM8400_LONMUTE_MASK 0x0040 /* LONMUTE */ +#define WM8400_LONMUTE_SHIFT 6 /* LONMUTE */ +#define WM8400_LONMUTE_WIDTH 1 /* LONMUTE */ +#define WM8400_LOPMUTE 0x0020 /* LOPMUTE */ +#define WM8400_LOPMUTE_MASK 0x0020 /* LOPMUTE */ +#define WM8400_LOPMUTE_SHIFT 5 /* LOPMUTE */ +#define WM8400_LOPMUTE_WIDTH 1 /* LOPMUTE */ +#define WM8400_LOATTN 0x0010 /* LOATTN */ +#define WM8400_LOATTN_MASK 0x0010 /* LOATTN */ +#define WM8400_LOATTN_SHIFT 4 /* LOATTN */ +#define WM8400_LOATTN_WIDTH 1 /* LOATTN */ +#define WM8400_RONMUTE 0x0004 /* RONMUTE */ +#define WM8400_RONMUTE_MASK 0x0004 /* RONMUTE */ +#define WM8400_RONMUTE_SHIFT 2 /* RONMUTE */ +#define WM8400_RONMUTE_WIDTH 1 /* RONMUTE */ +#define WM8400_ROPMUTE 0x0002 /* ROPMUTE */ +#define WM8400_ROPMUTE_MASK 0x0002 /* ROPMUTE */ +#define WM8400_ROPMUTE_SHIFT 1 /* ROPMUTE */ +#define WM8400_ROPMUTE_WIDTH 1 /* ROPMUTE */ +#define WM8400_ROATTN 0x0001 /* ROATTN */ +#define WM8400_ROATTN_MASK 0x0001 /* ROATTN */ +#define WM8400_ROATTN_SHIFT 0 /* ROATTN */ +#define WM8400_ROATTN_WIDTH 1 /* ROATTN */ + +/* + * R31 (0x1F) - Out3/4 Volume + */ +#define WM8400_OUT3MUTE 0x0020 /* OUT3MUTE */ +#define WM8400_OUT3MUTE_MASK 0x0020 /* OUT3MUTE */ +#define WM8400_OUT3MUTE_SHIFT 5 /* OUT3MUTE */ +#define WM8400_OUT3MUTE_WIDTH 1 /* OUT3MUTE */ +#define WM8400_OUT3ATTN 0x0010 /* OUT3ATTN */ +#define WM8400_OUT3ATTN_MASK 0x0010 /* OUT3ATTN */ +#define WM8400_OUT3ATTN_SHIFT 4 /* OUT3ATTN */ +#define WM8400_OUT3ATTN_WIDTH 1 /* OUT3ATTN */ +#define WM8400_OUT4MUTE 0x0002 /* OUT4MUTE */ +#define WM8400_OUT4MUTE_MASK 0x0002 /* OUT4MUTE */ +#define WM8400_OUT4MUTE_SHIFT 1 /* OUT4MUTE */ +#define WM8400_OUT4MUTE_WIDTH 1 /* OUT4MUTE */ +#define WM8400_OUT4ATTN 0x0001 /* OUT4ATTN */ +#define WM8400_OUT4ATTN_MASK 0x0001 /* OUT4ATTN */ +#define WM8400_OUT4ATTN_SHIFT 0 /* OUT4ATTN */ +#define WM8400_OUT4ATTN_WIDTH 1 /* OUT4ATTN */ + +/* + * R32 (0x20) - Left OPGA Volume + */ +#define WM8400_OPVU 0x0100 /* OPVU */ +#define WM8400_OPVU_MASK 0x0100 /* OPVU */ +#define WM8400_OPVU_SHIFT 8 /* OPVU */ +#define WM8400_OPVU_WIDTH 1 /* OPVU */ +#define WM8400_LOPGAZC 0x0080 /* LOPGAZC */ +#define WM8400_LOPGAZC_MASK 0x0080 /* LOPGAZC */ +#define WM8400_LOPGAZC_SHIFT 7 /* LOPGAZC */ +#define WM8400_LOPGAZC_WIDTH 1 /* LOPGAZC */ +#define WM8400_LOPGAVOL_MASK 0x007F /* LOPGAVOL - [6:0] */ +#define WM8400_LOPGAVOL_SHIFT 0 /* LOPGAVOL - [6:0] */ +#define WM8400_LOPGAVOL_WIDTH 7 /* LOPGAVOL - [6:0] */ + +/* + * R33 (0x21) - Right OPGA Volume + */ +#define WM8400_OPVU 0x0100 /* OPVU */ +#define WM8400_OPVU_MASK 0x0100 /* OPVU */ +#define WM8400_OPVU_SHIFT 8 /* OPVU */ +#define WM8400_OPVU_WIDTH 1 /* OPVU */ +#define WM8400_ROPGAZC 0x0080 /* ROPGAZC */ +#define WM8400_ROPGAZC_MASK 0x0080 /* ROPGAZC */ +#define WM8400_ROPGAZC_SHIFT 7 /* ROPGAZC */ +#define WM8400_ROPGAZC_WIDTH 1 /* ROPGAZC */ +#define WM8400_ROPGAVOL_MASK 0x007F /* ROPGAVOL - [6:0] */ +#define WM8400_ROPGAVOL_SHIFT 0 /* ROPGAVOL - [6:0] */ +#define WM8400_ROPGAVOL_WIDTH 7 /* ROPGAVOL - [6:0] */ + +/* + * R34 (0x22) - Speaker Volume + */ +#define WM8400_SPKATTN_MASK 0x0003 /* SPKATTN - [1:0] */ +#define WM8400_SPKATTN_SHIFT 0 /* 
SPKATTN - [1:0] */ +#define WM8400_SPKATTN_WIDTH 2 /* SPKATTN - [1:0] */ + +/* + * R35 (0x23) - ClassD1 + */ +#define WM8400_CDMODE 0x0100 /* CDMODE */ +#define WM8400_CDMODE_MASK 0x0100 /* CDMODE */ +#define WM8400_CDMODE_SHIFT 8 /* CDMODE */ +#define WM8400_CDMODE_WIDTH 1 /* CDMODE */ +#define WM8400_CLASSD_CLK_SEL 0x0080 /* CLASSD_CLK_SEL */ +#define WM8400_CLASSD_CLK_SEL_MASK 0x0080 /* CLASSD_CLK_SEL */ +#define WM8400_CLASSD_CLK_SEL_SHIFT 7 /* CLASSD_CLK_SEL */ +#define WM8400_CLASSD_CLK_SEL_WIDTH 1 /* CLASSD_CLK_SEL */ +#define WM8400_CD_SRCTRL 0x0040 /* CD_SRCTRL */ +#define WM8400_CD_SRCTRL_MASK 0x0040 /* CD_SRCTRL */ +#define WM8400_CD_SRCTRL_SHIFT 6 /* CD_SRCTRL */ +#define WM8400_CD_SRCTRL_WIDTH 1 /* CD_SRCTRL */ +#define WM8400_SPKNOPOP 0x0020 /* SPKNOPOP */ +#define WM8400_SPKNOPOP_MASK 0x0020 /* SPKNOPOP */ +#define WM8400_SPKNOPOP_SHIFT 5 /* SPKNOPOP */ +#define WM8400_SPKNOPOP_WIDTH 1 /* SPKNOPOP */ +#define WM8400_DBLERATE 0x0010 /* DBLERATE */ +#define WM8400_DBLERATE_MASK 0x0010 /* DBLERATE */ +#define WM8400_DBLERATE_SHIFT 4 /* DBLERATE */ +#define WM8400_DBLERATE_WIDTH 1 /* DBLERATE */ +#define WM8400_LOOPTEST 0x0008 /* LOOPTEST */ +#define WM8400_LOOPTEST_MASK 0x0008 /* LOOPTEST */ +#define WM8400_LOOPTEST_SHIFT 3 /* LOOPTEST */ +#define WM8400_LOOPTEST_WIDTH 1 /* LOOPTEST */ +#define WM8400_HALFABBIAS 0x0004 /* HALFABBIAS */ +#define WM8400_HALFABBIAS_MASK 0x0004 /* HALFABBIAS */ +#define WM8400_HALFABBIAS_SHIFT 2 /* HALFABBIAS */ +#define WM8400_HALFABBIAS_WIDTH 1 /* HALFABBIAS */ +#define WM8400_TRIDEL_MASK 0x0003 /* TRIDEL - [1:0] */ +#define WM8400_TRIDEL_SHIFT 0 /* TRIDEL - [1:0] */ +#define WM8400_TRIDEL_WIDTH 2 /* TRIDEL - [1:0] */ + +/* + * R37 (0x25) - ClassD3 + */ +#define WM8400_DCGAIN_MASK 0x0038 /* DCGAIN - [5:3] */ +#define WM8400_DCGAIN_SHIFT 3 /* DCGAIN - [5:3] */ +#define WM8400_DCGAIN_WIDTH 3 /* DCGAIN - [5:3] */ +#define WM8400_ACGAIN_MASK 0x0007 /* ACGAIN - [2:0] */ +#define WM8400_ACGAIN_SHIFT 0 /* ACGAIN - [2:0] */ +#define WM8400_ACGAIN_WIDTH 3 /* ACGAIN - [2:0] */ + +/* + * R39 (0x27) - Input Mixer1 + */ +#define WM8400_AINLMODE_MASK 0x000C /* AINLMODE - [3:2] */ +#define WM8400_AINLMODE_SHIFT 2 /* AINLMODE - [3:2] */ +#define WM8400_AINLMODE_WIDTH 2 /* AINLMODE - [3:2] */ +#define WM8400_AINRMODE_MASK 0x0003 /* AINRMODE - [1:0] */ +#define WM8400_AINRMODE_SHIFT 0 /* AINRMODE - [1:0] */ +#define WM8400_AINRMODE_WIDTH 2 /* AINRMODE - [1:0] */ + +/* + * R40 (0x28) - Input Mixer2 + */ +#define WM8400_LMP4 0x0080 /* LMP4 */ +#define WM8400_LMP4_MASK 0x0080 /* LMP4 */ +#define WM8400_LMP4_SHIFT 7 /* LMP4 */ +#define WM8400_LMP4_WIDTH 1 /* LMP4 */ +#define WM8400_LMN3 0x0040 /* LMN3 */ +#define WM8400_LMN3_MASK 0x0040 /* LMN3 */ +#define WM8400_LMN3_SHIFT 6 /* LMN3 */ +#define WM8400_LMN3_WIDTH 1 /* LMN3 */ +#define WM8400_LMP2 0x0020 /* LMP2 */ +#define WM8400_LMP2_MASK 0x0020 /* LMP2 */ +#define WM8400_LMP2_SHIFT 5 /* LMP2 */ +#define WM8400_LMP2_WIDTH 1 /* LMP2 */ +#define WM8400_LMN1 0x0010 /* LMN1 */ +#define WM8400_LMN1_MASK 0x0010 /* LMN1 */ +#define WM8400_LMN1_SHIFT 4 /* LMN1 */ +#define WM8400_LMN1_WIDTH 1 /* LMN1 */ +#define WM8400_RMP4 0x0008 /* RMP4 */ +#define WM8400_RMP4_MASK 0x0008 /* RMP4 */ +#define WM8400_RMP4_SHIFT 3 /* RMP4 */ +#define WM8400_RMP4_WIDTH 1 /* RMP4 */ +#define WM8400_RMN3 0x0004 /* RMN3 */ +#define WM8400_RMN3_MASK 0x0004 /* RMN3 */ +#define WM8400_RMN3_SHIFT 2 /* RMN3 */ +#define WM8400_RMN3_WIDTH 1 /* RMN3 */ +#define WM8400_RMP2 0x0002 /* RMP2 */ +#define WM8400_RMP2_MASK 0x0002 /* RMP2 */ +#define 
WM8400_RMP2_SHIFT 1 /* RMP2 */ +#define WM8400_RMP2_WIDTH 1 /* RMP2 */ +#define WM8400_RMN1 0x0001 /* RMN1 */ +#define WM8400_RMN1_MASK 0x0001 /* RMN1 */ +#define WM8400_RMN1_SHIFT 0 /* RMN1 */ +#define WM8400_RMN1_WIDTH 1 /* RMN1 */ + +/* + * R41 (0x29) - Input Mixer3 + */ +#define WM8400_L34MNB 0x0100 /* L34MNB */ +#define WM8400_L34MNB_MASK 0x0100 /* L34MNB */ +#define WM8400_L34MNB_SHIFT 8 /* L34MNB */ +#define WM8400_L34MNB_WIDTH 1 /* L34MNB */ +#define WM8400_L34MNBST 0x0080 /* L34MNBST */ +#define WM8400_L34MNBST_MASK 0x0080 /* L34MNBST */ +#define WM8400_L34MNBST_SHIFT 7 /* L34MNBST */ +#define WM8400_L34MNBST_WIDTH 1 /* L34MNBST */ +#define WM8400_L12MNB 0x0020 /* L12MNB */ +#define WM8400_L12MNB_MASK 0x0020 /* L12MNB */ +#define WM8400_L12MNB_SHIFT 5 /* L12MNB */ +#define WM8400_L12MNB_WIDTH 1 /* L12MNB */ +#define WM8400_L12MNBST 0x0010 /* L12MNBST */ +#define WM8400_L12MNBST_MASK 0x0010 /* L12MNBST */ +#define WM8400_L12MNBST_SHIFT 4 /* L12MNBST */ +#define WM8400_L12MNBST_WIDTH 1 /* L12MNBST */ +#define WM8400_LDBVOL_MASK 0x0007 /* LDBVOL - [2:0] */ +#define WM8400_LDBVOL_SHIFT 0 /* LDBVOL - [2:0] */ +#define WM8400_LDBVOL_WIDTH 3 /* LDBVOL - [2:0] */ + +/* + * R42 (0x2A) - Input Mixer4 + */ +#define WM8400_R34MNB 0x0100 /* R34MNB */ +#define WM8400_R34MNB_MASK 0x0100 /* R34MNB */ +#define WM8400_R34MNB_SHIFT 8 /* R34MNB */ +#define WM8400_R34MNB_WIDTH 1 /* R34MNB */ +#define WM8400_R34MNBST 0x0080 /* R34MNBST */ +#define WM8400_R34MNBST_MASK 0x0080 /* R34MNBST */ +#define WM8400_R34MNBST_SHIFT 7 /* R34MNBST */ +#define WM8400_R34MNBST_WIDTH 1 /* R34MNBST */ +#define WM8400_R12MNB 0x0020 /* R12MNB */ +#define WM8400_R12MNB_MASK 0x0020 /* R12MNB */ +#define WM8400_R12MNB_SHIFT 5 /* R12MNB */ +#define WM8400_R12MNB_WIDTH 1 /* R12MNB */ +#define WM8400_R12MNBST 0x0010 /* R12MNBST */ +#define WM8400_R12MNBST_MASK 0x0010 /* R12MNBST */ +#define WM8400_R12MNBST_SHIFT 4 /* R12MNBST */ +#define WM8400_R12MNBST_WIDTH 1 /* R12MNBST */ +#define WM8400_RDBVOL_MASK 0x0007 /* RDBVOL - [2:0] */ +#define WM8400_RDBVOL_SHIFT 0 /* RDBVOL - [2:0] */ +#define WM8400_RDBVOL_WIDTH 3 /* RDBVOL - [2:0] */ + +/* + * R43 (0x2B) - Input Mixer5 + */ +#define WM8400_LI2BVOL_MASK 0x01C0 /* LI2BVOL - [8:6] */ +#define WM8400_LI2BVOL_SHIFT 6 /* LI2BVOL - [8:6] */ +#define WM8400_LI2BVOL_WIDTH 3 /* LI2BVOL - [8:6] */ +#define WM8400_LR4BVOL_MASK 0x0038 /* LR4BVOL - [5:3] */ +#define WM8400_LR4BVOL_SHIFT 3 /* LR4BVOL - [5:3] */ +#define WM8400_LR4BVOL_WIDTH 3 /* LR4BVOL - [5:3] */ +#define WM8400_LL4BVOL_MASK 0x0007 /* LL4BVOL - [2:0] */ +#define WM8400_LL4BVOL_SHIFT 0 /* LL4BVOL - [2:0] */ +#define WM8400_LL4BVOL_WIDTH 3 /* LL4BVOL - [2:0] */ + +/* + * R44 (0x2C) - Input Mixer6 + */ +#define WM8400_RI2BVOL_MASK 0x01C0 /* RI2BVOL - [8:6] */ +#define WM8400_RI2BVOL_SHIFT 6 /* RI2BVOL - [8:6] */ +#define WM8400_RI2BVOL_WIDTH 3 /* RI2BVOL - [8:6] */ +#define WM8400_RL4BVOL_MASK 0x0038 /* RL4BVOL - [5:3] */ +#define WM8400_RL4BVOL_SHIFT 3 /* RL4BVOL - [5:3] */ +#define WM8400_RL4BVOL_WIDTH 3 /* RL4BVOL - [5:3] */ +#define WM8400_RR4BVOL_MASK 0x0007 /* RR4BVOL - [2:0] */ +#define WM8400_RR4BVOL_SHIFT 0 /* RR4BVOL - [2:0] */ +#define WM8400_RR4BVOL_WIDTH 3 /* RR4BVOL - [2:0] */ + +/* + * R45 (0x2D) - Output Mixer1 + */ +#define WM8400_LRBLO 0x0080 /* LRBLO */ +#define WM8400_LRBLO_MASK 0x0080 /* LRBLO */ +#define WM8400_LRBLO_SHIFT 7 /* LRBLO */ +#define WM8400_LRBLO_WIDTH 1 /* LRBLO */ +#define WM8400_LLBLO 0x0040 /* LLBLO */ +#define WM8400_LLBLO_MASK 0x0040 /* LLBLO */ +#define WM8400_LLBLO_SHIFT 6 /* LLBLO */ 
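The single-bit switches in these mixer registers are meant to be ORed together into one register value. Below is a hedged sketch of a typical LIN1/LIN2 hookup into the left input PGA through Input Mixer2; the routing choice is an example, not board advice.

/*
 * Illustrative only: LIN1 to the inverting and LIN2 to the non-inverting
 * input of the left PGA, everything else disconnected.
 */
static u16 example_input_mixer2_routing(void)
{
	return WM8400_LMN1 | WM8400_LMP2;
}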
+#define WM8400_LLBLO_WIDTH 1 /* LLBLO */ +#define WM8400_LRI3LO 0x0020 /* LRI3LO */ +#define WM8400_LRI3LO_MASK 0x0020 /* LRI3LO */ +#define WM8400_LRI3LO_SHIFT 5 /* LRI3LO */ +#define WM8400_LRI3LO_WIDTH 1 /* LRI3LO */ +#define WM8400_LLI3LO 0x0010 /* LLI3LO */ +#define WM8400_LLI3LO_MASK 0x0010 /* LLI3LO */ +#define WM8400_LLI3LO_SHIFT 4 /* LLI3LO */ +#define WM8400_LLI3LO_WIDTH 1 /* LLI3LO */ +#define WM8400_LR12LO 0x0008 /* LR12LO */ +#define WM8400_LR12LO_MASK 0x0008 /* LR12LO */ +#define WM8400_LR12LO_SHIFT 3 /* LR12LO */ +#define WM8400_LR12LO_WIDTH 1 /* LR12LO */ +#define WM8400_LL12LO 0x0004 /* LL12LO */ +#define WM8400_LL12LO_MASK 0x0004 /* LL12LO */ +#define WM8400_LL12LO_SHIFT 2 /* LL12LO */ +#define WM8400_LL12LO_WIDTH 1 /* LL12LO */ +#define WM8400_LDLO 0x0001 /* LDLO */ +#define WM8400_LDLO_MASK 0x0001 /* LDLO */ +#define WM8400_LDLO_SHIFT 0 /* LDLO */ +#define WM8400_LDLO_WIDTH 1 /* LDLO */ + +/* + * R46 (0x2E) - Output Mixer2 + */ +#define WM8400_RLBRO 0x0080 /* RLBRO */ +#define WM8400_RLBRO_MASK 0x0080 /* RLBRO */ +#define WM8400_RLBRO_SHIFT 7 /* RLBRO */ +#define WM8400_RLBRO_WIDTH 1 /* RLBRO */ +#define WM8400_RRBRO 0x0040 /* RRBRO */ +#define WM8400_RRBRO_MASK 0x0040 /* RRBRO */ +#define WM8400_RRBRO_SHIFT 6 /* RRBRO */ +#define WM8400_RRBRO_WIDTH 1 /* RRBRO */ +#define WM8400_RLI3RO 0x0020 /* RLI3RO */ +#define WM8400_RLI3RO_MASK 0x0020 /* RLI3RO */ +#define WM8400_RLI3RO_SHIFT 5 /* RLI3RO */ +#define WM8400_RLI3RO_WIDTH 1 /* RLI3RO */ +#define WM8400_RRI3RO 0x0010 /* RRI3RO */ +#define WM8400_RRI3RO_MASK 0x0010 /* RRI3RO */ +#define WM8400_RRI3RO_SHIFT 4 /* RRI3RO */ +#define WM8400_RRI3RO_WIDTH 1 /* RRI3RO */ +#define WM8400_RL12RO 0x0008 /* RL12RO */ +#define WM8400_RL12RO_MASK 0x0008 /* RL12RO */ +#define WM8400_RL12RO_SHIFT 3 /* RL12RO */ +#define WM8400_RL12RO_WIDTH 1 /* RL12RO */ +#define WM8400_RR12RO 0x0004 /* RR12RO */ +#define WM8400_RR12RO_MASK 0x0004 /* RR12RO */ +#define WM8400_RR12RO_SHIFT 2 /* RR12RO */ +#define WM8400_RR12RO_WIDTH 1 /* RR12RO */ +#define WM8400_RDRO 0x0001 /* RDRO */ +#define WM8400_RDRO_MASK 0x0001 /* RDRO */ +#define WM8400_RDRO_SHIFT 0 /* RDRO */ +#define WM8400_RDRO_WIDTH 1 /* RDRO */ + +/* + * R47 (0x2F) - Output Mixer3 + */ +#define WM8400_LLI3LOVOL_MASK 0x01C0 /* LLI3LOVOL - [8:6] */ +#define WM8400_LLI3LOVOL_SHIFT 6 /* LLI3LOVOL - [8:6] */ +#define WM8400_LLI3LOVOL_WIDTH 3 /* LLI3LOVOL - [8:6] */ +#define WM8400_LR12LOVOL_MASK 0x0038 /* LR12LOVOL - [5:3] */ +#define WM8400_LR12LOVOL_SHIFT 3 /* LR12LOVOL - [5:3] */ +#define WM8400_LR12LOVOL_WIDTH 3 /* LR12LOVOL - [5:3] */ +#define WM8400_LL12LOVOL_MASK 0x0007 /* LL12LOVOL - [2:0] */ +#define WM8400_LL12LOVOL_SHIFT 0 /* LL12LOVOL - [2:0] */ +#define WM8400_LL12LOVOL_WIDTH 3 /* LL12LOVOL - [2:0] */ + +/* + * R48 (0x30) - Output Mixer4 + */ +#define WM8400_RRI3ROVOL_MASK 0x01C0 /* RRI3ROVOL - [8:6] */ +#define WM8400_RRI3ROVOL_SHIFT 6 /* RRI3ROVOL - [8:6] */ +#define WM8400_RRI3ROVOL_WIDTH 3 /* RRI3ROVOL - [8:6] */ +#define WM8400_RL12ROVOL_MASK 0x0038 /* RL12ROVOL - [5:3] */ +#define WM8400_RL12ROVOL_SHIFT 3 /* RL12ROVOL - [5:3] */ +#define WM8400_RL12ROVOL_WIDTH 3 /* RL12ROVOL - [5:3] */ +#define WM8400_RR12ROVOL_MASK 0x0007 /* RR12ROVOL - [2:0] */ +#define WM8400_RR12ROVOL_SHIFT 0 /* RR12ROVOL - [2:0] */ +#define WM8400_RR12ROVOL_WIDTH 3 /* RR12ROVOL - [2:0] */ + +/* + * R49 (0x31) - Output Mixer5 + */ +#define WM8400_LRI3LOVOL_MASK 0x01C0 /* LRI3LOVOL - [8:6] */ +#define WM8400_LRI3LOVOL_SHIFT 6 /* LRI3LOVOL - [8:6] */ +#define WM8400_LRI3LOVOL_WIDTH 3 /* LRI3LOVOL - [8:6] 
*/ +#define WM8400_LRBLOVOL_MASK 0x0038 /* LRBLOVOL - [5:3] */ +#define WM8400_LRBLOVOL_SHIFT 3 /* LRBLOVOL - [5:3] */ +#define WM8400_LRBLOVOL_WIDTH 3 /* LRBLOVOL - [5:3] */ +#define WM8400_LLBLOVOL_MASK 0x0007 /* LLBLOVOL - [2:0] */ +#define WM8400_LLBLOVOL_SHIFT 0 /* LLBLOVOL - [2:0] */ +#define WM8400_LLBLOVOL_WIDTH 3 /* LLBLOVOL - [2:0] */ + +/* + * R50 (0x32) - Output Mixer6 + */ +#define WM8400_RLI3ROVOL_MASK 0x01C0 /* RLI3ROVOL - [8:6] */ +#define WM8400_RLI3ROVOL_SHIFT 6 /* RLI3ROVOL - [8:6] */ +#define WM8400_RLI3ROVOL_WIDTH 3 /* RLI3ROVOL - [8:6] */ +#define WM8400_RLBROVOL_MASK 0x0038 /* RLBROVOL - [5:3] */ +#define WM8400_RLBROVOL_SHIFT 3 /* RLBROVOL - [5:3] */ +#define WM8400_RLBROVOL_WIDTH 3 /* RLBROVOL - [5:3] */ +#define WM8400_RRBROVOL_MASK 0x0007 /* RRBROVOL - [2:0] */ +#define WM8400_RRBROVOL_SHIFT 0 /* RRBROVOL - [2:0] */ +#define WM8400_RRBROVOL_WIDTH 3 /* RRBROVOL - [2:0] */ + +/* + * R51 (0x33) - Out3/4 Mixer + */ +#define WM8400_VSEL_MASK 0x0180 /* VSEL - [8:7] */ +#define WM8400_VSEL_SHIFT 7 /* VSEL - [8:7] */ +#define WM8400_VSEL_WIDTH 2 /* VSEL - [8:7] */ +#define WM8400_LI4O3 0x0020 /* LI4O3 */ +#define WM8400_LI4O3_MASK 0x0020 /* LI4O3 */ +#define WM8400_LI4O3_SHIFT 5 /* LI4O3 */ +#define WM8400_LI4O3_WIDTH 1 /* LI4O3 */ +#define WM8400_LPGAO3 0x0010 /* LPGAO3 */ +#define WM8400_LPGAO3_MASK 0x0010 /* LPGAO3 */ +#define WM8400_LPGAO3_SHIFT 4 /* LPGAO3 */ +#define WM8400_LPGAO3_WIDTH 1 /* LPGAO3 */ +#define WM8400_RI4O4 0x0002 /* RI4O4 */ +#define WM8400_RI4O4_MASK 0x0002 /* RI4O4 */ +#define WM8400_RI4O4_SHIFT 1 /* RI4O4 */ +#define WM8400_RI4O4_WIDTH 1 /* RI4O4 */ +#define WM8400_RPGAO4 0x0001 /* RPGAO4 */ +#define WM8400_RPGAO4_MASK 0x0001 /* RPGAO4 */ +#define WM8400_RPGAO4_SHIFT 0 /* RPGAO4 */ +#define WM8400_RPGAO4_WIDTH 1 /* RPGAO4 */ + +/* + * R52 (0x34) - Line Mixer1 + */ +#define WM8400_LLOPGALON 0x0040 /* LLOPGALON */ +#define WM8400_LLOPGALON_MASK 0x0040 /* LLOPGALON */ +#define WM8400_LLOPGALON_SHIFT 6 /* LLOPGALON */ +#define WM8400_LLOPGALON_WIDTH 1 /* LLOPGALON */ +#define WM8400_LROPGALON 0x0020 /* LROPGALON */ +#define WM8400_LROPGALON_MASK 0x0020 /* LROPGALON */ +#define WM8400_LROPGALON_SHIFT 5 /* LROPGALON */ +#define WM8400_LROPGALON_WIDTH 1 /* LROPGALON */ +#define WM8400_LOPLON 0x0010 /* LOPLON */ +#define WM8400_LOPLON_MASK 0x0010 /* LOPLON */ +#define WM8400_LOPLON_SHIFT 4 /* LOPLON */ +#define WM8400_LOPLON_WIDTH 1 /* LOPLON */ +#define WM8400_LR12LOP 0x0004 /* LR12LOP */ +#define WM8400_LR12LOP_MASK 0x0004 /* LR12LOP */ +#define WM8400_LR12LOP_SHIFT 2 /* LR12LOP */ +#define WM8400_LR12LOP_WIDTH 1 /* LR12LOP */ +#define WM8400_LL12LOP 0x0002 /* LL12LOP */ +#define WM8400_LL12LOP_MASK 0x0002 /* LL12LOP */ +#define WM8400_LL12LOP_SHIFT 1 /* LL12LOP */ +#define WM8400_LL12LOP_WIDTH 1 /* LL12LOP */ +#define WM8400_LLOPGALOP 0x0001 /* LLOPGALOP */ +#define WM8400_LLOPGALOP_MASK 0x0001 /* LLOPGALOP */ +#define WM8400_LLOPGALOP_SHIFT 0 /* LLOPGALOP */ +#define WM8400_LLOPGALOP_WIDTH 1 /* LLOPGALOP */ + +/* + * R53 (0x35) - Line Mixer2 + */ +#define WM8400_RROPGARON 0x0040 /* RROPGARON */ +#define WM8400_RROPGARON_MASK 0x0040 /* RROPGARON */ +#define WM8400_RROPGARON_SHIFT 6 /* RROPGARON */ +#define WM8400_RROPGARON_WIDTH 1 /* RROPGARON */ +#define WM8400_RLOPGARON 0x0020 /* RLOPGARON */ +#define WM8400_RLOPGARON_MASK 0x0020 /* RLOPGARON */ +#define WM8400_RLOPGARON_SHIFT 5 /* RLOPGARON */ +#define WM8400_RLOPGARON_WIDTH 1 /* RLOPGARON */ +#define WM8400_ROPRON 0x0010 /* ROPRON */ +#define WM8400_ROPRON_MASK 0x0010 /* ROPRON */ +#define 
WM8400_ROPRON_SHIFT 4 /* ROPRON */ +#define WM8400_ROPRON_WIDTH 1 /* ROPRON */ +#define WM8400_RL12ROP 0x0004 /* RL12ROP */ +#define WM8400_RL12ROP_MASK 0x0004 /* RL12ROP */ +#define WM8400_RL12ROP_SHIFT 2 /* RL12ROP */ +#define WM8400_RL12ROP_WIDTH 1 /* RL12ROP */ +#define WM8400_RR12ROP 0x0002 /* RR12ROP */ +#define WM8400_RR12ROP_MASK 0x0002 /* RR12ROP */ +#define WM8400_RR12ROP_SHIFT 1 /* RR12ROP */ +#define WM8400_RR12ROP_WIDTH 1 /* RR12ROP */ +#define WM8400_RROPGAROP 0x0001 /* RROPGAROP */ +#define WM8400_RROPGAROP_MASK 0x0001 /* RROPGAROP */ +#define WM8400_RROPGAROP_SHIFT 0 /* RROPGAROP */ +#define WM8400_RROPGAROP_WIDTH 1 /* RROPGAROP */ + +/* + * R54 (0x36) - Speaker Mixer + */ +#define WM8400_LB2SPK 0x0080 /* LB2SPK */ +#define WM8400_LB2SPK_MASK 0x0080 /* LB2SPK */ +#define WM8400_LB2SPK_SHIFT 7 /* LB2SPK */ +#define WM8400_LB2SPK_WIDTH 1 /* LB2SPK */ +#define WM8400_RB2SPK 0x0040 /* RB2SPK */ +#define WM8400_RB2SPK_MASK 0x0040 /* RB2SPK */ +#define WM8400_RB2SPK_SHIFT 6 /* RB2SPK */ +#define WM8400_RB2SPK_WIDTH 1 /* RB2SPK */ +#define WM8400_LI2SPK 0x0020 /* LI2SPK */ +#define WM8400_LI2SPK_MASK 0x0020 /* LI2SPK */ +#define WM8400_LI2SPK_SHIFT 5 /* LI2SPK */ +#define WM8400_LI2SPK_WIDTH 1 /* LI2SPK */ +#define WM8400_RI2SPK 0x0010 /* RI2SPK */ +#define WM8400_RI2SPK_MASK 0x0010 /* RI2SPK */ +#define WM8400_RI2SPK_SHIFT 4 /* RI2SPK */ +#define WM8400_RI2SPK_WIDTH 1 /* RI2SPK */ +#define WM8400_LOPGASPK 0x0008 /* LOPGASPK */ +#define WM8400_LOPGASPK_MASK 0x0008 /* LOPGASPK */ +#define WM8400_LOPGASPK_SHIFT 3 /* LOPGASPK */ +#define WM8400_LOPGASPK_WIDTH 1 /* LOPGASPK */ +#define WM8400_ROPGASPK 0x0004 /* ROPGASPK */ +#define WM8400_ROPGASPK_MASK 0x0004 /* ROPGASPK */ +#define WM8400_ROPGASPK_SHIFT 2 /* ROPGASPK */ +#define WM8400_ROPGASPK_WIDTH 1 /* ROPGASPK */ +#define WM8400_LDSPK 0x0002 /* LDSPK */ +#define WM8400_LDSPK_MASK 0x0002 /* LDSPK */ +#define WM8400_LDSPK_SHIFT 1 /* LDSPK */ +#define WM8400_LDSPK_WIDTH 1 /* LDSPK */ +#define WM8400_RDSPK 0x0001 /* RDSPK */ +#define WM8400_RDSPK_MASK 0x0001 /* RDSPK */ +#define WM8400_RDSPK_SHIFT 0 /* RDSPK */ +#define WM8400_RDSPK_WIDTH 1 /* RDSPK */ + +/* + * R55 (0x37) - Additional Control + */ +#define WM8400_VROI 0x0001 /* VROI */ +#define WM8400_VROI_MASK 0x0001 /* VROI */ +#define WM8400_VROI_SHIFT 0 /* VROI */ +#define WM8400_VROI_WIDTH 1 /* VROI */ + +/* + * R56 (0x38) - AntiPOP1 + */ +#define WM8400_DIS_LLINE 0x0020 /* DIS_LLINE */ +#define WM8400_DIS_LLINE_MASK 0x0020 /* DIS_LLINE */ +#define WM8400_DIS_LLINE_SHIFT 5 /* DIS_LLINE */ +#define WM8400_DIS_LLINE_WIDTH 1 /* DIS_LLINE */ +#define WM8400_DIS_RLINE 0x0010 /* DIS_RLINE */ +#define WM8400_DIS_RLINE_MASK 0x0010 /* DIS_RLINE */ +#define WM8400_DIS_RLINE_SHIFT 4 /* DIS_RLINE */ +#define WM8400_DIS_RLINE_WIDTH 1 /* DIS_RLINE */ +#define WM8400_DIS_OUT3 0x0008 /* DIS_OUT3 */ +#define WM8400_DIS_OUT3_MASK 0x0008 /* DIS_OUT3 */ +#define WM8400_DIS_OUT3_SHIFT 3 /* DIS_OUT3 */ +#define WM8400_DIS_OUT3_WIDTH 1 /* DIS_OUT3 */ +#define WM8400_DIS_OUT4 0x0004 /* DIS_OUT4 */ +#define WM8400_DIS_OUT4_MASK 0x0004 /* DIS_OUT4 */ +#define WM8400_DIS_OUT4_SHIFT 2 /* DIS_OUT4 */ +#define WM8400_DIS_OUT4_WIDTH 1 /* DIS_OUT4 */ +#define WM8400_DIS_LOUT 0x0002 /* DIS_LOUT */ +#define WM8400_DIS_LOUT_MASK 0x0002 /* DIS_LOUT */ +#define WM8400_DIS_LOUT_SHIFT 1 /* DIS_LOUT */ +#define WM8400_DIS_LOUT_WIDTH 1 /* DIS_LOUT */ +#define WM8400_DIS_ROUT 0x0001 /* DIS_ROUT */ +#define WM8400_DIS_ROUT_MASK 0x0001 /* DIS_ROUT */ +#define WM8400_DIS_ROUT_SHIFT 0 /* DIS_ROUT */ +#define 
WM8400_DIS_ROUT_WIDTH 1 /* DIS_ROUT */ + +/* + * R57 (0x39) - AntiPOP2 + */ +#define WM8400_SOFTST 0x0040 /* SOFTST */ +#define WM8400_SOFTST_MASK 0x0040 /* SOFTST */ +#define WM8400_SOFTST_SHIFT 6 /* SOFTST */ +#define WM8400_SOFTST_WIDTH 1 /* SOFTST */ +#define WM8400_BUFIOEN 0x0008 /* BUFIOEN */ +#define WM8400_BUFIOEN_MASK 0x0008 /* BUFIOEN */ +#define WM8400_BUFIOEN_SHIFT 3 /* BUFIOEN */ +#define WM8400_BUFIOEN_WIDTH 1 /* BUFIOEN */ +#define WM8400_BUFDCOPEN 0x0004 /* BUFDCOPEN */ +#define WM8400_BUFDCOPEN_MASK 0x0004 /* BUFDCOPEN */ +#define WM8400_BUFDCOPEN_SHIFT 2 /* BUFDCOPEN */ +#define WM8400_BUFDCOPEN_WIDTH 1 /* BUFDCOPEN */ +#define WM8400_POBCTRL 0x0002 /* POBCTRL */ +#define WM8400_POBCTRL_MASK 0x0002 /* POBCTRL */ +#define WM8400_POBCTRL_SHIFT 1 /* POBCTRL */ +#define WM8400_POBCTRL_WIDTH 1 /* POBCTRL */ +#define WM8400_VMIDTOG 0x0001 /* VMIDTOG */ +#define WM8400_VMIDTOG_MASK 0x0001 /* VMIDTOG */ +#define WM8400_VMIDTOG_SHIFT 0 /* VMIDTOG */ +#define WM8400_VMIDTOG_WIDTH 1 /* VMIDTOG */ + +/* + * R58 (0x3A) - MICBIAS + */ +#define WM8400_MCDSCTH_MASK 0x00C0 /* MCDSCTH - [7:6] */ +#define WM8400_MCDSCTH_SHIFT 6 /* MCDSCTH - [7:6] */ +#define WM8400_MCDSCTH_WIDTH 2 /* MCDSCTH - [7:6] */ +#define WM8400_MCDTHR_MASK 0x0038 /* MCDTHR - [5:3] */ +#define WM8400_MCDTHR_SHIFT 3 /* MCDTHR - [5:3] */ +#define WM8400_MCDTHR_WIDTH 3 /* MCDTHR - [5:3] */ +#define WM8400_MCD 0x0004 /* MCD */ +#define WM8400_MCD_MASK 0x0004 /* MCD */ +#define WM8400_MCD_SHIFT 2 /* MCD */ +#define WM8400_MCD_WIDTH 1 /* MCD */ +#define WM8400_MBSEL 0x0001 /* MBSEL */ +#define WM8400_MBSEL_MASK 0x0001 /* MBSEL */ +#define WM8400_MBSEL_SHIFT 0 /* MBSEL */ +#define WM8400_MBSEL_WIDTH 1 /* MBSEL */ + +/* + * R60 (0x3C) - FLL Control 1 + */ +#define WM8400_FLL_REF_FREQ 0x1000 /* FLL_REF_FREQ */ +#define WM8400_FLL_REF_FREQ_MASK 0x1000 /* FLL_REF_FREQ */ +#define WM8400_FLL_REF_FREQ_SHIFT 12 /* FLL_REF_FREQ */ +#define WM8400_FLL_REF_FREQ_WIDTH 1 /* FLL_REF_FREQ */ +#define WM8400_FLL_CLK_SRC_MASK 0x0C00 /* FLL_CLK_SRC - [11:10] */ +#define WM8400_FLL_CLK_SRC_SHIFT 10 /* FLL_CLK_SRC - [11:10] */ +#define WM8400_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [11:10] */ +#define WM8400_FLL_FRAC 0x0200 /* FLL_FRAC */ +#define WM8400_FLL_FRAC_MASK 0x0200 /* FLL_FRAC */ +#define WM8400_FLL_FRAC_SHIFT 9 /* FLL_FRAC */ +#define WM8400_FLL_FRAC_WIDTH 1 /* FLL_FRAC */ +#define WM8400_FLL_OSC_ENA 0x0100 /* FLL_OSC_ENA */ +#define WM8400_FLL_OSC_ENA_MASK 0x0100 /* FLL_OSC_ENA */ +#define WM8400_FLL_OSC_ENA_SHIFT 8 /* FLL_OSC_ENA */ +#define WM8400_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */ +#define WM8400_FLL_CTRL_RATE_MASK 0x00E0 /* FLL_CTRL_RATE - [7:5] */ +#define WM8400_FLL_CTRL_RATE_SHIFT 5 /* FLL_CTRL_RATE - [7:5] */ +#define WM8400_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [7:5] */ +#define WM8400_FLL_FRATIO_MASK 0x001F /* FLL_FRATIO - [4:0] */ +#define WM8400_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [4:0] */ +#define WM8400_FLL_FRATIO_WIDTH 5 /* FLL_FRATIO - [4:0] */ + +/* + * R61 (0x3D) - FLL Control 2 + */ +#define WM8400_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */ +#define WM8400_FLL_K_SHIFT 0 /* FLL_K - [15:0] */ +#define WM8400_FLL_K_WIDTH 16 /* FLL_K - [15:0] */ + +/* + * R62 (0x3E) - FLL Control 3 + */ +#define WM8400_FLL_N_MASK 0x03FF /* FLL_N - [9:0] */ +#define WM8400_FLL_N_SHIFT 0 /* FLL_N - [9:0] */ +#define WM8400_FLL_N_WIDTH 10 /* FLL_N - [9:0] */ + +/* + * R63 (0x3F) - FLL Control 4 + */ +#define WM8400_FLL_TRK_GAIN_MASK 0x0078 /* FLL_TRK_GAIN - [6:3] */ +#define WM8400_FLL_TRK_GAIN_SHIFT 3 /* FLL_TRK_GAIN - [6:3] */ +#define 
WM8400_FLL_TRK_GAIN_WIDTH 4 /* FLL_TRK_GAIN - [6:3] */ +#define WM8400_FLL_OUTDIV_MASK 0x0007 /* FLL_OUTDIV - [2:0] */ +#define WM8400_FLL_OUTDIV_SHIFT 0 /* FLL_OUTDIV - [2:0] */ +#define WM8400_FLL_OUTDIV_WIDTH 3 /* FLL_OUTDIV - [2:0] */ + +struct wm8400; +void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400); + +#endif diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h new file mode 100644 index 00000000..0147b696 --- /dev/null +++ b/include/linux/mfd/wm8400-private.h @@ -0,0 +1,935 @@ +/* + * wm8400 private definitions. + * + * Copyright 2008 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_MFD_WM8400_PRIV_H +#define __LINUX_MFD_WM8400_PRIV_H + +#include <linux/mfd/wm8400.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> + +struct regmap; + +#define WM8400_REGISTER_COUNT 0x55 + +struct wm8400 { + struct device *dev; + + struct mutex io_lock; + struct regmap *regmap; + + u16 reg_cache[WM8400_REGISTER_COUNT]; + + struct platform_device regulators[6]; +}; + +/* + * Register values. + */ +#define WM8400_RESET_ID 0x00 +#define WM8400_ID 0x01 +#define WM8400_POWER_MANAGEMENT_1 0x02 +#define WM8400_POWER_MANAGEMENT_2 0x03 +#define WM8400_POWER_MANAGEMENT_3 0x04 +#define WM8400_AUDIO_INTERFACE_1 0x05 +#define WM8400_AUDIO_INTERFACE_2 0x06 +#define WM8400_CLOCKING_1 0x07 +#define WM8400_CLOCKING_2 0x08 +#define WM8400_AUDIO_INTERFACE_3 0x09 +#define WM8400_AUDIO_INTERFACE_4 0x0A +#define WM8400_DAC_CTRL 0x0B +#define WM8400_LEFT_DAC_DIGITAL_VOLUME 0x0C +#define WM8400_RIGHT_DAC_DIGITAL_VOLUME 0x0D +#define WM8400_DIGITAL_SIDE_TONE 0x0E +#define WM8400_ADC_CTRL 0x0F +#define WM8400_LEFT_ADC_DIGITAL_VOLUME 0x10 +#define WM8400_RIGHT_ADC_DIGITAL_VOLUME 0x11 +#define WM8400_GPIO_CTRL_1 0x12 +#define WM8400_GPIO1_GPIO2 0x13 +#define WM8400_GPIO3_GPIO4 0x14 +#define WM8400_GPIO5_GPIO6 0x15 +#define WM8400_GPIOCTRL_2 0x16 +#define WM8400_GPIO_POL 0x17 +#define WM8400_LEFT_LINE_INPUT_1_2_VOLUME 0x18 +#define WM8400_LEFT_LINE_INPUT_3_4_VOLUME 0x19 +#define WM8400_RIGHT_LINE_INPUT_1_2_VOLUME 0x1A +#define WM8400_RIGHT_LINE_INPUT_3_4_VOLUME 0x1B +#define WM8400_LEFT_OUTPUT_VOLUME 0x1C +#define WM8400_RIGHT_OUTPUT_VOLUME 0x1D +#define WM8400_LINE_OUTPUTS_VOLUME 0x1E +#define WM8400_OUT3_4_VOLUME 0x1F +#define WM8400_LEFT_OPGA_VOLUME 0x20 +#define WM8400_RIGHT_OPGA_VOLUME 0x21 +#define WM8400_SPEAKER_VOLUME 0x22 +#define WM8400_CLASSD1 0x23 +#define WM8400_CLASSD3 0x25 +#define WM8400_INPUT_MIXER1 0x27 +#define WM8400_INPUT_MIXER2 0x28 +#define WM8400_INPUT_MIXER3 0x29 +#define WM8400_INPUT_MIXER4 0x2A +#define WM8400_INPUT_MIXER5 0x2B +#define WM8400_INPUT_MIXER6 0x2C +#define WM8400_OUTPUT_MIXER1 0x2D +#define WM8400_OUTPUT_MIXER2 0x2E +#define WM8400_OUTPUT_MIXER3 0x2F +#define WM8400_OUTPUT_MIXER4 0x30 +#define WM8400_OUTPUT_MIXER5 0x31 +#define 
WM8400_OUTPUT_MIXER6 0x32 +#define WM8400_OUT3_4_MIXER 0x33 +#define WM8400_LINE_MIXER1 0x34 +#define WM8400_LINE_MIXER2 0x35 +#define WM8400_SPEAKER_MIXER 0x36 +#define WM8400_ADDITIONAL_CONTROL 0x37 +#define WM8400_ANTIPOP1 0x38 +#define WM8400_ANTIPOP2 0x39 +#define WM8400_MICBIAS 0x3A +#define WM8400_FLL_CONTROL_1 0x3C +#define WM8400_FLL_CONTROL_2 0x3D +#define WM8400_FLL_CONTROL_3 0x3E +#define WM8400_FLL_CONTROL_4 0x3F +#define WM8400_LDO1_CONTROL 0x41 +#define WM8400_LDO2_CONTROL 0x42 +#define WM8400_LDO3_CONTROL 0x43 +#define WM8400_LDO4_CONTROL 0x44 +#define WM8400_DCDC1_CONTROL_1 0x46 +#define WM8400_DCDC1_CONTROL_2 0x47 +#define WM8400_DCDC2_CONTROL_1 0x48 +#define WM8400_DCDC2_CONTROL_2 0x49 +#define WM8400_INTERFACE 0x4B +#define WM8400_PM_GENERAL 0x4C +#define WM8400_PM_SHUTDOWN_CONTROL 0x4E +#define WM8400_INTERRUPT_STATUS_1 0x4F +#define WM8400_INTERRUPT_STATUS_1_MASK 0x50 +#define WM8400_INTERRUPT_LEVELS 0x51 +#define WM8400_SHUTDOWN_REASON 0x52 +#define WM8400_LINE_CIRCUITS 0x54 + +/* + * Field Definitions. + */ + +/* + * R0 (0x00) - Reset/ID + */ +#define WM8400_SW_RESET_CHIP_ID_MASK 0xFFFF /* SW_RESET/CHIP_ID - [15:0] */ +#define WM8400_SW_RESET_CHIP_ID_SHIFT 0 /* SW_RESET/CHIP_ID - [15:0] */ +#define WM8400_SW_RESET_CHIP_ID_WIDTH 16 /* SW_RESET/CHIP_ID - [15:0] */ + +/* + * R1 (0x01) - ID + */ +#define WM8400_CHIP_REV_MASK 0x7000 /* CHIP_REV - [14:12] */ +#define WM8400_CHIP_REV_SHIFT 12 /* CHIP_REV - [14:12] */ +#define WM8400_CHIP_REV_WIDTH 3 /* CHIP_REV - [14:12] */ + +/* + * R18 (0x12) - GPIO CTRL 1 + */ +#define WM8400_IRQ 0x1000 /* IRQ */ +#define WM8400_IRQ_MASK 0x1000 /* IRQ */ +#define WM8400_IRQ_SHIFT 12 /* IRQ */ +#define WM8400_IRQ_WIDTH 1 /* IRQ */ +#define WM8400_TEMPOK 0x0800 /* TEMPOK */ +#define WM8400_TEMPOK_MASK 0x0800 /* TEMPOK */ +#define WM8400_TEMPOK_SHIFT 11 /* TEMPOK */ +#define WM8400_TEMPOK_WIDTH 1 /* TEMPOK */ +#define WM8400_MIC1SHRT 0x0400 /* MIC1SHRT */ +#define WM8400_MIC1SHRT_MASK 0x0400 /* MIC1SHRT */ +#define WM8400_MIC1SHRT_SHIFT 10 /* MIC1SHRT */ +#define WM8400_MIC1SHRT_WIDTH 1 /* MIC1SHRT */ +#define WM8400_MIC1DET 0x0200 /* MIC1DET */ +#define WM8400_MIC1DET_MASK 0x0200 /* MIC1DET */ +#define WM8400_MIC1DET_SHIFT 9 /* MIC1DET */ +#define WM8400_MIC1DET_WIDTH 1 /* MIC1DET */ +#define WM8400_FLL_LCK 0x0100 /* FLL_LCK */ +#define WM8400_FLL_LCK_MASK 0x0100 /* FLL_LCK */ +#define WM8400_FLL_LCK_SHIFT 8 /* FLL_LCK */ +#define WM8400_FLL_LCK_WIDTH 1 /* FLL_LCK */ +#define WM8400_GPIO_STATUS_MASK 0x00FF /* GPIO_STATUS - [7:0] */ +#define WM8400_GPIO_STATUS_SHIFT 0 /* GPIO_STATUS - [7:0] */ +#define WM8400_GPIO_STATUS_WIDTH 8 /* GPIO_STATUS - [7:0] */ + +/* + * R19 (0x13) - GPIO1 & GPIO2 + */ +#define WM8400_GPIO2_DEB_ENA 0x8000 /* GPIO2_DEB_ENA */ +#define WM8400_GPIO2_DEB_ENA_MASK 0x8000 /* GPIO2_DEB_ENA */ +#define WM8400_GPIO2_DEB_ENA_SHIFT 15 /* GPIO2_DEB_ENA */ +#define WM8400_GPIO2_DEB_ENA_WIDTH 1 /* GPIO2_DEB_ENA */ +#define WM8400_GPIO2_IRQ_ENA 0x4000 /* GPIO2_IRQ_ENA */ +#define WM8400_GPIO2_IRQ_ENA_MASK 0x4000 /* GPIO2_IRQ_ENA */ +#define WM8400_GPIO2_IRQ_ENA_SHIFT 14 /* GPIO2_IRQ_ENA */ +#define WM8400_GPIO2_IRQ_ENA_WIDTH 1 /* GPIO2_IRQ_ENA */ +#define WM8400_GPIO2_PU 0x2000 /* GPIO2_PU */ +#define WM8400_GPIO2_PU_MASK 0x2000 /* GPIO2_PU */ +#define WM8400_GPIO2_PU_SHIFT 13 /* GPIO2_PU */ +#define WM8400_GPIO2_PU_WIDTH 1 /* GPIO2_PU */ +#define WM8400_GPIO2_PD 0x1000 /* GPIO2_PD */ +#define WM8400_GPIO2_PD_MASK 0x1000 /* GPIO2_PD */ +#define WM8400_GPIO2_PD_SHIFT 12 /* GPIO2_PD */ +#define WM8400_GPIO2_PD_WIDTH 1 /* GPIO2_PD 
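A small decoding sketch (not taken from the driver): the ID fields above read back like any other MASK/SHIFT pair.

/* Extract the silicon revision from a raw R1 (ID) register value. */
static inline unsigned int wm8400_chip_rev_from_id(u16 id_reg)
{
	return (id_reg & WM8400_CHIP_REV_MASK) >> WM8400_CHIP_REV_SHIFT;
}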
*/ +#define WM8400_GPIO2_SEL_MASK 0x0F00 /* GPIO2_SEL - [11:8] */ +#define WM8400_GPIO2_SEL_SHIFT 8 /* GPIO2_SEL - [11:8] */ +#define WM8400_GPIO2_SEL_WIDTH 4 /* GPIO2_SEL - [11:8] */ +#define WM8400_GPIO1_DEB_ENA 0x0080 /* GPIO1_DEB_ENA */ +#define WM8400_GPIO1_DEB_ENA_MASK 0x0080 /* GPIO1_DEB_ENA */ +#define WM8400_GPIO1_DEB_ENA_SHIFT 7 /* GPIO1_DEB_ENA */ +#define WM8400_GPIO1_DEB_ENA_WIDTH 1 /* GPIO1_DEB_ENA */ +#define WM8400_GPIO1_IRQ_ENA 0x0040 /* GPIO1_IRQ_ENA */ +#define WM8400_GPIO1_IRQ_ENA_MASK 0x0040 /* GPIO1_IRQ_ENA */ +#define WM8400_GPIO1_IRQ_ENA_SHIFT 6 /* GPIO1_IRQ_ENA */ +#define WM8400_GPIO1_IRQ_ENA_WIDTH 1 /* GPIO1_IRQ_ENA */ +#define WM8400_GPIO1_PU 0x0020 /* GPIO1_PU */ +#define WM8400_GPIO1_PU_MASK 0x0020 /* GPIO1_PU */ +#define WM8400_GPIO1_PU_SHIFT 5 /* GPIO1_PU */ +#define WM8400_GPIO1_PU_WIDTH 1 /* GPIO1_PU */ +#define WM8400_GPIO1_PD 0x0010 /* GPIO1_PD */ +#define WM8400_GPIO1_PD_MASK 0x0010 /* GPIO1_PD */ +#define WM8400_GPIO1_PD_SHIFT 4 /* GPIO1_PD */ +#define WM8400_GPIO1_PD_WIDTH 1 /* GPIO1_PD */ +#define WM8400_GPIO1_SEL_MASK 0x000F /* GPIO1_SEL - [3:0] */ +#define WM8400_GPIO1_SEL_SHIFT 0 /* GPIO1_SEL - [3:0] */ +#define WM8400_GPIO1_SEL_WIDTH 4 /* GPIO1_SEL - [3:0] */ + +/* + * R20 (0x14) - GPIO3 & GPIO4 + */ +#define WM8400_GPIO4_DEB_ENA 0x8000 /* GPIO4_DEB_ENA */ +#define WM8400_GPIO4_DEB_ENA_MASK 0x8000 /* GPIO4_DEB_ENA */ +#define WM8400_GPIO4_DEB_ENA_SHIFT 15 /* GPIO4_DEB_ENA */ +#define WM8400_GPIO4_DEB_ENA_WIDTH 1 /* GPIO4_DEB_ENA */ +#define WM8400_GPIO4_IRQ_ENA 0x4000 /* GPIO4_IRQ_ENA */ +#define WM8400_GPIO4_IRQ_ENA_MASK 0x4000 /* GPIO4_IRQ_ENA */ +#define WM8400_GPIO4_IRQ_ENA_SHIFT 14 /* GPIO4_IRQ_ENA */ +#define WM8400_GPIO4_IRQ_ENA_WIDTH 1 /* GPIO4_IRQ_ENA */ +#define WM8400_GPIO4_PU 0x2000 /* GPIO4_PU */ +#define WM8400_GPIO4_PU_MASK 0x2000 /* GPIO4_PU */ +#define WM8400_GPIO4_PU_SHIFT 13 /* GPIO4_PU */ +#define WM8400_GPIO4_PU_WIDTH 1 /* GPIO4_PU */ +#define WM8400_GPIO4_PD 0x1000 /* GPIO4_PD */ +#define WM8400_GPIO4_PD_MASK 0x1000 /* GPIO4_PD */ +#define WM8400_GPIO4_PD_SHIFT 12 /* GPIO4_PD */ +#define WM8400_GPIO4_PD_WIDTH 1 /* GPIO4_PD */ +#define WM8400_GPIO4_SEL_MASK 0x0F00 /* GPIO4_SEL - [11:8] */ +#define WM8400_GPIO4_SEL_SHIFT 8 /* GPIO4_SEL - [11:8] */ +#define WM8400_GPIO4_SEL_WIDTH 4 /* GPIO4_SEL - [11:8] */ +#define WM8400_GPIO3_DEB_ENA 0x0080 /* GPIO3_DEB_ENA */ +#define WM8400_GPIO3_DEB_ENA_MASK 0x0080 /* GPIO3_DEB_ENA */ +#define WM8400_GPIO3_DEB_ENA_SHIFT 7 /* GPIO3_DEB_ENA */ +#define WM8400_GPIO3_DEB_ENA_WIDTH 1 /* GPIO3_DEB_ENA */ +#define WM8400_GPIO3_IRQ_ENA 0x0040 /* GPIO3_IRQ_ENA */ +#define WM8400_GPIO3_IRQ_ENA_MASK 0x0040 /* GPIO3_IRQ_ENA */ +#define WM8400_GPIO3_IRQ_ENA_SHIFT 6 /* GPIO3_IRQ_ENA */ +#define WM8400_GPIO3_IRQ_ENA_WIDTH 1 /* GPIO3_IRQ_ENA */ +#define WM8400_GPIO3_PU 0x0020 /* GPIO3_PU */ +#define WM8400_GPIO3_PU_MASK 0x0020 /* GPIO3_PU */ +#define WM8400_GPIO3_PU_SHIFT 5 /* GPIO3_PU */ +#define WM8400_GPIO3_PU_WIDTH 1 /* GPIO3_PU */ +#define WM8400_GPIO3_PD 0x0010 /* GPIO3_PD */ +#define WM8400_GPIO3_PD_MASK 0x0010 /* GPIO3_PD */ +#define WM8400_GPIO3_PD_SHIFT 4 /* GPIO3_PD */ +#define WM8400_GPIO3_PD_WIDTH 1 /* GPIO3_PD */ +#define WM8400_GPIO3_SEL_MASK 0x000F /* GPIO3_SEL - [3:0] */ +#define WM8400_GPIO3_SEL_SHIFT 0 /* GPIO3_SEL - [3:0] */ +#define WM8400_GPIO3_SEL_WIDTH 4 /* GPIO3_SEL - [3:0] */ + +/* + * R21 (0x15) - GPIO5 & GPIO6 + */ +#define WM8400_GPIO6_DEB_ENA 0x8000 /* GPIO6_DEB_ENA */ +#define WM8400_GPIO6_DEB_ENA_MASK 0x8000 /* GPIO6_DEB_ENA */ +#define WM8400_GPIO6_DEB_ENA_SHIFT 15 /* 
GPIO6_DEB_ENA */ +#define WM8400_GPIO6_DEB_ENA_WIDTH 1 /* GPIO6_DEB_ENA */ +#define WM8400_GPIO6_IRQ_ENA 0x4000 /* GPIO6_IRQ_ENA */ +#define WM8400_GPIO6_IRQ_ENA_MASK 0x4000 /* GPIO6_IRQ_ENA */ +#define WM8400_GPIO6_IRQ_ENA_SHIFT 14 /* GPIO6_IRQ_ENA */ +#define WM8400_GPIO6_IRQ_ENA_WIDTH 1 /* GPIO6_IRQ_ENA */ +#define WM8400_GPIO6_PU 0x2000 /* GPIO6_PU */ +#define WM8400_GPIO6_PU_MASK 0x2000 /* GPIO6_PU */ +#define WM8400_GPIO6_PU_SHIFT 13 /* GPIO6_PU */ +#define WM8400_GPIO6_PU_WIDTH 1 /* GPIO6_PU */ +#define WM8400_GPIO6_PD 0x1000 /* GPIO6_PD */ +#define WM8400_GPIO6_PD_MASK 0x1000 /* GPIO6_PD */ +#define WM8400_GPIO6_PD_SHIFT 12 /* GPIO6_PD */ +#define WM8400_GPIO6_PD_WIDTH 1 /* GPIO6_PD */ +#define WM8400_GPIO6_SEL_MASK 0x0F00 /* GPIO6_SEL - [11:8] */ +#define WM8400_GPIO6_SEL_SHIFT 8 /* GPIO6_SEL - [11:8] */ +#define WM8400_GPIO6_SEL_WIDTH 4 /* GPIO6_SEL - [11:8] */ +#define WM8400_GPIO5_DEB_ENA 0x0080 /* GPIO5_DEB_ENA */ +#define WM8400_GPIO5_DEB_ENA_MASK 0x0080 /* GPIO5_DEB_ENA */ +#define WM8400_GPIO5_DEB_ENA_SHIFT 7 /* GPIO5_DEB_ENA */ +#define WM8400_GPIO5_DEB_ENA_WIDTH 1 /* GPIO5_DEB_ENA */ +#define WM8400_GPIO5_IRQ_ENA 0x0040 /* GPIO5_IRQ_ENA */ +#define WM8400_GPIO5_IRQ_ENA_MASK 0x0040 /* GPIO5_IRQ_ENA */ +#define WM8400_GPIO5_IRQ_ENA_SHIFT 6 /* GPIO5_IRQ_ENA */ +#define WM8400_GPIO5_IRQ_ENA_WIDTH 1 /* GPIO5_IRQ_ENA */ +#define WM8400_GPIO5_PU 0x0020 /* GPIO5_PU */ +#define WM8400_GPIO5_PU_MASK 0x0020 /* GPIO5_PU */ +#define WM8400_GPIO5_PU_SHIFT 5 /* GPIO5_PU */ +#define WM8400_GPIO5_PU_WIDTH 1 /* GPIO5_PU */ +#define WM8400_GPIO5_PD 0x0010 /* GPIO5_PD */ +#define WM8400_GPIO5_PD_MASK 0x0010 /* GPIO5_PD */ +#define WM8400_GPIO5_PD_SHIFT 4 /* GPIO5_PD */ +#define WM8400_GPIO5_PD_WIDTH 1 /* GPIO5_PD */ +#define WM8400_GPIO5_SEL_MASK 0x000F /* GPIO5_SEL - [3:0] */ +#define WM8400_GPIO5_SEL_SHIFT 0 /* GPIO5_SEL - [3:0] */ +#define WM8400_GPIO5_SEL_WIDTH 4 /* GPIO5_SEL - [3:0] */ + +/* + * R22 (0x16) - GPIOCTRL 2 + */ +#define WM8400_TEMPOK_IRQ_ENA 0x0800 /* TEMPOK_IRQ_ENA */ +#define WM8400_TEMPOK_IRQ_ENA_MASK 0x0800 /* TEMPOK_IRQ_ENA */ +#define WM8400_TEMPOK_IRQ_ENA_SHIFT 11 /* TEMPOK_IRQ_ENA */ +#define WM8400_TEMPOK_IRQ_ENA_WIDTH 1 /* TEMPOK_IRQ_ENA */ +#define WM8400_MIC1SHRT_IRQ_ENA 0x0400 /* MIC1SHRT_IRQ_ENA */ +#define WM8400_MIC1SHRT_IRQ_ENA_MASK 0x0400 /* MIC1SHRT_IRQ_ENA */ +#define WM8400_MIC1SHRT_IRQ_ENA_SHIFT 10 /* MIC1SHRT_IRQ_ENA */ +#define WM8400_MIC1SHRT_IRQ_ENA_WIDTH 1 /* MIC1SHRT_IRQ_ENA */ +#define WM8400_MIC1DET_IRQ_ENA 0x0200 /* MIC1DET_IRQ_ENA */ +#define WM8400_MIC1DET_IRQ_ENA_MASK 0x0200 /* MIC1DET_IRQ_ENA */ +#define WM8400_MIC1DET_IRQ_ENA_SHIFT 9 /* MIC1DET_IRQ_ENA */ +#define WM8400_MIC1DET_IRQ_ENA_WIDTH 1 /* MIC1DET_IRQ_ENA */ +#define WM8400_FLL_LCK_IRQ_ENA 0x0100 /* FLL_LCK_IRQ_ENA */ +#define WM8400_FLL_LCK_IRQ_ENA_MASK 0x0100 /* FLL_LCK_IRQ_ENA */ +#define WM8400_FLL_LCK_IRQ_ENA_SHIFT 8 /* FLL_LCK_IRQ_ENA */ +#define WM8400_FLL_LCK_IRQ_ENA_WIDTH 1 /* FLL_LCK_IRQ_ENA */ +#define WM8400_GPI8_DEB_ENA 0x0080 /* GPI8_DEB_ENA */ +#define WM8400_GPI8_DEB_ENA_MASK 0x0080 /* GPI8_DEB_ENA */ +#define WM8400_GPI8_DEB_ENA_SHIFT 7 /* GPI8_DEB_ENA */ +#define WM8400_GPI8_DEB_ENA_WIDTH 1 /* GPI8_DEB_ENA */ +#define WM8400_GPI8_IRQ_ENA 0x0040 /* GPI8_IRQ_ENA */ +#define WM8400_GPI8_IRQ_ENA_MASK 0x0040 /* GPI8_IRQ_ENA */ +#define WM8400_GPI8_IRQ_ENA_SHIFT 6 /* GPI8_IRQ_ENA */ +#define WM8400_GPI8_IRQ_ENA_WIDTH 1 /* GPI8_IRQ_ENA */ +#define WM8400_GPI8_ENA 0x0010 /* GPI8_ENA */ +#define WM8400_GPI8_ENA_MASK 0x0010 /* GPI8_ENA */ +#define WM8400_GPI8_ENA_SHIFT 4 
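A hedged sketch of composing a GPIO1 configuration word for the GPIO1/GPIO2 register from the per-pin bits above; the function-select code 0 and the pull-down choice are arbitrary example values, not recommendations.

/* GPIO1: debounced, IRQ enabled, pull-down, alternate function 0. */
static u16 example_gpio1_config(void)
{
	return WM8400_GPIO1_DEB_ENA | WM8400_GPIO1_IRQ_ENA | WM8400_GPIO1_PD |
	       ((0 << WM8400_GPIO1_SEL_SHIFT) & WM8400_GPIO1_SEL_MASK);
}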
/* GPI8_ENA */ +#define WM8400_GPI8_ENA_WIDTH 1 /* GPI8_ENA */ +#define WM8400_GPI7_DEB_ENA 0x0008 /* GPI7_DEB_ENA */ +#define WM8400_GPI7_DEB_ENA_MASK 0x0008 /* GPI7_DEB_ENA */ +#define WM8400_GPI7_DEB_ENA_SHIFT 3 /* GPI7_DEB_ENA */ +#define WM8400_GPI7_DEB_ENA_WIDTH 1 /* GPI7_DEB_ENA */ +#define WM8400_GPI7_IRQ_ENA 0x0004 /* GPI7_IRQ_ENA */ +#define WM8400_GPI7_IRQ_ENA_MASK 0x0004 /* GPI7_IRQ_ENA */ +#define WM8400_GPI7_IRQ_ENA_SHIFT 2 /* GPI7_IRQ_ENA */ +#define WM8400_GPI7_IRQ_ENA_WIDTH 1 /* GPI7_IRQ_ENA */ +#define WM8400_GPI7_ENA 0x0001 /* GPI7_ENA */ +#define WM8400_GPI7_ENA_MASK 0x0001 /* GPI7_ENA */ +#define WM8400_GPI7_ENA_SHIFT 0 /* GPI7_ENA */ +#define WM8400_GPI7_ENA_WIDTH 1 /* GPI7_ENA */ + +/* + * R23 (0x17) - GPIO_POL + */ +#define WM8400_IRQ_INV 0x1000 /* IRQ_INV */ +#define WM8400_IRQ_INV_MASK 0x1000 /* IRQ_INV */ +#define WM8400_IRQ_INV_SHIFT 12 /* IRQ_INV */ +#define WM8400_IRQ_INV_WIDTH 1 /* IRQ_INV */ +#define WM8400_TEMPOK_POL 0x0800 /* TEMPOK_POL */ +#define WM8400_TEMPOK_POL_MASK 0x0800 /* TEMPOK_POL */ +#define WM8400_TEMPOK_POL_SHIFT 11 /* TEMPOK_POL */ +#define WM8400_TEMPOK_POL_WIDTH 1 /* TEMPOK_POL */ +#define WM8400_MIC1SHRT_POL 0x0400 /* MIC1SHRT_POL */ +#define WM8400_MIC1SHRT_POL_MASK 0x0400 /* MIC1SHRT_POL */ +#define WM8400_MIC1SHRT_POL_SHIFT 10 /* MIC1SHRT_POL */ +#define WM8400_MIC1SHRT_POL_WIDTH 1 /* MIC1SHRT_POL */ +#define WM8400_MIC1DET_POL 0x0200 /* MIC1DET_POL */ +#define WM8400_MIC1DET_POL_MASK 0x0200 /* MIC1DET_POL */ +#define WM8400_MIC1DET_POL_SHIFT 9 /* MIC1DET_POL */ +#define WM8400_MIC1DET_POL_WIDTH 1 /* MIC1DET_POL */ +#define WM8400_FLL_LCK_POL 0x0100 /* FLL_LCK_POL */ +#define WM8400_FLL_LCK_POL_MASK 0x0100 /* FLL_LCK_POL */ +#define WM8400_FLL_LCK_POL_SHIFT 8 /* FLL_LCK_POL */ +#define WM8400_FLL_LCK_POL_WIDTH 1 /* FLL_LCK_POL */ +#define WM8400_GPIO_POL_MASK 0x00FF /* GPIO_POL - [7:0] */ +#define WM8400_GPIO_POL_SHIFT 0 /* GPIO_POL - [7:0] */ +#define WM8400_GPIO_POL_WIDTH 8 /* GPIO_POL - [7:0] */ + +/* + * R65 (0x41) - LDO 1 Control + */ +#define WM8400_LDO1_ENA 0x8000 /* LDO1_ENA */ +#define WM8400_LDO1_ENA_MASK 0x8000 /* LDO1_ENA */ +#define WM8400_LDO1_ENA_SHIFT 15 /* LDO1_ENA */ +#define WM8400_LDO1_ENA_WIDTH 1 /* LDO1_ENA */ +#define WM8400_LDO1_SWI 0x4000 /* LDO1_SWI */ +#define WM8400_LDO1_SWI_MASK 0x4000 /* LDO1_SWI */ +#define WM8400_LDO1_SWI_SHIFT 14 /* LDO1_SWI */ +#define WM8400_LDO1_SWI_WIDTH 1 /* LDO1_SWI */ +#define WM8400_LDO1_OPFLT 0x1000 /* LDO1_OPFLT */ +#define WM8400_LDO1_OPFLT_MASK 0x1000 /* LDO1_OPFLT */ +#define WM8400_LDO1_OPFLT_SHIFT 12 /* LDO1_OPFLT */ +#define WM8400_LDO1_OPFLT_WIDTH 1 /* LDO1_OPFLT */ +#define WM8400_LDO1_ERRACT 0x0800 /* LDO1_ERRACT */ +#define WM8400_LDO1_ERRACT_MASK 0x0800 /* LDO1_ERRACT */ +#define WM8400_LDO1_ERRACT_SHIFT 11 /* LDO1_ERRACT */ +#define WM8400_LDO1_ERRACT_WIDTH 1 /* LDO1_ERRACT */ +#define WM8400_LDO1_HIB_MODE 0x0400 /* LDO1_HIB_MODE */ +#define WM8400_LDO1_HIB_MODE_MASK 0x0400 /* LDO1_HIB_MODE */ +#define WM8400_LDO1_HIB_MODE_SHIFT 10 /* LDO1_HIB_MODE */ +#define WM8400_LDO1_HIB_MODE_WIDTH 1 /* LDO1_HIB_MODE */ +#define WM8400_LDO1_VIMG_MASK 0x03E0 /* LDO1_VIMG - [9:5] */ +#define WM8400_LDO1_VIMG_SHIFT 5 /* LDO1_VIMG - [9:5] */ +#define WM8400_LDO1_VIMG_WIDTH 5 /* LDO1_VIMG - [9:5] */ +#define WM8400_LDO1_VSEL_MASK 0x001F /* LDO1_VSEL - [4:0] */ +#define WM8400_LDO1_VSEL_SHIFT 0 /* LDO1_VSEL - [4:0] */ +#define WM8400_LDO1_VSEL_WIDTH 5 /* LDO1_VSEL - [4:0] */ + +/* + * R66 (0x42) - LDO 2 Control + */ +#define WM8400_LDO2_ENA 0x8000 /* LDO2_ENA */ +#define 
WM8400_LDO2_ENA_MASK 0x8000 /* LDO2_ENA */ +#define WM8400_LDO2_ENA_SHIFT 15 /* LDO2_ENA */ +#define WM8400_LDO2_ENA_WIDTH 1 /* LDO2_ENA */ +#define WM8400_LDO2_SWI 0x4000 /* LDO2_SWI */ +#define WM8400_LDO2_SWI_MASK 0x4000 /* LDO2_SWI */ +#define WM8400_LDO2_SWI_SHIFT 14 /* LDO2_SWI */ +#define WM8400_LDO2_SWI_WIDTH 1 /* LDO2_SWI */ +#define WM8400_LDO2_OPFLT 0x1000 /* LDO2_OPFLT */ +#define WM8400_LDO2_OPFLT_MASK 0x1000 /* LDO2_OPFLT */ +#define WM8400_LDO2_OPFLT_SHIFT 12 /* LDO2_OPFLT */ +#define WM8400_LDO2_OPFLT_WIDTH 1 /* LDO2_OPFLT */ +#define WM8400_LDO2_ERRACT 0x0800 /* LDO2_ERRACT */ +#define WM8400_LDO2_ERRACT_MASK 0x0800 /* LDO2_ERRACT */ +#define WM8400_LDO2_ERRACT_SHIFT 11 /* LDO2_ERRACT */ +#define WM8400_LDO2_ERRACT_WIDTH 1 /* LDO2_ERRACT */ +#define WM8400_LDO2_HIB_MODE 0x0400 /* LDO2_HIB_MODE */ +#define WM8400_LDO2_HIB_MODE_MASK 0x0400 /* LDO2_HIB_MODE */ +#define WM8400_LDO2_HIB_MODE_SHIFT 10 /* LDO2_HIB_MODE */ +#define WM8400_LDO2_HIB_MODE_WIDTH 1 /* LDO2_HIB_MODE */ +#define WM8400_LDO2_VIMG_MASK 0x03E0 /* LDO2_VIMG - [9:5] */ +#define WM8400_LDO2_VIMG_SHIFT 5 /* LDO2_VIMG - [9:5] */ +#define WM8400_LDO2_VIMG_WIDTH 5 /* LDO2_VIMG - [9:5] */ +#define WM8400_LDO2_VSEL_MASK 0x001F /* LDO2_VSEL - [4:0] */ +#define WM8400_LDO2_VSEL_SHIFT 0 /* LDO2_VSEL - [4:0] */ +#define WM8400_LDO2_VSEL_WIDTH 5 /* LDO2_VSEL - [4:0] */ + +/* + * R67 (0x43) - LDO 3 Control + */ +#define WM8400_LDO3_ENA 0x8000 /* LDO3_ENA */ +#define WM8400_LDO3_ENA_MASK 0x8000 /* LDO3_ENA */ +#define WM8400_LDO3_ENA_SHIFT 15 /* LDO3_ENA */ +#define WM8400_LDO3_ENA_WIDTH 1 /* LDO3_ENA */ +#define WM8400_LDO3_SWI 0x4000 /* LDO3_SWI */ +#define WM8400_LDO3_SWI_MASK 0x4000 /* LDO3_SWI */ +#define WM8400_LDO3_SWI_SHIFT 14 /* LDO3_SWI */ +#define WM8400_LDO3_SWI_WIDTH 1 /* LDO3_SWI */ +#define WM8400_LDO3_OPFLT 0x1000 /* LDO3_OPFLT */ +#define WM8400_LDO3_OPFLT_MASK 0x1000 /* LDO3_OPFLT */ +#define WM8400_LDO3_OPFLT_SHIFT 12 /* LDO3_OPFLT */ +#define WM8400_LDO3_OPFLT_WIDTH 1 /* LDO3_OPFLT */ +#define WM8400_LDO3_ERRACT 0x0800 /* LDO3_ERRACT */ +#define WM8400_LDO3_ERRACT_MASK 0x0800 /* LDO3_ERRACT */ +#define WM8400_LDO3_ERRACT_SHIFT 11 /* LDO3_ERRACT */ +#define WM8400_LDO3_ERRACT_WIDTH 1 /* LDO3_ERRACT */ +#define WM8400_LDO3_HIB_MODE 0x0400 /* LDO3_HIB_MODE */ +#define WM8400_LDO3_HIB_MODE_MASK 0x0400 /* LDO3_HIB_MODE */ +#define WM8400_LDO3_HIB_MODE_SHIFT 10 /* LDO3_HIB_MODE */ +#define WM8400_LDO3_HIB_MODE_WIDTH 1 /* LDO3_HIB_MODE */ +#define WM8400_LDO3_VIMG_MASK 0x03E0 /* LDO3_VIMG - [9:5] */ +#define WM8400_LDO3_VIMG_SHIFT 5 /* LDO3_VIMG - [9:5] */ +#define WM8400_LDO3_VIMG_WIDTH 5 /* LDO3_VIMG - [9:5] */ +#define WM8400_LDO3_VSEL_MASK 0x001F /* LDO3_VSEL - [4:0] */ +#define WM8400_LDO3_VSEL_SHIFT 0 /* LDO3_VSEL - [4:0] */ +#define WM8400_LDO3_VSEL_WIDTH 5 /* LDO3_VSEL - [4:0] */ + +/* + * R68 (0x44) - LDO 4 Control + */ +#define WM8400_LDO4_ENA 0x8000 /* LDO4_ENA */ +#define WM8400_LDO4_ENA_MASK 0x8000 /* LDO4_ENA */ +#define WM8400_LDO4_ENA_SHIFT 15 /* LDO4_ENA */ +#define WM8400_LDO4_ENA_WIDTH 1 /* LDO4_ENA */ +#define WM8400_LDO4_SWI 0x4000 /* LDO4_SWI */ +#define WM8400_LDO4_SWI_MASK 0x4000 /* LDO4_SWI */ +#define WM8400_LDO4_SWI_SHIFT 14 /* LDO4_SWI */ +#define WM8400_LDO4_SWI_WIDTH 1 /* LDO4_SWI */ +#define WM8400_LDO4_OPFLT 0x1000 /* LDO4_OPFLT */ +#define WM8400_LDO4_OPFLT_MASK 0x1000 /* LDO4_OPFLT */ +#define WM8400_LDO4_OPFLT_SHIFT 12 /* LDO4_OPFLT */ +#define WM8400_LDO4_OPFLT_WIDTH 1 /* LDO4_OPFLT */ +#define WM8400_LDO4_ERRACT 0x0800 /* LDO4_ERRACT */ +#define WM8400_LDO4_ERRACT_MASK 
0x0800 /* LDO4_ERRACT */ +#define WM8400_LDO4_ERRACT_SHIFT 11 /* LDO4_ERRACT */ +#define WM8400_LDO4_ERRACT_WIDTH 1 /* LDO4_ERRACT */ +#define WM8400_LDO4_HIB_MODE 0x0400 /* LDO4_HIB_MODE */ +#define WM8400_LDO4_HIB_MODE_MASK 0x0400 /* LDO4_HIB_MODE */ +#define WM8400_LDO4_HIB_MODE_SHIFT 10 /* LDO4_HIB_MODE */ +#define WM8400_LDO4_HIB_MODE_WIDTH 1 /* LDO4_HIB_MODE */ +#define WM8400_LDO4_VIMG_MASK 0x03E0 /* LDO4_VIMG - [9:5] */ +#define WM8400_LDO4_VIMG_SHIFT 5 /* LDO4_VIMG - [9:5] */ +#define WM8400_LDO4_VIMG_WIDTH 5 /* LDO4_VIMG - [9:5] */ +#define WM8400_LDO4_VSEL_MASK 0x001F /* LDO4_VSEL - [4:0] */ +#define WM8400_LDO4_VSEL_SHIFT 0 /* LDO4_VSEL - [4:0] */ +#define WM8400_LDO4_VSEL_WIDTH 5 /* LDO4_VSEL - [4:0] */ + +/* + * R70 (0x46) - DCDC1 Control 1 + */ +#define WM8400_DC1_ENA 0x8000 /* DC1_ENA */ +#define WM8400_DC1_ENA_MASK 0x8000 /* DC1_ENA */ +#define WM8400_DC1_ENA_SHIFT 15 /* DC1_ENA */ +#define WM8400_DC1_ENA_WIDTH 1 /* DC1_ENA */ +#define WM8400_DC1_ACTIVE 0x4000 /* DC1_ACTIVE */ +#define WM8400_DC1_ACTIVE_MASK 0x4000 /* DC1_ACTIVE */ +#define WM8400_DC1_ACTIVE_SHIFT 14 /* DC1_ACTIVE */ +#define WM8400_DC1_ACTIVE_WIDTH 1 /* DC1_ACTIVE */ +#define WM8400_DC1_SLEEP 0x2000 /* DC1_SLEEP */ +#define WM8400_DC1_SLEEP_MASK 0x2000 /* DC1_SLEEP */ +#define WM8400_DC1_SLEEP_SHIFT 13 /* DC1_SLEEP */ +#define WM8400_DC1_SLEEP_WIDTH 1 /* DC1_SLEEP */ +#define WM8400_DC1_OPFLT 0x1000 /* DC1_OPFLT */ +#define WM8400_DC1_OPFLT_MASK 0x1000 /* DC1_OPFLT */ +#define WM8400_DC1_OPFLT_SHIFT 12 /* DC1_OPFLT */ +#define WM8400_DC1_OPFLT_WIDTH 1 /* DC1_OPFLT */ +#define WM8400_DC1_ERRACT 0x0800 /* DC1_ERRACT */ +#define WM8400_DC1_ERRACT_MASK 0x0800 /* DC1_ERRACT */ +#define WM8400_DC1_ERRACT_SHIFT 11 /* DC1_ERRACT */ +#define WM8400_DC1_ERRACT_WIDTH 1 /* DC1_ERRACT */ +#define WM8400_DC1_HIB_MODE 0x0400 /* DC1_HIB_MODE */ +#define WM8400_DC1_HIB_MODE_MASK 0x0400 /* DC1_HIB_MODE */ +#define WM8400_DC1_HIB_MODE_SHIFT 10 /* DC1_HIB_MODE */ +#define WM8400_DC1_HIB_MODE_WIDTH 1 /* DC1_HIB_MODE */ +#define WM8400_DC1_SOFTST_MASK 0x0300 /* DC1_SOFTST - [9:8] */ +#define WM8400_DC1_SOFTST_SHIFT 8 /* DC1_SOFTST - [9:8] */ +#define WM8400_DC1_SOFTST_WIDTH 2 /* DC1_SOFTST - [9:8] */ +#define WM8400_DC1_OV_PROT 0x0080 /* DC1_OV_PROT */ +#define WM8400_DC1_OV_PROT_MASK 0x0080 /* DC1_OV_PROT */ +#define WM8400_DC1_OV_PROT_SHIFT 7 /* DC1_OV_PROT */ +#define WM8400_DC1_OV_PROT_WIDTH 1 /* DC1_OV_PROT */ +#define WM8400_DC1_VSEL_MASK 0x007F /* DC1_VSEL - [6:0] */ +#define WM8400_DC1_VSEL_SHIFT 0 /* DC1_VSEL - [6:0] */ +#define WM8400_DC1_VSEL_WIDTH 7 /* DC1_VSEL - [6:0] */ + +/* + * R71 (0x47) - DCDC1 Control 2 + */ +#define WM8400_DC1_FRC_PWM 0x2000 /* DC1_FRC_PWM */ +#define WM8400_DC1_FRC_PWM_MASK 0x2000 /* DC1_FRC_PWM */ +#define WM8400_DC1_FRC_PWM_SHIFT 13 /* DC1_FRC_PWM */ +#define WM8400_DC1_FRC_PWM_WIDTH 1 /* DC1_FRC_PWM */ +#define WM8400_DC1_STBY_LIM_MASK 0x0300 /* DC1_STBY_LIM - [9:8] */ +#define WM8400_DC1_STBY_LIM_SHIFT 8 /* DC1_STBY_LIM - [9:8] */ +#define WM8400_DC1_STBY_LIM_WIDTH 2 /* DC1_STBY_LIM - [9:8] */ +#define WM8400_DC1_ACT_LIM 0x0080 /* DC1_ACT_LIM */ +#define WM8400_DC1_ACT_LIM_MASK 0x0080 /* DC1_ACT_LIM */ +#define WM8400_DC1_ACT_LIM_SHIFT 7 /* DC1_ACT_LIM */ +#define WM8400_DC1_ACT_LIM_WIDTH 1 /* DC1_ACT_LIM */ +#define WM8400_DC1_VIMG_MASK 0x007F /* DC1_VIMG - [6:0] */ +#define WM8400_DC1_VIMG_SHIFT 0 /* DC1_VIMG - [6:0] */ +#define WM8400_DC1_VIMG_WIDTH 7 /* DC1_VIMG - [6:0] */ + +/* + * R72 (0x48) - DCDC2 Control 1 + */ +#define WM8400_DC2_ENA 0x8000 /* DC2_ENA */ +#define 
WM8400_DC2_ENA_MASK 0x8000 /* DC2_ENA */ +#define WM8400_DC2_ENA_SHIFT 15 /* DC2_ENA */ +#define WM8400_DC2_ENA_WIDTH 1 /* DC2_ENA */ +#define WM8400_DC2_ACTIVE 0x4000 /* DC2_ACTIVE */ +#define WM8400_DC2_ACTIVE_MASK 0x4000 /* DC2_ACTIVE */ +#define WM8400_DC2_ACTIVE_SHIFT 14 /* DC2_ACTIVE */ +#define WM8400_DC2_ACTIVE_WIDTH 1 /* DC2_ACTIVE */ +#define WM8400_DC2_SLEEP 0x2000 /* DC2_SLEEP */ +#define WM8400_DC2_SLEEP_MASK 0x2000 /* DC2_SLEEP */ +#define WM8400_DC2_SLEEP_SHIFT 13 /* DC2_SLEEP */ +#define WM8400_DC2_SLEEP_WIDTH 1 /* DC2_SLEEP */ +#define WM8400_DC2_OPFLT 0x1000 /* DC2_OPFLT */ +#define WM8400_DC2_OPFLT_MASK 0x1000 /* DC2_OPFLT */ +#define WM8400_DC2_OPFLT_SHIFT 12 /* DC2_OPFLT */ +#define WM8400_DC2_OPFLT_WIDTH 1 /* DC2_OPFLT */ +#define WM8400_DC2_ERRACT 0x0800 /* DC2_ERRACT */ +#define WM8400_DC2_ERRACT_MASK 0x0800 /* DC2_ERRACT */ +#define WM8400_DC2_ERRACT_SHIFT 11 /* DC2_ERRACT */ +#define WM8400_DC2_ERRACT_WIDTH 1 /* DC2_ERRACT */ +#define WM8400_DC2_HIB_MODE 0x0400 /* DC2_HIB_MODE */ +#define WM8400_DC2_HIB_MODE_MASK 0x0400 /* DC2_HIB_MODE */ +#define WM8400_DC2_HIB_MODE_SHIFT 10 /* DC2_HIB_MODE */ +#define WM8400_DC2_HIB_MODE_WIDTH 1 /* DC2_HIB_MODE */ +#define WM8400_DC2_SOFTST_MASK 0x0300 /* DC2_SOFTST - [9:8] */ +#define WM8400_DC2_SOFTST_SHIFT 8 /* DC2_SOFTST - [9:8] */ +#define WM8400_DC2_SOFTST_WIDTH 2 /* DC2_SOFTST - [9:8] */ +#define WM8400_DC2_OV_PROT 0x0080 /* DC2_OV_PROT */ +#define WM8400_DC2_OV_PROT_MASK 0x0080 /* DC2_OV_PROT */ +#define WM8400_DC2_OV_PROT_SHIFT 7 /* DC2_OV_PROT */ +#define WM8400_DC2_OV_PROT_WIDTH 1 /* DC2_OV_PROT */ +#define WM8400_DC2_VSEL_MASK 0x007F /* DC2_VSEL - [6:0] */ +#define WM8400_DC2_VSEL_SHIFT 0 /* DC2_VSEL - [6:0] */ +#define WM8400_DC2_VSEL_WIDTH 7 /* DC2_VSEL - [6:0] */ + +/* + * R73 (0x49) - DCDC2 Control 2 + */ +#define WM8400_DC2_FRC_PWM 0x2000 /* DC2_FRC_PWM */ +#define WM8400_DC2_FRC_PWM_MASK 0x2000 /* DC2_FRC_PWM */ +#define WM8400_DC2_FRC_PWM_SHIFT 13 /* DC2_FRC_PWM */ +#define WM8400_DC2_FRC_PWM_WIDTH 1 /* DC2_FRC_PWM */ +#define WM8400_DC2_STBY_LIM_MASK 0x0300 /* DC2_STBY_LIM - [9:8] */ +#define WM8400_DC2_STBY_LIM_SHIFT 8 /* DC2_STBY_LIM - [9:8] */ +#define WM8400_DC2_STBY_LIM_WIDTH 2 /* DC2_STBY_LIM - [9:8] */ +#define WM8400_DC2_ACT_LIM 0x0080 /* DC2_ACT_LIM */ +#define WM8400_DC2_ACT_LIM_MASK 0x0080 /* DC2_ACT_LIM */ +#define WM8400_DC2_ACT_LIM_SHIFT 7 /* DC2_ACT_LIM */ +#define WM8400_DC2_ACT_LIM_WIDTH 1 /* DC2_ACT_LIM */ +#define WM8400_DC2_VIMG_MASK 0x007F /* DC2_VIMG - [6:0] */ +#define WM8400_DC2_VIMG_SHIFT 0 /* DC2_VIMG - [6:0] */ +#define WM8400_DC2_VIMG_WIDTH 7 /* DC2_VIMG - [6:0] */ + +/* + * R75 (0x4B) - Interface + */ +#define WM8400_AUTOINC 0x0008 /* AUTOINC */ +#define WM8400_AUTOINC_MASK 0x0008 /* AUTOINC */ +#define WM8400_AUTOINC_SHIFT 3 /* AUTOINC */ +#define WM8400_AUTOINC_WIDTH 1 /* AUTOINC */ +#define WM8400_ARA_ENA 0x0004 /* ARA_ENA */ +#define WM8400_ARA_ENA_MASK 0x0004 /* ARA_ENA */ +#define WM8400_ARA_ENA_SHIFT 2 /* ARA_ENA */ +#define WM8400_ARA_ENA_WIDTH 1 /* ARA_ENA */ +#define WM8400_SPI_CFG 0x0002 /* SPI_CFG */ +#define WM8400_SPI_CFG_MASK 0x0002 /* SPI_CFG */ +#define WM8400_SPI_CFG_SHIFT 1 /* SPI_CFG */ +#define WM8400_SPI_CFG_WIDTH 1 /* SPI_CFG */ + +/* + * R76 (0x4C) - PM GENERAL + */ +#define WM8400_CODEC_SOFTST 0x8000 /* CODEC_SOFTST */ +#define WM8400_CODEC_SOFTST_MASK 0x8000 /* CODEC_SOFTST */ +#define WM8400_CODEC_SOFTST_SHIFT 15 /* CODEC_SOFTST */ +#define WM8400_CODEC_SOFTST_WIDTH 1 /* CODEC_SOFTST */ +#define WM8400_CODEC_SOFTSD 0x4000 /* CODEC_SOFTSD */ +#define 
WM8400_CODEC_SOFTSD_MASK 0x4000 /* CODEC_SOFTSD */ +#define WM8400_CODEC_SOFTSD_SHIFT 14 /* CODEC_SOFTSD */ +#define WM8400_CODEC_SOFTSD_WIDTH 1 /* CODEC_SOFTSD */ +#define WM8400_CHIP_SOFTSD 0x2000 /* CHIP_SOFTSD */ +#define WM8400_CHIP_SOFTSD_MASK 0x2000 /* CHIP_SOFTSD */ +#define WM8400_CHIP_SOFTSD_SHIFT 13 /* CHIP_SOFTSD */ +#define WM8400_CHIP_SOFTSD_WIDTH 1 /* CHIP_SOFTSD */ +#define WM8400_DSLEEP1_POL 0x0008 /* DSLEEP1_POL */ +#define WM8400_DSLEEP1_POL_MASK 0x0008 /* DSLEEP1_POL */ +#define WM8400_DSLEEP1_POL_SHIFT 3 /* DSLEEP1_POL */ +#define WM8400_DSLEEP1_POL_WIDTH 1 /* DSLEEP1_POL */ +#define WM8400_DSLEEP2_POL 0x0004 /* DSLEEP2_POL */ +#define WM8400_DSLEEP2_POL_MASK 0x0004 /* DSLEEP2_POL */ +#define WM8400_DSLEEP2_POL_SHIFT 2 /* DSLEEP2_POL */ +#define WM8400_DSLEEP2_POL_WIDTH 1 /* DSLEEP2_POL */ +#define WM8400_PWR_STATE_MASK 0x0003 /* PWR_STATE - [1:0] */ +#define WM8400_PWR_STATE_SHIFT 0 /* PWR_STATE - [1:0] */ +#define WM8400_PWR_STATE_WIDTH 2 /* PWR_STATE - [1:0] */ + +/* + * R78 (0x4E) - PM Shutdown Control + */ +#define WM8400_CHIP_GT150_ERRACT 0x0200 /* CHIP_GT150_ERRACT */ +#define WM8400_CHIP_GT150_ERRACT_MASK 0x0200 /* CHIP_GT150_ERRACT */ +#define WM8400_CHIP_GT150_ERRACT_SHIFT 9 /* CHIP_GT150_ERRACT */ +#define WM8400_CHIP_GT150_ERRACT_WIDTH 1 /* CHIP_GT150_ERRACT */ +#define WM8400_CHIP_GT115_ERRACT 0x0100 /* CHIP_GT115_ERRACT */ +#define WM8400_CHIP_GT115_ERRACT_MASK 0x0100 /* CHIP_GT115_ERRACT */ +#define WM8400_CHIP_GT115_ERRACT_SHIFT 8 /* CHIP_GT115_ERRACT */ +#define WM8400_CHIP_GT115_ERRACT_WIDTH 1 /* CHIP_GT115_ERRACT */ +#define WM8400_LINE_CMP_ERRACT 0x0080 /* LINE_CMP_ERRACT */ +#define WM8400_LINE_CMP_ERRACT_MASK 0x0080 /* LINE_CMP_ERRACT */ +#define WM8400_LINE_CMP_ERRACT_SHIFT 7 /* LINE_CMP_ERRACT */ +#define WM8400_LINE_CMP_ERRACT_WIDTH 1 /* LINE_CMP_ERRACT */ +#define WM8400_UVLO_ERRACT 0x0040 /* UVLO_ERRACT */ +#define WM8400_UVLO_ERRACT_MASK 0x0040 /* UVLO_ERRACT */ +#define WM8400_UVLO_ERRACT_SHIFT 6 /* UVLO_ERRACT */ +#define WM8400_UVLO_ERRACT_WIDTH 1 /* UVLO_ERRACT */ + +/* + * R79 (0x4F) - Interrupt Status 1 + */ +#define WM8400_MICD_CINT 0x8000 /* MICD_CINT */ +#define WM8400_MICD_CINT_MASK 0x8000 /* MICD_CINT */ +#define WM8400_MICD_CINT_SHIFT 15 /* MICD_CINT */ +#define WM8400_MICD_CINT_WIDTH 1 /* MICD_CINT */ +#define WM8400_MICSCD_CINT 0x4000 /* MICSCD_CINT */ +#define WM8400_MICSCD_CINT_MASK 0x4000 /* MICSCD_CINT */ +#define WM8400_MICSCD_CINT_SHIFT 14 /* MICSCD_CINT */ +#define WM8400_MICSCD_CINT_WIDTH 1 /* MICSCD_CINT */ +#define WM8400_JDL_CINT 0x2000 /* JDL_CINT */ +#define WM8400_JDL_CINT_MASK 0x2000 /* JDL_CINT */ +#define WM8400_JDL_CINT_SHIFT 13 /* JDL_CINT */ +#define WM8400_JDL_CINT_WIDTH 1 /* JDL_CINT */ +#define WM8400_JDR_CINT 0x1000 /* JDR_CINT */ +#define WM8400_JDR_CINT_MASK 0x1000 /* JDR_CINT */ +#define WM8400_JDR_CINT_SHIFT 12 /* JDR_CINT */ +#define WM8400_JDR_CINT_WIDTH 1 /* JDR_CINT */ +#define WM8400_CODEC_SEQ_END_EINT 0x0800 /* CODEC_SEQ_END_EINT */ +#define WM8400_CODEC_SEQ_END_EINT_MASK 0x0800 /* CODEC_SEQ_END_EINT */ +#define WM8400_CODEC_SEQ_END_EINT_SHIFT 11 /* CODEC_SEQ_END_EINT */ +#define WM8400_CODEC_SEQ_END_EINT_WIDTH 1 /* CODEC_SEQ_END_EINT */ +#define WM8400_CDEL_TO_EINT 0x0400 /* CDEL_TO_EINT */ +#define WM8400_CDEL_TO_EINT_MASK 0x0400 /* CDEL_TO_EINT */ +#define WM8400_CDEL_TO_EINT_SHIFT 10 /* CDEL_TO_EINT */ +#define WM8400_CDEL_TO_EINT_WIDTH 1 /* CDEL_TO_EINT */ +#define WM8400_CHIP_GT150_EINT 0x0200 /* CHIP_GT150_EINT */ +#define WM8400_CHIP_GT150_EINT_MASK 0x0200 /* CHIP_GT150_EINT */ +#define 
WM8400_CHIP_GT150_EINT_SHIFT 9 /* CHIP_GT150_EINT */ +#define WM8400_CHIP_GT150_EINT_WIDTH 1 /* CHIP_GT150_EINT */ +#define WM8400_CHIP_GT115_EINT 0x0100 /* CHIP_GT115_EINT */ +#define WM8400_CHIP_GT115_EINT_MASK 0x0100 /* CHIP_GT115_EINT */ +#define WM8400_CHIP_GT115_EINT_SHIFT 8 /* CHIP_GT115_EINT */ +#define WM8400_CHIP_GT115_EINT_WIDTH 1 /* CHIP_GT115_EINT */ +#define WM8400_LINE_CMP_EINT 0x0080 /* LINE_CMP_EINT */ +#define WM8400_LINE_CMP_EINT_MASK 0x0080 /* LINE_CMP_EINT */ +#define WM8400_LINE_CMP_EINT_SHIFT 7 /* LINE_CMP_EINT */ +#define WM8400_LINE_CMP_EINT_WIDTH 1 /* LINE_CMP_EINT */ +#define WM8400_UVLO_EINT 0x0040 /* UVLO_EINT */ +#define WM8400_UVLO_EINT_MASK 0x0040 /* UVLO_EINT */ +#define WM8400_UVLO_EINT_SHIFT 6 /* UVLO_EINT */ +#define WM8400_UVLO_EINT_WIDTH 1 /* UVLO_EINT */ +#define WM8400_DC2_UV_EINT 0x0020 /* DC2_UV_EINT */ +#define WM8400_DC2_UV_EINT_MASK 0x0020 /* DC2_UV_EINT */ +#define WM8400_DC2_UV_EINT_SHIFT 5 /* DC2_UV_EINT */ +#define WM8400_DC2_UV_EINT_WIDTH 1 /* DC2_UV_EINT */ +#define WM8400_DC1_UV_EINT 0x0010 /* DC1_UV_EINT */ +#define WM8400_DC1_UV_EINT_MASK 0x0010 /* DC1_UV_EINT */ +#define WM8400_DC1_UV_EINT_SHIFT 4 /* DC1_UV_EINT */ +#define WM8400_DC1_UV_EINT_WIDTH 1 /* DC1_UV_EINT */ +#define WM8400_LDO4_UV_EINT 0x0008 /* LDO4_UV_EINT */ +#define WM8400_LDO4_UV_EINT_MASK 0x0008 /* LDO4_UV_EINT */ +#define WM8400_LDO4_UV_EINT_SHIFT 3 /* LDO4_UV_EINT */ +#define WM8400_LDO4_UV_EINT_WIDTH 1 /* LDO4_UV_EINT */ +#define WM8400_LDO3_UV_EINT 0x0004 /* LDO3_UV_EINT */ +#define WM8400_LDO3_UV_EINT_MASK 0x0004 /* LDO3_UV_EINT */ +#define WM8400_LDO3_UV_EINT_SHIFT 2 /* LDO3_UV_EINT */ +#define WM8400_LDO3_UV_EINT_WIDTH 1 /* LDO3_UV_EINT */ +#define WM8400_LDO2_UV_EINT 0x0002 /* LDO2_UV_EINT */ +#define WM8400_LDO2_UV_EINT_MASK 0x0002 /* LDO2_UV_EINT */ +#define WM8400_LDO2_UV_EINT_SHIFT 1 /* LDO2_UV_EINT */ +#define WM8400_LDO2_UV_EINT_WIDTH 1 /* LDO2_UV_EINT */ +#define WM8400_LDO1_UV_EINT 0x0001 /* LDO1_UV_EINT */ +#define WM8400_LDO1_UV_EINT_MASK 0x0001 /* LDO1_UV_EINT */ +#define WM8400_LDO1_UV_EINT_SHIFT 0 /* LDO1_UV_EINT */ +#define WM8400_LDO1_UV_EINT_WIDTH 1 /* LDO1_UV_EINT */ + +/* + * R80 (0x50) - Interrupt Status 1 Mask + */ +#define WM8400_IM_MICD_CINT 0x8000 /* IM_MICD_CINT */ +#define WM8400_IM_MICD_CINT_MASK 0x8000 /* IM_MICD_CINT */ +#define WM8400_IM_MICD_CINT_SHIFT 15 /* IM_MICD_CINT */ +#define WM8400_IM_MICD_CINT_WIDTH 1 /* IM_MICD_CINT */ +#define WM8400_IM_MICSCD_CINT 0x4000 /* IM_MICSCD_CINT */ +#define WM8400_IM_MICSCD_CINT_MASK 0x4000 /* IM_MICSCD_CINT */ +#define WM8400_IM_MICSCD_CINT_SHIFT 14 /* IM_MICSCD_CINT */ +#define WM8400_IM_MICSCD_CINT_WIDTH 1 /* IM_MICSCD_CINT */ +#define WM8400_IM_JDL_CINT 0x2000 /* IM_JDL_CINT */ +#define WM8400_IM_JDL_CINT_MASK 0x2000 /* IM_JDL_CINT */ +#define WM8400_IM_JDL_CINT_SHIFT 13 /* IM_JDL_CINT */ +#define WM8400_IM_JDL_CINT_WIDTH 1 /* IM_JDL_CINT */ +#define WM8400_IM_JDR_CINT 0x1000 /* IM_JDR_CINT */ +#define WM8400_IM_JDR_CINT_MASK 0x1000 /* IM_JDR_CINT */ +#define WM8400_IM_JDR_CINT_SHIFT 12 /* IM_JDR_CINT */ +#define WM8400_IM_JDR_CINT_WIDTH 1 /* IM_JDR_CINT */ +#define WM8400_IM_CODEC_SEQ_END_EINT 0x0800 /* IM_CODEC_SEQ_END_EINT */ +#define WM8400_IM_CODEC_SEQ_END_EINT_MASK 0x0800 /* IM_CODEC_SEQ_END_EINT */ +#define WM8400_IM_CODEC_SEQ_END_EINT_SHIFT 11 /* IM_CODEC_SEQ_END_EINT */ +#define WM8400_IM_CODEC_SEQ_END_EINT_WIDTH 1 /* IM_CODEC_SEQ_END_EINT */ +#define WM8400_IM_CDEL_TO_EINT 0x0400 /* IM_CDEL_TO_EINT */ +#define WM8400_IM_CDEL_TO_EINT_MASK 0x0400 /* IM_CDEL_TO_EINT */ +#define 
WM8400_IM_CDEL_TO_EINT_SHIFT 10 /* IM_CDEL_TO_EINT */ +#define WM8400_IM_CDEL_TO_EINT_WIDTH 1 /* IM_CDEL_TO_EINT */ +#define WM8400_IM_CHIP_GT150_EINT 0x0200 /* IM_CHIP_GT150_EINT */ +#define WM8400_IM_CHIP_GT150_EINT_MASK 0x0200 /* IM_CHIP_GT150_EINT */ +#define WM8400_IM_CHIP_GT150_EINT_SHIFT 9 /* IM_CHIP_GT150_EINT */ +#define WM8400_IM_CHIP_GT150_EINT_WIDTH 1 /* IM_CHIP_GT150_EINT */ +#define WM8400_IM_CHIP_GT115_EINT 0x0100 /* IM_CHIP_GT115_EINT */ +#define WM8400_IM_CHIP_GT115_EINT_MASK 0x0100 /* IM_CHIP_GT115_EINT */ +#define WM8400_IM_CHIP_GT115_EINT_SHIFT 8 /* IM_CHIP_GT115_EINT */ +#define WM8400_IM_CHIP_GT115_EINT_WIDTH 1 /* IM_CHIP_GT115_EINT */ +#define WM8400_IM_LINE_CMP_EINT 0x0080 /* IM_LINE_CMP_EINT */ +#define WM8400_IM_LINE_CMP_EINT_MASK 0x0080 /* IM_LINE_CMP_EINT */ +#define WM8400_IM_LINE_CMP_EINT_SHIFT 7 /* IM_LINE_CMP_EINT */ +#define WM8400_IM_LINE_CMP_EINT_WIDTH 1 /* IM_LINE_CMP_EINT */ +#define WM8400_IM_UVLO_EINT 0x0040 /* IM_UVLO_EINT */ +#define WM8400_IM_UVLO_EINT_MASK 0x0040 /* IM_UVLO_EINT */ +#define WM8400_IM_UVLO_EINT_SHIFT 6 /* IM_UVLO_EINT */ +#define WM8400_IM_UVLO_EINT_WIDTH 1 /* IM_UVLO_EINT */ +#define WM8400_IM_DC2_UV_EINT 0x0020 /* IM_DC2_UV_EINT */ +#define WM8400_IM_DC2_UV_EINT_MASK 0x0020 /* IM_DC2_UV_EINT */ +#define WM8400_IM_DC2_UV_EINT_SHIFT 5 /* IM_DC2_UV_EINT */ +#define WM8400_IM_DC2_UV_EINT_WIDTH 1 /* IM_DC2_UV_EINT */ +#define WM8400_IM_DC1_UV_EINT 0x0010 /* IM_DC1_UV_EINT */ +#define WM8400_IM_DC1_UV_EINT_MASK 0x0010 /* IM_DC1_UV_EINT */ +#define WM8400_IM_DC1_UV_EINT_SHIFT 4 /* IM_DC1_UV_EINT */ +#define WM8400_IM_DC1_UV_EINT_WIDTH 1 /* IM_DC1_UV_EINT */ +#define WM8400_IM_LDO4_UV_EINT 0x0008 /* IM_LDO4_UV_EINT */ +#define WM8400_IM_LDO4_UV_EINT_MASK 0x0008 /* IM_LDO4_UV_EINT */ +#define WM8400_IM_LDO4_UV_EINT_SHIFT 3 /* IM_LDO4_UV_EINT */ +#define WM8400_IM_LDO4_UV_EINT_WIDTH 1 /* IM_LDO4_UV_EINT */ +#define WM8400_IM_LDO3_UV_EINT 0x0004 /* IM_LDO3_UV_EINT */ +#define WM8400_IM_LDO3_UV_EINT_MASK 0x0004 /* IM_LDO3_UV_EINT */ +#define WM8400_IM_LDO3_UV_EINT_SHIFT 2 /* IM_LDO3_UV_EINT */ +#define WM8400_IM_LDO3_UV_EINT_WIDTH 1 /* IM_LDO3_UV_EINT */ +#define WM8400_IM_LDO2_UV_EINT 0x0002 /* IM_LDO2_UV_EINT */ +#define WM8400_IM_LDO2_UV_EINT_MASK 0x0002 /* IM_LDO2_UV_EINT */ +#define WM8400_IM_LDO2_UV_EINT_SHIFT 1 /* IM_LDO2_UV_EINT */ +#define WM8400_IM_LDO2_UV_EINT_WIDTH 1 /* IM_LDO2_UV_EINT */ +#define WM8400_IM_LDO1_UV_EINT 0x0001 /* IM_LDO1_UV_EINT */ +#define WM8400_IM_LDO1_UV_EINT_MASK 0x0001 /* IM_LDO1_UV_EINT */ +#define WM8400_IM_LDO1_UV_EINT_SHIFT 0 /* IM_LDO1_UV_EINT */ +#define WM8400_IM_LDO1_UV_EINT_WIDTH 1 /* IM_LDO1_UV_EINT */ + +/* + * R81 (0x51) - Interrupt Levels + */ +#define WM8400_MICD_LVL 0x8000 /* MICD_LVL */ +#define WM8400_MICD_LVL_MASK 0x8000 /* MICD_LVL */ +#define WM8400_MICD_LVL_SHIFT 15 /* MICD_LVL */ +#define WM8400_MICD_LVL_WIDTH 1 /* MICD_LVL */ +#define WM8400_MICSCD_LVL 0x4000 /* MICSCD_LVL */ +#define WM8400_MICSCD_LVL_MASK 0x4000 /* MICSCD_LVL */ +#define WM8400_MICSCD_LVL_SHIFT 14 /* MICSCD_LVL */ +#define WM8400_MICSCD_LVL_WIDTH 1 /* MICSCD_LVL */ +#define WM8400_JDL_LVL 0x2000 /* JDL_LVL */ +#define WM8400_JDL_LVL_MASK 0x2000 /* JDL_LVL */ +#define WM8400_JDL_LVL_SHIFT 13 /* JDL_LVL */ +#define WM8400_JDL_LVL_WIDTH 1 /* JDL_LVL */ +#define WM8400_JDR_LVL 0x1000 /* JDR_LVL */ +#define WM8400_JDR_LVL_MASK 0x1000 /* JDR_LVL */ +#define WM8400_JDR_LVL_SHIFT 12 /* JDR_LVL */ +#define WM8400_JDR_LVL_WIDTH 1 /* JDR_LVL */ +#define WM8400_CODEC_SEQ_END_LVL 0x0800 /* CODEC_SEQ_END_LVL */ +#define 
WM8400_CODEC_SEQ_END_LVL_MASK 0x0800 /* CODEC_SEQ_END_LVL */ +#define WM8400_CODEC_SEQ_END_LVL_SHIFT 11 /* CODEC_SEQ_END_LVL */ +#define WM8400_CODEC_SEQ_END_LVL_WIDTH 1 /* CODEC_SEQ_END_LVL */ +#define WM8400_CDEL_TO_LVL 0x0400 /* CDEL_TO_LVL */ +#define WM8400_CDEL_TO_LVL_MASK 0x0400 /* CDEL_TO_LVL */ +#define WM8400_CDEL_TO_LVL_SHIFT 10 /* CDEL_TO_LVL */ +#define WM8400_CDEL_TO_LVL_WIDTH 1 /* CDEL_TO_LVL */ +#define WM8400_CHIP_GT150_LVL 0x0200 /* CHIP_GT150_LVL */ +#define WM8400_CHIP_GT150_LVL_MASK 0x0200 /* CHIP_GT150_LVL */ +#define WM8400_CHIP_GT150_LVL_SHIFT 9 /* CHIP_GT150_LVL */ +#define WM8400_CHIP_GT150_LVL_WIDTH 1 /* CHIP_GT150_LVL */ +#define WM8400_CHIP_GT115_LVL 0x0100 /* CHIP_GT115_LVL */ +#define WM8400_CHIP_GT115_LVL_MASK 0x0100 /* CHIP_GT115_LVL */ +#define WM8400_CHIP_GT115_LVL_SHIFT 8 /* CHIP_GT115_LVL */ +#define WM8400_CHIP_GT115_LVL_WIDTH 1 /* CHIP_GT115_LVL */ +#define WM8400_LINE_CMP_LVL 0x0080 /* LINE_CMP_LVL */ +#define WM8400_LINE_CMP_LVL_MASK 0x0080 /* LINE_CMP_LVL */ +#define WM8400_LINE_CMP_LVL_SHIFT 7 /* LINE_CMP_LVL */ +#define WM8400_LINE_CMP_LVL_WIDTH 1 /* LINE_CMP_LVL */ +#define WM8400_UVLO_LVL 0x0040 /* UVLO_LVL */ +#define WM8400_UVLO_LVL_MASK 0x0040 /* UVLO_LVL */ +#define WM8400_UVLO_LVL_SHIFT 6 /* UVLO_LVL */ +#define WM8400_UVLO_LVL_WIDTH 1 /* UVLO_LVL */ +#define WM8400_DC2_UV_LVL 0x0020 /* DC2_UV_LVL */ +#define WM8400_DC2_UV_LVL_MASK 0x0020 /* DC2_UV_LVL */ +#define WM8400_DC2_UV_LVL_SHIFT 5 /* DC2_UV_LVL */ +#define WM8400_DC2_UV_LVL_WIDTH 1 /* DC2_UV_LVL */ +#define WM8400_DC1_UV_LVL 0x0010 /* DC1_UV_LVL */ +#define WM8400_DC1_UV_LVL_MASK 0x0010 /* DC1_UV_LVL */ +#define WM8400_DC1_UV_LVL_SHIFT 4 /* DC1_UV_LVL */ +#define WM8400_DC1_UV_LVL_WIDTH 1 /* DC1_UV_LVL */ +#define WM8400_LDO4_UV_LVL 0x0008 /* LDO4_UV_LVL */ +#define WM8400_LDO4_UV_LVL_MASK 0x0008 /* LDO4_UV_LVL */ +#define WM8400_LDO4_UV_LVL_SHIFT 3 /* LDO4_UV_LVL */ +#define WM8400_LDO4_UV_LVL_WIDTH 1 /* LDO4_UV_LVL */ +#define WM8400_LDO3_UV_LVL 0x0004 /* LDO3_UV_LVL */ +#define WM8400_LDO3_UV_LVL_MASK 0x0004 /* LDO3_UV_LVL */ +#define WM8400_LDO3_UV_LVL_SHIFT 2 /* LDO3_UV_LVL */ +#define WM8400_LDO3_UV_LVL_WIDTH 1 /* LDO3_UV_LVL */ +#define WM8400_LDO2_UV_LVL 0x0002 /* LDO2_UV_LVL */ +#define WM8400_LDO2_UV_LVL_MASK 0x0002 /* LDO2_UV_LVL */ +#define WM8400_LDO2_UV_LVL_SHIFT 1 /* LDO2_UV_LVL */ +#define WM8400_LDO2_UV_LVL_WIDTH 1 /* LDO2_UV_LVL */ +#define WM8400_LDO1_UV_LVL 0x0001 /* LDO1_UV_LVL */ +#define WM8400_LDO1_UV_LVL_MASK 0x0001 /* LDO1_UV_LVL */ +#define WM8400_LDO1_UV_LVL_SHIFT 0 /* LDO1_UV_LVL */ +#define WM8400_LDO1_UV_LVL_WIDTH 1 /* LDO1_UV_LVL */ + +/* + * R82 (0x52) - Shutdown Reason + */ +#define WM8400_SDR_CHIP_SOFTSD 0x2000 /* SDR_CHIP_SOFTSD */ +#define WM8400_SDR_CHIP_SOFTSD_MASK 0x2000 /* SDR_CHIP_SOFTSD */ +#define WM8400_SDR_CHIP_SOFTSD_SHIFT 13 /* SDR_CHIP_SOFTSD */ +#define WM8400_SDR_CHIP_SOFTSD_WIDTH 1 /* SDR_CHIP_SOFTSD */ +#define WM8400_SDR_NPDN 0x0800 /* SDR_NPDN */ +#define WM8400_SDR_NPDN_MASK 0x0800 /* SDR_NPDN */ +#define WM8400_SDR_NPDN_SHIFT 11 /* SDR_NPDN */ +#define WM8400_SDR_NPDN_WIDTH 1 /* SDR_NPDN */ +#define WM8400_SDR_CHIP_GT150 0x0200 /* SDR_CHIP_GT150 */ +#define WM8400_SDR_CHIP_GT150_MASK 0x0200 /* SDR_CHIP_GT150 */ +#define WM8400_SDR_CHIP_GT150_SHIFT 9 /* SDR_CHIP_GT150 */ +#define WM8400_SDR_CHIP_GT150_WIDTH 1 /* SDR_CHIP_GT150 */ +#define WM8400_SDR_CHIP_GT115 0x0100 /* SDR_CHIP_GT115 */ +#define WM8400_SDR_CHIP_GT115_MASK 0x0100 /* SDR_CHIP_GT115 */ +#define WM8400_SDR_CHIP_GT115_SHIFT 8 /* SDR_CHIP_GT115 */ +#define 
WM8400_SDR_CHIP_GT115_WIDTH 1 /* SDR_CHIP_GT115 */ +#define WM8400_SDR_LINE_CMP 0x0080 /* SDR_LINE_CMP */ +#define WM8400_SDR_LINE_CMP_MASK 0x0080 /* SDR_LINE_CMP */ +#define WM8400_SDR_LINE_CMP_SHIFT 7 /* SDR_LINE_CMP */ +#define WM8400_SDR_LINE_CMP_WIDTH 1 /* SDR_LINE_CMP */ +#define WM8400_SDR_UVLO 0x0040 /* SDR_UVLO */ +#define WM8400_SDR_UVLO_MASK 0x0040 /* SDR_UVLO */ +#define WM8400_SDR_UVLO_SHIFT 6 /* SDR_UVLO */ +#define WM8400_SDR_UVLO_WIDTH 1 /* SDR_UVLO */ +#define WM8400_SDR_DC2_UV 0x0020 /* SDR_DC2_UV */ +#define WM8400_SDR_DC2_UV_MASK 0x0020 /* SDR_DC2_UV */ +#define WM8400_SDR_DC2_UV_SHIFT 5 /* SDR_DC2_UV */ +#define WM8400_SDR_DC2_UV_WIDTH 1 /* SDR_DC2_UV */ +#define WM8400_SDR_DC1_UV 0x0010 /* SDR_DC1_UV */ +#define WM8400_SDR_DC1_UV_MASK 0x0010 /* SDR_DC1_UV */ +#define WM8400_SDR_DC1_UV_SHIFT 4 /* SDR_DC1_UV */ +#define WM8400_SDR_DC1_UV_WIDTH 1 /* SDR_DC1_UV */ +#define WM8400_SDR_LDO4_UV 0x0008 /* SDR_LDO4_UV */ +#define WM8400_SDR_LDO4_UV_MASK 0x0008 /* SDR_LDO4_UV */ +#define WM8400_SDR_LDO4_UV_SHIFT 3 /* SDR_LDO4_UV */ +#define WM8400_SDR_LDO4_UV_WIDTH 1 /* SDR_LDO4_UV */ +#define WM8400_SDR_LDO3_UV 0x0004 /* SDR_LDO3_UV */ +#define WM8400_SDR_LDO3_UV_MASK 0x0004 /* SDR_LDO3_UV */ +#define WM8400_SDR_LDO3_UV_SHIFT 2 /* SDR_LDO3_UV */ +#define WM8400_SDR_LDO3_UV_WIDTH 1 /* SDR_LDO3_UV */ +#define WM8400_SDR_LDO2_UV 0x0002 /* SDR_LDO2_UV */ +#define WM8400_SDR_LDO2_UV_MASK 0x0002 /* SDR_LDO2_UV */ +#define WM8400_SDR_LDO2_UV_SHIFT 1 /* SDR_LDO2_UV */ +#define WM8400_SDR_LDO2_UV_WIDTH 1 /* SDR_LDO2_UV */ +#define WM8400_SDR_LDO1_UV 0x0001 /* SDR_LDO1_UV */ +#define WM8400_SDR_LDO1_UV_MASK 0x0001 /* SDR_LDO1_UV */ +#define WM8400_SDR_LDO1_UV_SHIFT 0 /* SDR_LDO1_UV */ +#define WM8400_SDR_LDO1_UV_WIDTH 1 /* SDR_LDO1_UV */ + +/* + * R84 (0x54) - Line Circuits + */ +#define WM8400_BG_LINE_COMP 0x8000 /* BG_LINE_COMP */ +#define WM8400_BG_LINE_COMP_MASK 0x8000 /* BG_LINE_COMP */ +#define WM8400_BG_LINE_COMP_SHIFT 15 /* BG_LINE_COMP */ +#define WM8400_BG_LINE_COMP_WIDTH 1 /* BG_LINE_COMP */ +#define WM8400_LINE_CMP_VTHI_MASK 0x00F0 /* LINE_CMP_VTHI - [7:4] */ +#define WM8400_LINE_CMP_VTHI_SHIFT 4 /* LINE_CMP_VTHI - [7:4] */ +#define WM8400_LINE_CMP_VTHI_WIDTH 4 /* LINE_CMP_VTHI - [7:4] */ +#define WM8400_LINE_CMP_VTHD_MASK 0x000F /* LINE_CMP_VTHD - [3:0] */ +#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */ +#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */ + +u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg); +int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data); +int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val); + +#endif diff --git a/include/linux/mfd/wm8400.h b/include/linux/mfd/wm8400.h new file mode 100644 index 00000000..b46b566a --- /dev/null +++ b/include/linux/mfd/wm8400.h @@ -0,0 +1,40 @@ +/* + * wm8400 client interface + * + * Copyright 2008 Wolfson Microelectronics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
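/*
 * Illustrative aside, not part of the original header: every WM8400 bit
 * field above is published as a value/_MASK/_SHIFT/_WIDTH group, and the
 * wm8400_set_bits() accessor declared at the end of the register header
 * performs the read-modify-write.  A minimal sketch of how a client might
 * program the LDO1 voltage-select field; the helper name is hypothetical,
 * -EINVAL assumes <linux/errno.h>, and the 0x41 address is taken from the
 * "R65 (0x41) - LDO 1 Control" comment above.
 */
static int wm8400_ldo1_set_vsel(struct wm8400 *wm8400, u16 sel)
{
	/* Reject values wider than the 5-bit LDO1_VSEL field [4:0] */
	if (sel > (WM8400_LDO1_VSEL_MASK >> WM8400_LDO1_VSEL_SHIFT))
		return -EINVAL;

	/* Update only LDO1_VSEL in R65 (0x41), leaving the other bits alone */
	return wm8400_set_bits(wm8400, 0x41, WM8400_LDO1_VSEL_MASK,
			       sel << WM8400_LDO1_VSEL_SHIFT);
}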
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_MFD_WM8400_H +#define __LINUX_MFD_WM8400_H + +#include <linux/regulator/machine.h> + +#define WM8400_LDO1 0 +#define WM8400_LDO2 1 +#define WM8400_LDO3 2 +#define WM8400_LDO4 3 +#define WM8400_DCDC1 4 +#define WM8400_DCDC2 5 + +struct wm8400_platform_data { + int (*platform_init)(struct device *dev); +}; + +int wm8400_register_regulator(struct device *dev, int reg, + struct regulator_init_data *initdata); + +#endif diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h new file mode 100644 index 00000000..9eff2a35 --- /dev/null +++ b/include/linux/mfd/wm8994/core.h @@ -0,0 +1,113 @@ +/* + * include/linux/mfd/wm8994/core.h -- Core interface for WM8994 + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM8994_CORE_H__ +#define __MFD_WM8994_CORE_H__ + +#include <linux/mutex.h> +#include <linux/interrupt.h> + +enum wm8994_type { + WM8994 = 0, + WM8958 = 1, + WM1811 = 2, +}; + +struct regulator_dev; +struct regulator_bulk_data; +struct regmap; + +#define WM8994_NUM_GPIO_REGS 11 +#define WM8994_NUM_LDO_REGS 2 +#define WM8994_NUM_IRQ_REGS 2 + +#define WM8994_IRQ_TEMP_SHUT 0 +#define WM8994_IRQ_MIC1_DET 1 +#define WM8994_IRQ_MIC1_SHRT 2 +#define WM8994_IRQ_MIC2_DET 3 +#define WM8994_IRQ_MIC2_SHRT 4 +#define WM8994_IRQ_FLL1_LOCK 5 +#define WM8994_IRQ_FLL2_LOCK 6 +#define WM8994_IRQ_SRC1_LOCK 7 +#define WM8994_IRQ_SRC2_LOCK 8 +#define WM8994_IRQ_AIF1DRC1_SIG_DET 9 +#define WM8994_IRQ_AIF1DRC2_SIG_DET 10 +#define WM8994_IRQ_AIF2DRC_SIG_DET 11 +#define WM8994_IRQ_FIFOS_ERR 12 +#define WM8994_IRQ_WSEQ_DONE 13 +#define WM8994_IRQ_DCS_DONE 14 +#define WM8994_IRQ_TEMP_WARN 15 + +/* GPIOs in the chip are numbered from 1-11 */ +#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN) + +struct wm8994 { + struct mutex irq_lock; + + enum wm8994_type type; + int revision; + + struct device *dev; + struct regmap *regmap; + + bool ldo_ena_always_driven; + + int gpio_base; + int irq_base; + + int irq; + struct regmap_irq_chip_data *irq_data; + + /* Used over suspend/resume */ + bool suspended; + + struct regulator_dev *dbvdd; + int num_supplies; + struct regulator_bulk_data *supplies; +}; + +/* Device I/O API */ +int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg); +int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg, + unsigned short val); +int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg, + unsigned short mask, unsigned short val); +int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg, + int count, u16 *buf); +int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg, + int count, const u16 *buf); + + +/* Helper to save on boilerplate */ +static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq, + irq_handler_t handler, const char *name, + void *data) +{ + if (!wm8994->irq_base) + return -EINVAL; + return request_threaded_irq(wm8994->irq_base + irq, NULL, handler, + IRQF_TRIGGER_RISING, name, + data); +} +static inline void wm8994_free_irq(struct wm8994 *wm8994, 
int irq, void *data) +{ + if (!wm8994->irq_base) + return; + free_irq(wm8994->irq_base + irq, data); +} + +int wm8994_irq_init(struct wm8994 *wm8994); +void wm8994_irq_exit(struct wm8994 *wm8994); + +#endif diff --git a/include/linux/mfd/wm8994/gpio.h b/include/linux/mfd/wm8994/gpio.h new file mode 100644 index 00000000..0c79b5ff --- /dev/null +++ b/include/linux/mfd/wm8994/gpio.h @@ -0,0 +1,76 @@ +/* + * include/linux/mfd/wm8994/gpio.h - GPIO configuration for WM8994 + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM8994_GPIO_H__ +#define __MFD_WM8994_GPIO_H__ + +#define WM8994_GPIO_MAX 11 + +#define WM8994_GP_FN_PIN_SPECIFIC 0 +#define WM8994_GP_FN_GPIO 1 +#define WM8994_GP_FN_SDOUT 2 +#define WM8994_GP_FN_IRQ 3 +#define WM8994_GP_FN_TEMPERATURE 4 +#define WM8994_GP_FN_MICBIAS1_DET 5 +#define WM8994_GP_FN_MICBIAS1_SHORT 6 +#define WM8994_GP_FN_MICBIAS2_DET 7 +#define WM8994_GP_FN_MICBIAS2_SHORT 8 +#define WM8994_GP_FN_FLL1_LOCK 9 +#define WM8994_GP_FN_FLL2_LOCK 10 +#define WM8994_GP_FN_SRC1_LOCK 11 +#define WM8994_GP_FN_SRC2_LOCK 12 +#define WM8994_GP_FN_DRC1_ACT 13 +#define WM8994_GP_FN_DRC2_ACT 14 +#define WM8994_GP_FN_DRC3_ACT 15 +#define WM8994_GP_FN_WSEQ_STATUS 16 +#define WM8994_GP_FN_FIFO_ERROR 17 +#define WM8994_GP_FN_OPCLK 18 +#define WM8994_GP_FN_THW 19 +#define WM8994_GP_FN_DCS_DONE 20 +#define WM8994_GP_FN_FLL1_OUT 21 +#define WM8994_GP_FN_FLL2_OUT 22 + +#define WM8994_GPN_DIR 0x8000 /* GPN_DIR */ +#define WM8994_GPN_DIR_MASK 0x8000 /* GPN_DIR */ +#define WM8994_GPN_DIR_SHIFT 15 /* GPN_DIR */ +#define WM8994_GPN_DIR_WIDTH 1 /* GPN_DIR */ +#define WM8994_GPN_PU 0x4000 /* GPN_PU */ +#define WM8994_GPN_PU_MASK 0x4000 /* GPN_PU */ +#define WM8994_GPN_PU_SHIFT 14 /* GPN_PU */ +#define WM8994_GPN_PU_WIDTH 1 /* GPN_PU */ +#define WM8994_GPN_PD 0x2000 /* GPN_PD */ +#define WM8994_GPN_PD_MASK 0x2000 /* GPN_PD */ +#define WM8994_GPN_PD_SHIFT 13 /* GPN_PD */ +#define WM8994_GPN_PD_WIDTH 1 /* GPN_PD */ +#define WM8994_GPN_POL 0x0400 /* GPN_POL */ +#define WM8994_GPN_POL_MASK 0x0400 /* GPN_POL */ +#define WM8994_GPN_POL_SHIFT 10 /* GPN_POL */ +#define WM8994_GPN_POL_WIDTH 1 /* GPN_POL */ +#define WM8994_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */ +#define WM8994_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */ +#define WM8994_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */ +#define WM8994_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */ +#define WM8994_GPN_DB 0x0100 /* GPN_DB */ +#define WM8994_GPN_DB_MASK 0x0100 /* GPN_DB */ +#define WM8994_GPN_DB_SHIFT 8 /* GPN_DB */ +#define WM8994_GPN_DB_WIDTH 1 /* GPN_DB */ +#define WM8994_GPN_LVL 0x0040 /* GPN_LVL */ +#define WM8994_GPN_LVL_MASK 0x0040 /* GPN_LVL */ +#define WM8994_GPN_LVL_SHIFT 6 /* GPN_LVL */ +#define WM8994_GPN_LVL_WIDTH 1 /* GPN_LVL */ +#define WM8994_GPN_FN_MASK 0x001F /* GPN_FN - [4:0] */ +#define WM8994_GPN_FN_SHIFT 0 /* GPN_FN - [4:0] */ +#define WM8994_GPN_FN_WIDTH 5 /* GPN_FN - [4:0] */ + +#endif diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h new file mode 100644 index 00000000..893267bb --- /dev/null +++ b/include/linux/mfd/wm8994/pdata.h @@ -0,0 +1,210 @@ +/* + * include/linux/mfd/wm8994/pdata.h -- Platform data for WM8994 + * + * Copyright 2009 Wolfson Microelectronics PLC. 
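/*
 * Illustrative aside, not part of the original header: the WM8994_GPN_*
 * macros from gpio.h above compose directly into the gpio_defaults[]
 * entries of the wm8994_pdata structure defined later in this file (a zero
 * entry is left at the chip default; WM8994_CONFIGURE_GPIO requests an
 * explicit all-zero write, per the comment in pdata.h).  A minimal
 * board-file sketch assuming both headers are included; the pin choices
 * and the reading of GPN_DIR as the input-direction bit are illustrative
 * assumptions, not a recommended configuration.
 */
static struct wm8994_pdata example_wm8994_pdata = {
	.gpio_defaults = {
		/* GPIO1: plain GPIO function with pull-up and debounce */
		[0] = WM8994_GPN_DIR | WM8994_GPN_PU | WM8994_GPN_DB |
		      WM8994_GP_FN_GPIO,
		/* GPIO3: force a 0x0000 write instead of "leave unchanged" */
		[2] = WM8994_CONFIGURE_GPIO,
	},
};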
+ * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM8994_PDATA_H__ +#define __MFD_WM8994_PDATA_H__ + +#define WM8994_NUM_LDO 2 +#define WM8994_NUM_GPIO 11 + +struct wm8994_ldo_pdata { + /** GPIOs to enable regulator, 0 or less if not available */ + int enable; + + const struct regulator_init_data *init_data; +}; + +#define WM8994_CONFIGURE_GPIO 0x10000 + +#define WM8994_DRC_REGS 5 +#define WM8994_EQ_REGS 20 +#define WM8958_MBC_CUTOFF_REGS 20 +#define WM8958_MBC_COEFF_REGS 48 +#define WM8958_MBC_COMBINED_REGS 56 +#define WM8958_VSS_HPF_REGS 2 +#define WM8958_VSS_REGS 148 +#define WM8958_ENH_EQ_REGS 32 + +/** + * DRC configurations are specified with a label and a set of register + * values to write (the enable bits will be ignored). At runtime an + * enumerated control will be presented for each DRC block allowing + * the user to choose the configration to use. + * + * Configurations may be generated by hand or by using the DRC control + * panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/ + * for details. + */ +struct wm8994_drc_cfg { + const char *name; + u16 regs[WM8994_DRC_REGS]; +}; + +/** + * ReTune Mobile configurations are specified with a label, sample + * rate and set of values to write (the enable bits will be ignored). + * + * Configurations are expected to be generated using the ReTune Mobile + * control panel in WISCE - see http://www.wolfsonmicro.com/wisce/ + */ +struct wm8994_retune_mobile_cfg { + const char *name; + unsigned int rate; + u16 regs[WM8994_EQ_REGS]; +}; + +/** + * Multiband compressor configurations are specified with a label and + * two sets of values to write. Configurations are expected to be + * generated using the multiband compressor configuration panel in + * WISCE - see http://www.wolfsonmicro.com/wisce/ + */ +struct wm8958_mbc_cfg { + const char *name; + u16 cutoff_regs[WM8958_MBC_CUTOFF_REGS]; + u16 coeff_regs[WM8958_MBC_COEFF_REGS]; + + /* Coefficient layout when using MBC+VSS firmware */ + u16 combined_regs[WM8958_MBC_COMBINED_REGS]; +}; + +/** + * VSS HPF configurations are specified with a label and two values to + * write. Configurations are expected to be generated using the + * multiband compressor configuration panel in WISCE - see + * http://www.wolfsonmicro.com/wisce/ + */ +struct wm8958_vss_hpf_cfg { + const char *name; + u16 regs[WM8958_VSS_HPF_REGS]; +}; + +/** + * VSS configurations are specified with a label and array of values + * to write. Configurations are expected to be generated using the + * multiband compressor configuration panel in WISCE - see + * http://www.wolfsonmicro.com/wisce/ + */ +struct wm8958_vss_cfg { + const char *name; + u16 regs[WM8958_VSS_REGS]; +}; + +/** + * Enhanced EQ configurations are specified with a label and array of + * values to write. Configurations are expected to be generated using + * the multiband compressor configuration panel in WISCE - see + * http://www.wolfsonmicro.com/wisce/ + */ +struct wm8958_enh_eq_cfg { + const char *name; + u16 regs[WM8958_ENH_EQ_REGS]; +}; + +/** + * Microphone detection rates, used to tune response rates and power + * consumption for WM8958/WM1811 microphone detection. + * + * @sysclk: System clock rate to use this configuration for. 
+ * @idle: True if this configuration should use when no accessory is detected, + * false otherwise. + * @start: Value for MICD_BIAS_START_TIME register field (not shifted). + * @rate: Value for MICD_RATE register field (not shifted). + */ +struct wm8958_micd_rate { + int sysclk; + bool idle; + int start; + int rate; +}; + +struct wm8994_pdata { + int gpio_base; + + /** + * Default values for GPIOs if non-zero, WM8994_CONFIGURE_GPIO + * can be used for all zero values. + */ + int gpio_defaults[WM8994_NUM_GPIO]; + + struct wm8994_ldo_pdata ldo[WM8994_NUM_LDO]; + + int irq_base; /** Base IRQ number for WM8994, required for IRQs */ + + int num_drc_cfgs; + struct wm8994_drc_cfg *drc_cfgs; + + int num_retune_mobile_cfgs; + struct wm8994_retune_mobile_cfg *retune_mobile_cfgs; + + int num_mbc_cfgs; + struct wm8958_mbc_cfg *mbc_cfgs; + + int num_vss_cfgs; + struct wm8958_vss_cfg *vss_cfgs; + + int num_vss_hpf_cfgs; + struct wm8958_vss_hpf_cfg *vss_hpf_cfgs; + + int num_enh_eq_cfgs; + struct wm8958_enh_eq_cfg *enh_eq_cfgs; + + int num_micd_rates; + struct wm8958_micd_rate *micd_rates; + + /* LINEOUT can be differential or single ended */ + unsigned int lineout1_diff:1; + unsigned int lineout2_diff:1; + + /* Common mode feedback */ + unsigned int lineout1fb:1; + unsigned int lineout2fb:1; + + /* IRQ for microphone detection if brought out directly as a + * signal. + */ + int micdet_irq; + + /* WM8994 microphone biases: 0=0.9*AVDD1 1=0.65*AVVD1 */ + unsigned int micbias1_lvl:1; + unsigned int micbias2_lvl:1; + + /* WM8994 jack detect threashold levels, see datasheet for values */ + unsigned int jd_scthr:2; + unsigned int jd_thr:2; + + /* Configure WM1811 jack detection for use with external capacitor */ + unsigned int jd_ext_cap:1; + + /* WM8958 microphone bias configuration */ + int micbias[2]; + + /* WM8958 microphone detection ranges */ + u16 micd_lvl_sel; + + /* Disable the internal pull downs on the LDOs if they are + * always driven (eg, connected to an always on supply or + * GPIO that always drives an output. If they float power + * consumption will rise. + */ + bool ldo_ena_always_driven; + + /* + * SPKMODE must be pulled internally by the device on this + * system. + */ + bool spkmode_pu; +}; + +#endif diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h new file mode 100644 index 00000000..86e6a032 --- /dev/null +++ b/include/linux/mfd/wm8994/registers.h @@ -0,0 +1,4811 @@ +/* + * include/linux/mfd/wm8994/registers.h -- Register definitions for WM8994 + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM8994_REGISTERS_H__ +#define __MFD_WM8994_REGISTERS_H__ + +/* + * Register values. 
+ */ +#define WM8994_SOFTWARE_RESET 0x00 +#define WM8994_POWER_MANAGEMENT_1 0x01 +#define WM8994_POWER_MANAGEMENT_2 0x02 +#define WM8994_POWER_MANAGEMENT_3 0x03 +#define WM8994_POWER_MANAGEMENT_4 0x04 +#define WM8994_POWER_MANAGEMENT_5 0x05 +#define WM8994_POWER_MANAGEMENT_6 0x06 +#define WM8994_INPUT_MIXER_1 0x15 +#define WM8994_LEFT_LINE_INPUT_1_2_VOLUME 0x18 +#define WM8994_LEFT_LINE_INPUT_3_4_VOLUME 0x19 +#define WM8994_RIGHT_LINE_INPUT_1_2_VOLUME 0x1A +#define WM8994_RIGHT_LINE_INPUT_3_4_VOLUME 0x1B +#define WM8994_LEFT_OUTPUT_VOLUME 0x1C +#define WM8994_RIGHT_OUTPUT_VOLUME 0x1D +#define WM8994_LINE_OUTPUTS_VOLUME 0x1E +#define WM8994_HPOUT2_VOLUME 0x1F +#define WM8994_LEFT_OPGA_VOLUME 0x20 +#define WM8994_RIGHT_OPGA_VOLUME 0x21 +#define WM8994_SPKMIXL_ATTENUATION 0x22 +#define WM8994_SPKMIXR_ATTENUATION 0x23 +#define WM8994_SPKOUT_MIXERS 0x24 +#define WM8994_CLASSD 0x25 +#define WM8994_SPEAKER_VOLUME_LEFT 0x26 +#define WM8994_SPEAKER_VOLUME_RIGHT 0x27 +#define WM8994_INPUT_MIXER_2 0x28 +#define WM8994_INPUT_MIXER_3 0x29 +#define WM8994_INPUT_MIXER_4 0x2A +#define WM8994_INPUT_MIXER_5 0x2B +#define WM8994_INPUT_MIXER_6 0x2C +#define WM8994_OUTPUT_MIXER_1 0x2D +#define WM8994_OUTPUT_MIXER_2 0x2E +#define WM8994_OUTPUT_MIXER_3 0x2F +#define WM8994_OUTPUT_MIXER_4 0x30 +#define WM8994_OUTPUT_MIXER_5 0x31 +#define WM8994_OUTPUT_MIXER_6 0x32 +#define WM8994_HPOUT2_MIXER 0x33 +#define WM8994_LINE_MIXER_1 0x34 +#define WM8994_LINE_MIXER_2 0x35 +#define WM8994_SPEAKER_MIXER 0x36 +#define WM8994_ADDITIONAL_CONTROL 0x37 +#define WM8994_ANTIPOP_1 0x38 +#define WM8994_ANTIPOP_2 0x39 +#define WM8994_MICBIAS 0x3A +#define WM8994_LDO_1 0x3B +#define WM8994_LDO_2 0x3C +#define WM8958_MICBIAS1 0x3D +#define WM8958_MICBIAS2 0x3E +#define WM8994_CHARGE_PUMP_1 0x4C +#define WM8958_CHARGE_PUMP_2 0x4D +#define WM8994_CLASS_W_1 0x51 +#define WM8994_DC_SERVO_1 0x54 +#define WM8994_DC_SERVO_2 0x55 +#define WM8994_DC_SERVO_4 0x57 +#define WM8994_DC_SERVO_READBACK 0x58 +#define WM8994_DC_SERVO_4E 0x59 +#define WM8994_ANALOGUE_HP_1 0x60 +#define WM8958_MIC_DETECT_1 0xD0 +#define WM8958_MIC_DETECT_2 0xD1 +#define WM8958_MIC_DETECT_3 0xD2 +#define WM8994_CHIP_REVISION 0x100 +#define WM8994_CONTROL_INTERFACE 0x101 +#define WM8994_WRITE_SEQUENCER_CTRL_1 0x110 +#define WM8994_WRITE_SEQUENCER_CTRL_2 0x111 +#define WM8994_AIF1_CLOCKING_1 0x200 +#define WM8994_AIF1_CLOCKING_2 0x201 +#define WM8994_AIF2_CLOCKING_1 0x204 +#define WM8994_AIF2_CLOCKING_2 0x205 +#define WM8994_CLOCKING_1 0x208 +#define WM8994_CLOCKING_2 0x209 +#define WM8994_AIF1_RATE 0x210 +#define WM8994_AIF2_RATE 0x211 +#define WM8994_RATE_STATUS 0x212 +#define WM8994_FLL1_CONTROL_1 0x220 +#define WM8994_FLL1_CONTROL_2 0x221 +#define WM8994_FLL1_CONTROL_3 0x222 +#define WM8994_FLL1_CONTROL_4 0x223 +#define WM8994_FLL1_CONTROL_5 0x224 +#define WM8958_FLL1_EFS_1 0x226 +#define WM8958_FLL1_EFS_2 0x227 +#define WM8994_FLL2_CONTROL_1 0x240 +#define WM8994_FLL2_CONTROL_2 0x241 +#define WM8994_FLL2_CONTROL_3 0x242 +#define WM8994_FLL2_CONTROL_4 0x243 +#define WM8994_FLL2_CONTROL_5 0x244 +#define WM8958_FLL2_EFS_1 0x246 +#define WM8958_FLL2_EFS_2 0x247 +#define WM8994_AIF1_CONTROL_1 0x300 +#define WM8994_AIF1_CONTROL_2 0x301 +#define WM8994_AIF1_MASTER_SLAVE 0x302 +#define WM8994_AIF1_BCLK 0x303 +#define WM8994_AIF1ADC_LRCLK 0x304 +#define WM8994_AIF1DAC_LRCLK 0x305 +#define WM8994_AIF1DAC_DATA 0x306 +#define WM8994_AIF1ADC_DATA 0x307 +#define WM8994_AIF2_CONTROL_1 0x310 +#define WM8994_AIF2_CONTROL_2 0x311 +#define WM8994_AIF2_MASTER_SLAVE 0x312 +#define 
WM8994_AIF2_BCLK 0x313 +#define WM8994_AIF2ADC_LRCLK 0x314 +#define WM8994_AIF2DAC_LRCLK 0x315 +#define WM8994_AIF2DAC_DATA 0x316 +#define WM8994_AIF2ADC_DATA 0x317 +#define WM1811_AIF2TX_CONTROL 0x318 +#define WM8958_AIF3_CONTROL_1 0x320 +#define WM8958_AIF3_CONTROL_2 0x321 +#define WM8958_AIF3DAC_DATA 0x322 +#define WM8958_AIF3ADC_DATA 0x323 +#define WM8994_AIF1_ADC1_LEFT_VOLUME 0x400 +#define WM8994_AIF1_ADC1_RIGHT_VOLUME 0x401 +#define WM8994_AIF1_DAC1_LEFT_VOLUME 0x402 +#define WM8994_AIF1_DAC1_RIGHT_VOLUME 0x403 +#define WM8994_AIF1_ADC2_LEFT_VOLUME 0x404 +#define WM8994_AIF1_ADC2_RIGHT_VOLUME 0x405 +#define WM8994_AIF1_DAC2_LEFT_VOLUME 0x406 +#define WM8994_AIF1_DAC2_RIGHT_VOLUME 0x407 +#define WM8994_AIF1_ADC1_FILTERS 0x410 +#define WM8994_AIF1_ADC2_FILTERS 0x411 +#define WM8994_AIF1_DAC1_FILTERS_1 0x420 +#define WM8994_AIF1_DAC1_FILTERS_2 0x421 +#define WM8994_AIF1_DAC2_FILTERS_1 0x422 +#define WM8994_AIF1_DAC2_FILTERS_2 0x423 +#define WM8958_AIF1_DAC1_NOISE_GATE 0x430 +#define WM8958_AIF1_DAC2_NOISE_GATE 0x431 +#define WM8994_AIF1_DRC1_1 0x440 +#define WM8994_AIF1_DRC1_2 0x441 +#define WM8994_AIF1_DRC1_3 0x442 +#define WM8994_AIF1_DRC1_4 0x443 +#define WM8994_AIF1_DRC1_5 0x444 +#define WM8994_AIF1_DRC2_1 0x450 +#define WM8994_AIF1_DRC2_2 0x451 +#define WM8994_AIF1_DRC2_3 0x452 +#define WM8994_AIF1_DRC2_4 0x453 +#define WM8994_AIF1_DRC2_5 0x454 +#define WM8994_AIF1_DAC1_EQ_GAINS_1 0x480 +#define WM8994_AIF1_DAC1_EQ_GAINS_2 0x481 +#define WM8994_AIF1_DAC1_EQ_BAND_1_A 0x482 +#define WM8994_AIF1_DAC1_EQ_BAND_1_B 0x483 +#define WM8994_AIF1_DAC1_EQ_BAND_1_PG 0x484 +#define WM8994_AIF1_DAC1_EQ_BAND_2_A 0x485 +#define WM8994_AIF1_DAC1_EQ_BAND_2_B 0x486 +#define WM8994_AIF1_DAC1_EQ_BAND_2_C 0x487 +#define WM8994_AIF1_DAC1_EQ_BAND_2_PG 0x488 +#define WM8994_AIF1_DAC1_EQ_BAND_3_A 0x489 +#define WM8994_AIF1_DAC1_EQ_BAND_3_B 0x48A +#define WM8994_AIF1_DAC1_EQ_BAND_3_C 0x48B +#define WM8994_AIF1_DAC1_EQ_BAND_3_PG 0x48C +#define WM8994_AIF1_DAC1_EQ_BAND_4_A 0x48D +#define WM8994_AIF1_DAC1_EQ_BAND_4_B 0x48E +#define WM8994_AIF1_DAC1_EQ_BAND_4_C 0x48F +#define WM8994_AIF1_DAC1_EQ_BAND_4_PG 0x490 +#define WM8994_AIF1_DAC1_EQ_BAND_5_A 0x491 +#define WM8994_AIF1_DAC1_EQ_BAND_5_B 0x492 +#define WM8994_AIF1_DAC1_EQ_BAND_5_PG 0x493 +#define WM8994_AIF1_DAC1_EQ_BAND_1_C 0x494 +#define WM8994_AIF1_DAC2_EQ_GAINS_1 0x4A0 +#define WM8994_AIF1_DAC2_EQ_GAINS_2 0x4A1 +#define WM8994_AIF1_DAC2_EQ_BAND_1_A 0x4A2 +#define WM8994_AIF1_DAC2_EQ_BAND_1_B 0x4A3 +#define WM8994_AIF1_DAC2_EQ_BAND_1_PG 0x4A4 +#define WM8994_AIF1_DAC2_EQ_BAND_2_A 0x4A5 +#define WM8994_AIF1_DAC2_EQ_BAND_2_B 0x4A6 +#define WM8994_AIF1_DAC2_EQ_BAND_2_C 0x4A7 +#define WM8994_AIF1_DAC2_EQ_BAND_2_PG 0x4A8 +#define WM8994_AIF1_DAC2_EQ_BAND_3_A 0x4A9 +#define WM8994_AIF1_DAC2_EQ_BAND_3_B 0x4AA +#define WM8994_AIF1_DAC2_EQ_BAND_3_C 0x4AB +#define WM8994_AIF1_DAC2_EQ_BAND_3_PG 0x4AC +#define WM8994_AIF1_DAC2_EQ_BAND_4_A 0x4AD +#define WM8994_AIF1_DAC2_EQ_BAND_4_B 0x4AE +#define WM8994_AIF1_DAC2_EQ_BAND_4_C 0x4AF +#define WM8994_AIF1_DAC2_EQ_BAND_4_PG 0x4B0 +#define WM8994_AIF1_DAC2_EQ_BAND_5_A 0x4B1 +#define WM8994_AIF1_DAC2_EQ_BAND_5_B 0x4B2 +#define WM8994_AIF1_DAC2_EQ_BAND_5_PG 0x4B3 +#define WM8994_AIF1_DAC2_EQ_BAND_1_C 0x4B4 +#define WM8994_AIF2_ADC_LEFT_VOLUME 0x500 +#define WM8994_AIF2_ADC_RIGHT_VOLUME 0x501 +#define WM8994_AIF2_DAC_LEFT_VOLUME 0x502 +#define WM8994_AIF2_DAC_RIGHT_VOLUME 0x503 +#define WM8994_AIF2_ADC_FILTERS 0x510 +#define WM8994_AIF2_DAC_FILTERS_1 0x520 +#define WM8994_AIF2_DAC_FILTERS_2 0x521 +#define 
WM8958_AIF2_DAC_NOISE_GATE 0x530 +#define WM8994_AIF2_DRC_1 0x540 +#define WM8994_AIF2_DRC_2 0x541 +#define WM8994_AIF2_DRC_3 0x542 +#define WM8994_AIF2_DRC_4 0x543 +#define WM8994_AIF2_DRC_5 0x544 +#define WM8994_AIF2_EQ_GAINS_1 0x580 +#define WM8994_AIF2_EQ_GAINS_2 0x581 +#define WM8994_AIF2_EQ_BAND_1_A 0x582 +#define WM8994_AIF2_EQ_BAND_1_B 0x583 +#define WM8994_AIF2_EQ_BAND_1_PG 0x584 +#define WM8994_AIF2_EQ_BAND_2_A 0x585 +#define WM8994_AIF2_EQ_BAND_2_B 0x586 +#define WM8994_AIF2_EQ_BAND_2_C 0x587 +#define WM8994_AIF2_EQ_BAND_2_PG 0x588 +#define WM8994_AIF2_EQ_BAND_3_A 0x589 +#define WM8994_AIF2_EQ_BAND_3_B 0x58A +#define WM8994_AIF2_EQ_BAND_3_C 0x58B +#define WM8994_AIF2_EQ_BAND_3_PG 0x58C +#define WM8994_AIF2_EQ_BAND_4_A 0x58D +#define WM8994_AIF2_EQ_BAND_4_B 0x58E +#define WM8994_AIF2_EQ_BAND_4_C 0x58F +#define WM8994_AIF2_EQ_BAND_4_PG 0x590 +#define WM8994_AIF2_EQ_BAND_5_A 0x591 +#define WM8994_AIF2_EQ_BAND_5_B 0x592 +#define WM8994_AIF2_EQ_BAND_5_PG 0x593 +#define WM8994_AIF2_EQ_BAND_1_C 0x594 +#define WM8994_DAC1_MIXER_VOLUMES 0x600 +#define WM8994_DAC1_LEFT_MIXER_ROUTING 0x601 +#define WM8994_DAC1_RIGHT_MIXER_ROUTING 0x602 +#define WM8994_DAC2_MIXER_VOLUMES 0x603 +#define WM8994_DAC2_LEFT_MIXER_ROUTING 0x604 +#define WM8994_DAC2_RIGHT_MIXER_ROUTING 0x605 +#define WM8994_AIF1_ADC1_LEFT_MIXER_ROUTING 0x606 +#define WM8994_AIF1_ADC1_RIGHT_MIXER_ROUTING 0x607 +#define WM8994_AIF1_ADC2_LEFT_MIXER_ROUTING 0x608 +#define WM8994_AIF1_ADC2_RIGHT_MIXER_ROUTING 0x609 +#define WM8994_DAC1_LEFT_VOLUME 0x610 +#define WM8994_DAC1_RIGHT_VOLUME 0x611 +#define WM8994_DAC2_LEFT_VOLUME 0x612 +#define WM8994_DAC2_RIGHT_VOLUME 0x613 +#define WM8994_DAC_SOFTMUTE 0x614 +#define WM8994_OVERSAMPLING 0x620 +#define WM8994_SIDETONE 0x621 +#define WM8994_GPIO_1 0x700 +#define WM8994_GPIO_2 0x701 +#define WM8994_GPIO_3 0x702 +#define WM8994_GPIO_4 0x703 +#define WM8994_GPIO_5 0x704 +#define WM8994_GPIO_6 0x705 +#define WM1811_JACKDET_CTRL 0x705 +#define WM8994_GPIO_7 0x706 +#define WM8994_GPIO_8 0x707 +#define WM8994_GPIO_9 0x708 +#define WM8994_GPIO_10 0x709 +#define WM8994_GPIO_11 0x70A +#define WM8994_PULL_CONTROL_1 0x720 +#define WM8994_PULL_CONTROL_2 0x721 +#define WM8994_INTERRUPT_STATUS_1 0x730 +#define WM8994_INTERRUPT_STATUS_2 0x731 +#define WM8994_INTERRUPT_RAW_STATUS_2 0x732 +#define WM8994_INTERRUPT_STATUS_1_MASK 0x738 +#define WM8994_INTERRUPT_STATUS_2_MASK 0x739 +#define WM8994_INTERRUPT_CONTROL 0x740 +#define WM8994_IRQ_DEBOUNCE 0x748 +#define WM8958_DSP2_PROGRAM 0x900 +#define WM8958_DSP2_CONFIG 0x901 +#define WM8958_DSP2_MAGICNUM 0xA00 +#define WM8958_DSP2_RELEASEYEAR 0xA01 +#define WM8958_DSP2_RELEASEMONTHDAY 0xA02 +#define WM8958_DSP2_RELEASETIME 0xA03 +#define WM8958_DSP2_VERMAJMIN 0xA04 +#define WM8958_DSP2_VERBUILD 0xA05 +#define WM8958_DSP2_TESTREG 0xA06 +#define WM8958_DSP2_XORREG 0xA07 +#define WM8958_DSP2_SHIFTMAXX 0xA08 +#define WM8958_DSP2_SHIFTMAXY 0xA09 +#define WM8958_DSP2_SHIFTMAXZ 0xA0A +#define WM8958_DSP2_SHIFTMAXEXTLO 0xA0B +#define WM8958_DSP2_AESSELECT 0xA0C +#define WM8958_DSP2_EXECCONTROL 0xA0D +#define WM8958_DSP2_SAMPLEBREAK 0xA0E +#define WM8958_DSP2_COUNTBREAK 0xA0F +#define WM8958_DSP2_INTSTATUS 0xA10 +#define WM8958_DSP2_EVENTSTATUS 0xA11 +#define WM8958_DSP2_INTMASK 0xA12 +#define WM8958_DSP2_CONFIGDWIDTH 0xA13 +#define WM8958_DSP2_CONFIGINSTR 0xA14 +#define WM8958_DSP2_CONFIGDMEM 0xA15 +#define WM8958_DSP2_CONFIGDELAYS 0xA16 +#define WM8958_DSP2_CONFIGNUMIO 0xA17 +#define WM8958_DSP2_CONFIGEXTDEPTH 0xA18 +#define WM8958_DSP2_CONFIGMULTIPLIER 0xA19 +#define 
WM8958_DSP2_CONFIGCTRLDWIDTH 0xA1A +#define WM8958_DSP2_CONFIGPIPELINE 0xA1B +#define WM8958_DSP2_SHIFTMAXEXTHI 0xA1C +#define WM8958_DSP2_SWVERSIONREG 0xA1D +#define WM8958_DSP2_CONFIGXMEM 0xA1E +#define WM8958_DSP2_CONFIGYMEM 0xA1F +#define WM8958_DSP2_CONFIGZMEM 0xA20 +#define WM8958_FW_BUILD_1 0x2000 +#define WM8958_FW_BUILD_0 0x2001 +#define WM8958_FW_ID_1 0x2002 +#define WM8958_FW_ID_0 0x2003 +#define WM8958_FW_MAJOR_1 0x2004 +#define WM8958_FW_MAJOR_0 0x2005 +#define WM8958_FW_MINOR_1 0x2006 +#define WM8958_FW_MINOR_0 0x2007 +#define WM8958_FW_PATCH_1 0x2008 +#define WM8958_FW_PATCH_0 0x2009 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1 0x2200 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_2 0x2201 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_1 0x2202 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_2 0x2203 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_1 0x2204 +#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_2 0x2205 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_1 0x2206 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_2 0x2207 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_1 0x2208 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_2 0x2209 +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_1 0x220A +#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_2 0x220B +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_1 0x220C +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_2 0x220D +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_1 0x220E +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_2 0x220F +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_1 0x2210 +#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_2 0x2211 +#define WM8958_MBC_BAND_1_LOWER_CUTOFF_1 0x2212 +#define WM8958_MBC_BAND_1_LOWER_CUTOFF_2 0x2213 +#define WM8958_MBC_BAND_1_K_1 0x2400 +#define WM8958_MBC_BAND_1_K_2 0x2401 +#define WM8958_MBC_BAND_1_N1_1 0x2402 +#define WM8958_MBC_BAND_1_N1_2 0x2403 +#define WM8958_MBC_BAND_1_N2_1 0x2404 +#define WM8958_MBC_BAND_1_N2_2 0x2405 +#define WM8958_MBC_BAND_1_N3_1 0x2406 +#define WM8958_MBC_BAND_1_N3_2 0x2407 +#define WM8958_MBC_BAND_1_N4_1 0x2408 +#define WM8958_MBC_BAND_1_N4_2 0x2409 +#define WM8958_MBC_BAND_1_N5_1 0x240A +#define WM8958_MBC_BAND_1_N5_2 0x240B +#define WM8958_MBC_BAND_1_X1_1 0x240C +#define WM8958_MBC_BAND_1_X1_2 0x240D +#define WM8958_MBC_BAND_1_X2_1 0x240E +#define WM8958_MBC_BAND_1_X2_2 0x240F +#define WM8958_MBC_BAND_1_X3_1 0x2410 +#define WM8958_MBC_BAND_1_X3_2 0x2411 +#define WM8958_MBC_BAND_1_ATTACK_1 0x2412 +#define WM8958_MBC_BAND_1_ATTACK_2 0x2413 +#define WM8958_MBC_BAND_1_DECAY_1 0x2414 +#define WM8958_MBC_BAND_1_DECAY_2 0x2415 +#define WM8958_MBC_BAND_2_K_1 0x2416 +#define WM8958_MBC_BAND_2_K_2 0x2417 +#define WM8958_MBC_BAND_2_N1_1 0x2418 +#define WM8958_MBC_BAND_2_N1_2 0x2419 +#define WM8958_MBC_BAND_2_N2_1 0x241A +#define WM8958_MBC_BAND_2_N2_2 0x241B +#define WM8958_MBC_BAND_2_N3_1 0x241C +#define WM8958_MBC_BAND_2_N3_2 0x241D +#define WM8958_MBC_BAND_2_N4_1 0x241E +#define WM8958_MBC_BAND_2_N4_2 0x241F +#define WM8958_MBC_BAND_2_N5_1 0x2420 +#define WM8958_MBC_BAND_2_N5_2 0x2421 +#define WM8958_MBC_BAND_2_X1_1 0x2422 +#define WM8958_MBC_BAND_2_X1_2 0x2423 +#define WM8958_MBC_BAND_2_X2_1 0x2424 +#define WM8958_MBC_BAND_2_X2_2 0x2425 +#define WM8958_MBC_BAND_2_X3_1 0x2426 +#define WM8958_MBC_BAND_2_X3_2 0x2427 +#define WM8958_MBC_BAND_2_ATTACK_1 0x2428 +#define WM8958_MBC_BAND_2_ATTACK_2 0x2429 +#define WM8958_MBC_BAND_2_DECAY_1 0x242A +#define WM8958_MBC_BAND_2_DECAY_2 0x242B +#define WM8958_MBC_B2_PG2_1 0x242C +#define WM8958_MBC_B2_PG2_2 0x242D +#define WM8958_MBC_B1_PG2_1 0x242E +#define WM8958_MBC_B1_PG2_2 0x242F +#define 
WM8958_MBC_CROSSOVER_1 0x2600 +#define WM8958_MBC_CROSSOVER_2 0x2601 +#define WM8958_MBC_HPF_1 0x2602 +#define WM8958_MBC_HPF_2 0x2603 +#define WM8958_MBC_LPF_1 0x2606 +#define WM8958_MBC_LPF_2 0x2607 +#define WM8958_MBC_RMS_LIMIT_1 0x260A +#define WM8958_MBC_RMS_LIMIT_2 0x260B +#define WM8994_WRITE_SEQUENCER_0 0x3000 +#define WM8994_WRITE_SEQUENCER_1 0x3001 +#define WM8994_WRITE_SEQUENCER_2 0x3002 +#define WM8994_WRITE_SEQUENCER_3 0x3003 +#define WM8994_WRITE_SEQUENCER_4 0x3004 +#define WM8994_WRITE_SEQUENCER_5 0x3005 +#define WM8994_WRITE_SEQUENCER_6 0x3006 +#define WM8994_WRITE_SEQUENCER_7 0x3007 +#define WM8994_WRITE_SEQUENCER_8 0x3008 +#define WM8994_WRITE_SEQUENCER_9 0x3009 +#define WM8994_WRITE_SEQUENCER_10 0x300A +#define WM8994_WRITE_SEQUENCER_11 0x300B +#define WM8994_WRITE_SEQUENCER_12 0x300C +#define WM8994_WRITE_SEQUENCER_13 0x300D +#define WM8994_WRITE_SEQUENCER_14 0x300E +#define WM8994_WRITE_SEQUENCER_15 0x300F +#define WM8994_WRITE_SEQUENCER_16 0x3010 +#define WM8994_WRITE_SEQUENCER_17 0x3011 +#define WM8994_WRITE_SEQUENCER_18 0x3012 +#define WM8994_WRITE_SEQUENCER_19 0x3013 +#define WM8994_WRITE_SEQUENCER_20 0x3014 +#define WM8994_WRITE_SEQUENCER_21 0x3015 +#define WM8994_WRITE_SEQUENCER_22 0x3016 +#define WM8994_WRITE_SEQUENCER_23 0x3017 +#define WM8994_WRITE_SEQUENCER_24 0x3018 +#define WM8994_WRITE_SEQUENCER_25 0x3019 +#define WM8994_WRITE_SEQUENCER_26 0x301A +#define WM8994_WRITE_SEQUENCER_27 0x301B +#define WM8994_WRITE_SEQUENCER_28 0x301C +#define WM8994_WRITE_SEQUENCER_29 0x301D +#define WM8994_WRITE_SEQUENCER_30 0x301E +#define WM8994_WRITE_SEQUENCER_31 0x301F +#define WM8994_WRITE_SEQUENCER_32 0x3020 +#define WM8994_WRITE_SEQUENCER_33 0x3021 +#define WM8994_WRITE_SEQUENCER_34 0x3022 +#define WM8994_WRITE_SEQUENCER_35 0x3023 +#define WM8994_WRITE_SEQUENCER_36 0x3024 +#define WM8994_WRITE_SEQUENCER_37 0x3025 +#define WM8994_WRITE_SEQUENCER_38 0x3026 +#define WM8994_WRITE_SEQUENCER_39 0x3027 +#define WM8994_WRITE_SEQUENCER_40 0x3028 +#define WM8994_WRITE_SEQUENCER_41 0x3029 +#define WM8994_WRITE_SEQUENCER_42 0x302A +#define WM8994_WRITE_SEQUENCER_43 0x302B +#define WM8994_WRITE_SEQUENCER_44 0x302C +#define WM8994_WRITE_SEQUENCER_45 0x302D +#define WM8994_WRITE_SEQUENCER_46 0x302E +#define WM8994_WRITE_SEQUENCER_47 0x302F +#define WM8994_WRITE_SEQUENCER_48 0x3030 +#define WM8994_WRITE_SEQUENCER_49 0x3031 +#define WM8994_WRITE_SEQUENCER_50 0x3032 +#define WM8994_WRITE_SEQUENCER_51 0x3033 +#define WM8994_WRITE_SEQUENCER_52 0x3034 +#define WM8994_WRITE_SEQUENCER_53 0x3035 +#define WM8994_WRITE_SEQUENCER_54 0x3036 +#define WM8994_WRITE_SEQUENCER_55 0x3037 +#define WM8994_WRITE_SEQUENCER_56 0x3038 +#define WM8994_WRITE_SEQUENCER_57 0x3039 +#define WM8994_WRITE_SEQUENCER_58 0x303A +#define WM8994_WRITE_SEQUENCER_59 0x303B +#define WM8994_WRITE_SEQUENCER_60 0x303C +#define WM8994_WRITE_SEQUENCER_61 0x303D +#define WM8994_WRITE_SEQUENCER_62 0x303E +#define WM8994_WRITE_SEQUENCER_63 0x303F +#define WM8994_WRITE_SEQUENCER_64 0x3040 +#define WM8994_WRITE_SEQUENCER_65 0x3041 +#define WM8994_WRITE_SEQUENCER_66 0x3042 +#define WM8994_WRITE_SEQUENCER_67 0x3043 +#define WM8994_WRITE_SEQUENCER_68 0x3044 +#define WM8994_WRITE_SEQUENCER_69 0x3045 +#define WM8994_WRITE_SEQUENCER_70 0x3046 +#define WM8994_WRITE_SEQUENCER_71 0x3047 +#define WM8994_WRITE_SEQUENCER_72 0x3048 +#define WM8994_WRITE_SEQUENCER_73 0x3049 +#define WM8994_WRITE_SEQUENCER_74 0x304A +#define WM8994_WRITE_SEQUENCER_75 0x304B +#define WM8994_WRITE_SEQUENCER_76 0x304C +#define WM8994_WRITE_SEQUENCER_77 0x304D +#define 
WM8994_WRITE_SEQUENCER_78 0x304E +#define WM8994_WRITE_SEQUENCER_79 0x304F +#define WM8994_WRITE_SEQUENCER_80 0x3050 +#define WM8994_WRITE_SEQUENCER_81 0x3051 +#define WM8994_WRITE_SEQUENCER_82 0x3052 +#define WM8994_WRITE_SEQUENCER_83 0x3053 +#define WM8994_WRITE_SEQUENCER_84 0x3054 +#define WM8994_WRITE_SEQUENCER_85 0x3055 +#define WM8994_WRITE_SEQUENCER_86 0x3056 +#define WM8994_WRITE_SEQUENCER_87 0x3057 +#define WM8994_WRITE_SEQUENCER_88 0x3058 +#define WM8994_WRITE_SEQUENCER_89 0x3059 +#define WM8994_WRITE_SEQUENCER_90 0x305A +#define WM8994_WRITE_SEQUENCER_91 0x305B +#define WM8994_WRITE_SEQUENCER_92 0x305C +#define WM8994_WRITE_SEQUENCER_93 0x305D +#define WM8994_WRITE_SEQUENCER_94 0x305E +#define WM8994_WRITE_SEQUENCER_95 0x305F +#define WM8994_WRITE_SEQUENCER_96 0x3060 +#define WM8994_WRITE_SEQUENCER_97 0x3061 +#define WM8994_WRITE_SEQUENCER_98 0x3062 +#define WM8994_WRITE_SEQUENCER_99 0x3063 +#define WM8994_WRITE_SEQUENCER_100 0x3064 +#define WM8994_WRITE_SEQUENCER_101 0x3065 +#define WM8994_WRITE_SEQUENCER_102 0x3066 +#define WM8994_WRITE_SEQUENCER_103 0x3067 +#define WM8994_WRITE_SEQUENCER_104 0x3068 +#define WM8994_WRITE_SEQUENCER_105 0x3069 +#define WM8994_WRITE_SEQUENCER_106 0x306A +#define WM8994_WRITE_SEQUENCER_107 0x306B +#define WM8994_WRITE_SEQUENCER_108 0x306C +#define WM8994_WRITE_SEQUENCER_109 0x306D +#define WM8994_WRITE_SEQUENCER_110 0x306E +#define WM8994_WRITE_SEQUENCER_111 0x306F +#define WM8994_WRITE_SEQUENCER_112 0x3070 +#define WM8994_WRITE_SEQUENCER_113 0x3071 +#define WM8994_WRITE_SEQUENCER_114 0x3072 +#define WM8994_WRITE_SEQUENCER_115 0x3073 +#define WM8994_WRITE_SEQUENCER_116 0x3074 +#define WM8994_WRITE_SEQUENCER_117 0x3075 +#define WM8994_WRITE_SEQUENCER_118 0x3076 +#define WM8994_WRITE_SEQUENCER_119 0x3077 +#define WM8994_WRITE_SEQUENCER_120 0x3078 +#define WM8994_WRITE_SEQUENCER_121 0x3079 +#define WM8994_WRITE_SEQUENCER_122 0x307A +#define WM8994_WRITE_SEQUENCER_123 0x307B +#define WM8994_WRITE_SEQUENCER_124 0x307C +#define WM8994_WRITE_SEQUENCER_125 0x307D +#define WM8994_WRITE_SEQUENCER_126 0x307E +#define WM8994_WRITE_SEQUENCER_127 0x307F +#define WM8994_WRITE_SEQUENCER_128 0x3080 +#define WM8994_WRITE_SEQUENCER_129 0x3081 +#define WM8994_WRITE_SEQUENCER_130 0x3082 +#define WM8994_WRITE_SEQUENCER_131 0x3083 +#define WM8994_WRITE_SEQUENCER_132 0x3084 +#define WM8994_WRITE_SEQUENCER_133 0x3085 +#define WM8994_WRITE_SEQUENCER_134 0x3086 +#define WM8994_WRITE_SEQUENCER_135 0x3087 +#define WM8994_WRITE_SEQUENCER_136 0x3088 +#define WM8994_WRITE_SEQUENCER_137 0x3089 +#define WM8994_WRITE_SEQUENCER_138 0x308A +#define WM8994_WRITE_SEQUENCER_139 0x308B +#define WM8994_WRITE_SEQUENCER_140 0x308C +#define WM8994_WRITE_SEQUENCER_141 0x308D +#define WM8994_WRITE_SEQUENCER_142 0x308E +#define WM8994_WRITE_SEQUENCER_143 0x308F +#define WM8994_WRITE_SEQUENCER_144 0x3090 +#define WM8994_WRITE_SEQUENCER_145 0x3091 +#define WM8994_WRITE_SEQUENCER_146 0x3092 +#define WM8994_WRITE_SEQUENCER_147 0x3093 +#define WM8994_WRITE_SEQUENCER_148 0x3094 +#define WM8994_WRITE_SEQUENCER_149 0x3095 +#define WM8994_WRITE_SEQUENCER_150 0x3096 +#define WM8994_WRITE_SEQUENCER_151 0x3097 +#define WM8994_WRITE_SEQUENCER_152 0x3098 +#define WM8994_WRITE_SEQUENCER_153 0x3099 +#define WM8994_WRITE_SEQUENCER_154 0x309A +#define WM8994_WRITE_SEQUENCER_155 0x309B +#define WM8994_WRITE_SEQUENCER_156 0x309C +#define WM8994_WRITE_SEQUENCER_157 0x309D +#define WM8994_WRITE_SEQUENCER_158 0x309E +#define WM8994_WRITE_SEQUENCER_159 0x309F +#define WM8994_WRITE_SEQUENCER_160 0x30A0 +#define 
WM8994_WRITE_SEQUENCER_161 0x30A1 +#define WM8994_WRITE_SEQUENCER_162 0x30A2 +#define WM8994_WRITE_SEQUENCER_163 0x30A3 +#define WM8994_WRITE_SEQUENCER_164 0x30A4 +#define WM8994_WRITE_SEQUENCER_165 0x30A5 +#define WM8994_WRITE_SEQUENCER_166 0x30A6 +#define WM8994_WRITE_SEQUENCER_167 0x30A7 +#define WM8994_WRITE_SEQUENCER_168 0x30A8 +#define WM8994_WRITE_SEQUENCER_169 0x30A9 +#define WM8994_WRITE_SEQUENCER_170 0x30AA +#define WM8994_WRITE_SEQUENCER_171 0x30AB +#define WM8994_WRITE_SEQUENCER_172 0x30AC +#define WM8994_WRITE_SEQUENCER_173 0x30AD +#define WM8994_WRITE_SEQUENCER_174 0x30AE +#define WM8994_WRITE_SEQUENCER_175 0x30AF +#define WM8994_WRITE_SEQUENCER_176 0x30B0 +#define WM8994_WRITE_SEQUENCER_177 0x30B1 +#define WM8994_WRITE_SEQUENCER_178 0x30B2 +#define WM8994_WRITE_SEQUENCER_179 0x30B3 +#define WM8994_WRITE_SEQUENCER_180 0x30B4 +#define WM8994_WRITE_SEQUENCER_181 0x30B5 +#define WM8994_WRITE_SEQUENCER_182 0x30B6 +#define WM8994_WRITE_SEQUENCER_183 0x30B7 +#define WM8994_WRITE_SEQUENCER_184 0x30B8 +#define WM8994_WRITE_SEQUENCER_185 0x30B9 +#define WM8994_WRITE_SEQUENCER_186 0x30BA +#define WM8994_WRITE_SEQUENCER_187 0x30BB +#define WM8994_WRITE_SEQUENCER_188 0x30BC +#define WM8994_WRITE_SEQUENCER_189 0x30BD +#define WM8994_WRITE_SEQUENCER_190 0x30BE +#define WM8994_WRITE_SEQUENCER_191 0x30BF +#define WM8994_WRITE_SEQUENCER_192 0x30C0 +#define WM8994_WRITE_SEQUENCER_193 0x30C1 +#define WM8994_WRITE_SEQUENCER_194 0x30C2 +#define WM8994_WRITE_SEQUENCER_195 0x30C3 +#define WM8994_WRITE_SEQUENCER_196 0x30C4 +#define WM8994_WRITE_SEQUENCER_197 0x30C5 +#define WM8994_WRITE_SEQUENCER_198 0x30C6 +#define WM8994_WRITE_SEQUENCER_199 0x30C7 +#define WM8994_WRITE_SEQUENCER_200 0x30C8 +#define WM8994_WRITE_SEQUENCER_201 0x30C9 +#define WM8994_WRITE_SEQUENCER_202 0x30CA +#define WM8994_WRITE_SEQUENCER_203 0x30CB +#define WM8994_WRITE_SEQUENCER_204 0x30CC +#define WM8994_WRITE_SEQUENCER_205 0x30CD +#define WM8994_WRITE_SEQUENCER_206 0x30CE +#define WM8994_WRITE_SEQUENCER_207 0x30CF +#define WM8994_WRITE_SEQUENCER_208 0x30D0 +#define WM8994_WRITE_SEQUENCER_209 0x30D1 +#define WM8994_WRITE_SEQUENCER_210 0x30D2 +#define WM8994_WRITE_SEQUENCER_211 0x30D3 +#define WM8994_WRITE_SEQUENCER_212 0x30D4 +#define WM8994_WRITE_SEQUENCER_213 0x30D5 +#define WM8994_WRITE_SEQUENCER_214 0x30D6 +#define WM8994_WRITE_SEQUENCER_215 0x30D7 +#define WM8994_WRITE_SEQUENCER_216 0x30D8 +#define WM8994_WRITE_SEQUENCER_217 0x30D9 +#define WM8994_WRITE_SEQUENCER_218 0x30DA +#define WM8994_WRITE_SEQUENCER_219 0x30DB +#define WM8994_WRITE_SEQUENCER_220 0x30DC +#define WM8994_WRITE_SEQUENCER_221 0x30DD +#define WM8994_WRITE_SEQUENCER_222 0x30DE +#define WM8994_WRITE_SEQUENCER_223 0x30DF +#define WM8994_WRITE_SEQUENCER_224 0x30E0 +#define WM8994_WRITE_SEQUENCER_225 0x30E1 +#define WM8994_WRITE_SEQUENCER_226 0x30E2 +#define WM8994_WRITE_SEQUENCER_227 0x30E3 +#define WM8994_WRITE_SEQUENCER_228 0x30E4 +#define WM8994_WRITE_SEQUENCER_229 0x30E5 +#define WM8994_WRITE_SEQUENCER_230 0x30E6 +#define WM8994_WRITE_SEQUENCER_231 0x30E7 +#define WM8994_WRITE_SEQUENCER_232 0x30E8 +#define WM8994_WRITE_SEQUENCER_233 0x30E9 +#define WM8994_WRITE_SEQUENCER_234 0x30EA +#define WM8994_WRITE_SEQUENCER_235 0x30EB +#define WM8994_WRITE_SEQUENCER_236 0x30EC +#define WM8994_WRITE_SEQUENCER_237 0x30ED +#define WM8994_WRITE_SEQUENCER_238 0x30EE +#define WM8994_WRITE_SEQUENCER_239 0x30EF +#define WM8994_WRITE_SEQUENCER_240 0x30F0 +#define WM8994_WRITE_SEQUENCER_241 0x30F1 +#define WM8994_WRITE_SEQUENCER_242 0x30F2 +#define WM8994_WRITE_SEQUENCER_243 
0x30F3 +#define WM8994_WRITE_SEQUENCER_244 0x30F4 +#define WM8994_WRITE_SEQUENCER_245 0x30F5 +#define WM8994_WRITE_SEQUENCER_246 0x30F6 +#define WM8994_WRITE_SEQUENCER_247 0x30F7 +#define WM8994_WRITE_SEQUENCER_248 0x30F8 +#define WM8994_WRITE_SEQUENCER_249 0x30F9 +#define WM8994_WRITE_SEQUENCER_250 0x30FA +#define WM8994_WRITE_SEQUENCER_251 0x30FB +#define WM8994_WRITE_SEQUENCER_252 0x30FC +#define WM8994_WRITE_SEQUENCER_253 0x30FD +#define WM8994_WRITE_SEQUENCER_254 0x30FE +#define WM8994_WRITE_SEQUENCER_255 0x30FF +#define WM8994_WRITE_SEQUENCER_256 0x3100 +#define WM8994_WRITE_SEQUENCER_257 0x3101 +#define WM8994_WRITE_SEQUENCER_258 0x3102 +#define WM8994_WRITE_SEQUENCER_259 0x3103 +#define WM8994_WRITE_SEQUENCER_260 0x3104 +#define WM8994_WRITE_SEQUENCER_261 0x3105 +#define WM8994_WRITE_SEQUENCER_262 0x3106 +#define WM8994_WRITE_SEQUENCER_263 0x3107 +#define WM8994_WRITE_SEQUENCER_264 0x3108 +#define WM8994_WRITE_SEQUENCER_265 0x3109 +#define WM8994_WRITE_SEQUENCER_266 0x310A +#define WM8994_WRITE_SEQUENCER_267 0x310B +#define WM8994_WRITE_SEQUENCER_268 0x310C +#define WM8994_WRITE_SEQUENCER_269 0x310D +#define WM8994_WRITE_SEQUENCER_270 0x310E +#define WM8994_WRITE_SEQUENCER_271 0x310F +#define WM8994_WRITE_SEQUENCER_272 0x3110 +#define WM8994_WRITE_SEQUENCER_273 0x3111 +#define WM8994_WRITE_SEQUENCER_274 0x3112 +#define WM8994_WRITE_SEQUENCER_275 0x3113 +#define WM8994_WRITE_SEQUENCER_276 0x3114 +#define WM8994_WRITE_SEQUENCER_277 0x3115 +#define WM8994_WRITE_SEQUENCER_278 0x3116 +#define WM8994_WRITE_SEQUENCER_279 0x3117 +#define WM8994_WRITE_SEQUENCER_280 0x3118 +#define WM8994_WRITE_SEQUENCER_281 0x3119 +#define WM8994_WRITE_SEQUENCER_282 0x311A +#define WM8994_WRITE_SEQUENCER_283 0x311B +#define WM8994_WRITE_SEQUENCER_284 0x311C +#define WM8994_WRITE_SEQUENCER_285 0x311D +#define WM8994_WRITE_SEQUENCER_286 0x311E +#define WM8994_WRITE_SEQUENCER_287 0x311F +#define WM8994_WRITE_SEQUENCER_288 0x3120 +#define WM8994_WRITE_SEQUENCER_289 0x3121 +#define WM8994_WRITE_SEQUENCER_290 0x3122 +#define WM8994_WRITE_SEQUENCER_291 0x3123 +#define WM8994_WRITE_SEQUENCER_292 0x3124 +#define WM8994_WRITE_SEQUENCER_293 0x3125 +#define WM8994_WRITE_SEQUENCER_294 0x3126 +#define WM8994_WRITE_SEQUENCER_295 0x3127 +#define WM8994_WRITE_SEQUENCER_296 0x3128 +#define WM8994_WRITE_SEQUENCER_297 0x3129 +#define WM8994_WRITE_SEQUENCER_298 0x312A +#define WM8994_WRITE_SEQUENCER_299 0x312B +#define WM8994_WRITE_SEQUENCER_300 0x312C +#define WM8994_WRITE_SEQUENCER_301 0x312D +#define WM8994_WRITE_SEQUENCER_302 0x312E +#define WM8994_WRITE_SEQUENCER_303 0x312F +#define WM8994_WRITE_SEQUENCER_304 0x3130 +#define WM8994_WRITE_SEQUENCER_305 0x3131 +#define WM8994_WRITE_SEQUENCER_306 0x3132 +#define WM8994_WRITE_SEQUENCER_307 0x3133 +#define WM8994_WRITE_SEQUENCER_308 0x3134 +#define WM8994_WRITE_SEQUENCER_309 0x3135 +#define WM8994_WRITE_SEQUENCER_310 0x3136 +#define WM8994_WRITE_SEQUENCER_311 0x3137 +#define WM8994_WRITE_SEQUENCER_312 0x3138 +#define WM8994_WRITE_SEQUENCER_313 0x3139 +#define WM8994_WRITE_SEQUENCER_314 0x313A +#define WM8994_WRITE_SEQUENCER_315 0x313B +#define WM8994_WRITE_SEQUENCER_316 0x313C +#define WM8994_WRITE_SEQUENCER_317 0x313D +#define WM8994_WRITE_SEQUENCER_318 0x313E +#define WM8994_WRITE_SEQUENCER_319 0x313F +#define WM8994_WRITE_SEQUENCER_320 0x3140 +#define WM8994_WRITE_SEQUENCER_321 0x3141 +#define WM8994_WRITE_SEQUENCER_322 0x3142 +#define WM8994_WRITE_SEQUENCER_323 0x3143 +#define WM8994_WRITE_SEQUENCER_324 0x3144 +#define WM8994_WRITE_SEQUENCER_325 0x3145 +#define 
WM8994_WRITE_SEQUENCER_326 0x3146 +#define WM8994_WRITE_SEQUENCER_327 0x3147 +#define WM8994_WRITE_SEQUENCER_328 0x3148 +#define WM8994_WRITE_SEQUENCER_329 0x3149 +#define WM8994_WRITE_SEQUENCER_330 0x314A +#define WM8994_WRITE_SEQUENCER_331 0x314B +#define WM8994_WRITE_SEQUENCER_332 0x314C +#define WM8994_WRITE_SEQUENCER_333 0x314D +#define WM8994_WRITE_SEQUENCER_334 0x314E +#define WM8994_WRITE_SEQUENCER_335 0x314F +#define WM8994_WRITE_SEQUENCER_336 0x3150 +#define WM8994_WRITE_SEQUENCER_337 0x3151 +#define WM8994_WRITE_SEQUENCER_338 0x3152 +#define WM8994_WRITE_SEQUENCER_339 0x3153 +#define WM8994_WRITE_SEQUENCER_340 0x3154 +#define WM8994_WRITE_SEQUENCER_341 0x3155 +#define WM8994_WRITE_SEQUENCER_342 0x3156 +#define WM8994_WRITE_SEQUENCER_343 0x3157 +#define WM8994_WRITE_SEQUENCER_344 0x3158 +#define WM8994_WRITE_SEQUENCER_345 0x3159 +#define WM8994_WRITE_SEQUENCER_346 0x315A +#define WM8994_WRITE_SEQUENCER_347 0x315B +#define WM8994_WRITE_SEQUENCER_348 0x315C +#define WM8994_WRITE_SEQUENCER_349 0x315D +#define WM8994_WRITE_SEQUENCER_350 0x315E +#define WM8994_WRITE_SEQUENCER_351 0x315F +#define WM8994_WRITE_SEQUENCER_352 0x3160 +#define WM8994_WRITE_SEQUENCER_353 0x3161 +#define WM8994_WRITE_SEQUENCER_354 0x3162 +#define WM8994_WRITE_SEQUENCER_355 0x3163 +#define WM8994_WRITE_SEQUENCER_356 0x3164 +#define WM8994_WRITE_SEQUENCER_357 0x3165 +#define WM8994_WRITE_SEQUENCER_358 0x3166 +#define WM8994_WRITE_SEQUENCER_359 0x3167 +#define WM8994_WRITE_SEQUENCER_360 0x3168 +#define WM8994_WRITE_SEQUENCER_361 0x3169 +#define WM8994_WRITE_SEQUENCER_362 0x316A +#define WM8994_WRITE_SEQUENCER_363 0x316B +#define WM8994_WRITE_SEQUENCER_364 0x316C +#define WM8994_WRITE_SEQUENCER_365 0x316D +#define WM8994_WRITE_SEQUENCER_366 0x316E +#define WM8994_WRITE_SEQUENCER_367 0x316F +#define WM8994_WRITE_SEQUENCER_368 0x3170 +#define WM8994_WRITE_SEQUENCER_369 0x3171 +#define WM8994_WRITE_SEQUENCER_370 0x3172 +#define WM8994_WRITE_SEQUENCER_371 0x3173 +#define WM8994_WRITE_SEQUENCER_372 0x3174 +#define WM8994_WRITE_SEQUENCER_373 0x3175 +#define WM8994_WRITE_SEQUENCER_374 0x3176 +#define WM8994_WRITE_SEQUENCER_375 0x3177 +#define WM8994_WRITE_SEQUENCER_376 0x3178 +#define WM8994_WRITE_SEQUENCER_377 0x3179 +#define WM8994_WRITE_SEQUENCER_378 0x317A +#define WM8994_WRITE_SEQUENCER_379 0x317B +#define WM8994_WRITE_SEQUENCER_380 0x317C +#define WM8994_WRITE_SEQUENCER_381 0x317D +#define WM8994_WRITE_SEQUENCER_382 0x317E +#define WM8994_WRITE_SEQUENCER_383 0x317F +#define WM8994_WRITE_SEQUENCER_384 0x3180 +#define WM8994_WRITE_SEQUENCER_385 0x3181 +#define WM8994_WRITE_SEQUENCER_386 0x3182 +#define WM8994_WRITE_SEQUENCER_387 0x3183 +#define WM8994_WRITE_SEQUENCER_388 0x3184 +#define WM8994_WRITE_SEQUENCER_389 0x3185 +#define WM8994_WRITE_SEQUENCER_390 0x3186 +#define WM8994_WRITE_SEQUENCER_391 0x3187 +#define WM8994_WRITE_SEQUENCER_392 0x3188 +#define WM8994_WRITE_SEQUENCER_393 0x3189 +#define WM8994_WRITE_SEQUENCER_394 0x318A +#define WM8994_WRITE_SEQUENCER_395 0x318B +#define WM8994_WRITE_SEQUENCER_396 0x318C +#define WM8994_WRITE_SEQUENCER_397 0x318D +#define WM8994_WRITE_SEQUENCER_398 0x318E +#define WM8994_WRITE_SEQUENCER_399 0x318F +#define WM8994_WRITE_SEQUENCER_400 0x3190 +#define WM8994_WRITE_SEQUENCER_401 0x3191 +#define WM8994_WRITE_SEQUENCER_402 0x3192 +#define WM8994_WRITE_SEQUENCER_403 0x3193 +#define WM8994_WRITE_SEQUENCER_404 0x3194 +#define WM8994_WRITE_SEQUENCER_405 0x3195 +#define WM8994_WRITE_SEQUENCER_406 0x3196 +#define WM8994_WRITE_SEQUENCER_407 0x3197 +#define WM8994_WRITE_SEQUENCER_408 
0x3198 +#define WM8994_WRITE_SEQUENCER_409 0x3199 +#define WM8994_WRITE_SEQUENCER_410 0x319A +#define WM8994_WRITE_SEQUENCER_411 0x319B +#define WM8994_WRITE_SEQUENCER_412 0x319C +#define WM8994_WRITE_SEQUENCER_413 0x319D +#define WM8994_WRITE_SEQUENCER_414 0x319E +#define WM8994_WRITE_SEQUENCER_415 0x319F +#define WM8994_WRITE_SEQUENCER_416 0x31A0 +#define WM8994_WRITE_SEQUENCER_417 0x31A1 +#define WM8994_WRITE_SEQUENCER_418 0x31A2 +#define WM8994_WRITE_SEQUENCER_419 0x31A3 +#define WM8994_WRITE_SEQUENCER_420 0x31A4 +#define WM8994_WRITE_SEQUENCER_421 0x31A5 +#define WM8994_WRITE_SEQUENCER_422 0x31A6 +#define WM8994_WRITE_SEQUENCER_423 0x31A7 +#define WM8994_WRITE_SEQUENCER_424 0x31A8 +#define WM8994_WRITE_SEQUENCER_425 0x31A9 +#define WM8994_WRITE_SEQUENCER_426 0x31AA +#define WM8994_WRITE_SEQUENCER_427 0x31AB +#define WM8994_WRITE_SEQUENCER_428 0x31AC +#define WM8994_WRITE_SEQUENCER_429 0x31AD +#define WM8994_WRITE_SEQUENCER_430 0x31AE +#define WM8994_WRITE_SEQUENCER_431 0x31AF +#define WM8994_WRITE_SEQUENCER_432 0x31B0 +#define WM8994_WRITE_SEQUENCER_433 0x31B1 +#define WM8994_WRITE_SEQUENCER_434 0x31B2 +#define WM8994_WRITE_SEQUENCER_435 0x31B3 +#define WM8994_WRITE_SEQUENCER_436 0x31B4 +#define WM8994_WRITE_SEQUENCER_437 0x31B5 +#define WM8994_WRITE_SEQUENCER_438 0x31B6 +#define WM8994_WRITE_SEQUENCER_439 0x31B7 +#define WM8994_WRITE_SEQUENCER_440 0x31B8 +#define WM8994_WRITE_SEQUENCER_441 0x31B9 +#define WM8994_WRITE_SEQUENCER_442 0x31BA +#define WM8994_WRITE_SEQUENCER_443 0x31BB +#define WM8994_WRITE_SEQUENCER_444 0x31BC +#define WM8994_WRITE_SEQUENCER_445 0x31BD +#define WM8994_WRITE_SEQUENCER_446 0x31BE +#define WM8994_WRITE_SEQUENCER_447 0x31BF +#define WM8994_WRITE_SEQUENCER_448 0x31C0 +#define WM8994_WRITE_SEQUENCER_449 0x31C1 +#define WM8994_WRITE_SEQUENCER_450 0x31C2 +#define WM8994_WRITE_SEQUENCER_451 0x31C3 +#define WM8994_WRITE_SEQUENCER_452 0x31C4 +#define WM8994_WRITE_SEQUENCER_453 0x31C5 +#define WM8994_WRITE_SEQUENCER_454 0x31C6 +#define WM8994_WRITE_SEQUENCER_455 0x31C7 +#define WM8994_WRITE_SEQUENCER_456 0x31C8 +#define WM8994_WRITE_SEQUENCER_457 0x31C9 +#define WM8994_WRITE_SEQUENCER_458 0x31CA +#define WM8994_WRITE_SEQUENCER_459 0x31CB +#define WM8994_WRITE_SEQUENCER_460 0x31CC +#define WM8994_WRITE_SEQUENCER_461 0x31CD +#define WM8994_WRITE_SEQUENCER_462 0x31CE +#define WM8994_WRITE_SEQUENCER_463 0x31CF +#define WM8994_WRITE_SEQUENCER_464 0x31D0 +#define WM8994_WRITE_SEQUENCER_465 0x31D1 +#define WM8994_WRITE_SEQUENCER_466 0x31D2 +#define WM8994_WRITE_SEQUENCER_467 0x31D3 +#define WM8994_WRITE_SEQUENCER_468 0x31D4 +#define WM8994_WRITE_SEQUENCER_469 0x31D5 +#define WM8994_WRITE_SEQUENCER_470 0x31D6 +#define WM8994_WRITE_SEQUENCER_471 0x31D7 +#define WM8994_WRITE_SEQUENCER_472 0x31D8 +#define WM8994_WRITE_SEQUENCER_473 0x31D9 +#define WM8994_WRITE_SEQUENCER_474 0x31DA +#define WM8994_WRITE_SEQUENCER_475 0x31DB +#define WM8994_WRITE_SEQUENCER_476 0x31DC +#define WM8994_WRITE_SEQUENCER_477 0x31DD +#define WM8994_WRITE_SEQUENCER_478 0x31DE +#define WM8994_WRITE_SEQUENCER_479 0x31DF +#define WM8994_WRITE_SEQUENCER_480 0x31E0 +#define WM8994_WRITE_SEQUENCER_481 0x31E1 +#define WM8994_WRITE_SEQUENCER_482 0x31E2 +#define WM8994_WRITE_SEQUENCER_483 0x31E3 +#define WM8994_WRITE_SEQUENCER_484 0x31E4 +#define WM8994_WRITE_SEQUENCER_485 0x31E5 +#define WM8994_WRITE_SEQUENCER_486 0x31E6 +#define WM8994_WRITE_SEQUENCER_487 0x31E7 +#define WM8994_WRITE_SEQUENCER_488 0x31E8 +#define WM8994_WRITE_SEQUENCER_489 0x31E9 +#define WM8994_WRITE_SEQUENCER_490 0x31EA +#define 
WM8994_WRITE_SEQUENCER_491 0x31EB +#define WM8994_WRITE_SEQUENCER_492 0x31EC +#define WM8994_WRITE_SEQUENCER_493 0x31ED +#define WM8994_WRITE_SEQUENCER_494 0x31EE +#define WM8994_WRITE_SEQUENCER_495 0x31EF +#define WM8994_WRITE_SEQUENCER_496 0x31F0 +#define WM8994_WRITE_SEQUENCER_497 0x31F1 +#define WM8994_WRITE_SEQUENCER_498 0x31F2 +#define WM8994_WRITE_SEQUENCER_499 0x31F3 +#define WM8994_WRITE_SEQUENCER_500 0x31F4 +#define WM8994_WRITE_SEQUENCER_501 0x31F5 +#define WM8994_WRITE_SEQUENCER_502 0x31F6 +#define WM8994_WRITE_SEQUENCER_503 0x31F7 +#define WM8994_WRITE_SEQUENCER_504 0x31F8 +#define WM8994_WRITE_SEQUENCER_505 0x31F9 +#define WM8994_WRITE_SEQUENCER_506 0x31FA +#define WM8994_WRITE_SEQUENCER_507 0x31FB +#define WM8994_WRITE_SEQUENCER_508 0x31FC +#define WM8994_WRITE_SEQUENCER_509 0x31FD +#define WM8994_WRITE_SEQUENCER_510 0x31FE +#define WM8994_WRITE_SEQUENCER_511 0x31FF + +#define WM8994_REGISTER_COUNT 736 +#define WM8994_MAX_REGISTER 0x31FF +#define WM8994_MAX_CACHED_REGISTER 0x749 + +/* + * Field Definitions. + */ + +/* + * R0 (0x00) - Software Reset + */ +#define WM8994_SW_RESET_MASK 0xFFFF /* SW_RESET - [15:0] */ +#define WM8994_SW_RESET_SHIFT 0 /* SW_RESET - [15:0] */ +#define WM8994_SW_RESET_WIDTH 16 /* SW_RESET - [15:0] */ + +/* + * R1 (0x01) - Power Management (1) + */ +#define WM8994_SPKOUTR_ENA 0x2000 /* SPKOUTR_ENA */ +#define WM8994_SPKOUTR_ENA_MASK 0x2000 /* SPKOUTR_ENA */ +#define WM8994_SPKOUTR_ENA_SHIFT 13 /* SPKOUTR_ENA */ +#define WM8994_SPKOUTR_ENA_WIDTH 1 /* SPKOUTR_ENA */ +#define WM8994_SPKOUTL_ENA 0x1000 /* SPKOUTL_ENA */ +#define WM8994_SPKOUTL_ENA_MASK 0x1000 /* SPKOUTL_ENA */ +#define WM8994_SPKOUTL_ENA_SHIFT 12 /* SPKOUTL_ENA */ +#define WM8994_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */ +#define WM8994_HPOUT2_ENA 0x0800 /* HPOUT2_ENA */ +#define WM8994_HPOUT2_ENA_MASK 0x0800 /* HPOUT2_ENA */ +#define WM8994_HPOUT2_ENA_SHIFT 11 /* HPOUT2_ENA */ +#define WM8994_HPOUT2_ENA_WIDTH 1 /* HPOUT2_ENA */ +#define WM8994_HPOUT1L_ENA 0x0200 /* HPOUT1L_ENA */ +#define WM8994_HPOUT1L_ENA_MASK 0x0200 /* HPOUT1L_ENA */ +#define WM8994_HPOUT1L_ENA_SHIFT 9 /* HPOUT1L_ENA */ +#define WM8994_HPOUT1L_ENA_WIDTH 1 /* HPOUT1L_ENA */ +#define WM8994_HPOUT1R_ENA 0x0100 /* HPOUT1R_ENA */ +#define WM8994_HPOUT1R_ENA_MASK 0x0100 /* HPOUT1R_ENA */ +#define WM8994_HPOUT1R_ENA_SHIFT 8 /* HPOUT1R_ENA */ +#define WM8994_HPOUT1R_ENA_WIDTH 1 /* HPOUT1R_ENA */ +#define WM8994_MICB2_ENA 0x0020 /* MICB2_ENA */ +#define WM8994_MICB2_ENA_MASK 0x0020 /* MICB2_ENA */ +#define WM8994_MICB2_ENA_SHIFT 5 /* MICB2_ENA */ +#define WM8994_MICB2_ENA_WIDTH 1 /* MICB2_ENA */ +#define WM8994_MICB1_ENA 0x0010 /* MICB1_ENA */ +#define WM8994_MICB1_ENA_MASK 0x0010 /* MICB1_ENA */ +#define WM8994_MICB1_ENA_SHIFT 4 /* MICB1_ENA */ +#define WM8994_MICB1_ENA_WIDTH 1 /* MICB1_ENA */ +#define WM8994_VMID_SEL_MASK 0x0006 /* VMID_SEL - [2:1] */ +#define WM8994_VMID_SEL_SHIFT 1 /* VMID_SEL - [2:1] */ +#define WM8994_VMID_SEL_WIDTH 2 /* VMID_SEL - [2:1] */ +#define WM8994_BIAS_ENA 0x0001 /* BIAS_ENA */ +#define WM8994_BIAS_ENA_MASK 0x0001 /* BIAS_ENA */ +#define WM8994_BIAS_ENA_SHIFT 0 /* BIAS_ENA */ +#define WM8994_BIAS_ENA_WIDTH 1 /* BIAS_ENA */ + +/* + * R2 (0x02) - Power Management (2) + */ +#define WM8994_TSHUT_ENA 0x4000 /* TSHUT_ENA */ +#define WM8994_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */ +#define WM8994_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */ +#define WM8994_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */ +#define WM8994_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */ +#define WM8994_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */ +#define 
WM8994_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */ +#define WM8994_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */ +#define WM8994_OPCLK_ENA 0x0800 /* OPCLK_ENA */ +#define WM8994_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */ +#define WM8994_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */ +#define WM8994_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */ +#define WM8994_MIXINL_ENA 0x0200 /* MIXINL_ENA */ +#define WM8994_MIXINL_ENA_MASK 0x0200 /* MIXINL_ENA */ +#define WM8994_MIXINL_ENA_SHIFT 9 /* MIXINL_ENA */ +#define WM8994_MIXINL_ENA_WIDTH 1 /* MIXINL_ENA */ +#define WM8994_MIXINR_ENA 0x0100 /* MIXINR_ENA */ +#define WM8994_MIXINR_ENA_MASK 0x0100 /* MIXINR_ENA */ +#define WM8994_MIXINR_ENA_SHIFT 8 /* MIXINR_ENA */ +#define WM8994_MIXINR_ENA_WIDTH 1 /* MIXINR_ENA */ +#define WM8994_IN2L_ENA 0x0080 /* IN2L_ENA */ +#define WM8994_IN2L_ENA_MASK 0x0080 /* IN2L_ENA */ +#define WM8994_IN2L_ENA_SHIFT 7 /* IN2L_ENA */ +#define WM8994_IN2L_ENA_WIDTH 1 /* IN2L_ENA */ +#define WM8994_IN1L_ENA 0x0040 /* IN1L_ENA */ +#define WM8994_IN1L_ENA_MASK 0x0040 /* IN1L_ENA */ +#define WM8994_IN1L_ENA_SHIFT 6 /* IN1L_ENA */ +#define WM8994_IN1L_ENA_WIDTH 1 /* IN1L_ENA */ +#define WM8994_IN2R_ENA 0x0020 /* IN2R_ENA */ +#define WM8994_IN2R_ENA_MASK 0x0020 /* IN2R_ENA */ +#define WM8994_IN2R_ENA_SHIFT 5 /* IN2R_ENA */ +#define WM8994_IN2R_ENA_WIDTH 1 /* IN2R_ENA */ +#define WM8994_IN1R_ENA 0x0010 /* IN1R_ENA */ +#define WM8994_IN1R_ENA_MASK 0x0010 /* IN1R_ENA */ +#define WM8994_IN1R_ENA_SHIFT 4 /* IN1R_ENA */ +#define WM8994_IN1R_ENA_WIDTH 1 /* IN1R_ENA */ + +/* + * R3 (0x03) - Power Management (3) + */ +#define WM8994_LINEOUT1N_ENA 0x2000 /* LINEOUT1N_ENA */ +#define WM8994_LINEOUT1N_ENA_MASK 0x2000 /* LINEOUT1N_ENA */ +#define WM8994_LINEOUT1N_ENA_SHIFT 13 /* LINEOUT1N_ENA */ +#define WM8994_LINEOUT1N_ENA_WIDTH 1 /* LINEOUT1N_ENA */ +#define WM8994_LINEOUT1P_ENA 0x1000 /* LINEOUT1P_ENA */ +#define WM8994_LINEOUT1P_ENA_MASK 0x1000 /* LINEOUT1P_ENA */ +#define WM8994_LINEOUT1P_ENA_SHIFT 12 /* LINEOUT1P_ENA */ +#define WM8994_LINEOUT1P_ENA_WIDTH 1 /* LINEOUT1P_ENA */ +#define WM8994_LINEOUT2N_ENA 0x0800 /* LINEOUT2N_ENA */ +#define WM8994_LINEOUT2N_ENA_MASK 0x0800 /* LINEOUT2N_ENA */ +#define WM8994_LINEOUT2N_ENA_SHIFT 11 /* LINEOUT2N_ENA */ +#define WM8994_LINEOUT2N_ENA_WIDTH 1 /* LINEOUT2N_ENA */ +#define WM8994_LINEOUT2P_ENA 0x0400 /* LINEOUT2P_ENA */ +#define WM8994_LINEOUT2P_ENA_MASK 0x0400 /* LINEOUT2P_ENA */ +#define WM8994_LINEOUT2P_ENA_SHIFT 10 /* LINEOUT2P_ENA */ +#define WM8994_LINEOUT2P_ENA_WIDTH 1 /* LINEOUT2P_ENA */ +#define WM8994_SPKRVOL_ENA 0x0200 /* SPKRVOL_ENA */ +#define WM8994_SPKRVOL_ENA_MASK 0x0200 /* SPKRVOL_ENA */ +#define WM8994_SPKRVOL_ENA_SHIFT 9 /* SPKRVOL_ENA */ +#define WM8994_SPKRVOL_ENA_WIDTH 1 /* SPKRVOL_ENA */ +#define WM8994_SPKLVOL_ENA 0x0100 /* SPKLVOL_ENA */ +#define WM8994_SPKLVOL_ENA_MASK 0x0100 /* SPKLVOL_ENA */ +#define WM8994_SPKLVOL_ENA_SHIFT 8 /* SPKLVOL_ENA */ +#define WM8994_SPKLVOL_ENA_WIDTH 1 /* SPKLVOL_ENA */ +#define WM8994_MIXOUTLVOL_ENA 0x0080 /* MIXOUTLVOL_ENA */ +#define WM8994_MIXOUTLVOL_ENA_MASK 0x0080 /* MIXOUTLVOL_ENA */ +#define WM8994_MIXOUTLVOL_ENA_SHIFT 7 /* MIXOUTLVOL_ENA */ +#define WM8994_MIXOUTLVOL_ENA_WIDTH 1 /* MIXOUTLVOL_ENA */ +#define WM8994_MIXOUTRVOL_ENA 0x0040 /* MIXOUTRVOL_ENA */ +#define WM8994_MIXOUTRVOL_ENA_MASK 0x0040 /* MIXOUTRVOL_ENA */ +#define WM8994_MIXOUTRVOL_ENA_SHIFT 6 /* MIXOUTRVOL_ENA */ +#define WM8994_MIXOUTRVOL_ENA_WIDTH 1 /* MIXOUTRVOL_ENA */ +#define WM8994_MIXOUTL_ENA 0x0020 /* MIXOUTL_ENA */ +#define WM8994_MIXOUTL_ENA_MASK 0x0020 /* MIXOUTL_ENA */ +#define 
WM8994_MIXOUTL_ENA_SHIFT 5 /* MIXOUTL_ENA */ +#define WM8994_MIXOUTL_ENA_WIDTH 1 /* MIXOUTL_ENA */ +#define WM8994_MIXOUTR_ENA 0x0010 /* MIXOUTR_ENA */ +#define WM8994_MIXOUTR_ENA_MASK 0x0010 /* MIXOUTR_ENA */ +#define WM8994_MIXOUTR_ENA_SHIFT 4 /* MIXOUTR_ENA */ +#define WM8994_MIXOUTR_ENA_WIDTH 1 /* MIXOUTR_ENA */ + +/* + * R4 (0x04) - Power Management (4) + */ +#define WM8994_AIF2ADCL_ENA 0x2000 /* AIF2ADCL_ENA */ +#define WM8994_AIF2ADCL_ENA_MASK 0x2000 /* AIF2ADCL_ENA */ +#define WM8994_AIF2ADCL_ENA_SHIFT 13 /* AIF2ADCL_ENA */ +#define WM8994_AIF2ADCL_ENA_WIDTH 1 /* AIF2ADCL_ENA */ +#define WM8994_AIF2ADCR_ENA 0x1000 /* AIF2ADCR_ENA */ +#define WM8994_AIF2ADCR_ENA_MASK 0x1000 /* AIF2ADCR_ENA */ +#define WM8994_AIF2ADCR_ENA_SHIFT 12 /* AIF2ADCR_ENA */ +#define WM8994_AIF2ADCR_ENA_WIDTH 1 /* AIF2ADCR_ENA */ +#define WM8994_AIF1ADC2L_ENA 0x0800 /* AIF1ADC2L_ENA */ +#define WM8994_AIF1ADC2L_ENA_MASK 0x0800 /* AIF1ADC2L_ENA */ +#define WM8994_AIF1ADC2L_ENA_SHIFT 11 /* AIF1ADC2L_ENA */ +#define WM8994_AIF1ADC2L_ENA_WIDTH 1 /* AIF1ADC2L_ENA */ +#define WM8994_AIF1ADC2R_ENA 0x0400 /* AIF1ADC2R_ENA */ +#define WM8994_AIF1ADC2R_ENA_MASK 0x0400 /* AIF1ADC2R_ENA */ +#define WM8994_AIF1ADC2R_ENA_SHIFT 10 /* AIF1ADC2R_ENA */ +#define WM8994_AIF1ADC2R_ENA_WIDTH 1 /* AIF1ADC2R_ENA */ +#define WM8994_AIF1ADC1L_ENA 0x0200 /* AIF1ADC1L_ENA */ +#define WM8994_AIF1ADC1L_ENA_MASK 0x0200 /* AIF1ADC1L_ENA */ +#define WM8994_AIF1ADC1L_ENA_SHIFT 9 /* AIF1ADC1L_ENA */ +#define WM8994_AIF1ADC1L_ENA_WIDTH 1 /* AIF1ADC1L_ENA */ +#define WM8994_AIF1ADC1R_ENA 0x0100 /* AIF1ADC1R_ENA */ +#define WM8994_AIF1ADC1R_ENA_MASK 0x0100 /* AIF1ADC1R_ENA */ +#define WM8994_AIF1ADC1R_ENA_SHIFT 8 /* AIF1ADC1R_ENA */ +#define WM8994_AIF1ADC1R_ENA_WIDTH 1 /* AIF1ADC1R_ENA */ +#define WM8994_DMIC2L_ENA 0x0020 /* DMIC2L_ENA */ +#define WM8994_DMIC2L_ENA_MASK 0x0020 /* DMIC2L_ENA */ +#define WM8994_DMIC2L_ENA_SHIFT 5 /* DMIC2L_ENA */ +#define WM8994_DMIC2L_ENA_WIDTH 1 /* DMIC2L_ENA */ +#define WM8994_DMIC2R_ENA 0x0010 /* DMIC2R_ENA */ +#define WM8994_DMIC2R_ENA_MASK 0x0010 /* DMIC2R_ENA */ +#define WM8994_DMIC2R_ENA_SHIFT 4 /* DMIC2R_ENA */ +#define WM8994_DMIC2R_ENA_WIDTH 1 /* DMIC2R_ENA */ +#define WM8994_DMIC1L_ENA 0x0008 /* DMIC1L_ENA */ +#define WM8994_DMIC1L_ENA_MASK 0x0008 /* DMIC1L_ENA */ +#define WM8994_DMIC1L_ENA_SHIFT 3 /* DMIC1L_ENA */ +#define WM8994_DMIC1L_ENA_WIDTH 1 /* DMIC1L_ENA */ +#define WM8994_DMIC1R_ENA 0x0004 /* DMIC1R_ENA */ +#define WM8994_DMIC1R_ENA_MASK 0x0004 /* DMIC1R_ENA */ +#define WM8994_DMIC1R_ENA_SHIFT 2 /* DMIC1R_ENA */ +#define WM8994_DMIC1R_ENA_WIDTH 1 /* DMIC1R_ENA */ +#define WM8994_ADCL_ENA 0x0002 /* ADCL_ENA */ +#define WM8994_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */ +#define WM8994_ADCL_ENA_SHIFT 1 /* ADCL_ENA */ +#define WM8994_ADCL_ENA_WIDTH 1 /* ADCL_ENA */ +#define WM8994_ADCR_ENA 0x0001 /* ADCR_ENA */ +#define WM8994_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */ +#define WM8994_ADCR_ENA_SHIFT 0 /* ADCR_ENA */ +#define WM8994_ADCR_ENA_WIDTH 1 /* ADCR_ENA */ + +/* + * R5 (0x05) - Power Management (5) + */ +#define WM8994_AIF2DACL_ENA 0x2000 /* AIF2DACL_ENA */ +#define WM8994_AIF2DACL_ENA_MASK 0x2000 /* AIF2DACL_ENA */ +#define WM8994_AIF2DACL_ENA_SHIFT 13 /* AIF2DACL_ENA */ +#define WM8994_AIF2DACL_ENA_WIDTH 1 /* AIF2DACL_ENA */ +#define WM8994_AIF2DACR_ENA 0x1000 /* AIF2DACR_ENA */ +#define WM8994_AIF2DACR_ENA_MASK 0x1000 /* AIF2DACR_ENA */ +#define WM8994_AIF2DACR_ENA_SHIFT 12 /* AIF2DACR_ENA */ +#define WM8994_AIF2DACR_ENA_WIDTH 1 /* AIF2DACR_ENA */ +#define WM8994_AIF1DAC2L_ENA 0x0800 /* 
AIF1DAC2L_ENA */ +#define WM8994_AIF1DAC2L_ENA_MASK 0x0800 /* AIF1DAC2L_ENA */ +#define WM8994_AIF1DAC2L_ENA_SHIFT 11 /* AIF1DAC2L_ENA */ +#define WM8994_AIF1DAC2L_ENA_WIDTH 1 /* AIF1DAC2L_ENA */ +#define WM8994_AIF1DAC2R_ENA 0x0400 /* AIF1DAC2R_ENA */ +#define WM8994_AIF1DAC2R_ENA_MASK 0x0400 /* AIF1DAC2R_ENA */ +#define WM8994_AIF1DAC2R_ENA_SHIFT 10 /* AIF1DAC2R_ENA */ +#define WM8994_AIF1DAC2R_ENA_WIDTH 1 /* AIF1DAC2R_ENA */ +#define WM8994_AIF1DAC1L_ENA 0x0200 /* AIF1DAC1L_ENA */ +#define WM8994_AIF1DAC1L_ENA_MASK 0x0200 /* AIF1DAC1L_ENA */ +#define WM8994_AIF1DAC1L_ENA_SHIFT 9 /* AIF1DAC1L_ENA */ +#define WM8994_AIF1DAC1L_ENA_WIDTH 1 /* AIF1DAC1L_ENA */ +#define WM8994_AIF1DAC1R_ENA 0x0100 /* AIF1DAC1R_ENA */ +#define WM8994_AIF1DAC1R_ENA_MASK 0x0100 /* AIF1DAC1R_ENA */ +#define WM8994_AIF1DAC1R_ENA_SHIFT 8 /* AIF1DAC1R_ENA */ +#define WM8994_AIF1DAC1R_ENA_WIDTH 1 /* AIF1DAC1R_ENA */ +#define WM8994_DAC2L_ENA 0x0008 /* DAC2L_ENA */ +#define WM8994_DAC2L_ENA_MASK 0x0008 /* DAC2L_ENA */ +#define WM8994_DAC2L_ENA_SHIFT 3 /* DAC2L_ENA */ +#define WM8994_DAC2L_ENA_WIDTH 1 /* DAC2L_ENA */ +#define WM8994_DAC2R_ENA 0x0004 /* DAC2R_ENA */ +#define WM8994_DAC2R_ENA_MASK 0x0004 /* DAC2R_ENA */ +#define WM8994_DAC2R_ENA_SHIFT 2 /* DAC2R_ENA */ +#define WM8994_DAC2R_ENA_WIDTH 1 /* DAC2R_ENA */ +#define WM8994_DAC1L_ENA 0x0002 /* DAC1L_ENA */ +#define WM8994_DAC1L_ENA_MASK 0x0002 /* DAC1L_ENA */ +#define WM8994_DAC1L_ENA_SHIFT 1 /* DAC1L_ENA */ +#define WM8994_DAC1L_ENA_WIDTH 1 /* DAC1L_ENA */ +#define WM8994_DAC1R_ENA 0x0001 /* DAC1R_ENA */ +#define WM8994_DAC1R_ENA_MASK 0x0001 /* DAC1R_ENA */ +#define WM8994_DAC1R_ENA_SHIFT 0 /* DAC1R_ENA */ +#define WM8994_DAC1R_ENA_WIDTH 1 /* DAC1R_ENA */ + +/* + * R6 (0x06) - Power Management (6) + */ +#define WM8958_AIF3ADC_SRC_MASK 0x0600 /* AIF3ADC_SRC - [10:9] */ +#define WM8958_AIF3ADC_SRC_SHIFT 9 /* AIF3ADC_SRC - [10:9] */ +#define WM8958_AIF3ADC_SRC_WIDTH 2 /* AIF3ADC_SRC - [10:9] */ +#define WM8958_AIF2DAC_SRC_MASK 0x0180 /* AIF2DAC_SRC - [8:7] */ +#define WM8958_AIF2DAC_SRC_SHIFT 7 /* AIF2DAC_SRC - [8:7] */ +#define WM8958_AIF2DAC_SRC_WIDTH 2 /* AIF2DAC_SRC - [8:7] */ +#define WM8994_AIF3_TRI 0x0020 /* AIF3_TRI */ +#define WM8994_AIF3_TRI_MASK 0x0020 /* AIF3_TRI */ +#define WM8994_AIF3_TRI_SHIFT 5 /* AIF3_TRI */ +#define WM8994_AIF3_TRI_WIDTH 1 /* AIF3_TRI */ +#define WM8994_AIF3_ADCDAT_SRC_MASK 0x0018 /* AIF3_ADCDAT_SRC - [4:3] */ +#define WM8994_AIF3_ADCDAT_SRC_SHIFT 3 /* AIF3_ADCDAT_SRC - [4:3] */ +#define WM8994_AIF3_ADCDAT_SRC_WIDTH 2 /* AIF3_ADCDAT_SRC - [4:3] */ +#define WM8994_AIF2_ADCDAT_SRC 0x0004 /* AIF2_ADCDAT_SRC */ +#define WM8994_AIF2_ADCDAT_SRC_MASK 0x0004 /* AIF2_ADCDAT_SRC */ +#define WM8994_AIF2_ADCDAT_SRC_SHIFT 2 /* AIF2_ADCDAT_SRC */ +#define WM8994_AIF2_ADCDAT_SRC_WIDTH 1 /* AIF2_ADCDAT_SRC */ +#define WM8994_AIF2_DACDAT_SRC 0x0002 /* AIF2_DACDAT_SRC */ +#define WM8994_AIF2_DACDAT_SRC_MASK 0x0002 /* AIF2_DACDAT_SRC */ +#define WM8994_AIF2_DACDAT_SRC_SHIFT 1 /* AIF2_DACDAT_SRC */ +#define WM8994_AIF2_DACDAT_SRC_WIDTH 1 /* AIF2_DACDAT_SRC */ +#define WM8994_AIF1_DACDAT_SRC 0x0001 /* AIF1_DACDAT_SRC */ +#define WM8994_AIF1_DACDAT_SRC_MASK 0x0001 /* AIF1_DACDAT_SRC */ +#define WM8994_AIF1_DACDAT_SRC_SHIFT 0 /* AIF1_DACDAT_SRC */ +#define WM8994_AIF1_DACDAT_SRC_WIDTH 1 /* AIF1_DACDAT_SRC */ + +/* + * R21 (0x15) - Input Mixer (1) + */ +#define WM8994_IN1RP_MIXINR_BOOST 0x0100 /* IN1RP_MIXINR_BOOST */ +#define WM8994_IN1RP_MIXINR_BOOST_MASK 0x0100 /* IN1RP_MIXINR_BOOST */ +#define WM8994_IN1RP_MIXINR_BOOST_SHIFT 8 /* IN1RP_MIXINR_BOOST 
*/ +#define WM8994_IN1RP_MIXINR_BOOST_WIDTH 1 /* IN1RP_MIXINR_BOOST */ +#define WM8994_IN1LP_MIXINL_BOOST 0x0080 /* IN1LP_MIXINL_BOOST */ +#define WM8994_IN1LP_MIXINL_BOOST_MASK 0x0080 /* IN1LP_MIXINL_BOOST */ +#define WM8994_IN1LP_MIXINL_BOOST_SHIFT 7 /* IN1LP_MIXINL_BOOST */ +#define WM8994_IN1LP_MIXINL_BOOST_WIDTH 1 /* IN1LP_MIXINL_BOOST */ +#define WM8994_INPUTS_CLAMP 0x0040 /* INPUTS_CLAMP */ +#define WM8994_INPUTS_CLAMP_MASK 0x0040 /* INPUTS_CLAMP */ +#define WM8994_INPUTS_CLAMP_SHIFT 6 /* INPUTS_CLAMP */ +#define WM8994_INPUTS_CLAMP_WIDTH 1 /* INPUTS_CLAMP */ + +/* + * R24 (0x18) - Left Line Input 1&2 Volume + */ +#define WM8994_IN1_VU 0x0100 /* IN1_VU */ +#define WM8994_IN1_VU_MASK 0x0100 /* IN1_VU */ +#define WM8994_IN1_VU_SHIFT 8 /* IN1_VU */ +#define WM8994_IN1_VU_WIDTH 1 /* IN1_VU */ +#define WM8994_IN1L_MUTE 0x0080 /* IN1L_MUTE */ +#define WM8994_IN1L_MUTE_MASK 0x0080 /* IN1L_MUTE */ +#define WM8994_IN1L_MUTE_SHIFT 7 /* IN1L_MUTE */ +#define WM8994_IN1L_MUTE_WIDTH 1 /* IN1L_MUTE */ +#define WM8994_IN1L_ZC 0x0040 /* IN1L_ZC */ +#define WM8994_IN1L_ZC_MASK 0x0040 /* IN1L_ZC */ +#define WM8994_IN1L_ZC_SHIFT 6 /* IN1L_ZC */ +#define WM8994_IN1L_ZC_WIDTH 1 /* IN1L_ZC */ +#define WM8994_IN1L_VOL_MASK 0x001F /* IN1L_VOL - [4:0] */ +#define WM8994_IN1L_VOL_SHIFT 0 /* IN1L_VOL - [4:0] */ +#define WM8994_IN1L_VOL_WIDTH 5 /* IN1L_VOL - [4:0] */ + +/* + * R25 (0x19) - Left Line Input 3&4 Volume + */ +#define WM8994_IN2_VU 0x0100 /* IN2_VU */ +#define WM8994_IN2_VU_MASK 0x0100 /* IN2_VU */ +#define WM8994_IN2_VU_SHIFT 8 /* IN2_VU */ +#define WM8994_IN2_VU_WIDTH 1 /* IN2_VU */ +#define WM8994_IN2L_MUTE 0x0080 /* IN2L_MUTE */ +#define WM8994_IN2L_MUTE_MASK 0x0080 /* IN2L_MUTE */ +#define WM8994_IN2L_MUTE_SHIFT 7 /* IN2L_MUTE */ +#define WM8994_IN2L_MUTE_WIDTH 1 /* IN2L_MUTE */ +#define WM8994_IN2L_ZC 0x0040 /* IN2L_ZC */ +#define WM8994_IN2L_ZC_MASK 0x0040 /* IN2L_ZC */ +#define WM8994_IN2L_ZC_SHIFT 6 /* IN2L_ZC */ +#define WM8994_IN2L_ZC_WIDTH 1 /* IN2L_ZC */ +#define WM8994_IN2L_VOL_MASK 0x001F /* IN2L_VOL - [4:0] */ +#define WM8994_IN2L_VOL_SHIFT 0 /* IN2L_VOL - [4:0] */ +#define WM8994_IN2L_VOL_WIDTH 5 /* IN2L_VOL - [4:0] */ + +/* + * R26 (0x1A) - Right Line Input 1&2 Volume + */ +#define WM8994_IN1_VU 0x0100 /* IN1_VU */ +#define WM8994_IN1_VU_MASK 0x0100 /* IN1_VU */ +#define WM8994_IN1_VU_SHIFT 8 /* IN1_VU */ +#define WM8994_IN1_VU_WIDTH 1 /* IN1_VU */ +#define WM8994_IN1R_MUTE 0x0080 /* IN1R_MUTE */ +#define WM8994_IN1R_MUTE_MASK 0x0080 /* IN1R_MUTE */ +#define WM8994_IN1R_MUTE_SHIFT 7 /* IN1R_MUTE */ +#define WM8994_IN1R_MUTE_WIDTH 1 /* IN1R_MUTE */ +#define WM8994_IN1R_ZC 0x0040 /* IN1R_ZC */ +#define WM8994_IN1R_ZC_MASK 0x0040 /* IN1R_ZC */ +#define WM8994_IN1R_ZC_SHIFT 6 /* IN1R_ZC */ +#define WM8994_IN1R_ZC_WIDTH 1 /* IN1R_ZC */ +#define WM8994_IN1R_VOL_MASK 0x001F /* IN1R_VOL - [4:0] */ +#define WM8994_IN1R_VOL_SHIFT 0 /* IN1R_VOL - [4:0] */ +#define WM8994_IN1R_VOL_WIDTH 5 /* IN1R_VOL - [4:0] */ + +/* + * R27 (0x1B) - Right Line Input 3&4 Volume + */ +#define WM8994_IN2_VU 0x0100 /* IN2_VU */ +#define WM8994_IN2_VU_MASK 0x0100 /* IN2_VU */ +#define WM8994_IN2_VU_SHIFT 8 /* IN2_VU */ +#define WM8994_IN2_VU_WIDTH 1 /* IN2_VU */ +#define WM8994_IN2R_MUTE 0x0080 /* IN2R_MUTE */ +#define WM8994_IN2R_MUTE_MASK 0x0080 /* IN2R_MUTE */ +#define WM8994_IN2R_MUTE_SHIFT 7 /* IN2R_MUTE */ +#define WM8994_IN2R_MUTE_WIDTH 1 /* IN2R_MUTE */ +#define WM8994_IN2R_ZC 0x0040 /* IN2R_ZC */ +#define WM8994_IN2R_ZC_MASK 0x0040 /* IN2R_ZC */ +#define WM8994_IN2R_ZC_SHIFT 6 /* IN2R_ZC */ +#define 
WM8994_IN2R_ZC_WIDTH 1 /* IN2R_ZC */ +#define WM8994_IN2R_VOL_MASK 0x001F /* IN2R_VOL - [4:0] */ +#define WM8994_IN2R_VOL_SHIFT 0 /* IN2R_VOL - [4:0] */ +#define WM8994_IN2R_VOL_WIDTH 5 /* IN2R_VOL - [4:0] */ + +/* + * R28 (0x1C) - Left Output Volume + */ +#define WM8994_HPOUT1_VU 0x0100 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */ +#define WM8994_HPOUT1L_ZC 0x0080 /* HPOUT1L_ZC */ +#define WM8994_HPOUT1L_ZC_MASK 0x0080 /* HPOUT1L_ZC */ +#define WM8994_HPOUT1L_ZC_SHIFT 7 /* HPOUT1L_ZC */ +#define WM8994_HPOUT1L_ZC_WIDTH 1 /* HPOUT1L_ZC */ +#define WM8994_HPOUT1L_MUTE_N 0x0040 /* HPOUT1L_MUTE_N */ +#define WM8994_HPOUT1L_MUTE_N_MASK 0x0040 /* HPOUT1L_MUTE_N */ +#define WM8994_HPOUT1L_MUTE_N_SHIFT 6 /* HPOUT1L_MUTE_N */ +#define WM8994_HPOUT1L_MUTE_N_WIDTH 1 /* HPOUT1L_MUTE_N */ +#define WM8994_HPOUT1L_VOL_MASK 0x003F /* HPOUT1L_VOL - [5:0] */ +#define WM8994_HPOUT1L_VOL_SHIFT 0 /* HPOUT1L_VOL - [5:0] */ +#define WM8994_HPOUT1L_VOL_WIDTH 6 /* HPOUT1L_VOL - [5:0] */ + +/* + * R29 (0x1D) - Right Output Volume + */ +#define WM8994_HPOUT1_VU 0x0100 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */ +#define WM8994_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */ +#define WM8994_HPOUT1R_ZC 0x0080 /* HPOUT1R_ZC */ +#define WM8994_HPOUT1R_ZC_MASK 0x0080 /* HPOUT1R_ZC */ +#define WM8994_HPOUT1R_ZC_SHIFT 7 /* HPOUT1R_ZC */ +#define WM8994_HPOUT1R_ZC_WIDTH 1 /* HPOUT1R_ZC */ +#define WM8994_HPOUT1R_MUTE_N 0x0040 /* HPOUT1R_MUTE_N */ +#define WM8994_HPOUT1R_MUTE_N_MASK 0x0040 /* HPOUT1R_MUTE_N */ +#define WM8994_HPOUT1R_MUTE_N_SHIFT 6 /* HPOUT1R_MUTE_N */ +#define WM8994_HPOUT1R_MUTE_N_WIDTH 1 /* HPOUT1R_MUTE_N */ +#define WM8994_HPOUT1R_VOL_MASK 0x003F /* HPOUT1R_VOL - [5:0] */ +#define WM8994_HPOUT1R_VOL_SHIFT 0 /* HPOUT1R_VOL - [5:0] */ +#define WM8994_HPOUT1R_VOL_WIDTH 6 /* HPOUT1R_VOL - [5:0] */ + +/* + * R30 (0x1E) - Line Outputs Volume + */ +#define WM8994_LINEOUT1N_MUTE 0x0040 /* LINEOUT1N_MUTE */ +#define WM8994_LINEOUT1N_MUTE_MASK 0x0040 /* LINEOUT1N_MUTE */ +#define WM8994_LINEOUT1N_MUTE_SHIFT 6 /* LINEOUT1N_MUTE */ +#define WM8994_LINEOUT1N_MUTE_WIDTH 1 /* LINEOUT1N_MUTE */ +#define WM8994_LINEOUT1P_MUTE 0x0020 /* LINEOUT1P_MUTE */ +#define WM8994_LINEOUT1P_MUTE_MASK 0x0020 /* LINEOUT1P_MUTE */ +#define WM8994_LINEOUT1P_MUTE_SHIFT 5 /* LINEOUT1P_MUTE */ +#define WM8994_LINEOUT1P_MUTE_WIDTH 1 /* LINEOUT1P_MUTE */ +#define WM8994_LINEOUT1_VOL 0x0010 /* LINEOUT1_VOL */ +#define WM8994_LINEOUT1_VOL_MASK 0x0010 /* LINEOUT1_VOL */ +#define WM8994_LINEOUT1_VOL_SHIFT 4 /* LINEOUT1_VOL */ +#define WM8994_LINEOUT1_VOL_WIDTH 1 /* LINEOUT1_VOL */ +#define WM8994_LINEOUT2N_MUTE 0x0004 /* LINEOUT2N_MUTE */ +#define WM8994_LINEOUT2N_MUTE_MASK 0x0004 /* LINEOUT2N_MUTE */ +#define WM8994_LINEOUT2N_MUTE_SHIFT 2 /* LINEOUT2N_MUTE */ +#define WM8994_LINEOUT2N_MUTE_WIDTH 1 /* LINEOUT2N_MUTE */ +#define WM8994_LINEOUT2P_MUTE 0x0002 /* LINEOUT2P_MUTE */ +#define WM8994_LINEOUT2P_MUTE_MASK 0x0002 /* LINEOUT2P_MUTE */ +#define WM8994_LINEOUT2P_MUTE_SHIFT 1 /* LINEOUT2P_MUTE */ +#define WM8994_LINEOUT2P_MUTE_WIDTH 1 /* LINEOUT2P_MUTE */ +#define WM8994_LINEOUT2_VOL 0x0001 /* LINEOUT2_VOL */ +#define WM8994_LINEOUT2_VOL_MASK 0x0001 /* LINEOUT2_VOL */ +#define WM8994_LINEOUT2_VOL_SHIFT 0 /* LINEOUT2_VOL */ +#define WM8994_LINEOUT2_VOL_WIDTH 1 /* LINEOUT2_VOL */ + +/* + * R31 (0x1F) - HPOUT2 Volume + */ +#define WM8994_HPOUT2_MUTE 0x0020 /* 
HPOUT2_MUTE */ +#define WM8994_HPOUT2_MUTE_MASK 0x0020 /* HPOUT2_MUTE */ +#define WM8994_HPOUT2_MUTE_SHIFT 5 /* HPOUT2_MUTE */ +#define WM8994_HPOUT2_MUTE_WIDTH 1 /* HPOUT2_MUTE */ +#define WM8994_HPOUT2_VOL 0x0010 /* HPOUT2_VOL */ +#define WM8994_HPOUT2_VOL_MASK 0x0010 /* HPOUT2_VOL */ +#define WM8994_HPOUT2_VOL_SHIFT 4 /* HPOUT2_VOL */ +#define WM8994_HPOUT2_VOL_WIDTH 1 /* HPOUT2_VOL */ + +/* + * R32 (0x20) - Left OPGA Volume + */ +#define WM8994_MIXOUT_VU 0x0100 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */ +#define WM8994_MIXOUTL_ZC 0x0080 /* MIXOUTL_ZC */ +#define WM8994_MIXOUTL_ZC_MASK 0x0080 /* MIXOUTL_ZC */ +#define WM8994_MIXOUTL_ZC_SHIFT 7 /* MIXOUTL_ZC */ +#define WM8994_MIXOUTL_ZC_WIDTH 1 /* MIXOUTL_ZC */ +#define WM8994_MIXOUTL_MUTE_N 0x0040 /* MIXOUTL_MUTE_N */ +#define WM8994_MIXOUTL_MUTE_N_MASK 0x0040 /* MIXOUTL_MUTE_N */ +#define WM8994_MIXOUTL_MUTE_N_SHIFT 6 /* MIXOUTL_MUTE_N */ +#define WM8994_MIXOUTL_MUTE_N_WIDTH 1 /* MIXOUTL_MUTE_N */ +#define WM8994_MIXOUTL_VOL_MASK 0x003F /* MIXOUTL_VOL - [5:0] */ +#define WM8994_MIXOUTL_VOL_SHIFT 0 /* MIXOUTL_VOL - [5:0] */ +#define WM8994_MIXOUTL_VOL_WIDTH 6 /* MIXOUTL_VOL - [5:0] */ + +/* + * R33 (0x21) - Right OPGA Volume + */ +#define WM8994_MIXOUT_VU 0x0100 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */ +#define WM8994_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */ +#define WM8994_MIXOUTR_ZC 0x0080 /* MIXOUTR_ZC */ +#define WM8994_MIXOUTR_ZC_MASK 0x0080 /* MIXOUTR_ZC */ +#define WM8994_MIXOUTR_ZC_SHIFT 7 /* MIXOUTR_ZC */ +#define WM8994_MIXOUTR_ZC_WIDTH 1 /* MIXOUTR_ZC */ +#define WM8994_MIXOUTR_MUTE_N 0x0040 /* MIXOUTR_MUTE_N */ +#define WM8994_MIXOUTR_MUTE_N_MASK 0x0040 /* MIXOUTR_MUTE_N */ +#define WM8994_MIXOUTR_MUTE_N_SHIFT 6 /* MIXOUTR_MUTE_N */ +#define WM8994_MIXOUTR_MUTE_N_WIDTH 1 /* MIXOUTR_MUTE_N */ +#define WM8994_MIXOUTR_VOL_MASK 0x003F /* MIXOUTR_VOL - [5:0] */ +#define WM8994_MIXOUTR_VOL_SHIFT 0 /* MIXOUTR_VOL - [5:0] */ +#define WM8994_MIXOUTR_VOL_WIDTH 6 /* MIXOUTR_VOL - [5:0] */ + +/* + * R34 (0x22) - SPKMIXL Attenuation + */ +#define WM8994_DAC2L_SPKMIXL_VOL 0x0040 /* DAC2L_SPKMIXL_VOL */ +#define WM8994_DAC2L_SPKMIXL_VOL_MASK 0x0040 /* DAC2L_SPKMIXL_VOL */ +#define WM8994_DAC2L_SPKMIXL_VOL_SHIFT 6 /* DAC2L_SPKMIXL_VOL */ +#define WM8994_DAC2L_SPKMIXL_VOL_WIDTH 1 /* DAC2L_SPKMIXL_VOL */ +#define WM8994_MIXINL_SPKMIXL_VOL 0x0020 /* MIXINL_SPKMIXL_VOL */ +#define WM8994_MIXINL_SPKMIXL_VOL_MASK 0x0020 /* MIXINL_SPKMIXL_VOL */ +#define WM8994_MIXINL_SPKMIXL_VOL_SHIFT 5 /* MIXINL_SPKMIXL_VOL */ +#define WM8994_MIXINL_SPKMIXL_VOL_WIDTH 1 /* MIXINL_SPKMIXL_VOL */ +#define WM8994_IN1LP_SPKMIXL_VOL 0x0010 /* IN1LP_SPKMIXL_VOL */ +#define WM8994_IN1LP_SPKMIXL_VOL_MASK 0x0010 /* IN1LP_SPKMIXL_VOL */ +#define WM8994_IN1LP_SPKMIXL_VOL_SHIFT 4 /* IN1LP_SPKMIXL_VOL */ +#define WM8994_IN1LP_SPKMIXL_VOL_WIDTH 1 /* IN1LP_SPKMIXL_VOL */ +#define WM8994_MIXOUTL_SPKMIXL_VOL 0x0008 /* MIXOUTL_SPKMIXL_VOL */ +#define WM8994_MIXOUTL_SPKMIXL_VOL_MASK 0x0008 /* MIXOUTL_SPKMIXL_VOL */ +#define WM8994_MIXOUTL_SPKMIXL_VOL_SHIFT 3 /* MIXOUTL_SPKMIXL_VOL */ +#define WM8994_MIXOUTL_SPKMIXL_VOL_WIDTH 1 /* MIXOUTL_SPKMIXL_VOL */ +#define WM8994_DAC1L_SPKMIXL_VOL 0x0004 /* DAC1L_SPKMIXL_VOL */ +#define WM8994_DAC1L_SPKMIXL_VOL_MASK 0x0004 /* DAC1L_SPKMIXL_VOL */ +#define WM8994_DAC1L_SPKMIXL_VOL_SHIFT 2 /* DAC1L_SPKMIXL_VOL */ +#define 
WM8994_DAC1L_SPKMIXL_VOL_WIDTH 1 /* DAC1L_SPKMIXL_VOL */ +#define WM8994_SPKMIXL_VOL_MASK 0x0003 /* SPKMIXL_VOL - [1:0] */ +#define WM8994_SPKMIXL_VOL_SHIFT 0 /* SPKMIXL_VOL - [1:0] */ +#define WM8994_SPKMIXL_VOL_WIDTH 2 /* SPKMIXL_VOL - [1:0] */ + +/* + * R35 (0x23) - SPKMIXR Attenuation + */ +#define WM8994_SPKOUT_CLASSAB 0x0100 /* SPKOUT_CLASSAB */ +#define WM8994_SPKOUT_CLASSAB_MASK 0x0100 /* SPKOUT_CLASSAB */ +#define WM8994_SPKOUT_CLASSAB_SHIFT 8 /* SPKOUT_CLASSAB */ +#define WM8994_SPKOUT_CLASSAB_WIDTH 1 /* SPKOUT_CLASSAB */ +#define WM8994_DAC2R_SPKMIXR_VOL 0x0040 /* DAC2R_SPKMIXR_VOL */ +#define WM8994_DAC2R_SPKMIXR_VOL_MASK 0x0040 /* DAC2R_SPKMIXR_VOL */ +#define WM8994_DAC2R_SPKMIXR_VOL_SHIFT 6 /* DAC2R_SPKMIXR_VOL */ +#define WM8994_DAC2R_SPKMIXR_VOL_WIDTH 1 /* DAC2R_SPKMIXR_VOL */ +#define WM8994_MIXINR_SPKMIXR_VOL 0x0020 /* MIXINR_SPKMIXR_VOL */ +#define WM8994_MIXINR_SPKMIXR_VOL_MASK 0x0020 /* MIXINR_SPKMIXR_VOL */ +#define WM8994_MIXINR_SPKMIXR_VOL_SHIFT 5 /* MIXINR_SPKMIXR_VOL */ +#define WM8994_MIXINR_SPKMIXR_VOL_WIDTH 1 /* MIXINR_SPKMIXR_VOL */ +#define WM8994_IN1RP_SPKMIXR_VOL 0x0010 /* IN1RP_SPKMIXR_VOL */ +#define WM8994_IN1RP_SPKMIXR_VOL_MASK 0x0010 /* IN1RP_SPKMIXR_VOL */ +#define WM8994_IN1RP_SPKMIXR_VOL_SHIFT 4 /* IN1RP_SPKMIXR_VOL */ +#define WM8994_IN1RP_SPKMIXR_VOL_WIDTH 1 /* IN1RP_SPKMIXR_VOL */ +#define WM8994_MIXOUTR_SPKMIXR_VOL 0x0008 /* MIXOUTR_SPKMIXR_VOL */ +#define WM8994_MIXOUTR_SPKMIXR_VOL_MASK 0x0008 /* MIXOUTR_SPKMIXR_VOL */ +#define WM8994_MIXOUTR_SPKMIXR_VOL_SHIFT 3 /* MIXOUTR_SPKMIXR_VOL */ +#define WM8994_MIXOUTR_SPKMIXR_VOL_WIDTH 1 /* MIXOUTR_SPKMIXR_VOL */ +#define WM8994_DAC1R_SPKMIXR_VOL 0x0004 /* DAC1R_SPKMIXR_VOL */ +#define WM8994_DAC1R_SPKMIXR_VOL_MASK 0x0004 /* DAC1R_SPKMIXR_VOL */ +#define WM8994_DAC1R_SPKMIXR_VOL_SHIFT 2 /* DAC1R_SPKMIXR_VOL */ +#define WM8994_DAC1R_SPKMIXR_VOL_WIDTH 1 /* DAC1R_SPKMIXR_VOL */ +#define WM8994_SPKMIXR_VOL_MASK 0x0003 /* SPKMIXR_VOL - [1:0] */ +#define WM8994_SPKMIXR_VOL_SHIFT 0 /* SPKMIXR_VOL - [1:0] */ +#define WM8994_SPKMIXR_VOL_WIDTH 2 /* SPKMIXR_VOL - [1:0] */ + +/* + * R36 (0x24) - SPKOUT Mixers + */ +#define WM8994_IN2LRP_TO_SPKOUTL 0x0020 /* IN2LRP_TO_SPKOUTL */ +#define WM8994_IN2LRP_TO_SPKOUTL_MASK 0x0020 /* IN2LRP_TO_SPKOUTL */ +#define WM8994_IN2LRP_TO_SPKOUTL_SHIFT 5 /* IN2LRP_TO_SPKOUTL */ +#define WM8994_IN2LRP_TO_SPKOUTL_WIDTH 1 /* IN2LRP_TO_SPKOUTL */ +#define WM8994_SPKMIXL_TO_SPKOUTL 0x0010 /* SPKMIXL_TO_SPKOUTL */ +#define WM8994_SPKMIXL_TO_SPKOUTL_MASK 0x0010 /* SPKMIXL_TO_SPKOUTL */ +#define WM8994_SPKMIXL_TO_SPKOUTL_SHIFT 4 /* SPKMIXL_TO_SPKOUTL */ +#define WM8994_SPKMIXL_TO_SPKOUTL_WIDTH 1 /* SPKMIXL_TO_SPKOUTL */ +#define WM8994_SPKMIXR_TO_SPKOUTL 0x0008 /* SPKMIXR_TO_SPKOUTL */ +#define WM8994_SPKMIXR_TO_SPKOUTL_MASK 0x0008 /* SPKMIXR_TO_SPKOUTL */ +#define WM8994_SPKMIXR_TO_SPKOUTL_SHIFT 3 /* SPKMIXR_TO_SPKOUTL */ +#define WM8994_SPKMIXR_TO_SPKOUTL_WIDTH 1 /* SPKMIXR_TO_SPKOUTL */ +#define WM8994_IN2LRP_TO_SPKOUTR 0x0004 /* IN2LRP_TO_SPKOUTR */ +#define WM8994_IN2LRP_TO_SPKOUTR_MASK 0x0004 /* IN2LRP_TO_SPKOUTR */ +#define WM8994_IN2LRP_TO_SPKOUTR_SHIFT 2 /* IN2LRP_TO_SPKOUTR */ +#define WM8994_IN2LRP_TO_SPKOUTR_WIDTH 1 /* IN2LRP_TO_SPKOUTR */ +#define WM8994_SPKMIXL_TO_SPKOUTR 0x0002 /* SPKMIXL_TO_SPKOUTR */ +#define WM8994_SPKMIXL_TO_SPKOUTR_MASK 0x0002 /* SPKMIXL_TO_SPKOUTR */ +#define WM8994_SPKMIXL_TO_SPKOUTR_SHIFT 1 /* SPKMIXL_TO_SPKOUTR */ +#define WM8994_SPKMIXL_TO_SPKOUTR_WIDTH 1 /* SPKMIXL_TO_SPKOUTR */ +#define WM8994_SPKMIXR_TO_SPKOUTR 0x0001 /* SPKMIXR_TO_SPKOUTR */ 
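The _MASK/_SHIFT/_WIDTH triples defined throughout this header describe bit fields in 16-bit codec registers, which are normally updated with a read-modify-write. The short C sketch below illustrates that pattern with two fields from the R1 (0x01) Power Management (1) block earlier in this file; the wm8994_field_set() helper and the standalone main() are illustrative assumptions, not part of this header or of any driver, and the two macros are repeated locally only so the sketch compiles on its own.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the R1 (0x01) Power Management (1) definitions above. */
#define WM8994_SPKOUTL_ENA	0x1000
#define WM8994_VMID_SEL_MASK	0x0006
#define WM8994_VMID_SEL_SHIFT	1

/* Hypothetical helper: place 'val' into the field described by mask/shift. */
static uint16_t wm8994_field_set(uint16_t reg, uint16_t mask,
				 unsigned int shift, uint16_t val)
{
	return (uint16_t)((reg & ~mask) | ((val << shift) & mask));
}

int main(void)
{
	uint16_t pm1 = 0x0000;		/* in-memory image of register 0x01 */

	pm1 |= WM8994_SPKOUTL_ENA;	/* single-bit field: set SPKOUTL_ENA */
	pm1 = wm8994_field_set(pm1, WM8994_VMID_SEL_MASK,
			       WM8994_VMID_SEL_SHIFT, 0x1);	/* VMID_SEL = 01 */

	printf("R1 (0x01) = 0x%04x\n", (unsigned int)pm1);	/* prints 0x1002 */
	return 0;
}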
+#define WM8994_SPKMIXR_TO_SPKOUTR_MASK 0x0001 /* SPKMIXR_TO_SPKOUTR */ +#define WM8994_SPKMIXR_TO_SPKOUTR_SHIFT 0 /* SPKMIXR_TO_SPKOUTR */ +#define WM8994_SPKMIXR_TO_SPKOUTR_WIDTH 1 /* SPKMIXR_TO_SPKOUTR */ + +/* + * R37 (0x25) - ClassD + */ +#define WM8994_SPKOUTL_BOOST_MASK 0x0038 /* SPKOUTL_BOOST - [5:3] */ +#define WM8994_SPKOUTL_BOOST_SHIFT 3 /* SPKOUTL_BOOST - [5:3] */ +#define WM8994_SPKOUTL_BOOST_WIDTH 3 /* SPKOUTL_BOOST - [5:3] */ +#define WM8994_SPKOUTR_BOOST_MASK 0x0007 /* SPKOUTR_BOOST - [2:0] */ +#define WM8994_SPKOUTR_BOOST_SHIFT 0 /* SPKOUTR_BOOST - [2:0] */ +#define WM8994_SPKOUTR_BOOST_WIDTH 3 /* SPKOUTR_BOOST - [2:0] */ + +/* + * R38 (0x26) - Speaker Volume Left + */ +#define WM8994_SPKOUT_VU 0x0100 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */ +#define WM8994_SPKOUTL_ZC 0x0080 /* SPKOUTL_ZC */ +#define WM8994_SPKOUTL_ZC_MASK 0x0080 /* SPKOUTL_ZC */ +#define WM8994_SPKOUTL_ZC_SHIFT 7 /* SPKOUTL_ZC */ +#define WM8994_SPKOUTL_ZC_WIDTH 1 /* SPKOUTL_ZC */ +#define WM8994_SPKOUTL_MUTE_N 0x0040 /* SPKOUTL_MUTE_N */ +#define WM8994_SPKOUTL_MUTE_N_MASK 0x0040 /* SPKOUTL_MUTE_N */ +#define WM8994_SPKOUTL_MUTE_N_SHIFT 6 /* SPKOUTL_MUTE_N */ +#define WM8994_SPKOUTL_MUTE_N_WIDTH 1 /* SPKOUTL_MUTE_N */ +#define WM8994_SPKOUTL_VOL_MASK 0x003F /* SPKOUTL_VOL - [5:0] */ +#define WM8994_SPKOUTL_VOL_SHIFT 0 /* SPKOUTL_VOL - [5:0] */ +#define WM8994_SPKOUTL_VOL_WIDTH 6 /* SPKOUTL_VOL - [5:0] */ + +/* + * R39 (0x27) - Speaker Volume Right + */ +#define WM8994_SPKOUT_VU 0x0100 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */ +#define WM8994_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */ +#define WM8994_SPKOUTR_ZC 0x0080 /* SPKOUTR_ZC */ +#define WM8994_SPKOUTR_ZC_MASK 0x0080 /* SPKOUTR_ZC */ +#define WM8994_SPKOUTR_ZC_SHIFT 7 /* SPKOUTR_ZC */ +#define WM8994_SPKOUTR_ZC_WIDTH 1 /* SPKOUTR_ZC */ +#define WM8994_SPKOUTR_MUTE_N 0x0040 /* SPKOUTR_MUTE_N */ +#define WM8994_SPKOUTR_MUTE_N_MASK 0x0040 /* SPKOUTR_MUTE_N */ +#define WM8994_SPKOUTR_MUTE_N_SHIFT 6 /* SPKOUTR_MUTE_N */ +#define WM8994_SPKOUTR_MUTE_N_WIDTH 1 /* SPKOUTR_MUTE_N */ +#define WM8994_SPKOUTR_VOL_MASK 0x003F /* SPKOUTR_VOL - [5:0] */ +#define WM8994_SPKOUTR_VOL_SHIFT 0 /* SPKOUTR_VOL - [5:0] */ +#define WM8994_SPKOUTR_VOL_WIDTH 6 /* SPKOUTR_VOL - [5:0] */ + +/* + * R40 (0x28) - Input Mixer (2) + */ +#define WM8994_IN2LP_TO_IN2L 0x0080 /* IN2LP_TO_IN2L */ +#define WM8994_IN2LP_TO_IN2L_MASK 0x0080 /* IN2LP_TO_IN2L */ +#define WM8994_IN2LP_TO_IN2L_SHIFT 7 /* IN2LP_TO_IN2L */ +#define WM8994_IN2LP_TO_IN2L_WIDTH 1 /* IN2LP_TO_IN2L */ +#define WM8994_IN2LN_TO_IN2L 0x0040 /* IN2LN_TO_IN2L */ +#define WM8994_IN2LN_TO_IN2L_MASK 0x0040 /* IN2LN_TO_IN2L */ +#define WM8994_IN2LN_TO_IN2L_SHIFT 6 /* IN2LN_TO_IN2L */ +#define WM8994_IN2LN_TO_IN2L_WIDTH 1 /* IN2LN_TO_IN2L */ +#define WM8994_IN1LP_TO_IN1L 0x0020 /* IN1LP_TO_IN1L */ +#define WM8994_IN1LP_TO_IN1L_MASK 0x0020 /* IN1LP_TO_IN1L */ +#define WM8994_IN1LP_TO_IN1L_SHIFT 5 /* IN1LP_TO_IN1L */ +#define WM8994_IN1LP_TO_IN1L_WIDTH 1 /* IN1LP_TO_IN1L */ +#define WM8994_IN1LN_TO_IN1L 0x0010 /* IN1LN_TO_IN1L */ +#define WM8994_IN1LN_TO_IN1L_MASK 0x0010 /* IN1LN_TO_IN1L */ +#define WM8994_IN1LN_TO_IN1L_SHIFT 4 /* IN1LN_TO_IN1L */ +#define WM8994_IN1LN_TO_IN1L_WIDTH 1 /* IN1LN_TO_IN1L */ +#define WM8994_IN2RP_TO_IN2R 0x0008 /* IN2RP_TO_IN2R */ +#define WM8994_IN2RP_TO_IN2R_MASK 0x0008 /* IN2RP_TO_IN2R */ +#define 
WM8994_IN2RP_TO_IN2R_SHIFT 3 /* IN2RP_TO_IN2R */ +#define WM8994_IN2RP_TO_IN2R_WIDTH 1 /* IN2RP_TO_IN2R */ +#define WM8994_IN2RN_TO_IN2R 0x0004 /* IN2RN_TO_IN2R */ +#define WM8994_IN2RN_TO_IN2R_MASK 0x0004 /* IN2RN_TO_IN2R */ +#define WM8994_IN2RN_TO_IN2R_SHIFT 2 /* IN2RN_TO_IN2R */ +#define WM8994_IN2RN_TO_IN2R_WIDTH 1 /* IN2RN_TO_IN2R */ +#define WM8994_IN1RP_TO_IN1R 0x0002 /* IN1RP_TO_IN1R */ +#define WM8994_IN1RP_TO_IN1R_MASK 0x0002 /* IN1RP_TO_IN1R */ +#define WM8994_IN1RP_TO_IN1R_SHIFT 1 /* IN1RP_TO_IN1R */ +#define WM8994_IN1RP_TO_IN1R_WIDTH 1 /* IN1RP_TO_IN1R */ +#define WM8994_IN1RN_TO_IN1R 0x0001 /* IN1RN_TO_IN1R */ +#define WM8994_IN1RN_TO_IN1R_MASK 0x0001 /* IN1RN_TO_IN1R */ +#define WM8994_IN1RN_TO_IN1R_SHIFT 0 /* IN1RN_TO_IN1R */ +#define WM8994_IN1RN_TO_IN1R_WIDTH 1 /* IN1RN_TO_IN1R */ + +/* + * R41 (0x29) - Input Mixer (3) + */ +#define WM8994_IN2L_TO_MIXINL 0x0100 /* IN2L_TO_MIXINL */ +#define WM8994_IN2L_TO_MIXINL_MASK 0x0100 /* IN2L_TO_MIXINL */ +#define WM8994_IN2L_TO_MIXINL_SHIFT 8 /* IN2L_TO_MIXINL */ +#define WM8994_IN2L_TO_MIXINL_WIDTH 1 /* IN2L_TO_MIXINL */ +#define WM8994_IN2L_MIXINL_VOL 0x0080 /* IN2L_MIXINL_VOL */ +#define WM8994_IN2L_MIXINL_VOL_MASK 0x0080 /* IN2L_MIXINL_VOL */ +#define WM8994_IN2L_MIXINL_VOL_SHIFT 7 /* IN2L_MIXINL_VOL */ +#define WM8994_IN2L_MIXINL_VOL_WIDTH 1 /* IN2L_MIXINL_VOL */ +#define WM8994_IN1L_TO_MIXINL 0x0020 /* IN1L_TO_MIXINL */ +#define WM8994_IN1L_TO_MIXINL_MASK 0x0020 /* IN1L_TO_MIXINL */ +#define WM8994_IN1L_TO_MIXINL_SHIFT 5 /* IN1L_TO_MIXINL */ +#define WM8994_IN1L_TO_MIXINL_WIDTH 1 /* IN1L_TO_MIXINL */ +#define WM8994_IN1L_MIXINL_VOL 0x0010 /* IN1L_MIXINL_VOL */ +#define WM8994_IN1L_MIXINL_VOL_MASK 0x0010 /* IN1L_MIXINL_VOL */ +#define WM8994_IN1L_MIXINL_VOL_SHIFT 4 /* IN1L_MIXINL_VOL */ +#define WM8994_IN1L_MIXINL_VOL_WIDTH 1 /* IN1L_MIXINL_VOL */ +#define WM8994_MIXOUTL_MIXINL_VOL_MASK 0x0007 /* MIXOUTL_MIXINL_VOL - [2:0] */ +#define WM8994_MIXOUTL_MIXINL_VOL_SHIFT 0 /* MIXOUTL_MIXINL_VOL - [2:0] */ +#define WM8994_MIXOUTL_MIXINL_VOL_WIDTH 3 /* MIXOUTL_MIXINL_VOL - [2:0] */ + +/* + * R42 (0x2A) - Input Mixer (4) + */ +#define WM8994_IN2R_TO_MIXINR 0x0100 /* IN2R_TO_MIXINR */ +#define WM8994_IN2R_TO_MIXINR_MASK 0x0100 /* IN2R_TO_MIXINR */ +#define WM8994_IN2R_TO_MIXINR_SHIFT 8 /* IN2R_TO_MIXINR */ +#define WM8994_IN2R_TO_MIXINR_WIDTH 1 /* IN2R_TO_MIXINR */ +#define WM8994_IN2R_MIXINR_VOL 0x0080 /* IN2R_MIXINR_VOL */ +#define WM8994_IN2R_MIXINR_VOL_MASK 0x0080 /* IN2R_MIXINR_VOL */ +#define WM8994_IN2R_MIXINR_VOL_SHIFT 7 /* IN2R_MIXINR_VOL */ +#define WM8994_IN2R_MIXINR_VOL_WIDTH 1 /* IN2R_MIXINR_VOL */ +#define WM8994_IN1R_TO_MIXINR 0x0020 /* IN1R_TO_MIXINR */ +#define WM8994_IN1R_TO_MIXINR_MASK 0x0020 /* IN1R_TO_MIXINR */ +#define WM8994_IN1R_TO_MIXINR_SHIFT 5 /* IN1R_TO_MIXINR */ +#define WM8994_IN1R_TO_MIXINR_WIDTH 1 /* IN1R_TO_MIXINR */ +#define WM8994_IN1R_MIXINR_VOL 0x0010 /* IN1R_MIXINR_VOL */ +#define WM8994_IN1R_MIXINR_VOL_MASK 0x0010 /* IN1R_MIXINR_VOL */ +#define WM8994_IN1R_MIXINR_VOL_SHIFT 4 /* IN1R_MIXINR_VOL */ +#define WM8994_IN1R_MIXINR_VOL_WIDTH 1 /* IN1R_MIXINR_VOL */ +#define WM8994_MIXOUTR_MIXINR_VOL_MASK 0x0007 /* MIXOUTR_MIXINR_VOL - [2:0] */ +#define WM8994_MIXOUTR_MIXINR_VOL_SHIFT 0 /* MIXOUTR_MIXINR_VOL - [2:0] */ +#define WM8994_MIXOUTR_MIXINR_VOL_WIDTH 3 /* MIXOUTR_MIXINR_VOL - [2:0] */ + +/* + * R43 (0x2B) - Input Mixer (5) + */ +#define WM8994_IN1LP_MIXINL_VOL_MASK 0x01C0 /* IN1LP_MIXINL_VOL - [8:6] */ +#define WM8994_IN1LP_MIXINL_VOL_SHIFT 6 /* IN1LP_MIXINL_VOL - [8:6] */ +#define 
WM8994_IN1LP_MIXINL_VOL_WIDTH 3 /* IN1LP_MIXINL_VOL - [8:6] */ +#define WM8994_IN2LRP_MIXINL_VOL_MASK 0x0007 /* IN2LRP_MIXINL_VOL - [2:0] */ +#define WM8994_IN2LRP_MIXINL_VOL_SHIFT 0 /* IN2LRP_MIXINL_VOL - [2:0] */ +#define WM8994_IN2LRP_MIXINL_VOL_WIDTH 3 /* IN2LRP_MIXINL_VOL - [2:0] */ + +/* + * R44 (0x2C) - Input Mixer (6) + */ +#define WM8994_IN1RP_MIXINR_VOL_MASK 0x01C0 /* IN1RP_MIXINR_VOL - [8:6] */ +#define WM8994_IN1RP_MIXINR_VOL_SHIFT 6 /* IN1RP_MIXINR_VOL - [8:6] */ +#define WM8994_IN1RP_MIXINR_VOL_WIDTH 3 /* IN1RP_MIXINR_VOL - [8:6] */ +#define WM8994_IN2LRP_MIXINR_VOL_MASK 0x0007 /* IN2LRP_MIXINR_VOL - [2:0] */ +#define WM8994_IN2LRP_MIXINR_VOL_SHIFT 0 /* IN2LRP_MIXINR_VOL - [2:0] */ +#define WM8994_IN2LRP_MIXINR_VOL_WIDTH 3 /* IN2LRP_MIXINR_VOL - [2:0] */ + +/* + * R45 (0x2D) - Output Mixer (1) + */ +#define WM8994_DAC1L_TO_HPOUT1L 0x0100 /* DAC1L_TO_HPOUT1L */ +#define WM8994_DAC1L_TO_HPOUT1L_MASK 0x0100 /* DAC1L_TO_HPOUT1L */ +#define WM8994_DAC1L_TO_HPOUT1L_SHIFT 8 /* DAC1L_TO_HPOUT1L */ +#define WM8994_DAC1L_TO_HPOUT1L_WIDTH 1 /* DAC1L_TO_HPOUT1L */ +#define WM8994_MIXINR_TO_MIXOUTL 0x0080 /* MIXINR_TO_MIXOUTL */ +#define WM8994_MIXINR_TO_MIXOUTL_MASK 0x0080 /* MIXINR_TO_MIXOUTL */ +#define WM8994_MIXINR_TO_MIXOUTL_SHIFT 7 /* MIXINR_TO_MIXOUTL */ +#define WM8994_MIXINR_TO_MIXOUTL_WIDTH 1 /* MIXINR_TO_MIXOUTL */ +#define WM8994_MIXINL_TO_MIXOUTL 0x0040 /* MIXINL_TO_MIXOUTL */ +#define WM8994_MIXINL_TO_MIXOUTL_MASK 0x0040 /* MIXINL_TO_MIXOUTL */ +#define WM8994_MIXINL_TO_MIXOUTL_SHIFT 6 /* MIXINL_TO_MIXOUTL */ +#define WM8994_MIXINL_TO_MIXOUTL_WIDTH 1 /* MIXINL_TO_MIXOUTL */ +#define WM8994_IN2RN_TO_MIXOUTL 0x0020 /* IN2RN_TO_MIXOUTL */ +#define WM8994_IN2RN_TO_MIXOUTL_MASK 0x0020 /* IN2RN_TO_MIXOUTL */ +#define WM8994_IN2RN_TO_MIXOUTL_SHIFT 5 /* IN2RN_TO_MIXOUTL */ +#define WM8994_IN2RN_TO_MIXOUTL_WIDTH 1 /* IN2RN_TO_MIXOUTL */ +#define WM8994_IN2LN_TO_MIXOUTL 0x0010 /* IN2LN_TO_MIXOUTL */ +#define WM8994_IN2LN_TO_MIXOUTL_MASK 0x0010 /* IN2LN_TO_MIXOUTL */ +#define WM8994_IN2LN_TO_MIXOUTL_SHIFT 4 /* IN2LN_TO_MIXOUTL */ +#define WM8994_IN2LN_TO_MIXOUTL_WIDTH 1 /* IN2LN_TO_MIXOUTL */ +#define WM8994_IN1R_TO_MIXOUTL 0x0008 /* IN1R_TO_MIXOUTL */ +#define WM8994_IN1R_TO_MIXOUTL_MASK 0x0008 /* IN1R_TO_MIXOUTL */ +#define WM8994_IN1R_TO_MIXOUTL_SHIFT 3 /* IN1R_TO_MIXOUTL */ +#define WM8994_IN1R_TO_MIXOUTL_WIDTH 1 /* IN1R_TO_MIXOUTL */ +#define WM8994_IN1L_TO_MIXOUTL 0x0004 /* IN1L_TO_MIXOUTL */ +#define WM8994_IN1L_TO_MIXOUTL_MASK 0x0004 /* IN1L_TO_MIXOUTL */ +#define WM8994_IN1L_TO_MIXOUTL_SHIFT 2 /* IN1L_TO_MIXOUTL */ +#define WM8994_IN1L_TO_MIXOUTL_WIDTH 1 /* IN1L_TO_MIXOUTL */ +#define WM8994_IN2LP_TO_MIXOUTL 0x0002 /* IN2LP_TO_MIXOUTL */ +#define WM8994_IN2LP_TO_MIXOUTL_MASK 0x0002 /* IN2LP_TO_MIXOUTL */ +#define WM8994_IN2LP_TO_MIXOUTL_SHIFT 1 /* IN2LP_TO_MIXOUTL */ +#define WM8994_IN2LP_TO_MIXOUTL_WIDTH 1 /* IN2LP_TO_MIXOUTL */ +#define WM8994_DAC1L_TO_MIXOUTL 0x0001 /* DAC1L_TO_MIXOUTL */ +#define WM8994_DAC1L_TO_MIXOUTL_MASK 0x0001 /* DAC1L_TO_MIXOUTL */ +#define WM8994_DAC1L_TO_MIXOUTL_SHIFT 0 /* DAC1L_TO_MIXOUTL */ +#define WM8994_DAC1L_TO_MIXOUTL_WIDTH 1 /* DAC1L_TO_MIXOUTL */ + +/* + * R46 (0x2E) - Output Mixer (2) + */ +#define WM8994_DAC1R_TO_HPOUT1R 0x0100 /* DAC1R_TO_HPOUT1R */ +#define WM8994_DAC1R_TO_HPOUT1R_MASK 0x0100 /* DAC1R_TO_HPOUT1R */ +#define WM8994_DAC1R_TO_HPOUT1R_SHIFT 8 /* DAC1R_TO_HPOUT1R */ +#define WM8994_DAC1R_TO_HPOUT1R_WIDTH 1 /* DAC1R_TO_HPOUT1R */ +#define WM8994_MIXINL_TO_MIXOUTR 0x0080 /* MIXINL_TO_MIXOUTR */ +#define 
WM8994_MIXINL_TO_MIXOUTR_MASK 0x0080 /* MIXINL_TO_MIXOUTR */ +#define WM8994_MIXINL_TO_MIXOUTR_SHIFT 7 /* MIXINL_TO_MIXOUTR */ +#define WM8994_MIXINL_TO_MIXOUTR_WIDTH 1 /* MIXINL_TO_MIXOUTR */ +#define WM8994_MIXINR_TO_MIXOUTR 0x0040 /* MIXINR_TO_MIXOUTR */ +#define WM8994_MIXINR_TO_MIXOUTR_MASK 0x0040 /* MIXINR_TO_MIXOUTR */ +#define WM8994_MIXINR_TO_MIXOUTR_SHIFT 6 /* MIXINR_TO_MIXOUTR */ +#define WM8994_MIXINR_TO_MIXOUTR_WIDTH 1 /* MIXINR_TO_MIXOUTR */ +#define WM8994_IN2LN_TO_MIXOUTR 0x0020 /* IN2LN_TO_MIXOUTR */ +#define WM8994_IN2LN_TO_MIXOUTR_MASK 0x0020 /* IN2LN_TO_MIXOUTR */ +#define WM8994_IN2LN_TO_MIXOUTR_SHIFT 5 /* IN2LN_TO_MIXOUTR */ +#define WM8994_IN2LN_TO_MIXOUTR_WIDTH 1 /* IN2LN_TO_MIXOUTR */ +#define WM8994_IN2RN_TO_MIXOUTR 0x0010 /* IN2RN_TO_MIXOUTR */ +#define WM8994_IN2RN_TO_MIXOUTR_MASK 0x0010 /* IN2RN_TO_MIXOUTR */ +#define WM8994_IN2RN_TO_MIXOUTR_SHIFT 4 /* IN2RN_TO_MIXOUTR */ +#define WM8994_IN2RN_TO_MIXOUTR_WIDTH 1 /* IN2RN_TO_MIXOUTR */ +#define WM8994_IN1L_TO_MIXOUTR 0x0008 /* IN1L_TO_MIXOUTR */ +#define WM8994_IN1L_TO_MIXOUTR_MASK 0x0008 /* IN1L_TO_MIXOUTR */ +#define WM8994_IN1L_TO_MIXOUTR_SHIFT 3 /* IN1L_TO_MIXOUTR */ +#define WM8994_IN1L_TO_MIXOUTR_WIDTH 1 /* IN1L_TO_MIXOUTR */ +#define WM8994_IN1R_TO_MIXOUTR 0x0004 /* IN1R_TO_MIXOUTR */ +#define WM8994_IN1R_TO_MIXOUTR_MASK 0x0004 /* IN1R_TO_MIXOUTR */ +#define WM8994_IN1R_TO_MIXOUTR_SHIFT 2 /* IN1R_TO_MIXOUTR */ +#define WM8994_IN1R_TO_MIXOUTR_WIDTH 1 /* IN1R_TO_MIXOUTR */ +#define WM8994_IN2RP_TO_MIXOUTR 0x0002 /* IN2RP_TO_MIXOUTR */ +#define WM8994_IN2RP_TO_MIXOUTR_MASK 0x0002 /* IN2RP_TO_MIXOUTR */ +#define WM8994_IN2RP_TO_MIXOUTR_SHIFT 1 /* IN2RP_TO_MIXOUTR */ +#define WM8994_IN2RP_TO_MIXOUTR_WIDTH 1 /* IN2RP_TO_MIXOUTR */ +#define WM8994_DAC1R_TO_MIXOUTR 0x0001 /* DAC1R_TO_MIXOUTR */ +#define WM8994_DAC1R_TO_MIXOUTR_MASK 0x0001 /* DAC1R_TO_MIXOUTR */ +#define WM8994_DAC1R_TO_MIXOUTR_SHIFT 0 /* DAC1R_TO_MIXOUTR */ +#define WM8994_DAC1R_TO_MIXOUTR_WIDTH 1 /* DAC1R_TO_MIXOUTR */ + +/* + * R47 (0x2F) - Output Mixer (3) + */ +#define WM8994_IN2LP_MIXOUTL_VOL_MASK 0x0E00 /* IN2LP_MIXOUTL_VOL - [11:9] */ +#define WM8994_IN2LP_MIXOUTL_VOL_SHIFT 9 /* IN2LP_MIXOUTL_VOL - [11:9] */ +#define WM8994_IN2LP_MIXOUTL_VOL_WIDTH 3 /* IN2LP_MIXOUTL_VOL - [11:9] */ +#define WM8994_IN2LN_MIXOUTL_VOL_MASK 0x01C0 /* IN2LN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN2LN_MIXOUTL_VOL_SHIFT 6 /* IN2LN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN2LN_MIXOUTL_VOL_WIDTH 3 /* IN2LN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN1R_MIXOUTL_VOL_MASK 0x0038 /* IN1R_MIXOUTL_VOL - [5:3] */ +#define WM8994_IN1R_MIXOUTL_VOL_SHIFT 3 /* IN1R_MIXOUTL_VOL - [5:3] */ +#define WM8994_IN1R_MIXOUTL_VOL_WIDTH 3 /* IN1R_MIXOUTL_VOL - [5:3] */ +#define WM8994_IN1L_MIXOUTL_VOL_MASK 0x0007 /* IN1L_MIXOUTL_VOL - [2:0] */ +#define WM8994_IN1L_MIXOUTL_VOL_SHIFT 0 /* IN1L_MIXOUTL_VOL - [2:0] */ +#define WM8994_IN1L_MIXOUTL_VOL_WIDTH 3 /* IN1L_MIXOUTL_VOL - [2:0] */ + +/* + * R48 (0x30) - Output Mixer (4) + */ +#define WM8994_IN2RP_MIXOUTR_VOL_MASK 0x0E00 /* IN2RP_MIXOUTR_VOL - [11:9] */ +#define WM8994_IN2RP_MIXOUTR_VOL_SHIFT 9 /* IN2RP_MIXOUTR_VOL - [11:9] */ +#define WM8994_IN2RP_MIXOUTR_VOL_WIDTH 3 /* IN2RP_MIXOUTR_VOL - [11:9] */ +#define WM8994_IN2RN_MIXOUTR_VOL_MASK 0x01C0 /* IN2RN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN2RN_MIXOUTR_VOL_SHIFT 6 /* IN2RN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN2RN_MIXOUTR_VOL_WIDTH 3 /* IN2RN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN1L_MIXOUTR_VOL_MASK 0x0038 /* IN1L_MIXOUTR_VOL - [5:3] */ +#define WM8994_IN1L_MIXOUTR_VOL_SHIFT 3 
/* IN1L_MIXOUTR_VOL - [5:3] */ +#define WM8994_IN1L_MIXOUTR_VOL_WIDTH 3 /* IN1L_MIXOUTR_VOL - [5:3] */ +#define WM8994_IN1R_MIXOUTR_VOL_MASK 0x0007 /* IN1R_MIXOUTR_VOL - [2:0] */ +#define WM8994_IN1R_MIXOUTR_VOL_SHIFT 0 /* IN1R_MIXOUTR_VOL - [2:0] */ +#define WM8994_IN1R_MIXOUTR_VOL_WIDTH 3 /* IN1R_MIXOUTR_VOL - [2:0] */ + +/* + * R49 (0x31) - Output Mixer (5) + */ +#define WM8994_DAC1L_MIXOUTL_VOL_MASK 0x0E00 /* DAC1L_MIXOUTL_VOL - [11:9] */ +#define WM8994_DAC1L_MIXOUTL_VOL_SHIFT 9 /* DAC1L_MIXOUTL_VOL - [11:9] */ +#define WM8994_DAC1L_MIXOUTL_VOL_WIDTH 3 /* DAC1L_MIXOUTL_VOL - [11:9] */ +#define WM8994_IN2RN_MIXOUTL_VOL_MASK 0x01C0 /* IN2RN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN2RN_MIXOUTL_VOL_SHIFT 6 /* IN2RN_MIXOUTL_VOL - [8:6] */ +#define WM8994_IN2RN_MIXOUTL_VOL_WIDTH 3 /* IN2RN_MIXOUTL_VOL - [8:6] */ +#define WM8994_MIXINR_MIXOUTL_VOL_MASK 0x0038 /* MIXINR_MIXOUTL_VOL - [5:3] */ +#define WM8994_MIXINR_MIXOUTL_VOL_SHIFT 3 /* MIXINR_MIXOUTL_VOL - [5:3] */ +#define WM8994_MIXINR_MIXOUTL_VOL_WIDTH 3 /* MIXINR_MIXOUTL_VOL - [5:3] */ +#define WM8994_MIXINL_MIXOUTL_VOL_MASK 0x0007 /* MIXINL_MIXOUTL_VOL - [2:0] */ +#define WM8994_MIXINL_MIXOUTL_VOL_SHIFT 0 /* MIXINL_MIXOUTL_VOL - [2:0] */ +#define WM8994_MIXINL_MIXOUTL_VOL_WIDTH 3 /* MIXINL_MIXOUTL_VOL - [2:0] */ + +/* + * R50 (0x32) - Output Mixer (6) + */ +#define WM8994_DAC1R_MIXOUTR_VOL_MASK 0x0E00 /* DAC1R_MIXOUTR_VOL - [11:9] */ +#define WM8994_DAC1R_MIXOUTR_VOL_SHIFT 9 /* DAC1R_MIXOUTR_VOL - [11:9] */ +#define WM8994_DAC1R_MIXOUTR_VOL_WIDTH 3 /* DAC1R_MIXOUTR_VOL - [11:9] */ +#define WM8994_IN2LN_MIXOUTR_VOL_MASK 0x01C0 /* IN2LN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN2LN_MIXOUTR_VOL_SHIFT 6 /* IN2LN_MIXOUTR_VOL - [8:6] */ +#define WM8994_IN2LN_MIXOUTR_VOL_WIDTH 3 /* IN2LN_MIXOUTR_VOL - [8:6] */ +#define WM8994_MIXINL_MIXOUTR_VOL_MASK 0x0038 /* MIXINL_MIXOUTR_VOL - [5:3] */ +#define WM8994_MIXINL_MIXOUTR_VOL_SHIFT 3 /* MIXINL_MIXOUTR_VOL - [5:3] */ +#define WM8994_MIXINL_MIXOUTR_VOL_WIDTH 3 /* MIXINL_MIXOUTR_VOL - [5:3] */ +#define WM8994_MIXINR_MIXOUTR_VOL_MASK 0x0007 /* MIXINR_MIXOUTR_VOL - [2:0] */ +#define WM8994_MIXINR_MIXOUTR_VOL_SHIFT 0 /* MIXINR_MIXOUTR_VOL - [2:0] */ +#define WM8994_MIXINR_MIXOUTR_VOL_WIDTH 3 /* MIXINR_MIXOUTR_VOL - [2:0] */ + +/* + * R51 (0x33) - HPOUT2 Mixer + */ +#define WM8994_IN2LRP_TO_HPOUT2 0x0020 /* IN2LRP_TO_HPOUT2 */ +#define WM8994_IN2LRP_TO_HPOUT2_MASK 0x0020 /* IN2LRP_TO_HPOUT2 */ +#define WM8994_IN2LRP_TO_HPOUT2_SHIFT 5 /* IN2LRP_TO_HPOUT2 */ +#define WM8994_IN2LRP_TO_HPOUT2_WIDTH 1 /* IN2LRP_TO_HPOUT2 */ +#define WM8994_MIXOUTLVOL_TO_HPOUT2 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTLVOL_TO_HPOUT2_MASK 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTLVOL_TO_HPOUT2_SHIFT 4 /* MIXOUTLVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTLVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTLVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTRVOL_TO_HPOUT2 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTRVOL_TO_HPOUT2_MASK 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTRVOL_TO_HPOUT2_SHIFT 3 /* MIXOUTRVOL_TO_HPOUT2 */ +#define WM8994_MIXOUTRVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTRVOL_TO_HPOUT2 */ + +/* + * R52 (0x34) - Line Mixer (1) + */ +#define WM8994_MIXOUTL_TO_LINEOUT1N 0x0040 /* MIXOUTL_TO_LINEOUT1N */ +#define WM8994_MIXOUTL_TO_LINEOUT1N_MASK 0x0040 /* MIXOUTL_TO_LINEOUT1N */ +#define WM8994_MIXOUTL_TO_LINEOUT1N_SHIFT 6 /* MIXOUTL_TO_LINEOUT1N */ +#define WM8994_MIXOUTL_TO_LINEOUT1N_WIDTH 1 /* MIXOUTL_TO_LINEOUT1N */ +#define WM8994_MIXOUTR_TO_LINEOUT1N 0x0020 /* MIXOUTR_TO_LINEOUT1N */ +#define 
WM8994_MIXOUTR_TO_LINEOUT1N_MASK 0x0020 /* MIXOUTR_TO_LINEOUT1N */ +#define WM8994_MIXOUTR_TO_LINEOUT1N_SHIFT 5 /* MIXOUTR_TO_LINEOUT1N */ +#define WM8994_MIXOUTR_TO_LINEOUT1N_WIDTH 1 /* MIXOUTR_TO_LINEOUT1N */ +#define WM8994_LINEOUT1_MODE 0x0010 /* LINEOUT1_MODE */ +#define WM8994_LINEOUT1_MODE_MASK 0x0010 /* LINEOUT1_MODE */ +#define WM8994_LINEOUT1_MODE_SHIFT 4 /* LINEOUT1_MODE */ +#define WM8994_LINEOUT1_MODE_WIDTH 1 /* LINEOUT1_MODE */ +#define WM8994_IN1R_TO_LINEOUT1P 0x0004 /* IN1R_TO_LINEOUT1P */ +#define WM8994_IN1R_TO_LINEOUT1P_MASK 0x0004 /* IN1R_TO_LINEOUT1P */ +#define WM8994_IN1R_TO_LINEOUT1P_SHIFT 2 /* IN1R_TO_LINEOUT1P */ +#define WM8994_IN1R_TO_LINEOUT1P_WIDTH 1 /* IN1R_TO_LINEOUT1P */ +#define WM8994_IN1L_TO_LINEOUT1P 0x0002 /* IN1L_TO_LINEOUT1P */ +#define WM8994_IN1L_TO_LINEOUT1P_MASK 0x0002 /* IN1L_TO_LINEOUT1P */ +#define WM8994_IN1L_TO_LINEOUT1P_SHIFT 1 /* IN1L_TO_LINEOUT1P */ +#define WM8994_IN1L_TO_LINEOUT1P_WIDTH 1 /* IN1L_TO_LINEOUT1P */ +#define WM8994_MIXOUTL_TO_LINEOUT1P 0x0001 /* MIXOUTL_TO_LINEOUT1P */ +#define WM8994_MIXOUTL_TO_LINEOUT1P_MASK 0x0001 /* MIXOUTL_TO_LINEOUT1P */ +#define WM8994_MIXOUTL_TO_LINEOUT1P_SHIFT 0 /* MIXOUTL_TO_LINEOUT1P */ +#define WM8994_MIXOUTL_TO_LINEOUT1P_WIDTH 1 /* MIXOUTL_TO_LINEOUT1P */ + +/* + * R53 (0x35) - Line Mixer (2) + */ +#define WM8994_MIXOUTR_TO_LINEOUT2N 0x0040 /* MIXOUTR_TO_LINEOUT2N */ +#define WM8994_MIXOUTR_TO_LINEOUT2N_MASK 0x0040 /* MIXOUTR_TO_LINEOUT2N */ +#define WM8994_MIXOUTR_TO_LINEOUT2N_SHIFT 6 /* MIXOUTR_TO_LINEOUT2N */ +#define WM8994_MIXOUTR_TO_LINEOUT2N_WIDTH 1 /* MIXOUTR_TO_LINEOUT2N */ +#define WM8994_MIXOUTL_TO_LINEOUT2N 0x0020 /* MIXOUTL_TO_LINEOUT2N */ +#define WM8994_MIXOUTL_TO_LINEOUT2N_MASK 0x0020 /* MIXOUTL_TO_LINEOUT2N */ +#define WM8994_MIXOUTL_TO_LINEOUT2N_SHIFT 5 /* MIXOUTL_TO_LINEOUT2N */ +#define WM8994_MIXOUTL_TO_LINEOUT2N_WIDTH 1 /* MIXOUTL_TO_LINEOUT2N */ +#define WM8994_LINEOUT2_MODE 0x0010 /* LINEOUT2_MODE */ +#define WM8994_LINEOUT2_MODE_MASK 0x0010 /* LINEOUT2_MODE */ +#define WM8994_LINEOUT2_MODE_SHIFT 4 /* LINEOUT2_MODE */ +#define WM8994_LINEOUT2_MODE_WIDTH 1 /* LINEOUT2_MODE */ +#define WM8994_IN1L_TO_LINEOUT2P 0x0004 /* IN1L_TO_LINEOUT2P */ +#define WM8994_IN1L_TO_LINEOUT2P_MASK 0x0004 /* IN1L_TO_LINEOUT2P */ +#define WM8994_IN1L_TO_LINEOUT2P_SHIFT 2 /* IN1L_TO_LINEOUT2P */ +#define WM8994_IN1L_TO_LINEOUT2P_WIDTH 1 /* IN1L_TO_LINEOUT2P */ +#define WM8994_IN1R_TO_LINEOUT2P 0x0002 /* IN1R_TO_LINEOUT2P */ +#define WM8994_IN1R_TO_LINEOUT2P_MASK 0x0002 /* IN1R_TO_LINEOUT2P */ +#define WM8994_IN1R_TO_LINEOUT2P_SHIFT 1 /* IN1R_TO_LINEOUT2P */ +#define WM8994_IN1R_TO_LINEOUT2P_WIDTH 1 /* IN1R_TO_LINEOUT2P */ +#define WM8994_MIXOUTR_TO_LINEOUT2P 0x0001 /* MIXOUTR_TO_LINEOUT2P */ +#define WM8994_MIXOUTR_TO_LINEOUT2P_MASK 0x0001 /* MIXOUTR_TO_LINEOUT2P */ +#define WM8994_MIXOUTR_TO_LINEOUT2P_SHIFT 0 /* MIXOUTR_TO_LINEOUT2P */ +#define WM8994_MIXOUTR_TO_LINEOUT2P_WIDTH 1 /* MIXOUTR_TO_LINEOUT2P */ + +/* + * R54 (0x36) - Speaker Mixer + */ +#define WM8994_DAC2L_TO_SPKMIXL 0x0200 /* DAC2L_TO_SPKMIXL */ +#define WM8994_DAC2L_TO_SPKMIXL_MASK 0x0200 /* DAC2L_TO_SPKMIXL */ +#define WM8994_DAC2L_TO_SPKMIXL_SHIFT 9 /* DAC2L_TO_SPKMIXL */ +#define WM8994_DAC2L_TO_SPKMIXL_WIDTH 1 /* DAC2L_TO_SPKMIXL */ +#define WM8994_DAC2R_TO_SPKMIXR 0x0100 /* DAC2R_TO_SPKMIXR */ +#define WM8994_DAC2R_TO_SPKMIXR_MASK 0x0100 /* DAC2R_TO_SPKMIXR */ +#define WM8994_DAC2R_TO_SPKMIXR_SHIFT 8 /* DAC2R_TO_SPKMIXR */ +#define WM8994_DAC2R_TO_SPKMIXR_WIDTH 1 /* DAC2R_TO_SPKMIXR */ +#define WM8994_MIXINL_TO_SPKMIXL 
0x0080 /* MIXINL_TO_SPKMIXL */ +#define WM8994_MIXINL_TO_SPKMIXL_MASK 0x0080 /* MIXINL_TO_SPKMIXL */ +#define WM8994_MIXINL_TO_SPKMIXL_SHIFT 7 /* MIXINL_TO_SPKMIXL */ +#define WM8994_MIXINL_TO_SPKMIXL_WIDTH 1 /* MIXINL_TO_SPKMIXL */ +#define WM8994_MIXINR_TO_SPKMIXR 0x0040 /* MIXINR_TO_SPKMIXR */ +#define WM8994_MIXINR_TO_SPKMIXR_MASK 0x0040 /* MIXINR_TO_SPKMIXR */ +#define WM8994_MIXINR_TO_SPKMIXR_SHIFT 6 /* MIXINR_TO_SPKMIXR */ +#define WM8994_MIXINR_TO_SPKMIXR_WIDTH 1 /* MIXINR_TO_SPKMIXR */ +#define WM8994_IN1LP_TO_SPKMIXL 0x0020 /* IN1LP_TO_SPKMIXL */ +#define WM8994_IN1LP_TO_SPKMIXL_MASK 0x0020 /* IN1LP_TO_SPKMIXL */ +#define WM8994_IN1LP_TO_SPKMIXL_SHIFT 5 /* IN1LP_TO_SPKMIXL */ +#define WM8994_IN1LP_TO_SPKMIXL_WIDTH 1 /* IN1LP_TO_SPKMIXL */ +#define WM8994_IN1RP_TO_SPKMIXR 0x0010 /* IN1RP_TO_SPKMIXR */ +#define WM8994_IN1RP_TO_SPKMIXR_MASK 0x0010 /* IN1RP_TO_SPKMIXR */ +#define WM8994_IN1RP_TO_SPKMIXR_SHIFT 4 /* IN1RP_TO_SPKMIXR */ +#define WM8994_IN1RP_TO_SPKMIXR_WIDTH 1 /* IN1RP_TO_SPKMIXR */ +#define WM8994_MIXOUTL_TO_SPKMIXL 0x0008 /* MIXOUTL_TO_SPKMIXL */ +#define WM8994_MIXOUTL_TO_SPKMIXL_MASK 0x0008 /* MIXOUTL_TO_SPKMIXL */ +#define WM8994_MIXOUTL_TO_SPKMIXL_SHIFT 3 /* MIXOUTL_TO_SPKMIXL */ +#define WM8994_MIXOUTL_TO_SPKMIXL_WIDTH 1 /* MIXOUTL_TO_SPKMIXL */ +#define WM8994_MIXOUTR_TO_SPKMIXR 0x0004 /* MIXOUTR_TO_SPKMIXR */ +#define WM8994_MIXOUTR_TO_SPKMIXR_MASK 0x0004 /* MIXOUTR_TO_SPKMIXR */ +#define WM8994_MIXOUTR_TO_SPKMIXR_SHIFT 2 /* MIXOUTR_TO_SPKMIXR */ +#define WM8994_MIXOUTR_TO_SPKMIXR_WIDTH 1 /* MIXOUTR_TO_SPKMIXR */ +#define WM8994_DAC1L_TO_SPKMIXL 0x0002 /* DAC1L_TO_SPKMIXL */ +#define WM8994_DAC1L_TO_SPKMIXL_MASK 0x0002 /* DAC1L_TO_SPKMIXL */ +#define WM8994_DAC1L_TO_SPKMIXL_SHIFT 1 /* DAC1L_TO_SPKMIXL */ +#define WM8994_DAC1L_TO_SPKMIXL_WIDTH 1 /* DAC1L_TO_SPKMIXL */ +#define WM8994_DAC1R_TO_SPKMIXR 0x0001 /* DAC1R_TO_SPKMIXR */ +#define WM8994_DAC1R_TO_SPKMIXR_MASK 0x0001 /* DAC1R_TO_SPKMIXR */ +#define WM8994_DAC1R_TO_SPKMIXR_SHIFT 0 /* DAC1R_TO_SPKMIXR */ +#define WM8994_DAC1R_TO_SPKMIXR_WIDTH 1 /* DAC1R_TO_SPKMIXR */ + +/* + * R55 (0x37) - Additional Control + */ +#define WM8994_LINEOUT1_FB 0x0080 /* LINEOUT1_FB */ +#define WM8994_LINEOUT1_FB_MASK 0x0080 /* LINEOUT1_FB */ +#define WM8994_LINEOUT1_FB_SHIFT 7 /* LINEOUT1_FB */ +#define WM8994_LINEOUT1_FB_WIDTH 1 /* LINEOUT1_FB */ +#define WM8994_LINEOUT2_FB 0x0040 /* LINEOUT2_FB */ +#define WM8994_LINEOUT2_FB_MASK 0x0040 /* LINEOUT2_FB */ +#define WM8994_LINEOUT2_FB_SHIFT 6 /* LINEOUT2_FB */ +#define WM8994_LINEOUT2_FB_WIDTH 1 /* LINEOUT2_FB */ +#define WM8994_VROI 0x0001 /* VROI */ +#define WM8994_VROI_MASK 0x0001 /* VROI */ +#define WM8994_VROI_SHIFT 0 /* VROI */ +#define WM8994_VROI_WIDTH 1 /* VROI */ + +/* + * R56 (0x38) - AntiPOP (1) + */ +#define WM8994_LINEOUT_VMID_BUF_ENA 0x0080 /* LINEOUT_VMID_BUF_ENA */ +#define WM8994_LINEOUT_VMID_BUF_ENA_MASK 0x0080 /* LINEOUT_VMID_BUF_ENA */ +#define WM8994_LINEOUT_VMID_BUF_ENA_SHIFT 7 /* LINEOUT_VMID_BUF_ENA */ +#define WM8994_LINEOUT_VMID_BUF_ENA_WIDTH 1 /* LINEOUT_VMID_BUF_ENA */ +#define WM8994_HPOUT2_IN_ENA 0x0040 /* HPOUT2_IN_ENA */ +#define WM8994_HPOUT2_IN_ENA_MASK 0x0040 /* HPOUT2_IN_ENA */ +#define WM8994_HPOUT2_IN_ENA_SHIFT 6 /* HPOUT2_IN_ENA */ +#define WM8994_HPOUT2_IN_ENA_WIDTH 1 /* HPOUT2_IN_ENA */ +#define WM8994_LINEOUT1_DISCH 0x0020 /* LINEOUT1_DISCH */ +#define WM8994_LINEOUT1_DISCH_MASK 0x0020 /* LINEOUT1_DISCH */ +#define WM8994_LINEOUT1_DISCH_SHIFT 5 /* LINEOUT1_DISCH */ +#define WM8994_LINEOUT1_DISCH_WIDTH 1 /* LINEOUT1_DISCH */ +#define 
WM8994_LINEOUT2_DISCH 0x0010 /* LINEOUT2_DISCH */ +#define WM8994_LINEOUT2_DISCH_MASK 0x0010 /* LINEOUT2_DISCH */ +#define WM8994_LINEOUT2_DISCH_SHIFT 4 /* LINEOUT2_DISCH */ +#define WM8994_LINEOUT2_DISCH_WIDTH 1 /* LINEOUT2_DISCH */ + +/* + * R57 (0x39) - AntiPOP (2) + */ +#define WM1811_JACKDET_MODE_MASK 0x0180 /* JACKDET_MODE - [8:7] */ +#define WM1811_JACKDET_MODE_SHIFT 7 /* JACKDET_MODE - [8:7] */ +#define WM1811_JACKDET_MODE_WIDTH 2 /* JACKDET_MODE - [8:7] */ +#define WM8994_MICB2_DISCH 0x0100 /* MICB2_DISCH */ +#define WM8994_MICB2_DISCH_MASK 0x0100 /* MICB2_DISCH */ +#define WM8994_MICB2_DISCH_SHIFT 8 /* MICB2_DISCH */ +#define WM8994_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */ +#define WM8994_MICB1_DISCH 0x0080 /* MICB1_DISCH */ +#define WM8994_MICB1_DISCH_MASK 0x0080 /* MICB1_DISCH */ +#define WM8994_MICB1_DISCH_SHIFT 7 /* MICB1_DISCH */ +#define WM8994_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */ +#define WM8994_VMID_RAMP_MASK 0x0060 /* VMID_RAMP - [6:5] */ +#define WM8994_VMID_RAMP_SHIFT 5 /* VMID_RAMP - [6:5] */ +#define WM8994_VMID_RAMP_WIDTH 2 /* VMID_RAMP - [6:5] */ +#define WM8994_VMID_BUF_ENA 0x0008 /* VMID_BUF_ENA */ +#define WM8994_VMID_BUF_ENA_MASK 0x0008 /* VMID_BUF_ENA */ +#define WM8994_VMID_BUF_ENA_SHIFT 3 /* VMID_BUF_ENA */ +#define WM8994_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */ +#define WM8994_STARTUP_BIAS_ENA 0x0004 /* STARTUP_BIAS_ENA */ +#define WM8994_STARTUP_BIAS_ENA_MASK 0x0004 /* STARTUP_BIAS_ENA */ +#define WM8994_STARTUP_BIAS_ENA_SHIFT 2 /* STARTUP_BIAS_ENA */ +#define WM8994_STARTUP_BIAS_ENA_WIDTH 1 /* STARTUP_BIAS_ENA */ +#define WM8994_BIAS_SRC 0x0002 /* BIAS_SRC */ +#define WM8994_BIAS_SRC_MASK 0x0002 /* BIAS_SRC */ +#define WM8994_BIAS_SRC_SHIFT 1 /* BIAS_SRC */ +#define WM8994_BIAS_SRC_WIDTH 1 /* BIAS_SRC */ +#define WM8994_VMID_DISCH 0x0001 /* VMID_DISCH */ +#define WM8994_VMID_DISCH_MASK 0x0001 /* VMID_DISCH */ +#define WM8994_VMID_DISCH_SHIFT 0 /* VMID_DISCH */ +#define WM8994_VMID_DISCH_WIDTH 1 /* VMID_DISCH */ + +/* + * R58 (0x3A) - MICBIAS + */ +#define WM8994_MICD_SCTHR_MASK 0x00C0 /* MICD_SCTHR - [7:6] */ +#define WM8994_MICD_SCTHR_SHIFT 6 /* MICD_SCTHR - [7:6] */ +#define WM8994_MICD_SCTHR_WIDTH 2 /* MICD_SCTHR - [7:6] */ +#define WM8994_MICD_THR_MASK 0x0038 /* MICD_THR - [5:3] */ +#define WM8994_MICD_THR_SHIFT 3 /* MICD_THR - [5:3] */ +#define WM8994_MICD_THR_WIDTH 3 /* MICD_THR - [5:3] */ +#define WM8994_MICD_ENA 0x0004 /* MICD_ENA */ +#define WM8994_MICD_ENA_MASK 0x0004 /* MICD_ENA */ +#define WM8994_MICD_ENA_SHIFT 2 /* MICD_ENA */ +#define WM8994_MICD_ENA_WIDTH 1 /* MICD_ENA */ +#define WM8994_MICB2_LVL 0x0002 /* MICB2_LVL */ +#define WM8994_MICB2_LVL_MASK 0x0002 /* MICB2_LVL */ +#define WM8994_MICB2_LVL_SHIFT 1 /* MICB2_LVL */ +#define WM8994_MICB2_LVL_WIDTH 1 /* MICB2_LVL */ +#define WM8994_MICB1_LVL 0x0001 /* MICB1_LVL */ +#define WM8994_MICB1_LVL_MASK 0x0001 /* MICB1_LVL */ +#define WM8994_MICB1_LVL_SHIFT 0 /* MICB1_LVL */ +#define WM8994_MICB1_LVL_WIDTH 1 /* MICB1_LVL */ + +/* + * R59 (0x3B) - LDO 1 + */ +#define WM8994_LDO1_VSEL_MASK 0x000E /* LDO1_VSEL - [3:1] */ +#define WM8994_LDO1_VSEL_SHIFT 1 /* LDO1_VSEL - [3:1] */ +#define WM8994_LDO1_VSEL_WIDTH 3 /* LDO1_VSEL - [3:1] */ +#define WM8994_LDO1_DISCH 0x0001 /* LDO1_DISCH */ +#define WM8994_LDO1_DISCH_MASK 0x0001 /* LDO1_DISCH */ +#define WM8994_LDO1_DISCH_SHIFT 0 /* LDO1_DISCH */ +#define WM8994_LDO1_DISCH_WIDTH 1 /* LDO1_DISCH */ + +/* + * R60 (0x3C) - LDO 2 + */ +#define WM8994_LDO2_VSEL_MASK 0x0006 /* LDO2_VSEL - [2:1] */ +#define WM8994_LDO2_VSEL_SHIFT 1 /* LDO2_VSEL - [2:1] */ 
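Every field in this header follows the same convention: a bare value for single-bit flags plus _MASK, _SHIFT and _WIDTH companions. A minimal standalone sketch of how such triplets are typically consumed follows; the field_get()/field_set() helpers, the example register value, and main() are illustrative assumptions and not part of this header or of any kernel API — only the two LDO1_VSEL macros are copied verbatim from the R59 (0x3B) block above.

/*
 * Illustration only: consuming the *_MASK / *_SHIFT triplets defined above.
 * The helpers below are hypothetical; the two macros mirror the R59 block.
 */
#include <stdio.h>

#define WM8994_LDO1_VSEL_MASK   0x000E  /* LDO1_VSEL - [3:1] */
#define WM8994_LDO1_VSEL_SHIFT  1       /* LDO1_VSEL - [3:1] */

/* Extract a bit field from a 16-bit register value. */
static unsigned int field_get(unsigned int reg, unsigned int mask,
			      unsigned int shift)
{
	return (reg & mask) >> shift;
}

/* Replace a bit field, leaving the other bits of the register untouched. */
static unsigned int field_set(unsigned int reg, unsigned int mask,
			      unsigned int shift, unsigned int val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	unsigned int r59 = 0x0001;	/* example: LDO1_DISCH already set */

	/* Program LDO1_VSEL = 5 without disturbing the other bits of R59. */
	r59 = field_set(r59, WM8994_LDO1_VSEL_MASK, WM8994_LDO1_VSEL_SHIFT, 5);
	printf("R59 = 0x%04X, LDO1_VSEL = %u\n", r59,
	       field_get(r59, WM8994_LDO1_VSEL_MASK, WM8994_LDO1_VSEL_SHIFT));
	return 0;
}

In driver code the same read-modify-write pattern would normally go through the codec's register I/O layer rather than plain integers, but the mask-and-shift arithmetic on these constants is identical.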
+#define WM8994_LDO2_VSEL_WIDTH 2 /* LDO2_VSEL - [2:1] */ +#define WM8994_LDO2_DISCH 0x0001 /* LDO2_DISCH */ +#define WM8994_LDO2_DISCH_MASK 0x0001 /* LDO2_DISCH */ +#define WM8994_LDO2_DISCH_SHIFT 0 /* LDO2_DISCH */ +#define WM8994_LDO2_DISCH_WIDTH 1 /* LDO2_DISCH */ + +/* + * R61 (0x3D) - MICBIAS1 + */ +#define WM8958_MICB1_RATE 0x0020 /* MICB1_RATE */ +#define WM8958_MICB1_RATE_MASK 0x0020 /* MICB1_RATE */ +#define WM8958_MICB1_RATE_SHIFT 5 /* MICB1_RATE */ +#define WM8958_MICB1_RATE_WIDTH 1 /* MICB1_RATE */ +#define WM8958_MICB1_MODE 0x0010 /* MICB1_MODE */ +#define WM8958_MICB1_MODE_MASK 0x0010 /* MICB1_MODE */ +#define WM8958_MICB1_MODE_SHIFT 4 /* MICB1_MODE */ +#define WM8958_MICB1_MODE_WIDTH 1 /* MICB1_MODE */ +#define WM8958_MICB1_LVL_MASK 0x000E /* MICB1_LVL - [3:1] */ +#define WM8958_MICB1_LVL_SHIFT 1 /* MICB1_LVL - [3:1] */ +#define WM8958_MICB1_LVL_WIDTH 3 /* MICB1_LVL - [3:1] */ +#define WM8958_MICB1_DISCH 0x0001 /* MICB1_DISCH */ +#define WM8958_MICB1_DISCH_MASK 0x0001 /* MICB1_DISCH */ +#define WM8958_MICB1_DISCH_SHIFT 0 /* MICB1_DISCH */ +#define WM8958_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */ + +/* + * R62 (0x3E) - MICBIAS2 + */ +#define WM8958_MICB2_RATE 0x0020 /* MICB2_RATE */ +#define WM8958_MICB2_RATE_MASK 0x0020 /* MICB2_RATE */ +#define WM8958_MICB2_RATE_SHIFT 5 /* MICB2_RATE */ +#define WM8958_MICB2_RATE_WIDTH 1 /* MICB2_RATE */ +#define WM8958_MICB2_MODE 0x0010 /* MICB2_MODE */ +#define WM8958_MICB2_MODE_MASK 0x0010 /* MICB2_MODE */ +#define WM8958_MICB2_MODE_SHIFT 4 /* MICB2_MODE */ +#define WM8958_MICB2_MODE_WIDTH 1 /* MICB2_MODE */ +#define WM8958_MICB2_LVL_MASK 0x000E /* MICB2_LVL - [3:1] */ +#define WM8958_MICB2_LVL_SHIFT 1 /* MICB2_LVL - [3:1] */ +#define WM8958_MICB2_LVL_WIDTH 3 /* MICB2_LVL - [3:1] */ +#define WM8958_MICB2_DISCH 0x0001 /* MICB2_DISCH */ +#define WM8958_MICB2_DISCH_MASK 0x0001 /* MICB2_DISCH */ +#define WM8958_MICB2_DISCH_SHIFT 0 /* MICB2_DISCH */ +#define WM8958_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */ + +/* + * R210 (0xD2) - Mic Detect 3 + */ +#define WM8958_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_VALID 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_MASK 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_SHIFT 1 /* MICD_VALID */ +#define WM8958_MICD_VALID_WIDTH 1 /* MICD_VALID */ +#define WM8958_MICD_STS 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_MASK 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_SHIFT 0 /* MICD_STS */ +#define WM8958_MICD_STS_WIDTH 1 /* MICD_STS */ + +/* + * R76 (0x4C) - Charge Pump (1) + */ +#define WM8994_CP_ENA 0x8000 /* CP_ENA */ +#define WM8994_CP_ENA_MASK 0x8000 /* CP_ENA */ +#define WM8994_CP_ENA_SHIFT 15 /* CP_ENA */ +#define WM8994_CP_ENA_WIDTH 1 /* CP_ENA */ + +/* + * R77 (0x4D) - Charge Pump (2) + */ +#define WM8958_CP_DISCH 0x8000 /* CP_DISCH */ +#define WM8958_CP_DISCH_MASK 0x8000 /* CP_DISCH */ +#define WM8958_CP_DISCH_SHIFT 15 /* CP_DISCH */ +#define WM8958_CP_DISCH_WIDTH 1 /* CP_DISCH */ + +/* + * R81 (0x51) - Class W (1) + */ +#define WM8994_CP_DYN_SRC_SEL_MASK 0x0300 /* CP_DYN_SRC_SEL - [9:8] */ +#define WM8994_CP_DYN_SRC_SEL_SHIFT 8 /* CP_DYN_SRC_SEL - [9:8] */ +#define WM8994_CP_DYN_SRC_SEL_WIDTH 2 /* CP_DYN_SRC_SEL - [9:8] */ +#define WM8994_CP_DYN_PWR 0x0001 /* CP_DYN_PWR */ +#define WM8994_CP_DYN_PWR_MASK 0x0001 /* CP_DYN_PWR */ +#define WM8994_CP_DYN_PWR_SHIFT 0 /* CP_DYN_PWR */ +#define WM8994_CP_DYN_PWR_WIDTH 1 /* CP_DYN_PWR */ + +/* + * R84 (0x54) - DC 
Servo (1) + */ +#define WM8994_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */ +#define WM8994_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */ +#define WM8994_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */ +#define WM8994_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */ +#define WM8994_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */ +#define WM8994_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */ +#define WM8994_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */ +#define WM8994_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */ +#define WM8994_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */ +#define WM8994_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */ +#define WM8994_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */ +#define WM8994_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */ +#define WM8994_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */ +#define WM8994_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */ +#define WM8994_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */ +#define WM8994_DCS_TRIG_SERIES_0_WIDTH 1 /* DCS_TRIG_SERIES_0 */ +#define WM8994_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */ +#define WM8994_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */ +#define WM8994_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */ +#define WM8994_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */ +#define WM8994_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */ +#define WM8994_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */ +#define WM8994_DCS_TRIG_STARTUP_0_SHIFT 4 /* DCS_TRIG_STARTUP_0 */ +#define WM8994_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */ +#define WM8994_DCS_TRIG_DAC_WR_1 0x0008 /* DCS_TRIG_DAC_WR_1 */ +#define WM8994_DCS_TRIG_DAC_WR_1_MASK 0x0008 /* DCS_TRIG_DAC_WR_1 */ +#define WM8994_DCS_TRIG_DAC_WR_1_SHIFT 3 /* DCS_TRIG_DAC_WR_1 */ +#define WM8994_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */ +#define WM8994_DCS_TRIG_DAC_WR_0 0x0004 /* DCS_TRIG_DAC_WR_0 */ +#define WM8994_DCS_TRIG_DAC_WR_0_MASK 0x0004 /* DCS_TRIG_DAC_WR_0 */ +#define WM8994_DCS_TRIG_DAC_WR_0_SHIFT 2 /* DCS_TRIG_DAC_WR_0 */ +#define WM8994_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */ +#define WM8994_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */ +#define WM8994_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */ +#define WM8994_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */ +#define WM8994_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */ +#define WM8994_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */ +#define WM8994_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */ +#define WM8994_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */ +#define WM8994_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */ + +/* + * R85 (0x55) - DC Servo (2) + */ +#define WM8994_DCS_SERIES_NO_01_MASK 0x0FE0 /* DCS_SERIES_NO_01 - [11:5] */ +#define WM8994_DCS_SERIES_NO_01_SHIFT 5 /* DCS_SERIES_NO_01 - [11:5] */ +#define WM8994_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [11:5] */ +#define WM8994_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */ +#define WM8994_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */ +#define WM8994_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */ + +/* + * R87 (0x57) - DC Servo (4) + */ +#define WM8994_DCS_DAC_WR_VAL_1_MASK 0xFF00 /* DCS_DAC_WR_VAL_1 - [15:8] */ +#define WM8994_DCS_DAC_WR_VAL_1_SHIFT 8 /* DCS_DAC_WR_VAL_1 - [15:8] */ +#define WM8994_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [15:8] */ +#define WM8994_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */ +#define WM8994_DCS_DAC_WR_VAL_0_SHIFT 0 /* 
DCS_DAC_WR_VAL_0 - [7:0] */ +#define WM8994_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */ + +/* + * R88 (0x58) - DC Servo Readback + */ +#define WM8994_DCS_CAL_COMPLETE_MASK 0x0300 /* DCS_CAL_COMPLETE - [9:8] */ +#define WM8994_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [9:8] */ +#define WM8994_DCS_CAL_COMPLETE_WIDTH 2 /* DCS_CAL_COMPLETE - [9:8] */ +#define WM8994_DCS_DAC_WR_COMPLETE_MASK 0x0030 /* DCS_DAC_WR_COMPLETE - [5:4] */ +#define WM8994_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - [5:4] */ +#define WM8994_DCS_DAC_WR_COMPLETE_WIDTH 2 /* DCS_DAC_WR_COMPLETE - [5:4] */ +#define WM8994_DCS_STARTUP_COMPLETE_MASK 0x0003 /* DCS_STARTUP_COMPLETE - [1:0] */ +#define WM8994_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [1:0] */ +#define WM8994_DCS_STARTUP_COMPLETE_WIDTH 2 /* DCS_STARTUP_COMPLETE - [1:0] */ + +/* + * R96 (0x60) - Analogue HP (1) + */ +#define WM1811_HPOUT1_ATTN 0x0100 /* HPOUT1_ATTN */ +#define WM1811_HPOUT1_ATTN_MASK 0x0100 /* HPOUT1_ATTN */ +#define WM1811_HPOUT1_ATTN_SHIFT 8 /* HPOUT1_ATTN */ +#define WM1811_HPOUT1_ATTN_WIDTH 1 /* HPOUT1_ATTN */ +#define WM8994_HPOUT1L_RMV_SHORT 0x0080 /* HPOUT1L_RMV_SHORT */ +#define WM8994_HPOUT1L_RMV_SHORT_MASK 0x0080 /* HPOUT1L_RMV_SHORT */ +#define WM8994_HPOUT1L_RMV_SHORT_SHIFT 7 /* HPOUT1L_RMV_SHORT */ +#define WM8994_HPOUT1L_RMV_SHORT_WIDTH 1 /* HPOUT1L_RMV_SHORT */ +#define WM8994_HPOUT1L_OUTP 0x0040 /* HPOUT1L_OUTP */ +#define WM8994_HPOUT1L_OUTP_MASK 0x0040 /* HPOUT1L_OUTP */ +#define WM8994_HPOUT1L_OUTP_SHIFT 6 /* HPOUT1L_OUTP */ +#define WM8994_HPOUT1L_OUTP_WIDTH 1 /* HPOUT1L_OUTP */ +#define WM8994_HPOUT1L_DLY 0x0020 /* HPOUT1L_DLY */ +#define WM8994_HPOUT1L_DLY_MASK 0x0020 /* HPOUT1L_DLY */ +#define WM8994_HPOUT1L_DLY_SHIFT 5 /* HPOUT1L_DLY */ +#define WM8994_HPOUT1L_DLY_WIDTH 1 /* HPOUT1L_DLY */ +#define WM8994_HPOUT1R_RMV_SHORT 0x0008 /* HPOUT1R_RMV_SHORT */ +#define WM8994_HPOUT1R_RMV_SHORT_MASK 0x0008 /* HPOUT1R_RMV_SHORT */ +#define WM8994_HPOUT1R_RMV_SHORT_SHIFT 3 /* HPOUT1R_RMV_SHORT */ +#define WM8994_HPOUT1R_RMV_SHORT_WIDTH 1 /* HPOUT1R_RMV_SHORT */ +#define WM8994_HPOUT1R_OUTP 0x0004 /* HPOUT1R_OUTP */ +#define WM8994_HPOUT1R_OUTP_MASK 0x0004 /* HPOUT1R_OUTP */ +#define WM8994_HPOUT1R_OUTP_SHIFT 2 /* HPOUT1R_OUTP */ +#define WM8994_HPOUT1R_OUTP_WIDTH 1 /* HPOUT1R_OUTP */ +#define WM8994_HPOUT1R_DLY 0x0002 /* HPOUT1R_DLY */ +#define WM8994_HPOUT1R_DLY_MASK 0x0002 /* HPOUT1R_DLY */ +#define WM8994_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */ +#define WM8994_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */ + +/* + * R208 (0xD0) - Mic Detect 1 + */ +#define WM8958_MICD_BIAS_STARTTIME_MASK 0xF000 /* MICD_BIAS_STARTTIME - [15:12] */ +#define WM8958_MICD_BIAS_STARTTIME_SHIFT 12 /* MICD_BIAS_STARTTIME - [15:12] */ +#define WM8958_MICD_BIAS_STARTTIME_WIDTH 4 /* MICD_BIAS_STARTTIME - [15:12] */ +#define WM8958_MICD_RATE_MASK 0x0F00 /* MICD_RATE - [11:8] */ +#define WM8958_MICD_RATE_SHIFT 8 /* MICD_RATE - [11:8] */ +#define WM8958_MICD_RATE_WIDTH 4 /* MICD_RATE - [11:8] */ +#define WM8958_MICD_DBTIME 0x0002 /* MICD_DBTIME */ +#define WM8958_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */ +#define WM8958_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */ +#define WM8958_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */ +#define WM8958_MICD_ENA 0x0001 /* MICD_ENA */ +#define WM8958_MICD_ENA_MASK 0x0001 /* MICD_ENA */ +#define WM8958_MICD_ENA_SHIFT 0 /* MICD_ENA */ +#define WM8958_MICD_ENA_WIDTH 1 /* MICD_ENA */ + +/* + * R209 (0xD1) - Mic Detect 2 + */ +#define WM8958_MICD_LVL_SEL_MASK 0x00FF /* MICD_LVL_SEL - [7:0] */ +#define 
WM8958_MICD_LVL_SEL_SHIFT 0 /* MICD_LVL_SEL - [7:0] */ +#define WM8958_MICD_LVL_SEL_WIDTH 8 /* MICD_LVL_SEL - [7:0] */ + +/* + * R210 (0xD2) - Mic Detect 3 + */ +#define WM8958_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */ +#define WM8958_MICD_VALID 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_MASK 0x0002 /* MICD_VALID */ +#define WM8958_MICD_VALID_SHIFT 1 /* MICD_VALID */ +#define WM8958_MICD_VALID_WIDTH 1 /* MICD_VALID */ +#define WM8958_MICD_STS 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_MASK 0x0001 /* MICD_STS */ +#define WM8958_MICD_STS_SHIFT 0 /* MICD_STS */ +#define WM8958_MICD_STS_WIDTH 1 /* MICD_STS */ + +/* + * R256 (0x100) - Chip Revision + */ +#define WM8994_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */ +#define WM8994_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */ +#define WM8994_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */ + +/* + * R257 (0x101) - Control Interface + */ +#define WM8994_SPI_CONTRD 0x0040 /* SPI_CONTRD */ +#define WM8994_SPI_CONTRD_MASK 0x0040 /* SPI_CONTRD */ +#define WM8994_SPI_CONTRD_SHIFT 6 /* SPI_CONTRD */ +#define WM8994_SPI_CONTRD_WIDTH 1 /* SPI_CONTRD */ +#define WM8994_SPI_4WIRE 0x0020 /* SPI_4WIRE */ +#define WM8994_SPI_4WIRE_MASK 0x0020 /* SPI_4WIRE */ +#define WM8994_SPI_4WIRE_SHIFT 5 /* SPI_4WIRE */ +#define WM8994_SPI_4WIRE_WIDTH 1 /* SPI_4WIRE */ +#define WM8994_SPI_CFG 0x0010 /* SPI_CFG */ +#define WM8994_SPI_CFG_MASK 0x0010 /* SPI_CFG */ +#define WM8994_SPI_CFG_SHIFT 4 /* SPI_CFG */ +#define WM8994_SPI_CFG_WIDTH 1 /* SPI_CFG */ +#define WM8994_AUTO_INC 0x0004 /* AUTO_INC */ +#define WM8994_AUTO_INC_MASK 0x0004 /* AUTO_INC */ +#define WM8994_AUTO_INC_SHIFT 2 /* AUTO_INC */ +#define WM8994_AUTO_INC_WIDTH 1 /* AUTO_INC */ + +/* + * R272 (0x110) - Write Sequencer Ctrl (1) + */ +#define WM8994_WSEQ_ENA 0x8000 /* WSEQ_ENA */ +#define WM8994_WSEQ_ENA_MASK 0x8000 /* WSEQ_ENA */ +#define WM8994_WSEQ_ENA_SHIFT 15 /* WSEQ_ENA */ +#define WM8994_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */ +#define WM8994_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */ +#define WM8994_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */ +#define WM8994_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */ +#define WM8994_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */ +#define WM8994_WSEQ_START 0x0100 /* WSEQ_START */ +#define WM8994_WSEQ_START_MASK 0x0100 /* WSEQ_START */ +#define WM8994_WSEQ_START_SHIFT 8 /* WSEQ_START */ +#define WM8994_WSEQ_START_WIDTH 1 /* WSEQ_START */ +#define WM8994_WSEQ_START_INDEX_MASK 0x007F /* WSEQ_START_INDEX - [6:0] */ +#define WM8994_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [6:0] */ +#define WM8994_WSEQ_START_INDEX_WIDTH 7 /* WSEQ_START_INDEX - [6:0] */ + +/* + * R273 (0x111) - Write Sequencer Ctrl (2) + */ +#define WM8994_WSEQ_BUSY 0x0100 /* WSEQ_BUSY */ +#define WM8994_WSEQ_BUSY_MASK 0x0100 /* WSEQ_BUSY */ +#define WM8994_WSEQ_BUSY_SHIFT 8 /* WSEQ_BUSY */ +#define WM8994_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */ +#define WM8994_WSEQ_CURRENT_INDEX_MASK 0x007F /* WSEQ_CURRENT_INDEX - [6:0] */ +#define WM8994_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [6:0] */ +#define WM8994_WSEQ_CURRENT_INDEX_WIDTH 7 /* WSEQ_CURRENT_INDEX - [6:0] */ + +/* + * R512 (0x200) - AIF1 Clocking (1) + */ +#define WM8994_AIF1CLK_SRC_MASK 0x0018 /* AIF1CLK_SRC - [4:3] */ +#define WM8994_AIF1CLK_SRC_SHIFT 3 /* AIF1CLK_SRC - [4:3] */ +#define WM8994_AIF1CLK_SRC_WIDTH 2 /* AIF1CLK_SRC - [4:3] */ +#define WM8994_AIF1CLK_INV 0x0004 /* AIF1CLK_INV */ +#define WM8994_AIF1CLK_INV_MASK 0x0004 /* AIF1CLK_INV */ +#define 
WM8994_AIF1CLK_INV_SHIFT 2 /* AIF1CLK_INV */ +#define WM8994_AIF1CLK_INV_WIDTH 1 /* AIF1CLK_INV */ +#define WM8994_AIF1CLK_DIV 0x0002 /* AIF1CLK_DIV */ +#define WM8994_AIF1CLK_DIV_MASK 0x0002 /* AIF1CLK_DIV */ +#define WM8994_AIF1CLK_DIV_SHIFT 1 /* AIF1CLK_DIV */ +#define WM8994_AIF1CLK_DIV_WIDTH 1 /* AIF1CLK_DIV */ +#define WM8994_AIF1CLK_ENA 0x0001 /* AIF1CLK_ENA */ +#define WM8994_AIF1CLK_ENA_MASK 0x0001 /* AIF1CLK_ENA */ +#define WM8994_AIF1CLK_ENA_SHIFT 0 /* AIF1CLK_ENA */ +#define WM8994_AIF1CLK_ENA_WIDTH 1 /* AIF1CLK_ENA */ + +/* + * R513 (0x201) - AIF1 Clocking (2) + */ +#define WM8994_AIF1DAC_DIV_MASK 0x0038 /* AIF1DAC_DIV - [5:3] */ +#define WM8994_AIF1DAC_DIV_SHIFT 3 /* AIF1DAC_DIV - [5:3] */ +#define WM8994_AIF1DAC_DIV_WIDTH 3 /* AIF1DAC_DIV - [5:3] */ +#define WM8994_AIF1ADC_DIV_MASK 0x0007 /* AIF1ADC_DIV - [2:0] */ +#define WM8994_AIF1ADC_DIV_SHIFT 0 /* AIF1ADC_DIV - [2:0] */ +#define WM8994_AIF1ADC_DIV_WIDTH 3 /* AIF1ADC_DIV - [2:0] */ + +/* + * R516 (0x204) - AIF2 Clocking (1) + */ +#define WM8994_AIF2CLK_SRC_MASK 0x0018 /* AIF2CLK_SRC - [4:3] */ +#define WM8994_AIF2CLK_SRC_SHIFT 3 /* AIF2CLK_SRC - [4:3] */ +#define WM8994_AIF2CLK_SRC_WIDTH 2 /* AIF2CLK_SRC - [4:3] */ +#define WM8994_AIF2CLK_INV 0x0004 /* AIF2CLK_INV */ +#define WM8994_AIF2CLK_INV_MASK 0x0004 /* AIF2CLK_INV */ +#define WM8994_AIF2CLK_INV_SHIFT 2 /* AIF2CLK_INV */ +#define WM8994_AIF2CLK_INV_WIDTH 1 /* AIF2CLK_INV */ +#define WM8994_AIF2CLK_DIV 0x0002 /* AIF2CLK_DIV */ +#define WM8994_AIF2CLK_DIV_MASK 0x0002 /* AIF2CLK_DIV */ +#define WM8994_AIF2CLK_DIV_SHIFT 1 /* AIF2CLK_DIV */ +#define WM8994_AIF2CLK_DIV_WIDTH 1 /* AIF2CLK_DIV */ +#define WM8994_AIF2CLK_ENA 0x0001 /* AIF2CLK_ENA */ +#define WM8994_AIF2CLK_ENA_MASK 0x0001 /* AIF2CLK_ENA */ +#define WM8994_AIF2CLK_ENA_SHIFT 0 /* AIF2CLK_ENA */ +#define WM8994_AIF2CLK_ENA_WIDTH 1 /* AIF2CLK_ENA */ + +/* + * R517 (0x205) - AIF2 Clocking (2) + */ +#define WM8994_AIF2DAC_DIV_MASK 0x0038 /* AIF2DAC_DIV - [5:3] */ +#define WM8994_AIF2DAC_DIV_SHIFT 3 /* AIF2DAC_DIV - [5:3] */ +#define WM8994_AIF2DAC_DIV_WIDTH 3 /* AIF2DAC_DIV - [5:3] */ +#define WM8994_AIF2ADC_DIV_MASK 0x0007 /* AIF2ADC_DIV - [2:0] */ +#define WM8994_AIF2ADC_DIV_SHIFT 0 /* AIF2ADC_DIV - [2:0] */ +#define WM8994_AIF2ADC_DIV_WIDTH 3 /* AIF2ADC_DIV - [2:0] */ + +/* + * R520 (0x208) - Clocking (1) + */ +#define WM8958_DSP2CLK_ENA 0x4000 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_ENA_MASK 0x4000 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_ENA_SHIFT 14 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_ENA_WIDTH 1 /* DSP2CLK_ENA */ +#define WM8958_DSP2CLK_SRC 0x1000 /* DSP2CLK_SRC */ +#define WM8958_DSP2CLK_SRC_MASK 0x1000 /* DSP2CLK_SRC */ +#define WM8958_DSP2CLK_SRC_SHIFT 12 /* DSP2CLK_SRC */ +#define WM8958_DSP2CLK_SRC_WIDTH 1 /* DSP2CLK_SRC */ +#define WM8994_TOCLK_ENA 0x0010 /* TOCLK_ENA */ +#define WM8994_TOCLK_ENA_MASK 0x0010 /* TOCLK_ENA */ +#define WM8994_TOCLK_ENA_SHIFT 4 /* TOCLK_ENA */ +#define WM8994_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */ +#define WM8994_AIF1DSPCLK_ENA 0x0008 /* AIF1DSPCLK_ENA */ +#define WM8994_AIF1DSPCLK_ENA_MASK 0x0008 /* AIF1DSPCLK_ENA */ +#define WM8994_AIF1DSPCLK_ENA_SHIFT 3 /* AIF1DSPCLK_ENA */ +#define WM8994_AIF1DSPCLK_ENA_WIDTH 1 /* AIF1DSPCLK_ENA */ +#define WM8994_AIF2DSPCLK_ENA 0x0004 /* AIF2DSPCLK_ENA */ +#define WM8994_AIF2DSPCLK_ENA_MASK 0x0004 /* AIF2DSPCLK_ENA */ +#define WM8994_AIF2DSPCLK_ENA_SHIFT 2 /* AIF2DSPCLK_ENA */ +#define WM8994_AIF2DSPCLK_ENA_WIDTH 1 /* AIF2DSPCLK_ENA */ +#define WM8994_SYSDSPCLK_ENA 0x0002 /* SYSDSPCLK_ENA */ +#define WM8994_SYSDSPCLK_ENA_MASK 
0x0002 /* SYSDSPCLK_ENA */ +#define WM8994_SYSDSPCLK_ENA_SHIFT 1 /* SYSDSPCLK_ENA */ +#define WM8994_SYSDSPCLK_ENA_WIDTH 1 /* SYSDSPCLK_ENA */ +#define WM8994_SYSCLK_SRC 0x0001 /* SYSCLK_SRC */ +#define WM8994_SYSCLK_SRC_MASK 0x0001 /* SYSCLK_SRC */ +#define WM8994_SYSCLK_SRC_SHIFT 0 /* SYSCLK_SRC */ +#define WM8994_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */ + +/* + * R521 (0x209) - Clocking (2) + */ +#define WM8994_TOCLK_DIV_MASK 0x0700 /* TOCLK_DIV - [10:8] */ +#define WM8994_TOCLK_DIV_SHIFT 8 /* TOCLK_DIV - [10:8] */ +#define WM8994_TOCLK_DIV_WIDTH 3 /* TOCLK_DIV - [10:8] */ +#define WM8994_DBCLK_DIV_MASK 0x0070 /* DBCLK_DIV - [6:4] */ +#define WM8994_DBCLK_DIV_SHIFT 4 /* DBCLK_DIV - [6:4] */ +#define WM8994_DBCLK_DIV_WIDTH 3 /* DBCLK_DIV - [6:4] */ +#define WM8994_OPCLK_DIV_MASK 0x0007 /* OPCLK_DIV - [2:0] */ +#define WM8994_OPCLK_DIV_SHIFT 0 /* OPCLK_DIV - [2:0] */ +#define WM8994_OPCLK_DIV_WIDTH 3 /* OPCLK_DIV - [2:0] */ + +/* + * R528 (0x210) - AIF1 Rate + */ +#define WM8994_AIF1_SR_MASK 0x00F0 /* AIF1_SR - [7:4] */ +#define WM8994_AIF1_SR_SHIFT 4 /* AIF1_SR - [7:4] */ +#define WM8994_AIF1_SR_WIDTH 4 /* AIF1_SR - [7:4] */ +#define WM8994_AIF1CLK_RATE_MASK 0x000F /* AIF1CLK_RATE - [3:0] */ +#define WM8994_AIF1CLK_RATE_SHIFT 0 /* AIF1CLK_RATE - [3:0] */ +#define WM8994_AIF1CLK_RATE_WIDTH 4 /* AIF1CLK_RATE - [3:0] */ + +/* + * R529 (0x211) - AIF2 Rate + */ +#define WM8994_AIF2_SR_MASK 0x00F0 /* AIF2_SR - [7:4] */ +#define WM8994_AIF2_SR_SHIFT 4 /* AIF2_SR - [7:4] */ +#define WM8994_AIF2_SR_WIDTH 4 /* AIF2_SR - [7:4] */ +#define WM8994_AIF2CLK_RATE_MASK 0x000F /* AIF2CLK_RATE - [3:0] */ +#define WM8994_AIF2CLK_RATE_SHIFT 0 /* AIF2CLK_RATE - [3:0] */ +#define WM8994_AIF2CLK_RATE_WIDTH 4 /* AIF2CLK_RATE - [3:0] */ + +/* + * R530 (0x212) - Rate Status + */ +#define WM8994_SR_ERROR_MASK 0x000F /* SR_ERROR - [3:0] */ +#define WM8994_SR_ERROR_SHIFT 0 /* SR_ERROR - [3:0] */ +#define WM8994_SR_ERROR_WIDTH 4 /* SR_ERROR - [3:0] */ + +/* + * R544 (0x220) - FLL1 Control (1) + */ +#define WM8994_FLL1_FRAC 0x0004 /* FLL1_FRAC */ +#define WM8994_FLL1_FRAC_MASK 0x0004 /* FLL1_FRAC */ +#define WM8994_FLL1_FRAC_SHIFT 2 /* FLL1_FRAC */ +#define WM8994_FLL1_FRAC_WIDTH 1 /* FLL1_FRAC */ +#define WM8994_FLL1_OSC_ENA 0x0002 /* FLL1_OSC_ENA */ +#define WM8994_FLL1_OSC_ENA_MASK 0x0002 /* FLL1_OSC_ENA */ +#define WM8994_FLL1_OSC_ENA_SHIFT 1 /* FLL1_OSC_ENA */ +#define WM8994_FLL1_OSC_ENA_WIDTH 1 /* FLL1_OSC_ENA */ +#define WM8994_FLL1_ENA 0x0001 /* FLL1_ENA */ +#define WM8994_FLL1_ENA_MASK 0x0001 /* FLL1_ENA */ +#define WM8994_FLL1_ENA_SHIFT 0 /* FLL1_ENA */ +#define WM8994_FLL1_ENA_WIDTH 1 /* FLL1_ENA */ + +/* + * R545 (0x221) - FLL1 Control (2) + */ +#define WM8994_FLL1_OUTDIV_MASK 0x3F00 /* FLL1_OUTDIV - [13:8] */ +#define WM8994_FLL1_OUTDIV_SHIFT 8 /* FLL1_OUTDIV - [13:8] */ +#define WM8994_FLL1_OUTDIV_WIDTH 6 /* FLL1_OUTDIV - [13:8] */ +#define WM8994_FLL1_CTRL_RATE_MASK 0x0070 /* FLL1_CTRL_RATE - [6:4] */ +#define WM8994_FLL1_CTRL_RATE_SHIFT 4 /* FLL1_CTRL_RATE - [6:4] */ +#define WM8994_FLL1_CTRL_RATE_WIDTH 3 /* FLL1_CTRL_RATE - [6:4] */ +#define WM8994_FLL1_FRATIO_MASK 0x0007 /* FLL1_FRATIO - [2:0] */ +#define WM8994_FLL1_FRATIO_SHIFT 0 /* FLL1_FRATIO - [2:0] */ +#define WM8994_FLL1_FRATIO_WIDTH 3 /* FLL1_FRATIO - [2:0] */ + +/* + * R546 (0x222) - FLL1 Control (3) + */ +#define WM8994_FLL1_K_MASK 0xFFFF /* FLL1_K - [15:0] */ +#define WM8994_FLL1_K_SHIFT 0 /* FLL1_K - [15:0] */ +#define WM8994_FLL1_K_WIDTH 16 /* FLL1_K - [15:0] */ + +/* + * R547 (0x223) - FLL1 Control (4) + */ +#define WM8994_FLL1_N_MASK 0x7FE0 
/* FLL1_N - [14:5] */ +#define WM8994_FLL1_N_SHIFT 5 /* FLL1_N - [14:5] */ +#define WM8994_FLL1_N_WIDTH 10 /* FLL1_N - [14:5] */ +#define WM8994_FLL1_LOOP_GAIN_MASK 0x000F /* FLL1_LOOP_GAIN - [3:0] */ +#define WM8994_FLL1_LOOP_GAIN_SHIFT 0 /* FLL1_LOOP_GAIN - [3:0] */ +#define WM8994_FLL1_LOOP_GAIN_WIDTH 4 /* FLL1_LOOP_GAIN - [3:0] */ + +/* + * R548 (0x224) - FLL1 Control (5) + */ +#define WM8958_FLL1_BYP 0x8000 /* FLL1_BYP */ +#define WM8958_FLL1_BYP_MASK 0x8000 /* FLL1_BYP */ +#define WM8958_FLL1_BYP_SHIFT 15 /* FLL1_BYP */ +#define WM8958_FLL1_BYP_WIDTH 1 /* FLL1_BYP */ +#define WM8994_FLL1_FRC_NCO_VAL_MASK 0x1F80 /* FLL1_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL1_FRC_NCO_VAL_SHIFT 7 /* FLL1_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL1_FRC_NCO_VAL_WIDTH 6 /* FLL1_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL1_FRC_NCO 0x0040 /* FLL1_FRC_NCO */ +#define WM8994_FLL1_FRC_NCO_MASK 0x0040 /* FLL1_FRC_NCO */ +#define WM8994_FLL1_FRC_NCO_SHIFT 6 /* FLL1_FRC_NCO */ +#define WM8994_FLL1_FRC_NCO_WIDTH 1 /* FLL1_FRC_NCO */ +#define WM8994_FLL1_REFCLK_DIV_MASK 0x0018 /* FLL1_REFCLK_DIV - [4:3] */ +#define WM8994_FLL1_REFCLK_DIV_SHIFT 3 /* FLL1_REFCLK_DIV - [4:3] */ +#define WM8994_FLL1_REFCLK_DIV_WIDTH 2 /* FLL1_REFCLK_DIV - [4:3] */ +#define WM8994_FLL1_REFCLK_SRC_MASK 0x0003 /* FLL1_REFCLK_SRC - [1:0] */ +#define WM8994_FLL1_REFCLK_SRC_SHIFT 0 /* FLL1_REFCLK_SRC - [1:0] */ +#define WM8994_FLL1_REFCLK_SRC_WIDTH 2 /* FLL1_REFCLK_SRC - [1:0] */ + +/* + * R550 (0x226) - FLL1 EFS 1 + */ +#define WM8958_FLL1_LAMBDA_MASK 0xFFFF /* FLL1_LAMBDA - [15:0] */ +#define WM8958_FLL1_LAMBDA_SHIFT 0 /* FLL1_LAMBDA - [15:0] */ +#define WM8958_FLL1_LAMBDA_WIDTH 16 /* FLL1_LAMBDA - [15:0] */ + +/* + * R551 (0x227) - FLL1 EFS 2 + */ +#define WM8958_FLL1_LFSR_SEL_MASK 0x0006 /* FLL1_LFSR_SEL - [2:1] */ +#define WM8958_FLL1_LFSR_SEL_SHIFT 1 /* FLL1_LFSR_SEL - [2:1] */ +#define WM8958_FLL1_LFSR_SEL_WIDTH 2 /* FLL1_LFSR_SEL - [2:1] */ +#define WM8958_FLL1_EFS_ENA 0x0001 /* FLL1_EFS_ENA */ +#define WM8958_FLL1_EFS_ENA_MASK 0x0001 /* FLL1_EFS_ENA */ +#define WM8958_FLL1_EFS_ENA_SHIFT 0 /* FLL1_EFS_ENA */ +#define WM8958_FLL1_EFS_ENA_WIDTH 1 /* FLL1_EFS_ENA */ + +/* + * R576 (0x240) - FLL2 Control (1) + */ +#define WM8994_FLL2_FRAC 0x0004 /* FLL2_FRAC */ +#define WM8994_FLL2_FRAC_MASK 0x0004 /* FLL2_FRAC */ +#define WM8994_FLL2_FRAC_SHIFT 2 /* FLL2_FRAC */ +#define WM8994_FLL2_FRAC_WIDTH 1 /* FLL2_FRAC */ +#define WM8994_FLL2_OSC_ENA 0x0002 /* FLL2_OSC_ENA */ +#define WM8994_FLL2_OSC_ENA_MASK 0x0002 /* FLL2_OSC_ENA */ +#define WM8994_FLL2_OSC_ENA_SHIFT 1 /* FLL2_OSC_ENA */ +#define WM8994_FLL2_OSC_ENA_WIDTH 1 /* FLL2_OSC_ENA */ +#define WM8994_FLL2_ENA 0x0001 /* FLL2_ENA */ +#define WM8994_FLL2_ENA_MASK 0x0001 /* FLL2_ENA */ +#define WM8994_FLL2_ENA_SHIFT 0 /* FLL2_ENA */ +#define WM8994_FLL2_ENA_WIDTH 1 /* FLL2_ENA */ + +/* + * R577 (0x241) - FLL2 Control (2) + */ +#define WM8994_FLL2_OUTDIV_MASK 0x3F00 /* FLL2_OUTDIV - [13:8] */ +#define WM8994_FLL2_OUTDIV_SHIFT 8 /* FLL2_OUTDIV - [13:8] */ +#define WM8994_FLL2_OUTDIV_WIDTH 6 /* FLL2_OUTDIV - [13:8] */ +#define WM8994_FLL2_CTRL_RATE_MASK 0x0070 /* FLL2_CTRL_RATE - [6:4] */ +#define WM8994_FLL2_CTRL_RATE_SHIFT 4 /* FLL2_CTRL_RATE - [6:4] */ +#define WM8994_FLL2_CTRL_RATE_WIDTH 3 /* FLL2_CTRL_RATE - [6:4] */ +#define WM8994_FLL2_FRATIO_MASK 0x0007 /* FLL2_FRATIO - [2:0] */ +#define WM8994_FLL2_FRATIO_SHIFT 0 /* FLL2_FRATIO - [2:0] */ +#define WM8994_FLL2_FRATIO_WIDTH 3 /* FLL2_FRATIO - [2:0] */ + +/* + * R578 (0x242) - FLL2 Control (3) + */ +#define WM8994_FLL2_K_MASK 0xFFFF 
/* FLL2_K - [15:0] */ +#define WM8994_FLL2_K_SHIFT 0 /* FLL2_K - [15:0] */ +#define WM8994_FLL2_K_WIDTH 16 /* FLL2_K - [15:0] */ + +/* + * R579 (0x243) - FLL2 Control (4) + */ +#define WM8994_FLL2_N_MASK 0x7FE0 /* FLL2_N - [14:5] */ +#define WM8994_FLL2_N_SHIFT 5 /* FLL2_N - [14:5] */ +#define WM8994_FLL2_N_WIDTH 10 /* FLL2_N - [14:5] */ +#define WM8994_FLL2_LOOP_GAIN_MASK 0x000F /* FLL2_LOOP_GAIN - [3:0] */ +#define WM8994_FLL2_LOOP_GAIN_SHIFT 0 /* FLL2_LOOP_GAIN - [3:0] */ +#define WM8994_FLL2_LOOP_GAIN_WIDTH 4 /* FLL2_LOOP_GAIN - [3:0] */ + +/* + * R580 (0x244) - FLL2 Control (5) + */ +#define WM8958_FLL2_BYP 0x8000 /* FLL2_BYP */ +#define WM8958_FLL2_BYP_MASK 0x8000 /* FLL2_BYP */ +#define WM8958_FLL2_BYP_SHIFT 15 /* FLL2_BYP */ +#define WM8958_FLL2_BYP_WIDTH 1 /* FLL2_BYP */ +#define WM8994_FLL2_FRC_NCO_VAL_MASK 0x1F80 /* FLL2_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL2_FRC_NCO_VAL_SHIFT 7 /* FLL2_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL2_FRC_NCO_VAL_WIDTH 6 /* FLL2_FRC_NCO_VAL - [12:7] */ +#define WM8994_FLL2_FRC_NCO 0x0040 /* FLL2_FRC_NCO */ +#define WM8994_FLL2_FRC_NCO_MASK 0x0040 /* FLL2_FRC_NCO */ +#define WM8994_FLL2_FRC_NCO_SHIFT 6 /* FLL2_FRC_NCO */ +#define WM8994_FLL2_FRC_NCO_WIDTH 1 /* FLL2_FRC_NCO */ +#define WM8994_FLL2_REFCLK_DIV_MASK 0x0018 /* FLL2_REFCLK_DIV - [4:3] */ +#define WM8994_FLL2_REFCLK_DIV_SHIFT 3 /* FLL2_REFCLK_DIV - [4:3] */ +#define WM8994_FLL2_REFCLK_DIV_WIDTH 2 /* FLL2_REFCLK_DIV - [4:3] */ +#define WM8994_FLL2_REFCLK_SRC_MASK 0x0003 /* FLL2_REFCLK_SRC - [1:0] */ +#define WM8994_FLL2_REFCLK_SRC_SHIFT 0 /* FLL2_REFCLK_SRC - [1:0] */ +#define WM8994_FLL2_REFCLK_SRC_WIDTH 2 /* FLL2_REFCLK_SRC - [1:0] */ + +/* + * R582 (0x246) - FLL2 EFS 1 + */ +#define WM8958_FLL2_LAMBDA_MASK 0xFFFF /* FLL2_LAMBDA - [15:0] */ +#define WM8958_FLL2_LAMBDA_SHIFT 0 /* FLL2_LAMBDA - [15:0] */ +#define WM8958_FLL2_LAMBDA_WIDTH 16 /* FLL2_LAMBDA - [15:0] */ + +/* + * R583 (0x247) - FLL2 EFS 2 + */ +#define WM8958_FLL2_LFSR_SEL_MASK 0x0006 /* FLL2_LFSR_SEL - [2:1] */ +#define WM8958_FLL2_LFSR_SEL_SHIFT 1 /* FLL2_LFSR_SEL - [2:1] */ +#define WM8958_FLL2_LFSR_SEL_WIDTH 2 /* FLL2_LFSR_SEL - [2:1] */ +#define WM8958_FLL2_EFS_ENA 0x0001 /* FLL2_EFS_ENA */ +#define WM8958_FLL2_EFS_ENA_MASK 0x0001 /* FLL2_EFS_ENA */ +#define WM8958_FLL2_EFS_ENA_SHIFT 0 /* FLL2_EFS_ENA */ +#define WM8958_FLL2_EFS_ENA_WIDTH 1 /* FLL2_EFS_ENA */ + +/* + * R768 (0x300) - AIF1 Control (1) + */ +#define WM8994_AIF1ADCL_SRC 0x8000 /* AIF1ADCL_SRC */ +#define WM8994_AIF1ADCL_SRC_MASK 0x8000 /* AIF1ADCL_SRC */ +#define WM8994_AIF1ADCL_SRC_SHIFT 15 /* AIF1ADCL_SRC */ +#define WM8994_AIF1ADCL_SRC_WIDTH 1 /* AIF1ADCL_SRC */ +#define WM8994_AIF1ADCR_SRC 0x4000 /* AIF1ADCR_SRC */ +#define WM8994_AIF1ADCR_SRC_MASK 0x4000 /* AIF1ADCR_SRC */ +#define WM8994_AIF1ADCR_SRC_SHIFT 14 /* AIF1ADCR_SRC */ +#define WM8994_AIF1ADCR_SRC_WIDTH 1 /* AIF1ADCR_SRC */ +#define WM8994_AIF1ADC_TDM 0x2000 /* AIF1ADC_TDM */ +#define WM8994_AIF1ADC_TDM_MASK 0x2000 /* AIF1ADC_TDM */ +#define WM8994_AIF1ADC_TDM_SHIFT 13 /* AIF1ADC_TDM */ +#define WM8994_AIF1ADC_TDM_WIDTH 1 /* AIF1ADC_TDM */ +#define WM8994_AIF1_BCLK_INV 0x0100 /* AIF1_BCLK_INV */ +#define WM8994_AIF1_BCLK_INV_MASK 0x0100 /* AIF1_BCLK_INV */ +#define WM8994_AIF1_BCLK_INV_SHIFT 8 /* AIF1_BCLK_INV */ +#define WM8994_AIF1_BCLK_INV_WIDTH 1 /* AIF1_BCLK_INV */ +#define WM8994_AIF1_LRCLK_INV 0x0080 /* AIF1_LRCLK_INV */ +#define WM8994_AIF1_LRCLK_INV_MASK 0x0080 /* AIF1_LRCLK_INV */ +#define WM8994_AIF1_LRCLK_INV_SHIFT 7 /* AIF1_LRCLK_INV */ +#define WM8994_AIF1_LRCLK_INV_WIDTH 1 
/* AIF1_LRCLK_INV */ +#define WM8994_AIF1_WL_MASK 0x0060 /* AIF1_WL - [6:5] */ +#define WM8994_AIF1_WL_SHIFT 5 /* AIF1_WL - [6:5] */ +#define WM8994_AIF1_WL_WIDTH 2 /* AIF1_WL - [6:5] */ +#define WM8994_AIF1_FMT_MASK 0x0018 /* AIF1_FMT - [4:3] */ +#define WM8994_AIF1_FMT_SHIFT 3 /* AIF1_FMT - [4:3] */ +#define WM8994_AIF1_FMT_WIDTH 2 /* AIF1_FMT - [4:3] */ + +/* + * R769 (0x301) - AIF1 Control (2) + */ +#define WM8994_AIF1DACL_SRC 0x8000 /* AIF1DACL_SRC */ +#define WM8994_AIF1DACL_SRC_MASK 0x8000 /* AIF1DACL_SRC */ +#define WM8994_AIF1DACL_SRC_SHIFT 15 /* AIF1DACL_SRC */ +#define WM8994_AIF1DACL_SRC_WIDTH 1 /* AIF1DACL_SRC */ +#define WM8994_AIF1DACR_SRC 0x4000 /* AIF1DACR_SRC */ +#define WM8994_AIF1DACR_SRC_MASK 0x4000 /* AIF1DACR_SRC */ +#define WM8994_AIF1DACR_SRC_SHIFT 14 /* AIF1DACR_SRC */ +#define WM8994_AIF1DACR_SRC_WIDTH 1 /* AIF1DACR_SRC */ +#define WM8994_AIF1DAC_BOOST_MASK 0x0C00 /* AIF1DAC_BOOST - [11:10] */ +#define WM8994_AIF1DAC_BOOST_SHIFT 10 /* AIF1DAC_BOOST - [11:10] */ +#define WM8994_AIF1DAC_BOOST_WIDTH 2 /* AIF1DAC_BOOST - [11:10] */ +#define WM8994_AIF1_MONO 0x0100 /* AIF1_MONO */ +#define WM8994_AIF1_MONO_MASK 0x0100 /* AIF1_MONO */ +#define WM8994_AIF1_MONO_SHIFT 8 /* AIF1_MONO */ +#define WM8994_AIF1_MONO_WIDTH 1 /* AIF1_MONO */ +#define WM8994_AIF1DAC_COMP 0x0010 /* AIF1DAC_COMP */ +#define WM8994_AIF1DAC_COMP_MASK 0x0010 /* AIF1DAC_COMP */ +#define WM8994_AIF1DAC_COMP_SHIFT 4 /* AIF1DAC_COMP */ +#define WM8994_AIF1DAC_COMP_WIDTH 1 /* AIF1DAC_COMP */ +#define WM8994_AIF1DAC_COMPMODE 0x0008 /* AIF1DAC_COMPMODE */ +#define WM8994_AIF1DAC_COMPMODE_MASK 0x0008 /* AIF1DAC_COMPMODE */ +#define WM8994_AIF1DAC_COMPMODE_SHIFT 3 /* AIF1DAC_COMPMODE */ +#define WM8994_AIF1DAC_COMPMODE_WIDTH 1 /* AIF1DAC_COMPMODE */ +#define WM8994_AIF1ADC_COMP 0x0004 /* AIF1ADC_COMP */ +#define WM8994_AIF1ADC_COMP_MASK 0x0004 /* AIF1ADC_COMP */ +#define WM8994_AIF1ADC_COMP_SHIFT 2 /* AIF1ADC_COMP */ +#define WM8994_AIF1ADC_COMP_WIDTH 1 /* AIF1ADC_COMP */ +#define WM8994_AIF1ADC_COMPMODE 0x0002 /* AIF1ADC_COMPMODE */ +#define WM8994_AIF1ADC_COMPMODE_MASK 0x0002 /* AIF1ADC_COMPMODE */ +#define WM8994_AIF1ADC_COMPMODE_SHIFT 1 /* AIF1ADC_COMPMODE */ +#define WM8994_AIF1ADC_COMPMODE_WIDTH 1 /* AIF1ADC_COMPMODE */ +#define WM8994_AIF1_LOOPBACK 0x0001 /* AIF1_LOOPBACK */ +#define WM8994_AIF1_LOOPBACK_MASK 0x0001 /* AIF1_LOOPBACK */ +#define WM8994_AIF1_LOOPBACK_SHIFT 0 /* AIF1_LOOPBACK */ +#define WM8994_AIF1_LOOPBACK_WIDTH 1 /* AIF1_LOOPBACK */ + +/* + * R770 (0x302) - AIF1 Master/Slave + */ +#define WM8994_AIF1_TRI 0x8000 /* AIF1_TRI */ +#define WM8994_AIF1_TRI_MASK 0x8000 /* AIF1_TRI */ +#define WM8994_AIF1_TRI_SHIFT 15 /* AIF1_TRI */ +#define WM8994_AIF1_TRI_WIDTH 1 /* AIF1_TRI */ +#define WM8994_AIF1_MSTR 0x4000 /* AIF1_MSTR */ +#define WM8994_AIF1_MSTR_MASK 0x4000 /* AIF1_MSTR */ +#define WM8994_AIF1_MSTR_SHIFT 14 /* AIF1_MSTR */ +#define WM8994_AIF1_MSTR_WIDTH 1 /* AIF1_MSTR */ +#define WM8994_AIF1_CLK_FRC 0x2000 /* AIF1_CLK_FRC */ +#define WM8994_AIF1_CLK_FRC_MASK 0x2000 /* AIF1_CLK_FRC */ +#define WM8994_AIF1_CLK_FRC_SHIFT 13 /* AIF1_CLK_FRC */ +#define WM8994_AIF1_CLK_FRC_WIDTH 1 /* AIF1_CLK_FRC */ +#define WM8994_AIF1_LRCLK_FRC 0x1000 /* AIF1_LRCLK_FRC */ +#define WM8994_AIF1_LRCLK_FRC_MASK 0x1000 /* AIF1_LRCLK_FRC */ +#define WM8994_AIF1_LRCLK_FRC_SHIFT 12 /* AIF1_LRCLK_FRC */ +#define WM8994_AIF1_LRCLK_FRC_WIDTH 1 /* AIF1_LRCLK_FRC */ + +/* + * R771 (0x303) - AIF1 BCLK + */ +#define WM8994_AIF1_BCLK_DIV_MASK 0x01F0 /* AIF1_BCLK_DIV - [8:4] */ +#define WM8994_AIF1_BCLK_DIV_SHIFT 4 /* 
AIF1_BCLK_DIV - [8:4] */ +#define WM8994_AIF1_BCLK_DIV_WIDTH 5 /* AIF1_BCLK_DIV - [8:4] */ + +/* + * R772 (0x304) - AIF1ADC LRCLK + */ +#define WM8994_AIF1ADC_LRCLK_DIR 0x0800 /* AIF1ADC_LRCLK_DIR */ +#define WM8994_AIF1ADC_LRCLK_DIR_MASK 0x0800 /* AIF1ADC_LRCLK_DIR */ +#define WM8994_AIF1ADC_LRCLK_DIR_SHIFT 11 /* AIF1ADC_LRCLK_DIR */ +#define WM8994_AIF1ADC_LRCLK_DIR_WIDTH 1 /* AIF1ADC_LRCLK_DIR */ +#define WM8994_AIF1ADC_RATE_MASK 0x07FF /* AIF1ADC_RATE - [10:0] */ +#define WM8994_AIF1ADC_RATE_SHIFT 0 /* AIF1ADC_RATE - [10:0] */ +#define WM8994_AIF1ADC_RATE_WIDTH 11 /* AIF1ADC_RATE - [10:0] */ + +/* + * R773 (0x305) - AIF1DAC LRCLK + */ +#define WM8994_AIF1DAC_LRCLK_DIR 0x0800 /* AIF1DAC_LRCLK_DIR */ +#define WM8994_AIF1DAC_LRCLK_DIR_MASK 0x0800 /* AIF1DAC_LRCLK_DIR */ +#define WM8994_AIF1DAC_LRCLK_DIR_SHIFT 11 /* AIF1DAC_LRCLK_DIR */ +#define WM8994_AIF1DAC_LRCLK_DIR_WIDTH 1 /* AIF1DAC_LRCLK_DIR */ +#define WM8994_AIF1DAC_RATE_MASK 0x07FF /* AIF1DAC_RATE - [10:0] */ +#define WM8994_AIF1DAC_RATE_SHIFT 0 /* AIF1DAC_RATE - [10:0] */ +#define WM8994_AIF1DAC_RATE_WIDTH 11 /* AIF1DAC_RATE - [10:0] */ + +/* + * R774 (0x306) - AIF1DAC Data + */ +#define WM8994_AIF1DACL_DAT_INV 0x0002 /* AIF1DACL_DAT_INV */ +#define WM8994_AIF1DACL_DAT_INV_MASK 0x0002 /* AIF1DACL_DAT_INV */ +#define WM8994_AIF1DACL_DAT_INV_SHIFT 1 /* AIF1DACL_DAT_INV */ +#define WM8994_AIF1DACL_DAT_INV_WIDTH 1 /* AIF1DACL_DAT_INV */ +#define WM8994_AIF1DACR_DAT_INV 0x0001 /* AIF1DACR_DAT_INV */ +#define WM8994_AIF1DACR_DAT_INV_MASK 0x0001 /* AIF1DACR_DAT_INV */ +#define WM8994_AIF1DACR_DAT_INV_SHIFT 0 /* AIF1DACR_DAT_INV */ +#define WM8994_AIF1DACR_DAT_INV_WIDTH 1 /* AIF1DACR_DAT_INV */ + +/* + * R775 (0x307) - AIF1ADC Data + */ +#define WM8994_AIF1ADCL_DAT_INV 0x0002 /* AIF1ADCL_DAT_INV */ +#define WM8994_AIF1ADCL_DAT_INV_MASK 0x0002 /* AIF1ADCL_DAT_INV */ +#define WM8994_AIF1ADCL_DAT_INV_SHIFT 1 /* AIF1ADCL_DAT_INV */ +#define WM8994_AIF1ADCL_DAT_INV_WIDTH 1 /* AIF1ADCL_DAT_INV */ +#define WM8994_AIF1ADCR_DAT_INV 0x0001 /* AIF1ADCR_DAT_INV */ +#define WM8994_AIF1ADCR_DAT_INV_MASK 0x0001 /* AIF1ADCR_DAT_INV */ +#define WM8994_AIF1ADCR_DAT_INV_SHIFT 0 /* AIF1ADCR_DAT_INV */ +#define WM8994_AIF1ADCR_DAT_INV_WIDTH 1 /* AIF1ADCR_DAT_INV */ + +/* + * R784 (0x310) - AIF2 Control (1) + */ +#define WM8994_AIF2ADCL_SRC 0x8000 /* AIF2ADCL_SRC */ +#define WM8994_AIF2ADCL_SRC_MASK 0x8000 /* AIF2ADCL_SRC */ +#define WM8994_AIF2ADCL_SRC_SHIFT 15 /* AIF2ADCL_SRC */ +#define WM8994_AIF2ADCL_SRC_WIDTH 1 /* AIF2ADCL_SRC */ +#define WM8994_AIF2ADCR_SRC 0x4000 /* AIF2ADCR_SRC */ +#define WM8994_AIF2ADCR_SRC_MASK 0x4000 /* AIF2ADCR_SRC */ +#define WM8994_AIF2ADCR_SRC_SHIFT 14 /* AIF2ADCR_SRC */ +#define WM8994_AIF2ADCR_SRC_WIDTH 1 /* AIF2ADCR_SRC */ +#define WM8994_AIF2ADC_TDM 0x2000 /* AIF2ADC_TDM */ +#define WM8994_AIF2ADC_TDM_MASK 0x2000 /* AIF2ADC_TDM */ +#define WM8994_AIF2ADC_TDM_SHIFT 13 /* AIF2ADC_TDM */ +#define WM8994_AIF2ADC_TDM_WIDTH 1 /* AIF2ADC_TDM */ +#define WM8994_AIF2ADC_TDM_CHAN 0x1000 /* AIF2ADC_TDM_CHAN */ +#define WM8994_AIF2ADC_TDM_CHAN_MASK 0x1000 /* AIF2ADC_TDM_CHAN */ +#define WM8994_AIF2ADC_TDM_CHAN_SHIFT 12 /* AIF2ADC_TDM_CHAN */ +#define WM8994_AIF2ADC_TDM_CHAN_WIDTH 1 /* AIF2ADC_TDM_CHAN */ +#define WM8994_AIF2_BCLK_INV 0x0100 /* AIF2_BCLK_INV */ +#define WM8994_AIF2_BCLK_INV_MASK 0x0100 /* AIF2_BCLK_INV */ +#define WM8994_AIF2_BCLK_INV_SHIFT 8 /* AIF2_BCLK_INV */ +#define WM8994_AIF2_BCLK_INV_WIDTH 1 /* AIF2_BCLK_INV */ +#define WM8994_AIF2_LRCLK_INV 0x0080 /* AIF2_LRCLK_INV */ +#define WM8994_AIF2_LRCLK_INV_MASK 
0x0080 /* AIF2_LRCLK_INV */ +#define WM8994_AIF2_LRCLK_INV_SHIFT 7 /* AIF2_LRCLK_INV */ +#define WM8994_AIF2_LRCLK_INV_WIDTH 1 /* AIF2_LRCLK_INV */ +#define WM8994_AIF2_WL_MASK 0x0060 /* AIF2_WL - [6:5] */ +#define WM8994_AIF2_WL_SHIFT 5 /* AIF2_WL - [6:5] */ +#define WM8994_AIF2_WL_WIDTH 2 /* AIF2_WL - [6:5] */ +#define WM8994_AIF2_FMT_MASK 0x0018 /* AIF2_FMT - [4:3] */ +#define WM8994_AIF2_FMT_SHIFT 3 /* AIF2_FMT - [4:3] */ +#define WM8994_AIF2_FMT_WIDTH 2 /* AIF2_FMT - [4:3] */ + +/* + * R785 (0x311) - AIF2 Control (2) + */ +#define WM8994_AIF2DACL_SRC 0x8000 /* AIF2DACL_SRC */ +#define WM8994_AIF2DACL_SRC_MASK 0x8000 /* AIF2DACL_SRC */ +#define WM8994_AIF2DACL_SRC_SHIFT 15 /* AIF2DACL_SRC */ +#define WM8994_AIF2DACL_SRC_WIDTH 1 /* AIF2DACL_SRC */ +#define WM8994_AIF2DACR_SRC 0x4000 /* AIF2DACR_SRC */ +#define WM8994_AIF2DACR_SRC_MASK 0x4000 /* AIF2DACR_SRC */ +#define WM8994_AIF2DACR_SRC_SHIFT 14 /* AIF2DACR_SRC */ +#define WM8994_AIF2DACR_SRC_WIDTH 1 /* AIF2DACR_SRC */ +#define WM8994_AIF2DAC_TDM 0x2000 /* AIF2DAC_TDM */ +#define WM8994_AIF2DAC_TDM_MASK 0x2000 /* AIF2DAC_TDM */ +#define WM8994_AIF2DAC_TDM_SHIFT 13 /* AIF2DAC_TDM */ +#define WM8994_AIF2DAC_TDM_WIDTH 1 /* AIF2DAC_TDM */ +#define WM8994_AIF2DAC_TDM_CHAN 0x1000 /* AIF2DAC_TDM_CHAN */ +#define WM8994_AIF2DAC_TDM_CHAN_MASK 0x1000 /* AIF2DAC_TDM_CHAN */ +#define WM8994_AIF2DAC_TDM_CHAN_SHIFT 12 /* AIF2DAC_TDM_CHAN */ +#define WM8994_AIF2DAC_TDM_CHAN_WIDTH 1 /* AIF2DAC_TDM_CHAN */ +#define WM8994_AIF2DAC_BOOST_MASK 0x0C00 /* AIF2DAC_BOOST - [11:10] */ +#define WM8994_AIF2DAC_BOOST_SHIFT 10 /* AIF2DAC_BOOST - [11:10] */ +#define WM8994_AIF2DAC_BOOST_WIDTH 2 /* AIF2DAC_BOOST - [11:10] */ +#define WM8994_AIF2_MONO 0x0100 /* AIF2_MONO */ +#define WM8994_AIF2_MONO_MASK 0x0100 /* AIF2_MONO */ +#define WM8994_AIF2_MONO_SHIFT 8 /* AIF2_MONO */ +#define WM8994_AIF2_MONO_WIDTH 1 /* AIF2_MONO */ +#define WM8994_AIF2DAC_COMP 0x0010 /* AIF2DAC_COMP */ +#define WM8994_AIF2DAC_COMP_MASK 0x0010 /* AIF2DAC_COMP */ +#define WM8994_AIF2DAC_COMP_SHIFT 4 /* AIF2DAC_COMP */ +#define WM8994_AIF2DAC_COMP_WIDTH 1 /* AIF2DAC_COMP */ +#define WM8994_AIF2DAC_COMPMODE 0x0008 /* AIF2DAC_COMPMODE */ +#define WM8994_AIF2DAC_COMPMODE_MASK 0x0008 /* AIF2DAC_COMPMODE */ +#define WM8994_AIF2DAC_COMPMODE_SHIFT 3 /* AIF2DAC_COMPMODE */ +#define WM8994_AIF2DAC_COMPMODE_WIDTH 1 /* AIF2DAC_COMPMODE */ +#define WM8994_AIF2ADC_COMP 0x0004 /* AIF2ADC_COMP */ +#define WM8994_AIF2ADC_COMP_MASK 0x0004 /* AIF2ADC_COMP */ +#define WM8994_AIF2ADC_COMP_SHIFT 2 /* AIF2ADC_COMP */ +#define WM8994_AIF2ADC_COMP_WIDTH 1 /* AIF2ADC_COMP */ +#define WM8994_AIF2ADC_COMPMODE 0x0002 /* AIF2ADC_COMPMODE */ +#define WM8994_AIF2ADC_COMPMODE_MASK 0x0002 /* AIF2ADC_COMPMODE */ +#define WM8994_AIF2ADC_COMPMODE_SHIFT 1 /* AIF2ADC_COMPMODE */ +#define WM8994_AIF2ADC_COMPMODE_WIDTH 1 /* AIF2ADC_COMPMODE */ +#define WM8994_AIF2_LOOPBACK 0x0001 /* AIF2_LOOPBACK */ +#define WM8994_AIF2_LOOPBACK_MASK 0x0001 /* AIF2_LOOPBACK */ +#define WM8994_AIF2_LOOPBACK_SHIFT 0 /* AIF2_LOOPBACK */ +#define WM8994_AIF2_LOOPBACK_WIDTH 1 /* AIF2_LOOPBACK */ + +/* + * R786 (0x312) - AIF2 Master/Slave + */ +#define WM8994_AIF2_TRI 0x8000 /* AIF2_TRI */ +#define WM8994_AIF2_TRI_MASK 0x8000 /* AIF2_TRI */ +#define WM8994_AIF2_TRI_SHIFT 15 /* AIF2_TRI */ +#define WM8994_AIF2_TRI_WIDTH 1 /* AIF2_TRI */ +#define WM8994_AIF2_MSTR 0x4000 /* AIF2_MSTR */ +#define WM8994_AIF2_MSTR_MASK 0x4000 /* AIF2_MSTR */ +#define WM8994_AIF2_MSTR_SHIFT 14 /* AIF2_MSTR */ +#define WM8994_AIF2_MSTR_WIDTH 1 /* AIF2_MSTR */ +#define 
WM8994_AIF2_CLK_FRC 0x2000 /* AIF2_CLK_FRC */ +#define WM8994_AIF2_CLK_FRC_MASK 0x2000 /* AIF2_CLK_FRC */ +#define WM8994_AIF2_CLK_FRC_SHIFT 13 /* AIF2_CLK_FRC */ +#define WM8994_AIF2_CLK_FRC_WIDTH 1 /* AIF2_CLK_FRC */ +#define WM8994_AIF2_LRCLK_FRC 0x1000 /* AIF2_LRCLK_FRC */ +#define WM8994_AIF2_LRCLK_FRC_MASK 0x1000 /* AIF2_LRCLK_FRC */ +#define WM8994_AIF2_LRCLK_FRC_SHIFT 12 /* AIF2_LRCLK_FRC */ +#define WM8994_AIF2_LRCLK_FRC_WIDTH 1 /* AIF2_LRCLK_FRC */ + +/* + * R787 (0x313) - AIF2 BCLK + */ +#define WM8994_AIF2_BCLK_DIV_MASK 0x01F0 /* AIF2_BCLK_DIV - [8:4] */ +#define WM8994_AIF2_BCLK_DIV_SHIFT 4 /* AIF2_BCLK_DIV - [8:4] */ +#define WM8994_AIF2_BCLK_DIV_WIDTH 5 /* AIF2_BCLK_DIV - [8:4] */ + +/* + * R788 (0x314) - AIF2ADC LRCLK + */ +#define WM8994_AIF2ADC_LRCLK_DIR 0x0800 /* AIF2ADC_LRCLK_DIR */ +#define WM8994_AIF2ADC_LRCLK_DIR_MASK 0x0800 /* AIF2ADC_LRCLK_DIR */ +#define WM8994_AIF2ADC_LRCLK_DIR_SHIFT 11 /* AIF2ADC_LRCLK_DIR */ +#define WM8994_AIF2ADC_LRCLK_DIR_WIDTH 1 /* AIF2ADC_LRCLK_DIR */ +#define WM8994_AIF2ADC_RATE_MASK 0x07FF /* AIF2ADC_RATE - [10:0] */ +#define WM8994_AIF2ADC_RATE_SHIFT 0 /* AIF2ADC_RATE - [10:0] */ +#define WM8994_AIF2ADC_RATE_WIDTH 11 /* AIF2ADC_RATE - [10:0] */ + +/* + * R789 (0x315) - AIF2DAC LRCLK + */ +#define WM8994_AIF2DAC_LRCLK_DIR 0x0800 /* AIF2DAC_LRCLK_DIR */ +#define WM8994_AIF2DAC_LRCLK_DIR_MASK 0x0800 /* AIF2DAC_LRCLK_DIR */ +#define WM8994_AIF2DAC_LRCLK_DIR_SHIFT 11 /* AIF2DAC_LRCLK_DIR */ +#define WM8994_AIF2DAC_LRCLK_DIR_WIDTH 1 /* AIF2DAC_LRCLK_DIR */ +#define WM8994_AIF2DAC_RATE_MASK 0x07FF /* AIF2DAC_RATE - [10:0] */ +#define WM8994_AIF2DAC_RATE_SHIFT 0 /* AIF2DAC_RATE - [10:0] */ +#define WM8994_AIF2DAC_RATE_WIDTH 11 /* AIF2DAC_RATE - [10:0] */ + +/* + * R790 (0x316) - AIF2DAC Data + */ +#define WM8994_AIF2DACL_DAT_INV 0x0002 /* AIF2DACL_DAT_INV */ +#define WM8994_AIF2DACL_DAT_INV_MASK 0x0002 /* AIF2DACL_DAT_INV */ +#define WM8994_AIF2DACL_DAT_INV_SHIFT 1 /* AIF2DACL_DAT_INV */ +#define WM8994_AIF2DACL_DAT_INV_WIDTH 1 /* AIF2DACL_DAT_INV */ +#define WM8994_AIF2DACR_DAT_INV 0x0001 /* AIF2DACR_DAT_INV */ +#define WM8994_AIF2DACR_DAT_INV_MASK 0x0001 /* AIF2DACR_DAT_INV */ +#define WM8994_AIF2DACR_DAT_INV_SHIFT 0 /* AIF2DACR_DAT_INV */ +#define WM8994_AIF2DACR_DAT_INV_WIDTH 1 /* AIF2DACR_DAT_INV */ + +/* + * R791 (0x317) - AIF2ADC Data + */ +#define WM8994_AIF2ADCL_DAT_INV 0x0002 /* AIF2ADCL_DAT_INV */ +#define WM8994_AIF2ADCL_DAT_INV_MASK 0x0002 /* AIF2ADCL_DAT_INV */ +#define WM8994_AIF2ADCL_DAT_INV_SHIFT 1 /* AIF2ADCL_DAT_INV */ +#define WM8994_AIF2ADCL_DAT_INV_WIDTH 1 /* AIF2ADCL_DAT_INV */ +#define WM8994_AIF2ADCR_DAT_INV 0x0001 /* AIF2ADCR_DAT_INV */ +#define WM8994_AIF2ADCR_DAT_INV_MASK 0x0001 /* AIF2ADCR_DAT_INV */ +#define WM8994_AIF2ADCR_DAT_INV_SHIFT 0 /* AIF2ADCR_DAT_INV */ +#define WM8994_AIF2ADCR_DAT_INV_WIDTH 1 /* AIF2ADCR_DAT_INV */ + +/* + * R800 (0x320) - AIF3 Control (1) + */ +#define WM8958_AIF3_LRCLK_INV 0x0080 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_LRCLK_INV_MASK 0x0080 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_LRCLK_INV_SHIFT 7 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_LRCLK_INV_WIDTH 1 /* AIF3_LRCLK_INV */ +#define WM8958_AIF3_WL_MASK 0x0060 /* AIF3_WL - [6:5] */ +#define WM8958_AIF3_WL_SHIFT 5 /* AIF3_WL - [6:5] */ +#define WM8958_AIF3_WL_WIDTH 2 /* AIF3_WL - [6:5] */ +#define WM8958_AIF3_FMT_MASK 0x0018 /* AIF3_FMT - [4:3] */ +#define WM8958_AIF3_FMT_SHIFT 3 /* AIF3_FMT - [4:3] */ +#define WM8958_AIF3_FMT_WIDTH 2 /* AIF3_FMT - [4:3] */ + +/* + * R801 (0x321) - AIF3 Control (2) + */ +#define 
WM8958_AIF3DAC_BOOST_MASK 0x0C00 /* AIF3DAC_BOOST - [11:10] */ +#define WM8958_AIF3DAC_BOOST_SHIFT 10 /* AIF3DAC_BOOST - [11:10] */ +#define WM8958_AIF3DAC_BOOST_WIDTH 2 /* AIF3DAC_BOOST - [11:10] */ +#define WM8958_AIF3DAC_COMP 0x0010 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMP_MASK 0x0010 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMP_SHIFT 4 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMP_WIDTH 1 /* AIF3DAC_COMP */ +#define WM8958_AIF3DAC_COMPMODE 0x0008 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3DAC_COMPMODE_MASK 0x0008 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3DAC_COMPMODE_SHIFT 3 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3DAC_COMPMODE_WIDTH 1 /* AIF3DAC_COMPMODE */ +#define WM8958_AIF3ADC_COMP 0x0004 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMP_MASK 0x0004 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMP_SHIFT 2 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMP_WIDTH 1 /* AIF3ADC_COMP */ +#define WM8958_AIF3ADC_COMPMODE 0x0002 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3ADC_COMPMODE_MASK 0x0002 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3ADC_COMPMODE_SHIFT 1 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3ADC_COMPMODE_WIDTH 1 /* AIF3ADC_COMPMODE */ +#define WM8958_AIF3_LOOPBACK 0x0001 /* AIF3_LOOPBACK */ +#define WM8958_AIF3_LOOPBACK_MASK 0x0001 /* AIF3_LOOPBACK */ +#define WM8958_AIF3_LOOPBACK_SHIFT 0 /* AIF3_LOOPBACK */ +#define WM8958_AIF3_LOOPBACK_WIDTH 1 /* AIF3_LOOPBACK */ + +/* + * R802 (0x322) - AIF3DAC Data + */ +#define WM8958_AIF3DAC_DAT_INV 0x0001 /* AIF3DAC_DAT_INV */ +#define WM8958_AIF3DAC_DAT_INV_MASK 0x0001 /* AIF3DAC_DAT_INV */ +#define WM8958_AIF3DAC_DAT_INV_SHIFT 0 /* AIF3DAC_DAT_INV */ +#define WM8958_AIF3DAC_DAT_INV_WIDTH 1 /* AIF3DAC_DAT_INV */ + +/* + * R803 (0x323) - AIF3ADC Data + */ +#define WM8958_AIF3ADC_DAT_INV 0x0001 /* AIF3ADC_DAT_INV */ +#define WM8958_AIF3ADC_DAT_INV_MASK 0x0001 /* AIF3ADC_DAT_INV */ +#define WM8958_AIF3ADC_DAT_INV_SHIFT 0 /* AIF3ADC_DAT_INV */ +#define WM8958_AIF3ADC_DAT_INV_WIDTH 1 /* AIF3ADC_DAT_INV */ + +/* + * R1024 (0x400) - AIF1 ADC1 Left Volume + */ +#define WM8994_AIF1ADC1_VU 0x0100 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_MASK 0x0100 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_SHIFT 8 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_WIDTH 1 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1L_VOL_MASK 0x00FF /* AIF1ADC1L_VOL - [7:0] */ +#define WM8994_AIF1ADC1L_VOL_SHIFT 0 /* AIF1ADC1L_VOL - [7:0] */ +#define WM8994_AIF1ADC1L_VOL_WIDTH 8 /* AIF1ADC1L_VOL - [7:0] */ + +/* + * R1025 (0x401) - AIF1 ADC1 Right Volume + */ +#define WM8994_AIF1ADC1_VU 0x0100 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_MASK 0x0100 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_SHIFT 8 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1_VU_WIDTH 1 /* AIF1ADC1_VU */ +#define WM8994_AIF1ADC1R_VOL_MASK 0x00FF /* AIF1ADC1R_VOL - [7:0] */ +#define WM8994_AIF1ADC1R_VOL_SHIFT 0 /* AIF1ADC1R_VOL - [7:0] */ +#define WM8994_AIF1ADC1R_VOL_WIDTH 8 /* AIF1ADC1R_VOL - [7:0] */ + +/* + * R1026 (0x402) - AIF1 DAC1 Left Volume + */ +#define WM8994_AIF1DAC1_VU 0x0100 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_MASK 0x0100 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_SHIFT 8 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_WIDTH 1 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1L_VOL_MASK 0x00FF /* AIF1DAC1L_VOL - [7:0] */ +#define WM8994_AIF1DAC1L_VOL_SHIFT 0 /* AIF1DAC1L_VOL - [7:0] */ +#define WM8994_AIF1DAC1L_VOL_WIDTH 8 /* AIF1DAC1L_VOL - [7:0] */ + +/* + * R1027 (0x403) - AIF1 DAC1 Right Volume + */ +#define WM8994_AIF1DAC1_VU 0x0100 /* AIF1DAC1_VU */ +#define 
WM8994_AIF1DAC1_VU_MASK 0x0100 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_SHIFT 8 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1_VU_WIDTH 1 /* AIF1DAC1_VU */ +#define WM8994_AIF1DAC1R_VOL_MASK 0x00FF /* AIF1DAC1R_VOL - [7:0] */ +#define WM8994_AIF1DAC1R_VOL_SHIFT 0 /* AIF1DAC1R_VOL - [7:0] */ +#define WM8994_AIF1DAC1R_VOL_WIDTH 8 /* AIF1DAC1R_VOL - [7:0] */ + +/* + * R1028 (0x404) - AIF1 ADC2 Left Volume + */ +#define WM8994_AIF1ADC2_VU 0x0100 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_MASK 0x0100 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_SHIFT 8 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_WIDTH 1 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2L_VOL_MASK 0x00FF /* AIF1ADC2L_VOL - [7:0] */ +#define WM8994_AIF1ADC2L_VOL_SHIFT 0 /* AIF1ADC2L_VOL - [7:0] */ +#define WM8994_AIF1ADC2L_VOL_WIDTH 8 /* AIF1ADC2L_VOL - [7:0] */ + +/* + * R1029 (0x405) - AIF1 ADC2 Right Volume + */ +#define WM8994_AIF1ADC2_VU 0x0100 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_MASK 0x0100 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_SHIFT 8 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2_VU_WIDTH 1 /* AIF1ADC2_VU */ +#define WM8994_AIF1ADC2R_VOL_MASK 0x00FF /* AIF1ADC2R_VOL - [7:0] */ +#define WM8994_AIF1ADC2R_VOL_SHIFT 0 /* AIF1ADC2R_VOL - [7:0] */ +#define WM8994_AIF1ADC2R_VOL_WIDTH 8 /* AIF1ADC2R_VOL - [7:0] */ + +/* + * R1030 (0x406) - AIF1 DAC2 Left Volume + */ +#define WM8994_AIF1DAC2_VU 0x0100 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_MASK 0x0100 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_SHIFT 8 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_WIDTH 1 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2L_VOL_MASK 0x00FF /* AIF1DAC2L_VOL - [7:0] */ +#define WM8994_AIF1DAC2L_VOL_SHIFT 0 /* AIF1DAC2L_VOL - [7:0] */ +#define WM8994_AIF1DAC2L_VOL_WIDTH 8 /* AIF1DAC2L_VOL - [7:0] */ + +/* + * R1031 (0x407) - AIF1 DAC2 Right Volume + */ +#define WM8994_AIF1DAC2_VU 0x0100 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_MASK 0x0100 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_SHIFT 8 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2_VU_WIDTH 1 /* AIF1DAC2_VU */ +#define WM8994_AIF1DAC2R_VOL_MASK 0x00FF /* AIF1DAC2R_VOL - [7:0] */ +#define WM8994_AIF1DAC2R_VOL_SHIFT 0 /* AIF1DAC2R_VOL - [7:0] */ +#define WM8994_AIF1DAC2R_VOL_WIDTH 8 /* AIF1DAC2R_VOL - [7:0] */ + +/* + * R1040 (0x410) - AIF1 ADC1 Filters + */ +#define WM8994_AIF1ADC_4FS 0x8000 /* AIF1ADC_4FS */ +#define WM8994_AIF1ADC_4FS_MASK 0x8000 /* AIF1ADC_4FS */ +#define WM8994_AIF1ADC_4FS_SHIFT 15 /* AIF1ADC_4FS */ +#define WM8994_AIF1ADC_4FS_WIDTH 1 /* AIF1ADC_4FS */ +#define WM8994_AIF1ADC1_HPF_CUT_MASK 0x6000 /* AIF1ADC1_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC1_HPF_CUT_SHIFT 13 /* AIF1ADC1_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC1_HPF_CUT_WIDTH 2 /* AIF1ADC1_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC1L_HPF 0x1000 /* AIF1ADC1L_HPF */ +#define WM8994_AIF1ADC1L_HPF_MASK 0x1000 /* AIF1ADC1L_HPF */ +#define WM8994_AIF1ADC1L_HPF_SHIFT 12 /* AIF1ADC1L_HPF */ +#define WM8994_AIF1ADC1L_HPF_WIDTH 1 /* AIF1ADC1L_HPF */ +#define WM8994_AIF1ADC1R_HPF 0x0800 /* AIF1ADC1R_HPF */ +#define WM8994_AIF1ADC1R_HPF_MASK 0x0800 /* AIF1ADC1R_HPF */ +#define WM8994_AIF1ADC1R_HPF_SHIFT 11 /* AIF1ADC1R_HPF */ +#define WM8994_AIF1ADC1R_HPF_WIDTH 1 /* AIF1ADC1R_HPF */ + +/* + * R1041 (0x411) - AIF1 ADC2 Filters + */ +#define WM8994_AIF1ADC2_HPF_CUT_MASK 0x6000 /* AIF1ADC2_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC2_HPF_CUT_SHIFT 13 /* AIF1ADC2_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC2_HPF_CUT_WIDTH 2 /* AIF1ADC2_HPF_CUT - [14:13] */ +#define WM8994_AIF1ADC2L_HPF 0x1000 /* AIF1ADC2L_HPF 
*/ +#define WM8994_AIF1ADC2L_HPF_MASK 0x1000 /* AIF1ADC2L_HPF */ +#define WM8994_AIF1ADC2L_HPF_SHIFT 12 /* AIF1ADC2L_HPF */ +#define WM8994_AIF1ADC2L_HPF_WIDTH 1 /* AIF1ADC2L_HPF */ +#define WM8994_AIF1ADC2R_HPF 0x0800 /* AIF1ADC2R_HPF */ +#define WM8994_AIF1ADC2R_HPF_MASK 0x0800 /* AIF1ADC2R_HPF */ +#define WM8994_AIF1ADC2R_HPF_SHIFT 11 /* AIF1ADC2R_HPF */ +#define WM8994_AIF1ADC2R_HPF_WIDTH 1 /* AIF1ADC2R_HPF */ + +/* + * R1056 (0x420) - AIF1 DAC1 Filters (1) + */ +#define WM8994_AIF1DAC1_MUTE 0x0200 /* AIF1DAC1_MUTE */ +#define WM8994_AIF1DAC1_MUTE_MASK 0x0200 /* AIF1DAC1_MUTE */ +#define WM8994_AIF1DAC1_MUTE_SHIFT 9 /* AIF1DAC1_MUTE */ +#define WM8994_AIF1DAC1_MUTE_WIDTH 1 /* AIF1DAC1_MUTE */ +#define WM8994_AIF1DAC1_MONO 0x0080 /* AIF1DAC1_MONO */ +#define WM8994_AIF1DAC1_MONO_MASK 0x0080 /* AIF1DAC1_MONO */ +#define WM8994_AIF1DAC1_MONO_SHIFT 7 /* AIF1DAC1_MONO */ +#define WM8994_AIF1DAC1_MONO_WIDTH 1 /* AIF1DAC1_MONO */ +#define WM8994_AIF1DAC1_MUTERATE 0x0020 /* AIF1DAC1_MUTERATE */ +#define WM8994_AIF1DAC1_MUTERATE_MASK 0x0020 /* AIF1DAC1_MUTERATE */ +#define WM8994_AIF1DAC1_MUTERATE_SHIFT 5 /* AIF1DAC1_MUTERATE */ +#define WM8994_AIF1DAC1_MUTERATE_WIDTH 1 /* AIF1DAC1_MUTERATE */ +#define WM8994_AIF1DAC1_UNMUTE_RAMP 0x0010 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8994_AIF1DAC1_UNMUTE_RAMP_MASK 0x0010 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8994_AIF1DAC1_UNMUTE_RAMP_SHIFT 4 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8994_AIF1DAC1_UNMUTE_RAMP_WIDTH 1 /* AIF1DAC1_UNMUTE_RAMP */ +#define WM8994_AIF1DAC1_DEEMP_MASK 0x0006 /* AIF1DAC1_DEEMP - [2:1] */ +#define WM8994_AIF1DAC1_DEEMP_SHIFT 1 /* AIF1DAC1_DEEMP - [2:1] */ +#define WM8994_AIF1DAC1_DEEMP_WIDTH 2 /* AIF1DAC1_DEEMP - [2:1] */ + +/* + * R1057 (0x421) - AIF1 DAC1 Filters (2) + */ +#define WM8994_AIF1DAC1_3D_GAIN_MASK 0x3E00 /* AIF1DAC1_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC1_3D_GAIN_SHIFT 9 /* AIF1DAC1_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC1_3D_GAIN_WIDTH 5 /* AIF1DAC1_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC1_3D_ENA 0x0100 /* AIF1DAC1_3D_ENA */ +#define WM8994_AIF1DAC1_3D_ENA_MASK 0x0100 /* AIF1DAC1_3D_ENA */ +#define WM8994_AIF1DAC1_3D_ENA_SHIFT 8 /* AIF1DAC1_3D_ENA */ +#define WM8994_AIF1DAC1_3D_ENA_WIDTH 1 /* AIF1DAC1_3D_ENA */ + +/* + * R1058 (0x422) - AIF1 DAC2 Filters (1) + */ +#define WM8994_AIF1DAC2_MUTE 0x0200 /* AIF1DAC2_MUTE */ +#define WM8994_AIF1DAC2_MUTE_MASK 0x0200 /* AIF1DAC2_MUTE */ +#define WM8994_AIF1DAC2_MUTE_SHIFT 9 /* AIF1DAC2_MUTE */ +#define WM8994_AIF1DAC2_MUTE_WIDTH 1 /* AIF1DAC2_MUTE */ +#define WM8994_AIF1DAC2_MONO 0x0080 /* AIF1DAC2_MONO */ +#define WM8994_AIF1DAC2_MONO_MASK 0x0080 /* AIF1DAC2_MONO */ +#define WM8994_AIF1DAC2_MONO_SHIFT 7 /* AIF1DAC2_MONO */ +#define WM8994_AIF1DAC2_MONO_WIDTH 1 /* AIF1DAC2_MONO */ +#define WM8994_AIF1DAC2_MUTERATE 0x0020 /* AIF1DAC2_MUTERATE */ +#define WM8994_AIF1DAC2_MUTERATE_MASK 0x0020 /* AIF1DAC2_MUTERATE */ +#define WM8994_AIF1DAC2_MUTERATE_SHIFT 5 /* AIF1DAC2_MUTERATE */ +#define WM8994_AIF1DAC2_MUTERATE_WIDTH 1 /* AIF1DAC2_MUTERATE */ +#define WM8994_AIF1DAC2_UNMUTE_RAMP 0x0010 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8994_AIF1DAC2_UNMUTE_RAMP_MASK 0x0010 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8994_AIF1DAC2_UNMUTE_RAMP_SHIFT 4 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8994_AIF1DAC2_UNMUTE_RAMP_WIDTH 1 /* AIF1DAC2_UNMUTE_RAMP */ +#define WM8994_AIF1DAC2_DEEMP_MASK 0x0006 /* AIF1DAC2_DEEMP - [2:1] */ +#define WM8994_AIF1DAC2_DEEMP_SHIFT 1 /* AIF1DAC2_DEEMP - [2:1] */ +#define WM8994_AIF1DAC2_DEEMP_WIDTH 2 /* AIF1DAC2_DEEMP - [2:1] */ + +/* + * R1059 (0x423) - AIF1 
DAC2 Filters (2) + */ +#define WM8994_AIF1DAC2_3D_GAIN_MASK 0x3E00 /* AIF1DAC2_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC2_3D_GAIN_SHIFT 9 /* AIF1DAC2_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC2_3D_GAIN_WIDTH 5 /* AIF1DAC2_3D_GAIN - [13:9] */ +#define WM8994_AIF1DAC2_3D_ENA 0x0100 /* AIF1DAC2_3D_ENA */ +#define WM8994_AIF1DAC2_3D_ENA_MASK 0x0100 /* AIF1DAC2_3D_ENA */ +#define WM8994_AIF1DAC2_3D_ENA_SHIFT 8 /* AIF1DAC2_3D_ENA */ +#define WM8994_AIF1DAC2_3D_ENA_WIDTH 1 /* AIF1DAC2_3D_ENA */ + +/* + * R1072 (0x430) - AIF1 DAC1 Noise Gate + */ +#define WM8958_AIF1DAC1_NG_HLD_MASK 0x0060 /* AIF1DAC1_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC1_NG_HLD_SHIFT 5 /* AIF1DAC1_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC1_NG_HLD_WIDTH 2 /* AIF1DAC1_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC1_NG_THR_MASK 0x000E /* AIF1DAC1_NG_THR - [3:1] */ +#define WM8958_AIF1DAC1_NG_THR_SHIFT 1 /* AIF1DAC1_NG_THR - [3:1] */ +#define WM8958_AIF1DAC1_NG_THR_WIDTH 3 /* AIF1DAC1_NG_THR - [3:1] */ +#define WM8958_AIF1DAC1_NG_ENA 0x0001 /* AIF1DAC1_NG_ENA */ +#define WM8958_AIF1DAC1_NG_ENA_MASK 0x0001 /* AIF1DAC1_NG_ENA */ +#define WM8958_AIF1DAC1_NG_ENA_SHIFT 0 /* AIF1DAC1_NG_ENA */ +#define WM8958_AIF1DAC1_NG_ENA_WIDTH 1 /* AIF1DAC1_NG_ENA */ + +/* + * R1073 (0x431) - AIF1 DAC2 Noise Gate + */ +#define WM8958_AIF1DAC2_NG_HLD_MASK 0x0060 /* AIF1DAC2_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC2_NG_HLD_SHIFT 5 /* AIF1DAC2_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC2_NG_HLD_WIDTH 2 /* AIF1DAC2_NG_HLD - [6:5] */ +#define WM8958_AIF1DAC2_NG_THR_MASK 0x000E /* AIF1DAC2_NG_THR - [3:1] */ +#define WM8958_AIF1DAC2_NG_THR_SHIFT 1 /* AIF1DAC2_NG_THR - [3:1] */ +#define WM8958_AIF1DAC2_NG_THR_WIDTH 3 /* AIF1DAC2_NG_THR - [3:1] */ +#define WM8958_AIF1DAC2_NG_ENA 0x0001 /* AIF1DAC2_NG_ENA */ +#define WM8958_AIF1DAC2_NG_ENA_MASK 0x0001 /* AIF1DAC2_NG_ENA */ +#define WM8958_AIF1DAC2_NG_ENA_SHIFT 0 /* AIF1DAC2_NG_ENA */ +#define WM8958_AIF1DAC2_NG_ENA_WIDTH 1 /* AIF1DAC2_NG_ENA */ + +/* + * R1088 (0x440) - AIF1 DRC1 (1) + */ +#define WM8994_AIF1DRC1_SIG_DET_RMS_MASK 0xF800 /* AIF1DRC1_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF1DRC1_SIG_DET_RMS_SHIFT 11 /* AIF1DRC1_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF1DRC1_SIG_DET_RMS_WIDTH 5 /* AIF1DRC1_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF1DRC1_SIG_DET_PK_MASK 0x0600 /* AIF1DRC1_SIG_DET_PK - [10:9] */ +#define WM8994_AIF1DRC1_SIG_DET_PK_SHIFT 9 /* AIF1DRC1_SIG_DET_PK - [10:9] */ +#define WM8994_AIF1DRC1_SIG_DET_PK_WIDTH 2 /* AIF1DRC1_SIG_DET_PK - [10:9] */ +#define WM8994_AIF1DRC1_NG_ENA 0x0100 /* AIF1DRC1_NG_ENA */ +#define WM8994_AIF1DRC1_NG_ENA_MASK 0x0100 /* AIF1DRC1_NG_ENA */ +#define WM8994_AIF1DRC1_NG_ENA_SHIFT 8 /* AIF1DRC1_NG_ENA */ +#define WM8994_AIF1DRC1_NG_ENA_WIDTH 1 /* AIF1DRC1_NG_ENA */ +#define WM8994_AIF1DRC1_SIG_DET_MODE 0x0080 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8994_AIF1DRC1_SIG_DET_MODE_MASK 0x0080 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8994_AIF1DRC1_SIG_DET_MODE_SHIFT 7 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8994_AIF1DRC1_SIG_DET_MODE_WIDTH 1 /* AIF1DRC1_SIG_DET_MODE */ +#define WM8994_AIF1DRC1_SIG_DET 0x0040 /* AIF1DRC1_SIG_DET */ +#define WM8994_AIF1DRC1_SIG_DET_MASK 0x0040 /* AIF1DRC1_SIG_DET */ +#define WM8994_AIF1DRC1_SIG_DET_SHIFT 6 /* AIF1DRC1_SIG_DET */ +#define WM8994_AIF1DRC1_SIG_DET_WIDTH 1 /* AIF1DRC1_SIG_DET */ +#define WM8994_AIF1DRC1_KNEE2_OP_ENA 0x0020 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC1_KNEE2_OP_ENA_MASK 0x0020 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC1_KNEE2_OP_ENA_SHIFT 5 /* AIF1DRC1_KNEE2_OP_ENA */ +#define 
WM8994_AIF1DRC1_KNEE2_OP_ENA_WIDTH 1 /* AIF1DRC1_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC1_QR 0x0010 /* AIF1DRC1_QR */ +#define WM8994_AIF1DRC1_QR_MASK 0x0010 /* AIF1DRC1_QR */ +#define WM8994_AIF1DRC1_QR_SHIFT 4 /* AIF1DRC1_QR */ +#define WM8994_AIF1DRC1_QR_WIDTH 1 /* AIF1DRC1_QR */ +#define WM8994_AIF1DRC1_ANTICLIP 0x0008 /* AIF1DRC1_ANTICLIP */ +#define WM8994_AIF1DRC1_ANTICLIP_MASK 0x0008 /* AIF1DRC1_ANTICLIP */ +#define WM8994_AIF1DRC1_ANTICLIP_SHIFT 3 /* AIF1DRC1_ANTICLIP */ +#define WM8994_AIF1DRC1_ANTICLIP_WIDTH 1 /* AIF1DRC1_ANTICLIP */ +#define WM8994_AIF1DAC1_DRC_ENA 0x0004 /* AIF1DAC1_DRC_ENA */ +#define WM8994_AIF1DAC1_DRC_ENA_MASK 0x0004 /* AIF1DAC1_DRC_ENA */ +#define WM8994_AIF1DAC1_DRC_ENA_SHIFT 2 /* AIF1DAC1_DRC_ENA */ +#define WM8994_AIF1DAC1_DRC_ENA_WIDTH 1 /* AIF1DAC1_DRC_ENA */ +#define WM8994_AIF1ADC1L_DRC_ENA 0x0002 /* AIF1ADC1L_DRC_ENA */ +#define WM8994_AIF1ADC1L_DRC_ENA_MASK 0x0002 /* AIF1ADC1L_DRC_ENA */ +#define WM8994_AIF1ADC1L_DRC_ENA_SHIFT 1 /* AIF1ADC1L_DRC_ENA */ +#define WM8994_AIF1ADC1L_DRC_ENA_WIDTH 1 /* AIF1ADC1L_DRC_ENA */ +#define WM8994_AIF1ADC1R_DRC_ENA 0x0001 /* AIF1ADC1R_DRC_ENA */ +#define WM8994_AIF1ADC1R_DRC_ENA_MASK 0x0001 /* AIF1ADC1R_DRC_ENA */ +#define WM8994_AIF1ADC1R_DRC_ENA_SHIFT 0 /* AIF1ADC1R_DRC_ENA */ +#define WM8994_AIF1ADC1R_DRC_ENA_WIDTH 1 /* AIF1ADC1R_DRC_ENA */ + +/* + * R1089 (0x441) - AIF1 DRC1 (2) + */ +#define WM8994_AIF1DRC1_ATK_MASK 0x1E00 /* AIF1DRC1_ATK - [12:9] */ +#define WM8994_AIF1DRC1_ATK_SHIFT 9 /* AIF1DRC1_ATK - [12:9] */ +#define WM8994_AIF1DRC1_ATK_WIDTH 4 /* AIF1DRC1_ATK - [12:9] */ +#define WM8994_AIF1DRC1_DCY_MASK 0x01E0 /* AIF1DRC1_DCY - [8:5] */ +#define WM8994_AIF1DRC1_DCY_SHIFT 5 /* AIF1DRC1_DCY - [8:5] */ +#define WM8994_AIF1DRC1_DCY_WIDTH 4 /* AIF1DRC1_DCY - [8:5] */ +#define WM8994_AIF1DRC1_MINGAIN_MASK 0x001C /* AIF1DRC1_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC1_MINGAIN_SHIFT 2 /* AIF1DRC1_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC1_MINGAIN_WIDTH 3 /* AIF1DRC1_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC1_MAXGAIN_MASK 0x0003 /* AIF1DRC1_MAXGAIN - [1:0] */ +#define WM8994_AIF1DRC1_MAXGAIN_SHIFT 0 /* AIF1DRC1_MAXGAIN - [1:0] */ +#define WM8994_AIF1DRC1_MAXGAIN_WIDTH 2 /* AIF1DRC1_MAXGAIN - [1:0] */ + +/* + * R1090 (0x442) - AIF1 DRC1 (3) + */ +#define WM8994_AIF1DRC1_NG_MINGAIN_MASK 0xF000 /* AIF1DRC1_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC1_NG_MINGAIN_SHIFT 12 /* AIF1DRC1_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC1_NG_MINGAIN_WIDTH 4 /* AIF1DRC1_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC1_NG_EXP_MASK 0x0C00 /* AIF1DRC1_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC1_NG_EXP_SHIFT 10 /* AIF1DRC1_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC1_NG_EXP_WIDTH 2 /* AIF1DRC1_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC1_QR_THR_MASK 0x0300 /* AIF1DRC1_QR_THR - [9:8] */ +#define WM8994_AIF1DRC1_QR_THR_SHIFT 8 /* AIF1DRC1_QR_THR - [9:8] */ +#define WM8994_AIF1DRC1_QR_THR_WIDTH 2 /* AIF1DRC1_QR_THR - [9:8] */ +#define WM8994_AIF1DRC1_QR_DCY_MASK 0x00C0 /* AIF1DRC1_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC1_QR_DCY_SHIFT 6 /* AIF1DRC1_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC1_QR_DCY_WIDTH 2 /* AIF1DRC1_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC1_HI_COMP_MASK 0x0038 /* AIF1DRC1_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC1_HI_COMP_SHIFT 3 /* AIF1DRC1_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC1_HI_COMP_WIDTH 3 /* AIF1DRC1_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC1_LO_COMP_MASK 0x0007 /* AIF1DRC1_LO_COMP - [2:0] */ +#define WM8994_AIF1DRC1_LO_COMP_SHIFT 0 /* AIF1DRC1_LO_COMP - [2:0] */ +#define 
WM8994_AIF1DRC1_LO_COMP_WIDTH 3 /* AIF1DRC1_LO_COMP - [2:0] */ + +/* + * R1091 (0x443) - AIF1 DRC1 (4) + */ +#define WM8994_AIF1DRC1_KNEE_IP_MASK 0x07E0 /* AIF1DRC1_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC1_KNEE_IP_SHIFT 5 /* AIF1DRC1_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC1_KNEE_IP_WIDTH 6 /* AIF1DRC1_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC1_KNEE_OP_MASK 0x001F /* AIF1DRC1_KNEE_OP - [4:0] */ +#define WM8994_AIF1DRC1_KNEE_OP_SHIFT 0 /* AIF1DRC1_KNEE_OP - [4:0] */ +#define WM8994_AIF1DRC1_KNEE_OP_WIDTH 5 /* AIF1DRC1_KNEE_OP - [4:0] */ + +/* + * R1092 (0x444) - AIF1 DRC1 (5) + */ +#define WM8994_AIF1DRC1_KNEE2_IP_MASK 0x03E0 /* AIF1DRC1_KNEE2_IP - [9:5] */ +#define WM8994_AIF1DRC1_KNEE2_IP_SHIFT 5 /* AIF1DRC1_KNEE2_IP - [9:5] */ +#define WM8994_AIF1DRC1_KNEE2_IP_WIDTH 5 /* AIF1DRC1_KNEE2_IP - [9:5] */ +#define WM8994_AIF1DRC1_KNEE2_OP_MASK 0x001F /* AIF1DRC1_KNEE2_OP - [4:0] */ +#define WM8994_AIF1DRC1_KNEE2_OP_SHIFT 0 /* AIF1DRC1_KNEE2_OP - [4:0] */ +#define WM8994_AIF1DRC1_KNEE2_OP_WIDTH 5 /* AIF1DRC1_KNEE2_OP - [4:0] */ + +/* + * R1104 (0x450) - AIF1 DRC2 (1) + */ +#define WM8994_AIF1DRC2_SIG_DET_RMS_MASK 0xF800 /* AIF1DRC2_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF1DRC2_SIG_DET_RMS_SHIFT 11 /* AIF1DRC2_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF1DRC2_SIG_DET_RMS_WIDTH 5 /* AIF1DRC2_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF1DRC2_SIG_DET_PK_MASK 0x0600 /* AIF1DRC2_SIG_DET_PK - [10:9] */ +#define WM8994_AIF1DRC2_SIG_DET_PK_SHIFT 9 /* AIF1DRC2_SIG_DET_PK - [10:9] */ +#define WM8994_AIF1DRC2_SIG_DET_PK_WIDTH 2 /* AIF1DRC2_SIG_DET_PK - [10:9] */ +#define WM8994_AIF1DRC2_NG_ENA 0x0100 /* AIF1DRC2_NG_ENA */ +#define WM8994_AIF1DRC2_NG_ENA_MASK 0x0100 /* AIF1DRC2_NG_ENA */ +#define WM8994_AIF1DRC2_NG_ENA_SHIFT 8 /* AIF1DRC2_NG_ENA */ +#define WM8994_AIF1DRC2_NG_ENA_WIDTH 1 /* AIF1DRC2_NG_ENA */ +#define WM8994_AIF1DRC2_SIG_DET_MODE 0x0080 /* AIF1DRC2_SIG_DET_MODE */ +#define WM8994_AIF1DRC2_SIG_DET_MODE_MASK 0x0080 /* AIF1DRC2_SIG_DET_MODE */ +#define WM8994_AIF1DRC2_SIG_DET_MODE_SHIFT 7 /* AIF1DRC2_SIG_DET_MODE */ +#define WM8994_AIF1DRC2_SIG_DET_MODE_WIDTH 1 /* AIF1DRC2_SIG_DET_MODE */ +#define WM8994_AIF1DRC2_SIG_DET 0x0040 /* AIF1DRC2_SIG_DET */ +#define WM8994_AIF1DRC2_SIG_DET_MASK 0x0040 /* AIF1DRC2_SIG_DET */ +#define WM8994_AIF1DRC2_SIG_DET_SHIFT 6 /* AIF1DRC2_SIG_DET */ +#define WM8994_AIF1DRC2_SIG_DET_WIDTH 1 /* AIF1DRC2_SIG_DET */ +#define WM8994_AIF1DRC2_KNEE2_OP_ENA 0x0020 /* AIF1DRC2_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC2_KNEE2_OP_ENA_MASK 0x0020 /* AIF1DRC2_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC2_KNEE2_OP_ENA_SHIFT 5 /* AIF1DRC2_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC2_KNEE2_OP_ENA_WIDTH 1 /* AIF1DRC2_KNEE2_OP_ENA */ +#define WM8994_AIF1DRC2_QR 0x0010 /* AIF1DRC2_QR */ +#define WM8994_AIF1DRC2_QR_MASK 0x0010 /* AIF1DRC2_QR */ +#define WM8994_AIF1DRC2_QR_SHIFT 4 /* AIF1DRC2_QR */ +#define WM8994_AIF1DRC2_QR_WIDTH 1 /* AIF1DRC2_QR */ +#define WM8994_AIF1DRC2_ANTICLIP 0x0008 /* AIF1DRC2_ANTICLIP */ +#define WM8994_AIF1DRC2_ANTICLIP_MASK 0x0008 /* AIF1DRC2_ANTICLIP */ +#define WM8994_AIF1DRC2_ANTICLIP_SHIFT 3 /* AIF1DRC2_ANTICLIP */ +#define WM8994_AIF1DRC2_ANTICLIP_WIDTH 1 /* AIF1DRC2_ANTICLIP */ +#define WM8994_AIF1DAC2_DRC_ENA 0x0004 /* AIF1DAC2_DRC_ENA */ +#define WM8994_AIF1DAC2_DRC_ENA_MASK 0x0004 /* AIF1DAC2_DRC_ENA */ +#define WM8994_AIF1DAC2_DRC_ENA_SHIFT 2 /* AIF1DAC2_DRC_ENA */ +#define WM8994_AIF1DAC2_DRC_ENA_WIDTH 1 /* AIF1DAC2_DRC_ENA */ +#define WM8994_AIF1ADC2L_DRC_ENA 0x0002 /* AIF1ADC2L_DRC_ENA */ +#define WM8994_AIF1ADC2L_DRC_ENA_MASK 0x0002 /* 
AIF1ADC2L_DRC_ENA */ +#define WM8994_AIF1ADC2L_DRC_ENA_SHIFT 1 /* AIF1ADC2L_DRC_ENA */ +#define WM8994_AIF1ADC2L_DRC_ENA_WIDTH 1 /* AIF1ADC2L_DRC_ENA */ +#define WM8994_AIF1ADC2R_DRC_ENA 0x0001 /* AIF1ADC2R_DRC_ENA */ +#define WM8994_AIF1ADC2R_DRC_ENA_MASK 0x0001 /* AIF1ADC2R_DRC_ENA */ +#define WM8994_AIF1ADC2R_DRC_ENA_SHIFT 0 /* AIF1ADC2R_DRC_ENA */ +#define WM8994_AIF1ADC2R_DRC_ENA_WIDTH 1 /* AIF1ADC2R_DRC_ENA */ + +/* + * R1105 (0x451) - AIF1 DRC2 (2) + */ +#define WM8994_AIF1DRC2_ATK_MASK 0x1E00 /* AIF1DRC2_ATK - [12:9] */ +#define WM8994_AIF1DRC2_ATK_SHIFT 9 /* AIF1DRC2_ATK - [12:9] */ +#define WM8994_AIF1DRC2_ATK_WIDTH 4 /* AIF1DRC2_ATK - [12:9] */ +#define WM8994_AIF1DRC2_DCY_MASK 0x01E0 /* AIF1DRC2_DCY - [8:5] */ +#define WM8994_AIF1DRC2_DCY_SHIFT 5 /* AIF1DRC2_DCY - [8:5] */ +#define WM8994_AIF1DRC2_DCY_WIDTH 4 /* AIF1DRC2_DCY - [8:5] */ +#define WM8994_AIF1DRC2_MINGAIN_MASK 0x001C /* AIF1DRC2_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC2_MINGAIN_SHIFT 2 /* AIF1DRC2_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC2_MINGAIN_WIDTH 3 /* AIF1DRC2_MINGAIN - [4:2] */ +#define WM8994_AIF1DRC2_MAXGAIN_MASK 0x0003 /* AIF1DRC2_MAXGAIN - [1:0] */ +#define WM8994_AIF1DRC2_MAXGAIN_SHIFT 0 /* AIF1DRC2_MAXGAIN - [1:0] */ +#define WM8994_AIF1DRC2_MAXGAIN_WIDTH 2 /* AIF1DRC2_MAXGAIN - [1:0] */ + +/* + * R1106 (0x452) - AIF1 DRC2 (3) + */ +#define WM8994_AIF1DRC2_NG_MINGAIN_MASK 0xF000 /* AIF1DRC2_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC2_NG_MINGAIN_SHIFT 12 /* AIF1DRC2_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC2_NG_MINGAIN_WIDTH 4 /* AIF1DRC2_NG_MINGAIN - [15:12] */ +#define WM8994_AIF1DRC2_NG_EXP_MASK 0x0C00 /* AIF1DRC2_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC2_NG_EXP_SHIFT 10 /* AIF1DRC2_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC2_NG_EXP_WIDTH 2 /* AIF1DRC2_NG_EXP - [11:10] */ +#define WM8994_AIF1DRC2_QR_THR_MASK 0x0300 /* AIF1DRC2_QR_THR - [9:8] */ +#define WM8994_AIF1DRC2_QR_THR_SHIFT 8 /* AIF1DRC2_QR_THR - [9:8] */ +#define WM8994_AIF1DRC2_QR_THR_WIDTH 2 /* AIF1DRC2_QR_THR - [9:8] */ +#define WM8994_AIF1DRC2_QR_DCY_MASK 0x00C0 /* AIF1DRC2_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC2_QR_DCY_SHIFT 6 /* AIF1DRC2_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC2_QR_DCY_WIDTH 2 /* AIF1DRC2_QR_DCY - [7:6] */ +#define WM8994_AIF1DRC2_HI_COMP_MASK 0x0038 /* AIF1DRC2_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC2_HI_COMP_SHIFT 3 /* AIF1DRC2_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC2_HI_COMP_WIDTH 3 /* AIF1DRC2_HI_COMP - [5:3] */ +#define WM8994_AIF1DRC2_LO_COMP_MASK 0x0007 /* AIF1DRC2_LO_COMP - [2:0] */ +#define WM8994_AIF1DRC2_LO_COMP_SHIFT 0 /* AIF1DRC2_LO_COMP - [2:0] */ +#define WM8994_AIF1DRC2_LO_COMP_WIDTH 3 /* AIF1DRC2_LO_COMP - [2:0] */ + +/* + * R1107 (0x453) - AIF1 DRC2 (4) + */ +#define WM8994_AIF1DRC2_KNEE_IP_MASK 0x07E0 /* AIF1DRC2_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC2_KNEE_IP_SHIFT 5 /* AIF1DRC2_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC2_KNEE_IP_WIDTH 6 /* AIF1DRC2_KNEE_IP - [10:5] */ +#define WM8994_AIF1DRC2_KNEE_OP_MASK 0x001F /* AIF1DRC2_KNEE_OP - [4:0] */ +#define WM8994_AIF1DRC2_KNEE_OP_SHIFT 0 /* AIF1DRC2_KNEE_OP - [4:0] */ +#define WM8994_AIF1DRC2_KNEE_OP_WIDTH 5 /* AIF1DRC2_KNEE_OP - [4:0] */ + +/* + * R1108 (0x454) - AIF1 DRC2 (5) + */ +#define WM8994_AIF1DRC2_KNEE2_IP_MASK 0x03E0 /* AIF1DRC2_KNEE2_IP - [9:5] */ +#define WM8994_AIF1DRC2_KNEE2_IP_SHIFT 5 /* AIF1DRC2_KNEE2_IP - [9:5] */ +#define WM8994_AIF1DRC2_KNEE2_IP_WIDTH 5 /* AIF1DRC2_KNEE2_IP - [9:5] */ +#define WM8994_AIF1DRC2_KNEE2_OP_MASK 0x001F /* AIF1DRC2_KNEE2_OP - [4:0] */ +#define 
WM8994_AIF1DRC2_KNEE2_OP_SHIFT 0 /* AIF1DRC2_KNEE2_OP - [4:0] */ +#define WM8994_AIF1DRC2_KNEE2_OP_WIDTH 5 /* AIF1DRC2_KNEE2_OP - [4:0] */ + +/* + * R1152 (0x480) - AIF1 DAC1 EQ Gains (1) + */ +#define WM8994_AIF1DAC1_EQ_B1_GAIN_MASK 0xF800 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B1_GAIN_SHIFT 11 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B1_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B2_GAIN_MASK 0x07C0 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B2_GAIN_SHIFT 6 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B2_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B3_GAIN_MASK 0x003E /* AIF1DAC1_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC1_EQ_B3_GAIN_SHIFT 1 /* AIF1DAC1_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC1_EQ_B3_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC1_EQ_ENA 0x0001 /* AIF1DAC1_EQ_ENA */ +#define WM8994_AIF1DAC1_EQ_ENA_MASK 0x0001 /* AIF1DAC1_EQ_ENA */ +#define WM8994_AIF1DAC1_EQ_ENA_SHIFT 0 /* AIF1DAC1_EQ_ENA */ +#define WM8994_AIF1DAC1_EQ_ENA_WIDTH 1 /* AIF1DAC1_EQ_ENA */ + +/* + * R1153 (0x481) - AIF1 DAC1 EQ Gains (2) + */ +#define WM8994_AIF1DAC1_EQ_B4_GAIN_MASK 0xF800 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B4_GAIN_SHIFT 11 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B4_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC1_EQ_B5_GAIN_MASK 0x07C0 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B5_GAIN_SHIFT 6 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF1DAC1_EQ_B5_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */ + +/* + * R1154 (0x482) - AIF1 DAC1 EQ Band 1 A + */ +#define WM8994_AIF1DAC1_EQ_B1_A_MASK 0xFFFF /* AIF1DAC1_EQ_B1_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_A_SHIFT 0 /* AIF1DAC1_EQ_B1_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_A_WIDTH 16 /* AIF1DAC1_EQ_B1_A - [15:0] */ + +/* + * R1155 (0x483) - AIF1 DAC1 EQ Band 1 B + */ +#define WM8994_AIF1DAC1_EQ_B1_B_MASK 0xFFFF /* AIF1DAC1_EQ_B1_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_B_SHIFT 0 /* AIF1DAC1_EQ_B1_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_B_WIDTH 16 /* AIF1DAC1_EQ_B1_B - [15:0] */ + +/* + * R1156 (0x484) - AIF1 DAC1 EQ Band 1 PG + */ +#define WM8994_AIF1DAC1_EQ_B1_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B1_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_PG_SHIFT 0 /* AIF1DAC1_EQ_B1_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B1_PG_WIDTH 16 /* AIF1DAC1_EQ_B1_PG - [15:0] */ + +/* + * R1157 (0x485) - AIF1 DAC1 EQ Band 2 A + */ +#define WM8994_AIF1DAC1_EQ_B2_A_MASK 0xFFFF /* AIF1DAC1_EQ_B2_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_A_SHIFT 0 /* AIF1DAC1_EQ_B2_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_A_WIDTH 16 /* AIF1DAC1_EQ_B2_A - [15:0] */ + +/* + * R1158 (0x486) - AIF1 DAC1 EQ Band 2 B + */ +#define WM8994_AIF1DAC1_EQ_B2_B_MASK 0xFFFF /* AIF1DAC1_EQ_B2_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_B_SHIFT 0 /* AIF1DAC1_EQ_B2_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_B_WIDTH 16 /* AIF1DAC1_EQ_B2_B - [15:0] */ + +/* + * R1159 (0x487) - AIF1 DAC1 EQ Band 2 C + */ +#define WM8994_AIF1DAC1_EQ_B2_C_MASK 0xFFFF /* AIF1DAC1_EQ_B2_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_C_SHIFT 0 /* AIF1DAC1_EQ_B2_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_C_WIDTH 16 /* AIF1DAC1_EQ_B2_C - [15:0] */ + +/* + * R1160 (0x488) - AIF1 DAC1 EQ Band 2 PG + */ +#define WM8994_AIF1DAC1_EQ_B2_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B2_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_PG_SHIFT 0 /* 
AIF1DAC1_EQ_B2_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B2_PG_WIDTH 16 /* AIF1DAC1_EQ_B2_PG - [15:0] */ + +/* + * R1161 (0x489) - AIF1 DAC1 EQ Band 3 A + */ +#define WM8994_AIF1DAC1_EQ_B3_A_MASK 0xFFFF /* AIF1DAC1_EQ_B3_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_A_SHIFT 0 /* AIF1DAC1_EQ_B3_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_A_WIDTH 16 /* AIF1DAC1_EQ_B3_A - [15:0] */ + +/* + * R1162 (0x48A) - AIF1 DAC1 EQ Band 3 B + */ +#define WM8994_AIF1DAC1_EQ_B3_B_MASK 0xFFFF /* AIF1DAC1_EQ_B3_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_B_SHIFT 0 /* AIF1DAC1_EQ_B3_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_B_WIDTH 16 /* AIF1DAC1_EQ_B3_B - [15:0] */ + +/* + * R1163 (0x48B) - AIF1 DAC1 EQ Band 3 C + */ +#define WM8994_AIF1DAC1_EQ_B3_C_MASK 0xFFFF /* AIF1DAC1_EQ_B3_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_C_SHIFT 0 /* AIF1DAC1_EQ_B3_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_C_WIDTH 16 /* AIF1DAC1_EQ_B3_C - [15:0] */ + +/* + * R1164 (0x48C) - AIF1 DAC1 EQ Band 3 PG + */ +#define WM8994_AIF1DAC1_EQ_B3_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B3_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_PG_SHIFT 0 /* AIF1DAC1_EQ_B3_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B3_PG_WIDTH 16 /* AIF1DAC1_EQ_B3_PG - [15:0] */ + +/* + * R1165 (0x48D) - AIF1 DAC1 EQ Band 4 A + */ +#define WM8994_AIF1DAC1_EQ_B4_A_MASK 0xFFFF /* AIF1DAC1_EQ_B4_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_A_SHIFT 0 /* AIF1DAC1_EQ_B4_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_A_WIDTH 16 /* AIF1DAC1_EQ_B4_A - [15:0] */ + +/* + * R1166 (0x48E) - AIF1 DAC1 EQ Band 4 B + */ +#define WM8994_AIF1DAC1_EQ_B4_B_MASK 0xFFFF /* AIF1DAC1_EQ_B4_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_B_SHIFT 0 /* AIF1DAC1_EQ_B4_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_B_WIDTH 16 /* AIF1DAC1_EQ_B4_B - [15:0] */ + +/* + * R1167 (0x48F) - AIF1 DAC1 EQ Band 4 C + */ +#define WM8994_AIF1DAC1_EQ_B4_C_MASK 0xFFFF /* AIF1DAC1_EQ_B4_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_C_SHIFT 0 /* AIF1DAC1_EQ_B4_C - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_C_WIDTH 16 /* AIF1DAC1_EQ_B4_C - [15:0] */ + +/* + * R1168 (0x490) - AIF1 DAC1 EQ Band 4 PG + */ +#define WM8994_AIF1DAC1_EQ_B4_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B4_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_PG_SHIFT 0 /* AIF1DAC1_EQ_B4_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B4_PG_WIDTH 16 /* AIF1DAC1_EQ_B4_PG - [15:0] */ + +/* + * R1169 (0x491) - AIF1 DAC1 EQ Band 5 A + */ +#define WM8994_AIF1DAC1_EQ_B5_A_MASK 0xFFFF /* AIF1DAC1_EQ_B5_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_A_SHIFT 0 /* AIF1DAC1_EQ_B5_A - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_A_WIDTH 16 /* AIF1DAC1_EQ_B5_A - [15:0] */ + +/* + * R1170 (0x492) - AIF1 DAC1 EQ Band 5 B + */ +#define WM8994_AIF1DAC1_EQ_B5_B_MASK 0xFFFF /* AIF1DAC1_EQ_B5_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_B_SHIFT 0 /* AIF1DAC1_EQ_B5_B - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_B_WIDTH 16 /* AIF1DAC1_EQ_B5_B - [15:0] */ + +/* + * R1171 (0x493) - AIF1 DAC1 EQ Band 5 PG + */ +#define WM8994_AIF1DAC1_EQ_B5_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B5_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_PG_SHIFT 0 /* AIF1DAC1_EQ_B5_PG - [15:0] */ +#define WM8994_AIF1DAC1_EQ_B5_PG_WIDTH 16 /* AIF1DAC1_EQ_B5_PG - [15:0] */ + +/* + * R1184 (0x4A0) - AIF1 DAC2 EQ Gains (1) + */ +#define WM8994_AIF1DAC2_EQ_B1_GAIN_MASK 0xF800 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B1_GAIN_SHIFT 11 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B1_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B2_GAIN_MASK 0x07C0 /* AIF1DAC2_EQ_B2_GAIN - [10:6] 
*/ +#define WM8994_AIF1DAC2_EQ_B2_GAIN_SHIFT 6 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC2_EQ_B2_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF1DAC2_EQ_B3_GAIN_MASK 0x003E /* AIF1DAC2_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC2_EQ_B3_GAIN_SHIFT 1 /* AIF1DAC2_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC2_EQ_B3_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF1DAC2_EQ_ENA 0x0001 /* AIF1DAC2_EQ_ENA */ +#define WM8994_AIF1DAC2_EQ_ENA_MASK 0x0001 /* AIF1DAC2_EQ_ENA */ +#define WM8994_AIF1DAC2_EQ_ENA_SHIFT 0 /* AIF1DAC2_EQ_ENA */ +#define WM8994_AIF1DAC2_EQ_ENA_WIDTH 1 /* AIF1DAC2_EQ_ENA */ + +/* + * R1185 (0x4A1) - AIF1 DAC2 EQ Gains (2) + */ +#define WM8994_AIF1DAC2_EQ_B4_GAIN_MASK 0xF800 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B4_GAIN_SHIFT 11 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B4_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF1DAC2_EQ_B5_GAIN_MASK 0x07C0 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF1DAC2_EQ_B5_GAIN_SHIFT 6 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF1DAC2_EQ_B5_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */ + +/* + * R1186 (0x4A2) - AIF1 DAC2 EQ Band 1 A + */ +#define WM8994_AIF1DAC2_EQ_B1_A_MASK 0xFFFF /* AIF1DAC2_EQ_B1_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_A_SHIFT 0 /* AIF1DAC2_EQ_B1_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_A_WIDTH 16 /* AIF1DAC2_EQ_B1_A - [15:0] */ + +/* + * R1187 (0x4A3) - AIF1 DAC2 EQ Band 1 B + */ +#define WM8994_AIF1DAC2_EQ_B1_B_MASK 0xFFFF /* AIF1DAC2_EQ_B1_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_B_SHIFT 0 /* AIF1DAC2_EQ_B1_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_B_WIDTH 16 /* AIF1DAC2_EQ_B1_B - [15:0] */ + +/* + * R1188 (0x4A4) - AIF1 DAC2 EQ Band 1 PG + */ +#define WM8994_AIF1DAC2_EQ_B1_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B1_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_PG_SHIFT 0 /* AIF1DAC2_EQ_B1_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B1_PG_WIDTH 16 /* AIF1DAC2_EQ_B1_PG - [15:0] */ + +/* + * R1189 (0x4A5) - AIF1 DAC2 EQ Band 2 A + */ +#define WM8994_AIF1DAC2_EQ_B2_A_MASK 0xFFFF /* AIF1DAC2_EQ_B2_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_A_SHIFT 0 /* AIF1DAC2_EQ_B2_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_A_WIDTH 16 /* AIF1DAC2_EQ_B2_A - [15:0] */ + +/* + * R1190 (0x4A6) - AIF1 DAC2 EQ Band 2 B + */ +#define WM8994_AIF1DAC2_EQ_B2_B_MASK 0xFFFF /* AIF1DAC2_EQ_B2_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_B_SHIFT 0 /* AIF1DAC2_EQ_B2_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_B_WIDTH 16 /* AIF1DAC2_EQ_B2_B - [15:0] */ + +/* + * R1191 (0x4A7) - AIF1 DAC2 EQ Band 2 C + */ +#define WM8994_AIF1DAC2_EQ_B2_C_MASK 0xFFFF /* AIF1DAC2_EQ_B2_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_C_SHIFT 0 /* AIF1DAC2_EQ_B2_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_C_WIDTH 16 /* AIF1DAC2_EQ_B2_C - [15:0] */ + +/* + * R1192 (0x4A8) - AIF1 DAC2 EQ Band 2 PG + */ +#define WM8994_AIF1DAC2_EQ_B2_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B2_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_PG_SHIFT 0 /* AIF1DAC2_EQ_B2_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B2_PG_WIDTH 16 /* AIF1DAC2_EQ_B2_PG - [15:0] */ + +/* + * R1193 (0x4A9) - AIF1 DAC2 EQ Band 3 A + */ +#define WM8994_AIF1DAC2_EQ_B3_A_MASK 0xFFFF /* AIF1DAC2_EQ_B3_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_A_SHIFT 0 /* AIF1DAC2_EQ_B3_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_A_WIDTH 16 /* AIF1DAC2_EQ_B3_A - [15:0] */ + +/* + * R1194 (0x4AA) - AIF1 DAC2 EQ Band 3 B + */ +#define WM8994_AIF1DAC2_EQ_B3_B_MASK 0xFFFF /* AIF1DAC2_EQ_B3_B - [15:0] */ 
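Every bit field in this register map follows the same _MASK / _SHIFT / _WIDTH convention: the mask selects the field within the 16-bit register and the shift gives its lowest bit position. A minimal sketch of how driver code might combine these macros with the kernel regmap API follows. It is illustrative only: the example_* helpers are hypothetical, the struct regmap handle is assumed to be provided by the WM8994 core driver, the include path of this header is assumed, and the raw address 0x402 (R1026, AIF1 DAC1 Left Volume, documented above) stands in for a named register-address constant.

#include <linux/regmap.h>
#include <linux/mfd/wm8994/registers.h>	/* assumed install path of this header */

/*
 * Write the 8-bit AIF1 DAC1 left volume field (R1026, 0x402) and latch it
 * by setting the volume-update (VU) bit in the same register write.
 */
static int example_set_aif1dac1l_vol(struct regmap *regmap, unsigned int vol)
{
	unsigned int val;

	/* Place the value in the field and keep it inside the mask. */
	val = (vol << WM8994_AIF1DAC1L_VOL_SHIFT) & WM8994_AIF1DAC1L_VOL_MASK;

	return regmap_update_bits(regmap, 0x402,
				  WM8994_AIF1DAC1L_VOL_MASK | WM8994_AIF1DAC1_VU,
				  val | WM8994_AIF1DAC1_VU);
}

/* Read the same field back: mask out the other bits, then shift down. */
static int example_get_aif1dac1l_vol(struct regmap *regmap, unsigned int *vol)
{
	unsigned int reg;
	int ret;

	ret = regmap_read(regmap, 0x402, &reg);
	if (ret)
		return ret;

	*vol = (reg & WM8994_AIF1DAC1L_VOL_MASK) >> WM8994_AIF1DAC1L_VOL_SHIFT;
	return 0;
}

The same pattern applies to every field defined in this file; only the register address and the macro prefix change.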
+#define WM8994_AIF1DAC2_EQ_B3_B_SHIFT 0 /* AIF1DAC2_EQ_B3_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_B_WIDTH 16 /* AIF1DAC2_EQ_B3_B - [15:0] */ + +/* + * R1195 (0x4AB) - AIF1 DAC2 EQ Band 3 C + */ +#define WM8994_AIF1DAC2_EQ_B3_C_MASK 0xFFFF /* AIF1DAC2_EQ_B3_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_C_SHIFT 0 /* AIF1DAC2_EQ_B3_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_C_WIDTH 16 /* AIF1DAC2_EQ_B3_C - [15:0] */ + +/* + * R1196 (0x4AC) - AIF1 DAC2 EQ Band 3 PG + */ +#define WM8994_AIF1DAC2_EQ_B3_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B3_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_PG_SHIFT 0 /* AIF1DAC2_EQ_B3_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B3_PG_WIDTH 16 /* AIF1DAC2_EQ_B3_PG - [15:0] */ + +/* + * R1197 (0x4AD) - AIF1 DAC2 EQ Band 4 A + */ +#define WM8994_AIF1DAC2_EQ_B4_A_MASK 0xFFFF /* AIF1DAC2_EQ_B4_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_A_SHIFT 0 /* AIF1DAC2_EQ_B4_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_A_WIDTH 16 /* AIF1DAC2_EQ_B4_A - [15:0] */ + +/* + * R1198 (0x4AE) - AIF1 DAC2 EQ Band 4 B + */ +#define WM8994_AIF1DAC2_EQ_B4_B_MASK 0xFFFF /* AIF1DAC2_EQ_B4_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_B_SHIFT 0 /* AIF1DAC2_EQ_B4_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_B_WIDTH 16 /* AIF1DAC2_EQ_B4_B - [15:0] */ + +/* + * R1199 (0x4AF) - AIF1 DAC2 EQ Band 4 C + */ +#define WM8994_AIF1DAC2_EQ_B4_C_MASK 0xFFFF /* AIF1DAC2_EQ_B4_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_C_SHIFT 0 /* AIF1DAC2_EQ_B4_C - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_C_WIDTH 16 /* AIF1DAC2_EQ_B4_C - [15:0] */ + +/* + * R1200 (0x4B0) - AIF1 DAC2 EQ Band 4 PG + */ +#define WM8994_AIF1DAC2_EQ_B4_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B4_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_PG_SHIFT 0 /* AIF1DAC2_EQ_B4_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B4_PG_WIDTH 16 /* AIF1DAC2_EQ_B4_PG - [15:0] */ + +/* + * R1201 (0x4B1) - AIF1 DAC2 EQ Band 5 A + */ +#define WM8994_AIF1DAC2_EQ_B5_A_MASK 0xFFFF /* AIF1DAC2_EQ_B5_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_A_SHIFT 0 /* AIF1DAC2_EQ_B5_A - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_A_WIDTH 16 /* AIF1DAC2_EQ_B5_A - [15:0] */ + +/* + * R1202 (0x4B2) - AIF1 DAC2 EQ Band 5 B + */ +#define WM8994_AIF1DAC2_EQ_B5_B_MASK 0xFFFF /* AIF1DAC2_EQ_B5_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_B_SHIFT 0 /* AIF1DAC2_EQ_B5_B - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_B_WIDTH 16 /* AIF1DAC2_EQ_B5_B - [15:0] */ + +/* + * R1203 (0x4B3) - AIF1 DAC2 EQ Band 5 PG + */ +#define WM8994_AIF1DAC2_EQ_B5_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B5_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_PG_SHIFT 0 /* AIF1DAC2_EQ_B5_PG - [15:0] */ +#define WM8994_AIF1DAC2_EQ_B5_PG_WIDTH 16 /* AIF1DAC2_EQ_B5_PG - [15:0] */ + +/* + * R1280 (0x500) - AIF2 ADC Left Volume + */ +#define WM8994_AIF2ADC_VU 0x0100 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_MASK 0x0100 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_SHIFT 8 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_WIDTH 1 /* AIF2ADC_VU */ +#define WM8994_AIF2ADCL_VOL_MASK 0x00FF /* AIF2ADCL_VOL - [7:0] */ +#define WM8994_AIF2ADCL_VOL_SHIFT 0 /* AIF2ADCL_VOL - [7:0] */ +#define WM8994_AIF2ADCL_VOL_WIDTH 8 /* AIF2ADCL_VOL - [7:0] */ + +/* + * R1281 (0x501) - AIF2 ADC Right Volume + */ +#define WM8994_AIF2ADC_VU 0x0100 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_MASK 0x0100 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_SHIFT 8 /* AIF2ADC_VU */ +#define WM8994_AIF2ADC_VU_WIDTH 1 /* AIF2ADC_VU */ +#define WM8994_AIF2ADCR_VOL_MASK 0x00FF /* AIF2ADCR_VOL - [7:0] */ +#define WM8994_AIF2ADCR_VOL_SHIFT 0 /* AIF2ADCR_VOL - [7:0] */ +#define WM8994_AIF2ADCR_VOL_WIDTH 8 
/* AIF2ADCR_VOL - [7:0] */ + +/* + * R1282 (0x502) - AIF2 DAC Left Volume + */ +#define WM8994_AIF2DAC_VU 0x0100 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_MASK 0x0100 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_SHIFT 8 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_WIDTH 1 /* AIF2DAC_VU */ +#define WM8994_AIF2DACL_VOL_MASK 0x00FF /* AIF2DACL_VOL - [7:0] */ +#define WM8994_AIF2DACL_VOL_SHIFT 0 /* AIF2DACL_VOL - [7:0] */ +#define WM8994_AIF2DACL_VOL_WIDTH 8 /* AIF2DACL_VOL - [7:0] */ + +/* + * R1283 (0x503) - AIF2 DAC Right Volume + */ +#define WM8994_AIF2DAC_VU 0x0100 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_MASK 0x0100 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_SHIFT 8 /* AIF2DAC_VU */ +#define WM8994_AIF2DAC_VU_WIDTH 1 /* AIF2DAC_VU */ +#define WM8994_AIF2DACR_VOL_MASK 0x00FF /* AIF2DACR_VOL - [7:0] */ +#define WM8994_AIF2DACR_VOL_SHIFT 0 /* AIF2DACR_VOL - [7:0] */ +#define WM8994_AIF2DACR_VOL_WIDTH 8 /* AIF2DACR_VOL - [7:0] */ + +/* + * R1296 (0x510) - AIF2 ADC Filters + */ +#define WM8994_AIF2ADC_4FS 0x8000 /* AIF2ADC_4FS */ +#define WM8994_AIF2ADC_4FS_MASK 0x8000 /* AIF2ADC_4FS */ +#define WM8994_AIF2ADC_4FS_SHIFT 15 /* AIF2ADC_4FS */ +#define WM8994_AIF2ADC_4FS_WIDTH 1 /* AIF2ADC_4FS */ +#define WM8994_AIF2ADC_HPF_CUT_MASK 0x6000 /* AIF2ADC_HPF_CUT - [14:13] */ +#define WM8994_AIF2ADC_HPF_CUT_SHIFT 13 /* AIF2ADC_HPF_CUT - [14:13] */ +#define WM8994_AIF2ADC_HPF_CUT_WIDTH 2 /* AIF2ADC_HPF_CUT - [14:13] */ +#define WM8994_AIF2ADCL_HPF 0x1000 /* AIF2ADCL_HPF */ +#define WM8994_AIF2ADCL_HPF_MASK 0x1000 /* AIF2ADCL_HPF */ +#define WM8994_AIF2ADCL_HPF_SHIFT 12 /* AIF2ADCL_HPF */ +#define WM8994_AIF2ADCL_HPF_WIDTH 1 /* AIF2ADCL_HPF */ +#define WM8994_AIF2ADCR_HPF 0x0800 /* AIF2ADCR_HPF */ +#define WM8994_AIF2ADCR_HPF_MASK 0x0800 /* AIF2ADCR_HPF */ +#define WM8994_AIF2ADCR_HPF_SHIFT 11 /* AIF2ADCR_HPF */ +#define WM8994_AIF2ADCR_HPF_WIDTH 1 /* AIF2ADCR_HPF */ + +/* + * R1312 (0x520) - AIF2 DAC Filters (1) + */ +#define WM8994_AIF2DAC_MUTE 0x0200 /* AIF2DAC_MUTE */ +#define WM8994_AIF2DAC_MUTE_MASK 0x0200 /* AIF2DAC_MUTE */ +#define WM8994_AIF2DAC_MUTE_SHIFT 9 /* AIF2DAC_MUTE */ +#define WM8994_AIF2DAC_MUTE_WIDTH 1 /* AIF2DAC_MUTE */ +#define WM8994_AIF2DAC_MONO 0x0080 /* AIF2DAC_MONO */ +#define WM8994_AIF2DAC_MONO_MASK 0x0080 /* AIF2DAC_MONO */ +#define WM8994_AIF2DAC_MONO_SHIFT 7 /* AIF2DAC_MONO */ +#define WM8994_AIF2DAC_MONO_WIDTH 1 /* AIF2DAC_MONO */ +#define WM8994_AIF2DAC_MUTERATE 0x0020 /* AIF2DAC_MUTERATE */ +#define WM8994_AIF2DAC_MUTERATE_MASK 0x0020 /* AIF2DAC_MUTERATE */ +#define WM8994_AIF2DAC_MUTERATE_SHIFT 5 /* AIF2DAC_MUTERATE */ +#define WM8994_AIF2DAC_MUTERATE_WIDTH 1 /* AIF2DAC_MUTERATE */ +#define WM8994_AIF2DAC_UNMUTE_RAMP 0x0010 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8994_AIF2DAC_UNMUTE_RAMP_MASK 0x0010 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8994_AIF2DAC_UNMUTE_RAMP_SHIFT 4 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8994_AIF2DAC_UNMUTE_RAMP_WIDTH 1 /* AIF2DAC_UNMUTE_RAMP */ +#define WM8994_AIF2DAC_DEEMP_MASK 0x0006 /* AIF2DAC_DEEMP - [2:1] */ +#define WM8994_AIF2DAC_DEEMP_SHIFT 1 /* AIF2DAC_DEEMP - [2:1] */ +#define WM8994_AIF2DAC_DEEMP_WIDTH 2 /* AIF2DAC_DEEMP - [2:1] */ + +/* + * R1313 (0x521) - AIF2 DAC Filters (2) + */ +#define WM8994_AIF2DAC_3D_GAIN_MASK 0x3E00 /* AIF2DAC_3D_GAIN - [13:9] */ +#define WM8994_AIF2DAC_3D_GAIN_SHIFT 9 /* AIF2DAC_3D_GAIN - [13:9] */ +#define WM8994_AIF2DAC_3D_GAIN_WIDTH 5 /* AIF2DAC_3D_GAIN - [13:9] */ +#define WM8994_AIF2DAC_3D_ENA 0x0100 /* AIF2DAC_3D_ENA */ +#define WM8994_AIF2DAC_3D_ENA_MASK 0x0100 /* AIF2DAC_3D_ENA */ +#define 
WM8994_AIF2DAC_3D_ENA_SHIFT 8 /* AIF2DAC_3D_ENA */ +#define WM8994_AIF2DAC_3D_ENA_WIDTH 1 /* AIF2DAC_3D_ENA */ + +/* + * R1328 (0x530) - AIF2 DAC Noise Gate + */ +#define WM8958_AIF2DAC_NG_HLD_MASK 0x0060 /* AIF2DAC_NG_HLD - [6:5] */ +#define WM8958_AIF2DAC_NG_HLD_SHIFT 5 /* AIF2DAC_NG_HLD - [6:5] */ +#define WM8958_AIF2DAC_NG_HLD_WIDTH 2 /* AIF2DAC_NG_HLD - [6:5] */ +#define WM8958_AIF2DAC_NG_THR_MASK 0x000E /* AIF2DAC_NG_THR - [3:1] */ +#define WM8958_AIF2DAC_NG_THR_SHIFT 1 /* AIF2DAC_NG_THR - [3:1] */ +#define WM8958_AIF2DAC_NG_THR_WIDTH 3 /* AIF2DAC_NG_THR - [3:1] */ +#define WM8958_AIF2DAC_NG_ENA 0x0001 /* AIF2DAC_NG_ENA */ +#define WM8958_AIF2DAC_NG_ENA_MASK 0x0001 /* AIF2DAC_NG_ENA */ +#define WM8958_AIF2DAC_NG_ENA_SHIFT 0 /* AIF2DAC_NG_ENA */ +#define WM8958_AIF2DAC_NG_ENA_WIDTH 1 /* AIF2DAC_NG_ENA */ + +/* + * R1344 (0x540) - AIF2 DRC (1) + */ +#define WM8994_AIF2DRC_SIG_DET_RMS_MASK 0xF800 /* AIF2DRC_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF2DRC_SIG_DET_RMS_SHIFT 11 /* AIF2DRC_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF2DRC_SIG_DET_RMS_WIDTH 5 /* AIF2DRC_SIG_DET_RMS - [15:11] */ +#define WM8994_AIF2DRC_SIG_DET_PK_MASK 0x0600 /* AIF2DRC_SIG_DET_PK - [10:9] */ +#define WM8994_AIF2DRC_SIG_DET_PK_SHIFT 9 /* AIF2DRC_SIG_DET_PK - [10:9] */ +#define WM8994_AIF2DRC_SIG_DET_PK_WIDTH 2 /* AIF2DRC_SIG_DET_PK - [10:9] */ +#define WM8994_AIF2DRC_NG_ENA 0x0100 /* AIF2DRC_NG_ENA */ +#define WM8994_AIF2DRC_NG_ENA_MASK 0x0100 /* AIF2DRC_NG_ENA */ +#define WM8994_AIF2DRC_NG_ENA_SHIFT 8 /* AIF2DRC_NG_ENA */ +#define WM8994_AIF2DRC_NG_ENA_WIDTH 1 /* AIF2DRC_NG_ENA */ +#define WM8994_AIF2DRC_SIG_DET_MODE 0x0080 /* AIF2DRC_SIG_DET_MODE */ +#define WM8994_AIF2DRC_SIG_DET_MODE_MASK 0x0080 /* AIF2DRC_SIG_DET_MODE */ +#define WM8994_AIF2DRC_SIG_DET_MODE_SHIFT 7 /* AIF2DRC_SIG_DET_MODE */ +#define WM8994_AIF2DRC_SIG_DET_MODE_WIDTH 1 /* AIF2DRC_SIG_DET_MODE */ +#define WM8994_AIF2DRC_SIG_DET 0x0040 /* AIF2DRC_SIG_DET */ +#define WM8994_AIF2DRC_SIG_DET_MASK 0x0040 /* AIF2DRC_SIG_DET */ +#define WM8994_AIF2DRC_SIG_DET_SHIFT 6 /* AIF2DRC_SIG_DET */ +#define WM8994_AIF2DRC_SIG_DET_WIDTH 1 /* AIF2DRC_SIG_DET */ +#define WM8994_AIF2DRC_KNEE2_OP_ENA 0x0020 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8994_AIF2DRC_KNEE2_OP_ENA_MASK 0x0020 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8994_AIF2DRC_KNEE2_OP_ENA_SHIFT 5 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8994_AIF2DRC_KNEE2_OP_ENA_WIDTH 1 /* AIF2DRC_KNEE2_OP_ENA */ +#define WM8994_AIF2DRC_QR 0x0010 /* AIF2DRC_QR */ +#define WM8994_AIF2DRC_QR_MASK 0x0010 /* AIF2DRC_QR */ +#define WM8994_AIF2DRC_QR_SHIFT 4 /* AIF2DRC_QR */ +#define WM8994_AIF2DRC_QR_WIDTH 1 /* AIF2DRC_QR */ +#define WM8994_AIF2DRC_ANTICLIP 0x0008 /* AIF2DRC_ANTICLIP */ +#define WM8994_AIF2DRC_ANTICLIP_MASK 0x0008 /* AIF2DRC_ANTICLIP */ +#define WM8994_AIF2DRC_ANTICLIP_SHIFT 3 /* AIF2DRC_ANTICLIP */ +#define WM8994_AIF2DRC_ANTICLIP_WIDTH 1 /* AIF2DRC_ANTICLIP */ +#define WM8994_AIF2DAC_DRC_ENA 0x0004 /* AIF2DAC_DRC_ENA */ +#define WM8994_AIF2DAC_DRC_ENA_MASK 0x0004 /* AIF2DAC_DRC_ENA */ +#define WM8994_AIF2DAC_DRC_ENA_SHIFT 2 /* AIF2DAC_DRC_ENA */ +#define WM8994_AIF2DAC_DRC_ENA_WIDTH 1 /* AIF2DAC_DRC_ENA */ +#define WM8994_AIF2ADCL_DRC_ENA 0x0002 /* AIF2ADCL_DRC_ENA */ +#define WM8994_AIF2ADCL_DRC_ENA_MASK 0x0002 /* AIF2ADCL_DRC_ENA */ +#define WM8994_AIF2ADCL_DRC_ENA_SHIFT 1 /* AIF2ADCL_DRC_ENA */ +#define WM8994_AIF2ADCL_DRC_ENA_WIDTH 1 /* AIF2ADCL_DRC_ENA */ +#define WM8994_AIF2ADCR_DRC_ENA 0x0001 /* AIF2ADCR_DRC_ENA */ +#define WM8994_AIF2ADCR_DRC_ENA_MASK 0x0001 /* AIF2ADCR_DRC_ENA */ +#define 
WM8994_AIF2ADCR_DRC_ENA_SHIFT 0 /* AIF2ADCR_DRC_ENA */ +#define WM8994_AIF2ADCR_DRC_ENA_WIDTH 1 /* AIF2ADCR_DRC_ENA */ + +/* + * R1345 (0x541) - AIF2 DRC (2) + */ +#define WM8994_AIF2DRC_ATK_MASK 0x1E00 /* AIF2DRC_ATK - [12:9] */ +#define WM8994_AIF2DRC_ATK_SHIFT 9 /* AIF2DRC_ATK - [12:9] */ +#define WM8994_AIF2DRC_ATK_WIDTH 4 /* AIF2DRC_ATK - [12:9] */ +#define WM8994_AIF2DRC_DCY_MASK 0x01E0 /* AIF2DRC_DCY - [8:5] */ +#define WM8994_AIF2DRC_DCY_SHIFT 5 /* AIF2DRC_DCY - [8:5] */ +#define WM8994_AIF2DRC_DCY_WIDTH 4 /* AIF2DRC_DCY - [8:5] */ +#define WM8994_AIF2DRC_MINGAIN_MASK 0x001C /* AIF2DRC_MINGAIN - [4:2] */ +#define WM8994_AIF2DRC_MINGAIN_SHIFT 2 /* AIF2DRC_MINGAIN - [4:2] */ +#define WM8994_AIF2DRC_MINGAIN_WIDTH 3 /* AIF2DRC_MINGAIN - [4:2] */ +#define WM8994_AIF2DRC_MAXGAIN_MASK 0x0003 /* AIF2DRC_MAXGAIN - [1:0] */ +#define WM8994_AIF2DRC_MAXGAIN_SHIFT 0 /* AIF2DRC_MAXGAIN - [1:0] */ +#define WM8994_AIF2DRC_MAXGAIN_WIDTH 2 /* AIF2DRC_MAXGAIN - [1:0] */ + +/* + * R1346 (0x542) - AIF2 DRC (3) + */ +#define WM8994_AIF2DRC_NG_MINGAIN_MASK 0xF000 /* AIF2DRC_NG_MINGAIN - [15:12] */ +#define WM8994_AIF2DRC_NG_MINGAIN_SHIFT 12 /* AIF2DRC_NG_MINGAIN - [15:12] */ +#define WM8994_AIF2DRC_NG_MINGAIN_WIDTH 4 /* AIF2DRC_NG_MINGAIN - [15:12] */ +#define WM8994_AIF2DRC_NG_EXP_MASK 0x0C00 /* AIF2DRC_NG_EXP - [11:10] */ +#define WM8994_AIF2DRC_NG_EXP_SHIFT 10 /* AIF2DRC_NG_EXP - [11:10] */ +#define WM8994_AIF2DRC_NG_EXP_WIDTH 2 /* AIF2DRC_NG_EXP - [11:10] */ +#define WM8994_AIF2DRC_QR_THR_MASK 0x0300 /* AIF2DRC_QR_THR - [9:8] */ +#define WM8994_AIF2DRC_QR_THR_SHIFT 8 /* AIF2DRC_QR_THR - [9:8] */ +#define WM8994_AIF2DRC_QR_THR_WIDTH 2 /* AIF2DRC_QR_THR - [9:8] */ +#define WM8994_AIF2DRC_QR_DCY_MASK 0x00C0 /* AIF2DRC_QR_DCY - [7:6] */ +#define WM8994_AIF2DRC_QR_DCY_SHIFT 6 /* AIF2DRC_QR_DCY - [7:6] */ +#define WM8994_AIF2DRC_QR_DCY_WIDTH 2 /* AIF2DRC_QR_DCY - [7:6] */ +#define WM8994_AIF2DRC_HI_COMP_MASK 0x0038 /* AIF2DRC_HI_COMP - [5:3] */ +#define WM8994_AIF2DRC_HI_COMP_SHIFT 3 /* AIF2DRC_HI_COMP - [5:3] */ +#define WM8994_AIF2DRC_HI_COMP_WIDTH 3 /* AIF2DRC_HI_COMP - [5:3] */ +#define WM8994_AIF2DRC_LO_COMP_MASK 0x0007 /* AIF2DRC_LO_COMP - [2:0] */ +#define WM8994_AIF2DRC_LO_COMP_SHIFT 0 /* AIF2DRC_LO_COMP - [2:0] */ +#define WM8994_AIF2DRC_LO_COMP_WIDTH 3 /* AIF2DRC_LO_COMP - [2:0] */ + +/* + * R1347 (0x543) - AIF2 DRC (4) + */ +#define WM8994_AIF2DRC_KNEE_IP_MASK 0x07E0 /* AIF2DRC_KNEE_IP - [10:5] */ +#define WM8994_AIF2DRC_KNEE_IP_SHIFT 5 /* AIF2DRC_KNEE_IP - [10:5] */ +#define WM8994_AIF2DRC_KNEE_IP_WIDTH 6 /* AIF2DRC_KNEE_IP - [10:5] */ +#define WM8994_AIF2DRC_KNEE_OP_MASK 0x001F /* AIF2DRC_KNEE_OP - [4:0] */ +#define WM8994_AIF2DRC_KNEE_OP_SHIFT 0 /* AIF2DRC_KNEE_OP - [4:0] */ +#define WM8994_AIF2DRC_KNEE_OP_WIDTH 5 /* AIF2DRC_KNEE_OP - [4:0] */ + +/* + * R1348 (0x544) - AIF2 DRC (5) + */ +#define WM8994_AIF2DRC_KNEE2_IP_MASK 0x03E0 /* AIF2DRC_KNEE2_IP - [9:5] */ +#define WM8994_AIF2DRC_KNEE2_IP_SHIFT 5 /* AIF2DRC_KNEE2_IP - [9:5] */ +#define WM8994_AIF2DRC_KNEE2_IP_WIDTH 5 /* AIF2DRC_KNEE2_IP - [9:5] */ +#define WM8994_AIF2DRC_KNEE2_OP_MASK 0x001F /* AIF2DRC_KNEE2_OP - [4:0] */ +#define WM8994_AIF2DRC_KNEE2_OP_SHIFT 0 /* AIF2DRC_KNEE2_OP - [4:0] */ +#define WM8994_AIF2DRC_KNEE2_OP_WIDTH 5 /* AIF2DRC_KNEE2_OP - [4:0] */ + +/* + * R1408 (0x580) - AIF2 EQ Gains (1) + */ +#define WM8994_AIF2DAC_EQ_B1_GAIN_MASK 0xF800 /* AIF2DAC_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B1_GAIN_SHIFT 11 /* AIF2DAC_EQ_B1_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B1_GAIN_WIDTH 5 /* AIF2DAC_EQ_B1_GAIN - 
[15:11] */ +#define WM8994_AIF2DAC_EQ_B2_GAIN_MASK 0x07C0 /* AIF2DAC_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B2_GAIN_SHIFT 6 /* AIF2DAC_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B2_GAIN_WIDTH 5 /* AIF2DAC_EQ_B2_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B3_GAIN_MASK 0x003E /* AIF2DAC_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF2DAC_EQ_B3_GAIN_SHIFT 1 /* AIF2DAC_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF2DAC_EQ_B3_GAIN_WIDTH 5 /* AIF2DAC_EQ_B3_GAIN - [5:1] */ +#define WM8994_AIF2DAC_EQ_ENA 0x0001 /* AIF2DAC_EQ_ENA */ +#define WM8994_AIF2DAC_EQ_ENA_MASK 0x0001 /* AIF2DAC_EQ_ENA */ +#define WM8994_AIF2DAC_EQ_ENA_SHIFT 0 /* AIF2DAC_EQ_ENA */ +#define WM8994_AIF2DAC_EQ_ENA_WIDTH 1 /* AIF2DAC_EQ_ENA */ + +/* + * R1409 (0x581) - AIF2 EQ Gains (2) + */ +#define WM8994_AIF2DAC_EQ_B4_GAIN_MASK 0xF800 /* AIF2DAC_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B4_GAIN_SHIFT 11 /* AIF2DAC_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B4_GAIN_WIDTH 5 /* AIF2DAC_EQ_B4_GAIN - [15:11] */ +#define WM8994_AIF2DAC_EQ_B5_GAIN_MASK 0x07C0 /* AIF2DAC_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B5_GAIN_SHIFT 6 /* AIF2DAC_EQ_B5_GAIN - [10:6] */ +#define WM8994_AIF2DAC_EQ_B5_GAIN_WIDTH 5 /* AIF2DAC_EQ_B5_GAIN - [10:6] */ + +/* + * R1410 (0x582) - AIF2 EQ Band 1 A + */ +#define WM8994_AIF2DAC_EQ_B1_A_MASK 0xFFFF /* AIF2DAC_EQ_B1_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_A_SHIFT 0 /* AIF2DAC_EQ_B1_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_A_WIDTH 16 /* AIF2DAC_EQ_B1_A - [15:0] */ + +/* + * R1411 (0x583) - AIF2 EQ Band 1 B + */ +#define WM8994_AIF2DAC_EQ_B1_B_MASK 0xFFFF /* AIF2DAC_EQ_B1_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_B_SHIFT 0 /* AIF2DAC_EQ_B1_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_B_WIDTH 16 /* AIF2DAC_EQ_B1_B - [15:0] */ + +/* + * R1412 (0x584) - AIF2 EQ Band 1 PG + */ +#define WM8994_AIF2DAC_EQ_B1_PG_MASK 0xFFFF /* AIF2DAC_EQ_B1_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_PG_SHIFT 0 /* AIF2DAC_EQ_B1_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B1_PG_WIDTH 16 /* AIF2DAC_EQ_B1_PG - [15:0] */ + +/* + * R1413 (0x585) - AIF2 EQ Band 2 A + */ +#define WM8994_AIF2DAC_EQ_B2_A_MASK 0xFFFF /* AIF2DAC_EQ_B2_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_A_SHIFT 0 /* AIF2DAC_EQ_B2_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_A_WIDTH 16 /* AIF2DAC_EQ_B2_A - [15:0] */ + +/* + * R1414 (0x586) - AIF2 EQ Band 2 B + */ +#define WM8994_AIF2DAC_EQ_B2_B_MASK 0xFFFF /* AIF2DAC_EQ_B2_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_B_SHIFT 0 /* AIF2DAC_EQ_B2_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_B_WIDTH 16 /* AIF2DAC_EQ_B2_B - [15:0] */ + +/* + * R1415 (0x587) - AIF2 EQ Band 2 C + */ +#define WM8994_AIF2DAC_EQ_B2_C_MASK 0xFFFF /* AIF2DAC_EQ_B2_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_C_SHIFT 0 /* AIF2DAC_EQ_B2_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_C_WIDTH 16 /* AIF2DAC_EQ_B2_C - [15:0] */ + +/* + * R1416 (0x588) - AIF2 EQ Band 2 PG + */ +#define WM8994_AIF2DAC_EQ_B2_PG_MASK 0xFFFF /* AIF2DAC_EQ_B2_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_PG_SHIFT 0 /* AIF2DAC_EQ_B2_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B2_PG_WIDTH 16 /* AIF2DAC_EQ_B2_PG - [15:0] */ + +/* + * R1417 (0x589) - AIF2 EQ Band 3 A + */ +#define WM8994_AIF2DAC_EQ_B3_A_MASK 0xFFFF /* AIF2DAC_EQ_B3_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_A_SHIFT 0 /* AIF2DAC_EQ_B3_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_A_WIDTH 16 /* AIF2DAC_EQ_B3_A - [15:0] */ + +/* + * R1418 (0x58A) - AIF2 EQ Band 3 B + */ +#define WM8994_AIF2DAC_EQ_B3_B_MASK 0xFFFF /* AIF2DAC_EQ_B3_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_B_SHIFT 0 /* 
AIF2DAC_EQ_B3_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_B_WIDTH 16 /* AIF2DAC_EQ_B3_B - [15:0] */ + +/* + * R1419 (0x58B) - AIF2 EQ Band 3 C + */ +#define WM8994_AIF2DAC_EQ_B3_C_MASK 0xFFFF /* AIF2DAC_EQ_B3_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_C_SHIFT 0 /* AIF2DAC_EQ_B3_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_C_WIDTH 16 /* AIF2DAC_EQ_B3_C - [15:0] */ + +/* + * R1420 (0x58C) - AIF2 EQ Band 3 PG + */ +#define WM8994_AIF2DAC_EQ_B3_PG_MASK 0xFFFF /* AIF2DAC_EQ_B3_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_PG_SHIFT 0 /* AIF2DAC_EQ_B3_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B3_PG_WIDTH 16 /* AIF2DAC_EQ_B3_PG - [15:0] */ + +/* + * R1421 (0x58D) - AIF2 EQ Band 4 A + */ +#define WM8994_AIF2DAC_EQ_B4_A_MASK 0xFFFF /* AIF2DAC_EQ_B4_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_A_SHIFT 0 /* AIF2DAC_EQ_B4_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_A_WIDTH 16 /* AIF2DAC_EQ_B4_A - [15:0] */ + +/* + * R1422 (0x58E) - AIF2 EQ Band 4 B + */ +#define WM8994_AIF2DAC_EQ_B4_B_MASK 0xFFFF /* AIF2DAC_EQ_B4_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_B_SHIFT 0 /* AIF2DAC_EQ_B4_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_B_WIDTH 16 /* AIF2DAC_EQ_B4_B - [15:0] */ + +/* + * R1423 (0x58F) - AIF2 EQ Band 4 C + */ +#define WM8994_AIF2DAC_EQ_B4_C_MASK 0xFFFF /* AIF2DAC_EQ_B4_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_C_SHIFT 0 /* AIF2DAC_EQ_B4_C - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_C_WIDTH 16 /* AIF2DAC_EQ_B4_C - [15:0] */ + +/* + * R1424 (0x590) - AIF2 EQ Band 4 PG + */ +#define WM8994_AIF2DAC_EQ_B4_PG_MASK 0xFFFF /* AIF2DAC_EQ_B4_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_PG_SHIFT 0 /* AIF2DAC_EQ_B4_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B4_PG_WIDTH 16 /* AIF2DAC_EQ_B4_PG - [15:0] */ + +/* + * R1425 (0x591) - AIF2 EQ Band 5 A + */ +#define WM8994_AIF2DAC_EQ_B5_A_MASK 0xFFFF /* AIF2DAC_EQ_B5_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_A_SHIFT 0 /* AIF2DAC_EQ_B5_A - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_A_WIDTH 16 /* AIF2DAC_EQ_B5_A - [15:0] */ + +/* + * R1426 (0x592) - AIF2 EQ Band 5 B + */ +#define WM8994_AIF2DAC_EQ_B5_B_MASK 0xFFFF /* AIF2DAC_EQ_B5_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_B_SHIFT 0 /* AIF2DAC_EQ_B5_B - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_B_WIDTH 16 /* AIF2DAC_EQ_B5_B - [15:0] */ + +/* + * R1427 (0x593) - AIF2 EQ Band 5 PG + */ +#define WM8994_AIF2DAC_EQ_B5_PG_MASK 0xFFFF /* AIF2DAC_EQ_B5_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_PG_SHIFT 0 /* AIF2DAC_EQ_B5_PG - [15:0] */ +#define WM8994_AIF2DAC_EQ_B5_PG_WIDTH 16 /* AIF2DAC_EQ_B5_PG - [15:0] */ + +/* + * R1536 (0x600) - DAC1 Mixer Volumes + */ +#define WM8994_ADCR_DAC1_VOL_MASK 0x01E0 /* ADCR_DAC1_VOL - [8:5] */ +#define WM8994_ADCR_DAC1_VOL_SHIFT 5 /* ADCR_DAC1_VOL - [8:5] */ +#define WM8994_ADCR_DAC1_VOL_WIDTH 4 /* ADCR_DAC1_VOL - [8:5] */ +#define WM8994_ADCL_DAC1_VOL_MASK 0x000F /* ADCL_DAC1_VOL - [3:0] */ +#define WM8994_ADCL_DAC1_VOL_SHIFT 0 /* ADCL_DAC1_VOL - [3:0] */ +#define WM8994_ADCL_DAC1_VOL_WIDTH 4 /* ADCL_DAC1_VOL - [3:0] */ + +/* + * R1537 (0x601) - DAC1 Left Mixer Routing + */ +#define WM8994_ADCR_TO_DAC1L 0x0020 /* ADCR_TO_DAC1L */ +#define WM8994_ADCR_TO_DAC1L_MASK 0x0020 /* ADCR_TO_DAC1L */ +#define WM8994_ADCR_TO_DAC1L_SHIFT 5 /* ADCR_TO_DAC1L */ +#define WM8994_ADCR_TO_DAC1L_WIDTH 1 /* ADCR_TO_DAC1L */ +#define WM8994_ADCL_TO_DAC1L 0x0010 /* ADCL_TO_DAC1L */ +#define WM8994_ADCL_TO_DAC1L_MASK 0x0010 /* ADCL_TO_DAC1L */ +#define WM8994_ADCL_TO_DAC1L_SHIFT 4 /* ADCL_TO_DAC1L */ +#define WM8994_ADCL_TO_DAC1L_WIDTH 1 /* ADCL_TO_DAC1L */ +#define WM8994_AIF2DACL_TO_DAC1L 0x0004 /* 
AIF2DACL_TO_DAC1L */ +#define WM8994_AIF2DACL_TO_DAC1L_MASK 0x0004 /* AIF2DACL_TO_DAC1L */ +#define WM8994_AIF2DACL_TO_DAC1L_SHIFT 2 /* AIF2DACL_TO_DAC1L */ +#define WM8994_AIF2DACL_TO_DAC1L_WIDTH 1 /* AIF2DACL_TO_DAC1L */ +#define WM8994_AIF1DAC2L_TO_DAC1L 0x0002 /* AIF1DAC2L_TO_DAC1L */ +#define WM8994_AIF1DAC2L_TO_DAC1L_MASK 0x0002 /* AIF1DAC2L_TO_DAC1L */ +#define WM8994_AIF1DAC2L_TO_DAC1L_SHIFT 1 /* AIF1DAC2L_TO_DAC1L */ +#define WM8994_AIF1DAC2L_TO_DAC1L_WIDTH 1 /* AIF1DAC2L_TO_DAC1L */ +#define WM8994_AIF1DAC1L_TO_DAC1L 0x0001 /* AIF1DAC1L_TO_DAC1L */ +#define WM8994_AIF1DAC1L_TO_DAC1L_MASK 0x0001 /* AIF1DAC1L_TO_DAC1L */ +#define WM8994_AIF1DAC1L_TO_DAC1L_SHIFT 0 /* AIF1DAC1L_TO_DAC1L */ +#define WM8994_AIF1DAC1L_TO_DAC1L_WIDTH 1 /* AIF1DAC1L_TO_DAC1L */ + +/* + * R1538 (0x602) - DAC1 Right Mixer Routing + */ +#define WM8994_ADCR_TO_DAC1R 0x0020 /* ADCR_TO_DAC1R */ +#define WM8994_ADCR_TO_DAC1R_MASK 0x0020 /* ADCR_TO_DAC1R */ +#define WM8994_ADCR_TO_DAC1R_SHIFT 5 /* ADCR_TO_DAC1R */ +#define WM8994_ADCR_TO_DAC1R_WIDTH 1 /* ADCR_TO_DAC1R */ +#define WM8994_ADCL_TO_DAC1R 0x0010 /* ADCL_TO_DAC1R */ +#define WM8994_ADCL_TO_DAC1R_MASK 0x0010 /* ADCL_TO_DAC1R */ +#define WM8994_ADCL_TO_DAC1R_SHIFT 4 /* ADCL_TO_DAC1R */ +#define WM8994_ADCL_TO_DAC1R_WIDTH 1 /* ADCL_TO_DAC1R */ +#define WM8994_AIF2DACR_TO_DAC1R 0x0004 /* AIF2DACR_TO_DAC1R */ +#define WM8994_AIF2DACR_TO_DAC1R_MASK 0x0004 /* AIF2DACR_TO_DAC1R */ +#define WM8994_AIF2DACR_TO_DAC1R_SHIFT 2 /* AIF2DACR_TO_DAC1R */ +#define WM8994_AIF2DACR_TO_DAC1R_WIDTH 1 /* AIF2DACR_TO_DAC1R */ +#define WM8994_AIF1DAC2R_TO_DAC1R 0x0002 /* AIF1DAC2R_TO_DAC1R */ +#define WM8994_AIF1DAC2R_TO_DAC1R_MASK 0x0002 /* AIF1DAC2R_TO_DAC1R */ +#define WM8994_AIF1DAC2R_TO_DAC1R_SHIFT 1 /* AIF1DAC2R_TO_DAC1R */ +#define WM8994_AIF1DAC2R_TO_DAC1R_WIDTH 1 /* AIF1DAC2R_TO_DAC1R */ +#define WM8994_AIF1DAC1R_TO_DAC1R 0x0001 /* AIF1DAC1R_TO_DAC1R */ +#define WM8994_AIF1DAC1R_TO_DAC1R_MASK 0x0001 /* AIF1DAC1R_TO_DAC1R */ +#define WM8994_AIF1DAC1R_TO_DAC1R_SHIFT 0 /* AIF1DAC1R_TO_DAC1R */ +#define WM8994_AIF1DAC1R_TO_DAC1R_WIDTH 1 /* AIF1DAC1R_TO_DAC1R */ + +/* + * R1539 (0x603) - DAC2 Mixer Volumes + */ +#define WM8994_ADCR_DAC2_VOL_MASK 0x01E0 /* ADCR_DAC2_VOL - [8:5] */ +#define WM8994_ADCR_DAC2_VOL_SHIFT 5 /* ADCR_DAC2_VOL - [8:5] */ +#define WM8994_ADCR_DAC2_VOL_WIDTH 4 /* ADCR_DAC2_VOL - [8:5] */ +#define WM8994_ADCL_DAC2_VOL_MASK 0x000F /* ADCL_DAC2_VOL - [3:0] */ +#define WM8994_ADCL_DAC2_VOL_SHIFT 0 /* ADCL_DAC2_VOL - [3:0] */ +#define WM8994_ADCL_DAC2_VOL_WIDTH 4 /* ADCL_DAC2_VOL - [3:0] */ + +/* + * R1540 (0x604) - DAC2 Left Mixer Routing + */ +#define WM8994_ADCR_TO_DAC2L 0x0020 /* ADCR_TO_DAC2L */ +#define WM8994_ADCR_TO_DAC2L_MASK 0x0020 /* ADCR_TO_DAC2L */ +#define WM8994_ADCR_TO_DAC2L_SHIFT 5 /* ADCR_TO_DAC2L */ +#define WM8994_ADCR_TO_DAC2L_WIDTH 1 /* ADCR_TO_DAC2L */ +#define WM8994_ADCL_TO_DAC2L 0x0010 /* ADCL_TO_DAC2L */ +#define WM8994_ADCL_TO_DAC2L_MASK 0x0010 /* ADCL_TO_DAC2L */ +#define WM8994_ADCL_TO_DAC2L_SHIFT 4 /* ADCL_TO_DAC2L */ +#define WM8994_ADCL_TO_DAC2L_WIDTH 1 /* ADCL_TO_DAC2L */ +#define WM8994_AIF2DACL_TO_DAC2L 0x0004 /* AIF2DACL_TO_DAC2L */ +#define WM8994_AIF2DACL_TO_DAC2L_MASK 0x0004 /* AIF2DACL_TO_DAC2L */ +#define WM8994_AIF2DACL_TO_DAC2L_SHIFT 2 /* AIF2DACL_TO_DAC2L */ +#define WM8994_AIF2DACL_TO_DAC2L_WIDTH 1 /* AIF2DACL_TO_DAC2L */ +#define WM8994_AIF1DAC2L_TO_DAC2L 0x0002 /* AIF1DAC2L_TO_DAC2L */ +#define WM8994_AIF1DAC2L_TO_DAC2L_MASK 0x0002 /* AIF1DAC2L_TO_DAC2L */ +#define WM8994_AIF1DAC2L_TO_DAC2L_SHIFT 1 /* 
AIF1DAC2L_TO_DAC2L */ +#define WM8994_AIF1DAC2L_TO_DAC2L_WIDTH 1 /* AIF1DAC2L_TO_DAC2L */ +#define WM8994_AIF1DAC1L_TO_DAC2L 0x0001 /* AIF1DAC1L_TO_DAC2L */ +#define WM8994_AIF1DAC1L_TO_DAC2L_MASK 0x0001 /* AIF1DAC1L_TO_DAC2L */ +#define WM8994_AIF1DAC1L_TO_DAC2L_SHIFT 0 /* AIF1DAC1L_TO_DAC2L */ +#define WM8994_AIF1DAC1L_TO_DAC2L_WIDTH 1 /* AIF1DAC1L_TO_DAC2L */ + +/* + * R1541 (0x605) - DAC2 Right Mixer Routing + */ +#define WM8994_ADCR_TO_DAC2R 0x0020 /* ADCR_TO_DAC2R */ +#define WM8994_ADCR_TO_DAC2R_MASK 0x0020 /* ADCR_TO_DAC2R */ +#define WM8994_ADCR_TO_DAC2R_SHIFT 5 /* ADCR_TO_DAC2R */ +#define WM8994_ADCR_TO_DAC2R_WIDTH 1 /* ADCR_TO_DAC2R */ +#define WM8994_ADCL_TO_DAC2R 0x0010 /* ADCL_TO_DAC2R */ +#define WM8994_ADCL_TO_DAC2R_MASK 0x0010 /* ADCL_TO_DAC2R */ +#define WM8994_ADCL_TO_DAC2R_SHIFT 4 /* ADCL_TO_DAC2R */ +#define WM8994_ADCL_TO_DAC2R_WIDTH 1 /* ADCL_TO_DAC2R */ +#define WM8994_AIF2DACR_TO_DAC2R 0x0004 /* AIF2DACR_TO_DAC2R */ +#define WM8994_AIF2DACR_TO_DAC2R_MASK 0x0004 /* AIF2DACR_TO_DAC2R */ +#define WM8994_AIF2DACR_TO_DAC2R_SHIFT 2 /* AIF2DACR_TO_DAC2R */ +#define WM8994_AIF2DACR_TO_DAC2R_WIDTH 1 /* AIF2DACR_TO_DAC2R */ +#define WM8994_AIF1DAC2R_TO_DAC2R 0x0002 /* AIF1DAC2R_TO_DAC2R */ +#define WM8994_AIF1DAC2R_TO_DAC2R_MASK 0x0002 /* AIF1DAC2R_TO_DAC2R */ +#define WM8994_AIF1DAC2R_TO_DAC2R_SHIFT 1 /* AIF1DAC2R_TO_DAC2R */ +#define WM8994_AIF1DAC2R_TO_DAC2R_WIDTH 1 /* AIF1DAC2R_TO_DAC2R */ +#define WM8994_AIF1DAC1R_TO_DAC2R 0x0001 /* AIF1DAC1R_TO_DAC2R */ +#define WM8994_AIF1DAC1R_TO_DAC2R_MASK 0x0001 /* AIF1DAC1R_TO_DAC2R */ +#define WM8994_AIF1DAC1R_TO_DAC2R_SHIFT 0 /* AIF1DAC1R_TO_DAC2R */ +#define WM8994_AIF1DAC1R_TO_DAC2R_WIDTH 1 /* AIF1DAC1R_TO_DAC2R */ + +/* + * R1542 (0x606) - AIF1 ADC1 Left Mixer Routing + */ +#define WM8994_ADC1L_TO_AIF1ADC1L 0x0002 /* ADC1L_TO_AIF1ADC1L */ +#define WM8994_ADC1L_TO_AIF1ADC1L_MASK 0x0002 /* ADC1L_TO_AIF1ADC1L */ +#define WM8994_ADC1L_TO_AIF1ADC1L_SHIFT 1 /* ADC1L_TO_AIF1ADC1L */ +#define WM8994_ADC1L_TO_AIF1ADC1L_WIDTH 1 /* ADC1L_TO_AIF1ADC1L */ +#define WM8994_AIF2DACL_TO_AIF1ADC1L 0x0001 /* AIF2DACL_TO_AIF1ADC1L */ +#define WM8994_AIF2DACL_TO_AIF1ADC1L_MASK 0x0001 /* AIF2DACL_TO_AIF1ADC1L */ +#define WM8994_AIF2DACL_TO_AIF1ADC1L_SHIFT 0 /* AIF2DACL_TO_AIF1ADC1L */ +#define WM8994_AIF2DACL_TO_AIF1ADC1L_WIDTH 1 /* AIF2DACL_TO_AIF1ADC1L */ + +/* + * R1543 (0x607) - AIF1 ADC1 Right Mixer Routing + */ +#define WM8994_ADC1R_TO_AIF1ADC1R 0x0002 /* ADC1R_TO_AIF1ADC1R */ +#define WM8994_ADC1R_TO_AIF1ADC1R_MASK 0x0002 /* ADC1R_TO_AIF1ADC1R */ +#define WM8994_ADC1R_TO_AIF1ADC1R_SHIFT 1 /* ADC1R_TO_AIF1ADC1R */ +#define WM8994_ADC1R_TO_AIF1ADC1R_WIDTH 1 /* ADC1R_TO_AIF1ADC1R */ +#define WM8994_AIF2DACR_TO_AIF1ADC1R 0x0001 /* AIF2DACR_TO_AIF1ADC1R */ +#define WM8994_AIF2DACR_TO_AIF1ADC1R_MASK 0x0001 /* AIF2DACR_TO_AIF1ADC1R */ +#define WM8994_AIF2DACR_TO_AIF1ADC1R_SHIFT 0 /* AIF2DACR_TO_AIF1ADC1R */ +#define WM8994_AIF2DACR_TO_AIF1ADC1R_WIDTH 1 /* AIF2DACR_TO_AIF1ADC1R */ + +/* + * R1544 (0x608) - AIF1 ADC2 Left Mixer Routing + */ +#define WM8994_ADC2L_TO_AIF1ADC2L 0x0002 /* ADC2L_TO_AIF1ADC2L */ +#define WM8994_ADC2L_TO_AIF1ADC2L_MASK 0x0002 /* ADC2L_TO_AIF1ADC2L */ +#define WM8994_ADC2L_TO_AIF1ADC2L_SHIFT 1 /* ADC2L_TO_AIF1ADC2L */ +#define WM8994_ADC2L_TO_AIF1ADC2L_WIDTH 1 /* ADC2L_TO_AIF1ADC2L */ +#define WM8994_AIF2DACL_TO_AIF1ADC2L 0x0001 /* AIF2DACL_TO_AIF1ADC2L */ +#define WM8994_AIF2DACL_TO_AIF1ADC2L_MASK 0x0001 /* AIF2DACL_TO_AIF1ADC2L */ +#define WM8994_AIF2DACL_TO_AIF1ADC2L_SHIFT 0 /* AIF2DACL_TO_AIF1ADC2L */ +#define 
WM8994_AIF2DACL_TO_AIF1ADC2L_WIDTH 1 /* AIF2DACL_TO_AIF1ADC2L */ + +/* + * R1545 (0x609) - AIF1 ADC2 Right mixer Routing + */ +#define WM8994_ADC2R_TO_AIF1ADC2R 0x0002 /* ADC2R_TO_AIF1ADC2R */ +#define WM8994_ADC2R_TO_AIF1ADC2R_MASK 0x0002 /* ADC2R_TO_AIF1ADC2R */ +#define WM8994_ADC2R_TO_AIF1ADC2R_SHIFT 1 /* ADC2R_TO_AIF1ADC2R */ +#define WM8994_ADC2R_TO_AIF1ADC2R_WIDTH 1 /* ADC2R_TO_AIF1ADC2R */ +#define WM8994_AIF2DACR_TO_AIF1ADC2R 0x0001 /* AIF2DACR_TO_AIF1ADC2R */ +#define WM8994_AIF2DACR_TO_AIF1ADC2R_MASK 0x0001 /* AIF2DACR_TO_AIF1ADC2R */ +#define WM8994_AIF2DACR_TO_AIF1ADC2R_SHIFT 0 /* AIF2DACR_TO_AIF1ADC2R */ +#define WM8994_AIF2DACR_TO_AIF1ADC2R_WIDTH 1 /* AIF2DACR_TO_AIF1ADC2R */ + +/* + * R1552 (0x610) - DAC1 Left Volume + */ +#define WM8994_DAC1L_MUTE 0x0200 /* DAC1L_MUTE */ +#define WM8994_DAC1L_MUTE_MASK 0x0200 /* DAC1L_MUTE */ +#define WM8994_DAC1L_MUTE_SHIFT 9 /* DAC1L_MUTE */ +#define WM8994_DAC1L_MUTE_WIDTH 1 /* DAC1L_MUTE */ +#define WM8994_DAC1_VU 0x0100 /* DAC1_VU */ +#define WM8994_DAC1_VU_MASK 0x0100 /* DAC1_VU */ +#define WM8994_DAC1_VU_SHIFT 8 /* DAC1_VU */ +#define WM8994_DAC1_VU_WIDTH 1 /* DAC1_VU */ +#define WM8994_DAC1L_VOL_MASK 0x00FF /* DAC1L_VOL - [7:0] */ +#define WM8994_DAC1L_VOL_SHIFT 0 /* DAC1L_VOL - [7:0] */ +#define WM8994_DAC1L_VOL_WIDTH 8 /* DAC1L_VOL - [7:0] */ + +/* + * R1553 (0x611) - DAC1 Right Volume + */ +#define WM8994_DAC1R_MUTE 0x0200 /* DAC1R_MUTE */ +#define WM8994_DAC1R_MUTE_MASK 0x0200 /* DAC1R_MUTE */ +#define WM8994_DAC1R_MUTE_SHIFT 9 /* DAC1R_MUTE */ +#define WM8994_DAC1R_MUTE_WIDTH 1 /* DAC1R_MUTE */ +#define WM8994_DAC1_VU 0x0100 /* DAC1_VU */ +#define WM8994_DAC1_VU_MASK 0x0100 /* DAC1_VU */ +#define WM8994_DAC1_VU_SHIFT 8 /* DAC1_VU */ +#define WM8994_DAC1_VU_WIDTH 1 /* DAC1_VU */ +#define WM8994_DAC1R_VOL_MASK 0x00FF /* DAC1R_VOL - [7:0] */ +#define WM8994_DAC1R_VOL_SHIFT 0 /* DAC1R_VOL - [7:0] */ +#define WM8994_DAC1R_VOL_WIDTH 8 /* DAC1R_VOL - [7:0] */ + +/* + * R1554 (0x612) - DAC2 Left Volume + */ +#define WM8994_DAC2L_MUTE 0x0200 /* DAC2L_MUTE */ +#define WM8994_DAC2L_MUTE_MASK 0x0200 /* DAC2L_MUTE */ +#define WM8994_DAC2L_MUTE_SHIFT 9 /* DAC2L_MUTE */ +#define WM8994_DAC2L_MUTE_WIDTH 1 /* DAC2L_MUTE */ +#define WM8994_DAC2_VU 0x0100 /* DAC2_VU */ +#define WM8994_DAC2_VU_MASK 0x0100 /* DAC2_VU */ +#define WM8994_DAC2_VU_SHIFT 8 /* DAC2_VU */ +#define WM8994_DAC2_VU_WIDTH 1 /* DAC2_VU */ +#define WM8994_DAC2L_VOL_MASK 0x00FF /* DAC2L_VOL - [7:0] */ +#define WM8994_DAC2L_VOL_SHIFT 0 /* DAC2L_VOL - [7:0] */ +#define WM8994_DAC2L_VOL_WIDTH 8 /* DAC2L_VOL - [7:0] */ + +/* + * R1555 (0x613) - DAC2 Right Volume + */ +#define WM8994_DAC2R_MUTE 0x0200 /* DAC2R_MUTE */ +#define WM8994_DAC2R_MUTE_MASK 0x0200 /* DAC2R_MUTE */ +#define WM8994_DAC2R_MUTE_SHIFT 9 /* DAC2R_MUTE */ +#define WM8994_DAC2R_MUTE_WIDTH 1 /* DAC2R_MUTE */ +#define WM8994_DAC2_VU 0x0100 /* DAC2_VU */ +#define WM8994_DAC2_VU_MASK 0x0100 /* DAC2_VU */ +#define WM8994_DAC2_VU_SHIFT 8 /* DAC2_VU */ +#define WM8994_DAC2_VU_WIDTH 1 /* DAC2_VU */ +#define WM8994_DAC2R_VOL_MASK 0x00FF /* DAC2R_VOL - [7:0] */ +#define WM8994_DAC2R_VOL_SHIFT 0 /* DAC2R_VOL - [7:0] */ +#define WM8994_DAC2R_VOL_WIDTH 8 /* DAC2R_VOL - [7:0] */ + +/* + * R1556 (0x614) - DAC Softmute + */ +#define WM8994_DAC_SOFTMUTEMODE 0x0002 /* DAC_SOFTMUTEMODE */ +#define WM8994_DAC_SOFTMUTEMODE_MASK 0x0002 /* DAC_SOFTMUTEMODE */ +#define WM8994_DAC_SOFTMUTEMODE_SHIFT 1 /* DAC_SOFTMUTEMODE */ +#define WM8994_DAC_SOFTMUTEMODE_WIDTH 1 /* DAC_SOFTMUTEMODE */ +#define WM8994_DAC_MUTERATE 0x0001 /* DAC_MUTERATE 
*/ +#define WM8994_DAC_MUTERATE_MASK 0x0001 /* DAC_MUTERATE */ +#define WM8994_DAC_MUTERATE_SHIFT 0 /* DAC_MUTERATE */ +#define WM8994_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */ + +/* + * R1568 (0x620) - Oversampling + */ +#define WM8994_ADC_OSR128 0x0002 /* ADC_OSR128 */ +#define WM8994_ADC_OSR128_MASK 0x0002 /* ADC_OSR128 */ +#define WM8994_ADC_OSR128_SHIFT 1 /* ADC_OSR128 */ +#define WM8994_ADC_OSR128_WIDTH 1 /* ADC_OSR128 */ +#define WM8994_DAC_OSR128 0x0001 /* DAC_OSR128 */ +#define WM8994_DAC_OSR128_MASK 0x0001 /* DAC_OSR128 */ +#define WM8994_DAC_OSR128_SHIFT 0 /* DAC_OSR128 */ +#define WM8994_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */ + +/* + * R1569 (0x621) - Sidetone + */ +#define WM8994_ST_HPF_CUT_MASK 0x0380 /* ST_HPF_CUT - [9:7] */ +#define WM8994_ST_HPF_CUT_SHIFT 7 /* ST_HPF_CUT - [9:7] */ +#define WM8994_ST_HPF_CUT_WIDTH 3 /* ST_HPF_CUT - [9:7] */ +#define WM8994_ST_HPF 0x0040 /* ST_HPF */ +#define WM8994_ST_HPF_MASK 0x0040 /* ST_HPF */ +#define WM8994_ST_HPF_SHIFT 6 /* ST_HPF */ +#define WM8994_ST_HPF_WIDTH 1 /* ST_HPF */ +#define WM8994_STR_SEL 0x0002 /* STR_SEL */ +#define WM8994_STR_SEL_MASK 0x0002 /* STR_SEL */ +#define WM8994_STR_SEL_SHIFT 1 /* STR_SEL */ +#define WM8994_STR_SEL_WIDTH 1 /* STR_SEL */ +#define WM8994_STL_SEL 0x0001 /* STL_SEL */ +#define WM8994_STL_SEL_MASK 0x0001 /* STL_SEL */ +#define WM8994_STL_SEL_SHIFT 0 /* STL_SEL */ +#define WM8994_STL_SEL_WIDTH 1 /* STL_SEL */ + +/* + * R1797 (0x705) - JACKDET Ctrl + */ +#define WM1811_JACKDET_DB 0x0100 /* JACKDET_DB */ +#define WM1811_JACKDET_DB_MASK 0x0100 /* JACKDET_DB */ +#define WM1811_JACKDET_DB_SHIFT 8 /* JACKDET_DB */ +#define WM1811_JACKDET_DB_WIDTH 1 /* JACKDET_DB */ +#define WM1811_JACKDET_LVL 0x0040 /* JACKDET_LVL */ +#define WM1811_JACKDET_LVL_MASK 0x0040 /* JACKDET_LVL */ +#define WM1811_JACKDET_LVL_SHIFT 6 /* JACKDET_LVL */ +#define WM1811_JACKDET_LVL_WIDTH 1 /* JACKDET_LVL */ + +/* + * R1824 (0x720) - Pull Control (1) + */ +#define WM8994_DMICDAT2_PU 0x0800 /* DMICDAT2_PU */ +#define WM8994_DMICDAT2_PU_MASK 0x0800 /* DMICDAT2_PU */ +#define WM8994_DMICDAT2_PU_SHIFT 11 /* DMICDAT2_PU */ +#define WM8994_DMICDAT2_PU_WIDTH 1 /* DMICDAT2_PU */ +#define WM8994_DMICDAT2_PD 0x0400 /* DMICDAT2_PD */ +#define WM8994_DMICDAT2_PD_MASK 0x0400 /* DMICDAT2_PD */ +#define WM8994_DMICDAT2_PD_SHIFT 10 /* DMICDAT2_PD */ +#define WM8994_DMICDAT2_PD_WIDTH 1 /* DMICDAT2_PD */ +#define WM8994_DMICDAT1_PU 0x0200 /* DMICDAT1_PU */ +#define WM8994_DMICDAT1_PU_MASK 0x0200 /* DMICDAT1_PU */ +#define WM8994_DMICDAT1_PU_SHIFT 9 /* DMICDAT1_PU */ +#define WM8994_DMICDAT1_PU_WIDTH 1 /* DMICDAT1_PU */ +#define WM8994_DMICDAT1_PD 0x0100 /* DMICDAT1_PD */ +#define WM8994_DMICDAT1_PD_MASK 0x0100 /* DMICDAT1_PD */ +#define WM8994_DMICDAT1_PD_SHIFT 8 /* DMICDAT1_PD */ +#define WM8994_DMICDAT1_PD_WIDTH 1 /* DMICDAT1_PD */ +#define WM8994_MCLK1_PU 0x0080 /* MCLK1_PU */ +#define WM8994_MCLK1_PU_MASK 0x0080 /* MCLK1_PU */ +#define WM8994_MCLK1_PU_SHIFT 7 /* MCLK1_PU */ +#define WM8994_MCLK1_PU_WIDTH 1 /* MCLK1_PU */ +#define WM8994_MCLK1_PD 0x0040 /* MCLK1_PD */ +#define WM8994_MCLK1_PD_MASK 0x0040 /* MCLK1_PD */ +#define WM8994_MCLK1_PD_SHIFT 6 /* MCLK1_PD */ +#define WM8994_MCLK1_PD_WIDTH 1 /* MCLK1_PD */ +#define WM8994_DACDAT1_PU 0x0020 /* DACDAT1_PU */ +#define WM8994_DACDAT1_PU_MASK 0x0020 /* DACDAT1_PU */ +#define WM8994_DACDAT1_PU_SHIFT 5 /* DACDAT1_PU */ +#define WM8994_DACDAT1_PU_WIDTH 1 /* DACDAT1_PU */ +#define WM8994_DACDAT1_PD 0x0010 /* DACDAT1_PD */ +#define WM8994_DACDAT1_PD_MASK 0x0010 /* DACDAT1_PD */ +#define 
WM8994_DACDAT1_PD_SHIFT 4 /* DACDAT1_PD */ +#define WM8994_DACDAT1_PD_WIDTH 1 /* DACDAT1_PD */ +#define WM8994_DACLRCLK1_PU 0x0008 /* DACLRCLK1_PU */ +#define WM8994_DACLRCLK1_PU_MASK 0x0008 /* DACLRCLK1_PU */ +#define WM8994_DACLRCLK1_PU_SHIFT 3 /* DACLRCLK1_PU */ +#define WM8994_DACLRCLK1_PU_WIDTH 1 /* DACLRCLK1_PU */ +#define WM8994_DACLRCLK1_PD 0x0004 /* DACLRCLK1_PD */ +#define WM8994_DACLRCLK1_PD_MASK 0x0004 /* DACLRCLK1_PD */ +#define WM8994_DACLRCLK1_PD_SHIFT 2 /* DACLRCLK1_PD */ +#define WM8994_DACLRCLK1_PD_WIDTH 1 /* DACLRCLK1_PD */ +#define WM8994_BCLK1_PU 0x0002 /* BCLK1_PU */ +#define WM8994_BCLK1_PU_MASK 0x0002 /* BCLK1_PU */ +#define WM8994_BCLK1_PU_SHIFT 1 /* BCLK1_PU */ +#define WM8994_BCLK1_PU_WIDTH 1 /* BCLK1_PU */ +#define WM8994_BCLK1_PD 0x0001 /* BCLK1_PD */ +#define WM8994_BCLK1_PD_MASK 0x0001 /* BCLK1_PD */ +#define WM8994_BCLK1_PD_SHIFT 0 /* BCLK1_PD */ +#define WM8994_BCLK1_PD_WIDTH 1 /* BCLK1_PD */ + +/* + * R1825 (0x721) - Pull Control (2) + */ +#define WM8994_CSNADDR_PD 0x0100 /* CSNADDR_PD */ +#define WM8994_CSNADDR_PD_MASK 0x0100 /* CSNADDR_PD */ +#define WM8994_CSNADDR_PD_SHIFT 8 /* CSNADDR_PD */ +#define WM8994_CSNADDR_PD_WIDTH 1 /* CSNADDR_PD */ +#define WM8994_LDO2ENA_PD 0x0040 /* LDO2ENA_PD */ +#define WM8994_LDO2ENA_PD_MASK 0x0040 /* LDO2ENA_PD */ +#define WM8994_LDO2ENA_PD_SHIFT 6 /* LDO2ENA_PD */ +#define WM8994_LDO2ENA_PD_WIDTH 1 /* LDO2ENA_PD */ +#define WM8994_LDO1ENA_PD 0x0010 /* LDO1ENA_PD */ +#define WM8994_LDO1ENA_PD_MASK 0x0010 /* LDO1ENA_PD */ +#define WM8994_LDO1ENA_PD_SHIFT 4 /* LDO1ENA_PD */ +#define WM8994_LDO1ENA_PD_WIDTH 1 /* LDO1ENA_PD */ +#define WM8994_CIFMODE_PD 0x0004 /* CIFMODE_PD */ +#define WM8994_CIFMODE_PD_MASK 0x0004 /* CIFMODE_PD */ +#define WM8994_CIFMODE_PD_SHIFT 2 /* CIFMODE_PD */ +#define WM8994_CIFMODE_PD_WIDTH 1 /* CIFMODE_PD */ +#define WM8994_SPKMODE_PU 0x0002 /* SPKMODE_PU */ +#define WM8994_SPKMODE_PU_MASK 0x0002 /* SPKMODE_PU */ +#define WM8994_SPKMODE_PU_SHIFT 1 /* SPKMODE_PU */ +#define WM8994_SPKMODE_PU_WIDTH 1 /* SPKMODE_PU */ + +/* + * R1840 (0x730) - Interrupt Status 1 + */ +#define WM8994_GP11_EINT 0x0400 /* GP11_EINT */ +#define WM8994_GP11_EINT_MASK 0x0400 /* GP11_EINT */ +#define WM8994_GP11_EINT_SHIFT 10 /* GP11_EINT */ +#define WM8994_GP11_EINT_WIDTH 1 /* GP11_EINT */ +#define WM8994_GP10_EINT 0x0200 /* GP10_EINT */ +#define WM8994_GP10_EINT_MASK 0x0200 /* GP10_EINT */ +#define WM8994_GP10_EINT_SHIFT 9 /* GP10_EINT */ +#define WM8994_GP10_EINT_WIDTH 1 /* GP10_EINT */ +#define WM8994_GP9_EINT 0x0100 /* GP9_EINT */ +#define WM8994_GP9_EINT_MASK 0x0100 /* GP9_EINT */ +#define WM8994_GP9_EINT_SHIFT 8 /* GP9_EINT */ +#define WM8994_GP9_EINT_WIDTH 1 /* GP9_EINT */ +#define WM8994_GP8_EINT 0x0080 /* GP8_EINT */ +#define WM8994_GP8_EINT_MASK 0x0080 /* GP8_EINT */ +#define WM8994_GP8_EINT_SHIFT 7 /* GP8_EINT */ +#define WM8994_GP8_EINT_WIDTH 1 /* GP8_EINT */ +#define WM8994_GP7_EINT 0x0040 /* GP7_EINT */ +#define WM8994_GP7_EINT_MASK 0x0040 /* GP7_EINT */ +#define WM8994_GP7_EINT_SHIFT 6 /* GP7_EINT */ +#define WM8994_GP7_EINT_WIDTH 1 /* GP7_EINT */ +#define WM8994_GP6_EINT 0x0020 /* GP6_EINT */ +#define WM8994_GP6_EINT_MASK 0x0020 /* GP6_EINT */ +#define WM8994_GP6_EINT_SHIFT 5 /* GP6_EINT */ +#define WM8994_GP6_EINT_WIDTH 1 /* GP6_EINT */ +#define WM8994_GP5_EINT 0x0010 /* GP5_EINT */ +#define WM8994_GP5_EINT_MASK 0x0010 /* GP5_EINT */ +#define WM8994_GP5_EINT_SHIFT 4 /* GP5_EINT */ +#define WM8994_GP5_EINT_WIDTH 1 /* GP5_EINT */ +#define WM8994_GP4_EINT 0x0008 /* GP4_EINT */ +#define WM8994_GP4_EINT_MASK 
0x0008 /* GP4_EINT */ +#define WM8994_GP4_EINT_SHIFT 3 /* GP4_EINT */ +#define WM8994_GP4_EINT_WIDTH 1 /* GP4_EINT */ +#define WM8994_GP3_EINT 0x0004 /* GP3_EINT */ +#define WM8994_GP3_EINT_MASK 0x0004 /* GP3_EINT */ +#define WM8994_GP3_EINT_SHIFT 2 /* GP3_EINT */ +#define WM8994_GP3_EINT_WIDTH 1 /* GP3_EINT */ +#define WM8994_GP2_EINT 0x0002 /* GP2_EINT */ +#define WM8994_GP2_EINT_MASK 0x0002 /* GP2_EINT */ +#define WM8994_GP2_EINT_SHIFT 1 /* GP2_EINT */ +#define WM8994_GP2_EINT_WIDTH 1 /* GP2_EINT */ +#define WM8994_GP1_EINT 0x0001 /* GP1_EINT */ +#define WM8994_GP1_EINT_MASK 0x0001 /* GP1_EINT */ +#define WM8994_GP1_EINT_SHIFT 0 /* GP1_EINT */ +#define WM8994_GP1_EINT_WIDTH 1 /* GP1_EINT */ + +/* + * R1841 (0x731) - Interrupt Status 2 + */ +#define WM8994_TEMP_WARN_EINT 0x8000 /* TEMP_WARN_EINT */ +#define WM8994_TEMP_WARN_EINT_MASK 0x8000 /* TEMP_WARN_EINT */ +#define WM8994_TEMP_WARN_EINT_SHIFT 15 /* TEMP_WARN_EINT */ +#define WM8994_TEMP_WARN_EINT_WIDTH 1 /* TEMP_WARN_EINT */ +#define WM8994_DCS_DONE_EINT 0x4000 /* DCS_DONE_EINT */ +#define WM8994_DCS_DONE_EINT_MASK 0x4000 /* DCS_DONE_EINT */ +#define WM8994_DCS_DONE_EINT_SHIFT 14 /* DCS_DONE_EINT */ +#define WM8994_DCS_DONE_EINT_WIDTH 1 /* DCS_DONE_EINT */ +#define WM8994_WSEQ_DONE_EINT 0x2000 /* WSEQ_DONE_EINT */ +#define WM8994_WSEQ_DONE_EINT_MASK 0x2000 /* WSEQ_DONE_EINT */ +#define WM8994_WSEQ_DONE_EINT_SHIFT 13 /* WSEQ_DONE_EINT */ +#define WM8994_WSEQ_DONE_EINT_WIDTH 1 /* WSEQ_DONE_EINT */ +#define WM8994_FIFOS_ERR_EINT 0x1000 /* FIFOS_ERR_EINT */ +#define WM8994_FIFOS_ERR_EINT_MASK 0x1000 /* FIFOS_ERR_EINT */ +#define WM8994_FIFOS_ERR_EINT_SHIFT 12 /* FIFOS_ERR_EINT */ +#define WM8994_FIFOS_ERR_EINT_WIDTH 1 /* FIFOS_ERR_EINT */ +#define WM8994_AIF2DRC_SIG_DET_EINT 0x0800 /* AIF2DRC_SIG_DET_EINT */ +#define WM8994_AIF2DRC_SIG_DET_EINT_MASK 0x0800 /* AIF2DRC_SIG_DET_EINT */ +#define WM8994_AIF2DRC_SIG_DET_EINT_SHIFT 11 /* AIF2DRC_SIG_DET_EINT */ +#define WM8994_AIF2DRC_SIG_DET_EINT_WIDTH 1 /* AIF2DRC_SIG_DET_EINT */ +#define WM8994_AIF1DRC2_SIG_DET_EINT 0x0400 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8994_AIF1DRC2_SIG_DET_EINT_MASK 0x0400 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8994_AIF1DRC2_SIG_DET_EINT_SHIFT 10 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8994_AIF1DRC2_SIG_DET_EINT_WIDTH 1 /* AIF1DRC2_SIG_DET_EINT */ +#define WM8994_AIF1DRC1_SIG_DET_EINT 0x0200 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8994_AIF1DRC1_SIG_DET_EINT_MASK 0x0200 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8994_AIF1DRC1_SIG_DET_EINT_SHIFT 9 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8994_AIF1DRC1_SIG_DET_EINT_WIDTH 1 /* AIF1DRC1_SIG_DET_EINT */ +#define WM8994_SRC2_LOCK_EINT 0x0100 /* SRC2_LOCK_EINT */ +#define WM8994_SRC2_LOCK_EINT_MASK 0x0100 /* SRC2_LOCK_EINT */ +#define WM8994_SRC2_LOCK_EINT_SHIFT 8 /* SRC2_LOCK_EINT */ +#define WM8994_SRC2_LOCK_EINT_WIDTH 1 /* SRC2_LOCK_EINT */ +#define WM8994_SRC1_LOCK_EINT 0x0080 /* SRC1_LOCK_EINT */ +#define WM8994_SRC1_LOCK_EINT_MASK 0x0080 /* SRC1_LOCK_EINT */ +#define WM8994_SRC1_LOCK_EINT_SHIFT 7 /* SRC1_LOCK_EINT */ +#define WM8994_SRC1_LOCK_EINT_WIDTH 1 /* SRC1_LOCK_EINT */ +#define WM8994_FLL2_LOCK_EINT 0x0040 /* FLL2_LOCK_EINT */ +#define WM8994_FLL2_LOCK_EINT_MASK 0x0040 /* FLL2_LOCK_EINT */ +#define WM8994_FLL2_LOCK_EINT_SHIFT 6 /* FLL2_LOCK_EINT */ +#define WM8994_FLL2_LOCK_EINT_WIDTH 1 /* FLL2_LOCK_EINT */ +#define WM8994_FLL1_LOCK_EINT 0x0020 /* FLL1_LOCK_EINT */ +#define WM8994_FLL1_LOCK_EINT_MASK 0x0020 /* FLL1_LOCK_EINT */ +#define WM8994_FLL1_LOCK_EINT_SHIFT 5 /* FLL1_LOCK_EINT */ +#define 
WM8994_FLL1_LOCK_EINT_WIDTH 1 /* FLL1_LOCK_EINT */ +#define WM8994_MIC2_SHRT_EINT 0x0010 /* MIC2_SHRT_EINT */ +#define WM8994_MIC2_SHRT_EINT_MASK 0x0010 /* MIC2_SHRT_EINT */ +#define WM8994_MIC2_SHRT_EINT_SHIFT 4 /* MIC2_SHRT_EINT */ +#define WM8994_MIC2_SHRT_EINT_WIDTH 1 /* MIC2_SHRT_EINT */ +#define WM8994_MIC2_DET_EINT 0x0008 /* MIC2_DET_EINT */ +#define WM8994_MIC2_DET_EINT_MASK 0x0008 /* MIC2_DET_EINT */ +#define WM8994_MIC2_DET_EINT_SHIFT 3 /* MIC2_DET_EINT */ +#define WM8994_MIC2_DET_EINT_WIDTH 1 /* MIC2_DET_EINT */ +#define WM8994_MIC1_SHRT_EINT 0x0004 /* MIC1_SHRT_EINT */ +#define WM8994_MIC1_SHRT_EINT_MASK 0x0004 /* MIC1_SHRT_EINT */ +#define WM8994_MIC1_SHRT_EINT_SHIFT 2 /* MIC1_SHRT_EINT */ +#define WM8994_MIC1_SHRT_EINT_WIDTH 1 /* MIC1_SHRT_EINT */ +#define WM8994_MIC1_DET_EINT 0x0002 /* MIC1_DET_EINT */ +#define WM8994_MIC1_DET_EINT_MASK 0x0002 /* MIC1_DET_EINT */ +#define WM8994_MIC1_DET_EINT_SHIFT 1 /* MIC1_DET_EINT */ +#define WM8994_MIC1_DET_EINT_WIDTH 1 /* MIC1_DET_EINT */ +#define WM8994_TEMP_SHUT_EINT 0x0001 /* TEMP_SHUT_EINT */ +#define WM8994_TEMP_SHUT_EINT_MASK 0x0001 /* TEMP_SHUT_EINT */ +#define WM8994_TEMP_SHUT_EINT_SHIFT 0 /* TEMP_SHUT_EINT */ +#define WM8994_TEMP_SHUT_EINT_WIDTH 1 /* TEMP_SHUT_EINT */ + +/* + * R1842 (0x732) - Interrupt Raw Status 2 + */ +#define WM8994_TEMP_WARN_STS 0x8000 /* TEMP_WARN_STS */ +#define WM8994_TEMP_WARN_STS_MASK 0x8000 /* TEMP_WARN_STS */ +#define WM8994_TEMP_WARN_STS_SHIFT 15 /* TEMP_WARN_STS */ +#define WM8994_TEMP_WARN_STS_WIDTH 1 /* TEMP_WARN_STS */ +#define WM8994_DCS_DONE_STS 0x4000 /* DCS_DONE_STS */ +#define WM8994_DCS_DONE_STS_MASK 0x4000 /* DCS_DONE_STS */ +#define WM8994_DCS_DONE_STS_SHIFT 14 /* DCS_DONE_STS */ +#define WM8994_DCS_DONE_STS_WIDTH 1 /* DCS_DONE_STS */ +#define WM8994_WSEQ_DONE_STS 0x2000 /* WSEQ_DONE_STS */ +#define WM8994_WSEQ_DONE_STS_MASK 0x2000 /* WSEQ_DONE_STS */ +#define WM8994_WSEQ_DONE_STS_SHIFT 13 /* WSEQ_DONE_STS */ +#define WM8994_WSEQ_DONE_STS_WIDTH 1 /* WSEQ_DONE_STS */ +#define WM8994_FIFOS_ERR_STS 0x1000 /* FIFOS_ERR_STS */ +#define WM8994_FIFOS_ERR_STS_MASK 0x1000 /* FIFOS_ERR_STS */ +#define WM8994_FIFOS_ERR_STS_SHIFT 12 /* FIFOS_ERR_STS */ +#define WM8994_FIFOS_ERR_STS_WIDTH 1 /* FIFOS_ERR_STS */ +#define WM8994_AIF2DRC_SIG_DET_STS 0x0800 /* AIF2DRC_SIG_DET_STS */ +#define WM8994_AIF2DRC_SIG_DET_STS_MASK 0x0800 /* AIF2DRC_SIG_DET_STS */ +#define WM8994_AIF2DRC_SIG_DET_STS_SHIFT 11 /* AIF2DRC_SIG_DET_STS */ +#define WM8994_AIF2DRC_SIG_DET_STS_WIDTH 1 /* AIF2DRC_SIG_DET_STS */ +#define WM8994_AIF1DRC2_SIG_DET_STS 0x0400 /* AIF1DRC2_SIG_DET_STS */ +#define WM8994_AIF1DRC2_SIG_DET_STS_MASK 0x0400 /* AIF1DRC2_SIG_DET_STS */ +#define WM8994_AIF1DRC2_SIG_DET_STS_SHIFT 10 /* AIF1DRC2_SIG_DET_STS */ +#define WM8994_AIF1DRC2_SIG_DET_STS_WIDTH 1 /* AIF1DRC2_SIG_DET_STS */ +#define WM8994_AIF1DRC1_SIG_DET_STS 0x0200 /* AIF1DRC1_SIG_DET_STS */ +#define WM8994_AIF1DRC1_SIG_DET_STS_MASK 0x0200 /* AIF1DRC1_SIG_DET_STS */ +#define WM8994_AIF1DRC1_SIG_DET_STS_SHIFT 9 /* AIF1DRC1_SIG_DET_STS */ +#define WM8994_AIF1DRC1_SIG_DET_STS_WIDTH 1 /* AIF1DRC1_SIG_DET_STS */ +#define WM8994_SRC2_LOCK_STS 0x0100 /* SRC2_LOCK_STS */ +#define WM8994_SRC2_LOCK_STS_MASK 0x0100 /* SRC2_LOCK_STS */ +#define WM8994_SRC2_LOCK_STS_SHIFT 8 /* SRC2_LOCK_STS */ +#define WM8994_SRC2_LOCK_STS_WIDTH 1 /* SRC2_LOCK_STS */ +#define WM8994_SRC1_LOCK_STS 0x0080 /* SRC1_LOCK_STS */ +#define WM8994_SRC1_LOCK_STS_MASK 0x0080 /* SRC1_LOCK_STS */ +#define WM8994_SRC1_LOCK_STS_SHIFT 7 /* SRC1_LOCK_STS */ +#define WM8994_SRC1_LOCK_STS_WIDTH 
1 /* SRC1_LOCK_STS */ +#define WM8994_FLL2_LOCK_STS 0x0040 /* FLL2_LOCK_STS */ +#define WM8994_FLL2_LOCK_STS_MASK 0x0040 /* FLL2_LOCK_STS */ +#define WM8994_FLL2_LOCK_STS_SHIFT 6 /* FLL2_LOCK_STS */ +#define WM8994_FLL2_LOCK_STS_WIDTH 1 /* FLL2_LOCK_STS */ +#define WM8994_FLL1_LOCK_STS 0x0020 /* FLL1_LOCK_STS */ +#define WM8994_FLL1_LOCK_STS_MASK 0x0020 /* FLL1_LOCK_STS */ +#define WM8994_FLL1_LOCK_STS_SHIFT 5 /* FLL1_LOCK_STS */ +#define WM8994_FLL1_LOCK_STS_WIDTH 1 /* FLL1_LOCK_STS */ +#define WM8994_MIC2_SHRT_STS 0x0010 /* MIC2_SHRT_STS */ +#define WM8994_MIC2_SHRT_STS_MASK 0x0010 /* MIC2_SHRT_STS */ +#define WM8994_MIC2_SHRT_STS_SHIFT 4 /* MIC2_SHRT_STS */ +#define WM8994_MIC2_SHRT_STS_WIDTH 1 /* MIC2_SHRT_STS */ +#define WM8994_MIC2_DET_STS 0x0008 /* MIC2_DET_STS */ +#define WM8994_MIC2_DET_STS_MASK 0x0008 /* MIC2_DET_STS */ +#define WM8994_MIC2_DET_STS_SHIFT 3 /* MIC2_DET_STS */ +#define WM8994_MIC2_DET_STS_WIDTH 1 /* MIC2_DET_STS */ +#define WM8994_MIC1_SHRT_STS 0x0004 /* MIC1_SHRT_STS */ +#define WM8994_MIC1_SHRT_STS_MASK 0x0004 /* MIC1_SHRT_STS */ +#define WM8994_MIC1_SHRT_STS_SHIFT 2 /* MIC1_SHRT_STS */ +#define WM8994_MIC1_SHRT_STS_WIDTH 1 /* MIC1_SHRT_STS */ +#define WM8994_MIC1_DET_STS 0x0002 /* MIC1_DET_STS */ +#define WM8994_MIC1_DET_STS_MASK 0x0002 /* MIC1_DET_STS */ +#define WM8994_MIC1_DET_STS_SHIFT 1 /* MIC1_DET_STS */ +#define WM8994_MIC1_DET_STS_WIDTH 1 /* MIC1_DET_STS */ +#define WM8994_TEMP_SHUT_STS 0x0001 /* TEMP_SHUT_STS */ +#define WM8994_TEMP_SHUT_STS_MASK 0x0001 /* TEMP_SHUT_STS */ +#define WM8994_TEMP_SHUT_STS_SHIFT 0 /* TEMP_SHUT_STS */ +#define WM8994_TEMP_SHUT_STS_WIDTH 1 /* TEMP_SHUT_STS */ + +/* + * R1848 (0x738) - Interrupt Status 1 Mask + */ +#define WM8994_IM_GP11_EINT 0x0400 /* IM_GP11_EINT */ +#define WM8994_IM_GP11_EINT_MASK 0x0400 /* IM_GP11_EINT */ +#define WM8994_IM_GP11_EINT_SHIFT 10 /* IM_GP11_EINT */ +#define WM8994_IM_GP11_EINT_WIDTH 1 /* IM_GP11_EINT */ +#define WM8994_IM_GP10_EINT 0x0200 /* IM_GP10_EINT */ +#define WM8994_IM_GP10_EINT_MASK 0x0200 /* IM_GP10_EINT */ +#define WM8994_IM_GP10_EINT_SHIFT 9 /* IM_GP10_EINT */ +#define WM8994_IM_GP10_EINT_WIDTH 1 /* IM_GP10_EINT */ +#define WM8994_IM_GP9_EINT 0x0100 /* IM_GP9_EINT */ +#define WM8994_IM_GP9_EINT_MASK 0x0100 /* IM_GP9_EINT */ +#define WM8994_IM_GP9_EINT_SHIFT 8 /* IM_GP9_EINT */ +#define WM8994_IM_GP9_EINT_WIDTH 1 /* IM_GP9_EINT */ +#define WM8994_IM_GP8_EINT 0x0080 /* IM_GP8_EINT */ +#define WM8994_IM_GP8_EINT_MASK 0x0080 /* IM_GP8_EINT */ +#define WM8994_IM_GP8_EINT_SHIFT 7 /* IM_GP8_EINT */ +#define WM8994_IM_GP8_EINT_WIDTH 1 /* IM_GP8_EINT */ +#define WM8994_IM_GP7_EINT 0x0040 /* IM_GP7_EINT */ +#define WM8994_IM_GP7_EINT_MASK 0x0040 /* IM_GP7_EINT */ +#define WM8994_IM_GP7_EINT_SHIFT 6 /* IM_GP7_EINT */ +#define WM8994_IM_GP7_EINT_WIDTH 1 /* IM_GP7_EINT */ +#define WM8994_IM_GP6_EINT 0x0020 /* IM_GP6_EINT */ +#define WM8994_IM_GP6_EINT_MASK 0x0020 /* IM_GP6_EINT */ +#define WM8994_IM_GP6_EINT_SHIFT 5 /* IM_GP6_EINT */ +#define WM8994_IM_GP6_EINT_WIDTH 1 /* IM_GP6_EINT */ +#define WM8994_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */ +#define WM8994_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */ +#define WM8994_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */ +#define WM8994_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */ +#define WM8994_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */ +#define WM8994_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */ +#define WM8994_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */ +#define WM8994_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */ +#define WM8994_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */ +#define 
WM8994_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */ +#define WM8994_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */ +#define WM8994_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */ +#define WM8994_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */ +#define WM8994_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */ +#define WM8994_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */ +#define WM8994_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */ +#define WM8994_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */ +#define WM8994_IM_GP1_EINT_MASK 0x0001 /* IM_GP1_EINT */ +#define WM8994_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */ +#define WM8994_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */ + +/* + * R1849 (0x739) - Interrupt Status 2 Mask + */ +#define WM8994_IM_TEMP_WARN_EINT 0x8000 /* IM_TEMP_WARN_EINT */ +#define WM8994_IM_TEMP_WARN_EINT_MASK 0x8000 /* IM_TEMP_WARN_EINT */ +#define WM8994_IM_TEMP_WARN_EINT_SHIFT 15 /* IM_TEMP_WARN_EINT */ +#define WM8994_IM_TEMP_WARN_EINT_WIDTH 1 /* IM_TEMP_WARN_EINT */ +#define WM8994_IM_DCS_DONE_EINT 0x4000 /* IM_DCS_DONE_EINT */ +#define WM8994_IM_DCS_DONE_EINT_MASK 0x4000 /* IM_DCS_DONE_EINT */ +#define WM8994_IM_DCS_DONE_EINT_SHIFT 14 /* IM_DCS_DONE_EINT */ +#define WM8994_IM_DCS_DONE_EINT_WIDTH 1 /* IM_DCS_DONE_EINT */ +#define WM8994_IM_WSEQ_DONE_EINT 0x2000 /* IM_WSEQ_DONE_EINT */ +#define WM8994_IM_WSEQ_DONE_EINT_MASK 0x2000 /* IM_WSEQ_DONE_EINT */ +#define WM8994_IM_WSEQ_DONE_EINT_SHIFT 13 /* IM_WSEQ_DONE_EINT */ +#define WM8994_IM_WSEQ_DONE_EINT_WIDTH 1 /* IM_WSEQ_DONE_EINT */ +#define WM8994_IM_FIFOS_ERR_EINT 0x1000 /* IM_FIFOS_ERR_EINT */ +#define WM8994_IM_FIFOS_ERR_EINT_MASK 0x1000 /* IM_FIFOS_ERR_EINT */ +#define WM8994_IM_FIFOS_ERR_EINT_SHIFT 12 /* IM_FIFOS_ERR_EINT */ +#define WM8994_IM_FIFOS_ERR_EINT_WIDTH 1 /* IM_FIFOS_ERR_EINT */ +#define WM8994_IM_AIF2DRC_SIG_DET_EINT 0x0800 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8994_IM_AIF2DRC_SIG_DET_EINT_MASK 0x0800 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8994_IM_AIF2DRC_SIG_DET_EINT_SHIFT 11 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8994_IM_AIF2DRC_SIG_DET_EINT_WIDTH 1 /* IM_AIF2DRC_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC2_SIG_DET_EINT 0x0400 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC2_SIG_DET_EINT_MASK 0x0400 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC2_SIG_DET_EINT_SHIFT 10 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC2_SIG_DET_EINT_WIDTH 1 /* IM_AIF1DRC2_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC1_SIG_DET_EINT 0x0200 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC1_SIG_DET_EINT_MASK 0x0200 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC1_SIG_DET_EINT_SHIFT 9 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8994_IM_AIF1DRC1_SIG_DET_EINT_WIDTH 1 /* IM_AIF1DRC1_SIG_DET_EINT */ +#define WM8994_IM_SRC2_LOCK_EINT 0x0100 /* IM_SRC2_LOCK_EINT */ +#define WM8994_IM_SRC2_LOCK_EINT_MASK 0x0100 /* IM_SRC2_LOCK_EINT */ +#define WM8994_IM_SRC2_LOCK_EINT_SHIFT 8 /* IM_SRC2_LOCK_EINT */ +#define WM8994_IM_SRC2_LOCK_EINT_WIDTH 1 /* IM_SRC2_LOCK_EINT */ +#define WM8994_IM_SRC1_LOCK_EINT 0x0080 /* IM_SRC1_LOCK_EINT */ +#define WM8994_IM_SRC1_LOCK_EINT_MASK 0x0080 /* IM_SRC1_LOCK_EINT */ +#define WM8994_IM_SRC1_LOCK_EINT_SHIFT 7 /* IM_SRC1_LOCK_EINT */ +#define WM8994_IM_SRC1_LOCK_EINT_WIDTH 1 /* IM_SRC1_LOCK_EINT */ +#define WM8994_IM_FLL2_LOCK_EINT 0x0040 /* IM_FLL2_LOCK_EINT */ +#define WM8994_IM_FLL2_LOCK_EINT_MASK 0x0040 /* IM_FLL2_LOCK_EINT */ +#define WM8994_IM_FLL2_LOCK_EINT_SHIFT 6 /* IM_FLL2_LOCK_EINT */ +#define WM8994_IM_FLL2_LOCK_EINT_WIDTH 1 /* IM_FLL2_LOCK_EINT */ +#define WM8994_IM_FLL1_LOCK_EINT 0x0020 /* IM_FLL1_LOCK_EINT */ +#define 
WM8994_IM_FLL1_LOCK_EINT_MASK 0x0020 /* IM_FLL1_LOCK_EINT */ +#define WM8994_IM_FLL1_LOCK_EINT_SHIFT 5 /* IM_FLL1_LOCK_EINT */ +#define WM8994_IM_FLL1_LOCK_EINT_WIDTH 1 /* IM_FLL1_LOCK_EINT */ +#define WM8994_IM_MIC2_SHRT_EINT 0x0010 /* IM_MIC2_SHRT_EINT */ +#define WM8994_IM_MIC2_SHRT_EINT_MASK 0x0010 /* IM_MIC2_SHRT_EINT */ +#define WM8994_IM_MIC2_SHRT_EINT_SHIFT 4 /* IM_MIC2_SHRT_EINT */ +#define WM8994_IM_MIC2_SHRT_EINT_WIDTH 1 /* IM_MIC2_SHRT_EINT */ +#define WM8994_IM_MIC2_DET_EINT 0x0008 /* IM_MIC2_DET_EINT */ +#define WM8994_IM_MIC2_DET_EINT_MASK 0x0008 /* IM_MIC2_DET_EINT */ +#define WM8994_IM_MIC2_DET_EINT_SHIFT 3 /* IM_MIC2_DET_EINT */ +#define WM8994_IM_MIC2_DET_EINT_WIDTH 1 /* IM_MIC2_DET_EINT */ +#define WM8994_IM_MIC1_SHRT_EINT 0x0004 /* IM_MIC1_SHRT_EINT */ +#define WM8994_IM_MIC1_SHRT_EINT_MASK 0x0004 /* IM_MIC1_SHRT_EINT */ +#define WM8994_IM_MIC1_SHRT_EINT_SHIFT 2 /* IM_MIC1_SHRT_EINT */ +#define WM8994_IM_MIC1_SHRT_EINT_WIDTH 1 /* IM_MIC1_SHRT_EINT */ +#define WM8994_IM_MIC1_DET_EINT 0x0002 /* IM_MIC1_DET_EINT */ +#define WM8994_IM_MIC1_DET_EINT_MASK 0x0002 /* IM_MIC1_DET_EINT */ +#define WM8994_IM_MIC1_DET_EINT_SHIFT 1 /* IM_MIC1_DET_EINT */ +#define WM8994_IM_MIC1_DET_EINT_WIDTH 1 /* IM_MIC1_DET_EINT */ +#define WM8994_IM_TEMP_SHUT_EINT 0x0001 /* IM_TEMP_SHUT_EINT */ +#define WM8994_IM_TEMP_SHUT_EINT_MASK 0x0001 /* IM_TEMP_SHUT_EINT */ +#define WM8994_IM_TEMP_SHUT_EINT_SHIFT 0 /* IM_TEMP_SHUT_EINT */ +#define WM8994_IM_TEMP_SHUT_EINT_WIDTH 1 /* IM_TEMP_SHUT_EINT */ + +/* + * R1856 (0x740) - Interrupt Control + */ +#define WM8994_IM_IRQ 0x0001 /* IM_IRQ */ +#define WM8994_IM_IRQ_MASK 0x0001 /* IM_IRQ */ +#define WM8994_IM_IRQ_SHIFT 0 /* IM_IRQ */ +#define WM8994_IM_IRQ_WIDTH 1 /* IM_IRQ */ + +/* + * R1864 (0x748) - IRQ Debounce + */ +#define WM8994_TEMP_WARN_DB 0x0020 /* TEMP_WARN_DB */ +#define WM8994_TEMP_WARN_DB_MASK 0x0020 /* TEMP_WARN_DB */ +#define WM8994_TEMP_WARN_DB_SHIFT 5 /* TEMP_WARN_DB */ +#define WM8994_TEMP_WARN_DB_WIDTH 1 /* TEMP_WARN_DB */ +#define WM8994_MIC2_SHRT_DB 0x0010 /* MIC2_SHRT_DB */ +#define WM8994_MIC2_SHRT_DB_MASK 0x0010 /* MIC2_SHRT_DB */ +#define WM8994_MIC2_SHRT_DB_SHIFT 4 /* MIC2_SHRT_DB */ +#define WM8994_MIC2_SHRT_DB_WIDTH 1 /* MIC2_SHRT_DB */ +#define WM8994_MIC2_DET_DB 0x0008 /* MIC2_DET_DB */ +#define WM8994_MIC2_DET_DB_MASK 0x0008 /* MIC2_DET_DB */ +#define WM8994_MIC2_DET_DB_SHIFT 3 /* MIC2_DET_DB */ +#define WM8994_MIC2_DET_DB_WIDTH 1 /* MIC2_DET_DB */ +#define WM8994_MIC1_SHRT_DB 0x0004 /* MIC1_SHRT_DB */ +#define WM8994_MIC1_SHRT_DB_MASK 0x0004 /* MIC1_SHRT_DB */ +#define WM8994_MIC1_SHRT_DB_SHIFT 2 /* MIC1_SHRT_DB */ +#define WM8994_MIC1_SHRT_DB_WIDTH 1 /* MIC1_SHRT_DB */ +#define WM8994_MIC1_DET_DB 0x0002 /* MIC1_DET_DB */ +#define WM8994_MIC1_DET_DB_MASK 0x0002 /* MIC1_DET_DB */ +#define WM8994_MIC1_DET_DB_SHIFT 1 /* MIC1_DET_DB */ +#define WM8994_MIC1_DET_DB_WIDTH 1 /* MIC1_DET_DB */ +#define WM8994_TEMP_SHUT_DB 0x0001 /* TEMP_SHUT_DB */ +#define WM8994_TEMP_SHUT_DB_MASK 0x0001 /* TEMP_SHUT_DB */ +#define WM8994_TEMP_SHUT_DB_SHIFT 0 /* TEMP_SHUT_DB */ +#define WM8994_TEMP_SHUT_DB_WIDTH 1 /* TEMP_SHUT_DB */ + +/* + * R2304 (0x900) - DSP2_Program + */ +#define WM8958_DSP2_ENA 0x0001 /* DSP2_ENA */ +#define WM8958_DSP2_ENA_MASK 0x0001 /* DSP2_ENA */ +#define WM8958_DSP2_ENA_SHIFT 0 /* DSP2_ENA */ +#define WM8958_DSP2_ENA_WIDTH 1 /* DSP2_ENA */ + +/* + * R2305 (0x901) - DSP2_Config + */ +#define WM8958_MBC_SEL_MASK 0x0030 /* MBC_SEL - [5:4] */ +#define WM8958_MBC_SEL_SHIFT 4 /* MBC_SEL - [5:4] */ +#define WM8958_MBC_SEL_WIDTH 
2 /* MBC_SEL - [5:4] */ +#define WM8958_MBC_ENA 0x0001 /* MBC_ENA */ +#define WM8958_MBC_ENA_MASK 0x0001 /* MBC_ENA */ +#define WM8958_MBC_ENA_SHIFT 0 /* MBC_ENA */ +#define WM8958_MBC_ENA_WIDTH 1 /* MBC_ENA */ + +/* + * R2560 (0xA00) - DSP2_MagicNum + */ +#define WM8958_DSP2_MAGIC_NUM_MASK 0xFFFF /* DSP2_MAGIC_NUM - [15:0] */ +#define WM8958_DSP2_MAGIC_NUM_SHIFT 0 /* DSP2_MAGIC_NUM - [15:0] */ +#define WM8958_DSP2_MAGIC_NUM_WIDTH 16 /* DSP2_MAGIC_NUM - [15:0] */ + +/* + * R2561 (0xA01) - DSP2_ReleaseYear + */ +#define WM8958_DSP2_RELEASE_YEAR_MASK 0xFFFF /* DSP2_RELEASE_YEAR - [15:0] */ +#define WM8958_DSP2_RELEASE_YEAR_SHIFT 0 /* DSP2_RELEASE_YEAR - [15:0] */ +#define WM8958_DSP2_RELEASE_YEAR_WIDTH 16 /* DSP2_RELEASE_YEAR - [15:0] */ + +/* + * R2562 (0xA02) - DSP2_ReleaseMonthDay + */ +#define WM8958_DSP2_RELEASE_MONTH_MASK 0xFF00 /* DSP2_RELEASE_MONTH - [15:8] */ +#define WM8958_DSP2_RELEASE_MONTH_SHIFT 8 /* DSP2_RELEASE_MONTH - [15:8] */ +#define WM8958_DSP2_RELEASE_MONTH_WIDTH 8 /* DSP2_RELEASE_MONTH - [15:8] */ +#define WM8958_DSP2_RELEASE_DAY_MASK 0x00FF /* DSP2_RELEASE_DAY - [7:0] */ +#define WM8958_DSP2_RELEASE_DAY_SHIFT 0 /* DSP2_RELEASE_DAY - [7:0] */ +#define WM8958_DSP2_RELEASE_DAY_WIDTH 8 /* DSP2_RELEASE_DAY - [7:0] */ + +/* + * R2563 (0xA03) - DSP2_ReleaseTime + */ +#define WM8958_DSP2_RELEASE_HOURS_MASK 0xFF00 /* DSP2_RELEASE_HOURS - [15:8] */ +#define WM8958_DSP2_RELEASE_HOURS_SHIFT 8 /* DSP2_RELEASE_HOURS - [15:8] */ +#define WM8958_DSP2_RELEASE_HOURS_WIDTH 8 /* DSP2_RELEASE_HOURS - [15:8] */ +#define WM8958_DSP2_RELEASE_MINS_MASK 0x00FF /* DSP2_RELEASE_MINS - [7:0] */ +#define WM8958_DSP2_RELEASE_MINS_SHIFT 0 /* DSP2_RELEASE_MINS - [7:0] */ +#define WM8958_DSP2_RELEASE_MINS_WIDTH 8 /* DSP2_RELEASE_MINS - [7:0] */ + +/* + * R2564 (0xA04) - DSP2_VerMajMin + */ +#define WM8958_DSP2_MAJOR_VER_MASK 0xFF00 /* DSP2_MAJOR_VER - [15:8] */ +#define WM8958_DSP2_MAJOR_VER_SHIFT 8 /* DSP2_MAJOR_VER - [15:8] */ +#define WM8958_DSP2_MAJOR_VER_WIDTH 8 /* DSP2_MAJOR_VER - [15:8] */ +#define WM8958_DSP2_MINOR_VER_MASK 0x00FF /* DSP2_MINOR_VER - [7:0] */ +#define WM8958_DSP2_MINOR_VER_SHIFT 0 /* DSP2_MINOR_VER - [7:0] */ +#define WM8958_DSP2_MINOR_VER_WIDTH 8 /* DSP2_MINOR_VER - [7:0] */ + +/* + * R2565 (0xA05) - DSP2_VerBuild + */ +#define WM8958_DSP2_BUILD_VER_MASK 0xFFFF /* DSP2_BUILD_VER - [15:0] */ +#define WM8958_DSP2_BUILD_VER_SHIFT 0 /* DSP2_BUILD_VER - [15:0] */ +#define WM8958_DSP2_BUILD_VER_WIDTH 16 /* DSP2_BUILD_VER - [15:0] */ + +/* + * R2573 (0xA0D) - DSP2_ExecControl + */ +#define WM8958_DSP2_STOPC 0x0020 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPC_MASK 0x0020 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPC_SHIFT 5 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPC_WIDTH 1 /* DSP2_STOPC */ +#define WM8958_DSP2_STOPS 0x0010 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPS_MASK 0x0010 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPS_SHIFT 4 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPS_WIDTH 1 /* DSP2_STOPS */ +#define WM8958_DSP2_STOPI 0x0008 /* DSP2_STOPI */ +#define WM8958_DSP2_STOPI_MASK 0x0008 /* DSP2_STOPI */ +#define WM8958_DSP2_STOPI_SHIFT 3 /* DSP2_STOPI */ +#define WM8958_DSP2_STOPI_WIDTH 1 /* DSP2_STOPI */ +#define WM8958_DSP2_STOP 0x0004 /* DSP2_STOP */ +#define WM8958_DSP2_STOP_MASK 0x0004 /* DSP2_STOP */ +#define WM8958_DSP2_STOP_SHIFT 2 /* DSP2_STOP */ +#define WM8958_DSP2_STOP_WIDTH 1 /* DSP2_STOP */ +#define WM8958_DSP2_RUNR 0x0002 /* DSP2_RUNR */ +#define WM8958_DSP2_RUNR_MASK 0x0002 /* DSP2_RUNR */ +#define WM8958_DSP2_RUNR_SHIFT 1 /* DSP2_RUNR */ +#define WM8958_DSP2_RUNR_WIDTH 1 
/* DSP2_RUNR */ +#define WM8958_DSP2_RUN 0x0001 /* DSP2_RUN */ +#define WM8958_DSP2_RUN_MASK 0x0001 /* DSP2_RUN */ +#define WM8958_DSP2_RUN_SHIFT 0 /* DSP2_RUN */ +#define WM8958_DSP2_RUN_WIDTH 1 /* DSP2_RUN */ + +#endif diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h new file mode 100644 index 00000000..e11f4d9f --- /dev/null +++ b/include/linux/mg_disk.h @@ -0,0 +1,45 @@ +/* + * include/linux/mg_disk.h + * + * Private data for mflash platform driver + * + * (c) 2008 mGine Co.,LTD + * (c) 2008 unsik Kim <donari75@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MG_DISK_H__ +#define __MG_DISK_H__ + +/* name for platform device */ +#define MG_DEV_NAME "mg_disk" + +/* names of GPIO resources */ +#define MG_RST_PIN "mg_rst" +/* except MG_BOOT_DEV, reset-out pin should be assigned */ +#define MG_RSTOUT_PIN "mg_rstout" +/* device attributes */ +/* use mflash as boot device */ +#define MG_BOOT_DEV (1 << 0) +/* use mflash as storage device */ +#define MG_STORAGE_DEV (1 << 1) +/* same as MG_STORAGE_DEV, but the bootloader has already done the reset sequence */ +#define MG_STORAGE_DEV_SKIP_RST (1 << 2) + +/* private driver data */ +struct mg_drv_data { + /* disk resource */ + u32 use_polling; + + /* device attributes */ + u32 dev_attr; + + /* internally used */ + void *host; +}; + +#endif diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h new file mode 100644 index 00000000..dd8da342 --- /dev/null +++ b/include/linux/micrel_phy.h @@ -0,0 +1,16 @@ +#ifndef _MICREL_PHY_H +#define _MICREL_PHY_H + +#define MICREL_PHY_ID_MASK 0x00fffff0 + +#define PHY_ID_KSZ9021 0x00221611 +#define PHY_ID_KS8737 0x00221720 +#define PHY_ID_KS8041 0x00221510 +#define PHY_ID_KS8051 0x00221550 +/* both for ks8001 Rev. A/B, and for ks8721 Rev 3.
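A minimal usage sketch for the WM8994/WM8958 register macros above, not taken from the header itself: every bit field is described by a value/_MASK/_SHIFT/_WIDTH group, so reading or updating a field is a plain mask-and-shift on the 16-bit register value. The helper below is hypothetical and uses the DAC1 Left Volume macros from R1552 (0x610); how the value is actually read from or written to the chip is outside this header and not shown.

/* Sketch only: update DAC1L_VOL in a cached copy of register 0x610 and
 * set DAC1_VU so the new volume is latched on the next register write. */
static u16 example_set_dac1l_vol(u16 r1552, u16 vol)
{
	r1552 &= ~WM8994_DAC1L_VOL_MASK;
	r1552 |= (vol << WM8994_DAC1L_VOL_SHIFT) & WM8994_DAC1L_VOL_MASK;
	return r1552 | WM8994_DAC1_VU;
}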
*/ +#define PHY_ID_KS8001 0x0022161A + +/* struct phy_device dev_flags definitions */ +#define MICREL_PHY_50MHZ_CLK 0x00000001 + +#endif /* _MICREL_PHY_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h new file mode 100644 index 00000000..855c337b --- /dev/null +++ b/include/linux/migrate.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_MIGRATE_H +#define _LINUX_MIGRATE_H + +#include <linux/mm.h> +#include <linux/mempolicy.h> +#include <linux/migrate_mode.h> + +typedef struct page *new_page_t(struct page *, unsigned long private, int **); + +#ifdef CONFIG_MIGRATION + +extern void putback_lru_pages(struct list_head *l); +extern int migrate_page(struct address_space *, + struct page *, struct page *, enum migrate_mode); +extern int migrate_pages(struct list_head *l, new_page_t x, + unsigned long private, bool offlining, + enum migrate_mode mode); +extern int migrate_huge_pages(struct list_head *l, new_page_t x, + unsigned long private, bool offlining, + enum migrate_mode mode); + +extern int fail_migrate_page(struct address_space *, + struct page *, struct page *); + +extern int migrate_prep(void); +extern int migrate_prep_local(void); +extern int migrate_vmas(struct mm_struct *mm, + const nodemask_t *from, const nodemask_t *to, + unsigned long flags); +extern void migrate_page_copy(struct page *newpage, struct page *page); +extern int migrate_huge_page_move_mapping(struct address_space *mapping, + struct page *newpage, struct page *page); +#else + +static inline void putback_lru_pages(struct list_head *l) {} +static inline int migrate_pages(struct list_head *l, new_page_t x, + unsigned long private, bool offlining, + enum migrate_mode mode) { return -ENOSYS; } +static inline int migrate_huge_pages(struct list_head *l, new_page_t x, + unsigned long private, bool offlining, + enum migrate_mode mode) { return -ENOSYS; } + +static inline int migrate_prep(void) { return -ENOSYS; } +static inline int migrate_prep_local(void) { return -ENOSYS; } + +static inline int migrate_vmas(struct mm_struct *mm, + const nodemask_t *from, const nodemask_t *to, + unsigned long flags) +{ + return -ENOSYS; +} + +static inline void migrate_page_copy(struct page *newpage, + struct page *page) {} + +static inline int migrate_huge_page_move_mapping(struct address_space *mapping, + struct page *newpage, struct page *page) +{ + return -ENOSYS; +} + +/* Possible settings for the migrate_page() method in address_operations */ +#define migrate_page NULL +#define fail_migrate_page NULL + +#endif /* CONFIG_MIGRATION */ +#endif /* _LINUX_MIGRATE_H */ diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h new file mode 100644 index 00000000..ebf3d89a --- /dev/null +++ b/include/linux/migrate_mode.h @@ -0,0 +1,16 @@ +#ifndef MIGRATE_MODE_H_INCLUDED +#define MIGRATE_MODE_H_INCLUDED +/* + * MIGRATE_ASYNC means never block + * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking + * on most operations but not ->writepage as the potential stall time + * is too significant + * MIGRATE_SYNC will block when migrating pages + */ +enum migrate_mode { + MIGRATE_ASYNC, + MIGRATE_SYNC_LIGHT, + MIGRATE_SYNC, +}; + +#endif /* MIGRATE_MODE_H_INCLUDED */ diff --git a/include/linux/mii.h b/include/linux/mii.h new file mode 100644 index 00000000..2783eca6 --- /dev/null +++ b/include/linux/mii.h @@ -0,0 +1,482 @@ +/* + * linux/mii.h: definitions for MII-compatible transceivers + * Originally drivers/net/sunhme.h. + * + * Copyright (C) 1996, 1999, 2001 David S. 
Miller (davem@redhat.com) + */ + +#ifndef __LINUX_MII_H__ +#define __LINUX_MII_H__ + +#include <linux/types.h> +#include <linux/ethtool.h> + +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +#define MII_ESTATUS 0x0f /* Extended Status */ +#define MII_DCOUNTER 0x12 /* Disconnect counter */ +#define MII_FCSCOUNTER 0x13 /* False carrier counter */ +#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */ +#define MII_RERRCOUNTER 0x15 /* Receive error counter */ +#define MII_SREVISION 0x16 /* Silicon revision */ +#define MII_RESV1 0x17 /* Reserved... */ +#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */ +#define MII_PHYADDR 0x19 /* PHY address */ +#define MII_RESV2 0x1a /* Reserved... */ +#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */ +#define MII_NCONFIG 0x1c /* Network interface config */ + +/* Basic mode control register. */ +#define BMCR_RESV 0x003f /* Unused... */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ +#define BMCR_CTST 0x0080 /* Collision test */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */ +#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */ +#define BMCR_PDOWN 0x0800 /* Enable low power state */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +#define BMCR_SPEED100 0x2000 /* Select 100Mbps */ +#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */ +#define BMCR_RESET 0x8000 /* Reset to default state */ + +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_JCD 0x0002 /* Jabber detected */ +#define BMSR_LSTATUS 0x0004 /* Link status */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_RFAULT 0x0010 /* Remote fault detected */ +#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */ +#define BMSR_RESV 0x00c0 /* Unused... */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +#define BMSR_100HALF2 0x0200 /* Can do 100BASE-T2 HDX */ +#define BMSR_100FULL2 0x0400 /* Can do 100BASE-T2 FDX */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +#define BMSR_100BASE4 0x8000 /* Can do 100mbps, 4k packets */ + +/* Advertisement control register. 
*/ +#define ADVERTISE_SLCT 0x001f /* Selector bits */ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_1000XFULL 0x0020 /* Try for 1000BASE-X full-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_1000XHALF 0x0040 /* Try for 1000BASE-X half-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_1000XPAUSE 0x0080 /* Try for 1000BASE-X pause */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_1000XPSE_ASYM 0x0100 /* Try for 1000BASE-X asym pause */ +#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +#define ADVERTISE_RESV 0x1000 /* Unused... */ +#define ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */ +#define ADVERTISE_LPACK 0x4000 /* Ack link partner's response */ +#define ADVERTISE_NPAGE 0x8000 /* Next page bit */ + +#define ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \ + ADVERTISE_CSMA) +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL) + +/* Link partner ability register. */ +#define LPA_SLCT 0x001f /* Same as advertise selector */ +#define LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ +#define LPA_1000XFULL 0x0020 /* Can do 1000BASE-X full-duplex */ +#define LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ +#define LPA_1000XHALF 0x0040 /* Can do 1000BASE-X half-duplex */ +#define LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ +#define LPA_1000XPAUSE 0x0080 /* Can do 1000BASE-X pause */ +#define LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ +#define LPA_1000XPAUSE_ASYM 0x0100 /* Can do 1000BASE-X pause asym */ +#define LPA_100BASE4 0x0200 /* Can do 100mbps 4k packets */ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymmetrically */ +#define LPA_RESV 0x1000 /* Unused... */ +#define LPA_RFAULT 0x2000 /* Link partner faulted */ +#define LPA_LPACK 0x4000 /* Link partner acked us */ +#define LPA_NPAGE 0x8000 /* Next page bit */ + +#define LPA_DUPLEX (LPA_10FULL | LPA_100FULL) +#define LPA_100 (LPA_100FULL | LPA_100HALF | LPA_100BASE4) + +/* Expansion register for auto-negotiation. */ +#define EXPANSION_NWAY 0x0001 /* Can do N-way auto-nego */ +#define EXPANSION_LCWP 0x0002 /* Got new RX page code word */ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#define EXPANSION_NPCAPABLE 0x0008 /* Link partner supports npage */ +#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */ +#define EXPANSION_RESV 0xffe0 /* Unused... */ + +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +/* N-way test register. */ +#define NWAYTEST_RESV1 0x00ff /* Unused... */ +#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */ +#define NWAYTEST_RESV2 0xfe00 /* Unused...
*/ + +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ +#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */ +#define LPA_1000HALF 0x0400 /* Link partner 1000BASE-T half duplex */ + +/* Flow control flags */ +#define FLOW_CTRL_TX 0x01 +#define FLOW_CTRL_RX 0x02 + +/* This structure is used in all SIOCxMIIxxx ioctl calls */ +struct mii_ioctl_data { + __u16 phy_id; + __u16 reg_num; + __u16 val_in; + __u16 val_out; +}; + +#ifdef __KERNEL__ + +#include <linux/if.h> + +struct ethtool_cmd; + +struct mii_if_info { + int phy_id; + int advertising; + int phy_id_mask; + int reg_num_mask; + + unsigned int full_duplex : 1; /* is full duplex? */ + unsigned int force_media : 1; /* is autoneg. disabled? */ + unsigned int supports_gmii : 1; /* are GMII registers supported? */ + + struct net_device *dev; + int (*mdio_read) (struct net_device *dev, int phy_id, int location); + void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); +}; + +extern int mii_link_ok (struct mii_if_info *mii); +extern int mii_nway_restart (struct mii_if_info *mii); +extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern int mii_check_gmii_support(struct mii_if_info *mii); +extern void mii_check_link (struct mii_if_info *mii); +extern unsigned int mii_check_media (struct mii_if_info *mii, + unsigned int ok_to_print, + unsigned int init_media); +extern int generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_changed); + + +static inline struct mii_ioctl_data *if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +/** + * mii_nway_result + * @negotiated: value of MII ANAR and'd with ANLPAR + * + * Given a set of MII abilities, check each bit and returns the + * currently supported media, in the priority order defined by + * IEEE 802.3u. We use LPA_xxx constants but note this is not the + * value of LPA solely, as described above. + * + * The one exception to IEEE 802.3u is that 100baseT4 is placed + * between 100T-full and 100T-half. If your phy does not support + * 100T4 this is fine. If your phy places 100T4 elsewhere in the + * priority order, you will need to roll your own function. + */ +static inline unsigned int mii_nway_result (unsigned int negotiated) +{ + unsigned int ret; + + if (negotiated & LPA_100FULL) + ret = LPA_100FULL; + else if (negotiated & LPA_100BASE4) + ret = LPA_100BASE4; + else if (negotiated & LPA_100HALF) + ret = LPA_100HALF; + else if (negotiated & LPA_10FULL) + ret = LPA_10FULL; + else + ret = LPA_10HALF; + + return ret; +} + +/** + * mii_duplex + * @duplex_lock: Non-zero if duplex is locked at full + * @negotiated: value of MII ANAR and'd with ANLPAR + * + * A small helper function for a common case. Returns one + * if the media is operating or locked at full duplex, and + * returns zero otherwise. 
+ */ +static inline unsigned int mii_duplex (unsigned int duplex_lock, + unsigned int negotiated) +{ + if (duplex_lock) + return 1; + if (mii_nway_result(negotiated) & LPA_DUPLEX) + return 1; + return 0; +} + +/** + * ethtool_adv_to_mii_adv_t + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_ADVERTISE register. + */ +static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_10baseT_Half) + result |= ADVERTISE_10HALF; + if (ethadv & ADVERTISED_10baseT_Full) + result |= ADVERTISE_10FULL; + if (ethadv & ADVERTISED_100baseT_Half) + result |= ADVERTISE_100HALF; + if (ethadv & ADVERTISED_100baseT_Full) + result |= ADVERTISE_100FULL; + if (ethadv & ADVERTISED_Pause) + result |= ADVERTISE_PAUSE_CAP; + if (ethadv & ADVERTISED_Asym_Pause) + result |= ADVERTISE_PAUSE_ASYM; + + return result; +} + +/** + * mii_adv_to_ethtool_adv_t + * @adv: value of the MII_ADVERTISE register + * + * A small helper function that translates MII_ADVERTISE bits + * to ethtool advertisement settings. + */ +static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_10HALF) + result |= ADVERTISED_10baseT_Half; + if (adv & ADVERTISE_10FULL) + result |= ADVERTISED_10baseT_Full; + if (adv & ADVERTISE_100HALF) + result |= ADVERTISED_100baseT_Half; + if (adv & ADVERTISE_100FULL) + result |= ADVERTISED_100baseT_Full; + if (adv & ADVERTISE_PAUSE_CAP) + result |= ADVERTISED_Pause; + if (adv & ADVERTISE_PAUSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +/** + * ethtool_adv_to_mii_ctrl1000_t + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_CTRL1000 register when in 1000T mode. + */ +static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_1000baseT_Half) + result |= ADVERTISE_1000HALF; + if (ethadv & ADVERTISED_1000baseT_Full) + result |= ADVERTISE_1000FULL; + + return result; +} + +/** + * mii_ctrl1000_to_ethtool_adv_t + * @adv: value of the MII_CTRL1000 register + * + * A small helper function that translates MII_CTRL1000 + * bits, when in 1000Base-T mode, to ethtool + * advertisement settings. + */ +static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (adv & ADVERTISE_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +/** + * mii_lpa_to_ethtool_lpa_t + * @adv: value of the MII_LPA register + * + * A small helper function that translates MII_LPA + * bits, when in 1000Base-T mode, to ethtool + * LP advertisement settings. + */ +static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_t(lpa); +} + +/** + * mii_stat1000_to_ethtool_lpa_t + * @adv: value of the MII_STAT1000 register + * + * A small helper function that translates MII_STAT1000 + * bits, when in 1000Base-T mode, to ethtool + * advertisement settings. 
+ */ +static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (lpa & LPA_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +/** + * ethtool_adv_to_mii_adv_x + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_CTRL1000 register when in 1000Base-X mode. + */ +static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_1000baseT_Half) + result |= ADVERTISE_1000XHALF; + if (ethadv & ADVERTISED_1000baseT_Full) + result |= ADVERTISE_1000XFULL; + if (ethadv & ADVERTISED_Pause) + result |= ADVERTISE_1000XPAUSE; + if (ethadv & ADVERTISED_Asym_Pause) + result |= ADVERTISE_1000XPSE_ASYM; + + return result; +} + +/** + * mii_adv_to_ethtool_adv_x + * @adv: value of the MII_CTRL1000 register + * + * A small helper function that translates MII_CTRL1000 + * bits, when in 1000Base-X mode, to ethtool + * advertisement settings. + */ +static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_1000XHALF) + result |= ADVERTISED_1000baseT_Half; + if (adv & ADVERTISE_1000XFULL) + result |= ADVERTISED_1000baseT_Full; + if (adv & ADVERTISE_1000XPAUSE) + result |= ADVERTISED_Pause; + if (adv & ADVERTISE_1000XPSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +/** + * mii_lpa_to_ethtool_lpa_x + * @adv: value of the MII_LPA register + * + * A small helper function that translates MII_LPA + * bits, when in 1000Base-X mode, to ethtool + * LP advertisement settings. + */ +static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_x(lpa); +} + +/** + * mii_advertise_flowctrl - get flow control advertisement flags + * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) + */ +static inline u16 mii_advertise_flowctrl(int cap) +{ + u16 adv = 0; + + if (cap & FLOW_CTRL_RX) + adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + if (cap & FLOW_CTRL_TX) + adv ^= ADVERTISE_PAUSE_ASYM; + + return adv; +} + +/** + * mii_resolve_flowctrl_fdx + * @lcladv: value of MII ADVERTISE register + * @rmtadv: value of MII LPA register + * + * Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3 + */ +static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) +{ + u8 cap = 0; + + if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) { + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) { + if (lcladv & ADVERTISE_PAUSE_CAP) + cap = FLOW_CTRL_RX; + else if (rmtadv & ADVERTISE_PAUSE_CAP) + cap = FLOW_CTRL_TX; + } + + return cap; +} + +#endif /* __KERNEL__ */ +#endif /* __LINUX_MII_H__ */ diff --git a/include/linux/minix_fs.h b/include/linux/minix_fs.h new file mode 100644 index 00000000..13fe09e0 --- /dev/null +++ b/include/linux/minix_fs.h @@ -0,0 +1,106 @@ +#ifndef _LINUX_MINIX_FS_H +#define _LINUX_MINIX_FS_H + +#include <linux/types.h> +#include <linux/magic.h> + +/* + * The minix filesystem constants/structures + */ + +/* + * Thanks to Kees J Bot for sending me the definitions of the new + * minix filesystem (aka V2) with bigger inodes and 32-bit block + * pointers. + */ + +#define MINIX_ROOT_INO 1 + +/* Not the same as the bogus LINK_MAX in <linux/limits.h>. Oh well. 
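A minimal usage sketch for the MII flow-control helpers above, not taken from mii.h itself: a driver advertises pause support by folding mii_advertise_flowctrl() into MII_ADVERTISE, and once autonegotiation completes it resolves the outcome with mii_resolve_flowctrl_fdx() on the local and link-partner ability registers. The example_ functions are hypothetical; the MDIO accessor pointers follow the mdio_read/mdio_write callback shapes of struct mii_if_info.

/* Sketch only: advertise symmetric + asymmetric pause, then resolve the
 * negotiated pause configuration per IEEE 802.3-2005 table 28B-3. */
static void example_advertise_pause(struct net_device *dev, int phy_id,
				    int (*mdio_read)(struct net_device *, int, int),
				    void (*mdio_write)(struct net_device *, int, int, int))
{
	int adv = mdio_read(dev, phy_id, MII_ADVERTISE);

	adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	adv |= mii_advertise_flowctrl(FLOW_CTRL_RX | FLOW_CTRL_TX);
	mdio_write(dev, phy_id, MII_ADVERTISE, adv);
}

static u8 example_resolve_pause(struct net_device *dev, int phy_id,
				int (*mdio_read)(struct net_device *, int, int))
{
	u16 lcladv = mdio_read(dev, phy_id, MII_ADVERTISE);
	u16 rmtadv = mdio_read(dev, phy_id, MII_LPA);

	return mii_resolve_flowctrl_fdx(lcladv, rmtadv);	/* FLOW_CTRL_RX/TX bits */
}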
*/ +#define MINIX_LINK_MAX 250 +#define MINIX2_LINK_MAX 65530 + +#define MINIX_I_MAP_SLOTS 8 +#define MINIX_Z_MAP_SLOTS 64 +#define MINIX_VALID_FS 0x0001 /* Clean fs. */ +#define MINIX_ERROR_FS 0x0002 /* fs has errors. */ + +#define MINIX_INODES_PER_BLOCK ((BLOCK_SIZE)/(sizeof (struct minix_inode))) + +/* + * This is the original minix inode layout on disk. + * Note the 8-bit gid and atime and ctime. + */ +struct minix_inode { + __u16 i_mode; + __u16 i_uid; + __u32 i_size; + __u32 i_time; + __u8 i_gid; + __u8 i_nlinks; + __u16 i_zone[9]; +}; + +/* + * The new minix inode has all the time entries, as well as + * long block numbers and a third indirect block (7+1+1+1 + * instead of 7+1+1). Also, some previously 8-bit values are + * now 16-bit. The inode is now 64 bytes instead of 32. + */ +struct minix2_inode { + __u16 i_mode; + __u16 i_nlinks; + __u16 i_uid; + __u16 i_gid; + __u32 i_size; + __u32 i_atime; + __u32 i_mtime; + __u32 i_ctime; + __u32 i_zone[10]; +}; + +/* + * minix super-block data on disk + */ +struct minix_super_block { + __u16 s_ninodes; + __u16 s_nzones; + __u16 s_imap_blocks; + __u16 s_zmap_blocks; + __u16 s_firstdatazone; + __u16 s_log_zone_size; + __u32 s_max_size; + __u16 s_magic; + __u16 s_state; + __u32 s_zones; +}; + +/* + * V3 minix super-block data on disk + */ +struct minix3_super_block { + __u32 s_ninodes; + __u16 s_pad0; + __u16 s_imap_blocks; + __u16 s_zmap_blocks; + __u16 s_firstdatazone; + __u16 s_log_zone_size; + __u16 s_pad1; + __u32 s_max_size; + __u32 s_zones; + __u16 s_magic; + __u16 s_pad2; + __u16 s_blocksize; + __u8 s_disk_version; +}; + +struct minix_dir_entry { + __u16 inode; + char name[0]; +}; + +struct minix3_dir_entry { + __u32 inode; + char name[0]; +}; +#endif diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h new file mode 100644 index 00000000..0549d211 --- /dev/null +++ b/include/linux/miscdevice.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_MISCDEVICE_H +#define _LINUX_MISCDEVICE_H +#include <linux/major.h> +#include <linux/list.h> +#include <linux/types.h> + +/* + * These allocations are managed by device@lanana.org. If you use an + * entry that is not in assigned your entry may well be moved and + * reassigned, or set dynamic if a fixed value is not justified. 
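+ *
+ * Purely as an illustrative sketch of the interface declared below
+ * (foo_misc and foo_fops are hypothetical names, not part of this
+ * header): a driver with no claim to a fixed minor uses
+ * MISC_DYNAMIC_MINOR and registers from its init path, deregistering
+ * on exit:
+ *
+ *	static struct miscdevice foo_misc = {
+ *		.minor = MISC_DYNAMIC_MINOR,
+ *		.name  = "foo",
+ *		.fops  = &foo_fops,
+ *	};
+ *
+ *	err = misc_register(&foo_misc);		(driver init)
+ *	misc_deregister(&foo_misc);		(driver exit)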
+ */ + +#define PSMOUSE_MINOR 1 +#define MS_BUSMOUSE_MINOR 2 +#define ATIXL_BUSMOUSE_MINOR 3 +/*#define AMIGAMOUSE_MINOR 4 FIXME OBSOLETE */ +#define ATARIMOUSE_MINOR 5 +#define SUN_MOUSE_MINOR 6 +#define APOLLO_MOUSE_MINOR 7 +#define PC110PAD_MINOR 9 +/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ +#define WATCHDOG_MINOR 130 /* Watchdog timer */ +#define TEMP_MINOR 131 /* Temperature Sensor */ +#define RTC_MINOR 135 +#define EFI_RTC_MINOR 136 /* EFI Time services */ +#define SUN_OPENPROM_MINOR 139 +#define DMAPI_MINOR 140 /* DMAPI */ +#define NVRAM_MINOR 144 +#define SGI_MMTIMER 153 +#define STORE_QUEUE_MINOR 155 +#define I2O_MINOR 166 +#define MICROCODE_MINOR 184 +#define TUN_MINOR 200 +#define MWAVE_MINOR 219 /* ACP/Mwave Modem */ +#define MPT_MINOR 220 +#define MPT2SAS_MINOR 221 +#define UINPUT_MINOR 223 +#define HPET_MINOR 228 +#define FUSE_MINOR 229 +#define KVM_MINOR 232 +#define BTRFS_MINOR 234 +#define AUTOFS_MINOR 235 +#define MAPPER_CTRL_MINOR 236 +#define LOOP_CTRL_MINOR 237 +#define VHOST_NET_MINOR 238 +#define MISC_DYNAMIC_MINOR 255 + +struct device; + +struct miscdevice { + int minor; + const char *name; + const struct file_operations *fops; + struct list_head list; + struct device *parent; + struct device *this_device; + const char *nodename; + umode_t mode; +}; + +extern int misc_register(struct miscdevice * misc); +extern int misc_deregister(struct miscdevice *misc); + +#define MODULE_ALIAS_MISCDEV(minor) \ + MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \ + "-" __stringify(minor)) +#endif diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h new file mode 100644 index 00000000..9958ff2c --- /dev/null +++ b/include/linux/mlx4/cmd.h @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX4_CMD_H +#define MLX4_CMD_H + +#include <linux/dma-mapping.h> + +enum { + /* initialization and general commands */ + MLX4_CMD_SYS_EN = 0x1, + MLX4_CMD_SYS_DIS = 0x2, + MLX4_CMD_MAP_FA = 0xfff, + MLX4_CMD_UNMAP_FA = 0xffe, + MLX4_CMD_RUN_FW = 0xff6, + MLX4_CMD_MOD_STAT_CFG = 0x34, + MLX4_CMD_QUERY_DEV_CAP = 0x3, + MLX4_CMD_QUERY_FW = 0x4, + MLX4_CMD_ENABLE_LAM = 0xff8, + MLX4_CMD_DISABLE_LAM = 0xff7, + MLX4_CMD_QUERY_DDR = 0x5, + MLX4_CMD_QUERY_ADAPTER = 0x6, + MLX4_CMD_INIT_HCA = 0x7, + MLX4_CMD_CLOSE_HCA = 0x8, + MLX4_CMD_INIT_PORT = 0x9, + MLX4_CMD_CLOSE_PORT = 0xa, + MLX4_CMD_QUERY_HCA = 0xb, + MLX4_CMD_QUERY_PORT = 0x43, + MLX4_CMD_SENSE_PORT = 0x4d, + MLX4_CMD_HW_HEALTH_CHECK = 0x50, + MLX4_CMD_SET_PORT = 0xc, + MLX4_CMD_SET_NODE = 0x5a, + MLX4_CMD_QUERY_FUNC = 0x56, + MLX4_CMD_ACCESS_DDR = 0x2e, + MLX4_CMD_MAP_ICM = 0xffa, + MLX4_CMD_UNMAP_ICM = 0xff9, + MLX4_CMD_MAP_ICM_AUX = 0xffc, + MLX4_CMD_UNMAP_ICM_AUX = 0xffb, + MLX4_CMD_SET_ICM_SIZE = 0xffd, + /*master notify fw on finish for slave's flr*/ + MLX4_CMD_INFORM_FLR_DONE = 0x5b, + + /* TPT commands */ + MLX4_CMD_SW2HW_MPT = 0xd, + MLX4_CMD_QUERY_MPT = 0xe, + MLX4_CMD_HW2SW_MPT = 0xf, + MLX4_CMD_READ_MTT = 0x10, + MLX4_CMD_WRITE_MTT = 0x11, + MLX4_CMD_SYNC_TPT = 0x2f, + + /* EQ commands */ + MLX4_CMD_MAP_EQ = 0x12, + MLX4_CMD_SW2HW_EQ = 0x13, + MLX4_CMD_HW2SW_EQ = 0x14, + MLX4_CMD_QUERY_EQ = 0x15, + + /* CQ commands */ + MLX4_CMD_SW2HW_CQ = 0x16, + MLX4_CMD_HW2SW_CQ = 0x17, + MLX4_CMD_QUERY_CQ = 0x18, + MLX4_CMD_MODIFY_CQ = 0x2c, + + /* SRQ commands */ + MLX4_CMD_SW2HW_SRQ = 0x35, + MLX4_CMD_HW2SW_SRQ = 0x36, + MLX4_CMD_QUERY_SRQ = 0x37, + MLX4_CMD_ARM_SRQ = 0x40, + + /* QP/EE commands */ + MLX4_CMD_RST2INIT_QP = 0x19, + MLX4_CMD_INIT2RTR_QP = 0x1a, + MLX4_CMD_RTR2RTS_QP = 0x1b, + MLX4_CMD_RTS2RTS_QP = 0x1c, + MLX4_CMD_SQERR2RTS_QP = 0x1d, + MLX4_CMD_2ERR_QP = 0x1e, + MLX4_CMD_RTS2SQD_QP = 0x1f, + MLX4_CMD_SQD2SQD_QP = 0x38, + MLX4_CMD_SQD2RTS_QP = 0x20, + MLX4_CMD_2RST_QP = 0x21, + MLX4_CMD_QUERY_QP = 0x22, + MLX4_CMD_INIT2INIT_QP = 0x2d, + MLX4_CMD_SUSPEND_QP = 0x32, + MLX4_CMD_UNSUSPEND_QP = 0x33, + /* special QP and management commands */ + MLX4_CMD_CONF_SPECIAL_QP = 0x23, + MLX4_CMD_MAD_IFC = 0x24, + + /* multicast commands */ + MLX4_CMD_READ_MCG = 0x25, + MLX4_CMD_WRITE_MCG = 0x26, + MLX4_CMD_MGID_HASH = 0x27, + + /* miscellaneous commands */ + MLX4_CMD_DIAG_RPRT = 0x30, + MLX4_CMD_NOP = 0x31, + MLX4_CMD_ACCESS_MEM = 0x2e, + MLX4_CMD_SET_VEP = 0x52, + + /* Ethernet specific commands */ + MLX4_CMD_SET_VLAN_FLTR = 0x47, + MLX4_CMD_SET_MCAST_FLTR = 0x48, + MLX4_CMD_DUMP_ETH_STATS = 0x49, + + /* Communication channel commands */ + MLX4_CMD_ARM_COMM_CHANNEL = 0x57, + MLX4_CMD_GEN_EQE = 0x58, + + /* virtual commands */ + MLX4_CMD_ALLOC_RES = 0xf00, + MLX4_CMD_FREE_RES = 0xf01, + MLX4_CMD_MCAST_ATTACH = 0xf05, + MLX4_CMD_UCAST_ATTACH = 0xf06, + MLX4_CMD_PROMISC = 0xf08, + MLX4_CMD_QUERY_FUNC_CAP = 0xf0a, + MLX4_CMD_QP_ATTACH = 0xf0b, + + /* debug commands */ + MLX4_CMD_QUERY_DEBUG_MSG = 0x2a, + MLX4_CMD_SET_DEBUG_MSG = 0x2b, + + /* statistics commands */ + MLX4_CMD_QUERY_IF_STAT = 0X54, + MLX4_CMD_SET_IF_STAT = 0X55, +}; + +enum { + MLX4_CMD_TIME_CLASS_A = 10000, + MLX4_CMD_TIME_CLASS_B = 10000, + MLX4_CMD_TIME_CLASS_C = 10000, +}; + +enum { + MLX4_MAILBOX_SIZE = 4096, + MLX4_ACCESS_MEM_ALIGN = 256, +}; + +enum { + /* set port opcode modifiers */ + MLX4_SET_PORT_GENERAL = 0x0, + MLX4_SET_PORT_RQP_CALC = 0x1, + MLX4_SET_PORT_MAC_TABLE = 0x2, + MLX4_SET_PORT_VLAN_TABLE = 0x3, + MLX4_SET_PORT_PRIO_MAP = 0x4, + 
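+	/*
+	 * These selectors accompany the MLX4_CMD_SET_PORT firmware command
+	 * and pick which per-port table or attribute is being configured
+	 * (general parameters, RQP calculation, MAC/VLAN tables, priority
+	 * map, GID table). How a selector is packed into the command's
+	 * modifier fields is internal to the mlx4 core; callers normally
+	 * use helpers such as mlx4_SET_PORT_general() from
+	 * <linux/mlx4/device.h> instead of issuing the command directly.
+	 */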
MLX4_SET_PORT_GID_TABLE = 0x5, +}; + +enum { + MLX4_CMD_WRAPPED, + MLX4_CMD_NATIVE +}; + +struct mlx4_dev; + +struct mlx4_cmd_mailbox { + void *buf; + dma_addr_t dma; +}; + +int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout, int native); + +/* Invoke a command with no output parameter */ +static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier, + u8 op_modifier, u16 op, unsigned long timeout, + int native) +{ + return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier, + op_modifier, op, timeout, native); +} + +/* Invoke a command with an output mailbox */ +static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param, + u32 in_modifier, u8 op_modifier, u16 op, + unsigned long timeout, int native) +{ + return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier, + op_modifier, op, timeout, native); +} + +/* + * Invoke a command with an immediate output parameter (and copy the + * output into the caller's out_param pointer after the command + * executes). + */ +static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + u32 in_modifier, u8 op_modifier, u16 op, + unsigned long timeout, int native) +{ + return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier, + op_modifier, op, timeout, native); +} + +struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); +void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); + +u32 mlx4_comm_get_version(void); + +#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) + +#endif /* MLX4_CMD_H */ diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h new file mode 100644 index 00000000..6f65b2c8 --- /dev/null +++ b/include/linux/mlx4/cq.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX4_CQ_H +#define MLX4_CQ_H + +#include <linux/types.h> + +#include <linux/mlx4/device.h> +#include <linux/mlx4/doorbell.h> + +struct mlx4_cqe { + __be32 vlan_my_qpn; + __be32 immed_rss_invalid; + __be32 g_mlpath_rqpn; + __be16 sl_vid; + __be16 rlid; + __be16 status; + u8 ipv6_ext_mask; + u8 badfcs_enc; + __be32 byte_cnt; + __be16 wqe_index; + __be16 checksum; + u8 reserved[3]; + u8 owner_sr_opcode; +}; + +struct mlx4_err_cqe { + __be32 my_qpn; + u32 reserved1[5]; + __be16 wqe_index; + u8 vendor_err_syndrome; + u8 syndrome; + u8 reserved2[3]; + u8 owner_sr_opcode; +}; + +enum { + MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29, + MLX4_CQE_QPN_MASK = 0xffffff, +}; + +enum { + MLX4_CQE_OWNER_MASK = 0x80, + MLX4_CQE_IS_SEND_MASK = 0x40, + MLX4_CQE_OPCODE_MASK = 0x1f +}; + +enum { + MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01, + MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02, + MLX4_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04, + MLX4_CQE_SYNDROME_WR_FLUSH_ERR = 0x05, + MLX4_CQE_SYNDROME_MW_BIND_ERR = 0x06, + MLX4_CQE_SYNDROME_BAD_RESP_ERR = 0x10, + MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11, + MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, + MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13, + MLX4_CQE_SYNDROME_REMOTE_OP_ERR = 0x14, + MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15, + MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16, + MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, +}; + +enum { + MLX4_CQE_STATUS_IPV4 = 1 << 6, + MLX4_CQE_STATUS_IPV4F = 1 << 7, + MLX4_CQE_STATUS_IPV6 = 1 << 8, + MLX4_CQE_STATUS_IPV4OPT = 1 << 9, + MLX4_CQE_STATUS_TCP = 1 << 10, + MLX4_CQE_STATUS_UDP = 1 << 11, + MLX4_CQE_STATUS_IPOK = 1 << 12, +}; + +enum { + MLX4_CQE_LLC = 1, + MLX4_CQE_SNAP = 1 << 1, + MLX4_CQE_BAD_FCS = 1 << 4, +}; + +static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, + void __iomem *uar_page, + spinlock_t *doorbell_lock) +{ + __be32 doorbell[2]; + u32 sn; + u32 ci; + + sn = cq->arm_sn & 3; + ci = cq->cons_index & 0xffffff; + + *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); + + /* + * Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. + */ + wmb(); + + doorbell[0] = cpu_to_be32(sn << 28 | cmd | cq->cqn); + doorbell[1] = cpu_to_be32(ci); + + mlx4_write64(doorbell, uar_page + MLX4_CQ_DOORBELL, doorbell_lock); +} + +static inline void mlx4_cq_set_ci(struct mlx4_cq *cq) +{ + *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); +} + +enum { + MLX4_CQ_DB_REQ_NOT_SOL = 1 << 24, + MLX4_CQ_DB_REQ_NOT = 2 << 24 +}; + +int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, + u16 count, u16 period); +int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, + int entries, struct mlx4_mtt *mtt); + +#endif /* MLX4_CQ_H */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h new file mode 100644 index 00000000..834c96c5 --- /dev/null +++ b/include/linux/mlx4/device.h @@ -0,0 +1,654 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_DEVICE_H +#define MLX4_DEVICE_H + +#include <linux/pci.h> +#include <linux/completion.h> +#include <linux/radix-tree.h> + +#include <linux/atomic.h> + +#define MAX_MSIX_P_PORT 17 +#define MAX_MSIX 64 +#define MSIX_LEGACY_SZ 4 +#define MIN_MSIX_P_PORT 5 + +enum { + MLX4_FLAG_MSI_X = 1 << 0, + MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, + MLX4_FLAG_MASTER = 1 << 2, + MLX4_FLAG_SLAVE = 1 << 3, + MLX4_FLAG_SRIOV = 1 << 4, +}; + +enum { + MLX4_MAX_PORTS = 2 +}; + +enum { + MLX4_BOARD_ID_LEN = 64 +}; + +enum { + MLX4_MAX_NUM_PF = 16, + MLX4_MAX_NUM_VF = 64, + MLX4_MFUNC_MAX = 80, + MLX4_MFUNC_EQ_NUM = 4, + MLX4_MFUNC_MAX_EQES = 8, + MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) +}; + +enum { + MLX4_DEV_CAP_FLAG_RC = 1LL << 0, + MLX4_DEV_CAP_FLAG_UC = 1LL << 1, + MLX4_DEV_CAP_FLAG_UD = 1LL << 2, + MLX4_DEV_CAP_FLAG_XRC = 1LL << 3, + MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6, + MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7, + MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + MLX4_DEV_CAP_FLAG_DPDP = 1LL << 12, + MLX4_DEV_CAP_FLAG_BLH = 1LL << 15, + MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1LL << 16, + MLX4_DEV_CAP_FLAG_APM = 1LL << 17, + MLX4_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + MLX4_DEV_CAP_FLAG_RAW_MCAST = 1LL << 19, + MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1LL << 20, + MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21, + MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, + MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, + MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34, + MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37, + MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38, + MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, + MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, + MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, + MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, + MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55 +}; + +#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) + +enum { + MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, + MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, + MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, + MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, + MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, +}; + +enum mlx4_event { + MLX4_EVENT_TYPE_COMP = 0x00, + MLX4_EVENT_TYPE_PATH_MIG = 0x01, + MLX4_EVENT_TYPE_COMM_EST = 0x02, + MLX4_EVENT_TYPE_SQ_DRAINED = 0x03, + MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13, + MLX4_EVENT_TYPE_SRQ_LIMIT = 0x14, + 
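+	/*
+	 * Broadly, the QP-, CQ- and SRQ-affiliated events in this enum are
+	 * delivered to the per-resource ->event() callbacks declared later
+	 * in this header (struct mlx4_cq, struct mlx4_qp, struct mlx4_srq),
+	 * while unaffiliated conditions such as port changes and
+	 * catastrophic errors are folded into the coarser enum
+	 * mlx4_dev_event and reported through the mlx4_interface hooks in
+	 * <linux/mlx4/driver.h>.
+	 */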
MLX4_EVENT_TYPE_CQ_ERROR = 0x04, + MLX4_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + MLX4_EVENT_TYPE_EEC_CATAS_ERROR = 0x06, + MLX4_EVENT_TYPE_PATH_MIG_FAILED = 0x07, + MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + MLX4_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, + MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08, + MLX4_EVENT_TYPE_PORT_CHANGE = 0x09, + MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f, + MLX4_EVENT_TYPE_ECC_DETECT = 0x0e, + MLX4_EVENT_TYPE_CMD = 0x0a, + MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, + MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, + MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, + MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, + MLX4_EVENT_TYPE_NONE = 0xff, +}; + +enum { + MLX4_PORT_CHANGE_SUBTYPE_DOWN = 1, + MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4 +}; + +enum { + MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, +}; + +enum { + MLX4_PERM_LOCAL_READ = 1 << 10, + MLX4_PERM_LOCAL_WRITE = 1 << 11, + MLX4_PERM_REMOTE_READ = 1 << 12, + MLX4_PERM_REMOTE_WRITE = 1 << 13, + MLX4_PERM_ATOMIC = 1 << 14 +}; + +enum { + MLX4_OPCODE_NOP = 0x00, + MLX4_OPCODE_SEND_INVAL = 0x01, + MLX4_OPCODE_RDMA_WRITE = 0x08, + MLX4_OPCODE_RDMA_WRITE_IMM = 0x09, + MLX4_OPCODE_SEND = 0x0a, + MLX4_OPCODE_SEND_IMM = 0x0b, + MLX4_OPCODE_LSO = 0x0e, + MLX4_OPCODE_RDMA_READ = 0x10, + MLX4_OPCODE_ATOMIC_CS = 0x11, + MLX4_OPCODE_ATOMIC_FA = 0x12, + MLX4_OPCODE_MASKED_ATOMIC_CS = 0x14, + MLX4_OPCODE_MASKED_ATOMIC_FA = 0x15, + MLX4_OPCODE_BIND_MW = 0x18, + MLX4_OPCODE_FMR = 0x19, + MLX4_OPCODE_LOCAL_INVAL = 0x1b, + MLX4_OPCODE_CONFIG_CMD = 0x1f, + + MLX4_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, + MLX4_RECV_OPCODE_SEND = 0x01, + MLX4_RECV_OPCODE_SEND_IMM = 0x02, + MLX4_RECV_OPCODE_SEND_INVAL = 0x03, + + MLX4_CQE_OPCODE_ERROR = 0x1e, + MLX4_CQE_OPCODE_RESIZE = 0x16, +}; + +enum { + MLX4_STAT_RATE_OFFSET = 5 +}; + +enum mlx4_protocol { + MLX4_PROT_IB_IPV6 = 0, + MLX4_PROT_ETH, + MLX4_PROT_IB_IPV4, + MLX4_PROT_FCOE +}; + +enum { + MLX4_MTT_FLAG_PRESENT = 1 +}; + +enum mlx4_qp_region { + MLX4_QP_REGION_FW = 0, + MLX4_QP_REGION_ETH_ADDR, + MLX4_QP_REGION_FC_ADDR, + MLX4_QP_REGION_FC_EXCH, + MLX4_NUM_QP_REGION +}; + +enum mlx4_port_type { + MLX4_PORT_TYPE_NONE = 0, + MLX4_PORT_TYPE_IB = 1, + MLX4_PORT_TYPE_ETH = 2, + MLX4_PORT_TYPE_AUTO = 3 +}; + +enum mlx4_special_vlan_idx { + MLX4_NO_VLAN_IDX = 0, + MLX4_VLAN_MISS_IDX, + MLX4_VLAN_REGULAR +}; + +enum mlx4_steer_type { + MLX4_MC_STEER = 0, + MLX4_UC_STEER, + MLX4_NUM_STEERS +}; + +enum { + MLX4_NUM_FEXCH = 64 * 1024, +}; + +enum { + MLX4_MAX_FAST_REG_PAGES = 511, +}; + +static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) +{ + return (major << 32) | (minor << 16) | subminor; +} + +struct mlx4_caps { + u64 fw_ver; + u32 function; + int num_ports; + int vl_cap[MLX4_MAX_PORTS + 1]; + int ib_mtu_cap[MLX4_MAX_PORTS + 1]; + __be32 ib_port_def_cap[MLX4_MAX_PORTS + 1]; + u64 def_mac[MLX4_MAX_PORTS + 1]; + int eth_mtu_cap[MLX4_MAX_PORTS + 1]; + int gid_table_len[MLX4_MAX_PORTS + 1]; + int pkey_table_len[MLX4_MAX_PORTS + 1]; + int trans_type[MLX4_MAX_PORTS + 1]; + int vendor_oui[MLX4_MAX_PORTS + 1]; + int wavelength[MLX4_MAX_PORTS + 1]; + u64 trans_code[MLX4_MAX_PORTS + 1]; + int local_ca_ack_delay; + int num_uars; + u32 uar_page_size; + int bf_reg_size; + int bf_regs_per_page; + int max_sq_sg; + int max_rq_sg; + int num_qps; + int max_wqes; + int max_sq_desc_sz; + int max_rq_desc_sz; + int max_qp_init_rdma; + int max_qp_dest_rdma; + int sqp_start; + int num_srqs; + int max_srq_wqes; + int max_srq_sge; + int reserved_srqs; + int num_cqs; + int max_cqes; + int reserved_cqs; + int num_eqs; + int reserved_eqs; + 
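+	/*
+	 * The remaining fields continue the device limits and per-port
+	 * attributes reported by firmware; broadly speaking, the whole
+	 * structure is populated from the QUERY_DEV_CAP / QUERY_PORT
+	 * results at probe time and treated as read-only afterwards.
+	 */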
int num_comp_vectors; + int comp_pool; + int num_mpts; + int max_fmr_maps; + int num_mtts; + int fmr_reserved_mtts; + int reserved_mtts; + int reserved_mrws; + int reserved_uars; + int num_mgms; + int num_amgms; + int reserved_mcgs; + int num_qp_per_mgm; + int num_pds; + int reserved_pds; + int max_xrcds; + int reserved_xrcds; + int mtt_entry_sz; + u32 max_msg_sz; + u32 page_size_cap; + u64 flags; + u32 bmme_flags; + u32 reserved_lkey; + u16 stat_rate_support; + u8 port_width_cap[MLX4_MAX_PORTS + 1]; + int max_gso_sz; + int reserved_qps_cnt[MLX4_NUM_QP_REGION]; + int reserved_qps; + int reserved_qps_base[MLX4_NUM_QP_REGION]; + int log_num_macs; + int log_num_vlans; + int log_num_prios; + enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; + u8 supported_type[MLX4_MAX_PORTS + 1]; + u8 suggested_type[MLX4_MAX_PORTS + 1]; + u8 default_sense[MLX4_MAX_PORTS + 1]; + u32 port_mask[MLX4_MAX_PORTS + 1]; + enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; + u32 max_counters; + u8 port_ib_mtu[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_buf_list { + void *buf; + dma_addr_t map; +}; + +struct mlx4_buf { + struct mlx4_buf_list direct; + struct mlx4_buf_list *page_list; + int nbufs; + int npages; + int page_shift; +}; + +struct mlx4_mtt { + u32 offset; + int order; + int page_shift; +}; + +enum { + MLX4_DB_PER_PAGE = PAGE_SIZE / 4 +}; + +struct mlx4_db_pgdir { + struct list_head list; + DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE); + DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2); + unsigned long *bits[2]; + __be32 *db_page; + dma_addr_t db_dma; +}; + +struct mlx4_ib_user_db_page; + +struct mlx4_db { + __be32 *db; + union { + struct mlx4_db_pgdir *pgdir; + struct mlx4_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; + int order; +}; + +struct mlx4_hwq_resources { + struct mlx4_db db; + struct mlx4_mtt mtt; + struct mlx4_buf buf; +}; + +struct mlx4_mr { + struct mlx4_mtt mtt; + u64 iova; + u64 size; + u32 key; + u32 pd; + u32 access; + int enabled; +}; + +struct mlx4_fmr { + struct mlx4_mr mr; + struct mlx4_mpt_entry *mpt; + __be64 *mtts; + dma_addr_t dma_handle; + int max_pages; + int max_maps; + int maps; + u8 page_shift; +}; + +struct mlx4_uar { + unsigned long pfn; + int index; + struct list_head bf_list; + unsigned free_bf_bmap; + void __iomem *map; + void __iomem *bf_map; +}; + +struct mlx4_bf { + unsigned long offset; + int buf_size; + struct mlx4_uar *uar; + void __iomem *reg; +}; + +struct mlx4_cq { + void (*comp) (struct mlx4_cq *); + void (*event) (struct mlx4_cq *, enum mlx4_event); + + struct mlx4_uar *uar; + + u32 cons_index; + + __be32 *set_ci_db; + __be32 *arm_db; + int arm_sn; + + int cqn; + unsigned vector; + + atomic_t refcount; + struct completion free; +}; + +struct mlx4_qp { + void (*event) (struct mlx4_qp *, enum mlx4_event); + + int qpn; + + atomic_t refcount; + struct completion free; +}; + +struct mlx4_srq { + void (*event) (struct mlx4_srq *, enum mlx4_event); + + int srqn; + int max; + int max_gs; + int wqe_shift; + + atomic_t refcount; + struct completion free; +}; + +struct mlx4_av { + __be32 port_pd; + u8 reserved1; + u8 g_slid; + __be16 dlid; + u8 reserved2; + u8 gid_index; + u8 stat_rate; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + u8 dgid[16]; +}; + +struct mlx4_eth_av { + __be32 port_pd; + u8 reserved1; + u8 smac_idx; + u16 reserved2; + u8 reserved3; + u8 gid_index; + u8 stat_rate; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + u8 dgid[16]; + u32 reserved4[2]; + __be16 vlan; + u8 mac[6]; +}; + +union mlx4_ext_av { + struct mlx4_av ib; + struct mlx4_eth_av 
eth; +}; + +struct mlx4_counter { + u8 reserved1[3]; + u8 counter_mode; + __be32 num_ifc; + u32 reserved2[2]; + __be64 rx_frames; + __be64 rx_bytes; + __be64 tx_frames; + __be64 tx_bytes; +}; + +struct mlx4_dev { + struct pci_dev *pdev; + unsigned long flags; + unsigned long num_slaves; + struct mlx4_caps caps; + struct radix_tree_root qp_table_tree; + u8 rev_id; + char board_id[MLX4_BOARD_ID_LEN]; + int num_vfs; +}; + +struct mlx4_init_port_param { + int set_guid0; + int set_node_guid; + int set_si_guid; + u16 mtu; + int port_width_cap; + u16 vl_cap; + u16 max_gid; + u16 max_pkey; + u64 guid0; + u64 node_guid; + u64 si_guid; +}; + +#define mlx4_foreach_port(port, dev, type) \ + for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ + if ((type) == (dev)->caps.port_mask[(port)]) + +#define mlx4_foreach_ib_transport_port(port, dev) \ + for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ + if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ + ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) + +static inline int mlx4_is_master(struct mlx4_dev *dev) +{ + return dev->flags & MLX4_FLAG_MASTER; +} + +static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) +{ + return (qpn < dev->caps.sqp_start + 8); +} + +static inline int mlx4_is_mfunc(struct mlx4_dev *dev) +{ + return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER); +} + +static inline int mlx4_is_slave(struct mlx4_dev *dev) +{ + return dev->flags & MLX4_FLAG_SLAVE; +} + +int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, + struct mlx4_buf *buf); +void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); +static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) +{ + if (BITS_PER_LONG == 64 || buf->nbufs == 1) + return buf->direct.buf + offset; + else + return buf->page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); +void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); +int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); +void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); + +int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); +void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); +int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf); +void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf); + +int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, + struct mlx4_mtt *mtt); +void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt); +u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt); + +int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, + int npages, int page_shift, struct mlx4_mr *mr); +void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr); +int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr); +int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list); +int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_buf *buf); + +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); +void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); + +int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, + int size, int max_direct); +void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, + int size); + +int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, + struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, + unsigned vector, int 
collapsed); +void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); + +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); +void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); + +int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); +void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); + +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, + struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq); +void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq); +int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark); +int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark); + +int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); +int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); + +int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot); +int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol prot); +int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol protocol); +int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol protocol); +int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); +int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); +int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); +int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); +int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); + +int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); +void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); +int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); +int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn); +void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn); +void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap); +int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, + u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); +int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, + u8 promisc); +int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); +int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); +void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); + +int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, + int npages, u64 iova, u32 *lkey, u32 *rkey); +int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, + int max_maps, u8 page_shift, struct mlx4_fmr *fmr); +int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr); +void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, + u32 *lkey, u32 *rkey); +int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); +int mlx4_SYNC_TPT(struct mlx4_dev *dev); +int mlx4_test_interrupts(struct mlx4_dev *dev); +int mlx4_assign_eq(struct mlx4_dev *dev, char* name , int* vector); +void mlx4_release_eq(struct mlx4_dev *dev, int vec); + +int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); +int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); + +int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); +void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); + +#endif /* MLX4_DEVICE_H */ diff --git 
a/include/linux/mlx4/doorbell.h b/include/linux/mlx4/doorbell.h new file mode 100644 index 00000000..f31bba27 --- /dev/null +++ b/include/linux/mlx4/doorbell.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_DOORBELL_H +#define MLX4_DOORBELL_H + +#include <linux/types.h> +#include <linux/io.h> + +#define MLX4_SEND_DOORBELL 0x14 +#define MLX4_CQ_DOORBELL 0x20 + +#if BITS_PER_LONG == 64 +/* + * Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. + */ + +#define MLX4_DECLARE_DOORBELL_LOCK(name) +#define MLX4_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define MLX4_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void mlx4_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *) val, dest); +} + +#else + +/* + * Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define MLX4_DECLARE_DOORBELL_LOCK(name) spinlock_t name; +#define MLX4_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define MLX4_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void mlx4_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], dest + 4); + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + +#endif /* MLX4_DOORBELL_H */ diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h new file mode 100644 index 00000000..5f1298b1 --- /dev/null +++ b/include/linux/mlx4/driver.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_DRIVER_H +#define MLX4_DRIVER_H + +#include <linux/mlx4/device.h> + +struct mlx4_dev; + +enum mlx4_dev_event { + MLX4_DEV_EVENT_CATASTROPHIC_ERROR, + MLX4_DEV_EVENT_PORT_UP, + MLX4_DEV_EVENT_PORT_DOWN, + MLX4_DEV_EVENT_PORT_REINIT, +}; + +struct mlx4_interface { + void * (*add) (struct mlx4_dev *dev); + void (*remove)(struct mlx4_dev *dev, void *context); + void (*event) (struct mlx4_dev *dev, void *context, + enum mlx4_dev_event event, int port); + void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); + struct list_head list; + enum mlx4_protocol protocol; +}; + +int mlx4_register_interface(struct mlx4_interface *intf); +void mlx4_unregister_interface(struct mlx4_interface *intf); + +void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); + +#endif /* MLX4_DRIVER_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h new file mode 100644 index 00000000..091f9e7d --- /dev/null +++ b/include/linux/mlx4/qp.h @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_QP_H +#define MLX4_QP_H + +#include <linux/types.h> + +#include <linux/mlx4/device.h> + +#define MLX4_INVALID_LKEY 0x100 + +enum mlx4_qp_optpar { + MLX4_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, + MLX4_QP_OPTPAR_RRE = 1 << 1, + MLX4_QP_OPTPAR_RAE = 1 << 2, + MLX4_QP_OPTPAR_RWE = 1 << 3, + MLX4_QP_OPTPAR_PKEY_INDEX = 1 << 4, + MLX4_QP_OPTPAR_Q_KEY = 1 << 5, + MLX4_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, + MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, + MLX4_QP_OPTPAR_SRA_MAX = 1 << 8, + MLX4_QP_OPTPAR_RRA_MAX = 1 << 9, + MLX4_QP_OPTPAR_PM_STATE = 1 << 10, + MLX4_QP_OPTPAR_RETRY_COUNT = 1 << 12, + MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, + MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, + MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16, + MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20 +}; + +enum mlx4_qp_state { + MLX4_QP_STATE_RST = 0, + MLX4_QP_STATE_INIT = 1, + MLX4_QP_STATE_RTR = 2, + MLX4_QP_STATE_RTS = 3, + MLX4_QP_STATE_SQER = 4, + MLX4_QP_STATE_SQD = 5, + MLX4_QP_STATE_ERR = 6, + MLX4_QP_STATE_SQ_DRAINING = 7, + MLX4_QP_NUM_STATE +}; + +enum { + MLX4_QP_ST_RC = 0x0, + MLX4_QP_ST_UC = 0x1, + MLX4_QP_ST_RD = 0x2, + MLX4_QP_ST_UD = 0x3, + MLX4_QP_ST_XRC = 0x6, + MLX4_QP_ST_MLX = 0x7 +}; + +enum { + MLX4_QP_PM_MIGRATED = 0x3, + MLX4_QP_PM_ARMED = 0x0, + MLX4_QP_PM_REARM = 0x1 +}; + +enum { + /* params1 */ + MLX4_QP_BIT_SRE = 1 << 15, + MLX4_QP_BIT_SWE = 1 << 14, + MLX4_QP_BIT_SAE = 1 << 13, + /* params2 */ + MLX4_QP_BIT_RRE = 1 << 15, + MLX4_QP_BIT_RWE = 1 << 14, + MLX4_QP_BIT_RAE = 1 << 13, + MLX4_QP_BIT_RIC = 1 << 4, +}; + +enum { + MLX4_RSS_HASH_XOR = 0, + MLX4_RSS_HASH_TOP = 1, + + MLX4_RSS_UDP_IPV6 = 1 << 0, + MLX4_RSS_UDP_IPV4 = 1 << 1, + MLX4_RSS_TCP_IPV6 = 1 << 2, + MLX4_RSS_IPV6 = 1 << 3, + MLX4_RSS_TCP_IPV4 = 1 << 4, + MLX4_RSS_IPV4 = 1 << 5, + + /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */ + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24, + /* offset of being RSS indirection QP within mlx4_qp_context.flags */ + MLX4_RSS_QPC_FLAG_OFFSET = 13, +}; + +struct mlx4_rss_context { + __be32 base_qpn; + __be32 default_qpn; + u16 reserved; + u8 hash_fn; + u8 flags; + __be32 rss_key[10]; + __be32 base_qpn_udp; +}; + +struct mlx4_qp_path { + u8 fl; + u8 reserved1[2]; + u8 pkey_index; + u8 counter_index; + u8 grh_mylmc; + __be16 rlid; + u8 ackto; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + u8 rgid[16]; + u8 sched_queue; + u8 vlan_index; + u8 reserved3[2]; + u8 reserved4[2]; + u8 dmac[6]; +}; + +struct mlx4_qp_context { + __be32 flags; + __be32 pd; + u8 mtu_msgmax; + u8 rq_size_stride; + u8 sq_size_stride; + u8 rlkey; + __be32 usr_page; + __be32 local_qpn; + __be32 remote_qpn; + struct mlx4_qp_path pri_path; + struct mlx4_qp_path alt_path; + __be32 params1; + u32 reserved1; + __be32 next_send_psn; + __be32 cqn_send; + u32 reserved2[2]; + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 xrcd; + __be32 cqn_recv; + __be64 db_rec_addr; + __be32 qkey; + __be32 srqn; + __be32 msn; + __be16 rq_wqe_counter; + __be16 sq_wqe_counter; + u32 reserved3[2]; + __be32 param3; + __be32 nummmcpeers_basemkey; + u8 log_page_size; + u8 reserved4[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + u32 reserved5[10]; +}; + +/* Which firmware version adds support for NEC 
(NoErrorCompletion) bit */ +#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) + +enum { + MLX4_WQE_CTRL_NEC = 1 << 29, + MLX4_WQE_CTRL_FENCE = 1 << 6, + MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2, + MLX4_WQE_CTRL_SOLICITED = 1 << 1, + MLX4_WQE_CTRL_IP_CSUM = 1 << 4, + MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, + MLX4_WQE_CTRL_INS_VLAN = 1 << 6, + MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7, + MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0, +}; + +struct mlx4_wqe_ctrl_seg { + __be32 owner_opcode; + __be16 vlan_tag; + u8 ins_vlan; + u8 fence_size; + /* + * High 24 bits are SRC remote buffer; low 8 bits are flags: + * [7] SO (strong ordering) + * [5] TCP/UDP checksum + * [4] IP checksum + * [3:2] C (generate completion queue entry) + * [1] SE (solicited event) + * [0] FL (force loopback) + */ + union { + __be32 srcrb_flags; + __be16 srcrb_flags16[2]; + }; + /* + * imm is immediate data for send/RDMA write w/ immediate; + * also invalidation key for send with invalidate; input + * modifier for WQEs on CCQs. + */ + __be32 imm; +}; + +enum { + MLX4_WQE_MLX_VL15 = 1 << 17, + MLX4_WQE_MLX_SLR = 1 << 16 +}; + +struct mlx4_wqe_mlx_seg { + u8 owner; + u8 reserved1[2]; + u8 opcode; + u8 reserved2[3]; + u8 size; + /* + * [17] VL15 + * [16] SLR + * [15:12] static rate + * [11:8] SL + * [4] ICRC + * [3:2] C + * [0] FL (force loopback) + */ + __be32 flags; + __be16 rlid; + u16 reserved3; +}; + +struct mlx4_wqe_datagram_seg { + __be32 av[8]; + __be32 dqpn; + __be32 qkey; + __be16 vlan; + u8 mac[6]; +}; + +struct mlx4_wqe_lso_seg { + __be32 mss_hdr_size; + __be32 header[0]; +}; + +struct mlx4_wqe_bind_seg { + __be32 flags1; + __be32 flags2; + __be32 new_rkey; + __be32 lkey; + __be64 addr; + __be64 length; +}; + +enum { + MLX4_WQE_FMR_PERM_LOCAL_READ = 1 << 27, + MLX4_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, + MLX4_WQE_FMR_PERM_REMOTE_READ = 1 << 29, + MLX4_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30, + MLX4_WQE_FMR_PERM_ATOMIC = 1 << 31 +}; + +struct mlx4_wqe_fmr_seg { + __be32 flags; + __be32 mem_key; + __be64 buf_list; + __be64 start_addr; + __be64 reg_len; + __be32 offset; + __be32 page_size; + u32 reserved[2]; +}; + +struct mlx4_wqe_fmr_ext_seg { + u8 flags; + u8 reserved; + __be16 app_mask; + __be16 wire_app_tag; + __be16 mem_app_tag; + __be32 wire_ref_tag_base; + __be32 mem_ref_tag_base; +}; + +struct mlx4_wqe_local_inval_seg { + __be32 flags; + u32 reserved1; + __be32 mem_key; + u32 reserved2[2]; + __be32 guest_id; + __be64 pa; +}; + +struct mlx4_wqe_raddr_seg { + __be64 raddr; + __be32 rkey; + u32 reserved; +}; + +struct mlx4_wqe_atomic_seg { + __be64 swap_add; + __be64 compare; +}; + +struct mlx4_wqe_masked_atomic_seg { + __be64 swap_add; + __be64 compare; + __be64 swap_add_mask; + __be64 compare_mask; +}; + +struct mlx4_wqe_data_seg { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +enum { + MLX4_INLINE_ALIGN = 64, + MLX4_INLINE_SEG = 1 << 31, +}; + +struct mlx4_wqe_inline_seg { + __be32 byte_count; +}; + +int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp); + +int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, + struct mlx4_qp_context *context); + +int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_qp_context *context, + struct mlx4_qp *qp, enum mlx4_qp_state *qp_state); + +static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) +{ + return radix_tree_lookup(&dev->qp_table_tree, qpn & 
(dev->caps.num_qps - 1)); +} + +void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp); + +#endif /* MLX4_QP_H */ diff --git a/include/linux/mlx4/srq.h b/include/linux/mlx4/srq.h new file mode 100644 index 00000000..799a0697 --- /dev/null +++ b/include/linux/mlx4/srq.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_SRQ_H +#define MLX4_SRQ_H + +struct mlx4_wqe_srq_next_seg { + u16 reserved1; + __be16 next_wqe_index; + u32 reserved2[3]; +}; + +#endif /* MLX4_SRQ_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h new file mode 100644 index 00000000..3bf2f379 --- /dev/null +++ b/include/linux/mm.h @@ -0,0 +1,1638 @@ +#ifndef _LINUX_MM_H +#define _LINUX_MM_H + +#include <linux/errno.h> + +#ifdef __KERNEL__ + +#include <linux/gfp.h> +#include <linux/bug.h> +#include <linux/list.h> +#include <linux/mmzone.h> +#include <linux/rbtree.h> +#include <linux/prio_tree.h> +#include <linux/atomic.h> +#include <linux/debug_locks.h> +#include <linux/mm_types.h> +#include <linux/range.h> +#include <linux/pfn.h> +#include <linux/bit_spinlock.h> +#include <linux/shrinker.h> + +struct mempolicy; +struct anon_vma; +struct file_ra_state; +struct user_struct; +struct writeback_control; + +#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ +extern unsigned long max_mapnr; +#endif + +extern unsigned long num_physpages; +extern unsigned long totalram_pages; +extern void * high_memory; +extern int page_cluster; + +#ifdef CONFIG_SYSCTL +extern int sysctl_legacy_va_layout; +#else +#define sysctl_legacy_va_layout 0 +#endif + +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/processor.h> + +#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) + +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) + +/* + * Linux kernel virtual memory manager primitives. + * The idea being to have a "virtual" mm in the same way + * we have a virtual fs - giving a cleaner interface to the + * mm details, and allowing different kinds of memory mappings + * (from shared memory to executable loading to arbitrary + * mmap() functions). 
+ */ + +extern struct kmem_cache *vm_area_cachep; + +#ifndef CONFIG_MMU +extern struct rb_root nommu_region_tree; +extern struct rw_semaphore nommu_region_sem; + +extern unsigned int kobjsize(const void *objp); +#endif + +/* + * vm_flags in vm_area_struct, see mm_types.h. + */ +#define VM_READ 0x00000001 /* currently active flags */ +#define VM_WRITE 0x00000002 +#define VM_EXEC 0x00000004 +#define VM_SHARED 0x00000008 + +/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */ +#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ +#define VM_MAYWRITE 0x00000020 +#define VM_MAYEXEC 0x00000040 +#define VM_MAYSHARE 0x00000080 + +#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) +#define VM_GROWSUP 0x00000200 +#else +#define VM_GROWSUP 0x00000000 +#define VM_NOHUGEPAGE 0x00000200 /* MADV_NOHUGEPAGE marked this vma */ +#endif +#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ +#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ + +#define VM_EXECUTABLE 0x00001000 +#define VM_LOCKED 0x00002000 +#define VM_IO 0x00004000 /* Memory mapped I/O or similar */ + + /* Used by sys_madvise() */ +#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */ +#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */ + +#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ +#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ +#define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */ +#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ +#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ +#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ +#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */ +#else +#define VM_HUGEPAGE 0x01000000 /* MADV_HUGEPAGE marked this vma */ +#endif +#define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */ +#define VM_NODUMP 0x04000000 /* Do not include in the core dump */ + +#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ +#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ +#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ +#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ +#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ + +/* Bits set in the VMA until the stack is in its final location */ +#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) + +#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ +#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS +#endif + +#ifdef CONFIG_STACK_GROWSUP +#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) +#else +#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) +#endif + +#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ) +#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK +#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK)) +#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ) +#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ) + +/* + * Special vmas that are non-mergable, non-mlock()able. + * Note: mm/huge_memory.c VM_NO_THP depends on this definition. 
+ */ +#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP) + +/* + * mapping from the currently active vm_flags protection bits (the + * low four bits) to a page protection mask.. + */ +extern pgprot_t protection_map[16]; + +#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ +#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ +#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ +#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ +#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */ +#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */ + +/* + * This interface is used by x86 PAT code to identify a pfn mapping that is + * linear over entire vma. This is to optimize PAT code that deals with + * marking the physical region with a particular prot. This is not for generic + * mm use. Note also that this check will not work if the pfn mapping is + * linear for a vma starting at physical address 0. In which case PAT code + * falls back to slow path of reserving physical range page by page. + */ +static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) +{ + return !!(vma->vm_flags & VM_PFN_AT_MMAP); +} + +static inline int is_pfn_mapping(struct vm_area_struct *vma) +{ + return !!(vma->vm_flags & VM_PFNMAP); +} + +/* + * vm_fault is filled by the the pagefault handler and passed to the vma's + * ->fault function. The vma's ->fault is responsible for returning a bitmask + * of VM_FAULT_xxx flags that give details about how the fault was handled. + * + * pgoff should be used in favour of virtual_address, if possible. If pgoff + * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear + * mapping support. + */ +struct vm_fault { + unsigned int flags; /* FAULT_FLAG_xxx flags */ + pgoff_t pgoff; /* Logical page offset based on vma */ + void __user *virtual_address; /* Faulting virtual address */ + + struct page *page; /* ->fault handlers should return a + * page here, unless VM_FAULT_NOPAGE + * is set (which is also implied by + * VM_FAULT_ERROR). + */ +}; + +/* + * These are the virtual MM functions - opening of an area, closing and + * unmapping it (needed to keep files on disk up-to-date etc), pointer + * to the functions called when a no-page or a wp-page exception occurs. + */ +struct vm_operations_struct { + void (*open)(struct vm_area_struct * area); + void (*close)(struct vm_area_struct * area); + int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); + + /* notification that a previously read-only page is about to become + * writable, if an error is returned it will cause a SIGBUS */ + int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); + + /* called by access_process_vm when get_user_pages() fails, typically + * for use by special VMAs that can switch between memory and hardware + */ + int (*access)(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); +#ifdef CONFIG_NUMA + /* + * set_policy() op must add a reference to any non-NULL @new mempolicy + * to hold the policy upon return. Caller should pass NULL @new to + * remove a policy and fall back to surrounding context--i.e. do not + * install a MPOL_DEFAULT policy, nor the task or system default + * mempolicy. + */ + int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); + + /* + * get_policy() op must add reference [mpol_get()] to any policy at + * (vma,addr) marked as MPOL_SHARED. 
The shared policy infrastructure + * in mm/mempolicy.c will do this automatically. + * get_policy() must NOT add a ref if the policy at (vma,addr) is not + * marked as MPOL_SHARED. vma policies are protected by the mmap_sem. + * If no [shared/vma] mempolicy exists at the addr, get_policy() op + * must return NULL--i.e., do not "fallback" to task or system default + * policy. + */ + struct mempolicy *(*get_policy)(struct vm_area_struct *vma, + unsigned long addr); + int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from, + const nodemask_t *to, unsigned long flags); +#endif +}; + +struct mmu_gather; +struct inode; + +#define page_private(page) ((page)->private) +#define set_page_private(page, v) ((page)->private = (v)) + +/* + * FIXME: take this include out, include page-flags.h in + * files which need it (119 of them) + */ +#include <linux/page-flags.h> +#include <linux/huge_mm.h> + +/* + * Methods to modify the page usage count. + * + * What counts for a page usage: + * - cache mapping (page->mapping) + * - private data (page->private) + * - page mapped in a task's page tables, each mapping + * is counted separately + * + * Also, many kernel routines increase the page count before a critical + * routine so they can be sure the page doesn't go away from under them. + */ + +/* + * Drop a ref, return true if the refcount fell to zero (the page has no users) + */ +static inline int put_page_testzero(struct page *page) +{ + VM_BUG_ON(atomic_read(&page->_count) == 0); + return atomic_dec_and_test(&page->_count); +} + +/* + * Try to grab a ref unless the page has a refcount of zero, return false if + * that is the case. + */ +static inline int get_page_unless_zero(struct page *page) +{ + return atomic_inc_not_zero(&page->_count); +} + +extern int page_is_ram(unsigned long pfn); + +/* Support for virtually mapped pages */ +struct page *vmalloc_to_page(const void *addr); +unsigned long vmalloc_to_pfn(const void *addr); + +/* + * Determine if an address is within the vmalloc range + * + * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there + * is no special casing required. + */ +static inline int is_vmalloc_addr(const void *x) +{ +#ifdef CONFIG_MMU + unsigned long addr = (unsigned long)x; + + return addr >= VMALLOC_START && addr < VMALLOC_END; +#else + return 0; +#endif +} +#ifdef CONFIG_MMU +extern int is_vmalloc_or_module_addr(const void *x); +#else +static inline int is_vmalloc_or_module_addr(const void *x) +{ + return 0; +} +#endif + +static inline void compound_lock(struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + bit_spin_lock(PG_compound_lock, &page->flags); +#endif +} + +static inline void compound_unlock(struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + bit_spin_unlock(PG_compound_lock, &page->flags); +#endif +} + +static inline unsigned long compound_lock_irqsave(struct page *page) +{ + unsigned long uninitialized_var(flags); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + local_irq_save(flags); + compound_lock(page); +#endif + return flags; +} + +static inline void compound_unlock_irqrestore(struct page *page, + unsigned long flags) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + compound_unlock(page); + local_irq_restore(flags); +#endif +} + +static inline struct page *compound_head(struct page *page) +{ + if (unlikely(PageTail(page))) + return page->first_page; + return page; +} + +/* + * The atomic page->_mapcount, starts from -1: so that transitions + * both from it and to it can be tracked, using atomic_inc_and_test + * and atomic_add_negative(-1). 
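/*
 * Illustrative sketch, not part of mm.h: the usual pattern for taking a
 * speculative reference with get_page_unless_zero() on a page that may
 * be freed concurrently.  "example_inspect_page" is a hypothetical
 * helper; put_page() is declared further down in this header.
 */
static inline int example_inspect_page(struct page *page)
{
	if (!get_page_unless_zero(page))
		return -EAGAIN;		/* refcount already hit zero, page is going away */

	/* ... the page cannot be freed while we hold the reference ... */

	put_page(page);			/* drop our speculative reference */
	return 0;
}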
+ */ +static inline void reset_page_mapcount(struct page *page) +{ + atomic_set(&(page)->_mapcount, -1); +} + +static inline int page_mapcount(struct page *page) +{ + return atomic_read(&(page)->_mapcount) + 1; +} + +static inline int page_count(struct page *page) +{ + return atomic_read(&compound_head(page)->_count); +} + +static inline void get_huge_page_tail(struct page *page) +{ + /* + * __split_huge_page_refcount() cannot run + * from under us. + */ + VM_BUG_ON(page_mapcount(page) < 0); + VM_BUG_ON(atomic_read(&page->_count) != 0); + atomic_inc(&page->_mapcount); +} + +extern bool __get_page_tail(struct page *page); + +static inline void get_page(struct page *page) +{ + if (unlikely(PageTail(page))) + if (likely(__get_page_tail(page))) + return; + /* + * Getting a normal page or the head of a compound page + * requires to already have an elevated page->_count. + */ + VM_BUG_ON(atomic_read(&page->_count) <= 0); + atomic_inc(&page->_count); +} + +static inline struct page *virt_to_head_page(const void *x) +{ + struct page *page = virt_to_page(x); + return compound_head(page); +} + +/* + * Setup the page count before being freed into the page allocator for + * the first time (boot or memory hotplug) + */ +static inline void init_page_count(struct page *page) +{ + atomic_set(&page->_count, 1); +} + +/* + * PageBuddy() indicate that the page is free and in the buddy system + * (see mm/page_alloc.c). + * + * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to + * -2 so that an underflow of the page_mapcount() won't be mistaken + * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very + * efficiently by most CPU architectures. + */ +#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) + +static inline int PageBuddy(struct page *page) +{ + return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; +} + +static inline void __SetPageBuddy(struct page *page) +{ + VM_BUG_ON(atomic_read(&page->_mapcount) != -1); + atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); +} + +static inline void __ClearPageBuddy(struct page *page) +{ + VM_BUG_ON(!PageBuddy(page)); + atomic_set(&page->_mapcount, -1); +} + +void put_page(struct page *page); +void put_pages_list(struct list_head *pages); + +void split_page(struct page *page, unsigned int order); +int split_free_page(struct page *page); + +/* + * Compound pages have a destructor function. Provide a + * prototype for that function and accessor functions. + * These are _only_ valid on the head of a PG_compound page. + */ +typedef void compound_page_dtor(struct page *); + +static inline void set_compound_page_dtor(struct page *page, + compound_page_dtor *dtor) +{ + page[1].lru.next = (void *)dtor; +} + +static inline compound_page_dtor *get_compound_page_dtor(struct page *page) +{ + return (compound_page_dtor *)page[1].lru.next; +} + +static inline int compound_order(struct page *page) +{ + if (!PageHead(page)) + return 0; + return (unsigned long)page[1].lru.prev; +} + +static inline int compound_trans_order(struct page *page) +{ + int order; + unsigned long flags; + + if (!PageHead(page)) + return 0; + + flags = compound_lock_irqsave(page); + order = compound_order(page); + compound_unlock_irqrestore(page, flags); + return order; +} + +static inline void set_compound_order(struct page *page, unsigned long order) +{ + page[1].lru.prev = (void *)order; +} + +#ifdef CONFIG_MMU +/* + * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when + * servicing faults for write access. 
In the normal case, we always want + * pte_mkwrite. But get_user_pages can cause write faults for mappings + * that do not have writing enabled, when used by access_process_vm. + */ +static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) +{ + if (likely(vma->vm_flags & VM_WRITE)) + pte = pte_mkwrite(pte); + return pte; +} +#endif + +/* + * Multiple processes may "see" the same page. E.g. for untouched + * mappings of /dev/null, all processes see the same page full of + * zeroes, and text pages of executables and shared libraries have + * only one copy in memory, at most, normally. + * + * For the non-reserved pages, page_count(page) denotes a reference count. + * page_count() == 0 means the page is free. page->lru is then used for + * freelist management in the buddy allocator. + * page_count() > 0 means the page has been allocated. + * + * Pages are allocated by the slab allocator in order to provide memory + * to kmalloc and kmem_cache_alloc. In this case, the management of the + * page, and the fields in 'struct page' are the responsibility of mm/slab.c + * unless a particular usage is carefully commented. (the responsibility of + * freeing the kmalloc memory is the caller's, of course). + * + * A page may be used by anyone else who does a __get_free_page(). + * In this case, page_count still tracks the references, and should only + * be used through the normal accessor functions. The top bits of page->flags + * and page->virtual store page management information, but all other fields + * are unused and could be used privately, carefully. The management of this + * page is the responsibility of the one who allocated it, and those who have + * subsequently been given references to it. + * + * The other pages (we may call them "pagecache pages") are completely + * managed by the Linux memory manager: I/O, buffers, swapping etc. + * The following discussion applies only to them. + * + * A pagecache page contains an opaque `private' member, which belongs to the + * page's address_space. Usually, this is the address of a circular list of + * the page's disk buffers. PG_private must be set to tell the VM to call + * into the filesystem to release these pages. + * + * A page may belong to an inode's memory mapping. In this case, page->mapping + * is the pointer to the inode, and page->index is the file offset of the page, + * in units of PAGE_CACHE_SIZE. + * + * If pagecache pages are not associated with an inode, they are said to be + * anonymous pages. These may become associated with the swapcache, and in that + * case PG_swapcache is set, and page->private is an offset into the swapcache. + * + * In either case (swapcache or inode backed), the pagecache itself holds one + * reference to the page. Setting PG_private should also increment the + * refcount. Each user mapping also has a reference to the page. + * + * The pagecache pages are stored in a per-mapping radix tree, which is + * rooted at mapping->page_tree, and indexed by offset. + * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space + * lists, we instead now tag pages as dirty/writeback in the radix tree.
+ * + * All pagecache pages may be subject to I/O: + * - inode pages may need to be read from disk, + * - inode pages which have been modified and are MAP_SHARED may need + * to be written back to the inode on disk, + * - anonymous pages (including MAP_PRIVATE file mappings) which have been + * modified may need to be swapped out to swap space and (later) to be read + * back into memory. + */ + +/* + * The zone field is never updated after free_area_init_core() + * sets it, so none of the operations on it need to be atomic. + */ + + +/* + * page->flags layout: + * + * There are three possibilities for how page->flags get + * laid out. The first is for the normal case, without + * sparsemem. The second is for sparsemem when there is + * plenty of space for node and section. The last is when + * we have run out of space and have to fall back to an + * alternate (slower) way of determining the node. + * + * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS | + * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS | + * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS | + */ +#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) +#define SECTIONS_WIDTH SECTIONS_SHIFT +#else +#define SECTIONS_WIDTH 0 +#endif + +#define ZONES_WIDTH ZONES_SHIFT + +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#define NODES_WIDTH NODES_SHIFT +#else +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#error "Vmemmap: No space for nodes field in page flags" +#endif +#define NODES_WIDTH 0 +#endif + +/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */ +#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) +#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) +#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) + +/* + * We are going to use the flags for the page to node mapping if its in + * there. This includes the case where there is no node, so it is implicit. + */ +#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) +#define NODE_NOT_IN_PAGE_FLAGS +#endif + +/* + * Define the bit shifts to access each section. For non-existent + * sections we define the shift as 0; that plus a 0 mask ensures + * the compiler will optimise away reference to them. + */ +#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) +#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) +#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) + +/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ +#ifdef NODE_NOT_IN_PAGE_FLAGS +#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) +#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \ + SECTIONS_PGOFF : ZONES_PGOFF) +#else +#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) +#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \ + NODES_PGOFF : ZONES_PGOFF) +#endif + +#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) + +#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS +#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS +#endif + +#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) +#define NODES_MASK ((1UL << NODES_WIDTH) - 1) +#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) +#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) + +static inline enum zone_type page_zonenum(const struct page *page) +{ + return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; +} + +/* + * The identification function is only used by the buddy allocator for + * determining if two pages could be buddies. 
We are not really + * identifying a zone since we could be using a the section number + * id if we have not node id available in page flags. + * We guarantee only that it will return the same value for two + * combinable pages in a zone. + */ +static inline int page_zone_id(struct page *page) +{ + return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; +} + +static inline int zone_to_nid(struct zone *zone) +{ +#ifdef CONFIG_NUMA + return zone->node; +#else + return 0; +#endif +} + +#ifdef NODE_NOT_IN_PAGE_FLAGS +extern int page_to_nid(const struct page *page); +#else +static inline int page_to_nid(const struct page *page) +{ + return (page->flags >> NODES_PGSHIFT) & NODES_MASK; +} +#endif + +static inline struct zone *page_zone(const struct page *page) +{ + return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; +} + +#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) +static inline void set_page_section(struct page *page, unsigned long section) +{ + page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); + page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; +} + +static inline unsigned long page_to_section(const struct page *page) +{ + return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; +} +#endif + +static inline void set_page_zone(struct page *page, enum zone_type zone) +{ + page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); + page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; +} + +static inline void set_page_node(struct page *page, unsigned long node) +{ + page->flags &= ~(NODES_MASK << NODES_PGSHIFT); + page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; +} + +static inline void set_page_links(struct page *page, enum zone_type zone, + unsigned long node, unsigned long pfn) +{ + set_page_zone(page, zone); + set_page_node(page, node); +#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) + set_page_section(page, pfn_to_section_nr(pfn)); +#endif +} + +/* + * Some inline functions in vmstat.h depend on page_zone() + */ +#include <linux/vmstat.h> + +static __always_inline void *lowmem_page_address(const struct page *page) +{ + return __va(PFN_PHYS(page_to_pfn(page))); +} + +#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) +#define HASHED_PAGE_VIRTUAL +#endif + +#if defined(WANT_PAGE_VIRTUAL) +#define page_address(page) ((page)->virtual) +#define set_page_address(page, address) \ + do { \ + (page)->virtual = (address); \ + } while(0) +#define page_address_init() do { } while(0) +#endif + +#if defined(HASHED_PAGE_VIRTUAL) +void *page_address(const struct page *page); +void set_page_address(struct page *page, void *virtual); +void page_address_init(void); +#endif + +#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL) +#define page_address(page) lowmem_page_address(page) +#define set_page_address(page, address) do { } while(0) +#define page_address_init() do { } while(0) +#endif + +/* + * On an anonymous page mapped into a user virtual memory area, + * page->mapping points to its anon_vma, not to a struct address_space; + * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. + * + * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, + * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; + * and then page->mapping points, not to an anon_vma, but to a private + * structure which KSM associates with that merged page. See ksm.h. + * + * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. 
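/*
 * Illustrative sketch, not part of mm.h: reading the placement
 * information that page_zonenum(), page_to_nid() and page_zone() above
 * decode from page->flags.  "example_page_placement" is a hypothetical
 * debugging helper.
 */
static inline void example_page_placement(const struct page *page,
					  int *nid, enum zone_type *zt)
{
	*nid = page_to_nid(page);	/* NUMA node, from the flags or the section */
	*zt = page_zonenum(page);	/* zone index within that node */
	/* page_zone(page) == &NODE_DATA(*nid)->node_zones[*zt] */
}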
+ * + * Please note that, confusingly, "page_mapping" refers to the inode + * address_space which maps the page from disk; whereas "page_mapped" + * refers to user virtual address space into which the page is mapped. + */ +#define PAGE_MAPPING_ANON 1 +#define PAGE_MAPPING_KSM 2 +#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) + +extern struct address_space swapper_space; +static inline struct address_space *page_mapping(struct page *page) +{ + struct address_space *mapping = page->mapping; + + VM_BUG_ON(PageSlab(page)); + if (unlikely(PageSwapCache(page))) + mapping = &swapper_space; + else if ((unsigned long)mapping & PAGE_MAPPING_ANON) + mapping = NULL; + return mapping; +} + +/* Neutral page->mapping pointer to address_space or anon_vma or other */ +static inline void *page_rmapping(struct page *page) +{ + return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); +} + +static inline int PageAnon(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; +} + +/* + * Return the pagecache index of the passed page. Regular pagecache pages + * use ->index whereas swapcache pages use ->private + */ +static inline pgoff_t page_index(struct page *page) +{ + if (unlikely(PageSwapCache(page))) + return page_private(page); + return page->index; +} + +/* + * Return true if this page is mapped into pagetables. + */ +static inline int page_mapped(struct page *page) +{ + return atomic_read(&(page)->_mapcount) >= 0; +} + +/* + * Different kinds of faults, as returned by handle_mm_fault(). + * Used to decide whether a process gets delivered SIGBUS or + * just gets major/minor fault counters bumped up. + */ + +#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */ + +#define VM_FAULT_OOM 0x0001 +#define VM_FAULT_SIGBUS 0x0002 +#define VM_FAULT_MAJOR 0x0004 +#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ +#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ +#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ + +#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ +#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ +#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ + +#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ + +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ + VM_FAULT_HWPOISON_LARGE) + +/* Encode hstate index for a hwpoisoned large page */ +#define VM_FAULT_SET_HINDEX(x) ((x) << 12) +#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf) + +/* + * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. + */ +extern void pagefault_out_of_memory(void); + +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) + +/* + * Flags passed to show_mem() and show_free_areas() to suppress output in + * various contexts. 
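/*
 * Illustrative sketch, not part of mm.h: how an architecture's fault
 * handler typically decodes the VM_FAULT_* bitmask above after calling
 * handle_mm_fault() (declared further down).  "example_classify_fault"
 * is a hypothetical helper; real arch code also deals with
 * VM_FAULT_RETRY when it passed FAULT_FLAG_ALLOW_RETRY, and with signal
 * delivery and OOM handling.
 */
static inline int example_classify_fault(unsigned int fault)
{
	if (fault & VM_FAULT_OOM)
		return -ENOMEM;		/* out of memory while handling the fault */
	if (fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return -EFAULT;		/* poisoned page, usually raised as SIGBUS */
	if (fault & VM_FAULT_SIGBUS)
		return -EFAULT;		/* e.g. fault beyond the end of a mapped file */
	if (fault & VM_FAULT_MAJOR)
		return 1;		/* major fault: I/O was needed */
	return 0;			/* minor fault */
}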
+ */ +#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */ + +extern void show_free_areas(unsigned int flags); +extern bool skip_free_areas_node(unsigned int flags, int nid); + +int shmem_lock(struct file *file, int lock, struct user_struct *user); +struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); +void shmem_set_file(struct vm_area_struct *vma, struct file *file); +int shmem_zero_setup(struct vm_area_struct *); + +extern int can_do_mlock(void); +extern int user_shm_lock(size_t, struct user_struct *); +extern void user_shm_unlock(size_t, struct user_struct *); + +/* + * Parameter block passed down to zap_pte_range in exceptional cases. + */ +struct zap_details { + struct vm_area_struct *nonlinear_vma; /* Check page->index if set */ + struct address_space *check_mapping; /* Check page->mapping if set */ + pgoff_t first_index; /* Lowest page->index to unmap */ + pgoff_t last_index; /* Highest page->index to unmap */ +}; + +struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte); + +int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, + unsigned long size); +void zap_page_range(struct vm_area_struct *vma, unsigned long address, + unsigned long size, struct zap_details *); +void unmap_vmas(struct mmu_gather *tlb, + struct vm_area_struct *start_vma, unsigned long start_addr, + unsigned long end_addr, unsigned long *nr_accounted, + struct zap_details *); + +/** + * mm_walk - callbacks for walk_page_range + * @pgd_entry: if set, called for each non-empty PGD (top-level) entry + * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry + * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry + * this handler is required to be able to handle + * pmd_trans_huge() pmds. They may simply choose to + * split_huge_page() instead of handling it explicitly. + * @pte_entry: if set, called for each non-empty PTE (4th-level) entry + * @pte_hole: if set, called for each hole at all levels + * @hugetlb_entry: if set, called for each hugetlb entry + * *Caution*: The caller must hold mmap_sem() if @hugetlb_entry + * is used. 
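/*
 * Illustrative sketch, not part of mm.h: counting present ptes in a
 * range with the mm_walk callbacks documented above and the
 * walk_page_range() entry point declared just below.
 * "example_count_present" and its callback are hypothetical; the caller
 * is assumed to hold mm->mmap_sem for reading.
 */
static int example_count_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;			/* non-zero would abort the walk */
}

static unsigned long example_count_present(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry = example_count_pte,
		.mm = mm,
		.private = &count,
	};

	walk_page_range(start, end, &walk);
	return count;
}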
+ * + * (see walk_page_range for more details) + */ +struct mm_walk { + int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *); + int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *); + int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); + int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); + int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); + int (*hugetlb_entry)(pte_t *, unsigned long, + unsigned long, unsigned long, struct mm_walk *); + struct mm_struct *mm; + void *private; +}; + +int walk_page_range(unsigned long addr, unsigned long end, + struct mm_walk *walk); +void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, + unsigned long end, unsigned long floor, unsigned long ceiling); +int copy_page_range(struct mm_struct *dst, struct mm_struct *src, + struct vm_area_struct *vma); +void unmap_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen, int even_cows); +int follow_pfn(struct vm_area_struct *vma, unsigned long address, + unsigned long *pfn); +int follow_phys(struct vm_area_struct *vma, unsigned long address, + unsigned int flags, unsigned long *prot, resource_size_t *phys); +int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); + +static inline void unmap_shared_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen) +{ + unmap_mapping_range(mapping, holebegin, holelen, 0); +} + +extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new); +extern void truncate_setsize(struct inode *inode, loff_t newsize); +extern int vmtruncate(struct inode *inode, loff_t offset); +extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end); +void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); +int truncate_inode_page(struct address_space *mapping, struct page *page); +int generic_error_remove_page(struct address_space *mapping, struct page *page); + +int invalidate_inode_page(struct page *page); + +#ifdef CONFIG_MMU +extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, unsigned int flags); +extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, + unsigned long address, unsigned int fault_flags); +#else +static inline int handle_mm_fault(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, + unsigned int flags) +{ + /* should never happen if there's no MMU */ + BUG(); + return VM_FAULT_SIGBUS; +} +static inline int fixup_user_fault(struct task_struct *tsk, + struct mm_struct *mm, unsigned long address, + unsigned int fault_flags) +{ + /* should never happen if there's no MMU */ + BUG(); + return -EFAULT; +} +#endif + +extern int make_pages_present(unsigned long addr, unsigned long end); +extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); +extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, + void *buf, int len, int write); + +int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, int len, unsigned int foll_flags, + struct page **pages, struct vm_area_struct **vmas, + int *nonblocking); +int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, int nr_pages, int write, int force, + struct page **pages, struct vm_area_struct **vmas); +int get_user_pages_fast(unsigned long start, 
int nr_pages, int write, + struct page **pages); +struct page *get_dump_page(unsigned long addr); + +extern int try_to_release_page(struct page * page, gfp_t gfp_mask); +extern void do_invalidatepage(struct page *page, unsigned long offset); + +int __set_page_dirty_nobuffers(struct page *page); +int __set_page_dirty_no_writeback(struct page *page); +int redirty_page_for_writepage(struct writeback_control *wbc, + struct page *page); +void account_page_dirtied(struct page *page, struct address_space *mapping); +void account_page_writeback(struct page *page); +int set_page_dirty(struct page *page); +int set_page_dirty_lock(struct page *page); +int clear_page_dirty_for_io(struct page *page); + +/* Is the vma a continuation of the stack vma above it? */ +static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) +{ + return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +} + +static inline int stack_guard_page_start(struct vm_area_struct *vma, + unsigned long addr) +{ + return (vma->vm_flags & VM_GROWSDOWN) && + (vma->vm_start == addr) && + !vma_growsdown(vma->vm_prev, addr); +} + +/* Is the vma a continuation of the stack vma below it? */ +static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) +{ + return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); +} + +static inline int stack_guard_page_end(struct vm_area_struct *vma, + unsigned long addr) +{ + return (vma->vm_flags & VM_GROWSUP) && + (vma->vm_end == addr) && + !vma_growsup(vma->vm_next, addr); +} + +extern pid_t +vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); + +extern unsigned long move_page_tables(struct vm_area_struct *vma, + unsigned long old_addr, struct vm_area_struct *new_vma, + unsigned long new_addr, unsigned long len); +extern unsigned long do_mremap(unsigned long addr, + unsigned long old_len, unsigned long new_len, + unsigned long flags, unsigned long new_addr); +extern int mprotect_fixup(struct vm_area_struct *vma, + struct vm_area_struct **pprev, unsigned long start, + unsigned long end, unsigned long newflags); + +/* + * doesn't attempt to fault and will return short. + */ +int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); +/* + * per-process(per-mm_struct) statistics. + */ +static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) +{ + long val = atomic_long_read(&mm->rss_stat.count[member]); + +#ifdef SPLIT_RSS_COUNTING + /* + * counter is updated in asynchronous manner and may go to minus. + * But it's never be expected number for users. 
+ */ + if (val < 0) + val = 0; +#endif + return (unsigned long)val; +} + +static inline void add_mm_counter(struct mm_struct *mm, int member, long value) +{ + atomic_long_add(value, &mm->rss_stat.count[member]); +} + +static inline void inc_mm_counter(struct mm_struct *mm, int member) +{ + atomic_long_inc(&mm->rss_stat.count[member]); +} + +static inline void dec_mm_counter(struct mm_struct *mm, int member) +{ + atomic_long_dec(&mm->rss_stat.count[member]); +} + +static inline unsigned long get_mm_rss(struct mm_struct *mm) +{ + return get_mm_counter(mm, MM_FILEPAGES) + + get_mm_counter(mm, MM_ANONPAGES); +} + +static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) +{ + return max(mm->hiwater_rss, get_mm_rss(mm)); +} + +static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) +{ + return max(mm->hiwater_vm, mm->total_vm); +} + +static inline void update_hiwater_rss(struct mm_struct *mm) +{ + unsigned long _rss = get_mm_rss(mm); + + if ((mm)->hiwater_rss < _rss) + (mm)->hiwater_rss = _rss; +} + +static inline void update_hiwater_vm(struct mm_struct *mm) +{ + if (mm->hiwater_vm < mm->total_vm) + mm->hiwater_vm = mm->total_vm; +} + +static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, + struct mm_struct *mm) +{ + unsigned long hiwater_rss = get_mm_hiwater_rss(mm); + + if (*maxrss < hiwater_rss) + *maxrss = hiwater_rss; +} + +#if defined(SPLIT_RSS_COUNTING) +void sync_mm_rss(struct mm_struct *mm); +#else +static inline void sync_mm_rss(struct mm_struct *mm) +{ +} +#endif + +int vma_wants_writenotify(struct vm_area_struct *vma); + +extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl); +static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl) +{ + pte_t *ptep; + __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); + return ptep; +} + +#ifdef __PAGETABLE_PUD_FOLDED +static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return 0; +} +#else +int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); +#endif + +#ifdef __PAGETABLE_PMD_FOLDED +static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, + unsigned long address) +{ + return 0; +} +#else +int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); +#endif + +int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + pmd_t *pmd, unsigned long address); +int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); + +/* + * The following ifdef needed to get the 4level-fixup.h header to work. + * Remove it when 4level-fixup.h has been removed. + */ +#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) +static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) +{ + return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))? + NULL: pud_offset(pgd, address); +} + +static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) +{ + return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? + NULL: pmd_offset(pud, address); +} +#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ + +#if USE_SPLIT_PTLOCKS +/* + * We tuck a spinlock to guard each pagetable page into its struct page, + * at page->private, with BUILD_BUG_ON to make sure that this will not + * overflow into the next struct page (as it might with DEBUG_SPINLOCK). + * When freeing, reset page->mapping so free_pages_check won't complain. 
+ */ +#define __pte_lockptr(page) &((page)->ptl) +#define pte_lock_init(_page) do { \ + spin_lock_init(__pte_lockptr(_page)); \ +} while (0) +#define pte_lock_deinit(page) ((page)->mapping = NULL) +#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) +#else /* !USE_SPLIT_PTLOCKS */ +/* + * We use mm->page_table_lock to guard all pagetable pages of the mm. + */ +#define pte_lock_init(page) do {} while (0) +#define pte_lock_deinit(page) do {} while (0) +#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) +#endif /* USE_SPLIT_PTLOCKS */ + +static inline void pgtable_page_ctor(struct page *page) +{ + pte_lock_init(page); + inc_zone_page_state(page, NR_PAGETABLE); +} + +static inline void pgtable_page_dtor(struct page *page) +{ + pte_lock_deinit(page); + dec_zone_page_state(page, NR_PAGETABLE); +} + +#define pte_offset_map_lock(mm, pmd, address, ptlp) \ +({ \ + spinlock_t *__ptl = pte_lockptr(mm, pmd); \ + pte_t *__pte = pte_offset_map(pmd, address); \ + *(ptlp) = __ptl; \ + spin_lock(__ptl); \ + __pte; \ +}) + +#define pte_unmap_unlock(pte, ptl) do { \ + spin_unlock(ptl); \ + pte_unmap(pte); \ +} while (0) + +#define pte_alloc_map(mm, vma, pmd, address) \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \ + pmd, address))? \ + NULL: pte_offset_map(pmd, address)) + +#define pte_alloc_map_lock(mm, pmd, address, ptlp) \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \ + pmd, address))? \ + NULL: pte_offset_map_lock(mm, pmd, address, ptlp)) + +#define pte_alloc_kernel(pmd, address) \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ + NULL: pte_offset_kernel(pmd, address)) + +extern void free_area_init(unsigned long * zones_size); +extern void free_area_init_node(int nid, unsigned long * zones_size, + unsigned long zone_start_pfn, unsigned long *zholes_size); +extern void free_initmem(void); + +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +/* + * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its + * zones, allocate the backing mem_map and account for memory holes in a more + * architecture independent manner. This is a substitute for creating the + * zone_sizes[] and zholes_size[] arrays and passing them to + * free_area_init_node() + * + * An architecture is expected to register range of page frames backed by + * physical memory with memblock_add[_node]() before calling + * free_area_init_nodes() passing in the PFN each zone ends at. At a basic + * usage, an architecture is expected to do something like + * + * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, + * max_highmem_pfn}; + * for_each_valid_physical_page_range() + * memblock_add_node(base, size, nid) + * free_area_init_nodes(max_zone_pfns); + * + * free_bootmem_with_active_regions() calls free_bootmem_node() for each + * registered physical page range. Similarly + * sparse_memory_present_with_active_regions() calls memory_present() for + * each range when SPARSEMEM is enabled. + * + * See mm/page_alloc.c for more information on each function exposed by + * CONFIG_HAVE_MEMBLOCK_NODE_MAP. 
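/*
 * Illustrative sketch, not part of mm.h: the map/lock/inspect/unlock
 * sequence for a single pte using the pte_offset_map_lock() and
 * pte_unmap_unlock() helpers above.  "example_pte_present" is a
 * hypothetical helper; the pmd is assumed to be valid already (not
 * none and not a transparent huge pmd).
 */
static inline int example_pte_present(struct mm_struct *mm, pmd_t *pmd,
				      unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;
	int ret;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	ret = pte_present(*pte);
	pte_unmap_unlock(pte, ptl);	/* drops the lock and the kmap */
	return ret;
}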
+ */ +extern void free_area_init_nodes(unsigned long *max_zone_pfn); +unsigned long node_map_pfn_alignment(void); +unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, + unsigned long end_pfn); +extern unsigned long absent_pages_in_range(unsigned long start_pfn, + unsigned long end_pfn); +extern void get_pfn_range_for_nid(unsigned int nid, + unsigned long *start_pfn, unsigned long *end_pfn); +extern unsigned long find_min_pfn_with_active_regions(void); +extern void free_bootmem_with_active_regions(int nid, + unsigned long max_low_pfn); +extern void sparse_memory_present_with_active_regions(int nid); + +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ + !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) +static inline int __early_pfn_to_nid(unsigned long pfn) +{ + return 0; +} +#else +/* please see mm/page_alloc.c */ +extern int __meminit early_pfn_to_nid(unsigned long pfn); +#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID +/* there is a per-arch backend function. */ +extern int __meminit __early_pfn_to_nid(unsigned long pfn); +#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ +#endif + +extern void set_dma_reserve(unsigned long new_dma_reserve); +extern void memmap_init_zone(unsigned long, int, unsigned long, + unsigned long, enum memmap_context); +extern void setup_per_zone_wmarks(void); +extern int __meminit init_per_zone_wmark_min(void); +extern void mem_init(void); +extern void __init mmap_init(void); +extern void show_mem(unsigned int flags); +extern void si_meminfo(struct sysinfo * val); +extern void si_meminfo_node(struct sysinfo *val, int nid); +extern int after_bootmem; + +extern __printf(3, 4) +void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); + +extern void setup_per_cpu_pageset(void); + +extern void zone_pcp_update(struct zone *zone); + +/* nommu.c */ +extern atomic_long_t mmap_pages_allocated; +extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); + +/* prio_tree.c */ +void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); +void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *); +void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *); +struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma, + struct prio_tree_iter *iter); + +#define vma_prio_tree_foreach(vma, iter, root, begin, end) \ + for (prio_tree_iter_init(iter, root, begin, end), vma = NULL; \ + (vma = vma_prio_tree_next(vma, iter)); ) + +static inline void vma_nonlinear_insert(struct vm_area_struct *vma, + struct list_head *list) +{ + vma->shared.vm_set.parent = NULL; + list_add_tail(&vma->shared.vm_set.list, list); +} + +/* mmap.c */ +extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); +extern int vma_adjust(struct vm_area_struct *vma, unsigned long start, + unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert); +extern struct vm_area_struct *vma_merge(struct mm_struct *, + struct vm_area_struct *prev, unsigned long addr, unsigned long end, + unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, + struct mempolicy *); +extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); +extern int split_vma(struct mm_struct *, + struct vm_area_struct *, unsigned long addr, int new_below); +extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); +extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, + struct rb_node **, struct rb_node *); +extern void 
unlink_file_vma(struct vm_area_struct *); +extern struct vm_area_struct *copy_vma(struct vm_area_struct **, + unsigned long addr, unsigned long len, pgoff_t pgoff); +extern void exit_mmap(struct mm_struct *); + +extern int mm_take_all_locks(struct mm_struct *mm); +extern void mm_drop_all_locks(struct mm_struct *mm); + +/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */ +extern void added_exe_file_vma(struct mm_struct *mm); +extern void removed_exe_file_vma(struct mm_struct *mm); +extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern struct file *get_mm_exe_file(struct mm_struct *mm); + +extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); +extern int install_special_mapping(struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long flags, struct page **pages); + +extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); + +extern unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long len, unsigned long flags, + vm_flags_t vm_flags, unsigned long pgoff); +extern unsigned long do_mmap(struct file *, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); +extern int do_munmap(struct mm_struct *, unsigned long, size_t); + +/* These take the mm semaphore themselves */ +extern unsigned long vm_brk(unsigned long, unsigned long); +extern int vm_munmap(unsigned long, size_t); +extern unsigned long vm_mmap(struct file *, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); + +/* truncate.c */ +extern void truncate_inode_pages(struct address_space *, loff_t); +extern void truncate_inode_pages_range(struct address_space *, + loff_t lstart, loff_t lend); + +/* generic vm_area_ops exported for stackable file systems */ +extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); + +/* mm/page-writeback.c */ +int write_one_page(struct page *page, int wait); +void task_dirty_inc(struct task_struct *tsk); + +/* readahead.c */ +#define VM_MAX_READAHEAD 128 /* kbytes */ +#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ + +int force_page_cache_readahead(struct address_space *mapping, struct file *filp, + pgoff_t offset, unsigned long nr_to_read); + +void page_cache_sync_readahead(struct address_space *mapping, + struct file_ra_state *ra, + struct file *filp, + pgoff_t offset, + unsigned long size); + +void page_cache_async_readahead(struct address_space *mapping, + struct file_ra_state *ra, + struct file *filp, + struct page *pg, + pgoff_t offset, + unsigned long size); + +unsigned long max_sane_readahead(unsigned long nr); +unsigned long ra_submit(struct file_ra_state *ra, + struct address_space *mapping, + struct file *filp); + +/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ +extern int expand_stack(struct vm_area_struct *vma, unsigned long address); + +/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */ +extern int expand_downwards(struct vm_area_struct *vma, + unsigned long address); +#if VM_GROWSUP +extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); +#else + #define expand_upwards(vma, address) do { } while (0) +#endif + +/* Look up the first VMA which satisfies addr < vm_end, NULL if none. 
*/ +extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); +extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, + struct vm_area_struct **pprev); + +/* Look up the first VMA which intersects the interval start_addr..end_addr-1, + NULL if none. Assume start_addr < end_addr. */ +static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +{ + struct vm_area_struct * vma = find_vma(mm,start_addr); + + if (vma && end_addr <= vma->vm_start) + vma = NULL; + return vma; +} + +static inline unsigned long vma_pages(struct vm_area_struct *vma) +{ + return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; +} + +/* Look up the first VMA which exactly match the interval vm_start ... vm_end */ +static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, + unsigned long vm_start, unsigned long vm_end) +{ + struct vm_area_struct *vma = find_vma(mm, vm_start); + + if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) + vma = NULL; + + return vma; +} + +#ifdef CONFIG_MMU +pgprot_t vm_get_page_prot(unsigned long vm_flags); +#else +static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) +{ + return __pgprot(0); +} +#endif + +struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); +int remap_pfn_range(struct vm_area_struct *, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t); +int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); +int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn); +int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn); + +struct page *follow_page(struct vm_area_struct *, unsigned long address, + unsigned int foll_flags); +#define FOLL_WRITE 0x01 /* check pte is writable */ +#define FOLL_TOUCH 0x02 /* mark page accessed */ +#define FOLL_GET 0x04 /* do get_page on page */ +#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ +#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ +#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO + * and return without waiting upon it */ +#define FOLL_MLOCK 0x40 /* mark page as mlocked */ +#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ +#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ + +typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, + void *data); +extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, + unsigned long size, pte_fn_t fn, void *data); + +#ifdef CONFIG_PROC_FS +void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); +#else +static inline void vm_stat_account(struct mm_struct *mm, + unsigned long flags, struct file *file, long pages) +{ +} +#endif /* CONFIG_PROC_FS */ + +#ifdef CONFIG_DEBUG_PAGEALLOC +extern void kernel_map_pages(struct page *page, int numpages, int enable); +#ifdef CONFIG_HIBERNATION +extern bool kernel_page_present(struct page *page); +#endif /* CONFIG_HIBERNATION */ +#else +static inline void +kernel_map_pages(struct page *page, int numpages, int enable) {} +#ifdef CONFIG_HIBERNATION +static inline bool kernel_page_present(struct page *page) { return true; } +#endif /* CONFIG_HIBERNATION */ +#endif + +extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); +#ifdef __HAVE_ARCH_GATE_AREA +int in_gate_area_no_mm(unsigned long addr); +int in_gate_area(struct mm_struct 
*mm, unsigned long addr); +#else +int in_gate_area_no_mm(unsigned long addr); +#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);}) +#endif /* __HAVE_ARCH_GATE_AREA */ + +int drop_caches_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +unsigned long shrink_slab(struct shrink_control *shrink, + unsigned long nr_pages_scanned, + unsigned long lru_pages); + +#ifndef CONFIG_MMU +#define randomize_va_space 0 +#else +extern int randomize_va_space; +#endif + +const char * arch_vma_name(struct vm_area_struct *vma); +void print_vma_addr(char *prefix, unsigned long rip); + +void sparse_mem_maps_populate_node(struct page **map_map, + unsigned long pnum_begin, + unsigned long pnum_end, + unsigned long map_count, + int nodeid); + +struct page *sparse_mem_map_populate(unsigned long pnum, int nid); +pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); +pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); +pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); +pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); +void *vmemmap_alloc_block(unsigned long size, int node); +void *vmemmap_alloc_block_buf(unsigned long size, int node); +void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); +int vmemmap_populate_basepages(struct page *start_page, + unsigned long pages, int node); +int vmemmap_populate(struct page *start_page, unsigned long pages, int node); +void vmemmap_populate_print_last(void); + + +enum mf_flags { + MF_COUNT_INCREASED = 1 << 0, + MF_ACTION_REQUIRED = 1 << 1, +}; +extern int memory_failure(unsigned long pfn, int trapno, int flags); +extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); +extern int unpoison_memory(unsigned long pfn); +extern int sysctl_memory_failure_early_kill; +extern int sysctl_memory_failure_recovery; +extern void shake_page(struct page *p, int access); +extern atomic_long_t mce_bad_pages; +extern int soft_offline_page(struct page *page, int flags); + +extern void dump_page(struct page *page); + +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) +extern void clear_huge_page(struct page *page, + unsigned long addr, + unsigned int pages_per_huge_page); +extern void copy_user_huge_page(struct page *dst, struct page *src, + unsigned long addr, struct vm_area_struct *vma, + unsigned int pages_per_huge_page); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ + +#ifdef CONFIG_DEBUG_PAGEALLOC +extern unsigned int _debug_guardpage_minorder; + +static inline unsigned int debug_guardpage_minorder(void) +{ + return _debug_guardpage_minorder; +} + +static inline bool page_is_guard(struct page *page) +{ + return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); +} +#else +static inline unsigned int debug_guardpage_minorder(void) { return 0; } +static inline bool page_is_guard(struct page *page) { return false; } +#endif /* CONFIG_DEBUG_PAGEALLOC */ + +#endif /* __KERNEL__ */ +#endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h new file mode 100644 index 00000000..227fd3e9 --- /dev/null +++ b/include/linux/mm_inline.h @@ -0,0 +1,102 @@ +#ifndef LINUX_MM_INLINE_H +#define LINUX_MM_INLINE_H + +#include <linux/huge_mm.h> + +/** + * page_is_file_cache - should the page be on a file LRU or anon LRU? + * @page: the page to test + * + * Returns 1 if @page is page cache page backed by a regular filesystem, + * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed. 
+ * Used by functions that manipulate the LRU lists, to sort a page + * onto the right LRU list. + * + * We would like to get this info without a page flag, but the state + * needs to survive until the page is last deleted from the LRU, which + * could be as far down as __page_cache_release. + */ +static inline int page_is_file_cache(struct page *page) +{ + return !PageSwapBacked(page); +} + +static inline void +add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru) +{ + struct lruvec *lruvec; + + lruvec = mem_cgroup_lru_add_list(zone, page, lru); + list_add(&page->lru, &lruvec->lists[lru]); + __mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page)); +} + +static inline void +del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru) +{ + mem_cgroup_lru_del_list(page, lru); + list_del(&page->lru); + __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page)); +} + +/** + * page_lru_base_type - which LRU list type should a page be on? + * @page: the page to test + * + * Used for LRU list index arithmetic. + * + * Returns the base LRU type - file or anon - @page should be on. + */ +static inline enum lru_list page_lru_base_type(struct page *page) +{ + if (page_is_file_cache(page)) + return LRU_INACTIVE_FILE; + return LRU_INACTIVE_ANON; +} + +/** + * page_off_lru - which LRU list was page on? clearing its lru flags. + * @page: the page to test + * + * Returns the LRU list a page was on, as an index into the array of LRU + * lists; and clears its Unevictable or Active flags, ready for freeing. + */ +static inline enum lru_list page_off_lru(struct page *page) +{ + enum lru_list lru; + + if (PageUnevictable(page)) { + __ClearPageUnevictable(page); + lru = LRU_UNEVICTABLE; + } else { + lru = page_lru_base_type(page); + if (PageActive(page)) { + __ClearPageActive(page); + lru += LRU_ACTIVE; + } + } + return lru; +} + +/** + * page_lru - which LRU list should a page be on? + * @page: the page to test + * + * Returns the LRU list a page should be on, as an index + * into the array of LRU lists. + */ +static inline enum lru_list page_lru(struct page *page) +{ + enum lru_list lru; + + if (PageUnevictable(page)) + lru = LRU_UNEVICTABLE; + else { + lru = page_lru_base_type(page); + if (PageActive(page)) + lru += LRU_ACTIVE; + } + return lru; +} + +#endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h new file mode 100644 index 00000000..b35752fb --- /dev/null +++ b/include/linux/mm_types.h @@ -0,0 +1,416 @@ +#ifndef _LINUX_MM_TYPES_H +#define _LINUX_MM_TYPES_H + +#include <linux/auxvec.h> +#include <linux/types.h> +#include <linux/threads.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/prio_tree.h> +#include <linux/rbtree.h> +#include <linux/rwsem.h> +#include <linux/completion.h> +#include <linux/cpumask.h> +#include <linux/page-debug-flags.h> +#include <asm/page.h> +#include <asm/mmu.h> + +#ifndef AT_VECTOR_SIZE_ARCH +#define AT_VECTOR_SIZE_ARCH 0 +#endif +#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) + +struct address_space; + +#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) + +/* + * Each physical page in the system has a struct page associated with + * it to keep track of whatever it is we are using the page for at the + * moment. Note that we have no way to track which tasks are using + * a page, though if it is a pagecache page, rmap structures can tell us + * who is mapping it. 
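/*
 * Illustrative sketch, not part of mm_inline.h: the LRU index
 * arithmetic performed by page_lru() above.  An active page-cache page
 * lands on LRU_INACTIVE_FILE + LRU_ACTIVE, i.e. LRU_ACTIVE_FILE.
 * "example_check_lru" is a hypothetical sanity helper.
 */
static inline void example_check_lru(struct page *page)
{
	if (page_is_file_cache(page) && PageActive(page) &&
	    !PageUnevictable(page))
		VM_BUG_ON(page_lru(page) != LRU_ACTIVE_FILE);
}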
+ * + * The objects in struct page are organized in double word blocks in + * order to allows us to use atomic double word operations on portions + * of struct page. That is currently only used by slub but the arrangement + * allows the use of atomic double word operations on the flags/mapping + * and lru list pointers also. + */ +struct page { + /* First double word block */ + unsigned long flags; /* Atomic flags, some possibly + * updated asynchronously */ + struct address_space *mapping; /* If low bit clear, points to + * inode address_space, or NULL. + * If page mapped as anonymous + * memory, low bit is set, and + * it points to anon_vma object: + * see PAGE_MAPPING_ANON below. + */ + /* Second double word */ + struct { + union { + pgoff_t index; /* Our offset within mapping. */ + void *freelist; /* slub first free object */ + }; + + union { +#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ + defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) + /* Used for cmpxchg_double in slub */ + unsigned long counters; +#else + /* + * Keep _count separate from slub cmpxchg_double data. + * As the rest of the double word is protected by + * slab_lock but _count is not. + */ + unsigned counters; +#endif + + struct { + + union { + /* + * Count of ptes mapped in + * mms, to show when page is + * mapped & limit reverse map + * searches. + * + * Used also for tail pages + * refcounting instead of + * _count. Tail pages cannot + * be mapped and keeping the + * tail page _count zero at + * all times guarantees + * get_page_unless_zero() will + * never succeed on tail + * pages. + */ + atomic_t _mapcount; + + struct { + unsigned inuse:16; + unsigned objects:15; + unsigned frozen:1; + }; + }; + atomic_t _count; /* Usage count, see below. */ + }; + }; + }; + + /* Third double word block */ + union { + struct list_head lru; /* Pageout list, eg. active_list + * protected by zone->lru_lock ! + */ + struct { /* slub per cpu partial pages */ + struct page *next; /* Next partial slab */ +#ifdef CONFIG_64BIT + int pages; /* Nr of partial slabs left */ + int pobjects; /* Approximate # of objects */ +#else + short int pages; + short int pobjects; +#endif + }; + }; + + /* Remainder is not double word aligned */ + union { + unsigned long private; /* Mapping-private opaque data: + * usually used for buffer_heads + * if PagePrivate set; used for + * swp_entry_t if PageSwapCache; + * indicates order in the buddy + * system if PG_buddy is set. + */ +#if USE_SPLIT_PTLOCKS + spinlock_t ptl; +#endif + struct kmem_cache *slab; /* SLUB: Pointer to slab */ + struct page *first_page; /* Compound tail pages */ + }; + + /* + * On machines where all RAM is mapped into kernel address space, + * we can simply calculate the virtual address. On machines with + * highmem some memory is mapped into kernel virtual memory + * dynamically, so we need a place to store that address. + * Note that this field could be 16 bits on x86 ... ;) + * + * Architectures with slow multiplication can define + * WANT_PAGE_VIRTUAL in asm/page.h + */ +#if defined(WANT_PAGE_VIRTUAL) + void *virtual; /* Kernel virtual address (NULL if + not kmapped, ie. highmem) */ +#endif /* WANT_PAGE_VIRTUAL */ +#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS + unsigned long debug_flags; /* Use atomic bitops on this */ +#endif + +#ifdef CONFIG_KMEMCHECK + /* + * kmemcheck wants to track the status of each byte in a page; this + * is a pointer to such a status block. NULL if not tracked. 
+ */ + void *shadow; +#endif +} +/* + * The struct page can be forced to be double word aligned so that atomic ops + * on double words work. The SLUB allocator can make use of such a feature. + */ +#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE + __aligned(2 * sizeof(unsigned long)) +#endif +; + +struct page_frag { + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 offset; + __u32 size; +#else + __u16 offset; + __u16 size; +#endif +}; + +typedef unsigned long __nocast vm_flags_t; + +/* + * A region containing a mapping of a non-memory backed file under NOMMU + * conditions. These are held in a global tree and are pinned by the VMAs that + * map parts of them. + */ +struct vm_region { + struct rb_node vm_rb; /* link in global region tree */ + vm_flags_t vm_flags; /* VMA vm_flags */ + unsigned long vm_start; /* start address of region */ + unsigned long vm_end; /* region initialised to here */ + unsigned long vm_top; /* region allocated to here */ + unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ + struct file *vm_file; /* the backing file or NULL */ + + int vm_usage; /* region usage count (access under nommu_region_sem) */ + bool vm_icache_flushed : 1; /* true if the icache has been flushed for + * this region */ +}; + +/* + * This struct defines a memory VMM memory area. There is one of these + * per VM-area/task. A VM area is any part of the process virtual memory + * space that has a special rule for the page-fault handlers (ie a shared + * library, the executable area etc). + */ +struct vm_area_struct { + struct mm_struct * vm_mm; /* The address space we belong to. */ + unsigned long vm_start; /* Our start address within vm_mm. */ + unsigned long vm_end; /* The first byte after our end address + within vm_mm. */ + + /* linked list of VM areas per task, sorted by address */ + struct vm_area_struct *vm_next, *vm_prev; + + pgprot_t vm_page_prot; /* Access permissions of this VMA. */ + unsigned long vm_flags; /* Flags, see mm.h. */ + + struct rb_node vm_rb; + + /* + * For areas with an address space and backing store, + * linkage into the address_space->i_mmap prio tree, or + * linkage to the list of like vmas hanging off its node, or + * linkage of vma in the address_space->i_mmap_nonlinear list. + */ + union { + struct { + struct list_head list; + void *parent; /* aligns with prio_tree_node parent */ + struct vm_area_struct *head; + } vm_set; + + struct raw_prio_tree_node prio_tree_node; + } shared; + + /* + * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma + * list, after a COW of one of the file pages. A MAP_SHARED vma + * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack + * or brk vma (with NULL file) can only be in an anon_vma list. + */ + struct list_head anon_vma_chain; /* Serialized by mmap_sem & + * page_table_lock */ + struct anon_vma *anon_vma; /* Serialized by page_table_lock */ + + /* Function pointers to deal with this struct. */ + const struct vm_operations_struct *vm_ops; + + /* Information about our backing store: */ + unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE + units, *not* PAGE_CACHE_SIZE */ + struct file * vm_file; /* File we map to (can be NULL). 
*/ + void * vm_private_data; /* was vm_pte (shared mem) */ + +#ifndef CONFIG_MMU + struct vm_region *vm_region; /* NOMMU mapping region */ +#endif +#ifdef CONFIG_NUMA + struct mempolicy *vm_policy; /* NUMA policy for the VMA */ +#endif +}; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +enum { + MM_FILEPAGES, + MM_ANONPAGES, + MM_SWAPENTS, + NR_MM_COUNTERS +}; + +#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU) +#define SPLIT_RSS_COUNTING +/* per-thread cached information, */ +struct task_rss_stat { + int events; /* for synchronization threshold */ + int count[NR_MM_COUNTERS]; +}; +#endif /* USE_SPLIT_PTLOCKS */ + +struct mm_rss_stat { + atomic_long_t count[NR_MM_COUNTERS]; +}; + +struct mm_struct { + struct vm_area_struct * mmap; /* list of VMAs */ + struct rb_root mm_rb; + struct vm_area_struct * mmap_cache; /* last find_vma result */ +#ifdef CONFIG_MMU + unsigned long (*get_unmapped_area) (struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags); + void (*unmap_area) (struct mm_struct *mm, unsigned long addr); +#endif + unsigned long mmap_base; /* base of mmap area */ + unsigned long task_size; /* size of task vm space */ + unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ + unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */ + pgd_t * pgd; + atomic_t mm_users; /* How many users with user space? */ + atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ + int map_count; /* number of VMAs */ + + spinlock_t page_table_lock; /* Protects page tables and some counters */ + struct rw_semaphore mmap_sem; + + struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung + * together off init_mm.mmlist, and are protected + * by mmlist_lock + */ + + + unsigned long hiwater_rss; /* High-watermark of RSS usage */ + unsigned long hiwater_vm; /* High-water virtual memory usage */ + + unsigned long total_vm; /* Total pages mapped */ + unsigned long locked_vm; /* Pages that have PG_mlocked set */ + unsigned long pinned_vm; /* Refcount permanently increased */ + unsigned long shared_vm; /* Shared pages (files) */ + unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ + unsigned long stack_vm; /* VM_GROWSUP/DOWN */ + unsigned long reserved_vm; /* VM_RESERVED|VM_IO pages */ + unsigned long def_flags; + unsigned long nr_ptes; /* Page table pages */ + unsigned long start_code, end_code, start_data, end_data; + unsigned long start_brk, brk, start_stack; + unsigned long arg_start, arg_end, env_start, env_end; + + unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ + + /* + * Special counters, in some configurations protected by the + * page_table_lock, in other configurations by being atomic. + */ + struct mm_rss_stat rss_stat; + + struct linux_binfmt *binfmt; + + cpumask_var_t cpu_vm_mask_var; + + /* Architecture-specific MM context */ + mm_context_t context; + + /* Swap token stuff */ + /* + * Last value of global fault stamp as seen by this process. + * In other words, this value gives an indication of how long + * it has been since this task got the token. 
+ * Look at mm/thrash.c + */ + unsigned int faultstamp; + unsigned int token_priority; + unsigned int last_interval; + + unsigned long flags; /* Must use atomic bitops to access the bits */ + + struct core_state *core_state; /* coredumping support */ +#ifdef CONFIG_AIO + spinlock_t ioctx_lock; + struct hlist_head ioctx_list; +#endif +#ifdef CONFIG_MM_OWNER + /* + * "owner" points to a task that is regarded as the canonical + * user/owner of this mm. All of the following must be true in + * order for it to be changed: + * + * current == mm->owner + * current->mm != mm + * new_owner->mm == mm + * new_owner->alloc_lock is held + */ + struct task_struct __rcu *owner; +#endif + + /* store ref to file /proc/<pid>/exe symlink points to */ + struct file *exe_file; + unsigned long num_exe_file_vmas; +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + pgtable_t pmd_huge_pte; /* protected by page_table_lock */ +#endif +#ifdef CONFIG_CPUMASK_OFFSTACK + struct cpumask cpumask_allocation; +#endif +}; + +static inline void mm_init_cpumask(struct mm_struct *mm) +{ +#ifdef CONFIG_CPUMASK_OFFSTACK + mm->cpu_vm_mask_var = &mm->cpumask_allocation; +#endif +} + +/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ +static inline cpumask_t *mm_cpumask(struct mm_struct *mm) +{ + return mm->cpu_vm_mask_var; +} + +#endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mman.h b/include/linux/mman.h new file mode 100644 index 00000000..8b74e9b1 --- /dev/null +++ b/include/linux/mman.h @@ -0,0 +1,93 @@ +#ifndef _LINUX_MMAN_H +#define _LINUX_MMAN_H + +#include <asm/mman.h> + +#define MREMAP_MAYMOVE 1 +#define MREMAP_FIXED 2 + +#define OVERCOMMIT_GUESS 0 +#define OVERCOMMIT_ALWAYS 1 +#define OVERCOMMIT_NEVER 2 + +#ifdef __KERNEL__ +#include <linux/mm.h> +#include <linux/percpu_counter.h> + +#include <linux/atomic.h> + +extern int sysctl_overcommit_memory; +extern int sysctl_overcommit_ratio; +extern struct percpu_counter vm_committed_as; + +static inline void vm_acct_memory(long pages) +{ + percpu_counter_add(&vm_committed_as, pages); +} + +static inline void vm_unacct_memory(long pages) +{ + vm_acct_memory(-pages); +} + +/* + * Allow architectures to handle additional protection bits + */ + +#ifndef arch_calc_vm_prot_bits +#define arch_calc_vm_prot_bits(prot) 0 +#endif + +#ifndef arch_vm_get_page_prot +#define arch_vm_get_page_prot(vm_flags) __pgprot(0) +#endif + +#ifndef arch_validate_prot +/* + * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have + * already been masked out. + * + * Returns true if the prot flags are valid + */ +static inline int arch_validate_prot(unsigned long prot) +{ + return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; +} +#define arch_validate_prot arch_validate_prot +#endif + +/* + * Optimisation macro. It is equivalent to: + * (x & bit1) ? bit2 : 0 + * but this version is faster. + * ("bit1" and "bit2" must be single bits) + */ +#define _calc_vm_trans(x, bit1, bit2) \ + ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \ + : ((x) & (bit1)) / ((bit1) / (bit2))) + +/* + * Combine the mmap "prot" argument into "vm_flags" used internally. + */ +static inline unsigned long +calc_vm_prot_bits(unsigned long prot) +{ + return _calc_vm_trans(prot, PROT_READ, VM_READ ) | + _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) | + _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) | + arch_calc_vm_prot_bits(prot); +} + +/* + * Combine the mmap "flags" argument into "vm_flags" used internally. 
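+ *
+ * For example, with the usual single-bit definitions where
+ * MAP_LOCKED <= VM_LOCKED numerically, _calc_vm_trans(flags,
+ * MAP_LOCKED, VM_LOCKED) expands to
+ * (flags & MAP_LOCKED) * (VM_LOCKED / MAP_LOCKED), which yields
+ * VM_LOCKED when MAP_LOCKED is set and 0 otherwise, without a branch.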
+ */ +static inline unsigned long +calc_vm_flag_bits(unsigned long flags) +{ + return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | + _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | + _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) | + _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ); +} +#endif /* __KERNEL__ */ +#endif /* _LINUX_MMAN_H */ diff --git a/include/linux/mmc/Kbuild b/include/linux/mmc/Kbuild new file mode 100644 index 00000000..1fb26448 --- /dev/null +++ b/include/linux/mmc/Kbuild @@ -0,0 +1 @@ +header-y += ioctl.h diff --git a/include/linux/mmc/boot.h b/include/linux/mmc/boot.h new file mode 100644 index 00000000..23acc3ba --- /dev/null +++ b/include/linux/mmc/boot.h @@ -0,0 +1,7 @@ +#ifndef LINUX_MMC_BOOT_H +#define LINUX_MMC_BOOT_H + +enum { MMC_PROGRESS_ENTER, MMC_PROGRESS_INIT, + MMC_PROGRESS_LOAD, MMC_PROGRESS_DONE }; + +#endif /* LINUX_MMC_BOOT_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h new file mode 100644 index 00000000..629b823f --- /dev/null +++ b/include/linux/mmc/card.h @@ -0,0 +1,494 @@ +/* + * linux/include/linux/mmc/card.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Card driver specific definitions. + */ +#ifndef LINUX_MMC_CARD_H +#define LINUX_MMC_CARD_H + +#include <linux/device.h> +#include <linux/mmc/core.h> +#include <linux/mod_devicetable.h> + +struct mmc_cid { + unsigned int manfid; + char prod_name[8]; + unsigned int serial; + unsigned short oemid; + unsigned short year; + unsigned char hwrev; + unsigned char fwrev; + unsigned char month; +}; + +struct mmc_csd { + unsigned char structure; + unsigned char mmca_vsn; + unsigned short cmdclass; + unsigned short tacc_clks; + unsigned int tacc_ns; + unsigned int c_size; + unsigned int r2w_factor; + unsigned int max_dtr; + unsigned int erase_size; /* In sectors */ + unsigned int read_blkbits; + unsigned int write_blkbits; + unsigned int capacity; + unsigned int read_partial:1, + read_misalign:1, + write_partial:1, + write_misalign:1; +}; + +struct mmc_ext_csd { + u8 rev; + u8 erase_group_def; + u8 sec_feature_support; + u8 rel_sectors; + u8 rel_param; + u8 part_config; + u8 cache_ctrl; + u8 rst_n_function; + unsigned int part_time; /* Units: ms */ + unsigned int sa_timeout; /* Units: 100ns */ + unsigned int generic_cmd6_time; /* Units: 10ms */ + unsigned int power_off_longtime; /* Units: ms */ + unsigned int hs_max_dtr; + unsigned int sectors; + unsigned int card_type; + unsigned int hc_erase_size; /* In sectors */ + unsigned int hc_erase_timeout; /* In milliseconds */ + unsigned int sec_trim_mult; /* Secure trim multiplier */ + unsigned int sec_erase_mult; /* Secure erase multiplier */ + unsigned int trim_timeout; /* In milliseconds */ + bool enhanced_area_en; /* enable bit */ + unsigned long long enhanced_area_offset; /* Units: Byte */ + unsigned int enhanced_area_size; /* Units: KB */ + unsigned int cache_size; /* Units: KB */ + bool hpi_en; /* HPI enablebit */ + bool hpi; /* HPI support bit */ + unsigned int hpi_cmd; /* cmd used as HPI */ + unsigned int data_sector_size; /* 512 bytes or 4KB */ + unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ + unsigned int boot_ro_lock; /* ro lock support */ + bool boot_ro_lockable; + u8 raw_partition_support; /* 160 */ + u8 raw_erased_mem_count; /* 181 */ + u8 raw_ext_csd_structure; /* 194 */ + u8 raw_card_type; /* 196 */ + u8 out_of_int_time; /* 198 */ + u8 
raw_s_a_timeout; /* 217 */ + u8 raw_hc_erase_gap_size; /* 221 */ + u8 raw_erase_timeout_mult; /* 223 */ + u8 raw_hc_erase_grp_size; /* 224 */ + u8 raw_sec_trim_mult; /* 229 */ + u8 raw_sec_erase_mult; /* 230 */ + u8 raw_sec_feature_support;/* 231 */ + u8 raw_trim_mult; /* 232 */ + u8 raw_sectors[4]; /* 212 - 4 bytes */ + + unsigned int feature_support; +#define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */ +}; + +struct sd_scr { + unsigned char sda_vsn; + unsigned char sda_spec3; + unsigned char bus_widths; +#define SD_SCR_BUS_WIDTH_1 (1<<0) +#define SD_SCR_BUS_WIDTH_4 (1<<2) + unsigned char cmds; +#define SD_SCR_CMD20_SUPPORT (1<<0) +#define SD_SCR_CMD23_SUPPORT (1<<1) +}; + +struct sd_ssr { + unsigned int au; /* In sectors */ + unsigned int erase_timeout; /* In milliseconds */ + unsigned int erase_offset; /* In milliseconds */ +}; + +struct sd_switch_caps { + unsigned int hs_max_dtr; + unsigned int uhs_max_dtr; +#define HIGH_SPEED_MAX_DTR 50000000 +#define UHS_SDR104_MAX_DTR 208000000 +#define UHS_SDR50_MAX_DTR 100000000 +#define UHS_DDR50_MAX_DTR 50000000 +#define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR +#define UHS_SDR12_MAX_DTR 25000000 + unsigned int sd3_bus_mode; +#define UHS_SDR12_BUS_SPEED 0 +#define HIGH_SPEED_BUS_SPEED 1 +#define UHS_SDR25_BUS_SPEED 1 +#define UHS_SDR50_BUS_SPEED 2 +#define UHS_SDR104_BUS_SPEED 3 +#define UHS_DDR50_BUS_SPEED 4 + +#define SD_MODE_HIGH_SPEED (1 << HIGH_SPEED_BUS_SPEED) +#define SD_MODE_UHS_SDR12 (1 << UHS_SDR12_BUS_SPEED) +#define SD_MODE_UHS_SDR25 (1 << UHS_SDR25_BUS_SPEED) +#define SD_MODE_UHS_SDR50 (1 << UHS_SDR50_BUS_SPEED) +#define SD_MODE_UHS_SDR104 (1 << UHS_SDR104_BUS_SPEED) +#define SD_MODE_UHS_DDR50 (1 << UHS_DDR50_BUS_SPEED) + unsigned int sd3_drv_type; +#define SD_DRIVER_TYPE_B 0x01 +#define SD_DRIVER_TYPE_A 0x02 +#define SD_DRIVER_TYPE_C 0x04 +#define SD_DRIVER_TYPE_D 0x08 + unsigned int sd3_curr_limit; +#define SD_SET_CURRENT_LIMIT_200 0 +#define SD_SET_CURRENT_LIMIT_400 1 +#define SD_SET_CURRENT_LIMIT_600 2 +#define SD_SET_CURRENT_LIMIT_800 3 + +#define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200) +#define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400) +#define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600) +#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800) +}; + +struct sdio_cccr { + unsigned int sdio_vsn; + unsigned int sd_vsn; + unsigned int multi_block:1, + low_speed:1, + wide_bus:1, + high_power:1, + high_speed:1, + disable_cd:1; +}; + +struct sdio_cis { + unsigned short vendor; + unsigned short device; + unsigned short blksize; + unsigned int max_dtr; +}; + +struct mmc_host; +struct sdio_func; +struct sdio_func_tuple; + +#define SDIO_MAX_FUNCS 7 + +/* The number of MMC physical partitions. These consist of: + * boot partitions (2), general purpose partitions (4) in MMC v4.4. 
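+ * Together these account for the MMC_NUM_PHY_PARTITION (2 + 4 = 6)
+ * entries in the part[] array of struct mmc_card below; the main user
+ * data area is not counted here.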
+ */ +#define MMC_NUM_BOOT_PARTITION 2 +#define MMC_NUM_GP_PARTITION 4 +#define MMC_NUM_PHY_PARTITION 6 +#define MAX_MMC_PART_NAME_LEN 20 + +/* + * MMC Physical partitions + */ +struct mmc_part { + unsigned int size; /* partition size (in bytes) */ + unsigned int part_cfg; /* partition type */ + char name[MAX_MMC_PART_NAME_LEN]; + bool force_ro; /* to make boot parts RO by default */ + unsigned int area_type; +#define MMC_BLK_DATA_AREA_MAIN (1<<0) +#define MMC_BLK_DATA_AREA_BOOT (1<<1) +#define MMC_BLK_DATA_AREA_GP (1<<2) +}; + +/* + * MMC device + */ +struct mmc_card { + struct mmc_host *host; /* the host this device belongs to */ + struct device dev; /* the device */ + unsigned int rca; /* relative card address of device */ + unsigned int type; /* card type */ +#define MMC_TYPE_MMC 0 /* MMC card */ +#define MMC_TYPE_SD 1 /* SD card */ +#define MMC_TYPE_SDIO 2 /* SDIO card */ +#define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */ + unsigned int state; /* (our) card state */ +#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ +#define MMC_STATE_READONLY (1<<1) /* card is read-only */ +#define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */ +#define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ +#define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed mode */ +#define MMC_STATE_ULTRAHIGHSPEED (1<<5) /* card is in ultra high speed mode */ +#define MMC_CARD_SDXC (1<<6) /* card is SDXC */ +#define MMC_CARD_REMOVED (1<<7) /* card has been removed */ +#define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ +#define MMC_STATE_SLEEP (1<<9) /* card is in sleep state */ + unsigned int quirks; /* card quirks */ +#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ +#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ + /* for byte mode */ +#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ + /* (missing CIA registers) */ +#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */ +#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ +#define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ +#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ +#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ +#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ +#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ + /* byte mode */ + unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ +#define MMC_NO_POWER_NOTIFICATION 0 +#define MMC_POWERED_ON 1 +#define MMC_POWEROFF_SHORT 2 +#define MMC_POWEROFF_LONG 3 + + unsigned int erase_size; /* erase size in sectors */ + unsigned int erase_shift; /* if erase unit is power 2 */ + unsigned int pref_erase; /* in sectors */ + u8 erased_byte; /* value of erased bytes */ + + u32 raw_cid[4]; /* raw card CID */ + u32 raw_csd[4]; /* raw card CSD */ + u32 raw_scr[2]; /* raw card SCR */ + struct mmc_cid cid; /* card identification */ + struct mmc_csd csd; /* card specific */ + struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */ + struct sd_scr scr; /* extra SD information */ + struct sd_ssr ssr; /* yet more SD information */ + struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ + + unsigned int sdio_funcs; /* number of SDIO functions */ + struct sdio_cccr cccr; /* common card info */ + struct sdio_cis cis; /* common tuple info */ + struct 
sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */ + struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */ + unsigned num_info; /* number of info strings */ + const char **info; /* info strings */ + struct sdio_func_tuple *tuples; /* unknown common tuples */ + + unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */ + + struct dentry *debugfs_root; + struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ + unsigned int nr_parts; +}; + +/* + * This function fill contents in mmc_part. + */ +static inline void mmc_part_add(struct mmc_card *card, unsigned int size, + unsigned int part_cfg, char *name, int idx, bool ro, + int area_type) +{ + card->part[card->nr_parts].size = size; + card->part[card->nr_parts].part_cfg = part_cfg; + sprintf(card->part[card->nr_parts].name, name, idx); + card->part[card->nr_parts].force_ro = ro; + card->part[card->nr_parts].area_type = area_type; + card->nr_parts++; +} + +/* + * The world is not perfect and supplies us with broken mmc/sdio devices. + * For at least some of these bugs we need a work-around. + */ + +struct mmc_fixup { + /* CID-specific fields. */ + const char *name; + + /* Valid revision range */ + u64 rev_start, rev_end; + + unsigned int manfid; + unsigned short oemid; + + /* SDIO-specfic fields. You can use SDIO_ANY_ID here of course */ + u16 cis_vendor, cis_device; + + void (*vendor_fixup)(struct mmc_card *card, int data); + int data; +}; + +#define CID_MANFID_ANY (-1u) +#define CID_OEMID_ANY ((unsigned short) -1) +#define CID_NAME_ANY (NULL) + +#define END_FIXUP { 0 } + +#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \ + _cis_vendor, _cis_device, \ + _fixup, _data) \ + { \ + .name = (_name), \ + .manfid = (_manfid), \ + .oemid = (_oemid), \ + .rev_start = (_rev_start), \ + .rev_end = (_rev_end), \ + .cis_vendor = (_cis_vendor), \ + .cis_device = (_cis_device), \ + .vendor_fixup = (_fixup), \ + .data = (_data), \ + } + +#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \ + _fixup, _data) \ + _FIXUP_EXT(_name, _manfid, \ + _oemid, _rev_start, _rev_end, \ + SDIO_ANY_ID, SDIO_ANY_ID, \ + _fixup, _data) \ + +#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \ + MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data) + +#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \ + _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \ + CID_OEMID_ANY, 0, -1ull, \ + _vendor, _device, \ + _fixup, _data) \ + +#define cid_rev(hwrev, fwrev, year, month) \ + (((u64) hwrev) << 40 | \ + ((u64) fwrev) << 32 | \ + ((u64) year) << 16 | \ + ((u64) month)) + +#define cid_rev_card(card) \ + cid_rev(card->cid.hwrev, \ + card->cid.fwrev, \ + card->cid.year, \ + card->cid.month) + +/* + * Unconditionally quirk add/remove. 
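+ *
+ * As an illustration only (the table below is hypothetical, not part
+ * of this header), a driver-side quirk list built from the macros
+ * above might look like:
+ *
+ *	static const struct mmc_fixup example_fixups[] = {
+ *		MMC_FIXUP("SOMECARD", CID_MANFID_ANY, CID_OEMID_ANY,
+ *			  add_quirk, MMC_QUIRK_DISABLE_CD),
+ *		END_FIXUP
+ *	};
+ *
+ * and would then be applied with mmc_fixup_device(card, example_fixups).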
+ */ + +static inline void __maybe_unused add_quirk(struct mmc_card *card, int data) +{ + card->quirks |= data; +} + +static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) +{ + card->quirks &= ~data; +} + +#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) +#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) +#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) + +#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) +#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) +#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) +#define mmc_card_hs200(c) ((c)->state & MMC_STATE_HIGHSPEED_200) +#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) +#define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR) +#define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) +#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) +#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) +#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) +#define mmc_card_is_sleep(c) ((c)->state & MMC_STATE_SLEEP) + +#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) +#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) +#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) +#define mmc_card_set_hs200(c) ((c)->state |= MMC_STATE_HIGHSPEED_200) +#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) +#define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR) +#define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) +#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) +#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) +#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) +#define mmc_card_set_sleep(c) ((c)->state |= MMC_STATE_SLEEP) + +#define mmc_card_clr_sleep(c) ((c)->state &= ~MMC_STATE_SLEEP) +/* + * Quirk add/remove for MMC products. + */ + +static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data) +{ + if (mmc_card_mmc(card)) + card->quirks |= data; +} + +static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card, + int data) +{ + if (mmc_card_mmc(card)) + card->quirks &= ~data; +} + +/* + * Quirk add/remove for SD products. 
+ */ + +static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data) +{ + if (mmc_card_sd(card)) + card->quirks |= data; +} + +static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card, + int data) +{ + if (mmc_card_sd(card)) + card->quirks &= ~data; +} + +static inline int mmc_card_lenient_fn0(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_LENIENT_FN0; +} + +static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; +} + +static inline int mmc_card_disable_cd(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_DISABLE_CD; +} + +static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF; +} + +static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512; +} + +static inline int mmc_card_long_read_time(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_LONG_READ_TIME; +} + +#define mmc_card_name(c) ((c)->cid.prod_name) +#define mmc_card_id(c) (dev_name(&(c)->dev)) + +#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev) + +#define mmc_list_to_card(l) container_of(l, struct mmc_card, node) +#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) +#define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d) + +/* + * MMC device driver (e.g., Flash card, I/O card...) + */ +struct mmc_driver { + struct device_driver drv; + int (*probe)(struct mmc_card *); + void (*remove)(struct mmc_card *); + int (*suspend)(struct mmc_card *); + int (*resume)(struct mmc_card *); +}; + +extern int mmc_register_driver(struct mmc_driver *); +extern void mmc_unregister_driver(struct mmc_driver *); + +extern void mmc_fixup_device(struct mmc_card *card, + const struct mmc_fixup *table); + +#endif /* LINUX_MMC_CARD_H */ diff --git a/include/linux/mmc/cd-gpio.h b/include/linux/mmc/cd-gpio.h new file mode 100644 index 00000000..cefaba03 --- /dev/null +++ b/include/linux/mmc/cd-gpio.h @@ -0,0 +1,18 @@ +/* + * Generic GPIO card-detect helper header + * + * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MMC_CD_GPIO_H +#define MMC_CD_GPIO_H + +struct mmc_host; +int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio); +void mmc_cd_gpio_free(struct mmc_host *host); + +#endif diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h new file mode 100644 index 00000000..1b431c72 --- /dev/null +++ b/include/linux/mmc/core.h @@ -0,0 +1,197 @@ +/* + * linux/include/linux/mmc/core.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef LINUX_MMC_CORE_H +#define LINUX_MMC_CORE_H + +#include <linux/interrupt.h> +#include <linux/completion.h> + +struct request; +struct mmc_data; +struct mmc_request; + +struct mmc_command { + u32 opcode; + u32 arg; + u32 resp[4]; + unsigned int flags; /* expected response type */ +#define MMC_RSP_PRESENT (1 << 0) +#define MMC_RSP_136 (1 << 1) /* 136 bit response */ +#define MMC_RSP_CRC (1 << 2) /* expect valid crc */ +#define MMC_RSP_BUSY (1 << 3) /* card may send busy */ +#define MMC_RSP_OPCODE (1 << 4) /* response contains opcode */ + +#define MMC_CMD_MASK (3 << 5) /* non-SPI command type */ +#define MMC_CMD_AC (0 << 5) +#define MMC_CMD_ADTC (1 << 5) +#define MMC_CMD_BC (2 << 5) +#define MMC_CMD_BCR (3 << 5) + +#define MMC_RSP_SPI_S1 (1 << 7) /* one status byte */ +#define MMC_RSP_SPI_S2 (1 << 8) /* second byte */ +#define MMC_RSP_SPI_B4 (1 << 9) /* four data bytes */ +#define MMC_RSP_SPI_BUSY (1 << 10) /* card may send busy */ + +/* + * These are the native response types, and correspond to valid bit + * patterns of the above flags. One additional valid pattern + * is all zeros, which means we don't expect a response. + */ +#define MMC_RSP_NONE (0) +#define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) +#define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY) +#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC) +#define MMC_RSP_R3 (MMC_RSP_PRESENT) +#define MMC_RSP_R4 (MMC_RSP_PRESENT) +#define MMC_RSP_R5 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) +#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) +#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) + +#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) + +/* + * These are the SPI response types for MMC, SD, and SDIO cards. + * Commands return R1, with maybe more info. Zero is an error type; + * callers must always provide the appropriate MMC_RSP_SPI_Rx flags. + */ +#define MMC_RSP_SPI_R1 (MMC_RSP_SPI_S1) +#define MMC_RSP_SPI_R1B (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY) +#define MMC_RSP_SPI_R2 (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2) +#define MMC_RSP_SPI_R3 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4) +#define MMC_RSP_SPI_R4 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4) +#define MMC_RSP_SPI_R5 (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2) +#define MMC_RSP_SPI_R7 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4) + +#define mmc_spi_resp_type(cmd) ((cmd)->flags & \ + (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY|MMC_RSP_SPI_S2|MMC_RSP_SPI_B4)) + +/* + * These are the command types. + */ +#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK) + + unsigned int retries; /* max number of retries */ + unsigned int error; /* command error */ + +/* + * Standard errno values are used for errors, but some have specific + * meaning in the MMC layer: + * + * ETIMEDOUT Card took too long to respond + * EILSEQ Basic format problem with the received or sent data + * (e.g. 
CRC check failed, incorrect opcode in response + * or bad end bit) + * EINVAL Request cannot be performed because of restrictions + * in hardware and/or the driver + * ENOMEDIUM Host can determine that the slot is empty and is + * actively failing requests + */ + + unsigned int cmd_timeout_ms; /* in milliseconds */ + + struct mmc_data *data; /* data segment associated with cmd */ + struct mmc_request *mrq; /* associated request */ +}; + +struct mmc_data { + unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */ + unsigned int timeout_clks; /* data timeout (in clocks) */ + unsigned int blksz; /* data block size */ + unsigned int blocks; /* number of blocks */ + unsigned int error; /* data error */ + unsigned int flags; + +#define MMC_DATA_WRITE (1 << 8) +#define MMC_DATA_READ (1 << 9) +#define MMC_DATA_STREAM (1 << 10) + + unsigned int bytes_xfered; + + struct mmc_command *stop; /* stop command */ + struct mmc_request *mrq; /* associated request */ + + unsigned int sg_len; /* size of scatter list */ + struct scatterlist *sg; /* I/O scatter list */ + s32 host_cookie; /* host private data */ +}; + +struct mmc_request { + struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */ + struct mmc_command *cmd; + struct mmc_data *data; + struct mmc_command *stop; + + struct completion completion; + void (*done)(struct mmc_request *);/* completion function */ +}; + +struct mmc_host; +struct mmc_card; +struct mmc_async_req; + +extern struct mmc_async_req *mmc_start_req(struct mmc_host *, + struct mmc_async_req *, int *); +extern int mmc_interrupt_hpi(struct mmc_card *); +extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); +extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); +extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); +extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, + struct mmc_command *, int); +extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); + +#define MMC_ERASE_ARG 0x00000000 +#define MMC_SECURE_ERASE_ARG 0x80000000 +#define MMC_TRIM_ARG 0x00000001 +#define MMC_DISCARD_ARG 0x00000003 +#define MMC_SECURE_TRIM1_ARG 0x80000001 +#define MMC_SECURE_TRIM2_ARG 0x80008000 + +#define MMC_SECURE_ARGS 0x80000000 +#define MMC_TRIM_ARGS 0x00008001 + +extern int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, + unsigned int arg); +extern int mmc_can_erase(struct mmc_card *card); +extern int mmc_can_trim(struct mmc_card *card); +extern int mmc_can_discard(struct mmc_card *card); +extern int mmc_can_sanitize(struct mmc_card *card); +extern int mmc_can_secure_erase_trim(struct mmc_card *card); +extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, + unsigned int nr); +extern unsigned int mmc_calc_max_discard(struct mmc_card *card); + +extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen); +extern int mmc_hw_reset(struct mmc_host *host); +extern int mmc_hw_reset_check(struct mmc_host *host); +extern int mmc_can_reset(struct mmc_card *card); + +extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *); +extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); + +extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); +extern void mmc_release_host(struct mmc_host *host); +extern int mmc_try_claim_host(struct mmc_host *host); + +extern int mmc_flush_cache(struct mmc_card *); + +extern int mmc_detect_card_removed(struct mmc_host *host); + +/** + * mmc_claim_host - exclusively claim a host + * 
@host: mmc host to claim + * + * Claim a host for a set of operations. + */ +static inline void mmc_claim_host(struct mmc_host *host) +{ + __mmc_claim_host(host, NULL); +} + +extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); + +#endif /* LINUX_MMC_CORE_H */ diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h new file mode 100644 index 00000000..8f66e28f --- /dev/null +++ b/include/linux/mmc/dw_mmc.h @@ -0,0 +1,253 @@ +/* + * Synopsys DesignWare Multimedia Card Interface driver + * (Based on NXP driver for lpc 31xx) + * + * Copyright (C) 2009 NXP Semiconductors + * Copyright (C) 2009, 2010 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef LINUX_MMC_DW_MMC_H +#define LINUX_MMC_DW_MMC_H + +#include <linux/scatterlist.h> + +#define MAX_MCI_SLOTS 2 + +enum dw_mci_state { + STATE_IDLE = 0, + STATE_SENDING_CMD, + STATE_SENDING_DATA, + STATE_DATA_BUSY, + STATE_SENDING_STOP, + STATE_DATA_ERROR, +}; + +enum { + EVENT_CMD_COMPLETE = 0, + EVENT_XFER_COMPLETE, + EVENT_DATA_COMPLETE, + EVENT_DATA_ERROR, + EVENT_XFER_ERROR +}; + +struct mmc_data; + +/** + * struct dw_mci - MMC controller state shared between all slots + * @lock: Spinlock protecting the queue and associated data. + * @regs: Pointer to MMIO registers. + * @sg: Scatterlist entry currently being processed by PIO code, if any. + * @sg_miter: PIO mapping scatterlist iterator. + * @cur_slot: The slot which is currently using the controller. + * @mrq: The request currently being processed on @cur_slot, + * or NULL if the controller is idle. + * @cmd: The command currently being sent to the card, or NULL. + * @data: The data currently being transferred, or NULL if no data + * transfer is in progress. + * @use_dma: Whether DMA channel is initialized or not. + * @using_dma: Whether DMA is in use for the current transfer. + * @sg_dma: Bus address of DMA buffer. + * @sg_cpu: Virtual address of DMA buffer. + * @dma_ops: Pointer to platform-specific DMA callbacks. + * @cmd_status: Snapshot of SR taken upon completion of the current + * command. Only valid when EVENT_CMD_COMPLETE is pending. + * @data_status: Snapshot of SR taken upon completion of the current + * data transfer. Only valid when EVENT_DATA_COMPLETE or + * EVENT_DATA_ERROR is pending. + * @stop_cmdr: Value to be loaded into CMDR when the stop command is + * to be sent. + * @dir_status: Direction of current transfer. + * @tasklet: Tasklet running the request state machine. + * @card_tasklet: Tasklet handling card detect. + * @pending_events: Bitmask of events flagged by the interrupt handler + * to be processed by the tasklet. + * @completed_events: Bitmask of events which the state machine has + * processed. + * @state: Tasklet state. + * @queue: List of slots waiting for access to the controller. + * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus + * rate and timeout calculations. + * @current_speed: Configured rate of the controller. + * @num_slots: Number of slots available. + * @verid: Denote Version ID. + * @data_offset: Set the offset of DATA register according to VERID. + * @dev: Device associated with the MMC controller. + * @pdata: Platform data associated with the MMC controller. + * @slot: Slots sharing this MMC controller. + * @fifo_depth: depth of FIFO. 
+ * @data_shift: log2 of FIFO item size. + * @part_buf_start: Start index in part_buf. + * @part_buf_count: Bytes of partial data in part_buf. + * @part_buf: Simple buffer for partial fifo reads/writes. + * @push_data: Pointer to FIFO push function. + * @pull_data: Pointer to FIFO pull function. + * @quirks: Set of quirks that apply to specific versions of the IP. + * @irq_flags: The flags to be passed to request_irq. + * @irq: The irq value to be passed to request_irq. + * + * Locking + * ======= + * + * @lock is a softirq-safe spinlock protecting @queue as well as + * @cur_slot, @mrq and @state. These must always be updated + * at the same time while holding @lock. + * + * The @mrq field of struct dw_mci_slot is also protected by @lock, + * and must always be written at the same time as the slot is added to + * @queue. + * + * @pending_events and @completed_events are accessed using atomic bit + * operations, so they don't need any locking. + * + * None of the fields touched by the interrupt handler need any + * locking. However, ordering is important: Before EVENT_DATA_ERROR or + * EVENT_DATA_COMPLETE is set in @pending_events, all data-related + * interrupts must be disabled and @data_status updated with a + * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the + * CMDRDY interrupt must be disabled and @cmd_status updated with a + * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the + * bytes_xfered field of @data must be written. This is ensured by + * using barriers. + */ +struct dw_mci { + spinlock_t lock; + void __iomem *regs; + + struct scatterlist *sg; + struct sg_mapping_iter sg_miter; + + struct dw_mci_slot *cur_slot; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + /* DMA interface members*/ + int use_dma; + int using_dma; + + dma_addr_t sg_dma; + void *sg_cpu; + struct dw_mci_dma_ops *dma_ops; +#ifdef CONFIG_MMC_DW_IDMAC + unsigned int ring_size; +#else + struct dw_mci_dma_data *dma_data; +#endif + u32 cmd_status; + u32 data_status; + u32 stop_cmdr; + u32 dir_status; + struct tasklet_struct tasklet; + struct work_struct card_work; + unsigned long pending_events; + unsigned long completed_events; + enum dw_mci_state state; + struct list_head queue; + + u32 bus_hz; + u32 current_speed; + u32 num_slots; + u32 fifoth_val; + u16 verid; + u16 data_offset; + struct device dev; + struct dw_mci_board *pdata; + struct dw_mci_slot *slot[MAX_MCI_SLOTS]; + + /* FIFO push and pull */ + int fifo_depth; + int data_shift; + u8 part_buf_start; + u8 part_buf_count; + union { + u16 part_buf16; + u32 part_buf32; + u64 part_buf; + }; + void (*push_data)(struct dw_mci *host, void *buf, int cnt); + void (*pull_data)(struct dw_mci *host, void *buf, int cnt); + + /* Workaround flags */ + u32 quirks; + + struct regulator *vmmc; /* Power regulator */ + unsigned long irq_flags; /* IRQ flags */ + unsigned int irq; +}; + +/* DMA ops for Internal/External DMAC interface */ +struct dw_mci_dma_ops { + /* DMA Ops */ + int (*init)(struct dw_mci *host); + void (*start)(struct dw_mci *host, unsigned int sg_len); + void (*complete)(struct dw_mci *host); + void (*stop)(struct dw_mci *host); + void (*cleanup)(struct dw_mci *host); + void (*exit)(struct dw_mci *host); +}; + +/* IP Quirks/flags. 
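+ *
+ * These are OR'd together by platform code into dw_mci_board.quirks;
+ * e.g. (illustrative) .quirks = DW_MCI_QUIRK_HIGHSPEED |
+ * DW_MCI_QUIRK_BROKEN_CARD_DETECTION for a high-speed-capable slot
+ * whose card-detect line cannot be trusted.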
*/ +/* DTO fix for command transmission with IDMAC configured */ +#define DW_MCI_QUIRK_IDMAC_DTO BIT(0) +/* delay needed between retries on some 2.11a implementations */ +#define DW_MCI_QUIRK_RETRY_DELAY BIT(1) +/* High Speed Capable - Supports HS cards (up to 50MHz) */ +#define DW_MCI_QUIRK_HIGHSPEED BIT(2) +/* Unreliable card detection */ +#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) + + +struct dma_pdata; + +struct block_settings { + unsigned short max_segs; /* see blk_queue_max_segments */ + unsigned int max_blk_size; /* maximum size of one mmc block */ + unsigned int max_blk_count; /* maximum number of blocks in one req*/ + unsigned int max_req_size; /* maximum number of bytes in one req*/ + unsigned int max_seg_size; /* see blk_queue_max_segment_size */ +}; + +/* Board platform data */ +struct dw_mci_board { + u32 num_slots; + + u32 quirks; /* Workaround / Quirk flags */ + unsigned int bus_hz; /* Bus speed */ + + unsigned int caps; /* Capabilities */ + unsigned int caps2; /* More capabilities */ + /* + * Override fifo depth. If 0, autodetect it from the FIFOTH register, + * but note that this may not be reliable after a bootloader has used + * it. + */ + unsigned int fifo_depth; + + /* delay in mS before detecting cards after interrupt */ + u32 detect_delay_ms; + + int (*init)(u32 slot_id, irq_handler_t , void *); + int (*get_ro)(u32 slot_id); + int (*get_cd)(u32 slot_id); + int (*get_ocr)(u32 slot_id); + int (*get_bus_wd)(u32 slot_id); + /* + * Enable power to selected slot and set voltage to desired level. + * Voltage levels are specified using MMC_VDD_xxx defines defined + * in linux/mmc/host.h file. + */ + void (*setpower)(u32 slot_id, u32 volt); + void (*exit)(u32 slot_id); + void (*select_slot)(u32 slot_id); + + struct dw_mci_dma_ops *dma_ops; + struct dma_pdata *data; + struct block_settings *blk_settings; +}; + +#endif /* LINUX_MMC_DW_MMC_H */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h new file mode 100644 index 00000000..39c2def3 --- /dev/null +++ b/include/linux/mmc/host.h @@ -0,0 +1,486 @@ +/* + * linux/include/linux/mmc/host.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Host driver specific definitions. + */ +#ifndef LINUX_MMC_HOST_H +#define LINUX_MMC_HOST_H + +#include <linux/leds.h> +#include <linux/sched.h> +#include <linux/device.h> +#include <linux/fault-inject.h> +#include <linux/wakelock.h> +#include <linux/semaphore.h> + +#include <linux/mmc/core.h> +#include <linux/mmc/pm.h> + +struct mmc_ios { + unsigned int clock; /* clock rate */ + unsigned short vdd; + +/* vdd stores the bit number of the selected voltage range from below. 
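+ *
+ * For example, if the core selects the 3.2-3.3V range, vdd holds the
+ * bit number of MMC_VDD_32_33 within the OCR mask, i.e.
+ * (1 << ios->vdd) == MMC_VDD_32_33; host drivers typically convert it
+ * back to a mask that way when programming their power supply.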
*/ + + unsigned char bus_mode; /* command output mode */ + +#define MMC_BUSMODE_OPENDRAIN 1 +#define MMC_BUSMODE_PUSHPULL 2 + + unsigned char chip_select; /* SPI chip select */ + +#define MMC_CS_DONTCARE 0 +#define MMC_CS_HIGH 1 +#define MMC_CS_LOW 2 + + unsigned char power_mode; /* power supply mode */ + +#define MMC_POWER_OFF 0 +#define MMC_POWER_UP 1 +#define MMC_POWER_ON 2 + + unsigned char bus_width; /* data bus width */ + +#define MMC_BUS_WIDTH_1 0 +#define MMC_BUS_WIDTH_4 2 +#define MMC_BUS_WIDTH_8 3 + + unsigned char timing; /* timing specification used */ + +#define MMC_TIMING_LEGACY 0 +#define MMC_TIMING_MMC_HS 1 +#define MMC_TIMING_SD_HS 2 +#define MMC_TIMING_UHS_SDR12 MMC_TIMING_LEGACY +#define MMC_TIMING_UHS_SDR25 MMC_TIMING_SD_HS +#define MMC_TIMING_UHS_SDR50 3 +#define MMC_TIMING_UHS_SDR104 4 +#define MMC_TIMING_UHS_DDR50 5 +#define MMC_TIMING_MMC_HS200 6 + +#define MMC_SDR_MODE 0 +#define MMC_1_2V_DDR_MODE 1 +#define MMC_1_8V_DDR_MODE 2 +#define MMC_1_2V_SDR_MODE 3 +#define MMC_1_8V_SDR_MODE 4 + + unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ + +#define MMC_SIGNAL_VOLTAGE_330 0 +#define MMC_SIGNAL_VOLTAGE_180 1 +#define MMC_SIGNAL_VOLTAGE_120 2 + + unsigned char drv_type; /* driver type (A, B, C, D) */ + +#define MMC_SET_DRIVER_TYPE_B 0 +#define MMC_SET_DRIVER_TYPE_A 1 +#define MMC_SET_DRIVER_TYPE_C 2 +#define MMC_SET_DRIVER_TYPE_D 3 +}; + +struct mmc_host_ops { + /* + * 'enable' is called when the host is claimed and 'disable' is called + * when the host is released. 'enable' and 'disable' are deprecated. + */ + int (*enable)(struct mmc_host *host); + int (*disable)(struct mmc_host *host); + /* + * It is optional for the host to implement pre_req and post_req in + * order to support double buffering of requests (prepare one + * request while another request is active). + * pre_req() must always be followed by a post_req(). + * To undo a call made to pre_req(), call post_req() with + * a nonzero err condition. + */ + void (*post_req)(struct mmc_host *host, struct mmc_request *req, + int err); + void (*pre_req)(struct mmc_host *host, struct mmc_request *req, + bool is_first_req); + void (*request)(struct mmc_host *host, struct mmc_request *req); + /* + * Avoid calling these three functions too often or in a "fast path", + * since underlaying controller might implement them in an expensive + * and/or slow way. + * + * Also note that these functions might sleep, so don't call them + * in the atomic contexts! 
+ * + * Return values for the get_ro callback should be: + * 0 for a read/write card + * 1 for a read-only card + * -ENOSYS when not supported (equal to NULL callback) + * or a negative errno value when something bad happened + * + * Return values for the get_cd callback should be: + * 0 for a absent card + * 1 for a present card + * -ENOSYS when not supported (equal to NULL callback) + * or a negative errno value when something bad happened + */ + void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); + int (*get_ro)(struct mmc_host *host); + int (*get_cd)(struct mmc_host *host); + int (*get_slot_status)(struct mmc_host *host); + void (*dump_host_regs)(struct mmc_host *host); + + void (*enable_sdio_irq)(struct mmc_host *host, int enable); + + /* optional callback for HC quirks */ + void (*init_card)(struct mmc_host *host, struct mmc_card *card); + + int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); + + /* The tuning command opcode value is different for SD and eMMC cards */ + int (*execute_tuning)(struct mmc_host *host, u32 opcode); + void (*enable_preset_value)(struct mmc_host *host, bool enable); + int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); + void (*hw_reset)(struct mmc_host *host); +}; + +struct mmc_card; +struct device; + +struct mmc_async_req { + /* active mmc request */ + struct mmc_request *mrq; + /* + * Check error status of completed mmc request. + * Returns 0 if success otherwise non zero. + */ + int (*err_check) (struct mmc_card *, struct mmc_async_req *); +}; + +struct mmc_hotplug { + unsigned int irq; + void *handler_priv; +}; + +struct mmc_host { + struct device *parent; + struct device class_dev; + int index; + const struct mmc_host_ops *ops; + unsigned int f_min; + unsigned int f_max; + unsigned int f_init; + u32 ocr_avail; + u32 ocr_avail_sdio; /* SDIO-specific OCR */ + u32 ocr_avail_sd; /* SD-specific OCR */ + u32 ocr_avail_mmc; /* MMC-specific OCR */ + struct notifier_block pm_notify; + +#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ +#define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ +#define MMC_VDD_21_22 0x00000200 /* VDD voltage 2.1 ~ 2.2 */ +#define MMC_VDD_22_23 0x00000400 /* VDD voltage 2.2 ~ 2.3 */ +#define MMC_VDD_23_24 0x00000800 /* VDD voltage 2.3 ~ 2.4 */ +#define MMC_VDD_24_25 0x00001000 /* VDD voltage 2.4 ~ 2.5 */ +#define MMC_VDD_25_26 0x00002000 /* VDD voltage 2.5 ~ 2.6 */ +#define MMC_VDD_26_27 0x00004000 /* VDD voltage 2.6 ~ 2.7 */ +#define MMC_VDD_27_28 0x00008000 /* VDD voltage 2.7 ~ 2.8 */ +#define MMC_VDD_28_29 0x00010000 /* VDD voltage 2.8 ~ 2.9 */ +#define MMC_VDD_29_30 0x00020000 /* VDD voltage 2.9 ~ 3.0 */ +#define MMC_VDD_30_31 0x00040000 /* VDD voltage 3.0 ~ 3.1 */ +#define MMC_VDD_31_32 0x00080000 /* VDD voltage 3.1 ~ 3.2 */ +#define MMC_VDD_32_33 0x00100000 /* VDD voltage 3.2 ~ 3.3 */ +#define MMC_VDD_33_34 0x00200000 /* VDD voltage 3.3 ~ 3.4 */ +#define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */ +#define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */ + + unsigned long caps; /* Host capabilities */ + +#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ +#define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */ +#define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */ +#define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */ +#define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ +#define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ 
+#define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ + +#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ +#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ +#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ +#define MMC_CAP_1_8V_DDR (1 << 11) /* can support */ + /* DDR mode at 1.8V */ +#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */ + /* DDR mode at 1.2V */ +#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */ +#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */ +#define MMC_CAP_UHS_SDR12 (1 << 15) /* Host supports UHS SDR12 mode */ +#define MMC_CAP_UHS_SDR25 (1 << 16) /* Host supports UHS SDR25 mode */ +#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */ +#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */ +#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */ +#define MMC_CAP_SET_XPC_330 (1 << 20) /* Host supports >150mA current at 3.3V */ +#define MMC_CAP_SET_XPC_300 (1 << 21) /* Host supports >150mA current at 3.0V */ +#define MMC_CAP_SET_XPC_180 (1 << 22) /* Host supports >150mA current at 1.8V */ +#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ +#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ +#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ +#define MMC_CAP_MAX_CURRENT_200 (1 << 26) /* Host max current limit is 200mA */ +#define MMC_CAP_MAX_CURRENT_400 (1 << 27) /* Host max current limit is 400mA */ +#define MMC_CAP_MAX_CURRENT_600 (1 << 28) /* Host max current limit is 600mA */ +#define MMC_CAP_MAX_CURRENT_800 (1 << 29) /* Host max current limit is 800mA */ +#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. 
*/ +#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */ + + unsigned int caps2; /* More host capabilities */ + +#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ +#define MMC_CAP2_CACHE_CTRL (1 << 1) /* Allow cache control */ +#define MMC_CAP2_POWEROFF_NOTIFY (1 << 2) /* Notify poweroff supported */ +#define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */ +#define MMC_CAP2_NO_SLEEP_CMD (1 << 4) /* Don't allow sleep command */ +#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ +#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ +#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ + MMC_CAP2_HS200_1_2V_SDR) +#define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */ +#define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */ +#define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */ + + mmc_pm_flag_t pm_caps; /* supported pm features */ + unsigned int power_notify_type; +#define MMC_HOST_PW_NOTIFY_NONE 0 +#define MMC_HOST_PW_NOTIFY_SHORT 1 +#define MMC_HOST_PW_NOTIFY_LONG 2 + +#ifdef CONFIG_MMC_CLKGATE + int clk_requests; /* internal reference counter */ + unsigned int clk_delay; /* number of MCI clk hold cycles */ + bool clk_gated; /* clock gated */ + struct delayed_work clk_gate_work; /* delayed clock gate */ + unsigned int clk_old; /* old clock value cache */ + spinlock_t clk_lock; /* lock for clk fields */ + struct mutex clk_gate_mutex; /* mutex for clock gating */ + struct device_attribute clkgate_delay_attr; + unsigned long clkgate_delay; +#endif + + /* host specific block data */ + unsigned int max_seg_size; /* see blk_queue_max_segment_size */ + unsigned short max_segs; /* see blk_queue_max_segments */ + unsigned short unused; + unsigned int max_req_size; /* maximum number of bytes in one req */ + unsigned int max_blk_size; /* maximum size of one mmc block */ + unsigned int max_blk_count; /* maximum number of blocks in one req */ + unsigned int max_discard_to; /* max. 
discard timeout in ms */ + + /* private data */ + spinlock_t lock; /* lock for claim and bus ops */ + + struct mmc_ios ios; /* current io bus settings */ + u32 ocr; /* the current OCR setting */ + + /* group bitfields together to minimize padding */ + unsigned int use_spi_crc:1; + unsigned int claimed:1; /* host exclusively claimed */ + unsigned int bus_dead:1; /* bus has been released */ +#ifdef CONFIG_MMC_DEBUG + unsigned int removed:1; /* host is being removed */ +#endif + + int rescan_disable; /* disable card detection */ + + struct mmc_card *card; /* device attached to this host */ + + wait_queue_head_t wq; + struct task_struct *claimer; /* task that has host claimed */ + int claim_cnt; /* "claim" nesting count */ + + struct delayed_work detect; + struct wake_lock detect_wake_lock; + int detect_change; /* card detect flag */ + struct mmc_hotplug hotplug; + + const struct mmc_bus_ops *bus_ops; /* current bus driver */ + unsigned int bus_refs; /* reference counter */ + + unsigned int bus_resume_flags; +#define MMC_BUSRESUME_MANUAL_RESUME (1 << 0) +#define MMC_BUSRESUME_NEEDS_RESUME (1 << 1) + + unsigned int sdio_irqs; + struct task_struct *sdio_irq_thread; + bool sdio_irq_pending; + atomic_t sdio_irq_thread_abort; + + mmc_pm_flag_t pm_flags; /* requested pm features */ + +#ifdef CONFIG_LEDS_TRIGGERS + struct led_trigger *led; /* activity led */ +#endif + +#ifdef CONFIG_REGULATOR + bool regulator_enabled; /* regulator state */ +#endif + + struct dentry *debugfs_root; + + struct mmc_async_req *areq; /* active async req */ + +#ifdef CONFIG_FAIL_MMC_REQUEST + struct fault_attr fail_mmc_request; +#endif + + unsigned int actual_clock; /* Actual HC clock rate */ + +#ifdef CONFIG_MMC_EMBEDDED_SDIO + struct { + struct sdio_cis *cis; + struct sdio_cccr *cccr; + struct sdio_embedded_func *funcs; + int num_funcs; + } embedded_sdio_data; +#endif + +#ifdef CONFIG_MMC_UNSAFE_RESUME + /* Use to record if the card attath staus change in suspend mode*/ + int card_attath_status; + +#define card_attach_status_change 1 +#define card_attach_status_unchange 0 +#endif + /*Use to enable scan retry when request or cmd fail*/ + int scan_retry; + bool card_scan_status; /*use to record if the card scan have done and card init OK 0:fail 1:OK*/ + struct semaphore req_sema; /*use to handle request asynchronous, to prevent issue command in the same time*/ + int wmt_host_index; /*use to identify the host number*/ + + unsigned long private[0] ____cacheline_aligned; +}; + +extern struct mmc_host *mmc_alloc_host(int extra, struct device *); +extern int mmc_add_host(struct mmc_host *, bool); +extern void mmc_remove_host(struct mmc_host *); +extern void mmc_free_host(struct mmc_host *); + +#ifdef CONFIG_MMC_EMBEDDED_SDIO +extern void mmc_set_embedded_sdio_data(struct mmc_host *host, + struct sdio_cis *cis, + struct sdio_cccr *cccr, + struct sdio_embedded_func *funcs, + int num_funcs); +#endif + +static inline void *mmc_priv(struct mmc_host *host) +{ + return (void *)host->private; +} + +#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) + +#define mmc_dev(x) ((x)->parent) +#define mmc_classdev(x) (&(x)->class_dev) +#define mmc_hostname(x) (dev_name(&(x)->class_dev)) +#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_NEEDS_RESUME) +#define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME) + +static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual) +{ + if (manual) + host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME; + else + 
host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME; +} + +extern int mmc_resume_bus(struct mmc_host *host); + +extern int mmc_suspend_host(struct mmc_host *); +extern int mmc_resume_host(struct mmc_host *); + +extern int mmc_power_save_host(struct mmc_host *host); +extern int mmc_power_restore_host(struct mmc_host *host); + +extern void mmc_detect_change(struct mmc_host *, unsigned long delay); +extern void mmc_request_done(struct mmc_host *, struct mmc_request *); +extern void mmc_force_remove_card(struct mmc_host *host); + +extern int mmc_cache_ctrl(struct mmc_host *, u8); + +static inline void mmc_signal_sdio_irq(struct mmc_host *host) +{ + host->ops->enable_sdio_irq(host, 0); + host->sdio_irq_pending = true; + wake_up_process(host->sdio_irq_thread); +} + +struct regulator; + +#ifdef CONFIG_REGULATOR +int mmc_regulator_get_ocrmask(struct regulator *supply); +int mmc_regulator_set_ocr(struct mmc_host *mmc, + struct regulator *supply, + unsigned short vdd_bit); +#else +static inline int mmc_regulator_get_ocrmask(struct regulator *supply) +{ + return 0; +} + +static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, + struct regulator *supply, + unsigned short vdd_bit) +{ + return 0; +} +#endif + +int mmc_card_awake(struct mmc_host *host); +int mmc_card_sleep(struct mmc_host *host); +int mmc_card_can_sleep(struct mmc_host *host); + +int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); + +/* Module parameter */ +extern bool mmc_assume_removable; + +static inline int mmc_card_is_removable(struct mmc_host *host) +{ + return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable; +} + +static inline int mmc_card_keep_power(struct mmc_host *host) +{ + return host->pm_flags & MMC_PM_KEEP_POWER; +} + +static inline int mmc_card_wake_sdio_irq(struct mmc_host *host) +{ + return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ; +} + +static inline int mmc_host_cmd23(struct mmc_host *host) +{ + return host->caps & MMC_CAP_CMD23; +} + +static inline int mmc_boot_partition_access(struct mmc_host *host) +{ + return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC); +} + +#ifdef CONFIG_MMC_CLKGATE +void mmc_host_clk_hold(struct mmc_host *host); +void mmc_host_clk_release(struct mmc_host *host); +unsigned int mmc_host_clk_rate(struct mmc_host *host); + +#else +static inline void mmc_host_clk_hold(struct mmc_host *host) +{ +} + +static inline void mmc_host_clk_release(struct mmc_host *host) +{ +} + +static inline unsigned int mmc_host_clk_rate(struct mmc_host *host) +{ + return host->ios.clock; +} +#endif +#endif /* LINUX_MMC_HOST_H */ diff --git a/include/linux/mmc/ioctl.h b/include/linux/mmc/ioctl.h new file mode 100644 index 00000000..1f5e6892 --- /dev/null +++ b/include/linux/mmc/ioctl.h @@ -0,0 +1,57 @@ +#ifndef LINUX_MMC_IOCTL_H +#define LINUX_MMC_IOCTL_H + +#include <linux/types.h> + +struct mmc_ioc_cmd { + /* Implies direction of data. true = write, false = read */ + int write_flag; + + /* Application-specific command. true = precede with CMD55 */ + int is_acmd; + + __u32 opcode; + __u32 arg; + __u32 response[4]; /* CMD response */ + unsigned int flags; + unsigned int blksz; + unsigned int blocks; + + /* + * Sleep at least postsleep_min_us useconds, and at most + * postsleep_max_us useconds *after* issuing command. Needed for + * some read commands for which cards have no other way of indicating + * they're ready for the next command (i.e. there is no equivalent of + * a "busy" indicator for read operations). 
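+ *
+ * For example (illustrative), a user-space tool issuing such a read
+ * through ioctl(fd, MMC_IOC_CMD, &ic) on the block device could set
+ * postsleep_min_us/postsleep_max_us to give the card a short, bounded
+ * settling time after the command completes.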
+ */ + unsigned int postsleep_min_us; + unsigned int postsleep_max_us; + + /* + * Override driver-computed timeouts. Note the difference in units! + */ + unsigned int data_timeout_ns; + unsigned int cmd_timeout_ms; + + /* + * For 64-bit machines, the next member, ``__u64 data_ptr``, wants to + * be 8-byte aligned. Make sure this struct is the same size when + * built for 32-bit. + */ + __u32 __pad; + + /* DAT buffer */ + __u64 data_ptr; +}; +#define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr + +#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd) + +/* + * Since this ioctl is only meant to enhance (and not replace) normal access + * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES + * is enforced per ioctl call. For larger data transfers, use the normal + * block device operations. + */ +#define MMC_IOC_MAX_BYTES (512L * 256) +#endif /* LINUX_MMC_IOCTL_H */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h new file mode 100644 index 00000000..b822a2cb --- /dev/null +++ b/include/linux/mmc/mmc.h @@ -0,0 +1,449 @@ +/* + * Header for MultiMediaCard (MMC) + * + * Copyright 2002 Hewlett-Packard Company + * + * Use consistent with the GNU GPL is permitted, + * provided that this copyright notice is + * preserved in its entirety in all copies and derived works. + * + * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, + * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS + * FITNESS FOR ANY PARTICULAR PURPOSE. + * + * Many thanks to Alessandro Rubini and Jonathan Corbet! + * + * Based strongly on code by: + * + * Author: Yong-iL Joh <tolkien@mizi.com> + * + * Author: Andrew Christian + * 15 May 2002 + */ + +#ifndef LINUX_MMC_MMC_H +#define LINUX_MMC_MMC_H + +/* Standard MMC commands (4.1) type argument response */ + /* class 1 */ +#define MMC_GO_IDLE_STATE 0 /* bc */ +#define MMC_SEND_OP_COND 1 /* bcr [31:0] OCR R3 */ +#define MMC_ALL_SEND_CID 2 /* bcr R2 */ +#define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */ +#define MMC_SET_DSR 4 /* bc [31:16] RCA */ +#define MMC_SLEEP_AWAKE 5 /* ac [31:16] RCA 15:flg R1b */ +#define MMC_SWITCH 6 /* ac [31:0] See below R1b */ +#define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */ +#define MMC_SEND_EXT_CSD 8 /* adtc R1 */ +#define MMC_SEND_CSD 9 /* ac [31:16] RCA R2 */ +#define MMC_SEND_CID 10 /* ac [31:16] RCA R2 */ +#define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */ +#define MMC_STOP_TRANSMISSION 12 /* ac R1b */ +#define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */ +#define MMC_BUS_TEST_R 14 /* adtc R1 */ +#define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */ +#define MMC_BUS_TEST_W 19 /* adtc R1 */ +#define MMC_SPI_READ_OCR 58 /* spi spi_R3 */ +#define MMC_SPI_CRC_ON_OFF 59 /* spi [0:0] flag spi_R1 */ + + /* class 2 */ +#define MMC_SET_BLOCKLEN 16 /* ac [31:0] block len R1 */ +#define MMC_READ_SINGLE_BLOCK 17 /* adtc [31:0] data addr R1 */ +#define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */ +#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */ +#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */ + + /* class 3 */ +#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ + + /* class 4 */ +#define MMC_SET_BLOCK_COUNT 23 /* adtc [31:0] data addr R1 */ +#define MMC_WRITE_BLOCK 24 /* adtc [31:0] data addr R1 */ +#define MMC_WRITE_MULTIPLE_BLOCK 25 /* adtc R1 */ +#define MMC_PROGRAM_CID 26 /* adtc R1 */ +#define MMC_PROGRAM_CSD 27 /* adtc R1 */ + + /* class 6 */ +#define MMC_SET_WRITE_PROT 28 /* ac [31:0] data addr R1b */ +#define 
MMC_CLR_WRITE_PROT 29 /* ac [31:0] data addr R1b */ +#define MMC_SEND_WRITE_PROT 30 /* adtc [31:0] wpdata addr R1 */ + + /* class 5 */ +#define MMC_ERASE_GROUP_START 35 /* ac [31:0] data addr R1 */ +#define MMC_ERASE_GROUP_END 36 /* ac [31:0] data addr R1 */ +#define MMC_ERASE 38 /* ac R1b */ + + /* class 9 */ +#define MMC_FAST_IO 39 /* ac <Complex> R4 */ +#define MMC_GO_IRQ_STATE 40 /* bcr R5 */ + + /* class 7 */ +#define MMC_LOCK_UNLOCK 42 /* adtc R1b */ + + /* class 8 */ +#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ +#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ + +static inline bool mmc_op_multi(u32 opcode) +{ + return opcode == MMC_WRITE_MULTIPLE_BLOCK || + opcode == MMC_READ_MULTIPLE_BLOCK; +} + +/* + * MMC_SWITCH argument format: + * + * [31:26] Always 0 + * [25:24] Access Mode + * [23:16] Location of target Byte in EXT_CSD + * [15:08] Value Byte + * [07:03] Always 0 + * [02:00] Command Set + */ + +/* + MMC status in R1, for native mode (SPI bits are different) + Type + e : error bit + s : status bit + r : detected and set for the actual command response + x : detected and set during command execution. the host must poll + the card by sending status command in order to read these bits. + Clear condition + a : according to the card state + b : always related to the previous command. Reception of + a valid command will clear it (with a delay of one command) + c : clear by read + */ + +#define R1_OUT_OF_RANGE (1 << 31) /* er, c */ +#define R1_ADDRESS_ERROR (1 << 30) /* erx, c */ +#define R1_BLOCK_LEN_ERROR (1 << 29) /* er, c */ +#define R1_ERASE_SEQ_ERROR (1 << 28) /* er, c */ +#define R1_ERASE_PARAM (1 << 27) /* ex, c */ +#define R1_WP_VIOLATION (1 << 26) /* erx, c */ +#define R1_CARD_IS_LOCKED (1 << 25) /* sx, a */ +#define R1_LOCK_UNLOCK_FAILED (1 << 24) /* erx, c */ +#define R1_COM_CRC_ERROR (1 << 23) /* er, b */ +#define R1_ILLEGAL_COMMAND (1 << 22) /* er, b */ +#define R1_CARD_ECC_FAILED (1 << 21) /* ex, c */ +#define R1_CC_ERROR (1 << 20) /* erx, c */ +#define R1_ERROR (1 << 19) /* erx, c */ +#define R1_UNDERRUN (1 << 18) /* ex, c */ +#define R1_OVERRUN (1 << 17) /* ex, c */ +#define R1_CID_CSD_OVERWRITE (1 << 16) /* erx, c, CID/CSD overwrite */ +#define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */ +#define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */ +#define R1_ERASE_RESET (1 << 13) /* sr, c */ +#define R1_STATUS(x) (x & 0xFFFFE000) +#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ +#define R1_READY_FOR_DATA (1 << 8) /* sx, a */ +#define R1_SWITCH_ERROR (1 << 7) /* sx, c */ +#define R1_APP_CMD (1 << 5) /* sr, c */ + +#define R1_STATE_IDLE 0 +#define R1_STATE_READY 1 +#define R1_STATE_IDENT 2 +#define R1_STATE_STBY 3 +#define R1_STATE_TRAN 4 +#define R1_STATE_DATA 5 +#define R1_STATE_RCV 6 +#define R1_STATE_PRG 7 +#define R1_STATE_DIS 8 + +/* + * MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS + * R1 is the low order byte; R2 is the next highest byte, when present. 
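MMC_IOC_CMD, defined in mmc/ioctl.h above, is issued from userspace against an mmcblk device node. A hedged sketch that reads EXT_CSD with CMD8 and decodes the card state from the R1 response; the device path is only an example, and EXAMPLE_CMD_FLAGS mirrors MMC_RSP_R1 | MMC_CMD_ADTC from the kernel's mmc/core.h, which is not among the headers shown, so its value is an assumption:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/major.h>
#include <linux/mmc/ioctl.h>

#define EXAMPLE_CMD_FLAGS	0x35	/* assumed: MMC_RSP_R1 | MMC_CMD_ADTC from mmc/core.h */
#define MMC_SEND_EXT_CSD	8	/* as defined in mmc/mmc.h above */
#define R1_CURRENT_STATE(x)	(((x) & 0x00001E00) >> 9)

int main(void)
{
	__u8 ext_csd[512];
	struct mmc_ioc_cmd cmd;
	int fd = open("/dev/mmcblk0", O_RDWR);	/* example node name */

	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_SEND_EXT_CSD;
	cmd.arg = 0;
	cmd.flags = EXAMPLE_CMD_FLAGS;
	cmd.write_flag = 0;			/* read */
	cmd.blksz = 512;
	cmd.blocks = 1;
	mmc_ioc_cmd_set_data(cmd, ext_csd);	/* fills cmd.data_ptr */

	if (ioctl(fd, MMC_IOC_CMD, &cmd) == 0)
		printf("EXT_CSD rev %u, card state %lu\n",
		       (unsigned)ext_csd[192],	/* EXT_CSD_REV, per the offsets below */
		       (unsigned long)R1_CURRENT_STATE(cmd.response[0]));
	close(fd);
	return 0;
}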
+ */ +#define R1_SPI_IDLE (1 << 0) +#define R1_SPI_ERASE_RESET (1 << 1) +#define R1_SPI_ILLEGAL_COMMAND (1 << 2) +#define R1_SPI_COM_CRC (1 << 3) +#define R1_SPI_ERASE_SEQ (1 << 4) +#define R1_SPI_ADDRESS (1 << 5) +#define R1_SPI_PARAMETER (1 << 6) +/* R1 bit 7 is always zero */ +#define R2_SPI_CARD_LOCKED (1 << 8) +#define R2_SPI_WP_ERASE_SKIP (1 << 9) /* or lock/unlock fail */ +#define R2_SPI_LOCK_UNLOCK_FAIL R2_SPI_WP_ERASE_SKIP +#define R2_SPI_ERROR (1 << 10) +#define R2_SPI_CC_ERROR (1 << 11) +#define R2_SPI_CARD_ECC_ERROR (1 << 12) +#define R2_SPI_WP_VIOLATION (1 << 13) +#define R2_SPI_ERASE_PARAM (1 << 14) +#define R2_SPI_OUT_OF_RANGE (1 << 15) /* or CSD overwrite */ +#define R2_SPI_CSD_OVERWRITE R2_SPI_OUT_OF_RANGE + +/* These are unpacked versions of the actual responses */ + +struct _mmc_csd { + u8 csd_structure; + u8 spec_vers; + u8 taac; + u8 nsac; + u8 tran_speed; + u16 ccc; + u8 read_bl_len; + u8 read_bl_partial; + u8 write_blk_misalign; + u8 read_blk_misalign; + u8 dsr_imp; + u16 c_size; + u8 vdd_r_curr_min; + u8 vdd_r_curr_max; + u8 vdd_w_curr_min; + u8 vdd_w_curr_max; + u8 c_size_mult; + union { + struct { /* MMC system specification version 3.1 */ + u8 erase_grp_size; + u8 erase_grp_mult; + } v31; + struct { /* MMC system specification version 2.2 */ + u8 sector_size; + u8 erase_grp_size; + } v22; + } erase; + u8 wp_grp_size; + u8 wp_grp_enable; + u8 default_ecc; + u8 r2w_factor; + u8 write_bl_len; + u8 write_bl_partial; + u8 file_format_grp; + u8 copy; + u8 perm_write_protect; + u8 tmp_write_protect; + u8 file_format; + u8 ecc; +}; + +/* + * OCR bits are mostly in host.h + */ +#define MMC_CARD_BUSY 0x80000000 /* Card Power up status bit */ + +/* + * Card Command Classes (CCC) + */ +#define CCC_BASIC (1<<0) /* (0) Basic protocol functions */ + /* (CMD0,1,2,3,4,7,9,10,12,13,15) */ + /* (and for SPI, CMD58,59) */ +#define CCC_STREAM_READ (1<<1) /* (1) Stream read commands */ + /* (CMD11) */ +#define CCC_BLOCK_READ (1<<2) /* (2) Block read commands */ + /* (CMD16,17,18) */ +#define CCC_STREAM_WRITE (1<<3) /* (3) Stream write commands */ + /* (CMD20) */ +#define CCC_BLOCK_WRITE (1<<4) /* (4) Block write commands */ + /* (CMD16,24,25,26,27) */ +#define CCC_ERASE (1<<5) /* (5) Ability to erase blocks */ + /* (CMD32,33,34,35,36,37,38,39) */ +#define CCC_WRITE_PROT (1<<6) /* (6) Able to write protect blocks */ + /* (CMD28,29,30) */ +#define CCC_LOCK_CARD (1<<7) /* (7) Able to lock down card */ + /* (CMD16,CMD42) */ +#define CCC_APP_SPEC (1<<8) /* (8) Application specific */ + /* (CMD55,56,57,ACMD*) */ +#define CCC_IO_MODE (1<<9) /* (9) I/O mode */ + /* (CMD5,39,40,52,53) */ +#define CCC_SWITCH (1<<10) /* (10) High speed switch */ + /* (CMD6,34,35,36,37,50) */ + /* (11) Reserved */ + /* (CMD?) 
*/ + +/* + * CSD field definitions + */ + +#define CSD_STRUCT_VER_1_0 0 /* Valid for system specification 1.0 - 1.2 */ +#define CSD_STRUCT_VER_1_1 1 /* Valid for system specification 1.4 - 2.2 */ +#define CSD_STRUCT_VER_1_2 2 /* Valid for system specification 3.1 - 3.2 - 3.31 - 4.0 - 4.1 */ +#define CSD_STRUCT_EXT_CSD 3 /* Version is coded in CSD_STRUCTURE in EXT_CSD */ + +#define CSD_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.2 */ +#define CSD_SPEC_VER_1 1 /* Implements system specification 1.4 */ +#define CSD_SPEC_VER_2 2 /* Implements system specification 2.0 - 2.2 */ +#define CSD_SPEC_VER_3 3 /* Implements system specification 3.1 - 3.2 - 3.31 */ +#define CSD_SPEC_VER_4 4 /* Implements system specification 4.0 - 4.1 */ + +/* + * EXT_CSD fields + */ + +#define EXT_CSD_FLUSH_CACHE 32 /* W */ +#define EXT_CSD_CACHE_CTRL 33 /* R/W */ +#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ +#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ +#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ +#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ +#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ +#define EXT_CSD_HPI_MGMT 161 /* R/W */ +#define EXT_CSD_RST_N_FUNCTION 162 /* R/W */ +#define EXT_CSD_SANITIZE_START 165 /* W */ +#define EXT_CSD_WR_REL_PARAM 166 /* RO */ +#define EXT_CSD_BOOT_WP 173 /* R/W */ +#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ +#define EXT_CSD_PART_CONFIG 179 /* R/W */ +#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */ +#define EXT_CSD_BUS_WIDTH 183 /* R/W */ +#define EXT_CSD_HS_TIMING 185 /* R/W */ +#define EXT_CSD_POWER_CLASS 187 /* R/W */ +#define EXT_CSD_REV 192 /* RO */ +#define EXT_CSD_STRUCTURE 194 /* RO */ +#define EXT_CSD_CARD_TYPE 196 /* RO */ +#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */ +#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */ +#define EXT_CSD_PWR_CL_52_195 200 /* RO */ +#define EXT_CSD_PWR_CL_26_195 201 /* RO */ +#define EXT_CSD_PWR_CL_52_360 202 /* RO */ +#define EXT_CSD_PWR_CL_26_360 203 /* RO */ +#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ +#define EXT_CSD_S_A_TIMEOUT 217 /* RO */ +#define EXT_CSD_REL_WR_SEC_C 222 /* RO */ +#define EXT_CSD_HC_WP_GRP_SIZE 221 /* RO */ +#define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */ +#define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */ +#define EXT_CSD_BOOT_MULT 226 /* RO */ +#define EXT_CSD_SEC_TRIM_MULT 229 /* RO */ +#define EXT_CSD_SEC_ERASE_MULT 230 /* RO */ +#define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */ +#define EXT_CSD_TRIM_MULT 232 /* RO */ +#define EXT_CSD_PWR_CL_200_195 236 /* RO */ +#define EXT_CSD_PWR_CL_200_360 237 /* RO */ +#define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */ +#define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */ +#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ +#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ +#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ +#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ +#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ +#define EXT_CSD_HPI_FEATURES 503 /* RO */ + +/* + * EXT_CSD field definitions + */ + +#define EXT_CSD_WR_REL_PARAM_EN (1<<2) + +#define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40) +#define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10) +#define EXT_CSD_BOOT_WP_B_PERM_WP_EN (0x04) +#define EXT_CSD_BOOT_WP_B_PWR_WP_EN (0x01) + +#define EXT_CSD_PART_CONFIG_ACC_MASK (0x7) +#define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1) +#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4) + +#define EXT_CSD_PART_SUPPORT_PART_EN (0x1) + +#define EXT_CSD_CMD_SET_NORMAL (1<<0) +#define EXT_CSD_CMD_SET_SECURE (1<<1) +#define EXT_CSD_CMD_SET_CPSECURE (1<<2) + +#define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz 
*/ +#define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */ +#define EXT_CSD_CARD_TYPE_MASK 0x3F /* Mask out reserved bits */ +#define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */ + /* DDR mode @1.8V or 3V I/O */ +#define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */ + /* DDR mode @1.2V I/O */ +#define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \ + | EXT_CSD_CARD_TYPE_DDR_1_2V) +#define EXT_CSD_CARD_TYPE_SDR_1_8V (1<<4) /* Card can run at 200MHz */ +#define EXT_CSD_CARD_TYPE_SDR_1_2V (1<<5) /* Card can run at 200MHz */ + /* SDR mode @1.2V I/O */ + +#define EXT_CSD_CARD_TYPE_SDR_200 (EXT_CSD_CARD_TYPE_SDR_1_8V | \ + EXT_CSD_CARD_TYPE_SDR_1_2V) + +#define EXT_CSD_CARD_TYPE_SDR_ALL (EXT_CSD_CARD_TYPE_SDR_200 | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_1_2V_ALL (EXT_CSD_CARD_TYPE_SDR_1_2V | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_1_8V_ALL (EXT_CSD_CARD_TYPE_SDR_1_8V | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_2V | \ + EXT_CSD_CARD_TYPE_DDR_1_8V | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_8V | \ + EXT_CSD_CARD_TYPE_DDR_1_8V | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_2V | \ + EXT_CSD_CARD_TYPE_DDR_1_2V | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_8V | \ + EXT_CSD_CARD_TYPE_DDR_1_2V | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_2V | \ + EXT_CSD_CARD_TYPE_DDR_52 | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_8V | \ + EXT_CSD_CARD_TYPE_DDR_52 | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_200 | \ + EXT_CSD_CARD_TYPE_DDR_1_8V | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_200 | \ + EXT_CSD_CARD_TYPE_DDR_1_2V | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52 (EXT_CSD_CARD_TYPE_SDR_200 | \ + EXT_CSD_CARD_TYPE_DDR_52 | \ + EXT_CSD_CARD_TYPE_52 | \ + EXT_CSD_CARD_TYPE_26) + +#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ +#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ +#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ +#define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */ +#define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */ + +#define EXT_CSD_SEC_ER_EN BIT(0) +#define EXT_CSD_SEC_BD_BLK_EN BIT(2) +#define EXT_CSD_SEC_GB_CL_EN BIT(4) +#define EXT_CSD_SEC_SANITIZE BIT(6) /* v4.5 only */ + +#define EXT_CSD_RST_N_EN_MASK 0x3 +#define EXT_CSD_RST_N_ENABLED 1 /* RST_n is enabled on card */ + +#define EXT_CSD_NO_POWER_NOTIFICATION 0 +#define EXT_CSD_POWER_ON 1 +#define EXT_CSD_POWER_OFF_SHORT 2 +#define EXT_CSD_POWER_OFF_LONG 3 + +#define EXT_CSD_PWR_CL_8BIT_MASK 0xF0 /* 8 bit PWR CLS */ +#define EXT_CSD_PWR_CL_4BIT_MASK 0x0F /* 8 bit PWR CLS */ +#define EXT_CSD_PWR_CL_8BIT_SHIFT 4 +#define EXT_CSD_PWR_CL_4BIT_SHIFT 0 +/* + * MMC_SWITCH access modes + */ + +#define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */ +#define 
MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */ +#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */ +#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */ + +#endif /* LINUX_MMC_MMC_H */ diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h new file mode 100644 index 00000000..6e2d6a13 --- /dev/null +++ b/include/linux/mmc/pm.h @@ -0,0 +1,31 @@ +/* + * linux/include/linux/mmc/pm.h + * + * Author: Nicolas Pitre + * Copyright: (C) 2009 Marvell Technology Group Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef LINUX_MMC_PM_H +#define LINUX_MMC_PM_H + +/* + * These flags are used to describe power management features that + * some cards (typically SDIO cards) might wish to benefit from when + * the host system is being suspended. There are several layers of + * abstractions involved, from the host controller driver, to the MMC core + * code, to the SDIO core code, to finally get to the actual SDIO function + * driver. This file is therefore used for common definitions shared across + * all those layers. + */ + +typedef unsigned int mmc_pm_flag_t; + +#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */ +#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */ +#define MMC_PM_IGNORE_PM_NOTIFY (1 << 2) /* ignore mmc pm notify */ + +#endif /* LINUX_MMC_PM_H */ diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h new file mode 100644 index 00000000..1ebcf9ba --- /dev/null +++ b/include/linux/mmc/sd.h @@ -0,0 +1,94 @@ +/* + * include/linux/mmc/sd.h + * + * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef LINUX_MMC_SD_H +#define LINUX_MMC_SD_H + +/* SD commands type argument response */ + /* class 0 */ +/* This is basically the same command as for MMC with some quirks. 
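The MMC_SWITCH argument layout documented earlier in mmc.h combines with the MMC_SWITCH_MODE_* access codes just above into a single 32-bit CMD6 argument. The core's mmc_switch() helper (not part of these headers) packs it essentially like this illustrative helper:

#include <linux/types.h>

/*
 * Pack a CMD6 (MMC_SWITCH) argument from the documented layout:
 * [25:24] access mode, [23:16] EXT_CSD byte index, [15:8] value, [2:0] command set.
 * Hypothetical helper for illustration only.
 */
static inline u32 example_mmc_switch_arg(u8 access, u8 index, u8 value, u8 cmd_set)
{
	return ((u32)access << 24) | ((u32)index << 16) |
	       ((u32)value << 8) | cmd_set;
}

/*
 * e.g. request 8-bit bus width:
 * example_mmc_switch_arg(MMC_SWITCH_MODE_WRITE_BYTE, EXT_CSD_BUS_WIDTH,
 *			  EXT_CSD_BUS_WIDTH_8, EXT_CSD_CMD_SET_NORMAL);
 */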
*/ +#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */ +#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */ +#define SD_SWITCH_VOLTAGE 11 /* ac R1 */ + + /* class 10 */ +#define SD_SWITCH 6 /* adtc [31:0] See below R1 */ + + /* class 5 */ +#define SD_ERASE_WR_BLK_START 32 /* ac [31:0] data addr R1 */ +#define SD_ERASE_WR_BLK_END 33 /* ac [31:0] data addr R1 */ + + /* Application commands */ +#define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */ +#define SD_APP_SD_STATUS 13 /* adtc R1 */ +#define SD_APP_SEND_NUM_WR_BLKS 22 /* adtc R1 */ +#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ +#define SD_APP_SEND_SCR 51 /* adtc R1 */ + +/* OCR bit definitions */ +#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */ +#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */ +#define SD_OCR_XPC (1 << 28) /* SDXC power control */ +#define SD_OCR_CCS (1 << 30) /* Card Capacity Status */ + +/* + * SD_SWITCH argument format: + * + * [31] Check (0) or switch (1) + * [30:24] Reserved (0) + * [23:20] Function group 6 + * [19:16] Function group 5 + * [15:12] Function group 4 + * [11:8] Function group 3 + * [7:4] Function group 2 + * [3:0] Function group 1 + */ + +/* + * SD_SEND_IF_COND argument format: + * + * [31:12] Reserved (0) + * [11:8] Host Voltage Supply Flags + * [7:0] Check Pattern (0xAA) + */ + +/* + * SCR field definitions + */ + +#define SCR_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.01 */ +#define SCR_SPEC_VER_1 1 /* Implements system specification 1.10 */ +#define SCR_SPEC_VER_2 2 /* Implements system specification 2.00-3.0X */ + +/* + * SD bus widths + */ +#define SD_BUS_WIDTH_1 0 +#define SD_BUS_WIDTH_4 2 + +/* + * SD_SWITCH mode + */ +#define SD_SWITCH_CHECK 0 +#define SD_SWITCH_SET 1 + +/* + * SD_SWITCH function groups + */ +#define SD_SWITCH_GRP_ACCESS 0 + +/* + * SD_SWITCH access modes + */ +#define SD_SWITCH_ACCESS_DEF 0 +#define SD_SWITCH_ACCESS_HS 1 + +#endif /* LINUX_MMC_SD_H */ diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h new file mode 100644 index 00000000..8959604a --- /dev/null +++ b/include/linux/mmc/sdhci-pci-data.h @@ -0,0 +1,18 @@ +#ifndef LINUX_MMC_SDHCI_PCI_DATA_H +#define LINUX_MMC_SDHCI_PCI_DATA_H + +struct pci_dev; + +struct sdhci_pci_data { + struct pci_dev *pdev; + int slotno; + int rst_n_gpio; /* Set to -EINVAL if unused */ + int cd_gpio; /* Set to -EINVAL if unused */ + int (*setup)(struct sdhci_pci_data *data); + void (*cleanup)(struct sdhci_pci_data *data); +}; + +extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, + int slotno); + +#endif diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h new file mode 100644 index 00000000..5cdc96da --- /dev/null +++ b/include/linux/mmc/sdhci-spear.h @@ -0,0 +1,42 @@ +/* + * include/linux/mmc/sdhci-spear.h + * + * SDHCI declarations specific to ST SPEAr platform + * + * Copyright (C) 2010 ST Microelectronics + * Viresh Kumar<viresh.kumar@st.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
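sdhci-pci-data.h above exposes a single global function pointer, sdhci_pci_get_data, which platform code can install so the sdhci-pci driver picks up board wiring such as reset and card-detect GPIOs. A hedged sketch of a board file providing it; the GPIO numbers and the slot check are invented for illustration:

#include <linux/mmc/sdhci-pci-data.h>

static struct sdhci_pci_data example_sdhci_data = {
	.rst_n_gpio = 64,	/* placeholder pins; -EINVAL if unused */
	.cd_gpio    = 69,
};

static struct sdhci_pci_data *example_get_data(struct pci_dev *pdev, int slotno)
{
	/* Real code would key off pdev and slotno; slot 0 only here. */
	if (slotno != 0)
		return NULL;

	example_sdhci_data.pdev = pdev;
	example_sdhci_data.slotno = slotno;
	return &example_sdhci_data;
}

/* Called from board/platform init code before sdhci-pci probes. */
static void example_board_sdhci_init(void)
{
	sdhci_pci_get_data = example_get_data;
}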
+ */ + +#ifndef LINUX_MMC_SDHCI_SPEAR_H +#define LINUX_MMC_SDHCI_SPEAR_H + +#include <linux/platform_device.h> +/* + * struct sdhci_plat_data: spear sdhci platform data structure + * + * @card_power_gpio: gpio pin for enabling/disabling power to sdhci socket + * @power_active_high: if set, enable power to sdhci socket by setting + * card_power_gpio + * @power_always_enb: If set, then enable power on probe, otherwise enable only + * on card insertion and disable on card removal. + * card_int_gpio: gpio pin used for card detection + */ +struct sdhci_plat_data { + int card_power_gpio; + int power_active_high; + int power_always_enb; + int card_int_gpio; +}; + +/* This function is used to set platform_data field of pdev->dev */ +static inline void +sdhci_set_plat_data(struct platform_device *pdev, struct sdhci_plat_data *data) +{ + pdev->dev.platform_data = data; +} + +#endif /* LINUX_MMC_SDHCI_SPEAR_H */ diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h new file mode 100644 index 00000000..e9051e1c --- /dev/null +++ b/include/linux/mmc/sdhci.h @@ -0,0 +1,174 @@ +/* + * linux/include/linux/mmc/sdhci.h - Secure Digital Host Controller Interface + * + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ +#ifndef LINUX_MMC_SDHCI_H +#define LINUX_MMC_SDHCI_H + +#include <linux/scatterlist.h> +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/io.h> +#include <linux/mmc/host.h> + +struct sdhci_host { + /* Data set by hardware interface driver */ + const char *hw_name; /* Hardware bus name */ + + unsigned int quirks; /* Deviations from spec. */ + +/* Controller doesn't honor resets unless we touch the clock register */ +#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) +/* Controller has bad caps bits, but really supports DMA */ +#define SDHCI_QUIRK_FORCE_DMA (1<<1) +/* Controller doesn't like to be reset when there is no card inserted. 
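For the SPEAr platform data just above, board code fills struct sdhci_plat_data and attaches it to the platform device with sdhci_set_plat_data() before registration. A small sketch; the GPIO numbers are placeholders and the "sdhci" device name is assumed to match the SPEAr sdhci driver:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mmc/sdhci-spear.h>

static struct sdhci_plat_data example_sdhci_pdata = {
	.card_power_gpio   = 61,	/* placeholder pins */
	.power_active_high = 0,
	.power_always_enb  = 1,
	.card_int_gpio     = 62,
};

static struct platform_device example_sdhci_device = {
	.name = "sdhci",		/* assumed driver name */
	.id   = -1,
};

static void __init example_board_setup(void)
{
	sdhci_set_plat_data(&example_sdhci_device, &example_sdhci_pdata);
	platform_device_register(&example_sdhci_device);
}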
*/ +#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) +/* Controller doesn't like clearing the power reg before a change */ +#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) +/* Controller has flaky internal state so reset it on each ios change */ +#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) +/* Controller has an unusable DMA engine */ +#define SDHCI_QUIRK_BROKEN_DMA (1<<5) +/* Controller has an unusable ADMA engine */ +#define SDHCI_QUIRK_BROKEN_ADMA (1<<6) +/* Controller can only DMA from 32-bit aligned addresses */ +#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7) +/* Controller can only DMA chunk sizes that are a multiple of 32 bits */ +#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8) +/* Controller can only ADMA chunks that are a multiple of 32 bits */ +#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9) +/* Controller needs to be reset after each request to stay stable */ +#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10) +/* Controller needs voltage and power writes to happen separately */ +#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11) +/* Controller provides an incorrect timeout value for transfers */ +#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12) +/* Controller has an issue with buffer bits for small transfers */ +#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13) +/* Controller does not provide transfer-complete interrupt when not busy */ +#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14) +/* Controller has unreliable card detection */ +#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) +/* Controller reports inverted write-protect state */ +#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) +/* Controller has nonstandard clock management */ +#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17) +/* Controller does not like fast PIO transfers */ +#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18) +/* Controller losing signal/interrupt enable states after reset */ +#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19) +/* Controller has to be forced to use block size of 2048 bytes */ +#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20) +/* Controller cannot do multi-block transfers */ +#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21) +/* Controller can only handle 1-bit data transfers */ +#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22) +/* Controller needs 10ms delay between applying power and clock */ +#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) +/* Controller uses SDCLK instead of TMCLK for data timeouts */ +#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) +/* Controller reports wrong base clock capability */ +#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25) +/* Controller cannot support End Attribute in NOP ADMA descriptor */ +#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26) +/* Controller is missing device caps. Use caps provided by host */ +#define SDHCI_QUIRK_MISSING_CAPS (1<<27) +/* Controller uses Auto CMD12 command to stop the transfer */ +#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) +/* Controller doesn't have HISPD bit field in HI-SPEED SD card */ +#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29) +/* Controller treats ADMA descriptors with length 0000h incorrectly */ +#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30) +/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */ +#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31) + + unsigned int quirks2; /* More deviations from spec. 
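The SDHCI_QUIRK_* bits above describe controller errata and are ORed into sdhci_host->quirks by the platform or PCI glue before the core sdhci driver starts using the controller. A minimal sketch; allocation and registration (sdhci_alloc_host()/sdhci_add_host()) live in the driver-private drivers/mmc/host/sdhci.h and are intentionally not shown:

#include <linux/mmc/sdhci.h>

/* Glue code typically does this between allocating and registering the host. */
static void example_apply_quirks(struct sdhci_host *host)
{
	host->hw_name = "example-sdhci";	/* hypothetical bus name */
	host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
			SDHCI_QUIRK_NO_CARD_NO_RESET;
}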
*/ + +#define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0) + + int irq; /* Device IRQ */ + void __iomem *ioaddr; /* Mapped address */ + + const struct sdhci_ops *ops; /* Low level hw interface */ + + struct regulator *vmmc; /* Power regulator */ + + /* Internal data */ + struct mmc_host *mmc; /* MMC structure */ + u64 dma_mask; /* custom DMA mask */ + +#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) + struct led_classdev led; /* LED control */ + char led_name[32]; +#endif + + spinlock_t lock; /* Mutex */ + + int flags; /* Host attributes */ +#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */ +#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */ +#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */ +#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ +#define SDHCI_SDR50_NEEDS_TUNING (1<<4) /* SDR50 needs tuning */ +#define SDHCI_NEEDS_RETUNING (1<<5) /* Host needs retuning */ +#define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */ +#define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */ +#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */ +#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ +#define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */ + + unsigned int version; /* SDHCI spec. version */ + + unsigned int max_clk; /* Max possible freq (MHz) */ + unsigned int timeout_clk; /* Timeout freq (KHz) */ + unsigned int clk_mul; /* Clock Muliplier value */ + + unsigned int clock; /* Current clock (MHz) */ + u8 pwr; /* Current voltage */ + + bool runtime_suspended; /* Host is runtime suspended */ + + struct mmc_request *mrq; /* Current request */ + struct mmc_command *cmd; /* Current command */ + struct mmc_data *data; /* Current data request */ + unsigned int data_early:1; /* Data finished before cmd */ + + struct sg_mapping_iter sg_miter; /* SG state for PIO */ + unsigned int blocks; /* remaining PIO blocks */ + + int sg_count; /* Mapped sg entries */ + + u8 *adma_desc; /* ADMA descriptor table */ + u8 *align_buffer; /* Bounce buffer */ + + dma_addr_t adma_addr; /* Mapped ADMA descr. table */ + dma_addr_t align_addr; /* Mapped bounce buffer */ + + struct tasklet_struct card_tasklet; /* Tasklet structures */ + struct tasklet_struct finish_tasklet; + + struct timer_list timer; /* Timer for timeouts */ + + unsigned int caps; /* Alternative capabilities */ + + unsigned int ocr_avail_sdio; /* OCR bit masks */ + unsigned int ocr_avail_sd; + unsigned int ocr_avail_mmc; + + wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */ + unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */ + + unsigned int tuning_count; /* Timer count for re-tuning */ + unsigned int tuning_mode; /* Re-tuning mode supported by host */ +#define SDHCI_TUNING_MODE_1 0 + struct timer_list tuning_timer; /* Timer for tuning */ + + unsigned long private[0] ____cacheline_aligned; +}; +#endif /* LINUX_MMC_SDHCI_H */ diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h new file mode 100644 index 00000000..c9fe66c5 --- /dev/null +++ b/include/linux/mmc/sdio.h @@ -0,0 +1,191 @@ +/* + * include/linux/mmc/sdio.h + * + * Copyright 2006-2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#ifndef LINUX_MMC_SDIO_H +#define LINUX_MMC_SDIO_H + +/* SDIO commands type argument response */ +#define SD_IO_SEND_OP_COND 5 /* bcr [23:0] OCR R4 */ +#define SD_IO_RW_DIRECT 52 /* ac [31:0] See below R5 */ +#define SD_IO_RW_EXTENDED 53 /* adtc [31:0] See below R5 */ + +/* + * SD_IO_RW_DIRECT argument format: + * + * [31] R/W flag + * [30:28] Function number + * [27] RAW flag + * [25:9] Register address + * [7:0] Data + */ + +/* + * SD_IO_RW_EXTENDED argument format: + * + * [31] R/W flag + * [30:28] Function number + * [27] Block mode + * [26] Increment address + * [25:9] Register address + * [8:0] Byte/block count + */ + +#define R4_18V_PRESENT (1<<24) +#define R4_MEMORY_PRESENT (1 << 27) + +/* + SDIO status in R5 + Type + e : error bit + s : status bit + r : detected and set for the actual command response + x : detected and set during command execution. the host must poll + the card by sending status command in order to read these bits. + Clear condition + a : according to the card state + b : always related to the previous command. Reception of + a valid command will clear it (with a delay of one command) + c : clear by read + */ + +#define R5_COM_CRC_ERROR (1 << 15) /* er, b */ +#define R5_ILLEGAL_COMMAND (1 << 14) /* er, b */ +#define R5_ERROR (1 << 11) /* erx, c */ +#define R5_FUNCTION_NUMBER (1 << 9) /* er, c */ +#define R5_OUT_OF_RANGE (1 << 8) /* er, c */ +#define R5_STATUS(x) (x & 0xCB00) +#define R5_IO_CURRENT_STATE(x) ((x & 0x3000) >> 12) /* s, b */ + +/* + * Card Common Control Registers (CCCR) + */ + +#define SDIO_CCCR_CCCR 0x00 + +#define SDIO_CCCR_REV_1_00 0 /* CCCR/FBR Version 1.00 */ +#define SDIO_CCCR_REV_1_10 1 /* CCCR/FBR Version 1.10 */ +#define SDIO_CCCR_REV_1_20 2 /* CCCR/FBR Version 1.20 */ +#define SDIO_CCCR_REV_3_00 3 /* CCCR/FBR Version 3.00 */ + +#define SDIO_SDIO_REV_1_00 0 /* SDIO Spec Version 1.00 */ +#define SDIO_SDIO_REV_1_10 1 /* SDIO Spec Version 1.10 */ +#define SDIO_SDIO_REV_1_20 2 /* SDIO Spec Version 1.20 */ +#define SDIO_SDIO_REV_2_00 3 /* SDIO Spec Version 2.00 */ +#define SDIO_SDIO_REV_3_00 4 /* SDIO Spec Version 3.00 */ + +#define SDIO_CCCR_SD 0x01 + +#define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */ +#define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */ +#define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */ +#define SDIO_SD_REV_3_00 3 /* SD Physical Spev Version 3.00 */ + +#define SDIO_CCCR_IOEx 0x02 +#define SDIO_CCCR_IORx 0x03 + +#define SDIO_CCCR_IENx 0x04 /* Function/Master Interrupt Enable */ +#define SDIO_CCCR_INTx 0x05 /* Function Interrupt Pending */ + +#define SDIO_CCCR_ABORT 0x06 /* function abort/card reset */ + +#define SDIO_CCCR_IF 0x07 /* bus interface controls */ + +#define SDIO_BUS_WIDTH_1BIT 0x00 +#define SDIO_BUS_WIDTH_4BIT 0x02 +#define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */ +#define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */ + +#define SDIO_BUS_ASYNC_INT 0x20 + +#define SDIO_BUS_CD_DISABLE 0x80 /* disable pull-up on DAT3 (pin 1) */ + +#define SDIO_CCCR_CAPS 0x08 + +#define SDIO_CCCR_CAP_SDC 0x01 /* can do CMD52 while data transfer */ +#define SDIO_CCCR_CAP_SMB 0x02 /* can do multi-block xfers (CMD53) */ +#define SDIO_CCCR_CAP_SRW 0x04 /* supports read-wait protocol */ +#define SDIO_CCCR_CAP_SBS 0x08 /* supports suspend/resume */ +#define SDIO_CCCR_CAP_S4MI 0x10 /* interrupt during 4-bit CMD53 */ +#define SDIO_CCCR_CAP_E4MI 0x20 /* enable ints during 4-bit CMD53 */ +#define SDIO_CCCR_CAP_LSC 0x40 /* low speed card */ +#define SDIO_CCCR_CAP_4BLS 0x80 /* 
4 bit low speed card */ + +#define SDIO_CCCR_CIS 0x09 /* common CIS pointer (3 bytes) */ + +/* Following 4 regs are valid only if SBS is set */ +#define SDIO_CCCR_SUSPEND 0x0c +#define SDIO_CCCR_SELx 0x0d +#define SDIO_CCCR_EXECx 0x0e +#define SDIO_CCCR_READYx 0x0f + +#define SDIO_CCCR_BLKSIZE 0x10 + +#define SDIO_CCCR_POWER 0x12 + +#define SDIO_POWER_SMPC 0x01 /* Supports Master Power Control */ +#define SDIO_POWER_EMPC 0x02 /* Enable Master Power Control */ + +#define SDIO_CCCR_SPEED 0x13 + +#define SDIO_SPEED_SHS 0x01 /* Supports High-Speed mode */ +#define SDIO_SPEED_BSS_SHIFT 1 +#define SDIO_SPEED_BSS_MASK (7<<SDIO_SPEED_BSS_SHIFT) +#define SDIO_SPEED_SDR12 (0<<SDIO_SPEED_BSS_SHIFT) +#define SDIO_SPEED_SDR25 (1<<SDIO_SPEED_BSS_SHIFT) +#define SDIO_SPEED_SDR50 (2<<SDIO_SPEED_BSS_SHIFT) +#define SDIO_SPEED_SDR104 (3<<SDIO_SPEED_BSS_SHIFT) +#define SDIO_SPEED_DDR50 (4<<SDIO_SPEED_BSS_SHIFT) +#define SDIO_SPEED_EHS SDIO_SPEED_SDR25 /* Enable High-Speed */ + +#define SDIO_CCCR_UHS 0x14 +#define SDIO_UHS_SDR50 0x01 +#define SDIO_UHS_SDR104 0x02 +#define SDIO_UHS_DDR50 0x04 + +#define SDIO_CCCR_DRIVE_STRENGTH 0x15 +#define SDIO_SDTx_MASK 0x07 +#define SDIO_DRIVE_SDTA (1<<0) +#define SDIO_DRIVE_SDTC (1<<1) +#define SDIO_DRIVE_SDTD (1<<2) +#define SDIO_DRIVE_DTSx_MASK 0x03 +#define SDIO_DRIVE_DTSx_SHIFT 4 +#define SDIO_DTSx_SET_TYPE_B (0 << SDIO_DRIVE_DTSx_SHIFT) +#define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT) +#define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT) +#define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT) +/* + * Function Basic Registers (FBR) + */ + +#define SDIO_FBR_BASE(f) ((f) * 0x100) /* base of function f's FBRs */ + +#define SDIO_FBR_STD_IF 0x00 + +#define SDIO_FBR_SUPPORTS_CSA 0x40 /* supports Code Storage Area */ +#define SDIO_FBR_ENABLE_CSA 0x80 /* enable Code Storage Area */ + +#define SDIO_FBR_STD_IF_EXT 0x01 + +#define SDIO_FBR_POWER 0x02 + +#define SDIO_FBR_POWER_SPS 0x01 /* Supports Power Selection */ +#define SDIO_FBR_POWER_EPS 0x02 /* Enable (low) Power Selection */ + +#define SDIO_FBR_CIS 0x09 /* CIS pointer (3 bytes) */ + + +#define SDIO_FBR_CSA 0x0C /* CSA pointer (3 bytes) */ + +#define SDIO_FBR_CSA_DATA 0x0F + +#define SDIO_FBR_BLKSIZE 0x10 /* block size (2 bytes) */ + +#endif /* LINUX_MMC_SDIO_H */ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h new file mode 100755 index 00000000..dc680c4b --- /dev/null +++ b/include/linux/mmc/sdio_func.h @@ -0,0 +1,174 @@ +/* + * include/linux/mmc/sdio_func.h + * + * Copyright 2007-2008 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
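The SD_IO_RW_DIRECT (CMD52) argument layout documented in sdio.h above packs the direction, function number, RAW flag, register address and data byte into one word. The core builds this internally (mmc_io_rw_direct(), not part of these headers); the helper below only illustrates the same layout:

#include <linux/types.h>

/* Illustrative only; mirrors the CMD52 argument layout documented above. */
static inline u32 example_cmd52_arg(int write, unsigned int fn, int raw,
				    unsigned int addr, u8 data)
{
	u32 arg = 0;

	arg |= write ? 0x80000000 : 0;		/* [31] R/W flag */
	arg |= (fn & 0x7) << 28;		/* [30:28] function number */
	arg |= raw ? 0x08000000 : 0;		/* [27] read-after-write */
	arg |= (addr & 0x1ffff) << 9;		/* [25:9] register address */
	arg |= data;				/* [7:0] data byte (writes only) */

	return arg;
}

/* e.g. enable I/O on function 1: example_cmd52_arg(1, 0, 0, SDIO_CCCR_IOEx, 0x02) */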
+ */ + +#ifndef LINUX_MMC_SDIO_FUNC_H +#define LINUX_MMC_SDIO_FUNC_H + +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +#include <linux/mmc/pm.h> + +struct mmc_card; +struct sdio_func; + +typedef void (sdio_irq_handler_t)(struct sdio_func *); + +/* + * Structure used to hold embedded SDIO device data from platform layer + */ +struct sdio_embedded_func { + uint8_t f_class; + uint32_t f_maxblksize; +}; + +/* + * SDIO function CIS tuple (unknown to the core) + */ +struct sdio_func_tuple { + struct sdio_func_tuple *next; + unsigned char code; + unsigned char size; + unsigned char data[0]; +}; + +/* + * SDIO function devices + */ +struct sdio_func { + struct mmc_card *card; /* the card this device belongs to */ + struct device dev; /* the device */ + sdio_irq_handler_t *irq_handler; /* IRQ callback */ + unsigned int num; /* function number */ + + unsigned char class; /* standard interface class */ + unsigned short vendor; /* vendor id */ + unsigned short device; /* device id */ + + unsigned max_blksize; /* maximum block size */ + unsigned cur_blksize; /* current block size */ + + unsigned enable_timeout; /* max enable timeout in msec */ + + unsigned int state; /* function state */ +#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */ + + u8 tmpbuf[4]; /* DMA:able scratch buffer */ + + unsigned num_info; /* number of info strings */ + const char **info; /* info strings */ + + struct sdio_func_tuple *tuples; +}; + +#define sdio_func_present(f) ((f)->state & SDIO_STATE_PRESENT) + +#define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT) + +#define sdio_func_id(f) (dev_name(&(f)->dev)) + +#define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) +#define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) +#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) + +/* + * SDIO function device driver + */ +struct sdio_driver { + char *name; + const struct sdio_device_id *id_table; + + int (*probe)(struct sdio_func *, const struct sdio_device_id *); + void (*remove)(struct sdio_func *); + + struct device_driver drv; +}; + +#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) + +/** + * SDIO_DEVICE - macro used to describe a specific SDIO device + * @vend: the 16 bit manufacturer code + * @dev: the 16 bit function id + * + * This macro is used to create a struct sdio_device_id that matches a + * specific device. The class field will be set to SDIO_ANY_ID. + */ +#define SDIO_DEVICE(vend,dev) \ + .class = SDIO_ANY_ID, \ + .vendor = (vend), .device = (dev) + +/** + * SDIO_DEVICE_CLASS - macro used to describe a specific SDIO device class + * @dev_class: the 8 bit standard interface code + * + * This macro is used to create a struct sdio_device_id that matches a + * specific standard SDIO function type. The vendor and device fields will + * be set to SDIO_ANY_ID. 
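A function driver binds to cards through struct sdio_driver and the SDIO_DEVICE() matching shown above; registration goes through sdio_register_driver(), declared just below. Minimal skeleton; the Marvell IDs are taken from sdio_ids.h further down and used purely as an example:

#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

static const struct sdio_device_id example_ids[] = {
	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8688WLAN) },
	{ /* end: all zeroes */ },
};

static int example_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
	dev_info(&func->dev, "bound to function %u, class 0x%02x\n",
		 func->num, func->class);
	return 0;
}

static void example_remove(struct sdio_func *func)
{
}

static struct sdio_driver example_sdio_driver = {
	.name		= "example_sdio",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* registered with sdio_register_driver(&example_sdio_driver) from module init */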
+ */ +#define SDIO_DEVICE_CLASS(dev_class) \ + .class = (dev_class), \ + .vendor = SDIO_ANY_ID, .device = SDIO_ANY_ID + +extern int sdio_register_driver(struct sdio_driver *); +extern void sdio_unregister_driver(struct sdio_driver *); + +/* + * SDIO I/O operations + */ +extern void sdio_claim_host(struct sdio_func *func); +extern void sdio_release_host(struct sdio_func *func); + +extern int sdio_enable_func(struct sdio_func *func); +extern int sdio_disable_func(struct sdio_func *func); + +extern int sdio_set_block_size(struct sdio_func *func, unsigned blksz); + +extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler); +extern int sdio_release_irq(struct sdio_func *func); + +extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz); + +extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret); +extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret, + unsigned in); +extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret); +extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret); + +extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst, + unsigned int addr, int count); +extern int sdio_readsb(struct sdio_func *func, void *dst, + unsigned int addr, int count); + +extern void sdio_writeb(struct sdio_func *func, u8 b, + unsigned int addr, int *err_ret); +extern void sdio_writew(struct sdio_func *func, u16 b, + unsigned int addr, int *err_ret); +extern void sdio_writel(struct sdio_func *func, u32 b, + unsigned int addr, int *err_ret); + +extern u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte, + unsigned int addr, int *err_ret); + +extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr, + void *src, int count); +extern int sdio_writesb(struct sdio_func *func, unsigned int addr, + void *src, int count); + +extern unsigned char sdio_f0_readb(struct sdio_func *func, + unsigned int addr, int *err_ret); +extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b, + unsigned int addr, int *err_ret); + +extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func); +extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags); + +#endif /* LINUX_MMC_SDIO_FUNC_H */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h new file mode 100644 index 00000000..9f03feed --- /dev/null +++ b/include/linux/mmc/sdio_ids.h @@ -0,0 +1,47 @@ +/* + * SDIO Classes, Interface Types, Manufacturer IDs, etc. + */ + +#ifndef LINUX_MMC_SDIO_IDS_H +#define LINUX_MMC_SDIO_IDS_H + +/* + * Standard SDIO Function Interfaces + */ + +#define SDIO_CLASS_NONE 0x00 /* Not a SDIO standard interface */ +#define SDIO_CLASS_UART 0x01 /* standard UART interface */ +#define SDIO_CLASS_BT_A 0x02 /* Type-A BlueTooth std interface */ +#define SDIO_CLASS_BT_B 0x03 /* Type-B BlueTooth std interface */ +#define SDIO_CLASS_GPS 0x04 /* GPS standard interface */ +#define SDIO_CLASS_CAMERA 0x05 /* Camera standard interface */ +#define SDIO_CLASS_PHS 0x06 /* PHS standard interface */ +#define SDIO_CLASS_WLAN 0x07 /* WLAN interface */ +#define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */ +#define SDIO_CLASS_BT_AMP 0x09 /* Type-A Bluetooth AMP interface */ + +/* + * Vendors and devices. Sort key: vendor first, device next. 
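The I/O helpers declared above are always called with the host claimed; a typical probe path enables the function, sets a block size and hooks the interrupt while holding the claim. A hedged sketch in which the block size and register offset are arbitrary:

#include <linux/mmc/sdio_func.h>

static void example_irq_handler(struct sdio_func *func)
{
	int ret;
	u8 pending = sdio_readb(func, 0x08, &ret);	/* example register offset */

	if (!ret && pending)
		dev_dbg(&func->dev, "irq status 0x%02x\n", pending);
}

static int example_sdio_setup(struct sdio_func *func)
{
	int ret;

	sdio_claim_host(func);			/* serialize access to the host */

	ret = sdio_enable_func(func);
	if (!ret)
		ret = sdio_set_block_size(func, 256);	/* arbitrary example size */
	if (!ret)
		ret = sdio_claim_irq(func, example_irq_handler);

	sdio_release_host(func);
	return ret;
}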
+ */ +#define SDIO_VENDOR_ID_INTEL 0x0089 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 +#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 +#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 +#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407 + +#define SDIO_VENDOR_ID_MARVELL 0x02df +#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 +#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 +#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105 + +#define SDIO_VENDOR_ID_SIANO 0x039a +#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 +#define SDIO_DEVICE_ID_SIANO_NICE 0x0202 +#define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300 +#define SDIO_DEVICE_ID_SIANO_VENICE 0x0301 +#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 +#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 + +#endif /* LINUX_MMC_SDIO_IDS_H */ diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h new file mode 100644 index 00000000..05f0e3db --- /dev/null +++ b/include/linux/mmc/sh_mmcif.h @@ -0,0 +1,222 @@ +/* + * include/linux/mmc/sh_mmcif.h + * + * platform data for eMMC driver + * + * Copyright (C) 2010 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + */ + +#ifndef LINUX_MMC_SH_MMCIF_H +#define LINUX_MMC_SH_MMCIF_H + +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/sh_dma.h> + +/* + * MMCIF : CE_CLK_CTRL [19:16] + * 1000 : Peripheral clock / 512 + * 0111 : Peripheral clock / 256 + * 0110 : Peripheral clock / 128 + * 0101 : Peripheral clock / 64 + * 0100 : Peripheral clock / 32 + * 0011 : Peripheral clock / 16 + * 0010 : Peripheral clock / 8 + * 0001 : Peripheral clock / 4 + * 0000 : Peripheral clock / 2 + * 1111 : Peripheral clock (sup_pclk set '1') + */ + +struct sh_mmcif_dma { + struct sh_dmae_slave chan_priv_tx; + struct sh_dmae_slave chan_priv_rx; +}; + +struct sh_mmcif_plat_data { + void (*set_pwr)(struct platform_device *pdev, int state); + void (*down_pwr)(struct platform_device *pdev); + int (*get_cd)(struct platform_device *pdef); + struct sh_mmcif_dma *dma; /* Deprecated. Instead */ + unsigned int slave_id_tx; /* use embedded slave_id_[tr]x */ + unsigned int slave_id_rx; + u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */ + unsigned long caps; + u32 ocr; +}; + +#define MMCIF_CE_CMD_SET 0x00000000 +#define MMCIF_CE_ARG 0x00000008 +#define MMCIF_CE_ARG_CMD12 0x0000000C +#define MMCIF_CE_CMD_CTRL 0x00000010 +#define MMCIF_CE_BLOCK_SET 0x00000014 +#define MMCIF_CE_CLK_CTRL 0x00000018 +#define MMCIF_CE_BUF_ACC 0x0000001C +#define MMCIF_CE_RESP3 0x00000020 +#define MMCIF_CE_RESP2 0x00000024 +#define MMCIF_CE_RESP1 0x00000028 +#define MMCIF_CE_RESP0 0x0000002C +#define MMCIF_CE_RESP_CMD12 0x00000030 +#define MMCIF_CE_DATA 0x00000034 +#define MMCIF_CE_INT 0x00000040 +#define MMCIF_CE_INT_MASK 0x00000044 +#define MMCIF_CE_HOST_STS1 0x00000048 +#define MMCIF_CE_HOST_STS2 0x0000004C +#define MMCIF_CE_VERSION 0x0000007C + +/* CE_BUF_ACC */ +#define BUF_ACC_DMAWEN (1 << 25) +#define BUF_ACC_DMAREN (1 << 24) +#define BUF_ACC_BUSW_32 (0 << 17) +#define BUF_ACC_BUSW_16 (1 << 17) +#define BUF_ACC_ATYP (1 << 16) + +/* CE_CLK_CTRL */ +#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */ +#define CLK_CLEAR (0xf << 16) +#define CLK_SUP_PCLK (0xf << 16) +#define CLKDIV_4 (1 << 16) /* mmc clock frequency. 
+ * n: bus clock/(2^(n+1)) */ +#define CLKDIV_256 (7 << 16) /* mmc clock frequency. (see above) */ +#define SRSPTO_256 (2 << 12) /* resp timeout */ +#define SRBSYTO_29 (0xf << 8) /* resp busy timeout */ +#define SRWDTO_29 (0xf << 4) /* read/write timeout */ +#define SCCSTO_29 (0xf << 0) /* ccs timeout */ + +/* CE_VERSION */ +#define SOFT_RST_ON (1 << 31) +#define SOFT_RST_OFF 0 + +static inline u32 sh_mmcif_readl(void __iomem *addr, int reg) +{ + return __raw_readl(addr + reg); +} + +static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val) +{ + __raw_writel(val, addr + reg); +} + +#define SH_MMCIF_BBS 512 /* boot block size */ + +static inline void sh_mmcif_boot_cmd_send(void __iomem *base, + unsigned long cmd, unsigned long arg) +{ + sh_mmcif_writel(base, MMCIF_CE_INT, 0); + sh_mmcif_writel(base, MMCIF_CE_ARG, arg); + sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd); +} + +static inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask) +{ + unsigned long tmp; + int cnt; + + for (cnt = 0; cnt < 1000000; cnt++) { + tmp = sh_mmcif_readl(base, MMCIF_CE_INT); + if (tmp & mask) { + sh_mmcif_writel(base, MMCIF_CE_INT, tmp & ~mask); + return 0; + } + } + + return -1; +} + +static inline int sh_mmcif_boot_cmd(void __iomem *base, + unsigned long cmd, unsigned long arg) +{ + sh_mmcif_boot_cmd_send(base, cmd, arg); + return sh_mmcif_boot_cmd_poll(base, 0x00010000); +} + +static inline int sh_mmcif_boot_do_read_single(void __iomem *base, + unsigned int block_nr, + unsigned long *buf) +{ + int k; + + /* CMD13 - Status */ + sh_mmcif_boot_cmd(base, 0x0d400000, 0x00010000); + + if (sh_mmcif_readl(base, MMCIF_CE_RESP0) != 0x0900) + return -1; + + /* CMD17 - Read */ + sh_mmcif_boot_cmd(base, 0x11480000, block_nr * SH_MMCIF_BBS); + if (sh_mmcif_boot_cmd_poll(base, 0x00100000) < 0) + return -1; + + for (k = 0; k < (SH_MMCIF_BBS / 4); k++) + buf[k] = sh_mmcif_readl(base, MMCIF_CE_DATA); + + return 0; +} + +static inline int sh_mmcif_boot_do_read(void __iomem *base, + unsigned long first_block, + unsigned long nr_blocks, + void *buf) +{ + unsigned long k; + int ret = 0; + + /* In data transfer mode: Set clock to Bus clock/4 (about 20Mhz) */ + sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, + CLK_ENABLE | CLKDIV_4 | SRSPTO_256 | + SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); + + /* CMD9 - Get CSD */ + sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000); + + /* CMD7 - Select the card */ + sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000); + + /* CMD16 - Set the block size */ + sh_mmcif_boot_cmd(base, 0x10400000, SH_MMCIF_BBS); + + for (k = 0; !ret && k < nr_blocks; k++) + ret = sh_mmcif_boot_do_read_single(base, first_block + k, + buf + (k * SH_MMCIF_BBS)); + + return ret; +} + +static inline void sh_mmcif_boot_init(void __iomem *base) +{ + /* reset */ + sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_ON); + sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_OFF); + + /* byte swap */ + sh_mmcif_writel(base, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); + + /* Set block size in MMCIF hardware */ + sh_mmcif_writel(base, MMCIF_CE_BLOCK_SET, SH_MMCIF_BBS); + + /* Enable the clock, set it to Bus clock/256 (about 325Khz). 
*/ + sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, + CLK_ENABLE | CLKDIV_256 | SRSPTO_256 | + SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); + + /* CMD0 */ + sh_mmcif_boot_cmd(base, 0x00000040, 0); + + /* CMD1 - Get OCR */ + do { + sh_mmcif_boot_cmd(base, 0x01405040, 0x40300000); /* CMD1 */ + } while ((sh_mmcif_readl(base, MMCIF_CE_RESP0) & 0x80000000) + != 0x80000000); + + /* CMD2 - Get CID */ + sh_mmcif_boot_cmd(base, 0x02806040, 0); + + /* CMD3 - Set card relative address */ + sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000); +} + +#endif /* LINUX_MMC_SH_MMCIF_H */ diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h new file mode 100644 index 00000000..e94e620a --- /dev/null +++ b/include/linux/mmc/sh_mobile_sdhi.h @@ -0,0 +1,38 @@ +#ifndef LINUX_MMC_SH_MOBILE_SDHI_H +#define LINUX_MMC_SH_MOBILE_SDHI_H + +#include <linux/types.h> + +struct platform_device; +struct tmio_mmc_data; + +#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect" +#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard" +#define SH_MOBILE_SDHI_IRQ_SDIO "sdio" + +/** + * struct sh_mobile_sdhi_ops - SDHI driver callbacks + * @cd_wakeup: trigger a card-detection run + */ +struct sh_mobile_sdhi_ops { + void (*cd_wakeup)(const struct platform_device *pdev); +}; + +struct sh_mobile_sdhi_info { + int dma_slave_tx; + int dma_slave_rx; + unsigned long tmio_flags; + unsigned long tmio_caps; + u32 tmio_ocr_mask; /* available MMC voltages */ + unsigned int cd_gpio; + struct tmio_mmc_data *pdata; + void (*set_pwr)(struct platform_device *pdev, int state); + int (*get_cd)(struct platform_device *pdev); + + /* callbacks for board specific setup code */ + int (*init)(struct platform_device *pdev, + const struct sh_mobile_sdhi_ops *ops); + void (*cleanup)(struct platform_device *pdev); +}; + +#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */ diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h new file mode 100644 index 00000000..a1c1f321 --- /dev/null +++ b/include/linux/mmc/tmio.h @@ -0,0 +1,65 @@ +/* + * include/linux/mmc/tmio.h + * + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Driver for the MMC / SD / SDIO cell found in: + * + * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 + */ +#ifndef LINUX_MMC_TMIO_H +#define LINUX_MMC_TMIO_H + +#define CTL_SD_CMD 0x00 +#define CTL_ARG_REG 0x04 +#define CTL_STOP_INTERNAL_ACTION 0x08 +#define CTL_XFER_BLK_COUNT 0xa +#define CTL_RESPONSE 0x0c +#define CTL_STATUS 0x1c +#define CTL_STATUS2 0x1e +#define CTL_IRQ_MASK 0x20 +#define CTL_SD_CARD_CLK_CTL 0x24 +#define CTL_SD_XFER_LEN 0x26 +#define CTL_SD_MEM_CARD_OPT 0x28 +#define CTL_SD_ERROR_DETAIL_STATUS 0x2c +#define CTL_SD_DATA_PORT 0x30 +#define CTL_TRANSACTION_CTL 0x34 +#define CTL_SDIO_STATUS 0x36 +#define CTL_SDIO_IRQ_MASK 0x38 +#define CTL_DMA_ENABLE 0xd8 +#define CTL_RESET_SD 0xe0 +#define CTL_SDIO_REGS 0x100 +#define CTL_CLK_AND_WAIT_CTL 0x138 +#define CTL_RESET_SDIO 0x1e0 + +/* Definitions for values the CTRL_STATUS register can take. 
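The sh_mobile_sdhi_info structure above is what a board file hands to the SDHI platform device as platform data; the init/cleanup hooks let board code wire up card-detect wakeups via sh_mobile_sdhi_ops. Sketch with invented DMA slave IDs and GPIO, and the "sh_mobile_sdhi" device name assumed to match the driver:

#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>

static struct sh_mobile_sdhi_info example_sdhi0_info = {
	.dma_slave_tx	= 21,			/* placeholder slave IDs */
	.dma_slave_rx	= 22,
	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
	.tmio_ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	.cd_gpio	= 167,			/* placeholder card-detect pin */
};

static struct platform_device example_sdhi0_device = {
	.name		= "sh_mobile_sdhi",	/* assumed driver name */
	.id		= 0,
	.dev		= {
		.platform_data = &example_sdhi0_info,
	},
};

/* registered from board init with platform_device_register(&example_sdhi0_device) */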
*/ +#define TMIO_STAT_CMDRESPEND 0x00000001 +#define TMIO_STAT_DATAEND 0x00000004 +#define TMIO_STAT_CARD_REMOVE 0x00000008 +#define TMIO_STAT_CARD_INSERT 0x00000010 +#define TMIO_STAT_SIGSTATE 0x00000020 +#define TMIO_STAT_WRPROTECT 0x00000080 +#define TMIO_STAT_CARD_REMOVE_A 0x00000100 +#define TMIO_STAT_CARD_INSERT_A 0x00000200 +#define TMIO_STAT_SIGSTATE_A 0x00000400 +#define TMIO_STAT_CMD_IDX_ERR 0x00010000 +#define TMIO_STAT_CRCFAIL 0x00020000 +#define TMIO_STAT_STOPBIT_ERR 0x00040000 +#define TMIO_STAT_DATATIMEOUT 0x00080000 +#define TMIO_STAT_RXOVERFLOW 0x00100000 +#define TMIO_STAT_TXUNDERRUN 0x00200000 +#define TMIO_STAT_CMDTIMEOUT 0x00400000 +#define TMIO_STAT_RXRDY 0x01000000 +#define TMIO_STAT_TXRQ 0x02000000 +#define TMIO_STAT_ILL_FUNC 0x20000000 +#define TMIO_STAT_CMD_BUSY 0x40000000 +#define TMIO_STAT_ILL_ACCESS 0x80000000 + +#define TMIO_BBS 512 /* Boot block size */ + +#endif /* LINUX_MMC_TMIO_H */ diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h new file mode 100644 index 00000000..c04ecfe0 --- /dev/null +++ b/include/linux/mmdebug.h @@ -0,0 +1,16 @@ +#ifndef LINUX_MM_DEBUG_H +#define LINUX_MM_DEBUG_H 1 + +#ifdef CONFIG_DEBUG_VM +#define VM_BUG_ON(cond) BUG_ON(cond) +#else +#define VM_BUG_ON(cond) do { (void)(cond); } while (0) +#endif + +#ifdef CONFIG_DEBUG_VIRTUAL +#define VIRTUAL_BUG_ON(cond) BUG_ON(cond) +#else +#define VIRTUAL_BUG_ON(cond) do { } while (0) +#endif + +#endif diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h new file mode 100644 index 00000000..c5d52780 --- /dev/null +++ b/include/linux/mmiotrace.h @@ -0,0 +1,111 @@ +#ifndef _LINUX_MMIOTRACE_H +#define _LINUX_MMIOTRACE_H + +#include <linux/types.h> +#include <linux/list.h> + +struct kmmio_probe; +struct pt_regs; + +typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *, + struct pt_regs *, unsigned long addr); +typedef void (*kmmio_post_handler_t)(struct kmmio_probe *, + unsigned long condition, struct pt_regs *); + +struct kmmio_probe { + /* kmmio internal list: */ + struct list_head list; + /* start location of the probe point: */ + unsigned long addr; + /* length of the probe region: */ + unsigned long len; + /* Called before addr is executed: */ + kmmio_pre_handler_t pre_handler; + /* Called after addr is executed: */ + kmmio_post_handler_t post_handler; + void *private; +}; + +extern unsigned int kmmio_count; + +extern int register_kmmio_probe(struct kmmio_probe *p); +extern void unregister_kmmio_probe(struct kmmio_probe *p); +extern int kmmio_init(void); +extern void kmmio_cleanup(void); + +#ifdef CONFIG_MMIOTRACE +/* kmmio is active by some kmmio_probes? */ +static inline int is_kmmio_active(void) +{ + return kmmio_count; +} + +/* Called from page fault handler. */ +extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); + +/* Called from ioremap.c */ +extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, + void __iomem *addr); +extern void mmiotrace_iounmap(volatile void __iomem *addr); + +/* For anyone to insert markers. Remember trailing newline. 
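register_kmmio_probe() above arms a page-fault based trap over an MMIO range so that the pre/post handlers run around each access, and mmiotrace_printk() (declared right below) drops markers into the same trace. A hedged sketch of arming a probe over an already ioremapped region; the length and handler bodies are illustrative only:

#include <linux/mm.h>
#include <linux/mmiotrace.h>

static void example_pre(struct kmmio_probe *p, struct pt_regs *regs,
			unsigned long addr)
{
	mmiotrace_printk("about to touch MMIO at 0x%lx\n", addr);	/* note trailing newline */
}

static void example_post(struct kmmio_probe *p, unsigned long condition,
			 struct pt_regs *regs)
{
}

static struct kmmio_probe example_probe = {
	.len		= PAGE_SIZE,		/* trap one page of registers */
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};

static int example_arm(unsigned long mmio_vaddr)
{
	example_probe.addr = mmio_vaddr;	/* virtual address from ioremap() */
	return register_kmmio_probe(&example_probe);
}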
*/ +extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...); +#else /* !CONFIG_MMIOTRACE: */ +static inline int is_kmmio_active(void) +{ + return 0; +} + +static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr) +{ + return 0; +} + +static inline void mmiotrace_ioremap(resource_size_t offset, + unsigned long size, void __iomem *addr) +{ +} + +static inline void mmiotrace_iounmap(volatile void __iomem *addr) +{ +} + +static inline __printf(1, 2) int mmiotrace_printk(const char *fmt, ...) +{ + return 0; +} +#endif /* CONFIG_MMIOTRACE */ + +enum mm_io_opcode { + MMIO_READ = 0x1, /* struct mmiotrace_rw */ + MMIO_WRITE = 0x2, /* struct mmiotrace_rw */ + MMIO_PROBE = 0x3, /* struct mmiotrace_map */ + MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */ + MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */ +}; + +struct mmiotrace_rw { + resource_size_t phys; /* PCI address of register */ + unsigned long value; + unsigned long pc; /* optional program counter */ + int map_id; + unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */ + unsigned char width; /* size of register access in bytes */ +}; + +struct mmiotrace_map { + resource_size_t phys; /* base address in PCI space */ + unsigned long virt; /* base virtual address */ + unsigned long len; /* mapping size */ + int map_id; + unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */ +}; + +/* in kernel/trace/trace_mmiotrace.c */ +extern void enable_mmiotrace(void); +extern void disable_mmiotrace(void); +extern void mmio_trace_rw(struct mmiotrace_rw *rw); +extern void mmio_trace_mapping(struct mmiotrace_map *map); +extern int mmio_trace_printk(const char *fmt, va_list args); + +#endif /* _LINUX_MMIOTRACE_H */ diff --git a/include/linux/mmtimer.h b/include/linux/mmtimer.h new file mode 100644 index 00000000..884cabf1 --- /dev/null +++ b/include/linux/mmtimer.h @@ -0,0 +1,56 @@ +/* + * Intel Multimedia Timer device interface + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved. + * + * This file should define an interface compatible with the IA-PC Multimedia + * Timers Draft Specification (rev. 0.97) from Intel. Note that some + * hardware may not be able to safely export its registers to userspace, + * so the ioctl interface should support all necessary functionality. + * + * 11/01/01 - jbarnes - initial revision + * 9/10/04 - Christoph Lameter - remove interrupt support + * 9/17/04 - jbarnes - remove test program, move some #defines to the driver + */ + +#ifndef _LINUX_MMTIMER_H +#define _LINUX_MMTIMER_H + +/* + * Breakdown of the ioctl's available. An 'optional' next to the command + * indicates that supporting this command is optional, while 'required' + * commands must be implemented if conformance is desired. + * + * MMTIMER_GETOFFSET - optional + * Should return the offset (relative to the start of the page where the + * registers are mapped) for the counter in question. 
+ * + * MMTIMER_GETRES - required + * The resolution of the clock in femto (10^-15) seconds + * + * MMTIMER_GETFREQ - required + * Frequency of the clock in Hz + * + * MMTIMER_GETBITS - required + * Number of bits in the clock's counter + * + * MMTIMER_MMAPAVAIL - required + * Returns nonzero if the registers can be mmap'd into userspace, 0 otherwise + * + * MMTIMER_GETCOUNTER - required + * Gets the current value in the counter + */ +#define MMTIMER_IOCTL_BASE 'm' + +#define MMTIMER_GETOFFSET _IO(MMTIMER_IOCTL_BASE, 0) +#define MMTIMER_GETRES _IOR(MMTIMER_IOCTL_BASE, 1, unsigned long) +#define MMTIMER_GETFREQ _IOR(MMTIMER_IOCTL_BASE, 2, unsigned long) +#define MMTIMER_GETBITS _IO(MMTIMER_IOCTL_BASE, 4) +#define MMTIMER_MMAPAVAIL _IO(MMTIMER_IOCTL_BASE, 6) +#define MMTIMER_GETCOUNTER _IOR(MMTIMER_IOCTL_BASE, 9, unsigned long) + +#endif /* _LINUX_MMTIMER_H */ diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h new file mode 100644 index 00000000..70fffeba --- /dev/null +++ b/include/linux/mmu_context.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_MMU_CONTEXT_H +#define _LINUX_MMU_CONTEXT_H + +struct mm_struct; + +void use_mm(struct mm_struct *mm); +void unuse_mm(struct mm_struct *mm); + +#endif diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h new file mode 100644 index 00000000..1d1b1e13 --- /dev/null +++ b/include/linux/mmu_notifier.h @@ -0,0 +1,379 @@ +#ifndef _LINUX_MMU_NOTIFIER_H +#define _LINUX_MMU_NOTIFIER_H + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/mm_types.h> + +struct mmu_notifier; +struct mmu_notifier_ops; + +#ifdef CONFIG_MMU_NOTIFIER + +/* + * The mmu notifier_mm structure is allocated and installed in + * mm->mmu_notifier_mm inside the mm_take_all_locks() protected + * critical section and it's released only when mm_count reaches zero + * in mmdrop(). + */ +struct mmu_notifier_mm { + /* all mmu notifiers registerd in this mm are queued in this list */ + struct hlist_head list; + /* to serialize the list modifications and hlist_unhashed */ + spinlock_t lock; +}; + +struct mmu_notifier_ops { + /* + * Called either by mmu_notifier_unregister or when the mm is + * being destroyed by exit_mmap, always before all pages are + * freed. This can run concurrently with other mmu notifier + * methods (the ones invoked outside the mm context) and it + * should tear down all secondary mmu mappings and freeze the + * secondary mmu. If this method isn't implemented you've to + * be sure that nothing could possibly write to the pages + * through the secondary mmu by the time the last thread with + * tsk->mm == mm exits. + * + * As side note: the pages freed after ->release returns could + * be immediately reallocated by the gart at an alias physical + * address with a different cache model, so if ->release isn't + * implemented because all _software_ driven memory accesses + * through the secondary mmu are terminated by the time the + * last thread of this mm quits, you've also to be sure that + * speculative _hardware_ operations can't allocate dirty + * cachelines in the cpu that could not be snooped and made + * coherent with the other read and write operations happening + * through the gart alias address, so leading to memory + * corruption. + */ + void (*release)(struct mmu_notifier *mn, + struct mm_struct *mm); + + /* + * clear_flush_young is called after the VM is + * test-and-clearing the young/accessed bitflag in the + * pte. 
This way the VM will provide proper aging to the + * accesses to the page through the secondary MMUs and not + * only to the ones through the Linux pte. + */ + int (*clear_flush_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * test_young is called to check the young/accessed bitflag in + * the secondary pte. This is used to know if the page is + * frequently used without actually clearing the flag or tearing + * down the secondary mapping on the page. + */ + int (*test_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * change_pte is called in cases that pte mapping to page is changed: + * for example, when ksm remaps pte to point to a new shared page. + */ + void (*change_pte)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address, + pte_t pte); + + /* + * Before this is invoked any secondary MMU is still ok to + * read/write to the page previously pointed to by the Linux + * pte because the page hasn't been freed yet and it won't be + * freed until this returns. If required set_page_dirty has to + * be called internally to this method. + */ + void (*invalidate_page)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * invalidate_range_start() and invalidate_range_end() must be + * paired and are called only when the mmap_sem and/or the + * locks protecting the reverse maps are held. The subsystem + * must guarantee that no additional references are taken to + * the pages in the range established between the call to + * invalidate_range_start() and the matching call to + * invalidate_range_end(). + * + * Invalidation of multiple concurrent ranges may be + * optionally permitted by the driver. Either way the + * establishment of sptes is forbidden in the range passed to + * invalidate_range_begin/end for the whole duration of the + * invalidate_range_begin/end critical section. + * + * invalidate_range_start() is called when all pages in the + * range are still mapped and have at least a refcount of one. + * + * invalidate_range_end() is called when all pages in the + * range have been unmapped and the pages have been freed by + * the VM. + * + * The VM will remove the page table entries and potentially + * the page between invalidate_range_start() and + * invalidate_range_end(). If the page must not be freed + * because of pending I/O or other circumstances then the + * invalidate_range_start() callback (or the initial mapping + * by the driver) must make sure that the refcount is kept + * elevated. + * + * If the driver increases the refcount when the pages are + * initially mapped into an address space then either + * invalidate_range_start() or invalidate_range_end() may + * decrease the refcount. If the refcount is decreased on + * invalidate_range_start() then the VM can free pages as page + * table entries are removed. If the refcount is only + * droppped on invalidate_range_end() then the driver itself + * will drop the last refcount but it must take care to flush + * any secondary tlb before doing the final free on the + * page. Pages will no longer be referenced by the linux + * address space but may still be referenced by sptes until + * the last refcount is dropped. 
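+ * As a rough sketch only (the my_* helpers are hypothetical), a driver
+ * following the "drop the refcount in range_end" scheme described above
+ * might do:
+ *
+ *     invalidate_range_start: my_unmap_sptes(dev, start, end);
+ *     invalidate_range_end:   my_flush_secondary_tlb(dev);
+ *                             my_put_page_refs(dev, start, end);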
+ */ + void (*invalidate_range_start)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); + void (*invalidate_range_end)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); +}; + +/* + * The notifier chains are protected by mmap_sem and/or the reverse map + * semaphores. Notifier chains are only changed when all reverse maps and + * the mmap_sem locks are taken. + * + * Therefore notifier chains can only be traversed when either + * + * 1. mmap_sem is held. + * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex). + * 3. No other concurrent thread can access the list (release) + */ +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; +}; + +static inline int mm_has_notifiers(struct mm_struct *mm) +{ + return unlikely(mm->mmu_notifier_mm); +} + +extern int mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern int __mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void mmu_notifier_unregister(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); +extern void __mmu_notifier_release(struct mm_struct *mm); +extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address); +extern int __mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte); +extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end); + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_release(mm); +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_clear_flush_young(mm, address); + return 0; +} + +static inline int mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_test_young(mm, address); + return 0; +} + +static inline void mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_change_pte(mm, address, pte); +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_page(mm, address); +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_start(mm, start, end); +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_end(mm, start, end); +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ + mm->mmu_notifier_mm = NULL; +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_mm_destroy(mm); +} + +/* + * These two macros will sometime replace ptep_clear_flush. 
+ * ptep_clear_flush is implemented as macro itself, so this also is + * implemented as a macro until ptep_clear_flush will converted to an + * inline function, to diminish the risk of compilation failure. The + * invalidate_page method over time can be moved outside the PT lock + * and these two macros can be later removed. + */ +#define ptep_clear_flush_notify(__vma, __address, __ptep) \ +({ \ + pte_t __pte; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __pte = ptep_clear_flush(___vma, ___address, __ptep); \ + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ + __pte; \ +}) + +#define pmdp_clear_flush_notify(__vma, __address, __pmdp) \ +({ \ + pmd_t __pmd; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + VM_BUG_ON(__address & ~HPAGE_PMD_MASK); \ + mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address, \ + (__address)+HPAGE_PMD_SIZE);\ + __pmd = pmdp_clear_flush(___vma, ___address, __pmdp); \ + mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address, \ + (__address)+HPAGE_PMD_SIZE); \ + __pmd; \ +}) + +#define pmdp_splitting_flush_notify(__vma, __address, __pmdp) \ +({ \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + VM_BUG_ON(__address & ~HPAGE_PMD_MASK); \ + mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address, \ + (__address)+HPAGE_PMD_SIZE);\ + pmdp_splitting_flush(___vma, ___address, __pmdp); \ + mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address, \ + (__address)+HPAGE_PMD_SIZE); \ +}) + +#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address); \ + __young; \ +}) + +#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address); \ + __young; \ +}) + +#define set_pte_at_notify(__mm, __address, __ptep, __pte) \ +({ \ + struct mm_struct *___mm = __mm; \ + unsigned long ___address = __address; \ + pte_t ___pte = __pte; \ + \ + set_pte_at(___mm, ___address, __ptep, ___pte); \ + mmu_notifier_change_pte(___mm, ___address, ___pte); \ +}) + +#else /* CONFIG_MMU_NOTIFIER */ + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + return 0; +} + +static inline int mmu_notifier_test_young(struct mm_struct *mm, + unsigned long address) +{ + return 0; +} + +static inline void mmu_notifier_change_pte(struct mm_struct *mm, + unsigned long address, pte_t pte) +{ +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ +} + +#define ptep_clear_flush_young_notify ptep_clear_flush_young 
+#define pmdp_clear_flush_young_notify pmdp_clear_flush_young +#define ptep_clear_flush_notify ptep_clear_flush +#define pmdp_clear_flush_notify pmdp_clear_flush +#define pmdp_splitting_flush_notify pmdp_splitting_flush +#define set_pte_at_notify set_pte_at + +#endif /* CONFIG_MMU_NOTIFIER */ + +#endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h new file mode 100644 index 00000000..5f6806bd --- /dev/null +++ b/include/linux/mmzone.h @@ -0,0 +1,1182 @@ +#ifndef _LINUX_MMZONE_H +#define _LINUX_MMZONE_H + +#ifndef __ASSEMBLY__ +#ifndef __GENERATING_BOUNDS_H + +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/bitops.h> +#include <linux/cache.h> +#include <linux/threads.h> +#include <linux/numa.h> +#include <linux/init.h> +#include <linux/seqlock.h> +#include <linux/nodemask.h> +#include <linux/pageblock-flags.h> +#include <generated/bounds.h> +#include <linux/atomic.h> +#include <asm/page.h> + +/* Free memory management - zoned buddy allocator. */ +#ifndef CONFIG_FORCE_MAX_ZONEORDER +#define MAX_ORDER 11 +#else +#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER +#endif +#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) + +/* + * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed + * costly to service. That is between allocation orders which should + * coelesce naturally under reasonable reclaim pressure and those which + * will not. + */ +#define PAGE_ALLOC_COSTLY_ORDER 3 + +#define MIGRATE_UNMOVABLE 0 +#define MIGRATE_RECLAIMABLE 1 +#define MIGRATE_MOVABLE 2 +#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */ +#define MIGRATE_RESERVE 3 +#define MIGRATE_ISOLATE 4 /* can't allocate from here */ +#define MIGRATE_TYPES 5 + +#define for_each_migratetype_order(order, type) \ + for (order = 0; order < MAX_ORDER; order++) \ + for (type = 0; type < MIGRATE_TYPES; type++) + +extern int page_group_by_mobility_disabled; + +static inline int get_pageblock_migratetype(struct page *page) +{ + return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); +} + +struct free_area { + struct list_head free_list[MIGRATE_TYPES]; + unsigned long nr_free; +}; + +struct pglist_data; + +/* + * zone->lock and zone->lru_lock are two of the hottest locks in the kernel. + * So add a wild amount of padding here to ensure that they fall into separate + * cachelines. There are very few zone structures in the machine, so space + * consumption is not a concern here. + */ +#if defined(CONFIG_SMP) +struct zone_padding { + char x[0]; +} ____cacheline_internodealigned_in_smp; +#define ZONE_PADDING(name) struct zone_padding name; +#else +#define ZONE_PADDING(name) +#endif + +enum zone_stat_item { + /* First 128 byte cacheline (assuming 64 bit words) */ + NR_FREE_PAGES, + NR_LRU_BASE, + NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ + NR_ACTIVE_ANON, /* " " " " " */ + NR_INACTIVE_FILE, /* " " " " " */ + NR_ACTIVE_FILE, /* " " " " " */ + NR_UNEVICTABLE, /* " " " " " */ + NR_MLOCK, /* mlock()ed pages found and moved off LRU */ + NR_ANON_PAGES, /* Mapped anonymous pages */ + NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 
+ only modified from process context */ + NR_FILE_PAGES, + NR_FILE_DIRTY, + NR_WRITEBACK, + NR_SLAB_RECLAIMABLE, + NR_SLAB_UNRECLAIMABLE, + NR_PAGETABLE, /* used for pagetables */ + NR_KERNEL_STACK, + /* Second 128 byte cacheline */ + NR_UNSTABLE_NFS, /* NFS unstable pages */ + NR_BOUNCE, + NR_VMSCAN_WRITE, + NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ + NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ + NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ + NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ + NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ + NR_DIRTIED, /* page dirtyings since bootup */ + NR_WRITTEN, /* page writings since bootup */ +#ifdef CONFIG_NUMA + NUMA_HIT, /* allocated in intended node */ + NUMA_MISS, /* allocated in non intended node */ + NUMA_FOREIGN, /* was intended here, hit elsewhere */ + NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ + NUMA_LOCAL, /* allocation from local node */ + NUMA_OTHER, /* allocation from other node */ +#endif + NR_ANON_TRANSPARENT_HUGEPAGES, + NR_VM_ZONE_STAT_ITEMS }; + +/* + * We do arithmetic on the LRU lists in various places in the code, + * so it is important to keep the active lists LRU_ACTIVE higher in + * the array than the corresponding inactive lists, and to keep + * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. + * + * This has to be kept in sync with the statistics in zone_stat_item + * above and the descriptions in vmstat_text in mm/vmstat.c + */ +#define LRU_BASE 0 +#define LRU_ACTIVE 1 +#define LRU_FILE 2 + +enum lru_list { + LRU_INACTIVE_ANON = LRU_BASE, + LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, + LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, + LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, + LRU_UNEVICTABLE, + NR_LRU_LISTS +}; + +#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) + +#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) + +static inline int is_file_lru(enum lru_list lru) +{ + return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); +} + +static inline int is_active_lru(enum lru_list lru) +{ + return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); +} + +static inline int is_unevictable_lru(enum lru_list lru) +{ + return (lru == LRU_UNEVICTABLE); +} + +struct lruvec { + struct list_head lists[NR_LRU_LISTS]; +}; + +/* Mask used at gathering information at once (see memcontrol.c) */ +#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) +#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) +#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON) +#define LRU_ALL ((1 << NR_LRU_LISTS) - 1) + +/* Isolate inactive pages */ +#define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1) +/* Isolate active pages */ +#define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2) +/* Isolate clean file */ +#define ISOLATE_CLEAN ((__force isolate_mode_t)0x4) +/* Isolate unmapped file */ +#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8) +/* Isolate for asynchronous migration */ +#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10) + +/* LRU Isolation modes. 
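+ * The ISOLATE_* flags above are or-ed together into an isolate_mode_t;
+ * for example (an illustrative combination, not a requirement), a caller
+ * that only wants cheaply reclaimable pages might pass
+ * ISOLATE_INACTIVE | ISOLATE_CLEAN | ISOLATE_UNMAPPED.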
*/ +typedef unsigned __bitwise__ isolate_mode_t; + +enum zone_watermarks { + WMARK_MIN, + WMARK_LOW, + WMARK_HIGH, + NR_WMARK +}; + +#define min_wmark_pages(z) (z->watermark[WMARK_MIN]) +#define low_wmark_pages(z) (z->watermark[WMARK_LOW]) +#define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) + +struct per_cpu_pages { + int count; /* number of pages in the list */ + int high; /* high watermark, emptying needed */ + int batch; /* chunk size for buddy add/remove */ + + /* Lists of pages, one per migrate type stored on the pcp-lists */ + struct list_head lists[MIGRATE_PCPTYPES]; +}; + +struct per_cpu_pageset { + struct per_cpu_pages pcp; +#ifdef CONFIG_NUMA + s8 expire; +#endif +#ifdef CONFIG_SMP + s8 stat_threshold; + s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; +#endif +}; + +#endif /* !__GENERATING_BOUNDS.H */ + +enum zone_type { +#ifdef CONFIG_ZONE_DMA + /* + * ZONE_DMA is used when there are devices that are not able + * to do DMA to all of addressable memory (ZONE_NORMAL). Then we + * carve out the portion of memory that is needed for these devices. + * The range is arch specific. + * + * Some examples + * + * Architecture Limit + * --------------------------- + * parisc, ia64, sparc <4G + * s390 <2G + * arm Various + * alpha Unlimited or 0-16MB. + * + * i386, x86_64 and multiple other arches + * <16M. + */ + ZONE_DMA, +#endif +#ifdef CONFIG_ZONE_DMA32 + /* + * x86_64 needs two ZONE_DMAs because it supports devices that are + * only able to do DMA to the lower 16M but also 32 bit devices that + * can only do DMA areas below 4G. + */ + ZONE_DMA32, +#endif + /* + * Normal addressable memory is in ZONE_NORMAL. DMA operations can be + * performed on pages in ZONE_NORMAL if the DMA devices support + * transfers to all addressable memory. + */ + ZONE_NORMAL, +#ifdef CONFIG_HIGHMEM + /* + * A memory area that is only addressable by the kernel through + * mapping portions into its own address space. This is for example + * used by i386 to allow the kernel to address the memory beyond + * 900MB. The kernel will set up special mappings (page + * table entries on i386) for each page that the kernel needs to + * access. + */ + ZONE_HIGHMEM, +#endif + ZONE_MOVABLE, + __MAX_NR_ZONES +}; + +#ifndef __GENERATING_BOUNDS_H + +/* + * When a memory allocation must conform to specific limitations (such + * as being suitable for DMA) the caller will pass in hints to the + * allocator in the gfp_mask, in the zone modifier bits. These bits + * are used to select a priority ordered list of memory zones which + * match the requested limits. See gfp_zone() in include/linux/gfp.h + */ + +#if MAX_NR_ZONES < 2 +#define ZONES_SHIFT 0 +#elif MAX_NR_ZONES <= 2 +#define ZONES_SHIFT 1 +#elif MAX_NR_ZONES <= 4 +#define ZONES_SHIFT 2 +#else +#error ZONES_SHIFT -- too many zones configured adjust calculation +#endif + +struct zone_reclaim_stat { + /* + * The pageout code in vmscan.c keeps track of how many of the + * mem/swap backed and file backed pages are refeferenced. + * The higher the rotated/scanned ratio, the more valuable + * that cache is. 
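+ * (Illustration: if recent_rotated[1] is close to recent_scanned[1],
+ * most scanned file pages were re-referenced recently, so reclaim effort
+ * is better spent on the anon lists, and vice versa.)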
+ * + * The anon LRU stats live in [0], file LRU stats in [1] + */ + unsigned long recent_rotated[2]; + unsigned long recent_scanned[2]; +}; + +struct zone { + /* Fields commonly accessed by the page allocator */ + + /* zone watermarks, access with *_wmark_pages(zone) macros */ + unsigned long watermark[NR_WMARK]; + + /* + * When free pages are below this point, additional steps are taken + * when reading the number of free pages to avoid per-cpu counter + * drift allowing watermarks to be breached + */ + unsigned long percpu_drift_mark; + + /* + * We don't know if the memory that we're going to allocate will be freeable + * or/and it will be released eventually, so to avoid totally wasting several + * GB of ram we must reserve some of the lower zone memory (otherwise we risk + * to run OOM on the lower zones despite there's tons of freeable ram + * on the higher zones). This array is recalculated at runtime if the + * sysctl_lowmem_reserve_ratio sysctl changes. + */ + unsigned long lowmem_reserve[MAX_NR_ZONES]; + + /* + * This is a per-zone reserve of pages that should not be + * considered dirtyable memory. + */ + unsigned long dirty_balance_reserve; + +#ifdef CONFIG_NUMA + int node; + /* + * zone reclaim becomes active if more unmapped pages exist. + */ + unsigned long min_unmapped_pages; + unsigned long min_slab_pages; +#endif + struct per_cpu_pageset __percpu *pageset; + /* + * free areas of different sizes + */ + spinlock_t lock; + int all_unreclaimable; /* All pages pinned */ +#ifdef CONFIG_MEMORY_HOTPLUG + /* see spanned/present_pages for more description */ + seqlock_t span_seqlock; +#endif + struct free_area free_area[MAX_ORDER]; + +#ifndef CONFIG_SPARSEMEM + /* + * Flags for a pageblock_nr_pages block. See pageblock-flags.h. + * In SPARSEMEM, this map is stored in struct mem_section + */ + unsigned long *pageblock_flags; +#endif /* CONFIG_SPARSEMEM */ + +#ifdef CONFIG_COMPACTION + /* + * On compaction failure, 1<<compact_defer_shift compactions + * are skipped before trying again. The number attempted since + * last failure is tracked with compact_considered. + */ + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; +#endif + + ZONE_PADDING(_pad1_) + + /* Fields commonly accessed by the page reclaim scanner */ + spinlock_t lru_lock; + struct lruvec lruvec; + + struct zone_reclaim_stat reclaim_stat; + + unsigned long pages_scanned; /* since last reclaim */ + unsigned long flags; /* zone flags, see below */ + + /* Zone statistics */ + atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + /* + * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on + * this zone's LRU. Maintained by the pageout code. + */ + unsigned int inactive_ratio; + + + ZONE_PADDING(_pad2_) + /* Rarely used or read-mostly fields */ + + /* + * wait_table -- the array holding the hash table + * wait_table_hash_nr_entries -- the size of the hash table array + * wait_table_bits -- wait_table_size == (1 << wait_table_bits) + * + * The purpose of all these is to keep track of the people + * waiting for a page to become available and make them + * runnable again when possible. The trouble is that this + * consumes a lot of space, especially when so few things + * wait on pages at a given time. So instead of using + * per-page waitqueues, we use a waitqueue hash table. + * + * The bucket discipline is to sleep on the same queue when + * colliding and wake all in that wait queue when removing. 
+ * When something wakes, it must check to be sure its page is + * truly available, a la thundering herd. The cost of a + * collision is great, but given the expected load of the + * table, they should be so rare as to be outweighed by the + * benefits from the saved space. + * + * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the + * primary users of these fields, and in mm/page_alloc.c + * free_area_init_core() performs the initialization of them. + */ + wait_queue_head_t * wait_table; + unsigned long wait_table_hash_nr_entries; + unsigned long wait_table_bits; + + /* + * Discontig memory support fields. + */ + struct pglist_data *zone_pgdat; + /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ + unsigned long zone_start_pfn; + + /* + * zone_start_pfn, spanned_pages and present_pages are all + * protected by span_seqlock. It is a seqlock because it has + * to be read outside of zone->lock, and it is done in the main + * allocator path. But, it is written quite infrequently. + * + * The lock is declared along with zone->lock because it is + * frequently read in proximity to zone->lock. It's good to + * give them a chance of being in the same cacheline. + */ + unsigned long spanned_pages; /* total size, including holes */ + unsigned long present_pages; /* amount of memory (excluding holes) */ + + /* + * rarely used fields: + */ + const char *name; +} ____cacheline_internodealigned_in_smp; + +typedef enum { + ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ + ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ + ZONE_CONGESTED, /* zone has many dirty pages backed by + * a congested BDI + */ +} zone_flags_t; + +static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) +{ + set_bit(flag, &zone->flags); +} + +static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) +{ + return test_and_set_bit(flag, &zone->flags); +} + +static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) +{ + clear_bit(flag, &zone->flags); +} + +static inline int zone_is_reclaim_congested(const struct zone *zone) +{ + return test_bit(ZONE_CONGESTED, &zone->flags); +} + +static inline int zone_is_reclaim_locked(const struct zone *zone) +{ + return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); +} + +static inline int zone_is_oom_locked(const struct zone *zone) +{ + return test_bit(ZONE_OOM_LOCKED, &zone->flags); +} + +/* + * The "priority" of VM scanning is how much of the queues we will scan in one + * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the + * queues ("queue_length >> 12") during an aging round. + */ +#define DEF_PRIORITY 12 + +/* Maximum number of zones on a zonelist */ +#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) + +#ifdef CONFIG_NUMA + +/* + * The NUMA zonelists are doubled because we need zonelists that restrict the + * allocations to a single node for GFP_THISNODE. + * + * [0] : Zonelist with fallback + * [1] : No fallback (GFP_THISNODE) + */ +#define MAX_ZONELISTS 2 + + +/* + * We cache key information from each zonelist for smaller cache + * footprint when scanning for free pages in get_page_from_freelist(). + * + * 1) The BITMAP fullzones tracks which zones in a zonelist have come + * up short of free memory since the last time (last_fullzone_zap) + * we zero'd fullzones. + * 2) The array z_to_n[] maps each zone in the zonelist to its node + * id, so that we can efficiently evaluate whether that node is + * set in the current tasks mems_allowed. 
+ * + * Both fullzones and z_to_n[] are one-to-one with the zonelist, + * indexed by a zones offset in the zonelist zones[] array. + * + * The get_page_from_freelist() routine does two scans. During the + * first scan, we skip zones whose corresponding bit in 'fullzones' + * is set or whose corresponding node in current->mems_allowed (which + * comes from cpusets) is not set. During the second scan, we bypass + * this zonelist_cache, to ensure we look methodically at each zone. + * + * Once per second, we zero out (zap) fullzones, forcing us to + * reconsider nodes that might have regained more free memory. + * The field last_full_zap is the time we last zapped fullzones. + * + * This mechanism reduces the amount of time we waste repeatedly + * reexaming zones for free memory when they just came up low on + * memory momentarilly ago. + * + * The zonelist_cache struct members logically belong in struct + * zonelist. However, the mempolicy zonelists constructed for + * MPOL_BIND are intentionally variable length (and usually much + * shorter). A general purpose mechanism for handling structs with + * multiple variable length members is more mechanism than we want + * here. We resort to some special case hackery instead. + * + * The MPOL_BIND zonelists don't need this zonelist_cache (in good + * part because they are shorter), so we put the fixed length stuff + * at the front of the zonelist struct, ending in a variable length + * zones[], as is needed by MPOL_BIND. + * + * Then we put the optional zonelist cache on the end of the zonelist + * struct. This optional stuff is found by a 'zlcache_ptr' pointer in + * the fixed length portion at the front of the struct. This pointer + * both enables us to find the zonelist cache, and in the case of + * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL) + * to know that the zonelist cache is not there. + * + * The end result is that struct zonelists come in two flavors: + * 1) The full, fixed length version, shown below, and + * 2) The custom zonelists for MPOL_BIND. + * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache. + * + * Even though there may be multiple CPU cores on a node modifying + * fullzones or last_full_zap in the same zonelist_cache at the same + * time, we don't lock it. This is just hint data - if it is wrong now + * and then, the allocator will still function, perhaps a bit slower. + */ + + +struct zonelist_cache { + unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */ + DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */ + unsigned long last_full_zap; /* when last zap'd (jiffies) */ +}; +#else +#define MAX_ZONELISTS 1 +struct zonelist_cache; +#endif + +/* + * This struct contains information about a zone in a zonelist. It is stored + * here to avoid dereferences into large structures and lookups of tables + */ +struct zoneref { + struct zone *zone; /* Pointer to actual zone */ + int zone_idx; /* zone_idx(zoneref->zone) */ +}; + +/* + * One allocation request operates on a zonelist. A zonelist + * is a list of zones, the first one is the 'goal' of the + * allocation, the other zones are fallback zones, in decreasing + * priority. + * + * If zlcache_ptr is not NULL, then it is just the address of zlcache, + * as explained above. If zlcache_ptr is NULL, there is no zlcache. + * * + * To speed the reading of the zonelist, the zonerefs contain the zone index + * of the entry being read. 
Helper functions to access information given + * a struct zoneref are + * + * zonelist_zone() - Return the struct zone * for an entry in _zonerefs + * zonelist_zone_idx() - Return the index of the zone for an entry + * zonelist_node_idx() - Return the index of the node for an entry + */ +struct zonelist { + struct zonelist_cache *zlcache_ptr; // NULL or &zlcache + struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; +#ifdef CONFIG_NUMA + struct zonelist_cache zlcache; // optional ... +#endif +}; + +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +struct node_active_region { + unsigned long start_pfn; + unsigned long end_pfn; + int nid; +}; +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +#ifndef CONFIG_DISCONTIGMEM +/* The array of struct pages - for discontigmem use pgdat->lmem_map */ +extern struct page *mem_map; +#endif + +/* + * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM + * (mostly NUMA machines?) to denote a higher-level memory zone than the + * zone denotes. + * + * On NUMA machines, each NUMA node would have a pg_data_t to describe + * it's memory layout. + * + * Memory statistics and page replacement data structures are maintained on a + * per-zone basis. + */ +struct bootmem_data; +typedef struct pglist_data { + struct zone node_zones[MAX_NR_ZONES]; + struct zonelist node_zonelists[MAX_ZONELISTS]; + int nr_zones; +#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ + struct page *node_mem_map; +#ifdef CONFIG_CGROUP_MEM_RES_CTLR + struct page_cgroup *node_page_cgroup; +#endif +#endif +#ifndef CONFIG_NO_BOOTMEM + struct bootmem_data *bdata; +#endif +#ifdef CONFIG_MEMORY_HOTPLUG + /* + * Must be held any time you expect node_start_pfn, node_present_pages + * or node_spanned_pages stay constant. Holding this will also + * guarantee that any pfn_valid() stays that way. + * + * Nests above zone->lock and zone->size_seqlock. 
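+ * (Callers are normally expected to take it via the
+ * pgdat_resize_lock()/pgdat_resize_unlock() helpers in
+ * <linux/memory_hotplug.h>, which is included below, rather than
+ * locking the spinlock by hand.)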
+ */ + spinlock_t node_size_lock; +#endif + unsigned long node_start_pfn; + unsigned long node_present_pages; /* total number of physical pages */ + unsigned long node_spanned_pages; /* total size of physical page + range, including holes */ + int node_id; + wait_queue_head_t kswapd_wait; + struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */ + int kswapd_max_order; + enum zone_type classzone_idx; +} pg_data_t; + +#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) +#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) +#ifdef CONFIG_FLAT_NODE_MEM_MAP +#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) +#else +#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) +#endif +#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) + +#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) + +#define node_end_pfn(nid) ({\ + pg_data_t *__pgdat = NODE_DATA(nid);\ + __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\ +}) + +#include <linux/memory_hotplug.h> + +extern struct mutex zonelists_mutex; +void build_all_zonelists(void *data); +void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); +bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags); +bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags); +enum memmap_context { + MEMMAP_EARLY, + MEMMAP_HOTPLUG, +}; +extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, + unsigned long size, + enum memmap_context context); + +#ifdef CONFIG_HAVE_MEMORY_PRESENT +void memory_present(int nid, unsigned long start, unsigned long end); +#else +static inline void memory_present(int nid, unsigned long start, unsigned long end) {} +#endif + +#ifdef CONFIG_HAVE_MEMORYLESS_NODES +int local_memory_node(int node_id); +#else +static inline int local_memory_node(int node_id) { return node_id; }; +#endif + +#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE +unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); +#endif + +/* + * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. + */ +#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) + +static inline int populated_zone(struct zone *zone) +{ + return (!!zone->present_pages); +} + +extern int movable_zone; + +static inline int zone_movable_is_highmem(void) +{ +#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE) + return movable_zone == ZONE_HIGHMEM; +#else + return 0; +#endif +} + +static inline int is_highmem_idx(enum zone_type idx) +{ +#ifdef CONFIG_HIGHMEM + return (idx == ZONE_HIGHMEM || + (idx == ZONE_MOVABLE && zone_movable_is_highmem())); +#else + return 0; +#endif +} + +static inline int is_normal_idx(enum zone_type idx) +{ + return (idx == ZONE_NORMAL); +} + +/** + * is_highmem - helper function to quickly check if a struct zone is a + * highmem zone or not. This is an attempt to keep references + * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. 
+ * @zone - pointer to struct zone variable + */ +static inline int is_highmem(struct zone *zone) +{ +#ifdef CONFIG_HIGHMEM + int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; + return zone_off == ZONE_HIGHMEM * sizeof(*zone) || + (zone_off == ZONE_MOVABLE * sizeof(*zone) && + zone_movable_is_highmem()); +#else + return 0; +#endif +} + +static inline int is_normal(struct zone *zone) +{ + return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; +} + +static inline int is_dma32(struct zone *zone) +{ +#ifdef CONFIG_ZONE_DMA32 + return zone == zone->zone_pgdat->node_zones + ZONE_DMA32; +#else + return 0; +#endif +} + +static inline int is_dma(struct zone *zone) +{ +#ifdef CONFIG_ZONE_DMA + return zone == zone->zone_pgdat->node_zones + ZONE_DMA; +#else + return 0; +#endif +} + +/* These two functions are used to setup the per zone pages min values */ +struct ctl_table; +int min_free_kbytes_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; +int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); + +extern int numa_zonelist_order_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern char numa_zonelist_order[]; +#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ + +#ifndef CONFIG_NEED_MULTIPLE_NODES + +extern struct pglist_data contig_page_data; +#define NODE_DATA(nid) (&contig_page_data) +#define NODE_MEM_MAP(nid) mem_map + +#else /* CONFIG_NEED_MULTIPLE_NODES */ + +#include <asm/mmzone.h> + +#endif /* !CONFIG_NEED_MULTIPLE_NODES */ + +extern struct pglist_data *first_online_pgdat(void); +extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); +extern struct zone *next_zone(struct zone *zone); + +/** + * for_each_online_pgdat - helper macro to iterate over all online nodes + * @pgdat - pointer to a pg_data_t variable + */ +#define for_each_online_pgdat(pgdat) \ + for (pgdat = first_online_pgdat(); \ + pgdat; \ + pgdat = next_online_pgdat(pgdat)) +/** + * for_each_zone - helper macro to iterate over all memory zones + * @zone - pointer to struct zone variable + * + * The user only needs to declare the zone variable, for_each_zone + * fills it in. 
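+ * A minimal usage sketch (illustrative only):
+ *
+ *     struct zone *zone;
+ *
+ *     for_each_zone(zone)
+ *         pr_info("zone %s\n", zone->name);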
+ */ +#define for_each_zone(zone) \ + for (zone = (first_online_pgdat())->node_zones; \ + zone; \ + zone = next_zone(zone)) + +#define for_each_populated_zone(zone) \ + for (zone = (first_online_pgdat())->node_zones; \ + zone; \ + zone = next_zone(zone)) \ + if (!populated_zone(zone)) \ + ; /* do nothing */ \ + else + +static inline struct zone *zonelist_zone(struct zoneref *zoneref) +{ + return zoneref->zone; +} + +static inline int zonelist_zone_idx(struct zoneref *zoneref) +{ + return zoneref->zone_idx; +} + +static inline int zonelist_node_idx(struct zoneref *zoneref) +{ +#ifdef CONFIG_NUMA + /* zone_to_nid not available in this context */ + return zoneref->zone->node; +#else + return 0; +#endif /* CONFIG_NUMA */ +} + +/** + * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point + * @z - The cursor used as a starting point for the search + * @highest_zoneidx - The zone index of the highest zone to return + * @nodes - An optional nodemask to filter the zonelist with + * @zone - The first suitable zone found is returned via this parameter + * + * This function returns the next zone at or below a given zone index that is + * within the allowed nodemask using a cursor as the starting point for the + * search. The zoneref returned is a cursor that represents the current zone + * being examined. It should be advanced by one before calling + * next_zones_zonelist again. + */ +struct zoneref *next_zones_zonelist(struct zoneref *z, + enum zone_type highest_zoneidx, + nodemask_t *nodes, + struct zone **zone); + +/** + * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist + * @zonelist - The zonelist to search for a suitable zone + * @highest_zoneidx - The zone index of the highest zone to return + * @nodes - An optional nodemask to filter the zonelist with + * @zone - The first suitable zone found is returned via this parameter + * + * This function returns the first zone at or below a given zone index that is + * within the allowed nodemask. The zoneref returned is a cursor that can be + * used to iterate the zonelist with next_zones_zonelist by advancing it by + * one before calling. 
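+ * In practice most callers do not use this directly; the
+ * for_each_zone_zonelist_nodemask() macro defined below wraps the
+ * first_zones_zonelist()/next_zones_zonelist() pair into a loop.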
+ */ +static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, + enum zone_type highest_zoneidx, + nodemask_t *nodes, + struct zone **zone) +{ + return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, + zone); +} + +/** + * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask + * @zone - The current zone in the iterator + * @z - The current pointer within zonelist->zones being iterated + * @zlist - The zonelist being iterated + * @highidx - The zone index of the highest zone to return + * @nodemask - Nodemask allowed by the allocator + * + * This iterator iterates though all zones at or below a given zone index and + * within a given nodemask + */ +#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ + for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ + zone; \ + z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \ + +/** + * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index + * @zone - The current zone in the iterator + * @z - The current pointer within zonelist->zones being iterated + * @zlist - The zonelist being iterated + * @highidx - The zone index of the highest zone to return + * + * This iterator iterates though all zones at or below a given zone index. + */ +#define for_each_zone_zonelist(zone, z, zlist, highidx) \ + for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) + +#ifdef CONFIG_SPARSEMEM +#include <asm/sparsemem.h> +#endif + +#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ + !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) +static inline unsigned long early_pfn_to_nid(unsigned long pfn) +{ + return 0; +} +#endif + +#ifdef CONFIG_FLATMEM +#define pfn_to_nid(pfn) (0) +#endif + +#ifdef CONFIG_SPARSEMEM + +/* + * SECTION_SHIFT #bits space required to store a section # + * + * PA_SECTION_SHIFT physical address to/from section number + * PFN_SECTION_SHIFT pfn to/from section number + */ +#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) + +#define PA_SECTION_SHIFT (SECTION_SIZE_BITS) +#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) + +#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) + +#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) +#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) + +#define SECTION_BLOCKFLAGS_BITS \ + ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) + +#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS +#error Allocator MAX_ORDER exceeds SECTION_SIZE +#endif + +#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) +#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) + +#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) +#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) + +struct page; +struct page_cgroup; +struct mem_section { + /* + * This is, logically, a pointer to an array of struct + * pages. However, it is stored with some other magic. + * (see sparse.c::sparse_init_one_section()) + * + * Additionally during early boot we encode node id of + * the location of the section here to guide allocation. + * (see sparse.c::memory_present()) + * + * Making it a UL at least makes someone do a cast + * before using it wrong. 
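+ * (For example, __section_mem_map_addr() below recovers the usable
+ * struct page pointer by masking the stored value with SECTION_MAP_MASK.)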
+ */ + unsigned long section_mem_map; + + /* See declaration of similar field in struct zone */ + unsigned long *pageblock_flags; +#ifdef CONFIG_CGROUP_MEM_RES_CTLR + /* + * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use + * section. (see memcontrol.h/page_cgroup.h about this.) + */ + struct page_cgroup *page_cgroup; + unsigned long pad; +#endif +}; + +#ifdef CONFIG_SPARSEMEM_EXTREME +#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section)) +#else +#define SECTIONS_PER_ROOT 1 +#endif + +#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) +#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT) +#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) + +#ifdef CONFIG_SPARSEMEM_EXTREME +extern struct mem_section *mem_section[NR_SECTION_ROOTS]; +#else +extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; +#endif + +static inline struct mem_section *__nr_to_section(unsigned long nr) +{ + if (!mem_section[SECTION_NR_TO_ROOT(nr)]) + return NULL; + return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; +} +extern int __section_nr(struct mem_section* ms); +extern unsigned long usemap_size(void); + +/* + * We use the lower bits of the mem_map pointer to store + * a little bit of information. There should be at least + * 3 bits here due to 32-bit alignment. + */ +#define SECTION_MARKED_PRESENT (1UL<<0) +#define SECTION_HAS_MEM_MAP (1UL<<1) +#define SECTION_MAP_LAST_BIT (1UL<<2) +#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) +#define SECTION_NID_SHIFT 2 + +static inline struct page *__section_mem_map_addr(struct mem_section *section) +{ + unsigned long map = section->section_mem_map; + map &= SECTION_MAP_MASK; + return (struct page *)map; +} + +static inline int present_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); +} + +static inline int present_section_nr(unsigned long nr) +{ + return present_section(__nr_to_section(nr)); +} + +static inline int valid_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); +} + +static inline int valid_section_nr(unsigned long nr) +{ + return valid_section(__nr_to_section(nr)); +} + +static inline struct mem_section *__pfn_to_section(unsigned long pfn) +{ + return __nr_to_section(pfn_to_section_nr(pfn)); +} + +#ifndef CONFIG_HAVE_ARCH_PFN_VALID +static inline int pfn_valid(unsigned long pfn) +{ + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); +} +#endif + +static inline int pfn_present(unsigned long pfn) +{ + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + return present_section(__nr_to_section(pfn_to_section_nr(pfn))); +} + +/* + * These are _only_ used during initialisation, therefore they + * can use __initdata ... They could have names to indicate + * this restriction. 
+ */ +#ifdef CONFIG_NUMA +#define pfn_to_nid(pfn) \ +({ \ + unsigned long __pfn_to_nid_pfn = (pfn); \ + page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ +}) +#else +#define pfn_to_nid(pfn) (0) +#endif + +#define early_pfn_valid(pfn) pfn_valid(pfn) +void sparse_init(void); +#else +#define sparse_init() do {} while (0) +#define sparse_index_init(_sec, _nid) do {} while (0) +#endif /* CONFIG_SPARSEMEM */ + +#ifdef CONFIG_NODES_SPAN_OTHER_NODES +bool early_pfn_in_nid(unsigned long pfn, int nid); +#else +#define early_pfn_in_nid(pfn, nid) (1) +#endif + +#ifndef early_pfn_valid +#define early_pfn_valid(pfn) (1) +#endif + +void memory_present(int nid, unsigned long start, unsigned long end); +unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); + +/* + * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we + * need to check pfn validility within that MAX_ORDER_NR_PAGES block. + * pfn_valid_within() should be used in this case; we optimise this away + * when we have no holes within a MAX_ORDER_NR_PAGES block. + */ +#ifdef CONFIG_HOLES_IN_ZONE +#define pfn_valid_within(pfn) pfn_valid(pfn) +#else +#define pfn_valid_within(pfn) (1) +#endif + +#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL +/* + * pfn_valid() is meant to be able to tell if a given PFN has valid memmap + * associated with it or not. In FLATMEM, it is expected that holes always + * have valid memmap as long as there is valid PFNs either side of the hole. + * In SPARSEMEM, it is assumed that a valid section has a memmap for the + * entire section. + * + * However, an ARM, and maybe other embedded architectures in the future + * free memmap backing holes to save memory on the assumption the memmap is + * never used. The page_zone linkages are then broken even though pfn_valid() + * returns true. A walker of the full memmap must then do this additional + * check to ensure the memmap they are looking at is sane by making sure + * the zone and PFN linkages are still valid. This is expensive, but walkers + * of the full memmap are extremely rare. + */ +int memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone); +#else +static inline int memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone) +{ + return 1; +} +#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ + +#endif /* !__GENERATING_BOUNDS.H */ +#endif /* !__ASSEMBLY__ */ +#endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h new file mode 100644 index 00000000..5a8e3903 --- /dev/null +++ b/include/linux/mnt_namespace.h @@ -0,0 +1,17 @@ +#ifndef _NAMESPACE_H_ +#define _NAMESPACE_H_ +#ifdef __KERNEL__ + +struct mnt_namespace; +struct fs_struct; + +extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, + struct fs_struct *); +extern void put_mnt_ns(struct mnt_namespace *ns); + +extern const struct file_operations proc_mounts_operations; +extern const struct file_operations proc_mountinfo_operations; +extern const struct file_operations proc_mountstats_operations; + +#endif +#endif diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h new file mode 100644 index 00000000..501da4cb --- /dev/null +++ b/include/linux/mod_devicetable.h @@ -0,0 +1,593 @@ +/* + * Device tables which are exported to userspace via + * scripts/mod/file2alias.c. You must keep that file in sync with this + * header. 
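+ * A driver normally exports its table with MODULE_DEVICE_TABLE(); an
+ * illustrative PCI example (my_pci_tbl is a hypothetical name, the IDs
+ * are the Realtek RTL-8139, and PCI_DEVICE() comes from <linux/pci.h>):
+ *
+ *     static const struct pci_device_id my_pci_tbl[] = {
+ *         { PCI_DEVICE(0x10ec, 0x8139) },
+ *         { 0, }
+ *     };
+ *     MODULE_DEVICE_TABLE(pci, my_pci_tbl);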
+ */ + +#ifndef LINUX_MOD_DEVICETABLE_H +#define LINUX_MOD_DEVICETABLE_H + +#ifdef __KERNEL__ +#include <linux/types.h> +typedef unsigned long kernel_ulong_t; +#endif + +#define PCI_ANY_ID (~0) + +struct pci_device_id { + __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ + __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ + __u32 class, class_mask; /* (class,subclass,prog-if) triplet */ + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + + +#define IEEE1394_MATCH_VENDOR_ID 0x0001 +#define IEEE1394_MATCH_MODEL_ID 0x0002 +#define IEEE1394_MATCH_SPECIFIER_ID 0x0004 +#define IEEE1394_MATCH_VERSION 0x0008 + +struct ieee1394_device_id { + __u32 match_flags; + __u32 vendor_id; + __u32 model_id; + __u32 specifier_id; + __u32 version; + kernel_ulong_t driver_data + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + + +/* + * Device table entry for "new style" table-driven USB drivers. + * User mode code can read these tables to choose which modules to load. + * Declare the table as a MODULE_DEVICE_TABLE. + * + * A probe() parameter will point to a matching entry from this table. + * Use the driver_info field for each match to hold information tied + * to that match: device quirks, etc. + * + * Terminate the driver's table with an all-zeroes entry. + * Use the flag values to control which fields are compared. + */ + +/** + * struct usb_device_id - identifies USB devices for probing and hotplugging + * @match_flags: Bit mask controlling of the other fields are used to match + * against new devices. Any field except for driver_info may be used, + * although some only make sense in conjunction with other fields. + * This is usually set by a USB_DEVICE_*() macro, which sets all + * other fields in this structure except for driver_info. + * @idVendor: USB vendor ID for a device; numbers are assigned + * by the USB forum to its members. + * @idProduct: Vendor-assigned product ID. + * @bcdDevice_lo: Low end of range of vendor-assigned product version numbers. + * This is also used to identify individual product versions, for + * a range consisting of a single device. + * @bcdDevice_hi: High end of version number range. The range of product + * versions is inclusive. + * @bDeviceClass: Class of device; numbers are assigned + * by the USB forum. Products may choose to implement classes, + * or be vendor-specific. Device classes specify behavior of all + * the interfaces on a devices. + * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. + * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. + * @bInterfaceClass: Class of interface; numbers are assigned + * by the USB forum. Products may choose to implement classes, + * or be vendor-specific. Interface classes specify behavior only + * of a given interface; other interfaces may support other classes. + * @bInterfaceSubClass: Subclass of interface; associated with bInterfaceClass. + * @bInterfaceProtocol: Protocol of interface; associated with bInterfaceClass. + * @driver_info: Holds information used by the driver. Usually it holds + * a pointer to a descriptor understood by the driver, or perhaps + * device flags. + * + * In most cases, drivers will create a table of device IDs by using + * USB_DEVICE(), or similar macros designed for that purpose. + * They will then export it to userspace using MODULE_DEVICE_TABLE(), + * and provide it to the USB core through their usb_driver structure. + * + * See the usb_match_id() function for information about how matches are + * performed. 
Briefly, you will normally use one of several macros to help + * construct these entries. Each entry you provide will either identify + * one or more specific products, or will identify a class of products + * which have agreed to behave the same. You should put the more specific + * matches towards the beginning of your table, so that driver_info can + * record quirks of specific products. + */ +struct usb_device_id { + /* which fields to match against? */ + __u16 match_flags; + + /* Used for product specific matches; range is inclusive */ + __u16 idVendor; + __u16 idProduct; + __u16 bcdDevice_lo; + __u16 bcdDevice_hi; + + /* Used for device class matches */ + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + + /* Used for interface class matches */ + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + + /* not matched against */ + kernel_ulong_t driver_info; +}; + +/* Some useful macros to use to create struct usb_device_id */ +#define USB_DEVICE_ID_MATCH_VENDOR 0x0001 +#define USB_DEVICE_ID_MATCH_PRODUCT 0x0002 +#define USB_DEVICE_ID_MATCH_DEV_LO 0x0004 +#define USB_DEVICE_ID_MATCH_DEV_HI 0x0008 +#define USB_DEVICE_ID_MATCH_DEV_CLASS 0x0010 +#define USB_DEVICE_ID_MATCH_DEV_SUBCLASS 0x0020 +#define USB_DEVICE_ID_MATCH_DEV_PROTOCOL 0x0040 +#define USB_DEVICE_ID_MATCH_INT_CLASS 0x0080 +#define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100 +#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200 + +#define HID_ANY_ID (~0) + +struct hid_device_id { + __u16 bus; + __u16 pad1; + __u32 vendor; + __u32 product; + kernel_ulong_t driver_data + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + +/* s390 CCW devices */ +struct ccw_device_id { + __u16 match_flags; /* which fields to match against */ + + __u16 cu_type; /* control unit type */ + __u16 dev_type; /* device type */ + __u8 cu_model; /* control unit model */ + __u8 dev_model; /* device model */ + + kernel_ulong_t driver_info; +}; + +#define CCW_DEVICE_ID_MATCH_CU_TYPE 0x01 +#define CCW_DEVICE_ID_MATCH_CU_MODEL 0x02 +#define CCW_DEVICE_ID_MATCH_DEVICE_TYPE 0x04 +#define CCW_DEVICE_ID_MATCH_DEVICE_MODEL 0x08 + +/* s390 AP bus devices */ +struct ap_device_id { + __u16 match_flags; /* which fields to match against */ + __u8 dev_type; /* device type */ + __u8 pad1; + __u32 pad2; + kernel_ulong_t driver_info; +}; + +#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 + +/* s390 css bus devices (subchannels) */ +struct css_device_id { + __u8 match_flags; + __u8 type; /* subchannel type */ + __u16 pad2; + __u32 pad3; + kernel_ulong_t driver_data; +}; + +#define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */ + /* to workaround crosscompile issues */ + +struct acpi_device_id { + __u8 id[ACPI_ID_LEN]; + kernel_ulong_t driver_data; +}; + +#define PNP_ID_LEN 8 +#define PNP_MAX_DEVICES 8 + +struct pnp_device_id { + __u8 id[PNP_ID_LEN]; + kernel_ulong_t driver_data; +}; + +struct pnp_card_device_id { + __u8 id[PNP_ID_LEN]; + kernel_ulong_t driver_data; + struct { + __u8 id[PNP_ID_LEN]; + } devs[PNP_MAX_DEVICES]; +}; + + +#define SERIO_ANY 0xff + +struct serio_device_id { + __u8 type; + __u8 extra; + __u8 id; + __u8 proto; +}; + +/* + * Struct used for matching a device + */ +struct of_device_id +{ + char name[32]; + char type[32]; + char compatible[128]; +#ifdef __KERNEL__ + void *data; +#else + kernel_ulong_t data; +#endif +}; + +/* VIO */ +struct vio_device_id { + char type[32]; + char compat[32]; +}; + +/* PCMCIA */ + +struct pcmcia_device_id { + __u16 match_flags; + + __u16 manf_id; + __u16 card_id; + + 
__u8 func_id; + + /* for real multi-function devices */ + __u8 function; + + /* for pseudo multi-function devices */ + __u8 device_no; + + __u32 prod_id_hash[4] + __attribute__((aligned(sizeof(__u32)))); + + /* not matched against in kernelspace*/ +#ifdef __KERNEL__ + const char * prod_id[4]; +#else + kernel_ulong_t prod_id[4] + __attribute__((aligned(sizeof(kernel_ulong_t)))); +#endif + + /* not matched against */ + kernel_ulong_t driver_info; +#ifdef __KERNEL__ + char * cisfile; +#else + kernel_ulong_t cisfile; +#endif +}; + +#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001 +#define PCMCIA_DEV_ID_MATCH_CARD_ID 0x0002 +#define PCMCIA_DEV_ID_MATCH_FUNC_ID 0x0004 +#define PCMCIA_DEV_ID_MATCH_FUNCTION 0x0008 +#define PCMCIA_DEV_ID_MATCH_PROD_ID1 0x0010 +#define PCMCIA_DEV_ID_MATCH_PROD_ID2 0x0020 +#define PCMCIA_DEV_ID_MATCH_PROD_ID3 0x0040 +#define PCMCIA_DEV_ID_MATCH_PROD_ID4 0x0080 +#define PCMCIA_DEV_ID_MATCH_DEVICE_NO 0x0100 +#define PCMCIA_DEV_ID_MATCH_FAKE_CIS 0x0200 +#define PCMCIA_DEV_ID_MATCH_ANONYMOUS 0x0400 + +/* Input */ +#define INPUT_DEVICE_ID_EV_MAX 0x1f +#define INPUT_DEVICE_ID_KEY_MIN_INTERESTING 0x71 +#define INPUT_DEVICE_ID_KEY_MAX 0x2ff +#define INPUT_DEVICE_ID_REL_MAX 0x0f +#define INPUT_DEVICE_ID_ABS_MAX 0x3f +#define INPUT_DEVICE_ID_MSC_MAX 0x07 +#define INPUT_DEVICE_ID_LED_MAX 0x0f +#define INPUT_DEVICE_ID_SND_MAX 0x07 +#define INPUT_DEVICE_ID_FF_MAX 0x7f +#define INPUT_DEVICE_ID_SW_MAX 0x0f + +#define INPUT_DEVICE_ID_MATCH_BUS 1 +#define INPUT_DEVICE_ID_MATCH_VENDOR 2 +#define INPUT_DEVICE_ID_MATCH_PRODUCT 4 +#define INPUT_DEVICE_ID_MATCH_VERSION 8 + +#define INPUT_DEVICE_ID_MATCH_EVBIT 0x0010 +#define INPUT_DEVICE_ID_MATCH_KEYBIT 0x0020 +#define INPUT_DEVICE_ID_MATCH_RELBIT 0x0040 +#define INPUT_DEVICE_ID_MATCH_ABSBIT 0x0080 +#define INPUT_DEVICE_ID_MATCH_MSCIT 0x0100 +#define INPUT_DEVICE_ID_MATCH_LEDBIT 0x0200 +#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400 +#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800 +#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000 + +struct input_device_id { + + kernel_ulong_t flags; + + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; + + kernel_ulong_t evbit[INPUT_DEVICE_ID_EV_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t keybit[INPUT_DEVICE_ID_KEY_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t relbit[INPUT_DEVICE_ID_REL_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t absbit[INPUT_DEVICE_ID_ABS_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t mscbit[INPUT_DEVICE_ID_MSC_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t ledbit[INPUT_DEVICE_ID_LED_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1]; + kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1]; + + kernel_ulong_t driver_info; +}; + +/* EISA */ + +#define EISA_SIG_LEN 8 + +/* The EISA signature, in ASCII form, null terminated */ +struct eisa_device_id { + char sig[EISA_SIG_LEN]; + kernel_ulong_t driver_data; +}; + +#define EISA_DEVICE_MODALIAS_FMT "eisa:s%s" + +struct parisc_device_id { + __u8 hw_type; /* 5 bits used */ + __u8 hversion_rev; /* 4 bits */ + __u16 hversion; /* 12 bits */ + __u32 sversion; /* 20 bits */ +}; + +#define PA_HWTYPE_ANY_ID 0xff +#define PA_HVERSION_REV_ANY_ID 0xff +#define PA_HVERSION_ANY_ID 0xffff +#define PA_SVERSION_ANY_ID 0xffffffff + +/* SDIO */ + +#define SDIO_ANY_ID (~0) + +struct sdio_device_id { + __u8 class; /* Standard interface or SDIO_ANY_ID */ + __u16 vendor; /* Vendor or SDIO_ANY_ID */ + __u16 device; /* Device ID or SDIO_ANY_ID */ + kernel_ulong_t 
driver_data /* Data private to the driver */ + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + +/* SSB core, see drivers/ssb/ */ +struct ssb_device_id { + __u16 vendor; + __u16 coreid; + __u8 revision; +}; +#define SSB_DEVICE(_vendor, _coreid, _revision) \ + { .vendor = _vendor, .coreid = _coreid, .revision = _revision, } +#define SSB_DEVTABLE_END \ + { 0, }, + +#define SSB_ANY_VENDOR 0xFFFF +#define SSB_ANY_ID 0xFFFF +#define SSB_ANY_REV 0xFF + +/* Broadcom's specific AMBA core, see drivers/bcma/ */ +struct bcma_device_id { + __u16 manuf; + __u16 id; + __u8 rev; + __u8 class; +}; +#define BCMA_CORE(_manuf, _id, _rev, _class) \ + { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, } +#define BCMA_CORETABLE_END \ + { 0, }, + +#define BCMA_ANY_MANUF 0xFFFF +#define BCMA_ANY_ID 0xFFFF +#define BCMA_ANY_REV 0xFF +#define BCMA_ANY_CLASS 0xFF + +struct virtio_device_id { + __u32 device; + __u32 vendor; +}; +#define VIRTIO_DEV_ANY_ID 0xffffffff + +/* + * For Hyper-V devices we use the device guid as the id. + */ +struct hv_vmbus_device_id { + __u8 guid[16]; + kernel_ulong_t driver_data /* Data private to the driver */ + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + +/* rpmsg */ + +#define RPMSG_NAME_SIZE 32 +#define RPMSG_DEVICE_MODALIAS_FMT "rpmsg:%s" + +struct rpmsg_device_id { + char name[RPMSG_NAME_SIZE]; +}; + +/* i2c */ + +#define I2C_NAME_SIZE 20 +#define I2C_MODULE_PREFIX "i2c:" + +struct i2c_device_id { + char name[I2C_NAME_SIZE]; + kernel_ulong_t driver_data /* Data private to the driver */ + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + +/* spi */ + +#define SPI_NAME_SIZE 32 +#define SPI_MODULE_PREFIX "spi:" + +struct spi_device_id { + char name[SPI_NAME_SIZE]; + kernel_ulong_t driver_data /* Data private to the driver */ + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + +/* dmi */ +enum dmi_field { + DMI_NONE, + DMI_BIOS_VENDOR, + DMI_BIOS_VERSION, + DMI_BIOS_DATE, + DMI_SYS_VENDOR, + DMI_PRODUCT_NAME, + DMI_PRODUCT_VERSION, + DMI_PRODUCT_SERIAL, + DMI_PRODUCT_UUID, + DMI_BOARD_VENDOR, + DMI_BOARD_NAME, + DMI_BOARD_VERSION, + DMI_BOARD_SERIAL, + DMI_BOARD_ASSET_TAG, + DMI_CHASSIS_VENDOR, + DMI_CHASSIS_TYPE, + DMI_CHASSIS_VERSION, + DMI_CHASSIS_SERIAL, + DMI_CHASSIS_ASSET_TAG, + DMI_STRING_MAX, +}; + +struct dmi_strmatch { + unsigned char slot; + char substr[79]; +}; + +#ifndef __KERNEL__ +struct dmi_system_id { + kernel_ulong_t callback; + kernel_ulong_t ident; + struct dmi_strmatch matches[4]; + kernel_ulong_t driver_data + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; +#else +struct dmi_system_id { + int (*callback)(const struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +}; +/* + * struct dmi_device_id appears during expansion of + * "MODULE_DEVICE_TABLE(dmi, x)". 
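+ * (Illustration only, editor-added, with invented strings; such an "x"
+ * in a driver might read:
+ *
+ *	static const struct dmi_system_id example_dmi_table[] = {
+ *		{
+ *			.ident = "Example Laptop",
+ *			.matches = {
+ *				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
+ *				DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
+ *			},
+ *		},
+ *		{ }
+ *	};
+ *	MODULE_DEVICE_TABLE(dmi, example_dmi_table);
+ * )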
Compiler doesn't look inside it + * but this is enough for gcc 3.4.6 to error out: + * error: storage size of '__mod_dmi_device_table' isn't known + */ +#define dmi_device_id dmi_system_id +#endif + +#define DMI_MATCH(a, b) { a, b } + +#define PLATFORM_NAME_SIZE 20 +#define PLATFORM_MODULE_PREFIX "platform:" + +struct platform_device_id { + char name[PLATFORM_NAME_SIZE]; + kernel_ulong_t driver_data + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + +#define MDIO_MODULE_PREFIX "mdio:" + +#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" +#define MDIO_ID_ARGS(_id) \ + (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \ + ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \ + ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \ + ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \ + ((_id)>>15) & 1, ((_id)>>14) & 1, ((_id)>>13) & 1, ((_id)>>12) & 1, \ + ((_id)>>11) & 1, ((_id)>>10) & 1, ((_id)>>9) & 1, ((_id)>>8) & 1, \ + ((_id)>>7) & 1, ((_id)>>6) & 1, ((_id)>>5) & 1, ((_id)>>4) & 1, \ + ((_id)>>3) & 1, ((_id)>>2) & 1, ((_id)>>1) & 1, (_id) & 1 + +/** + * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus + * @phy_id: The result of + * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask + * for this PHY type + * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0 + * is used to terminate an array of struct mdio_device_id. + */ +struct mdio_device_id { + __u32 phy_id; + __u32 phy_id_mask; +}; + +struct zorro_device_id { + __u32 id; /* Device ID or ZORRO_WILDCARD */ + kernel_ulong_t driver_data; /* Data private to the driver */ +}; + +#define ZORRO_WILDCARD (0xffffffff) /* not official */ + +#define ZORRO_DEVICE_MODALIAS_FMT "zorro:i%08X" + +#define ISAPNP_ANY_ID 0xffff +struct isapnp_device_id { + unsigned short card_vendor, card_device; + unsigned short vendor, function; + kernel_ulong_t driver_data; /* data private to the driver */ +}; + +/** + * struct amba_id - identifies a device on an AMBA bus + * @id: The significant bits if the hardware device ID + * @mask: Bitmask specifying which bits of the id field are significant when + * matching. A driver binds to a device when ((hardware device ID) & mask) + * == id. + * @data: Private data used by the driver. + */ +struct amba_id { + unsigned int id; + unsigned int mask; +#ifndef __KERNEL__ + kernel_ulong_t data; +#else + void *data; +#endif +}; + +/* + * Match x86 CPUs for CPU specific drivers. + * See documentation of "x86_match_cpu" for details. + */ + +struct x86_cpu_id { + __u16 vendor; + __u16 family; + __u16 model; + __u16 feature; /* bit index */ + kernel_ulong_t driver_data; +}; + +#define X86_FEATURE_MATCH(x) \ + { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x } + +#define X86_VENDOR_ANY 0xffff +#define X86_FAMILY_ANY 0 +#define X86_MODEL_ANY 0 +#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ + +#endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h new file mode 100644 index 00000000..fbcafe2e --- /dev/null +++ b/include/linux/module.h @@ -0,0 +1,651 @@ +#ifndef _LINUX_MODULE_H +#define _LINUX_MODULE_H +/* + * Dynamic loading of modules into the kernel. 
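+ *
+ * (Editor's illustrative sketch, not part of the original file: the classic
+ * entry points declared further down boil down to a pair such as
+ *
+ *	int init_module(void)
+ *	{
+ *		pr_info("example loaded\n");
+ *		return 0;
+ *	}
+ *
+ *	void cleanup_module(void)
+ *	{
+ *		pr_info("example unloaded\n");
+ *	}
+ *
+ * though most modules use the module_init()/module_exit() wrappers from
+ * <linux/init.h> instead.)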
+ * + * Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996 + * Rewritten again by Rusty Russell, 2002 + */ +#include <linux/list.h> +#include <linux/stat.h> +#include <linux/compiler.h> +#include <linux/cache.h> +#include <linux/kmod.h> +#include <linux/elf.h> +#include <linux/stringify.h> +#include <linux/kobject.h> +#include <linux/moduleparam.h> +#include <linux/tracepoint.h> +#include <linux/export.h> + +#include <linux/percpu.h> +#include <asm/module.h> + +/* Not Yet Implemented */ +#define MODULE_SUPPORTED_DEVICE(name) + +#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN + +struct modversion_info +{ + unsigned long crc; + char name[MODULE_NAME_LEN]; +}; + +struct module; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; +}; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, + char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, + const char *, size_t count); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +} __attribute__ ((__aligned__(sizeof(void *)))); + +extern ssize_t __modver_version_show(struct module_attribute *, + struct module_kobject *, char *); + +extern struct module_attribute module_uevent; + +/* These are either module local, or the kernel's dummy ones. */ +extern int init_module(void); +extern void cleanup_module(void); + +/* Archs provide a method of finding the correct exception table. */ +struct exception_table_entry; + +const struct exception_table_entry * +search_extable(const struct exception_table_entry *first, + const struct exception_table_entry *last, + unsigned long value); +void sort_extable(struct exception_table_entry *start, + struct exception_table_entry *finish); +void sort_main_extable(void); +void trim_init_extable(struct module *m); + +#ifdef MODULE +#define MODULE_GENERIC_TABLE(gtype,name) \ +extern const struct gtype##_id __mod_##gtype##_table \ + __attribute__ ((unused, alias(__stringify(name)))) + +#else /* !MODULE */ +#define MODULE_GENERIC_TABLE(gtype,name) +#endif + +/* Generic info of form tag = "info" */ +#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info) + +/* For userspace: you can also call me... */ +#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias) + +/* + * The following license idents are currently accepted as indicating free + * software modules + * + * "GPL" [GNU Public License v2 or later] + * "GPL v2" [GNU Public License v2] + * "GPL and additional rights" [GNU Public License v2 rights and more] + * "Dual BSD/GPL" [GNU Public License v2 + * or BSD license choice] + * "Dual MIT/GPL" [GNU Public License v2 + * or MIT license choice] + * "Dual MPL/GPL" [GNU Public License v2 + * or Mozilla license choice] + * + * The following other idents are available + * + * "Proprietary" [Non free products] + * + * There are dual licensed components, but when running with Linux it is the + * GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL + * is a GPL combined work. + * + * This exists for several reasons + * 1. So modinfo can show license info for users wanting to vet their setup + * is free + * 2. So the community can ignore bug reports including proprietary modules + * 3. 
So vendors can do likewise based on their own policies + */ +#define MODULE_LICENSE(_license) MODULE_INFO(license, _license) + +/* + * Author(s), use "Name <email>" or just "Name", for multiple + * authors use multiple MODULE_AUTHOR() statements/lines. + */ +#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author) + +/* What your module does. */ +#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description) + +#define MODULE_DEVICE_TABLE(type,name) \ + MODULE_GENERIC_TABLE(type##_device,name) + +/* Version of form [<epoch>:]<version>[-<extra-version>]. + Or for CVS/RCS ID version, everything but the number is stripped. + <epoch>: A (small) unsigned integer which allows you to start versions + anew. If not mentioned, it's zero. eg. "2:1.0" is after + "1:2.0". + <version>: The <version> may contain only alphanumerics and the + character `.'. Ordered by numeric sort for numeric parts, + ascii sort for ascii parts (as per RPM or DEB algorithm). + <extraversion>: Like <version>, but inserted for local + customizations, eg "rh3" or "rusty1". + + Using this automatically adds a checksum of the .c files and the + local headers in "srcversion". +*/ + +#if defined(MODULE) || !defined(CONFIG_SYSFS) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#else +#define MODULE_VERSION(_version) \ + static struct module_version_attribute ___modver_attr = { \ + .mattr = { \ + .attr = { \ + .name = "version", \ + .mode = S_IRUGO, \ + }, \ + .show = __modver_version_show, \ + }, \ + .module_name = KBUILD_MODNAME, \ + .version = _version, \ + }; \ + static const struct module_version_attribute \ + __used __attribute__ ((__section__ ("__modver"))) \ + * __moduleparam_const __modver_attr = &___modver_attr +#endif + +/* Optional firmware file (or files) needed by the module + * format is simply firmware file name. Multiple firmware + * files require multiple MODULE_FIRMWARE() specifiers */ +#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) + +/* Given an address, look for it in the exception tables */ +const struct exception_table_entry *search_exception_tables(unsigned long add); + +struct notifier_block; + +#ifdef CONFIG_MODULES + +extern int modules_disabled; /* for sysctl */ +/* Get/put a kernel symbol (calls must be symmetric) */ +void *__symbol_get(const char *symbol); +void *__symbol_get_gpl(const char *symbol); +#define symbol_get(x) ((typeof(&x))(__symbol_get(MODULE_SYMBOL_PREFIX #x))) + +/* modules using other modules: kdb wants to see this. */ +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source, *target; +}; + +enum module_state +{ + MODULE_STATE_LIVE, + MODULE_STATE_COMING, + MODULE_STATE_GOING, +}; + +/** + * struct module_ref - per cpu module reference counts + * @incs: number of module get on this cpu + * @decs: number of module put on this cpu + * + * We force an alignment on 8 or 16 bytes, so that alloc_percpu() + * put @incs/@decs in same cache line, with no extra memory cost, + * since alloc_percpu() is fine grained. + */ +struct module_ref { + unsigned long incs; + unsigned long decs; +} __attribute((aligned(2 * sizeof(unsigned long)))); + +struct module +{ + enum module_state state; + + /* Member of list of modules */ + struct list_head list; + + /* Unique handle for this module */ + char name[MODULE_NAME_LEN]; + + /* Sysfs stuff. 
*/ + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + + /* Exported symbols */ + const struct kernel_symbol *syms; + const unsigned long *crcs; + unsigned int num_syms; + + /* Kernel parameters. */ + struct kernel_param *kp; + unsigned int num_kp; + + /* GPL-only exported symbols. */ + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const unsigned long *gpl_crcs; + +#ifdef CONFIG_UNUSED_SYMBOLS + /* unused exported symbols. */ + const struct kernel_symbol *unused_syms; + const unsigned long *unused_crcs; + unsigned int num_unused_syms; + + /* GPL-only, unused exported symbols. */ + unsigned int num_unused_gpl_syms; + const struct kernel_symbol *unused_gpl_syms; + const unsigned long *unused_gpl_crcs; +#endif + + /* symbols that will be GPL-only in the near future. */ + const struct kernel_symbol *gpl_future_syms; + const unsigned long *gpl_future_crcs; + unsigned int num_gpl_future_syms; + + /* Exception table */ + unsigned int num_exentries; + struct exception_table_entry *extable; + + /* Startup function. */ + int (*init)(void); + + /* If this is non-NULL, vfree after init() returns */ + void *module_init; + + /* Here is the actual code + data, vfree'd on unload. */ + void *module_core; + + /* Here are the sizes of the init and core sections */ + unsigned int init_size, core_size; + + /* The size of the executable code in each section. */ + unsigned int init_text_size, core_text_size; + + /* Size of RO sections of the module (text+rodata) */ + unsigned int init_ro_size, core_ro_size; + + /* Arch-specific module values */ + struct mod_arch_specific arch; + + unsigned int taints; /* same bits as kernel:tainted */ + +#ifdef CONFIG_GENERIC_BUG + /* Support for BUG */ + unsigned num_bugs; + struct list_head bug_list; + struct bug_entry *bug_table; +#endif + +#ifdef CONFIG_KALLSYMS + /* + * We keep the symbol and string tables for kallsyms. + * The core_* fields below are temporary, loader-only (they + * could really be discarded after module init). + */ + Elf_Sym *symtab, *core_symtab; + unsigned int num_symtab, core_num_syms; + char *strtab, *core_strtab; + + /* Section attributes */ + struct module_sect_attrs *sect_attrs; + + /* Notes attributes */ + struct module_notes_attrs *notes_attrs; +#endif + + /* The command line arguments (may be mangled). People like + keeping pointers to this stuff */ + char *args; + +#ifdef CONFIG_SMP + /* Per-cpu data. */ + void __percpu *percpu; + unsigned int percpu_size; +#endif + +#ifdef CONFIG_TRACEPOINTS + unsigned int num_tracepoints; + struct tracepoint * const *tracepoints_ptrs; +#endif +#ifdef HAVE_JUMP_LABEL + struct jump_entry *jump_entries; + unsigned int num_jump_entries; +#endif +#ifdef CONFIG_TRACING + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; +#endif +#ifdef CONFIG_EVENT_TRACING + struct ftrace_event_call **trace_events; + unsigned int num_trace_events; +#endif +#ifdef CONFIG_FTRACE_MCOUNT_RECORD + unsigned int num_ftrace_callsites; + unsigned long *ftrace_callsites; +#endif + +#ifdef CONFIG_MODULE_UNLOAD + /* What modules depend on me? */ + struct list_head source_list; + /* What modules do I depend on? */ + struct list_head target_list; + + /* Who is waiting for us to be unloaded */ + struct task_struct *waiter; + + /* Destruction function. */ + void (*exit)(void); + + struct module_ref __percpu *refptr; +#endif + +#ifdef CONFIG_CONSTRUCTORS + /* Constructor functions. 
*/ + ctor_fn_t *ctors; + unsigned int num_ctors; +#endif +}; +#ifndef MODULE_ARCH_INIT +#define MODULE_ARCH_INIT {} +#endif + +extern struct mutex module_mutex; + +/* FIXME: It'd be nice to isolate modules during init, too, so they + aren't used before they (may) fail. But presently too much code + (IDE & SCSI) require entry into the module during init.*/ +static inline int module_is_live(struct module *mod) +{ + return mod->state != MODULE_STATE_GOING; +} + +struct module *__module_text_address(unsigned long addr); +struct module *__module_address(unsigned long addr); +bool is_module_address(unsigned long addr); +bool is_module_percpu_address(unsigned long addr); +bool is_module_text_address(unsigned long addr); + +static inline int within_module_core(unsigned long addr, struct module *mod) +{ + return (unsigned long)mod->module_core <= addr && + addr < (unsigned long)mod->module_core + mod->core_size; +} + +static inline int within_module_init(unsigned long addr, struct module *mod) +{ + return (unsigned long)mod->module_init <= addr && + addr < (unsigned long)mod->module_init + mod->init_size; +} + +/* Search for module by name: must hold module_mutex. */ +struct module *find_module(const char *name); + +struct symsearch { + const struct kernel_symbol *start, *stop; + const unsigned long *crcs; + enum { + NOT_GPL_ONLY, + GPL_ONLY, + WILL_BE_GPL_ONLY, + } licence; + bool unused; +}; + +/* Search for an exported symbol by name. */ +const struct kernel_symbol *find_symbol(const char *name, + struct module **owner, + const unsigned long **crc, + bool gplok, + bool warn); + +/* Walk the exported symbol table */ +bool each_symbol_section(bool (*fn)(const struct symsearch *arr, + struct module *owner, + void *data), void *data); + +/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if + symnum out of range. */ +int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *name, char *module_name, int *exported); + +/* Look for this name: can be of form module:name. */ +unsigned long module_kallsyms_lookup_name(const char *name); + +int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data); + +extern void __module_put_and_exit(struct module *mod, long code) + __attribute__((noreturn)); +#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); + +#ifdef CONFIG_MODULE_UNLOAD +unsigned long module_refcount(struct module *mod); +void __symbol_put(const char *symbol); +#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x) +void symbol_put_addr(void *addr); + +/* Sometimes we know we already have a refcount, and it's easier not + to handle the error case (which only happens with rmmod --wait). */ +extern void __module_get(struct module *module); + +/* This is the Right Way to get a module: if it fails, it's being removed, + * so pretend it's not there. 
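+ * A minimal usage sketch (editor's addition; "other_mod" stands for some
+ * struct module pointer the caller already holds):
+ *
+ *	if (!try_module_get(other_mod))
+ *		return -ENODEV;
+ *	...
+ *	module_put(other_mod);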
*/ +extern bool try_module_get(struct module *module); + +extern void module_put(struct module *module); + +#else /*!CONFIG_MODULE_UNLOAD*/ +static inline int try_module_get(struct module *module) +{ + return !module || module_is_live(module); +} +static inline void module_put(struct module *module) +{ +} +static inline void __module_get(struct module *module) +{ +} +#define symbol_put(x) do { } while(0) +#define symbol_put_addr(p) do { } while(0) + +#endif /* CONFIG_MODULE_UNLOAD */ +int ref_module(struct module *a, struct module *b); + +/* This is a #define so the string doesn't get put in every .o file */ +#define module_name(mod) \ +({ \ + struct module *__mod = (mod); \ + __mod ? __mod->name : "kernel"; \ +}) + +/* For kallsyms to ask for address resolution. namebuf should be at + * least KSYM_NAME_LEN long: a pointer to namebuf is returned if + * found, otherwise NULL. */ +const char *module_address_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, + char *namebuf); +int lookup_module_symbol_name(unsigned long addr, char *symname); +int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); + +/* For extable.c to search modules' exception tables. */ +const struct exception_table_entry *search_module_extables(unsigned long addr); + +int register_module_notifier(struct notifier_block * nb); +int unregister_module_notifier(struct notifier_block * nb); + +extern void print_modules(void); + +#else /* !CONFIG_MODULES... */ + +/* Given an address, look for it in the exception tables. */ +static inline const struct exception_table_entry * +search_module_extables(unsigned long addr) +{ + return NULL; +} + +static inline struct module *__module_address(unsigned long addr) +{ + return NULL; +} + +static inline struct module *__module_text_address(unsigned long addr) +{ + return NULL; +} + +static inline bool is_module_address(unsigned long addr) +{ + return false; +} + +static inline bool is_module_percpu_address(unsigned long addr) +{ + return false; +} + +static inline bool is_module_text_address(unsigned long addr) +{ + return false; +} + +/* Get/put a kernel symbol (calls should be symmetric) */ +#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) +#define symbol_put(x) do { } while(0) +#define symbol_put_addr(x) do { } while(0) + +static inline void __module_get(struct module *module) +{ +} + +static inline int try_module_get(struct module *module) +{ + return 1; +} + +static inline void module_put(struct module *module) +{ +} + +#define module_name(mod) "kernel" + +/* For kallsyms to ask for address resolution. NULL means not found. 
*/ +static inline const char *module_address_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, + char *namebuf) +{ + return NULL; +} + +static inline int lookup_module_symbol_name(unsigned long addr, char *symname) +{ + return -ERANGE; +} + +static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name) +{ + return -ERANGE; +} + +static inline int module_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, + char *module_name, int *exported) +{ + return -ERANGE; +} + +static inline unsigned long module_kallsyms_lookup_name(const char *name) +{ + return 0; +} + +static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + +static inline int register_module_notifier(struct notifier_block * nb) +{ + /* no events will happen anyway, so this can always succeed */ + return 0; +} + +static inline int unregister_module_notifier(struct notifier_block * nb) +{ + return 0; +} + +#define module_put_and_exit(code) do_exit(code) + +static inline void print_modules(void) +{ +} +#endif /* CONFIG_MODULES */ + +#ifdef CONFIG_SYSFS +extern struct kset *module_kset; +extern struct kobj_type module_ktype; +extern int module_sysfs_initialized; +#endif /* CONFIG_SYSFS */ + +#define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x) + +/* BELOW HERE ALL THESE ARE OBSOLETE AND WILL VANISH */ + +#define __MODULE_STRING(x) __stringify(x) + +#ifdef CONFIG_DEBUG_SET_MODULE_RONX +extern void set_all_modules_text_rw(void); +extern void set_all_modules_text_ro(void); +#else +static inline void set_all_modules_text_rw(void) { } +static inline void set_all_modules_text_ro(void) { } +#endif + +#ifdef CONFIG_GENERIC_BUG +void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, + struct module *); +void module_bug_cleanup(struct module *); + +#else /* !CONFIG_GENERIC_BUG */ + +static inline void module_bug_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod) +{ +} +static inline void module_bug_cleanup(struct module *mod) {} +#endif /* CONFIG_GENERIC_BUG */ + +#endif /* _LINUX_MODULE_H */ diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h new file mode 100644 index 00000000..b2be02eb --- /dev/null +++ b/include/linux/moduleloader.h @@ -0,0 +1,55 @@ +#ifndef _LINUX_MODULELOADER_H +#define _LINUX_MODULELOADER_H +/* The stuff needed for archs to support modules. */ + +#include <linux/module.h> +#include <linux/elf.h> + +/* These may be implemented by architectures that need to hook into the + * module loader code. Architectures that don't need to do anything special + * can just rely on the 'weak' default hooks defined in kernel/module.c. + * Note, however, that at least one of apply_relocate or apply_relocate_add + * must be implemented by each architecture. + */ + +/* Adjust arch-specific sections. Return 0 on success. */ +int module_frob_arch_sections(Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, + char *secstrings, + struct module *mod); + +/* Additional bytes needed by arch in front of individual sections */ +unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section); + +/* Allocator used for allocating struct module, core sections and init + sections. Returns NULL on failure. */ +void *module_alloc(unsigned long size); + +/* Free memory returned from module_alloc. 
*/ +void module_free(struct module *mod, void *module_region); + +/* Apply the given relocation to the (simplified) ELF. Return -error + or 0. */ +int apply_relocate(Elf_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *mod); + +/* Apply the given add relocation to the (simplified) ELF. Return + -error or 0 */ +int apply_relocate_add(Elf_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *mod); + +/* Any final processing of module before access. Return -error or 0. */ +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod); + +/* Any cleanup needed when module leaves. */ +void module_arch_cleanup(struct module *mod); + +#endif diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h new file mode 100644 index 00000000..944bc186 --- /dev/null +++ b/include/linux/moduleparam.h @@ -0,0 +1,465 @@ +#ifndef _LINUX_MODULE_PARAMS_H +#define _LINUX_MODULE_PARAMS_H +/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */ +#include <linux/init.h> +#include <linux/stringify.h> +#include <linux/kernel.h> + +/* You can override this manually, but generally this should match the + module name. */ +#ifdef MODULE +#define MODULE_PARAM_PREFIX /* empty */ +#else +#define MODULE_PARAM_PREFIX KBUILD_MODNAME "." +#endif + +/* Chosen so that structs with an unsigned long line up. */ +#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long)) + +#define ___module_cat(a,b) __mod_ ## a ## b +#define __module_cat(a,b) ___module_cat(a,b) +#ifdef MODULE +#define __MODULE_INFO(tag, name, info) \ +static const char __module_cat(name,__LINE__)[] \ + __used __attribute__((section(".modinfo"), unused, aligned(1))) \ + = __stringify(tag) "=" info +#else /* !MODULE */ +/* This struct is here for syntactic coherency, it is not used */ +#define __MODULE_INFO(tag, name, info) \ + struct __module_cat(name,__LINE__) {} +#endif +#define __MODULE_PARM_TYPE(name, _type) \ + __MODULE_INFO(parmtype, name##type, #name ":" _type) + +/* One for each parameter, describing how to use it. Some files do + multiple of these per line, so can't just use MODULE_INFO. */ +#define MODULE_PARM_DESC(_parm, desc) \ + __MODULE_INFO(parm, _parm, #_parm ":" desc) + +struct kernel_param; + +struct kernel_param_ops { + /* Returns 0, or -errno. arg is in kp->arg. */ + int (*set)(const char *val, const struct kernel_param *kp); + /* Returns length written or -errno. Buffer is 4k (ie. be short!) */ + int (*get)(char *buffer, const struct kernel_param *kp); + /* Optional function to free kp->arg when module unloaded. */ + void (*free)(void *arg); +}; + +struct kernel_param { + const char *name; + const struct kernel_param_ops *ops; + u16 perm; + s16 level; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +/* Special one for strings we want to copy into */ +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +/* Special one for arrays */ +struct kparam_array +{ + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +/** + * module_param - typesafe helper for a module/cmdline parameter + * @value: the variable to alter, and exposed parameter name. + * @type: the type of the parameter + * @perm: visibility in sysfs. + * + * @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a + * ".") the kernel commandline parameter. 
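+ *
+ * (Editor's illustrative sketch with invented names:
+ *
+ *	static int debug;
+ *	module_param(debug, int, 0644);
+ *	MODULE_PARM_DESC(debug, "Enable verbose logging");
+ *
+ * gives a module parameter named "debug", or "mymod.debug" on the kernel
+ * command line when built in, with "mymod" standing for KBUILD_MODNAME.)
+ *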
Note that - is changed to _, so + * the user can use "foo-bar=1" even for variable "foo_bar". + * + * @perm is 0 if the the variable is not to appear in sysfs, or 0444 + * for world-readable, 0644 for root-writable, etc. Note that if it + * is writable, you may need to use kparam_block_sysfs_write() around + * accesses (esp. charp, which can be kfreed when it changes). + * + * The @type is simply pasted to refer to a param_ops_##type and a + * param_check_##type: for convenience many standard types are provided but + * you can create your own by defining those variables. + * + * Standard types are: + * byte, short, ushort, int, uint, long, ulong + * charp: a character pointer + * bool: a bool, values 0/1, y/n, Y/N. + * invbool: the above, only sense-reversed (N = true). + */ +#define module_param(name, type, perm) \ + module_param_named(name, name, type, perm) + +/** + * module_param_named - typesafe helper for a renamed module/cmdline parameter + * @name: a valid C identifier which is the parameter name. + * @value: the actual lvalue to alter. + * @type: the type of the parameter + * @perm: visibility in sysfs. + * + * Usually it's a good idea to have variable names and user-exposed names the + * same, but that's harder if the variable must be non-static or is inside a + * structure. This allows exposure under a different name. + */ +#define module_param_named(name, value, type, perm) \ + param_check_##type(name, &(value)); \ + module_param_cb(name, ¶m_ops_##type, &value, perm); \ + __MODULE_PARM_TYPE(name, #type) + +/** + * module_param_cb - general callback for a module/cmdline parameter + * @name: a valid C identifier which is the parameter name. + * @ops: the set & get operations for this parameter. + * @perm: visibility in sysfs. + * + * The ops can have NULL set or get functions. + */ +#define module_param_cb(name, ops, arg, perm) \ + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1) + +/** + * <level>_param_cb - general callback for a module/cmdline parameter + * to be evaluated before certain initcall level + * @name: a valid C identifier which is the parameter name. + * @ops: the set & get operations for this parameter. + * @perm: visibility in sysfs. + * + * The ops can have NULL set or get functions. + */ +#define __level_param_cb(name, ops, arg, perm, level) \ + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level) + +#define core_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 1) + +#define postcore_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 2) + +#define arch_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 3) + +#define subsys_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 4) + +#define fs_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 5) + +#define device_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 6) + +#define late_param_cb(name, ops, arg, perm) \ + __level_param_cb(name, ops, arg, perm, 7) + +/* On alpha, ia64 and ppc64 relocations to global data cannot go into + read-only sections (which is part of respective UNIX ABI on these + platforms). So 'const' makes no sense and even causes compile failures + with some compilers. */ +#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) +#define __moduleparam_const +#else +#define __moduleparam_const const +#endif + +/* This is the fundamental function for registering boot/module + parameters. 
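+
+   As an editor's sketch (not from the original source; the names are
+   invented), a parameter with custom handling reaches this point through
+   module_param_cb() above, roughly:
+
+	static int threshold;
+	static const struct kernel_param_ops threshold_ops = {
+		.set = param_set_int,
+		.get = param_get_int,
+	};
+	module_param_cb(threshold, &threshold_ops, &threshold, 0644);
+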
*/ +#define __module_param_call(prefix, name, ops, arg, perm, level) \ + /* Default value instead of permissions? */ \ + static int __param_perm_check_##name __attribute__((unused)) = \ + BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \ + + BUILD_BUG_ON_ZERO(sizeof(""prefix) > MAX_PARAM_PREFIX_LEN); \ + static const char __param_str_##name[] = prefix #name; \ + static struct kernel_param __moduleparam_const __param_##name \ + __used \ + __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ + = { __param_str_##name, ops, perm, level, { arg } } + +/* Obsolete - use module_param_cb() */ +#define module_param_call(name, set, get, arg, perm) \ + static struct kernel_param_ops __param_ops_##name = \ + { (void *)set, (void *)get }; \ + __module_param_call(MODULE_PARAM_PREFIX, \ + name, &__param_ops_##name, arg, \ + (perm) + sizeof(__check_old_set_param(set))*0, -1) + +/* We don't get oldget: it's often a new-style param_get_uint, etc. */ +static inline int +__check_old_set_param(int (*oldset)(const char *, struct kernel_param *)) +{ + return 0; +} + +/** + * kparam_block_sysfs_write - make sure a parameter isn't written via sysfs. + * @name: the name of the parameter + * + * There's no point blocking write on a paramter that isn't writable via sysfs! + */ +#define kparam_block_sysfs_write(name) \ + do { \ + BUG_ON(!(__param_##name.perm & 0222)); \ + __kernel_param_lock(); \ + } while (0) + +/** + * kparam_unblock_sysfs_write - allows sysfs to write to a parameter again. + * @name: the name of the parameter + */ +#define kparam_unblock_sysfs_write(name) \ + do { \ + BUG_ON(!(__param_##name.perm & 0222)); \ + __kernel_param_unlock(); \ + } while (0) + +/** + * kparam_block_sysfs_read - make sure a parameter isn't read via sysfs. + * @name: the name of the parameter + * + * This also blocks sysfs writes. + */ +#define kparam_block_sysfs_read(name) \ + do { \ + BUG_ON(!(__param_##name.perm & 0444)); \ + __kernel_param_lock(); \ + } while (0) + +/** + * kparam_unblock_sysfs_read - allows sysfs to read a parameter again. + * @name: the name of the parameter + */ +#define kparam_unblock_sysfs_read(name) \ + do { \ + BUG_ON(!(__param_##name.perm & 0444)); \ + __kernel_param_unlock(); \ + } while (0) + +#ifdef CONFIG_SYSFS +extern void __kernel_param_lock(void); +extern void __kernel_param_unlock(void); +#else +static inline void __kernel_param_lock(void) +{ +} +static inline void __kernel_param_unlock(void) +{ +} +#endif + +#ifndef MODULE +/** + * core_param - define a historical core kernel parameter. + * @name: the name of the cmdline and sysfs parameter (often the same as var) + * @var: the variable + * @type: the type of the parameter + * @perm: visibility in sysfs + * + * core_param is just like module_param(), but cannot be modular and + * doesn't add a prefix (such as "printk."). This is for compatibility + * with __setup(), and it makes sense as truly core parameters aren't + * tied to the particular file they're in. + */ +#define core_param(name, var, type, perm) \ + param_check_##type(name, &(var)); \ + __module_param_call("", name, ¶m_ops_##type, &var, perm, -1) +#endif /* !MODULE */ + +/** + * module_param_string - a char array parameter + * @name: the name of the parameter + * @string: the string variable + * @len: the maximum length of the string, incl. terminator + * @perm: visibility in sysfs. + * + * This actually copies the string when it's set (unlike type charp). + * @len is usually just sizeof(string). 
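+ *
+ * For example (an illustrative, editor-added sketch; the names are invented):
+ *
+ *	static char ifname[16] = "eth0";
+ *	module_param_string(ifname, ifname, sizeof(ifname), 0444);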
+ */ +#define module_param_string(name, string, len, perm) \ + static const struct kparam_string __param_string_##name \ + = { len, string }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + ¶m_ops_string, \ + .str = &__param_string_##name, perm, -1); \ + __MODULE_PARM_TYPE(name, "string") + +/** + * parameq - checks if two parameter names match + * @name1: parameter name 1 + * @name2: parameter name 2 + * + * Returns true if the two parameter names are equal. + * Dashes (-) are considered equal to underscores (_). + */ +extern bool parameq(const char *name1, const char *name2); + +/** + * parameqn - checks if two parameter names match + * @name1: parameter name 1 + * @name2: parameter name 2 + * @n: the length to compare + * + * Similar to parameq(), except it compares @n characters. + */ +extern bool parameqn(const char *name1, const char *name2, size_t n); + +/* Called on module insert or kernel boot */ +extern int parse_args(const char *name, + char *args, + const struct kernel_param *params, + unsigned num, + s16 level_min, + s16 level_max, + int (*unknown)(char *param, char *val)); + +/* Called by module remove. */ +#ifdef CONFIG_SYSFS +extern void destroy_params(const struct kernel_param *params, unsigned num); +#else +static inline void destroy_params(const struct kernel_param *params, + unsigned num) +{ +} +#endif /* !CONFIG_SYSFS */ + +/* All the helper functions */ +/* The macros to do compile-time type checking stolen from Jakub + Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ +#define __param_check(name, p, type) \ + static inline type *__check_##name(void) { return(p); } + +extern struct kernel_param_ops param_ops_byte; +extern int param_set_byte(const char *val, const struct kernel_param *kp); +extern int param_get_byte(char *buffer, const struct kernel_param *kp); +#define param_check_byte(name, p) __param_check(name, p, unsigned char) + +extern struct kernel_param_ops param_ops_short; +extern int param_set_short(const char *val, const struct kernel_param *kp); +extern int param_get_short(char *buffer, const struct kernel_param *kp); +#define param_check_short(name, p) __param_check(name, p, short) + +extern struct kernel_param_ops param_ops_ushort; +extern int param_set_ushort(const char *val, const struct kernel_param *kp); +extern int param_get_ushort(char *buffer, const struct kernel_param *kp); +#define param_check_ushort(name, p) __param_check(name, p, unsigned short) + +extern struct kernel_param_ops param_ops_int; +extern int param_set_int(const char *val, const struct kernel_param *kp); +extern int param_get_int(char *buffer, const struct kernel_param *kp); +#define param_check_int(name, p) __param_check(name, p, int) + +extern struct kernel_param_ops param_ops_uint; +extern int param_set_uint(const char *val, const struct kernel_param *kp); +extern int param_get_uint(char *buffer, const struct kernel_param *kp); +#define param_check_uint(name, p) __param_check(name, p, unsigned int) + +extern struct kernel_param_ops param_ops_long; +extern int param_set_long(const char *val, const struct kernel_param *kp); +extern int param_get_long(char *buffer, const struct kernel_param *kp); +#define param_check_long(name, p) __param_check(name, p, long) + +extern struct kernel_param_ops param_ops_ulong; +extern int param_set_ulong(const char *val, const struct kernel_param *kp); +extern int param_get_ulong(char *buffer, const struct kernel_param *kp); +#define param_check_ulong(name, p) __param_check(name, p, unsigned long) + +extern struct 
kernel_param_ops param_ops_charp; +extern int param_set_charp(const char *val, const struct kernel_param *kp); +extern int param_get_charp(char *buffer, const struct kernel_param *kp); +#define param_check_charp(name, p) __param_check(name, p, char *) + +/* We used to allow int as well as bool. We're taking that away! */ +extern struct kernel_param_ops param_ops_bool; +extern int param_set_bool(const char *val, const struct kernel_param *kp); +extern int param_get_bool(char *buffer, const struct kernel_param *kp); +#define param_check_bool(name, p) __param_check(name, p, bool) + +extern struct kernel_param_ops param_ops_invbool; +extern int param_set_invbool(const char *val, const struct kernel_param *kp); +extern int param_get_invbool(char *buffer, const struct kernel_param *kp); +#define param_check_invbool(name, p) __param_check(name, p, bool) + +/* An int, which can only be set like a bool (though it shows as an int). */ +extern struct kernel_param_ops param_ops_bint; +extern int param_set_bint(const char *val, const struct kernel_param *kp); +#define param_get_bint param_get_int +#define param_check_bint param_check_int + +/** + * module_param_array - a parameter which is an array of some type + * @name: the name of the array variable + * @type: the type, as per module_param() + * @nump: optional pointer filled in with the number written + * @perm: visibility in sysfs + * + * Input and output are as comma-separated values. Commas inside values + * don't work properly (eg. an array of charp). + * + * ARRAY_SIZE(@name) is used to determine the number of elements in the + * array, so the definition must be visible. + */ +#define module_param_array(name, type, nump, perm) \ + module_param_array_named(name, name, type, nump, perm) + +/** + * module_param_array_named - renamed parameter which is an array of some type + * @name: a valid C identifier which is the parameter name + * @array: the name of the array variable + * @type: the type, as per module_param() + * @nump: optional pointer filled in with the number written + * @perm: visibility in sysfs + * + * This exposes a different name than the actual variable name. See + * module_param_named() for why this might be necessary. 
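+ *
+ * For example (an illustrative, editor-added sketch with invented names):
+ *
+ *	static int channels[4];
+ *	static unsigned int nr_channels;
+ *	module_param_array_named(channel, channels, int, &nr_channels, 0444);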
+ */ +#define module_param_array_named(name, array, type, nump, perm) \ + param_check_##type(name, &(array)[0]); \ + static const struct kparam_array __param_arr_##name \ + = { .max = ARRAY_SIZE(array), .num = nump, \ + .ops = ¶m_ops_##type, \ + .elemsize = sizeof(array[0]), .elem = array }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + ¶m_array_ops, \ + .arr = &__param_arr_##name, \ + perm, -1); \ + __MODULE_PARM_TYPE(name, "array of " #type) + +extern struct kernel_param_ops param_array_ops; + +extern struct kernel_param_ops param_ops_string; +extern int param_set_copystring(const char *val, const struct kernel_param *); +extern int param_get_string(char *buffer, const struct kernel_param *kp); + +/* for exporting parameters in /sys/parameters */ + +struct module; + +#if defined(CONFIG_SYSFS) && defined(CONFIG_MODULES) +extern int module_param_sysfs_setup(struct module *mod, + const struct kernel_param *kparam, + unsigned int num_params); + +extern void module_param_sysfs_remove(struct module *mod); +#else +static inline int module_param_sysfs_setup(struct module *mod, + const struct kernel_param *kparam, + unsigned int num_params) +{ + return 0; +} + +static inline void module_param_sysfs_remove(struct module *mod) +{ } +#endif + +#endif /* _LINUX_MODULE_PARAMS_H */ diff --git a/include/linux/mount.h b/include/linux/mount.h new file mode 100644 index 00000000..d7029f4a --- /dev/null +++ b/include/linux/mount.h @@ -0,0 +1,79 @@ +/* + * + * Definitions for mount interface. This describes the in the kernel build + * linkedlist with mounted filesystems. + * + * Author: Marco van Wieringen <mvw@planets.elm.net> + * + */ +#ifndef _LINUX_MOUNT_H +#define _LINUX_MOUNT_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/nodemask.h> +#include <linux/spinlock.h> +#include <linux/seqlock.h> +#include <linux/atomic.h> + +struct super_block; +struct vfsmount; +struct dentry; +struct mnt_namespace; + +#define MNT_NOSUID 0x01 +#define MNT_NODEV 0x02 +#define MNT_NOEXEC 0x04 +#define MNT_NOATIME 0x08 +#define MNT_NODIRATIME 0x10 +#define MNT_RELATIME 0x20 +#define MNT_READONLY 0x40 /* does the user want this to be r/o? */ + +#define MNT_SHRINKABLE 0x100 +#define MNT_WRITE_HOLD 0x200 + +#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ +#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */ +/* + * MNT_SHARED_MASK is the set of flags that should be cleared when a + * mount becomes shared. Currently, this is only the flag that says a + * mount cannot be bind mounted, since this is how we create a mount + * that shares events with another mount. If you add a new MNT_* + * flag, consider how it interacts with shared mounts. 
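+ *
+ * (Editor's illustration of the intent rather than a quote of the
+ * propagation code: marking a mount shared conceptually does
+ *
+ *	mnt->mnt_flags &= ~MNT_SHARED_MASK;
+ * )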
+ */ +#define MNT_SHARED_MASK (MNT_UNBINDABLE) +#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE) + + +#define MNT_INTERNAL 0x4000 + +struct vfsmount { + struct dentry *mnt_root; /* root of the mounted tree */ + struct super_block *mnt_sb; /* pointer to superblock */ + int mnt_flags; +}; + +struct file; /* forward dec */ + +extern int mnt_want_write(struct vfsmount *mnt); +extern int mnt_want_write_file(struct file *file); +extern int mnt_clone_write(struct vfsmount *mnt); +extern void mnt_drop_write(struct vfsmount *mnt); +extern void mnt_drop_write_file(struct file *file); +extern void mntput(struct vfsmount *mnt); +extern struct vfsmount *mntget(struct vfsmount *mnt); +extern void mnt_pin(struct vfsmount *mnt); +extern void mnt_unpin(struct vfsmount *mnt); +extern int __mnt_is_readonly(struct vfsmount *mnt); + +struct file_system_type; +extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, + int flags, const char *name, + void *data); + +extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list); +extern void mark_mounts_for_expiry(struct list_head *mounts); + +extern dev_t name_to_dev_t(char *name); + +#endif /* _LINUX_MOUNT_H */ diff --git a/include/linux/mpage.h b/include/linux/mpage.h new file mode 100644 index 00000000..068a0c99 --- /dev/null +++ b/include/linux/mpage.h @@ -0,0 +1,24 @@ +/* + * include/linux/mpage.h + * + * Contains declarations related to preparing and submitting BIOS which contain + * multiple pagecache pages. + */ + +/* + * (And no, it doesn't do the #ifdef __MPAGE_H thing, and it doesn't do + * nested includes. Get it right in the .c file). + */ +#ifdef CONFIG_BLOCK + +struct writeback_control; + +int mpage_readpages(struct address_space *mapping, struct list_head *pages, + unsigned nr_pages, get_block_t get_block); +int mpage_readpage(struct page *page, get_block_t get_block); +int mpage_writepages(struct address_space *mapping, + struct writeback_control *wbc, get_block_t get_block); +int mpage_writepage(struct page *page, get_block_t *get_block, + struct writeback_control *wbc); + +#endif diff --git a/include/linux/mpi.h b/include/linux/mpi.h new file mode 100644 index 00000000..d02cca6c --- /dev/null +++ b/include/linux/mpi.h @@ -0,0 +1,144 @@ +/* mpi.h - Multi Precision Integers + * Copyright (C) 1994, 1996, 1998, 1999, + * 2000, 2001 Free Software Foundation, Inc. + * + * This file is part of GNUPG. + * + * GNUPG is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * GNUPG is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. 
+ * The GNU MP Library itself is published under the LGPL; + * however I decided to publish this code under the plain GPL. + */ + +#ifndef G10_MPI_H +#define G10_MPI_H + +#include <linux/types.h> + +/* DSI defines */ + +#define SHA1_DIGEST_LENGTH 20 + +/*end of DSI defines */ + +#define BYTES_PER_MPI_LIMB (BITS_PER_LONG / 8) +#define BITS_PER_MPI_LIMB BITS_PER_LONG + +typedef unsigned long int mpi_limb_t; +typedef signed long int mpi_limb_signed_t; + +struct gcry_mpi { + int alloced; /* array size (# of allocated limbs) */ + int nlimbs; /* number of valid limbs */ + int nbits; /* the real number of valid bits (info only) */ + int sign; /* indicates a negative number */ + unsigned flags; /* bit 0: array must be allocated in secure memory space */ + /* bit 1: not used */ + /* bit 2: the limb is a pointer to some m_alloced data */ + mpi_limb_t *d; /* array with the limbs */ +}; + +typedef struct gcry_mpi *MPI; + +#define mpi_get_nlimbs(a) ((a)->nlimbs) +#define mpi_is_neg(a) ((a)->sign) + +/*-- mpiutil.c --*/ +MPI mpi_alloc(unsigned nlimbs); +MPI mpi_alloc_secure(unsigned nlimbs); +MPI mpi_alloc_like(MPI a); +void mpi_free(MPI a); +int mpi_resize(MPI a, unsigned nlimbs); +int mpi_copy(MPI *copy, const MPI a); +void mpi_clear(MPI a); +int mpi_set(MPI w, MPI u); +int mpi_set_ui(MPI w, ulong u); +MPI mpi_alloc_set_ui(unsigned long u); +void mpi_m_check(MPI a); +void mpi_swap(MPI a, MPI b); + +/*-- mpicoder.c --*/ +MPI do_encode_md(const void *sha_buffer, unsigned nbits); +MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); +int mpi_fromstr(MPI val, const char *str); +u32 mpi_get_keyid(MPI a, u32 *keyid); +void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); +void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); +int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); + +#define log_mpidump g10_log_mpidump + +/*-- mpi-add.c --*/ +int mpi_add_ui(MPI w, MPI u, ulong v); +int mpi_add(MPI w, MPI u, MPI v); +int mpi_addm(MPI w, MPI u, MPI v, MPI m); +int mpi_sub_ui(MPI w, MPI u, ulong v); +int mpi_sub(MPI w, MPI u, MPI v); +int mpi_subm(MPI w, MPI u, MPI v, MPI m); + +/*-- mpi-mul.c --*/ +int mpi_mul_ui(MPI w, MPI u, ulong v); +int mpi_mul_2exp(MPI w, MPI u, ulong cnt); +int mpi_mul(MPI w, MPI u, MPI v); +int mpi_mulm(MPI w, MPI u, MPI v, MPI m); + +/*-- mpi-div.c --*/ +ulong mpi_fdiv_r_ui(MPI rem, MPI dividend, ulong divisor); +int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor); +int mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor); +int mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor); +int mpi_tdiv_r(MPI rem, MPI num, MPI den); +int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den); +int mpi_tdiv_q_2exp(MPI w, MPI u, unsigned count); +int mpi_divisible_ui(const MPI dividend, ulong divisor); + +/*-- mpi-gcd.c --*/ +int mpi_gcd(MPI g, const MPI a, const MPI b); + +/*-- mpi-pow.c --*/ +int mpi_pow(MPI w, MPI u, MPI v); +int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); + +/*-- mpi-mpow.c --*/ +int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI mod); + +/*-- mpi-cmp.c --*/ +int mpi_cmp_ui(MPI u, ulong v); +int mpi_cmp(MPI u, MPI v); + +/*-- mpi-scan.c --*/ +int mpi_getbyte(MPI a, unsigned idx); +void mpi_putbyte(MPI a, unsigned idx, int value); +unsigned mpi_trailing_zeros(MPI a); + +/*-- mpi-bit.c --*/ +void mpi_normalize(MPI a); +unsigned mpi_get_nbits(MPI a); +int mpi_test_bit(MPI a, unsigned n); +int mpi_set_bit(MPI a, unsigned n); +int mpi_set_highbit(MPI a, unsigned n); +void mpi_clear_highbit(MPI a, unsigned n); +void 
mpi_clear_bit(MPI a, unsigned n); +int mpi_rshift(MPI x, MPI a, unsigned n); + +/*-- mpi-inv.c --*/ +int mpi_invm(MPI x, MPI u, MPI v); + +#endif /*G10_MPI_H */ diff --git a/include/linux/mqueue.h b/include/linux/mqueue.h new file mode 100644 index 00000000..8b5a7961 --- /dev/null +++ b/include/linux/mqueue.h @@ -0,0 +1,53 @@ +/* Copyright (C) 2003 Krzysztof Benedyczak & Michal Wronski + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + It is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this software; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +#ifndef _LINUX_MQUEUE_H +#define _LINUX_MQUEUE_H + +#define MQ_PRIO_MAX 32768 +/* per-uid limit of kernel memory used by mqueue, in bytes */ +#define MQ_BYTES_MAX 819200 + +struct mq_attr { + long mq_flags; /* message queue flags */ + long mq_maxmsg; /* maximum number of messages */ + long mq_msgsize; /* maximum message size */ + long mq_curmsgs; /* number of messages currently queued */ + long __reserved[4]; /* ignored for input, zeroed for output */ +}; + +/* + * SIGEV_THREAD implementation: + * SIGEV_THREAD must be implemented in user space. If SIGEV_THREAD is passed + * to mq_notify, then + * - sigev_signo must be the file descriptor of an AF_NETLINK socket. It's not + * necessary that the socket is bound. + * - sigev_value.sival_ptr must point to a cookie that is NOTIFY_COOKIE_LEN + * bytes long. + * If the notification is triggered, then the cookie is sent to the netlink + * socket. The last byte of the cookie is replaced with the NOTIFY_?? codes: + * NOTIFY_WOKENUP if the notification got triggered, NOTIFY_REMOVED if it was + * removed, either due to a close() on the message queue fd or due to a + * mq_notify() that removed the notification. + */ +#define NOTIFY_NONE 0 +#define NOTIFY_WOKENUP 1 +#define NOTIFY_REMOVED 2 + +#define NOTIFY_COOKIE_LEN 32 + +#endif diff --git a/include/linux/mroute.h b/include/linux/mroute.h new file mode 100644 index 00000000..46caaf44 --- /dev/null +++ b/include/linux/mroute.h @@ -0,0 +1,251 @@ +#ifndef __LINUX_MROUTE_H +#define __LINUX_MROUTE_H + +#include <linux/sockios.h> +#include <linux/types.h> +#ifdef __KERNEL__ +#include <linux/in.h> +#endif + +/* + * Based on the MROUTING 3.5 defines primarily to keep + * source compatibility with BSD. + * + * See the mrouted code for the original history. 
+ * + * Protocol Independent Multicast (PIM) data structures included + * Carlos Picoto (cap@di.fc.ul.pt) + * + */ + +#define MRT_BASE 200 +#define MRT_INIT (MRT_BASE) /* Activate the kernel mroute code */ +#define MRT_DONE (MRT_BASE+1) /* Shutdown the kernel mroute */ +#define MRT_ADD_VIF (MRT_BASE+2) /* Add a virtual interface */ +#define MRT_DEL_VIF (MRT_BASE+3) /* Delete a virtual interface */ +#define MRT_ADD_MFC (MRT_BASE+4) /* Add a multicast forwarding entry */ +#define MRT_DEL_MFC (MRT_BASE+5) /* Delete a multicast forwarding entry */ +#define MRT_VERSION (MRT_BASE+6) /* Get the kernel multicast version */ +#define MRT_ASSERT (MRT_BASE+7) /* Activate PIM assert mode */ +#define MRT_PIM (MRT_BASE+8) /* enable PIM code */ +#define MRT_TABLE (MRT_BASE+9) /* Specify mroute table ID */ + +#define SIOCGETVIFCNT SIOCPROTOPRIVATE /* IP protocol privates */ +#define SIOCGETSGCNT (SIOCPROTOPRIVATE+1) +#define SIOCGETRPF (SIOCPROTOPRIVATE+2) + +#define MAXVIFS 32 +typedef unsigned long vifbitmap_t; /* User mode code depends on this lot */ +typedef unsigned short vifi_t; +#define ALL_VIFS ((vifi_t)(-1)) + +/* + * Same idea as select + */ + +#define VIFM_SET(n,m) ((m)|=(1<<(n))) +#define VIFM_CLR(n,m) ((m)&=~(1<<(n))) +#define VIFM_ISSET(n,m) ((m)&(1<<(n))) +#define VIFM_CLRALL(m) ((m)=0) +#define VIFM_COPY(mfrom,mto) ((mto)=(mfrom)) +#define VIFM_SAME(m1,m2) ((m1)==(m2)) + +/* + * Passed by mrouted for an MRT_ADD_VIF - again we use the + * mrouted 3.6 structures for compatibility + */ + +struct vifctl { + vifi_t vifc_vifi; /* Index of VIF */ + unsigned char vifc_flags; /* VIFF_ flags */ + unsigned char vifc_threshold; /* ttl limit */ + unsigned int vifc_rate_limit; /* Rate limiter values (NI) */ + union { + struct in_addr vifc_lcl_addr; /* Local interface address */ + int vifc_lcl_ifindex; /* Local interface index */ + }; + struct in_addr vifc_rmt_addr; /* IPIP tunnel addr */ +}; + +#define VIFF_TUNNEL 0x1 /* IPIP tunnel */ +#define VIFF_SRCRT 0x2 /* NI */ +#define VIFF_REGISTER 0x4 /* register vif */ +#define VIFF_USE_IFINDEX 0x8 /* use vifc_lcl_ifindex instead of + vifc_lcl_addr to find an interface */ + +/* + * Cache manipulation structures for mrouted and PIMd + */ + +struct mfcctl { + struct in_addr mfcc_origin; /* Origin of mcast */ + struct in_addr mfcc_mcastgrp; /* Group in question */ + vifi_t mfcc_parent; /* Where it arrived */ + unsigned char mfcc_ttls[MAXVIFS]; /* Where it is going */ + unsigned int mfcc_pkt_cnt; /* pkt count for src-grp */ + unsigned int mfcc_byte_cnt; + unsigned int mfcc_wrong_if; + int mfcc_expire; +}; + +/* + * Group count retrieval for mrouted + */ + +struct sioc_sg_req { + struct in_addr src; + struct in_addr grp; + unsigned long pktcnt; + unsigned long bytecnt; + unsigned long wrong_if; +}; + +/* + * To get vif packet counts + */ + +struct sioc_vif_req { + vifi_t vifi; /* Which iface */ + unsigned long icount; /* In packets */ + unsigned long ocount; /* Out packets */ + unsigned long ibytes; /* In bytes */ + unsigned long obytes; /* Out bytes */ +}; + +/* + * This is the format the mroute daemon expects to see IGMP control + * data. Magically happens to be like an IP packet as per the original + */ + +struct igmpmsg { + __u32 unused1,unused2; + unsigned char im_msgtype; /* What is this */ + unsigned char im_mbz; /* Must be zero */ + unsigned char im_vif; /* Interface (this ought to be a vifi_t!) 
*/ + unsigned char unused3; + struct in_addr im_src,im_dst; +}; + +/* + * That's all usermode folks + */ + +#ifdef __KERNEL__ +#include <linux/pim.h> +#include <net/sock.h> + +#ifdef CONFIG_IP_MROUTE +static inline int ip_mroute_opt(int opt) +{ + return (opt >= MRT_BASE) && (opt <= MRT_BASE + 10); +} +#else +static inline int ip_mroute_opt(int opt) +{ + return 0; +} +#endif + +#ifdef CONFIG_IP_MROUTE +extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); +extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); +extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); +extern int ip_mr_init(void); +#else +static inline +int ip_mroute_setsockopt(struct sock *sock, + int optname, char __user *optval, unsigned int optlen) +{ + return -ENOPROTOOPT; +} + +static inline +int ip_mroute_getsockopt(struct sock *sock, + int optname, char __user *optval, int __user *optlen) +{ + return -ENOPROTOOPT; +} + +static inline +int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) +{ + return -ENOIOCTLCMD; +} + +static inline int ip_mr_init(void) +{ + return 0; +} +#endif + +struct vif_device { + struct net_device *dev; /* Device we are using */ + unsigned long bytes_in,bytes_out; + unsigned long pkt_in,pkt_out; /* Statistics */ + unsigned long rate_limit; /* Traffic shaping (NI) */ + unsigned char threshold; /* TTL threshold */ + unsigned short flags; /* Control flags */ + __be32 local,remote; /* Addresses(remote for tunnels)*/ + int link; /* Physical interface index */ +}; + +#define VIFF_STATIC 0x8000 + +struct mfc_cache { + struct list_head list; + __be32 mfc_mcastgrp; /* Group the entry belongs to */ + __be32 mfc_origin; /* Source of packet */ + vifi_t mfc_parent; /* Source interface */ + int mfc_flags; /* Flags on line */ + + union { + struct { + unsigned long expires; + struct sk_buff_head unresolved; /* Unresolved buffers */ + } unres; + struct { + unsigned long last_assert; + int minvif; + int maxvif; + unsigned long bytes; + unsigned long pkt; + unsigned long wrong_if; + unsigned char ttls[MAXVIFS]; /* TTL thresholds */ + } res; + } mfc_un; + struct rcu_head rcu; +}; + +#define MFC_STATIC 1 +#define MFC_NOTIFY 2 + +#define MFC_LINES 64 + +#ifdef __BIG_ENDIAN +#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1)) +#else +#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1)) +#endif + +#endif + + +#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */ + +/* + * Pseudo messages used by mrouted + */ + +#define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */ +#define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */ +#define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */ + +#ifdef __KERNEL__ +struct rtmsg; +extern int ipmr_get_route(struct net *net, struct sk_buff *skb, + __be32 saddr, __be32 daddr, + struct rtmsg *rtm, int nowait); +#endif + +#endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h new file mode 100644 index 00000000..6d8c7251 --- /dev/null +++ b/include/linux/mroute6.h @@ -0,0 +1,269 @@ +#ifndef __LINUX_MROUTE6_H +#define __LINUX_MROUTE6_H + +#include <linux/types.h> +#include <linux/sockios.h> + +/* + * Based on the MROUTING 3.5 defines primarily to keep + * source compatibility with BSD. + * + * See the pim6sd code for the original history. 
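The MRT_* socket options and struct vifctl in mroute.h above are driven from user space by a multicast routing daemon over a raw IGMP socket, in the style of mrouted or pimd. A minimal sketch of the setup sequence follows; the address and VIF numbering are placeholders and error handling is reduced to perror().

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/mroute.h>

int main(void)
{
        int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
        int one = 1;
        struct vifctl vc;

        /* Become the (single) multicast routing daemon for this table. */
        if (setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0)
                perror("MRT_INIT");

        /* Register VIF 0 on the interface that owns 192.0.2.1. */
        memset(&vc, 0, sizeof(vc));
        vc.vifc_vifi = 0;
        vc.vifc_flags = 0;                      /* plain interface, not a tunnel */
        vc.vifc_threshold = 1;                  /* minimum TTL to forward */
        vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
        if (setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc)) < 0)
                perror("MRT_ADD_VIF");

        /* ... read IGMPMSG_NOCACHE upcalls from s and answer them with
         *     MRT_ADD_MFC entries ... */

        setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);
        return 0;
}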
+ * + * Protocol Independent Multicast (PIM) data structures included + * Carlos Picoto (cap@di.fc.ul.pt) + * + */ + +#define MRT6_BASE 200 +#define MRT6_INIT (MRT6_BASE) /* Activate the kernel mroute code */ +#define MRT6_DONE (MRT6_BASE+1) /* Shutdown the kernel mroute */ +#define MRT6_ADD_MIF (MRT6_BASE+2) /* Add a virtual interface */ +#define MRT6_DEL_MIF (MRT6_BASE+3) /* Delete a virtual interface */ +#define MRT6_ADD_MFC (MRT6_BASE+4) /* Add a multicast forwarding entry */ +#define MRT6_DEL_MFC (MRT6_BASE+5) /* Delete a multicast forwarding entry */ +#define MRT6_VERSION (MRT6_BASE+6) /* Get the kernel multicast version */ +#define MRT6_ASSERT (MRT6_BASE+7) /* Activate PIM assert mode */ +#define MRT6_PIM (MRT6_BASE+8) /* enable PIM code */ +#define MRT6_TABLE (MRT6_BASE+9) /* Specify mroute table ID */ + +#define SIOCGETMIFCNT_IN6 SIOCPROTOPRIVATE /* IP protocol privates */ +#define SIOCGETSGCNT_IN6 (SIOCPROTOPRIVATE+1) +#define SIOCGETRPF (SIOCPROTOPRIVATE+2) + +#define MAXMIFS 32 +typedef unsigned long mifbitmap_t; /* User mode code depends on this lot */ +typedef unsigned short mifi_t; +#define ALL_MIFS ((mifi_t)(-1)) + +#ifndef IF_SETSIZE +#define IF_SETSIZE 256 +#endif + +typedef __u32 if_mask; +#define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */ + +#if !defined(__KERNEL__) +#if !defined(DIV_ROUND_UP) +#define DIV_ROUND_UP(x,y) (((x) + ((y) - 1)) / (y)) +#endif +#endif + +typedef struct if_set { + if_mask ifs_bits[DIV_ROUND_UP(IF_SETSIZE, NIFBITS)]; +} if_set; + +#define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS))) +#define IF_CLR(n, p) ((p)->ifs_bits[(n)/NIFBITS] &= ~(1 << ((n) % NIFBITS))) +#define IF_ISSET(n, p) ((p)->ifs_bits[(n)/NIFBITS] & (1 << ((n) % NIFBITS))) +#define IF_COPY(f, t) bcopy(f, t, sizeof(*(f))) +#define IF_ZERO(p) bzero(p, sizeof(*(p))) + +/* + * Passed by mrouted for an MRT_ADD_MIF - again we use the + * mrouted 3.6 structures for compatibility + */ + +struct mif6ctl { + mifi_t mif6c_mifi; /* Index of MIF */ + unsigned char mif6c_flags; /* MIFF_ flags */ + unsigned char vifc_threshold; /* ttl limit */ + __u16 mif6c_pifi; /* the index of the physical IF */ + unsigned int vifc_rate_limit; /* Rate limiter values (NI) */ +}; + +#define MIFF_REGISTER 0x1 /* register vif */ + +/* + * Cache manipulation structures for mrouted and PIMd + */ + +struct mf6cctl { + struct sockaddr_in6 mf6cc_origin; /* Origin of mcast */ + struct sockaddr_in6 mf6cc_mcastgrp; /* Group in question */ + mifi_t mf6cc_parent; /* Where it arrived */ + struct if_set mf6cc_ifset; /* Where it is going */ +}; + +/* + * Group count retrieval for pim6sd + */ + +struct sioc_sg_req6 { + struct sockaddr_in6 src; + struct sockaddr_in6 grp; + unsigned long pktcnt; + unsigned long bytecnt; + unsigned long wrong_if; +}; + +/* + * To get vif packet counts + */ + +struct sioc_mif_req6 { + mifi_t mifi; /* Which iface */ + unsigned long icount; /* In packets */ + unsigned long ocount; /* Out packets */ + unsigned long ibytes; /* In bytes */ + unsigned long obytes; /* Out bytes */ +}; + +/* + * That's all usermode folks + */ + +#ifdef __KERNEL__ + +#include <linux/pim.h> +#include <linux/skbuff.h> /* for struct sk_buff_head */ +#include <net/net_namespace.h> + +#ifdef CONFIG_IPV6_MROUTE +static inline int ip6_mroute_opt(int opt) +{ + return (opt >= MRT6_BASE) && (opt <= MRT6_BASE + 10); +} +#else +static inline int ip6_mroute_opt(int opt) +{ + return 0; +} +#endif + +struct sock; + +#ifdef CONFIG_IPV6_MROUTE +extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, 
unsigned int); +extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); +extern int ip6_mr_input(struct sk_buff *skb); +extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); +extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); +extern int ip6_mr_init(void); +extern void ip6_mr_cleanup(void); +#else +static inline +int ip6_mroute_setsockopt(struct sock *sock, + int optname, char __user *optval, unsigned int optlen) +{ + return -ENOPROTOOPT; +} + +static inline +int ip6_mroute_getsockopt(struct sock *sock, + int optname, char __user *optval, int __user *optlen) +{ + return -ENOPROTOOPT; +} + +static inline +int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) +{ + return -ENOIOCTLCMD; +} + +static inline int ip6_mr_init(void) +{ + return 0; +} + +static inline void ip6_mr_cleanup(void) +{ + return; +} +#endif + +struct mif_device { + struct net_device *dev; /* Device we are using */ + unsigned long bytes_in,bytes_out; + unsigned long pkt_in,pkt_out; /* Statistics */ + unsigned long rate_limit; /* Traffic shaping (NI) */ + unsigned char threshold; /* TTL threshold */ + unsigned short flags; /* Control flags */ + int link; /* Physical interface index */ +}; + +#define VIFF_STATIC 0x8000 + +struct mfc6_cache { + struct list_head list; + struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */ + struct in6_addr mf6c_origin; /* Source of packet */ + mifi_t mf6c_parent; /* Source interface */ + int mfc_flags; /* Flags on line */ + + union { + struct { + unsigned long expires; + struct sk_buff_head unresolved; /* Unresolved buffers */ + } unres; + struct { + unsigned long last_assert; + int minvif; + int maxvif; + unsigned long bytes; + unsigned long pkt; + unsigned long wrong_if; + unsigned char ttls[MAXMIFS]; /* TTL thresholds */ + } res; + } mfc_un; +}; + +#define MFC_STATIC 1 +#define MFC_NOTIFY 2 + +#define MFC6_LINES 64 + +#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \ + (__force u32)(a)->s6_addr32[1] ^ \ + (__force u32)(a)->s6_addr32[2] ^ \ + (__force u32)(a)->s6_addr32[3] ^ \ + (__force u32)(g)->s6_addr32[0] ^ \ + (__force u32)(g)->s6_addr32[1] ^ \ + (__force u32)(g)->s6_addr32[2] ^ \ + (__force u32)(g)->s6_addr32[3]) % MFC6_LINES) + +#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */ + +#endif + +#ifdef __KERNEL__ +struct rtmsg; +extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, + struct rtmsg *rtm, int nowait); + +#ifdef CONFIG_IPV6_MROUTE +extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); +extern int ip6mr_sk_done(struct sock *sk); +#else +static inline struct sock *mroute6_socket(struct net *net, struct sk_buff *skb) +{ + return NULL; +} +static inline int ip6mr_sk_done(struct sock *sk) +{ + return 0; +} +#endif +#endif + +/* + * Structure used to communicate from kernel to multicast router. + * We'll overlay the structure onto an MLD header (not an IPv6 heder like igmpmsg{} + * used for IPv4 implementation). This is because this structure will be passed via an + * IPv6 raw socket, on which an application will only receiver the payload i.e the data after + * the IPv6 header and all the extension headers. 
(See section 3 of RFC 3542) + */ + +struct mrt6msg { +#define MRT6MSG_NOCACHE 1 +#define MRT6MSG_WRONGMIF 2 +#define MRT6MSG_WHOLEPKT 3 /* used for use level encap */ + __u8 im6_mbz; /* must be zero */ + __u8 im6_msgtype; /* what type of message */ + __u16 im6_mif; /* mif rec'd on */ + __u32 im6_pad; /* padding for 64 bit arch */ + struct in6_addr im6_src, im6_dst; +}; + +#endif diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h new file mode 100644 index 00000000..f38d4f0a --- /dev/null +++ b/include/linux/msdos_fs.h @@ -0,0 +1,185 @@ +#ifndef _LINUX_MSDOS_FS_H +#define _LINUX_MSDOS_FS_H + +#include <linux/types.h> +#include <linux/magic.h> +#include <asm/byteorder.h> + +/* + * The MS-DOS filesystem constants/structures + */ + +#define SECTOR_SIZE 512 /* sector size (bytes) */ +#define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */ +#define MSDOS_DPB (MSDOS_DPS) /* dir entries per block */ +#define MSDOS_DPB_BITS 4 /* log2(MSDOS_DPB) */ +#define MSDOS_DPS (SECTOR_SIZE / sizeof(struct msdos_dir_entry)) +#define MSDOS_DPS_BITS 4 /* log2(MSDOS_DPS) */ +#define MSDOS_LONGNAME 256 /* maximum name length */ +#define CF_LE_W(v) le16_to_cpu(v) +#define CF_LE_L(v) le32_to_cpu(v) +#define CT_LE_W(v) cpu_to_le16(v) +#define CT_LE_L(v) cpu_to_le32(v) + + +#define MSDOS_ROOT_INO 1 /* == MINIX_ROOT_INO */ +#define MSDOS_DIR_BITS 5 /* log2(sizeof(struct msdos_dir_entry)) */ + +/* directory limit */ +#define FAT_MAX_DIR_ENTRIES (65536) +#define FAT_MAX_DIR_SIZE (FAT_MAX_DIR_ENTRIES << MSDOS_DIR_BITS) + +#define ATTR_NONE 0 /* no attribute bits */ +#define ATTR_RO 1 /* read-only */ +#define ATTR_HIDDEN 2 /* hidden */ +#define ATTR_SYS 4 /* system */ +#define ATTR_VOLUME 8 /* volume label */ +#define ATTR_DIR 16 /* directory */ +#define ATTR_ARCH 32 /* archived */ + +/* attribute bits that are copied "as is" */ +#define ATTR_UNUSED (ATTR_VOLUME | ATTR_ARCH | ATTR_SYS | ATTR_HIDDEN) +/* bits that are used by the Windows 95/Windows NT extended FAT */ +#define ATTR_EXT (ATTR_RO | ATTR_HIDDEN | ATTR_SYS | ATTR_VOLUME) + +#define CASE_LOWER_BASE 8 /* base is lower case */ +#define CASE_LOWER_EXT 16 /* extension is lower case */ + +#define DELETED_FLAG 0xe5 /* marks file as deleted when in name[0] */ +#define IS_FREE(n) (!*(n) || *(n) == DELETED_FLAG) + +#define FAT_LFN_LEN 255 /* maximum long name length */ +#define MSDOS_NAME 11 /* maximum name length */ +#define MSDOS_SLOTS 21 /* max # of slots for short and long names */ +#define MSDOS_DOT ". " /* ".", padded to MSDOS_NAME chars */ +#define MSDOS_DOTDOT ".. " /* "..", padded to MSDOS_NAME chars */ + +#define FAT_FIRST_ENT(s, x) ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \ + MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x)) + +/* start of data cluster's entry (number of reserved clusters) */ +#define FAT_START_ENT 2 + +/* maximum number of clusters */ +#define MAX_FAT12 0xFF4 +#define MAX_FAT16 0xFFF4 +#define MAX_FAT32 0x0FFFFFF6 +#define MAX_FAT(s) (MSDOS_SB(s)->fat_bits == 32 ? MAX_FAT32 : \ + MSDOS_SB(s)->fat_bits == 16 ? 
MAX_FAT16 : MAX_FAT12) + +/* bad cluster mark */ +#define BAD_FAT12 0xFF7 +#define BAD_FAT16 0xFFF7 +#define BAD_FAT32 0x0FFFFFF7 + +/* standard EOF */ +#define EOF_FAT12 0xFFF +#define EOF_FAT16 0xFFFF +#define EOF_FAT32 0x0FFFFFFF + +#define FAT_ENT_FREE (0) +#define FAT_ENT_BAD (BAD_FAT32) +#define FAT_ENT_EOF (EOF_FAT32) + +#define FAT_FSINFO_SIG1 0x41615252 +#define FAT_FSINFO_SIG2 0x61417272 +#define IS_FSINFO(x) (le32_to_cpu((x)->signature1) == FAT_FSINFO_SIG1 \ + && le32_to_cpu((x)->signature2) == FAT_FSINFO_SIG2) + +struct __fat_dirent { + long d_ino; + __kernel_off_t d_off; + unsigned short d_reclen; + char d_name[256]; /* We must not include limits.h! */ +}; + +/* + * ioctl commands + */ +#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct __fat_dirent[2]) +#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct __fat_dirent[2]) +/* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */ +#define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32) +#define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32) +#define VFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32) + +struct fat_boot_sector { + __u8 ignored[3]; /* Boot strap short or near jump */ + __u8 system_id[8]; /* Name - can be used to special case + partition manager volumes */ + __u8 sector_size[2]; /* bytes per logical sector */ + __u8 sec_per_clus; /* sectors/cluster */ + __le16 reserved; /* reserved sectors */ + __u8 fats; /* number of FATs */ + __u8 dir_entries[2]; /* root directory entries */ + __u8 sectors[2]; /* number of sectors */ + __u8 media; /* media code */ + __le16 fat_length; /* sectors/FAT */ + __le16 secs_track; /* sectors per track */ + __le16 heads; /* number of heads */ + __le32 hidden; /* hidden sectors (unused) */ + __le32 total_sect; /* number of sectors (if sectors == 0) */ + + /* The following fields are only used by FAT32 */ + __le32 fat32_length; /* sectors/FAT */ + __le16 flags; /* bit 8: fat mirroring, low 4: active fat */ + __u8 version[2]; /* major, minor filesystem version */ + __le32 root_cluster; /* first cluster in root directory */ + __le16 info_sector; /* filesystem info sector */ + __le16 backup_boot; /* backup boot sector */ + __le16 reserved2[6]; /* Unused */ +}; + +struct fat_boot_fsinfo { + __le32 signature1; /* 0x41615252L */ + __le32 reserved1[120]; /* Nothing as far as I can tell */ + __le32 signature2; /* 0x61417272L */ + __le32 free_clusters; /* Free cluster count. 
-1 if unknown */ + __le32 next_cluster; /* Most recently allocated cluster */ + __le32 reserved2[4]; +}; + +struct fat_boot_bsx { + __u8 drive; /* drive number */ + __u8 reserved1; + __u8 signature; /* extended boot signature */ + __u8 vol_id[4]; /* volume ID */ + __u8 vol_label[11]; /* volume label */ + __u8 type[8]; /* file system type */ +}; +#define FAT16_BSX_OFFSET 36 /* offset of fat_boot_bsx in FAT12 and FAT16 */ +#define FAT32_BSX_OFFSET 64 /* offset of fat_boot_bsx in FAT32 */ + +struct msdos_dir_entry { + __u8 name[MSDOS_NAME];/* name and extension */ + __u8 attr; /* attribute bits */ + __u8 lcase; /* Case for base and extension */ + __u8 ctime_cs; /* Creation time, centiseconds (0-199) */ + __le16 ctime; /* Creation time */ + __le16 cdate; /* Creation date */ + __le16 adate; /* Last access date */ + __le16 starthi; /* High 16 bits of cluster in FAT32 */ + __le16 time,date,start;/* time, date and first cluster */ + __le32 size; /* file size (in bytes) */ +}; + +/* Up to 13 characters of the name */ +struct msdos_dir_slot { + __u8 id; /* sequence number for slot */ + __u8 name0_4[10]; /* first 5 characters in name */ + __u8 attr; /* attribute byte */ + __u8 reserved; /* always 0 */ + __u8 alias_checksum; /* checksum for 8.3 alias */ + __u8 name5_10[12]; /* 6 more characters in name */ + __le16 start; /* starting cluster number, 0 in long slots */ + __u8 name11_12[4]; /* last 2 characters in name */ +}; + +#ifdef __KERNEL__ +/* media of boot sector */ +static inline int fat_valid_media(u8 media) +{ + return 0xf8 <= media || media == 0xf0; +} +#endif /* !__KERNEL__ */ +#endif /* !_LINUX_MSDOS_FS_H */ diff --git a/include/linux/msg.h b/include/linux/msg.h new file mode 100644 index 00000000..56abf155 --- /dev/null +++ b/include/linux/msg.h @@ -0,0 +1,112 @@ +#ifndef _LINUX_MSG_H +#define _LINUX_MSG_H + +#include <linux/ipc.h> + +/* ipcs ctl commands */ +#define MSG_STAT 11 +#define MSG_INFO 12 + +/* msgrcv options */ +#define MSG_NOERROR 010000 /* no error if message is too big */ +#define MSG_EXCEPT 020000 /* recv any msg except of specified type.*/ + +/* Obsolete, used only for backwards compatibility and libc5 compiles */ +struct msqid_ds { + struct ipc_perm msg_perm; + struct msg *msg_first; /* first message on queue,unused */ + struct msg *msg_last; /* last message in queue,unused */ + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + unsigned long msg_lcbytes; /* Reuse junk fields for 32 bit */ + unsigned long msg_lqbytes; /* ditto */ + unsigned short msg_cbytes; /* current number of bytes on queue */ + unsigned short msg_qnum; /* number of messages in queue */ + unsigned short msg_qbytes; /* max number of bytes on queue */ + __kernel_ipc_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_ipc_pid_t msg_lrpid; /* last receive pid */ +}; + +/* Include the definition of msqid64_ds */ +#include <asm/msgbuf.h> + +/* message buffer for msgsnd and msgrcv calls */ +struct msgbuf { + long mtype; /* type of message */ + char mtext[1]; /* message text */ +}; + +/* buffer for msgctl calls IPC_INFO, MSG_INFO */ +struct msginfo { + int msgpool; + int msgmap; + int msgmax; + int msgmnb; + int msgmni; + int msgssz; + int msgtql; + unsigned short msgseg; +}; + +/* + * Scaling factor to compute msgmni: + * the memory dedicated to msg queues (msgmni * msgmnb) should occupy + * at most 1/MSG_MEM_SCALE of the lowmem (see the formula in ipc/msg.c): + * up to 8MB : msgmni = 16 (MSGMNI) + * 4 GB 
: msgmni = 8K + * more than 16 GB : msgmni = 32K (IPCMNI) + */ +#define MSG_MEM_SCALE 32 + +#define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */ +#define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */ +#define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */ + +/* unused */ +#define MSGPOOL (MSGMNI * MSGMNB / 1024) /* size in kbytes of message pool */ +#define MSGTQL MSGMNB /* number of system message headers */ +#define MSGMAP MSGMNB /* number of entries in message map */ +#define MSGSSZ 16 /* message segment size */ +#define __MSGSEG ((MSGPOOL * 1024) / MSGSSZ) /* max no. of segments */ +#define MSGSEG (__MSGSEG <= 0xffff ? __MSGSEG : 0xffff) + +#ifdef __KERNEL__ +#include <linux/list.h> + +/* one msg_msg structure for each message */ +struct msg_msg { + struct list_head m_list; + long m_type; + int m_ts; /* message text size */ + struct msg_msgseg* next; + void *security; + /* the actual message follows immediately */ +}; + +/* one msq_queue structure for each present queue on the system */ +struct msg_queue { + struct kern_ipc_perm q_perm; + time_t q_stime; /* last msgsnd time */ + time_t q_rtime; /* last msgrcv time */ + time_t q_ctime; /* last change time */ + unsigned long q_cbytes; /* current number of bytes on queue */ + unsigned long q_qnum; /* number of messages in queue */ + unsigned long q_qbytes; /* max number of bytes on queue */ + pid_t q_lspid; /* pid of last msgsnd */ + pid_t q_lrpid; /* last receive pid */ + + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; +}; + +/* Helper routines for sys_msgsnd and sys_msgrcv */ +extern long do_msgsnd(int msqid, long mtype, void __user *mtext, + size_t msgsz, int msgflg); +extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext, + size_t msgsz, long msgtyp, int msgflg); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_MSG_H */ diff --git a/include/linux/msi.h b/include/linux/msi.h new file mode 100644 index 00000000..ce93a341 --- /dev/null +++ b/include/linux/msi.h @@ -0,0 +1,62 @@ +#ifndef LINUX_MSI_H +#define LINUX_MSI_H + +#include <linux/kobject.h> +#include <linux/list.h> + +struct msi_msg { + u32 address_lo; /* low 32 bits of msi message address */ + u32 address_hi; /* high 32 bits of msi message address */ + u32 data; /* 16 bits of msi message data */ +}; + +/* Helper functions */ +struct irq_data; +struct msi_desc; +extern void mask_msi_irq(struct irq_data *data); +extern void unmask_msi_irq(struct irq_data *data); +extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); +extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); +extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); + +struct msi_desc { + struct { + __u8 is_msix : 1; + __u8 multiple: 3; /* log2 number of messages */ + __u8 maskbit : 1; /* mask-pending bit supported ? 
*/ + __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ + __u8 pos; /* Location of the msi capability */ + __u16 entry_nr; /* specific enabled entry */ + unsigned default_irq; /* default pre-assigned irq */ + } msi_attrib; + + u32 masked; /* mask bits */ + unsigned int irq; + struct list_head list; + + union { + void __iomem *mask_base; + u8 mask_pos; + }; + struct pci_dev *dev; + + /* Last set MSI message */ + struct msi_msg msg; + + struct kobject kobj; +}; + +/* + * The arch hook for setup up msi irqs + */ +int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); +void arch_teardown_msi_irq(unsigned int irq); +extern int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); +extern void arch_teardown_msi_irqs(struct pci_dev *dev); +extern int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); + + +#endif /* LINUX_MSI_H */ diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h new file mode 100644 index 00000000..fe722c1f --- /dev/null +++ b/include/linux/msm_mdp.h @@ -0,0 +1,79 @@ +/* include/linux/msm_mdp.h + * + * Copyright (C) 2007 Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _MSM_MDP_H_ +#define _MSM_MDP_H_ + +#include <linux/types.h> + +#define MSMFB_IOCTL_MAGIC 'm' +#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int) +#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int) + +enum { + MDP_RGB_565, /* RGB 565 planar */ + MDP_XRGB_8888, /* RGB 888 padded */ + MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */ + MDP_ARGB_8888, /* ARGB 888 */ + MDP_RGB_888, /* RGB 888 planar */ + MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */ + MDP_YCRYCB_H2V1, /* YCrYCb interleave */ + MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ + MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ + MDP_RGBA_8888, /* ARGB 888 */ + MDP_BGRA_8888, /* ABGR 888 */ + MDP_RGBX_8888, /* RGBX 888 */ + MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */ +}; + +enum { + PMEM_IMG, + FB_IMG, +}; + +/* flag values */ +#define MDP_ROT_NOP 0 +#define MDP_FLIP_LR 0x1 +#define MDP_FLIP_UD 0x2 +#define MDP_ROT_90 0x4 +#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR) +#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR) +#define MDP_DITHER 0x8 +#define MDP_BLUR 0x10 + +#define MDP_TRANSP_NOP 0xffffffff +#define MDP_ALPHA_NOP 0xff + +struct mdp_rect { + u32 x, y, w, h; +}; + +struct mdp_img { + u32 width, height, format, offset; + int memory_id; /* the file descriptor */ +}; + +struct mdp_blit_req { + struct mdp_img src; + struct mdp_img dst; + struct mdp_rect src_rect; + struct mdp_rect dst_rect; + u32 alpha, transp_mask, flags; +}; + +struct mdp_blit_req_list { + u32 count; + struct mdp_blit_req req[]; +}; + +#endif /* _MSM_MDP_H_ */ diff --git a/include/linux/mt5931_6622.h b/include/linux/mt5931_6622.h new file mode 100755 index 00000000..e7715119 --- /dev/null +++ b/include/linux/mt5931_6622.h @@ -0,0 +1,26 @@ + + +#ifndef __MT5931__H__ +#define __MT5931__H__ + +struct mt5931_sdio_eint_platform_data { + /* used for sdio eint. 
*/ + int sdio_eint; +}; + +struct mtk_mt5931_platform_data { + /* GPIO pin config */ + int pmu; + int rst; +}; + +struct mtk_mt6622_platform_data { + /* GPIO pin config */ + int pmu; + int rst; + + /* used for bt eint. */ + int bt_eint; +}; + +#endif diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h new file mode 100644 index 00000000..5bc50e08 --- /dev/null +++ b/include/linux/mtd/bbm.h @@ -0,0 +1,176 @@ +/* + * linux/include/linux/mtd/bbm.h + * + * NAND family Bad Block Management (BBM) header file + * - Bad Block Table (BBT) implementation + * + * Copyright © 2005 Samsung Electronics + * Kyungmin Park <kyungmin.park@samsung.com> + * + * Copyright © 2000-2005 + * Thomas Gleixner <tglx@linuxtronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ +#ifndef __LINUX_MTD_BBM_H +#define __LINUX_MTD_BBM_H + +/* The maximum number of NAND chips in an array */ +#define NAND_MAX_CHIPS 8 + +/** + * struct nand_bbt_descr - bad block table descriptor + * @options: options for this descriptor + * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE + * when bbt is searched, then we store the found bbts pages here. + * Its an array and supports up to 8 chips now + * @offs: offset of the pattern in the oob area of the page + * @veroffs: offset of the bbt version counter in the oob are of the page + * @version: version read from the bbt page during scan + * @len: length of the pattern, if 0 no pattern check is performed + * @maxblocks: maximum number of blocks to search for a bbt. This number of + * blocks is reserved at the end of the device where the tables are + * written. + * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than + * bad) block in the stored bbt + * @pattern: pattern to identify bad block table or factory marked good / + * bad blocks, can be NULL, if len = 0 + * + * Descriptor for the bad block table marker and the descriptor for the + * pattern which identifies good and bad blocks. The assumption is made + * that the pattern and the version count are always located in the oob area + * of the first block. 
+ */ +struct nand_bbt_descr { + int options; + int pages[NAND_MAX_CHIPS]; + int offs; + int veroffs; + uint8_t version[NAND_MAX_CHIPS]; + int len; + int maxblocks; + int reserved_block_code; + uint8_t *pattern; + int page_offset[2]; /* used to record bad block signature in which pages.*/ +}; + +/* Options for the bad block table descriptors */ + +/* The number of bits used per block in the bbt on the device */ +#define NAND_BBT_NRBITS_MSK 0x0000000F +#define NAND_BBT_1BIT 0x00000001 +#define NAND_BBT_2BIT 0x00000002 +#define NAND_BBT_4BIT 0x00000004 +#define NAND_BBT_8BIT 0x00000008 +/* The bad block table is in the last good block of the device */ +#define NAND_BBT_LASTBLOCK 0x00000010 +/* The bbt is at the given page, else we must scan for the bbt */ +#define NAND_BBT_ABSPAGE 0x00000020 +/* The bbt is at the given page, else we must scan for the bbt */ +#define NAND_BBT_SEARCH 0x00000040 +/* bbt is stored per chip on multichip devices */ +#define NAND_BBT_PERCHIP 0x00000080 +/* bbt has a version counter at offset veroffs */ +#define NAND_BBT_VERSION 0x00000100 +/* Create a bbt if none exists */ +#define NAND_BBT_CREATE 0x00000200 +/* + * Create an empty BBT with no vendor information. Vendor's information may be + * unavailable, for example, if the NAND controller has a different data and OOB + * layout or if this information is already purged. Must be used in conjunction + * with NAND_BBT_CREATE. + */ +#define NAND_BBT_CREATE_EMPTY 0x00000400 +/* Search good / bad pattern through all pages of a block */ +#define NAND_BBT_SCANALLPAGES 0x00000800 +/* Scan block empty during good / bad block scan */ +#define NAND_BBT_SCANEMPTY 0x00001000 +/* Write bbt if neccecary */ +#define NAND_BBT_WRITE 0x00002000 +/* Read and write back block contents when writing bbt */ +#define NAND_BBT_SAVECONTENT 0x00004000 +/* Search good / bad pattern on the first and the second page */ +#define NAND_BBT_SCAN2NDPAGE 0x00008000 +/* Search good / bad pattern on the last page of the eraseblock */ +#define NAND_BBT_SCANLASTPAGE 0x00010000 +/* + * Use a flash based bad block table. By default, OOB identifier is saved in + * OOB area. This option is passed to the default bad block table function. + */ +#define NAND_BBT_USE_FLASH 0x00020000 +/* Do not store flash based bad block table in OOB area; store it in-band */ +#define NAND_BBT_NO_OOB 0x00040000 +/* + * Do not write new bad block markers to OOB; useful, e.g., when ECC covers + * entire spare area. Must be used with NAND_BBT_USE_FLASH. + */ +#define NAND_BBT_NO_OOB_BBM 0x00080000 + +/* + * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr + * was allocated dynamicaly and must be freed in nand_release(). Has no meaning + * in nand_chip.bbt_options. 
+ */ +#define NAND_BBT_DYNAMICSTRUCT 0x80000000 + +/* The maximum number of blocks to scan for a bbt */ +#define NAND_BBT_SCAN_MAXBLOCKS 4 + +/* + * Constants for oob configuration + */ +#define NAND_SMALL_BADBLOCK_POS 5 +#define NAND_LARGE_BADBLOCK_POS 0 +#define ONENAND_BADBLOCK_POS 0 + +/* + * Bad block scanning errors + */ +#define ONENAND_BBT_READ_ERROR 1 +#define ONENAND_BBT_READ_ECC_ERROR 2 +#define ONENAND_BBT_READ_FATAL_ERROR 4 + +/** + * struct bbm_info - [GENERIC] Bad Block Table data structure + * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @badblockpos: [INTERN] position of the bad block marker in the oob area + * @options: options for this descriptor + * @bbt: [INTERN] bad block table pointer + * @isbad_bbt: function to determine if a block is bad + * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for + * initial bad block scan + * @priv: [OPTIONAL] pointer to private bbm date + */ +struct bbm_info { + int bbt_erase_shift; + int badblockpos; + int options; + + uint8_t *bbt; + + int (*isbad_bbt)(struct mtd_info *mtd, loff_t ofs, int allowbbt); + + /* TODO Add more NAND specific fileds */ + struct nand_bbt_descr *badblock_pattern; + + void *priv; +}; + +/* OneNAND BBT interface */ +extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd); +extern int onenand_default_bbt(struct mtd_info *mtd); + +#endif /* __LINUX_MTD_BBM_H */ diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h new file mode 100644 index 00000000..ed270bd2 --- /dev/null +++ b/include/linux/mtd/blktrans.h @@ -0,0 +1,94 @@ +/* + * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
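Drivers that override the default bad-block handling do so by filling in the nand_bbt_descr structures documented in bbm.h above: one descriptor for the factory-marker scan and, optionally, one for a flash-based table. The sketch below is modelled on the in-tree memory-based small-page scan pattern and main-BBT descriptor; the my_ symbol names are invented for illustration.

#include <linux/mtd/bbm.h>

/* Factory bad-block marker: 0xff in the OOB byte at offset 5 means "good". */
static uint8_t my_scan_ff_pattern[] = { 0xff };

static struct nand_bbt_descr my_smallpage_scan = {
        .options        = NAND_BBT_SCAN2NDPAGE,         /* check 1st and 2nd page */
        .offs           = NAND_SMALL_BADBLOCK_POS,      /* OOB byte 5 */
        .len            = 1,
        .pattern        = my_scan_ff_pattern,
};

/* A flash-based BBT kept in the last good blocks of the device, with a
 * version byte so that main and mirror tables can be reconciled. */
static uint8_t my_bbt_pattern[] = { 'B', 'b', 't', '0' };

static struct nand_bbt_descr my_main_bbt = {
        .options        = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE |
                          NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION,
        .offs           = 8,
        .veroffs        = 12,
        .len            = 4,
        .maxblocks      = NAND_BBT_SCAN_MAXBLOCKS,
        .pattern        = my_bbt_pattern,
};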
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_TRANS_H__ +#define __MTD_TRANS_H__ + +#include <linux/mutex.h> +#include <linux/kref.h> +#include <linux/sysfs.h> + +struct hd_geometry; +struct mtd_info; +struct mtd_blktrans_ops; +struct file; +struct inode; + +struct mtd_blktrans_dev { + struct mtd_blktrans_ops *tr; + struct list_head list; + struct mtd_info *mtd; + struct mutex lock; + int devnum; + bool bg_stop; + unsigned long size; + int readonly; + int open; + struct kref ref; + struct gendisk *disk; + struct attribute_group *disk_attributes; + struct task_struct *thread; + struct request_queue *rq; + spinlock_t queue_lock; + void *priv; + fmode_t file_mode; +}; + +struct mtd_blktrans_ops { + char *name; + int major; + int part_bits; + int blksize; + int blkshift; + + /* Access functions */ + int (*readsect)(struct mtd_blktrans_dev *dev, + unsigned long block, char *buffer); + int (*writesect)(struct mtd_blktrans_dev *dev, + unsigned long block, char *buffer); + int (*discard)(struct mtd_blktrans_dev *dev, + unsigned long block, unsigned nr_blocks); + void (*background)(struct mtd_blktrans_dev *dev); + + /* Block layer ioctls */ + int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo); + int (*flush)(struct mtd_blktrans_dev *dev); + + /* Called with mtd_table_mutex held; no race with add/remove */ + int (*open)(struct mtd_blktrans_dev *dev); + int (*release)(struct mtd_blktrans_dev *dev); + + /* Called on {de,}registration and on subsequent addition/removal + of devices, with mtd_table_mutex held. */ + void (*add_mtd)(struct mtd_blktrans_ops *tr, struct mtd_info *mtd); + void (*remove_dev)(struct mtd_blktrans_dev *dev); + + struct list_head devs; + struct list_head list; + struct module *owner; +}; + +extern int register_mtd_blktrans(struct mtd_blktrans_ops *tr); +extern int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr); +extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); +extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); +extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev); + + +#endif /* __MTD_TRANS_H__ */ diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h new file mode 100644 index 00000000..37ef6b19 --- /dev/null +++ b/include/linux/mtd/cfi.h @@ -0,0 +1,564 @@ +/* + * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
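blktrans.h above is the layer that exposes an MTD device as a block device: a translation registers one mtd_blktrans_ops, and its add_mtd() hook is then called for every existing and future MTD device, where it allocates an mtd_blktrans_dev and hands it to add_mtd_blktrans_dev(). A skeletal read-only translation in the spirit of mtdblock_ro is sketched below; the my_ names are invented and the sector read is stubbed out rather than tied to a particular mtd read API.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>

static int my_readsect(struct mtd_blktrans_dev *dev,
                       unsigned long block, char *buf)
{
        /* A real translation would read 512 bytes at offset block << 9
         * from dev->mtd here; returning zeroes keeps the sketch small. */
        memset(buf, 0, 512);
        return 0;
}

static void my_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
        struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return;

        dev->mtd = mtd;
        dev->devnum = mtd->index;
        dev->size = mtd->size >> 9;     /* in 512-byte sectors */
        dev->tr = tr;
        dev->readonly = 1;

        if (add_mtd_blktrans_dev(dev))
                kfree(dev);
}

static void my_remove_dev(struct mtd_blktrans_dev *dev)
{
        del_mtd_blktrans_dev(dev);
}

static struct mtd_blktrans_ops my_tr = {
        .name           = "myblk",
        .major          = 0,            /* dynamic major */
        .part_bits      = 0,
        .blksize        = 512,
        .blkshift       = 9,
        .readsect       = my_readsect,
        .add_mtd        = my_add_mtd,
        .remove_dev     = my_remove_dev,
        .owner          = THIS_MODULE,
};

static int __init my_init(void)
{
        return register_mtd_blktrans(&my_tr);
}
module_init(my_init);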
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_CFI_H__ +#define __MTD_CFI_H__ + +#include <linux/delay.h> +#include <linux/types.h> +#include <linux/bug.h> +#include <linux/interrupt.h> +#include <linux/mtd/flashchip.h> +#include <linux/mtd/map.h> +#include <linux/mtd/cfi_endian.h> +#include <linux/mtd/xip.h> + +#ifdef CONFIG_MTD_CFI_I1 +#define cfi_interleave(cfi) 1 +#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1) +#else +#define cfi_interleave_is_1(cfi) (0) +#endif + +#ifdef CONFIG_MTD_CFI_I2 +# ifdef cfi_interleave +# undef cfi_interleave +# define cfi_interleave(cfi) ((cfi)->interleave) +# else +# define cfi_interleave(cfi) 2 +# endif +#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2) +#else +#define cfi_interleave_is_2(cfi) (0) +#endif + +#ifdef CONFIG_MTD_CFI_I4 +# ifdef cfi_interleave +# undef cfi_interleave +# define cfi_interleave(cfi) ((cfi)->interleave) +# else +# define cfi_interleave(cfi) 4 +# endif +#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4) +#else +#define cfi_interleave_is_4(cfi) (0) +#endif + +#ifdef CONFIG_MTD_CFI_I8 +# ifdef cfi_interleave +# undef cfi_interleave +# define cfi_interleave(cfi) ((cfi)->interleave) +# else +# define cfi_interleave(cfi) 8 +# endif +#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8) +#else +#define cfi_interleave_is_8(cfi) (0) +#endif + +#ifndef cfi_interleave +#warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work. +static inline int cfi_interleave(void *cfi) +{ + BUG(); + return 0; +} +#endif + +static inline int cfi_interleave_supported(int i) +{ + switch (i) { +#ifdef CONFIG_MTD_CFI_I1 + case 1: +#endif +#ifdef CONFIG_MTD_CFI_I2 + case 2: +#endif +#ifdef CONFIG_MTD_CFI_I4 + case 4: +#endif +#ifdef CONFIG_MTD_CFI_I8 + case 8: +#endif + return 1; + + default: + return 0; + } +} + + +/* NB: these values must represents the number of bytes needed to meet the + * device type (x8, x16, x32). Eg. a 32 bit device is 4 x 8 bytes. + * These numbers are used in calculations. + */ +#define CFI_DEVICETYPE_X8 (8 / 8) +#define CFI_DEVICETYPE_X16 (16 / 8) +#define CFI_DEVICETYPE_X32 (32 / 8) +#define CFI_DEVICETYPE_X64 (64 / 8) + + +/* Device Interface Code Assignments from the "Common Flash Memory Interface + * Publication 100" dated December 1, 2001. + */ +#define CFI_INTERFACE_X8_ASYNC 0x0000 +#define CFI_INTERFACE_X16_ASYNC 0x0001 +#define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002 +#define CFI_INTERFACE_X32_ASYNC 0x0003 +#define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005 +#define CFI_INTERFACE_NOT_ALLOWED 0xffff + + +/* NB: We keep these structures in memory in HOST byteorder, except + * where individually noted. 
+ */ + +/* Basic Query Structure */ +struct cfi_ident { + uint8_t qry[3]; + uint16_t P_ID; + uint16_t P_ADR; + uint16_t A_ID; + uint16_t A_ADR; + uint8_t VccMin; + uint8_t VccMax; + uint8_t VppMin; + uint8_t VppMax; + uint8_t WordWriteTimeoutTyp; + uint8_t BufWriteTimeoutTyp; + uint8_t BlockEraseTimeoutTyp; + uint8_t ChipEraseTimeoutTyp; + uint8_t WordWriteTimeoutMax; + uint8_t BufWriteTimeoutMax; + uint8_t BlockEraseTimeoutMax; + uint8_t ChipEraseTimeoutMax; + uint8_t DevSize; + uint16_t InterfaceDesc; + uint16_t MaxBufWriteSize; + uint8_t NumEraseRegions; + uint32_t EraseRegionInfo[0]; /* Not host ordered */ +} __attribute__((packed)); + +/* Extended Query Structure for both PRI and ALT */ + +struct cfi_extquery { + uint8_t pri[3]; + uint8_t MajorVersion; + uint8_t MinorVersion; +} __attribute__((packed)); + +/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */ + +struct cfi_pri_intelext { + uint8_t pri[3]; + uint8_t MajorVersion; + uint8_t MinorVersion; + uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature + block follows - FIXME - not currently supported */ + uint8_t SuspendCmdSupport; + uint16_t BlkStatusRegMask; + uint8_t VccOptimal; + uint8_t VppOptimal; + uint8_t NumProtectionFields; + uint16_t ProtRegAddr; + uint8_t FactProtRegSize; + uint8_t UserProtRegSize; + uint8_t extra[0]; +} __attribute__((packed)); + +struct cfi_intelext_otpinfo { + uint32_t ProtRegAddr; + uint16_t FactGroups; + uint8_t FactProtRegSize; + uint16_t UserGroups; + uint8_t UserProtRegSize; +} __attribute__((packed)); + +struct cfi_intelext_blockinfo { + uint16_t NumIdentBlocks; + uint16_t BlockSize; + uint16_t MinBlockEraseCycles; + uint8_t BitsPerCell; + uint8_t BlockCap; +} __attribute__((packed)); + +struct cfi_intelext_regioninfo { + uint16_t NumIdentPartitions; + uint8_t NumOpAllowed; + uint8_t NumOpAllowedSimProgMode; + uint8_t NumOpAllowedSimEraMode; + uint8_t NumBlockTypes; + struct cfi_intelext_blockinfo BlockTypes[1]; +} __attribute__((packed)); + +struct cfi_intelext_programming_regioninfo { + uint8_t ProgRegShift; + uint8_t Reserved1; + uint8_t ControlValid; + uint8_t Reserved2; + uint8_t ControlInvalid; + uint8_t Reserved3; +} __attribute__((packed)); + +/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */ + +struct cfi_pri_amdstd { + uint8_t pri[3]; + uint8_t MajorVersion; + uint8_t MinorVersion; + uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */ + uint8_t EraseSuspend; + uint8_t BlkProt; + uint8_t TmpBlkUnprotect; + uint8_t BlkProtUnprot; + uint8_t SimultaneousOps; + uint8_t BurstMode; + uint8_t PageMode; + uint8_t VppMin; + uint8_t VppMax; + uint8_t TopBottom; +} __attribute__((packed)); + +/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ + +struct cfi_pri_atmel { + uint8_t pri[3]; + uint8_t MajorVersion; + uint8_t MinorVersion; + uint8_t Features; + uint8_t BottomBoot; + uint8_t BurstMode; + uint8_t PageMode; +} __attribute__((packed)); + +struct cfi_pri_query { + uint8_t NumFields; + uint32_t ProtField[1]; /* Not host ordered */ +} __attribute__((packed)); + +struct cfi_bri_query { + uint8_t PageModeReadCap; + uint8_t NumFields; + uint32_t ConfField[1]; /* Not host ordered */ +} __attribute__((packed)); + +#define P_ID_NONE 0x0000 +#define P_ID_INTEL_EXT 0x0001 +#define P_ID_AMD_STD 0x0002 +#define P_ID_INTEL_STD 0x0003 +#define P_ID_AMD_EXT 0x0004 +#define P_ID_WINBOND 0x0006 +#define P_ID_ST_ADV 0x0020 +#define P_ID_MITSUBISHI_STD 0x0100 +#define P_ID_MITSUBISHI_EXT 0x0101 +#define 
P_ID_SST_PAGE 0x0102 +#define P_ID_SST_OLD 0x0701 +#define P_ID_INTEL_PERFORMANCE 0x0200 +#define P_ID_INTEL_DATA 0x0210 +#define P_ID_RESERVED 0xffff + + +#define CFI_MODE_CFI 1 +#define CFI_MODE_JEDEC 0 + +struct cfi_private { + uint16_t cmdset; + void *cmdset_priv; + int interleave; + int device_type; + int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */ + int addr_unlock1; + int addr_unlock2; + struct mtd_info *(*cmdset_setup)(struct map_info *); + struct cfi_ident *cfiq; /* For now only one. We insist that all devs + must be of the same type. */ + int mfr, id; + int numchips; + map_word sector_erase_cmd; + unsigned long chipshift; /* Because they're of the same type */ + const char *im_name; /* inter_module name for cmdset_setup */ + struct flchip chips[0]; /* per-chip data structure for each chip */ +}; + +/* + * Returns the command address according to the given geometry. + */ +static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, + struct map_info *map, struct cfi_private *cfi) +{ + unsigned bankwidth = map_bankwidth(map); + unsigned interleave = cfi_interleave(cfi); + unsigned type = cfi->device_type; + uint32_t addr; + + addr = (cmd_ofs * type) * interleave; + + /* Modify the unlock address if we are in compatibility mode. + * For 16bit devices on 8 bit busses + * and 32bit devices on 16 bit busses + * set the low bit of the alternating bit sequence of the address. + */ + if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa)) + addr |= (type >> 1)*interleave; + + return addr; +} + +/* + * Transforms the CFI command for the given geometry (bus width & interleave). + * It looks too long to be inline, but in the common case it should almost all + * get optimised away. + */ +static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi) +{ + map_word val = { {0} }; + int wordwidth, words_per_bus, chip_mode, chips_per_word; + unsigned long onecmd; + int i; + + /* We do it this way to give the compiler a fighting chance + of optimising away all the crap for 'bankwidth' larger than + an unsigned long, in the common case where that support is + disabled */ + if (map_bankwidth_is_large(map)) { + wordwidth = sizeof(unsigned long); + words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1 + } else { + wordwidth = map_bankwidth(map); + words_per_bus = 1; + } + + chip_mode = map_bankwidth(map) / cfi_interleave(cfi); + chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map); + + /* First, determine what the bit-pattern should be for a single + device, according to chip mode and endianness... 
*/ + switch (chip_mode) { + default: BUG(); + case 1: + onecmd = cmd; + break; + case 2: + onecmd = cpu_to_cfi16(map, cmd); + break; + case 4: + onecmd = cpu_to_cfi32(map, cmd); + break; + } + + /* Now replicate it across the size of an unsigned long, or + just to the bus width as appropriate */ + switch (chips_per_word) { + default: BUG(); +#if BITS_PER_LONG >= 64 + case 8: + onecmd |= (onecmd << (chip_mode * 32)); +#endif + case 4: + onecmd |= (onecmd << (chip_mode * 16)); + case 2: + onecmd |= (onecmd << (chip_mode * 8)); + case 1: + ; + } + + /* And finally, for the multi-word case, replicate it + in all words in the structure */ + for (i=0; i < words_per_bus; i++) { + val.x[i] = onecmd; + } + + return val; +} +#define CMD(x) cfi_build_cmd((x), map, cfi) + + +static inline unsigned long cfi_merge_status(map_word val, struct map_info *map, + struct cfi_private *cfi) +{ + int wordwidth, words_per_bus, chip_mode, chips_per_word; + unsigned long onestat, res = 0; + int i; + + /* We do it this way to give the compiler a fighting chance + of optimising away all the crap for 'bankwidth' larger than + an unsigned long, in the common case where that support is + disabled */ + if (map_bankwidth_is_large(map)) { + wordwidth = sizeof(unsigned long); + words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1 + } else { + wordwidth = map_bankwidth(map); + words_per_bus = 1; + } + + chip_mode = map_bankwidth(map) / cfi_interleave(cfi); + chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map); + + onestat = val.x[0]; + /* Or all status words together */ + for (i=1; i < words_per_bus; i++) { + onestat |= val.x[i]; + } + + res = onestat; + switch(chips_per_word) { + default: BUG(); +#if BITS_PER_LONG >= 64 + case 8: + res |= (onestat >> (chip_mode * 32)); +#endif + case 4: + res |= (onestat >> (chip_mode * 16)); + case 2: + res |= (onestat >> (chip_mode * 8)); + case 1: + ; + } + + /* Last, determine what the bit-pattern should be for a single + device, according to chip mode and endianness... */ + switch (chip_mode) { + case 1: + break; + case 2: + res = cfi16_to_cpu(map, res); + break; + case 4: + res = cfi32_to_cpu(map, res); + break; + default: BUG(); + } + return res; +} + +#define MERGESTATUS(x) cfi_merge_status((x), map, cfi) + + +/* + * Sends a CFI command to a bank of flash for the given geometry. + * + * Returns the offset in flash where the command was written. + * If prev_val is non-null, it will be set to the value at the command address, + * before the command was written. 
+ */ +static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base, + struct map_info *map, struct cfi_private *cfi, + int type, map_word *prev_val) +{ + map_word val; + uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi); + val = cfi_build_cmd(cmd, map, cfi); + + if (prev_val) + *prev_val = map_read(map, addr); + + map_write(map, val, addr); + + return addr - base; +} + +static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) +{ + map_word val = map_read(map, addr); + + if (map_bankwidth_is_1(map)) { + return val.x[0]; + } else if (map_bankwidth_is_2(map)) { + return cfi16_to_cpu(map, val.x[0]); + } else { + /* No point in a 64-bit byteswap since that would just be + swapping the responses from different chips, and we are + only interested in one chip (a representative sample) */ + return cfi32_to_cpu(map, val.x[0]); + } +} + +static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr) +{ + map_word val = map_read(map, addr); + + if (map_bankwidth_is_1(map)) { + return val.x[0] & 0xff; + } else if (map_bankwidth_is_2(map)) { + return cfi16_to_cpu(map, val.x[0]); + } else { + /* No point in a 64-bit byteswap since that would just be + swapping the responses from different chips, and we are + only interested in one chip (a representative sample) */ + return cfi32_to_cpu(map, val.x[0]); + } +} + +static inline void cfi_udelay(int us) +{ + if (us >= 1000) { + msleep((us+999)/1000); + } else { + udelay(us); + cond_resched(); + } +} + +int __xipram cfi_qry_present(struct map_info *map, __u32 base, + struct cfi_private *cfi); +int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, + struct cfi_private *cfi); +void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map, + struct cfi_private *cfi); + +struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size, + const char* name); +struct cfi_fixup { + uint16_t mfr; + uint16_t id; + void (*fixup)(struct mtd_info *mtd); +}; + +#define CFI_MFR_ANY 0xFFFF +#define CFI_ID_ANY 0xFFFF +#define CFI_MFR_CONTINUATION 0x007F + +#define CFI_MFR_AMD 0x0001 +#define CFI_MFR_AMIC 0x0037 +#define CFI_MFR_ATMEL 0x001F +#define CFI_MFR_EON 0x001C +#define CFI_MFR_FUJITSU 0x0004 +#define CFI_MFR_HYUNDAI 0x00AD +#define CFI_MFR_INTEL 0x0089 +#define CFI_MFR_MACRONIX 0x00C2 +#define CFI_MFR_NEC 0x0010 +#define CFI_MFR_PMC 0x009D +#define CFI_MFR_SAMSUNG 0x00EC +#define CFI_MFR_SHARP 0x00B0 +#define CFI_MFR_SST 0x00BF +#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ +#define CFI_MFR_TOSHIBA 0x0098 +#define CFI_MFR_WINBOND 0x00DA + +void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); + +typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip, + unsigned long adr, int len, void *thunk); + +int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob, + loff_t ofs, size_t len, void *thunk); + + +#endif /* __MTD_CFI_H__ */ diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h new file mode 100644 index 00000000..b97a6250 --- /dev/null +++ b/include/linux/mtd/cfi_endian.h @@ -0,0 +1,53 @@ +/* + * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
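cfi_send_gen_cmd() above is the primitive the command-set drivers build their datasheet sequences from; because cfi_build_cmd_addr() and cfi_build_cmd() fold in bus width, interleave and device type, callers can pass the nominal command values directly. As an illustration, an AMD/Fujitsu-style unlock-plus-autoselect sequence (in the spirit of the cmdset-0002 and JEDEC probe code) might look like the fragment below; my_read_amd_ids is an invented helper and assumes the usual map/cfi/base context of a CFI chip driver.

#include <linux/mtd/cfi.h>

static int my_read_amd_ids(struct map_info *map, struct cfi_private *cfi,
                           uint32_t base, int *mfr, int *id)
{
        int ofs_factor = cfi->interleave * cfi->device_type;

        /* Unlock cycles 1 and 2, then the autoselect command. */
        cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);

        /* Manufacturer and device ID live at "addresses" 0 and 1. */
        *mfr = cfi_read_query16(map, base);
        *id  = cfi_read_query16(map, base + ofs_factor);

        /* Back to read/reset mode. */
        cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
        return 0;
}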
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <asm/byteorder.h> + +#define CFI_HOST_ENDIAN 1 +#define CFI_LITTLE_ENDIAN 2 +#define CFI_BIG_ENDIAN 3 + +#if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP) +#define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN +#elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP) +#define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN +#elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP) +#define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN +#else +#error No CFI endianness defined +#endif + +#define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN) +#define cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN) +#define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN) +#define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN) + +#define cpu_to_cfi8(map, x) (x) +#define cfi8_to_cpu(map, x) (x) +#define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x)) +#define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x)) +#define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x)) +#define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x)) +#define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x)) +#define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x)) + +#define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x)) +#define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x)) +#define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x)) +#define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x)) diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h new file mode 100644 index 00000000..ccdbe93a --- /dev/null +++ b/include/linux/mtd/concat.h @@ -0,0 +1,34 @@ +/* + * MTD device concatenation layer definitions + * + * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef MTD_CONCAT_H +#define MTD_CONCAT_H + + +struct mtd_info *mtd_concat_create( + struct mtd_info *subdev[], /* subdevices to concatenate */ + int num_devs, /* number of subdevices */ + const char *name); /* name for the new device */ + +void mtd_concat_destroy(struct mtd_info *mtd); + +#endif + diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h new file mode 100644 index 00000000..0f6fea73 --- /dev/null +++ b/include/linux/mtd/doc2000.h @@ -0,0 +1,206 @@ +/* + * Linux driver for Disk-On-Chip devices + * + * Copyright © 1999 Machine Vision Holdings, Inc. 
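concat.h above lets a board map driver present several discrete chips as a single logical MTD. Typical use is a simple gather-and-wrap step after the individual chips have been probed; in the sketch below the my_ names and the "board-nor" device name are invented, and registration goes through the generic mtd_device_register() path.

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

/* Two chips probed earlier by the map driver (e.g. via do_map_probe()). */
static struct mtd_info *my_chips[2];
static struct mtd_info *my_merged;

static int my_combine(void)
{
        my_merged = mtd_concat_create(my_chips, 2, "board-nor");
        if (!my_merged)
                return -ENXIO;

        /* The concatenated device is then registered like any other MTD;
         * partitions can be layered on top of it by the usual means. */
        return mtd_device_register(my_merged, NULL, 0);
}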
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> + * Copyright © 2002-2003 Greg Ungerer <gerg@snapgear.com> + * Copyright © 2002-2003 SnapGear Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_DOC2000_H__ +#define __MTD_DOC2000_H__ + +#include <linux/mtd/mtd.h> +#include <linux/mutex.h> + +#define DoC_Sig1 0 +#define DoC_Sig2 1 + +#define DoC_ChipID 0x1000 +#define DoC_DOCStatus 0x1001 +#define DoC_DOCControl 0x1002 +#define DoC_FloorSelect 0x1003 +#define DoC_CDSNControl 0x1004 +#define DoC_CDSNDeviceSelect 0x1005 +#define DoC_ECCConf 0x1006 +#define DoC_2k_ECCStatus 0x1007 + +#define DoC_CDSNSlowIO 0x100d +#define DoC_ECCSyndrome0 0x1010 +#define DoC_ECCSyndrome1 0x1011 +#define DoC_ECCSyndrome2 0x1012 +#define DoC_ECCSyndrome3 0x1013 +#define DoC_ECCSyndrome4 0x1014 +#define DoC_ECCSyndrome5 0x1015 +#define DoC_AliasResolution 0x101b +#define DoC_ConfigInput 0x101c +#define DoC_ReadPipeInit 0x101d +#define DoC_WritePipeTerm 0x101e +#define DoC_LastDataRead 0x101f +#define DoC_NOP 0x1020 + +#define DoC_Mil_CDSN_IO 0x0800 +#define DoC_2k_CDSN_IO 0x1800 + +#define DoC_Mplus_NOP 0x1002 +#define DoC_Mplus_AliasResolution 0x1004 +#define DoC_Mplus_DOCControl 0x1006 +#define DoC_Mplus_AccessStatus 0x1008 +#define DoC_Mplus_DeviceSelect 0x1008 +#define DoC_Mplus_Configuration 0x100a +#define DoC_Mplus_OutputControl 0x100c +#define DoC_Mplus_FlashControl 0x1020 +#define DoC_Mplus_FlashSelect 0x1022 +#define DoC_Mplus_FlashCmd 0x1024 +#define DoC_Mplus_FlashAddress 0x1026 +#define DoC_Mplus_FlashData0 0x1028 +#define DoC_Mplus_FlashData1 0x1029 +#define DoC_Mplus_ReadPipeInit 0x102a +#define DoC_Mplus_LastDataRead 0x102c +#define DoC_Mplus_LastDataRead1 0x102d +#define DoC_Mplus_WritePipeTerm 0x102e +#define DoC_Mplus_ECCSyndrome0 0x1040 +#define DoC_Mplus_ECCSyndrome1 0x1041 +#define DoC_Mplus_ECCSyndrome2 0x1042 +#define DoC_Mplus_ECCSyndrome3 0x1043 +#define DoC_Mplus_ECCSyndrome4 0x1044 +#define DoC_Mplus_ECCSyndrome5 0x1045 +#define DoC_Mplus_ECCConf 0x1046 +#define DoC_Mplus_Toggle 0x1046 +#define DoC_Mplus_DownloadStatus 0x1074 +#define DoC_Mplus_CtrlConfirm 0x1076 +#define DoC_Mplus_Power 0x1fff + +/* How to access the device? + * On ARM, it'll be mmap'd directly with 32-bit wide accesses. + * On PPC, it's mmap'd and 16-bit wide. 
+ * Others use readb/writeb + */ +#if defined(__arm__) +#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)))) +#define WriteDOC_(d, adr, reg) do{ *(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0) +#define DOC_IOREMAP_LEN 0x8000 +#elif defined(__ppc__) +#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)))) +#define WriteDOC_(d, adr, reg) do{ *(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0) +#define DOC_IOREMAP_LEN 0x4000 +#else +#define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg)) +#define WriteDOC_(d, adr, reg) writeb(d, (void __iomem *)(adr) + (reg)) +#define DOC_IOREMAP_LEN 0x2000 + +#endif + +#if defined(__i386__) || defined(__x86_64__) +#define USE_MEMCPY +#endif + +/* These are provided to directly use the DoC_xxx defines */ +#define ReadDOC(adr, reg) ReadDOC_(adr,DoC_##reg) +#define WriteDOC(d, adr, reg) WriteDOC_(d,adr,DoC_##reg) + +#define DOC_MODE_RESET 0 +#define DOC_MODE_NORMAL 1 +#define DOC_MODE_RESERVED1 2 +#define DOC_MODE_RESERVED2 3 + +#define DOC_MODE_CLR_ERR 0x80 +#define DOC_MODE_RST_LAT 0x10 +#define DOC_MODE_BDECT 0x08 +#define DOC_MODE_MDWREN 0x04 + +#define DOC_ChipID_Doc2k 0x20 +#define DOC_ChipID_Doc2kTSOP 0x21 /* internal number for MTD */ +#define DOC_ChipID_DocMil 0x30 +#define DOC_ChipID_DocMilPlus32 0x40 +#define DOC_ChipID_DocMilPlus16 0x41 + +#define CDSN_CTRL_FR_B 0x80 +#define CDSN_CTRL_FR_B0 0x40 +#define CDSN_CTRL_FR_B1 0x80 + +#define CDSN_CTRL_ECC_IO 0x20 +#define CDSN_CTRL_FLASH_IO 0x10 +#define CDSN_CTRL_WP 0x08 +#define CDSN_CTRL_ALE 0x04 +#define CDSN_CTRL_CLE 0x02 +#define CDSN_CTRL_CE 0x01 + +#define DOC_ECC_RESET 0 +#define DOC_ECC_ERROR 0x80 +#define DOC_ECC_RW 0x20 +#define DOC_ECC__EN 0x08 +#define DOC_TOGGLE_BIT 0x04 +#define DOC_ECC_RESV 0x02 +#define DOC_ECC_IGNORE 0x01 + +#define DOC_FLASH_CE 0x80 +#define DOC_FLASH_WP 0x40 +#define DOC_FLASH_BANK 0x02 + +/* We have to also set the reserved bit 1 for enable */ +#define DOC_ECC_EN (DOC_ECC__EN | DOC_ECC_RESV) +#define DOC_ECC_DIS (DOC_ECC_RESV) + +struct Nand { + char floor, chip; + unsigned long curadr; + unsigned char curmode; + /* Also some erase/write/pipeline info when we get that far */ +}; + +#define MAX_FLOORS 4 +#define MAX_CHIPS 4 + +#define MAX_FLOORS_MIL 1 +#define MAX_CHIPS_MIL 1 + +#define MAX_FLOORS_MPLUS 2 +#define MAX_CHIPS_MPLUS 1 + +#define ADDR_COLUMN 1 +#define ADDR_PAGE 2 +#define ADDR_COLUMN_PAGE 3 + +struct DiskOnChip { + unsigned long physadr; + void __iomem *virtadr; + unsigned long totlen; + unsigned char ChipID; /* Type of DiskOnChip */ + int ioreg; + + unsigned long mfr; /* Flash IDs - only one type of flash per device */ + unsigned long id; + int chipshift; + char page256; + char pageadrlen; + char interleave; /* Internal interleaving - Millennium Plus style */ + unsigned long erasesize; + + int curfloor; + int curchip; + + int numchips; + struct Nand *chips; + struct mtd_info *nextdoc; + struct mutex lock; +}; + +int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]); + +#endif /* __MTD_DOC2000_H__ */ diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h new file mode 100644 index 00000000..b63fa457 --- /dev/null +++ b/include/linux/mtd/flashchip.h @@ -0,0 +1,112 @@ +/* + * Copyright © 2000 Red Hat UK Limited + * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under 
the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_FLASHCHIP_H__ +#define __MTD_FLASHCHIP_H__ + +/* For spinlocks. sched.h includes spinlock.h from whichever directory it + * happens to be in - so we don't have to care whether we're on 2.2, which + * has asm/spinlock.h, or 2.4, which has linux/spinlock.h + */ +#include <linux/sched.h> +#include <linux/mutex.h> + +typedef enum { + FL_READY, + FL_STATUS, + FL_CFI_QUERY, + FL_JEDEC_QUERY, + FL_ERASING, + FL_ERASE_SUSPENDING, + FL_ERASE_SUSPENDED, + FL_WRITING, + FL_WRITING_TO_BUFFER, + FL_OTP_WRITE, + FL_WRITE_SUSPENDING, + FL_WRITE_SUSPENDED, + FL_PM_SUSPENDED, + FL_SYNCING, + FL_UNLOADING, + FL_LOCKING, + FL_UNLOCKING, + FL_POINT, + FL_XIP_WHILE_ERASING, + FL_XIP_WHILE_WRITING, + FL_SHUTDOWN, + /* These 2 come from nand_state_t, which has been unified here */ + FL_READING, + FL_CACHEDPRG, + /* These 4 come from onenand_state_t, which has been unified here */ + FL_RESETING, + FL_OTPING, + FL_PREPARING_ERASE, + FL_VERIFYING_ERASE, + + FL_UNKNOWN +} flstate_t; + + + +/* NOTE: confusingly, this can be used to refer to more than one chip at a time, + if they're interleaved. This can even refer to individual partitions on + the same physical chip when present. */ + +struct flchip { + unsigned long start; /* Offset within the map */ + // unsigned long len; + /* We omit len for now, because when we group them together + we insist that they're all of the same size, and the chip size + is held in the next level up. If we get more versatile later, + it'll make it a damn sight harder to find which chip we want from + a given offset, and we'll want to add the per-chip length field + back in. + */ + int ref_point_counter; + flstate_t state; + flstate_t oldstate; + + unsigned int write_suspended:1; + unsigned int erase_suspended:1; + unsigned long in_progress_block_addr; + + struct mutex mutex; + wait_queue_head_t wq; /* Wait on here when we're waiting for the chip + to be ready */ + int word_write_time; + int buffer_write_time; + int erase_time; + + int word_write_time_max; + int buffer_write_time_max; + int erase_time_max; + + void *priv; +}; + +/* This is used to handle contention on write/erase operations + between partitions of the same physical chip. */ +struct flchip_shared { + struct mutex lock; + struct flchip *writing; + struct flchip *erasing; +}; + + +#endif /* __MTD_FLASHCHIP_H__ */ diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h new file mode 100644 index 00000000..b2002922 --- /dev/null +++ b/include/linux/mtd/fsmc.h @@ -0,0 +1,176 @@ +/* + * incude/mtd/fsmc.h + * + * ST Microelectronics + * Flexible Static Memory Controller (FSMC) + * platform data interface and header file + * + * Copyright © 2010 ST Microelectronics + * Vipin Kumar <vipin.kumar@st.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. 
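/*
 * Minimal usage sketch (illustrative, not part of the patch above): how the
 * ReadDOC()/WriteDOC() accessors from doc2000.h are meant to be used.  The
 * second argument is the ioremap()ed window (struct DiskOnChip.virtadr) and
 * the third is the register name with the DoC_ prefix dropped, so
 * WriteDOC(..., DOCControl) accesses offset DoC_DOCControl (0x1002), scaled
 * per the architecture-specific width selected above.  The reset sequence
 * here is a simplified example, not the driver's exact code.
 */
#include <linux/mtd/doc2000.h>

static int example_doc_reset(struct DiskOnChip *doc)
{
        /* Pulse the ASIC through reset and back into normal mode. */
        WriteDOC(DOC_MODE_RESET | DOC_MODE_MDWREN, doc->virtadr, DOCControl);
        WriteDOC(DOC_MODE_NORMAL | DOC_MODE_MDWREN, doc->virtadr, DOCControl);

        /* The device identifies itself through the ChipID register. */
        return ReadDOC(doc->virtadr, ChipID);
}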
This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __MTD_FSMC_H +#define __MTD_FSMC_H + +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/mtd/physmap.h> +#include <linux/types.h> +#include <linux/mtd/partitions.h> +#include <asm/param.h> + +#define FSMC_NAND_BW8 1 +#define FSMC_NAND_BW16 2 + +#define FSMC_MAX_NOR_BANKS 4 +#define FSMC_MAX_NAND_BANKS 4 + +#define FSMC_FLASH_WIDTH8 1 +#define FSMC_FLASH_WIDTH16 2 + +/* fsmc controller registers for NOR flash */ +#define CTRL 0x0 + /* ctrl register definitions */ + #define BANK_ENABLE (1 << 0) + #define MUXED (1 << 1) + #define NOR_DEV (2 << 2) + #define WIDTH_8 (0 << 4) + #define WIDTH_16 (1 << 4) + #define RSTPWRDWN (1 << 6) + #define WPROT (1 << 7) + #define WRT_ENABLE (1 << 12) + #define WAIT_ENB (1 << 13) + +#define CTRL_TIM 0x4 + /* ctrl_tim register definitions */ + +#define FSMC_NOR_BANK_SZ 0x8 +#define FSMC_NOR_REG_SIZE 0x40 + +#define FSMC_NOR_REG(base, bank, reg) (base + \ + FSMC_NOR_BANK_SZ * (bank) + \ + reg) + +/* fsmc controller registers for NAND flash */ +#define PC 0x00 + /* pc register definitions */ + #define FSMC_RESET (1 << 0) + #define FSMC_WAITON (1 << 1) + #define FSMC_ENABLE (1 << 2) + #define FSMC_DEVTYPE_NAND (1 << 3) + #define FSMC_DEVWID_8 (0 << 4) + #define FSMC_DEVWID_16 (1 << 4) + #define FSMC_ECCEN (1 << 6) + #define FSMC_ECCPLEN_512 (0 << 7) + #define FSMC_ECCPLEN_256 (1 << 7) + #define FSMC_TCLR_1 (1) + #define FSMC_TCLR_SHIFT (9) + #define FSMC_TCLR_MASK (0xF) + #define FSMC_TAR_1 (1) + #define FSMC_TAR_SHIFT (13) + #define FSMC_TAR_MASK (0xF) +#define STS 0x04 + /* sts register definitions */ + #define FSMC_CODE_RDY (1 << 15) +#define COMM 0x08 + /* comm register definitions */ + #define FSMC_TSET_0 0 + #define FSMC_TSET_SHIFT 0 + #define FSMC_TSET_MASK 0xFF + #define FSMC_TWAIT_6 6 + #define FSMC_TWAIT_SHIFT 8 + #define FSMC_TWAIT_MASK 0xFF + #define FSMC_THOLD_4 4 + #define FSMC_THOLD_SHIFT 16 + #define FSMC_THOLD_MASK 0xFF + #define FSMC_THIZ_1 1 + #define FSMC_THIZ_SHIFT 24 + #define FSMC_THIZ_MASK 0xFF +#define ATTRIB 0x0C +#define IOATA 0x10 +#define ECC1 0x14 +#define ECC2 0x18 +#define ECC3 0x1C +#define FSMC_NAND_BANK_SZ 0x20 + +#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \ + (FSMC_NAND_BANK_SZ * (bank)) + \ + reg) + +#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) + +/* + * There are 13 bytes of ecc for every 512 byte block in FSMC version 8 + * and it has to be read consecutively and immediately after the 512 + * byte data block for hardware to generate the error bit offsets + * Managing the ecc bytes in the following way is easier. 
This way is + * similar to oobfree structure maintained already in u-boot nand driver + */ +#define MAX_ECCPLACE_ENTRIES 32 + +struct fsmc_nand_eccplace { + uint8_t offset; + uint8_t length; +}; + +struct fsmc_eccplace { + struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES]; +}; + +struct fsmc_nand_timings { + uint8_t tclr; + uint8_t tar; + uint8_t thiz; + uint8_t thold; + uint8_t twait; + uint8_t tset; +}; + +enum access_mode { + USE_DMA_ACCESS = 1, + USE_WORD_ACCESS, +}; + +/** + * fsmc_nand_platform_data - platform specific NAND controller config + * @partitions: partition table for the platform, use a default fallback + * if this is NULL + * @nr_partitions: the number of partitions in the previous entry + * @options: different options for the driver + * @width: bus width + * @bank: default bank + * @select_bank: callback to select a certain bank, this is + * platform-specific. If the controller only supports one bank + * this may be set to NULL + */ +struct fsmc_nand_platform_data { + struct fsmc_nand_timings *nand_timings; + struct mtd_partition *partitions; + unsigned int nr_partitions; + unsigned int options; + unsigned int width; + unsigned int bank; + + /* CLE, ALE offsets */ + unsigned int cle_off; + unsigned int ale_off; + enum access_mode mode; + + void (*select_bank)(uint32_t bank, uint32_t busw); + + /* priv structures for dma accesses */ + void *read_dma_priv; + void *write_dma_priv; +}; + +extern int __init fsmc_nor_init(struct platform_device *pdev, + unsigned long base, uint32_t bank, uint32_t width); +extern void __init fsmc_init_board_info(struct platform_device *pdev, + struct mtd_partition *partitions, unsigned int nr_partitions, + unsigned int width); + +#endif /* __MTD_FSMC_H */ diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h new file mode 100644 index 00000000..0555f7a0 --- /dev/null +++ b/include/linux/mtd/ftl.h @@ -0,0 +1,74 @@ +/* + * Derived from (and probably identical to): + * ftl.h 1.7 1999/10/25 20:23:17 + * + * The contents of this file are subject to the Mozilla Public License + * Version 1.1 (the "License"); you may not use this file except in + * compliance with the License. You may obtain a copy of the License + * at http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and + * limitations under the License. + * + * The initial developer of the original code is David A. Hinds + * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + * + * Alternatively, the contents of this file may be used under the + * terms of the GNU General Public License version 2 (the "GPL"), in + * which case the provisions of the GPL are applicable instead of the + * above. If you wish to allow the use of your version of this file + * only under the terms of the GPL and not to allow others to use + * your version of this file under the MPL, indicate your decision by + * deleting the provisions above and replace them with the notice and + * other provisions required by the GPL. If you do not delete the + * provisions above, a recipient may use your version of this file + * under either the MPL or the GPL. 
+ */ + +#ifndef _LINUX_FTL_H +#define _LINUX_FTL_H + +typedef struct erase_unit_header_t { + uint8_t LinkTargetTuple[5]; + uint8_t DataOrgTuple[10]; + uint8_t NumTransferUnits; + uint32_t EraseCount; + uint16_t LogicalEUN; + uint8_t BlockSize; + uint8_t EraseUnitSize; + uint16_t FirstPhysicalEUN; + uint16_t NumEraseUnits; + uint32_t FormattedSize; + uint32_t FirstVMAddress; + uint16_t NumVMPages; + uint8_t Flags; + uint8_t Code; + uint32_t SerialNumber; + uint32_t AltEUHOffset; + uint32_t BAMOffset; + uint8_t Reserved[12]; + uint8_t EndTuple[2]; +} erase_unit_header_t; + +/* Flags in erase_unit_header_t */ +#define HIDDEN_AREA 0x01 +#define REVERSE_POLARITY 0x02 +#define DOUBLE_BAI 0x04 + +/* Definitions for block allocation information */ + +#define BLOCK_FREE(b) ((b) == 0xffffffff) +#define BLOCK_DELETED(b) (((b) == 0) || ((b) == 0xfffffffe)) + +#define BLOCK_TYPE(b) ((b) & 0x7f) +#define BLOCK_ADDRESS(b) ((b) & ~0x7f) +#define BLOCK_NUMBER(b) ((b) >> 9) +#define BLOCK_CONTROL 0x30 +#define BLOCK_DATA 0x40 +#define BLOCK_REPLACEMENT 0x60 +#define BLOCK_BAD 0x70 + +#endif /* _LINUX_FTL_H */ diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h new file mode 100644 index 00000000..2c456054 --- /dev/null +++ b/include/linux/mtd/gen_probe.h @@ -0,0 +1,37 @@ +/* + * Copyright © 2001 Red Hat UK Limited + * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __LINUX_MTD_GEN_PROBE_H__ +#define __LINUX_MTD_GEN_PROBE_H__ + +#include <linux/mtd/flashchip.h> +#include <linux/mtd/map.h> +#include <linux/mtd/cfi.h> +#include <linux/bitops.h> + +struct chip_probe { + char *name; + int (*probe_chip)(struct map_info *map, __u32 base, + unsigned long *chip_map, struct cfi_private *cfi); +}; + +struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp); + +#endif /* __LINUX_MTD_GEN_PROBE_H__ */ diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h new file mode 100644 index 00000000..69b6dbf4 --- /dev/null +++ b/include/linux/mtd/gpmi-nand.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
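/*
 * Minimal usage sketch (illustrative, not part of the patch above): how the
 * block-allocation macros from ftl.h classify a 32-bit BAM entry.
 * 0xffffffff is free, 0 or 0xfffffffe is deleted; otherwise the low 7 bits
 * give the block type and the high bits encode the block number.
 */
#include <linux/types.h>
#include <linux/mtd/ftl.h>

static int example_ftl_classify(uint32_t bam_entry, uint32_t *log_block)
{
        if (BLOCK_FREE(bam_entry))
                return 0;               /* unused, may be allocated */
        if (BLOCK_DELETED(bam_entry))
                return 1;               /* stale, reclaimable */
        if (BLOCK_TYPE(bam_entry) == BLOCK_DATA) {
                *log_block = BLOCK_NUMBER(bam_entry);   /* entry >> 9 */
                return 2;               /* live data block */
        }
        return 3;                       /* control, replacement or bad block */
}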
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef __MACH_MXS_GPMI_NAND_H__ +#define __MACH_MXS_GPMI_NAND_H__ + +/* The size of the resources is fixed. */ +#define GPMI_NAND_RES_SIZE 6 + +/* Resource names for the GPMI NAND driver. */ +#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "GPMI NAND GPMI Registers" +#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME "GPMI NAND GPMI Interrupt" +#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "GPMI NAND BCH Registers" +#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "GPMI NAND BCH Interrupt" +#define GPMI_NAND_DMA_CHANNELS_RES_NAME "GPMI NAND DMA Channels" +#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "GPMI NAND DMA Interrupt" + +/** + * struct gpmi_nand_platform_data - GPMI NAND driver platform data. + * + * This structure communicates platform-specific information to the GPMI NAND + * driver that can't be expressed as resources. + * + * @platform_init: A pointer to a function the driver will call to + * initialize the platform (e.g., set up the pin mux). + * @min_prop_delay_in_ns: Minimum propagation delay of GPMI signals to and + * from the NAND Flash device, in nanoseconds. + * @max_prop_delay_in_ns: Maximum propagation delay of GPMI signals to and + * from the NAND Flash device, in nanoseconds. + * @max_chip_count: The maximum number of chips for which the driver + * should configure the hardware. This value most + * likely reflects the number of pins that are + * connected to a NAND Flash device. If this is + * greater than the SoC hardware can support, the + * driver will print a message and fail to initialize. + * @partitions: An optional pointer to an array of partition + * descriptions. + * @partition_count: The number of elements in the partitions array. + */ +struct gpmi_nand_platform_data { + /* SoC hardware information. */ + int (*platform_init)(void); + + /* NAND Flash information. */ + unsigned int min_prop_delay_in_ns; + unsigned int max_prop_delay_in_ns; + unsigned int max_chip_count; + + /* Medium information. */ + struct mtd_partition *partitions; + unsigned partition_count; +}; +#endif diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h new file mode 100644 index 00000000..02cd5f9b --- /dev/null +++ b/include/linux/mtd/inftl.h @@ -0,0 +1,63 @@ +/* + * inftl.h -- defines to support the Inverse NAND Flash Translation Layer + * + * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) + */ + +#ifndef __MTD_INFTL_H__ +#define __MTD_INFTL_H__ + +#ifndef __KERNEL__ +#error This is a kernel header. Perhaps include nftl-user.h instead? 
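/*
 * Minimal usage sketch (illustrative, not part of the patch above): a board
 * file populating the gpmi_nand_platform_data structure documented in
 * gpmi-nand.h.  The pinmux callback, the propagation-delay values and the
 * partition layout are invented for this example.
 */
#include <linux/kernel.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/gpmi-nand.h>

static int example_gpmi_pinmux_init(void)
{
        /* Board-specific pin mux setup would go here. */
        return 0;
}

static struct mtd_partition example_gpmi_parts[] = {
        { .name = "boot",   .offset = 0,                  .size = 16 * 1024 * 1024 },
        { .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct gpmi_nand_platform_data example_gpmi_pdata = {
        .platform_init          = example_gpmi_pinmux_init,
        .min_prop_delay_in_ns   = 5,
        .max_prop_delay_in_ns   = 9,
        .max_chip_count         = 1,
        .partitions             = example_gpmi_parts,
        .partition_count        = ARRAY_SIZE(example_gpmi_parts),
};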
+#endif + +#include <linux/mtd/blktrans.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nftl.h> + +#include <mtd/inftl-user.h> + +#ifndef INFTL_MAJOR +#define INFTL_MAJOR 96 +#endif +#define INFTL_PARTN_BITS 4 + +#ifdef __KERNEL__ + +struct INFTLrecord { + struct mtd_blktrans_dev mbd; + __u16 MediaUnit; + __u32 EraseSize; + struct INFTLMediaHeader MediaHdr; + int usecount; + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + __u16 numvunits; + __u16 firstEUN; + __u16 lastEUN; + __u16 numfreeEUNs; + __u16 LastFreeEUN; /* To speed up finding a free EUN */ + int head,sect,cyl; + __u16 *PUtable; /* Physical Unit Table */ + __u16 *VUtable; /* Virtual Unit Table */ + unsigned int nb_blocks; /* number of physical blocks */ + unsigned int nb_boot_blocks; /* number of blocks used by the bios */ + struct erase_info instr; + struct nand_ecclayout oobinfo; +}; + +int INFTL_mount(struct INFTLrecord *s); +int INFTL_formatblock(struct INFTLrecord *s, int block); + +void INFTL_dumptables(struct INFTLrecord *s); +void INFTL_dumpVUchains(struct INFTLrecord *s); + +int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, + size_t *retlen, uint8_t *buf); +int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, + size_t *retlen, uint8_t *buf); + +#endif /* __KERNEL__ */ + +#endif /* __MTD_INFTL_H__ */ diff --git a/include/linux/mtd/latch-addr-flash.h b/include/linux/mtd/latch-addr-flash.h new file mode 100644 index 00000000..e94b8e12 --- /dev/null +++ b/include/linux/mtd/latch-addr-flash.h @@ -0,0 +1,29 @@ +/* + * Interface for NOR flash driver whose high address lines are latched + * + * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ +#ifndef __LATCH_ADDR_FLASH__ +#define __LATCH_ADDR_FLASH__ + +struct map_info; +struct mtd_partition; + +struct latch_addr_flash_data { + unsigned int width; + unsigned int size; + + int (*init)(void *data, int cs); + void (*done)(void *data); + void (*set_window)(unsigned long offset, void *data); + void *data; + + unsigned int nr_parts; + struct mtd_partition *parts; +}; + +#endif diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h new file mode 100644 index 00000000..3595a023 --- /dev/null +++ b/include/linux/mtd/map.h @@ -0,0 +1,465 @@ +/* + * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
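/*
 * Minimal usage sketch (illustrative, not part of the patch above): a board
 * file describing a latched NOR window with the latch_addr_flash_data
 * structure from latch-addr-flash.h.  The callback, window size and
 * partition layout are invented; set_window() is where the board would drive
 * the external address latch before the map is accessed at "offset".
 */
#include <linux/kernel.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/latch-addr-flash.h>

static void example_latch_set_window(unsigned long offset, void *data)
{
        /* Program the external latch with the high address bits here. */
}

static struct mtd_partition example_latch_parts[] = {
        { .name = "u-boot", .offset = 0,                  .size = 512 * 1024 },
        { .name = "data",   .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct latch_addr_flash_data example_latch_pdata = {
        .width          = 2,                    /* 16-bit bus */
        .size           = 64 * 1024 * 1024,     /* 64 MiB window */
        .set_window     = example_latch_set_window,
        .nr_parts       = ARRAY_SIZE(example_latch_parts),
        .parts          = example_latch_parts,
};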
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +/* Overhauled routines for dealing with different mmap regions of flash */ + +#ifndef __LINUX_MTD_MAP_H__ +#define __LINUX_MTD_MAP_H__ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/string.h> +#include <linux/bug.h> +#include <linux/kernel.h> + +#include <asm/unaligned.h> +#include <asm/io.h> +#include <asm/barrier.h> + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 +#define map_bankwidth(map) 1 +#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1) +#define map_bankwidth_is_large(map) (0) +#define map_words(map) (1) +#define MAX_MAP_BANKWIDTH 1 +#else +#define map_bankwidth_is_1(map) (0) +#endif + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2 +# ifdef map_bankwidth +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# else +# define map_bankwidth(map) 2 +# define map_bankwidth_is_large(map) (0) +# define map_words(map) (1) +# endif +#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 2 +#else +#define map_bankwidth_is_2(map) (0) +#endif + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4 +# ifdef map_bankwidth +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# else +# define map_bankwidth(map) 4 +# define map_bankwidth_is_large(map) (0) +# define map_words(map) (1) +# endif +#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 4 +#else +#define map_bankwidth_is_4(map) (0) +#endif + +/* ensure we never evaluate anything shorted than an unsigned long + * to zero, and ensure we'll never miss the end of an comparison (bjd) */ + +#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1))/ sizeof(unsigned long)) + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8 +# ifdef map_bankwidth +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# if BITS_PER_LONG < 64 +# undef map_bankwidth_is_large +# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +# undef map_words +# define map_words(map) map_calc_words(map) +# endif +# else +# define map_bankwidth(map) 8 +# define map_bankwidth_is_large(map) (BITS_PER_LONG < 64) +# define map_words(map) map_calc_words(map) +# endif +#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 8 +#else +#define map_bankwidth_is_8(map) (0) +#endif + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16 +# ifdef map_bankwidth +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# undef map_bankwidth_is_large +# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +# undef map_words +# define map_words(map) map_calc_words(map) +# else +# define map_bankwidth(map) 16 +# define map_bankwidth_is_large(map) (1) +# define map_words(map) map_calc_words(map) +# endif +#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 16 +#else +#define map_bankwidth_is_16(map) (0) +#endif + +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 +# ifdef map_bankwidth +# undef map_bankwidth +# define map_bankwidth(map) ((map)->bankwidth) +# undef map_bankwidth_is_large +# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) +# undef map_words +# define map_words(map) map_calc_words(map) +# else +# define map_bankwidth(map) 32 +# 
define map_bankwidth_is_large(map) (1) +# define map_words(map) map_calc_words(map) +# endif +#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32) +#undef MAX_MAP_BANKWIDTH +#define MAX_MAP_BANKWIDTH 32 +#else +#define map_bankwidth_is_32(map) (0) +#endif + +#ifndef map_bankwidth +#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work" +static inline int map_bankwidth(void *map) +{ + BUG(); + return 0; +} +#define map_bankwidth_is_large(map) (0) +#define map_words(map) (0) +#define MAX_MAP_BANKWIDTH 1 +#endif + +static inline int map_bankwidth_supported(int w) +{ + switch (w) { +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 + case 1: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2 + case 2: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4 + case 4: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8 + case 8: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16 + case 16: +#endif +#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 + case 32: +#endif + return 1; + + default: + return 0; + } +} + +#define MAX_MAP_LONGS ( ((MAX_MAP_BANKWIDTH*8) + BITS_PER_LONG - 1) / BITS_PER_LONG ) + +typedef union { + unsigned long x[MAX_MAP_LONGS]; +} map_word; + +/* The map stuff is very simple. You fill in your struct map_info with + a handful of routines for accessing the device, making sure they handle + paging etc. correctly if your device needs it. Then you pass it off + to a chip probe routine -- either JEDEC or CFI probe or both -- via + do_map_probe(). If a chip is recognised, the probe code will invoke the + appropriate chip driver (if present) and return a struct mtd_info. + At which point, you fill in the mtd->module with your own module + address, and register it with the MTD core code. Or you could partition + it and register the partitions instead, or keep it for your own private + use; whatever. + + The mtd->priv field will point to the struct map_info, and any further + private data required by the chip driver is linked from the + mtd->priv->fldrv_priv field. This allows the map driver to get at + the destructor function map->fldrv_destroy() when it's tired + of living. +*/ + +struct map_info { + const char *name; + unsigned long size; + resource_size_t phys; +#define NO_XIP (-1UL) + + void __iomem *virt; + void *cached; + + int swap; /* this mapping's byte-swapping requirement */ + int bankwidth; /* in octets. This isn't necessarily the width + of actual bus cycles -- it's the repeat interval + in bytes, before you are talking to the first chip again. + */ + +#ifdef CONFIG_MTD_COMPLEX_MAPPINGS + map_word (*read)(struct map_info *, unsigned long); + void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t); + + void (*write)(struct map_info *, const map_word, unsigned long); + void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t); + + /* We can perhaps put in 'point' and 'unpoint' methods, if we really + want to enable XIP for non-linear mappings. Not yet though. */ +#endif + /* It's possible for the map driver to use cached memory in its + copy_from implementation (and _only_ with copy_from). However, + when the chip driver knows some flash area has changed contents, + it will signal it to the map driver through this routine to let + the map driver invalidate the corresponding cache as needed. + If there is no cache to care about this can be set to NULL. */ + void (*inval_cache)(struct map_info *, unsigned long, ssize_t); + + /* set_vpp() must handle being reentered -- enable, enable, disable + must leave it enabled. 
*/ + void (*set_vpp)(struct map_info *, int); + + unsigned long pfow_base; + unsigned long map_priv_1; + unsigned long map_priv_2; + void *fldrv_priv; + struct mtd_chip_driver *fldrv; +}; + +struct mtd_chip_driver { + struct mtd_info *(*probe)(struct map_info *map); + void (*destroy)(struct mtd_info *); + struct module *module; + char *name; + struct list_head list; +}; + +void register_mtd_chip_driver(struct mtd_chip_driver *); +void unregister_mtd_chip_driver(struct mtd_chip_driver *); + +struct mtd_info *do_map_probe(const char *name, struct map_info *map); +void map_destroy(struct mtd_info *mtd); + +#define ENABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 1); } while(0) +#define DISABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 0); } while(0) + +#define INVALIDATE_CACHED_RANGE(map, from, size) \ + do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0) + + +static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2) +{ + int i; + for (i=0; i<map_words(map); i++) { + if (val1.x[i] != val2.x[i]) + return 0; + } + return 1; +} + +static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2) +{ + map_word r; + int i; + + for (i=0; i<map_words(map); i++) { + r.x[i] = val1.x[i] & val2.x[i]; + } + return r; +} + +static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2) +{ + map_word r; + int i; + + for (i=0; i<map_words(map); i++) { + r.x[i] = val1.x[i] & ~val2.x[i]; + } + return r; +} + +static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2) +{ + map_word r; + int i; + + for (i=0; i<map_words(map); i++) { + r.x[i] = val1.x[i] | val2.x[i]; + } + return r; +} + +#define map_word_andequal(m, a, b, z) map_word_equal(m, z, map_word_and(m, a, b)) + +static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2) +{ + int i; + + for (i=0; i<map_words(map); i++) { + if (val1.x[i] & val2.x[i]) + return 1; + } + return 0; +} + +static inline map_word map_word_load(struct map_info *map, const void *ptr) +{ + map_word r; + + if (map_bankwidth_is_1(map)) + r.x[0] = *(unsigned char *)ptr; + else if (map_bankwidth_is_2(map)) + r.x[0] = get_unaligned((uint16_t *)ptr); + else if (map_bankwidth_is_4(map)) + r.x[0] = get_unaligned((uint32_t *)ptr); +#if BITS_PER_LONG >= 64 + else if (map_bankwidth_is_8(map)) + r.x[0] = get_unaligned((uint64_t *)ptr); +#endif + else if (map_bankwidth_is_large(map)) + memcpy(r.x, ptr, map->bankwidth); + + return r; +} + +static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len) +{ + int i; + + if (map_bankwidth_is_large(map)) { + char *dest = (char *)&orig; + memcpy(dest+start, buf, len); + } else { + for (i=start; i < start+len; i++) { + int bitpos; +#ifdef __LITTLE_ENDIAN + bitpos = i*8; +#else /* __BIG_ENDIAN */ + bitpos = (map_bankwidth(map)-1-i)*8; +#endif + orig.x[0] &= ~(0xff << bitpos); + orig.x[0] |= buf[i-start] << bitpos; + } + } + return orig; +} + +#if BITS_PER_LONG < 64 +#define MAP_FF_LIMIT 4 +#else +#define MAP_FF_LIMIT 8 +#endif + +static inline map_word map_word_ff(struct map_info *map) +{ + map_word r; + int i; + + if (map_bankwidth(map) < MAP_FF_LIMIT) { + int bw = 8 * map_bankwidth(map); + r.x[0] = (1 << bw) - 1; + } else { + for (i=0; i<map_words(map); i++) + r.x[i] = ~0UL; + } + return r; +} + +static inline map_word inline_map_read(struct map_info *map, unsigned long ofs) +{ + map_word r; + + if (map_bankwidth_is_1(map)) + 
r.x[0] = __raw_readb(map->virt + ofs); + else if (map_bankwidth_is_2(map)) + r.x[0] = __raw_readw(map->virt + ofs); + else if (map_bankwidth_is_4(map)) + r.x[0] = __raw_readl(map->virt + ofs); +#if BITS_PER_LONG >= 64 + else if (map_bankwidth_is_8(map)) + r.x[0] = __raw_readq(map->virt + ofs); +#endif + else if (map_bankwidth_is_large(map)) + memcpy_fromio(r.x, map->virt+ofs, map->bankwidth); + else + BUG(); + + return r; +} + +static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs) +{ + if (map_bankwidth_is_1(map)) + __raw_writeb(datum.x[0], map->virt + ofs); + else if (map_bankwidth_is_2(map)) + __raw_writew(datum.x[0], map->virt + ofs); + else if (map_bankwidth_is_4(map)) + __raw_writel(datum.x[0], map->virt + ofs); +#if BITS_PER_LONG >= 64 + else if (map_bankwidth_is_8(map)) + __raw_writeq(datum.x[0], map->virt + ofs); +#endif + else if (map_bankwidth_is_large(map)) + memcpy_toio(map->virt+ofs, datum.x, map->bankwidth); + mb(); +} + +static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) +{ + if (map->cached) + memcpy(to, (char *)map->cached + from, len); + else + memcpy_fromio(to, map->virt + from, len); +} + +static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) +{ + memcpy_toio(map->virt + to, from, len); +} + +#ifdef CONFIG_MTD_COMPLEX_MAPPINGS +#define map_read(map, ofs) (map)->read(map, ofs) +#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len) +#define map_write(map, datum, ofs) (map)->write(map, datum, ofs) +#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len) + +extern void simple_map_init(struct map_info *); +#define map_is_linear(map) (map->phys != NO_XIP) + +#else +#define map_read(map, ofs) inline_map_read(map, ofs) +#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len) +#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs) +#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len) + + +#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth)) +#define map_is_linear(map) ({ (void)(map); 1; }) + +#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */ + +#endif /* __LINUX_MTD_MAP_H__ */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h new file mode 100644 index 00000000..80affde5 --- /dev/null +++ b/include/linux/mtd/mtd.h @@ -0,0 +1,429 @@ +/* + * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
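/*
 * Minimal usage sketch (illustrative, not part of the patch above): the
 * pattern described in the "The map stuff is very simple" comment in map.h.
 * A linear window is described in a struct map_info, handed to
 * simple_map_init() and then to do_map_probe(); if a chip driver recognises
 * the flash, the resulting mtd_info is registered.  The physical address,
 * size and bankwidth are made-up values.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>

static struct map_info example_map = {
        .name           = "example-nor",
        .phys           = 0x10000000,           /* made-up base address */
        .size           = 0x01000000,           /* 16 MiB window */
        .bankwidth      = 2,                    /* one 16-bit chip */
};

static struct mtd_info *example_map_probe(void)
{
        struct mtd_info *mtd;

        example_map.virt = ioremap(example_map.phys, example_map.size);
        if (!example_map.virt)
                return NULL;

        simple_map_init(&example_map);          /* use the inline accessors */

        mtd = do_map_probe("cfi_probe", &example_map);
        if (!mtd) {
                iounmap(example_map.virt);
                return NULL;
        }

        mtd->owner = THIS_MODULE;
        mtd_device_register(mtd, NULL, 0);      /* no partitions in this sketch */
        return mtd;
}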
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_MTD_H__ +#define __MTD_MTD_H__ + +#include <linux/types.h> +#include <linux/uio.h> +#include <linux/notifier.h> +#include <linux/device.h> + +#include <mtd/mtd-abi.h> + +#include <asm/div64.h> + +#define MTD_CHAR_MAJOR 90 +#define MTD_BLOCK_MAJOR 31 +#define MAX_MTD_DEVICES 32 + +#define MTD_ERASE_PENDING 0x01 +#define MTD_ERASING 0x02 +#define MTD_ERASE_SUSPEND 0x04 +#define MTD_ERASE_DONE 0x08 +#define MTD_ERASE_FAILED 0x10 + +#define MTD_FAIL_ADDR_UNKNOWN -1LL + +/* + * If the erase fails, fail_addr might indicate exactly which block failed. If + * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level + * or was not specific to any particular block. + */ +struct erase_info { + struct mtd_info *mtd; + uint64_t addr; + uint64_t len; + uint64_t fail_addr; + u_long time; + u_long retries; + unsigned dev; + unsigned cell; + void (*callback) (struct erase_info *self); + u_long priv; + u_char state; + struct erase_info *next; +}; + +struct mtd_erase_region_info { + uint64_t offset; /* At which this region starts, from the beginning of the MTD */ + uint32_t erasesize; /* For this region */ + uint32_t numblocks; /* Number of blocks of erasesize in this region */ + unsigned long *lockmap; /* If keeping bitmap of locks */ +}; + +/** + * struct mtd_oob_ops - oob operation operands + * @mode: operation mode + * + * @len: number of data bytes to write/read + * + * @retlen: number of data bytes written/read + * + * @ooblen: number of oob bytes to write/read + * @oobretlen: number of oob bytes written/read + * @ooboffs: offset of oob data in the oob area (only relevant when + * mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW) + * @datbuf: data buffer - if NULL only oob data are read/written + * @oobbuf: oob data buffer + * + * Note, it is allowed to read more than one OOB area at one go, but not write. + * The interface assumes that the OOB write requests program only one page's + * OOB area. + */ +struct mtd_oob_ops { + unsigned int mode; + size_t len; + size_t retlen; + size_t ooblen; + size_t oobretlen; + uint32_t ooboffs; + uint8_t *datbuf; + uint8_t *oobbuf; +}; + +#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 +#define MTD_MAX_ECCPOS_ENTRIES_LARGE 448 +/* + * Internal ECC layout control structure. For historical reasons, there is a + * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained + * for export to user-space via the ECCGETLAYOUT ioctl. + * nand_ecclayout should be expandable in the future simply by the above macros. + */ +struct nand_ecclayout { + __u32 eccbytes; + __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE]; + __u32 oobavail; + struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; +}; + +struct module; /* only needed for owner field in mtd_info */ + +struct mtd_info { + u_char type; + uint32_t flags; + uint64_t size; // Total size of the MTD + + /* "Major" erase size for the device. Naïve users may take this + * to be the only erase size available, or may use the more detailed + * information below if they desire + */ + uint32_t erasesize; + /* Minimal writable flash unit size. In case of NOR flash it is 1 (even + * though individual bits can be cleared), in case of NAND flash it is + * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR + * it is of ECC block size, etc. 
It is illegal to have writesize = 0. + * Any driver registering a struct mtd_info must ensure a writesize of + * 1 or larger. + */ + uint32_t writesize; + + /* + * Size of the write buffer used by the MTD. MTD devices having a write + * buffer can write multiple writesize chunks at a time. E.g. while + * writing 4 * writesize bytes to a device with 2 * writesize bytes + * buffer the MTD driver can (but doesn't have to) do 2 writesize + * operations, but not 4. Currently, all NANDs have writebufsize + * equivalent to writesize (NAND page size). Some NOR flashes do have + * writebufsize greater than writesize. + */ + uint32_t writebufsize; + + uint32_t oobsize; // Amount of OOB data per block (e.g. 16) + uint32_t oobavail; // Available OOB bytes per block + + uint32_t realerasesize; //dan_multi add for multi plane access + uint32_t realwritesize; + uint32_t realoobsize; + int planenum; + + /* + * If erasesize is a power of 2 then the shift is stored in + * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize. + */ + unsigned int erasesize_shift; + unsigned int writesize_shift; + /* Masks based on erasesize_shift and writesize_shift */ + unsigned int erasesize_mask; + unsigned int writesize_mask; + + // Kernel-only stuff starts here. + const char *name; + int index; + + /* ECC layout structure pointer - read only! */ + struct nand_ecclayout *ecclayout; + + /* max number of correctible bit errors per writesize */ + unsigned int ecc_strength; + + /* Data for variable erase regions. If numeraseregions is zero, + * it means that the whole device has erasesize as given above. + */ + int numeraseregions; + struct mtd_erase_region_info *eraseregions; + + /* + * Do not call via these pointers, use corresponding mtd_*() + * wrappers instead. + */ + int (*_erase) (struct mtd_info *mtd, struct erase_info *instr); + int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, void **virt, resource_size_t *phys); + int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); + unsigned long (*_get_unmapped_area) (struct mtd_info *mtd, + unsigned long len, + unsigned long offset, + unsigned long flags); + int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf); + int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf); + int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf); + int (*_read_oob) (struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops); + int (*read_bbinfo_facmk) (struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops); //Vincent 20090526 + int (*_write_oob) (struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops); + int (*_get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, + size_t len); + int (*get_para) (struct mtd_info *mtd, int chip); + int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len, size_t *retlen, u_char *buf); + int (*_get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, + size_t len); + int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len, size_t *retlen, u_char *buf); + int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to, + size_t len, size_t *retlen, u_char *buf); + int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len); + int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t *retlen); + void (*_sync) (struct mtd_info *mtd); + int (*_lock) (struct 
mtd_info *mtd, loff_t ofs, uint64_t len); + int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); + int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); + int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); + int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); + int (*_suspend) (struct mtd_info *mtd); + void (*_resume) (struct mtd_info *mtd); + /* + * If the driver is something smart, like UBI, it may need to maintain + * its own reference counting. The below functions are only for driver. + */ + int (*_get_device) (struct mtd_info *mtd); + void (*_put_device) (struct mtd_info *mtd); + + /* Backing device capabilities for this device + * - provides mmap capabilities + */ + struct backing_dev_info *backing_dev_info; + + struct notifier_block reboot_notifier; /* default mode before reboot */ + + /* ECC status information */ + struct mtd_ecc_stats ecc_stats; + /* Subpage shift (NAND) */ + int subpage_sft; + + void *priv; + + struct module *owner; + struct device dev; + int usecount; + int blkcnt; + int pagecnt; + int dwECCBitNum; + int dwRetry; + int dwRdmz; + int dwDDR; + int id; + int id2; + unsigned int spec_clk; + int spec_tadl; + int bbt_sw_rdmz; + uint32_t pageSizek; + int ecc_err_cnt; + int ecc_err_level; +}; + +int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); +int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, + void **virt, resource_size_t *phys); +int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len); +unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, + unsigned long offset, unsigned long flags); +int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, + u_char *buf); +int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, + const u_char *buf); +int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, + const u_char *buf); + +static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops) +{ + ops->retlen = ops->oobretlen = 0; + if (!mtd->_read_oob) + return -EOPNOTSUPP; + return mtd->_read_oob(mtd, from, ops); +} + +static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops) +{ + ops->retlen = ops->oobretlen = 0; + if (!mtd->_write_oob) + return -EOPNOTSUPP; + if (!(mtd->flags & MTD_WRITEABLE)) + return -EROFS; + return mtd->_write_oob(mtd, to, ops); +} + +int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, + size_t len); +int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf); +int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf, + size_t len); +int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf); +int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, u_char *buf); +int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); + +int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t *retlen); + +static inline void mtd_sync(struct mtd_info *mtd) +{ + if (mtd->_sync) + mtd->_sync(mtd); +} + +int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); +int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs); +int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs); + +static inline int 
mtd_suspend(struct mtd_info *mtd) +{ + return mtd->_suspend ? mtd->_suspend(mtd) : 0; +} + +static inline void mtd_resume(struct mtd_info *mtd) +{ + if (mtd->_resume) + mtd->_resume(mtd); +} + +static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->erasesize_shift) + return sz >> mtd->erasesize_shift; + do_div(sz, mtd->erasesize); + return sz; +} + +static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->erasesize_shift) + return sz & mtd->erasesize_mask; + return do_div(sz, mtd->erasesize); +} + +static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->writesize_shift) + return sz >> mtd->writesize_shift; + do_div(sz, mtd->writesize); + return sz; +} + +static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->writesize_shift) + return sz & mtd->writesize_mask; + return do_div(sz, mtd->writesize); +} + +static inline int mtd_has_oob(const struct mtd_info *mtd) +{ + return mtd->_read_oob && mtd->_write_oob; +} + +static inline int mtd_can_have_bb(const struct mtd_info *mtd) +{ + return !!mtd->_block_isbad; +} + + /* Kernel-side ioctl definitions */ + +struct mtd_partition; +struct mtd_part_parser_data; +extern int add_mtd_device(struct mtd_info *mtd); +extern int del_mtd_device (struct mtd_info *mtd); + +extern int mtd_device_parse_register(struct mtd_info *mtd, + const char **part_probe_types, + struct mtd_part_parser_data *parser_data, + const struct mtd_partition *defparts, + int defnr_parts); +#define mtd_device_register(master, parts, nr_parts) \ + mtd_device_parse_register(master, NULL, NULL, parts, nr_parts) +extern int mtd_device_unregister(struct mtd_info *master); +extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); +extern int __get_mtd_device(struct mtd_info *mtd); +extern void __put_mtd_device(struct mtd_info *mtd); +extern struct mtd_info *get_mtd_device_nm(const char *name); +extern void put_mtd_device(struct mtd_info *mtd); +typedef int (*SF_FPTR)(int l); +extern SF_FPTR wmt_sf_prot; + + +struct mtd_notifier { + void (*add)(struct mtd_info *mtd); + void (*remove)(struct mtd_info *mtd); + struct list_head list; +}; + + +extern void register_mtd_user (struct mtd_notifier *new); +extern int unregister_mtd_user (struct mtd_notifier *old); +void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size); + +void mtd_erase_callback(struct erase_info *instr); + +static inline int mtd_is_bitflip(int err) { + return err == -EUCLEAN; +} + +static inline int mtd_is_eccerr(int err) { + return err == -EBADMSG; +} + +static inline int mtd_is_bitflip_or_eccerr(int err) { + return mtd_is_bitflip(err) || mtd_is_eccerr(err); +} + +#endif /* __MTD_MTD_H__ */ diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h new file mode 100644 index 00000000..68891313 --- /dev/null +++ b/include/linux/mtd/mtdram.h @@ -0,0 +1,8 @@ +#ifndef __MTD_MTDRAM_H__ +#define __MTD_MTDRAM_H__ + +#include <linux/mtd/mtd.h> +int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, + unsigned long size, char *name); + +#endif /* __MTD_MTDRAM_H__ */ diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h new file mode 100644 index 00000000..51534e50 --- /dev/null +++ b/include/linux/mtd/nand-gpio.h @@ -0,0 +1,19 @@ +#ifndef __LINUX_MTD_NAND_GPIO_H +#define __LINUX_MTD_NAND_GPIO_H + +#include <linux/mtd/nand.h> + +struct gpio_nand_platdata { + int gpio_nce; + int gpio_nwp; + int gpio_cle; + int gpio_ale; + int gpio_rdy; + void 
(*adjust_parts)(struct gpio_nand_platdata *, size_t); + struct mtd_partition *parts; + unsigned int num_parts; + unsigned int options; + int chip_delay; +}; + +#endif diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h new file mode 100644 index 00000000..19eb23c1 --- /dev/null +++ b/include/linux/mtd/nand.h @@ -0,0 +1,828 @@ +/* + * linux/include/linux/mtd/nand.h + * + * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> + * Steven J. Hill <sjhill@realitydiluted.com> + * Thomas Gleixner <tglx@linutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Info: + * Contains standard defines and IDs for NAND flash devices + * + * Changelog: + * See git changelog. + */ +#ifndef __LINUX_MTD_NAND_H +#define __LINUX_MTD_NAND_H + +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/flashchip.h> +#include <linux/mtd/bbm.h> + +struct mtd_info; +struct nand_flash_dev; +/* Scan and identify a NAND device */ +extern int nand_scan(struct mtd_info *mtd, int max_chips); +/* + * Separate phases of nand_scan(), allowing board driver to intervene + * and override command or ECC setup according to flash type. + */ +extern int nand_scan_ident(struct mtd_info *mtd, int max_chips, + struct nand_flash_dev *table); +extern int nand_scan_tail(struct mtd_info *mtd); + +/* Free resources held by the NAND device */ +extern void nand_release(struct mtd_info *mtd); + +/* Internal helper for board drivers which need to override command function */ +extern void nand_wait_ready(struct mtd_info *mtd); + +/* locks all blocks present in the device */ +extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); + +/* unlocks specified locked blocks */ +extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); + +/* The maximum number of NAND chips in an array */ +#define NAND_MAX_CHIPS 8 + +/* + * This constant declares the max. oobsize / page, which + * is supported now. If you add a chip with bigger oobsize/page + * adjust this accordingly. + */ +#define NAND_MAX_OOBSIZE 2560 +#define NAND_MAX_PAGESIZE 32768 + +/* + * Constants for hardware specific CLE/ALE/NCE function + * + * These are bits which can be or'ed to set/clear multiple + * bits in one go. 
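/*
 * Minimal usage sketch (illustrative, not part of the patch above): reading
 * one page's OOB area through the mtd_oob_ops interface documented in mtd.h.
 * Only the OOB buffer is supplied (datbuf = NULL), so no main-area data is
 * transferred; MTD_OPS_PLACE_OOB is the mode constant from mtd-abi.h that the
 * mtd_oob_ops kernel-doc refers to.
 */
#include <linux/mtd/mtd.h>

static int example_read_oob(struct mtd_info *mtd, loff_t page_addr, u8 *oobbuf)
{
        struct mtd_oob_ops ops = {
                .mode   = MTD_OPS_PLACE_OOB,
                .ooblen = mtd->oobsize,
                .oobbuf = oobbuf,
                .datbuf = NULL,         /* OOB only, no page data */
        };

        return mtd_read_oob(mtd, page_addr, &ops);
}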
+ */ +/* Select the chip by setting nCE to low */ +#define NAND_NCE 0x01 +/* Select the command latch by setting CLE to high */ +#define NAND_CLE 0x02 +/* Select the address latch by setting ALE to high */ +#define NAND_ALE 0x04 + +#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE) +#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE) +#define NAND_CTRL_CHANGE 0x80 + +/* + * Standard NAND flash commands + */ +#define NAND_CMD_READ0 0 +#define NAND_CMD_READ1 1 +#define NAND_CMD_RNDOUT 5 +#define NAND_CMD_PAGEPROG 0x10 +#define NAND_CMD_READOOB 0x50 +#define NAND_CMD_ERASE1 0x60 +#define NAND_CMD_STATUS 0x70 +#define NAND_CMD_STATUS_MULTI 0x71 +#define NAND_CMD_SEQIN 0x80 +#define NAND_CMD_RNDIN 0x85 +#define NAND_CMD_READID 0x90 +#define NAND_CMD_ERASE2 0xd0 +#define NAND_CMD_ERASE3 0xd1 +#define NAND_CMD_PARAM 0xec +#define NAND_CMD_RESET 0xff +#define NAND_CMD_RESET_NO_STATUS_READ 0xFF1 +#define NAND_CMD_HYNIX_RETRY_END 0x38 + +#define NAND_CMD_LOCK 0x2a +#define NAND_CMD_UNLOCK1 0x23 +#define NAND_CMD_UNLOCK2 0x24 + +/* Extended commands for large page devices */ +#define NAND_CMD_READSTART 0x30 +#define NAND_CMD_RNDOUTSTART 0xE0 +#define NAND_CMD_CACHEDPROG 0x15 + +/* Extended commands for AG-AND device */ +/* + * Note: the command for NAND_CMD_DEPLETE1 is really 0x00 but + * there is no way to distinguish that from NAND_CMD_READ0 + * until the remaining sequence of commands has been completed + * so add a high order bit and mask it off in the command. + */ +#define NAND_CMD_DEPLETE1 0x100 +#define NAND_CMD_DEPLETE2 0x38 +#define NAND_CMD_STATUS_MULTI 0x71 +#define NAND_CMD_STATUS_ERROR 0x72 +/* multi-bank error status (banks 0-3) */ +#define NAND_CMD_STATUS_ERROR0 0x73 +#define NAND_CMD_STATUS_ERROR1 0x74 +#define NAND_CMD_STATUS_ERROR2 0x75 +#define NAND_CMD_STATUS_ERROR3 0x76 +#define NAND_CMD_STATUS_RESET 0x7f +#define NAND_CMD_STATUS_CLEAR 0xff + +#define MULTI_READ_1CYCLE 0x200 +#define MULTI_READ_2CYCLE 0x201 +#define MULTI_COPY_1CYCLE 0x202 +#define MULTI_COPY_2CYCLE 0x203 +#define MULTI_COPY_3CYCLE 0x204 +#define COPY_BACK_1CYCLE 0x205 +#define COPY_BACK_2CYCLE 0x206 + + +/* + * Support Nand special commands + */ +#define CACHE_READ (1<<0) +#define CACHE_PROG (1<<1) +#define COPY_BACK (1<<2) +#define INTEL_COPY_BACK (1<<3) +#define RANDOM_INPUT (1<<4) +#define RANDOM_OUTPUT (1<<5) +#define PLANE2_READ (1<<6) +#define PLANE2_PROG (1<<7) +#define PLANE2_CACHE_READ (1<<8) +#define PLANE2_CACHE_PROG (1<<9) +#define PLANE2_COPY_BACK (1<<10) +#define PLANE2_INTEL_COPY_BACK (1<<11) +#define PLANE2_RANDOM_OUTPUT (1<<12) +#define PLANE2_ERASE (1<<13) + +#define PLANE4_READ (1<<16) +#define PLANE4_PROG (1<<17) +#define PLANE4_CACHE_READ (1<<18) +#define PLANE4_CACHE_PROG (1<<19) +#define PLANE4_COPY_BACK (1<<20) +#define PLANE4_INTEL_COPY_BACK (1<<21) +#define PLANE4_RANDOM_OUTPUT (1<<22) +#define PLANE4_ERASE (1<<23) + + +#define NAND_CMD_NONE -1 + +/* Status bits */ +#define NAND_STATUS_FAIL 0x01 +#define NAND_STATUS_FAIL_N1 0x02 +#define NAND_STATUS_TRUE_READY 0x20 +#define NAND_STATUS_READY 0x40 +#define NAND_STATUS_WP 0x80 + +/* + * Constants for ECC_MODES + */ +typedef enum { + NAND_ECC_NONE, + NAND_ECC_SOFT, + NAND_ECC_HW, + NAND_ECC_HW_SYNDROME, + NAND_ECC_HW_OOB_FIRST, + NAND_ECC_SOFT_BCH, +} nand_ecc_modes_t; + +/* + * Constants for Hardware ECC + */ +/* Reset Hardware ECC for read */ +#define NAND_ECC_READ 0 +/* Reset Hardware ECC for write */ +#define NAND_ECC_WRITE 1 +/* Enable Hardware ECC before syndrome is read back from flash */ +#define NAND_ECC_READSYN 2 + +/* Bit mask for flags 
passed to do_nand_read_ecc */
+#define NAND_GET_DEVICE 0x80
+
+
+/*
+ * Option constants for bizarre dysfunctionality and real
+ * features.
+ */
+/* Chip cannot auto increment pages */
+#define NAND_NO_AUTOINCR 0x00000001
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 0x00000002
+/* Device supports partial programming without padding */
+#define NAND_NO_PADDING 0x00000004
+/* Chip has cache program function */
+#define NAND_CACHEPRG 0x00000008
+/* Chip has copy back function */
+#define NAND_COPYBACK 0x00000010
+/*
+ * AND Chip which has 4 banks and a confusing page / block
+ * assignment. See Renesas datasheet for further information.
+ */
+#define NAND_IS_AND 0x00000020
+/*
+ * Chip has an array of 4 pages which can be read without
+ * additional ready/busy waits.
+ */
+#define NAND_4PAGE_ARRAY 0x00000040
+/*
+ * Chip requires that BBT is periodically rewritten to prevent
+ * bits from adjacent blocks from 'leaking' in and altering data.
+ * This happens with the Renesas AG-AND chips, possibly others.
+ */
+#define BBT_AUTO_REFRESH 0x00000080
+/*
+ * Chip does not require ready check on read. True
+ * for all large page devices, as they do not support
+ * autoincrement.
+ */
+#define NAND_NO_READRDY 0x00000100
+/* Chip does not allow subpage writes */
+#define NAND_NO_SUBPAGE_WRITE 0x00000200
+
+/* Device is one of the 'new' xD cards that expose a fake NAND command set */
+#define NAND_BROKEN_XD 0x00000400
+
+/* Device behaves just like NAND, but is read-only */
+#define NAND_ROM 0x00000800
+
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS \
+ (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
+
+/* Macros to identify the above */
+#define NAND_CANAUTOINCR(chip) (!(chip->options & NAND_NO_AUTOINCR))
+#define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
+#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
+#define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
+/* Large page NAND with SOFT_ECC should support subpage reads */
+#define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \
+ && (chip->page_shift > 9))
+
+/* Mask to zero out the chip options, which come from the id table */
+#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR)
+
+/* Non-chip-related options */
+/* This option skips the BBT scan during initialization. */
+#define NAND_SKIP_BBTSCAN 0x00010000
+/*
+ * This option is defined if the board driver allocates its own buffers
+ * (e.g. because it needs them DMA-coherent).
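+ * A driver setting this flag is expected to point chip->buffers at
+ * memory it allocated itself (before nand_scan_tail() runs), since the
+ * core then skips its own nand_buffers allocation.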
+ */ +#define NAND_OWN_BUFFERS 0x00020000 +/* Chip may not exist, so silence any errors in scan */ +#define NAND_SCAN_SILENT_NODEV 0x00040000 + +/* Options set by nand scan */ +/* Nand scan has allocated controller struct */ +#define NAND_CONTROLLER_ALLOC 0x80000000 + +/* Cell info constants */ +#define NAND_CI_CHIPNR_MSK 0x03 +#define NAND_CI_CELLTYPE_MSK 0x0C + +/* Keep gcc happy */ +struct nand_chip; + +struct nand_onfi_params { + /* rev info and features block */ + /* 'O' 'N' 'F' 'I' */ + u8 sig[4]; + __le16 revision; + __le16 features; + __le16 opt_cmd; + u8 reserved[22]; + + /* manufacturer information block */ + char manufacturer[12]; + char model[20]; + u8 jedec_id; + __le16 date_code; + u8 reserved2[13]; + + /* memory organization block */ + __le32 byte_per_page; + __le16 spare_bytes_per_page; + __le32 data_bytes_per_ppage; + __le16 spare_bytes_per_ppage; + __le32 pages_per_block; + __le32 blocks_per_lun; + u8 lun_count; + u8 addr_cycles; + u8 bits_per_cell; + __le16 bb_per_lun; + __le16 block_endurance; + u8 guaranteed_good_blocks; + __le16 guaranteed_block_endurance; + u8 programs_per_page; + u8 ppage_attr; + u8 ecc_bits; + u8 interleaved_bits; + u8 interleaved_ops; + u8 reserved3[13]; + + /* electrical parameter block */ + u8 io_pin_capacitance_max; + __le16 async_timing_mode; + __le16 program_cache_timing_mode; + __le16 t_prog; + __le16 t_bers; + __le16 t_r; + __le16 t_ccs; + __le16 src_sync_timing_mode; + __le16 src_ssync_features; + __le16 clk_pin_capacitance_typ; + __le16 io_pin_capacitance_typ; + __le16 input_pin_capacitance_typ; + u8 input_pin_capacitance_max; + u8 driver_strenght_support; + __le16 t_int_r; + __le16 t_ald; + u8 reserved4[7]; + + /* vendor */ + u8 reserved5[90]; + + __le16 crc; +} __attribute__((packed)); + +#define ONFI_CRC_BASE 0x4F4E + +/** + * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices + * @lock: protection lock + * @active: the mtd device which holds the controller currently + * @wq: wait queue to sleep on if a NAND operation is in + * progress used instead of the per chip wait queue + * when a hw controller is available. + */ +struct nand_hw_control { + spinlock_t lock; + struct nand_chip *active; + wait_queue_head_t wq; +}; + +/** + * struct nand_ecc_ctrl - Control structure for ECC + * @mode: ECC mode + * @steps: number of ECC steps per page + * @size: data bytes per ECC step + * @bytes: ECC bytes per step + * @strength: max number of correctible bits per ECC step + * @total: total number of ECC bytes per page + * @prepad: padding information for syndrome based ECC generators + * @postpad: padding information for syndrome based ECC generators + * @layout: ECC layout control struct pointer + * @priv: pointer to private ECC control data + * @hwctl: function to control hardware ECC generator. Must only + * be provided if an hardware ECC is available + * @calculate: function for ECC calculation or readback from ECC hardware + * @correct: function for ECC correction, matching to ECC generator (sw/hw) + * @read_page_raw: function to read a raw page without ECC + * @write_page_raw: function to write a raw page without ECC + * @read_page: function to read a page according to the ECC generator + * requirements. + * @read_subpage: function to read parts of the page covered by ECC. + * @write_page: function to write a page according to the ECC generator + * requirements. 
+ * @write_oob_raw: function to write chip OOB data without ECC + * @read_oob_raw: function to read chip OOB data without ECC + * @read_oob: function to read chip OOB data + * @write_oob: function to write chip OOB data + */ +struct nand_ecc_ctrl { + nand_ecc_modes_t mode; + int steps; + int size; + int bytes; + int total; + int strength; + int prepad; + int postpad; + struct nand_ecclayout *layout; + void *priv; + void (*hwctl)(struct mtd_info *mtd, int mode); + int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, + uint8_t *ecc_code); + int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, + uint8_t *calc_ecc); + int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int page); + void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf); + int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int page); + int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offs, uint32_t len, uint8_t *buf); + void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf); + int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, + int page); + int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, + int page, int sndcmd); + int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page, + int sndcmd); + int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip, + int page); + int (*read_bb_oob)(struct mtd_info *mtd, + struct nand_chip *chip, + int page, + int sndcmd); +}; + +/** + * struct nand_buffers - buffer structure for read/write + * @ecccalc: buffer for calculated ECC + * @ecccode: buffer for ECC read from flash + * @databuf: buffer for data - dynamically sized + * + * Do not change the order of buffers. databuf and oobrbuf must be in + * consecutive order. + */ +struct nand_buffers { + uint8_t ecccalc[NAND_MAX_OOBSIZE]; + uint8_t ecccode[NAND_MAX_OOBSIZE]; + uint8_t databuf[NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE]; +}; + +/** + * struct nand_chip - NAND Private Flash Chip Data + * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the + * flash device + * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the + * flash device. + * @read_byte: [REPLACEABLE] read one byte from the chip + * @read_word: [REPLACEABLE] read one word from the chip + * @write_buf: [REPLACEABLE] write data from the buffer to the chip + * @read_buf: [REPLACEABLE] read data from the chip into the buffer + * @verify_buf: [REPLACEABLE] verify buffer contents against the chip + * data. + * @select_chip: [REPLACEABLE] select chip nr + * @block_bad: [REPLACEABLE] check, if the block is bad + * @block_markbad: [REPLACEABLE] mark the block bad + * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling + * ALE/CLE/nCE. Also used to write command and address + * @init_size: [BOARDSPECIFIC] hardwarespecific function for setting + * mtd->oobsize, mtd->writesize and so on. + * @id_data contains the 8 bytes values of NAND_CMD_READID. + * Return with the bus width. + * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing + * device ready/busy line. If set to NULL no access to + * ready/busy is available and the ready/busy information + * is read from the chip status register. + * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing + * commands to the chip. + * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on + * ready. 
+ * @ecc: [BOARDSPECIFIC] ECC control structure + * @buffers: buffer structure for read/write + * @hwcontrol: platform-specific hardware control structure + * @erase_cmd: [INTERN] erase command write function, selectable due + * to AND support. + * @scan_bbt: [REPLACEABLE] function to scan bad block table + * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring + * data from array to read regs (tR). + * @state: [INTERN] the current state of the NAND device + * @oob_poi: "poison value buffer," used for laying out OOB data + * before writing + * @page_shift: [INTERN] number of address bits in a page (column + * address bits). + * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock + * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @chip_shift: [INTERN] number of address bits in one chip + * @options: [BOARDSPECIFIC] various chip options. They can partly + * be set to inform nand_scan about special functionality. + * See the defines for further explanation. + * @bbt_options: [INTERN] bad block specific options. All options used + * here must come from bbm.h. By default, these options + * will be copied to the appropriate nand_bbt_descr's. + * @badblockpos: [INTERN] position of the bad block marker in the oob + * area. + * @badblockbits: [INTERN] minimum number of set bits in a good block's + * bad block marker position; i.e., BBM == 11110111b is + * not bad when badblockbits == 7 + * @cellinfo: [INTERN] MLC/multichip data from chip ident + * @numchips: [INTERN] number of physical chips + * @chipsize: [INTERN] the size of one chip for multichip arrays + * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 + * @pagebuf: [INTERN] holds the pagenumber which is currently in + * data_buf. + * @subpagesize: [INTERN] holds the subpagesize + * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded), + * non 0 if ONFI supported. + * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is + * supported, 0 otherwise. + * @ecclayout: [REPLACEABLE] the default ECC placement scheme + * @bbt: [INTERN] bad block table pointer + * @bbt_td: [REPLACEABLE] bad block table descriptor for flash + * lookup. + * @bbt_md: [REPLACEABLE] bad block table mirror descriptor + * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial + * bad block scan. + * @controller: [REPLACEABLE] a pointer to a hardware controller + * structure which is shared among multiple independent + * devices. + * @priv: [OPTIONAL] pointer to private chip data + * @errstat: [OPTIONAL] hardware specific function to perform + * additional error status checks (determine if errors are + * correctable). 
+ * @write_page: [REPLACEABLE] High-level page write function + */ + +struct nand_chip { + void __iomem *IO_ADDR_R; + void __iomem *IO_ADDR_W; + + uint8_t (*read_byte)(struct mtd_info *mtd); + u16 (*read_word)(struct mtd_info *mtd); + void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); + void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); + int (*verify_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); + void (*select_chip)(struct mtd_info *mtd, int chip); + int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); + int (*block_markbad)(struct mtd_info *mtd, loff_t ofs, int type); + void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); + int (*init_size)(struct mtd_info *mtd, struct nand_chip *this, + u8 *id_data); + int (*dev_ready)(struct mtd_info *mtd); + void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, + int page_addr); + int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); + void (*erase_cmd)(struct mtd_info *mtd, int page); + int (*scan_bbt)(struct mtd_info *mtd); + int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, + int status, int page); + int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int page, int cached, int raw); + int (*get_para)(struct mtd_info *mtd, struct nand_chip *chip); + + int chip_delay; + unsigned int options; + unsigned int bbt_options; + + int page_shift; + int pagecnt_shift; + int phys_erase_shift; + int bbt_erase_shift; + int chip_shift; + int numchips; + uint64_t chipsize; + int pagemask; + int pagebuf; + int subpagesize; + uint8_t cellinfo; + int badblockpos; + int badblockbits; + + int onfi_version; + struct nand_onfi_params onfi_params; + + flstate_t state; + + uint8_t *oob_poi; + struct nand_hw_control *controller; + struct nand_ecclayout *ecclayout; + + struct nand_ecc_ctrl ecc; + struct nand_buffers *buffers; + struct nand_hw_control hwcontrol; + + uint8_t *bbt; + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + + struct nand_bbt_descr *badblock_pattern; + struct nand_bbt_descr *retry_pattern; + int page_offset[2]; + struct nand_read_retry_param *cur_chip; + int realplanenum; + int bbt_plane[2]; + int status_plane[2]; + + void *priv; +}; + +/* + * NAND Flash Manufacturer ID Codes + */ +#define NAND_MFR_TOSHIBA 0x98 +#define NAND_MFR_SAMSUNG 0xec +#define NAND_MFR_FUJITSU 0x04 +#define NAND_MFR_NATIONAL 0x8f +#define NAND_MFR_RENESAS 0x07 +#define NAND_MFR_STMICRO 0x20 +#define NAND_MFR_HYNIX 0xad +#define NAND_MFR_MICRON 0x2c +#define NAND_MFR_SANDISK 0x45 +#define NAND_MFR_AMD 0x01 +#define NAND_MFR_INTEL 0x89 +#define NAND_MFR_MXIC 0xc2 +#define NAND_MFR_MIRA 0x92 +#define NAND_MFR_MACRONIX 0xc2 + +/** + * struct nand_flash_dev - NAND Flash Device ID Structure + * @name: Identify the device type + * @id: device ID code + * @pagesize: Pagesize in bytes. Either 256 or 512 or 0 + * If the pagesize is 0, then the real pagesize + * and the eraseize are determined from the + * extended id bytes in the chip + * @erasesize: Size of an erase block in the flash device. 
+ * @chipsize: Total chipsize in Mega Bytes + * @options: Bitfield to store chip relevant options + */ +struct nand_flash_dev { + char *name; + int id; + unsigned long pagesize; + unsigned long chipsize; + unsigned long erasesize; + unsigned long options; +}; + +#define BOOT_MAGIC_SIZE 0x8 +struct nand_rdtry_param_hdr { + unsigned char magic[BOOT_MAGIC_SIZE]; + unsigned int nand_id; //nand id + unsigned int nand_id_5th; //nand 5th id + unsigned int eslc_reg_cnt; // Enhanced SLC register count. + unsigned int total_retry_cnt; // total retry count + unsigned int retry_reg_cnt; // retry register count + //unsigned char eslc[eslc_reg]; //eslc default data. + //unsigned char retry_data[total_retry_cnt * retry_reg_cnt]; /* param data */ +}; +//All +struct nand_read_retry_param { + char magic[32]; + unsigned int nand_id; + unsigned int nand_id_5th; + unsigned int eslc_reg_num; + unsigned char eslc_offset[32]; + unsigned char eslc_def_value[32]; + unsigned char eslc_set_value[32]; + unsigned int retry_reg_num; + unsigned char retry_offset[32]; + unsigned char retry_def_value[32]; + unsigned char retry_value[256]; + unsigned int otp_len; + unsigned char otp_offset[32]; + unsigned char otp_data[32]; + unsigned int total_try_times; + int cur_try_times; + int (*set_parameter)(struct mtd_info *mtd, int mode, int def_mode); + int (*get_parameter)(struct mtd_info *mtd, int mode); + int (*get_otp_table)(struct mtd_info *mtd, struct nand_chip *chip); + int retry; //1: in retry mode +}; +/* #define RETRY_DEBUG */ + + +/* DannierChen-2009-10-07 add for new nand flash support list */ +#ifndef DWORD +#define DWORD unsigned int +#endif +#define MAX_PRODUCT_NAME_LENGTH 0x20 +struct WMT_nand_flash_dev { + DWORD dwFlashID; //composed by 4 bytes of ID. For example:0xADF1801D + DWORD dwBlockCount; //block count of one chip. For example: 1024 + DWORD dwPageSize; //page size. For example:2048(other value can be 512 or 4096) + DWORD dwSpareSize; //spare area size. For example:16(almost all kinds of nand is 16) + DWORD dwBlockSize; //block size = dwPageSize * PageCntPerBlock. For example:131072 + DWORD dwAddressCycle; //address cycle 4 or 5 + DWORD dwBI0Position; //BI0 page postion in block + DWORD dwBI1Position; //BI1 page postion in block + DWORD dwBIOffset; //BI offset in page + DWORD dwDataWidth; //data with X8 or X16 + DWORD dwPageProgramLimit; //chip can program PAGE_PROGRAM_LIMIT times within the same page + DWORD dwSeqRowReadSupport; //whether support sequential row read, 1 = support 0 = not support + DWORD dwSeqPageProgram; //chip need sequential page program in a block. 1 = need + DWORD dwNandType; //MLC or SLC + DWORD dwECCBitNum; //ECC bit number needed + DWORD dwRWTimming; //NFC Read Pulse width and Read hold time, write so does. default =0x12101210 + DWORD dwTadl; //NFC write TADL timeing config + DWORD dwDDR; //NFC support toshia ddr(toggle) mode = 1 not support = 0 + DWORD dwRetry; //NFC Read Retry support = 1 not support = 0 + DWORD dwRdmz; //NFC Randomizer support = 1 not support = 0 + DWORD dwFlashID2; //composed by additional 2 bytes of ID from original 4 bytes. + DWORD dwSpeedUpCmd; /* supprot speed up nand command ex: 2-plane cache read and so on.. */ + char ProductName[MAX_PRODUCT_NAME_LENGTH]; //product name. for example "HYNIX_NF_HY27UF081G2A" + unsigned long options; +}; +/** + * struct nand_manufacturers - NAND Flash Manufacturer ID Structure + * @name: Manufacturer name + * @id: manufacturer ID code of device. 
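+ *
+ * nand_scan_ident() matches the first ID byte returned by
+ * NAND_CMD_READID against the nand_manuf_ids[] table declared below to
+ * obtain a printable vendor name.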
+*/ +struct nand_manufacturers { + int id; + char *name; +}; + +extern struct WMT_nand_flash_dev WMT_nand_flash_ids[]; +extern struct nand_flash_dev nand_flash_ids[]; +extern struct nand_manufacturers nand_manuf_ids[]; + +extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd); +extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs); +extern int nand_default_bbt(struct mtd_info *mtd); +extern int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt, int allow_readfail); +extern int nand_isbad_bbt_multi(struct mtd_info *mtd, loff_t offs, int allowbbt, int allow_readfail); +extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, + int allowbbt); +extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, uint8_t *buf); +extern struct nand_read_retry_param chip_table[]; +extern int hynix_get_parameter(struct mtd_info *mtd, int mode); +extern int hynix_set_parameter(struct mtd_info *mtd, int mode, int def_mode); +extern int hynix_get_otp(struct mtd_info *mtd, struct nand_chip *chip); +extern int toshiba_get_parameter(struct mtd_info *mtd, int mode); +extern int toshiba_set_parameter(struct mtd_info *mtd, int mode, int def_mode); +extern int samsung_get_parameter(struct mtd_info *mtd, int mode); +extern int samsung_set_parameter(struct mtd_info *mtd, int mode, int def_mode); +extern int sandisk_get_parameter(struct mtd_info *mtd, int mode); +extern int sandisk_set_parameter(struct mtd_info *mtd, int mode, int def_mode); +extern int micron_get_parameter(struct mtd_info *mtd, int mode); +extern int micron_set_parameter(struct mtd_info *mtd, int mode, int def_mode); + + + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) +#endif +#define READ_RETRY_CHIP_NUM ARRAY_SIZE(chip_table) +#define ESLC_MODE 0 +#define READ_RETRY_MODE 1 +#define TEST_MODE 2 +#define DEFAULT_VALUE 0 +#define ECC_ERROR_VALUE 1 +/** + * struct platform_nand_chip - chip level device structure + * @nr_chips: max. number of chips to scan for + * @chip_offset: chip number offset + * @nr_partitions: number of partitions pointed to by partitions (or zero) + * @partitions: mtd partition list + * @chip_delay: R/B delay value in us + * @options: Option flags, e.g. 16bit buswidth + * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH + * @ecclayout: ECC layout info structure + * @part_probe_types: NULL-terminated array of probe types + */ +struct platform_nand_chip { + int nr_chips; + int chip_offset; + int nr_partitions; + struct mtd_partition *partitions; + struct nand_ecclayout *ecclayout; + int chip_delay; + unsigned int options; + unsigned int bbt_options; + const char **part_probe_types; +}; + +/* Keep gcc happy */ +struct platform_device; + +/** + * struct platform_nand_ctrl - controller level device structure + * @probe: platform specific function to probe/setup hardware + * @remove: platform specific function to remove/teardown hardware + * @hwcontrol: platform specific hardware control structure + * @dev_ready: platform specific function to read ready/busy pin + * @select_chip: platform specific chip select function + * @cmd_ctrl: platform specific function for controlling + * ALE/CLE/nCE. 
Also used to write command and address + * @write_buf: platform specific function for write buffer + * @read_buf: platform specific function for read buffer + * @priv: private data to transport driver specific settings + * + * All fields are optional and depend on the hardware driver requirements + */ +struct platform_nand_ctrl { + int (*probe)(struct platform_device *pdev); + void (*remove)(struct platform_device *pdev); + void (*hwcontrol)(struct mtd_info *mtd, int cmd); + int (*dev_ready)(struct mtd_info *mtd); + void (*select_chip)(struct mtd_info *mtd, int chip); + void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); + void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); + void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); + void *priv; +}; + +/** + * struct platform_nand_data - container structure for platform-specific data + * @chip: chip level chip structure + * @ctrl: controller level device structure + */ +struct platform_nand_data { + struct platform_nand_chip chip; + struct platform_nand_ctrl ctrl; +}; + +/* Some helpers to access the data structures */ +static inline +struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + + return chip->priv; +} + +#endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h new file mode 100644 index 00000000..74acf536 --- /dev/null +++ b/include/linux/mtd/nand_bch.h @@ -0,0 +1,72 @@ +/* + * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file is the header for the NAND BCH ECC implementation. 
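+ *
+ * A board driver that wants software BCH typically just selects the
+ * ECC mode and step geometry, e.g. (illustrative values for 4-bit
+ * correction over 512-byte steps):
+ *
+ *	chip->ecc.mode  = NAND_ECC_SOFT_BCH;
+ *	chip->ecc.size  = 512;
+ *	chip->ecc.bytes = 7;
+ *
+ * nand_scan_tail() is then expected to call nand_bch_init() and wire
+ * chip->ecc.calculate/correct to the helpers declared below.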
+ */ + +#ifndef __MTD_NAND_BCH_H__ +#define __MTD_NAND_BCH_H__ + +struct mtd_info; +struct nand_bch_control; + +#if defined(CONFIG_MTD_NAND_ECC_BCH) + +static inline int mtd_nand_has_bch(void) { return 1; } + +/* + * Calculate BCH ecc code + */ +int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code); + +/* + * Detect and correct bit errors + */ +int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, + u_char *calc_ecc); +/* + * Initialize BCH encoder/decoder + */ +struct nand_bch_control * +nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, + unsigned int eccbytes, struct nand_ecclayout **ecclayout); +/* + * Release BCH encoder/decoder resources + */ +void nand_bch_free(struct nand_bch_control *nbc); + +#else /* !CONFIG_MTD_NAND_ECC_BCH */ + +static inline int mtd_nand_has_bch(void) { return 0; } + +static inline int +nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code) +{ + return -1; +} + +static inline int +nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc) +{ + return -1; +} + +static inline struct nand_bch_control * +nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, + unsigned int eccbytes, struct nand_ecclayout **ecclayout) +{ + return NULL; +} + +static inline void nand_bch_free(struct nand_bch_control *nbc) {} + +#endif /* CONFIG_MTD_NAND_ECC_BCH */ + +#endif /* __MTD_NAND_BCH_H__ */ diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h new file mode 100644 index 00000000..4d8406c8 --- /dev/null +++ b/include/linux/mtd/nand_ecc.h @@ -0,0 +1,42 @@ +/* + * drivers/mtd/nand_ecc.h + * + * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com> + * David Woodhouse <dwmw2@infradead.org> + * Thomas Gleixner <tglx@linutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file is the header for the ECC algorithm. + */ + +#ifndef __MTD_NAND_ECC_H__ +#define __MTD_NAND_ECC_H__ + +struct mtd_info; + +/* + * Calculate 3 byte ECC code for eccsize byte block + */ +void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, + u_char *ecc_code); + +/* + * Calculate 3 byte ECC code for 256/512 byte block + */ +int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); + +/* + * Detect and correct a 1 bit error for eccsize byte block + */ +int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, + unsigned int eccsize); + +/* + * Detect and correct a 1 bit error for 256/512 byte block + */ +int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); + +#endif /* __MTD_NAND_ECC_H__ */ diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h new file mode 100644 index 00000000..d0558a98 --- /dev/null +++ b/include/linux/mtd/ndfc.h @@ -0,0 +1,67 @@ +/* + * linux/include/linux/mtd/ndfc.h + * + * Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Info: + * Contains defines, datastructures for ndfc nand controller + * + */ +#ifndef __LINUX_MTD_NDFC_H +#define __LINUX_MTD_NDFC_H + +/* NDFC Register definitions */ +#define NDFC_CMD 0x00 +#define NDFC_ALE 0x04 +#define NDFC_DATA 0x08 +#define NDFC_ECC 0x10 +#define NDFC_BCFG0 0x30 +#define NDFC_BCFG1 0x34 +#define NDFC_BCFG2 0x38 +#define NDFC_BCFG3 0x3c +#define NDFC_CCR 0x40 +#define NDFC_STAT 0x44 +#define NDFC_HWCTL 0x48 +#define NDFC_REVID 0x50 + +#define NDFC_STAT_IS_READY 0x01000000 + +#define NDFC_CCR_RESET_CE 0x80000000 /* CE Reset */ +#define NDFC_CCR_RESET_ECC 0x40000000 /* ECC Reset */ +#define NDFC_CCR_RIE 0x20000000 /* Interrupt Enable on Device Rdy */ +#define NDFC_CCR_REN 0x10000000 /* Enable wait for Rdy in LinearR */ +#define NDFC_CCR_ROMEN 0x08000000 /* Enable ROM In LinearR */ +#define NDFC_CCR_ARE 0x04000000 /* Auto-Read Enable */ +#define NDFC_CCR_BS(x) (((x) & 0x3) << 24) /* Select Bank on CE[x] */ +#define NDFC_CCR_BS_MASK 0x03000000 /* Select Bank */ +#define NDFC_CCR_ARAC0 0x00000000 /* 3 Addr, 1 Col 2 Row 512b page */ +#define NDFC_CCR_ARAC1 0x00001000 /* 4 Addr, 1 Col 3 Row 512b page */ +#define NDFC_CCR_ARAC2 0x00002000 /* 4 Addr, 2 Col 2 Row 2K page */ +#define NDFC_CCR_ARAC3 0x00003000 /* 5 Addr, 2 Col 3 Row 2K page */ +#define NDFC_CCR_ARAC_MASK 0x00003000 /* Auto-Read mode Addr Cycles */ +#define NDFC_CCR_RPG 0x0000C000 /* Auto-Read Page */ +#define NDFC_CCR_EBCC 0x00000004 /* EBC Configuration Completed */ +#define NDFC_CCR_DHC 0x00000002 /* Direct Hardware Control Enable */ + +#define NDFC_BxCFG_EN 0x80000000 /* Bank Enable */ +#define NDFC_BxCFG_CED 0x40000000 /* nCE Style */ +#define NDFC_BxCFG_SZ_MASK 0x08000000 /* Bank Size */ +#define NDFC_BxCFG_SZ_8BIT 0x00000000 /* 8bit */ +#define NDFC_BxCFG_SZ_16BIT 0x08000000 /* 16bit */ + +#define NDFC_MAX_BANKS 4 + +struct ndfc_controller_settings { + uint32_t ccr_settings; + uint64_t ndfc_erpn; +}; + +struct ndfc_chip_settings { + uint32_t bank_settings; +}; + +#endif diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h new file mode 100644 index 00000000..b059629e --- /dev/null +++ b/include/linux/mtd/nftl.h @@ -0,0 +1,72 @@ +/* + * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __MTD_NFTL_H__ +#define __MTD_NFTL_H__ + +#include <linux/mtd/mtd.h> +#include <linux/mtd/blktrans.h> + +#include <mtd/nftl-user.h> + +/* these info are used in ReplUnitTable */ +#define BLOCK_NIL 0xffff /* last block of a chain */ +#define BLOCK_FREE 0xfffe /* free block */ +#define BLOCK_NOTEXPLORED 0xfffd /* non explored block, only used during mounting */ +#define BLOCK_RESERVED 0xfffc /* bios block or bad block */ + +struct NFTLrecord { + struct mtd_blktrans_dev mbd; + __u16 MediaUnit, SpareMediaUnit; + __u32 EraseSize; + struct NFTLMediaHeader MediaHdr; + int usecount; + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + __u16 numvunits; + __u16 lastEUN; /* should be suppressed */ + __u16 numfreeEUNs; + __u16 LastFreeEUN; /* To speed up finding a free EUN */ + int head,sect,cyl; + __u16 *EUNtable; /* [numvunits]: First EUN for each virtual unit */ + __u16 *ReplUnitTable; /* [numEUNs]: ReplUnitNumber for each */ + unsigned int nb_blocks; /* number of physical blocks */ + unsigned int nb_boot_blocks; /* number of blocks used by the bios */ + struct erase_info instr; + struct nand_ecclayout oobinfo; +}; + +int NFTL_mount(struct NFTLrecord *s); +int NFTL_formatblock(struct NFTLrecord *s, int block); + +int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, + size_t *retlen, uint8_t *buf); +int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, + size_t *retlen, uint8_t *buf); + +#ifndef NFTL_MAJOR +#define NFTL_MAJOR 93 +#endif + +#define MAX_NFTLS 16 +#define MAX_SECTORS_PER_UNIT 64 +#define NFTL_PARTN_BITS 4 + +#endif /* __MTD_NFTL_H__ */ diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h new file mode 100644 index 00000000..4596503c --- /dev/null +++ b/include/linux/mtd/onenand.h @@ -0,0 +1,242 @@ +/* + * linux/include/linux/mtd/onenand.h + * + * Copyright © 2005-2009 Samsung Electronics + * Kyungmin Park <kyungmin.park@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_MTD_ONENAND_H +#define __LINUX_MTD_ONENAND_H + +#include <linux/spinlock.h> +#include <linux/completion.h> +#include <linux/mtd/flashchip.h> +#include <linux/mtd/onenand_regs.h> +#include <linux/mtd/bbm.h> + +#define MAX_DIES 2 +#define MAX_BUFFERRAM 2 + +/* Scan and identify a OneNAND device */ +extern int onenand_scan(struct mtd_info *mtd, int max_chips); +/* Free resources held by the OneNAND device */ +extern void onenand_release(struct mtd_info *mtd); + +/** + * struct onenand_bufferram - OneNAND BufferRAM Data + * @blockpage: block & page address in BufferRAM + */ +struct onenand_bufferram { + int blockpage; +}; + +/** + * struct onenand_chip - OneNAND Private Flash Chip Data + * @base: [BOARDSPECIFIC] address to access OneNAND + * @dies: [INTERN][FLEX-ONENAND] number of dies on chip + * @boundary: [INTERN][FLEX-ONENAND] Boundary of the dies + * @diesize: [INTERN][FLEX-ONENAND] Size of the dies + * @chipsize: [INTERN] the size of one chip for multichip arrays + * FIXME For Flex-OneNAND, chipsize holds maximum possible + * device size ie when all blocks are considered MLC + * @device_id: [INTERN] device ID + * @density_mask: chip density, used for DDP devices + * @verstion_id: [INTERN] version ID + * @options: [BOARDSPECIFIC] various chip options. They can + * partly be set to inform onenand_scan about + * @erase_shift: [INTERN] number of address bits in a block + * @page_shift: [INTERN] number of address bits in a page + * @page_mask: [INTERN] a page per block mask + * @writesize: [INTERN] a real page size + * @bufferram_index: [INTERN] BufferRAM index + * @bufferram: [INTERN] BufferRAM info + * @readw: [REPLACEABLE] hardware specific function for read short + * @writew: [REPLACEABLE] hardware specific function for write short + * @command: [REPLACEABLE] hardware specific function for writing + * commands to the chip + * @wait: [REPLACEABLE] hardware specific function for wait on ready + * @bbt_wait: [REPLACEABLE] hardware specific function for bbt wait on ready + * @unlock_all: [REPLACEABLE] hardware specific function for unlock all + * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area + * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area + * @read_word: [REPLACEABLE] hardware specific function for read + * register of OneNAND + * @write_word: [REPLACEABLE] hardware specific function for write + * register of OneNAND + * @mmcontrol: sync burst read function + * @chip_probe: [REPLACEABLE] hardware specific function for chip probe + * @block_markbad: function to mark a block as bad + * @scan_bbt: [REPLACEALBE] hardware specific function for scanning + * Bad block Table + * @chip_lock: [INTERN] spinlock used to protect access to this + * structure and the chip + * @wq: [INTERN] wait queue to sleep on if a OneNAND + * operation is in progress + * @state: [INTERN] the current state of the OneNAND device + * @page_buf: [INTERN] page main data buffer + * @oob_buf: [INTERN] page oob data buffer + * @subpagesize: [INTERN] holds the subpagesize + * @ecclayout: [REPLACEABLE] the default ecc placement scheme + * @bbm: [REPLACEABLE] pointer to Bad Block Management + * @priv: [OPTIONAL] pointer to private chip date + */ +struct onenand_chip { + void __iomem *base; + unsigned dies; + unsigned boundary[MAX_DIES]; + loff_t diesize[MAX_DIES]; + unsigned int chipsize; + unsigned int device_id; + unsigned int version_id; + unsigned int technology; + unsigned int density_mask; + unsigned int options; + + unsigned int 
erase_shift; + unsigned int page_shift; + unsigned int page_mask; + unsigned int writesize; + + unsigned int bufferram_index; + struct onenand_bufferram bufferram[MAX_BUFFERRAM]; + + int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len); + int (*wait)(struct mtd_info *mtd, int state); + int (*bbt_wait)(struct mtd_info *mtd, int state); + void (*unlock_all)(struct mtd_info *mtd); + int (*read_bufferram)(struct mtd_info *mtd, int area, + unsigned char *buffer, int offset, size_t count); + int (*write_bufferram)(struct mtd_info *mtd, int area, + const unsigned char *buffer, int offset, size_t count); + unsigned short (*read_word)(void __iomem *addr); + void (*write_word)(unsigned short value, void __iomem *addr); + void (*mmcontrol)(struct mtd_info *mtd, int sync_read); + int (*chip_probe)(struct mtd_info *mtd); + int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); + int (*scan_bbt)(struct mtd_info *mtd); + int (*enable)(struct mtd_info *mtd); + int (*disable)(struct mtd_info *mtd); + + struct completion complete; + int irq; + + spinlock_t chip_lock; + wait_queue_head_t wq; + flstate_t state; + unsigned char *page_buf; + unsigned char *oob_buf; +#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE + unsigned char *verify_buf; +#endif + + int subpagesize; + struct nand_ecclayout *ecclayout; + + void *bbm; + + void *priv; + + /* + * Shows that the current operation is composed + * of sequence of commands. For example, cache program. + * Such command status OnGo bit is checked at the end of + * sequence. + */ + unsigned int ongoing; +}; + +/* + * Helper macros + */ +#define ONENAND_PAGES_PER_BLOCK (1<<6) + +#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) +#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) +#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) +#define ONENAND_SET_PREV_BUFFERRAM(this) (this->bufferram_index ^= 1) +#define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0) +#define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1) + +#define FLEXONENAND(this) \ + (this->device_id & DEVICE_IS_FLEXONENAND) +#define ONENAND_GET_SYS_CFG1(this) \ + (this->read_word(this->base + ONENAND_REG_SYS_CFG1)) +#define ONENAND_SET_SYS_CFG1(v, this) \ + (this->write_word(v, this->base + ONENAND_REG_SYS_CFG1)) + +#define ONENAND_IS_DDP(this) \ + (this->device_id & ONENAND_DEVICE_IS_DDP) + +#define ONENAND_IS_MLC(this) \ + (this->technology & ONENAND_TECHNOLOGY_IS_MLC) + +#ifdef CONFIG_MTD_ONENAND_2X_PROGRAM +#define ONENAND_IS_2PLANE(this) \ + (this->options & ONENAND_HAS_2PLANE) +#else +#define ONENAND_IS_2PLANE(this) (0) +#endif + +#define ONENAND_IS_CACHE_PROGRAM(this) \ + (this->options & ONENAND_HAS_CACHE_PROGRAM) + +#define ONENAND_IS_NOP_1(this) \ + (this->options & ONENAND_HAS_NOP_1) + +/* Check byte access in OneNAND */ +#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1) + +/* + * Options bits + */ +#define ONENAND_HAS_CONT_LOCK (0x0001) +#define ONENAND_HAS_UNLOCK_ALL (0x0002) +#define ONENAND_HAS_2PLANE (0x0004) +#define ONENAND_HAS_4KB_PAGE (0x0008) +#define ONENAND_HAS_CACHE_PROGRAM (0x0010) +#define ONENAND_HAS_NOP_1 (0x0020) +#define ONENAND_SKIP_UNLOCK_CHECK (0x0100) +#define ONENAND_PAGEBUF_ALLOC (0x1000) +#define ONENAND_OOBBUF_ALLOC (0x2000) +#define ONENAND_SKIP_INITIAL_UNLOCKING (0x4000) + +#define ONENAND_IS_4KB_PAGE(this) \ + (this->options & ONENAND_HAS_4KB_PAGE) + +/* + * OneNAND Flash Manufacturer ID Codes + */ +#define ONENAND_MFR_SAMSUNG 0xec +#define ONENAND_MFR_NUMONYX 0x20 + +/** + * struct 
onenand_manufacturers - NAND Flash Manufacturer ID Structure + * @name: Manufacturer name + * @id: manufacturer ID code of device. +*/ +struct onenand_manufacturers { + int id; + char *name; +}; + +int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops); +unsigned onenand_block(struct onenand_chip *this, loff_t addr); +loff_t onenand_addr(struct onenand_chip *this, int block); +int flexonenand_region(struct mtd_info *mtd, loff_t addr); + +struct mtd_partition; + +struct onenand_platform_data { + void (*mmcontrol)(struct mtd_info *mtd, int sync_read); + int (*read_bufferram)(struct mtd_info *mtd, int area, + unsigned char *buffer, int offset, size_t count); + struct mtd_partition *parts; + unsigned int nr_parts; +}; + +#endif /* __LINUX_MTD_ONENAND_H */ diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h new file mode 100644 index 00000000..d60130f8 --- /dev/null +++ b/include/linux/mtd/onenand_regs.h @@ -0,0 +1,223 @@ +/* + * linux/include/linux/mtd/onenand_regs.h + * + * OneNAND Register header file + * + * Copyright (C) 2005-2007 Samsung Electronics + * Kyungmin Park <kyungmin.park@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ONENAND_REG_H +#define __ONENAND_REG_H + +/* Memory Address Map Translation (Word order) */ +#define ONENAND_MEMORY_MAP(x) ((x) << 1) + +/* + * External BufferRAM area + */ +#define ONENAND_BOOTRAM ONENAND_MEMORY_MAP(0x0000) +#define ONENAND_DATARAM ONENAND_MEMORY_MAP(0x0200) +#define ONENAND_SPARERAM ONENAND_MEMORY_MAP(0x8010) + +/* + * OneNAND Registers + */ +#define ONENAND_REG_MANUFACTURER_ID ONENAND_MEMORY_MAP(0xF000) +#define ONENAND_REG_DEVICE_ID ONENAND_MEMORY_MAP(0xF001) +#define ONENAND_REG_VERSION_ID ONENAND_MEMORY_MAP(0xF002) +#define ONENAND_REG_DATA_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF003) +#define ONENAND_REG_BOOT_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF004) +#define ONENAND_REG_NUM_BUFFERS ONENAND_MEMORY_MAP(0xF005) +#define ONENAND_REG_TECHNOLOGY ONENAND_MEMORY_MAP(0xF006) + +#define ONENAND_REG_START_ADDRESS1 ONENAND_MEMORY_MAP(0xF100) +#define ONENAND_REG_START_ADDRESS2 ONENAND_MEMORY_MAP(0xF101) +#define ONENAND_REG_START_ADDRESS3 ONENAND_MEMORY_MAP(0xF102) +#define ONENAND_REG_START_ADDRESS4 ONENAND_MEMORY_MAP(0xF103) +#define ONENAND_REG_START_ADDRESS5 ONENAND_MEMORY_MAP(0xF104) +#define ONENAND_REG_START_ADDRESS6 ONENAND_MEMORY_MAP(0xF105) +#define ONENAND_REG_START_ADDRESS7 ONENAND_MEMORY_MAP(0xF106) +#define ONENAND_REG_START_ADDRESS8 ONENAND_MEMORY_MAP(0xF107) + +#define ONENAND_REG_START_BUFFER ONENAND_MEMORY_MAP(0xF200) +#define ONENAND_REG_COMMAND ONENAND_MEMORY_MAP(0xF220) +#define ONENAND_REG_SYS_CFG1 ONENAND_MEMORY_MAP(0xF221) +#define ONENAND_REG_SYS_CFG2 ONENAND_MEMORY_MAP(0xF222) +#define ONENAND_REG_CTRL_STATUS ONENAND_MEMORY_MAP(0xF240) +#define ONENAND_REG_INTERRUPT ONENAND_MEMORY_MAP(0xF241) +#define ONENAND_REG_START_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24C) +#define ONENAND_REG_END_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24D) +#define ONENAND_REG_WP_STATUS ONENAND_MEMORY_MAP(0xF24E) + +#define ONENAND_REG_ECC_STATUS ONENAND_MEMORY_MAP(0xFF00) +#define ONENAND_REG_ECC_M0 ONENAND_MEMORY_MAP(0xFF01) +#define ONENAND_REG_ECC_S0 ONENAND_MEMORY_MAP(0xFF02) +#define ONENAND_REG_ECC_M1 ONENAND_MEMORY_MAP(0xFF03) +#define ONENAND_REG_ECC_S1 ONENAND_MEMORY_MAP(0xFF04) +#define ONENAND_REG_ECC_M2 
ONENAND_MEMORY_MAP(0xFF05) +#define ONENAND_REG_ECC_S2 ONENAND_MEMORY_MAP(0xFF06) +#define ONENAND_REG_ECC_M3 ONENAND_MEMORY_MAP(0xFF07) +#define ONENAND_REG_ECC_S3 ONENAND_MEMORY_MAP(0xFF08) + +/* + * Device ID Register F001h (R) + */ +#define DEVICE_IS_FLEXONENAND (1 << 9) +#define FLEXONENAND_PI_MASK (0x3ff) +#define FLEXONENAND_PI_UNLOCK_SHIFT (14) +#define ONENAND_DEVICE_DENSITY_MASK (0xf) +#define ONENAND_DEVICE_DENSITY_SHIFT (4) +#define ONENAND_DEVICE_IS_DDP (1 << 3) +#define ONENAND_DEVICE_IS_DEMUX (1 << 2) +#define ONENAND_DEVICE_VCC_MASK (0x3) + +#define ONENAND_DEVICE_DENSITY_512Mb (0x002) +#define ONENAND_DEVICE_DENSITY_1Gb (0x003) +#define ONENAND_DEVICE_DENSITY_2Gb (0x004) +#define ONENAND_DEVICE_DENSITY_4Gb (0x005) + +/* + * Version ID Register F002h (R) + */ +#define ONENAND_VERSION_PROCESS_SHIFT (8) + +/* + * Technology Register F006h (R) + */ +#define ONENAND_TECHNOLOGY_IS_MLC (1 << 0) + +/* + * Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W) + */ +#define ONENAND_DDP_SHIFT (15) +#define ONENAND_DDP_CHIP0 (0) +#define ONENAND_DDP_CHIP1 (1 << ONENAND_DDP_SHIFT) + +/* + * Start Address 8 F107h (R/W) + */ +/* Note: It's actually 0x3f in case of SLC */ +#define ONENAND_FPA_MASK (0x7f) +#define ONENAND_FPA_SHIFT (2) +#define ONENAND_FSA_MASK (0x03) + +/* + * Start Buffer Register F200h (R/W) + */ +#define ONENAND_BSA_MASK (0x03) +#define ONENAND_BSA_SHIFT (8) +#define ONENAND_BSA_BOOTRAM (0 << 2) +#define ONENAND_BSA_DATARAM0 (2 << 2) +#define ONENAND_BSA_DATARAM1 (3 << 2) +/* Note: It's actually 0x03 in case of SLC */ +#define ONENAND_BSC_MASK (0x07) + +/* + * Command Register F220h (R/W) + */ +#define ONENAND_CMD_READ (0x00) +#define ONENAND_CMD_READOOB (0x13) +#define ONENAND_CMD_PROG (0x80) +#define ONENAND_CMD_PROGOOB (0x1A) +#define ONENAND_CMD_2X_PROG (0x7D) +#define ONENAND_CMD_2X_CACHE_PROG (0x7F) +#define ONENAND_CMD_UNLOCK (0x23) +#define ONENAND_CMD_LOCK (0x2A) +#define ONENAND_CMD_LOCK_TIGHT (0x2C) +#define ONENAND_CMD_UNLOCK_ALL (0x27) +#define ONENAND_CMD_ERASE (0x94) +#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95) +#define ONENAND_CMD_ERASE_VERIFY (0x71) +#define ONENAND_CMD_RESET (0xF0) +#define ONENAND_CMD_OTP_ACCESS (0x65) +#define ONENAND_CMD_READID (0x90) +#define FLEXONENAND_CMD_PI_UPDATE (0x05) +#define FLEXONENAND_CMD_PI_ACCESS (0x66) +#define FLEXONENAND_CMD_RECOVER_LSB (0x05) + +/* NOTE: Those are not *REAL* commands */ +#define ONENAND_CMD_BUFFERRAM (0x1978) +#define FLEXONENAND_CMD_READ_PI (0x1985) + +/* + * System Configuration 1 Register F221h (R, R/W) + */ +#define ONENAND_SYS_CFG1_SYNC_READ (1 << 15) +#define ONENAND_SYS_CFG1_BRL_7 (7 << 12) +#define ONENAND_SYS_CFG1_BRL_6 (6 << 12) +#define ONENAND_SYS_CFG1_BRL_5 (5 << 12) +#define ONENAND_SYS_CFG1_BRL_4 (4 << 12) +#define ONENAND_SYS_CFG1_BRL_3 (3 << 12) +#define ONENAND_SYS_CFG1_BRL_10 (2 << 12) +#define ONENAND_SYS_CFG1_BRL_9 (1 << 12) +#define ONENAND_SYS_CFG1_BRL_8 (0 << 12) +#define ONENAND_SYS_CFG1_BRL_SHIFT (12) +#define ONENAND_SYS_CFG1_BL_32 (4 << 9) +#define ONENAND_SYS_CFG1_BL_16 (3 << 9) +#define ONENAND_SYS_CFG1_BL_8 (2 << 9) +#define ONENAND_SYS_CFG1_BL_4 (1 << 9) +#define ONENAND_SYS_CFG1_BL_CONT (0 << 9) +#define ONENAND_SYS_CFG1_BL_SHIFT (9) +#define ONENAND_SYS_CFG1_NO_ECC (1 << 8) +#define ONENAND_SYS_CFG1_RDY (1 << 7) +#define ONENAND_SYS_CFG1_INT (1 << 6) +#define ONENAND_SYS_CFG1_IOBE (1 << 5) +#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4) +#define ONENAND_SYS_CFG1_VHF (1 << 3) +#define ONENAND_SYS_CFG1_HF (1 << 2) +#define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1) + +/* 
+ * Controller Status Register F240h (R) + */ +#define ONENAND_CTRL_ONGO (1 << 15) +#define ONENAND_CTRL_LOCK (1 << 14) +#define ONENAND_CTRL_LOAD (1 << 13) +#define ONENAND_CTRL_PROGRAM (1 << 12) +#define ONENAND_CTRL_ERASE (1 << 11) +#define ONENAND_CTRL_ERROR (1 << 10) +#define ONENAND_CTRL_RSTB (1 << 7) +#define ONENAND_CTRL_OTP_L (1 << 6) +#define ONENAND_CTRL_OTP_BL (1 << 5) + +/* + * Interrupt Status Register F241h (R) + */ +#define ONENAND_INT_MASTER (1 << 15) +#define ONENAND_INT_READ (1 << 7) +#define ONENAND_INT_WRITE (1 << 6) +#define ONENAND_INT_ERASE (1 << 5) +#define ONENAND_INT_RESET (1 << 4) +#define ONENAND_INT_CLEAR (0 << 0) + +/* + * NAND Flash Write Protection Status Register F24Eh (R) + */ +#define ONENAND_WP_US (1 << 2) +#define ONENAND_WP_LS (1 << 1) +#define ONENAND_WP_LTS (1 << 0) + +/* + * ECC Status Reigser FF00h (R) + */ +#define ONENAND_ECC_1BIT (1 << 0) +#define ONENAND_ECC_1BIT_ALL (0x5555) +#define ONENAND_ECC_2BIT (1 << 1) +#define ONENAND_ECC_2BIT_ALL (0xAAAA) +#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010) +#define ONENAND_ECC_3BIT (1 << 2) +#define ONENAND_ECC_4BIT (1 << 3) +#define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010) + +/* + * One-Time Programmable (OTP) + */ +#define FLEXONENAND_OTP_LOCK_OFFSET (2048) +#define ONENAND_OTP_LOCK_OFFSET (14) + +#endif /* __ONENAND_REG_H */ diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h new file mode 100644 index 00000000..e4f732ae --- /dev/null +++ b/include/linux/mtd/partitions.h @@ -0,0 +1,87 @@ +/* + * MTD partitioning layer definitions + * + * (C) 2000 Nicolas Pitre <nico@fluxnic.net> + * + * This code is GPL + */ + +#ifndef MTD_PARTITIONS_H +#define MTD_PARTITIONS_H + +#include <linux/types.h> + + +/* + * Partition definition structure: + * + * An array of struct partition is passed along with a MTD object to + * mtd_device_register() to create them. + * + * For each partition, these fields are available: + * name: string that will be used to label the partition's MTD device. + * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition + * will extend to the end of the master MTD device. + * offset: absolute starting position within the master MTD device; if + * defined as MTDPART_OFS_APPEND, the partition will start where the + * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block; + * if MTDPART_OFS_RETAIN, consume as much as possible, leaving size + * after the end of partition. + * mask_flags: contains flags that have to be masked (removed) from the + * master MTD flag set for the corresponding MTD partition. + * For example, to force a read-only partition, simply adding + * MTD_WRITEABLE to the mask_flags will do the trick. + * + * Note: writeable partitions require their size and offset be + * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). + */ + +struct mtd_partition { + char *name; /* identifier string */ + uint64_t size; /* partition size */ + uint64_t offset; /* offset within the master MTD space */ + uint32_t mask_flags; /* master MTD flags to mask out for this partition */ + struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */ +}; + +#define MTDPART_OFS_RETAIN (-3) +#define MTDPART_OFS_NXTBLK (-2) +#define MTDPART_OFS_APPEND (-1) +#define MTDPART_SIZ_FULL (0) + + +struct mtd_info; +struct device_node; + +/** + * struct mtd_part_parser_data - used to pass data to MTD partition parsers. 
+ * @origin: for RedBoot, start address of MTD device + * @of_node: for OF parsers, device node containing partitioning information + */ +struct mtd_part_parser_data { + unsigned long origin; + struct device_node *of_node; +}; + + +/* + * Functions dealing with the various ways of partitioning the space + */ + +struct mtd_part_parser { + struct list_head list; + struct module *owner; + const char *name; + int (*parse_fn)(struct mtd_info *, struct mtd_partition **, + struct mtd_part_parser_data *); +}; + +extern int register_mtd_parser(struct mtd_part_parser *parser); +extern int deregister_mtd_parser(struct mtd_part_parser *parser); + +int mtd_is_partition(struct mtd_info *mtd); +int mtd_add_partition(struct mtd_info *master, char *name, + long long offset, long long length); +int mtd_del_partition(struct mtd_info *master, int partno); +uint64_t mtd_get_device_size(struct mtd_info *mtd); +#endif diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h new file mode 100644 index 00000000..b730d4f8 --- /dev/null +++ b/include/linux/mtd/pfow.h @@ -0,0 +1,159 @@ +/* Primary function overlay window definitions + * and service functions used by LPDDR chips + */ +#ifndef __LINUX_MTD_PFOW_H +#define __LINUX_MTD_PFOW_H + +#include <linux/mtd/qinfo.h> + +/* PFOW registers addressing */ +/* Address of symbol "P" */ +#define PFOW_QUERY_STRING_P 0x0000 +/* Address of symbol "F" */ +#define PFOW_QUERY_STRING_F 0x0002 +/* Address of symbol "O" */ +#define PFOW_QUERY_STRING_O 0x0004 +/* Address of symbol "W" */ +#define PFOW_QUERY_STRING_W 0x0006 +/* Identification info for LPDDR chip */ +#define PFOW_MANUFACTURER_ID 0x0020 +#define PFOW_DEVICE_ID 0x0022 +/* Address in PFOW where prog buffer can can be found */ +#define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 +/* Size of program buffer in words */ +#define PFOW_PROGRAM_BUFFER_SIZE 0x0042 +/* Address command code register */ +#define PFOW_COMMAND_CODE 0x0080 +/* command data register */ +#define PFOW_COMMAND_DATA 0x0084 +/* command address register lower address bits */ +#define PFOW_COMMAND_ADDRESS_L 0x0088 +/* command address register upper address bits */ +#define PFOW_COMMAND_ADDRESS_H 0x008a +/* number of bytes to be proggrammed lower address bits */ +#define PFOW_DATA_COUNT_L 0x0090 +/* number of bytes to be proggrammed higher address bits */ +#define PFOW_DATA_COUNT_H 0x0092 +/* command execution register, the only possible value is 0x01 */ +#define PFOW_COMMAND_EXECUTE 0x00c0 +/* 0x01 should be written at this address to clear buffer */ +#define PFOW_CLEAR_PROGRAM_BUFFER 0x00c4 +/* device program/erase suspend register */ +#define PFOW_PROGRAM_ERASE_SUSPEND 0x00c8 +/* device status register */ +#define PFOW_DSR 0x00cc + +/* LPDDR memory device command codes */ +/* They are possible values of PFOW command code register */ +#define LPDDR_WORD_PROGRAM 0x0041 +#define LPDDR_BUFF_PROGRAM 0x00E9 +#define LPDDR_BLOCK_ERASE 0x0020 +#define LPDDR_LOCK_BLOCK 0x0061 +#define LPDDR_UNLOCK_BLOCK 0x0062 +#define LPDDR_READ_BLOCK_LOCK_STATUS 0x0065 +#define LPDDR_INFO_QUERY 0x0098 +#define LPDDR_READ_OTP 0x0097 +#define LPDDR_PROG_OTP 0x00C0 +#define LPDDR_RESUME 0x00D0 + +/* Defines possible value of PFOW command execution register */ +#define LPDDR_START_EXECUTION 0x0001 + +/* Defines possible value of PFOW program/erase suspend register */ +#define LPDDR_SUSPEND 0x0001 + +/* Possible values of PFOW device status register */ +/* access R - read; RC read & clearable */ +#define DSR_DPS (1<<1) /* RC; device protect status + * 0 - not protected 1 - locked */ 
+#define DSR_PSS (1<<2) /* R; program suspend status; + * 0-prog in progress/completed, + * 1- prog suspended */ +#define DSR_VPPS (1<<3) /* RC; 0-Vpp OK, * 1-Vpp low */ +#define DSR_PROGRAM_STATUS (1<<4) /* RC; 0-successful, 1-error */ +#define DSR_ERASE_STATUS (1<<5) /* RC; erase or blank check status; + * 0-success erase/blank check, + * 1 blank check error */ +#define DSR_ESS (1<<6) /* R; erase suspend status; + * 0-erase in progress/complete, + * 1 erase suspended */ +#define DSR_READY_STATUS (1<<7) /* R; Device status + * 0-busy, + * 1-ready */ +#define DSR_RPS (0x3<<8) /* RC; region program status + * 00 - Success, + * 01-re-program attempt in region with + * object mode data, + * 10-object mode program w attempt in + * region with control mode data + * 11-attempt to program invalid half + * with 0x41 command */ +#define DSR_AOS (1<<12) /* RC; 1- AO related failure */ +#define DSR_AVAILABLE (1<<15) /* R; Device availbility + * 1 - Device available + * 0 - not available */ + +/* The superset of all possible error bits in DSR */ +#define DSR_ERR 0x133A + +static inline void send_pfow_command(struct map_info *map, + unsigned long cmd_code, unsigned long adr, + unsigned long len, map_word *datum) +{ + int bits_per_chip = map_bankwidth(map) * 8; + int chipnum; + struct lpddr_private *lpddr = map->fldrv_priv; + chipnum = adr >> lpddr->chipshift; + + map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE); + map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)), + map->pfow_base + PFOW_COMMAND_ADDRESS_L); + map_write(map, CMD(adr>>bits_per_chip), + map->pfow_base + PFOW_COMMAND_ADDRESS_H); + if (len) { + map_write(map, CMD(len & ((1<<bits_per_chip) - 1)), + map->pfow_base + PFOW_DATA_COUNT_L); + map_write(map, CMD(len>>bits_per_chip), + map->pfow_base + PFOW_DATA_COUNT_H); + } + if (datum) + map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA); + + /* Command execution start */ + map_write(map, CMD(LPDDR_START_EXECUTION), + map->pfow_base + PFOW_COMMAND_EXECUTE); +} + +static inline void print_drs_error(unsigned dsr) +{ + int prog_status = (dsr & DSR_RPS) >> 8; + + if (!(dsr & DSR_AVAILABLE)) + printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); + if (prog_status & 0x03) + printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " + "half with 41h command\n"); + else if (prog_status & 0x02) + printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt " + "in region with Control Mode data\n"); + else if (prog_status & 0x01) + printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region " + "with Object Mode data\n"); + if (!(dsr & DSR_READY_STATUS)) + printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n"); + if (dsr & DSR_ESS) + printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n"); + if (dsr & DSR_ERASE_STATUS) + printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n"); + if (dsr & DSR_PROGRAM_STATUS) + printk(KERN_NOTICE"DSR.4: (1) Program Error\n"); + if (dsr & DSR_VPPS) + printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation " + "aborted\n"); + if (dsr & DSR_PSS) + printk(KERN_NOTICE"DSR.2: (1) Program suspended\n"); + if (dsr & DSR_DPS) + printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt " + "on locked block\n"); +} +#endif /* __LINUX_MTD_PFOW_H */ diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h new file mode 100644 index 00000000..d2887e76 --- /dev/null +++ b/include/linux/mtd/physmap.h @@ -0,0 +1,36 @@ +/* + * For boards with physically mapped flash and using + * drivers/mtd/maps/physmap.c mapping driver. 
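+ *
+ * A board file normally hands this driver a struct physmap_flash_data
+ * (defined below) as the platform_data of a platform device named
+ * "physmap-flash", roughly along these lines (illustrative sketch; the
+ * memory resource describing the flash window is omitted):
+ *
+ *	static struct physmap_flash_data board_flash_data = {
+ *		.width = 2,
+ *	};
+ *	static struct platform_device board_flash = {
+ *		.name = "physmap-flash",
+ *		.id = 0,
+ *		.dev = { .platform_data = &board_flash_data },
+ *	};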
+ * + * Copyright (C) 2003 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MTD_PHYSMAP__ +#define __LINUX_MTD_PHYSMAP__ + +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> + +struct map_info; +struct platform_device; + +struct physmap_flash_data { + unsigned int width; + int (*init)(struct platform_device *); + void (*exit)(struct platform_device *); + void (*set_vpp)(struct platform_device *, int); + unsigned int nr_parts; + unsigned int pfow_base; + char *probe_type; + struct mtd_partition *parts; + const char **part_probe_types; +}; + +#endif /* __LINUX_MTD_PHYSMAP__ */ diff --git a/include/linux/mtd/pismo.h b/include/linux/mtd/pismo.h new file mode 100644 index 00000000..8dfb7e14 --- /dev/null +++ b/include/linux/mtd/pismo.h @@ -0,0 +1,17 @@ +/* + * PISMO memory driver - http://www.pismoworld.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + */ +#ifndef __LINUX_MTD_PISMO_H +#define __LINUX_MTD_PISMO_H + +struct pismo_pdata { + void (*set_vpp)(void *, int); + void *vpp_data; + phys_addr_t cs_addrs[5]; +}; + +#endif diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h new file mode 100644 index 00000000..e07890af --- /dev/null +++ b/include/linux/mtd/plat-ram.h @@ -0,0 +1,34 @@ +/* linux/include/linux/mtd/plat-ram.h + * + * (c) 2004 Simtec Electronics + * http://www.simtec.co.uk/products/SWLINUX/ + * Ben Dooks <ben@simtec.co.uk> + * + * Generic platform device based RAM map + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
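For reference, physmap_flash_data above is consumed by the physmap mapping driver together with a memory resource on a platform device; a board file typically wires it up roughly as sketched below. The base address, partition layout and sizes are illustrative, and the "physmap-flash" device name reflects common usage rather than anything declared in this header.

/* Illustrative board-file usage; base address and partition sizes are made up. */
static struct mtd_partition example_parts[] = {
	{ .name = "bootloader", .offset = 0,       .size = SZ_256K },
	{ .name = "rootfs",     .offset = SZ_256K, .size = MTDPART_SIZ_FULL },
};

static struct physmap_flash_data example_flash_data = {
	.width    = 2,				/* 16-bit bus width */
	.parts    = example_parts,
	.nr_parts = ARRAY_SIZE(example_parts),
};

static struct resource example_flash_resource = {
	.start = 0x10000000,			/* board-specific chip-select base */
	.end   = 0x10000000 + SZ_32M - 1,
	.flags = IORESOURCE_MEM,
};

static struct platform_device example_flash_device = {
	.name          = "physmap-flash",
	.id            = 0,
	.dev           = { .platform_data = &example_flash_data },
	.resource      = &example_flash_resource,
	.num_resources = 1,
};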
+ * + */ + +#ifndef __LINUX_MTD_PLATRAM_H +#define __LINUX_MTD_PLATRAM_H __FILE__ + +#define PLATRAM_RO (0) +#define PLATRAM_RW (1) + +struct platdata_mtd_ram { + const char *mapname; + const char **map_probes; + const char **probes; + struct mtd_partition *partitions; + int nr_partitions; + int bankwidth; + + /* control callbacks */ + + void (*set_rw)(struct device *dev, int to); +}; + +#endif /* __LINUX_MTD_PLATRAM_H */ diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h new file mode 100644 index 00000000..7b3d487d --- /dev/null +++ b/include/linux/mtd/qinfo.h @@ -0,0 +1,91 @@ +#ifndef __LINUX_MTD_QINFO_H +#define __LINUX_MTD_QINFO_H + +#include <linux/mtd/map.h> +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/flashchip.h> +#include <linux/mtd/partitions.h> + +/* lpddr_private describes lpddr flash chip in memory map + * @ManufactId - Chip Manufacture ID + * @DevId - Chip Device ID + * @qinfo - pointer to qinfo records describing the chip + * @numchips - number of chips including virual RWW partitions + * @chipshift - Chip/partiton size 2^chipshift + * @chips - per-chip data structure + */ +struct lpddr_private { + uint16_t ManufactId; + uint16_t DevId; + struct qinfo_chip *qinfo; + int numchips; + unsigned long chipshift; + struct flchip chips[0]; +}; + +/* qinfo_query_info structure contains request information for + * each qinfo record + * @major - major number of qinfo record + * @major - minor number of qinfo record + * @id_str - descriptive string to access the record + * @desc - detailed description for the qinfo record + */ +struct qinfo_query_info { + uint8_t major; + uint8_t minor; + char *id_str; + char *desc; +}; + +/* + * qinfo_chip structure contains necessary qinfo records data + * @DevSizeShift - Device size 2^n bytes + * @BufSizeShift - Program buffer size 2^n bytes + * @TotalBlocksNum - Total number of blocks + * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes + * @HWPartsNum - Number of hardware partitions + * @SuspEraseSupp - Suspend erase supported + * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec + * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec + * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec + */ +struct qinfo_chip { + /* General device info */ + uint16_t DevSizeShift; + uint16_t BufSizeShift; + /* Erase block information */ + uint16_t TotalBlocksNum; + uint16_t UniformBlockSizeShift; + /* Partition information */ + uint16_t HWPartsNum; + /* Optional features */ + uint16_t SuspEraseSupp; + /* Operation typical time */ + uint16_t SingleWordProgTime; + uint16_t ProgBufferTime; + uint16_t BlockEraseTime; +}; + +/* defines for fixup usage */ +#define LPDDR_MFR_ANY 0xffff +#define LPDDR_ID_ANY 0xffff +#define NUMONYX_MFGR_ID 0x0089 +#define R18_DEVICE_ID_1G 0x893c + +static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map) +{ + map_word val = { {0} }; + val.x[0] = cmd; + return val; +} + +#define CMD(x) lpddr_build_cmd(x, map) +#define CMDVAL(cmd) cmd.x[0] + +struct mtd_info *lpddr_cmdset(struct map_info *); + +#endif + diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h new file mode 100644 index 00000000..a38e1fa8 --- /dev/null +++ b/include/linux/mtd/sh_flctl.h @@ -0,0 +1,171 @@ +/* + * SuperH FLCTL nand controller + * + * Copyright © 2008 Renesas Solutions Corp. 
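The qinfo_chip fields above mostly store base-2 exponents: sizes are 2^n bytes and the typical operation times are 2^n microseconds or milliseconds. Below is a small hypothetical helper showing how a driver might expand them; the function name and printout are illustrative only.

/* Hypothetical: expand the 2^n encodings used by struct qinfo_chip. */
static void qinfo_decode_sketch(const struct qinfo_chip *q)
{
	unsigned long long dev_size = 1ULL << q->DevSizeShift;	/* bytes */
	unsigned long buf_size      = 1UL << q->BufSizeShift;	/* bytes */
	unsigned long block_size    = 1UL << q->UniformBlockSizeShift;
	unsigned long word_prog_us  = 1UL << q->SingleWordProgTime;
	unsigned long erase_ms      = 1UL << q->BlockEraseTime;

	pr_info("lpddr: %llu bytes, %u blocks of %lu bytes, prog buffer %lu bytes\n",
		dev_size, q->TotalBlocksNum, block_size, buf_size);
	pr_info("lpddr: word program ~%lu us, block erase ~%lu ms\n",
		word_prog_us, erase_ms);
}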
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __SH_FLCTL_H__ +#define __SH_FLCTL_H__ + +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/partitions.h> +#include <linux/pm_qos.h> + +/* FLCTL registers */ +#define FLCMNCR(f) (f->reg + 0x0) +#define FLCMDCR(f) (f->reg + 0x4) +#define FLCMCDR(f) (f->reg + 0x8) +#define FLADR(f) (f->reg + 0xC) +#define FLADR2(f) (f->reg + 0x3C) +#define FLDATAR(f) (f->reg + 0x10) +#define FLDTCNTR(f) (f->reg + 0x14) +#define FLINTDMACR(f) (f->reg + 0x18) +#define FLBSYTMR(f) (f->reg + 0x1C) +#define FLBSYCNT(f) (f->reg + 0x20) +#define FLDTFIFO(f) (f->reg + 0x24) +#define FLECFIFO(f) (f->reg + 0x28) +#define FLTRCR(f) (f->reg + 0x2C) +#define FLHOLDCR(f) (f->reg + 0x38) +#define FL4ECCRESULT0(f) (f->reg + 0x80) +#define FL4ECCRESULT1(f) (f->reg + 0x84) +#define FL4ECCRESULT2(f) (f->reg + 0x88) +#define FL4ECCRESULT3(f) (f->reg + 0x8C) +#define FL4ECCCR(f) (f->reg + 0x90) +#define FL4ECCCNT(f) (f->reg + 0x94) +#define FLERRADR(f) (f->reg + 0x98) + +/* FLCMNCR control bits */ +#define ECCPOS2 (0x1 << 25) +#define _4ECCCNTEN (0x1 << 24) +#define _4ECCEN (0x1 << 23) +#define _4ECCCORRECT (0x1 << 22) +#define SHBUSSEL (0x1 << 20) +#define SEL_16BIT (0x1 << 19) +#define SNAND_E (0x1 << 18) /* SNAND (0=512 1=2048)*/ +#define QTSEL_E (0x1 << 17) +#define ENDIAN (0x1 << 16) /* 1 = little endian */ +#define FCKSEL_E (0x1 << 15) +#define ECCPOS_00 (0x00 << 12) +#define ECCPOS_01 (0x01 << 12) +#define ECCPOS_02 (0x02 << 12) +#define ACM_SACCES_MODE (0x01 << 10) +#define NANWF_E (0x1 << 9) +#define SE_D (0x1 << 8) /* Spare area disable */ +#define CE1_ENABLE (0x1 << 4) /* Chip Enable 1 */ +#define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */ +#define TYPESEL_SET (0x1 << 0) + +/* + * Clock settings using the PULSEx registers from FLCMNCR + * + * Some hardware uses bits called PULSEx instead of FCKSEL_E and QTSEL_E + * to control the clock divider used between the High-Speed Peripheral Clock + * and the FLCTL internal clock. If so, use CLK_8_BIT_xxx for connecting 8 bit + * and CLK_16_BIT_xxx for connecting 16 bit bus bandwith NAND chips. For the 16 + * bit version the divider is seperate for the pulse width of high and low + * signals. 
+ */ +#define PULSE3 (0x1 << 27) +#define PULSE2 (0x1 << 17) +#define PULSE1 (0x1 << 15) +#define PULSE0 (0x1 << 9) +#define CLK_8B_0_5 PULSE1 +#define CLK_8B_1 0x0 +#define CLK_8B_1_5 (PULSE1 | PULSE2) +#define CLK_8B_2 PULSE0 +#define CLK_8B_3 (PULSE0 | PULSE1 | PULSE2) +#define CLK_8B_4 (PULSE0 | PULSE2) +#define CLK_16B_6L_2H PULSE0 +#define CLK_16B_9L_3H (PULSE0 | PULSE1 | PULSE2) +#define CLK_16B_12L_4H (PULSE0 | PULSE2) + +/* FLCMDCR control bits */ +#define ADRCNT2_E (0x1 << 31) /* 5byte address enable */ +#define ADRMD_E (0x1 << 26) /* Sector address access */ +#define CDSRC_E (0x1 << 25) /* Data buffer selection */ +#define DOSR_E (0x1 << 24) /* Status read check */ +#define SELRW (0x1 << 21) /* 0:read 1:write */ +#define DOADR_E (0x1 << 20) /* Address stage execute */ +#define ADRCNT_1 (0x00 << 18) /* Address data bytes: 1byte */ +#define ADRCNT_2 (0x01 << 18) /* Address data bytes: 2byte */ +#define ADRCNT_3 (0x02 << 18) /* Address data bytes: 3byte */ +#define ADRCNT_4 (0x03 << 18) /* Address data bytes: 4byte */ +#define DOCMD2_E (0x1 << 17) /* 2nd cmd stage execute */ +#define DOCMD1_E (0x1 << 16) /* 1st cmd stage execute */ + +/* FLTRCR control bits */ +#define TRSTRT (0x1 << 0) /* translation start */ +#define TREND (0x1 << 1) /* translation end */ + +/* + * FLHOLDCR control bits + * + * HOLDEN: Bus Occupancy Enable (inverted) + * Enable this bit when the external bus might be used in between transfers. + * If not set and the bus gets used by other modules, a deadlock occurs. + */ +#define HOLDEN (0x1 << 0) + +/* FL4ECCCR control bits */ +#define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */ +#define _4ECCEND (0x1 << 1) /* 4 symbols end */ +#define _4ECCEXST (0x1 << 0) /* 4 symbols exist */ + +#define INIT_FL4ECCRESULT_VAL 0x03FF03FF +#define LOOP_TIMEOUT_MAX 0x00010000 + +struct sh_flctl { + struct mtd_info mtd; + struct nand_chip chip; + struct platform_device *pdev; + struct dev_pm_qos_request pm_qos; + void __iomem *reg; + + uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ + int read_bytes; + int index; + int seqin_column; /* column in SEQIN cmd */ + int seqin_page_addr; /* page_addr in SEQIN cmd */ + uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */ + int erase1_page_addr; /* page_addr in ERASE1 cmd */ + uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */ + uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */ + uint32_t flcmncr_base; /* base value of FLCMNCR */ + + int hwecc_cant_correct[4]; + + unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */ + unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ + unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */ + unsigned qos_request:1; /* QoS request to prevent deep power shutdown */ +}; + +struct sh_flctl_platform_data { + struct mtd_partition *parts; + int nr_parts; + unsigned long flcmncr_val; + + unsigned has_hwecc:1; + unsigned use_holden:1; +}; + +static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) +{ + return container_of(mtdinfo, struct sh_flctl, mtd); +} + +#endif /* __SH_FLCTL_H__ */ diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h new file mode 100644 index 00000000..25f4d2a8 --- /dev/null +++ b/include/linux/mtd/sharpsl.h @@ -0,0 +1,20 @@ +/* + * SharpSL NAND support + * + * Copyright (C) 2008 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/mtd/nand.h> +#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/partitions.h> + +struct sharpsl_nand_platform_data { + struct nand_bbt_descr *badblock_pattern; + struct nand_ecclayout *ecc_layout; + struct mtd_partition *partitions; + unsigned int nr_partitions; +}; diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h new file mode 100644 index 00000000..8ae17260 --- /dev/null +++ b/include/linux/mtd/spear_smi.h @@ -0,0 +1,65 @@ +/* + * Copyright © 2010 ST Microelectronics + * Shiraz Hashim <shiraz.hashim@st.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __MTD_SPEAR_SMI_H +#define __MTD_SPEAR_SMI_H + +#include <linux/types.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/platform_device.h> +#include <linux/of.h> + +/* max possible slots for serial-nor flash chip in the SMI controller */ +#define MAX_NUM_FLASH_CHIP 4 + +/* macro to define partitions for flash devices */ +#define DEFINE_PARTS(n, of, s) \ +{ \ + .name = n, \ + .offset = of, \ + .size = s, \ +} + +/** + * struct spear_smi_flash_info - platform structure for passing flash + * information + * + * name: name of the serial nor flash for identification + * mem_base: the memory base on which the flash is mapped + * size: size of the flash in bytes + * partitions: parition details + * nr_partitions: number of partitions + * fast_mode: whether flash supports fast mode + */ + +struct spear_smi_flash_info { + char *name; + unsigned long mem_base; + unsigned long size; + struct mtd_partition *partitions; + int nr_partitions; + u8 fast_mode; +}; + +/** + * struct spear_smi_plat_data - platform structure for configuring smi + * + * clk_rate: clk rate at which SMI must operate + * num_flashes: number of flashes present on board + * board_flash_info: specific details of each flash present on board + */ +struct spear_smi_plat_data { + unsigned long clk_rate; + int num_flashes; + struct spear_smi_flash_info *board_flash_info; + struct device_node *np[MAX_NUM_FLASH_CHIP]; +}; + +#endif /* __MTD_SPEAR_SMI_H */ diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h new file mode 100644 index 00000000..f456230f --- /dev/null +++ b/include/linux/mtd/super.h @@ -0,0 +1,29 @@ +/* MTD-based superblock handling + * + * Copyright © 2006 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
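The DEFINE_PARTS() macro and the two spear_smi structures above are filled in by board code; the sketch below shows the intended shape of that data. The flash name, base address, clock rate and partition layout are illustrative assumptions, not values from this header.

/* Illustrative board data for one serial NOR device behind the SMI controller. */
static struct mtd_partition example_smi_parts[] = {
	DEFINE_PARTS("xloader", 0x00000000, 0x00010000),
	DEFINE_PARTS("u-boot",  0x00010000, 0x00050000),
	DEFINE_PARTS("kernel",  0x00060000, MTDPART_SIZ_FULL),
};

static struct spear_smi_flash_info example_flash_info[] = {
	{
		.name          = "serial-nor",
		.mem_base      = 0xf8000000,	/* board-specific mapping */
		.size          = 0x800000,	/* 8 MiB */
		.partitions    = example_smi_parts,
		.nr_partitions = ARRAY_SIZE(example_smi_parts),
		.fast_mode     = 1,
	},
};

static struct spear_smi_plat_data example_smi_pdata = {
	.clk_rate         = 50000000,		/* 50 MHz SMI clock */
	.num_flashes      = ARRAY_SIZE(example_flash_info),
	.board_flash_info = example_flash_info,
};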
+ */ + +#ifndef __MTD_SUPER_H__ +#define __MTD_SUPER_H__ + +#ifdef __KERNEL__ + +#include <linux/mtd/mtd.h> +#include <linux/fs.h> +#include <linux/mount.h> + +extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int)); +extern void kill_mtd_super(struct super_block *sb); + + +#endif /* __KERNEL__ */ + +#endif /* __MTD_SUPER_H__ */ diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h new file mode 100644 index 00000000..c2e92b36 --- /dev/null +++ b/include/linux/mtd/ubi.h @@ -0,0 +1,265 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __LINUX_UBI_H__ +#define __LINUX_UBI_H__ + +#include <linux/ioctl.h> +#include <linux/types.h> +#include <mtd/ubi-user.h> + + +#define UBI_ALL -1 +/* + * enum ubi_open_mode - UBI volume open mode constants. + * + * UBI_READONLY: read-only mode + * UBI_READWRITE: read-write mode + * UBI_EXCLUSIVE: exclusive mode + */ +enum { + UBI_READONLY = 1, + UBI_READWRITE, + UBI_EXCLUSIVE +}; + +/** + * struct ubi_volume_info - UBI volume description data structure. + * @vol_id: volume ID + * @ubi_num: UBI device number this volume belongs to + * @size: how many physical eraseblocks are reserved for this volume + * @used_bytes: how many bytes of data this volume contains + * @used_ebs: how many physical eraseblocks of this volume actually contain any + * data + * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) + * @corrupted: non-zero if the volume is corrupted (static volumes only) + * @upd_marker: non-zero if the volume has update marker set + * @alignment: volume alignment + * @usable_leb_size: how many bytes are available in logical eraseblocks of + * this volume + * @name_len: volume name length + * @name: volume name + * @cdev: UBI volume character device major and minor numbers + * + * The @corrupted flag is only relevant to static volumes and is always zero + * for dynamic ones. This is because UBI does not care about dynamic volume + * data protection and only cares about protecting static volume data. + * + * The @upd_marker flag is set if the volume update operation was interrupted. + * Before touching the volume data during the update operation, UBI first sets + * the update marker flag for this volume. If the volume update operation was + * further interrupted, the update marker indicates this. If the update marker + * is set, the contents of the volume is certainly damaged and a new volume + * update operation has to be started. 
+ * + * To put it differently, @corrupted and @upd_marker fields have different + * semantics: + * o the @corrupted flag means that this static volume is corrupted for some + * reasons, but not because an interrupted volume update + * o the @upd_marker field means that the volume is damaged because of an + * interrupted update operation. + * + * I.e., the @corrupted flag is never set if the @upd_marker flag is set. + * + * The @used_bytes and @used_ebs fields are only really needed for static + * volumes and contain the number of bytes stored in this static volume and how + * many eraseblock this data occupies. In case of dynamic volumes, the + * @used_bytes field is equivalent to @size*@usable_leb_size, and the @used_ebs + * field is equivalent to @size. + * + * In general, logical eraseblock size is a property of the UBI device, not + * of the UBI volume. Indeed, the logical eraseblock size depends on the + * physical eraseblock size and on how much bytes UBI headers consume. But + * because of the volume alignment (@alignment), the usable size of logical + * eraseblocks if a volume may be less. The following equation is true: + * @usable_leb_size = LEB size - (LEB size mod @alignment), + * where LEB size is the logical eraseblock size defined by the UBI device. + * + * The alignment is multiple to the minimal flash input/output unit size or %1 + * if all the available space is used. + * + * To put this differently, alignment may be considered is a way to change + * volume logical eraseblock sizes. + */ +struct ubi_volume_info { + int ubi_num; + int vol_id; + int size; + long long used_bytes; + int used_ebs; + int vol_type; + int corrupted; + int upd_marker; + int alignment; + int usable_leb_size; + int name_len; + const char *name; + dev_t cdev; +}; + +/** + * struct ubi_device_info - UBI device description data structure. + * @ubi_num: ubi device number + * @leb_size: logical eraseblock size on this UBI device + * @leb_start: starting offset of logical eraseblocks within physical + * eraseblocks + * @min_io_size: minimal I/O unit size + * @max_write_size: maximum amount of bytes the underlying flash can write at a + * time (MTD write buffer size) + * @ro_mode: if this device is in read-only mode + * @cdev: UBI character device major and minor numbers + * + * Note, @leb_size is the logical eraseblock size offered by the UBI device. + * Volumes of this UBI device may have smaller logical eraseblock size if their + * alignment is not equivalent to %1. + * + * The @max_write_size field describes flash write maximum write unit. For + * example, NOR flash allows for changing individual bytes, so @min_io_size is + * %1. However, it does not mean than NOR flash has to write data byte-by-byte. + * Instead, CFI NOR flashes have a write-buffer of, e.g., 64 bytes, and when + * writing large chunks of data, they write 64-bytes at a time. Obviously, this + * improves write throughput. + * + * Also, the MTD device may have N interleaved (striped) flash chips + * underneath, in which case @min_io_size can be physical min. I/O size of + * single flash chip, while @max_write_size can be N * @min_io_size. + * + * The @max_write_size field is always greater or equivalent to @min_io_size. + * E.g., some NOR flashes may have (@min_io_size = 1, @max_write_size = 64). In + * contrast, NAND flashes usually have @min_io_size = @max_write_size = NAND + * page size. 
+ */ +struct ubi_device_info { + int ubi_num; + int leb_size; + int leb_start; + int min_io_size; + int max_write_size; + int ro_mode; + dev_t cdev; +}; + +/* + * Volume notification types. + * @UBI_VOLUME_ADDED: a volume has been added (an UBI device was attached or a + * volume was created) + * @UBI_VOLUME_REMOVED: a volume has been removed (an UBI device was detached + * or a volume was removed) + * @UBI_VOLUME_RESIZED: a volume has been re-sized + * @UBI_VOLUME_RENAMED: a volume has been re-named + * @UBI_VOLUME_UPDATED: data has been written to a volume + * + * These constants define which type of event has happened when a volume + * notification function is invoked. + */ +enum { + UBI_VOLUME_ADDED, + UBI_VOLUME_REMOVED, + UBI_VOLUME_RESIZED, + UBI_VOLUME_RENAMED, + UBI_VOLUME_UPDATED, +}; + +/* + * struct ubi_notification - UBI notification description structure. + * @di: UBI device description object + * @vi: UBI volume description object + * + * UBI notifiers are called with a pointer to an object of this type. The + * object describes the notification. Namely, it provides a description of the + * UBI device and UBI volume the notification informs about. + */ +struct ubi_notification { + struct ubi_device_info di; + struct ubi_volume_info vi; +}; + +/* UBI descriptor given to users when they open UBI volumes */ +struct ubi_volume_desc; + +int ubi_get_device_info(int ubi_num, struct ubi_device_info *di); +void ubi_get_volume_info(struct ubi_volume_desc *desc, + struct ubi_volume_info *vi); +struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode); +struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, + int mode); + +void ubi_update_volume(struct ubi_volume_desc *desc); + +struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode); + +int ubi_register_volume_notifier(struct notifier_block *nb, + int ignore_existing); +int ubi_unregister_volume_notifier(struct notifier_block *nb); + +void ubi_close_volume(struct ubi_volume_desc *desc); +void ubi_set_volume(struct ubi_volume_desc *desc); + +int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, + int len, int check); +int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, + int offset, int len); + +int ubi_leb_read_oob(struct ubi_volume_desc *desc, int lnum, void *buf, int offset, + int len, void *spare); + +int ubi_leb_write_oob(struct ubi_volume_desc *desc, int lnum, const void *buf, int offset, + int len, void *spare, int dtype); + +int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, + int len); +int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); +int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); +int ubi_leb_map(struct ubi_volume_desc *desc, int lnum); +int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); +int ubi_sync(int ubi_num); +int ubi_flush(int ubi_num, int vol_id, int lnum); + +/* + * This function is the same as the 'ubi_leb_read()' function, but it does not + * provide the checking capability. + */ +static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf, + int offset, int len) +{ + return ubi_leb_read(desc, lnum, buf, offset, len, 0); +} + +/* + * This function is the same as the 'ubi_leb_write()' functions, but it does + * not have the data type argument. 
+ */ +static inline int ubi_write(struct ubi_volume_desc *desc, int lnum, + const void *buf, int offset, int len) +{ + return ubi_leb_write(desc, lnum, buf, offset, len); +} + +/* + * This function is the same as the 'ubi_leb_change()' functions, but it does + * not have the data type argument. + */ +static inline int ubi_change(struct ubi_volume_desc *desc, int lnum, + const void *buf, int len) +{ + return ubi_leb_change(desc, lnum, buf, len); +} + +#endif /* !__LINUX_UBI_H__ */ diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h new file mode 100644 index 00000000..abed4dec --- /dev/null +++ b/include/linux/mtd/xip.h @@ -0,0 +1,99 @@ +/* + * MTD primitives for XIP support + * + * Author: Nicolas Pitre + * Created: Nov 2, 2004 + * Copyright: (C) 2004 MontaVista Software, Inc. + * + * This XIP support for MTD has been loosely inspired + * by an earlier patch authored by David Woodhouse. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MTD_XIP_H__ +#define __LINUX_MTD_XIP_H__ + + +#ifdef CONFIG_MTD_XIP + +/* + * We really don't want gcc to guess anything. + * We absolutely _need_ proper inlining. + */ +#include <linux/compiler.h> + +/* + * Function that are modifying the flash state away from array mode must + * obviously not be running from flash. The __xipram is therefore marking + * those functions so they get relocated to ram. + */ +#define __xipram noinline __attribute__ ((__section__ (".data"))) + +/* + * Each architecture has to provide the following macros. They must access + * the hardware directly and not rely on any other (XIP) functions since they + * won't be available when used (flash not in array mode). + * + * xip_irqpending() + * + * return non zero when any hardware interrupt is pending. + * + * xip_currtime() + * + * return a platform specific time reference to be used with + * xip_elapsed_since(). + * + * xip_elapsed_since(x) + * + * return in usecs the elapsed timebetween now and the reference x as + * returned by xip_currtime(). + * + * note 1: conversion to usec can be approximated, as long as the + * returned value is <= the real elapsed time. + * note 2: this should be able to cope with a few seconds without + * overflowing. + * + * xip_iprefetch() + * + * Macro to fill instruction prefetch + * e.g. a series of nops: asm volatile (".rep 8; nop; .endr"); + */ + +#include <asm/mtd-xip.h> + +#ifndef xip_irqpending + +#warning "missing IRQ and timer primitives for XIP MTD support" +#warning "some of the XIP MTD support code will be disabled" +#warning "your system will therefore be unresponsive when writing or erasing flash" + +#define xip_irqpending() (0) +#define xip_currtime() (0) +#define xip_elapsed_since(x) (0) + +#endif + +#ifndef xip_iprefetch +#define xip_iprefetch() do { } while (0) +#endif + +/* + * xip_cpu_idle() is used when waiting for a delay equal or larger than + * the system timer tick period. This should put the CPU into idle mode + * to save power and to be woken up only when some interrupts are pending. + * This should not rely upon standard kernel code. 
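Stepping back to ubi.h above: the in-kernel UBI client API follows an open/use/close pattern around a struct ubi_volume_desc. Below is a minimal sketch of reading from LEB 0 of a volume; the function name is hypothetical and error handling is abbreviated.

/* Hypothetical UBI client: read len bytes from LEB 0 of (ubi_num, vol_id). */
static int ubi_read_leb0_sketch(int ubi_num, int vol_id, char *buf, int len)
{
	struct ubi_volume_desc *desc;
	int err;

	desc = ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
	if (IS_ERR(desc))			/* needs <linux/err.h> */
		return PTR_ERR(desc);

	/* LEB 0, offset 0, no CRC check of the whole static volume */
	err = ubi_leb_read(desc, 0, buf, 0, len, 0);

	ubi_close_volume(desc);
	return err;
}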
+ */ +#ifndef xip_cpu_idle +#define xip_cpu_idle() do { } while (0) +#endif + +#else + +#define __xipram + +#endif /* CONFIG_MTD_XIP */ + +#endif /* __LINUX_MTD_XIP_H__ */ diff --git a/include/linux/mtio.h b/include/linux/mtio.h new file mode 100644 index 00000000..18543e2d --- /dev/null +++ b/include/linux/mtio.h @@ -0,0 +1,208 @@ +/* + * linux/mtio.h header file for Linux. Written by H. Bergman + * + * Modified for special ioctls provided by zftape in September 1997 + * by C.-J. Heine. + */ + +#ifndef _LINUX_MTIO_H +#define _LINUX_MTIO_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +/* + * Structures and definitions for mag tape io control commands + */ + +/* structure for MTIOCTOP - mag tape op command */ +struct mtop { + short mt_op; /* operations defined below */ + int mt_count; /* how many of them */ +}; + +/* Magnetic Tape operations [Not all operations supported by all drivers]: */ +#define MTRESET 0 /* +reset drive in case of problems */ +#define MTFSF 1 /* forward space over FileMark, + * position at first record of next file + */ +#define MTBSF 2 /* backward space FileMark (position before FM) */ +#define MTFSR 3 /* forward space record */ +#define MTBSR 4 /* backward space record */ +#define MTWEOF 5 /* write an end-of-file record (mark) */ +#define MTREW 6 /* rewind */ +#define MTOFFL 7 /* rewind and put the drive offline (eject?) */ +#define MTNOP 8 /* no op, set status only (read with MTIOCGET) */ +#define MTRETEN 9 /* retension tape */ +#define MTBSFM 10 /* +backward space FileMark, position at FM */ +#define MTFSFM 11 /* +forward space FileMark, position at FM */ +#define MTEOM 12 /* goto end of recorded media (for appending files). + * MTEOM positions after the last FM, ready for + * appending another file. + */ +#define MTERASE 13 /* erase tape -- be careful! */ + +#define MTRAS1 14 /* run self test 1 (nondestructive) */ +#define MTRAS2 15 /* run self test 2 (destructive) */ +#define MTRAS3 16 /* reserved for self test 3 */ + +#define MTSETBLK 20 /* set block length (SCSI) */ +#define MTSETDENSITY 21 /* set tape density (SCSI) */ +#define MTSEEK 22 /* seek to block (Tandberg, etc.) */ +#define MTTELL 23 /* tell block (Tandberg, etc.) */ +#define MTSETDRVBUFFER 24 /* set the drive buffering according to SCSI-2 */ + /* ordinary buffered operation with code 1 */ +#define MTFSS 25 /* space forward over setmarks */ +#define MTBSS 26 /* space backward over setmarks */ +#define MTWSM 27 /* write setmarks */ + +#define MTLOCK 28 /* lock the drive door */ +#define MTUNLOCK 29 /* unlock the drive door */ +#define MTLOAD 30 /* execute the SCSI load command */ +#define MTUNLOAD 31 /* execute the SCSI unload command */ +#define MTCOMPRESSION 32/* control compression with SCSI mode page 15 */ +#define MTSETPART 33 /* Change the active tape partition */ +#define MTMKPART 34 /* Format the tape with one or two partitions */ +#define MTWEOFI 35 /* write an end-of-file record (mark) in immediate mode */ + +/* structure for MTIOCGET - mag tape get status command */ + +struct mtget { + long mt_type; /* type of magtape device */ + long mt_resid; /* residual count: (not sure) + * number of bytes ignored, or + * number of files not skipped, or + * number of records not skipped. 
+ */ + /* the following registers are device dependent */ + long mt_dsreg; /* status register */ + long mt_gstat; /* generic (device independent) status */ + long mt_erreg; /* error register */ + /* The next two fields are not always used */ + __kernel_daddr_t mt_fileno; /* number of current file on tape */ + __kernel_daddr_t mt_blkno; /* current block number */ +}; + + + +/* + * Constants for mt_type. Not all of these are supported, + * and these are not all of the ones that are supported. + */ +#define MT_ISUNKNOWN 0x01 +#define MT_ISQIC02 0x02 /* Generic QIC-02 tape streamer */ +#define MT_ISWT5150 0x03 /* Wangtek 5150EQ, QIC-150, QIC-02 */ +#define MT_ISARCHIVE_5945L2 0x04 /* Archive 5945L-2, QIC-24, QIC-02? */ +#define MT_ISCMSJ500 0x05 /* CMS Jumbo 500 (QIC-02?) */ +#define MT_ISTDC3610 0x06 /* Tandberg 6310, QIC-24 */ +#define MT_ISARCHIVE_VP60I 0x07 /* Archive VP60i, QIC-02 */ +#define MT_ISARCHIVE_2150L 0x08 /* Archive Viper 2150L */ +#define MT_ISARCHIVE_2060L 0x09 /* Archive Viper 2060L */ +#define MT_ISARCHIVESC499 0x0A /* Archive SC-499 QIC-36 controller */ +#define MT_ISQIC02_ALL_FEATURES 0x0F /* Generic QIC-02 with all features */ +#define MT_ISWT5099EEN24 0x11 /* Wangtek 5099-een24, 60MB, QIC-24 */ +#define MT_ISTEAC_MT2ST 0x12 /* Teac MT-2ST 155mb drive, Teac DC-1 card (Wangtek type) */ +#define MT_ISEVEREX_FT40A 0x32 /* Everex FT40A (QIC-40) */ +#define MT_ISDDS1 0x51 /* DDS device without partitions */ +#define MT_ISDDS2 0x52 /* DDS device with partitions */ +#define MT_ISONSTREAM_SC 0x61 /* OnStream SCSI tape drives (SC-x0) + and SCSI emulated (DI, DP, USB) */ +#define MT_ISSCSI1 0x71 /* Generic ANSI SCSI-1 tape unit */ +#define MT_ISSCSI2 0x72 /* Generic ANSI SCSI-2 tape unit */ + +/* QIC-40/80/3010/3020 ftape supported drives. + * 20bit vendor ID + 0x800000 (see ftape-vendors.h) + */ +#define MT_ISFTAPE_UNKNOWN 0x800000 /* obsolete */ +#define MT_ISFTAPE_FLAG 0x800000 + + +/* structure for MTIOCPOS - mag tape get position command */ + +struct mtpos { + long mt_blkno; /* current block number */ +}; + + +/* mag tape io control commands */ +#define MTIOCTOP _IOW('m', 1, struct mtop) /* do a mag tape op */ +#define MTIOCGET _IOR('m', 2, struct mtget) /* get tape status */ +#define MTIOCPOS _IOR('m', 3, struct mtpos) /* get tape position */ + + +/* Generic Mag Tape (device independent) status macros for examining + * mt_gstat -- HP-UX compatible. + * There is room for more generic status bits here, but I don't + * know which of them are reserved. At least three or so should + * be added to make this really useful. + */ +#define GMT_EOF(x) ((x) & 0x80000000) +#define GMT_BOT(x) ((x) & 0x40000000) +#define GMT_EOT(x) ((x) & 0x20000000) +#define GMT_SM(x) ((x) & 0x10000000) /* DDS setmark */ +#define GMT_EOD(x) ((x) & 0x08000000) /* DDS EOD */ +#define GMT_WR_PROT(x) ((x) & 0x04000000) +/* #define GMT_ ? ((x) & 0x02000000) */ +#define GMT_ONLINE(x) ((x) & 0x01000000) +#define GMT_D_6250(x) ((x) & 0x00800000) +#define GMT_D_1600(x) ((x) & 0x00400000) +#define GMT_D_800(x) ((x) & 0x00200000) +/* #define GMT_ ? ((x) & 0x00100000) */ +/* #define GMT_ ? ((x) & 0x00080000) */ +#define GMT_DR_OPEN(x) ((x) & 0x00040000) /* door open (no tape) */ +/* #define GMT_ ? 
((x) & 0x00020000) */ +#define GMT_IM_REP_EN(x) ((x) & 0x00010000) /* immediate report mode */ +#define GMT_CLN(x) ((x) & 0x00008000) /* cleaning requested */ +/* 15 generic status bits unused */ + + +/* SCSI-tape specific definitions */ +/* Bitfield shifts in the status */ +#define MT_ST_BLKSIZE_SHIFT 0 +#define MT_ST_BLKSIZE_MASK 0xffffff +#define MT_ST_DENSITY_SHIFT 24 +#define MT_ST_DENSITY_MASK 0xff000000 + +#define MT_ST_SOFTERR_SHIFT 0 +#define MT_ST_SOFTERR_MASK 0xffff + +/* Bitfields for the MTSETDRVBUFFER ioctl */ +#define MT_ST_OPTIONS 0xf0000000 +#define MT_ST_BOOLEANS 0x10000000 +#define MT_ST_SETBOOLEANS 0x30000000 +#define MT_ST_CLEARBOOLEANS 0x40000000 +#define MT_ST_WRITE_THRESHOLD 0x20000000 +#define MT_ST_DEF_BLKSIZE 0x50000000 +#define MT_ST_DEF_OPTIONS 0x60000000 +#define MT_ST_TIMEOUTS 0x70000000 +#define MT_ST_SET_TIMEOUT (MT_ST_TIMEOUTS | 0x000000) +#define MT_ST_SET_LONG_TIMEOUT (MT_ST_TIMEOUTS | 0x100000) +#define MT_ST_SET_CLN 0x80000000 + +#define MT_ST_BUFFER_WRITES 0x1 +#define MT_ST_ASYNC_WRITES 0x2 +#define MT_ST_READ_AHEAD 0x4 +#define MT_ST_DEBUGGING 0x8 +#define MT_ST_TWO_FM 0x10 +#define MT_ST_FAST_MTEOM 0x20 +#define MT_ST_AUTO_LOCK 0x40 +#define MT_ST_DEF_WRITES 0x80 +#define MT_ST_CAN_BSR 0x100 +#define MT_ST_NO_BLKLIMS 0x200 +#define MT_ST_CAN_PARTITIONS 0x400 +#define MT_ST_SCSI2LOGICAL 0x800 +#define MT_ST_SYSV 0x1000 +#define MT_ST_NOWAIT 0x2000 +#define MT_ST_SILI 0x4000 +#define MT_ST_NOWAIT_EOF 0x8000 + +/* The mode parameters to be controlled. Parameter chosen with bits 20-28 */ +#define MT_ST_CLEAR_DEFAULT 0xfffff +#define MT_ST_DEF_DENSITY (MT_ST_DEF_OPTIONS | 0x100000) +#define MT_ST_DEF_COMPRESSION (MT_ST_DEF_OPTIONS | 0x200000) +#define MT_ST_DEF_DRVBUFFER (MT_ST_DEF_OPTIONS | 0x300000) + +/* The offset for the arguments for the special HP changer load command. */ +#define MT_ST_HPLOADER_OFFSET 10000 + +#endif /* _LINUX_MTIO_H */ diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h new file mode 100644 index 00000000..731d77d6 --- /dev/null +++ b/include/linux/mutex-debug.h @@ -0,0 +1,23 @@ +#ifndef __LINUX_MUTEX_DEBUG_H +#define __LINUX_MUTEX_DEBUG_H + +#include <linux/linkage.h> +#include <linux/lockdep.h> + +/* + * Mutexes - debugging helpers: + */ + +#define __DEBUG_MUTEX_INITIALIZER(lockname) \ + , .magic = &lockname + +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) + +extern void mutex_destroy(struct mutex *lock); + +#endif diff --git a/include/linux/mutex.h b/include/linux/mutex.h new file mode 100644 index 00000000..9121595a --- /dev/null +++ b/include/linux/mutex.h @@ -0,0 +1,176 @@ +/* + * Mutexes: blocking mutual exclusion locks + * + * started by Ingo Molnar: + * + * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * + * This file contains the main data structure and API definitions. 
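Looking back at mtio.h: MTIOCTOP, MTIOCGET and MTIOCPOS are the user-space ioctls for tape control. A hedged user-space sketch of rewinding a drive and checking the generic status word follows; the device path is illustrative.

/* User-space sketch: rewind a tape and report write protection. */
#include <sys/ioctl.h>
#include <sys/mtio.h>		/* installed copy of this interface */
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int tape_rewind_sketch(const char *dev)	/* e.g. "/dev/nst0" */
{
	struct mtop op = { .mt_op = MTREW, .mt_count = 1 };
	struct mtget status;
	int fd = open(dev, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTIOCTOP");
	if (ioctl(fd, MTIOCGET, &status) == 0 && GMT_WR_PROT(status.mt_gstat))
		printf("tape is write-protected\n");
	close(fd);
	return 0;
}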
+ */ +#ifndef __LINUX_MUTEX_H +#define __LINUX_MUTEX_H + +#include <linux/list.h> +#include <linux/spinlock_types.h> +#include <linux/linkage.h> +#include <linux/lockdep.h> + +#include <linux/atomic.h> + +/* + * Simple, straightforward mutexes with strict semantics: + * + * - only one task can hold the mutex at a time + * - only the owner can unlock the mutex + * - multiple unlocks are not permitted + * - recursive locking is not permitted + * - a mutex object must be initialized via the API + * - a mutex object must not be initialized via memset or copying + * - task may not exit with mutex held + * - memory areas where held locks reside must not be freed + * - held mutexes must not be reinitialized + * - mutexes may not be used in hardware or software interrupt + * contexts such as tasklets and timers + * + * These semantics are fully enforced when DEBUG_MUTEXES is + * enabled. Furthermore, besides enforcing the above rules, the mutex + * debugging code also implements a number of additional features + * that make lock debugging easier and faster: + * + * - uses symbolic names of mutexes, whenever they are printed in debug output + * - point-of-acquire tracking, symbolic lookup of function names + * - list of all locks held in the system, printout of them + * - owner tracking + * - detects self-recursing locks and prints out all relevant info + * - detects multi-task circular deadlocks and prints out all affected + * locks and tasks (and only those tasks) + */ +struct mutex { + /* 1: unlocked, 0: locked, negative: locked, possible waiters */ + atomic_t count; + spinlock_t wait_lock; + struct list_head wait_list; +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) + struct task_struct *owner; +#endif +#ifdef CONFIG_DEBUG_MUTEXES + const char *name; + void *magic; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +/* + * This is the control structure for tasks blocked on mutex, + * which resides on the blocked task's kernel stack: + */ +struct mutex_waiter { + struct list_head list; + struct task_struct *task; +#ifdef CONFIG_DEBUG_MUTEXES + void *magic; +#endif +}; + +#ifdef CONFIG_DEBUG_MUTEXES +# include <linux/mutex-debug.h> +#else +# define __DEBUG_MUTEX_INITIALIZER(lockname) +/** + * mutex_init - initialize the mutex + * @mutex: the mutex to be initialized + * + * Initialize the mutex to unlocked state. + * + * It is not allowed to initialize an already locked mutex. + */ +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) +static inline void mutex_destroy(struct mutex *lock) {} +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + +#define __MUTEX_INITIALIZER(lockname) \ + { .count = ATOMIC_INIT(1) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ + , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ + __DEBUG_MUTEX_INITIALIZER(lockname) \ + __DEP_MAP_MUTEX_INITIALIZER(lockname) } + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void __mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); + +/** + * mutex_is_locked - is the mutex locked + * @lock: the mutex to be queried + * + * Returns 1 if the mutex is locked, 0 if unlocked. 
+ */ +static inline int mutex_is_locked(struct mutex *lock) +{ + return atomic_read(&lock->count) != 1; +} + +/* + * See kernel/mutex.c for detailed documentation of these APIs. + * Also see Documentation/mutex-design.txt. + */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); +extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); +extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, + unsigned int subclass); +extern int __must_check mutex_lock_killable_nested(struct mutex *lock, + unsigned int subclass); + +#define mutex_lock(lock) mutex_lock_nested(lock, 0) +#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) +#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) + +#define mutex_lock_nest_lock(lock, nest_lock) \ +do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ +} while (0) + +#else +extern void mutex_lock(struct mutex *lock); +extern int __must_check mutex_lock_interruptible(struct mutex *lock); +extern int __must_check mutex_lock_killable(struct mutex *lock); + +# define mutex_lock_nested(lock, subclass) mutex_lock(lock) +# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) +# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) +#endif + +/* + * NOTE: mutex_trylock() follows the spin_trylock() convention, + * not the down_trylock() convention! + * + * Returns 1 if the mutex has been acquired successfully, and 0 on contention. + */ +extern int mutex_trylock(struct mutex *lock); +extern void mutex_unlock(struct mutex *lock); +extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + +#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX +#define arch_mutex_cpu_relax() cpu_relax() +#endif + +#endif diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h new file mode 100644 index 00000000..69327b7b --- /dev/null +++ b/include/linux/mv643xx.h @@ -0,0 +1,979 @@ +/* + * mv643xx.h - MV-643XX Internal registers definition file. + * + * Copyright 2002 Momentum Computer, Inc. + * Author: Matthew Dharm <mdharm@momenco.com> + * Copyright 2002 GALILEO TECHNOLOGY, LTD. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
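Typical use of the mutex API above pairs DEFINE_MUTEX() with mutex_lock()/mutex_unlock(), or the interruptible variant when the caller may be woken by a signal. A short sketch; the protected counter is purely illustrative.

/* Illustrative: serialize updates to a shared counter with a static mutex. */
static DEFINE_MUTEX(example_lock);
static int example_counter;

static int example_increment(void)
{
	if (mutex_lock_interruptible(&example_lock))
		return -ERESTARTSYS;	/* interrupted by a signal */

	example_counter++;		/* critical section */

	mutex_unlock(&example_lock);
	return 0;
}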
+ */ +#ifndef __ASM_MV643XX_H +#define __ASM_MV643XX_H + +#include <asm/types.h> +#include <linux/mv643xx_eth.h> +#include <linux/mv643xx_i2c.h> + +/****************************************/ +/* Processor Address Space */ +/****************************************/ + +/* DDR SDRAM BAR and size registers */ + +#define MV64340_CS_0_BASE_ADDR 0x008 +#define MV64340_CS_0_SIZE 0x010 +#define MV64340_CS_1_BASE_ADDR 0x208 +#define MV64340_CS_1_SIZE 0x210 +#define MV64340_CS_2_BASE_ADDR 0x018 +#define MV64340_CS_2_SIZE 0x020 +#define MV64340_CS_3_BASE_ADDR 0x218 +#define MV64340_CS_3_SIZE 0x220 + +/* Devices BAR and size registers */ + +#define MV64340_DEV_CS0_BASE_ADDR 0x028 +#define MV64340_DEV_CS0_SIZE 0x030 +#define MV64340_DEV_CS1_BASE_ADDR 0x228 +#define MV64340_DEV_CS1_SIZE 0x230 +#define MV64340_DEV_CS2_BASE_ADDR 0x248 +#define MV64340_DEV_CS2_SIZE 0x250 +#define MV64340_DEV_CS3_BASE_ADDR 0x038 +#define MV64340_DEV_CS3_SIZE 0x040 +#define MV64340_BOOTCS_BASE_ADDR 0x238 +#define MV64340_BOOTCS_SIZE 0x240 + +/* PCI 0 BAR and size registers */ + +#define MV64340_PCI_0_IO_BASE_ADDR 0x048 +#define MV64340_PCI_0_IO_SIZE 0x050 +#define MV64340_PCI_0_MEMORY0_BASE_ADDR 0x058 +#define MV64340_PCI_0_MEMORY0_SIZE 0x060 +#define MV64340_PCI_0_MEMORY1_BASE_ADDR 0x080 +#define MV64340_PCI_0_MEMORY1_SIZE 0x088 +#define MV64340_PCI_0_MEMORY2_BASE_ADDR 0x258 +#define MV64340_PCI_0_MEMORY2_SIZE 0x260 +#define MV64340_PCI_0_MEMORY3_BASE_ADDR 0x280 +#define MV64340_PCI_0_MEMORY3_SIZE 0x288 + +/* PCI 1 BAR and size registers */ +#define MV64340_PCI_1_IO_BASE_ADDR 0x090 +#define MV64340_PCI_1_IO_SIZE 0x098 +#define MV64340_PCI_1_MEMORY0_BASE_ADDR 0x0a0 +#define MV64340_PCI_1_MEMORY0_SIZE 0x0a8 +#define MV64340_PCI_1_MEMORY1_BASE_ADDR 0x0b0 +#define MV64340_PCI_1_MEMORY1_SIZE 0x0b8 +#define MV64340_PCI_1_MEMORY2_BASE_ADDR 0x2a0 +#define MV64340_PCI_1_MEMORY2_SIZE 0x2a8 +#define MV64340_PCI_1_MEMORY3_BASE_ADDR 0x2b0 +#define MV64340_PCI_1_MEMORY3_SIZE 0x2b8 + +/* SRAM base address */ +#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268 + +/* internal registers space base address */ +#define MV64340_INTERNAL_SPACE_BASE_ADDR 0x068 + +/* Enables the CS , DEV_CS , PCI 0 and PCI 1 + windows above */ +#define MV64340_BASE_ADDR_ENABLE 0x278 + +/****************************************/ +/* PCI remap registers */ +/****************************************/ + /* PCI 0 */ +#define MV64340_PCI_0_IO_ADDR_REMAP 0x0f0 +#define MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP 0x0f8 +#define MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP 0x320 +#define MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP 0x100 +#define MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP 0x328 +#define MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP 0x2f8 +#define MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP 0x330 +#define MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP 0x300 +#define MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP 0x338 + /* PCI 1 */ +#define MV64340_PCI_1_IO_ADDR_REMAP 0x108 +#define MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP 0x110 +#define MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP 0x340 +#define MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP 0x118 +#define MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP 0x348 +#define MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP 0x310 +#define MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP 0x350 +#define MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP 0x318 +#define MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP 0x358 + +#define MV64340_CPU_PCI_0_HEADERS_RETARGET_CONTROL 0x3b0 +#define MV64340_CPU_PCI_0_HEADERS_RETARGET_BASE 0x3b8 +#define MV64340_CPU_PCI_1_HEADERS_RETARGET_CONTROL 0x3c0 +#define MV64340_CPU_PCI_1_HEADERS_RETARGET_BASE 0x3c8 +#define 
MV64340_CPU_GE_HEADERS_RETARGET_CONTROL 0x3d0 +#define MV64340_CPU_GE_HEADERS_RETARGET_BASE 0x3d8 +#define MV64340_CPU_IDMA_HEADERS_RETARGET_CONTROL 0x3e0 +#define MV64340_CPU_IDMA_HEADERS_RETARGET_BASE 0x3e8 + +/****************************************/ +/* CPU Control Registers */ +/****************************************/ + +#define MV64340_CPU_CONFIG 0x000 +#define MV64340_CPU_MODE 0x120 +#define MV64340_CPU_MASTER_CONTROL 0x160 +#define MV64340_CPU_CROSS_BAR_CONTROL_LOW 0x150 +#define MV64340_CPU_CROSS_BAR_CONTROL_HIGH 0x158 +#define MV64340_CPU_CROSS_BAR_TIMEOUT 0x168 + +/****************************************/ +/* SMP RegisterS */ +/****************************************/ + +#define MV64340_SMP_WHO_AM_I 0x200 +#define MV64340_SMP_CPU0_DOORBELL 0x214 +#define MV64340_SMP_CPU0_DOORBELL_CLEAR 0x21C +#define MV64340_SMP_CPU1_DOORBELL 0x224 +#define MV64340_SMP_CPU1_DOORBELL_CLEAR 0x22C +#define MV64340_SMP_CPU0_DOORBELL_MASK 0x234 +#define MV64340_SMP_CPU1_DOORBELL_MASK 0x23C +#define MV64340_SMP_SEMAPHOR0 0x244 +#define MV64340_SMP_SEMAPHOR1 0x24c +#define MV64340_SMP_SEMAPHOR2 0x254 +#define MV64340_SMP_SEMAPHOR3 0x25c +#define MV64340_SMP_SEMAPHOR4 0x264 +#define MV64340_SMP_SEMAPHOR5 0x26c +#define MV64340_SMP_SEMAPHOR6 0x274 +#define MV64340_SMP_SEMAPHOR7 0x27c + +/****************************************/ +/* CPU Sync Barrier Register */ +/****************************************/ + +#define MV64340_CPU_0_SYNC_BARRIER_TRIGGER 0x0c0 +#define MV64340_CPU_0_SYNC_BARRIER_VIRTUAL 0x0c8 +#define MV64340_CPU_1_SYNC_BARRIER_TRIGGER 0x0d0 +#define MV64340_CPU_1_SYNC_BARRIER_VIRTUAL 0x0d8 + +/****************************************/ +/* CPU Access Protect */ +/****************************************/ + +#define MV64340_CPU_PROTECT_WINDOW_0_BASE_ADDR 0x180 +#define MV64340_CPU_PROTECT_WINDOW_0_SIZE 0x188 +#define MV64340_CPU_PROTECT_WINDOW_1_BASE_ADDR 0x190 +#define MV64340_CPU_PROTECT_WINDOW_1_SIZE 0x198 +#define MV64340_CPU_PROTECT_WINDOW_2_BASE_ADDR 0x1a0 +#define MV64340_CPU_PROTECT_WINDOW_2_SIZE 0x1a8 +#define MV64340_CPU_PROTECT_WINDOW_3_BASE_ADDR 0x1b0 +#define MV64340_CPU_PROTECT_WINDOW_3_SIZE 0x1b8 + + +/****************************************/ +/* CPU Error Report */ +/****************************************/ + +#define MV64340_CPU_ERROR_ADDR_LOW 0x070 +#define MV64340_CPU_ERROR_ADDR_HIGH 0x078 +#define MV64340_CPU_ERROR_DATA_LOW 0x128 +#define MV64340_CPU_ERROR_DATA_HIGH 0x130 +#define MV64340_CPU_ERROR_PARITY 0x138 +#define MV64340_CPU_ERROR_CAUSE 0x140 +#define MV64340_CPU_ERROR_MASK 0x148 + +/****************************************/ +/* CPU Interface Debug Registers */ +/****************************************/ + +#define MV64340_PUNIT_SLAVE_DEBUG_LOW 0x360 +#define MV64340_PUNIT_SLAVE_DEBUG_HIGH 0x368 +#define MV64340_PUNIT_MASTER_DEBUG_LOW 0x370 +#define MV64340_PUNIT_MASTER_DEBUG_HIGH 0x378 +#define MV64340_PUNIT_MMASK 0x3e4 + +/****************************************/ +/* Integrated SRAM Registers */ +/****************************************/ + +#define MV64340_SRAM_CONFIG 0x380 +#define MV64340_SRAM_TEST_MODE 0X3F4 +#define MV64340_SRAM_ERROR_CAUSE 0x388 +#define MV64340_SRAM_ERROR_ADDR 0x390 +#define MV64340_SRAM_ERROR_ADDR_HIGH 0X3F8 +#define MV64340_SRAM_ERROR_DATA_LOW 0x398 +#define MV64340_SRAM_ERROR_DATA_HIGH 0x3a0 +#define MV64340_SRAM_ERROR_DATA_PARITY 0x3a8 + +/****************************************/ +/* SDRAM Configuration */ +/****************************************/ + +#define MV64340_SDRAM_CONFIG 0x1400 +#define MV64340_D_UNIT_CONTROL_LOW 
0x1404 +#define MV64340_D_UNIT_CONTROL_HIGH 0x1424 +#define MV64340_SDRAM_TIMING_CONTROL_LOW 0x1408 +#define MV64340_SDRAM_TIMING_CONTROL_HIGH 0x140c +#define MV64340_SDRAM_ADDR_CONTROL 0x1410 +#define MV64340_SDRAM_OPEN_PAGES_CONTROL 0x1414 +#define MV64340_SDRAM_OPERATION 0x1418 +#define MV64340_SDRAM_MODE 0x141c +#define MV64340_EXTENDED_DRAM_MODE 0x1420 +#define MV64340_SDRAM_CROSS_BAR_CONTROL_LOW 0x1430 +#define MV64340_SDRAM_CROSS_BAR_CONTROL_HIGH 0x1434 +#define MV64340_SDRAM_CROSS_BAR_TIMEOUT 0x1438 +#define MV64340_SDRAM_ADDR_CTRL_PADS_CALIBRATION 0x14c0 +#define MV64340_SDRAM_DATA_PADS_CALIBRATION 0x14c4 + +/****************************************/ +/* SDRAM Error Report */ +/****************************************/ + +#define MV64340_SDRAM_ERROR_DATA_LOW 0x1444 +#define MV64340_SDRAM_ERROR_DATA_HIGH 0x1440 +#define MV64340_SDRAM_ERROR_ADDR 0x1450 +#define MV64340_SDRAM_RECEIVED_ECC 0x1448 +#define MV64340_SDRAM_CALCULATED_ECC 0x144c +#define MV64340_SDRAM_ECC_CONTROL 0x1454 +#define MV64340_SDRAM_ECC_ERROR_COUNTER 0x1458 + +/******************************************/ +/* Controlled Delay Line (CDL) Registers */ +/******************************************/ + +#define MV64340_DFCDL_CONFIG0 0x1480 +#define MV64340_DFCDL_CONFIG1 0x1484 +#define MV64340_DLL_WRITE 0x1488 +#define MV64340_DLL_READ 0x148c +#define MV64340_SRAM_ADDR 0x1490 +#define MV64340_SRAM_DATA0 0x1494 +#define MV64340_SRAM_DATA1 0x1498 +#define MV64340_SRAM_DATA2 0x149c +#define MV64340_DFCL_PROBE 0x14a0 + +/******************************************/ +/* Debug Registers */ +/******************************************/ + +#define MV64340_DUNIT_DEBUG_LOW 0x1460 +#define MV64340_DUNIT_DEBUG_HIGH 0x1464 +#define MV64340_DUNIT_MMASK 0X1b40 + +/****************************************/ +/* Device Parameters */ +/****************************************/ + +#define MV64340_DEVICE_BANK0_PARAMETERS 0x45c +#define MV64340_DEVICE_BANK1_PARAMETERS 0x460 +#define MV64340_DEVICE_BANK2_PARAMETERS 0x464 +#define MV64340_DEVICE_BANK3_PARAMETERS 0x468 +#define MV64340_DEVICE_BOOT_BANK_PARAMETERS 0x46c +#define MV64340_DEVICE_INTERFACE_CONTROL 0x4c0 +#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_LOW 0x4c8 +#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_HIGH 0x4cc +#define MV64340_DEVICE_INTERFACE_CROSS_BAR_TIMEOUT 0x4c4 + +/****************************************/ +/* Device interrupt registers */ +/****************************************/ + +#define MV64340_DEVICE_INTERRUPT_CAUSE 0x4d0 +#define MV64340_DEVICE_INTERRUPT_MASK 0x4d4 +#define MV64340_DEVICE_ERROR_ADDR 0x4d8 +#define MV64340_DEVICE_ERROR_DATA 0x4dc +#define MV64340_DEVICE_ERROR_PARITY 0x4e0 + +/****************************************/ +/* Device debug registers */ +/****************************************/ + +#define MV64340_DEVICE_DEBUG_LOW 0x4e4 +#define MV64340_DEVICE_DEBUG_HIGH 0x4e8 +#define MV64340_RUNIT_MMASK 0x4f0 + +/****************************************/ +/* PCI Slave Address Decoding registers */ +/****************************************/ + +#define MV64340_PCI_0_CS_0_BANK_SIZE 0xc08 +#define MV64340_PCI_1_CS_0_BANK_SIZE 0xc88 +#define MV64340_PCI_0_CS_1_BANK_SIZE 0xd08 +#define MV64340_PCI_1_CS_1_BANK_SIZE 0xd88 +#define MV64340_PCI_0_CS_2_BANK_SIZE 0xc0c +#define MV64340_PCI_1_CS_2_BANK_SIZE 0xc8c +#define MV64340_PCI_0_CS_3_BANK_SIZE 0xd0c +#define MV64340_PCI_1_CS_3_BANK_SIZE 0xd8c +#define MV64340_PCI_0_DEVCS_0_BANK_SIZE 0xc10 +#define MV64340_PCI_1_DEVCS_0_BANK_SIZE 0xc90 +#define MV64340_PCI_0_DEVCS_1_BANK_SIZE 0xd10 +#define 
MV64340_PCI_1_DEVCS_1_BANK_SIZE 0xd90 +#define MV64340_PCI_0_DEVCS_2_BANK_SIZE 0xd18 +#define MV64340_PCI_1_DEVCS_2_BANK_SIZE 0xd98 +#define MV64340_PCI_0_DEVCS_3_BANK_SIZE 0xc14 +#define MV64340_PCI_1_DEVCS_3_BANK_SIZE 0xc94 +#define MV64340_PCI_0_DEVCS_BOOT_BANK_SIZE 0xd14 +#define MV64340_PCI_1_DEVCS_BOOT_BANK_SIZE 0xd94 +#define MV64340_PCI_0_P2P_MEM0_BAR_SIZE 0xd1c +#define MV64340_PCI_1_P2P_MEM0_BAR_SIZE 0xd9c +#define MV64340_PCI_0_P2P_MEM1_BAR_SIZE 0xd20 +#define MV64340_PCI_1_P2P_MEM1_BAR_SIZE 0xda0 +#define MV64340_PCI_0_P2P_I_O_BAR_SIZE 0xd24 +#define MV64340_PCI_1_P2P_I_O_BAR_SIZE 0xda4 +#define MV64340_PCI_0_CPU_BAR_SIZE 0xd28 +#define MV64340_PCI_1_CPU_BAR_SIZE 0xda8 +#define MV64340_PCI_0_INTERNAL_SRAM_BAR_SIZE 0xe00 +#define MV64340_PCI_1_INTERNAL_SRAM_BAR_SIZE 0xe80 +#define MV64340_PCI_0_EXPANSION_ROM_BAR_SIZE 0xd2c +#define MV64340_PCI_1_EXPANSION_ROM_BAR_SIZE 0xd9c +#define MV64340_PCI_0_BASE_ADDR_REG_ENABLE 0xc3c +#define MV64340_PCI_1_BASE_ADDR_REG_ENABLE 0xcbc +#define MV64340_PCI_0_CS_0_BASE_ADDR_REMAP 0xc48 +#define MV64340_PCI_1_CS_0_BASE_ADDR_REMAP 0xcc8 +#define MV64340_PCI_0_CS_1_BASE_ADDR_REMAP 0xd48 +#define MV64340_PCI_1_CS_1_BASE_ADDR_REMAP 0xdc8 +#define MV64340_PCI_0_CS_2_BASE_ADDR_REMAP 0xc4c +#define MV64340_PCI_1_CS_2_BASE_ADDR_REMAP 0xccc +#define MV64340_PCI_0_CS_3_BASE_ADDR_REMAP 0xd4c +#define MV64340_PCI_1_CS_3_BASE_ADDR_REMAP 0xdcc +#define MV64340_PCI_0_CS_0_BASE_HIGH_ADDR_REMAP 0xF04 +#define MV64340_PCI_1_CS_0_BASE_HIGH_ADDR_REMAP 0xF84 +#define MV64340_PCI_0_CS_1_BASE_HIGH_ADDR_REMAP 0xF08 +#define MV64340_PCI_1_CS_1_BASE_HIGH_ADDR_REMAP 0xF88 +#define MV64340_PCI_0_CS_2_BASE_HIGH_ADDR_REMAP 0xF0C +#define MV64340_PCI_1_CS_2_BASE_HIGH_ADDR_REMAP 0xF8C +#define MV64340_PCI_0_CS_3_BASE_HIGH_ADDR_REMAP 0xF10 +#define MV64340_PCI_1_CS_3_BASE_HIGH_ADDR_REMAP 0xF90 +#define MV64340_PCI_0_DEVCS_0_BASE_ADDR_REMAP 0xc50 +#define MV64340_PCI_1_DEVCS_0_BASE_ADDR_REMAP 0xcd0 +#define MV64340_PCI_0_DEVCS_1_BASE_ADDR_REMAP 0xd50 +#define MV64340_PCI_1_DEVCS_1_BASE_ADDR_REMAP 0xdd0 +#define MV64340_PCI_0_DEVCS_2_BASE_ADDR_REMAP 0xd58 +#define MV64340_PCI_1_DEVCS_2_BASE_ADDR_REMAP 0xdd8 +#define MV64340_PCI_0_DEVCS_3_BASE_ADDR_REMAP 0xc54 +#define MV64340_PCI_1_DEVCS_3_BASE_ADDR_REMAP 0xcd4 +#define MV64340_PCI_0_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xd54 +#define MV64340_PCI_1_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xdd4 +#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xd5c +#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xddc +#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xd60 +#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xde0 +#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xd64 +#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xde4 +#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xd68 +#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xde8 +#define MV64340_PCI_0_P2P_I_O_BASE_ADDR_REMAP 0xd6c +#define MV64340_PCI_1_P2P_I_O_BASE_ADDR_REMAP 0xdec +#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_LOW 0xd70 +#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_LOW 0xdf0 +#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_HIGH 0xd74 +#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_HIGH 0xdf4 +#define MV64340_PCI_0_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf00 +#define MV64340_PCI_1_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf80 +#define MV64340_PCI_0_EXPANSION_ROM_BASE_ADDR_REMAP 0xf38 +#define MV64340_PCI_1_EXPANSION_ROM_BASE_ADDR_REMAP 0xfb8 +#define MV64340_PCI_0_ADDR_DECODE_CONTROL 0xd3c +#define MV64340_PCI_1_ADDR_DECODE_CONTROL 0xdbc +#define MV64340_PCI_0_HEADERS_RETARGET_CONTROL 
0xF40 +#define MV64340_PCI_1_HEADERS_RETARGET_CONTROL 0xFc0 +#define MV64340_PCI_0_HEADERS_RETARGET_BASE 0xF44 +#define MV64340_PCI_1_HEADERS_RETARGET_BASE 0xFc4 +#define MV64340_PCI_0_HEADERS_RETARGET_HIGH 0xF48 +#define MV64340_PCI_1_HEADERS_RETARGET_HIGH 0xFc8 + +/***********************************/ +/* PCI Control Register Map */ +/***********************************/ + +#define MV64340_PCI_0_DLL_STATUS_AND_COMMAND 0x1d20 +#define MV64340_PCI_1_DLL_STATUS_AND_COMMAND 0x1da0 +#define MV64340_PCI_0_MPP_PADS_DRIVE_CONTROL 0x1d1C +#define MV64340_PCI_1_MPP_PADS_DRIVE_CONTROL 0x1d9C +#define MV64340_PCI_0_COMMAND 0xc00 +#define MV64340_PCI_1_COMMAND 0xc80 +#define MV64340_PCI_0_MODE 0xd00 +#define MV64340_PCI_1_MODE 0xd80 +#define MV64340_PCI_0_RETRY 0xc04 +#define MV64340_PCI_1_RETRY 0xc84 +#define MV64340_PCI_0_READ_BUFFER_DISCARD_TIMER 0xd04 +#define MV64340_PCI_1_READ_BUFFER_DISCARD_TIMER 0xd84 +#define MV64340_PCI_0_MSI_TRIGGER_TIMER 0xc38 +#define MV64340_PCI_1_MSI_TRIGGER_TIMER 0xcb8 +#define MV64340_PCI_0_ARBITER_CONTROL 0x1d00 +#define MV64340_PCI_1_ARBITER_CONTROL 0x1d80 +#define MV64340_PCI_0_CROSS_BAR_CONTROL_LOW 0x1d08 +#define MV64340_PCI_1_CROSS_BAR_CONTROL_LOW 0x1d88 +#define MV64340_PCI_0_CROSS_BAR_CONTROL_HIGH 0x1d0c +#define MV64340_PCI_1_CROSS_BAR_CONTROL_HIGH 0x1d8c +#define MV64340_PCI_0_CROSS_BAR_TIMEOUT 0x1d04 +#define MV64340_PCI_1_CROSS_BAR_TIMEOUT 0x1d84 +#define MV64340_PCI_0_SYNC_BARRIER_TRIGGER_REG 0x1D18 +#define MV64340_PCI_1_SYNC_BARRIER_TRIGGER_REG 0x1D98 +#define MV64340_PCI_0_SYNC_BARRIER_VIRTUAL_REG 0x1d10 +#define MV64340_PCI_1_SYNC_BARRIER_VIRTUAL_REG 0x1d90 +#define MV64340_PCI_0_P2P_CONFIG 0x1d14 +#define MV64340_PCI_1_P2P_CONFIG 0x1d94 + +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_LOW 0x1e00 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_HIGH 0x1e04 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_0 0x1e08 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_LOW 0x1e10 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_HIGH 0x1e14 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_1 0x1e18 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_LOW 0x1e20 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_HIGH 0x1e24 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_2 0x1e28 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_LOW 0x1e30 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_HIGH 0x1e34 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_3 0x1e38 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_LOW 0x1e40 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_HIGH 0x1e44 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_4 0x1e48 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_LOW 0x1e50 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_HIGH 0x1e54 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_5 0x1e58 + +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_LOW 0x1e80 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_HIGH 0x1e84 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_0 0x1e88 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_LOW 0x1e90 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_HIGH 0x1e94 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_1 0x1e98 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_LOW 0x1ea0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_HIGH 0x1ea4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_2 0x1ea8 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_LOW 0x1eb0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_HIGH 0x1eb4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_3 0x1eb8 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_LOW 0x1ec0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_HIGH 0x1ec4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_4 0x1ec8 +#define 
MV64340_PCI_1_ACCESS_CONTROL_BASE_5_LOW 0x1ed0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_HIGH 0x1ed4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_5 0x1ed8 + +/****************************************/ +/* PCI Configuration Access Registers */ +/****************************************/ + +#define MV64340_PCI_0_CONFIG_ADDR 0xcf8 +#define MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG 0xcfc +#define MV64340_PCI_1_CONFIG_ADDR 0xc78 +#define MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG 0xc7c +#define MV64340_PCI_0_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xc34 +#define MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xcb4 + +/****************************************/ +/* PCI Error Report Registers */ +/****************************************/ + +#define MV64340_PCI_0_SERR_MASK 0xc28 +#define MV64340_PCI_1_SERR_MASK 0xca8 +#define MV64340_PCI_0_ERROR_ADDR_LOW 0x1d40 +#define MV64340_PCI_1_ERROR_ADDR_LOW 0x1dc0 +#define MV64340_PCI_0_ERROR_ADDR_HIGH 0x1d44 +#define MV64340_PCI_1_ERROR_ADDR_HIGH 0x1dc4 +#define MV64340_PCI_0_ERROR_ATTRIBUTE 0x1d48 +#define MV64340_PCI_1_ERROR_ATTRIBUTE 0x1dc8 +#define MV64340_PCI_0_ERROR_COMMAND 0x1d50 +#define MV64340_PCI_1_ERROR_COMMAND 0x1dd0 +#define MV64340_PCI_0_ERROR_CAUSE 0x1d58 +#define MV64340_PCI_1_ERROR_CAUSE 0x1dd8 +#define MV64340_PCI_0_ERROR_MASK 0x1d5c +#define MV64340_PCI_1_ERROR_MASK 0x1ddc + +/****************************************/ +/* PCI Debug Registers */ +/****************************************/ + +#define MV64340_PCI_0_MMASK 0X1D24 +#define MV64340_PCI_1_MMASK 0X1DA4 + +/*********************************************/ +/* PCI Configuration, Function 0, Registers */ +/*********************************************/ + +#define MV64340_PCI_DEVICE_AND_VENDOR_ID 0x000 +#define MV64340_PCI_STATUS_AND_COMMAND 0x004 +#define MV64340_PCI_CLASS_CODE_AND_REVISION_ID 0x008 +#define MV64340_PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C + +#define MV64340_PCI_SCS_0_BASE_ADDR_LOW 0x010 +#define MV64340_PCI_SCS_0_BASE_ADDR_HIGH 0x014 +#define MV64340_PCI_SCS_1_BASE_ADDR_LOW 0x018 +#define MV64340_PCI_SCS_1_BASE_ADDR_HIGH 0x01C +#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_LOW 0x020 +#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_HIGH 0x024 +#define MV64340_PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02c +#define MV64340_PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030 +#define MV64340_PCI_CAPABILTY_LIST_POINTER 0x034 +#define MV64340_PCI_INTERRUPT_PIN_AND_LINE 0x03C + /* capability list */ +#define MV64340_PCI_POWER_MANAGEMENT_CAPABILITY 0x040 +#define MV64340_PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044 +#define MV64340_PCI_VPD_ADDR 0x048 +#define MV64340_PCI_VPD_DATA 0x04c +#define MV64340_PCI_MSI_MESSAGE_CONTROL 0x050 +#define MV64340_PCI_MSI_MESSAGE_ADDR 0x054 +#define MV64340_PCI_MSI_MESSAGE_UPPER_ADDR 0x058 +#define MV64340_PCI_MSI_MESSAGE_DATA 0x05c +#define MV64340_PCI_X_COMMAND 0x060 +#define MV64340_PCI_X_STATUS 0x064 +#define MV64340_PCI_COMPACT_PCI_HOT_SWAP 0x068 + +/***********************************************/ +/* PCI Configuration, Function 1, Registers */ +/***********************************************/ + +#define MV64340_PCI_SCS_2_BASE_ADDR_LOW 0x110 +#define MV64340_PCI_SCS_2_BASE_ADDR_HIGH 0x114 +#define MV64340_PCI_SCS_3_BASE_ADDR_LOW 0x118 +#define MV64340_PCI_SCS_3_BASE_ADDR_HIGH 0x11c +#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_LOW 0x120 +#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_HIGH 0x124 + +/***********************************************/ +/* PCI Configuration, Function 2, Registers */ 
+/***********************************************/ + +#define MV64340_PCI_DEVCS_0_BASE_ADDR_LOW 0x210 +#define MV64340_PCI_DEVCS_0_BASE_ADDR_HIGH 0x214 +#define MV64340_PCI_DEVCS_1_BASE_ADDR_LOW 0x218 +#define MV64340_PCI_DEVCS_1_BASE_ADDR_HIGH 0x21c +#define MV64340_PCI_DEVCS_2_BASE_ADDR_LOW 0x220 +#define MV64340_PCI_DEVCS_2_BASE_ADDR_HIGH 0x224 + +/***********************************************/ +/* PCI Configuration, Function 3, Registers */ +/***********************************************/ + +#define MV64340_PCI_DEVCS_3_BASE_ADDR_LOW 0x310 +#define MV64340_PCI_DEVCS_3_BASE_ADDR_HIGH 0x314 +#define MV64340_PCI_BOOT_CS_BASE_ADDR_LOW 0x318 +#define MV64340_PCI_BOOT_CS_BASE_ADDR_HIGH 0x31c +#define MV64340_PCI_CPU_BASE_ADDR_LOW 0x220 +#define MV64340_PCI_CPU_BASE_ADDR_HIGH 0x224 + +/***********************************************/ +/* PCI Configuration, Function 4, Registers */ +/***********************************************/ + +#define MV64340_PCI_P2P_MEM0_BASE_ADDR_LOW 0x410 +#define MV64340_PCI_P2P_MEM0_BASE_ADDR_HIGH 0x414 +#define MV64340_PCI_P2P_MEM1_BASE_ADDR_LOW 0x418 +#define MV64340_PCI_P2P_MEM1_BASE_ADDR_HIGH 0x41c +#define MV64340_PCI_P2P_I_O_BASE_ADDR 0x420 +#define MV64340_PCI_INTERNAL_REGS_I_O_MAPPED_BASE_ADDR 0x424 + +/****************************************/ +/* Messaging Unit Registers (I20) */ +/****************************************/ + +#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_0_SIDE 0x010 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_0_SIDE 0x014 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_0_SIDE 0x018 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_0_SIDE 0x01C +#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_0_SIDE 0x020 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x024 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x028 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_0_SIDE 0x02C +#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x030 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x034 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x040 +#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x044 +#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_0_SIDE 0x050 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_0_SIDE 0x054 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x060 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x064 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x068 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x06C +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x070 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x074 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x0F8 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x0FC + +#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_1_SIDE 0x090 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_1_SIDE 0x094 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_1_SIDE 0x098 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_1_SIDE 0x09C +#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_1_SIDE 0x0A0 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0A4 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0A8 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_1_SIDE 0x0AC +#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0B0 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0B4 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C0 +#define 
MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C4 +#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_1_SIDE 0x0D0 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_1_SIDE 0x0D4 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0E0 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0E4 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x0E8 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x0EC +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0F0 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0F4 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x078 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x07C + +#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C10 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C14 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C18 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C1C +#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU0_SIDE 0x1C20 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C24 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C28 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU0_SIDE 0x1C2C +#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C30 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C34 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C40 +#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C44 +#define MV64340_I2O_QUEUE_CONTROL_REG_CPU0_SIDE 0x1C50 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU0_SIDE 0x1C54 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C60 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C64 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1C68 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1C6C +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C70 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C74 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1CF8 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1CFC +#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C90 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C94 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C98 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C9C +#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU1_SIDE 0x1CA0 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CA4 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CA8 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU1_SIDE 0x1CAC +#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CB0 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CB4 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC0 +#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC4 +#define MV64340_I2O_QUEUE_CONTROL_REG_CPU1_SIDE 0x1CD0 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU1_SIDE 0x1CD4 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CE0 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CE4 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1CE8 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1CEC +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CF0 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CF4 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1C78 +#define 
MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1C7C + +/****************************************/ +/* Ethernet Unit Registers */ +/****************************************/ + +/*******************************************/ +/* CUNIT Registers */ +/*******************************************/ + + /* Address Decoding Register Map */ + +#define MV64340_CUNIT_BASE_ADDR_REG0 0xf200 +#define MV64340_CUNIT_BASE_ADDR_REG1 0xf208 +#define MV64340_CUNIT_BASE_ADDR_REG2 0xf210 +#define MV64340_CUNIT_BASE_ADDR_REG3 0xf218 +#define MV64340_CUNIT_SIZE0 0xf204 +#define MV64340_CUNIT_SIZE1 0xf20c +#define MV64340_CUNIT_SIZE2 0xf214 +#define MV64340_CUNIT_SIZE3 0xf21c +#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG0 0xf240 +#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG1 0xf244 +#define MV64340_CUNIT_BASE_ADDR_ENABLE_REG 0xf250 +#define MV64340_MPSC0_ACCESS_PROTECTION_REG 0xf254 +#define MV64340_MPSC1_ACCESS_PROTECTION_REG 0xf258 +#define MV64340_CUNIT_INTERNAL_SPACE_BASE_ADDR_REG 0xf25C + + /* Error Report Registers */ + +#define MV64340_CUNIT_INTERRUPT_CAUSE_REG 0xf310 +#define MV64340_CUNIT_INTERRUPT_MASK_REG 0xf314 +#define MV64340_CUNIT_ERROR_ADDR 0xf318 + + /* Cunit Control Registers */ + +#define MV64340_CUNIT_ARBITER_CONTROL_REG 0xf300 +#define MV64340_CUNIT_CONFIG_REG 0xb40c +#define MV64340_CUNIT_CRROSBAR_TIMEOUT_REG 0xf304 + + /* Cunit Debug Registers */ + +#define MV64340_CUNIT_DEBUG_LOW 0xf340 +#define MV64340_CUNIT_DEBUG_HIGH 0xf344 +#define MV64340_CUNIT_MMASK 0xf380 + + /* MPSCs Clocks Routing Registers */ + +#define MV64340_MPSC_ROUTING_REG 0xb400 +#define MV64340_MPSC_RX_CLOCK_ROUTING_REG 0xb404 +#define MV64340_MPSC_TX_CLOCK_ROUTING_REG 0xb408 + + /* MPSCs Interrupts Registers */ + +#define MV64340_MPSC_CAUSE_REG(port) (0xb804 + (port<<3)) +#define MV64340_MPSC_MASK_REG(port) (0xb884 + (port<<3)) + +#define MV64340_MPSC_MAIN_CONFIG_LOW(port) (0x8000 + (port<<12)) +#define MV64340_MPSC_MAIN_CONFIG_HIGH(port) (0x8004 + (port<<12)) +#define MV64340_MPSC_PROTOCOL_CONFIG(port) (0x8008 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG1(port) (0x800c + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG2(port) (0x8010 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG3(port) (0x8014 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG4(port) (0x8018 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG5(port) (0x801c + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG6(port) (0x8020 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG7(port) (0x8024 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG8(port) (0x8028 + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG9(port) (0x802c + (port<<12)) +#define MV64340_MPSC_CHANNEL_REG10(port) (0x8030 + (port<<12)) + + /* MPSC0 Registers */ + + +/***************************************/ +/* SDMA Registers */ +/***************************************/ + +#define MV64340_SDMA_CONFIG_REG(channel) (0x4000 + (channel<<13)) +#define MV64340_SDMA_COMMAND_REG(channel) (0x4008 + (channel<<13)) +#define MV64340_SDMA_CURRENT_RX_DESCRIPTOR_POINTER(channel) (0x4810 + (channel<<13)) +#define MV64340_SDMA_CURRENT_TX_DESCRIPTOR_POINTER(channel) (0x4c10 + (channel<<13)) +#define MV64340_SDMA_FIRST_TX_DESCRIPTOR_POINTER(channel) (0x4c14 + (channel<<13)) + +#define MV64340_SDMA_CAUSE_REG 0xb800 +#define MV64340_SDMA_MASK_REG 0xb880 + +/* BRG Interrupts */ + +#define MV64340_BRG_CONFIG_REG(brg) (0xb200 + (brg<<3)) +#define MV64340_BRG_BAUDE_TUNING_REG(brg) (0xb208 + (brg<<3)) +#define MV64340_BRG_CAUSE_REG 0xb834 +#define MV64340_BRG_MASK_REG 0xb8b4 + +/****************************************/ +/* DMA Channel 
Control */ +/****************************************/ + +#define MV64340_DMA_CHANNEL0_CONTROL 0x840 +#define MV64340_DMA_CHANNEL0_CONTROL_HIGH 0x880 +#define MV64340_DMA_CHANNEL1_CONTROL 0x844 +#define MV64340_DMA_CHANNEL1_CONTROL_HIGH 0x884 +#define MV64340_DMA_CHANNEL2_CONTROL 0x848 +#define MV64340_DMA_CHANNEL2_CONTROL_HIGH 0x888 +#define MV64340_DMA_CHANNEL3_CONTROL 0x84C +#define MV64340_DMA_CHANNEL3_CONTROL_HIGH 0x88C + + +/****************************************/ +/* IDMA Registers */ +/****************************************/ + +#define MV64340_DMA_CHANNEL0_BYTE_COUNT 0x800 +#define MV64340_DMA_CHANNEL1_BYTE_COUNT 0x804 +#define MV64340_DMA_CHANNEL2_BYTE_COUNT 0x808 +#define MV64340_DMA_CHANNEL3_BYTE_COUNT 0x80C +#define MV64340_DMA_CHANNEL0_SOURCE_ADDR 0x810 +#define MV64340_DMA_CHANNEL1_SOURCE_ADDR 0x814 +#define MV64340_DMA_CHANNEL2_SOURCE_ADDR 0x818 +#define MV64340_DMA_CHANNEL3_SOURCE_ADDR 0x81c +#define MV64340_DMA_CHANNEL0_DESTINATION_ADDR 0x820 +#define MV64340_DMA_CHANNEL1_DESTINATION_ADDR 0x824 +#define MV64340_DMA_CHANNEL2_DESTINATION_ADDR 0x828 +#define MV64340_DMA_CHANNEL3_DESTINATION_ADDR 0x82C +#define MV64340_DMA_CHANNEL0_NEXT_DESCRIPTOR_POINTER 0x830 +#define MV64340_DMA_CHANNEL1_NEXT_DESCRIPTOR_POINTER 0x834 +#define MV64340_DMA_CHANNEL2_NEXT_DESCRIPTOR_POINTER 0x838 +#define MV64340_DMA_CHANNEL3_NEXT_DESCRIPTOR_POINTER 0x83C +#define MV64340_DMA_CHANNEL0_CURRENT_DESCRIPTOR_POINTER 0x870 +#define MV64340_DMA_CHANNEL1_CURRENT_DESCRIPTOR_POINTER 0x874 +#define MV64340_DMA_CHANNEL2_CURRENT_DESCRIPTOR_POINTER 0x878 +#define MV64340_DMA_CHANNEL3_CURRENT_DESCRIPTOR_POINTER 0x87C + + /* IDMA Address Decoding Base Address Registers */ + +#define MV64340_DMA_BASE_ADDR_REG0 0xa00 +#define MV64340_DMA_BASE_ADDR_REG1 0xa08 +#define MV64340_DMA_BASE_ADDR_REG2 0xa10 +#define MV64340_DMA_BASE_ADDR_REG3 0xa18 +#define MV64340_DMA_BASE_ADDR_REG4 0xa20 +#define MV64340_DMA_BASE_ADDR_REG5 0xa28 +#define MV64340_DMA_BASE_ADDR_REG6 0xa30 +#define MV64340_DMA_BASE_ADDR_REG7 0xa38 + + /* IDMA Address Decoding Size Address Register */ + +#define MV64340_DMA_SIZE_REG0 0xa04 +#define MV64340_DMA_SIZE_REG1 0xa0c +#define MV64340_DMA_SIZE_REG2 0xa14 +#define MV64340_DMA_SIZE_REG3 0xa1c +#define MV64340_DMA_SIZE_REG4 0xa24 +#define MV64340_DMA_SIZE_REG5 0xa2c +#define MV64340_DMA_SIZE_REG6 0xa34 +#define MV64340_DMA_SIZE_REG7 0xa3C + + /* IDMA Address Decoding High Address Remap and Access + Protection Registers */ + +#define MV64340_DMA_HIGH_ADDR_REMAP_REG0 0xa60 +#define MV64340_DMA_HIGH_ADDR_REMAP_REG1 0xa64 +#define MV64340_DMA_HIGH_ADDR_REMAP_REG2 0xa68 +#define MV64340_DMA_HIGH_ADDR_REMAP_REG3 0xa6C +#define MV64340_DMA_BASE_ADDR_ENABLE_REG 0xa80 +#define MV64340_DMA_CHANNEL0_ACCESS_PROTECTION_REG 0xa70 +#define MV64340_DMA_CHANNEL1_ACCESS_PROTECTION_REG 0xa74 +#define MV64340_DMA_CHANNEL2_ACCESS_PROTECTION_REG 0xa78 +#define MV64340_DMA_CHANNEL3_ACCESS_PROTECTION_REG 0xa7c +#define MV64340_DMA_ARBITER_CONTROL 0x860 +#define MV64340_DMA_CROSS_BAR_TIMEOUT 0x8d0 + + /* IDMA Headers Retarget Registers */ + +#define MV64340_DMA_HEADERS_RETARGET_CONTROL 0xa84 +#define MV64340_DMA_HEADERS_RETARGET_BASE 0xa88 + + /* IDMA Interrupt Register */ + +#define MV64340_DMA_INTERRUPT_CAUSE_REG 0x8c0 +#define MV64340_DMA_INTERRUPT_CAUSE_MASK 0x8c4 +#define MV64340_DMA_ERROR_ADDR 0x8c8 +#define MV64340_DMA_ERROR_SELECT 0x8cc + + /* IDMA Debug Register ( for internal use ) */ + +#define MV64340_DMA_DEBUG_LOW 0x8e0 +#define MV64340_DMA_DEBUG_HIGH 0x8e4 +#define MV64340_DMA_SPARE 0xA8C + 
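The SDMA, MPSC, BRG and IDMA definitions above are raw offsets into the MV64340 system controller's internal register window; the parameterized macros simply fold the channel or port number into the offset (for example, MV64340_SDMA_CONFIG_REG(1) expands to 0x4000 + (1 << 13) = 0x6000, and MV64340_MPSC_MAIN_CONFIG_LOW(1) to 0x9000). A minimal sketch of how a driver might consume these offsets follows; it is not part of the patch, and the base address, window size, function names and the ioremap()/readl()/writel() usage are illustrative assumptions only.

#include <linux/io.h>
#include <linux/errno.h>
/* assumes this header (the MV643XX register definitions above) is included */

/* Hypothetical register window; the real physical base is board-specific. */
#define MV64340_EXAMPLE_REG_BASE	0xf1000000UL
#define MV64340_EXAMPLE_REG_SIZE	0x10000

static void __iomem *mv64340_example_base;

static int mv64340_example_probe(void)
{
	mv64340_example_base = ioremap(MV64340_EXAMPLE_REG_BASE,
				       MV64340_EXAMPLE_REG_SIZE);
	if (!mv64340_example_base)
		return -ENOMEM;

	/* MV64340_SDMA_CONFIG_REG(1) == 0x4000 + (1 << 13) == 0x6000 */
	writel(0, mv64340_example_base + MV64340_SDMA_CONFIG_REG(1));

	/* MV64340_MPSC_MAIN_CONFIG_LOW(1) == 0x8000 + (1 << 12) == 0x9000 */
	(void)readl(mv64340_example_base + MV64340_MPSC_MAIN_CONFIG_LOW(1));

	/* Fixed-offset registers work the same way, e.g. IDMA channel 0. */
	(void)readl(mv64340_example_base + MV64340_DMA_CHANNEL0_BYTE_COUNT);

	return 0;
}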
+/****************************************/ +/* Timer_Counter */ +/****************************************/ + +#define MV64340_TIMER_COUNTER0 0x850 +#define MV64340_TIMER_COUNTER1 0x854 +#define MV64340_TIMER_COUNTER2 0x858 +#define MV64340_TIMER_COUNTER3 0x85C +#define MV64340_TIMER_COUNTER_0_3_CONTROL 0x864 +#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868 +#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c + +/****************************************/ +/* Watchdog registers */ +/****************************************/ + +#define MV64340_WATCHDOG_CONFIG_REG 0xb410 +#define MV64340_WATCHDOG_VALUE_REG 0xb414 + +/****************************************/ +/* I2C Registers */ +/****************************************/ + +#define MV64XXX_I2C_OFFSET 0xc000 +#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020 + +/****************************************/ +/* GPP Interface Registers */ +/****************************************/ + +#define MV64340_GPP_IO_CONTROL 0xf100 +#define MV64340_GPP_LEVEL_CONTROL 0xf110 +#define MV64340_GPP_VALUE 0xf104 +#define MV64340_GPP_INTERRUPT_CAUSE 0xf108 +#define MV64340_GPP_INTERRUPT_MASK0 0xf10c +#define MV64340_GPP_INTERRUPT_MASK1 0xf114 +#define MV64340_GPP_VALUE_SET 0xf118 +#define MV64340_GPP_VALUE_CLEAR 0xf11c + +/****************************************/ +/* Interrupt Controller Registers */ +/****************************************/ + +/****************************************/ +/* Interrupts */ +/****************************************/ + +#define MV64340_MAIN_INTERRUPT_CAUSE_LOW 0x004 +#define MV64340_MAIN_INTERRUPT_CAUSE_HIGH 0x00c +#define MV64340_CPU_INTERRUPT0_MASK_LOW 0x014 +#define MV64340_CPU_INTERRUPT0_MASK_HIGH 0x01c +#define MV64340_CPU_INTERRUPT0_SELECT_CAUSE 0x024 +#define MV64340_CPU_INTERRUPT1_MASK_LOW 0x034 +#define MV64340_CPU_INTERRUPT1_MASK_HIGH 0x03c +#define MV64340_CPU_INTERRUPT1_SELECT_CAUSE 0x044 +#define MV64340_INTERRUPT0_MASK_0_LOW 0x054 +#define MV64340_INTERRUPT0_MASK_0_HIGH 0x05c +#define MV64340_INTERRUPT0_SELECT_CAUSE 0x064 +#define MV64340_INTERRUPT1_MASK_0_LOW 0x074 +#define MV64340_INTERRUPT1_MASK_0_HIGH 0x07c +#define MV64340_INTERRUPT1_SELECT_CAUSE 0x084 + +/****************************************/ +/* MPP Interface Registers */ +/****************************************/ + +#define MV64340_MPP_CONTROL0 0xf000 +#define MV64340_MPP_CONTROL1 0xf004 +#define MV64340_MPP_CONTROL2 0xf008 +#define MV64340_MPP_CONTROL3 0xf00c + +/****************************************/ +/* Serial Initialization registers */ +/****************************************/ + +#define MV64340_SERIAL_INIT_LAST_DATA 0xf324 +#define MV64340_SERIAL_INIT_CONTROL 0xf328 +#define MV64340_SERIAL_INIT_STATUS 0xf32c + +extern void mv64340_irq_init(unsigned int base); + +/* MPSC Platform Device, Driver Data (Shared register regions) */ +#define MPSC_SHARED_NAME "mpsc_shared" + +#define MPSC_ROUTING_BASE_ORDER 0 +#define MPSC_SDMA_INTR_BASE_ORDER 1 + +#define MPSC_ROUTING_REG_BLOCK_SIZE 0x000c +#define MPSC_SDMA_INTR_REG_BLOCK_SIZE 0x0084 + +struct mpsc_shared_pdata { + u32 mrr_val; + u32 rcrr_val; + u32 tcrr_val; + u32 intr_cause_val; + u32 intr_mask_val; +}; + +/* MPSC Platform Device, Driver Data */ +#define MPSC_CTLR_NAME "mpsc" + +#define MPSC_BASE_ORDER 0 +#define MPSC_SDMA_BASE_ORDER 1 +#define MPSC_BRG_BASE_ORDER 2 + +#define MPSC_REG_BLOCK_SIZE 0x0038 +#define MPSC_SDMA_REG_BLOCK_SIZE 0x0c18 +#define MPSC_BRG_REG_BLOCK_SIZE 0x0008 + +struct mpsc_pdata { + u8 mirror_regs; + u8 cache_mgmt; + u8 max_idle; + int default_baud; + int 
default_bits; + int default_parity; + int default_flow; + u32 chr_1_val; + u32 chr_2_val; + u32 chr_10_val; + u32 mpcr_val; + u32 bcr_val; + u8 brg_can_tune; + u8 brg_clk_src; + u32 brg_clk_freq; +}; + +/* Watchdog Platform Device, Driver Data */ +#define MV64x60_WDT_NAME "mv64x60_wdt" + +struct mv64x60_wdt_pdata { + int timeout; /* watchdog expiry in seconds, default 10 */ + int bus_clk; /* bus clock in MHz, default 133 */ +}; + +#endif /* __ASM_MV643XX_H */ diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h new file mode 100644 index 00000000..30b0c4e7 --- /dev/null +++ b/include/linux/mv643xx_eth.h @@ -0,0 +1,83 @@ +/* + * MV-643XX ethernet platform device data definition file. + */ + +#ifndef __LINUX_MV643XX_ETH_H +#define __LINUX_MV643XX_ETH_H + +#include <linux/mbus.h> + +#define MV643XX_ETH_SHARED_NAME "mv643xx_eth" +#define MV643XX_ETH_NAME "mv643xx_eth_port" +#define MV643XX_ETH_SHARED_REGS 0x2000 +#define MV643XX_ETH_SHARED_REGS_SIZE 0x2000 +#define MV643XX_ETH_BAR_4 0x2220 +#define MV643XX_ETH_SIZE_REG_4 0x2224 +#define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290 + +struct mv643xx_eth_shared_platform_data { + struct mbus_dram_target_info *dram; + struct platform_device *shared_smi; + unsigned int t_clk; + /* + * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default + * limit of 9KiB will be used. + */ + int tx_csum_limit; +}; + +#define MV643XX_ETH_PHY_ADDR_DEFAULT 0 +#define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x)) +#define MV643XX_ETH_PHY_NONE 0xff + +struct mv643xx_eth_platform_data { + /* + * Pointer back to our parent instance, and our port number. + */ + struct platform_device *shared; + int port_number; + + /* + * Whether a PHY is present, and if yes, at which address. + */ + int phy_addr; + + /* + * Use this MAC address if it is valid, overriding the + * address that is already in the hardware. + */ + u8 mac_addr[6]; + + /* + * If speed is 0, autonegotiation is enabled. + * Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000. + * Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL. + */ + int speed; + int duplex; + + /* + * How many RX/TX queues to use. + */ + int rx_queue_count; + int tx_queue_count; + + /* + * Override default RX/TX queue sizes if nonzero. + */ + int rx_queue_size; + int tx_queue_size; + + /* + * Use on-chip SRAM for RX/TX descriptors if size is nonzero + * and sufficient to contain all descriptors for the requested + * ring sizes. + */ + unsigned long rx_sram_addr; + int rx_sram_size; + unsigned long tx_sram_addr; + int tx_sram_size; +}; + + +#endif diff --git a/include/linux/mv643xx_i2c.h b/include/linux/mv643xx_i2c.h new file mode 100644 index 00000000..5db5152e --- /dev/null +++ b/include/linux/mv643xx_i2c.h @@ -0,0 +1,22 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _MV64XXX_I2C_H_ +#define _MV64XXX_I2C_H_ + +#include <linux/types.h> + +#define MV64XXX_I2C_CTLR_NAME "mv64xxx_i2c" + +/* i2c Platform Device, Driver Data */ +struct mv64xxx_i2c_pdata { + u32 freq_m; + u32 freq_n; + u32 timeout; /* In milliseconds */ +}; + +#endif /*_MV64XXX_I2C_H_*/ diff --git a/include/linux/mxm-wmi.h b/include/linux/mxm-wmi.h new file mode 100644 index 00000000..617a2950 --- /dev/null +++ b/include/linux/mxm-wmi.h @@ -0,0 +1,33 @@ +/* + * MXM WMI driver + * + * Copyright(C) 2010 Red Hat. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef MXM_WMI_H +#define MXM_WMI_H + +/* discrete adapters */ +#define MXM_MXDS_ADAPTER_0 0x0 +#define MXM_MXDS_ADAPTER_1 0x0 +/* integrated adapter */ +#define MXM_MXDS_ADAPTER_IGD 0x10 +int mxm_wmi_call_mxds(int adapter); +int mxm_wmi_call_mxmx(int adapter); +bool mxm_wmi_supported(void); + +#endif diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h new file mode 100644 index 00000000..54b8e0d8 --- /dev/null +++ b/include/linux/n_r3964.h @@ -0,0 +1,231 @@ +/* r3964 linediscipline for linux + * + * ----------------------------------------------------------- + * Copyright by + * Philips Automation Projects + * Kassel (Germany) + * ----------------------------------------------------------- + * This software may be used and distributed according to the terms of + * the GNU General Public License, incorporated herein by reference. + * + * Author: + * L. Haag + * + * $Log: r3964.h,v $ + * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de> + * Fixed HZ usage on 2.6 kernels + * Removed unnecessary include + * + * Revision 1.3 2001/03/18 13:02:24 dwmw2 + * Fix timer usage, use spinlocks properly. + * + * Revision 1.2 2001/03/18 12:53:15 dwmw2 + * Merge changes in 2.4.2 + * + * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2 + * This'll screw the version control + * + * Revision 1.6 1998/09/30 00:40:38 dwmw2 + * Updated to use kernel's N_R3964 if available + * + * Revision 1.4 1998/04/02 20:29:44 lhaag + * select, blocking, ... 
+ * + * Revision 1.3 1998/02/12 18:58:43 root + * fixed some memory leaks + * calculation of checksum characters + * + * Revision 1.2 1998/02/07 13:03:17 root + * ioctl read_telegram + * + * Revision 1.1 1998/02/06 19:19:43 root + * Initial revision + * + * + */ + +#ifndef __LINUX_N_R3964_H__ +#define __LINUX_N_R3964_H__ + +/* line disciplines for r3964 protocol */ + +#ifdef __KERNEL__ + +#include <linux/param.h> + +/* + * Common ascii handshake characters: + */ + +#define STX 0x02 +#define ETX 0x03 +#define DLE 0x10 +#define NAK 0x15 + +/* + * Timeouts (from milliseconds to jiffies) + */ + +#define R3964_TO_QVZ ((550)*HZ/1000) +#define R3964_TO_ZVZ ((220)*HZ/1000) +#define R3964_TO_NO_BUF ((400)*HZ/1000) +#define R3964_NO_TX_ROOM ((100)*HZ/1000) +#define R3964_TO_RX_PANIC ((4000)*HZ/1000) +#define R3964_MAX_RETRIES 5 + +#endif + +/* + * Ioctl-commands + */ + +#define R3964_ENABLE_SIGNALS 0x5301 +#define R3964_SETPRIORITY 0x5302 +#define R3964_USE_BCC 0x5303 +#define R3964_READ_TELEGRAM 0x5304 + +/* Options for R3964_SETPRIORITY */ +#define R3964_MASTER 0 +#define R3964_SLAVE 1 + +/* Options for R3964_ENABLE_SIGNALS */ +#define R3964_SIG_ACK 0x0001 +#define R3964_SIG_DATA 0x0002 +#define R3964_SIG_ALL 0x000f +#define R3964_SIG_NONE 0x0000 +#define R3964_USE_SIGIO 0x1000 + +/* + * r3964 operation states: + */ +#ifdef __KERNEL__ + +enum { R3964_IDLE, + R3964_TX_REQUEST, R3964_TRANSMITTING, + R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK, + R3964_WAIT_FOR_RX_BUF, + R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT + }; + +/* + * All open file-handles are 'clients' and are stored in a linked list: + */ + +struct r3964_message; + +struct r3964_client_info { + spinlock_t lock; + struct pid *pid; + unsigned int sig_flags; + + struct r3964_client_info *next; + + struct r3964_message *first_msg; + struct r3964_message *last_msg; + struct r3964_block_header *next_block_to_read; + int msg_count; +}; + + +#endif + +/* types for msg_id: */ +enum {R3964_MSG_ACK=1, R3964_MSG_DATA }; + +#define R3964_MAX_MSG_COUNT 32 + +/* error codes for client messages */ +#define R3964_OK 0 /* no error. */ +#define R3964_TX_FAIL -1 /* transmission error, block NOT sent */ +#define R3964_OVERFLOW -2 /* msg queue overflow */ + +/* the client gets this struct when calling read(fd,...): */ +struct r3964_client_message { + int msg_id; + int arg; + int error_code; +}; + +#define R3964_MTU 256 + + +#ifdef __KERNEL__ + +struct r3964_block_header; + +/* internal version of client_message: */ +struct r3964_message { + int msg_id; + int arg; + int error_code; + struct r3964_block_header *block; + struct r3964_message *next; +}; + +/* + * Header of received block in rx_buf/tx_buf: + */ + +struct r3964_block_header +{ + unsigned int length; /* length in chars without header */ + unsigned char *data; /* usually data is located + immediately behind this struct */ + unsigned int locks; /* only used in rx_buffer */ + + struct r3964_block_header *next; + struct r3964_client_info *owner; /* =NULL in rx_buffer */ +}; + +/* + * If rx_buf hasn't enough space to store R3964_MTU chars, + * we will reject all incoming STX-requests by sending NAK. 
+ */ + +#define RX_BUF_SIZE 4000 +#define TX_BUF_SIZE 4000 +#define R3964_MAX_BLOCKS_IN_RX_QUEUE 100 + +#define R3964_PARITY 0x0001 +#define R3964_FRAME 0x0002 +#define R3964_OVERRUN 0x0004 +#define R3964_UNKNOWN 0x0008 +#define R3964_BREAK 0x0010 +#define R3964_CHECKSUM 0x0020 +#define R3964_ERROR 0x003f +#define R3964_BCC 0x4000 +#define R3964_DEBUG 0x8000 + + +struct r3964_info { + spinlock_t lock; + struct tty_struct *tty; + unsigned char priority; + unsigned char *rx_buf; /* ring buffer */ + unsigned char *tx_buf; + + wait_queue_head_t read_wait; + //struct wait_queue *read_wait; + + struct r3964_block_header *rx_first; + struct r3964_block_header *rx_last; + struct r3964_block_header *tx_first; + struct r3964_block_header *tx_last; + unsigned int tx_position; + unsigned int rx_position; + unsigned char last_rx; + unsigned char bcc; + unsigned int blocks_in_rx_queue; + + + struct r3964_client_info *firstClient; + unsigned int state; + unsigned int flags; + + struct timer_list tmr; + int nRetry; +}; + +#endif + +#endif diff --git a/include/linux/namei.h b/include/linux/namei.h new file mode 100644 index 00000000..ffc02135 --- /dev/null +++ b/include/linux/namei.h @@ -0,0 +1,112 @@ +#ifndef _LINUX_NAMEI_H +#define _LINUX_NAMEI_H + +#include <linux/dcache.h> +#include <linux/linkage.h> +#include <linux/path.h> + +struct vfsmount; + +struct open_intent { + int flags; + int create_mode; + struct file *file; +}; + +enum { MAX_NESTED_LINKS = 8 }; + +struct nameidata { + struct path path; + struct qstr last; + struct path root; + struct inode *inode; /* path.dentry.d_inode */ + unsigned int flags; + unsigned seq; + int last_type; + unsigned depth; + char *saved_names[MAX_NESTED_LINKS + 1]; + + /* Intent data */ + union { + struct open_intent open; + } intent; +}; + +/* + * Type of the last component on LOOKUP_PARENT + */ +enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; + +/* + * The bitmask for a lookup event: + * - follow links at the end + * - require a directory + * - ending slashes ok even for nonexistent files + * - internal "there are more path components" flag + * - dentry cache is untrusted; force a real lookup + * - suppress terminal automount + */ +#define LOOKUP_FOLLOW 0x0001 +#define LOOKUP_DIRECTORY 0x0002 +#define LOOKUP_AUTOMOUNT 0x0004 + +#define LOOKUP_PARENT 0x0010 +#define LOOKUP_REVAL 0x0020 +#define LOOKUP_RCU 0x0040 + +/* + * Intent data + */ +#define LOOKUP_OPEN 0x0100 +#define LOOKUP_CREATE 0x0200 +#define LOOKUP_EXCL 0x0400 +#define LOOKUP_RENAME_TARGET 0x0800 + +#define LOOKUP_JUMPED 0x1000 +#define LOOKUP_ROOT 0x2000 +#define LOOKUP_EMPTY 0x4000 + +extern int user_path_at(int, const char __user *, unsigned, struct path *); +extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); + +#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path) +#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path) +#define user_path_dir(name, path) \ + user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path) + +extern int kern_path(const char *, unsigned, struct path *); + +extern struct dentry *kern_path_create(int, const char *, struct path *, int); +extern struct dentry *user_path_create(int, const char __user *, struct path *, int); +extern int kern_path_parent(const char *, struct nameidata *); +extern int vfs_path_lookup(struct dentry *, struct vfsmount *, + const char *, unsigned int, struct path *); + +extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct 
dentry *dentry, + int (*open)(struct inode *, struct file *)); + +extern struct dentry *lookup_one_len(const char *, struct dentry *, int); + +extern int follow_down_one(struct path *); +extern int follow_down(struct path *); +extern int follow_up(struct path *); + +extern struct dentry *lock_rename(struct dentry *, struct dentry *); +extern void unlock_rename(struct dentry *, struct dentry *); + +static inline void nd_set_link(struct nameidata *nd, char *path) +{ + nd->saved_names[nd->depth] = path; +} + +static inline char *nd_get_link(struct nameidata *nd) +{ + return nd->saved_names[nd->depth]; +} + +static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) +{ + ((char *) name)[min(len, maxlen)] = '\0'; +} + +#endif /* _LINUX_NAMEI_H */ diff --git a/include/linux/nbd.h b/include/linux/nbd.h new file mode 100644 index 00000000..d146ca10 --- /dev/null +++ b/include/linux/nbd.h @@ -0,0 +1,102 @@ +/* + * 1999 Copyright (C) Pavel Machek, pavel@ucw.cz. This code is GPL. + * 1999/11/04 Copyright (C) 1999 VMware, Inc. (Regis "HPReg" Duchesne) + * Made nbd_end_request() use the io_request_lock + * 2001 Copyright (C) Steven Whitehouse + * New nbd_end_request() for compatibility with new linux block + * layer code. + * 2003/06/24 Louis D. Langholtz <ldl@aros.net> + * Removed unneeded blksize_bits field from nbd_device struct. + * Cleanup PARANOIA usage & code. + * 2004/02/19 Paul Clements + * Removed PARANOIA, plus various cleanup and comments + */ + +#ifndef LINUX_NBD_H +#define LINUX_NBD_H + +#include <linux/types.h> + +#define NBD_SET_SOCK _IO( 0xab, 0 ) +#define NBD_SET_BLKSIZE _IO( 0xab, 1 ) +#define NBD_SET_SIZE _IO( 0xab, 2 ) +#define NBD_DO_IT _IO( 0xab, 3 ) +#define NBD_CLEAR_SOCK _IO( 0xab, 4 ) +#define NBD_CLEAR_QUE _IO( 0xab, 5 ) +#define NBD_PRINT_DEBUG _IO( 0xab, 6 ) +#define NBD_SET_SIZE_BLOCKS _IO( 0xab, 7 ) +#define NBD_DISCONNECT _IO( 0xab, 8 ) +#define NBD_SET_TIMEOUT _IO( 0xab, 9 ) + +enum { + NBD_CMD_READ = 0, + NBD_CMD_WRITE = 1, + NBD_CMD_DISC = 2 +}; + +#define nbd_cmd(req) ((req)->cmd[0]) + +/* userspace doesn't need the nbd_device structure */ +#ifdef __KERNEL__ + +#include <linux/wait.h> +#include <linux/mutex.h> + +/* values for flags field */ +#define NBD_READ_ONLY 0x0001 +#define NBD_WRITE_NOCHK 0x0002 + +struct request; + +struct nbd_device { + int flags; + int harderror; /* Code of hard error */ + struct socket * sock; + struct file * file; /* If == NULL, device is not ready, yet */ + int magic; + + spinlock_t queue_lock; + struct list_head queue_head; /* Requests waiting result */ + struct request *active_req; + wait_queue_head_t active_wq; + struct list_head waiting_queue; /* Requests to be sent */ + wait_queue_head_t waiting_wq; + + struct mutex tx_lock; + struct gendisk *disk; + int blksize; + u64 bytesize; + pid_t pid; /* pid of nbd-client, if attached */ + int xmit_timeout; +}; + +#endif + +/* These are sent over the network in the request/reply magic fields */ + +#define NBD_REQUEST_MAGIC 0x25609513 +#define NBD_REPLY_MAGIC 0x67446698 +/* Do *not* use magics: 0x12560953 0x96744668. */ + +/* + * This is the packet used for communication between client and + * server. All data are in network byte order. + */ +struct nbd_request { + __be32 magic; + __be32 type; /* == READ || == WRITE */ + char handle[8]; + __be64 from; + __be32 len; +} __attribute__((packed)); + +/* + * This is the reply packet that nbd-server sends back to the client after + * it has completed an I/O request (or an error occurs). 
+ */ +struct nbd_reply { + __be32 magic; + __be32 error; /* 0 = ok, else error */ + char handle[8]; /* handle you got from request */ +}; +#endif diff --git a/include/linux/ncp.h b/include/linux/ncp.h new file mode 100644 index 00000000..99f0adee --- /dev/null +++ b/include/linux/ncp.h @@ -0,0 +1,201 @@ +/* + * ncp.h + * + * Copyright (C) 1995 by Volker Lendecke + * Modified for sparc by J.F. Chadima + * Modified for __constant_ntoh by Frank A. Vorstenbosch + * + */ + +#ifndef _LINUX_NCP_H +#define _LINUX_NCP_H + +#include <linux/types.h> + +#define NCP_PTYPE (0x11) +#define NCP_PORT (0x0451) + +#define NCP_ALLOC_SLOT_REQUEST (0x1111) +#define NCP_REQUEST (0x2222) +#define NCP_DEALLOC_SLOT_REQUEST (0x5555) + +struct ncp_request_header { + __u16 type; + __u8 sequence; + __u8 conn_low; + __u8 task; + __u8 conn_high; + __u8 function; + __u8 data[0]; +} __attribute__((packed)); + +#define NCP_REPLY (0x3333) +#define NCP_WATCHDOG (0x3E3E) +#define NCP_POSITIVE_ACK (0x9999) + +struct ncp_reply_header { + __u16 type; + __u8 sequence; + __u8 conn_low; + __u8 task; + __u8 conn_high; + __u8 completion_code; + __u8 connection_state; + __u8 data[0]; +} __attribute__((packed)); + +#define NCP_VOLNAME_LEN (16) +#define NCP_NUMBER_OF_VOLUMES (256) +struct ncp_volume_info { + __u32 total_blocks; + __u32 free_blocks; + __u32 purgeable_blocks; + __u32 not_yet_purgeable_blocks; + __u32 total_dir_entries; + __u32 available_dir_entries; + __u8 sectors_per_block; + char volume_name[NCP_VOLNAME_LEN + 1]; +}; + +#define AR_READ (cpu_to_le16(1)) +#define AR_WRITE (cpu_to_le16(2)) +#define AR_EXCLUSIVE (cpu_to_le16(0x20)) + +#define NCP_FILE_ID_LEN 6 + +/* Defines for Name Spaces */ +#define NW_NS_DOS 0 +#define NW_NS_MAC 1 +#define NW_NS_NFS 2 +#define NW_NS_FTAM 3 +#define NW_NS_OS2 4 + +/* Defines for ReturnInformationMask */ +#define RIM_NAME (cpu_to_le32(1)) +#define RIM_SPACE_ALLOCATED (cpu_to_le32(2)) +#define RIM_ATTRIBUTES (cpu_to_le32(4)) +#define RIM_DATA_SIZE (cpu_to_le32(8)) +#define RIM_TOTAL_SIZE (cpu_to_le32(0x10)) +#define RIM_EXT_ATTR_INFO (cpu_to_le32(0x20)) +#define RIM_ARCHIVE (cpu_to_le32(0x40)) +#define RIM_MODIFY (cpu_to_le32(0x80)) +#define RIM_CREATION (cpu_to_le32(0x100)) +#define RIM_OWNING_NAMESPACE (cpu_to_le32(0x200)) +#define RIM_DIRECTORY (cpu_to_le32(0x400)) +#define RIM_RIGHTS (cpu_to_le32(0x800)) +#define RIM_ALL (cpu_to_le32(0xFFF)) +#define RIM_COMPRESSED_INFO (cpu_to_le32(0x80000000)) + +/* Defines for NSInfoBitMask */ +#define NSIBM_NFS_NAME 0x0001 +#define NSIBM_NFS_MODE 0x0002 +#define NSIBM_NFS_GID 0x0004 +#define NSIBM_NFS_NLINKS 0x0008 +#define NSIBM_NFS_RDEV 0x0010 +#define NSIBM_NFS_LINK 0x0020 +#define NSIBM_NFS_CREATED 0x0040 +#define NSIBM_NFS_UID 0x0080 +#define NSIBM_NFS_ACSFLAG 0x0100 +#define NSIBM_NFS_MYFLAG 0x0200 + +/* open/create modes */ +#define OC_MODE_OPEN 0x01 +#define OC_MODE_TRUNCATE 0x02 +#define OC_MODE_REPLACE 0x02 +#define OC_MODE_CREATE 0x08 + +/* open/create results */ +#define OC_ACTION_NONE 0x00 +#define OC_ACTION_OPEN 0x01 +#define OC_ACTION_CREATE 0x02 +#define OC_ACTION_TRUNCATE 0x04 +#define OC_ACTION_REPLACE 0x04 + +/* access rights attributes */ +#ifndef AR_READ_ONLY +#define AR_READ_ONLY 0x0001 +#define AR_WRITE_ONLY 0x0002 +#define AR_DENY_READ 0x0004 +#define AR_DENY_WRITE 0x0008 +#define AR_COMPATIBILITY 0x0010 +#define AR_WRITE_THROUGH 0x0040 +#define AR_OPEN_COMPRESSED 0x0100 +#endif + +struct nw_nfs_info { + __u32 mode; + __u32 rdev; +}; + +struct nw_info_struct { + __u32 spaceAlloc; + __le32 attributes; + __u16 flags; + __le32 
dataStreamSize; + __le32 totalStreamSize; + __u16 numberOfStreams; + __le16 creationTime; + __le16 creationDate; + __u32 creatorID; + __le16 modifyTime; + __le16 modifyDate; + __u32 modifierID; + __le16 lastAccessDate; + __u16 archiveTime; + __u16 archiveDate; + __u32 archiverID; + __u16 inheritedRightsMask; + __le32 dirEntNum; + __le32 DosDirNum; + __u32 volNumber; + __u32 EADataSize; + __u32 EAKeyCount; + __u32 EAKeySize; + __u32 NSCreator; + __u8 nameLen; + __u8 entryName[256]; + /* libncp may depend on there being nothing after entryName */ +#ifdef __KERNEL__ + struct nw_nfs_info nfs; +#endif +} __attribute__((packed)); + +/* modify mask - use with MODIFY_DOS_INFO structure */ +#define DM_ATTRIBUTES (cpu_to_le32(0x02)) +#define DM_CREATE_DATE (cpu_to_le32(0x04)) +#define DM_CREATE_TIME (cpu_to_le32(0x08)) +#define DM_CREATOR_ID (cpu_to_le32(0x10)) +#define DM_ARCHIVE_DATE (cpu_to_le32(0x20)) +#define DM_ARCHIVE_TIME (cpu_to_le32(0x40)) +#define DM_ARCHIVER_ID (cpu_to_le32(0x80)) +#define DM_MODIFY_DATE (cpu_to_le32(0x0100)) +#define DM_MODIFY_TIME (cpu_to_le32(0x0200)) +#define DM_MODIFIER_ID (cpu_to_le32(0x0400)) +#define DM_LAST_ACCESS_DATE (cpu_to_le32(0x0800)) +#define DM_INHERITED_RIGHTS_MASK (cpu_to_le32(0x1000)) +#define DM_MAXIMUM_SPACE (cpu_to_le32(0x2000)) + +struct nw_modify_dos_info { + __le32 attributes; + __le16 creationDate; + __le16 creationTime; + __u32 creatorID; + __le16 modifyDate; + __le16 modifyTime; + __u32 modifierID; + __u16 archiveDate; + __u16 archiveTime; + __u32 archiverID; + __le16 lastAccessDate; + __u16 inheritanceGrantMask; + __u16 inheritanceRevokeMask; + __u32 maximumSpace; +} __attribute__((packed)); + +struct nw_search_sequence { + __u8 volNumber; + __u32 dirBase; + __u32 sequence; +} __attribute__((packed)); + +#endif /* _LINUX_NCP_H */ diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h new file mode 100644 index 00000000..e13eefef --- /dev/null +++ b/include/linux/ncp_fs.h @@ -0,0 +1,146 @@ +/* + * ncp_fs.h + * + * Copyright (C) 1995, 1996 by Volker Lendecke + * + */ + +#ifndef _LINUX_NCP_FS_H +#define _LINUX_NCP_FS_H + +#include <linux/fs.h> +#include <linux/in.h> +#include <linux/types.h> +#include <linux/magic.h> + +#include <linux/ipx.h> +#include <linux/ncp_no.h> + +/* + * ioctl commands + */ + +struct ncp_ioctl_request { + unsigned int function; + unsigned int size; + char __user *data; +}; + +struct ncp_fs_info { + int version; + struct sockaddr_ipx addr; + __kernel_uid_t mounted_uid; + int connection; /* Connection number the server assigned us */ + int buffer_size; /* The negotiated buffer size, to be + used for read/write requests! 
*/ + + int volume_number; + __le32 directory_id; +}; + +struct ncp_fs_info_v2 { + int version; + unsigned long mounted_uid; + unsigned int connection; + unsigned int buffer_size; + + unsigned int volume_number; + __le32 directory_id; + + __u32 dummy1; + __u32 dummy2; + __u32 dummy3; +}; + +struct ncp_sign_init +{ + char sign_root[8]; + char sign_last[16]; +}; + +struct ncp_lock_ioctl +{ +#define NCP_LOCK_LOG 0 +#define NCP_LOCK_SH 1 +#define NCP_LOCK_EX 2 +#define NCP_LOCK_CLEAR 256 + int cmd; + int origin; + unsigned int offset; + unsigned int length; +#define NCP_LOCK_DEFAULT_TIMEOUT 18 +#define NCP_LOCK_MAX_TIMEOUT 180 + int timeout; +}; + +struct ncp_setroot_ioctl +{ + int volNumber; + int namespace; + __le32 dirEntNum; +}; + +struct ncp_objectname_ioctl +{ +#define NCP_AUTH_NONE 0x00 +#define NCP_AUTH_BIND 0x31 +#define NCP_AUTH_NDS 0x32 + int auth_type; + size_t object_name_len; + void __user * object_name; /* a userspace data, in most cases user name */ +}; + +struct ncp_privatedata_ioctl +{ + size_t len; + void __user * data; /* ~1000 for NDS */ +}; + +/* NLS charsets by ioctl */ +#define NCP_IOCSNAME_LEN 20 +struct ncp_nls_ioctl +{ + unsigned char codepage[NCP_IOCSNAME_LEN+1]; + unsigned char iocharset[NCP_IOCSNAME_LEN+1]; +}; + +#define NCP_IOC_NCPREQUEST _IOR('n', 1, struct ncp_ioctl_request) +#define NCP_IOC_GETMOUNTUID _IOW('n', 2, __kernel_old_uid_t) +#define NCP_IOC_GETMOUNTUID2 _IOW('n', 2, unsigned long) + +#define NCP_IOC_CONN_LOGGED_IN _IO('n', 3) + +#define NCP_GET_FS_INFO_VERSION (1) +#define NCP_IOC_GET_FS_INFO _IOWR('n', 4, struct ncp_fs_info) +#define NCP_GET_FS_INFO_VERSION_V2 (2) +#define NCP_IOC_GET_FS_INFO_V2 _IOWR('n', 4, struct ncp_fs_info_v2) + +#define NCP_IOC_SIGN_INIT _IOR('n', 5, struct ncp_sign_init) +#define NCP_IOC_SIGN_WANTED _IOR('n', 6, int) +#define NCP_IOC_SET_SIGN_WANTED _IOW('n', 6, int) + +#define NCP_IOC_LOCKUNLOCK _IOR('n', 7, struct ncp_lock_ioctl) + +#define NCP_IOC_GETROOT _IOW('n', 8, struct ncp_setroot_ioctl) +#define NCP_IOC_SETROOT _IOR('n', 8, struct ncp_setroot_ioctl) + +#define NCP_IOC_GETOBJECTNAME _IOWR('n', 9, struct ncp_objectname_ioctl) +#define NCP_IOC_SETOBJECTNAME _IOR('n', 9, struct ncp_objectname_ioctl) +#define NCP_IOC_GETPRIVATEDATA _IOWR('n', 10, struct ncp_privatedata_ioctl) +#define NCP_IOC_SETPRIVATEDATA _IOR('n', 10, struct ncp_privatedata_ioctl) + +#define NCP_IOC_GETCHARSETS _IOWR('n', 11, struct ncp_nls_ioctl) +#define NCP_IOC_SETCHARSETS _IOR('n', 11, struct ncp_nls_ioctl) + +#define NCP_IOC_GETDENTRYTTL _IOW('n', 12, __u32) +#define NCP_IOC_SETDENTRYTTL _IOR('n', 12, __u32) + +/* + * The packet size to allocate. One page should be enough. 
+ */ +#define NCP_PACKET_SIZE 4070 + +#define NCP_MAXPATHLEN 255 +#define NCP_MAXNAMELEN 14 + +#endif /* _LINUX_NCP_FS_H */ diff --git a/include/linux/ncp_mount.h b/include/linux/ncp_mount.h new file mode 100644 index 00000000..dfcbea2d --- /dev/null +++ b/include/linux/ncp_mount.h @@ -0,0 +1,71 @@ +/* + * ncp_mount.h + * + * Copyright (C) 1995, 1996 by Volker Lendecke + * + */ + +#ifndef _LINUX_NCP_MOUNT_H +#define _LINUX_NCP_MOUNT_H + +#include <linux/types.h> +#include <linux/ncp.h> + +#define NCP_MOUNT_VERSION 3 /* Binary */ + +/* Values for flags */ +#define NCP_MOUNT_SOFT 0x0001 +#define NCP_MOUNT_INTR 0x0002 +#define NCP_MOUNT_STRONG 0x0004 /* enable delete/rename of r/o files */ +#define NCP_MOUNT_NO_OS2 0x0008 /* do not use OS/2 (LONG) namespace */ +#define NCP_MOUNT_NO_NFS 0x0010 /* do not use NFS namespace */ +#define NCP_MOUNT_EXTRAS 0x0020 +#define NCP_MOUNT_SYMLINKS 0x0040 /* enable symlinks */ +#define NCP_MOUNT_NFS_EXTRAS 0x0080 /* Enable use of NFS NS meta-info */ +
struct ncp_mount_data { + int version; + unsigned int ncp_fd; /* The socket to the ncp port */ + __kernel_uid_t mounted_uid; /* Who may umount() this filesystem? */ + __kernel_pid_t wdog_pid; /* Who cares for our watchdog packets? */ + + unsigned char mounted_vol[NCP_VOLNAME_LEN + 1]; + unsigned int time_out; /* How long should I wait after + sending a NCP request? */ + unsigned int retry_count; /* And how often should I retry? */ + unsigned int flags; + + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_mode_t file_mode; + __kernel_mode_t dir_mode; +}; + +#define NCP_MOUNT_VERSION_V4 (4) /* Binary or text */ + +struct ncp_mount_data_v4 { + int version; + unsigned long flags; /* NCP_MOUNT_* flags */ + /* MIPS uses long __kernel_uid_t, but... */ + /* we never pass -1, so it is safe */ + unsigned long mounted_uid; /* Who may umount() this filesystem? */ + /* MIPS uses long __kernel_pid_t */ + long wdog_pid; /* Who cares for our watchdog packets? */ + + unsigned int ncp_fd; /* The socket to the ncp port */ + unsigned int time_out; /* How long should I wait after + sending a NCP request? */ + unsigned int retry_count; /* And how often should I retry? */ + + /* MIPS uses long __kernel_uid_t... 
*/ + /* we never pass -1, so it is safe */ + unsigned long uid; + unsigned long gid; + /* MIPS uses unsigned long __kernel_mode_t */ + unsigned long file_mode; + unsigned long dir_mode; +}; + +#define NCP_MOUNT_VERSION_V5 (5) /* Text only */ + +#endif diff --git a/include/linux/ncp_no.h b/include/linux/ncp_no.h new file mode 100644 index 00000000..cddaa48f --- /dev/null +++ b/include/linux/ncp_no.h @@ -0,0 +1,19 @@ +#ifndef _NCP_NO +#define _NCP_NO + +/* these define the attribute byte as seen by NCP */ +#define aRONLY (__cpu_to_le32(1)) +#define aHIDDEN (__cpu_to_le32(2)) +#define aSYSTEM (__cpu_to_le32(4)) +#define aEXECUTE (__cpu_to_le32(8)) +#define aDIR (__cpu_to_le32(0x10)) +#define aARCH (__cpu_to_le32(0x20)) +#define aSHARED (__cpu_to_le32(0x80)) +#define aDONTSUBALLOCATE (__cpu_to_le32(1L<<11)) +#define aTRANSACTIONAL (__cpu_to_le32(1L<<12)) +#define aPURGE (__cpu_to_le32(1L<<16)) +#define aRENAMEINHIBIT (__cpu_to_le32(1L<<17)) +#define aDELETEINHIBIT (__cpu_to_le32(1L<<18)) +#define aDONTCOMPRESS (__cpu_to_le32(1L<<27)) + +#endif /* _NCP_NO */ diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h new file mode 100644 index 00000000..b188f68a --- /dev/null +++ b/include/linux/neighbour.h @@ -0,0 +1,156 @@ +#ifndef __LINUX_NEIGHBOUR_H +#define __LINUX_NEIGHBOUR_H + +#include <linux/types.h> +#include <linux/netlink.h> + +struct ndmsg { + __u8 ndm_family; + __u8 ndm_pad1; + __u16 ndm_pad2; + __s32 ndm_ifindex; + __u16 ndm_state; + __u8 ndm_flags; + __u8 ndm_type; +}; + +enum { + NDA_UNSPEC, + NDA_DST, + NDA_LLADDR, + NDA_CACHEINFO, + NDA_PROBES, + __NDA_MAX +}; + +#define NDA_MAX (__NDA_MAX - 1) + +/* + * Neighbor Cache Entry Flags + */ + +#define NTF_USE 0x01 +#define NTF_PROXY 0x08 /* == ATF_PUBL */ +#define NTF_ROUTER 0x80 + +/* + * Neighbor Cache Entry States. + */ + +#define NUD_INCOMPLETE 0x01 +#define NUD_REACHABLE 0x02 +#define NUD_STALE 0x04 +#define NUD_DELAY 0x08 +#define NUD_PROBE 0x10 +#define NUD_FAILED 0x20 + +/* Dummy states */ +#define NUD_NOARP 0x40 +#define NUD_PERMANENT 0x80 +#define NUD_NONE 0x00 + +/* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change + and make no address resolution or NUD. + NUD_PERMANENT also cannot be deleted by garbage collectors. + */ + +struct nda_cacheinfo { + __u32 ndm_confirmed; + __u32 ndm_used; + __u32 ndm_updated; + __u32 ndm_refcnt; +}; + +/***************************************************************** + * Neighbour tables specific messages. + * + * To retrieve the neighbour tables send RTM_GETNEIGHTBL with the + * NLM_F_DUMP flag set. Every neighbour table configuration is + * spread over multiple messages to avoid running into message + * size limits on systems with many interfaces. The first message + * in the sequence transports all non-device-specific data such as + * statistics, configuration, and the default parameter set. + * This message is followed by 0..n messages carrying device + * specific parameter sets. + * Although the ordering should be sufficient, NDTA_NAME can be + * used to identify sequences. The initial message can be identified + * by checking for NDTA_CONFIG. The device specific messages do + * not contain this TLV but have NDTPA_IFINDEX set to the + * corresponding interface index. + * + * To change neighbour table attributes, send RTM_SETNEIGHTBL + * with NDTA_NAME set. Changeable attributes include NDTA_THRESH[1-3], + * NDTA_GC_INTERVAL, and all TLVs in NDTA_PARMS unless marked + * otherwise. 
Device specific parameter sets can be changed by + * setting NDTPA_IFINDEX to the interface index of the corresponding + * device. + ****/ + +struct ndt_stats { + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; +}; + +enum { + NDTPA_UNSPEC, + NDTPA_IFINDEX, /* u32, unchangeable */ + NDTPA_REFCNT, /* u32, read-only */ + NDTPA_REACHABLE_TIME, /* u64, read-only, msecs */ + NDTPA_BASE_REACHABLE_TIME, /* u64, msecs */ + NDTPA_RETRANS_TIME, /* u64, msecs */ + NDTPA_GC_STALETIME, /* u64, msecs */ + NDTPA_DELAY_PROBE_TIME, /* u64, msecs */ + NDTPA_QUEUE_LEN, /* u32 */ + NDTPA_APP_PROBES, /* u32 */ + NDTPA_UCAST_PROBES, /* u32 */ + NDTPA_MCAST_PROBES, /* u32 */ + NDTPA_ANYCAST_DELAY, /* u64, msecs */ + NDTPA_PROXY_DELAY, /* u64, msecs */ + NDTPA_PROXY_QLEN, /* u32 */ + NDTPA_LOCKTIME, /* u64, msecs */ + NDTPA_QUEUE_LENBYTES, /* u32 */ + __NDTPA_MAX +}; +#define NDTPA_MAX (__NDTPA_MAX - 1) + +struct ndtmsg { + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config { + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; /* delta to now in msecs */ + __u32 ndtc_last_rand; /* delta to now in msecs */ + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +enum { + NDTA_UNSPEC, + NDTA_NAME, /* char *, unchangeable */ + NDTA_THRESH1, /* u32 */ + NDTA_THRESH2, /* u32 */ + NDTA_THRESH3, /* u32 */ + NDTA_CONFIG, /* struct ndt_config, read-only */ + NDTA_PARMS, /* nested TLV NDTPA_* */ + NDTA_STATS, /* struct ndt_stats, read-only */ + NDTA_GC_INTERVAL, /* u64, msecs */ + __NDTA_MAX +}; +#define NDTA_MAX (__NDTA_MAX - 1) + +#endif diff --git a/include/linux/net.h b/include/linux/net.h new file mode 100644 index 00000000..be60c7f5 --- /dev/null +++ b/include/linux/net.h @@ -0,0 +1,294 @@ +/* + * NET An implementation of the SOCKET network access protocol. + * This is the master header file for the Linux NET layer, + * or, in plain English: the networking handling part of the + * kernel. + * + * Version: @(#)net.h 1.0.3 05/25/93 + * + * Authors: Orest Zborowski, <obz@Kodak.COM> + * Ross Biro + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _LINUX_NET_H +#define _LINUX_NET_H + +#include <linux/socket.h> +#include <asm/socket.h> + +#define NPROTO AF_MAX + +#define SYS_SOCKET 1 /* sys_socket(2) */ +#define SYS_BIND 2 /* sys_bind(2) */ +#define SYS_CONNECT 3 /* sys_connect(2) */ +#define SYS_LISTEN 4 /* sys_listen(2) */ +#define SYS_ACCEPT 5 /* sys_accept(2) */ +#define SYS_GETSOCKNAME 6 /* sys_getsockname(2) */ +#define SYS_GETPEERNAME 7 /* sys_getpeername(2) */ +#define SYS_SOCKETPAIR 8 /* sys_socketpair(2) */ +#define SYS_SEND 9 /* sys_send(2) */ +#define SYS_RECV 10 /* sys_recv(2) */ +#define SYS_SENDTO 11 /* sys_sendto(2) */ +#define SYS_RECVFROM 12 /* sys_recvfrom(2) */ +#define SYS_SHUTDOWN 13 /* sys_shutdown(2) */ +#define SYS_SETSOCKOPT 14 /* sys_setsockopt(2) */ +#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */ +#define SYS_SENDMSG 16 /* sys_sendmsg(2) */ +#define SYS_RECVMSG 17 /* sys_recvmsg(2) */ +#define SYS_ACCEPT4 18 /* sys_accept4(2) */ +#define SYS_RECVMMSG 19 /* sys_recvmmsg(2) */ +#define SYS_SENDMMSG 20 /* sys_sendmmsg(2) */ + +typedef enum { + SS_FREE = 0, /* not allocated */ + SS_UNCONNECTED, /* unconnected to any socket */ + SS_CONNECTING, /* in process of connecting */ + SS_CONNECTED, /* connected to socket */ + SS_DISCONNECTING /* in process of disconnecting */ +} socket_state; + +#define __SO_ACCEPTCON (1 << 16) /* performed a listen */ + +#ifdef __KERNEL__ +#include <linux/stringify.h> +#include <linux/random.h> +#include <linux/wait.h> +#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/kmemcheck.h> +#include <linux/rcupdate.h> + +struct poll_table_struct; +struct pipe_inode_info; +struct inode; +struct net; + +#define SOCK_ASYNC_NOSPACE 0 +#define SOCK_ASYNC_WAITDATA 1 +#define SOCK_NOSPACE 2 +#define SOCK_PASSCRED 3 +#define SOCK_PASSSEC 4 + +#ifndef ARCH_HAS_SOCKET_TYPES +/** + * enum sock_type - Socket types + * @SOCK_STREAM: stream (connection) socket + * @SOCK_DGRAM: datagram (conn.less) socket + * @SOCK_RAW: raw socket + * @SOCK_RDM: reliably-delivered message + * @SOCK_SEQPACKET: sequential packet socket + * @SOCK_DCCP: Datagram Congestion Control Protocol socket + * @SOCK_PACKET: linux specific way of getting packets at the dev level. + * For writing rarp and other similar things on the user level. + * + * When adding some new socket type please + * grep ARCH_HAS_SOCKET_TYPE include/asm-* /socket.h, at least MIPS + * overrides this enum for binary compat reasons. + */ +enum sock_type { + SOCK_STREAM = 1, + SOCK_DGRAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +#define SOCK_MAX (SOCK_PACKET + 1) +/* Mask which covers at least up to SOCK_MASK-1. The + * remaining bits are used as flags. 
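To make the split concrete, here is a hedged sketch (illustrative only, not the in-tree sys_socket() code; it assumes <linux/errno.h>) of separating the base type from the creation flags using SOCK_TYPE_MASK, SOCK_CLOEXEC and SOCK_NONBLOCK defined just below:

/* Sketch only: the low bits of the socket(2) "type" argument select the
 * sock_type, the remaining bits carry creation flags. */
static inline int example_split_socket_type(int type, int *flags)
{
	*flags = type & ~SOCK_TYPE_MASK;
	if (*flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;			/* unknown flag bits */
	return type & SOCK_TYPE_MASK;		/* e.g. SOCK_STREAM */
}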
*/ +#define SOCK_TYPE_MASK 0xf + +/* Flags for socket, socketpair, accept4 */ +#define SOCK_CLOEXEC O_CLOEXEC +#ifndef SOCK_NONBLOCK +#define SOCK_NONBLOCK O_NONBLOCK +#endif + +#endif /* ARCH_HAS_SOCKET_TYPES */ + +enum sock_shutdown_cmd { + SHUT_RD = 0, + SHUT_WR = 1, + SHUT_RDWR = 2, +}; + +struct socket_wq { + /* Note: wait MUST be first field of socket_wq */ + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + struct rcu_head rcu; +} ____cacheline_aligned_in_smp; + +/** + * struct socket - general BSD socket + * @state: socket state (%SS_CONNECTED, etc) + * @type: socket type (%SOCK_STREAM, etc) + * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc) + * @ops: protocol specific socket operations + * @file: File back pointer for gc + * @sk: internal networking protocol agnostic socket representation + * @wq: wait queue for several uses + */ +struct socket { + socket_state state; + + kmemcheck_bitfield_begin(type); + short type; + kmemcheck_bitfield_end(type); + + unsigned long flags; + + struct socket_wq __rcu *wq; + + struct file *file; + struct sock *sk; + const struct proto_ops *ops; +}; + +struct vm_area_struct; +struct page; +struct kiocb; +struct sockaddr; +struct msghdr; +struct module; + +struct proto_ops { + int family; + struct module *owner; + int (*release) (struct socket *sock); + int (*bind) (struct socket *sock, + struct sockaddr *myaddr, + int sockaddr_len); + int (*connect) (struct socket *sock, + struct sockaddr *vaddr, + int sockaddr_len, int flags); + int (*socketpair)(struct socket *sock1, + struct socket *sock2); + int (*accept) (struct socket *sock, + struct socket *newsock, int flags); + int (*getname) (struct socket *sock, + struct sockaddr *addr, + int *sockaddr_len, int peer); + unsigned int (*poll) (struct file *file, struct socket *sock, + struct poll_table_struct *wait); + int (*ioctl) (struct socket *sock, unsigned int cmd, + unsigned long arg); +#ifdef CONFIG_COMPAT + int (*compat_ioctl) (struct socket *sock, unsigned int cmd, + unsigned long arg); +#endif + int (*listen) (struct socket *sock, int len); + int (*shutdown) (struct socket *sock, int flags); + int (*setsockopt)(struct socket *sock, int level, + int optname, char __user *optval, unsigned int optlen); + int (*getsockopt)(struct socket *sock, int level, + int optname, char __user *optval, int __user *optlen); +#ifdef CONFIG_COMPAT + int (*compat_setsockopt)(struct socket *sock, int level, + int optname, char __user *optval, unsigned int optlen); + int (*compat_getsockopt)(struct socket *sock, int level, + int optname, char __user *optval, int __user *optlen); +#endif + int (*sendmsg) (struct kiocb *iocb, struct socket *sock, + struct msghdr *m, size_t total_len); + int (*recvmsg) (struct kiocb *iocb, struct socket *sock, + struct msghdr *m, size_t total_len, + int flags); + int (*mmap) (struct file *file, struct socket *sock, + struct vm_area_struct * vma); + ssize_t (*sendpage) (struct socket *sock, struct page *page, + int offset, size_t size, int flags); + ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, unsigned int flags); + void (*set_peek_off)(struct sock *sk, int val); +}; + +#define DECLARE_SOCKADDR(type, dst, src) \ + type dst = ({ __sockaddr_check_size(sizeof(*dst)); (type) src; }) + +struct net_proto_family { + int family; + int (*create)(struct net *net, struct socket *sock, + int protocol, int kern); + struct module *owner; +}; + +struct iovec; +struct kvec; + +enum { + SOCK_WAKE_IO, + SOCK_WAKE_WAITD, + SOCK_WAKE_SPACE, 
+ SOCK_WAKE_URG, +}; + +extern int sock_wake_async(struct socket *sk, int how, int band); +extern int sock_register(const struct net_proto_family *fam); +extern void sock_unregister(int family); +extern int __sock_create(struct net *net, int family, int type, int proto, + struct socket **res, int kern); +extern int sock_create(int family, int type, int proto, + struct socket **res); +extern int sock_create_kern(int family, int type, int proto, + struct socket **res); +extern int sock_create_lite(int family, int type, int proto, + struct socket **res); +extern void sock_release(struct socket *sock); +extern int sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len); +extern int sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags); +extern int sock_map_fd(struct socket *sock, int flags); +extern struct socket *sockfd_lookup(int fd, int *err); +#define sockfd_put(sock) fput(sock->file) +extern int net_ratelimit(void); + +#define net_random() random32() +#define net_srandom(seed) srandom32((__force u32)seed) + +extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg, + struct kvec *vec, size_t num, size_t len); +extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg, + struct kvec *vec, size_t num, + size_t len, int flags); + +extern int kernel_bind(struct socket *sock, struct sockaddr *addr, + int addrlen); +extern int kernel_listen(struct socket *sock, int backlog); +extern int kernel_accept(struct socket *sock, struct socket **newsock, + int flags); +extern int kernel_connect(struct socket *sock, struct sockaddr *addr, + int addrlen, int flags); +extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr, + int *addrlen); +extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr, + int *addrlen); +extern int kernel_getsockopt(struct socket *sock, int level, int optname, + char *optval, int *optlen); +extern int kernel_setsockopt(struct socket *sock, int level, int optname, + char *optval, unsigned int optlen); +extern int kernel_sendpage(struct socket *sock, struct page *page, int offset, + size_t size, int flags); +extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); +extern int kernel_sock_shutdown(struct socket *sock, + enum sock_shutdown_cmd how); + +#define MODULE_ALIAS_NETPROTO(proto) \ + MODULE_ALIAS("net-pf-" __stringify(proto)) + +#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \ + MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto)) + +#define MODULE_ALIAS_NET_PF_PROTO_TYPE(pf, proto, type) \ + MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ + "-type-" __stringify(type)) + +#endif /* __KERNEL__ */ +#endif /* _LINUX_NET_H */ diff --git a/include/linux/net_dropmon.h b/include/linux/net_dropmon.h new file mode 100644 index 00000000..2a739462 --- /dev/null +++ b/include/linux/net_dropmon.h @@ -0,0 +1,64 @@ +#ifndef __NET_DROPMON_H +#define __NET_DROPMON_H + +#include <linux/types.h> +#include <linux/netlink.h> + +struct net_dm_drop_point { + __u8 pc[8]; + __u32 count; +}; + +#define is_drop_point_hw(x) do {\ + int ____i, ____j = 0;\ + for (____i = 0; ____i < 8; ____i++)\ + ____j |= x[____i];\ + ____j;\ +} while (0) + +#define NET_DM_CFG_VERSION 0 +#define NET_DM_CFG_ALERT_COUNT 1 +#define NET_DM_CFG_ALERT_DELAY 2 +#define NET_DM_CFG_MAX 3 + +struct net_dm_config_entry { + __u32 type; + __u64 data __attribute__((aligned(8))); +}; + +struct net_dm_config_msg { + __u32 entries; + struct net_dm_config_entry options[0]; +}; + +struct
net_dm_alert_msg { + __u32 entries; + struct net_dm_drop_point points[0]; +}; + +struct net_dm_user_msg { + union { + struct net_dm_config_msg user; + struct net_dm_alert_msg alert; + } u; +}; + + +/* These are the netlink message types for this protocol */ + +enum { + NET_DM_CMD_UNSPEC = 0, + NET_DM_CMD_ALERT, + NET_DM_CMD_CONFIG, + NET_DM_CMD_START, + NET_DM_CMD_STOP, + _NET_DM_CMD_MAX, +}; + +#define NET_DM_CMD_MAX (_NET_DM_CMD_MAX - 1) + +/* + * Our group identifiers + */ +#define NET_DM_GRP_ALERT 1 +#endif diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h new file mode 100644 index 00000000..ae5df122 --- /dev/null +++ b/include/linux/net_tstamp.h @@ -0,0 +1,113 @@ +/* + * Userspace API for hardware time stamping of network packets + * + * Copyright (C) 2008,2009 Intel Corporation + * Author: Patrick Ohly <patrick.ohly@intel.com> + * + */ + +#ifndef _NET_TIMESTAMPING_H +#define _NET_TIMESTAMPING_H + +#include <linux/socket.h> /* for SO_TIMESTAMPING */ + +/* SO_TIMESTAMPING gets an integer bit field comprised of these values */ +enum { + SOF_TIMESTAMPING_TX_HARDWARE = (1<<0), + SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1), + SOF_TIMESTAMPING_RX_HARDWARE = (1<<2), + SOF_TIMESTAMPING_RX_SOFTWARE = (1<<3), + SOF_TIMESTAMPING_SOFTWARE = (1<<4), + SOF_TIMESTAMPING_SYS_HARDWARE = (1<<5), + SOF_TIMESTAMPING_RAW_HARDWARE = (1<<6), + SOF_TIMESTAMPING_MASK = + (SOF_TIMESTAMPING_RAW_HARDWARE - 1) | + SOF_TIMESTAMPING_RAW_HARDWARE +}; + +/** + * struct hwtstamp_config - %SIOCSHWTSTAMP parameter + * + * @flags: no flags defined right now, must be zero + * @tx_type: one of HWTSTAMP_TX_* + * @rx_type: one of one of HWTSTAMP_FILTER_* + * + * %SIOCSHWTSTAMP expects a &struct ifreq with a ifr_data pointer to + * this structure. dev_ifsioc() in the kernel takes care of the + * translation between 32 bit userspace and 64 bit kernel. The + * structure is intentionally chosen so that it has the same layout on + * 32 and 64 bit systems, don't break this! + */ +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + +/* possible values for hwtstamp_config->tx_type */ +enum hwtstamp_tx_types { + /* + * No outgoing packet will need hardware time stamping; + * should a packet arrive which asks for it, no hardware + * time stamping will be done. + */ + HWTSTAMP_TX_OFF, + + /* + * Enables hardware time stamping for outgoing packets; + * the sender of the packet decides which are to be + * time stamped by setting %SOF_TIMESTAMPING_TX_SOFTWARE + * before sending the packet. + */ + HWTSTAMP_TX_ON, + + /* + * Enables time stamping for outgoing packets just as + * HWTSTAMP_TX_ON does, but also enables time stamp insertion + * directly into Sync packets. In this case, transmitted Sync + * packets will not received a time stamp via the socket error + * queue. 
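Tying the pieces above together, a userspace sketch (not part of this header; the interface name and socket fd are assumed, and SO_TIMESTAMPING comes from <asm/socket.h>) of enabling hardware receive stamps via SIOCSHWTSTAMP and then asking for raw hardware timestamps on a socket:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Sketch only: configure the device first, then the socket. */
static int example_enable_hw_rx_stamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}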
+ */ + HWTSTAMP_TX_ONESTEP_SYNC, +}; + +/* possible values for hwtstamp_config->rx_filter */ +enum hwtstamp_rx_filters { + /* time stamp no incoming packet at all */ + HWTSTAMP_FILTER_NONE, + + /* time stamp any incoming packet */ + HWTSTAMP_FILTER_ALL, + + /* return value: time stamp all packets requested plus some others */ + HWTSTAMP_FILTER_SOME, + + /* PTP v1, UDP, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V1_L4_EVENT, + /* PTP v1, UDP, Sync packet */ + HWTSTAMP_FILTER_PTP_V1_L4_SYNC, + /* PTP v1, UDP, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ, + /* PTP v2, UDP, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_L4_EVENT, + /* PTP v2, UDP, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_L4_SYNC, + /* PTP v2, UDP, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ, + + /* 802.AS1, Ethernet, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_L2_EVENT, + /* 802.AS1, Ethernet, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_L2_SYNC, + /* 802.AS1, Ethernet, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ, + + /* PTP v2/802.AS1, any layer, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_EVENT, + /* PTP v2/802.AS1, any layer, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_SYNC, + /* PTP v2/802.AS1, any layer, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ, +}; + +#endif /* _NET_TIMESTAMPING_H */ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h new file mode 100644 index 00000000..5ac32123 --- /dev/null +++ b/include/linux/netdev_features.h @@ -0,0 +1,150 @@ +/* + * Network device features. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_NETDEV_FEATURES_H +#define _LINUX_NETDEV_FEATURES_H + +#include <linux/types.h> + +typedef u64 netdev_features_t; + +enum { + NETIF_F_SG_BIT, /* Scatter/gather IO. */ + NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */ + __UNUSED_NETIF_F_1, + NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */ + NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */ + NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */ + NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */ + NETIF_F_HW_VLAN_TX_BIT, /* Transmit VLAN hw acceleration */ + NETIF_F_HW_VLAN_RX_BIT, /* Receive VLAN hw acceleration */ + NETIF_F_HW_VLAN_FILTER_BIT, /* Receive filtering on VLAN */ + NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */ + NETIF_F_GSO_BIT, /* Enable software GSO. */ + NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ + /* do not use LLTX in new drivers */ + NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */ + NETIF_F_GRO_BIT, /* Generic receive offload */ + NETIF_F_LRO_BIT, /* large receive offload */ + + /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */ + NETIF_F_TSO_BIT /* ... TCPv4 segmentation */ + = NETIF_F_GSO_SHIFT, + NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */ + NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ + NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ + NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ + NETIF_F_FSO_BIT, /* ... FCoE segmentation */ + NETIF_F_GSO_RESERVED1, /* ... free (fill GSO_MASK to 8 bits) */ + /**/NETIF_F_GSO_LAST, /* [can't be last bit, see GSO_MASK] */ + NETIF_F_GSO_RESERVED2 /* ... 
free (fill GSO_MASK to 8 bits) */ + = NETIF_F_GSO_LAST, + + NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ + NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ + NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/ + NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ + NETIF_F_RXHASH_BIT, /* Receive hashing offload */ + NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */ + NETIF_F_NOCACHE_COPY_BIT, /* Use no-cache copyfromuser */ + NETIF_F_LOOPBACK_BIT, /* Enable loopback */ + NETIF_F_RXFCS_BIT, /* Append FCS to skb pkt data */ + NETIF_F_RXALL_BIT, /* Receive errored frames too */ + + /* + * Add your fresh new feature above and remember to update + * netdev_features_strings[] in net/core/ethtool.c and maybe + * some feature mask #defines below. Please also describe it + * in Documentation/networking/netdev-features.txt. + */ + + /**/NETDEV_FEATURE_COUNT +}; + +/* copy'n'paste compression ;) */ +#define __NETIF_F_BIT(bit) ((netdev_features_t)1 << (bit)) +#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT) + +#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC) +#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU) +#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) +#define NETIF_F_FSO __NETIF_F(FSO) +#define NETIF_F_GRO __NETIF_F(GRO) +#define NETIF_F_GSO __NETIF_F(GSO) +#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) +#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) +#define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM) +#define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER) +#define NETIF_F_HW_VLAN_RX __NETIF_F(HW_VLAN_RX) +#define NETIF_F_HW_VLAN_TX __NETIF_F(HW_VLAN_TX) +#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) +#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) +#define NETIF_F_LLTX __NETIF_F(LLTX) +#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK) +#define NETIF_F_LRO __NETIF_F(LRO) +#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL) +#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY) +#define NETIF_F_NTUPLE __NETIF_F(NTUPLE) +#define NETIF_F_RXCSUM __NETIF_F(RXCSUM) +#define NETIF_F_RXHASH __NETIF_F(RXHASH) +#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM) +#define NETIF_F_SG __NETIF_F(SG) +#define NETIF_F_TSO6 __NETIF_F(TSO6) +#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) +#define NETIF_F_TSO __NETIF_F(TSO) +#define NETIF_F_UFO __NETIF_F(UFO) +#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) +#define NETIF_F_RXFCS __NETIF_F(RXFCS) +#define NETIF_F_RXALL __NETIF_F(RXALL) + +/* Features valid for ethtool to change */ +/* = all defined minus driver/device-class-related */ +#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ + NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) + +/* remember that ((t)1 << t_BITS) is undefined in C99 */ +#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \ + (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \ + ~NETIF_F_NEVER_CHANGE) + +/* Segmentation offload feature mask */ +#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ + __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) + +/* List of features with software fallbacks. 
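As a driver-side illustration of these masks, a sketch of how a probe routine might advertise offloads (dev is a struct net_device from <linux/netdevice.h>; the chosen bits are arbitrary):

/* Sketch only: hw_features is what ethtool may toggle, features is what is
 * currently enabled, vlan_features is inherited by VLAN devices on top. */
static void example_announce_features(struct net_device *dev)
{
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_TSO | NETIF_F_RXCSUM;
	dev->features |= dev->hw_features;
	dev->vlan_features = dev->features;
}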
*/ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ + NETIF_F_TSO6 | NETIF_F_UFO) + +#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM +#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) +#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) +#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) + +#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) + +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) + +/* + * If one device supports one of these features, then enable them + * for all in netdev_increment_features. + */ +#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ + NETIF_F_SG | NETIF_F_HIGHDMA | \ + NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) +/* + * If one device doesn't support one of these features, then disable it + * for all in netdev_increment_features. + */ +#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) + +/* changeable features with no special hardware requirements */ +#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) + +#endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h new file mode 100644 index 00000000..33900a53 --- /dev/null +++ b/include/linux/netdevice.h @@ -0,0 +1,2804 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Interfaces handler. + * + * Version: @(#)dev.h 1.0.10 08/12/93 + * + * Authors: Ross Biro + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Corey Minyard <wf-rch!minyard@relay.EU.net> + * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> + * Alan Cox, <alan@lxorguk.ukuu.org.uk> + * Bjorn Ekwall. <bj0rn@blox.se> + * Pekka Riikonen <priikone@poseidon.pspt.fi> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * Moved to /usr/include/linux for NET3 + */ +#ifndef _LINUX_NETDEVICE_H +#define _LINUX_NETDEVICE_H + +#include <linux/if.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/if_link.h> + +#ifdef __KERNEL__ +#include <linux/pm_qos.h> +#include <linux/timer.h> +#include <linux/bug.h> +#include <linux/delay.h> +#include <linux/atomic.h> +#include <asm/cache.h> +#include <asm/byteorder.h> + +#include <linux/percpu.h> +#include <linux/rculist.h> +#include <linux/dmaengine.h> +#include <linux/workqueue.h> +#include <linux/dynamic_queue_limits.h> + +#include <linux/ethtool.h> +#include <net/net_namespace.h> +#include <net/dsa.h> +#ifdef CONFIG_DCB +#include <net/dcbnl.h> +#endif +#include <net/netprio_cgroup.h> + +#include <linux/netdev_features.h> + +struct netpoll_info; +struct device; +struct phy_device; +/* 802.11 specific */ +struct wireless_dev; + /* source back-compat hooks */ +#define SET_ETHTOOL_OPS(netdev,ops) \ + ( (netdev)->ethtool_ops = (ops) ) + +/* hardware address assignment types */ +#define NET_ADDR_PERM 0 /* address is permanent (default) */ +#define NET_ADDR_RANDOM 1 /* address is generated randomly */ +#define NET_ADDR_STOLEN 2 /* address is stolen from other device */ + +/* Backlog congestion levels */ +#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ +#define NET_RX_DROP 1 /* packet dropped */ + +/* + * Transmit return codes: transmit return codes originate from three different + * namespaces: + * + * - qdisc return codes + * - driver transmit return codes + * - errno values + * + * Drivers are allowed to return any one of those in their hard_start_xmit() + * function. Real network devices commonly used with qdiscs should only return + * the driver transmit return codes though - when qdiscs are used, the actual + * transmission happens asynchronously, so the value is not propagated to + * higher layers. Virtual network devices transmit synchronously, in this case + * the driver transmit return codes are consumed by dev_queue_xmit(), all + * others are propagated to higher layers. + */ + +/* qdisc ->enqueue() return codes. */ +#define NET_XMIT_SUCCESS 0x00 +#define NET_XMIT_DROP 0x01 /* skb dropped */ +#define NET_XMIT_CN 0x02 /* congestion notification */ +#define NET_XMIT_POLICED 0x03 /* skb is shot by police */ +#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ + +/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It + * indicates that the device will soon be dropping packets, or already drops + * some packets of the same priority; prompting us to send less aggressively. */ +#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) +#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) + +/* Driver transmit return codes */ +#define NETDEV_TX_MASK 0xf0 + +enum netdev_tx { + __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ + NETDEV_TX_OK = 0x00, /* driver took care of packet */ + NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ + NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ +}; +typedef enum netdev_tx netdev_tx_t; + +/* + * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; + * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. 
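A caller-side sketch (illustrative only, not the core dev_queue_xmit() path; the net_device fields used here are defined later in this header) of how these codes are interpreted with dev_xmit_complete(), defined immediately below:

/* Sketch only: make one transmit attempt and decide who owns the skb. */
static void example_try_xmit(struct net_device *dev, struct sk_buff *skb)
{
	netdev_tx_t rc = dev->netdev_ops->ndo_start_xmit(skb, dev);

	if (dev_xmit_complete(rc))
		return;	/* driver consumed the skb: sent, dropped or errno */

	/* NETDEV_TX_BUSY / NETDEV_TX_LOCKED: the skb is still ours and must
	 * be requeued or freed by the caller. */
	kfree_skb(skb);
}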
+ */ +static inline bool dev_xmit_complete(int rc) +{ + /* + * Positive cases with an skb consumed by a driver: + * - successful transmission (rc == NETDEV_TX_OK) + * - error while transmitting (rc < 0) + * - error while queueing to a different device (rc & NET_XMIT_MASK) + */ + if (likely(rc < NET_XMIT_MASK)) + return true; + + return false; +} + +#endif + +#define MAX_ADDR_LEN 32 /* Largest hardware address length */ + +/* Initial net device group. All devices belong to group 0 by default. */ +#define INIT_NETDEV_GROUP 0 + +#ifdef __KERNEL__ +/* + * Compute the worst case header length according to the protocols + * used. + */ + +#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) +# if defined(CONFIG_MAC80211_MESH) +# define LL_MAX_HEADER 128 +# else +# define LL_MAX_HEADER 96 +# endif +#elif IS_ENABLED(CONFIG_TR) +# define LL_MAX_HEADER 48 +#else +# define LL_MAX_HEADER 32 +#endif + +#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ + !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) +#define MAX_HEADER LL_MAX_HEADER +#else +#define MAX_HEADER (LL_MAX_HEADER + 48) +#endif + +/* + * Old network device statistics. Fields are native words + * (unsigned long) so they can be read and written atomically. + */ + +struct net_device_stats { + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long rx_errors; + unsigned long tx_errors; + unsigned long rx_dropped; + unsigned long tx_dropped; + unsigned long multicast; + unsigned long collisions; + unsigned long rx_length_errors; + unsigned long rx_over_errors; + unsigned long rx_crc_errors; + unsigned long rx_frame_errors; + unsigned long rx_fifo_errors; + unsigned long rx_missed_errors; + unsigned long tx_aborted_errors; + unsigned long tx_carrier_errors; + unsigned long tx_fifo_errors; + unsigned long tx_heartbeat_errors; + unsigned long tx_window_errors; + unsigned long rx_compressed; + unsigned long tx_compressed; +}; + +#endif /* __KERNEL__ */ + + +/* Media selection options. 
*/ +enum { + IF_PORT_UNKNOWN = 0, + IF_PORT_10BASE2, + IF_PORT_10BASET, + IF_PORT_AUI, + IF_PORT_100BASET, + IF_PORT_100BASETX, + IF_PORT_100BASEFX +}; + +#ifdef __KERNEL__ + +#include <linux/cache.h> +#include <linux/skbuff.h> + +#ifdef CONFIG_RPS +#include <linux/static_key.h> +extern struct static_key rps_needed; +#endif + +struct neighbour; +struct neigh_parms; +struct sk_buff; + +struct netdev_hw_addr { + struct list_head list; + unsigned char addr[MAX_ADDR_LEN]; + unsigned char type; +#define NETDEV_HW_ADDR_T_LAN 1 +#define NETDEV_HW_ADDR_T_SAN 2 +#define NETDEV_HW_ADDR_T_SLAVE 3 +#define NETDEV_HW_ADDR_T_UNICAST 4 +#define NETDEV_HW_ADDR_T_MULTICAST 5 + bool synced; + bool global_use; + int refcount; + struct rcu_head rcu_head; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; +}; + +#define netdev_hw_addr_list_count(l) ((l)->count) +#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) +#define netdev_hw_addr_list_for_each(ha, l) \ + list_for_each_entry(ha, &(l)->list, list) + +#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) +#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) +#define netdev_for_each_uc_addr(ha, dev) \ + netdev_hw_addr_list_for_each(ha, &(dev)->uc) + +#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) +#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) +#define netdev_for_each_mc_addr(ha, dev) \ + netdev_hw_addr_list_for_each(ha, &(dev)->mc) + +struct hh_cache { + u16 hh_len; + u16 __pad; + seqlock_t hh_lock; + + /* cached hardware header; allow for machine alignment needs. */ +#define HH_DATA_MOD 16 +#define HH_DATA_OFF(__len) \ + (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) +#define HH_DATA_ALIGN(__len) \ + (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) + unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; +}; + +/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. + * Alternative is: + * dev->hard_header_len ? (dev->hard_header_len + + * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 + * + * We could use other alignment values, but we must maintain the + * relationship HH alignment <= LL alignment. + */ +#define LL_RESERVED_SPACE(dev) \ + ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) +#define LL_RESERVED_SPACE_EXTRA(dev,extra) \ + ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) + +struct header_ops { + int (*create) (struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len); + int (*parse)(const struct sk_buff *skb, unsigned char *haddr); + int (*rebuild)(struct sk_buff *skb); + int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); + void (*cache_update)(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char *haddr); +}; + +/* These flag bits are private to the generic network queueing + * layer, they may not be explicitly referenced by any other + * code. + */ + +enum netdev_state_t { + __LINK_STATE_START, + __LINK_STATE_PRESENT, + __LINK_STATE_NOCARRIER, + __LINK_STATE_LINKWATCH_PENDING, + __LINK_STATE_DORMANT, +}; + + +/* + * This structure holds at boot time configured netdevice settings. They + * are then used in the device probing. 
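As an illustration of LL_RESERVED_SPACE() above, a sketch of how a protocol might allocate an skb with enough headroom for the device's hard header (alloc_skb() and skb_reserve() come from <linux/skbuff.h>):

/* Sketch only: reserve link-layer headroom up front so the driver does not
 * have to reallocate the skb to prepend its header. */
static struct sk_buff *example_alloc_skb_for(struct net_device *dev,
					     unsigned int payload)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload,
					GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
	return skb;
}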
+ */ +struct netdev_boot_setup { + char name[IFNAMSIZ]; + struct ifmap map; +}; +#define NETDEV_BOOT_SETUP_MAX 8 + +extern int __init netdev_boot_setup(char *str); + +/* + * Structure for NAPI scheduling similar to tasklet but with weighting + */ +struct napi_struct { + /* The poll_list must only be managed by the entity which + * changes the state of the NAPI_STATE_SCHED bit. This means + * whoever atomically sets that bit can add this napi_struct + * to the per-cpu poll_list, and whoever clears that bit + * can remove from the list right before clearing the bit. + */ + struct list_head poll_list; + + unsigned long state; + int weight; + int (*poll)(struct napi_struct *, int); +#ifdef CONFIG_NETPOLL + spinlock_t poll_lock; + int poll_owner; +#endif + + unsigned int gro_count; + + struct net_device *dev; + struct list_head dev_list; + struct sk_buff *gro_list; + struct sk_buff *skb; +}; + +enum { + NAPI_STATE_SCHED, /* Poll is scheduled */ + NAPI_STATE_DISABLE, /* Disable pending */ + NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ +}; + +enum gro_result { + GRO_MERGED, + GRO_MERGED_FREE, + GRO_HELD, + GRO_NORMAL, + GRO_DROP, +}; +typedef enum gro_result gro_result_t; + +/* + * enum rx_handler_result - Possible return values for rx_handlers. + * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it + * further. + * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in + * case skb->dev was changed by rx_handler. + * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. + * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called. + * + * rx_handlers are functions called from inside __netif_receive_skb(), to do + * special processing of the skb, prior to delivery to protocol handlers. + * + * Currently, a net_device can only have a single rx_handler registered. Trying + * to register a second rx_handler will return -EBUSY. + * + * To register a rx_handler on a net_device, use netdev_rx_handler_register(). + * To unregister a rx_handler on a net_device, use + * netdev_rx_handler_unregister(). + * + * Upon return, rx_handler is expected to tell __netif_receive_skb() what to + * do with the skb. + * + * If the rx_handler consumed to skb in some way, it should return + * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for + * the skb to be delivered in some other ways. + * + * If the rx_handler changed skb->dev, to divert the skb to another + * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the + * new device will be called if it exists. + * + * If the rx_handler consider the skb should be ignored, it should return + * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that + * are registred on exact device (ptype->dev == skb->dev). + * + * If the rx_handler didn't changed skb->dev, but want the skb to be normally + * delivered, it should return RX_HANDLER_PASS. + * + * A device without a registered rx_handler will behave as if rx_handler + * returned RX_HANDLER_PASS. 
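A minimal sketch of such a handler (illustrative only; it uses the enum and typedef defined just below and would be attached with netdev_rx_handler_register() as noted above):

/* Sketch only: look at the frame, then let normal delivery continue. */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	/* A real handler could take ownership here and return
	 * RX_HANDLER_CONSUMED, or point skb->dev at another device, update
	 * *pskb and return RX_HANDLER_ANOTHER. This sketch changes nothing. */
	*pskb = skb;
	return RX_HANDLER_PASS;
}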
+ */ + +enum rx_handler_result { + RX_HANDLER_CONSUMED, + RX_HANDLER_ANOTHER, + RX_HANDLER_EXACT, + RX_HANDLER_PASS, +}; +typedef enum rx_handler_result rx_handler_result_t; +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); + +extern void __napi_schedule(struct napi_struct *n); + +static inline bool napi_disable_pending(struct napi_struct *n) +{ + return test_bit(NAPI_STATE_DISABLE, &n->state); +} + +/** + * napi_schedule_prep - check if napi can be scheduled + * @n: napi context + * + * Test if NAPI routine is already running, and if not mark + * it as running. This is used as a condition variable + * insure only one NAPI poll instance runs. We also make + * sure there is no pending NAPI disable. + */ +static inline bool napi_schedule_prep(struct napi_struct *n) +{ + return !napi_disable_pending(n) && + !test_and_set_bit(NAPI_STATE_SCHED, &n->state); +} + +/** + * napi_schedule - schedule NAPI poll + * @n: napi context + * + * Schedule NAPI poll routine to be called if it is not already + * running. + */ +static inline void napi_schedule(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule(n); +} + +/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ +static inline bool napi_reschedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) { + __napi_schedule(napi); + return true; + } + return false; +} + +/** + * napi_complete - NAPI processing complete + * @n: napi context + * + * Mark NAPI processing as complete. + */ +extern void __napi_complete(struct napi_struct *n); +extern void napi_complete(struct napi_struct *n); + +/** + * napi_disable - prevent NAPI from scheduling + * @n: napi context + * + * Stop NAPI from being scheduled on this context. + * Waits till any outstanding processing completes. + */ +static inline void napi_disable(struct napi_struct *n) +{ + set_bit(NAPI_STATE_DISABLE, &n->state); + while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) + msleep(1); + clear_bit(NAPI_STATE_DISABLE, &n->state); +} + +/** + * napi_enable - enable NAPI scheduling + * @n: napi context + * + * Resume NAPI from being scheduled on this context. + * Must be paired with napi_disable. + */ +static inline void napi_enable(struct napi_struct *n) +{ + BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); + smp_mb__before_clear_bit(); + clear_bit(NAPI_STATE_SCHED, &n->state); +} + +#ifdef CONFIG_SMP +/** + * napi_synchronize - wait until NAPI is not running + * @n: napi context + * + * Wait until NAPI is done being scheduled on this context. + * Waits till any outstanding processing completes but + * does not disable future activations. + */ +static inline void napi_synchronize(const struct napi_struct *n) +{ + while (test_bit(NAPI_STATE_SCHED, &n->state)) + msleep(1); +} +#else +# define napi_synchronize(n) barrier() +#endif + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF, + __QUEUE_STATE_STACK_XOFF, + __QUEUE_STATE_FROZEN, +#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \ + (1 << __QUEUE_STATE_STACK_XOFF)) +#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ + (1 << __QUEUE_STATE_FROZEN)) +}; +/* + * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The + * netif_tx_* functions below are used to manipulate this flag. The + * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit + * queue independently. 
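Stepping back to the NAPI helpers above, the usual driver pattern is an interrupt handler that schedules the poll and a poll routine that completes it. A sketch follows; example_priv and the example_* helpers are hypothetical driver code, not kernel API:

#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED (for this sketch) */

struct example_priv {				/* hypothetical private data */
	struct napi_struct napi;
};

/* hypothetical, device-specific helpers */
static void example_mask_rx_irq(struct example_priv *priv);
static void example_unmask_rx_irq(struct example_priv *priv);
static int example_clean_rx(struct example_priv *priv, int budget);

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		example_mask_rx_irq(priv);	/* quiet the device */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int done = example_clean_rx(priv, budget);

	if (done < budget) {
		napi_complete(napi);
		example_unmask_rx_irq(priv);	/* re-arm the interrupt */
	}
	return done;
}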
The netif_xmit_*stopped functions below are called + * to check if the queue has been stopped by the driver or stack (either + * of the XOFF bits are set in the state). Drivers should not need to call + * netif_xmit*stopped functions, they should only be using netif_tx_*. + */ + +struct netdev_queue { +/* + * read mostly part + */ + struct net_device *dev; + struct Qdisc *qdisc; + struct Qdisc *qdisc_sleeping; +#ifdef CONFIG_SYSFS + struct kobject kobj; +#endif +#if defined(CONFIG_XPS) && defined(CONFIG_NUMA) + int numa_node; +#endif +/* + * write mostly part + */ + spinlock_t _xmit_lock ____cacheline_aligned_in_smp; + int xmit_lock_owner; + /* + * please use this field instead of dev->trans_start + */ + unsigned long trans_start; + + /* + * Number of TX timeouts for this queue + * (/sys/class/net/DEV/Q/trans_timeout) + */ + unsigned long trans_timeout; + + unsigned long state; + +#ifdef CONFIG_BQL + struct dql dql; +#endif +} ____cacheline_aligned_in_smp; + +static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) +{ +#if defined(CONFIG_XPS) && defined(CONFIG_NUMA) + return q->numa_node; +#else + return NUMA_NO_NODE; +#endif +} + +static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) +{ +#if defined(CONFIG_XPS) && defined(CONFIG_NUMA) + q->numa_node = node; +#endif +} + +#ifdef CONFIG_RPS +/* + * This structure holds an RPS map which can be of variable length. The + * map is an array of CPUs. + */ +struct rps_map { + unsigned int len; + struct rcu_head rcu; + u16 cpus[0]; +}; +#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) + +/* + * The rps_dev_flow structure contains the mapping of a flow to a CPU, the + * tail pointer for that CPU's input queue at the time of last enqueue, and + * a hardware filter index. + */ +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; +#define RPS_NO_FILTER 0xffff + +/* + * The rps_dev_flow_table structure contains a table of flow mappings. + */ +struct rps_dev_flow_table { + unsigned int mask; + struct rcu_head rcu; + struct work_struct free_work; + struct rps_dev_flow flows[0]; +}; +#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ + ((_num) * sizeof(struct rps_dev_flow))) + +/* + * The rps_sock_flow_table contains mappings of flows to the last CPU + * on which they were processed by the application (set in recvmsg). + */ +struct rps_sock_flow_table { + unsigned int mask; + u16 ents[0]; +}; +#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ + ((_num) * sizeof(u16))) + +#define RPS_NO_CPU 0xffff + +static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, + u32 hash) +{ + if (table && hash) { + unsigned int cpu, index = hash & table->mask; + + /* We only give a hint, preemption can change cpu under us */ + cpu = raw_smp_processor_id(); + + if (table->ents[index] != cpu) + table->ents[index] = cpu; + } +} + +static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, + u32 hash) +{ + if (table && hash) + table->ents[hash & table->mask] = RPS_NO_CPU; +} + +extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; + +#ifdef CONFIG_RFS_ACCEL +extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, + u32 flow_id, u16 filter_id); +#endif + +/* This structure contains an instance of an RX queue. 
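To illustrate the sock flow table above, a sketch (not the in-tree recvmsg path; it assumes CONFIG_RPS and that skb->rxhash has already been computed) of recording the CPU that last handled a flow:

/* Sketch only: publish, under RCU, which CPU is consuming this flow. */
static inline void example_note_flow_cpu(const struct sk_buff *skb)
{
	struct rps_sock_flow_table *tbl;

	rcu_read_lock();
	tbl = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(tbl, skb->rxhash);
	rcu_read_unlock();
}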
*/ +struct netdev_rx_queue { + struct rps_map __rcu *rps_map; + struct rps_dev_flow_table __rcu *rps_flow_table; + struct kobject kobj; + struct net_device *dev; +} ____cacheline_aligned_in_smp; +#endif /* CONFIG_RPS */ + +#ifdef CONFIG_XPS +/* + * This structure holds an XPS map which can be of variable length. The + * map is an array of queues. + */ +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct rcu_head rcu; + u16 queues[0]; +}; +#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) +#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ + / sizeof(u16)) + +/* + * This structure holds all XPS maps for device. Maps are indexed by CPU. + */ +struct xps_dev_maps { + struct rcu_head rcu; + struct xps_map __rcu *cpu_map[0]; +}; +#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \ + (nr_cpu_ids * sizeof(struct xps_map *))) +#endif /* CONFIG_XPS */ + +#define TC_MAX_QUEUE 16 +#define TC_BITMASK 15 +/* HW offloaded queuing disciplines txq count and offset maps */ +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +/* + * This structure is to hold information about the device + * configured to run FCoE protocol stack. + */ +struct netdev_fcoe_hbainfo { + char manufacturer[64]; + char serial_number[64]; + char hardware_version[64]; + char driver_version[64]; + char optionrom_version[64]; + char firmware_version[64]; + char model[256]; + char model_description[256]; +}; +#endif + +/* + * This structure defines the management hooks for network devices. + * The following hooks can be defined; unless noted otherwise, they are + * optional and can be filled with a null pointer. + * + * int (*ndo_init)(struct net_device *dev); + * This function is called once when network device is registered. + * The network device can use this to any late stage initializaton + * or semantic validattion. It can fail with an error code which will + * be propogated back to register_netdev + * + * void (*ndo_uninit)(struct net_device *dev); + * This function is called when device is unregistered or when registration + * fails. It is not called if init fails. + * + * int (*ndo_open)(struct net_device *dev); + * This function is called when network device transistions to the up + * state. + * + * int (*ndo_stop)(struct net_device *dev); + * This function is called when network device transistions to the down + * state. + * + * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, + * struct net_device *dev); + * Called when a packet needs to be transmitted. + * Must return NETDEV_TX_OK , NETDEV_TX_BUSY. + * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) + * Required can not be NULL. + * + * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); + * Called to decide which queue to when device supports multiple + * transmit queues. + * + * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); + * This function is called to allow device receiver to make + * changes to configuration when multicast or promiscious is enabled. + * + * void (*ndo_set_rx_mode)(struct net_device *dev); + * This function is called device changes address list filtering. + * If driver handles unicast address filtering, it should set + * IFF_UNICAST_FLT to its priv_flags. + * + * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); + * This function is called when the Media Access Control address + * needs to be changed. 
If this interface is not defined, the + * mac address can not be changed. + * + * int (*ndo_validate_addr)(struct net_device *dev); + * Test if Media Access Control address is valid for the device. + * + * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); + * Called when a user request an ioctl which can't be handled by + * the generic interface code. If not defined ioctl's return + * not supported error code. + * + * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); + * Used to set network devices bus interface parameters. This interface + * is retained for legacy reason, new devices should use the bus + * interface (PCI) for low level management. + * + * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); + * Called when a user wants to change the Maximum Transfer Unit + * of a device. If not defined, any request to change MTU will + * will return an error. + * + * void (*ndo_tx_timeout)(struct net_device *dev); + * Callback uses when the transmitter has not made any progress + * for dev->watchdog ticks. + * + * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, + * struct rtnl_link_stats64 *storage); + * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); + * Called when a user wants to get the network device usage + * statistics. Drivers must do one of the following: + * 1. Define @ndo_get_stats64 to fill in a zero-initialised + * rtnl_link_stats64 structure passed by the caller. + * 2. Define @ndo_get_stats to update a net_device_stats structure + * (which should normally be dev->stats) and return a pointer to + * it. The structure may be changed asynchronously only if each + * field is written atomically. + * 3. Update dev->stats asynchronously and atomically, and define + * neither operation. + * + * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); + * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) + * this function is called when a VLAN id is registered. + * + * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); + * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) + * this function is called when a VLAN id is unregistered. + * + * void (*ndo_poll_controller)(struct net_device *dev); + * + * SR-IOV management functions. + * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); + * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); + * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); + * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); + * int (*ndo_get_vf_config)(struct net_device *dev, + * int vf, struct ifla_vf_info *ivf); + * int (*ndo_set_vf_port)(struct net_device *dev, int vf, + * struct nlattr *port[]); + * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); + * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) + * Called to setup 'tc' number of traffic classes in the net device. This + * is always called from the stack with the rtnl lock held and netif tx + * queues stopped. This allows the netdevice to perform queue management + * safely. + * + * Fiber Channel over Ethernet (FCoE) offload functions. + * int (*ndo_fcoe_enable)(struct net_device *dev); + * Called when the FCoE protocol stack wants to start using LLD for FCoE + * so the underlying device can perform whatever needed configuration or + * initialization to support acceleration of FCoE traffic. 
+ * + * int (*ndo_fcoe_disable)(struct net_device *dev); + * Called when the FCoE protocol stack wants to stop using LLD for FCoE + * so the underlying device can perform whatever needed clean-ups to + * stop supporting acceleration of FCoE traffic. + * + * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, + * struct scatterlist *sgl, unsigned int sgc); + * Called when the FCoE Initiator wants to initialize an I/O that + * is a possible candidate for Direct Data Placement (DDP). The LLD can + * perform necessary setup and returns 1 to indicate the device is set up + * successfully to perform DDP on this I/O, otherwise this returns 0. + * + * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); + * Called when the FCoE Initiator/Target is done with the DDPed I/O as + * indicated by the FC exchange id 'xid', so the underlying device can + * clean up and reuse resources for later DDP requests. + * + * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, + * struct scatterlist *sgl, unsigned int sgc); + * Called when the FCoE Target wants to initialize an I/O that + * is a possible candidate for Direct Data Placement (DDP). The LLD can + * perform necessary setup and returns 1 to indicate the device is set up + * successfully to perform DDP on this I/O, otherwise this returns 0. + * + * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, + * struct netdev_fcoe_hbainfo *hbainfo); + * Called when the FCoE Protocol stack wants information on the underlying + * device. This information is utilized by the FCoE protocol stack to + * register attributes with Fiber Channel management service as per the + * FC-GS Fabric Device Management Information(FDMI) specification. + * + * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); + * Called when the underlying device wants to override default World Wide + * Name (WWN) generation mechanism in FCoE protocol stack to pass its own + * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE + * protocol stack to use. + * + * RFS acceleration. + * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, + * u16 rxq_index, u32 flow_id); + * Set hardware filter for RFS. rxq_index is the target queue index; + * flow_id is a flow ID to be passed to rps_may_expire_flow() later. + * Return the filter ID on success, or a negative error code. + * + * Slave management functions (for bridge, bonding, etc). User should + * call netdev_set_master() to set dev->master properly. + * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); + * Called to make another netdev an underling. + * + * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); + * Called to release previously enslaved netdev. + * + * Feature/offload setting functions. + * netdev_features_t (*ndo_fix_features)(struct net_device *dev, + * netdev_features_t features); + * Adjusts the requested feature flags according to device-specific + * constraints, and returns the resulting flags. Must not modify + * the device state. + * + * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); + * Called to update device configuration to new features. Passed + * feature set might be less than what was returned by ndo_fix_features()). + * Must return >0 or -errno if it changed dev->features itself. 
+ * + */ +struct net_device_ops { + int (*ndo_init)(struct net_device *dev); + void (*ndo_uninit)(struct net_device *dev); + int (*ndo_open)(struct net_device *dev); + int (*ndo_stop)(struct net_device *dev); + netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, + struct net_device *dev); + u16 (*ndo_select_queue)(struct net_device *dev, + struct sk_buff *skb); + void (*ndo_change_rx_flags)(struct net_device *dev, + int flags); + void (*ndo_set_rx_mode)(struct net_device *dev); + int (*ndo_set_mac_address)(struct net_device *dev, + void *addr); + int (*ndo_validate_addr)(struct net_device *dev); + int (*ndo_do_ioctl)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*ndo_set_config)(struct net_device *dev, + struct ifmap *map); + int (*ndo_change_mtu)(struct net_device *dev, + int new_mtu); + int (*ndo_neigh_setup)(struct net_device *dev, + struct neigh_parms *); + void (*ndo_tx_timeout) (struct net_device *dev); + + struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, + struct rtnl_link_stats64 *storage); + struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); + + int (*ndo_vlan_rx_add_vid)(struct net_device *dev, + unsigned short vid); + int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, + unsigned short vid); +#ifdef CONFIG_NET_POLL_CONTROLLER + void (*ndo_poll_controller)(struct net_device *dev); + int (*ndo_netpoll_setup)(struct net_device *dev, + struct netpoll_info *info); + void (*ndo_netpoll_cleanup)(struct net_device *dev); +#endif + int (*ndo_set_vf_mac)(struct net_device *dev, + int queue, u8 *mac); + int (*ndo_set_vf_vlan)(struct net_device *dev, + int queue, u16 vlan, u8 qos); + int (*ndo_set_vf_tx_rate)(struct net_device *dev, + int vf, int rate); + int (*ndo_set_vf_spoofchk)(struct net_device *dev, + int vf, bool setting); + int (*ndo_get_vf_config)(struct net_device *dev, + int vf, + struct ifla_vf_info *ivf); + int (*ndo_set_vf_port)(struct net_device *dev, + int vf, + struct nlattr *port[]); + int (*ndo_get_vf_port)(struct net_device *dev, + int vf, struct sk_buff *skb); + int (*ndo_setup_tc)(struct net_device *dev, u8 tc); +#if IS_ENABLED(CONFIG_FCOE) + int (*ndo_fcoe_enable)(struct net_device *dev); + int (*ndo_fcoe_disable)(struct net_device *dev); + int (*ndo_fcoe_ddp_setup)(struct net_device *dev, + u16 xid, + struct scatterlist *sgl, + unsigned int sgc); + int (*ndo_fcoe_ddp_done)(struct net_device *dev, + u16 xid); + int (*ndo_fcoe_ddp_target)(struct net_device *dev, + u16 xid, + struct scatterlist *sgl, + unsigned int sgc); + int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, + struct netdev_fcoe_hbainfo *hbainfo); +#endif + +#if IS_ENABLED(CONFIG_LIBFCOE) +#define NETDEV_FCOE_WWNN 0 +#define NETDEV_FCOE_WWPN 1 + int (*ndo_fcoe_get_wwn)(struct net_device *dev, + u64 *wwn, int type); +#endif + +#ifdef CONFIG_RFS_ACCEL + int (*ndo_rx_flow_steer)(struct net_device *dev, + const struct sk_buff *skb, + u16 rxq_index, + u32 flow_id); +#endif + int (*ndo_add_slave)(struct net_device *dev, + struct net_device *slave_dev); + int (*ndo_del_slave)(struct net_device *dev, + struct net_device *slave_dev); + netdev_features_t (*ndo_fix_features)(struct net_device *dev, + netdev_features_t features); + int (*ndo_set_features)(struct net_device *dev, + netdev_features_t features); + int (*ndo_neigh_construct)(struct neighbour *n); + void (*ndo_neigh_destroy)(struct neighbour *n); +}; + +/* + * The DEVICE structure. + * Actually, this whole structure is a big mistake. 
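For reference, a minimal ops table for the structure defined above might look like the sketch below; the example_* functions are hypothetical driver code, while eth_change_mtu(), eth_mac_addr() and eth_validate_addr() are the stock helpers from <linux/etherdevice.h>:

static int example_open(struct net_device *dev);		/* hypothetical */
static int example_stop(struct net_device *dev);		/* hypothetical */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev);	/* hypothetical */

static const struct net_device_ops example_netdev_ops = {
	.ndo_open		= example_open,
	.ndo_stop		= example_stop,
	.ndo_start_xmit		= example_start_xmit,	/* the only required hook */
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

A driver installs the table with dev->netdev_ops = &example_netdev_ops before calling register_netdev().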
It mixes I/O + * data with strictly "high-level" data, and it has to know about + * almost every data structure used in the INET module. + * + * FIXME: cleanup struct net_device such that network protocol info + * moves out. + */ + +struct net_device { + + /* + * This is the first field of the "visible" part of this structure + * (i.e. as seen by users in the "Space.c" file). It is the name + * of the interface. + */ + char name[IFNAMSIZ]; + + struct pm_qos_request pm_qos_req; + + /* device name hash chain */ + struct hlist_node name_hlist; + /* snmp alias */ + char *ifalias; + + /* + * I/O specific fields + * FIXME: Merge these and struct ifmap into one + */ + unsigned long mem_end; /* shared mem end */ + unsigned long mem_start; /* shared mem start */ + unsigned long base_addr; /* device I/O address */ + unsigned int irq; /* device IRQ number */ + + /* + * Some hardware also needs these fields, but they are not + * part of the usual set specified in Space.c. + */ + + unsigned long state; + + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + + /* currently active device features */ + netdev_features_t features; + /* user-changeable features */ + netdev_features_t hw_features; + /* user-requested features */ + netdev_features_t wanted_features; + /* mask of features inheritable by VLAN devices */ + netdev_features_t vlan_features; + + /* Interface index. Unique device identifier */ + int ifindex; + int iflink; + + struct net_device_stats stats; + atomic_long_t rx_dropped; /* dropped packets by core network + * Do not use this in drivers. + */ + +#ifdef CONFIG_WIRELESS_EXT + /* List of functions to handle Wireless Extensions (instead of ioctl). + * See <net/iw_handler.h> for details. Jean II */ + const struct iw_handler_def * wireless_handlers; + /* Instance data managed by the core of Wireless Extensions. */ + struct iw_public_data * wireless_data; +#endif + /* Management operations */ + const struct net_device_ops *netdev_ops; + const struct ethtool_ops *ethtool_ops; + + /* Hardware header description */ + const struct header_ops *header_ops; + + unsigned int flags; /* interface flags (a la BSD) */ + unsigned int priv_flags; /* Like 'flags' but invisible to userspace. + * See if.h for definitions. */ + unsigned short gflags; + unsigned short padded; /* How much padding added by alloc_netdev() */ + + unsigned char operstate; /* RFC2863 operstate */ + unsigned char link_mode; /* mapping policy to operstate */ + + unsigned char if_port; /* Selectable AUI, TP,..*/ + unsigned char dma; /* DMA channel */ + + unsigned int mtu; /* interface MTU value */ + unsigned short type; /* interface hardware type */ + unsigned short hard_header_len; /* hardware hdr length */ + + /* extra head- and tailroom the hardware may need, but not in all cases + * can this be guaranteed, especially tailroom. Some cases also use + * LL_MAX_HEADER instead to allocate the skb. + */ + unsigned short needed_headroom; + unsigned short needed_tailroom; + + /* Interface address info. 
*/ + unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ + unsigned char addr_assign_type; /* hw address assignment type */ + unsigned char addr_len; /* hardware address length */ + unsigned char neigh_priv_len; + unsigned short dev_id; /* for shared network cards */ + + spinlock_t addr_list_lock; + struct netdev_hw_addr_list uc; /* Unicast mac addresses */ + struct netdev_hw_addr_list mc; /* Multicast mac addresses */ + bool uc_promisc; + unsigned int promiscuity; + unsigned int allmulti; + + + /* Protocol specific pointers */ + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + struct vlan_info __rcu *vlan_info; /* VLAN info */ +#endif +#if IS_ENABLED(CONFIG_NET_DSA) + struct dsa_switch_tree *dsa_ptr; /* dsa specific data */ +#endif + void *atalk_ptr; /* AppleTalk link */ + struct in_device __rcu *ip_ptr; /* IPv4 specific data */ + struct dn_dev __rcu *dn_ptr; /* DECnet specific data */ + struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ + void *ec_ptr; /* Econet specific data */ + void *ax25_ptr; /* AX.25 specific data */ + struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, + assign before registering */ + +/* + * Cache lines mostly used on receive path (including eth_type_trans()) + */ + unsigned long last_rx; /* Time of last Rx + * This should not be set in + * drivers, unless really needed, + * because network stack (bonding) + * use it if/when necessary, to + * avoid dirtying this cache line. + */ + + struct net_device *master; /* Pointer to master device of a group, + * which this device is member of. + */ + + /* Interface address info used in eth_type_trans() */ + unsigned char *dev_addr; /* hw address, (before bcast + because most packets are + unicast) */ + + struct netdev_hw_addr_list dev_addrs; /* list of device + hw addresses */ + + unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ + +#ifdef CONFIG_SYSFS + struct kset *queues_kset; +#endif + +#ifdef CONFIG_RPS + struct netdev_rx_queue *_rx; + + /* Number of RX queues allocated at register_netdev() time */ + unsigned int num_rx_queues; + + /* Number of RX queues currently active in device */ + unsigned int real_num_rx_queues; + +#ifdef CONFIG_RFS_ACCEL + /* CPU reverse-mapping for RX completion interrupts, indexed + * by RX queue number. Assigned by driver. This must only be + * set if the ndo_rx_flow_steer operation is defined. */ + struct cpu_rmap *rx_cpu_rmap; +#endif +#endif + + rx_handler_func_t __rcu *rx_handler; + void __rcu *rx_handler_data; + + struct netdev_queue __rcu *ingress_queue; + +/* + * Cache lines mostly used on transmit path + */ + struct netdev_queue *_tx ____cacheline_aligned_in_smp; + + /* Number of TX queues allocated at alloc_netdev_mq() time */ + unsigned int num_tx_queues; + + /* Number of TX queues currently active in device */ + unsigned int real_num_tx_queues; + + /* root qdisc from userspace point of view */ + struct Qdisc *qdisc; + + unsigned long tx_queue_len; /* Max frames per queue allowed */ + spinlock_t tx_global_lock; + +#ifdef CONFIG_XPS + struct xps_dev_maps __rcu *xps_maps; +#endif + + /* These may be needed for future network-power-down code. */ + + /* + * trans_start here is expensive for high speed devices on SMP, + * please use netdev_queue->trans_start instead. 
+ */ + unsigned long trans_start; /* Time (in jiffies) of last Tx */ + + int watchdog_timeo; /* used by dev_watchdog() */ + struct timer_list watchdog_timer; + + /* Number of references to this device */ + int __percpu *pcpu_refcnt; + + /* delayed register/unregister */ + struct list_head todo_list; + /* device index hash chain */ + struct hlist_node index_hlist; + + struct list_head link_watch_list; + + /* register/unregister state machine */ + enum { NETREG_UNINITIALIZED=0, + NETREG_REGISTERED, /* completed register_netdevice */ + NETREG_UNREGISTERING, /* called unregister_netdevice */ + NETREG_UNREGISTERED, /* completed unregister todo */ + NETREG_RELEASED, /* called free_netdev */ + NETREG_DUMMY, /* dummy device for NAPI poll */ + } reg_state:8; + + bool dismantle; /* device is going do be freed */ + + enum { + RTNL_LINK_INITIALIZED, + RTNL_LINK_INITIALIZING, + } rtnl_link_state:16; + + /* Called from unregister, can be used to call free_netdev */ + void (*destructor)(struct net_device *dev); + +#ifdef CONFIG_NETPOLL + struct netpoll_info *npinfo; +#endif + +#ifdef CONFIG_NET_NS + /* Network namespace this network device is inside */ + struct net *nd_net; +#endif + + /* mid-layer private */ + union { + void *ml_priv; + struct pcpu_lstats __percpu *lstats; /* loopback stats */ + struct pcpu_tstats __percpu *tstats; /* tunnel stats */ + struct pcpu_dstats __percpu *dstats; /* dummy stats */ + }; + /* GARP */ + struct garp_port __rcu *garp_port; + + /* class/net/name entry */ + struct device dev; + /* space for optional device, statistics, and wireless sysfs groups */ + const struct attribute_group *sysfs_groups[4]; + + /* rtnetlink link ops */ + const struct rtnl_link_ops *rtnl_link_ops; + + /* for setting kernel sock attribute on TCP connection setup */ +#define GSO_MAX_SIZE 65536 + unsigned int gso_max_size; + +#ifdef CONFIG_DCB + /* Data Center Bridging netlink ops */ + const struct dcbnl_rtnl_ops *dcbnl_ops; +#endif + u8 num_tc; + struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; + u8 prio_tc_map[TC_BITMASK + 1]; + +#if IS_ENABLED(CONFIG_FCOE) + /* max exchange id for FCoE LRO by ddp */ + unsigned int fcoe_ddp_xid; +#endif +#if IS_ENABLED(CONFIG_NETPRIO_CGROUP) + struct netprio_map __rcu *priomap; +#endif + /* phy device may attach itself for hardware timestamping */ + struct phy_device *phydev; + + /* group the device belongs to */ + int group; +}; +#define to_net_dev(d) container_of(d, struct net_device, dev) + +#define NETDEV_ALIGN 32 + +static inline +int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) +{ + return dev->prio_tc_map[prio & TC_BITMASK]; +} + +static inline +int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) +{ + if (tc >= dev->num_tc) + return -EINVAL; + + dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; + return 0; +} + +static inline +void netdev_reset_tc(struct net_device *dev) +{ + dev->num_tc = 0; + memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); + memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); +} + +static inline +int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) +{ + if (tc >= dev->num_tc) + return -EINVAL; + + dev->tc_to_txq[tc].count = count; + dev->tc_to_txq[tc].offset = offset; + return 0; +} + +static inline +int netdev_set_num_tc(struct net_device *dev, u8 num_tc) +{ + if (num_tc > TC_MAX_QUEUE) + return -EINVAL; + + dev->num_tc = num_tc; + return 0; +} + +static inline +int netdev_get_num_tc(struct net_device *dev) +{ + return dev->num_tc; +} + +static inline +struct netdev_queue 
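+
+/* Sketch of how the tc helpers above fit together (queue counts and the
+ * priority mapping are arbitrary example values): a driver exposing two
+ * traffic classes across eight TX queues, with priority 7 steered to the
+ * second class, could do
+ *
+ *	netdev_set_num_tc(dev, 2);
+ *	netdev_set_tc_queue(dev, 0, 4, 0);
+ *	netdev_set_tc_queue(dev, 1, 4, 4);
+ *	netdev_set_prio_tc_map(dev, 7, 1);
+ */
+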
*netdev_get_tx_queue(const struct net_device *dev, + unsigned int index) +{ + return &dev->_tx[index]; +} + +static inline void netdev_for_each_tx_queue(struct net_device *dev, + void (*f)(struct net_device *, + struct netdev_queue *, + void *), + void *arg) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) + f(dev, &dev->_tx[i], arg); +} + +/* + * Net namespace inlines + */ +static inline +struct net *dev_net(const struct net_device *dev) +{ + return read_pnet(&dev->nd_net); +} + +static inline +void dev_net_set(struct net_device *dev, struct net *net) +{ +#ifdef CONFIG_NET_NS + release_net(dev->nd_net); + dev->nd_net = hold_net(net); +#endif +} + +static inline bool netdev_uses_dsa_tags(struct net_device *dev) +{ +#ifdef CONFIG_NET_DSA_TAG_DSA + if (dev->dsa_ptr != NULL) + return dsa_uses_dsa_tags(dev->dsa_ptr); +#endif + + return 0; +} + +static inline bool netdev_uses_trailer_tags(struct net_device *dev) +{ +#ifdef CONFIG_NET_DSA_TAG_TRAILER + if (dev->dsa_ptr != NULL) + return dsa_uses_trailer_tags(dev->dsa_ptr); +#endif + + return 0; +} + +/** + * netdev_priv - access network device private data + * @dev: network device + * + * Get network device private data + */ +static inline void *netdev_priv(const struct net_device *dev) +{ + return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); +} + +/* Set the sysfs physical device reference for the network logical device + * if set prior to registration will cause a symlink during initialization. + */ +#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) + +/* Set the sysfs device type for the network logical device to allow + * fin grained indentification of different network device types. For + * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc. + */ +#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) + +/** + * netif_napi_add - initialize a napi context + * @dev: network device + * @napi: napi context + * @poll: polling function + * @weight: default weight + * + * netif_napi_add() must be used to initialize a napi context prior to calling + * *any* of the other napi related functions. + */ +void netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight); + +/** + * netif_napi_del - remove a napi context + * @napi: napi context + * + * netif_napi_del() removes a napi context from the network device napi list + */ +void netif_napi_del(struct napi_struct *napi); + +struct napi_gro_cb { + /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ + void *frag0; + + /* Length of frag0. */ + unsigned int frag0_len; + + /* This indicates where we are processing relative to skb->data. */ + int data_offset; + + /* This is non-zero if the packet may be of the same flow. */ + int same_flow; + + /* This is non-zero if the packet cannot be merged with the new skb. */ + int flush; + + /* Number of segments aggregated. */ + int count; + + /* Free the skb? */ + int free; +}; + +#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) + +struct packet_type { + __be16 type; /* This is really htons(ether_type). 
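+	 * For example (an illustrative sketch; my_ipv4_rcv is a hypothetical
+	 * handler with the func signature below), an IPv4 handler would be
+	 * registered roughly as
+	 *
+	 *	static struct packet_type ip_packet_type = {
+	 *		.type	= cpu_to_be16(ETH_P_IP),
+	 *		.func	= my_ipv4_rcv,
+	 *	};
+	 *	dev_add_pack(&ip_packet_type);
+	 *
+	 * with ETH_P_IP taken from <linux/if_ether.h>.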
*/ + struct net_device *dev; /* NULL is wildcarded here */ + int (*func) (struct sk_buff *, + struct net_device *, + struct packet_type *, + struct net_device *); + struct sk_buff *(*gso_segment)(struct sk_buff *skb, + netdev_features_t features); + int (*gso_send_check)(struct sk_buff *skb); + struct sk_buff **(*gro_receive)(struct sk_buff **head, + struct sk_buff *skb); + int (*gro_complete)(struct sk_buff *skb); + void *af_packet_priv; + struct list_head list; +}; + +#include <linux/notifier.h> + +/* netdevice notifier chain. Please remember to update the rtnetlink + * notification exclusion list in rtnetlink_event() when adding new + * types. + */ +#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */ +#define NETDEV_DOWN 0x0002 +#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface + detected a hardware crash and restarted + - we can use this eg to kick tcp sessions + once done */ +#define NETDEV_CHANGE 0x0004 /* Notify device state change */ +#define NETDEV_REGISTER 0x0005 +#define NETDEV_UNREGISTER 0x0006 +#define NETDEV_CHANGEMTU 0x0007 +#define NETDEV_CHANGEADDR 0x0008 +#define NETDEV_GOING_DOWN 0x0009 +#define NETDEV_CHANGENAME 0x000A +#define NETDEV_FEAT_CHANGE 0x000B +#define NETDEV_BONDING_FAILOVER 0x000C +#define NETDEV_PRE_UP 0x000D +#define NETDEV_PRE_TYPE_CHANGE 0x000E +#define NETDEV_POST_TYPE_CHANGE 0x000F +#define NETDEV_POST_INIT 0x0010 +#define NETDEV_UNREGISTER_BATCH 0x0011 +#define NETDEV_RELEASE 0x0012 +#define NETDEV_NOTIFY_PEERS 0x0013 +#define NETDEV_JOIN 0x0014 + +extern int register_netdevice_notifier(struct notifier_block *nb); +extern int unregister_netdevice_notifier(struct notifier_block *nb); +extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); + + +extern rwlock_t dev_base_lock; /* Device list lock */ + + +#define for_each_netdev(net, d) \ + list_for_each_entry(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_reverse(net, d) \ + list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_rcu(net, d) \ + list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_safe(net, d, n) \ + list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) +#define for_each_netdev_continue(net, d) \ + list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_continue_rcu(net, d) \ + list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) +#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) + +static inline struct net_device *next_net_device(struct net_device *dev) +{ + struct list_head *lh; + struct net *net; + + net = dev_net(dev); + lh = dev->dev_list.next; + return lh == &net->dev_base_head ? NULL : net_device_entry(lh); +} + +static inline struct net_device *next_net_device_rcu(struct net_device *dev) +{ + struct list_head *lh; + struct net *net; + + net = dev_net(dev); + lh = rcu_dereference(list_next_rcu(&dev->dev_list)); + return lh == &net->dev_base_head ? NULL : net_device_entry(lh); +} + +static inline struct net_device *first_net_device(struct net *net) +{ + return list_empty(&net->dev_base_head) ? NULL : + net_device_entry(net->dev_base_head.next); +} + +static inline struct net_device *first_net_device_rcu(struct net *net) +{ + struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); + + return lh == &net->dev_base_head ? 
NULL : net_device_entry(lh); +} + +extern int netdev_boot_setup_check(struct net_device *dev); +extern unsigned long netdev_boot_base(const char *prefix, int unit); +extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, + const char *hwaddr); +extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); +extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); +extern void dev_add_pack(struct packet_type *pt); +extern void dev_remove_pack(struct packet_type *pt); +extern void __dev_remove_pack(struct packet_type *pt); + +extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, + unsigned short mask); +extern struct net_device *dev_get_by_name(struct net *net, const char *name); +extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); +extern struct net_device *__dev_get_by_name(struct net *net, const char *name); +extern int dev_alloc_name(struct net_device *dev, const char *name); +extern int dev_open(struct net_device *dev); +extern int dev_close(struct net_device *dev); +extern void dev_disable_lro(struct net_device *dev); +extern int dev_queue_xmit(struct sk_buff *skb); +extern int register_netdevice(struct net_device *dev); +extern void unregister_netdevice_queue(struct net_device *dev, + struct list_head *head); +extern void unregister_netdevice_many(struct list_head *head); +static inline void unregister_netdevice(struct net_device *dev) +{ + unregister_netdevice_queue(dev, NULL); +} + +extern int netdev_refcnt_read(const struct net_device *dev); +extern void free_netdev(struct net_device *dev); +extern void synchronize_net(void); +extern int init_dummy_netdev(struct net_device *dev); +extern void netdev_resync_ops(struct net_device *dev); + +extern struct net_device *dev_get_by_index(struct net *net, int ifindex); +extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); +extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); +extern int dev_restart(struct net_device *dev); +#ifdef CONFIG_NETPOLL_TRAP +extern int netpoll_trap(void); +#endif +extern int skb_gro_receive(struct sk_buff **head, + struct sk_buff *skb); +extern void skb_gro_reset_offset(struct sk_buff *skb); + +static inline unsigned int skb_gro_offset(const struct sk_buff *skb) +{ + return NAPI_GRO_CB(skb)->data_offset; +} + +static inline unsigned int skb_gro_len(const struct sk_buff *skb) +{ + return skb->len - NAPI_GRO_CB(skb)->data_offset; +} + +static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) +{ + NAPI_GRO_CB(skb)->data_offset += len; +} + +static inline void *skb_gro_header_fast(struct sk_buff *skb, + unsigned int offset) +{ + return NAPI_GRO_CB(skb)->frag0 + offset; +} + +static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) +{ + return NAPI_GRO_CB(skb)->frag0_len < hlen; +} + +static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, + unsigned int offset) +{ + if (!pskb_may_pull(skb, hlen)) + return NULL; + + NAPI_GRO_CB(skb)->frag0 = NULL; + NAPI_GRO_CB(skb)->frag0_len = 0; + return skb->data + offset; +} + +static inline void *skb_gro_mac_header(struct sk_buff *skb) +{ + return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb); +} + +static inline void *skb_gro_network_header(struct sk_buff *skb) +{ + return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + + skb_network_offset(skb); +} + +static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, + 
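+/* Refcounting sketch for the lookup helpers declared above ("eth0" and the
+ * surrounding code are purely illustrative): dev_get_by_name() returns the
+ * device with a reference held, which the caller must drop with dev_put(),
+ * while dev_get_by_name_rcu() takes no reference and is only valid inside
+ * rcu_read_lock():
+ *
+ *	struct net_device *d = dev_get_by_name(&init_net, "eth0");
+ *
+ *	if (d) {
+ *		... use d ...
+ *		dev_put(d);
+ *	}
+ */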
unsigned short type, + const void *daddr, const void *saddr, + unsigned len) +{ + if (!dev->header_ops || !dev->header_ops->create) + return 0; + + return dev->header_ops->create(skb, dev, type, daddr, saddr, len); +} + +static inline int dev_parse_header(const struct sk_buff *skb, + unsigned char *haddr) +{ + const struct net_device *dev = skb->dev; + + if (!dev->header_ops || !dev->header_ops->parse) + return 0; + return dev->header_ops->parse(skb, haddr); +} + +typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); +extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); +static inline int unregister_gifconf(unsigned int family) +{ + return register_gifconf(family, NULL); +} + +/* + * Incoming packets are placed on per-cpu queues + */ +struct softnet_data { + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct list_head poll_list; + struct sk_buff *completion_queue; + struct sk_buff_head process_queue; + + /* stats */ + unsigned int processed; + unsigned int time_squeeze; + unsigned int cpu_collision; + unsigned int received_rps; + +#ifdef CONFIG_RPS + struct softnet_data *rps_ipi_list; + + /* Elements below can be accessed between CPUs for RPS */ + struct call_single_data csd ____cacheline_aligned_in_smp; + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_head; + unsigned int input_queue_tail; +#endif + unsigned dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; +}; + +static inline void input_queue_head_incr(struct softnet_data *sd) +{ +#ifdef CONFIG_RPS + sd->input_queue_head++; +#endif +} + +static inline void input_queue_tail_incr_save(struct softnet_data *sd, + unsigned int *qtail) +{ +#ifdef CONFIG_RPS + *qtail = ++sd->input_queue_tail; +#endif +} + +DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); + +extern void __netif_schedule(struct Qdisc *q); + +static inline void netif_schedule_queue(struct netdev_queue *txq) +{ + if (!(txq->state & QUEUE_STATE_ANY_XOFF)) + __netif_schedule(txq->qdisc); +} + +static inline void netif_tx_schedule_all(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) + netif_schedule_queue(netdev_get_tx_queue(dev, i)); +} + +static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) +{ + clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); +} + +/** + * netif_start_queue - allow transmit + * @dev: network device + * + * Allow upper layers to call the device hard_start_xmit routine. + */ +static inline void netif_start_queue(struct net_device *dev) +{ + netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); +} + +static inline void netif_tx_start_all_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_start_queue(txq); + } +} + +static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_NETPOLL_TRAP + if (netpoll_trap()) { + netif_tx_start_queue(dev_queue); + return; + } +#endif + if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) + __netif_schedule(dev_queue->qdisc); +} + +/** + * netif_wake_queue - restart transmit + * @dev: network device + * + * Allow upper layers to call the device hard_start_xmit routine. + * Used for flow control when transmit resources are available. 
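+ *
+ * A minimal usage sketch (my_ring_full() and my_tx_clean() stand in for
+ * hypothetical driver internals): ndo_start_xmit() stops the queue when
+ * the TX ring fills up, and the TX-completion path wakes it once
+ * descriptors have been reclaimed:
+ *
+ *	if (my_ring_full(priv))
+ *		netif_stop_queue(dev);
+ *	...
+ *	if (my_tx_clean(priv) && netif_queue_stopped(dev))
+ *		netif_wake_queue(dev);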
+ */ +static inline void netif_wake_queue(struct net_device *dev) +{ + netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); +} + +static inline void netif_tx_wake_all_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_wake_queue(txq); + } +} + +static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) +{ + if (WARN_ON(!dev_queue)) { + pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); + return; + } + set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); +} + +/** + * netif_stop_queue - stop transmitted packets + * @dev: network device + * + * Stop upper layers calling the device hard_start_xmit routine. + * Used for flow control when transmit resources are unavailable. + */ +static inline void netif_stop_queue(struct net_device *dev) +{ + netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); +} + +static inline void netif_tx_stop_all_queues(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + netif_tx_stop_queue(txq); + } +} + +static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) +{ + return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); +} + +/** + * netif_queue_stopped - test if transmit queue is flowblocked + * @dev: network device + * + * Test if transmit queue on device is currently unable to send. + */ +static inline bool netif_queue_stopped(const struct net_device *dev) +{ + return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); +} + +static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) +{ + return dev_queue->state & QUEUE_STATE_ANY_XOFF; +} + +static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) +{ + return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; +} + +static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes) +{ +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); + + if (likely(dql_avail(&dev_queue->dql) >= 0)) + return; + + set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); + + /* + * The XOFF flag must be set before checking the dql_avail below, + * because in netdev_tx_completed_queue we update the dql_completed + * before checking the XOFF flag. 
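+	 *
+	 * (This barrier is intended to pair with the smp_mb() in
+	 * netdev_tx_completed_queue() below: either this side observes the
+	 * updated dql_completed, or the completion side observes
+	 * __QUEUE_STATE_STACK_XOFF and restarts the queue, so a wakeup is
+	 * not lost.)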
+ */ + smp_mb(); + + /* check again in case another CPU has just made room avail */ + if (unlikely(dql_avail(&dev_queue->dql) >= 0)) + clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); +#endif +} + +static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) +{ + netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); +} + +static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, + unsigned pkts, unsigned bytes) +{ +#ifdef CONFIG_BQL + if (unlikely(!bytes)) + return; + + dql_completed(&dev_queue->dql, bytes); + + /* + * Without the memory barrier there is a small possiblity that + * netdev_tx_sent_queue will miss the update and cause the queue to + * be stopped forever + */ + smp_mb(); + + if (dql_avail(&dev_queue->dql) < 0) + return; + + if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) + netif_schedule_queue(dev_queue); +#endif +} + +static inline void netdev_completed_queue(struct net_device *dev, + unsigned pkts, unsigned bytes) +{ + netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); +} + +static inline void netdev_tx_reset_queue(struct netdev_queue *q) +{ +#ifdef CONFIG_BQL + clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); + dql_reset(&q->dql); +#endif +} + +static inline void netdev_reset_queue(struct net_device *dev_queue) +{ + netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); +} + +/** + * netif_running - test if up + * @dev: network device + * + * Test if the device has been brought up. + */ +static inline bool netif_running(const struct net_device *dev) +{ + return test_bit(__LINK_STATE_START, &dev->state); +} + +/* + * Routines to manage the subqueues on a device. We only need start + * stop, and a check if it's stopped. All other device management is + * done at the overall netdevice level. + * Also test the device if we're multiqueue. + */ + +/** + * netif_start_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Start individual transmit queue of a device with multiple transmit queues. + */ +static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + + netif_tx_start_queue(txq); +} + +/** + * netif_stop_subqueue - stop sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Stop individual transmit queue of a device with multiple transmit queues. + */ +static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); +#ifdef CONFIG_NETPOLL_TRAP + if (netpoll_trap()) + return; +#endif + netif_tx_stop_queue(txq); +} + +/** + * netif_subqueue_stopped - test status of subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Check individual transmit queue of a device with multiple transmit queues. + */ +static inline bool __netif_subqueue_stopped(const struct net_device *dev, + u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + + return netif_tx_queue_stopped(txq); +} + +static inline bool netif_subqueue_stopped(const struct net_device *dev, + struct sk_buff *skb) +{ + return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); +} + +/** + * netif_wake_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Resume individual transmit queue of a device with multiple transmit queues. 
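+ *
+ * Sketch for a multiqueue driver (the ring helpers are hypothetical): the
+ * xmit path stops only the subqueue the skb was mapped to, and the
+ * matching completion path wakes that same subqueue:
+ *
+ *	u16 q = skb_get_queue_mapping(skb);
+ *
+ *	if (my_ring_full(priv, q))
+ *		netif_stop_subqueue(dev, q);
+ *	...
+ *	if (__netif_subqueue_stopped(dev, q))
+ *		netif_wake_subqueue(dev, q);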
+ */ +static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); +#ifdef CONFIG_NETPOLL_TRAP + if (netpoll_trap()) + return; +#endif + if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) + __netif_schedule(txq->qdisc); +} + +/* + * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used + * as a distribution range limit for the returned value. + */ +static inline u16 skb_tx_hash(const struct net_device *dev, + const struct sk_buff *skb) +{ + return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); +} + +/** + * netif_is_multiqueue - test if device has multiple transmit queues + * @dev: network device + * + * Check if device has multiple transmit queues + */ +static inline bool netif_is_multiqueue(const struct net_device *dev) +{ + return dev->num_tx_queues > 1; +} + +extern int netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq); + +#ifdef CONFIG_RPS +extern int netif_set_real_num_rx_queues(struct net_device *dev, + unsigned int rxq); +#else +static inline int netif_set_real_num_rx_queues(struct net_device *dev, + unsigned int rxq) +{ + return 0; +} +#endif + +static inline int netif_copy_real_num_queues(struct net_device *to_dev, + const struct net_device *from_dev) +{ + netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues); +#ifdef CONFIG_RPS + return netif_set_real_num_rx_queues(to_dev, + from_dev->real_num_rx_queues); +#else + return 0; +#endif +} + +/* Use this variant when it is known for sure that it + * is executing from hardware interrupt context or with hardware interrupts + * disabled. + */ +extern void dev_kfree_skb_irq(struct sk_buff *skb); + +/* Use this variant in places where it could be invoked + * from either hardware interrupt or other context, with hardware interrupts + * either disabled or enabled. 
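+ *
+ * As a rough rule (illustrative, not exhaustive): a TX-completion handler
+ * known to run in hard-IRQ context frees reclaimed skbs with
+ * dev_kfree_skb_irq(skb); a path such as an error drop in ndo_start_xmit(),
+ * which may run in either context, uses dev_kfree_skb_any(skb); plain
+ * kfree_skb() remains the right call in ordinary process or softirq
+ * context.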
+ */ +extern void dev_kfree_skb_any(struct sk_buff *skb); + +extern int netif_rx(struct sk_buff *skb); +extern int netif_rx_ni(struct sk_buff *skb); +extern int netif_receive_skb(struct sk_buff *skb); +extern gro_result_t dev_gro_receive(struct napi_struct *napi, + struct sk_buff *skb); +extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); +extern gro_result_t napi_gro_receive(struct napi_struct *napi, + struct sk_buff *skb); +extern void napi_gro_flush(struct napi_struct *napi); +extern struct sk_buff * napi_get_frags(struct napi_struct *napi); +extern gro_result_t napi_frags_finish(struct napi_struct *napi, + struct sk_buff *skb, + gro_result_t ret); +extern struct sk_buff * napi_frags_skb(struct napi_struct *napi); +extern gro_result_t napi_gro_frags(struct napi_struct *napi); + +static inline void napi_free_frags(struct napi_struct *napi) +{ + kfree_skb(napi->skb); + napi->skb = NULL; +} + +extern int netdev_rx_handler_register(struct net_device *dev, + rx_handler_func_t *rx_handler, + void *rx_handler_data); +extern void netdev_rx_handler_unregister(struct net_device *dev); + +extern bool dev_valid_name(const char *name); +extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); +extern int dev_ethtool(struct net *net, struct ifreq *); +extern unsigned dev_get_flags(const struct net_device *); +extern int __dev_change_flags(struct net_device *, unsigned int flags); +extern int dev_change_flags(struct net_device *, unsigned); +extern void __dev_notify_flags(struct net_device *, unsigned int old_flags); +extern int dev_change_name(struct net_device *, const char *); +extern int dev_set_alias(struct net_device *, const char *, size_t); +extern int dev_change_net_namespace(struct net_device *, + struct net *, const char *); +extern int dev_set_mtu(struct net_device *, int); +extern void dev_set_group(struct net_device *, int); +extern int dev_set_mac_address(struct net_device *, + struct sockaddr *); +extern int dev_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev, + struct netdev_queue *txq); +extern int dev_forward_skb(struct net_device *dev, + struct sk_buff *skb); + +extern int netdev_budget; + +/* Called by rtnetlink.c:rtnl_unlock() */ +extern void netdev_run_todo(void); + +/** + * dev_put - release reference to device + * @dev: network device + * + * Release reference to device to allow it to be freed. + */ +static inline void dev_put(struct net_device *dev) +{ + this_cpu_dec(*dev->pcpu_refcnt); +} + +/** + * dev_hold - get reference to device + * @dev: network device + * + * Hold reference to device to keep it from being freed. + */ +static inline void dev_hold(struct net_device *dev) +{ + this_cpu_inc(*dev->pcpu_refcnt); +} + +/* Carrier loss detection, dial on demand. The functions netif_carrier_on + * and _off may be called from IRQ context, but it is caller + * who is responsible for serialization of these calls. + * + * The name carrier is inappropriate, these functions should really be + * called netif_lowerlayer_*() because they represent the state of any + * kind of lower layer not just hardware media. 
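+ *
+ * A typical (sketched) caller is a driver's link-change handler, where
+ * my_link_up() stands in for whatever the hardware reports:
+ *
+ *	if (my_link_up(priv))
+ *		netif_carrier_on(dev);
+ *	else
+ *		netif_carrier_off(dev);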
+ */ + +extern void linkwatch_fire_event(struct net_device *dev); +extern void linkwatch_forget_dev(struct net_device *dev); + +/** + * netif_carrier_ok - test if carrier present + * @dev: network device + * + * Check if carrier is present on device + */ +static inline bool netif_carrier_ok(const struct net_device *dev) +{ + return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); +} + +extern unsigned long dev_trans_start(struct net_device *dev); + +extern void __netdev_watchdog_up(struct net_device *dev); + +extern void netif_carrier_on(struct net_device *dev); + +extern void netif_carrier_off(struct net_device *dev); + +extern void netif_notify_peers(struct net_device *dev); + +/** + * netif_dormant_on - mark device as dormant. + * @dev: network device + * + * Mark device as dormant (as per RFC2863). + * + * The dormant state indicates that the relevant interface is not + * actually in a condition to pass packets (i.e., it is not 'up') but is + * in a "pending" state, waiting for some external event. For "on- + * demand" interfaces, this new state identifies the situation where the + * interface is waiting for events to place it in the up state. + * + */ +static inline void netif_dormant_on(struct net_device *dev) +{ + if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) + linkwatch_fire_event(dev); +} + +/** + * netif_dormant_off - set device as not dormant. + * @dev: network device + * + * Device is not in dormant state. + */ +static inline void netif_dormant_off(struct net_device *dev) +{ + if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) + linkwatch_fire_event(dev); +} + +/** + * netif_dormant - test if carrier present + * @dev: network device + * + * Check if carrier is present on device + */ +static inline bool netif_dormant(const struct net_device *dev) +{ + return test_bit(__LINK_STATE_DORMANT, &dev->state); +} + + +/** + * netif_oper_up - test if device is operational + * @dev: network device + * + * Check if carrier is operational + */ +static inline bool netif_oper_up(const struct net_device *dev) +{ + return (dev->operstate == IF_OPER_UP || + dev->operstate == IF_OPER_UNKNOWN /* backward compat */); +} + +/** + * netif_device_present - is device available or removed + * @dev: network device + * + * Check if device has not been removed from system. 
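+ *
+ * Sketched use (assuming a driver's suspend/resume callbacks): the driver
+ * calls netif_device_detach(dev) before powering the hardware down and
+ * netif_device_attach(dev) after restoring it, so that paths which poke
+ * the hardware, e.g. an ethtool handler, can bail out early:
+ *
+ *	if (!netif_device_present(dev))
+ *		return -ENODEV;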
+ */ +static inline bool netif_device_present(struct net_device *dev) +{ + return test_bit(__LINK_STATE_PRESENT, &dev->state); +} + +extern void netif_device_detach(struct net_device *dev); + +extern void netif_device_attach(struct net_device *dev); + +/* + * Network interface message level settings + */ + +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) + +static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) - 1; +} + +static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) +{ + spin_lock(&txq->_xmit_lock); + txq->xmit_lock_owner = cpu; +} + +static inline void __netif_tx_lock_bh(struct netdev_queue *txq) +{ + spin_lock_bh(&txq->_xmit_lock); + txq->xmit_lock_owner = smp_processor_id(); +} + +static inline bool __netif_tx_trylock(struct netdev_queue *txq) +{ + bool ok = spin_trylock(&txq->_xmit_lock); + if (likely(ok)) + txq->xmit_lock_owner = smp_processor_id(); + return ok; +} + +static inline void __netif_tx_unlock(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock(&txq->_xmit_lock); +} + +static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock_bh(&txq->_xmit_lock); +} + +static inline void txq_trans_update(struct netdev_queue *txq) +{ + if (txq->xmit_lock_owner != -1) + txq->trans_start = jiffies; +} + +/** + * netif_tx_lock - grab network device transmit lock + * @dev: network device + * + * Get network device transmit lock + */ +static inline void netif_tx_lock(struct net_device *dev) +{ + unsigned int i; + int cpu; + + spin_lock(&dev->tx_global_lock); + cpu = smp_processor_id(); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* We are the only thread of execution doing a + * freeze, but we have to grab the _xmit_lock in + * order to synchronize with threads which are in + * the ->hard_start_xmit() handler and already + * checked the frozen bit. 
+ */ + __netif_tx_lock(txq, cpu); + set_bit(__QUEUE_STATE_FROZEN, &txq->state); + __netif_tx_unlock(txq); + } +} + +static inline void netif_tx_lock_bh(struct net_device *dev) +{ + local_bh_disable(); + netif_tx_lock(dev); +} + +static inline void netif_tx_unlock(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* No need to grab the _xmit_lock here. If the + * queue is not stopped for another reason, we + * force a schedule. + */ + clear_bit(__QUEUE_STATE_FROZEN, &txq->state); + netif_schedule_queue(txq); + } + spin_unlock(&dev->tx_global_lock); +} + +static inline void netif_tx_unlock_bh(struct net_device *dev) +{ + netif_tx_unlock(dev); + local_bh_enable(); +} + +#define HARD_TX_LOCK(dev, txq, cpu) { \ + if ((dev->features & NETIF_F_LLTX) == 0) { \ + __netif_tx_lock(txq, cpu); \ + } \ +} + +#define HARD_TX_UNLOCK(dev, txq) { \ + if ((dev->features & NETIF_F_LLTX) == 0) { \ + __netif_tx_unlock(txq); \ + } \ +} + +static inline void netif_tx_disable(struct net_device *dev) +{ + unsigned int i; + int cpu; + + local_bh_disable(); + cpu = smp_processor_id(); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + __netif_tx_lock(txq, cpu); + netif_tx_stop_queue(txq); + __netif_tx_unlock(txq); + } + local_bh_enable(); +} + +static inline void netif_addr_lock(struct net_device *dev) +{ + spin_lock(&dev->addr_list_lock); +} + +static inline void netif_addr_lock_nested(struct net_device *dev) +{ + spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); +} + +static inline void netif_addr_lock_bh(struct net_device *dev) +{ + spin_lock_bh(&dev->addr_list_lock); +} + +static inline void netif_addr_unlock(struct net_device *dev) +{ + spin_unlock(&dev->addr_list_lock); +} + +static inline void netif_addr_unlock_bh(struct net_device *dev) +{ + spin_unlock_bh(&dev->addr_list_lock); +} + +/* + * dev_addrs walker. Should be used only for read access. Call with + * rcu_read_lock held. 
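+ *
+ * Minimal usage sketch:
+ *
+ *	struct netdev_hw_addr *ha;
+ *
+ *	rcu_read_lock();
+ *	for_each_dev_addr(dev, ha)
+ *		pr_debug("hw addr %pM\n", ha->addr);
+ *	rcu_read_unlock();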
+ */ +#define for_each_dev_addr(dev, ha) \ + list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) + +/* These functions live elsewhere (drivers/net/net_init.c, but related) */ + +extern void ether_setup(struct net_device *dev); + +/* Support for loadable net-drivers */ +extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, + void (*setup)(struct net_device *), + unsigned int txqs, unsigned int rxqs); +#define alloc_netdev(sizeof_priv, name, setup) \ + alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) + +#define alloc_netdev_mq(sizeof_priv, name, setup, count) \ + alloc_netdev_mqs(sizeof_priv, name, setup, count, count) + +extern int register_netdev(struct net_device *dev); +extern void unregister_netdev(struct net_device *dev); + +/* General hardware address lists handling functions */ +extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len, unsigned char addr_type); +extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len, unsigned char addr_type); +extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len); +extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len); +extern void __hw_addr_flush(struct netdev_hw_addr_list *list); +extern void __hw_addr_init(struct netdev_hw_addr_list *list); + +/* Functions used for device addresses handling */ +extern int dev_addr_add(struct net_device *dev, unsigned char *addr, + unsigned char addr_type); +extern int dev_addr_del(struct net_device *dev, unsigned char *addr, + unsigned char addr_type); +extern int dev_addr_add_multiple(struct net_device *to_dev, + struct net_device *from_dev, + unsigned char addr_type); +extern int dev_addr_del_multiple(struct net_device *to_dev, + struct net_device *from_dev, + unsigned char addr_type); +extern void dev_addr_flush(struct net_device *dev); +extern int dev_addr_init(struct net_device *dev); + +/* Functions used for unicast addresses handling */ +extern int dev_uc_add(struct net_device *dev, unsigned char *addr); +extern int dev_uc_del(struct net_device *dev, unsigned char *addr); +extern int dev_uc_sync(struct net_device *to, struct net_device *from); +extern void dev_uc_unsync(struct net_device *to, struct net_device *from); +extern void dev_uc_flush(struct net_device *dev); +extern void dev_uc_init(struct net_device *dev); + +/* Functions used for multicast addresses handling */ +extern int dev_mc_add(struct net_device *dev, unsigned char *addr); +extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr); +extern int dev_mc_del(struct net_device *dev, unsigned char *addr); +extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr); +extern int dev_mc_sync(struct net_device *to, struct net_device *from); +extern void dev_mc_unsync(struct net_device *to, struct net_device *from); +extern void dev_mc_flush(struct net_device *dev); +extern void dev_mc_init(struct net_device *dev); + +/* Functions used for secondary unicast and multicast support */ +extern void dev_set_rx_mode(struct net_device *dev); +extern void __dev_set_rx_mode(struct net_device *dev); +extern int dev_set_promiscuity(struct net_device *dev, int inc); +extern int dev_set_allmulti(struct net_device *dev, int inc); +extern void netdev_state_change(struct net_device *dev); +extern int netdev_bonding_change(struct 
net_device *dev, + unsigned long event); +extern void netdev_features_change(struct net_device *dev); +/* Load a device via the kmod */ +extern void dev_load(struct net *net, const char *name); +extern void dev_mcast_init(void); +extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *storage); +extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, + const struct net_device_stats *netdev_stats); + +extern int netdev_max_backlog; +extern int netdev_tstamp_prequeue; +extern int weight_p; +extern int bpf_jit_enable; +extern int netdev_set_master(struct net_device *dev, struct net_device *master); +extern int netdev_set_bond_master(struct net_device *dev, + struct net_device *master); +extern int skb_checksum_help(struct sk_buff *skb); +extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, + netdev_features_t features); +#ifdef CONFIG_BUG +extern void netdev_rx_csum_fault(struct net_device *dev); +#else +static inline void netdev_rx_csum_fault(struct net_device *dev) +{ +} +#endif +/* rx skb timestamps */ +extern void net_enable_timestamp(void); +extern void net_disable_timestamp(void); + +#ifdef CONFIG_PROC_FS +extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); +extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); +extern void dev_seq_stop(struct seq_file *seq, void *v); +#endif + +extern int netdev_class_create_file(struct class_attribute *class_attr); +extern void netdev_class_remove_file(struct class_attribute *class_attr); + +extern struct kobj_ns_type_operations net_ns_type_operations; + +extern const char *netdev_drivername(const struct net_device *dev); + +extern void linkwatch_run_queue(void); + +static inline netdev_features_t netdev_get_wanted_features( + struct net_device *dev) +{ + return (dev->features & ~dev->hw_features) | dev->wanted_features; +} +netdev_features_t netdev_increment_features(netdev_features_t all, + netdev_features_t one, netdev_features_t mask); +int __netdev_update_features(struct net_device *dev); +void netdev_update_features(struct net_device *dev); +void netdev_change_features(struct net_device *dev); + +void netif_stacked_transfer_operstate(const struct net_device *rootdev, + struct net_device *dev); + +netdev_features_t netif_skb_features(struct sk_buff *skb); + +static inline bool net_gso_ok(netdev_features_t features, int gso_type) +{ + netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; + + /* check flags correspondence */ + BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); + + return (features & feature) == feature; +} + +static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) +{ + return net_gso_ok(features, skb_shinfo(skb)->gso_type) && + (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); +} + +static inline bool netif_needs_gso(struct sk_buff *skb, + netdev_features_t features) +{ + return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || + unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && + (skb->ip_summed != CHECKSUM_UNNECESSARY))); +} + +static inline void netif_set_gso_max_size(struct net_device *dev, + unsigned int size) +{ + dev->gso_max_size = 
size; +} + +static inline bool netif_is_bond_slave(struct net_device *dev) +{ + return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; +} + +static inline bool netif_supports_nofcs(struct net_device *dev) +{ + return dev->priv_flags & IFF_SUPP_NOFCS; +} + +extern struct pernet_operations __net_initdata loopback_net_ops; + +/* Logging, debugging and troubleshooting/diagnostic helpers. */ + +/* netdev_printk helpers, similar to dev_printk */ + +static inline const char *netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} + +extern int __netdev_printk(const char *level, const struct net_device *dev, + struct va_format *vaf); + +extern __printf(3, 4) +int netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...); +extern __printf(2, 3) +int netdev_emerg(const struct net_device *dev, const char *format, ...); +extern __printf(2, 3) +int netdev_alert(const struct net_device *dev, const char *format, ...); +extern __printf(2, 3) +int netdev_crit(const struct net_device *dev, const char *format, ...); +extern __printf(2, 3) +int netdev_err(const struct net_device *dev, const char *format, ...); +extern __printf(2, 3) +int netdev_warn(const struct net_device *dev, const char *format, ...); +extern __printf(2, 3) +int netdev_notice(const struct net_device *dev, const char *format, ...); +extern __printf(2, 3) +int netdev_info(const struct net_device *dev, const char *format, ...); + +#define MODULE_ALIAS_NETDEV(device) \ + MODULE_ALIAS("netdev-" device) + +#if defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_netdev_dbg(__dev, format, ##args); \ +} while (0) +#elif defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#else +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif + +#if defined(VERBOSE_DEBUG) +#define netdev_vdbg netdev_dbg +#else + +#define netdev_vdbg(dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +/* + * netdev_WARN() acts like dev_printk(), but with the key difference + * of using a WARN/WARN_ON to get the message out, including the + * file/line information and a backtrace. + */ +#define netdev_WARN(dev, format, args...) \ + WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args); + +/* netif printk helpers, similar to netdev_printk */ + +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#define netif_level(level, priv, type, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#define netif_notice(priv, type, dev, fmt, args...) 
\ + netif_level(notice, priv, type, dev, fmt, ##args) +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) + +#if defined(DEBUG) +#define netif_dbg(priv, type, dev, format, args...) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netif_dbg(priv, type, netdev, format, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + dynamic_netdev_dbg(netdev, format, ##args); \ +} while (0) +#else +#define netif_dbg(priv, type, dev, format, args...) \ +({ \ + if (0) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +#if defined(VERBOSE_DEBUG) +#define netif_vdbg netif_dbg +#else +#define netif_vdbg(priv, type, dev, format, args...) \ +({ \ + if (0) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h new file mode 100644 index 00000000..29734be3 --- /dev/null +++ b/include/linux/netfilter.h @@ -0,0 +1,397 @@ +#ifndef __LINUX_NETFILTER_H +#define __LINUX_NETFILTER_H + +#ifdef __KERNEL__ +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/net.h> +#include <linux/if.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/wait.h> +#include <linux/list.h> +#endif +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/sysctl.h> + +/* Responses from hook functions. */ +#define NF_DROP 0 +#define NF_ACCEPT 1 +#define NF_STOLEN 2 +#define NF_QUEUE 3 +#define NF_REPEAT 4 +#define NF_STOP 5 +#define NF_MAX_VERDICT NF_STOP + +/* we overload the higher bits for encoding auxiliary data such as the queue + * number or errno values. Not nice, but better than additional function + * arguments. */ +#define NF_VERDICT_MASK 0x000000ff + +/* extra verdict flags have mask 0x0000ff00 */ +#define NF_VERDICT_FLAG_QUEUE_BYPASS 0x00008000 + +/* queue number (NF_QUEUE) or errno (NF_DROP) */ +#define NF_VERDICT_QMASK 0xffff0000 +#define NF_VERDICT_QBITS 16 + +#define NF_QUEUE_NR(x) ((((x) << 16) & NF_VERDICT_QMASK) | NF_QUEUE) + +#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP) + +/* only for userspace compatibility */ +#ifndef __KERNEL__ +/* Generic cache responses from hook functions. + <= 0x2000 is used for protocol-flags. 
*/ +#define NFC_UNKNOWN 0x4000 +#define NFC_ALTERED 0x8000 + +/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */ +#define NF_VERDICT_BITS 16 +#endif + +enum nf_inet_hooks { + NF_INET_PRE_ROUTING, + NF_INET_LOCAL_IN, + NF_INET_FORWARD, + NF_INET_LOCAL_OUT, + NF_INET_POST_ROUTING, + NF_INET_NUMHOOKS +}; + +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_DECNET = 12, + NFPROTO_NUMPROTO, +}; + +union nf_inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +#ifdef __KERNEL__ +#ifdef CONFIG_NETFILTER +static inline int NF_DROP_GETERR(int verdict) +{ + return -(verdict >> NF_VERDICT_QBITS); +} + +static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1, + const union nf_inet_addr *a2) +{ + return a1->all[0] == a2->all[0] && + a1->all[1] == a2->all[1] && + a1->all[2] == a2->all[2] && + a1->all[3] == a2->all[3]; +} + +extern void netfilter_init(void); + +/* Largest hook number + 1 */ +#define NF_MAX_HOOKS 8 + +struct sk_buff; + +typedef unsigned int nf_hookfn(unsigned int hooknum, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)); + +struct nf_hook_ops { + struct list_head list; + + /* User fills in from here down. */ + nf_hookfn *hook; + struct module *owner; + u_int8_t pf; + unsigned int hooknum; + /* Hooks are ordered in ascending priority. */ + int priority; +}; + +struct nf_sockopt_ops { + struct list_head list; + + u_int8_t pf; + + /* Non-inclusive ranges: use 0/0/NULL to never get called. */ + int set_optmin; + int set_optmax; + int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); +#ifdef CONFIG_COMPAT + int (*compat_set)(struct sock *sk, int optval, + void __user *user, unsigned int len); +#endif + int get_optmin; + int get_optmax; + int (*get)(struct sock *sk, int optval, void __user *user, int *len); +#ifdef CONFIG_COMPAT + int (*compat_get)(struct sock *sk, int optval, + void __user *user, int *len); +#endif + /* Use the module struct to lock set/get code in place */ + struct module *owner; +}; + +/* Function to register/unregister hook points. */ +int nf_register_hook(struct nf_hook_ops *reg); +void nf_unregister_hook(struct nf_hook_ops *reg); +int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n); +void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n); + +/* Functions to register get/setsockopt ranges (non-inclusive). You + need to check permissions yourself! 
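+
+   For instance (a sketch only; my_set is hypothetical), a set handler wired
+   up through the struct above would normally begin with its own capability
+   test:
+
+	static int my_set(struct sock *sk, int optval,
+			  void __user *user, unsigned int len)
+	{
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		...
+	}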
*/ +int nf_register_sockopt(struct nf_sockopt_ops *reg); +void nf_unregister_sockopt(struct nf_sockopt_ops *reg); + +#ifdef CONFIG_SYSCTL +/* Sysctl registration */ +extern struct ctl_path nf_net_netfilter_sysctl_path[]; +extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[]; +#endif /* CONFIG_SYSCTL */ + +extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; + +#if defined(CONFIG_JUMP_LABEL) +#include <linux/static_key.h> +extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; +static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) +{ + if (__builtin_constant_p(pf) && + __builtin_constant_p(hook)) + return static_key_false(&nf_hooks_needed[pf][hook]); + + return !list_empty(&nf_hooks[pf][hook]); +} +#else +static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) +{ + return !list_empty(&nf_hooks[pf][hook]); +} +#endif + +int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct sk_buff *), int thresh); + +/** + * nf_hook_thresh - call a netfilter hook + * + * Returns 1 if the hook has allowed the packet to pass. The function + * okfn must be invoked by the caller in this case. Any other return + * value indicates the packet has been consumed by the hook. + */ +static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, + struct sk_buff *skb, + struct net_device *indev, + struct net_device *outdev, + int (*okfn)(struct sk_buff *), int thresh) +{ + if (nf_hooks_active(pf, hook)) + return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); + return 1; +} + +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct sk_buff *)) +{ + return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN); +} + +/* Activate hook; either okfn or kfree_skb called, unless a hook + returns NF_STOLEN (in which case, it's up to the hook to deal with + the consequences). + + Returns -ERRNO if packet dropped. Zero means queued, stolen or + accepted. +*/ + +/* RR: + > I don't want nf_hook to return anything because people might forget + > about async and trust the return value to mean "packet was ok". 
+ + AK: + Just document it clearly, then you can expect some sense from kernel + coders :) +*/ + +static inline int +NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb, + struct net_device *in, struct net_device *out, + int (*okfn)(struct sk_buff *), int thresh) +{ + int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh); + if (ret == 1) + ret = okfn(skb); + return ret; +} + +static inline int +NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb, + struct net_device *in, struct net_device *out, + int (*okfn)(struct sk_buff *), bool cond) +{ + int ret; + + if (!cond || + ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1)) + ret = okfn(skb); + return ret; +} + +static inline int +NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb, + struct net_device *in, struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, INT_MIN); +} + +/* Call setsockopt() */ +int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, + unsigned int len); +int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, + int *len); +#ifdef CONFIG_COMPAT +int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, + char __user *opt, unsigned int len); +int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, + char __user *opt, int *len); +#endif + +/* Call this before modifying an existing packet: ensures it is + modifiable and linear to the point you care about (writable_len). + Returns true or false. */ +extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); + +struct flowi; +struct nf_queue_entry; + +struct nf_afinfo { + unsigned short family; + __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); + __sum16 (*checksum_partial)(struct sk_buff *skb, + unsigned int hook, + unsigned int dataoff, + unsigned int len, + u_int8_t protocol); + int (*route)(struct net *net, struct dst_entry **dst, + struct flowi *fl, bool strict); + void (*saveroute)(const struct sk_buff *skb, + struct nf_queue_entry *entry); + int (*reroute)(struct sk_buff *skb, + const struct nf_queue_entry *entry); + int route_key_size; +}; + +extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO]; +static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) +{ + return rcu_dereference(nf_afinfo[family]); +} + +static inline __sum16 +nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, + u_int8_t protocol, unsigned short family) +{ + const struct nf_afinfo *afinfo; + __sum16 csum = 0; + + rcu_read_lock(); + afinfo = nf_get_afinfo(family); + if (afinfo) + csum = afinfo->checksum(skb, hook, dataoff, protocol); + rcu_read_unlock(); + return csum; +} + +static inline __sum16 +nf_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol, unsigned short family) +{ + const struct nf_afinfo *afinfo; + __sum16 csum = 0; + + rcu_read_lock(); + afinfo = nf_get_afinfo(family); + if (afinfo) + csum = afinfo->checksum_partial(skb, hook, dataoff, len, + protocol); + rcu_read_unlock(); + return csum; +} + +extern int nf_register_afinfo(const struct nf_afinfo *afinfo); +extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo); + +#include <net/flow.h> +extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); + +static inline void +nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t 
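+/* Usage sketch for NF_HOOK() above (mirroring how, e.g., the IPv4 receive
+ * path is commonly structured; ip_rcv_finish stands for the okfn
+ * continuation invoked when the hooks return NF_ACCEPT):
+ *
+ *	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb,
+ *		       dev, NULL, ip_rcv_finish);
+ */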
family) +{ +#ifdef CONFIG_NF_NAT_NEEDED + void (*decodefn)(struct sk_buff *, struct flowi *); + + if (family == AF_INET) { + rcu_read_lock(); + decodefn = rcu_dereference(ip_nat_decode_session); + if (decodefn) + decodefn(skb, fl); + rcu_read_unlock(); + } +#endif +} + +#ifdef CONFIG_PROC_FS +#include <linux/proc_fs.h> +extern struct proc_dir_entry *proc_net_netfilter; +#endif + +#else /* !CONFIG_NETFILTER */ +#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) +#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb) +static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, + struct sk_buff *skb, + struct net_device *indev, + struct net_device *outdev, + int (*okfn)(struct sk_buff *), int thresh) +{ + return okfn(skb); +} +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct sk_buff *)) +{ + return 1; +} +struct flowi; +static inline void +nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) +{ +} +#endif /*CONFIG_NETFILTER*/ + +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu; +extern void nf_ct_attach(struct sk_buff *, struct sk_buff *); +extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; +#else +static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} +#endif + +#endif /*__KERNEL__*/ +#endif /*__LINUX_NETFILTER_H*/ diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild new file mode 100644 index 00000000..16970363 --- /dev/null +++ b/include/linux/netfilter/Kbuild @@ -0,0 +1,77 @@ +header-y += ipset/ + +header-y += nf_conntrack_common.h +header-y += nf_conntrack_ftp.h +header-y += nf_conntrack_sctp.h +header-y += nf_conntrack_tcp.h +header-y += nf_conntrack_tuple_common.h +header-y += nf_nat.h +header-y += nfnetlink.h +header-y += nfnetlink_acct.h +header-y += nfnetlink_compat.h +header-y += nfnetlink_conntrack.h +header-y += nfnetlink_cttimeout.h +header-y += nfnetlink_log.h +header-y += nfnetlink_queue.h +header-y += x_tables.h +header-y += xt_AUDIT.h +header-y += xt_CHECKSUM.h +header-y += xt_CLASSIFY.h +header-y += xt_CONNMARK.h +header-y += xt_CONNSECMARK.h +header-y += xt_CT.h +header-y += xt_DSCP.h +header-y += xt_IDLETIMER.h +header-y += xt_LED.h +header-y += xt_LOG.h +header-y += xt_MARK.h +header-y += xt_nfacct.h +header-y += xt_NFLOG.h +header-y += xt_NFQUEUE.h +header-y += xt_RATEEST.h +header-y += xt_SECMARK.h +header-y += xt_TCPMSS.h +header-y += xt_TCPOPTSTRIP.h +header-y += xt_TEE.h +header-y += xt_TPROXY.h +header-y += xt_addrtype.h +header-y += xt_cluster.h +header-y += xt_comment.h +header-y += xt_connbytes.h +header-y += xt_connlimit.h +header-y += xt_connmark.h +header-y += xt_conntrack.h +header-y += xt_cpu.h +header-y += xt_dccp.h +header-y += xt_devgroup.h +header-y += xt_dscp.h +header-y += xt_ecn.h +header-y += xt_esp.h +header-y += xt_hashlimit.h +header-y += xt_helper.h +header-y += xt_iprange.h +header-y += xt_ipvs.h +header-y += xt_length.h +header-y += xt_limit.h +header-y += xt_mac.h +header-y += xt_mark.h +header-y += xt_multiport.h +header-y += xt_osf.h +header-y += xt_owner.h +header-y += xt_physdev.h +header-y += xt_pkttype.h +header-y += xt_policy.h +header-y += xt_quota.h +header-y += xt_rateest.h +header-y += xt_realm.h +header-y += xt_recent.h +header-y += xt_set.h +header-y += xt_sctp.h +header-y += xt_socket.h +header-y += xt_state.h 
+header-y += xt_statistic.h +header-y += xt_string.h +header-y += xt_tcpmss.h +header-y += xt_tcpudp.h +header-y += xt_time.h +header-y += xt_u32.h diff --git a/include/linux/netfilter/ipset/Kbuild b/include/linux/netfilter/ipset/Kbuild new file mode 100644 index 00000000..601fe71d --- /dev/null +++ b/include/linux/netfilter/ipset/Kbuild @@ -0,0 +1,4 @@ +header-y += ip_set.h +header-y += ip_set_bitmap.h +header-y += ip_set_hash.h +header-y += ip_set_list.h diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h new file mode 100644 index 00000000..2f8e18a2 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set.h @@ -0,0 +1,489 @@ +#ifndef _IP_SET_H +#define _IP_SET_H + +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> + * Patrick Schaaf <bof@bof.de> + * Martin Josefsson <gandalf@wlug.westbo.se> + * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/types.h> + +/* The protocol version */ +#define IPSET_PROTOCOL 6 + +/* The max length of strings including NUL: set and type identifiers */ +#define IPSET_MAXNAMELEN 32 + +/* Message types and commands */ +enum ipset_cmd { + IPSET_CMD_NONE, + IPSET_CMD_PROTOCOL, /* 1: Return protocol version */ + IPSET_CMD_CREATE, /* 2: Create a new (empty) set */ + IPSET_CMD_DESTROY, /* 3: Destroy a (empty) set */ + IPSET_CMD_FLUSH, /* 4: Remove all elements from a set */ + IPSET_CMD_RENAME, /* 5: Rename a set */ + IPSET_CMD_SWAP, /* 6: Swap two sets */ + IPSET_CMD_LIST, /* 7: List sets */ + IPSET_CMD_SAVE, /* 8: Save sets */ + IPSET_CMD_ADD, /* 9: Add an element to a set */ + IPSET_CMD_DEL, /* 10: Delete an element from a set */ + IPSET_CMD_TEST, /* 11: Test an element in a set */ + IPSET_CMD_HEADER, /* 12: Get set header data only */ + IPSET_CMD_TYPE, /* 13: Get set type */ + IPSET_MSG_MAX, /* Netlink message commands */ + + /* Commands in userspace: */ + IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */ + IPSET_CMD_HELP, /* 15: Get help */ + IPSET_CMD_VERSION, /* 16: Get program version */ + IPSET_CMD_QUIT, /* 17: Quit from interactive mode */ + + IPSET_CMD_MAX, + + IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */ +}; + +/* Attributes at command level */ +enum { + IPSET_ATTR_UNSPEC, + IPSET_ATTR_PROTOCOL, /* 1: Protocol version */ + IPSET_ATTR_SETNAME, /* 2: Name of the set */ + IPSET_ATTR_TYPENAME, /* 3: Typename */ + IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */ + IPSET_ATTR_REVISION, /* 4: Settype revision */ + IPSET_ATTR_FAMILY, /* 5: Settype family */ + IPSET_ATTR_FLAGS, /* 6: Flags at command level */ + IPSET_ATTR_DATA, /* 7: Nested attributes */ + IPSET_ATTR_ADT, /* 8: Multiple data containers */ + IPSET_ATTR_LINENO, /* 9: Restore lineno */ + IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */ + IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */ + __IPSET_ATTR_CMD_MAX, +}; +#define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1) + +/* CADT specific attributes */ +enum { + IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1, + IPSET_ATTR_IP_FROM = IPSET_ATTR_IP, + IPSET_ATTR_IP_TO, /* 2 */ + IPSET_ATTR_CIDR, /* 3 */ + IPSET_ATTR_PORT, /* 4 */ + IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT, + IPSET_ATTR_PORT_TO, /* 5 */ + IPSET_ATTR_TIMEOUT, /* 6 */ + IPSET_ATTR_PROTO, /* 7 */ + IPSET_ATTR_CADT_FLAGS, 
/* 8 */ + IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO, /* 9 */ + /* Reserve empty slots */ + IPSET_ATTR_CADT_MAX = 16, + /* Create-only specific attributes */ + IPSET_ATTR_GC, + IPSET_ATTR_HASHSIZE, + IPSET_ATTR_MAXELEM, + IPSET_ATTR_NETMASK, + IPSET_ATTR_PROBES, + IPSET_ATTR_RESIZE, + IPSET_ATTR_SIZE, + /* Kernel-only */ + IPSET_ATTR_ELEMENTS, + IPSET_ATTR_REFERENCES, + IPSET_ATTR_MEMSIZE, + + __IPSET_ATTR_CREATE_MAX, +}; +#define IPSET_ATTR_CREATE_MAX (__IPSET_ATTR_CREATE_MAX - 1) + +/* ADT specific attributes */ +enum { + IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1, + IPSET_ATTR_NAME, + IPSET_ATTR_NAMEREF, + IPSET_ATTR_IP2, + IPSET_ATTR_CIDR2, + IPSET_ATTR_IP2_TO, + IPSET_ATTR_IFACE, + __IPSET_ATTR_ADT_MAX, +}; +#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) + +/* IP specific attributes */ +enum { + IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1, + IPSET_ATTR_IPADDR_IPV6, + __IPSET_ATTR_IPADDR_MAX, +}; +#define IPSET_ATTR_IPADDR_MAX (__IPSET_ATTR_IPADDR_MAX - 1) + +/* Error codes */ +enum ipset_errno { + IPSET_ERR_PRIVATE = 4096, + IPSET_ERR_PROTOCOL, + IPSET_ERR_FIND_TYPE, + IPSET_ERR_MAX_SETS, + IPSET_ERR_BUSY, + IPSET_ERR_EXIST_SETNAME2, + IPSET_ERR_TYPE_MISMATCH, + IPSET_ERR_EXIST, + IPSET_ERR_INVALID_CIDR, + IPSET_ERR_INVALID_NETMASK, + IPSET_ERR_INVALID_FAMILY, + IPSET_ERR_TIMEOUT, + IPSET_ERR_REFERENCED, + IPSET_ERR_IPADDR_IPV4, + IPSET_ERR_IPADDR_IPV6, + + /* Type specific error codes */ + IPSET_ERR_TYPE_SPECIFIC = 4352, +}; + +/* Flags at command level */ +enum ipset_cmd_flags { + IPSET_FLAG_BIT_EXIST = 0, + IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST), + IPSET_FLAG_BIT_LIST_SETNAME = 1, + IPSET_FLAG_LIST_SETNAME = (1 << IPSET_FLAG_BIT_LIST_SETNAME), + IPSET_FLAG_BIT_LIST_HEADER = 2, + IPSET_FLAG_LIST_HEADER = (1 << IPSET_FLAG_BIT_LIST_HEADER), + IPSET_FLAG_CMD_MAX = 15, /* Lower half */ +}; + +/* Flags at CADT attribute level */ +enum ipset_cadt_flags { + IPSET_FLAG_BIT_BEFORE = 0, + IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE), + IPSET_FLAG_BIT_PHYSDEV = 1, + IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV), + IPSET_FLAG_BIT_NOMATCH = 2, + IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH), + IPSET_FLAG_CADT_MAX = 15, /* Upper half */ +}; + +/* Commands with settype-specific attributes */ +enum ipset_adt { + IPSET_ADD, + IPSET_DEL, + IPSET_TEST, + IPSET_ADT_MAX, + IPSET_CREATE = IPSET_ADT_MAX, + IPSET_CADT_MAX, +}; + +/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t + * and IPSET_INVALID_ID if you want to increase the max number of sets. + */ +typedef __u16 ip_set_id_t; + +#define IPSET_INVALID_ID 65535 + +enum ip_set_dim { + IPSET_DIM_ZERO = 0, + IPSET_DIM_ONE, + IPSET_DIM_TWO, + IPSET_DIM_THREE, + /* Max dimension in elements. + * If changed, new revision of iptables match/target is required. 
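+	 * (Editor's example, not in the original header: the dimension of
+	 * a set type is the number of src/dst flags the iptables set
+	 * match takes; e.g. a hash:ip,port,ip set has dimension 3 and is
+	 * typically matched with "-m set --match-set name src,dst,dst".)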
+ */ + IPSET_DIM_MAX = 6, +}; + +/* Option flags for kernel operations */ +enum ip_set_kopt { + IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO), + IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE), + IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO), + IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE), +}; + +#ifdef __KERNEL__ +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/netlink.h> +#include <linux/netfilter.h> +#include <linux/netfilter/x_tables.h> +#include <linux/vmalloc.h> +#include <net/netlink.h> + +/* Set features */ +enum ip_set_feature { + IPSET_TYPE_IP_FLAG = 0, + IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG), + IPSET_TYPE_PORT_FLAG = 1, + IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG), + IPSET_TYPE_MAC_FLAG = 2, + IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG), + IPSET_TYPE_IP2_FLAG = 3, + IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG), + IPSET_TYPE_NAME_FLAG = 4, + IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG), + IPSET_TYPE_IFACE_FLAG = 5, + IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG), + /* Strictly speaking not a feature, but a flag for dumping: + * this settype must be dumped last */ + IPSET_DUMP_LAST_FLAG = 7, + IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG), +}; + +struct ip_set; + +typedef int (*ipset_adtfn)(struct ip_set *set, void *value, + u32 timeout, u32 flags); + +/* Kernel API function options */ +struct ip_set_adt_opt { + u8 family; /* Actual protocol family */ + u8 dim; /* Dimension of match/target */ + u8 flags; /* Direction and negation flags */ + u32 cmdflags; /* Command-like flags */ + u32 timeout; /* Timeout value */ +}; + +/* Set type, variant-specific part */ +struct ip_set_type_variant { + /* Kernelspace: test/add/del entries + * returns negative error code, + * zero for no match/success to add/delete + * positive for matching element */ + int (*kadt)(struct ip_set *set, const struct sk_buff * skb, + const struct xt_action_param *par, + enum ipset_adt adt, const struct ip_set_adt_opt *opt); + + /* Userspace: test/add/del entries + * returns negative error code, + * zero for no match/success to add/delete + * positive for matching element */ + int (*uadt)(struct ip_set *set, struct nlattr *tb[], + enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); + + /* Low level add/del/test functions */ + ipset_adtfn adt[IPSET_ADT_MAX]; + + /* When adding entries and set is full, try to resize the set */ + int (*resize)(struct ip_set *set, bool retried); + /* Destroy the set */ + void (*destroy)(struct ip_set *set); + /* Flush the elements */ + void (*flush)(struct ip_set *set); + /* Expire entries before listing */ + void (*expire)(struct ip_set *set); + /* List set header data */ + int (*head)(struct ip_set *set, struct sk_buff *skb); + /* List elements */ + int (*list)(const struct ip_set *set, struct sk_buff *skb, + struct netlink_callback *cb); + + /* Return true if "b" set is the same as "a" + * according to the create set parameters */ + bool (*same_set)(const struct ip_set *a, const struct ip_set *b); +}; + +/* The core set type structure */ +struct ip_set_type { + struct list_head list; + + /* Typename */ + char name[IPSET_MAXNAMELEN]; + /* Protocol version */ + u8 protocol; + /* Set features to control swapping */ + u8 features; + /* Set type dimension */ + u8 dimension; + /* + * Supported family: may be NFPROTO_UNSPEC for both + * NFPROTO_IPV4/NFPROTO_IPV6. 
+ */ + u8 family; + /* Type revisions */ + u8 revision_min, revision_max; + + /* Create set */ + int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags); + + /* Attribute policies */ + const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1]; + const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1]; + + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; +}; + +/* register and unregister set type */ +extern int ip_set_type_register(struct ip_set_type *set_type); +extern void ip_set_type_unregister(struct ip_set_type *set_type); + +/* A generic IP set */ +struct ip_set { + /* The name of the set */ + char name[IPSET_MAXNAMELEN]; + /* Lock protecting the set data */ + rwlock_t lock; + /* References to the set */ + u32 ref; + /* The core set type */ + struct ip_set_type *type; + /* The type variant doing the real job */ + const struct ip_set_type_variant *variant; + /* The actual INET family of the set */ + u8 family; + /* The type revision */ + u8 revision; + /* The type specific data */ + void *data; +}; + +/* register and unregister set references */ +extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set); +extern void ip_set_put_byindex(ip_set_id_t index); +extern const char *ip_set_name_byindex(ip_set_id_t index); +extern ip_set_id_t ip_set_nfnl_get(const char *name); +extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index); +extern void ip_set_nfnl_put(ip_set_id_t index); + +/* API for iptables set match, and SET target */ + +extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb, + const struct xt_action_param *par, + const struct ip_set_adt_opt *opt); +extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb, + const struct xt_action_param *par, + const struct ip_set_adt_opt *opt); +extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb, + const struct xt_action_param *par, + const struct ip_set_adt_opt *opt); + +/* Utility functions */ +extern void *ip_set_alloc(size_t size); +extern void ip_set_free(void *members); +extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); +extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); + +static inline int +ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) +{ + __be32 ip; + int ret = ip_set_get_ipaddr4(nla, &ip); + + if (ret) + return ret; + *ipaddr = ntohl(ip); + return 0; +} + +/* Ignore IPSET_ERR_EXIST errors if asked to do so? 
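+ * (Editor's illustrative sketch, not part of the original header:
+ * set type uadt handlers commonly use this helper as
+ *
+ *	ret = adtfn(set, &data, timeout, flags);
+ *	if (ret && !ip_set_eexist(ret, flags))
+ *		return ret;
+ *
+ * so that re-adding an existing element is reported as success when
+ * userspace passed the IPSET_FLAG_EXIST ("-exist") flag.)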
*/ +static inline bool +ip_set_eexist(int ret, u32 flags) +{ + return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST); +} + +/* Check the NLA_F_NET_BYTEORDER flag */ +static inline bool +ip_set_attr_netorder(struct nlattr *tb[], int type) +{ + return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER); +} + +static inline bool +ip_set_optattr_netorder(struct nlattr *tb[], int type) +{ + return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER); +} + +/* Useful converters */ +static inline u32 +ip_set_get_h32(const struct nlattr *attr) +{ + return ntohl(nla_get_be32(attr)); +} + +static inline u16 +ip_set_get_h16(const struct nlattr *attr) +{ + return ntohs(nla_get_be16(attr)); +} + +#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED) +#define ipset_nest_end(skb, start) nla_nest_end(skb, start) + +#define NLA_PUT_IPADDR4(skb, type, ipaddr) \ +do { \ + struct nlattr *__nested = ipset_nest_start(skb, type); \ + \ + if (!__nested) \ + goto nla_put_failure; \ + NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); \ + ipset_nest_end(skb, __nested); \ +} while (0) + +#define NLA_PUT_IPADDR6(skb, type, ipaddrptr) \ +do { \ + struct nlattr *__nested = ipset_nest_start(skb, type); \ + \ + if (!__nested) \ + goto nla_put_failure; \ + NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6, \ + sizeof(struct in6_addr), ipaddrptr); \ + ipset_nest_end(skb, __nested); \ +} while (0) + +/* Get address from skbuff */ +static inline __be32 +ip4addr(const struct sk_buff *skb, bool src) +{ + return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr; +} + +static inline void +ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr) +{ + *addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr; +} + +static inline void +ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr) +{ + memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr, + sizeof(*addr)); +} + +/* Calculate the bytes required to store the inclusive range of a-b */ +static inline int +bitmap_bytes(u32 a, u32 b) +{ + return 4 * ((((b - a + 8) / 8) + 3) / 4); +} + +#endif /* __KERNEL__ */ + +/* Interface to iptables/ip6tables */ + +#define SO_IP_SET 83 + +union ip_set_name_index { + char name[IPSET_MAXNAMELEN]; + ip_set_id_t index; +}; + +#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */ +struct ip_set_req_get_set { + unsigned op; + unsigned version; + union ip_set_name_index set; +}; + +#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */ +/* Uses ip_set_req_get_set */ + +#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */ +struct ip_set_req_version { + unsigned op; + unsigned version; +}; + +#endif /*_IP_SET_H */ diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h new file mode 100644 index 00000000..230a290e --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_ahash.h @@ -0,0 +1,1214 @@ +#ifndef _IP_SET_AHASH_H +#define _IP_SET_AHASH_H + +#include <linux/rcupdate.h> +#include <linux/jhash.h> +#include <linux/netfilter/ipset/ip_set_timeout.h> + +#define CONCAT(a, b, c) a##b##c +#define TOKEN(a, b, c) CONCAT(a, b, c) + +#define type_pf_next TOKEN(TYPE, PF, _elem) + +/* Hashing which uses arrays to resolve clashing. The hash table is resized + * (doubled) when searching becomes too long. + * Internally jhash is used with the assumption that the size of the + * stored data is a multiple of sizeof(u32). 
If storage supports timeout, + * the timeout field must be the last one in the data structure - that field + * is ignored when computing the hash key. + * + * Readers and resizing + * + * Resizing can be triggered by userspace command only, and those + * are serialized by the nfnl mutex. During resizing the set is + * read-locked, so the only possible concurrent operations are + * the kernel side readers. Those must be protected by proper RCU locking. + */ + +/* Number of elements to store in an initial array block */ +#define AHASH_INIT_SIZE 4 +/* Max number of elements to store in an array block */ +#define AHASH_MAX_SIZE (3*AHASH_INIT_SIZE) + +/* Max number of elements can be tuned */ +#ifdef IP_SET_HASH_WITH_MULTI +#define AHASH_MAX(h) ((h)->ahash_max) + +static inline u8 +tune_ahash_max(u8 curr, u32 multi) +{ + u32 n; + + if (multi < curr) + return curr; + + n = curr + AHASH_INIT_SIZE; + /* Currently, at listing one hash bucket must fit into a message. + * Therefore we have a hard limit here. + */ + return n > curr && n <= 64 ? n : curr; +} +#define TUNE_AHASH_MAX(h, multi) \ + ((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi)) +#else +#define AHASH_MAX(h) AHASH_MAX_SIZE +#define TUNE_AHASH_MAX(h, multi) +#endif + +/* A hash bucket */ +struct hbucket { + void *value; /* the array of the values */ + u8 size; /* size of the array */ + u8 pos; /* position of the first free entry */ +}; + +/* The hash table: the table size stored here in order to make resizing easy */ +struct htable { + u8 htable_bits; /* size of hash table == 2^htable_bits */ + struct hbucket bucket[0]; /* hashtable buckets */ +}; + +#define hbucket(h, i) (&((h)->bucket[i])) + +/* Book-keeping of the prefixes added to the set */ +struct ip_set_hash_nets { + u8 cidr; /* the different cidr values in the set */ + u32 nets; /* number of elements per cidr */ +}; + +/* The generic ip_set hash structure */ +struct ip_set_hash { + struct htable *table; /* the hash table */ + u32 maxelem; /* max elements in the hash */ + u32 elements; /* current element (vs timeout) */ + u32 initval; /* random jhash init value */ + u32 timeout; /* timeout value, if enabled */ + struct timer_list gc; /* garbage collection when timeout enabled */ + struct type_pf_next next; /* temporary storage for uadd */ +#ifdef IP_SET_HASH_WITH_MULTI + u8 ahash_max; /* max elements in an array block */ +#endif +#ifdef IP_SET_HASH_WITH_NETMASK + u8 netmask; /* netmask value for subnets to store */ +#endif +#ifdef IP_SET_HASH_WITH_RBTREE + struct rb_root rbtree; +#endif +#ifdef IP_SET_HASH_WITH_NETS + struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */ +#endif +}; + +static size_t +htable_size(u8 hbits) +{ + size_t hsize; + + /* We must fit both into u32 in jhash and size_t */ + if (hbits > 31) + return 0; + hsize = jhash_size(hbits); + if ((((size_t)-1) - sizeof(struct htable))/sizeof(struct hbucket) + < hsize) + return 0; + + return hsize * sizeof(struct hbucket) + sizeof(struct htable); +} + +/* Compute htable_bits from the user input parameter hashsize */ +static u8 +htable_bits(u32 hashsize) +{ + /* Assume that hashsize == 2^htable_bits */ + u8 bits = fls(hashsize - 1); + if (jhash_size(bits) != hashsize) + /* Round up to the first 2^n value */ + bits = fls(hashsize); + + return bits; +} + +#ifdef IP_SET_HASH_WITH_NETS +#ifdef IP_SET_HASH_WITH_NETS_PACKED +/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */ +#define CIDR(cidr) (cidr + 1) +#else +#define CIDR(cidr) (cidr) +#endif + +#define SET_HOST_MASK(family) (family == 
AF_INET ? 32 : 128) + +/* Network cidr size book keeping when the hash stores different + * sized networks */ +static void +add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) +{ + u8 i; + + ++h->nets[cidr-1].nets; + + pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets); + + if (h->nets[cidr-1].nets > 1) + return; + + /* New cidr size */ + for (i = 0; i < host_mask && h->nets[i].cidr; i++) { + /* Add in increasing prefix order, so larger cidr first */ + if (h->nets[i].cidr < cidr) + swap(h->nets[i].cidr, cidr); + } + if (i < host_mask) + h->nets[i].cidr = cidr; +} + +static void +del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) +{ + u8 i; + + --h->nets[cidr-1].nets; + + pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets); + + if (h->nets[cidr-1].nets != 0) + return; + + /* All entries with this cidr size deleted, so cleanup h->cidr[] */ + for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) { + if (h->nets[i].cidr == cidr) + h->nets[i].cidr = cidr = h->nets[i+1].cidr; + } + h->nets[i - 1].cidr = 0; +} +#endif + +/* Destroy the hashtable part of the set */ +static void +ahash_destroy(struct htable *t) +{ + struct hbucket *n; + u32 i; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { + n = hbucket(t, i); + if (n->size) + /* FIXME: use slab cache */ + kfree(n->value); + } + + ip_set_free(t); +} + +/* Calculate the actual memory size of the set data */ +static size_t +ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask) +{ + u32 i; + struct htable *t = h->table; + size_t memsize = sizeof(*h) + + sizeof(*t) +#ifdef IP_SET_HASH_WITH_NETS + + sizeof(struct ip_set_hash_nets) * host_mask +#endif + + jhash_size(t->htable_bits) * sizeof(struct hbucket); + + for (i = 0; i < jhash_size(t->htable_bits); i++) + memsize += t->bucket[i].size * dsize; + + return memsize; +} + +/* Flush a hash type of set: destroy all elements */ +static void +ip_set_hash_flush(struct ip_set *set) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct hbucket *n; + u32 i; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { + n = hbucket(t, i); + if (n->size) { + n->size = n->pos = 0; + /* FIXME: use slab cache */ + kfree(n->value); + } + } +#ifdef IP_SET_HASH_WITH_NETS + memset(h->nets, 0, sizeof(struct ip_set_hash_nets) + * SET_HOST_MASK(set->family)); +#endif + h->elements = 0; +} + +/* Destroy a hash type of set */ +static void +ip_set_hash_destroy(struct ip_set *set) +{ + struct ip_set_hash *h = set->data; + + if (with_timeout(h->timeout)) + del_timer_sync(&h->gc); + + ahash_destroy(h->table); +#ifdef IP_SET_HASH_WITH_RBTREE + rbtree_destroy(&h->rbtree); +#endif + kfree(h); + + set->data = NULL; +} + +#endif /* _IP_SET_AHASH_H */ + +#ifndef HKEY_DATALEN +#define HKEY_DATALEN sizeof(struct type_pf_elem) +#endif + +#define HKEY(data, initval, htable_bits) \ +(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \ + & jhash_mask(htable_bits)) + +#define CONCAT(a, b, c) a##b##c +#define TOKEN(a, b, c) CONCAT(a, b, c) + +/* Type/family dependent function prototypes */ + +#define type_pf_data_equal TOKEN(TYPE, PF, _data_equal) +#define type_pf_data_isnull TOKEN(TYPE, PF, _data_isnull) +#define type_pf_data_copy TOKEN(TYPE, PF, _data_copy) +#define type_pf_data_zero_out TOKEN(TYPE, PF, _data_zero_out) +#define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask) +#define type_pf_data_list TOKEN(TYPE, PF, _data_list) +#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist) +#define type_pf_data_next TOKEN(TYPE, PF, _data_next) +#define 
type_pf_data_flags TOKEN(TYPE, PF, _data_flags) +#ifdef IP_SET_HASH_WITH_NETS +#define type_pf_data_match TOKEN(TYPE, PF, _data_match) +#else +#define type_pf_data_match(d) 1 +#endif + +#define type_pf_elem TOKEN(TYPE, PF, _elem) +#define type_pf_telem TOKEN(TYPE, PF, _telem) +#define type_pf_data_timeout TOKEN(TYPE, PF, _data_timeout) +#define type_pf_data_expired TOKEN(TYPE, PF, _data_expired) +#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set) + +#define type_pf_elem_add TOKEN(TYPE, PF, _elem_add) +#define type_pf_add TOKEN(TYPE, PF, _add) +#define type_pf_del TOKEN(TYPE, PF, _del) +#define type_pf_test_cidrs TOKEN(TYPE, PF, _test_cidrs) +#define type_pf_test TOKEN(TYPE, PF, _test) + +#define type_pf_elem_tadd TOKEN(TYPE, PF, _elem_tadd) +#define type_pf_del_telem TOKEN(TYPE, PF, _ahash_del_telem) +#define type_pf_expire TOKEN(TYPE, PF, _expire) +#define type_pf_tadd TOKEN(TYPE, PF, _tadd) +#define type_pf_tdel TOKEN(TYPE, PF, _tdel) +#define type_pf_ttest_cidrs TOKEN(TYPE, PF, _ahash_ttest_cidrs) +#define type_pf_ttest TOKEN(TYPE, PF, _ahash_ttest) + +#define type_pf_resize TOKEN(TYPE, PF, _resize) +#define type_pf_tresize TOKEN(TYPE, PF, _tresize) +#define type_pf_flush ip_set_hash_flush +#define type_pf_destroy ip_set_hash_destroy +#define type_pf_head TOKEN(TYPE, PF, _head) +#define type_pf_list TOKEN(TYPE, PF, _list) +#define type_pf_tlist TOKEN(TYPE, PF, _tlist) +#define type_pf_same_set TOKEN(TYPE, PF, _same_set) +#define type_pf_kadt TOKEN(TYPE, PF, _kadt) +#define type_pf_uadt TOKEN(TYPE, PF, _uadt) +#define type_pf_gc TOKEN(TYPE, PF, _gc) +#define type_pf_gc_init TOKEN(TYPE, PF, _gc_init) +#define type_pf_variant TOKEN(TYPE, PF, _variant) +#define type_pf_tvariant TOKEN(TYPE, PF, _tvariant) + +/* Flavour without timeout */ + +/* Get the ith element from the array block n */ +#define ahash_data(n, i) \ + ((struct type_pf_elem *)((n)->value) + (i)) + +/* Add an element to the hash table when resizing the set: + * we spare the maintenance of the internal counters. */ +static int +type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value, + u8 ahash_max, u32 cadt_flags) +{ + struct type_pf_elem *data; + + if (n->pos >= n->size) { + void *tmp; + + if (n->size >= ahash_max) + /* Trigger rehashing */ + return -EAGAIN; + + tmp = kzalloc((n->size + AHASH_INIT_SIZE) + * sizeof(struct type_pf_elem), + GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + if (n->size) { + memcpy(tmp, n->value, + sizeof(struct type_pf_elem) * n->size); + kfree(n->value); + } + n->value = tmp; + n->size += AHASH_INIT_SIZE; + } + data = ahash_data(n, n->pos++); + type_pf_data_copy(data, value); +#ifdef IP_SET_HASH_WITH_NETS + /* Resizing won't overwrite stored flags */ + if (cadt_flags) + type_pf_data_flags(data, cadt_flags); +#endif + return 0; +} + +/* Resize a hash: create a new hash table with doubling the hashsize + * and inserting the elements to it. Repeat until we succeed or + * fail due to memory pressures. 
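+ * (Editor's note, not part of the original header: the copy below
+ * reuses type_pf_elem_add(), which returns -EAGAIN when a destination
+ * bucket would grow past AHASH_MAX(h); the partially built table is
+ * then destroyed and the copy is retried with htable_bits + 1, i.e.
+ * twice as many buckets. Readers keep using the old table until
+ * rcu_assign_pointer() and synchronize_rcu_bh() publish the new one.)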
*/ +static int +type_pf_resize(struct ip_set *set, bool retried) +{ + struct ip_set_hash *h = set->data; + struct htable *t, *orig = h->table; + u8 htable_bits = orig->htable_bits; + const struct type_pf_elem *data; + struct hbucket *n, *m; + u32 i, j; + int ret; + +retry: + ret = 0; + htable_bits++; + pr_debug("attempt to resize set %s from %u to %u, t %p\n", + set->name, orig->htable_bits, htable_bits, orig); + if (!htable_bits) { + /* In case we have plenty of memory :-) */ + pr_warning("Cannot increase the hashsize of set %s further\n", + set->name); + return -IPSET_ERR_HASH_FULL; + } + t = ip_set_alloc(sizeof(*t) + + jhash_size(htable_bits) * sizeof(struct hbucket)); + if (!t) + return -ENOMEM; + t->htable_bits = htable_bits; + + read_lock_bh(&set->lock); + for (i = 0; i < jhash_size(orig->htable_bits); i++) { + n = hbucket(orig, i); + for (j = 0; j < n->pos; j++) { + data = ahash_data(n, j); + m = hbucket(t, HKEY(data, h->initval, htable_bits)); + ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0); + if (ret < 0) { + read_unlock_bh(&set->lock); + ahash_destroy(t); + if (ret == -EAGAIN) + goto retry; + return ret; + } + } + } + + rcu_assign_pointer(h->table, t); + read_unlock_bh(&set->lock); + + /* Give time to other readers of the set */ + synchronize_rcu_bh(); + + pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name, + orig->htable_bits, orig, t->htable_bits, t); + ahash_destroy(orig); + + return 0; +} + +static inline void +type_pf_data_next(struct ip_set_hash *h, const struct type_pf_elem *d); + +/* Add an element to a hash and update the internal counters when succeeded, + * otherwise report the proper error code. */ +static int +type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags) +{ + struct ip_set_hash *h = set->data; + struct htable *t; + const struct type_pf_elem *d = value; + struct hbucket *n; + int i, ret = 0; + u32 key, multi = 0; + u32 cadt_flags = flags >> 16; + + if (h->elements >= h->maxelem) { + if (net_ratelimit()) + pr_warning("Set %s is full, maxelem %u reached\n", + set->name, h->maxelem); + return -IPSET_ERR_HASH_FULL; + } + + rcu_read_lock_bh(); + t = rcu_dereference_bh(h->table); + key = HKEY(value, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) + if (type_pf_data_equal(ahash_data(n, i), d, &multi)) { +#ifdef IP_SET_HASH_WITH_NETS + if (flags & IPSET_FLAG_EXIST) + /* Support overwriting just the flags */ + type_pf_data_flags(ahash_data(n, i), + cadt_flags); +#endif + ret = -IPSET_ERR_EXIST; + goto out; + } + TUNE_AHASH_MAX(h, multi); + ret = type_pf_elem_add(n, value, AHASH_MAX(h), cadt_flags); + if (ret != 0) { + if (ret == -EAGAIN) + type_pf_data_next(h, d); + goto out; + } + +#ifdef IP_SET_HASH_WITH_NETS + add_cidr(h, CIDR(d->cidr), HOST_MASK); +#endif + h->elements++; +out: + rcu_read_unlock_bh(); + return ret; +} + +/* Delete an element from the hash: swap it with the last element + * and free up space if possible. 
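+ * (Editor's worked example, not part of the original header: the
+ * bucket array is shrunk by AHASH_INIT_SIZE slots when
+ * n->pos + AHASH_INIT_SIZE < n->size still holds after the removal;
+ * with AHASH_INIT_SIZE == 4, a bucket grown to 8 slots that drops to
+ * 3 live entries is reallocated back to 4 slots.)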
+ */ +static int +type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + const struct type_pf_elem *d = value; + struct hbucket *n; + int i; + struct type_pf_elem *data; + u32 key, multi = 0; + + key = HKEY(value, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i); + if (!type_pf_data_equal(data, d, &multi)) + continue; + if (i != n->pos - 1) + /* Not last one */ + type_pf_data_copy(data, ahash_data(n, n->pos - 1)); + + n->pos--; + h->elements--; +#ifdef IP_SET_HASH_WITH_NETS + del_cidr(h, CIDR(d->cidr), HOST_MASK); +#endif + if (n->pos + AHASH_INIT_SIZE < n->size) { + void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) + * sizeof(struct type_pf_elem), + GFP_ATOMIC); + if (!tmp) + return 0; + n->size -= AHASH_INIT_SIZE; + memcpy(tmp, n->value, + n->size * sizeof(struct type_pf_elem)); + kfree(n->value); + n->value = tmp; + } + return 0; + } + + return -IPSET_ERR_EXIST; +} + +#ifdef IP_SET_HASH_WITH_NETS + +/* Special test function which takes into account the different network + * sizes added to the set */ +static int +type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct hbucket *n; + const struct type_pf_elem *data; + int i, j = 0; + u32 key, multi = 0; + u8 host_mask = SET_HOST_MASK(set->family); + + pr_debug("test by nets\n"); + for (; j < host_mask && h->nets[j].cidr && !multi; j++) { + type_pf_data_netmask(d, h->nets[j].cidr); + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i); + if (type_pf_data_equal(data, d, &multi)) + return type_pf_data_match(data); + } + } + return 0; +} +#endif + +/* Test whether the element is added to the set */ +static int +type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct type_pf_elem *d = value; + struct hbucket *n; + const struct type_pf_elem *data; + int i; + u32 key, multi = 0; + +#ifdef IP_SET_HASH_WITH_NETS + /* If we test an IP address and not a network address, + * try all possible network sizes */ + if (CIDR(d->cidr) == SET_HOST_MASK(set->family)) + return type_pf_test_cidrs(set, d, timeout); +#endif + + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i); + if (type_pf_data_equal(data, d, &multi)) + return type_pf_data_match(data); + } + return 0; +} + +/* Reply a HEADER request: fill out the header part of the set */ +static int +type_pf_head(struct ip_set *set, struct sk_buff *skb) +{ + const struct ip_set_hash *h = set->data; + struct nlattr *nested; + size_t memsize; + + read_lock_bh(&set->lock); + memsize = ahash_memsize(h, with_timeout(h->timeout) + ? sizeof(struct type_pf_telem) + : sizeof(struct type_pf_elem), + set->family == AF_INET ? 
32 : 128); + read_unlock_bh(&set->lock); + + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) + goto nla_put_failure; + NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE, + htonl(jhash_size(h->table->htable_bits))); + NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)); +#ifdef IP_SET_HASH_WITH_NETMASK + if (h->netmask != HOST_MASK) + NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask); +#endif + NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); + NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)); + if (with_timeout(h->timeout)) + NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)); + ipset_nest_end(skb, nested); + + return 0; +nla_put_failure: + return -EMSGSIZE; +} + +/* Reply a LIST/SAVE request: dump the elements of the specified set */ +static int +type_pf_list(const struct ip_set *set, + struct sk_buff *skb, struct netlink_callback *cb) +{ + const struct ip_set_hash *h = set->data; + const struct htable *t = h->table; + struct nlattr *atd, *nested; + const struct hbucket *n; + const struct type_pf_elem *data; + u32 first = cb->args[2]; + /* We assume that one hash bucket fills into one page */ + void *incomplete; + int i; + + atd = ipset_nest_start(skb, IPSET_ATTR_ADT); + if (!atd) + return -EMSGSIZE; + pr_debug("list hash set %s\n", set->name); + for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { + incomplete = skb_tail_pointer(skb); + n = hbucket(t, cb->args[2]); + pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n); + for (i = 0; i < n->pos; i++) { + data = ahash_data(n, i); + pr_debug("list hash %lu hbucket %p i %u, data %p\n", + cb->args[2], n, i, data); + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) { + if (cb->args[2] == first) { + nla_nest_cancel(skb, atd); + return -EMSGSIZE; + } else + goto nla_put_failure; + } + if (type_pf_data_list(skb, data)) + goto nla_put_failure; + ipset_nest_end(skb, nested); + } + } + ipset_nest_end(skb, atd); + /* Set listing finished */ + cb->args[2] = 0; + + return 0; + +nla_put_failure: + nlmsg_trim(skb, incomplete); + ipset_nest_end(skb, atd); + if (unlikely(first == cb->args[2])) { + pr_warning("Can't list set %s: one bucket does not fit into " + "a message. 
Please report it!\n", set->name); + cb->args[2] = 0; + return -EMSGSIZE; + } + return 0; +} + +static int +type_pf_kadt(struct ip_set *set, const struct sk_buff * skb, + const struct xt_action_param *par, + enum ipset_adt adt, const struct ip_set_adt_opt *opt); +static int +type_pf_uadt(struct ip_set *set, struct nlattr *tb[], + enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); + +static const struct ip_set_type_variant type_pf_variant = { + .kadt = type_pf_kadt, + .uadt = type_pf_uadt, + .adt = { + [IPSET_ADD] = type_pf_add, + [IPSET_DEL] = type_pf_del, + [IPSET_TEST] = type_pf_test, + }, + .destroy = type_pf_destroy, + .flush = type_pf_flush, + .head = type_pf_head, + .list = type_pf_list, + .resize = type_pf_resize, + .same_set = type_pf_same_set, +}; + +/* Flavour with timeout support */ + +#define ahash_tdata(n, i) \ + (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i)) + +static inline u32 +type_pf_data_timeout(const struct type_pf_elem *data) +{ + const struct type_pf_telem *tdata = + (const struct type_pf_telem *) data; + + return tdata->timeout; +} + +static inline bool +type_pf_data_expired(const struct type_pf_elem *data) +{ + const struct type_pf_telem *tdata = + (const struct type_pf_telem *) data; + + return ip_set_timeout_expired(tdata->timeout); +} + +static inline void +type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout) +{ + struct type_pf_telem *tdata = (struct type_pf_telem *) data; + + tdata->timeout = ip_set_timeout_set(timeout); +} + +static int +type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value, + u8 ahash_max, u32 cadt_flags, u32 timeout) +{ + struct type_pf_elem *data; + + if (n->pos >= n->size) { + void *tmp; + + if (n->size >= ahash_max) + /* Trigger rehashing */ + return -EAGAIN; + + tmp = kzalloc((n->size + AHASH_INIT_SIZE) + * sizeof(struct type_pf_telem), + GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + if (n->size) { + memcpy(tmp, n->value, + sizeof(struct type_pf_telem) * n->size); + kfree(n->value); + } + n->value = tmp; + n->size += AHASH_INIT_SIZE; + } + data = ahash_tdata(n, n->pos++); + type_pf_data_copy(data, value); + type_pf_data_timeout_set(data, timeout); +#ifdef IP_SET_HASH_WITH_NETS + /* Resizing won't overwrite stored flags */ + if (cadt_flags) + type_pf_data_flags(data, cadt_flags); +#endif + return 0; +} + +/* Delete expired elements from the hashtable */ +static void +type_pf_expire(struct ip_set_hash *h) +{ + struct htable *t = h->table; + struct hbucket *n; + struct type_pf_elem *data; + u32 i; + int j; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { + n = hbucket(t, i); + for (j = 0; j < n->pos; j++) { + data = ahash_tdata(n, j); + if (type_pf_data_expired(data)) { + pr_debug("expired %u/%u\n", i, j); +#ifdef IP_SET_HASH_WITH_NETS + del_cidr(h, CIDR(data->cidr), HOST_MASK); +#endif + if (j != n->pos - 1) + /* Not last one */ + type_pf_data_copy(data, + ahash_tdata(n, n->pos - 1)); + n->pos--; + h->elements--; + } + } + if (n->pos + AHASH_INIT_SIZE < n->size) { + void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) + * sizeof(struct type_pf_telem), + GFP_ATOMIC); + if (!tmp) + /* Still try to delete expired elements */ + continue; + n->size -= AHASH_INIT_SIZE; + memcpy(tmp, n->value, + n->size * sizeof(struct type_pf_telem)); + kfree(n->value); + n->value = tmp; + } + } +} + +static int +type_pf_tresize(struct ip_set *set, bool retried) +{ + struct ip_set_hash *h = set->data; + struct htable *t, *orig = h->table; + u8 htable_bits = orig->htable_bits; + const struct type_pf_elem 
*data; + struct hbucket *n, *m; + u32 i, j; + int ret; + + /* Try to cleanup once */ + if (!retried) { + i = h->elements; + write_lock_bh(&set->lock); + type_pf_expire(set->data); + write_unlock_bh(&set->lock); + if (h->elements < i) + return 0; + } + +retry: + ret = 0; + htable_bits++; + if (!htable_bits) { + /* In case we have plenty of memory :-) */ + pr_warning("Cannot increase the hashsize of set %s further\n", + set->name); + return -IPSET_ERR_HASH_FULL; + } + t = ip_set_alloc(sizeof(*t) + + jhash_size(htable_bits) * sizeof(struct hbucket)); + if (!t) + return -ENOMEM; + t->htable_bits = htable_bits; + + read_lock_bh(&set->lock); + for (i = 0; i < jhash_size(orig->htable_bits); i++) { + n = hbucket(orig, i); + for (j = 0; j < n->pos; j++) { + data = ahash_tdata(n, j); + m = hbucket(t, HKEY(data, h->initval, htable_bits)); + ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0, + type_pf_data_timeout(data)); + if (ret < 0) { + read_unlock_bh(&set->lock); + ahash_destroy(t); + if (ret == -EAGAIN) + goto retry; + return ret; + } + } + } + + rcu_assign_pointer(h->table, t); + read_unlock_bh(&set->lock); + + /* Give time to other readers of the set */ + synchronize_rcu_bh(); + + ahash_destroy(orig); + + return 0; +} + +static int +type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + const struct type_pf_elem *d = value; + struct hbucket *n; + struct type_pf_elem *data; + int ret = 0, i, j = AHASH_MAX(h) + 1; + bool flag_exist = flags & IPSET_FLAG_EXIST; + u32 key, multi = 0; + u32 cadt_flags = flags >> 16; + + if (h->elements >= h->maxelem) + /* FIXME: when set is full, we slow down here */ + type_pf_expire(h); + if (h->elements >= h->maxelem) { + if (net_ratelimit()) + pr_warning("Set %s is full, maxelem %u reached\n", + set->name, h->maxelem); + return -IPSET_ERR_HASH_FULL; + } + + rcu_read_lock_bh(); + t = rcu_dereference_bh(h->table); + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); + if (type_pf_data_equal(data, d, &multi)) { + if (type_pf_data_expired(data) || flag_exist) + /* Just timeout value may be updated */ + j = i; + else { + ret = -IPSET_ERR_EXIST; + goto out; + } + } else if (j == AHASH_MAX(h) + 1 && + type_pf_data_expired(data)) + j = i; + } + if (j != AHASH_MAX(h) + 1) { + data = ahash_tdata(n, j); +#ifdef IP_SET_HASH_WITH_NETS + del_cidr(h, CIDR(data->cidr), HOST_MASK); + add_cidr(h, CIDR(d->cidr), HOST_MASK); +#endif + type_pf_data_copy(data, d); + type_pf_data_timeout_set(data, timeout); +#ifdef IP_SET_HASH_WITH_NETS + type_pf_data_flags(data, cadt_flags); +#endif + goto out; + } + TUNE_AHASH_MAX(h, multi); + ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), cadt_flags, timeout); + if (ret != 0) { + if (ret == -EAGAIN) + type_pf_data_next(h, d); + goto out; + } + +#ifdef IP_SET_HASH_WITH_NETS + add_cidr(h, CIDR(d->cidr), HOST_MASK); +#endif + h->elements++; +out: + rcu_read_unlock_bh(); + return ret; +} + +static int +type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + const struct type_pf_elem *d = value; + struct hbucket *n; + int i; + struct type_pf_elem *data; + u32 key, multi = 0; + + key = HKEY(value, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); + if (!type_pf_data_equal(data, d, &multi)) + continue; + if (type_pf_data_expired(data)) + return 
-IPSET_ERR_EXIST; + if (i != n->pos - 1) + /* Not last one */ + type_pf_data_copy(data, ahash_tdata(n, n->pos - 1)); + + n->pos--; + h->elements--; +#ifdef IP_SET_HASH_WITH_NETS + del_cidr(h, CIDR(d->cidr), HOST_MASK); +#endif + if (n->pos + AHASH_INIT_SIZE < n->size) { + void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) + * sizeof(struct type_pf_telem), + GFP_ATOMIC); + if (!tmp) + return 0; + n->size -= AHASH_INIT_SIZE; + memcpy(tmp, n->value, + n->size * sizeof(struct type_pf_telem)); + kfree(n->value); + n->value = tmp; + } + return 0; + } + + return -IPSET_ERR_EXIST; +} + +#ifdef IP_SET_HASH_WITH_NETS +static int +type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct type_pf_elem *data; + struct hbucket *n; + int i, j = 0; + u32 key, multi = 0; + u8 host_mask = SET_HOST_MASK(set->family); + + for (; j < host_mask && h->nets[j].cidr && !multi; j++) { + type_pf_data_netmask(d, h->nets[j].cidr); + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); +#ifdef IP_SET_HASH_WITH_MULTI + if (type_pf_data_equal(data, d, &multi)) { + if (!type_pf_data_expired(data)) + return type_pf_data_match(data); + multi = 0; + } +#else + if (type_pf_data_equal(data, d, &multi) && + !type_pf_data_expired(data)) + return type_pf_data_match(data); +#endif + } + } + return 0; +} +#endif + +static int +type_pf_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags) +{ + struct ip_set_hash *h = set->data; + struct htable *t = h->table; + struct type_pf_elem *data, *d = value; + struct hbucket *n; + int i; + u32 key, multi = 0; + +#ifdef IP_SET_HASH_WITH_NETS + if (CIDR(d->cidr) == SET_HOST_MASK(set->family)) + return type_pf_ttest_cidrs(set, d, timeout); +#endif + key = HKEY(d, h->initval, t->htable_bits); + n = hbucket(t, key); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); + if (type_pf_data_equal(data, d, &multi) && + !type_pf_data_expired(data)) + return type_pf_data_match(data); + } + return 0; +} + +static int +type_pf_tlist(const struct ip_set *set, + struct sk_buff *skb, struct netlink_callback *cb) +{ + const struct ip_set_hash *h = set->data; + const struct htable *t = h->table; + struct nlattr *atd, *nested; + const struct hbucket *n; + const struct type_pf_elem *data; + u32 first = cb->args[2]; + /* We assume that one hash bucket fills into one page */ + void *incomplete; + int i; + + atd = ipset_nest_start(skb, IPSET_ATTR_ADT); + if (!atd) + return -EMSGSIZE; + for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) { + incomplete = skb_tail_pointer(skb); + n = hbucket(t, cb->args[2]); + for (i = 0; i < n->pos; i++) { + data = ahash_tdata(n, i); + pr_debug("list %p %u\n", n, i); + if (type_pf_data_expired(data)) + continue; + pr_debug("do list %p %u\n", n, i); + nested = ipset_nest_start(skb, IPSET_ATTR_DATA); + if (!nested) { + if (cb->args[2] == first) { + nla_nest_cancel(skb, atd); + return -EMSGSIZE; + } else + goto nla_put_failure; + } + if (type_pf_data_tlist(skb, data)) + goto nla_put_failure; + ipset_nest_end(skb, nested); + } + } + ipset_nest_end(skb, atd); + /* Set listing finished */ + cb->args[2] = 0; + + return 0; + +nla_put_failure: + nlmsg_trim(skb, incomplete); + ipset_nest_end(skb, atd); + if (unlikely(first == cb->args[2])) { + pr_warning("Can't list set %s: one bucket does not fit into " + "a message. 
Please report it!\n", set->name); + cb->args[2] = 0; + return -EMSGSIZE; + } + return 0; +} + +static const struct ip_set_type_variant type_pf_tvariant = { + .kadt = type_pf_kadt, + .uadt = type_pf_uadt, + .adt = { + [IPSET_ADD] = type_pf_tadd, + [IPSET_DEL] = type_pf_tdel, + [IPSET_TEST] = type_pf_ttest, + }, + .destroy = type_pf_destroy, + .flush = type_pf_flush, + .head = type_pf_head, + .list = type_pf_tlist, + .resize = type_pf_tresize, + .same_set = type_pf_same_set, +}; + +static void +type_pf_gc(unsigned long ul_set) +{ + struct ip_set *set = (struct ip_set *) ul_set; + struct ip_set_hash *h = set->data; + + pr_debug("called\n"); + write_lock_bh(&set->lock); + type_pf_expire(h); + write_unlock_bh(&set->lock); + + h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; + add_timer(&h->gc); +} + +static void +type_pf_gc_init(struct ip_set *set) +{ + struct ip_set_hash *h = set->data; + + init_timer(&h->gc); + h->gc.data = (unsigned long) set; + h->gc.function = type_pf_gc; + h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; + add_timer(&h->gc); + pr_debug("gc initialized, run in every %u\n", + IPSET_GC_PERIOD(h->timeout)); +} + +#undef HKEY_DATALEN +#undef HKEY +#undef type_pf_data_equal +#undef type_pf_data_isnull +#undef type_pf_data_copy +#undef type_pf_data_zero_out +#undef type_pf_data_netmask +#undef type_pf_data_list +#undef type_pf_data_tlist +#undef type_pf_data_next +#undef type_pf_data_flags +#undef type_pf_data_match + +#undef type_pf_elem +#undef type_pf_telem +#undef type_pf_data_timeout +#undef type_pf_data_expired +#undef type_pf_data_timeout_set + +#undef type_pf_elem_add +#undef type_pf_add +#undef type_pf_del +#undef type_pf_test_cidrs +#undef type_pf_test + +#undef type_pf_elem_tadd +#undef type_pf_del_telem +#undef type_pf_expire +#undef type_pf_tadd +#undef type_pf_tdel +#undef type_pf_ttest_cidrs +#undef type_pf_ttest + +#undef type_pf_resize +#undef type_pf_tresize +#undef type_pf_flush +#undef type_pf_destroy +#undef type_pf_head +#undef type_pf_list +#undef type_pf_tlist +#undef type_pf_same_set +#undef type_pf_kadt +#undef type_pf_uadt +#undef type_pf_gc +#undef type_pf_gc_init +#undef type_pf_variant +#undef type_pf_tvariant diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h new file mode 100644 index 00000000..61a9e874 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_bitmap.h @@ -0,0 +1,31 @@ +#ifndef __IP_SET_BITMAP_H +#define __IP_SET_BITMAP_H + +/* Bitmap type specific error codes */ +enum { + /* The element is out of the range of the set */ + IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC, + /* The range exceeds the size limit of the set type */ + IPSET_ERR_BITMAP_RANGE_SIZE, +}; + +#ifdef __KERNEL__ +#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF + +/* Common functions */ + +static inline u32 +range_to_mask(u32 from, u32 to, u8 *bits) +{ + u32 mask = 0xFFFFFFFE; + + *bits = 32; + while (--(*bits) > 0 && mask && (to & mask) != from) + mask <<= 1; + + return mask; +} + +#endif /* __KERNEL__ */ + +#endif /* __IP_SET_BITMAP_H */ diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h new file mode 100644 index 00000000..90d09300 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_getport.h @@ -0,0 +1,33 @@ +#ifndef _IP_SET_GETPORT_H +#define _IP_SET_GETPORT_H + +extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, + __be16 *port, u8 *proto); + +#if defined(CONFIG_IP6_NF_IPTABLES) || 
defined(CONFIG_IP6_NF_IPTABLES_MODULE) +extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, + __be16 *port, u8 *proto); +#else +static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, + __be16 *port, u8 *proto) +{ + return false; +} +#endif + +extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, + __be16 *port); + +static inline bool ip_set_proto_with_ports(u8 proto) +{ + switch (proto) { + case IPPROTO_TCP: + case IPPROTO_SCTP: + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + return true; + } + return false; +} + +#endif /*_IP_SET_GETPORT_H*/ diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h new file mode 100644 index 00000000..e2a9fae7 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_hash.h @@ -0,0 +1,30 @@ +#ifndef __IP_SET_HASH_H +#define __IP_SET_HASH_H + +/* Hash type specific error codes */ +enum { + /* Hash is full */ + IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC, + /* Null-valued element */ + IPSET_ERR_HASH_ELEM, + /* Invalid protocol */ + IPSET_ERR_INVALID_PROTO, + /* Protocol missing but must be specified */ + IPSET_ERR_MISSING_PROTO, + /* Range not supported */ + IPSET_ERR_HASH_RANGE_UNSUPPORTED, + /* Invalid range */ + IPSET_ERR_HASH_RANGE, +}; + +#ifdef __KERNEL__ + +#define IPSET_DEFAULT_HASHSIZE 1024 +#define IPSET_MIMINAL_HASHSIZE 64 +#define IPSET_DEFAULT_MAXELEM 65536 +#define IPSET_DEFAULT_PROBES 4 +#define IPSET_DEFAULT_RESIZE 100 + +#endif /* __KERNEL__ */ + +#endif /* __IP_SET_HASH_H */ diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h new file mode 100644 index 00000000..40a63f30 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_list.h @@ -0,0 +1,27 @@ +#ifndef __IP_SET_LIST_H +#define __IP_SET_LIST_H + +/* List type specific error codes */ +enum { + /* Set name to be added/deleted/tested does not exist. */ + IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC, + /* list:set type is not permitted to add */ + IPSET_ERR_LOOP, + /* Missing reference set */ + IPSET_ERR_BEFORE, + /* Reference set does not exist */ + IPSET_ERR_NAMEREF, + /* Set is full */ + IPSET_ERR_LIST_FULL, + /* Reference set is not added to the set */ + IPSET_ERR_REF_EXIST, +}; + +#ifdef __KERNEL__ + +#define IP_SET_LIST_DEFAULT_SIZE 8 +#define IP_SET_LIST_MIN_SIZE 4 + +#endif /* __KERNEL__ */ + +#endif /* __IP_SET_LIST_H */ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h new file mode 100644 index 00000000..47923205 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -0,0 +1,132 @@ +#ifndef _IP_SET_TIMEOUT_H +#define _IP_SET_TIMEOUT_H + +/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifdef __KERNEL__ + +/* How often should the gc be run by default */ +#define IPSET_GC_TIME (3 * 60) + +/* Timeout period depending on the timeout value of the given set */ +#define IPSET_GC_PERIOD(timeout) \ + ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) + +/* Set is defined without timeout support: timeout value may be 0 */ +#define IPSET_NO_TIMEOUT UINT_MAX + +#define with_timeout(timeout) ((timeout) != IPSET_NO_TIMEOUT) + +#define opt_timeout(opt, map) \ + (with_timeout((opt)->timeout) ? 
(opt)->timeout : (map)->timeout) + +static inline unsigned int +ip_set_timeout_uget(struct nlattr *tb) +{ + unsigned int timeout = ip_set_get_h32(tb); + + /* Userspace supplied TIMEOUT parameter: adjust crazy size */ + return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout; +} + +#ifdef IP_SET_BITMAP_TIMEOUT + +/* Bitmap specific timeout constants and macros for the entries */ + +/* Bitmap entry is unset */ +#define IPSET_ELEM_UNSET 0 +/* Bitmap entry is set with no timeout value */ +#define IPSET_ELEM_PERMANENT (UINT_MAX/2) + +static inline bool +ip_set_timeout_test(unsigned long timeout) +{ + return timeout != IPSET_ELEM_UNSET && + (timeout == IPSET_ELEM_PERMANENT || + time_is_after_jiffies(timeout)); +} + +static inline bool +ip_set_timeout_expired(unsigned long timeout) +{ + return timeout != IPSET_ELEM_UNSET && + timeout != IPSET_ELEM_PERMANENT && + time_is_before_jiffies(timeout); +} + +static inline unsigned long +ip_set_timeout_set(u32 timeout) +{ + unsigned long t; + + if (!timeout) + return IPSET_ELEM_PERMANENT; + + t = msecs_to_jiffies(timeout * 1000) + jiffies; + if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT) + /* Bingo! */ + t++; + + return t; +} + +static inline u32 +ip_set_timeout_get(unsigned long timeout) +{ + return timeout == IPSET_ELEM_PERMANENT ? 0 : + jiffies_to_msecs(timeout - jiffies)/1000; +} + +#else + +/* Hash specific timeout constants and macros for the entries */ + +/* Hash entry is set with no timeout value */ +#define IPSET_ELEM_PERMANENT 0 + +static inline bool +ip_set_timeout_test(unsigned long timeout) +{ + return timeout == IPSET_ELEM_PERMANENT || + time_is_after_jiffies(timeout); +} + +static inline bool +ip_set_timeout_expired(unsigned long timeout) +{ + return timeout != IPSET_ELEM_PERMANENT && + time_is_before_jiffies(timeout); +} + +static inline unsigned long +ip_set_timeout_set(u32 timeout) +{ + unsigned long t; + + if (!timeout) + return IPSET_ELEM_PERMANENT; + + t = msecs_to_jiffies(timeout * 1000) + jiffies; + if (t == IPSET_ELEM_PERMANENT) + /* Bingo! :-) */ + t++; + + return t; +} + +static inline u32 +ip_set_timeout_get(unsigned long timeout) +{ + return timeout == IPSET_ELEM_PERMANENT ? 0 : + jiffies_to_msecs(timeout - jiffies)/1000; +} +#endif /* ! 
IP_SET_BITMAP_TIMEOUT */ + +#endif /* __KERNEL__ */ + +#endif /* _IP_SET_TIMEOUT_H */ diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h new file mode 100644 index 00000000..199fd11f --- /dev/null +++ b/include/linux/netfilter/ipset/pfxlen.h @@ -0,0 +1,44 @@ +#ifndef _PFXLEN_H +#define _PFXLEN_H + +#include <asm/byteorder.h> +#include <linux/netfilter.h> +#include <net/tcp.h> + +/* Prefixlen maps, by Jan Engelhardt */ +extern const union nf_inet_addr ip_set_netmask_map[]; +extern const union nf_inet_addr ip_set_hostmask_map[]; + +static inline __be32 +ip_set_netmask(u8 pfxlen) +{ + return ip_set_netmask_map[pfxlen].ip; +} + +static inline const __be32 * +ip_set_netmask6(u8 pfxlen) +{ + return &ip_set_netmask_map[pfxlen].ip6[0]; +} + +static inline u32 +ip_set_hostmask(u8 pfxlen) +{ + return (__force u32) ip_set_hostmask_map[pfxlen].ip; +} + +static inline const __be32 * +ip_set_hostmask6(u8 pfxlen) +{ + return &ip_set_hostmask_map[pfxlen].ip6[0]; +} + +extern u32 ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr); + +#define ip_set_mask_from_to(from, to, cidr) \ +do { \ + from &= ip_set_hostmask(cidr); \ + to = from | ~ip_set_hostmask(cidr); \ +} while (0) + +#endif /*_PFXLEN_H */ diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h new file mode 100644 index 00000000..0bb5a697 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_amanda.h @@ -0,0 +1,10 @@ +#ifndef _NF_CONNTRACK_AMANDA_H +#define _NF_CONNTRACK_AMANDA_H +/* AMANDA tracking. */ + +extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + unsigned int matchoff, + unsigned int matchlen, + struct nf_conntrack_expect *exp); +#endif /* _NF_CONNTRACK_AMANDA_H */ diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h new file mode 100644 index 00000000..0d3dd663 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -0,0 +1,137 @@ +#ifndef _NF_CONNTRACK_COMMON_H +#define _NF_CONNTRACK_COMMON_H +/* Connection state tracking for netfilter. This is separated from, + but required by, the NAT layer; it can also be used by an iptables + extension. */ +enum ip_conntrack_info { + /* Part of an established connection (either direction). */ + IP_CT_ESTABLISHED, + + /* Like NEW, but related to an existing connection, or ICMP error + (in either direction). */ + IP_CT_RELATED, + + /* Started a new connection to track (only + IP_CT_DIR_ORIGINAL); may be a retransmission. */ + IP_CT_NEW, + + /* >= this indicates reply direction */ + IP_CT_IS_REPLY, + + IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY, + IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY, + IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY, + /* Number of distinct IP_CT types (no NEW in reply dirn). */ + IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1 +}; + +/* Bitset representing status of connection. */ +enum ip_conntrack_status { + /* It's an expected connection: bit 0 set. This bit never changed */ + IPS_EXPECTED_BIT = 0, + IPS_EXPECTED = (1 << IPS_EXPECTED_BIT), + + /* We've seen packets both ways: bit 1 set. Can be set, not unset. */ + IPS_SEEN_REPLY_BIT = 1, + IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT), + + /* Conntrack should never be early-expired. 
*/ + IPS_ASSURED_BIT = 2, + IPS_ASSURED = (1 << IPS_ASSURED_BIT), + + /* Connection is confirmed: originating packet has left box */ + IPS_CONFIRMED_BIT = 3, + IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT), + + /* Connection needs src nat in orig dir. This bit never changed. */ + IPS_SRC_NAT_BIT = 4, + IPS_SRC_NAT = (1 << IPS_SRC_NAT_BIT), + + /* Connection needs dst nat in orig dir. This bit never changed. */ + IPS_DST_NAT_BIT = 5, + IPS_DST_NAT = (1 << IPS_DST_NAT_BIT), + + /* Both together. */ + IPS_NAT_MASK = (IPS_DST_NAT | IPS_SRC_NAT), + + /* Connection needs TCP sequence adjusted. */ + IPS_SEQ_ADJUST_BIT = 6, + IPS_SEQ_ADJUST = (1 << IPS_SEQ_ADJUST_BIT), + + /* NAT initialization bits. */ + IPS_SRC_NAT_DONE_BIT = 7, + IPS_SRC_NAT_DONE = (1 << IPS_SRC_NAT_DONE_BIT), + + IPS_DST_NAT_DONE_BIT = 8, + IPS_DST_NAT_DONE = (1 << IPS_DST_NAT_DONE_BIT), + + /* Both together */ + IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE), + + /* Connection is dying (removed from lists), can not be unset. */ + IPS_DYING_BIT = 9, + IPS_DYING = (1 << IPS_DYING_BIT), + + /* Connection has fixed timeout. */ + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT), + + /* Conntrack is a template */ + IPS_TEMPLATE_BIT = 11, + IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT), + + /* Conntrack is a fake untracked entry */ + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), +}; + +/* Connection tracking event types */ +enum ip_conntrack_events { + IPCT_NEW, /* new conntrack */ + IPCT_RELATED, /* related conntrack */ + IPCT_DESTROY, /* destroyed conntrack */ + IPCT_REPLY, /* connection has seen two-way traffic */ + IPCT_ASSURED, /* connection status has changed to assured */ + IPCT_PROTOINFO, /* protocol information has changed */ + IPCT_HELPER, /* new helper has been set */ + IPCT_MARK, /* new mark has been set */ + IPCT_NATSEQADJ, /* NAT is doing sequence adjustment */ + IPCT_SECMARK, /* new security mark has been set */ +}; + +enum ip_conntrack_expect_events { + IPEXP_NEW, /* new expectation */ + IPEXP_DESTROY, /* destroyed expectation */ +}; + +/* expectation flags */ +#define NF_CT_EXPECT_PERMANENT 0x1 +#define NF_CT_EXPECT_INACTIVE 0x2 +#define NF_CT_EXPECT_USERSPACE 0x4 + +#ifdef __KERNEL__ +struct ip_conntrack_stat { + unsigned int searched; + unsigned int found; + unsigned int new; + unsigned int invalid; + unsigned int ignore; + unsigned int delete; + unsigned int delete_list; + unsigned int insert; + unsigned int insert_failed; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; +}; + +/* call to create an explicit dependency on nf_conntrack. 
*/ +extern void need_conntrack(void); + +#endif /* __KERNEL__ */ + +#endif /* _NF_CONNTRACK_COMMON_H */ diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h new file mode 100644 index 00000000..40dcc820 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_dccp.h @@ -0,0 +1,40 @@ +#ifndef _NF_CONNTRACK_DCCP_H +#define _NF_CONNTRACK_DCCP_H + +/* Exposed to userspace over nfnetlink */ +enum ct_dccp_states { + CT_DCCP_NONE, + CT_DCCP_REQUEST, + CT_DCCP_RESPOND, + CT_DCCP_PARTOPEN, + CT_DCCP_OPEN, + CT_DCCP_CLOSEREQ, + CT_DCCP_CLOSING, + CT_DCCP_TIMEWAIT, + CT_DCCP_IGNORE, + CT_DCCP_INVALID, + __CT_DCCP_MAX +}; +#define CT_DCCP_MAX (__CT_DCCP_MAX - 1) + +enum ct_dccp_roles { + CT_DCCP_ROLE_CLIENT, + CT_DCCP_ROLE_SERVER, + __CT_DCCP_ROLE_MAX +}; +#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) + +#ifdef __KERNEL__ +#include <net/netfilter/nf_conntrack_tuple.h> + +struct nf_ct_dccp { + u_int8_t role[IP_CT_DIR_MAX]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; + +#endif /* __KERNEL__ */ + +#endif /* _NF_CONNTRACK_DCCP_H */ diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h new file mode 100644 index 00000000..3e3aa089 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_ftp.h @@ -0,0 +1,42 @@ +#ifndef _NF_CONNTRACK_FTP_H +#define _NF_CONNTRACK_FTP_H +/* FTP tracking. */ + +/* This enum is exposed to userspace */ +enum nf_ct_ftp_type { + /* PORT command from client */ + NF_CT_FTP_PORT, + /* PASV response from server */ + NF_CT_FTP_PASV, + /* EPRT command from client */ + NF_CT_FTP_EPRT, + /* EPSV response from server */ + NF_CT_FTP_EPSV, +}; + +#ifdef __KERNEL__ + +#define FTP_PORT 21 + +#define NUM_SEQ_TO_REMEMBER 2 +/* This structure exists only once per master */ +struct nf_ct_ftp_master { + /* Valid seq positions for cmd matching after newline */ + u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER]; + /* 0 means seq_match_aft_nl not set */ + int seq_aft_nl_num[IP_CT_DIR_MAX]; +}; + +struct nf_conntrack_expect; + +/* For NAT to hook in when we find a packet which describes what other + * connection we should expect. 
*/ +extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + enum nf_ct_ftp_type type, + unsigned int matchoff, + unsigned int matchlen, + struct nf_conntrack_expect *exp); +#endif /* __KERNEL__ */ + +#endif /* _NF_CONNTRACK_FTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h new file mode 100644 index 00000000..26f9226e --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_h323.h @@ -0,0 +1,92 @@ +#ifndef _NF_CONNTRACK_H323_H +#define _NF_CONNTRACK_H323_H + +#ifdef __KERNEL__ + +#include <linux/netfilter/nf_conntrack_h323_asn1.h> + +#define RAS_PORT 1719 +#define Q931_PORT 1720 +#define H323_RTP_CHANNEL_MAX 4 /* Audio, video, FAX and other */ + +/* This structure exists only once per master */ +struct nf_ct_h323_master { + + /* Original and NATed Q.931 or H.245 signal ports */ + __be16 sig_port[IP_CT_DIR_MAX]; + + /* Original and NATed RTP ports */ + __be16 rtp_port[H323_RTP_CHANNEL_MAX][IP_CT_DIR_MAX]; + + union { + /* RAS connection timeout */ + u_int32_t timeout; + + /* Next TPKT length (for separate TPKT header and data) */ + u_int16_t tpkt_len[IP_CT_DIR_MAX]; + }; +}; + +struct nf_conn; + +extern int get_h225_addr(struct nf_conn *ct, unsigned char *data, + TransportAddress *taddr, + union nf_inet_addr *addr, __be16 *port); +extern void nf_conntrack_h245_expect(struct nf_conn *new, + struct nf_conntrack_expect *this); +extern void nf_conntrack_q931_expect(struct nf_conn *new, + struct nf_conntrack_expect *this); +extern int (*set_h245_addr_hook) (struct sk_buff *skb, + unsigned char **data, int dataoff, + H245_TransportAddress *taddr, + union nf_inet_addr *addr, + __be16 port); +extern int (*set_h225_addr_hook) (struct sk_buff *skb, + unsigned char **data, int dataoff, + TransportAddress *taddr, + union nf_inet_addr *addr, + __be16 port); +extern int (*set_sig_addr_hook) (struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned char **data, + TransportAddress *taddr, int count); +extern int (*set_ras_addr_hook) (struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned char **data, + TransportAddress *taddr, int count); +extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned char **data, int dataoff, + H245_TransportAddress *taddr, + __be16 port, __be16 rtp_port, + struct nf_conntrack_expect *rtp_exp, + struct nf_conntrack_expect *rtcp_exp); +extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned char **data, int dataoff, + H245_TransportAddress *taddr, __be16 port, + struct nf_conntrack_expect *exp); +extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned char **data, int dataoff, + TransportAddress *taddr, __be16 port, + struct nf_conntrack_expect *exp); +extern int (*nat_callforwarding_hook) (struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned char **data, int dataoff, + TransportAddress *taddr, + __be16 port, + struct nf_conntrack_expect *exp); +extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned char **data, TransportAddress *taddr, + int idx, __be16 port, + struct nf_conntrack_expect *exp); + +#endif + +#endif diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h new file mode 100644 index 
00000000..8dab5968 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h @@ -0,0 +1,98 @@ +/**************************************************************************** + * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323 + * conntrack/NAT module. + * + * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> + * + * This source code is licensed under the General Public License version 2. + * + * + * This library is based on H.225 version 4, H.235 version 2 and H.245 + * version 7. It is extremely optimized to decode only the absolutely + * necessary objects in a signal for Linux kernel NAT module use, so don't + * expect it to be a full ASN.1 library. + * + * Features: + * + * 1. Small. The total size of code plus data is less than 20 KB (IA32). + * 2. Fast. Decoding Netmeeting's Setup signal 1 million times on a PIII 866 + * takes only 3.9 seconds. + * 3. No memory allocation. It uses a static object. No need to initialize or + * clean up. + * 4. Thread safe. + * 5. Supports embedded architectures that have no misaligned memory access + * support. + * + * Limitations: + * + * 1. At most 30 faststart entries. This is really limited by the Ethernet MTU: + * if a Setup signal contains more than 30 faststart entries, the packet size + * will very likely exceed the MTU and the TPKT will be fragmented. I + * don't know how to handle this in a Netfilter module; help is welcome. + * In practice, 30 should be enough for most cases. + * 2. IPv4 addresses only. + * + ****************************************************************************/ + +#ifndef _NF_CONNTRACK_HELPER_H323_ASN1_H_ +#define _NF_CONNTRACK_HELPER_H323_ASN1_H_ + +/***************************************************************************** + * H.323 Types + ****************************************************************************/ +#include "nf_conntrack_h323_types.h" + +typedef struct { + enum { + Q931_NationalEscape = 0x00, + Q931_Alerting = 0x01, + Q931_CallProceeding = 0x02, + Q931_Connect = 0x07, + Q931_ConnectAck = 0x0F, + Q931_Progress = 0x03, + Q931_Setup = 0x05, + Q931_SetupAck = 0x0D, + Q931_Resume = 0x26, + Q931_ResumeAck = 0x2E, + Q931_ResumeReject = 0x22, + Q931_Suspend = 0x25, + Q931_SuspendAck = 0x2D, + Q931_SuspendReject = 0x21, + Q931_UserInformation = 0x20, + Q931_Disconnect = 0x45, + Q931_Release = 0x4D, + Q931_ReleaseComplete = 0x5A, + Q931_Restart = 0x46, + Q931_RestartAck = 0x4E, + Q931_Segment = 0x60, + Q931_CongestionCtrl = 0x79, + Q931_Information = 0x7B, + Q931_Notify = 0x6E, + Q931_Status = 0x7D, + Q931_StatusEnquiry = 0x75, + Q931_Facility = 0x62 + } MessageType; + H323_UserInformation UUIE; +} Q931; + +/***************************************************************************** + * Decode Functions Return Codes + ****************************************************************************/ + +#define H323_ERROR_NONE 0 /* Decoded successfully */ +#define H323_ERROR_STOP 1 /* Decoding stopped, not really an error */ +#define H323_ERROR_BOUND -1 +#define H323_ERROR_RANGE -2 + + +/***************************************************************************** + * Decode Functions + ****************************************************************************/ + +int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage * ras); +int DecodeQ931(unsigned char *buf, size_t sz, Q931 * q931); +int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz, + MultimediaSystemControlMessage * + mscm); + +#endif diff --git 
a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h new file mode 100644 index 00000000..f35b6b48 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_h323_types.h @@ -0,0 +1,934 @@ +/* Generated by Jing Min Zhao's ASN.1 parser, May 16 2007 + * + * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> + * + * This source code is licensed under General Public License version 2. + */ + +typedef struct TransportAddress_ipAddress { /* SEQUENCE */ + int options; /* No use */ + unsigned ip; +} TransportAddress_ipAddress; + +typedef struct TransportAddress_ip6Address { /* SEQUENCE */ + int options; /* No use */ + unsigned ip; +} TransportAddress_ip6Address; + +typedef struct TransportAddress { /* CHOICE */ + enum { + eTransportAddress_ipAddress, + eTransportAddress_ipSourceRoute, + eTransportAddress_ipxAddress, + eTransportAddress_ip6Address, + eTransportAddress_netBios, + eTransportAddress_nsap, + eTransportAddress_nonStandardAddress, + } choice; + union { + TransportAddress_ipAddress ipAddress; + TransportAddress_ip6Address ip6Address; + }; +} TransportAddress; + +typedef struct DataProtocolCapability { /* CHOICE */ + enum { + eDataProtocolCapability_nonStandard, + eDataProtocolCapability_v14buffered, + eDataProtocolCapability_v42lapm, + eDataProtocolCapability_hdlcFrameTunnelling, + eDataProtocolCapability_h310SeparateVCStack, + eDataProtocolCapability_h310SingleVCStack, + eDataProtocolCapability_transparent, + eDataProtocolCapability_segmentationAndReassembly, + eDataProtocolCapability_hdlcFrameTunnelingwSAR, + eDataProtocolCapability_v120, + eDataProtocolCapability_separateLANStack, + eDataProtocolCapability_v76wCompression, + eDataProtocolCapability_tcp, + eDataProtocolCapability_udp, + } choice; +} DataProtocolCapability; + +typedef struct DataApplicationCapability_application { /* CHOICE */ + enum { + eDataApplicationCapability_application_nonStandard, + eDataApplicationCapability_application_t120, + eDataApplicationCapability_application_dsm_cc, + eDataApplicationCapability_application_userData, + eDataApplicationCapability_application_t84, + eDataApplicationCapability_application_t434, + eDataApplicationCapability_application_h224, + eDataApplicationCapability_application_nlpid, + eDataApplicationCapability_application_dsvdControl, + eDataApplicationCapability_application_h222DataPartitioning, + eDataApplicationCapability_application_t30fax, + eDataApplicationCapability_application_t140, + eDataApplicationCapability_application_t38fax, + eDataApplicationCapability_application_genericDataCapability, + } choice; + union { + DataProtocolCapability t120; + }; +} DataApplicationCapability_application; + +typedef struct DataApplicationCapability { /* SEQUENCE */ + int options; /* No use */ + DataApplicationCapability_application application; +} DataApplicationCapability; + +typedef struct DataType { /* CHOICE */ + enum { + eDataType_nonStandard, + eDataType_nullData, + eDataType_videoData, + eDataType_audioData, + eDataType_data, + eDataType_encryptionData, + eDataType_h235Control, + eDataType_h235Media, + eDataType_multiplexedStream, + } choice; + union { + DataApplicationCapability data; + }; +} DataType; + +typedef struct UnicastAddress_iPAddress { /* SEQUENCE */ + int options; /* No use */ + unsigned network; +} UnicastAddress_iPAddress; + +typedef struct UnicastAddress_iP6Address { /* SEQUENCE */ + int options; /* No use */ + unsigned network; +} UnicastAddress_iP6Address; + +typedef struct UnicastAddress { /* CHOICE 
*/ + enum { + eUnicastAddress_iPAddress, + eUnicastAddress_iPXAddress, + eUnicastAddress_iP6Address, + eUnicastAddress_netBios, + eUnicastAddress_iPSourceRouteAddress, + eUnicastAddress_nsap, + eUnicastAddress_nonStandardAddress, + } choice; + union { + UnicastAddress_iPAddress iPAddress; + UnicastAddress_iP6Address iP6Address; + }; +} UnicastAddress; + +typedef struct H245_TransportAddress { /* CHOICE */ + enum { + eH245_TransportAddress_unicastAddress, + eH245_TransportAddress_multicastAddress, + } choice; + union { + UnicastAddress unicastAddress; + }; +} H245_TransportAddress; + +typedef struct H2250LogicalChannelParameters { /* SEQUENCE */ + enum { + eH2250LogicalChannelParameters_nonStandard = (1 << 31), + eH2250LogicalChannelParameters_associatedSessionID = + (1 << 30), + eH2250LogicalChannelParameters_mediaChannel = (1 << 29), + eH2250LogicalChannelParameters_mediaGuaranteedDelivery = + (1 << 28), + eH2250LogicalChannelParameters_mediaControlChannel = + (1 << 27), + eH2250LogicalChannelParameters_mediaControlGuaranteedDelivery + = (1 << 26), + eH2250LogicalChannelParameters_silenceSuppression = (1 << 25), + eH2250LogicalChannelParameters_destination = (1 << 24), + eH2250LogicalChannelParameters_dynamicRTPPayloadType = + (1 << 23), + eH2250LogicalChannelParameters_mediaPacketization = (1 << 22), + eH2250LogicalChannelParameters_transportCapability = + (1 << 21), + eH2250LogicalChannelParameters_redundancyEncoding = (1 << 20), + eH2250LogicalChannelParameters_source = (1 << 19), + } options; + H245_TransportAddress mediaChannel; + H245_TransportAddress mediaControlChannel; +} H2250LogicalChannelParameters; + +typedef struct OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters { /* CHOICE */ + enum { + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters, + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters, + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters, + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters, + eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_none, + } choice; + union { + H2250LogicalChannelParameters h2250LogicalChannelParameters; + }; +} OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters; + +typedef struct OpenLogicalChannel_forwardLogicalChannelParameters { /* SEQUENCE */ + enum { + eOpenLogicalChannel_forwardLogicalChannelParameters_portNumber + = (1 << 31), + eOpenLogicalChannel_forwardLogicalChannelParameters_forwardLogicalChannelDependency + = (1 << 30), + eOpenLogicalChannel_forwardLogicalChannelParameters_replacementFor + = (1 << 29), + } options; + DataType dataType; + OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters + multiplexParameters; +} OpenLogicalChannel_forwardLogicalChannelParameters; + +typedef struct OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters { /* CHOICE */ + enum { + eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters, + eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters, + eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters, + } choice; + union { + H2250LogicalChannelParameters h2250LogicalChannelParameters; + }; +} OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters; + 
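/*
 * Illustrative sketch, not part of the original header: these generated
 * types follow one convention throughout -- optional SEQUENCE members are
 * announced by bits in the 'options' field, the selected CHOICE alternative
 * by the 'choice' enum, and the decoded payload lives in the anonymous
 * union.  A consumer such as the H.323 conntrack helper tests them roughly
 * like this (the function name is hypothetical):
 */
static inline const H245_TransportAddress *
example_get_media_channel(const OpenLogicalChannel_forwardLogicalChannelParameters *fwd)
{
	const H2250LogicalChannelParameters *p;

	/* CHOICE: only h2250LogicalChannelParameters carries the addresses used here */
	if (fwd->multiplexParameters.choice !=
	    eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
		return NULL;
	p = &fwd->multiplexParameters.h2250LogicalChannelParameters;

	/* SEQUENCE option bit: mediaChannel is only valid when its bit is set */
	if (!(p->options & eH2250LogicalChannelParameters_mediaChannel))
		return NULL;
	return &p->mediaChannel;
}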
+typedef struct OpenLogicalChannel_reverseLogicalChannelParameters { /* SEQUENCE */ + enum { + eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters + = (1 << 31), + eOpenLogicalChannel_reverseLogicalChannelParameters_reverseLogicalChannelDependency + = (1 << 30), + eOpenLogicalChannel_reverseLogicalChannelParameters_replacementFor + = (1 << 29), + } options; + OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters + multiplexParameters; +} OpenLogicalChannel_reverseLogicalChannelParameters; + +typedef struct NetworkAccessParameters_networkAddress { /* CHOICE */ + enum { + eNetworkAccessParameters_networkAddress_q2931Address, + eNetworkAccessParameters_networkAddress_e164Address, + eNetworkAccessParameters_networkAddress_localAreaAddress, + } choice; + union { + H245_TransportAddress localAreaAddress; + }; +} NetworkAccessParameters_networkAddress; + +typedef struct NetworkAccessParameters { /* SEQUENCE */ + enum { + eNetworkAccessParameters_distribution = (1 << 31), + eNetworkAccessParameters_externalReference = (1 << 30), + eNetworkAccessParameters_t120SetupProcedure = (1 << 29), + } options; + NetworkAccessParameters_networkAddress networkAddress; +} NetworkAccessParameters; + +typedef struct OpenLogicalChannel { /* SEQUENCE */ + enum { + eOpenLogicalChannel_reverseLogicalChannelParameters = + (1 << 31), + eOpenLogicalChannel_separateStack = (1 << 30), + eOpenLogicalChannel_encryptionSync = (1 << 29), + } options; + OpenLogicalChannel_forwardLogicalChannelParameters + forwardLogicalChannelParameters; + OpenLogicalChannel_reverseLogicalChannelParameters + reverseLogicalChannelParameters; + NetworkAccessParameters separateStack; +} OpenLogicalChannel; + +typedef struct Setup_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Setup_UUIE_fastStart; + +typedef struct Setup_UUIE { /* SEQUENCE */ + enum { + eSetup_UUIE_h245Address = (1 << 31), + eSetup_UUIE_sourceAddress = (1 << 30), + eSetup_UUIE_destinationAddress = (1 << 29), + eSetup_UUIE_destCallSignalAddress = (1 << 28), + eSetup_UUIE_destExtraCallInfo = (1 << 27), + eSetup_UUIE_destExtraCRV = (1 << 26), + eSetup_UUIE_callServices = (1 << 25), + eSetup_UUIE_sourceCallSignalAddress = (1 << 24), + eSetup_UUIE_remoteExtensionAddress = (1 << 23), + eSetup_UUIE_callIdentifier = (1 << 22), + eSetup_UUIE_h245SecurityCapability = (1 << 21), + eSetup_UUIE_tokens = (1 << 20), + eSetup_UUIE_cryptoTokens = (1 << 19), + eSetup_UUIE_fastStart = (1 << 18), + eSetup_UUIE_mediaWaitForConnect = (1 << 17), + eSetup_UUIE_canOverlapSend = (1 << 16), + eSetup_UUIE_endpointIdentifier = (1 << 15), + eSetup_UUIE_multipleCalls = (1 << 14), + eSetup_UUIE_maintainConnection = (1 << 13), + eSetup_UUIE_connectionParameters = (1 << 12), + eSetup_UUIE_language = (1 << 11), + eSetup_UUIE_presentationIndicator = (1 << 10), + eSetup_UUIE_screeningIndicator = (1 << 9), + eSetup_UUIE_serviceControl = (1 << 8), + eSetup_UUIE_symmetricOperationRequired = (1 << 7), + eSetup_UUIE_capacity = (1 << 6), + eSetup_UUIE_circuitInfo = (1 << 5), + eSetup_UUIE_desiredProtocols = (1 << 4), + eSetup_UUIE_neededFeatures = (1 << 3), + eSetup_UUIE_desiredFeatures = (1 << 2), + eSetup_UUIE_supportedFeatures = (1 << 1), + eSetup_UUIE_parallelH245Control = (1 << 0), + } options; + TransportAddress h245Address; + TransportAddress destCallSignalAddress; + TransportAddress sourceCallSignalAddress; + Setup_UUIE_fastStart fastStart; +} Setup_UUIE; + +typedef struct CallProceeding_UUIE_fastStart { /* SEQUENCE OF */ + int count; + 
OpenLogicalChannel item[30]; +} CallProceeding_UUIE_fastStart; + +typedef struct CallProceeding_UUIE { /* SEQUENCE */ + enum { + eCallProceeding_UUIE_h245Address = (1 << 31), + eCallProceeding_UUIE_callIdentifier = (1 << 30), + eCallProceeding_UUIE_h245SecurityMode = (1 << 29), + eCallProceeding_UUIE_tokens = (1 << 28), + eCallProceeding_UUIE_cryptoTokens = (1 << 27), + eCallProceeding_UUIE_fastStart = (1 << 26), + eCallProceeding_UUIE_multipleCalls = (1 << 25), + eCallProceeding_UUIE_maintainConnection = (1 << 24), + eCallProceeding_UUIE_fastConnectRefused = (1 << 23), + eCallProceeding_UUIE_featureSet = (1 << 22), + } options; + TransportAddress h245Address; + CallProceeding_UUIE_fastStart fastStart; +} CallProceeding_UUIE; + +typedef struct Connect_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Connect_UUIE_fastStart; + +typedef struct Connect_UUIE { /* SEQUENCE */ + enum { + eConnect_UUIE_h245Address = (1 << 31), + eConnect_UUIE_callIdentifier = (1 << 30), + eConnect_UUIE_h245SecurityMode = (1 << 29), + eConnect_UUIE_tokens = (1 << 28), + eConnect_UUIE_cryptoTokens = (1 << 27), + eConnect_UUIE_fastStart = (1 << 26), + eConnect_UUIE_multipleCalls = (1 << 25), + eConnect_UUIE_maintainConnection = (1 << 24), + eConnect_UUIE_language = (1 << 23), + eConnect_UUIE_connectedAddress = (1 << 22), + eConnect_UUIE_presentationIndicator = (1 << 21), + eConnect_UUIE_screeningIndicator = (1 << 20), + eConnect_UUIE_fastConnectRefused = (1 << 19), + eConnect_UUIE_serviceControl = (1 << 18), + eConnect_UUIE_capacity = (1 << 17), + eConnect_UUIE_featureSet = (1 << 16), + } options; + TransportAddress h245Address; + Connect_UUIE_fastStart fastStart; +} Connect_UUIE; + +typedef struct Alerting_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Alerting_UUIE_fastStart; + +typedef struct Alerting_UUIE { /* SEQUENCE */ + enum { + eAlerting_UUIE_h245Address = (1 << 31), + eAlerting_UUIE_callIdentifier = (1 << 30), + eAlerting_UUIE_h245SecurityMode = (1 << 29), + eAlerting_UUIE_tokens = (1 << 28), + eAlerting_UUIE_cryptoTokens = (1 << 27), + eAlerting_UUIE_fastStart = (1 << 26), + eAlerting_UUIE_multipleCalls = (1 << 25), + eAlerting_UUIE_maintainConnection = (1 << 24), + eAlerting_UUIE_alertingAddress = (1 << 23), + eAlerting_UUIE_presentationIndicator = (1 << 22), + eAlerting_UUIE_screeningIndicator = (1 << 21), + eAlerting_UUIE_fastConnectRefused = (1 << 20), + eAlerting_UUIE_serviceControl = (1 << 19), + eAlerting_UUIE_capacity = (1 << 18), + eAlerting_UUIE_featureSet = (1 << 17), + } options; + TransportAddress h245Address; + Alerting_UUIE_fastStart fastStart; +} Alerting_UUIE; + +typedef struct FacilityReason { /* CHOICE */ + enum { + eFacilityReason_routeCallToGatekeeper, + eFacilityReason_callForwarded, + eFacilityReason_routeCallToMC, + eFacilityReason_undefinedReason, + eFacilityReason_conferenceListChoice, + eFacilityReason_startH245, + eFacilityReason_noH245, + eFacilityReason_newTokens, + eFacilityReason_featureSetUpdate, + eFacilityReason_forwardedElements, + eFacilityReason_transportedInformation, + } choice; +} FacilityReason; + +typedef struct Facility_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Facility_UUIE_fastStart; + +typedef struct Facility_UUIE { /* SEQUENCE */ + enum { + eFacility_UUIE_alternativeAddress = (1 << 31), + eFacility_UUIE_alternativeAliasAddress = (1 << 30), + eFacility_UUIE_conferenceID = (1 << 29), + eFacility_UUIE_callIdentifier = (1 << 28), + 
eFacility_UUIE_destExtraCallInfo = (1 << 27), + eFacility_UUIE_remoteExtensionAddress = (1 << 26), + eFacility_UUIE_tokens = (1 << 25), + eFacility_UUIE_cryptoTokens = (1 << 24), + eFacility_UUIE_conferences = (1 << 23), + eFacility_UUIE_h245Address = (1 << 22), + eFacility_UUIE_fastStart = (1 << 21), + eFacility_UUIE_multipleCalls = (1 << 20), + eFacility_UUIE_maintainConnection = (1 << 19), + eFacility_UUIE_fastConnectRefused = (1 << 18), + eFacility_UUIE_serviceControl = (1 << 17), + eFacility_UUIE_circuitInfo = (1 << 16), + eFacility_UUIE_featureSet = (1 << 15), + eFacility_UUIE_destinationInfo = (1 << 14), + eFacility_UUIE_h245SecurityMode = (1 << 13), + } options; + TransportAddress alternativeAddress; + FacilityReason reason; + TransportAddress h245Address; + Facility_UUIE_fastStart fastStart; +} Facility_UUIE; + +typedef struct Progress_UUIE_fastStart { /* SEQUENCE OF */ + int count; + OpenLogicalChannel item[30]; +} Progress_UUIE_fastStart; + +typedef struct Progress_UUIE { /* SEQUENCE */ + enum { + eProgress_UUIE_h245Address = (1 << 31), + eProgress_UUIE_h245SecurityMode = (1 << 30), + eProgress_UUIE_tokens = (1 << 29), + eProgress_UUIE_cryptoTokens = (1 << 28), + eProgress_UUIE_fastStart = (1 << 27), + eProgress_UUIE_multipleCalls = (1 << 26), + eProgress_UUIE_maintainConnection = (1 << 25), + eProgress_UUIE_fastConnectRefused = (1 << 24), + } options; + TransportAddress h245Address; + Progress_UUIE_fastStart fastStart; +} Progress_UUIE; + +typedef struct H323_UU_PDU_h323_message_body { /* CHOICE */ + enum { + eH323_UU_PDU_h323_message_body_setup, + eH323_UU_PDU_h323_message_body_callProceeding, + eH323_UU_PDU_h323_message_body_connect, + eH323_UU_PDU_h323_message_body_alerting, + eH323_UU_PDU_h323_message_body_information, + eH323_UU_PDU_h323_message_body_releaseComplete, + eH323_UU_PDU_h323_message_body_facility, + eH323_UU_PDU_h323_message_body_progress, + eH323_UU_PDU_h323_message_body_empty, + eH323_UU_PDU_h323_message_body_status, + eH323_UU_PDU_h323_message_body_statusInquiry, + eH323_UU_PDU_h323_message_body_setupAcknowledge, + eH323_UU_PDU_h323_message_body_notify, + } choice; + union { + Setup_UUIE setup; + CallProceeding_UUIE callProceeding; + Connect_UUIE connect; + Alerting_UUIE alerting; + Facility_UUIE facility; + Progress_UUIE progress; + }; +} H323_UU_PDU_h323_message_body; + +typedef struct RequestMessage { /* CHOICE */ + enum { + eRequestMessage_nonStandard, + eRequestMessage_masterSlaveDetermination, + eRequestMessage_terminalCapabilitySet, + eRequestMessage_openLogicalChannel, + eRequestMessage_closeLogicalChannel, + eRequestMessage_requestChannelClose, + eRequestMessage_multiplexEntrySend, + eRequestMessage_requestMultiplexEntry, + eRequestMessage_requestMode, + eRequestMessage_roundTripDelayRequest, + eRequestMessage_maintenanceLoopRequest, + eRequestMessage_communicationModeRequest, + eRequestMessage_conferenceRequest, + eRequestMessage_multilinkRequest, + eRequestMessage_logicalChannelRateRequest, + } choice; + union { + OpenLogicalChannel openLogicalChannel; + }; +} RequestMessage; + +typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters { /* CHOICE */ + enum { + eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters, + eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters, + } choice; + union { + H2250LogicalChannelParameters h2250LogicalChannelParameters; + }; +} 
OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters; + +typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters { /* SEQUENCE */ + enum { + eOpenLogicalChannelAck_reverseLogicalChannelParameters_portNumber + = (1 << 31), + eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters + = (1 << 30), + eOpenLogicalChannelAck_reverseLogicalChannelParameters_replacementFor + = (1 << 29), + } options; + OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters + multiplexParameters; +} OpenLogicalChannelAck_reverseLogicalChannelParameters; + +typedef struct H2250LogicalChannelAckParameters { /* SEQUENCE */ + enum { + eH2250LogicalChannelAckParameters_nonStandard = (1 << 31), + eH2250LogicalChannelAckParameters_sessionID = (1 << 30), + eH2250LogicalChannelAckParameters_mediaChannel = (1 << 29), + eH2250LogicalChannelAckParameters_mediaControlChannel = + (1 << 28), + eH2250LogicalChannelAckParameters_dynamicRTPPayloadType = + (1 << 27), + eH2250LogicalChannelAckParameters_flowControlToZero = + (1 << 26), + eH2250LogicalChannelAckParameters_portNumber = (1 << 25), + } options; + H245_TransportAddress mediaChannel; + H245_TransportAddress mediaControlChannel; +} H2250LogicalChannelAckParameters; + +typedef struct OpenLogicalChannelAck_forwardMultiplexAckParameters { /* CHOICE */ + enum { + eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters, + } choice; + union { + H2250LogicalChannelAckParameters + h2250LogicalChannelAckParameters; + }; +} OpenLogicalChannelAck_forwardMultiplexAckParameters; + +typedef struct OpenLogicalChannelAck { /* SEQUENCE */ + enum { + eOpenLogicalChannelAck_reverseLogicalChannelParameters = + (1 << 31), + eOpenLogicalChannelAck_separateStack = (1 << 30), + eOpenLogicalChannelAck_forwardMultiplexAckParameters = + (1 << 29), + eOpenLogicalChannelAck_encryptionSync = (1 << 28), + } options; + OpenLogicalChannelAck_reverseLogicalChannelParameters + reverseLogicalChannelParameters; + NetworkAccessParameters separateStack; + OpenLogicalChannelAck_forwardMultiplexAckParameters + forwardMultiplexAckParameters; +} OpenLogicalChannelAck; + +typedef struct ResponseMessage { /* CHOICE */ + enum { + eResponseMessage_nonStandard, + eResponseMessage_masterSlaveDeterminationAck, + eResponseMessage_masterSlaveDeterminationReject, + eResponseMessage_terminalCapabilitySetAck, + eResponseMessage_terminalCapabilitySetReject, + eResponseMessage_openLogicalChannelAck, + eResponseMessage_openLogicalChannelReject, + eResponseMessage_closeLogicalChannelAck, + eResponseMessage_requestChannelCloseAck, + eResponseMessage_requestChannelCloseReject, + eResponseMessage_multiplexEntrySendAck, + eResponseMessage_multiplexEntrySendReject, + eResponseMessage_requestMultiplexEntryAck, + eResponseMessage_requestMultiplexEntryReject, + eResponseMessage_requestModeAck, + eResponseMessage_requestModeReject, + eResponseMessage_roundTripDelayResponse, + eResponseMessage_maintenanceLoopAck, + eResponseMessage_maintenanceLoopReject, + eResponseMessage_communicationModeResponse, + eResponseMessage_conferenceResponse, + eResponseMessage_multilinkResponse, + eResponseMessage_logicalChannelRateAcknowledge, + eResponseMessage_logicalChannelRateReject, + } choice; + union { + OpenLogicalChannelAck openLogicalChannelAck; + }; +} ResponseMessage; + +typedef struct MultimediaSystemControlMessage { /* CHOICE */ + enum { + eMultimediaSystemControlMessage_request, + eMultimediaSystemControlMessage_response, + 
eMultimediaSystemControlMessage_command, + eMultimediaSystemControlMessage_indication, + } choice; + union { + RequestMessage request; + ResponseMessage response; + }; +} MultimediaSystemControlMessage; + +typedef struct H323_UU_PDU_h245Control { /* SEQUENCE OF */ + int count; + MultimediaSystemControlMessage item[4]; +} H323_UU_PDU_h245Control; + +typedef struct H323_UU_PDU { /* SEQUENCE */ + enum { + eH323_UU_PDU_nonStandardData = (1 << 31), + eH323_UU_PDU_h4501SupplementaryService = (1 << 30), + eH323_UU_PDU_h245Tunneling = (1 << 29), + eH323_UU_PDU_h245Control = (1 << 28), + eH323_UU_PDU_nonStandardControl = (1 << 27), + eH323_UU_PDU_callLinkage = (1 << 26), + eH323_UU_PDU_tunnelledSignallingMessage = (1 << 25), + eH323_UU_PDU_provisionalRespToH245Tunneling = (1 << 24), + eH323_UU_PDU_stimulusControl = (1 << 23), + eH323_UU_PDU_genericData = (1 << 22), + } options; + H323_UU_PDU_h323_message_body h323_message_body; + H323_UU_PDU_h245Control h245Control; +} H323_UU_PDU; + +typedef struct H323_UserInformation { /* SEQUENCE */ + enum { + eH323_UserInformation_user_data = (1 << 31), + } options; + H323_UU_PDU h323_uu_pdu; +} H323_UserInformation; + +typedef struct GatekeeperRequest { /* SEQUENCE */ + enum { + eGatekeeperRequest_nonStandardData = (1 << 31), + eGatekeeperRequest_gatekeeperIdentifier = (1 << 30), + eGatekeeperRequest_callServices = (1 << 29), + eGatekeeperRequest_endpointAlias = (1 << 28), + eGatekeeperRequest_alternateEndpoints = (1 << 27), + eGatekeeperRequest_tokens = (1 << 26), + eGatekeeperRequest_cryptoTokens = (1 << 25), + eGatekeeperRequest_authenticationCapability = (1 << 24), + eGatekeeperRequest_algorithmOIDs = (1 << 23), + eGatekeeperRequest_integrity = (1 << 22), + eGatekeeperRequest_integrityCheckValue = (1 << 21), + eGatekeeperRequest_supportsAltGK = (1 << 20), + eGatekeeperRequest_featureSet = (1 << 19), + eGatekeeperRequest_genericData = (1 << 18), + } options; + TransportAddress rasAddress; +} GatekeeperRequest; + +typedef struct GatekeeperConfirm { /* SEQUENCE */ + enum { + eGatekeeperConfirm_nonStandardData = (1 << 31), + eGatekeeperConfirm_gatekeeperIdentifier = (1 << 30), + eGatekeeperConfirm_alternateGatekeeper = (1 << 29), + eGatekeeperConfirm_authenticationMode = (1 << 28), + eGatekeeperConfirm_tokens = (1 << 27), + eGatekeeperConfirm_cryptoTokens = (1 << 26), + eGatekeeperConfirm_algorithmOID = (1 << 25), + eGatekeeperConfirm_integrity = (1 << 24), + eGatekeeperConfirm_integrityCheckValue = (1 << 23), + eGatekeeperConfirm_featureSet = (1 << 22), + eGatekeeperConfirm_genericData = (1 << 21), + } options; + TransportAddress rasAddress; +} GatekeeperConfirm; + +typedef struct RegistrationRequest_callSignalAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} RegistrationRequest_callSignalAddress; + +typedef struct RegistrationRequest_rasAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} RegistrationRequest_rasAddress; + +typedef struct RegistrationRequest { /* SEQUENCE */ + enum { + eRegistrationRequest_nonStandardData = (1 << 31), + eRegistrationRequest_terminalAlias = (1 << 30), + eRegistrationRequest_gatekeeperIdentifier = (1 << 29), + eRegistrationRequest_alternateEndpoints = (1 << 28), + eRegistrationRequest_timeToLive = (1 << 27), + eRegistrationRequest_tokens = (1 << 26), + eRegistrationRequest_cryptoTokens = (1 << 25), + eRegistrationRequest_integrityCheckValue = (1 << 24), + eRegistrationRequest_keepAlive = (1 << 23), + eRegistrationRequest_endpointIdentifier = (1 << 22), + 
eRegistrationRequest_willSupplyUUIEs = (1 << 21), + eRegistrationRequest_maintainConnection = (1 << 20), + eRegistrationRequest_alternateTransportAddresses = (1 << 19), + eRegistrationRequest_additiveRegistration = (1 << 18), + eRegistrationRequest_terminalAliasPattern = (1 << 17), + eRegistrationRequest_supportsAltGK = (1 << 16), + eRegistrationRequest_usageReportingCapability = (1 << 15), + eRegistrationRequest_multipleCalls = (1 << 14), + eRegistrationRequest_supportedH248Packages = (1 << 13), + eRegistrationRequest_callCreditCapability = (1 << 12), + eRegistrationRequest_capacityReportingCapability = (1 << 11), + eRegistrationRequest_capacity = (1 << 10), + eRegistrationRequest_featureSet = (1 << 9), + eRegistrationRequest_genericData = (1 << 8), + } options; + RegistrationRequest_callSignalAddress callSignalAddress; + RegistrationRequest_rasAddress rasAddress; + unsigned timeToLive; +} RegistrationRequest; + +typedef struct RegistrationConfirm_callSignalAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} RegistrationConfirm_callSignalAddress; + +typedef struct RegistrationConfirm { /* SEQUENCE */ + enum { + eRegistrationConfirm_nonStandardData = (1 << 31), + eRegistrationConfirm_terminalAlias = (1 << 30), + eRegistrationConfirm_gatekeeperIdentifier = (1 << 29), + eRegistrationConfirm_alternateGatekeeper = (1 << 28), + eRegistrationConfirm_timeToLive = (1 << 27), + eRegistrationConfirm_tokens = (1 << 26), + eRegistrationConfirm_cryptoTokens = (1 << 25), + eRegistrationConfirm_integrityCheckValue = (1 << 24), + eRegistrationConfirm_willRespondToIRR = (1 << 23), + eRegistrationConfirm_preGrantedARQ = (1 << 22), + eRegistrationConfirm_maintainConnection = (1 << 21), + eRegistrationConfirm_serviceControl = (1 << 20), + eRegistrationConfirm_supportsAdditiveRegistration = (1 << 19), + eRegistrationConfirm_terminalAliasPattern = (1 << 18), + eRegistrationConfirm_supportedPrefixes = (1 << 17), + eRegistrationConfirm_usageSpec = (1 << 16), + eRegistrationConfirm_featureServerAlias = (1 << 15), + eRegistrationConfirm_capacityReportingSpec = (1 << 14), + eRegistrationConfirm_featureSet = (1 << 13), + eRegistrationConfirm_genericData = (1 << 12), + } options; + RegistrationConfirm_callSignalAddress callSignalAddress; + unsigned timeToLive; +} RegistrationConfirm; + +typedef struct UnregistrationRequest_callSignalAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} UnregistrationRequest_callSignalAddress; + +typedef struct UnregistrationRequest { /* SEQUENCE */ + enum { + eUnregistrationRequest_endpointAlias = (1 << 31), + eUnregistrationRequest_nonStandardData = (1 << 30), + eUnregistrationRequest_endpointIdentifier = (1 << 29), + eUnregistrationRequest_alternateEndpoints = (1 << 28), + eUnregistrationRequest_gatekeeperIdentifier = (1 << 27), + eUnregistrationRequest_tokens = (1 << 26), + eUnregistrationRequest_cryptoTokens = (1 << 25), + eUnregistrationRequest_integrityCheckValue = (1 << 24), + eUnregistrationRequest_reason = (1 << 23), + eUnregistrationRequest_endpointAliasPattern = (1 << 22), + eUnregistrationRequest_supportedPrefixes = (1 << 21), + eUnregistrationRequest_alternateGatekeeper = (1 << 20), + eUnregistrationRequest_genericData = (1 << 19), + } options; + UnregistrationRequest_callSignalAddress callSignalAddress; +} UnregistrationRequest; + +typedef struct AdmissionRequest { /* SEQUENCE */ + enum { + eAdmissionRequest_callModel = (1 << 31), + eAdmissionRequest_destinationInfo = (1 << 30), + eAdmissionRequest_destCallSignalAddress = (1 << 
29), + eAdmissionRequest_destExtraCallInfo = (1 << 28), + eAdmissionRequest_srcCallSignalAddress = (1 << 27), + eAdmissionRequest_nonStandardData = (1 << 26), + eAdmissionRequest_callServices = (1 << 25), + eAdmissionRequest_canMapAlias = (1 << 24), + eAdmissionRequest_callIdentifier = (1 << 23), + eAdmissionRequest_srcAlternatives = (1 << 22), + eAdmissionRequest_destAlternatives = (1 << 21), + eAdmissionRequest_gatekeeperIdentifier = (1 << 20), + eAdmissionRequest_tokens = (1 << 19), + eAdmissionRequest_cryptoTokens = (1 << 18), + eAdmissionRequest_integrityCheckValue = (1 << 17), + eAdmissionRequest_transportQOS = (1 << 16), + eAdmissionRequest_willSupplyUUIEs = (1 << 15), + eAdmissionRequest_callLinkage = (1 << 14), + eAdmissionRequest_gatewayDataRate = (1 << 13), + eAdmissionRequest_capacity = (1 << 12), + eAdmissionRequest_circuitInfo = (1 << 11), + eAdmissionRequest_desiredProtocols = (1 << 10), + eAdmissionRequest_desiredTunnelledProtocol = (1 << 9), + eAdmissionRequest_featureSet = (1 << 8), + eAdmissionRequest_genericData = (1 << 7), + } options; + TransportAddress destCallSignalAddress; + TransportAddress srcCallSignalAddress; +} AdmissionRequest; + +typedef struct AdmissionConfirm { /* SEQUENCE */ + enum { + eAdmissionConfirm_irrFrequency = (1 << 31), + eAdmissionConfirm_nonStandardData = (1 << 30), + eAdmissionConfirm_destinationInfo = (1 << 29), + eAdmissionConfirm_destExtraCallInfo = (1 << 28), + eAdmissionConfirm_destinationType = (1 << 27), + eAdmissionConfirm_remoteExtensionAddress = (1 << 26), + eAdmissionConfirm_alternateEndpoints = (1 << 25), + eAdmissionConfirm_tokens = (1 << 24), + eAdmissionConfirm_cryptoTokens = (1 << 23), + eAdmissionConfirm_integrityCheckValue = (1 << 22), + eAdmissionConfirm_transportQOS = (1 << 21), + eAdmissionConfirm_willRespondToIRR = (1 << 20), + eAdmissionConfirm_uuiesRequested = (1 << 19), + eAdmissionConfirm_language = (1 << 18), + eAdmissionConfirm_alternateTransportAddresses = (1 << 17), + eAdmissionConfirm_useSpecifiedTransport = (1 << 16), + eAdmissionConfirm_circuitInfo = (1 << 15), + eAdmissionConfirm_usageSpec = (1 << 14), + eAdmissionConfirm_supportedProtocols = (1 << 13), + eAdmissionConfirm_serviceControl = (1 << 12), + eAdmissionConfirm_multipleCalls = (1 << 11), + eAdmissionConfirm_featureSet = (1 << 10), + eAdmissionConfirm_genericData = (1 << 9), + } options; + TransportAddress destCallSignalAddress; +} AdmissionConfirm; + +typedef struct LocationRequest { /* SEQUENCE */ + enum { + eLocationRequest_endpointIdentifier = (1 << 31), + eLocationRequest_nonStandardData = (1 << 30), + eLocationRequest_sourceInfo = (1 << 29), + eLocationRequest_canMapAlias = (1 << 28), + eLocationRequest_gatekeeperIdentifier = (1 << 27), + eLocationRequest_tokens = (1 << 26), + eLocationRequest_cryptoTokens = (1 << 25), + eLocationRequest_integrityCheckValue = (1 << 24), + eLocationRequest_desiredProtocols = (1 << 23), + eLocationRequest_desiredTunnelledProtocol = (1 << 22), + eLocationRequest_featureSet = (1 << 21), + eLocationRequest_genericData = (1 << 20), + eLocationRequest_hopCount = (1 << 19), + eLocationRequest_circuitInfo = (1 << 18), + } options; + TransportAddress replyAddress; +} LocationRequest; + +typedef struct LocationConfirm { /* SEQUENCE */ + enum { + eLocationConfirm_nonStandardData = (1 << 31), + eLocationConfirm_destinationInfo = (1 << 30), + eLocationConfirm_destExtraCallInfo = (1 << 29), + eLocationConfirm_destinationType = (1 << 28), + eLocationConfirm_remoteExtensionAddress = (1 << 27), + 
eLocationConfirm_alternateEndpoints = (1 << 26), + eLocationConfirm_tokens = (1 << 25), + eLocationConfirm_cryptoTokens = (1 << 24), + eLocationConfirm_integrityCheckValue = (1 << 23), + eLocationConfirm_alternateTransportAddresses = (1 << 22), + eLocationConfirm_supportedProtocols = (1 << 21), + eLocationConfirm_multipleCalls = (1 << 20), + eLocationConfirm_featureSet = (1 << 19), + eLocationConfirm_genericData = (1 << 18), + eLocationConfirm_circuitInfo = (1 << 17), + eLocationConfirm_serviceControl = (1 << 16), + } options; + TransportAddress callSignalAddress; + TransportAddress rasAddress; +} LocationConfirm; + +typedef struct InfoRequestResponse_callSignalAddress { /* SEQUENCE OF */ + int count; + TransportAddress item[10]; +} InfoRequestResponse_callSignalAddress; + +typedef struct InfoRequestResponse { /* SEQUENCE */ + enum { + eInfoRequestResponse_nonStandardData = (1 << 31), + eInfoRequestResponse_endpointAlias = (1 << 30), + eInfoRequestResponse_perCallInfo = (1 << 29), + eInfoRequestResponse_tokens = (1 << 28), + eInfoRequestResponse_cryptoTokens = (1 << 27), + eInfoRequestResponse_integrityCheckValue = (1 << 26), + eInfoRequestResponse_needResponse = (1 << 25), + eInfoRequestResponse_capacity = (1 << 24), + eInfoRequestResponse_irrStatus = (1 << 23), + eInfoRequestResponse_unsolicited = (1 << 22), + eInfoRequestResponse_genericData = (1 << 21), + } options; + TransportAddress rasAddress; + InfoRequestResponse_callSignalAddress callSignalAddress; +} InfoRequestResponse; + +typedef struct RasMessage { /* CHOICE */ + enum { + eRasMessage_gatekeeperRequest, + eRasMessage_gatekeeperConfirm, + eRasMessage_gatekeeperReject, + eRasMessage_registrationRequest, + eRasMessage_registrationConfirm, + eRasMessage_registrationReject, + eRasMessage_unregistrationRequest, + eRasMessage_unregistrationConfirm, + eRasMessage_unregistrationReject, + eRasMessage_admissionRequest, + eRasMessage_admissionConfirm, + eRasMessage_admissionReject, + eRasMessage_bandwidthRequest, + eRasMessage_bandwidthConfirm, + eRasMessage_bandwidthReject, + eRasMessage_disengageRequest, + eRasMessage_disengageConfirm, + eRasMessage_disengageReject, + eRasMessage_locationRequest, + eRasMessage_locationConfirm, + eRasMessage_locationReject, + eRasMessage_infoRequest, + eRasMessage_infoRequestResponse, + eRasMessage_nonStandardMessage, + eRasMessage_unknownMessageResponse, + eRasMessage_requestInProgress, + eRasMessage_resourcesAvailableIndicate, + eRasMessage_resourcesAvailableConfirm, + eRasMessage_infoRequestAck, + eRasMessage_infoRequestNak, + eRasMessage_serviceControlIndication, + eRasMessage_serviceControlResponse, + } choice; + union { + GatekeeperRequest gatekeeperRequest; + GatekeeperConfirm gatekeeperConfirm; + RegistrationRequest registrationRequest; + RegistrationConfirm registrationConfirm; + UnregistrationRequest unregistrationRequest; + AdmissionRequest admissionRequest; + AdmissionConfirm admissionConfirm; + LocationRequest locationRequest; + LocationConfirm locationConfirm; + InfoRequestResponse infoRequestResponse; + }; +} RasMessage; diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h new file mode 100644 index 00000000..36282bf7 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_irc.h @@ -0,0 +1,15 @@ +#ifndef _NF_CONNTRACK_IRC_H +#define _NF_CONNTRACK_IRC_H + +#ifdef __KERNEL__ + +#define IRC_PORT 6667 + +extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + unsigned int matchoff, + unsigned int matchlen, 
+ struct nf_conntrack_expect *exp); + +#endif /* __KERNEL__ */ +#endif /* _NF_CONNTRACK_IRC_H */ diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h new file mode 100644 index 00000000..3bbde0c3 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -0,0 +1,324 @@ +/* PPTP constants and structs */ +#ifndef _NF_CONNTRACK_PPTP_H +#define _NF_CONNTRACK_PPTP_H + +#include <linux/netfilter/nf_conntrack_common.h> + +extern const char *const pptp_msg_name[]; + +/* state of the control session */ +enum pptp_ctrlsess_state { + PPTP_SESSION_NONE, /* no session present */ + PPTP_SESSION_ERROR, /* some session error */ + PPTP_SESSION_STOPREQ, /* stop_sess request seen */ + PPTP_SESSION_REQUESTED, /* start_sess request seen */ + PPTP_SESSION_CONFIRMED, /* session established */ +}; + +/* state of the call inside the control session */ +enum pptp_ctrlcall_state { + PPTP_CALL_NONE, + PPTP_CALL_ERROR, + PPTP_CALL_OUT_REQ, + PPTP_CALL_OUT_CONF, + PPTP_CALL_IN_REQ, + PPTP_CALL_IN_REP, + PPTP_CALL_IN_CONF, + PPTP_CALL_CLEAR_REQ, +}; + +/* conntrack private data */ +struct nf_ct_pptp_master { + enum pptp_ctrlsess_state sstate; /* session state */ + enum pptp_ctrlcall_state cstate; /* call state */ + __be16 pac_call_id; /* call id of PAC */ + __be16 pns_call_id; /* call id of PNS */ + + /* in pre-2.6.11 this used to be per-expect. Now it is per-conntrack + * and therefore imposes a fixed limit on the number of maps */ + struct nf_ct_gre_keymap *keymap[IP_CT_DIR_MAX]; +}; + +struct nf_nat_pptp { + __be16 pns_call_id; /* NAT'ed PNS call id */ + __be16 pac_call_id; /* NAT'ed PAC call id */ +}; + +#ifdef __KERNEL__ + +#define PPTP_CONTROL_PORT 1723 + +#define PPTP_PACKET_CONTROL 1 +#define PPTP_PACKET_MGMT 2 + +#define PPTP_MAGIC_COOKIE 0x1a2b3c4d + +struct pptp_pkt_hdr { + __u16 packetLength; + __be16 packetType; + __be32 magicCookie; +}; + +/* PptpControlMessageType values */ +#define PPTP_START_SESSION_REQUEST 1 +#define PPTP_START_SESSION_REPLY 2 +#define PPTP_STOP_SESSION_REQUEST 3 +#define PPTP_STOP_SESSION_REPLY 4 +#define PPTP_ECHO_REQUEST 5 +#define PPTP_ECHO_REPLY 6 +#define PPTP_OUT_CALL_REQUEST 7 +#define PPTP_OUT_CALL_REPLY 8 +#define PPTP_IN_CALL_REQUEST 9 +#define PPTP_IN_CALL_REPLY 10 +#define PPTP_IN_CALL_CONNECT 11 +#define PPTP_CALL_CLEAR_REQUEST 12 +#define PPTP_CALL_DISCONNECT_NOTIFY 13 +#define PPTP_WAN_ERROR_NOTIFY 14 +#define PPTP_SET_LINK_INFO 15 + +#define PPTP_MSG_MAX 15 + +/* PptpGeneralError values */ +#define PPTP_ERROR_CODE_NONE 0 +#define PPTP_NOT_CONNECTED 1 +#define PPTP_BAD_FORMAT 2 +#define PPTP_BAD_VALUE 3 +#define PPTP_NO_RESOURCE 4 +#define PPTP_BAD_CALLID 5 +#define PPTP_REMOVE_DEVICE_ERROR 6 + +struct PptpControlHeader { + __be16 messageType; + __u16 reserved; +}; + +/* FramingCapability Bitmap Values */ +#define PPTP_FRAME_CAP_ASYNC 0x1 +#define PPTP_FRAME_CAP_SYNC 0x2 + +/* BearerCapability Bitmap Values */ +#define PPTP_BEARER_CAP_ANALOG 0x1 +#define PPTP_BEARER_CAP_DIGITAL 0x2 + +struct PptpStartSessionRequest { + __be16 protocolVersion; + __u16 reserved1; + __be32 framingCapability; + __be32 bearerCapability; + __be16 maxChannels; + __be16 firmwareRevision; + __u8 hostName[64]; + __u8 vendorString[64]; +}; + +/* PptpStartSessionResultCode Values */ +#define PPTP_START_OK 1 +#define PPTP_START_GENERAL_ERROR 2 +#define PPTP_START_ALREADY_CONNECTED 3 +#define PPTP_START_NOT_AUTHORIZED 4 +#define PPTP_START_UNKNOWN_PROTOCOL 5 + +struct PptpStartSessionReply { + __be16 protocolVersion; + __u8 resultCode; + __u8 
generalErrorCode; + __be32 framingCapability; + __be32 bearerCapability; + __be16 maxChannels; + __be16 firmwareRevision; + __u8 hostName[64]; + __u8 vendorString[64]; +}; + +/* PptpStopReasons */ +#define PPTP_STOP_NONE 1 +#define PPTP_STOP_PROTOCOL 2 +#define PPTP_STOP_LOCAL_SHUTDOWN 3 + +struct PptpStopSessionRequest { + __u8 reason; + __u8 reserved1; + __u16 reserved2; +}; + +/* PptpStopSessionResultCode */ +#define PPTP_STOP_OK 1 +#define PPTP_STOP_GENERAL_ERROR 2 + +struct PptpStopSessionReply { + __u8 resultCode; + __u8 generalErrorCode; + __u16 reserved1; +}; + +struct PptpEchoRequest { + __be32 identNumber; +}; + +/* PptpEchoReplyResultCode */ +#define PPTP_ECHO_OK 1 +#define PPTP_ECHO_GENERAL_ERROR 2 + +struct PptpEchoReply { + __be32 identNumber; + __u8 resultCode; + __u8 generalErrorCode; + __u16 reserved; +}; + +/* PptpFramingType */ +#define PPTP_ASYNC_FRAMING 1 +#define PPTP_SYNC_FRAMING 2 +#define PPTP_DONT_CARE_FRAMING 3 + +/* PptpCallBearerType */ +#define PPTP_ANALOG_TYPE 1 +#define PPTP_DIGITAL_TYPE 2 +#define PPTP_DONT_CARE_BEARER_TYPE 3 + +struct PptpOutCallRequest { + __be16 callID; + __be16 callSerialNumber; + __be32 minBPS; + __be32 maxBPS; + __be32 bearerType; + __be32 framingType; + __be16 packetWindow; + __be16 packetProcDelay; + __be16 phoneNumberLength; + __u16 reserved1; + __u8 phoneNumber[64]; + __u8 subAddress[64]; +}; + +/* PptpCallResultCode */ +#define PPTP_OUTCALL_CONNECT 1 +#define PPTP_OUTCALL_GENERAL_ERROR 2 +#define PPTP_OUTCALL_NO_CARRIER 3 +#define PPTP_OUTCALL_BUSY 4 +#define PPTP_OUTCALL_NO_DIAL_TONE 5 +#define PPTP_OUTCALL_TIMEOUT 6 +#define PPTP_OUTCALL_DONT_ACCEPT 7 + +struct PptpOutCallReply { + __be16 callID; + __be16 peersCallID; + __u8 resultCode; + __u8 generalErrorCode; + __be16 causeCode; + __be32 connectSpeed; + __be16 packetWindow; + __be16 packetProcDelay; + __be32 physChannelID; +}; + +struct PptpInCallRequest { + __be16 callID; + __be16 callSerialNumber; + __be32 callBearerType; + __be32 physChannelID; + __be16 dialedNumberLength; + __be16 dialingNumberLength; + __u8 dialedNumber[64]; + __u8 dialingNumber[64]; + __u8 subAddress[64]; +}; + +/* PptpInCallResultCode */ +#define PPTP_INCALL_ACCEPT 1 +#define PPTP_INCALL_GENERAL_ERROR 2 +#define PPTP_INCALL_DONT_ACCEPT 3 + +struct PptpInCallReply { + __be16 callID; + __be16 peersCallID; + __u8 resultCode; + __u8 generalErrorCode; + __be16 packetWindow; + __be16 packetProcDelay; + __u16 reserved; +}; + +struct PptpInCallConnected { + __be16 peersCallID; + __u16 reserved; + __be32 connectSpeed; + __be16 packetWindow; + __be16 packetProcDelay; + __be32 callFramingType; +}; + +struct PptpClearCallRequest { + __be16 callID; + __u16 reserved; +}; + +struct PptpCallDisconnectNotify { + __be16 callID; + __u8 resultCode; + __u8 generalErrorCode; + __be16 causeCode; + __u16 reserved; + __u8 callStatistics[128]; +}; + +struct PptpWanErrorNotify { + __be16 peersCallID; + __u16 reserved; + __be32 crcErrors; + __be32 framingErrors; + __be32 hardwareOverRuns; + __be32 bufferOverRuns; + __be32 timeoutErrors; + __be32 alignmentErrors; +}; + +struct PptpSetLinkInfo { + __be16 peersCallID; + __u16 reserved; + __be32 sendAccm; + __be32 recvAccm; +}; + +union pptp_ctrl_union { + struct PptpStartSessionRequest sreq; + struct PptpStartSessionReply srep; + struct PptpStopSessionRequest streq; + struct PptpStopSessionReply strep; + struct PptpOutCallRequest ocreq; + struct PptpOutCallReply ocack; + struct PptpInCallRequest icreq; + struct PptpInCallReply icack; + struct PptpInCallConnected iccon; + struct 
PptpClearCallRequest clrreq; + struct PptpCallDisconnectNotify disc; + struct PptpWanErrorNotify wanerr; + struct PptpSetLinkInfo setlink; +}; + +/* crap needed for nf_conntrack_compat.h */ +struct nf_conn; +struct nf_conntrack_expect; + +extern int +(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + +extern int +(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + +extern void +(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig, + struct nf_conntrack_expect *exp_reply); + +extern void +(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, + struct nf_conntrack_expect *exp); + +#endif /* __KERNEL__ */ +#endif /* _NF_CONNTRACK_PPTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h new file mode 100644 index 00000000..6a0664c0 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -0,0 +1,94 @@ +#ifndef _CONNTRACK_PROTO_GRE_H +#define _CONNTRACK_PROTO_GRE_H +#include <asm/byteorder.h> + +/* GRE PROTOCOL HEADER */ + +/* GRE Version field */ +#define GRE_VERSION_1701 0x0 +#define GRE_VERSION_PPTP 0x1 + +/* GRE Protocol field */ +#define GRE_PROTOCOL_PPTP 0x880B + +/* GRE Flags */ +#define GRE_FLAG_C 0x80 +#define GRE_FLAG_R 0x40 +#define GRE_FLAG_K 0x20 +#define GRE_FLAG_S 0x10 +#define GRE_FLAG_A 0x80 + +#define GRE_IS_C(f) ((f)&GRE_FLAG_C) +#define GRE_IS_R(f) ((f)&GRE_FLAG_R) +#define GRE_IS_K(f) ((f)&GRE_FLAG_K) +#define GRE_IS_S(f) ((f)&GRE_FLAG_S) +#define GRE_IS_A(f) ((f)&GRE_FLAG_A) + +/* GRE is a mess: Four different standards */ +struct gre_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u16 rec:3, + srr:1, + seq:1, + key:1, + routing:1, + csum:1, + version:3, + reserved:4, + ack:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u16 csum:1, + routing:1, + key:1, + seq:1, + srr:1, + rec:3, + ack:1, + reserved:4, + version:3; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __be16 protocol; +}; + +/* modified GRE header for PPTP */ +struct gre_hdr_pptp { + __u8 flags; /* bitfield */ + __u8 version; /* should be GRE_VERSION_PPTP */ + __be16 protocol; /* should be GRE_PROTOCOL_PPTP */ + __be16 payload_len; /* size of ppp payload, not inc. gre header */ + __be16 call_id; /* peer's call_id for this session */ + __be32 seq; /* sequence number. 
Present if S==1 */ + __be32 ack; /* seq number of highest packet received by */ + /* sender in this session */ +}; + +struct nf_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +#ifdef __KERNEL__ +#include <net/netfilter/nf_conntrack_tuple.h> + +struct nf_conn; + +/* structure for original <-> reply keymap */ +struct nf_ct_gre_keymap { + struct list_head list; + struct nf_conntrack_tuple tuple; +}; + +/* add new tuple->key_reply pair to keymap */ +int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, + struct nf_conntrack_tuple *t); + +/* delete keymap entries */ +void nf_ct_gre_keymap_destroy(struct nf_conn *ct); + +extern void nf_ct_gre_keymap_flush(struct net *net); +extern void nf_nat_need_gre(void); + +#endif /* __KERNEL__ */ +#endif /* _CONNTRACK_PROTO_GRE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sane.h b/include/linux/netfilter/nf_conntrack_sane.h new file mode 100644 index 00000000..4767d6e2 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_sane.h @@ -0,0 +1,21 @@ +#ifndef _NF_CONNTRACK_SANE_H +#define _NF_CONNTRACK_SANE_H +/* SANE tracking. */ + +#ifdef __KERNEL__ + +#define SANE_PORT 6566 + +enum sane_state { + SANE_STATE_NORMAL, + SANE_STATE_START_REQUESTED, +}; + +/* This structure exists only once per master */ +struct nf_ct_sane_master { + enum sane_state state; +}; + +#endif /* __KERNEL__ */ + +#endif /* _NF_CONNTRACK_SANE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h new file mode 100644 index 00000000..ceeefe66 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -0,0 +1,25 @@ +#ifndef _NF_CONNTRACK_SCTP_H +#define _NF_CONNTRACK_SCTP_H +/* SCTP tracking. */ + +#include <linux/netfilter/nf_conntrack_tuple_common.h> + +enum sctp_conntrack { + SCTP_CONNTRACK_NONE, + SCTP_CONNTRACK_CLOSED, + SCTP_CONNTRACK_COOKIE_WAIT, + SCTP_CONNTRACK_COOKIE_ECHOED, + SCTP_CONNTRACK_ESTABLISHED, + SCTP_CONNTRACK_SHUTDOWN_SENT, + SCTP_CONNTRACK_SHUTDOWN_RECD, + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT, + SCTP_CONNTRACK_MAX +}; + +struct ip_ct_sctp { + enum sctp_conntrack state; + + __be32 vtag[IP_CT_DIR_MAX]; +}; + +#endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h new file mode 100644 index 00000000..0ce91d56 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_sip.h @@ -0,0 +1,179 @@ +#ifndef __NF_CONNTRACK_SIP_H__ +#define __NF_CONNTRACK_SIP_H__ +#ifdef __KERNEL__ + +#define SIP_PORT 5060 +#define SIP_TIMEOUT 3600 + +struct nf_ct_sip_master { + unsigned int register_cseq; + unsigned int invite_cseq; +}; + +enum sip_expectation_classes { + SIP_EXPECT_SIGNALLING, + SIP_EXPECT_AUDIO, + SIP_EXPECT_VIDEO, + SIP_EXPECT_IMAGE, + __SIP_EXPECT_MAX +}; +#define SIP_EXPECT_MAX (__SIP_EXPECT_MAX - 1) + +struct sdp_media_type { + const char *name; + unsigned int len; + enum sip_expectation_classes class; +}; + +#define SDP_MEDIA_TYPE(__name, __class) \ +{ \ + .name = (__name), \ + .len = sizeof(__name) - 1, \ + .class = (__class), \ +} + +struct sip_handler { + const char *method; + unsigned int len; + int (*request)(struct sk_buff *skb, unsigned int dataoff, + const char **dptr, unsigned int *datalen, + unsigned int cseq); + int (*response)(struct sk_buff *skb, unsigned int dataoff, + const char **dptr, unsigned int *datalen, + unsigned int cseq, unsigned int code); +}; + +#define SIP_HANDLER(__method, __request, __response) \ +{ \ + .method = (__method), \ + .len = sizeof(__method) - 1, \ 
+ .request = (__request), \ + .response = (__response), \ +} + +struct sip_header { + const char *name; + const char *cname; + const char *search; + unsigned int len; + unsigned int clen; + unsigned int slen; + int (*match_len)(const struct nf_conn *ct, + const char *dptr, const char *limit, + int *shift); +}; + +#define __SIP_HDR(__name, __cname, __search, __match) \ +{ \ + .name = (__name), \ + .len = sizeof(__name) - 1, \ + .cname = (__cname), \ + .clen = (__cname) ? sizeof(__cname) - 1 : 0, \ + .search = (__search), \ + .slen = (__search) ? sizeof(__search) - 1 : 0, \ + .match_len = (__match), \ +} + +#define SIP_HDR(__name, __cname, __search, __match) \ + __SIP_HDR(__name, __cname, __search, __match) + +#define SDP_HDR(__name, __search, __match) \ + __SIP_HDR(__name, NULL, __search, __match) + +enum sip_header_types { + SIP_HDR_CSEQ, + SIP_HDR_FROM, + SIP_HDR_TO, + SIP_HDR_CONTACT, + SIP_HDR_VIA_UDP, + SIP_HDR_VIA_TCP, + SIP_HDR_EXPIRES, + SIP_HDR_CONTENT_LENGTH, + SIP_HDR_CALL_ID, +}; + +enum sdp_header_types { + SDP_HDR_UNSPEC, + SDP_HDR_VERSION, + SDP_HDR_OWNER_IP4, + SDP_HDR_CONNECTION_IP4, + SDP_HDR_OWNER_IP6, + SDP_HDR_CONNECTION_IP6, + SDP_HDR_MEDIA, +}; + +extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen); +extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off); +extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + struct nf_conntrack_expect *exp, + unsigned int matchoff, + unsigned int matchlen); +extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + unsigned int sdpoff, + enum sdp_header_types type, + enum sdp_header_types term, + const union nf_inet_addr *addr); +extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + unsigned int matchoff, + unsigned int matchlen, + u_int16_t port); +extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + unsigned int sdpoff, + const union nf_inet_addr *addr); +extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, + unsigned int dataoff, + const char **dptr, + unsigned int *datalen, + struct nf_conntrack_expect *rtp_exp, + struct nf_conntrack_expect *rtcp_exp, + unsigned int mediaoff, + unsigned int medialen, + union nf_inet_addr *rtp_addr); + +extern int ct_sip_parse_request(const struct nf_conn *ct, + const char *dptr, unsigned int datalen, + unsigned int *matchoff, unsigned int *matchlen, + union nf_inet_addr *addr, __be16 *port); +extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr, + unsigned int dataoff, unsigned int datalen, + enum sip_header_types type, + unsigned int *matchoff, unsigned int *matchlen); +extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, + unsigned int *dataoff, unsigned int datalen, + enum sip_header_types type, int *in_header, + unsigned int *matchoff, unsigned int *matchlen, + union nf_inet_addr *addr, __be16 *port); +extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, + unsigned int dataoff, unsigned int datalen, + const char *name, + unsigned int *matchoff, unsigned int *matchlen, + union nf_inet_addr *addr); +extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, + 
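/*
 * Illustrative sketch: a helper typically locates a header with
 * ct_sip_get_header() and then interprets the matched region itself.
 * Pulling the numeric part of the CSeq header out of a message that
 * starts at dptr and spans datalen bytes might look like this
 * (simple_strtoul() assumed from the kernel library, error handling
 * trimmed):
 *
 *	unsigned int matchoff, matchlen;
 *	unsigned int cseq = 0;
 *
 *	if (ct_sip_get_header(ct, dptr, 0, datalen, SIP_HDR_CSEQ,
 *			      &matchoff, &matchlen) > 0)
 *		cseq = simple_strtoul(dptr + matchoff, NULL, 10);
 */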
unsigned int off, unsigned int datalen, + const char *name, + unsigned int *matchoff, unsigned int *matchen, + unsigned int *val); + +extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, + unsigned int dataoff, unsigned int datalen, + enum sdp_header_types type, + enum sdp_header_types term, + unsigned int *matchoff, unsigned int *matchlen); + +#endif /* __KERNEL__ */ +#endif /* __NF_CONNTRACK_SIP_H__ */ diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h new file mode 100644 index 00000000..064bc63a --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_snmp.h @@ -0,0 +1,9 @@ +#ifndef _NF_CONNTRACK_SNMP_H +#define _NF_CONNTRACK_SNMP_H + +extern int (*nf_nat_snmp_hook)(struct sk_buff *skb, + unsigned int protoff, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo); + +#endif /* _NF_CONNTRACK_SNMP_H */ diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h new file mode 100644 index 00000000..e59868ae --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -0,0 +1,79 @@ +#ifndef _NF_CONNTRACK_TCP_H +#define _NF_CONNTRACK_TCP_H +/* TCP tracking. */ + +#include <linux/types.h> + +/* This is exposed to userspace (ctnetlink) */ +enum tcp_conntrack { + TCP_CONNTRACK_NONE, + TCP_CONNTRACK_SYN_SENT, + TCP_CONNTRACK_SYN_RECV, + TCP_CONNTRACK_ESTABLISHED, + TCP_CONNTRACK_FIN_WAIT, + TCP_CONNTRACK_CLOSE_WAIT, + TCP_CONNTRACK_LAST_ACK, + TCP_CONNTRACK_TIME_WAIT, + TCP_CONNTRACK_CLOSE, + TCP_CONNTRACK_LISTEN, /* obsolete */ +#define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN + TCP_CONNTRACK_MAX, + TCP_CONNTRACK_IGNORE, + TCP_CONNTRACK_RETRANS, + TCP_CONNTRACK_UNACK, + TCP_CONNTRACK_TIMEOUT_MAX +}; + +/* Window scaling is advertised by the sender */ +#define IP_CT_TCP_FLAG_WINDOW_SCALE 0x01 + +/* SACK is permitted by the sender */ +#define IP_CT_TCP_FLAG_SACK_PERM 0x02 + +/* This sender sent FIN first */ +#define IP_CT_TCP_FLAG_CLOSE_INIT 0x04 + +/* Be liberal in window checking */ +#define IP_CT_TCP_FLAG_BE_LIBERAL 0x08 + +/* Has unacknowledged data */ +#define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 + +/* The field td_maxack has been set */ +#define IP_CT_TCP_FLAG_MAXACK_SET 0x20 + +struct nf_ct_tcp_flags { + __u8 flags; + __u8 mask; +}; + +#ifdef __KERNEL__ + +struct ip_ct_tcp_state { + u_int32_t td_end; /* max of seq + len */ + u_int32_t td_maxend; /* max of ack + max(win, 1) */ + u_int32_t td_maxwin; /* max(win) */ + u_int32_t td_maxack; /* max of ack */ + u_int8_t td_scale; /* window scale factor */ + u_int8_t flags; /* per direction options */ +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; /* connection parameters per direction */ + u_int8_t state; /* state of the connection (enum tcp_conntrack) */ + /* For detecting stale connections */ + u_int8_t last_dir; /* Direction of the last packet (enum ip_conntrack_dir) */ + u_int8_t retrans; /* Number of retransmitted packets */ + u_int8_t last_index; /* Index of the last packet */ + u_int32_t last_seq; /* Last sequence number seen in dir */ + u_int32_t last_ack; /* Last sequence number seen in opposite dir */ + u_int32_t last_end; /* Last seq + len */ + u_int16_t last_win; /* Last window advertisement seen in dir */ + /* For SYN packets while we may be out-of-sync */ + u_int8_t last_wscale; /* Last window scaling factor seen */ + u_int8_t last_flags; /* Last flags set */ +}; + +#endif /* __KERNEL__ */ + +#endif /* _NF_CONNTRACK_TCP_H */ diff --git a/include/linux/netfilter/nf_conntrack_tftp.h 
b/include/linux/netfilter/nf_conntrack_tftp.h new file mode 100644 index 00000000..c78d38fd --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_tftp.h @@ -0,0 +1,20 @@ +#ifndef _NF_CONNTRACK_TFTP_H +#define _NF_CONNTRACK_TFTP_H + +#define TFTP_PORT 69 + +struct tftphdr { + __be16 opcode; +}; + +#define TFTP_OPCODE_READ 1 +#define TFTP_OPCODE_WRITE 2 +#define TFTP_OPCODE_DATA 3 +#define TFTP_OPCODE_ACK 4 +#define TFTP_OPCODE_ERROR 5 + +extern unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + struct nf_conntrack_expect *exp); + +#endif /* _NF_CONNTRACK_TFTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h new file mode 100644 index 00000000..2f6bbc5b --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_tuple_common.h @@ -0,0 +1,39 @@ +#ifndef _NF_CONNTRACK_TUPLE_COMMON_H +#define _NF_CONNTRACK_TUPLE_COMMON_H + +enum ip_conntrack_dir { + IP_CT_DIR_ORIGINAL, + IP_CT_DIR_REPLY, + IP_CT_DIR_MAX +}; + +/* The protocol-specific manipulable parts of the tuple: always in + * network order + */ +union nf_conntrack_man_proto { + /* Add other protocols here. */ + __be16 all; + + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ + } gre; +}; + +#define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) + +#endif /* _NF_CONNTRACK_TUPLE_COMMON_H */ diff --git a/include/linux/netfilter/nf_nat.h b/include/linux/netfilter/nf_nat.h new file mode 100644 index 00000000..8df2d137 --- /dev/null +++ b/include/linux/netfilter/nf_nat.h @@ -0,0 +1,25 @@ +#ifndef _NETFILTER_NF_NAT_H +#define _NETFILTER_NF_NAT_H + +#include <linux/netfilter.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> + +#define NF_NAT_RANGE_MAP_IPS 1 +#define NF_NAT_RANGE_PROTO_SPECIFIED 2 +#define NF_NAT_RANGE_PROTO_RANDOM 4 +#define NF_NAT_RANGE_PERSISTENT 8 + +struct nf_nat_ipv4_range { + unsigned int flags; + __be32 min_ip; + __be32 max_ip; + union nf_conntrack_man_proto min; + union nf_conntrack_man_proto max; +}; + +struct nf_nat_ipv4_multi_range_compat { + unsigned int rangesize; + struct nf_nat_ipv4_range range[1]; +}; + +#endif /* _NETFILTER_NF_NAT_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h new file mode 100644 index 00000000..6fd1f0d0 --- /dev/null +++ b/include/linux/netfilter/nfnetlink.h @@ -0,0 +1,95 @@ +#ifndef _NFNETLINK_H +#define _NFNETLINK_H +#include <linux/types.h> +#include <linux/netfilter/nfnetlink_compat.h> + +enum nfnetlink_groups { + NFNLGRP_NONE, +#define NFNLGRP_NONE NFNLGRP_NONE + NFNLGRP_CONNTRACK_NEW, +#define NFNLGRP_CONNTRACK_NEW NFNLGRP_CONNTRACK_NEW + NFNLGRP_CONNTRACK_UPDATE, +#define NFNLGRP_CONNTRACK_UPDATE NFNLGRP_CONNTRACK_UPDATE + NFNLGRP_CONNTRACK_DESTROY, +#define NFNLGRP_CONNTRACK_DESTROY NFNLGRP_CONNTRACK_DESTROY + NFNLGRP_CONNTRACK_EXP_NEW, +#define NFNLGRP_CONNTRACK_EXP_NEW NFNLGRP_CONNTRACK_EXP_NEW + NFNLGRP_CONNTRACK_EXP_UPDATE, +#define NFNLGRP_CONNTRACK_EXP_UPDATE NFNLGRP_CONNTRACK_EXP_UPDATE + NFNLGRP_CONNTRACK_EXP_DESTROY, +#define NFNLGRP_CONNTRACK_EXP_DESTROY NFNLGRP_CONNTRACK_EXP_DESTROY + __NFNLGRP_MAX, +}; +#define NFNLGRP_MAX (__NFNLGRP_MAX - 1) + +/* General form of address family dependent message. 
+ */ +struct nfgenmsg { + __u8 nfgen_family; /* AF_xxx */ + __u8 version; /* nfnetlink version */ + __be16 res_id; /* resource id */ +}; + +#define NFNETLINK_V0 0 + +/* netfilter netlink message types are split in two pieces: + * 8 bit subsystem, 8bit operation. + */ + +#define NFNL_SUBSYS_ID(x) ((x & 0xff00) >> 8) +#define NFNL_MSG_TYPE(x) (x & 0x00ff) + +/* No enum here, otherwise __stringify() trick of MODULE_ALIAS_NFNL_SUBSYS() + * won't work anymore */ +#define NFNL_SUBSYS_NONE 0 +#define NFNL_SUBSYS_CTNETLINK 1 +#define NFNL_SUBSYS_CTNETLINK_EXP 2 +#define NFNL_SUBSYS_QUEUE 3 +#define NFNL_SUBSYS_ULOG 4 +#define NFNL_SUBSYS_OSF 5 +#define NFNL_SUBSYS_IPSET 6 +#define NFNL_SUBSYS_ACCT 7 +#define NFNL_SUBSYS_CTNETLINK_TIMEOUT 8 +#define NFNL_SUBSYS_COUNT 9 + +#ifdef __KERNEL__ + +#include <linux/netlink.h> +#include <linux/capability.h> +#include <net/netlink.h> + +struct nfnl_callback { + int (*call)(struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[]); + int (*call_rcu)(struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[]); + const struct nla_policy *policy; /* netlink attribute policy */ + const u_int16_t attr_count; /* number of nlattr's */ +}; + +struct nfnetlink_subsystem { + const char *name; + __u8 subsys_id; /* nfnetlink subsystem ID */ + __u8 cb_count; /* number of callbacks */ + const struct nfnl_callback *cb; /* callback for individual types */ +}; + +extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); +extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); + +extern int nfnetlink_has_listeners(struct net *net, unsigned int group); +extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, + int echo, gfp_t flags); +extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error); +extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags); + +extern void nfnl_lock(void); +extern void nfnl_unlock(void); + +#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ + MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) + +#endif /* __KERNEL__ */ +#endif /* _NFNETLINK_H */ diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h new file mode 100644 index 00000000..7c4279b4 --- /dev/null +++ b/include/linux/netfilter/nfnetlink_acct.h @@ -0,0 +1,36 @@ +#ifndef _NFNL_ACCT_H_ +#define _NFNL_ACCT_H_ + +#ifndef NFACCT_NAME_MAX +#define NFACCT_NAME_MAX 32 +#endif + +enum nfnl_acct_msg_types { + NFNL_MSG_ACCT_NEW, + NFNL_MSG_ACCT_GET, + NFNL_MSG_ACCT_GET_CTRZERO, + NFNL_MSG_ACCT_DEL, + NFNL_MSG_ACCT_MAX +}; + +enum nfnl_acct_type { + NFACCT_UNSPEC, + NFACCT_NAME, + NFACCT_PKTS, + NFACCT_BYTES, + NFACCT_USE, + __NFACCT_MAX +}; +#define NFACCT_MAX (__NFACCT_MAX - 1) + +#ifdef __KERNEL__ + +struct nf_acct; + +extern struct nf_acct *nfnl_acct_find_get(const char *filter_name); +extern void nfnl_acct_put(struct nf_acct *acct); +extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); + +#endif /* __KERNEL__ */ + +#endif /* _NFNL_ACCT_H */ diff --git a/include/linux/netfilter/nfnetlink_compat.h b/include/linux/netfilter/nfnetlink_compat.h new file mode 100644 index 00000000..ffb95036 --- /dev/null +++ b/include/linux/netfilter/nfnetlink_compat.h @@ -0,0 +1,63 @@ +#ifndef _NFNETLINK_COMPAT_H +#define _NFNETLINK_COMPAT_H + +#include <linux/types.h> + +#ifndef __KERNEL__ +/* Old nfnetlink macros for userspace */ + +/* 
nfnetlink groups: Up to 32 maximum */ +#define NF_NETLINK_CONNTRACK_NEW 0x00000001 +#define NF_NETLINK_CONNTRACK_UPDATE 0x00000002 +#define NF_NETLINK_CONNTRACK_DESTROY 0x00000004 +#define NF_NETLINK_CONNTRACK_EXP_NEW 0x00000008 +#define NF_NETLINK_CONNTRACK_EXP_UPDATE 0x00000010 +#define NF_NETLINK_CONNTRACK_EXP_DESTROY 0x00000020 + +/* Generic structure for encapsulation optional netfilter information. + * It is reminiscent of sockaddr, but with sa_family replaced + * with attribute type. + * ! This should someday be put somewhere generic as now rtnetlink and + * ! nfnetlink use the same attributes methods. - J. Schulist. + */ + +struct nfattr { + __u16 nfa_len; + __u16 nfa_type; /* we use 15 bits for the type, and the highest + * bit to indicate whether the payload is nested */ +}; + +/* FIXME: Apart from NFNL_NFA_NESTED shamelessly copy and pasted from + * rtnetlink.h, it's time to put this in a generic file */ + +#define NFNL_NFA_NEST 0x8000 +#define NFA_TYPE(attr) ((attr)->nfa_type & 0x7fff) + +#define NFA_ALIGNTO 4 +#define NFA_ALIGN(len) (((len) + NFA_ALIGNTO - 1) & ~(NFA_ALIGNTO - 1)) +#define NFA_OK(nfa,len) ((len) > 0 && (nfa)->nfa_len >= sizeof(struct nfattr) \ + && (nfa)->nfa_len <= (len)) +#define NFA_NEXT(nfa,attrlen) ((attrlen) -= NFA_ALIGN((nfa)->nfa_len), \ + (struct nfattr *)(((char *)(nfa)) + NFA_ALIGN((nfa)->nfa_len))) +#define NFA_LENGTH(len) (NFA_ALIGN(sizeof(struct nfattr)) + (len)) +#define NFA_SPACE(len) NFA_ALIGN(NFA_LENGTH(len)) +#define NFA_DATA(nfa) ((void *)(((char *)(nfa)) + NFA_LENGTH(0))) +#define NFA_PAYLOAD(nfa) ((int)((nfa)->nfa_len) - NFA_LENGTH(0)) +#define NFA_NEST(skb, type) \ +({ struct nfattr *__start = (struct nfattr *)skb_tail_pointer(skb); \ + NFA_PUT(skb, (NFNL_NFA_NEST | type), 0, NULL); \ + __start; }) +#define NFA_NEST_END(skb, start) \ +({ (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ + (skb)->len; }) +#define NFA_NEST_CANCEL(skb, start) \ +({ if (start) \ + skb_trim(skb, (unsigned char *) (start) - (skb)->data); \ + -1; }) + +#define NFM_NFA(n) ((struct nfattr *)(((char *)(n)) \ + + NLMSG_ALIGN(sizeof(struct nfgenmsg)))) +#define NFM_PAYLOAD(n) NLMSG_PAYLOAD(n, sizeof(struct nfgenmsg)) + +#endif /* ! 
__KERNEL__ */ +#endif /* _NFNETLINK_COMPAT_H */ diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h new file mode 100644 index 00000000..e58e4b93 --- /dev/null +++ b/include/linux/netfilter/nfnetlink_conntrack.h @@ -0,0 +1,205 @@ +#ifndef _IPCONNTRACK_NETLINK_H +#define _IPCONNTRACK_NETLINK_H +#include <linux/netfilter/nfnetlink.h> + +enum cntl_msg_types { + IPCTNL_MSG_CT_NEW, + IPCTNL_MSG_CT_GET, + IPCTNL_MSG_CT_DELETE, + IPCTNL_MSG_CT_GET_CTRZERO, + + IPCTNL_MSG_MAX +}; + +enum ctnl_exp_msg_types { + IPCTNL_MSG_EXP_NEW, + IPCTNL_MSG_EXP_GET, + IPCTNL_MSG_EXP_DELETE, + + IPCTNL_MSG_EXP_MAX +}; + + +enum ctattr_type { + CTA_UNSPEC, + CTA_TUPLE_ORIG, + CTA_TUPLE_REPLY, + CTA_STATUS, + CTA_PROTOINFO, + CTA_HELP, + CTA_NAT_SRC, +#define CTA_NAT CTA_NAT_SRC /* backwards compatibility */ + CTA_TIMEOUT, + CTA_MARK, + CTA_COUNTERS_ORIG, + CTA_COUNTERS_REPLY, + CTA_USE, + CTA_ID, + CTA_NAT_DST, + CTA_TUPLE_MASTER, + CTA_NAT_SEQ_ADJ_ORIG, + CTA_NAT_SEQ_ADJ_REPLY, + CTA_SECMARK, /* obsolete */ + CTA_ZONE, + CTA_SECCTX, + CTA_TIMESTAMP, + CTA_MARK_MASK, + __CTA_MAX +}; +#define CTA_MAX (__CTA_MAX - 1) + +enum ctattr_tuple { + CTA_TUPLE_UNSPEC, + CTA_TUPLE_IP, + CTA_TUPLE_PROTO, + __CTA_TUPLE_MAX +}; +#define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1) + +enum ctattr_ip { + CTA_IP_UNSPEC, + CTA_IP_V4_SRC, + CTA_IP_V4_DST, + CTA_IP_V6_SRC, + CTA_IP_V6_DST, + __CTA_IP_MAX +}; +#define CTA_IP_MAX (__CTA_IP_MAX - 1) + +enum ctattr_l4proto { + CTA_PROTO_UNSPEC, + CTA_PROTO_NUM, + CTA_PROTO_SRC_PORT, + CTA_PROTO_DST_PORT, + CTA_PROTO_ICMP_ID, + CTA_PROTO_ICMP_TYPE, + CTA_PROTO_ICMP_CODE, + CTA_PROTO_ICMPV6_ID, + CTA_PROTO_ICMPV6_TYPE, + CTA_PROTO_ICMPV6_CODE, + __CTA_PROTO_MAX +}; +#define CTA_PROTO_MAX (__CTA_PROTO_MAX - 1) + +enum ctattr_protoinfo { + CTA_PROTOINFO_UNSPEC, + CTA_PROTOINFO_TCP, + CTA_PROTOINFO_DCCP, + CTA_PROTOINFO_SCTP, + __CTA_PROTOINFO_MAX +}; +#define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) + +enum ctattr_protoinfo_tcp { + CTA_PROTOINFO_TCP_UNSPEC, + CTA_PROTOINFO_TCP_STATE, + CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, + CTA_PROTOINFO_TCP_WSCALE_REPLY, + CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, + CTA_PROTOINFO_TCP_FLAGS_REPLY, + __CTA_PROTOINFO_TCP_MAX +}; +#define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1) + +enum ctattr_protoinfo_dccp { + CTA_PROTOINFO_DCCP_UNSPEC, + CTA_PROTOINFO_DCCP_STATE, + CTA_PROTOINFO_DCCP_ROLE, + CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, + __CTA_PROTOINFO_DCCP_MAX, +}; +#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1) + +enum ctattr_protoinfo_sctp { + CTA_PROTOINFO_SCTP_UNSPEC, + CTA_PROTOINFO_SCTP_STATE, + CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, + CTA_PROTOINFO_SCTP_VTAG_REPLY, + __CTA_PROTOINFO_SCTP_MAX +}; +#define CTA_PROTOINFO_SCTP_MAX (__CTA_PROTOINFO_SCTP_MAX - 1) + +enum ctattr_counters { + CTA_COUNTERS_UNSPEC, + CTA_COUNTERS_PACKETS, /* 64bit counters */ + CTA_COUNTERS_BYTES, /* 64bit counters */ + CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */ + CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */ + __CTA_COUNTERS_MAX +}; +#define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1) + +enum ctattr_tstamp { + CTA_TIMESTAMP_UNSPEC, + CTA_TIMESTAMP_START, + CTA_TIMESTAMP_STOP, + __CTA_TIMESTAMP_MAX +}; +#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1) + +enum ctattr_nat { + CTA_NAT_UNSPEC, + CTA_NAT_MINIP, + CTA_NAT_MAXIP, + CTA_NAT_PROTO, + __CTA_NAT_MAX +}; +#define CTA_NAT_MAX (__CTA_NAT_MAX - 1) + +enum ctattr_protonat { + CTA_PROTONAT_UNSPEC, + CTA_PROTONAT_PORT_MIN, + CTA_PROTONAT_PORT_MAX, + 
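/*
 * Illustrative sketch of how the nesting above fits together: a
 * conntrack tuple travels as CTA_TUPLE_ORIG/CTA_TUPLE_REPLY, which
 * nests CTA_TUPLE_IP (the CTA_IP_V4_* or CTA_IP_V6_* pair) and
 * CTA_TUPLE_PROTO (CTA_PROTO_NUM plus the per-protocol attributes).
 * Kernel-side serialisation of the IPv4 half, loosely modelled on
 * ctnetlink and assuming <net/netlink.h> and the conntrack tuple type:
 *
 *	static int dump_v4_tuple_ip(struct sk_buff *skb,
 *				    const struct nf_conntrack_tuple *t)
 *	{
 *		struct nlattr *nest;
 *
 *		nest = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
 *		if (nest == NULL)
 *			goto nla_put_failure;
 *		NLA_PUT_BE32(skb, CTA_IP_V4_SRC, t->src.u3.ip);
 *		NLA_PUT_BE32(skb, CTA_IP_V4_DST, t->dst.u3.ip);
 *		nla_nest_end(skb, nest);
 *		return 0;
 *
 *	nla_put_failure:
 *		return -1;
 *	}
 */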
__CTA_PROTONAT_MAX +}; +#define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1) + +enum ctattr_natseq { + CTA_NAT_SEQ_UNSPEC, + CTA_NAT_SEQ_CORRECTION_POS, + CTA_NAT_SEQ_OFFSET_BEFORE, + CTA_NAT_SEQ_OFFSET_AFTER, + __CTA_NAT_SEQ_MAX +}; +#define CTA_NAT_SEQ_MAX (__CTA_NAT_SEQ_MAX - 1) + +enum ctattr_expect { + CTA_EXPECT_UNSPEC, + CTA_EXPECT_MASTER, + CTA_EXPECT_TUPLE, + CTA_EXPECT_MASK, + CTA_EXPECT_TIMEOUT, + CTA_EXPECT_ID, + CTA_EXPECT_HELP_NAME, + CTA_EXPECT_ZONE, + CTA_EXPECT_FLAGS, + CTA_EXPECT_CLASS, + CTA_EXPECT_NAT, + CTA_EXPECT_FN, + __CTA_EXPECT_MAX +}; +#define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1) + +enum ctattr_expect_nat { + CTA_EXPECT_NAT_UNSPEC, + CTA_EXPECT_NAT_DIR, + CTA_EXPECT_NAT_TUPLE, + __CTA_EXPECT_NAT_MAX +}; +#define CTA_EXPECT_NAT_MAX (__CTA_EXPECT_NAT_MAX - 1) + +enum ctattr_help { + CTA_HELP_UNSPEC, + CTA_HELP_NAME, + __CTA_HELP_MAX +}; +#define CTA_HELP_MAX (__CTA_HELP_MAX - 1) + +enum ctattr_secctx { + CTA_SECCTX_UNSPEC, + CTA_SECCTX_NAME, + __CTA_SECCTX_MAX +}; +#define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1) + +#endif /* _IPCONNTRACK_NETLINK_H */ diff --git a/include/linux/netfilter/nfnetlink_cttimeout.h b/include/linux/netfilter/nfnetlink_cttimeout.h new file mode 100644 index 00000000..a2810a7c --- /dev/null +++ b/include/linux/netfilter/nfnetlink_cttimeout.h @@ -0,0 +1,114 @@ +#ifndef _CTTIMEOUT_NETLINK_H +#define _CTTIMEOUT_NETLINK_H +#include <linux/netfilter/nfnetlink.h> + +enum ctnl_timeout_msg_types { + IPCTNL_MSG_TIMEOUT_NEW, + IPCTNL_MSG_TIMEOUT_GET, + IPCTNL_MSG_TIMEOUT_DELETE, + + IPCTNL_MSG_TIMEOUT_MAX +}; + +enum ctattr_timeout { + CTA_TIMEOUT_UNSPEC, + CTA_TIMEOUT_NAME, + CTA_TIMEOUT_L3PROTO, + CTA_TIMEOUT_L4PROTO, + CTA_TIMEOUT_DATA, + CTA_TIMEOUT_USE, + __CTA_TIMEOUT_MAX +}; +#define CTA_TIMEOUT_MAX (__CTA_TIMEOUT_MAX - 1) + +enum ctattr_timeout_generic { + CTA_TIMEOUT_GENERIC_UNSPEC, + CTA_TIMEOUT_GENERIC_TIMEOUT, + __CTA_TIMEOUT_GENERIC_MAX +}; +#define CTA_TIMEOUT_GENERIC_MAX (__CTA_TIMEOUT_GENERIC_MAX - 1) + +enum ctattr_timeout_tcp { + CTA_TIMEOUT_TCP_UNSPEC, + CTA_TIMEOUT_TCP_SYN_SENT, + CTA_TIMEOUT_TCP_SYN_RECV, + CTA_TIMEOUT_TCP_ESTABLISHED, + CTA_TIMEOUT_TCP_FIN_WAIT, + CTA_TIMEOUT_TCP_CLOSE_WAIT, + CTA_TIMEOUT_TCP_LAST_ACK, + CTA_TIMEOUT_TCP_TIME_WAIT, + CTA_TIMEOUT_TCP_CLOSE, + CTA_TIMEOUT_TCP_SYN_SENT2, + CTA_TIMEOUT_TCP_RETRANS, + CTA_TIMEOUT_TCP_UNACK, + __CTA_TIMEOUT_TCP_MAX +}; +#define CTA_TIMEOUT_TCP_MAX (__CTA_TIMEOUT_TCP_MAX - 1) + +enum ctattr_timeout_udp { + CTA_TIMEOUT_UDP_UNSPEC, + CTA_TIMEOUT_UDP_UNREPLIED, + CTA_TIMEOUT_UDP_REPLIED, + __CTA_TIMEOUT_UDP_MAX +}; +#define CTA_TIMEOUT_UDP_MAX (__CTA_TIMEOUT_UDP_MAX - 1) + +enum ctattr_timeout_udplite { + CTA_TIMEOUT_UDPLITE_UNSPEC, + CTA_TIMEOUT_UDPLITE_UNREPLIED, + CTA_TIMEOUT_UDPLITE_REPLIED, + __CTA_TIMEOUT_UDPLITE_MAX +}; +#define CTA_TIMEOUT_UDPLITE_MAX (__CTA_TIMEOUT_UDPLITE_MAX - 1) + +enum ctattr_timeout_icmp { + CTA_TIMEOUT_ICMP_UNSPEC, + CTA_TIMEOUT_ICMP_TIMEOUT, + __CTA_TIMEOUT_ICMP_MAX +}; +#define CTA_TIMEOUT_ICMP_MAX (__CTA_TIMEOUT_ICMP_MAX - 1) + +enum ctattr_timeout_dccp { + CTA_TIMEOUT_DCCP_UNSPEC, + CTA_TIMEOUT_DCCP_REQUEST, + CTA_TIMEOUT_DCCP_RESPOND, + CTA_TIMEOUT_DCCP_PARTOPEN, + CTA_TIMEOUT_DCCP_OPEN, + CTA_TIMEOUT_DCCP_CLOSEREQ, + CTA_TIMEOUT_DCCP_CLOSING, + CTA_TIMEOUT_DCCP_TIMEWAIT, + __CTA_TIMEOUT_DCCP_MAX +}; +#define CTA_TIMEOUT_DCCP_MAX (__CTA_TIMEOUT_DCCP_MAX - 1) + +enum ctattr_timeout_sctp { + CTA_TIMEOUT_SCTP_UNSPEC, + CTA_TIMEOUT_SCTP_CLOSED, + CTA_TIMEOUT_SCTP_COOKIE_WAIT, + CTA_TIMEOUT_SCTP_COOKIE_ECHOED, + CTA_TIMEOUT_SCTP_ESTABLISHED, + 
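/*
 * Illustrative userspace sketch (assuming libmnl and the usual socket
 * headers): creating a named timeout policy that shortens the TCP
 * established timeout.  The per-protocol attributes above are nested
 * inside CTA_TIMEOUT_DATA, each value a big-endian number of seconds.
 *
 *	char buf[MNL_SOCKET_BUFFER_SIZE];
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct nfgenmsg *nfg;
 *	struct nlattr *nest;
 *
 *	nlh->nlmsg_type	 = (NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8) |
 *			   IPCTNL_MSG_TIMEOUT_NEW;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
 *	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
 *	nfg->nfgen_family = AF_UNSPEC;
 *	nfg->version	  = NFNETLINK_V0;
 *
 *	mnl_attr_put_strz(nlh, CTA_TIMEOUT_NAME, "tcp-short");
 *	mnl_attr_put_u16(nlh, CTA_TIMEOUT_L3PROTO, htons(AF_INET));
 *	mnl_attr_put_u8(nlh, CTA_TIMEOUT_L4PROTO, IPPROTO_TCP);
 *	nest = mnl_attr_nest_start(nlh, CTA_TIMEOUT_DATA);
 *	mnl_attr_put_u32(nlh, CTA_TIMEOUT_TCP_ESTABLISHED, htonl(600));
 *	mnl_attr_nest_end(nlh, nest);
 */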
CTA_TIMEOUT_SCTP_SHUTDOWN_SENT, + CTA_TIMEOUT_SCTP_SHUTDOWN_RECD, + CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT, + __CTA_TIMEOUT_SCTP_MAX +}; +#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1) + +enum ctattr_timeout_icmpv6 { + CTA_TIMEOUT_ICMPV6_UNSPEC, + CTA_TIMEOUT_ICMPV6_TIMEOUT, + __CTA_TIMEOUT_ICMPV6_MAX +}; +#define CTA_TIMEOUT_ICMPV6_MAX (__CTA_TIMEOUT_ICMPV6_MAX - 1) + +enum ctattr_timeout_gre { + CTA_TIMEOUT_GRE_UNSPEC, + CTA_TIMEOUT_GRE_UNREPLIED, + CTA_TIMEOUT_GRE_REPLIED, + __CTA_TIMEOUT_GRE_MAX +}; +#define CTA_TIMEOUT_GRE_MAX (__CTA_TIMEOUT_GRE_MAX - 1) + +#define CTNL_TIMEOUT_NAME_MAX 32 + +#endif diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h new file mode 100644 index 00000000..90c2c957 --- /dev/null +++ b/include/linux/netfilter/nfnetlink_log.h @@ -0,0 +1,97 @@ +#ifndef _NFNETLINK_LOG_H +#define _NFNETLINK_LOG_H + +/* This file describes the netlink messages (i.e. 'protocol packets'), + * and not any kind of function definitions. It is shared between kernel and + * userspace. Don't put kernel specific stuff in here */ + +#include <linux/types.h> +#include <linux/netfilter/nfnetlink.h> + +enum nfulnl_msg_types { + NFULNL_MSG_PACKET, /* packet from kernel to userspace */ + NFULNL_MSG_CONFIG, /* connect to a particular queue */ + + NFULNL_MSG_MAX +}; + +struct nfulnl_msg_packet_hdr { + __be16 hw_protocol; /* hw protocol (network order) */ + __u8 hook; /* netfilter hook */ + __u8 _pad; +}; + +struct nfulnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfulnl_msg_packet_timestamp { + __aligned_be64 sec; + __aligned_be64 usec; +}; + +enum nfulnl_attr_type { + NFULA_UNSPEC, + NFULA_PACKET_HDR, + NFULA_MARK, /* __u32 nfmark */ + NFULA_TIMESTAMP, /* nfulnl_msg_packet_timestamp */ + NFULA_IFINDEX_INDEV, /* __u32 ifindex */ + NFULA_IFINDEX_OUTDEV, /* __u32 ifindex */ + NFULA_IFINDEX_PHYSINDEV, /* __u32 ifindex */ + NFULA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ + NFULA_HWADDR, /* nfulnl_msg_packet_hw */ + NFULA_PAYLOAD, /* opaque data payload */ + NFULA_PREFIX, /* string prefix */ + NFULA_UID, /* user id of socket */ + NFULA_SEQ, /* instance-local sequence number */ + NFULA_SEQ_GLOBAL, /* global sequence number */ + NFULA_GID, /* group id of socket */ + NFULA_HWTYPE, /* hardware type */ + NFULA_HWHEADER, /* hardware header */ + NFULA_HWLEN, /* hardware header length */ + + __NFULA_MAX +}; +#define NFULA_MAX (__NFULA_MAX - 1) + +enum nfulnl_msg_config_cmds { + NFULNL_CFG_CMD_NONE, + NFULNL_CFG_CMD_BIND, + NFULNL_CFG_CMD_UNBIND, + NFULNL_CFG_CMD_PF_BIND, + NFULNL_CFG_CMD_PF_UNBIND, +}; + +struct nfulnl_msg_config_cmd { + __u8 command; /* nfulnl_msg_config_cmds */ +} __attribute__ ((packed)); + +struct nfulnl_msg_config_mode { + __be32 copy_range; + __u8 copy_mode; + __u8 _pad; +} __attribute__ ((packed)); + +enum nfulnl_attr_config { + NFULA_CFG_UNSPEC, + NFULA_CFG_CMD, /* nfulnl_msg_config_cmd */ + NFULA_CFG_MODE, /* nfulnl_msg_config_mode */ + NFULA_CFG_NLBUFSIZ, /* __u32 buffer size */ + NFULA_CFG_TIMEOUT, /* __u32 in 1/100 s */ + NFULA_CFG_QTHRESH, /* __u32 */ + NFULA_CFG_FLAGS, /* __u16 */ + __NFULA_CFG_MAX +}; +#define NFULA_CFG_MAX (__NFULA_CFG_MAX -1) + +#define NFULNL_COPY_NONE 0x00 +#define NFULNL_COPY_META 0x01 +#define NFULNL_COPY_PACKET 0x02 +/* 0xff is reserved, don't use it for new copy modes. 
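 *
 * An illustrative userspace sketch (assuming libmnl and the usual
 * socket headers) of selecting a copy mode: binding a log instance and
 * asking for full packet copies.  The target group number travels in
 * the res_id field of struct nfgenmsg.
 *
 *	char buf[MNL_SOCKET_BUFFER_SIZE];
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct nfgenmsg *nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
 *	struct nfulnl_msg_config_cmd cmd = { .command = NFULNL_CFG_CMD_BIND };
 *	struct nfulnl_msg_config_mode mode = {
 *		.copy_mode  = NFULNL_COPY_PACKET,
 *		.copy_range = htonl(0xffff),
 *	};
 *
 *	nlh->nlmsg_type	  = (NFNL_SUBSYS_ULOG << 8) | NFULNL_MSG_CONFIG;
 *	nlh->nlmsg_flags  = NLM_F_REQUEST;
 *	nfg->nfgen_family = AF_UNSPEC;
 *	nfg->version	  = NFNETLINK_V0;
 *	nfg->res_id	  = htons(32);
 *	mnl_attr_put(nlh, NFULA_CFG_CMD, sizeof(cmd), &cmd);
 *	mnl_attr_put(nlh, NFULA_CFG_MODE, sizeof(mode), &mode);
 *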
*/ + +#define NFULNL_CFG_F_SEQ 0x0001 +#define NFULNL_CFG_F_SEQ_GLOBAL 0x0002 + +#endif /* _NFNETLINK_LOG_H */ diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h new file mode 100644 index 00000000..24b32e6c --- /dev/null +++ b/include/linux/netfilter/nfnetlink_queue.h @@ -0,0 +1,91 @@ +#ifndef _NFNETLINK_QUEUE_H +#define _NFNETLINK_QUEUE_H + +#include <linux/types.h> +#include <linux/netfilter/nfnetlink.h> + +enum nfqnl_msg_types { + NFQNL_MSG_PACKET, /* packet from kernel to userspace */ + NFQNL_MSG_VERDICT, /* verdict from userspace to kernel */ + NFQNL_MSG_CONFIG, /* connect to a particular queue */ + NFQNL_MSG_VERDICT_BATCH, /* batchv from userspace to kernel */ + + NFQNL_MSG_MAX +}; + +struct nfqnl_msg_packet_hdr { + __be32 packet_id; /* unique ID of packet in queue */ + __be16 hw_protocol; /* hw protocol (network order) */ + __u8 hook; /* netfilter hook */ +} __attribute__ ((packed)); + +struct nfqnl_msg_packet_hw { + __be16 hw_addrlen; + __u16 _pad; + __u8 hw_addr[8]; +}; + +struct nfqnl_msg_packet_timestamp { + __aligned_be64 sec; + __aligned_be64 usec; +}; + +enum nfqnl_attr_type { + NFQA_UNSPEC, + NFQA_PACKET_HDR, + NFQA_VERDICT_HDR, /* nfqnl_msg_verdict_hrd */ + NFQA_MARK, /* __u32 nfmark */ + NFQA_TIMESTAMP, /* nfqnl_msg_packet_timestamp */ + NFQA_IFINDEX_INDEV, /* __u32 ifindex */ + NFQA_IFINDEX_OUTDEV, /* __u32 ifindex */ + NFQA_IFINDEX_PHYSINDEV, /* __u32 ifindex */ + NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ + NFQA_HWADDR, /* nfqnl_msg_packet_hw */ + NFQA_PAYLOAD, /* opaque data payload */ + + __NFQA_MAX +}; +#define NFQA_MAX (__NFQA_MAX - 1) + +struct nfqnl_msg_verdict_hdr { + __be32 verdict; + __be32 id; +}; + + +enum nfqnl_msg_config_cmds { + NFQNL_CFG_CMD_NONE, + NFQNL_CFG_CMD_BIND, + NFQNL_CFG_CMD_UNBIND, + NFQNL_CFG_CMD_PF_BIND, + NFQNL_CFG_CMD_PF_UNBIND, +}; + +struct nfqnl_msg_config_cmd { + __u8 command; /* nfqnl_msg_config_cmds */ + __u8 _pad; + __be16 pf; /* AF_xxx for PF_[UN]BIND */ +}; + +enum nfqnl_config_mode { + NFQNL_COPY_NONE, + NFQNL_COPY_META, + NFQNL_COPY_PACKET, +}; + +struct nfqnl_msg_config_params { + __be32 copy_range; + __u8 copy_mode; /* enum nfqnl_config_mode */ +} __attribute__ ((packed)); + + +enum nfqnl_attr_config { + NFQA_CFG_UNSPEC, + NFQA_CFG_CMD, /* nfqnl_msg_config_cmd */ + NFQA_CFG_PARAMS, /* nfqnl_msg_config_params */ + NFQA_CFG_QUEUE_MAXLEN, /* __u32 */ + __NFQA_CFG_MAX +}; +#define NFQA_CFG_MAX (__NFQA_CFG_MAX-1) + +#endif /* _NFNETLINK_QUEUE_H */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h new file mode 100644 index 00000000..8d674a78 --- /dev/null +++ b/include/linux/netfilter/x_tables.h @@ -0,0 +1,622 @@ +#ifndef _X_TABLES_H +#define _X_TABLES_H +#include <linux/kernel.h> +#include <linux/types.h> + +#define XT_FUNCTION_MAXNAMELEN 30 +#define XT_EXTENSION_MAXNAMELEN 29 +#define XT_TABLE_MAXNAMELEN 32 + +struct xt_entry_match { + union { + struct { + __u16 match_size; + + /* Used by userspace */ + char name[XT_EXTENSION_MAXNAMELEN]; + __u8 revision; + } user; + struct { + __u16 match_size; + + /* Used inside the kernel */ + struct xt_match *match; + } kernel; + + /* Total length */ + __u16 match_size; + } u; + + unsigned char data[0]; +}; + +struct xt_entry_target { + union { + struct { + __u16 target_size; + + /* Used by userspace */ + char name[XT_EXTENSION_MAXNAMELEN]; + __u8 revision; + } user; + struct { + __u16 target_size; + + /* Used inside the kernel */ + struct xt_target *target; + } kernel; + + /* Total length */ + __u16 
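/*
 * Illustrative sketch (assuming libmnl and the usual socket headers):
 * after receiving an NFQNL_MSG_PACKET, userspace answers with an
 * NFQNL_MSG_VERDICT carrying an NFQA_VERDICT_HDR whose id echoes the
 * packet_id from the NFQA_PACKET_HDR it was given; both fields are in
 * network byte order, and NF_ACCEPT comes from <linux/netfilter.h>.
 *
 *	char buf[MNL_SOCKET_BUFFER_SIZE];
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct nfqnl_msg_verdict_hdr vh = {
 *		.verdict = htonl(NF_ACCEPT),
 *		.id	 = htonl(packet_id),
 *	};
 *
 *	nlh->nlmsg_type	 = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_VERDICT;
 *	nlh->nlmsg_flags = NLM_F_REQUEST;
 *	mnl_attr_put(nlh, NFQA_VERDICT_HDR, sizeof(vh), &vh);
 */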
target_size; + } u; + + unsigned char data[0]; +}; + +#define XT_TARGET_INIT(__name, __size) \ +{ \ + .target.u.user = { \ + .target_size = XT_ALIGN(__size), \ + .name = __name, \ + }, \ +} + +struct xt_standard_target { + struct xt_entry_target target; + int verdict; +}; + +struct xt_error_target { + struct xt_entry_target target; + char errorname[XT_FUNCTION_MAXNAMELEN]; +}; + +/* The argument to IPT_SO_GET_REVISION_*. Returns highest revision + * kernel supports, if >= revision. */ +struct xt_get_revision { + char name[XT_EXTENSION_MAXNAMELEN]; + __u8 revision; +}; + +/* CONTINUE verdict for targets */ +#define XT_CONTINUE 0xFFFFFFFF + +/* For standard target */ +#define XT_RETURN (-NF_REPEAT - 1) + +/* this is a dummy structure to find out the alignment requirement for a struct + * containing all the fundamental data types that are used in ipt_entry, + * ip6t_entry and arpt_entry. This sucks, and it is a hack. It will be my + * personal pleasure to remove it -HW + */ +struct _xt_align { + __u8 u8; + __u16 u16; + __u32 u32; + __u64 u64; +}; + +#define XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _xt_align)) + +/* Standard return verdict, or do jump. */ +#define XT_STANDARD_TARGET "" +/* Error verdict. */ +#define XT_ERROR_TARGET "ERROR" + +#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0) +#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0) + +struct xt_counters { + __u64 pcnt, bcnt; /* Packet and byte counters */ +}; + +/* The argument to IPT_SO_ADD_COUNTERS. */ +struct xt_counters_info { + /* Which table. */ + char name[XT_TABLE_MAXNAMELEN]; + + unsigned int num_counters; + + /* The counters (actually `number' of these). */ + struct xt_counters counters[0]; +}; + +#define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */ + +#ifndef __KERNEL__ +/* fn returns 0 to continue iteration */ +#define XT_MATCH_ITERATE(type, e, fn, args...) \ +({ \ + unsigned int __i; \ + int __ret = 0; \ + struct xt_entry_match *__m; \ + \ + for (__i = sizeof(type); \ + __i < (e)->target_offset; \ + __i += __m->u.match_size) { \ + __m = (void *)e + __i; \ + \ + __ret = fn(__m , ## args); \ + if (__ret != 0) \ + break; \ + } \ + __ret; \ +}) + +/* fn returns 0 to continue iteration */ +#define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \ +({ \ + unsigned int __i, __n; \ + int __ret = 0; \ + type *__entry; \ + \ + for (__i = 0, __n = 0; __i < (size); \ + __i += __entry->next_offset, __n++) { \ + __entry = (void *)(entries) + __i; \ + if (__n < n) \ + continue; \ + \ + __ret = fn(__entry , ## args); \ + if (__ret != 0) \ + break; \ + } \ + __ret; \ +}) + +/* fn returns 0 to continue iteration */ +#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \ + XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args) + +#endif /* !__KERNEL__ */ + +/* pos is normally a struct ipt_entry/ip6t_entry/etc. 
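 *
 * An illustrative kernel-side sketch, assuming an already validated
 * blob of struct ipt_entry rules (from <linux/netfilter_ipv4/ip_tables.h>)
 * pointed to by entries and spanning size bytes:
 *
 *	struct ipt_entry *iter;
 *	unsigned int rules = 0;
 *
 *	xt_entry_foreach(iter, entries, size)
 *		rules++;
 *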
*/ +#define xt_entry_foreach(pos, ehead, esize) \ + for ((pos) = (typeof(pos))(ehead); \ + (pos) < (typeof(pos))((char *)(ehead) + (esize)); \ + (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset)) + +/* can only be xt_entry_match, so no use of typeof here */ +#define xt_ematch_foreach(pos, entry) \ + for ((pos) = (struct xt_entry_match *)entry->elems; \ + (pos) < (struct xt_entry_match *)((char *)(entry) + \ + (entry)->target_offset); \ + (pos) = (struct xt_entry_match *)((char *)(pos) + \ + (pos)->u.match_size)) + +#ifdef __KERNEL__ + +#include <linux/netdevice.h> + +/** + * struct xt_action_param - parameters for matches/targets + * + * @match: the match extension + * @target: the target extension + * @matchinfo: per-match data + * @targetinfo: per-target data + * @in: input netdevice + * @out: output netdevice + * @fragoff: packet is a fragment, this is the data offset + * @thoff: position of transport header relative to skb->data + * @hook: hook number given packet came from + * @family: Actual NFPROTO_* through which the function is invoked + * (helpful when match->family == NFPROTO_UNSPEC) + * + * Fields written to by extensions: + * + * @hotdrop: drop packet if we had inspection problems + * Network namespace obtainable using dev_net(in/out) + */ +struct xt_action_param { + union { + const struct xt_match *match; + const struct xt_target *target; + }; + union { + const void *matchinfo, *targinfo; + }; + const struct net_device *in, *out; + int fragoff; + unsigned int thoff; + unsigned int hooknum; + u_int8_t family; + bool hotdrop; +}; + +/** + * struct xt_mtchk_param - parameters for match extensions' + * checkentry functions + * + * @net: network namespace through which the check was invoked + * @table: table the rule is tried to be inserted into + * @entryinfo: the family-specific rule data + * (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry) + * @match: struct xt_match through which this function was invoked + * @matchinfo: per-match data + * @hook_mask: via which hooks the new rule is reachable + * Other fields as above. + */ +struct xt_mtchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_match *match; + void *matchinfo; + unsigned int hook_mask; + u_int8_t family; +}; + +/** + * struct xt_mdtor_param - match destructor parameters + * Fields as above. + */ +struct xt_mtdtor_param { + struct net *net; + const struct xt_match *match; + void *matchinfo; + u_int8_t family; +}; + +/** + * struct xt_tgchk_param - parameters for target extensions' + * checkentry functions + * + * @entryinfo: the family-specific rule data + * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry) + * + * Other fields see above. + */ +struct xt_tgchk_param { + struct net *net; + const char *table; + const void *entryinfo; + const struct xt_target *target; + void *targinfo; + unsigned int hook_mask; + u_int8_t family; +}; + +/* Target destructor parameters */ +struct xt_tgdtor_param { + struct net *net; + const struct xt_target *target; + void *targinfo; + u_int8_t family; +}; + +struct xt_match { + struct list_head list; + + const char name[XT_EXTENSION_MAXNAMELEN]; + u_int8_t revision; + + /* Return true or false: return FALSE and set *hotdrop = 1 to + force immediate packet drop. */ + /* Arguments changed since 2.6.9, as this must now handle + non-linear skb, using skb_header_pointer and + skb_ip_make_writable. */ + bool (*match)(const struct sk_buff *skb, + struct xt_action_param *); + + /* Called when user tries to insert an entry of this type. 
*/ + int (*checkentry)(const struct xt_mtchk_param *); + + /* Called when entry of this type deleted. */ + void (*destroy)(const struct xt_mtdtor_param *); +#ifdef CONFIG_COMPAT + /* Called when userspace align differs from kernel space one */ + void (*compat_from_user)(void *dst, const void *src); + int (*compat_to_user)(void __user *dst, const void *src); +#endif + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; + + const char *table; + unsigned int matchsize; +#ifdef CONFIG_COMPAT + unsigned int compatsize; +#endif + unsigned int hooks; + unsigned short proto; + + unsigned short family; +}; + +/* Registration hooks for targets. */ +struct xt_target { + struct list_head list; + + const char name[XT_EXTENSION_MAXNAMELEN]; + u_int8_t revision; + + /* Returns verdict. Argument order changed since 2.6.9, as this + must now handle non-linear skbs, using skb_copy_bits and + skb_ip_make_writable. */ + unsigned int (*target)(struct sk_buff *skb, + const struct xt_action_param *); + + /* Called when user tries to insert an entry of this type: + hook_mask is a bitmask of hooks from which it can be + called. */ + /* Should return 0 on success or an error code otherwise (-Exxxx). */ + int (*checkentry)(const struct xt_tgchk_param *); + + /* Called when entry of this type deleted. */ + void (*destroy)(const struct xt_tgdtor_param *); +#ifdef CONFIG_COMPAT + /* Called when userspace align differs from kernel space one */ + void (*compat_from_user)(void *dst, const void *src); + int (*compat_to_user)(void __user *dst, const void *src); +#endif + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; + + const char *table; + unsigned int targetsize; +#ifdef CONFIG_COMPAT + unsigned int compatsize; +#endif + unsigned int hooks; + unsigned short proto; + + unsigned short family; +}; + +/* Furniture shopping... */ +struct xt_table { + struct list_head list; + + /* What hooks you will enter on */ + unsigned int valid_hooks; + + /* Man behind the curtain... */ + struct xt_table_info *private; + + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ + struct module *me; + + u_int8_t af; /* address/protocol family */ + int priority; /* hook order */ + + /* A unique name... */ + const char name[XT_TABLE_MAXNAMELEN]; +}; + +#include <linux/netfilter_ipv4.h> + +/* The table itself */ +struct xt_table_info { + /* Size per table */ + unsigned int size; + /* Number of entries: FIXME. --RR */ + unsigned int number; + /* Initial number of entries. Needed for module usage count */ + unsigned int initial_entries; + + /* Entry points and underflows */ + unsigned int hook_entry[NF_INET_NUMHOOKS]; + unsigned int underflow[NF_INET_NUMHOOKS]; + + /* + * Number of user chains. Since tables cannot have loops, at most + * @stacksize jumps (number of user chains) can possibly be made. 
+ */ + unsigned int stacksize; + unsigned int __percpu *stackptr; + void ***jumpstack; + /* ipt_entry tables: one per CPU */ + /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ + void *entries[1]; +}; + +#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \ + + nr_cpu_ids * sizeof(char *)) +extern int xt_register_target(struct xt_target *target); +extern void xt_unregister_target(struct xt_target *target); +extern int xt_register_targets(struct xt_target *target, unsigned int n); +extern void xt_unregister_targets(struct xt_target *target, unsigned int n); + +extern int xt_register_match(struct xt_match *target); +extern void xt_unregister_match(struct xt_match *target); +extern int xt_register_matches(struct xt_match *match, unsigned int n); +extern void xt_unregister_matches(struct xt_match *match, unsigned int n); + +extern int xt_check_match(struct xt_mtchk_param *, + unsigned int size, u_int8_t proto, bool inv_proto); +extern int xt_check_target(struct xt_tgchk_param *, + unsigned int size, u_int8_t proto, bool inv_proto); + +extern struct xt_table *xt_register_table(struct net *net, + const struct xt_table *table, + struct xt_table_info *bootstrap, + struct xt_table_info *newinfo); +extern void *xt_unregister_table(struct xt_table *table); + +extern struct xt_table_info *xt_replace_table(struct xt_table *table, + unsigned int num_counters, + struct xt_table_info *newinfo, + int *error); + +extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision); +extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision); +extern struct xt_match *xt_request_find_match(u8 af, const char *name, + u8 revision); +extern struct xt_target *xt_request_find_target(u8 af, const char *name, + u8 revision); +extern int xt_find_revision(u8 af, const char *name, u8 revision, + int target, int *err); + +extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, + const char *name); +extern void xt_table_unlock(struct xt_table *t); + +extern int xt_proto_init(struct net *net, u_int8_t af); +extern void xt_proto_fini(struct net *net, u_int8_t af); + +extern struct xt_table_info *xt_alloc_table_info(unsigned int size); +extern void xt_free_table_info(struct xt_table_info *info); + +/** + * xt_recseq - recursive seqcount for netfilter use + * + * Packet processing changes the seqcount only if no recursion happened + * get_counters() can use read_seqcount_begin()/read_seqcount_retry(), + * because we use the normal seqcount convention : + * Low order bit set to 1 if a writer is active. + */ +DECLARE_PER_CPU(seqcount_t, xt_recseq); + +/** + * xt_write_recseq_begin - start of a write section + * + * Begin packet processing : all readers must wait the end + * 1) Must be called with preemption disabled + * 2) softirqs must be disabled too (or we should use this_cpu_add()) + * Returns : + * 1 if no recursion on this cpu + * 0 if recursion detected + */ +static inline unsigned int xt_write_recseq_begin(void) +{ + unsigned int addend; + + /* + * Low order bit of sequence is set if we already + * called xt_write_recseq_begin(). 
+ */ + addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1; + + /* + * This is kind of a write_seqcount_begin(), but addend is 0 or 1 + * We dont check addend value to avoid a test and conditional jump, + * since addend is most likely 1 + */ + __this_cpu_add(xt_recseq.sequence, addend); + smp_wmb(); + + return addend; +} + +/** + * xt_write_recseq_end - end of a write section + * @addend: return value from previous xt_write_recseq_begin() + * + * End packet processing : all readers can proceed + * 1) Must be called with preemption disabled + * 2) softirqs must be disabled too (or we should use this_cpu_add()) + */ +static inline void xt_write_recseq_end(unsigned int addend) +{ + /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ + smp_wmb(); + __this_cpu_add(xt_recseq.sequence, addend); +} + +/* + * This helper is performance critical and must be inlined + */ +static inline unsigned long ifname_compare_aligned(const char *_a, + const char *_b, + const char *_mask) +{ + const unsigned long *a = (const unsigned long *)_a; + const unsigned long *b = (const unsigned long *)_b; + const unsigned long *mask = (const unsigned long *)_mask; + unsigned long ret; + + ret = (a[0] ^ b[0]) & mask[0]; + if (IFNAMSIZ > sizeof(unsigned long)) + ret |= (a[1] ^ b[1]) & mask[1]; + if (IFNAMSIZ > 2 * sizeof(unsigned long)) + ret |= (a[2] ^ b[2]) & mask[2]; + if (IFNAMSIZ > 3 * sizeof(unsigned long)) + ret |= (a[3] ^ b[3]) & mask[3]; + BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); + return ret; +} + +extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); +extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); + +#ifdef CONFIG_COMPAT +#include <net/compat.h> + +struct compat_xt_entry_match { + union { + struct { + u_int16_t match_size; + char name[XT_FUNCTION_MAXNAMELEN - 1]; + u_int8_t revision; + } user; + struct { + u_int16_t match_size; + compat_uptr_t match; + } kernel; + u_int16_t match_size; + } u; + unsigned char data[0]; +}; + +struct compat_xt_entry_target { + union { + struct { + u_int16_t target_size; + char name[XT_FUNCTION_MAXNAMELEN - 1]; + u_int8_t revision; + } user; + struct { + u_int16_t target_size; + compat_uptr_t target; + } kernel; + u_int16_t target_size; + } u; + unsigned char data[0]; +}; + +/* FIXME: this works only on 32 bit tasks + * need to change whole approach in order to calculate align as function of + * current task alignment */ + +struct compat_xt_counters { + compat_u64 pcnt, bcnt; /* Packet and byte counters */ +}; + +struct compat_xt_counters_info { + char name[XT_TABLE_MAXNAMELEN]; + compat_uint_t num_counters; + struct compat_xt_counters counters[0]; +}; + +struct _compat_xt_align { + __u8 u8; + __u16 u16; + __u32 u32; + compat_u64 u64; +}; + +#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align)) + +extern void xt_compat_lock(u_int8_t af); +extern void xt_compat_unlock(u_int8_t af); + +extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta); +extern void xt_compat_flush_offsets(u_int8_t af); +extern void xt_compat_init_offsets(u_int8_t af, unsigned int number); +extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset); + +extern int xt_compat_match_offset(const struct xt_match *match); +extern int xt_compat_match_from_user(struct xt_entry_match *m, + void **dstptr, unsigned int *size); +extern int xt_compat_match_to_user(const struct xt_entry_match *m, + void __user **dstptr, unsigned int *size); + +extern int xt_compat_target_offset(const 
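/*
 * Illustrative sketch of the registration API above: a hypothetical
 * "example_len" match (the name, its info struct and its semantics are
 * invented here) wires a predicate into struct xt_match and registers
 * it from module init.  The usual module headers, NFPROTO_UNSPEC and
 * THIS_MODULE are assumed.
 *
 *	struct example_len_mtinfo {
 *		__u32 min_len;
 *	};
 *
 *	static bool example_len_mt(const struct sk_buff *skb,
 *				   struct xt_action_param *par)
 *	{
 *		const struct example_len_mtinfo *info = par->matchinfo;
 *
 *		return skb->len >= info->min_len;
 *	}
 *
 *	static struct xt_match example_len_mt_reg __read_mostly = {
 *		.name		= "example_len",
 *		.revision	= 0,
 *		.family		= NFPROTO_UNSPEC,
 *		.match		= example_len_mt,
 *		.matchsize	= sizeof(struct example_len_mtinfo),
 *		.me		= THIS_MODULE,
 *	};
 *
 *	static int __init example_len_mt_init(void)
 *	{
 *		return xt_register_match(&example_len_mt_reg);
 *	}
 *
 *	static void __exit example_len_mt_exit(void)
 *	{
 *		xt_unregister_match(&example_len_mt_reg);
 *	}
 *
 *	module_init(example_len_mt_init);
 *	module_exit(example_len_mt_exit);
 */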
struct xt_target *target); +extern void xt_compat_target_from_user(struct xt_entry_target *t, + void **dstptr, unsigned int *size); +extern int xt_compat_target_to_user(const struct xt_entry_target *t, + void __user **dstptr, unsigned int *size); + +#endif /* CONFIG_COMPAT */ +#endif /* __KERNEL__ */ + +#endif /* _X_TABLES_H */ diff --git a/include/linux/netfilter/xt_AUDIT.h b/include/linux/netfilter/xt_AUDIT.h new file mode 100644 index 00000000..38751d2e --- /dev/null +++ b/include/linux/netfilter/xt_AUDIT.h @@ -0,0 +1,30 @@ +/* + * Header file for iptables xt_AUDIT target + * + * (C) 2010-2011 Thomas Graf <tgraf@redhat.com> + * (C) 2010-2011 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _XT_AUDIT_TARGET_H +#define _XT_AUDIT_TARGET_H + +#include <linux/types.h> + +enum { + XT_AUDIT_TYPE_ACCEPT = 0, + XT_AUDIT_TYPE_DROP, + XT_AUDIT_TYPE_REJECT, + __XT_AUDIT_TYPE_MAX, +}; + +#define XT_AUDIT_TYPE_MAX (__XT_AUDIT_TYPE_MAX - 1) + +struct xt_audit_info { + __u8 type; /* XT_AUDIT_TYPE_* */ +}; + +#endif /* _XT_AUDIT_TARGET_H */ diff --git a/include/linux/netfilter/xt_CHECKSUM.h b/include/linux/netfilter/xt_CHECKSUM.h new file mode 100644 index 00000000..9a2e4661 --- /dev/null +++ b/include/linux/netfilter/xt_CHECKSUM.h @@ -0,0 +1,20 @@ +/* Header file for iptables ipt_CHECKSUM target + * + * (C) 2002 by Harald Welte <laforge@gnumonks.org> + * (C) 2010 Red Hat Inc + * Author: Michael S. Tsirkin <mst@redhat.com> + * + * This software is distributed under GNU GPL v2, 1991 +*/ +#ifndef _XT_CHECKSUM_TARGET_H +#define _XT_CHECKSUM_TARGET_H + +#include <linux/types.h> + +#define XT_CHECKSUM_OP_FILL 0x01 /* fill in checksum in IP header */ + +struct xt_CHECKSUM_info { + __u8 operation; /* bitset of operations */ +}; + +#endif /* _XT_CHECKSUM_TARGET_H */ diff --git a/include/linux/netfilter/xt_CLASSIFY.h b/include/linux/netfilter/xt_CLASSIFY.h new file mode 100644 index 00000000..a813bf14 --- /dev/null +++ b/include/linux/netfilter/xt_CLASSIFY.h @@ -0,0 +1,10 @@ +#ifndef _XT_CLASSIFY_H +#define _XT_CLASSIFY_H + +#include <linux/types.h> + +struct xt_classify_target_info { + __u32 priority; +}; + +#endif /*_XT_CLASSIFY_H */ diff --git a/include/linux/netfilter/xt_CONNMARK.h b/include/linux/netfilter/xt_CONNMARK.h new file mode 100644 index 00000000..2f2e48ec --- /dev/null +++ b/include/linux/netfilter/xt_CONNMARK.h @@ -0,0 +1,6 @@ +#ifndef _XT_CONNMARK_H_target +#define _XT_CONNMARK_H_target + +#include <linux/netfilter/xt_connmark.h> + +#endif /*_XT_CONNMARK_H_target*/ diff --git a/include/linux/netfilter/xt_CONNSECMARK.h b/include/linux/netfilter/xt_CONNSECMARK.h new file mode 100644 index 00000000..b973ff80 --- /dev/null +++ b/include/linux/netfilter/xt_CONNSECMARK.h @@ -0,0 +1,15 @@ +#ifndef _XT_CONNSECMARK_H_target +#define _XT_CONNSECMARK_H_target + +#include <linux/types.h> + +enum { + CONNSECMARK_SAVE = 1, + CONNSECMARK_RESTORE, +}; + +struct xt_connsecmark_target_info { + __u8 mode; +}; + +#endif /*_XT_CONNSECMARK_H_target */ diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h new file mode 100644 index 00000000..a064b8af --- /dev/null +++ b/include/linux/netfilter/xt_CT.h @@ -0,0 +1,31 @@ +#ifndef _XT_CT_H +#define _XT_CT_H + +#include <linux/types.h> + +#define XT_CT_NOTRACK 0x1 + +struct xt_ct_target_info { + __u16 flags; + __u16 zone; + __u32 ct_events; + __u32 exp_events; + char 
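/*
 * Illustrative userspace sketch tying the generic x_tables layout to a
 * concrete extension: a rule's target record is an xt_entry_target
 * header followed by the extension's own info, both padded with
 * XT_ALIGN().  Shown here for the AUDIT target info defined above;
 * calloc()/strncpy() are assumed from libc, and rule blobs are normally
 * assembled by iptables rather than by hand.
 *
 *	size_t tsize = XT_ALIGN(sizeof(struct xt_entry_target)) +
 *		       XT_ALIGN(sizeof(struct xt_audit_info));
 *	struct xt_entry_target *t = calloc(1, tsize);
 *	struct xt_audit_info *ainfo = (struct xt_audit_info *)t->data;
 *
 *	t->u.user.target_size = tsize;
 *	strncpy(t->u.user.name, "AUDIT", sizeof(t->u.user.name) - 1);
 *	t->u.user.revision = 0;
 *	ainfo->type = XT_AUDIT_TYPE_DROP;
 */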
helper[16]; + + /* Used internally by the kernel */ + struct nf_conn *ct __attribute__((aligned(8))); +}; + +struct xt_ct_target_info_v1 { + __u16 flags; + __u16 zone; + __u32 ct_events; + __u32 exp_events; + char helper[16]; + char timeout[32]; + + /* Used internally by the kernel */ + struct nf_conn *ct __attribute__((aligned(8))); +}; + +#endif /* _XT_CT_H */ diff --git a/include/linux/netfilter/xt_DSCP.h b/include/linux/netfilter/xt_DSCP.h new file mode 100644 index 00000000..648e0b3b --- /dev/null +++ b/include/linux/netfilter/xt_DSCP.h @@ -0,0 +1,26 @@ +/* x_tables module for setting the IPv4/IPv6 DSCP field + * + * (C) 2002 Harald Welte <laforge@gnumonks.org> + * based on ipt_FTOS.c (C) 2000 by Matthew G. Marsh <mgm@paktronix.com> + * This software is distributed under GNU GPL v2, 1991 + * + * See RFC2474 for a description of the DSCP field within the IP Header. + * + * xt_DSCP.h,v 1.7 2002/03/14 12:03:13 laforge Exp +*/ +#ifndef _XT_DSCP_TARGET_H +#define _XT_DSCP_TARGET_H +#include <linux/netfilter/xt_dscp.h> +#include <linux/types.h> + +/* target info */ +struct xt_DSCP_info { + __u8 dscp; +}; + +struct xt_tos_target_info { + __u8 tos_value; + __u8 tos_mask; +}; + +#endif /* _XT_DSCP_TARGET_H */ diff --git a/include/linux/netfilter/xt_IDLETIMER.h b/include/linux/netfilter/xt_IDLETIMER.h new file mode 100644 index 00000000..faaa28b3 --- /dev/null +++ b/include/linux/netfilter/xt_IDLETIMER.h @@ -0,0 +1,53 @@ +/* + * linux/include/linux/netfilter/xt_IDLETIMER.h + * + * Header file for Xtables timer target module. + * + * Copyright (C) 2004, 2010 Nokia Corporation + * + * Written by Timo Teras <ext-timo.teras@nokia.com> + * + * Converted to x_tables and forward-ported to 2.6.34 + * by Luciano Coelho <luciano.coelho@nokia.com> + * + * Contact: Luciano Coelho <luciano.coelho@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef _XT_IDLETIMER_H +#define _XT_IDLETIMER_H + +#include <linux/types.h> + +#define MAX_IDLETIMER_LABEL_SIZE 28 +#define NLMSG_MAX_SIZE 64 + +#define NL_EVENT_TYPE_INACTIVE 0 +#define NL_EVENT_TYPE_ACTIVE 1 + +struct idletimer_tg_info { + __u32 timeout; + + char label[MAX_IDLETIMER_LABEL_SIZE]; + + /* Use netlink messages for notification in addition to sysfs */ + __u8 send_nl_msg; + + /* for kernel module internal use only */ + struct idletimer_tg *timer __attribute__((aligned(8))); +}; + +#endif diff --git a/include/linux/netfilter/xt_LED.h b/include/linux/netfilter/xt_LED.h new file mode 100644 index 00000000..f5509e75 --- /dev/null +++ b/include/linux/netfilter/xt_LED.h @@ -0,0 +1,15 @@ +#ifndef _XT_LED_H +#define _XT_LED_H + +#include <linux/types.h> + +struct xt_led_info { + char id[27]; /* Unique ID for this trigger in the LED class */ + __u8 always_blink; /* Blink even if the LED is already on */ + __u32 delay; /* Delay until LED is switched off after trigger */ + + /* Kernel data used in the module */ + void *internal_data __attribute__((aligned(8))); +}; + +#endif /* _XT_LED_H */ diff --git a/include/linux/netfilter/xt_LOG.h b/include/linux/netfilter/xt_LOG.h new file mode 100644 index 00000000..cac07909 --- /dev/null +++ b/include/linux/netfilter/xt_LOG.h @@ -0,0 +1,19 @@ +#ifndef _XT_LOG_H +#define _XT_LOG_H + +/* make sure not to change this without changing nf_log.h:NF_LOG_* (!) */ +#define XT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ +#define XT_LOG_TCPOPT 0x02 /* Log TCP options */ +#define XT_LOG_IPOPT 0x04 /* Log IP options */ +#define XT_LOG_UID 0x08 /* Log UID owning local socket */ +#define XT_LOG_NFLOG 0x10 /* Unsupported, don't reuse */ +#define XT_LOG_MACDECODE 0x20 /* Decode MAC header */ +#define XT_LOG_MASK 0x2f + +struct xt_log_info { + unsigned char level; + unsigned char logflags; + char prefix[30]; +}; + +#endif /* _XT_LOG_H */ diff --git a/include/linux/netfilter/xt_MARK.h b/include/linux/netfilter/xt_MARK.h new file mode 100644 index 00000000..41c456de --- /dev/null +++ b/include/linux/netfilter/xt_MARK.h @@ -0,0 +1,6 @@ +#ifndef _XT_MARK_H_target +#define _XT_MARK_H_target + +#include <linux/netfilter/xt_mark.h> + +#endif /*_XT_MARK_H_target */ diff --git a/include/linux/netfilter/xt_NFLOG.h b/include/linux/netfilter/xt_NFLOG.h new file mode 100644 index 00000000..87b58311 --- /dev/null +++ b/include/linux/netfilter/xt_NFLOG.h @@ -0,0 +1,20 @@ +#ifndef _XT_NFLOG_TARGET +#define _XT_NFLOG_TARGET + +#include <linux/types.h> + +#define XT_NFLOG_DEFAULT_GROUP 0x1 +#define XT_NFLOG_DEFAULT_THRESHOLD 0 + +#define XT_NFLOG_MASK 0x0 + +struct xt_nflog_info { + __u32 len; + __u16 group; + __u16 threshold; + __u16 flags; + __u16 pad; + char prefix[64]; +}; + +#endif /* _XT_NFLOG_TARGET */ diff --git a/include/linux/netfilter/xt_NFQUEUE.h b/include/linux/netfilter/xt_NFQUEUE.h new file mode 100644 index 00000000..9eafdbbb --- /dev/null +++ b/include/linux/netfilter/xt_NFQUEUE.h @@ -0,0 +1,29 @@ +/* iptables module for using NFQUEUE mechanism + * + * (C) 2005 Harald Welte <laforge@netfilter.org> + * + * This software is distributed under GNU GPL v2, 1991 + * +*/ +#ifndef _XT_NFQ_TARGET_H +#define _XT_NFQ_TARGET_H + +#include <linux/types.h> + +/* target info */ +struct xt_NFQ_info { + __u16 queuenum; +}; + +struct xt_NFQ_info_v1 { + 
__u16 queuenum; + __u16 queues_total; +}; + +struct xt_NFQ_info_v2 { + __u16 queuenum; + __u16 queues_total; + __u16 bypass; +}; + +#endif /* _XT_NFQ_TARGET_H */ diff --git a/include/linux/netfilter/xt_RATEEST.h b/include/linux/netfilter/xt_RATEEST.h new file mode 100644 index 00000000..6605e20a --- /dev/null +++ b/include/linux/netfilter/xt_RATEEST.h @@ -0,0 +1,15 @@ +#ifndef _XT_RATEEST_TARGET_H +#define _XT_RATEEST_TARGET_H + +#include <linux/types.h> + +struct xt_rateest_target_info { + char name[IFNAMSIZ]; + __s8 interval; + __u8 ewma_log; + + /* Used internally by the kernel */ + struct xt_rateest *est __attribute__((aligned(8))); +}; + +#endif /* _XT_RATEEST_TARGET_H */ diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h new file mode 100644 index 00000000..989092bd --- /dev/null +++ b/include/linux/netfilter/xt_SECMARK.h @@ -0,0 +1,22 @@ +#ifndef _XT_SECMARK_H_target +#define _XT_SECMARK_H_target + +#include <linux/types.h> + +/* + * This is intended for use by various security subsystems (but not + * at the same time). + * + * 'mode' refers to the specific security subsystem which the + * packets are being marked for. + */ +#define SECMARK_MODE_SEL 0x01 /* SELinux */ +#define SECMARK_SECCTX_MAX 256 + +struct xt_secmark_target_info { + __u8 mode; + __u32 secid; + char secctx[SECMARK_SECCTX_MAX]; +}; + +#endif /*_XT_SECMARK_H_target */ diff --git a/include/linux/netfilter/xt_TCPMSS.h b/include/linux/netfilter/xt_TCPMSS.h new file mode 100644 index 00000000..9a6960af --- /dev/null +++ b/include/linux/netfilter/xt_TCPMSS.h @@ -0,0 +1,12 @@ +#ifndef _XT_TCPMSS_H +#define _XT_TCPMSS_H + +#include <linux/types.h> + +struct xt_tcpmss_info { + __u16 mss; +}; + +#define XT_TCPMSS_CLAMP_PMTU 0xffff + +#endif /* _XT_TCPMSS_H */ diff --git a/include/linux/netfilter/xt_TCPOPTSTRIP.h b/include/linux/netfilter/xt_TCPOPTSTRIP.h new file mode 100644 index 00000000..71573184 --- /dev/null +++ b/include/linux/netfilter/xt_TCPOPTSTRIP.h @@ -0,0 +1,15 @@ +#ifndef _XT_TCPOPTSTRIP_H +#define _XT_TCPOPTSTRIP_H + +#include <linux/types.h> + +#define tcpoptstrip_set_bit(bmap, idx) \ + (bmap[(idx) >> 5] |= 1U << (idx & 31)) +#define tcpoptstrip_test_bit(bmap, idx) \ + (((1U << (idx & 31)) & bmap[(idx) >> 5]) != 0) + +struct xt_tcpoptstrip_target_info { + __u32 strip_bmap[8]; +}; + +#endif /* _XT_TCPOPTSTRIP_H */ diff --git a/include/linux/netfilter/xt_TEE.h b/include/linux/netfilter/xt_TEE.h new file mode 100644 index 00000000..5c21d5c8 --- /dev/null +++ b/include/linux/netfilter/xt_TEE.h @@ -0,0 +1,12 @@ +#ifndef _XT_TEE_TARGET_H +#define _XT_TEE_TARGET_H + +struct xt_tee_tginfo { + union nf_inet_addr gw; + char oif[16]; + + /* used internally by the kernel */ + struct xt_tee_priv *priv __attribute__((aligned(8))); +}; + +#endif /* _XT_TEE_TARGET_H */ diff --git a/include/linux/netfilter/xt_TPROXY.h b/include/linux/netfilter/xt_TPROXY.h new file mode 100644 index 00000000..902043c2 --- /dev/null +++ b/include/linux/netfilter/xt_TPROXY.h @@ -0,0 +1,23 @@ +#ifndef _XT_TPROXY_H +#define _XT_TPROXY_H + +#include <linux/types.h> + +/* TPROXY target is capable of marking the packet to perform + * redirection. We can get rid of that whenever we get support for + * multiple targets in the same rule.
*/ +struct xt_tproxy_target_info { + __u32 mark_mask; + __u32 mark_value; + __be32 laddr; + __be16 lport; +}; + +struct xt_tproxy_target_info_v1 { + __u32 mark_mask; + __u32 mark_value; + union nf_inet_addr laddr; + __be16 lport; +}; + +#endif /* _XT_TPROXY_H */ diff --git a/include/linux/netfilter/xt_addrtype.h b/include/linux/netfilter/xt_addrtype.h new file mode 100644 index 00000000..b156baa9 --- /dev/null +++ b/include/linux/netfilter/xt_addrtype.h @@ -0,0 +1,44 @@ +#ifndef _XT_ADDRTYPE_H +#define _XT_ADDRTYPE_H + +#include <linux/types.h> + +enum { + XT_ADDRTYPE_INVERT_SOURCE = 0x0001, + XT_ADDRTYPE_INVERT_DEST = 0x0002, + XT_ADDRTYPE_LIMIT_IFACE_IN = 0x0004, + XT_ADDRTYPE_LIMIT_IFACE_OUT = 0x0008, +}; + + +/* rtn_type enum values from rtnetlink.h, but shifted */ +enum { + XT_ADDRTYPE_UNSPEC = 1 << 0, + XT_ADDRTYPE_UNICAST = 1 << 1, /* 1 << RTN_UNICAST */ + XT_ADDRTYPE_LOCAL = 1 << 2, /* 1 << RTN_LOCAL, etc */ + XT_ADDRTYPE_BROADCAST = 1 << 3, + XT_ADDRTYPE_ANYCAST = 1 << 4, + XT_ADDRTYPE_MULTICAST = 1 << 5, + XT_ADDRTYPE_BLACKHOLE = 1 << 6, + XT_ADDRTYPE_UNREACHABLE = 1 << 7, + XT_ADDRTYPE_PROHIBIT = 1 << 8, + XT_ADDRTYPE_THROW = 1 << 9, + XT_ADDRTYPE_NAT = 1 << 10, + XT_ADDRTYPE_XRESOLVE = 1 << 11, +}; + +struct xt_addrtype_info_v1 { + __u16 source; /* source-type mask */ + __u16 dest; /* dest-type mask */ + __u32 flags; +}; + +/* revision 0 */ +struct xt_addrtype_info { + __u16 source; /* source-type mask */ + __u16 dest; /* dest-type mask */ + __u32 invert_source; + __u32 invert_dest; +}; + +#endif diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h new file mode 100644 index 00000000..9b883c8f --- /dev/null +++ b/include/linux/netfilter/xt_cluster.h @@ -0,0 +1,19 @@ +#ifndef _XT_CLUSTER_MATCH_H +#define _XT_CLUSTER_MATCH_H + +#include <linux/types.h> + +enum xt_cluster_flags { + XT_CLUSTER_F_INV = (1 << 0) +}; + +struct xt_cluster_match_info { + __u32 total_nodes; + __u32 node_mask; + __u32 hash_seed; + __u32 flags; +}; + +#define XT_CLUSTER_NODES_MAX 32 + +#endif /* _XT_CLUSTER_MATCH_H */ diff --git a/include/linux/netfilter/xt_comment.h b/include/linux/netfilter/xt_comment.h new file mode 100644 index 00000000..0ea5e79f --- /dev/null +++ b/include/linux/netfilter/xt_comment.h @@ -0,0 +1,10 @@ +#ifndef _XT_COMMENT_H +#define _XT_COMMENT_H + +#define XT_MAX_COMMENT_LEN 256 + +struct xt_comment_info { + char comment[XT_MAX_COMMENT_LEN]; +}; + +#endif /* XT_COMMENT_H */ diff --git a/include/linux/netfilter/xt_connbytes.h b/include/linux/netfilter/xt_connbytes.h new file mode 100644 index 00000000..f1d6c15b --- /dev/null +++ b/include/linux/netfilter/xt_connbytes.h @@ -0,0 +1,26 @@ +#ifndef _XT_CONNBYTES_H +#define _XT_CONNBYTES_H + +#include <linux/types.h> + +enum xt_connbytes_what { + XT_CONNBYTES_PKTS, + XT_CONNBYTES_BYTES, + XT_CONNBYTES_AVGPKT, +}; + +enum xt_connbytes_direction { + XT_CONNBYTES_DIR_ORIGINAL, + XT_CONNBYTES_DIR_REPLY, + XT_CONNBYTES_DIR_BOTH, +}; + +struct xt_connbytes_info { + struct { + __aligned_u64 from; /* count to be matched */ + __aligned_u64 to; /* count to be matched */ + } count; + __u8 what; /* ipt_connbytes_what */ + __u8 direction; /* ipt_connbytes_direction */ +}; +#endif diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h new file mode 100644 index 00000000..d1366f05 --- /dev/null +++ b/include/linux/netfilter/xt_connlimit.h @@ -0,0 +1,37 @@ +#ifndef _XT_CONNLIMIT_H +#define _XT_CONNLIMIT_H + +#include <linux/types.h> +#include <linux/netfilter.h> + +struct 
xt_connlimit_data; + +enum { + XT_CONNLIMIT_INVERT = 1 << 0, + XT_CONNLIMIT_DADDR = 1 << 1, +}; + +struct xt_connlimit_info { + union { + union nf_inet_addr mask; +#ifndef __KERNEL__ + union { + __be32 v4_mask; + __be32 v6_mask[4]; + }; +#endif + }; + unsigned int limit; + union { + /* revision 0 */ + unsigned int inverse; + + /* revision 1 */ + __u32 flags; + }; + + /* Used internally by the kernel */ + struct xt_connlimit_data *data __attribute__((aligned(8))); +}; + +#endif /* _XT_CONNLIMIT_H */ diff --git a/include/linux/netfilter/xt_connmark.h b/include/linux/netfilter/xt_connmark.h new file mode 100644 index 00000000..efc17a83 --- /dev/null +++ b/include/linux/netfilter/xt_connmark.h @@ -0,0 +1,31 @@ +#ifndef _XT_CONNMARK_H +#define _XT_CONNMARK_H + +#include <linux/types.h> + +/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> + * by Henrik Nordstrom <hno@marasystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +enum { + XT_CONNMARK_SET = 0, + XT_CONNMARK_SAVE, + XT_CONNMARK_RESTORE +}; + +struct xt_connmark_tginfo1 { + __u32 ctmark, ctmask, nfmask; + __u8 mode; +}; + +struct xt_connmark_mtinfo1 { + __u32 mark, mask; + __u8 invert; +}; + +#endif /*_XT_CONNMARK_H*/ diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h new file mode 100644 index 00000000..e3c041d5 --- /dev/null +++ b/include/linux/netfilter/xt_conntrack.h @@ -0,0 +1,77 @@ +/* Header file for kernel module to match connection tracking information. + * GPL (C) 2001 Marc Boucher (marc@mbsi.ca). + */ + +#ifndef _XT_CONNTRACK_H +#define _XT_CONNTRACK_H + +#include <linux/types.h> +#include <linux/netfilter.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> + +#define XT_CONNTRACK_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1)) +#define XT_CONNTRACK_STATE_INVALID (1 << 0) + +#define XT_CONNTRACK_STATE_SNAT (1 << (IP_CT_NUMBER + 1)) +#define XT_CONNTRACK_STATE_DNAT (1 << (IP_CT_NUMBER + 2)) +#define XT_CONNTRACK_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 3)) + +/* flags, invflags: */ +enum { + XT_CONNTRACK_STATE = 1 << 0, + XT_CONNTRACK_PROTO = 1 << 1, + XT_CONNTRACK_ORIGSRC = 1 << 2, + XT_CONNTRACK_ORIGDST = 1 << 3, + XT_CONNTRACK_REPLSRC = 1 << 4, + XT_CONNTRACK_REPLDST = 1 << 5, + XT_CONNTRACK_STATUS = 1 << 6, + XT_CONNTRACK_EXPIRES = 1 << 7, + XT_CONNTRACK_ORIGSRC_PORT = 1 << 8, + XT_CONNTRACK_ORIGDST_PORT = 1 << 9, + XT_CONNTRACK_REPLSRC_PORT = 1 << 10, + XT_CONNTRACK_REPLDST_PORT = 1 << 11, + XT_CONNTRACK_DIRECTION = 1 << 12, +}; + +struct xt_conntrack_mtinfo1 { + union nf_inet_addr origsrc_addr, origsrc_mask; + union nf_inet_addr origdst_addr, origdst_mask; + union nf_inet_addr replsrc_addr, replsrc_mask; + union nf_inet_addr repldst_addr, repldst_mask; + __u32 expires_min, expires_max; + __u16 l4proto; + __be16 origsrc_port, origdst_port; + __be16 replsrc_port, repldst_port; + __u16 match_flags, invert_flags; + __u8 state_mask, status_mask; +}; + +struct xt_conntrack_mtinfo2 { + union nf_inet_addr origsrc_addr, origsrc_mask; + union nf_inet_addr origdst_addr, origdst_mask; + union nf_inet_addr replsrc_addr, replsrc_mask; + union nf_inet_addr repldst_addr, repldst_mask; + __u32 expires_min, expires_max; + __u16 l4proto; + __be16 origsrc_port, origdst_port; + __be16 replsrc_port, repldst_port; + __u16 match_flags, invert_flags; + 
__u16 state_mask, status_mask; +}; + +struct xt_conntrack_mtinfo3 { + union nf_inet_addr origsrc_addr, origsrc_mask; + union nf_inet_addr origdst_addr, origdst_mask; + union nf_inet_addr replsrc_addr, replsrc_mask; + union nf_inet_addr repldst_addr, repldst_mask; + __u32 expires_min, expires_max; + __u16 l4proto; + __u16 origsrc_port, origdst_port; + __u16 replsrc_port, repldst_port; + __u16 match_flags, invert_flags; + __u16 state_mask, status_mask; + __u16 origsrc_port_high, origdst_port_high; + __u16 replsrc_port_high, repldst_port_high; +}; + +#endif /*_XT_CONNTRACK_H*/ diff --git a/include/linux/netfilter/xt_cpu.h b/include/linux/netfilter/xt_cpu.h new file mode 100644 index 00000000..93c7f11d --- /dev/null +++ b/include/linux/netfilter/xt_cpu.h @@ -0,0 +1,11 @@ +#ifndef _XT_CPU_H +#define _XT_CPU_H + +#include <linux/types.h> + +struct xt_cpu_info { + __u32 cpu; + __u32 invert; +}; + +#endif /*_XT_CPU_H*/ diff --git a/include/linux/netfilter/xt_dccp.h b/include/linux/netfilter/xt_dccp.h new file mode 100644 index 00000000..a579e1b6 --- /dev/null +++ b/include/linux/netfilter/xt_dccp.h @@ -0,0 +1,25 @@ +#ifndef _XT_DCCP_H_ +#define _XT_DCCP_H_ + +#include <linux/types.h> + +#define XT_DCCP_SRC_PORTS 0x01 +#define XT_DCCP_DEST_PORTS 0x02 +#define XT_DCCP_TYPE 0x04 +#define XT_DCCP_OPTION 0x08 + +#define XT_DCCP_VALID_FLAGS 0x0f + +struct xt_dccp_info { + __u16 dpts[2]; /* Min, Max */ + __u16 spts[2]; /* Min, Max */ + + __u16 flags; + __u16 invflags; + + __u16 typemask; + __u8 option; +}; + +#endif /* _XT_DCCP_H_ */ + diff --git a/include/linux/netfilter/xt_devgroup.h b/include/linux/netfilter/xt_devgroup.h new file mode 100644 index 00000000..1babde0e --- /dev/null +++ b/include/linux/netfilter/xt_devgroup.h @@ -0,0 +1,21 @@ +#ifndef _XT_DEVGROUP_H +#define _XT_DEVGROUP_H + +#include <linux/types.h> + +enum xt_devgroup_flags { + XT_DEVGROUP_MATCH_SRC = 0x1, + XT_DEVGROUP_INVERT_SRC = 0x2, + XT_DEVGROUP_MATCH_DST = 0x4, + XT_DEVGROUP_INVERT_DST = 0x8, +}; + +struct xt_devgroup_info { + __u32 flags; + __u32 src_group; + __u32 src_mask; + __u32 dst_group; + __u32 dst_mask; +}; + +#endif /* _XT_DEVGROUP_H */ diff --git a/include/linux/netfilter/xt_dscp.h b/include/linux/netfilter/xt_dscp.h new file mode 100644 index 00000000..15f8932a --- /dev/null +++ b/include/linux/netfilter/xt_dscp.h @@ -0,0 +1,31 @@ +/* x_tables module for matching the IPv4/IPv6 DSCP field + * + * (C) 2002 Harald Welte <laforge@gnumonks.org> + * This software is distributed under GNU GPL v2, 1991 + * + * See RFC2474 for a description of the DSCP field within the IP Header. 
+ * + * xt_dscp.h,v 1.3 2002/08/05 19:00:21 laforge Exp +*/ +#ifndef _XT_DSCP_H +#define _XT_DSCP_H + +#include <linux/types.h> + +#define XT_DSCP_MASK 0xfc /* 11111100 */ +#define XT_DSCP_SHIFT 2 +#define XT_DSCP_MAX 0x3f /* 00111111 */ + +/* match info */ +struct xt_dscp_info { + __u8 dscp; + __u8 invert; +}; + +struct xt_tos_match_info { + __u8 tos_mask; + __u8 tos_value; + __u8 invert; +}; + +#endif /* _XT_DSCP_H */ diff --git a/include/linux/netfilter/xt_ecn.h b/include/linux/netfilter/xt_ecn.h new file mode 100644 index 00000000..7158fca3 --- /dev/null +++ b/include/linux/netfilter/xt_ecn.h @@ -0,0 +1,35 @@ +/* iptables module for matching the ECN header in IPv4 and TCP header + * + * (C) 2002 Harald Welte <laforge@gnumonks.org> + * + * This software is distributed under GNU GPL v2, 1991 + * + * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp +*/ +#ifndef _XT_ECN_H +#define _XT_ECN_H + +#include <linux/types.h> +#include <linux/netfilter/xt_dscp.h> + +#define XT_ECN_IP_MASK (~XT_DSCP_MASK) + +#define XT_ECN_OP_MATCH_IP 0x01 +#define XT_ECN_OP_MATCH_ECE 0x10 +#define XT_ECN_OP_MATCH_CWR 0x20 + +#define XT_ECN_OP_MATCH_MASK 0xce + +/* match info */ +struct xt_ecn_info { + __u8 operation; + __u8 invert; + __u8 ip_ect; + union { + struct { + __u8 ect; + } tcp; + } proto; +}; + +#endif /* _XT_ECN_H */ diff --git a/include/linux/netfilter/xt_esp.h b/include/linux/netfilter/xt_esp.h new file mode 100644 index 00000000..ee688240 --- /dev/null +++ b/include/linux/netfilter/xt_esp.h @@ -0,0 +1,15 @@ +#ifndef _XT_ESP_H +#define _XT_ESP_H + +#include <linux/types.h> + +struct xt_esp { + __u32 spis[2]; /* Security Parameter Index */ + __u8 invflags; /* Inverse flags */ +}; + +/* Values for "invflags" field in struct xt_esp. */ +#define XT_ESP_INV_SPI 0x01 /* Invert the sense of spi. */ +#define XT_ESP_INV_MASK 0x01 /* All possible flags. */ + +#endif /*_XT_ESP_H*/ diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h new file mode 100644 index 00000000..b1925b59 --- /dev/null +++ b/include/linux/netfilter/xt_hashlimit.h @@ -0,0 +1,68 @@ +#ifndef _XT_HASHLIMIT_H +#define _XT_HASHLIMIT_H + +#include <linux/types.h> + +/* timings are in milliseconds. */ +#define XT_HASHLIMIT_SCALE 10000 +/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490 + seconds, or one every 59 hours. */ + +/* details of this structure hidden by the implementation */ +struct xt_hashlimit_htable; + +enum { + XT_HASHLIMIT_HASH_DIP = 1 << 0, + XT_HASHLIMIT_HASH_DPT = 1 << 1, + XT_HASHLIMIT_HASH_SIP = 1 << 2, + XT_HASHLIMIT_HASH_SPT = 1 << 3, + XT_HASHLIMIT_INVERT = 1 << 4, +}; + +struct hashlimit_cfg { + __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */ + __u32 avg; /* Average secs between packets * scale */ + __u32 burst; /* Period multiplier for upper limit. */ + + /* user specified */ + __u32 size; /* how many buckets */ + __u32 max; /* max number of entries */ + __u32 gc_interval; /* gc interval */ + __u32 expire; /* when do entries expire? */ +}; + +struct xt_hashlimit_info { + char name [IFNAMSIZ]; /* name */ + struct hashlimit_cfg cfg; + + /* Used internally by the kernel */ + struct xt_hashlimit_htable *hinfo; + union { + void *ptr; + struct xt_hashlimit_info *master; + } u; +}; + +struct hashlimit_cfg1 { + __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */ + __u32 avg; /* Average secs between packets * scale */ + __u32 burst; /* Period multiplier for upper limit. 
*/ + + /* user specified */ + __u32 size; /* how many buckets */ + __u32 max; /* max number of entries */ + __u32 gc_interval; /* gc interval */ + __u32 expire; /* when do entries expire? */ + + __u8 srcmask, dstmask; +}; + +struct xt_hashlimit_mtinfo1 { + char name[IFNAMSIZ]; + struct hashlimit_cfg1 cfg; + + /* Used internally by the kernel */ + struct xt_hashlimit_htable *hinfo __attribute__((aligned(8))); +}; + +#endif /*_XT_HASHLIMIT_H*/ diff --git a/include/linux/netfilter/xt_helper.h b/include/linux/netfilter/xt_helper.h new file mode 100644 index 00000000..6b42763f --- /dev/null +++ b/include/linux/netfilter/xt_helper.h @@ -0,0 +1,8 @@ +#ifndef _XT_HELPER_H +#define _XT_HELPER_H + +struct xt_helper_info { + int invert; + char name[30]; +}; +#endif /* _XT_HELPER_H */ diff --git a/include/linux/netfilter/xt_iprange.h b/include/linux/netfilter/xt_iprange.h new file mode 100644 index 00000000..25fd7cf8 --- /dev/null +++ b/include/linux/netfilter/xt_iprange.h @@ -0,0 +1,20 @@ +#ifndef _LINUX_NETFILTER_XT_IPRANGE_H +#define _LINUX_NETFILTER_XT_IPRANGE_H 1 + +#include <linux/types.h> +#include <linux/netfilter.h> + +enum { + IPRANGE_SRC = 1 << 0, /* match source IP address */ + IPRANGE_DST = 1 << 1, /* match destination IP address */ + IPRANGE_SRC_INV = 1 << 4, /* negate the condition */ + IPRANGE_DST_INV = 1 << 5, /* -"- */ +}; + +struct xt_iprange_mtinfo { + union nf_inet_addr src_min, src_max; + union nf_inet_addr dst_min, dst_max; + __u8 flags; +}; + +#endif /* _LINUX_NETFILTER_XT_IPRANGE_H */ diff --git a/include/linux/netfilter/xt_ipvs.h b/include/linux/netfilter/xt_ipvs.h new file mode 100644 index 00000000..eff34ac1 --- /dev/null +++ b/include/linux/netfilter/xt_ipvs.h @@ -0,0 +1,29 @@ +#ifndef _XT_IPVS_H +#define _XT_IPVS_H + +#include <linux/types.h> + +enum { + XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */ + XT_IPVS_PROTO = 1 << 1, + XT_IPVS_VADDR = 1 << 2, + XT_IPVS_VPORT = 1 << 3, + XT_IPVS_DIR = 1 << 4, + XT_IPVS_METHOD = 1 << 5, + XT_IPVS_VPORTCTL = 1 << 6, + XT_IPVS_MASK = (1 << 7) - 1, + XT_IPVS_ONCE_MASK = XT_IPVS_MASK & ~XT_IPVS_IPVS_PROPERTY +}; + +struct xt_ipvs_mtinfo { + union nf_inet_addr vaddr, vmask; + __be16 vport; + __u8 l4proto; + __u8 fwd_method; + __be16 vportctl; + + __u8 invert; + __u8 bitmask; +}; + +#endif /* _XT_IPVS_H */ diff --git a/include/linux/netfilter/xt_length.h b/include/linux/netfilter/xt_length.h new file mode 100644 index 00000000..b82ed7c4 --- /dev/null +++ b/include/linux/netfilter/xt_length.h @@ -0,0 +1,11 @@ +#ifndef _XT_LENGTH_H +#define _XT_LENGTH_H + +#include <linux/types.h> + +struct xt_length_info { + __u16 min, max; + __u8 invert; +}; + +#endif /*_XT_LENGTH_H*/ diff --git a/include/linux/netfilter/xt_limit.h b/include/linux/netfilter/xt_limit.h new file mode 100644 index 00000000..bb47fc4d --- /dev/null +++ b/include/linux/netfilter/xt_limit.h @@ -0,0 +1,24 @@ +#ifndef _XT_RATE_H +#define _XT_RATE_H + +#include <linux/types.h> + +/* timings are in milliseconds. */ +#define XT_LIMIT_SCALE 10000 + +struct xt_limit_priv; + +/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490 + seconds, or one every 59 hours. */ +struct xt_rateinfo { + __u32 avg; /* Average secs between packets * scale */ + __u32 burst; /* Period multiplier for upper limit. 
*/ + + /* Used internally by the kernel */ + unsigned long prev; /* moved to xt_limit_priv */ + __u32 credit; /* moved to xt_limit_priv */ + __u32 credit_cap, cost; + + struct xt_limit_priv *master; +}; +#endif /*_XT_RATE_H*/ diff --git a/include/linux/netfilter/xt_mac.h b/include/linux/netfilter/xt_mac.h new file mode 100644 index 00000000..b892cdc6 --- /dev/null +++ b/include/linux/netfilter/xt_mac.h @@ -0,0 +1,8 @@ +#ifndef _XT_MAC_H +#define _XT_MAC_H + +struct xt_mac_info { + unsigned char srcaddr[ETH_ALEN]; + int invert; +}; +#endif /*_XT_MAC_H*/ diff --git a/include/linux/netfilter/xt_mark.h b/include/linux/netfilter/xt_mark.h new file mode 100644 index 00000000..ecadc40d --- /dev/null +++ b/include/linux/netfilter/xt_mark.h @@ -0,0 +1,15 @@ +#ifndef _XT_MARK_H +#define _XT_MARK_H + +#include <linux/types.h> + +struct xt_mark_tginfo2 { + __u32 mark, mask; +}; + +struct xt_mark_mtinfo1 { + __u32 mark, mask; + __u8 invert; +}; + +#endif /*_XT_MARK_H*/ diff --git a/include/linux/netfilter/xt_multiport.h b/include/linux/netfilter/xt_multiport.h new file mode 100644 index 00000000..5b7e72df --- /dev/null +++ b/include/linux/netfilter/xt_multiport.h @@ -0,0 +1,29 @@ +#ifndef _XT_MULTIPORT_H +#define _XT_MULTIPORT_H + +#include <linux/types.h> + +enum xt_multiport_flags { + XT_MULTIPORT_SOURCE, + XT_MULTIPORT_DESTINATION, + XT_MULTIPORT_EITHER +}; + +#define XT_MULTI_PORTS 15 + +/* Must fit inside union xt_matchinfo: 16 bytes */ +struct xt_multiport { + __u8 flags; /* Type of comparison */ + __u8 count; /* Number of ports */ + __u16 ports[XT_MULTI_PORTS]; /* Ports */ +}; + +struct xt_multiport_v1 { + __u8 flags; /* Type of comparison */ + __u8 count; /* Number of ports */ + __u16 ports[XT_MULTI_PORTS]; /* Ports */ + __u8 pflags[XT_MULTI_PORTS]; /* Port flags */ + __u8 invert; /* Invert flag */ +}; + +#endif /*_XT_MULTIPORT_H*/ diff --git a/include/linux/netfilter/xt_nfacct.h b/include/linux/netfilter/xt_nfacct.h new file mode 100644 index 00000000..3e19c8a8 --- /dev/null +++ b/include/linux/netfilter/xt_nfacct.h @@ -0,0 +1,13 @@ +#ifndef _XT_NFACCT_MATCH_H +#define _XT_NFACCT_MATCH_H + +#include <linux/netfilter/nfnetlink_acct.h> + +struct nf_acct; + +struct xt_nfacct_match_info { + char name[NFACCT_NAME_MAX]; + struct nf_acct *nfacct; +}; + +#endif /* _XT_NFACCT_MATCH_H */ diff --git a/include/linux/netfilter/xt_osf.h b/include/linux/netfilter/xt_osf.h new file mode 100644 index 00000000..18afa495 --- /dev/null +++ b/include/linux/netfilter/xt_osf.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2003+ Evgeniy Polyakov <johnpol@2ka.mxt.ru> + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _XT_OSF_H +#define _XT_OSF_H + +#include <linux/types.h> + +#define MAXGENRELEN 32 + +#define XT_OSF_GENRE (1<<0) +#define XT_OSF_TTL (1<<1) +#define XT_OSF_LOG (1<<2) +#define XT_OSF_INVERT (1<<3) + +#define XT_OSF_LOGLEVEL_ALL 0 /* log all matched fingerprints */ +#define XT_OSF_LOGLEVEL_FIRST 1 /* log only the first matched fingerprint */ +#define XT_OSF_LOGLEVEL_ALL_KNOWN 2 /* do not log unknown packets */ + +#define XT_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */ +#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */ +#define XT_OSF_TTL_NOCHECK 2 /* Do not compare ip and fingerprint TTL at all */ + +struct xt_osf_info { + char genre[MAXGENRELEN]; + __u32 len; + __u32 flags; + __u32 loglevel; + __u32 ttl; +}; + +/* + * Wildcard MSS (kind of). + * It is used to implement a state machine for the different wildcard values + * of the MSS and window sizes. + */ +struct xt_osf_wc { + __u32 wc; + __u32 val; +}; + +/* + * This struct represents IANA options + * http://www.iana.org/assignments/tcp-parameters + */ +struct xt_osf_opt { + __u16 kind, length; + struct xt_osf_wc wc; +}; + +struct xt_osf_user_finger { + struct xt_osf_wc wss; + + __u8 ttl, df; + __u16 ss, mss; + __u16 opt_num; + + char genre[MAXGENRELEN]; + char version[MAXGENRELEN]; + char subtype[MAXGENRELEN]; + + /* MAX_IPOPTLEN is maximum if all options are NOPs or EOLs */ + struct xt_osf_opt opt[MAX_IPOPTLEN]; +}; + +struct xt_osf_nlmsg { + struct xt_osf_user_finger f; + struct iphdr ip; + struct tcphdr tcp; +}; + +/* Defines for IANA option kinds */ + +enum iana_options { + OSFOPT_EOL = 0, /* End of options */ + OSFOPT_NOP, /* NOP */ + OSFOPT_MSS, /* Maximum segment size */ + OSFOPT_WSO, /* Window scale option */ + OSFOPT_SACKP, /* SACK permitted */ + OSFOPT_SACK, /* SACK */ + OSFOPT_ECHO, + OSFOPT_ECHOREPLY, + OSFOPT_TS, /* Timestamp option */ + OSFOPT_POCP, /* Partial Order Connection Permitted */ + OSFOPT_POSP, /* Partial Order Service Profile */ + + /* Others are not used in the current OSF */ + OSFOPT_EMPTY = 255, +}; + +/* + * Initial window size option state machine: multiple of mss, mtu or + * plain numeric value. Can also be made as plain numeric value which + * is not a multiple of specified value. + */ +enum xt_osf_window_size_options { + OSF_WSS_PLAIN = 0, + OSF_WSS_MSS, + OSF_WSS_MTU, + OSF_WSS_MODULO, + OSF_WSS_MAX, +}; + +/* + * Add/remove fingerprint from the kernel.
+ */ +enum xt_osf_msg_types { + OSF_MSG_ADD, + OSF_MSG_REMOVE, + OSF_MSG_MAX, +}; + +enum xt_osf_attr_type { + OSF_ATTR_UNSPEC, + OSF_ATTR_FINGER, + OSF_ATTR_MAX, +}; + +#endif /* _XT_OSF_H */ diff --git a/include/linux/netfilter/xt_owner.h b/include/linux/netfilter/xt_owner.h new file mode 100644 index 00000000..20817617 --- /dev/null +++ b/include/linux/netfilter/xt_owner.h @@ -0,0 +1,18 @@ +#ifndef _XT_OWNER_MATCH_H +#define _XT_OWNER_MATCH_H + +#include <linux/types.h> + +enum { + XT_OWNER_UID = 1 << 0, + XT_OWNER_GID = 1 << 1, + XT_OWNER_SOCKET = 1 << 2, +}; + +struct xt_owner_match_info { + __u32 uid_min, uid_max; + __u32 gid_min, gid_max; + __u8 match, invert; +}; + +#endif /* _XT_OWNER_MATCH_H */ diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h new file mode 100644 index 00000000..8555e399 --- /dev/null +++ b/include/linux/netfilter/xt_physdev.h @@ -0,0 +1,26 @@ +#ifndef _XT_PHYSDEV_H +#define _XT_PHYSDEV_H + +#include <linux/types.h> + +#ifdef __KERNEL__ +#include <linux/if.h> +#endif + +#define XT_PHYSDEV_OP_IN 0x01 +#define XT_PHYSDEV_OP_OUT 0x02 +#define XT_PHYSDEV_OP_BRIDGED 0x04 +#define XT_PHYSDEV_OP_ISIN 0x08 +#define XT_PHYSDEV_OP_ISOUT 0x10 +#define XT_PHYSDEV_OP_MASK (0x20 - 1) + +struct xt_physdev_info { + char physindev[IFNAMSIZ]; + char in_mask[IFNAMSIZ]; + char physoutdev[IFNAMSIZ]; + char out_mask[IFNAMSIZ]; + __u8 invert; + __u8 bitmask; +}; + +#endif /*_XT_PHYSDEV_H*/ diff --git a/include/linux/netfilter/xt_pkttype.h b/include/linux/netfilter/xt_pkttype.h new file mode 100644 index 00000000..f265cf52 --- /dev/null +++ b/include/linux/netfilter/xt_pkttype.h @@ -0,0 +1,8 @@ +#ifndef _XT_PKTTYPE_H +#define _XT_PKTTYPE_H + +struct xt_pkttype_info { + int pkttype; + int invert; +}; +#endif /*_XT_PKTTYPE_H*/ diff --git a/include/linux/netfilter/xt_policy.h b/include/linux/netfilter/xt_policy.h new file mode 100644 index 00000000..be8ead05 --- /dev/null +++ b/include/linux/netfilter/xt_policy.h @@ -0,0 +1,69 @@ +#ifndef _XT_POLICY_H +#define _XT_POLICY_H + +#include <linux/types.h> + +#define XT_POLICY_MAX_ELEM 4 + +enum xt_policy_flags { + XT_POLICY_MATCH_IN = 0x1, + XT_POLICY_MATCH_OUT = 0x2, + XT_POLICY_MATCH_NONE = 0x4, + XT_POLICY_MATCH_STRICT = 0x8, +}; + +enum xt_policy_modes { + XT_POLICY_MODE_TRANSPORT, + XT_POLICY_MODE_TUNNEL +}; + +struct xt_policy_spec { + __u8 saddr:1, + daddr:1, + proto:1, + mode:1, + spi:1, + reqid:1; +}; + +#ifndef __KERNEL__ +union xt_policy_addr { + struct in_addr a4; + struct in6_addr a6; +}; +#endif + +struct xt_policy_elem { + union { +#ifdef __KERNEL__ + struct { + union nf_inet_addr saddr; + union nf_inet_addr smask; + union nf_inet_addr daddr; + union nf_inet_addr dmask; + }; +#else + struct { + union xt_policy_addr saddr; + union xt_policy_addr smask; + union xt_policy_addr daddr; + union xt_policy_addr dmask; + }; +#endif + }; + __be32 spi; + __u32 reqid; + __u8 proto; + __u8 mode; + + struct xt_policy_spec match; + struct xt_policy_spec invert; +}; + +struct xt_policy_info { + struct xt_policy_elem pol[XT_POLICY_MAX_ELEM]; + __u16 flags; + __u16 len; +}; + +#endif /* _XT_POLICY_H */ diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h new file mode 100644 index 00000000..ca60fbde --- /dev/null +++ b/include/linux/netfilter/xt_qtaguid.h @@ -0,0 +1,13 @@ +#ifndef _XT_QTAGUID_MATCH_H +#define _XT_QTAGUID_MATCH_H + +/* For now we just replace the xt_owner. + * FIXME: make iptables aware of qtaguid. 
*/ +#include <linux/netfilter/xt_owner.h> + +#define XT_QTAGUID_UID XT_OWNER_UID +#define XT_QTAGUID_GID XT_OWNER_GID +#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET +#define xt_qtaguid_match_info xt_owner_match_info + +#endif /* _XT_QTAGUID_MATCH_H */ diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h new file mode 100644 index 00000000..9314723f --- /dev/null +++ b/include/linux/netfilter/xt_quota.h @@ -0,0 +1,22 @@ +#ifndef _XT_QUOTA_H +#define _XT_QUOTA_H + +#include <linux/types.h> + +enum xt_quota_flags { + XT_QUOTA_INVERT = 0x1, +}; +#define XT_QUOTA_MASK 0x1 + +struct xt_quota_priv; + +struct xt_quota_info { + __u32 flags; + __u32 pad; + __aligned_u64 quota; + + /* Used internally by the kernel */ + struct xt_quota_priv *master; +}; + +#endif /* _XT_QUOTA_H */ diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h new file mode 100644 index 00000000..eadc6903 --- /dev/null +++ b/include/linux/netfilter/xt_quota2.h @@ -0,0 +1,25 @@ +#ifndef _XT_QUOTA_H +#define _XT_QUOTA_H + +enum xt_quota_flags { + XT_QUOTA_INVERT = 1 << 0, + XT_QUOTA_GROW = 1 << 1, + XT_QUOTA_PACKET = 1 << 2, + XT_QUOTA_NO_CHANGE = 1 << 3, + XT_QUOTA_MASK = 0x0F, +}; + +struct xt_quota_counter; + +struct xt_quota_mtinfo2 { + char name[15]; + u_int8_t flags; + + /* Comparison-invariant */ + aligned_u64 quota; + + /* Used internally by the kernel */ + struct xt_quota_counter *master __attribute__((aligned(8))); +}; + +#endif /* _XT_QUOTA_H */ diff --git a/include/linux/netfilter/xt_rateest.h b/include/linux/netfilter/xt_rateest.h new file mode 100644 index 00000000..d40a6196 --- /dev/null +++ b/include/linux/netfilter/xt_rateest.h @@ -0,0 +1,37 @@ +#ifndef _XT_RATEEST_MATCH_H +#define _XT_RATEEST_MATCH_H + +#include <linux/types.h> + +enum xt_rateest_match_flags { + XT_RATEEST_MATCH_INVERT = 1<<0, + XT_RATEEST_MATCH_ABS = 1<<1, + XT_RATEEST_MATCH_REL = 1<<2, + XT_RATEEST_MATCH_DELTA = 1<<3, + XT_RATEEST_MATCH_BPS = 1<<4, + XT_RATEEST_MATCH_PPS = 1<<5, +}; + +enum xt_rateest_match_mode { + XT_RATEEST_MATCH_NONE, + XT_RATEEST_MATCH_EQ, + XT_RATEEST_MATCH_LT, + XT_RATEEST_MATCH_GT, +}; + +struct xt_rateest_match_info { + char name1[IFNAMSIZ]; + char name2[IFNAMSIZ]; + __u16 flags; + __u16 mode; + __u32 bps1; + __u32 pps1; + __u32 bps2; + __u32 pps2; + + /* Used internally by the kernel */ + struct xt_rateest *est1 __attribute__((aligned(8))); + struct xt_rateest *est2 __attribute__((aligned(8))); +}; + +#endif /* _XT_RATEEST_MATCH_H */ diff --git a/include/linux/netfilter/xt_realm.h b/include/linux/netfilter/xt_realm.h new file mode 100644 index 00000000..d4a82ee5 --- /dev/null +++ b/include/linux/netfilter/xt_realm.h @@ -0,0 +1,12 @@ +#ifndef _XT_REALM_H +#define _XT_REALM_H + +#include <linux/types.h> + +struct xt_realm_info { + __u32 id; + __u32 mask; + __u8 invert; +}; + +#endif /* _XT_REALM_H */ diff --git a/include/linux/netfilter/xt_recent.h b/include/linux/netfilter/xt_recent.h new file mode 100644 index 00000000..83318e01 --- /dev/null +++ b/include/linux/netfilter/xt_recent.h @@ -0,0 +1,35 @@ +#ifndef _LINUX_NETFILTER_XT_RECENT_H +#define _LINUX_NETFILTER_XT_RECENT_H 1 + +#include <linux/types.h> + +enum { + XT_RECENT_CHECK = 1 << 0, + XT_RECENT_SET = 1 << 1, + XT_RECENT_UPDATE = 1 << 2, + XT_RECENT_REMOVE = 1 << 3, + XT_RECENT_TTL = 1 << 4, + XT_RECENT_REAP = 1 << 5, + + XT_RECENT_SOURCE = 0, + XT_RECENT_DEST = 1, + + XT_RECENT_NAME_LEN = 200, +}; + +/* Only allowed with --rcheck and --update */ +#define XT_RECENT_MODIFIERS 
(XT_RECENT_TTL|XT_RECENT_REAP) + +#define XT_RECENT_VALID_FLAGS (XT_RECENT_CHECK|XT_RECENT_SET|XT_RECENT_UPDATE|\ + XT_RECENT_REMOVE|XT_RECENT_TTL|XT_RECENT_REAP) + +struct xt_recent_mtinfo { + __u32 seconds; + __u32 hit_count; + __u8 check_set; + __u8 invert; + char name[XT_RECENT_NAME_LEN]; + __u8 side; +}; + +#endif /* _LINUX_NETFILTER_XT_RECENT_H */ diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/linux/netfilter/xt_rpfilter.h new file mode 100644 index 00000000..8358d4f7 --- /dev/null +++ b/include/linux/netfilter/xt_rpfilter.h @@ -0,0 +1,23 @@ +#ifndef _XT_RPATH_H +#define _XT_RPATH_H + +#include <linux/types.h> + +enum { + XT_RPFILTER_LOOSE = 1 << 0, + XT_RPFILTER_VALID_MARK = 1 << 1, + XT_RPFILTER_ACCEPT_LOCAL = 1 << 2, + XT_RPFILTER_INVERT = 1 << 3, +#ifdef __KERNEL__ + XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE | + XT_RPFILTER_VALID_MARK | + XT_RPFILTER_ACCEPT_LOCAL | + XT_RPFILTER_INVERT, +#endif +}; + +struct xt_rpfilter_info { + __u8 flags; +}; + +#endif diff --git a/include/linux/netfilter/xt_sctp.h b/include/linux/netfilter/xt_sctp.h new file mode 100644 index 00000000..29287be6 --- /dev/null +++ b/include/linux/netfilter/xt_sctp.h @@ -0,0 +1,92 @@ +#ifndef _XT_SCTP_H_ +#define _XT_SCTP_H_ + +#include <linux/types.h> + +#define XT_SCTP_SRC_PORTS 0x01 +#define XT_SCTP_DEST_PORTS 0x02 +#define XT_SCTP_CHUNK_TYPES 0x04 + +#define XT_SCTP_VALID_FLAGS 0x07 + +struct xt_sctp_flag_info { + __u8 chunktype; + __u8 flag; + __u8 flag_mask; +}; + +#define XT_NUM_SCTP_FLAGS 4 + +struct xt_sctp_info { + __u16 dpts[2]; /* Min, Max */ + __u16 spts[2]; /* Min, Max */ + + __u32 chunkmap[256 / sizeof (__u32)]; /* Bit mask of chunks to be matched according to RFC 2960 */ + +#define SCTP_CHUNK_MATCH_ANY 0x01 /* Match if any of the chunk types are present */ +#define SCTP_CHUNK_MATCH_ALL 0x02 /* Match if all of the chunk types are present */ +#define SCTP_CHUNK_MATCH_ONLY 0x04 /* Match if these are the only chunk types present */ + + __u32 chunk_match_type; + struct xt_sctp_flag_info flag_info[XT_NUM_SCTP_FLAGS]; + int flag_count; + + __u32 flags; + __u32 invflags; +}; + +#define bytes(type) (sizeof(type) * 8) + +#define SCTP_CHUNKMAP_SET(chunkmap, type) \ + do { \ + (chunkmap)[type / bytes(__u32)] |= \ + 1 << (type % bytes(__u32)); \ + } while (0) + +#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \ + do { \ + (chunkmap)[type / bytes(__u32)] &= \ + ~(1 << (type % bytes(__u32))); \ + } while (0) + +#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \ +({ \ + ((chunkmap)[type / bytes (__u32)] & \ + (1 << (type % bytes (__u32)))) ? 
1: 0; \ +}) + +#define SCTP_CHUNKMAP_RESET(chunkmap) \ + memset((chunkmap), 0, sizeof(chunkmap)) + +#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \ + memset((chunkmap), ~0U, sizeof(chunkmap)) + +#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \ + memcpy((destmap), (srcmap), sizeof(srcmap)) + +#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \ + __sctp_chunkmap_is_clear((chunkmap), ARRAY_SIZE(chunkmap)) +static inline bool +__sctp_chunkmap_is_clear(const __u32 *chunkmap, unsigned int n) +{ + unsigned int i; + for (i = 0; i < n; ++i) + if (chunkmap[i]) + return false; + return true; +} + +#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \ + __sctp_chunkmap_is_all_set((chunkmap), ARRAY_SIZE(chunkmap)) +static inline bool +__sctp_chunkmap_is_all_set(const __u32 *chunkmap, unsigned int n) +{ + unsigned int i; + for (i = 0; i < n; ++i) + if (chunkmap[i] != ~0U) + return false; + return true; +} + +#endif /* _XT_SCTP_H_ */ + diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h new file mode 100644 index 00000000..e3a9978f --- /dev/null +++ b/include/linux/netfilter/xt_set.h @@ -0,0 +1,65 @@ +#ifndef _XT_SET_H +#define _XT_SET_H + +#include <linux/types.h> +#include <linux/netfilter/ipset/ip_set.h> + +/* Revision 0 interface: backward compatible with netfilter/iptables */ + +/* + * Option flags for kernel operations (xt_set_info_v0) + */ +#define IPSET_SRC 0x01 /* Source match/add */ +#define IPSET_DST 0x02 /* Destination match/add */ +#define IPSET_MATCH_INV 0x04 /* Inverse matching */ + +struct xt_set_info_v0 { + ip_set_id_t index; + union { + __u32 flags[IPSET_DIM_MAX + 1]; + struct { + __u32 __flags[IPSET_DIM_MAX]; + __u8 dim; + __u8 flags; + } compat; + } u; +}; + +/* match and target infos */ +struct xt_set_info_match_v0 { + struct xt_set_info_v0 match_set; +}; + +struct xt_set_info_target_v0 { + struct xt_set_info_v0 add_set; + struct xt_set_info_v0 del_set; +}; + +/* Revision 1 match and target */ + +struct xt_set_info { + ip_set_id_t index; + __u8 dim; + __u8 flags; +}; + +/* match and target infos */ +struct xt_set_info_match_v1 { + struct xt_set_info match_set; +}; + +struct xt_set_info_target_v1 { + struct xt_set_info add_set; + struct xt_set_info del_set; +}; + +/* Revision 2 target */ + +struct xt_set_info_target_v2 { + struct xt_set_info add_set; + struct xt_set_info del_set; + __u32 flags; + __u32 timeout; +}; + +#endif /*_XT_SET_H*/ diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h new file mode 100644 index 00000000..63594564 --- /dev/null +++ b/include/linux/netfilter/xt_socket.h @@ -0,0 +1,20 @@ +#ifndef _XT_SOCKET_H +#define _XT_SOCKET_H + +#include <linux/types.h> + +enum { + XT_SOCKET_TRANSPARENT = 1 << 0, +}; + +struct xt_socket_mtinfo1 { + __u8 flags; +}; + +void xt_socket_put_sk(struct sock *sk); +struct sock *xt_socket_get4_sk(const struct sk_buff *skb, + struct xt_action_param *par); +struct sock *xt_socket_get6_sk(const struct sk_buff *skb, + struct xt_action_param *par); + +#endif /* _XT_SOCKET_H */ diff --git a/include/linux/netfilter/xt_state.h b/include/linux/netfilter/xt_state.h new file mode 100644 index 00000000..7b32de88 --- /dev/null +++ b/include/linux/netfilter/xt_state.h @@ -0,0 +1,12 @@ +#ifndef _XT_STATE_H +#define _XT_STATE_H + +#define XT_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1)) +#define XT_STATE_INVALID (1 << 0) + +#define XT_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 1)) + +struct xt_state_info { + unsigned int statemask; +}; +#endif /*_XT_STATE_H*/ diff --git a/include/linux/netfilter/xt_statistic.h 
b/include/linux/netfilter/xt_statistic.h new file mode 100644 index 00000000..4e983ef0 --- /dev/null +++ b/include/linux/netfilter/xt_statistic.h @@ -0,0 +1,36 @@ +#ifndef _XT_STATISTIC_H +#define _XT_STATISTIC_H + +#include <linux/types.h> + +enum xt_statistic_mode { + XT_STATISTIC_MODE_RANDOM, + XT_STATISTIC_MODE_NTH, + __XT_STATISTIC_MODE_MAX +}; +#define XT_STATISTIC_MODE_MAX (__XT_STATISTIC_MODE_MAX - 1) + +enum xt_statistic_flags { + XT_STATISTIC_INVERT = 0x1, +}; +#define XT_STATISTIC_MASK 0x1 + +struct xt_statistic_priv; + +struct xt_statistic_info { + __u16 mode; + __u16 flags; + union { + struct { + __u32 probability; + } random; + struct { + __u32 every; + __u32 packet; + __u32 count; /* unused */ + } nth; + } u; + struct xt_statistic_priv *master __attribute__((aligned(8))); +}; + +#endif /* _XT_STATISTIC_H */ diff --git a/include/linux/netfilter/xt_string.h b/include/linux/netfilter/xt_string.h new file mode 100644 index 00000000..235347c0 --- /dev/null +++ b/include/linux/netfilter/xt_string.h @@ -0,0 +1,34 @@ +#ifndef _XT_STRING_H +#define _XT_STRING_H + +#include <linux/types.h> + +#define XT_STRING_MAX_PATTERN_SIZE 128 +#define XT_STRING_MAX_ALGO_NAME_SIZE 16 + +enum { + XT_STRING_FLAG_INVERT = 0x01, + XT_STRING_FLAG_IGNORECASE = 0x02 +}; + +struct xt_string_info { + __u16 from_offset; + __u16 to_offset; + char algo[XT_STRING_MAX_ALGO_NAME_SIZE]; + char pattern[XT_STRING_MAX_PATTERN_SIZE]; + __u8 patlen; + union { + struct { + __u8 invert; + } v0; + + struct { + __u8 flags; + } v1; + } u; + + /* Used internally by the kernel */ + struct ts_config __attribute__((aligned(8))) *config; +}; + +#endif /*_XT_STRING_H*/ diff --git a/include/linux/netfilter/xt_tcpmss.h b/include/linux/netfilter/xt_tcpmss.h new file mode 100644 index 00000000..fbac56b9 --- /dev/null +++ b/include/linux/netfilter/xt_tcpmss.h @@ -0,0 +1,11 @@ +#ifndef _XT_TCPMSS_MATCH_H +#define _XT_TCPMSS_MATCH_H + +#include <linux/types.h> + +struct xt_tcpmss_match_info { + __u16 mss_min, mss_max; + __u8 invert; +}; + +#endif /*_XT_TCPMSS_MATCH_H*/ diff --git a/include/linux/netfilter/xt_tcpudp.h b/include/linux/netfilter/xt_tcpudp.h new file mode 100644 index 00000000..38aa7b39 --- /dev/null +++ b/include/linux/netfilter/xt_tcpudp.h @@ -0,0 +1,36 @@ +#ifndef _XT_TCPUDP_H +#define _XT_TCPUDP_H + +#include <linux/types.h> + +/* TCP matching stuff */ +struct xt_tcp { + __u16 spts[2]; /* Source port range. */ + __u16 dpts[2]; /* Destination port range. */ + __u8 option; /* TCP Option iff non-zero*/ + __u8 flg_mask; /* TCP flags mask byte */ + __u8 flg_cmp; /* TCP flags compare byte */ + __u8 invflags; /* Inverse flags */ +}; + +/* Values for "inv" field in struct ipt_tcp. */ +#define XT_TCP_INV_SRCPT 0x01 /* Invert the sense of source ports. */ +#define XT_TCP_INV_DSTPT 0x02 /* Invert the sense of dest ports. */ +#define XT_TCP_INV_FLAGS 0x04 /* Invert the sense of TCP flags. */ +#define XT_TCP_INV_OPTION 0x08 /* Invert the sense of option test. */ +#define XT_TCP_INV_MASK 0x0F /* All possible flags. */ + +/* UDP matching stuff */ +struct xt_udp { + __u16 spts[2]; /* Source port range. */ + __u16 dpts[2]; /* Destination port range. */ + __u8 invflags; /* Inverse flags */ +}; + +/* Values for "invflags" field in struct ipt_udp. */ +#define XT_UDP_INV_SRCPT 0x01 /* Invert the sense of source ports. */ +#define XT_UDP_INV_DSTPT 0x02 /* Invert the sense of dest ports. */ +#define XT_UDP_INV_MASK 0x03 /* All possible flags. 
*/ + + +#endif diff --git a/include/linux/netfilter/xt_time.h b/include/linux/netfilter/xt_time.h new file mode 100644 index 00000000..7c37fac5 --- /dev/null +++ b/include/linux/netfilter/xt_time.h @@ -0,0 +1,27 @@ +#ifndef _XT_TIME_H +#define _XT_TIME_H 1 + +#include <linux/types.h> + +struct xt_time_info { + __u32 date_start; + __u32 date_stop; + __u32 daytime_start; + __u32 daytime_stop; + __u32 monthdays_match; + __u8 weekdays_match; + __u8 flags; +}; + +enum { + /* Match against local time (instead of UTC) */ + XT_TIME_LOCAL_TZ = 1 << 0, + + /* Shortcuts */ + XT_TIME_ALL_MONTHDAYS = 0xFFFFFFFE, + XT_TIME_ALL_WEEKDAYS = 0xFE, + XT_TIME_MIN_DAYTIME = 0, + XT_TIME_MAX_DAYTIME = 24 * 60 * 60 - 1, +}; + +#endif /* _XT_TIME_H */ diff --git a/include/linux/netfilter/xt_u32.h b/include/linux/netfilter/xt_u32.h new file mode 100644 index 00000000..04d1bfea --- /dev/null +++ b/include/linux/netfilter/xt_u32.h @@ -0,0 +1,42 @@ +#ifndef _XT_U32_H +#define _XT_U32_H 1 + +#include <linux/types.h> + +enum xt_u32_ops { + XT_U32_AND, + XT_U32_LEFTSH, + XT_U32_RIGHTSH, + XT_U32_AT, +}; + +struct xt_u32_location_element { + __u32 number; + __u8 nextop; +}; + +struct xt_u32_value_element { + __u32 min; + __u32 max; +}; + +/* + * Any way to allow for an arbitrary number of elements? + * For now, I settle with a limit of 10 each. + */ +#define XT_U32_MAXSIZE 10 + +struct xt_u32_test { + struct xt_u32_location_element location[XT_U32_MAXSIZE+1]; + struct xt_u32_value_element value[XT_U32_MAXSIZE+1]; + __u8 nnums; + __u8 nvalues; +}; + +struct xt_u32 { + struct xt_u32_test tests[XT_U32_MAXSIZE+1]; + __u8 ntests; + __u8 invert; +}; + +#endif /* _XT_U32_H */ diff --git a/include/linux/netfilter_arp.h b/include/linux/netfilter_arp.h new file mode 100644 index 00000000..92bc6ddc --- /dev/null +++ b/include/linux/netfilter_arp.h @@ -0,0 +1,19 @@ +#ifndef __LINUX_ARP_NETFILTER_H +#define __LINUX_ARP_NETFILTER_H + +/* ARP-specific defines for netfilter. + * (C)2002 Rusty Russell IBM -- This code is GPL. + */ + +#include <linux/netfilter.h> + +/* There is no PF_ARP. */ +#define NF_ARP 0 + +/* ARP Hooks */ +#define NF_ARP_IN 0 +#define NF_ARP_OUT 1 +#define NF_ARP_FORWARD 2 +#define NF_ARP_NUMHOOKS 3 + +#endif /* __LINUX_ARP_NETFILTER_H */ diff --git a/include/linux/netfilter_arp/Kbuild b/include/linux/netfilter_arp/Kbuild new file mode 100644 index 00000000..b27439c7 --- /dev/null +++ b/include/linux/netfilter_arp/Kbuild @@ -0,0 +1,2 @@ +header-y += arp_tables.h +header-y += arpt_mangle.h diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h new file mode 100644 index 00000000..e08565d4 --- /dev/null +++ b/include/linux/netfilter_arp/arp_tables.h @@ -0,0 +1,278 @@ +/* + * Format of an ARP firewall descriptor + * + * src, tgt, src_mask, tgt_mask, arpop, arpop_mask are always stored in + * network byte order. + * flags are stored in host byte order (of course). 
+ */ + +#ifndef _ARPTABLES_H +#define _ARPTABLES_H + +#ifdef __KERNEL__ +#include <linux/if.h> +#include <linux/in.h> +#include <linux/if_arp.h> +#include <linux/skbuff.h> +#endif +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/netfilter_arp.h> + +#include <linux/netfilter/x_tables.h> + +#ifndef __KERNEL__ +#define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN +#define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN +#define arpt_entry_target xt_entry_target +#define arpt_standard_target xt_standard_target +#define arpt_error_target xt_error_target +#define ARPT_CONTINUE XT_CONTINUE +#define ARPT_RETURN XT_RETURN +#define arpt_counters_info xt_counters_info +#define arpt_counters xt_counters +#define ARPT_STANDARD_TARGET XT_STANDARD_TARGET +#define ARPT_ERROR_TARGET XT_ERROR_TARGET +#define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \ + XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args) +#endif + +#define ARPT_DEV_ADDR_LEN_MAX 16 + +struct arpt_devaddr_info { + char addr[ARPT_DEV_ADDR_LEN_MAX]; + char mask[ARPT_DEV_ADDR_LEN_MAX]; +}; + +/* Yes, Virginia, you have to zero the padding. */ +struct arpt_arp { + /* Source and target IP addr */ + struct in_addr src, tgt; + /* Mask for src and target IP addr */ + struct in_addr smsk, tmsk; + + /* Device hw address length, src+target device addresses */ + __u8 arhln, arhln_mask; + struct arpt_devaddr_info src_devaddr; + struct arpt_devaddr_info tgt_devaddr; + + /* ARP operation code. */ + __be16 arpop, arpop_mask; + + /* ARP hardware address and protocol address format. */ + __be16 arhrd, arhrd_mask; + __be16 arpro, arpro_mask; + + /* The protocol address length is only accepted if it is 4 + * so there is no use in offering a way to do filtering on it. + */ + + char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; + unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; + + /* Flags word */ + __u8 flags; + /* Inverse flags */ + __u16 invflags; +}; + +/* Values for "flag" field in struct arpt_ip (general arp structure). + * No flags defined yet. + */ +#define ARPT_F_MASK 0x00 /* All possible flag bits mask. */ + +/* Values for "inv" field in struct arpt_arp. */ +#define ARPT_INV_VIA_IN 0x0001 /* Invert the sense of IN IFACE. */ +#define ARPT_INV_VIA_OUT 0x0002 /* Invert the sense of OUT IFACE */ +#define ARPT_INV_SRCIP 0x0004 /* Invert the sense of SRC IP. */ +#define ARPT_INV_TGTIP 0x0008 /* Invert the sense of TGT IP. */ +#define ARPT_INV_SRCDEVADDR 0x0010 /* Invert the sense of SRC DEV ADDR. */ +#define ARPT_INV_TGTDEVADDR 0x0020 /* Invert the sense of TGT DEV ADDR. */ +#define ARPT_INV_ARPOP 0x0040 /* Invert the sense of ARP OP. */ +#define ARPT_INV_ARPHRD 0x0080 /* Invert the sense of ARP HRD. */ +#define ARPT_INV_ARPPRO 0x0100 /* Invert the sense of ARP PRO. */ +#define ARPT_INV_ARPHLN 0x0200 /* Invert the sense of ARP HLN. */ +#define ARPT_INV_MASK 0x03FF /* All possible flag bits mask. */ + +/* This structure defines each of the firewall rules. Consists of 3 + parts which are 1) general ARP header stuff 2) match specific + stuff 3) the target to perform if the rule matches */ +struct arpt_entry +{ + struct arpt_arp arp; + + /* Size of arpt_entry + matches */ + __u16 target_offset; + /* Size of arpt_entry + matches + target */ + __u16 next_offset; + + /* Back pointer */ + unsigned int comefrom; + + /* Packet and byte counters. */ + struct xt_counters counters; + + /* The matches (if any), then the target. 
*/ + unsigned char elems[0]; +}; + +/* + * New IP firewall options for [gs]etsockopt at the RAW IP level. + * Unlike BSD Linux inherits IP options so you don't have to use a raw + * socket for this. Instead we check rights in the calls. + * + * ATTENTION: check linux/in.h before adding new number here. + */ +#define ARPT_BASE_CTL 96 + +#define ARPT_SO_SET_REPLACE (ARPT_BASE_CTL) +#define ARPT_SO_SET_ADD_COUNTERS (ARPT_BASE_CTL + 1) +#define ARPT_SO_SET_MAX ARPT_SO_SET_ADD_COUNTERS + +#define ARPT_SO_GET_INFO (ARPT_BASE_CTL) +#define ARPT_SO_GET_ENTRIES (ARPT_BASE_CTL + 1) +/* #define ARPT_SO_GET_REVISION_MATCH (APRT_BASE_CTL + 2) */ +#define ARPT_SO_GET_REVISION_TARGET (ARPT_BASE_CTL + 3) +#define ARPT_SO_GET_MAX (ARPT_SO_GET_REVISION_TARGET) + +/* The argument to ARPT_SO_GET_INFO */ +struct arpt_getinfo { + /* Which table: caller fills this in. */ + char name[XT_TABLE_MAXNAMELEN]; + + /* Kernel fills these in. */ + /* Which hook entry points are valid: bitmask */ + unsigned int valid_hooks; + + /* Hook entry points: one per netfilter hook. */ + unsigned int hook_entry[NF_ARP_NUMHOOKS]; + + /* Underflow points. */ + unsigned int underflow[NF_ARP_NUMHOOKS]; + + /* Number of entries */ + unsigned int num_entries; + + /* Size of entries. */ + unsigned int size; +}; + +/* The argument to ARPT_SO_SET_REPLACE. */ +struct arpt_replace { + /* Which table. */ + char name[XT_TABLE_MAXNAMELEN]; + + /* Which hook entry points are valid: bitmask. You can't + change this. */ + unsigned int valid_hooks; + + /* Number of entries */ + unsigned int num_entries; + + /* Total size of new entries */ + unsigned int size; + + /* Hook entry points. */ + unsigned int hook_entry[NF_ARP_NUMHOOKS]; + + /* Underflow points. */ + unsigned int underflow[NF_ARP_NUMHOOKS]; + + /* Information about old entries: */ + /* Number of counters (must be equal to current number of entries). */ + unsigned int num_counters; + /* The old entries' counters. */ + struct xt_counters __user *counters; + + /* The entries (hang off end: not really an array). */ + struct arpt_entry entries[0]; +}; + +/* The argument to ARPT_SO_GET_ENTRIES. */ +struct arpt_get_entries { + /* Which table: user fills this in. */ + char name[XT_TABLE_MAXNAMELEN]; + + /* User fills this in: total entry size. */ + unsigned int size; + + /* The entries. */ + struct arpt_entry entrytable[0]; +}; + +/* Helper functions */ +static __inline__ struct xt_entry_target *arpt_get_target(struct arpt_entry *e) +{ + return (void *)e + e->target_offset; +} + +/* + * Main firewall chains definitions and global var's definitions. + */ +#ifdef __KERNEL__ + +/* Standard entry. 
*/ +struct arpt_standard { + struct arpt_entry entry; + struct xt_standard_target target; +}; + +struct arpt_error { + struct arpt_entry entry; + struct xt_error_target target; +}; + +#define ARPT_ENTRY_INIT(__size) \ +{ \ + .target_offset = sizeof(struct arpt_entry), \ + .next_offset = (__size), \ +} + +#define ARPT_STANDARD_INIT(__verdict) \ +{ \ + .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \ + .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ + sizeof(struct xt_standard_target)), \ + .target.verdict = -(__verdict) - 1, \ +} + +#define ARPT_ERROR_INIT \ +{ \ + .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \ + .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ + sizeof(struct xt_error_target)), \ + .target.errorname = "ERROR", \ +} + +extern void *arpt_alloc_initial_table(const struct xt_table *); +extern struct xt_table *arpt_register_table(struct net *net, + const struct xt_table *table, + const struct arpt_replace *repl); +extern void arpt_unregister_table(struct xt_table *table); +extern unsigned int arpt_do_table(struct sk_buff *skb, + unsigned int hook, + const struct net_device *in, + const struct net_device *out, + struct xt_table *table); + +#ifdef CONFIG_COMPAT +#include <net/compat.h> + +struct compat_arpt_entry { + struct arpt_arp arp; + __u16 target_offset; + __u16 next_offset; + compat_uint_t comefrom; + struct compat_xt_counters counters; + unsigned char elems[0]; +}; + +static inline struct xt_entry_target * +compat_arpt_get_target(struct compat_arpt_entry *e) +{ + return (void *)e + e->target_offset; +} + +#endif /* CONFIG_COMPAT */ +#endif /*__KERNEL__*/ +#endif /* _ARPTABLES_H */ diff --git a/include/linux/netfilter_arp/arpt_mangle.h b/include/linux/netfilter_arp/arpt_mangle.h new file mode 100644 index 00000000..250f5029 --- /dev/null +++ b/include/linux/netfilter_arp/arpt_mangle.h @@ -0,0 +1,26 @@ +#ifndef _ARPT_MANGLE_H +#define _ARPT_MANGLE_H +#include <linux/netfilter_arp/arp_tables.h> + +#define ARPT_MANGLE_ADDR_LEN_MAX sizeof(struct in_addr) +struct arpt_mangle +{ + char src_devaddr[ARPT_DEV_ADDR_LEN_MAX]; + char tgt_devaddr[ARPT_DEV_ADDR_LEN_MAX]; + union { + struct in_addr src_ip; + } u_s; + union { + struct in_addr tgt_ip; + } u_t; + u_int8_t flags; + int target; +}; + +#define ARPT_MANGLE_SDEV 0x01 +#define ARPT_MANGLE_TDEV 0x02 +#define ARPT_MANGLE_SIP 0x04 +#define ARPT_MANGLE_TIP 0x08 +#define ARPT_MANGLE_MASK 0x0f + +#endif /* _ARPT_MANGLE_H */ diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h new file mode 100644 index 00000000..31d2844e --- /dev/null +++ b/include/linux/netfilter_bridge.h @@ -0,0 +1,122 @@ +#ifndef __LINUX_BRIDGE_NETFILTER_H +#define __LINUX_BRIDGE_NETFILTER_H + +/* bridge-specific defines for netfilter. + */ + +#include <linux/netfilter.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/if_pppox.h> + +/* Bridge Hooks */ +/* After promisc drops, checksum checks. */ +#define NF_BR_PRE_ROUTING 0 +/* If the packet is destined for this box. */ +#define NF_BR_LOCAL_IN 1 +/* If the packet is destined for another interface. */ +#define NF_BR_FORWARD 2 +/* Packets coming from a local process. */ +#define NF_BR_LOCAL_OUT 3 +/* Packets about to hit the wire. 
*/ +#define NF_BR_POST_ROUTING 4 +/* Not really a hook, but used for the ebtables broute table */ +#define NF_BR_BROUTING 5 +#define NF_BR_NUMHOOKS 6 + +#ifdef __KERNEL__ + +enum nf_br_hook_priorities { + NF_BR_PRI_FIRST = INT_MIN, + NF_BR_PRI_NAT_DST_BRIDGED = -300, + NF_BR_PRI_FILTER_BRIDGED = -200, + NF_BR_PRI_BRNF = 0, + NF_BR_PRI_NAT_DST_OTHER = 100, + NF_BR_PRI_FILTER_OTHER = 200, + NF_BR_PRI_NAT_SRC = 300, + NF_BR_PRI_LAST = INT_MAX, +}; + +#ifdef CONFIG_BRIDGE_NETFILTER + +#define BRNF_PKT_TYPE 0x01 +#define BRNF_BRIDGED_DNAT 0x02 +#define BRNF_BRIDGED 0x04 +#define BRNF_NF_BRIDGE_PREROUTING 0x08 +#define BRNF_8021Q 0x10 +#define BRNF_PPPoE 0x20 + +/* Only used in br_forward.c */ +extern int nf_bridge_copy_header(struct sk_buff *skb); +static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) +{ + if (skb->nf_bridge && + skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)) + return nf_bridge_copy_header(skb); + return 0; +} + +static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) +{ + switch (skb->protocol) { + case __cpu_to_be16(ETH_P_8021Q): + return VLAN_HLEN; + case __cpu_to_be16(ETH_P_PPP_SES): + return PPPOE_SES_HLEN; + default: + return 0; + } +} + +static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) +{ + if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE)) + return PPPOE_SES_HLEN; + return 0; +} + +extern int br_handle_frame_finish(struct sk_buff *skb); +/* Only used in br_device.c */ +static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb) +{ + struct nf_bridge_info *nf_bridge = skb->nf_bridge; + + skb_pull(skb, ETH_HLEN); + nf_bridge->mask ^= BRNF_BRIDGED_DNAT; + skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), + skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); + skb->dev = nf_bridge->physindev; + return br_handle_frame_finish(skb); +} + +/* This is called by the IP fragmenting code and it ensures there is + * enough room for the encapsulating header (if there is one). 
*/ +static inline unsigned int nf_bridge_pad(const struct sk_buff *skb) +{ + if (skb->nf_bridge) + return nf_bridge_encap_header_len(skb); + return 0; +} + +struct bridge_skb_cb { + union { + __be32 ipv4; + } daddr; +}; + +static inline void br_drop_fake_rtable(struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + + if (dst && (dst->flags & DST_FAKE_RTABLE)) + skb_dst_drop(skb); +} + +#else +#define nf_bridge_maybe_copy_header(skb) (0) +#define nf_bridge_pad(skb) (0) +#define br_drop_fake_rtable(skb) do { } while (0) +#endif /* CONFIG_BRIDGE_NETFILTER */ + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/netfilter_bridge/Kbuild b/include/linux/netfilter_bridge/Kbuild new file mode 100644 index 00000000..e48f1a3f --- /dev/null +++ b/include/linux/netfilter_bridge/Kbuild @@ -0,0 +1,18 @@ +header-y += ebt_802_3.h +header-y += ebt_among.h +header-y += ebt_arp.h +header-y += ebt_arpreply.h +header-y += ebt_ip.h +header-y += ebt_ip6.h +header-y += ebt_limit.h +header-y += ebt_log.h +header-y += ebt_mark_m.h +header-y += ebt_mark_t.h +header-y += ebt_nat.h +header-y += ebt_nflog.h +header-y += ebt_pkttype.h +header-y += ebt_redirect.h +header-y += ebt_stp.h +header-y += ebt_ulog.h +header-y += ebt_vlan.h +header-y += ebtables.h diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h new file mode 100644 index 00000000..be5be157 --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_802_3.h @@ -0,0 +1,70 @@ +#ifndef __LINUX_BRIDGE_EBT_802_3_H +#define __LINUX_BRIDGE_EBT_802_3_H + +#include <linux/types.h> + +#define EBT_802_3_SAP 0x01 +#define EBT_802_3_TYPE 0x02 + +#define EBT_802_3_MATCH "802_3" + +/* + * If frame has DSAP/SSAP value 0xaa you must check the SNAP type + * to discover what kind of packet we're carrying. + */ +#define CHECK_TYPE 0xaa + +/* + * Control field may be one or two bytes. If the first byte has + * the value 0x03 then the entire length is one byte, otherwise it is two. + * One byte controls are used in Unnumbered Information frames. + * Two byte controls are used in Numbered Information frames. + */ +#define IS_UI 0x03 + +#define EBT_802_3_MASK (EBT_802_3_SAP | EBT_802_3_TYPE | EBT_802_3) + +/* ui has one byte ctrl, ni has two */ +struct hdr_ui { + __u8 dsap; + __u8 ssap; + __u8 ctrl; + __u8 orig[3]; + __be16 type; +}; + +struct hdr_ni { + __u8 dsap; + __u8 ssap; + __be16 ctrl; + __u8 orig[3]; + __be16 type; +}; + +struct ebt_802_3_hdr { + __u8 daddr[6]; + __u8 saddr[6]; + __be16 len; + union { + struct hdr_ui ui; + struct hdr_ni ni; + } llc; +}; + +#ifdef __KERNEL__ +#include <linux/skbuff.h> + +static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb) +{ + return (struct ebt_802_3_hdr *)skb_mac_header(skb); +} +#endif + +struct ebt_802_3_info { + __u8 sap; + __be16 type; + __u8 bitmask; + __u8 invflags; +}; + +#endif diff --git a/include/linux/netfilter_bridge/ebt_among.h b/include/linux/netfilter_bridge/ebt_among.h new file mode 100644 index 00000000..bd4e3ad0 --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_among.h @@ -0,0 +1,64 @@ +#ifndef __LINUX_BRIDGE_EBT_AMONG_H +#define __LINUX_BRIDGE_EBT_AMONG_H + +#include <linux/types.h> + +#define EBT_AMONG_DST 0x01 +#define EBT_AMONG_SRC 0x02 + +/* Grzegorz Borowiak <grzes@gnu.univ.gda.pl> 2003 + * + * Write-once-read-many hash table, used for checking if a given + * MAC address belongs to a set or not and possibly for checking + * if it is related with a given IPv4 address. + * + * The hash value of an address is its last byte. 
+ * + * In real-world ethernet addresses, values of the last byte are + * evenly distributed and there is no need to consider other bytes. + * It would only slow the routines down. + * + * For MAC address comparison speedup reasons, we introduce a trick. + * MAC address is mapped onto an array of two 32-bit integers. + * This pair of integers is compared with MAC addresses in the + * hash table, which are stored also in form of pairs of integers + * (in `cmp' array). This is quick as it requires only two elementary + * number comparisons in worst case. Further, we take advantage of + * fact that entropy of 3 last bytes of address is larger than entropy + * of 3 first bytes. So first we compare 4 last bytes of addresses and + * if they are the same we compare 2 first. + * + * Yes, it is a memory overhead, but in 2003 AD, who cares? + */ + +struct ebt_mac_wormhash_tuple { + __u32 cmp[2]; + __be32 ip; +}; + +struct ebt_mac_wormhash { + int table[257]; + int poolsize; + struct ebt_mac_wormhash_tuple pool[0]; +}; + +#define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \ + + (x)->poolsize * sizeof(struct ebt_mac_wormhash_tuple) : 0) + +struct ebt_among_info { + int wh_dst_ofs; + int wh_src_ofs; + int bitmask; +}; + +#define EBT_AMONG_DST_NEG 0x1 +#define EBT_AMONG_SRC_NEG 0x2 + +#define ebt_among_wh_dst(x) ((x)->wh_dst_ofs ? \ + (struct ebt_mac_wormhash*)((char*)(x) + (x)->wh_dst_ofs) : NULL) +#define ebt_among_wh_src(x) ((x)->wh_src_ofs ? \ + (struct ebt_mac_wormhash*)((char*)(x) + (x)->wh_src_ofs) : NULL) + +#define EBT_AMONG_MATCH "among" + +#endif diff --git a/include/linux/netfilter_bridge/ebt_arp.h b/include/linux/netfilter_bridge/ebt_arp.h new file mode 100644 index 00000000..522f3e42 --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_arp.h @@ -0,0 +1,36 @@ +#ifndef __LINUX_BRIDGE_EBT_ARP_H +#define __LINUX_BRIDGE_EBT_ARP_H + +#include <linux/types.h> + +#define EBT_ARP_OPCODE 0x01 +#define EBT_ARP_HTYPE 0x02 +#define EBT_ARP_PTYPE 0x04 +#define EBT_ARP_SRC_IP 0x08 +#define EBT_ARP_DST_IP 0x10 +#define EBT_ARP_SRC_MAC 0x20 +#define EBT_ARP_DST_MAC 0x40 +#define EBT_ARP_GRAT 0x80 +#define EBT_ARP_MASK (EBT_ARP_OPCODE | EBT_ARP_HTYPE | EBT_ARP_PTYPE | \ + EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC | \ + EBT_ARP_GRAT) +#define EBT_ARP_MATCH "arp" + +struct ebt_arp_info +{ + __be16 htype; + __be16 ptype; + __be16 opcode; + __be32 saddr; + __be32 smsk; + __be32 daddr; + __be32 dmsk; + unsigned char smaddr[ETH_ALEN]; + unsigned char smmsk[ETH_ALEN]; + unsigned char dmaddr[ETH_ALEN]; + unsigned char dmmsk[ETH_ALEN]; + __u8 bitmask; + __u8 invflags; +}; + +#endif diff --git a/include/linux/netfilter_bridge/ebt_arpreply.h b/include/linux/netfilter_bridge/ebt_arpreply.h new file mode 100644 index 00000000..7e77896e --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_arpreply.h @@ -0,0 +1,10 @@ +#ifndef __LINUX_BRIDGE_EBT_ARPREPLY_H +#define __LINUX_BRIDGE_EBT_ARPREPLY_H + +struct ebt_arpreply_info { + unsigned char mac[ETH_ALEN]; + int target; +}; +#define EBT_ARPREPLY_TARGET "arpreply" + +#endif diff --git a/include/linux/netfilter_bridge/ebt_ip.h b/include/linux/netfilter_bridge/ebt_ip.h new file mode 100644 index 00000000..c4bbc41b --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_ip.h @@ -0,0 +1,44 @@ +/* + * ebt_ip + * + * Authors: + * Bart De Schuymer <bart.de.schuymer@pandora.be> + * + * April, 2002 + * + * Changes: + * added ip-sport and ip-dport + * Innominate Security Technologies AG <mhopf@innominate.com> + * September, 2002 + */ + 
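[Editor's aside, illustrative only and not part of the patch hunks around it: the bridge-netfilter match headers introduced here (ebt_ip just below, ebt_ip6, ebt_stp, ...) share one convention: a rule's bitmask field selects which tests are requested, and the matching bit in invflags negates the outcome of that test. The standalone sketch below assumes a trimmed copy of the EBT_IP_SOURCE/EBT_IP_SPORT flags and of struct ebt_ip_info as defined further down in ebt_ip.h; the names ex_ip_info, ex_ip_match and test() are invented for the example, and addresses are kept in host byte order for brevity where the real header uses __be32.]

/*
 * Simplified userspace illustration of the bitmask/invflags convention
 * used by the ebtables match headers in this series.  This is a sketch,
 * not the kernel's ebt_ip match implementation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_IP_SOURCE 0x01            /* mirrors EBT_IP_SOURCE */
#define EX_IP_SPORT  0x10            /* mirrors EBT_IP_SPORT  */

struct ex_ip_info {                  /* trimmed-down ebt_ip_info */
	uint32_t saddr, smsk;        /* source address and mask (host order here) */
	uint8_t  bitmask, invflags;  /* requested tests / negated tests */
	uint16_t sport[2];           /* source port range [lo, hi] */
};

/* A test only counts if its bit is requested; its result is flipped
 * when the same bit is set in invflags. */
static bool test(uint8_t flag, bool hit, const struct ex_ip_info *info)
{
	if (!(info->bitmask & flag))
		return true;                        /* test not requested */
	return hit ^ !!(info->invflags & flag);     /* apply inversion bit */
}

static bool ex_ip_match(const struct ex_ip_info *info,
			uint32_t saddr, uint16_t sport)
{
	return test(EX_IP_SOURCE, (saddr & info->smsk) == info->saddr, info) &&
	       test(EX_IP_SPORT,
		    sport >= info->sport[0] && sport <= info->sport[1], info);
}

int main(void)
{
	/* rule: source in 10.0.0.0/8, source port NOT equal to 22 */
	struct ex_ip_info info = {
		.saddr    = 0x0a000000, .smsk = 0xff000000,
		.bitmask  = EX_IP_SOURCE | EX_IP_SPORT,
		.invflags = EX_IP_SPORT,
		.sport    = { 22, 22 },
	};

	printf("%d\n", ex_ip_match(&info, 0x0a000001, 80)); /* 1: in subnet, port != 22 */
	printf("%d\n", ex_ip_match(&info, 0x0a000001, 22)); /* 0: port 22 inverted away */
	return 0;
}

[The XOR-against-invflags step above is the same pattern the kernel-side code wraps in the FWINV() helper macro defined later in this series, in ebtables.h.]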
+#ifndef __LINUX_BRIDGE_EBT_IP_H +#define __LINUX_BRIDGE_EBT_IP_H + +#include <linux/types.h> + +#define EBT_IP_SOURCE 0x01 +#define EBT_IP_DEST 0x02 +#define EBT_IP_TOS 0x04 +#define EBT_IP_PROTO 0x08 +#define EBT_IP_SPORT 0x10 +#define EBT_IP_DPORT 0x20 +#define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO |\ + EBT_IP_SPORT | EBT_IP_DPORT ) +#define EBT_IP_MATCH "ip" + +/* the same values are used for the invflags */ +struct ebt_ip_info { + __be32 saddr; + __be32 daddr; + __be32 smsk; + __be32 dmsk; + __u8 tos; + __u8 protocol; + __u8 bitmask; + __u8 invflags; + __u16 sport[2]; + __u16 dport[2]; +}; + +#endif diff --git a/include/linux/netfilter_bridge/ebt_ip6.h b/include/linux/netfilter_bridge/ebt_ip6.h new file mode 100644 index 00000000..42b88968 --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_ip6.h @@ -0,0 +1,50 @@ +/* + * ebt_ip6 + * + * Authors: + * Kuo-Lang Tseng <kuo-lang.tseng@intel.com> + * Manohar Castelino <manohar.r.castelino@intel.com> + * + * Jan 11, 2008 + * + */ + +#ifndef __LINUX_BRIDGE_EBT_IP6_H +#define __LINUX_BRIDGE_EBT_IP6_H + +#include <linux/types.h> + +#define EBT_IP6_SOURCE 0x01 +#define EBT_IP6_DEST 0x02 +#define EBT_IP6_TCLASS 0x04 +#define EBT_IP6_PROTO 0x08 +#define EBT_IP6_SPORT 0x10 +#define EBT_IP6_DPORT 0x20 +#define EBT_IP6_ICMP6 0x40 + +#define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\ + EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT | \ + EBT_IP6_ICMP6) +#define EBT_IP6_MATCH "ip6" + +/* the same values are used for the invflags */ +struct ebt_ip6_info { + struct in6_addr saddr; + struct in6_addr daddr; + struct in6_addr smsk; + struct in6_addr dmsk; + __u8 tclass; + __u8 protocol; + __u8 bitmask; + __u8 invflags; + union { + __u16 sport[2]; + __u8 icmpv6_type[2]; + }; + union { + __u16 dport[2]; + __u8 icmpv6_code[2]; + }; +}; + +#endif diff --git a/include/linux/netfilter_bridge/ebt_limit.h b/include/linux/netfilter_bridge/ebt_limit.h new file mode 100644 index 00000000..66d80b30 --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_limit.h @@ -0,0 +1,24 @@ +#ifndef __LINUX_BRIDGE_EBT_LIMIT_H +#define __LINUX_BRIDGE_EBT_LIMIT_H + +#include <linux/types.h> + +#define EBT_LIMIT_MATCH "limit" + +/* timings are in milliseconds. */ +#define EBT_LIMIT_SCALE 10000 + +/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490 + seconds, or one every 59 hours. */ + +struct ebt_limit_info { + __u32 avg; /* Average secs between packets * scale */ + __u32 burst; /* Period multiplier for upper limit. 
*/ + + /* Used internally by the kernel */ + unsigned long prev; + __u32 credit; + __u32 credit_cap, cost; +}; + +#endif diff --git a/include/linux/netfilter_bridge/ebt_log.h b/include/linux/netfilter_bridge/ebt_log.h new file mode 100644 index 00000000..7e7f1d1f --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_log.h @@ -0,0 +1,20 @@ +#ifndef __LINUX_BRIDGE_EBT_LOG_H +#define __LINUX_BRIDGE_EBT_LOG_H + +#include <linux/types.h> + +#define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */ +#define EBT_LOG_ARP 0x02 +#define EBT_LOG_NFLOG 0x04 +#define EBT_LOG_IP6 0x08 +#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP | EBT_LOG_IP6) +#define EBT_LOG_PREFIX_SIZE 30 +#define EBT_LOG_WATCHER "log" + +struct ebt_log_info { + __u8 loglevel; + __u8 prefix[EBT_LOG_PREFIX_SIZE]; + __u32 bitmask; +}; + +#endif diff --git a/include/linux/netfilter_bridge/ebt_mark_m.h b/include/linux/netfilter_bridge/ebt_mark_m.h new file mode 100644 index 00000000..410f9e5a --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_mark_m.h @@ -0,0 +1,16 @@ +#ifndef __LINUX_BRIDGE_EBT_MARK_M_H +#define __LINUX_BRIDGE_EBT_MARK_M_H + +#include <linux/types.h> + +#define EBT_MARK_AND 0x01 +#define EBT_MARK_OR 0x02 +#define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR) +struct ebt_mark_m_info { + unsigned long mark, mask; + __u8 invert; + __u8 bitmask; +}; +#define EBT_MARK_MATCH "mark_m" + +#endif diff --git a/include/linux/netfilter_bridge/ebt_mark_t.h b/include/linux/netfilter_bridge/ebt_mark_t.h new file mode 100644 index 00000000..7d5a268a --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_mark_t.h @@ -0,0 +1,23 @@ +#ifndef __LINUX_BRIDGE_EBT_MARK_T_H +#define __LINUX_BRIDGE_EBT_MARK_T_H + +/* The target member is reused for adding new actions, the + * value of the real target is -1 to -NUM_STANDARD_TARGETS. + * For backward compatibility, the 4 lsb (2 would be enough, + * but let's play it safe) are kept to designate this target. + * The remaining bits designate the action. By making the set + * action 0xfffffff0, the result will look ok for older + * versions. 
[September 2006] */ +#define MARK_SET_VALUE (0xfffffff0) +#define MARK_OR_VALUE (0xffffffe0) +#define MARK_AND_VALUE (0xffffffd0) +#define MARK_XOR_VALUE (0xffffffc0) + +struct ebt_mark_t_info { + unsigned long mark; + /* EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN */ + int target; +}; +#define EBT_MARK_TARGET "mark" + +#endif diff --git a/include/linux/netfilter_bridge/ebt_nat.h b/include/linux/netfilter_bridge/ebt_nat.h new file mode 100644 index 00000000..5e74e3b0 --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_nat.h @@ -0,0 +1,13 @@ +#ifndef __LINUX_BRIDGE_EBT_NAT_H +#define __LINUX_BRIDGE_EBT_NAT_H + +#define NAT_ARP_BIT (0x00000010) +struct ebt_nat_info { + unsigned char mac[ETH_ALEN]; + /* EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN */ + int target; +}; +#define EBT_SNAT_TARGET "snat" +#define EBT_DNAT_TARGET "dnat" + +#endif diff --git a/include/linux/netfilter_bridge/ebt_nflog.h b/include/linux/netfilter_bridge/ebt_nflog.h new file mode 100644 index 00000000..df829fce --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_nflog.h @@ -0,0 +1,23 @@ +#ifndef __LINUX_BRIDGE_EBT_NFLOG_H +#define __LINUX_BRIDGE_EBT_NFLOG_H + +#include <linux/types.h> + +#define EBT_NFLOG_MASK 0x0 + +#define EBT_NFLOG_PREFIX_SIZE 64 +#define EBT_NFLOG_WATCHER "nflog" + +#define EBT_NFLOG_DEFAULT_GROUP 0x1 +#define EBT_NFLOG_DEFAULT_THRESHOLD 1 + +struct ebt_nflog_info { + __u32 len; + __u16 group; + __u16 threshold; + __u16 flags; + __u16 pad; + char prefix[EBT_NFLOG_PREFIX_SIZE]; +}; + +#endif /* __LINUX_BRIDGE_EBT_NFLOG_H */ diff --git a/include/linux/netfilter_bridge/ebt_pkttype.h b/include/linux/netfilter_bridge/ebt_pkttype.h new file mode 100644 index 00000000..c241badc --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_pkttype.h @@ -0,0 +1,12 @@ +#ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H +#define __LINUX_BRIDGE_EBT_PKTTYPE_H + +#include <linux/types.h> + +struct ebt_pkttype_info { + __u8 pkt_type; + __u8 invert; +}; +#define EBT_PKTTYPE_MATCH "pkttype" + +#endif diff --git a/include/linux/netfilter_bridge/ebt_redirect.h b/include/linux/netfilter_bridge/ebt_redirect.h new file mode 100644 index 00000000..dd9622ce --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_redirect.h @@ -0,0 +1,10 @@ +#ifndef __LINUX_BRIDGE_EBT_REDIRECT_H +#define __LINUX_BRIDGE_EBT_REDIRECT_H + +struct ebt_redirect_info { + /* EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN */ + int target; +}; +#define EBT_REDIRECT_TARGET "redirect" + +#endif diff --git a/include/linux/netfilter_bridge/ebt_stp.h b/include/linux/netfilter_bridge/ebt_stp.h new file mode 100644 index 00000000..1025b9f5 --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_stp.h @@ -0,0 +1,46 @@ +#ifndef __LINUX_BRIDGE_EBT_STP_H +#define __LINUX_BRIDGE_EBT_STP_H + +#include <linux/types.h> + +#define EBT_STP_TYPE 0x0001 + +#define EBT_STP_FLAGS 0x0002 +#define EBT_STP_ROOTPRIO 0x0004 +#define EBT_STP_ROOTADDR 0x0008 +#define EBT_STP_ROOTCOST 0x0010 +#define EBT_STP_SENDERPRIO 0x0020 +#define EBT_STP_SENDERADDR 0x0040 +#define EBT_STP_PORT 0x0080 +#define EBT_STP_MSGAGE 0x0100 +#define EBT_STP_MAXAGE 0x0200 +#define EBT_STP_HELLOTIME 0x0400 +#define EBT_STP_FWDD 0x0800 + +#define EBT_STP_MASK 0x0fff +#define EBT_STP_CONFIG_MASK 0x0ffe + +#define EBT_STP_MATCH "stp" + +struct ebt_stp_config_info { + __u8 flags; + __u16 root_priol, root_priou; + char root_addr[6], root_addrmsk[6]; + __u32 root_costl, root_costu; + __u16 sender_priol, sender_priou; + char sender_addr[6], sender_addrmsk[6]; + __u16 portl, portu; + __u16 msg_agel, msg_ageu; + 
__u16 max_agel, max_ageu; + __u16 hello_timel, hello_timeu; + __u16 forward_delayl, forward_delayu; +}; + +struct ebt_stp_info { + __u8 type; + struct ebt_stp_config_info config; + __u16 bitmask; + __u16 invflags; +}; + +#endif diff --git a/include/linux/netfilter_bridge/ebt_ulog.h b/include/linux/netfilter_bridge/ebt_ulog.h new file mode 100644 index 00000000..89a6becb --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_ulog.h @@ -0,0 +1,38 @@ +#ifndef _EBT_ULOG_H +#define _EBT_ULOG_H + +#include <linux/types.h> + +#define EBT_ULOG_DEFAULT_NLGROUP 0 +#define EBT_ULOG_DEFAULT_QTHRESHOLD 1 +#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */ +#define EBT_ULOG_PREFIX_LEN 32 +#define EBT_ULOG_MAX_QLEN 50 +#define EBT_ULOG_WATCHER "ulog" +#define EBT_ULOG_VERSION 1 + +struct ebt_ulog_info { + __u32 nlgroup; + unsigned int cprange; + unsigned int qthreshold; + char prefix[EBT_ULOG_PREFIX_LEN]; +}; + +typedef struct ebt_ulog_packet_msg { + int version; + char indev[IFNAMSIZ]; + char outdev[IFNAMSIZ]; + char physindev[IFNAMSIZ]; + char physoutdev[IFNAMSIZ]; + char prefix[EBT_ULOG_PREFIX_LEN]; + struct timeval stamp; + unsigned long mark; + unsigned int hook; + size_t data_len; + /* The complete packet, including Ethernet header and perhaps + * the VLAN header is appended */ + unsigned char data[0] __attribute__ + ((aligned (__alignof__(struct ebt_ulog_info)))); +} ebt_ulog_packet_msg_t; + +#endif /* _EBT_ULOG_H */ diff --git a/include/linux/netfilter_bridge/ebt_vlan.h b/include/linux/netfilter_bridge/ebt_vlan.h new file mode 100644 index 00000000..967d1d5c --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_vlan.h @@ -0,0 +1,22 @@ +#ifndef __LINUX_BRIDGE_EBT_VLAN_H +#define __LINUX_BRIDGE_EBT_VLAN_H + +#include <linux/types.h> + +#define EBT_VLAN_ID 0x01 +#define EBT_VLAN_PRIO 0x02 +#define EBT_VLAN_ENCAP 0x04 +#define EBT_VLAN_MASK (EBT_VLAN_ID | EBT_VLAN_PRIO | EBT_VLAN_ENCAP) +#define EBT_VLAN_MATCH "vlan" + +struct ebt_vlan_info { + __u16 id; /* VLAN ID {1-4095} */ + __u8 prio; /* VLAN User Priority {0-7} */ + __be16 encap; /* VLAN Encapsulated frame code {0-65535} */ + __u8 bitmask; /* Args bitmask bit 1=1 - ID arg, + bit 2=1 User-Priority arg, bit 3=1 encap*/ + __u8 invflags; /* Inverse bitmask bit 1=1 - inversed ID arg, + bit 2=1 - inversed Pirority arg */ +}; + +#endif diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h new file mode 100644 index 00000000..4dd5bd69 --- /dev/null +++ b/include/linux/netfilter_bridge/ebtables.h @@ -0,0 +1,380 @@ +/* + * ebtables + * + * Authors: + * Bart De Schuymer <bdschuym@pandora.be> + * + * ebtables.c,v 2.0, April, 2002 + * + * This code is stongly inspired on the iptables code which is + * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling + */ + +#ifndef __LINUX_BRIDGE_EFF_H +#define __LINUX_BRIDGE_EFF_H +#include <linux/if.h> +#include <linux/netfilter_bridge.h> +#include <linux/if_ether.h> + +#define EBT_TABLE_MAXNAMELEN 32 +#define EBT_CHAIN_MAXNAMELEN EBT_TABLE_MAXNAMELEN +#define EBT_FUNCTION_MAXNAMELEN EBT_TABLE_MAXNAMELEN + +/* verdicts >0 are "branches" */ +#define EBT_ACCEPT -1 +#define EBT_DROP -2 +#define EBT_CONTINUE -3 +#define EBT_RETURN -4 +#define NUM_STANDARD_TARGETS 4 +/* ebtables target modules store the verdict inside an int. We can + * reclaim a part of this int for backwards compatible extensions. + * The 4 lsb are more than enough to store the verdict. 
*/ +#define EBT_VERDICT_BITS 0x0000000F + +struct xt_match; +struct xt_target; + +struct ebt_counter { + uint64_t pcnt; + uint64_t bcnt; +}; + +struct ebt_replace { + char name[EBT_TABLE_MAXNAMELEN]; + unsigned int valid_hooks; + /* nr of rules in the table */ + unsigned int nentries; + /* total size of the entries */ + unsigned int entries_size; + /* start of the chains */ + struct ebt_entries __user *hook_entry[NF_BR_NUMHOOKS]; + /* nr of counters userspace expects back */ + unsigned int num_counters; + /* where the kernel will put the old counters */ + struct ebt_counter __user *counters; + char __user *entries; +}; + +struct ebt_replace_kernel { + char name[EBT_TABLE_MAXNAMELEN]; + unsigned int valid_hooks; + /* nr of rules in the table */ + unsigned int nentries; + /* total size of the entries */ + unsigned int entries_size; + /* start of the chains */ + struct ebt_entries *hook_entry[NF_BR_NUMHOOKS]; + /* nr of counters userspace expects back */ + unsigned int num_counters; + /* where the kernel will put the old counters */ + struct ebt_counter *counters; + char *entries; +}; + +struct ebt_entries { + /* this field is always set to zero + * See EBT_ENTRY_OR_ENTRIES. + * Must be same size as ebt_entry.bitmask */ + unsigned int distinguisher; + /* the chain name */ + char name[EBT_CHAIN_MAXNAMELEN]; + /* counter offset for this chain */ + unsigned int counter_offset; + /* one standard (accept, drop, return) per hook */ + int policy; + /* nr. of entries */ + unsigned int nentries; + /* entry list */ + char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); +}; + +/* used for the bitmask of struct ebt_entry */ + +/* This is a hack to make a difference between an ebt_entry struct and an + * ebt_entries struct when traversing the entries from start to end. + * Using this simplifies the code a lot, while still being able to use + * ebt_entries. + * Contrary, iptables doesn't use something like ebt_entries and therefore uses + * different techniques for naming the policy and such. So, iptables doesn't + * need a hack like this. 
+ */ +#define EBT_ENTRY_OR_ENTRIES 0x01 +/* these are the normal masks */ +#define EBT_NOPROTO 0x02 +#define EBT_802_3 0x04 +#define EBT_SOURCEMAC 0x08 +#define EBT_DESTMAC 0x10 +#define EBT_F_MASK (EBT_NOPROTO | EBT_802_3 | EBT_SOURCEMAC | EBT_DESTMAC \ + | EBT_ENTRY_OR_ENTRIES) + +#define EBT_IPROTO 0x01 +#define EBT_IIN 0x02 +#define EBT_IOUT 0x04 +#define EBT_ISOURCE 0x8 +#define EBT_IDEST 0x10 +#define EBT_ILOGICALIN 0x20 +#define EBT_ILOGICALOUT 0x40 +#define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ILOGICALIN \ + | EBT_ILOGICALOUT | EBT_ISOURCE | EBT_IDEST) + +struct ebt_entry_match { + union { + char name[EBT_FUNCTION_MAXNAMELEN]; + struct xt_match *match; + } u; + /* size of data */ + unsigned int match_size; + unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); +}; + +struct ebt_entry_watcher { + union { + char name[EBT_FUNCTION_MAXNAMELEN]; + struct xt_target *watcher; + } u; + /* size of data */ + unsigned int watcher_size; + unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); +}; + +struct ebt_entry_target { + union { + char name[EBT_FUNCTION_MAXNAMELEN]; + struct xt_target *target; + } u; + /* size of data */ + unsigned int target_size; + unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); +}; + +#define EBT_STANDARD_TARGET "standard" +struct ebt_standard_target { + struct ebt_entry_target target; + int verdict; +}; + +/* one entry */ +struct ebt_entry { + /* this needs to be the first field */ + unsigned int bitmask; + unsigned int invflags; + __be16 ethproto; + /* the physical in-dev */ + char in[IFNAMSIZ]; + /* the logical in-dev */ + char logical_in[IFNAMSIZ]; + /* the physical out-dev */ + char out[IFNAMSIZ]; + /* the logical out-dev */ + char logical_out[IFNAMSIZ]; + unsigned char sourcemac[ETH_ALEN]; + unsigned char sourcemsk[ETH_ALEN]; + unsigned char destmac[ETH_ALEN]; + unsigned char destmsk[ETH_ALEN]; + /* sizeof ebt_entry + matches */ + unsigned int watchers_offset; + /* sizeof ebt_entry + matches + watchers */ + unsigned int target_offset; + /* sizeof ebt_entry + matches + watchers + target */ + unsigned int next_offset; + unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); +}; + +/* {g,s}etsockopt numbers */ +#define EBT_BASE_CTL 128 + +#define EBT_SO_SET_ENTRIES (EBT_BASE_CTL) +#define EBT_SO_SET_COUNTERS (EBT_SO_SET_ENTRIES+1) +#define EBT_SO_SET_MAX (EBT_SO_SET_COUNTERS+1) + +#define EBT_SO_GET_INFO (EBT_BASE_CTL) +#define EBT_SO_GET_ENTRIES (EBT_SO_GET_INFO+1) +#define EBT_SO_GET_INIT_INFO (EBT_SO_GET_ENTRIES+1) +#define EBT_SO_GET_INIT_ENTRIES (EBT_SO_GET_INIT_INFO+1) +#define EBT_SO_GET_MAX (EBT_SO_GET_INIT_ENTRIES+1) + +#ifdef __KERNEL__ + +/* return values for match() functions */ +#define EBT_MATCH 0 +#define EBT_NOMATCH 1 + +struct ebt_match { + struct list_head list; + const char name[EBT_FUNCTION_MAXNAMELEN]; + bool (*match)(const struct sk_buff *skb, const struct net_device *in, + const struct net_device *out, const struct xt_match *match, + const void *matchinfo, int offset, unsigned int protoff, + bool *hotdrop); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_match *match, void *matchinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_match *match, void *matchinfo); + unsigned int matchsize; + u_int8_t revision; + u_int8_t family; + struct module *me; +}; + +struct ebt_watcher { + struct list_head list; + const char name[EBT_FUNCTION_MAXNAMELEN]; + unsigned int 
(*target)(struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int hook_num, const struct xt_target *target, + const void *targinfo); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_target *target, void *targinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_target *target, void *targinfo); + unsigned int targetsize; + u_int8_t revision; + u_int8_t family; + struct module *me; +}; + +struct ebt_target { + struct list_head list; + const char name[EBT_FUNCTION_MAXNAMELEN]; + /* returns one of the standard EBT_* verdicts */ + unsigned int (*target)(struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int hook_num, const struct xt_target *target, + const void *targinfo); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_target *target, void *targinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_target *target, void *targinfo); + unsigned int targetsize; + u_int8_t revision; + u_int8_t family; + struct module *me; +}; + +/* used for jumping from and into user defined chains (udc) */ +struct ebt_chainstack { + struct ebt_entries *chaininfo; /* pointer to chain data */ + struct ebt_entry *e; /* pointer to entry data */ + unsigned int n; /* n'th entry */ +}; + +struct ebt_table_info { + /* total size of the entries */ + unsigned int entries_size; + unsigned int nentries; + /* pointers to the start of the chains */ + struct ebt_entries *hook_entry[NF_BR_NUMHOOKS]; + /* room to maintain the stack used for jumping from and into udc */ + struct ebt_chainstack **chainstack; + char *entries; + struct ebt_counter counters[0] ____cacheline_aligned; +}; + +struct ebt_table { + struct list_head list; + char name[EBT_TABLE_MAXNAMELEN]; + struct ebt_replace_kernel *table; + unsigned int valid_hooks; + rwlock_t lock; + /* e.g. could be the table explicitly only allows certain + * matches, targets, ... 0 == let it in */ + int (*check)(const struct ebt_table_info *info, + unsigned int valid_hooks); + /* the data used by the kernel */ + struct ebt_table_info *private; + struct module *me; +}; + +#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ + ~(__alignof__(struct _xt_align)-1)) +extern struct ebt_table *ebt_register_table(struct net *net, + const struct ebt_table *table); +extern void ebt_unregister_table(struct net *net, struct ebt_table *table); +extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + struct ebt_table *table); + +/* Used in the kernel match() functions */ +#define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg)) +/* True if the hook mask denotes that the rule is in a base chain, + * used in the check() functions */ +#define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS)) +/* Clear the bit in the hook mask that tells if the rule is on a base chain */ +#define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS)) +/* True if the target is not a standard target */ +#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) + +#endif /* __KERNEL__ */ + +/* blatently stolen from ip_tables.h + * fn returns 0 to continue iteration */ +#define EBT_MATCH_ITERATE(e, fn, args...) 
\ +({ \ + unsigned int __i; \ + int __ret = 0; \ + struct ebt_entry_match *__match; \ + \ + for (__i = sizeof(struct ebt_entry); \ + __i < (e)->watchers_offset; \ + __i += __match->match_size + \ + sizeof(struct ebt_entry_match)) { \ + __match = (void *)(e) + __i; \ + \ + __ret = fn(__match , ## args); \ + if (__ret != 0) \ + break; \ + } \ + if (__ret == 0) { \ + if (__i != (e)->watchers_offset) \ + __ret = -EINVAL; \ + } \ + __ret; \ +}) + +#define EBT_WATCHER_ITERATE(e, fn, args...) \ +({ \ + unsigned int __i; \ + int __ret = 0; \ + struct ebt_entry_watcher *__watcher; \ + \ + for (__i = e->watchers_offset; \ + __i < (e)->target_offset; \ + __i += __watcher->watcher_size + \ + sizeof(struct ebt_entry_watcher)) { \ + __watcher = (void *)(e) + __i; \ + \ + __ret = fn(__watcher , ## args); \ + if (__ret != 0) \ + break; \ + } \ + if (__ret == 0) { \ + if (__i != (e)->target_offset) \ + __ret = -EINVAL; \ + } \ + __ret; \ +}) + +#define EBT_ENTRY_ITERATE(entries, size, fn, args...) \ +({ \ + unsigned int __i; \ + int __ret = 0; \ + struct ebt_entry *__entry; \ + \ + for (__i = 0; __i < (size);) { \ + __entry = (void *)(entries) + __i; \ + __ret = fn(__entry , ## args); \ + if (__ret != 0) \ + break; \ + if (__entry->bitmask != 0) \ + __i += __entry->next_offset; \ + else \ + __i += sizeof(struct ebt_entries); \ + } \ + if (__ret == 0) { \ + if (__i != (size)) \ + __ret = -EINVAL; \ + } \ + __ret; \ +}) + +#endif diff --git a/include/linux/netfilter_decnet.h b/include/linux/netfilter_decnet.h new file mode 100644 index 00000000..0b09732a --- /dev/null +++ b/include/linux/netfilter_decnet.h @@ -0,0 +1,79 @@ +#ifndef __LINUX_DECNET_NETFILTER_H +#define __LINUX_DECNET_NETFILTER_H + +/* DECnet-specific defines for netfilter. + * This file (C) Steve Whitehouse 1999 derived from the + * ipv4 netfilter header file which is + * (C)1998 Rusty Russell -- This code is GPL. + */ + +#include <linux/netfilter.h> + +/* only for userspace compatibility */ +#ifndef __KERNEL__ + +#include <limits.h> /* for INT_MIN, INT_MAX */ + +/* IP Cache bits. */ +/* Src IP address. */ +#define NFC_DN_SRC 0x0001 +/* Dest IP address. */ +#define NFC_DN_DST 0x0002 +/* Input device. */ +#define NFC_DN_IF_IN 0x0004 +/* Output device. */ +#define NFC_DN_IF_OUT 0x0008 +#endif /* ! __KERNEL__ */ + +/* DECnet Hooks */ +/* After promisc drops, checksum checks. */ +#define NF_DN_PRE_ROUTING 0 +/* If the packet is destined for this box. */ +#define NF_DN_LOCAL_IN 1 +/* If the packet is destined for another interface. */ +#define NF_DN_FORWARD 2 +/* Packets coming from a local process. */ +#define NF_DN_LOCAL_OUT 3 +/* Packets about to hit the wire. 
*/ +#define NF_DN_POST_ROUTING 4 +/* Input Hello Packets */ +#define NF_DN_HELLO 5 +/* Input Routing Packets */ +#define NF_DN_ROUTE 6 +#define NF_DN_NUMHOOKS 7 + +enum nf_dn_hook_priorities { + NF_DN_PRI_FIRST = INT_MIN, + NF_DN_PRI_CONNTRACK = -200, + NF_DN_PRI_MANGLE = -150, + NF_DN_PRI_NAT_DST = -100, + NF_DN_PRI_FILTER = 0, + NF_DN_PRI_NAT_SRC = 100, + NF_DN_PRI_DNRTMSG = 200, + NF_DN_PRI_LAST = INT_MAX, +}; + +struct nf_dn_rtmsg { + int nfdn_ifindex; +}; + +#define NFDN_RTMSG(r) ((unsigned char *)(r) + NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg))) + +#ifndef __KERNEL__ +/* backwards compatibility for userspace */ +#define DNRMG_L1_GROUP 0x01 +#define DNRMG_L2_GROUP 0x02 +#endif + +enum { + DNRNG_NLGRP_NONE, +#define DNRNG_NLGRP_NONE DNRNG_NLGRP_NONE + DNRNG_NLGRP_L1, +#define DNRNG_NLGRP_L1 DNRNG_NLGRP_L1 + DNRNG_NLGRP_L2, +#define DNRNG_NLGRP_L2 DNRNG_NLGRP_L2 + __DNRNG_NLGRP_MAX +}; +#define DNRNG_NLGRP_MAX (__DNRNG_NLGRP_MAX - 1) + +#endif /*__LINUX_DECNET_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h new file mode 100644 index 00000000..fa0946c5 --- /dev/null +++ b/include/linux/netfilter_ipv4.h @@ -0,0 +1,86 @@ +#ifndef __LINUX_IP_NETFILTER_H +#define __LINUX_IP_NETFILTER_H + +/* IPv4-specific defines for netfilter. + * (C)1998 Rusty Russell -- This code is GPL. + */ + +#include <linux/netfilter.h> + +/* only for userspace compatibility */ +#ifndef __KERNEL__ + +#include <limits.h> /* for INT_MIN, INT_MAX */ + +/* IP Cache bits. */ +/* Src IP address. */ +#define NFC_IP_SRC 0x0001 +/* Dest IP address. */ +#define NFC_IP_DST 0x0002 +/* Input device. */ +#define NFC_IP_IF_IN 0x0004 +/* Output device. */ +#define NFC_IP_IF_OUT 0x0008 +/* TOS. */ +#define NFC_IP_TOS 0x0010 +/* Protocol. */ +#define NFC_IP_PROTO 0x0020 +/* IP options. */ +#define NFC_IP_OPTIONS 0x0040 +/* Frag & flags. */ +#define NFC_IP_FRAG 0x0080 + +/* Per-protocol information: only matters if proto match. */ +/* TCP flags. */ +#define NFC_IP_TCPFLAGS 0x0100 +/* Source port. */ +#define NFC_IP_SRC_PT 0x0200 +/* Dest port. */ +#define NFC_IP_DST_PT 0x0400 +/* Something else about the proto */ +#define NFC_IP_PROTO_UNKNOWN 0x2000 + +/* IP Hooks */ +/* After promisc drops, checksum checks. */ +#define NF_IP_PRE_ROUTING 0 +/* If the packet is destined for this box. */ +#define NF_IP_LOCAL_IN 1 +/* If the packet is destined for another interface. */ +#define NF_IP_FORWARD 2 +/* Packets coming from a local process. */ +#define NF_IP_LOCAL_OUT 3 +/* Packets about to hit the wire. */ +#define NF_IP_POST_ROUTING 4 +#define NF_IP_NUMHOOKS 5 +#endif /* ! __KERNEL__ */ + +enum nf_ip_hook_priorities { + NF_IP_PRI_FIRST = INT_MIN, + NF_IP_PRI_CONNTRACK_DEFRAG = -400, + NF_IP_PRI_RAW = -300, + NF_IP_PRI_SELINUX_FIRST = -225, + NF_IP_PRI_CONNTRACK = -200, + NF_IP_PRI_MANGLE = -150, + NF_IP_PRI_NAT_DST = -100, + NF_IP_PRI_FILTER = 0, + NF_IP_PRI_SECURITY = 50, + NF_IP_PRI_NAT_SRC = 100, + NF_IP_PRI_SELINUX_LAST = 225, + NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX, + NF_IP_PRI_LAST = INT_MAX, +}; + +/* Arguments for setsockopt SOL_IP: */ +/* 2.0 firewalling went from 64 through 71 (and +256, +512, etc). */ +/* 2.2 firewalling (+ masq) went from 64 through 76 */ +/* 2.4 firewalling went 64 through 67. 
*/ +#define SO_ORIGINAL_DST 80 + +#ifdef __KERNEL__ +extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type); +extern int ip_xfrm_me_harder(struct sk_buff *skb); +extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); +#endif /*__KERNEL__*/ + +#endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild new file mode 100644 index 00000000..31f8bec9 --- /dev/null +++ b/include/linux/netfilter_ipv4/Kbuild @@ -0,0 +1,12 @@ +header-y += ip_queue.h +header-y += ip_tables.h +header-y += ipt_CLUSTERIP.h +header-y += ipt_ECN.h +header-y += ipt_LOG.h +header-y += ipt_REJECT.h +header-y += ipt_TTL.h +header-y += ipt_ULOG.h +header-y += ipt_addrtype.h +header-y += ipt_ah.h +header-y += ipt_ecn.h +header-y += ipt_ttl.h diff --git a/include/linux/netfilter_ipv4/ip_queue.h b/include/linux/netfilter_ipv4/ip_queue.h new file mode 100644 index 00000000..a03507f4 --- /dev/null +++ b/include/linux/netfilter_ipv4/ip_queue.h @@ -0,0 +1,72 @@ +/* + * This is a module which is used for queueing IPv4 packets and + * communicating with userspace via netlink. + * + * (C) 2000 James Morris, this code is GPL. + */ +#ifndef _IP_QUEUE_H +#define _IP_QUEUE_H + +#ifdef __KERNEL__ +#ifdef DEBUG_IPQ +#define QDEBUG(x...) printk(KERN_DEBUG ## x) +#else +#define QDEBUG(x...) +#endif /* DEBUG_IPQ */ +#else +#include <net/if.h> +#endif /* ! __KERNEL__ */ + +/* Messages sent from kernel */ +typedef struct ipq_packet_msg { + unsigned long packet_id; /* ID of queued packet */ + unsigned long mark; /* Netfilter mark value */ + long timestamp_sec; /* Packet arrival time (seconds) */ + long timestamp_usec; /* Packet arrvial time (+useconds) */ + unsigned int hook; /* Netfilter hook we rode in on */ + char indev_name[IFNAMSIZ]; /* Name of incoming interface */ + char outdev_name[IFNAMSIZ]; /* Name of outgoing interface */ + __be16 hw_protocol; /* Hardware protocol (network order) */ + unsigned short hw_type; /* Hardware type */ + unsigned char hw_addrlen; /* Hardware address length */ + unsigned char hw_addr[8]; /* Hardware address */ + size_t data_len; /* Length of packet data */ + unsigned char payload[0]; /* Optional packet data */ +} ipq_packet_msg_t; + +/* Messages sent from userspace */ +typedef struct ipq_mode_msg { + unsigned char value; /* Requested mode */ + size_t range; /* Optional range of packet requested */ +} ipq_mode_msg_t; + +typedef struct ipq_verdict_msg { + unsigned int value; /* Verdict to hand to netfilter */ + unsigned long id; /* Packet ID for this verdict */ + size_t data_len; /* Length of replacement data */ + unsigned char payload[0]; /* Optional replacement packet */ +} ipq_verdict_msg_t; + +typedef struct ipq_peer_msg { + union { + ipq_verdict_msg_t verdict; + ipq_mode_msg_t mode; + } msg; +} ipq_peer_msg_t; + +/* Packet delivery modes */ +enum { + IPQ_COPY_NONE, /* Initial mode, packets are dropped */ + IPQ_COPY_META, /* Copy metadata */ + IPQ_COPY_PACKET /* Copy metadata + packet (range) */ +}; +#define IPQ_COPY_MAX IPQ_COPY_PACKET + +/* Types of messages */ +#define IPQM_BASE 0x10 /* standard netlink messages below this */ +#define IPQM_MODE (IPQM_BASE + 1) /* Mode request from peer */ +#define IPQM_VERDICT (IPQM_BASE + 2) /* Verdict from peer */ +#define IPQM_PACKET (IPQM_BASE + 3) /* Packet from kernel */ +#define IPQM_MAX (IPQM_BASE + 4) + +#endif /*_IP_QUEUE_H*/ diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h new file 
mode 100644 index 00000000..db792319 --- /dev/null +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -0,0 +1,307 @@ +/* + * 25-Jul-1998 Major changes to allow for ip chain table + * + * 3-Jan-2000 Named tables to allow packet selection for different uses. + */ + +/* + * Format of an IP firewall descriptor + * + * src, dst, src_mask, dst_mask are always stored in network byte order. + * flags are stored in host byte order (of course). + * Port numbers are stored in HOST byte order. + */ + +#ifndef _IPTABLES_H +#define _IPTABLES_H + +#ifdef __KERNEL__ +#include <linux/if.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/skbuff.h> +#endif +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/netfilter_ipv4.h> + +#include <linux/netfilter/x_tables.h> + +#ifndef __KERNEL__ +#define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN +#define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN +#define ipt_match xt_match +#define ipt_target xt_target +#define ipt_table xt_table +#define ipt_get_revision xt_get_revision +#define ipt_entry_match xt_entry_match +#define ipt_entry_target xt_entry_target +#define ipt_standard_target xt_standard_target +#define ipt_error_target xt_error_target +#define ipt_counters xt_counters +#define IPT_CONTINUE XT_CONTINUE +#define IPT_RETURN XT_RETURN + +/* This group is older than old (iptables < v1.4.0-rc1~89) */ +#include <linux/netfilter/xt_tcpudp.h> +#define ipt_udp xt_udp +#define ipt_tcp xt_tcp +#define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT +#define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT +#define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS +#define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION +#define IPT_TCP_INV_MASK XT_TCP_INV_MASK +#define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT +#define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT +#define IPT_UDP_INV_MASK XT_UDP_INV_MASK + +/* The argument to IPT_SO_ADD_COUNTERS. */ +#define ipt_counters_info xt_counters_info +/* Standard return verdict, or do jump. */ +#define IPT_STANDARD_TARGET XT_STANDARD_TARGET +/* Error verdict. */ +#define IPT_ERROR_TARGET XT_ERROR_TARGET + +/* fn returns 0 to continue iteration */ +#define IPT_MATCH_ITERATE(e, fn, args...) \ + XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args) + +/* fn returns 0 to continue iteration */ +#define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ + XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) +#endif + +/* Yes, Virginia, you have to zero the padding. */ +struct ipt_ip { + /* Source and destination IP addr */ + struct in_addr src, dst; + /* Mask for src and dest IP addr */ + struct in_addr smsk, dmsk; + char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; + unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; + + /* Protocol, 0 = ANY */ + __u16 proto; + + /* Flags word */ + __u8 flags; + /* Inverse flags */ + __u8 invflags; +}; + +/* Values for "flag" field in struct ipt_ip (general ip structure). */ +#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ +#define IPT_F_GOTO 0x02 /* Set if jump is a goto */ +#define IPT_F_MASK 0x03 /* All possible flag bits mask. */ + +/* Values for "inv" field in struct ipt_ip. */ +#define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ +#define IPT_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */ +#define IPT_INV_TOS 0x04 /* Invert the sense of TOS. */ +#define IPT_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */ +#define IPT_INV_DSTIP 0x10 /* Invert the sense of DST OP. */ +#define IPT_INV_FRAG 0x20 /* Invert the sense of FRAG. 
*/ +#define IPT_INV_PROTO XT_INV_PROTO +#define IPT_INV_MASK 0x7F /* All possible flag bits mask. */ + +/* This structure defines each of the firewall rules. Consists of 3 + parts which are 1) general IP header stuff 2) match specific + stuff 3) the target to perform if the rule matches */ +struct ipt_entry { + struct ipt_ip ip; + + /* Mark with fields that we care about. */ + unsigned int nfcache; + + /* Size of ipt_entry + matches */ + __u16 target_offset; + /* Size of ipt_entry + matches + target */ + __u16 next_offset; + + /* Back pointer */ + unsigned int comefrom; + + /* Packet and byte counters. */ + struct xt_counters counters; + + /* The matches (if any), then the target. */ + unsigned char elems[0]; +}; + +/* + * New IP firewall options for [gs]etsockopt at the RAW IP level. + * Unlike BSD Linux inherits IP options so you don't have to use a raw + * socket for this. Instead we check rights in the calls. + * + * ATTENTION: check linux/in.h before adding new number here. + */ +#define IPT_BASE_CTL 64 + +#define IPT_SO_SET_REPLACE (IPT_BASE_CTL) +#define IPT_SO_SET_ADD_COUNTERS (IPT_BASE_CTL + 1) +#define IPT_SO_SET_MAX IPT_SO_SET_ADD_COUNTERS + +#define IPT_SO_GET_INFO (IPT_BASE_CTL) +#define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1) +#define IPT_SO_GET_REVISION_MATCH (IPT_BASE_CTL + 2) +#define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3) +#define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET + +/* ICMP matching stuff */ +struct ipt_icmp { + __u8 type; /* type to match */ + __u8 code[2]; /* range of code */ + __u8 invflags; /* Inverse flags */ +}; + +/* Values for "inv" field for struct ipt_icmp. */ +#define IPT_ICMP_INV 0x01 /* Invert the sense of type/code test */ + +/* The argument to IPT_SO_GET_INFO */ +struct ipt_getinfo { + /* Which table: caller fills this in. */ + char name[XT_TABLE_MAXNAMELEN]; + + /* Kernel fills these in. */ + /* Which hook entry points are valid: bitmask */ + unsigned int valid_hooks; + + /* Hook entry points: one per netfilter hook. */ + unsigned int hook_entry[NF_INET_NUMHOOKS]; + + /* Underflow points. */ + unsigned int underflow[NF_INET_NUMHOOKS]; + + /* Number of entries */ + unsigned int num_entries; + + /* Size of entries. */ + unsigned int size; +}; + +/* The argument to IPT_SO_SET_REPLACE. */ +struct ipt_replace { + /* Which table. */ + char name[XT_TABLE_MAXNAMELEN]; + + /* Which hook entry points are valid: bitmask. You can't + change this. */ + unsigned int valid_hooks; + + /* Number of entries */ + unsigned int num_entries; + + /* Total size of new entries */ + unsigned int size; + + /* Hook entry points. */ + unsigned int hook_entry[NF_INET_NUMHOOKS]; + + /* Underflow points. */ + unsigned int underflow[NF_INET_NUMHOOKS]; + + /* Information about old entries: */ + /* Number of counters (must be equal to current number of entries). */ + unsigned int num_counters; + /* The old entries' counters. */ + struct xt_counters __user *counters; + + /* The entries (hang off end: not really an array). */ + struct ipt_entry entries[0]; +}; + +/* The argument to IPT_SO_GET_ENTRIES. */ +struct ipt_get_entries { + /* Which table: user fills this in. */ + char name[XT_TABLE_MAXNAMELEN]; + + /* User fills this in: total entry size. */ + unsigned int size; + + /* The entries. */ + struct ipt_entry entrytable[0]; +}; + +/* Helper functions */ +static __inline__ struct xt_entry_target * +ipt_get_target(struct ipt_entry *e) +{ + return (void *)e + e->target_offset; +} + +/* + * Main firewall chains definitions and global var's definitions. 
+ */ +#ifdef __KERNEL__ + +#include <linux/init.h> +extern void ipt_init(void) __init; + +extern struct xt_table *ipt_register_table(struct net *net, + const struct xt_table *table, + const struct ipt_replace *repl); +extern void ipt_unregister_table(struct net *net, struct xt_table *table); + +/* Standard entry. */ +struct ipt_standard { + struct ipt_entry entry; + struct xt_standard_target target; +}; + +struct ipt_error { + struct ipt_entry entry; + struct xt_error_target target; +}; + +#define IPT_ENTRY_INIT(__size) \ +{ \ + .target_offset = sizeof(struct ipt_entry), \ + .next_offset = (__size), \ +} + +#define IPT_STANDARD_INIT(__verdict) \ +{ \ + .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \ + .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ + sizeof(struct xt_standard_target)), \ + .target.verdict = -(__verdict) - 1, \ +} + +#define IPT_ERROR_INIT \ +{ \ + .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \ + .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ + sizeof(struct xt_error_target)), \ + .target.errorname = "ERROR", \ +} + +extern void *ipt_alloc_initial_table(const struct xt_table *); +extern unsigned int ipt_do_table(struct sk_buff *skb, + unsigned int hook, + const struct net_device *in, + const struct net_device *out, + struct xt_table *table); + +#ifdef CONFIG_COMPAT +#include <net/compat.h> + +struct compat_ipt_entry { + struct ipt_ip ip; + compat_uint_t nfcache; + __u16 target_offset; + __u16 next_offset; + compat_uint_t comefrom; + struct compat_xt_counters counters; + unsigned char elems[0]; +}; + +/* Helper functions */ +static inline struct xt_entry_target * +compat_ipt_get_target(struct compat_ipt_entry *e) +{ + return (void *)e + e->target_offset; +} + +#endif /* CONFIG_COMPAT */ +#endif /*__KERNEL__*/ +#endif /* _IPTABLES_H */ diff --git a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h new file mode 100644 index 00000000..c6a204c9 --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h @@ -0,0 +1,36 @@ +#ifndef _IPT_CLUSTERIP_H_target +#define _IPT_CLUSTERIP_H_target + +#include <linux/types.h> + +enum clusterip_hashmode { + CLUSTERIP_HASHMODE_SIP = 0, + CLUSTERIP_HASHMODE_SIP_SPT, + CLUSTERIP_HASHMODE_SIP_SPT_DPT, +}; + +#define CLUSTERIP_HASHMODE_MAX CLUSTERIP_HASHMODE_SIP_SPT_DPT + +#define CLUSTERIP_MAX_NODES 16 + +#define CLUSTERIP_FLAG_NEW 0x00000001 + +struct clusterip_config; + +struct ipt_clusterip_tgt_info { + + __u32 flags; + + /* only relevant for new ones */ + __u8 clustermac[6]; + __u16 num_total_nodes; + __u16 num_local_nodes; + __u16 local_nodes[CLUSTERIP_MAX_NODES]; + __u32 hash_mode; + __u32 hash_initval; + + /* Used internally by the kernel */ + struct clusterip_config *config; +}; + +#endif /*_IPT_CLUSTERIP_H_target*/ diff --git a/include/linux/netfilter_ipv4/ipt_ECN.h b/include/linux/netfilter_ipv4/ipt_ECN.h new file mode 100644 index 00000000..bb88d531 --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_ECN.h @@ -0,0 +1,33 @@ +/* Header file for iptables ipt_ECN target + * + * (C) 2002 by Harald Welte <laforge@gnumonks.org> + * + * This software is distributed under GNU GPL v2, 1991 + * + * ipt_ECN.h,v 1.3 2002/05/29 12:17:40 laforge Exp +*/ +#ifndef _IPT_ECN_TARGET_H +#define _IPT_ECN_TARGET_H + +#include <linux/types.h> +#include <linux/netfilter/xt_DSCP.h> + +#define IPT_ECN_IP_MASK (~XT_DSCP_MASK) + +#define IPT_ECN_OP_SET_IP 0x01 /* set ECN bits of IPv4 header */ +#define IPT_ECN_OP_SET_ECE 0x10 /* set ECE bit of TCP header */ +#define IPT_ECN_OP_SET_CWR 0x20 /* set CWR bit of 
TCP header */ + +#define IPT_ECN_OP_MASK 0xce + +struct ipt_ECN_info { + __u8 operation; /* bitset of operations */ + __u8 ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */ + union { + struct { + __u8 ece:1, cwr:1; /* TCP ECT bits */ + } tcp; + } proto; +}; + +#endif /* _IPT_ECN_TARGET_H */ diff --git a/include/linux/netfilter_ipv4/ipt_LOG.h b/include/linux/netfilter_ipv4/ipt_LOG.h new file mode 100644 index 00000000..5d815207 --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_LOG.h @@ -0,0 +1,21 @@ +#ifndef _IPT_LOG_H +#define _IPT_LOG_H + +#warning "Please update iptables, this file will be removed soon!" + +/* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */ +#define IPT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ +#define IPT_LOG_TCPOPT 0x02 /* Log TCP options */ +#define IPT_LOG_IPOPT 0x04 /* Log IP options */ +#define IPT_LOG_UID 0x08 /* Log UID owning local socket */ +#define IPT_LOG_NFLOG 0x10 /* Unsupported, don't reuse */ +#define IPT_LOG_MACDECODE 0x20 /* Decode MAC header */ +#define IPT_LOG_MASK 0x2f + +struct ipt_log_info { + unsigned char level; + unsigned char logflags; + char prefix[30]; +}; + +#endif /*_IPT_LOG_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_REJECT.h b/include/linux/netfilter_ipv4/ipt_REJECT.h new file mode 100644 index 00000000..4293a1ad --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_REJECT.h @@ -0,0 +1,20 @@ +#ifndef _IPT_REJECT_H +#define _IPT_REJECT_H + +enum ipt_reject_with { + IPT_ICMP_NET_UNREACHABLE, + IPT_ICMP_HOST_UNREACHABLE, + IPT_ICMP_PROT_UNREACHABLE, + IPT_ICMP_PORT_UNREACHABLE, + IPT_ICMP_ECHOREPLY, + IPT_ICMP_NET_PROHIBITED, + IPT_ICMP_HOST_PROHIBITED, + IPT_TCP_RESET, + IPT_ICMP_ADMIN_PROHIBITED +}; + +struct ipt_reject_info { + enum ipt_reject_with with; /* reject type */ +}; + +#endif /*_IPT_REJECT_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_TTL.h b/include/linux/netfilter_ipv4/ipt_TTL.h new file mode 100644 index 00000000..f6ac169d --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_TTL.h @@ -0,0 +1,23 @@ +/* TTL modification module for IP tables + * (C) 2000 by Harald Welte <laforge@netfilter.org> */ + +#ifndef _IPT_TTL_H +#define _IPT_TTL_H + +#include <linux/types.h> + +enum { + IPT_TTL_SET = 0, + IPT_TTL_INC, + IPT_TTL_DEC +}; + +#define IPT_TTL_MAXMODE IPT_TTL_DEC + +struct ipt_TTL_info { + __u8 mode; + __u8 ttl; +}; + + +#endif diff --git a/include/linux/netfilter_ipv4/ipt_ULOG.h b/include/linux/netfilter_ipv4/ipt_ULOG.h new file mode 100644 index 00000000..417aad28 --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_ULOG.h @@ -0,0 +1,49 @@ +/* Header file for IP tables userspace logging, Version 1.8 + * + * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org> + * + * Distributed under the terms of GNU GPL */ + +#ifndef _IPT_ULOG_H +#define _IPT_ULOG_H + +#ifndef NETLINK_NFLOG +#define NETLINK_NFLOG 5 +#endif + +#define ULOG_DEFAULT_NLGROUP 1 +#define ULOG_DEFAULT_QTHRESHOLD 1 + +#define ULOG_MAC_LEN 80 +#define ULOG_PREFIX_LEN 32 + +#define ULOG_MAX_QLEN 50 +/* Why 50? Well... there is a limit imposed by the slab cache 131000 + * bytes. So the multipart netlink-message has to be < 131000 bytes. + * Assuming a standard ethernet-mtu of 1500, we could define this up + * to 80... but even 50 seems to be big enough. 
*/ + +/* private data structure for each rule with a ULOG target */ +struct ipt_ulog_info { + unsigned int nl_group; + size_t copy_range; + size_t qthreshold; + char prefix[ULOG_PREFIX_LEN]; +}; + +/* Format of the ULOG packets passed through netlink */ +typedef struct ulog_packet_msg { + unsigned long mark; + long timestamp_sec; + long timestamp_usec; + unsigned int hook; + char indev_name[IFNAMSIZ]; + char outdev_name[IFNAMSIZ]; + size_t data_len; + char prefix[ULOG_PREFIX_LEN]; + unsigned char mac_len; + unsigned char mac[ULOG_MAC_LEN]; + unsigned char payload[0]; +} ulog_packet_msg_t; + +#endif /*_IPT_ULOG_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_addrtype.h b/include/linux/netfilter_ipv4/ipt_addrtype.h new file mode 100644 index 00000000..0da42237 --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_addrtype.h @@ -0,0 +1,27 @@ +#ifndef _IPT_ADDRTYPE_H +#define _IPT_ADDRTYPE_H + +#include <linux/types.h> + +enum { + IPT_ADDRTYPE_INVERT_SOURCE = 0x0001, + IPT_ADDRTYPE_INVERT_DEST = 0x0002, + IPT_ADDRTYPE_LIMIT_IFACE_IN = 0x0004, + IPT_ADDRTYPE_LIMIT_IFACE_OUT = 0x0008, +}; + +struct ipt_addrtype_info_v1 { + __u16 source; /* source-type mask */ + __u16 dest; /* dest-type mask */ + __u32 flags; +}; + +/* revision 0 */ +struct ipt_addrtype_info { + __u16 source; /* source-type mask */ + __u16 dest; /* dest-type mask */ + __u32 invert_source; + __u32 invert_dest; +}; + +#endif diff --git a/include/linux/netfilter_ipv4/ipt_ah.h b/include/linux/netfilter_ipv4/ipt_ah.h new file mode 100644 index 00000000..4e02bb01 --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_ah.h @@ -0,0 +1,17 @@ +#ifndef _IPT_AH_H +#define _IPT_AH_H + +#include <linux/types.h> + +struct ipt_ah { + __u32 spis[2]; /* Security Parameter Index */ + __u8 invflags; /* Inverse flags */ +}; + + + +/* Values for "invflags" field in struct ipt_ah. */ +#define IPT_AH_INV_SPI 0x01 /* Invert the sense of spi. */ +#define IPT_AH_INV_MASK 0x01 /* All possible flags. */ + +#endif /*_IPT_AH_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h new file mode 100644 index 00000000..0e0c063d --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_ecn.h @@ -0,0 +1,15 @@ +#ifndef _IPT_ECN_H +#define _IPT_ECN_H + +#include <linux/netfilter/xt_ecn.h> +#define ipt_ecn_info xt_ecn_info + +enum { + IPT_ECN_IP_MASK = XT_ECN_IP_MASK, + IPT_ECN_OP_MATCH_IP = XT_ECN_OP_MATCH_IP, + IPT_ECN_OP_MATCH_ECE = XT_ECN_OP_MATCH_ECE, + IPT_ECN_OP_MATCH_CWR = XT_ECN_OP_MATCH_CWR, + IPT_ECN_OP_MATCH_MASK = XT_ECN_OP_MATCH_MASK, +}; + +#endif /* IPT_ECN_H */ diff --git a/include/linux/netfilter_ipv4/ipt_ttl.h b/include/linux/netfilter_ipv4/ipt_ttl.h new file mode 100644 index 00000000..37bee444 --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_ttl.h @@ -0,0 +1,23 @@ +/* IP tables module for matching the value of the TTL + * (C) 2000 by Harald Welte <laforge@gnumonks.org> */ + +#ifndef _IPT_TTL_H +#define _IPT_TTL_H + +#include <linux/types.h> + +enum { + IPT_TTL_EQ = 0, /* equals */ + IPT_TTL_NE, /* not equals */ + IPT_TTL_LT, /* less than */ + IPT_TTL_GT, /* greater than */ +}; + + +struct ipt_ttl_info { + __u8 mode; + __u8 ttl; +}; + + +#endif diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h new file mode 100644 index 00000000..57c02512 --- /dev/null +++ b/include/linux/netfilter_ipv6.h @@ -0,0 +1,93 @@ +#ifndef __LINUX_IP6_NETFILTER_H +#define __LINUX_IP6_NETFILTER_H + +/* IPv6-specific defines for netfilter. + * (C)1998 Rusty Russell -- This code is GPL. 
+ * (C)1999 David Jeffery + * this header was blatantly ripped from netfilter_ipv4.h + * it's amazing what adding a bunch of 6s can do =8^) + */ + +#include <linux/netfilter.h> + +/* only for userspace compatibility */ +#ifndef __KERNEL__ + +#include <limits.h> /* for INT_MIN, INT_MAX */ + +/* IP Cache bits. */ +/* Src IP address. */ +#define NFC_IP6_SRC 0x0001 +/* Dest IP address. */ +#define NFC_IP6_DST 0x0002 +/* Input device. */ +#define NFC_IP6_IF_IN 0x0004 +/* Output device. */ +#define NFC_IP6_IF_OUT 0x0008 +/* TOS. */ +#define NFC_IP6_TOS 0x0010 +/* Protocol. */ +#define NFC_IP6_PROTO 0x0020 +/* IP options. */ +#define NFC_IP6_OPTIONS 0x0040 +/* Frag & flags. */ +#define NFC_IP6_FRAG 0x0080 + + +/* Per-protocol information: only matters if proto match. */ +/* TCP flags. */ +#define NFC_IP6_TCPFLAGS 0x0100 +/* Source port. */ +#define NFC_IP6_SRC_PT 0x0200 +/* Dest port. */ +#define NFC_IP6_DST_PT 0x0400 +/* Something else about the proto */ +#define NFC_IP6_PROTO_UNKNOWN 0x2000 + +/* IP6 Hooks */ +/* After promisc drops, checksum checks. */ +#define NF_IP6_PRE_ROUTING 0 +/* If the packet is destined for this box. */ +#define NF_IP6_LOCAL_IN 1 +/* If the packet is destined for another interface. */ +#define NF_IP6_FORWARD 2 +/* Packets coming from a local process. */ +#define NF_IP6_LOCAL_OUT 3 +/* Packets about to hit the wire. */ +#define NF_IP6_POST_ROUTING 4 +#define NF_IP6_NUMHOOKS 5 +#endif /* ! __KERNEL__ */ + + +enum nf_ip6_hook_priorities { + NF_IP6_PRI_FIRST = INT_MIN, + NF_IP6_PRI_CONNTRACK_DEFRAG = -400, + NF_IP6_PRI_RAW = -300, + NF_IP6_PRI_SELINUX_FIRST = -225, + NF_IP6_PRI_CONNTRACK = -200, + NF_IP6_PRI_MANGLE = -150, + NF_IP6_PRI_NAT_DST = -100, + NF_IP6_PRI_FILTER = 0, + NF_IP6_PRI_SECURITY = 50, + NF_IP6_PRI_NAT_SRC = 100, + NF_IP6_PRI_SELINUX_LAST = 225, + NF_IP6_PRI_LAST = INT_MAX, +}; + +#ifdef __KERNEL__ + +#ifdef CONFIG_NETFILTER +extern int ip6_route_me_harder(struct sk_buff *skb); +extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); + +extern int ipv6_netfilter_init(void); +extern void ipv6_netfilter_fini(void); +#else /* CONFIG_NETFILTER */ +static inline int ipv6_netfilter_init(void) { return 0; } +static inline void ipv6_netfilter_fini(void) { return; } +#endif /* CONFIG_NETFILTER */ + +#endif /* __KERNEL__ */ + +#endif /*__LINUX_IP6_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild new file mode 100644 index 00000000..bd095bc0 --- /dev/null +++ b/include/linux/netfilter_ipv6/Kbuild @@ -0,0 +1,11 @@ +header-y += ip6_tables.h +header-y += ip6t_HL.h +header-y += ip6t_LOG.h +header-y += ip6t_REJECT.h +header-y += ip6t_ah.h +header-y += ip6t_frag.h +header-y += ip6t_hl.h +header-y += ip6t_ipv6header.h +header-y += ip6t_mh.h +header-y += ip6t_opts.h +header-y += ip6t_rt.h diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h new file mode 100644 index 00000000..1bc898b1 --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -0,0 +1,326 @@ +/* + * 25-Jul-1998 Major changes to allow for ip chain table + * + * 3-Jan-2000 Named tables to allow packet selection for different uses. + */ + +/* + * Format of an IP6 firewall descriptor + * + * src, dst, src_mask, dst_mask are always stored in network byte order. + * flags are stored in host byte order (of course). + * Port numbers are stored in HOST byte order. 
+ */ + +#ifndef _IP6_TABLES_H +#define _IP6_TABLES_H + +#ifdef __KERNEL__ +#include <linux/if.h> +#include <linux/in6.h> +#include <linux/ipv6.h> +#include <linux/skbuff.h> +#endif +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/netfilter_ipv6.h> + +#include <linux/netfilter/x_tables.h> + +#ifndef __KERNEL__ +#define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN +#define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN +#define ip6t_match xt_match +#define ip6t_target xt_target +#define ip6t_table xt_table +#define ip6t_get_revision xt_get_revision +#define ip6t_entry_match xt_entry_match +#define ip6t_entry_target xt_entry_target +#define ip6t_standard_target xt_standard_target +#define ip6t_error_target xt_error_target +#define ip6t_counters xt_counters +#define IP6T_CONTINUE XT_CONTINUE +#define IP6T_RETURN XT_RETURN + +/* Pre-iptables-1.4.0 */ +#include <linux/netfilter/xt_tcpudp.h> +#define ip6t_tcp xt_tcp +#define ip6t_udp xt_udp +#define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT +#define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT +#define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS +#define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION +#define IP6T_TCP_INV_MASK XT_TCP_INV_MASK +#define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT +#define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT +#define IP6T_UDP_INV_MASK XT_UDP_INV_MASK + +#define ip6t_counters_info xt_counters_info +#define IP6T_STANDARD_TARGET XT_STANDARD_TARGET +#define IP6T_ERROR_TARGET XT_ERROR_TARGET +#define IP6T_MATCH_ITERATE(e, fn, args...) \ + XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args) +#define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ + XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) +#endif + +/* Yes, Virginia, you have to zero the padding. */ +struct ip6t_ip6 { + /* Source and destination IP6 addr */ + struct in6_addr src, dst; + /* Mask for src and dest IP6 addr */ + struct in6_addr smsk, dmsk; + char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; + unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; + + /* Upper protocol number + * - The allowed value is 0 (any) or protocol number of last parsable + * header, which is 50 (ESP), 59 (No Next Header), 135 (MH), or + * the non IPv6 extension headers. + * - The protocol numbers of IPv6 extension headers except of ESP and + * MH do not match any packets. + * - You also need to set IP6T_FLAGS_PROTO to "flags" to check protocol. + */ + __u16 proto; + /* TOS to match iff flags & IP6T_F_TOS */ + __u8 tos; + + /* Flags word */ + __u8 flags; + /* Inverse flags */ + __u8 invflags; +}; + +/* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */ +#define IP6T_F_PROTO 0x01 /* Set if rule cares about upper + protocols */ +#define IP6T_F_TOS 0x02 /* Match the TOS. */ +#define IP6T_F_GOTO 0x04 /* Set if jump is a goto */ +#define IP6T_F_MASK 0x07 /* All possible flag bits mask. */ + +/* Values for "inv" field in struct ip6t_ip6. */ +#define IP6T_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ +#define IP6T_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */ +#define IP6T_INV_TOS 0x04 /* Invert the sense of TOS. */ +#define IP6T_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */ +#define IP6T_INV_DSTIP 0x10 /* Invert the sense of DST OP. */ +#define IP6T_INV_FRAG 0x20 /* Invert the sense of FRAG. */ +#define IP6T_INV_PROTO XT_INV_PROTO +#define IP6T_INV_MASK 0x7F /* All possible flag bits mask. */ + +/* This structure defines each of the firewall rules. 
Consists of 3 + parts which are 1) general IP header stuff 2) match specific + stuff 3) the target to perform if the rule matches */ +struct ip6t_entry { + struct ip6t_ip6 ipv6; + + /* Mark with fields that we care about. */ + unsigned int nfcache; + + /* Size of ipt_entry + matches */ + __u16 target_offset; + /* Size of ipt_entry + matches + target */ + __u16 next_offset; + + /* Back pointer */ + unsigned int comefrom; + + /* Packet and byte counters. */ + struct xt_counters counters; + + /* The matches (if any), then the target. */ + unsigned char elems[0]; +}; + +/* Standard entry */ +struct ip6t_standard { + struct ip6t_entry entry; + struct xt_standard_target target; +}; + +struct ip6t_error { + struct ip6t_entry entry; + struct xt_error_target target; +}; + +#define IP6T_ENTRY_INIT(__size) \ +{ \ + .target_offset = sizeof(struct ip6t_entry), \ + .next_offset = (__size), \ +} + +#define IP6T_STANDARD_INIT(__verdict) \ +{ \ + .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \ + .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ + sizeof(struct xt_standard_target)), \ + .target.verdict = -(__verdict) - 1, \ +} + +#define IP6T_ERROR_INIT \ +{ \ + .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \ + .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ + sizeof(struct xt_error_target)), \ + .target.errorname = "ERROR", \ +} + +/* + * New IP firewall options for [gs]etsockopt at the RAW IP level. + * Unlike BSD Linux inherits IP options so you don't have to use + * a raw socket for this. Instead we check rights in the calls. + * + * ATTENTION: check linux/in6.h before adding new number here. + */ +#define IP6T_BASE_CTL 64 + +#define IP6T_SO_SET_REPLACE (IP6T_BASE_CTL) +#define IP6T_SO_SET_ADD_COUNTERS (IP6T_BASE_CTL + 1) +#define IP6T_SO_SET_MAX IP6T_SO_SET_ADD_COUNTERS + +#define IP6T_SO_GET_INFO (IP6T_BASE_CTL) +#define IP6T_SO_GET_ENTRIES (IP6T_BASE_CTL + 1) +#define IP6T_SO_GET_REVISION_MATCH (IP6T_BASE_CTL + 4) +#define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 5) +#define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET + +/* ICMP matching stuff */ +struct ip6t_icmp { + __u8 type; /* type to match */ + __u8 code[2]; /* range of code */ + __u8 invflags; /* Inverse flags */ +}; + +/* Values for "inv" field for struct ipt_icmp. */ +#define IP6T_ICMP_INV 0x01 /* Invert the sense of type/code test */ + +/* The argument to IP6T_SO_GET_INFO */ +struct ip6t_getinfo { + /* Which table: caller fills this in. */ + char name[XT_TABLE_MAXNAMELEN]; + + /* Kernel fills these in. */ + /* Which hook entry points are valid: bitmask */ + unsigned int valid_hooks; + + /* Hook entry points: one per netfilter hook. */ + unsigned int hook_entry[NF_INET_NUMHOOKS]; + + /* Underflow points. */ + unsigned int underflow[NF_INET_NUMHOOKS]; + + /* Number of entries */ + unsigned int num_entries; + + /* Size of entries. */ + unsigned int size; +}; + +/* The argument to IP6T_SO_SET_REPLACE. */ +struct ip6t_replace { + /* Which table. */ + char name[XT_TABLE_MAXNAMELEN]; + + /* Which hook entry points are valid: bitmask. You can't + change this. */ + unsigned int valid_hooks; + + /* Number of entries */ + unsigned int num_entries; + + /* Total size of new entries */ + unsigned int size; + + /* Hook entry points. */ + unsigned int hook_entry[NF_INET_NUMHOOKS]; + + /* Underflow points. */ + unsigned int underflow[NF_INET_NUMHOOKS]; + + /* Information about old entries: */ + /* Number of counters (must be equal to current number of entries). */ + unsigned int num_counters; + /* The old entries' counters. 
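On replace, the kernel fills this user-supplied buffer with the counters of the table being swapped out; it must have room for num_counters entries.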
*/ + struct xt_counters __user *counters; + + /* The entries (hang off end: not really an array). */ + struct ip6t_entry entries[0]; +}; + +/* The argument to IP6T_SO_GET_ENTRIES. */ +struct ip6t_get_entries { + /* Which table: user fills this in. */ + char name[XT_TABLE_MAXNAMELEN]; + + /* User fills this in: total entry size. */ + unsigned int size; + + /* The entries. */ + struct ip6t_entry entrytable[0]; +}; + +/* Helper functions */ +static __inline__ struct xt_entry_target * +ip6t_get_target(struct ip6t_entry *e) +{ + return (void *)e + e->target_offset; +} + +/* + * Main firewall chains definitions and global var's definitions. + */ + +#ifdef __KERNEL__ + +#include <linux/init.h> +extern void ip6t_init(void) __init; + +extern void *ip6t_alloc_initial_table(const struct xt_table *); +extern struct xt_table *ip6t_register_table(struct net *net, + const struct xt_table *table, + const struct ip6t_replace *repl); +extern void ip6t_unregister_table(struct net *net, struct xt_table *table); +extern unsigned int ip6t_do_table(struct sk_buff *skb, + unsigned int hook, + const struct net_device *in, + const struct net_device *out, + struct xt_table *table); + +/* Check for an extension */ +static inline int +ip6t_ext_hdr(u8 nexthdr) +{ return (nexthdr == IPPROTO_HOPOPTS) || + (nexthdr == IPPROTO_ROUTING) || + (nexthdr == IPPROTO_FRAGMENT) || + (nexthdr == IPPROTO_ESP) || + (nexthdr == IPPROTO_AH) || + (nexthdr == IPPROTO_NONE) || + (nexthdr == IPPROTO_DSTOPTS); +} + +/* find specified header and get offset to it */ +extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff); + +#ifdef CONFIG_COMPAT +#include <net/compat.h> + +struct compat_ip6t_entry { + struct ip6t_ip6 ipv6; + compat_uint_t nfcache; + __u16 target_offset; + __u16 next_offset; + compat_uint_t comefrom; + struct compat_xt_counters counters; + unsigned char elems[0]; +}; + +static inline struct xt_entry_target * +compat_ip6t_get_target(struct compat_ip6t_entry *e) +{ + return (void *)e + e->target_offset; +} + +#endif /* CONFIG_COMPAT */ +#endif /*__KERNEL__*/ +#endif /* _IP6_TABLES_H */ diff --git a/include/linux/netfilter_ipv6/ip6t_HL.h b/include/linux/netfilter_ipv6/ip6t_HL.h new file mode 100644 index 00000000..ebd8ead1 --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_HL.h @@ -0,0 +1,24 @@ +/* Hop Limit modification module for ip6tables + * Maciej Soltysiak <solt@dns.toxicfilms.tv> + * Based on HW's TTL module */ + +#ifndef _IP6T_HL_H +#define _IP6T_HL_H + +#include <linux/types.h> + +enum { + IP6T_HL_SET = 0, + IP6T_HL_INC, + IP6T_HL_DEC +}; + +#define IP6T_HL_MAXMODE IP6T_HL_DEC + +struct ip6t_HL_info { + __u8 mode; + __u8 hop_limit; +}; + + +#endif diff --git a/include/linux/netfilter_ipv6/ip6t_LOG.h b/include/linux/netfilter_ipv6/ip6t_LOG.h new file mode 100644 index 00000000..3dd0bc4e --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_LOG.h @@ -0,0 +1,21 @@ +#ifndef _IP6T_LOG_H +#define _IP6T_LOG_H + +#warning "Please update iptables, this file will be removed soon!" + +/* make sure not to change this without changing netfilter.h:NF_LOG_* (!) 
*/ +#define IP6T_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ +#define IP6T_LOG_TCPOPT 0x02 /* Log TCP options */ +#define IP6T_LOG_IPOPT 0x04 /* Log IP options */ +#define IP6T_LOG_UID 0x08 /* Log UID owning local socket */ +#define IP6T_LOG_NFLOG 0x10 /* Unsupported, don't use */ +#define IP6T_LOG_MACDECODE 0x20 /* Decode MAC header */ +#define IP6T_LOG_MASK 0x2f + +struct ip6t_log_info { + unsigned char level; + unsigned char logflags; + char prefix[30]; +}; + +#endif /*_IPT_LOG_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_REJECT.h b/include/linux/netfilter_ipv6/ip6t_REJECT.h new file mode 100644 index 00000000..205ed62e --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_REJECT.h @@ -0,0 +1,20 @@ +#ifndef _IP6T_REJECT_H +#define _IP6T_REJECT_H + +#include <linux/types.h> + +enum ip6t_reject_with { + IP6T_ICMP6_NO_ROUTE, + IP6T_ICMP6_ADM_PROHIBITED, + IP6T_ICMP6_NOT_NEIGHBOUR, + IP6T_ICMP6_ADDR_UNREACH, + IP6T_ICMP6_PORT_UNREACH, + IP6T_ICMP6_ECHOREPLY, + IP6T_TCP_RESET +}; + +struct ip6t_reject_info { + __u32 with; /* reject type */ +}; + +#endif /*_IP6T_REJECT_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_ah.h b/include/linux/netfilter_ipv6/ip6t_ah.h new file mode 100644 index 00000000..5da2b65c --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_ah.h @@ -0,0 +1,22 @@ +#ifndef _IP6T_AH_H +#define _IP6T_AH_H + +#include <linux/types.h> + +struct ip6t_ah { + __u32 spis[2]; /* Security Parameter Index */ + __u32 hdrlen; /* Header Length */ + __u8 hdrres; /* Test of the Reserved Filed */ + __u8 invflags; /* Inverse flags */ +}; + +#define IP6T_AH_SPI 0x01 +#define IP6T_AH_LEN 0x02 +#define IP6T_AH_RES 0x04 + +/* Values for "invflags" field in struct ip6t_ah. */ +#define IP6T_AH_INV_SPI 0x01 /* Invert the sense of spi. */ +#define IP6T_AH_INV_LEN 0x02 /* Invert the sense of length. */ +#define IP6T_AH_INV_MASK 0x03 /* All possible flags. */ + +#endif /*_IP6T_AH_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_frag.h b/include/linux/netfilter_ipv6/ip6t_frag.h new file mode 100644 index 00000000..b47f61b9 --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_frag.h @@ -0,0 +1,25 @@ +#ifndef _IP6T_FRAG_H +#define _IP6T_FRAG_H + +#include <linux/types.h> + +struct ip6t_frag { + __u32 ids[2]; /* Security Parameter Index */ + __u32 hdrlen; /* Header Length */ + __u8 flags; /* */ + __u8 invflags; /* Inverse flags */ +}; + +#define IP6T_FRAG_IDS 0x01 +#define IP6T_FRAG_LEN 0x02 +#define IP6T_FRAG_RES 0x04 +#define IP6T_FRAG_FST 0x08 +#define IP6T_FRAG_MF 0x10 +#define IP6T_FRAG_NMF 0x20 + +/* Values for "invflags" field in struct ip6t_frag. */ +#define IP6T_FRAG_INV_IDS 0x01 /* Invert the sense of ids. */ +#define IP6T_FRAG_INV_LEN 0x02 /* Invert the sense of length. */ +#define IP6T_FRAG_INV_MASK 0x03 /* All possible flags. 
*/ + +#endif /*_IP6T_FRAG_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_hl.h b/include/linux/netfilter_ipv6/ip6t_hl.h new file mode 100644 index 00000000..6e76dbc6 --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_hl.h @@ -0,0 +1,24 @@ +/* ip6tables module for matching the Hop Limit value + * Maciej Soltysiak <solt@dns.toxicfilms.tv> + * Based on HW's ttl module */ + +#ifndef _IP6T_HL_H +#define _IP6T_HL_H + +#include <linux/types.h> + +enum { + IP6T_HL_EQ = 0, /* equals */ + IP6T_HL_NE, /* not equals */ + IP6T_HL_LT, /* less than */ + IP6T_HL_GT, /* greater than */ +}; + + +struct ip6t_hl_info { + __u8 mode; + __u8 hop_limit; +}; + + +#endif diff --git a/include/linux/netfilter_ipv6/ip6t_ipv6header.h b/include/linux/netfilter_ipv6/ip6t_ipv6header.h new file mode 100644 index 00000000..efae3a20 --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_ipv6header.h @@ -0,0 +1,28 @@ +/* ipv6header match - matches IPv6 packets based +on whether they contain certain headers */ + +/* Original idea: Brad Chapman + * Rewritten by: Andras Kis-Szabo <kisza@sch.bme.hu> */ + + +#ifndef __IPV6HEADER_H +#define __IPV6HEADER_H + +#include <linux/types.h> + +struct ip6t_ipv6header_info { + __u8 matchflags; + __u8 invflags; + __u8 modeflag; +}; + +#define MASK_HOPOPTS 128 +#define MASK_DSTOPTS 64 +#define MASK_ROUTING 32 +#define MASK_FRAGMENT 16 +#define MASK_AH 8 +#define MASK_ESP 4 +#define MASK_NONE 2 +#define MASK_PROTO 1 + +#endif /* __IPV6HEADER_H */ diff --git a/include/linux/netfilter_ipv6/ip6t_mh.h b/include/linux/netfilter_ipv6/ip6t_mh.h new file mode 100644 index 00000000..a7729a50 --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_mh.h @@ -0,0 +1,16 @@ +#ifndef _IP6T_MH_H +#define _IP6T_MH_H + +#include <linux/types.h> + +/* MH matching stuff */ +struct ip6t_mh { + __u8 types[2]; /* MH type range */ + __u8 invflags; /* Inverse flags */ +}; + +/* Values for "invflags" field in struct ip6t_mh. */ +#define IP6T_MH_INV_TYPE 0x01 /* Invert the sense of type. */ +#define IP6T_MH_INV_MASK 0x01 /* All possible flags. */ + +#endif /*_IP6T_MH_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_opts.h b/include/linux/netfilter_ipv6/ip6t_opts.h new file mode 100644 index 00000000..17d419a8 --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_opts.h @@ -0,0 +1,24 @@ +#ifndef _IP6T_OPTS_H +#define _IP6T_OPTS_H + +#include <linux/types.h> + +#define IP6T_OPTS_OPTSNR 16 + +struct ip6t_opts { + __u32 hdrlen; /* Header Length */ + __u8 flags; /* */ + __u8 invflags; /* Inverse flags */ + __u16 opts[IP6T_OPTS_OPTSNR]; /* opts */ + __u8 optsnr; /* Nr of OPts */ +}; + +#define IP6T_OPTS_LEN 0x01 +#define IP6T_OPTS_OPTS 0x02 +#define IP6T_OPTS_NSTRICT 0x04 + +/* Values for "invflags" field in struct ip6t_rt. */ +#define IP6T_OPTS_INV_LEN 0x01 /* Invert the sense of length. */ +#define IP6T_OPTS_INV_MASK 0x01 /* All possible flags. 
*/ + +#endif /*_IP6T_OPTS_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_rt.h b/include/linux/netfilter_ipv6/ip6t_rt.h new file mode 100644 index 00000000..7605a5ff --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_rt.h @@ -0,0 +1,33 @@ +#ifndef _IP6T_RT_H +#define _IP6T_RT_H + +#include <linux/types.h> +/*#include <linux/in6.h>*/ + +#define IP6T_RT_HOPS 16 + +struct ip6t_rt { + __u32 rt_type; /* Routing Type */ + __u32 segsleft[2]; /* Segments Left */ + __u32 hdrlen; /* Header Length */ + __u8 flags; /* */ + __u8 invflags; /* Inverse flags */ + struct in6_addr addrs[IP6T_RT_HOPS]; /* Hops */ + __u8 addrnr; /* Nr of Addresses */ +}; + +#define IP6T_RT_TYP 0x01 +#define IP6T_RT_SGS 0x02 +#define IP6T_RT_LEN 0x04 +#define IP6T_RT_RES 0x08 +#define IP6T_RT_FST_MASK 0x30 +#define IP6T_RT_FST 0x10 +#define IP6T_RT_FST_NSTRICT 0x20 + +/* Values for "invflags" field in struct ip6t_rt. */ +#define IP6T_RT_INV_TYP 0x01 /* Invert the sense of type. */ +#define IP6T_RT_INV_SGS 0x02 /* Invert the sense of Segments. */ +#define IP6T_RT_INV_LEN 0x04 /* Invert the sense of length. */ +#define IP6T_RT_INV_MASK 0x07 /* All possible flags. */ + +#endif /*_IP6T_RT_H*/ diff --git a/include/linux/netlink.h b/include/linux/netlink.h new file mode 100644 index 00000000..a2092f58 --- /dev/null +++ b/include/linux/netlink.h @@ -0,0 +1,270 @@ +#ifndef __LINUX_NETLINK_H +#define __LINUX_NETLINK_H + +#include <linux/socket.h> /* for __kernel_sa_family_t */ +#include <linux/types.h> + +#define NETLINK_ROUTE 0 /* Routing/device hook */ +#define NETLINK_UNUSED 1 /* Unused number */ +#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ +#define NETLINK_FIREWALL 3 /* Firewalling hook */ +#define NETLINK_SOCK_DIAG 4 /* socket monitoring */ +#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ +#define NETLINK_XFRM 6 /* ipsec */ +#define NETLINK_SELINUX 7 /* SELinux event notifications */ +#define NETLINK_ISCSI 8 /* Open-iSCSI */ +#define NETLINK_AUDIT 9 /* auditing */ +#define NETLINK_FIB_LOOKUP 10 +#define NETLINK_CONNECTOR 11 +#define NETLINK_NETFILTER 12 /* netfilter subsystem */ +#define NETLINK_IP6_FW 13 +#define NETLINK_DNRTMSG 14 /* DECnet routing messages */ +#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */ +#define NETLINK_GENERIC 16 +/* leave room for NETLINK_DM (DM Events) */ +#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ +#define NETLINK_ECRYPTFS 19 +#define NETLINK_RDMA 20 +#define NETLINK_CRYPTO 21 /* Crypto layer */ + +#define NETLINK_INET_DIAG NETLINK_SOCK_DIAG + +#define MAX_LINKS 32 + +struct sockaddr_nl { + __kernel_sa_family_t nl_family; /* AF_NETLINK */ + unsigned short nl_pad; /* zero */ + __u32 nl_pid; /* port ID */ + __u32 nl_groups; /* multicast groups mask */ +}; + +struct nlmsghdr { + __u32 nlmsg_len; /* Length of message including header */ + __u16 nlmsg_type; /* Message content */ + __u16 nlmsg_flags; /* Additional flags */ + __u32 nlmsg_seq; /* Sequence number */ + __u32 nlmsg_pid; /* Sending process port ID */ +}; + +/* Flags values */ + +#define NLM_F_REQUEST 1 /* It is request message. 
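Must be set on every message that asks the kernel to perform an action or return data.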
*/ +#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */ +#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */ +#define NLM_F_ECHO 8 /* Echo this request */ +#define NLM_F_DUMP_INTR 16 /* Dump was inconsistent due to sequence change */ + +/* Modifiers to GET request */ +#define NLM_F_ROOT 0x100 /* specify tree root */ +#define NLM_F_MATCH 0x200 /* return all matching */ +#define NLM_F_ATOMIC 0x400 /* atomic GET */ +#define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH) + +/* Modifiers to NEW request */ +#define NLM_F_REPLACE 0x100 /* Override existing */ +#define NLM_F_EXCL 0x200 /* Do not touch, if it exists */ +#define NLM_F_CREATE 0x400 /* Create, if it does not exist */ +#define NLM_F_APPEND 0x800 /* Add to end of list */ + +/* + 4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL + 4.4BSD CHANGE NLM_F_REPLACE + + True CHANGE NLM_F_CREATE|NLM_F_REPLACE + Append NLM_F_CREATE + Check NLM_F_EXCL + */ + +#define NLMSG_ALIGNTO 4U +#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) ) +#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr))) +#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN)) +#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len)) +#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0))) +#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \ + (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len))) +#define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \ + (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \ + (nlh)->nlmsg_len <= (len)) +#define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len))) + +#define NLMSG_NOOP 0x1 /* Nothing. */ +#define NLMSG_ERROR 0x2 /* Error */ +#define NLMSG_DONE 0x3 /* End of a dump */ +#define NLMSG_OVERRUN 0x4 /* Data lost */ + +#define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */ + +struct nlmsgerr { + int error; + struct nlmsghdr msg; +}; + +#define NETLINK_ADD_MEMBERSHIP 1 +#define NETLINK_DROP_MEMBERSHIP 2 +#define NETLINK_PKTINFO 3 +#define NETLINK_BROADCAST_ERROR 4 +#define NETLINK_NO_ENOBUFS 5 + +struct nl_pktinfo { + __u32 group; +}; + +#define NET_MAJOR 36 /* Major 36 is reserved for networking */ + +enum { + NETLINK_UNCONNECTED = 0, + NETLINK_CONNECTED, +}; + +/* + * <------- NLA_HDRLEN ------> <-- NLA_ALIGN(payload)--> + * +---------------------+- - -+- - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (struct nlattr) | ing | | ing | + * +---------------------+- - -+- - - - - - - - - -+- - -+ + * <-------------- nlattr->nla_len --------------> + */ + +struct nlattr { + __u16 nla_len; + __u16 nla_type; +}; + +/* + * nla_type (16 bits) + * +---+---+-------------------------------+ + * | N | O | Attribute Type | + * +---+---+-------------------------------+ + * N := Carries nested attributes + * O := Payload stored in network byte order + * + * Note: The N and O flag are mutually exclusive. 
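+ *
+ * Hedged usage sketch (assuming a parsed struct nlattr *nla; the masks are
+ * defined just below):
+ *
+ *     int type          = nla->nla_type & NLA_TYPE_MASK;
+ *     int is_nested     = !!(nla->nla_type & NLA_F_NESTED);
+ *     int net_byteorder = !!(nla->nla_type & NLA_F_NET_BYTEORDER);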
+ */ +#define NLA_F_NESTED (1 << 15) +#define NLA_F_NET_BYTEORDER (1 << 14) +#define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER) + +#define NLA_ALIGNTO 4 +#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1)) +#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr))) + +#ifdef __KERNEL__ + +#include <linux/capability.h> +#include <linux/skbuff.h> + +struct net; + +static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) +{ + return (struct nlmsghdr *)skb->data; +} + +struct netlink_skb_parms { + struct ucred creds; /* Skb credentials */ + __u32 pid; + __u32 dst_group; +}; + +#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) +#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) + + +extern void netlink_table_grab(void); +extern void netlink_table_ungrab(void); + +extern struct sock *netlink_kernel_create(struct net *net, + int unit,unsigned int groups, + void (*input)(struct sk_buff *skb), + struct mutex *cb_mutex, + struct module *module); +extern void netlink_kernel_release(struct sock *sk); +extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); +extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); +extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); +extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group); +extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); +extern int netlink_has_listeners(struct sock *sk, unsigned int group); +extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); +extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, + __u32 group, gfp_t allocation); +extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, + __u32 pid, __u32 group, gfp_t allocation, + int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), + void *filter_data); +extern int netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); +extern int netlink_register_notifier(struct notifier_block *nb); +extern int netlink_unregister_notifier(struct notifier_block *nb); + +/* finegrained unicast helpers: */ +struct sock *netlink_getsockbyfilp(struct file *filp); +int netlink_attachskb(struct sock *sk, struct sk_buff *skb, + long *timeo, struct sock *ssk); +void netlink_detachskb(struct sock *sk, struct sk_buff *skb); +int netlink_sendskb(struct sock *sk, struct sk_buff *skb); + +/* + * skb should fit one page. This choice is good for headerless malloc. + * But we should limit to 8K so that userspace does not have to + * use enormous buffer sizes on recvmsg() calls just to avoid + * MSG_TRUNC when PAGE_SIZE is very large. 
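+ *
+ * For orientation only, the classic recvmsg()-side loop with which userspace
+ * consumes such a batch of messages (hedged sketch; buf and an int len come
+ * from the recvmsg() call, handle() is a placeholder):
+ *
+ *     struct nlmsghdr *nlh;
+ *     for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
+ *          nlh = NLMSG_NEXT(nlh, len)) {
+ *             if (nlh->nlmsg_type == NLMSG_DONE)
+ *                     break;
+ *             handle(NLMSG_DATA(nlh), NLMSG_PAYLOAD(nlh, 0));
+ *     }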
+ */ +#if PAGE_SIZE < 8192UL +#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE) +#else +#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL) +#endif + +#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN) + + +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff * skb, + struct netlink_callback *cb); + int (*done)(struct netlink_callback *cb); + void *data; + u16 family; + u16 min_dump_alloc; + unsigned int prev_seq, seq; + long args[6]; +}; + +struct netlink_notify { + struct net *net; + int pid; + int protocol; +}; + +struct nlmsghdr * +__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags); + +#define NLMSG_NEW(skb, pid, seq, type, len, flags) \ +({ if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \ + goto nlmsg_failure; \ + __nlmsg_put(skb, pid, seq, type, len, flags); }) + +#define NLMSG_PUT(skb, pid, seq, type, len) \ + NLMSG_NEW(skb, pid, seq, type, len, 0) + +struct netlink_dump_control { + int (*dump)(struct sk_buff *skb, struct netlink_callback *); + int (*done)(struct netlink_callback*); + void *data; + u16 min_dump_alloc; +}; + +extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + struct netlink_dump_control *control); + + +#define NL_NONROOT_RECV 0x1 +#define NL_NONROOT_SEND 0x2 +extern void netlink_set_nonroot(int protocol, unsigned flag); + +#endif /* __KERNEL__ */ + +#endif /* __LINUX_NETLINK_H */ diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h new file mode 100644 index 00000000..5dfa091c --- /dev/null +++ b/include/linux/netpoll.h @@ -0,0 +1,156 @@ +/* + * Common code for low-level network console, dump, and debugger code + * + * Derived from netconsole, kgdb-over-ethernet, and netdump patches + */ + +#ifndef _LINUX_NETPOLL_H +#define _LINUX_NETPOLL_H + +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/rcupdate.h> +#include <linux/list.h> + +struct netpoll { + struct net_device *dev; + char dev_name[IFNAMSIZ]; + const char *name; + void (*rx_hook)(struct netpoll *, int, char *, int); + + __be32 local_ip, remote_ip; + u16 local_port, remote_port; + u8 remote_mac[ETH_ALEN]; + + struct list_head rx; /* rx_np list element */ +}; + +struct netpoll_info { + atomic_t refcnt; + + int rx_flags; + spinlock_t rx_lock; + struct list_head rx_np; /* netpolls that registered an rx_hook */ + + struct sk_buff_head arp_tx; /* list of arp requests to reply to */ + struct sk_buff_head txq; + + struct delayed_work tx_work; + + struct netpoll *netpoll; +}; + +void netpoll_send_udp(struct netpoll *np, const char *msg, int len); +void netpoll_print_options(struct netpoll *np); +int netpoll_parse_options(struct netpoll *np, char *opt); +int __netpoll_setup(struct netpoll *np); +int netpoll_setup(struct netpoll *np); +int netpoll_trap(void); +void netpoll_set_trap(int trap); +void __netpoll_cleanup(struct netpoll *np); +void netpoll_cleanup(struct netpoll *np); +int __netpoll_rx(struct sk_buff *skb); +void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, + struct net_device *dev); +static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) +{ + netpoll_send_skb_on_dev(np, skb, np->dev); +} + + + +#ifdef CONFIG_NETPOLL +static inline bool netpoll_rx(struct sk_buff *skb) +{ + struct netpoll_info *npinfo; + unsigned long flags; + bool ret = false; + + local_irq_save(flags); + npinfo = rcu_dereference_bh(skb->dev->npinfo); + + if (!npinfo || (list_empty(&npinfo->rx_np) && 
!npinfo->rx_flags)) + goto out; + + spin_lock(&npinfo->rx_lock); + /* check rx_flags again with the lock held */ + if (npinfo->rx_flags && __netpoll_rx(skb)) + ret = true; + spin_unlock(&npinfo->rx_lock); + +out: + local_irq_restore(flags); + return ret; +} + +static inline int netpoll_rx_on(struct sk_buff *skb) +{ + struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); + + return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); +} + +static inline int netpoll_receive_skb(struct sk_buff *skb) +{ + if (!list_empty(&skb->dev->napi_list)) + return netpoll_rx(skb); + return 0; +} + +static inline void *netpoll_poll_lock(struct napi_struct *napi) +{ + struct net_device *dev = napi->dev; + + if (dev && dev->npinfo) { + spin_lock(&napi->poll_lock); + napi->poll_owner = smp_processor_id(); + return napi; + } + return NULL; +} + +static inline void netpoll_poll_unlock(void *have) +{ + struct napi_struct *napi = have; + + if (napi) { + napi->poll_owner = -1; + spin_unlock(&napi->poll_lock); + } +} + +static inline int netpoll_tx_running(struct net_device *dev) +{ + return irqs_disabled(); +} + +#else +static inline bool netpoll_rx(struct sk_buff *skb) +{ + return 0; +} +static inline int netpoll_rx_on(struct sk_buff *skb) +{ + return 0; +} +static inline int netpoll_receive_skb(struct sk_buff *skb) +{ + return 0; +} +static inline void *netpoll_poll_lock(struct napi_struct *napi) +{ + return NULL; +} +static inline void netpoll_poll_unlock(void *have) +{ +} +static inline void netpoll_netdev_init(struct net_device *dev) +{ +} +static inline int netpoll_tx_running(struct net_device *dev) +{ + return 0; +} +#endif + +#endif diff --git a/include/linux/netrom.h b/include/linux/netrom.h new file mode 100644 index 00000000..af7313cc --- /dev/null +++ b/include/linux/netrom.h @@ -0,0 +1,36 @@ +/* + * These are the public elements of the Linux kernel NET/ROM implementation. + * For kernel AX.25 see the file ax25.h. This file requires ax25.h for the + * definition of the ax25_address structure. + */ + +#ifndef NETROM_KERNEL_H +#define NETROM_KERNEL_H + +#include <linux/ax25.h> + +#define NETROM_MTU 236 + +#define NETROM_T1 1 +#define NETROM_T2 2 +#define NETROM_N2 3 +#define NETROM_T4 6 +#define NETROM_IDLE 7 + +#define SIOCNRDECOBS (SIOCPROTOPRIVATE+2) + +struct nr_route_struct { +#define NETROM_NEIGH 0 +#define NETROM_NODE 1 + int type; + ax25_address callsign; + char device[16]; + unsigned int quality; + char mnemonic[7]; + ax25_address neighbour; + unsigned int obs_count; + unsigned int ndigis; + ax25_address digipeaters[AX25_MAX_DIGIS]; +}; + +#endif diff --git a/include/linux/nfc.h b/include/linux/nfc.h new file mode 100644 index 00000000..39c1fcf0 --- /dev/null +++ b/include/linux/nfc.h @@ -0,0 +1,171 @@ +/* + * Copyright (C) 2011 Instituto Nokia de Tecnologia + * + * Authors: + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __LINUX_NFC_H +#define __LINUX_NFC_H + +#include <linux/types.h> +#include <linux/socket.h> + +#define NFC_GENL_NAME "nfc" +#define NFC_GENL_VERSION 1 + +#define NFC_GENL_MCAST_EVENT_NAME "events" + +/** + * enum nfc_commands - supported nfc commands + * + * @NFC_CMD_UNSPEC: unspecified command + * + * @NFC_CMD_GET_DEVICE: request information about a device (requires + * %NFC_ATTR_DEVICE_INDEX) or dump request to get a list of all nfc devices + * @NFC_CMD_DEV_UP: turn on the nfc device + * (requires %NFC_ATTR_DEVICE_INDEX) + * @NFC_CMD_DEV_DOWN: turn off the nfc device + * (requires %NFC_ATTR_DEVICE_INDEX) + * @NFC_CMD_START_POLL: start polling for targets using the given protocols + * (requires %NFC_ATTR_DEVICE_INDEX and %NFC_ATTR_PROTOCOLS) + * @NFC_CMD_STOP_POLL: stop polling for targets (requires + * %NFC_ATTR_DEVICE_INDEX) + * @NFC_CMD_GET_TARGET: dump all targets found by the previous poll (requires + * %NFC_ATTR_DEVICE_INDEX) + * @NFC_EVENT_TARGETS_FOUND: event emitted when a new target is found + * (it sends %NFC_ATTR_DEVICE_INDEX) + * @NFC_EVENT_DEVICE_ADDED: event emitted when a new device is registred + * (it sends %NFC_ATTR_DEVICE_NAME, %NFC_ATTR_DEVICE_INDEX and + * %NFC_ATTR_PROTOCOLS) + * @NFC_EVENT_DEVICE_REMOVED: event emitted when a device is removed + * (it sends %NFC_ATTR_DEVICE_INDEX) + */ +enum nfc_commands { + NFC_CMD_UNSPEC, + NFC_CMD_GET_DEVICE, + NFC_CMD_DEV_UP, + NFC_CMD_DEV_DOWN, + NFC_CMD_DEP_LINK_UP, + NFC_CMD_DEP_LINK_DOWN, + NFC_CMD_START_POLL, + NFC_CMD_STOP_POLL, + NFC_CMD_GET_TARGET, + NFC_EVENT_TARGETS_FOUND, + NFC_EVENT_DEVICE_ADDED, + NFC_EVENT_DEVICE_REMOVED, +/* private: internal use only */ + __NFC_CMD_AFTER_LAST +}; +#define NFC_CMD_MAX (__NFC_CMD_AFTER_LAST - 1) + +/** + * enum nfc_attrs - supported nfc attributes + * + * @NFC_ATTR_UNSPEC: unspecified attribute + * + * @NFC_ATTR_DEVICE_INDEX: index of nfc device + * @NFC_ATTR_DEVICE_NAME: device name, max 8 chars + * @NFC_ATTR_PROTOCOLS: nfc protocols - bitwise or-ed combination from + * NFC_PROTO_*_MASK constants + * @NFC_ATTR_TARGET_INDEX: index of the nfc target + * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID + * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the + * target is not NFC-Forum compliant) + * @NFC_ATTR_TARGET_NFCID1: NFC-A targets identifier, max 10 bytes + * @NFC_ATTR_TARGET_SENSB_RES: NFC-B targets extra information, max 12 bytes + * @NFC_ATTR_TARGET_SENSF_RES: NFC-F targets extra information, max 18 bytes + * @NFC_ATTR_COMM_MODE: Passive or active mode + * @NFC_ATTR_RF_MODE: Initiator or target + */ +enum nfc_attrs { + NFC_ATTR_UNSPEC, + NFC_ATTR_DEVICE_INDEX, + NFC_ATTR_DEVICE_NAME, + NFC_ATTR_PROTOCOLS, + NFC_ATTR_TARGET_INDEX, + NFC_ATTR_TARGET_SENS_RES, + NFC_ATTR_TARGET_SEL_RES, + NFC_ATTR_TARGET_NFCID1, + NFC_ATTR_TARGET_SENSB_RES, + NFC_ATTR_TARGET_SENSF_RES, + NFC_ATTR_COMM_MODE, + NFC_ATTR_RF_MODE, + NFC_ATTR_DEVICE_POWERED, +/* private: internal use only */ + __NFC_ATTR_AFTER_LAST +}; +#define NFC_ATTR_MAX (__NFC_ATTR_AFTER_LAST - 1) + +#define NFC_DEVICE_NAME_MAXSIZE 8 +#define NFC_NFCID1_MAXSIZE 10 +#define NFC_SENSB_RES_MAXSIZE 12 +#define NFC_SENSF_RES_MAXSIZE 18 + +/* NFC protocols */ +#define NFC_PROTO_JEWEL 1 +#define NFC_PROTO_MIFARE 2 +#define NFC_PROTO_FELICA 3 +#define 
NFC_PROTO_ISO14443 4 +#define NFC_PROTO_NFC_DEP 5 + +#define NFC_PROTO_MAX 6 + +/* NFC communication modes */ +#define NFC_COMM_ACTIVE 0 +#define NFC_COMM_PASSIVE 1 + +/* NFC RF modes */ +#define NFC_RF_INITIATOR 0 +#define NFC_RF_TARGET 1 + +/* NFC protocols masks used in bitsets */ +#define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL) +#define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE) +#define NFC_PROTO_FELICA_MASK (1 << NFC_PROTO_FELICA) +#define NFC_PROTO_ISO14443_MASK (1 << NFC_PROTO_ISO14443) +#define NFC_PROTO_NFC_DEP_MASK (1 << NFC_PROTO_NFC_DEP) + +struct sockaddr_nfc { + sa_family_t sa_family; + __u32 dev_idx; + __u32 target_idx; + __u32 nfc_protocol; +}; + +#define NFC_LLCP_MAX_SERVICE_NAME 63 +struct sockaddr_nfc_llcp { + sa_family_t sa_family; + __u32 dev_idx; + __u32 target_idx; + __u32 nfc_protocol; + __u8 dsap; /* Destination SAP, if known */ + __u8 ssap; /* Source SAP to be bound to */ + char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */; + size_t service_name_len; +}; + +/* NFC socket protocols */ +#define NFC_SOCKPROTO_RAW 0 +#define NFC_SOCKPROTO_LLCP 1 +#define NFC_SOCKPROTO_MAX 2 + +#define NFC_HEADER_SIZE 1 + +#endif /*__LINUX_NFC_H */ diff --git a/include/linux/nfc/bcm2079x.h b/include/linux/nfc/bcm2079x.h new file mode 100644 index 00000000..689757aa --- /dev/null +++ b/include/linux/nfc/bcm2079x.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2012 Broadcom Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _BCM2079X_H +#define _BCM2079X_H + +#define BCMNFC_MAGIC 0xFA + +/* + * BCMNFC power control via ioctl + * BCMNFC_POWER_CTL(0): power off + * BCMNFC_POWER_CTL(1): power on + * BCMNFC_WAKE_CTL(0): wake off + * BCMNFC_WAKE_CTL(1): wake on + */ +#define BCMNFC_POWER_CTL _IO(BCMNFC_MAGIC, 0x01) +#define BCMNFC_CHANGE_ADDR _IO(BCMNFC_MAGIC, 0x02) +#define BCMNFC_READ_FULL_PACKET _IO(BCMNFC_MAGIC, 0x03) +#define BCMNFC_SET_WAKE_ACTIVE_STATE _IO(BCMNFC_MAGIC, 0x04) +#define BCMNFC_WAKE_CTL _IO(BCMNFC_MAGIC, 0x05) +#define BCMNFC_READ_MULTI_PACKETS _IO(BCMNFC_MAGIC, 0x06) + +struct bcm2079x_platform_data { + unsigned int irq_gpio; + unsigned int irq_no; + unsigned int en_gpio; + int wake_gpio; +}; + +#endif diff --git a/include/linux/nfc/pn544.h b/include/linux/nfc/pn544.h new file mode 100644 index 00000000..11eb2236 --- /dev/null +++ b/include/linux/nfc/pn544.h @@ -0,0 +1,114 @@ +/* + * Driver include for the PN544 NFC chip. + * + * Copyright (C) Nokia Corporation + * + * Author: Jari Vanhala <ext-jari.vanhala@nokia.com> + * Contact: Matti Aaltoenn <matti.j.aaltonen@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _PN544_H_ +#define _PN544_H_ + +#include <linux/i2c.h> + +#define PN544_DRIVER_NAME "pn544" +#define PN544_MAXWINDOW_SIZE 7 +#define PN544_WINDOW_SIZE 4 +#define PN544_RETRIES 10 +#define PN544_MAX_I2C_TRANSFER 0x0400 +#define PN544_MSG_MAX_SIZE 0x21 /* at normal HCI mode */ + + +#define PN544_MAGIC 0xE9 + +/* + * PN544 power control via ioctl + * PN544_SET_PWR(0): power off + * PN544_SET_PWR(1): power on + * PN544_SET_PWR(2): reset and power on with firmware download enabled + */ +#define PN544_SET_PWR _IOW(PN544_MAGIC, 0x01, unsigned int) + + +/* ioctl */ +#define PN544_CHAR_BASE 'P' +#define PN544_IOR(num, dtype) _IOR(PN544_CHAR_BASE, num, dtype) +#define PN544_IOW(num, dtype) _IOW(PN544_CHAR_BASE, num, dtype) +#define PN544_GET_FW_MODE PN544_IOW(1, unsigned int) +#define PN544_SET_FW_MODE PN544_IOW(2, unsigned int) +#define PN544_GET_DEBUG PN544_IOW(3, unsigned int) +#define PN544_SET_DEBUG PN544_IOW(4, unsigned int) + +//#define PN544_SET_PWR PN544_IOW(5, unsigned int) //add 2014-7-10 +/* Timing restrictions (ms) */ +#define PN544_RESETVEN_TIME 30 /* 7 */ +#define PN544_PVDDVEN_TIME 0 +#define PN544_VBATVEN_TIME 0 +#define PN544_GPIO4VEN_TIME 0 +#define PN544_WAKEUP_ACK 5 +#define PN544_WAKEUP_GUARD (PN544_WAKEUP_ACK + 1) +#define PN544_INACTIVITY_TIME 1000 +#define PN544_INTERFRAME_DELAY 200 /* us */ +#define PN544_BAUDRATE_CHANGE 150 /* us */ + +/* Debug bits */ +#define PN544_DEBUG_BUF 0x01 +#define PN544_DEBUG_READ 0x02 +#define PN544_DEBUG_WRITE 0x04 +#define PN544_DEBUG_IRQ 0x08 +#define PN544_DEBUG_CALLS 0x10 +#define PN544_DEBUG_MODE 0x20 + +/* Normal (HCI) mode */ +#define PN544_LLC_HCI_OVERHEAD 3 /* header + crc (to length) */ +#define PN544_LLC_MIN_SIZE (1 + PN544_LLC_HCI_OVERHEAD) /* length + */ +#define PN544_LLC_MAX_DATA (PN544_MSG_MAX_SIZE - 2) +#define PN544_LLC_MAX_HCI_SIZE (PN544_LLC_MAX_DATA - 2) + +struct pn544_llc_packet { + unsigned char length; /* of rest of packet */ + unsigned char header; + unsigned char data[PN544_LLC_MAX_DATA]; /* includes crc-ccitt */ +}; + +/* Firmware upgrade mode */ +#define PN544_FW_HEADER_SIZE 3 +/* max fw transfer is 1024bytes, but I2C limits it to 0xC0 */ +#define PN544_MAX_FW_DATA (PN544_MAX_I2C_TRANSFER - PN544_FW_HEADER_SIZE) + +struct pn544_fw_packet { + unsigned char command; /* status in answer */ + unsigned char length[2]; /* big-endian order (msf) */ + unsigned char data[PN544_MAX_FW_DATA]; +}; + +#ifdef __KERNEL__ +/* board config */ +struct pn544_nfc_platform_data { + int (*request_resources) (struct i2c_client *client); + void (*free_resources) (void); + void (*enable) (int fw); + int (*test) (void); + void (*disable) (void); + int irq_gpio, ven_gpio, firm_gpio; + //int irq_enable, ven_enable, firm_enable; + int irq_active, ven_active, firm_active; + +}; +#endif /* __KERNEL__ */ + +#endif /* _PN544_H_ */ diff --git a/include/linux/nfs.h b/include/linux/nfs.h new file mode 100644 index 00000000..6d1fb63f --- /dev/null +++ b/include/linux/nfs.h @@ -0,0 +1,173 @@ +/* + * NFS protocol definitions + * + * This file contains constants mostly for Version 
2 of the protocol, + * but also has a couple of NFSv3 bits in (notably the error codes). + */ +#ifndef _LINUX_NFS_H +#define _LINUX_NFS_H + +#define NFS_PROGRAM 100003 +#define NFS_PORT 2049 +#define NFS_MAXDATA 8192 +#define NFS_MAXPATHLEN 1024 +#define NFS_MAXNAMLEN 255 +#define NFS_MAXGROUPS 16 +#define NFS_FHSIZE 32 +#define NFS_COOKIESIZE 4 +#define NFS_FIFO_DEV (-1) +#define NFSMODE_FMT 0170000 +#define NFSMODE_DIR 0040000 +#define NFSMODE_CHR 0020000 +#define NFSMODE_BLK 0060000 +#define NFSMODE_REG 0100000 +#define NFSMODE_LNK 0120000 +#define NFSMODE_SOCK 0140000 +#define NFSMODE_FIFO 0010000 + +#define NFS_MNT_PROGRAM 100005 +#define NFS_MNT_VERSION 1 +#define NFS_MNT3_VERSION 3 + +#define NFS_PIPE_DIRNAME "nfs" + +/* + * NFS stats. The good thing with these values is that NFSv3 errors are + * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which + * no-one uses anyway), so we can happily mix code as long as we make sure + * no NFSv3 errors are returned to NFSv2 clients. + * Error codes that have a `--' in the v2 column are not part of the + * standard, but seem to be widely used nevertheless. + */ + enum nfs_stat { + NFS_OK = 0, /* v2 v3 v4 */ + NFSERR_PERM = 1, /* v2 v3 v4 */ + NFSERR_NOENT = 2, /* v2 v3 v4 */ + NFSERR_IO = 5, /* v2 v3 v4 */ + NFSERR_NXIO = 6, /* v2 v3 v4 */ + NFSERR_EAGAIN = 11, /* v2 v3 */ + NFSERR_ACCES = 13, /* v2 v3 v4 */ + NFSERR_EXIST = 17, /* v2 v3 v4 */ + NFSERR_XDEV = 18, /* v3 v4 */ + NFSERR_NODEV = 19, /* v2 v3 v4 */ + NFSERR_NOTDIR = 20, /* v2 v3 v4 */ + NFSERR_ISDIR = 21, /* v2 v3 v4 */ + NFSERR_INVAL = 22, /* v2 v3 v4 */ + NFSERR_FBIG = 27, /* v2 v3 v4 */ + NFSERR_NOSPC = 28, /* v2 v3 v4 */ + NFSERR_ROFS = 30, /* v2 v3 v4 */ + NFSERR_MLINK = 31, /* v3 v4 */ + NFSERR_OPNOTSUPP = 45, /* v2 v3 */ + NFSERR_NAMETOOLONG = 63, /* v2 v3 v4 */ + NFSERR_NOTEMPTY = 66, /* v2 v3 v4 */ + NFSERR_DQUOT = 69, /* v2 v3 v4 */ + NFSERR_STALE = 70, /* v2 v3 v4 */ + NFSERR_REMOTE = 71, /* v2 v3 */ + NFSERR_WFLUSH = 99, /* v2 */ + NFSERR_BADHANDLE = 10001, /* v3 v4 */ + NFSERR_NOT_SYNC = 10002, /* v3 */ + NFSERR_BAD_COOKIE = 10003, /* v3 v4 */ + NFSERR_NOTSUPP = 10004, /* v3 v4 */ + NFSERR_TOOSMALL = 10005, /* v3 v4 */ + NFSERR_SERVERFAULT = 10006, /* v3 v4 */ + NFSERR_BADTYPE = 10007, /* v3 v4 */ + NFSERR_JUKEBOX = 10008, /* v3 v4 */ + NFSERR_SAME = 10009, /* v4 */ + NFSERR_DENIED = 10010, /* v4 */ + NFSERR_EXPIRED = 10011, /* v4 */ + NFSERR_LOCKED = 10012, /* v4 */ + NFSERR_GRACE = 10013, /* v4 */ + NFSERR_FHEXPIRED = 10014, /* v4 */ + NFSERR_SHARE_DENIED = 10015, /* v4 */ + NFSERR_WRONGSEC = 10016, /* v4 */ + NFSERR_CLID_INUSE = 10017, /* v4 */ + NFSERR_RESOURCE = 10018, /* v4 */ + NFSERR_MOVED = 10019, /* v4 */ + NFSERR_NOFILEHANDLE = 10020, /* v4 */ + NFSERR_MINOR_VERS_MISMATCH = 10021, /* v4 */ + NFSERR_STALE_CLIENTID = 10022, /* v4 */ + NFSERR_STALE_STATEID = 10023, /* v4 */ + NFSERR_OLD_STATEID = 10024, /* v4 */ + NFSERR_BAD_STATEID = 10025, /* v4 */ + NFSERR_BAD_SEQID = 10026, /* v4 */ + NFSERR_NOT_SAME = 10027, /* v4 */ + NFSERR_LOCK_RANGE = 10028, /* v4 */ + NFSERR_SYMLINK = 10029, /* v4 */ + NFSERR_RESTOREFH = 10030, /* v4 */ + NFSERR_LEASE_MOVED = 10031, /* v4 */ + NFSERR_ATTRNOTSUPP = 10032, /* v4 */ + NFSERR_NO_GRACE = 10033, /* v4 */ + NFSERR_RECLAIM_BAD = 10034, /* v4 */ + NFSERR_RECLAIM_CONFLICT = 10035,/* v4 */ + NFSERR_BAD_XDR = 10036, /* v4 */ + NFSERR_LOCKS_HELD = 10037, /* v4 */ + NFSERR_OPENMODE = 10038, /* v4 */ + NFSERR_BADOWNER = 10039, /* v4 */ + NFSERR_BADCHAR = 10040, /* v4 */ + NFSERR_BADNAME = 10041, /* v4 */ + 
NFSERR_BAD_RANGE = 10042, /* v4 */ + NFSERR_LOCK_NOTSUPP = 10043, /* v4 */ + NFSERR_OP_ILLEGAL = 10044, /* v4 */ + NFSERR_DEADLOCK = 10045, /* v4 */ + NFSERR_FILE_OPEN = 10046, /* v4 */ + NFSERR_ADMIN_REVOKED = 10047, /* v4 */ + NFSERR_CB_PATH_DOWN = 10048, /* v4 */ +}; + +/* NFSv2 file types - beware, these are not the same in NFSv3 */ + +enum nfs_ftype { + NFNON = 0, + NFREG = 1, + NFDIR = 2, + NFBLK = 3, + NFCHR = 4, + NFLNK = 5, + NFSOCK = 6, + NFBAD = 7, + NFFIFO = 8 +}; + +#ifdef __KERNEL__ +#include <linux/sunrpc/msg_prot.h> +#include <linux/string.h> + +/* + * This is the kernel NFS client file handle representation + */ +#define NFS_MAXFHSIZE 128 +struct nfs_fh { + unsigned short size; + unsigned char data[NFS_MAXFHSIZE]; +}; + +/* + * Returns a zero iff the size and data fields match. + * Checks only "size" bytes in the data field. + */ +static inline int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b) +{ + return a->size != b->size || memcmp(a->data, b->data, a->size) != 0; +} + +static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source) +{ + target->size = source->size; + memcpy(target->data, source->data, source->size); +} + + +/* + * This is really a general kernel constant, but since nothing like + * this is defined in the kernel headers, I have to do it here. + */ +#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1)) + + +enum nfs3_stable_how { + NFS_UNSTABLE = 0, + NFS_DATA_SYNC = 1, + NFS_FILE_SYNC = 2 +}; +#endif /* __KERNEL__ */ +#endif /* _LINUX_NFS_H */ diff --git a/include/linux/nfs2.h b/include/linux/nfs2.h new file mode 100644 index 00000000..fde24b30 --- /dev/null +++ b/include/linux/nfs2.h @@ -0,0 +1,67 @@ +/* + * NFS protocol definitions + * + * This file contains constants for Version 2 of the protocol. 
+ */ +#ifndef _LINUX_NFS2_H +#define _LINUX_NFS2_H + +#define NFS2_PORT 2049 +#define NFS2_MAXDATA 8192 +#define NFS2_MAXPATHLEN 1024 +#define NFS2_MAXNAMLEN 255 +#define NFS2_MAXGROUPS 16 +#define NFS2_FHSIZE 32 +#define NFS2_COOKIESIZE 4 +#define NFS2_FIFO_DEV (-1) +#define NFS2MODE_FMT 0170000 +#define NFS2MODE_DIR 0040000 +#define NFS2MODE_CHR 0020000 +#define NFS2MODE_BLK 0060000 +#define NFS2MODE_REG 0100000 +#define NFS2MODE_LNK 0120000 +#define NFS2MODE_SOCK 0140000 +#define NFS2MODE_FIFO 0010000 + + +/* NFSv2 file types - beware, these are not the same in NFSv3 */ +enum nfs2_ftype { + NF2NON = 0, + NF2REG = 1, + NF2DIR = 2, + NF2BLK = 3, + NF2CHR = 4, + NF2LNK = 5, + NF2SOCK = 6, + NF2BAD = 7, + NF2FIFO = 8 +}; + +struct nfs2_fh { + char data[NFS2_FHSIZE]; +}; + +/* + * Procedure numbers for NFSv2 + */ +#define NFS2_VERSION 2 +#define NFSPROC_NULL 0 +#define NFSPROC_GETATTR 1 +#define NFSPROC_SETATTR 2 +#define NFSPROC_ROOT 3 +#define NFSPROC_LOOKUP 4 +#define NFSPROC_READLINK 5 +#define NFSPROC_READ 6 +#define NFSPROC_WRITECACHE 7 +#define NFSPROC_WRITE 8 +#define NFSPROC_CREATE 9 +#define NFSPROC_REMOVE 10 +#define NFSPROC_RENAME 11 +#define NFSPROC_LINK 12 +#define NFSPROC_SYMLINK 13 +#define NFSPROC_MKDIR 14 +#define NFSPROC_RMDIR 15 +#define NFSPROC_READDIR 16 +#define NFSPROC_STATFS 17 + +#endif /* _LINUX_NFS2_H */ diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h new file mode 100644 index 00000000..6ccfe3b6 --- /dev/null +++ b/include/linux/nfs3.h @@ -0,0 +1,103 @@ +/* + * NFSv3 protocol definitions + */ +#ifndef _LINUX_NFS3_H +#define _LINUX_NFS3_H + +#define NFS3_PORT 2049 +#define NFS3_MAXDATA 32768 +#define NFS3_MAXPATHLEN PATH_MAX +#define NFS3_MAXNAMLEN NAME_MAX +#define NFS3_MAXGROUPS 16 +#define NFS3_FHSIZE 64 +#define NFS3_COOKIESIZE 4 +#define NFS3_CREATEVERFSIZE 8 +#define NFS3_COOKIEVERFSIZE 8 +#define NFS3_WRITEVERFSIZE 8 +#define NFS3_FIFO_DEV (-1) +#define NFS3MODE_FMT 0170000 +#define NFS3MODE_DIR 0040000 +#define NFS3MODE_CHR 0020000 +#define NFS3MODE_BLK 0060000 +#define NFS3MODE_REG 0100000 +#define NFS3MODE_LNK 0120000 +#define NFS3MODE_SOCK 0140000 +#define NFS3MODE_FIFO 0010000 + +/* Flags for access() call */ +#define NFS3_ACCESS_READ 0x0001 +#define NFS3_ACCESS_LOOKUP 0x0002 +#define NFS3_ACCESS_MODIFY 0x0004 +#define NFS3_ACCESS_EXTEND 0x0008 +#define NFS3_ACCESS_DELETE 0x0010 +#define NFS3_ACCESS_EXECUTE 0x0020 +#define NFS3_ACCESS_FULL 0x003f + +/* Flags for create mode */ +enum nfs3_createmode { + NFS3_CREATE_UNCHECKED = 0, + NFS3_CREATE_GUARDED = 1, + NFS3_CREATE_EXCLUSIVE = 2 +}; + +/* NFSv3 file system properties */ +#define NFS3_FSF_LINK 0x0001 +#define NFS3_FSF_SYMLINK 0x0002 +#define NFS3_FSF_HOMOGENEOUS 0x0008 +#define NFS3_FSF_CANSETTIME 0x0010 +/* Some shorthands. 
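(For reference, NFS3_FSF_DEFAULT below is LINK|SYMLINK|HOMOGENEOUS|CANSETTIME, NFS3_FSF_BILLYBOY is HOMOGENEOUS|CANSETTIME, and NFS3_FSF_READONLY is HOMOGENEOUS alone.)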
See fs/nfsd/nfs3proc.c */ +#define NFS3_FSF_DEFAULT 0x001B +#define NFS3_FSF_BILLYBOY 0x0018 +#define NFS3_FSF_READONLY 0x0008 + +enum nfs3_ftype { + NF3NON = 0, + NF3REG = 1, + NF3DIR = 2, + NF3BLK = 3, + NF3CHR = 4, + NF3LNK = 5, + NF3SOCK = 6, + NF3FIFO = 7, /* changed from NFSv2 (was 8) */ + NF3BAD = 8 +}; + +struct nfs3_fh { + unsigned short size; + unsigned char data[NFS3_FHSIZE]; +}; + +#define NFS3_VERSION 3 +#define NFS3PROC_NULL 0 +#define NFS3PROC_GETATTR 1 +#define NFS3PROC_SETATTR 2 +#define NFS3PROC_LOOKUP 3 +#define NFS3PROC_ACCESS 4 +#define NFS3PROC_READLINK 5 +#define NFS3PROC_READ 6 +#define NFS3PROC_WRITE 7 +#define NFS3PROC_CREATE 8 +#define NFS3PROC_MKDIR 9 +#define NFS3PROC_SYMLINK 10 +#define NFS3PROC_MKNOD 11 +#define NFS3PROC_REMOVE 12 +#define NFS3PROC_RMDIR 13 +#define NFS3PROC_RENAME 14 +#define NFS3PROC_LINK 15 +#define NFS3PROC_READDIR 16 +#define NFS3PROC_READDIRPLUS 17 +#define NFS3PROC_FSSTAT 18 +#define NFS3PROC_FSINFO 19 +#define NFS3PROC_PATHCONF 20 +#define NFS3PROC_COMMIT 21 + +#define NFS_MNT3_VERSION 3 + + +#if defined(__KERNEL__) + +/* Number of 32bit words in post_op_attr */ +#define NFS3_POST_OP_ATTR_WORDS 22 + +#endif /* __KERNEL__ */ +#endif /* _LINUX_NFS3_H */ diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h new file mode 100644 index 00000000..0987146b --- /dev/null +++ b/include/linux/nfs4.h @@ -0,0 +1,669 @@ +/* + * include/linux/nfs4.h + * + * NFSv4 protocol definitions. + * + * Copyright (c) 2002 The Regents of the University of Michigan. + * All rights reserved. + * + * Kendrick Smith <kmsmith@umich.edu> + * Andy Adamson <andros@umich.edu> + */ + +#ifndef _LINUX_NFS4_H +#define _LINUX_NFS4_H + +#include <linux/types.h> + +#define NFS4_BITMAP_SIZE 2 +#define NFS4_VERIFIER_SIZE 8 +#define NFS4_STATEID_SEQID_SIZE 4 +#define NFS4_STATEID_OTHER_SIZE 12 +#define NFS4_STATEID_SIZE (NFS4_STATEID_SEQID_SIZE + NFS4_STATEID_OTHER_SIZE) +#define NFS4_FHSIZE 128 +#define NFS4_MAXPATHLEN PATH_MAX +#define NFS4_MAXNAMLEN NAME_MAX +#define NFS4_OPAQUE_LIMIT 1024 +#define NFS4_MAX_SESSIONID_LEN 16 + +#define NFS4_ACCESS_READ 0x0001 +#define NFS4_ACCESS_LOOKUP 0x0002 +#define NFS4_ACCESS_MODIFY 0x0004 +#define NFS4_ACCESS_EXTEND 0x0008 +#define NFS4_ACCESS_DELETE 0x0010 +#define NFS4_ACCESS_EXECUTE 0x0020 + +#define NFS4_FH_PERSISTENT 0x0000 +#define NFS4_FH_NOEXPIRE_WITH_OPEN 0x0001 +#define NFS4_FH_VOLATILE_ANY 0x0002 +#define NFS4_FH_VOL_MIGRATION 0x0004 +#define NFS4_FH_VOL_RENAME 0x0008 + +#define NFS4_OPEN_RESULT_CONFIRM 0x0002 +#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 + +#define NFS4_SHARE_ACCESS_MASK 0x000F +#define NFS4_SHARE_ACCESS_READ 0x0001 +#define NFS4_SHARE_ACCESS_WRITE 0x0002 +#define NFS4_SHARE_ACCESS_BOTH 0x0003 +#define NFS4_SHARE_DENY_READ 0x0001 +#define NFS4_SHARE_DENY_WRITE 0x0002 +#define NFS4_SHARE_DENY_BOTH 0x0003 + +/* nfs41 */ +#define NFS4_SHARE_WANT_MASK 0xFF00 +#define NFS4_SHARE_WANT_NO_PREFERENCE 0x0000 +#define NFS4_SHARE_WANT_READ_DELEG 0x0100 +#define NFS4_SHARE_WANT_WRITE_DELEG 0x0200 +#define NFS4_SHARE_WANT_ANY_DELEG 0x0300 +#define NFS4_SHARE_WANT_NO_DELEG 0x0400 +#define NFS4_SHARE_WANT_CANCEL 0x0500 + +#define NFS4_SHARE_WHEN_MASK 0xF0000 +#define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000 +#define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000 + +#define NFS4_CDFC4_FORE 0x1 +#define NFS4_CDFC4_BACK 0x2 +#define NFS4_CDFC4_BOTH 0x3 +#define NFS4_CDFC4_FORE_OR_BOTH 0x3 +#define NFS4_CDFC4_BACK_OR_BOTH 0x7 + +#define NFS4_SET_TO_SERVER_TIME 0 +#define NFS4_SET_TO_CLIENT_TIME 1 + +#define 
NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE 0 +#define NFS4_ACE_ACCESS_DENIED_ACE_TYPE 1 +#define NFS4_ACE_SYSTEM_AUDIT_ACE_TYPE 2 +#define NFS4_ACE_SYSTEM_ALARM_ACE_TYPE 3 + +#define ACL4_SUPPORT_ALLOW_ACL 0x01 +#define ACL4_SUPPORT_DENY_ACL 0x02 +#define ACL4_SUPPORT_AUDIT_ACL 0x04 +#define ACL4_SUPPORT_ALARM_ACL 0x08 + +#define NFS4_ACE_FILE_INHERIT_ACE 0x00000001 +#define NFS4_ACE_DIRECTORY_INHERIT_ACE 0x00000002 +#define NFS4_ACE_NO_PROPAGATE_INHERIT_ACE 0x00000004 +#define NFS4_ACE_INHERIT_ONLY_ACE 0x00000008 +#define NFS4_ACE_SUCCESSFUL_ACCESS_ACE_FLAG 0x00000010 +#define NFS4_ACE_FAILED_ACCESS_ACE_FLAG 0x00000020 +#define NFS4_ACE_IDENTIFIER_GROUP 0x00000040 + +#define NFS4_ACE_READ_DATA 0x00000001 +#define NFS4_ACE_LIST_DIRECTORY 0x00000001 +#define NFS4_ACE_WRITE_DATA 0x00000002 +#define NFS4_ACE_ADD_FILE 0x00000002 +#define NFS4_ACE_APPEND_DATA 0x00000004 +#define NFS4_ACE_ADD_SUBDIRECTORY 0x00000004 +#define NFS4_ACE_READ_NAMED_ATTRS 0x00000008 +#define NFS4_ACE_WRITE_NAMED_ATTRS 0x00000010 +#define NFS4_ACE_EXECUTE 0x00000020 +#define NFS4_ACE_DELETE_CHILD 0x00000040 +#define NFS4_ACE_READ_ATTRIBUTES 0x00000080 +#define NFS4_ACE_WRITE_ATTRIBUTES 0x00000100 +#define NFS4_ACE_DELETE 0x00010000 +#define NFS4_ACE_READ_ACL 0x00020000 +#define NFS4_ACE_WRITE_ACL 0x00040000 +#define NFS4_ACE_WRITE_OWNER 0x00080000 +#define NFS4_ACE_SYNCHRONIZE 0x00100000 +#define NFS4_ACE_GENERIC_READ 0x00120081 +#define NFS4_ACE_GENERIC_WRITE 0x00160106 +#define NFS4_ACE_GENERIC_EXECUTE 0x001200A0 +#define NFS4_ACE_MASK_ALL 0x001F01FF + +#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001 +#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002 +#define EXCHGID4_FLAG_BIND_PRINC_STATEID 0x00000100 + +#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000 +#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000 +#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000 +#define EXCHGID4_FLAG_MASK_PNFS 0x00070000 + +#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000 +#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000 +/* + * Since the validity of these bits depends on whether + * they're set in the argument or response, have separate + * invalid flag masks for arg (_A) and resp (_R). + */ +#define EXCHGID4_FLAG_MASK_A 0x40070103 +#define EXCHGID4_FLAG_MASK_R 0x80070103 + +#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001 +#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002 +#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED 0x00000004 +#define SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED 0x00000008 +#define SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED 0x00000010 +#define SEQ4_STATUS_ADMIN_STATE_REVOKED 0x00000020 +#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040 +#define SEQ4_STATUS_LEASE_MOVED 0x00000080 +#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100 +#define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200 +#define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400 + +#define NFS4_SECINFO_STYLE4_CURRENT_FH 0 +#define NFS4_SECINFO_STYLE4_PARENT 1 + +#define NFS4_MAX_UINT64 (~(u64)0) + +/* An NFS4 sessions server must support at least NFS4_MAX_OPS operations. + * If a compound requires more operations, adjust NFS4_MAX_OPS accordingly. 
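+ * (As a rough illustration, a typical client open is compounded as
+ * SEQUENCE + PUTFH + OPEN + ACCESS + GETFH + GETATTR, i.e. six operations,
+ * which fits within the default of 8 defined below.)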
+ */ +#define NFS4_MAX_OPS 8 + +/* Our NFS4 client back channel server only wants the cb_sequene and the + * actual operation per compound + */ +#define NFS4_MAX_BACK_CHANNEL_OPS 2 + +enum nfs4_acl_whotype { + NFS4_ACL_WHO_NAMED = 0, + NFS4_ACL_WHO_OWNER, + NFS4_ACL_WHO_GROUP, + NFS4_ACL_WHO_EVERYONE, +}; + +#ifdef __KERNEL__ +#include <linux/list.h> + +struct nfs4_ace { + uint32_t type; + uint32_t flag; + uint32_t access_mask; + int whotype; + uid_t who; +}; + +struct nfs4_acl { + uint32_t naces; + struct nfs4_ace aces[0]; +}; + +typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; + +struct nfs_stateid4 { + __be32 seqid; + char other[NFS4_STATEID_OTHER_SIZE]; +} __attribute__ ((packed)); + +typedef struct nfs_stateid4 nfs4_stateid; + +enum nfs_opnum4 { + OP_ACCESS = 3, + OP_CLOSE = 4, + OP_COMMIT = 5, + OP_CREATE = 6, + OP_DELEGPURGE = 7, + OP_DELEGRETURN = 8, + OP_GETATTR = 9, + OP_GETFH = 10, + OP_LINK = 11, + OP_LOCK = 12, + OP_LOCKT = 13, + OP_LOCKU = 14, + OP_LOOKUP = 15, + OP_LOOKUPP = 16, + OP_NVERIFY = 17, + OP_OPEN = 18, + OP_OPENATTR = 19, + OP_OPEN_CONFIRM = 20, + OP_OPEN_DOWNGRADE = 21, + OP_PUTFH = 22, + OP_PUTPUBFH = 23, + OP_PUTROOTFH = 24, + OP_READ = 25, + OP_READDIR = 26, + OP_READLINK = 27, + OP_REMOVE = 28, + OP_RENAME = 29, + OP_RENEW = 30, + OP_RESTOREFH = 31, + OP_SAVEFH = 32, + OP_SECINFO = 33, + OP_SETATTR = 34, + OP_SETCLIENTID = 35, + OP_SETCLIENTID_CONFIRM = 36, + OP_VERIFY = 37, + OP_WRITE = 38, + OP_RELEASE_LOCKOWNER = 39, + + /* nfs41 */ + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + + OP_ILLEGAL = 10044, +}; + +/*Defining first and last NFS4 operations implemented. 
+Needs to be updated if more operations are defined in future.*/ + +#define FIRST_NFS4_OP OP_ACCESS +#define LAST_NFS4_OP OP_RECLAIM_COMPLETE + +enum nfsstat4 { + NFS4_OK = 0, + NFS4ERR_PERM = 1, + NFS4ERR_NOENT = 2, + NFS4ERR_IO = 5, + NFS4ERR_NXIO = 6, + NFS4ERR_ACCESS = 13, + NFS4ERR_EXIST = 17, + NFS4ERR_XDEV = 18, + /* Unused/reserved 19 */ + NFS4ERR_NOTDIR = 20, + NFS4ERR_ISDIR = 21, + NFS4ERR_INVAL = 22, + NFS4ERR_FBIG = 27, + NFS4ERR_NOSPC = 28, + NFS4ERR_ROFS = 30, + NFS4ERR_MLINK = 31, + NFS4ERR_NAMETOOLONG = 63, + NFS4ERR_NOTEMPTY = 66, + NFS4ERR_DQUOT = 69, + NFS4ERR_STALE = 70, + NFS4ERR_BADHANDLE = 10001, + NFS4ERR_BAD_COOKIE = 10003, + NFS4ERR_NOTSUPP = 10004, + NFS4ERR_TOOSMALL = 10005, + NFS4ERR_SERVERFAULT = 10006, + NFS4ERR_BADTYPE = 10007, + NFS4ERR_DELAY = 10008, + NFS4ERR_SAME = 10009, + NFS4ERR_DENIED = 10010, + NFS4ERR_EXPIRED = 10011, + NFS4ERR_LOCKED = 10012, + NFS4ERR_GRACE = 10013, + NFS4ERR_FHEXPIRED = 10014, + NFS4ERR_SHARE_DENIED = 10015, + NFS4ERR_WRONGSEC = 10016, + NFS4ERR_CLID_INUSE = 10017, + NFS4ERR_RESOURCE = 10018, + NFS4ERR_MOVED = 10019, + NFS4ERR_NOFILEHANDLE = 10020, + NFS4ERR_MINOR_VERS_MISMATCH = 10021, + NFS4ERR_STALE_CLIENTID = 10022, + NFS4ERR_STALE_STATEID = 10023, + NFS4ERR_OLD_STATEID = 10024, + NFS4ERR_BAD_STATEID = 10025, + NFS4ERR_BAD_SEQID = 10026, + NFS4ERR_NOT_SAME = 10027, + NFS4ERR_LOCK_RANGE = 10028, + NFS4ERR_SYMLINK = 10029, + NFS4ERR_RESTOREFH = 10030, + NFS4ERR_LEASE_MOVED = 10031, + NFS4ERR_ATTRNOTSUPP = 10032, + NFS4ERR_NO_GRACE = 10033, + NFS4ERR_RECLAIM_BAD = 10034, + NFS4ERR_RECLAIM_CONFLICT = 10035, + NFS4ERR_BADXDR = 10036, + NFS4ERR_LOCKS_HELD = 10037, + NFS4ERR_OPENMODE = 10038, + NFS4ERR_BADOWNER = 10039, + NFS4ERR_BADCHAR = 10040, + NFS4ERR_BADNAME = 10041, + NFS4ERR_BAD_RANGE = 10042, + NFS4ERR_LOCK_NOTSUPP = 10043, + NFS4ERR_OP_ILLEGAL = 10044, + NFS4ERR_DEADLOCK = 10045, + NFS4ERR_FILE_OPEN = 10046, + NFS4ERR_ADMIN_REVOKED = 10047, + NFS4ERR_CB_PATH_DOWN = 10048, + + /* nfs41 */ + NFS4ERR_BADIOMODE = 10049, + NFS4ERR_BADLAYOUT = 10050, + NFS4ERR_BAD_SESSION_DIGEST = 10051, + NFS4ERR_BADSESSION = 10052, + NFS4ERR_BADSLOT = 10053, + NFS4ERR_COMPLETE_ALREADY = 10054, + NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055, + NFS4ERR_DELEG_ALREADY_WANTED = 10056, + NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */ + NFS4ERR_LAYOUTTRYLATER = 10058, + NFS4ERR_LAYOUTUNAVAILABLE = 10059, + NFS4ERR_NOMATCHING_LAYOUT = 10060, + NFS4ERR_RECALLCONFLICT = 10061, + NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062, + NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */ + NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */ + NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */ + NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */ + NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */ + NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */ + NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */ + NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */ + NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */ + NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */ + /* Error 10073 is unused. 
*/ + NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */ + NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */ + NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not original */ + NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */ + NFS4ERR_DEADSESSION = 10078, /* persistent session dead */ + NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */ + NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */ + NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */ + NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */ + NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */ + NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */ + NFS4ERR_REJECT_DELEG = 10085, /* on callback */ + NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */ + NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */ +}; + +static inline bool seqid_mutating_err(u32 err) +{ + /* rfc 3530 section 8.1.5: */ + switch (err) { + case NFS4ERR_STALE_CLIENTID: + case NFS4ERR_STALE_STATEID: + case NFS4ERR_BAD_STATEID: + case NFS4ERR_BAD_SEQID: + case NFS4ERR_BADXDR: + case NFS4ERR_RESOURCE: + case NFS4ERR_NOFILEHANDLE: + return false; + }; + return true; +} + +/* + * Note: NF4BAD is not actually part of the protocol; it is just used + * internally by nfsd. + */ +enum nfs_ftype4 { + NF4BAD = 0, + NF4REG = 1, /* Regular File */ + NF4DIR = 2, /* Directory */ + NF4BLK = 3, /* Special File - block device */ + NF4CHR = 4, /* Special File - character device */ + NF4LNK = 5, /* Symbolic Link */ + NF4SOCK = 6, /* Special File - socket */ + NF4FIFO = 7, /* Special File - fifo */ + NF4ATTRDIR = 8, /* Attribute Directory */ + NF4NAMEDATTR = 9 /* Named Attribute */ +}; + +enum open_claim_type4 { + NFS4_OPEN_CLAIM_NULL = 0, + NFS4_OPEN_CLAIM_PREVIOUS = 1, + NFS4_OPEN_CLAIM_DELEGATE_CUR = 2, + NFS4_OPEN_CLAIM_DELEGATE_PREV = 3, + NFS4_OPEN_CLAIM_FH = 4, /* 4.1 */ + NFS4_OPEN_CLAIM_DELEG_CUR_FH = 5, /* 4.1 */ + NFS4_OPEN_CLAIM_DELEG_PREV_FH = 6, /* 4.1 */ +}; + +enum opentype4 { + NFS4_OPEN_NOCREATE = 0, + NFS4_OPEN_CREATE = 1 +}; + +enum createmode4 { + NFS4_CREATE_UNCHECKED = 0, + NFS4_CREATE_GUARDED = 1, + NFS4_CREATE_EXCLUSIVE = 2, + /* + * New to NFSv4.1. If session is persistent, + * GUARDED4 MUST be used. Otherwise, use + * EXCLUSIVE4_1 instead of EXCLUSIVE4. 
+ */ + NFS4_CREATE_EXCLUSIVE4_1 = 3 +}; + +enum limit_by4 { + NFS4_LIMIT_SIZE = 1, + NFS4_LIMIT_BLOCKS = 2 +}; + +enum open_delegation_type4 { + NFS4_OPEN_DELEGATE_NONE = 0, + NFS4_OPEN_DELEGATE_READ = 1, + NFS4_OPEN_DELEGATE_WRITE = 2, + NFS4_OPEN_DELEGATE_NONE_EXT = 3, /* 4.1 */ +}; + +enum why_no_delegation4 { /* new to v4.1 */ + WND4_NOT_WANTED = 0, + WND4_CONTENTION = 1, + WND4_RESOURCE = 2, + WND4_NOT_SUPP_FTYPE = 3, + WND4_WRITE_DELEG_NOT_SUPP_FTYPE = 4, + WND4_NOT_SUPP_UPGRADE = 5, + WND4_NOT_SUPP_DOWNGRADE = 6, + WND4_CANCELLED = 7, + WND4_IS_DIR = 8, +}; + +enum lock_type4 { + NFS4_UNLOCK_LT = 0, + NFS4_READ_LT = 1, + NFS4_WRITE_LT = 2, + NFS4_READW_LT = 3, + NFS4_WRITEW_LT = 4 +}; + + +/* Mandatory Attributes */ +#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0) +#define FATTR4_WORD0_TYPE (1UL << 1) +#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2) +#define FATTR4_WORD0_CHANGE (1UL << 3) +#define FATTR4_WORD0_SIZE (1UL << 4) +#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5) +#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6) +#define FATTR4_WORD0_NAMED_ATTR (1UL << 7) +#define FATTR4_WORD0_FSID (1UL << 8) +#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) +#define FATTR4_WORD0_LEASE_TIME (1UL << 10) +#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) +/* Mandatory in NFSv4.1 */ +#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11) + +/* Recommended Attributes */ +#define FATTR4_WORD0_ACL (1UL << 12) +#define FATTR4_WORD0_ACLSUPPORT (1UL << 13) +#define FATTR4_WORD0_ARCHIVE (1UL << 14) +#define FATTR4_WORD0_CANSETTIME (1UL << 15) +#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16) +#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17) +#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18) +#define FATTR4_WORD0_FILEHANDLE (1UL << 19) +#define FATTR4_WORD0_FILEID (1UL << 20) +#define FATTR4_WORD0_FILES_AVAIL (1UL << 21) +#define FATTR4_WORD0_FILES_FREE (1UL << 22) +#define FATTR4_WORD0_FILES_TOTAL (1UL << 23) +#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24) +#define FATTR4_WORD0_HIDDEN (1UL << 25) +#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26) +#define FATTR4_WORD0_MAXFILESIZE (1UL << 27) +#define FATTR4_WORD0_MAXLINK (1UL << 28) +#define FATTR4_WORD0_MAXNAME (1UL << 29) +#define FATTR4_WORD0_MAXREAD (1UL << 30) +#define FATTR4_WORD0_MAXWRITE (1UL << 31) +#define FATTR4_WORD1_MIMETYPE (1UL << 0) +#define FATTR4_WORD1_MODE (1UL << 1) +#define FATTR4_WORD1_NO_TRUNC (1UL << 2) +#define FATTR4_WORD1_NUMLINKS (1UL << 3) +#define FATTR4_WORD1_OWNER (1UL << 4) +#define FATTR4_WORD1_OWNER_GROUP (1UL << 5) +#define FATTR4_WORD1_QUOTA_HARD (1UL << 6) +#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7) +#define FATTR4_WORD1_QUOTA_USED (1UL << 8) +#define FATTR4_WORD1_RAWDEV (1UL << 9) +#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10) +#define FATTR4_WORD1_SPACE_FREE (1UL << 11) +#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12) +#define FATTR4_WORD1_SPACE_USED (1UL << 13) +#define FATTR4_WORD1_SYSTEM (1UL << 14) +#define FATTR4_WORD1_TIME_ACCESS (1UL << 15) +#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16) +#define FATTR4_WORD1_TIME_BACKUP (1UL << 17) +#define FATTR4_WORD1_TIME_CREATE (1UL << 18) +#define FATTR4_WORD1_TIME_DELTA (1UL << 19) +#define FATTR4_WORD1_TIME_METADATA (1UL << 20) +#define FATTR4_WORD1_TIME_MODIFY (1UL << 21) +#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22) +#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23) +#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) +#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) + +#define NFSPROC4_NULL 0 +#define NFSPROC4_COMPOUND 1 +#define NFS4_VERSION 4 +#define NFS4_MINOR_VERSION 0 + 
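+/*
+ * Example (an illustrative sketch only): a GETATTR bitmask built from the
+ * FATTR4_WORD* bits above names one attribute per bit, split across the
+ * bitmap words:
+ *
+ *	u32 bitmask[2] = {
+ *		FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE,
+ *		FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER
+ *	};
+ */
+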
+#if defined(CONFIG_NFS_V4_1) +#define NFS4_MAX_MINOR_VERSION 1 +#else +#define NFS4_MAX_MINOR_VERSION 0 +#endif /* CONFIG_NFS_V4_1 */ + +#define NFS4_DEBUG 1 + +/* Index of predefined Linux client operations */ + +enum { + NFSPROC4_CLNT_NULL = 0, /* Unused */ + NFSPROC4_CLNT_READ, + NFSPROC4_CLNT_WRITE, + NFSPROC4_CLNT_COMMIT, + NFSPROC4_CLNT_OPEN, + NFSPROC4_CLNT_OPEN_CONFIRM, + NFSPROC4_CLNT_OPEN_NOATTR, + NFSPROC4_CLNT_OPEN_DOWNGRADE, + NFSPROC4_CLNT_CLOSE, + NFSPROC4_CLNT_SETATTR, + NFSPROC4_CLNT_FSINFO, + NFSPROC4_CLNT_RENEW, + NFSPROC4_CLNT_SETCLIENTID, + NFSPROC4_CLNT_SETCLIENTID_CONFIRM, + NFSPROC4_CLNT_LOCK, + NFSPROC4_CLNT_LOCKT, + NFSPROC4_CLNT_LOCKU, + NFSPROC4_CLNT_ACCESS, + NFSPROC4_CLNT_GETATTR, + NFSPROC4_CLNT_LOOKUP, + NFSPROC4_CLNT_LOOKUP_ROOT, + NFSPROC4_CLNT_REMOVE, + NFSPROC4_CLNT_RENAME, + NFSPROC4_CLNT_LINK, + NFSPROC4_CLNT_SYMLINK, + NFSPROC4_CLNT_CREATE, + NFSPROC4_CLNT_PATHCONF, + NFSPROC4_CLNT_STATFS, + NFSPROC4_CLNT_READLINK, + NFSPROC4_CLNT_READDIR, + NFSPROC4_CLNT_SERVER_CAPS, + NFSPROC4_CLNT_DELEGRETURN, + NFSPROC4_CLNT_GETACL, + NFSPROC4_CLNT_SETACL, + NFSPROC4_CLNT_FS_LOCATIONS, + NFSPROC4_CLNT_RELEASE_LOCKOWNER, + NFSPROC4_CLNT_SECINFO, + + /* nfs41 */ + NFSPROC4_CLNT_EXCHANGE_ID, + NFSPROC4_CLNT_CREATE_SESSION, + NFSPROC4_CLNT_DESTROY_SESSION, + NFSPROC4_CLNT_SEQUENCE, + NFSPROC4_CLNT_GET_LEASE_TIME, + NFSPROC4_CLNT_RECLAIM_COMPLETE, + NFSPROC4_CLNT_LAYOUTGET, + NFSPROC4_CLNT_GETDEVICEINFO, + NFSPROC4_CLNT_LAYOUTCOMMIT, + NFSPROC4_CLNT_LAYOUTRETURN, + NFSPROC4_CLNT_SECINFO_NO_NAME, + NFSPROC4_CLNT_TEST_STATEID, + NFSPROC4_CLNT_FREE_STATEID, + NFSPROC4_CLNT_GETDEVICELIST, +}; + +/* nfs41 types */ +struct nfs4_sessionid { + unsigned char data[NFS4_MAX_SESSIONID_LEN]; +}; + +/* Create Session Flags */ +#define SESSION4_PERSIST 0x001 +#define SESSION4_BACK_CHAN 0x002 +#define SESSION4_RDMA 0x004 + +#define SESSION4_FLAG_MASK_A 0x007 + +enum state_protect_how4 { + SP4_NONE = 0, + SP4_MACH_CRED = 1, + SP4_SSV = 2 +}; + +enum pnfs_layouttype { + LAYOUT_NFSV4_1_FILES = 1, + LAYOUT_OSD2_OBJECTS = 2, + LAYOUT_BLOCK_VOLUME = 3, +}; + +/* used for both layout return and recall */ +enum pnfs_layoutreturn_type { + RETURN_FILE = 1, + RETURN_FSID = 2, + RETURN_ALL = 3 +}; + +enum pnfs_iomode { + IOMODE_READ = 1, + IOMODE_RW = 2, + IOMODE_ANY = 3, +}; + +enum pnfs_notify_deviceid_type4 { + NOTIFY_DEVICEID4_CHANGE = 1 << 1, + NOTIFY_DEVICEID4_DELETE = 1 << 2, +}; + +#define NFL4_UFLG_MASK 0x0000003F +#define NFL4_UFLG_DENSE 0x00000001 +#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002 +#define NFL4_UFLG_STRIPE_UNIT_SIZE_MASK 0xFFFFFFC0 + +/* Encoded in the loh_body field of type layouthint4 */ +enum filelayout_hint_care4 { + NFLH4_CARE_DENSE = NFL4_UFLG_DENSE, + NFLH4_CARE_COMMIT_THRU_MDS = NFL4_UFLG_COMMIT_THRU_MDS, + NFLH4_CARE_STRIPE_UNIT_SIZE = 0x00000040, + NFLH4_CARE_STRIPE_COUNT = 0x00000080 +}; + +#define NFS4_DEVICEID4_SIZE 16 + +struct nfs4_deviceid { + char data[NFS4_DEVICEID4_SIZE]; +}; + +#endif +#endif + +/* + * Local variables: + * c-basic-offset: 8 + * End: + */ diff --git a/include/linux/nfs4_mount.h b/include/linux/nfs4_mount.h new file mode 100644 index 00000000..a0dcf665 --- /dev/null +++ b/include/linux/nfs4_mount.h @@ -0,0 +1,71 @@ +#ifndef _LINUX_NFS4_MOUNT_H +#define _LINUX_NFS4_MOUNT_H + +/* + * linux/include/linux/nfs4_mount.h + * + * Copyright (C) 2002 Trond Myklebust + * + * structure passed from user-space to kernel-space during an nfsv4 mount + */ + +/* + * WARNING! Do not delete or change the order of these fields. 
If + * a new field is required then add it to the end. The version field + * tracks which fields are present. This will ensure some measure of + * mount-to-kernel version compatibility. Some of these aren't used yet + * but here they are anyway. + */ +#define NFS4_MOUNT_VERSION 1 + +struct nfs_string { + unsigned int len; + const char __user * data; +}; + +struct nfs4_mount_data { + int version; /* 1 */ + int flags; /* 1 */ + int rsize; /* 1 */ + int wsize; /* 1 */ + int timeo; /* 1 */ + int retrans; /* 1 */ + int acregmin; /* 1 */ + int acregmax; /* 1 */ + int acdirmin; /* 1 */ + int acdirmax; /* 1 */ + + /* see the definition of 'struct clientaddr4' in RFC3010 */ + struct nfs_string client_addr; /* 1 */ + + /* Mount path */ + struct nfs_string mnt_path; /* 1 */ + + /* Server details */ + struct nfs_string hostname; /* 1 */ + /* Server IP address */ + unsigned int host_addrlen; /* 1 */ + struct sockaddr __user * host_addr; /* 1 */ + + /* Transport protocol to use */ + int proto; /* 1 */ + + /* Pseudo-flavours to use for authentication. See RFC2623 */ + int auth_flavourlen; /* 1 */ + int __user *auth_flavours; /* 1 */ +}; + +/* bits in the flags field */ +/* Note: the fields that correspond to existing NFSv2/v3 mount options + * should mirror the values from include/linux/nfs_mount.h + */ + +#define NFS4_MOUNT_SOFT 0x0001 /* 1 */ +#define NFS4_MOUNT_INTR 0x0002 /* 1 */ +#define NFS4_MOUNT_NOCTO 0x0010 /* 1 */ +#define NFS4_MOUNT_NOAC 0x0020 /* 1 */ +#define NFS4_MOUNT_STRICTLOCK 0x1000 /* 1 */ +#define NFS4_MOUNT_UNSHARED 0x8000 /* 1 */ +#define NFS4_MOUNT_FLAGMASK 0x9033 + +#endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h new file mode 100644 index 00000000..52a1bdb4 --- /dev/null +++ b/include/linux/nfs_fs.h @@ -0,0 +1,671 @@ +/* + * linux/include/linux/nfs_fs.h + * + * Copyright (C) 1992 Rick Sladkey + * + * OS-specific nfs filesystem definitions and declarations + */ + +#ifndef _LINUX_NFS_FS_H +#define _LINUX_NFS_FS_H + +#include <linux/magic.h> + +/* Default timeout values */ +#define NFS_DEF_UDP_TIMEO (11) +#define NFS_DEF_UDP_RETRANS (3) +#define NFS_DEF_TCP_TIMEO (600) +#define NFS_DEF_TCP_RETRANS (2) + +#define NFS_MAX_UDP_TIMEOUT (60*HZ) +#define NFS_MAX_TCP_TIMEOUT (600*HZ) + +#define NFS_DEF_ACREGMIN (3) +#define NFS_DEF_ACREGMAX (60) +#define NFS_DEF_ACDIRMIN (30) +#define NFS_DEF_ACDIRMAX (60) + +/* + * When flushing a cluster of dirty pages, there can be different + * strategies: + */ +#define FLUSH_SYNC 1 /* file being synced, or contention */ +#define FLUSH_STABLE 4 /* commit to stable storage */ +#define FLUSH_LOWPRI 8 /* low priority background flush */ +#define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */ +#define FLUSH_COND_STABLE 32 /* conditional stable write - only stable + * if everything fits in one RPC */ + +#ifdef __KERNEL__ + +/* + * Enable dprintk() debugging support for nfs client. 
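+ *
+ * (When CONFIG_NFS_DEBUG is set, NFS_DEBUG is defined below, which also
+ * enables the ifdebug() and NFS_IFDEBUG() helpers at the end of this
+ * header.)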
+ */ +#ifdef CONFIG_NFS_DEBUG +# define NFS_DEBUG +#endif + +#include <linux/in.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/rbtree.h> +#include <linux/rwsem.h> +#include <linux/wait.h> + +#include <linux/sunrpc/debug.h> +#include <linux/sunrpc/auth.h> +#include <linux/sunrpc/clnt.h> + +#include <linux/nfs.h> +#include <linux/nfs2.h> +#include <linux/nfs3.h> +#include <linux/nfs4.h> +#include <linux/nfs_xdr.h> +#include <linux/nfs_fs_sb.h> + +#include <linux/mempool.h> + +/* + * These are the default flags for swap requests + */ +#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) + +/* + * NFSv3/v4 Access mode cache entry + */ +struct nfs_access_entry { + struct rb_node rb_node; + struct list_head lru; + unsigned long jiffies; + struct rpc_cred * cred; + int mask; +}; + +struct nfs_lock_context { + atomic_t count; + struct list_head list; + struct nfs_open_context *open_context; + fl_owner_t lockowner; + pid_t pid; +}; + +struct nfs4_state; +struct nfs_open_context { + struct nfs_lock_context lock_context; + struct dentry *dentry; + struct rpc_cred *cred; + struct nfs4_state *state; + fmode_t mode; + + unsigned long flags; +#define NFS_CONTEXT_ERROR_WRITE (0) + int error; + + struct list_head list; +}; + +struct nfs_open_dir_context { + struct rpc_cred *cred; + unsigned long attr_gencount; + __u64 dir_cookie; + __u64 dup_cookie; + signed char duped; +}; + +/* + * NFSv4 delegation + */ +struct nfs_delegation; + +struct posix_acl; + +/* + * nfs fs inode data in memory + */ +struct nfs_inode { + /* + * The 64bit 'inode number' + */ + __u64 fileid; + + /* + * NFS file handle + */ + struct nfs_fh fh; + + /* + * Various flags + */ + unsigned long flags; /* atomic bit ops */ + unsigned long cache_validity; /* bit mask */ + + /* + * read_cache_jiffies is when we started read-caching this inode. + * attrtimeo is for how long the cached information is assumed + * to be valid. A successful attribute revalidation doubles + * attrtimeo (up to acregmax/acdirmax), a failure resets it to + * acregmin/acdirmin. + * + * We need to revalidate the cached attrs for this inode if + * + * jiffies - read_cache_jiffies >= attrtimeo + * + * Please note the comparison is greater than or equal + * so that zero timeout values can be specified. + */ + unsigned long read_cache_jiffies; + unsigned long attrtimeo; + unsigned long attrtimeo_timestamp; + + unsigned long attr_gencount; + /* "Generation counter" for the attribute cache. This is + * bumped whenever we update the metadata on the + * server. 
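+	 * (See nfs_inc_attr_generation_counter(), declared later in this
+	 * header.)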
+ */ + unsigned long cache_change_attribute; + + struct rb_root access_cache; + struct list_head access_cache_entry_lru; + struct list_head access_cache_inode_lru; +#ifdef CONFIG_NFS_V3_ACL + struct posix_acl *acl_access; + struct posix_acl *acl_default; +#endif + + /* + * This is the cookie verifier used for NFSv3 readdir + * operations + */ + __be32 cookieverf[2]; + + unsigned long npages; + unsigned long ncommit; + struct list_head commit_list; + + /* Open contexts for shared mmap writes */ + struct list_head open_files; + + /* Number of in-flight sillydelete RPC calls */ + atomic_t silly_count; + /* List of deferred sillydelete requests */ + struct hlist_head silly_list; + wait_queue_head_t waitqueue; + +#ifdef CONFIG_NFS_V4 + struct nfs4_cached_acl *nfs4_acl; + /* NFSv4 state */ + struct list_head open_states; + struct nfs_delegation __rcu *delegation; + fmode_t delegation_state; + struct rw_semaphore rwsem; + + /* pNFS layout information */ + struct pnfs_layout_hdr *layout; + atomic_t commits_outstanding; +#endif /* CONFIG_NFS_V4*/ +#ifdef CONFIG_NFS_FSCACHE + struct fscache_cookie *fscache; +#endif + struct inode vfs_inode; +}; + +/* + * Cache validity bit flags + */ +#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */ +#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */ +#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */ +#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */ +#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ +#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ +#define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */ + +/* + * Bit offsets in flags field + */ +#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */ +#define NFS_INO_STALE (1) /* possible stale inode */ +#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ +#define NFS_INO_FLUSHING (4) /* inode is flushing out data */ +#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ +#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ +#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ +#define NFS_INO_PNFS_COMMIT (8) /* use pnfs code for commit */ +#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ +#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ + +static inline struct nfs_inode *NFS_I(const struct inode *inode) +{ + return container_of(inode, struct nfs_inode, vfs_inode); +} + +static inline struct nfs_server *NFS_SB(const struct super_block *s) +{ + return (struct nfs_server *)(s->s_fs_info); +} + +static inline struct nfs_fh *NFS_FH(const struct inode *inode) +{ + return &NFS_I(inode)->fh; +} + +static inline struct nfs_server *NFS_SERVER(const struct inode *inode) +{ + return NFS_SB(inode->i_sb); +} + +static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode) +{ + return NFS_SERVER(inode)->client; +} + +static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode) +{ + return NFS_SERVER(inode)->nfs_client->rpc_ops; +} + +static inline __be32 *NFS_COOKIEVERF(const struct inode *inode) +{ + return NFS_I(inode)->cookieverf; +} + +static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode) +{ + struct nfs_server *nfss = NFS_SERVER(inode); + return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin; +} + +static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode) +{ + struct nfs_server *nfss = NFS_SERVER(inode); + return S_ISDIR(inode->i_mode) ? 
nfss->acdirmax : nfss->acregmax; +} + +static inline int NFS_STALE(const struct inode *inode) +{ + return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); +} + +static inline int NFS_FSCACHE(const struct inode *inode) +{ + return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags); +} + +static inline __u64 NFS_FILEID(const struct inode *inode) +{ + return NFS_I(inode)->fileid; +} + +static inline void set_nfs_fileid(struct inode *inode, __u64 fileid) +{ + NFS_I(inode)->fileid = fileid; +} + +static inline void nfs_mark_for_revalidate(struct inode *inode) +{ + struct nfs_inode *nfsi = NFS_I(inode); + + spin_lock(&inode->i_lock); + nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; + if (S_ISDIR(inode->i_mode)) + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; + spin_unlock(&inode->i_lock); +} + +static inline int nfs_server_capable(struct inode *inode, int cap) +{ + return NFS_SERVER(inode)->caps & cap; +} + +static inline int NFS_USE_READDIRPLUS(struct inode *inode) +{ + return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags); +} + +static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf) +{ + dentry->d_time = verf; +} + +/** + * nfs_save_change_attribute - Returns the inode attribute change cookie + * @dir - pointer to parent directory inode + * The "change attribute" is updated every time we finish an operation + * that will result in a metadata change on the server. + */ +static inline unsigned long nfs_save_change_attribute(struct inode *dir) +{ + return NFS_I(dir)->cache_change_attribute; +} + +/** + * nfs_verify_change_attribute - Detects NFS remote directory changes + * @dir - pointer to parent directory inode + * @chattr - previously saved change attribute + * Return "false" if the verifiers doesn't match the change attribute. + * This would usually indicate that the directory contents have changed on + * the server, and that any dentries need revalidating. 
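+ *
+ * A typical pattern is to sample nfs_save_change_attribute(dir) before
+ * starting a directory operation and to pass that value back in as
+ * @chattr once the operation completes.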
+ */ +static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr) +{ + return chattr == NFS_I(dir)->cache_change_attribute; +} + +/* + * linux/fs/nfs/inode.c + */ +extern int nfs_sync_mapping(struct address_space *mapping); +extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); +extern void nfs_zap_caches(struct inode *); +extern void nfs_invalidate_atime(struct inode *); +extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, + struct nfs_fattr *); +extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); +extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); +extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); +extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int nfs_permission(struct inode *, int); +extern int nfs_open(struct inode *, struct file *); +extern int nfs_release(struct inode *, struct file *); +extern int nfs_attribute_timeout(struct inode *inode); +extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); +extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); +extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); +extern int nfs_setattr(struct dentry *, struct iattr *); +extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); +extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); +extern void put_nfs_open_context(struct nfs_open_context *ctx); +extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); +extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode); +extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); +extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); +extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); +extern u64 nfs_compat_user_ino64(u64 fileid); +extern void nfs_fattr_init(struct nfs_fattr *fattr); +extern unsigned long nfs_inc_attr_generation_counter(void); + +extern struct nfs_fattr *nfs_alloc_fattr(void); + +static inline void nfs_free_fattr(const struct nfs_fattr *fattr) +{ + kfree(fattr); +} + +extern struct nfs_fh *nfs_alloc_fhandle(void); + +static inline void nfs_free_fhandle(const struct nfs_fh *fh) +{ + kfree(fh); +} + +#ifdef NFS_DEBUG +extern u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh); +static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh) +{ + return _nfs_display_fhandle_hash(fh); +} +extern void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption); +#define nfs_display_fhandle(fh, caption) \ + do { \ + if (unlikely(nfs_debug & NFSDBG_FACILITY)) \ + _nfs_display_fhandle(fh, caption); \ + } while (0) +#else +static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh) +{ + return 0; +} +static inline void nfs_display_fhandle(const struct nfs_fh *fh, + const char *caption) +{ +} +#endif + +/* + * linux/fs/nfs/nfsroot.c + */ +extern int nfs_root_data(char **root_device, char **root_data); /*__init*/ +/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. 
*/ +extern __be32 root_nfs_parse_addr(char *name); /*__init*/ + +/* + * linux/fs/nfs/file.c + */ +extern const struct inode_operations nfs_file_inode_operations; +#ifdef CONFIG_NFS_V3 +extern const struct inode_operations nfs3_file_inode_operations; +#endif /* CONFIG_NFS_V3 */ +extern const struct file_operations nfs_file_operations; +#ifdef CONFIG_NFS_V4 +extern const struct file_operations nfs4_file_operations; +#endif /* CONFIG_NFS_V4 */ +extern const struct address_space_operations nfs_file_aops; +extern const struct address_space_operations nfs_dir_aops; + +static inline struct nfs_open_context *nfs_file_open_context(struct file *filp) +{ + return filp->private_data; +} + +static inline struct rpc_cred *nfs_file_cred(struct file *file) +{ + if (file != NULL) { + struct nfs_open_context *ctx = + nfs_file_open_context(file); + if (ctx) + return ctx->cred; + } + return NULL; +} + +/* + * linux/fs/nfs/xattr.c + */ +#ifdef CONFIG_NFS_V3_ACL +extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t); +extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t); +extern int nfs3_setxattr(struct dentry *, const char *, + const void *, size_t, int); +extern int nfs3_removexattr (struct dentry *, const char *name); +#else +# define nfs3_listxattr NULL +# define nfs3_getxattr NULL +# define nfs3_setxattr NULL +# define nfs3_removexattr NULL +#endif + +/* + * linux/fs/nfs/direct.c + */ +extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t, + unsigned long); +extern ssize_t nfs_file_direct_read(struct kiocb *iocb, + const struct iovec *iov, unsigned long nr_segs, + loff_t pos); +extern ssize_t nfs_file_direct_write(struct kiocb *iocb, + const struct iovec *iov, unsigned long nr_segs, + loff_t pos); + +/* + * linux/fs/nfs/dir.c + */ +extern const struct inode_operations nfs_dir_inode_operations; +#ifdef CONFIG_NFS_V3 +extern const struct inode_operations nfs3_dir_inode_operations; +#endif /* CONFIG_NFS_V3 */ +extern const struct file_operations nfs_dir_operations; +extern const struct dentry_operations nfs_dentry_operations; + +extern void nfs_force_lookup_revalidate(struct inode *dir); +extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); +extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags); +extern void nfs_access_zap_cache(struct inode *inode); + +/* + * linux/fs/nfs/symlink.c + */ +extern const struct inode_operations nfs_symlink_inode_operations; + +/* + * linux/fs/nfs/sysctl.c + */ +#ifdef CONFIG_SYSCTL +extern int nfs_register_sysctl(void); +extern void nfs_unregister_sysctl(void); +#else +#define nfs_register_sysctl() 0 +#define nfs_unregister_sysctl() do { } while(0) +#endif + +/* + * linux/fs/nfs/namespace.c + */ +extern const struct inode_operations nfs_mountpoint_inode_operations; +extern const struct inode_operations nfs_referral_inode_operations; +extern int nfs_mountpoint_expiry_timeout; +extern void nfs_release_automount_timer(void); + +/* + * linux/fs/nfs/unlink.c + */ +extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); +extern void nfs_block_sillyrename(struct dentry *dentry); +extern void nfs_unblock_sillyrename(struct dentry *dentry); +extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry); + +/* + * linux/fs/nfs/write.c + */ +extern int nfs_congestion_kb; +extern int nfs_writepage(struct page *page, struct writeback_control *wbc); +extern int nfs_writepages(struct address_space *, struct writeback_control *); +extern int 
nfs_flush_incompatible(struct file *file, struct page *page); +extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); +extern void nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); + +/* + * Try to write back everything synchronously (but check the + * return value!) + */ +extern int nfs_wb_all(struct inode *inode); +extern int nfs_wb_page(struct inode *inode, struct page* page); +extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); +#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) +extern int nfs_commit_inode(struct inode *, int); +extern struct nfs_write_data *nfs_commitdata_alloc(void); +extern void nfs_commit_free(struct nfs_write_data *wdata); +#else +static inline int +nfs_commit_inode(struct inode *inode, int how) +{ + return 0; +} +#endif + +static inline int +nfs_have_writebacks(struct inode *inode) +{ + return NFS_I(inode)->npages != 0; +} + +/* + * Allocate nfs_write_data structures + */ +extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages); +extern void nfs_writedata_free(struct nfs_write_data *); + +/* + * linux/fs/nfs/read.c + */ +extern int nfs_readpage(struct file *, struct page *); +extern int nfs_readpages(struct file *, struct address_space *, + struct list_head *, unsigned); +extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); +extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, + struct page *); + +/* + * Allocate nfs_read_data structures + */ +extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages); +extern void nfs_readdata_free(struct nfs_read_data *); + +/* + * linux/fs/nfs3proc.c + */ +#ifdef CONFIG_NFS_V3_ACL +extern struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type); +extern int nfs3_proc_setacl(struct inode *inode, int type, + struct posix_acl *acl); +extern int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode, + umode_t mode); +extern void nfs3_forget_cached_acls(struct inode *inode); +#else +static inline int nfs3_proc_set_default_acl(struct inode *dir, + struct inode *inode, + umode_t mode) +{ + return 0; +} + +static inline void nfs3_forget_cached_acls(struct inode *inode) +{ +} +#endif /* CONFIG_NFS_V3_ACL */ + +/* + * inline functions + */ + +static inline loff_t nfs_size_to_loff_t(__u64 size) +{ + if (size > (__u64) OFFSET_MAX - 1) + return OFFSET_MAX - 1; + return (loff_t) size; +} + +static inline ino_t +nfs_fileid_to_ino_t(u64 fileid) +{ + ino_t ino = (ino_t) fileid; + if (sizeof(ino_t) < sizeof(u64)) + ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8; + return ino; +} + +#define NFS_JUKEBOX_RETRY_TIME (5 * HZ) + +#endif /* __KERNEL__ */ + +/* + * NFS debug flags + */ +#define NFSDBG_VFS 0x0001 +#define NFSDBG_DIRCACHE 0x0002 +#define NFSDBG_LOOKUPCACHE 0x0004 +#define NFSDBG_PAGECACHE 0x0008 +#define NFSDBG_PROC 0x0010 +#define NFSDBG_XDR 0x0020 +#define NFSDBG_FILE 0x0040 +#define NFSDBG_ROOT 0x0080 +#define NFSDBG_CALLBACK 0x0100 +#define NFSDBG_CLIENT 0x0200 +#define NFSDBG_MOUNT 0x0400 +#define NFSDBG_FSCACHE 0x0800 +#define NFSDBG_PNFS 0x1000 +#define NFSDBG_PNFS_LD 0x2000 +#define NFSDBG_ALL 0xFFFF + +#ifdef __KERNEL__ + +# undef ifdebug +# ifdef NFS_DEBUG +# define ifdebug(fac) if (unlikely(nfs_debug & NFSDBG_##fac)) +# define NFS_IFDEBUG(x) x +# else +# define ifdebug(fac) if (0) +# define NFS_IFDEBUG(x) +# endif +#endif /* __KERNEL */ + +#endif diff --git a/include/linux/nfs_fs_i.h b/include/linux/nfs_fs_i.h new file mode 100644 index 00000000..a5c50d97 --- /dev/null +++ 
b/include/linux/nfs_fs_i.h @@ -0,0 +1,20 @@ +#ifndef _NFS_FS_I +#define _NFS_FS_I + +struct nlm_lockowner; + +/* + * NFS lock info + */ +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + +#endif diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h new file mode 100644 index 00000000..7073fc74 --- /dev/null +++ b/include/linux/nfs_fs_sb.h @@ -0,0 +1,243 @@ +#ifndef _NFS_FS_SB +#define _NFS_FS_SB + +#include <linux/list.h> +#include <linux/backing-dev.h> +#include <linux/idr.h> +#include <linux/wait.h> +#include <linux/nfs_xdr.h> +#include <linux/sunrpc/xprt.h> + +#include <linux/atomic.h> + +struct nfs4_session; +struct nfs_iostats; +struct nlm_host; +struct nfs4_sequence_args; +struct nfs4_sequence_res; +struct nfs_server; +struct nfs4_minor_version_ops; +struct server_scope; +struct nfs41_impl_id; + +/* + * The nfs_client identifies our client state to the server. + */ +struct nfs_client { + atomic_t cl_count; + int cl_cons_state; /* current construction state (-ve: init error) */ +#define NFS_CS_READY 0 /* ready to be used */ +#define NFS_CS_INITING 1 /* busy initialising */ +#define NFS_CS_SESSION_INITING 2 /* busy initialising session */ + unsigned long cl_res_state; /* NFS resources state */ +#define NFS_CS_CALLBACK 1 /* - callback started */ +#define NFS_CS_IDMAP 2 /* - idmap started */ +#define NFS_CS_RENEWD 3 /* - renewd started */ +#define NFS_CS_STOP_RENEW 4 /* no more state to renew */ +#define NFS_CS_CHECK_LEASE_TIME 5 /* need to check lease time */ + struct sockaddr_storage cl_addr; /* server identifier */ + size_t cl_addrlen; + char * cl_hostname; /* hostname of server */ + struct list_head cl_share_link; /* link in global client list */ + struct list_head cl_superblocks; /* List of nfs_server structs */ + + struct rpc_clnt * cl_rpcclient; + const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */ + int cl_proto; /* Network transport protocol */ + + u32 cl_minorversion;/* NFSv4 minorversion */ + struct rpc_cred *cl_machine_cred; + +#ifdef CONFIG_NFS_V4 + u64 cl_clientid; /* constant */ + nfs4_verifier cl_confirm; /* Clientid verifier */ + unsigned long cl_state; + + spinlock_t cl_lock; + + unsigned long cl_lease_time; + unsigned long cl_last_renewal; + struct delayed_work cl_renewd; + + struct rpc_wait_queue cl_rpcwaitq; + + /* used for the setclientid verifier */ + struct timespec cl_boot_time; + + /* idmapper */ + struct idmap * cl_idmap; + + /* Our own IP address, as a null-terminated string. + * This is used to generate the clientid, and the callback address. + */ + char cl_ipaddr[48]; + unsigned char cl_id_uniquifier; + u32 cl_cb_ident; /* v4.0 callback identifier */ + const struct nfs4_minor_version_ops *cl_mvops; + + /* The sequence id to use for the next CREATE_SESSION */ + u32 cl_seqid; + /* The flags used for obtaining the clientid during EXCHANGE_ID */ + u32 cl_exchange_flags; + struct nfs4_session *cl_session; /* sharred session */ +#endif /* CONFIG_NFS_V4 */ + +#ifdef CONFIG_NFS_FSCACHE + struct fscache_cookie *fscache; /* client index cache cookie */ +#endif + + struct server_scope *server_scope; /* from exchange_id */ + struct nfs41_impl_id *impl_id; /* from exchange_id */ + struct net *net; +}; + +/* + * NFS client parameters stored in the superblock. 
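+ *
+ * One nfs_server is created per mounted filesystem; several nfs_server
+ * structs may share a single nfs_client (see cl_superblocks above and
+ * client_link below).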
+ */ +struct nfs_server { + struct nfs_client * nfs_client; /* shared client and NFS4 state */ + struct list_head client_link; /* List of other nfs_server structs + * that share the same client + */ + struct list_head master_link; /* link in master servers list */ + struct rpc_clnt * client; /* RPC client handle */ + struct rpc_clnt * client_acl; /* ACL RPC client handle */ + struct nlm_host *nlm_host; /* NLM client handle */ + struct nfs_iostats __percpu *io_stats; /* I/O statistics */ + struct backing_dev_info backing_dev_info; + atomic_long_t writeback; /* number of writeback pages */ + int flags; /* various flags */ + unsigned int caps; /* server capabilities */ + unsigned int rsize; /* read size */ + unsigned int rpages; /* read size (in pages) */ + unsigned int wsize; /* write size */ + unsigned int wpages; /* write size (in pages) */ + unsigned int wtmult; /* server disk block size */ + unsigned int dtsize; /* readdir size */ + unsigned short port; /* "port=" setting */ + unsigned int bsize; /* server block size */ + unsigned int acregmin; /* attr cache timeouts */ + unsigned int acregmax; + unsigned int acdirmin; + unsigned int acdirmax; + unsigned int namelen; + unsigned int options; /* extra options enabled by mount */ +#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ + + struct nfs_fsid fsid; + __u64 maxfilesize; /* maximum file size */ + struct timespec time_delta; /* smallest time granularity */ + unsigned long mount_time; /* when this fs was mounted */ + dev_t s_dev; /* superblock dev numbers */ + +#ifdef CONFIG_NFS_FSCACHE + struct nfs_fscache_key *fscache_key; /* unique key for superblock */ + struct fscache_cookie *fscache; /* superblock cookie */ +#endif + + u32 pnfs_blksize; /* layout_blksize attr */ +#ifdef CONFIG_NFS_V4 + u32 attr_bitmask[3];/* V4 bitmask representing the set + of attributes supported on this + filesystem */ + u32 cache_consistency_bitmask[2]; + /* V4 bitmask representing the subset + of change attribute, size, ctime + and mtime attributes supported by + the server */ + u32 acl_bitmask; /* V4 bitmask representing the ACEs + that are supported on this + filesystem */ + u32 fh_expire_type; /* V4 bitmask representing file + handle volatility type for + this filesystem */ + struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ + struct rpc_wait_queue roc_rpcwaitq; + void *pnfs_ld_data; /* per mount point data */ + + /* the following fields are protected by nfs_client->cl_lock */ + struct rb_root state_owners; +#endif + struct ida openowner_id; + struct ida lockowner_id; + struct list_head state_owners_lru; + struct list_head layouts; + struct list_head delegations; + void (*destroy)(struct nfs_server *); + + atomic_t active; /* Keep trace of any activity to this server */ + + /* mountd-related mount options */ + struct sockaddr_storage mountd_address; + size_t mountd_addrlen; + u32 mountd_version; + unsigned short mountd_port; + unsigned short mountd_protocol; +}; + +/* Server capabilities */ +#define NFS_CAP_READDIRPLUS (1U << 0) +#define NFS_CAP_HARDLINKS (1U << 1) +#define NFS_CAP_SYMLINKS (1U << 2) +#define NFS_CAP_ACLS (1U << 3) +#define NFS_CAP_ATOMIC_OPEN (1U << 4) +#define NFS_CAP_CHANGE_ATTR (1U << 5) +#define NFS_CAP_FILEID (1U << 6) +#define NFS_CAP_MODE (1U << 7) +#define NFS_CAP_NLINK (1U << 8) +#define NFS_CAP_OWNER (1U << 9) +#define NFS_CAP_OWNER_GROUP (1U << 10) +#define NFS_CAP_ATIME (1U << 11) +#define NFS_CAP_CTIME (1U << 12) +#define NFS_CAP_MTIME (1U << 13) +#define NFS_CAP_POSIX_LOCK (1U << 14) 
+#define NFS_CAP_UIDGID_NOMAP (1U << 15) + + +/* maximum number of slots to use */ +#define NFS4_DEF_SLOT_TABLE_SIZE (16U) +#define NFS4_MAX_SLOT_TABLE (256U) +#define NFS4_NO_SLOT ((u32)-1) + +#if defined(CONFIG_NFS_V4) + +/* Sessions */ +#define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long)) +struct nfs4_slot_table { + struct nfs4_slot *slots; /* seqid per slot */ + unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */ + spinlock_t slot_tbl_lock; + struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */ + u32 max_slots; /* # slots in table */ + u32 highest_used_slotid; /* sent to server on each SEQ. + * op for dynamic resizing */ + u32 target_max_slots; /* Set by CB_RECALL_SLOT as + * the new max_slots */ + struct completion complete; +}; + +static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp) +{ + return sp - tbl->slots; +} + +/* + * Session related parameters + */ +struct nfs4_session { + struct nfs4_sessionid sess_id; + u32 flags; + unsigned long session_state; + u32 hash_alg; + u32 ssv_len; + + /* The fore and back channel */ + struct nfs4_channel_attrs fc_attrs; + struct nfs4_slot_table fc_slot_table; + struct nfs4_channel_attrs bc_attrs; + struct nfs4_slot_table bc_slot_table; + struct nfs_client *clp; +}; + +#endif /* CONFIG_NFS_V4 */ +#endif diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h new file mode 100644 index 00000000..7eed2012 --- /dev/null +++ b/include/linux/nfs_idmap.h @@ -0,0 +1,102 @@ +/* + * include/linux/nfs_idmap.h + * + * UID and GID to name mapping for clients. + * + * Copyright (c) 2002 The Regents of the University of Michigan. + * All rights reserved. + * + * Marius Aamodt Eriksen <marius@umich.edu> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef NFS_IDMAP_H +#define NFS_IDMAP_H + +#include <linux/types.h> + +/* XXX from bits/utmp.h */ +#define IDMAP_NAMESZ 128 + +#define IDMAP_TYPE_USER 0 +#define IDMAP_TYPE_GROUP 1 + +#define IDMAP_CONV_IDTONAME 0 +#define IDMAP_CONV_NAMETOID 1 + +#define IDMAP_STATUS_INVALIDMSG 0x01 +#define IDMAP_STATUS_AGAIN 0x02 +#define IDMAP_STATUS_LOOKUPFAIL 0x04 +#define IDMAP_STATUS_SUCCESS 0x08 + +struct idmap_msg { + __u8 im_type; + __u8 im_conv; + char im_name[IDMAP_NAMESZ]; + __u32 im_id; + __u8 im_status; +}; + +#ifdef __KERNEL__ + +/* Forward declaration to make this header independent of others */ +struct nfs_client; +struct nfs_server; +struct nfs_fattr; +struct nfs4_string; + +#ifdef CONFIG_NFS_V4 +int nfs_idmap_init(void); +void nfs_idmap_quit(void); +#else +static inline int nfs_idmap_init(void) +{ + return 0; +} + +static inline void nfs_idmap_quit(void) +{} +#endif + +int nfs_idmap_new(struct nfs_client *); +void nfs_idmap_delete(struct nfs_client *); + +void nfs_fattr_init_names(struct nfs_fattr *fattr, + struct nfs4_string *owner_name, + struct nfs4_string *group_name); +void nfs_fattr_free_names(struct nfs_fattr *); +void nfs_fattr_map_and_free_names(struct nfs_server *, struct nfs_fattr *); + +int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, __u32 *); +int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, __u32 *); +int nfs_map_uid_to_name(const struct nfs_server *, __u32, char *, size_t); +int nfs_map_gid_to_group(const struct nfs_server *, __u32, char *, size_t); + +extern unsigned int nfs_idmap_cache_timeout; +#endif /* __KERNEL__ */ + +#endif /* NFS_IDMAP_H */ diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h new file mode 100644 index 00000000..9dcbbe9a --- /dev/null +++ b/include/linux/nfs_iostat.h @@ -0,0 +1,133 @@ +/* + * User-space visible declarations for NFS client per-mount + * point statistics + * + * Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com> + * + * NFS client per-mount statistics provide information about the + * health of the NFS client and the health of each NFS mount point. + * Generally these are not for detailed problem diagnosis, but + * simply to indicate that there is a problem. + * + * These counters are not meant to be human-readable, but are meant + * to be integrated into system monitoring tools such as "sar" and + * "iostat". As such, the counters are sampled by the tools over + * time, and are never zeroed after a file system is mounted. + * Moving averages can be computed by the tools by taking the + * difference between two instantaneous samples and dividing that + * by the time between the samples. + */ + +#ifndef _LINUX_NFS_IOSTAT +#define _LINUX_NFS_IOSTAT + +#define NFS_IOSTAT_VERS "1.1" + +/* + * NFS byte counters + * + * 1. SERVER - the number of payload bytes read from or written + * to the server by the NFS client via an NFS READ or WRITE + * request. + * + * 2. NORMAL - the number of bytes read or written by applications + * via the read(2) and write(2) system call interfaces. + * + * 3. DIRECT - the number of bytes read or written from files + * opened with the O_DIRECT flag. + * + * These counters give a view of the data throughput into and out + * of the NFS client. Comparing the number of bytes requested by + * an application with the number of bytes the client requests from + * the server can provide an indication of client efficiency + * (per-op, cache hits, etc). + * + * These counters can also help characterize which access methods + * are in use. 
DIRECT by itself shows whether there is any O_DIRECT + * traffic. NORMAL + DIRECT shows how much data is going through + * the system call interface. A large amount of SERVER traffic + * without much NORMAL or DIRECT traffic shows that applications + * are using mapped files. + * + * NFS page counters + * + * These count the number of pages read or written via nfs_readpage(), + * nfs_readpages(), or their write equivalents. + * + * NB: When adding new byte counters, please include the measured + * units in the name of each byte counter to help users of this + * interface determine what exactly is being counted. + */ +enum nfs_stat_bytecounters { + NFSIOS_NORMALREADBYTES = 0, + NFSIOS_NORMALWRITTENBYTES, + NFSIOS_DIRECTREADBYTES, + NFSIOS_DIRECTWRITTENBYTES, + NFSIOS_SERVERREADBYTES, + NFSIOS_SERVERWRITTENBYTES, + NFSIOS_READPAGES, + NFSIOS_WRITEPAGES, + __NFSIOS_BYTESMAX, +}; + +/* + * NFS event counters + * + * These counters provide a low-overhead way of monitoring client + * activity without enabling NFS trace debugging. The counters + * show the rate at which VFS requests are made, and how often the + * client invalidates its data and attribute caches. This allows + * system administrators to monitor such things as how close-to-open + * is working, and answer questions such as "why are there so many + * GETATTR requests on the wire?" + * + * They also count anamolous events such as short reads and writes, + * silly renames due to close-after-delete, and operations that + * change the size of a file (such operations can often be the + * source of data corruption if applications aren't using file + * locking properly). + */ +enum nfs_stat_eventcounters { + NFSIOS_INODEREVALIDATE = 0, + NFSIOS_DENTRYREVALIDATE, + NFSIOS_DATAINVALIDATE, + NFSIOS_ATTRINVALIDATE, + NFSIOS_VFSOPEN, + NFSIOS_VFSLOOKUP, + NFSIOS_VFSACCESS, + NFSIOS_VFSUPDATEPAGE, + NFSIOS_VFSREADPAGE, + NFSIOS_VFSREADPAGES, + NFSIOS_VFSWRITEPAGE, + NFSIOS_VFSWRITEPAGES, + NFSIOS_VFSGETDENTS, + NFSIOS_VFSSETATTR, + NFSIOS_VFSFLUSH, + NFSIOS_VFSFSYNC, + NFSIOS_VFSLOCK, + NFSIOS_VFSRELEASE, + NFSIOS_CONGESTIONWAIT, + NFSIOS_SETATTRTRUNC, + NFSIOS_EXTENDWRITE, + NFSIOS_SILLYRENAME, + NFSIOS_SHORTREAD, + NFSIOS_SHORTWRITE, + NFSIOS_DELAY, + NFSIOS_PNFS_READ, + NFSIOS_PNFS_WRITE, + __NFSIOS_COUNTSMAX, +}; + +/* + * NFS local caching servicing counters + */ +enum nfs_stat_fscachecounters { + NFSIOS_FSCACHE_PAGES_READ_OK, + NFSIOS_FSCACHE_PAGES_READ_FAIL, + NFSIOS_FSCACHE_PAGES_WRITTEN_OK, + NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, + NFSIOS_FSCACHE_PAGES_UNCACHED, + __NFSIOS_FSCACHEMAX, +}; + +#endif /* _LINUX_NFS_IOSTAT */ diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h new file mode 100644 index 00000000..576bddd7 --- /dev/null +++ b/include/linux/nfs_mount.h @@ -0,0 +1,77 @@ +#ifndef _LINUX_NFS_MOUNT_H +#define _LINUX_NFS_MOUNT_H + +/* + * linux/include/linux/nfs_mount.h + * + * Copyright (C) 1992 Rick Sladkey + * + * structure passed from user-space to kernel-space during an nfs mount + */ +#include <linux/in.h> +#include <linux/nfs.h> +#include <linux/nfs2.h> +#include <linux/nfs3.h> + +/* + * WARNING! Do not delete or change the order of these fields. If + * a new field is required then add it to the end. The version field + * tracks which fields are present. This will ensure some measure of + * mount-to-kernel version compatibility. Some of these aren't used yet + * but here they are anyway. 
+ */ +#define NFS_MOUNT_VERSION 6 +#define NFS_MAX_CONTEXT_LEN 256 + +struct nfs_mount_data { + int version; /* 1 */ + int fd; /* 1 */ + struct nfs2_fh old_root; /* 1 */ + int flags; /* 1 */ + int rsize; /* 1 */ + int wsize; /* 1 */ + int timeo; /* 1 */ + int retrans; /* 1 */ + int acregmin; /* 1 */ + int acregmax; /* 1 */ + int acdirmin; /* 1 */ + int acdirmax; /* 1 */ + struct sockaddr_in addr; /* 1 */ + char hostname[NFS_MAXNAMLEN + 1]; /* 1 */ + int namlen; /* 2 */ + unsigned int bsize; /* 3 */ + struct nfs3_fh root; /* 4 */ + int pseudoflavor; /* 5 */ + char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */ +}; + +/* bits in the flags field visible to user space */ + +#define NFS_MOUNT_SOFT 0x0001 /* 1 */ +#define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */ +#define NFS_MOUNT_SECURE 0x0004 /* 1 */ +#define NFS_MOUNT_POSIX 0x0008 /* 1 */ +#define NFS_MOUNT_NOCTO 0x0010 /* 1 */ +#define NFS_MOUNT_NOAC 0x0020 /* 1 */ +#define NFS_MOUNT_TCP 0x0040 /* 2 */ +#define NFS_MOUNT_VER3 0x0080 /* 3 */ +#define NFS_MOUNT_KERBEROS 0x0100 /* 3 */ +#define NFS_MOUNT_NONLM 0x0200 /* 3 */ +#define NFS_MOUNT_BROKEN_SUID 0x0400 /* 4 */ +#define NFS_MOUNT_NOACL 0x0800 /* 4 */ +#define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */ +#define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ +#define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */ +#define NFS_MOUNT_UNSHARED 0x8000 /* 5 */ +#define NFS_MOUNT_FLAGMASK 0xFFFF + +/* The following are for internal use only */ +#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 +#define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 +#define NFS_MOUNT_NORESVPORT 0x40000 +#define NFS_MOUNT_LEGACY_INTERFACE 0x80000 + +#define NFS_MOUNT_LOCAL_FLOCK 0x100000 +#define NFS_MOUNT_LOCAL_FCNTL 0x200000 + +#endif diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h new file mode 100644 index 00000000..eac30d6b --- /dev/null +++ b/include/linux/nfs_page.h @@ -0,0 +1,154 @@ +/* + * linux/include/linux/nfs_page.h + * + * Copyright (C) 2000 Trond Myklebust + * + * NFS page cache wrapper. 
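+ *
+ * A struct nfs_page describes a pending read or write request against
+ * (part of) a page; requests are coalesced for RPC via struct
+ * nfs_pageio_descriptor below.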
+ */ + +#ifndef _LINUX_NFS_PAGE_H +#define _LINUX_NFS_PAGE_H + + +#include <linux/list.h> +#include <linux/pagemap.h> +#include <linux/wait.h> +#include <linux/sunrpc/auth.h> +#include <linux/nfs_xdr.h> + +#include <linux/kref.h> + +/* + * Valid flags for a dirty buffer + */ +enum { + PG_BUSY = 0, + PG_MAPPED, + PG_CLEAN, + PG_NEED_COMMIT, + PG_NEED_RESCHED, + PG_PARTIAL_READ_FAILED, + PG_COMMIT_TO_DS, +}; + +struct nfs_inode; +struct nfs_page { + struct list_head wb_list; /* Defines state of page: */ + struct page *wb_page; /* page to read in/write out */ + struct nfs_open_context *wb_context; /* File state context info */ + struct nfs_lock_context *wb_lock_context; /* lock context info */ + atomic_t wb_complete; /* i/os we're waiting for */ + pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ + unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ + wb_pgbase, /* Start of page data */ + wb_bytes; /* Length of request */ + struct kref wb_kref; /* reference count */ + unsigned long wb_flags; + struct nfs_writeverf wb_verf; /* Commit cookie */ +}; + +struct nfs_pageio_descriptor; +struct nfs_pageio_ops { + void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *); + bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *); + int (*pg_doio)(struct nfs_pageio_descriptor *); +}; + +struct nfs_pageio_descriptor { + struct list_head pg_list; + unsigned long pg_bytes_written; + size_t pg_count; + size_t pg_bsize; + unsigned int pg_base; + unsigned char pg_moreio : 1, + pg_recoalesce : 1; + + struct inode *pg_inode; + const struct nfs_pageio_ops *pg_ops; + int pg_ioflags; + int pg_error; + const struct rpc_call_ops *pg_rpc_callops; + struct pnfs_layout_segment *pg_lseg; +}; + +#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) + +extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, + struct inode *inode, + struct page *page, + unsigned int offset, + unsigned int count); +extern void nfs_release_request(struct nfs_page *req); + + +extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, + struct inode *inode, + const struct nfs_pageio_ops *pg_ops, + size_t bsize, + int how); +extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, + struct nfs_page *); +extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); +extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); +extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, + struct nfs_page *prev, + struct nfs_page *req); +extern int nfs_wait_on_request(struct nfs_page *); +extern void nfs_unlock_request(struct nfs_page *req); + +/* + * Lock the page of an asynchronous request without getting a new reference + */ +static inline int +nfs_lock_request_dontget(struct nfs_page *req) +{ + return !test_and_set_bit(PG_BUSY, &req->wb_flags); +} + +static inline int +nfs_lock_request(struct nfs_page *req) +{ + if (test_and_set_bit(PG_BUSY, &req->wb_flags)) + return 0; + kref_get(&req->wb_kref); + return 1; +} + + +/** + * nfs_list_add_request - Insert a request into a list + * @req: request + * @head: head of list into which to insert the request. 
+ */ +static inline void +nfs_list_add_request(struct nfs_page *req, struct list_head *head) +{ + list_add_tail(&req->wb_list, head); +} + + +/** + * nfs_list_remove_request - Remove a request from its wb_list + * @req: request + */ +static inline void +nfs_list_remove_request(struct nfs_page *req) +{ + if (list_empty(&req->wb_list)) + return; + list_del_init(&req->wb_list); +} + +static inline struct nfs_page * +nfs_list_entry(struct list_head *head) +{ + return list_entry(head, struct nfs_page, wb_list); +} + +static inline +loff_t req_offset(struct nfs_page *req) +{ + return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset; +} + +#endif /* _LINUX_NFS_PAGE_H */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h new file mode 100644 index 00000000..7ba3551a --- /dev/null +++ b/include/linux/nfs_xdr.h @@ -0,0 +1,1315 @@ +#ifndef _LINUX_NFS_XDR_H +#define _LINUX_NFS_XDR_H + +#include <linux/nfsacl.h> +#include <linux/sunrpc/gss_api.h> + +/* + * To change the maximum rsize and wsize supported by the NFS client, adjust + * NFS_MAX_FILE_IO_SIZE. 64KB is a typical maximum, but some servers can + * support a megabyte or more. The default is left at 4096 bytes, which is + * reasonable for NFS over UDP. + */ +#define NFS_MAX_FILE_IO_SIZE (1048576U) +#define NFS_DEF_FILE_IO_SIZE (4096U) +#define NFS_MIN_FILE_IO_SIZE (1024U) + +/* Forward declaration for NFS v3 */ +struct nfs4_secinfo_flavors; + +struct nfs4_string { + unsigned int len; + char *data; +}; + +struct nfs_fsid { + uint64_t major; + uint64_t minor; +}; + +/* + * Helper for checking equality between 2 fsids. + */ +static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid *b) +{ + return a->major == b->major && a->minor == b->minor; +} + +struct nfs_fattr { + unsigned int valid; /* which fields are valid */ + umode_t mode; + __u32 nlink; + __u32 uid; + __u32 gid; + dev_t rdev; + __u64 size; + union { + struct { + __u32 blocksize; + __u32 blocks; + } nfs2; + struct { + __u64 used; + } nfs3; + } du; + struct nfs_fsid fsid; + __u64 fileid; + __u64 mounted_on_fileid; + struct timespec atime; + struct timespec mtime; + struct timespec ctime; + __u64 change_attr; /* NFSv4 change attribute */ + __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ + __u64 pre_size; /* pre_op_attr.size */ + struct timespec pre_mtime; /* pre_op_attr.mtime */ + struct timespec pre_ctime; /* pre_op_attr.ctime */ + unsigned long time_start; + unsigned long gencount; + struct nfs4_string *owner_name; + struct nfs4_string *group_name; +}; + +#define NFS_ATTR_FATTR_TYPE (1U << 0) +#define NFS_ATTR_FATTR_MODE (1U << 1) +#define NFS_ATTR_FATTR_NLINK (1U << 2) +#define NFS_ATTR_FATTR_OWNER (1U << 3) +#define NFS_ATTR_FATTR_GROUP (1U << 4) +#define NFS_ATTR_FATTR_RDEV (1U << 5) +#define NFS_ATTR_FATTR_SIZE (1U << 6) +#define NFS_ATTR_FATTR_PRESIZE (1U << 7) +#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8) +#define NFS_ATTR_FATTR_SPACE_USED (1U << 9) +#define NFS_ATTR_FATTR_FSID (1U << 10) +#define NFS_ATTR_FATTR_FILEID (1U << 11) +#define NFS_ATTR_FATTR_ATIME (1U << 12) +#define NFS_ATTR_FATTR_MTIME (1U << 13) +#define NFS_ATTR_FATTR_CTIME (1U << 14) +#define NFS_ATTR_FATTR_PREMTIME (1U << 15) +#define NFS_ATTR_FATTR_PRECTIME (1U << 16) +#define NFS_ATTR_FATTR_CHANGE (1U << 17) +#define NFS_ATTR_FATTR_PRECHANGE (1U << 18) +#define NFS_ATTR_FATTR_V4_LOCATIONS (1U << 19) +#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 20) +#define NFS_ATTR_FATTR_MOUNTPOINT (1U << 21) +#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22) +#define 
NFS_ATTR_FATTR_OWNER_NAME (1U << 23) +#define NFS_ATTR_FATTR_GROUP_NAME (1U << 24) + +#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ + | NFS_ATTR_FATTR_MODE \ + | NFS_ATTR_FATTR_NLINK \ + | NFS_ATTR_FATTR_OWNER \ + | NFS_ATTR_FATTR_GROUP \ + | NFS_ATTR_FATTR_RDEV \ + | NFS_ATTR_FATTR_SIZE \ + | NFS_ATTR_FATTR_FSID \ + | NFS_ATTR_FATTR_FILEID \ + | NFS_ATTR_FATTR_ATIME \ + | NFS_ATTR_FATTR_MTIME \ + | NFS_ATTR_FATTR_CTIME) +#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \ + | NFS_ATTR_FATTR_BLOCKS_USED) +#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \ + | NFS_ATTR_FATTR_SPACE_USED) +#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \ + | NFS_ATTR_FATTR_SPACE_USED \ + | NFS_ATTR_FATTR_CHANGE) + +/* + * Info on the file system + */ +struct nfs_fsinfo { + struct nfs_fattr *fattr; /* Post-op attributes */ + __u32 rtmax; /* max. read transfer size */ + __u32 rtpref; /* pref. read transfer size */ + __u32 rtmult; /* reads should be multiple of this */ + __u32 wtmax; /* max. write transfer size */ + __u32 wtpref; /* pref. write transfer size */ + __u32 wtmult; /* writes should be multiple of this */ + __u32 dtpref; /* pref. readdir transfer size */ + __u64 maxfilesize; + struct timespec time_delta; /* server time granularity */ + __u32 lease_time; /* in seconds */ + __u32 layouttype; /* supported pnfs layout driver */ + __u32 blksize; /* preferred pnfs io block size */ +}; + +struct nfs_fsstat { + struct nfs_fattr *fattr; /* Post-op attributes */ + __u64 tbytes; /* total size in bytes */ + __u64 fbytes; /* # of free bytes */ + __u64 abytes; /* # of bytes available to user */ + __u64 tfiles; /* # of files */ + __u64 ffiles; /* # of free files */ + __u64 afiles; /* # of files available to user */ +}; + +struct nfs2_fsstat { + __u32 tsize; /* Server transfer size */ + __u32 bsize; /* Filesystem block size */ + __u32 blocks; /* No. of "bsize" blocks on filesystem */ + __u32 bfree; /* No. of free "bsize" blocks */ + __u32 bavail; /* No. 
of available "bsize" blocks */ +}; + +struct nfs_pathconf { + struct nfs_fattr *fattr; /* Post-op attributes */ + __u32 max_link; /* max # of hard links */ + __u32 max_namelen; /* max name length */ +}; + +struct nfs4_change_info { + u32 atomic; + u64 before; + u64 after; +}; + +struct nfs_seqid; + +/* nfs41 sessions channel attributes */ +struct nfs4_channel_attrs { + u32 max_rqst_sz; + u32 max_resp_sz; + u32 max_resp_sz_cached; + u32 max_ops; + u32 max_reqs; +}; + +/* nfs41 sessions slot seqid */ +struct nfs4_slot { + u32 seq_nr; +}; + +struct nfs4_sequence_args { + struct nfs4_session *sa_session; + u32 sa_slotid; + u8 sa_cache_this; +}; + +struct nfs4_sequence_res { + struct nfs4_session *sr_session; + struct nfs4_slot *sr_slot; /* slot used to send request */ + int sr_status; /* sequence operation status */ + unsigned long sr_renewal_time; + u32 sr_status_flags; +}; + +struct nfs4_get_lease_time_args { + struct nfs4_sequence_args la_seq_args; +}; + +struct nfs4_get_lease_time_res { + struct nfs_fsinfo *lr_fsinfo; + struct nfs4_sequence_res lr_seq_res; +}; + +#define PNFS_LAYOUT_MAXSIZE 4096 + +struct nfs4_layoutdriver_data { + struct page **pages; + __u32 pglen; + __u32 len; +}; + +struct pnfs_layout_range { + u32 iomode; + u64 offset; + u64 length; +}; + +struct nfs4_layoutget_args { + __u32 type; + struct pnfs_layout_range range; + __u64 minlength; + __u32 maxcount; + struct inode *inode; + struct nfs_open_context *ctx; + struct nfs4_sequence_args seq_args; + nfs4_stateid stateid; + struct nfs4_layoutdriver_data layout; +}; + +struct nfs4_layoutget_res { + __u32 return_on_close; + struct pnfs_layout_range range; + __u32 type; + nfs4_stateid stateid; + struct nfs4_sequence_res seq_res; + struct nfs4_layoutdriver_data *layoutp; +}; + +struct nfs4_layoutget { + struct nfs4_layoutget_args args; + struct nfs4_layoutget_res res; + struct pnfs_layout_segment **lsegpp; + gfp_t gfp_flags; +}; + +struct nfs4_getdevicelist_args { + const struct nfs_fh *fh; + u32 layoutclass; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_getdevicelist_res { + struct pnfs_devicelist *devlist; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_getdeviceinfo_args { + struct pnfs_device *pdev; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_getdeviceinfo_res { + struct pnfs_device *pdev; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_layoutcommit_args { + nfs4_stateid stateid; + __u64 lastbytewritten; + struct inode *inode; + const u32 *bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_layoutcommit_res { + struct nfs_fattr *fattr; + const struct nfs_server *server; + struct nfs4_sequence_res seq_res; + int status; +}; + +struct nfs4_layoutcommit_data { + struct rpc_task task; + struct nfs_fattr fattr; + struct list_head lseg_list; + struct rpc_cred *cred; + struct nfs4_layoutcommit_args args; + struct nfs4_layoutcommit_res res; +}; + +struct nfs4_layoutreturn_args { + struct pnfs_layout_hdr *layout; + struct inode *inode; + nfs4_stateid stateid; + __u32 layout_type; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_layoutreturn_res { + struct nfs4_sequence_res seq_res; + u32 lrs_present; + nfs4_stateid stateid; +}; + +struct nfs4_layoutreturn { + struct nfs4_layoutreturn_args args; + struct nfs4_layoutreturn_res res; + struct rpc_cred *cred; + struct nfs_client *clp; + int rpc_status; +}; + +struct stateowner_id { + __u64 create_time; + __u32 uniquifier; +}; + +/* + * Arguments to the open call. 
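+ *
+ * Editor's note (illustrative sketch, not in the original header): which
+ * member of the "u" union in nfs_openargs below is meaningful depends on the
+ * claim type carried in "claim"; the enum names are quoted from
+ * <linux/nfs4.h> only for orientation:
+ *
+ *      NFS4_OPEN_CLAIM_NULL          -> u.attrs / u.verifier
+ *      NFS4_OPEN_CLAIM_DELEGATE_CUR  -> u.delegation
+ *      NFS4_OPEN_CLAIM_PREVIOUS      -> u.delegation_type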
+ */ +struct nfs_openargs { + const struct nfs_fh * fh; + struct nfs_seqid * seqid; + int open_flags; + fmode_t fmode; + __u64 clientid; + struct stateowner_id id; + union { + struct { + struct iattr * attrs; /* UNCHECKED, GUARDED */ + nfs4_verifier verifier; /* EXCLUSIVE */ + }; + nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ + fmode_t delegation_type; /* CLAIM_PREVIOUS */ + } u; + const struct qstr * name; + const struct nfs_server *server; /* Needed for ID mapping */ + const u32 * bitmask; + const u32 * dir_bitmask; + __u32 claim; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_openres { + nfs4_stateid stateid; + struct nfs_fh fh; + struct nfs4_change_info cinfo; + __u32 rflags; + struct nfs_fattr * f_attr; + struct nfs_fattr * dir_attr; + struct nfs_seqid * seqid; + const struct nfs_server *server; + fmode_t delegation_type; + nfs4_stateid delegation; + __u32 do_recall; + __u64 maxsize; + __u32 attrset[NFS4_BITMAP_SIZE]; + struct nfs4_string *owner; + struct nfs4_string *group_owner; + struct nfs4_sequence_res seq_res; +}; + +/* + * Arguments to the open_confirm call. + */ +struct nfs_open_confirmargs { + const struct nfs_fh * fh; + nfs4_stateid * stateid; + struct nfs_seqid * seqid; +}; + +struct nfs_open_confirmres { + nfs4_stateid stateid; + struct nfs_seqid * seqid; +}; + +/* + * Arguments to the close call. + */ +struct nfs_closeargs { + struct nfs_fh * fh; + nfs4_stateid * stateid; + struct nfs_seqid * seqid; + fmode_t fmode; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_closeres { + nfs4_stateid stateid; + struct nfs_fattr * fattr; + struct nfs_seqid * seqid; + const struct nfs_server *server; + struct nfs4_sequence_res seq_res; +}; +/* + * * Arguments to the lock,lockt, and locku call. + * */ +struct nfs_lowner { + __u64 clientid; + __u64 id; + dev_t s_dev; +}; + +struct nfs_lock_args { + struct nfs_fh * fh; + struct file_lock * fl; + struct nfs_seqid * lock_seqid; + nfs4_stateid * lock_stateid; + struct nfs_seqid * open_seqid; + nfs4_stateid * open_stateid; + struct nfs_lowner lock_owner; + unsigned char block : 1; + unsigned char reclaim : 1; + unsigned char new_lock_owner : 1; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_lock_res { + nfs4_stateid stateid; + struct nfs_seqid * lock_seqid; + struct nfs_seqid * open_seqid; + struct nfs4_sequence_res seq_res; +}; + +struct nfs_locku_args { + struct nfs_fh * fh; + struct file_lock * fl; + struct nfs_seqid * seqid; + nfs4_stateid * stateid; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_locku_res { + nfs4_stateid stateid; + struct nfs_seqid * seqid; + struct nfs4_sequence_res seq_res; +}; + +struct nfs_lockt_args { + struct nfs_fh * fh; + struct file_lock * fl; + struct nfs_lowner lock_owner; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_lockt_res { + struct file_lock * denied; /* LOCK, LOCKT failed */ + struct nfs4_sequence_res seq_res; +}; + +struct nfs_release_lockowner_args { + struct nfs_lowner lock_owner; +}; + +struct nfs4_delegreturnargs { + const struct nfs_fh *fhandle; + const nfs4_stateid *stateid; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_delegreturnres { + struct nfs_fattr * fattr; + const struct nfs_server *server; + struct nfs4_sequence_res seq_res; +}; + +/* + * Arguments to the read call. 
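+ *
+ * Editor's note: a hedged sketch (not in the original header) of how the
+ * fields below are typically filled from a struct nfs_page for a
+ * single-page READ; "req" is a hypothetical request pointer:
+ *
+ *      args->offset = req_offset(req);    /* byte offset in the file  */
+ *      args->pgbase = req->wb_pgbase;     /* offset within pages[0]   */
+ *      args->count  = req->wb_bytes;      /* number of bytes to read  */
+ *      args->pages  = &req->wb_page;      /* pages receiving the data */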
+ */ +struct nfs_readargs { + struct nfs_fh * fh; + struct nfs_open_context *context; + struct nfs_lock_context *lock_context; + __u64 offset; + __u32 count; + unsigned int pgbase; + struct page ** pages; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_readres { + struct nfs_fattr * fattr; + __u32 count; + int eof; + struct nfs4_sequence_res seq_res; +}; + +/* + * Arguments to the write call. + */ +struct nfs_writeargs { + struct nfs_fh * fh; + struct nfs_open_context *context; + struct nfs_lock_context *lock_context; + __u64 offset; + __u32 count; + enum nfs3_stable_how stable; + unsigned int pgbase; + struct page ** pages; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_writeverf { + enum nfs3_stable_how committed; + __be32 verifier[2]; +}; + +struct nfs_writeres { + struct nfs_fattr * fattr; + struct nfs_writeverf * verf; + __u32 count; + const struct nfs_server *server; + struct nfs4_sequence_res seq_res; +}; + +/* + * Common arguments to the unlink call + */ +struct nfs_removeargs { + const struct nfs_fh *fh; + struct qstr name; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_removeres { + const struct nfs_server *server; + struct nfs_fattr *dir_attr; + struct nfs4_change_info cinfo; + struct nfs4_sequence_res seq_res; +}; + +/* + * Common arguments to the rename call + */ +struct nfs_renameargs { + const struct nfs_fh *old_dir; + const struct nfs_fh *new_dir; + const struct qstr *old_name; + const struct qstr *new_name; + const u32 *bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_renameres { + const struct nfs_server *server; + struct nfs4_change_info old_cinfo; + struct nfs_fattr *old_fattr; + struct nfs4_change_info new_cinfo; + struct nfs_fattr *new_fattr; + struct nfs4_sequence_res seq_res; +}; + +/* + * Argument struct for decode_entry function + */ +struct nfs_entry { + __u64 ino; + __u64 cookie, + prev_cookie; + const char * name; + unsigned int len; + int eof; + struct nfs_fh * fh; + struct nfs_fattr * fattr; + unsigned char d_type; + struct nfs_server * server; +}; + +/* + * The following types are for NFSv2 only. 
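+ *
+ * Editor's note (sketch, not part of the original header): these mirror the
+ * NFSv2 wire arguments directly; e.g. a LOOKUP of "foo" pairs the parent
+ * file handle with the raw name, roughly as below, where "dir" is a
+ * hypothetical parent inode and NFS_FH() comes from <linux/nfs_fs.h>:
+ *
+ *      struct nfs_diropargs arg = {
+ *              .fh   = NFS_FH(dir),
+ *              .name = "foo",
+ *              .len  = 3,
+ *      };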
+ */ +struct nfs_sattrargs { + struct nfs_fh * fh; + struct iattr * sattr; +}; + +struct nfs_diropargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; +}; + +struct nfs_createargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; + struct iattr * sattr; +}; + +struct nfs_setattrargs { + struct nfs_fh * fh; + nfs4_stateid stateid; + struct iattr * iap; + const struct nfs_server * server; /* Needed for name mapping */ + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_setaclargs { + struct nfs_fh * fh; + size_t acl_len; + unsigned int acl_pgbase; + struct page ** acl_pages; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_setaclres { + struct nfs4_sequence_res seq_res; +}; + +struct nfs_getaclargs { + struct nfs_fh * fh; + size_t acl_len; + unsigned int acl_pgbase; + struct page ** acl_pages; + struct nfs4_sequence_args seq_args; +}; + +/* getxattr ACL interface flags */ +#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */ +struct nfs_getaclres { + size_t acl_len; + size_t acl_data_offset; + int acl_flags; + struct page * acl_scratch; + struct nfs4_sequence_res seq_res; +}; + +struct nfs_setattrres { + struct nfs_fattr * fattr; + const struct nfs_server * server; + struct nfs4_sequence_res seq_res; +}; + +struct nfs_linkargs { + struct nfs_fh * fromfh; + struct nfs_fh * tofh; + const char * toname; + unsigned int tolen; +}; + +struct nfs_symlinkargs { + struct nfs_fh * fromfh; + const char * fromname; + unsigned int fromlen; + struct page ** pages; + unsigned int pathlen; + struct iattr * sattr; +}; + +struct nfs_readdirargs { + struct nfs_fh * fh; + __u32 cookie; + unsigned int count; + struct page ** pages; +}; + +struct nfs3_getaclargs { + struct nfs_fh * fh; + int mask; + struct page ** pages; +}; + +struct nfs3_setaclargs { + struct inode * inode; + int mask; + struct posix_acl * acl_access; + struct posix_acl * acl_default; + size_t len; + unsigned int npages; + struct page ** pages; +}; + +struct nfs_diropok { + struct nfs_fh * fh; + struct nfs_fattr * fattr; +}; + +struct nfs_readlinkargs { + struct nfs_fh * fh; + unsigned int pgbase; + unsigned int pglen; + struct page ** pages; +}; + +struct nfs3_sattrargs { + struct nfs_fh * fh; + struct iattr * sattr; + unsigned int guard; + struct timespec guardtime; +}; + +struct nfs3_diropargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; +}; + +struct nfs3_accessargs { + struct nfs_fh * fh; + __u32 access; +}; + +struct nfs3_createargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; + struct iattr * sattr; + enum nfs3_createmode createmode; + __be32 verifier[2]; +}; + +struct nfs3_mkdirargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; + struct iattr * sattr; +}; + +struct nfs3_symlinkargs { + struct nfs_fh * fromfh; + const char * fromname; + unsigned int fromlen; + struct page ** pages; + unsigned int pathlen; + struct iattr * sattr; +}; + +struct nfs3_mknodargs { + struct nfs_fh * fh; + const char * name; + unsigned int len; + enum nfs3_ftype type; + struct iattr * sattr; + dev_t rdev; +}; + +struct nfs3_linkargs { + struct nfs_fh * fromfh; + struct nfs_fh * tofh; + const char * toname; + unsigned int tolen; +}; + +struct nfs3_readdirargs { + struct nfs_fh * fh; + __u64 cookie; + __be32 verf[2]; + int plus; + unsigned int count; + struct page ** pages; +}; + +struct nfs3_diropres { + struct nfs_fattr * dir_attr; + struct nfs_fh * fh; + struct nfs_fattr * fattr; +}; + +struct nfs3_accessres { + struct 
nfs_fattr * fattr; + __u32 access; +}; + +struct nfs3_readlinkargs { + struct nfs_fh * fh; + unsigned int pgbase; + unsigned int pglen; + struct page ** pages; +}; + +struct nfs3_linkres { + struct nfs_fattr * dir_attr; + struct nfs_fattr * fattr; +}; + +struct nfs3_readdirres { + struct nfs_fattr * dir_attr; + __be32 * verf; + int plus; +}; + +struct nfs3_getaclres { + struct nfs_fattr * fattr; + int mask; + unsigned int acl_access_count; + unsigned int acl_default_count; + struct posix_acl * acl_access; + struct posix_acl * acl_default; +}; + +#ifdef CONFIG_NFS_V4 + +typedef u64 clientid4; + +struct nfs4_accessargs { + const struct nfs_fh * fh; + const u32 * bitmask; + u32 access; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_accessres { + const struct nfs_server * server; + struct nfs_fattr * fattr; + u32 supported; + u32 access; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_create_arg { + u32 ftype; + union { + struct { + struct page ** pages; + unsigned int len; + } symlink; /* NF4LNK */ + struct { + u32 specdata1; + u32 specdata2; + } device; /* NF4BLK, NF4CHR */ + } u; + const struct qstr * name; + const struct nfs_server * server; + const struct iattr * attrs; + const struct nfs_fh * dir_fh; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_create_res { + const struct nfs_server * server; + struct nfs_fh * fh; + struct nfs_fattr * fattr; + struct nfs4_change_info dir_cinfo; + struct nfs_fattr * dir_fattr; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_fsinfo_arg { + const struct nfs_fh * fh; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_fsinfo_res { + struct nfs_fsinfo *fsinfo; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_getattr_arg { + const struct nfs_fh * fh; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_getattr_res { + const struct nfs_server * server; + struct nfs_fattr * fattr; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_link_arg { + const struct nfs_fh * fh; + const struct nfs_fh * dir_fh; + const struct qstr * name; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_link_res { + const struct nfs_server * server; + struct nfs_fattr * fattr; + struct nfs4_change_info cinfo; + struct nfs_fattr * dir_attr; + struct nfs4_sequence_res seq_res; +}; + + +struct nfs4_lookup_arg { + const struct nfs_fh * dir_fh; + const struct qstr * name; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_lookup_res { + const struct nfs_server * server; + struct nfs_fattr * fattr; + struct nfs_fh * fh; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_lookup_root_arg { + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_pathconf_arg { + const struct nfs_fh * fh; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_pathconf_res { + struct nfs_pathconf *pathconf; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_readdir_arg { + const struct nfs_fh * fh; + u64 cookie; + nfs4_verifier verifier; + u32 count; + struct page ** pages; /* zero-copy data */ + unsigned int pgbase; /* zero-copy data */ + const u32 * bitmask; + int plus; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_readdir_res { + nfs4_verifier verifier; + unsigned int pgbase; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_readlink { + const struct nfs_fh * fh; + unsigned int pgbase; + unsigned int pglen; /* zero-copy data */ + struct page ** pages; /* 
zero-copy data */ + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_readlink_res { + struct nfs4_sequence_res seq_res; +}; + +#define NFS4_SETCLIENTID_NAMELEN (127) +struct nfs4_setclientid { + const nfs4_verifier * sc_verifier; + unsigned int sc_name_len; + char sc_name[NFS4_SETCLIENTID_NAMELEN + 1]; + u32 sc_prog; + unsigned int sc_netid_len; + char sc_netid[RPCBIND_MAXNETIDLEN + 1]; + unsigned int sc_uaddr_len; + char sc_uaddr[RPCBIND_MAXUADDRLEN + 1]; + u32 sc_cb_ident; +}; + +struct nfs4_setclientid_res { + u64 clientid; + nfs4_verifier confirm; +}; + +struct nfs4_statfs_arg { + const struct nfs_fh * fh; + const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_statfs_res { + struct nfs_fsstat *fsstat; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_server_caps_arg { + struct nfs_fh *fhandle; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_server_caps_res { + u32 attr_bitmask[3]; + u32 acl_bitmask; + u32 has_links; + u32 has_symlinks; + u32 fh_expire_type; + struct nfs4_sequence_res seq_res; +}; + +#define NFS4_PATHNAME_MAXCOMPONENTS 512 +struct nfs4_pathname { + unsigned int ncomponents; + struct nfs4_string components[NFS4_PATHNAME_MAXCOMPONENTS]; +}; + +#define NFS4_FS_LOCATION_MAXSERVERS 10 +struct nfs4_fs_location { + unsigned int nservers; + struct nfs4_string servers[NFS4_FS_LOCATION_MAXSERVERS]; + struct nfs4_pathname rootpath; +}; + +#define NFS4_FS_LOCATIONS_MAXENTRIES 10 +struct nfs4_fs_locations { + struct nfs_fattr fattr; + const struct nfs_server *server; + struct nfs4_pathname fs_path; + int nlocations; + struct nfs4_fs_location locations[NFS4_FS_LOCATIONS_MAXENTRIES]; +}; + +struct nfs4_fs_locations_arg { + const struct nfs_fh *dir_fh; + const struct qstr *name; + struct page *page; + const u32 *bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_fs_locations_res { + struct nfs4_fs_locations *fs_locations; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_secinfo_oid { + unsigned int len; + char data[GSS_OID_MAX_LEN]; +}; + +struct nfs4_secinfo_gss { + struct nfs4_secinfo_oid sec_oid4; + unsigned int qop4; + unsigned int service; +}; + +struct nfs4_secinfo_flavor { + unsigned int flavor; + struct nfs4_secinfo_gss gss; +}; + +struct nfs4_secinfo_flavors { + unsigned int num_flavors; + struct nfs4_secinfo_flavor flavors[0]; +}; + +struct nfs4_secinfo_arg { + const struct nfs_fh *dir_fh; + const struct qstr *name; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_secinfo_res { + struct nfs4_secinfo_flavors *flavors; + struct nfs4_sequence_res seq_res; +}; + +#endif /* CONFIG_NFS_V4 */ + +struct nfstime4 { + u64 seconds; + u32 nseconds; +}; + +#ifdef CONFIG_NFS_V4_1 +#define NFS4_EXCHANGE_ID_LEN (48) +struct nfs41_exchange_id_args { + struct nfs_client *client; + nfs4_verifier *verifier; + unsigned int id_len; + char id[NFS4_EXCHANGE_ID_LEN]; + u32 flags; +}; + +struct server_owner { + uint64_t minor_id; + uint32_t major_id_sz; + char major_id[NFS4_OPAQUE_LIMIT]; +}; + +struct server_scope { + uint32_t server_scope_sz; + char server_scope[NFS4_OPAQUE_LIMIT]; +}; + +struct nfs41_impl_id { + char domain[NFS4_OPAQUE_LIMIT + 1]; + char name[NFS4_OPAQUE_LIMIT + 1]; + struct nfstime4 date; +}; + +struct nfs41_exchange_id_res { + struct nfs_client *client; + u32 flags; + struct server_scope *server_scope; + struct nfs41_impl_id *impl_id; +}; + +struct nfs41_create_session_args { + struct nfs_client *client; + uint32_t flags; + uint32_t cb_program; + struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ + 
struct nfs4_channel_attrs bc_attrs; /* Back Channel */ +}; + +struct nfs41_create_session_res { + struct nfs_client *client; +}; + +struct nfs41_reclaim_complete_args { + /* In the future extend to include curr_fh for use with migration */ + unsigned char one_fs:1; + struct nfs4_sequence_args seq_args; +}; + +struct nfs41_reclaim_complete_res { + struct nfs4_sequence_res seq_res; +}; + +#define SECINFO_STYLE_CURRENT_FH 0 +#define SECINFO_STYLE_PARENT 1 +struct nfs41_secinfo_no_name_args { + int style; + struct nfs4_sequence_args seq_args; +}; + +struct nfs41_test_stateid_args { + nfs4_stateid *stateid; + struct nfs4_sequence_args seq_args; +}; + +struct nfs41_test_stateid_res { + unsigned int status; + struct nfs4_sequence_res seq_res; +}; + +struct nfs41_free_stateid_args { + nfs4_stateid *stateid; + struct nfs4_sequence_args seq_args; +}; + +struct nfs41_free_stateid_res { + unsigned int status; + struct nfs4_sequence_res seq_res; +}; + +#endif /* CONFIG_NFS_V4_1 */ + +struct nfs_page; + +#define NFS_PAGEVEC_SIZE (8U) + +struct nfs_read_data { + struct rpc_task task; + struct inode *inode; + struct rpc_cred *cred; + struct nfs_fattr fattr; /* fattr storage */ + struct list_head pages; /* Coalesced read requests */ + struct list_head list; /* lists of struct nfs_read_data */ + struct nfs_page *req; /* multi ops per nfs_page */ + struct page **pagevec; + unsigned int npages; /* Max length of pagevec */ + struct nfs_readargs args; + struct nfs_readres res; + unsigned long timestamp; /* For lease renewal */ + struct pnfs_layout_segment *lseg; + struct nfs_client *ds_clp; /* pNFS data server */ + const struct rpc_call_ops *mds_ops; + int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data); + __u64 mds_offset; + int pnfs_error; + struct page *page_array[NFS_PAGEVEC_SIZE]; +}; + +struct nfs_write_data { + struct rpc_task task; + struct inode *inode; + struct rpc_cred *cred; + struct nfs_fattr fattr; + struct nfs_writeverf verf; + struct list_head pages; /* Coalesced requests we wish to flush */ + struct list_head list; /* lists of struct nfs_write_data */ + struct nfs_page *req; /* multi ops per nfs_page */ + struct page **pagevec; + unsigned int npages; /* Max length of pagevec */ + struct nfs_writeargs args; /* argument struct */ + struct nfs_writeres res; /* result struct */ + struct pnfs_layout_segment *lseg; + struct nfs_client *ds_clp; /* pNFS data server */ + int ds_commit_index; + const struct rpc_call_ops *mds_ops; + int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data); +#ifdef CONFIG_NFS_V4 + unsigned long timestamp; /* For lease renewal */ +#endif + __u64 mds_offset; /* Filelayout dense stripe */ + int pnfs_error; + struct page *page_array[NFS_PAGEVEC_SIZE]; +}; + +struct nfs_unlinkdata { + struct hlist_node list; + struct nfs_removeargs args; + struct nfs_removeres res; + struct inode *dir; + struct rpc_cred *cred; + struct nfs_fattr dir_attr; +}; + +struct nfs_renamedata { + struct nfs_renameargs args; + struct nfs_renameres res; + struct rpc_cred *cred; + struct inode *old_dir; + struct dentry *old_dentry; + struct nfs_fattr old_fattr; + struct inode *new_dir; + struct dentry *new_dentry; + struct nfs_fattr new_fattr; +}; + +struct nfs_access_entry; +struct nfs_client; +struct rpc_timeout; + +/* + * RPC procedure vector for NFSv2/NFSv3 demuxing + */ +struct nfs_rpc_ops { + u32 version; /* Protocol version */ + const struct dentry_operations *dentry_ops; + const struct inode_operations *dir_inode_ops; + const struct inode_operations 
*file_inode_ops; + const struct file_operations *file_ops; + + int (*getroot) (struct nfs_server *, struct nfs_fh *, + struct nfs_fsinfo *); + int (*getattr) (struct nfs_server *, struct nfs_fh *, + struct nfs_fattr *); + int (*setattr) (struct dentry *, struct nfs_fattr *, + struct iattr *); + int (*lookup) (struct rpc_clnt *clnt, struct inode *, struct qstr *, + struct nfs_fh *, struct nfs_fattr *); + int (*access) (struct inode *, struct nfs_access_entry *); + int (*readlink)(struct inode *, struct page *, unsigned int, + unsigned int); + int (*create) (struct inode *, struct dentry *, + struct iattr *, int, struct nfs_open_context *); + int (*remove) (struct inode *, struct qstr *); + void (*unlink_setup) (struct rpc_message *, struct inode *dir); + void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *); + int (*unlink_done) (struct rpc_task *, struct inode *); + int (*rename) (struct inode *, struct qstr *, + struct inode *, struct qstr *); + void (*rename_setup) (struct rpc_message *msg, struct inode *dir); + void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); + int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); + int (*link) (struct inode *, struct inode *, struct qstr *); + int (*symlink) (struct inode *, struct dentry *, struct page *, + unsigned int, struct iattr *); + int (*mkdir) (struct inode *, struct dentry *, struct iattr *); + int (*rmdir) (struct inode *, struct qstr *); + int (*readdir) (struct dentry *, struct rpc_cred *, + u64, struct page **, unsigned int, int); + int (*mknod) (struct inode *, struct dentry *, struct iattr *, + dev_t); + int (*statfs) (struct nfs_server *, struct nfs_fh *, + struct nfs_fsstat *); + int (*fsinfo) (struct nfs_server *, struct nfs_fh *, + struct nfs_fsinfo *); + int (*pathconf) (struct nfs_server *, struct nfs_fh *, + struct nfs_pathconf *); + int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); + int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int); + void (*read_setup) (struct nfs_read_data *, struct rpc_message *); + void (*read_rpc_prepare)(struct rpc_task *, struct nfs_read_data *); + int (*read_done) (struct rpc_task *, struct nfs_read_data *); + void (*write_setup) (struct nfs_write_data *, struct rpc_message *); + void (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *); + int (*write_done) (struct rpc_task *, struct nfs_write_data *); + void (*commit_setup) (struct nfs_write_data *, struct rpc_message *); + int (*commit_done) (struct rpc_task *, struct nfs_write_data *); + int (*lock)(struct file *, int, struct file_lock *); + int (*lock_check_bounds)(const struct file_lock *); + void (*clear_acl_cache)(struct inode *); + void (*close_context)(struct nfs_open_context *ctx, int); + struct inode * (*open_context) (struct inode *dir, + struct nfs_open_context *ctx, + int open_flags, + struct iattr *iattr); + int (*init_client) (struct nfs_client *, const struct rpc_timeout *, + const char *, rpc_authflavor_t, int); + int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *); +}; + +/* + * NFS_CALL(getattr, inode, (fattr)); + * into + * NFS_PROTO(inode)->getattr(fattr); + */ +#define NFS_CALL(op, inode, args) NFS_PROTO(inode)->op args + +/* + * Function vectors etc. 
for the NFS client + */ +extern const struct nfs_rpc_ops nfs_v2_clientops; +extern const struct nfs_rpc_ops nfs_v3_clientops; +extern const struct nfs_rpc_ops nfs_v4_clientops; +extern const struct rpc_version nfs_version2; +extern const struct rpc_version nfs_version3; +extern const struct rpc_version nfs_version4; + +extern const struct rpc_version nfsacl_version3; +extern const struct rpc_program nfsacl_program; + +#endif diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h new file mode 100644 index 00000000..fabcb1e5 --- /dev/null +++ b/include/linux/nfsacl.h @@ -0,0 +1,62 @@ +/* + * File: linux/nfsacl.h + * + * (C) 2003 Andreas Gruenbacher <agruen@suse.de> + */ +#ifndef __LINUX_NFSACL_H +#define __LINUX_NFSACL_H + +#define NFS_ACL_PROGRAM 100227 + +#define ACLPROC2_GETACL 1 +#define ACLPROC2_SETACL 2 +#define ACLPROC2_GETATTR 3 +#define ACLPROC2_ACCESS 4 + +#define ACLPROC3_GETACL 1 +#define ACLPROC3_SETACL 2 + + +/* Flags for the getacl/setacl mode */ +#define NFS_ACL 0x0001 +#define NFS_ACLCNT 0x0002 +#define NFS_DFACL 0x0004 +#define NFS_DFACLCNT 0x0008 + +/* Flag for Default ACL entries */ +#define NFS_ACL_DEFAULT 0x1000 + +#ifdef __KERNEL__ + +#include <linux/posix_acl.h> +#include <linux/sunrpc/xdr.h> + +/* Maximum number of ACL entries over NFS */ +#define NFS_ACL_MAX_ENTRIES 1024 + +#define NFSACL_MAXWORDS (2*(2+3*NFS_ACL_MAX_ENTRIES)) +#define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \ + >> PAGE_SHIFT) + +#define NFS_ACL_MAX_ENTRIES_INLINE (5) +#define NFS_ACL_INLINE_BUFSIZE ((2*(2+3*NFS_ACL_MAX_ENTRIES_INLINE)) << 2) + +static inline unsigned int +nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default) +{ + unsigned int w = 16; + w += max(acl_access ? (int)acl_access->a_count : 3, 4) * 12; + if (acl_default) + w += max((int)acl_default->a_count, 4) * 12; + return w; +} + +extern int +nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, + struct posix_acl *acl, int encode_entries, int typeflag); +extern int +nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, + struct posix_acl **pacl); + +#endif /* __KERNEL__ */ +#endif /* __LINUX_NFSACL_H */ diff --git a/include/linux/nfsd/Kbuild b/include/linux/nfsd/Kbuild new file mode 100644 index 00000000..5b7d84ac --- /dev/null +++ b/include/linux/nfsd/Kbuild @@ -0,0 +1,5 @@ +header-y += cld.h +header-y += debug.h +header-y += export.h +header-y += nfsfh.h +header-y += stats.h diff --git a/include/linux/nfsd/cld.h b/include/linux/nfsd/cld.h new file mode 100644 index 00000000..f14a9ab0 --- /dev/null +++ b/include/linux/nfsd/cld.h @@ -0,0 +1,56 @@ +/* + * Upcall description for nfsdcld communication + * + * Copyright (c) 2012 Red Hat, Inc. + * Author(s): Jeff Layton <jlayton@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef _NFSD_CLD_H +#define _NFSD_CLD_H + +/* latest upcall version available */ +#define CLD_UPCALL_VERSION 1 + +/* defined by RFC3530 */ +#define NFS4_OPAQUE_LIMIT 1024 + +enum cld_command { + Cld_Create, /* create a record for this cm_id */ + Cld_Remove, /* remove record of this cm_id */ + Cld_Check, /* is this cm_id allowed? */ + Cld_GraceDone, /* grace period is complete */ +}; + +/* representation of long-form NFSv4 client ID */ +struct cld_name { + uint16_t cn_len; /* length of cm_id */ + unsigned char cn_id[NFS4_OPAQUE_LIMIT]; /* client-provided */ +} __attribute__((packed)); + +/* message struct for communication with userspace */ +struct cld_msg { + uint8_t cm_vers; /* upcall version */ + uint8_t cm_cmd; /* upcall command */ + int16_t cm_status; /* return code */ + uint32_t cm_xid; /* transaction id */ + union { + int64_t cm_gracetime; /* grace period start time */ + struct cld_name cm_name; + } __attribute__((packed)) cm_u; +} __attribute__((packed)); + +#endif /* !_NFSD_CLD_H */ diff --git a/include/linux/nfsd/debug.h b/include/linux/nfsd/debug.h new file mode 100644 index 00000000..ee4aa917 --- /dev/null +++ b/include/linux/nfsd/debug.h @@ -0,0 +1,48 @@ +/* + * linux/include/linux/nfsd/debug.h + * + * Debugging-related stuff for nfsd + * + * Copyright (C) 1995 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef LINUX_NFSD_DEBUG_H +#define LINUX_NFSD_DEBUG_H + +#include <linux/sunrpc/debug.h> + +/* + * Enable debugging for nfsd. + * Requires RPC_DEBUG. + */ +#ifdef RPC_DEBUG +# define NFSD_DEBUG 1 +#endif + +/* + * knfsd debug flags + */ +#define NFSDDBG_SOCK 0x0001 +#define NFSDDBG_FH 0x0002 +#define NFSDDBG_EXPORT 0x0004 +#define NFSDDBG_SVC 0x0008 +#define NFSDDBG_PROC 0x0010 +#define NFSDDBG_FILEOP 0x0020 +#define NFSDDBG_AUTH 0x0040 +#define NFSDDBG_REPCACHE 0x0080 +#define NFSDDBG_XDR 0x0100 +#define NFSDDBG_LOCKD 0x0200 +#define NFSDDBG_ALL 0x7FFF +#define NFSDDBG_NOCHANGE 0xFFFF + + +#ifdef __KERNEL__ +# undef ifdebug +# ifdef NFSD_DEBUG +# define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag) +# else +# define ifdebug(flag) if (0) +# endif +#endif /* __KERNEL__ */ + +#endif /* LINUX_NFSD_DEBUG_H */ diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h new file mode 100644 index 00000000..f85308e6 --- /dev/null +++ b/include/linux/nfsd/export.h @@ -0,0 +1,161 @@ +/* + * include/linux/nfsd/export.h + * + * Public declarations for NFS exports. The definitions for the + * syscall interface are in nfsctl.h + * + * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef NFSD_EXPORT_H +#define NFSD_EXPORT_H + +# include <linux/types.h> +#ifdef __KERNEL__ +# include <linux/nfsd/nfsfh.h> +#endif + +/* + * Important limits for the exports stuff. + */ +#define NFSCLNT_IDMAX 1024 +#define NFSCLNT_ADDRMAX 16 +#define NFSCLNT_KEYMAX 32 + +/* + * Export flags. 
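+ *
+ * Editor's note (sketch, not in the original header): these bits are stored
+ * in svc_export.ex_flags and are normally tested either directly or through
+ * the EX_* helpers defined later in this file, e.g.:
+ *
+ *      if (exp->ex_flags & NFSEXP_READONLY)
+ *              /* reject the write before touching the filesystem */;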
+ */ +#define NFSEXP_READONLY 0x0001 +#define NFSEXP_INSECURE_PORT 0x0002 +#define NFSEXP_ROOTSQUASH 0x0004 +#define NFSEXP_ALLSQUASH 0x0008 +#define NFSEXP_ASYNC 0x0010 +#define NFSEXP_GATHERED_WRITES 0x0020 +/* 40 80 100 currently unused */ +#define NFSEXP_NOHIDE 0x0200 +#define NFSEXP_NOSUBTREECHECK 0x0400 +#define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */ +#define NFSEXP_MSNFS 0x1000 /* do silly things that MS clients expect; no longer supported */ +#define NFSEXP_FSID 0x2000 +#define NFSEXP_CROSSMOUNT 0x4000 +#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ +/* + * The NFSEXP_V4ROOT flag causes the kernel to give access only to NFSv4 + * clients, and only to the single directory that is the root of the + * export; further lookup and readdir operations are treated as if every + * subdirectory was a mountpoint, and ignored if they are not themselves + * exported. This is used by nfsd and mountd to construct the NFSv4 + * pseudofilesystem, which provides access only to paths leading to each + * exported filesystem. + */ +#define NFSEXP_V4ROOT 0x10000 +/* All flags that we claim to support. (Note we don't support NOACL.) */ +#define NFSEXP_ALLFLAGS 0x17E3F + +/* The flags that may vary depending on security flavor: */ +#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ + | NFSEXP_ALLSQUASH \ + | NFSEXP_INSECURE_PORT) + +#ifdef __KERNEL__ + +/* + * FS Locations + */ + +#define MAX_FS_LOCATIONS 128 + +struct nfsd4_fs_location { + char *hosts; /* colon separated list of hosts */ + char *path; /* slash separated list of path components */ +}; + +struct nfsd4_fs_locations { + uint32_t locations_count; + struct nfsd4_fs_location *locations; +/* If we're not actually serving this data ourselves (only providing a + * list of replicas that do serve it) then we set "migrated": */ + int migrated; +}; + +/* + * We keep an array of pseudoflavors with the export, in order from most + * to least preferred. For the foreseeable future, we don't expect more + * than the eight pseudoflavors null, unix, krb5, krb5i, krb5p, skpm3, + * spkm3i, and spkm3p (and using all 8 at once should be rare). + */ +#define MAX_SECINFO_LIST 8 + +struct exp_flavor_info { + u32 pseudoflavor; + u32 flags; +}; + +struct svc_export { + struct cache_head h; + struct auth_domain * ex_client; + int ex_flags; + struct path ex_path; + uid_t ex_anon_uid; + gid_t ex_anon_gid; + int ex_fsid; + unsigned char * ex_uuid; /* 16 byte fsid */ + struct nfsd4_fs_locations ex_fslocs; + int ex_nflavors; + struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST]; +}; + +/* an "export key" (expkey) maps a filehandlefragement to an + * svc_export for a given client. There can be several per export, + * for the different fsid types. 
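+ *
+ * Editor's note (illustrative sketch, not from the original header): for
+ * fsid_type 1 ("4 byte user specified identifier", see nfsfh.h later in this
+ * diff) only the first word of ek_fsid is significant; with "key" a
+ * hypothetical struct svc_expkey, roughly:
+ *
+ *      key.ek_fsidtype = 1;
+ *      key.ek_fsid[0]  = fsid;   /* value given by the fsid= export option */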
+ */ +struct svc_expkey { + struct cache_head h; + + struct auth_domain * ek_client; + int ek_fsidtype; + u32 ek_fsid[6]; + + struct path ek_path; +}; + +#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) +#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) +#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) + +int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp); +__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp); + +/* + * Function declarations + */ +int nfsd_export_init(void); +void nfsd_export_shutdown(void); +void nfsd_export_flush(void); +struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, + struct path *); +struct svc_export * rqst_exp_parent(struct svc_rqst *, + struct path *); +struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *); +int exp_rootfh(struct auth_domain *, + char *path, struct knfsd_fh *, int maxsize); +__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *); +__be32 nfserrno(int errno); + +extern struct cache_detail svc_export_cache; + +static inline void exp_put(struct svc_export *exp) +{ + cache_put(&exp->h, &svc_export_cache); +} + +static inline void exp_get(struct svc_export *exp) +{ + cache_get(&exp->h); +} +struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *); + +#endif /* __KERNEL__ */ + +#endif /* NFSD_EXPORT_H */ + diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h new file mode 100644 index 00000000..ce4743a2 --- /dev/null +++ b/include/linux/nfsd/nfsfh.h @@ -0,0 +1,171 @@ +/* + * include/linux/nfsd/nfsfh.h + * + * This file describes the layout of the file handles as passed + * over the wire. + * + * Earlier versions of knfsd used to sign file handles using keyed MD5 + * or SHA. I've removed this code, because it doesn't give you more + * security than blocking external access to port 2049 on your firewall. + * + * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_NFSD_FH_H +#define _LINUX_NFSD_FH_H + +#include <linux/types.h> +#include <linux/nfs.h> +#include <linux/nfs2.h> +#include <linux/nfs3.h> +#include <linux/nfs4.h> +#ifdef __KERNEL__ +# include <linux/sunrpc/svc.h> +#endif + +/* + * This is the old "dentry style" Linux NFSv2 file handle. + * + * The xino and xdev fields are currently used to transport the + * ino/dev of the exported inode. + */ +struct nfs_fhbase_old { + __u32 fb_dcookie; /* dentry cookie - always 0xfeebbaca */ + __u32 fb_ino; /* our inode number */ + __u32 fb_dirino; /* dir inode number, 0 for directories */ + __u32 fb_dev; /* our device */ + __u32 fb_xdev; + __u32 fb_xino; + __u32 fb_generation; +}; + +/* + * This is the new flexible, extensible style NFSv2/v3 file handle. + * by Neil Brown <neilb@cse.unsw.edu.au> - March 2000 + * + * The file handle starts with a sequence of four-byte words. + * The first word contains a version number (1) and three descriptor bytes + * that tell how the remaining 3 variable length fields should be handled. + * These three bytes are auth_type, fsid_type and fileid_type. + * + * All four-byte values are in host-byte-order. + * + * The auth_type field specifies how the filehandle can be authenticated + * This might allow a file to be confirmed to be in a writable part of a + * filetree without checking the path from it up to the root. + * Current values: + * 0 - No authentication. 
fb_auth is 0 bytes long + * Possible future values: + * 1 - 4 bytes taken from MD5 hash of the remainer of the file handle + * prefixed by a secret and with the important export flags. + * + * The fsid_type identifies how the filesystem (or export point) is + * encoded. + * Current values: + * 0 - 4 byte device id (ms-2-bytes major, ls-2-bytes minor), 4byte inode number + * NOTE: we cannot use the kdev_t device id value, because kdev_t.h + * says we mustn't. We must break it up and reassemble. + * 1 - 4 byte user specified identifier + * 2 - 4 byte major, 4 byte minor, 4 byte inode number - DEPRECATED + * 3 - 4 byte device id, encoded for user-space, 4 byte inode number + * 4 - 4 byte inode number and 4 byte uuid + * 5 - 8 byte uuid + * 6 - 16 byte uuid + * 7 - 8 byte inode number and 16 byte uuid + * + * The fileid_type identified how the file within the filesystem is encoded. + * This is (will be) passed to, and set by, the underlying filesystem if it supports + * filehandle operations. The filesystem must not use the value '0' or '0xff' and may + * only use the values 1 and 2 as defined below: + * Current values: + * 0 - The root, or export point, of the filesystem. fb_fileid is 0 bytes. + * 1 - 32bit inode number, 32 bit generation number. + * 2 - 32bit inode number, 32 bit generation number, 32 bit parent directory inode number. + * + */ +struct nfs_fhbase_new { + __u8 fb_version; /* == 1, even => nfs_fhbase_old */ + __u8 fb_auth_type; + __u8 fb_fsid_type; + __u8 fb_fileid_type; + __u32 fb_auth[1]; +/* __u32 fb_fsid[0]; floating */ +/* __u32 fb_fileid[0]; floating */ +}; + +struct knfsd_fh { + unsigned int fh_size; /* significant for NFSv3. + * Points to the current size while building + * a new file handle + */ + union { + struct nfs_fhbase_old fh_old; + __u32 fh_pad[NFS4_FHSIZE/4]; + struct nfs_fhbase_new fh_new; + } fh_base; +}; + +#define ofh_dcookie fh_base.fh_old.fb_dcookie +#define ofh_ino fh_base.fh_old.fb_ino +#define ofh_dirino fh_base.fh_old.fb_dirino +#define ofh_dev fh_base.fh_old.fb_dev +#define ofh_xdev fh_base.fh_old.fb_xdev +#define ofh_xino fh_base.fh_old.fb_xino +#define ofh_generation fh_base.fh_old.fb_generation + +#define fh_version fh_base.fh_new.fb_version +#define fh_fsid_type fh_base.fh_new.fb_fsid_type +#define fh_auth_type fh_base.fh_new.fb_auth_type +#define fh_fileid_type fh_base.fh_new.fb_fileid_type +#define fh_auth fh_base.fh_new.fb_auth +#define fh_fsid fh_base.fh_new.fb_auth + +#ifdef __KERNEL__ + +static inline __u32 ino_t_to_u32(ino_t ino) +{ + return (__u32) ino; +} + +static inline ino_t u32_to_ino_t(__u32 uino) +{ + return (ino_t) uino; +} + +/* + * This is the internal representation of an NFS handle used in knfsd. + * pre_mtime/post_version will be used to support wcc_attr's in NFSv3. + */ +typedef struct svc_fh { + struct knfsd_fh fh_handle; /* FH data */ + struct dentry * fh_dentry; /* validated dentry */ + struct svc_export * fh_export; /* export pointer */ + int fh_maxsize; /* max size for fh_handle */ + + unsigned char fh_locked; /* inode locked by us */ + +#ifdef CONFIG_NFSD_V3 + unsigned char fh_post_saved; /* post-op attrs saved */ + unsigned char fh_pre_saved; /* pre-op attrs saved */ + + /* Pre-op attributes saved during fh_lock */ + __u64 fh_pre_size; /* size before operation */ + struct timespec fh_pre_mtime; /* mtime before oper */ + struct timespec fh_pre_ctime; /* ctime before oper */ + /* + * pre-op nfsv4 change attr: note must check IS_I_VERSION(inode) + * to find out if it is valid. 
+ */ + u64 fh_pre_change; + + /* Post-op attributes saved in fh_unlock */ + struct kstat fh_post_attr; /* full attrs after operation */ + u64 fh_post_change; /* nfsv4 change; see above */ +#endif /* CONFIG_NFSD_V3 */ + +} svc_fh; + +#endif /* __KERNEL__ */ + + +#endif /* _LINUX_NFSD_FH_H */ diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h new file mode 100644 index 00000000..2693ef64 --- /dev/null +++ b/include/linux/nfsd/stats.h @@ -0,0 +1,51 @@ +/* + * linux/include/linux/nfsd/stats.h + * + * Statistics for NFS server. + * + * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef LINUX_NFSD_STATS_H +#define LINUX_NFSD_STATS_H + +#include <linux/nfs4.h> + +/* thread usage wraps every million seconds (approx one fortnight) */ +#define NFSD_USAGE_WRAP (HZ*1000000) +
+#ifdef __KERNEL__ + +struct nfsd_stats { + unsigned int rchits; /* repcache hits */ + unsigned int rcmisses; /* repcache misses */ + unsigned int rcnocache; /* uncached reqs */ + unsigned int fh_stale; /* FH stale error */ + unsigned int fh_lookup; /* dentry cached */ + unsigned int fh_anon; /* anon file dentry returned */ + unsigned int fh_nocache_dir; /* filehandle not found in dcache */ + unsigned int fh_nocache_nondir; /* filehandle not found in dcache */ + unsigned int io_read; /* bytes returned to read requests */ + unsigned int io_write; /* bytes passed in write requests */ + unsigned int th_cnt; /* number of available threads */ + unsigned int th_usage[10]; /* number of ticks during which n perdeciles + * of available threads were in use */ + unsigned int th_fullcnt; /* number of times last free thread was used */ + unsigned int ra_size; /* size of ra cache */ + unsigned int ra_depth[11]; /* number of times ra entry was found that deep + * in the cache (10percentiles). [10] = not found */ +#ifdef CONFIG_NFSD_V4 + unsigned int nfs4_opcount[LAST_NFS4_OP + 1]; /* count of individual nfsv4 operations */ +#endif + +}; + + +extern struct nfsd_stats nfsdstats; +extern struct svc_stat nfsd_svcstats; + +void nfsd_stat_init(void); +void nfsd_stat_shutdown(void); + +#endif /* __KERNEL__ */ +#endif /* LINUX_NFSD_STATS_H */ diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h new file mode 100644 index 00000000..89bd4a4d --- /dev/null +++ b/include/linux/nilfs2_fs.h @@ -0,0 +1,852 @@ +/* + * nilfs2_fs.h - NILFS2 on-disk structures and common declarations. + * + * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * Written by Koji Sato <koji@osrg.net> + * Ryusuke Konishi <ryusuke@osrg.net> + */ +/* + * linux/include/linux/ext2_fs.h + * + * Copyright (C) 1992, 1993, 1994, 1995 + * Remy Card (card@masi.ibp.fr) + * Laboratoire MASI - Institut Blaise Pascal + * Universite Pierre et Marie Curie (Paris VI) + * + * from + * + * linux/include/linux/minix_fs.h + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#ifndef _LINUX_NILFS_FS_H +#define _LINUX_NILFS_FS_H + +#include <linux/types.h> +#include <linux/ioctl.h> +#include <linux/magic.h> +#include <linux/bug.h> + + +#define NILFS_INODE_BMAP_SIZE 7 +/** + * struct nilfs_inode - structure of an inode on disk + * @i_blocks: blocks count + * @i_size: size in bytes + * @i_ctime: creation time (seconds) + * @i_mtime: modification time (seconds) + * @i_ctime_nsec: creation time (nano seconds) + * @i_mtime_nsec: modification time (nano seconds) + * @i_uid: user id + * @i_gid: group id + * @i_mode: file mode + * @i_links_count: links count + * @i_flags: file flags + * @i_bmap: block mapping + * @i_xattr: extended attributes + * @i_generation: file generation (for NFS) + * @i_pad: padding + */ +struct nilfs_inode { + __le64 i_blocks; + __le64 i_size; + __le64 i_ctime; + __le64 i_mtime; + __le32 i_ctime_nsec; + __le32 i_mtime_nsec; + __le32 i_uid; + __le32 i_gid; + __le16 i_mode; + __le16 i_links_count; + __le32 i_flags; + __le64 i_bmap[NILFS_INODE_BMAP_SIZE]; +#define i_device_code i_bmap[0] + __le64 i_xattr; + __le32 i_generation; + __le32 i_pad; +}; + +/** + * struct nilfs_super_root - structure of super root + * @sr_sum: check sum + * @sr_bytes: byte count of the structure + * @sr_flags: flags (reserved) + * @sr_nongc_ctime: write time of the last segment not for cleaner operation + * @sr_dat: DAT file inode + * @sr_cpfile: checkpoint file inode + * @sr_sufile: segment usage file inode + */ +struct nilfs_super_root { + __le32 sr_sum; + __le16 sr_bytes; + __le16 sr_flags; + __le64 sr_nongc_ctime; + struct nilfs_inode sr_dat; + struct nilfs_inode sr_cpfile; + struct nilfs_inode sr_sufile; +}; + +#define NILFS_SR_MDT_OFFSET(inode_size, i) \ + ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \ + (inode_size) * (i)) +#define NILFS_SR_DAT_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 0) +#define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1) +#define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2) +#define NILFS_SR_BYTES(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 3) + +/* + * Maximal mount counts + */ +#define NILFS_DFL_MAX_MNT_COUNT 50 /* 50 mounts */ + +/* + * File system states (sbp->s_state, nilfs->ns_mount_state) + */ +#define NILFS_VALID_FS 0x0001 /* Unmounted cleanly */ +#define NILFS_ERROR_FS 0x0002 /* Errors detected */ +#define NILFS_RESIZE_FS 0x0004 /* Resize required */ + +/* + * Mount flags (sbi->s_mount_opt) + */ +#define NILFS_MOUNT_ERROR_MODE 0x0070 /* Error mode mask */ +#define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ +#define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ +#define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ +#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ +#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order + semantics also for data */ +#define NILFS_MOUNT_NORECOVERY 0x4000 /* 
Disable write access during + mount-time recovery */ +#define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */ + + +/** + * struct nilfs_super_block - structure of super block on disk + */ +struct nilfs_super_block { +/*00*/ __le32 s_rev_level; /* Revision level */ + __le16 s_minor_rev_level; /* minor revision level */ + __le16 s_magic; /* Magic signature */ + + __le16 s_bytes; /* Bytes count of CRC calculation + for this structure. s_reserved + is excluded. */ + __le16 s_flags; /* flags */ + __le32 s_crc_seed; /* Seed value of CRC calculation */ +/*10*/ __le32 s_sum; /* Check sum of super block */ + + __le32 s_log_block_size; /* Block size represented as follows + blocksize = + 1 << (s_log_block_size + 10) */ + __le64 s_nsegments; /* Number of segments in filesystem */ +/*20*/ __le64 s_dev_size; /* block device size in bytes */ + __le64 s_first_data_block; /* 1st seg disk block number */ +/*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */ + __le32 s_r_segments_percentage; /* Reserved segments percentage */ + + __le64 s_last_cno; /* Last checkpoint number */ +/*40*/ __le64 s_last_pseg; /* disk block addr pseg written last */ + __le64 s_last_seq; /* seq. number of seg written last */ +/*50*/ __le64 s_free_blocks_count; /* Free blocks count */ + + __le64 s_ctime; /* Creation time (execution time of + newfs) */ +/*60*/ __le64 s_mtime; /* Mount time */ + __le64 s_wtime; /* Write time */ +/*70*/ __le16 s_mnt_count; /* Mount count */ + __le16 s_max_mnt_count; /* Maximal mount count */ + __le16 s_state; /* File system state */ + __le16 s_errors; /* Behaviour when detecting errors */ + __le64 s_lastcheck; /* time of last check */ + +/*80*/ __le32 s_checkinterval; /* max. time between checks */ + __le32 s_creator_os; /* OS */ + __le16 s_def_resuid; /* Default uid for reserved blocks */ + __le16 s_def_resgid; /* Default gid for reserved blocks */ + __le32 s_first_ino; /* First non-reserved inode */ + +/*90*/ __le16 s_inode_size; /* Size of an inode */ + __le16 s_dat_entry_size; /* Size of a dat entry */ + __le16 s_checkpoint_size; /* Size of a checkpoint */ + __le16 s_segment_usage_size; /* Size of a segment usage */ + +/*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ +/*A8*/ char s_volume_name[80]; /* volume name */ + +/*F8*/ __le32 s_c_interval; /* Commit interval of segment */ + __le32 s_c_block_max; /* Threshold of data amount for + the segment construction */ +/*100*/ __le64 s_feature_compat; /* Compatible feature set */ + __le64 s_feature_compat_ro; /* Read-only compatible feature set */ + __le64 s_feature_incompat; /* Incompatible feature set */ + __u32 s_reserved[186]; /* padding to the end of the block */ +}; + +/* + * Codes for operating systems + */ +#define NILFS_OS_LINUX 0 +/* Codes from 1 to 4 are reserved to keep compatibility with ext2 creator-OS */ + +/* + * Revision levels + */ +#define NILFS_CURRENT_REV 2 /* current major revision */ +#define NILFS_MINOR_REV 0 /* minor revision */ +#define NILFS_MIN_SUPP_REV 2 /* minimum supported revision */ + +/* + * Feature set definitions + * + * If there is a bit set in the incompatible feature set that the kernel + * doesn't know about, it should refuse to mount the filesystem. 
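+ *
+ * Editor's note: a hedged sketch (not in the original header) of the check
+ * this rule implies, with "sbp" a pointer to the on-disk super block as read
+ * by the mount code:
+ *
+ *      __u64 incompat = le64_to_cpu(sbp->s_feature_incompat);
+ *
+ *      if (incompat & ~NILFS_FEATURE_INCOMPAT_SUPP)
+ *              return -EINVAL;   /* unknown incompatible feature: refuse */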
+ */ +#define NILFS_FEATURE_COMPAT_RO_BLOCK_COUNT 0x00000001ULL + +#define NILFS_FEATURE_COMPAT_SUPP 0ULL +#define NILFS_FEATURE_COMPAT_RO_SUPP NILFS_FEATURE_COMPAT_RO_BLOCK_COUNT +#define NILFS_FEATURE_INCOMPAT_SUPP 0ULL + +/* + * Bytes count of super_block for CRC-calculation + */ +#define NILFS_SB_BYTES \ + ((long)&((struct nilfs_super_block *)0)->s_reserved) + +/* + * Special inode number + */ +#define NILFS_ROOT_INO 2 /* Root file inode */ +#define NILFS_DAT_INO 3 /* DAT file */ +#define NILFS_CPFILE_INO 4 /* checkpoint file */ +#define NILFS_SUFILE_INO 5 /* segment usage file */ +#define NILFS_IFILE_INO 6 /* ifile */ +#define NILFS_ATIME_INO 7 /* Atime file (reserved) */ +#define NILFS_XATTR_INO 8 /* Xattribute file (reserved) */ +#define NILFS_SKETCH_INO 10 /* Sketch file */ +#define NILFS_USER_INO 11 /* Fisrt user's file inode number */ + +#define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */ + +#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in + a full segment */ +#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in + a partial segment */ +#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved + segments */ + +/* + * We call DAT, cpfile, and sufile root metadata files. Inodes of + * these files are written in super root block instead of ifile, and + * garbage collector doesn't keep any past versions of these files. + */ +#define NILFS_ROOT_METADATA_FILE(ino) \ + ((ino) >= NILFS_DAT_INO && (ino) <= NILFS_SUFILE_INO) + +/* + * bytes offset of secondary super block + */ +#define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12) + +/* + * Maximal count of links to a file + */ +#define NILFS_LINK_MAX 32000 + +/* + * Structure of a directory entry + * (Same as ext2) + */ + +#define NILFS_NAME_LEN 255 + +/* + * Block size limitations + */ +#define NILFS_MIN_BLOCK_SIZE 1024 +#define NILFS_MAX_BLOCK_SIZE 65536 + +/* + * The new version of the directory entry. Since V0 structures are + * stored in intel byte order, and the name_len field could never be + * bigger than 255 chars, it's safe to reclaim the extra byte for the + * file_type field. + */ +struct nilfs_dir_entry { + __le64 inode; /* Inode number */ + __le16 rec_len; /* Directory entry length */ + __u8 name_len; /* Name length */ + __u8 file_type; + char name[NILFS_NAME_LEN]; /* File name */ + char pad; +}; + +/* + * NILFS directory file types. Only the low 3 bits are used. The + * other bits are reserved for now. 
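+ *
+ * Editor's note (sketch, not part of the original header): as in ext2, the
+ * on-disk file_type of a directory entry is derived from the inode mode when
+ * the entry is created, e.g. roughly:
+ *
+ *      de->file_type = S_ISDIR(inode->i_mode) ? NILFS_FT_DIR
+ *                                             : NILFS_FT_REG_FILE;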
+ */ +enum { + NILFS_FT_UNKNOWN, + NILFS_FT_REG_FILE, + NILFS_FT_DIR, + NILFS_FT_CHRDEV, + NILFS_FT_BLKDEV, + NILFS_FT_FIFO, + NILFS_FT_SOCK, + NILFS_FT_SYMLINK, + NILFS_FT_MAX +}; + +/* + * NILFS_DIR_PAD defines the directory entries boundaries + * + * NOTE: It must be a multiple of 8 + */ +#define NILFS_DIR_PAD 8 +#define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) +#define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ + ~NILFS_DIR_ROUND) +#define NILFS_MAX_REC_LEN ((1<<16)-1) + +static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) +{ + unsigned len = le16_to_cpu(dlen); + +#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) + if (len == NILFS_MAX_REC_LEN) + return 1 << 16; +#endif + return len; +} + +static inline __le16 nilfs_rec_len_to_disk(unsigned len) +{ +#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) + if (len == (1 << 16)) + return cpu_to_le16(NILFS_MAX_REC_LEN); + else if (len > (1 << 16)) + BUG(); +#endif + return cpu_to_le16(len); +} + +/** + * struct nilfs_finfo - file information + * @fi_ino: inode number + * @fi_cno: checkpoint number + * @fi_nblocks: number of blocks (including intermediate blocks) + * @fi_ndatablk: number of file data blocks + */ +struct nilfs_finfo { + __le64 fi_ino; + __le64 fi_cno; + __le32 fi_nblocks; + __le32 fi_ndatablk; + /* array of virtual block numbers */ +}; + +/** + * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned + * @bi_vblocknr: virtual block number + * @bi_blkoff: block offset + */ +struct nilfs_binfo_v { + __le64 bi_vblocknr; + __le64 bi_blkoff; +}; + +/** + * struct nilfs_binfo_dat - information for the block which belongs to the DAT file + * @bi_blkoff: block offset + * @bi_level: level + * @bi_pad: padding + */ +struct nilfs_binfo_dat { + __le64 bi_blkoff; + __u8 bi_level; + __u8 bi_pad[7]; +}; + +/** + * union nilfs_binfo: block information + * @bi_v: nilfs_binfo_v structure + * @bi_dat: nilfs_binfo_dat structure + */ +union nilfs_binfo { + struct nilfs_binfo_v bi_v; + struct nilfs_binfo_dat bi_dat; +}; + +/** + * struct nilfs_segment_summary - segment summary + * @ss_datasum: checksum of data + * @ss_sumsum: checksum of segment summary + * @ss_magic: magic number + * @ss_bytes: size of this structure in bytes + * @ss_flags: flags + * @ss_seq: sequence number + * @ss_create: creation timestamp + * @ss_next: next segment + * @ss_nblocks: number of blocks + * @ss_nfinfo: number of finfo structures + * @ss_sumbytes: total size of segment summary in bytes + * @ss_pad: padding + * @ss_cno: checkpoint number + */ +struct nilfs_segment_summary { + __le32 ss_datasum; + __le32 ss_sumsum; + __le32 ss_magic; + __le16 ss_bytes; + __le16 ss_flags; + __le64 ss_seq; + __le64 ss_create; + __le64 ss_next; + __le32 ss_nblocks; + __le32 ss_nfinfo; + __le32 ss_sumbytes; + __le32 ss_pad; + __le64 ss_cno; + /* array of finfo structures */ +}; + +#define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */ + +/* + * Segment summary flags + */ +#define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */ +#define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */ +#define NILFS_SS_SR 0x0004 /* has super root */ +#define NILFS_SS_SYNDT 0x0008 /* includes data only updates */ +#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */ + +/** + * struct nilfs_btree_node - B-tree node + * @bn_flags: flags + * @bn_level: level + * @bn_nchildren: number of children + * @bn_pad: padding + */ +struct nilfs_btree_node { + __u8 bn_flags; + __u8 bn_level; + __le16 
bn_nchildren; + __le32 bn_pad; +}; + +/* flags */ +#define NILFS_BTREE_NODE_ROOT 0x01 + +/* level */ +#define NILFS_BTREE_LEVEL_DATA 0 +#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) +#define NILFS_BTREE_LEVEL_MAX 14 + +/** + * struct nilfs_palloc_group_desc - block group descriptor + * @pg_nfrees: number of free entries in block group + */ +struct nilfs_palloc_group_desc { + __le32 pg_nfrees; +}; + +/** + * struct nilfs_dat_entry - disk address translation entry + * @de_blocknr: block number + * @de_start: start checkpoint number + * @de_end: end checkpoint number + * @de_rsv: reserved for future use + */ +struct nilfs_dat_entry { + __le64 de_blocknr; + __le64 de_start; + __le64 de_end; + __le64 de_rsv; +}; + +/** + * struct nilfs_snapshot_list - snapshot list + * @ssl_next: next checkpoint number on snapshot list + * @ssl_prev: previous checkpoint number on snapshot list + */ +struct nilfs_snapshot_list { + __le64 ssl_next; + __le64 ssl_prev; +}; + +/** + * struct nilfs_checkpoint - checkpoint structure + * @cp_flags: flags + * @cp_checkpoints_count: checkpoints count in a block + * @cp_snapshot_list: snapshot list + * @cp_cno: checkpoint number + * @cp_create: creation timestamp + * @cp_nblk_inc: number of blocks incremented by this checkpoint + * @cp_inodes_count: inodes count + * @cp_blocks_count: blocks count + * @cp_ifile_inode: inode of ifile + */ +struct nilfs_checkpoint { + __le32 cp_flags; + __le32 cp_checkpoints_count; + struct nilfs_snapshot_list cp_snapshot_list; + __le64 cp_cno; + __le64 cp_create; + __le64 cp_nblk_inc; + __le64 cp_inodes_count; + __le64 cp_blocks_count; + + /* Do not change the byte offset of ifile inode. + To keep the compatibility of the disk format, + additional fields should be added behind cp_ifile_inode. 
*/ + struct nilfs_inode cp_ifile_inode; +}; + +/* checkpoint flags */ +enum { + NILFS_CHECKPOINT_SNAPSHOT, + NILFS_CHECKPOINT_INVALID, + NILFS_CHECKPOINT_SKETCH, + NILFS_CHECKPOINT_MINOR, +}; + +#define NILFS_CHECKPOINT_FNS(flag, name) \ +static inline void \ +nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \ +{ \ + cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \ + (1UL << NILFS_CHECKPOINT_##flag)); \ +} \ +static inline void \ +nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \ +{ \ + cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \ + ~(1UL << NILFS_CHECKPOINT_##flag)); \ +} \ +static inline int \ +nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \ +{ \ + return !!(le32_to_cpu(cp->cp_flags) & \ + (1UL << NILFS_CHECKPOINT_##flag)); \ +} + +NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot) +NILFS_CHECKPOINT_FNS(INVALID, invalid) +NILFS_CHECKPOINT_FNS(MINOR, minor) + +/** + * struct nilfs_cpinfo - checkpoint information + * @ci_flags: flags + * @ci_pad: padding + * @ci_cno: checkpoint number + * @ci_create: creation timestamp + * @ci_nblk_inc: number of blocks incremented by this checkpoint + * @ci_inodes_count: inodes count + * @ci_blocks_count: blocks count + * @ci_next: next checkpoint number in snapshot list + */ +struct nilfs_cpinfo { + __u32 ci_flags; + __u32 ci_pad; + __u64 ci_cno; + __u64 ci_create; + __u64 ci_nblk_inc; + __u64 ci_inodes_count; + __u64 ci_blocks_count; + __u64 ci_next; +}; + +#define NILFS_CPINFO_FNS(flag, name) \ +static inline int \ +nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \ +{ \ + return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \ +} + +NILFS_CPINFO_FNS(SNAPSHOT, snapshot) +NILFS_CPINFO_FNS(INVALID, invalid) +NILFS_CPINFO_FNS(MINOR, minor) + + +/** + * struct nilfs_cpfile_header - checkpoint file header + * @ch_ncheckpoints: number of checkpoints + * @ch_nsnapshots: number of snapshots + * @ch_snapshot_list: snapshot list + */ +struct nilfs_cpfile_header { + __le64 ch_ncheckpoints; + __le64 ch_nsnapshots; + struct nilfs_snapshot_list ch_snapshot_list; +}; + +#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \ + ((sizeof(struct nilfs_cpfile_header) + \ + sizeof(struct nilfs_checkpoint) - 1) / \ + sizeof(struct nilfs_checkpoint)) + +/** + * struct nilfs_segment_usage - segment usage + * @su_lastmod: last modified timestamp + * @su_nblocks: number of blocks in segment + * @su_flags: flags + */ +struct nilfs_segment_usage { + __le64 su_lastmod; + __le32 su_nblocks; + __le32 su_flags; +}; + +/* segment usage flag */ +enum { + NILFS_SEGMENT_USAGE_ACTIVE, + NILFS_SEGMENT_USAGE_DIRTY, + NILFS_SEGMENT_USAGE_ERROR, + + /* ... 
*/ +}; + +#define NILFS_SEGMENT_USAGE_FNS(flag, name) \ +static inline void \ +nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \ +{ \ + su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \ + (1UL << NILFS_SEGMENT_USAGE_##flag));\ +} \ +static inline void \ +nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \ +{ \ + su->su_flags = \ + cpu_to_le32(le32_to_cpu(su->su_flags) & \ + ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \ +} \ +static inline int \ +nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \ +{ \ + return !!(le32_to_cpu(su->su_flags) & \ + (1UL << NILFS_SEGMENT_USAGE_##flag)); \ +} + +NILFS_SEGMENT_USAGE_FNS(ACTIVE, active) +NILFS_SEGMENT_USAGE_FNS(DIRTY, dirty) +NILFS_SEGMENT_USAGE_FNS(ERROR, error) + +static inline void +nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su) +{ + su->su_lastmod = cpu_to_le64(0); + su->su_nblocks = cpu_to_le32(0); + su->su_flags = cpu_to_le32(0); +} + +static inline int +nilfs_segment_usage_clean(const struct nilfs_segment_usage *su) +{ + return !le32_to_cpu(su->su_flags); +} + +/** + * struct nilfs_sufile_header - segment usage file header + * @sh_ncleansegs: number of clean segments + * @sh_ndirtysegs: number of dirty segments + * @sh_last_alloc: last allocated segment number + */ +struct nilfs_sufile_header { + __le64 sh_ncleansegs; + __le64 sh_ndirtysegs; + __le64 sh_last_alloc; + /* ... */ +}; + +#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \ + ((sizeof(struct nilfs_sufile_header) + \ + sizeof(struct nilfs_segment_usage) - 1) / \ + sizeof(struct nilfs_segment_usage)) + +/** + * nilfs_suinfo - segment usage information + * @sui_lastmod: + * @sui_nblocks: + * @sui_flags: + */ +struct nilfs_suinfo { + __u64 sui_lastmod; + __u32 sui_nblocks; + __u32 sui_flags; +}; + +#define NILFS_SUINFO_FNS(flag, name) \ +static inline int \ +nilfs_suinfo_##name(const struct nilfs_suinfo *si) \ +{ \ + return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \ +} + +NILFS_SUINFO_FNS(ACTIVE, active) +NILFS_SUINFO_FNS(DIRTY, dirty) +NILFS_SUINFO_FNS(ERROR, error) + +static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si) +{ + return !si->sui_flags; +} + +/* ioctl */ +enum { + NILFS_CHECKPOINT, + NILFS_SNAPSHOT, +}; + +/** + * struct nilfs_cpmode - + * @cc_cno: + * @cc_mode: + */ +struct nilfs_cpmode { + __u64 cm_cno; + __u32 cm_mode; + __u32 cm_pad; +}; + +/** + * struct nilfs_argv - argument vector + * @v_base: + * @v_nmembs: + * @v_size: + * @v_flags: + * @v_index: + */ +struct nilfs_argv { + __u64 v_base; + __u32 v_nmembs; /* number of members */ + __u16 v_size; /* size of members */ + __u16 v_flags; + __u64 v_index; +}; + +/** + * struct nilfs_period - + * @p_start: + * @p_end: + */ +struct nilfs_period { + __u64 p_start; + __u64 p_end; +}; + +/** + * struct nilfs_cpstat - + * @cs_cno: checkpoint number + * @cs_ncps: number of checkpoints + * @cs_nsss: number of snapshots + */ +struct nilfs_cpstat { + __u64 cs_cno; + __u64 cs_ncps; + __u64 cs_nsss; +}; + +/** + * struct nilfs_sustat - + * @ss_nsegs: number of segments + * @ss_ncleansegs: number of clean segments + * @ss_ndirtysegs: number of dirty segments + * @ss_ctime: creation time of the last segment + * @ss_nongc_ctime: creation time of the last segment not for GC + * @ss_prot_seq: least sequence number of segments which must not be reclaimed + */ +struct nilfs_sustat { + __u64 ss_nsegs; + __u64 ss_ncleansegs; + __u64 ss_ndirtysegs; + __u64 ss_ctime; + __u64 ss_nongc_ctime; + __u64 ss_prot_seq; +}; + +/** + * struct nilfs_vinfo - virtual 
block number information + * @vi_vblocknr: + * @vi_start: + * @vi_end: + * @vi_blocknr: + */ +struct nilfs_vinfo { + __u64 vi_vblocknr; + __u64 vi_start; + __u64 vi_end; + __u64 vi_blocknr; +}; + +/** + * struct nilfs_vdesc - + */ +struct nilfs_vdesc { + __u64 vd_ino; + __u64 vd_cno; + __u64 vd_vblocknr; + struct nilfs_period vd_period; + __u64 vd_blocknr; + __u64 vd_offset; + __u32 vd_flags; + __u32 vd_pad; +}; + +/** + * struct nilfs_bdesc - + */ +struct nilfs_bdesc { + __u64 bd_ino; + __u64 bd_oblocknr; + __u64 bd_blocknr; + __u64 bd_offset; + __u32 bd_level; + __u32 bd_pad; +}; + +#define NILFS_IOCTL_IDENT 'n' + +#define NILFS_IOCTL_CHANGE_CPMODE \ + _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode) +#define NILFS_IOCTL_DELETE_CHECKPOINT \ + _IOW(NILFS_IOCTL_IDENT, 0x81, __u64) +#define NILFS_IOCTL_GET_CPINFO \ + _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv) +#define NILFS_IOCTL_GET_CPSTAT \ + _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat) +#define NILFS_IOCTL_GET_SUINFO \ + _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv) +#define NILFS_IOCTL_GET_SUSTAT \ + _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat) +#define NILFS_IOCTL_GET_VINFO \ + _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv) +#define NILFS_IOCTL_GET_BDESCS \ + _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv) +#define NILFS_IOCTL_CLEAN_SEGMENTS \ + _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5]) +#define NILFS_IOCTL_SYNC \ + _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64) +#define NILFS_IOCTL_RESIZE \ + _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64) +#define NILFS_IOCTL_SET_ALLOC_RANGE \ + _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2]) + +#endif /* _LINUX_NILFS_FS_H */ diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h new file mode 100644 index 00000000..e474f6e7 --- /dev/null +++ b/include/linux/nl80211.h @@ -0,0 +1,2866 @@ +#ifndef __LINUX_NL80211_H +#define __LINUX_NL80211_H +/* + * 802.11 netlink interface public header + * + * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2008 Michael Wu <flamingice@sourmilk.net> + * Copyright 2008 Luis Carlos Cobo <luisca@cozybit.com> + * Copyright 2008 Michael Buesch <m@bues.ch> + * Copyright 2008, 2009 Luis R. Rodriguez <lrodriguez@atheros.com> + * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com> + * Copyright 2008 Colin McCabe <colin@cozybit.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include <linux/types.h> + +/** + * DOC: Station handling + * + * Stations are added per interface, but a special case exists with VLAN + * interfaces. When a station is bound to an AP interface, it may be moved + * into a VLAN identified by a VLAN interface index (%NL80211_ATTR_STA_VLAN). + * The station is still assumed to belong to the AP interface it was added + * to. + * + * TODO: need more info? 
+ */ + +/** + * DOC: Frame transmission/registration support + * + * Frame transmission and registration support exists to allow userspace + * management entities such as wpa_supplicant react to management frames + * that are not being handled by the kernel. This includes, for example, + * certain classes of action frames that cannot be handled in the kernel + * for various reasons. + * + * Frame registration is done on a per-interface basis and registrations + * cannot be removed other than by closing the socket. It is possible to + * specify a registration filter to register, for example, only for a + * certain type of action frame. In particular with action frames, those + * that userspace registers for will not be returned as unhandled by the + * driver, so that the registered application has to take responsibility + * for doing that. + * + * The type of frame that can be registered for is also dependent on the + * driver and interface type. The frame types are advertised in wiphy + * attributes so applications know what to expect. + * + * NOTE: When an interface changes type while registrations are active, + * these registrations are ignored until the interface type is + * changed again. This means that changing the interface type can + * lead to a situation that couldn't otherwise be produced, but + * any such registrations will be dormant in the sense that they + * will not be serviced, i.e. they will not receive any frames. + * + * Frame transmission allows userspace to send for example the required + * responses to action frames. It is subject to some sanity checking, + * but many frames can be transmitted. When a frame was transmitted, its + * status is indicated to the sending socket. + * + * For more technical details, see the corresponding command descriptions + * below. + */ + +/** + * DOC: Virtual interface / concurrency capabilities + * + * Some devices are able to operate with virtual MACs, they can have + * more than one virtual interface. The capability handling for this + * is a bit complex though, as there may be a number of restrictions + * on the types of concurrency that are supported. + * + * To start with, each device supports the interface types listed in + * the %NL80211_ATTR_SUPPORTED_IFTYPES attribute, but by listing the + * types there no concurrency is implied. + * + * Once concurrency is desired, more attributes must be observed: + * To start with, since some interface types are purely managed in + * software, like the AP-VLAN type in mac80211 for example, there's + * an additional list of these, they can be added at any time and + * are only restricted by some semantic restrictions (e.g. AP-VLAN + * cannot be added without a corresponding AP interface). This list + * is exported in the %NL80211_ATTR_SOFTWARE_IFTYPES attribute. + * + * Further, the list of supported combinations is exported. This is + * in the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute. Basically, + * it exports a list of "groups", and at any point in time the + * interfaces that are currently active must fall into any one of + * the advertised groups. Within each group, there are restrictions + * on the number of interfaces of different types that are supported + * and also the number of different channels, along with potentially + * some other restrictions. See &enum nl80211_if_combination_attrs. + * + * All together, these attributes define the concurrency of virtual + * interfaces that a given device supports. 
+ */ + +/** + * enum nl80211_commands - supported nl80211 commands + * + * @NL80211_CMD_UNSPEC: unspecified command to catch errors + * + * @NL80211_CMD_GET_WIPHY: request information about a wiphy or dump request + * to get a list of all present wiphys. + * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or + * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME, + * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, + * %NL80211_ATTR_WIPHY_CHANNEL_TYPE, %NL80211_ATTR_WIPHY_RETRY_SHORT, + * %NL80211_ATTR_WIPHY_RETRY_LONG, %NL80211_ATTR_WIPHY_FRAG_THRESHOLD, + * and/or %NL80211_ATTR_WIPHY_RTS_THRESHOLD. + * However, for setting the channel, see %NL80211_CMD_SET_CHANNEL + * instead, the support here is for backward compatibility only. + * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request + * or rename notification. Has attributes %NL80211_ATTR_WIPHY and + * %NL80211_ATTR_WIPHY_NAME. + * @NL80211_CMD_DEL_WIPHY: Wiphy deleted. Has attributes + * %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME. + * + * @NL80211_CMD_GET_INTERFACE: Request an interface's configuration; + * either a dump request on a %NL80211_ATTR_WIPHY or a specific get + * on an %NL80211_ATTR_IFINDEX is supported. + * @NL80211_CMD_SET_INTERFACE: Set type of a virtual interface, requires + * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_IFTYPE. + * @NL80211_CMD_NEW_INTERFACE: Newly created virtual interface or response + * to %NL80211_CMD_GET_INTERFACE. Has %NL80211_ATTR_IFINDEX, + * %NL80211_ATTR_WIPHY and %NL80211_ATTR_IFTYPE attributes. Can also + * be sent from userspace to request creation of a new virtual interface, + * then requires attributes %NL80211_ATTR_WIPHY, %NL80211_ATTR_IFTYPE and + * %NL80211_ATTR_IFNAME. + * @NL80211_CMD_DEL_INTERFACE: Virtual interface was deleted, has attributes + * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_WIPHY. Can also be sent from + * userspace to request deletion of a virtual interface, then requires + * attribute %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified + * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. + * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT, + * %NL80211_ATTR_KEY_DEFAULT_MGMT, or %NL80211_ATTR_KEY_THRESHOLD. + * @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA, + * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC, %NL80211_ATTR_KEY_CIPHER, + * and %NL80211_ATTR_KEY_SEQ attributes. + * @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_IDX + * or %NL80211_ATTR_MAC. + * + * @NL80211_CMD_GET_BEACON: (not used) + * @NL80211_CMD_SET_BEACON: change the beacon on an access point interface + * using the %NL80211_ATTR_BEACON_HEAD and %NL80211_ATTR_BEACON_TAIL + * attributes. For drivers that generate the beacon and probe responses + * internally, the following attributes must be provided: %NL80211_ATTR_IE, + * %NL80211_ATTR_IE_PROBE_RESP and %NL80211_ATTR_IE_ASSOC_RESP. + * @NL80211_CMD_START_AP: Start AP operation on an AP interface, parameters + * are like for %NL80211_CMD_SET_BEACON, and additionally parameters that + * do not change are used, these include %NL80211_ATTR_BEACON_INTERVAL, + * %NL80211_ATTR_DTIM_PERIOD, %NL80211_ATTR_SSID, + * %NL80211_ATTR_HIDDEN_SSID, %NL80211_ATTR_CIPHERS_PAIRWISE, + * %NL80211_ATTR_CIPHER_GROUP, %NL80211_ATTR_WPA_VERSIONS, + * %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY, + * %NL80211_ATTR_AUTH_TYPE and %NL80211_ATTR_INACTIVITY_TIMEOUT. 
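
To make the command flow concrete, the following minimal user-space sketch (not from this header; it assumes the libnl-3 generic-netlink API) issues the NL80211_CMD_GET_WIPHY dump described at the top of this enum. Error handling is pared down to keep the shape visible.

#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* Dump every wiphy known to the kernel (NL80211_CMD_GET_WIPHY, dump mode). */
int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, "nl80211");	/* generic netlink family id */
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_DUMP, NL80211_CMD_GET_WIPHY, 0);

	nl_send_auto(sk, msg);		/* send the request ... */
	nl_recvmsgs_default(sk);	/* ... and let the default callbacks drain the dump */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}
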
+ * @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP + * @NL80211_CMD_STOP_AP: Stop AP operation on the given interface + * @NL80211_CMD_DEL_BEACON: old alias for %NL80211_CMD_STOP_AP + * + * @NL80211_CMD_GET_STATION: Get station attributes for station identified by + * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_SET_STATION: Set station attributes for station identified by + * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_NEW_STATION: Add a station with given attributes to the + * the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC + * or, if no MAC address given, all stations, on the interface identified + * by %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to + * destination %NL80211_ATTR_MAC on the interface identified by + * %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_SET_MPATH: Set mesh path attributes for mesh path to + * destination %NL80211_ATTR_MAC on the interface identified by + * %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_NEW_MPATH: Create a new mesh path for the destination given by + * %NL80211_ATTR_MAC via %NL80211_ATTR_MPATH_NEXT_HOP. + * @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by + * %NL80211_ATTR_MAC. + * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the + * the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC + * or, if no MAC address given, all mesh paths, on the interface identified + * by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by + * %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_GET_REG: ask the wireless core to send us its currently set + * regulatory domain. + * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command + * after being queried by the kernel. CRDA replies by sending a regulatory + * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our + * current alpha2 if it found a match. It also provides + * NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each + * regulatory rule is a nested set of attributes given by + * %NL80211_ATTR_REG_RULE_FREQ_[START|END] and + * %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by + * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and + * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP. + * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain + * to the specified ISO/IEC 3166-1 alpha2 country code. The core will + * store this as a valid request and then query userspace for it. + * + * @NL80211_CMD_GET_MESH_CONFIG: Get mesh networking properties for the + * interface identified by %NL80211_ATTR_IFINDEX + * + * @NL80211_CMD_SET_MESH_CONFIG: Set mesh networking properties for the + * interface identified by %NL80211_ATTR_IFINDEX + * + * @NL80211_CMD_SET_MGMT_EXTRA_IE: Set extra IEs for management frames. The + * interface is identified with %NL80211_ATTR_IFINDEX and the management + * frame subtype with %NL80211_ATTR_MGMT_SUBTYPE. The extra IE data to be + * added to the end of the specified management frame is specified with + * %NL80211_ATTR_IE. If the command succeeds, the requested data will be + * added to all specified management frames generated by + * kernel/firmware/driver. 
+ * Note: This command has been removed and it is only reserved at this + * point to avoid re-using existing command number. The functionality this + * command was planned for has been provided with cleaner design with the + * option to specify additional IEs in NL80211_CMD_TRIGGER_SCAN, + * NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE, + * NL80211_CMD_DEAUTHENTICATE, and NL80211_CMD_DISASSOCIATE. + * + * @NL80211_CMD_GET_SCAN: get scan results + * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters + * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the + * probe requests at CCK rate or not. + * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to + * NL80211_CMD_GET_SCAN and on the "scan" multicast group) + * @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons, + * partial scan results may be available + * + * @NL80211_CMD_START_SCHED_SCAN: start a scheduled scan at certain + * intervals, as specified by %NL80211_ATTR_SCHED_SCAN_INTERVAL. + * Like with normal scans, if SSIDs (%NL80211_ATTR_SCAN_SSIDS) + * are passed, they are used in the probe requests. For + * broadcast, a broadcast SSID must be passed (ie. an empty + * string). If no SSID is passed, no probe requests are sent and + * a passive scan is performed. %NL80211_ATTR_SCAN_FREQUENCIES, + * if passed, define which channels should be scanned; if not + * passed, all channels allowed for the current regulatory domain + * are used. Extra IEs can also be passed from the userspace by + * using the %NL80211_ATTR_IE attribute. + * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT + * if scheduled scan is not running. + * @NL80211_CMD_SCHED_SCAN_RESULTS: indicates that there are scheduled scan + * results available. + * @NL80211_CMD_SCHED_SCAN_STOPPED: indicates that the scheduled scan has + * stopped. The driver may issue this event at any time during a + * scheduled scan. One reason for stopping the scan is if the hardware + * does not support starting an association or a normal scan while running + * a scheduled scan. This event is also sent when the + * %NL80211_CMD_STOP_SCHED_SCAN command is received or when the interface + * is brought down while a scheduled scan was running. + * + * @NL80211_CMD_GET_SURVEY: get survey resuls, e.g. channel occupation + * or noise level + * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to + * NL80211_CMD_GET_SURVEY and on the "scan" multicast group) + * + * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain + * has been changed and provides details of the request information + * that caused the change such as who initiated the regulatory request + * (%NL80211_ATTR_REG_INITIATOR), the wiphy_idx + * (%NL80211_ATTR_REG_ALPHA2) on which the request was made from if + * the initiator was %NL80211_REGDOM_SET_BY_COUNTRY_IE or + * %NL80211_REGDOM_SET_BY_DRIVER, the type of regulatory domain + * set (%NL80211_ATTR_REG_TYPE), if the type of regulatory domain is + * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on + * to (%NL80211_ATTR_REG_ALPHA2). + * @NL80211_CMD_REG_BEACON_HINT: indicates to userspace that an AP beacon + * has been found while world roaming thus enabling active scan or + * any mode of operation that initiates TX (beacons) on a channel + * where we would not have been able to do either before. 
As an example + * if you are world roaming (regulatory domain set to world or if your + * driver is using a custom world roaming regulatory domain) and while + * doing a passive scan on the 5 GHz band you find an AP there (if not + * on a DFS channel) you will now be able to actively scan for that AP + * or use AP mode on your card on that same channel. Note that this will + * never be used for channels 1-11 on the 2 GHz band as they are always + * enabled world wide. This beacon hint is only sent if your device had + * either disabled active scanning or beaconing on a channel. We send to + * userspace the wiphy on which we removed a restriction from + * (%NL80211_ATTR_WIPHY) and the channel on which this occurred + * before (%NL80211_ATTR_FREQ_BEFORE) and after (%NL80211_ATTR_FREQ_AFTER) + * the beacon hint was processed. + * + * @NL80211_CMD_AUTHENTICATE: authentication request and notification. + * This command is used both as a command (request to authenticate) and + * as an event on the "mlme" multicast group indicating completion of the + * authentication process. + * When used as a command, %NL80211_ATTR_IFINDEX is used to identify the + * interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and + * BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify + * the SSID (mainly for association, but is included in authentication + * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ is used + * to specify the frequence of the channel in MHz. %NL80211_ATTR_AUTH_TYPE + * is used to specify the authentication type. %NL80211_ATTR_IE is used to + * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs) + * to be added to the frame. + * When used as an event, this reports reception of an Authentication + * frame in station and IBSS modes when the local MLME processed the + * frame, i.e., it was for the local STA and was received in correct + * state. This is similar to MLME-AUTHENTICATE.confirm primitive in the + * MLME SAP interface (kernel providing MLME, userspace SME). The + * included %NL80211_ATTR_FRAME attribute contains the management frame + * (including both the header and frame body, but not FCS). This event is + * also used to indicate if the authentication attempt timed out. In that + * case the %NL80211_ATTR_FRAME attribute is replaced with a + * %NL80211_ATTR_TIMED_OUT flag (and %NL80211_ATTR_MAC to indicate which + * pending authentication timed out). + * @NL80211_CMD_ASSOCIATE: association request and notification; like + * NL80211_CMD_AUTHENTICATE but for Association and Reassociation + * (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request, + * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). + * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like + * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to + * MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication + * primitives). + * @NL80211_CMD_DISASSOCIATE: disassociation request and notification; like + * NL80211_CMD_AUTHENTICATE but for Disassociation frames (similar to + * MLME-DISASSOCIATE.request and MLME-DISASSOCIATE.indication primitives). 
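
The authentication request described above maps onto a handful of attributes. The sketch below is a hypothetical helper built on the libnl-3 API; socket setup and sending follow the GET_WIPHY example earlier, and NL80211_AUTHTYPE_OPEN_SYSTEM is used purely as a sample auth type.

#include <string.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Fill a request corresponding to the MLME-AUTHENTICATE.request described above. */
static int fill_authenticate(struct nl_msg *msg, int family, int ifindex,
			     const unsigned char bssid[6],
			     const char *ssid, unsigned int freq_mhz)
{
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_AUTHENTICATE, 0);

	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_MAC, 6, bssid);		/* peer/BSSID */
	nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
	nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq_mhz);	/* channel in MHz */
	nla_put_u32(msg, NL80211_ATTR_AUTH_TYPE, NL80211_AUTHTYPE_OPEN_SYSTEM);
	return 0;
}
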
+ * + * @NL80211_CMD_MICHAEL_MIC_FAILURE: notification of a locally detected Michael + * MIC (part of TKIP) failure; sent on the "mlme" multicast group; the + * event includes %NL80211_ATTR_MAC to describe the source MAC address of + * the frame with invalid MIC, %NL80211_ATTR_KEY_TYPE to show the key + * type, %NL80211_ATTR_KEY_IDX to indicate the key identifier, and + * %NL80211_ATTR_KEY_SEQ to indicate the TSC value of the frame; this + * event matches with MLME-MICHAELMICFAILURE.indication() primitive + * + * @NL80211_CMD_JOIN_IBSS: Join a new IBSS -- given at least an SSID and a + * FREQ attribute (for the initial frequency if no peer can be found) + * and optionally a MAC (as BSSID) and FREQ_FIXED attribute if those + * should be fixed rather than automatically determined. Can only be + * executed on a network interface that is UP, and fixed BSSID/FREQ + * may be rejected. Another optional parameter is the beacon interval, + * given in the %NL80211_ATTR_BEACON_INTERVAL attribute, which if not + * given defaults to 100 TU (102.4ms). + * @NL80211_CMD_LEAVE_IBSS: Leave the IBSS -- no special arguments, the IBSS is + * determined by the network interface. + * + * @NL80211_CMD_TESTMODE: testmode command, takes a wiphy (or ifindex) attribute + * to identify the device, and the TESTDATA blob attribute to pass through + * to the driver. + * + * @NL80211_CMD_CONNECT: connection request and notification; this command + * requests to connect to a specified network but without separating + * auth and assoc steps. For this, you need to specify the SSID in a + * %NL80211_ATTR_SSID attribute, and can optionally specify the association + * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC, + * %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT, + * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and + * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT. + * Background scan period can optionally be + * specified in %NL80211_ATTR_BG_SCAN_PERIOD, + * if not specified default background scan configuration + * in driver is used and if period value is 0, bg scan will be disabled. + * This attribute is ignored if driver does not support roam scan. + * It is also sent as an event, with the BSSID and response IEs when the + * connection is established or failed to be established. This can be + * determined by the STATUS_CODE attribute. + * @NL80211_CMD_ROAM: request that the card roam (currently not implemented), + * sent as an event when the card/driver roamed by itself. + * @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify + * userspace that a connection was dropped by the AP or due to other + * reasons, for this the %NL80211_ATTR_DISCONNECTED_BY_AP and + * %NL80211_ATTR_REASON_CODE attributes are used. + * + * @NL80211_CMD_SET_WIPHY_NETNS: Set a wiphy's netns. Note that all devices + * associated with this wiphy must be down and will follow. + * + * @NL80211_CMD_REMAIN_ON_CHANNEL: Request to remain awake on the specified + * channel for the specified amount of time. This can be used to do + * off-channel operations like transmit a Public Action frame and wait for + * a response while being associated to an AP on another channel. + * %NL80211_ATTR_IFINDEX is used to specify which interface (and thus + * radio) is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the + * frequency for the operation and %NL80211_ATTR_WIPHY_CHANNEL_TYPE may be + * optionally used to specify additional channel parameters. 
+ * %NL80211_ATTR_DURATION is used to specify the duration in milliseconds + * to remain on the channel. This command is also used as an event to + * notify when the requested duration starts (it may take a while for the + * driver to schedule this time due to other concurrent needs for the + * radio). + * When called, this operation returns a cookie (%NL80211_ATTR_COOKIE) + * that will be included with any events pertaining to this request; + * the cookie is also used to cancel the request. + * @NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL: This command can be used to cancel a + * pending remain-on-channel duration if the desired operation has been + * completed prior to expiration of the originally requested duration. + * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify the + * radio. The %NL80211_ATTR_COOKIE attribute must be given as well to + * uniquely identify the request. + * This command is also used as an event to notify when a requested + * remain-on-channel duration has expired. + * + * @NL80211_CMD_SET_TX_BITRATE_MASK: Set the mask of rates to be used in TX + * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface + * and @NL80211_ATTR_TX_RATES the set of allowed rates. + * + * @NL80211_CMD_REGISTER_FRAME: Register for receiving certain mgmt frames + * (via @NL80211_CMD_FRAME) for processing in userspace. This command + * requires an interface index, a frame type attribute (optional for + * backward compatibility reasons, if not given assumes action frames) + * and a match attribute containing the first few bytes of the frame + * that should match, e.g. a single byte for only a category match or + * four bytes for vendor frames including the OUI. The registration + * cannot be dropped, but is removed automatically when the netlink + * socket is closed. Multiple registrations can be made. + * @NL80211_CMD_REGISTER_ACTION: Alias for @NL80211_CMD_REGISTER_FRAME for + * backward compatibility + * @NL80211_CMD_FRAME: Management frame TX request and RX notification. This + * command is used both as a request to transmit a management frame and + * as an event indicating reception of a frame that was not processed in + * kernel code, but is for us (i.e., which may need to be processed in a + * user space application). %NL80211_ATTR_FRAME is used to specify the + * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and + * optionally %NL80211_ATTR_WIPHY_CHANNEL_TYPE) is used to indicate on + * which channel the frame is to be transmitted or was received. If this + * channel is not the current channel (remain-on-channel or the + * operational channel) the device will switch to the given channel and + * transmit the frame, optionally waiting for a response for the time + * specified using %NL80211_ATTR_DURATION. When called, this operation + * returns a cookie (%NL80211_ATTR_COOKIE) that will be included with the + * TX status event pertaining to the TX request. + * %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the + * management frames at CCK rate or not in 2GHz band. + * @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this + * command may be used with the corresponding cookie to cancel the wait + * time if it is known that it is no longer necessary. + * @NL80211_CMD_ACTION: Alias for @NL80211_CMD_FRAME for backward compatibility. + * @NL80211_CMD_FRAME_TX_STATUS: Report TX status of a management frame + * transmitted with %NL80211_CMD_FRAME. 
%NL80211_ATTR_COOKIE identifies + * the TX command and %NL80211_ATTR_FRAME includes the contents of the + * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged + * the frame. + * @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for + * backward compatibility. + * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command + * is used to configure connection quality monitoring notification trigger + * levels. + * @NL80211_CMD_NOTIFY_CQM: Connection quality monitor notification. This + * command is used as an event to indicate the that a trigger level was + * reached. + * @NL80211_CMD_SET_CHANNEL: Set the channel (using %NL80211_ATTR_WIPHY_FREQ + * and %NL80211_ATTR_WIPHY_CHANNEL_TYPE) the given interface (identifed + * by %NL80211_ATTR_IFINDEX) shall operate on. + * In case multiple channels are supported by the device, the mechanism + * with which it switches channels is implementation-defined. + * When a monitor interface is given, it can only switch channel while + * no other interfaces are operating to avoid disturbing the operation + * of any other interfaces, and other interfaces will again take + * precedence when they are used. + * + * @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface. + * + * @NL80211_CMD_JOIN_MESH: Join a mesh. The mesh ID must be given, and initial + * mesh config parameters may be given. + * @NL80211_CMD_LEAVE_MESH: Leave the mesh network -- no special arguments, the + * network is determined by the network interface. + * + * @NL80211_CMD_UNPROT_DEAUTHENTICATE: Unprotected deauthentication frame + * notification. This event is used to indicate that an unprotected + * deauthentication frame was dropped when MFP is in use. + * @NL80211_CMD_UNPROT_DISASSOCIATE: Unprotected disassociation frame + * notification. This event is used to indicate that an unprotected + * disassociation frame was dropped when MFP is in use. + * + * @NL80211_CMD_NEW_PEER_CANDIDATE: Notification on the reception of a + * beacon or probe response from a compatible mesh peer. This is only + * sent while no station information (sta_info) exists for the new peer + * candidate and when @NL80211_MESH_SETUP_USERSPACE_AUTH is set. On + * reception of this notification, userspace may decide to create a new + * station (@NL80211_CMD_NEW_STATION). To stop this notification from + * reoccurring, the userspace authentication daemon may want to create the + * new station with the AUTHENTICATED flag unset and maybe change it later + * depending on the authentication result. + * + * @NL80211_CMD_GET_WOWLAN: get Wake-on-Wireless-LAN (WoWLAN) settings. + * @NL80211_CMD_SET_WOWLAN: set Wake-on-Wireless-LAN (WoWLAN) settings. + * Since wireless is more complex than wired ethernet, it supports + * various triggers. These triggers can be configured through this + * command with the %NL80211_ATTR_WOWLAN_TRIGGERS attribute. For + * more background information, see + * http://wireless.kernel.org/en/users/Documentation/WoWLAN. + * + * @NL80211_CMD_SET_REKEY_OFFLOAD: This command is used give the driver + * the necessary information for supporting GTK rekey offload. This + * feature is typically used during WoWLAN. The configuration data + * is contained in %NL80211_ATTR_REKEY_DATA (which is nested and + * contains the data in sub-attributes). After rekeying happened, + * this command may also be sent by the driver as an MLME event to + * inform userspace of the new replay counter. 
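
For NL80211_CMD_REGISTER_FRAME as described earlier, a registration needs only the interface, an optional frame type and a match prefix. The helper below is illustrative (libnl-3 assumed); the 0x00d0 type/subtype value and the single category byte 0x04 (public action) are example choices, not requirements of this header.

#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Register to receive public Action frames on one interface. */
static int fill_register_public_action(struct nl_msg *msg, int family, int ifindex)
{
	const unsigned char match[] = { 0x04 };	/* first frame-body byte: category */

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_REGISTER_FRAME, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	/* management/Action type-subtype; optional, defaults to action frames */
	nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, 0x00d0);
	nla_put(msg, NL80211_ATTR_FRAME_MATCH, sizeof(match), match);
	return 0;
}
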
+ * + * @NL80211_CMD_PMKSA_CANDIDATE: This is used as an event to inform userspace + * of PMKSA caching dandidates. + * + * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup). + * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame. + * + * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP + * (or GO) interface (i.e. hostapd) to ask for unexpected frames to + * implement sending deauth to stations that send unexpected class 3 + * frames. Also used as the event sent by the kernel when such a frame + * is received. + * For the event, the %NL80211_ATTR_MAC attribute carries the TA and + * other attributes like the interface index are present. + * If used as the command it must have an interface index and you can + * only unsubscribe from the event by closing the socket. Subscription + * is also for %NL80211_CMD_UNEXPECTED_4ADDR_FRAME events. + * + * @NL80211_CMD_UNEXPECTED_4ADDR_FRAME: Sent as an event indicating that the + * associated station identified by %NL80211_ATTR_MAC sent a 4addr frame + * and wasn't already in a 4-addr VLAN. The event will be sent similarly + * to the %NL80211_CMD_UNEXPECTED_FRAME event, to the same listener. + * + * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface + * by sending a null data frame to it and reporting when the frame is + * acknowleged. This is used to allow timing out inactive clients. Uses + * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a + * direct reply with an %NL80211_ATTR_COOKIE that is later used to match + * up the event with the request. The event includes the same data and + * has %NL80211_ATTR_ACK set if the frame was ACKed. + * + * @NL80211_CMD_REGISTER_BEACONS: Register this socket to receive beacons from + * other BSSes when any interfaces are in AP mode. This helps implement + * OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME + * messages. Note that per PHY only one application may register. + * + * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether + * No Acknowledgement Policy should be applied. + * + * @NL80211_CMD_MAX: highest used command number + * @__NL80211_CMD_AFTER_LAST: internal use + */ +enum nl80211_commands { +/* don't change the order or add anything between, this is ABI! 
*/ + NL80211_CMD_UNSPEC, + + NL80211_CMD_GET_WIPHY, /* can dump */ + NL80211_CMD_SET_WIPHY, + NL80211_CMD_NEW_WIPHY, + NL80211_CMD_DEL_WIPHY, + + NL80211_CMD_GET_INTERFACE, /* can dump */ + NL80211_CMD_SET_INTERFACE, + NL80211_CMD_NEW_INTERFACE, + NL80211_CMD_DEL_INTERFACE, + + NL80211_CMD_GET_KEY, + NL80211_CMD_SET_KEY, + NL80211_CMD_NEW_KEY, + NL80211_CMD_DEL_KEY, + + NL80211_CMD_GET_BEACON, + NL80211_CMD_SET_BEACON, + NL80211_CMD_START_AP, + NL80211_CMD_NEW_BEACON = NL80211_CMD_START_AP, + NL80211_CMD_STOP_AP, + NL80211_CMD_DEL_BEACON = NL80211_CMD_STOP_AP, + + NL80211_CMD_GET_STATION, + NL80211_CMD_SET_STATION, + NL80211_CMD_NEW_STATION, + NL80211_CMD_DEL_STATION, + + NL80211_CMD_GET_MPATH, + NL80211_CMD_SET_MPATH, + NL80211_CMD_NEW_MPATH, + NL80211_CMD_DEL_MPATH, + + NL80211_CMD_SET_BSS, + + NL80211_CMD_SET_REG, + NL80211_CMD_REQ_SET_REG, + + NL80211_CMD_GET_MESH_CONFIG, + NL80211_CMD_SET_MESH_CONFIG, + + NL80211_CMD_SET_MGMT_EXTRA_IE /* reserved; not used */, + + NL80211_CMD_GET_REG, + + NL80211_CMD_GET_SCAN, + NL80211_CMD_TRIGGER_SCAN, + NL80211_CMD_NEW_SCAN_RESULTS, + NL80211_CMD_SCAN_ABORTED, + + NL80211_CMD_REG_CHANGE, + + NL80211_CMD_AUTHENTICATE, + NL80211_CMD_ASSOCIATE, + NL80211_CMD_DEAUTHENTICATE, + NL80211_CMD_DISASSOCIATE, + + NL80211_CMD_MICHAEL_MIC_FAILURE, + + NL80211_CMD_REG_BEACON_HINT, + + NL80211_CMD_JOIN_IBSS, + NL80211_CMD_LEAVE_IBSS, + + NL80211_CMD_TESTMODE, + + NL80211_CMD_CONNECT, + NL80211_CMD_ROAM, + NL80211_CMD_DISCONNECT, + + NL80211_CMD_SET_WIPHY_NETNS, + + NL80211_CMD_GET_SURVEY, + NL80211_CMD_NEW_SURVEY_RESULTS, + + NL80211_CMD_SET_PMKSA, + NL80211_CMD_DEL_PMKSA, + NL80211_CMD_FLUSH_PMKSA, + + NL80211_CMD_REMAIN_ON_CHANNEL, + NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, + + NL80211_CMD_SET_TX_BITRATE_MASK, + + NL80211_CMD_REGISTER_FRAME, + NL80211_CMD_REGISTER_ACTION = NL80211_CMD_REGISTER_FRAME, + NL80211_CMD_FRAME, + NL80211_CMD_ACTION = NL80211_CMD_FRAME, + NL80211_CMD_FRAME_TX_STATUS, + NL80211_CMD_ACTION_TX_STATUS = NL80211_CMD_FRAME_TX_STATUS, + + NL80211_CMD_SET_POWER_SAVE, + NL80211_CMD_GET_POWER_SAVE, + + NL80211_CMD_SET_CQM, + NL80211_CMD_NOTIFY_CQM, + + NL80211_CMD_SET_CHANNEL, + NL80211_CMD_SET_WDS_PEER, + + NL80211_CMD_FRAME_WAIT_CANCEL, + + NL80211_CMD_JOIN_MESH, + NL80211_CMD_LEAVE_MESH, + + NL80211_CMD_UNPROT_DEAUTHENTICATE, + NL80211_CMD_UNPROT_DISASSOCIATE, + + NL80211_CMD_NEW_PEER_CANDIDATE, + + NL80211_CMD_GET_WOWLAN, + NL80211_CMD_SET_WOWLAN, + + NL80211_CMD_START_SCHED_SCAN, + NL80211_CMD_STOP_SCHED_SCAN, + NL80211_CMD_SCHED_SCAN_RESULTS, + NL80211_CMD_SCHED_SCAN_STOPPED, + + NL80211_CMD_SET_REKEY_OFFLOAD, + + NL80211_CMD_PMKSA_CANDIDATE, + + NL80211_CMD_TDLS_OPER, + NL80211_CMD_TDLS_MGMT, + + NL80211_CMD_UNEXPECTED_FRAME, + + NL80211_CMD_PROBE_CLIENT, + + NL80211_CMD_REGISTER_BEACONS, + + NL80211_CMD_UNEXPECTED_4ADDR_FRAME, + + NL80211_CMD_SET_NOACK_MAP, + + /* add new commands above here */ + + /* used to define NL80211_CMD_MAX below */ + __NL80211_CMD_AFTER_LAST, + NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 +}; + +/* + * Allow user space programs to use #ifdef on new commands by defining them + * here + */ +#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS +#define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE +#define NL80211_CMD_REG_CHANGE NL80211_CMD_REG_CHANGE +#define NL80211_CMD_AUTHENTICATE NL80211_CMD_AUTHENTICATE +#define NL80211_CMD_ASSOCIATE NL80211_CMD_ASSOCIATE +#define NL80211_CMD_DEAUTHENTICATE NL80211_CMD_DEAUTHENTICATE +#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE +#define 
NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT + +#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS + +/* source-level API compatibility */ +#define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG +#define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG +#define NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE NL80211_MESH_SETUP_IE + +/** + * enum nl80211_attrs - nl80211 netlink attributes + * + * @NL80211_ATTR_UNSPEC: unspecified attribute to catch errors + * + * @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf. + * /sys/class/ieee80211/<phyname>/index + * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming) + * @NL80211_ATTR_WIPHY_TXQ_PARAMS: a nested array of TX queue parameters + * @NL80211_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz + * @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ + * if HT20 or HT40 are allowed (i.e., 802.11n disabled if not included): + * NL80211_CHAN_NO_HT = HT not allowed (i.e., same as not including + * this attribute) + * NL80211_CHAN_HT20 = HT20 only + * NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel + * NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel + * @NL80211_ATTR_WIPHY_RETRY_SHORT: TX retry limit for frames whose length is + * less than or equal to the RTS threshold; allowed range: 1..255; + * dot11ShortRetryLimit; u8 + * @NL80211_ATTR_WIPHY_RETRY_LONG: TX retry limit for frames whose length is + * greater than the RTS threshold; allowed range: 1..255; + * dot11ShortLongLimit; u8 + * @NL80211_ATTR_WIPHY_FRAG_THRESHOLD: fragmentation threshold, i.e., maximum + * length in octets for frames; allowed range: 256..8000, disable + * fragmentation with (u32)-1; dot11FragmentationThreshold; u32 + * @NL80211_ATTR_WIPHY_RTS_THRESHOLD: RTS threshold (TX frames with length + * larger than or equal to this use RTS/CTS handshake); allowed range: + * 0..65536, disable with (u32)-1; dot11RTSThreshold; u32 + * @NL80211_ATTR_WIPHY_COVERAGE_CLASS: Coverage Class as defined by IEEE 802.11 + * section 7.3.2.9; dot11CoverageClass; u8 + * + * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on + * @NL80211_ATTR_IFNAME: network interface name + * @NL80211_ATTR_IFTYPE: type of virtual interface, see &enum nl80211_iftype + * + * @NL80211_ATTR_MAC: MAC address (various uses) + * + * @NL80211_ATTR_KEY_DATA: (temporal) key data; for TKIP this consists of + * 16 bytes encryption key followed by 8 bytes each for TX and RX MIC + * keys + * @NL80211_ATTR_KEY_IDX: key ID (u8, 0-3) + * @NL80211_ATTR_KEY_CIPHER: key cipher suite (u32, as defined by IEEE 802.11 + * section 7.3.2.25.1, e.g. 0x000FAC04) + * @NL80211_ATTR_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and + * CCMP keys, each six bytes in little endian + * + * @NL80211_ATTR_BEACON_INTERVAL: beacon interval in TU + * @NL80211_ATTR_DTIM_PERIOD: DTIM period for beaconing + * @NL80211_ATTR_BEACON_HEAD: portion of the beacon before the TIM IE + * @NL80211_ATTR_BEACON_TAIL: portion of the beacon after the TIM IE + * + * @NL80211_ATTR_STA_AID: Association ID for the station (u16) + * @NL80211_ATTR_STA_FLAGS: flags, nested element with NLA_FLAG attributes of + * &enum nl80211_sta_flags (deprecated, use %NL80211_ATTR_STA_FLAGS2) + * @NL80211_ATTR_STA_LISTEN_INTERVAL: listen interval as defined by + * IEEE 802.11 7.3.1.6 (u16). 
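
The retry, fragmentation and RTS attributes above are plain integer values carried by NL80211_CMD_SET_WIPHY. A minimal, hypothetical helper (libnl-3 assumed; message setup and send as in the earlier sketches) might look like this:

#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Set RTS and fragmentation thresholds on a given wiphy. */
static int fill_set_wiphy_thresholds(struct nl_msg *msg, int family,
				     unsigned int wiphy_idx,
				     unsigned int rts, unsigned int frag)
{
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_SET_WIPHY, 0);
	nla_put_u32(msg, NL80211_ATTR_WIPHY, wiphy_idx);
	/* (u32)-1 disables RTS/CTS or fragmentation, per the notes above */
	nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, rts);
	nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, frag);
	return 0;
}
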
+ * @NL80211_ATTR_STA_SUPPORTED_RATES: supported rates, array of supported + * rates as defined by IEEE 802.11 7.3.2.2 but without the length + * restriction (at most %NL80211_MAX_SUPP_RATES). + * @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station + * to, or the AP interface the station was originally added to to. + * @NL80211_ATTR_STA_INFO: information about a station, part of station info + * given for %NL80211_CMD_GET_STATION, nested attribute containing + * info as possible, see &enum nl80211_sta_info. + * + * @NL80211_ATTR_WIPHY_BANDS: Information about an operating bands, + * consisting of a nested array. + * + * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes). + * @NL80211_ATTR_STA_PLINK_ACTION: action to perform on the mesh peer link. + * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path. + * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path + * info given for %NL80211_CMD_GET_MPATH, nested attribute described at + * &enum nl80211_mpath_info. + * + * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of + * &enum nl80211_mntr_flags. + * + * @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code for which the + * current regulatory domain should be set to or is already set to. + * For example, 'CR', for Costa Rica. This attribute is used by the kernel + * to query the CRDA to retrieve one regulatory domain. This attribute can + * also be used by userspace to query the kernel for the currently set + * regulatory domain. We chose an alpha2 as that is also used by the + * IEEE-802.11d country information element to identify a country. + * Users can also simply ask the wireless core to set regulatory domain + * to a specific alpha2. + * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory + * rules. + * + * @NL80211_ATTR_BSS_CTS_PROT: whether CTS protection is enabled (u8, 0 or 1) + * @NL80211_ATTR_BSS_SHORT_PREAMBLE: whether short preamble is enabled + * (u8, 0 or 1) + * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled + * (u8, 0 or 1) + * @NL80211_ATTR_BSS_BASIC_RATES: basic rates, array of basic + * rates in format defined by IEEE 802.11 7.3.2.2 but without the length + * restriction (at most %NL80211_MAX_SUPP_RATES). + * + * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from + * association request when used with NL80211_CMD_NEW_STATION) + * + * @NL80211_ATTR_SUPPORTED_IFTYPES: nested attribute containing all + * supported interface types, each a flag attribute with the number + * of the interface mode. + * + * @NL80211_ATTR_MGMT_SUBTYPE: Management frame subtype for + * %NL80211_CMD_SET_MGMT_EXTRA_IE. + * + * @NL80211_ATTR_IE: Information element(s) data (used, e.g., with + * %NL80211_CMD_SET_MGMT_EXTRA_IE). + * + * @NL80211_ATTR_MAX_NUM_SCAN_SSIDS: number of SSIDs you can scan with + * a single scan request, a wiphy attribute. + * @NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS: number of SSIDs you can + * scan with a single scheduled scan request, a wiphy attribute. + * @NL80211_ATTR_MAX_SCAN_IE_LEN: maximum length of information elements + * that can be added to a scan request + * @NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN: maximum length of information + * elements that can be added to a scheduled scan request + * @NL80211_ATTR_MAX_MATCH_SETS: maximum number of sets that can be + * used with @NL80211_ATTR_SCHED_SCAN_MATCH, a wiphy attribute. 
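
Requesting a regulatory domain by alpha2, as described for NL80211_CMD_REQ_SET_REG and NL80211_ATTR_REG_ALPHA2 above, is a one-attribute message; the helper below is an illustrative sketch assuming the libnl-3 API.

#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Ask the wireless core to switch to a given country code, e.g. "CR". */
static int fill_req_set_reg(struct nl_msg *msg, int family, const char *alpha2)
{
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_REQ_SET_REG, 0);
	/* two-letter ISO 3166-1 alpha2 country code */
	nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, alpha2);
	return 0;
}
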
+ * + * @NL80211_ATTR_SCAN_FREQUENCIES: nested attribute with frequencies (in MHz) + * @NL80211_ATTR_SCAN_SSIDS: nested attribute with SSIDs, leave out for passive + * scanning and include a zero-length SSID (wildcard) for wildcard scan + * @NL80211_ATTR_BSS: scan result BSS + * + * @NL80211_ATTR_REG_INITIATOR: indicates who requested the regulatory domain + * currently in effect. This could be any of the %NL80211_REGDOM_SET_BY_* + * @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently + * set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*) + * + * @NL80211_ATTR_SUPPORTED_COMMANDS: wiphy attribute that specifies + * an array of command numbers (i.e. a mapping index to command number) + * that the driver for the given wiphy supports. + * + * @NL80211_ATTR_FRAME: frame data (binary attribute), including frame header + * and body, but not FCS; used, e.g., with NL80211_CMD_AUTHENTICATE and + * NL80211_CMD_ASSOCIATE events + * @NL80211_ATTR_SSID: SSID (binary attribute, 0..32 octets) + * @NL80211_ATTR_AUTH_TYPE: AuthenticationType, see &enum nl80211_auth_type, + * represented as a u32 + * @NL80211_ATTR_REASON_CODE: ReasonCode for %NL80211_CMD_DEAUTHENTICATE and + * %NL80211_CMD_DISASSOCIATE, u16 + * + * @NL80211_ATTR_KEY_TYPE: Key Type, see &enum nl80211_key_type, represented as + * a u32 + * + * @NL80211_ATTR_FREQ_BEFORE: A channel which has suffered a regulatory change + * due to considerations from a beacon hint. This attribute reflects + * the state of the channel _before_ the beacon hint processing. This + * attributes consists of a nested attribute containing + * NL80211_FREQUENCY_ATTR_* + * @NL80211_ATTR_FREQ_AFTER: A channel which has suffered a regulatory change + * due to considerations from a beacon hint. This attribute reflects + * the state of the channel _after_ the beacon hint processing. This + * attributes consists of a nested attribute containing + * NL80211_FREQUENCY_ATTR_* + * + * @NL80211_ATTR_CIPHER_SUITES: a set of u32 values indicating the supported + * cipher suites + * + * @NL80211_ATTR_FREQ_FIXED: a flag indicating the IBSS should not try to look + * for other networks on different channels + * + * @NL80211_ATTR_TIMED_OUT: a flag indicating than an operation timed out; this + * is used, e.g., with %NL80211_CMD_AUTHENTICATE event + * + * @NL80211_ATTR_USE_MFP: Whether management frame protection (IEEE 802.11w) is + * used for the association (&enum nl80211_mfp, represented as a u32); + * this attribute can be used + * with %NL80211_CMD_ASSOCIATE request + * + * @NL80211_ATTR_STA_FLAGS2: Attribute containing a + * &struct nl80211_sta_flag_update. + * + * @NL80211_ATTR_CONTROL_PORT: A flag indicating whether user space controls + * IEEE 802.1X port, i.e., sets/clears %NL80211_STA_FLAG_AUTHORIZED, in + * station mode. If the flag is included in %NL80211_CMD_ASSOCIATE + * request, the driver will assume that the port is unauthorized until + * authorized by user space. Otherwise, port is marked authorized by + * default in station mode. + * @NL80211_ATTR_CONTROL_PORT_ETHERTYPE: A 16-bit value indicating the + * ethertype that will be used for key negotiation. It can be + * specified with the associate and connect commands. If it is not + * specified, the value defaults to 0x888E (PAE, 802.1X). This + * attribute is also used as a flag in the wiphy information to + * indicate that protocols other than PAE are supported. 
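
Tying a few of these attributes together, the sketch below fills an NL80211_CMD_CONNECT request with an SSID and the user-space-controlled 802.1X port flag. It is a hypothetical helper on the libnl-3 API, not an excerpt from any existing tool.

#include <string.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Connect to a network by SSID, keeping the 802.1X port under user-space control. */
static int fill_connect(struct nl_msg *msg, int family, int ifindex,
			const char *ssid)
{
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_CONNECT, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
	/* user space owns the port until it authorizes the station */
	nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT);
	return 0;
}
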
+ * @NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT: When included along with + * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, indicates that the custom + * ethertype frames used for key negotiation must not be encrypted. + * + * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. + * We recommend using nested, driver-specific attributes within this. + * + * @NL80211_ATTR_DISCONNECTED_BY_AP: A flag indicating that the DISCONNECT + * event was due to the AP disconnecting the station, and not due to + * a local disconnect request. + * @NL80211_ATTR_STATUS_CODE: StatusCode for the %NL80211_CMD_CONNECT + * event (u16) + * @NL80211_ATTR_PRIVACY: Flag attribute, used with connect(), indicating + * that protected APs should be used. This is also used with NEW_BEACON to + * indicate that the BSS is to use protection. + * + * @NL80211_ATTR_CIPHERS_PAIRWISE: Used with CONNECT, ASSOCIATE, and NEW_BEACON + * to indicate which unicast key ciphers will be used with the connection + * (an array of u32). + * @NL80211_ATTR_CIPHER_GROUP: Used with CONNECT, ASSOCIATE, and NEW_BEACON to + * indicate which group key cipher will be used with the connection (a + * u32). + * @NL80211_ATTR_WPA_VERSIONS: Used with CONNECT, ASSOCIATE, and NEW_BEACON to + * indicate which WPA version(s) the AP we want to associate with is using + * (a u32 with flags from &enum nl80211_wpa_versions). + * @NL80211_ATTR_AKM_SUITES: Used with CONNECT, ASSOCIATE, and NEW_BEACON to + * indicate which key management algorithm(s) to use (an array of u32). + * + * @NL80211_ATTR_REQ_IE: (Re)association request information elements as + * sent out by the card, for ROAM and successful CONNECT events. + * @NL80211_ATTR_RESP_IE: (Re)association response information elements as + * sent by peer, for ROAM and successful CONNECT events. + * + * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used by in ASSOCIATE + * commands to specify using a reassociate frame + * + * @NL80211_ATTR_KEY: key information in a nested attribute with + * %NL80211_KEY_* sub-attributes + * @NL80211_ATTR_KEYS: array of keys for static WEP keys for connect() + * and join_ibss(), key information is in a nested attribute each + * with %NL80211_KEY_* sub-attributes + * + * @NL80211_ATTR_PID: Process ID of a network namespace. + * + * @NL80211_ATTR_GENERATION: Used to indicate consistent snapshots for + * dumps. This number increases whenever the object list being + * dumped changes, and as such userspace can verify that it has + * obtained a complete and consistent snapshot by verifying that + * all dump messages contain the same generation number. If it + * changed then the list changed and the dump should be repeated + * completely from scratch. + * + * @NL80211_ATTR_4ADDR: Use 4-address frames on a virtual interface + * + * @NL80211_ATTR_SURVEY_INFO: survey information about a channel, part of + * the survey response for %NL80211_CMD_GET_SURVEY, nested attribute + * containing info as possible, see &enum survey_info. + * + * @NL80211_ATTR_PMKID: PMK material for PMKSA caching. + * @NL80211_ATTR_MAX_NUM_PMKIDS: maximum number of PMKIDs a firmware can + * cache, a wiphy attribute. + * + * @NL80211_ATTR_DURATION: Duration of an operation in milliseconds, u32. + * @NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION: Device attribute that + * specifies the maximum duration that can be requested with the + * remain-on-channel operation, in milliseconds, u32. + * + * @NL80211_ATTR_COOKIE: Generic 64-bit cookie to identify objects. 
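
A remain-on-channel request combines the duration and frequency attributes above; the kernel's reply then carries the NL80211_ATTR_COOKIE used to cancel the request or to match later events. A hedged libnl-3 sketch:

#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Stay on an off-channel frequency for a bounded time (e.g. to exchange action frames). */
static int fill_remain_on_channel(struct nl_msg *msg, int family, int ifindex,
				  unsigned int freq_mhz, unsigned int dur_ms)
{
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_REMAIN_ON_CHANNEL, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq_mhz);	/* MHz */
	nla_put_u32(msg, NL80211_ATTR_DURATION, dur_ms);	/* milliseconds */
	/* the reply carries NL80211_ATTR_COOKIE (u64) identifying this request */
	return 0;
}
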
+ * + * @NL80211_ATTR_TX_RATES: Nested set of attributes + * (enum nl80211_tx_rate_attributes) describing TX rates per band. The + * enum nl80211_band value is used as the index (nla_type() of the nested + * data. If a band is not included, it will be configured to allow all + * rates based on negotiated supported rates information. This attribute + * is used with %NL80211_CMD_SET_TX_BITRATE_MASK. + * + * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain + * at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME. + * @NL80211_ATTR_FRAME_TYPE: A u16 indicating the frame type/subtype for the + * @NL80211_CMD_REGISTER_FRAME command. + * @NL80211_ATTR_TX_FRAME_TYPES: wiphy capability attribute, which is a + * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing + * information about which frame types can be transmitted with + * %NL80211_CMD_FRAME. + * @NL80211_ATTR_RX_FRAME_TYPES: wiphy capability attribute, which is a + * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing + * information about which frame types can be registered for RX. + * + * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was + * acknowledged by the recipient. + * + * @NL80211_ATTR_CQM: connection quality monitor configuration in a + * nested attribute with %NL80211_ATTR_CQM_* sub-attributes. + * + * @NL80211_ATTR_LOCAL_STATE_CHANGE: Flag attribute to indicate that a command + * is requesting a local authentication/association state change without + * invoking actual management frame exchange. This can be used with + * NL80211_CMD_AUTHENTICATE, NL80211_CMD_DEAUTHENTICATE, + * NL80211_CMD_DISASSOCIATE. + * + * @NL80211_ATTR_AP_ISOLATE: (AP mode) Do not forward traffic between stations + * connected to this BSS. + * + * @NL80211_ATTR_WIPHY_TX_POWER_SETTING: Transmit power setting type. See + * &enum nl80211_tx_power_setting for possible values. + * @NL80211_ATTR_WIPHY_TX_POWER_LEVEL: Transmit power level in signed mBm units. + * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING + * for non-automatic settings. + * + * @NL80211_ATTR_SUPPORT_IBSS_RSN: The device supports IBSS RSN, which mostly + * means support for per-station GTKs. + * + * @NL80211_ATTR_WIPHY_ANTENNA_TX: Bitmap of allowed antennas for transmitting. + * This can be used to mask out antennas which are not attached or should + * not be used for transmitting. If an antenna is not selected in this + * bitmap the hardware is not allowed to transmit on this antenna. + * + * Each bit represents one antenna, starting with antenna 1 at the first + * bit. Depending on which antennas are selected in the bitmap, 802.11n + * drivers can derive which chainmasks to use (if all antennas belonging to + * a particular chain are disabled this chain should be disabled) and if + * a chain has diversity antennas wether diversity should be used or not. + * HT capabilities (STBC, TX Beamforming, Antenna selection) can be + * derived from the available chains after applying the antenna mask. + * Non-802.11n drivers can derive wether to use diversity or not. + * Drivers may reject configurations or RX/TX mask combinations they cannot + * support by returning -EINVAL. + * + * @NL80211_ATTR_WIPHY_ANTENNA_RX: Bitmap of allowed antennas for receiving. + * This can be used to mask out antennas which are not attached or should + * not be used for receiving. If an antenna is not selected in this bitmap + * the hardware should not be configured to receive on this antenna. 
+ * For a more detailed description see @NL80211_ATTR_WIPHY_ANTENNA_TX.
+ *
+ * @NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX: Bitmap of antennas which are available
+ * for configuration as TX antennas via the above parameters.
+ *
+ * @NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX: Bitmap of antennas which are available
+ * for configuration as RX antennas via the above parameters.
+ *
+ * @NL80211_ATTR_MCAST_RATE: Multicast tx rate (in 100 kbps) for IBSS
+ *
+ * @NL80211_ATTR_OFFCHANNEL_TX_OK: For management frame TX, the frame may be
+ * transmitted on another channel when the channel given doesn't match
+ * the current channel. If the current channel doesn't match and this
+ * flag isn't set, the frame will be rejected. This is also used as an
+ * nl80211 capability flag.
+ *
+ * @NL80211_ATTR_BSS_HT_OPMODE: HT operation mode (u16)
+ *
+ * @NL80211_ATTR_KEY_DEFAULT_TYPES: A nested attribute containing flags
+ * attributes, specifying what a key should be set as default as.
+ * See &enum nl80211_key_default_types.
+ *
+ * @NL80211_ATTR_MESH_SETUP: Optional mesh setup parameters. These cannot be
+ * changed once the mesh is active.
+ * @NL80211_ATTR_MESH_CONFIG: Mesh configuration parameters, a nested attribute
+ * containing attributes from &enum nl80211_meshconf_params.
+ * @NL80211_ATTR_SUPPORT_MESH_AUTH: Currently, this means the underlying driver
+ * allows auth frames in a mesh to be passed to userspace for processing via
+ * the @NL80211_MESH_SETUP_USERSPACE_AUTH flag.
+ * @NL80211_ATTR_STA_PLINK_STATE: The state of a mesh peer link as
+ * defined in &enum nl80211_plink_state. Used when userspace is
+ * driving the peer link management state machine.
+ * @NL80211_MESH_SETUP_USERSPACE_AMPE must be enabled.
+ *
+ * @NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED: indicates, as part of the wiphy
+ * capabilities, the supported WoWLAN triggers
+ * @NL80211_ATTR_WOWLAN_TRIGGERS: used by %NL80211_CMD_SET_WOWLAN to
+ * indicate which WoW triggers should be enabled. This is also
+ * used by %NL80211_CMD_GET_WOWLAN to get the currently enabled WoWLAN
+ * triggers.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_INTERVAL: Interval between scheduled scan
+ * cycles, in msecs.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_MATCH: Nested attribute with one or more
+ * sets of attributes to match during scheduled scans. Only BSSs
+ * that match any of the sets will be reported. These are
+ * pass-thru filter rules.
+ * For a match to succeed, the BSS must match all attributes of a
+ * set. Since not all hardware supports matching all types of
+ * attributes, there is no guarantee that the reported BSSs are
+ * fully complying with the match sets and userspace needs to be
+ * able to ignore them by itself.
+ * Thus, the implementation is somewhat hardware-dependent, but
+ * this is only an optimization and the userspace application
+ * needs to handle all the non-filtered results anyway.
+ * If the match attributes don't make sense when combined with
+ * the values passed in @NL80211_ATTR_SCAN_SSIDS (e.g. if an SSID
+ * is included in the probe request, but the match attributes
+ * will never let it go through), -EINVAL may be returned.
+ * If omitted, no filtering is done.
+ *
+ * @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported
+ * interface combinations. Each nested item contains the attributes
+ * defined in &enum nl80211_if_combination_attrs.
+ * @NL80211_ATTR_SOFTWARE_IFTYPES: Nested attribute (just like
+ * %NL80211_ATTR_SUPPORTED_IFTYPES) containing the interface types that
+ * are managed in software: interfaces of these types aren't subject to
+ * any restrictions in their number or combinations.
+ *
+ * @NL80211_ATTR_REKEY_DATA: nested attribute containing the information
+ * necessary for GTK rekeying in the device, see &enum nl80211_rekey_data.
+ *
+ * @NL80211_ATTR_SCAN_SUPP_RATES: rates per band to be advertised as supported in scan,
+ * nested array attribute containing an entry for each band, with the entry
+ * being a list of supported rates as defined by IEEE 802.11 7.3.2.2 but
+ * without the length restriction (at most %NL80211_MAX_SUPP_RATES).
+ *
+ * @NL80211_ATTR_HIDDEN_SSID: indicates whether the SSID is to be hidden from
+ * Beacon and Probe Response frames (when responding to a wildcard Probe
+ * Request); see &enum nl80211_hidden_ssid, represented as a u32
+ *
+ * @NL80211_ATTR_IE_PROBE_RESP: Information element(s) for Probe Response frames.
+ * This is used with %NL80211_CMD_NEW_BEACON and %NL80211_CMD_SET_BEACON to
+ * provide extra IEs (e.g., WPS/P2P IE) into Probe Response frames when the
+ * driver (or firmware) replies to Probe Request frames.
+ * @NL80211_ATTR_IE_ASSOC_RESP: Information element(s) for (Re)Association
+ * Response frames. This is used with %NL80211_CMD_NEW_BEACON and
+ * %NL80211_CMD_SET_BEACON to provide extra IEs (e.g., WPS/P2P IE) into
+ * (Re)Association Response frames when the driver (or firmware) replies to
+ * (Re)Association Request frames.
+ *
+ * @NL80211_ATTR_STA_WME: Nested attribute containing the WME configuration
+ * of the station, see &enum nl80211_sta_wme_attr.
+ * @NL80211_ATTR_SUPPORT_AP_UAPSD: the device supports U-APSD when working
+ * as AP.
+ *
+ * @NL80211_ATTR_ROAM_SUPPORT: Indicates whether the firmware is capable of
+ * roaming to another AP in the same ESS if the signal level is low.
+ *
+ * @NL80211_ATTR_PMKSA_CANDIDATE: Nested attribute containing the PMKSA caching
+ * candidate information, see &enum nl80211_pmksa_candidate_attr.
+ *
+ * @NL80211_ATTR_TX_NO_CCK_RATE: Indicates whether to use CCK rates or not
+ * for management frame transmission. User space applications use this
+ * attribute to keep P2P probe/action frames from being transmitted at
+ * CCK rates in the 2.4 GHz band.
+ * This attribute is used with %NL80211_CMD_TRIGGER_SCAN and
+ * %NL80211_CMD_FRAME commands.
+ *
+ * @NL80211_ATTR_TDLS_ACTION: Low level TDLS action code (e.g. link setup
+ * request, link setup confirm, link teardown, etc.). Values are
+ * described in the TDLS (802.11z) specification.
+ * @NL80211_ATTR_TDLS_DIALOG_TOKEN: Non-zero token for uniquely identifying a
+ * TDLS conversation between two devices.
+ * @NL80211_ATTR_TDLS_OPERATION: High level TDLS operation; see
+ * &enum nl80211_tdls_operation, represented as a u8.
+ * @NL80211_ATTR_TDLS_SUPPORT: A flag indicating the device can operate
+ * as a TDLS peer station.
+ * @NL80211_ATTR_TDLS_EXTERNAL_SETUP: The TDLS discovery/setup and teardown
+ * procedures should be performed by sending TDLS packets via
+ * %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be
+ * used for asking the driver to perform a TDLS operation.
+ *
+ * @NL80211_ATTR_DEVICE_AP_SME: This u32 attribute may be listed for devices
+ * that have AP support to indicate that they have the AP SME integrated
+ * with support for the features listed in this attribute, see
+ * &enum nl80211_ap_sme_features.
+ *
+ * @NL80211_ATTR_DONT_WAIT_FOR_ACK: Used with %NL80211_CMD_FRAME, this tells
+ * the driver to not wait for an acknowledgement. Note that due to this,
+ * it will also not give a status callback nor return a cookie. This is
+ * mostly useful for probe responses to save airtime.
+ *
+ * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from
+ * &enum nl80211_feature_flags and is advertised in wiphy information.
+ * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe
+ * requests while operating in AP mode.
+ * This attribute holds a bitmap of the supported protocols for
+ * offloading (see &enum nl80211_probe_resp_offload_support_attr).
+ *
+ * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire
+ * probe-response frame. The DA field in the 802.11 header is zeroed out,
+ * to be filled by the FW.
+ * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable
+ * this feature. Currently, only supported in mac80211 drivers.
+ * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify the bits of
+ * %NL80211_ATTR_HT_CAPABILITY to which attention should be paid.
+ * Currently, only mac80211 NICs support this feature.
+ * The values that may be configured are:
+ * MCS rates, MAX-AMSDU, HT-20-40, HT_CAP_SGI_40,
+ * AMPDU density and AMPDU factor.
+ * All values are treated as suggestions and may be ignored
+ * by the driver as required. The actual values may be seen in
+ * the station debugfs ht_caps file.
+ *
+ * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country
+ * abides by when initiating radiation on DFS channels. A country maps
+ * to one DFS region.
+ *
+ * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of
+ * up to 16 TIDs.
+ *
+ * @NL80211_ATTR_INACTIVITY_TIMEOUT: timeout value in seconds; this can be
+ * used by drivers that have the MLME in firmware and cannot report
+ * per-station tx/rx activity, in order to free up the station entry from
+ * the list. This needs to be used when the driver advertises the
+ * capability to time out stations.
+ *
+ * @NL80211_ATTR_RX_SIGNAL_DBM: signal strength in dBm (as a 32-bit int);
+ * this attribute is (depending on the driver capabilities) added to
+ * received frames indicated with %NL80211_CMD_FRAME.
+ *
+ * @NL80211_ATTR_BG_SCAN_PERIOD: Background scan period in seconds
+ * or 0 to disable background scan.
+ *
+ * @NL80211_ATTR_MAX: highest attribute number currently defined
+ * @__NL80211_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_attrs {
+/* don't change the order or add anything between, this is ABI!
*/ + NL80211_ATTR_UNSPEC, + + NL80211_ATTR_WIPHY, + NL80211_ATTR_WIPHY_NAME, + + NL80211_ATTR_IFINDEX, + NL80211_ATTR_IFNAME, + NL80211_ATTR_IFTYPE, + + NL80211_ATTR_MAC, + + NL80211_ATTR_KEY_DATA, + NL80211_ATTR_KEY_IDX, + NL80211_ATTR_KEY_CIPHER, + NL80211_ATTR_KEY_SEQ, + NL80211_ATTR_KEY_DEFAULT, + + NL80211_ATTR_BEACON_INTERVAL, + NL80211_ATTR_DTIM_PERIOD, + NL80211_ATTR_BEACON_HEAD, + NL80211_ATTR_BEACON_TAIL, + + NL80211_ATTR_STA_AID, + NL80211_ATTR_STA_FLAGS, + NL80211_ATTR_STA_LISTEN_INTERVAL, + NL80211_ATTR_STA_SUPPORTED_RATES, + NL80211_ATTR_STA_VLAN, + NL80211_ATTR_STA_INFO, + + NL80211_ATTR_WIPHY_BANDS, + + NL80211_ATTR_MNTR_FLAGS, + + NL80211_ATTR_MESH_ID, + NL80211_ATTR_STA_PLINK_ACTION, + NL80211_ATTR_MPATH_NEXT_HOP, + NL80211_ATTR_MPATH_INFO, + + NL80211_ATTR_BSS_CTS_PROT, + NL80211_ATTR_BSS_SHORT_PREAMBLE, + NL80211_ATTR_BSS_SHORT_SLOT_TIME, + + NL80211_ATTR_HT_CAPABILITY, + + NL80211_ATTR_SUPPORTED_IFTYPES, + + NL80211_ATTR_REG_ALPHA2, + NL80211_ATTR_REG_RULES, + + NL80211_ATTR_MESH_CONFIG, + + NL80211_ATTR_BSS_BASIC_RATES, + + NL80211_ATTR_WIPHY_TXQ_PARAMS, + NL80211_ATTR_WIPHY_FREQ, + NL80211_ATTR_WIPHY_CHANNEL_TYPE, + + NL80211_ATTR_KEY_DEFAULT_MGMT, + + NL80211_ATTR_MGMT_SUBTYPE, + NL80211_ATTR_IE, + + NL80211_ATTR_MAX_NUM_SCAN_SSIDS, + + NL80211_ATTR_SCAN_FREQUENCIES, + NL80211_ATTR_SCAN_SSIDS, + NL80211_ATTR_GENERATION, /* replaces old SCAN_GENERATION */ + NL80211_ATTR_BSS, + + NL80211_ATTR_REG_INITIATOR, + NL80211_ATTR_REG_TYPE, + + NL80211_ATTR_SUPPORTED_COMMANDS, + + NL80211_ATTR_FRAME, + NL80211_ATTR_SSID, + NL80211_ATTR_AUTH_TYPE, + NL80211_ATTR_REASON_CODE, + + NL80211_ATTR_KEY_TYPE, + + NL80211_ATTR_MAX_SCAN_IE_LEN, + NL80211_ATTR_CIPHER_SUITES, + + NL80211_ATTR_FREQ_BEFORE, + NL80211_ATTR_FREQ_AFTER, + + NL80211_ATTR_FREQ_FIXED, + + + NL80211_ATTR_WIPHY_RETRY_SHORT, + NL80211_ATTR_WIPHY_RETRY_LONG, + NL80211_ATTR_WIPHY_FRAG_THRESHOLD, + NL80211_ATTR_WIPHY_RTS_THRESHOLD, + + NL80211_ATTR_TIMED_OUT, + + NL80211_ATTR_USE_MFP, + + NL80211_ATTR_STA_FLAGS2, + + NL80211_ATTR_CONTROL_PORT, + + NL80211_ATTR_TESTDATA, + + NL80211_ATTR_PRIVACY, + + NL80211_ATTR_DISCONNECTED_BY_AP, + NL80211_ATTR_STATUS_CODE, + + NL80211_ATTR_CIPHER_SUITES_PAIRWISE, + NL80211_ATTR_CIPHER_SUITE_GROUP, + NL80211_ATTR_WPA_VERSIONS, + NL80211_ATTR_AKM_SUITES, + + NL80211_ATTR_REQ_IE, + NL80211_ATTR_RESP_IE, + + NL80211_ATTR_PREV_BSSID, + + NL80211_ATTR_KEY, + NL80211_ATTR_KEYS, + + NL80211_ATTR_PID, + + NL80211_ATTR_4ADDR, + + NL80211_ATTR_SURVEY_INFO, + + NL80211_ATTR_PMKID, + NL80211_ATTR_MAX_NUM_PMKIDS, + + NL80211_ATTR_DURATION, + + NL80211_ATTR_COOKIE, + + NL80211_ATTR_WIPHY_COVERAGE_CLASS, + + NL80211_ATTR_TX_RATES, + + NL80211_ATTR_FRAME_MATCH, + + NL80211_ATTR_ACK, + + NL80211_ATTR_PS_STATE, + + NL80211_ATTR_CQM, + + NL80211_ATTR_LOCAL_STATE_CHANGE, + + NL80211_ATTR_AP_ISOLATE, + + NL80211_ATTR_WIPHY_TX_POWER_SETTING, + NL80211_ATTR_WIPHY_TX_POWER_LEVEL, + + NL80211_ATTR_TX_FRAME_TYPES, + NL80211_ATTR_RX_FRAME_TYPES, + NL80211_ATTR_FRAME_TYPE, + + NL80211_ATTR_CONTROL_PORT_ETHERTYPE, + NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, + + NL80211_ATTR_SUPPORT_IBSS_RSN, + + NL80211_ATTR_WIPHY_ANTENNA_TX, + NL80211_ATTR_WIPHY_ANTENNA_RX, + + NL80211_ATTR_MCAST_RATE, + + NL80211_ATTR_OFFCHANNEL_TX_OK, + + NL80211_ATTR_BSS_HT_OPMODE, + + NL80211_ATTR_KEY_DEFAULT_TYPES, + + NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, + + NL80211_ATTR_MESH_SETUP, + + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, + + NL80211_ATTR_SUPPORT_MESH_AUTH, + NL80211_ATTR_STA_PLINK_STATE, + + 
NL80211_ATTR_WOWLAN_TRIGGERS, + NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, + + NL80211_ATTR_SCHED_SCAN_INTERVAL, + + NL80211_ATTR_INTERFACE_COMBINATIONS, + NL80211_ATTR_SOFTWARE_IFTYPES, + + NL80211_ATTR_REKEY_DATA, + + NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, + NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, + + NL80211_ATTR_SCAN_SUPP_RATES, + + NL80211_ATTR_HIDDEN_SSID, + + NL80211_ATTR_IE_PROBE_RESP, + NL80211_ATTR_IE_ASSOC_RESP, + + NL80211_ATTR_STA_WME, + NL80211_ATTR_SUPPORT_AP_UAPSD, + + NL80211_ATTR_ROAM_SUPPORT, + + NL80211_ATTR_SCHED_SCAN_MATCH, + NL80211_ATTR_MAX_MATCH_SETS, + + NL80211_ATTR_PMKSA_CANDIDATE, + + NL80211_ATTR_TX_NO_CCK_RATE, + + NL80211_ATTR_TDLS_ACTION, + NL80211_ATTR_TDLS_DIALOG_TOKEN, + NL80211_ATTR_TDLS_OPERATION, + NL80211_ATTR_TDLS_SUPPORT, + NL80211_ATTR_TDLS_EXTERNAL_SETUP, + + NL80211_ATTR_DEVICE_AP_SME, + + NL80211_ATTR_DONT_WAIT_FOR_ACK, + + NL80211_ATTR_FEATURE_FLAGS, + + NL80211_ATTR_PROBE_RESP_OFFLOAD, + + NL80211_ATTR_PROBE_RESP, + + NL80211_ATTR_DFS_REGION, + + NL80211_ATTR_DISABLE_HT, + NL80211_ATTR_HT_CAPABILITY_MASK, + + NL80211_ATTR_NOACK_MAP, + + NL80211_ATTR_INACTIVITY_TIMEOUT, + + NL80211_ATTR_RX_SIGNAL_DBM, + + NL80211_ATTR_BG_SCAN_PERIOD, + + /* add attributes here, update the policy in nl80211.c */ + + __NL80211_ATTR_AFTER_LAST, + NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 +}; + +/* source-level API compatibility */ +#define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION +#define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG + +/* + * Allow user space programs to use #ifdef on new attributes by defining them + * here + */ +#define NL80211_CMD_CONNECT NL80211_CMD_CONNECT +#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY +#define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES +#define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS +#define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ +#define NL80211_ATTR_WIPHY_CHANNEL_TYPE NL80211_ATTR_WIPHY_CHANNEL_TYPE +#define NL80211_ATTR_MGMT_SUBTYPE NL80211_ATTR_MGMT_SUBTYPE +#define NL80211_ATTR_IE NL80211_ATTR_IE +#define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR +#define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE +#define NL80211_ATTR_FRAME NL80211_ATTR_FRAME +#define NL80211_ATTR_SSID NL80211_ATTR_SSID +#define NL80211_ATTR_AUTH_TYPE NL80211_ATTR_AUTH_TYPE +#define NL80211_ATTR_REASON_CODE NL80211_ATTR_REASON_CODE +#define NL80211_ATTR_CIPHER_SUITES_PAIRWISE NL80211_ATTR_CIPHER_SUITES_PAIRWISE +#define NL80211_ATTR_CIPHER_SUITE_GROUP NL80211_ATTR_CIPHER_SUITE_GROUP +#define NL80211_ATTR_WPA_VERSIONS NL80211_ATTR_WPA_VERSIONS +#define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES +#define NL80211_ATTR_KEY NL80211_ATTR_KEY +#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS +#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS + +#define NL80211_MAX_SUPP_RATES 32 +#define NL80211_MAX_SUPP_HT_RATES 77 +#define NL80211_MAX_SUPP_REG_RULES 32 +#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0 +#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16 +#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 +#define NL80211_HT_CAPABILITY_LEN 26 + +#define NL80211_MAX_NR_CIPHER_SUITES 5 +#define NL80211_MAX_NR_AKM_SUITES 2 + +/** + * enum nl80211_iftype - (virtual) interface types + * + * @NL80211_IFTYPE_UNSPECIFIED: unspecified type, driver decides + * @NL80211_IFTYPE_ADHOC: independent BSS member + * @NL80211_IFTYPE_STATION: managed BSS member + * @NL80211_IFTYPE_AP: access point + * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points; VLAN interfaces + * are a bit 
special in that they must always be tied to a pre-existing + * AP type interface. + * @NL80211_IFTYPE_WDS: wireless distribution interface + * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames + * @NL80211_IFTYPE_MESH_POINT: mesh point + * @NL80211_IFTYPE_P2P_CLIENT: P2P client + * @NL80211_IFTYPE_P2P_GO: P2P group owner + * @NL80211_IFTYPE_MAX: highest interface type number currently defined + * @NUM_NL80211_IFTYPES: number of defined interface types + * + * These values are used with the %NL80211_ATTR_IFTYPE + * to set the type of an interface. + * + */ +enum nl80211_iftype { + NL80211_IFTYPE_UNSPECIFIED, + NL80211_IFTYPE_ADHOC, + NL80211_IFTYPE_STATION, + NL80211_IFTYPE_AP, + NL80211_IFTYPE_AP_VLAN, + NL80211_IFTYPE_WDS, + NL80211_IFTYPE_MONITOR, + NL80211_IFTYPE_MESH_POINT, + NL80211_IFTYPE_P2P_CLIENT, + NL80211_IFTYPE_P2P_GO, + + /* keep last */ + NUM_NL80211_IFTYPES, + NL80211_IFTYPE_MAX = NUM_NL80211_IFTYPES - 1 +}; + +/** + * enum nl80211_sta_flags - station flags + * + * Station flags. When a station is added to an AP interface, it is + * assumed to be already associated (and hence authenticated.) + * + * @__NL80211_STA_FLAG_INVALID: attribute number 0 is reserved + * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) + * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames + * with short barker preamble + * @NL80211_STA_FLAG_WME: station is WME/QoS capable + * @NL80211_STA_FLAG_MFP: station uses management frame protection + * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated + * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer -- this flag should + * only be used in managed mode (even in the flags mask). Note that the + * flag can't be changed, it is only valid while adding a station, and + * attempts to change it will silently be ignored (rather than rejected + * as errors.) + * @NL80211_STA_FLAG_MAX: highest station flag number currently defined + * @__NL80211_STA_FLAG_AFTER_LAST: internal use + */ +enum nl80211_sta_flags { + __NL80211_STA_FLAG_INVALID, + NL80211_STA_FLAG_AUTHORIZED, + NL80211_STA_FLAG_SHORT_PREAMBLE, + NL80211_STA_FLAG_WME, + NL80211_STA_FLAG_MFP, + NL80211_STA_FLAG_AUTHENTICATED, + NL80211_STA_FLAG_TDLS_PEER, + + /* keep last */ + __NL80211_STA_FLAG_AFTER_LAST, + NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1 +}; + +/** + * struct nl80211_sta_flag_update - station flags mask/set + * @mask: mask of station flags to set + * @set: which values to set them to + * + * Both mask and set contain bits as per &enum nl80211_sta_flags. + */ +struct nl80211_sta_flag_update { + __u32 mask; + __u32 set; +} __attribute__((packed)); + +/** + * enum nl80211_rate_info - bitrate information + * + * These attribute types are used with %NL80211_STA_INFO_TXRATE + * when getting information about the bitrate of a station. 
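As an illustration of the mask/set pairing in &struct nl80211_sta_flag_update above: only the bits present in @mask are touched, and their new value is taken from @set. A minimal userspace-style sketch (the helper names are inventions of the example; only the nl80211 symbols are real):

#include <linux/nl80211.h>

/* Apply a station flag update to a locally tracked flag word: bits
 * outside upd->mask keep their old value, bits inside upd->mask take
 * the value given in upd->set.
 */
static __u32 apply_sta_flags(__u32 current_flags,
                             const struct nl80211_sta_flag_update *upd)
{
        return (current_flags & ~upd->mask) | (upd->set & upd->mask);
}

/* Example: mark a station authorized (open its 802.1X port) without
 * touching any of its other flags.
 */
static __u32 authorize_station(__u32 current_flags)
{
        struct nl80211_sta_flag_update upd = {
                .mask = 1 << NL80211_STA_FLAG_AUTHORIZED,
                .set  = 1 << NL80211_STA_FLAG_AUTHORIZED,
        };

        return apply_sta_flags(current_flags, &upd);
}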
+ * + * @__NL80211_RATE_INFO_INVALID: attribute number 0 is reserved + * @NL80211_RATE_INFO_BITRATE: total bitrate (u16, 100kbit/s) + * @NL80211_RATE_INFO_MCS: mcs index for 802.11n (u8) + * @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 Mhz dualchannel bitrate + * @NL80211_RATE_INFO_SHORT_GI: 400ns guard interval + * @NL80211_RATE_INFO_MAX: highest rate_info number currently defined + * @__NL80211_RATE_INFO_AFTER_LAST: internal use + */ +enum nl80211_rate_info { + __NL80211_RATE_INFO_INVALID, + NL80211_RATE_INFO_BITRATE, + NL80211_RATE_INFO_MCS, + NL80211_RATE_INFO_40_MHZ_WIDTH, + NL80211_RATE_INFO_SHORT_GI, + + /* keep last */ + __NL80211_RATE_INFO_AFTER_LAST, + NL80211_RATE_INFO_MAX = __NL80211_RATE_INFO_AFTER_LAST - 1 +}; + +/** + * enum nl80211_sta_bss_param - BSS information collected by STA + * + * These attribute types are used with %NL80211_STA_INFO_BSS_PARAM + * when getting information about the bitrate of a station. + * + * @__NL80211_STA_BSS_PARAM_INVALID: attribute number 0 is reserved + * @NL80211_STA_BSS_PARAM_CTS_PROT: whether CTS protection is enabled (flag) + * @NL80211_STA_BSS_PARAM_SHORT_PREAMBLE: whether short preamble is enabled + * (flag) + * @NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME: whether short slot time is enabled + * (flag) + * @NL80211_STA_BSS_PARAM_DTIM_PERIOD: DTIM period for beaconing (u8) + * @NL80211_STA_BSS_PARAM_BEACON_INTERVAL: Beacon interval (u16) + * @NL80211_STA_BSS_PARAM_MAX: highest sta_bss_param number currently defined + * @__NL80211_STA_BSS_PARAM_AFTER_LAST: internal use + */ +enum nl80211_sta_bss_param { + __NL80211_STA_BSS_PARAM_INVALID, + NL80211_STA_BSS_PARAM_CTS_PROT, + NL80211_STA_BSS_PARAM_SHORT_PREAMBLE, + NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME, + NL80211_STA_BSS_PARAM_DTIM_PERIOD, + NL80211_STA_BSS_PARAM_BEACON_INTERVAL, + + /* keep last */ + __NL80211_STA_BSS_PARAM_AFTER_LAST, + NL80211_STA_BSS_PARAM_MAX = __NL80211_STA_BSS_PARAM_AFTER_LAST - 1 +}; + +/** + * enum nl80211_sta_info - station information + * + * These attribute types are used with %NL80211_ATTR_STA_INFO + * when getting information about a station. + * + * @__NL80211_STA_INFO_INVALID: attribute number 0 is reserved + * @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs) + * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station) + * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station) + * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm) + * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute + * containing info as possible, see &enum nl80211_rate_info + * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station) + * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this + * station) + * @NL80211_STA_INFO_TX_RETRIES: total retries (u32, to this station) + * @NL80211_STA_INFO_TX_FAILED: total failed packets (u32, to this station) + * @NL80211_STA_INFO_SIGNAL_AVG: signal strength average (u8, dBm) + * @NL80211_STA_INFO_LLID: the station's mesh LLID + * @NL80211_STA_INFO_PLID: the station's mesh PLID + * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station + * (see %enum nl80211_plink_state) + * @NL80211_STA_INFO_RX_BITRATE: last unicast data frame rx rate, nested + * attribute, like NL80211_STA_INFO_TX_BITRATE. 
+ * @NL80211_STA_INFO_BSS_PARAM: current station's view of BSS, nested attribute + * containing info as possible, see &enum nl80211_sta_bss_param + * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected + * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update. + * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32) + * @__NL80211_STA_INFO_AFTER_LAST: internal + * @NL80211_STA_INFO_MAX: highest possible station info attribute + */ +enum nl80211_sta_info { + __NL80211_STA_INFO_INVALID, + NL80211_STA_INFO_INACTIVE_TIME, + NL80211_STA_INFO_RX_BYTES, + NL80211_STA_INFO_TX_BYTES, + NL80211_STA_INFO_LLID, + NL80211_STA_INFO_PLID, + NL80211_STA_INFO_PLINK_STATE, + NL80211_STA_INFO_SIGNAL, + NL80211_STA_INFO_TX_BITRATE, + NL80211_STA_INFO_RX_PACKETS, + NL80211_STA_INFO_TX_PACKETS, + NL80211_STA_INFO_TX_RETRIES, + NL80211_STA_INFO_TX_FAILED, + NL80211_STA_INFO_SIGNAL_AVG, + NL80211_STA_INFO_RX_BITRATE, + NL80211_STA_INFO_BSS_PARAM, + NL80211_STA_INFO_CONNECTED_TIME, + NL80211_STA_INFO_STA_FLAGS, + NL80211_STA_INFO_BEACON_LOSS, + + /* keep last */ + __NL80211_STA_INFO_AFTER_LAST, + NL80211_STA_INFO_MAX = __NL80211_STA_INFO_AFTER_LAST - 1 +}; + +/** + * enum nl80211_mpath_flags - nl80211 mesh path flags + * + * @NL80211_MPATH_FLAG_ACTIVE: the mesh path is active + * @NL80211_MPATH_FLAG_RESOLVING: the mesh path discovery process is running + * @NL80211_MPATH_FLAG_SN_VALID: the mesh path contains a valid SN + * @NL80211_MPATH_FLAG_FIXED: the mesh path has been manually set + * @NL80211_MPATH_FLAG_RESOLVED: the mesh path discovery process succeeded + */ +enum nl80211_mpath_flags { + NL80211_MPATH_FLAG_ACTIVE = 1<<0, + NL80211_MPATH_FLAG_RESOLVING = 1<<1, + NL80211_MPATH_FLAG_SN_VALID = 1<<2, + NL80211_MPATH_FLAG_FIXED = 1<<3, + NL80211_MPATH_FLAG_RESOLVED = 1<<4, +}; + +/** + * enum nl80211_mpath_info - mesh path information + * + * These attribute types are used with %NL80211_ATTR_MPATH_INFO when getting + * information about a mesh path. 
+ * + * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved + * @NL80211_MPATH_INFO_FRAME_QLEN: number of queued frames for this destination + * @NL80211_MPATH_INFO_SN: destination sequence number + * @NL80211_MPATH_INFO_METRIC: metric (cost) of this mesh path + * @NL80211_MPATH_INFO_EXPTIME: expiration time for the path, in msec from now + * @NL80211_MPATH_INFO_FLAGS: mesh path flags, enumerated in + * &enum nl80211_mpath_flags; + * @NL80211_MPATH_INFO_DISCOVERY_TIMEOUT: total path discovery timeout, in msec + * @NL80211_MPATH_INFO_DISCOVERY_RETRIES: mesh path discovery retries + * @NL80211_MPATH_INFO_MAX: highest mesh path information attribute number + * currently defind + * @__NL80211_MPATH_INFO_AFTER_LAST: internal use + */ +enum nl80211_mpath_info { + __NL80211_MPATH_INFO_INVALID, + NL80211_MPATH_INFO_FRAME_QLEN, + NL80211_MPATH_INFO_SN, + NL80211_MPATH_INFO_METRIC, + NL80211_MPATH_INFO_EXPTIME, + NL80211_MPATH_INFO_FLAGS, + NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, + NL80211_MPATH_INFO_DISCOVERY_RETRIES, + + /* keep last */ + __NL80211_MPATH_INFO_AFTER_LAST, + NL80211_MPATH_INFO_MAX = __NL80211_MPATH_INFO_AFTER_LAST - 1 +}; + +/** + * enum nl80211_band_attr - band attributes + * @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved + * @NL80211_BAND_ATTR_FREQS: supported frequencies in this band, + * an array of nested frequency attributes + * @NL80211_BAND_ATTR_RATES: supported bitrates in this band, + * an array of nested bitrate attributes + * @NL80211_BAND_ATTR_HT_MCS_SET: 16-byte attribute containing the MCS set as + * defined in 802.11n + * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE + * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n + * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n + * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined + * @__NL80211_BAND_ATTR_AFTER_LAST: internal use + */ +enum nl80211_band_attr { + __NL80211_BAND_ATTR_INVALID, + NL80211_BAND_ATTR_FREQS, + NL80211_BAND_ATTR_RATES, + + NL80211_BAND_ATTR_HT_MCS_SET, + NL80211_BAND_ATTR_HT_CAPA, + NL80211_BAND_ATTR_HT_AMPDU_FACTOR, + NL80211_BAND_ATTR_HT_AMPDU_DENSITY, + + /* keep last */ + __NL80211_BAND_ATTR_AFTER_LAST, + NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1 +}; + +#define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA + +/** + * enum nl80211_frequency_attr - frequency attributes + * @__NL80211_FREQUENCY_ATTR_INVALID: attribute number 0 is reserved + * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz + * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current + * regulatory domain. + * @NL80211_FREQUENCY_ATTR_PASSIVE_SCAN: Only passive scanning is + * permitted on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_NO_IBSS: IBSS networks are not permitted + * on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory + * on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm + * (100 * dBm). 
+ * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
+ * currently defined
+ * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_frequency_attr {
+ __NL80211_FREQUENCY_ATTR_INVALID,
+ NL80211_FREQUENCY_ATTR_FREQ,
+ NL80211_FREQUENCY_ATTR_DISABLED,
+ NL80211_FREQUENCY_ATTR_PASSIVE_SCAN,
+ NL80211_FREQUENCY_ATTR_NO_IBSS,
+ NL80211_FREQUENCY_ATTR_RADAR,
+ NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
+
+ /* keep last */
+ __NL80211_FREQUENCY_ATTR_AFTER_LAST,
+ NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1
+};
+
+#define NL80211_FREQUENCY_ATTR_MAX_TX_POWER NL80211_FREQUENCY_ATTR_MAX_TX_POWER
+
+/**
+ * enum nl80211_bitrate_attr - bitrate attributes
+ * @__NL80211_BITRATE_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps
+ * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported
+ * in 2.4 GHz band.
+ * @NL80211_BITRATE_ATTR_MAX: highest bitrate attribute number
+ * currently defined
+ * @__NL80211_BITRATE_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_bitrate_attr {
+ __NL80211_BITRATE_ATTR_INVALID,
+ NL80211_BITRATE_ATTR_RATE,
+ NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE,
+
+ /* keep last */
+ __NL80211_BITRATE_ATTR_AFTER_LAST,
+ NL80211_BITRATE_ATTR_MAX = __NL80211_BITRATE_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_reg_initiator - Indicates the initiator of a reg domain request
+ * @NL80211_REGDOM_SET_BY_CORE: Core queried CRDA for a dynamic world
+ * regulatory domain.
+ * @NL80211_REGDOM_SET_BY_USER: User asked the wireless core to set the
+ * regulatory domain.
+ * @NL80211_REGDOM_SET_BY_DRIVER: a wireless driver has hinted to the
+ * wireless core that it thinks it knows the regulatory domain we should be in.
+ * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an
+ * 802.11 country information element with regulatory information it
+ * thinks we should consider. cfg80211 only processes the country
+ * code from the IE, and relies on the regulatory domain information
+ * structure passed by userspace (CRDA) from our wireless-regdb.
+ * If a channel is enabled but the country code indicates it should
+ * be disabled, we disable the channel and re-enable it upon disassociation.
+ */
+enum nl80211_reg_initiator {
+ NL80211_REGDOM_SET_BY_CORE,
+ NL80211_REGDOM_SET_BY_USER,
+ NL80211_REGDOM_SET_BY_DRIVER,
+ NL80211_REGDOM_SET_BY_COUNTRY_IE,
+};
+
+/**
+ * enum nl80211_reg_type - specifies the type of regulatory domain
+ * @NL80211_REGDOM_TYPE_COUNTRY: the regulatory domain set is one that pertains
+ * to a specific country. When this is set, you can count on the
+ * ISO / IEC 3166 alpha2 country code being valid.
+ * @NL80211_REGDOM_TYPE_WORLD: the regulatory domain set is the world regulatory
+ * domain.
+ * @NL80211_REGDOM_TYPE_CUSTOM_WORLD: the regulatory domain set is a custom
+ * driver-specific world regulatory domain. These do not apply system-wide
+ * and are only applicable to the individual devices which have requested
+ * them to be applied.
+ * @NL80211_REGDOM_TYPE_INTERSECTION: the regulatory domain set is the product
+ * of an intersection between two regulatory domains -- the previously
+ * set regulatory domain on the system and the last accepted regulatory
+ * domain request to be processed.
+ */
+enum nl80211_reg_type {
+ NL80211_REGDOM_TYPE_COUNTRY,
+ NL80211_REGDOM_TYPE_WORLD,
+ NL80211_REGDOM_TYPE_CUSTOM_WORLD,
+ NL80211_REGDOM_TYPE_INTERSECTION,
+};
+
+/**
+ * enum nl80211_reg_rule_attr - regulatory rule attributes
+ * @__NL80211_REG_RULE_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional
+ * considerations for a given frequency range. These are the
+ * &enum nl80211_reg_rule_flags.
+ * @NL80211_ATTR_FREQ_RANGE_START: starting frequency for the regulatory
+ * rule in kHz. This is not a center frequency but an actual regulatory
+ * band edge.
+ * @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule
+ * in kHz. This is not a center frequency but an actual regulatory
+ * band edge.
+ * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this
+ * frequency range, in kHz.
+ * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain
+ * for a given frequency range. The value is in mBi (100 * dBi).
+ * If you don't have one then don't send this.
+ * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for
+ * a given frequency range. The value is in mBm (100 * dBm).
+ * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number
+ * currently defined
+ * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_reg_rule_attr {
+ __NL80211_REG_RULE_ATTR_INVALID,
+ NL80211_ATTR_REG_RULE_FLAGS,
+
+ NL80211_ATTR_FREQ_RANGE_START,
+ NL80211_ATTR_FREQ_RANGE_END,
+ NL80211_ATTR_FREQ_RANGE_MAX_BW,
+
+ NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
+ NL80211_ATTR_POWER_RULE_MAX_EIRP,
+
+ /* keep last */
+ __NL80211_REG_RULE_ATTR_AFTER_LAST,
+ NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_sched_scan_match_attr - scheduled scan match attributes
+ * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
+ * only report BSSs with a matching SSID.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
+ * attribute number currently defined
+ * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_sched_scan_match_attr {
+ __NL80211_SCHED_SCAN_MATCH_ATTR_INVALID,
+
+ NL80211_ATTR_SCHED_SCAN_MATCH_SSID,
+
+ /* keep last */
+ __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
+ NL80211_SCHED_SCAN_MATCH_ATTR_MAX =
+ __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_reg_rule_flags - regulatory rule flags
+ *
+ * @NL80211_RRF_NO_OFDM: OFDM modulation not allowed
+ * @NL80211_RRF_NO_CCK: CCK modulation not allowed
+ * @NL80211_RRF_NO_INDOOR: indoor operation not allowed
+ * @NL80211_RRF_NO_OUTDOOR: outdoor operation not allowed
+ * @NL80211_RRF_DFS: DFS support is required for operation
+ * @NL80211_RRF_PTP_ONLY: this is only for Point To Point links
+ * @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links
+ * @NL80211_RRF_PASSIVE_SCAN: passive scan is required
+ * @NL80211_RRF_NO_IBSS: no IBSS is allowed
+ */
+enum nl80211_reg_rule_flags {
+ NL80211_RRF_NO_OFDM = 1<<0,
+ NL80211_RRF_NO_CCK = 1<<1,
+ NL80211_RRF_NO_INDOOR = 1<<2,
+ NL80211_RRF_NO_OUTDOOR = 1<<3,
+ NL80211_RRF_DFS = 1<<4,
+ NL80211_RRF_PTP_ONLY = 1<<5,
+ NL80211_RRF_PTMP_ONLY = 1<<6,
+ NL80211_RRF_PASSIVE_SCAN = 1<<7,
+ NL80211_RRF_NO_IBSS = 1<<8,
+};
+
+/**
+ * enum nl80211_dfs_regions - regulatory DFS regions
+ *
+ * @NL80211_DFS_UNSET: Country has no DFS master region specified
+ * @NL80211_DFS_FCC: Country follows DFS master rules from FCC
+ * @NL80211_DFS_ETSI: Country follows DFS master rules from ETSI
+ * @NL80211_DFS_JP: Country follows DFS master rules from JP/MKK/Telec
+ */
+enum nl80211_dfs_regions {
+ NL80211_DFS_UNSET = 0,
+ NL80211_DFS_FCC = 1,
+ NL80211_DFS_ETSI = 2,
+ NL80211_DFS_JP = 3,
+};
+
+/**
+ * enum nl80211_survey_info - survey information
+ *
+ * These attribute types are used with %NL80211_ATTR_SURVEY_INFO
+ * when getting information about a survey.
+ *
+ * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved
+ * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel
+ * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm)
+ * @NL80211_SURVEY_INFO_IN_USE: channel is currently being used
+ * @NL80211_SURVEY_INFO_CHANNEL_TIME: amount of time (in ms) that the radio
+ * spent on this channel
+ * @NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY: amount of time the primary
+ * channel was sensed busy (either due to activity or energy detect)
+ * @NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY: amount of time the extension
+ * channel was sensed busy
+ * @NL80211_SURVEY_INFO_CHANNEL_TIME_RX: amount of time the radio spent
+ * receiving data
+ * @NL80211_SURVEY_INFO_CHANNEL_TIME_TX: amount of time the radio spent
+ * transmitting data
+ * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
+ * currently defined
+ * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
+ */
+enum nl80211_survey_info {
+ __NL80211_SURVEY_INFO_INVALID,
+ NL80211_SURVEY_INFO_FREQUENCY,
+ NL80211_SURVEY_INFO_NOISE,
+ NL80211_SURVEY_INFO_IN_USE,
+ NL80211_SURVEY_INFO_CHANNEL_TIME,
+ NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
+ NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
+ NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
+ NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
+
+ /* keep last */
+ __NL80211_SURVEY_INFO_AFTER_LAST,
+ NL80211_SURVEY_INFO_MAX = __NL80211_SURVEY_INFO_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_mntr_flags - monitor configuration flags
+ *
+ * Monitor configuration flags.
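For illustration only, the regulatory rule flags above are plain bit flags carried in %NL80211_ATTR_REG_RULE_FLAGS; a userspace consumer of regulatory rules might test them along these lines (a sketch, not part of the header):

#include <stdbool.h>
#include <linux/nl80211.h>

/* rule_flags is the u32 taken from NL80211_ATTR_REG_RULE_FLAGS. */
static bool rule_requires_passive_scan(__u32 rule_flags)
{
        return rule_flags & NL80211_RRF_PASSIVE_SCAN;
}

static bool rule_allows_ibss(__u32 rule_flags)
{
        return !(rule_flags & NL80211_RRF_NO_IBSS);
}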
+ * + * @__NL80211_MNTR_FLAG_INVALID: reserved + * + * @NL80211_MNTR_FLAG_FCSFAIL: pass frames with bad FCS + * @NL80211_MNTR_FLAG_PLCPFAIL: pass frames with bad PLCP + * @NL80211_MNTR_FLAG_CONTROL: pass control frames + * @NL80211_MNTR_FLAG_OTHER_BSS: disable BSSID filtering + * @NL80211_MNTR_FLAG_COOK_FRAMES: report frames after processing. + * overrides all other flags. + * + * @__NL80211_MNTR_FLAG_AFTER_LAST: internal use + * @NL80211_MNTR_FLAG_MAX: highest possible monitor flag + */ +enum nl80211_mntr_flags { + __NL80211_MNTR_FLAG_INVALID, + NL80211_MNTR_FLAG_FCSFAIL, + NL80211_MNTR_FLAG_PLCPFAIL, + NL80211_MNTR_FLAG_CONTROL, + NL80211_MNTR_FLAG_OTHER_BSS, + NL80211_MNTR_FLAG_COOK_FRAMES, + + /* keep last */ + __NL80211_MNTR_FLAG_AFTER_LAST, + NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1 +}; + +/** + * enum nl80211_meshconf_params - mesh configuration parameters + * + * Mesh configuration parameters. These can be changed while the mesh is + * active. + * + * @__NL80211_MESHCONF_INVALID: internal use + * + * @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in + * millisecond units, used by the Peer Link Open message + * + * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in + * millisecond units, used by the peer link management to close a peer link + * + * @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in + * millisecond units + * + * @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed + * on this mesh interface + * + * @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link + * open retries that can be sent to establish a new peer link instance in a + * mesh + * + * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh + * point. + * + * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically + * open peer links when we detect compatible mesh peers. + * + * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames + * containing a PREQ that an MP can send to a particular destination (path + * target) + * + * @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths + * (in milliseconds) + * + * @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait + * until giving up on a path discovery (in milliseconds) + * + * @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh + * points receiving a PREQ shall consider the forwarding information from the + * root to be valid. (TU = time unit) + * + * @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in + * TUs) during which an MP can send only one action frame containing a PREQ + * reference element + * + * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs) + * that it takes for an HWMP information element to propagate across the mesh + * + * @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not + * + * @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a + * source mesh point for path selection elements. + * + * @NL80211_MESHCONF_HWMP_RANN_INTERVAL: The interval of time (in TUs) between + * root announcements are transmitted. + * + * @NL80211_MESHCONF_GATE_ANNOUNCEMENTS: Advertise that this mesh station has + * access to a broader network beyond the MBSS. This is done via Root + * Announcement frames. 
+ * + * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in + * TUs) during which a mesh STA can send only one Action frame containing a + * PERR element. + * + * @NL80211_MESHCONF_FORWARDING: set Mesh STA as forwarding or non-forwarding + * or forwarding entity (default is TRUE - forwarding entity) + * + * @NL80211_MESHCONF_RSSI_THRESHOLD: RSSI threshold in dBm. This specifies the + * threshold for average signal strength of candidate station to establish + * a peer link. + * + * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute + * + * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use + */ +enum nl80211_meshconf_params { + __NL80211_MESHCONF_INVALID, + NL80211_MESHCONF_RETRY_TIMEOUT, + NL80211_MESHCONF_CONFIRM_TIMEOUT, + NL80211_MESHCONF_HOLDING_TIMEOUT, + NL80211_MESHCONF_MAX_PEER_LINKS, + NL80211_MESHCONF_MAX_RETRIES, + NL80211_MESHCONF_TTL, + NL80211_MESHCONF_AUTO_OPEN_PLINKS, + NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, + NL80211_MESHCONF_PATH_REFRESH_TIME, + NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, + NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, + NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, + NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, + NL80211_MESHCONF_HWMP_ROOTMODE, + NL80211_MESHCONF_ELEMENT_TTL, + NL80211_MESHCONF_HWMP_RANN_INTERVAL, + NL80211_MESHCONF_GATE_ANNOUNCEMENTS, + NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, + NL80211_MESHCONF_FORWARDING, + NL80211_MESHCONF_RSSI_THRESHOLD, + + /* keep last */ + __NL80211_MESHCONF_ATTR_AFTER_LAST, + NL80211_MESHCONF_ATTR_MAX = __NL80211_MESHCONF_ATTR_AFTER_LAST - 1 +}; + +/** + * enum nl80211_mesh_setup_params - mesh setup parameters + * + * Mesh setup parameters. These are used to start/join a mesh and cannot be + * changed while the mesh is active. + * + * @__NL80211_MESH_SETUP_INVALID: Internal use + * + * @NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL: Enable this option to use a + * vendor specific path selection algorithm or disable it to use the default + * HWMP. + * + * @NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC: Enable this option to use a + * vendor specific path metric or disable it to use the default Airtime + * metric. + * + * @NL80211_MESH_SETUP_IE: Information elements for this mesh, for instance, a + * robust security network ie, or a vendor specific information element that + * vendors will use to identify the path selection methods and metrics in use. + * + * @NL80211_MESH_SETUP_USERSPACE_AUTH: Enable this option if an authentication + * daemon will be authenticating mesh candidates. + * + * @NL80211_MESH_SETUP_USERSPACE_AMPE: Enable this option if an authentication + * daemon will be securing peer link frames. AMPE is a secured version of Mesh + * Peering Management (MPM) and is implemented with the assistance of a + * userspace daemon. When this flag is set, the kernel will send peer + * management frames to a userspace daemon that will implement AMPE + * functionality (security capabilities selection, key confirmation, and key + * management). When the flag is unset (default), the kernel can autonomously + * complete (unsecured) mesh peering without the need of a userspace daemon. 
+ * + * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number + * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use + */ +enum nl80211_mesh_setup_params { + __NL80211_MESH_SETUP_INVALID, + NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL, + NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC, + NL80211_MESH_SETUP_IE, + NL80211_MESH_SETUP_USERSPACE_AUTH, + NL80211_MESH_SETUP_USERSPACE_AMPE, + + /* keep last */ + __NL80211_MESH_SETUP_ATTR_AFTER_LAST, + NL80211_MESH_SETUP_ATTR_MAX = __NL80211_MESH_SETUP_ATTR_AFTER_LAST - 1 +}; + +/** + * enum nl80211_txq_attr - TX queue parameter attributes + * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved + * @NL80211_TXQ_ATTR_QUEUE: TX queue identifier (NL80211_TXQ_Q_*) + * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning + * disabled + * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form + * 2^n-1 in the range 1..32767] + * @NL80211_TXQ_ATTR_CWMAX: Maximum contention window [a value of the form + * 2^n-1 in the range 1..32767] + * @NL80211_TXQ_ATTR_AIFS: Arbitration interframe space [0..255] + * @__NL80211_TXQ_ATTR_AFTER_LAST: Internal + * @NL80211_TXQ_ATTR_MAX: Maximum TXQ attribute number + */ +enum nl80211_txq_attr { + __NL80211_TXQ_ATTR_INVALID, + NL80211_TXQ_ATTR_QUEUE, + NL80211_TXQ_ATTR_TXOP, + NL80211_TXQ_ATTR_CWMIN, + NL80211_TXQ_ATTR_CWMAX, + NL80211_TXQ_ATTR_AIFS, + + /* keep last */ + __NL80211_TXQ_ATTR_AFTER_LAST, + NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1 +}; + +enum nl80211_txq_q { + NL80211_TXQ_Q_VO, + NL80211_TXQ_Q_VI, + NL80211_TXQ_Q_BE, + NL80211_TXQ_Q_BK +}; + +enum nl80211_channel_type { + NL80211_CHAN_NO_HT, + NL80211_CHAN_HT20, + NL80211_CHAN_HT40MINUS, + NL80211_CHAN_HT40PLUS +}; + +/** + * enum nl80211_bss - netlink attributes for a BSS + * + * @__NL80211_BSS_INVALID: invalid + * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets) + * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) + * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) + * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) + * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16) + * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the + * raw information elements from the probe response/beacon (bin); + * if the %NL80211_BSS_BEACON_IES attribute is present, the IEs here are + * from a Probe Response frame; otherwise they are from a Beacon frame. + * However, if the driver does not indicate the source of the IEs, these + * IEs may be from either frame subtype. 
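Since %NL80211_BSS_INFORMATION_ELEMENTS (and %NL80211_BSS_BEACON_IES) carry the raw IE blob, consumers typically walk it as id/length/value triplets; a minimal illustrative sketch, not part of the header:

#include <stddef.h>

/* Find an information element by element ID in a raw IE buffer as
 * carried in NL80211_BSS_INFORMATION_ELEMENTS: each element is one ID
 * byte, one length byte, then 'length' bytes of data.
 */
static const unsigned char *find_ie(unsigned char eid,
                                    const unsigned char *ies, size_t len)
{
        while (len >= 2 && len >= 2 + (size_t)ies[1]) {
                if (ies[0] == eid)
                        return ies;        /* points at the ID byte */
                len -= 2 + ies[1];
                ies += 2 + ies[1];
        }
        return NULL;        /* not found or buffer truncated */
}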
+ * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon + * in mBm (100 * dBm) (s32) + * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon + * in unspecified units, scaled to 0..100 (u8) + * @NL80211_BSS_STATUS: status, if this BSS is "used" + * @NL80211_BSS_SEEN_MS_AGO: age of this BSS entry in ms + * @NL80211_BSS_BEACON_IES: binary attribute containing the raw information + * elements from a Beacon frame (bin); not present if no Beacon frame has + * yet been received + * @__NL80211_BSS_AFTER_LAST: internal + * @NL80211_BSS_MAX: highest BSS attribute + */ +enum nl80211_bss { + __NL80211_BSS_INVALID, + NL80211_BSS_BSSID, + NL80211_BSS_FREQUENCY, + NL80211_BSS_TSF, + NL80211_BSS_BEACON_INTERVAL, + NL80211_BSS_CAPABILITY, + NL80211_BSS_INFORMATION_ELEMENTS, + NL80211_BSS_SIGNAL_MBM, + NL80211_BSS_SIGNAL_UNSPEC, + NL80211_BSS_STATUS, + NL80211_BSS_SEEN_MS_AGO, + NL80211_BSS_BEACON_IES, + + /* keep last */ + __NL80211_BSS_AFTER_LAST, + NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1 +}; + +/** + * enum nl80211_bss_status - BSS "status" + * @NL80211_BSS_STATUS_AUTHENTICATED: Authenticated with this BSS. + * @NL80211_BSS_STATUS_ASSOCIATED: Associated with this BSS. + * @NL80211_BSS_STATUS_IBSS_JOINED: Joined to this IBSS. + * + * The BSS status is a BSS attribute in scan dumps, which + * indicates the status the interface has wrt. this BSS. + */ +enum nl80211_bss_status { + NL80211_BSS_STATUS_AUTHENTICATED, + NL80211_BSS_STATUS_ASSOCIATED, + NL80211_BSS_STATUS_IBSS_JOINED, +}; + +/** + * enum nl80211_auth_type - AuthenticationType + * + * @NL80211_AUTHTYPE_OPEN_SYSTEM: Open System authentication + * @NL80211_AUTHTYPE_SHARED_KEY: Shared Key authentication (WEP only) + * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r) + * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP) + * @__NL80211_AUTHTYPE_NUM: internal + * @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm + * @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by + * trying multiple times); this is invalid in netlink -- leave out + * the attribute for this on CONNECT commands. 
+ */ +enum nl80211_auth_type { + NL80211_AUTHTYPE_OPEN_SYSTEM, + NL80211_AUTHTYPE_SHARED_KEY, + NL80211_AUTHTYPE_FT, + NL80211_AUTHTYPE_NETWORK_EAP, + + /* keep last */ + __NL80211_AUTHTYPE_NUM, + NL80211_AUTHTYPE_MAX = __NL80211_AUTHTYPE_NUM - 1, + NL80211_AUTHTYPE_AUTOMATIC +}; + +/** + * enum nl80211_key_type - Key Type + * @NL80211_KEYTYPE_GROUP: Group (broadcast/multicast) key + * @NL80211_KEYTYPE_PAIRWISE: Pairwise (unicast/individual) key + * @NL80211_KEYTYPE_PEERKEY: PeerKey (DLS) + * @NUM_NL80211_KEYTYPES: number of defined key types + */ +enum nl80211_key_type { + NL80211_KEYTYPE_GROUP, + NL80211_KEYTYPE_PAIRWISE, + NL80211_KEYTYPE_PEERKEY, + + NUM_NL80211_KEYTYPES +}; + +/** + * enum nl80211_mfp - Management frame protection state + * @NL80211_MFP_NO: Management frame protection not used + * @NL80211_MFP_REQUIRED: Management frame protection required + */ +enum nl80211_mfp { + NL80211_MFP_NO, + NL80211_MFP_REQUIRED, +}; + +enum nl80211_wpa_versions { + NL80211_WPA_VERSION_1 = 1 << 0, + NL80211_WPA_VERSION_2 = 1 << 1, +}; + +/** + * enum nl80211_key_default_types - key default types + * @__NL80211_KEY_DEFAULT_TYPE_INVALID: invalid + * @NL80211_KEY_DEFAULT_TYPE_UNICAST: key should be used as default + * unicast key + * @NL80211_KEY_DEFAULT_TYPE_MULTICAST: key should be used as default + * multicast key + * @NUM_NL80211_KEY_DEFAULT_TYPES: number of default types + */ +enum nl80211_key_default_types { + __NL80211_KEY_DEFAULT_TYPE_INVALID, + NL80211_KEY_DEFAULT_TYPE_UNICAST, + NL80211_KEY_DEFAULT_TYPE_MULTICAST, + + NUM_NL80211_KEY_DEFAULT_TYPES +}; + +/** + * enum nl80211_key_attributes - key attributes + * @__NL80211_KEY_INVALID: invalid + * @NL80211_KEY_DATA: (temporal) key data; for TKIP this consists of + * 16 bytes encryption key followed by 8 bytes each for TX and RX MIC + * keys + * @NL80211_KEY_IDX: key ID (u8, 0-3) + * @NL80211_KEY_CIPHER: key cipher suite (u32, as defined by IEEE 802.11 + * section 7.3.2.25.1, e.g. 0x000FAC04) + * @NL80211_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and + * CCMP keys, each six bytes in little endian + * @NL80211_KEY_DEFAULT: flag indicating default key + * @NL80211_KEY_DEFAULT_MGMT: flag indicating default management key + * @NL80211_KEY_TYPE: the key type from enum nl80211_key_type, if not + * specified the default depends on whether a MAC address was + * given with the command using the key or not (u32) + * @NL80211_KEY_DEFAULT_TYPES: A nested attribute containing flags + * attributes, specifying what a key should be set as default as. + * See &enum nl80211_key_default_types. + * @__NL80211_KEY_AFTER_LAST: internal + * @NL80211_KEY_MAX: highest key attribute + */ +enum nl80211_key_attributes { + __NL80211_KEY_INVALID, + NL80211_KEY_DATA, + NL80211_KEY_IDX, + NL80211_KEY_CIPHER, + NL80211_KEY_SEQ, + NL80211_KEY_DEFAULT, + NL80211_KEY_DEFAULT_MGMT, + NL80211_KEY_TYPE, + NL80211_KEY_DEFAULT_TYPES, + + /* keep last */ + __NL80211_KEY_AFTER_LAST, + NL80211_KEY_MAX = __NL80211_KEY_AFTER_LAST - 1 +}; + +/** + * enum nl80211_tx_rate_attributes - TX rate set attributes + * @__NL80211_TXRATE_INVALID: invalid + * @NL80211_TXRATE_LEGACY: Legacy (non-MCS) rates allowed for TX rate selection + * in an array of rates as defined in IEEE 802.11 7.3.2.2 (u8 values with + * 1 = 500 kbps) but without the IE length restriction (at most + * %NL80211_MAX_SUPP_RATES in a single array). + * @NL80211_TXRATE_MCS: HT (MCS) rates allowed for TX rate selection + * in an array of MCS numbers. 
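As a concrete note on the 500 kbps unit used by %NL80211_TXRATE_LEGACY above (an illustrative aside, not part of the header):

/* NL80211_TXRATE_LEGACY carries u8 values in units of 500 kbps, e.g.
 * 6 Mb/s -> 12, 54 Mb/s -> 108, and the 802.11b rate 5.5 Mb/s -> 11.
 */
static unsigned char legacy_rate_unit(unsigned int rate_kbps)
{
        return rate_kbps / 500;
}

/* The 802.11a/g rate set, already in 500 kbps units. */
static const unsigned char ofdm_rates[] = {
        12, 18, 24, 36, 48, 72, 96, 108, /* 6, 9, 12, 18, 24, 36, 48, 54 Mb/s */
};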
+ * @__NL80211_TXRATE_AFTER_LAST: internal
+ * @NL80211_TXRATE_MAX: highest TX rate attribute
+ */
+enum nl80211_tx_rate_attributes {
+ __NL80211_TXRATE_INVALID,
+ NL80211_TXRATE_LEGACY,
+ NL80211_TXRATE_MCS,
+
+ /* keep last */
+ __NL80211_TXRATE_AFTER_LAST,
+ NL80211_TXRATE_MAX = __NL80211_TXRATE_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_band - Frequency band
+ * @NL80211_BAND_2GHZ: 2.4 GHz ISM band
+ * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
+ */
+enum nl80211_band {
+ NL80211_BAND_2GHZ,
+ NL80211_BAND_5GHZ,
+};
+
+enum nl80211_ps_state {
+ NL80211_PS_DISABLED,
+ NL80211_PS_ENABLED,
+};
+
+/**
+ * enum nl80211_attr_cqm - connection quality monitor attributes
+ * @__NL80211_ATTR_CQM_INVALID: invalid
+ * @NL80211_ATTR_CQM_RSSI_THOLD: RSSI threshold in dBm. This value specifies
+ * the threshold for the RSSI level at which an event will be sent. Zero
+ * to disable.
+ * @NL80211_ATTR_CQM_RSSI_HYST: RSSI hysteresis in dBm. This value specifies
+ * the minimum amount the RSSI level must change after an event before a
+ * new event may be issued (to reduce effects of RSSI oscillation).
+ * @NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT: RSSI threshold event
+ * @NL80211_ATTR_CQM_PKT_LOSS_EVENT: a u32 value indicating that this many
+ * consecutive packets were not acknowledged by the peer
+ * @__NL80211_ATTR_CQM_AFTER_LAST: internal
+ * @NL80211_ATTR_CQM_MAX: highest connection quality monitor attribute
+ */
+enum nl80211_attr_cqm {
+ __NL80211_ATTR_CQM_INVALID,
+ NL80211_ATTR_CQM_RSSI_THOLD,
+ NL80211_ATTR_CQM_RSSI_HYST,
+ NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+ NL80211_ATTR_CQM_PKT_LOSS_EVENT,
+
+ /* keep last */
+ __NL80211_ATTR_CQM_AFTER_LAST,
+ NL80211_ATTR_CQM_MAX = __NL80211_ATTR_CQM_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event
+ * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW: The RSSI level is lower than the
+ * configured threshold
+ * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the
+ * configured threshold
+ */
+enum nl80211_cqm_rssi_threshold_event {
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+};
+
+
+/**
+ * enum nl80211_tx_power_setting - TX power adjustment
+ * @NL80211_TX_POWER_AUTOMATIC: automatically determine transmit power
+ * @NL80211_TX_POWER_LIMITED: limit TX power by the mBm parameter
+ * @NL80211_TX_POWER_FIXED: fix TX power to the mBm parameter
+ */
+enum nl80211_tx_power_setting {
+ NL80211_TX_POWER_AUTOMATIC,
+ NL80211_TX_POWER_LIMITED,
+ NL80211_TX_POWER_FIXED,
+};
+
+/**
+ * enum nl80211_wowlan_packet_pattern_attr - WoWLAN packet pattern attribute
+ * @__NL80211_WOWLAN_PKTPAT_INVALID: invalid number for nested attribute
+ * @NL80211_WOWLAN_PKTPAT_PATTERN: the pattern, values where the mask has
+ * a zero bit are ignored
+ * @NL80211_WOWLAN_PKTPAT_MASK: pattern mask, must be long enough to have
+ * a bit for each byte in the pattern. The lowest-order bit corresponds
+ * to the first byte of the pattern, but the bytes of the pattern are
+ * in a little-endian-like format, i.e. the 9th byte of the pattern
+ * corresponds to the lowest-order bit in the second byte of the mask.
+ * For example: The match 00:xx:00:00:xx:00:00:00:00:xx:xx:xx (where
+ * xx indicates "don't care") would be represented by a pattern of
+ * twelve zero bytes, and a mask of "0xed,0x01".
+ * Note that the pattern matching is done as though frames were not
+ * 802.11 frames but 802.3 frames, i.e. the frame is fully unpacked
+ * first (including SNAP header unpacking) and then matched.
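To make the worked example above mechanical: given a per-byte "this byte matters" flag array, the mask bytes can be computed as below (an illustrative sketch; the helper name and the care[] representation are inventions of the example, not part of the ABI):

#include <string.h>

/* Build a NL80211_WOWLAN_PKTPAT_MASK byte array: bit i of the mask
 * corresponds to pattern byte i, lowest-order bit first within each
 * mask byte. For the 12-byte example above (bytes 1, 4, 9, 10 and 11
 * being don't-care) this yields the two mask bytes 0xed, 0x01.
 */
static void build_pattern_mask(const unsigned char *care, int pattern_len,
                               unsigned char *mask)
{
        int i;

        memset(mask, 0, (pattern_len + 7) / 8);
        for (i = 0; i < pattern_len; i++)
                if (care[i])
                        mask[i / 8] |= 1 << (i % 8);
}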
+ * @NUM_NL80211_WOWLAN_PKTPAT: number of attributes + * @MAX_NL80211_WOWLAN_PKTPAT: max attribute number + */ +enum nl80211_wowlan_packet_pattern_attr { + __NL80211_WOWLAN_PKTPAT_INVALID, + NL80211_WOWLAN_PKTPAT_MASK, + NL80211_WOWLAN_PKTPAT_PATTERN, + + NUM_NL80211_WOWLAN_PKTPAT, + MAX_NL80211_WOWLAN_PKTPAT = NUM_NL80211_WOWLAN_PKTPAT - 1, +}; + +/** + * struct nl80211_wowlan_pattern_support - pattern support information + * @max_patterns: maximum number of patterns supported + * @min_pattern_len: minimum length of each pattern + * @max_pattern_len: maximum length of each pattern + * + * This struct is carried in %NL80211_WOWLAN_TRIG_PKT_PATTERN when + * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED in the + * capability information given by the kernel to userspace. + */ +struct nl80211_wowlan_pattern_support { + __u32 max_patterns; + __u32 min_pattern_len; + __u32 max_pattern_len; +} __attribute__((packed)); + +/** + * enum nl80211_wowlan_triggers - WoWLAN trigger definitions + * @__NL80211_WOWLAN_TRIG_INVALID: invalid number for nested attributes + * @NL80211_WOWLAN_TRIG_ANY: wake up on any activity, do not really put + * the chip into a special state -- works best with chips that have + * support for low-power operation already (flag) + * @NL80211_WOWLAN_TRIG_DISCONNECT: wake up on disconnect, the way disconnect + * is detected is implementation-specific (flag) + * @NL80211_WOWLAN_TRIG_MAGIC_PKT: wake up on magic packet (6x 0xff, followed + * by 16 repetitions of MAC addr, anywhere in payload) (flag) + * @NL80211_WOWLAN_TRIG_PKT_PATTERN: wake up on the specified packet patterns + * which are passed in an array of nested attributes, each nested attribute + * defining a with attributes from &struct nl80211_wowlan_trig_pkt_pattern. + * Each pattern defines a wakeup packet. The matching is done on the MSDU, + * i.e. as though the packet was an 802.3 packet, so the pattern matching + * is done after the packet is converted to the MSDU. + * + * In %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, it is a binary attribute + * carrying a &struct nl80211_wowlan_pattern_support. 
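A small sketch of how the capability structure above might be consulted before programming a wake pattern; the function name is invented and the struct is assumed visible (e.g. via <linux/nl80211.h>).

#include <stdbool.h>
#include <linux/types.h>

/* Illustrative check of a desired wake pattern against the advertised
 * limits; not part of nl80211.h. */
static bool pattern_fits(const struct nl80211_wowlan_pattern_support *cap,
			 unsigned int pat_len, unsigned int n_patterns)
{
	return n_patterns <= cap->max_patterns &&
	       pat_len >= cap->min_pattern_len &&
	       pat_len <= cap->max_pattern_len;
}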
+ * @NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED: Not a real trigger, and cannot be + * used when setting, used only to indicate that GTK rekeying is supported + * by the device (flag) + * @NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE: wake up on GTK rekey failure (if + * done by the device) (flag) + * @NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST: wake up on EAP Identity Request + * packet (flag) + * @NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE: wake up on 4-way handshake (flag) + * @NL80211_WOWLAN_TRIG_RFKILL_RELEASE: wake up when rfkill is released + * (on devices that have rfkill in the device) (flag) + * @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers + * @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number + */ +enum nl80211_wowlan_triggers { + __NL80211_WOWLAN_TRIG_INVALID, + NL80211_WOWLAN_TRIG_ANY, + NL80211_WOWLAN_TRIG_DISCONNECT, + NL80211_WOWLAN_TRIG_MAGIC_PKT, + NL80211_WOWLAN_TRIG_PKT_PATTERN, + NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED, + NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE, + NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST, + NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE, + NL80211_WOWLAN_TRIG_RFKILL_RELEASE, + + /* keep last */ + NUM_NL80211_WOWLAN_TRIG, + MAX_NL80211_WOWLAN_TRIG = NUM_NL80211_WOWLAN_TRIG - 1 +}; + +/** + * enum nl80211_iface_limit_attrs - limit attributes + * @NL80211_IFACE_LIMIT_UNSPEC: (reserved) + * @NL80211_IFACE_LIMIT_MAX: maximum number of interfaces that + * can be chosen from this set of interface types (u32) + * @NL80211_IFACE_LIMIT_TYPES: nested attribute containing a + * flag attribute for each interface type in this set + * @NUM_NL80211_IFACE_LIMIT: number of attributes + * @MAX_NL80211_IFACE_LIMIT: highest attribute number + */ +enum nl80211_iface_limit_attrs { + NL80211_IFACE_LIMIT_UNSPEC, + NL80211_IFACE_LIMIT_MAX, + NL80211_IFACE_LIMIT_TYPES, + + /* keep last */ + NUM_NL80211_IFACE_LIMIT, + MAX_NL80211_IFACE_LIMIT = NUM_NL80211_IFACE_LIMIT - 1 +}; + +/** + * enum nl80211_if_combination_attrs -- interface combination attributes + * + * @NL80211_IFACE_COMB_UNSPEC: (reserved) + * @NL80211_IFACE_COMB_LIMITS: Nested attributes containing the limits + * for given interface types, see &enum nl80211_iface_limit_attrs. + * @NL80211_IFACE_COMB_MAXNUM: u32 attribute giving the total number of + * interfaces that can be created in this group. This number doesn't + * apply to interfaces purely managed in software, which are listed + * in a separate attribute %NL80211_ATTR_INTERFACES_SOFTWARE. + * @NL80211_IFACE_COMB_STA_AP_BI_MATCH: flag attribute specifying that + * beacon intervals within this group must be all the same even for + * infrastructure and AP/GO combinations, i.e. the GO(s) must adopt + * the infrastructure network's beacon interval. + * @NL80211_IFACE_COMB_NUM_CHANNELS: u32 attribute specifying how many + * different channels may be used within this group. 
+ * @NUM_NL80211_IFACE_COMB: number of attributes + * @MAX_NL80211_IFACE_COMB: highest attribute number + * + * Examples: + * limits = [ #{STA} <= 1, #{AP} <= 1 ], matching BI, channels = 1, max = 2 + * => allows an AP and a STA that must match BIs + * + * numbers = [ #{AP, P2P-GO} <= 8 ], channels = 1, max = 8 + * => allows 8 of AP/GO + * + * numbers = [ #{STA} <= 2 ], channels = 2, max = 2 + * => allows two STAs on different channels + * + * numbers = [ #{STA} <= 1, #{P2P-client,P2P-GO} <= 3 ], max = 4 + * => allows a STA plus three P2P interfaces + * + * The list of these four possiblities could completely be contained + * within the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute to indicate + * that any of these groups must match. + * + * "Combinations" of just a single interface will not be listed here, + * a single interface of any valid interface type is assumed to always + * be possible by itself. This means that implicitly, for each valid + * interface type, the following group always exists: + * numbers = [ #{<type>} <= 1 ], channels = 1, max = 1 + */ +enum nl80211_if_combination_attrs { + NL80211_IFACE_COMB_UNSPEC, + NL80211_IFACE_COMB_LIMITS, + NL80211_IFACE_COMB_MAXNUM, + NL80211_IFACE_COMB_STA_AP_BI_MATCH, + NL80211_IFACE_COMB_NUM_CHANNELS, + + /* keep last */ + NUM_NL80211_IFACE_COMB, + MAX_NL80211_IFACE_COMB = NUM_NL80211_IFACE_COMB - 1 +}; + + +/** + * enum nl80211_plink_state - state of a mesh peer link finite state machine + * + * @NL80211_PLINK_LISTEN: initial state, considered the implicit + * state of non existant mesh peer links + * @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to + * this mesh peer + * @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received + * from this mesh peer + * @NL80211_PLINK_CNF_RCVD: mesh plink confirm frame has been + * received from this mesh peer + * @NL80211_PLINK_ESTAB: mesh peer link is established + * @NL80211_PLINK_HOLDING: mesh peer link is being closed or cancelled + * @NL80211_PLINK_BLOCKED: all frames transmitted from this mesh + * plink are discarded + * @NUM_NL80211_PLINK_STATES: number of peer link states + * @MAX_NL80211_PLINK_STATES: highest numerical value of plink states + */ +enum nl80211_plink_state { + NL80211_PLINK_LISTEN, + NL80211_PLINK_OPN_SNT, + NL80211_PLINK_OPN_RCVD, + NL80211_PLINK_CNF_RCVD, + NL80211_PLINK_ESTAB, + NL80211_PLINK_HOLDING, + NL80211_PLINK_BLOCKED, + + /* keep last */ + NUM_NL80211_PLINK_STATES, + MAX_NL80211_PLINK_STATES = NUM_NL80211_PLINK_STATES - 1 +}; + +#define NL80211_KCK_LEN 16 +#define NL80211_KEK_LEN 16 +#define NL80211_REPLAY_CTR_LEN 8 + +/** + * enum nl80211_rekey_data - attributes for GTK rekey offload + * @__NL80211_REKEY_DATA_INVALID: invalid number for nested attributes + * @NL80211_REKEY_DATA_KEK: key encryption key (binary) + * @NL80211_REKEY_DATA_KCK: key confirmation key (binary) + * @NL80211_REKEY_DATA_REPLAY_CTR: replay counter (binary) + * @NUM_NL80211_REKEY_DATA: number of rekey attributes (internal) + * @MAX_NL80211_REKEY_DATA: highest rekey attribute (internal) + */ +enum nl80211_rekey_data { + __NL80211_REKEY_DATA_INVALID, + NL80211_REKEY_DATA_KEK, + NL80211_REKEY_DATA_KCK, + NL80211_REKEY_DATA_REPLAY_CTR, + + /* keep last */ + NUM_NL80211_REKEY_DATA, + MAX_NL80211_REKEY_DATA = NUM_NL80211_REKEY_DATA - 1 +}; + +/** + * enum nl80211_hidden_ssid - values for %NL80211_ATTR_HIDDEN_SSID + * @NL80211_HIDDEN_SSID_NOT_IN_USE: do not hide SSID (i.e., broadcast it in + * Beacon frames) + * @NL80211_HIDDEN_SSID_ZERO_LEN: hide SSID by using zero-length SSID 
element + * in Beacon frames + * @NL80211_HIDDEN_SSID_ZERO_CONTENTS: hide SSID by using correct length of SSID + * element in Beacon frames but zero out each byte in the SSID + */ +enum nl80211_hidden_ssid { + NL80211_HIDDEN_SSID_NOT_IN_USE, + NL80211_HIDDEN_SSID_ZERO_LEN, + NL80211_HIDDEN_SSID_ZERO_CONTENTS +}; + +/** + * enum nl80211_sta_wme_attr - station WME attributes + * @__NL80211_STA_WME_INVALID: invalid number for nested attribute + * @NL80211_STA_WME_UAPSD_QUEUES: bitmap of uapsd queues. the format + * is the same as the AC bitmap in the QoS info field. + * @NL80211_STA_WME_MAX_SP: max service period. the format is the same + * as the MAX_SP field in the QoS info field (but already shifted down). + * @__NL80211_STA_WME_AFTER_LAST: internal + * @NL80211_STA_WME_MAX: highest station WME attribute + */ +enum nl80211_sta_wme_attr { + __NL80211_STA_WME_INVALID, + NL80211_STA_WME_UAPSD_QUEUES, + NL80211_STA_WME_MAX_SP, + + /* keep last */ + __NL80211_STA_WME_AFTER_LAST, + NL80211_STA_WME_MAX = __NL80211_STA_WME_AFTER_LAST - 1 +}; + +/** + * enum nl80211_pmksa_candidate_attr - attributes for PMKSA caching candidates + * @__NL80211_PMKSA_CANDIDATE_INVALID: invalid number for nested attributes + * @NL80211_PMKSA_CANDIDATE_INDEX: candidate index (u32; the smaller, the higher + * priority) + * @NL80211_PMKSA_CANDIDATE_BSSID: candidate BSSID (6 octets) + * @NL80211_PMKSA_CANDIDATE_PREAUTH: RSN pre-authentication supported (flag) + * @NUM_NL80211_PMKSA_CANDIDATE: number of PMKSA caching candidate attributes + * (internal) + * @MAX_NL80211_PMKSA_CANDIDATE: highest PMKSA caching candidate attribute + * (internal) + */ +enum nl80211_pmksa_candidate_attr { + __NL80211_PMKSA_CANDIDATE_INVALID, + NL80211_PMKSA_CANDIDATE_INDEX, + NL80211_PMKSA_CANDIDATE_BSSID, + NL80211_PMKSA_CANDIDATE_PREAUTH, + + /* keep last */ + NUM_NL80211_PMKSA_CANDIDATE, + MAX_NL80211_PMKSA_CANDIDATE = NUM_NL80211_PMKSA_CANDIDATE - 1 +}; + +/** + * enum nl80211_tdls_operation - values for %NL80211_ATTR_TDLS_OPERATION + * @NL80211_TDLS_DISCOVERY_REQ: Send a TDLS discovery request + * @NL80211_TDLS_SETUP: Setup TDLS link + * @NL80211_TDLS_TEARDOWN: Teardown a TDLS link which is already established + * @NL80211_TDLS_ENABLE_LINK: Enable TDLS link + * @NL80211_TDLS_DISABLE_LINK: Disable TDLS link + */ +enum nl80211_tdls_operation { + NL80211_TDLS_DISCOVERY_REQ, + NL80211_TDLS_SETUP, + NL80211_TDLS_TEARDOWN, + NL80211_TDLS_ENABLE_LINK, + NL80211_TDLS_DISABLE_LINK, +}; + +/* + * enum nl80211_ap_sme_features - device-integrated AP features + * Reserved for future use, no bits are defined in + * NL80211_ATTR_DEVICE_AP_SME yet. +enum nl80211_ap_sme_features { +}; + */ + +/** + * enum nl80211_feature_flags - device/driver features + * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back + * TX status to the socket error queue when requested with the + * socket option. + * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates. + * @NL80211_FEATURE_INACTIVITY_TIMER: This driver takes care of freeing up + * the connected inactive stations in AP mode. + */ +enum nl80211_feature_flags { + NL80211_FEATURE_SK_TX_STATUS = 1 << 0, + NL80211_FEATURE_HT_IBSS = 1 << 1, + NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2, +}; + +/** + * enum nl80211_probe_resp_offload_support_attr - optional supported + * protocols for probe-response offloading by the driver/FW. + * To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute. + * Each enum value represents a bit in the bitmap of supported + * protocols. 
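The feature flags above form a bitmap (reported by the kernel as a u32 attribute), so capability checks reduce to bit tests. A minimal sketch, with the function and variable names assumed:

#include <stdbool.h>
#include <stdint.h>

/* 'features' would be the nl80211 feature bitmap reported for the wiphy. */
static bool supports_ht_ibss(uint32_t features)
{
	return (features & NL80211_FEATURE_HT_IBSS) != 0;
}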
Typically a subset of probe-requests belonging to a + * supported protocol will be excluded from offload and uploaded + * to the host. + * + * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1 + * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2 + * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P + * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u + */ +enum nl80211_probe_resp_offload_support_attr { + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 1<<0, + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 1<<1, + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 1<<2, + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3, +}; + +#endif /* __LINUX_NL80211_H */ diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h new file mode 100644 index 00000000..33d9f517 --- /dev/null +++ b/include/linux/nl802154.h @@ -0,0 +1,129 @@ +/* + * nl802154.h + * + * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#ifndef NL802154_H +#define NL802154_H + +#define IEEE802154_NL_NAME "802.15.4 MAC" +#define IEEE802154_MCAST_COORD_NAME "coordinator" +#define IEEE802154_MCAST_BEACON_NAME "beacon" + +enum { + __IEEE802154_ATTR_INVALID, + + IEEE802154_ATTR_DEV_NAME, + IEEE802154_ATTR_DEV_INDEX, + + IEEE802154_ATTR_STATUS, + + IEEE802154_ATTR_SHORT_ADDR, + IEEE802154_ATTR_HW_ADDR, + IEEE802154_ATTR_PAN_ID, + + IEEE802154_ATTR_CHANNEL, + + IEEE802154_ATTR_COORD_SHORT_ADDR, + IEEE802154_ATTR_COORD_HW_ADDR, + IEEE802154_ATTR_COORD_PAN_ID, + + IEEE802154_ATTR_SRC_SHORT_ADDR, + IEEE802154_ATTR_SRC_HW_ADDR, + IEEE802154_ATTR_SRC_PAN_ID, + + IEEE802154_ATTR_DEST_SHORT_ADDR, + IEEE802154_ATTR_DEST_HW_ADDR, + IEEE802154_ATTR_DEST_PAN_ID, + + IEEE802154_ATTR_CAPABILITY, + IEEE802154_ATTR_REASON, + IEEE802154_ATTR_SCAN_TYPE, + IEEE802154_ATTR_CHANNELS, + IEEE802154_ATTR_DURATION, + IEEE802154_ATTR_ED_LIST, + IEEE802154_ATTR_BCN_ORD, + IEEE802154_ATTR_SF_ORD, + IEEE802154_ATTR_PAN_COORD, + IEEE802154_ATTR_BAT_EXT, + IEEE802154_ATTR_COORD_REALIGN, + IEEE802154_ATTR_SEC, + + IEEE802154_ATTR_PAGE, + IEEE802154_ATTR_CHANNEL_PAGE_LIST, + + IEEE802154_ATTR_PHY_NAME, + + __IEEE802154_ATTR_MAX, +}; + +#define IEEE802154_ATTR_MAX (__IEEE802154_ATTR_MAX - 1) + +extern const struct nla_policy ieee802154_policy[]; + +/* commands */ +/* REQ should be responded with CONF + * and INDIC with RESP + */ +enum { + __IEEE802154_COMMAND_INVALID, + + IEEE802154_ASSOCIATE_REQ, + IEEE802154_ASSOCIATE_CONF, + IEEE802154_DISASSOCIATE_REQ, + IEEE802154_DISASSOCIATE_CONF, + IEEE802154_GET_REQ, + IEEE802154_GET_CONF, + IEEE802154_RESET_REQ, + IEEE802154_RESET_CONF, + IEEE802154_SCAN_REQ, + IEEE802154_SCAN_CONF, + IEEE802154_SET_REQ, + IEEE802154_SET_CONF, + IEEE802154_START_REQ, + IEEE802154_START_CONF, + IEEE802154_SYNC_REQ, + IEEE802154_POLL_REQ, + IEEE802154_POLL_CONF, + + IEEE802154_ASSOCIATE_INDIC, + IEEE802154_ASSOCIATE_RESP, + 
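Like the nl80211 enums above, these attribute lists end with a "keep last" sentinel so that IEEE802154_ATTR_MAX is the highest valid ID; parse tables are therefore sized with one extra slot. A minimal in-kernel sketch, assuming struct nlattr from the kernel's netlink headers; the array name is illustrative.

#include <linux/netlink.h>	/* struct nlattr */

/* One slot per attribute ID; index 0 is __IEEE802154_ATTR_INVALID. */
static struct nlattr *example_attrs[IEEE802154_ATTR_MAX + 1];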
IEEE802154_DISASSOCIATE_INDIC, + IEEE802154_BEACON_NOTIFY_INDIC, + IEEE802154_ORPHAN_INDIC, + IEEE802154_ORPHAN_RESP, + IEEE802154_COMM_STATUS_INDIC, + IEEE802154_SYNC_LOSS_INDIC, + + IEEE802154_GTS_REQ, /* Not supported yet */ + IEEE802154_GTS_INDIC, /* Not supported yet */ + IEEE802154_GTS_CONF, /* Not supported yet */ + IEEE802154_RX_ENABLE_REQ, /* Not supported yet */ + IEEE802154_RX_ENABLE_CONF, /* Not supported yet */ + + IEEE802154_LIST_IFACE, + IEEE802154_LIST_PHY, + IEEE802154_ADD_IFACE, + IEEE802154_DEL_IFACE, + + __IEEE802154_CMD_MAX, +}; + +#define IEEE802154_CMD_MAX (__IEEE802154_CMD_MAX - 1) + +#endif diff --git a/include/linux/nls.h b/include/linux/nls.h new file mode 100644 index 00000000..5dc635f8 --- /dev/null +++ b/include/linux/nls.h @@ -0,0 +1,107 @@ +#ifndef _LINUX_NLS_H +#define _LINUX_NLS_H + +#include <linux/init.h> + +/* Unicode has changed over the years. Unicode code points no longer + * fit into 16 bits; as of Unicode 5 valid code points range from 0 + * to 0x10ffff (17 planes, where each plane holds 65536 code points). + * + * The original decision to represent Unicode characters as 16-bit + * wchar_t values is now outdated. But plane 0 still includes the + * most commonly used characters, so we will retain it. The newer + * 32-bit unicode_t type can be used when it is necessary to + * represent the full Unicode character set. + */ + +/* Plane-0 Unicode character */ +typedef u16 wchar_t; +#define MAX_WCHAR_T 0xffff + +/* Arbitrary Unicode character */ +typedef u32 unicode_t; + +struct nls_table { + const char *charset; + const char *alias; + int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen); + int (*char2uni) (const unsigned char *rawstring, int boundlen, + wchar_t *uni); + const unsigned char *charset2lower; + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +}; + +/* this value hold the maximum octet of charset */ +#define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */ + +/* Byte order for UTF-16 strings */ +enum utf16_endian { + UTF16_HOST_ENDIAN, + UTF16_LITTLE_ENDIAN, + UTF16_BIG_ENDIAN +}; + +/* nls_base.c */ +extern int register_nls(struct nls_table *); +extern int unregister_nls(struct nls_table *); +extern struct nls_table *load_nls(char *); +extern void unload_nls(struct nls_table *); +extern struct nls_table *load_nls_default(void); + +extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu); +extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen); +extern int utf8s_to_utf16s(const u8 *s, int len, + enum utf16_endian endian, wchar_t *pwcs, int maxlen); +extern int utf16s_to_utf8s(const wchar_t *pwcs, int len, + enum utf16_endian endian, u8 *s, int maxlen); + +static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c) +{ + unsigned char nc = t->charset2lower[c]; + + return nc ? nc : c; +} + +static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c) +{ + unsigned char nc = t->charset2upper[c]; + + return nc ? nc : c; +} + +static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1, + const unsigned char *s2, int len) +{ + while (len--) { + if (nls_tolower(t, *s1++) != nls_tolower(t, *s2++)) + return 1; + } + + return 0; +} + +/* + * nls_nullsize - return length of null character for codepage + * @codepage - codepage for which to return length of NULL terminator + * + * Since we can't guarantee that the null terminator will be a particular + * length, we have to check against the codepage. 
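A short in-kernel sketch of how the table lookup and case helpers declared above are typically combined; the charset name is only an example, the function is invented, and error handling is reduced to the bare minimum.

#include <linux/nls.h>

static int example_caseless_match(const unsigned char *a,
				  const unsigned char *b, int len)
{
	struct nls_table *t = load_nls("iso8859-1");	/* NULL if that table is not built */
	int ret;

	if (!t)
		t = load_nls_default();
	ret = nls_strnicmp(t, a, b, len);	/* 0 on match, 1 otherwise */
	unload_nls(t);
	return ret;
}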
If there's a problem + * determining it, assume a single-byte NULL terminator. + */ +static inline int +nls_nullsize(const struct nls_table *codepage) +{ + int charlen; + char tmp[NLS_MAX_CHARSET_SIZE]; + + charlen = codepage->uni2char(0, tmp, NLS_MAX_CHARSET_SIZE); + + return charlen > 0 ? charlen : 1; +} + +#define MODULE_ALIAS_NLS(name) MODULE_ALIAS("nls_" __stringify(name)) + +#endif /* _LINUX_NLS_H */ + diff --git a/include/linux/nmi.h b/include/linux/nmi.h new file mode 100644 index 00000000..db50840e --- /dev/null +++ b/include/linux/nmi.h @@ -0,0 +1,56 @@ +/* + * linux/include/linux/nmi.h + */ +#ifndef LINUX_NMI_H +#define LINUX_NMI_H + +#include <linux/sched.h> +#include <asm/irq.h> + +/** + * touch_nmi_watchdog - restart NMI watchdog timeout. + * + * If the architecture supports the NMI watchdog, touch_nmi_watchdog() + * may be used to reset the timeout - for code which intentionally + * disables interrupts for a long time. This call is stateless. + */ +#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) +#include <asm/nmi.h> +extern void touch_nmi_watchdog(void); +#else +static inline void touch_nmi_watchdog(void) +{ + touch_softlockup_watchdog(); +} +#endif + +/* + * Create trigger_all_cpu_backtrace() out of the arch-provided + * base function. Return whether such support was available, + * to allow calling code to fall back to some other mechanism: + */ +#ifdef arch_trigger_all_cpu_backtrace +static inline bool trigger_all_cpu_backtrace(void) +{ + arch_trigger_all_cpu_backtrace(); + + return true; +} +#else +static inline bool trigger_all_cpu_backtrace(void) +{ + return false; +} +#endif + +#ifdef CONFIG_LOCKUP_DETECTOR +int hw_nmi_is_cpu_stuck(struct pt_regs *); +u64 hw_nmi_get_sample_period(int watchdog_thresh); +extern int watchdog_enabled; +extern int watchdog_thresh; +struct ctl_table; +extern int proc_dowatchdog(struct ctl_table *, int , + void __user *, size_t *, loff_t *); +#endif + +#endif diff --git a/include/linux/node.h b/include/linux/node.h new file mode 100644 index 00000000..624e53ce --- /dev/null +++ b/include/linux/node.h @@ -0,0 +1,85 @@ +/* + * include/linux/node.h - generic node definition + * + * This is mainly for topological representation. We define the + * basic 'struct node' here, which can be embedded in per-arch + * definitions of processors. + * + * Basic handling of the devices is done in drivers/base/node.c + * and system devices are handled in drivers/base/sys.c. + * + * Nodes are exported via driverfs in the class/node/devices/ + * directory. 
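The touch_nmi_watchdog() comment above names the intended use case: long busy-wait loops that keep interrupts off. A hedged in-kernel sketch; the device, register and polling interval are hypothetical.

#include <linux/nmi.h>
#include <linux/io.h>
#include <linux/delay.h>

/* Hypothetical poll of a hardware status bit with interrupts disabled. */
static void wait_for_hw_ready(void __iomem *status_reg)
{
	while (!(readl(status_reg) & 0x1)) {
		touch_nmi_watchdog();	/* keep the NMI watchdog from firing */
		udelay(10);
	}
}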
+ */ +#ifndef _LINUX_NODE_H_ +#define _LINUX_NODE_H_ + +#include <linux/device.h> +#include <linux/cpumask.h> +#include <linux/workqueue.h> + +struct node { + struct device dev; + +#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) + struct work_struct node_work; +#endif +}; + +struct memory_block; +extern struct node node_devices[]; +typedef void (*node_registration_func_t)(struct node *); + +extern int register_node(struct node *, int, struct node *); +extern void unregister_node(struct node *node); +#ifdef CONFIG_NUMA +extern int register_one_node(int nid); +extern void unregister_one_node(int nid); +extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); +extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); +extern int register_mem_sect_under_node(struct memory_block *mem_blk, + int nid); +extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, + unsigned long phys_index); + +#ifdef CONFIG_HUGETLBFS +extern void register_hugetlbfs_with_node(node_registration_func_t doregister, + node_registration_func_t unregister); +#endif +#else +static inline int register_one_node(int nid) +{ + return 0; +} +static inline int unregister_one_node(int nid) +{ + return 0; +} +static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + return 0; +} +static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + return 0; +} +static inline int register_mem_sect_under_node(struct memory_block *mem_blk, + int nid) +{ + return 0; +} +static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, + unsigned long phys_index) +{ + return 0; +} + +static inline void register_hugetlbfs_with_node(node_registration_func_t reg, + node_registration_func_t unreg) +{ +} +#endif + +#define to_node(device) container_of(device, struct node, dev) + +#endif /* _LINUX_NODE_H_ */ diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h new file mode 100644 index 00000000..7afc3633 --- /dev/null +++ b/include/linux/nodemask.h @@ -0,0 +1,515 @@ +#ifndef __LINUX_NODEMASK_H +#define __LINUX_NODEMASK_H + +/* + * Nodemasks provide a bitmap suitable for representing the + * set of Node's in a system, one bit position per Node number. + * + * See detailed comments in the file linux/bitmap.h describing the + * data type on which these nodemasks are based. + * + * For details of nodemask_scnprintf() and nodemask_parse_user(), + * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c. + * For details of nodelist_scnprintf() and nodelist_parse(), see + * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. + * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c. + * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c. + * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c. + * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c. 
+ * + * The available nodemask operations are: + * + * void node_set(node, mask) turn on bit 'node' in mask + * void node_clear(node, mask) turn off bit 'node' in mask + * void nodes_setall(mask) set all bits + * void nodes_clear(mask) clear all bits + * int node_isset(node, mask) true iff bit 'node' set in mask + * int node_test_and_set(node, mask) test and set bit 'node' in mask + * + * void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection] + * void nodes_or(dst, src1, src2) dst = src1 | src2 [union] + * void nodes_xor(dst, src1, src2) dst = src1 ^ src2 + * void nodes_andnot(dst, src1, src2) dst = src1 & ~src2 + * void nodes_complement(dst, src) dst = ~src + * + * int nodes_equal(mask1, mask2) Does mask1 == mask2? + * int nodes_intersects(mask1, mask2) Do mask1 and mask2 intersect? + * int nodes_subset(mask1, mask2) Is mask1 a subset of mask2? + * int nodes_empty(mask) Is mask empty (no bits sets)? + * int nodes_full(mask) Is mask full (all bits sets)? + * int nodes_weight(mask) Hamming weight - number of set bits + * + * void nodes_shift_right(dst, src, n) Shift right + * void nodes_shift_left(dst, src, n) Shift left + * + * int first_node(mask) Number lowest set bit, or MAX_NUMNODES + * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES + * int first_unset_node(mask) First node not set in mask, or + * MAX_NUMNODES. + * + * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set + * NODE_MASK_ALL Initializer - all bits set + * NODE_MASK_NONE Initializer - no bits set + * unsigned long *nodes_addr(mask) Array of unsigned long's in mask + * + * int nodemask_scnprintf(buf, len, mask) Format nodemask for printing + * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask + * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing + * int nodelist_parse(buf, map) Parse ascii string as nodelist + * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit) + * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src) + * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap + * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz + * + * for_each_node_mask(node, mask) for-loop node over mask + * + * int num_online_nodes() Number of online Nodes + * int num_possible_nodes() Number of all possible Nodes + * + * int node_random(mask) Random node with set bit in mask + * + * int node_online(node) Is some node online? + * int node_possible(node) Is some node possible? + * + * node_set_online(node) set bit 'node' in node_online_map + * node_set_offline(node) clear bit 'node' in node_online_map + * + * for_each_node(node) for-loop node over node_possible_map + * for_each_online_node(node) for-loop node over node_online_map + * + * Subtlety: + * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway) + * to generate slightly worse code. So use a simple one-line #define + * for node_isset(), instead of wrapping an inline inside a macro, the + * way we do the other calls. + * + * NODEMASK_SCRATCH + * When doing above logical AND, OR, XOR, Remap operations the callers tend to + * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large, + * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper + * for such situations. See below and CPUMASK_ALLOC also. 
+ */ + +#include <linux/kernel.h> +#include <linux/threads.h> +#include <linux/bitmap.h> +#include <linux/numa.h> + +typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; +extern nodemask_t _unused_nodemask_arg_; + +#define node_set(node, dst) __node_set((node), &(dst)) +static inline void __node_set(int node, volatile nodemask_t *dstp) +{ + set_bit(node, dstp->bits); +} + +#define node_clear(node, dst) __node_clear((node), &(dst)) +static inline void __node_clear(int node, volatile nodemask_t *dstp) +{ + clear_bit(node, dstp->bits); +} + +#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES) +static inline void __nodes_setall(nodemask_t *dstp, int nbits) +{ + bitmap_fill(dstp->bits, nbits); +} + +#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES) +static inline void __nodes_clear(nodemask_t *dstp, int nbits) +{ + bitmap_zero(dstp->bits, nbits); +} + +/* No static inline type checking - see Subtlety (1) above. */ +#define node_isset(node, nodemask) test_bit((node), (nodemask).bits) + +#define node_test_and_set(node, nodemask) \ + __node_test_and_set((node), &(nodemask)) +static inline int __node_test_and_set(int node, nodemask_t *addr) +{ + return test_and_set_bit(node, addr->bits); +} + +#define nodes_and(dst, src1, src2) \ + __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES) +static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, int nbits) +{ + bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define nodes_or(dst, src1, src2) \ + __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES) +static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, int nbits) +{ + bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define nodes_xor(dst, src1, src2) \ + __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES) +static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, int nbits) +{ + bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define nodes_andnot(dst, src1, src2) \ + __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES) +static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, int nbits) +{ + bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); +} + +#define nodes_complement(dst, src) \ + __nodes_complement(&(dst), &(src), MAX_NUMNODES) +static inline void __nodes_complement(nodemask_t *dstp, + const nodemask_t *srcp, int nbits) +{ + bitmap_complement(dstp->bits, srcp->bits, nbits); +} + +#define nodes_equal(src1, src2) \ + __nodes_equal(&(src1), &(src2), MAX_NUMNODES) +static inline int __nodes_equal(const nodemask_t *src1p, + const nodemask_t *src2p, int nbits) +{ + return bitmap_equal(src1p->bits, src2p->bits, nbits); +} + +#define nodes_intersects(src1, src2) \ + __nodes_intersects(&(src1), &(src2), MAX_NUMNODES) +static inline int __nodes_intersects(const nodemask_t *src1p, + const nodemask_t *src2p, int nbits) +{ + return bitmap_intersects(src1p->bits, src2p->bits, nbits); +} + +#define nodes_subset(src1, src2) \ + __nodes_subset(&(src1), &(src2), MAX_NUMNODES) +static inline int __nodes_subset(const nodemask_t *src1p, + const nodemask_t *src2p, int nbits) +{ + return bitmap_subset(src1p->bits, src2p->bits, nbits); +} + +#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES) +static inline int __nodes_empty(const nodemask_t *srcp, int nbits) +{ + return bitmap_empty(srcp->bits, nbits); +} + +#define nodes_full(nodemask) 
__nodes_full(&(nodemask), MAX_NUMNODES) +static inline int __nodes_full(const nodemask_t *srcp, int nbits) +{ + return bitmap_full(srcp->bits, nbits); +} + +#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES) +static inline int __nodes_weight(const nodemask_t *srcp, int nbits) +{ + return bitmap_weight(srcp->bits, nbits); +} + +#define nodes_shift_right(dst, src, n) \ + __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES) +static inline void __nodes_shift_right(nodemask_t *dstp, + const nodemask_t *srcp, int n, int nbits) +{ + bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); +} + +#define nodes_shift_left(dst, src, n) \ + __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES) +static inline void __nodes_shift_left(nodemask_t *dstp, + const nodemask_t *srcp, int n, int nbits) +{ + bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); +} + +/* FIXME: better would be to fix all architectures to never return + > MAX_NUMNODES, then the silly min_ts could be dropped. */ + +#define first_node(src) __first_node(&(src)) +static inline int __first_node(const nodemask_t *srcp) +{ + return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); +} + +#define next_node(n, src) __next_node((n), &(src)) +static inline int __next_node(int n, const nodemask_t *srcp) +{ + return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); +} + +static inline void init_nodemask_of_node(nodemask_t *mask, int node) +{ + nodes_clear(*mask); + node_set(node, *mask); +} + +#define nodemask_of_node(node) \ +({ \ + typeof(_unused_nodemask_arg_) m; \ + if (sizeof(m) == sizeof(unsigned long)) { \ + m.bits[0] = 1UL << (node); \ + } else { \ + init_nodemask_of_node(&m, (node)); \ + } \ + m; \ +}) + +#define first_unset_node(mask) __first_unset_node(&(mask)) +static inline int __first_unset_node(const nodemask_t *maskp) +{ + return min_t(int,MAX_NUMNODES, + find_first_zero_bit(maskp->bits, MAX_NUMNODES)); +} + +#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES) + +#if MAX_NUMNODES <= BITS_PER_LONG + +#define NODE_MASK_ALL \ +((nodemask_t) { { \ + [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ +} }) + +#else + +#define NODE_MASK_ALL \ +((nodemask_t) { { \ + [0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL, \ + [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ +} }) + +#endif + +#define NODE_MASK_NONE \ +((nodemask_t) { { \ + [0 ... 
BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL \ +} }) + +#define nodes_addr(src) ((src).bits) + +#define nodemask_scnprintf(buf, len, src) \ + __nodemask_scnprintf((buf), (len), &(src), MAX_NUMNODES) +static inline int __nodemask_scnprintf(char *buf, int len, + const nodemask_t *srcp, int nbits) +{ + return bitmap_scnprintf(buf, len, srcp->bits, nbits); +} + +#define nodemask_parse_user(ubuf, ulen, dst) \ + __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES) +static inline int __nodemask_parse_user(const char __user *buf, int len, + nodemask_t *dstp, int nbits) +{ + return bitmap_parse_user(buf, len, dstp->bits, nbits); +} + +#define nodelist_scnprintf(buf, len, src) \ + __nodelist_scnprintf((buf), (len), &(src), MAX_NUMNODES) +static inline int __nodelist_scnprintf(char *buf, int len, + const nodemask_t *srcp, int nbits) +{ + return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); +} + +#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES) +static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) +{ + return bitmap_parselist(buf, dstp->bits, nbits); +} + +#define node_remap(oldbit, old, new) \ + __node_remap((oldbit), &(old), &(new), MAX_NUMNODES) +static inline int __node_remap(int oldbit, + const nodemask_t *oldp, const nodemask_t *newp, int nbits) +{ + return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); +} + +#define nodes_remap(dst, src, old, new) \ + __nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES) +static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, + const nodemask_t *oldp, const nodemask_t *newp, int nbits) +{ + bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); +} + +#define nodes_onto(dst, orig, relmap) \ + __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES) +static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, + const nodemask_t *relmapp, int nbits) +{ + bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); +} + +#define nodes_fold(dst, orig, sz) \ + __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES) +static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, + int sz, int nbits) +{ + bitmap_fold(dstp->bits, origp->bits, sz, nbits); +} + +#if MAX_NUMNODES > 1 +#define for_each_node_mask(node, mask) \ + for ((node) = first_node(mask); \ + (node) < MAX_NUMNODES; \ + (node) = next_node((node), (mask))) +#else /* MAX_NUMNODES == 1 */ +#define for_each_node_mask(node, mask) \ + if (!nodes_empty(mask)) \ + for ((node) = 0; (node) < 1; (node)++) +#endif /* MAX_NUMNODES */ + +/* + * Bitmasks that are kept for all the nodes. + */ +enum node_states { + N_POSSIBLE, /* The node could become online at some point */ + N_ONLINE, /* The node is online */ + N_NORMAL_MEMORY, /* The node has regular memory */ +#ifdef CONFIG_HIGHMEM + N_HIGH_MEMORY, /* The node has regular or high memory */ +#else + N_HIGH_MEMORY = N_NORMAL_MEMORY, +#endif + N_CPU, /* The node has one or more cpus */ + NR_NODE_STATES +}; + +/* + * The following particular system nodemasks and operations + * on them manage all possible and online nodes. 
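A minimal in-kernel sketch exercising a few of the operations catalogued earlier in this header; the node numbers are arbitrary and the function name is invented.

#include <linux/nodemask.h>
#include <linux/printk.h>

static void nodemask_demo(void)
{
	nodemask_t mask = NODE_MASK_NONE;
	int node;

	node_set(0, mask);			/* turn on bit 0 */
	node_set(1, mask);
	if (node_isset(1, mask))
		node_clear(1, mask);		/* and off again */

	for_each_node_mask(node, mask)		/* visits only node 0 here */
		pr_info("node %d is in the mask\n", node);

	pr_info("%d bit(s) set\n", nodes_weight(mask));
}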
+ */ + +extern nodemask_t node_states[NR_NODE_STATES]; + +#if MAX_NUMNODES > 1 +static inline int node_state(int node, enum node_states state) +{ + return node_isset(node, node_states[state]); +} + +static inline void node_set_state(int node, enum node_states state) +{ + __node_set(node, &node_states[state]); +} + +static inline void node_clear_state(int node, enum node_states state) +{ + __node_clear(node, &node_states[state]); +} + +static inline int num_node_state(enum node_states state) +{ + return nodes_weight(node_states[state]); +} + +#define for_each_node_state(__node, __state) \ + for_each_node_mask((__node), node_states[__state]) + +#define first_online_node first_node(node_states[N_ONLINE]) +#define next_online_node(nid) next_node((nid), node_states[N_ONLINE]) + +extern int nr_node_ids; +extern int nr_online_nodes; + +static inline void node_set_online(int nid) +{ + node_set_state(nid, N_ONLINE); + nr_online_nodes = num_node_state(N_ONLINE); +} + +static inline void node_set_offline(int nid) +{ + node_clear_state(nid, N_ONLINE); + nr_online_nodes = num_node_state(N_ONLINE); +} + +#else + +static inline int node_state(int node, enum node_states state) +{ + return node == 0; +} + +static inline void node_set_state(int node, enum node_states state) +{ +} + +static inline void node_clear_state(int node, enum node_states state) +{ +} + +static inline int num_node_state(enum node_states state) +{ + return 1; +} + +#define for_each_node_state(node, __state) \ + for ( (node) = 0; (node) == 0; (node) = 1) + +#define first_online_node 0 +#define next_online_node(nid) (MAX_NUMNODES) +#define nr_node_ids 1 +#define nr_online_nodes 1 + +#define node_set_online(node) node_set_state((node), N_ONLINE) +#define node_set_offline(node) node_clear_state((node), N_ONLINE) + +#endif + +#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1) +extern int node_random(const nodemask_t *maskp); +#else +static inline int node_random(const nodemask_t *mask) +{ + return 0; +} +#endif + +#define node_online_map node_states[N_ONLINE] +#define node_possible_map node_states[N_POSSIBLE] + +#define num_online_nodes() num_node_state(N_ONLINE) +#define num_possible_nodes() num_node_state(N_POSSIBLE) +#define node_online(node) node_state((node), N_ONLINE) +#define node_possible(node) node_state((node), N_POSSIBLE) + +#define for_each_node(node) for_each_node_state(node, N_POSSIBLE) +#define for_each_online_node(node) for_each_node_state(node, N_ONLINE) + +/* + * For nodemask scrach area. + * NODEMASK_ALLOC(type, name) allocates an object with a specified type and + * name. + */ +#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */ +#define NODEMASK_ALLOC(type, name, gfp_flags) \ + type *name = kmalloc(sizeof(*name), gfp_flags) +#define NODEMASK_FREE(m) kfree(m) +#else +#define NODEMASK_ALLOC(type, name, gfp_flags) type _##name, *name = &_##name +#define NODEMASK_FREE(m) do {} while (0) +#endif + +/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */ +struct nodemask_scratch { + nodemask_t mask1; + nodemask_t mask2; +}; + +#define NODEMASK_SCRATCH(x) \ + NODEMASK_ALLOC(struct nodemask_scratch, x, \ + GFP_KERNEL | __GFP_NORETRY) +#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) + + +#endif /* __LINUX_NODEMASK_H */ diff --git a/include/linux/notifier.h b/include/linux/notifier.h new file mode 100644 index 00000000..d65746ef --- /dev/null +++ b/include/linux/notifier.h @@ -0,0 +1,212 @@ +/* + * Routines to manage notifier chains for passing status changes to any + * interested routines. 
We need this instead of hard coded call lists so + * that modules can poke their nose into the innards. The network devices + * needed them so here they are for the rest of you. + * + * Alan Cox <Alan.Cox@linux.org> + */ + +#ifndef _LINUX_NOTIFIER_H +#define _LINUX_NOTIFIER_H +#include <linux/errno.h> +#include <linux/mutex.h> +#include <linux/rwsem.h> +#include <linux/srcu.h> + +/* + * Notifier chains are of four types: + * + * Atomic notifier chains: Chain callbacks run in interrupt/atomic + * context. Callouts are not allowed to block. + * Blocking notifier chains: Chain callbacks run in process context. + * Callouts are allowed to block. + * Raw notifier chains: There are no restrictions on callbacks, + * registration, or unregistration. All locking and protection + * must be provided by the caller. + * SRCU notifier chains: A variant of blocking notifier chains, with + * the same restrictions. + * + * atomic_notifier_chain_register() may be called from an atomic context, + * but blocking_notifier_chain_register() and srcu_notifier_chain_register() + * must be called from a process context. Ditto for the corresponding + * _unregister() routines. + * + * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(), + * and srcu_notifier_chain_unregister() _must not_ be called from within + * the call chain. + * + * SRCU notifier chains are an alternative form of blocking notifier chains. + * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for + * protection of the chain links. This means there is _very_ low overhead + * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. + * As compensation, srcu_notifier_chain_unregister() is rather expensive. + * SRCU notifier chains should be used when the chain will be called very + * often but notifier_blocks will seldom be removed. Also, SRCU notifier + * chains are slightly more difficult to use because they require special + * runtime initialization. 
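Because of that last point, an SRCU head cannot be set up statically like the other three kinds. A minimal sketch of the required setup and teardown, with hypothetical names, using the helpers declared just below:

#include <linux/notifier.h>

static struct srcu_notifier_head example_srcu_chain;	/* hypothetical chain */

static int example_setup(void)
{
	srcu_init_notifier_head(&example_srcu_chain);	/* must run before first use */
	return 0;
}

static void example_teardown(void)
{
	srcu_cleanup_notifier_head(&example_srcu_chain);
}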
+ */ + +struct notifier_block { + int (*notifier_call)(struct notifier_block *, unsigned long, void *); + struct notifier_block __rcu *next; + int priority; +}; + +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block __rcu *head; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block __rcu *head; +}; + +struct raw_notifier_head { + struct notifier_block __rcu *head; +}; + +struct srcu_notifier_head { + struct mutex mutex; + struct srcu_struct srcu; + struct notifier_block __rcu *head; +}; + +#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ + spin_lock_init(&(name)->lock); \ + (name)->head = NULL; \ + } while (0) +#define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \ + init_rwsem(&(name)->rwsem); \ + (name)->head = NULL; \ + } while (0) +#define RAW_INIT_NOTIFIER_HEAD(name) do { \ + (name)->head = NULL; \ + } while (0) + +/* srcu_notifier_heads must be initialized and cleaned up dynamically */ +extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); +#define srcu_cleanup_notifier_head(name) \ + cleanup_srcu_struct(&(name)->srcu); + +#define ATOMIC_NOTIFIER_INIT(name) { \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .head = NULL } +#define BLOCKING_NOTIFIER_INIT(name) { \ + .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ + .head = NULL } +#define RAW_NOTIFIER_INIT(name) { \ + .head = NULL } +/* srcu_notifier_heads cannot be initialized statically */ + +#define ATOMIC_NOTIFIER_HEAD(name) \ + struct atomic_notifier_head name = \ + ATOMIC_NOTIFIER_INIT(name) +#define BLOCKING_NOTIFIER_HEAD(name) \ + struct blocking_notifier_head name = \ + BLOCKING_NOTIFIER_INIT(name) +#define RAW_NOTIFIER_HEAD(name) \ + struct raw_notifier_head name = \ + RAW_NOTIFIER_INIT(name) + +#ifdef __KERNEL__ + +extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, + struct notifier_block *nb); +extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh, + struct notifier_block *nb); +extern int raw_notifier_chain_register(struct raw_notifier_head *nh, + struct notifier_block *nb); +extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, + struct notifier_block *nb); + +extern int blocking_notifier_chain_cond_register( + struct blocking_notifier_head *nh, + struct notifier_block *nb); + +extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, + struct notifier_block *nb); +extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, + struct notifier_block *nb); +extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh, + struct notifier_block *nb); +extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, + struct notifier_block *nb); + +extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v); +extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); +extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v); +extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); +extern int raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v); +extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); +extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v); 
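Putting the pieces together, a conventional blocking-chain user looks roughly like this; the chain, event code and callback are hypothetical, and the NOTIFY_* return values and notifier_from_errno() used here are defined a few lines further down in this header.

#include <linux/notifier.h>
#include <linux/errno.h>
#include <linux/init.h>

static BLOCKING_NOTIFIER_HEAD(example_chain);	/* hypothetical chain */

static int example_event_cb(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	if (event == 42)	/* hypothetical event code */
		return notifier_from_errno(-EBUSY);	/* veto further processing */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event_cb,
};

static int __init example_init(void)
{
	/* Producers would post events with:
	 *   blocking_notifier_call_chain(&example_chain, event, ptr);
	 */
	return blocking_notifier_chain_register(&example_chain, &example_nb);
}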
+extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v, int nr_to_call, int *nr_calls); + +#define NOTIFY_DONE 0x0000 /* Don't care */ +#define NOTIFY_OK 0x0001 /* Suits me */ +#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ +#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) + /* Bad/Veto action */ +/* + * Clean way to return from the notifier and stop further calls. + */ +#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK) + +/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */ +static inline int notifier_from_errno(int err) +{ + if (err) + return NOTIFY_STOP_MASK | (NOTIFY_OK - err); + + return NOTIFY_OK; +} + +/* Restore (negative) errno value from notify return value. */ +static inline int notifier_to_errno(int ret) +{ + ret &= ~NOTIFY_STOP_MASK; + return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0; +} + +/* + * Declared notifiers so far. I can imagine quite a few more chains + * over time (eg laptop power reset chains, reboot chain (to clean + * device units up), device [un]mount chain, module load/unload chain, + * low memory chain, screenblank chain (for plug in modular screenblankers) + * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... + */ + +/* CPU notfiers are defined in include/linux/cpu.h. */ + +/* netdevice notifiers are defined in include/linux/netdevice.h */ + +/* reboot notifiers are defined in include/linux/reboot.h. */ + +/* Hibernation and suspend events are defined in include/linux/suspend.h. */ + +/* Virtual Terminal events are defined in include/linux/vt.h. */ + +#define NETLINK_URELEASE 0x0001 /* Unicast netlink socket released */ + +/* Console keyboard events. + * Note: KBD_KEYCODE is always sent before KBD_UNBOUND_KEYCODE, KBD_UNICODE and + * KBD_KEYSYM. */ +#define KBD_KEYCODE 0x0001 /* Keyboard keycode, called before any other */ +#define KBD_UNBOUND_KEYCODE 0x0002 /* Keyboard keycode which is not bound to any other */ +#define KBD_UNICODE 0x0003 /* Keyboard unicode */ +#define KBD_KEYSYM 0x0004 /* Keyboard keysym */ +#define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */ + +extern struct blocking_notifier_head reboot_notifier_list; + +#endif /* __KERNEL__ */ +#endif /* _LINUX_NOTIFIER_H */ diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h new file mode 100644 index 00000000..7da0cf37 --- /dev/null +++ b/include/linux/nsc_gpio.h @@ -0,0 +1,40 @@ +/** + nsc_gpio.c + + National Semiconductor GPIO common access methods. + + struct nsc_gpio_ops abstracts the low-level access + operations for the GPIO units on 2 NSC chip families; the GEODE + integrated CPU, and the PC-8736[03456] integrated PC-peripheral + chips. + + The GPIO units on these chips have the same pin architecture, but + the access methods differ. Thus, scx200_gpio and pc8736x_gpio + implement their own versions of these routines; and use the common + file-operations routines implemented in nsc_gpio module. + + Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com> + + NB: this work was tested on the Geode SC-1100 and PC-87366 chips. + NSC sold the GEODE line to AMD, and the PC-8736x line to Winbond. 
+*/ + +struct nsc_gpio_ops { + struct module* owner; + u32 (*gpio_config) (unsigned iminor, u32 mask, u32 bits); + void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor); + int (*gpio_get) (unsigned iminor); + void (*gpio_set) (unsigned iminor, int state); + void (*gpio_change) (unsigned iminor); + int (*gpio_current) (unsigned iminor); + struct device* dev; /* for dev_dbg() support, set in init */ +}; + +extern ssize_t nsc_gpio_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos); + +extern ssize_t nsc_gpio_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos); + +extern void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index); + diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h new file mode 100644 index 00000000..cc37a55a --- /dev/null +++ b/include/linux/nsproxy.h @@ -0,0 +1,85 @@ +#ifndef _LINUX_NSPROXY_H +#define _LINUX_NSPROXY_H + +#include <linux/spinlock.h> +#include <linux/sched.h> + +struct mnt_namespace; +struct uts_namespace; +struct ipc_namespace; +struct pid_namespace; +struct fs_struct; + +/* + * A structure to contain pointers to all per-process + * namespaces - fs (mount), uts, network, sysvipc, etc. + * + * 'count' is the number of tasks holding a reference. + * The count for each namespace, then, will be the number + * of nsproxies pointing to it, not the number of tasks. + * + * The nsproxy is shared by tasks which share all namespaces. + * As soon as a single namespace is cloned or unshared, the + * nsproxy is copied. + */ +struct nsproxy { + atomic_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns; + struct net *net_ns; +}; +extern struct nsproxy init_nsproxy; + +/* + * the namespaces access rules are: + * + * 1. only current task is allowed to change tsk->nsproxy pointer or + * any pointer on the nsproxy itself + * + * 2. when accessing (i.e. reading) current task's namespaces - no + * precautions should be taken - just dereference the pointers + * + * 3. the access to other task namespaces is performed like this + * rcu_read_lock(); + * nsproxy = task_nsproxy(tsk); + * if (nsproxy != NULL) { + * / * + * * work with the namespaces here + * * e.g. get the reference on one of them + * * / + * } / * + * * NULL task_nsproxy() means that this task is + * * almost dead (zombie) + * * / + * rcu_read_unlock(); + * + */ + +static inline struct nsproxy *task_nsproxy(struct task_struct *tsk) +{ + return rcu_dereference(tsk->nsproxy); +} + +int copy_namespaces(unsigned long flags, struct task_struct *tsk); +void exit_task_namespaces(struct task_struct *tsk); +void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new); +void free_nsproxy(struct nsproxy *ns); +int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, + struct fs_struct *); +int __init nsproxy_cache_init(void); + +static inline void put_nsproxy(struct nsproxy *ns) +{ + if (atomic_dec_and_test(&ns->count)) { + free_nsproxy(ns); + } +} + +static inline void get_nsproxy(struct nsproxy *ns) +{ + atomic_inc(&ns->count); +} + +#endif diff --git a/include/linux/nubus.h b/include/linux/nubus.h new file mode 100644 index 00000000..e137b3c4 --- /dev/null +++ b/include/linux/nubus.h @@ -0,0 +1,363 @@ +/* + nubus.h: various definitions and prototypes for NuBus drivers to use. + + Originally written by Alan Cox. + + Hacked to death by C. Scott Ananian and David Huggins-Daines. 
+ + Some of the constants in here are from the corresponding + NetBSD/OpenBSD header file, by Allen Briggs. We figured out the + rest of them on our own. */ + +#ifndef LINUX_NUBUS_H +#define LINUX_NUBUS_H + +#include <linux/types.h> +#ifdef __KERNEL__ +#include <asm/nubus.h> +#endif + +enum nubus_category { + NUBUS_CAT_BOARD = 0x0001, + NUBUS_CAT_DISPLAY = 0x0003, + NUBUS_CAT_NETWORK = 0x0004, + NUBUS_CAT_COMMUNICATIONS = 0x0006, + NUBUS_CAT_FONT = 0x0009, + NUBUS_CAT_CPU = 0x000A, + /* For lack of a better name */ + NUBUS_CAT_DUODOCK = 0x0020 +}; + +enum nubus_type_network { + NUBUS_TYPE_ETHERNET = 0x0001, + NUBUS_TYPE_RS232 = 0x0002 +}; + +enum nubus_type_display { + NUBUS_TYPE_VIDEO = 0x0001 +}; + +enum nubus_type_cpu { + NUBUS_TYPE_68020 = 0x0003, + NUBUS_TYPE_68030 = 0x0004, + NUBUS_TYPE_68040 = 0x0005 +}; + +/* Known <Cat,Type,SW,HW> tuples: (according to TattleTech and Slots) + * 68030 motherboards: <10,4,0,24> + * 68040 motherboards: <10,5,0,24> + * DuoDock Plus: <32,1,1,2> + * + * Toby Frame Buffer card: <3,1,1,1> + * RBV built-in video (IIci): <3,1,1,24> + * Valkyrie built-in video (Q630): <3,1,1,46> + * Macintosh Display Card: <3,1,1,25> + * Sonora built-in video (P460): <3,1,1,34> + * Jet framebuffer (DuoDock Plus): <3,1,1,41> + * + * SONIC comm-slot/on-board and DuoDock Ethernet: <4,1,1,272> + * SONIC LC-PDS Ethernet (Dayna, but like Apple 16-bit, sort of): <4,1,1,271> + * Apple SONIC LC-PDS Ethernet ("Apple Ethernet LC Twisted-Pair Card"): <4,1,0,281> + * Sonic Systems Ethernet A-Series Card: <4,1,268,256> + * Asante MacCon NuBus-A: <4,1,260,256> (alpha-1.0,1.1 revision) + * ROM on the above card: <2,1,0,0> + * Cabletron ethernet card: <4,1,1,265> + * Farallon ethernet card: <4,1,268,256> (identical to Sonic Systems card) + * Kinetics EtherPort IIN: <4,1,259,262> + * API Engineering EtherRun_LCa PDS enet card: <4,1,282,256> + * + * Add your devices to the list! You can obtain the "Slots" utility + * from Apple's FTP site at: + * ftp://dev.apple.com/devworld/Tool_Chest/Devices_-_Hardware/NuBus_Slot_Manager/ + * + * Alternately, TattleTech can be found at any Info-Mac mirror site. + * or from its distribution site: ftp://ftp.decismkr.com/dms + */ + +/* DrSW: Uniquely identifies the software interface to a board. This + is usually the one you want to look at when writing a driver. It's + not as useful as you think, though, because as we should know by + now (duh), "Apple Compatible" can mean a lot of things... */ + +/* Add known DrSW values here */ +enum nubus_drsw { + /* NUBUS_CAT_DISPLAY */ + NUBUS_DRSW_APPLE = 0x0001, + NUBUS_DRSW_APPLE_HIRES = 0x0013, /* MacII HiRes card driver */ + + /* NUBUS_CAT_NETWORK */ + NUBUS_DRSW_3COM = 0x0000, + NUBUS_DRSW_CABLETRON = 0x0001, + NUBUS_DRSW_SONIC_LC = 0x0001, + NUBUS_DRSW_KINETICS = 0x0103, + NUBUS_DRSW_ASANTE = 0x0104, + NUBUS_DRSW_TECHWORKS = 0x0109, + NUBUS_DRSW_DAYNA = 0x010b, + NUBUS_DRSW_FARALLON = 0x010c, + NUBUS_DRSW_APPLE_SN = 0x010f, + NUBUS_DRSW_DAYNA2 = 0x0115, + NUBUS_DRSW_FOCUS = 0x011a, + NUBUS_DRSW_ASANTE_CS = 0x011d, /* use asante SMC9194 driver */ + NUBUS_DRSW_DAYNA_LC = 0x011e, + + /* NUBUS_CAT_CPU */ + NUBUS_DRSW_NONE = 0x0000, +}; + +/* DrHW: Uniquely identifies the hardware interface to a board (or at + least, it should... 
some video cards are known to incorrectly + identify themselves as Toby cards) */ + +/* Add known DrHW values here */ +enum nubus_drhw { + /* NUBUS_CAT_DISPLAY */ + NUBUS_DRHW_APPLE_TFB = 0x0001, /* Toby frame buffer card */ + NUBUS_DRHW_APPLE_WVC = 0x0006, /* Apple Workstation Video Card */ + NUBUS_DRHW_SIGMA_CLRMAX = 0x0007, /* Sigma Design ColorMax */ + NUBUS_DRHW_APPLE_SE30 = 0x0009, /* Apple SE/30 video */ + NUBUS_DRHW_APPLE_HRVC = 0x0013, /* Mac II High-Res Video Card */ + NUBUS_DRHW_APPLE_PVC = 0x0017, /* Mac II Portrait Video Card */ + NUBUS_DRHW_APPLE_RBV1 = 0x0018, /* IIci RBV video */ + NUBUS_DRHW_APPLE_MDC = 0x0019, /* Macintosh Display Card */ + NUBUS_DRHW_APPLE_SONORA = 0x0022, /* Sonora built-in video */ + NUBUS_DRHW_APPLE_24AC = 0x002b, /* Mac 24AC Video Card */ + NUBUS_DRHW_APPLE_VALKYRIE = 0x002e, + NUBUS_DRHW_APPLE_JET = 0x0029, /* Jet framebuffer (DuoDock) */ + NUBUS_DRHW_SMAC_GFX = 0x0105, /* SuperMac GFX */ + NUBUS_DRHW_RASTER_CB264 = 0x013B, /* RasterOps ColorBoard 264 */ + NUBUS_DRHW_MICRON_XCEED = 0x0146, /* Micron Exceed color */ + NUBUS_DRHW_RDIUS_GSC = 0x0153, /* Radius GS/C */ + NUBUS_DRHW_SMAC_SPEC8 = 0x017B, /* SuperMac Spectrum/8 */ + NUBUS_DRHW_SMAC_SPEC24 = 0x017C, /* SuperMac Spectrum/24 */ + NUBUS_DRHW_RASTER_CB364 = 0x026F, /* RasterOps ColorBoard 364 */ + NUBUS_DRHW_RDIUS_DCGX = 0x027C, /* Radius DirectColor/GX */ + NUBUS_DRHW_RDIUS_PC8 = 0x0291, /* Radius PrecisionColor 8 */ + NUBUS_DRHW_LAPIS_PCS8 = 0x0292, /* Lapis ProColorServer 8 */ + NUBUS_DRHW_RASTER_24XLI = 0x02A0, /* RasterOps 8/24 XLi */ + NUBUS_DRHW_RASTER_PBPGT = 0x02A5, /* RasterOps PaintBoard Prism GT */ + NUBUS_DRHW_EMACH_FSX = 0x02AE, /* E-Machines Futura SX */ + NUBUS_DRHW_RASTER_24XLTV = 0x02B7, /* RasterOps 24XLTV */ + NUBUS_DRHW_SMAC_THUND24 = 0x02CB, /* SuperMac Thunder/24 */ + NUBUS_DRHW_SMAC_THUNDLGHT = 0x03D9, /* SuperMac ThunderLight */ + NUBUS_DRHW_RDIUS_PC24XP = 0x0406, /* Radius PrecisionColor 24Xp */ + NUBUS_DRHW_RDIUS_PC24X = 0x040A, /* Radius PrecisionColor 24X */ + NUBUS_DRHW_RDIUS_PC8XJ = 0x040B, /* Radius PrecisionColor 8XJ */ + + /* NUBUS_CAT_NETWORK */ + NUBUS_DRHW_INTERLAN = 0x0100, + NUBUS_DRHW_SMC9194 = 0x0101, + NUBUS_DRHW_KINETICS = 0x0106, + NUBUS_DRHW_CABLETRON = 0x0109, + NUBUS_DRHW_ASANTE_LC = 0x010f, + NUBUS_DRHW_SONIC = 0x0110, + NUBUS_DRHW_TECHWORKS = 0x0112, + NUBUS_DRHW_APPLE_SONIC_NB = 0x0118, + NUBUS_DRHW_APPLE_SONIC_LC = 0x0119, + NUBUS_DRHW_FOCUS = 0x011c, + NUBUS_DRHW_SONNET = 0x011d, +}; + +/* Resource IDs: These are the identifiers for the various weird and + wonderful tidbits of information that may or may not reside in the + NuBus ROM directory. */ +enum nubus_res_id { + NUBUS_RESID_TYPE = 0x0001, + NUBUS_RESID_NAME = 0x0002, + NUBUS_RESID_ICON = 0x0003, + NUBUS_RESID_DRVRDIR = 0x0004, + NUBUS_RESID_LOADREC = 0x0005, + NUBUS_RESID_BOOTREC = 0x0006, + NUBUS_RESID_FLAGS = 0x0007, + NUBUS_RESID_HWDEVID = 0x0008, + NUBUS_RESID_MINOR_BASEOS = 0x000a, + NUBUS_RESID_MINOR_LENGTH = 0x000b, + NUBUS_RESID_MAJOR_BASEOS = 0x000c, + NUBUS_RESID_MAJOR_LENGTH = 0x000d, + NUBUS_RESID_CICN = 0x000f, + NUBUS_RESID_ICL8 = 0x0010, + NUBUS_RESID_ICL4 = 0x0011, +}; + +/* Category-specific resources. 
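Relating the <Cat,Type,SW,HW> tuples listed earlier to these enums, the Toby Frame Buffer card's <3,1,1,1> can be written symbolically; the struct below is purely illustrative and not part of nubus.h.

/* <Category, Type, DrSW, DrHW> identification tuple (illustrative only). */
struct nubus_id_tuple {
	unsigned short category, type, dr_sw, dr_hw;
};

/* Toby Frame Buffer card: <3,1,1,1> from the list of known tuples. */
static const struct nubus_id_tuple toby_fb = {
	NUBUS_CAT_DISPLAY,	/* 0x0003 */
	NUBUS_TYPE_VIDEO,	/* 0x0001 */
	NUBUS_DRSW_APPLE,	/* 0x0001 */
	NUBUS_DRHW_APPLE_TFB,	/* 0x0001 */
};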
*/ +enum nubus_board_res_id { + NUBUS_RESID_BOARDID = 0x0020, + NUBUS_RESID_PRAMINITDATA = 0x0021, + NUBUS_RESID_PRIMARYINIT = 0x0022, + NUBUS_RESID_TIMEOUTCONST = 0x0023, + NUBUS_RESID_VENDORINFO = 0x0024, + NUBUS_RESID_BOARDFLAGS = 0x0025, + NUBUS_RESID_SECONDINIT = 0x0026, + + /* Not sure why Apple put these next two in here */ + NUBUS_RESID_VIDNAMES = 0x0041, + NUBUS_RESID_VIDMODES = 0x007e +}; + +/* Fields within the vendor info directory */ +enum nubus_vendor_res_id { + NUBUS_RESID_VEND_ID = 0x0001, + NUBUS_RESID_VEND_SERIAL = 0x0002, + NUBUS_RESID_VEND_REV = 0x0003, + NUBUS_RESID_VEND_PART = 0x0004, + NUBUS_RESID_VEND_DATE = 0x0005 +}; + +enum nubus_net_res_id { + NUBUS_RESID_MAC_ADDRESS = 0x0080 +}; + +enum nubus_cpu_res_id { + NUBUS_RESID_MEMINFO = 0x0081, + NUBUS_RESID_ROMINFO = 0x0082 +}; + +enum nubus_display_res_id { + NUBUS_RESID_GAMMADIR = 0x0040, + NUBUS_RESID_FIRSTMODE = 0x0080, + NUBUS_RESID_SECONDMODE = 0x0081, + NUBUS_RESID_THIRDMODE = 0x0082, + NUBUS_RESID_FOURTHMODE = 0x0083, + NUBUS_RESID_FIFTHMODE = 0x0084, + NUBUS_RESID_SIXTHMODE = 0x0085 +}; + +struct nubus_dir +{ + unsigned char *base; + unsigned char *ptr; + int done; + int mask; +}; + +struct nubus_dirent +{ + unsigned char *base; + unsigned char type; + __u32 data; /* Actually 24bits used */ + int mask; +}; + +#ifdef __KERNEL__ +struct nubus_board { + struct nubus_board* next; + struct nubus_dev* first_dev; + + /* Only 9-E actually exist, though 0-8 are also theoretically + possible, and 0 is a special case which represents the + motherboard and onboard peripherals (Ethernet, video) */ + int slot; + /* For slot 0, this is bogus. */ + char name[64]; + + /* Format block */ + unsigned char* fblock; + /* Root directory (does *not* always equal fblock + doffset!) */ + unsigned char* directory; + + unsigned long slot_addr; + /* Offset to root directory (sometimes) */ + unsigned long doffset; + /* Length over which to compute the crc */ + unsigned long rom_length; + /* Completely useless most of the time */ + unsigned long crc; + unsigned char rev; + unsigned char format; + unsigned char lanes; +}; + +struct nubus_dev { + /* Next link in device list */ + struct nubus_dev* next; + /* Directory entry in /proc/bus/nubus */ + struct proc_dir_entry* procdir; + + /* The functional resource ID of this device */ + unsigned char resid; + /* These are mostly here for convenience; we could always read + them from the ROMs if we wanted to */ + unsigned short category; + unsigned short type; + unsigned short dr_sw; + unsigned short dr_hw; + /* This is the device's name rather than the board's. + Sometimes they are different. Usually the board name is + more correct. 
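+   (This name comes from the functional resource; the board's own name, taken
+   from the board resource, lives in nubus_board.name above.)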
*/ + char name[64]; + /* MacOS driver (I kid you not) */ + unsigned char* driver; + /* Actually this is an offset */ + unsigned long iobase; + unsigned long iosize; + unsigned char flags, hwdevid; + + /* Functional directory */ + unsigned char* directory; + /* Much of our info comes from here */ + struct nubus_board* board; +}; + +/* This is all NuBus devices (used to find devices later on) */ +extern struct nubus_dev* nubus_devices; +/* This is all NuBus cards */ +extern struct nubus_board* nubus_boards; + +/* Generic NuBus interface functions, modelled after the PCI interface */ +void nubus_scan_bus(void); +extern void nubus_proc_init(void); +int get_nubus_list(char *buf); +int nubus_proc_attach_device(struct nubus_dev *dev); +int nubus_proc_detach_device(struct nubus_dev *dev); +/* If we need more precision we can add some more of these */ +struct nubus_dev* nubus_find_device(unsigned short category, + unsigned short type, + unsigned short dr_hw, + unsigned short dr_sw, + const struct nubus_dev* from); +struct nubus_dev* nubus_find_type(unsigned short category, + unsigned short type, + const struct nubus_dev* from); +/* Might have more than one device in a slot, you know... */ +struct nubus_dev* nubus_find_slot(unsigned int slot, + const struct nubus_dev* from); + +/* These are somewhat more NuBus-specific. They all return 0 for + success and -1 for failure, as you'd expect. */ + +/* The root directory which contains the board and functional + directories */ +int nubus_get_root_dir(const struct nubus_board* board, + struct nubus_dir* dir); +/* The board directory */ +int nubus_get_board_dir(const struct nubus_board* board, + struct nubus_dir* dir); +/* The functional directory */ +int nubus_get_func_dir(const struct nubus_dev* dev, + struct nubus_dir* dir); + +/* These work on any directory gotten via the above */ +int nubus_readdir(struct nubus_dir* dir, + struct nubus_dirent* ent); +int nubus_find_rsrc(struct nubus_dir* dir, + unsigned char rsrc_type, + struct nubus_dirent* ent); +int nubus_rewinddir(struct nubus_dir* dir); + +/* Things to do with directory entries */ +int nubus_get_subdir(const struct nubus_dirent* ent, + struct nubus_dir* dir); +void nubus_get_rsrc_mem(void* dest, + const struct nubus_dirent *dirent, + int len); +void nubus_get_rsrc_str(void* dest, + const struct nubus_dirent *dirent, + int maxlen); +#endif /* __KERNEL__ */ + +/* We'd like to get rid of this eventually. Only daynaport.c uses it now. */ +static inline void *nubus_slot_addr(int slot) +{ + return (void *)(0xF0000000|(slot<<24)); +} + +#endif /* LINUX_NUBUS_H */ diff --git a/include/linux/numa.h b/include/linux/numa.h new file mode 100644 index 00000000..3aaa3160 --- /dev/null +++ b/include/linux/numa.h @@ -0,0 +1,15 @@ +#ifndef _LINUX_NUMA_H +#define _LINUX_NUMA_H + + +#ifdef CONFIG_NODES_SHIFT +#define NODES_SHIFT CONFIG_NODES_SHIFT +#else +#define NODES_SHIFT 0 +#endif + +#define MAX_NUMNODES (1 << NODES_SHIFT) + +#define NUMA_NO_NODE (-1) + +#endif /* _LINUX_NUMA_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h new file mode 100644 index 00000000..9490a005 --- /dev/null +++ b/include/linux/nvme.h @@ -0,0 +1,434 @@ +/* + * Definitions for the NVM Express interface + * Copyright (c) 2011, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef _LINUX_NVME_H +#define _LINUX_NVME_H + +#include <linux/types.h> + +struct nvme_bar { + __u64 cap; /* Controller Capabilities */ + __u32 vs; /* Version */ + __u32 intms; /* Interrupt Mask Set */ + __u32 intmc; /* Interrupt Mask Clear */ + __u32 cc; /* Controller Configuration */ + __u32 rsvd1; /* Reserved */ + __u32 csts; /* Controller Status */ + __u32 rsvd2; /* Reserved */ + __u32 aqa; /* Admin Queue Attributes */ + __u64 asq; /* Admin SQ Base Address */ + __u64 acq; /* Admin CQ Base Address */ +}; + +#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) +#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) + +enum { + NVME_CC_ENABLE = 1 << 0, + NVME_CC_CSS_NVM = 0 << 4, + NVME_CC_MPS_SHIFT = 7, + NVME_CC_ARB_RR = 0 << 11, + NVME_CC_ARB_WRRU = 1 << 11, + NVME_CC_ARB_VS = 7 << 11, + NVME_CC_SHN_NONE = 0 << 14, + NVME_CC_SHN_NORMAL = 1 << 14, + NVME_CC_SHN_ABRUPT = 2 << 14, + NVME_CC_IOSQES = 6 << 16, + NVME_CC_IOCQES = 4 << 20, + NVME_CSTS_RDY = 1 << 0, + NVME_CSTS_CFS = 1 << 1, + NVME_CSTS_SHST_NORMAL = 0 << 2, + NVME_CSTS_SHST_OCCUR = 1 << 2, + NVME_CSTS_SHST_CMPLT = 2 << 2, +}; + +struct nvme_id_power_state { + __le16 max_power; /* centiwatts */ + __u16 rsvd2; + __le32 entry_lat; /* microseconds */ + __le32 exit_lat; /* microseconds */ + __u8 read_tput; + __u8 read_lat; + __u8 write_tput; + __u8 write_lat; + __u8 rsvd16[16]; +}; + +#define NVME_VS(major, minor) (major << 16 | minor) + +struct nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 mic; + __u8 mdts; + __u8 rsvd78[178]; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 rsvd264[248]; + __u8 sqes; + __u8 cqes; + __u8 rsvd514[2]; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 rsvd530[1518]; + struct nvme_id_power_state psd[32]; + __u8 vs[1024]; +}; + +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; +}; + +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 rsvd30[98]; + struct nvme_lbaf lbaf[16]; + __u8 rsvd192[192]; + __u8 vs[3712]; +}; + +enum { + NVME_NS_FEAT_THIN = 1 << 0, + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, +}; + +struct nvme_lba_range_type { + __u8 type; + __u8 attributes; + __u8 rsvd2[14]; + __u64 slba; + __u64 nlb; + __u8 guid[16]; + __u8 rsvd48[16]; +}; + +enum { + NVME_LBART_TYPE_FS = 0x01, + NVME_LBART_TYPE_RAID = 0x02, + NVME_LBART_TYPE_CACHE = 0x03, + NVME_LBART_TYPE_SWAP = 0x04, + + NVME_LBART_ATTRIB_TEMP = 1 << 0, + NVME_LBART_ATTRIB_HIDE = 1 << 1, +}; + +/* I/O commands */ + +enum nvme_opcode { + nvme_cmd_flush = 0x00, + nvme_cmd_write = 0x01, + nvme_cmd_read = 0x02, + nvme_cmd_write_uncor = 0x04, + nvme_cmd_compare = 0x05, + nvme_cmd_dsm = 0x09, +}; + +struct nvme_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u32 cdw2[2]; + __le64 metadata; + __le64 prp1; + 
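+ /* prp2: second PRP entry, or the address of a PRP list when the transfer
+  * does not fit in two pages (see the NVMe specification) */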
__le64 prp2; + __u32 cdw10[6]; +}; + +struct nvme_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + __le64 prp1; + __le64 prp2; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + +enum { + NVME_RW_LR = 1 << 15, + NVME_RW_FUA = 1 << 14, + NVME_RW_DSM_FREQ_UNSPEC = 0, + NVME_RW_DSM_FREQ_TYPICAL = 1, + NVME_RW_DSM_FREQ_RARE = 2, + NVME_RW_DSM_FREQ_READS = 3, + NVME_RW_DSM_FREQ_WRITES = 4, + NVME_RW_DSM_FREQ_RW = 5, + NVME_RW_DSM_FREQ_ONCE = 6, + NVME_RW_DSM_FREQ_PREFETCH = 7, + NVME_RW_DSM_FREQ_TEMP = 8, + NVME_RW_DSM_LATENCY_NONE = 0 << 4, + NVME_RW_DSM_LATENCY_IDLE = 1 << 4, + NVME_RW_DSM_LATENCY_NORM = 2 << 4, + NVME_RW_DSM_LATENCY_LOW = 3 << 4, + NVME_RW_DSM_SEQ_REQ = 1 << 6, + NVME_RW_DSM_COMPRESSED = 1 << 7, +}; + +/* Admin commands */ + +enum nvme_admin_opcode { + nvme_admin_delete_sq = 0x00, + nvme_admin_create_sq = 0x01, + nvme_admin_get_log_page = 0x02, + nvme_admin_delete_cq = 0x04, + nvme_admin_create_cq = 0x05, + nvme_admin_identify = 0x06, + nvme_admin_abort_cmd = 0x08, + nvme_admin_set_features = 0x09, + nvme_admin_get_features = 0x0a, + nvme_admin_async_event = 0x0c, + nvme_admin_activate_fw = 0x10, + nvme_admin_download_fw = 0x11, + nvme_admin_format_nvm = 0x80, + nvme_admin_security_send = 0x81, + nvme_admin_security_recv = 0x82, +}; + +enum { + NVME_QUEUE_PHYS_CONTIG = (1 << 0), + NVME_CQ_IRQ_ENABLED = (1 << 1), + NVME_SQ_PRIO_URGENT = (0 << 1), + NVME_SQ_PRIO_HIGH = (1 << 1), + NVME_SQ_PRIO_MEDIUM = (2 << 1), + NVME_SQ_PRIO_LOW = (3 << 1), + NVME_FEAT_ARBITRATION = 0x01, + NVME_FEAT_POWER_MGMT = 0x02, + NVME_FEAT_LBA_RANGE = 0x03, + NVME_FEAT_TEMP_THRESH = 0x04, + NVME_FEAT_ERR_RECOVERY = 0x05, + NVME_FEAT_VOLATILE_WC = 0x06, + NVME_FEAT_NUM_QUEUES = 0x07, + NVME_FEAT_IRQ_COALESCE = 0x08, + NVME_FEAT_IRQ_CONFIG = 0x09, + NVME_FEAT_WRITE_ATOMIC = 0x0a, + NVME_FEAT_ASYNC_EVENT = 0x0b, + NVME_FEAT_SW_PROGRESS = 0x0c, +}; + +struct nvme_identify { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + __le64 prp1; + __le64 prp2; + __le32 cns; + __u32 rsvd11[5]; +}; + +struct nvme_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + __le64 prp1; + __le64 prp2; + __le32 fid; + __le32 dword11; + __u32 rsvd12[4]; +}; + +struct nvme_create_cq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct nvme_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct nvme_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct nvme_download_firmware { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __le64 prp2; + __le32 numd; + __le32 offset; + __u32 rsvd12[4]; +}; + +struct nvme_command { + union { + struct nvme_common_command common; + struct nvme_rw_command rw; + struct nvme_identify identify; + struct nvme_features features; + struct nvme_create_cq create_cq; + struct nvme_create_sq create_sq; + struct nvme_delete_queue delete_queue; + struct nvme_download_firmware dlfw; + }; +}; + +enum { + NVME_SC_SUCCESS = 0x0, + NVME_SC_INVALID_OPCODE = 0x1, + NVME_SC_INVALID_FIELD 
= 0x2, + NVME_SC_CMDID_CONFLICT = 0x3, + NVME_SC_DATA_XFER_ERROR = 0x4, + NVME_SC_POWER_LOSS = 0x5, + NVME_SC_INTERNAL = 0x6, + NVME_SC_ABORT_REQ = 0x7, + NVME_SC_ABORT_QUEUE = 0x8, + NVME_SC_FUSED_FAIL = 0x9, + NVME_SC_FUSED_MISSING = 0xa, + NVME_SC_INVALID_NS = 0xb, + NVME_SC_LBA_RANGE = 0x80, + NVME_SC_CAP_EXCEEDED = 0x81, + NVME_SC_NS_NOT_READY = 0x82, + NVME_SC_CQ_INVALID = 0x100, + NVME_SC_QID_INVALID = 0x101, + NVME_SC_QUEUE_SIZE = 0x102, + NVME_SC_ABORT_LIMIT = 0x103, + NVME_SC_ABORT_MISSING = 0x104, + NVME_SC_ASYNC_LIMIT = 0x105, + NVME_SC_FIRMWARE_SLOT = 0x106, + NVME_SC_FIRMWARE_IMAGE = 0x107, + NVME_SC_INVALID_VECTOR = 0x108, + NVME_SC_INVALID_LOG_PAGE = 0x109, + NVME_SC_INVALID_FORMAT = 0x10a, + NVME_SC_BAD_ATTRIBUTES = 0x180, + NVME_SC_WRITE_FAULT = 0x280, + NVME_SC_READ_ERROR = 0x281, + NVME_SC_GUARD_CHECK = 0x282, + NVME_SC_APPTAG_CHECK = 0x283, + NVME_SC_REFTAG_CHECK = 0x284, + NVME_SC_COMPARE_FAILED = 0x285, + NVME_SC_ACCESS_DENIED = 0x286, +}; + +struct nvme_completion { + __le32 result; /* Used by admin commands to return data */ + __u32 rsvd; + __le16 sq_head; /* how much of this queue may be reclaimed */ + __le16 sq_id; /* submission queue that generated this entry */ + __u16 command_id; /* of the command which completed */ + __le16 status; /* did the command fail, and if so, why? */ +}; + +struct nvme_user_io { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; +}; + +struct nvme_admin_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result; +}; + +#define NVME_IOCTL_ID _IO('N', 0x40) +#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd) +#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io) + +#endif /* _LINUX_NVME_H */ diff --git a/include/linux/nvram.h b/include/linux/nvram.h new file mode 100644 index 00000000..9189829c --- /dev/null +++ b/include/linux/nvram.h @@ -0,0 +1,25 @@ +#ifndef _LINUX_NVRAM_H +#define _LINUX_NVRAM_H + +#include <linux/ioctl.h> + +/* /dev/nvram ioctls */ +#define NVRAM_INIT _IO('p', 0x40) /* initialize NVRAM and set checksum */ +#define NVRAM_SETCKS _IO('p', 0x41) /* recalculate checksum */ + +/* for all current systems, this is where NVRAM starts */ +#define NVRAM_FIRST_BYTE 14 +/* all these functions expect an NVRAM offset, not an absolute */ +#define NVRAM_OFFSET(x) ((x)-NVRAM_FIRST_BYTE) + +#ifdef __KERNEL__ +/* __foo is foo without grabbing the rtc_lock - get it yourself */ +extern unsigned char __nvram_read_byte(int i); +extern unsigned char nvram_read_byte(int i); +extern void __nvram_write_byte(unsigned char c, int i); +extern void nvram_write_byte(unsigned char c, int i); +extern int __nvram_check_checksum(void); +extern int nvram_check_checksum(void); +#endif + +#endif /* _LINUX_NVRAM_H */ diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h new file mode 100644 index 00000000..9acb2157 --- /dev/null +++ b/include/linux/nwpserial.h @@ -0,0 +1,18 @@ +/* + * Serial Port driver for a NWP uart device + * + * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the 
Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ +#ifndef _NWPSERIAL_H +#define _NWPSERIAL_H + +int nwpserial_register_port(struct uart_port *port); +void nwpserial_unregister_port(int line); + +#endif /* _NWPSERIAL_H */ diff --git a/include/linux/of.h b/include/linux/of.h new file mode 100644 index 00000000..fa7fb1d9 --- /dev/null +++ b/include/linux/of.h @@ -0,0 +1,377 @@ +#ifndef _LINUX_OF_H +#define _LINUX_OF_H +/* + * Definitions for talking to the Open Firmware PROM on + * Power Macintosh and other computers. + * + * Copyright (C) 1996-2005 Paul Mackerras. + * + * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. + * Updates for SPARC64 by David S. Miller + * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/errno.h> +#include <linux/kref.h> +#include <linux/mod_devicetable.h> +#include <linux/spinlock.h> + +#include <asm/byteorder.h> +#include <asm/errno.h> + +typedef u32 phandle; +typedef u32 ihandle; + +struct property { + char *name; + int length; + void *value; + struct property *next; + unsigned long _flags; + unsigned int unique_id; +}; + +#if defined(CONFIG_SPARC) +struct of_irq_controller; +#endif + +struct device_node { + const char *name; + const char *type; + phandle phandle; + char *full_name; + + struct property *properties; + struct property *deadprops; /* removed properties */ + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; + struct device_node *next; /* next device of same type */ + struct device_node *allnext; /* next in list of all nodes */ + struct proc_dir_entry *pde; /* this node's proc directory */ + struct kref kref; + unsigned long _flags; + void *data; +#if defined(CONFIG_SPARC) + char *path_component_name; + unsigned int unique_id; + struct of_irq_controller *irq_trans; +#endif +}; + +#define MAX_PHANDLE_ARGS 8 +struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[MAX_PHANDLE_ARGS]; +}; + +#ifdef CONFIG_OF_DYNAMIC +extern struct device_node *of_node_get(struct device_node *node); +extern void of_node_put(struct device_node *node); +#else /* CONFIG_OF_DYNAMIC */ +/* Dummy ref counting routines - to be implemented later */ +static inline struct device_node *of_node_get(struct device_node *node) +{ + return node; +} +static inline void of_node_put(struct device_node *node) { } +#endif /* !CONFIG_OF_DYNAMIC */ + +#ifdef CONFIG_OF + +/* Pointer for first entry in chain of all nodes. 
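+ * Traversal is protected by devtree_lock; most code should use the of_find_*
+ * helpers and for_each_* iterators declared below rather than walking the
+ * list directly, e.g.:
+ *	struct device_node *np;
+ *	for_each_node_by_type(np, "cpu")
+ *		pr_info("found cpu node %s\n", np->full_name);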
*/ +extern struct device_node *allnodes; +extern struct device_node *of_chosen; +extern struct device_node *of_aliases; +extern rwlock_t devtree_lock; + +static inline bool of_have_populated_dt(void) +{ + return allnodes != NULL; +} + +static inline bool of_node_is_root(const struct device_node *node) +{ + return node && (node->parent == NULL); +} + +static inline int of_node_check_flag(struct device_node *n, unsigned long flag) +{ + return test_bit(flag, &n->_flags); +} + +static inline void of_node_set_flag(struct device_node *n, unsigned long flag) +{ + set_bit(flag, &n->_flags); +} + +extern struct device_node *of_find_all_nodes(struct device_node *prev); + +/* + * OF address retrieval & translation + */ + +/* Helper to read a big number; size is in cells (not bytes) */ +static inline u64 of_read_number(const __be32 *cell, int size) +{ + u64 r = 0; + while (size--) + r = (r << 32) | be32_to_cpu(*(cell++)); + return r; +} + +/* Like of_read_number, but we want an unsigned long result */ +static inline unsigned long of_read_ulong(const __be32 *cell, int size) +{ + /* toss away upper bits if unsigned long is smaller than u64 */ + return of_read_number(cell, size); +} + +#include <asm/prom.h> + +/* Default #address and #size cells. Allow arch asm/prom.h to override */ +#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT) +#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1 +#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 +#endif + +/* Default string compare functions, Allow arch asm/prom.h to override */ +#if !defined(of_compat_cmp) +#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) +#define of_prop_cmp(s1, s2) strcmp((s1), (s2)) +#define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) +#endif + +/* flag descriptions */ +#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ +#define OF_DETACHED 2 /* node has been detached from the device tree */ + +#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) +#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) + +#define OF_BAD_ADDR ((u64)-1) + +#ifndef of_node_to_nid +static inline int of_node_to_nid(struct device_node *np) { return -1; } +#define of_node_to_nid of_node_to_nid +#endif + +extern struct device_node *of_find_node_by_name(struct device_node *from, + const char *name); +#define for_each_node_by_name(dn, name) \ + for (dn = of_find_node_by_name(NULL, name); dn; \ + dn = of_find_node_by_name(dn, name)) +extern struct device_node *of_find_node_by_type(struct device_node *from, + const char *type); +#define for_each_node_by_type(dn, type) \ + for (dn = of_find_node_by_type(NULL, type); dn; \ + dn = of_find_node_by_type(dn, type)) +extern struct device_node *of_find_compatible_node(struct device_node *from, + const char *type, const char *compat); +#define for_each_compatible_node(dn, type, compatible) \ + for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ + dn = of_find_compatible_node(dn, type, compatible)) +extern struct device_node *of_find_matching_node(struct device_node *from, + const struct of_device_id *matches); +#define for_each_matching_node(dn, matches) \ + for (dn = of_find_matching_node(NULL, matches); dn; \ + dn = of_find_matching_node(dn, matches)) +extern struct device_node *of_find_node_by_path(const char *path); +extern struct device_node *of_find_node_by_phandle(phandle handle); +extern struct device_node *of_get_parent(const struct device_node *node); +extern struct device_node *of_get_next_parent(struct device_node *node); +extern struct device_node *of_get_next_child(const struct device_node 
*node, + struct device_node *prev); +#define for_each_child_of_node(parent, child) \ + for (child = of_get_next_child(parent, NULL); child != NULL; \ + child = of_get_next_child(parent, child)) + +extern struct device_node *of_find_node_with_property( + struct device_node *from, const char *prop_name); +#define for_each_node_with_property(dn, prop_name) \ + for (dn = of_find_node_with_property(NULL, prop_name); dn; \ + dn = of_find_node_with_property(dn, prop_name)) + +extern struct property *of_find_property(const struct device_node *np, + const char *name, + int *lenp); +extern int of_property_read_u32_array(const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz); +extern int of_property_read_u64(const struct device_node *np, + const char *propname, u64 *out_value); + +extern int of_property_read_string(struct device_node *np, + const char *propname, + const char **out_string); +extern int of_property_read_string_index(struct device_node *np, + const char *propname, + int index, const char **output); +extern int of_property_match_string(struct device_node *np, + const char *propname, + const char *string); +extern int of_property_count_strings(struct device_node *np, + const char *propname); +extern int of_device_is_compatible(const struct device_node *device, + const char *); +extern int of_device_is_available(const struct device_node *device); +extern const void *of_get_property(const struct device_node *node, + const char *name, + int *lenp); +#define for_each_property_of_node(dn, pp) \ + for (pp = dn->properties; pp != NULL; pp = pp->next) + +extern int of_n_addr_cells(struct device_node *np); +extern int of_n_size_cells(struct device_node *np); +extern const struct of_device_id *of_match_node( + const struct of_device_id *matches, const struct device_node *node); +extern int of_modalias_node(struct device_node *node, char *modalias, int len); +extern struct device_node *of_parse_phandle(struct device_node *np, + const char *phandle_name, + int index); +extern int of_parse_phandle_with_args(struct device_node *np, + const char *list_name, const char *cells_name, int index, + struct of_phandle_args *out_args); + +extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); +extern int of_alias_get_id(struct device_node *np, const char *stem); + +extern int of_machine_is_compatible(const char *compat); + +extern int prom_add_property(struct device_node* np, struct property* prop); +extern int prom_remove_property(struct device_node *np, struct property *prop); +extern int prom_update_property(struct device_node *np, + struct property *newprop, + struct property *oldprop); + +#if defined(CONFIG_OF_DYNAMIC) +/* For updating the device tree at runtime */ +extern void of_attach_node(struct device_node *); +extern void of_detach_node(struct device_node *); +#endif + +#define of_match_ptr(_ptr) (_ptr) +#else /* CONFIG_OF */ + +static inline bool of_have_populated_dt(void) +{ + return false; +} + +#define for_each_child_of_node(parent, child) \ + while (0) + +static inline int of_device_is_compatible(const struct device_node *device, + const char *name) +{ + return 0; +} + +static inline struct property *of_find_property(const struct device_node *np, + const char *name, + int *lenp) +{ + return NULL; +} + +static inline struct device_node *of_find_compatible_node( + struct device_node *from, + const char *type, + const char *compat) +{ + return NULL; +} + +static inline int of_property_read_u32_array(const struct device_node *np, + const char *propname, + 
u32 *out_values, size_t sz) +{ + return -ENOSYS; +} + +static inline int of_property_read_string(struct device_node *np, + const char *propname, + const char **out_string) +{ + return -ENOSYS; +} + +static inline int of_property_read_string_index(struct device_node *np, + const char *propname, int index, + const char **out_string) +{ + return -ENOSYS; +} + +static inline int of_property_count_strings(struct device_node *np, + const char *propname) +{ + return -ENOSYS; +} + +static inline const void *of_get_property(const struct device_node *node, + const char *name, + int *lenp) +{ + return NULL; +} + +static inline int of_property_read_u64(const struct device_node *np, + const char *propname, u64 *out_value) +{ + return -ENOSYS; +} + +static inline struct device_node *of_parse_phandle(struct device_node *np, + const char *phandle_name, + int index) +{ + return NULL; +} + +static inline int of_alias_get_id(struct device_node *np, const char *stem) +{ + return -ENOSYS; +} + +static inline int of_machine_is_compatible(const char *compat) +{ + return 0; +} + +#define of_match_ptr(_ptr) NULL +#define of_match_node(_matches, _node) NULL +#endif /* CONFIG_OF */ + +/** + * of_property_read_bool - Findfrom a property + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device node. + * Returns true if the property exist false otherwise. + */ +static inline bool of_property_read_bool(const struct device_node *np, + const char *propname) +{ + struct property *prop = of_find_property(np, propname, NULL); + + return prop ? true : false; +} + +static inline int of_property_read_u32(const struct device_node *np, + const char *propname, + u32 *out_value) +{ + return of_property_read_u32_array(np, propname, out_value, 1); +} + +#endif /* _LINUX_OF_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h new file mode 100644 index 00000000..01b925ad --- /dev/null +++ b/include/linux/of_address.h @@ -0,0 +1,74 @@ +#ifndef __OF_ADDRESS_H +#define __OF_ADDRESS_H +#include <linux/ioport.h> +#include <linux/errno.h> +#include <linux/of.h> + +#ifdef CONFIG_OF_ADDRESS +extern u64 of_translate_address(struct device_node *np, const __be32 *addr); +extern int of_address_to_resource(struct device_node *dev, int index, + struct resource *r); +extern struct device_node *of_find_matching_node_by_address( + struct device_node *from, + const struct of_device_id *matches, + u64 base_address); +extern void __iomem *of_iomap(struct device_node *device, int index); + +/* Extract an address from a device, returns the region size and + * the address space flags too. 
The PCI version uses a BAR number + * instead of an absolute index + */ +extern const u32 *of_get_address(struct device_node *dev, int index, + u64 *size, unsigned int *flags); + +#ifndef pci_address_to_pio +static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } +#define pci_address_to_pio pci_address_to_pio +#endif + +#else /* CONFIG_OF_ADDRESS */ +static inline int of_address_to_resource(struct device_node *dev, int index, + struct resource *r) +{ + return -EINVAL; +} +static inline struct device_node *of_find_matching_node_by_address( + struct device_node *from, + const struct of_device_id *matches, + u64 base_address) +{ + return NULL; +} +static inline void __iomem *of_iomap(struct device_node *device, int index) +{ + return NULL; +} +static inline const u32 *of_get_address(struct device_node *dev, int index, + u64 *size, unsigned int *flags) +{ + return NULL; +} +#endif /* CONFIG_OF_ADDRESS */ + + +#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) +extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, + u64 *size, unsigned int *flags); +extern int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r); +#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ +static inline int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r) +{ + return -ENOSYS; +} + +static inline const __be32 *of_get_pci_address(struct device_node *dev, + int bar_no, u64 *size, unsigned int *flags) +{ + return NULL; +} +#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ + +#endif /* __OF_ADDRESS_H */ + diff --git a/include/linux/of_device.h b/include/linux/of_device.h new file mode 100644 index 00000000..901b7435 --- /dev/null +++ b/include/linux/of_device.h @@ -0,0 +1,72 @@ +#ifndef _LINUX_OF_DEVICE_H +#define _LINUX_OF_DEVICE_H + +#include <linux/platform_device.h> +#include <linux/of_platform.h> /* temporary until merge */ + +#ifdef CONFIG_OF_DEVICE +#include <linux/of.h> +#include <linux/mod_devicetable.h> + +struct device; + +extern const struct of_device_id *of_match_device( + const struct of_device_id *matches, const struct device *dev); +extern void of_device_make_bus_id(struct device *dev); + +/** + * of_driver_match_device - Tell if a driver's of_match_table matches a device. 
+ * @drv: the device_driver structure to test + * @dev: the device structure to match against + */ +static inline int of_driver_match_device(struct device *dev, + const struct device_driver *drv) +{ + return of_match_device(drv->of_match_table, dev) != NULL; +} + +extern struct platform_device *of_dev_get(struct platform_device *dev); +extern void of_dev_put(struct platform_device *dev); + +extern int of_device_add(struct platform_device *pdev); +extern int of_device_register(struct platform_device *ofdev); +extern void of_device_unregister(struct platform_device *ofdev); + +extern ssize_t of_device_get_modalias(struct device *dev, + char *str, ssize_t len); + +extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env); +extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env); + +static inline void of_device_node_put(struct device *dev) +{ + of_node_put(dev->of_node); +} + +#else /* CONFIG_OF_DEVICE */ + +static inline int of_driver_match_device(struct device *dev, + struct device_driver *drv) +{ + return 0; +} + +static inline void of_device_uevent(struct device *dev, + struct kobj_uevent_env *env) { } + +static inline int of_device_uevent_modalias(struct device *dev, + struct kobj_uevent_env *env) +{ + return -ENODEV; +} + +static inline void of_device_node_put(struct device *dev) { } + +static inline const struct of_device_id *of_match_device( + const struct of_device_id *matches, const struct device *dev) +{ + return NULL; +} +#endif /* CONFIG_OF_DEVICE */ + +#endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h new file mode 100644 index 00000000..ed136ad6 --- /dev/null +++ b/include/linux/of_fdt.h @@ -0,0 +1,125 @@ +/* + * Definitions for working with the Flattened Device Tree data format + * + * Copyright 2009 Benjamin Herrenschmidt, IBM Corp + * benh@kernel.crashing.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ + +#ifndef _LINUX_OF_FDT_H +#define _LINUX_OF_FDT_H + +#include <linux/types.h> +#include <linux/init.h> + +/* Definitions used by the flattened device tree */ +#define OF_DT_HEADER 0xd00dfeed /* marker */ +#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ +#define OF_DT_END_NODE 0x2 /* End node */ +#define OF_DT_PROP 0x3 /* Property: name off, size, + * content */ +#define OF_DT_NOP 0x4 /* nop */ +#define OF_DT_END 0x9 + +#define OF_DT_VERSION 0x10 + +#ifndef __ASSEMBLY__ +/* + * This is what gets passed to the kernel by prom_init or kexec + * + * The dt struct contains the device tree structure, full pathes and + * property contents. The dt strings contain a separate block with just + * the strings for the property names, and is fully page aligned and + * self contained in a page, so that it can be kept around by the kernel, + * each property name appears only once in this page (cheap compression) + * + * the mem_rsvmap contains a map of reserved ranges of physical memory, + * passing it here instead of in the device-tree itself greatly simplifies + * the job of everybody. 
It's just a list of u64 pairs (base/size) that + * ends when size is 0 + */ +struct boot_param_header { + __be32 magic; /* magic word OF_DT_HEADER */ + __be32 totalsize; /* total size of DT block */ + __be32 off_dt_struct; /* offset to structure */ + __be32 off_dt_strings; /* offset to strings */ + __be32 off_mem_rsvmap; /* offset to memory reserve map */ + __be32 version; /* format version */ + __be32 last_comp_version; /* last compatible version */ + /* version 2 fields below */ + __be32 boot_cpuid_phys; /* Physical CPU id we're booting on */ + /* version 3 fields below */ + __be32 dt_strings_size; /* size of the DT strings block */ + /* version 17 fields below */ + __be32 dt_struct_size; /* size of the DT structure block */ +}; + +#if defined(CONFIG_OF_FLATTREE) + +struct device_node; + +/* For scanning an arbitrary device-tree at any time */ +extern char *of_fdt_get_string(struct boot_param_header *blob, u32 offset); +extern void *of_fdt_get_property(struct boot_param_header *blob, + unsigned long node, + const char *name, + unsigned long *size); +extern int of_fdt_is_compatible(struct boot_param_header *blob, + unsigned long node, + const char *compat); +extern int of_fdt_match(struct boot_param_header *blob, unsigned long node, + const char *const *compat); +extern void of_fdt_unflatten_tree(unsigned long *blob, + struct device_node **mynodes); + +/* TBD: Temporary export of fdt globals - remove when code fully merged */ +extern int __initdata dt_root_addr_cells; +extern int __initdata dt_root_size_cells; +extern struct boot_param_header *initial_boot_params; + +/* For scanning the flat device-tree at boot time */ +extern char *find_flat_dt_string(u32 offset); +extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, + int depth, void *data), + void *data); +extern void *of_get_flat_dt_prop(unsigned long node, const char *name, + unsigned long *size); +extern int of_flat_dt_is_compatible(unsigned long node, const char *name); +extern int of_flat_dt_match(unsigned long node, const char *const *matches); +extern unsigned long of_get_flat_dt_root(void); + +extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, + int depth, void *data); +extern void early_init_dt_check_for_initrd(unsigned long node); +extern int early_init_dt_scan_memory(unsigned long node, const char *uname, + int depth, void *data); +extern void early_init_dt_add_memory_arch(u64 base, u64 size); +extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); +extern u64 dt_mem_next_cell(int s, __be32 **cellp); + +/* + * If BLK_DEV_INITRD, the fdt early init code will call this function, + * to be provided by the arch code. start and end are specified as + * physical addresses. 
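+ * A typical arch implementation simply records the range in
+ * initrd_start/initrd_end for the generic initrd code to pick up.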
+ */ +#ifdef CONFIG_BLK_DEV_INITRD +extern void early_init_dt_setup_initrd_arch(unsigned long start, + unsigned long end); +#endif + +/* Early flat tree scan hooks */ +extern int early_init_dt_scan_root(unsigned long node, const char *uname, + int depth, void *data); + +/* Other Prototypes */ +extern void unflatten_device_tree(void); +extern void early_init_devtree(void *); +#else /* CONFIG_OF_FLATTREE */ +static inline void unflatten_device_tree(void) {} +#endif /* CONFIG_OF_FLATTREE */ + +#endif /* __ASSEMBLY__ */ +#endif /* _LINUX_OF_FDT_H */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h new file mode 100644 index 00000000..81733d12 --- /dev/null +++ b/include/linux/of_gpio.h @@ -0,0 +1,158 @@ +/* + * OF helpers for the GPIO API + * + * Copyright (c) 2007-2008 MontaVista Software, Inc. + * + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_OF_GPIO_H +#define __LINUX_OF_GPIO_H + +#include <linux/compiler.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/gpio.h> +#include <linux/of.h> + +struct device_node; + +/* + * This is Linux-specific flags. By default controllers' and Linux' mapping + * match, but GPIO controllers are free to translate their own flags to + * Linux-specific in their .xlate callback. Though, 1:1 mapping is recommended. + */ +enum of_gpio_flags { + OF_GPIO_ACTIVE_LOW = 0x1, +}; + +#ifdef CONFIG_OF_GPIO + +/* + * OF GPIO chip for memory mapped banks + */ +struct of_mm_gpio_chip { + struct gpio_chip gc; + void (*save_regs)(struct of_mm_gpio_chip *mm_gc); + void __iomem *regs; +}; + +static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) +{ + return container_of(gc, struct of_mm_gpio_chip, gc); +} + +extern int of_get_named_gpio_flags(struct device_node *np, + const char *list_name, int index, enum of_gpio_flags *flags); + +extern unsigned int of_gpio_named_count(struct device_node *np, + const char* propname); + +extern int of_mm_gpiochip_add(struct device_node *np, + struct of_mm_gpio_chip *mm_gc); + +extern void of_gpiochip_add(struct gpio_chip *gc); +extern void of_gpiochip_remove(struct gpio_chip *gc); +extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np); +extern int of_gpio_simple_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags); + +#else /* CONFIG_OF_GPIO */ + +/* Drivers may not strictly depend on the GPIO support, so let them link. */ +static inline int of_get_named_gpio_flags(struct device_node *np, + const char *list_name, int index, enum of_gpio_flags *flags) +{ + return -ENOSYS; +} + +static inline unsigned int of_gpio_named_count(struct device_node *np, + const char* propname) +{ + return 0; +} + +static inline int of_gpio_simple_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + return -ENOSYS; +} + +static inline void of_gpiochip_add(struct gpio_chip *gc) { } +static inline void of_gpiochip_remove(struct gpio_chip *gc) { } + +#endif /* CONFIG_OF_GPIO */ + +/** + * of_gpio_count - Count GPIOs for a device + * @np: device node to count GPIOs for + * + * The function returns the count of GPIOs specified for a node. + * + * Note that the empty GPIO specifiers counts too. 
For example, + * + * gpios = <0 + * &pio1 1 2 + * 0 + * &pio2 3 4>; + * + * defines four GPIOs (so this function will return 4), two of which + * are not specified. + */ +static inline unsigned int of_gpio_count(struct device_node *np) +{ + return of_gpio_named_count(np, "gpios"); +} + +/** + * of_get_gpio_flags() - Get a GPIO number and flags to use with GPIO API + * @np: device node to get GPIO from + * @index: index of the GPIO + * @flags: a flags pointer to fill in + * + * Returns GPIO number to use with Linux generic GPIO API, or one of the errno + * value on the error condition. If @flags is not NULL the function also fills + * in flags for the GPIO. + */ +static inline int of_get_gpio_flags(struct device_node *np, int index, + enum of_gpio_flags *flags) +{ + return of_get_named_gpio_flags(np, "gpios", index, flags); +} + +/** + * of_get_named_gpio() - Get a GPIO number to use with GPIO API + * @np: device node to get GPIO from + * @propname: Name of property containing gpio specifier(s) + * @index: index of the GPIO + * + * Returns GPIO number to use with Linux generic GPIO API, or one of the errno + * value on the error condition. + */ +static inline int of_get_named_gpio(struct device_node *np, + const char *propname, int index) +{ + return of_get_named_gpio_flags(np, propname, index, NULL); +} + +/** + * of_get_gpio() - Get a GPIO number to use with GPIO API + * @np: device node to get GPIO from + * @index: index of the GPIO + * + * Returns GPIO number to use with Linux generic GPIO API, or one of the errno + * value on the error condition. + */ +static inline int of_get_gpio(struct device_node *np, int index) +{ + return of_get_gpio_flags(np, index, NULL); +} + +#endif /* __LINUX_OF_GPIO_H */ diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h new file mode 100644 index 00000000..0efe8d46 --- /dev/null +++ b/include/linux/of_i2c.h @@ -0,0 +1,30 @@ +/* + * Generic I2C API implementation for PowerPC. + * + * Copyright (c) 2008 Jochen Friedrich <jochen@scram.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_OF_I2C_H +#define __LINUX_OF_I2C_H + +#if defined(CONFIG_OF_I2C) || defined(CONFIG_OF_I2C_MODULE) +#include <linux/i2c.h> + +extern void of_i2c_register_devices(struct i2c_adapter *adap); + +/* must call put_device() when done with returned i2c_client device */ +extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node); + +#else +static inline void of_i2c_register_devices(struct i2c_adapter *adap) +{ + return; +} +#endif /* CONFIG_OF_I2C */ + +#endif /* __LINUX_OF_I2C_H */ diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h new file mode 100644 index 00000000..d229ad3e --- /dev/null +++ b/include/linux/of_irq.h @@ -0,0 +1,80 @@ +#ifndef __OF_IRQ_H +#define __OF_IRQ_H + +#if defined(CONFIG_OF) +struct of_irq; +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/ioport.h> +#include <linux/of.h> + +/* + * irq_of_parse_and_map() is used ba all OF enabled platforms; but SPARC + * implements it differently. However, the prototype is the same for all, + * so declare it here regardless of the CONFIG_OF_IRQ setting. 
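+ * On most platforms a return value of 0 means that no mapping could be
+ * created.  Typical use, where pdev is the caller's platform_device:
+ *	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);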
+ */ +extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); + +#if defined(CONFIG_OF_IRQ) +/** + * of_irq - container for device_node/irq_specifier pair for an irq controller + * @controller: pointer to interrupt controller device tree node + * @size: size of interrupt specifier + * @specifier: array of cells @size long specifing the specific interrupt + * + * This structure is returned when an interrupt is mapped. The controller + * field needs to be put() after use + */ +#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */ +struct of_irq { + struct device_node *controller; /* Interrupt controller node */ + u32 size; /* Specifier size */ + u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */ +}; + +typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); + +/* + * Workarounds only applied to 32bit powermac machines + */ +#define OF_IMAP_OLDWORLD_MAC 0x00000001 +#define OF_IMAP_NO_PHANDLE 0x00000002 + +#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) +extern unsigned int of_irq_workarounds; +extern struct device_node *of_irq_dflt_pic; +extern int of_irq_map_oldworld(struct device_node *device, int index, + struct of_irq *out_irq); +#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ +#define of_irq_workarounds (0) +#define of_irq_dflt_pic (NULL) +static inline int of_irq_map_oldworld(struct device_node *device, int index, + struct of_irq *out_irq) +{ + return -EINVAL; +} +#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */ + + +extern int of_irq_map_raw(struct device_node *parent, const u32 *intspec, + u32 ointsize, const u32 *addr, + struct of_irq *out_irq); +extern int of_irq_map_one(struct device_node *device, int index, + struct of_irq *out_irq); +extern unsigned int irq_create_of_mapping(struct device_node *controller, + const u32 *intspec, + unsigned int intsize); +extern int of_irq_to_resource(struct device_node *dev, int index, + struct resource *r); +extern int of_irq_count(struct device_node *dev); +extern int of_irq_to_resource_table(struct device_node *dev, + struct resource *res, int nr_irqs); +extern struct device_node *of_irq_find_parent(struct device_node *child); + +extern void of_irq_init(const struct of_device_id *matches); + +#endif /* CONFIG_OF_IRQ */ +#endif /* CONFIG_OF */ +#endif /* __OF_IRQ_H */ diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h new file mode 100644 index 00000000..53b94e02 --- /dev/null +++ b/include/linux/of_mdio.h @@ -0,0 +1,25 @@ +/* + * OF helpers for the MDIO (Ethernet PHY) API + * + * Copyright (c) 2009 Secret Lab Technologies, Ltd. + * + * This file is released under the GPLv2 + */ + +#ifndef __LINUX_OF_MDIO_H +#define __LINUX_OF_MDIO_H + +#include <linux/phy.h> +#include <linux/of.h> + +extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); +extern struct phy_device *of_phy_find_device(struct device_node *phy_np); +extern struct phy_device *of_phy_connect(struct net_device *dev, + struct device_node *phy_np, + void (*hndlr)(struct net_device *), + u32 flags, phy_interface_t iface); +extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, + void (*hndlr)(struct net_device *), + phy_interface_t iface); + +#endif /* __LINUX_OF_MDIO_H */ diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h new file mode 100644 index 00000000..bae1b609 --- /dev/null +++ b/include/linux/of_mtd.h @@ -0,0 +1,19 @@ +/* + * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> + * + * OF helpers for mtd. 
+ * + * This file is released under the GPLv2 + */ + +#ifndef __LINUX_OF_MTD_H +#define __LINUX_OF_NET_H + +#ifdef CONFIG_OF_MTD +#include <linux/of.h> +extern const int of_get_nand_ecc_mode(struct device_node *np); +int of_get_nand_bus_width(struct device_node *np); +bool of_get_nand_on_flash_bbt(struct device_node *np); +#endif + +#endif /* __LINUX_OF_MTD_H */ diff --git a/include/linux/of_net.h b/include/linux/of_net.h new file mode 100644 index 00000000..f4746418 --- /dev/null +++ b/include/linux/of_net.h @@ -0,0 +1,16 @@ +/* + * OF helpers for network devices. + * + * This file is released under the GPLv2 + */ + +#ifndef __LINUX_OF_NET_H +#define __LINUX_OF_NET_H + +#ifdef CONFIG_OF_NET +#include <linux/of.h> +extern const int of_get_phy_mode(struct device_node *np); +extern const void *of_get_mac_address(struct device_node *np); +#endif + +#endif /* __LINUX_OF_NET_H */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h new file mode 100644 index 00000000..f93e2170 --- /dev/null +++ b/include/linux/of_pci.h @@ -0,0 +1,14 @@ +#ifndef __OF_PCI_H +#define __OF_PCI_H + +#include <linux/pci.h> + +struct pci_dev; +struct of_irq; +int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); + +struct device_node; +struct device_node *of_pci_find_child_device(struct device_node *parent, + unsigned int devfn); + +#endif diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h new file mode 100644 index 00000000..c65a18a0 --- /dev/null +++ b/include/linux/of_pdt.h @@ -0,0 +1,45 @@ +/* + * Definitions for building a device tree by calling into the + * Open Firmware PROM. + * + * Copyright (C) 2010 Andres Salomon <dilinger@queued.net> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_OF_PDT_H +#define _LINUX_OF_PDT_H + +/* overridable operations for calling into the PROM */ +struct of_pdt_ops { + /* + * buf should be 32 bytes; return 0 on success. + * If prev is NULL, the first property will be returned. + */ + int (*nextprop)(phandle node, char *prev, char *buf); + + /* for both functions, return proplen on success; -1 on error */ + int (*getproplen)(phandle node, const char *prop); + int (*getproperty)(phandle node, const char *prop, char *buf, + int bufsize); + + /* phandles are 0 if no child or sibling exists */ + phandle (*getchild)(phandle parent); + phandle (*getsibling)(phandle node); + + /* return 0 on success; fill in 'len' with number of bytes in path */ + int (*pkg2path)(phandle node, char *buf, const int buflen, int *len); +}; + +extern void *prom_early_alloc(unsigned long size); + +/* for building the device tree */ +extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); + +extern void (*of_pdt_build_more)(struct device_node *dp, + struct device_node ***nextp); + +#endif /* _LINUX_OF_PDT_H */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h new file mode 100644 index 00000000..b47d2040 --- /dev/null +++ b/include/linux/of_platform.h @@ -0,0 +1,112 @@ +#ifndef _LINUX_OF_PLATFORM_H +#define _LINUX_OF_PLATFORM_H +/* + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. 
+ * <benh@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#ifdef CONFIG_OF_DEVICE +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/pm.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +/** + * struct of_dev_auxdata - lookup table entry for device names & platform_data + * @compatible: compatible value of node to match against node + * @phys_addr: Start address of registers to match against node + * @name: Name to assign for matching nodes + * @platform_data: platform_data to assign for matching nodes + * + * This lookup table allows the caller of of_platform_populate() to override + * the names of devices when creating devices from the device tree. The table + * should be terminated with an empty entry. It also allows the platform_data + * pointer to be set. + * + * The reason for this functionality is that some Linux infrastructure uses + * the device name to look up a specific device, but the Linux-specific names + * are not encoded into the device tree, so the kernel needs to provide specific + * values. + * + * Note: Using an auxdata lookup table should be considered a last resort when + * converting a platform to use the DT. Normally the automatically generated + * device name will not matter, and drivers should obtain data from the device + * node instead of from an anonymouns platform_data pointer. + */ +struct of_dev_auxdata { + char *compatible; + resource_size_t phys_addr; + char *name; + void *platform_data; +}; + +/* Macro to simplify populating a lookup table */ +#define OF_DEV_AUXDATA(_compat,_phys,_name,_pdata) \ + { .compatible = _compat, .phys_addr = _phys, .name = _name, \ + .platform_data = _pdata } + +/** + * of_platform_driver - Legacy of-aware driver for platform devices. + * + * An of_platform_driver driver is attached to a basic platform_device on + * the ibm ebus (ibmebus_bus_type). 
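+ * New code should use a plain platform_driver and match via the
+ * of_match_table member of struct device_driver instead.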
+ */ +struct of_platform_driver +{ + int (*probe)(struct platform_device* dev, + const struct of_device_id *match); + int (*remove)(struct platform_device* dev); + + int (*suspend)(struct platform_device* dev, pm_message_t state); + int (*resume)(struct platform_device* dev); + int (*shutdown)(struct platform_device* dev); + + struct device_driver driver; +}; +#define to_of_platform_driver(drv) \ + container_of(drv,struct of_platform_driver, driver) + +extern const struct of_device_id of_default_bus_match_table[]; + +/* Platform drivers register/unregister */ +extern struct platform_device *of_device_alloc(struct device_node *np, + const char *bus_id, + struct device *parent); +extern struct platform_device *of_find_device_by_node(struct device_node *np); + +#ifdef CONFIG_OF_ADDRESS /* device reg helpers depend on OF_ADDRESS */ +/* Platform devices and busses creation */ +extern struct platform_device *of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent); + +extern int of_platform_bus_probe(struct device_node *root, + const struct of_device_id *matches, + struct device *parent); +extern int of_platform_populate(struct device_node *root, + const struct of_device_id *matches, + const struct of_dev_auxdata *lookup, + struct device *parent); +#endif /* CONFIG_OF_ADDRESS */ + +#endif /* CONFIG_OF_DEVICE */ + +#if !defined(CONFIG_OF_ADDRESS) +struct of_dev_auxdata; +static inline int of_platform_populate(struct device_node *root, + const struct of_device_id *matches, + const struct of_dev_auxdata *lookup, + struct device *parent) +{ + return -ENODEV; +} +#endif /* !CONFIG_OF_ADDRESS */ + +#endif /* _LINUX_OF_PLATFORM_H */ diff --git a/include/linux/of_spi.h b/include/linux/of_spi.h new file mode 100644 index 00000000..9e3e70f7 --- /dev/null +++ b/include/linux/of_spi.h @@ -0,0 +1,23 @@ +/* + * OpenFirmware SPI support routines + * Copyright (C) 2008 Secret Lab Technologies Ltd. + * + * Support routines for deriving SPI device attachments from the device + * tree. + */ + +#ifndef __LINUX_OF_SPI_H +#define __LINUX_OF_SPI_H + +#include <linux/spi/spi.h> + +#if defined(CONFIG_OF_SPI) || defined(CONFIG_OF_SPI_MODULE) +extern void of_register_spi_devices(struct spi_master *master); +#else +static inline void of_register_spi_devices(struct spi_master *master) +{ + return; +} +#endif /* CONFIG_OF_SPI */ + +#endif /* __LINUX_OF_SPI */ diff --git a/include/linux/omap3isp.h b/include/linux/omap3isp.h new file mode 100644 index 00000000..c73a34c3 --- /dev/null +++ b/include/linux/omap3isp.h @@ -0,0 +1,644 @@ +/* + * omap3isp.h + * + * TI OMAP3 ISP - User-space API + * + * Copyright (C) 2010 Nokia Corporation + * Copyright (C) 2009 Texas Instruments, Inc. + * + * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * Sakari Ailus <sakari.ailus@iki.fi> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef OMAP3_ISP_USER_H +#define OMAP3_ISP_USER_H + +#include <linux/types.h> + +/* + * Private IOCTLs + * + * VIDIOC_OMAP3ISP_CCDC_CFG: Set CCDC configuration + * VIDIOC_OMAP3ISP_PRV_CFG: Set preview engine configuration + * VIDIOC_OMAP3ISP_AEWB_CFG: Set AEWB module configuration + * VIDIOC_OMAP3ISP_HIST_CFG: Set histogram module configuration + * VIDIOC_OMAP3ISP_AF_CFG: Set auto-focus module configuration + * VIDIOC_OMAP3ISP_STAT_REQ: Read statistics (AEWB/AF/histogram) data + * VIDIOC_OMAP3ISP_STAT_EN: Enable/disable a statistics module + */ + +#define VIDIOC_OMAP3ISP_CCDC_CFG \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct omap3isp_ccdc_update_config) +#define VIDIOC_OMAP3ISP_PRV_CFG \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 2, struct omap3isp_prev_update_config) +#define VIDIOC_OMAP3ISP_AEWB_CFG \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 3, struct omap3isp_h3a_aewb_config) +#define VIDIOC_OMAP3ISP_HIST_CFG \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 4, struct omap3isp_hist_config) +#define VIDIOC_OMAP3ISP_AF_CFG \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 5, struct omap3isp_h3a_af_config) +#define VIDIOC_OMAP3ISP_STAT_REQ \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 6, struct omap3isp_stat_data) +#define VIDIOC_OMAP3ISP_STAT_EN \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 7, unsigned long) + +/* + * Events + * + * V4L2_EVENT_OMAP3ISP_AEWB: AEWB statistics data ready + * V4L2_EVENT_OMAP3ISP_AF: AF statistics data ready + * V4L2_EVENT_OMAP3ISP_HIST: Histogram statistics data ready + */ + +#define V4L2_EVENT_OMAP3ISP_CLASS (V4L2_EVENT_PRIVATE_START | 0x100) +#define V4L2_EVENT_OMAP3ISP_AEWB (V4L2_EVENT_OMAP3ISP_CLASS | 0x1) +#define V4L2_EVENT_OMAP3ISP_AF (V4L2_EVENT_OMAP3ISP_CLASS | 0x2) +#define V4L2_EVENT_OMAP3ISP_HIST (V4L2_EVENT_OMAP3ISP_CLASS | 0x3) + +struct omap3isp_stat_event_status { + __u32 frame_number; + __u16 config_counter; + __u8 buf_err; +}; + +/* AE/AWB related structures and flags*/ + +/* H3A Range Constants */ +#define OMAP3ISP_AEWB_MAX_SATURATION_LIM 1023 +#define OMAP3ISP_AEWB_MIN_WIN_H 2 +#define OMAP3ISP_AEWB_MAX_WIN_H 256 +#define OMAP3ISP_AEWB_MIN_WIN_W 6 +#define OMAP3ISP_AEWB_MAX_WIN_W 256 +#define OMAP3ISP_AEWB_MIN_WINVC 1 +#define OMAP3ISP_AEWB_MIN_WINHC 1 +#define OMAP3ISP_AEWB_MAX_WINVC 128 +#define OMAP3ISP_AEWB_MAX_WINHC 36 +#define OMAP3ISP_AEWB_MAX_WINSTART 4095 +#define OMAP3ISP_AEWB_MIN_SUB_INC 2 +#define OMAP3ISP_AEWB_MAX_SUB_INC 32 +#define OMAP3ISP_AEWB_MAX_BUF_SIZE 83600 + +#define OMAP3ISP_AF_IIRSH_MIN 0 +#define OMAP3ISP_AF_IIRSH_MAX 4095 +#define OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN 1 +#define OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MAX 36 +#define OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN 1 +#define OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MAX 128 +#define OMAP3ISP_AF_PAXEL_INCREMENT_MIN 2 +#define OMAP3ISP_AF_PAXEL_INCREMENT_MAX 32 +#define OMAP3ISP_AF_PAXEL_HEIGHT_MIN 2 +#define OMAP3ISP_AF_PAXEL_HEIGHT_MAX 256 +#define OMAP3ISP_AF_PAXEL_WIDTH_MIN 16 +#define OMAP3ISP_AF_PAXEL_WIDTH_MAX 256 +#define OMAP3ISP_AF_PAXEL_HZSTART_MIN 1 +#define OMAP3ISP_AF_PAXEL_HZSTART_MAX 4095 +#define OMAP3ISP_AF_PAXEL_VTSTART_MIN 0 +#define OMAP3ISP_AF_PAXEL_VTSTART_MAX 4095 +#define OMAP3ISP_AF_THRESHOLD_MAX 255 +#define OMAP3ISP_AF_COEF_MAX 4095 +#define OMAP3ISP_AF_PAXEL_SIZE 48 +#define OMAP3ISP_AF_MAX_BUF_SIZE 221184 + +/** + * struct omap3isp_h3a_aewb_config - AE AWB configuration 
reset values + * saturation_limit: Saturation limit. + * @win_height: Window Height. Range 2 - 256, even values only. + * @win_width: Window Width. Range 6 - 256, even values only. + * @ver_win_count: Vertical Window Count. Range 1 - 128. + * @hor_win_count: Horizontal Window Count. Range 1 - 36. + * @ver_win_start: Vertical Window Start. Range 0 - 4095. + * @hor_win_start: Horizontal Window Start. Range 0 - 4095. + * @blk_ver_win_start: Black Vertical Windows Start. Range 0 - 4095. + * @blk_win_height: Black Window Height. Range 2 - 256, even values only. + * @subsample_ver_inc: Subsample Vertical points increment Range 2 - 32, even + * values only. + * @subsample_hor_inc: Subsample Horizontal points increment Range 2 - 32, even + * values only. + * @alaw_enable: AEW ALAW EN flag. + */ +struct omap3isp_h3a_aewb_config { + /* + * Common fields. + * They should be the first ones and must be in the same order as in + * ispstat_generic_config struct. + */ + __u32 buf_size; + __u16 config_counter; + + /* Private fields */ + __u16 saturation_limit; + __u16 win_height; + __u16 win_width; + __u16 ver_win_count; + __u16 hor_win_count; + __u16 ver_win_start; + __u16 hor_win_start; + __u16 blk_ver_win_start; + __u16 blk_win_height; + __u16 subsample_ver_inc; + __u16 subsample_hor_inc; + __u8 alaw_enable; +}; + +/** + * struct omap3isp_stat_data - Statistic data sent to or received from user + * @ts: Timestamp of returned framestats. + * @buf: Pointer to pass to user. + * @frame_number: Frame number of requested stats. + * @cur_frame: Current frame number being processed. + * @config_counter: Number of the configuration associated with the data. + */ +struct omap3isp_stat_data { + struct timeval ts; + void __user *buf; + __u32 buf_size; + __u16 frame_number; + __u16 cur_frame; + __u16 config_counter; +}; + + +/* Histogram related structs */ + +/* Flags for number of bins */ +#define OMAP3ISP_HIST_BINS_32 0 +#define OMAP3ISP_HIST_BINS_64 1 +#define OMAP3ISP_HIST_BINS_128 2 +#define OMAP3ISP_HIST_BINS_256 3 + +/* Number of bins * 4 colors * 4-bytes word */ +#define OMAP3ISP_HIST_MEM_SIZE_BINS(n) ((1 << ((n)+5))*4*4) + +#define OMAP3ISP_HIST_MEM_SIZE 1024 +#define OMAP3ISP_HIST_MIN_REGIONS 1 +#define OMAP3ISP_HIST_MAX_REGIONS 4 +#define OMAP3ISP_HIST_MAX_WB_GAIN 255 +#define OMAP3ISP_HIST_MIN_WB_GAIN 0 +#define OMAP3ISP_HIST_MAX_BIT_WIDTH 14 +#define OMAP3ISP_HIST_MIN_BIT_WIDTH 8 +#define OMAP3ISP_HIST_MAX_WG 4 +#define OMAP3ISP_HIST_MAX_BUF_SIZE 4096 + +/* Source */ +#define OMAP3ISP_HIST_SOURCE_CCDC 0 +#define OMAP3ISP_HIST_SOURCE_MEM 1 + +/* CFA pattern */ +#define OMAP3ISP_HIST_CFA_BAYER 0 +#define OMAP3ISP_HIST_CFA_FOVEONX3 1 + +struct omap3isp_hist_region { + __u16 h_start; + __u16 h_end; + __u16 v_start; + __u16 v_end; +}; + +struct omap3isp_hist_config { + /* + * Common fields. + * They should be the first ones and must be in the same order as in + * ispstat_generic_config struct. 
+ */ + __u32 buf_size; + __u16 config_counter; + + __u8 num_acc_frames; /* Num of image frames to be processed and + accumulated for each histogram frame */ + __u16 hist_bins; /* number of bins: 32, 64, 128, or 256 */ + __u8 cfa; /* BAYER or FOVEON X3 */ + __u8 wg[OMAP3ISP_HIST_MAX_WG]; /* White Balance Gain */ + __u8 num_regions; /* number of regions to be configured */ + struct omap3isp_hist_region region[OMAP3ISP_HIST_MAX_REGIONS]; +}; + +/* Auto Focus related structs */ + +#define OMAP3ISP_AF_NUM_COEF 11 + +enum omap3isp_h3a_af_fvmode { + OMAP3ISP_AF_MODE_SUMMED = 0, + OMAP3ISP_AF_MODE_PEAK = 1 +}; + +/* Red, Green, and blue pixel location in the AF windows */ +enum omap3isp_h3a_af_rgbpos { + OMAP3ISP_AF_GR_GB_BAYER = 0, /* GR and GB as Bayer pattern */ + OMAP3ISP_AF_RG_GB_BAYER = 1, /* RG and GB as Bayer pattern */ + OMAP3ISP_AF_GR_BG_BAYER = 2, /* GR and BG as Bayer pattern */ + OMAP3ISP_AF_RG_BG_BAYER = 3, /* RG and BG as Bayer pattern */ + OMAP3ISP_AF_GG_RB_CUSTOM = 4, /* GG and RB as custom pattern */ + OMAP3ISP_AF_RB_GG_CUSTOM = 5 /* RB and GG as custom pattern */ +}; + +/* Contains the information regarding the Horizontal Median Filter */ +struct omap3isp_h3a_af_hmf { + __u8 enable; /* Status of Horizontal Median Filter */ + __u8 threshold; /* Threshold Value for Horizontal Median Filter */ +}; + +/* Contains the information regarding the IIR Filters */ +struct omap3isp_h3a_af_iir { + __u16 h_start; /* IIR horizontal start */ + __u16 coeff_set0[OMAP3ISP_AF_NUM_COEF]; /* Filter coefficient, set 0 */ + __u16 coeff_set1[OMAP3ISP_AF_NUM_COEF]; /* Filter coefficient, set 1 */ +}; + +/* Contains the information regarding the Paxels Structure in AF Engine */ +struct omap3isp_h3a_af_paxel { + __u16 h_start; /* Horizontal Start Position */ + __u16 v_start; /* Vertical Start Position */ + __u8 width; /* Width of the Paxel */ + __u8 height; /* Height of the Paxel */ + __u8 h_cnt; /* Horizontal Count */ + __u8 v_cnt; /* vertical Count */ + __u8 line_inc; /* Line Increment */ +}; + +/* Contains the parameters required for hardware set up of AF Engine */ +struct omap3isp_h3a_af_config { + /* + * Common fields. + * They should be the first ones and must be in the same order as in + * ispstat_generic_config struct. + */ + __u32 buf_size; + __u16 config_counter; + + struct omap3isp_h3a_af_hmf hmf; /* HMF configurations */ + struct omap3isp_h3a_af_iir iir; /* IIR filter configurations */ + struct omap3isp_h3a_af_paxel paxel; /* Paxel parameters */ + enum omap3isp_h3a_af_rgbpos rgb_pos; /* RGB Positions */ + enum omap3isp_h3a_af_fvmode fvmode; /* Accumulator mode */ + __u8 alaw_enable; /* AF ALAW status */ +}; + +/* ISP CCDC structs */ + +/* Abstraction layer CCDC configurations */ +#define OMAP3ISP_CCDC_ALAW (1 << 0) +#define OMAP3ISP_CCDC_LPF (1 << 1) +#define OMAP3ISP_CCDC_BLCLAMP (1 << 2) +#define OMAP3ISP_CCDC_BCOMP (1 << 3) +#define OMAP3ISP_CCDC_FPC (1 << 4) +#define OMAP3ISP_CCDC_CULL (1 << 5) +#define OMAP3ISP_CCDC_CONFIG_LSC (1 << 7) +#define OMAP3ISP_CCDC_TBL_LSC (1 << 8) + +#define OMAP3ISP_RGB_MAX 3 + +/* Enumeration constants for Alaw input width */ +enum omap3isp_alaw_ipwidth { + OMAP3ISP_ALAW_BIT12_3 = 0x3, + OMAP3ISP_ALAW_BIT11_2 = 0x4, + OMAP3ISP_ALAW_BIT10_1 = 0x5, + OMAP3ISP_ALAW_BIT9_0 = 0x6 +}; + +/** + * struct omap3isp_ccdc_lsc_config - LSC configuration + * @offset: Table Offset of the gain table. + * @gain_mode_n: Vertical dimension of a paxel in LSC configuration. + * @gain_mode_m: Horizontal dimension of a paxel in LSC configuration. + * @gain_format: Gain table format. 
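As a user-space illustration of the histogram configuration defined earlier in this header, the sketch below programs a 256-bin Bayer histogram over a single region through VIDIOC_OMAP3ISP_HIST_CFG. The file descriptor, the region geometry and the <linux/videodev2.h> include (for BASE_VIDIOC_PRIVATE) are assumptions made for the example.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>	/* BASE_VIDIOC_PRIVATE, assumed to be needed */
#include <linux/omap3isp.h>

/* fd is assumed to be an open histogram subdevice node. */
static int configure_histogram(int fd)
{
	struct omap3isp_hist_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE;
	cfg.num_acc_frames = 1;			/* one image frame per histogram */
	cfg.hist_bins = OMAP3ISP_HIST_BINS_256;	/* 256 bins */
	cfg.cfa = OMAP3ISP_HIST_CFA_BAYER;
	cfg.num_regions = OMAP3ISP_HIST_MIN_REGIONS;
	cfg.region[0].h_start = 0;		/* example 2048x1536 window */
	cfg.region[0].h_end = 2047;
	cfg.region[0].v_start = 0;
	cfg.region[0].v_end = 1535;
	/* wg[] white-balance gains are left at zero for brevity. */

	return ioctl(fd, VIDIOC_OMAP3ISP_HIST_CFG, &cfg);
}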
+ * @fmtsph: Start pixel horizontal from start of the HS sync pulse. + * @fmtlnh: Number of pixels in horizontal direction to use for the data + * reformatter. + * @fmtslv: Start line from start of VS sync pulse for the data reformatter. + * @fmtlnv: Number of lines in vertical direction for the data reformatter. + * @initial_x: X position, in pixels, of the first active pixel in reference + * to the first active paxel. Must be an even number. + * @initial_y: Y position, in pixels, of the first active pixel in reference + * to the first active paxel. Must be an even number. + * @size: Size of LSC gain table. Filled when loaded from userspace. + */ +struct omap3isp_ccdc_lsc_config { + __u16 offset; + __u8 gain_mode_n; + __u8 gain_mode_m; + __u8 gain_format; + __u16 fmtsph; + __u16 fmtlnh; + __u16 fmtslv; + __u16 fmtlnv; + __u8 initial_x; + __u8 initial_y; + __u32 size; +}; + +/** + * struct omap3isp_ccdc_bclamp - Optical & Digital black clamp subtract + * @obgain: Optical black average gain. + * @obstpixel: Start Pixel w.r.t. HS pulse in Optical black sample. + * @oblines: Optical Black Sample lines. + * @oblen: Optical Black Sample Length. + * @dcsubval: Digital Black Clamp subtract value. + */ +struct omap3isp_ccdc_bclamp { + __u8 obgain; + __u8 obstpixel; + __u8 oblines; + __u8 oblen; + __u16 dcsubval; +}; + +/** + * struct omap3isp_ccdc_fpc - Faulty Pixels Correction + * @fpnum: Number of faulty pixels to be corrected in the frame. + * @fpcaddr: Memory address of the FPC Table + */ +struct omap3isp_ccdc_fpc { + __u16 fpnum; + __u32 fpcaddr; +}; + +/** + * struct omap3isp_ccdc_blcomp - Black Level Compensation parameters + * @b_mg: B/Mg pixels. 2's complement. -128 to +127. + * @gb_g: Gb/G pixels. 2's complement. -128 to +127. + * @gr_cy: Gr/Cy pixels. 2's complement. -128 to +127. + * @r_ye: R/Ye pixels. 2's complement. -128 to +127. + */ +struct omap3isp_ccdc_blcomp { + __u8 b_mg; + __u8 gb_g; + __u8 gr_cy; + __u8 r_ye; +}; + +/** + * omap3isp_ccdc_culling - Culling parameters + * @v_pattern: Vertical culling pattern. + * @h_odd: Horizontal Culling pattern for odd lines. + * @h_even: Horizontal Culling pattern for even lines. + */ +struct omap3isp_ccdc_culling { + __u8 v_pattern; + __u16 h_odd; + __u16 h_even; +}; + +/** + * omap3isp_ccdc_update_config - CCDC configuration + * @update: Specifies which CCDC registers should be updated. + * @flag: Specifies which CCDC functions should be enabled. + * @alawip: Enable/Disable A-Law compression. + * @bclamp: Black clamp control register. + * @blcomp: Black level compensation value for RGrGbB Pixels. 2's complement. + * @fpc: Number of faulty pixels corrected in the frame, address of FPC table. + * @cull: Cull control register. + * @lsc: Pointer to LSC gain table. 
+ */ +struct omap3isp_ccdc_update_config { + __u16 update; + __u16 flag; + enum omap3isp_alaw_ipwidth alawip; + struct omap3isp_ccdc_bclamp __user *bclamp; + struct omap3isp_ccdc_blcomp __user *blcomp; + struct omap3isp_ccdc_fpc __user *fpc; + struct omap3isp_ccdc_lsc_config __user *lsc_cfg; + struct omap3isp_ccdc_culling __user *cull; + __u8 __user *lsc; +}; + +/* Preview configurations */ +#define OMAP3ISP_PREV_LUMAENH (1 << 0) +#define OMAP3ISP_PREV_INVALAW (1 << 1) +#define OMAP3ISP_PREV_HRZ_MED (1 << 2) +#define OMAP3ISP_PREV_CFA (1 << 3) +#define OMAP3ISP_PREV_CHROMA_SUPP (1 << 4) +#define OMAP3ISP_PREV_WB (1 << 5) +#define OMAP3ISP_PREV_BLKADJ (1 << 6) +#define OMAP3ISP_PREV_RGB2RGB (1 << 7) +#define OMAP3ISP_PREV_COLOR_CONV (1 << 8) +#define OMAP3ISP_PREV_YC_LIMIT (1 << 9) +#define OMAP3ISP_PREV_DEFECT_COR (1 << 10) +#define OMAP3ISP_PREV_GAMMABYPASS (1 << 11) +#define OMAP3ISP_PREV_DRK_FRM_CAPTURE (1 << 12) +#define OMAP3ISP_PREV_DRK_FRM_SUBTRACT (1 << 13) +#define OMAP3ISP_PREV_LENS_SHADING (1 << 14) +#define OMAP3ISP_PREV_NF (1 << 15) +#define OMAP3ISP_PREV_GAMMA (1 << 16) + +#define OMAP3ISP_PREV_NF_TBL_SIZE 64 +#define OMAP3ISP_PREV_CFA_TBL_SIZE 576 +#define OMAP3ISP_PREV_GAMMA_TBL_SIZE 1024 +#define OMAP3ISP_PREV_YENH_TBL_SIZE 128 + +#define OMAP3ISP_PREV_DETECT_CORRECT_CHANNELS 4 + +/** + * struct omap3isp_prev_hmed - Horizontal Median Filter + * @odddist: Distance between consecutive pixels of same color in the odd line. + * @evendist: Distance between consecutive pixels of same color in the even + * line. + * @thres: Horizontal median filter threshold. + */ +struct omap3isp_prev_hmed { + __u8 odddist; + __u8 evendist; + __u8 thres; +}; + +/* + * Enumeration for CFA Formats supported by preview + */ +enum omap3isp_cfa_fmt { + OMAP3ISP_CFAFMT_BAYER, + OMAP3ISP_CFAFMT_SONYVGA, + OMAP3ISP_CFAFMT_RGBFOVEON, + OMAP3ISP_CFAFMT_DNSPL, + OMAP3ISP_CFAFMT_HONEYCOMB, + OMAP3ISP_CFAFMT_RRGGBBFOVEON +}; + +/** + * struct omap3isp_prev_cfa - CFA Interpolation + * @format: CFA Format Enum value supported by preview. + * @gradthrs_vert: CFA Gradient Threshold - Vertical. + * @gradthrs_horz: CFA Gradient Threshold - Horizontal. + * @table: Pointer to the CFA table. + */ +struct omap3isp_prev_cfa { + enum omap3isp_cfa_fmt format; + __u8 gradthrs_vert; + __u8 gradthrs_horz; + __u32 table[OMAP3ISP_PREV_CFA_TBL_SIZE]; +}; + +/** + * struct omap3isp_prev_csup - Chrominance Suppression + * @gain: Gain. + * @thres: Threshold. + * @hypf_en: Flag to enable/disable the High Pass Filter. + */ +struct omap3isp_prev_csup { + __u8 gain; + __u8 thres; + __u8 hypf_en; +}; + +/** + * struct omap3isp_prev_wbal - White Balance + * @dgain: Digital gain (U10Q8). + * @coef3: White balance gain - COEF 3 (U8Q5). + * @coef2: White balance gain - COEF 2 (U8Q5). + * @coef1: White balance gain - COEF 1 (U8Q5). + * @coef0: White balance gain - COEF 0 (U8Q5). 
+ */ +struct omap3isp_prev_wbal { + __u16 dgain; + __u8 coef3; + __u8 coef2; + __u8 coef1; + __u8 coef0; +}; + +/** + * struct omap3isp_prev_blkadj - Black Level Adjustment + * @red: Black level offset adjustment for Red in 2's complement format + * @green: Black level offset adjustment for Green in 2's complement format + * @blue: Black level offset adjustment for Blue in 2's complement format + */ +struct omap3isp_prev_blkadj { + /*Black level offset adjustment for Red in 2's complement format */ + __u8 red; + /*Black level offset adjustment for Green in 2's complement format */ + __u8 green; + /* Black level offset adjustment for Blue in 2's complement format */ + __u8 blue; +}; + +/** + * struct omap3isp_prev_rgbtorgb - RGB to RGB Blending + * @matrix: Blending values(S12Q8 format) + * [RR] [GR] [BR] + * [RG] [GG] [BG] + * [RB] [GB] [BB] + * @offset: Blending offset value for R,G,B in 2's complement integer format. + */ +struct omap3isp_prev_rgbtorgb { + __u16 matrix[OMAP3ISP_RGB_MAX][OMAP3ISP_RGB_MAX]; + __u16 offset[OMAP3ISP_RGB_MAX]; +}; + +/** + * struct omap3isp_prev_csc - Color Space Conversion from RGB-YCbYCr + * @matrix: Color space conversion coefficients(S10Q8) + * [CSCRY] [CSCGY] [CSCBY] + * [CSCRCB] [CSCGCB] [CSCBCB] + * [CSCRCR] [CSCGCR] [CSCBCR] + * @offset: CSC offset values for Y offset, CB offset and CR offset respectively + */ +struct omap3isp_prev_csc { + __u16 matrix[OMAP3ISP_RGB_MAX][OMAP3ISP_RGB_MAX]; + __s16 offset[OMAP3ISP_RGB_MAX]; +}; + +/** + * struct omap3isp_prev_yclimit - Y, C Value Limit + * @minC: Minimum C value + * @maxC: Maximum C value + * @minY: Minimum Y value + * @maxY: Maximum Y value + */ +struct omap3isp_prev_yclimit { + __u8 minC; + __u8 maxC; + __u8 minY; + __u8 maxY; +}; + +/** + * struct omap3isp_prev_dcor - Defect correction + * @couplet_mode_en: Flag to enable or disable the couplet dc Correction in NF + * @detect_correct: Thresholds for correction bit 0:10 detect 16:25 correct + */ +struct omap3isp_prev_dcor { + __u8 couplet_mode_en; + __u32 detect_correct[OMAP3ISP_PREV_DETECT_CORRECT_CHANNELS]; +}; + +/** + * struct omap3isp_prev_nf - Noise Filter + * @spread: Spread value to be used in Noise Filter + * @table: Pointer to the Noise Filter table + */ +struct omap3isp_prev_nf { + __u8 spread; + __u32 table[OMAP3ISP_PREV_NF_TBL_SIZE]; +}; + +/** + * struct omap3isp_prev_gtables - Gamma correction tables + * @red: Array for red gamma table. + * @green: Array for green gamma table. + * @blue: Array for blue gamma table. + */ +struct omap3isp_prev_gtables { + __u32 red[OMAP3ISP_PREV_GAMMA_TBL_SIZE]; + __u32 green[OMAP3ISP_PREV_GAMMA_TBL_SIZE]; + __u32 blue[OMAP3ISP_PREV_GAMMA_TBL_SIZE]; +}; + +/** + * struct omap3isp_prev_luma - Luma enhancement + * @table: Array for luma enhancement table. + */ +struct omap3isp_prev_luma { + __u32 table[OMAP3ISP_PREV_YENH_TBL_SIZE]; +}; + +/** + * struct omap3isp_prev_update_config - Preview engine configuration (user) + * @update: Specifies which ISP Preview registers should be updated. + * @flag: Specifies which ISP Preview functions should be enabled. + * @shading_shift: 3bit value of shift used in shading compensation. + * @luma: Pointer to luma enhancement structure. + * @hmed: Pointer to structure containing the odd and even distance. + * between the pixels in the image along with the filter threshold. + * @cfa: Pointer to structure containing the CFA interpolation table, CFA. + * format in the image, vertical and horizontal gradient threshold. 
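Because the white-balance fields above are fixed-point values, a short sketch of how gains could be encoded follows. It assumes the usual reading of U10Q8 and U8Q5 as unsigned formats with 8 and 5 fractional bits, and the 1.5x/1.0x gains are arbitrary example values.

#include <linux/omap3isp.h>

/* Digital gain of 1.5x with unity per-channel coefficients. */
static const struct omap3isp_prev_wbal example_wbal = {
	.dgain = 384,		/* U10Q8: 1.5 * 2^8 */
	.coef3 = 1 << 5,	/* U8Q5:  1.0 * 2^5 */
	.coef2 = 1 << 5,
	.coef1 = 1 << 5,
	.coef0 = 1 << 5,
};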
+ * @csup: Pointer to Structure for Chrominance Suppression coefficients. + * @wbal: Pointer to structure for White Balance. + * @blkadj: Pointer to structure for Black Adjustment. + * @rgb2rgb: Pointer to structure for RGB to RGB Blending. + * @csc: Pointer to structure for Color Space Conversion from RGB-YCbYCr. + * @yclimit: Pointer to structure for Y, C Value Limit. + * @dcor: Pointer to structure for defect correction. + * @nf: Pointer to structure for Noise Filter + * @gamma: Pointer to gamma structure. + */ +struct omap3isp_prev_update_config { + __u32 update; + __u32 flag; + __u32 shading_shift; + struct omap3isp_prev_luma __user *luma; + struct omap3isp_prev_hmed __user *hmed; + struct omap3isp_prev_cfa __user *cfa; + struct omap3isp_prev_csup __user *csup; + struct omap3isp_prev_wbal __user *wbal; + struct omap3isp_prev_blkadj __user *blkadj; + struct omap3isp_prev_rgbtorgb __user *rgb2rgb; + struct omap3isp_prev_csc __user *csc; + struct omap3isp_prev_yclimit __user *yclimit; + struct omap3isp_prev_dcor __user *dcor; + struct omap3isp_prev_nf __user *nf; + struct omap3isp_prev_gtables __user *gamma; +}; + +#endif /* OMAP3_ISP_USER_H */ diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h new file mode 100644 index 00000000..4ff57e81 --- /dev/null +++ b/include/linux/omapfb.h @@ -0,0 +1,233 @@ +/* + * File: include/linux/omapfb.h + * + * Framebuffer driver for TI OMAP boards + * + * Copyright (C) 2004 Nokia Corporation + * Author: Imre Deak <imre.deak@nokia.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __LINUX_OMAPFB_H__ +#define __LINUX_OMAPFB_H__ + +#include <linux/fb.h> +#include <linux/ioctl.h> +#include <linux/types.h> + +/* IOCTL commands. 
*/ + +#define OMAP_IOW(num, dtype) _IOW('O', num, dtype) +#define OMAP_IOR(num, dtype) _IOR('O', num, dtype) +#define OMAP_IOWR(num, dtype) _IOWR('O', num, dtype) +#define OMAP_IO(num) _IO('O', num) + +#define OMAPFB_MIRROR OMAP_IOW(31, int) +#define OMAPFB_SYNC_GFX OMAP_IO(37) +#define OMAPFB_VSYNC OMAP_IO(38) +#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int) +#define OMAPFB_GET_CAPS OMAP_IOR(42, struct omapfb_caps) +#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int) +#define OMAPFB_LCD_TEST OMAP_IOW(45, int) +#define OMAPFB_CTRL_TEST OMAP_IOW(46, int) +#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old) +#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key) +#define OMAPFB_GET_COLOR_KEY OMAP_IOW(51, struct omapfb_color_key) +#define OMAPFB_SETUP_PLANE OMAP_IOW(52, struct omapfb_plane_info) +#define OMAPFB_QUERY_PLANE OMAP_IOW(53, struct omapfb_plane_info) +#define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window) +#define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info) +#define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info) +#define OMAPFB_WAITFORVSYNC OMAP_IO(57) +#define OMAPFB_MEMORY_READ OMAP_IOR(58, struct omapfb_memory_read) +#define OMAPFB_GET_OVERLAY_COLORMODE OMAP_IOR(59, struct omapfb_ovl_colormode) +#define OMAPFB_WAITFORGO OMAP_IO(60) +#define OMAPFB_GET_VRAM_INFO OMAP_IOR(61, struct omapfb_vram_info) +#define OMAPFB_SET_TEARSYNC OMAP_IOW(62, struct omapfb_tearsync_info) +#define OMAPFB_GET_DISPLAY_INFO OMAP_IOR(63, struct omapfb_display_info) + +#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff +#define OMAPFB_CAPS_LCDC_MASK 0x00fff000 +#define OMAPFB_CAPS_PANEL_MASK 0xff000000 + +#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000 +#define OMAPFB_CAPS_TEARSYNC 0x00002000 +#define OMAPFB_CAPS_PLANE_RELOCATE_MEM 0x00004000 +#define OMAPFB_CAPS_PLANE_SCALE 0x00008000 +#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE 0x00010000 +#define OMAPFB_CAPS_WINDOW_SCALE 0x00020000 +#define OMAPFB_CAPS_WINDOW_OVERLAY 0x00040000 +#define OMAPFB_CAPS_WINDOW_ROTATE 0x00080000 +#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000 + +/* Values from DSP must map to lower 16-bits */ +#define OMAPFB_FORMAT_MASK 0x00ff +#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100 +#define OMAPFB_FORMAT_FLAG_TEARSYNC 0x0200 +#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC 0x0400 +#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY 0x0800 +#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY 0x1000 + +#define OMAPFB_MEMTYPE_SDRAM 0 +#define OMAPFB_MEMTYPE_SRAM 1 +#define OMAPFB_MEMTYPE_MAX 1 + +#define OMAPFB_MEM_IDX_ENABLED 0x80 +#define OMAPFB_MEM_IDX_MASK 0x7f + +enum omapfb_color_format { + OMAPFB_COLOR_RGB565 = 0, + OMAPFB_COLOR_YUV422, + OMAPFB_COLOR_YUV420, + OMAPFB_COLOR_CLUT_8BPP, + OMAPFB_COLOR_CLUT_4BPP, + OMAPFB_COLOR_CLUT_2BPP, + OMAPFB_COLOR_CLUT_1BPP, + OMAPFB_COLOR_RGB444, + OMAPFB_COLOR_YUY422, + + OMAPFB_COLOR_ARGB16, + OMAPFB_COLOR_RGB24U, /* RGB24, 32-bit container */ + OMAPFB_COLOR_RGB24P, /* RGB24, 24-bit container */ + OMAPFB_COLOR_ARGB32, + OMAPFB_COLOR_RGBA32, + OMAPFB_COLOR_RGBX32, +}; + +struct omapfb_update_window { + __u32 x, y; + __u32 width, height; + __u32 format; + __u32 out_x, out_y; + __u32 out_width, out_height; + __u32 reserved[8]; +}; + +struct omapfb_update_window_old { + __u32 x, y; + __u32 width, height; + __u32 format; +}; + +enum omapfb_plane { + OMAPFB_PLANE_GFX = 0, + OMAPFB_PLANE_VID1, + OMAPFB_PLANE_VID2, +}; + +enum omapfb_channel_out { + OMAPFB_CHANNEL_OUT_LCD = 0, + OMAPFB_CHANNEL_OUT_DIGIT, +}; + +struct omapfb_plane_info { + __u32 pos_x; + __u32 pos_y; + __u8 
enabled; + __u8 channel_out; + __u8 mirror; + __u8 mem_idx; + __u32 out_width; + __u32 out_height; + __u32 reserved2[12]; +}; + +struct omapfb_mem_info { + __u32 size; + __u8 type; + __u8 reserved[3]; +}; + +struct omapfb_caps { + __u32 ctrl; + __u32 plane_color; + __u32 wnd_color; +}; + +enum omapfb_color_key_type { + OMAPFB_COLOR_KEY_DISABLED = 0, + OMAPFB_COLOR_KEY_GFX_DST, + OMAPFB_COLOR_KEY_VID_SRC, +}; + +struct omapfb_color_key { + __u8 channel_out; + __u32 background; + __u32 trans_key; + __u8 key_type; +}; + +enum omapfb_update_mode { + OMAPFB_UPDATE_DISABLED = 0, + OMAPFB_AUTO_UPDATE, + OMAPFB_MANUAL_UPDATE +}; + +struct omapfb_memory_read { + __u16 x; + __u16 y; + __u16 w; + __u16 h; + size_t buffer_size; + void __user *buffer; +}; + +struct omapfb_ovl_colormode { + __u8 overlay_idx; + __u8 mode_idx; + __u32 bits_per_pixel; + __u32 nonstd; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; +}; + +struct omapfb_vram_info { + __u32 total; + __u32 free; + __u32 largest_free_block; + __u32 reserved[5]; +}; + +struct omapfb_tearsync_info { + __u8 enabled; + __u8 reserved1[3]; + __u16 line; + __u16 reserved2; +}; + +struct omapfb_display_info { + __u16 xres; + __u16 yres; + __u32 width; /* phys width of the display in micrometers */ + __u32 height; /* phys height of the display in micrometers */ + __u32 reserved[5]; +}; + +#ifdef __KERNEL__ + +#include <plat/board.h> + +struct omapfb_platform_data { + struct omap_lcd_config lcd; +}; + +void __init omapfb_set_lcd_config(const struct omap_lcd_config *config); + +#endif + +#endif /* __OMAPFB_H */ diff --git a/include/linux/oom.h b/include/linux/oom.h new file mode 100644 index 00000000..3d764753 --- /dev/null +++ b/include/linux/oom.h @@ -0,0 +1,75 @@ +#ifndef __INCLUDE_LINUX_OOM_H +#define __INCLUDE_LINUX_OOM_H + +/* + * /proc/<pid>/oom_adj is deprecated, see + * Documentation/feature-removal-schedule.txt. + * + * /proc/<pid>/oom_adj set to -17 protects from the oom-killer + */ +#define OOM_DISABLE (-17) +/* inclusive */ +#define OOM_ADJUST_MIN (-16) +#define OOM_ADJUST_MAX 15 + +/* + * /proc/<pid>/oom_score_adj set to OOM_SCORE_ADJ_MIN disables oom killing for + * pid. 
+ */ +#define OOM_SCORE_ADJ_MIN (-1000) +#define OOM_SCORE_ADJ_MAX 1000 + +#ifdef __KERNEL__ + +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/nodemask.h> + +struct zonelist; +struct notifier_block; +struct mem_cgroup; +struct task_struct; + +/* + * Types of limitations to the nodes from which allocations may occur + */ +enum oom_constraint { + CONSTRAINT_NONE, + CONSTRAINT_CPUSET, + CONSTRAINT_MEMORY_POLICY, + CONSTRAINT_MEMCG, +}; + +extern void compare_swap_oom_score_adj(int old_val, int new_val); +extern int test_set_oom_score_adj(int new_val); + +extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg, + const nodemask_t *nodemask, unsigned long totalpages); +extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); +extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); + +extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, + int order, nodemask_t *mask, bool force_kill); +extern int register_oom_notifier(struct notifier_block *nb); +extern int unregister_oom_notifier(struct notifier_block *nb); + +extern bool oom_killer_disabled; + +static inline void oom_killer_disable(void) +{ + oom_killer_disabled = true; +} + +static inline void oom_killer_enable(void) +{ + oom_killer_disabled = false; +} + +extern struct task_struct *find_lock_task_mm(struct task_struct *p); + +/* sysctls */ +extern int sysctl_oom_dump_tasks; +extern int sysctl_oom_kill_allocating_task; +extern int sysctl_panic_on_oom; +#endif /* __KERNEL__*/ +#endif /* _INCLUDE_LINUX_OOM_H */ diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h new file mode 100644 index 00000000..eb1efa54 --- /dev/null +++ b/include/linux/openvswitch.h @@ -0,0 +1,452 @@ +/* + * Copyright (c) 2007-2011 Nicira Networks. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + */ + +#ifndef _LINUX_OPENVSWITCH_H +#define _LINUX_OPENVSWITCH_H 1 + +#include <linux/types.h> + +/** + * struct ovs_header - header for OVS Generic Netlink messages. + * @dp_ifindex: ifindex of local port for datapath (0 to make a request not + * specific to a datapath). + * + * Attributes following the header are specific to a particular OVS Generic + * Netlink family, but all of the OVS families use this header. + */ + +struct ovs_header { + int dp_ifindex; +}; + +/* Datapaths. */ + +#define OVS_DATAPATH_FAMILY "ovs_datapath" +#define OVS_DATAPATH_MCGROUP "ovs_datapath" +#define OVS_DATAPATH_VERSION 0x1 + +enum ovs_datapath_cmd { + OVS_DP_CMD_UNSPEC, + OVS_DP_CMD_NEW, + OVS_DP_CMD_DEL, + OVS_DP_CMD_GET, + OVS_DP_CMD_SET +}; + +/** + * enum ovs_datapath_attr - attributes for %OVS_DP_* commands. + * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local + * port". This is the name of the network device whose dp_ifindex is given in + * the &struct ovs_header. Always present in notifications. Required in + * %OVS_DP_NEW requests. 
May be used as an alternative to specifying + * dp_ifindex in other requests (with a dp_ifindex of 0). + * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially + * set on the datapath port (for OVS_ACTION_ATTR_MISS). Only valid on + * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should + * not be sent. + * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the + * datapath. Always present in notifications. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_DP_* commands. + */ +enum ovs_datapath_attr { + OVS_DP_ATTR_UNSPEC, + OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */ + OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */ + OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */ + __OVS_DP_ATTR_MAX +}; + +#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1) + +struct ovs_dp_stats { + __u64 n_hit; /* Number of flow table matches. */ + __u64 n_missed; /* Number of flow table misses. */ + __u64 n_lost; /* Number of misses not sent to userspace. */ + __u64 n_flows; /* Number of flows present */ +}; + +struct ovs_vport_stats { + __u64 rx_packets; /* total packets received */ + __u64 tx_packets; /* total packets transmitted */ + __u64 rx_bytes; /* total bytes received */ + __u64 tx_bytes; /* total bytes transmitted */ + __u64 rx_errors; /* bad packets received */ + __u64 tx_errors; /* packet transmit problems */ + __u64 rx_dropped; /* no space in linux buffers */ + __u64 tx_dropped; /* no space available in linux */ +}; + +/* Fixed logical ports. */ +#define OVSP_LOCAL ((__u16)0) + +/* Packet transfer. */ + +#define OVS_PACKET_FAMILY "ovs_packet" +#define OVS_PACKET_VERSION 0x1 + +enum ovs_packet_cmd { + OVS_PACKET_CMD_UNSPEC, + + /* Kernel-to-user notifications. */ + OVS_PACKET_CMD_MISS, /* Flow table miss. */ + OVS_PACKET_CMD_ACTION, /* OVS_ACTION_ATTR_USERSPACE action. */ + + /* Userspace commands. */ + OVS_PACKET_CMD_EXECUTE /* Apply actions to a packet. */ +}; + +/** + * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands. + * @OVS_PACKET_ATTR_PACKET: Present for all notifications. Contains the entire + * packet as received, from the start of the Ethernet header onward. For + * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by + * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is + * the flow key extracted from the packet as originally received. + * @OVS_PACKET_ATTR_KEY: Present for all notifications. Contains the flow key + * extracted from the packet as nested %OVS_KEY_ATTR_* attributes. This allows + * userspace to adapt its flow setup strategy by comparing its notion of the + * flow key against the kernel's. + * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used + * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. + * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION + * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an + * %OVS_USERSPACE_ATTR_USERDATA attribute. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_PACKET_* commands. + */ +enum ovs_packet_attr { + OVS_PACKET_ATTR_UNSPEC, + OVS_PACKET_ATTR_PACKET, /* Packet data. */ + OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ + OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ + OVS_PACKET_ATTR_USERDATA, /* u64 OVS_ACTION_ATTR_USERSPACE arg. 
*/ + __OVS_PACKET_ATTR_MAX +}; + +#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1) + +/* Virtual ports. */ + +#define OVS_VPORT_FAMILY "ovs_vport" +#define OVS_VPORT_MCGROUP "ovs_vport" +#define OVS_VPORT_VERSION 0x1 + +enum ovs_vport_cmd { + OVS_VPORT_CMD_UNSPEC, + OVS_VPORT_CMD_NEW, + OVS_VPORT_CMD_DEL, + OVS_VPORT_CMD_GET, + OVS_VPORT_CMD_SET +}; + +enum ovs_vport_type { + OVS_VPORT_TYPE_UNSPEC, + OVS_VPORT_TYPE_NETDEV, /* network device */ + OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ + __OVS_VPORT_TYPE_MAX +}; + +#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1) + +/** + * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands. + * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath. + * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type + * of vport. + * @OVS_VPORT_ATTR_NAME: Name of vport. For a vport based on a network device + * this is the name of the network device. Maximum length %IFNAMSIZ-1 bytes + * plus a null terminator. + * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information. + * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that + * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on + * this port. A value of zero indicates that upcalls should not be sent. + * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for + * packets sent or received through the vport. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_VPORT_* commands. + * + * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and + * %OVS_VPORT_ATTR_NAME attributes are required. %OVS_VPORT_ATTR_PORT_NO is + * optional; if not specified a free port number is automatically selected. + * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type + * of vport. + * and other attributes are ignored. + * + * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to + * look up the vport to operate on; otherwise dp_idx from the &struct + * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport. + */ +enum ovs_vport_attr { + OVS_VPORT_ATTR_UNSPEC, + OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */ + OVS_VPORT_ATTR_TYPE, /* u32 OVS_VPORT_TYPE_* constant. */ + OVS_VPORT_ATTR_NAME, /* string name, up to IFNAMSIZ bytes long */ + OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */ + OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */ + OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */ + __OVS_VPORT_ATTR_MAX +}; + +#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1) + +/* Flows. */ + +#define OVS_FLOW_FAMILY "ovs_flow" +#define OVS_FLOW_MCGROUP "ovs_flow" +#define OVS_FLOW_VERSION 0x1 + +enum ovs_flow_cmd { + OVS_FLOW_CMD_UNSPEC, + OVS_FLOW_CMD_NEW, + OVS_FLOW_CMD_DEL, + OVS_FLOW_CMD_GET, + OVS_FLOW_CMD_SET +}; + +struct ovs_flow_stats { + __u64 n_packets; /* Number of matched packets. */ + __u64 n_bytes; /* Number of matched bytes. */ +}; + +enum ovs_key_attr { + OVS_KEY_ATTR_UNSPEC, + OVS_KEY_ATTR_ENCAP, /* Nested set of encapsulated attributes. 
*/ + OVS_KEY_ATTR_PRIORITY, /* u32 skb->priority */ + OVS_KEY_ATTR_IN_PORT, /* u32 OVS dp port number */ + OVS_KEY_ATTR_ETHERNET, /* struct ovs_key_ethernet */ + OVS_KEY_ATTR_VLAN, /* be16 VLAN TCI */ + OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */ + OVS_KEY_ATTR_IPV4, /* struct ovs_key_ipv4 */ + OVS_KEY_ATTR_IPV6, /* struct ovs_key_ipv6 */ + OVS_KEY_ATTR_TCP, /* struct ovs_key_tcp */ + OVS_KEY_ATTR_UDP, /* struct ovs_key_udp */ + OVS_KEY_ATTR_ICMP, /* struct ovs_key_icmp */ + OVS_KEY_ATTR_ICMPV6, /* struct ovs_key_icmpv6 */ + OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */ + OVS_KEY_ATTR_ND, /* struct ovs_key_nd */ + __OVS_KEY_ATTR_MAX +}; + +#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) + +/** + * enum ovs_frag_type - IPv4 and IPv6 fragment type + * @OVS_FRAG_TYPE_NONE: Packet is not a fragment. + * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0. + * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset. + * + * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag &struct + * ovs_key_ipv6. + */ +enum ovs_frag_type { + OVS_FRAG_TYPE_NONE, + OVS_FRAG_TYPE_FIRST, + OVS_FRAG_TYPE_LATER, + __OVS_FRAG_TYPE_MAX +}; + +#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1) + +struct ovs_key_ethernet { + __u8 eth_src[6]; + __u8 eth_dst[6]; +}; + +struct ovs_key_ipv4 { + __be32 ipv4_src; + __be32 ipv4_dst; + __u8 ipv4_proto; + __u8 ipv4_tos; + __u8 ipv4_ttl; + __u8 ipv4_frag; /* One of OVS_FRAG_TYPE_*. */ +}; + +struct ovs_key_ipv6 { + __be32 ipv6_src[4]; + __be32 ipv6_dst[4]; + __be32 ipv6_label; /* 20-bits in least-significant bits. */ + __u8 ipv6_proto; + __u8 ipv6_tclass; + __u8 ipv6_hlimit; + __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */ +}; + +struct ovs_key_tcp { + __be16 tcp_src; + __be16 tcp_dst; +}; + +struct ovs_key_udp { + __be16 udp_src; + __be16 udp_dst; +}; + +struct ovs_key_icmp { + __u8 icmp_type; + __u8 icmp_code; +}; + +struct ovs_key_icmpv6 { + __u8 icmpv6_type; + __u8 icmpv6_code; +}; + +struct ovs_key_arp { + __be32 arp_sip; + __be32 arp_tip; + __be16 arp_op; + __u8 arp_sha[6]; + __u8 arp_tha[6]; +}; + +struct ovs_key_nd { + __u32 nd_target[4]; + __u8 nd_sll[6]; + __u8 nd_tll[6]; +}; + +/** + * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. + * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow + * key. Always present in notifications. Required for all requests (except + * dumps). + * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying + * the actions to take for packets that match the key. Always present in + * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for + * %OVS_FLOW_CMD_SET requests. + * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this + * flow. Present in notifications if the stats would be nonzero. Ignored in + * requests. + * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the + * TCP flags seen on packets in this flow. Only present in notifications for + * TCP flows, and only if it would be nonzero. Ignored in requests. + * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on + * the system monotonic clock, at which a packet was last processed for this + * flow. Only present in notifications if a packet has been processed for this + * flow. Ignored in requests. + * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the + * last-used time, accumulated TCP flags, and statistics for this flow. + * Otherwise ignored in requests. Never present in notifications. 
+ * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_FLOW_* commands. + */ +enum ovs_flow_attr { + OVS_FLOW_ATTR_UNSPEC, + OVS_FLOW_ATTR_KEY, /* Sequence of OVS_KEY_ATTR_* attributes. */ + OVS_FLOW_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ + OVS_FLOW_ATTR_STATS, /* struct ovs_flow_stats. */ + OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */ + OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ + OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ + __OVS_FLOW_ATTR_MAX +}; + +#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1) + +/** + * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action. + * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with + * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of + * %UINT32_MAX samples all packets and intermediate values sample intermediate + * fractions of packets. + * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event. + * Actions are passed as nested attributes. + * + * Executes the specified actions with the given probability on a per-packet + * basis. + */ +enum ovs_sample_attr { + OVS_SAMPLE_ATTR_UNSPEC, + OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */ + OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ + __OVS_SAMPLE_ATTR_MAX, +}; + +#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1) + +/** + * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action. + * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION + * message should be sent. Required. + * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the + * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. + */ +enum ovs_userspace_attr { + OVS_USERSPACE_ATTR_UNSPEC, + OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ + OVS_USERSPACE_ATTR_USERDATA, /* u64 optional user-specified cookie. */ + __OVS_USERSPACE_ATTR_MAX +}; + +#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) + +/** + * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. + * @vlan_tpid: Tag protocol identifier (TPID) to push. + * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set + * (but it will not be set in the 802.1Q header that is pushed). + * + * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID + * values are those that the kernel module also parses as 802.1Q headers, to + * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN + * from having surprising results. + */ +struct ovs_action_push_vlan { + __be16 vlan_tpid; /* 802.1Q TPID. */ + __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ +}; + +/** + * enum ovs_action_attr - Action types. + * + * @OVS_ACTION_ATTR_OUTPUT: Output packet to port. + * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested + * %OVS_USERSPACE_ATTR_* attributes. + * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The + * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its + * value. + * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the + * packet. + * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. + * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in + * the nested %OVS_SAMPLE_ATTR_* attributes. + * + * Only a single header can be set with a single %OVS_ACTION_ATTR_SET.
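To make the TCI layout and the CFI requirement for ovs_action_push_vlan concrete, here is a small user-space sketch that pushes VLAN 100 with priority 3. The bit positions assume the standard 802.1Q TCI layout (PCP in bits 15-13, CFI in bit 12, VLAN ID in bits 11-0); ETH_P_8021Q and htons() come from the usual headers and are not defined here.

#include <arpa/inet.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_P_8021Q */
#include <linux/openvswitch.h>

static void fill_push_vlan(struct ovs_action_push_vlan *pv)
{
	pv->vlan_tpid = htons(ETH_P_8021Q);
	/* Priority 3, CFI bit set as required (it is stripped from the
	 * pushed header), VLAN ID 100. */
	pv->vlan_tci = htons((3 << 13) | 0x1000 | 100);
}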
Not all + * fields within a header are modifiable, e.g. the IPv4 protocol and fragment + * type may not be changed. + */ + +enum ovs_action_attr { + OVS_ACTION_ATTR_UNSPEC, + OVS_ACTION_ATTR_OUTPUT, /* u32 port number. */ + OVS_ACTION_ATTR_USERSPACE, /* Nested OVS_USERSPACE_ATTR_*. */ + OVS_ACTION_ATTR_SET, /* One nested OVS_KEY_ATTR_*. */ + OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ + OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ + OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ + __OVS_ACTION_ATTR_MAX +}; + +#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1) + +#endif /* _LINUX_OPENVSWITCH_H */ diff --git a/include/linux/opp.h b/include/linux/opp.h new file mode 100644 index 00000000..2a4e5fae --- /dev/null +++ b/include/linux/opp.h @@ -0,0 +1,126 @@ +/* + * Generic OPP Interface + * + * Copyright (C) 2009-2010 Texas Instruments Incorporated. + * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_OPP_H__ +#define __LINUX_OPP_H__ + +#include <linux/err.h> +#include <linux/cpufreq.h> +#include <linux/notifier.h> + +struct opp; +struct device; + +enum opp_event { + OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, +}; + +#if defined(CONFIG_PM_OPP) + +unsigned long opp_get_voltage(struct opp *opp); + +unsigned long opp_get_freq(struct opp *opp); + +int opp_get_opp_count(struct device *dev); + +struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, + bool available); + +struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq); + +struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq); + +int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); + +int opp_enable(struct device *dev, unsigned long freq); + +int opp_disable(struct device *dev, unsigned long freq); + +struct srcu_notifier_head *opp_get_notifier(struct device *dev); + +#else +static inline unsigned long opp_get_voltage(struct opp *opp) +{ + return 0; +} + +static inline unsigned long opp_get_freq(struct opp *opp) +{ + return 0; +} + +static inline int opp_get_opp_count(struct device *dev) +{ + return 0; +} + +static inline struct opp *opp_find_freq_exact(struct device *dev, + unsigned long freq, bool available) +{ + return ERR_PTR(-EINVAL); +} + +static inline struct opp *opp_find_freq_floor(struct device *dev, + unsigned long *freq) +{ + return ERR_PTR(-EINVAL); +} + +static inline struct opp *opp_find_freq_ceil(struct device *dev, + unsigned long *freq) +{ + return ERR_PTR(-EINVAL); +} + +static inline int opp_add(struct device *dev, unsigned long freq, + unsigned long u_volt) +{ + return -EINVAL; +} + +static inline int opp_enable(struct device *dev, unsigned long freq) +{ + return 0; +} + +static inline int opp_disable(struct device *dev, unsigned long freq) +{ + return 0; +} + +static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_PM_OPP */ + +#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) +int opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table); +void opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table); +#else +static inline int opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ + return -EINVAL; +} + +static inline +void 
opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ +} +#endif /* CONFIG_CPU_FREQ */ + +#endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h new file mode 100644 index 00000000..a4c56245 --- /dev/null +++ b/include/linux/oprofile.h @@ -0,0 +1,211 @@ +/** + * @file oprofile.h + * + * API for machine-specific interrupts to interface + * to oprofile. + * + * @remark Copyright 2002 OProfile authors + * @remark Read the file COPYING + * + * @author John Levon <levon@movementarian.org> + */ + +#ifndef OPROFILE_H +#define OPROFILE_H + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/printk.h> +#include <linux/atomic.h> + +/* Each escaped entry is prefixed by ESCAPE_CODE + * then one of the following codes, then the + * relevant data. + * These #defines live in this file so that arch-specific + * buffer sync'ing code can access them. + */ +#define ESCAPE_CODE ~0UL +#define CTX_SWITCH_CODE 1 +#define CPU_SWITCH_CODE 2 +#define COOKIE_SWITCH_CODE 3 +#define KERNEL_ENTER_SWITCH_CODE 4 +#define KERNEL_EXIT_SWITCH_CODE 5 +#define MODULE_LOADED_CODE 6 +#define CTX_TGID_CODE 7 +#define TRACE_BEGIN_CODE 8 +#define TRACE_END_CODE 9 +#define XEN_ENTER_SWITCH_CODE 10 +#define SPU_PROFILING_CODE 11 +#define SPU_CTX_SWITCH_CODE 12 +#define IBS_FETCH_CODE 13 +#define IBS_OP_CODE 14 + +struct super_block; +struct dentry; +struct file_operations; +struct pt_regs; + +/* Operations structure to be filled in */ +struct oprofile_operations { + /* create any necessary configuration files in the oprofile fs. + * Optional. */ + int (*create_files)(struct super_block * sb, struct dentry * root); + /* Do any necessary interrupt setup. Optional. */ + int (*setup)(void); + /* Do any necessary interrupt shutdown. Optional. */ + void (*shutdown)(void); + /* Start delivering interrupts. */ + int (*start)(void); + /* Stop delivering interrupts. */ + void (*stop)(void); + /* Arch-specific buffer sync functions. + * Return value = 0: Success + * Return value = -1: Failure + * Return value = 1: Run generic sync function + */ + int (*sync_start)(void); + int (*sync_stop)(void); + + /* Initiate a stack backtrace. Optional. */ + void (*backtrace)(struct pt_regs * const regs, unsigned int depth); + + /* Multiplex between different events. Optional. */ + int (*switch_events)(void); + /* CPU identification string. */ + char * cpu_type; +}; + +/** + * One-time initialisation. *ops must be set to a filled-in + * operations structure. This is called even in timer interrupt + * mode so an arch can set a backtrace callback. + * + * If an error occurs, the fields should be left untouched. + */ +int oprofile_arch_init(struct oprofile_operations * ops); + +/** + * One-time exit/cleanup for the arch. + */ +void oprofile_arch_exit(void); + +/** + * Add a sample. This may be called from any context. + */ +void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); + +/** + * Add an extended sample. Use this when the PC is not from the regs, and + * we cannot determine if we're in kernel mode from the regs. + * + * This function does perform a backtrace. + * + */ +void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, + unsigned long event, int is_kernel); + +/** + * Add an hardware sample. 
+ */ +void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs, + unsigned long event, int is_kernel, + struct task_struct *task); + +/* Use this instead when the PC value is not from the regs. Doesn't + * backtrace. */ +void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event); + +/* add a backtrace entry, to be called from the ->backtrace callback */ +void oprofile_add_trace(unsigned long eip); + + +/** + * Create a file of the given name as a child of the given root, with + * the specified file operations. + */ +int oprofilefs_create_file(struct super_block * sb, struct dentry * root, + char const * name, const struct file_operations * fops); + +int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root, + char const * name, const struct file_operations * fops, int perm); + +/** Create a file for read/write access to an unsigned long. */ +int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root, + char const * name, ulong * val); + +/** Create a file for read-only access to an unsigned long. */ +int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, + char const * name, ulong * val); + +/** Create a file for read-only access to an atomic_t. */ +int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, + char const * name, atomic_t * val); + +/** create a directory */ +struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root, + char const * name); + +/** + * Write the given asciz string to the given user buffer @buf, updating *offset + * appropriately. Returns bytes written or -EFAULT. + */ +ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset); + +/** + * Convert an unsigned long value into ASCII and copy it to the user buffer @buf, + * updating *offset appropriately. Returns bytes written or -EFAULT. + */ +ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset); + +/** + * Read an ASCII string for a number from a userspace buffer and fill *val on success. + * Returns 0 on success, < 0 on error. + */ +int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count); + +/** lock for read/write safety */ +extern raw_spinlock_t oprofilefs_lock; + +/** + * Add the contents of a circular buffer to the event buffer. 
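To show how the oprofilefs helpers above are typically wired up, here is a hedged sketch of an architecture hook that exposes one tunable through the oprofile filesystem. The example_param variable, the "example_param" file name and the "timer" cpu_type string are invented for illustration and do not come from this header.

#include <linux/oprofile.h>

static unsigned long example_param;	/* hypothetical arch-specific knob */

static int example_create_files(struct super_block *sb, struct dentry *root)
{
	/* Shows up as a read/write file in the oprofile filesystem root. */
	return oprofilefs_create_ulong(sb, root, "example_param", &example_param);
}

int oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->create_files = example_create_files;
	ops->cpu_type = "timer";	/* placeholder identification string */
	return 0;
}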
+ */ +void oprofile_put_buff(unsigned long *buf, unsigned int start, + unsigned int stop, unsigned int max); + +unsigned long oprofile_get_cpu_buffer_size(void); +void oprofile_cpu_buffer_inc_smpl_lost(void); + +/* cpu buffer functions */ + +struct op_sample; + +struct op_entry { + struct ring_buffer_event *event; + struct op_sample *sample; + unsigned long size; + unsigned long *data; +}; + +void oprofile_write_reserve(struct op_entry *entry, + struct pt_regs * const regs, + unsigned long pc, int code, int size); +int oprofile_add_data(struct op_entry *entry, unsigned long val); +int oprofile_add_data64(struct op_entry *entry, u64 val); +int oprofile_write_commit(struct op_entry *entry); + +#ifdef CONFIG_HW_PERF_EVENTS +int __init oprofile_perf_init(struct oprofile_operations *ops); +void oprofile_perf_exit(void); +char *op_name_from_perf_id(void); +#else +static inline int __init oprofile_perf_init(struct oprofile_operations *ops) +{ + pr_info("oprofile: hardware counters not available\n"); + return -ENODEV; +} +static inline void oprofile_perf_exit(void) { } +#endif /* CONFIG_HW_PERF_EVENTS */ + +#endif /* OPROFILE_H */ diff --git a/include/linux/oxu210hp.h b/include/linux/oxu210hp.h new file mode 100644 index 00000000..0bf96eae --- /dev/null +++ b/include/linux/oxu210hp.h @@ -0,0 +1,7 @@ +/* platform data for the OXU210HP HCD */ + +struct oxu210hp_platform_data { + unsigned int bus16:1; + unsigned int use_hcd_otg:1; + unsigned int use_hcd_sph:1; +}; diff --git a/include/linux/padata.h b/include/linux/padata.h new file mode 100644 index 00000000..86292bee --- /dev/null +++ b/include/linux/padata.h @@ -0,0 +1,190 @@ +/* + * padata.h - header for the padata parallelization interface + * + * Copyright (C) 2008, 2009 secunet Security Networks AG + * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef PADATA_H +#define PADATA_H + +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/timer.h> +#include <linux/notifier.h> +#include <linux/kobject.h> + +#define PADATA_CPU_SERIAL 0x01 +#define PADATA_CPU_PARALLEL 0x02 + +/** + * struct padata_priv - Embedded into the user's data structure. + * + * @list: List entry, to attach to the padata lists. + * @pd: Pointer to the internal control structure. + * @cb_cpu: Callback cpu for serialization. + * @seq_nr: Sequence number of the parallelized data object. + * @info: Used to pass information from the parallel to the serial function. + * @parallel: Parallel execution function. + * @serial: Serial complete function. + */ +struct padata_priv { + struct list_head list; + struct parallel_data *pd; + int cb_cpu; + int info; + void (*parallel)(struct padata_priv *padata); + void (*serial)(struct padata_priv *padata); +}; + +/** + * struct padata_list + * + * @list: List head. + * @lock: List lock.
+ */ +struct padata_list { + struct list_head list; + spinlock_t lock; +}; + +/** +* struct padata_serial_queue - The percpu padata serial queue +* +* @serial: List to wait for serialization after reordering. +* @work: work struct for serialization. +* @pd: Backpointer to the internal control structure. +*/ +struct padata_serial_queue { + struct padata_list serial; + struct work_struct work; + struct parallel_data *pd; +}; + +/** + * struct padata_parallel_queue - The percpu padata parallel queue + * + * @parallel: List to wait for parallelization. + * @reorder: List to wait for reordering after parallel processing. + * @pd: Backpointer to the internal control structure. + * @work: work struct for parallelization. + * @num_obj: Number of objects that are processed by this cpu. + * @cpu_index: Index of the cpu. + */ +struct padata_parallel_queue { + struct padata_list parallel; + struct padata_list reorder; + struct parallel_data *pd; + struct work_struct work; + atomic_t num_obj; + int cpu_index; +}; + +/** + * struct padata_cpumask - The cpumasks for the parallel/serial workers + * + * @pcpu: cpumask for the parallel workers. + * @cbcpu: cpumask for the serial (callback) workers. + */ +struct padata_cpumask { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; +}; + +/** + * struct parallel_data - Internal control structure, covers everything + * that depends on the cpumask in use. + * + * @pinst: padata instance. + * @pqueue: percpu padata queues used for parallelization. + * @squeue: percpu padata queues used for serialization. + * @reorder_objects: Number of objects waiting in the reorder queues. + * @refcnt: Number of objects holding a reference on this parallel_data. + * @cpumask: The cpumasks in use for parallel and serial workers. + * @lock: Reorder lock. + * @seq_lock: Lock protecting @seq_nr. + * @seq_nr: Sequence number applied to the parallelized data objects. + * @processed: Number of already processed objects. + * @timer: Reorder timer. + */ +struct parallel_data { + struct padata_instance *pinst; + struct padata_parallel_queue __percpu *pqueue; + struct padata_serial_queue __percpu *squeue; + atomic_t reorder_objects; + atomic_t refcnt; + struct padata_cpumask cpumask; + spinlock_t lock ____cacheline_aligned; + spinlock_t seq_lock; + unsigned int seq_nr; + unsigned int processed; + struct timer_list timer; +}; + +/** + * struct padata_instance - The overall control structure. + * + * @cpu_notifier: cpu hotplug notifier. + * @wq: The workqueue in use. + * @pd: The internal control structure. + * @cpumask: User supplied cpumasks for parallel and serial work. + * @cpumask_change_notifier: Notifiers chain for user-defined notify + * callbacks that will be called when either @pcpu or @cbcpu + * or both cpumasks change. + * @kobj: padata instance kernel object. + * @lock: padata instance lock. + * @flags: padata flags.
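+ *
+ * A rough usage sketch of the helpers declared below (added for
+ * illustration; "my_wq", "my_parallel", "my_serial" and "callback_cpu" are
+ * hypothetical names, "padata" points to the caller's embedded struct
+ * padata_priv, and error handling is omitted). The parallel callback is
+ * expected to hand the object back via padata_do_serial() once it is done:
+ *
+ *	pinst = padata_alloc_possible(my_wq);
+ *	padata_start(pinst);
+ *
+ *	padata->parallel = my_parallel;
+ *	padata->serial = my_serial;
+ *	err = padata_do_parallel(pinst, padata, callback_cpu);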
+ */ +struct padata_instance { + struct notifier_block cpu_notifier; + struct workqueue_struct *wq; + struct parallel_data *pd; + struct padata_cpumask cpumask; + struct blocking_notifier_head cpumask_change_notifier; + struct kobject kobj; + struct mutex lock; + u8 flags; +#define PADATA_INIT 1 +#define PADATA_RESET 2 +#define PADATA_INVALID 4 +}; + +extern struct padata_instance *padata_alloc_possible( + struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask); +extern void padata_free(struct padata_instance *pinst); +extern int padata_do_parallel(struct padata_instance *pinst, + struct padata_priv *padata, int cb_cpu); +extern void padata_do_serial(struct padata_priv *padata); +extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, + cpumask_var_t cpumask); +extern int padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, + cpumask_var_t cbcpumask); +extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); +extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); +extern int padata_start(struct padata_instance *pinst); +extern void padata_stop(struct padata_instance *pinst); +extern int padata_register_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); +extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); +#endif diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h new file mode 100644 index 00000000..22691f61 --- /dev/null +++ b/include/linux/page-debug-flags.h @@ -0,0 +1,32 @@ +#ifndef LINUX_PAGE_DEBUG_FLAGS_H +#define LINUX_PAGE_DEBUG_FLAGS_H + +/* + * page->debug_flags bits: + * + * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to + * implement generic debug pagealloc feature. The pages are filled with + * poison patterns and set this flag after free_pages(). The poisoned + * pages are verified whether the patterns are not corrupted and clear + * the flag before alloc_pages(). + */ + +enum page_debug_flags { + PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */ + PAGE_DEBUG_FLAG_GUARD, +}; + +/* + * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably + * gets turned off when no debug features are enabling it! + */ + +#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS +#if !defined(CONFIG_PAGE_POISONING) && \ + !defined(CONFIG_PAGE_GUARD) \ +/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */ +#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features! +#endif +#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */ + +#endif /* LINUX_PAGE_DEBUG_FLAGS_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h new file mode 100644 index 00000000..c88d2a94 --- /dev/null +++ b/include/linux/page-flags.h @@ -0,0 +1,503 @@ +/* + * Macros for manipulating and testing page->flags + */ + +#ifndef PAGE_FLAGS_H +#define PAGE_FLAGS_H + +#include <linux/types.h> +#include <linux/bug.h> +#ifndef __GENERATING_BOUNDS_H +#include <linux/mm_types.h> +#include <generated/bounds.h> +#endif /* !__GENERATING_BOUNDS_H */ + +/* + * Various page->flags bits: + * + * PG_reserved is set for special pages, which can never be swapped out. Some + * of them might not even exist (eg empty_bad_page)... + * + * The PG_private bitflag is set on pagecache pages if they contain filesystem + * specific data (which is normally at page->private). 
It can be used by + * private allocations for its own usage. + * + * During initiation of disk I/O, PG_locked is set. This bit is set before I/O + * and cleared when writeback _starts_ or when read _completes_. PG_writeback + * is set before writeback starts and cleared when it finishes. + * + * PG_locked also pins a page in pagecache, and blocks truncation of the file + * while it is held. + * + * page_waitqueue(page) is a wait queue of all tasks waiting for the page + * to become unlocked. + * + * PG_uptodate tells whether the page's contents is valid. When a read + * completes, the page becomes uptodate, unless a disk I/O error happened. + * + * PG_referenced, PG_reclaim are used for page reclaim for anonymous and + * file-backed pagecache (see mm/vmscan.c). + * + * PG_error is set to indicate that an I/O error occurred on this page. + * + * PG_arch_1 is an architecture specific page state bit. The generic code + * guarantees that this bit is cleared for a page when it first is entered into + * the page cache. + * + * PG_highmem pages are not permanently mapped into the kernel virtual address + * space, they need to be kmapped separately for doing IO on the pages. The + * struct page (these bits with information) are always mapped into kernel + * address space... + * + * PG_hwpoison indicates that a page got corrupted in hardware and contains + * data with incorrect ECC bits that triggered a machine check. Accessing is + * not safe since it may cause another machine check. Don't touch! + */ + +/* + * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break + * locked- and dirty-page accounting. + * + * The page flags field is split into two parts, the main flags area + * which extends from the low bits upwards, and the fields area which + * extends from the high bits downwards. + * + * | FIELD | ... | FLAGS | + * N-1 ^ 0 + * (NR_PAGEFLAGS) + * + * The fields area is reserved for fields mapping zone, node (for NUMA) and + * SPARSEMEM section (for variants of SPARSEMEM that require section ids like + * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP). + */ +enum pageflags { + PG_locked, /* Page is locked. Don't touch. */ + PG_error, + PG_referenced, + PG_uptodate, + PG_dirty, + PG_lru, + PG_active, + PG_slab, + PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ + PG_arch_1, + PG_reserved, + PG_private, /* If pagecache, has fs-private data */ + PG_private_2, /* If pagecache, has fs aux data */ + PG_writeback, /* Page is under writeback */ +#ifdef CONFIG_PAGEFLAGS_EXTENDED + PG_head, /* A head page */ + PG_tail, /* A tail page */ +#else + PG_compound, /* A compound page */ +#endif + PG_swapcache, /* Swap page: swp_entry_t in private */ + PG_mappedtodisk, /* Has blocks allocated on-disk */ + PG_reclaim, /* To be reclaimed asap */ + PG_swapbacked, /* Page is backed by RAM/swap */ + PG_unevictable, /* Page is "unevictable" */ +#ifdef CONFIG_MMU + PG_mlocked, /* Page is vma mlocked */ +#endif +#ifdef CONFIG_ARCH_USES_PG_UNCACHED + PG_uncached, /* Page has been mapped as uncached */ +#endif +#ifdef CONFIG_MEMORY_FAILURE + PG_hwpoison, /* hardware poisoned page. Don't touch */ +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + PG_compound_lock, +#endif + __NR_PAGEFLAGS, + + /* Filesystems */ + PG_checked = PG_owner_priv_1, + + /* Two page bits are conscripted by FS-Cache to maintain local caching + * state. These bits are set on pages belonging to the netfs's inodes + * when those inodes are being locally cached. 
+ */ + PG_fscache = PG_private_2, /* page backed by cache */ + + /* XEN */ + PG_pinned = PG_owner_priv_1, + PG_savepinned = PG_dirty, + + /* SLOB */ + PG_slob_free = PG_private, +}; + +#ifndef __GENERATING_BOUNDS_H + +/* + * Macros to create function definitions for page flags + */ +#define TESTPAGEFLAG(uname, lname) \ +static inline int Page##uname(const struct page *page) \ + { return test_bit(PG_##lname, &page->flags); } + +#define SETPAGEFLAG(uname, lname) \ +static inline void SetPage##uname(struct page *page) \ + { set_bit(PG_##lname, &page->flags); } + +#define CLEARPAGEFLAG(uname, lname) \ +static inline void ClearPage##uname(struct page *page) \ + { clear_bit(PG_##lname, &page->flags); } + +#define __SETPAGEFLAG(uname, lname) \ +static inline void __SetPage##uname(struct page *page) \ + { __set_bit(PG_##lname, &page->flags); } + +#define __CLEARPAGEFLAG(uname, lname) \ +static inline void __ClearPage##uname(struct page *page) \ + { __clear_bit(PG_##lname, &page->flags); } + +#define TESTSETFLAG(uname, lname) \ +static inline int TestSetPage##uname(struct page *page) \ + { return test_and_set_bit(PG_##lname, &page->flags); } + +#define TESTCLEARFLAG(uname, lname) \ +static inline int TestClearPage##uname(struct page *page) \ + { return test_and_clear_bit(PG_##lname, &page->flags); } + +#define __TESTCLEARFLAG(uname, lname) \ +static inline int __TestClearPage##uname(struct page *page) \ + { return __test_and_clear_bit(PG_##lname, &page->flags); } + +#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ + SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) + +#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ + __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname) + +#define PAGEFLAG_FALSE(uname) \ +static inline int Page##uname(const struct page *page) \ + { return 0; } + +#define TESTSCFLAG(uname, lname) \ + TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname) + +#define SETPAGEFLAG_NOOP(uname) \ +static inline void SetPage##uname(struct page *page) { } + +#define CLEARPAGEFLAG_NOOP(uname) \ +static inline void ClearPage##uname(struct page *page) { } + +#define __CLEARPAGEFLAG_NOOP(uname) \ +static inline void __ClearPage##uname(struct page *page) { } + +#define TESTCLEARFLAG_FALSE(uname) \ +static inline int TestClearPage##uname(struct page *page) { return 0; } + +#define __TESTCLEARFLAG_FALSE(uname) \ +static inline int __TestClearPage##uname(struct page *page) { return 0; } + +struct page; /* forward declaration */ + +TESTPAGEFLAG(Locked, locked) +PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error) +PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) +PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) +PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) +PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) + TESTCLEARFLAG(Active, active) +__PAGEFLAG(Slab, slab) +PAGEFLAG(Checked, checked) /* Used by some filesystems */ +PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ +PAGEFLAG(SavePinned, savepinned); /* Xen */ +PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) +PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) + +__PAGEFLAG(SlobFree, slob_free) + +/* + * Private page markings that may be used by the filesystem that owns the page + * for its own purposes. 
+ * - PG_private and PG_private_2 cause releasepage() and co to be invoked + */ +PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) + __CLEARPAGEFLAG(Private, private) +PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2) +PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) + +/* + * Only test-and-set exist for PG_writeback. The unconditional operators are + * risky: they bypass page accounting. + */ +TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback) +PAGEFLAG(MappedToDisk, mappedtodisk) + +/* PG_readahead is only used for file reads; PG_reclaim is only for writes */ +PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim) +PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */ + +#ifdef CONFIG_HIGHMEM +/* + * Must use a macro here due to header dependency issues. page_zone() is not + * available at this point. + */ +#define PageHighMem(__p) is_highmem(page_zone(__p)) +#else +PAGEFLAG_FALSE(HighMem) +#endif + +#ifdef CONFIG_SWAP +PAGEFLAG(SwapCache, swapcache) +#else +PAGEFLAG_FALSE(SwapCache) + SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache) +#endif + +PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) + TESTCLEARFLAG(Unevictable, unevictable) + +#ifdef CONFIG_MMU +PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) + TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) +#else +PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) + TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) +#endif + +#ifdef CONFIG_ARCH_USES_PG_UNCACHED +PAGEFLAG(Uncached, uncached) +#else +PAGEFLAG_FALSE(Uncached) +#endif + +#ifdef CONFIG_MEMORY_FAILURE +PAGEFLAG(HWPoison, hwpoison) +TESTSCFLAG(HWPoison, hwpoison) +#define __PG_HWPOISON (1UL << PG_hwpoison) +#else +PAGEFLAG_FALSE(HWPoison) +#define __PG_HWPOISON 0 +#endif + +u64 stable_page_flags(struct page *page); + +static inline int PageUptodate(struct page *page) +{ + int ret = test_bit(PG_uptodate, &(page)->flags); + + /* + * Must ensure that the data we read out of the page is loaded + * _after_ we've loaded page->flags to check for PageUptodate. + * We can skip the barrier if the page is not uptodate, because + * we wouldn't be reading anything from it. + * + * See SetPageUptodate() for the other side of the story. + */ + if (ret) + smp_rmb(); + + return ret; +} + +static inline void __SetPageUptodate(struct page *page) +{ + smp_wmb(); + __set_bit(PG_uptodate, &(page)->flags); +} + +static inline void SetPageUptodate(struct page *page) +{ +#ifdef CONFIG_S390 + if (!test_and_set_bit(PG_uptodate, &page->flags)) + page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, 0); +#else + /* + * Memory barrier must be issued before setting the PG_uptodate bit, + * so that all previous stores issued in order to bring the page + * uptodate are actually visible before PageUptodate becomes true. + * + * s390 doesn't need an explicit smp_wmb here because the test and + * set bit already provides full barriers. + */ + smp_wmb(); + set_bit(PG_uptodate, &(page)->flags); +#endif +} + +CLEARPAGEFLAG(Uptodate, uptodate) + +extern void cancel_dirty_page(struct page *page, unsigned int account_size); + +int test_clear_page_writeback(struct page *page); +int test_set_page_writeback(struct page *page); + +static inline void set_page_writeback(struct page *page) +{ + test_set_page_writeback(page); +} + +#ifdef CONFIG_PAGEFLAGS_EXTENDED +/* + * System with lots of page flags available. 
This allows separate + * flags for PageHead() and PageTail() checks of compound pages so that bit + * tests can be used in performance sensitive paths. PageCompound is + * generally not used in hot code paths. + */ +__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) +__PAGEFLAG(Tail, tail) + +static inline int PageCompound(struct page *page) +{ + return page->flags & ((1L << PG_head) | (1L << PG_tail)); + +} +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void ClearPageCompound(struct page *page) +{ + BUG_ON(!PageHead(page)); + ClearPageHead(page); +} +#endif +#else +/* + * Reduce page flag use as much as possible by overlapping + * compound page flags with the flags used for page cache pages. Possible + * because PageCompound is always set for compound pages and not for + * pages on the LRU and/or pagecache. + */ +TESTPAGEFLAG(Compound, compound) +__PAGEFLAG(Head, compound) + +/* + * PG_reclaim is used in combination with PG_compound to mark the + * head and tail of a compound page. This saves one page flag + * but makes it impossible to use compound pages for the page cache. + * The PG_reclaim bit would have to be used for reclaim or readahead + * if compound pages enter the page cache. + * + * PG_compound & PG_reclaim => Tail page + * PG_compound & ~PG_reclaim => Head page + */ +#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) + +static inline int PageTail(struct page *page) +{ + return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); +} + +static inline void __SetPageTail(struct page *page) +{ + page->flags |= PG_head_tail_mask; +} + +static inline void __ClearPageTail(struct page *page) +{ + page->flags &= ~PG_head_tail_mask; +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void ClearPageCompound(struct page *page) +{ + BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound)); + clear_bit(PG_compound, &page->flags); +} +#endif + +#endif /* !PAGEFLAGS_EXTENDED */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * PageHuge() only returns true for hugetlbfs pages, but not for + * normal or transparent huge pages. + * + * PageTransHuge() returns true for both transparent huge and + * hugetlbfs pages, but not normal pages. PageTransHuge() can only be + * called only in the core VM paths where hugetlbfs pages can't exist. + */ +static inline int PageTransHuge(struct page *page) +{ + VM_BUG_ON(PageTail(page)); + return PageHead(page); +} + +/* + * PageTransCompound returns true for both transparent huge pages + * and hugetlbfs pages, so it should only be called when it's known + * that hugetlbfs pages aren't involved. + */ +static inline int PageTransCompound(struct page *page) +{ + return PageCompound(page); +} + +/* + * PageTransTail returns true for both transparent huge pages + * and hugetlbfs pages, so it should only be called when it's known + * that hugetlbfs pages aren't involved. + */ +static inline int PageTransTail(struct page *page) +{ + return PageTail(page); +} + +#else + +static inline int PageTransHuge(struct page *page) +{ + return 0; +} + +static inline int PageTransCompound(struct page *page) +{ + return 0; +} + +static inline int PageTransTail(struct page *page) +{ + return 0; +} +#endif + +#ifdef CONFIG_MMU +#define __PG_MLOCKED (1 << PG_mlocked) +#else +#define __PG_MLOCKED 0 +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define __PG_COMPOUND_LOCK (1 << PG_compound_lock) +#else +#define __PG_COMPOUND_LOCK 0 +#endif + +/* + * Flags checked when a page is freed. Pages being freed should not have + * these flags set. 
If they are, there is a problem. + */ +#define PAGE_FLAGS_CHECK_AT_FREE \ + (1 << PG_lru | 1 << PG_locked | \ + 1 << PG_private | 1 << PG_private_2 | \ + 1 << PG_writeback | 1 << PG_reserved | \ + 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ + 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ + __PG_COMPOUND_LOCK) + +/* + * Flags checked when a page is prepped for return by the page allocator. + * Pages being prepped should not have any flags set. If they are set, + * there has been a kernel bug or struct page corruption. + */ +#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) + +#define PAGE_FLAGS_PRIVATE \ + (1 << PG_private | 1 << PG_private_2) +/** + * page_has_private - Determine if page has private stuff + * @page: The page to be checked + * + * Determine if a page has private stuff, indicating that release routines + * should be invoked upon it. + */ +static inline int page_has_private(struct page *page) +{ + return !!(page->flags & PAGE_FLAGS_PRIVATE); +} + +#endif /* !__GENERATING_BOUNDS_H */ + +#endif /* PAGE_FLAGS_H */ diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h new file mode 100644 index 00000000..051c1b1e --- /dev/null +++ b/include/linux/page-isolation.h @@ -0,0 +1,37 @@ +#ifndef __LINUX_PAGEISOLATION_H +#define __LINUX_PAGEISOLATION_H + +/* + * Changes the migrate type of pages in [start_pfn, end_pfn) to MIGRATE_ISOLATE. + * If the specified range includes migrate types other than MOVABLE, + * this will fail with -EBUSY. + * + * To finally isolate all pages in the range, the caller has to free + * all pages in the range. test_pages_isolated() can be used to test it. + */ +extern int +start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn); + +/* + * Changes MIGRATE_ISOLATE back to MIGRATE_MOVABLE. + * The target range is [start_pfn, end_pfn). + */ +extern int +undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn); + +/* + * Test whether all pages in [start_pfn, end_pfn) are isolated or not. + */ +extern int +test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn); + +/* + * Internal functions. Changes a pageblock's migrate type. + * Please use make_pagetype_isolated()/make_pagetype_movable(). + */ +extern int set_migratetype_isolate(struct page *page); +extern void unset_migratetype_isolate(struct page *page); + + +#endif diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h new file mode 100644 index 00000000..a88cdba2 --- /dev/null +++ b/include/linux/page_cgroup.h @@ -0,0 +1,145 @@ +#ifndef __LINUX_PAGE_CGROUP_H +#define __LINUX_PAGE_CGROUP_H + +enum { + /* flags for mem_cgroup */ + PCG_LOCK, /* Lock for pc->mem_cgroup and following bits. */ + PCG_USED, /* this object is in use. */ + PCG_MIGRATION, /* under page migration */ + __NR_PCG_FLAGS, +}; + +#ifndef __GENERATING_BOUNDS_H +#include <generated/bounds.h> + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +#include <linux/bit_spinlock.h> + +/* + * Page Cgroup can be considered as an extended mem_map. + * A page_cgroup page is associated with every page descriptor. The + * page_cgroup helps us identify information about the cgroup. + * All page cgroups are allocated at boot or at memory hotplug, + * so the page cgroup for a pfn always exists.
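+ *
+ * A minimal lookup sketch (an illustration only; "page" and "memcg" are
+ * hypothetical locals and error handling is omitted):
+ *
+ *	struct page_cgroup *pc = lookup_page_cgroup(page);
+ *
+ *	lock_page_cgroup(pc);
+ *	if (PageCgroupUsed(pc))
+ *		memcg = pc->mem_cgroup;
+ *	unlock_page_cgroup(pc);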
+ */ +struct page_cgroup { + unsigned long flags; + struct mem_cgroup *mem_cgroup; +}; + +void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat); + +#ifdef CONFIG_SPARSEMEM +static inline void __init page_cgroup_init_flatmem(void) +{ +} +extern void __init page_cgroup_init(void); +#else +void __init page_cgroup_init_flatmem(void); +static inline void __init page_cgroup_init(void) +{ +} +#endif + +struct page_cgroup *lookup_page_cgroup(struct page *page); +struct page *lookup_cgroup_page(struct page_cgroup *pc); + +#define TESTPCGFLAG(uname, lname) \ +static inline int PageCgroup##uname(struct page_cgroup *pc) \ + { return test_bit(PCG_##lname, &pc->flags); } + +#define SETPCGFLAG(uname, lname) \ +static inline void SetPageCgroup##uname(struct page_cgroup *pc)\ + { set_bit(PCG_##lname, &pc->flags); } + +#define CLEARPCGFLAG(uname, lname) \ +static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \ + { clear_bit(PCG_##lname, &pc->flags); } + +#define TESTCLEARPCGFLAG(uname, lname) \ +static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ + { return test_and_clear_bit(PCG_##lname, &pc->flags); } + +TESTPCGFLAG(Used, USED) +CLEARPCGFLAG(Used, USED) +SETPCGFLAG(Used, USED) + +SETPCGFLAG(Migration, MIGRATION) +CLEARPCGFLAG(Migration, MIGRATION) +TESTPCGFLAG(Migration, MIGRATION) + +static inline void lock_page_cgroup(struct page_cgroup *pc) +{ + /* + * Don't take this lock in IRQ context. + * This lock is for pc->mem_cgroup, USED, MIGRATION + */ + bit_spin_lock(PCG_LOCK, &pc->flags); +} + +static inline void unlock_page_cgroup(struct page_cgroup *pc) +{ + bit_spin_unlock(PCG_LOCK, &pc->flags); +} + +#else /* CONFIG_CGROUP_MEM_RES_CTLR */ +struct page_cgroup; + +static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) +{ +} + +static inline struct page_cgroup *lookup_page_cgroup(struct page *page) +{ + return NULL; +} + +static inline void page_cgroup_init(void) +{ +} + +static inline void __init page_cgroup_init_flatmem(void) +{ +} + +#endif /* CONFIG_CGROUP_MEM_RES_CTLR */ + +#include <linux/swap.h> + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new); +extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); +extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); +extern int swap_cgroup_swapon(int type, unsigned long max_pages); +extern void swap_cgroup_swapoff(int type); +#else + +static inline +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + return 0; +} + +static inline +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return 0; +} + +static inline int +swap_cgroup_swapon(int type, unsigned long max_pages) +{ + return 0; +} + +static inline void swap_cgroup_swapoff(int type) +{ + return; +} + +#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */ + +#endif /* !__GENERATING_BOUNDS_H */ + +#endif /* __LINUX_PAGE_CGROUP_H */ diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h new file mode 100644 index 00000000..19ef95d2 --- /dev/null +++ b/include/linux/pageblock-flags.h @@ -0,0 +1,74 @@ +/* + * Macros for manipulating and testing flags related to a + * pageblock_nr_pages number of pages. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2006 + * + * Original author, Mel Gorman + * Major cleanups and reduction of bit operations, Andy Whitcroft + */ +#ifndef PAGEBLOCK_FLAGS_H +#define PAGEBLOCK_FLAGS_H + +#include <linux/types.h> + +/* Bit indices that affect a whole block of pages */ +enum pageblock_bits { + PB_migrate, + PB_migrate_end = PB_migrate + 3 - 1, + /* 3 bits required for migrate types */ + NR_PAGEBLOCK_BITS +}; + +#ifdef CONFIG_HUGETLB_PAGE + +#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE + +/* Huge page sizes are variable */ +extern int pageblock_order; + +#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ + +/* Huge pages are a constant size */ +#define pageblock_order HUGETLB_PAGE_ORDER + +#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ + +#else /* CONFIG_HUGETLB_PAGE */ + +/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */ +#define pageblock_order (MAX_ORDER-1) + +#endif /* CONFIG_HUGETLB_PAGE */ + +#define pageblock_nr_pages (1UL << pageblock_order) + +/* Forward declaration */ +struct page; + +/* Declarations for getting and setting flags. See mm/page_alloc.c */ +unsigned long get_pageblock_flags_group(struct page *page, + int start_bitidx, int end_bitidx); +void set_pageblock_flags_group(struct page *page, unsigned long flags, + int start_bitidx, int end_bitidx); + +#define get_pageblock_flags(page) \ + get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1) +#define set_pageblock_flags(page, flags) \ + set_pageblock_flags_group(page, flags, \ + 0, NR_PAGEBLOCK_BITS-1) + +#endif /* PAGEBLOCK_FLAGS_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h new file mode 100644 index 00000000..cfaaa694 --- /dev/null +++ b/include/linux/pagemap.h @@ -0,0 +1,479 @@ +#ifndef _LINUX_PAGEMAP_H +#define _LINUX_PAGEMAP_H + +/* + * Copyright 1995 Linus Torvalds + */ +#include <linux/mm.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/highmem.h> +#include <linux/compiler.h> +#include <asm/uaccess.h> +#include <linux/gfp.h> +#include <linux/bitops.h> +#include <linux/hardirq.h> /* for in_interrupt() */ +#include <linux/hugetlb_inline.h> + +/* + * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page + * allocation mode flags. 
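+ *
+ * Two small usage sketches of the helpers defined below (illustration only;
+ * "mapping", "err" and "page" are hypothetical locals). The first records an
+ * asynchronous write error on the mapping, the second allocates a pagecache
+ * page with the mapping's allocation flags:
+ *
+ *	mapping_set_error(mapping, err);
+ *	page = __page_cache_alloc(mapping_gfp_mask(mapping));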
+ */ +enum mapping_flags { + AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ + AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ + AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ + AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ +}; + +static inline void mapping_set_error(struct address_space *mapping, int error) +{ + if (unlikely(error)) { + if (error == -ENOSPC) + set_bit(AS_ENOSPC, &mapping->flags); + else + set_bit(AS_EIO, &mapping->flags); + } +} + +static inline void mapping_set_unevictable(struct address_space *mapping) +{ + set_bit(AS_UNEVICTABLE, &mapping->flags); +} + +static inline void mapping_clear_unevictable(struct address_space *mapping) +{ + clear_bit(AS_UNEVICTABLE, &mapping->flags); +} + +static inline int mapping_unevictable(struct address_space *mapping) +{ + if (mapping) + return test_bit(AS_UNEVICTABLE, &mapping->flags); + return !!mapping; +} + +static inline gfp_t mapping_gfp_mask(struct address_space * mapping) +{ + return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; +} + +/* + * This is non-atomic. Only to be used before the mapping is activated. + * Probably needs a barrier... + */ +static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) +{ + m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) | + (__force unsigned long)mask; +} + +/* + * The page cache can done in larger chunks than + * one page, because it allows for more efficient + * throughput (it can then be mapped into user + * space in smaller chunks for same flexibility). + * + * Or rather, it _will_ be done in larger chunks. + */ +#define PAGE_CACHE_SHIFT PAGE_SHIFT +#define PAGE_CACHE_SIZE PAGE_SIZE +#define PAGE_CACHE_MASK PAGE_MASK +#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK) + +#define page_cache_get(page) get_page(page) +#define page_cache_release(page) put_page(page) +void release_pages(struct page **pages, int nr, int cold); + +/* + * speculatively take a reference to a page. + * If the page is free (_count == 0), then _count is untouched, and 0 + * is returned. Otherwise, _count is incremented by 1 and 1 is returned. + * + * This function must be called inside the same rcu_read_lock() section as has + * been used to lookup the page in the pagecache radix-tree (or page table): + * this allows allocators to use a synchronize_rcu() to stabilize _count. + * + * Unless an RCU grace period has passed, the count of all pages coming out + * of the allocator must be considered unstable. page_count may return higher + * than expected, and put_page must be able to do the right thing when the + * page has been finished with, no matter what it is subsequently allocated + * for (because put_page is what is used here to drop an invalid speculative + * reference). + * + * This is the interesting part of the lockless pagecache (and lockless + * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) + * has the following pattern: + * 1. find page in radix tree + * 2. conditionally increment refcount + * 3. check the page is still in pagecache (if no, goto 1) + * + * Remove-side that cares about stability of _count (eg. reclaim) has the + * following (with tree_lock held for write): + * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) + * B. remove page from pagecache + * C. 
free the page + * + * There are 2 critical interleavings that matter: + * - 2 runs before A: in this case, A sees elevated refcount and bails out + * - A runs before 2: in this case, 2 sees zero refcount and retries; + * subsequently, B will complete and 1 will find no page, causing the + * lookup to return NULL. + * + * It is possible that between 1 and 2, the page is removed then the exact same + * page is inserted into the same position in pagecache. That's OK: the + * old find_get_page using tree_lock could equally have run before or after + * such a re-insertion, depending on order that locks are granted. + * + * Lookups racing against pagecache insertion isn't a big problem: either 1 + * will find the page or it will not. Likewise, the old find_get_page could run + * either before the insertion or afterwards, depending on timing. + */ +static inline int page_cache_get_speculative(struct page *page) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) +# ifdef CONFIG_PREEMPT_COUNT + VM_BUG_ON(!in_atomic()); +# endif + /* + * Preempt must be disabled here - we rely on rcu_read_lock doing + * this for us. + * + * Pagecache won't be truncated from interrupt context, so if we have + * found a page in the radix tree here, we have pinned its refcount by + * disabling preempt, and hence no need for the "speculative get" that + * SMP requires. + */ + VM_BUG_ON(page_count(page) == 0); + atomic_inc(&page->_count); + +#else + if (unlikely(!get_page_unless_zero(page))) { + /* + * Either the page has been freed, or will be freed. + * In either case, retry here and the caller should + * do the right thing (see comments above). + */ + return 0; + } +#endif + VM_BUG_ON(PageTail(page)); + + return 1; +} + +/* + * Same as above, but add instead of inc (could just be merged) + */ +static inline int page_cache_add_speculative(struct page *page, int count) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) +# ifdef CONFIG_PREEMPT_COUNT + VM_BUG_ON(!in_atomic()); +# endif + VM_BUG_ON(page_count(page) == 0); + atomic_add(count, &page->_count); + +#else + if (unlikely(!atomic_add_unless(&page->_count, count, 0))) + return 0; +#endif + VM_BUG_ON(PageCompound(page) && page != compound_head(page)); + + return 1; +} + +static inline int page_freeze_refs(struct page *page, int count) +{ + return likely(atomic_cmpxchg(&page->_count, count, 0) == count); +} + +static inline void page_unfreeze_refs(struct page *page, int count) +{ + VM_BUG_ON(page_count(page) != 0); + VM_BUG_ON(count == 0); + + atomic_set(&page->_count, count); +} + +#ifdef CONFIG_NUMA +extern struct page *__page_cache_alloc(gfp_t gfp); +#else +static inline struct page *__page_cache_alloc(gfp_t gfp) +{ + return alloc_pages(gfp, 0); +} +#endif + +static inline struct page *page_cache_alloc(struct address_space *x) +{ + return __page_cache_alloc(mapping_gfp_mask(x)); +} + +static inline struct page *page_cache_alloc_cold(struct address_space *x) +{ + return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); +} + +static inline struct page *page_cache_alloc_readahead(struct address_space *x) +{ + return __page_cache_alloc(mapping_gfp_mask(x) | + __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN); +} + +typedef int filler_t(void *, struct page *); + +extern struct page * find_get_page(struct address_space *mapping, + pgoff_t index); +extern struct page * find_lock_page(struct address_space *mapping, + pgoff_t index); +extern struct page * find_or_create_page(struct address_space *mapping, 
+ pgoff_t index, gfp_t gfp_mask); +unsigned find_get_pages(struct address_space *mapping, pgoff_t start, + unsigned int nr_pages, struct page **pages); +unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, + unsigned int nr_pages, struct page **pages); +unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, + int tag, unsigned int nr_pages, struct page **pages); + +struct page *grab_cache_page_write_begin(struct address_space *mapping, + pgoff_t index, unsigned flags); + +/* + * Returns locked page at given index in given cache, creating it if needed. + */ +static inline struct page *grab_cache_page(struct address_space *mapping, + pgoff_t index) +{ + return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); +} + +extern struct page * grab_cache_page_nowait(struct address_space *mapping, + pgoff_t index); +extern struct page * read_cache_page_async(struct address_space *mapping, + pgoff_t index, filler_t *filler, void *data); +extern struct page * read_cache_page(struct address_space *mapping, + pgoff_t index, filler_t *filler, void *data); +extern struct page * read_cache_page_gfp(struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern int read_cache_pages(struct address_space *mapping, + struct list_head *pages, filler_t *filler, void *data); + +static inline struct page *read_mapping_page_async( + struct address_space *mapping, + pgoff_t index, void *data) +{ + filler_t *filler = (filler_t *)mapping->a_ops->readpage; + return read_cache_page_async(mapping, index, filler, data); +} + +static inline struct page *read_mapping_page(struct address_space *mapping, + pgoff_t index, void *data) +{ + filler_t *filler = (filler_t *)mapping->a_ops->readpage; + return read_cache_page(mapping, index, filler, data); +} + +/* + * Return byte-offset into filesystem object for page. + */ +static inline loff_t page_offset(struct page *page) +{ + return ((loff_t)page->index) << PAGE_CACHE_SHIFT; +} + +extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, + unsigned long address); + +static inline pgoff_t linear_page_index(struct vm_area_struct *vma, + unsigned long address) +{ + pgoff_t pgoff; + if (unlikely(is_vm_hugetlb_page(vma))) + return linear_hugepage_index(vma, address); + pgoff = (address - vma->vm_start) >> PAGE_SHIFT; + pgoff += vma->vm_pgoff; + return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT); +} + +extern void __lock_page(struct page *page); +extern int __lock_page_killable(struct page *page); +extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags); +extern void unlock_page(struct page *page); + +static inline void __set_page_locked(struct page *page) +{ + __set_bit(PG_locked, &page->flags); +} + +static inline void __clear_page_locked(struct page *page) +{ + __clear_bit(PG_locked, &page->flags); +} + +static inline int trylock_page(struct page *page) +{ + return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); +} + +/* + * lock_page may only be called if we have the page's inode pinned. + */ +static inline void lock_page(struct page *page) +{ + might_sleep(); + if (!trylock_page(page)) + __lock_page(page); +} + +/* + * lock_page_killable is like lock_page but can be interrupted by fatal + * signals. It returns 0 if it locked the page and -EINTR if it was + * killed while waiting. 
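+ *
+ * A typical caller might look like this (a sketch added for illustration;
+ * the surrounding error handling is an assumption about usage):
+ *
+ *	err = lock_page_killable(page);
+ *	if (err)
+ *		return err;
+ *	...
+ *	unlock_page(page);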
+ */ +static inline int lock_page_killable(struct page *page) +{ + might_sleep(); + if (!trylock_page(page)) + return __lock_page_killable(page); + return 0; +} + +/* + * lock_page_or_retry - Lock the page, unless this would block and the + * caller indicated that it can handle a retry. + */ +static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags) +{ + might_sleep(); + return trylock_page(page) || __lock_page_or_retry(page, mm, flags); +} + +/* + * This is exported only for wait_on_page_locked/wait_on_page_writeback. + * Never use this directly! + */ +extern void wait_on_page_bit(struct page *page, int bit_nr); + +extern int wait_on_page_bit_killable(struct page *page, int bit_nr); + +static inline int wait_on_page_locked_killable(struct page *page) +{ + if (PageLocked(page)) + return wait_on_page_bit_killable(page, PG_locked); + return 0; +} + +/* + * Wait for a page to be unlocked. + * + * This must be called with the caller "holding" the page, + * ie with increased "page->count" so that the page won't + * go away during the wait.. + */ +static inline void wait_on_page_locked(struct page *page) +{ + if (PageLocked(page)) + wait_on_page_bit(page, PG_locked); +} + +/* + * Wait for a page to complete writeback + */ +static inline void wait_on_page_writeback(struct page *page) +{ + if (PageWriteback(page)) + wait_on_page_bit(page, PG_writeback); +} + +extern void end_page_writeback(struct page *page); + +/* + * Add an arbitrary waiter to a page's wait queue + */ +extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); + +/* + * Fault a userspace page into pagetables. Return non-zero on a fault. + * + * This assumes that two userspace pages are always sufficient. That's + * not true if PAGE_CACHE_SIZE > PAGE_SIZE. + */ +static inline int fault_in_pages_writeable(char __user *uaddr, int size) +{ + int ret; + + if (unlikely(size == 0)) + return 0; + + /* + * Writing zeroes into userspace here is OK, because we know that if + * the zero gets there, we'll be overwriting it. + */ + ret = __put_user(0, uaddr); + if (ret == 0) { + char __user *end = uaddr + size - 1; + + /* + * If the page was already mapped, this will get a cache miss + * for sure, so try to avoid doing it. + */ + if (((unsigned long)uaddr & PAGE_MASK) != + ((unsigned long)end & PAGE_MASK)) + ret = __put_user(0, end); + } + return ret; +} + +static inline int fault_in_pages_readable(const char __user *uaddr, int size) +{ + volatile char c; + int ret; + + if (unlikely(size == 0)) + return 0; + + ret = __get_user(c, uaddr); + if (ret == 0) { + const char __user *end = uaddr + size - 1; + + if (((unsigned long)uaddr & PAGE_MASK) != + ((unsigned long)end & PAGE_MASK)) { + ret = __get_user(c, end); + (void)c; + } + } + return ret; +} + +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern void delete_from_page_cache(struct page *page); +extern void __delete_from_page_cache(struct page *page); +int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); + +/* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run __set_page_locked() against it. 
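+ *
+ * A rough caller sketch (illustration only; "mapping", "index", "err" and
+ * the choice of gfp mask are assumptions, as is releasing the page when the
+ * add fails):
+ *
+ *	page = __page_cache_alloc(mapping_gfp_mask(mapping));
+ *	if (page) {
+ *		err = add_to_page_cache(page, mapping, index,
+ *					mapping_gfp_mask(mapping));
+ *		if (err)
+ *			page_cache_release(page);
+ *	}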
+ */ +static inline int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + __set_page_locked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + __clear_page_locked(page); + return error; +} + +#endif /* _LINUX_PAGEMAP_H */ diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h new file mode 100644 index 00000000..2aa12b84 --- /dev/null +++ b/include/linux/pagevec.h @@ -0,0 +1,99 @@ +/* + * include/linux/pagevec.h + * + * In many places it is efficient to batch an operation up against multiple + * pages. A pagevec is a multipage container which is used for that. + */ + +#ifndef _LINUX_PAGEVEC_H +#define _LINUX_PAGEVEC_H + +/* 14 pointers + two long's align the pagevec structure to a power of two */ +#define PAGEVEC_SIZE 14 + +struct page; +struct address_space; + +struct pagevec { + unsigned long nr; + unsigned long cold; + struct page *pages[PAGEVEC_SIZE]; +}; + +void __pagevec_release(struct pagevec *pvec); +void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); +unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, + pgoff_t start, unsigned nr_pages); +unsigned pagevec_lookup_tag(struct pagevec *pvec, + struct address_space *mapping, pgoff_t *index, int tag, + unsigned nr_pages); + +static inline void pagevec_init(struct pagevec *pvec, int cold) +{ + pvec->nr = 0; + pvec->cold = cold; +} + +static inline void pagevec_reinit(struct pagevec *pvec) +{ + pvec->nr = 0; +} + +static inline unsigned pagevec_count(struct pagevec *pvec) +{ + return pvec->nr; +} + +static inline unsigned pagevec_space(struct pagevec *pvec) +{ + return PAGEVEC_SIZE - pvec->nr; +} + +/* + * Add a page to a pagevec. Returns the number of slots still available. + */ +static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page) +{ + pvec->pages[pvec->nr++] = page; + return pagevec_space(pvec); +} + +static inline void pagevec_release(struct pagevec *pvec) +{ + if (pagevec_count(pvec)) + __pagevec_release(pvec); +} + +static inline void __pagevec_lru_add_anon(struct pagevec *pvec) +{ + __pagevec_lru_add(pvec, LRU_INACTIVE_ANON); +} + +static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec) +{ + __pagevec_lru_add(pvec, LRU_ACTIVE_ANON); +} + +static inline void __pagevec_lru_add_file(struct pagevec *pvec) +{ + __pagevec_lru_add(pvec, LRU_INACTIVE_FILE); +} + +static inline void __pagevec_lru_add_active_file(struct pagevec *pvec) +{ + __pagevec_lru_add(pvec, LRU_ACTIVE_FILE); +} + +static inline void pagevec_lru_add_file(struct pagevec *pvec) +{ + if (pagevec_count(pvec)) + __pagevec_lru_add_file(pvec); +} + +static inline void pagevec_lru_add_anon(struct pagevec *pvec) +{ + if (pagevec_count(pvec)) + __pagevec_lru_add_anon(pvec); +} + +#endif /* _LINUX_PAGEVEC_H */ diff --git a/include/linux/param.h b/include/linux/param.h new file mode 100644 index 00000000..092e92f6 --- /dev/null +++ b/include/linux/param.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_PARAM_H +#define _LINUX_PARAM_H + +#include <asm/param.h> + +#endif diff --git a/include/linux/parport.h b/include/linux/parport.h new file mode 100644 index 00000000..106c2ca9 --- /dev/null +++ b/include/linux/parport.h @@ -0,0 +1,568 @@ +/* + * Any part of this program may be used in documents licensed under + * the GNU Free Documentation License, Version 1.1 or any later version + * published by the Free Software Foundation. 
+ */ + +#ifndef _PARPORT_H_ +#define _PARPORT_H_ + +/* Start off with user-visible constants */ + +/* Maximum of 16 ports per machine */ +#define PARPORT_MAX 16 + +/* Magic numbers */ +#define PARPORT_IRQ_NONE -1 +#define PARPORT_DMA_NONE -1 +#define PARPORT_IRQ_AUTO -2 +#define PARPORT_DMA_AUTO -2 +#define PARPORT_DMA_NOFIFO -3 +#define PARPORT_DISABLE -2 +#define PARPORT_IRQ_PROBEONLY -3 +#define PARPORT_IOHI_AUTO -1 + +#define PARPORT_CONTROL_STROBE 0x1 +#define PARPORT_CONTROL_AUTOFD 0x2 +#define PARPORT_CONTROL_INIT 0x4 +#define PARPORT_CONTROL_SELECT 0x8 + +#define PARPORT_STATUS_ERROR 0x8 +#define PARPORT_STATUS_SELECT 0x10 +#define PARPORT_STATUS_PAPEROUT 0x20 +#define PARPORT_STATUS_ACK 0x40 +#define PARPORT_STATUS_BUSY 0x80 + +/* Type classes for Plug-and-Play probe. */ +typedef enum { + PARPORT_CLASS_LEGACY = 0, /* Non-IEEE1284 device */ + PARPORT_CLASS_PRINTER, + PARPORT_CLASS_MODEM, + PARPORT_CLASS_NET, + PARPORT_CLASS_HDC, /* Hard disk controller */ + PARPORT_CLASS_PCMCIA, + PARPORT_CLASS_MEDIA, /* Multimedia device */ + PARPORT_CLASS_FDC, /* Floppy disk controller */ + PARPORT_CLASS_PORTS, + PARPORT_CLASS_SCANNER, + PARPORT_CLASS_DIGCAM, + PARPORT_CLASS_OTHER, /* Anything else */ + PARPORT_CLASS_UNSPEC, /* No CLS field in ID */ + PARPORT_CLASS_SCSIADAPTER +} parport_device_class; + +/* The "modes" entry in parport is a bit field representing the + capabilities of the hardware. */ +#define PARPORT_MODE_PCSPP (1<<0) /* IBM PC registers available. */ +#define PARPORT_MODE_TRISTATE (1<<1) /* Can tristate. */ +#define PARPORT_MODE_EPP (1<<2) /* Hardware EPP. */ +#define PARPORT_MODE_ECP (1<<3) /* Hardware ECP. */ +#define PARPORT_MODE_COMPAT (1<<4) /* Hardware 'printer protocol'. */ +#define PARPORT_MODE_DMA (1<<5) /* Hardware can DMA. */ +#define PARPORT_MODE_SAFEININT (1<<6) /* SPP registers accessible in IRQ. */ + +/* IEEE1284 modes: + Nibble mode, byte mode, ECP, ECPRLE and EPP are their own + 'extensibility request' values. Others are special. + 'Real' ECP modes must have the IEEE1284_MODE_ECP bit set. */ +#define IEEE1284_MODE_NIBBLE 0 +#define IEEE1284_MODE_BYTE (1<<0) +#define IEEE1284_MODE_COMPAT (1<<8) +#define IEEE1284_MODE_BECP (1<<9) /* Bounded ECP mode */ +#define IEEE1284_MODE_ECP (1<<4) +#define IEEE1284_MODE_ECPRLE (IEEE1284_MODE_ECP | (1<<5)) +#define IEEE1284_MODE_ECPSWE (1<<10) /* Software-emulated */ +#define IEEE1284_MODE_EPP (1<<6) +#define IEEE1284_MODE_EPPSL (1<<11) /* EPP 1.7 */ +#define IEEE1284_MODE_EPPSWE (1<<12) /* Software-emulated */ +#define IEEE1284_DEVICEID (1<<2) /* This is a flag */ +#define IEEE1284_EXT_LINK (1<<14) /* This flag causes the + * extensibility link to + * be requested, using + * bits 0-6. */ + +/* For the benefit of parport_read/write, you can use these with + * parport_negotiate to use address operations. They have no effect + * other than to make parport_read/write use address transfers. */ +#define IEEE1284_ADDR (1<<13) /* This is a flag */ +#define IEEE1284_DATA 0 /* So is this */ + +/* Flags for block transfer operations. */ +#define PARPORT_EPP_FAST (1<<0) /* Unreliable counts. */ +#define PARPORT_W91284PIC (1<<1) /* have a Warp9 w91284pic in the device */ + +/* The rest is for the kernel only */ +#ifdef __KERNEL__ + +#include <linux/jiffies.h> +#include <linux/proc_fs.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/irqreturn.h> +#include <linux/semaphore.h> +#include <asm/ptrace.h> + +/* Define this later. 
*/ +struct parport; +struct pardevice; + +struct pc_parport_state { + unsigned int ctr; + unsigned int ecr; +}; + +struct ax_parport_state { + unsigned int ctr; + unsigned int ecr; + unsigned int dcsr; +}; + +/* used by both parport_amiga and parport_mfc3 */ +struct amiga_parport_state { + unsigned char data; /* ciaa.prb */ + unsigned char datadir; /* ciaa.ddrb */ + unsigned char status; /* ciab.pra & 7 */ + unsigned char statusdir;/* ciab.ddrb & 7 */ +}; + +struct ax88796_parport_state { + unsigned char cpr; +}; + +struct ip32_parport_state { + unsigned int dcr; + unsigned int ecr; +}; + +struct parport_state { + union { + struct pc_parport_state pc; + /* ARC has no state. */ + struct ax_parport_state ax; + struct amiga_parport_state amiga; + struct ax88796_parport_state ax88796; + /* Atari has not state. */ + struct ip32_parport_state ip32; + void *misc; + } u; +}; + +struct parport_operations { + /* IBM PC-style virtual registers. */ + void (*write_data)(struct parport *, unsigned char); + unsigned char (*read_data)(struct parport *); + + void (*write_control)(struct parport *, unsigned char); + unsigned char (*read_control)(struct parport *); + unsigned char (*frob_control)(struct parport *, unsigned char mask, + unsigned char val); + + unsigned char (*read_status)(struct parport *); + + /* IRQs. */ + void (*enable_irq)(struct parport *); + void (*disable_irq)(struct parport *); + + /* Data direction. */ + void (*data_forward) (struct parport *); + void (*data_reverse) (struct parport *); + + /* For core parport code. */ + void (*init_state)(struct pardevice *, struct parport_state *); + void (*save_state)(struct parport *, struct parport_state *); + void (*restore_state)(struct parport *, struct parport_state *); + + /* Block read/write */ + size_t (*epp_write_data) (struct parport *port, const void *buf, + size_t len, int flags); + size_t (*epp_read_data) (struct parport *port, void *buf, size_t len, + int flags); + size_t (*epp_write_addr) (struct parport *port, const void *buf, + size_t len, int flags); + size_t (*epp_read_addr) (struct parport *port, void *buf, size_t len, + int flags); + + size_t (*ecp_write_data) (struct parport *port, const void *buf, + size_t len, int flags); + size_t (*ecp_read_data) (struct parport *port, void *buf, size_t len, + int flags); + size_t (*ecp_write_addr) (struct parport *port, const void *buf, + size_t len, int flags); + + size_t (*compat_write_data) (struct parport *port, const void *buf, + size_t len, int flags); + size_t (*nibble_read_data) (struct parport *port, void *buf, + size_t len, int flags); + size_t (*byte_read_data) (struct parport *port, void *buf, + size_t len, int flags); + struct module *owner; +}; + +struct parport_device_info { + parport_device_class class; + const char *class_name; + const char *mfr; + const char *model; + const char *cmdset; + const char *description; +}; + +/* Each device can have two callback functions: + * 1) a preemption function, called by the resource manager to request + * that the driver relinquish control of the port. The driver should + * return zero if it agrees to release the port, and nonzero if it + * refuses. Do not call parport_release() - the kernel will do this + * implicitly. + * + * 2) a wake-up function, called by the resource manager to tell drivers + * that the port is available to be claimed. If a driver wants to use + * the port, it should call parport_claim() here. 
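+ *
+ * A skeletal driver might wire these callbacks up roughly as follows (an
+ * editor's sketch; "mydrv_*", "handle", "port" and "dev" are hypothetical
+ * names, and "dev" is assumed to be reachable from the handle inside the
+ * wake-up function). Returning 0 from the preemption callback agrees to
+ * release the port:
+ *
+ *	static int mydrv_preempt(void *handle)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static void mydrv_wakeup(void *handle)
+ *	{
+ *		parport_claim(dev);
+ *	}
+ *
+ *	dev = parport_register_device(port, "mydrv", mydrv_preempt,
+ *				      mydrv_wakeup, NULL, 0, handle);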
+ */ + +/* A parallel port device */ +struct pardevice { + const char *name; + struct parport *port; + int daisy; + int (*preempt)(void *); + void (*wakeup)(void *); + void *private; + void (*irq_func)(void *); + unsigned int flags; + struct pardevice *next; + struct pardevice *prev; + struct parport_state *state; /* saved status over preemption */ + wait_queue_head_t wait_q; + unsigned long int time; + unsigned long int timeslice; + volatile long int timeout; + unsigned long waiting; /* long req'd for set_bit --RR */ + struct pardevice *waitprev; + struct pardevice *waitnext; + void * sysctl_table; +}; + +/* IEEE1284 information */ + +/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL + * PP[GS]ETPHASE, so do not change existing values. */ +enum ieee1284_phase { + IEEE1284_PH_FWD_DATA, + IEEE1284_PH_FWD_IDLE, + IEEE1284_PH_TERMINATE, + IEEE1284_PH_NEGOTIATION, + IEEE1284_PH_HBUSY_DNA, + IEEE1284_PH_REV_IDLE, + IEEE1284_PH_HBUSY_DAVAIL, + IEEE1284_PH_REV_DATA, + IEEE1284_PH_ECP_SETUP, + IEEE1284_PH_ECP_FWD_TO_REV, + IEEE1284_PH_ECP_REV_TO_FWD, + IEEE1284_PH_ECP_DIR_UNKNOWN, +}; +struct ieee1284_info { + int mode; + volatile enum ieee1284_phase phase; + struct semaphore irq; +}; + +/* A parallel port */ +struct parport { + unsigned long base; /* base address */ + unsigned long base_hi; /* base address (hi - ECR) */ + unsigned int size; /* IO extent */ + const char *name; + unsigned int modes; + int irq; /* interrupt (or -1 for none) */ + int dma; + int muxport; /* which muxport (if any) this is */ + int portnum; /* which physical parallel port (not mux) */ + struct device *dev; /* Physical device associated with IO/DMA. + * This may unfortulately be null if the + * port has a legacy driver. + */ + + struct parport *physport; + /* If this is a non-default mux + parport, i.e. we're a clone of a real + physical port, this is a pointer to that + port. The locking is only done in the + real port. For a clone port, the + following structure members are + meaningless: devices, cad, muxsel, + waithead, waittail, flags, pdir, + dev, ieee1284, *_lock. + + It this is a default mux parport, or + there is no mux involved, this points to + ourself. */ + + struct pardevice *devices; + struct pardevice *cad; /* port owner */ + int daisy; /* currently selected daisy addr */ + int muxsel; /* currently selected mux port */ + + struct pardevice *waithead; + struct pardevice *waittail; + + struct list_head list; + unsigned int flags; + + void *sysctl_table; + struct parport_device_info probe_info[5]; /* 0-3 + non-IEEE1284.3 */ + struct ieee1284_info ieee1284; + + struct parport_operations *ops; + void *private_data; /* for lowlevel driver */ + + int number; /* port index - the `n' in `parportn' */ + spinlock_t pardevice_lock; + spinlock_t waitlist_lock; + rwlock_t cad_lock; + + int spintime; + atomic_t ref_count; + + unsigned long devflags; +#define PARPORT_DEVPROC_REGISTERED 0 + struct pardevice *proc_device; /* Currently register proc device */ + + struct list_head full_list; + struct parport *slaves[3]; +}; + +#define DEFAULT_SPIN_TIME 500 /* us */ + +struct parport_driver { + const char *name; + void (*attach) (struct parport *); + void (*detach) (struct parport *); + struct list_head list; +}; + +/* parport_register_port registers a new parallel port at the given + address (if one does not already exist) and returns a pointer to it. + This entails claiming the I/O region, IRQ and DMA. NULL is returned + if initialisation fails. 
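+ *
+ * A low-level port driver would typically do something like this (a sketch
+ * only; "base", "irq", "dma" and "my_ops" are placeholders), announcing the
+ * port once any remaining setup is complete:
+ *
+ *	port = parport_register_port(base, irq, dma, &my_ops);
+ *	if (!port)
+ *		return -ENODEV;
+ *	parport_announce_port(port);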
*/ +struct parport *parport_register_port(unsigned long base, int irq, int dma, + struct parport_operations *ops); + +/* Once a registered port is ready for high-level drivers to use, the + low-level driver that registered it should announce it. This will + call the high-level drivers' attach() functions (after things like + determining the IEEE 1284.3 topology of the port and collecting + DeviceIDs). */ +void parport_announce_port (struct parport *port); + +/* Unregister a port. */ +extern void parport_remove_port(struct parport *port); + +/* Register a new high-level driver. */ +extern int parport_register_driver (struct parport_driver *); + +/* Unregister a high-level driver. */ +extern void parport_unregister_driver (struct parport_driver *); + +/* If parport_register_driver doesn't fit your needs, perhaps + * parport_find_xxx does. */ +extern struct parport *parport_find_number (int); +extern struct parport *parport_find_base (unsigned long); + +/* generic irq handler, if it suits your needs */ +extern irqreturn_t parport_irq_handler(int irq, void *dev_id); + +/* Reference counting for ports. */ +extern struct parport *parport_get_port (struct parport *); +extern void parport_put_port (struct parport *); + +/* parport_register_device declares that a device is connected to a + port, and tells the kernel all it needs to know. + - pf is the preemption function (may be NULL for no callback) + - kf is the wake-up function (may be NULL for no callback) + - irq_func is the interrupt handler (may be NULL for no interrupts) + - handle is a user pointer that gets handed to callback functions. */ +struct pardevice *parport_register_device(struct parport *port, + const char *name, + int (*pf)(void *), void (*kf)(void *), + void (*irq_func)(void *), + int flags, void *handle); + +/* parport_unregister unlinks a device from the chain. */ +extern void parport_unregister_device(struct pardevice *dev); + +/* parport_claim tries to gain ownership of the port for a particular + driver. This may fail (return non-zero) if another driver is busy. + If this driver has registered an interrupt handler, it will be + enabled. */ +extern int parport_claim(struct pardevice *dev); + +/* parport_claim_or_block is the same, but sleeps if the port cannot + be claimed. Return value is 1 if it slept, 0 normally and -errno + on error. */ +extern int parport_claim_or_block(struct pardevice *dev); + +/* parport_release reverses a previous parport_claim. This can never + fail, though the effects are undefined (except that they are bad) + if you didn't previously own the port. Once you have released the + port you should make sure that neither your code nor the hardware + on the port tries to initiate any communication without first + re-claiming the port. If you mess with the port state (enabling + ECP for example) you should clean up before releasing the port. */ + +extern void parport_release(struct pardevice *dev); + +/** + * parport_yield - relinquish a parallel port temporarily + * @dev: a device on the parallel port + * + * This function relinquishes the port if it would be helpful to other + * drivers to do so. Afterwards it tries to reclaim the port using + * parport_claim(), and the return value is the same as for + * parport_claim(). If it fails, the port is left unclaimed and it is + * the driver's responsibility to reclaim the port. + * + * The parport_yield() and parport_yield_blocking() functions are for + * marking points in the driver at which other drivers may claim the + * port and use their devices. 
Yielding the port is similar to + * releasing it and reclaiming it, but is more efficient because no + * action is taken if there are no other devices needing the port. In + * fact, nothing is done even if there are other devices waiting but + * the current device is still within its "timeslice". The default + * timeslice is half a second, but it can be adjusted via the /proc + * interface. + **/ +static __inline__ int parport_yield(struct pardevice *dev) +{ + unsigned long int timeslip = (jiffies - dev->time); + if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice)) + return 0; + parport_release(dev); + return parport_claim(dev); +} + +/** + * parport_yield_blocking - relinquish a parallel port temporarily + * @dev: a device on the parallel port + * + * This function relinquishes the port if it would be helpful to other + * drivers to do so. Afterwards it tries to reclaim the port using + * parport_claim_or_block(), and the return value is the same as for + * parport_claim_or_block(). + **/ +static __inline__ int parport_yield_blocking(struct pardevice *dev) +{ + unsigned long int timeslip = (jiffies - dev->time); + if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice)) + return 0; + parport_release(dev); + return parport_claim_or_block(dev); +} + +/* Flags used to identify what a device does. */ +#define PARPORT_DEV_TRAN 0 /* WARNING !! DEPRECATED !! */ +#define PARPORT_DEV_LURK (1<<0) /* WARNING !! DEPRECATED !! */ +#define PARPORT_DEV_EXCL (1<<1) /* Need exclusive access. */ + +#define PARPORT_FLAG_EXCL (1<<1) /* EXCL driver registered. */ + +/* IEEE1284 functions */ +extern void parport_ieee1284_interrupt (void *); +extern int parport_negotiate (struct parport *, int mode); +extern ssize_t parport_write (struct parport *, const void *buf, size_t len); +extern ssize_t parport_read (struct parport *, void *buf, size_t len); + +#define PARPORT_INACTIVITY_O_NONBLOCK 1 +extern long parport_set_timeout (struct pardevice *, long inactivity); + +extern int parport_wait_event (struct parport *, long timeout); +extern int parport_wait_peripheral (struct parport *port, + unsigned char mask, + unsigned char val); +extern int parport_poll_peripheral (struct parport *port, + unsigned char mask, + unsigned char val, + int usec); + +/* For architectural drivers */ +extern size_t parport_ieee1284_write_compat (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_read_nibble (struct parport *, + void *, size_t, int); +extern size_t parport_ieee1284_read_byte (struct parport *, + void *, size_t, int); +extern size_t parport_ieee1284_ecp_read_data (struct parport *, + void *, size_t, int); +extern size_t parport_ieee1284_ecp_write_data (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_ecp_write_addr (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_epp_write_data (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_epp_read_data (struct parport *, + void *, size_t, int); +extern size_t parport_ieee1284_epp_write_addr (struct parport *, + const void *, size_t, int); +extern size_t parport_ieee1284_epp_read_addr (struct parport *, + void *, size_t, int); + +/* IEEE1284.3 functions */ +extern int parport_daisy_init (struct parport *port); +extern void parport_daisy_fini (struct parport *port); +extern struct pardevice *parport_open (int devnum, const char *name); +extern void parport_close (struct pardevice *dev); +extern ssize_t parport_device_id (int devnum, char 
*buffer, size_t len); +extern void parport_daisy_deselect_all (struct parport *port); +extern int parport_daisy_select (struct parport *port, int daisy, int mode); + +/* Lowlevel drivers _can_ call this support function to handle irqs. */ +static inline void parport_generic_irq(struct parport *port) +{ + parport_ieee1284_interrupt (port); + read_lock(&port->cad_lock); + if (port->cad && port->cad->irq_func) + port->cad->irq_func(port->cad->private); + read_unlock(&port->cad_lock); +} + +/* Prototypes from parport_procfs */ +extern int parport_proc_register(struct parport *pp); +extern int parport_proc_unregister(struct parport *pp); +extern int parport_device_proc_register(struct pardevice *device); +extern int parport_device_proc_unregister(struct pardevice *device); + +/* If PC hardware is the only type supported, we can optimise a bit. */ +#if !defined(CONFIG_PARPORT_NOT_PC) + +#include <linux/parport_pc.h> +#define parport_write_data(p,x) parport_pc_write_data(p,x) +#define parport_read_data(p) parport_pc_read_data(p) +#define parport_write_control(p,x) parport_pc_write_control(p,x) +#define parport_read_control(p) parport_pc_read_control(p) +#define parport_frob_control(p,m,v) parport_pc_frob_control(p,m,v) +#define parport_read_status(p) parport_pc_read_status(p) +#define parport_enable_irq(p) parport_pc_enable_irq(p) +#define parport_disable_irq(p) parport_pc_disable_irq(p) +#define parport_data_forward(p) parport_pc_data_forward(p) +#define parport_data_reverse(p) parport_pc_data_reverse(p) + +#else /* !CONFIG_PARPORT_NOT_PC */ + +/* Generic operations vector through the dispatch table. */ +#define parport_write_data(p,x) (p)->ops->write_data(p,x) +#define parport_read_data(p) (p)->ops->read_data(p) +#define parport_write_control(p,x) (p)->ops->write_control(p,x) +#define parport_read_control(p) (p)->ops->read_control(p) +#define parport_frob_control(p,m,v) (p)->ops->frob_control(p,m,v) +#define parport_read_status(p) (p)->ops->read_status(p) +#define parport_enable_irq(p) (p)->ops->enable_irq(p) +#define parport_disable_irq(p) (p)->ops->disable_irq(p) +#define parport_data_forward(p) (p)->ops->data_forward(p) +#define parport_data_reverse(p) (p)->ops->data_reverse(p) + +#endif /* !CONFIG_PARPORT_NOT_PC */ + +extern unsigned long parport_default_timeslice; +extern int parport_default_spintime; + +#endif /* __KERNEL__ */ +#endif /* _PARPORT_H_ */ diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h new file mode 100644 index 00000000..cc1767f5 --- /dev/null +++ b/include/linux/parport_pc.h @@ -0,0 +1,238 @@ +#ifndef __LINUX_PARPORT_PC_H +#define __LINUX_PARPORT_PC_H + +#include <asm/io.h> + +/* --- register definitions ------------------------------- */ + +#define ECONTROL(p) ((p)->base_hi + 0x2) +#define CONFIGB(p) ((p)->base_hi + 0x1) +#define CONFIGA(p) ((p)->base_hi + 0x0) +#define FIFO(p) ((p)->base_hi + 0x0) +#define EPPDATA(p) ((p)->base + 0x4) +#define EPPADDR(p) ((p)->base + 0x3) +#define CONTROL(p) ((p)->base + 0x2) +#define STATUS(p) ((p)->base + 0x1) +#define DATA(p) ((p)->base + 0x0) + +struct parport_pc_private { + /* Contents of CTR. */ + unsigned char ctr; + + /* Bitmask of writable CTR bits. */ + unsigned char ctr_writable; + + /* Whether or not there's an ECR. */ + int ecr; + + /* Number of PWords that FIFO will hold. */ + int fifo_depth; + + /* Number of bytes per portword. */ + int pword; + + /* Not used yet. 
*/ + int readIntrThreshold; + int writeIntrThreshold; + + /* buffer suitable for DMA, if DMA enabled */ + char *dma_buf; + dma_addr_t dma_handle; + struct list_head list; + struct parport *port; +}; + +struct parport_pc_via_data +{ + /* ISA PnP IRQ routing register 1 */ + u8 via_pci_parport_irq_reg; + /* ISA PnP DMA request routing register */ + u8 via_pci_parport_dma_reg; + /* Register and value to enable SuperIO configuration access */ + u8 via_pci_superio_config_reg; + u8 via_pci_superio_config_data; + /* SuperIO function register number */ + u8 viacfg_function; + /* parallel port control register number */ + u8 viacfg_parport_control; + /* Parallel port base address register */ + u8 viacfg_parport_base; +}; + +static __inline__ void parport_pc_write_data(struct parport *p, unsigned char d) +{ +#ifdef DEBUG_PARPORT + printk (KERN_DEBUG "parport_pc_write_data(%p,0x%02x)\n", p, d); +#endif + outb(d, DATA(p)); +} + +static __inline__ unsigned char parport_pc_read_data(struct parport *p) +{ + unsigned char val = inb (DATA (p)); +#ifdef DEBUG_PARPORT + printk (KERN_DEBUG "parport_pc_read_data(%p) = 0x%02x\n", + p, val); +#endif + return val; +} + +#ifdef DEBUG_PARPORT +static inline void dump_parport_state (char *str, struct parport *p) +{ + /* here's hoping that reading these ports won't side-effect anything underneath */ + unsigned char ecr = inb (ECONTROL (p)); + unsigned char dcr = inb (CONTROL (p)); + unsigned char dsr = inb (STATUS (p)); + static const char *const ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"}; + const struct parport_pc_private *priv = p->physport->private_data; + int i; + + printk (KERN_DEBUG "*** parport state (%s): ecr=[%s", str, ecr_modes[(ecr & 0xe0) >> 5]); + if (ecr & 0x10) printk (",nErrIntrEn"); + if (ecr & 0x08) printk (",dmaEn"); + if (ecr & 0x04) printk (",serviceIntr"); + if (ecr & 0x02) printk (",f_full"); + if (ecr & 0x01) printk (",f_empty"); + for (i=0; i<2; i++) { + printk ("] dcr(%s)=[", i ? "soft" : "hard"); + dcr = i ? priv->ctr : inb (CONTROL (p)); + + if (dcr & 0x20) { + printk ("rev"); + } else { + printk ("fwd"); + } + if (dcr & 0x10) printk (",ackIntEn"); + if (!(dcr & 0x08)) printk (",N-SELECT-IN"); + if (dcr & 0x04) printk (",N-INIT"); + if (!(dcr & 0x02)) printk (",N-AUTOFD"); + if (!(dcr & 0x01)) printk (",N-STROBE"); + } + printk ("] dsr=["); + if (!(dsr & 0x80)) printk ("BUSY"); + if (dsr & 0x40) printk (",N-ACK"); + if (dsr & 0x20) printk (",PERROR"); + if (dsr & 0x10) printk (",SELECT"); + if (dsr & 0x08) printk (",N-FAULT"); + printk ("]\n"); + return; +} +#else /* !DEBUG_PARPORT */ +#define dump_parport_state(args...) +#endif /* !DEBUG_PARPORT */ + +/* __parport_pc_frob_control differs from parport_pc_frob_control in that + * it doesn't do any extra masking. */ +static __inline__ unsigned char __parport_pc_frob_control (struct parport *p, + unsigned char mask, + unsigned char val) +{ + struct parport_pc_private *priv = p->physport->private_data; + unsigned char ctr = priv->ctr; +#ifdef DEBUG_PARPORT + printk (KERN_DEBUG + "__parport_pc_frob_control(%02x,%02x): %02x -> %02x\n", + mask, val, ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable); +#endif + ctr = (ctr & ~mask) ^ val; + ctr &= priv->ctr_writable; /* only write writable bits. 
*/ + outb (ctr, CONTROL (p)); + priv->ctr = ctr; /* Update soft copy */ + return ctr; +} + +static __inline__ void parport_pc_data_reverse (struct parport *p) +{ + __parport_pc_frob_control (p, 0x20, 0x20); +} + +static __inline__ void parport_pc_data_forward (struct parport *p) +{ + __parport_pc_frob_control (p, 0x20, 0x00); +} + +static __inline__ void parport_pc_write_control (struct parport *p, + unsigned char d) +{ + const unsigned char wm = (PARPORT_CONTROL_STROBE | + PARPORT_CONTROL_AUTOFD | + PARPORT_CONTROL_INIT | + PARPORT_CONTROL_SELECT); + + /* Take this out when drivers have adapted to newer interface. */ + if (d & 0x20) { + printk (KERN_DEBUG "%s (%s): use data_reverse for this!\n", + p->name, p->cad->name); + parport_pc_data_reverse (p); + } + + __parport_pc_frob_control (p, wm, d & wm); +} + +static __inline__ unsigned char parport_pc_read_control(struct parport *p) +{ + const unsigned char rm = (PARPORT_CONTROL_STROBE | + PARPORT_CONTROL_AUTOFD | + PARPORT_CONTROL_INIT | + PARPORT_CONTROL_SELECT); + const struct parport_pc_private *priv = p->physport->private_data; + return priv->ctr & rm; /* Use soft copy */ +} + +static __inline__ unsigned char parport_pc_frob_control (struct parport *p, + unsigned char mask, + unsigned char val) +{ + const unsigned char wm = (PARPORT_CONTROL_STROBE | + PARPORT_CONTROL_AUTOFD | + PARPORT_CONTROL_INIT | + PARPORT_CONTROL_SELECT); + + /* Take this out when drivers have adapted to newer interface. */ + if (mask & 0x20) { + printk (KERN_DEBUG "%s (%s): use data_%s for this!\n", + p->name, p->cad->name, + (val & 0x20) ? "reverse" : "forward"); + if (val & 0x20) + parport_pc_data_reverse (p); + else + parport_pc_data_forward (p); + } + + /* Restrict mask and val to control lines. */ + mask &= wm; + val &= wm; + + return __parport_pc_frob_control (p, mask, val); +} + +static __inline__ unsigned char parport_pc_read_status(struct parport *p) +{ + return inb(STATUS(p)); +} + + +static __inline__ void parport_pc_disable_irq(struct parport *p) +{ + __parport_pc_frob_control (p, 0x10, 0x00); +} + +static __inline__ void parport_pc_enable_irq(struct parport *p) +{ + __parport_pc_frob_control (p, 0x10, 0x10); +} + +extern void parport_pc_release_resources(struct parport *p); + +extern int parport_pc_claim_resources(struct parport *p); + +/* PCMCIA code will want to get us to look at a port. Provide a mechanism. */ +extern struct parport *parport_pc_probe_port(unsigned long base, + unsigned long base_hi, + int irq, int dma, + struct device *dev, + int irqflags); +extern void parport_pc_unregister_port(struct parport *p); + +#endif diff --git a/include/linux/parser.h b/include/linux/parser.h new file mode 100644 index 00000000..ea2281e7 --- /dev/null +++ b/include/linux/parser.h @@ -0,0 +1,33 @@ +/* + * linux/include/linux/parser.h + * + * Header for lib/parser.c + * Intended use of these functions is parsing filesystem argument lists, + * but could potentially be used anywhere else that simple option=arg + * parsing is required. + */ + + +/* associates an integer enumerator with a pattern string. 
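+
+   A typical (illustrative) table and lookup built on these helpers:
+
+	enum { Opt_uid, Opt_gid, Opt_err };
+
+	static const match_table_t tokens = {
+		{Opt_uid, "uid=%u"},
+		{Opt_gid, "gid=%u"},
+		{Opt_err, NULL}
+	};
+
+	substring_t args[MAX_OPT_ARGS];
+	int token = match_token(opt_string, tokens, args);
+
+   where opt_string is the caller's option string.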
*/ +struct match_token { + int token; + const char *pattern; +}; + +typedef struct match_token match_table_t[]; + +/* Maximum number of arguments that match_token will find in a pattern */ +enum {MAX_OPT_ARGS = 3}; + +/* Describe the location within a string of a substring */ +typedef struct { + char *from; + char *to; +} substring_t; + +int match_token(char *, const match_table_t table, substring_t args[]); +int match_int(substring_t *, int *result); +int match_octal(substring_t *, int *result); +int match_hex(substring_t *, int *result); +size_t match_strlcpy(char *, const substring_t *, size_t); +char *match_strdup(const substring_t *); diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h new file mode 100644 index 00000000..a6ee9aa8 --- /dev/null +++ b/include/linux/pata_arasan_cf_data.h @@ -0,0 +1,49 @@ +/* + * include/linux/pata_arasan_cf_data.h + * + * Arasan Compact Flash host controller platform data header file + * + * Copyright (C) 2011 ST Microelectronics + * Viresh Kumar <viresh.kumar@st.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _PATA_ARASAN_CF_DATA_H +#define _PATA_ARASAN_CF_DATA_H + +#include <linux/platform_device.h> + +struct arasan_cf_pdata { + u8 cf_if_clk; + #define CF_IF_CLK_100M (0x0) + #define CF_IF_CLK_75M (0x1) + #define CF_IF_CLK_66M (0x2) + #define CF_IF_CLK_50M (0x3) + #define CF_IF_CLK_40M (0x4) + #define CF_IF_CLK_33M (0x5) + #define CF_IF_CLK_25M (0x6) + #define CF_IF_CLK_125M (0x7) + #define CF_IF_CLK_150M (0x8) + #define CF_IF_CLK_166M (0x9) + #define CF_IF_CLK_200M (0xA) + /* + * Platform specific incapabilities of CF controller is handled via + * quirks + */ + u32 quirk; + #define CF_BROKEN_PIO (1) + #define CF_BROKEN_MWDMA (1 << 1) + #define CF_BROKEN_UDMA (1 << 2) + /* This is platform specific data for the DMA controller */ + void *dma_priv; +}; + +static inline void +set_arasan_cf_pdata(struct platform_device *pdev, struct arasan_cf_pdata *data) +{ + pdev->dev.platform_data = data; +} +#endif /* _PATA_ARASAN_CF_DATA_H */ diff --git a/include/linux/patchkey.h b/include/linux/patchkey.h new file mode 100644 index 00000000..aefda0ec --- /dev/null +++ b/include/linux/patchkey.h @@ -0,0 +1,47 @@ +/* + * <linux/patchkey.h> -- definition of _PATCHKEY macro + * + * Copyright (C) 2005 Stuart Brady + * + * This exists because awe_voice.h defined its own _PATCHKEY and it wasn't + * clear whether removing this would break anything in userspace. + * + * Do not include this file directly. Please use <sys/soundcard.h> instead. + * For kernel code, use <linux/soundcard.h> + */ + +#ifndef _LINUX_PATCHKEY_H_INDIRECT +#error "patchkey.h included directly" +#endif + +#ifndef _LINUX_PATCHKEY_H +#define _LINUX_PATCHKEY_H + +/* Endian macros. 
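+   As a worked example, _PATCHKEY(0x04) evaluates to 0xfd04 when the
+   big-endian branch below is taken and to 0x04fd on little-endian.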
*/ +#ifdef __KERNEL__ +# include <asm/byteorder.h> +#else +# include <endian.h> +#endif + +#if defined(__KERNEL__) +# if defined(__BIG_ENDIAN) +# define _PATCHKEY(id) (0xfd00|id) +# elif defined(__LITTLE_ENDIAN) +# define _PATCHKEY(id) ((id<<8)|0x00fd) +# else +# error "could not determine byte order" +# endif +#else +#if defined(__BYTE_ORDER) +# if __BYTE_ORDER == __BIG_ENDIAN +# define _PATCHKEY(id) (0xfd00|id) +# elif __BYTE_ORDER == __LITTLE_ENDIAN +# define _PATCHKEY(id) ((id<<8)|0x00fd) +# else +# error "could not determine byte order" +# endif +#endif +#endif + +#endif /* _LINUX_PATCHKEY_H */ diff --git a/include/linux/path.h b/include/linux/path.h new file mode 100644 index 00000000..edc98dec --- /dev/null +++ b/include/linux/path.h @@ -0,0 +1,20 @@ +#ifndef _LINUX_PATH_H +#define _LINUX_PATH_H + +struct dentry; +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +}; + +extern void path_get(struct path *); +extern void path_put(struct path *); + +static inline int path_equal(const struct path *path1, const struct path *path2) +{ + return path1->mnt == path2->mnt && path1->dentry == path2->dentry; +} + +#endif /* _LINUX_PATH_H */ diff --git a/include/linux/pch_dma.h b/include/linux/pch_dma.h new file mode 100644 index 00000000..fdafe529 --- /dev/null +++ b/include/linux/pch_dma.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef PCH_DMA_H +#define PCH_DMA_H + +#include <linux/dmaengine.h> + +enum pch_dma_width { + PCH_DMA_WIDTH_1_BYTE, + PCH_DMA_WIDTH_2_BYTES, + PCH_DMA_WIDTH_4_BYTES, +}; + +struct pch_dma_slave { + struct device *dma_dev; + unsigned int chan_id; + dma_addr_t tx_reg; + dma_addr_t rx_reg; + enum pch_dma_width width; +}; + +#endif diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h new file mode 100644 index 00000000..44623500 --- /dev/null +++ b/include/linux/pci-acpi.h @@ -0,0 +1,46 @@ +/* + * File pci-acpi.h + * + * Copyright (C) 2004 Intel + * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) + */ + +#ifndef _PCI_ACPI_H_ +#define _PCI_ACPI_H_ + +#include <linux/acpi.h> + +#ifdef CONFIG_ACPI +extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, + struct pci_bus *pci_bus); +extern acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev); +extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, + struct pci_dev *pci_dev); +extern acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev); + +static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) +{ + struct pci_bus *pbus = pdev->bus; + /* Find a PCI root bus */ + while (!pci_is_root_bus(pbus)) + pbus = pbus->parent; + return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus), + pbus->number); +} + +static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) +{ + if (!pci_is_root_bus(pbus)) + return DEVICE_ACPI_HANDLE(&(pbus->self->dev)); + return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus), + pbus->number); +} +#endif + +#ifdef CONFIG_ACPI_APEI +extern bool aer_acpi_firmware_first(void); +#else +static inline bool aer_acpi_firmware_first(void) { return false; } +#endif + +#endif /* _PCI_ACPI_H_ */ diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h new file mode 100644 index 00000000..c8320144 --- /dev/null +++ b/include/linux/pci-aspm.h @@ -0,0 +1,69 @@ +/* + * aspm.h + * + * PCI Express ASPM defines and function prototypes + * + * Copyright (C) 2007 Intel Corp. 
+ * Zhang Yanmin (yanmin.zhang@intel.com) + * Shaohua Li (shaohua.li@intel.com) + * + * For more information, please consult the following manuals (look at + * http://www.pcisig.com/ for how to get them): + * + * PCI Express Specification + */ + +#ifndef LINUX_ASPM_H +#define LINUX_ASPM_H + +#include <linux/pci.h> + +#define PCIE_LINK_STATE_L0S 1 +#define PCIE_LINK_STATE_L1 2 +#define PCIE_LINK_STATE_CLKPM 4 + +#ifdef CONFIG_PCIEASPM +extern void pcie_aspm_init_link_state(struct pci_dev *pdev); +extern void pcie_aspm_exit_link_state(struct pci_dev *pdev); +extern void pcie_aspm_pm_state_change(struct pci_dev *pdev); +extern void pcie_aspm_powersave_config_link(struct pci_dev *pdev); +extern void pci_disable_link_state(struct pci_dev *pdev, int state); +extern void pci_disable_link_state_locked(struct pci_dev *pdev, int state); +extern void pcie_clear_aspm(struct pci_bus *bus); +extern void pcie_no_aspm(void); +#else +static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) +{ +} +static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) +{ +} +static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) +{ +} +static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) +{ +} +static inline void pci_disable_link_state(struct pci_dev *pdev, int state) +{ +} +static inline void pcie_clear_aspm(struct pci_bus *bus) +{ +} +static inline void pcie_no_aspm(void) +{ +} +#endif + +#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ +extern void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev); +extern void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev); +#else +static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) +{ +} +static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) +{ +} +#endif +#endif /* LINUX_ASPM_H */ diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h new file mode 100644 index 00000000..7ef68724 --- /dev/null +++ b/include/linux/pci-ats.h @@ -0,0 +1,127 @@ +#ifndef LINUX_PCI_ATS_H +#define LINUX_PCI_ATS_H + +#include <linux/pci.h> + +/* Address Translation Service */ +struct pci_ats { + int pos; /* capability position */ + int stu; /* Smallest Translation Unit */ + int qdep; /* Invalidate Queue Depth */ + int ref_cnt; /* Physical Function reference count */ + unsigned int is_enabled:1; /* Enable bit is set */ +}; + +#ifdef CONFIG_PCI_ATS + +extern int pci_enable_ats(struct pci_dev *dev, int ps); +extern void pci_disable_ats(struct pci_dev *dev); +extern int pci_ats_queue_depth(struct pci_dev *dev); + +/** + * pci_ats_enabled - query the ATS status + * @dev: the PCI device + * + * Returns 1 if ATS capability is enabled, or 0 if not. 
+ */ +static inline int pci_ats_enabled(struct pci_dev *dev) +{ + return dev->ats && dev->ats->is_enabled; +} + +#else /* CONFIG_PCI_ATS */ + +static inline int pci_enable_ats(struct pci_dev *dev, int ps) +{ + return -ENODEV; +} + +static inline void pci_disable_ats(struct pci_dev *dev) +{ +} + +static inline int pci_ats_queue_depth(struct pci_dev *dev) +{ + return -ENODEV; +} + +static inline int pci_ats_enabled(struct pci_dev *dev) +{ + return 0; +} + +#endif /* CONFIG_PCI_ATS */ + +#ifdef CONFIG_PCI_PRI + +extern int pci_enable_pri(struct pci_dev *pdev, u32 reqs); +extern void pci_disable_pri(struct pci_dev *pdev); +extern bool pci_pri_enabled(struct pci_dev *pdev); +extern int pci_reset_pri(struct pci_dev *pdev); +extern bool pci_pri_stopped(struct pci_dev *pdev); +extern int pci_pri_status(struct pci_dev *pdev); + +#else /* CONFIG_PCI_PRI */ + +static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs) +{ + return -ENODEV; +} + +static inline void pci_disable_pri(struct pci_dev *pdev) +{ +} + +static inline bool pci_pri_enabled(struct pci_dev *pdev) +{ + return false; +} + +static inline int pci_reset_pri(struct pci_dev *pdev) +{ + return -ENODEV; +} + +static inline bool pci_pri_stopped(struct pci_dev *pdev) +{ + return true; +} + +static inline int pci_pri_status(struct pci_dev *pdev) +{ + return -ENODEV; +} +#endif /* CONFIG_PCI_PRI */ + +#ifdef CONFIG_PCI_PASID + +extern int pci_enable_pasid(struct pci_dev *pdev, int features); +extern void pci_disable_pasid(struct pci_dev *pdev); +extern int pci_pasid_features(struct pci_dev *pdev); +extern int pci_max_pasids(struct pci_dev *pdev); + +#else /* CONFIG_PCI_PASID */ + +static inline int pci_enable_pasid(struct pci_dev *pdev, int features) +{ + return -EINVAL; +} + +static inline void pci_disable_pasid(struct pci_dev *pdev) +{ +} + +static inline int pci_pasid_features(struct pci_dev *pdev) +{ + return -EINVAL; +} + +static inline int pci_max_pasids(struct pci_dev *pdev) +{ + return -EINVAL; +} + +#endif /* CONFIG_PCI_PASID */ + + +#endif /* LINUX_PCI_ATS_H*/ diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h new file mode 100644 index 00000000..549a041f --- /dev/null +++ b/include/linux/pci-dma.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_PCI_DMA_H +#define _LINUX_PCI_DMA_H + +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME); +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME); +#define pci_unmap_addr dma_unmap_addr +#define pci_unmap_addr_set dma_unmap_addr_set +#define pci_unmap_len dma_unmap_len +#define pci_unmap_len_set dma_unmap_len_set + +#endif diff --git a/include/linux/pci.h b/include/linux/pci.h new file mode 100644 index 00000000..e444f5b4 --- /dev/null +++ b/include/linux/pci.h @@ -0,0 +1,1724 @@ +/* + * pci.h + * + * PCI defines and function prototypes + * Copyright 1994, Drew Eckhardt + * Copyright 1997--1999 Martin Mares <mj@ucw.cz> + * + * For more information, please consult the following manuals (look at + * http://www.pcisig.com/ for how to get them): + * + * PCI BIOS Specification + * PCI Local Bus Specification + * PCI to PCI Bridge Specification + * PCI System Design Guide + */ + +#ifndef LINUX_PCI_H +#define LINUX_PCI_H + +#include <linux/pci_regs.h> /* The pci register defines */ + +/* + * The PCI interface treats multi-function devices as independent + * devices. 
The slot/function address of each device is encoded + * in a single byte as follows: + * + * 7:3 = slot + * 2:0 = function + */ +#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) +#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) +#define PCI_FUNC(devfn) ((devfn) & 0x07) + +/* Ioctls for /proc/bus/pci/X/Y nodes. */ +#define PCIIOC_BASE ('P' << 24 | 'C' << 16 | 'I' << 8) +#define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */ +#define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */ +#define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */ +#define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */ + +#ifdef __KERNEL__ + +#include <linux/mod_devicetable.h> + +#include <linux/types.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/list.h> +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/kobject.h> +#include <linux/atomic.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/irqreturn.h> + +/* Include the ID list */ +#include <linux/pci_ids.h> + +/* pci_slot represents a physical slot */ +struct pci_slot { + struct pci_bus *bus; /* The bus this slot is on */ + struct list_head list; /* node in list of slots on this bus */ + struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ + unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ + struct kobject kobj; +}; + +static inline const char *pci_slot_name(const struct pci_slot *slot) +{ + return kobject_name(&slot->kobj); +} + +/* File state for mmap()s on /proc/bus/pci/X/Y */ +enum pci_mmap_state { + pci_mmap_io, + pci_mmap_mem +}; + +/* This defines the direction arg to the DMA mapping routines. */ +#define PCI_DMA_BIDIRECTIONAL 0 +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#define PCI_DMA_NONE 3 + +/* + * For PCI devices, the region numbers are assigned this way: + */ +enum { + /* #0-5: standard PCI resources */ + PCI_STD_RESOURCES, + PCI_STD_RESOURCE_END = 5, + + /* #6: expansion ROM resource */ + PCI_ROM_RESOURCE, + + /* device specific resources */ +#ifdef CONFIG_PCI_IOV + PCI_IOV_RESOURCES, + PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, +#endif + + /* resources assigned to buses behind the bridge */ +#define PCI_BRIDGE_RESOURCE_NUM 4 + + PCI_BRIDGE_RESOURCES, + PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + + PCI_BRIDGE_RESOURCE_NUM - 1, + + /* total resources associated with a PCI device */ + PCI_NUM_RESOURCES, + + /* preserve this for compatibility */ + DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES, +}; + +typedef int __bitwise pci_power_t; + +#define PCI_D0 ((pci_power_t __force) 0) +#define PCI_D1 ((pci_power_t __force) 1) +#define PCI_D2 ((pci_power_t __force) 2) +#define PCI_D3hot ((pci_power_t __force) 3) +#define PCI_D3cold ((pci_power_t __force) 4) +#define PCI_UNKNOWN ((pci_power_t __force) 5) +#define PCI_POWER_ERROR ((pci_power_t __force) -1) + +/* Remember to update this when the list above changes! */ +extern const char *pci_power_names[]; + +static inline const char *pci_power_name(pci_power_t state) +{ + return pci_power_names[1 + (int) state]; +} + +#define PCI_PM_D2_DELAY 200 +#define PCI_PM_D3_WAIT 10 +#define PCI_PM_BUS_WAIT 50 + +/** The pci_channel state describes connectivity between the CPU and + * the pci device. If some PCI bus between here and the pci device + * has crashed or locked up, this info is reflected here. 
+ */ +typedef unsigned int __bitwise pci_channel_state_t; + +enum pci_channel_state { + /* I/O channel is in normal state */ + pci_channel_io_normal = (__force pci_channel_state_t) 1, + + /* I/O to channel is blocked */ + pci_channel_io_frozen = (__force pci_channel_state_t) 2, + + /* PCI card is dead */ + pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, +}; + +typedef unsigned int __bitwise pcie_reset_state_t; + +enum pcie_reset_state { + /* Reset is NOT asserted (Use to deassert reset) */ + pcie_deassert_reset = (__force pcie_reset_state_t) 1, + + /* Use #PERST to reset PCI-E device */ + pcie_warm_reset = (__force pcie_reset_state_t) 2, + + /* Use PCI-E Hot Reset to reset device */ + pcie_hot_reset = (__force pcie_reset_state_t) 3 +}; + +typedef unsigned short __bitwise pci_dev_flags_t; +enum pci_dev_flags { + /* INTX_DISABLE in PCI_COMMAND register disables MSI + * generation too. + */ + PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, + /* Device configuration is irrevocably lost if disabled into D3 */ + PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, + /* Provide indication device is assigned by a Virtual Machine Manager */ + PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, +}; + +enum pci_irq_reroute_variant { + INTEL_IRQ_REROUTE_VARIANT = 1, + MAX_IRQ_REROUTE_VARIANTS = 3 +}; + +typedef unsigned short __bitwise pci_bus_flags_t; +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, + PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, +}; + +/* Based on the PCI Hotplug Spec, but some values are made up by us */ +enum pci_bus_speed { + PCI_SPEED_33MHz = 0x00, + PCI_SPEED_66MHz = 0x01, + PCI_SPEED_66MHz_PCIX = 0x02, + PCI_SPEED_100MHz_PCIX = 0x03, + PCI_SPEED_133MHz_PCIX = 0x04, + PCI_SPEED_66MHz_PCIX_ECC = 0x05, + PCI_SPEED_100MHz_PCIX_ECC = 0x06, + PCI_SPEED_133MHz_PCIX_ECC = 0x07, + PCI_SPEED_66MHz_PCIX_266 = 0x09, + PCI_SPEED_100MHz_PCIX_266 = 0x0a, + PCI_SPEED_133MHz_PCIX_266 = 0x0b, + AGP_UNKNOWN = 0x0c, + AGP_1X = 0x0d, + AGP_2X = 0x0e, + AGP_4X = 0x0f, + AGP_8X = 0x10, + PCI_SPEED_66MHz_PCIX_533 = 0x11, + PCI_SPEED_100MHz_PCIX_533 = 0x12, + PCI_SPEED_133MHz_PCIX_533 = 0x13, + PCIE_SPEED_2_5GT = 0x14, + PCIE_SPEED_5_0GT = 0x15, + PCIE_SPEED_8_0GT = 0x16, + PCI_SPEED_UNKNOWN = 0xff, +}; + +struct pci_cap_saved_data { + char cap_nr; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +struct pcie_link_state; +struct pci_vpd; +struct pci_sriov; +struct pci_ats; + +/* + * The pci_dev structure is used to describe PCI devices. 
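+ *
+ * A driver usually receives a pointer to one of these in its probe()
+ * callback; given only the embedded generic struct device, the
+ * to_pci_dev() macro defined below converts back (illustrative):
+ *
+ *	struct pci_dev *pdev = to_pci_dev(dev);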
+ */ +struct pci_dev { + struct list_head bus_list; /* node in per-bus list */ + struct pci_bus *bus; /* bus this device is on */ + struct pci_bus *subordinate; /* bus this device bridges to */ + + void *sysdata; /* hook for sys-specific extension */ + struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ + struct pci_slot *slot; /* Physical slot this device is in */ + + unsigned int devfn; /* encoded device & function index */ + unsigned short vendor; + unsigned short device; + unsigned short subsystem_vendor; + unsigned short subsystem_device; + unsigned int class; /* 3 bytes: (base,sub,prog-if) */ + u8 revision; /* PCI revision, low byte of class word */ + u8 hdr_type; /* PCI header type (`multi' flag masked out) */ + u8 pcie_cap; /* PCI-E capability offset */ + u8 pcie_type:4; /* PCI-E device/port type */ + u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */ + u8 rom_base_reg; /* which config register controls the ROM */ + u8 pin; /* which interrupt pin this device uses */ + + struct pci_driver *driver; /* which driver has allocated this device */ + u64 dma_mask; /* Mask of the bits of bus address this + device implements. Normally this is + 0xffffffff. You only need to change + this if your device has broken DMA + or supports 64-bit transfers. */ + + struct device_dma_parameters dma_parms; + + pci_power_t current_state; /* Current operating state. In ACPI-speak, + this is D0-D3, D0 being fully functional, + and D3 being off. */ + int pm_cap; /* PM capability offset in the + configuration space */ + unsigned int pme_support:5; /* Bitmask of states from which PME# + can be generated */ + unsigned int pme_interrupt:1; + unsigned int pme_poll:1; /* Poll device's PME status bit */ + unsigned int d1_support:1; /* Low power state D1 is supported */ + unsigned int d2_support:1; /* Low power state D2 is supported */ + unsigned int no_d1d2:1; /* Only allow D0 and D3 */ + unsigned int mmio_always_on:1; /* disallow turning off io/mem + decoding during bar sizing */ + unsigned int wakeup_prepared:1; + unsigned int d3_delay; /* D3->D0 transition time in ms */ + +#ifdef CONFIG_PCIEASPM + struct pcie_link_state *link_state; /* ASPM link state. */ +#endif + + pci_channel_state_t error_state; /* current connectivity state */ + struct device dev; /* Generic device interface */ + + int cfg_size; /* Size of configuration space */ + + /* + * Instead of touching interrupt line and base address registers + * directly, use the values stored here. They might be different! + */ + unsigned int irq; + struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ + + /* These fields are used by common fixups */ + unsigned int transparent:1; /* Transparent PCI bridge */ + unsigned int multifunction:1;/* Part of multi-function device */ + /* keep track of device state */ + unsigned int is_added:1; + unsigned int is_busmaster:1; /* device is busmaster */ + unsigned int no_msi:1; /* device may not use msi */ + unsigned int block_cfg_access:1; /* config space access is blocked */ + unsigned int broken_parity_status:1; /* Device generates false positive parity */ + unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ + unsigned int msi_enabled:1; + unsigned int msix_enabled:1; + unsigned int ari_enabled:1; /* ARI forwarding */ + unsigned int is_managed:1; + unsigned int is_pcie:1; /* Obsolete. Will be removed. 
+ Use pci_is_pcie() instead */ + unsigned int needs_freset:1; /* Dev requires fundamental reset */ + unsigned int state_saved:1; + unsigned int is_physfn:1; + unsigned int is_virtfn:1; + unsigned int reset_fn:1; + unsigned int is_hotplug_bridge:1; + unsigned int __aer_firmware_first_valid:1; + unsigned int __aer_firmware_first:1; + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; /* pci_enable_device has been called */ + + u32 saved_config_space[16]; /* config space saved at suspend time */ + struct hlist_head saved_cap_space; + struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ + int rom_attr_enabled; /* has display of the rom attribute been enabled? */ + struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ + struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ +#ifdef CONFIG_PCI_MSI + struct list_head msi_list; + struct kset *msi_kset; +#endif + struct pci_vpd *vpd; +#ifdef CONFIG_PCI_ATS + union { + struct pci_sriov *sriov; /* SR-IOV capability related */ + struct pci_dev *physfn; /* the PF this VF is associated with */ + }; + struct pci_ats *ats; /* Address Translation Service */ +#endif +}; + +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif + + return dev; +} + +extern struct pci_dev *alloc_pci_dev(void); + +#define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list) +#define to_pci_dev(n) container_of(n, struct pci_dev, dev) +#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) + +static inline int pci_channel_offline(struct pci_dev *pdev) +{ + return (pdev->error_state != pci_channel_io_normal); +} + +struct pci_host_bridge_window { + struct list_head list; + struct resource *res; /* host bridge aperture (CPU address) */ + resource_size_t offset; /* bus address + offset = CPU address */ +}; + +struct pci_host_bridge { + struct list_head list; + struct pci_bus *bus; /* root bus */ + struct list_head windows; /* pci_host_bridge_windows */ +}; + +/* + * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond + * to P2P or CardBus bridge windows) go in a table. Additional ones (for + * buses below host bridges or subtractive decode bridges) go in the list. + * Use pci_bus_for_each_resource() to iterate through all the resources. + */ + +/* + * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly + * and there's no way to program the bridge with the details of the window. + * This does not apply to ACPI _CRS windows, even with the _DEC subtractive- + * decode bit set, because they are explicit and can be programmed with _SRS. 
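+ *
+ * However its windows were obtained, walking a bus's resources uses the
+ * iterator mentioned above, e.g. (illustrative):
+ *
+ *	struct resource *res;
+ *	int i;
+ *
+ *	pci_bus_for_each_resource(bus, res, i) {
+ *		if (!res)
+ *			continue;
+ *		...
+ *	}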
+ */ +#define PCI_SUBTRACTIVE_DECODE 0x1 + +struct pci_bus_resource { + struct list_head list; + struct resource *res; + unsigned int flags; +}; + +#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ + +struct pci_bus { + struct list_head node; /* node in list of buses */ + struct pci_bus *parent; /* parent bus this bridge is on */ + struct list_head children; /* list of child buses */ + struct list_head devices; /* list of devices on this bus */ + struct pci_dev *self; /* bridge device as seen by parent */ + struct list_head slots; /* list of slots on this bus */ + struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; + struct list_head resources; /* address space routed to this bus */ + + struct pci_ops *ops; /* configuration access functions */ + void *sysdata; /* hook for sys-specific extension */ + struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ + + unsigned char number; /* bus number */ + unsigned char primary; /* number of primary bridge */ + unsigned char secondary; /* number of secondary bridge */ + unsigned char subordinate; /* max number of subordinate buses */ + unsigned char max_bus_speed; /* enum pci_bus_speed */ + unsigned char cur_bus_speed; /* enum pci_bus_speed */ + + char name[48]; + + unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ + pci_bus_flags_t bus_flags; /* Inherited by child busses */ + struct device *bridge; + struct device dev; + struct bin_attribute *legacy_io; /* legacy I/O for this bus */ + struct bin_attribute *legacy_mem; /* legacy mem */ + unsigned int is_added:1; +}; + +#define pci_bus_b(n) list_entry(n, struct pci_bus, node) +#define to_pci_bus(n) container_of(n, struct pci_bus, dev) + +/* + * Returns true if the pci bus is root (behind host-pci bridge), + * false otherwise + */ +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} + +#ifdef CONFIG_PCI_MSI +static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) +{ + return pci_dev->msi_enabled || pci_dev->msix_enabled; +} +#else +static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } +#endif + +/* + * Error values that may be returned by PCI functions. + */ +#define PCIBIOS_SUCCESSFUL 0x00 +#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 +#define PCIBIOS_BAD_VENDOR_ID 0x83 +#define PCIBIOS_DEVICE_NOT_FOUND 0x86 +#define PCIBIOS_BAD_REGISTER_NUMBER 0x87 +#define PCIBIOS_SET_FAILED 0x88 +#define PCIBIOS_BUFFER_TOO_SMALL 0x89 + +/* Low-level architecture-dependent routines */ + +struct pci_ops { + int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); + int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); +}; + +/* + * ACPI needs to be able to access PCI config space before we've done a + * PCI bus scan and created pci_bus structures. + */ +extern int raw_pci_read(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 *val); +extern int raw_pci_write(unsigned int domain, unsigned int bus, + unsigned int devfn, int reg, int len, u32 val); + +struct pci_bus_region { + resource_size_t start; + resource_size_t end; +}; + +struct pci_dynids { + spinlock_t lock; /* protects list, index */ + struct list_head list; /* for IDs added at runtime */ +}; + +/* ---------------------------------------------------------------- */ +/** PCI Error Recovery System (PCI-ERS). 
If a PCI device driver provides + * a set of callbacks in struct pci_error_handlers, then that device driver + * will be notified of PCI bus errors, and will be driven to recovery + * when an error occurs. + */ + +typedef unsigned int __bitwise pci_ers_result_t; + +enum pci_ers_result { + /* no result/none/not supported in device driver */ + PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, + + /* Device driver can recover without slot reset */ + PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, + + /* Device driver wants slot to be reset. */ + PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, + + /* Device has completely failed, is unrecoverable */ + PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4, + + /* Device driver is fully recovered and operational */ + PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5, +}; + +/* PCI bus error event callbacks */ +struct pci_error_handlers { + /* PCI bus error detected on this device */ + pci_ers_result_t (*error_detected)(struct pci_dev *dev, + enum pci_channel_state error); + + /* MMIO has been re-enabled, but not DMA */ + pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); + + /* PCI Express link has been reset */ + pci_ers_result_t (*link_reset)(struct pci_dev *dev); + + /* PCI slot has been reset */ + pci_ers_result_t (*slot_reset)(struct pci_dev *dev); + + /* Device driver may resume normal operations */ + void (*resume)(struct pci_dev *dev); +}; + +/* ---------------------------------------------------------------- */ + +struct module; +struct pci_driver { + struct list_head node; + const char *name; + const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ + int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ + void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ + int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ + int (*suspend_late) (struct pci_dev *dev, pm_message_t state); + int (*resume_early) (struct pci_dev *dev); + int (*resume) (struct pci_dev *dev); /* Device woken up */ + void (*shutdown) (struct pci_dev *dev); + struct pci_error_handlers *err_handler; + struct device_driver driver; + struct pci_dynids dynids; +}; + +#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) + +/** + * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table + * @_table: device table name + * + * This macro is used to create a struct pci_device_id array (a device table) + * in a generic manner. + */ +#define DEFINE_PCI_DEVICE_TABLE(_table) \ + const struct pci_device_id _table[] __devinitconst + +/** + * PCI_DEVICE - macro used to describe a specific pci device + * @vend: the 16 bit PCI Vendor ID + * @dev: the 16 bit PCI Device ID + * + * This macro is used to create a struct pci_device_id that matches a + * specific device. The subvendor and subdevice fields will be set to + * PCI_ANY_ID. + */ +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID + +/** + * PCI_DEVICE_CLASS - macro used to describe a specific pci device class + * @dev_class: the class, subclass, prog-if triple for this device + * @dev_class_mask: the class mask for this device + * + * This macro is used to create a struct pci_device_id that matches a + * specific PCI class. The vendor, device, subvendor, and subdevice + * fields will be set to PCI_ANY_ID. 
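+ *
+ * An illustrative table mixing the two forms (the device ID 0x1234 is
+ * made up, not real hardware):
+ *
+ *	static DEFINE_PCI_DEVICE_TABLE(example_ids) = {
+ *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
+ *		{ PCI_DEVICE_CLASS(PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00) },
+ *		{ 0, }
+ *	};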
+ */ +#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \ + .class = (dev_class), .class_mask = (dev_class_mask), \ + .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID + +/** + * PCI_VDEVICE - macro used to describe a specific pci device in short form + * @vendor: the vendor name + * @device: the 16 bit PCI Device ID + * + * This macro is used to create a struct pci_device_id that matches a + * specific PCI device. The subvendor, and subdevice fields will be set + * to PCI_ANY_ID. The macro allows the next field to follow as the device + * private data. + */ + +#define PCI_VDEVICE(vendor, device) \ + PCI_VENDOR_ID_##vendor, (device), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 + +/* these external functions are only available when PCI support is enabled */ +#ifdef CONFIG_PCI + +extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); + +enum pcie_bus_config_types { + PCIE_BUS_TUNE_OFF, + PCIE_BUS_SAFE, + PCIE_BUS_PERFORMANCE, + PCIE_BUS_PEER2PEER, +}; + +extern enum pcie_bus_config_types pcie_bus_config; + +extern struct bus_type pci_bus_type; + +/* Do NOT directly access these two variables, unless you are arch specific pci + * code, or pci core code. */ +extern struct list_head pci_root_buses; /* list of all known PCI buses */ +/* Some device drivers need know if pci is initiated */ +extern int no_pci_devices(void); + +void pcibios_fixup_bus(struct pci_bus *); +int __must_check pcibios_enable_device(struct pci_dev *, int mask); +char *pcibios_setup(char *str); + +/* Used only when drivers/pci/setup.c is used */ +resource_size_t pcibios_align_resource(void *, const struct resource *, + resource_size_t, + resource_size_t); +void pcibios_update_irq(struct pci_dev *, int irq); + +/* Weak but can be overriden by arch */ +void pci_fixup_cardbus(struct pci_bus *); + +/* Generic PCI functions used internally */ + +void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, + struct resource *res); +void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, + struct pci_bus_region *region); +void pcibios_scan_specific_bus(int busn); +extern struct pci_bus *pci_find_bus(int domain, int busnr); +void pci_bus_add_devices(const struct pci_bus *bus); +struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, + struct pci_ops *ops, void *sysdata); +struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata); +struct pci_bus *pci_create_root_bus(struct device *parent, int bus, + struct pci_ops *ops, void *sysdata, + struct list_head *resources); +struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus, + struct pci_ops *ops, void *sysdata, + struct list_head *resources); +struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, + int busnr); +void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); +struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, + const char *name, + struct hotplug_slot *hotplug); +void pci_destroy_slot(struct pci_slot *slot); +void pci_renumber_slot(struct pci_slot *slot, int slot_nr); +int pci_scan_slot(struct pci_bus *bus, int devfn); +struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); +void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); +unsigned int pci_scan_child_bus(struct pci_bus *bus); +int __must_check pci_bus_add_device(struct pci_dev *dev); +void pci_read_bridge_bases(struct pci_bus *child); +struct resource *pci_find_parent_resource(const struct pci_dev *dev, + 
struct resource *res); +u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin); +int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); +u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); +extern struct pci_dev *pci_dev_get(struct pci_dev *dev); +extern void pci_dev_put(struct pci_dev *dev); +extern void pci_remove_bus(struct pci_bus *b); +extern void __pci_remove_bus_device(struct pci_dev *dev); +extern void pci_stop_and_remove_bus_device(struct pci_dev *dev); +extern void pci_stop_bus_device(struct pci_dev *dev); +void pci_setup_cardbus(struct pci_bus *bus); +extern void pci_sort_breadthfirst(void); +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false)) +#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0)) + +/* Generic PCI functions exported to card drivers */ + +enum pci_lost_interrupt_reason { + PCI_LOST_IRQ_NO_INFORMATION = 0, + PCI_LOST_IRQ_DISABLE_MSI, + PCI_LOST_IRQ_DISABLE_MSIX, + PCI_LOST_IRQ_DISABLE_ACPI, +}; +enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev); +int pci_find_capability(struct pci_dev *dev, int cap); +int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); +int pci_find_ext_capability(struct pci_dev *dev, int cap); +int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn, + int cap); +int pci_find_ht_capability(struct pci_dev *dev, int ht_cap); +int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); +struct pci_bus *pci_find_next_bus(const struct pci_bus *from); + +struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, + struct pci_dev *from); +struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, + unsigned int ss_vendor, unsigned int ss_device, + struct pci_dev *from); +struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); +struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, + unsigned int devfn); +static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, + unsigned int devfn) +{ + return pci_get_domain_bus_and_slot(0, bus, devfn); +} +struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); +int pci_dev_present(const struct pci_device_id *ids); + +int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, + int where, u8 *val); +int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, + int where, u16 *val); +int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, u32 *val); +int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, + int where, u8 val); +int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, + int where, u16 val); +int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, u32 val); +struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); + +static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) +{ + return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); +} +static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val) +{ + return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); +} +static inline int pci_read_config_dword(const struct pci_dev *dev, int where, + u32 *val) +{ + return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); +} +static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) +{ + return 
pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); +} +static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) +{ + return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); +} +static inline int pci_write_config_dword(const struct pci_dev *dev, int where, + u32 val) +{ + return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); +} + +int __must_check pci_enable_device(struct pci_dev *dev); +int __must_check pci_enable_device_io(struct pci_dev *dev); +int __must_check pci_enable_device_mem(struct pci_dev *dev); +int __must_check pci_reenable_device(struct pci_dev *); +int __must_check pcim_enable_device(struct pci_dev *pdev); +void pcim_pin_device(struct pci_dev *pdev); + +static inline int pci_is_enabled(struct pci_dev *pdev) +{ + return (atomic_read(&pdev->enable_cnt) > 0); +} + +static inline int pci_is_managed(struct pci_dev *pdev) +{ + return pdev->is_managed; +} + +void pci_disable_device(struct pci_dev *dev); + +extern unsigned int pcibios_max_latency; +void pci_set_master(struct pci_dev *dev); +void pci_clear_master(struct pci_dev *dev); + +int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); +int pci_set_cacheline_size(struct pci_dev *dev); +#define HAVE_PCI_SET_MWI +int __must_check pci_set_mwi(struct pci_dev *dev); +int pci_try_set_mwi(struct pci_dev *dev); +void pci_clear_mwi(struct pci_dev *dev); +void pci_intx(struct pci_dev *dev, int enable); +bool pci_intx_mask_supported(struct pci_dev *dev); +bool pci_check_and_mask_intx(struct pci_dev *dev); +bool pci_check_and_unmask_intx(struct pci_dev *dev); +void pci_msi_off(struct pci_dev *dev); +int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); +int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); +int pcix_get_max_mmrbc(struct pci_dev *dev); +int pcix_get_mmrbc(struct pci_dev *dev); +int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); +int pcie_get_readrq(struct pci_dev *dev); +int pcie_set_readrq(struct pci_dev *dev, int rq); +int pcie_get_mps(struct pci_dev *dev); +int pcie_set_mps(struct pci_dev *dev, int mps); +int __pci_reset_function(struct pci_dev *dev); +int __pci_reset_function_locked(struct pci_dev *dev); +int pci_reset_function(struct pci_dev *dev); +void pci_update_resource(struct pci_dev *dev, int resno); +int __must_check pci_assign_resource(struct pci_dev *dev, int i); +int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); +int pci_select_bars(struct pci_dev *dev, unsigned long flags); + +/* ROM control related routines */ +int pci_enable_rom(struct pci_dev *pdev); +void pci_disable_rom(struct pci_dev *pdev); +void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); +void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); +size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); + +/* Power management related routines */ +int pci_save_state(struct pci_dev *dev); +void pci_restore_state(struct pci_dev *dev); +struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); +int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state); +int pci_load_and_free_saved_state(struct pci_dev *dev, + struct pci_saved_state **state); +int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); +int pci_set_power_state(struct pci_dev *dev, pci_power_t state); +pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); +bool pci_pme_capable(struct 
pci_dev *dev, pci_power_t state); +void pci_pme_active(struct pci_dev *dev, bool enable); +int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, + bool runtime, bool enable); +int pci_wake_from_d3(struct pci_dev *dev, bool enable); +pci_power_t pci_target_state(struct pci_dev *dev); +int pci_prepare_to_sleep(struct pci_dev *dev); +int pci_back_from_sleep(struct pci_dev *dev); +bool pci_dev_run_wake(struct pci_dev *dev); +bool pci_check_pme_status(struct pci_dev *dev); +void pci_pme_wakeup_bus(struct pci_bus *bus); + +static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, + bool enable) +{ + return __pci_enable_wake(dev, state, false, enable); +} + +#define PCI_EXP_IDO_REQUEST (1<<0) +#define PCI_EXP_IDO_COMPLETION (1<<1) +void pci_enable_ido(struct pci_dev *dev, unsigned long type); +void pci_disable_ido(struct pci_dev *dev, unsigned long type); + +enum pci_obff_signal_type { + PCI_EXP_OBFF_SIGNAL_L0 = 0, + PCI_EXP_OBFF_SIGNAL_ALWAYS = 1, +}; +int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type); +void pci_disable_obff(struct pci_dev *dev); + +bool pci_ltr_supported(struct pci_dev *dev); +int pci_enable_ltr(struct pci_dev *dev); +void pci_disable_ltr(struct pci_dev *dev); +int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns); + +/* For use by arch with custom probe code */ +void set_pcie_port_type(struct pci_dev *pdev); +void set_pcie_hotplug_bridge(struct pci_dev *pdev); + +/* Functions for PCI Hotplug drivers to use */ +int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); +#ifdef CONFIG_HOTPLUG +unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge); +unsigned int pci_rescan_bus(struct pci_bus *bus); +#endif + +/* Vital product data routines */ +ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); +ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); +int pci_vpd_truncate(struct pci_dev *dev, size_t size); + +/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ +resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); +void pci_bus_assign_resources(const struct pci_bus *bus); +void pci_bus_size_bridges(struct pci_bus *bus); +int pci_claim_resource(struct pci_dev *, int); +void pci_assign_unassigned_resources(void); +void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge); +void pdev_enable_device(struct pci_dev *); +int pci_enable_resources(struct pci_dev *, int mask); +void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), + int (*)(const struct pci_dev *, u8, u8)); +#define HAVE_PCI_REQ_REGIONS 2 +int __must_check pci_request_regions(struct pci_dev *, const char *); +int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); +void pci_release_regions(struct pci_dev *); +int __must_check pci_request_region(struct pci_dev *, int, const char *); +int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *); +void pci_release_region(struct pci_dev *, int); +int pci_request_selected_regions(struct pci_dev *, int, const char *); +int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); +void pci_release_selected_regions(struct pci_dev *, int); + +/* drivers/pci/bus.c */ +void pci_add_resource(struct list_head *resources, struct resource *res); +void pci_add_resource_offset(struct list_head *resources, struct resource *res, + resource_size_t offset); +void pci_free_resource_list(struct list_head *resources); +void 
pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags); +struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); +void pci_bus_remove_resources(struct pci_bus *bus); + +#define pci_bus_for_each_resource(bus, res, i) \ + for (i = 0; \ + (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \ + i++) + +int __must_check pci_bus_alloc_resource(struct pci_bus *bus, + struct resource *res, resource_size_t size, + resource_size_t align, resource_size_t min, + unsigned int type_mask, + resource_size_t (*alignf)(void *, + const struct resource *, + resource_size_t, + resource_size_t), + void *alignf_data); +void pci_enable_bridges(struct pci_bus *bus); + +/* Proper probing supporting hot-pluggable devices */ +int __must_check __pci_register_driver(struct pci_driver *, struct module *, + const char *mod_name); + +/* + * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded + */ +#define pci_register_driver(driver) \ + __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) + +void pci_unregister_driver(struct pci_driver *dev); + +/** + * module_pci_driver() - Helper macro for registering a PCI driver + * @__pci_driver: pci_driver struct + * + * Helper macro for PCI drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_pci_driver(__pci_driver) \ + module_driver(__pci_driver, pci_register_driver, \ + pci_unregister_driver) + +void pci_stop_and_remove_behind_bridge(struct pci_dev *dev); +struct pci_driver *pci_dev_driver(const struct pci_dev *dev); +int pci_add_dynid(struct pci_driver *drv, + unsigned int vendor, unsigned int device, + unsigned int subvendor, unsigned int subdevice, + unsigned int class, unsigned int class_mask, + unsigned long driver_data); +const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, + struct pci_dev *dev); +int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, + int pass); + +void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata); +int pci_cfg_space_size_ext(struct pci_dev *dev); +int pci_cfg_space_size(struct pci_dev *dev); +unsigned char pci_bus_max_busnr(struct pci_bus *bus); +void pci_setup_bridge(struct pci_bus *bus); + +#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) +#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) + +int pci_set_vga_state(struct pci_dev *pdev, bool decode, + unsigned int command_bits, u32 flags); +/* kmem_cache style wrapper around pci_alloc_consistent() */ + +#include <linux/pci-dma.h> +#include <linux/dmapool.h> + +#define pci_pool dma_pool +#define pci_pool_create(name, pdev, size, align, allocation) \ + dma_pool_create(name, &pdev->dev, size, align, allocation) +#define pci_pool_destroy(pool) dma_pool_destroy(pool) +#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) +#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) + +enum pci_dma_burst_strategy { + PCI_DMA_BURST_INFINITY, /* make bursts as large as possible, + strategy_parameter is N/A */ + PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter + byte boundaries */ + PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of + strategy_parameter byte boundaries */ +}; + +struct msix_entry { + u32 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; + + 
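The registration and resource interfaces declared above (pci_enable_device(), pci_request_regions(), pci_ioremap_bar(), pci_set_drvdata() and the module_pci_driver() helper documented in the kernel-doc just shown) are easiest to follow in combination. The sketch below is illustrative only and is not part of pci.h; the driver name "example", the vendor/device IDs and the choice of BAR 0 are placeholders invented for the example.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io.h>

/* Placeholder IDs -- a real driver matches on its own hardware. */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pci_enable_device(pdev);		/* __must_check, see the declarations above */
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "example");
	if (rc)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);	/* map BAR 0, assumed to be MMIO here */
	if (!regs) {
		rc = -ENOMEM;
		goto err_release;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, regs);
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return rc;
}

static void example_remove(struct pci_dev *pdev)
{
	void __iomem *regs = pci_get_drvdata(pdev);

	iounmap(regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* Expands to module_init()/module_exit() wrappers around
 * pci_register_driver()/pci_unregister_driver(), as documented above. */
module_pci_driver(example_driver);
MODULE_LICENSE("GPL");

Using the managed pcim_enable_device()/pcim_iomap_regions() variants declared earlier would let the error-unwind labels be dropped, since devres then releases the regions and mappings automatically on detach.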
+#ifndef CONFIG_PCI_MSI +static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) +{ + return -1; +} + +static inline void pci_msi_shutdown(struct pci_dev *dev) +{ } +static inline void pci_disable_msi(struct pci_dev *dev) +{ } + +static inline int pci_msix_table_size(struct pci_dev *dev) +{ + return 0; +} +static inline int pci_enable_msix(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ + return -1; +} + +static inline void pci_msix_shutdown(struct pci_dev *dev) +{ } +static inline void pci_disable_msix(struct pci_dev *dev) +{ } + +static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) +{ } + +static inline void pci_restore_msi_state(struct pci_dev *dev) +{ } +static inline int pci_msi_enabled(void) +{ + return 0; +} +#else +extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); +extern void pci_msi_shutdown(struct pci_dev *dev); +extern void pci_disable_msi(struct pci_dev *dev); +extern int pci_msix_table_size(struct pci_dev *dev); +extern int pci_enable_msix(struct pci_dev *dev, + struct msix_entry *entries, int nvec); +extern void pci_msix_shutdown(struct pci_dev *dev); +extern void pci_disable_msix(struct pci_dev *dev); +extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); +extern void pci_restore_msi_state(struct pci_dev *dev); +extern int pci_msi_enabled(void); +#endif + +#ifdef CONFIG_PCIEPORTBUS +extern bool pcie_ports_disabled; +extern bool pcie_ports_auto; +#else +#define pcie_ports_disabled true +#define pcie_ports_auto false +#endif + +#ifndef CONFIG_PCIEASPM +static inline int pcie_aspm_enabled(void) { return 0; } +static inline bool pcie_aspm_support_enabled(void) { return false; } +#else +extern int pcie_aspm_enabled(void); +extern bool pcie_aspm_support_enabled(void); +#endif + +#ifdef CONFIG_PCIEAER +void pci_no_aer(void); +bool pci_aer_available(void); +#else +static inline void pci_no_aer(void) { } +static inline bool pci_aer_available(void) { return false; } +#endif + +#ifndef CONFIG_PCIE_ECRC +static inline void pcie_set_ecrc_checking(struct pci_dev *dev) +{ + return; +} +static inline void pcie_ecrc_get_policy(char *str) {}; +#else +extern void pcie_set_ecrc_checking(struct pci_dev *dev); +extern void pcie_ecrc_get_policy(char *str); +#endif + +#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1) + +#ifdef CONFIG_HT_IRQ +/* The functions a driver should call */ +int ht_create_irq(struct pci_dev *dev, int idx); +void ht_destroy_irq(unsigned int irq); +#endif /* CONFIG_HT_IRQ */ + +extern void pci_cfg_access_lock(struct pci_dev *dev); +extern bool pci_cfg_access_trylock(struct pci_dev *dev); +extern void pci_cfg_access_unlock(struct pci_dev *dev); + +/* + * PCI domain support. Sometimes called PCI segment (eg by ACPI), + * a PCI domain is defined to be a set of PCI busses which share + * configuration space. + */ +#ifdef CONFIG_PCI_DOMAINS +extern int pci_domains_supported; +#else +enum { pci_domains_supported = 0 }; +static inline int pci_domain_nr(struct pci_bus *bus) +{ + return 0; +} + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return 0; +} +#endif /* CONFIG_PCI_DOMAINS */ + +/* some architectures require additional setup to direct VGA traffic */ +typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, + unsigned int command_bits, u32 flags); +extern void pci_register_set_vga_state(arch_set_vga_state_t func); + +#else /* CONFIG_PCI is not enabled */ + +/* + * If the system does not have PCI, clearly these return errors. 
Define + * these as simple inline functions to avoid hair in drivers. + */ + +#define _PCI_NOP(o, s, t) \ + static inline int pci_##o##_config_##s(struct pci_dev *dev, \ + int where, t val) \ + { return PCIBIOS_FUNC_NOT_SUPPORTED; } + +#define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \ + _PCI_NOP(o, word, u16 x) \ + _PCI_NOP(o, dword, u32 x) +_PCI_NOP_ALL(read, *) +_PCI_NOP_ALL(write,) + +static inline struct pci_dev *pci_get_device(unsigned int vendor, + unsigned int device, + struct pci_dev *from) +{ + return NULL; +} + +static inline struct pci_dev *pci_get_subsys(unsigned int vendor, + unsigned int device, + unsigned int ss_vendor, + unsigned int ss_device, + struct pci_dev *from) +{ + return NULL; +} + +static inline struct pci_dev *pci_get_class(unsigned int class, + struct pci_dev *from) +{ + return NULL; +} + +#define pci_dev_present(ids) (0) +#define no_pci_devices() (1) +#define pci_dev_put(dev) do { } while (0) + +static inline void pci_set_master(struct pci_dev *dev) +{ } + +static inline int pci_enable_device(struct pci_dev *dev) +{ + return -EIO; +} + +static inline void pci_disable_device(struct pci_dev *dev) +{ } + +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + return -EIO; +} + +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + return -EIO; +} + +static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, + unsigned int size) +{ + return -EIO; +} + +static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, + unsigned long mask) +{ + return -EIO; +} + +static inline int pci_assign_resource(struct pci_dev *dev, int i) +{ + return -EBUSY; +} + +static inline int __pci_register_driver(struct pci_driver *drv, + struct module *owner) +{ + return 0; +} + +static inline int pci_register_driver(struct pci_driver *drv) +{ + return 0; +} + +static inline void pci_unregister_driver(struct pci_driver *drv) +{ } + +static inline int pci_find_capability(struct pci_dev *dev, int cap) +{ + return 0; +} + +static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, + int cap) +{ + return 0; +} + +static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) +{ + return 0; +} + +/* Power management related routines */ +static inline int pci_save_state(struct pci_dev *dev) +{ + return 0; +} + +static inline void pci_restore_state(struct pci_dev *dev) +{ } + +static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) +{ + return 0; +} + +static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + return 0; +} + +static inline pci_power_t pci_choose_state(struct pci_dev *dev, + pm_message_t state) +{ + return PCI_D0; +} + +static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, + int enable) +{ + return 0; +} + +static inline void pci_enable_ido(struct pci_dev *dev, unsigned long type) +{ +} + +static inline void pci_disable_ido(struct pci_dev *dev, unsigned long type) +{ +} + +static inline int pci_enable_obff(struct pci_dev *dev, unsigned long type) +{ + return 0; +} + +static inline void pci_disable_obff(struct pci_dev *dev) +{ +} + +static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) +{ + return -EIO; +} + +static inline void pci_release_regions(struct pci_dev *dev) +{ } + +#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) + +static inline void pci_block_cfg_access(struct pci_dev *dev) +{ } + +static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) +{ return 0; } + +static inline 
void pci_unblock_cfg_access(struct pci_dev *dev) +{ } + +static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) +{ return NULL; } + +static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, + unsigned int devfn) +{ return NULL; } + +static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, + unsigned int devfn) +{ return NULL; } + +static inline int pci_domain_nr(struct pci_bus *bus) +{ return 0; } + +#define dev_is_pci(d) (false) +#define dev_is_pf(d) (false) +#define dev_num_vf(d) (0) +#endif /* CONFIG_PCI */ + +/* Include architecture-dependent settings and functions */ + +#include <asm/pci.h> + +#ifndef PCIBIOS_MAX_MEM_32 +#define PCIBIOS_MAX_MEM_32 (-1) +#endif + +/* these helpers provide future and backwards compatibility + * for accessing popular PCI BAR info */ +#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) +#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) +#define pci_resource_len(dev,bar) \ + ((pci_resource_start((dev), (bar)) == 0 && \ + pci_resource_end((dev), (bar)) == \ + pci_resource_start((dev), (bar))) ? 0 : \ + \ + (pci_resource_end((dev), (bar)) - \ + pci_resource_start((dev), (bar)) + 1)) + +/* Similar to the helpers above, these manipulate per-pci_dev + * driver-specific data. They are really just a wrapper around + * the generic device structure functions of these calls. + */ +static inline void *pci_get_drvdata(struct pci_dev *pdev) +{ + return dev_get_drvdata(&pdev->dev); +} + +static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) +{ + dev_set_drvdata(&pdev->dev, data); +} + +/* If you want to know what to call your pci_dev, ask this function. + * Again, it's a wrapper around the generic device. + */ +static inline const char *pci_name(const struct pci_dev *pdev) +{ + return dev_name(&pdev->dev); +} + + +/* Some archs don't want to expose struct resource to userland as-is + * in sysfs and /proc + */ +#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER +static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, resource_size_t *start, + resource_size_t *end) +{ + *start = rsrc->start; + *end = rsrc->end; +} +#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ + + +/* + * The world is not perfect and supplies us with broken PCI devices. + * For at least a part of these bugs we need a work-around, so both + * generic (drivers/pci/quirks.c) and per-architecture code can define + * fixup hooks to be called for particular buggy devices. + */ + +struct pci_fixup { + u16 vendor; /* You can use PCI_ANY_ID here of course */ + u16 device; /* You can use PCI_ANY_ID here of course */ + u32 class; /* You can use PCI_ANY_ID here too */ + unsigned int class_shift; /* should be 0, 8, 16 */ + void (*hook)(struct pci_dev *dev); +}; + +enum pci_fixup_pass { + pci_fixup_early, /* Before probing BARs */ + pci_fixup_header, /* After reading configuration header */ + pci_fixup_final, /* Final phase of device fixups */ + pci_fixup_enable, /* pci_enable_device() time */ + pci_fixup_resume, /* pci_device_resume() */ + pci_fixup_suspend, /* pci_device_suspend */ + pci_fixup_resume_early, /* pci_device_resume_early() */ +}; + +/* Anonymous variables would be nice... 
*/ +#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ + class_shift, hook) \ + static const struct pci_fixup const __pci_fixup_##name __used \ + __attribute__((__section__(#section), aligned((sizeof(void *))))) \ + = { vendor, device, class, class_shift, hook }; + +#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ + vendor##device##hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ + vendor##device##hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ + vendor##device##hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ + vendor##device##hook, vendor, device, class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ + resume##vendor##device##hook, vendor, device, class, \ + class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ + resume_early##vendor##device##hook, vendor, device, \ + class, class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ + suspend##vendor##device##hook, vendor, device, class, \ + class_shift, hook) + +#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ + vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ + vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ + vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ + vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ + resume##vendor##device##hook, vendor, device, \ + PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ + resume_early##vendor##device##hook, vendor, device, \ + PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ + suspend##vendor##device##hook, vendor, device, \ + PCI_ANY_ID, 0, hook) + +#ifdef CONFIG_PCI_QUIRKS +void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); +#else +static inline void pci_fixup_device(enum pci_fixup_pass pass, + struct pci_dev *dev) {} +#endif + +void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); +void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); +void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); +int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name); +int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, + const 
char *name); +void pcim_iounmap_regions(struct pci_dev *pdev, int mask); + +extern int pci_pci_problems; +#define PCIPCI_FAIL 1 /* No PCI PCI DMA */ +#define PCIPCI_TRITON 2 +#define PCIPCI_NATOMA 4 +#define PCIPCI_VIAETBF 8 +#define PCIPCI_VSFX 16 +#define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ +#define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ + +extern unsigned long pci_cardbus_io_size; +extern unsigned long pci_cardbus_mem_size; +extern u8 __devinitdata pci_dfl_cache_line_size; +extern u8 pci_cache_line_size; + +extern unsigned long pci_hotplug_io_size; +extern unsigned long pci_hotplug_mem_size; + +/* Architecture specific versions may override these (weak) */ +int pcibios_add_platform_entries(struct pci_dev *dev); +void pcibios_disable_device(struct pci_dev *dev); +void pcibios_set_master(struct pci_dev *dev); +int pcibios_set_pcie_reset_state(struct pci_dev *dev, + enum pcie_reset_state state); + +#ifdef CONFIG_PCI_MMCONFIG +extern void __init pci_mmcfg_early_init(void); +extern void __init pci_mmcfg_late_init(void); +#else +static inline void pci_mmcfg_early_init(void) { } +static inline void pci_mmcfg_late_init(void) { } +#endif + +int pci_ext_cfg_avail(struct pci_dev *dev); + +void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); + +#ifdef CONFIG_PCI_IOV +extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); +extern void pci_disable_sriov(struct pci_dev *dev); +extern irqreturn_t pci_sriov_migration(struct pci_dev *dev); +extern int pci_num_vf(struct pci_dev *dev); +#else +static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) +{ + return -ENODEV; +} +static inline void pci_disable_sriov(struct pci_dev *dev) +{ +} +static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev) +{ + return IRQ_NONE; +} +static inline int pci_num_vf(struct pci_dev *dev) +{ + return 0; +} +#endif + +#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) +extern void pci_hp_create_module_link(struct pci_slot *pci_slot); +extern void pci_hp_remove_module_link(struct pci_slot *pci_slot); +#endif + +/** + * pci_pcie_cap - get the saved PCIe capability offset + * @dev: PCI device + * + * PCIe capability offset is calculated at PCI device initialization + * time and saved in the data structure. This function returns the saved + * PCIe capability offset. Using this instead of pci_find_capability() + * avoids an unnecessary search of the PCI configuration space. If you + * need to calculate the PCIe capability offset from the raw device for some + * reason, please use pci_find_capability() instead. + */ +static inline int pci_pcie_cap(struct pci_dev *dev) +{ + return dev->pcie_cap; +} + +/** + * pci_is_pcie - check if the PCI device is PCI Express capable + * @dev: PCI device + * + * Return true if the PCI device is PCI Express capable, false otherwise.
+ */ +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} + +void pci_request_acs(void); + + +#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ +#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT) + +/* Large Resource Data Type Tag Item Names */ +#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ +#define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */ +#define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */ + +#define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING) +#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA) +#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) + +/* Small Resource Data Type Tag Item Names */ +#define PCI_VPD_STIN_END 0x78 /* End */ + +#define PCI_VPD_SRDT_END PCI_VPD_STIN_END + +#define PCI_VPD_SRDT_TIN_MASK 0x78 +#define PCI_VPD_SRDT_LEN_MASK 0x07 + +#define PCI_VPD_LRDT_TAG_SIZE 3 +#define PCI_VPD_SRDT_TAG_SIZE 1 + +#define PCI_VPD_INFO_FLD_HDR_SIZE 3 + +#define PCI_VPD_RO_KEYWORD_PARTNO "PN" +#define PCI_VPD_RO_KEYWORD_MFR_ID "MN" +#define PCI_VPD_RO_KEYWORD_VENDOR0 "V0" +#define PCI_VPD_RO_KEYWORD_CHKSUM "RV" + +/** + * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length + * @lrdt: Pointer to the beginning of the Large Resource Data Type tag + * + * Returns the extracted Large Resource Data Type length. + */ +static inline u16 pci_vpd_lrdt_size(const u8 *lrdt) +{ + return (u16)lrdt[1] + ((u16)lrdt[2] << 8); +} + +/** + * pci_vpd_srdt_size - Extracts the Small Resource Data Type length + * @srdt: Pointer to the beginning of the Small Resource Data Type tag + * + * Returns the extracted Small Resource Data Type length. + */ +static inline u8 pci_vpd_srdt_size(const u8 *srdt) +{ + return (*srdt) & PCI_VPD_SRDT_LEN_MASK; +} + +/** + * pci_vpd_info_field_size - Extracts the information field length + * @info_field: Pointer to the beginning of an information field header + * + * Returns the extracted information field length. + */ +static inline u8 pci_vpd_info_field_size(const u8 *info_field) +{ + return info_field[2]; +} + +/** + * pci_vpd_find_tag - Locates the Resource Data Type tag provided + * @buf: Pointer to buffered vpd data + * @off: The offset into the buffer at which to begin the search + * @len: The length of the vpd buffer + * @rdt: The Resource Data Type to search for + * + * Returns the index where the Resource Data Type was found or + * -ENOENT otherwise. + */ +int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); + +/** + * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD + * @buf: Pointer to buffered vpd data + * @off: The offset into the buffer at which to begin the search + * @len: The length of the buffer area, relative to off, in which to search + * @kw: The keyword to search for + * + * Returns the index where the information field keyword was found or + * -ENOENT otherwise. + */ +int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, + unsigned int len, const char *kw); + +/* PCI <-> OF binding helpers */ +#ifdef CONFIG_OF +struct device_node; +extern void pci_set_of_node(struct pci_dev *dev); +extern void pci_release_of_node(struct pci_dev *dev); +extern void pci_set_bus_of_node(struct pci_bus *bus); +extern void pci_release_bus_of_node(struct pci_bus *bus); + +/* Arch may override this (weak) */ +extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus); + +static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) +{ + return pdev ?
pdev->dev.of_node : NULL; +} + +static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) +{ + return bus ? bus->dev.of_node : NULL; +} + +#else /* CONFIG_OF */ +static inline void pci_set_of_node(struct pci_dev *dev) { } +static inline void pci_release_of_node(struct pci_dev *dev) { } +static inline void pci_set_bus_of_node(struct pci_bus *bus) { } +static inline void pci_release_bus_of_node(struct pci_bus *bus) { } +#endif /* CONFIG_OF */ + +#ifdef CONFIG_EEH +static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) +{ + return pdev->dev.archdata.edev; +} +#endif + +/** + * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device + * @pdev: the PCI device + * + * if the device is PCIE, return NULL + * if the device isn't connected to a PCIe bridge (that is its parent is a + * legacy PCI bridge and the bridge is directly connected to bus 0), return its + * parent + */ +struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); + +#endif /* __KERNEL__ */ +#endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h new file mode 100644 index 00000000..45fc162c --- /dev/null +++ b/include/linux/pci_hotplug.h @@ -0,0 +1,203 @@ +/* + * PCI HotPlug Core Functions + * + * Copyright (C) 1995,2001 Compaq Computer Corporation + * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) + * Copyright (C) 2001 IBM Corp. + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to <kristen.c.accardi@intel.com> + * + */ +#ifndef _PCI_HOTPLUG_H +#define _PCI_HOTPLUG_H + +/* These values come from the PCI Express Spec */ +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +/** + * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use + * @owner: The module owner of this structure + * @mod_name: The module name (KBUILD_MODNAME) of this structure + * @enable_slot: Called when the user wants to enable a specific pci slot + * @disable_slot: Called when the user wants to disable a specific pci slot + * @set_attention_status: Called to set the specific slot's attention LED to + * the specified value + * @hardware_test: Called to run a specified hardware test on the specified + * slot. + * @get_power_status: Called to get the current power status of a slot. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. + * @get_attention_status: Called to get the current attention status of a slot. 
+ * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. + * @get_latch_status: Called to get the current latch status of a slot. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. + * @get_adapter_status: Called to see if an adapter is present in the slot or not. + * If this field is NULL, the value passed in the struct hotplug_slot_info + * will be used when this value is requested by a user. + * + * The table of function pointers that is passed to the hotplug pci core by a + * hotplug pci driver. These functions are called by the hotplug pci core when + * the user wants to do something to a specific slot (query it for information, + * set an LED, enable / disable power, etc.) + */ +struct hotplug_slot_ops { + struct module *owner; + const char *mod_name; + int (*enable_slot) (struct hotplug_slot *slot); + int (*disable_slot) (struct hotplug_slot *slot); + int (*set_attention_status) (struct hotplug_slot *slot, u8 value); + int (*hardware_test) (struct hotplug_slot *slot, u32 value); + int (*get_power_status) (struct hotplug_slot *slot, u8 *value); + int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); + int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); + int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); +}; + +/** + * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot + * @power_status: if power is enabled or not (1/0) + * @attention_status: if the attention light is enabled or not (1/0) + * @latch_status: if the latch (if any) is open or closed (1/0) + * @adapter_status: if there is a pci board present in the slot or not (1/0) + * + * Used to notify the hotplug pci core of the status of a specific slot. + */ +struct hotplug_slot_info { + u8 power_status; + u8 attention_status; + u8 latch_status; + u8 adapter_status; +}; + +/** + * struct hotplug_slot - used to register a physical slot with the hotplug pci core + * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot + * @info: pointer to the &struct hotplug_slot_info for the initial values for + * this slot. + * @release: called during pci_hp_deregister to free memory allocated in a + * hotplug_slot structure. + * @private: used by the hotplug pci controller driver to store whatever it + * needs. + */ +struct hotplug_slot { + struct hotplug_slot_ops *ops; + struct hotplug_slot_info *info; + void (*release) (struct hotplug_slot *slot); + void *private; + + /* Variables below this are for use only by the hotplug pci core.
*/ + struct list_head slot_list; + struct pci_slot *pci_slot; +}; +#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) + +static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) +{ + return pci_slot_name(slot->pci_slot); +} + +extern int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus, + int nr, const char *name, + struct module *owner, const char *mod_name); +extern int pci_hp_deregister(struct hotplug_slot *slot); +extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, + struct hotplug_slot_info *info); + +/* use a define to avoid include chaining to get THIS_MODULE & friends */ +#define pci_hp_register(slot, pbus, devnr, name) \ + __pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME) + +/* PCI Setting Record (Type 0) */ +struct hpp_type0 { + u32 revision; + u8 cache_line_size; + u8 latency_timer; + u8 enable_serr; + u8 enable_perr; +}; + +/* PCI-X Setting Record (Type 1) */ +struct hpp_type1 { + u32 revision; + u8 max_mem_read; + u8 avg_max_split; + u16 tot_max_split; +}; + +/* PCI Express Setting Record (Type 2) */ +struct hpp_type2 { + u32 revision; + u32 unc_err_mask_and; + u32 unc_err_mask_or; + u32 unc_err_sever_and; + u32 unc_err_sever_or; + u32 cor_err_mask_and; + u32 cor_err_mask_or; + u32 adv_err_cap_and; + u32 adv_err_cap_or; + u16 pci_exp_devctl_and; + u16 pci_exp_devctl_or; + u16 pci_exp_lnkctl_and; + u16 pci_exp_lnkctl_or; + u32 sec_unc_err_sever_and; + u32 sec_unc_err_sever_or; + u32 sec_unc_err_mask_and; + u32 sec_unc_err_mask_or; +}; + +struct hotplug_params { + struct hpp_type0 *t0; /* Type0: NULL if not available */ + struct hpp_type1 *t1; /* Type1: NULL if not available */ + struct hpp_type2 *t2; /* Type2: NULL if not available */ + struct hpp_type0 type0_data; + struct hpp_type1 type1_data; + struct hpp_type2 type2_data; +}; + +#ifdef CONFIG_ACPI +#include <acpi/acpi.h> +#include <acpi/acpi_bus.h> +int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp); +int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); +int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); +int acpi_pci_detect_ejectable(acpi_handle handle); +#else +static inline int pci_get_hp_params(struct pci_dev *dev, + struct hotplug_params *hpp) +{ + return -ENODEV; +} +#endif + +void pci_configure_slot(struct pci_dev *dev); +#endif + diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h new file mode 100644 index 00000000..3329965e --- /dev/null +++ b/include/linux/pci_ids.h @@ -0,0 +1,2907 @@ +/* + * PCI Class, Vendor and Device IDs + * + * Please keep sorted. + * + * Do not add new entries to this file unless the definitions + * are shared between multiple drivers. 
+ */ + +/* Device classes and subclasses */ + +#define PCI_CLASS_NOT_DEFINED 0x0000 +#define PCI_CLASS_NOT_DEFINED_VGA 0x0001 + +#define PCI_BASE_CLASS_STORAGE 0x01 +#define PCI_CLASS_STORAGE_SCSI 0x0100 +#define PCI_CLASS_STORAGE_IDE 0x0101 +#define PCI_CLASS_STORAGE_FLOPPY 0x0102 +#define PCI_CLASS_STORAGE_IPI 0x0103 +#define PCI_CLASS_STORAGE_RAID 0x0104 +#define PCI_CLASS_STORAGE_SATA 0x0106 +#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 +#define PCI_CLASS_STORAGE_SAS 0x0107 +#define PCI_CLASS_STORAGE_OTHER 0x0180 + +#define PCI_BASE_CLASS_NETWORK 0x02 +#define PCI_CLASS_NETWORK_ETHERNET 0x0200 +#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 +#define PCI_CLASS_NETWORK_FDDI 0x0202 +#define PCI_CLASS_NETWORK_ATM 0x0203 +#define PCI_CLASS_NETWORK_OTHER 0x0280 + +#define PCI_BASE_CLASS_DISPLAY 0x03 +#define PCI_CLASS_DISPLAY_VGA 0x0300 +#define PCI_CLASS_DISPLAY_XGA 0x0301 +#define PCI_CLASS_DISPLAY_3D 0x0302 +#define PCI_CLASS_DISPLAY_OTHER 0x0380 + +#define PCI_BASE_CLASS_MULTIMEDIA 0x04 +#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 +#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 +#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 +#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 + +#define PCI_BASE_CLASS_MEMORY 0x05 +#define PCI_CLASS_MEMORY_RAM 0x0500 +#define PCI_CLASS_MEMORY_FLASH 0x0501 +#define PCI_CLASS_MEMORY_OTHER 0x0580 + +#define PCI_BASE_CLASS_BRIDGE 0x06 +#define PCI_CLASS_BRIDGE_HOST 0x0600 +#define PCI_CLASS_BRIDGE_ISA 0x0601 +#define PCI_CLASS_BRIDGE_EISA 0x0602 +#define PCI_CLASS_BRIDGE_MC 0x0603 +#define PCI_CLASS_BRIDGE_PCI 0x0604 +#define PCI_CLASS_BRIDGE_PCMCIA 0x0605 +#define PCI_CLASS_BRIDGE_NUBUS 0x0606 +#define PCI_CLASS_BRIDGE_CARDBUS 0x0607 +#define PCI_CLASS_BRIDGE_RACEWAY 0x0608 +#define PCI_CLASS_BRIDGE_OTHER 0x0680 + +#define PCI_BASE_CLASS_COMMUNICATION 0x07 +#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700 +#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701 +#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702 +#define PCI_CLASS_COMMUNICATION_MODEM 0x0703 +#define PCI_CLASS_COMMUNICATION_OTHER 0x0780 + +#define PCI_BASE_CLASS_SYSTEM 0x08 +#define PCI_CLASS_SYSTEM_PIC 0x0800 +#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010 +#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020 +#define PCI_CLASS_SYSTEM_DMA 0x0801 +#define PCI_CLASS_SYSTEM_TIMER 0x0802 +#define PCI_CLASS_SYSTEM_RTC 0x0803 +#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 +#define PCI_CLASS_SYSTEM_SDHCI 0x0805 +#define PCI_CLASS_SYSTEM_OTHER 0x0880 + +#define PCI_BASE_CLASS_INPUT 0x09 +#define PCI_CLASS_INPUT_KEYBOARD 0x0900 +#define PCI_CLASS_INPUT_PEN 0x0901 +#define PCI_CLASS_INPUT_MOUSE 0x0902 +#define PCI_CLASS_INPUT_SCANNER 0x0903 +#define PCI_CLASS_INPUT_GAMEPORT 0x0904 +#define PCI_CLASS_INPUT_OTHER 0x0980 + +#define PCI_BASE_CLASS_DOCKING 0x0a +#define PCI_CLASS_DOCKING_GENERIC 0x0a00 +#define PCI_CLASS_DOCKING_OTHER 0x0a80 + +#define PCI_BASE_CLASS_PROCESSOR 0x0b +#define PCI_CLASS_PROCESSOR_386 0x0b00 +#define PCI_CLASS_PROCESSOR_486 0x0b01 +#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02 +#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10 +#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20 +#define PCI_CLASS_PROCESSOR_MIPS 0x0b30 +#define PCI_CLASS_PROCESSOR_CO 0x0b40 + +#define PCI_BASE_CLASS_SERIAL 0x0c +#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00 +#define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010 +#define PCI_CLASS_SERIAL_ACCESS 0x0c01 +#define PCI_CLASS_SERIAL_SSA 0x0c02 +#define PCI_CLASS_SERIAL_USB 0x0c03 +#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300 +#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 +#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 +#define 
PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 +#define PCI_CLASS_SERIAL_FIBER 0x0c04 +#define PCI_CLASS_SERIAL_SMBUS 0x0c05 + +#define PCI_BASE_CLASS_WIRELESS 0x0d +#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 +#define PCI_CLASS_WIRELESS_WHCI 0x0d1010 + +#define PCI_BASE_CLASS_INTELLIGENT 0x0e +#define PCI_CLASS_INTELLIGENT_I2O 0x0e00 + +#define PCI_BASE_CLASS_SATELLITE 0x0f +#define PCI_CLASS_SATELLITE_TV 0x0f00 +#define PCI_CLASS_SATELLITE_AUDIO 0x0f01 +#define PCI_CLASS_SATELLITE_VOICE 0x0f03 +#define PCI_CLASS_SATELLITE_DATA 0x0f04 + +#define PCI_BASE_CLASS_CRYPT 0x10 +#define PCI_CLASS_CRYPT_NETWORK 0x1000 +#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001 +#define PCI_CLASS_CRYPT_OTHER 0x1080 + +#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11 +#define PCI_CLASS_SP_DPIO 0x1100 +#define PCI_CLASS_SP_OTHER 0x1180 + +#define PCI_CLASS_OTHERS 0xff + +/* Vendors and devices. Sort key: vendor first, device next. */ + +#define PCI_VENDOR_ID_TTTECH 0x0357 +#define PCI_DEVICE_ID_TTTECH_MC322 0x000a + +#define PCI_VENDOR_ID_DYNALINK 0x0675 +#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702 + +#define PCI_VENDOR_ID_BERKOM 0x0871 +#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1 +#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2 +#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4 +#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8 + +#define PCI_VENDOR_ID_COMPAQ 0x0e11 +#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508 +#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc +#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10 +#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32 +#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34 +#define PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE 0xae33 +#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35 +#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40 +#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43 +#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011 +#define PCI_DEVICE_ID_COMPAQ_CISS 0xb060 +#define PCI_DEVICE_ID_COMPAQ_CISSB 0xb178 +#define PCI_DEVICE_ID_COMPAQ_CISSC 0x46 +#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130 +#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150 + +#define PCI_VENDOR_ID_NCR 0x1000 +#define PCI_VENDOR_ID_LSI_LOGIC 0x1000 +#define PCI_DEVICE_ID_NCR_53C810 0x0001 +#define PCI_DEVICE_ID_NCR_53C820 0x0002 +#define PCI_DEVICE_ID_NCR_53C825 0x0003 +#define PCI_DEVICE_ID_NCR_53C815 0x0004 +#define PCI_DEVICE_ID_LSI_53C810AP 0x0005 +#define PCI_DEVICE_ID_NCR_53C860 0x0006 +#define PCI_DEVICE_ID_LSI_53C1510 0x000a +#define PCI_DEVICE_ID_NCR_53C896 0x000b +#define PCI_DEVICE_ID_NCR_53C895 0x000c +#define PCI_DEVICE_ID_NCR_53C885 0x000d +#define PCI_DEVICE_ID_NCR_53C875 0x000f +#define PCI_DEVICE_ID_NCR_53C1510 0x0010 +#define PCI_DEVICE_ID_LSI_53C895A 0x0012 +#define PCI_DEVICE_ID_LSI_53C875A 0x0013 +#define PCI_DEVICE_ID_LSI_53C1010_33 0x0020 +#define PCI_DEVICE_ID_LSI_53C1010_66 0x0021 +#define PCI_DEVICE_ID_LSI_53C1030 0x0030 +#define PCI_DEVICE_ID_LSI_1030_53C1035 0x0032 +#define PCI_DEVICE_ID_LSI_53C1035 0x0040 +#define PCI_DEVICE_ID_NCR_53C875J 0x008f +#define PCI_DEVICE_ID_LSI_FC909 0x0621 +#define PCI_DEVICE_ID_LSI_FC929 0x0622 +#define PCI_DEVICE_ID_LSI_FC929_LAN 0x0623 +#define PCI_DEVICE_ID_LSI_FC919 0x0624 +#define PCI_DEVICE_ID_LSI_FC919_LAN 0x0625 +#define PCI_DEVICE_ID_LSI_FC929X 0x0626 +#define PCI_DEVICE_ID_LSI_FC939X 0x0642 +#define PCI_DEVICE_ID_LSI_FC949X 0x0640 +#define PCI_DEVICE_ID_LSI_FC949ES 0x0646 +#define PCI_DEVICE_ID_LSI_FC919X 0x0628 +#define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701 +#define PCI_DEVICE_ID_LSI_61C102 0x0901 +#define PCI_DEVICE_ID_LSI_63C815 0x1000 +#define PCI_DEVICE_ID_LSI_SAS1064 0x0050 +#define 
PCI_DEVICE_ID_LSI_SAS1064R 0x0411 +#define PCI_DEVICE_ID_LSI_SAS1066 0x005E +#define PCI_DEVICE_ID_LSI_SAS1068 0x0054 +#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C +#define PCI_DEVICE_ID_LSI_SAS1064E 0x0056 +#define PCI_DEVICE_ID_LSI_SAS1066E 0x005A +#define PCI_DEVICE_ID_LSI_SAS1068E 0x0058 +#define PCI_DEVICE_ID_LSI_SAS1078 0x0060 + +#define PCI_VENDOR_ID_ATI 0x1002 +/* Mach64 */ +#define PCI_DEVICE_ID_ATI_68800 0x4158 +#define PCI_DEVICE_ID_ATI_215CT222 0x4354 +#define PCI_DEVICE_ID_ATI_210888CX 0x4358 +#define PCI_DEVICE_ID_ATI_215ET222 0x4554 +/* Mach64 / Rage */ +#define PCI_DEVICE_ID_ATI_215GB 0x4742 +#define PCI_DEVICE_ID_ATI_215GD 0x4744 +#define PCI_DEVICE_ID_ATI_215GI 0x4749 +#define PCI_DEVICE_ID_ATI_215GP 0x4750 +#define PCI_DEVICE_ID_ATI_215GQ 0x4751 +#define PCI_DEVICE_ID_ATI_215XL 0x4752 +#define PCI_DEVICE_ID_ATI_215GT 0x4754 +#define PCI_DEVICE_ID_ATI_215GTB 0x4755 +#define PCI_DEVICE_ID_ATI_215_IV 0x4756 +#define PCI_DEVICE_ID_ATI_215_IW 0x4757 +#define PCI_DEVICE_ID_ATI_215_IZ 0x475A +#define PCI_DEVICE_ID_ATI_210888GX 0x4758 +#define PCI_DEVICE_ID_ATI_215_LB 0x4c42 +#define PCI_DEVICE_ID_ATI_215_LD 0x4c44 +#define PCI_DEVICE_ID_ATI_215_LG 0x4c47 +#define PCI_DEVICE_ID_ATI_215_LI 0x4c49 +#define PCI_DEVICE_ID_ATI_215_LM 0x4c4D +#define PCI_DEVICE_ID_ATI_215_LN 0x4c4E +#define PCI_DEVICE_ID_ATI_215_LR 0x4c52 +#define PCI_DEVICE_ID_ATI_215_LS 0x4c53 +#define PCI_DEVICE_ID_ATI_264_LT 0x4c54 +/* Mach64 VT */ +#define PCI_DEVICE_ID_ATI_264VT 0x5654 +#define PCI_DEVICE_ID_ATI_264VU 0x5655 +#define PCI_DEVICE_ID_ATI_264VV 0x5656 +/* Rage128 GL */ +#define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245 +#define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246 +#define PCI_DEVICE_ID_ATI_RAGE128_RG 0x5247 +/* Rage128 VR */ +#define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b +#define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c +#define PCI_DEVICE_ID_ATI_RAGE128_SE 0x5345 +#define PCI_DEVICE_ID_ATI_RAGE128_SF 0x5346 +#define PCI_DEVICE_ID_ATI_RAGE128_SG 0x5347 +#define PCI_DEVICE_ID_ATI_RAGE128_SH 0x5348 +#define PCI_DEVICE_ID_ATI_RAGE128_SK 0x534b +#define PCI_DEVICE_ID_ATI_RAGE128_SL 0x534c +#define PCI_DEVICE_ID_ATI_RAGE128_SM 0x534d +#define PCI_DEVICE_ID_ATI_RAGE128_SN 0x534e +/* Rage128 Ultra */ +#define PCI_DEVICE_ID_ATI_RAGE128_TF 0x5446 +#define PCI_DEVICE_ID_ATI_RAGE128_TL 0x544c +#define PCI_DEVICE_ID_ATI_RAGE128_TR 0x5452 +#define PCI_DEVICE_ID_ATI_RAGE128_TS 0x5453 +#define PCI_DEVICE_ID_ATI_RAGE128_TT 0x5454 +#define PCI_DEVICE_ID_ATI_RAGE128_TU 0x5455 +/* Rage128 M3 */ +#define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45 +#define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46 +/* Rage128 M4 */ +#define PCI_DEVICE_ID_ATI_RAGE128_MF 0x4d46 +#define PCI_DEVICE_ID_ATI_RAGE128_ML 0x4d4c +/* Rage128 Pro GL */ +#define PCI_DEVICE_ID_ATI_RAGE128_PA 0x5041 +#define PCI_DEVICE_ID_ATI_RAGE128_PB 0x5042 +#define PCI_DEVICE_ID_ATI_RAGE128_PC 0x5043 +#define PCI_DEVICE_ID_ATI_RAGE128_PD 0x5044 +#define PCI_DEVICE_ID_ATI_RAGE128_PE 0x5045 +#define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046 +/* Rage128 Pro VR */ +#define PCI_DEVICE_ID_ATI_RAGE128_PG 0x5047 +#define PCI_DEVICE_ID_ATI_RAGE128_PH 0x5048 +#define PCI_DEVICE_ID_ATI_RAGE128_PI 0x5049 +#define PCI_DEVICE_ID_ATI_RAGE128_PJ 0x504A +#define PCI_DEVICE_ID_ATI_RAGE128_PK 0x504B +#define PCI_DEVICE_ID_ATI_RAGE128_PL 0x504C +#define PCI_DEVICE_ID_ATI_RAGE128_PM 0x504D +#define PCI_DEVICE_ID_ATI_RAGE128_PN 0x504E +#define PCI_DEVICE_ID_ATI_RAGE128_PO 0x504F +#define PCI_DEVICE_ID_ATI_RAGE128_PP 0x5050 +#define PCI_DEVICE_ID_ATI_RAGE128_PQ 0x5051 +#define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052 
+#define PCI_DEVICE_ID_ATI_RAGE128_PS 0x5053 +#define PCI_DEVICE_ID_ATI_RAGE128_PT 0x5054 +#define PCI_DEVICE_ID_ATI_RAGE128_PU 0x5055 +#define PCI_DEVICE_ID_ATI_RAGE128_PV 0x5056 +#define PCI_DEVICE_ID_ATI_RAGE128_PW 0x5057 +#define PCI_DEVICE_ID_ATI_RAGE128_PX 0x5058 +/* Rage128 M4 */ +/* Radeon R100 */ +#define PCI_DEVICE_ID_ATI_RADEON_QD 0x5144 +#define PCI_DEVICE_ID_ATI_RADEON_QE 0x5145 +#define PCI_DEVICE_ID_ATI_RADEON_QF 0x5146 +#define PCI_DEVICE_ID_ATI_RADEON_QG 0x5147 +/* Radeon RV100 (VE) */ +#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 +#define PCI_DEVICE_ID_ATI_RADEON_QZ 0x515a +/* Radeon R200 (8500) */ +#define PCI_DEVICE_ID_ATI_RADEON_QL 0x514c +#define PCI_DEVICE_ID_ATI_RADEON_QN 0x514e +#define PCI_DEVICE_ID_ATI_RADEON_QO 0x514f +#define PCI_DEVICE_ID_ATI_RADEON_Ql 0x516c +#define PCI_DEVICE_ID_ATI_RADEON_BB 0x4242 +/* Radeon R200 (9100) */ +#define PCI_DEVICE_ID_ATI_RADEON_QM 0x514d +/* Radeon RV200 (7500) */ +#define PCI_DEVICE_ID_ATI_RADEON_QW 0x5157 +#define PCI_DEVICE_ID_ATI_RADEON_QX 0x5158 +/* Radeon NV-100 */ +/* Radeon RV250 (9000) */ +#define PCI_DEVICE_ID_ATI_RADEON_Id 0x4964 +#define PCI_DEVICE_ID_ATI_RADEON_Ie 0x4965 +#define PCI_DEVICE_ID_ATI_RADEON_If 0x4966 +#define PCI_DEVICE_ID_ATI_RADEON_Ig 0x4967 +/* Radeon RV280 (9200) */ +#define PCI_DEVICE_ID_ATI_RADEON_Ya 0x5961 +#define PCI_DEVICE_ID_ATI_RADEON_Yd 0x5964 +/* Radeon R300 (9500) */ +/* Radeon R300 (9700) */ +#define PCI_DEVICE_ID_ATI_RADEON_ND 0x4e44 +#define PCI_DEVICE_ID_ATI_RADEON_NE 0x4e45 +#define PCI_DEVICE_ID_ATI_RADEON_NF 0x4e46 +#define PCI_DEVICE_ID_ATI_RADEON_NG 0x4e47 +/* Radeon R350 (9800) */ +/* Radeon RV350 (9600) */ +/* Radeon M6 */ +#define PCI_DEVICE_ID_ATI_RADEON_LY 0x4c59 +#define PCI_DEVICE_ID_ATI_RADEON_LZ 0x4c5a +/* Radeon M7 */ +#define PCI_DEVICE_ID_ATI_RADEON_LW 0x4c57 +#define PCI_DEVICE_ID_ATI_RADEON_LX 0x4c58 +/* Radeon M9 */ +#define PCI_DEVICE_ID_ATI_RADEON_Ld 0x4c64 +#define PCI_DEVICE_ID_ATI_RADEON_Le 0x4c65 +#define PCI_DEVICE_ID_ATI_RADEON_Lf 0x4c66 +#define PCI_DEVICE_ID_ATI_RADEON_Lg 0x4c67 +/* Radeon */ +/* RadeonIGP */ +#define PCI_DEVICE_ID_ATI_RS100 0xcab0 +#define PCI_DEVICE_ID_ATI_RS200 0xcab2 +#define PCI_DEVICE_ID_ATI_RS200_B 0xcbb2 +#define PCI_DEVICE_ID_ATI_RS250 0xcab3 +#define PCI_DEVICE_ID_ATI_RS300_100 0x5830 +#define PCI_DEVICE_ID_ATI_RS300_133 0x5831 +#define PCI_DEVICE_ID_ATI_RS300_166 0x5832 +#define PCI_DEVICE_ID_ATI_RS300_200 0x5833 +#define PCI_DEVICE_ID_ATI_RS350_100 0x7830 +#define PCI_DEVICE_ID_ATI_RS350_133 0x7831 +#define PCI_DEVICE_ID_ATI_RS350_166 0x7832 +#define PCI_DEVICE_ID_ATI_RS350_200 0x7833 +#define PCI_DEVICE_ID_ATI_RS400_100 0x5a30 +#define PCI_DEVICE_ID_ATI_RS400_133 0x5a31 +#define PCI_DEVICE_ID_ATI_RS400_166 0x5a32 +#define PCI_DEVICE_ID_ATI_RS400_200 0x5a33 +#define PCI_DEVICE_ID_ATI_RS480 0x5950 +/* ATI IXP Chipset */ +#define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349 +#define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353 +#define PCI_DEVICE_ID_ATI_IXP300_SMBUS 0x4363 +#define PCI_DEVICE_ID_ATI_IXP300_IDE 0x4369 +#define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e +#define PCI_DEVICE_ID_ATI_IXP400_SMBUS 0x4372 +#define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376 +#define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379 +#define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a +#define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380 +#define PCI_DEVICE_ID_ATI_SBX00_SMBUS 0x4385 +#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c +#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390 +#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c + +#define PCI_VENDOR_ID_VLSI 0x1004 +#define PCI_DEVICE_ID_VLSI_82C592 
0x0005 +#define PCI_DEVICE_ID_VLSI_82C593 0x0006 +#define PCI_DEVICE_ID_VLSI_82C594 0x0007 +#define PCI_DEVICE_ID_VLSI_82C597 0x0009 +#define PCI_DEVICE_ID_VLSI_82C541 0x000c +#define PCI_DEVICE_ID_VLSI_82C543 0x000d +#define PCI_DEVICE_ID_VLSI_82C532 0x0101 +#define PCI_DEVICE_ID_VLSI_82C534 0x0102 +#define PCI_DEVICE_ID_VLSI_82C535 0x0104 +#define PCI_DEVICE_ID_VLSI_82C147 0x0105 +#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 + +/* AMD RD890 Chipset */ +#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23 + +#define PCI_VENDOR_ID_ADL 0x1005 +#define PCI_DEVICE_ID_ADL_2301 0x2301 + +#define PCI_VENDOR_ID_NS 0x100b +#define PCI_DEVICE_ID_NS_87415 0x0002 +#define PCI_DEVICE_ID_NS_87560_LIO 0x000e +#define PCI_DEVICE_ID_NS_87560_USB 0x0012 +#define PCI_DEVICE_ID_NS_83815 0x0020 +#define PCI_DEVICE_ID_NS_83820 0x0022 +#define PCI_DEVICE_ID_NS_CS5535_ISA 0x002b +#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d +#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e +#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f +#define PCI_DEVICE_ID_NS_GX_VIDEO 0x0030 +#define PCI_DEVICE_ID_NS_SATURN 0x0035 +#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 +#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 +#define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502 +#define PCI_DEVICE_ID_NS_SCx200_AUDIO 0x0503 +#define PCI_DEVICE_ID_NS_SCx200_VIDEO 0x0504 +#define PCI_DEVICE_ID_NS_SCx200_XBUS 0x0505 +#define PCI_DEVICE_ID_NS_SC1100_BRIDGE 0x0510 +#define PCI_DEVICE_ID_NS_SC1100_SMI 0x0511 +#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515 +#define PCI_DEVICE_ID_NS_87410 0xd001 + +#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE 0x0028 + +#define PCI_VENDOR_ID_TSENG 0x100c +#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 +#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 +#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206 +#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207 +#define PCI_DEVICE_ID_TSENG_ET6000 0x3208 + +#define PCI_VENDOR_ID_WEITEK 0x100e +#define PCI_DEVICE_ID_WEITEK_P9000 0x9001 +#define PCI_DEVICE_ID_WEITEK_P9100 0x9100 + +#define PCI_VENDOR_ID_DEC 0x1011 +#define PCI_DEVICE_ID_DEC_BRD 0x0001 +#define PCI_DEVICE_ID_DEC_TULIP 0x0002 +#define PCI_DEVICE_ID_DEC_TGA 0x0004 +#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009 +#define PCI_DEVICE_ID_DEC_TGA2 0x000D +#define PCI_DEVICE_ID_DEC_FDDI 0x000F +#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014 +#define PCI_DEVICE_ID_DEC_21142 0x0019 +#define PCI_DEVICE_ID_DEC_21052 0x0021 +#define PCI_DEVICE_ID_DEC_21150 0x0022 +#define PCI_DEVICE_ID_DEC_21152 0x0024 +#define PCI_DEVICE_ID_DEC_21153 0x0025 +#define PCI_DEVICE_ID_DEC_21154 0x0026 +#define PCI_DEVICE_ID_DEC_21285 0x1065 +#define PCI_DEVICE_ID_COMPAQ_42XX 0x0046 + +#define PCI_VENDOR_ID_CIRRUS 0x1013 +#define PCI_DEVICE_ID_CIRRUS_7548 0x0038 +#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0 +#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4 +#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8 +#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac +#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8 +#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc +#define PCI_DEVICE_ID_CIRRUS_5462 0x00d0 +#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4 +#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6 +#define PCI_DEVICE_ID_CIRRUS_6729 0x1100 +#define PCI_DEVICE_ID_CIRRUS_6832 0x1110 +#define PCI_DEVICE_ID_CIRRUS_7543 0x1202 +#define PCI_DEVICE_ID_CIRRUS_4610 0x6001 +#define PCI_DEVICE_ID_CIRRUS_4612 0x6003 +#define PCI_DEVICE_ID_CIRRUS_4615 0x6004 + +#define PCI_VENDOR_ID_IBM 0x1014 +#define PCI_DEVICE_ID_IBM_TR 0x0018 +#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e +#define PCI_DEVICE_ID_IBM_CPC710_PCI64 0x00fc +#define PCI_DEVICE_ID_IBM_SNIPE 0x0180 +#define PCI_DEVICE_ID_IBM_CITRINE 0x028C 
+#define PCI_DEVICE_ID_IBM_GEMSTONE 0xB166 +#define PCI_DEVICE_ID_IBM_OBSIDIAN 0x02BD +#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1 0x0031 +#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 +#define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A +#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 +#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 +#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 + +#define PCI_SUBVENDOR_ID_IBM 0x1014 +#define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4 + +#define PCI_VENDOR_ID_UNISYS 0x1018 +#define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C + +#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */ +#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005 + +#define PCI_VENDOR_ID_WD 0x101c +#define PCI_DEVICE_ID_WD_90C 0xc24a + +#define PCI_VENDOR_ID_AMI 0x101e +#define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960 +#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010 +#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060 + +#define PCI_VENDOR_ID_AMD 0x1022 +#define PCI_DEVICE_ID_AMD_K8_NB 0x1100 +#define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP 0x1101 +#define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102 +#define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103 +#define PCI_DEVICE_ID_AMD_10H_NB_HT 0x1200 +#define PCI_DEVICE_ID_AMD_10H_NB_MAP 0x1201 +#define PCI_DEVICE_ID_AMD_10H_NB_DRAM 0x1202 +#define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203 +#define PCI_DEVICE_ID_AMD_10H_NB_LINK 0x1204 +#define PCI_DEVICE_ID_AMD_11H_NB_HT 0x1300 +#define PCI_DEVICE_ID_AMD_11H_NB_MAP 0x1301 +#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 +#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 +#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 +#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 +#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 +#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 +#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603 +#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604 +#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605 +#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 +#define PCI_DEVICE_ID_AMD_LANCE 0x2000 +#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 +#define PCI_DEVICE_ID_AMD_SCSI 0x2020 +#define PCI_DEVICE_ID_AMD_SERENADE 0x36c0 +#define PCI_DEVICE_ID_AMD_FE_GATE_7006 0x7006 +#define PCI_DEVICE_ID_AMD_FE_GATE_7007 0x7007 +#define PCI_DEVICE_ID_AMD_FE_GATE_700C 0x700C +#define PCI_DEVICE_ID_AMD_FE_GATE_700E 0x700E +#define PCI_DEVICE_ID_AMD_COBRA_7401 0x7401 +#define PCI_DEVICE_ID_AMD_VIPER_7409 0x7409 +#define PCI_DEVICE_ID_AMD_VIPER_740B 0x740B +#define PCI_DEVICE_ID_AMD_VIPER_7410 0x7410 +#define PCI_DEVICE_ID_AMD_VIPER_7411 0x7411 +#define PCI_DEVICE_ID_AMD_VIPER_7413 0x7413 +#define PCI_DEVICE_ID_AMD_VIPER_7440 0x7440 +#define PCI_DEVICE_ID_AMD_OPUS_7441 0x7441 +#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 +#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 +#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 +#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 +#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 +#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 +#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a +#define PCI_DEVICE_ID_AMD_8111_SMBUS 0x746b +#define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d +#define PCI_DEVICE_ID_AMD_8151_0 0x7454 +#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 +#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 +#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 +#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b +#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F +#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 +#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 +#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093 +#define PCI_DEVICE_ID_AMD_CS5536_OHC 0x2094 +#define 
PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095 +#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096 +#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097 +#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A +#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081 +#define PCI_DEVICE_ID_AMD_LX_AES 0x2082 +#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c +#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800 + +#define PCI_VENDOR_ID_TRIDENT 0x1023 +#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 +#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001 +#define PCI_DEVICE_ID_TRIDENT_9320 0x9320 +#define PCI_DEVICE_ID_TRIDENT_9388 0x9388 +#define PCI_DEVICE_ID_TRIDENT_9397 0x9397 +#define PCI_DEVICE_ID_TRIDENT_939A 0x939A +#define PCI_DEVICE_ID_TRIDENT_9520 0x9520 +#define PCI_DEVICE_ID_TRIDENT_9525 0x9525 +#define PCI_DEVICE_ID_TRIDENT_9420 0x9420 +#define PCI_DEVICE_ID_TRIDENT_9440 0x9440 +#define PCI_DEVICE_ID_TRIDENT_9660 0x9660 +#define PCI_DEVICE_ID_TRIDENT_9750 0x9750 +#define PCI_DEVICE_ID_TRIDENT_9850 0x9850 +#define PCI_DEVICE_ID_TRIDENT_9880 0x9880 +#define PCI_DEVICE_ID_TRIDENT_8400 0x8400 +#define PCI_DEVICE_ID_TRIDENT_8420 0x8420 +#define PCI_DEVICE_ID_TRIDENT_8500 0x8500 + +#define PCI_VENDOR_ID_AI 0x1025 +#define PCI_DEVICE_ID_AI_M1435 0x1435 + +#define PCI_VENDOR_ID_DELL 0x1028 +#define PCI_DEVICE_ID_DELL_RACIII 0x0008 +#define PCI_DEVICE_ID_DELL_RAC4 0x0012 +#define PCI_DEVICE_ID_DELL_PERC5 0x0015 + +#define PCI_VENDOR_ID_MATROX 0x102B +#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 +#define PCI_DEVICE_ID_MATROX_MIL 0x0519 +#define PCI_DEVICE_ID_MATROX_MYS 0x051A +#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b +#define PCI_DEVICE_ID_MATROX_MYS_AGP 0x051e +#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f +#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10 +#define PCI_DEVICE_ID_MATROX_G100_MM 0x1000 +#define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001 +#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520 +#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521 +#define PCI_DEVICE_ID_MATROX_G400 0x0525 +#define PCI_DEVICE_ID_MATROX_G200EV_PCI 0x0530 +#define PCI_DEVICE_ID_MATROX_G550 0x2527 +#define PCI_DEVICE_ID_MATROX_VIA 0x4536 + +#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2 + +#define PCI_VENDOR_ID_CT 0x102c +#define PCI_DEVICE_ID_CT_69000 0x00c0 +#define PCI_DEVICE_ID_CT_65545 0x00d8 +#define PCI_DEVICE_ID_CT_65548 0x00dc +#define PCI_DEVICE_ID_CT_65550 0x00e0 +#define PCI_DEVICE_ID_CT_65554 0x00e4 +#define PCI_DEVICE_ID_CT_65555 0x00e5 + +#define PCI_VENDOR_ID_MIRO 0x1031 +#define PCI_DEVICE_ID_MIRO_36050 0x5601 +#define PCI_DEVICE_ID_MIRO_DC10PLUS 0x7efe +#define PCI_DEVICE_ID_MIRO_DC30PLUS 0xd801 + +#define PCI_VENDOR_ID_NEC 0x1033 +#define PCI_DEVICE_ID_NEC_CBUS_1 0x0001 /* PCI-Cbus Bridge */ +#define PCI_DEVICE_ID_NEC_LOCAL 0x0002 /* Local Bridge */ +#define PCI_DEVICE_ID_NEC_ATM 0x0003 /* ATM LAN Controller */ +#define PCI_DEVICE_ID_NEC_R4000 0x0004 /* R4000 Bridge */ +#define PCI_DEVICE_ID_NEC_486 0x0005 /* 486 Like Peripheral Bus Bridge */ +#define PCI_DEVICE_ID_NEC_ACCEL_1 0x0006 /* Graphic Accelerator */ +#define PCI_DEVICE_ID_NEC_UXBUS 0x0007 /* UX-Bus Bridge */ +#define PCI_DEVICE_ID_NEC_ACCEL_2 0x0008 /* Graphic Accelerator */ +#define PCI_DEVICE_ID_NEC_GRAPH 0x0009 /* PCI-CoreGraph Bridge */ +#define PCI_DEVICE_ID_NEC_VL 0x0016 /* PCI-VL Bridge */ +#define PCI_DEVICE_ID_NEC_STARALPHA2 0x002c /* STAR ALPHA2 */ +#define PCI_DEVICE_ID_NEC_CBUS_2 0x002d /* PCI-Cbus Bridge */ +#define PCI_DEVICE_ID_NEC_USB 0x0035 /* PCI-USB Host */ +#define PCI_DEVICE_ID_NEC_CBUS_3 0x003b +#define PCI_DEVICE_ID_NEC_NAPCCARD 0x003e +#define PCI_DEVICE_ID_NEC_PCX2 0x0046 /* PowerVR */ 
+#define PCI_DEVICE_ID_NEC_VRC5476 0x009b +#define PCI_DEVICE_ID_NEC_VRC4173 0x00a5 +#define PCI_DEVICE_ID_NEC_VRC5477_AC97 0x00a6 +#define PCI_DEVICE_ID_NEC_PC9821CS01 0x800c /* PC-9821-CS01 */ +#define PCI_DEVICE_ID_NEC_PC9821NRB06 0x800d /* PC-9821NR-B06 */ + +#define PCI_VENDOR_ID_FD 0x1036 +#define PCI_DEVICE_ID_FD_36C70 0x0000 + +#define PCI_VENDOR_ID_SI 0x1039 +#define PCI_DEVICE_ID_SI_5591_AGP 0x0001 +#define PCI_DEVICE_ID_SI_6202 0x0002 +#define PCI_DEVICE_ID_SI_503 0x0008 +#define PCI_DEVICE_ID_SI_ACPI 0x0009 +#define PCI_DEVICE_ID_SI_SMBUS 0x0016 +#define PCI_DEVICE_ID_SI_LPC 0x0018 +#define PCI_DEVICE_ID_SI_5597_VGA 0x0200 +#define PCI_DEVICE_ID_SI_6205 0x0205 +#define PCI_DEVICE_ID_SI_501 0x0406 +#define PCI_DEVICE_ID_SI_496 0x0496 +#define PCI_DEVICE_ID_SI_300 0x0300 +#define PCI_DEVICE_ID_SI_315H 0x0310 +#define PCI_DEVICE_ID_SI_315 0x0315 +#define PCI_DEVICE_ID_SI_315PRO 0x0325 +#define PCI_DEVICE_ID_SI_530 0x0530 +#define PCI_DEVICE_ID_SI_540 0x0540 +#define PCI_DEVICE_ID_SI_550 0x0550 +#define PCI_DEVICE_ID_SI_540_VGA 0x5300 +#define PCI_DEVICE_ID_SI_550_VGA 0x5315 +#define PCI_DEVICE_ID_SI_620 0x0620 +#define PCI_DEVICE_ID_SI_630 0x0630 +#define PCI_DEVICE_ID_SI_633 0x0633 +#define PCI_DEVICE_ID_SI_635 0x0635 +#define PCI_DEVICE_ID_SI_640 0x0640 +#define PCI_DEVICE_ID_SI_645 0x0645 +#define PCI_DEVICE_ID_SI_646 0x0646 +#define PCI_DEVICE_ID_SI_648 0x0648 +#define PCI_DEVICE_ID_SI_650 0x0650 +#define PCI_DEVICE_ID_SI_651 0x0651 +#define PCI_DEVICE_ID_SI_655 0x0655 +#define PCI_DEVICE_ID_SI_661 0x0661 +#define PCI_DEVICE_ID_SI_730 0x0730 +#define PCI_DEVICE_ID_SI_733 0x0733 +#define PCI_DEVICE_ID_SI_630_VGA 0x6300 +#define PCI_DEVICE_ID_SI_735 0x0735 +#define PCI_DEVICE_ID_SI_740 0x0740 +#define PCI_DEVICE_ID_SI_741 0x0741 +#define PCI_DEVICE_ID_SI_745 0x0745 +#define PCI_DEVICE_ID_SI_746 0x0746 +#define PCI_DEVICE_ID_SI_755 0x0755 +#define PCI_DEVICE_ID_SI_760 0x0760 +#define PCI_DEVICE_ID_SI_900 0x0900 +#define PCI_DEVICE_ID_SI_961 0x0961 +#define PCI_DEVICE_ID_SI_962 0x0962 +#define PCI_DEVICE_ID_SI_963 0x0963 +#define PCI_DEVICE_ID_SI_965 0x0965 +#define PCI_DEVICE_ID_SI_966 0x0966 +#define PCI_DEVICE_ID_SI_968 0x0968 +#define PCI_DEVICE_ID_SI_1180 0x1180 +#define PCI_DEVICE_ID_SI_5511 0x5511 +#define PCI_DEVICE_ID_SI_5513 0x5513 +#define PCI_DEVICE_ID_SI_5517 0x5517 +#define PCI_DEVICE_ID_SI_5518 0x5518 +#define PCI_DEVICE_ID_SI_5571 0x5571 +#define PCI_DEVICE_ID_SI_5581 0x5581 +#define PCI_DEVICE_ID_SI_5582 0x5582 +#define PCI_DEVICE_ID_SI_5591 0x5591 +#define PCI_DEVICE_ID_SI_5596 0x5596 +#define PCI_DEVICE_ID_SI_5597 0x5597 +#define PCI_DEVICE_ID_SI_5598 0x5598 +#define PCI_DEVICE_ID_SI_5600 0x5600 +#define PCI_DEVICE_ID_SI_7012 0x7012 +#define PCI_DEVICE_ID_SI_7013 0x7013 +#define PCI_DEVICE_ID_SI_7016 0x7016 +#define PCI_DEVICE_ID_SI_7018 0x7018 + +#define PCI_VENDOR_ID_HP 0x103c +#define PCI_DEVICE_ID_HP_VISUALIZE_EG 0x1005 +#define PCI_DEVICE_ID_HP_VISUALIZE_FX6 0x1006 +#define PCI_DEVICE_ID_HP_VISUALIZE_FX4 0x1008 +#define PCI_DEVICE_ID_HP_VISUALIZE_FX2 0x100a +#define PCI_DEVICE_ID_HP_TACHYON 0x1028 +#define PCI_DEVICE_ID_HP_TACHLITE 0x1029 +#define PCI_DEVICE_ID_HP_J2585A 0x1030 +#define PCI_DEVICE_ID_HP_J2585B 0x1031 +#define PCI_DEVICE_ID_HP_J2973A 0x1040 +#define PCI_DEVICE_ID_HP_J2970A 0x1042 +#define PCI_DEVICE_ID_HP_DIVA 0x1048 +#define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049 +#define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A +#define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B +#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1 +#define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b 
+#define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223 +#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226 +#define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227 +#define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a +#define PCI_DEVICE_ID_HP_PCIX_LBA 0x122e +#define PCI_DEVICE_ID_HP_SX1000_IOC 0x127c +#define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282 +#define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290 +#define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301 +#define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a +#define PCI_DEVICE_ID_HP_CISSA 0x3220 +#define PCI_DEVICE_ID_HP_CISSC 0x3230 +#define PCI_DEVICE_ID_HP_CISSD 0x3238 +#define PCI_DEVICE_ID_HP_CISSE 0x323a +#define PCI_DEVICE_ID_HP_CISSF 0x323b +#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 + +#define PCI_VENDOR_ID_PCTECH 0x1042 +#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000 +#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001 +#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020 + +#define PCI_VENDOR_ID_ASUSTEK 0x1043 +#define PCI_DEVICE_ID_ASUSTEK_0675 0x0675 + +#define PCI_VENDOR_ID_DPT 0x1044 +#define PCI_DEVICE_ID_DPT 0xa400 + +#define PCI_VENDOR_ID_OPTI 0x1045 +#define PCI_DEVICE_ID_OPTI_82C558 0xc558 +#define PCI_DEVICE_ID_OPTI_82C621 0xc621 +#define PCI_DEVICE_ID_OPTI_82C700 0xc700 +#define PCI_DEVICE_ID_OPTI_82C825 0xd568 + +#define PCI_VENDOR_ID_ELSA 0x1048 +#define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000 +#define PCI_DEVICE_ID_ELSA_QS3000 0x3000 + +#define PCI_VENDOR_ID_STMICRO 0x104A +#define PCI_DEVICE_ID_STMICRO_USB_HOST 0xCC00 +#define PCI_DEVICE_ID_STMICRO_USB_OHCI 0xCC01 +#define PCI_DEVICE_ID_STMICRO_USB_OTG 0xCC02 +#define PCI_DEVICE_ID_STMICRO_UART_HWFC 0xCC03 +#define PCI_DEVICE_ID_STMICRO_UART_NO_HWFC 0xCC04 +#define PCI_DEVICE_ID_STMICRO_SOC_DMA 0xCC05 +#define PCI_DEVICE_ID_STMICRO_SATA 0xCC06 +#define PCI_DEVICE_ID_STMICRO_I2C 0xCC07 +#define PCI_DEVICE_ID_STMICRO_SPI_HS 0xCC08 +#define PCI_DEVICE_ID_STMICRO_MAC 0xCC09 +#define PCI_DEVICE_ID_STMICRO_SDIO_EMMC 0xCC0A +#define PCI_DEVICE_ID_STMICRO_SDIO 0xCC0B +#define PCI_DEVICE_ID_STMICRO_GPIO 0xCC0C +#define PCI_DEVICE_ID_STMICRO_VIP 0xCC0D +#define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_DMA 0xCC0E +#define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_SRCS 0xCC0F +#define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_MSPS 0xCC10 +#define PCI_DEVICE_ID_STMICRO_CAN 0xCC11 +#define PCI_DEVICE_ID_STMICRO_MLB 0xCC12 +#define PCI_DEVICE_ID_STMICRO_DBP 0xCC13 +#define PCI_DEVICE_ID_STMICRO_SATA_PHY 0xCC14 +#define PCI_DEVICE_ID_STMICRO_ESRAM 0xCC15 +#define PCI_DEVICE_ID_STMICRO_VIC 0xCC16 + +#define PCI_VENDOR_ID_BUSLOGIC 0x104B +#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 +#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 +#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130 + +#define PCI_VENDOR_ID_TI 0x104c +#define PCI_DEVICE_ID_TI_TVP4020 0x3d07 +#define PCI_DEVICE_ID_TI_4450 0x8011 +#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 +#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033 +#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 +#define PCI_DEVICE_ID_TI_X515 0x8036 +#define PCI_DEVICE_ID_TI_XX12 0x8039 +#define PCI_DEVICE_ID_TI_XX12_FM 0x803b +#define PCI_DEVICE_ID_TI_XIO2000A 0x8231 +#define PCI_DEVICE_ID_TI_1130 0xac12 +#define PCI_DEVICE_ID_TI_1031 0xac13 +#define PCI_DEVICE_ID_TI_1131 0xac15 +#define PCI_DEVICE_ID_TI_1250 0xac16 +#define PCI_DEVICE_ID_TI_1220 0xac17 +#define PCI_DEVICE_ID_TI_1221 0xac19 +#define PCI_DEVICE_ID_TI_1210 0xac1a +#define PCI_DEVICE_ID_TI_1450 0xac1b +#define PCI_DEVICE_ID_TI_1225 0xac1c +#define PCI_DEVICE_ID_TI_1251A 0xac1d +#define PCI_DEVICE_ID_TI_1211 0xac1e +#define PCI_DEVICE_ID_TI_1251B 0xac1f +#define PCI_DEVICE_ID_TI_4410 0xac41 +#define 
PCI_DEVICE_ID_TI_4451 0xac42 +#define PCI_DEVICE_ID_TI_4510 0xac44 +#define PCI_DEVICE_ID_TI_4520 0xac46 +#define PCI_DEVICE_ID_TI_7510 0xac47 +#define PCI_DEVICE_ID_TI_7610 0xac48 +#define PCI_DEVICE_ID_TI_7410 0xac49 +#define PCI_DEVICE_ID_TI_1410 0xac50 +#define PCI_DEVICE_ID_TI_1420 0xac51 +#define PCI_DEVICE_ID_TI_1451A 0xac52 +#define PCI_DEVICE_ID_TI_1620 0xac54 +#define PCI_DEVICE_ID_TI_1520 0xac55 +#define PCI_DEVICE_ID_TI_1510 0xac56 +#define PCI_DEVICE_ID_TI_X620 0xac8d +#define PCI_DEVICE_ID_TI_X420 0xac8e +#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f + +#define PCI_VENDOR_ID_SONY 0x104d + +/* Winbond have two vendor IDs! See 0x10ad as well */ +#define PCI_VENDOR_ID_WINBOND2 0x1050 +#define PCI_DEVICE_ID_WINBOND2_89C940F 0x5a5a +#define PCI_DEVICE_ID_WINBOND2_6692 0x6692 + +#define PCI_VENDOR_ID_ANIGMA 0x1051 +#define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 + +#define PCI_VENDOR_ID_EFAR 0x1055 +#define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 +#define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 + +#define PCI_VENDOR_ID_MOTOROLA 0x1057 +#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001 +#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002 +#define PCI_DEVICE_ID_MOTOROLA_MPC107 0x0004 +#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801 +#define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802 +#define PCI_DEVICE_ID_MOTOROLA_HAWK 0x4803 +#define PCI_DEVICE_ID_MOTOROLA_HARRIER 0x480b +#define PCI_DEVICE_ID_MOTOROLA_MPC5200 0x5803 +#define PCI_DEVICE_ID_MOTOROLA_MPC5200B 0x5809 + +#define PCI_VENDOR_ID_PROMISE 0x105a +#define PCI_DEVICE_ID_PROMISE_20265 0x0d30 +#define PCI_DEVICE_ID_PROMISE_20267 0x4d30 +#define PCI_DEVICE_ID_PROMISE_20246 0x4d33 +#define PCI_DEVICE_ID_PROMISE_20262 0x4d38 +#define PCI_DEVICE_ID_PROMISE_20263 0x0D38 +#define PCI_DEVICE_ID_PROMISE_20268 0x4d68 +#define PCI_DEVICE_ID_PROMISE_20269 0x4d69 +#define PCI_DEVICE_ID_PROMISE_20270 0x6268 +#define PCI_DEVICE_ID_PROMISE_20271 0x6269 +#define PCI_DEVICE_ID_PROMISE_20275 0x1275 +#define PCI_DEVICE_ID_PROMISE_20276 0x5275 +#define PCI_DEVICE_ID_PROMISE_20277 0x7275 + +#define PCI_VENDOR_ID_FOXCONN 0x105b + +#define PCI_VENDOR_ID_UMC 0x1060 +#define PCI_DEVICE_ID_UMC_UM8673F 0x0101 +#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a +#define PCI_DEVICE_ID_UMC_UM8886A 0x886a + +#define PCI_VENDOR_ID_PICOPOWER 0x1066 +#define PCI_DEVICE_ID_PICOPOWER_PT86C523 0x0002 +#define PCI_DEVICE_ID_PICOPOWER_PT86C523BBP 0x8002 + +#define PCI_VENDOR_ID_MYLEX 0x1069 +#define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001 +#define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002 +#define PCI_DEVICE_ID_MYLEX_DAC960_PG 0x0010 +#define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020 +#define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050 +#define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56 +#define PCI_DEVICE_ID_MYLEX_DAC960_GEM 0xB166 + +#define PCI_VENDOR_ID_APPLE 0x106b +#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001 +#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e +#define PCI_DEVICE_ID_APPLE_UNI_N_FW 0x0018 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020 +#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021 +#define PCI_DEVICE_ID_APPLE_UNI_N_GMACP 0x0024 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d +#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e +#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 +#define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034 +#define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b +#define PCI_DEVICE_ID_APPLE_K2_ATA100 0x0043 +#define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b +#define PCI_DEVICE_ID_APPLE_K2_GMAC 0x004c +#define PCI_DEVICE_ID_APPLE_SH_ATA 0x0050 
+#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051 +#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058 +#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059 +#define PCI_DEVICE_ID_APPLE_U4_PCIE 0x005b +#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066 +#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069 +#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a +#define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b +#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645 + +#define PCI_VENDOR_ID_YAMAHA 0x1073 +#define PCI_DEVICE_ID_YAMAHA_724 0x0004 +#define PCI_DEVICE_ID_YAMAHA_724F 0x000d +#define PCI_DEVICE_ID_YAMAHA_740 0x000a +#define PCI_DEVICE_ID_YAMAHA_740C 0x000c +#define PCI_DEVICE_ID_YAMAHA_744 0x0010 +#define PCI_DEVICE_ID_YAMAHA_754 0x0012 + +#define PCI_VENDOR_ID_QLOGIC 0x1077 +#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016 +#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020 +#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080 +#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216 +#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240 +#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280 +#define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100 +#define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200 +#define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300 +#define PCI_DEVICE_ID_QLOGIC_ISP2312 0x2312 +#define PCI_DEVICE_ID_QLOGIC_ISP2322 0x2322 +#define PCI_DEVICE_ID_QLOGIC_ISP6312 0x6312 +#define PCI_DEVICE_ID_QLOGIC_ISP6322 0x6322 +#define PCI_DEVICE_ID_QLOGIC_ISP2422 0x2422 +#define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432 +#define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512 +#define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522 +#define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422 +#define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432 + +#define PCI_VENDOR_ID_CYRIX 0x1078 +#define PCI_DEVICE_ID_CYRIX_5510 0x0000 +#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001 +#define PCI_DEVICE_ID_CYRIX_5520 0x0002 +#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100 +#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102 +#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103 +#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104 + +#define PCI_VENDOR_ID_CONTAQ 0x1080 +#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693 + +#define PCI_VENDOR_ID_OLICOM 0x108d +#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012 +#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013 +#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 + +#define PCI_VENDOR_ID_SUN 0x108e +#define PCI_DEVICE_ID_SUN_EBUS 0x1000 +#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001 +#define PCI_DEVICE_ID_SUN_RIO_EBUS 0x1100 +#define PCI_DEVICE_ID_SUN_RIO_GEM 0x1101 +#define PCI_DEVICE_ID_SUN_RIO_1394 0x1102 +#define PCI_DEVICE_ID_SUN_RIO_USB 0x1103 +#define PCI_DEVICE_ID_SUN_GEM 0x2bad +#define PCI_DEVICE_ID_SUN_SIMBA 0x5000 +#define PCI_DEVICE_ID_SUN_PBM 0x8000 +#define PCI_DEVICE_ID_SUN_SCHIZO 0x8001 +#define PCI_DEVICE_ID_SUN_SABRE 0xa000 +#define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001 +#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 +#define PCI_DEVICE_ID_SUN_CASSINI 0xabba + +#define PCI_VENDOR_ID_NI 0x1093 +#define PCI_DEVICE_ID_NI_PCI2322 0xd130 +#define PCI_DEVICE_ID_NI_PCI2324 0xd140 +#define PCI_DEVICE_ID_NI_PCI2328 0xd150 +#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190 +#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0 +#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0 +#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0 +#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0 +#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1 +#define PCI_DEVICE_ID_NI_PCI2322I 0xd250 +#define PCI_DEVICE_ID_NI_PCI2324I 0xd270 +#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0 +#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080 +#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db +#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd +#define PCI_DEVICE_ID_NI_PCI8430_2324 
0x70df +#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2 +#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4 +#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6 +#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7 +#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8 +#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea +#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec +#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee + +#define PCI_VENDOR_ID_CMD 0x1095 +#define PCI_DEVICE_ID_CMD_643 0x0643 +#define PCI_DEVICE_ID_CMD_646 0x0646 +#define PCI_DEVICE_ID_CMD_648 0x0648 +#define PCI_DEVICE_ID_CMD_649 0x0649 + +#define PCI_DEVICE_ID_SII_680 0x0680 +#define PCI_DEVICE_ID_SII_3112 0x3112 +#define PCI_DEVICE_ID_SII_1210SA 0x0240 + +#define PCI_VENDOR_ID_BROOKTREE 0x109e +#define PCI_DEVICE_ID_BROOKTREE_878 0x0878 +#define PCI_DEVICE_ID_BROOKTREE_879 0x0879 + +#define PCI_VENDOR_ID_SGI 0x10a9 +#define PCI_DEVICE_ID_SGI_IOC3 0x0003 +#define PCI_DEVICE_ID_SGI_LITHIUM 0x1002 +#define PCI_DEVICE_ID_SGI_IOC4 0x100a + +#define PCI_VENDOR_ID_WINBOND 0x10ad +#define PCI_DEVICE_ID_WINBOND_82C105 0x0105 +#define PCI_DEVICE_ID_WINBOND_83C553 0x0565 + +#define PCI_VENDOR_ID_PLX 0x10b5 +#define PCI_DEVICE_ID_PLX_R685 0x1030 +#define PCI_DEVICE_ID_PLX_ROMULUS 0x106a +#define PCI_DEVICE_ID_PLX_SPCOM800 0x1076 +#define PCI_DEVICE_ID_PLX_1077 0x1077 +#define PCI_DEVICE_ID_PLX_SPCOM200 0x1103 +#define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151 +#define PCI_DEVICE_ID_PLX_R753 0x1152 +#define PCI_DEVICE_ID_PLX_OLITEC 0x1187 +#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 +#define PCI_DEVICE_ID_PLX_9030 0x9030 +#define PCI_DEVICE_ID_PLX_9050 0x9050 +#define PCI_DEVICE_ID_PLX_9056 0x9056 +#define PCI_DEVICE_ID_PLX_9080 0x9080 +#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 + +#define PCI_VENDOR_ID_MADGE 0x10b6 +#define PCI_DEVICE_ID_MADGE_MK2 0x0002 + +#define PCI_VENDOR_ID_3COM 0x10b7 +#define PCI_DEVICE_ID_3COM_3C985 0x0001 +#define PCI_DEVICE_ID_3COM_3C940 0x1700 +#define PCI_DEVICE_ID_3COM_3C339 0x3390 +#define PCI_DEVICE_ID_3COM_3C359 0x3590 +#define PCI_DEVICE_ID_3COM_3C940B 0x80eb +#define PCI_DEVICE_ID_3COM_3CR990 0x9900 +#define PCI_DEVICE_ID_3COM_3CR990_TX_95 0x9902 +#define PCI_DEVICE_ID_3COM_3CR990_TX_97 0x9903 +#define PCI_DEVICE_ID_3COM_3CR990B 0x9904 +#define PCI_DEVICE_ID_3COM_3CR990_FX 0x9905 +#define PCI_DEVICE_ID_3COM_3CR990SVR95 0x9908 +#define PCI_DEVICE_ID_3COM_3CR990SVR97 0x9909 +#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a + +#define PCI_VENDOR_ID_AL 0x10b9 +#define PCI_DEVICE_ID_AL_M1533 0x1533 +#define PCI_DEVICE_ID_AL_M1535 0x1535 +#define PCI_DEVICE_ID_AL_M1541 0x1541 +#define PCI_DEVICE_ID_AL_M1563 0x1563 +#define PCI_DEVICE_ID_AL_M1621 0x1621 +#define PCI_DEVICE_ID_AL_M1631 0x1631 +#define PCI_DEVICE_ID_AL_M1632 0x1632 +#define PCI_DEVICE_ID_AL_M1641 0x1641 +#define PCI_DEVICE_ID_AL_M1644 0x1644 +#define PCI_DEVICE_ID_AL_M1647 0x1647 +#define PCI_DEVICE_ID_AL_M1651 0x1651 +#define PCI_DEVICE_ID_AL_M1671 0x1671 +#define PCI_DEVICE_ID_AL_M1681 0x1681 +#define PCI_DEVICE_ID_AL_M1683 0x1683 +#define PCI_DEVICE_ID_AL_M1689 0x1689 +#define PCI_DEVICE_ID_AL_M5219 0x5219 +#define PCI_DEVICE_ID_AL_M5228 0x5228 +#define PCI_DEVICE_ID_AL_M5229 0x5229 +#define PCI_DEVICE_ID_AL_M5451 0x5451 +#define PCI_DEVICE_ID_AL_M7101 0x7101 + +#define PCI_VENDOR_ID_NEOMAGIC 0x10c8 +#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005 +#define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006 +#define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016 + +#define PCI_VENDOR_ID_TCONRAD 0x10da +#define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508 + +#define PCI_VENDOR_ID_NVIDIA 0x10de 
+#define PCI_DEVICE_ID_NVIDIA_TNT 0x0020 +#define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028 +#define PCI_DEVICE_ID_NVIDIA_UTNT2 0x0029 +#define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN 0x002a +#define PCI_DEVICE_ID_NVIDIA_VTNT2 0x002C +#define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE 0x0042 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x0045 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000 0x004E +#define PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS 0x0052 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055 +#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059 +#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d +#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064 +#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 +#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069 +#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a +#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084 +#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085 +#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089 +#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a +#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800 0x0098 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX 0x0099 +#define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0 +#define PCI_DEVICE_ID_GEFORCE_6800A 0x00c1 +#define PCI_DEVICE_ID_GEFORCE_6800A_LE 0x00c2 +#define PCI_DEVICE_ID_GEFORCE_GO_6800 0x00c8 +#define PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA 0x00c9 +#define PCI_DEVICE_ID_QUADRO_FX_GO1400 0x00cc +#define PCI_DEVICE_ID_QUADRO_FX_1400 0x00ce +#define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5 +#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9 +#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5 +#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1 0x00f1 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9 +#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 +#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX 0x0110 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2 0x0111 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO 0x0112 +#define PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR 0x0113 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT 0x0140 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600 0x0141 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL 0x0145 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540 0x014E +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200 0x014F +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS 
0x0150 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2 0x0151 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA 0x0152 +#define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO 0x0153 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE 0x0161 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200 0x0164 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250 0x0166 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1 0x0167 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1 0x0168 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460 0x0170 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440 0x0171 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420 0x0172 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE 0x0173 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO 0x0174 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO 0x0175 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32 0x0176 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO 0x0177 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL 0x0178 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64 0x0179 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_200 0x017A +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL 0x017B +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL 0x017C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16 0x017D +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000 0x0185 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO 0x0186 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO 0x0187 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL 0x0188 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC 0x0189 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS 0x018A +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL 0x018B +#define PCI_DEVICE_ID_NVIDIA_IGEFORCE2 0x01a0 +#define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4 +#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc +#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2 0x0202 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC 0x0203 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B 0x0211 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE 0x0212 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT 0x0215 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600 0x0250 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400 0x0251 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200 0x0253 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL 0x0258 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL 0x0259 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL 0x025B +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS 0x0264 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE 0x0265 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS 0x0368 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO 0x0286 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL 0x0288 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL 0x0289 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL 0x028C 
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA 0x0301 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800 0x0302 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000 0x0308 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000 0x0309 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA 0x0311 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600 0x0312 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE 0x0314 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600 0x031A +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650 0x031B +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700 0x031C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200 0x0320 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA 0x0321 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1 0x0322 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE 0x0323 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200 0x0324 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250 0x0325 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500 0x0326 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100 0x0327 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32 0x0328 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200 0x0329 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI 0x032A +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500 0x032B +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300 0x032C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100 0x032D +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA 0x0330 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900 0x0331 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT 0x0332 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA 0x0333 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT 0x0334 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000 0x0338 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700 0x033F +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA 0x0341 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700 0x0342 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE 0x0343 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE 0x0344 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1 0x0347 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E +#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0 0x0360 +#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4 0x0364 +#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS 0x0542 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 + +#define PCI_VENDOR_ID_IMS 0x10e0 +#define PCI_DEVICE_ID_IMS_TT128 0x9128 +#define PCI_DEVICE_ID_IMS_TT3D 0x9135 + +#define PCI_VENDOR_ID_INTERG 0x10ea +#define PCI_DEVICE_ID_INTERG_1682 0x1682 +#define PCI_DEVICE_ID_INTERG_2000 0x2000 +#define PCI_DEVICE_ID_INTERG_2010 0x2010 +#define PCI_DEVICE_ID_INTERG_5000 0x5000 +#define PCI_DEVICE_ID_INTERG_5050 0x5050 + +#define PCI_VENDOR_ID_REALTEK 0x10ec +#define 
PCI_DEVICE_ID_REALTEK_8139 0x8139 + +#define PCI_VENDOR_ID_XILINX 0x10ee +#define PCI_DEVICE_ID_RME_DIGI96 0x3fc0 +#define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1 +#define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2 +#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3 +#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5 +#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6 + +#define PCI_VENDOR_ID_INIT 0x1101 + +#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */ +#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002 +#define PCI_DEVICE_ID_CREATIVE_20K1 0x0005 +#define PCI_DEVICE_ID_CREATIVE_20K2 0x000b +#define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043 +#define PCI_SUBDEVICE_ID_CREATIVE_SB1270 0x0062 +#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000 + +#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ +#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938 + +#define PCI_VENDOR_ID_TTI 0x1103 +#define PCI_DEVICE_ID_TTI_HPT343 0x0003 +#define PCI_DEVICE_ID_TTI_HPT366 0x0004 +#define PCI_DEVICE_ID_TTI_HPT372 0x0005 +#define PCI_DEVICE_ID_TTI_HPT302 0x0006 +#define PCI_DEVICE_ID_TTI_HPT371 0x0007 +#define PCI_DEVICE_ID_TTI_HPT374 0x0008 +#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? */ + +#define PCI_VENDOR_ID_VIA 0x1106 +#define PCI_DEVICE_ID_VIA_8763_0 0x0198 +#define PCI_DEVICE_ID_VIA_8380_0 0x0204 +#define PCI_DEVICE_ID_VIA_3238_0 0x0238 +#define PCI_DEVICE_ID_VIA_PT880 0x0258 +#define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308 +#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259 +#define PCI_DEVICE_ID_VIA_3269_0 0x0269 +#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282 +#define PCI_DEVICE_ID_VIA_3296_0 0x0296 +#define PCI_DEVICE_ID_VIA_8363_0 0x0305 +#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314 +#define PCI_DEVICE_ID_VIA_P4M890 0x0327 +#define PCI_DEVICE_ID_VIA_VT3324 0x0324 +#define PCI_DEVICE_ID_VIA_VT3336 0x0336 +#define PCI_DEVICE_ID_VIA_VT3351 0x0351 +#define PCI_DEVICE_ID_VIA_VT3364 0x0364 +#define PCI_DEVICE_ID_VIA_8371_0 0x0391 +#define PCI_DEVICE_ID_VIA_6415 0x0415 +#define PCI_DEVICE_ID_VIA_8501_0 0x0501 +#define PCI_DEVICE_ID_VIA_82C561 0x0561 +#define PCI_DEVICE_ID_VIA_82C586_1 0x0571 +#define PCI_DEVICE_ID_VIA_82C576 0x0576 +#define PCI_DEVICE_ID_VIA_82C586_0 0x0586 +#define PCI_DEVICE_ID_VIA_82C596 0x0596 +#define PCI_DEVICE_ID_VIA_82C597_0 0x0597 +#define PCI_DEVICE_ID_VIA_82C598_0 0x0598 +#define PCI_DEVICE_ID_VIA_8601_0 0x0601 +#define PCI_DEVICE_ID_VIA_8605_0 0x0605 +#define PCI_DEVICE_ID_VIA_82C686 0x0686 +#define PCI_DEVICE_ID_VIA_82C691_0 0x0691 +#define PCI_DEVICE_ID_VIA_82C576_1 0x1571 +#define PCI_DEVICE_ID_VIA_82C586_2 0x3038 +#define PCI_DEVICE_ID_VIA_82C586_3 0x3040 +#define PCI_DEVICE_ID_VIA_82C596_3 0x3050 +#define PCI_DEVICE_ID_VIA_82C596B_3 0x3051 +#define PCI_DEVICE_ID_VIA_82C686_4 0x3057 +#define PCI_DEVICE_ID_VIA_82C686_5 0x3058 +#define PCI_DEVICE_ID_VIA_8233_5 0x3059 +#define PCI_DEVICE_ID_VIA_8233_0 0x3074 +#define PCI_DEVICE_ID_VIA_8633_0 0x3091 +#define PCI_DEVICE_ID_VIA_8367_0 0x3099 +#define PCI_DEVICE_ID_VIA_8653_0 0x3101 +#define PCI_DEVICE_ID_VIA_8622 0x3102 +#define PCI_DEVICE_ID_VIA_8235_USB_2 0x3104 +#define PCI_DEVICE_ID_VIA_8233C_0 0x3109 +#define PCI_DEVICE_ID_VIA_8361 0x3112 +#define PCI_DEVICE_ID_VIA_XM266 0x3116 +#define PCI_DEVICE_ID_VIA_612X 0x3119 +#define PCI_DEVICE_ID_VIA_862X_0 0x3123 +#define PCI_DEVICE_ID_VIA_8753_0 0x3128 +#define PCI_DEVICE_ID_VIA_8233A 0x3147 +#define 
PCI_DEVICE_ID_VIA_8703_51_0 0x3148 +#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149 +#define PCI_DEVICE_ID_VIA_XN266 0x3156 +#define PCI_DEVICE_ID_VIA_6410 0x3164 +#define PCI_DEVICE_ID_VIA_8754C_0 0x3168 +#define PCI_DEVICE_ID_VIA_8235 0x3177 +#define PCI_DEVICE_ID_VIA_8385_0 0x3188 +#define PCI_DEVICE_ID_VIA_8377_0 0x3189 +#define PCI_DEVICE_ID_VIA_8378_0 0x3205 +#define PCI_DEVICE_ID_VIA_8783_0 0x3208 +#define PCI_DEVICE_ID_VIA_8237 0x3227 +#define PCI_DEVICE_ID_VIA_8251 0x3287 +#define PCI_DEVICE_ID_VIA_8261 0x3402 +#define PCI_DEVICE_ID_VIA_8237A 0x3337 +#define PCI_DEVICE_ID_VIA_8237S 0x3372 +#define PCI_DEVICE_ID_VIA_SATA_EIDE 0x5324 +#define PCI_DEVICE_ID_VIA_8231 0x8231 +#define PCI_DEVICE_ID_VIA_8231_4 0x8235 +#define PCI_DEVICE_ID_VIA_8365_1 0x8305 +#define PCI_DEVICE_ID_VIA_CX700 0x8324 +#define PCI_DEVICE_ID_VIA_CX700_IDE 0x0581 +#define PCI_DEVICE_ID_VIA_VX800 0x8353 +#define PCI_DEVICE_ID_VIA_VX855 0x8409 +#define PCI_DEVICE_ID_VIA_8371_1 0x8391 +#define PCI_DEVICE_ID_VIA_82C598_1 0x8598 +#define PCI_DEVICE_ID_VIA_838X_1 0xB188 +#define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198 +#define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409 +#define PCI_DEVICE_ID_VIA_ANON 0xFFFF + +#define PCI_VENDOR_ID_SIEMENS 0x110A +#define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102 + +#define PCI_VENDOR_ID_VORTEX 0x1119 +#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000 +#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001 +#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002 +#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003 +#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004 +#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005 +#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006 +#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007 +#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008 +#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009 +#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a +#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b +#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c +#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d +#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100 +#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101 +#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102 +#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103 +#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104 +#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105 + +#define PCI_VENDOR_ID_EF 0x111a +#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000 +#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002 +#define PCI_DEVICE_ID_EF_ATM_LANAI2 0x0003 +#define PCI_DEVICE_ID_EF_ATM_LANAIHB 0x0005 + +#define PCI_VENDOR_ID_IDT 0x111d +#define PCI_DEVICE_ID_IDT_IDT77201 0x0001 + +#define PCI_VENDOR_ID_FORE 0x1127 +#define PCI_DEVICE_ID_FORE_PCA200E 0x0300 + +#define PCI_VENDOR_ID_PHILIPS 0x1131 +#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146 +#define PCI_DEVICE_ID_PHILIPS_SAA9730 0x9730 + +#define PCI_VENDOR_ID_EICON 0x1133 +#define PCI_DEVICE_ID_EICON_DIVA20 0xe002 +#define PCI_DEVICE_ID_EICON_DIVA20_U 0xe004 +#define PCI_DEVICE_ID_EICON_DIVA201 0xe005 +#define PCI_DEVICE_ID_EICON_DIVA202 0xe00b +#define PCI_DEVICE_ID_EICON_MAESTRA 0xe010 +#define PCI_DEVICE_ID_EICON_MAESTRAQ 0xe012 +#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 +#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 + +#define PCI_VENDOR_ID_CISCO 0x1137 + +#define PCI_VENDOR_ID_ZIATECH 0x1138 +#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 + + +#define PCI_VENDOR_ID_SYSKONNECT 0x1148 +#define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 +#define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300 +#define PCI_DEVICE_ID_SYSKONNECT_YU 0x4320 +#define PCI_DEVICE_ID_SYSKONNECT_9DXX 0x4400 +#define PCI_DEVICE_ID_SYSKONNECT_9MXX 0x4500 + +#define PCI_VENDOR_ID_DIGI 0x114f +#define 
PCI_DEVICE_ID_DIGI_DF_M_IOM2_E 0x0070 +#define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071 +#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072 +#define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073 +#define PCI_DEVICE_ID_DIGI_NEO_8 0x00B1 +#define PCI_DEVICE_ID_NEO_2DB9 0x00C8 +#define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9 +#define PCI_DEVICE_ID_NEO_2RJ45 0x00CA +#define PCI_DEVICE_ID_NEO_2RJ45PRI 0x00CB +#define PCIE_DEVICE_ID_NEO_4_IBM 0x00F4 + +#define PCI_VENDOR_ID_XIRCOM 0x115d +#define PCI_DEVICE_ID_XIRCOM_RBM56G 0x0101 +#define PCI_DEVICE_ID_XIRCOM_X3201_MDM 0x0103 + +#define PCI_VENDOR_ID_SERVERWORKS 0x1166 +#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008 +#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009 +#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB 0x0036 +#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103 +#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132 +#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200 +#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205 +#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211 +#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227 +#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408 + +#define PCI_VENDOR_ID_SBE 0x1176 +#define PCI_DEVICE_ID_SBE_WANXL100 0x0301 +#define PCI_DEVICE_ID_SBE_WANXL200 0x0302 +#define PCI_DEVICE_ID_SBE_WANXL400 0x0104 +#define PCI_SUBDEVICE_ID_SBE_T3E3 0x0009 +#define PCI_SUBDEVICE_ID_SBE_2T3E3_P0 0x0901 +#define PCI_SUBDEVICE_ID_SBE_2T3E3_P1 0x0902 + +#define PCI_VENDOR_ID_TOSHIBA 0x1179 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0101 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2 0x0102 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_3 0x0103 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_5 0x0105 +#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a +#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f +#define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617 + +#define PCI_VENDOR_ID_TOSHIBA_2 0x102f +#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030 +#define PCI_DEVICE_ID_TOSHIBA_TC35815_NWU 0x0031 +#define PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939 0x0032 +#define PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE 0x0105 +#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108 +#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3 + +#define PCI_VENDOR_ID_ATTO 0x117c + +#define PCI_VENDOR_ID_RICOH 0x1180 +#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465 +#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466 +#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475 +#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 +#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 +#define PCI_DEVICE_ID_RICOH_R5C822 0x0822 +#define PCI_DEVICE_ID_RICOH_R5CE823 0xe823 +#define PCI_DEVICE_ID_RICOH_R5C832 0x0832 +#define PCI_DEVICE_ID_RICOH_R5C843 0x0843 + +#define PCI_VENDOR_ID_DLINK 0x1186 +#define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00 + +#define PCI_VENDOR_ID_ARTOP 0x1191 +#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005 +#define PCI_DEVICE_ID_ARTOP_ATP860 0x0006 +#define PCI_DEVICE_ID_ARTOP_ATP860R 0x0007 +#define PCI_DEVICE_ID_ARTOP_ATP865 0x0008 +#define PCI_DEVICE_ID_ARTOP_ATP865R 0x0009 +#define PCI_DEVICE_ID_ARTOP_ATP867A 0x000A +#define PCI_DEVICE_ID_ARTOP_ATP867B 0x000B +#define PCI_DEVICE_ID_ARTOP_AEC7610 0x8002 +#define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010 +#define PCI_DEVICE_ID_ARTOP_AEC7612U 0x8020 +#define PCI_DEVICE_ID_ARTOP_AEC7612S 0x8030 +#define 
PCI_DEVICE_ID_ARTOP_AEC7612D 0x8040 +#define PCI_DEVICE_ID_ARTOP_AEC7612SUW 0x8050 +#define PCI_DEVICE_ID_ARTOP_8060 0x8060 + +#define PCI_VENDOR_ID_ZEITNET 0x1193 +#define PCI_DEVICE_ID_ZEITNET_1221 0x0001 +#define PCI_DEVICE_ID_ZEITNET_1225 0x0002 + +#define PCI_VENDOR_ID_FUJITSU_ME 0x119e +#define PCI_DEVICE_ID_FUJITSU_FS155 0x0001 +#define PCI_DEVICE_ID_FUJITSU_FS50 0x0003 + +#define PCI_SUBVENDOR_ID_KEYSPAN 0x11a9 +#define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334 + +#define PCI_VENDOR_ID_MARVELL 0x11ab +#define PCI_DEVICE_ID_MARVELL_GT64111 0x4146 +#define PCI_DEVICE_ID_MARVELL_GT64260 0x6430 +#define PCI_DEVICE_ID_MARVELL_MV64360 0x6460 +#define PCI_DEVICE_ID_MARVELL_MV64460 0x6480 +#define PCI_DEVICE_ID_MARVELL_88ALP01_NAND 0x4100 +#define PCI_DEVICE_ID_MARVELL_88ALP01_SD 0x4101 +#define PCI_DEVICE_ID_MARVELL_88ALP01_CCIC 0x4102 + +#define PCI_VENDOR_ID_V3 0x11b0 +#define PCI_DEVICE_ID_V3_V960 0x0001 +#define PCI_DEVICE_ID_V3_V351 0x0002 + +#define PCI_VENDOR_ID_ATT 0x11c1 +#define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480 + +#define PCI_VENDOR_ID_SPECIALIX 0x11cb +#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000 +#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000 +#define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004 + +#define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4 +#define PCI_DEVICE_ID_AD1889JS 0x1889 + +#define PCI_DEVICE_ID_SEGA_BBA 0x1234 + +#define PCI_VENDOR_ID_ZORAN 0x11de +#define PCI_DEVICE_ID_ZORAN_36057 0x6057 +#define PCI_DEVICE_ID_ZORAN_36120 0x6120 + +#define PCI_VENDOR_ID_COMPEX 0x11f6 +#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112 + +#define PCI_VENDOR_ID_PMC_Sierra 0x11f8 + +#define PCI_VENDOR_ID_RP 0x11fe +#define PCI_DEVICE_ID_RP32INTF 0x0001 +#define PCI_DEVICE_ID_RP8INTF 0x0002 +#define PCI_DEVICE_ID_RP16INTF 0x0003 +#define PCI_DEVICE_ID_RP4QUAD 0x0004 +#define PCI_DEVICE_ID_RP8OCTA 0x0005 +#define PCI_DEVICE_ID_RP8J 0x0006 +#define PCI_DEVICE_ID_RP4J 0x0007 +#define PCI_DEVICE_ID_RP8SNI 0x0008 +#define PCI_DEVICE_ID_RP16SNI 0x0009 +#define PCI_DEVICE_ID_RPP4 0x000A +#define PCI_DEVICE_ID_RPP8 0x000B +#define PCI_DEVICE_ID_RP4M 0x000D +#define PCI_DEVICE_ID_RP2_232 0x000E +#define PCI_DEVICE_ID_RP2_422 0x000F +#define PCI_DEVICE_ID_URP32INTF 0x0801 +#define PCI_DEVICE_ID_URP8INTF 0x0802 +#define PCI_DEVICE_ID_URP16INTF 0x0803 +#define PCI_DEVICE_ID_URP8OCTA 0x0805 +#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C +#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D +#define PCI_DEVICE_ID_CRP16INTF 0x0903 + +#define PCI_VENDOR_ID_CYCLADES 0x120e +#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 +#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101 +#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102 +#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103 +#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104 +#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105 +#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200 +#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201 +#define PCI_DEVICE_ID_PC300_RX_2 0x0300 +#define PCI_DEVICE_ID_PC300_RX_1 0x0301 +#define PCI_DEVICE_ID_PC300_TE_2 0x0310 +#define PCI_DEVICE_ID_PC300_TE_1 0x0311 +#define PCI_DEVICE_ID_PC300_TE_M_2 0x0320 +#define PCI_DEVICE_ID_PC300_TE_M_1 0x0321 + +#define PCI_VENDOR_ID_ESSENTIAL 0x120f +#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001 + +#define PCI_VENDOR_ID_O2 0x1217 +#define PCI_DEVICE_ID_O2_6729 0x6729 +#define PCI_DEVICE_ID_O2_6730 0x673a +#define PCI_DEVICE_ID_O2_6832 0x6832 +#define PCI_DEVICE_ID_O2_6836 0x6836 +#define PCI_DEVICE_ID_O2_6812 0x6872 +#define PCI_DEVICE_ID_O2_6933 0x6933 +#define PCI_DEVICE_ID_O2_8120 0x8120 +#define PCI_DEVICE_ID_O2_8220 0x8220 +#define PCI_DEVICE_ID_O2_8221 0x8221 +#define 
PCI_DEVICE_ID_O2_8320 0x8320 +#define PCI_DEVICE_ID_O2_8321 0x8321 + +#define PCI_VENDOR_ID_3DFX 0x121a +#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 +#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002 +#define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003 +#define PCI_DEVICE_ID_3DFX_VOODOO3 0x0005 +#define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009 + +#define PCI_VENDOR_ID_AVM 0x1244 +#define PCI_DEVICE_ID_AVM_B1 0x0700 +#define PCI_DEVICE_ID_AVM_C4 0x0800 +#define PCI_DEVICE_ID_AVM_A1 0x0a00 +#define PCI_DEVICE_ID_AVM_A1_V2 0x0e00 +#define PCI_DEVICE_ID_AVM_C2 0x1100 +#define PCI_DEVICE_ID_AVM_T1 0x1200 + +#define PCI_VENDOR_ID_STALLION 0x124d + +/* Allied Telesyn */ +#define PCI_VENDOR_ID_AT 0x1259 +#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701 +#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703 + +#define PCI_VENDOR_ID_ESS 0x125d +#define PCI_DEVICE_ID_ESS_ESS1968 0x1968 +#define PCI_DEVICE_ID_ESS_ESS1978 0x1978 +#define PCI_DEVICE_ID_ESS_ALLEGRO_1 0x1988 +#define PCI_DEVICE_ID_ESS_ALLEGRO 0x1989 +#define PCI_DEVICE_ID_ESS_CANYON3D_2LE 0x1990 +#define PCI_DEVICE_ID_ESS_CANYON3D_2 0x1992 +#define PCI_DEVICE_ID_ESS_MAESTRO3 0x1998 +#define PCI_DEVICE_ID_ESS_MAESTRO3_1 0x1999 +#define PCI_DEVICE_ID_ESS_MAESTRO3_HW 0x199a +#define PCI_DEVICE_ID_ESS_MAESTRO3_2 0x199b + +#define PCI_VENDOR_ID_SATSAGEM 0x1267 +#define PCI_DEVICE_ID_SATSAGEM_NICCY 0x1016 + +#define PCI_VENDOR_ID_ENSONIQ 0x1274 +#define PCI_DEVICE_ID_ENSONIQ_CT5880 0x5880 +#define PCI_DEVICE_ID_ENSONIQ_ES1370 0x5000 +#define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371 + +#define PCI_VENDOR_ID_TRANSMETA 0x1279 +#define PCI_DEVICE_ID_EFFICEON 0x0060 + +#define PCI_VENDOR_ID_ROCKWELL 0x127A + +#define PCI_VENDOR_ID_ITE 0x1283 +#define PCI_DEVICE_ID_ITE_8172 0x8172 +#define PCI_DEVICE_ID_ITE_8211 0x8211 +#define PCI_DEVICE_ID_ITE_8212 0x8212 +#define PCI_DEVICE_ID_ITE_8213 0x8213 +#define PCI_DEVICE_ID_ITE_8152 0x8152 +#define PCI_DEVICE_ID_ITE_8872 0x8872 +#define PCI_DEVICE_ID_ITE_IT8330G_0 0xe886 + +/* formerly Platform Tech */ +#define PCI_DEVICE_ID_ESS_ESS0100 0x0100 + +#define PCI_VENDOR_ID_ALTEON 0x12ae + +#define PCI_SUBVENDOR_ID_CONNECT_TECH 0x12c4 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232 0x0001 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232 0x0002 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232 0x0003 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485 0x0004 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4 0x0005 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485 0x0006 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2 0x0007 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485 0x0008 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6 0x0009 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1 0x000A +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1 0x000B +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ 0x000C +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_PTM 0x000D +#define PCI_SUBDEVICE_ID_CONNECT_TECH_NT960PCI 0x0100 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2 0x0201 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4 0x0202 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232 0x0300 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232 0x0301 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232 0x0302 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1 0x0310 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2 0x0311 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4 0x0312 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2 0x0320 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4 0x0321 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8 0x0322 +#define 
PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485 0x0330 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485 0x0331 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485 0x0332 + +#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 +#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 + +#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16 0x0011 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC 0x0041 +#define PCI_SUBVENDOR_ID_CHASE_PCIRAS 0x124D +#define PCI_SUBDEVICE_ID_CHASE_PCIRAS4 0xF001 +#define PCI_SUBDEVICE_ID_CHASE_PCIRAS8 0xF010 + +#define PCI_VENDOR_ID_AUREAL 0x12eb +#define PCI_DEVICE_ID_AUREAL_VORTEX_1 0x0001 +#define PCI_DEVICE_ID_AUREAL_VORTEX_2 0x0002 +#define PCI_DEVICE_ID_AUREAL_ADVANTAGE 0x0003 + +#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8 +#define PCI_DEVICE_ID_LML_33R10 0x8a02 + +#define PCI_VENDOR_ID_ESDGMBH 0x12fe +#define PCI_DEVICE_ID_ESDGMBH_CPCIASIO4 0x0111 + +#define PCI_VENDOR_ID_SIIG 0x131f +#define PCI_SUBVENDOR_ID_SIIG 0x131f +#define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000 +#define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001 +#define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002 +#define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010 +#define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011 +#define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012 +#define PCI_DEVICE_ID_SIIG_1P_10x 0x1020 +#define PCI_DEVICE_ID_SIIG_2P_10x 0x1021 +#define PCI_DEVICE_ID_SIIG_2S_10x_550 0x1030 +#define PCI_DEVICE_ID_SIIG_2S_10x_650 0x1031 +#define PCI_DEVICE_ID_SIIG_2S_10x_850 0x1032 +#define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034 +#define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035 +#define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036 +#define PCI_DEVICE_ID_SIIG_4S_10x_550 0x1050 +#define PCI_DEVICE_ID_SIIG_4S_10x_650 0x1051 +#define PCI_DEVICE_ID_SIIG_4S_10x_850 0x1052 +#define PCI_DEVICE_ID_SIIG_1S_20x_550 0x2000 +#define PCI_DEVICE_ID_SIIG_1S_20x_650 0x2001 +#define PCI_DEVICE_ID_SIIG_1S_20x_850 0x2002 +#define PCI_DEVICE_ID_SIIG_1P_20x 0x2020 +#define PCI_DEVICE_ID_SIIG_2P_20x 0x2021 +#define PCI_DEVICE_ID_SIIG_2S_20x_550 0x2030 +#define PCI_DEVICE_ID_SIIG_2S_20x_650 0x2031 +#define PCI_DEVICE_ID_SIIG_2S_20x_850 0x2032 +#define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040 +#define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041 +#define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042 +#define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010 +#define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011 +#define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012 +#define PCI_DEVICE_ID_SIIG_4S_20x_550 0x2050 +#define PCI_DEVICE_ID_SIIG_4S_20x_650 0x2051 +#define PCI_DEVICE_ID_SIIG_4S_20x_850 0x2052 +#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060 +#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061 +#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062 +#define PCI_DEVICE_ID_SIIG_8S_20x_550 0x2080 +#define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 +#define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 +#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530 + +#define PCI_VENDOR_ID_RADISYS 0x1331 + +#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332 +#define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415 +#define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425 +#define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155 + +#define PCI_VENDOR_ID_DOMEX 0x134a +#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001 + +#define PCI_VENDOR_ID_INTASHIELD 0x135a +#define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80 +#define PCI_DEVICE_ID_INTASHIELD_IS400 0x0dc0 + +#define PCI_VENDOR_ID_QUATECH 0x135C +#define 
PCI_DEVICE_ID_QUATECH_QSC100 0x0010 +#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020 +#define PCI_DEVICE_ID_QUATECH_ESC100D 0x0050 +#define PCI_DEVICE_ID_QUATECH_ESC100M 0x0060 +#define PCI_DEVICE_ID_QUATECH_SPPXP_100 0x0278 + +#define PCI_VENDOR_ID_SEALEVEL 0x135e +#define PCI_DEVICE_ID_SEALEVEL_U530 0x7101 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM2 0x7201 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM422 0x7402 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202 +#define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401 +#define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801 +#define PCI_DEVICE_ID_SEALEVEL_7803 0x7803 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804 + +#define PCI_VENDOR_ID_HYPERCOPE 0x1365 +#define PCI_DEVICE_ID_HYPERCOPE_PLX 0x9050 +#define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO 0x0104 +#define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO 0x0106 +#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107 +#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108 + +#define PCI_VENDOR_ID_DIGIGRAM 0x1369 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 + +#define PCI_VENDOR_ID_KAWASAKI 0x136b +#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 + +#define PCI_VENDOR_ID_CNET 0x1371 +#define PCI_DEVICE_ID_CNET_GIGACARD 0x434e + +#define PCI_VENDOR_ID_LMC 0x1376 +#define PCI_DEVICE_ID_LMC_HSSI 0x0003 +#define PCI_DEVICE_ID_LMC_DS3 0x0004 +#define PCI_DEVICE_ID_LMC_SSI 0x0005 +#define PCI_DEVICE_ID_LMC_T1 0x0006 + +#define PCI_VENDOR_ID_NETGEAR 0x1385 +#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a + +#define PCI_VENDOR_ID_APPLICOM 0x1389 +#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001 +#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002 +#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003 + +#define PCI_VENDOR_ID_MOXA 0x1393 +#define PCI_DEVICE_ID_MOXA_RC7000 0x0001 +#define PCI_DEVICE_ID_MOXA_CP102 0x1020 +#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021 +#define PCI_DEVICE_ID_MOXA_CP102U 0x1022 +#define PCI_DEVICE_ID_MOXA_C104 0x1040 +#define PCI_DEVICE_ID_MOXA_CP104U 0x1041 +#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042 +#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043 +#define PCI_DEVICE_ID_MOXA_CT114 0x1140 +#define PCI_DEVICE_ID_MOXA_CP114 0x1141 +#define PCI_DEVICE_ID_MOXA_CP118U 0x1180 +#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181 +#define PCI_DEVICE_ID_MOXA_CP132 0x1320 +#define PCI_DEVICE_ID_MOXA_CP132U 0x1321 +#define PCI_DEVICE_ID_MOXA_CP134U 0x1340 +#define PCI_DEVICE_ID_MOXA_C168 0x1680 +#define PCI_DEVICE_ID_MOXA_CP168U 0x1681 +#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682 +#define PCI_DEVICE_ID_MOXA_CP204J 0x2040 +#define PCI_DEVICE_ID_MOXA_C218 0x2180 +#define PCI_DEVICE_ID_MOXA_C320 0x3200 + +#define PCI_VENDOR_ID_CCD 0x1397 +#define PCI_DEVICE_ID_CCD_HFC4S 0x08B4 +#define PCI_SUBDEVICE_ID_CCD_PMX2S 0x1234 +#define PCI_DEVICE_ID_CCD_HFC8S 0x16B8 +#define PCI_DEVICE_ID_CCD_2BD0 0x2bd0 +#define PCI_DEVICE_ID_CCD_HFCE1 0x30B1 +#define PCI_SUBDEVICE_ID_CCD_SPD4S 0x3136 +#define PCI_SUBDEVICE_ID_CCD_SPDE1 0x3137 +#define PCI_DEVICE_ID_CCD_B000 0xb000 +#define PCI_DEVICE_ID_CCD_B006 0xb006 +#define PCI_DEVICE_ID_CCD_B007 0xb007 +#define PCI_DEVICE_ID_CCD_B008 0xb008 +#define PCI_DEVICE_ID_CCD_B009 0xb009 +#define PCI_DEVICE_ID_CCD_B00A 0xb00a +#define PCI_DEVICE_ID_CCD_B00B 0xb00b +#define PCI_DEVICE_ID_CCD_B00C 0xb00c +#define PCI_DEVICE_ID_CCD_B100 0xb100 +#define PCI_SUBDEVICE_ID_CCD_IOB4ST 0xB520 +#define PCI_SUBDEVICE_ID_CCD_IOB8STR 0xB521 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST 0xB522 +#define PCI_SUBDEVICE_ID_CCD_IOB1E1 0xB523 +#define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540 +#define 
PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552 +#define PCI_SUBDEVICE_ID_CCD_JHSE1 0xB553 +#define PCI_SUBDEVICE_ID_CCD_JH8S 0xB55B +#define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560 +#define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562 +#define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563 +#define PCI_SUBDEVICE_ID_CCD_BNE1D 0xB564 +#define PCI_SUBDEVICE_ID_CCD_BNE1DP 0xB565 +#define PCI_SUBDEVICE_ID_CCD_BN2S 0xB566 +#define PCI_SUBDEVICE_ID_CCD_BN1SM 0xB567 +#define PCI_SUBDEVICE_ID_CCD_BN4SM 0xB568 +#define PCI_SUBDEVICE_ID_CCD_BN2SM 0xB569 +#define PCI_SUBDEVICE_ID_CCD_BNE1M 0xB56A +#define PCI_SUBDEVICE_ID_CCD_BN8SP 0xB56B +#define PCI_SUBDEVICE_ID_CCD_HFC4S 0xB620 +#define PCI_SUBDEVICE_ID_CCD_HFC8S 0xB622 +#define PCI_DEVICE_ID_CCD_B700 0xb700 +#define PCI_DEVICE_ID_CCD_B701 0xb701 +#define PCI_SUBDEVICE_ID_CCD_HFCE1 0xC523 +#define PCI_SUBDEVICE_ID_CCD_OV2S 0xE884 +#define PCI_SUBDEVICE_ID_CCD_OV4S 0xE888 +#define PCI_SUBDEVICE_ID_CCD_OV8S 0xE998 + +#define PCI_VENDOR_ID_EXAR 0x13a8 +#define PCI_DEVICE_ID_EXAR_XR17C152 0x0152 +#define PCI_DEVICE_ID_EXAR_XR17C154 0x0154 +#define PCI_DEVICE_ID_EXAR_XR17C158 0x0158 + +#define PCI_VENDOR_ID_MICROGATE 0x13c0 +#define PCI_DEVICE_ID_MICROGATE_USC 0x0010 +#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030 + +#define PCI_VENDOR_ID_3WARE 0x13C1 +#define PCI_DEVICE_ID_3WARE_1000 0x1000 +#define PCI_DEVICE_ID_3WARE_7000 0x1001 +#define PCI_DEVICE_ID_3WARE_9000 0x1002 + +#define PCI_VENDOR_ID_IOMEGA 0x13ca +#define PCI_DEVICE_ID_IOMEGA_BUZ 0x4231 + +#define PCI_VENDOR_ID_ABOCOM 0x13D1 +#define PCI_DEVICE_ID_ABOCOM_2BD1 0x2BD1 + +#define PCI_VENDOR_ID_SUNDANCE 0x13f0 + +#define PCI_VENDOR_ID_CMEDIA 0x13f6 +#define PCI_DEVICE_ID_CMEDIA_CM8338A 0x0100 +#define PCI_DEVICE_ID_CMEDIA_CM8338B 0x0101 +#define PCI_DEVICE_ID_CMEDIA_CM8738 0x0111 +#define PCI_DEVICE_ID_CMEDIA_CM8738B 0x0112 + +#define PCI_VENDOR_ID_LAVA 0x1407 +#define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */ +#define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_A 0x0120 /* 2x 16550A, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_B 0x0121 /* 2x 16550A, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */ +#define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */ +#define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */ +#define PCI_DEVICE_ID_LAVA_QUAD_A 0x0201 /* 2x 16650, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUAD_B 0x0202 /* 2x 16650, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_SSERIAL 0x0500 /* 1x 16550 */ +#define PCI_DEVICE_ID_LAVA_PORT_650 0x0600 /* 1x 16650 */ +#define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000 +#define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */ +#define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */ +#define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR 0x8800 + +#define PCI_VENDOR_ID_TIMEDIA 0x1409 +#define PCI_DEVICE_ID_TIMEDIA_1889 0x7168 + +#define PCI_VENDOR_ID_ICE 0x1412 +#define PCI_DEVICE_ID_ICE_1712 0x1712 +#define PCI_DEVICE_ID_VT1724 0x1724 + +#define PCI_VENDOR_ID_OXSEMI 0x1415 +#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 +#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000 +#define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114 +#define 
PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C +#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 +#define PCI_DEVICE_ID_OXSEMI_C950 0x950B +#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 +#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 +#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 +#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 +#define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001 + +#define PCI_VENDOR_ID_CHELSIO 0x1425 + +#define PCI_VENDOR_ID_SAMSUNG 0x144d + +#define PCI_VENDOR_ID_GIGABYTE 0x1458 + +#define PCI_VENDOR_ID_AMBIT 0x1468 + +#define PCI_VENDOR_ID_MYRICOM 0x14c1 + +#define PCI_VENDOR_ID_TITAN 0x14D2 +#define PCI_DEVICE_ID_TITAN_010L 0x8001 +#define PCI_DEVICE_ID_TITAN_100L 0x8010 +#define PCI_DEVICE_ID_TITAN_110L 0x8011 +#define PCI_DEVICE_ID_TITAN_200L 0x8020 +#define PCI_DEVICE_ID_TITAN_210L 0x8021 +#define PCI_DEVICE_ID_TITAN_400L 0x8040 +#define PCI_DEVICE_ID_TITAN_800L 0x8080 +#define PCI_DEVICE_ID_TITAN_100 0xA001 +#define PCI_DEVICE_ID_TITAN_200 0xA005 +#define PCI_DEVICE_ID_TITAN_400 0xA003 +#define PCI_DEVICE_ID_TITAN_800B 0xA004 + +#define PCI_VENDOR_ID_PANACOM 0x14d4 +#define PCI_DEVICE_ID_PANACOM_QUADMODEM 0x0400 +#define PCI_DEVICE_ID_PANACOM_DUALMODEM 0x0402 + +#define PCI_VENDOR_ID_SIPACKETS 0x14d9 +#define PCI_DEVICE_ID_SP1011 0x0010 + +#define PCI_VENDOR_ID_AFAVLAB 0x14db +#define PCI_DEVICE_ID_AFAVLAB_P028 0x2180 +#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182 +#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150 + +#define PCI_VENDOR_ID_BCM_GVC 0x14a4 +#define PCI_VENDOR_ID_BROADCOM 0x14e4 +#define PCI_DEVICE_ID_TIGON3_5752 0x1600 +#define PCI_DEVICE_ID_TIGON3_5752M 0x1601 +#define PCI_DEVICE_ID_NX2_5709 0x1639 +#define PCI_DEVICE_ID_NX2_5709S 0x163a +#define PCI_DEVICE_ID_TIGON3_5700 0x1644 +#define PCI_DEVICE_ID_TIGON3_5701 0x1645 +#define PCI_DEVICE_ID_TIGON3_5702 0x1646 +#define PCI_DEVICE_ID_TIGON3_5703 0x1647 +#define PCI_DEVICE_ID_TIGON3_5704 0x1648 +#define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649 +#define PCI_DEVICE_ID_NX2_5706 0x164a +#define PCI_DEVICE_ID_NX2_5708 0x164c +#define PCI_DEVICE_ID_TIGON3_5702FE 0x164d +#define PCI_DEVICE_ID_NX2_57710 0x164e +#define PCI_DEVICE_ID_NX2_57711 0x164f +#define PCI_DEVICE_ID_NX2_57711E 0x1650 +#define PCI_DEVICE_ID_TIGON3_5705 0x1653 +#define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 +#define PCI_DEVICE_ID_TIGON3_5719 0x1657 +#define PCI_DEVICE_ID_TIGON3_5721 0x1659 +#define PCI_DEVICE_ID_TIGON3_5722 0x165a +#define PCI_DEVICE_ID_TIGON3_5723 0x165b +#define PCI_DEVICE_ID_TIGON3_5705M 0x165d +#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e +#define PCI_DEVICE_ID_NX2_57712 0x1662 +#define PCI_DEVICE_ID_NX2_57712E 0x1663 +#define PCI_DEVICE_ID_TIGON3_5714 0x1668 +#define PCI_DEVICE_ID_TIGON3_5714S 0x1669 +#define PCI_DEVICE_ID_TIGON3_5780 0x166a +#define PCI_DEVICE_ID_TIGON3_5780S 0x166b +#define PCI_DEVICE_ID_TIGON3_5705F 0x166e +#define PCI_DEVICE_ID_TIGON3_5754M 0x1672 +#define PCI_DEVICE_ID_TIGON3_5755M 0x1673 +#define PCI_DEVICE_ID_TIGON3_5756 0x1674 +#define PCI_DEVICE_ID_TIGON3_5751 0x1677 +#define PCI_DEVICE_ID_TIGON3_5715 0x1678 +#define PCI_DEVICE_ID_TIGON3_5715S 0x1679 +#define PCI_DEVICE_ID_TIGON3_5754 0x167a +#define PCI_DEVICE_ID_TIGON3_5755 0x167b +#define PCI_DEVICE_ID_TIGON3_5751M 0x167d +#define PCI_DEVICE_ID_TIGON3_5751F 0x167e +#define PCI_DEVICE_ID_TIGON3_5787F 0x167f +#define PCI_DEVICE_ID_TIGON3_5761E 0x1680 +#define PCI_DEVICE_ID_TIGON3_5761 0x1681 +#define PCI_DEVICE_ID_TIGON3_5764 0x1684 +#define PCI_DEVICE_ID_NX2_57800 0x168a +#define PCI_DEVICE_ID_NX2_57840 0x168d +#define 
PCI_DEVICE_ID_NX2_57810 0x168e +#define PCI_DEVICE_ID_TIGON3_5787M 0x1693 +#define PCI_DEVICE_ID_TIGON3_5782 0x1696 +#define PCI_DEVICE_ID_TIGON3_5784 0x1698 +#define PCI_DEVICE_ID_TIGON3_5786 0x169a +#define PCI_DEVICE_ID_TIGON3_5787 0x169b +#define PCI_DEVICE_ID_TIGON3_5788 0x169c +#define PCI_DEVICE_ID_TIGON3_5789 0x169d +#define PCI_DEVICE_ID_NX2_57800_MF 0x16a5 +#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 +#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 +#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 +#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 +#define PCI_DEVICE_ID_NX2_5706S 0x16aa +#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab +#define PCI_DEVICE_ID_NX2_5708S 0x16ac +#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad +#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae +#define PCI_DEVICE_ID_NX2_57810_VF 0x16af +#define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6 +#define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7 +#define PCI_DEVICE_ID_TIGON3_5781 0x16dd +#define PCI_DEVICE_ID_TIGON3_5753 0x16f7 +#define PCI_DEVICE_ID_TIGON3_5753M 0x16fd +#define PCI_DEVICE_ID_TIGON3_5753F 0x16fe +#define PCI_DEVICE_ID_TIGON3_5901 0x170d +#define PCI_DEVICE_ID_BCM4401B1 0x170c +#define PCI_DEVICE_ID_TIGON3_5901_2 0x170e +#define PCI_DEVICE_ID_TIGON3_5906 0x1712 +#define PCI_DEVICE_ID_TIGON3_5906M 0x1713 +#define PCI_DEVICE_ID_BCM4401 0x4401 +#define PCI_DEVICE_ID_BCM4401B0 0x4402 + +#define PCI_VENDOR_ID_TOPIC 0x151f +#define PCI_DEVICE_ID_TOPIC_TP560 0x0000 + +#define PCI_VENDOR_ID_MAINPINE 0x1522 +#define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100 +#define PCI_VENDOR_ID_ENE 0x1524 +#define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510 +#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550 +#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551 +#define PCI_DEVICE_ID_ENE_CB714_SD 0x0750 +#define PCI_DEVICE_ID_ENE_CB714_SD_2 0x0751 +#define PCI_DEVICE_ID_ENE_1211 0x1211 +#define PCI_DEVICE_ID_ENE_1225 0x1225 +#define PCI_DEVICE_ID_ENE_1410 0x1410 +#define PCI_DEVICE_ID_ENE_710 0x1411 +#define PCI_DEVICE_ID_ENE_712 0x1412 +#define PCI_DEVICE_ID_ENE_1420 0x1420 +#define PCI_DEVICE_ID_ENE_720 0x1421 +#define PCI_DEVICE_ID_ENE_722 0x1422 + +#define PCI_SUBVENDOR_ID_PERLE 0x155f +#define PCI_SUBDEVICE_ID_PCI_RAS4 0xf001 +#define PCI_SUBDEVICE_ID_PCI_RAS8 0xf010 + +#define PCI_VENDOR_ID_SYBA 0x1592 +#define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782 +#define PCI_DEVICE_ID_SYBA_1P_ECP 0x0783 + +#define PCI_VENDOR_ID_MORETON 0x15aa +#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 + +#define PCI_VENDOR_ID_ZOLTRIX 0x15b0 +#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 + +#define PCI_VENDOR_ID_MELLANOX 0x15b3 +#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 +#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 +#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 +#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 +#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c +#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 + +#define PCI_VENDOR_ID_DFI 0x15bd + +#define PCI_VENDOR_ID_QUICKNET 0x15e2 +#define PCI_DEVICE_ID_QUICKNET_XJ 0x0500 + +/* + * ADDI-DATA GmbH communication cards <info@addi-data.com> + */ +#define PCI_VENDOR_ID_ADDIDATA_OLD 0x10E8 +#define PCI_VENDOR_ID_ADDIDATA 0x15B8 +#define PCI_DEVICE_ID_ADDIDATA_APCI7500 0x7000 +#define PCI_DEVICE_ID_ADDIDATA_APCI7420 0x7001 +#define PCI_DEVICE_ID_ADDIDATA_APCI7300 0x7002 +#define PCI_DEVICE_ID_ADDIDATA_APCI7800 0x818E +#define PCI_DEVICE_ID_ADDIDATA_APCI7500_2 0x7009 +#define PCI_DEVICE_ID_ADDIDATA_APCI7420_2 0x700A +#define PCI_DEVICE_ID_ADDIDATA_APCI7300_2 0x700B +#define PCI_DEVICE_ID_ADDIDATA_APCI7500_3 0x700C +#define PCI_DEVICE_ID_ADDIDATA_APCI7420_3 0x700D +#define 
PCI_DEVICE_ID_ADDIDATA_APCI7300_3 0x700E +#define PCI_DEVICE_ID_ADDIDATA_APCI7800_3 0x700F +#define PCI_DEVICE_ID_ADDIDATA_APCIe7300 0x7010 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7420 0x7011 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7500 0x7012 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7800 0x7013 + +#define PCI_VENDOR_ID_PDC 0x15e9 + +#define PCI_VENDOR_ID_FARSITE 0x1619 +#define PCI_DEVICE_ID_FARSITE_T2P 0x0400 +#define PCI_DEVICE_ID_FARSITE_T4P 0x0440 +#define PCI_DEVICE_ID_FARSITE_T1U 0x0610 +#define PCI_DEVICE_ID_FARSITE_T2U 0x0620 +#define PCI_DEVICE_ID_FARSITE_T4U 0x0640 +#define PCI_DEVICE_ID_FARSITE_TE1 0x1610 +#define PCI_DEVICE_ID_FARSITE_TE1C 0x1612 + +#define PCI_VENDOR_ID_ARIMA 0x161f + +#define PCI_VENDOR_ID_BROCADE 0x1657 +#define PCI_DEVICE_ID_BROCADE_CT 0x0014 +#define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017 +#define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021 + +#define PCI_VENDOR_ID_SIBYTE 0x166d +#define PCI_DEVICE_ID_BCM1250_PCI 0x0001 +#define PCI_DEVICE_ID_BCM1250_HT 0x0002 + +#define PCI_VENDOR_ID_ATHEROS 0x168c + +#define PCI_VENDOR_ID_NETCELL 0x169c +#define PCI_DEVICE_ID_REVOLUTION 0x0044 + +#define PCI_VENDOR_ID_CENATEK 0x16CA +#define PCI_DEVICE_ID_CENATEK_IDE 0x0001 + +#define PCI_VENDOR_ID_VITESSE 0x1725 +#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 + +#define PCI_VENDOR_ID_LINKSYS 0x1737 +#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064 + +#define PCI_VENDOR_ID_ALTIMA 0x173b +#define PCI_DEVICE_ID_ALTIMA_AC1000 0x03e8 +#define PCI_DEVICE_ID_ALTIMA_AC1001 0x03e9 +#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea +#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb + +#define PCI_VENDOR_ID_BELKIN 0x1799 +#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f + +#define PCI_VENDOR_ID_RDC 0x17f3 +#define PCI_DEVICE_ID_RDC_R6020 0x6020 +#define PCI_DEVICE_ID_RDC_R6030 0x6030 +#define PCI_DEVICE_ID_RDC_R6040 0x6040 +#define PCI_DEVICE_ID_RDC_R6060 0x6060 +#define PCI_DEVICE_ID_RDC_R6061 0x6061 +#define PCI_DEVICE_ID_RDC_D1010 0x1010 + +#define PCI_VENDOR_ID_LENOVO 0x17aa + +#define PCI_VENDOR_ID_ARECA 0x17d3 +#define PCI_DEVICE_ID_ARECA_1110 0x1110 +#define PCI_DEVICE_ID_ARECA_1120 0x1120 +#define PCI_DEVICE_ID_ARECA_1130 0x1130 +#define PCI_DEVICE_ID_ARECA_1160 0x1160 +#define PCI_DEVICE_ID_ARECA_1170 0x1170 +#define PCI_DEVICE_ID_ARECA_1200 0x1200 +#define PCI_DEVICE_ID_ARECA_1201 0x1201 +#define PCI_DEVICE_ID_ARECA_1202 0x1202 +#define PCI_DEVICE_ID_ARECA_1210 0x1210 +#define PCI_DEVICE_ID_ARECA_1220 0x1220 +#define PCI_DEVICE_ID_ARECA_1230 0x1230 +#define PCI_DEVICE_ID_ARECA_1260 0x1260 +#define PCI_DEVICE_ID_ARECA_1270 0x1270 +#define PCI_DEVICE_ID_ARECA_1280 0x1280 +#define PCI_DEVICE_ID_ARECA_1380 0x1380 +#define PCI_DEVICE_ID_ARECA_1381 0x1381 +#define PCI_DEVICE_ID_ARECA_1680 0x1680 +#define PCI_DEVICE_ID_ARECA_1681 0x1681 + +#define PCI_VENDOR_ID_S2IO 0x17d5 +#define PCI_DEVICE_ID_S2IO_WIN 0x5731 +#define PCI_DEVICE_ID_S2IO_UNI 0x5831 +#define PCI_DEVICE_ID_HERC_WIN 0x5732 +#define PCI_DEVICE_ID_HERC_UNI 0x5832 + +#define PCI_VENDOR_ID_SITECOM 0x182d +#define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 + +#define PCI_VENDOR_ID_TOPSPIN 0x1867 + +#define PCI_VENDOR_ID_SILAN 0x1904 + +#define PCI_VENDOR_ID_RENESAS 0x1912 +#define PCI_DEVICE_ID_RENESAS_SH7781 0x0001 +#define PCI_DEVICE_ID_RENESAS_SH7780 0x0002 +#define PCI_DEVICE_ID_RENESAS_SH7763 0x0004 +#define PCI_DEVICE_ID_RENESAS_SH7785 0x0007 +#define PCI_DEVICE_ID_RENESAS_SH7786 0x0010 + +#define PCI_VENDOR_ID_SOLARFLARE 0x1924 +#define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0 0x0703 +#define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1 0x6703 +#define 
PCI_DEVICE_ID_SOLARFLARE_SFC4000B 0x0710 + +#define PCI_VENDOR_ID_TDI 0x192E +#define PCI_DEVICE_ID_TDI_EHCI 0x0101 + +#define PCI_VENDOR_ID_FREESCALE 0x1957 +#define PCI_DEVICE_ID_MPC8308 0xc006 +#define PCI_DEVICE_ID_MPC8315E 0x00b4 +#define PCI_DEVICE_ID_MPC8315 0x00b5 +#define PCI_DEVICE_ID_MPC8314E 0x00b6 +#define PCI_DEVICE_ID_MPC8314 0x00b7 +#define PCI_DEVICE_ID_MPC8378E 0x00c4 +#define PCI_DEVICE_ID_MPC8378 0x00c5 +#define PCI_DEVICE_ID_MPC8377E 0x00c6 +#define PCI_DEVICE_ID_MPC8377 0x00c7 +#define PCI_DEVICE_ID_MPC8548E 0x0012 +#define PCI_DEVICE_ID_MPC8548 0x0013 +#define PCI_DEVICE_ID_MPC8543E 0x0014 +#define PCI_DEVICE_ID_MPC8543 0x0015 +#define PCI_DEVICE_ID_MPC8547E 0x0018 +#define PCI_DEVICE_ID_MPC8545E 0x0019 +#define PCI_DEVICE_ID_MPC8545 0x001a +#define PCI_DEVICE_ID_MPC8569E 0x0061 +#define PCI_DEVICE_ID_MPC8569 0x0060 +#define PCI_DEVICE_ID_MPC8568E 0x0020 +#define PCI_DEVICE_ID_MPC8568 0x0021 +#define PCI_DEVICE_ID_MPC8567E 0x0022 +#define PCI_DEVICE_ID_MPC8567 0x0023 +#define PCI_DEVICE_ID_MPC8533E 0x0030 +#define PCI_DEVICE_ID_MPC8533 0x0031 +#define PCI_DEVICE_ID_MPC8544E 0x0032 +#define PCI_DEVICE_ID_MPC8544 0x0033 +#define PCI_DEVICE_ID_MPC8572E 0x0040 +#define PCI_DEVICE_ID_MPC8572 0x0041 +#define PCI_DEVICE_ID_MPC8536E 0x0050 +#define PCI_DEVICE_ID_MPC8536 0x0051 +#define PCI_DEVICE_ID_P2020E 0x0070 +#define PCI_DEVICE_ID_P2020 0x0071 +#define PCI_DEVICE_ID_P2010E 0x0078 +#define PCI_DEVICE_ID_P2010 0x0079 +#define PCI_DEVICE_ID_P1020E 0x0100 +#define PCI_DEVICE_ID_P1020 0x0101 +#define PCI_DEVICE_ID_P1021E 0x0102 +#define PCI_DEVICE_ID_P1021 0x0103 +#define PCI_DEVICE_ID_P1011E 0x0108 +#define PCI_DEVICE_ID_P1011 0x0109 +#define PCI_DEVICE_ID_P1022E 0x0110 +#define PCI_DEVICE_ID_P1022 0x0111 +#define PCI_DEVICE_ID_P1013E 0x0118 +#define PCI_DEVICE_ID_P1013 0x0119 +#define PCI_DEVICE_ID_P4080E 0x0400 +#define PCI_DEVICE_ID_P4080 0x0401 +#define PCI_DEVICE_ID_P4040E 0x0408 +#define PCI_DEVICE_ID_P4040 0x0409 +#define PCI_DEVICE_ID_P2040E 0x0410 +#define PCI_DEVICE_ID_P2040 0x0411 +#define PCI_DEVICE_ID_P3041E 0x041E +#define PCI_DEVICE_ID_P3041 0x041F +#define PCI_DEVICE_ID_P5020E 0x0420 +#define PCI_DEVICE_ID_P5020 0x0421 +#define PCI_DEVICE_ID_P5010E 0x0428 +#define PCI_DEVICE_ID_P5010 0x0429 +#define PCI_DEVICE_ID_MPC8641 0x7010 +#define PCI_DEVICE_ID_MPC8641D 0x7011 +#define PCI_DEVICE_ID_MPC8610 0x7018 + +#define PCI_VENDOR_ID_PASEMI 0x1959 + +#define PCI_VENDOR_ID_ATTANSIC 0x1969 +#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048 +#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048 + +#define PCI_VENDOR_ID_JMICRON 0x197B +#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 +#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 +#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 +#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 +#define PCI_DEVICE_ID_JMICRON_JMB364 0x2364 +#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 +#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 +#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 +#define PCI_DEVICE_ID_JMICRON_JMB369 0x2369 +#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 +#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 +#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 +#define PCI_DEVICE_ID_JMICRON_JMB385_MS 0x2388 +#define PCI_DEVICE_ID_JMICRON_JMB388_SD 0x2391 +#define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392 +#define PCI_DEVICE_ID_JMICRON_JMB390_MS 0x2393 + +#define PCI_VENDOR_ID_KORENIX 0x1982 +#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 +#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff +#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 +#define PCI_DEVICE_ID_KORENIX_JETCARDF3 
0x17ff + +#define PCI_VENDOR_ID_QMI 0x1a32 + +#define PCI_VENDOR_ID_AZWAVE 0x1a3b + +#define PCI_VENDOR_ID_ASMEDIA 0x1b21 + +#define PCI_VENDOR_ID_TEKRAM 0x1de1 +#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 + +#define PCI_VENDOR_ID_TEHUTI 0x1fc9 +#define PCI_DEVICE_ID_TEHUTI_3009 0x3009 +#define PCI_DEVICE_ID_TEHUTI_3010 0x3010 +#define PCI_DEVICE_ID_TEHUTI_3014 0x3014 + +#define PCI_VENDOR_ID_HINT 0x3388 +#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013 + +#define PCI_VENDOR_ID_3DLABS 0x3d3d +#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 +#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 + +#define PCI_VENDOR_ID_NETXEN 0x4040 +#define PCI_DEVICE_ID_NX2031_10GXSR 0x0001 +#define PCI_DEVICE_ID_NX2031_10GCX4 0x0002 +#define PCI_DEVICE_ID_NX2031_4GCU 0x0003 +#define PCI_DEVICE_ID_NX2031_IMEZ 0x0004 +#define PCI_DEVICE_ID_NX2031_HMEZ 0x0005 +#define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024 +#define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025 +#define PCI_DEVICE_ID_NX3031 0x0100 + +#define PCI_VENDOR_ID_AKS 0x416c +#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 + +#define PCI_VENDOR_ID_ACCESSIO 0x494f +#define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0 + +#define PCI_VENDOR_ID_S3 0x5333 +#define PCI_DEVICE_ID_S3_TRIO 0x8811 +#define PCI_DEVICE_ID_S3_868 0x8880 +#define PCI_DEVICE_ID_S3_968 0x88f0 +#define PCI_DEVICE_ID_S3_SAVAGE4 0x8a25 +#define PCI_DEVICE_ID_S3_PROSAVAGE8 0x8d04 +#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00 + +#define PCI_VENDOR_ID_DUNORD 0x5544 +#define PCI_DEVICE_ID_DUNORD_I3000 0x0001 + +#define PCI_VENDOR_ID_DCI 0x6666 +#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001 +#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002 +#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004 + +#define PCI_VENDOR_ID_INTEL 0x8086 +#define PCI_DEVICE_ID_INTEL_EESSC 0x0008 +#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 +#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 +#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 +#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A +#define PCI_DEVICE_ID_INTEL_PXHV 0x032C +#define PCI_DEVICE_ID_INTEL_80332_0 0x0330 +#define PCI_DEVICE_ID_INTEL_80332_1 0x0332 +#define PCI_DEVICE_ID_INTEL_80333_0 0x0370 +#define PCI_DEVICE_ID_INTEL_80333_1 0x0372 +#define PCI_DEVICE_ID_INTEL_82375 0x0482 +#define PCI_DEVICE_ID_INTEL_82424 0x0483 +#define PCI_DEVICE_ID_INTEL_82378 0x0484 +#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807 +#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808 +#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820 +#define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821 +#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822 +#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823 +#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824 +#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F +#define PCI_DEVICE_ID_INTEL_I960 0x0960 +#define PCI_DEVICE_ID_INTEL_I960RM 0x0962 +#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 +#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085 +#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F +#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 +#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 +#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 +#define PCI_DEVICE_ID_INTEL_7505_0 0x2550 +#define PCI_DEVICE_ID_INTEL_7205_0 0x255d +#define PCI_DEVICE_ID_INTEL_82437 0x122d +#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e +#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230 +#define PCI_DEVICE_ID_INTEL_82371MX 0x1234 +#define PCI_DEVICE_ID_INTEL_82441 0x1237 +#define PCI_DEVICE_ID_INTEL_82380FB 0x124b +#define PCI_DEVICE_ID_INTEL_82439 0x1250 +#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 +#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 +#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 +#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 +#define 
PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41 +#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f +#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40 +#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41 +#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31 +#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40 +#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f +#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310 +#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f +#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 +#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 +#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 +#define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415 +#define PCI_DEVICE_ID_INTEL_82801AA_6 0x2416 +#define PCI_DEVICE_ID_INTEL_82801AA_8 0x2418 +#define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420 +#define PCI_DEVICE_ID_INTEL_82801AB_1 0x2421 +#define PCI_DEVICE_ID_INTEL_82801AB_3 0x2423 +#define PCI_DEVICE_ID_INTEL_82801AB_5 0x2425 +#define PCI_DEVICE_ID_INTEL_82801AB_6 0x2426 +#define PCI_DEVICE_ID_INTEL_82801AB_8 0x2428 +#define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440 +#define PCI_DEVICE_ID_INTEL_82801BA_2 0x2443 +#define PCI_DEVICE_ID_INTEL_82801BA_4 0x2445 +#define PCI_DEVICE_ID_INTEL_82801BA_6 0x2448 +#define PCI_DEVICE_ID_INTEL_82801BA_8 0x244a +#define PCI_DEVICE_ID_INTEL_82801BA_9 0x244b +#define PCI_DEVICE_ID_INTEL_82801BA_10 0x244c +#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e +#define PCI_DEVICE_ID_INTEL_82801E_0 0x2450 +#define PCI_DEVICE_ID_INTEL_82801E_11 0x245b +#define PCI_DEVICE_ID_INTEL_82801CA_0 0x2480 +#define PCI_DEVICE_ID_INTEL_82801CA_3 0x2483 +#define PCI_DEVICE_ID_INTEL_82801CA_5 0x2485 +#define PCI_DEVICE_ID_INTEL_82801CA_6 0x2486 +#define PCI_DEVICE_ID_INTEL_82801CA_10 0x248a +#define PCI_DEVICE_ID_INTEL_82801CA_11 0x248b +#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c +#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 +#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 +#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2 +#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 +#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 +#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 +#define PCI_DEVICE_ID_INTEL_82801DB_9 0x24c9 +#define PCI_DEVICE_ID_INTEL_82801DB_10 0x24ca +#define PCI_DEVICE_ID_INTEL_82801DB_11 0x24cb +#define PCI_DEVICE_ID_INTEL_82801DB_12 0x24cc +#define PCI_DEVICE_ID_INTEL_82801EB_0 0x24d0 +#define PCI_DEVICE_ID_INTEL_82801EB_1 0x24d1 +#define PCI_DEVICE_ID_INTEL_82801EB_3 0x24d3 +#define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5 +#define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6 +#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db +#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc +#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd +#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1 +#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2 +#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4 +#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6 +#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab +#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac +#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500 +#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 +#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 +#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 +#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c +#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 +#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 +#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570 +#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572 +#define PCI_DEVICE_ID_INTEL_82875_HB 0x2578 +#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580 +#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 +#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590 +#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 +#define 
PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0 +#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5 +#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6 +#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 +#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 +#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778 +#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0 +#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 +#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 +#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641 +#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642 +#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a +#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d +#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e +#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f +#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670 +#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698 +#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b +#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e +#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8 +#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 +#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0 +#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc +#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd +#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da +#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd +#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de +#define PCI_DEVICE_ID_INTEL_ICH7_21 0x27df +#define PCI_DEVICE_ID_INTEL_ICH8_0 0x2810 +#define PCI_DEVICE_ID_INTEL_ICH8_1 0x2811 +#define PCI_DEVICE_ID_INTEL_ICH8_2 0x2812 +#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814 +#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 +#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e +#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 +#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910 +#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917 +#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912 +#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913 +#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914 +#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919 +#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930 +#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 +#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 +#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18 +#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19 +#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a +#define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 +#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41 +#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC 0x2ca3 +#define 
PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL 0x2ca8 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR 0x2ca9 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK 0x2caa +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC 0x2cab +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2 0x2d98 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2 0x2d99 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2 0x2d9a +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2 0x2d9c +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2 0x2da0 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2 0x2da1 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2 0x2da2 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2 0x2da3 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2 0x2da8 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2 0x2da9 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2 0x2daa +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2 0x2dab +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2 0x2db0 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3 +#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a +#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b +#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c +#define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e +#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 +#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 +#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 +#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c +#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e +#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 +#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582 +#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590 +#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592 +#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595 +#define PCI_DEVICE_ID_INTEL_MCH_PA1 0x3596 +#define PCI_DEVICE_ID_INTEL_MCH_PB 0x3597 +#define PCI_DEVICE_ID_INTEL_MCH_PB1 0x3598 +#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 +#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a +#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e +#define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c +#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f +#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610 +#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b +#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c +#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 +#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 +#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 +#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 +#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a +#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 +#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00 +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f +#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 
0x3c22 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB3 0x3c23 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB4 0x3c24 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB5 0x3c25 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB6 0x3c26 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e +#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f +#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f +#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 +#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 +#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6 +#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 +#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 +#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 +#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff +#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031 +#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032 +#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 +#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 +#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020 +#define PCI_DEVICE_ID_INTEL_82437VX 0x7030 +#define PCI_DEVICE_ID_INTEL_82439TX 0x7100 +#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110 +#define PCI_DEVICE_ID_INTEL_82371AB 0x7111 +#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112 +#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113 +#define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120 +#define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121 +#define PCI_DEVICE_ID_INTEL_82810_MC3 0x7122 +#define PCI_DEVICE_ID_INTEL_82810_IG3 0x7123 +#define PCI_DEVICE_ID_INTEL_82810E_MC 0x7124 +#define PCI_DEVICE_ID_INTEL_82810E_IG 0x7125 +#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180 +#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181 +#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190 +#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 +#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 +#define PCI_DEVICE_ID_INTEL_440MX 0x7195 +#define PCI_DEVICE_ID_INTEL_440MX_6 0x7196 +#define PCI_DEVICE_ID_INTEL_82443MX_0 0x7198 +#define PCI_DEVICE_ID_INTEL_82443MX_1 0x7199 +#define PCI_DEVICE_ID_INTEL_82443MX_3 0x719b +#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 +#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2 +#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 +#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119 +#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a +#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186 +#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4 +#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5 +#define PCI_DEVICE_ID_INTEL_82451NX 0x84ca +#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb +#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea +#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500 +#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004 +#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 + +#define PCI_VENDOR_ID_SCALEMP 0x8686 +#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010 + +#define PCI_VENDOR_ID_COMPUTONE 0x8e0e +#define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291 +#define PCI_DEVICE_ID_COMPUTONE_PG 0x0302 +#define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e +#define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001 +#define PCI_SUBDEVICE_ID_COMPUTONE_PG8 0x0002 +#define PCI_SUBDEVICE_ID_COMPUTONE_PG6 0x0003 + +#define PCI_VENDOR_ID_KTI 0x8e2e + +#define PCI_VENDOR_ID_ADAPTEC 0x9004 +#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078 +#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178 +#define PCI_DEVICE_ID_ADAPTEC_38602 0x3860 +#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078 +#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578 +#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038 +#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075 +#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078 +#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178 +#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078 +#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178 +#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278 +#define 
PCI_DEVICE_ID_ADAPTEC_7873 0x7378 +#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478 +#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895 +#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078 +#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178 +#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278 +#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378 +#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478 +#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578 +#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678 +#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778 +#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878 + +#define PCI_VENDOR_ID_ADAPTEC2 0x9005 +#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010 +#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011 +#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013 +#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f +#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050 +#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051 +#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f +#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080 +#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081 +#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083 +#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f +#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0 +#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1 +#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3 +#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf +#define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN 0x0500 +#define PCI_DEVICE_ID_ADAPTEC2_SCAMP 0x0503 + +#define PCI_VENDOR_ID_HOLTEK 0x9412 +#define PCI_DEVICE_ID_HOLTEK_6565 0x6565 + +#define PCI_VENDOR_ID_NETMOS 0x9710 +#define PCI_DEVICE_ID_NETMOS_9705 0x9705 +#define PCI_DEVICE_ID_NETMOS_9715 0x9715 +#define PCI_DEVICE_ID_NETMOS_9735 0x9735 +#define PCI_DEVICE_ID_NETMOS_9745 0x9745 +#define PCI_DEVICE_ID_NETMOS_9755 0x9755 +#define PCI_DEVICE_ID_NETMOS_9805 0x9805 +#define PCI_DEVICE_ID_NETMOS_9815 0x9815 +#define PCI_DEVICE_ID_NETMOS_9835 0x9835 +#define PCI_DEVICE_ID_NETMOS_9845 0x9845 +#define PCI_DEVICE_ID_NETMOS_9855 0x9855 +#define PCI_DEVICE_ID_NETMOS_9865 0x9865 +#define PCI_DEVICE_ID_NETMOS_9900 0x9900 +#define PCI_DEVICE_ID_NETMOS_9901 0x9901 +#define PCI_DEVICE_ID_NETMOS_9904 0x9904 +#define PCI_DEVICE_ID_NETMOS_9912 0x9912 +#define PCI_DEVICE_ID_NETMOS_9922 0x9922 + +#define PCI_VENDOR_ID_3COM_2 0xa727 + +#define PCI_VENDOR_ID_DIGIUM 0xd161 +#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 + +#define PCI_SUBVENDOR_ID_EXSYS 0xd84d +#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014 +#define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055 + +#define PCI_VENDOR_ID_TIGERJET 0xe159 +#define PCI_DEVICE_ID_TIGERJET_300 0x0001 +#define PCI_DEVICE_ID_TIGERJET_100 0x0002 + +#define PCI_VENDOR_ID_XILINX_RME 0xea60 +#define PCI_DEVICE_ID_RME_DIGI32 0x9896 +#define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897 +#define PCI_DEVICE_ID_RME_DIGI32_8 0x9898 + +#define PCI_VENDOR_ID_XEN 0x5853 +#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 + +#define PCI_VENDOR_ID_OCZ 0x1b85 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h new file mode 100644 index 00000000..4b608f54 --- /dev/null +++ b/include/linux/pci_regs.h @@ -0,0 +1,738 @@ +/* + * pci_regs.h + * + * PCI standard defines + * Copyright 1994, Drew Eckhardt + * Copyright 1997--1999 Martin Mares <mj@ucw.cz> + * + * For more information, please consult the following manuals (look at + * http://www.pcisig.com/ for how to get them): + * + * PCI BIOS Specification + * PCI Local Bus Specification + * PCI to PCI Bridge Specification + * PCI System Design Guide + * + * For hypertransport information, please consult the following manuals + * from http://www.hypertransport.org + * + * The Hypertransport I/O Link Specification + */ + +#ifndef LINUX_PCI_REGS_H +#define 
LINUX_PCI_REGS_H + +/* + * Under PCI, each device has 256 bytes of configuration address space, + * of which the first 64 bytes are standardized as follows: + */ +#define PCI_VENDOR_ID 0x00 /* 16 bits */ +#define PCI_DEVICE_ID 0x02 /* 16 bits */ +#define PCI_COMMAND 0x04 /* 16 bits */ +#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */ +#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */ +#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */ +#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */ +#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */ +#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */ +#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */ +#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */ +#define PCI_COMMAND_SERR 0x100 /* Enable SERR */ +#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */ +#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */ + +#define PCI_STATUS 0x06 /* 16 bits */ +#define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */ +#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */ +#define PCI_STATUS_66MHZ 0x20 /* Support 66 Mhz PCI 2.1 bus */ +#define PCI_STATUS_UDF 0x40 /* Support User Definable Features [obsolete] */ +#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */ +#define PCI_STATUS_PARITY 0x100 /* Detected parity error */ +#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */ +#define PCI_STATUS_DEVSEL_FAST 0x000 +#define PCI_STATUS_DEVSEL_MEDIUM 0x200 +#define PCI_STATUS_DEVSEL_SLOW 0x400 +#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */ +#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */ +#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */ +#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */ +#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */ + +#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */ +#define PCI_REVISION_ID 0x08 /* Revision ID */ +#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */ +#define PCI_CLASS_DEVICE 0x0a /* Device class */ + +#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */ +#define PCI_LATENCY_TIMER 0x0d /* 8 bits */ +#define PCI_HEADER_TYPE 0x0e /* 8 bits */ +#define PCI_HEADER_TYPE_NORMAL 0 +#define PCI_HEADER_TYPE_BRIDGE 1 +#define PCI_HEADER_TYPE_CARDBUS 2 + +#define PCI_BIST 0x0f /* 8 bits */ +#define PCI_BIST_CODE_MASK 0x0f /* Return result */ +#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */ +#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */ + +/* + * Base addresses specify locations in memory or I/O space. + * Decoded size can be determined by writing a value of + * 0xffffffff to the register, and reading it back. Only + * 1 bits are decoded. 
+ */ +#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */ +#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */ +#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */ +#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */ +#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */ +#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */ +#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */ +#define PCI_BASE_ADDRESS_SPACE_IO 0x01 +#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00 +#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06 +#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */ +#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */ +#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */ +#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */ +#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL) +#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL) +/* bit 1 is reserved if address_space = 1 */ + +/* Header type 0 (normal devices) */ +#define PCI_CARDBUS_CIS 0x28 +#define PCI_SUBSYSTEM_VENDOR_ID 0x2c +#define PCI_SUBSYSTEM_ID 0x2e +#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ +#define PCI_ROM_ADDRESS_ENABLE 0x01 +#define PCI_ROM_ADDRESS_MASK (~0x7ffUL) + +#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ + +/* 0x35-0x3b are reserved */ +#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */ +#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */ +#define PCI_MIN_GNT 0x3e /* 8 bits */ +#define PCI_MAX_LAT 0x3f /* 8 bits */ + +/* Header type 1 (PCI-to-PCI bridges) */ +#define PCI_PRIMARY_BUS 0x18 /* Primary bus number */ +#define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */ +#define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */ +#define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */ +#define PCI_IO_BASE 0x1c /* I/O range behind the bridge */ +#define PCI_IO_LIMIT 0x1d +#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */ +#define PCI_IO_RANGE_TYPE_16 0x00 +#define PCI_IO_RANGE_TYPE_32 0x01 +#define PCI_IO_RANGE_MASK (~0x0fUL) +#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */ +#define PCI_MEMORY_BASE 0x20 /* Memory range behind */ +#define PCI_MEMORY_LIMIT 0x22 +#define PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL +#define PCI_MEMORY_RANGE_MASK (~0x0fUL) +#define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */ +#define PCI_PREF_MEMORY_LIMIT 0x26 +#define PCI_PREF_RANGE_TYPE_MASK 0x0fUL +#define PCI_PREF_RANGE_TYPE_32 0x00 +#define PCI_PREF_RANGE_TYPE_64 0x01 +#define PCI_PREF_RANGE_MASK (~0x0fUL) +#define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */ +#define PCI_PREF_LIMIT_UPPER32 0x2c +#define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */ +#define PCI_IO_LIMIT_UPPER16 0x32 +/* 0x34 same as for htype 0 */ +/* 0x35-0x3b is reserved */ +#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */ +/* 0x3c-0x3d are same as for htype 0 */ +#define PCI_BRIDGE_CONTROL 0x3e +#define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */ +#define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */ +#define PCI_BRIDGE_CTL_ISA 0x04 /* Enable ISA mode */ +#define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */ +#define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */ +#define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */ +#define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */ + +/* Header type 2 (CardBus bridges) */ +#define 
PCI_CB_CAPABILITY_LIST 0x14 +/* 0x15 reserved */ +#define PCI_CB_SEC_STATUS 0x16 /* Secondary status */ +#define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */ +#define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */ +#define PCI_CB_SUBORDINATE_BUS 0x1a /* Subordinate bus number */ +#define PCI_CB_LATENCY_TIMER 0x1b /* CardBus latency timer */ +#define PCI_CB_MEMORY_BASE_0 0x1c +#define PCI_CB_MEMORY_LIMIT_0 0x20 +#define PCI_CB_MEMORY_BASE_1 0x24 +#define PCI_CB_MEMORY_LIMIT_1 0x28 +#define PCI_CB_IO_BASE_0 0x2c +#define PCI_CB_IO_BASE_0_HI 0x2e +#define PCI_CB_IO_LIMIT_0 0x30 +#define PCI_CB_IO_LIMIT_0_HI 0x32 +#define PCI_CB_IO_BASE_1 0x34 +#define PCI_CB_IO_BASE_1_HI 0x36 +#define PCI_CB_IO_LIMIT_1 0x38 +#define PCI_CB_IO_LIMIT_1_HI 0x3a +#define PCI_CB_IO_RANGE_MASK (~0x03UL) +/* 0x3c-0x3d are same as for htype 0 */ +#define PCI_CB_BRIDGE_CONTROL 0x3e +#define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */ +#define PCI_CB_BRIDGE_CTL_SERR 0x02 +#define PCI_CB_BRIDGE_CTL_ISA 0x04 +#define PCI_CB_BRIDGE_CTL_VGA 0x08 +#define PCI_CB_BRIDGE_CTL_MASTER_ABORT 0x20 +#define PCI_CB_BRIDGE_CTL_CB_RESET 0x40 /* CardBus reset */ +#define PCI_CB_BRIDGE_CTL_16BIT_INT 0x80 /* Enable interrupt for 16-bit cards */ +#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100 /* Prefetch enable for both memory regions */ +#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200 +#define PCI_CB_BRIDGE_CTL_POST_WRITES 0x400 +#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40 +#define PCI_CB_SUBSYSTEM_ID 0x42 +#define PCI_CB_LEGACY_MODE_BASE 0x44 /* 16-bit PC Card legacy mode base address (ExCa) */ +/* 0x48-0x7f reserved */ + +/* Capability lists */ + +#define PCI_CAP_LIST_ID 0 /* Capability ID */ +#define PCI_CAP_ID_PM 0x01 /* Power Management */ +#define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */ +#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */ +#define PCI_CAP_ID_SLOTID 0x04 /* Slot Identification */ +#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */ +#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */ +#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */ +#define PCI_CAP_ID_HT 0x08 /* HyperTransport */ +#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific */ +#define PCI_CAP_ID_DBG 0x0A /* Debug port */ +#define PCI_CAP_ID_CCRC 0x0B /* CompactPCI Central Resource Control */ +#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */ +#define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */ +#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */ +#define PCI_CAP_ID_EXP 0x10 /* PCI Express */ +#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ +#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */ +#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ +#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */ +#define PCI_CAP_SIZEOF 4 + +/* Power Management Registers */ + +#define PCI_PM_PMC 2 /* PM Capabilities Register */ +#define PCI_PM_CAP_VER_MASK 0x0007 /* Version */ +#define PCI_PM_CAP_PME_CLOCK 0x0008 /* PME clock required */ +#define PCI_PM_CAP_RESERVED 0x0010 /* Reserved field */ +#define PCI_PM_CAP_DSI 0x0020 /* Device specific initialization */ +#define PCI_PM_CAP_AUX_POWER 0x01C0 /* Auxiliary power support mask */ +#define PCI_PM_CAP_D1 0x0200 /* D1 power state support */ +#define PCI_PM_CAP_D2 0x0400 /* D2 power state support */ +#define PCI_PM_CAP_PME 0x0800 /* PME pin supported */ +#define PCI_PM_CAP_PME_MASK 0xF800 /* PME Mask of all supported states */ +#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */ +#define PCI_PM_CAP_PME_D1 0x1000 /* PME# 
from D1 */ +#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */ +#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */ +#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */ +#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ +#define PCI_PM_CTRL 4 /* PM control and status register */ +#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ +#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */ +#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ +#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */ +#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */ +#define PCI_PM_CTRL_PME_STATUS 0x8000 /* PME pin status */ +#define PCI_PM_PPB_EXTENSIONS 6 /* PPB support extensions (??) */ +#define PCI_PM_PPB_B2_B3 0x40 /* Stop clock when in D3hot (??) */ +#define PCI_PM_BPCC_ENABLE 0x80 /* Bus power/clock control enable (??) */ +#define PCI_PM_DATA_REGISTER 7 /* (??) */ +#define PCI_PM_SIZEOF 8 + +/* AGP registers */ + +#define PCI_AGP_VERSION 2 /* BCD version number */ +#define PCI_AGP_RFU 3 /* Rest of capability flags */ +#define PCI_AGP_STATUS 4 /* Status register */ +#define PCI_AGP_STATUS_RQ_MASK 0xff000000 /* Maximum number of requests - 1 */ +#define PCI_AGP_STATUS_SBA 0x0200 /* Sideband addressing supported */ +#define PCI_AGP_STATUS_64BIT 0x0020 /* 64-bit addressing supported */ +#define PCI_AGP_STATUS_FW 0x0010 /* FW transfers supported */ +#define PCI_AGP_STATUS_RATE4 0x0004 /* 4x transfer rate supported */ +#define PCI_AGP_STATUS_RATE2 0x0002 /* 2x transfer rate supported */ +#define PCI_AGP_STATUS_RATE1 0x0001 /* 1x transfer rate supported */ +#define PCI_AGP_COMMAND 8 /* Control register */ +#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */ +#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */ +#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */ +#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */ +#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */ +#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */ +#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */ +#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */ +#define PCI_AGP_SIZEOF 12 + +/* Vital Product Data */ + +#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) 
*/ +#define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */ +#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */ +#define PCI_VPD_DATA 4 /* 32-bits of data returned here */ + +/* Slot Identification */ + +#define PCI_SID_ESR 2 /* Expansion Slot Register */ +#define PCI_SID_ESR_NSLOTS 0x1f /* Number of expansion slots available */ +#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */ +#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */ + +/* Message Signalled Interrupts registers */ + +#define PCI_MSI_FLAGS 2 /* Various flags */ +#define PCI_MSI_FLAGS_64BIT 0x80 /* 64-bit addresses allowed */ +#define PCI_MSI_FLAGS_QSIZE 0x70 /* Message queue size configured */ +#define PCI_MSI_FLAGS_QMASK 0x0e /* Maximum queue size available */ +#define PCI_MSI_FLAGS_ENABLE 0x01 /* MSI feature enabled */ +#define PCI_MSI_FLAGS_MASKBIT 0x100 /* 64-bit mask bits allowed */ +#define PCI_MSI_RFU 3 /* Rest of capability flags */ +#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */ +#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */ +#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */ +#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */ +#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ +#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */ + +/* MSI-X registers */ +#define PCI_MSIX_FLAGS 2 +#define PCI_MSIX_FLAGS_QSIZE 0x7FF +#define PCI_MSIX_FLAGS_ENABLE (1 << 15) +#define PCI_MSIX_FLAGS_MASKALL (1 << 14) +#define PCI_MSIX_TABLE 4 +#define PCI_MSIX_PBA 8 +#define PCI_MSIX_FLAGS_BIRMASK (7 << 0) + +/* MSI-X entry's format */ +#define PCI_MSIX_ENTRY_SIZE 16 +#define PCI_MSIX_ENTRY_LOWER_ADDR 0 +#define PCI_MSIX_ENTRY_UPPER_ADDR 4 +#define PCI_MSIX_ENTRY_DATA 8 +#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1 + +/* CompactPCI Hotswap Register */ + +#define PCI_CHSWP_CSR 2 /* Control and Status Register */ +#define PCI_CHSWP_DHA 0x01 /* Device Hiding Arm */ +#define PCI_CHSWP_EIM 0x02 /* ENUM# Signal Mask */ +#define PCI_CHSWP_PIE 0x04 /* Pending Insert or Extract */ +#define PCI_CHSWP_LOO 0x08 /* LED On / Off */ +#define PCI_CHSWP_PI 0x30 /* Programming Interface */ +#define PCI_CHSWP_EXT 0x40 /* ENUM# status - extraction */ +#define PCI_CHSWP_INS 0x80 /* ENUM# status - insertion */ + +/* PCI Advanced Feature registers */ + +#define PCI_AF_LENGTH 2 +#define PCI_AF_CAP 3 +#define PCI_AF_CAP_TP 0x01 +#define PCI_AF_CAP_FLR 0x02 +#define PCI_AF_CTRL 4 +#define PCI_AF_CTRL_FLR 0x01 +#define PCI_AF_STATUS 5 +#define PCI_AF_STATUS_TP 0x01 + +/* PCI-X registers */ + +#define PCI_X_CMD 2 /* Modes & Features */ +#define PCI_X_CMD_DPERR_E 0x0001 /* Data Parity Error Recovery Enable */ +#define PCI_X_CMD_ERO 0x0002 /* Enable Relaxed Ordering */ +#define PCI_X_CMD_READ_512 0x0000 /* 512 byte maximum read byte count */ +#define PCI_X_CMD_READ_1K 0x0004 /* 1Kbyte maximum read byte count */ +#define PCI_X_CMD_READ_2K 0x0008 /* 2Kbyte maximum read byte count */ +#define PCI_X_CMD_READ_4K 0x000c /* 4Kbyte maximum read byte count */ +#define PCI_X_CMD_MAX_READ 0x000c /* Max Memory Read Byte Count */ + /* Max # of outstanding split transactions */ +#define PCI_X_CMD_SPLIT_1 0x0000 /* Max 1 */ +#define PCI_X_CMD_SPLIT_2 0x0010 /* Max 2 */ +#define PCI_X_CMD_SPLIT_3 0x0020 /* Max 3 */ +#define PCI_X_CMD_SPLIT_4 0x0030 /* Max 4 */ +#define PCI_X_CMD_SPLIT_8 0x0040 /* Max 8 */ +#define PCI_X_CMD_SPLIT_12 0x0050 /* Max 12 */ +#define PCI_X_CMD_SPLIT_16 0x0060 /* Max 16 */ +#define PCI_X_CMD_SPLIT_32 
0x0070 /* Max 32 */ +#define PCI_X_CMD_MAX_SPLIT 0x0070 /* Max Outstanding Split Transactions */ +#define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */ +#define PCI_X_STATUS 4 /* PCI-X capabilities */ +#define PCI_X_STATUS_DEVFN 0x000000ff /* A copy of devfn */ +#define PCI_X_STATUS_BUS 0x0000ff00 /* A copy of bus nr */ +#define PCI_X_STATUS_64BIT 0x00010000 /* 64-bit device */ +#define PCI_X_STATUS_133MHZ 0x00020000 /* 133 MHz capable */ +#define PCI_X_STATUS_SPL_DISC 0x00040000 /* Split Completion Discarded */ +#define PCI_X_STATUS_UNX_SPL 0x00080000 /* Unexpected Split Completion */ +#define PCI_X_STATUS_COMPLEX 0x00100000 /* Device Complexity */ +#define PCI_X_STATUS_MAX_READ 0x00600000 /* Designed Max Memory Read Count */ +#define PCI_X_STATUS_MAX_SPLIT 0x03800000 /* Designed Max Outstanding Split Transactions */ +#define PCI_X_STATUS_MAX_CUM 0x1c000000 /* Designed Max Cumulative Read Size */ +#define PCI_X_STATUS_SPL_ERR 0x20000000 /* Rcvd Split Completion Error Msg */ +#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */ +#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */ + +/* PCI Bridge Subsystem ID registers */ + +#define PCI_SSVID_VENDOR_ID 4 /* PCI-Bridge subsystem vendor id register */ +#define PCI_SSVID_DEVICE_ID 6 /* PCI-Bridge subsystem device id register */ + +/* PCI Express capability registers */ + +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */ +#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIE Bridge */ +#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */ +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVCAP_PAYLOAD 0x07 /* Max_Payload_Size */ +#define PCI_EXP_DEVCAP_PHANTOM 0x18 /* Phantom functions */ +#define PCI_EXP_DEVCAP_EXT_TAG 0x20 /* Extended tags */ +#define PCI_EXP_DEVCAP_L0S 0x1c0 /* L0s Acceptable Latency */ +#define PCI_EXP_DEVCAP_L1 0xe00 /* L1 Acceptable Latency */ +#define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */ +#define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */ +#define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */ +#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */ +#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ +#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL 8 /* Device Control */ +#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */ +#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */ +#define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */ +#define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. 
*/ +#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */ +#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */ +#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */ +#define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */ +#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */ +#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */ +#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */ +#define PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */ +#define PCI_EXP_DEVSTA_FED 0x04 /* Fatal Error Detected */ +#define PCI_EXP_DEVSTA_URD 0x08 /* Unsupported Request Detected */ +#define PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */ +#define PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */ +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */ +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ +#define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */ +#define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* L1 Clock Power Management */ +#define PCI_EXP_LNKCAP_SDERC 0x00080000 /* Surprise Down Error Reporting Capable */ +#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */ +#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */ +#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */ +#define PCI_EXP_LNKCTL 16 /* Link Control */ +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#define PCI_EXP_LNKCTL_RCB 0x0008 /* Read Completion Boundary */ +#define PCI_EXP_LNKCTL_LD 0x0010 /* Link Disable */ +#define PCI_EXP_LNKCTL_RL 0x0020 /* Retrain Link */ +#define PCI_EXP_LNKCTL_CCC 0x0040 /* Common Clock Configuration */ +#define PCI_EXP_LNKCTL_ES 0x0080 /* Extended Synch */ +#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100 /* Enable clkreq */ +#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */ +#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */ +#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Lnk Autonomous Bandwidth Interrupt Enable */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */ +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */ +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Nogotiated Link Width */ +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */ +#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */ +#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */ +#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */ +#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */ +#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */ +#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */ +#define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */ +#define PCI_EXP_SLTCAP_AIP 0x00000008 /* Attention Indicator Present */ +#define PCI_EXP_SLTCAP_PIP 0x00000010 /* Power 
Indicator Present */ +#define PCI_EXP_SLTCAP_HPS 0x00000020 /* Hot-Plug Surprise */ +#define PCI_EXP_SLTCAP_HPC 0x00000040 /* Hot-Plug Capable */ +#define PCI_EXP_SLTCAP_SPLV 0x00007f80 /* Slot Power Limit Value */ +#define PCI_EXP_SLTCAP_SPLS 0x00018000 /* Slot Power Limit Scale */ +#define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */ +#define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */ +#define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */ +#define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */ +#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */ +#define PCI_EXP_SLTCTL_PDCE 0x0008 /* Presence Detect Changed Enable */ +#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ +#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ +#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ +#define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */ +#define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */ +#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ +#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */ +#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */ +#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */ +#define PCI_EXP_SLTSTA_PDC 0x0008 /* Presence Detect Changed */ +#define PCI_EXP_SLTSTA_CC 0x0010 /* Command Completed */ +#define PCI_EXP_SLTSTA_MRLSS 0x0020 /* MRL Sensor State */ +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ +#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */ +#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCTL_SECEE 0x01 /* System Error on Correctable Error */ +#define PCI_EXP_RTCTL_SENFEE 0x02 /* System Error on Non-Fatal Error */ +#define PCI_EXP_RTCTL_SEFEE 0x04 /* System Error on Fatal Error */ +#define PCI_EXP_RTCTL_PMEIE 0x08 /* PME Interrupt Enable */ +#define PCI_EXP_RTCTL_CRSSVE 0x10 /* CRS Software Visibility Enable */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ +#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ +#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */ +#define PCI_EXP_DEVCAP2_LTR 0x800 /* Latency tolerance reporting */ +#define PCI_EXP_OBFF_MASK 0xc0000 /* OBFF support mechanism */ +#define PCI_EXP_OBFF_MSG 0x40000 /* New message signaling */ +#define PCI_EXP_OBFF_WAKE 0x80000 /* Re-use WAKE# for OBFF */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ +#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */ +#define PCI_EXP_IDO_REQ_EN 0x100 /* ID-based ordering request enable */ +#define PCI_EXP_IDO_CMP_EN 0x200 /* ID-based ordering completion enable */ +#define PCI_EXP_LTR_EN 0x400 /* Latency tolerance reporting */ +#define PCI_EXP_OBFF_MSGA_EN 0x2000 /* OBFF enable with Message type A */ +#define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */ +#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */ +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ +#define 
PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ + +/* Extended Capabilities (PCI-X 2.0 and Express) */ +#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) +#define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf) +#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc) + +#define PCI_EXT_CAP_ID_ERR 1 +#define PCI_EXT_CAP_ID_VC 2 +#define PCI_EXT_CAP_ID_DSN 3 +#define PCI_EXT_CAP_ID_PWR 4 +#define PCI_EXT_CAP_ID_VNDR 11 +#define PCI_EXT_CAP_ID_ACS 13 +#define PCI_EXT_CAP_ID_ARI 14 +#define PCI_EXT_CAP_ID_ATS 15 +#define PCI_EXT_CAP_ID_SRIOV 16 +#define PCI_EXT_CAP_ID_PRI 19 +#define PCI_EXT_CAP_ID_LTR 24 +#define PCI_EXT_CAP_ID_PASID 27 + +/* Advanced Error Reporting */ +#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ +#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */ +#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */ +#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */ +#define PCI_ERR_UNC_FCP 0x00002000 /* Flow Control Protocol */ +#define PCI_ERR_UNC_COMP_TIME 0x00004000 /* Completion Timeout */ +#define PCI_ERR_UNC_COMP_ABORT 0x00008000 /* Completer Abort */ +#define PCI_ERR_UNC_UNX_COMP 0x00010000 /* Unexpected Completion */ +#define PCI_ERR_UNC_RX_OVER 0x00020000 /* Receiver Overflow */ +#define PCI_ERR_UNC_MALF_TLP 0x00040000 /* Malformed TLP */ +#define PCI_ERR_UNC_ECRC 0x00080000 /* ECRC Error Status */ +#define PCI_ERR_UNC_UNSUP 0x00100000 /* Unsupported Request */ +#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */ + /* Same bits as above */ +#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */ + /* Same bits as above */ +#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */ +#define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */ +#define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */ +#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */ +#define PCI_ERR_COR_REP_ROLL 0x00000100 /* REPLAY_NUM Rollover */ +#define PCI_ERR_COR_REP_TIMER 0x00001000 /* Replay Timer Timeout */ +#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */ + /* Same bits as above */ +#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */ +#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */ +#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */ +#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */ +#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */ +#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */ +#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */ +#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */ +/* Correctable Err Reporting Enable */ +#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 +/* Non-fatal Err Reporting Enable */ +#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 +/* Fatal Err Reporting Enable */ +#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 +#define PCI_ERR_ROOT_STATUS 48 +#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */ +/* Multi ERR_COR Received */ +#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 +/* ERR_FATAL/NONFATAL Received */ +#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 +/* Multi ERR_FATAL/NONFATAL Received */ +#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 +#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */ +#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */ +#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */ +#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ + +/* Virtual Channel */ +#define PCI_VC_PORT_REG1 4 +#define PCI_VC_PORT_REG2 8 +#define
PCI_VC_PORT_CTRL 12 +#define PCI_VC_PORT_STATUS 14 +#define PCI_VC_RES_CAP 16 +#define PCI_VC_RES_CTRL 20 +#define PCI_VC_RES_STATUS 26 + +/* Power Budgeting */ +#define PCI_PWR_DSR 4 /* Data Select Register */ +#define PCI_PWR_DATA 8 /* Data Register */ +#define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */ +#define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */ +#define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */ +#define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */ +#define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */ +#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */ +#define PCI_PWR_CAP 12 /* Capability */ +#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */ + +/* + * Hypertransport sub capability types + * + * Unfortunately there are both 3 bit and 5 bit capability types defined + * in the HT spec, catering for that is a little messy. You probably don't + * want to use these directly, just use pci_find_ht_capability() and it + * will do the right thing for you. + */ +#define HT_3BIT_CAP_MASK 0xE0 +#define HT_CAPTYPE_SLAVE 0x00 /* Slave/Primary link configuration */ +#define HT_CAPTYPE_HOST 0x20 /* Host/Secondary link configuration */ + +#define HT_5BIT_CAP_MASK 0xF8 +#define HT_CAPTYPE_IRQ 0x80 /* IRQ Configuration */ +#define HT_CAPTYPE_REMAPPING_40 0xA0 /* 40 bit address remapping */ +#define HT_CAPTYPE_REMAPPING_64 0xA2 /* 64 bit address remapping */ +#define HT_CAPTYPE_UNITID_CLUMP 0x90 /* Unit ID clumping */ +#define HT_CAPTYPE_EXTCONF 0x98 /* Extended Configuration Space Access */ +#define HT_CAPTYPE_MSI_MAPPING 0xA8 /* MSI Mapping Capability */ +#define HT_MSI_FLAGS 0x02 /* Offset to flags */ +#define HT_MSI_FLAGS_ENABLE 0x1 /* Mapping enable */ +#define HT_MSI_FLAGS_FIXED 0x2 /* Fixed mapping only */ +#define HT_MSI_FIXED_ADDR 0x00000000FEE00000ULL /* Fixed addr */ +#define HT_MSI_ADDR_LO 0x04 /* Offset to low addr bits */ +#define HT_MSI_ADDR_LO_MASK 0xFFF00000 /* Low address bit mask */ +#define HT_MSI_ADDR_HI 0x08 /* Offset to high addr bits */ +#define HT_CAPTYPE_DIRECT_ROUTE 0xB0 /* Direct routing configuration */ +#define HT_CAPTYPE_VCSET 0xB8 /* Virtual Channel configuration */ +#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */ +#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */ +#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */ + +/* Alternative Routing-ID Interpretation */ +#define PCI_ARI_CAP 0x04 /* ARI Capability Register */ +#define PCI_ARI_CAP_MFVC 0x0001 /* MFVC Function Groups Capability */ +#define PCI_ARI_CAP_ACS 0x0002 /* ACS Function Groups Capability */ +#define PCI_ARI_CAP_NFN(x) (((x) >> 8) & 0xff) /* Next Function Number */ +#define PCI_ARI_CTRL 0x06 /* ARI Control Register */ +#define PCI_ARI_CTRL_MFVC 0x0001 /* MFVC Function Groups Enable */ +#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */ +#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */ + +/* Address Translation Service */ +#define PCI_ATS_CAP 0x04 /* ATS Capability Register */ +#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */ +#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */ +#define PCI_ATS_CTRL 0x06 /* ATS Control Register */ +#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */ +#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */ +#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */ + +/* Page Request Interface */ +#define PCI_PRI_CTRL 0x04 /* PRI 
control register */ +#define PCI_PRI_CTRL_ENABLE 0x01 /* Enable */ +#define PCI_PRI_CTRL_RESET 0x02 /* Reset */ +#define PCI_PRI_STATUS 0x06 /* PRI status register */ +#define PCI_PRI_STATUS_RF 0x001 /* Response Failure */ +#define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */ +#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */ +#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */ +#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */ + +/* PASID capability */ +#define PCI_PASID_CAP 0x04 /* PASID feature register */ +#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */ +#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */ +#define PCI_PASID_CTRL 0x06 /* PASID control register */ +#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */ +#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */ +#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */ + +/* Single Root I/O Virtualization */ +#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */ +#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */ +#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */ +#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */ +#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */ +#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */ +#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */ +#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */ +#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */ +#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */ +#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */ +#define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */ +#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */ +#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */ +#define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */ +#define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */ +#define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */ +#define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */ +#define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */ +#define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */ +#define PCI_SRIOV_BAR 0x24 /* VF BAR0 */ +#define PCI_SRIOV_NUM_BARS 6 /* Number of VF BARs */ +#define PCI_SRIOV_VFM 0x3c /* VF Migration State Array Offset*/ +#define PCI_SRIOV_VFM_BIR(x) ((x) & 7) /* State BIR */ +#define PCI_SRIOV_VFM_OFFSET(x) ((x) & ~7) /* State Offset */ +#define PCI_SRIOV_VFM_UA 0x0 /* Inactive.Unavailable */ +#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */ +#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */ +#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */ + +#define PCI_LTR_MAX_SNOOP_LAT 0x4 +#define PCI_LTR_MAX_NOSNOOP_LAT 0x6 +#define PCI_LTR_VALUE_MASK 0x000003ff +#define PCI_LTR_SCALE_MASK 0x00001c00 +#define PCI_LTR_SCALE_SHIFT 10 + +/* Access Control Service */ +#define PCI_ACS_CAP 0x04 /* ACS Capability Register */ +#define PCI_ACS_SV 0x01 /* Source Validation */ +#define PCI_ACS_TB 0x02 /* Translation Blocking */ +#define PCI_ACS_RR 0x04 /* P2P Request Redirect */ +#define PCI_ACS_CR 0x08 /* P2P Completion Redirect */ +#define PCI_ACS_UF 0x10 /* Upstream Forwarding */ +#define PCI_ACS_EC 0x20 /* P2P Egress Control */ +#define PCI_ACS_DT 0x40 /* Direct Translated P2P */ +#define PCI_ACS_CTRL 0x06 /* ACS Control Register */ +#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */ + +#endif /* LINUX_PCI_REGS_H */ diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h new file mode 100644 index 00000000..6775532b --- /dev/null +++
b/include/linux/pcieport_if.h @@ -0,0 +1,68 @@ +/* + * File: pcieport_if.h + * Purpose: PCI Express Port Bus Driver's IF Data Structure + * + * Copyright (C) 2004 Intel + * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) + */ + +#ifndef _PCIEPORT_IF_H_ +#define _PCIEPORT_IF_H_ + +/* Port Type */ +#define PCIE_ANY_PORT (~0) + +/* Service Type */ +#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */ +#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT) +#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */ +#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT) +#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */ +#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) +#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ +#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) + +struct pcie_device { + int irq; /* Service IRQ/MSI/MSI-X Vector */ + struct pci_dev *port; /* Root/Upstream/Downstream Port */ + u32 service; /* Port service this device represents */ + void *priv_data; /* Service Private Data */ + struct device device; /* Generic Device Interface */ +}; +#define to_pcie_device(d) container_of(d, struct pcie_device, device) + +static inline void set_service_data(struct pcie_device *dev, void *data) +{ + dev->priv_data = data; +} + +static inline void* get_service_data(struct pcie_device *dev) +{ + return dev->priv_data; +} + +struct pcie_port_service_driver { + const char *name; + int (*probe) (struct pcie_device *dev); + void (*remove) (struct pcie_device *dev); + int (*suspend) (struct pcie_device *dev); + int (*resume) (struct pcie_device *dev); + + /* Service Error Recovery Handler */ + struct pci_error_handlers *err_handler; + + /* Link Reset Capability - AER service driver specific */ + pci_ers_result_t (*reset_link) (struct pci_dev *dev); + + int port_type; /* Type of the port this driver can handle */ + u32 service; /* Port service this device represents */ + + struct device_driver driver; +}; +#define to_service_driver(d) \ + container_of(d, struct pcie_port_service_driver, driver) + +extern int pcie_port_service_register(struct pcie_port_service_driver *new); +extern void pcie_port_service_unregister(struct pcie_port_service_driver *new); + +#endif /* _PCIEPORT_IF_H_ */ diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h new file mode 100644 index 00000000..2bb62bf2 --- /dev/null +++ b/include/linux/pda_power.h @@ -0,0 +1,42 @@ +/* + * Common power driver for PDAs and phones with one or two external + * power supplies (AC/USB) connected to main and backup batteries, + * and optional builtin charger. + * + * Copyright © 2007 Anton Vorontsov <cbou@mail.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
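 *
 * For illustration, a minimal sketch of the platform data a board file might
 * hand to this driver; the callback, GPIO constant and battery name below are
 * made up for the example and are not requirements of this header:
 *
 *	static int board_is_ac_online(void)
 *	{
 *		return gpio_get_value(GPIO_AC_DETECT);
 *	}
 *
 *	static char *board_batteries[] = { "main-battery" };
 *
 *	static struct pda_power_pdata board_power_pdata = {
 *		.is_ac_online     = board_is_ac_online,
 *		.supplied_to      = board_batteries,
 *		.num_supplicants  = ARRAY_SIZE(board_batteries),
 *		.polling_interval = 2000,
 *	};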
+ */ + +#ifndef __PDA_POWER_H__ +#define __PDA_POWER_H__ + +#define PDA_POWER_CHARGE_AC (1 << 0) +#define PDA_POWER_CHARGE_USB (1 << 1) + +struct device; + +struct pda_power_pdata { + int (*init)(struct device *dev); + int (*is_ac_online)(void); + int (*is_usb_online)(void); + void (*set_charge)(int flags); + void (*exit)(struct device *dev); + int (*suspend)(pm_message_t state); + int (*resume)(void); + + char **supplied_to; + size_t num_supplicants; + + unsigned int wait_for_status; /* msecs, default is 500 */ + unsigned int wait_for_charger; /* msecs, default is 500 */ + unsigned int polling_interval; /* msecs, default is 2000 */ + + unsigned long ac_max_uA; /* current to draw when on AC */ + + bool use_otg_notifier; +}; + +#endif /* __PDA_POWER_H__ */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h new file mode 100644 index 00000000..27ef6b19 --- /dev/null +++ b/include/linux/percpu-defs.h @@ -0,0 +1,163 @@ +#ifndef _LINUX_PERCPU_DEFS_H +#define _LINUX_PERCPU_DEFS_H + +/* + * Base implementations of per-CPU variable declarations and definitions, where + * the section in which the variable is to be placed is provided by the + * 'sec' argument. This may be used to affect the parameters governing the + * variable's storage. + * + * NOTE! The sections for the DECLARE and for the DEFINE must match, lest + * linkage errors occur due the compiler generating the wrong code to access + * that section. + */ +#define __PCPU_ATTRS(sec) \ + __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \ + PER_CPU_ATTRIBUTES + +#define __PCPU_DUMMY_ATTRS \ + __attribute__((section(".discard"), unused)) + +/* + * Macro which verifies @ptr is a percpu pointer without evaluating + * @ptr. This is to be used in percpu accessors to verify that the + * input parameter is a percpu pointer. + */ +#define __verify_pcpu_ptr(ptr) do { \ + const void __percpu *__vpp_verify = (typeof(ptr))NULL; \ + (void)__vpp_verify; \ +} while (0) + +/* + * s390 and alpha modules require percpu variables to be defined as + * weak to force the compiler to generate GOT based external + * references for them. This is necessary because percpu sections + * will be located outside of the usually addressable area. + * + * This definition puts the following two extra restrictions when + * defining percpu variables. + * + * 1. The symbol must be globally unique, even the static ones. + * 2. Static percpu variables cannot be defined inside a function. + * + * Archs which need weak percpu definitions should define + * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary. + * + * To ensure that the generic code observes the above two + * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak + * definition is used for all cases. + */ +#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU) +/* + * __pcpu_scope_* dummy variable is used to enforce scope. It + * receives the static modifier when it's used in front of + * DEFINE_PER_CPU() and will trigger build failure if + * DECLARE_PER_CPU() is used for the same variable. + * + * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness + * such that hidden weak symbol collision, which will cause unrelated + * variables to share the same address, can be detected during build. 
+ */ +#define DECLARE_PER_CPU_SECTION(type, name, sec) \ + extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ + extern __PCPU_ATTRS(sec) __typeof__(type) name + +#define DEFINE_PER_CPU_SECTION(type, name, sec) \ + __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ + extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ + __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ + __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ + __typeof__(type) name +#else +/* + * Normal declaration and definition macros. + */ +#define DECLARE_PER_CPU_SECTION(type, name, sec) \ + extern __PCPU_ATTRS(sec) __typeof__(type) name + +#define DEFINE_PER_CPU_SECTION(type, name, sec) \ + __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \ + __typeof__(type) name +#endif + +/* + * Variant on the per-CPU variable declaration/definition theme used for + * ordinary per-CPU variables. + */ +#define DECLARE_PER_CPU(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "") + +#define DEFINE_PER_CPU(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "") + +/* + * Declaration/definition used for per-CPU variables that must come first in + * the set of variables. + */ +#define DECLARE_PER_CPU_FIRST(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) + +#define DEFINE_PER_CPU_FIRST(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) + +/* + * Declaration/definition used for per-CPU variables that must be cacheline + * aligned under SMP conditions so that, whilst a particular instance of the + * data corresponds to a particular CPU, inefficiencies due to direct access by + * other CPUs are reduced by preventing the data from unnecessarily spanning + * cachelines. + * + * An example of this would be statistical data, where each CPU's set of data + * is updated by that CPU alone, but the data from across all CPUs is collated + * by a CPU processing a read from a proc file. + */ +#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ + ____cacheline_aligned_in_smp + +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ + ____cacheline_aligned_in_smp + +#define DECLARE_PER_CPU_ALIGNED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ + ____cacheline_aligned + +#define DEFINE_PER_CPU_ALIGNED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ + ____cacheline_aligned + +/* + * Declaration/definition used for per-CPU variables that must be page aligned. + */ +#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \ + __aligned(PAGE_SIZE) + +#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ + __aligned(PAGE_SIZE) + +/* + * Declaration/definition used for per-CPU variables that must be read mostly. + */ +#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "..readmostly") + +#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "..readmostly") + +/* + * Intermodule exports for per-CPU variables. sparse forgets about + * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to + * noop if __CHECKER__. 
+ */ +#ifndef __CHECKER__ +#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var) +#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var) +#else +#define EXPORT_PER_CPU_SYMBOL(var) +#define EXPORT_PER_CPU_SYMBOL_GPL(var) +#endif + +#endif /* _LINUX_PERCPU_DEFS_H */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h new file mode 100644 index 00000000..21638ae1 --- /dev/null +++ b/include/linux/percpu.h @@ -0,0 +1,812 @@ +#ifndef __LINUX_PERCPU_H +#define __LINUX_PERCPU_H + +#include <linux/preempt.h> +#include <linux/smp.h> +#include <linux/cpumask.h> +#include <linux/pfn.h> +#include <linux/init.h> + +#include <asm/percpu.h> + +/* enough to cover all DEFINE_PER_CPUs in modules */ +#ifdef CONFIG_MODULES +#define PERCPU_MODULE_RESERVE (8 << 10) +#else +#define PERCPU_MODULE_RESERVE 0 +#endif + +#ifndef PERCPU_ENOUGH_ROOM +#define PERCPU_ENOUGH_ROOM \ + (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ + PERCPU_MODULE_RESERVE) +#endif + +/* + * Must be an lvalue. Since @var must be a simple identifier, + * we force a syntax error here if it isn't. + */ +#define get_cpu_var(var) (*({ \ + preempt_disable(); \ + &__get_cpu_var(var); })) + +/* + * The weird & is necessary because sparse considers (void)(var) to be + * a direct dereference of percpu variable (var). + */ +#define put_cpu_var(var) do { \ + (void)&(var); \ + preempt_enable(); \ +} while (0) + +#define get_cpu_ptr(var) ({ \ + preempt_disable(); \ + this_cpu_ptr(var); }) + +#define put_cpu_ptr(var) do { \ + (void)(var); \ + preempt_enable(); \ +} while (0) + +/* minimum unit size, also is the maximum supported allocation size */ +#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) + +/* + * Percpu allocator can serve percpu allocations before slab is + * initialized which allows slab to depend on the percpu allocator. + * The following two parameters decide how much resource to + * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or + * larger than PERCPU_DYNAMIC_EARLY_SIZE. + */ +#define PERCPU_DYNAMIC_EARLY_SLOTS 128 +#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10) + +/* + * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy + * back on the first chunk for dynamic percpu allocation if arch is + * manually allocating and mapping it for faster access (as a part of + * large page mapping for example). + * + * The following values give between one and two pages of free space + * after typical minimal boot (2-way SMP, single disk and NIC) with + * both defconfig and a distro config on x86_64 and 32. More + * intelligent way to determine this would be nice. 
+ */ +#if BITS_PER_LONG > 32 +#define PERCPU_DYNAMIC_RESERVE (20 << 10) +#else +#define PERCPU_DYNAMIC_RESERVE (12 << 10) +#endif + +extern void *pcpu_base_addr; +extern const unsigned long *pcpu_unit_offsets; + +struct pcpu_group_info { + int nr_units; /* aligned # of units */ + unsigned long base_offset; /* base address offset */ + unsigned int *cpu_map; /* unit->cpu map, empty + * entries contain NR_CPUS */ +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; /* internal, don't use */ + int nr_groups; /* 0 if grouping unnecessary */ + struct pcpu_group_info groups[]; +}; + +enum pcpu_fc { + PCPU_FC_AUTO, + PCPU_FC_EMBED, + PCPU_FC_PAGE, + + PCPU_FC_NR, +}; +extern const char *pcpu_fc_names[PCPU_FC_NR]; + +extern enum pcpu_fc pcpu_chosen_fc; + +typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size, + size_t align); +typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size); +typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr); +typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to); + +extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, + int nr_units); +extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai); + +extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, + void *base_addr); + +#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK +extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, + size_t atom_size, + pcpu_fc_cpu_distance_fn_t cpu_distance_fn, + pcpu_fc_alloc_fn_t alloc_fn, + pcpu_fc_free_fn_t free_fn); +#endif + +#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK +extern int __init pcpu_page_first_chunk(size_t reserved_size, + pcpu_fc_alloc_fn_t alloc_fn, + pcpu_fc_free_fn_t free_fn, + pcpu_fc_populate_pte_fn_t populate_pte_fn); +#endif + +/* + * Use this to get to a cpu's version of the per-cpu object + * dynamically allocated. Non-atomic access to the current CPU's + * version should probably be combined with get_cpu()/put_cpu(). + */ +#ifdef CONFIG_SMP +#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) +#else +#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) +#endif + +extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); +extern bool is_kernel_percpu_address(unsigned long addr); + +#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) +extern void __init setup_per_cpu_areas(void); +#endif +extern void __init percpu_init_late(void); + +extern void __percpu *__alloc_percpu(size_t size, size_t align); +extern void free_percpu(void __percpu *__pdata); +extern phys_addr_t per_cpu_ptr_to_phys(void *addr); + +#define alloc_percpu(type) \ + (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) + +/* + * Optional methods for optimized non-lvalue per-cpu variable access. + * + * @var can be a percpu variable or a field of it and its size should + * equal char, int or long. percpu_read() evaluates to a lvalue and + * all others to void. + * + * These operations are guaranteed to be atomic. + * The generic versions disable interrupts. Archs are + * encouraged to implement single-instruction alternatives which don't + * require protection. 
+ */ +#ifndef percpu_read +# define percpu_read(var) \ + ({ \ + typeof(var) *pr_ptr__ = &(var); \ + typeof(var) pr_ret__; \ + pr_ret__ = get_cpu_var(*pr_ptr__); \ + put_cpu_var(*pr_ptr__); \ + pr_ret__; \ + }) +#endif + +#define __percpu_generic_to_op(var, val, op) \ +do { \ + typeof(var) *pgto_ptr__ = &(var); \ + get_cpu_var(*pgto_ptr__) op val; \ + put_cpu_var(*pgto_ptr__); \ +} while (0) + +#ifndef percpu_write +# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =) +#endif + +#ifndef percpu_add +# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=) +#endif + +#ifndef percpu_sub +# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=) +#endif + +#ifndef percpu_and +# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=) +#endif + +#ifndef percpu_or +# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=) +#endif + +#ifndef percpu_xor +# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) +#endif + +/* + * Branching function to split up a function into a set of functions that + * are called for different scalar sizes of the objects handled. + */ + +extern void __bad_size_call_parameter(void); + +#define __pcpu_size_call_return(stem, variable) \ +({ typeof(variable) pscr_ret__; \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: pscr_ret__ = stem##1(variable);break; \ + case 2: pscr_ret__ = stem##2(variable);break; \ + case 4: pscr_ret__ = stem##4(variable);break; \ + case 8: pscr_ret__ = stem##8(variable);break; \ + default: \ + __bad_size_call_parameter();break; \ + } \ + pscr_ret__; \ +}) + +#define __pcpu_size_call_return2(stem, variable, ...) \ +({ \ + typeof(variable) pscr2_ret__; \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ + case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ + case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ + case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pscr2_ret__; \ +}) + +/* + * Special handling for cmpxchg_double. cmpxchg_double is passed two + * percpu variables. The first has to be aligned to a double word + * boundary and the second has to follow directly thereafter. + * We enforce this on all architectures even if they don't support + * a double cmpxchg instruction, since it's a cheap requirement, and it + * avoids breaking the requirement for architectures with the instruction. + */ +#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ +({ \ + bool pdcrb_ret__; \ + __verify_pcpu_ptr(&pcp1); \ + BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ + VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \ + VM_BUG_ON((unsigned long)(&pcp2) != \ + (unsigned long)(&pcp1) + sizeof(pcp1)); \ + switch(sizeof(pcp1)) { \ + case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ + case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ + case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ + case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pdcrb_ret__; \ +}) + +#define __pcpu_size_call(stem, variable, ...) 
\ +do { \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: stem##1(variable, __VA_ARGS__);break; \ + case 2: stem##2(variable, __VA_ARGS__);break; \ + case 4: stem##4(variable, __VA_ARGS__);break; \ + case 8: stem##8(variable, __VA_ARGS__);break; \ + default: \ + __bad_size_call_parameter();break; \ + } \ +} while (0) + +/* + * Optimized manipulation for memory allocated through the per cpu + * allocator or for addresses of per cpu variables. + * + * These operation guarantee exclusivity of access for other operations + * on the *same* processor. The assumption is that per cpu data is only + * accessed by a single processor instance (the current one). + * + * The first group is used for accesses that must be done in a + * preemption safe way since we know that the context is not preempt + * safe. Interrupts may occur. If the interrupt modifies the variable + * too then RMW actions will not be reliable. + * + * The arch code can provide optimized functions in two ways: + * + * 1. Override the function completely. F.e. define this_cpu_add(). + * The arch must then ensure that the various scalar format passed + * are handled correctly. + * + * 2. Provide functions for certain scalar sizes. F.e. provide + * this_cpu_add_2() to provide per cpu atomic operations for 2 byte + * sized RMW actions. If arch code does not provide operations for + * a scalar size then the fallback in the generic code will be + * used. + */ + +#define _this_cpu_generic_read(pcp) \ +({ typeof(pcp) ret__; \ + preempt_disable(); \ + ret__ = *this_cpu_ptr(&(pcp)); \ + preempt_enable(); \ + ret__; \ +}) + +#ifndef this_cpu_read +# ifndef this_cpu_read_1 +# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp) +# endif +# ifndef this_cpu_read_2 +# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp) +# endif +# ifndef this_cpu_read_4 +# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp) +# endif +# ifndef this_cpu_read_8 +# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp) +# endif +# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) +#endif + +#define _this_cpu_generic_to_op(pcp, val, op) \ +do { \ + unsigned long flags; \ + raw_local_irq_save(flags); \ + *__this_cpu_ptr(&(pcp)) op val; \ + raw_local_irq_restore(flags); \ +} while (0) + +#ifndef this_cpu_write +# ifndef this_cpu_write_1 +# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef this_cpu_write_2 +# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef this_cpu_write_4 +# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef this_cpu_write_8 +# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) +# endif +# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) +#endif + +#ifndef this_cpu_add +# ifndef this_cpu_add_1 +# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef this_cpu_add_2 +# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef this_cpu_add_4 +# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef this_cpu_add_8 +# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) +# endif +# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) +#endif + +#ifndef this_cpu_sub +# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) +#endif + +#ifndef this_cpu_inc +# 
define this_cpu_inc(pcp) this_cpu_add((pcp), 1) +#endif + +#ifndef this_cpu_dec +# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) +#endif + +#ifndef this_cpu_and +# ifndef this_cpu_and_1 +# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef this_cpu_and_2 +# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef this_cpu_and_4 +# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef this_cpu_and_8 +# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) +# endif +# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) +#endif + +#ifndef this_cpu_or +# ifndef this_cpu_or_1 +# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef this_cpu_or_2 +# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef this_cpu_or_4 +# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef this_cpu_or_8 +# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) +# endif +# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) +#endif + +#ifndef this_cpu_xor +# ifndef this_cpu_xor_1 +# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef this_cpu_xor_2 +# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef this_cpu_xor_4 +# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef this_cpu_xor_8 +# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) +#endif + +#define _this_cpu_generic_add_return(pcp, val) \ +({ \ + typeof(pcp) ret__; \ + unsigned long flags; \ + raw_local_irq_save(flags); \ + __this_cpu_add(pcp, val); \ + ret__ = __this_cpu_read(pcp); \ + raw_local_irq_restore(flags); \ + ret__; \ +}) + +#ifndef this_cpu_add_return +# ifndef this_cpu_add_return_1 +# define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val) +# endif +# ifndef this_cpu_add_return_2 +# define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val) +# endif +# ifndef this_cpu_add_return_4 +# define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val) +# endif +# ifndef this_cpu_add_return_8 +# define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val) +# endif +# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) +#endif + +#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val)) +#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) +#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) + +#define _this_cpu_generic_xchg(pcp, nval) \ +({ typeof(pcp) ret__; \ + unsigned long flags; \ + raw_local_irq_save(flags); \ + ret__ = __this_cpu_read(pcp); \ + __this_cpu_write(pcp, nval); \ + raw_local_irq_restore(flags); \ + ret__; \ +}) + +#ifndef this_cpu_xchg +# ifndef this_cpu_xchg_1 +# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval) +# endif +# ifndef this_cpu_xchg_2 +# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval) +# endif +# ifndef this_cpu_xchg_4 +# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval) +# endif +# ifndef this_cpu_xchg_8 +# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval) +# 
endif +# define this_cpu_xchg(pcp, nval) \ + __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval) +#endif + +#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(pcp) ret__; \ + unsigned long flags; \ + raw_local_irq_save(flags); \ + ret__ = __this_cpu_read(pcp); \ + if (ret__ == (oval)) \ + __this_cpu_write(pcp, nval); \ + raw_local_irq_restore(flags); \ + ret__; \ +}) + +#ifndef this_cpu_cmpxchg +# ifndef this_cpu_cmpxchg_1 +# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) +# endif +# ifndef this_cpu_cmpxchg_2 +# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) +# endif +# ifndef this_cpu_cmpxchg_4 +# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) +# endif +# ifndef this_cpu_cmpxchg_8 +# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) +# endif +# define this_cpu_cmpxchg(pcp, oval, nval) \ + __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) +#endif + +/* + * cmpxchg_double replaces two adjacent scalars at once. The first + * two parameters are per cpu variables which have to be of the same + * size. A truth value is returned to indicate success or failure + * (since a double register result is difficult to handle). There is + * very limited hardware support for these operations, so only certain + * sizes may work. + */ +#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + int ret__; \ + unsigned long flags; \ + raw_local_irq_save(flags); \ + ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \ + oval1, oval2, nval1, nval2); \ + raw_local_irq_restore(flags); \ + ret__; \ +}) + +#ifndef this_cpu_cmpxchg_double +# ifndef this_cpu_cmpxchg_double_1 +# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +# endif +# ifndef this_cpu_cmpxchg_double_2 +# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +# endif +# ifndef this_cpu_cmpxchg_double_4 +# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +# endif +# ifndef this_cpu_cmpxchg_double_8 +# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +# endif +# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) +#endif + +/* + * Generic percpu operations for contexts that are safe from preemption/interrupts. + * Either we do not care about races or the caller has the + * responsibility of handling preemption/interrupt issues. Arch code can still + * override these instructions since the arch per cpu code may be more + * efficient and may actually get race freeness for free (that is the + * case for x86 for example). + * + * If there is no other protection through preempt disable and/or + * disabling interrupts then one of these RMW operations can show unexpected + * behavior because the execution thread was rescheduled on another processor + * or an interrupt occurred and the same percpu variable was modified from + * the interrupt context.
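 *
 * For illustration, a sketch contrasting the two flavours on a made-up
 * variable:
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	safe in any context; the generic this_cpu_*() fallbacks above
 *	disable interrupts around the read-modify-write:
 *		this_cpu_inc(nr_events);
 *
 *	the caller already prevents preemption and no interrupt touches
 *	this variable, so the cheaper form is sufficient:
 *		preempt_disable();
 *		__this_cpu_inc(nr_events);
 *		preempt_enable();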
+ */ +#ifndef __this_cpu_read +# ifndef __this_cpu_read_1 +# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp))) +# endif +# ifndef __this_cpu_read_2 +# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp))) +# endif +# ifndef __this_cpu_read_4 +# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp))) +# endif +# ifndef __this_cpu_read_8 +# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp))) +# endif +# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp)) +#endif + +#define __this_cpu_generic_to_op(pcp, val, op) \ +do { \ + *__this_cpu_ptr(&(pcp)) op val; \ +} while (0) + +#ifndef __this_cpu_write +# ifndef __this_cpu_write_1 +# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef __this_cpu_write_2 +# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef __this_cpu_write_4 +# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) +# endif +# ifndef __this_cpu_write_8 +# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) +# endif +# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val)) +#endif + +#ifndef __this_cpu_add +# ifndef __this_cpu_add_1 +# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef __this_cpu_add_2 +# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef __this_cpu_add_4 +# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) +# endif +# ifndef __this_cpu_add_8 +# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) +# endif +# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val)) +#endif + +#ifndef __this_cpu_sub +# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) +#endif + +#ifndef __this_cpu_inc +# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) +#endif + +#ifndef __this_cpu_dec +# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) +#endif + +#ifndef __this_cpu_and +# ifndef __this_cpu_and_1 +# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef __this_cpu_and_2 +# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef __this_cpu_and_4 +# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) +# endif +# ifndef __this_cpu_and_8 +# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) +# endif +# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val)) +#endif + +#ifndef __this_cpu_or +# ifndef __this_cpu_or_1 +# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef __this_cpu_or_2 +# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef __this_cpu_or_4 +# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) +# endif +# ifndef __this_cpu_or_8 +# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) +# endif +# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val)) +#endif + +#ifndef __this_cpu_xor +# ifndef __this_cpu_xor_1 +# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef __this_cpu_xor_2 +# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef __this_cpu_xor_4 +# define __this_cpu_xor_4(pcp, val) 
__this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# ifndef __this_cpu_xor_8 +# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) +# endif +# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val)) +#endif + +#define __this_cpu_generic_add_return(pcp, val) \ +({ \ + __this_cpu_add(pcp, val); \ + __this_cpu_read(pcp); \ +}) + +#ifndef __this_cpu_add_return +# ifndef __this_cpu_add_return_1 +# define __this_cpu_add_return_1(pcp, val) __this_cpu_generic_add_return(pcp, val) +# endif +# ifndef __this_cpu_add_return_2 +# define __this_cpu_add_return_2(pcp, val) __this_cpu_generic_add_return(pcp, val) +# endif +# ifndef __this_cpu_add_return_4 +# define __this_cpu_add_return_4(pcp, val) __this_cpu_generic_add_return(pcp, val) +# endif +# ifndef __this_cpu_add_return_8 +# define __this_cpu_add_return_8(pcp, val) __this_cpu_generic_add_return(pcp, val) +# endif +# define __this_cpu_add_return(pcp, val) \ + __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) +#endif + +#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val)) +#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) +#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) + +#define __this_cpu_generic_xchg(pcp, nval) \ +({ typeof(pcp) ret__; \ + ret__ = __this_cpu_read(pcp); \ + __this_cpu_write(pcp, nval); \ + ret__; \ +}) + +#ifndef __this_cpu_xchg +# ifndef __this_cpu_xchg_1 +# define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval) +# endif +# ifndef __this_cpu_xchg_2 +# define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval) +# endif +# ifndef __this_cpu_xchg_4 +# define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval) +# endif +# ifndef __this_cpu_xchg_8 +# define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval) +# endif +# define __this_cpu_xchg(pcp, nval) \ + __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval) +#endif + +#define __this_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(pcp) ret__; \ + ret__ = __this_cpu_read(pcp); \ + if (ret__ == (oval)) \ + __this_cpu_write(pcp, nval); \ + ret__; \ +}) + +#ifndef __this_cpu_cmpxchg +# ifndef __this_cpu_cmpxchg_1 +# define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval) +# endif +# ifndef __this_cpu_cmpxchg_2 +# define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval) +# endif +# ifndef __this_cpu_cmpxchg_4 +# define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval) +# endif +# ifndef __this_cpu_cmpxchg_8 +# define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval) +# endif +# define __this_cpu_cmpxchg(pcp, oval, nval) \ + __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval) +#endif + +#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + int __ret = 0; \ + if (__this_cpu_read(pcp1) == (oval1) && \ + __this_cpu_read(pcp2) == (oval2)) { \ + __this_cpu_write(pcp1, (nval1)); \ + __this_cpu_write(pcp2, (nval2)); \ + __ret = 1; \ + } \ + (__ret); \ +}) + +#ifndef __this_cpu_cmpxchg_double +# ifndef __this_cpu_cmpxchg_double_1 +# define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +# endif +# ifndef __this_cpu_cmpxchg_double_2 +# define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __this_cpu_generic_cmpxchg_double(pcp1, 
pcp2, oval1, oval2, nval1, nval2) +# endif +# ifndef __this_cpu_cmpxchg_double_4 +# define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +# endif +# ifndef __this_cpu_cmpxchg_double_8 +# define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +# endif +# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) +#endif + +#endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h new file mode 100644 index 00000000..b9df9ed1 --- /dev/null +++ b/include/linux/percpu_counter.h @@ -0,0 +1,177 @@ +#ifndef _LINUX_PERCPU_COUNTER_H +#define _LINUX_PERCPU_COUNTER_H +/* + * A simple "approximate counter" for use in ext2 and ext3 superblocks. + * + * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. + */ + +#include <linux/spinlock.h> +#include <linux/smp.h> +#include <linux/list.h> +#include <linux/threads.h> +#include <linux/percpu.h> +#include <linux/types.h> + +#ifdef CONFIG_SMP + +struct percpu_counter { + raw_spinlock_t lock; + s64 count; +#ifdef CONFIG_HOTPLUG_CPU + struct list_head list; /* All percpu_counters are on a list */ +#endif + s32 __percpu *counters; +}; + +extern int percpu_counter_batch; + +int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, + struct lock_class_key *key); + +#define percpu_counter_init(fbc, value) \ + ({ \ + static struct lock_class_key __key; \ + \ + __percpu_counter_init(fbc, value, &__key); \ + }) + +void percpu_counter_destroy(struct percpu_counter *fbc); +void percpu_counter_set(struct percpu_counter *fbc, s64 amount); +void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); +s64 __percpu_counter_sum(struct percpu_counter *fbc); +int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); + +static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) +{ + __percpu_counter_add(fbc, amount, percpu_counter_batch); +} + +static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) +{ + s64 ret = __percpu_counter_sum(fbc); + return ret < 0 ? 0 : ret; +} + +static inline s64 percpu_counter_sum(struct percpu_counter *fbc) +{ + return __percpu_counter_sum(fbc); +} + +static inline s64 percpu_counter_read(struct percpu_counter *fbc) +{ + return fbc->count; +} + +/* + * It is possible for the percpu_counter_read() to return a small negative + * number for some counter which should never be negative. 
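 *
 * For illustration, a typical life cycle (the counter name is made up):
 *
 *	static struct percpu_counter nr_objs;
 *
 *	if (percpu_counter_init(&nr_objs, 0))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_objs);
 *	cheap, possibly slightly stale (or even negative) snapshot:
 *		pr_debug("approx %lld\n", percpu_counter_read(&nr_objs));
 *	exact but more expensive, folds in every CPU's local delta:
 *		pr_debug("exact %lld\n", percpu_counter_sum(&nr_objs));
 *	percpu_counter_destroy(&nr_objs);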
+ * + */ +static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) +{ + s64 ret = fbc->count; + + barrier(); /* Prevent reloads of fbc->count */ + if (ret >= 0) + return ret; + return 0; +} + +static inline int percpu_counter_initialized(struct percpu_counter *fbc) +{ + return (fbc->counters != NULL); +} + +#else + +struct percpu_counter { + s64 count; +}; + +static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount) +{ + fbc->count = amount; + return 0; +} + +static inline void percpu_counter_destroy(struct percpu_counter *fbc) +{ +} + +static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount) +{ + fbc->count = amount; +} + +static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) +{ + if (fbc->count > rhs) + return 1; + else if (fbc->count < rhs) + return -1; + else + return 0; +} + +static inline void +percpu_counter_add(struct percpu_counter *fbc, s64 amount) +{ + preempt_disable(); + fbc->count += amount; + preempt_enable(); +} + +static inline void +__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) +{ + percpu_counter_add(fbc, amount); +} + +static inline s64 percpu_counter_read(struct percpu_counter *fbc) +{ + return fbc->count; +} + +/* + * percpu_counter is intended to track positive numbers. In the UP case the + * number should never be negative. + */ +static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) +{ + return fbc->count; +} + +static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) +{ + return percpu_counter_read_positive(fbc); +} + +static inline s64 percpu_counter_sum(struct percpu_counter *fbc) +{ + return percpu_counter_read(fbc); +} + +static inline int percpu_counter_initialized(struct percpu_counter *fbc) +{ + return 1; +} + +#endif /* CONFIG_SMP */ + +static inline void percpu_counter_inc(struct percpu_counter *fbc) +{ + percpu_counter_add(fbc, 1); +} + +static inline void percpu_counter_dec(struct percpu_counter *fbc) +{ + percpu_counter_add(fbc, -1); +} + +static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) +{ + percpu_counter_add(fbc, -amount); +} + +#endif /* _LINUX_PERCPU_COUNTER_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h new file mode 100644 index 00000000..ddbb6a90 --- /dev/null +++ b/include/linux/perf_event.h @@ -0,0 +1,1365 @@ +/* + * Performance events: + * + * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> + * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra + * + * Data type definitions, declarations, prototypes. 
+ * + * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_PERF_EVENT_H +#define _LINUX_PERF_EVENT_H + +#include <linux/types.h> +#include <linux/ioctl.h> +#include <asm/byteorder.h> + +/* + * User-space ABI bits: + */ + +/* + * attr.type + */ +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + + PERF_TYPE_MAX, /* non-ABI */ +}; + +/* + * Generalized performance event event_id types, used by the + * attr.event_id parameter of the sys_perf_event_open() + * syscall: + */ +enum perf_hw_id { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + + PERF_COUNT_HW_MAX, /* non-ABI */ +}; + +/* + * Generalized hardware cache events: + * + * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x + * { read, write, prefetch } x + * { accesses, misses } + */ +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + + PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + + PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + + PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ +}; + +/* + * Special "software" events provided by the kernel, even if the hardware + * does not support performance events. These events measure various + * physical and sw events of the kernel (and allow the profiling of them as + * well): + */ +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + + PERF_COUNT_SW_MAX, /* non-ABI */ +}; + +/* + * Bits that can be set in attr.sample_type to request information + * in the overflow packets. + */ +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1U << 0, + PERF_SAMPLE_TID = 1U << 1, + PERF_SAMPLE_TIME = 1U << 2, + PERF_SAMPLE_ADDR = 1U << 3, + PERF_SAMPLE_READ = 1U << 4, + PERF_SAMPLE_CALLCHAIN = 1U << 5, + PERF_SAMPLE_ID = 1U << 6, + PERF_SAMPLE_CPU = 1U << 7, + PERF_SAMPLE_PERIOD = 1U << 8, + PERF_SAMPLE_STREAM_ID = 1U << 9, + PERF_SAMPLE_RAW = 1U << 10, + PERF_SAMPLE_BRANCH_STACK = 1U << 11, + + PERF_SAMPLE_MAX = 1U << 12, /* non-ABI */ +}; + +/* + * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set + * + * If the user does not pass priv level information via branch_sample_type, + * the kernel uses the event's priv level. Branch and event priv levels do + * not have to match. Branch priv level is checked for permissions. 
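 *
 * For illustration, a sketch of requesting user-level branch records on an
 * already populated attr (see struct perf_event_attr below):
 *
 *	attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
 *				  PERF_SAMPLE_BRANCH_USER;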
+ * + * The branch types can be combined, however BRANCH_ANY covers all types + * of branches and therefore it supersedes all the other types. + */ +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1U << 0, /* user branches */ + PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, /* kernel branches */ + PERF_SAMPLE_BRANCH_HV = 1U << 2, /* hypervisor branches */ + + PERF_SAMPLE_BRANCH_ANY = 1U << 3, /* any branch types */ + PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, /* any call branch */ + PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */ + PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, /* indirect calls */ + + PERF_SAMPLE_BRANCH_MAX = 1U << 7, /* non-ABI */ +}; + +#define PERF_SAMPLE_BRANCH_PLM_ALL \ + (PERF_SAMPLE_BRANCH_USER|\ + PERF_SAMPLE_BRANCH_KERNEL|\ + PERF_SAMPLE_BRANCH_HV) + +/* + * The format of the data returned by read() on a perf event fd, + * as specified by attr.read_format: + * + * struct read_format { + * { u64 value; + * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED + * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING + * { u64 id; } && PERF_FORMAT_ID + * } && !PERF_FORMAT_GROUP + * + * { u64 nr; + * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED + * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING + * { u64 value; + * { u64 id; } && PERF_FORMAT_ID + * } cntr[nr]; + * } && PERF_FORMAT_GROUP + * }; + */ +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, + PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, + PERF_FORMAT_ID = 1U << 2, + PERF_FORMAT_GROUP = 1U << 3, + + PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ +}; + +#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ +#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ +#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ + +/* + * Hardware event_id to monitor via a performance monitoring event: + */ +struct perf_event_attr { + + /* + * Major type: hardware/software/tracepoint/etc. + */ + __u32 type; + + /* + * Size of the attr structure, for fwd/bwd compat. + */ + __u32 size; + + /* + * Type specific configuration information. 
+ */ + __u64 config; + + union { + __u64 sample_period; + __u64 sample_freq; + }; + + __u64 sample_type; + __u64 read_format; + + __u64 disabled : 1, /* off by default */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ + mmap : 1, /* include mmap data */ + comm : 1, /* include comm data */ + freq : 1, /* use freq, not period */ + inherit_stat : 1, /* per task counts */ + enable_on_exec : 1, /* next exec enables */ + task : 1, /* trace fork/exit */ + watermark : 1, /* wakeup_watermark */ + /* + * precise_ip: + * + * 0 - SAMPLE_IP can have arbitrary skid + * 1 - SAMPLE_IP must have constant skid + * 2 - SAMPLE_IP requested to have 0 skid + * 3 - SAMPLE_IP must have 0 skid + * + * See also PERF_RECORD_MISC_EXACT_IP + */ + precise_ip : 2, /* skid constraint */ + mmap_data : 1, /* non-exec mmap data */ + sample_id_all : 1, /* sample_type all events */ + + exclude_host : 1, /* don't count in host */ + exclude_guest : 1, /* don't count in guest */ + + __reserved_1 : 43; + + union { + __u32 wakeup_events; /* wakeup every n events */ + __u32 wakeup_watermark; /* bytes before wakeup */ + }; + + __u32 bp_type; + union { + __u64 bp_addr; + __u64 config1; /* extension of config */ + }; + union { + __u64 bp_len; + __u64 config2; /* extension of config1 */ + }; + __u64 branch_sample_type; /* enum branch_sample_type */ +}; + +/* + * Ioctls that can be done on a perf event fd: + */ +#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) +#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) +#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) +#define PERF_EVENT_IOC_RESET _IO ('$', 3) +#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) +#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1U << 0, +}; + +/* + * Structure of the page that can be mapped via mmap + */ +struct perf_event_mmap_page { + __u32 version; /* version number of this structure */ + __u32 compat_version; /* lowest version this is compat with */ + + /* + * Bits needed to read the hw events in user-space. + * + * u32 seq, time_mult, time_shift, idx, width; + * u64 count, enabled, running; + * u64 cyc, time_offset; + * s64 pmc = 0; + * + * do { + * seq = pc->lock; + * barrier() + * + * enabled = pc->time_enabled; + * running = pc->time_running; + * + * if (pc->cap_usr_time && enabled != running) { + * cyc = rdtsc(); + * time_offset = pc->time_offset; + * time_mult = pc->time_mult; + * time_shift = pc->time_shift; + * } + * + * idx = pc->index; + * count = pc->offset; + * if (pc->cap_usr_rdpmc && idx) { + * width = pc->pmc_width; + * pmc = rdpmc(idx - 1); + * } + * + * barrier(); + * } while (pc->lock != seq); + * + * NOTE: for obvious reason this only works on self-monitoring + * processes. + */ + __u32 lock; /* seqlock for synchronization */ + __u32 index; /* hardware event identifier */ + __s64 offset; /* add to hardware event value */ + __u64 time_enabled; /* time event active */ + __u64 time_running; /* time event on cpu */ + union { + __u64 capabilities; + __u64 cap_usr_time : 1, + cap_usr_rdpmc : 1, + cap_____res : 62; + }; + + /* + * If cap_usr_rdpmc this field provides the bit-width of the value + * read using the rdpmc() or equivalent instruction. 
This can be used + * to sign extend the result like: + * + * pmc <<= 64 - width; + * pmc >>= 64 - width; // signed shift right + * count += pmc; + */ + __u16 pmc_width; + + /* + * If cap_usr_time the below fields can be used to compute the time + * delta since time_enabled (in ns) using rdtsc or similar. + * + * u64 quot, rem; + * u64 delta; + * + * quot = (cyc >> time_shift); + * rem = cyc & ((1 << time_shift) - 1); + * delta = time_offset + quot * time_mult + + * ((rem * time_mult) >> time_shift); + * + * Where time_offset,time_mult,time_shift and cyc are read in the + * seqcount loop described above. This delta can then be added to + * enabled and possible running (if idx), improving the scaling: + * + * enabled += delta; + * if (idx) + * running += delta; + * + * quot = count / running; + * rem = count % running; + * count = quot * enabled + (rem * enabled) / running; + */ + __u16 time_shift; + __u32 time_mult; + __u64 time_offset; + + /* + * Hole for extension of the self monitor capabilities + */ + + __u64 __reserved[120]; /* align to 1k */ + + /* + * Control data for the mmap() data buffer. + * + * User-space reading the @data_head value should issue an rmb(), on + * SMP capable platforms, after reading this value -- see + * perf_event_wakeup(). + * + * When the mapping is PROT_WRITE the @data_tail value should be + * written by userspace to reflect the last read data. In this case + * the kernel will not over-write unread data. + */ + __u64 data_head; /* head in the data section */ + __u64 data_tail; /* user-space written tail */ +}; + +#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0) +#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) +#define PERF_RECORD_MISC_KERNEL (1 << 0) +#define PERF_RECORD_MISC_USER (2 << 0) +#define PERF_RECORD_MISC_HYPERVISOR (3 << 0) +#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0) +#define PERF_RECORD_MISC_GUEST_USER (5 << 0) + +/* + * Indicates that the content of PERF_SAMPLE_IP points to + * the actual instruction that triggered the event. See also + * perf_event_attr::precise_ip. + */ +#define PERF_RECORD_MISC_EXACT_IP (1 << 14) +/* + * Reserve the last bit to indicate some extended misc field + */ +#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15) + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + + /* + * If perf_event_attr.sample_id_all is set then all event types will + * have the sample_type selected fields related to where/when + * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID) + * described in PERF_RECORD_SAMPLE below, it will be stashed just after + * the perf_event_header and the fields already present for the existing + * fields, i.e. at the end of the payload. That way a newer perf.data + * file will be supported by older perf tools, with these new optional + * fields being ignored. + * + * The MMAP events record the PROT_EXEC mappings so that we can + * correlate userspace IPs to code. 
They have the following structure: + * + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * u64 addr; + * u64 len; + * u64 pgoff; + * char filename[]; + * }; + */ + PERF_RECORD_MMAP = 1, + + /* + * struct { + * struct perf_event_header header; + * u64 id; + * u64 lost; + * }; + */ + PERF_RECORD_LOST = 2, + + /* + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * char comm[]; + * }; + */ + PERF_RECORD_COMM = 3, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * u64 time; + * }; + */ + PERF_RECORD_EXIT = 4, + + /* + * struct { + * struct perf_event_header header; + * u64 time; + * u64 id; + * u64 stream_id; + * }; + */ + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * u64 time; + * }; + */ + PERF_RECORD_FORK = 7, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, tid; + * + * struct read_format values; + * }; + */ + PERF_RECORD_READ = 8, + + /* + * struct { + * struct perf_event_header header; + * + * { u64 ip; } && PERF_SAMPLE_IP + * { u32 pid, tid; } && PERF_SAMPLE_TID + * { u64 time; } && PERF_SAMPLE_TIME + * { u64 addr; } && PERF_SAMPLE_ADDR + * { u64 id; } && PERF_SAMPLE_ID + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID + * { u32 cpu, res; } && PERF_SAMPLE_CPU + * { u64 period; } && PERF_SAMPLE_PERIOD + * + * { struct read_format values; } && PERF_SAMPLE_READ + * + * { u64 nr, + * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN + * + * # + * # The RAW record below is opaque data wrt the ABI + * # + * # That is, the ABI doesn't make any promises wrt to + * # the stability of its content, it may vary depending + * # on event, hardware, kernel version and phase of + * # the moon. + * # + * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
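A minimal user-space consumer sketch for this record stream, following the data_head/data_tail protocol documented for struct perf_event_mmap_page above; map_ring() and drain_ring() are hypothetical helper names, and wrap-around of a record across the end of the data area is deliberately ignored for brevity.

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* hypothetical helper: one metadata page plus 2^n data pages */
static void *map_ring(int fd, size_t data_pages)
{
	size_t page = sysconf(_SC_PAGESIZE);

	return mmap(NULL, (data_pages + 1) * page,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

/* hypothetical helper: walk the records between data_tail and data_head */
static void drain_ring(struct perf_event_mmap_page *meta, size_t data_size)
{
	char *data = (char *)meta + sysconf(_SC_PAGESIZE);
	uint64_t head = meta->data_head;
	uint64_t tail = meta->data_tail;

	__sync_synchronize();	/* the rmb() requested after reading data_head */

	while (tail < head) {
		struct perf_event_header *hdr = (struct perf_event_header *)
			(data + (tail & (data_size - 1)));

		printf("record type %u, misc %#x, size %u\n",
		       hdr->type, hdr->misc, hdr->size);
		tail += hdr->size;
	}

	meta->data_tail = tail;	/* PROT_WRITE mapping: report what was consumed */
}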
+ * # + * + * { u32 size; + * char data[size];}&& PERF_SAMPLE_RAW + * + * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK + * }; + */ + PERF_RECORD_SAMPLE = 9, + + PERF_RECORD_MAX, /* non-ABI */ +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, + + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, + + PERF_CONTEXT_MAX = (__u64)-4095, +}; + +#define PERF_FLAG_FD_NO_GROUP (1U << 0) +#define PERF_FLAG_FD_OUTPUT (1U << 1) +#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ + +#ifdef __KERNEL__ +/* + * Kernel-internal data types and definitions: + */ + +#ifdef CONFIG_PERF_EVENTS +# include <linux/cgroup.h> +# include <asm/perf_event.h> +# include <asm/local64.h> +#endif + +struct perf_guest_info_callbacks { + int (*is_in_guest)(void); + int (*is_user_mode)(void); + unsigned long (*get_guest_ip)(void); +}; + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +#include <asm/hw_breakpoint.h> +#endif + +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/spinlock.h> +#include <linux/hrtimer.h> +#include <linux/fs.h> +#include <linux/pid_namespace.h> +#include <linux/workqueue.h> +#include <linux/ftrace.h> +#include <linux/cpu.h> +#include <linux/irq_work.h> +#include <linux/static_key.h> +#include <linux/atomic.h> +#include <linux/sysfs.h> +#include <asm/local.h> + +#define PERF_MAX_STACK_DEPTH 255 + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[PERF_MAX_STACK_DEPTH]; +}; + +struct perf_raw_record { + u32 size; + void *data; +}; + +/* + * single taken branch record layout: + * + * from: source instruction (may not always be a branch insn) + * to: branch target + * mispred: branch target was mispredicted + * predicted: branch target was predicted + * + * support for mispred, predicted is optional. In case it + * is not supported mispred = predicted = 0. + */ +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 mispred:1, /* target mispredicted */ + predicted:1,/* target predicted */ + reserved:62; +}; + +/* + * branch stack layout: + * nr: number of taken branches stored in entries[] + * + * Note that nr can vary from sample to sample + * branches (to, from) are stored from most recent + * to least recent, i.e., entries[0] contains the most + * recent branch. + */ +struct perf_branch_stack { + __u64 nr; + struct perf_branch_entry entries[0]; +}; + +struct task_struct; + +/* + * extra PMU register associated with an event + */ +struct hw_perf_event_extra { + u64 config; /* register value */ + unsigned int reg; /* register address or index */ + int alloc; /* extra register already allocated */ + int idx; /* index in shared_regs->regs[] */ +}; + +/** + * struct hw_perf_event - performance event hardware details: + */ +struct hw_perf_event { +#ifdef CONFIG_PERF_EVENTS + union { + struct { /* hardware */ + u64 config; + u64 last_tag; + unsigned long config_base; + unsigned long event_base; + int idx; + int last_cpu; + + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { /* software */ + struct hrtimer hrtimer; + }; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + struct { /* breakpoint */ + struct arch_hw_breakpoint info; + struct list_head bp_list; + /* + * Crufty hack to avoid the chicken and egg + * problem hw_breakpoint has with context + * creation and event initalization. 
+ */ + struct task_struct *bp_target; + }; +#endif + }; + int state; + local64_t prev_count; + u64 sample_period; + u64 last_period; + local64_t period_left; + u64 interrupts_seq; + u64 interrupts; + + u64 freq_time_stamp; + u64 freq_count_stamp; +#endif +}; + +/* + * hw_perf_event::state flags + */ +#define PERF_HES_STOPPED 0x01 /* the counter is stopped */ +#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ +#define PERF_HES_ARCH 0x04 + +struct perf_event; + +/* + * Common implementation detail of pmu::{start,commit,cancel}_txn + */ +#define PERF_EVENT_TXN 0x1 + +/** + * struct pmu - generic performance monitoring unit + */ +struct pmu { + struct list_head entry; + + struct device *dev; + const struct attribute_group **attr_groups; + char *name; + int type; + + int * __percpu pmu_disable_count; + struct perf_cpu_context * __percpu pmu_cpu_context; + int task_ctx_nr; + + /* + * Fully disable/enable this PMU, can be used to protect from the PMI + * as well as for lazy/batch writing of the MSRs. + */ + void (*pmu_enable) (struct pmu *pmu); /* optional */ + void (*pmu_disable) (struct pmu *pmu); /* optional */ + + /* + * Try and initialize the event for this PMU. + * Should return -ENOENT when the @event doesn't match this PMU. + */ + int (*event_init) (struct perf_event *event); + +#define PERF_EF_START 0x01 /* start the counter when adding */ +#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ +#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ + + /* + * Adds/Removes a counter to/from the PMU, can be done inside + * a transaction, see the ->*_txn() methods. + */ + int (*add) (struct perf_event *event, int flags); + void (*del) (struct perf_event *event, int flags); + + /* + * Starts/Stops a counter present on the PMU. The PMI handler + * should stop the counter when perf_event_overflow() returns + * !0. ->start() will be used to continue. + */ + void (*start) (struct perf_event *event, int flags); + void (*stop) (struct perf_event *event, int flags); + + /* + * Updates the counter value of the event. + */ + void (*read) (struct perf_event *event); + + /* + * Group events scheduling is treated as a transaction, add + * group events as a whole and perform one schedulability test. + * If the test fails, roll back the whole group + * + * Start the transaction, after this ->add() doesn't need to + * do schedulability tests. + */ + void (*start_txn) (struct pmu *pmu); /* optional */ + /* + * If ->start_txn() disabled the ->add() schedulability test + * then ->commit_txn() is required to perform one. On success + * the transaction is closed. On error the transaction is kept + * open until ->cancel_txn() is called. + */ + int (*commit_txn) (struct pmu *pmu); /* optional */ + /* + * Will cancel the transaction, assumes ->del() is called + * for each successful ->add() during the transaction. + */ + void (*cancel_txn) (struct pmu *pmu); /* optional */ + + /* + * Will return the value for perf_event_mmap_page::index for this event, + * if no implementation is provided it will default to: event->hw.idx + 1. 
+ */ + int (*event_idx) (struct perf_event *event); /*optional */ + + /* + * flush branch stack on context-switches (needed in cpu-wide mode) + */ + void (*flush_branch_stack) (void); +}; + +/** + * enum perf_event_active_state - the states of a event + */ +enum perf_event_active_state { + PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_OFF = -1, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +struct file; +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, + struct perf_sample_data *, + struct pt_regs *regs); + +enum perf_group_flag { + PERF_GROUP_SOFTWARE = 0x1, +}; + +#define SWEVENT_HLIST_BITS 8 +#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) + +struct swevent_hlist { + struct hlist_head heads[SWEVENT_HLIST_SIZE]; + struct rcu_head rcu_head; +}; + +#define PERF_ATTACH_CONTEXT 0x01 +#define PERF_ATTACH_GROUP 0x02 +#define PERF_ATTACH_TASK 0x04 + +#ifdef CONFIG_CGROUP_PERF +/* + * perf_cgroup_info keeps track of time_enabled for a cgroup. + * This is a per-cpu dynamically allocated data structure. + */ +struct perf_cgroup_info { + u64 time; + u64 timestamp; +}; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; /* timing info, one per cpu */ +}; +#endif + +struct ring_buffer; + +/** + * struct perf_event - performance event kernel representation: + */ +struct perf_event { +#ifdef CONFIG_PERF_EVENTS + struct list_head group_entry; + struct list_head event_entry; + struct list_head sibling_list; + struct hlist_node hlist_entry; + int nr_siblings; + int group_flags; + struct perf_event *group_leader; + struct pmu *pmu; + + enum perf_event_active_state state; + unsigned int attach_state; + local64_t count; + atomic64_t child_count; + + /* + * These are the total time in nanoseconds that the event + * has been enabled (i.e. eligible to run, and the task has + * been scheduled in, if this is a per-task event) + * and running (scheduled onto the CPU), respectively. + * + * They are computed from tstamp_enabled, tstamp_running and + * tstamp_stopped when the event is in INACTIVE or ACTIVE state. + */ + u64 total_time_enabled; + u64 total_time_running; + + /* + * These are timestamps used for computing total_time_enabled + * and total_time_running when the event is in INACTIVE or + * ACTIVE state, measured in nanoseconds from an arbitrary point + * in time. + * tstamp_enabled: the notional time when the event was enabled + * tstamp_running: the notional time when the event was scheduled on + * tstamp_stopped: in INACTIVE state, the notional time when the + * event was scheduled off. + */ + u64 tstamp_enabled; + u64 tstamp_running; + u64 tstamp_stopped; + + /* + * timestamp shadows the actual context timing but it can + * be safely used in NMI interrupt context. It reflects the + * context time as it was when the event was last scheduled in. + * + * ctx_time already accounts for ctx->timestamp. Therefore to + * compute ctx_time for a sample, simply add perf_clock(). + */ + u64 shadow_ctx_time; + + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + + struct perf_event_context *ctx; + struct file *filp; + + /* + * These accumulate total time (in nanoseconds) that children + * events have been enabled and running, respectively. 
+ */ + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + + /* + * Protect attach/detach and child_list: + */ + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + + int oncpu; + int cpu; + + struct list_head owner_entry; + struct task_struct *owner; + + /* mmap bits */ + struct mutex mmap_mutex; + atomic_t mmap_count; + int mmap_locked; + struct user_struct *mmap_user; + struct ring_buffer *rb; + struct list_head rb_entry; + + /* poll related */ + wait_queue_head_t waitq; + struct fasync_struct *fasync; + + /* delayed work for NMIs and such */ + int pending_wakeup; + int pending_kill; + int pending_disable; + struct irq_work pending; + + atomic_t event_limit; + + void (*destroy)(struct perf_event *); + struct rcu_head rcu_head; + + struct pid_namespace *ns; + u64 id; + + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; + +#ifdef CONFIG_EVENT_TRACING + struct ftrace_event_call *tp_event; + struct event_filter *filter; +#ifdef CONFIG_FUNCTION_TRACER + struct ftrace_ops ftrace_ops; +#endif +#endif + +#ifdef CONFIG_CGROUP_PERF + struct perf_cgroup *cgrp; /* cgroup event is attach to */ + int cgrp_defer_enabled; +#endif + +#endif /* CONFIG_PERF_EVENTS */ +}; + +enum perf_event_context_type { + task_context, + cpu_context, +}; + +/** + * struct perf_event_context - event context structure + * + * Used as a container for task events and CPU events as well: + */ +struct perf_event_context { + struct pmu *pmu; + enum perf_event_context_type type; + /* + * Protect the states of the events in the list, + * nr_active, and the list: + */ + raw_spinlock_t lock; + /* + * Protect the list of events. Locking either mutex or lock + * is sufficient to ensure the list doesn't change; to change + * the list you need to lock both the mutex and the spinlock. + */ + struct mutex mutex; + + struct list_head pinned_groups; + struct list_head flexible_groups; + struct list_head event_list; + int nr_events; + int nr_active; + int is_active; + int nr_stat; + int nr_freq; + int rotate_disable; + atomic_t refcount; + struct task_struct *task; + + /* + * Context clock, runs when context enabled. + */ + u64 time; + u64 timestamp; + + /* + * These fields let us detect when two contexts have both + * been cloned (inherited) from a common ancestor. + */ + struct perf_event_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; + int nr_cgroups; /* cgroup evts */ + int nr_branch_stack; /* branch_stack evt */ + struct rcu_head rcu_head; +}; + +/* + * Number of contexts where an event can trigger: + * task, softirq, hardirq, nmi. 
+ */ +#define PERF_NR_CONTEXTS 4 + +/** + * struct perf_event_cpu_context - per cpu event context structure + */ +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int active_oncpu; + int exclusive; + struct list_head rotation_list; + int jiffies_interval; + struct pmu *active_pmu; + struct perf_cgroup *cgrp; +}; + +struct perf_output_handle { + struct perf_event *event; + struct ring_buffer *rb; + unsigned long wakeup; + unsigned long size; + void *addr; + int page; +}; + +#ifdef CONFIG_PERF_EVENTS + +extern int perf_pmu_register(struct pmu *pmu, char *name, int type); +extern void perf_pmu_unregister(struct pmu *pmu); + +extern int perf_num_counters(void); +extern const char *perf_pmu_name(void); +extern void __perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task); +extern void __perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next); +extern int perf_event_init_task(struct task_struct *child); +extern void perf_event_exit_task(struct task_struct *child); +extern void perf_event_free_task(struct task_struct *task); +extern void perf_event_delayed_put(struct task_struct *task); +extern void perf_event_print_debug(void); +extern void perf_pmu_disable(struct pmu *pmu); +extern void perf_pmu_enable(struct pmu *pmu); +extern int perf_event_task_disable(void); +extern int perf_event_task_enable(void); +extern int perf_event_refresh(struct perf_event *event, int refresh); +extern void perf_event_update_userpage(struct perf_event *event); +extern int perf_event_release_kernel(struct perf_event *event); +extern struct perf_event * +perf_event_create_kernel_counter(struct perf_event_attr *attr, + int cpu, + struct task_struct *task, + perf_overflow_handler_t callback, + void *context); +extern u64 perf_event_read_value(struct perf_event *event, + u64 *enabled, u64 *running); + + +struct perf_sample_data { + u64 type; + + u64 ip; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 addr; + u64 id; + u64 stream_id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + u64 period; + struct perf_callchain_entry *callchain; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; +}; + +static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) +{ + data->addr = addr; + data->raw = NULL; + data->br_stack = NULL; +} + +extern void perf_output_sample(struct perf_output_handle *handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event); +extern void perf_prepare_sample(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event, + struct pt_regs *regs); + +extern int perf_event_overflow(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); + +static inline bool is_sampling_event(struct perf_event *event) +{ + return event->attr.sample_period != 0; +} + +/* + * Return 1 for a software event, 0 for a hardware event + */ +static inline int is_software_event(struct perf_event *event) +{ + return event->pmu->task_ctx_nr == perf_sw_context; +} + +extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; + +extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); + +#ifndef perf_arch_fetch_caller_regs +static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } +#endif + +/* + * Take a snapshot of the regs. Skip ip and frame pointer to + * the nth caller. 
We only need a few of the regs: + * - ip for PERF_SAMPLE_IP + * - cs for user_mode() tests + * - bp for callchains + * - eflags, for future purposes, just in case + */ +static inline void perf_fetch_caller_regs(struct pt_regs *regs) +{ + memset(regs, 0, sizeof(*regs)); + + perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); +} + +static __always_inline void +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) +{ + struct pt_regs hot_regs; + + if (static_key_false(&perf_swevent_enabled[event_id])) { + if (!regs) { + perf_fetch_caller_regs(&hot_regs); + regs = &hot_regs; + } + __perf_sw_event(event_id, nr, regs, addr); + } +} + +extern struct static_key_deferred perf_sched_events; + +static inline void perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task) +{ + if (static_key_false(&perf_sched_events.key)) + __perf_event_task_sched_in(prev, task); +} + +static inline void perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next) +{ + perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); + + if (static_key_false(&perf_sched_events.key)) + __perf_event_task_sched_out(prev, next); +} + +extern void perf_event_mmap(struct vm_area_struct *vma); +extern struct perf_guest_info_callbacks *perf_guest_cbs; +extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); +extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); + +extern void perf_event_comm(struct task_struct *tsk); +extern void perf_event_fork(struct task_struct *tsk); + +/* Callchains */ +DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); + +extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); +extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); + +static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) +{ + if (entry->nr < PERF_MAX_STACK_DEPTH) + entry->ip[entry->nr++] = ip; +} + +extern int sysctl_perf_event_paranoid; +extern int sysctl_perf_event_mlock; +extern int sysctl_perf_event_sample_rate; + +extern int perf_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +static inline bool perf_paranoid_tracepoint_raw(void) +{ + return sysctl_perf_event_paranoid > -1; +} + +static inline bool perf_paranoid_cpu(void) +{ + return sysctl_perf_event_paranoid > 0; +} + +static inline bool perf_paranoid_kernel(void) +{ + return sysctl_perf_event_paranoid > 1; +} + +extern void perf_event_init(void); +extern void perf_tp_event(u64 addr, u64 count, void *record, + int entry_size, struct pt_regs *regs, + struct hlist_head *head, int rctx); +extern void perf_bp_event(struct perf_event *event, void *data); + +#ifndef perf_misc_flags +# define perf_misc_flags(regs) \ + (user_mode(regs) ? 
PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) +# define perf_instruction_pointer(regs) instruction_pointer(regs) +#endif + +static inline bool has_branch_stack(struct perf_event *event) +{ + return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; +} + +extern int perf_output_begin(struct perf_output_handle *handle, + struct perf_event *event, unsigned int size); +extern void perf_output_end(struct perf_output_handle *handle); +extern void perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len); +extern int perf_swevent_get_recursion_context(void); +extern void perf_swevent_put_recursion_context(int rctx); +extern void perf_event_enable(struct perf_event *event); +extern void perf_event_disable(struct perf_event *event); +extern void perf_event_task_tick(void); +#else +static inline void +perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task) { } +static inline void +perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next) { } +static inline int perf_event_init_task(struct task_struct *child) { return 0; } +static inline void perf_event_exit_task(struct task_struct *child) { } +static inline void perf_event_free_task(struct task_struct *task) { } +static inline void perf_event_delayed_put(struct task_struct *task) { } +static inline void perf_event_print_debug(void) { } +static inline int perf_event_task_disable(void) { return -EINVAL; } +static inline int perf_event_task_enable(void) { return -EINVAL; } +static inline int perf_event_refresh(struct perf_event *event, int refresh) +{ + return -EINVAL; +} + +static inline void +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } +static inline void +perf_bp_event(struct perf_event *event, void *data) { } + +static inline int perf_register_guest_info_callbacks +(struct perf_guest_info_callbacks *callbacks) { return 0; } +static inline int perf_unregister_guest_info_callbacks +(struct perf_guest_info_callbacks *callbacks) { return 0; } + +static inline void perf_event_mmap(struct vm_area_struct *vma) { } +static inline void perf_event_comm(struct task_struct *tsk) { } +static inline void perf_event_fork(struct task_struct *tsk) { } +static inline void perf_event_init(void) { } +static inline int perf_swevent_get_recursion_context(void) { return -1; } +static inline void perf_swevent_put_recursion_context(int rctx) { } +static inline void perf_event_enable(struct perf_event *event) { } +static inline void perf_event_disable(struct perf_event *event) { } +static inline void perf_event_task_tick(void) { } +#endif + +#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) + +/* + * This has to have a higher priority than migration_notifier in sched.c. 
+ */ +#define perf_cpu_notifier(fn) \ +do { \ + static struct notifier_block fn##_nb __cpuinitdata = \ + { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ + fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_STARTING, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ + (void *)(unsigned long)smp_processor_id()); \ + register_cpu_notifier(&fn##_nb); \ +} while (0) + + +#define PMU_FORMAT_ATTR(_name, _format) \ +static ssize_t \ +_name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ + \ +static struct device_attribute format_attr_##_name = __ATTR_RO(_name) + +#endif /* __KERNEL__ */ +#endif /* _LINUX_PERF_EVENT_H */ diff --git a/include/linux/persistent_ram.h b/include/linux/persistent_ram.h new file mode 100644 index 00000000..22422171 --- /dev/null +++ b/include/linux/persistent_ram.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_PERSISTENT_RAM_H__ +#define __LINUX_PERSISTENT_RAM_H__ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/types.h> + +struct persistent_ram_buffer; + +struct persistent_ram_descriptor { + const char *name; + phys_addr_t size; +}; + +struct persistent_ram { + phys_addr_t start; + phys_addr_t size; + + int ecc_block_size; + int ecc_size; + int ecc_symsize; + int ecc_poly; + + int num_descs; + struct persistent_ram_descriptor *descs; + + struct list_head node; +}; + +struct persistent_ram_zone { + struct list_head node; + void *vaddr; + struct persistent_ram_buffer *buffer; + size_t buffer_size; + + /* ECC correction */ + bool ecc; + char *par_buffer; + char *par_header; + struct rs_control *rs_decoder; + int corrected_bytes; + int bad_blocks; + int ecc_block_size; + int ecc_size; + int ecc_symsize; + int ecc_poly; + + char *old_log; + size_t old_log_size; + size_t old_log_footer_size; + bool early; +}; + +int persistent_ram_early_init(struct persistent_ram *ram); + +struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev, + bool ecc); + +int persistent_ram_write(struct persistent_ram_zone *prz, const void *s, + unsigned int count); + +size_t persistent_ram_old_size(struct persistent_ram_zone *prz); +void *persistent_ram_old(struct persistent_ram_zone *prz); +void persistent_ram_free_old(struct persistent_ram_zone *prz); +ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, + char *str, size_t len); + +#endif diff --git a/include/linux/personality.h b/include/linux/personality.h new file mode 100644 index 00000000..8fc7dd1a --- /dev/null +++ b/include/linux/personality.h @@ -0,0 +1,121 @@ +#ifndef _LINUX_PERSONALITY_H +#define _LINUX_PERSONALITY_H + +#ifdef __KERNEL__ + +/* + * Handling of different ABIs (personalities). 
+ */ + +struct exec_domain; +struct pt_regs; + +extern int register_exec_domain(struct exec_domain *); +extern int unregister_exec_domain(struct exec_domain *); +extern int __set_personality(unsigned int); + +#endif /* __KERNEL__ */ + +/* + * Flags for bug emulation. + * + * These occupy the top three bytes. + */ +enum { + UNAME26 = 0x0020000, + ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ + FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors + * (signal handling) + */ + MMAP_PAGE_ZERO = 0x0100000, + ADDR_COMPAT_LAYOUT = 0x0200000, + READ_IMPLIES_EXEC = 0x0400000, + ADDR_LIMIT_32BIT = 0x0800000, + SHORT_INODE = 0x1000000, + WHOLE_SECONDS = 0x2000000, + STICKY_TIMEOUTS = 0x4000000, + ADDR_LIMIT_3GB = 0x8000000, +}; + +/* + * Security-relevant compatibility flags that must be + * cleared upon setuid or setgid exec: + */ +#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \ + ADDR_NO_RANDOMIZE | \ + ADDR_COMPAT_LAYOUT | \ + MMAP_PAGE_ZERO) + +/* + * Personality types. + * + * These go in the low byte. Avoid using the top bit, it will + * conflict with error returns. + */ +enum { + PER_LINUX = 0x0000, + PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, + PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, + PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, + PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, + PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | + WHOLE_SECONDS | SHORT_INODE, + PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, + PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, + PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, + PER_BSD = 0x0006, + PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, + PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, + PER_LINUX32 = 0x0008, + PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, + PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */ + PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */ + PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */ + PER_RISCOS = 0x000c, + PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, + PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, + PER_OSF4 = 0x000f, /* OSF/1 v4 */ + PER_HPUX = 0x0010, + PER_MASK = 0x00ff, +}; + +#ifdef __KERNEL__ + +/* + * Description of an execution domain. + * + * The first two members are refernced from assembly source + * and should stay where they are unless explicitly needed. + */ +typedef void (*handler_t)(int, struct pt_regs *); + +struct exec_domain { + const char *name; /* name of the execdomain */ + handler_t handler; /* handler for syscalls */ + unsigned char pers_low; /* lowest personality */ + unsigned char pers_high; /* highest personality */ + unsigned long *signal_map; /* signal mapping */ + unsigned long *signal_invmap; /* reverse signal mapping */ + struct map_segment *err_map; /* error mapping */ + struct map_segment *socktype_map; /* socket type mapping */ + struct map_segment *sockopt_map; /* socket option mapping */ + struct map_segment *af_map; /* address family mapping */ + struct module *module; /* module context of the ed. */ + struct exec_domain *next; /* linked list (internal) */ +}; + +/* + * Return the base personality without flags. + */ +#define personality(pers) (pers & PER_MASK) + + +/* + * Change personality of the currently running process. + */ +#define set_personality(pers) \ + ((current->personality == (pers)) ? 
0 : __set_personality(pers)) + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_PERSONALITY_H */ diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h new file mode 100644 index 00000000..0b80c806 --- /dev/null +++ b/include/linux/pfkeyv2.h @@ -0,0 +1,370 @@ +/* PF_KEY user interface, this is defined by rfc2367 so + * do not make arbitrary modifications or else this header + * file will not be compliant. + */ + +#ifndef _LINUX_PFKEY2_H +#define _LINUX_PFKEY2_H + +#include <linux/types.h> + +#define PF_KEY_V2 2 +#define PFKEYV2_REVISION 199806L + +struct sadb_msg { + __u8 sadb_msg_version; + __u8 sadb_msg_type; + __u8 sadb_msg_errno; + __u8 sadb_msg_satype; + __u16 sadb_msg_len; + __u16 sadb_msg_reserved; + __u32 sadb_msg_seq; + __u32 sadb_msg_pid; +} __attribute__((packed)); +/* sizeof(struct sadb_msg) == 16 */ + +struct sadb_ext { + __u16 sadb_ext_len; + __u16 sadb_ext_type; +} __attribute__((packed)); +/* sizeof(struct sadb_ext) == 4 */ + +struct sadb_sa { + __u16 sadb_sa_len; + __u16 sadb_sa_exttype; + __be32 sadb_sa_spi; + __u8 sadb_sa_replay; + __u8 sadb_sa_state; + __u8 sadb_sa_auth; + __u8 sadb_sa_encrypt; + __u32 sadb_sa_flags; +} __attribute__((packed)); +/* sizeof(struct sadb_sa) == 16 */ + +struct sadb_lifetime { + __u16 sadb_lifetime_len; + __u16 sadb_lifetime_exttype; + __u32 sadb_lifetime_allocations; + __u64 sadb_lifetime_bytes; + __u64 sadb_lifetime_addtime; + __u64 sadb_lifetime_usetime; +} __attribute__((packed)); +/* sizeof(struct sadb_lifetime) == 32 */ + +struct sadb_address { + __u16 sadb_address_len; + __u16 sadb_address_exttype; + __u8 sadb_address_proto; + __u8 sadb_address_prefixlen; + __u16 sadb_address_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_address) == 8 */ + +struct sadb_key { + __u16 sadb_key_len; + __u16 sadb_key_exttype; + __u16 sadb_key_bits; + __u16 sadb_key_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_key) == 8 */ + +struct sadb_ident { + __u16 sadb_ident_len; + __u16 sadb_ident_exttype; + __u16 sadb_ident_type; + __u16 sadb_ident_reserved; + __u64 sadb_ident_id; +} __attribute__((packed)); +/* sizeof(struct sadb_ident) == 16 */ + +struct sadb_sens { + __u16 sadb_sens_len; + __u16 sadb_sens_exttype; + __u32 sadb_sens_dpd; + __u8 sadb_sens_sens_level; + __u8 sadb_sens_sens_len; + __u8 sadb_sens_integ_level; + __u8 sadb_sens_integ_len; + __u32 sadb_sens_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_sens) == 16 */ + +/* followed by: + __u64 sadb_sens_bitmap[sens_len]; + __u64 sadb_integ_bitmap[integ_len]; */ + +struct sadb_prop { + __u16 sadb_prop_len; + __u16 sadb_prop_exttype; + __u8 sadb_prop_replay; + __u8 sadb_prop_reserved[3]; +} __attribute__((packed)); +/* sizeof(struct sadb_prop) == 8 */ + +/* followed by: + struct sadb_comb sadb_combs[(sadb_prop_len + + sizeof(__u64) - sizeof(struct sadb_prop)) / + sizeof(struct sadb_comb)]; */ + +struct sadb_comb { + __u8 sadb_comb_auth; + __u8 sadb_comb_encrypt; + __u16 sadb_comb_flags; + __u16 sadb_comb_auth_minbits; + __u16 sadb_comb_auth_maxbits; + __u16 sadb_comb_encrypt_minbits; + __u16 sadb_comb_encrypt_maxbits; + __u32 sadb_comb_reserved; + __u32 sadb_comb_soft_allocations; + __u32 sadb_comb_hard_allocations; + __u64 sadb_comb_soft_bytes; + __u64 sadb_comb_hard_bytes; + __u64 sadb_comb_soft_addtime; + __u64 sadb_comb_hard_addtime; + __u64 sadb_comb_soft_usetime; + __u64 sadb_comb_hard_usetime; +} __attribute__((packed)); +/* sizeof(struct sadb_comb) == 72 */ + +struct sadb_supported { + __u16 sadb_supported_len; + __u16 sadb_supported_exttype; + 
__u32 sadb_supported_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_supported) == 8 */ + +/* followed by: + struct sadb_alg sadb_algs[(sadb_supported_len + + sizeof(__u64) - sizeof(struct sadb_supported)) / + sizeof(struct sadb_alg)]; */ + +struct sadb_alg { + __u8 sadb_alg_id; + __u8 sadb_alg_ivlen; + __u16 sadb_alg_minbits; + __u16 sadb_alg_maxbits; + __u16 sadb_alg_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_alg) == 8 */ + +struct sadb_spirange { + __u16 sadb_spirange_len; + __u16 sadb_spirange_exttype; + __u32 sadb_spirange_min; + __u32 sadb_spirange_max; + __u32 sadb_spirange_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_spirange) == 16 */ + +struct sadb_x_kmprivate { + __u16 sadb_x_kmprivate_len; + __u16 sadb_x_kmprivate_exttype; + __u32 sadb_x_kmprivate_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_x_kmprivate) == 8 */ + +struct sadb_x_sa2 { + __u16 sadb_x_sa2_len; + __u16 sadb_x_sa2_exttype; + __u8 sadb_x_sa2_mode; + __u8 sadb_x_sa2_reserved1; + __u16 sadb_x_sa2_reserved2; + __u32 sadb_x_sa2_sequence; + __u32 sadb_x_sa2_reqid; +} __attribute__((packed)); +/* sizeof(struct sadb_x_sa2) == 16 */ + +struct sadb_x_policy { + __u16 sadb_x_policy_len; + __u16 sadb_x_policy_exttype; + __u16 sadb_x_policy_type; + __u8 sadb_x_policy_dir; + __u8 sadb_x_policy_reserved; + __u32 sadb_x_policy_id; + __u32 sadb_x_policy_priority; +} __attribute__((packed)); +/* sizeof(struct sadb_x_policy) == 16 */ + +struct sadb_x_ipsecrequest { + __u16 sadb_x_ipsecrequest_len; + __u16 sadb_x_ipsecrequest_proto; + __u8 sadb_x_ipsecrequest_mode; + __u8 sadb_x_ipsecrequest_level; + __u16 sadb_x_ipsecrequest_reserved1; + __u32 sadb_x_ipsecrequest_reqid; + __u32 sadb_x_ipsecrequest_reserved2; +} __attribute__((packed)); +/* sizeof(struct sadb_x_ipsecrequest) == 16 */ + +/* This defines the TYPE of Nat Traversal in use. 
Currently only one + * type of NAT-T is supported, draft-ietf-ipsec-udp-encaps-06 + */ +struct sadb_x_nat_t_type { + __u16 sadb_x_nat_t_type_len; + __u16 sadb_x_nat_t_type_exttype; + __u8 sadb_x_nat_t_type_type; + __u8 sadb_x_nat_t_type_reserved[3]; +} __attribute__((packed)); +/* sizeof(struct sadb_x_nat_t_type) == 8 */ + +/* Pass a NAT Traversal port (Source or Dest port) */ +struct sadb_x_nat_t_port { + __u16 sadb_x_nat_t_port_len; + __u16 sadb_x_nat_t_port_exttype; + __be16 sadb_x_nat_t_port_port; + __u16 sadb_x_nat_t_port_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_x_nat_t_port) == 8 */ + +/* Generic LSM security context */ +struct sadb_x_sec_ctx { + __u16 sadb_x_sec_len; + __u16 sadb_x_sec_exttype; + __u8 sadb_x_ctx_alg; /* LSMs: e.g., selinux == 1 */ + __u8 sadb_x_ctx_doi; + __u16 sadb_x_ctx_len; +} __attribute__((packed)); +/* sizeof(struct sadb_sec_ctx) = 8 */ + +/* Used by MIGRATE to pass addresses IKE will use to perform + * negotiation with the peer */ +struct sadb_x_kmaddress { + __u16 sadb_x_kmaddress_len; + __u16 sadb_x_kmaddress_exttype; + __u32 sadb_x_kmaddress_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_x_kmaddress) == 8 */ + +/* Message types */ +#define SADB_RESERVED 0 +#define SADB_GETSPI 1 +#define SADB_UPDATE 2 +#define SADB_ADD 3 +#define SADB_DELETE 4 +#define SADB_GET 5 +#define SADB_ACQUIRE 6 +#define SADB_REGISTER 7 +#define SADB_EXPIRE 8 +#define SADB_FLUSH 9 +#define SADB_DUMP 10 +#define SADB_X_PROMISC 11 +#define SADB_X_PCHANGE 12 +#define SADB_X_SPDUPDATE 13 +#define SADB_X_SPDADD 14 +#define SADB_X_SPDDELETE 15 +#define SADB_X_SPDGET 16 +#define SADB_X_SPDACQUIRE 17 +#define SADB_X_SPDDUMP 18 +#define SADB_X_SPDFLUSH 19 +#define SADB_X_SPDSETIDX 20 +#define SADB_X_SPDEXPIRE 21 +#define SADB_X_SPDDELETE2 22 +#define SADB_X_NAT_T_NEW_MAPPING 23 +#define SADB_X_MIGRATE 24 +#define SADB_MAX 24 + +/* Security Association flags */ +#define SADB_SAFLAGS_PFS 1 +#define SADB_SAFLAGS_NOPMTUDISC 0x20000000 +#define SADB_SAFLAGS_DECAP_DSCP 0x40000000 +#define SADB_SAFLAGS_NOECN 0x80000000 + +/* Security Association states */ +#define SADB_SASTATE_LARVAL 0 +#define SADB_SASTATE_MATURE 1 +#define SADB_SASTATE_DYING 2 +#define SADB_SASTATE_DEAD 3 +#define SADB_SASTATE_MAX 3 + +/* Security Association types */ +#define SADB_SATYPE_UNSPEC 0 +#define SADB_SATYPE_AH 2 +#define SADB_SATYPE_ESP 3 +#define SADB_SATYPE_RSVP 5 +#define SADB_SATYPE_OSPFV2 6 +#define SADB_SATYPE_RIPV2 7 +#define SADB_SATYPE_MIP 8 +#define SADB_X_SATYPE_IPCOMP 9 +#define SADB_SATYPE_MAX 9 + +/* Authentication algorithms */ +#define SADB_AALG_NONE 0 +#define SADB_AALG_MD5HMAC 2 +#define SADB_AALG_SHA1HMAC 3 +#define SADB_X_AALG_SHA2_256HMAC 5 +#define SADB_X_AALG_SHA2_384HMAC 6 +#define SADB_X_AALG_SHA2_512HMAC 7 +#define SADB_X_AALG_RIPEMD160HMAC 8 +#define SADB_X_AALG_AES_XCBC_MAC 9 +#define SADB_X_AALG_NULL 251 /* kame */ +#define SADB_AALG_MAX 251 + +/* Encryption algorithms */ +#define SADB_EALG_NONE 0 +#define SADB_EALG_DESCBC 2 +#define SADB_EALG_3DESCBC 3 +#define SADB_X_EALG_CASTCBC 6 +#define SADB_X_EALG_BLOWFISHCBC 7 +#define SADB_EALG_NULL 11 +#define SADB_X_EALG_AESCBC 12 +#define SADB_X_EALG_AESCTR 13 +#define SADB_X_EALG_AES_CCM_ICV8 14 +#define SADB_X_EALG_AES_CCM_ICV12 15 +#define SADB_X_EALG_AES_CCM_ICV16 16 +#define SADB_X_EALG_AES_GCM_ICV8 18 +#define SADB_X_EALG_AES_GCM_ICV12 19 +#define SADB_X_EALG_AES_GCM_ICV16 20 +#define SADB_X_EALG_CAMELLIACBC 22 +#define SADB_X_EALG_NULL_AES_GMAC 23 +#define SADB_EALG_MAX 253 /* last EALG */ +/* private 
allocations should use 249-255 (RFC2407) */ +#define SADB_X_EALG_SERPENTCBC 252 /* draft-ietf-ipsec-ciph-aes-cbc-00 */ +#define SADB_X_EALG_TWOFISHCBC 253 /* draft-ietf-ipsec-ciph-aes-cbc-00 */ + +/* Compression algorithms */ +#define SADB_X_CALG_NONE 0 +#define SADB_X_CALG_OUI 1 +#define SADB_X_CALG_DEFLATE 2 +#define SADB_X_CALG_LZS 3 +#define SADB_X_CALG_LZJH 4 +#define SADB_X_CALG_MAX 4 + +/* Extension Header values */ +#define SADB_EXT_RESERVED 0 +#define SADB_EXT_SA 1 +#define SADB_EXT_LIFETIME_CURRENT 2 +#define SADB_EXT_LIFETIME_HARD 3 +#define SADB_EXT_LIFETIME_SOFT 4 +#define SADB_EXT_ADDRESS_SRC 5 +#define SADB_EXT_ADDRESS_DST 6 +#define SADB_EXT_ADDRESS_PROXY 7 +#define SADB_EXT_KEY_AUTH 8 +#define SADB_EXT_KEY_ENCRYPT 9 +#define SADB_EXT_IDENTITY_SRC 10 +#define SADB_EXT_IDENTITY_DST 11 +#define SADB_EXT_SENSITIVITY 12 +#define SADB_EXT_PROPOSAL 13 +#define SADB_EXT_SUPPORTED_AUTH 14 +#define SADB_EXT_SUPPORTED_ENCRYPT 15 +#define SADB_EXT_SPIRANGE 16 +#define SADB_X_EXT_KMPRIVATE 17 +#define SADB_X_EXT_POLICY 18 +#define SADB_X_EXT_SA2 19 +/* The next four entries are for setting up NAT Traversal */ +#define SADB_X_EXT_NAT_T_TYPE 20 +#define SADB_X_EXT_NAT_T_SPORT 21 +#define SADB_X_EXT_NAT_T_DPORT 22 +#define SADB_X_EXT_NAT_T_OA 23 +#define SADB_X_EXT_SEC_CTX 24 +/* Used with MIGRATE to pass @ to IKE for negotiation */ +#define SADB_X_EXT_KMADDRESS 25 +#define SADB_EXT_MAX 25 + +/* Identity Extension values */ +#define SADB_IDENTTYPE_RESERVED 0 +#define SADB_IDENTTYPE_PREFIX 1 +#define SADB_IDENTTYPE_FQDN 2 +#define SADB_IDENTTYPE_USERFQDN 3 +#define SADB_IDENTTYPE_MAX 3 + +#endif /* !(_LINUX_PFKEY2_H) */ diff --git a/include/linux/pfn.h b/include/linux/pfn.h new file mode 100644 index 00000000..76466372 --- /dev/null +++ b/include/linux/pfn.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_PFN_H_ +#define _LINUX_PFN_H_ + +#ifndef __ASSEMBLY__ +#include <linux/types.h> +#endif + +#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) +#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) +#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) +#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT) + +#endif diff --git a/include/linux/pg.h b/include/linux/pg.h new file mode 100644 index 00000000..db994bb0 --- /dev/null +++ b/include/linux/pg.h @@ -0,0 +1,63 @@ +/* pg.h (c) 1998 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License + + + pg.h defines the user interface to the generic ATAPI packet + command driver for parallel port ATAPI devices (pg). The + driver is loosely modelled after the generic SCSI driver, sg, + although the actual interface is different. + + The pg driver provides a simple character device interface for + sending ATAPI commands to a device. With the exception of the + ATAPI reset operation, all operations are performed by a pair + of read and write operations to the appropriate /dev/pgN device. + A write operation delivers a command and any outbound data in + a single buffer. Normally, the write will succeed unless the + device is offline or malfunctioning, or there is already another + command pending. If the write succeeds, it should be followed + immediately by a read operation, to obtain any returned data and + status information. A read will fail if there is no operation + in progress. + + As a special case, the device can be reset with a write operation, + and in this case, no following read is expected, or permitted. + + There are no ioctl() operations. Any single operation + may transfer at most PG_MAX_DATA bytes. 
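A minimal user-space sketch of the write-then-read sequence described above, assuming a /dev/pg0 node exists and using the ATAPI TEST UNIT READY packet (opcode 0x00, no data phase); error handling is reduced to early returns.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/pg.h>

int main(void)
{
	static char rbuf[sizeof(struct pg_read_hdr) + PG_MAX_DATA];
	struct pg_read_hdr *rhdr = (struct pg_read_hdr *)rbuf;
	struct pg_write_hdr whdr;
	int fd = open("/dev/pg0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&whdr, 0, sizeof(whdr));
	whdr.magic = PG_MAGIC;
	whdr.func = PG_COMMAND;
	whdr.dlen = 0;		/* TEST UNIT READY transfers no data */
	whdr.timeout = 10;	/* seconds */
	/* whdr.packet[0] == 0x00 is already the TEST UNIT READY opcode */

	if (write(fd, &whdr, sizeof(whdr)) < 0)	/* deliver the command */
		return 1;
	if (read(fd, rbuf, sizeof(rbuf)) < 0)	/* collect status and any data */
		return 1;

	printf("sense key %d, took %d s\n", rhdr->scsi, rhdr->duration);
	close(fd);
	return 0;
}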
Note that the driver must + copy the data through an internal buffer. In keeping with all + current ATAPI devices, command packets are assumed to be exactly + 12 bytes in length. + + To permit future changes to this interface, the headers in the + read and write buffers contain a single character "magic" flag. + Currently this flag must be the character "P". + +*/ + +#define PG_MAGIC 'P' +#define PG_RESET 'Z' +#define PG_COMMAND 'C' + +#define PG_MAX_DATA 32768 + +struct pg_write_hdr { + + char magic; /* == PG_MAGIC */ + char func; /* PG_RESET or PG_COMMAND */ + int dlen; /* number of bytes expected to transfer */ + int timeout; /* number of seconds before timeout */ + char packet[12]; /* packet command */ + +}; + +struct pg_read_hdr { + + char magic; /* == PG_MAGIC */ + char scsi; /* "scsi" status == sense key */ + int dlen; /* size of device transfer request */ + int duration; /* time in seconds command took */ + char pad[12]; /* not used */ + +}; + +/* end of pg.h */ diff --git a/include/linux/phantom.h b/include/linux/phantom.h new file mode 100644 index 00000000..94dd6645 --- /dev/null +++ b/include/linux/phantom.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2005-2007 Jiri Slaby <jirislaby@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __PHANTOM_H +#define __PHANTOM_H + +#include <linux/types.h> + +/* PHN_(G/S)ET_REG param */ +struct phm_reg { + __u32 reg; + __u32 value; +}; + +/* PHN_(G/S)ET_REGS param */ +struct phm_regs { + __u32 count; + __u32 mask; + __u32 values[8]; +}; + +#define PH_IOC_MAGIC 'p' +#define PHN_GET_REG _IOWR(PH_IOC_MAGIC, 0, struct phm_reg *) +#define PHN_SET_REG _IOW(PH_IOC_MAGIC, 1, struct phm_reg *) +#define PHN_GET_REGS _IOWR(PH_IOC_MAGIC, 2, struct phm_regs *) +#define PHN_SET_REGS _IOW(PH_IOC_MAGIC, 3, struct phm_regs *) +/* this ioctl tells the driver, that the caller is not OpenHaptics and might + * use improved registers update (no more phantom switchoffs when using + * libphantom) */ +#define PHN_NOT_OH _IO(PH_IOC_MAGIC, 4) +#define PHN_GETREG _IOWR(PH_IOC_MAGIC, 5, struct phm_reg) +#define PHN_SETREG _IOW(PH_IOC_MAGIC, 6, struct phm_reg) +#define PHN_GETREGS _IOWR(PH_IOC_MAGIC, 7, struct phm_regs) +#define PHN_SETREGS _IOW(PH_IOC_MAGIC, 8, struct phm_regs) + +#define PHN_CONTROL 0x6 /* control byte in iaddr space */ +#define PHN_CTL_AMP 0x1 /* switch after torques change */ +#define PHN_CTL_BUT 0x2 /* is button switched */ +#define PHN_CTL_IRQ 0x10 /* is irq enabled */ + +#define PHN_ZERO_FORCE 2048 /* zero torque on motor */ + +#endif diff --git a/include/linux/phonedev.h b/include/linux/phonedev.h new file mode 100644 index 00000000..4269de99 --- /dev/null +++ b/include/linux/phonedev.h @@ -0,0 +1,25 @@ +#ifndef __LINUX_PHONEDEV_H +#define __LINUX_PHONEDEV_H + +#include <linux/types.h> + +#ifdef __KERNEL__ + +#include <linux/poll.h> + +struct phone_device { + struct phone_device *next; + const struct file_operations *f_op; + int (*open) (struct phone_device *, struct file *); + int board; /* Device private index */ + int minor; +}; + +extern int phonedev_init(void); +#define PHONE_MAJOR 100 +extern int phone_register_device(struct phone_device *, int unit); +#define PHONE_UNIT_ANY -1 +extern void phone_unregister_device(struct phone_device *); + +#endif +#endif diff --git a/include/linux/phonet.h 
b/include/linux/phonet.h new file mode 100644 index 00000000..f48bfc80 --- /dev/null +++ b/include/linux/phonet.h @@ -0,0 +1,200 @@ +/** + * file phonet.h + * + * Phonet sockets kernel interface + * + * Copyright (C) 2008 Nokia Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef LINUX_PHONET_H +#define LINUX_PHONET_H + +#include <linux/types.h> +#include <linux/socket.h> + +/* Automatic protocol selection */ +#define PN_PROTO_TRANSPORT 0 +/* Phonet datagram socket */ +#define PN_PROTO_PHONET 1 +/* Phonet pipe */ +#define PN_PROTO_PIPE 2 +#define PHONET_NPROTO 3 + +/* Socket options for SOL_PNPIPE level */ +#define PNPIPE_ENCAP 1 +#define PNPIPE_IFINDEX 2 +#define PNPIPE_HANDLE 3 +#define PNPIPE_INITSTATE 4 + +#define PNADDR_ANY 0 +#define PNADDR_BROADCAST 0xFC +#define PNPORT_RESOURCE_ROUTING 0 + +/* Values for PNPIPE_ENCAP option */ +#define PNPIPE_ENCAP_NONE 0 +#define PNPIPE_ENCAP_IP 1 + +/* ioctls */ +#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) +#define SIOCPNENABLEPIPE (SIOCPROTOPRIVATE + 13) +#define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14) +#define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15) + +/* Phonet protocol header */ +struct phonethdr { + __u8 pn_rdev; + __u8 pn_sdev; + __u8 pn_res; + __be16 pn_length; + __u8 pn_robj; + __u8 pn_sobj; +} __attribute__((packed)); + +/* Common Phonet payload header */ +struct phonetmsg { + __u8 pn_trans_id; /* transaction ID */ + __u8 pn_msg_id; /* message type */ + union { + struct { + __u8 pn_submsg_id; /* message subtype */ + __u8 pn_data[5]; + } base; + struct { + __u16 pn_e_res_id; /* extended resource ID */ + __u8 pn_e_submsg_id; /* message subtype */ + __u8 pn_e_data[3]; + } ext; + } pn_msg_u; +}; +#define PN_COMMON_MESSAGE 0xF0 +#define PN_COMMGR 0x10 +#define PN_PREFIX 0xE0 /* resource for extended messages */ +#define pn_submsg_id pn_msg_u.base.pn_submsg_id +#define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id +#define pn_e_res_id pn_msg_u.ext.pn_e_res_id +#define pn_data pn_msg_u.base.pn_data +#define pn_e_data pn_msg_u.ext.pn_e_data + +/* data for unreachable errors */ +#define PN_COMM_SERVICE_NOT_IDENTIFIED_RESP 0x01 +#define PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP 0x14 +#define pn_orig_msg_id pn_data[0] +#define pn_status pn_data[1] +#define pn_e_orig_msg_id pn_e_data[0] +#define pn_e_status pn_e_data[1] + +/* Phonet socket address structure */ +struct sockaddr_pn { + __kernel_sa_family_t spn_family; + __u8 spn_obj; + __u8 spn_dev; + __u8 spn_resource; + __u8 spn_zero[sizeof(struct sockaddr) - sizeof(__kernel_sa_family_t) - 3]; +} __attribute__((packed)); + +/* Well known address */ +#define PN_DEV_PC 0x10 + +static inline __u16 pn_object(__u8 addr, __u16 port) +{ + return (addr << 8) | (port & 0x3ff); +} + +static inline __u8 pn_obj(__u16 handle) +{ + return handle & 0xff; +} + +static inline __u8 pn_dev(__u16 handle) +{ + return handle >> 8; +} + +static inline __u16 pn_port(__u16 
handle) +{ + return handle & 0x3ff; +} + +static inline __u8 pn_addr(__u16 handle) +{ + return (handle >> 8) & 0xfc; +} + +static inline void pn_sockaddr_set_addr(struct sockaddr_pn *spn, __u8 addr) +{ + spn->spn_dev &= 0x03; + spn->spn_dev |= addr & 0xfc; +} + +static inline void pn_sockaddr_set_port(struct sockaddr_pn *spn, __u16 port) +{ + spn->spn_dev &= 0xfc; + spn->spn_dev |= (port >> 8) & 0x03; + spn->spn_obj = port & 0xff; +} + +static inline void pn_sockaddr_set_object(struct sockaddr_pn *spn, + __u16 handle) +{ + spn->spn_dev = pn_dev(handle); + spn->spn_obj = pn_obj(handle); +} + +static inline void pn_sockaddr_set_resource(struct sockaddr_pn *spn, + __u8 resource) +{ + spn->spn_resource = resource; +} + +static inline __u8 pn_sockaddr_get_addr(const struct sockaddr_pn *spn) +{ + return spn->spn_dev & 0xfc; +} + +static inline __u16 pn_sockaddr_get_port(const struct sockaddr_pn *spn) +{ + return ((spn->spn_dev & 0x03) << 8) | spn->spn_obj; +} + +static inline __u16 pn_sockaddr_get_object(const struct sockaddr_pn *spn) +{ + return pn_object(spn->spn_dev, spn->spn_obj); +} + +static inline __u8 pn_sockaddr_get_resource(const struct sockaddr_pn *spn) +{ + return spn->spn_resource; +} + +/* Phonet device ioctl requests */ +#ifdef __KERNEL__ +#define SIOCPNGAUTOCONF (SIOCDEVPRIVATE + 0) + +struct if_phonet_autoconf { + uint8_t device; +}; + +struct if_phonet_req { + char ifr_phonet_name[16]; + union { + struct if_phonet_autoconf ifru_phonet_autoconf; + } ifr_ifru; +}; +#define ifr_phonet_autoconf ifr_ifru.ifru_phonet_autoconf +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/phy.h b/include/linux/phy.h new file mode 100644 index 00000000..6fe0a37d --- /dev/null +++ b/include/linux/phy.h @@ -0,0 +1,537 @@ +/* + * include/linux/phy.h + * + * Framework and drivers for configuring and reading different PHYs + * Based on code in sungem_phy.c and gianfar_phy.c + * + * Author: Andy Fleming + * + * Copyright (c) 2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __PHY_H +#define __PHY_H + +#include <linux/spinlock.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/mod_devicetable.h> + +#include <linux/atomic.h> + +#define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \ + SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | \ + SUPPORTED_100baseT_Full | \ + SUPPORTED_Autoneg | \ + SUPPORTED_TP | \ + SUPPORTED_MII) + +#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \ + SUPPORTED_1000baseT_Half | \ + SUPPORTED_1000baseT_Full) + +/* + * Set phydev->irq to PHY_POLL if interrupts are not supported, + * or not desired for this PHY. 
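+ * As a rough illustration (not taken from this header), an Ethernet
+ * driver that has no usable interrupt line for its PHY would simply do
+ *
+ *	phydev->irq = PHY_POLL;
+ *
+ * before calling phy_start(), so that the state machine falls back to
+ * polling.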
Set to PHY_IGNORE_INTERRUPT if + * the attached driver handles the interrupt + */ +#define PHY_POLL -1 +#define PHY_IGNORE_INTERRUPT -2 + +#define PHY_HAS_INTERRUPT 0x00000001 +#define PHY_HAS_MAGICANEG 0x00000002 + +/* Interface Mode definitions */ +typedef enum { + PHY_INTERFACE_MODE_NA, + PHY_INTERFACE_MODE_MII, + PHY_INTERFACE_MODE_GMII, + PHY_INTERFACE_MODE_SGMII, + PHY_INTERFACE_MODE_TBI, + PHY_INTERFACE_MODE_RMII, + PHY_INTERFACE_MODE_RGMII, + PHY_INTERFACE_MODE_RGMII_ID, + PHY_INTERFACE_MODE_RGMII_RXID, + PHY_INTERFACE_MODE_RGMII_TXID, + PHY_INTERFACE_MODE_RTBI, + PHY_INTERFACE_MODE_SMII, +} phy_interface_t; + + +#define PHY_INIT_TIMEOUT 100000 +#define PHY_STATE_TIME 1 +#define PHY_FORCE_TIMEOUT 10 +#define PHY_AN_TIMEOUT 10 + +#define PHY_MAX_ADDR 32 + +/* Used when trying to connect to a specific phy (mii bus id:phy device id) */ +#define PHY_ID_FMT "%s:%02x" + +/* + * Need to be a little smaller than phydev->dev.bus_id to leave room + * for the ":%02x" + */ +#define MII_BUS_ID_SIZE (20 - 3) + +/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit + IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ +#define MII_ADDR_C45 (1<<30) + +struct device; +struct sk_buff; + +/* + * The Bus class for PHYs. Devices which provide access to + * PHYs should register using this structure + */ +struct mii_bus { + const char *name; + char id[MII_BUS_ID_SIZE]; + void *priv; + int (*read)(struct mii_bus *bus, int phy_id, int regnum); + int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val); + int (*reset)(struct mii_bus *bus); + + /* + * A lock to ensure that only one thing can read/write + * the MDIO bus at a time + */ + struct mutex mdio_lock; + + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED, + MDIOBUS_UNREGISTERED, + MDIOBUS_RELEASED, + } state; + struct device dev; + + /* list of all PHYs on bus */ + struct phy_device *phy_map[PHY_MAX_ADDR]; + + /* PHY addresses to be ignored when probing */ + u32 phy_mask; + + /* + * Pointer to an array of interrupts, each PHY's + * interrupt at the index matching its address + */ + int *irq; +}; +#define to_mii_bus(d) container_of(d, struct mii_bus, dev) + +struct mii_bus *mdiobus_alloc_size(size_t); +static inline struct mii_bus *mdiobus_alloc(void) +{ + return mdiobus_alloc_size(0); +} + +int mdiobus_register(struct mii_bus *bus); +void mdiobus_unregister(struct mii_bus *bus); +void mdiobus_free(struct mii_bus *bus); +struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); +int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); + + +#define PHY_INTERRUPT_DISABLED 0x0 +#define PHY_INTERRUPT_ENABLED 0x80000000 + +/* PHY state machine states: + * + * DOWN: PHY device and driver are not ready for anything. probe + * should be called if and only if the PHY is in this state, + * given that the PHY device exists. + * - PHY driver probe function will, depending on the PHY, set + * the state to STARTING or READY + * + * STARTING: PHY device is coming up, and the ethernet driver is + * not ready. PHY drivers may set this in the probe function. + * If they do, they are responsible for making sure the state is + * eventually set to indicate whether the PHY is UP or READY, + * depending on the state when the PHY is done starting up. 
+ * - PHY driver will set the state to READY + * - start will set the state to PENDING + * + * READY: PHY is ready to send and receive packets, but the + * controller is not. By default, PHYs which do not implement + * probe will be set to this state by phy_probe(). If the PHY + * driver knows the PHY is ready, and the PHY state is STARTING, + * then it sets this STATE. + * - start will set the state to UP + * + * PENDING: PHY device is coming up, but the ethernet driver is + * ready. phy_start will set this state if the PHY state is + * STARTING. + * - PHY driver will set the state to UP when the PHY is ready + * + * UP: The PHY and attached device are ready to do work. + * Interrupts should be started here. + * - timer moves to AN + * + * AN: The PHY is currently negotiating the link state. Link is + * therefore down for now. phy_timer will set this state when it + * detects the state is UP. config_aneg will set this state + * whenever called with phydev->autoneg set to AUTONEG_ENABLE. + * - If autonegotiation finishes, but there's no link, it sets + * the state to NOLINK. + * - If aneg finishes with link, it sets the state to RUNNING, + * and calls adjust_link + * - If autonegotiation did not finish after an arbitrary amount + * of time, autonegotiation should be tried again if the PHY + * supports "magic" autonegotiation (back to AN) + * - If it didn't finish, and no magic_aneg, move to FORCING. + * + * NOLINK: PHY is up, but not currently plugged in. + * - If the timer notes that the link comes back, we move to RUNNING + * - config_aneg moves to AN + * - phy_stop moves to HALTED + * + * FORCING: PHY is being configured with forced settings + * - if link is up, move to RUNNING + * - If link is down, we drop to the next highest setting, and + * retry (FORCING) after a timeout + * - phy_stop moves to HALTED + * + * RUNNING: PHY is currently up, running, and possibly sending + * and/or receiving packets + * - timer will set CHANGELINK if we're polling (this ensures the + * link state is polled every other cycle of this state machine, + * which makes it every other second) + * - irq will set CHANGELINK + * - config_aneg will set AN + * - phy_stop moves to HALTED + * + * CHANGELINK: PHY experienced a change in link state + * - timer moves to RUNNING if link + * - timer moves to NOLINK if the link is down + * - phy_stop moves to HALTED + * + * HALTED: PHY is up, but no polling or interrupts are done. Or + * PHY is in an error state. + * + * - phy_start moves to RESUMING + * + * RESUMING: PHY was halted, but now wants to run again. + * - If we are forcing, or aneg is done, timer moves to RUNNING + * - If aneg is not done, timer moves to AN + * - phy_stop moves to HALTED + */ +enum phy_state { + PHY_DOWN=0, + PHY_STARTING, + PHY_READY, + PHY_PENDING, + PHY_UP, + PHY_AN, + PHY_RUNNING, + PHY_NOLINK, + PHY_FORCING, + PHY_CHANGELINK, + PHY_HALTED, + PHY_RESUMING +}; + + +/* phy_device: An instance of a PHY + * + * drv: Pointer to the driver for this PHY instance + * bus: Pointer to the bus this PHY is on + * dev: driver model device structure for this PHY + * phy_id: UID for this device found during discovery + * state: state of the PHY for management purposes + * dev_flags: Device-specific flags used by the PHY driver. 
+ * addr: Bus address of PHY + * link_timeout: The number of timer firings to wait before the + * giving up on the current attempt at acquiring a link + * irq: IRQ number of the PHY's interrupt (-1 if none) + * phy_timer: The timer for handling the state machine + * phy_queue: A work_queue for the interrupt + * attached_dev: The attached enet driver's device instance ptr + * adjust_link: Callback for the enet controller to respond to + * changes in the link state. + * adjust_state: Callback for the enet driver to respond to + * changes in the state machine. + * + * speed, duplex, pause, supported, advertising, and + * autoneg are used like in mii_if_info + * + * interrupts currently only supports enabled or disabled, + * but could be changed in the future to support enabling + * and disabling specific interrupts + * + * Contains some infrastructure for polling and interrupt + * handling, as well as handling shifts in PHY hardware state + */ +struct phy_device { + /* Information about the PHY type */ + /* And management functions */ + struct phy_driver *drv; + + struct mii_bus *bus; + + struct device dev; + + u32 phy_id; + + enum phy_state state; + + u32 dev_flags; + + phy_interface_t interface; + + /* Bus address of the PHY (0-31) */ + int addr; + + /* + * forced speed & duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + int asym_pause; + + /* The most recently read link state */ + int link; + + /* Enabled Interrupts */ + u32 interrupts; + + /* Union of PHY and Attached devices' supported modes */ + /* See mii.h for more info */ + u32 supported; + u32 advertising; + + int autoneg; + + int link_timeout; + + /* + * Interrupt number for this PHY + * -1 means no interrupt + */ + int irq; + + /* private data pointer */ + /* For use by PHYs to maintain extra state */ + void *priv; + + /* Interrupt and Polling infrastructure */ + struct work_struct phy_queue; + struct delayed_work state_queue; + atomic_t irq_disable; + + struct mutex lock; + + struct net_device *attached_dev; + + void (*adjust_link)(struct net_device *dev); + + void (*adjust_state)(struct net_device *dev); +}; +#define to_phy_device(d) container_of(d, struct phy_device, dev) + +/* struct phy_driver: Driver structure for a particular PHY type + * + * phy_id: The result of reading the UID registers of this PHY + * type, and ANDing them with the phy_id_mask. This driver + * only works for PHYs with IDs which match this field + * name: The friendly name of this PHY type + * phy_id_mask: Defines the important bits of the phy_id + * features: A list of features (speed, duplex, etc) supported + * by this PHY + * flags: A bitfield defining certain other features this PHY + * supports (like interrupts) + * + * The drivers must implement config_aneg and read_status. All + * other functions are optional. Note that none of these + * functions should be called from interrupt time. The goal is + * for the bus read/write functions to be able to block when the + * bus transaction is happening, and be freed up by an interrupt + * (The MPC85xx has this ability, though it is not currently + * supported in the driver). + */ +struct phy_driver { + u32 phy_id; + char *name; + unsigned int phy_id_mask; + u32 features; + u32 flags; + + /* + * Called to initialize the PHY, + * including after a reset + */ + int (*config_init)(struct phy_device *phydev); + + /* + * Called during discovery. 
Used to set + * up device-specific structures, if any + */ + int (*probe)(struct phy_device *phydev); + + /* PHY Power Management */ + int (*suspend)(struct phy_device *phydev); + int (*resume)(struct phy_device *phydev); + + /* + * Configures the advertisement and resets + * autonegotiation if phydev->autoneg is on, + * forces the speed to the current settings in phydev + * if phydev->autoneg is off + */ + int (*config_aneg)(struct phy_device *phydev); + + /* Determines the negotiated speed and duplex */ + int (*read_status)(struct phy_device *phydev); + + /* Clears any pending interrupts */ + int (*ack_interrupt)(struct phy_device *phydev); + + /* Enables or disables interrupts */ + int (*config_intr)(struct phy_device *phydev); + + /* + * Checks if the PHY generated an interrupt. + * For multi-PHY devices with shared PHY interrupt pin + */ + int (*did_interrupt)(struct phy_device *phydev); + + /* Clears up any memory if needed */ + void (*remove)(struct phy_device *phydev); + + /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */ + int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr); + + /* + * Requests a Rx timestamp for 'skb'. If the skb is accepted, + * the phy driver promises to deliver it using netif_rx() as + * soon as a timestamp becomes available. One of the + * PTP_CLASS_ values is passed in 'type'. The function must + * return true if the skb is accepted for delivery. + */ + bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + + /* + * Requests a Tx timestamp for 'skb'. The phy driver promises + * to deliver it using skb_complete_tx_timestamp() as soon as a + * timestamp becomes available. One of the PTP_CLASS_ values + * is passed in 'type'. + */ + void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); + + struct device_driver driver; +}; +#define to_phy_driver(d) container_of(d, struct phy_driver, driver) + +#define PHY_ANY_ID "MATCH ANY PHY" +#define PHY_ANY_UID 0xffffffff + +/* A Structure for boards to register fixups with the PHY Lib */ +struct phy_fixup { + struct list_head list; + char bus_id[20]; + u32 phy_uid; + u32 phy_uid_mask; + int (*run)(struct phy_device *phydev); +}; + +/** + * phy_read - Convenience function for reading a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to read + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +static inline int phy_read(struct phy_device *phydev, u32 regnum) +{ + return mdiobus_read(phydev->bus, phydev->addr, regnum); +} + +/** + * phy_write - Convenience function for writing a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: value to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. 
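+ *
+ * As a sketch of how a PHY driver might combine phy_read() and
+ * phy_write() (MII_BMCR and BMCR_ANENABLE come from <linux/mii.h>,
+ * which this header already includes; error handling kept minimal):
+ *
+ *	int bmcr = phy_read(phydev, MII_BMCR);
+ *	if (bmcr < 0)
+ *		return bmcr;
+ *	return phy_write(phydev, MII_BMCR, bmcr | BMCR_ANENABLE);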
+ */ +static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) +{ + return mdiobus_write(phydev->bus, phydev->addr, regnum, val); +} + +int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id); +struct phy_device* get_phy_device(struct mii_bus *bus, int addr); +int phy_device_register(struct phy_device *phy); +int phy_init_hw(struct phy_device *phydev); +struct phy_device * phy_attach(struct net_device *dev, + const char *bus_id, u32 flags, phy_interface_t interface); +struct phy_device *phy_find_first(struct mii_bus *bus); +int phy_connect_direct(struct net_device *dev, struct phy_device *phydev, + void (*handler)(struct net_device *), u32 flags, + phy_interface_t interface); +struct phy_device * phy_connect(struct net_device *dev, const char *bus_id, + void (*handler)(struct net_device *), u32 flags, + phy_interface_t interface); +void phy_disconnect(struct phy_device *phydev); +void phy_detach(struct phy_device *phydev); +void phy_start(struct phy_device *phydev); +void phy_stop(struct phy_device *phydev); +int phy_start_aneg(struct phy_device *phydev); + +int phy_stop_interrupts(struct phy_device *phydev); + +static inline int phy_read_status(struct phy_device *phydev) { + return phydev->drv->read_status(phydev); +} + +int genphy_restart_aneg(struct phy_device *phydev); +int genphy_config_aneg(struct phy_device *phydev); +int genphy_update_link(struct phy_device *phydev); +int genphy_read_status(struct phy_device *phydev); +int genphy_suspend(struct phy_device *phydev); +int genphy_resume(struct phy_device *phydev); +void phy_driver_unregister(struct phy_driver *drv); +int phy_driver_register(struct phy_driver *new_driver); +void phy_state_machine(struct work_struct *work); +void phy_start_machine(struct phy_device *phydev, + void (*handler)(struct net_device *)); +void phy_stop_machine(struct phy_device *phydev); +int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); +int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd); +int phy_mii_ioctl(struct phy_device *phydev, + struct ifreq *ifr, int cmd); +int phy_start_interrupts(struct phy_device *phydev); +void phy_print_status(struct phy_device *phydev); +void phy_device_free(struct phy_device *phydev); + +int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, + int (*run)(struct phy_device *)); +int phy_register_fixup_for_id(const char *bus_id, + int (*run)(struct phy_device *)); +int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, + int (*run)(struct phy_device *)); +int phy_scan_fixups(struct phy_device *phydev); + +int __init mdio_bus_init(void); +void mdio_bus_exit(void); + +extern struct bus_type mdio_bus_type; +#endif /* __PHY_H */ diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h new file mode 100644 index 00000000..509d8f5f --- /dev/null +++ b/include/linux/phy_fixed.h @@ -0,0 +1,31 @@ +#ifndef __PHY_FIXED_H +#define __PHY_FIXED_H + +struct fixed_phy_status { + int link; + int speed; + int duplex; + int pause; + int asym_pause; +}; + +#ifdef CONFIG_FIXED_PHY +extern int fixed_phy_add(unsigned int irq, int phy_id, + struct fixed_phy_status *status); +#else +static inline int fixed_phy_add(unsigned int irq, int phy_id, + struct fixed_phy_status *status) +{ + return -ENODEV; +} +#endif /* CONFIG_FIXED_PHY */ + +/* + * This function issued only by fixed_phy-aware drivers, no need + * protect it with #ifdef + */ +extern int fixed_phy_set_link_update(struct phy_device *phydev, + int (*link_update)(struct net_device *, + struct 
fixed_phy_status *)); + +#endif /* __PHY_FIXED_H */ diff --git a/include/linux/pid.h b/include/linux/pid.h new file mode 100644 index 00000000..b152d44f --- /dev/null +++ b/include/linux/pid.h @@ -0,0 +1,202 @@ +#ifndef _LINUX_PID_H +#define _LINUX_PID_H + +#include <linux/rcupdate.h> + +enum pid_type +{ + PIDTYPE_PID, + PIDTYPE_PGID, + PIDTYPE_SID, + PIDTYPE_MAX +}; + +/* + * What is struct pid? + * + * A struct pid is the kernel's internal notion of a process identifier. + * It refers to individual tasks, process groups, and sessions. While + * there are processes attached to it the struct pid lives in a hash + * table, so it and then the processes that it refers to can be found + * quickly from the numeric pid value. The attached processes may be + * quickly accessed by following pointers from struct pid. + * + * Storing pid_t values in the kernel and referring to them later has a + * problem. The process originally with that pid may have exited and the + * pid allocator wrapped, and another process could have come along + * and been assigned that pid. + * + * Referring to user space processes by holding a reference to struct + * task_struct has a problem. When the user space process exits + * the now useless task_struct is still kept. A task_struct plus a + * stack consumes around 10K of low kernel memory. More precisely + * this is THREAD_SIZE + sizeof(struct task_struct). By comparison + * a struct pid is about 64 bytes. + * + * Holding a reference to struct pid solves both of these problems. + * It is small so holding a reference does not consume a lot of + * resources, and since a new struct pid is allocated when the numeric pid + * value is reused (when pids wrap around) we don't mistakenly refer to new + * processes. + */ + + +/* + * struct upid is used to get the id of the struct pid, as it is + * seen in particular namespace. Later the struct pid is found with + * find_pid_ns() using the int nr and struct pid_namespace *ns. + */ + +struct upid { + /* Try to keep pid_chain in the same cacheline as nr for find_vpid */ + int nr; + struct pid_namespace *ns; + struct hlist_node pid_chain; +}; + +struct pid +{ + atomic_t count; + unsigned int level; + /* lists of tasks that use this pid */ + struct hlist_head tasks[PIDTYPE_MAX]; + struct rcu_head rcu; + struct upid numbers[1]; +}; + +extern struct pid init_struct_pid; + +struct pid_link +{ + struct hlist_node node; + struct pid *pid; +}; + +static inline struct pid *get_pid(struct pid *pid) +{ + if (pid) + atomic_inc(&pid->count); + return pid; +} + +extern void put_pid(struct pid *pid); +extern struct task_struct *pid_task(struct pid *pid, enum pid_type); +extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type); + +extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); + +/* + * attach_pid() and detach_pid() must be called with the tasklist_lock + * write-held. + */ +extern void attach_pid(struct task_struct *task, enum pid_type type, + struct pid *pid); +extern void detach_pid(struct task_struct *task, enum pid_type); +extern void change_pid(struct task_struct *task, enum pid_type, + struct pid *pid); +extern void transfer_pid(struct task_struct *old, struct task_struct *new, + enum pid_type); + +struct pid_namespace; +extern struct pid_namespace init_pid_ns; + +/* + * look up a PID in the hash table. Must be called with the tasklist_lock + * or rcu_read_lock() held. + * + * find_pid_ns() finds the pid in the namespace specified + * find_vpid() finds the pid by its virtual id, i.e. 
in the current namespace + * + * see also find_task_by_vpid() set in include/linux/sched.h + */ +extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns); +extern struct pid *find_vpid(int nr); + +/* + * Lookup a PID in the hash table, and return with it's count elevated. + */ +extern struct pid *find_get_pid(int nr); +extern struct pid *find_ge_pid(int nr, struct pid_namespace *); +int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); + +extern struct pid *alloc_pid(struct pid_namespace *ns); +extern void free_pid(struct pid *pid); + +/* + * ns_of_pid() returns the pid namespace in which the specified pid was + * allocated. + * + * NOTE: + * ns_of_pid() is expected to be called for a process (task) that has + * an attached 'struct pid' (see attach_pid(), detach_pid()) i.e @pid + * is expected to be non-NULL. If @pid is NULL, caller should handle + * the resulting NULL pid-ns. + */ +static inline struct pid_namespace *ns_of_pid(struct pid *pid) +{ + struct pid_namespace *ns = NULL; + if (pid) + ns = pid->numbers[pid->level].ns; + return ns; +} + +/* + * is_child_reaper returns true if the pid is the init process + * of the current namespace. As this one could be checked before + * pid_ns->child_reaper is assigned in copy_process, we check + * with the pid number. + */ +static inline bool is_child_reaper(struct pid *pid) +{ + return pid->numbers[pid->level].nr == 1; +} + +/* + * the helpers to get the pid's id seen from different namespaces + * + * pid_nr() : global id, i.e. the id seen from the init namespace; + * pid_vnr() : virtual id, i.e. the id seen from the pid namespace of + * current. + * pid_nr_ns() : id seen from the ns specified. + * + * see also task_xid_nr() etc in include/linux/sched.h + */ + +static inline pid_t pid_nr(struct pid *pid) +{ + pid_t nr = 0; + if (pid) + nr = pid->numbers[0].nr; + return nr; +} + +pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns); +pid_t pid_vnr(struct pid *pid); + +#define do_each_pid_task(pid, type, task) \ + do { \ + struct hlist_node *pos___; \ + if ((pid) != NULL) \ + hlist_for_each_entry_rcu((task), pos___, \ + &(pid)->tasks[type], pids[type].node) { + + /* + * Both old and new leaders may be attached to + * the same pid in the middle of de_thread(). 
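+ *
+ * Rough usage sketch (the caller is assumed to hold rcu_read_lock() or
+ * the tasklist_lock; "pgrp" is a struct pid and "p" a task_struct
+ * pointer, both hypothetical local names):
+ *
+ *	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+ *		... act on each task p attached to pgrp ...
+ *	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);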
+ */ +#define while_each_pid_task(pid, type, task) \ + if (type == PIDTYPE_PID) \ + break; \ + } \ + } while (0) + +#define do_each_pid_thread(pid, type, task) \ + do_each_pid_task(pid, type, task) { \ + struct task_struct *tg___ = task; \ + do { + +#define while_each_pid_thread(pid, type, task) \ + } while_each_thread(tg___, task); \ + task = tg___; \ + } while_each_pid_task(pid, type, task) +#endif /* _LINUX_PID_H */ diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h new file mode 100644 index 00000000..b067bd8c --- /dev/null +++ b/include/linux/pid_namespace.h @@ -0,0 +1,95 @@ +#ifndef _LINUX_PID_NS_H +#define _LINUX_PID_NS_H + +#include <linux/sched.h> +#include <linux/bug.h> +#include <linux/mm.h> +#include <linux/threads.h> +#include <linux/nsproxy.h> +#include <linux/kref.h> + +struct pidmap { + atomic_t nr_free; + void *page; +}; + +#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8) + +struct bsd_acct_struct; + +struct pid_namespace { + struct kref kref; + struct pidmap pidmap[PIDMAP_ENTRIES]; + int last_pid; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; +#ifdef CONFIG_PROC_FS + struct vfsmount *proc_mnt; +#endif +#ifdef CONFIG_BSD_PROCESS_ACCT + struct bsd_acct_struct *bacct; +#endif + gid_t pid_gid; + int hide_pid; + int reboot; /* group exit code if this pidns was rebooted */ +}; + +extern struct pid_namespace init_pid_ns; + +#ifdef CONFIG_PID_NS +static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) +{ + if (ns != &init_pid_ns) + kref_get(&ns->kref); + return ns; +} + +extern struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *ns); +extern void free_pid_ns(struct kref *kref); +extern void zap_pid_ns_processes(struct pid_namespace *pid_ns); +extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd); + +static inline void put_pid_ns(struct pid_namespace *ns) +{ + if (ns != &init_pid_ns) + kref_put(&ns->kref, free_pid_ns); +} + +#else /* !CONFIG_PID_NS */ +#include <linux/err.h> + +static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) +{ + return ns; +} + +static inline struct pid_namespace * +copy_pid_ns(unsigned long flags, struct pid_namespace *ns) +{ + if (flags & CLONE_NEWPID) + ns = ERR_PTR(-EINVAL); + return ns; +} + +static inline void put_pid_ns(struct pid_namespace *ns) +{ +} + +static inline void zap_pid_ns_processes(struct pid_namespace *ns) +{ + BUG(); +} + +static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) +{ + return 0; +} +#endif /* CONFIG_PID_NS */ + +extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); +void pidhash_init(void); +void pidmap_init(void); + +#endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/pim.h b/include/linux/pim.h new file mode 100644 index 00000000..252bf664 --- /dev/null +++ b/include/linux/pim.h @@ -0,0 +1,27 @@ +#ifndef __LINUX_PIM_H +#define __LINUX_PIM_H + +#include <asm/byteorder.h> + +/* Message types - V1 */ +#define PIM_V1_VERSION cpu_to_be32(0x10000000) +#define PIM_V1_REGISTER 1 + +/* Message types - V2 */ +#define PIM_VERSION 2 +#define PIM_REGISTER 1 + +#define PIM_NULL_REGISTER cpu_to_be32(0x40000000) + +/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ +struct pimreghdr +{ + __u8 type; + __u8 reserved; + __be16 csum; + __be32 flags; +}; + +struct sk_buff; +extern int pim_rcv_v1(struct sk_buff *); +#endif diff --git a/include/linux/pinctrl/consumer.h 
b/include/linux/pinctrl/consumer.h new file mode 100644 index 00000000..191e7268 --- /dev/null +++ b/include/linux/pinctrl/consumer.h @@ -0,0 +1,159 @@ +/* + * Consumer interface the pin control subsystem + * + * Copyright (C) 2012 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * Based on bits of regulator core, gpio core and clk core + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_CONSUMER_H +#define __LINUX_PINCTRL_CONSUMER_H + +#include <linux/err.h> +#include <linux/list.h> +#include <linux/seq_file.h> +#include "pinctrl-state.h" + +/* This struct is private to the core and should be regarded as a cookie */ +struct pinctrl; +struct pinctrl_state; + +#ifdef CONFIG_PINCTRL + +/* External interface to pin control */ +extern int pinctrl_request_gpio(unsigned gpio); +extern void pinctrl_free_gpio(unsigned gpio); +extern int pinctrl_gpio_direction_input(unsigned gpio); +extern int pinctrl_gpio_direction_output(unsigned gpio); + +extern struct pinctrl * __must_check pinctrl_get(struct device *dev); +extern void pinctrl_put(struct pinctrl *p); +extern struct pinctrl_state * __must_check pinctrl_lookup_state( + struct pinctrl *p, + const char *name); +extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s); + +#else /* !CONFIG_PINCTRL */ + +static inline int pinctrl_request_gpio(unsigned gpio) +{ + return 0; +} + +static inline void pinctrl_free_gpio(unsigned gpio) +{ +} + +static inline int pinctrl_gpio_direction_input(unsigned gpio) +{ + return 0; +} + +static inline int pinctrl_gpio_direction_output(unsigned gpio) +{ + return 0; +} + +static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) +{ + return NULL; +} + +static inline void pinctrl_put(struct pinctrl *p) +{ +} + +static inline struct pinctrl_state * __must_check pinctrl_lookup_state( + struct pinctrl *p, + const char *name) +{ + return NULL; +} + +static inline int pinctrl_select_state(struct pinctrl *p, + struct pinctrl_state *s) +{ + return 0; +} + +#endif /* CONFIG_PINCTRL */ + +static inline struct pinctrl * __must_check pinctrl_get_select( + struct device *dev, const char *name) +{ + struct pinctrl *p; + struct pinctrl_state *s; + int ret; + + p = pinctrl_get(dev); + if (IS_ERR(p)) + return p; + + s = pinctrl_lookup_state(p, name); + if (IS_ERR(s)) { + pinctrl_put(p); + return ERR_PTR(PTR_ERR(s)); + } + + ret = pinctrl_select_state(p, s); + if (ret < 0) { + pinctrl_put(p); + return ERR_PTR(ret); + } + + return p; +} + +static inline struct pinctrl * __must_check pinctrl_get_select_default( + struct device *dev) +{ + return pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT); +} + +#ifdef CONFIG_PINCONF + +extern int pin_config_get(const char *dev_name, const char *name, + unsigned long *config); +extern int pin_config_set(const char *dev_name, const char *name, + unsigned long config); +extern int pin_config_group_get(const char *dev_name, + const char *pin_group, + unsigned long *config); +extern int pin_config_group_set(const char *dev_name, + const char *pin_group, + unsigned long config); + +#else + +static inline int pin_config_get(const char *dev_name, const char *name, + unsigned long *config) +{ + return 0; +} + +static inline int pin_config_set(const char *dev_name, const char *name, + unsigned long config) +{ + return 0; +} + +static inline int pin_config_group_get(const char *dev_name, + const char *pin_group, + unsigned long *config) +{ + return 0; +} + +static 
inline int pin_config_group_set(const char *dev_name, + const char *pin_group, + unsigned long config) +{ + return 0; +} + +#endif + +#endif /* __LINUX_PINCTRL_CONSUMER_H */ diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h new file mode 100644 index 00000000..e4d1de74 --- /dev/null +++ b/include/linux/pinctrl/machine.h @@ -0,0 +1,167 @@ +/* + * Machine interface for the pinctrl subsystem. + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * Based on bits of regulator core, gpio core and clk core + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_MACHINE_H +#define __LINUX_PINCTRL_MACHINE_H + +#include <linux/bug.h> + +#include "pinctrl-state.h" + +enum pinctrl_map_type { + PIN_MAP_TYPE_INVALID, + PIN_MAP_TYPE_DUMMY_STATE, + PIN_MAP_TYPE_MUX_GROUP, + PIN_MAP_TYPE_CONFIGS_PIN, + PIN_MAP_TYPE_CONFIGS_GROUP, +}; + +/** + * struct pinctrl_map_mux - mapping table content for MAP_TYPE_MUX_GROUP + * @group: the name of the group whose mux function is to be configured. This + * field may be left NULL, and the first applicable group for the function + * will be used. + * @function: the mux function to select for the group + */ +struct pinctrl_map_mux { + const char *group; + const char *function; +}; + +/** + * struct pinctrl_map_configs - mapping table content for MAP_TYPE_CONFIGS_* + * @group_or_pin: the name of the pin or group whose configuration parameters + * are to be configured. + * @configs: a pointer to an array of config parameters/values to program into + * hardware. Each individual pin controller defines the format and meaning + * of config parameters. + * @num_configs: the number of entries in array @configs + */ +struct pinctrl_map_configs { + const char *group_or_pin; + unsigned long *configs; + unsigned num_configs; +}; + +/** + * struct pinctrl_map - boards/machines shall provide this map for devices + * @dev_name: the name of the device using this specific mapping, the name + * must be the same as in your struct device*. If this name is set to the + * same name as the pin controllers own dev_name(), the map entry will be + * hogged by the driver itself upon registration + * @name: the name of this specific map entry for the particular machine. + * This is the parameter passed to pinmux_lookup_state() + * @type: the type of mapping table entry + * @ctrl_dev_name: the name of the device controlling this specific mapping, + * the name must be the same as in your struct device*. 
This field is not + * used for PIN_MAP_TYPE_DUMMY_STATE + * @data: Data specific to the mapping type + */ +struct pinctrl_map { + const char *dev_name; + const char *name; + enum pinctrl_map_type type; + const char *ctrl_dev_name; + union { + struct pinctrl_map_mux mux; + struct pinctrl_map_configs configs; + } data; +}; + +/* Convenience macros to create mapping table entries */ + +#define PIN_MAP_DUMMY_STATE(dev, state) \ + { \ + .dev_name = dev, \ + .name = state, \ + .type = PIN_MAP_TYPE_DUMMY_STATE, \ + } + +#define PIN_MAP_MUX_GROUP(dev, state, pinctrl, grp, func) \ + { \ + .dev_name = dev, \ + .name = state, \ + .type = PIN_MAP_TYPE_MUX_GROUP, \ + .ctrl_dev_name = pinctrl, \ + .data.mux = { \ + .group = grp, \ + .function = func, \ + }, \ + } + +#define PIN_MAP_MUX_GROUP_DEFAULT(dev, pinctrl, grp, func) \ + PIN_MAP_MUX_GROUP(dev, PINCTRL_STATE_DEFAULT, pinctrl, grp, func) + +#define PIN_MAP_MUX_GROUP_HOG(dev, state, grp, func) \ + PIN_MAP_MUX_GROUP(dev, state, dev, grp, func) + +#define PIN_MAP_MUX_GROUP_HOG_DEFAULT(dev, grp, func) \ + PIN_MAP_MUX_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, func) + +#define PIN_MAP_CONFIGS_PIN(dev, state, pinctrl, pin, cfgs) \ + { \ + .dev_name = dev, \ + .name = state, \ + .type = PIN_MAP_TYPE_CONFIGS_PIN, \ + .ctrl_dev_name = pinctrl, \ + .data.configs = { \ + .group_or_pin = pin, \ + .configs = cfgs, \ + .num_configs = ARRAY_SIZE(cfgs), \ + }, \ + } + +#define PIN_MAP_CONFIGS_PIN_DEFAULT(dev, pinctrl, pin, cfgs) \ + PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_DEFAULT, pinctrl, pin, cfgs) + +#define PIN_MAP_CONFIGS_PIN_HOG(dev, state, pin, cfgs) \ + PIN_MAP_CONFIGS_PIN(dev, state, dev, pin, cfgs) + +#define PIN_MAP_CONFIGS_PIN_HOG_DEFAULT(dev, pin, cfgs) \ + PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_DEFAULT, dev, pin, cfgs) + +#define PIN_MAP_CONFIGS_GROUP(dev, state, pinctrl, grp, cfgs) \ + { \ + .dev_name = dev, \ + .name = state, \ + .type = PIN_MAP_TYPE_CONFIGS_GROUP, \ + .ctrl_dev_name = pinctrl, \ + .data.configs = { \ + .group_or_pin = grp, \ + .configs = cfgs, \ + .num_configs = ARRAY_SIZE(cfgs), \ + }, \ + } + +#define PIN_MAP_CONFIGS_GROUP_DEFAULT(dev, pinctrl, grp, cfgs) \ + PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, pinctrl, grp, cfgs) + +#define PIN_MAP_CONFIGS_GROUP_HOG(dev, state, grp, cfgs) \ + PIN_MAP_CONFIGS_GROUP(dev, state, dev, grp, cfgs) + +#define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \ + PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs) + +#ifdef CONFIG_PINCTRL + +extern int pinctrl_register_mappings(struct pinctrl_map const *map, + unsigned num_maps); + +#else + +static inline int pinctrl_register_mappings(struct pinctrl_map const *map, + unsigned num_maps) +{ + return 0; +} + +#endif /* !CONFIG_PINMUX */ +#endif diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h new file mode 100644 index 00000000..4f0abb9f --- /dev/null +++ b/include/linux/pinctrl/pinconf-generic.h @@ -0,0 +1,114 @@ +/* + * Interface the generic pinconfig portions of the pinctrl subsystem + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * This interface is used in the core to keep track of pins. + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H +#define __LINUX_PINCTRL_PINCONF_GENERIC_H + +/* + * You shouldn't even be able to compile with these enums etc unless you're + * using generic pin config. 
That is why this is defined out. + */ +#ifdef CONFIG_GENERIC_PINCONF + +/** + * enum pin_config_param - possible pin configuration parameters + * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a + * transition from say pull-up to pull-down implies that you disable + * pull-up in the process, this setting disables all biasing. + * @PIN_CONFIG_BIAS_HIGH_IMPEDANCE: the pin will be set to a high impedance + * mode, also known as "third-state" (tristate) or "high-Z" or "floating". + * On output pins this effectively disconnects the pin, which is useful + * if for example some other pin is going to drive the signal connected + * to it for a while. Pins used for input are usually always high + * impedance. + * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high + * impedance to VDD). If the argument is != 0 pull-up is enabled, + * if it is 0, pull-up is disabled. + * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high + * impedance to GROUND). If the argument is != 0 pull-down is enabled, + * if it is 0, pull-down is disabled. + * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and + * low, this is the most typical case and is typically achieved with two + * active transistors on the output. Sending this config will enable + * push-pull mode, the argument is ignored. + * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open + * collector) which means it is usually wired with other output ports + * which are then pulled up with an external resistor. Sending this + * config will enable open drain mode, the argument is ignored. + * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source + * (open emitter). Sending this config will enable open source mode, the + * argument is ignored. + * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in + * Schmitt-trigger mode. If the Schmitt trigger has adjustable hysteresis, + * the threshold value is given on a custom format as argument when + * setting pins to this mode. The argument zero turns the Schmitt trigger + * off. + * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, + * which means it will wait for signals to settle when reading inputs. The + * argument gives the debounce time on a custom format. Setting the + * argument to zero turns debouncing off. + * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power + * supplies, the argument to this parameter (on a custom format) tells + * the driver which alternative power source to use. + * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power + * operation, if several modes of operation are supported these can be + * passed in the argument on a custom form, else just use argument 1 + * to indicate low power mode, argument 0 turns low power mode off. + * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if + * you need to pass in custom configurations to the pin controller, use + * PIN_CONFIG_END+1 as the base offset. + */ +enum pin_config_param { + PIN_CONFIG_BIAS_DISABLE, + PIN_CONFIG_BIAS_HIGH_IMPEDANCE, + PIN_CONFIG_BIAS_PULL_UP, + PIN_CONFIG_BIAS_PULL_DOWN, + PIN_CONFIG_DRIVE_PUSH_PULL, + PIN_CONFIG_DRIVE_OPEN_DRAIN, + PIN_CONFIG_DRIVE_OPEN_SOURCE, + PIN_CONFIG_INPUT_SCHMITT, + PIN_CONFIG_INPUT_DEBOUNCE, + PIN_CONFIG_POWER_SOURCE, + PIN_CONFIG_LOW_POWER_MODE, + PIN_CONFIG_END = 0x7FFF, +}; + +/* + * Helpful configuration macro to be used in tables etc.
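+ *
+ * For example, a board file could build a config table like this
+ * (illustrative names and values only; the meaning of each argument
+ * is defined by the individual pin controller driver):
+ *
+ *	static unsigned long ts_pin_configs[] = {
+ *		PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 1),
+ *		PIN_CONF_PACKED(PIN_CONFIG_INPUT_DEBOUNCE, 100),
+ *	};
+ *
+ * and then reference it from the PIN_MAP_CONFIGS_* macros in
+ * <linux/pinctrl/machine.h>.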
+ */ +#define PIN_CONF_PACKED(p, a) ((a << 16) | ((unsigned long) p & 0xffffUL)) + +/* + * The following inlines stuffs a configuration parameter and data value + * into and out of an unsigned long argument, as used by the generic pin config + * system. We put the parameter in the lower 16 bits and the argument in the + * upper 16 bits. + */ + +static inline enum pin_config_param pinconf_to_config_param(unsigned long config) +{ + return (enum pin_config_param) (config & 0xffffUL); +} + +static inline u16 pinconf_to_config_argument(unsigned long config) +{ + return (enum pin_config_param) ((config >> 16) & 0xffffUL); +} + +static inline unsigned long pinconf_to_config_packed(enum pin_config_param param, + u16 argument) +{ + return PIN_CONF_PACKED(param, argument); +} + +#endif /* CONFIG_GENERIC_PINCONF */ + +#endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */ diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h new file mode 100644 index 00000000..ec431f03 --- /dev/null +++ b/include/linux/pinctrl/pinconf.h @@ -0,0 +1,63 @@ +/* + * Interface the pinconfig portions of the pinctrl subsystem + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * This interface is used in the core to keep track of pins. + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_PINCONF_H +#define __LINUX_PINCTRL_PINCONF_H + +#ifdef CONFIG_PINCONF + +struct pinctrl_dev; +struct seq_file; + +/** + * struct pinconf_ops - pin config operations, to be implemented by + * pin configuration capable drivers. + * @is_generic: for pin controllers that want to use the generic interface, + * this flag tells the framework that it's generic. 
+ * @pin_config_get: get the config of a certain pin, if the requested config + * is not available on this controller this should return -ENOTSUPP + * and if it is available but disabled it should return -EINVAL + * @pin_config_get: get the config of a certain pin + * @pin_config_set: configure an individual pin + * @pin_config_group_get: get configurations for an entire pin group + * @pin_config_group_set: configure all pins in a group + * @pin_config_dbg_show: optional debugfs display hook that will provide + * per-device info for a certain pin in debugfs + * @pin_config_group_dbg_show: optional debugfs display hook that will provide + * per-device info for a certain group in debugfs + */ +struct pinconf_ops { +#ifdef CONFIG_GENERIC_PINCONF + bool is_generic; +#endif + int (*pin_config_get) (struct pinctrl_dev *pctldev, + unsigned pin, + unsigned long *config); + int (*pin_config_set) (struct pinctrl_dev *pctldev, + unsigned pin, + unsigned long config); + int (*pin_config_group_get) (struct pinctrl_dev *pctldev, + unsigned selector, + unsigned long *config); + int (*pin_config_group_set) (struct pinctrl_dev *pctldev, + unsigned selector, + unsigned long config); + void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev, + struct seq_file *s, + unsigned offset); + void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev, + struct seq_file *s, + unsigned selector); +}; + +#endif + +#endif /* __LINUX_PINCTRL_PINCONF_H */ diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h new file mode 100644 index 00000000..3920e28b --- /dev/null +++ b/include/linux/pinctrl/pinctrl-state.h @@ -0,0 +1,6 @@ +/* + * Standard pin control state definitions + */ + +#define PINCTRL_STATE_DEFAULT "default" +#define PINCTRL_STATE_IDLE "idle" diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h new file mode 100644 index 00000000..4e9f0788 --- /dev/null +++ b/include/linux/pinctrl/pinctrl.h @@ -0,0 +1,136 @@ +/* + * Interface the pinctrl subsystem + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * This interface is used in the core to keep track of pins. 
+ * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_PINCTRL_H +#define __LINUX_PINCTRL_PINCTRL_H + +#ifdef CONFIG_PINCTRL + +#include <linux/radix-tree.h> +#include <linux/list.h> +#include <linux/seq_file.h> +#include "pinctrl-state.h" + +struct device; +struct pinctrl_dev; +struct pinmux_ops; +struct pinconf_ops; +struct gpio_chip; + +/** + * struct pinctrl_pin_desc - boards/machines provide information on their + * pins, pads or other muxable units in this struct + * @number: unique pin number from the global pin number space + * @name: a name for this pin + */ +struct pinctrl_pin_desc { + unsigned number; + const char *name; +}; + +/* Convenience macro to define a single named or anonymous pin descriptor */ +#define PINCTRL_PIN(a, b) { .number = a, .name = b } +#define PINCTRL_PIN_ANON(a) { .number = a } + +/** + * struct pinctrl_gpio_range - each pin controller can provide subranges of + * the GPIO number space to be handled by the controller + * @node: list node for internal use + * @name: a name for the chip in this range + * @id: an ID number for the chip in this range + * @base: base offset of the GPIO range + * @pin_base: base pin number of the GPIO range + * @npins: number of pins in the GPIO range, including the base number + * @gc: an optional pointer to a gpio_chip + */ +struct pinctrl_gpio_range { + struct list_head node; + const char *name; + unsigned int id; + unsigned int base; + unsigned int pin_base; + unsigned int npins; + struct gpio_chip *gc; +}; + +/** + * struct pinctrl_ops - global pin control operations, to be implemented by + * pin controller drivers. + * @list_groups: list the number of selectable named groups available + * in this pinmux driver, the core will begin on 0 and call this + * repeatedly as long as it returns >= 0 to enumerate the groups + * @get_group_name: return the group name of the pin group + * @get_group_pins: return an array of pins corresponding to a certain + * group selector @pins, and the size of the array in @num_pins + * @pin_dbg_show: optional debugfs display hook that will provide per-device + * info for a certain pin in debugfs + */ +struct pinctrl_ops { + int (*list_groups) (struct pinctrl_dev *pctldev, unsigned selector); + const char *(*get_group_name) (struct pinctrl_dev *pctldev, + unsigned selector); + int (*get_group_pins) (struct pinctrl_dev *pctldev, + unsigned selector, + const unsigned **pins, + unsigned *num_pins); + void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, + unsigned offset); +}; + +/** + * struct pinctrl_desc - pin controller descriptor, register this to pin + * control subsystem + * @name: name for the pin controller + * @pins: an array of pin descriptors describing all the pins handled by + * this pin controller + * @npins: number of descriptors in the array, usually just ARRAY_SIZE() + * of the pins field above + * @pctlops: pin control operation vtable, to support global concepts like + * grouping of pins, this is optional. 
+ * @pmxops: pinmux operations vtable, if you support pinmuxing in your driver + * @confops: pin config operations vtable, if you support pin configuration in + * your driver + * @owner: module providing the pin controller, used for refcounting + */ +struct pinctrl_desc { + const char *name; + struct pinctrl_pin_desc const *pins; + unsigned int npins; + struct pinctrl_ops *pctlops; + struct pinmux_ops *pmxops; + struct pinconf_ops *confops; + struct module *owner; +}; + +/* External interface to pin controller */ +extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, + struct device *dev, void *driver_data); +extern void pinctrl_unregister(struct pinctrl_dev *pctldev); +extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin); +extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range); +extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range); +extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev); +extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev); +#else + +struct pinctrl_dev; + +/* Sufficiently stupid default functions when pinctrl is not in use */ +static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin) +{ + return pin >= 0; +} + +#endif /* !CONFIG_PINCTRL */ + +#endif /* __LINUX_PINCTRL_PINCTRL_H */ diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h new file mode 100644 index 00000000..47e9237e --- /dev/null +++ b/include/linux/pinctrl/pinmux.h @@ -0,0 +1,90 @@ +/* + * Interface the pinmux subsystem + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * Based on bits of regulator core, gpio core and clk core + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef __LINUX_PINCTRL_PINMUX_H +#define __LINUX_PINCTRL_PINMUX_H + +#include <linux/list.h> +#include <linux/seq_file.h> +#include "pinctrl.h" + +#ifdef CONFIG_PINMUX + +struct pinctrl_dev; + +/** + * struct pinmux_ops - pinmux operations, to be implemented by pin controller + * drivers that support pinmuxing + * @request: called by the core to see if a certain pin can be made available + * available for muxing. This is called by the core to acquire the pins + * before selecting any actual mux setting across a function. The driver + * is allowed to answer "no" by returning a negative error code + * @free: the reverse function of the request() callback, frees a pin after + * being requested + * @list_functions: list the number of selectable named functions available + * in this pinmux driver, the core will begin on 0 and call this + * repeatedly as long as it returns >= 0 to enumerate mux settings + * @get_function_name: return the function name of the muxing selector, + * called by the core to figure out which mux setting it shall map a + * certain device to + * @get_function_groups: return an array of groups names (in turn + * referencing pins) connected to a certain function selector. The group + * name can be used with the generic @pinctrl_ops to retrieve the + * actual pins affected. The applicable groups will be returned in + * @groups and the number of groups in @num_groups + * @enable: enable a certain muxing function with a certain pin group. 
The + * driver does not need to figure out whether enabling this function + * conflicts some other use of the pins in that group, such collisions + * are handled by the pinmux subsystem. The @func_selector selects a + * certain function whereas @group_selector selects a certain set of pins + * to be used. On simple controllers the latter argument may be ignored + * @disable: disable a certain muxing selector with a certain pin group + * @gpio_request_enable: requests and enables GPIO on a certain pin. + * Implement this only if you can mux every pin individually as GPIO. The + * affected GPIO range is passed along with an offset(pin number) into that + * specific GPIO range - function selectors and pin groups are orthogonal + * to this, the core will however make sure the pins do not collide. + * @gpio_disable_free: free up GPIO muxing on a certain pin, the reverse of + * @gpio_request_enable + * @gpio_set_direction: Since controllers may need different configurations + * depending on whether the GPIO is configured as input or output, + * a direction selector function may be implemented as a backing + * to the GPIO controllers that need pin muxing. + */ +struct pinmux_ops { + int (*request) (struct pinctrl_dev *pctldev, unsigned offset); + int (*free) (struct pinctrl_dev *pctldev, unsigned offset); + int (*list_functions) (struct pinctrl_dev *pctldev, unsigned selector); + const char *(*get_function_name) (struct pinctrl_dev *pctldev, + unsigned selector); + int (*get_function_groups) (struct pinctrl_dev *pctldev, + unsigned selector, + const char * const **groups, + unsigned * const num_groups); + int (*enable) (struct pinctrl_dev *pctldev, unsigned func_selector, + unsigned group_selector); + void (*disable) (struct pinctrl_dev *pctldev, unsigned func_selector, + unsigned group_selector); + int (*gpio_request_enable) (struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned offset); + void (*gpio_disable_free) (struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned offset); + int (*gpio_set_direction) (struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned offset, + bool input); +}; + +#endif /* CONFIG_PINMUX */ + +#endif /* __LINUX_PINCTRL_PINMUX_H */ diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h new file mode 100644 index 00000000..e1ac1ce1 --- /dev/null +++ b/include/linux/pipe_fs_i.h @@ -0,0 +1,165 @@ +#ifndef _LINUX_PIPE_FS_I_H +#define _LINUX_PIPE_FS_I_H + +#define PIPE_DEF_BUFFERS 16 + +#define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */ +#define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ +#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ +#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ + +/** + * struct pipe_buffer - a linux kernel pipe buffer + * @page: the page containing the data for the pipe buffer + * @offset: offset of data inside the @page + * @len: length of data inside the @page + * @ops: operations associated with this buffer. See @pipe_buf_operations. + * @flags: pipe buffer flags. See above. + * @private: private data owned by the ops. 
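+ *
+ * As a conceptual sketch (not a literal kernel snippet), the producer
+ * side of a pipe fills one such buffer roughly like this, where "ops"
+ * is whatever pipe_buf_operations table backs the page:
+ *
+ *	buf->page = page;
+ *	buf->offset = 0;
+ *	buf->len = copied;
+ *	buf->ops = ops;
+ *	buf->flags = 0;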
+ **/ +struct pipe_buffer { + struct page *page; + unsigned int offset, len; + const struct pipe_buf_operations *ops; + unsigned int flags; + unsigned long private; +}; + +/** + * struct pipe_inode_info - a linux kernel pipe + * @wait: reader/writer wait point in case of empty/full pipe + * @nrbufs: the number of non-empty pipe buffers in this pipe + * @buffers: total number of buffers (should be a power of 2) + * @curbuf: the current pipe buffer entry + * @tmp_page: cached released page + * @readers: number of current readers of this pipe + * @writers: number of current writers of this pipe + * @waiting_writers: number of writers blocked waiting for room + * @r_counter: reader counter + * @w_counter: writer counter + * @fasync_readers: reader side fasync + * @fasync_writers: writer side fasync + * @inode: inode this pipe is attached to + * @bufs: the circular array of pipe buffers + **/ +struct pipe_inode_info { + wait_queue_head_t wait; + unsigned int nrbufs, curbuf, buffers; + unsigned int readers; + unsigned int writers; + unsigned int waiting_writers; + unsigned int r_counter; + unsigned int w_counter; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct inode *inode; + struct pipe_buffer *bufs; +}; + +/* + * Note on the nesting of these functions: + * + * ->confirm() + * ->steal() + * ... + * ->map() + * ... + * ->unmap() + * + * That is, ->map() must be called on a confirmed buffer, + * same goes for ->steal(). See below for the meaning of each + * operation. Also see kerneldoc in fs/pipe.c for the pipe + * and generic variants of these hooks. + */ +struct pipe_buf_operations { + /* + * This is set to 1, if the generic pipe read/write may coalesce + * data into an existing buffer. If this is set to 0, a new pipe + * page segment is always used for new data. + */ + int can_merge; + + /* + * ->map() returns a virtual address mapping of the pipe buffer. + * The last integer flag reflects whether this should be an atomic + * mapping or not. The atomic map is faster, however you can't take + * page faults before calling ->unmap() again. So if you need to eg + * access user data through copy_to/from_user(), then you must get + * a non-atomic map. ->map() uses the KM_USER0 atomic slot for + * atomic maps, so you can't map more than one pipe_buffer at once + * and you have to be careful if mapping another page as source + * or destination for a copy (IOW, it has to use something else + * than KM_USER0). + */ + void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int); + + /* + * Undoes ->map(), finishes the virtual mapping of the pipe buffer. + */ + void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *); + + /* + * ->confirm() verifies that the data in the pipe buffer is there + * and that the contents are good. If the pages in the pipe belong + * to a file system, we may need to wait for IO completion in this + * hook. Returns 0 for good, or a negative error value in case of + * error. + */ + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + + /* + * When the contents of this pipe buffer has been completely + * consumed by a reader, ->release() is called. + */ + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); + + /* + * Attempt to take ownership of the pipe buffer and its contents. + * ->steal() returns 0 for success, in which case the contents + * of the pipe (the buf->page) is locked and now completely owned + * by the caller. 
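+ *
+ * As a sketch of the calling order described at the top of this
+ * comment (error handling elided), a consumer that wants to take the
+ * page over would do:
+ *
+ *	if (buf->ops->confirm(pipe, buf) == 0 &&
+ *	    buf->ops->steal(pipe, buf) == 0) {
+ *		... buf->page now belongs to the caller ...
+ *	}
+ *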
The page may then be transferred to a different + * mapping, the most often used case is insertion into different + * file address space cache. + */ + int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); + + /* + * Get a reference to the pipe buffer. + */ + void (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual + memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ +#define PIPE_SIZE PAGE_SIZE + +/* Pipe lock and unlock operations */ +void pipe_lock(struct pipe_inode_info *); +void pipe_unlock(struct pipe_inode_info *); +void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); + +extern unsigned int pipe_max_size, pipe_min_size; +int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *); + + +/* Drop the inode semaphore and wait for a pipe event, atomically */ +void pipe_wait(struct pipe_inode_info *pipe); + +struct pipe_inode_info * alloc_pipe_info(struct inode * inode); +void free_pipe_info(struct inode * inode); +void __free_pipe_info(struct pipe_inode_info *); + +/* Generic pipe buffer ops functions */ +void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int); +void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *); +void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); +int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); +int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); +void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); + +/* for F_SETPIPE_SZ and F_GETPIPE_SZ */ +long pipe_fcntl(struct file *, unsigned int, unsigned long arg); +struct pipe_inode_info *get_pipe_info(struct file *file); + +#endif diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h new file mode 100644 index 00000000..defbde20 --- /dev/null +++ b/include/linux/pkt_cls.h @@ -0,0 +1,467 @@ +#ifndef __LINUX_PKT_CLS_H +#define __LINUX_PKT_CLS_H + +#include <linux/types.h> +#include <linux/pkt_sched.h> + +/* I think i could have done better macros ; for now this is stolen from + * some arch/mips code - jhs +*/ +#define _TC_MAKE32(x) ((x)) + +#define _TC_MAKEMASK1(n) (_TC_MAKE32(1) << _TC_MAKE32(n)) +#define _TC_MAKEMASK(v,n) (_TC_MAKE32((_TC_MAKE32(1)<<(v))-1) << _TC_MAKE32(n)) +#define _TC_MAKEVALUE(v,n) (_TC_MAKE32(v) << _TC_MAKE32(n)) +#define _TC_GETVALUE(v,n,m) ((_TC_MAKE32(v) & _TC_MAKE32(m)) >> _TC_MAKE32(n)) + +/* verdict bit breakdown + * +bit 0: when set -> this packet has been munged already + +bit 1: when set -> It is ok to munge this packet + +bit 2,3,4,5: Reclassify counter - sort of reverse TTL - if exceeded +assume loop + +bit 6,7: Where this packet was last seen +0: Above the transmit example at the socket level +1: on the Ingress +2: on the Egress + +bit 8: when set --> Request not to classify on ingress. + +bits 9,10,11: redirect counter - redirect TTL. 
Loop avoidance + + * + * */ + +#define TC_MUNGED _TC_MAKEMASK1(0) +#define SET_TC_MUNGED(v) ( TC_MUNGED | (v & ~TC_MUNGED)) +#define CLR_TC_MUNGED(v) ( v & ~TC_MUNGED) + +#define TC_OK2MUNGE _TC_MAKEMASK1(1) +#define SET_TC_OK2MUNGE(v) ( TC_OK2MUNGE | (v & ~TC_OK2MUNGE)) +#define CLR_TC_OK2MUNGE(v) ( v & ~TC_OK2MUNGE) + +#define S_TC_VERD _TC_MAKE32(2) +#define M_TC_VERD _TC_MAKEMASK(4,S_TC_VERD) +#define G_TC_VERD(x) _TC_GETVALUE(x,S_TC_VERD,M_TC_VERD) +#define V_TC_VERD(x) _TC_MAKEVALUE(x,S_TC_VERD) +#define SET_TC_VERD(v,n) ((V_TC_VERD(n)) | (v & ~M_TC_VERD)) + +#define S_TC_FROM _TC_MAKE32(6) +#define M_TC_FROM _TC_MAKEMASK(2,S_TC_FROM) +#define G_TC_FROM(x) _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM) +#define V_TC_FROM(x) _TC_MAKEVALUE(x,S_TC_FROM) +#define SET_TC_FROM(v,n) ((V_TC_FROM(n)) | (v & ~M_TC_FROM)) +#define AT_STACK 0x0 +#define AT_INGRESS 0x1 +#define AT_EGRESS 0x2 + +#define TC_NCLS _TC_MAKEMASK1(8) +#define SET_TC_NCLS(v) ( TC_NCLS | (v & ~TC_NCLS)) +#define CLR_TC_NCLS(v) ( v & ~TC_NCLS) + +#define S_TC_RTTL _TC_MAKE32(9) +#define M_TC_RTTL _TC_MAKEMASK(3,S_TC_RTTL) +#define G_TC_RTTL(x) _TC_GETVALUE(x,S_TC_RTTL,M_TC_RTTL) +#define V_TC_RTTL(x) _TC_MAKEVALUE(x,S_TC_RTTL) +#define SET_TC_RTTL(v,n) ((V_TC_RTTL(n)) | (v & ~M_TC_RTTL)) + +#define S_TC_AT _TC_MAKE32(12) +#define M_TC_AT _TC_MAKEMASK(2,S_TC_AT) +#define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT) +#define V_TC_AT(x) _TC_MAKEVALUE(x,S_TC_AT) +#define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT)) + +/* Action attributes */ +enum { + TCA_ACT_UNSPEC, + TCA_ACT_KIND, + TCA_ACT_OPTIONS, + TCA_ACT_INDEX, + TCA_ACT_STATS, + __TCA_ACT_MAX +}; + +#define TCA_ACT_MAX __TCA_ACT_MAX +#define TCA_OLD_COMPAT (TCA_ACT_MAX+1) +#define TCA_ACT_MAX_PRIO 32 +#define TCA_ACT_BIND 1 +#define TCA_ACT_NOBIND 0 +#define TCA_ACT_UNBIND 1 +#define TCA_ACT_NOUNBIND 0 +#define TCA_ACT_REPLACE 1 +#define TCA_ACT_NOREPLACE 0 +#define MAX_REC_LOOP 4 +#define MAX_RED_LOOP 4 + +#define TC_ACT_UNSPEC (-1) +#define TC_ACT_OK 0 +#define TC_ACT_RECLASSIFY 1 +#define TC_ACT_SHOT 2 +#define TC_ACT_PIPE 3 +#define TC_ACT_STOLEN 4 +#define TC_ACT_QUEUED 5 +#define TC_ACT_REPEAT 6 +#define TC_ACT_JUMP 0x10000000 + +/* Action type identifiers*/ +enum { + TCA_ID_UNSPEC=0, + TCA_ID_POLICE=1, + /* other actions go here */ + __TCA_ID_MAX=255 +}; + +#define TCA_ID_MAX __TCA_ID_MAX + +struct tc_police { + __u32 index; + int action; +#define TC_POLICE_UNSPEC TC_ACT_UNSPEC +#define TC_POLICE_OK TC_ACT_OK +#define TC_POLICE_RECLASSIFY TC_ACT_RECLASSIFY +#define TC_POLICE_SHOT TC_ACT_SHOT +#define TC_POLICE_PIPE TC_ACT_PIPE + + __u32 limit; + __u32 burst; + __u32 mtu; + struct tc_ratespec rate; + struct tc_ratespec peakrate; + int refcnt; + int bindcnt; + __u32 capab; +}; + +struct tcf_t { + __u64 install; + __u64 lastuse; + __u64 expires; +}; + +struct tc_cnt { + int refcnt; + int bindcnt; +}; + +#define tc_gen \ + __u32 index; \ + __u32 capab; \ + int action; \ + int refcnt; \ + int bindcnt + +enum { + TCA_POLICE_UNSPEC, + TCA_POLICE_TBF, + TCA_POLICE_RATE, + TCA_POLICE_PEAKRATE, + TCA_POLICE_AVRATE, + TCA_POLICE_RESULT, + __TCA_POLICE_MAX +#define TCA_POLICE_RESULT TCA_POLICE_RESULT +}; + +#define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1) + +/* U32 filters */ + +#define TC_U32_HTID(h) ((h)&0xFFF00000) +#define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20) +#define TC_U32_HASH(h) (((h)>>12)&0xFF) +#define TC_U32_NODE(h) ((h)&0xFFF) +#define TC_U32_KEY(h) ((h)&0xFFFFF) +#define TC_U32_UNSPEC 0 +#define TC_U32_ROOT (0xFFF00000) + +enum { + TCA_U32_UNSPEC, + TCA_U32_CLASSID, + 
TCA_U32_HASH, + TCA_U32_LINK, + TCA_U32_DIVISOR, + TCA_U32_SEL, + TCA_U32_POLICE, + TCA_U32_ACT, + TCA_U32_INDEV, + TCA_U32_PCNT, + TCA_U32_MARK, + __TCA_U32_MAX +}; + +#define TCA_U32_MAX (__TCA_U32_MAX - 1) + +struct tc_u32_key { + __be32 mask; + __be32 val; + int off; + int offmask; +}; + +struct tc_u32_sel { + unsigned char flags; + unsigned char offshift; + unsigned char nkeys; + + __be16 offmask; + __u16 off; + short offoff; + + short hoff; + __be32 hmask; + struct tc_u32_key keys[0]; +}; + +struct tc_u32_mark { + __u32 val; + __u32 mask; + __u32 success; +}; + +struct tc_u32_pcnt { + __u64 rcnt; + __u64 rhit; + __u64 kcnts[0]; +}; + +/* Flags */ + +#define TC_U32_TERMINAL 1 +#define TC_U32_OFFSET 2 +#define TC_U32_VAROFFSET 4 +#define TC_U32_EAT 8 + +#define TC_U32_MAXDEPTH 8 + + +/* RSVP filter */ + +enum { + TCA_RSVP_UNSPEC, + TCA_RSVP_CLASSID, + TCA_RSVP_DST, + TCA_RSVP_SRC, + TCA_RSVP_PINFO, + TCA_RSVP_POLICE, + TCA_RSVP_ACT, + __TCA_RSVP_MAX +}; + +#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 ) + +struct tc_rsvp_gpi { + __u32 key; + __u32 mask; + int offset; +}; + +struct tc_rsvp_pinfo { + struct tc_rsvp_gpi dpi; + struct tc_rsvp_gpi spi; + __u8 protocol; + __u8 tunnelid; + __u8 tunnelhdr; + __u8 pad; +}; + +/* ROUTE filter */ + +enum { + TCA_ROUTE4_UNSPEC, + TCA_ROUTE4_CLASSID, + TCA_ROUTE4_TO, + TCA_ROUTE4_FROM, + TCA_ROUTE4_IIF, + TCA_ROUTE4_POLICE, + TCA_ROUTE4_ACT, + __TCA_ROUTE4_MAX +}; + +#define TCA_ROUTE4_MAX (__TCA_ROUTE4_MAX - 1) + + +/* FW filter */ + +enum { + TCA_FW_UNSPEC, + TCA_FW_CLASSID, + TCA_FW_POLICE, + TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */ + TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */ + TCA_FW_MASK, + __TCA_FW_MAX +}; + +#define TCA_FW_MAX (__TCA_FW_MAX - 1) + +/* TC index filter */ + +enum { + TCA_TCINDEX_UNSPEC, + TCA_TCINDEX_HASH, + TCA_TCINDEX_MASK, + TCA_TCINDEX_SHIFT, + TCA_TCINDEX_FALL_THROUGH, + TCA_TCINDEX_CLASSID, + TCA_TCINDEX_POLICE, + TCA_TCINDEX_ACT, + __TCA_TCINDEX_MAX +}; + +#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1) + +/* Flow filter */ + +enum { + FLOW_KEY_SRC, + FLOW_KEY_DST, + FLOW_KEY_PROTO, + FLOW_KEY_PROTO_SRC, + FLOW_KEY_PROTO_DST, + FLOW_KEY_IIF, + FLOW_KEY_PRIORITY, + FLOW_KEY_MARK, + FLOW_KEY_NFCT, + FLOW_KEY_NFCT_SRC, + FLOW_KEY_NFCT_DST, + FLOW_KEY_NFCT_PROTO_SRC, + FLOW_KEY_NFCT_PROTO_DST, + FLOW_KEY_RTCLASSID, + FLOW_KEY_SKUID, + FLOW_KEY_SKGID, + FLOW_KEY_VLAN_TAG, + FLOW_KEY_RXHASH, + __FLOW_KEY_MAX, +}; + +#define FLOW_KEY_MAX (__FLOW_KEY_MAX - 1) + +enum { + FLOW_MODE_MAP, + FLOW_MODE_HASH, +}; + +enum { + TCA_FLOW_UNSPEC, + TCA_FLOW_KEYS, + TCA_FLOW_MODE, + TCA_FLOW_BASECLASS, + TCA_FLOW_RSHIFT, + TCA_FLOW_ADDEND, + TCA_FLOW_MASK, + TCA_FLOW_XOR, + TCA_FLOW_DIVISOR, + TCA_FLOW_ACT, + TCA_FLOW_POLICE, + TCA_FLOW_EMATCHES, + TCA_FLOW_PERTURB, + __TCA_FLOW_MAX +}; + +#define TCA_FLOW_MAX (__TCA_FLOW_MAX - 1) + +/* Basic filter */ + +enum { + TCA_BASIC_UNSPEC, + TCA_BASIC_CLASSID, + TCA_BASIC_EMATCHES, + TCA_BASIC_ACT, + TCA_BASIC_POLICE, + __TCA_BASIC_MAX +}; + +#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1) + + +/* Cgroup classifier */ + +enum { + TCA_CGROUP_UNSPEC, + TCA_CGROUP_ACT, + TCA_CGROUP_POLICE, + TCA_CGROUP_EMATCHES, + __TCA_CGROUP_MAX, +}; + +#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1) + +/* Extended Matches */ + +struct tcf_ematch_tree_hdr { + __u16 nmatches; + __u16 progid; +}; + +enum { + TCA_EMATCH_TREE_UNSPEC, + TCA_EMATCH_TREE_HDR, + TCA_EMATCH_TREE_LIST, + __TCA_EMATCH_TREE_MAX +}; +#define TCA_EMATCH_TREE_MAX (__TCA_EMATCH_TREE_MAX - 1) + +struct tcf_ematch_hdr { + __u16 matchid; + __u16 kind; 
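+	/* flags below carries the TCF_EM_REL_*, TCF_EM_INVERT and
+	 * TCF_EM_SIMPLE bits; see the bit layout documented after
+	 * this struct */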
+ __u16 flags; + __u16 pad; /* currently unused */ +}; + +/* 0 1 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 + * +-----------------------+-+-+---+ + * | Unused |S|I| R | + * +-----------------------+-+-+---+ + * + * R(2) ::= relation to next ematch + * where: 0 0 END (last ematch) + * 0 1 AND + * 1 0 OR + * 1 1 Unused (invalid) + * I(1) ::= invert result + * S(1) ::= simple payload + */ +#define TCF_EM_REL_END 0 +#define TCF_EM_REL_AND (1<<0) +#define TCF_EM_REL_OR (1<<1) +#define TCF_EM_INVERT (1<<2) +#define TCF_EM_SIMPLE (1<<3) + +#define TCF_EM_REL_MASK 3 +#define TCF_EM_REL_VALID(v) (((v) & TCF_EM_REL_MASK) != TCF_EM_REL_MASK) + +enum { + TCF_LAYER_LINK, + TCF_LAYER_NETWORK, + TCF_LAYER_TRANSPORT, + __TCF_LAYER_MAX +}; +#define TCF_LAYER_MAX (__TCF_LAYER_MAX - 1) + +/* Ematch type assignments + * 1..32767 Reserved for ematches inside kernel tree + * 32768..65535 Free to use, not reliable + */ +#define TCF_EM_CONTAINER 0 +#define TCF_EM_CMP 1 +#define TCF_EM_NBYTE 2 +#define TCF_EM_U32 3 +#define TCF_EM_META 4 +#define TCF_EM_TEXT 5 +#define TCF_EM_VLAN 6 +#define TCF_EM_MAX 6 + +enum { + TCF_EM_PROG_TC +}; + +enum { + TCF_EM_OPND_EQ, + TCF_EM_OPND_GT, + TCF_EM_OPND_LT +}; + +#endif diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h new file mode 100644 index 00000000..410b33d0 --- /dev/null +++ b/include/linux/pkt_sched.h @@ -0,0 +1,657 @@ +#ifndef __LINUX_PKT_SCHED_H +#define __LINUX_PKT_SCHED_H + +#include <linux/types.h> + +/* Logical priority bands not depending on specific packet scheduler. + Every scheduler will map them to real traffic classes, if it has + no more precise mechanism to classify packets. + + These numbers have no special meaning, though their coincidence + with obsolete IPv6 values is not occasional :-). New IPv6 drafts + preferred full anarchy inspired by diffserv group. + + Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy + class, actually, as rule it will be handled with more care than + filler or even bulk. + */ + +#define TC_PRIO_BESTEFFORT 0 +#define TC_PRIO_FILLER 1 +#define TC_PRIO_BULK 2 +#define TC_PRIO_INTERACTIVE_BULK 4 +#define TC_PRIO_INTERACTIVE 6 +#define TC_PRIO_CONTROL 7 + +#define TC_PRIO_MAX 15 + +/* Generic queue statistics, available for all the elements. + Particular schedulers may have also their private records. + */ + +struct tc_stats { + __u64 bytes; /* Number of enqueued bytes */ + __u32 packets; /* Number of enqueued packets */ + __u32 drops; /* Packets dropped because of lack of resources */ + __u32 overlimits; /* Number of throttle events when this + * flow goes out of allocated bandwidth */ + __u32 bps; /* Current flow byte rate */ + __u32 pps; /* Current flow packet rate */ + __u32 qlen; + __u32 backlog; +}; + +struct tc_estimator { + signed char interval; + unsigned char ewma_log; +}; + +/* "Handles" + --------- + + All the traffic control objects have 32bit identifiers, or "handles". + + They can be considered as opaque numbers from user API viewpoint, + but actually they always consist of two fields: major and + minor numbers, which are interpreted by kernel specially, + that may be used by applications, though not recommended. + + F.e. qdisc handles always have minor number equal to zero, + classes (or flows) have major equal to parent qdisc major, and + minor uniquely identifying class inside qdisc. 
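+
+   F.e. the class written as "1:10" in the tc utility's hexadecimal
+   major:minor notation is the single 32bit handle 0x00010010, i.e.
+   TC_H_MAKE(0x00010000, 0x0010); TC_H_MAJ() and TC_H_MIN() below
+   recover the two halves again.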
+ + Macros to manipulate handles: + */ + +#define TC_H_MAJ_MASK (0xFFFF0000U) +#define TC_H_MIN_MASK (0x0000FFFFU) +#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) +#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) +#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK)) + +#define TC_H_UNSPEC (0U) +#define TC_H_ROOT (0xFFFFFFFFU) +#define TC_H_INGRESS (0xFFFFFFF1U) + +struct tc_ratespec { + unsigned char cell_log; + unsigned char __reserved; + unsigned short overhead; + short cell_align; + unsigned short mpu; + __u32 rate; +}; + +#define TC_RTAB_SIZE 1024 + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +enum { + TCA_STAB_UNSPEC, + TCA_STAB_BASE, + TCA_STAB_DATA, + __TCA_STAB_MAX +}; + +#define TCA_STAB_MAX (__TCA_STAB_MAX - 1) + +/* FIFO section */ + +struct tc_fifo_qopt { + __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ +}; + +/* PRIO section */ + +#define TCQ_PRIO_BANDS 16 +#define TCQ_MIN_PRIO_BANDS 2 + +struct tc_prio_qopt { + int bands; /* Number of bands */ + __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ +}; + +/* MULTIQ section */ + +struct tc_multiq_qopt { + __u16 bands; /* Number of bands */ + __u16 max_bands; /* Maximum number of queues */ +}; + +/* PLUG section */ + +#define TCQ_PLUG_BUFFER 0 +#define TCQ_PLUG_RELEASE_ONE 1 +#define TCQ_PLUG_RELEASE_INDEFINITE 2 +#define TCQ_PLUG_LIMIT 3 + +struct tc_plug_qopt { + /* TCQ_PLUG_BUFFER: Inset a plug into the queue and + * buffer any incoming packets + * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head + * to beginning of the next plug. + * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. + * Stop buffering packets until the next TCQ_PLUG_BUFFER + * command is received (just act as a pass-thru queue). 
+ * TCQ_PLUG_LIMIT: Increase/decrease queue size + */ + int action; + __u32 limit; +}; + +/* TBF section */ + +struct tc_tbf_qopt { + struct tc_ratespec rate; + struct tc_ratespec peakrate; + __u32 limit; + __u32 buffer; + __u32 mtu; +}; + +enum { + TCA_TBF_UNSPEC, + TCA_TBF_PARMS, + TCA_TBF_RTAB, + TCA_TBF_PTAB, + __TCA_TBF_MAX, +}; + +#define TCA_TBF_MAX (__TCA_TBF_MAX - 1) + + +/* TEQL section */ + +/* TEQL does not require any parameters */ + +/* SFQ section */ + +struct tc_sfq_qopt { + unsigned quantum; /* Bytes per round allocated to flow */ + int perturb_period; /* Period of hash perturbation */ + __u32 limit; /* Maximal packets in queue */ + unsigned divisor; /* Hash divisor */ + unsigned flows; /* Maximal number of flows */ +}; + +struct tc_sfqred_stats { + __u32 prob_drop; /* Early drops, below max threshold */ + __u32 forced_drop; /* Early drops, after max threshold */ + __u32 prob_mark; /* Marked packets, below max threshold */ + __u32 forced_mark; /* Marked packets, after max threshold */ + __u32 prob_mark_head; /* Marked packets, below max threshold */ + __u32 forced_mark_head;/* Marked packets, after max threshold */ +}; + +struct tc_sfq_qopt_v1 { + struct tc_sfq_qopt v0; + unsigned int depth; /* max number of packets per flow */ + unsigned int headdrop; +/* SFQRED parameters */ + __u32 limit; /* HARD maximal flow queue length (bytes) */ + __u32 qth_min; /* Min average length threshold (bytes) */ + __u32 qth_max; /* Max average length threshold (bytes) */ + unsigned char Wlog; /* log(W) */ + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ + unsigned char Scell_log; /* cell size for idle damping */ + unsigned char flags; + __u32 max_P; /* probability, high resolution */ +/* SFQRED stats */ + struct tc_sfqred_stats stats; +}; + + +struct tc_sfq_xstats { + __s32 allot; +}; + +/* RED section */ + +enum { + TCA_RED_UNSPEC, + TCA_RED_PARMS, + TCA_RED_STAB, + TCA_RED_MAX_P, + __TCA_RED_MAX, +}; + +#define TCA_RED_MAX (__TCA_RED_MAX - 1) + +struct tc_red_qopt { + __u32 limit; /* HARD maximal queue length (bytes) */ + __u32 qth_min; /* Min average length threshold (bytes) */ + __u32 qth_max; /* Max average length threshold (bytes) */ + unsigned char Wlog; /* log(W) */ + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ + unsigned char Scell_log; /* cell size for idle damping */ + unsigned char flags; +#define TC_RED_ECN 1 +#define TC_RED_HARDDROP 2 +#define TC_RED_ADAPTATIVE 4 +}; + +struct tc_red_xstats { + __u32 early; /* Early drops */ + __u32 pdrop; /* Drops due to queue limits */ + __u32 other; /* Drops due to drop() calls */ + __u32 marked; /* Marked packets */ +}; + +/* GRED section */ + +#define MAX_DPs 16 + +enum { + TCA_GRED_UNSPEC, + TCA_GRED_PARMS, + TCA_GRED_STAB, + TCA_GRED_DPS, + TCA_GRED_MAX_P, + __TCA_GRED_MAX, +}; + +#define TCA_GRED_MAX (__TCA_GRED_MAX - 1) + +struct tc_gred_qopt { + __u32 limit; /* HARD maximal queue length (bytes) */ + __u32 qth_min; /* Min average length threshold (bytes) */ + __u32 qth_max; /* Max average length threshold (bytes) */ + __u32 DP; /* up to 2^32 DPs */ + __u32 backlog; + __u32 qave; + __u32 forced; + __u32 early; + __u32 other; + __u32 pdrop; + __u8 Wlog; /* log(W) */ + __u8 Plog; /* log(P_max/(qth_max-qth_min)) */ + __u8 Scell_log; /* cell size for idle damping */ + __u8 prio; /* prio of this VQ */ + __u32 packets; + __u32 bytesin; +}; + +/* gred setup */ +struct tc_gred_sopt { + __u32 DPs; + __u32 def_DP; + __u8 grio; + __u8 flags; + __u16 pad1; +}; + +/* CHOKe section */ + +enum { + TCA_CHOKE_UNSPEC, + 
TCA_CHOKE_PARMS, + TCA_CHOKE_STAB, + TCA_CHOKE_MAX_P, + __TCA_CHOKE_MAX, +}; + +#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1) + +struct tc_choke_qopt { + __u32 limit; /* Hard queue length (packets) */ + __u32 qth_min; /* Min average threshold (packets) */ + __u32 qth_max; /* Max average threshold (packets) */ + unsigned char Wlog; /* log(W) */ + unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ + unsigned char Scell_log; /* cell size for idle damping */ + unsigned char flags; /* see RED flags */ +}; + +struct tc_choke_xstats { + __u32 early; /* Early drops */ + __u32 pdrop; /* Drops due to queue limits */ + __u32 other; /* Drops due to drop() calls */ + __u32 marked; /* Marked packets */ + __u32 matched; /* Drops due to flow match */ +}; + +/* HTB section */ +#define TC_HTB_NUMPRIO 8 +#define TC_HTB_MAXDEPTH 8 +#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */ + +struct tc_htb_opt { + struct tc_ratespec rate; + struct tc_ratespec ceil; + __u32 buffer; + __u32 cbuffer; + __u32 quantum; + __u32 level; /* out only */ + __u32 prio; +}; +struct tc_htb_glob { + __u32 version; /* to match HTB/TC */ + __u32 rate2quantum; /* bps->quantum divisor */ + __u32 defcls; /* default class number */ + __u32 debug; /* debug flags */ + + /* stats */ + __u32 direct_pkts; /* count of non shaped packets */ +}; +enum { + TCA_HTB_UNSPEC, + TCA_HTB_PARMS, + TCA_HTB_INIT, + TCA_HTB_CTAB, + TCA_HTB_RTAB, + __TCA_HTB_MAX, +}; + +#define TCA_HTB_MAX (__TCA_HTB_MAX - 1) + +struct tc_htb_xstats { + __u32 lends; + __u32 borrows; + __u32 giants; /* too big packets (rate will not be accurate) */ + __u32 tokens; + __u32 ctokens; +}; + +/* HFSC section */ + +struct tc_hfsc_qopt { + __u16 defcls; /* default class */ +}; + +struct tc_service_curve { + __u32 m1; /* slope of the first segment in bps */ + __u32 d; /* x-projection of the first segment in us */ + __u32 m2; /* slope of the second segment in bps */ +}; + +struct tc_hfsc_stats { + __u64 work; /* total work done */ + __u64 rtwork; /* work done by real-time criteria */ + __u32 period; /* current period */ + __u32 level; /* class level in hierarchy */ +}; + +enum { + TCA_HFSC_UNSPEC, + TCA_HFSC_RSC, + TCA_HFSC_FSC, + TCA_HFSC_USC, + __TCA_HFSC_MAX, +}; + +#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) + + +/* CBQ section */ + +#define TC_CBQ_MAXPRIO 8 +#define TC_CBQ_MAXLEVEL 8 +#define TC_CBQ_DEF_EWMA 5 + +struct tc_cbq_lssopt { + unsigned char change; + unsigned char flags; +#define TCF_CBQ_LSS_BOUNDED 1 +#define TCF_CBQ_LSS_ISOLATED 2 + unsigned char ewma_log; + unsigned char level; +#define TCF_CBQ_LSS_FLAGS 1 +#define TCF_CBQ_LSS_EWMA 2 +#define TCF_CBQ_LSS_MAXIDLE 4 +#define TCF_CBQ_LSS_MINIDLE 8 +#define TCF_CBQ_LSS_OFFTIME 0x10 +#define TCF_CBQ_LSS_AVPKT 0x20 + __u32 maxidle; + __u32 minidle; + __u32 offtime; + __u32 avpkt; +}; + +struct tc_cbq_wrropt { + unsigned char flags; + unsigned char priority; + unsigned char cpriority; + unsigned char __reserved; + __u32 allot; + __u32 weight; +}; + +struct tc_cbq_ovl { + unsigned char strategy; +#define TC_CBQ_OVL_CLASSIC 0 +#define TC_CBQ_OVL_DELAY 1 +#define TC_CBQ_OVL_LOWPRIO 2 +#define TC_CBQ_OVL_DROP 3 +#define TC_CBQ_OVL_RCLASSIC 4 + unsigned char priority2; + __u16 pad; + __u32 penalty; +}; + +struct tc_cbq_police { + unsigned char police; + unsigned char __res1; + unsigned short __res2; +}; + +struct tc_cbq_fopt { + __u32 split; + __u32 defmap; + __u32 defchange; +}; + +struct tc_cbq_xstats { + __u32 borrows; + __u32 overactions; + __s32 avgidle; + __s32 undertime; +}; + +enum { + TCA_CBQ_UNSPEC, + 
TCA_CBQ_LSSOPT, + TCA_CBQ_WRROPT, + TCA_CBQ_FOPT, + TCA_CBQ_OVL_STRATEGY, + TCA_CBQ_RATE, + TCA_CBQ_RTAB, + TCA_CBQ_POLICE, + __TCA_CBQ_MAX, +}; + +#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1) + +/* dsmark section */ + +enum { + TCA_DSMARK_UNSPEC, + TCA_DSMARK_INDICES, + TCA_DSMARK_DEFAULT_INDEX, + TCA_DSMARK_SET_TC_INDEX, + TCA_DSMARK_MASK, + TCA_DSMARK_VALUE, + __TCA_DSMARK_MAX, +}; + +#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1) + +/* ATM section */ + +enum { + TCA_ATM_UNSPEC, + TCA_ATM_FD, /* file/socket descriptor */ + TCA_ATM_PTR, /* pointer to descriptor - later */ + TCA_ATM_HDR, /* LL header */ + TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */ + TCA_ATM_ADDR, /* PVC address (for output only) */ + TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */ + __TCA_ATM_MAX, +}; + +#define TCA_ATM_MAX (__TCA_ATM_MAX - 1) + +/* Network emulator */ + +enum { + TCA_NETEM_UNSPEC, + TCA_NETEM_CORR, + TCA_NETEM_DELAY_DIST, + TCA_NETEM_REORDER, + TCA_NETEM_CORRUPT, + TCA_NETEM_LOSS, + TCA_NETEM_RATE, + __TCA_NETEM_MAX, +}; + +#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1) + +struct tc_netem_qopt { + __u32 latency; /* added delay (us) */ + __u32 limit; /* fifo limit (packets) */ + __u32 loss; /* random packet loss (0=none ~0=100%) */ + __u32 gap; /* re-ordering gap (0 for none) */ + __u32 duplicate; /* random packet dup (0=none ~0=100%) */ + __u32 jitter; /* random jitter in latency (us) */ +}; + +struct tc_netem_corr { + __u32 delay_corr; /* delay correlation */ + __u32 loss_corr; /* packet loss correlation */ + __u32 dup_corr; /* duplicate correlation */ +}; + +struct tc_netem_reorder { + __u32 probability; + __u32 correlation; +}; + +struct tc_netem_corrupt { + __u32 probability; + __u32 correlation; +}; + +struct tc_netem_rate { + __u32 rate; /* byte/s */ + __s32 packet_overhead; + __u32 cell_size; + __s32 cell_overhead; +}; + +enum { + NETEM_LOSS_UNSPEC, + NETEM_LOSS_GI, /* General Intuitive - 4 state model */ + NETEM_LOSS_GE, /* Gilbert Elliot models */ + __NETEM_LOSS_MAX +}; +#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) + +/* State transition probabilities for 4 state model */ +struct tc_netem_gimodel { + __u32 p13; + __u32 p31; + __u32 p32; + __u32 p14; + __u32 p23; +}; + +/* Gilbert-Elliot models */ +struct tc_netem_gemodel { + __u32 p; + __u32 r; + __u32 h; + __u32 k1; +}; + +#define NETEM_DIST_SCALE 8192 +#define NETEM_DIST_MAX 16384 + +/* DRR */ + +enum { + TCA_DRR_UNSPEC, + TCA_DRR_QUANTUM, + __TCA_DRR_MAX +}; + +#define TCA_DRR_MAX (__TCA_DRR_MAX - 1) + +struct tc_drr_stats { + __u32 deficit; +}; + +/* MQPRIO */ +#define TC_QOPT_BITMASK 15 +#define TC_QOPT_MAX_QUEUE 16 + +struct tc_mqprio_qopt { + __u8 num_tc; + __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; + __u8 hw; + __u16 count[TC_QOPT_MAX_QUEUE]; + __u16 offset[TC_QOPT_MAX_QUEUE]; +}; + +/* SFB */ + +enum { + TCA_SFB_UNSPEC, + TCA_SFB_PARMS, + __TCA_SFB_MAX, +}; + +#define TCA_SFB_MAX (__TCA_SFB_MAX - 1) + +/* + * Note: increment, decrement are Q0.16 fixed-point values. 
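+ * That is, with 16 fractional bits a stored value v denotes the
+ * probability v / 65536, so e.g. 0x8000 is one half and 0xFFFF
+ * (cf. SFB_MAX_PROB below) is just short of 1.0.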
+ */ +struct tc_sfb_qopt { + __u32 rehash_interval; /* delay between hash move, in ms */ + __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */ + __u32 max; /* max len of qlen_min */ + __u32 bin_size; /* maximum queue length per bin */ + __u32 increment; /* probability increment, (d1 in Blue) */ + __u32 decrement; /* probability decrement, (d2 in Blue) */ + __u32 limit; /* max SFB queue length */ + __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */ + __u32 penalty_burst; +}; + +struct tc_sfb_xstats { + __u32 earlydrop; + __u32 penaltydrop; + __u32 bucketdrop; + __u32 queuedrop; + __u32 childdrop; /* drops in child qdisc */ + __u32 marked; + __u32 maxqlen; + __u32 maxprob; + __u32 avgprob; +}; + +#define SFB_MAX_PROB 0xFFFF + +/* QFQ */ +enum { + TCA_QFQ_UNSPEC, + TCA_QFQ_WEIGHT, + TCA_QFQ_LMAX, + __TCA_QFQ_MAX +}; + +#define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1) + +struct tc_qfq_stats { + __u32 weight; + __u32 lmax; +}; + +#endif diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h new file mode 100644 index 00000000..721301b0 --- /dev/null +++ b/include/linux/pktcdvd.h @@ -0,0 +1,302 @@ +/* + * Copyright (C) 2000 Jens Axboe <axboe@suse.de> + * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com> + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and + * DVD-RW devices. + * + */ +#ifndef __PKTCDVD_H +#define __PKTCDVD_H + +#include <linux/types.h> + +/* + * 1 for normal debug messages, 2 is very verbose. 0 to turn it off. + */ +#define PACKET_DEBUG 1 + +#define MAX_WRITERS 8 + +#define PKT_RB_POOL_SIZE 512 + +/* + * How long we should hold a non-full packet before starting data gathering. + */ +#define PACKET_WAIT_TIME (HZ * 5 / 1000) + +/* + * use drive write caching -- we need deferred error handling to be + * able to successfully recover with this option (drive will return good + * status as soon as the cdb is validated). 
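+ * (USE_WCACHING below therefore evaluates to 1 only when
+ * CONFIG_CDROM_PKTCDVD_WCACHE explicitly selects it, and to 0
+ * otherwise.)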
+ */ +#if defined(CONFIG_CDROM_PKTCDVD_WCACHE) +#define USE_WCACHING 1 +#else +#define USE_WCACHING 0 +#endif + +/* + * No user-servicable parts beyond this point -> + */ + +/* + * device types + */ +#define PACKET_CDR 1 +#define PACKET_CDRW 2 +#define PACKET_DVDR 3 +#define PACKET_DVDRW 4 + +/* + * flags + */ +#define PACKET_WRITABLE 1 /* pd is writable */ +#define PACKET_NWA_VALID 2 /* next writable address valid */ +#define PACKET_LRA_VALID 3 /* last recorded address valid */ +#define PACKET_MERGE_SEGS 4 /* perform segment merging to keep */ + /* underlying cdrom device happy */ + +/* + * Disc status -- from READ_DISC_INFO + */ +#define PACKET_DISC_EMPTY 0 +#define PACKET_DISC_INCOMPLETE 1 +#define PACKET_DISC_COMPLETE 2 +#define PACKET_DISC_OTHER 3 + +/* + * write type, and corresponding data block type + */ +#define PACKET_MODE1 1 +#define PACKET_MODE2 2 +#define PACKET_BLOCK_MODE1 8 +#define PACKET_BLOCK_MODE2 10 + +/* + * Last session/border status + */ +#define PACKET_SESSION_EMPTY 0 +#define PACKET_SESSION_INCOMPLETE 1 +#define PACKET_SESSION_RESERVED 2 +#define PACKET_SESSION_COMPLETE 3 + +#define PACKET_MCN "4a656e734178626f65323030300000" + +#undef PACKET_USE_LS + +#define PKT_CTRL_CMD_SETUP 0 +#define PKT_CTRL_CMD_TEARDOWN 1 +#define PKT_CTRL_CMD_STATUS 2 + +struct pkt_ctrl_command { + __u32 command; /* in: Setup, teardown, status */ + __u32 dev_index; /* in/out: Device index */ + __u32 dev; /* in/out: Device nr for cdrw device */ + __u32 pkt_dev; /* in/out: Device nr for packet device */ + __u32 num_devices; /* out: Largest device index + 1 */ + __u32 padding; /* Not used */ +}; + +/* + * packet ioctls + */ +#define PACKET_IOCTL_MAGIC ('X') +#define PACKET_CTRL_CMD _IOWR(PACKET_IOCTL_MAGIC, 1, struct pkt_ctrl_command) + +#ifdef __KERNEL__ +#include <linux/blkdev.h> +#include <linux/completion.h> +#include <linux/cdrom.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/mempool.h> + +/* default bio write queue congestion marks */ +#define PKT_WRITE_CONGESTION_ON 10000 +#define PKT_WRITE_CONGESTION_OFF 9000 + + +struct packet_settings +{ + __u32 size; /* packet size in (512 byte) sectors */ + __u8 fp; /* fixed packets */ + __u8 link_loss; /* the rest is specified + * as per Mt Fuji */ + __u8 write_type; + __u8 track_mode; + __u8 block_mode; +}; + +/* + * Very crude stats for now + */ +struct packet_stats +{ + unsigned long pkt_started; + unsigned long pkt_ended; + unsigned long secs_w; + unsigned long secs_rg; + unsigned long secs_r; +}; + +struct packet_cdrw +{ + struct list_head pkt_free_list; + struct list_head pkt_active_list; + spinlock_t active_list_lock; /* Serialize access to pkt_active_list */ + struct task_struct *thread; + atomic_t pending_bios; +}; + +/* + * Switch to high speed reading after reading this many kilobytes + * with no interspersed writes. 
+ */ +#define HI_SPEED_SWITCH 512 + +struct packet_iosched +{ + atomic_t attention; /* Set to non-zero when queue processing is needed */ + int writing; /* Non-zero when writing, zero when reading */ + spinlock_t lock; /* Protecting read/write queue manipulations */ + struct bio_list read_queue; + struct bio_list write_queue; + sector_t last_write; /* The sector where the last write ended */ + int successive_reads; +}; + +/* + * 32 buffers of 2048 bytes + */ +#if (PAGE_SIZE % CD_FRAMESIZE) != 0 +#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE" +#endif +#define PACKET_MAX_SIZE 128 +#define FRAMES_PER_PAGE (PAGE_SIZE / CD_FRAMESIZE) +#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9) + +enum packet_data_state { + PACKET_IDLE_STATE, /* Not used at the moment */ + PACKET_WAITING_STATE, /* Waiting for more bios to arrive, so */ + /* we don't have to do as much */ + /* data gathering */ + PACKET_READ_WAIT_STATE, /* Waiting for reads to fill in holes */ + PACKET_WRITE_WAIT_STATE, /* Waiting for the write to complete */ + PACKET_RECOVERY_STATE, /* Recover after read/write errors */ + PACKET_FINISHED_STATE, /* After write has finished */ + + PACKET_NUM_STATES /* Number of possible states */ +}; + +/* + * Information needed for writing a single packet + */ +struct pktcdvd_device; + +struct packet_data +{ + struct list_head list; + + spinlock_t lock; /* Lock protecting state transitions and */ + /* orig_bios list */ + + struct bio_list orig_bios; /* Original bios passed to pkt_make_request */ + /* that will be handled by this packet */ + int write_size; /* Total size of all bios in the orig_bios */ + /* list, measured in number of frames */ + + struct bio *w_bio; /* The bio we will send to the real CD */ + /* device once we have all data for the */ + /* packet we are going to write */ + sector_t sector; /* First sector in this packet */ + int frames; /* Number of frames in this packet */ + + enum packet_data_state state; /* Current state */ + atomic_t run_sm; /* Incremented whenever the state */ + /* machine needs to be run */ + long sleep_time; /* Set this to non-zero to make the state */ + /* machine run after this many jiffies. */ + + atomic_t io_wait; /* Number of pending IO operations */ + atomic_t io_errors; /* Number of read/write errors during IO */ + + struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */ + struct page *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE]; + + int cache_valid; /* If non-zero, the data for the zone defined */ + /* by the sector variable is completely cached */ + /* in the pages[] vector. 
*/ + + int id; /* ID number for debugging */ + struct pktcdvd_device *pd; +}; + +struct pkt_rb_node { + struct rb_node rb_node; + struct bio *bio; +}; + +struct packet_stacked_data +{ + struct bio *bio; /* Original read request bio */ + struct pktcdvd_device *pd; +}; +#define PSD_POOL_SIZE 64 + +struct pktcdvd_kobj +{ + struct kobject kobj; + struct pktcdvd_device *pd; +}; +#define to_pktcdvdkobj(_k) \ + ((struct pktcdvd_kobj*)container_of(_k,struct pktcdvd_kobj,kobj)) + +struct pktcdvd_device +{ + struct block_device *bdev; /* dev attached */ + dev_t pkt_dev; /* our dev */ + char name[20]; + struct packet_settings settings; + struct packet_stats stats; + int refcnt; /* Open count */ + int write_speed; /* current write speed, kB/s */ + int read_speed; /* current read speed, kB/s */ + unsigned long offset; /* start offset */ + __u8 mode_offset; /* 0 / 8 */ + __u8 type; + unsigned long flags; + __u16 mmc3_profile; + __u32 nwa; /* next writable address */ + __u32 lra; /* last recorded address */ + struct packet_cdrw cdrw; + wait_queue_head_t wqueue; + + spinlock_t lock; /* Serialize access to bio_queue */ + struct rb_root bio_queue; /* Work queue of bios we need to handle */ + int bio_queue_size; /* Number of nodes in bio_queue */ + sector_t current_sector; /* Keep track of where the elevator is */ + atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */ + /* needs to be run. */ + mempool_t *rb_pool; /* mempool for pkt_rb_node allocations */ + + struct packet_iosched iosched; + struct gendisk *disk; + + int write_congestion_off; + int write_congestion_on; + + struct device *dev; /* sysfs pktcdvd[0-7] dev */ + struct pktcdvd_kobj *kobj_stat; /* sysfs pktcdvd[0-7]/stat/ */ + struct pktcdvd_kobj *kobj_wqueue; /* sysfs pktcdvd[0-7]/write_queue/ */ + + struct dentry *dfs_d_root; /* debugfs: devname directory */ + struct dentry *dfs_f_info; /* debugfs: info file */ +}; + +#endif /* __KERNEL__ */ + +#endif /* __PKTCDVD_H */ diff --git a/include/linux/platform_data/android_battery.h b/include/linux/platform_data/android_battery.h new file mode 100644 index 00000000..f6c8298f --- /dev/null +++ b/include/linux/platform_data/android_battery.h @@ -0,0 +1,47 @@ +/* + * android_battery.h + * + * Copyright (C) 2012 Samsung Electronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _LINUX_ANDROID_BATTERY_H +#define _LINUX_ANDROID_BATTERY_H + +enum { + CHARGE_SOURCE_NONE = 0, + CHARGE_SOURCE_AC, + CHARGE_SOURCE_USB, +}; + +struct android_bat_callbacks { + void (*charge_source_changed) + (struct android_bat_callbacks *, int); + void (*battery_set_full)(struct android_bat_callbacks *); +}; + +struct android_bat_platform_data { + void (*register_callbacks)(struct android_bat_callbacks *); + void (*unregister_callbacks)(void); + void (*set_charging_current) (int); + void (*set_charging_enable) (int); + int (*poll_charge_source) (void); + int (*get_capacity) (void); + int (*get_temperature) (int *); + int (*get_voltage_now)(void); + int (*get_current_now)(int *); + + int temp_high_threshold; + int temp_high_recovery; + int temp_low_recovery; + int temp_low_threshold; + + unsigned long full_charging_time; + unsigned long recharging_time; + unsigned int recharging_voltage; +}; + +#endif diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h new file mode 100644 index 00000000..b0f2c56a --- /dev/null +++ b/include/linux/platform_data/atmel.h @@ -0,0 +1,27 @@ +/* + * atmel platform data + * + * GPL v2 Only + */ + +#ifndef __ATMEL_H__ +#define __ATMEL_H__ + +#include <linux/mtd/nand.h> + + /* NAND / SmartMedia */ +struct atmel_nand_data { + int enable_pin; /* chip enable */ + int det_pin; /* card detect */ + int rdy_pin; /* ready/busy */ + u8 rdy_pin_active_low; /* rdy_pin value is inverted */ + u8 ale; /* address line number connected to ALE */ + u8 cle; /* address line number connected to CLE */ + u8 bus_width_16; /* buswidth is 16 bit */ + u8 ecc_mode; /* ecc mode */ + u8 on_flash_bbt; /* bbt on flash */ + struct mtd_partition *parts; + unsigned int num_parts; +}; + +#endif /* __ATMEL_H__ */ diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h new file mode 100644 index 00000000..c4e23d02 --- /dev/null +++ b/include/linux/platform_data/cpsw.h @@ -0,0 +1,55 @@ +/* + * Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2012 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __CPSW_H__ +#define __CPSW_H__ + +#include <linux/if_ether.h> + +struct cpsw_slave_data { + u32 slave_reg_ofs; + u32 sliver_reg_ofs; + const char *phy_id; + int phy_if; + u8 mac_addr[ETH_ALEN]; +}; + +struct cpsw_platform_data { + u32 ss_reg_ofs; /* Subsystem control register offset */ + u32 channels; /* number of cpdma channels (symmetric) */ + u32 cpdma_reg_ofs; /* cpdma register offset */ + u32 cpdma_sram_ofs; /* cpdma sram offset */ + + u32 slaves; /* number of slave cpgmac ports */ + struct cpsw_slave_data *slave_data; + + u32 ale_reg_ofs; /* address lookup engine reg offset */ + u32 ale_entries; /* ale table size */ + + u32 host_port_reg_ofs; /* cpsw cpdma host port registers */ + u32 host_port_num; /* The port number for the host port */ + + u32 hw_stats_reg_ofs; /* cpsw hardware statistics counters */ + + u32 bd_ram_ofs; /* embedded buffer descriptor RAM offset*/ + u32 bd_ram_size; /*buffer descriptor ram size */ + u32 hw_ram_addr; /*if the HW address for BD RAM is different */ + bool no_bd_ram; /* no embedded BD ram*/ + + u32 rx_descs; /* Number of Rx Descriptios */ + + u32 mac_control; /* Mac control register */ +}; + +#endif /* __CPSW_H__ */ diff --git a/include/linux/platform_data/ds2482.h b/include/linux/platform_data/ds2482.h new file mode 100644 index 00000000..5a6879e2 --- /dev/null +++ b/include/linux/platform_data/ds2482.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __PLATFORM_DATA_DS2482__ +#define __PLATFORM_DATA_DS2482__ + +struct ds2482_platform_data { + int slpz_gpio; +}; + +#endif /* __PLATFORM_DATA_DS2482__ */ diff --git a/include/linux/platform_data/dwc3-exynos.h b/include/linux/platform_data/dwc3-exynos.h new file mode 100644 index 00000000..5eb7da9b --- /dev/null +++ b/include/linux/platform_data/dwc3-exynos.h @@ -0,0 +1,24 @@ +/** + * dwc3-exynos.h - Samsung EXYNOS DWC3 Specific Glue layer, header. + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Anton Tikhomirov <av.tikhomirov@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _DWC3_EXYNOS_H_ +#define _DWC3_EXYNOS_H_ + +struct dwc3_exynos_data { + int phy_type; + int (*phy_init)(struct platform_device *pdev, int type); + int (*phy_exit)(struct platform_device *pdev, int type); +}; + +#endif /* _DWC3_EXYNOS_H_ */ diff --git a/include/linux/platform_data/dwc3-omap.h b/include/linux/platform_data/dwc3-omap.h new file mode 100644 index 00000000..ada40124 --- /dev/null +++ b/include/linux/platform_data/dwc3-omap.h @@ -0,0 +1,47 @@ +/** + * dwc3-omap.h - OMAP Specific Glue layer, header. + * + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com + * All rights reserved. 
+ * + * Author: Felipe Balbi <balbi@ti.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2, as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +enum dwc3_omap_utmi_mode { + DWC3_OMAP_UTMI_MODE_UNKNOWN = 0, + DWC3_OMAP_UTMI_MODE_HW, + DWC3_OMAP_UTMI_MODE_SW, +}; + +struct dwc3_omap_data { + enum dwc3_omap_utmi_mode utmi_mode; +}; diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h new file mode 100644 index 00000000..ed0e975b --- /dev/null +++ b/include/linux/platform_data/efm32-uart.h @@ -0,0 +1,18 @@ +/* + * + * + */ +#ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ +#define __LINUX_PLATFORM_DATA_EFM32_UART_H__ + +#include <linux/types.h> + +/** + * struct efm32_uart_pdata + * @location: pinmux location for the I/O pins (to be written to the ROUTE + * register) + */ +struct efm32_uart_pdata { + u8 location; +}; +#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */ diff --git a/include/linux/platform_data/exynos4_tmu.h b/include/linux/platform_data/exynos4_tmu.h new file mode 100644 index 00000000..39e038cc --- /dev/null +++ b/include/linux/platform_data/exynos4_tmu.h @@ -0,0 +1,83 @@ +/* + * exynos4_tmu.h - Samsung EXYNOS4 TMU (Thermal Management Unit) + * + * Copyright (C) 2011 Samsung Electronics + * Donggeun Kim <dg77.kim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_EXYNOS4_TMU_H +#define _LINUX_EXYNOS4_TMU_H + +enum calibration_type { + TYPE_ONE_POINT_TRIMMING, + TYPE_TWO_POINT_TRIMMING, + TYPE_NONE, +}; + +/** + * struct exynos4_tmu_platform_data + * @threshold: basic temperature for generating interrupt + * 25 <= threshold <= 125 [unit: degree Celsius] + * @trigger_levels: array for each interrupt levels + * [unit: degree Celsius] + * 0: temperature for trigger_level0 interrupt + * condition for trigger_level0 interrupt: + * current temperature > threshold + trigger_levels[0] + * 1: temperature for trigger_level1 interrupt + * condition for trigger_level1 interrupt: + * current temperature > threshold + trigger_levels[1] + * 2: temperature for trigger_level2 interrupt + * condition for trigger_level2 interrupt: + * current temperature > threshold + trigger_levels[2] + * 3: temperature for trigger_level3 interrupt + * condition for trigger_level3 interrupt: + * current temperature > threshold + trigger_levels[3] + * @trigger_level0_en: + * 1 = enable trigger_level0 interrupt, + * 0 = disable trigger_level0 interrupt + * @trigger_level1_en: + * 1 = enable trigger_level1 interrupt, + * 0 = disable trigger_level1 interrupt + * @trigger_level2_en: + * 1 = enable trigger_level2 interrupt, + * 0 = disable trigger_level2 interrupt + * @trigger_level3_en: + * 1 = enable trigger_level3 interrupt, + * 0 = disable trigger_level3 interrupt + * @gain: gain of amplifier in the positive-TC generator block + * 0 <= gain <= 15 + * @reference_voltage: reference voltage of amplifier + * in the positive-TC generator block + * 0 <= reference_voltage <= 31 + * @cal_type: calibration type for temperature + * + * This structure is required for configuration of exynos4_tmu driver. + */ +struct exynos4_tmu_platform_data { + u8 threshold; + u8 trigger_levels[4]; + bool trigger_level0_en; + bool trigger_level1_en; + bool trigger_level2_en; + bool trigger_level3_en; + + u8 gain; + u8 reference_voltage; + + enum calibration_type cal_type; +}; +#endif /* _LINUX_EXYNOS4_TMU_H */ diff --git a/include/linux/platform_data/fsa9480.h b/include/linux/platform_data/fsa9480.h new file mode 100644 index 00000000..72dddcb4 --- /dev/null +++ b/include/linux/platform_data/fsa9480.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2010 Samsung Electronics + * Minkyu Kang <mk7.kang@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _FSA9480_H_ +#define _FSA9480_H_ + +#define FSA9480_ATTACHED 1 +#define FSA9480_DETACHED 0 + +struct fsa9480_platform_data { + void (*cfg_gpio) (void); + void (*usb_cb) (u8 attached); + void (*uart_cb) (u8 attached); + void (*charger_cb) (u8 attached); + void (*jig_cb) (u8 attached); + void (*reset_cb) (void); + void (*usb_power) (u8 on); + int wakeup; +}; + +#endif /* _FSA9480_H_ */ diff --git a/include/linux/platform_data/leds-renesas-tpu.h b/include/linux/platform_data/leds-renesas-tpu.h new file mode 100644 index 00000000..05538708 --- /dev/null +++ b/include/linux/platform_data/leds-renesas-tpu.h @@ -0,0 +1,14 @@ +#ifndef __LEDS_RENESAS_TPU_H__ +#define __LEDS_RENESAS_TPU_H__ + +struct led_renesas_tpu_config { + char *name; + unsigned pin_gpio_fn; + unsigned pin_gpio; + unsigned int channel_offset; + unsigned int timer_bit; + unsigned int max_brightness; + unsigned int refresh_rate; +}; + +#endif /* __LEDS_RENESAS_TPU_H__ */ diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h new file mode 100644 index 00000000..b081c724 --- /dev/null +++ b/include/linux/platform_data/macb.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __MACB_PDATA_H__ +#define __MACB_PDATA_H__ + +struct macb_platform_data { + u32 phy_mask; + int phy_irq_pin; /* PHY IRQ */ + u8 is_rmii; /* using RMII interface? */ +}; + +#endif /* __MACB_PDATA_H__ */ diff --git a/include/linux/platform_data/msm_serial_hs.h b/include/linux/platform_data/msm_serial_hs.h new file mode 100644 index 00000000..98a2046f --- /dev/null +++ b/include/linux/platform_data/msm_serial_hs.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2008 Google, Inc. + * Author: Nick Pelly <npelly@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARCH_MSM_SERIAL_HS_H +#define __ASM_ARCH_MSM_SERIAL_HS_H + +#include <linux/serial_core.h> + +/* API to request the uart clock off or on for low power management + * Clients should call request_clock_off() when no uart data is expected, + * and must call request_clock_on() before any further uart data can be + * received. */ +extern void msm_hs_request_clock_off(struct uart_port *uport); +extern void msm_hs_request_clock_on(struct uart_port *uport); + +/** + * struct msm_serial_hs_platform_data + * @rx_wakeup_irq: Rx activity irq + * @rx_to_inject: extra character to be inserted to Rx tty on wakeup + * @inject_rx: 1 = insert rx_to_inject. 0 = do not insert extra character + * @exit_lpm_cb: function called before every Tx transaction + * + * This is an optional structure required for UART Rx GPIO IRQ based + * wakeup from low power state. UART wakeup can be triggered by RX activity + * (using a wakeup GPIO on the UART RX pin). 
This should only be used if + * there is not a wakeup GPIO on the UART CTS, and the first RX byte is + * known (eg., with the Bluetooth Texas Instruments HCILL protocol), + * since the first RX byte will always be lost. RTS will be asserted even + * while the UART is clocked off in this mode of operation. + */ +struct msm_serial_hs_platform_data { + int rx_wakeup_irq; + unsigned char inject_rx_on_wakeup; + char rx_to_inject; + void (*exit_lpm_cb)(struct uart_port *); +}; + +#endif diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h new file mode 100644 index 00000000..d94804ac --- /dev/null +++ b/include/linux/platform_data/mv_usb.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2011 Marvell International Ltd. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MV_PLATFORM_USB_H +#define __MV_PLATFORM_USB_H + +enum pxa_ehci_type { + EHCI_UNDEFINED = 0, + PXA_U2OEHCI, /* pxa 168, 9xx */ + PXA_SPH, /* pxa 168, 9xx SPH */ + MMP3_HSIC, /* mmp3 hsic */ + MMP3_FSIC, /* mmp3 fsic */ +}; + +enum { + MV_USB_MODE_OTG, + MV_USB_MODE_HOST, +}; + +enum { + VBUS_LOW = 0, + VBUS_HIGH = 1 << 0, +}; + +struct mv_usb_addon_irq { + unsigned int irq; + int (*poll)(void); +}; + +struct mv_usb_platform_data { + unsigned int clknum; + char **clkname; + struct mv_usb_addon_irq *id; /* Only valid for OTG. ID pin change*/ + struct mv_usb_addon_irq *vbus; /* valid for OTG/UDC. VBUS change*/ + + /* only valid for HCD. OTG or Host only*/ + unsigned int mode; + + /* This flag is used for that needs id pin checked by otg */ + unsigned int disable_otg_clock_gating:1; + /* Force a_bus_req to be asserted */ + unsigned int otg_force_a_bus_req:1; + + int (*phy_init)(void __iomem *regbase); + void (*phy_deinit)(void __iomem *regbase); + int (*set_vbus)(unsigned int vbus); + int (*private_init)(void __iomem *opregs, void __iomem *phyregs); +}; + +#ifndef CONFIG_HAVE_CLK +/* Dummy stub for clk framework */ +#define clk_get(dev, id) NULL +#define clk_put(clock) do {} while (0) +#define clk_enable(clock) do {} while (0) +#define clk_disable(clock) do {} while (0) +#endif + +#endif diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h new file mode 100644 index 00000000..88734e87 --- /dev/null +++ b/include/linux/platform_data/ntc_thermistor.h @@ -0,0 +1,53 @@ +/* + * ntc_thermistor.h - NTC Thermistors + * + * Copyright (C) 2010 Samsung Electronics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _LINUX_NTC_H +#define _LINUX_NTC_H + +enum ntc_thermistor_type { + TYPE_NCPXXWB473, + TYPE_NCPXXWL333, +}; + +struct ntc_thermistor_platform_data { + /* + * One (not both) of read_uV and read_ohm should be provided and only + * one of the two should be provided. + * Both functions should return negative value for an error case. + * + * pullup_uV, pullup_ohm, pulldown_ohm, and connect are required to use + * read_uV() + * + * How to setup pullup_ohm, pulldown_ohm, and connect is + * described at Documentation/hwmon/ntc_thermistor + * + * pullup/down_ohm: 0 for infinite / not-connected + */ + int (*read_uV)(void); + unsigned int pullup_uV; + + unsigned int pullup_ohm; + unsigned int pulldown_ohm; + enum { NTC_CONNECTED_POSITIVE, NTC_CONNECTED_GROUND } connect; + + int (*read_ohm)(void); +}; + +#endif /* _LINUX_NTC_H */ diff --git a/include/linux/platform_data/omap-abe-twl6040.h b/include/linux/platform_data/omap-abe-twl6040.h new file mode 100644 index 00000000..5d298ac1 --- /dev/null +++ b/include/linux/platform_data/omap-abe-twl6040.h @@ -0,0 +1,49 @@ +/** + * omap-abe-twl6040.h - ASoC machine driver OMAP4+ devices, header. + * + * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com + * All rights reserved. + * + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef _OMAP_ABE_TWL6040_H_ +#define _OMAP_ABE_TWL6040_H_ + +/* To select if only one channel is connected in a stereo port */ +#define ABE_TWL6040_LEFT (1 << 0) +#define ABE_TWL6040_RIGHT (1 << 1) + +struct omap_abe_twl6040_data { + char *card_name; + /* Feature flags for connected audio pins */ + u8 has_hs; + u8 has_hf; + bool has_ep; + u8 has_aux; + u8 has_vibra; + bool has_dmic; + bool has_hsmic; + bool has_mainmic; + bool has_submic; + u8 has_afm; + /* Other features */ + bool jack_detection; /* board can detect jack events */ + int mclk_freq; /* MCLK frequency speed for twl6040 */ +}; + +#endif /* _OMAP_ABE_TWL6040_H_ */ diff --git a/include/linux/platform_data/omap4-keypad.h b/include/linux/platform_data/omap4-keypad.h new file mode 100644 index 00000000..4eef5fb0 --- /dev/null +++ b/include/linux/platform_data/omap4-keypad.h @@ -0,0 +1,13 @@ +#ifndef __LINUX_INPUT_OMAP4_KEYPAD_H +#define __LINUX_INPUT_OMAP4_KEYPAD_H + +#include <linux/input/matrix_keypad.h> + +struct omap4_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + + u8 rows; + u8 cols; +}; + +#endif /* __LINUX_INPUT_OMAP4_KEYPAD_H */ diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h new file mode 100644 index 00000000..51ad0995 --- /dev/null +++ b/include/linux/platform_data/pxa_sdhci.h @@ -0,0 +1,60 @@ +/* + * include/linux/platform_data/pxa_sdhci.h + * + * Copyright 2010 Marvell + * Zhangfei Gao <zhangfei.gao@marvell.com> + * + * PXA Platform - SDHCI platform data definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _PXA_SDHCI_H_ +#define _PXA_SDHCI_H_ + +/* pxa specific flag */ +/* Require clock free running */ +#define PXA_FLAG_ENABLE_CLOCK_GATING (1<<0) +/* card always wired to host, like on-chip emmc */ +#define PXA_FLAG_CARD_PERMANENT (1<<1) +/* Board design supports 8-bit data on SD/SDIO BUS */ +#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2) + +/* + * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI + * @flags: flags for platform requirement + * @clk_delay_cycles: + * mmp2: each step is roughly 100ps, 5bits width + * pxa910: each step is 1ns, 4bits width + * @clk_delay_sel: select clk_delay, used on pxa910 + * 0: choose feedback clk + * 1: choose feedback clk + delay value + * 2: choose internal clk + * @clk_delay_enable: enable clk_delay or not, used on pxa910 + * @ext_cd_gpio: gpio pin used for external CD line + * @ext_cd_gpio_invert: invert values for external CD gpio line + * @max_speed: the maximum speed supported + * @host_caps: Standard MMC host capabilities bit field. 
+ * @quirks: quirks of platform
+ * @pm_caps: pm_caps of platform
+ */
+struct sdhci_pxa_platdata {
+	unsigned int	flags;
+	unsigned int	clk_delay_cycles;
+	unsigned int	clk_delay_sel;
+	bool		clk_delay_enable;
+	unsigned int	ext_cd_gpio;
+	bool		ext_cd_gpio_invert;
+	unsigned int	max_speed;
+	unsigned int	host_caps;
+	unsigned int	quirks;
+	unsigned int	pm_caps;
+};
+
+struct sdhci_pxa {
+	u8	clk_enable;
+	u8	power_mode;
+};
+#endif /* _PXA_SDHCI_H_ */
diff --git a/include/linux/platform_data/s3c-hsudc.h b/include/linux/platform_data/s3c-hsudc.h
new file mode 100644
index 00000000..6fa10933
--- /dev/null
+++ b/include/linux/platform_data/s3c-hsudc.h
@@ -0,0 +1,34 @@
+/*
+ * S3C24XX USB 2.0 High-speed USB controller gadget driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *	http://www.samsung.com/
+ *
+ * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints.
+ * Each endpoint can be configured as either in or out endpoint. Endpoints
+ * can be configured for Bulk or Interrupt transfer mode.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __LINUX_USB_S3C_HSUDC_H
+#define __LINUX_USB_S3C_HSUDC_H
+
+/**
+ * s3c24xx_hsudc_platdata - Platform data for USB High-Speed gadget controller.
+ * @epnum: Number of endpoints to be instantiated by the controller driver.
+ * @gpio_init: Platform specific USB related GPIO initialization.
+ * @gpio_uninit: Platform specific USB related GPIO uninitialization.
+ *
+ * Representation of platform data for the S3C24XX USB 2.0 High Speed gadget
+ * controllers.
+ */
+struct s3c24xx_hsudc_platdata {
+	unsigned int	epnum;
+	void		(*gpio_init)(void);
+	void		(*gpio_uninit)(void);
+};
+
+#endif /* __LINUX_USB_S3C_HSUDC_H */
diff --git a/include/linux/platform_data/spear_thermal.h b/include/linux/platform_data/spear_thermal.h
new file mode 100644
index 00000000..724f2e1c
--- /dev/null
+++ b/include/linux/platform_data/spear_thermal.h
@@ -0,0 +1,26 @@
+/*
+ * SPEAr thermal driver platform data.
+ *
+ * Copyright (C) 2011-2012 ST Microelectronics
+ * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SPEAR_THERMAL_H
+#define SPEAR_THERMAL_H
+
+/* SPEAr Thermal Sensor Platform Data */
+struct spear_thermal_pdata {
+	/* flags used to enable thermal sensor */
+	unsigned int thermal_flags;
+};
+
+#endif /* SPEAR_THERMAL_H */
diff --git a/include/linux/platform_data/tegra_emc.h b/include/linux/platform_data/tegra_emc.h
new file mode 100644
index 00000000..df67505e
--- /dev/null
+++ b/include/linux/platform_data/tegra_emc.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@android.com>
+ *	Olof Johansson <olof@lixom.net>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
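To make the sdhci_pxa_platdata flags above concrete, a rough sketch for a board with a soldered-down eMMC on an 8-bit bus; the field values are purely illustrative.

static struct sdhci_pxa_platdata board_emmc_pdata = {
	/* card is wired permanently, so no card-detect GPIO is set */
	.flags			= PXA_FLAG_CARD_PERMANENT |
				  PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
	.clk_delay_cycles	= 0x1f,	/* board-specific tuning value */
};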
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __TEGRA_EMC_H_ +#define __TEGRA_EMC_H_ + +#define TEGRA_EMC_NUM_REGS 46 + +struct tegra_emc_table { + unsigned long rate; + u32 regs[TEGRA_EMC_NUM_REGS]; +}; + +struct tegra_emc_pdata { + int num_tables; + struct tegra_emc_table *tables; +}; + +#endif diff --git a/include/linux/platform_data/tegra_usb.h b/include/linux/platform_data/tegra_usb.h new file mode 100644 index 00000000..6bca5b56 --- /dev/null +++ b/include/linux/platform_data/tegra_usb.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _TEGRA_USB_H_ +#define _TEGRA_USB_H_ + +enum tegra_usb_operating_modes { + TEGRA_USB_DEVICE, + TEGRA_USB_HOST, + TEGRA_USB_OTG, +}; + +struct tegra_ehci_platform_data { + enum tegra_usb_operating_modes operating_mode; + /* power down the phy on bus suspend */ + int power_down_on_bus_suspend; + void *phy_config; +}; + +#endif /* _TEGRA_USB_H_ */ diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h new file mode 100644 index 00000000..f39140aa --- /dev/null +++ b/include/linux/platform_data/uio_pruss.h @@ -0,0 +1,25 @@ +/* + * include/linux/platform_data/uio_pruss.h + * + * Platform data for uio_pruss driver + * + * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _UIO_PRUSS_H_ +#define _UIO_PRUSS_H_ + +/* To configure the PRUSS INTC base offset for UIO driver */ +struct uio_pruss_pdata { + u32 pintc_base; +}; +#endif /* _UIO_PRUSS_H_ */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h new file mode 100644 index 00000000..60e9994e --- /dev/null +++ b/include/linux/platform_device.h @@ -0,0 +1,291 @@ +/* + * platform_device.h - generic, centralized driver model + * + * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> + * + * This file is released under the GPLv2 + * + * See Documentation/driver-model/ for more information. 
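Referring back to the tegra_emc.h structures above, a sketch of how board code might provide the EMC tables; the rates and the (elided) register values are placeholders only.

static struct tegra_emc_table board_emc_tables[] = {
	{
		.rate	= 166500,	/* placeholder rate */
		.regs	= { 0 },	/* TEGRA_EMC_NUM_REGS values go here */
	},
	{
		.rate	= 333000,
		.regs	= { 0 },
	},
};

static struct tegra_emc_pdata board_emc_pdata = {
	.num_tables	= ARRAY_SIZE(board_emc_tables),
	.tables		= board_emc_tables,
};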
+ */ + +#ifndef _PLATFORM_DEVICE_H_ +#define _PLATFORM_DEVICE_H_ + +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +struct mfd_cell; + +struct platform_device { + const char * name; + int id; + struct device dev; + u32 num_resources; + struct resource * resource; + + const struct platform_device_id *id_entry; + + /* MFD cell pointer */ + struct mfd_cell *mfd_cell; + + /* arch specific additions */ + struct pdev_archdata archdata; +}; + +#define platform_get_device_id(pdev) ((pdev)->id_entry) + +#define to_platform_device(x) container_of((x), struct platform_device, dev) + +extern int platform_device_register(struct platform_device *); +extern void platform_device_unregister(struct platform_device *); + +extern struct bus_type platform_bus_type; +extern struct device platform_bus; + +extern void arch_setup_pdev_archdata(struct platform_device *); +extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); +extern int platform_get_irq(struct platform_device *, unsigned int); +extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *); +extern int platform_get_irq_byname(struct platform_device *, const char *); +extern int platform_add_devices(struct platform_device **, int); + +struct platform_device_info { + struct device *parent; + + const char *name; + int id; + + const struct resource *res; + unsigned int num_res; + + const void *data; + size_t size_data; + u64 dma_mask; +}; +extern struct platform_device *platform_device_register_full( + const struct platform_device_info *pdevinfo); + +/** + * platform_device_register_resndata - add a platform-level device with + * resources and platform-specific data + * + * @parent: parent device for the device we're adding + * @name: base name of the device we're adding + * @id: instance id + * @res: set of resources that needs to be allocated for the device + * @num: number of resources + * @data: platform specific data for this platform device + * @size: size of platform specific data + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. + */ +static inline struct platform_device *platform_device_register_resndata( + struct device *parent, const char *name, int id, + const struct resource *res, unsigned int num, + const void *data, size_t size) { + + struct platform_device_info pdevinfo = { + .parent = parent, + .name = name, + .id = id, + .res = res, + .num_res = num, + .data = data, + .size_data = size, + .dma_mask = 0, + }; + + return platform_device_register_full(&pdevinfo); +} + +/** + * platform_device_register_simple - add a platform-level device and its resources + * @name: base name of the device we're adding + * @id: instance id + * @res: set of resources that needs to be allocated for the device + * @num: number of resources + * + * This function creates a simple platform device that requires minimal + * resource and memory management. Canned release function freeing memory + * allocated for the device allows drivers using such devices to be + * unloaded without waiting for the last reference to the device to be + * dropped. + * + * This interface is primarily intended for use with legacy drivers which + * probe hardware directly. Because such drivers create sysfs device nodes + * themselves, rather than letting system infrastructure handle such device + * enumeration tasks, they don't fully conform to the Linux driver model. 
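As an illustration of the registration helpers defined above, a hedged sketch of registering a device with one MMIO resource plus private platform data; the "example-dev" name, the addresses, and struct example_pdata are all hypothetical, and the usual <linux/err.h>/<linux/ioport.h> definitions are assumed.

struct example_pdata {
	int	chan;
};

static struct resource example_resources[] = {
	{
		.start	= 0x40001000,		/* hypothetical base address */
		.end	= 0x40001fff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct example_pdata example_pdata = {
	.chan	= 2,
};

static int __init example_dev_setup(void)
{
	struct platform_device *pdev;

	/* resources and pdata are copied by the core, per the comment above */
	pdev = platform_device_register_resndata(NULL, "example-dev", -1,
			example_resources, ARRAY_SIZE(example_resources),
			&example_pdata, sizeof(example_pdata));
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}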
+ * In particular, when such drivers are built as modules, they can't be + * "hotplugged". + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. + */ +static inline struct platform_device *platform_device_register_simple( + const char *name, int id, + const struct resource *res, unsigned int num) +{ + return platform_device_register_resndata(NULL, name, id, + res, num, NULL, 0); +} + +/** + * platform_device_register_data - add a platform-level device with platform-specific data + * @parent: parent device for the device we're adding + * @name: base name of the device we're adding + * @id: instance id + * @data: platform specific data for this platform device + * @size: size of platform specific data + * + * This function creates a simple platform device that requires minimal + * resource and memory management. Canned release function freeing memory + * allocated for the device allows drivers using such devices to be + * unloaded without waiting for the last reference to the device to be + * dropped. + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. + */ +static inline struct platform_device *platform_device_register_data( + struct device *parent, const char *name, int id, + const void *data, size_t size) +{ + return platform_device_register_resndata(parent, name, id, + NULL, 0, data, size); +} + +extern struct platform_device *platform_device_alloc(const char *name, int id); +extern int platform_device_add_resources(struct platform_device *pdev, + const struct resource *res, + unsigned int num); +extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); +extern int platform_device_add(struct platform_device *pdev); +extern void platform_device_del(struct platform_device *pdev); +extern void platform_device_put(struct platform_device *pdev); + +struct platform_driver { + int (*probe)(struct platform_device *); + int (*remove)(struct platform_device *); + void (*shutdown)(struct platform_device *); + int (*suspend)(struct platform_device *, pm_message_t state); + int (*resume)(struct platform_device *); + struct device_driver driver; + const struct platform_device_id *id_table; +}; + +extern int platform_driver_register(struct platform_driver *); +extern void platform_driver_unregister(struct platform_driver *); + +/* non-hotpluggable platform devices may use this so that probe() and + * its support may live in __init sections, conserving runtime memory. + */ +extern int platform_driver_probe(struct platform_driver *driver, + int (*probe)(struct platform_device *)); + +static inline void *platform_get_drvdata(const struct platform_device *pdev) +{ + return dev_get_drvdata(&pdev->dev); +} + +static inline void platform_set_drvdata(struct platform_device *pdev, void *data) +{ + dev_set_drvdata(&pdev->dev, data); +} + +/* module_platform_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. 
Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_platform_driver(__platform_driver) \ + module_driver(__platform_driver, platform_driver_register, \ + platform_driver_unregister) + +extern struct platform_device *platform_create_bundle(struct platform_driver *driver, + int (*probe)(struct platform_device *), + struct resource *res, unsigned int n_res, + const void *data, size_t size); + +/* early platform driver interface */ +struct early_platform_driver { + const char *class_str; + struct platform_driver *pdrv; + struct list_head list; + int requested_id; + char *buffer; + int bufsize; +}; + +#define EARLY_PLATFORM_ID_UNSET -2 +#define EARLY_PLATFORM_ID_ERROR -3 + +extern int early_platform_driver_register(struct early_platform_driver *epdrv, + char *buf); +extern void early_platform_add_devices(struct platform_device **devs, int num); + +static inline int is_early_platform_device(struct platform_device *pdev) +{ + return !pdev->dev.driver; +} + +extern void early_platform_driver_register_all(char *class_str); +extern int early_platform_driver_probe(char *class_str, + int nr_probe, int user_only); +extern void early_platform_cleanup(void); + +#define early_platform_init(class_string, platdrv) \ + early_platform_init_buffer(class_string, platdrv, NULL, 0) + +#ifndef MODULE +#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ +static __initdata struct early_platform_driver early_driver = { \ + .class_str = class_string, \ + .buffer = buf, \ + .bufsize = bufsiz, \ + .pdrv = platdrv, \ + .requested_id = EARLY_PLATFORM_ID_UNSET, \ +}; \ +static int __init early_platform_driver_setup_func(char *buffer) \ +{ \ + return early_platform_driver_register(&early_driver, buffer); \ +} \ +early_param(class_string, early_platform_driver_setup_func) +#else /* MODULE */ +#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ +static inline char *early_platform_driver_setup_func(void) \ +{ \ + return bufsiz ? buf : NULL; \ +} +#endif /* MODULE */ + +#ifdef CONFIG_SUSPEND +extern int platform_pm_suspend(struct device *dev); +extern int platform_pm_resume(struct device *dev); +#else +#define platform_pm_suspend NULL +#define platform_pm_resume NULL +#endif + +#ifdef CONFIG_HIBERNATE_CALLBACKS +extern int platform_pm_freeze(struct device *dev); +extern int platform_pm_thaw(struct device *dev); +extern int platform_pm_poweroff(struct device *dev); +extern int platform_pm_restore(struct device *dev); +#else +#define platform_pm_freeze NULL +#define platform_pm_thaw NULL +#define platform_pm_poweroff NULL +#define platform_pm_restore NULL +#endif + +#ifdef CONFIG_PM_SLEEP +#define USE_PLATFORM_PM_SLEEP_OPS \ + .suspend = platform_pm_suspend, \ + .resume = platform_pm_resume, \ + .freeze = platform_pm_freeze, \ + .thaw = platform_pm_thaw, \ + .poweroff = platform_pm_poweroff, \ + .restore = platform_pm_restore, +#else +#define USE_PLATFORM_PM_SLEEP_OPS +#endif + +#endif /* _PLATFORM_DEVICE_H_ */ diff --git a/include/linux/plist.h b/include/linux/plist.h new file mode 100644 index 00000000..aa0fb390 --- /dev/null +++ b/include/linux/plist.h @@ -0,0 +1,255 @@ +/* + * Descending-priority-sorted double-linked list + * + * (C) 2002-2003 Intel Corp + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>. + * + * 2001-2005 (c) MontaVista Software, Inc. 
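Pulling the platform_device.h pieces together, a minimal sketch of a hypothetical "example-dev" driver: probe() fetches its MMIO resource and IRQ, stashes state with platform_set_drvdata(), and module_platform_driver() supplies the module init/exit boilerplate. It assumes the usual <linux/io.h> and <linux/slab.h> helpers, and error handling is trimmed to the essentials.

struct example_priv {
	void __iomem	*base;
	int		irq;
};

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct example_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->irq = platform_get_irq(pdev, 0);
	if (!res || priv->irq < 0) {
		kfree(priv);
		return -ENODEV;
	}

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		kfree(priv);
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	iounmap(priv->base);
	kfree(priv);
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example-dev",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(example_driver);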
+ * Daniel Walker <dwalker@mvista.com>
+ *
+ * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Simplifications of the original code by
+ * Oleg Nesterov <oleg@tv-sign.ru>
+ *
+ * Licensed under the FSF's GNU Public License v2 or later.
+ *
+ * Based on simple lists (include/linux/list.h).
+ *
+ * This is a priority-sorted list of nodes; each node has a
+ * priority from INT_MIN (highest) to INT_MAX (lowest).
+ *
+ * Addition is O(K), removal is O(1), change of priority of a node is
+ * O(K) and K is the number of RT priority levels used in the system.
+ * (1 <= K <= 99)
+ *
+ * This list is really a list of lists:
+ *
+ *  - The tier 1 list is the prio_list, different priority nodes.
+ *
+ *  - The tier 2 list is the node_list, serialized nodes.
+ *
+ * Simple ASCII art explanation:
+ *
+ * pl:prio_list (only for plist_node)
+ * nl:node_list
+ *   HEAD|             NODE(S)
+ *       |
+ *       ||------------------------------------|
+ *       ||->|pl|<->|pl|<--------------->|pl|<-|
+ *       |   |10|   |21|   |21|   |21|   |40|   (prio)
+ *       |   |  |   |  |   |  |   |  |   |  |
+ *       |   |  |   |  |   |  |   |  |   |  |
+ * |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
+ * |-------------------------------------------|
+ *
+ * The nodes on the prio_list list are sorted by priority to simplify
+ * the insertion of new nodes. There are no nodes with duplicate
+ * priorities on the list.
+ *
+ * The nodes on the node_list are ordered by priority and can contain
+ * entries which have the same priority. Those entries are ordered
+ * FIFO.
+ *
+ * Addition means: look for the prio_list node in the prio_list
+ * for the priority of the node and insert it before the node_list
+ * entry of the next prio_list node. If it is the first node of
+ * that priority, add it to the prio_list in the right position and
+ * insert it into the serialized node_list list.
+ *
+ * Removal means remove it from the node_list and remove it from
+ * the prio_list if the node_list list_head is non-empty. In case
+ * of removal from the prio_list it must be checked whether other
+ * entries of the same priority are on the list or not. If there
+ * is another entry of the same priority then this entry has to
+ * replace the removed entry on the prio_list. If the entry which
+ * is removed is the only entry of this priority then a simple
+ * removal from both lists is sufficient.
+ *
+ * INT_MIN is the highest priority, 0 is in the middle, INT_MAX
+ * is the lowest priority.
+ *
+ * No locking is done, up to the caller.
+ * + */ +#ifndef _LINUX_PLIST_H_ +#define _LINUX_PLIST_H_ + +#include <linux/kernel.h> +#include <linux/list.h> + +struct plist_head { + struct list_head node_list; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +/** + * PLIST_HEAD_INIT - static struct plist_head initializer + * @head: struct plist_head variable name + */ +#define PLIST_HEAD_INIT(head) \ +{ \ + .node_list = LIST_HEAD_INIT((head).node_list) \ +} + +/** + * PLIST_NODE_INIT - static struct plist_node initializer + * @node: struct plist_node variable name + * @__prio: initial node priority + */ +#define PLIST_NODE_INIT(node, __prio) \ +{ \ + .prio = (__prio), \ + .prio_list = LIST_HEAD_INIT((node).prio_list), \ + .node_list = LIST_HEAD_INIT((node).node_list), \ +} + +/** + * plist_head_init - dynamic struct plist_head initializer + * @head: &struct plist_head pointer + */ +static inline void +plist_head_init(struct plist_head *head) +{ + INIT_LIST_HEAD(&head->node_list); +} + +/** + * plist_node_init - Dynamic struct plist_node initializer + * @node: &struct plist_node pointer + * @prio: initial node priority + */ +static inline void plist_node_init(struct plist_node *node, int prio) +{ + node->prio = prio; + INIT_LIST_HEAD(&node->prio_list); + INIT_LIST_HEAD(&node->node_list); +} + +extern void plist_add(struct plist_node *node, struct plist_head *head); +extern void plist_del(struct plist_node *node, struct plist_head *head); + +/** + * plist_for_each - iterate over the plist + * @pos: the type * to use as a loop counter + * @head: the head for your list + */ +#define plist_for_each(pos, head) \ + list_for_each_entry(pos, &(head)->node_list, node_list) + +/** + * plist_for_each_safe - iterate safely over a plist of given type + * @pos: the type * to use as a loop counter + * @n: another type * to use as temporary storage + * @head: the head for your list + * + * Iterate over a plist of given type, safe against removal of list entry. + */ +#define plist_for_each_safe(pos, n, head) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, node_list) + +/** + * plist_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop counter + * @head: the head for your list + * @mem: the name of the list_struct within the struct + */ +#define plist_for_each_entry(pos, head, mem) \ + list_for_each_entry(pos, &(head)->node_list, mem.node_list) + +/** + * plist_for_each_entry_safe - iterate safely over list of given type + * @pos: the type * to use as a loop counter + * @n: another type * to use as temporary storage + * @head: the head for your list + * @m: the name of the list_struct within the struct + * + * Iterate over list of given type, safe against removal of list entry. + */ +#define plist_for_each_entry_safe(pos, n, head, m) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list) + +/** + * plist_head_empty - return !0 if a plist_head is empty + * @head: &struct plist_head pointer + */ +static inline int plist_head_empty(const struct plist_head *head) +{ + return list_empty(&head->node_list); +} + +/** + * plist_node_empty - return !0 if plist_node is not on a list + * @node: &struct plist_node pointer + */ +static inline int plist_node_empty(const struct plist_node *node) +{ + return list_empty(&node->node_list); +} + +/* All functions below assume the plist_head is not empty. 
*/ + +/** + * plist_first_entry - get the struct for the first entry + * @head: the &struct plist_head pointer + * @type: the type of the struct this is embedded in + * @member: the name of the list_struct within the struct + */ +#ifdef CONFIG_DEBUG_PI_LIST +# define plist_first_entry(head, type, member) \ +({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_first(head), type, member); \ +}) +#else +# define plist_first_entry(head, type, member) \ + container_of(plist_first(head), type, member) +#endif + +/** + * plist_last_entry - get the struct for the last entry + * @head: the &struct plist_head pointer + * @type: the type of the struct this is embedded in + * @member: the name of the list_struct within the struct + */ +#ifdef CONFIG_DEBUG_PI_LIST +# define plist_last_entry(head, type, member) \ +({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_last(head), type, member); \ +}) +#else +# define plist_last_entry(head, type, member) \ + container_of(plist_last(head), type, member) +#endif + +/** + * plist_first - return the first node (and thus, highest priority) + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. + */ +static inline struct plist_node *plist_first(const struct plist_head *head) +{ + return list_entry(head->node_list.next, + struct plist_node, node_list); +} + +/** + * plist_last - return the last node (and thus, lowest priority) + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. + */ +static inline struct plist_node *plist_last(const struct plist_head *head) +{ + return list_entry(head->node_list.prev, + struct plist_node, node_list); +} + +#endif diff --git a/include/linux/pm.h b/include/linux/pm.h new file mode 100644 index 00000000..f067e60a --- /dev/null +++ b/include/linux/pm.h @@ -0,0 +1,698 @@ +/* + * pm.h - Power management interface + * + * Copyright (C) 2000 Andrew Henroid + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_PM_H +#define _LINUX_PM_H + +#include <linux/list.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/timer.h> +#include <linux/completion.h> + +/* + * Callbacks for platform drivers to implement. + */ +extern void (*pm_idle)(void); +extern void (*pm_power_off)(void); +extern void (*pm_power_off_prepare)(void); + +/* + * Device power management + */ + +struct device; + +#ifdef CONFIG_PM +extern const char power_group_name[]; /* = "power" */ +#else +#define power_group_name NULL +#endif + +typedef struct pm_message { + int event; +} pm_message_t; + +/** + * struct dev_pm_ops - device PM callbacks + * + * Several device power state transitions are externally visible, affecting + * the state of pending I/O queues and (for drivers that touch hardware) + * interrupts, wakeups, DMA, and other hardware state. 
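Going back to the plist.h API above, a small usage sketch: nodes carry a priority, plist_add() keeps the list sorted, and plist_first_entry() returns the highest-priority (numerically smallest) entry. The struct item container type is hypothetical, and the caller is assumed to provide its own locking.

struct item {
	struct plist_node	node;
	const char		*name;
};

static struct plist_head queue = PLIST_HEAD_INIT(queue);

static void item_queue(struct item *it, int prio)
{
	plist_node_init(&it->node, prio);
	plist_add(&it->node, &queue);	/* sorted insert, O(K) */
}

static struct item *item_peek(void)
{
	if (plist_head_empty(&queue))
		return NULL;
	/* lowest prio value == highest priority */
	return plist_first_entry(&queue, struct item, node);
}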
There may also be + * internal transitions to various low-power modes which are transparent + * to the rest of the driver stack (such as a driver that's ON gating off + * clocks which are not in active use). + * + * The externally visible transitions are handled with the help of callbacks + * included in this structure in such a way that two levels of callbacks are + * involved. First, the PM core executes callbacks provided by PM domains, + * device types, classes and bus types. They are the subsystem-level callbacks + * supposed to execute callbacks provided by device drivers, although they may + * choose not to do that. If the driver callbacks are executed, they have to + * collaborate with the subsystem-level callbacks to achieve the goals + * appropriate for the given system transition, given transition phase and the + * subsystem the device belongs to. + * + * @prepare: The principal role of this callback is to prevent new children of + * the device from being registered after it has returned (the driver's + * subsystem and generally the rest of the kernel is supposed to prevent + * new calls to the probe method from being made too once @prepare() has + * succeeded). If @prepare() detects a situation it cannot handle (e.g. + * registration of a child already in progress), it may return -EAGAIN, so + * that the PM core can execute it once again (e.g. after a new child has + * been registered) to recover from the race condition. + * This method is executed for all kinds of suspend transitions and is + * followed by one of the suspend callbacks: @suspend(), @freeze(), or + * @poweroff(). The PM core executes subsystem-level @prepare() for all + * devices before starting to invoke suspend callbacks for any of them, so + * generally devices may be assumed to be functional or to respond to + * runtime resume requests while @prepare() is being executed. However, + * device drivers may NOT assume anything about the availability of user + * space at that time and it is NOT valid to request firmware from within + * @prepare() (it's too late to do that). It also is NOT valid to allocate + * substantial amounts of memory from @prepare() in the GFP_KERNEL mode. + * [To work around these limitations, drivers may register suspend and + * hibernation notifiers to be executed before the freezing of tasks.] + * + * @complete: Undo the changes made by @prepare(). This method is executed for + * all kinds of resume transitions, following one of the resume callbacks: + * @resume(), @thaw(), @restore(). Also called if the state transition + * fails before the driver's suspend callback: @suspend(), @freeze() or + * @poweroff(), can be executed (e.g. if the suspend callback fails for one + * of the other devices that the PM core has unsuccessfully attempted to + * suspend earlier). + * The PM core executes subsystem-level @complete() after it has executed + * the appropriate resume callbacks for all devices. + * + * @suspend: Executed before putting the system into a sleep state in which the + * contents of main memory are preserved. The exact action to perform + * depends on the device's subsystem (PM domain, device type, class or bus + * type), but generally the device must be quiescent after subsystem-level + * @suspend() has returned, so that it doesn't do any I/O or DMA. + * Subsystem-level @suspend() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. + * + * @suspend_late: Continue operations started by @suspend(). 
For a number of + * devices @suspend_late() may point to the same callback routine as the + * runtime suspend callback. + * + * @resume: Executed after waking the system up from a sleep state in which the + * contents of main memory were preserved. The exact action to perform + * depends on the device's subsystem, but generally the driver is expected + * to start working again, responding to hardware events and software + * requests (the device itself may be left in a low-power state, waiting + * for a runtime resume to occur). The state of the device at the time its + * driver's @resume() callback is run depends on the platform and subsystem + * the device belongs to. On most platforms, there are no restrictions on + * availability of resources like clocks during @resume(). + * Subsystem-level @resume() is executed for all devices after invoking + * subsystem-level @resume_noirq() for all of them. + * + * @resume_early: Prepare to execute @resume(). For a number of devices + * @resume_early() may point to the same callback routine as the runtime + * resume callback. + * + * @freeze: Hibernation-specific, executed before creating a hibernation image. + * Analogous to @suspend(), but it should not enable the device to signal + * wakeup events or change its power state. The majority of subsystems + * (with the notable exception of the PCI bus type) expect the driver-level + * @freeze() to save the device settings in memory to be used by @restore() + * during the subsequent resume from hibernation. + * Subsystem-level @freeze() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. + * + * @freeze_late: Continue operations started by @freeze(). Analogous to + * @suspend_late(), but it should not enable the device to signal wakeup + * events or change its power state. + * + * @thaw: Hibernation-specific, executed after creating a hibernation image OR + * if the creation of an image has failed. Also executed after a failing + * attempt to restore the contents of main memory from such an image. + * Undo the changes made by the preceding @freeze(), so the device can be + * operated in the same way as immediately before the call to @freeze(). + * Subsystem-level @thaw() is executed for all devices after invoking + * subsystem-level @thaw_noirq() for all of them. It also may be executed + * directly after @freeze() in case of a transition error. + * + * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the + * preceding @freeze_late(). + * + * @poweroff: Hibernation-specific, executed after saving a hibernation image. + * Analogous to @suspend(), but it need not save the device's settings in + * memory. + * Subsystem-level @poweroff() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. + * + * @poweroff_late: Continue operations started by @poweroff(). Analogous to + * @suspend_late(), but it need not save the device's settings in memory. + * + * @restore: Hibernation-specific, executed after restoring the contents of main + * memory from a hibernation image, analogous to @resume(). + * + * @restore_early: Prepare to execute @restore(), analogous to @resume_early(). + * + * @suspend_noirq: Complete the actions started by @suspend(). Carry out any + * additional operations required for suspending the device that might be + * racing with its driver's interrupt handler, which is guaranteed not to + * run while @suspend_noirq() is being executed. 
+ * It generally is expected that the device will be in a low-power state + * (appropriate for the target system sleep state) after subsystem-level + * @suspend_noirq() has returned successfully. If the device can generate + * system wakeup signals and is enabled to wake up the system, it should be + * configured to do so at that time. However, depending on the platform + * and device's subsystem, @suspend() or @suspend_late() may be allowed to + * put the device into the low-power state and configure it to generate + * wakeup signals, in which case it generally is not necessary to define + * @suspend_noirq(). + * + * @resume_noirq: Prepare for the execution of @resume() by carrying out any + * operations required for resuming the device that might be racing with + * its driver's interrupt handler, which is guaranteed not to run while + * @resume_noirq() is being executed. + * + * @freeze_noirq: Complete the actions started by @freeze(). Carry out any + * additional operations required for freezing the device that might be + * racing with its driver's interrupt handler, which is guaranteed not to + * run while @freeze_noirq() is being executed. + * The power state of the device should not be changed by either @freeze(), + * or @freeze_late(), or @freeze_noirq() and it should not be configured to + * signal system wakeup by any of these callbacks. + * + * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any + * operations required for thawing the device that might be racing with its + * driver's interrupt handler, which is guaranteed not to run while + * @thaw_noirq() is being executed. + * + * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to + * @suspend_noirq(), but it need not save the device's settings in memory. + * + * @restore_noirq: Prepare for the execution of @restore() by carrying out any + * operations required for thawing the device that might be racing with its + * driver's interrupt handler, which is guaranteed not to run while + * @restore_noirq() is being executed. Analogous to @resume_noirq(). + * + * All of the above callbacks, except for @complete(), return error codes. + * However, the error codes returned by the resume operations, @resume(), + * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do + * not cause the PM core to abort the resume transition during which they are + * returned. The error codes returned in those cases are only printed by the PM + * core to the system logs for debugging purposes. Still, it is recommended + * that drivers only return error codes from their resume methods in case of an + * unrecoverable failure (i.e. when the device being handled refuses to resume + * and becomes unusable) to allow us to modify the PM core in the future, so + * that it can avoid attempting to handle devices that failed to resume and + * their children. + * + * It is allowed to unregister devices while the above callbacks are being + * executed. However, a callback routine must NOT try to unregister the device + * it was called for, although it may unregister children of that device (for + * example, if it detects that a child was unplugged while the system was + * asleep). + * + * Refer to Documentation/power/devices.txt for more information about the role + * of the above callbacks in the system suspend process. + * + * There also are callbacks related to runtime power management of devices. 
+ * Again, these callbacks are executed by the PM core only for subsystems + * (PM domains, device types, classes and bus types) and the subsystem-level + * callbacks are supposed to invoke the driver callbacks. Moreover, the exact + * actions to be performed by a device driver's callbacks generally depend on + * the platform and subsystem the device belongs to. + * + * @runtime_suspend: Prepare the device for a condition in which it won't be + * able to communicate with the CPU(s) and RAM due to power management. + * This need not mean that the device should be put into a low-power state. + * For example, if the device is behind a link which is about to be turned + * off, the device may remain at full power. If the device does go to low + * power and is capable of generating runtime wakeup events, remote wakeup + * (i.e., a hardware mechanism allowing the device to request a change of + * its power state via an interrupt) should be enabled for it. + * + * @runtime_resume: Put the device into the fully active state in response to a + * wakeup event generated by hardware or at the request of software. If + * necessary, put the device into the full-power state and restore its + * registers, so that it is fully operational. + * + * @runtime_idle: Device appears to be inactive and it might be put into a + * low-power state if all of the necessary conditions are satisfied. Check + * these conditions and handle the device as appropriate, possibly queueing + * a suspend request for it. The return value is ignored by the PM core. + * + * Refer to Documentation/power/runtime_pm.txt for more information about the + * role of the above callbacks in device runtime power management. + * + */ + +struct dev_pm_ops { + int (*prepare)(struct device *dev); + void (*complete)(struct device *dev); + int (*suspend)(struct device *dev); + int (*resume)(struct device *dev); + int (*freeze)(struct device *dev); + int (*thaw)(struct device *dev); + int (*poweroff)(struct device *dev); + int (*restore)(struct device *dev); + int (*suspend_late)(struct device *dev); + int (*resume_early)(struct device *dev); + int (*freeze_late)(struct device *dev); + int (*thaw_early)(struct device *dev); + int (*poweroff_late)(struct device *dev); + int (*restore_early)(struct device *dev); + int (*suspend_noirq)(struct device *dev); + int (*resume_noirq)(struct device *dev); + int (*freeze_noirq)(struct device *dev); + int (*thaw_noirq)(struct device *dev); + int (*poweroff_noirq)(struct device *dev); + int (*restore_noirq)(struct device *dev); + int (*runtime_suspend)(struct device *dev); + int (*runtime_resume)(struct device *dev); + int (*runtime_idle)(struct device *dev); +}; + +#ifdef CONFIG_PM_SLEEP +#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend = suspend_fn, \ + .resume = resume_fn, \ + .freeze = suspend_fn, \ + .thaw = resume_fn, \ + .poweroff = suspend_fn, \ + .restore = resume_fn, +#else +#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) +#endif + +#ifdef CONFIG_PM_RUNTIME +#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ + .runtime_suspend = suspend_fn, \ + .runtime_resume = resume_fn, \ + .runtime_idle = idle_fn, +#else +#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) +#endif + +/* + * Use this if you want to use the same suspend and resume callbacks for suspend + * to RAM and hibernation. 
+ */ +#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ +const struct dev_pm_ops name = { \ + SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ +} + +/* + * Use this for defining a set of PM operations to be used in all situations + * (sustem suspend, hibernation or runtime PM). + * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should + * be different from the corresponding runtime PM callbacks, .runtime_suspend(), + * and .runtime_resume(), because .runtime_suspend() always works on an already + * quiescent device, while .suspend() should assume that the device may be doing + * something when it is called (it should ensure that the device will be + * quiescent after it has returned). Therefore it's better to point the "late" + * suspend and "early" resume callback pointers, .suspend_late() and + * .resume_early(), to the same routines as .runtime_suspend() and + * .runtime_resume(), respectively (and analogously for hibernation). + */ +#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ +const struct dev_pm_ops name = { \ + SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ +} + +/** + * PM_EVENT_ messages + * + * The following PM_EVENT_ messages are defined for the internal use of the PM + * core, in order to provide a mechanism allowing the high level suspend and + * hibernation code to convey the necessary information to the device PM core + * code: + * + * ON No transition. + * + * FREEZE System is going to hibernate, call ->prepare() and ->freeze() + * for all devices. + * + * SUSPEND System is going to suspend, call ->prepare() and ->suspend() + * for all devices. + * + * HIBERNATE Hibernation image has been saved, call ->prepare() and + * ->poweroff() for all devices. + * + * QUIESCE Contents of main memory are going to be restored from a (loaded) + * hibernation image, call ->prepare() and ->freeze() for all + * devices. + * + * RESUME System is resuming, call ->resume() and ->complete() for all + * devices. + * + * THAW Hibernation image has been created, call ->thaw() and + * ->complete() for all devices. + * + * RESTORE Contents of main memory have been restored from a hibernation + * image, call ->restore() and ->complete() for all devices. + * + * RECOVER Creation of a hibernation image or restoration of the main + * memory contents from a hibernation image has failed, call + * ->thaw() and ->complete() for all devices. + * + * The following PM_EVENT_ messages are defined for internal use by + * kernel subsystems. They are never issued by the PM core. + * + * USER_SUSPEND Manual selective suspend was issued by userspace. + * + * USER_RESUME Manual selective resume was issued by userspace. + * + * REMOTE_WAKEUP Remote-wakeup request was received from the device. + * + * AUTO_SUSPEND Automatic (device idle) runtime suspend was + * initiated by the subsystem. + * + * AUTO_RESUME Automatic (device needed) runtime resume was + * requested by a driver. 
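To tie the dev_pm_ops documentation above to driver code, a hedged sketch for a hypothetical driver that uses one pair of callbacks for all system sleep transitions and adds runtime PM callbacks; with CONFIG_PM_RUNTIME disabled, SET_RUNTIME_PM_OPS() simply compiles away.

static int example_suspend(struct device *dev)
{
	/* quiesce the device: stop I/O, disable DMA and interrupts */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* reprogram registers and restart I/O */
	return 0;
}

static int example_runtime_suspend(struct device *dev)
{
	/* device is already idle here: gate clocks, drop power */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};

A driver that only needs the system sleep half could instead write "static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);" and, either way, point the .pm field of its struct device_driver at example_pm_ops.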
+ */ + +#define PM_EVENT_INVALID (-1) +#define PM_EVENT_ON 0x0000 +#define PM_EVENT_FREEZE 0x0001 +#define PM_EVENT_SUSPEND 0x0002 +#define PM_EVENT_HIBERNATE 0x0004 +#define PM_EVENT_QUIESCE 0x0008 +#define PM_EVENT_RESUME 0x0010 +#define PM_EVENT_THAW 0x0020 +#define PM_EVENT_RESTORE 0x0040 +#define PM_EVENT_RECOVER 0x0080 +#define PM_EVENT_USER 0x0100 +#define PM_EVENT_REMOTE 0x0200 +#define PM_EVENT_AUTO 0x0400 + +#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) +#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) +#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME) +#define PM_EVENT_REMOTE_RESUME (PM_EVENT_REMOTE | PM_EVENT_RESUME) +#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) +#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) + +#define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, }) +#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) +#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) +#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) +#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) +#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) +#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) +#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) +#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) +#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) +#define PMSG_USER_SUSPEND ((struct pm_message) \ + { .event = PM_EVENT_USER_SUSPEND, }) +#define PMSG_USER_RESUME ((struct pm_message) \ + { .event = PM_EVENT_USER_RESUME, }) +#define PMSG_REMOTE_RESUME ((struct pm_message) \ + { .event = PM_EVENT_REMOTE_RESUME, }) +#define PMSG_AUTO_SUSPEND ((struct pm_message) \ + { .event = PM_EVENT_AUTO_SUSPEND, }) +#define PMSG_AUTO_RESUME ((struct pm_message) \ + { .event = PM_EVENT_AUTO_RESUME, }) + +#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) + +/** + * Device run-time power management status. + * + * These status labels are used internally by the PM core to indicate the + * current status of a device with respect to the PM core operations. They do + * not reflect the actual power state of the device or its status as seen by the + * driver. + * + * RPM_ACTIVE Device is fully operational. Indicates that the device + * bus type's ->runtime_resume() callback has completed + * successfully. + * + * RPM_SUSPENDED Device bus type's ->runtime_suspend() callback has + * completed successfully. The device is regarded as + * suspended. + * + * RPM_RESUMING Device bus type's ->runtime_resume() callback is being + * executed. + * + * RPM_SUSPENDING Device bus type's ->runtime_suspend() callback is being + * executed. + */ + +enum rpm_status { + RPM_ACTIVE = 0, + RPM_RESUMING, + RPM_SUSPENDED, + RPM_SUSPENDING, +}; + +/** + * Device run-time power management request types. + * + * RPM_REQ_NONE Do nothing. 
+ * + * RPM_REQ_IDLE Run the device bus type's ->runtime_idle() callback + * + * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback + * + * RPM_REQ_AUTOSUSPEND Same as RPM_REQ_SUSPEND, but not until the device has + * been inactive for as long as power.autosuspend_delay + * + * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback + */ + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE, + RPM_REQ_SUSPEND, + RPM_REQ_AUTOSUSPEND, + RPM_REQ_RESUME, +}; + +struct wakeup_source; + +struct pm_domain_data { + struct list_head list_node; + struct device *dev; +}; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; +#ifdef CONFIG_PM_CLK + struct list_head clock_list; +#endif +#ifdef CONFIG_PM_GENERIC_DOMAINS + struct pm_domain_data *domain_data; +#endif +}; + +struct dev_pm_info { + pm_message_t power_state; + unsigned int can_wakeup:1; + unsigned int async_suspend:1; + bool is_prepared:1; /* Owned by the PM core */ + bool is_suspended:1; /* Ditto */ + bool ignore_children:1; + spinlock_t lock; +#ifdef CONFIG_PM_SLEEP + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path:1; +#else + unsigned int should_wakeup:1; +#endif +#ifdef CONFIG_PM_RUNTIME + struct timer_list suspend_timer; + unsigned long timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth:3; + unsigned int idle_notification:1; + unsigned int request_pending:1; + unsigned int deferred_resume:1; + unsigned int run_wake:1; + unsigned int runtime_auto:1; + unsigned int no_callbacks:1; + unsigned int irq_safe:1; + unsigned int use_autosuspend:1; + unsigned int timer_autosuspends:1; + enum rpm_request request; + enum rpm_status runtime_status; + int runtime_error; + int autosuspend_delay; + unsigned long last_busy; + unsigned long active_jiffies; + unsigned long suspended_jiffies; + unsigned long accounting_timestamp; + struct dev_pm_qos_request *pq_req; +#endif + struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ + struct pm_qos_constraints *constraints; +}; + +extern void update_pm_runtime_accounting(struct device *dev); +extern int dev_pm_get_subsys_data(struct device *dev); +extern int dev_pm_put_subsys_data(struct device *dev); + +/* + * Power domains provide callbacks that are executed during system suspend, + * hibernation, system resume and during runtime PM transitions along with + * subsystem-level and driver-level callbacks. + */ +struct dev_pm_domain { + struct dev_pm_ops ops; +}; + +/* + * The PM_EVENT_ messages are also used by drivers implementing the legacy + * suspend framework, based on the ->suspend() and ->resume() callbacks common + * for suspend and hibernation transitions, according to the rules below. + */ + +/* Necessary, because several drivers use PM_EVENT_PRETHAW */ +#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE + +/* + * One transition is triggered by resume(), after a suspend() call; the + * message is implicit: + * + * ON Driver starts working again, responding to hardware events + * and software requests. The hardware may have gone through + * a power-off reset, or it may have maintained state from the + * previous suspend() which the driver will rely on while + * resuming. On most platforms, there are no restrictions on + * availability of resources like clocks during resume(). + * + * Other transitions are triggered by messages sent using suspend(). 
All + * these transitions quiesce the driver, so that I/O queues are inactive. + * That commonly entails turning off IRQs and DMA; there may be rules + * about how to quiesce that are specific to the bus or the device's type. + * (For example, network drivers mark the link state.) Other details may + * differ according to the message: + * + * SUSPEND Quiesce, enter a low power device state appropriate for + * the upcoming system state (such as PCI_D3hot), and enable + * wakeup events as appropriate. + * + * HIBERNATE Enter a low power device state appropriate for the hibernation + * state (eg. ACPI S4) and enable wakeup events as appropriate. + * + * FREEZE Quiesce operations so that a consistent image can be saved; + * but do NOT otherwise enter a low power device state, and do + * NOT emit system wakeup events. + * + * PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring + * the system from a snapshot taken after an earlier FREEZE. + * Some drivers will need to reset their hardware state instead + * of preserving it, to ensure that it's never mistaken for the + * state which that earlier snapshot had set up. + * + * A minimally power-aware driver treats all messages as SUSPEND, fully + * reinitializes its device during resume() -- whether or not it was reset + * during the suspend/resume cycle -- and can't issue wakeup events. + * + * More power-aware drivers may also use low power states at runtime as + * well as during system sleep states like PM_SUSPEND_STANDBY. They may + * be able to use wakeup events to exit from runtime low-power states, + * or from system low-power states such as standby or suspend-to-RAM. + */ + +#ifdef CONFIG_PM_SLEEP +extern void device_pm_lock(void); +extern void dpm_resume_start(pm_message_t state); +extern void dpm_resume_end(pm_message_t state); +extern void dpm_resume(pm_message_t state); +extern void dpm_complete(pm_message_t state); + +extern void device_pm_unlock(void); +extern int dpm_suspend_end(pm_message_t state); +extern int dpm_suspend_start(pm_message_t state); +extern int dpm_suspend(pm_message_t state); +extern int dpm_prepare(pm_message_t state); + +extern void __suspend_report_result(const char *function, void *fn, int ret); + +#define suspend_report_result(fn, ret) \ + do { \ + __suspend_report_result(__func__, fn, ret); \ + } while (0) + +extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); + +extern int pm_generic_prepare(struct device *dev); +extern int pm_generic_suspend_late(struct device *dev); +extern int pm_generic_suspend_noirq(struct device *dev); +extern int pm_generic_suspend(struct device *dev); +extern int pm_generic_resume_early(struct device *dev); +extern int pm_generic_resume_noirq(struct device *dev); +extern int pm_generic_resume(struct device *dev); +extern int pm_generic_freeze_noirq(struct device *dev); +extern int pm_generic_freeze_late(struct device *dev); +extern int pm_generic_freeze(struct device *dev); +extern int pm_generic_thaw_noirq(struct device *dev); +extern int pm_generic_thaw_early(struct device *dev); +extern int pm_generic_thaw(struct device *dev); +extern int pm_generic_restore_noirq(struct device *dev); +extern int pm_generic_restore_early(struct device *dev); +extern int pm_generic_restore(struct device *dev); +extern int pm_generic_poweroff_noirq(struct device *dev); +extern int pm_generic_poweroff_late(struct device *dev); +extern int pm_generic_poweroff(struct device *dev); +extern void pm_generic_complete(struct device *dev); + +#else /* !CONFIG_PM_SLEEP */ 
+ +#define device_pm_lock() do {} while (0) +#define device_pm_unlock() do {} while (0) + +static inline int dpm_suspend_start(pm_message_t state) +{ + return 0; +} + +#define suspend_report_result(fn, ret) do {} while (0) + +static inline int device_pm_wait_for_dev(struct device *a, struct device *b) +{ + return 0; +} + +#define pm_generic_prepare NULL +#define pm_generic_suspend NULL +#define pm_generic_resume NULL +#define pm_generic_freeze NULL +#define pm_generic_thaw NULL +#define pm_generic_restore NULL +#define pm_generic_poweroff NULL +#define pm_generic_complete NULL +#endif /* !CONFIG_PM_SLEEP */ + +/* How to reorder dpm_list after device_move() */ +enum dpm_order { + DPM_ORDER_NONE, + DPM_ORDER_DEV_AFTER_PARENT, + DPM_ORDER_PARENT_BEFORE_DEV, + DPM_ORDER_DEV_LAST, +}; + +#endif /* _LINUX_PM_H */ diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h new file mode 100644 index 00000000..8348866e --- /dev/null +++ b/include/linux/pm_clock.h @@ -0,0 +1,71 @@ +/* + * pm_clock.h - Definitions and headers related to device clocks. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#ifndef _LINUX_PM_CLOCK_H +#define _LINUX_PM_CLOCK_H + +#include <linux/device.h> +#include <linux/notifier.h> + +struct pm_clk_notifier_block { + struct notifier_block nb; + struct dev_pm_domain *pm_domain; + char *con_ids[]; +}; + +#ifdef CONFIG_PM_CLK +static inline bool pm_clk_no_clocks(struct device *dev) +{ + return dev && dev->power.subsys_data + && list_empty(&dev->power.subsys_data->clock_list); +} + +extern void pm_clk_init(struct device *dev); +extern int pm_clk_create(struct device *dev); +extern void pm_clk_destroy(struct device *dev); +extern int pm_clk_add(struct device *dev, const char *con_id); +extern void pm_clk_remove(struct device *dev, const char *con_id); +extern int pm_clk_suspend(struct device *dev); +extern int pm_clk_resume(struct device *dev); +#else +static inline bool pm_clk_no_clocks(struct device *dev) +{ + return true; +} +static inline void pm_clk_init(struct device *dev) +{ +} +static inline int pm_clk_create(struct device *dev) +{ + return -EINVAL; +} +static inline void pm_clk_destroy(struct device *dev) +{ +} +static inline int pm_clk_add(struct device *dev, const char *con_id) +{ + return -EINVAL; +} +static inline void pm_clk_remove(struct device *dev, const char *con_id) +{ +} +#define pm_clk_suspend NULL +#define pm_clk_resume NULL +#endif + +#ifdef CONFIG_HAVE_CLK +extern void pm_clk_add_notifier(struct bus_type *bus, + struct pm_clk_notifier_block *clknb); +#else +static inline void pm_clk_add_notifier(struct bus_type *bus, + struct pm_clk_notifier_block *clknb) +{ +} +#endif + +#endif diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h new file mode 100644 index 00000000..30f794eb --- /dev/null +++ b/include/linux/pm_domain.h @@ -0,0 +1,243 @@ +/* + * pm_domain.h - Definitions and headers related to device power domains. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2. 
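A sketch of how a subsystem or PM domain might use the pm_clk_* helpers above for a device whose clocks are managed generically; the attach hook name and the "ick"/"fck" connection IDs are examples, not an existing API.

static int example_subsys_attach_dev(struct device *dev)
{
	int ret;

	ret = pm_clk_create(dev);	/* allocate the per-device clock list */
	if (ret)
		return ret;

	ret = pm_clk_add(dev, "ick");	/* interface clock, example con_id */
	if (!ret)
		ret = pm_clk_add(dev, "fck");	/* functional clock */
	if (ret)
		pm_clk_destroy(dev);
	return ret;
}

/*
 * pm_clk_suspend()/pm_clk_resume() can then serve as (or be called from)
 * the runtime PM callbacks to gate and ungate the registered clocks.
 */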
+ */ + +#ifndef _LINUX_PM_DOMAIN_H +#define _LINUX_PM_DOMAIN_H + +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/pm.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/notifier.h> + +enum gpd_status { + GPD_STATE_ACTIVE = 0, /* PM domain is active */ + GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ + GPD_STATE_BUSY, /* Something is happening to the PM domain */ + GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ + GPD_STATE_POWER_OFF, /* PM domain is off */ +}; + +struct dev_power_governor { + bool (*power_down_ok)(struct dev_pm_domain *domain); + bool (*stop_ok)(struct device *dev); +}; + +struct gpd_dev_ops { + int (*start)(struct device *dev); + int (*stop)(struct device *dev); + int (*save_state)(struct device *dev); + int (*restore_state)(struct device *dev); + int (*suspend)(struct device *dev); + int (*suspend_late)(struct device *dev); + int (*resume_early)(struct device *dev); + int (*resume)(struct device *dev); + int (*freeze)(struct device *dev); + int (*freeze_late)(struct device *dev); + int (*thaw_early)(struct device *dev); + int (*thaw)(struct device *dev); + bool (*active_wakeup)(struct device *dev); +}; + +struct generic_pm_domain { + struct dev_pm_domain domain; /* PM domain operations */ + struct list_head gpd_list_node; /* Node in the global PM domains list */ + struct list_head master_links; /* Links with PM domain as a master */ + struct list_head slave_links; /* Links with PM domain as a slave */ + struct list_head dev_list; /* List of devices */ + struct mutex lock; + struct dev_power_governor *gov; + struct work_struct power_off_work; + char *name; + unsigned int in_progress; /* Number of devices being suspended now */ + atomic_t sd_count; /* Number of subdomains with power "on" */ + enum gpd_status status; /* Current state of the domain */ + wait_queue_head_t status_wait_queue; + struct task_struct *poweroff_task; /* Powering off task */ + unsigned int resume_count; /* Number of devices being resumed */ + unsigned int device_count; /* Number of devices */ + unsigned int suspended_count; /* System suspend device counter */ + unsigned int prepared_count; /* Suspend counter of prepared devices */ + bool suspend_power_off; /* Power status before system suspend */ + bool dev_irq_safe; /* Device callbacks are IRQ-safe */ + int (*power_off)(struct generic_pm_domain *domain); + s64 power_off_latency_ns; + int (*power_on)(struct generic_pm_domain *domain); + s64 power_on_latency_ns; + struct gpd_dev_ops dev_ops; + s64 max_off_time_ns; /* Maximum allowed "suspended" time. 
*/ + bool max_off_time_changed; + bool cached_power_down_ok; + struct device_node *of_node; /* Node in device tree */ +}; + +static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) +{ + return container_of(pd, struct generic_pm_domain, domain); +} + +struct gpd_link { + struct generic_pm_domain *master; + struct list_head master_node; + struct generic_pm_domain *slave; + struct list_head slave_node; +}; + +struct gpd_timing_data { + s64 stop_latency_ns; + s64 start_latency_ns; + s64 save_state_latency_ns; + s64 restore_state_latency_ns; + s64 effective_constraint_ns; + bool constraint_changed; + bool cached_stop_ok; +}; + +struct generic_pm_domain_data { + struct pm_domain_data base; + struct gpd_dev_ops ops; + struct gpd_timing_data td; + struct notifier_block nb; + struct mutex lock; + bool need_restore; + bool always_on; +}; + +#ifdef CONFIG_PM_GENERIC_DOMAINS +static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd) +{ + return container_of(pdd, struct generic_pm_domain_data, base); +} + +static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) +{ + return to_gpd_data(dev->power.subsys_data->domain_data); +} + +extern struct dev_power_governor simple_qos_governor; + +extern struct generic_pm_domain *dev_to_genpd(struct device *dev); +extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev, + struct gpd_timing_data *td); + +extern int __pm_genpd_of_add_device(struct device_node *genpd_node, + struct device *dev, + struct gpd_timing_data *td); + +static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + return __pm_genpd_add_device(genpd, dev, NULL); +} + +static inline int pm_genpd_of_add_device(struct device_node *genpd_node, + struct device *dev) +{ + return __pm_genpd_of_add_device(genpd_node, dev, NULL); +} + +extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev); +extern void pm_genpd_dev_always_on(struct device *dev, bool val); +extern void pm_genpd_dev_need_restore(struct device *dev, bool val); +extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *new_subdomain); +extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *target); +extern int pm_genpd_add_callbacks(struct device *dev, + struct gpd_dev_ops *ops, + struct gpd_timing_data *td); +extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td); +extern void pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off); + +extern int pm_genpd_poweron(struct generic_pm_domain *genpd); + +extern bool default_stop_ok(struct device *dev); + +extern struct dev_power_governor pm_domain_always_on_gov; +#else + +static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) +{ + return ERR_PTR(-ENOSYS); +} +static inline struct generic_pm_domain *dev_to_genpd(struct device *dev) +{ + return ERR_PTR(-ENOSYS); +} +static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev, + struct gpd_timing_data *td) +{ + return -ENOSYS; +} +static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + return -ENOSYS; +} +static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + return -ENOSYS; +} +static inline void pm_genpd_dev_always_on(struct device *dev, bool val) {} +static inline void 
pm_genpd_dev_need_restore(struct device *dev, bool val) {} +static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *new_sd) +{ + return -ENOSYS; +} +static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *target) +{ + return -ENOSYS; +} +static inline int pm_genpd_add_callbacks(struct device *dev, + struct gpd_dev_ops *ops, + struct gpd_timing_data *td) +{ + return -ENOSYS; +} +static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) +{ + return -ENOSYS; +} +static inline void pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off) +{ +} +static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) +{ + return -ENOSYS; +} +static inline bool default_stop_ok(struct device *dev) +{ + return false; +} +#define simple_qos_governor NULL +#define pm_domain_always_on_gov NULL +#endif + +static inline int pm_genpd_remove_callbacks(struct device *dev) +{ + return __pm_genpd_remove_callbacks(dev, true); +} + +#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME +extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); +extern void pm_genpd_poweroff_unused(void); +#else +static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} +static inline void pm_genpd_poweroff_unused(void) {} +#endif + +#endif /* _LINUX_PM_DOMAIN_H */ diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h new file mode 100644 index 00000000..233149cb --- /dev/null +++ b/include/linux/pm_qos.h @@ -0,0 +1,153 @@ +#ifndef _LINUX_PM_QOS_H +#define _LINUX_PM_QOS_H +/* interface for the pm_qos_power infrastructure of the linux kernel. + * + * Mark Gross <mgross@linux.intel.com> + */ +#include <linux/plist.h> +#include <linux/notifier.h> +#include <linux/miscdevice.h> +#include <linux/device.h> +#include <linux/workqueue.h> + +enum { + PM_QOS_RESERVED = 0, + PM_QOS_CPU_DMA_LATENCY, + PM_QOS_NETWORK_LATENCY, + PM_QOS_NETWORK_THROUGHPUT, + + /* insert new class ID */ + PM_QOS_NUM_CLASSES, +}; + +#define PM_QOS_DEFAULT_VALUE -1 + +#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) +#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) +#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 +#define PM_QOS_DEV_LAT_DEFAULT_VALUE 0 + +struct pm_qos_request { + struct plist_node node; + int pm_qos_class; + struct delayed_work work; /* for pm_qos_update_request_timeout */ +}; + +struct dev_pm_qos_request { + struct plist_node node; + struct device *dev; +}; + +enum pm_qos_type { + PM_QOS_UNITIALIZED, + PM_QOS_MAX, /* return the largest value */ + PM_QOS_MIN /* return the smallest value */ +}; + +/* + * Note: The lockless read path depends on the CPU accessing + * target_value atomically. 
Atomic access is only guaranteed on all CPU + * types Linux supports for 32-bit quantities + */ +struct pm_qos_constraints { + struct plist_head list; + s32 target_value; /* Do not change to 64 bit */ + s32 default_value; + enum pm_qos_type type; + struct blocking_notifier_head *notifiers; +}; + +/* Action requested to pm_qos_update_target */ +enum pm_qos_req_action { + PM_QOS_ADD_REQ, /* Add a new request */ + PM_QOS_UPDATE_REQ, /* Update an existing request */ + PM_QOS_REMOVE_REQ /* Remove an existing request */ +}; + +static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) +{ + return req->dev != 0; +} + +int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, + enum pm_qos_req_action action, int value); +void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, + s32 value); +void pm_qos_update_request(struct pm_qos_request *req, + s32 new_value); +void pm_qos_update_request_timeout(struct pm_qos_request *req, + s32 new_value, unsigned long timeout_us); +void pm_qos_remove_request(struct pm_qos_request *req); + +int pm_qos_request(int pm_qos_class); +int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); +int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); +int pm_qos_request_active(struct pm_qos_request *req); +s32 pm_qos_read_value(struct pm_qos_constraints *c); + +#ifdef CONFIG_PM +s32 __dev_pm_qos_read_value(struct device *dev); +s32 dev_pm_qos_read_value(struct device *dev); +int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, + s32 value); +int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); +int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); +int dev_pm_qos_add_notifier(struct device *dev, + struct notifier_block *notifier); +int dev_pm_qos_remove_notifier(struct device *dev, + struct notifier_block *notifier); +int dev_pm_qos_add_global_notifier(struct notifier_block *notifier); +int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); +void dev_pm_qos_constraints_init(struct device *dev); +void dev_pm_qos_constraints_destroy(struct device *dev); +int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, s32 value); +#else +static inline s32 __dev_pm_qos_read_value(struct device *dev) + { return 0; } +static inline s32 dev_pm_qos_read_value(struct device *dev) + { return 0; } +static inline int dev_pm_qos_add_request(struct device *dev, + struct dev_pm_qos_request *req, + s32 value) + { return 0; } +static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, + s32 new_value) + { return 0; } +static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) + { return 0; } +static inline int dev_pm_qos_add_notifier(struct device *dev, + struct notifier_block *notifier) + { return 0; } +static inline int dev_pm_qos_remove_notifier(struct device *dev, + struct notifier_block *notifier) + { return 0; } +static inline int dev_pm_qos_add_global_notifier( + struct notifier_block *notifier) + { return 0; } +static inline int dev_pm_qos_remove_global_notifier( + struct notifier_block *notifier) + { return 0; } +static inline void dev_pm_qos_constraints_init(struct device *dev) +{ + dev->power.power_state = PMSG_ON; +} +static inline void dev_pm_qos_constraints_destroy(struct device *dev) +{ + dev->power.power_state = PMSG_INVALID; +} +static inline int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, s32 value) +
{ return 0; } +#endif + +#ifdef CONFIG_PM_RUNTIME +int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); +void dev_pm_qos_hide_latency_limit(struct device *dev); +#else +static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) + { return 0; } +static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} +#endif + +#endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h new file mode 100644 index 00000000..f271860c --- /dev/null +++ b/include/linux/pm_runtime.h @@ -0,0 +1,251 @@ +/* + * pm_runtime.h - Device run-time power management helper functions. + * + * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl> + * + * This file is released under the GPLv2. + */ + +#ifndef _LINUX_PM_RUNTIME_H +#define _LINUX_PM_RUNTIME_H + +#include <linux/device.h> +#include <linux/notifier.h> +#include <linux/pm.h> + +#include <linux/jiffies.h> + +/* Runtime PM flag argument bits */ +#define RPM_ASYNC 0x01 /* Request is asynchronous */ +#define RPM_NOWAIT 0x02 /* Don't wait for concurrent + state change */ +#define RPM_GET_PUT 0x04 /* Increment/decrement the + usage_count */ +#define RPM_AUTO 0x08 /* Use autosuspend_delay */ + +#ifdef CONFIG_PM_RUNTIME + +extern struct workqueue_struct *pm_wq; + +extern int __pm_runtime_idle(struct device *dev, int rpmflags); +extern int __pm_runtime_suspend(struct device *dev, int rpmflags); +extern int __pm_runtime_resume(struct device *dev, int rpmflags); +extern int pm_schedule_suspend(struct device *dev, unsigned int delay); +extern int __pm_runtime_set_status(struct device *dev, unsigned int status); +extern int pm_runtime_barrier(struct device *dev); +extern void pm_runtime_enable(struct device *dev); +extern void __pm_runtime_disable(struct device *dev, bool check_resume); +extern void pm_runtime_allow(struct device *dev); +extern void pm_runtime_forbid(struct device *dev); +extern int pm_generic_runtime_idle(struct device *dev); +extern int pm_generic_runtime_suspend(struct device *dev); +extern int pm_generic_runtime_resume(struct device *dev); +extern void pm_runtime_no_callbacks(struct device *dev); +extern void pm_runtime_irq_safe(struct device *dev); +extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); +extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); +extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); +extern void pm_runtime_update_max_time_suspended(struct device *dev, + s64 delta_ns); + +static inline bool pm_children_suspended(struct device *dev) +{ + return dev->power.ignore_children + || !atomic_read(&dev->power.child_count); +} + +static inline void pm_runtime_get_noresume(struct device *dev) +{ + atomic_inc(&dev->power.usage_count); +} + +static inline void pm_runtime_put_noidle(struct device *dev) +{ + atomic_add_unless(&dev->power.usage_count, -1, 0); +} + +static inline bool device_run_wake(struct device *dev) +{ + return dev->power.run_wake; +} + +static inline void device_set_run_wake(struct device *dev, bool enable) +{ + dev->power.run_wake = enable; +} + +static inline bool pm_runtime_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED + && !dev->power.disable_depth; +} + +static inline bool pm_runtime_status_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED; +} + +static inline bool pm_runtime_enabled(struct device *dev) +{ + return !dev->power.disable_depth; +} + +static inline bool pm_runtime_callbacks_present(struct device *dev) +{ + 
return !dev->power.no_callbacks; +} + +static inline void pm_runtime_mark_last_busy(struct device *dev) +{ + ACCESS_ONCE(dev->power.last_busy) = jiffies; +} + +#else /* !CONFIG_PM_RUNTIME */ + +static inline int __pm_runtime_idle(struct device *dev, int rpmflags) +{ + return -ENOSYS; +} +static inline int __pm_runtime_suspend(struct device *dev, int rpmflags) +{ + return -ENOSYS; +} +static inline int __pm_runtime_resume(struct device *dev, int rpmflags) +{ + return 1; +} +static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) +{ + return -ENOSYS; +} +static inline int __pm_runtime_set_status(struct device *dev, + unsigned int status) { return 0; } +static inline int pm_runtime_barrier(struct device *dev) { return 0; } +static inline void pm_runtime_enable(struct device *dev) {} +static inline void __pm_runtime_disable(struct device *dev, bool c) {} +static inline void pm_runtime_allow(struct device *dev) {} +static inline void pm_runtime_forbid(struct device *dev) {} + +static inline bool pm_children_suspended(struct device *dev) { return false; } +static inline void pm_runtime_get_noresume(struct device *dev) {} +static inline void pm_runtime_put_noidle(struct device *dev) {} +static inline bool device_run_wake(struct device *dev) { return false; } +static inline void device_set_run_wake(struct device *dev, bool enable) {} +static inline bool pm_runtime_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_enabled(struct device *dev) { return false; } + +static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } +static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } +static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline void pm_runtime_no_callbacks(struct device *dev) {} +static inline void pm_runtime_irq_safe(struct device *dev) {} + +static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } +static inline void pm_runtime_mark_last_busy(struct device *dev) {} +static inline void __pm_runtime_use_autosuspend(struct device *dev, + bool use) {} +static inline void pm_runtime_set_autosuspend_delay(struct device *dev, + int delay) {} +static inline unsigned long pm_runtime_autosuspend_expiration( + struct device *dev) { return 0; } + +#endif /* !CONFIG_PM_RUNTIME */ + +static inline int pm_runtime_idle(struct device *dev) +{ + return __pm_runtime_idle(dev, 0); +} + +static inline int pm_runtime_suspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, 0); +} + +static inline int pm_runtime_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_AUTO); +} + +static inline int pm_runtime_resume(struct device *dev) +{ + return __pm_runtime_resume(dev, 0); +} + +static inline int pm_request_idle(struct device *dev) +{ + return __pm_runtime_idle(dev, RPM_ASYNC); +} + +static inline int pm_request_resume(struct device *dev) +{ + return __pm_runtime_resume(dev, RPM_ASYNC); +} + +static inline int pm_request_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); +} + +static inline int pm_runtime_get(struct device *dev) +{ + return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); +} + +static inline int pm_runtime_get_sync(struct device *dev) +{ + return __pm_runtime_resume(dev, RPM_GET_PUT); +} + +static inline int pm_runtime_put(struct device *dev) +{ + return __pm_runtime_idle(dev, RPM_GET_PUT 
| RPM_ASYNC); +} + +static inline int pm_runtime_put_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, + RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); +} + +static inline int pm_runtime_put_sync(struct device *dev) +{ + return __pm_runtime_idle(dev, RPM_GET_PUT); +} + +static inline int pm_runtime_put_sync_suspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_GET_PUT); +} + +static inline int pm_runtime_put_sync_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); +} + +static inline int pm_runtime_set_active(struct device *dev) +{ + return __pm_runtime_set_status(dev, RPM_ACTIVE); +} + +static inline void pm_runtime_set_suspended(struct device *dev) +{ + __pm_runtime_set_status(dev, RPM_SUSPENDED); +} + +static inline void pm_runtime_disable(struct device *dev) +{ + __pm_runtime_disable(dev, true); +} + +static inline void pm_runtime_use_autosuspend(struct device *dev) +{ + __pm_runtime_use_autosuspend(dev, true); +} + +static inline void pm_runtime_dont_use_autosuspend(struct device *dev) +{ + __pm_runtime_use_autosuspend(dev, false); +} + +#endif diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h new file mode 100644 index 00000000..569781fa --- /dev/null +++ b/include/linux/pm_wakeup.h @@ -0,0 +1,195 @@ +/* + * pm_wakeup.h - Power management wakeup interface + * + * Copyright (C) 2008 Alan Stern + * Copyright (C) 2010 Rafael J. Wysocki, Novell Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_PM_WAKEUP_H +#define _LINUX_PM_WAKEUP_H + +#ifndef _DEVICE_H_ +# error "please don't include this file directly" +#endif + +#include <linux/types.h> + +/** + * struct wakeup_source - Representation of wakeup sources + * + * @total_time: Total time this wakeup source has been active. + * @max_time: Maximum time this wakeup source has been continuously active. + * @last_time: Monotonic clock when the wakeup source was last touched. + * @prevent_sleep_time: Total time this source has been preventing autosleep. + * @event_count: Number of signaled wakeup events. + * @active_count: Number of times the wakeup source was activated. + * @relax_count: Number of times the wakeup source was deactivated. + * @expire_count: Number of times the wakeup source's timeout has expired. + * @wakeup_count: Number of times the wakeup source might abort suspend. + * @active: Status of the wakeup source. + * @has_timeout: The wakeup source has been activated with a timeout.
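+ *
+ * A minimal usage sketch (illustrative only; the "foo" names below are
+ * hypothetical and not part of this header):
+ *
+ *	struct wakeup_source *foo_ws = wakeup_source_register("foo");
+ *
+ *	__pm_stay_awake(foo_ws);	// keep the system awake while working
+ *	// ... handle the wakeup event ...
+ *	__pm_relax(foo_ws);		// done, suspend may proceed again
+ *
+ *	wakeup_source_unregister(foo_ws);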
+ */ +struct wakeup_source { + const char *name; + struct list_head entry; + spinlock_t lock; + struct timer_list timer; + unsigned long timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + unsigned long event_count; + unsigned long active_count; + unsigned long relax_count; + unsigned long expire_count; + unsigned long wakeup_count; + bool active:1; + bool autosleep_enabled:1; +}; + +#ifdef CONFIG_PM_SLEEP + +/* + * Changes to device_may_wakeup take effect on the next pm state change. + */ + +static inline bool device_can_wakeup(struct device *dev) +{ + return dev->power.can_wakeup; +} + +static inline bool device_may_wakeup(struct device *dev) +{ + return dev->power.can_wakeup && !!dev->power.wakeup; +} + +/* drivers/base/power/wakeup.c */ +extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); +extern struct wakeup_source *wakeup_source_create(const char *name); +extern void wakeup_source_drop(struct wakeup_source *ws); +extern void wakeup_source_destroy(struct wakeup_source *ws); +extern void wakeup_source_add(struct wakeup_source *ws); +extern void wakeup_source_remove(struct wakeup_source *ws); +extern struct wakeup_source *wakeup_source_register(const char *name); +extern void wakeup_source_unregister(struct wakeup_source *ws); +extern int device_wakeup_enable(struct device *dev); +extern int device_wakeup_disable(struct device *dev); +extern void device_set_wakeup_capable(struct device *dev, bool capable); +extern int device_init_wakeup(struct device *dev, bool val); +extern int device_set_wakeup_enable(struct device *dev, bool enable); +extern void __pm_stay_awake(struct wakeup_source *ws); +extern void pm_stay_awake(struct device *dev); +extern void __pm_relax(struct wakeup_source *ws); +extern void pm_relax(struct device *dev); +extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec); +extern void pm_wakeup_event(struct device *dev, unsigned int msec); + +#else /* !CONFIG_PM_SLEEP */ + +static inline void device_set_wakeup_capable(struct device *dev, bool capable) +{ + dev->power.can_wakeup = capable; +} + +static inline bool device_can_wakeup(struct device *dev) +{ + return dev->power.can_wakeup; +} + +static inline void wakeup_source_prepare(struct wakeup_source *ws, + const char *name) {} + +static inline struct wakeup_source *wakeup_source_create(const char *name) +{ + return NULL; +} + +static inline void wakeup_source_drop(struct wakeup_source *ws) {} + +static inline void wakeup_source_destroy(struct wakeup_source *ws) {} + +static inline void wakeup_source_add(struct wakeup_source *ws) {} + +static inline void wakeup_source_remove(struct wakeup_source *ws) {} + +static inline struct wakeup_source *wakeup_source_register(const char *name) +{ + return NULL; +} + +static inline void wakeup_source_unregister(struct wakeup_source *ws) {} + +static inline int device_wakeup_enable(struct device *dev) +{ + dev->power.should_wakeup = true; + return 0; +} + +static inline int device_wakeup_disable(struct device *dev) +{ + dev->power.should_wakeup = false; + return 0; +} + +static inline int device_set_wakeup_enable(struct device *dev, bool enable) +{ + dev->power.should_wakeup = enable; + return 0; +} + +static inline int device_init_wakeup(struct device *dev, bool val) +{ + device_set_wakeup_capable(dev, val); + device_set_wakeup_enable(dev, val); + return 0; +} + +static inline bool device_may_wakeup(struct device *dev) +{ + return 
dev->power.can_wakeup && dev->power.should_wakeup; +} + +static inline void __pm_stay_awake(struct wakeup_source *ws) {} + +static inline void pm_stay_awake(struct device *dev) {} + +static inline void __pm_relax(struct wakeup_source *ws) {} + +static inline void pm_relax(struct device *dev) {} + +static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {} + +static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} + +#endif /* !CONFIG_PM_SLEEP */ + +static inline void wakeup_source_init(struct wakeup_source *ws, + const char *name) +{ + wakeup_source_prepare(ws, name); + wakeup_source_add(ws); +} + +static inline void wakeup_source_trash(struct wakeup_source *ws) +{ + wakeup_source_remove(ws); + wakeup_source_drop(ws); +} + +#endif /* _LINUX_PM_WAKEUP_H */ diff --git a/include/linux/pmu.h b/include/linux/pmu.h new file mode 100644 index 00000000..84e6a55a --- /dev/null +++ b/include/linux/pmu.h @@ -0,0 +1,213 @@ +/* + * Definitions for talking to the PMU. The PMU is a microcontroller + * which controls battery charging and system power on PowerBook 3400 + * and 2400 models as well as the RTC and various other things. + * + * Copyright (C) 1998 Paul Mackerras. + */ + +#ifndef _LINUX_PMU_H +#define _LINUX_PMU_H + +#define PMU_DRIVER_VERSION 2 + +/* + * PMU commands + */ +#define PMU_POWER_CTRL0 0x10 /* control power of some devices */ +#define PMU_POWER_CTRL 0x11 /* control power of some devices */ +#define PMU_ADB_CMD 0x20 /* send ADB packet */ +#define PMU_ADB_POLL_OFF 0x21 /* disable ADB auto-poll */ +#define PMU_WRITE_NVRAM 0x33 /* write non-volatile RAM */ +#define PMU_READ_NVRAM 0x3b /* read non-volatile RAM */ +#define PMU_SET_RTC 0x30 /* set real-time clock */ +#define PMU_READ_RTC 0x38 /* read real-time clock */ +#define PMU_SET_VOLBUTTON 0x40 /* set volume up/down position */ +#define PMU_BACKLIGHT_BRIGHT 0x41 /* set backlight brightness */ +#define PMU_GET_VOLBUTTON 0x48 /* get volume up/down position */ +#define PMU_PCEJECT 0x4c /* eject PC-card from slot */ +#define PMU_BATTERY_STATE 0x6b /* report battery state etc. */ +#define PMU_SMART_BATTERY_STATE 0x6f /* report battery state (new way) */ +#define PMU_SET_INTR_MASK 0x70 /* set PMU interrupt mask */ +#define PMU_INT_ACK 0x78 /* read interrupt bits */ +#define PMU_SHUTDOWN 0x7e /* turn power off */ +#define PMU_CPU_SPEED 0x7d /* control CPU speed on some models */ +#define PMU_SLEEP 0x7f /* put CPU to sleep */ +#define PMU_POWER_EVENTS 0x8f /* Send power-event commands to PMU */ +#define PMU_I2C_CMD 0x9a /* I2C operations */ +#define PMU_RESET 0xd0 /* reset CPU */ +#define PMU_GET_BRIGHTBUTTON 0xd9 /* report brightness up/down pos */ +#define PMU_GET_COVER 0xdc /* report cover open/closed */ +#define PMU_SYSTEM_READY 0xdf /* tell PMU we are awake */ +#define PMU_GET_VERSION 0xea /* read the PMU version */ + +/* Bits to use with the PMU_POWER_CTRL0 command */ +#define PMU_POW0_ON 0x80 /* OR this to power ON the device */ +#define PMU_POW0_OFF 0x00 /* leave bit 7 to 0 to power it OFF */ +#define PMU_POW0_HARD_DRIVE 0x04 /* Hard drive power (on wallstreet/lombard ?) 
 */ + +/* Bits to use with the PMU_POWER_CTRL command */ +#define PMU_POW_ON 0x80 /* OR this to power ON the device */ +#define PMU_POW_OFF 0x00 /* leave bit 7 to 0 to power it OFF */ +#define PMU_POW_BACKLIGHT 0x01 /* backlight power */ +#define PMU_POW_CHARGER 0x02 /* battery charger power */ +#define PMU_POW_IRLED 0x04 /* IR led power (on wallstreet) */ +#define PMU_POW_MEDIABAY 0x08 /* media bay power (wallstreet/lombard ?) */ + +/* Bits in PMU interrupt and interrupt mask bytes */ +#define PMU_INT_PCEJECT 0x04 /* PC-card eject buttons */ +#define PMU_INT_SNDBRT 0x08 /* sound/brightness up/down buttons */ +#define PMU_INT_ADB 0x10 /* ADB autopoll or reply data */ +#define PMU_INT_BATTERY 0x20 /* Battery state change */ +#define PMU_INT_ENVIRONMENT 0x40 /* Environment interrupts */ +#define PMU_INT_TICK 0x80 /* 1-second tick interrupt */ + +/* Other bits in PMU interrupt valid when PMU_INT_ADB is set */ +#define PMU_INT_ADB_AUTO 0x04 /* ADB autopoll, when PMU_INT_ADB */ +#define PMU_INT_WAITING_CHARGER 0x01 /* ??? */ +#define PMU_INT_AUTO_SRQ_POLL 0x02 /* ??? */ + +/* Bits in the environment message (either obtained via PMU_GET_COVER, + * or via PMU_INT_ENVIRONMENT on core99) */ +#define PMU_ENV_LID_CLOSED 0x01 /* The lid is closed */ + +/* I2C related definitions */ +#define PMU_I2C_MODE_SIMPLE 0 +#define PMU_I2C_MODE_STDSUB 1 +#define PMU_I2C_MODE_COMBINED 2 + +#define PMU_I2C_BUS_STATUS 0 +#define PMU_I2C_BUS_SYSCLK 1 +#define PMU_I2C_BUS_POWER 2 + +#define PMU_I2C_STATUS_OK 0 +#define PMU_I2C_STATUS_DATAREAD 1 +#define PMU_I2C_STATUS_BUSY 0xfe + + +/* Kind of PMU (model) */ +enum { + PMU_UNKNOWN, + PMU_OHARE_BASED, /* 2400, 3400, 3500 (old G3 powerbook) */ + PMU_HEATHROW_BASED, /* PowerBook G3 series */ + PMU_PADDINGTON_BASED, /* 1999 PowerBook G3 */ + PMU_KEYLARGO_BASED, /* Core99 motherboard (PMU99) */ + PMU_68K_V1, /* 68K PMU, version 1 */ + PMU_68K_V2, /* 68K PMU, version 2 */ +}; + +/* PMU PMU_POWER_EVENTS commands */ +enum { + PMU_PWR_GET_POWERUP_EVENTS = 0x00, + PMU_PWR_SET_POWERUP_EVENTS = 0x01, + PMU_PWR_CLR_POWERUP_EVENTS = 0x02, + PMU_PWR_GET_WAKEUP_EVENTS = 0x03, + PMU_PWR_SET_WAKEUP_EVENTS = 0x04, + PMU_PWR_CLR_WAKEUP_EVENTS = 0x05, +}; + +/* Power events wakeup bits */ +enum { + PMU_PWR_WAKEUP_KEY = 0x01, /* Wake on key press */ + PMU_PWR_WAKEUP_AC_INSERT = 0x02, /* Wake on AC adapter plug */ + PMU_PWR_WAKEUP_AC_CHANGE = 0x04, + PMU_PWR_WAKEUP_LID_OPEN = 0x08, + PMU_PWR_WAKEUP_RING = 0x10, +}; + +/* + * Ioctl commands for the /dev/pmu device + */ +#include <linux/ioctl.h> + +/* no param */ +#define PMU_IOC_SLEEP _IO('B', 0) +/* out param: u32* backlight value: 0 to 15 */ +#define PMU_IOC_GET_BACKLIGHT _IOR('B', 1, size_t) +/* in param: u32 backlight value: 0 to 15 */ +#define PMU_IOC_SET_BACKLIGHT _IOW('B', 2, size_t) +/* out param: u32* PMU model */ +#define PMU_IOC_GET_MODEL _IOR('B', 3, size_t) +/* out param: u32* has_adb: 0 or 1 */ +#define PMU_IOC_HAS_ADB _IOR('B', 4, size_t) +/* out param: u32* can_sleep: 0 or 1 */ +#define PMU_IOC_CAN_SLEEP _IOR('B', 5, size_t) +/* no param, but historically was _IOR('B', 6, 0), meaning 4 bytes */ +#define PMU_IOC_GRAB_BACKLIGHT _IOR('B', 6, size_t) + +#ifdef __KERNEL__ + +extern int find_via_pmu(void); + +extern int pmu_request(struct adb_request *req, + void (*done)(struct adb_request *), int nbytes, ...); +extern int pmu_queue_request(struct adb_request *req); +extern void pmu_poll(void); +extern void pmu_poll_adb(void); /* For use by xmon */ +extern void pmu_wait_complete(struct adb_request *req); + +/* For use before
switching interrupts off for a long time; + * warning: not stackable + */ +#if defined(CONFIG_ADB_PMU) +extern void pmu_suspend(void); +extern void pmu_resume(void); +#else +static inline void pmu_suspend(void) +{} +static inline void pmu_resume(void) +{} +#endif + +extern void pmu_enable_irled(int on); + +extern void pmu_restart(void); +extern void pmu_shutdown(void); +extern void pmu_unlock(void); + +extern int pmu_present(void); +extern int pmu_get_model(void); + +extern void pmu_backlight_set_sleep(int sleep); + +#define PMU_MAX_BATTERIES 2 + +/* values for pmu_power_flags */ +#define PMU_PWR_AC_PRESENT 0x00000001 + +/* values for pmu_battery_info.flags */ +#define PMU_BATT_PRESENT 0x00000001 +#define PMU_BATT_CHARGING 0x00000002 +#define PMU_BATT_TYPE_MASK 0x000000f0 +#define PMU_BATT_TYPE_SMART 0x00000010 /* Smart battery */ +#define PMU_BATT_TYPE_HOOPER 0x00000020 /* 3400/3500 */ +#define PMU_BATT_TYPE_COMET 0x00000030 /* 2400 */ + +struct pmu_battery_info +{ + unsigned int flags; + unsigned int charge; /* current charge */ + unsigned int max_charge; /* maximum charge */ + signed int amperage; /* current, positive if charging */ + unsigned int voltage; /* voltage */ + unsigned int time_remaining; /* remaining time */ +}; + +extern int pmu_battery_count; +extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES]; +extern unsigned int pmu_power_flags; + +/* Backlight */ +extern void pmu_backlight_init(void); + +/* some code needs to know if the PMU was suspended for hibernation */ +#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) +extern int pmu_sys_suspended; +#else +/* if power management is not configured it can't be suspended */ +#define pmu_sys_suspended 0 +#endif + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_PMU_H */ diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h new file mode 100644 index 00000000..435dd5fa --- /dev/null +++ b/include/linux/pnfs_osd_xdr.h @@ -0,0 +1,318 @@ +/* + * pNFS-osd on-the-wire data structures + * + * Copyright (C) 2007 Panasas Inc. [year of first publication] + * All rights reserved. + * + * Benny Halevy <bhalevy@panasas.com> + * Boaz Harrosh <bharrosh@panasas.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * See the file COPYING included with this distribution for more details. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Panasas company nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __PNFS_OSD_XDR_H__ +#define __PNFS_OSD_XDR_H__ + +#include <linux/nfs_fs.h> +#include <linux/nfs_page.h> + +/* + * draft-ietf-nfsv4-minorversion-22 + * draft-ietf-nfsv4-pnfs-obj-12 + */ + +/* Layout Structure */ + +enum pnfs_osd_raid_algorithm4 { + PNFS_OSD_RAID_0 = 1, + PNFS_OSD_RAID_4 = 2, + PNFS_OSD_RAID_5 = 3, + PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */ +}; + +/* struct pnfs_osd_data_map4 { + * uint32_t odm_num_comps; + * length4 odm_stripe_unit; + * uint32_t odm_group_width; + * uint32_t odm_group_depth; + * uint32_t odm_mirror_cnt; + * pnfs_osd_raid_algorithm4 odm_raid_algorithm; + * }; + */ +struct pnfs_osd_data_map { + u32 odm_num_comps; + u64 odm_stripe_unit; + u32 odm_group_width; + u32 odm_group_depth; + u32 odm_mirror_cnt; + u32 odm_raid_algorithm; +}; + +/* struct pnfs_osd_objid4 { + * deviceid4 oid_device_id; + * uint64_t oid_partition_id; + * uint64_t oid_object_id; + * }; + */ +struct pnfs_osd_objid { + struct nfs4_deviceid oid_device_id; + u64 oid_partition_id; + u64 oid_object_id; +}; + +/* For printout. I use: + * kprint("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer)); + * BE style + */ +#define _DEVID_LO(oid_device_id) \ + (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data) + +#define _DEVID_HI(oid_device_id) \ + (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1) + +enum pnfs_osd_version { + PNFS_OSD_MISSING = 0, + PNFS_OSD_VERSION_1 = 1, + PNFS_OSD_VERSION_2 = 2 +}; + +struct pnfs_osd_opaque_cred { + u32 cred_len; + void *cred; +}; + +enum pnfs_osd_cap_key_sec { + PNFS_OSD_CAP_KEY_SEC_NONE = 0, + PNFS_OSD_CAP_KEY_SEC_SSV = 1, +}; + +/* struct pnfs_osd_object_cred4 { + * pnfs_osd_objid4 oc_object_id; + * pnfs_osd_version4 oc_osd_version; + * pnfs_osd_cap_key_sec4 oc_cap_key_sec; + * opaque oc_capability_key<>; + * opaque oc_capability<>; + * }; + */ +struct pnfs_osd_object_cred { + struct pnfs_osd_objid oc_object_id; + u32 oc_osd_version; + u32 oc_cap_key_sec; + struct pnfs_osd_opaque_cred oc_cap_key; + struct pnfs_osd_opaque_cred oc_cap; +}; + +/* struct pnfs_osd_layout4 { + * pnfs_osd_data_map4 olo_map; + * uint32_t olo_comps_index; + * pnfs_osd_object_cred4 olo_components<>; + * }; + */ +struct pnfs_osd_layout { + struct pnfs_osd_data_map olo_map; + u32 olo_comps_index; + u32 olo_num_comps; + struct pnfs_osd_object_cred *olo_comps; +}; + +/* Device Address */ +enum pnfs_osd_targetid_type { + OBJ_TARGET_ANON = 1, + OBJ_TARGET_SCSI_NAME = 2, + OBJ_TARGET_SCSI_DEVICE_ID = 3, +}; + +/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) { + * case OBJ_TARGET_SCSI_NAME: + * string oti_scsi_name<>; + * + * case OBJ_TARGET_SCSI_DEVICE_ID: + * opaque oti_scsi_device_id<>; + * + * default: + * void; + * }; + * + * union pnfs_osd_targetaddr4 switch (bool ota_available) { + * case TRUE: + * netaddr4 ota_netaddr; + * case FALSE: + * void; + * }; + * + * struct pnfs_osd_deviceaddr4 { + * pnfs_osd_targetid4 oda_targetid; + * pnfs_osd_targetaddr4 oda_targetaddr; + * uint64_t oda_lun; 
+ * opaque oda_systemid<>; + * pnfs_osd_object_cred4 oda_root_obj_cred; + * opaque oda_osdname<>; + * }; + */ +struct pnfs_osd_targetid { + u32 oti_type; + struct nfs4_string oti_scsi_device_id; +}; + +/* struct netaddr4 { + * // see struct rpcb in RFC1833 + * string r_netid<>; // network id + * string r_addr<>; // universal address + * }; + */ +struct pnfs_osd_net_addr { + struct nfs4_string r_netid; + struct nfs4_string r_addr; +}; + +struct pnfs_osd_targetaddr { + u32 ota_available; + struct pnfs_osd_net_addr ota_netaddr; +}; + +struct pnfs_osd_deviceaddr { + struct pnfs_osd_targetid oda_targetid; + struct pnfs_osd_targetaddr oda_targetaddr; + u8 oda_lun[8]; + struct nfs4_string oda_systemid; + struct pnfs_osd_object_cred oda_root_obj_cred; + struct nfs4_string oda_osdname; +}; + +/* LAYOUTCOMMIT: layoutupdate */ + +/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) { + * case TRUE: + * int64_t dsu_delta; + * case FALSE: + * void; + * }; + * + * struct pnfs_osd_layoutupdate4 { + * pnfs_osd_deltaspaceused4 olu_delta_space_used; + * bool olu_ioerr_flag; + * }; + */ +struct pnfs_osd_layoutupdate { + u32 dsu_valid; + s64 dsu_delta; + u32 olu_ioerr_flag; +}; + +/* LAYOUTRETURN: I/O Error Report */ + +enum pnfs_osd_errno { + PNFS_OSD_ERR_EIO = 1, + PNFS_OSD_ERR_NOT_FOUND = 2, + PNFS_OSD_ERR_NO_SPACE = 3, + PNFS_OSD_ERR_BAD_CRED = 4, + PNFS_OSD_ERR_NO_ACCESS = 5, + PNFS_OSD_ERR_UNREACHABLE = 6, + PNFS_OSD_ERR_RESOURCE = 7 +}; + +/* struct pnfs_osd_ioerr4 { + * pnfs_osd_objid4 oer_component; + * length4 oer_comp_offset; + * length4 oer_comp_length; + * bool oer_iswrite; + * pnfs_osd_errno4 oer_errno; + * }; + */ +struct pnfs_osd_ioerr { + struct pnfs_osd_objid oer_component; + u64 oer_comp_offset; + u64 oer_comp_length; + u32 oer_iswrite; + u32 oer_errno; +}; + +/* OSD XDR Client API */ +/* Layout helpers */ +/* Layout decoding is done in two parts: + * 1. First call pnfs_osd_xdr_decode_layout_map() to read in only the header part + * of the layout. @iter members need not be initialized. + * Returned: + * @layout members are set. (@layout->olo_comps set to NULL). + * + * Zero on success, or a negative error if the passed xdr is broken. + * + * 2. Then call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns + * false, to decode the next component. + * Returned: + * true if there is more to decode or false if we are done or on error. + * + * Example: + * struct pnfs_osd_xdr_decode_layout_iter iter; + * struct pnfs_osd_layout layout; + * struct pnfs_osd_object_cred comp; + * int status; + * + * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr); + * if (unlikely(status)) + * goto err; + * while (pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) { + * // All of @comp strings point to inside the xdr_buffer + * // or scratch buffer. Copy them out to user memory, e.g.: + * copy_single_comp(dest_comp++, &comp); + * } + * if (unlikely(status)) + * goto err; + */ + +struct pnfs_osd_xdr_decode_layout_iter { + unsigned total_comps; + unsigned decoded_comps; +}; + +extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout, + struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr); + +extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp, + struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr, + int *err); + +/* Device Info helpers */ + +/* Note: All strings inside @deviceaddr point to space inside @p. + * @p should stay valid while @deviceaddr is in use.
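+ *
+ * A minimal decode sketch (illustrative only; "daddr" and "p" are
+ * hypothetical caller-side names, with @p pointing at the XDR-encoded
+ * device address as described above):
+ *
+ *	struct pnfs_osd_deviceaddr daddr;
+ *
+ *	pnfs_osd_xdr_decode_deviceaddr(&daddr, p);
+ *	// daddr.oda_osdname.data and the other embedded strings still point
+ *	// into @p, so @p must outlive any use of daddr.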
+ */ +extern void pnfs_osd_xdr_decode_deviceaddr( + struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p); + +/* layoutupdate (layout_commit) xdr helpers */ +extern int +pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr, + struct pnfs_osd_layoutupdate *lou); + +/* osd_ioerror encoding (layout_return) */ +extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr); +extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr); + +#endif /* __PNFS_OSD_XDR_H__ */ diff --git a/include/linux/pnp.h b/include/linux/pnp.h new file mode 100644 index 00000000..195aafc6 --- /dev/null +++ b/include/linux/pnp.h @@ -0,0 +1,505 @@ +/* + * Linux Plug and Play Support + * Copyright by Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> + */ + +#ifndef _LINUX_PNP_H +#define _LINUX_PNP_H + +#include <linux/device.h> +#include <linux/list.h> +#include <linux/errno.h> +#include <linux/mod_devicetable.h> + +#define PNP_NAME_LEN 50 + +struct pnp_protocol; +struct pnp_dev; + +/* + * Resource Management + */ +#ifdef CONFIG_PNP +struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned long type, + unsigned int num); +#else +static inline struct resource *pnp_get_resource(struct pnp_dev *dev, + unsigned long type, unsigned int num) +{ + return NULL; +} +#endif + +static inline int pnp_resource_valid(struct resource *res) +{ + if (res) + return 1; + return 0; +} + +static inline int pnp_resource_enabled(struct resource *res) +{ + if (res && !(res->flags & IORESOURCE_DISABLED)) + return 1; + return 0; +} + +static inline resource_size_t pnp_resource_len(struct resource *res) +{ + if (res->start == 0 && res->end == 0) + return 0; + return resource_size(res); +} + + +static inline resource_size_t pnp_port_start(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return res->start; + return 0; +} + +static inline resource_size_t pnp_port_end(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return res->end; + return 0; +} + +static inline unsigned long pnp_port_flags(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_IO | IORESOURCE_AUTO; +} + +static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar) +{ + return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IO, bar)); +} + +static inline resource_size_t pnp_port_len(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar); + + if (pnp_resource_valid(res)) + return pnp_resource_len(res); + return 0; +} + + +static inline resource_size_t pnp_mem_start(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return res->start; + return 0; +} + +static inline resource_size_t pnp_mem_end(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return res->end; + return 0; +} + +static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return res->flags; 
+ return IORESOURCE_MEM | IORESOURCE_AUTO; +} + +static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar) +{ + return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_MEM, bar)); +} + +static inline resource_size_t pnp_mem_len(struct pnp_dev *dev, + unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar); + + if (pnp_resource_valid(res)) + return pnp_resource_len(res); + return 0; +} + + +static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar); + + if (pnp_resource_valid(res)) + return res->start; + return -1; +} + +static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_IRQ | IORESOURCE_AUTO; +} + +static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar) +{ + return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IRQ, bar)); +} + + +static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar); + + if (pnp_resource_valid(res)) + return res->start; + return -1; +} + +static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar) +{ + struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar); + + if (pnp_resource_valid(res)) + return res->flags; + return IORESOURCE_DMA | IORESOURCE_AUTO; +} + +static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar) +{ + return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_DMA, bar)); +} + + +/* + * Device Management + */ + +struct pnp_card { + struct device dev; /* Driver Model device interface */ + unsigned char number; /* used as an index, must be unique */ + struct list_head global_list; /* node in global list of cards */ + struct list_head protocol_list; /* node in protocol's list of cards */ + struct list_head devices; /* devices attached to the card */ + + struct pnp_protocol *protocol; + struct pnp_id *id; /* contains supported EISA IDs */ + + char name[PNP_NAME_LEN]; /* contains a human-readable name */ + unsigned char pnpver; /* Plug & Play version */ + unsigned char productver; /* product version */ + unsigned int serial; /* serial number */ + unsigned char checksum; /* if zero - checksum passed */ + struct proc_dir_entry *procdir; /* directory entry in /proc/bus/isapnp */ +}; + +#define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list) +#define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list) +#define to_pnp_card(n) container_of(n, struct pnp_card, dev) +#define pnp_for_each_card(card) \ + for((card) = global_to_pnp_card(pnp_cards.next); \ + (card) != global_to_pnp_card(&pnp_cards); \ + (card) = global_to_pnp_card((card)->global_list.next)) + +struct pnp_card_link { + struct pnp_card *card; + struct pnp_card_driver *driver; + void *driver_data; + pm_message_t pm_state; +}; + +static inline void *pnp_get_card_drvdata(struct pnp_card_link *pcard) +{ + return pcard->driver_data; +} + +static inline void pnp_set_card_drvdata(struct pnp_card_link *pcard, void *data) +{ + pcard->driver_data = data; +} + +struct pnp_dev { + struct device dev; /* Driver Model device interface */ + u64 dma_mask; + unsigned int number; /* used as an index, must be unique */ + int status; + + struct list_head global_list; /* node in global list of devices */ + struct list_head 
protocol_list; /* node in list of device's protocol */ + struct list_head card_list; /* node in card's list of devices */ + struct list_head rdev_list; /* node in cards list of requested devices */ + + struct pnp_protocol *protocol; + struct pnp_card *card; /* card the device is attached to, none if NULL */ + struct pnp_driver *driver; + struct pnp_card_link *card_link; + + struct pnp_id *id; /* supported EISA IDs */ + + int active; + int capabilities; + unsigned int num_dependent_sets; + struct list_head resources; + struct list_head options; + + char name[PNP_NAME_LEN]; /* contains a human-readable name */ + int flags; /* used by protocols */ + struct proc_dir_entry *procent; /* device entry in /proc/bus/isapnp */ + void *data; +}; + +#define global_to_pnp_dev(n) list_entry(n, struct pnp_dev, global_list) +#define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list) +#define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list) +#define to_pnp_dev(n) container_of(n, struct pnp_dev, dev) +#define pnp_for_each_dev(dev) \ + for((dev) = global_to_pnp_dev(pnp_global.next); \ + (dev) != global_to_pnp_dev(&pnp_global); \ + (dev) = global_to_pnp_dev((dev)->global_list.next)) +#define card_for_each_dev(card,dev) \ + for((dev) = card_to_pnp_dev((card)->devices.next); \ + (dev) != card_to_pnp_dev(&(card)->devices); \ + (dev) = card_to_pnp_dev((dev)->card_list.next)) +#define pnp_dev_name(dev) (dev)->name + +static inline void *pnp_get_drvdata(struct pnp_dev *pdev) +{ + return dev_get_drvdata(&pdev->dev); +} + +static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data) +{ + dev_set_drvdata(&pdev->dev, data); +} + +struct pnp_fixup { + char id[7]; + void (*quirk_function) (struct pnp_dev * dev); /* fixup function */ +}; + +/* config parameters */ +#define PNP_CONFIG_NORMAL 0x0001 +#define PNP_CONFIG_FORCE 0x0002 /* disables validity checking */ + +/* capabilities */ +#define PNP_READ 0x0001 +#define PNP_WRITE 0x0002 +#define PNP_DISABLE 0x0004 +#define PNP_CONFIGURABLE 0x0008 +#define PNP_REMOVABLE 0x0010 + +#define pnp_can_read(dev) (((dev)->protocol->get) && \ + ((dev)->capabilities & PNP_READ)) +#define pnp_can_write(dev) (((dev)->protocol->set) && \ + ((dev)->capabilities & PNP_WRITE)) +#define pnp_can_disable(dev) (((dev)->protocol->disable) && \ + ((dev)->capabilities & PNP_DISABLE)) +#define pnp_can_configure(dev) ((!(dev)->active) && \ + ((dev)->capabilities & PNP_CONFIGURABLE)) + +#ifdef CONFIG_ISAPNP +extern struct pnp_protocol isapnp_protocol; +#define pnp_device_is_isapnp(dev) ((dev)->protocol == (&isapnp_protocol)) +#else +#define pnp_device_is_isapnp(dev) 0 +#endif +extern struct mutex pnp_res_mutex; + +#ifdef CONFIG_PNPBIOS +extern struct pnp_protocol pnpbios_protocol; +#define pnp_device_is_pnpbios(dev) ((dev)->protocol == (&pnpbios_protocol)) +#else +#define pnp_device_is_pnpbios(dev) 0 +#endif + +#ifdef CONFIG_PNPACPI +extern struct pnp_protocol pnpacpi_protocol; + +static inline struct acpi_device *pnp_acpi_device(struct pnp_dev *dev) +{ + if (dev->protocol == &pnpacpi_protocol) + return dev->data; + return NULL; +} +#else +#define pnp_acpi_device(dev) 0 +#endif + +/* status */ +#define PNP_READY 0x0000 +#define PNP_ATTACHED 0x0001 +#define PNP_BUSY 0x0002 +#define PNP_FAULTY 0x0004 + +/* isapnp specific macros */ + +#define isapnp_card_number(dev) ((dev)->card ? 
(dev)->card->number : -1) +#define isapnp_csn_number(dev) ((dev)->number) + +/* + * Driver Management + */ + +struct pnp_id { + char id[PNP_ID_LEN]; + struct pnp_id *next; +}; + +struct pnp_driver { + char *name; + const struct pnp_device_id *id_table; + unsigned int flags; + int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id); + void (*remove) (struct pnp_dev *dev); + void (*shutdown) (struct pnp_dev *dev); + int (*suspend) (struct pnp_dev *dev, pm_message_t state); + int (*resume) (struct pnp_dev *dev); + struct device_driver driver; +}; + +#define to_pnp_driver(drv) container_of(drv, struct pnp_driver, driver) + +struct pnp_card_driver { + struct list_head global_list; + char *name; + const struct pnp_card_device_id *id_table; + unsigned int flags; + int (*probe) (struct pnp_card_link *card, + const struct pnp_card_device_id *card_id); + void (*remove) (struct pnp_card_link *card); + int (*suspend) (struct pnp_card_link *card, pm_message_t state); + int (*resume) (struct pnp_card_link *card); + struct pnp_driver link; +}; + +#define to_pnp_card_driver(drv) container_of(drv, struct pnp_card_driver, link) + +/* pnp driver flags */ +#define PNP_DRIVER_RES_DO_NOT_CHANGE 0x0001 /* do not change the state of the device */ +#define PNP_DRIVER_RES_DISABLE 0x0003 /* ensure the device is disabled */ + +/* + * Protocol Management + */ + +struct pnp_protocol { + struct list_head protocol_list; + char *name; + + /* resource control functions */ + int (*get) (struct pnp_dev *dev); + int (*set) (struct pnp_dev *dev); + int (*disable) (struct pnp_dev *dev); + + /* protocol specific suspend/resume */ + bool (*can_wakeup) (struct pnp_dev *dev); + int (*suspend) (struct pnp_dev * dev, pm_message_t state); + int (*resume) (struct pnp_dev * dev); + + /* used by pnp layer only (look but don't touch) */ + unsigned char number; /* protocol number */ + struct device dev; /* link to driver model */ + struct list_head cards; + struct list_head devices; +}; + +#define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list) +#define protocol_for_each_card(protocol,card) \ + for((card) = protocol_to_pnp_card((protocol)->cards.next); \ + (card) != protocol_to_pnp_card(&(protocol)->cards); \ + (card) = protocol_to_pnp_card((card)->protocol_list.next)) +#define protocol_for_each_dev(protocol,dev) \ + for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \ + (dev) != protocol_to_pnp_dev(&(protocol)->devices); \ + (dev) = protocol_to_pnp_dev((dev)->protocol_list.next)) + +extern struct bus_type pnp_bus_type; + +#if defined(CONFIG_PNP) + +/* device management */ +int pnp_device_attach(struct pnp_dev *pnp_dev); +void pnp_device_detach(struct pnp_dev *pnp_dev); +extern struct list_head pnp_global; +extern int pnp_platform_devices; + +/* multidevice card support */ +struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, + const char *id, struct pnp_dev *from); +void pnp_release_card_device(struct pnp_dev *dev); +int pnp_register_card_driver(struct pnp_card_driver *drv); +void pnp_unregister_card_driver(struct pnp_card_driver *drv); +extern struct list_head pnp_cards; + +/* resource management */ +int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t base, + resource_size_t size); +int pnp_auto_config_dev(struct pnp_dev *dev); +int pnp_start_dev(struct pnp_dev *dev); +int pnp_stop_dev(struct pnp_dev *dev); +int pnp_activate_dev(struct pnp_dev *dev); +int pnp_disable_dev(struct pnp_dev *dev); +int pnp_range_reserved(resource_size_t start, resource_size_t end); 
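+
+/*
+ * A minimal usage sketch (illustrative only; the "foo" identifiers are
+ * hypothetical and not part of this header). A typical PnP driver reads
+ * its decoded resources with the accessors defined earlier in this file
+ * and hooks into the bus through a struct pnp_driver:
+ *
+ *	static int foo_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
+ *	{
+ *		resource_size_t base;
+ *
+ *		if (!pnp_port_valid(dev, 0) || !pnp_irq_valid(dev, 0))
+ *			return -ENODEV;
+ *		base = pnp_port_start(dev, 0);
+ *		// program the device at "base", request pnp_irq(dev, 0), etc.
+ *		return 0;
+ *	}
+ *
+ *	static struct pnp_driver foo_driver = {
+ *		.name = "foo",
+ *		.id_table = foo_id_table,
+ *		.probe = foo_probe,
+ *	};
+ *
+ *	// registered from module init with pnp_register_driver(&foo_driver)
+ */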
+ +/* protocol helpers */ +int pnp_is_active(struct pnp_dev *dev); +int compare_pnp_id(struct pnp_id *pos, const char *id); +int pnp_register_driver(struct pnp_driver *drv); +void pnp_unregister_driver(struct pnp_driver *drv); + +#else + +/* device management */ +static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; } +static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { } + +#define pnp_platform_devices 0 + +/* multidevice card support */ +static inline struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, const char *id, struct pnp_dev *from) { return NULL; } +static inline void pnp_release_card_device(struct pnp_dev *dev) { } +static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return -ENODEV; } +static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { } + +/* resource management */ +static inline int pnp_possible_config(struct pnp_dev *dev, int type, + resource_size_t base, + resource_size_t size) { return 0; } +static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;} + +/* protocol helpers */ +static inline int pnp_is_active(struct pnp_dev *dev) { return 0; } +static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -ENODEV; } +static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; } +static inline void pnp_unregister_driver(struct pnp_driver *drv) { } + +#endif /* CONFIG_PNP */ + +#endif /* _LINUX_PNP_H */ diff --git a/include/linux/poison.h b/include/linux/poison.h new file mode 100644 index 00000000..2110a81c --- /dev/null +++ b/include/linux/poison.h @@ -0,0 +1,89 @@ +#ifndef _LINUX_POISON_H +#define _LINUX_POISON_H + +/********** include/linux/list.h **********/ + +/* + * Architectures might want to move the poison pointer offset + * into some well-recognized area such as 0xdead000000000000, + * that is also not mappable by user-space exploits: + */ +#ifdef CONFIG_ILLEGAL_POINTER_VALUE +# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL) +#else +# define POISON_POINTER_DELTA 0 +#endif + +/* + * These are non-NULL pointers that will result in page faults + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) +#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) + +/********** include/linux/timer.h **********/ +/* + * Magic number "tsta" to indicate a static timer initializer + * for the object debugging code. + */ +#define TIMER_ENTRY_STATIC ((void *) 0x74737461) + +/********** mm/debug-pagealloc.c **********/ +#define PAGE_POISON 0xaa + +/********** mm/slab.c **********/ +/* + * Magic nums for obj red zoning. + * Placed in the first word before and the first word after an obj. 
+ */ +#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ +#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ + +#define SLUB_RED_INACTIVE 0xbb +#define SLUB_RED_ACTIVE 0xcc + +/* ...and for poisoning */ +#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ +#define POISON_FREE 0x6b /* for use-after-free poisoning */ +#define POISON_END 0xa5 /* end-byte of poisoning */ + +/********** arch/$ARCH/mm/init.c **********/ +#define POISON_FREE_INITMEM 0xcc + +/********** arch/ia64/hp/common/sba_iommu.c **********/ +/* + * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a + * value of "SBAIOMMU POISON\0" for spill-over poisoning. + */ + +/********** fs/jbd/journal.c **********/ +#define JBD_POISON_FREE 0x5b +#define JBD2_POISON_FREE 0x5c + +/********** drivers/base/dmapool.c **********/ +#define POOL_POISON_FREED 0xa7 /* !inuse */ +#define POOL_POISON_ALLOCATED 0xa9 /* !initted */ + +/********** drivers/atm/ **********/ +#define ATM_POISON_FREE 0x12 +#define ATM_POISON 0xdeadbeef + +/********** net/ **********/ +#define NEIGHBOR_DEAD 0xdeadbeef +#define NETFILTER_LINK_POISON 0xdead57ac + +/********** kernel/mutexes **********/ +#define MUTEX_DEBUG_INIT 0x11 +#define MUTEX_DEBUG_FREE 0x22 + +/********** lib/flex_array.c **********/ +#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ + +/********** security/ **********/ +#define KEY_DESTROY 0xbd + +/********** sound/oss/ **********/ +#define OSS_POISON_FREE 0xAB + +#endif diff --git a/include/linux/poll.h b/include/linux/poll.h new file mode 100644 index 00000000..48fe8bc3 --- /dev/null +++ b/include/linux/poll.h @@ -0,0 +1,168 @@ +#ifndef _LINUX_POLL_H +#define _LINUX_POLL_H + +#include <asm/poll.h> + +#ifdef __KERNEL__ + +#include <linux/compiler.h> +#include <linux/ktime.h> +#include <linux/wait.h> +#include <linux/string.h> +#include <linux/fs.h> +#include <linux/sysctl.h> +#include <asm/uaccess.h> + +extern struct ctl_table epoll_table[]; /* for sysctl */ +/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating + additional memory. */ +#define MAX_STACK_ALLOC 832 +#define FRONTEND_STACK_ALLOC 256 +#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC +#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC +#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC) +#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry)) + +#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM) + +struct poll_table_struct; + +/* + * structures and helpers for f_op->poll implementations + */ +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +/* + * Do not touch the structure directly, use the access functions + * poll_does_not_wait() and poll_requested_events() instead. + */ +typedef struct poll_table_struct { + poll_queue_proc _qproc; + unsigned long _key; +} poll_table; + +static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) +{ + if (p && p->_qproc && wait_address) + p->_qproc(filp, wait_address, p); +} + +/* + * Return true if it is guaranteed that poll will not wait. This is the case + * if the poll() of another file descriptor in the set got an event, so there + * is no need for waiting. + */ +static inline bool poll_does_not_wait(const poll_table *p) +{ + return p == NULL || p->_qproc == NULL; +} + +/* + * Return the set of events that the application wants to poll for. 
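As an aside on the poll_wait() helper defined above: a driver's .poll method typically looks like the following hypothetical sketch, where the device structure and readiness flags are made up for illustration and <linux/poll.h> plus <linux/fs.h> are assumed to be included.

/* Hypothetical example -- not part of poll.h. */
struct example_dev {
	wait_queue_head_t read_wq;
	bool readable;
	bool writable;
};

static unsigned int example_poll(struct file *file, poll_table *wait)
{
	struct example_dev *dev = file->private_data;
	unsigned int mask = 0;

	/* Registers the wait queue with the poll table; never blocks here. */
	poll_wait(file, &dev->read_wq, wait);

	if (dev->readable)
		mask |= POLLIN | POLLRDNORM;
	if (dev->writable)
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

The poll_requested_events() helper, whose description continues just below, lets such a driver inspect which events user space actually asked for before doing expensive work.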
+ * This is useful for drivers that need to know whether a DMA transfer has + * to be started implicitly on poll(). You typically only want to do that + * if the application is actually polling for POLLIN and/or POLLOUT. + */ +static inline unsigned long poll_requested_events(const poll_table *p) +{ + return p ? p->_key : ~0UL; +} + +static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) +{ + pt->_qproc = qproc; + pt->_key = ~0UL; /* all events enabled */ +} + +struct poll_table_entry { + struct file *filp; + unsigned long key; + wait_queue_t wait; + wait_queue_head_t *wait_address; +}; + +/* + * Structures and helpers for select/poll syscall + */ +struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES]; +}; + +extern void poll_initwait(struct poll_wqueues *pwq); +extern void poll_freewait(struct poll_wqueues *pwq); +extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, + ktime_t *expires, unsigned long slack); +extern long select_estimate_accuracy(struct timespec *tv); + + +static inline int poll_schedule(struct poll_wqueues *pwq, int state) +{ + return poll_schedule_timeout(pwq, state, NULL, 0); +} + +/* + * Scalable version of the fd_set. + */ + +typedef struct { + unsigned long *in, *out, *ex; + unsigned long *res_in, *res_out, *res_ex; +} fd_set_bits; + +/* + * How many longwords for "nr" bits? + */ +#define FDS_BITPERLONG (8*sizeof(long)) +#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG) +#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long)) + +/* + * We do a VERIFY_WRITE here even though we are only reading this time: + * we'll write to it eventually.. + * + * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned. + */ +static inline +int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset) +{ + nr = FDS_BYTES(nr); + if (ufdset) + return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0; + + memset(fdset, 0, nr); + return 0; +} + +static inline unsigned long __must_check +set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset) +{ + if (ufdset) + return __copy_to_user(ufdset, fdset, FDS_BYTES(nr)); + return 0; +} + +static inline +void zero_fd_set(unsigned long nr, unsigned long *fdset) +{ + memset(fdset, 0, FDS_BYTES(nr)); +} + +#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) + +extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time); +extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds, + struct timespec *end_time); +extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, + fd_set __user *exp, struct timespec *end_time); + +extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec); + +#endif /* KERNEL */ + +#endif /* _LINUX_POLL_H */ diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h new file mode 100644 index 00000000..34c4498b --- /dev/null +++ b/include/linux/posix-clock.h @@ -0,0 +1,151 @@ +/* + * posix-clock.h - support for dynamic clock devices + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef _LINUX_POSIX_CLOCK_H_ +#define _LINUX_POSIX_CLOCK_H_ + +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/posix-timers.h> +#include <linux/rwsem.h> + +struct posix_clock; + +/** + * struct posix_clock_operations - functional interface to the clock + * + * Every posix clock is represented by a character device. Drivers may + * optionally offer extended capabilities by implementing the + * character device methods. The character device file operations are + * first handled by the clock device layer, then passed on to the + * driver by calling these functions. + * + * @owner: The clock driver should set to THIS_MODULE + * @clock_adjtime: Adjust the clock + * @clock_gettime: Read the current time + * @clock_getres: Get the clock resolution + * @clock_settime: Set the current time value + * @timer_create: Create a new timer + * @timer_delete: Remove a previously created timer + * @timer_gettime: Get remaining time and interval of a timer + * @timer_settime: Set a timer's initial expiration and interval + * @fasync: Optional character device fasync method + * @mmap: Optional character device mmap method + * @open: Optional character device open method + * @release: Optional character device release method + * @ioctl: Optional character device ioctl method + * @read: Optional character device read method + * @poll: Optional character device poll method + */ +struct posix_clock_operations { + struct module *owner; + + int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); + + int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts); + + int (*clock_getres) (struct posix_clock *pc, struct timespec *ts); + + int (*clock_settime)(struct posix_clock *pc, + const struct timespec *ts); + + int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit); + + int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit); + + void (*timer_gettime)(struct posix_clock *pc, + struct k_itimer *kit, struct itimerspec *tsp); + + int (*timer_settime)(struct posix_clock *pc, + struct k_itimer *kit, int flags, + struct itimerspec *tsp, struct itimerspec *old); + /* + * Optional character device methods: + */ + int (*fasync) (struct posix_clock *pc, + int fd, struct file *file, int on); + + long (*ioctl) (struct posix_clock *pc, + unsigned int cmd, unsigned long arg); + + int (*mmap) (struct posix_clock *pc, + struct vm_area_struct *vma); + + int (*open) (struct posix_clock *pc, fmode_t f_mode); + + uint (*poll) (struct posix_clock *pc, + struct file *file, poll_table *wait); + + int (*release) (struct posix_clock *pc); + + ssize_t (*read) (struct posix_clock *pc, + uint flags, char __user *buf, size_t cnt); +}; + +/** + * struct posix_clock - represents a dynamic posix clock + * + * @ops: Functional interface to the clock + * @cdev: Character device instance for this clock + * @kref: Reference count. + * @rwsem: Protects the 'zombie' field from concurrent access. + * @zombie: If 'zombie' is true, then the hardware has disappeared. 
+ * @release: A function to free the structure when the reference count reaches + * zero. May be NULL if structure is statically allocated. + * + * Drivers should embed their struct posix_clock within a private + * structure, obtaining a reference to it during callbacks using + * container_of(). + */ +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct kref kref; + struct rw_semaphore rwsem; + bool zombie; + void (*release)(struct posix_clock *clk); +}; + +/** + * posix_clock_register() - register a new clock + * @clk: Pointer to the clock. Caller must provide 'ops' and 'release' + * @devid: Allocated device id + * + * A clock driver calls this function to register itself with the + * clock device subsystem. If 'clk' points to dynamically allocated + * memory, then the caller must provide a 'release' function to free + * that memory. + * + * Returns zero on success, non-zero otherwise. + */ +int posix_clock_register(struct posix_clock *clk, dev_t devid); + +/** + * posix_clock_unregister() - unregister a clock + * @clk: Clock instance previously registered via posix_clock_register() + * + * A clock driver calls this function to remove itself from the clock + * device subsystem. The posix_clock itself will remain (in an + * inactive state) until its reference count drops to zero, at which + * point it will be deallocated with its 'release' method. + */ +void posix_clock_unregister(struct posix_clock *clk); + +#endif diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h new file mode 100644 index 00000000..042058fd --- /dev/null +++ b/include/linux/posix-timers.h @@ -0,0 +1,132 @@ +#ifndef _linux_POSIX_TIMERS_H +#define _linux_POSIX_TIMERS_H + +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/sched.h> +#include <linux/timex.h> +#include <linux/alarmtimer.h> + +union cpu_time_count { + cputime_t cpu; + unsigned long long sched; +}; + +struct cpu_timer_list { + struct list_head entry; + union cpu_time_count expires, incr; + struct task_struct *task; + int firing; +}; + +/* + * Bit fields within a clockid: + * + * The most significant 29 bits hold either a pid or a file descriptor. + * + * Bit 2 indicates whether a cpu clock refers to a thread or a process. + * + * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3. + * + * A clockid is invalid if bits 2, 1, and 0 are all set. + */ +#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3)) +#define CPUCLOCK_PERTHREAD(clock) \ + (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0) + +#define CPUCLOCK_PERTHREAD_MASK 4 +#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK) +#define CPUCLOCK_CLOCK_MASK 3 +#define CPUCLOCK_PROF 0 +#define CPUCLOCK_VIRT 1 +#define CPUCLOCK_SCHED 2 +#define CPUCLOCK_MAX 3 +#define CLOCKFD CPUCLOCK_MAX +#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) + +#define MAKE_PROCESS_CPUCLOCK(pid, clock) \ + ((~(clockid_t) (pid) << 3) | (clockid_t) (clock)) +#define MAKE_THREAD_CPUCLOCK(tid, clock) \ + MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK) + +#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD) +#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3)) + +/* POSIX.1b interval timer structure. 
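To make the clockid bit layout described above concrete, here is a tiny hypothetical round-trip using the macros defined in this header; the function is illustrative only and not part of posix-timers.h.

/* Hypothetical example -- not part of posix-timers.h. */
static inline void example_clockid_roundtrip(void)
{
	clockid_t clk = FD_TO_CLOCKID(3);	/* encode fd 3: (~3 << 3) | CLOCKFD */
	unsigned int fd = CLOCKID_TO_FD(clk);	/* decodes back to 3 */

	(void)clk;
	(void)fd;
}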
*/ +struct k_itimer { + struct list_head list; /* free/ allocate list */ + spinlock_t it_lock; + clockid_t it_clock; /* which timer type */ + timer_t it_id; /* timer id */ + int it_overrun; /* overrun on pending signal */ + int it_overrun_last; /* overrun on last delivered signal */ + int it_requeue_pending; /* waiting to requeue this timer */ +#define REQUEUE_PENDING 1 + int it_sigev_notify; /* notify word of sigevent struct */ + struct signal_struct *it_signal; + union { + struct pid *it_pid; /* pid of process to send signal to */ + struct task_struct *it_process; /* for clock_nanosleep */ + }; + struct sigqueue *sigq; /* signal queue entry. */ + union { + struct { + struct hrtimer timer; + ktime_t interval; + } real; + struct cpu_timer_list cpu; + struct { + unsigned int clock; + unsigned int node; + unsigned long incr; + unsigned long expires; + } mmtimer; + struct { + struct alarm alarmtimer; + ktime_t interval; + } alarm; + struct rcu_head rcu; + } it; +}; + +struct k_clock { + int (*clock_getres) (const clockid_t which_clock, struct timespec *tp); + int (*clock_set) (const clockid_t which_clock, + const struct timespec *tp); + int (*clock_get) (const clockid_t which_clock, struct timespec * tp); + int (*clock_adj) (const clockid_t which_clock, struct timex *tx); + int (*timer_create) (struct k_itimer *timer); + int (*nsleep) (const clockid_t which_clock, int flags, + struct timespec *, struct timespec __user *); + long (*nsleep_restart) (struct restart_block *restart_block); + int (*timer_set) (struct k_itimer * timr, int flags, + struct itimerspec * new_setting, + struct itimerspec * old_setting); + int (*timer_del) (struct k_itimer * timr); +#define TIMER_RETRY 1 + void (*timer_get) (struct k_itimer * timr, + struct itimerspec * cur_setting); +}; + +extern struct k_clock clock_posix_cpu; +extern struct k_clock clock_posix_dynamic; + +void posix_timers_register_clock(const clockid_t clock_id, struct k_clock *new_clock); + +/* function to call to trigger timer event */ +int posix_timer_event(struct k_itimer *timr, int si_private); + +void posix_cpu_timer_schedule(struct k_itimer *timer); + +void run_posix_cpu_timers(struct task_struct *task); +void posix_cpu_timers_exit(struct task_struct *task); +void posix_cpu_timers_exit_group(struct task_struct *task); + +void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, + cputime_t *newval, cputime_t *oldval); + +long clock_nanosleep_restart(struct restart_block *restart_block); + +void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); + +#endif diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h new file mode 100644 index 00000000..11bad91c --- /dev/null +++ b/include/linux/posix_acl.h @@ -0,0 +1,172 @@ +/* + File: linux/posix_acl.h + + (C) 2002 Andreas Gruenbacher, <a.gruenbacher@computer.org> +*/ + + +#ifndef __LINUX_POSIX_ACL_H +#define __LINUX_POSIX_ACL_H + +#include <linux/bug.h> +#include <linux/slab.h> +#include <linux/rcupdate.h> + +#define ACL_UNDEFINED_ID (-1) + +/* a_type field in acl_user_posix_entry_t */ +#define ACL_TYPE_ACCESS (0x8000) +#define ACL_TYPE_DEFAULT (0x4000) + +/* e_tag entry in struct posix_acl_entry */ +#define ACL_USER_OBJ (0x01) +#define ACL_USER (0x02) +#define ACL_GROUP_OBJ (0x04) +#define ACL_GROUP (0x08) +#define ACL_MASK (0x10) +#define ACL_OTHER (0x20) + +/* permissions in the e_perm field */ +#define ACL_READ (0x04) +#define ACL_WRITE (0x02) +#define ACL_EXECUTE (0x01) +//#define ACL_ADD (0x08) +//#define ACL_DELETE (0x10) + +struct 
posix_acl_entry { + short e_tag; + unsigned short e_perm; + unsigned int e_id; +}; + +struct posix_acl { + union { + atomic_t a_refcount; + struct rcu_head a_rcu; + }; + unsigned int a_count; + struct posix_acl_entry a_entries[0]; +}; + +#define FOREACH_ACL_ENTRY(pa, acl, pe) \ + for(pa=(acl)->a_entries, pe=pa+(acl)->a_count; pa<pe; pa++) + + +/* + * Duplicate an ACL handle. + */ +static inline struct posix_acl * +posix_acl_dup(struct posix_acl *acl) +{ + if (acl) + atomic_inc(&acl->a_refcount); + return acl; +} + +/* + * Free an ACL handle. + */ +static inline void +posix_acl_release(struct posix_acl *acl) +{ + if (acl && atomic_dec_and_test(&acl->a_refcount)) + kfree_rcu(acl, a_rcu); +} + + +/* posix_acl.c */ + +extern void posix_acl_init(struct posix_acl *, int); +extern struct posix_acl *posix_acl_alloc(int, gfp_t); +extern int posix_acl_valid(const struct posix_acl *); +extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); +extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); +extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); +extern int posix_acl_create(struct posix_acl **, gfp_t, umode_t *); +extern int posix_acl_chmod(struct posix_acl **, gfp_t, umode_t); + +extern struct posix_acl *get_posix_acl(struct inode *, int); +extern int set_posix_acl(struct inode *, int, struct posix_acl *); + +#ifdef CONFIG_FS_POSIX_ACL +static inline struct posix_acl **acl_by_type(struct inode *inode, int type) +{ + switch (type) { + case ACL_TYPE_ACCESS: + return &inode->i_acl; + case ACL_TYPE_DEFAULT: + return &inode->i_default_acl; + default: + BUG(); + } +} + +static inline struct posix_acl *get_cached_acl(struct inode *inode, int type) +{ + struct posix_acl **p = acl_by_type(inode, type); + struct posix_acl *acl = ACCESS_ONCE(*p); + if (acl) { + spin_lock(&inode->i_lock); + acl = *p; + if (acl != ACL_NOT_CACHED) + acl = posix_acl_dup(acl); + spin_unlock(&inode->i_lock); + } + return acl; +} + +static inline struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type) +{ + return rcu_dereference(*acl_by_type(inode, type)); +} + +static inline void set_cached_acl(struct inode *inode, + int type, + struct posix_acl *acl) +{ + struct posix_acl **p = acl_by_type(inode, type); + struct posix_acl *old; + spin_lock(&inode->i_lock); + old = *p; + rcu_assign_pointer(*p, posix_acl_dup(acl)); + spin_unlock(&inode->i_lock); + if (old != ACL_NOT_CACHED) + posix_acl_release(old); +} + +static inline void forget_cached_acl(struct inode *inode, int type) +{ + struct posix_acl **p = acl_by_type(inode, type); + struct posix_acl *old; + spin_lock(&inode->i_lock); + old = *p; + *p = ACL_NOT_CACHED; + spin_unlock(&inode->i_lock); + if (old != ACL_NOT_CACHED) + posix_acl_release(old); +} + +static inline void forget_all_cached_acls(struct inode *inode) +{ + struct posix_acl *old_access, *old_default; + spin_lock(&inode->i_lock); + old_access = inode->i_acl; + old_default = inode->i_default_acl; + inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; + spin_unlock(&inode->i_lock); + if (old_access != ACL_NOT_CACHED) + posix_acl_release(old_access); + if (old_default != ACL_NOT_CACHED) + posix_acl_release(old_default); +} +#endif + +static inline void cache_no_acl(struct inode *inode) +{ +#ifdef CONFIG_FS_POSIX_ACL + inode->i_acl = NULL; + inode->i_default_acl = NULL; +#endif +} + +#endif /* __LINUX_POSIX_ACL_H */ diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h new file mode 100644 index 00000000..6e53c340 --- /dev/null +++ 
b/include/linux/posix_acl_xattr.h @@ -0,0 +1,58 @@ +/* + File: linux/posix_acl_xattr.h + + Extended attribute system call representation of Access Control Lists. + + Copyright (C) 2000 by Andreas Gruenbacher <a.gruenbacher@computer.org> + Copyright (C) 2002 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com> + */ +#ifndef _POSIX_ACL_XATTR_H +#define _POSIX_ACL_XATTR_H + +#include <linux/posix_acl.h> + +/* Extended attribute names */ +#define POSIX_ACL_XATTR_ACCESS "system.posix_acl_access" +#define POSIX_ACL_XATTR_DEFAULT "system.posix_acl_default" + +/* Supported ACL a_version fields */ +#define POSIX_ACL_XATTR_VERSION 0x0002 + + +/* An undefined entry e_id value */ +#define ACL_UNDEFINED_ID (-1) + +typedef struct { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +} posix_acl_xattr_entry; + +typedef struct { + __le32 a_version; + posix_acl_xattr_entry a_entries[0]; +} posix_acl_xattr_header; + + +static inline size_t +posix_acl_xattr_size(int count) +{ + return (sizeof(posix_acl_xattr_header) + + (count * sizeof(posix_acl_xattr_entry))); +} + +static inline int +posix_acl_xattr_count(size_t size) +{ + if (size < sizeof(posix_acl_xattr_header)) + return -1; + size -= sizeof(posix_acl_xattr_header); + if (size % sizeof(posix_acl_xattr_entry)) + return -1; + return size / sizeof(posix_acl_xattr_entry); +} + +struct posix_acl *posix_acl_from_xattr(const void *value, size_t size); +int posix_acl_to_xattr(const struct posix_acl *acl, void *buffer, size_t size); + +#endif /* _POSIX_ACL_XATTR_H */ diff --git a/include/linux/posix_types.h b/include/linux/posix_types.h new file mode 100644 index 00000000..f04c98cf --- /dev/null +++ b/include/linux/posix_types.h @@ -0,0 +1,49 @@ +#ifndef _LINUX_POSIX_TYPES_H +#define _LINUX_POSIX_TYPES_H + +#include <linux/stddef.h> + +/* + * This allows for 1024 file descriptors: if NR_OPEN is ever grown + * beyond that you'll have to change this too. But 1024 fd's seem to be + * enough even for such "real" unices like OSF/1, so hopefully this is + * one limit that doesn't have to be changed [again]. + * + * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in + * <sys/time.h> (and thus <linux/time.h>) - but this is a more logical + * place for them. Solved by having dummy defines in <sys/time.h>. + */ + +/* + * Those macros may have been defined in <gnu/types.h>. But we always + * use the ones here. + */ +#undef __NFDBITS +#define __NFDBITS (8 * sizeof(unsigned long)) + +#undef __FD_SETSIZE +#define __FD_SETSIZE 1024 + +#undef __FDSET_LONGS +#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS) + +#undef __FDELT +#define __FDELT(d) ((d) / __NFDBITS) + +#undef __FDMASK +#define __FDMASK(d) (1UL << ((d) % __NFDBITS)) + +typedef struct { + unsigned long fds_bits [__FDSET_LONGS]; +} __kernel_fd_set; + +/* Type of a signal handler. */ +typedef void (*__kernel_sighandler_t)(int); + +/* Type of a SYSV IPC key. */ +typedef int __kernel_key_t; +typedef int __kernel_mqd_t; + +#include <asm/posix_types.h> + +#endif /* _LINUX_POSIX_TYPES_H */ diff --git a/include/linux/power/bq27x00_battery.h b/include/linux/power/bq27x00_battery.h new file mode 100644 index 00000000..a857f719 --- /dev/null +++ b/include/linux/power/bq27x00_battery.h @@ -0,0 +1,19 @@ +#ifndef __LINUX_BQ27X00_BATTERY_H__ +#define __LINUX_BQ27X00_BATTERY_H__ + +/** + * struct bq27000_plaform_data - Platform data for bq27000 devices + * @name: Name of the battery. If NULL the driver will fallback to "bq27000". + * @read: HDQ read callback. 
+ * This function should provide access to the HDQ bus the battery is + * connected to. + * The first parameter is a pointer to the battery device, the second the + * register to be read. The return value should either be the content of + * the passed register or an error value. + */ +struct bq27000_platform_data { + const char *name; + int (*read)(struct device *dev, unsigned int); +}; + +#endif diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h new file mode 100644 index 00000000..4f75e531 --- /dev/null +++ b/include/linux/power/charger-manager.h @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2011 Samsung Electronics Co., Ltd. + * MyungJoo.Ham <myungjoo.ham@samsung.com> + * + * Charger Manager. + * This framework enables control of multiple chargers and + * monitoring of charging even in the context of suspend-to-RAM, with + * an interface combining the chargers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _CHARGER_MANAGER_H +#define _CHARGER_MANAGER_H + +#include <linux/power_supply.h> + +enum data_source { + CM_FUEL_GAUGE, + CM_CHARGER_STAT, +}; + +enum polling_modes { + CM_POLL_DISABLE = 0, + CM_POLL_ALWAYS, + CM_POLL_EXTERNAL_POWER_ONLY, + CM_POLL_CHARGING_ONLY, +}; + +/** + * struct charger_global_desc + * @rtc_name: the name of the RTC used to wake up the system from suspend. + * @rtc_only_wakeup: + * If the system is woken up by wakeup sources other than the RTC or + * callbacks, Charger Manager should recognize this with + * rtc_only_wakeup() returning false. + * If the RTC given to CM is the only wakeup reason, + * rtc_only_wakeup should return true. + */ +struct charger_global_desc { + char *rtc_name; + + bool (*rtc_only_wakeup)(void); +}; + +/** + * struct charger_desc + * @psy_name: the name of power-supply-class for charger manager + * @polling_mode: + * Determine which polling mode will be used + * @fullbatt_uV: voltage in microvolts + * If it is not being charged and VBATT >= fullbatt_uV, + * it is assumed to be full. + * @polling_interval_ms: interval in milliseconds at which + * the charger manager will monitor battery health + * @battery_present: + * Specify where information about the existence of the battery can be obtained + * @psy_charger_stat: the names of the power supplies for the chargers + * @num_charger_regulators: the number of entries in charger_regulators + * @charger_regulators: array of regulator_bulk_data for chargers + * @psy_fuel_gauge: the name of the power supply for the fuel gauge + * @temperature_out_of_range: + * Determine whether the status is overheat or cold or normal.
+ * return_value > 0: overheat + * return_value == 0: normal + * return_value < 0: cold + * @measure_battery_temp: + * true: measure battery temperature + * false: measure ambient temperature + */ +struct charger_desc { + char *psy_name; + + enum polling_modes polling_mode; + unsigned int polling_interval_ms; + + unsigned int fullbatt_uV; + + enum data_source battery_present; + + char **psy_charger_stat; + + int num_charger_regulators; + struct regulator_bulk_data *charger_regulators; + + char *psy_fuel_gauge; + + int (*temperature_out_of_range)(int *mC); + bool measure_battery_temp; +}; + +#define PSY_NAME_MAX 30 + +/** + * struct charger_manager + * @entry: entry for list + * @dev: device pointer + * @desc: instance of charger_desc + * @fuel_gauge: power_supply for fuel gauge + * @charger_stat: array of power_supply for chargers + * @charger_enabled: the state of charger + * @emergency_stop: + * When setting true, stop charging + * @last_temp_mC: the measured temperature in milli-Celsius + * @psy_name_buf: the name of power-supply-class for charger manager + * @charger_psy: power_supply for charger manager + * @status_save_ext_pwr_inserted: + * saved status of external power before entering suspend-to-RAM + * @status_save_batt: + * saved status of battery before entering suspend-to-RAM + */ +struct charger_manager { + struct list_head entry; + struct device *dev; + struct charger_desc *desc; + + struct power_supply *fuel_gauge; + struct power_supply **charger_stat; + + bool charger_enabled; + + int emergency_stop; + int last_temp_mC; + + char psy_name_buf[PSY_NAME_MAX + 1]; + struct power_supply charger_psy; + + bool status_save_ext_pwr_inserted; + bool status_save_batt; +}; + +#ifdef CONFIG_CHARGER_MANAGER +extern int setup_charger_manager(struct charger_global_desc *gd); +extern bool cm_suspend_again(void); +#else +static void __maybe_unused setup_charger_manager(struct charger_global_desc *gd) +{ } + +static bool __maybe_unused cm_suspend_again(void) +{ + return false; +} +#endif + +#endif /* _CHARGER_MANAGER_H */ diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h new file mode 100644 index 00000000..de1dfe09 --- /dev/null +++ b/include/linux/power/gpio-charger.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#ifndef __LINUX_POWER_GPIO_CHARGER_H__ +#define __LINUX_POWER_GPIO_CHARGER_H__ + +#include <linux/power_supply.h> +#include <linux/types.h> + +/** + * struct gpio_charger_platform_data - platform_data for gpio_charger devices + * @name: Name for the chargers power_supply device + * @type: Type of the charger + * @gpio: GPIO which is used to indicate the chargers status + * @gpio_active_low: Should be set to 1 if the GPIO is active low otherwise 0 + * @supplied_to: Array of battery names to which this chargers supplies power + * @num_supplicants: Number of entries in the supplied_to array + */ +struct gpio_charger_platform_data { + const char *name; + enum power_supply_type type; + + int gpio; + int gpio_active_low; + + char **supplied_to; + size_t num_supplicants; +}; + +#endif diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h new file mode 100644 index 00000000..68096a6a --- /dev/null +++ b/include/linux/power/isp1704_charger.h @@ -0,0 +1,29 @@ +/* + * ISP1704 USB Charger Detection driver + * + * Copyright (C) 2011 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#ifndef __ISP1704_CHARGER_H +#define __ISP1704_CHARGER_H + +struct isp1704_charger_data { + void (*set_power)(bool on); +}; + +#endif diff --git a/include/linux/power/jz4740-battery.h b/include/linux/power/jz4740-battery.h new file mode 100644 index 00000000..19c9610c --- /dev/null +++ b/include/linux/power/jz4740-battery.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2009, Jiejing Zhang <kzjeef@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __JZ4740_BATTERY_H +#define __JZ4740_BATTERY_H + +struct jz_battery_platform_data { + struct power_supply_info info; + int gpio_charge; /* GPIO port of Charger state */ + int gpio_charge_active_low; +}; + +#endif diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h new file mode 100644 index 00000000..e01b167e --- /dev/null +++ b/include/linux/power/max17042_battery.h @@ -0,0 +1,196 @@ +/* + * Fuel gauge driver for Maxim 17042 / 8966 / 8997 + * Note that Maxim 8966 and 8997 are mfd and this is its subdevice. 
+ * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __MAX17042_BATTERY_H_ +#define __MAX17042_BATTERY_H_ + +#define MAX17042_STATUS_BattAbsent (1 << 3) +#define MAX17042_BATTERY_FULL (100) +#define MAX17042_DEFAULT_SNS_RESISTOR (10000) + +#define MAX17042_CHARACTERIZATION_DATA_SIZE 48 + +enum max17042_register { + MAX17042_STATUS = 0x00, + MAX17042_VALRT_Th = 0x01, + MAX17042_TALRT_Th = 0x02, + MAX17042_SALRT_Th = 0x03, + MAX17042_AtRate = 0x04, + MAX17042_RepCap = 0x05, + MAX17042_RepSOC = 0x06, + MAX17042_Age = 0x07, + MAX17042_TEMP = 0x08, + MAX17042_VCELL = 0x09, + MAX17042_Current = 0x0A, + MAX17042_AvgCurrent = 0x0B, + + MAX17042_SOC = 0x0D, + MAX17042_AvSOC = 0x0E, + MAX17042_RemCap = 0x0F, + MAX17042_FullCAP = 0x10, + MAX17042_TTE = 0x11, + MAX17042_V_empty = 0x12, + + MAX17042_RSLOW = 0x14, + + MAX17042_AvgTA = 0x16, + MAX17042_Cycles = 0x17, + MAX17042_DesignCap = 0x18, + MAX17042_AvgVCELL = 0x19, + MAX17042_MinMaxTemp = 0x1A, + MAX17042_MinMaxVolt = 0x1B, + MAX17042_MinMaxCurr = 0x1C, + MAX17042_CONFIG = 0x1D, + MAX17042_ICHGTerm = 0x1E, + MAX17042_AvCap = 0x1F, + MAX17042_ManName = 0x20, + MAX17042_DevName = 0x21, + + MAX17042_FullCAPNom = 0x23, + MAX17042_TempNom = 0x24, + MAX17042_TempLim = 0x25, + MAX17042_TempHot = 0x26, + MAX17042_AIN = 0x27, + MAX17042_LearnCFG = 0x28, + MAX17042_FilterCFG = 0x29, + MAX17042_RelaxCFG = 0x2A, + MAX17042_MiscCFG = 0x2B, + MAX17042_TGAIN = 0x2C, + MAx17042_TOFF = 0x2D, + MAX17042_CGAIN = 0x2E, + MAX17042_COFF = 0x2F, + + MAX17042_MaskSOC = 0x32, + MAX17042_SOC_empty = 0x33, + MAX17042_T_empty = 0x34, + + MAX17042_FullCAP0 = 0x35, + MAX17042_LAvg_empty = 0x36, + MAX17042_FCTC = 0x37, + MAX17042_RCOMP0 = 0x38, + MAX17042_TempCo = 0x39, + MAX17042_EmptyTempCo = 0x3A, + MAX17042_K_empty0 = 0x3B, + MAX17042_TaskPeriod = 0x3C, + MAX17042_FSTAT = 0x3D, + + MAX17042_SHDNTIMER = 0x3F, + + MAX17042_dQacc = 0x45, + MAX17042_dPacc = 0x46, + + MAX17042_VFSOC0 = 0x48, + + MAX17042_QH = 0x4D, + MAX17042_QL = 0x4E, + + MAX17042_VFSOC0Enable = 0x60, + MAX17042_MLOCKReg1 = 0x62, + MAX17042_MLOCKReg2 = 0x63, + + MAX17042_MODELChrTbl = 0x80, + + MAX17042_OCV = 0xEE, + + MAX17042_OCVInternal = 0xFB, + + MAX17042_VFSOC = 0xFF, +}; + +/* + * used for setting a register to a desired value + * addr : address for a register + * data : setting value for the register + */ +struct max17042_reg_data { + u8 addr; + u16 data; +}; + +struct max17042_config_data { + /* External current sense resistor value in milli-ohms */ + u32 cur_sense_val; + + /* A/D measurement */ + u16 tgain; /* 0x2C */ + u16 toff; /* 0x2D */ + u16 cgain; /* 0x2E */ + u16 coff; /* 0x2F */ + + /* Alert / Status */ + u16 valrt_thresh; /* 0x01 */ + u16 talrt_thresh; /* 0x02 */ + u16 soc_alrt_thresh; /* 0x03 */ + u16 config; /* 0x01D */ + 
u16 shdntimer; /* 0x03F */ + + /* App data */ + u16 design_cap; /* 0x18 */ + u16 ichgt_term; /* 0x1E */ + + /* MG3 config */ + u16 at_rate; /* 0x04 */ + u16 learn_cfg; /* 0x28 */ + u16 filter_cfg; /* 0x29 */ + u16 relax_cfg; /* 0x2A */ + u16 misc_cfg; /* 0x2B */ + u16 masksoc; /* 0x32 */ + + /* MG3 save and restore */ + u16 fullcap; /* 0x10 */ + u16 fullcapnom; /* 0x23 */ + u16 socempty; /* 0x33 */ + u16 lavg_empty; /* 0x36 */ + u16 dqacc; /* 0x45 */ + u16 dpacc; /* 0x46 */ + + /* Cell technology from power_supply.h */ + u16 cell_technology; + + /* Cell Data */ + u16 vempty; /* 0x12 */ + u16 temp_nom; /* 0x24 */ + u16 temp_lim; /* 0x25 */ + u16 fctc; /* 0x37 */ + u16 rcomp0; /* 0x38 */ + u16 tcompc0; /* 0x39 */ + u16 empty_tempco; /* 0x3A */ + u16 kempty0; /* 0x3B */ + u16 cell_char_tbl[MAX17042_CHARACTERIZATION_DATA_SIZE]; +} __packed; + +struct max17042_platform_data { + struct max17042_reg_data *init_data; + struct max17042_config_data *config_data; + int num_init_data; /* Number of enties in init_data array */ + bool enable_current_sense; + bool enable_por_init; /* Use POR init from Maxim appnote */ + + /* + * R_sns in micro-ohms. + * default 10000 (if r_sns = 0) as it is the recommended value by + * the datasheet although it can be changed by board designers. + */ + unsigned int r_sns; +}; + +#endif /* __MAX17042_BATTERY_H_ */ diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h new file mode 100644 index 00000000..24f51db8 --- /dev/null +++ b/include/linux/power/max8903_charger.h @@ -0,0 +1,57 @@ +/* + * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __MAX8903_CHARGER_H__ +#define __MAX8903_CHARGER_H__ + +struct max8903_pdata { + /* + * GPIOs + * cen, chg, flt, and usus are optional. + * dok, dcm, and uok are not optional depending on the status of + * dc_valid and usb_valid. + */ + int cen; /* Charger Enable input */ + int dok; /* DC(Adapter) Power OK output */ + int uok; /* USB Power OK output */ + int chg; /* Charger status output */ + int flt; /* Fault output */ + int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */ + int usus; /* USB Suspend Input (1: suspended) */ + + /* + * DC(Adapter/TA) is wired + * When dc_valid is true, + * dok and dcm should be valid. + * + * At least one of dc_valid or usb_valid should be true. + */ + bool dc_valid; + /* + * USB is wired + * When usb_valid is true, + * uok should be valid. 
+ */ + bool usb_valid; +}; + +#endif /* __MAX8903_CHARGER_H__ */ diff --git a/include/linux/power/sbs-battery.h b/include/linux/power/sbs-battery.h new file mode 100644 index 00000000..2b0a9d9f --- /dev/null +++ b/include/linux/power/sbs-battery.h @@ -0,0 +1,42 @@ +/* + * Gas Gauge driver for SBS Compliant Gas Gauges + * + * Copyright (c) 2010, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __LINUX_POWER_SBS_BATTERY_H_ +#define __LINUX_POWER_SBS_BATTERY_H_ + +#include <linux/power_supply.h> +#include <linux/types.h> + +/** + * struct sbs_platform_data - platform data for sbs devices + * @battery_detect: GPIO which is used to detect battery presence + * @battery_detect_present: gpio state when battery is present (0 / 1) + * @i2c_retry_count: # of times to retry on i2c IO failure + * @poll_retry_count: # of times to retry looking for new status after + * external change notification + */ +struct sbs_platform_data { + int battery_detect; + int battery_detect_present; + int i2c_retry_count; + int poll_retry_count; +}; + +#endif diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h new file mode 100644 index 00000000..e9aab944 --- /dev/null +++ b/include/linux/power/smb347-charger.h @@ -0,0 +1,123 @@ +/* + * Summit Microelectronics SMB347 Battery Charger Driver + * + * Copyright (C) 2011, Intel Corporation + * + * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef SMB347_CHARGER_H +#define SMB347_CHARGER_H + +#include <linux/types.h> +#include <linux/power_supply.h> + +enum { + /* use the default compensation method */ + SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1, + + SMB347_SOFT_TEMP_COMPENSATE_NONE, + SMB347_SOFT_TEMP_COMPENSATE_CURRENT, + SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE, +}; + +/* Use default factory programmed value for hard/soft temperature limit */ +#define SMB347_TEMP_USE_DEFAULT -273 + +/* + * Charging enable can be controlled by software (via i2c) by + * smb347-charger driver or by EN pin (active low/high). 
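Before continuing with the SMB347 definitions, here is a brief hypothetical board-file use of struct sbs_platform_data defined in sbs-battery.h above; every value is made up for illustration.

/* Hypothetical example -- not part of sbs-battery.h. */
static struct sbs_platform_data example_sbs_pdata = {
	.battery_detect		= 170,	/* made-up GPIO number */
	.battery_detect_present	= 0,	/* GPIO level when battery is present */
	.i2c_retry_count	= 3,
	.poll_retry_count	= 10,
};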
+ */ +enum smb347_chg_enable { + SMB347_CHG_ENABLE_SW, + SMB347_CHG_ENABLE_PIN_ACTIVE_LOW, + SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH, +}; + +/** + * struct smb347_charger_platform_data - platform data for SMB347 charger + * @battery_info: Information about the battery + * @max_charge_current: maximum current (in uA) the battery can be charged + * @max_charge_voltage: maximum voltage (in uV) the battery can be charged + * @pre_charge_current: current (in uA) to use in pre-charging phase + * @termination_current: current (in uA) used to determine when the + * charging cycle terminates + * @pre_to_fast_voltage: voltage (in uV) treshold used for transitioning to + * pre-charge to fast charge mode + * @mains_current_limit: maximum input current drawn from AC/DC input (in uA) + * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB + * input + * @chip_temp_threshold: die temperature where device starts limiting charge + * current [%100 - %130] (in degree C) + * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C), + * granularity is 5 deg C. + * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C), + * granularity is 5 deg C. + * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C), + * granularity is 5 deg C. + * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C), + * granularity is 5 deg C. + * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit + * @soft_temp_limit_compensation: compensation method when soft temperature + * limit is hit + * @charge_current_compensation: current (in uA) for charging compensation + * current when temperature hits soft limits + * @use_mains: AC/DC input can be used + * @use_usb: USB input can be used + * @use_usb_otg: USB OTG output can be used (not implemented yet) + * @irq_gpio: GPIO number used for interrupts (%-1 if not used) + * @enable_control: how charging enable/disable is controlled + * (driver/pin controls) + * + * @use_main, @use_usb, and @use_usb_otg are means to enable/disable + * hardware support for these. This is useful when we want to have for + * example OTG charging controlled via OTG transceiver driver and not by + * the SMB347 hardware. + * + * Hard and soft temperature limit values are given as described in the + * device data sheet and assuming NTC beta value is %3750. Even if this is + * not the case, these values should be used. They can be mapped to the + * corresponding NTC beta values with the help of table %2 in the data + * sheet. So for example if NTC beta is %3375 and we want to program hard + * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50. + * + * If zero value is given in any of the current and voltage values, the + * factory programmed default will be used. For soft/hard temperature + * values, pass in %SMB347_TEMP_USE_DEFAULT instead. 
+ */ +struct smb347_charger_platform_data { + struct power_supply_info battery_info; + unsigned int max_charge_current; + unsigned int max_charge_voltage; + unsigned int pre_charge_current; + unsigned int termination_current; + unsigned int pre_to_fast_voltage; + unsigned int mains_current_limit; + unsigned int usb_hc_current_limit; + unsigned int chip_temp_threshold; + int soft_cold_temp_limit; + int soft_hot_temp_limit; + int hard_cold_temp_limit; + int hard_hot_temp_limit; + bool suspend_on_hard_temp_limit; + unsigned int soft_temp_limit_compensation; + unsigned int charge_current_compensation; + bool use_mains; + bool use_usb; + bool use_usb_otg; + bool disable_automatic_recharge; + int irq_gpio; + bool disable_stat_interrupts; + enum smb347_chg_enable enable_control; + bool usb_mode_pin_ctrl; + char **supplied_to; + size_t num_supplicants; + int en_gpio; +}; + +#endif /* SMB347_CHARGER_H */ diff --git a/include/linux/power/wmt_battery.h b/include/linux/power/wmt_battery.h new file mode 100755 index 00000000..e721fa5e --- /dev/null +++ b/include/linux/power/wmt_battery.h @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2013 WonderMedia Technologies, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_POWER_WMT_BATTERY_H__ +#define __LINUX_POWER_WMT_BATTERY_H__ + +static inline int prefixcmp(const char *str, const char *prefix) +{ + for (; ; str++, prefix++) + if (!*prefix) + return 0; + else if (*str != *prefix) + return (unsigned char)*prefix - (unsigned char)*str; +} + +#include <linux/power_supply.h> +#include <mach/hardware.h> + +enum cable_type { + CABLE_TYPE_DC, + CABLE_TYPE_USB, + CABLE_TYPE_UNKNOWN, +}; + +enum { + CURRENT_SWITCH_DYNAMIC, + CURRENT_SWITCH_SLEEP, +}; + +enum { + PC_CONNECTED_NOT_CHARGING, + PC_CONNECTED_CHARGING, +}; + +#define USB_BASE_ADD (0xD8007800+WMT_MMAP_OFFSET) + +static inline int wmt_is_otg_plugin(void) +{ + return !(REG8_VAL(USB_BASE_ADD + 0x7F2) & BIT0); +} + +static inline int wmt_is_dc_plugin(void) +{ + return (REG8_VAL(PM_CTRL_BASE_ADDR + 0x005d) & 0x01); +} + +static inline void wmt_dcdet_irq_enable(void) +{ + uint32_t val; + // dcdet wakeup interrupt + PMTC_VAL |= BIT27; + val = PMWTC_VAL; + val &= ~(0xf << 12); + val |= 0x04 << 12; + PMWTC_VAL = val; + PMWE_VAL |= BIT27; + + // dcdet interrupt + /* Register dcdet interrupt, edge trigger */ + PMCIE_VAL |= BIT27; + WK_TRG_EN_VAL |= BIT27; + pmc_enable_wakeup_isr(WKS_DCDET, 4); +} + +static inline void wmt_dcdet_irq_disable(void) +{ + pmc_disable_wakeup_isr(WKS_DCDET); +} + +static inline int charger_get_status(void) +{ + struct power_supply *charger; + union power_supply_propval val; + + charger = power_supply_get_by_name("ac"); + if (!charger) { + pr_err("get ac power supply failed\n"); + return -ENODEV; + } + + charger->get_property(charger, POWER_SUPPLY_PROP_STATUS, &val); + return val.intval; +} + +static inline int charger_is_full(void) +{ + return charger_get_status() == POWER_SUPPLY_STATUS_FULL; +} + +static inline int wmt_charger_is_dc_plugin(void) +{ + return power_supply_is_system_supplied(); +} + +extern int wmt_is_pc_connected(void); +extern void 
wmt_do_pc_connected(void); + +extern int parse_charger_led(void); +extern void led_power_enable(int enable); + +#endif diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h new file mode 100644 index 00000000..e1f54479 --- /dev/null +++ b/include/linux/power_supply.h @@ -0,0 +1,279 @@ +/* + * Universal power supply monitor class + * + * Copyright © 2007 Anton Vorontsov <cbou@mail.ru> + * Copyright © 2004 Szabolcs Gyurko + * Copyright © 2003 Ian Molton <spyro@f2s.com> + * + * Modified: 2004, Oct Szabolcs Gyurko + * + * You may use this code as per GPL version 2 + */ + +#ifndef __LINUX_POWER_SUPPLY_H__ +#define __LINUX_POWER_SUPPLY_H__ + +#include <linux/workqueue.h> +#include <linux/leds.h> + +struct device; + +/* + * All voltages, currents, charges, energies, time and temperatures in uV, + * µA, µAh, µWh, seconds and tenths of degree Celsius unless otherwise + * stated. It's driver's job to convert its raw values to units in which + * this class operates. + */ + +/* + * For systems where the charger determines the maximum battery capacity + * the min and max fields should be used to present these values to user + * space. Unused/unknown fields will not appear in sysfs. + */ + +enum { + POWER_SUPPLY_STATUS_UNKNOWN = 0, + POWER_SUPPLY_STATUS_CHARGING, + POWER_SUPPLY_STATUS_DISCHARGING, + POWER_SUPPLY_STATUS_NOT_CHARGING, + POWER_SUPPLY_STATUS_FULL, +}; + +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE, + POWER_SUPPLY_CHARGE_TYPE_FAST, +}; + +enum { + POWER_SUPPLY_HEALTH_UNKNOWN = 0, + POWER_SUPPLY_HEALTH_GOOD, + POWER_SUPPLY_HEALTH_OVERHEAT, + POWER_SUPPLY_HEALTH_DEAD, + POWER_SUPPLY_HEALTH_OVERVOLTAGE, + POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, + POWER_SUPPLY_HEALTH_COLD, +}; + +enum { + POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, + POWER_SUPPLY_TECHNOLOGY_NiMH, + POWER_SUPPLY_TECHNOLOGY_LION, + POWER_SUPPLY_TECHNOLOGY_LIPO, + POWER_SUPPLY_TECHNOLOGY_LiFe, + POWER_SUPPLY_TECHNOLOGY_NiCd, + POWER_SUPPLY_TECHNOLOGY_LiMn, +}; + +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL, + POWER_SUPPLY_CAPACITY_LEVEL_LOW, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH, + POWER_SUPPLY_CAPACITY_LEVEL_FULL, +}; + +enum { + POWER_SUPPLY_SCOPE_UNKNOWN = 0, + POWER_SUPPLY_SCOPE_SYSTEM, + POWER_SUPPLY_SCOPE_DEVICE, +}; + +enum power_supply_property { + /* Properties of type `int' */ + POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_VOLTAGE_AVG, + POWER_SUPPLY_PROP_CURRENT_MAX, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_POWER_NOW, + POWER_SUPPLY_PROP_POWER_AVG, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_EMPTY, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_CHARGE_AVG, + POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, + POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, + POWER_SUPPLY_PROP_ENERGY_FULL, + POWER_SUPPLY_PROP_ENERGY_EMPTY, + POWER_SUPPLY_PROP_ENERGY_NOW, + POWER_SUPPLY_PROP_ENERGY_AVG, + POWER_SUPPLY_PROP_CAPACITY, /* in percents! 
*/ + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TEMP_AMBIENT, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, + POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */ + POWER_SUPPLY_PROP_SCOPE, + /* Local extensions */ + POWER_SUPPLY_PROP_USB_HC, + POWER_SUPPLY_PROP_USB_OTG, + POWER_SUPPLY_PROP_CHARGE_ENABLED, + /* Properties of type `const char *' */ + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_SERIAL_NUMBER, +}; + +enum power_supply_type { + POWER_SUPPLY_TYPE_UNKNOWN = 0, + POWER_SUPPLY_TYPE_BATTERY, + POWER_SUPPLY_TYPE_UPS, + POWER_SUPPLY_TYPE_MAINS, + POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */ + POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */ + POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */ + POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */ +}; + +union power_supply_propval { + int intval; + const char *strval; +}; + +struct power_supply { + const char *name; + enum power_supply_type type; + enum power_supply_property *properties; + size_t num_properties; + + char **supplied_to; + size_t num_supplicants; + + int (*get_property)(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); + int (*set_property)(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val); + int (*property_is_writeable)(struct power_supply *psy, + enum power_supply_property psp); + void (*external_power_changed)(struct power_supply *psy); + void (*set_charged)(struct power_supply *psy); + + /* For APM emulation, think legacy userspace. */ + int use_for_apm; + + /* private */ + struct device *dev; + struct work_struct changed_work; + spinlock_t changed_lock; + bool changed; + +#ifdef CONFIG_LEDS_TRIGGERS + struct led_trigger *charging_full_trig; + char *charging_full_trig_name; + struct led_trigger *charging_trig; + char *charging_trig_name; + struct led_trigger *full_trig; + char *full_trig_name; + struct led_trigger *online_trig; + char *online_trig_name; + struct led_trigger *charging_blink_full_solid_trig; + char *charging_blink_full_solid_trig_name; +#endif +}; + +/* + * This is recommended structure to specify static power supply parameters. + * Generic one, parametrizable for different power supplies. Power supply + * class itself does not use it, but that's what implementing most platform + * drivers, should try reuse for consistency. 
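As a usage illustration for struct power_supply above, a minimal hypothetical battery driver could register itself as follows; all names and values are made up, and ARRAY_SIZE is assumed from <linux/kernel.h>.

/* Hypothetical example -- not part of power_supply.h. */
static enum power_supply_property example_batt_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_CAPACITY,
};

static int example_batt_get_property(struct power_supply *psy,
				     enum power_supply_property psp,
				     union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = POWER_SUPPLY_STATUS_DISCHARGING;	/* would be read from hardware */
		return 0;
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = 50;				/* percent, hardware-derived */
		return 0;
	default:
		return -EINVAL;
	}
}

static struct power_supply example_batt = {
	.name		= "example-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= example_batt_props,
	.num_properties	= ARRAY_SIZE(example_batt_props),
	.get_property	= example_batt_get_property,
};

/* In probe(): power_supply_register(parent_dev, &example_batt);
 * in remove(): power_supply_unregister(&example_batt). */

power_supply_register() and power_supply_unregister(), declared further below, attach and detach the supply from the class, and power_supply_changed() notifies user space when the readings change.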
+ */ + +struct power_supply_info { + const char *name; + int technology; + int voltage_max_design; + int voltage_min_design; + int charge_full_design; + int charge_empty_design; + int energy_full_design; + int energy_empty_design; + int use_for_apm; +}; + +extern struct power_supply *power_supply_get_by_name(char *name); +extern void power_supply_changed(struct power_supply *psy); +extern int power_supply_am_i_supplied(struct power_supply *psy); +extern int power_supply_set_battery_charged(struct power_supply *psy); + +#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE) +extern int power_supply_is_system_supplied(void); +#else +static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } +#endif + +extern int power_supply_register(struct device *parent, + struct power_supply *psy); +extern void power_supply_unregister(struct power_supply *psy); +extern int power_supply_powers(struct power_supply *psy, struct device *dev); + +/* For APM emulation, think legacy userspace. */ +extern struct class *power_supply_class; + +static inline bool power_supply_is_amp_property(enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN: + case POWER_SUPPLY_PROP_CHARGE_FULL: + case POWER_SUPPLY_PROP_CHARGE_EMPTY: + case POWER_SUPPLY_PROP_CHARGE_NOW: + case POWER_SUPPLY_PROP_CHARGE_AVG: + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + case POWER_SUPPLY_PROP_CURRENT_MAX: + case POWER_SUPPLY_PROP_CURRENT_NOW: + case POWER_SUPPLY_PROP_CURRENT_AVG: + return 1; + default: + break; + } + + return 0; +} + +static inline bool power_supply_is_watt_property(enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: + case POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN: + case POWER_SUPPLY_PROP_ENERGY_FULL: + case POWER_SUPPLY_PROP_ENERGY_EMPTY: + case POWER_SUPPLY_PROP_ENERGY_NOW: + case POWER_SUPPLY_PROP_ENERGY_AVG: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + case POWER_SUPPLY_PROP_VOLTAGE_AVG: + case POWER_SUPPLY_PROP_POWER_NOW: + return 1; + default: + break; + } + + return 0; +} + +#endif /* __LINUX_POWER_SUPPLY_H__ */ diff --git a/include/linux/ppdev.h b/include/linux/ppdev.h new file mode 100644 index 00000000..dc18c5d2 --- /dev/null +++ b/include/linux/ppdev.h @@ -0,0 +1,99 @@ +/* + * linux/include/linux/ppdev.h + * + * User-space parallel port device driver (header file). + * + * Copyright (C) 1998-9 Tim Waugh <tim@cyberelk.demon.co.uk> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Added PPGETTIME/PPSETTIME, Fred Barnes, 1999 + * Added PPGETMODES/PPGETMODE/PPGETPHASE, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001 + */ + +#define PP_IOCTL 'p' + +/* Set mode for read/write (e.g. 
IEEE1284_MODE_EPP) */ +#define PPSETMODE _IOW(PP_IOCTL, 0x80, int) + +/* Read status */ +#define PPRSTATUS _IOR(PP_IOCTL, 0x81, unsigned char) +#define PPWSTATUS OBSOLETE__IOW(PP_IOCTL, 0x82, unsigned char) + +/* Read/write control */ +#define PPRCONTROL _IOR(PP_IOCTL, 0x83, unsigned char) +#define PPWCONTROL _IOW(PP_IOCTL, 0x84, unsigned char) + +struct ppdev_frob_struct { + unsigned char mask; + unsigned char val; +}; +#define PPFCONTROL _IOW(PP_IOCTL, 0x8e, struct ppdev_frob_struct) + +/* Read/write data */ +#define PPRDATA _IOR(PP_IOCTL, 0x85, unsigned char) +#define PPWDATA _IOW(PP_IOCTL, 0x86, unsigned char) + +/* Read/write econtrol (not used) */ +#define PPRECONTROL OBSOLETE__IOR(PP_IOCTL, 0x87, unsigned char) +#define PPWECONTROL OBSOLETE__IOW(PP_IOCTL, 0x88, unsigned char) + +/* Read/write FIFO (not used) */ +#define PPRFIFO OBSOLETE__IOR(PP_IOCTL, 0x89, unsigned char) +#define PPWFIFO OBSOLETE__IOW(PP_IOCTL, 0x8a, unsigned char) + +/* Claim the port to start using it */ +#define PPCLAIM _IO(PP_IOCTL, 0x8b) + +/* Release the port when you aren't using it */ +#define PPRELEASE _IO(PP_IOCTL, 0x8c) + +/* Yield the port (release it if another driver is waiting, + * then reclaim) */ +#define PPYIELD _IO(PP_IOCTL, 0x8d) + +/* Register device exclusively (must be before PPCLAIM). */ +#define PPEXCL _IO(PP_IOCTL, 0x8f) + +/* Data line direction: non-zero for input mode. */ +#define PPDATADIR _IOW(PP_IOCTL, 0x90, int) + +/* Negotiate a particular IEEE 1284 mode. */ +#define PPNEGOT _IOW(PP_IOCTL, 0x91, int) + +/* Set control lines when an interrupt occurs. */ +#define PPWCTLONIRQ _IOW(PP_IOCTL, 0x92, unsigned char) + +/* Clear (and return) interrupt count. */ +#define PPCLRIRQ _IOR(PP_IOCTL, 0x93, int) + +/* Set the IEEE 1284 phase that we're in (e.g. IEEE1284_PH_FWD_IDLE) */ +#define PPSETPHASE _IOW(PP_IOCTL, 0x94, int) + +/* Set and get port timeout (struct timeval's) */ +#define PPGETTIME _IOR(PP_IOCTL, 0x95, struct timeval) +#define PPSETTIME _IOW(PP_IOCTL, 0x96, struct timeval) + +/* Get available modes (what the hardware can do) */ +#define PPGETMODES _IOR(PP_IOCTL, 0x97, unsigned int) + +/* Get the current mode and phaze */ +#define PPGETMODE _IOR(PP_IOCTL, 0x98, int) +#define PPGETPHASE _IOR(PP_IOCTL, 0x99, int) + +/* get/set flags */ +#define PPGETFLAGS _IOR(PP_IOCTL, 0x9a, int) +#define PPSETFLAGS _IOW(PP_IOCTL, 0x9b, int) + +/* flags visible to the world */ +#define PP_FASTWRITE (1<<2) +#define PP_FASTREAD (1<<3) +#define PP_W91284PIC (1<<4) + +/* only masks user-visible flags */ +#define PP_FLAGMASK (PP_FASTWRITE | PP_FASTREAD | PP_W91284PIC) + + diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h new file mode 100644 index 00000000..e53ff659 --- /dev/null +++ b/include/linux/ppp-comp.h @@ -0,0 +1,184 @@ +/* + * ppp-comp.h - Definitions for doing PPP packet compression. + * + * Copyright 1994-1998 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ +#ifndef _NET_PPP_COMP_H +#define _NET_PPP_COMP_H + +struct module; + +/* + * The following symbols control whether we include code for + * various compression methods. 
+ */ + +#ifndef DO_BSD_COMPRESS +#define DO_BSD_COMPRESS 1 /* by default, include BSD-Compress */ +#endif +#ifndef DO_DEFLATE +#define DO_DEFLATE 1 /* by default, include Deflate */ +#endif +#define DO_PREDICTOR_1 0 +#define DO_PREDICTOR_2 0 + +/* + * Structure giving methods for compression/decompression. + */ + +struct compressor { + int compress_proto; /* CCP compression protocol number */ + + /* Allocate space for a compressor (transmit side) */ + void *(*comp_alloc) (unsigned char *options, int opt_len); + + /* Free space used by a compressor */ + void (*comp_free) (void *state); + + /* Initialize a compressor */ + int (*comp_init) (void *state, unsigned char *options, + int opt_len, int unit, int opthdr, int debug); + + /* Reset a compressor */ + void (*comp_reset) (void *state); + + /* Compress a packet */ + int (*compress) (void *state, unsigned char *rptr, + unsigned char *obuf, int isize, int osize); + + /* Return compression statistics */ + void (*comp_stat) (void *state, struct compstat *stats); + + /* Allocate space for a decompressor (receive side) */ + void *(*decomp_alloc) (unsigned char *options, int opt_len); + + /* Free space used by a decompressor */ + void (*decomp_free) (void *state); + + /* Initialize a decompressor */ + int (*decomp_init) (void *state, unsigned char *options, + int opt_len, int unit, int opthdr, int mru, + int debug); + + /* Reset a decompressor */ + void (*decomp_reset) (void *state); + + /* Decompress a packet. */ + int (*decompress) (void *state, unsigned char *ibuf, int isize, + unsigned char *obuf, int osize); + + /* Update state for an incompressible packet received */ + void (*incomp) (void *state, unsigned char *ibuf, int icnt); + + /* Return decompression statistics */ + void (*decomp_stat) (void *state, struct compstat *stats); + + /* Used in locking compressor modules */ + struct module *owner; + /* Extra skb space needed by the compressor algorithm */ + unsigned int comp_extra; +}; + +/* + * The return value from decompress routine is the length of the + * decompressed packet if successful, otherwise DECOMP_ERROR + * or DECOMP_FATALERROR if an error occurred. + * + * We need to make this distinction so that we can disable certain + * useful functionality, namely sending a CCP reset-request as a result + * of an error detected after decompression. This is to avoid infringing + * a patent held by Motorola. + * Don't you just lurve software patents. + */ + +#define DECOMP_ERROR -1 /* error detected before decomp. */ +#define DECOMP_FATALERROR -2 /* error detected after decomp. */ + +/* + * CCP codes. + */ + +#define CCP_CONFREQ 1 +#define CCP_CONFACK 2 +#define CCP_TERMREQ 5 +#define CCP_TERMACK 6 +#define CCP_RESETREQ 14 +#define CCP_RESETACK 15 + +/* + * Max # bytes for a CCP option + */ + +#define CCP_MAX_OPTION_LENGTH 32 + +/* + * Parts of a CCP packet. + */ + +#define CCP_CODE(dp) ((dp)[0]) +#define CCP_ID(dp) ((dp)[1]) +#define CCP_LENGTH(dp) (((dp)[2] << 8) + (dp)[3]) +#define CCP_HDRLEN 4 + +#define CCP_OPT_CODE(dp) ((dp)[0]) +#define CCP_OPT_LENGTH(dp) ((dp)[1]) +#define CCP_OPT_MINLEN 2 + +/* + * Definitions for BSD-Compress. + */ + +#define CI_BSD_COMPRESS 21 /* config. option for BSD-Compress */ +#define CILEN_BSD_COMPRESS 3 /* length of config. option */ + +/* Macros for handling the 3rd byte of the BSD-Compress config option. 
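A worked example of the option-byte layout that the macros defined just below operate on, purely for illustration:

	static void bsd_opt_example(void)
	{
		/* Request 12-bit codes with the current option format. */
		unsigned char opt = BSD_MAKE_OPT(BSD_CURRENT_VERSION, 12);
						/* (1 << 5) | 12 == 0x2c */
		int nbits   = BSD_NBITS(opt);	/* 0x2c & 0x1f == 12 */
		int version = BSD_VERSION(opt);	/* 0x2c >> 5   == 1  */

		(void)nbits;
		(void)version;
	}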
*/ +#define BSD_NBITS(x) ((x) & 0x1F) /* number of bits requested */ +#define BSD_VERSION(x) ((x) >> 5) /* version of option format */ +#define BSD_CURRENT_VERSION 1 /* current version number */ +#define BSD_MAKE_OPT(v, n) (((v) << 5) | (n)) + +#define BSD_MIN_BITS 9 /* smallest code size supported */ +#define BSD_MAX_BITS 15 /* largest code size supported */ + +/* + * Definitions for Deflate. + */ + +#define CI_DEFLATE 26 /* config option for Deflate */ +#define CI_DEFLATE_DRAFT 24 /* value used in original draft RFC */ +#define CILEN_DEFLATE 4 /* length of its config option */ + +#define DEFLATE_MIN_SIZE 9 +#define DEFLATE_MAX_SIZE 15 +#define DEFLATE_METHOD_VAL 8 +#define DEFLATE_SIZE(x) (((x) >> 4) + 8) +#define DEFLATE_METHOD(x) ((x) & 0x0F) +#define DEFLATE_MAKE_OPT(w) ((((w) - 8) << 4) + DEFLATE_METHOD_VAL) +#define DEFLATE_CHK_SEQUENCE 0 + +/* + * Definitions for MPPE. + */ + +#define CI_MPPE 18 /* config option for MPPE */ +#define CILEN_MPPE 6 /* length of config option */ + +/* + * Definitions for other, as yet unsupported, compression methods. + */ + +#define CI_PREDICTOR_1 1 /* config option for Predictor-1 */ +#define CILEN_PREDICTOR_1 2 /* length of its config option */ +#define CI_PREDICTOR_2 2 /* config option for Predictor-2 */ +#define CILEN_PREDICTOR_2 2 /* length of its config option */ + +#ifdef __KERNEL__ +extern int ppp_register_compressor(struct compressor *); +extern void ppp_unregister_compressor(struct compressor *); +#endif /* __KERNEL__ */ + +#endif /* _NET_PPP_COMP_H */ diff --git a/include/linux/ppp-ioctl.h b/include/linux/ppp-ioctl.h new file mode 100644 index 00000000..2d9a8859 --- /dev/null +++ b/include/linux/ppp-ioctl.h @@ -0,0 +1,119 @@ +/* + * ppp-ioctl.h - PPP ioctl definitions. + * + * Copyright 1999-2002 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ +#ifndef _PPP_IOCTL_H +#define _PPP_IOCTL_H + +#include <linux/types.h> +#include <linux/compiler.h> + +/* + * Bit definitions for flags argument to PPPIOCGFLAGS/PPPIOCSFLAGS. + */ +#define SC_COMP_PROT 0x00000001 /* protocol compression (output) */ +#define SC_COMP_AC 0x00000002 /* header compression (output) */ +#define SC_COMP_TCP 0x00000004 /* TCP (VJ) compression (output) */ +#define SC_NO_TCP_CCID 0x00000008 /* disable VJ connection-id comp. */ +#define SC_REJ_COMP_AC 0x00000010 /* reject adrs/ctrl comp. on input */ +#define SC_REJ_COMP_TCP 0x00000020 /* reject TCP (VJ) comp. 
on input */ +#define SC_CCP_OPEN 0x00000040 /* Look at CCP packets */ +#define SC_CCP_UP 0x00000080 /* May send/recv compressed packets */ +#define SC_ENABLE_IP 0x00000100 /* IP packets may be exchanged */ +#define SC_LOOP_TRAFFIC 0x00000200 /* send traffic to pppd */ +#define SC_MULTILINK 0x00000400 /* do multilink encapsulation */ +#define SC_MP_SHORTSEQ 0x00000800 /* use short MP sequence numbers */ +#define SC_COMP_RUN 0x00001000 /* compressor has been inited */ +#define SC_DECOMP_RUN 0x00002000 /* decompressor has been inited */ +#define SC_MP_XSHORTSEQ 0x00004000 /* transmit short MP seq numbers */ +#define SC_DEBUG 0x00010000 /* enable debug messages */ +#define SC_LOG_INPKT 0x00020000 /* log contents of good pkts recvd */ +#define SC_LOG_OUTPKT 0x00040000 /* log contents of pkts sent */ +#define SC_LOG_RAWIN 0x00080000 /* log all chars received */ +#define SC_LOG_FLUSH 0x00100000 /* log all chars flushed */ +#define SC_SYNC 0x00200000 /* synchronous serial mode */ +#define SC_MUST_COMP 0x00400000 /* no uncompressed packets may be sent or received */ +#define SC_MASK 0x0f600fff /* bits that user can change */ + +/* state bits */ +#define SC_XMIT_BUSY 0x10000000 /* (used by isdn_ppp?) */ +#define SC_RCV_ODDP 0x08000000 /* have rcvd char with odd parity */ +#define SC_RCV_EVNP 0x04000000 /* have rcvd char with even parity */ +#define SC_RCV_B7_1 0x02000000 /* have rcvd char with bit 7 = 1 */ +#define SC_RCV_B7_0 0x01000000 /* have rcvd char with bit 7 = 0 */ +#define SC_DC_FERROR 0x00800000 /* fatal decomp error detected */ +#define SC_DC_ERROR 0x00400000 /* non-fatal decomp error detected */ + +/* Used with PPPIOCGNPMODE/PPPIOCSNPMODE */ +struct npioctl { + int protocol; /* PPP protocol, e.g. PPP_IP */ + enum NPmode mode; +}; + +/* Structure describing a CCP configuration option, for PPPIOCSCOMPRESS */ +struct ppp_option_data { + __u8 __user *ptr; + __u32 length; + int transmit; +}; + +/* For PPPIOCGL2TPSTATS */ +struct pppol2tp_ioc_stats { + __u16 tunnel_id; /* redundant */ + __u16 session_id; /* if zero, get tunnel stats */ + __u32 using_ipsec:1; /* valid only for session_id == 0 */ + __aligned_u64 tx_packets; + __aligned_u64 tx_bytes; + __aligned_u64 tx_errors; + __aligned_u64 rx_packets; + __aligned_u64 rx_bytes; + __aligned_u64 rx_seq_discards; + __aligned_u64 rx_oos_packets; + __aligned_u64 rx_errors; +}; + +/* + * Ioctl definitions. 
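For illustration, a pppd-like daemon drives the flag bits above through the PPPIOCGFLAGS/PPPIOCSFLAGS ioctls defined just below. A minimal user-space sketch, assuming fd is an already-open /dev/ppp unit descriptor:

	#include <sys/ioctl.h>
	#include <linux/ppp-ioctl.h>

	/* Hypothetical helper: turn on VJ TCP header compression. */
	static int enable_vj_comp(int fd)
	{
		int flags;

		if (ioctl(fd, PPPIOCGFLAGS, &flags) < 0)
			return -1;
		flags |= SC_COMP_TCP;
		return ioctl(fd, PPPIOCSFLAGS, &flags);
	}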
+ */ + +#define PPPIOCGFLAGS _IOR('t', 90, int) /* get configuration flags */ +#define PPPIOCSFLAGS _IOW('t', 89, int) /* set configuration flags */ +#define PPPIOCGASYNCMAP _IOR('t', 88, int) /* get async map */ +#define PPPIOCSASYNCMAP _IOW('t', 87, int) /* set async map */ +#define PPPIOCGUNIT _IOR('t', 86, int) /* get ppp unit number */ +#define PPPIOCGRASYNCMAP _IOR('t', 85, int) /* get receive async map */ +#define PPPIOCSRASYNCMAP _IOW('t', 84, int) /* set receive async map */ +#define PPPIOCGMRU _IOR('t', 83, int) /* get max receive unit */ +#define PPPIOCSMRU _IOW('t', 82, int) /* set max receive unit */ +#define PPPIOCSMAXCID _IOW('t', 81, int) /* set VJ max slot ID */ +#define PPPIOCGXASYNCMAP _IOR('t', 80, ext_accm) /* get extended ACCM */ +#define PPPIOCSXASYNCMAP _IOW('t', 79, ext_accm) /* set extended ACCM */ +#define PPPIOCXFERUNIT _IO('t', 78) /* transfer PPP unit */ +#define PPPIOCSCOMPRESS _IOW('t', 77, struct ppp_option_data) +#define PPPIOCGNPMODE _IOWR('t', 76, struct npioctl) /* get NP mode */ +#define PPPIOCSNPMODE _IOW('t', 75, struct npioctl) /* set NP mode */ +#define PPPIOCSPASS _IOW('t', 71, struct sock_fprog) /* set pass filter */ +#define PPPIOCSACTIVE _IOW('t', 70, struct sock_fprog) /* set active filt */ +#define PPPIOCGDEBUG _IOR('t', 65, int) /* Read debug level */ +#define PPPIOCSDEBUG _IOW('t', 64, int) /* Set debug level */ +#define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */ +#define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */ +#define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */ +#define PPPIOCDETACH _IOW('t', 60, int) /* detach from ppp unit/chan */ +#define PPPIOCSMRRU _IOW('t', 59, int) /* set multilink MRU */ +#define PPPIOCCONNECT _IOW('t', 58, int) /* connect channel to unit */ +#define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */ +#define PPPIOCATTCHAN _IOW('t', 56, int) /* attach to ppp channel */ +#define PPPIOCGCHAN _IOR('t', 55, int) /* get ppp channel number */ +#define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats) + +#define SIOCGPPPSTATS (SIOCDEVPRIVATE + 0) +#define SIOCGPPPVER (SIOCDEVPRIVATE + 1) /* NEVER change this!! */ +#define SIOCGPPPCSTATS (SIOCDEVPRIVATE + 2) + +#endif /* _PPP_IOCTL_H */ diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h new file mode 100644 index 00000000..5d87f810 --- /dev/null +++ b/include/linux/ppp_channel.h @@ -0,0 +1,88 @@ +#ifndef _PPP_CHANNEL_H_ +#define _PPP_CHANNEL_H_ +/* + * Definitions for the interface between the generic PPP code + * and a PPP channel. + * + * A PPP channel provides a way for the generic PPP code to send + * and receive packets over some sort of communications medium. + * Packets are stored in sk_buffs and have the 2-byte PPP protocol + * number at the start, but not the address and control bytes. + * + * Copyright 1999 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * ==FILEVERSION 20000322== + */ + +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/poll.h> +#include <net/net_namespace.h> + +struct ppp_channel; + +struct ppp_channel_ops { + /* Send a packet (or multilink fragment) on this channel. + Returns 1 if it was accepted, 0 if not. 
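A hypothetical channel driver fills in this ops table (its two methods are declared just below) and then hands the channel to the generic layer with ppp_register_channel(), declared further down in this header. Sketch only; the acme type and hardware helpers are invented:

	static int acme_chan_start_xmit(struct ppp_channel *chan,
					struct sk_buff *skb)
	{
		struct acme_chan *ac = chan->private;	/* invented type */

		if (acme_hw_busy(ac))
			return 0;	/* not accepted: we will call
					 * ppp_output_wakeup() later   */
		acme_hw_send(ac, skb);
		return 1;		/* accepted */
	}

	static int acme_chan_ioctl(struct ppp_channel *chan, unsigned int cmd,
				   unsigned long arg)
	{
		return -ENOTTY;		/* nothing channel-specific */
	}

	static const struct ppp_channel_ops acme_chan_ops = {
		.start_xmit	= acme_chan_start_xmit,
		.ioctl		= acme_chan_ioctl,
	};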
*/ + int (*start_xmit)(struct ppp_channel *, struct sk_buff *); + /* Handle an ioctl call that has come in via /dev/ppp. */ + int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long); +}; + +struct ppp_channel { + void *private; /* channel private data */ + const struct ppp_channel_ops *ops; /* operations for this channel */ + int mtu; /* max transmit packet size */ + int hdrlen; /* amount of headroom channel needs */ + void *ppp; /* opaque to channel */ + int speed; /* transfer rate (bytes/second) */ + /* the following is not used at present */ + int latency; /* overhead time in milliseconds */ +}; + +#ifdef __KERNEL__ +/* Called by the channel when it can send some more data. */ +extern void ppp_output_wakeup(struct ppp_channel *); + +/* Called by the channel to process a received PPP packet. + The packet should have just the 2-byte PPP protocol header. */ +extern void ppp_input(struct ppp_channel *, struct sk_buff *); + +/* Called by the channel when an input error occurs, indicating + that we may have missed a packet. */ +extern void ppp_input_error(struct ppp_channel *, int code); + +/* Attach a channel to a given PPP unit in specified net. */ +extern int ppp_register_net_channel(struct net *, struct ppp_channel *); + +/* Attach a channel to a given PPP unit. */ +extern int ppp_register_channel(struct ppp_channel *); + +/* Detach a channel from its PPP unit (e.g. on hangup). */ +extern void ppp_unregister_channel(struct ppp_channel *); + +/* Get the channel number for a channel */ +extern int ppp_channel_index(struct ppp_channel *); + +/* Get the unit number associated with a channel, or -1 if none */ +extern int ppp_unit_number(struct ppp_channel *); + +/* Get the device name associated with a channel, or NULL if none */ +extern char *ppp_dev_name(struct ppp_channel *); + +/* + * SMP locking notes: + * The channel code must ensure that when it calls ppp_unregister_channel, + * nothing is executing in any of the procedures above, for that + * channel. The generic layer will ensure that nothing is executing + * in the start_xmit and ioctl routines for the channel by the time + * that ppp_unregister_channel returns. + */ + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h new file mode 100644 index 00000000..ba416f67 --- /dev/null +++ b/include/linux/ppp_defs.h @@ -0,0 +1,154 @@ +/* + * ppp_defs.h - PPP definitions. + * + * Copyright 1994-2000 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ +#include <linux/types.h> + +#ifndef _PPP_DEFS_H_ +#define _PPP_DEFS_H_ + +/* + * The basic PPP frame. + */ +#define PPP_HDRLEN 4 /* octets for standard ppp header */ +#define PPP_FCSLEN 2 /* octets for FCS */ +#define PPP_MRU 1500 /* default MRU = max length of info field */ + +#define PPP_ADDRESS(p) (((__u8 *)(p))[0]) +#define PPP_CONTROL(p) (((__u8 *)(p))[1]) +#define PPP_PROTOCOL(p) ((((__u8 *)(p))[2] << 8) + ((__u8 *)(p))[3]) + +/* + * Significant octet values. + */ +#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */ +#define PPP_UI 0x03 /* Unnumbered Information */ +#define PPP_FLAG 0x7e /* Flag Sequence */ +#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */ +#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */ + +/* + * Protocol field values. 
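As a concrete illustration of the accessor macros above: the first four octets of an LCP configure-request on the wire are ff 03 c0 21, so with the values defined above and just below:

	static const __u8 frame[] = { 0xff, 0x03, 0xc0, 0x21 /* ...options... */ };

	/*
	 * PPP_ADDRESS(frame)  == 0xff   == PPP_ALLSTATIONS
	 * PPP_CONTROL(frame)  == 0x03   == PPP_UI
	 * PPP_PROTOCOL(frame) == 0xc021 == PPP_LCP (defined just below)
	 */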
+ */ +#define PPP_IP 0x21 /* Internet Protocol */ +#define PPP_AT 0x29 /* AppleTalk Protocol */ +#define PPP_IPX 0x2b /* IPX protocol */ +#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */ +#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */ +#define PPP_MP 0x3d /* Multilink protocol */ +#define PPP_IPV6 0x57 /* Internet Protocol Version 6 */ +#define PPP_COMPFRAG 0xfb /* fragment compressed below bundle */ +#define PPP_COMP 0xfd /* compressed packet */ +#define PPP_MPLS_UC 0x0281 /* Multi Protocol Label Switching - Unicast */ +#define PPP_MPLS_MC 0x0283 /* Multi Protocol Label Switching - Multicast */ +#define PPP_IPCP 0x8021 /* IP Control Protocol */ +#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */ +#define PPP_IPXCP 0x802b /* IPX Control Protocol */ +#define PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */ +#define PPP_CCPFRAG 0x80fb /* CCP at link level (below MP bundle) */ +#define PPP_CCP 0x80fd /* Compression Control Protocol */ +#define PPP_MPLSCP 0x80fd /* MPLS Control Protocol */ +#define PPP_LCP 0xc021 /* Link Control Protocol */ +#define PPP_PAP 0xc023 /* Password Authentication Protocol */ +#define PPP_LQR 0xc025 /* Link Quality Report protocol */ +#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */ +#define PPP_CBCP 0xc029 /* Callback Control Protocol */ + +/* + * Values for FCS calculations. + */ + +#define PPP_INITFCS 0xffff /* Initial FCS value */ +#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */ + +#ifdef __KERNEL__ +#include <linux/crc-ccitt.h> +#define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c) +#endif + +/* + * Extended asyncmap - allows any character to be escaped. + */ + +typedef __u32 ext_accm[8]; + +/* + * What to do with network protocol (NP) packets. + */ +enum NPmode { + NPMODE_PASS, /* pass the packet through */ + NPMODE_DROP, /* silently drop the packet */ + NPMODE_ERROR, /* return an error */ + NPMODE_QUEUE /* save it up for later. */ +}; + +/* + * Statistics for LQRP and pppstats + */ +struct pppstat { + __u32 ppp_discards; /* # frames discarded */ + + __u32 ppp_ibytes; /* bytes received */ + __u32 ppp_ioctects; /* bytes received not in error */ + __u32 ppp_ipackets; /* packets received */ + __u32 ppp_ierrors; /* receive errors */ + __u32 ppp_ilqrs; /* # LQR frames received */ + + __u32 ppp_obytes; /* raw bytes sent */ + __u32 ppp_ooctects; /* frame bytes sent */ + __u32 ppp_opackets; /* packets sent */ + __u32 ppp_oerrors; /* transmit errors */ + __u32 ppp_olqrs; /* # LQR frames sent */ +}; + +struct vjstat { + __u32 vjs_packets; /* outbound packets */ + __u32 vjs_compressed; /* outbound compressed packets */ + __u32 vjs_searches; /* searches for connection state */ + __u32 vjs_misses; /* times couldn't find conn. state */ + __u32 vjs_uncompressedin; /* inbound uncompressed packets */ + __u32 vjs_compressedin; /* inbound compressed packets */ + __u32 vjs_errorin; /* inbound unknown type packets */ + __u32 vjs_tossed; /* inbound packets tossed because of error */ +}; + +struct compstat { + __u32 unc_bytes; /* total uncompressed bytes */ + __u32 unc_packets; /* total uncompressed packets */ + __u32 comp_bytes; /* compressed bytes */ + __u32 comp_packets; /* compressed packets */ + __u32 inc_bytes; /* incompressible bytes */ + __u32 inc_packets; /* incompressible packets */ + + /* the compression ratio is defined as in_count / bytes_out */ + __u32 in_count; /* Bytes received */ + __u32 bytes_out; /* Bytes transmitted */ + + double ratio; /* not computed in kernel. 
*/ +}; + +struct ppp_stats { + struct pppstat p; /* basic PPP statistics */ + struct vjstat vj; /* VJ header compression statistics */ +}; + +struct ppp_comp_stats { + struct compstat c; /* packet compression statistics */ + struct compstat d; /* packet decompression statistics */ +}; + +/* + * The following structure records the time in seconds since + * the last NP packet was sent or received. + */ +struct ppp_idle { + __kernel_time_t xmit_idle; /* time since last NP packet sent */ + __kernel_time_t recv_idle; /* time since last NP packet received */ +}; + +#endif /* _PPP_DEFS_H_ */ diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h new file mode 100644 index 00000000..0035abe4 --- /dev/null +++ b/include/linux/pps-gpio.h @@ -0,0 +1,32 @@ +/* + * pps-gpio.h -- PPS client for GPIOs + * + * + * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _PPS_GPIO_H +#define _PPS_GPIO_H + +struct pps_gpio_platform_data { + bool assert_falling_edge; + bool capture_clear; + unsigned int gpio_pin; + const char *gpio_label; +}; + +#endif diff --git a/include/linux/pps.h b/include/linux/pps.h new file mode 100644 index 00000000..a9bb1d93 --- /dev/null +++ b/include/linux/pps.h @@ -0,0 +1,131 @@ +/* + * PPS API header + * + * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#ifndef _PPS_H_ +#define _PPS_H_ + +#include <linux/types.h> + +#define PPS_VERSION "5.3.6" +#define PPS_MAX_SOURCES 16 /* should be enough... */ + +/* Implementation note: the logical states ``assert'' and ``clear'' + * are implemented in terms of the chip register, i.e. ``assert'' + * means the bit is set. */ + +/* + * 3.2 New data structures + */ + +#define PPS_API_VERS_1 1 +#define PPS_API_VERS PPS_API_VERS_1 /* we use API version 1 */ +#define PPS_MAX_NAME_LEN 32 + +/* 32-bit vs. 64-bit compatibility. + * + * 0n i386, the alignment of a uint64_t is only 4 bytes, while on most other + * architectures it's 8 bytes. On i386, there will be no padding between the + * two consecutive 'struct pps_ktime' members of struct pps_kinfo and struct + * pps_kparams. 
But on most platforms there will be padding to ensure correct + * alignment. + * + * The simple fix is probably to add an explicit padding. + * [David Woodhouse] + */ +struct pps_ktime { + __s64 sec; + __s32 nsec; + __u32 flags; +}; +#define PPS_TIME_INVALID (1<<0) /* used to specify timeout==NULL */ + +struct pps_kinfo { + __u32 assert_sequence; /* seq. num. of assert event */ + __u32 clear_sequence; /* seq. num. of clear event */ + struct pps_ktime assert_tu; /* time of assert event */ + struct pps_ktime clear_tu; /* time of clear event */ + int current_mode; /* current mode bits */ +}; + +struct pps_kparams { + int api_version; /* API version # */ + int mode; /* mode bits */ + struct pps_ktime assert_off_tu; /* offset compensation for assert */ + struct pps_ktime clear_off_tu; /* offset compensation for clear */ +}; + +/* + * 3.3 Mode bit definitions + */ + +/* Device/implementation parameters */ +#define PPS_CAPTUREASSERT 0x01 /* capture assert events */ +#define PPS_CAPTURECLEAR 0x02 /* capture clear events */ +#define PPS_CAPTUREBOTH 0x03 /* capture assert and clear events */ + +#define PPS_OFFSETASSERT 0x10 /* apply compensation for assert ev. */ +#define PPS_OFFSETCLEAR 0x20 /* apply compensation for clear ev. */ + +#define PPS_CANWAIT 0x100 /* can we wait for an event? */ +#define PPS_CANPOLL 0x200 /* bit reserved for future use */ + +/* Kernel actions */ +#define PPS_ECHOASSERT 0x40 /* feed back assert event to output */ +#define PPS_ECHOCLEAR 0x80 /* feed back clear event to output */ + +/* Timestamp formats */ +#define PPS_TSFMT_TSPEC 0x1000 /* select timespec format */ +#define PPS_TSFMT_NTPFP 0x2000 /* select NTP format */ + +/* + * 3.4.4 New functions: disciplining the kernel timebase + */ + +/* Kernel consumers */ +#define PPS_KC_HARDPPS 0 /* hardpps() (or equivalent) */ +#define PPS_KC_HARDPPS_PLL 1 /* hardpps() constrained to + use a phase-locked loop */ +#define PPS_KC_HARDPPS_FLL 2 /* hardpps() constrained to + use a frequency-locked loop */ +/* + * Here begins the implementation-specific part! + */ + +struct pps_fdata { + struct pps_kinfo info; + struct pps_ktime timeout; +}; + +struct pps_bind_args { + int tsformat; /* format of time stamps */ + int edge; /* selected event type */ + int consumer; /* selected kernel consumer */ +}; + +#include <linux/ioctl.h> + +#define PPS_GETPARAMS _IOR('p', 0xa1, struct pps_kparams *) +#define PPS_SETPARAMS _IOW('p', 0xa2, struct pps_kparams *) +#define PPS_GETCAP _IOR('p', 0xa3, int *) +#define PPS_FETCH _IOWR('p', 0xa4, struct pps_fdata *) +#define PPS_KC_BIND _IOW('p', 0xa5, struct pps_bind_args *) + +#endif /* _PPS_H_ */ diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h new file mode 100644 index 00000000..94048547 --- /dev/null +++ b/include/linux/pps_kernel.h @@ -0,0 +1,120 @@ +/* + * PPS API kernel header + * + * Copyright (C) 2009 Rodolfo Giometti <giometti@linux.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef LINUX_PPS_KERNEL_H +#define LINUX_PPS_KERNEL_H + +#include <linux/pps.h> + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/time.h> + +/* + * Global defines + */ + +struct pps_device; + +/* The specific PPS source info */ +struct pps_source_info { + char name[PPS_MAX_NAME_LEN]; /* simbolic name */ + char path[PPS_MAX_NAME_LEN]; /* path of connected device */ + int mode; /* PPS's allowed mode */ + + void (*echo)(struct pps_device *pps, + int event, void *data); /* PPS echo function */ + + struct module *owner; + struct device *dev; +}; + +struct pps_event_time { +#ifdef CONFIG_NTP_PPS + struct timespec ts_raw; +#endif /* CONFIG_NTP_PPS */ + struct timespec ts_real; +}; + +/* The main struct */ +struct pps_device { + struct pps_source_info info; /* PSS source info */ + + struct pps_kparams params; /* PPS's current params */ + + __u32 assert_sequence; /* PPS' assert event seq # */ + __u32 clear_sequence; /* PPS' clear event seq # */ + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; /* PPS mode at event time */ + + unsigned int last_ev; /* last PPS event id */ + wait_queue_head_t queue; /* PPS event queue */ + + unsigned int id; /* PPS source unique ID */ + struct cdev cdev; + struct device *dev; + struct fasync_struct *async_queue; /* fasync method */ + spinlock_t lock; +}; + +/* + * Global variables + */ + +extern struct device_attribute pps_attrs[]; + +/* + * Exported functions + */ + +extern struct pps_device *pps_register_source( + struct pps_source_info *info, int default_params); +extern void pps_unregister_source(struct pps_device *pps); +extern int pps_register_cdev(struct pps_device *pps); +extern void pps_unregister_cdev(struct pps_device *pps); +extern void pps_event(struct pps_device *pps, + struct pps_event_time *ts, int event, void *data); + +static inline void timespec_to_pps_ktime(struct pps_ktime *kt, + struct timespec ts) +{ + kt->sec = ts.tv_sec; + kt->nsec = ts.tv_nsec; +} + +#ifdef CONFIG_NTP_PPS + +static inline void pps_get_ts(struct pps_event_time *ts) +{ + getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real); +} + +#else /* CONFIG_NTP_PPS */ + +static inline void pps_get_ts(struct pps_event_time *ts) +{ + getnstimeofday(&ts->ts_real); +} + +#endif /* CONFIG_NTP_PPS */ + +#endif /* LINUX_PPS_KERNEL_H */ + diff --git a/include/linux/prctl.h b/include/linux/prctl.h new file mode 100644 index 00000000..e0cfec24 --- /dev/null +++ b/include/linux/prctl.h @@ -0,0 +1,127 @@ +#ifndef _LINUX_PRCTL_H +#define _LINUX_PRCTL_H + +/* Values to pass as first argument to prctl() */ + +#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */ +#define PR_GET_PDEATHSIG 2 /* Second arg is a ptr to return the signal */ + +/* Get/set current->mm->dumpable */ +#define PR_GET_DUMPABLE 3 +#define PR_SET_DUMPABLE 4 + +/* Get/set unaligned access control bits (if meaningful) */ +#define PR_GET_UNALIGN 5 +#define PR_SET_UNALIGN 6 +# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */ +# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */ + +/* Get/set whether or not to drop capabilities on setuid() away from + * uid 0 (as per security/commoncap.c) */ +#define PR_GET_KEEPCAPS 7 +#define PR_SET_KEEPCAPS 8 + +/* Get/set floating-point emulation control bits (if meaningful) */ +#define PR_GET_FPEMU 9 
+#define PR_SET_FPEMU 10 +# define PR_FPEMU_NOPRINT 1 /* silently emulate fp operations accesses */ +# define PR_FPEMU_SIGFPE 2 /* don't emulate fp operations, send SIGFPE instead */ + +/* Get/set floating-point exception mode (if meaningful) */ +#define PR_GET_FPEXC 11 +#define PR_SET_FPEXC 12 +# define PR_FP_EXC_SW_ENABLE 0x80 /* Use FPEXC for FP exception enables */ +# define PR_FP_EXC_DIV 0x010000 /* floating point divide by zero */ +# define PR_FP_EXC_OVF 0x020000 /* floating point overflow */ +# define PR_FP_EXC_UND 0x040000 /* floating point underflow */ +# define PR_FP_EXC_RES 0x080000 /* floating point inexact result */ +# define PR_FP_EXC_INV 0x100000 /* floating point invalid operation */ +# define PR_FP_EXC_DISABLED 0 /* FP exceptions disabled */ +# define PR_FP_EXC_NONRECOV 1 /* async non-recoverable exc. mode */ +# define PR_FP_EXC_ASYNC 2 /* async recoverable exception mode */ +# define PR_FP_EXC_PRECISE 3 /* precise exception mode */ + +/* Get/set whether we use statistical process timing or accurate timestamp + * based process timing */ +#define PR_GET_TIMING 13 +#define PR_SET_TIMING 14 +# define PR_TIMING_STATISTICAL 0 /* Normal, traditional, + statistical process timing */ +# define PR_TIMING_TIMESTAMP 1 /* Accurate timestamp based + process timing */ + +#define PR_SET_NAME 15 /* Set process name */ +#define PR_GET_NAME 16 /* Get process name */ + +/* Get/set process endian */ +#define PR_GET_ENDIAN 19 +#define PR_SET_ENDIAN 20 +# define PR_ENDIAN_BIG 0 +# define PR_ENDIAN_LITTLE 1 /* True little endian mode */ +# define PR_ENDIAN_PPC_LITTLE 2 /* "PowerPC" pseudo little endian */ + +/* Get/set process seccomp mode */ +#define PR_GET_SECCOMP 21 +#define PR_SET_SECCOMP 22 + +/* Get/set the capability bounding set (as per security/commoncap.c) */ +#define PR_CAPBSET_READ 23 +#define PR_CAPBSET_DROP 24 + +/* Get/set the process' ability to use the timestamp counter instruction */ +#define PR_GET_TSC 25 +#define PR_SET_TSC 26 +# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */ +# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */ + +/* Get/set securebits (as per security/commoncap.c) */ +#define PR_GET_SECUREBITS 27 +#define PR_SET_SECUREBITS 28 + +/* + * Get/set the timerslack as used by poll/select/nanosleep + * A value of 0 means "use default" + */ +#define PR_SET_TIMERSLACK 29 +#define PR_GET_TIMERSLACK 30 + +#define PR_TASK_PERF_EVENTS_DISABLE 31 +#define PR_TASK_PERF_EVENTS_ENABLE 32 + +/* + * Set early/late kill mode for hwpoison memory corruption. + * This influences when the process gets killed on a memory corruption. + */ +#define PR_MCE_KILL 33 +# define PR_MCE_KILL_CLEAR 0 +# define PR_MCE_KILL_SET 1 + +# define PR_MCE_KILL_LATE 0 +# define PR_MCE_KILL_EARLY 1 +# define PR_MCE_KILL_DEFAULT 2 + +#define PR_MCE_KILL_GET 34 + +/* + * Tune up process memory map specifics. + */ +#define PR_SET_MM 35 +# define PR_SET_MM_START_CODE 1 +# define PR_SET_MM_END_CODE 2 +# define PR_SET_MM_START_DATA 3 +# define PR_SET_MM_END_DATA 4 +# define PR_SET_MM_START_STACK 5 +# define PR_SET_MM_START_BRK 6 +# define PR_SET_MM_BRK 7 + +/* + * Set specific pid that is allowed to ptrace the current task. + * A value of 0 mean "no process". 
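These values are used from user space through the prctl(2) system call; for illustration, a sandbox helper might combine the parent-death signal defined above with the ptracer whitelist defined just below (sketch only; tracer_pid is assumed to be known to the caller):

	#include <sys/types.h>
	#include <sys/prctl.h>
	#include <signal.h>

	static void harden_child(pid_t tracer_pid)
	{
		/* Die if the supervising parent goes away. */
		prctl(PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0);
		/* Allow exactly one process to ptrace us (Yama). */
		prctl(PR_SET_PTRACER, (unsigned long)tracer_pid, 0, 0, 0);
	}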
+ */ +#define PR_SET_PTRACER 0x59616d61 +# define PR_SET_PTRACER_ANY ((unsigned long)-1) + +#define PR_SET_CHILD_SUBREAPER 36 +#define PR_GET_CHILD_SUBREAPER 37 + +#endif /* _LINUX_PRCTL_H */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h new file mode 100644 index 00000000..5a710b9c --- /dev/null +++ b/include/linux/preempt.h @@ -0,0 +1,155 @@ +#ifndef __LINUX_PREEMPT_H +#define __LINUX_PREEMPT_H + +/* + * include/linux/preempt.h - macros for accessing and manipulating + * preempt_count (used for kernel preemption, interrupt count, etc.) + */ + +#include <linux/thread_info.h> +#include <linux/linkage.h> +#include <linux/list.h> + +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) + extern void add_preempt_count(int val); + extern void sub_preempt_count(int val); +#else +# define add_preempt_count(val) do { preempt_count() += (val); } while (0) +# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0) +#endif + +#define inc_preempt_count() add_preempt_count(1) +#define dec_preempt_count() sub_preempt_count(1) + +#define preempt_count() (current_thread_info()->preempt_count) + +#ifdef CONFIG_PREEMPT + +asmlinkage void preempt_schedule(void); + +#define preempt_check_resched() \ +do { \ + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ + preempt_schedule(); \ +} while (0) + +#else /* !CONFIG_PREEMPT */ + +#define preempt_check_resched() do { } while (0) + +#endif /* CONFIG_PREEMPT */ + + +#ifdef CONFIG_PREEMPT_COUNT + +#define preempt_disable() \ +do { \ + inc_preempt_count(); \ + barrier(); \ +} while (0) + +#define sched_preempt_enable_no_resched() \ +do { \ + barrier(); \ + dec_preempt_count(); \ +} while (0) + +#define preempt_enable_no_resched() sched_preempt_enable_no_resched() + +#define preempt_enable() \ +do { \ + preempt_enable_no_resched(); \ + barrier(); \ + preempt_check_resched(); \ +} while (0) + +/* For debugging and tracer internals only! 
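The preempt_disable()/preempt_enable() pair above is typically wrapped around a short critical section that must stay on one CPU; a minimal sketch (the per-CPU counter is invented):

	#include <linux/preempt.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, acme_hits);

	static void acme_count_hit(void)
	{
		preempt_disable();		/* stay on this CPU...        */
		__this_cpu_inc(acme_hits);	/* ...while touching its data */
		preempt_enable();		/* may reschedule if needed   */
	}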
*/ +#define add_preempt_count_notrace(val) \ + do { preempt_count() += (val); } while (0) +#define sub_preempt_count_notrace(val) \ + do { preempt_count() -= (val); } while (0) +#define inc_preempt_count_notrace() add_preempt_count_notrace(1) +#define dec_preempt_count_notrace() sub_preempt_count_notrace(1) + +#define preempt_disable_notrace() \ +do { \ + inc_preempt_count_notrace(); \ + barrier(); \ +} while (0) + +#define preempt_enable_no_resched_notrace() \ +do { \ + barrier(); \ + dec_preempt_count_notrace(); \ +} while (0) + +/* preempt_check_resched is OK to trace */ +#define preempt_enable_notrace() \ +do { \ + preempt_enable_no_resched_notrace(); \ + barrier(); \ + preempt_check_resched(); \ +} while (0) + +#else /* !CONFIG_PREEMPT_COUNT */ + +#define preempt_disable() do { } while (0) +#define sched_preempt_enable_no_resched() do { } while (0) +#define preempt_enable_no_resched() do { } while (0) +#define preempt_enable() do { } while (0) + +#define preempt_disable_notrace() do { } while (0) +#define preempt_enable_no_resched_notrace() do { } while (0) +#define preempt_enable_notrace() do { } while (0) + +#endif /* CONFIG_PREEMPT_COUNT */ + +#ifdef CONFIG_PREEMPT_NOTIFIERS + +struct preempt_notifier; + +/** + * preempt_ops - notifiers called when a task is preempted and rescheduled + * @sched_in: we're about to be rescheduled: + * notifier: struct preempt_notifier for the task being scheduled + * cpu: cpu we're scheduled on + * @sched_out: we've just been preempted + * notifier: struct preempt_notifier for the task being preempted + * next: the task that's kicking us out + * + * Please note that sched_in and out are called under different + * contexts. sched_out is called with rq lock held and irq disabled + * while sched_in is called without rq lock and irq enabled. This + * difference is intentional and depended upon by its users. + */ +struct preempt_ops { + void (*sched_in)(struct preempt_notifier *notifier, int cpu); + void (*sched_out)(struct preempt_notifier *notifier, + struct task_struct *next); +}; + +/** + * preempt_notifier - key for installing preemption notifiers + * @link: internal use + * @ops: defines the notifier functions to be called + * + * Usually used in conjunction with container_of(). + */ +struct preempt_notifier { + struct hlist_node link; + struct preempt_ops *ops; +}; + +void preempt_notifier_register(struct preempt_notifier *notifier); +void preempt_notifier_unregister(struct preempt_notifier *notifier); + +static inline void preempt_notifier_init(struct preempt_notifier *notifier, + struct preempt_ops *ops) +{ + INIT_HLIST_NODE(¬ifier->link); + notifier->ops = ops; +} + +#endif + +#endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h new file mode 100644 index 00000000..a3bfbdf6 --- /dev/null +++ b/include/linux/prefetch.h @@ -0,0 +1,64 @@ +/* + * Generic cache management functions. Everything is arch-specific, + * but this header exists to make sure the defines/functions can be + * used in a generic way. + * + * 2000-11-13 Arjan van de Ven <arjan@fenrus.demon.nl> + * + */ + +#ifndef _LINUX_PREFETCH_H +#define _LINUX_PREFETCH_H + +#include <linux/types.h> +#include <asm/processor.h> +#include <asm/cache.h> + +/* + prefetch(x) attempts to pre-emptively get the memory pointed to + by address "x" into the CPU L1 cache. + prefetch(x) should not cause any kind of exception, prefetch(0) is + specifically ok. + + prefetch() should be defined by the architecture, if not, the + #define below provides a no-op define. 
+ + There are 3 prefetch() macros: + + prefetch(x) - prefetches the cacheline at "x" for read + prefetchw(x) - prefetches the cacheline at "x" for write + spin_lock_prefetch(x) - prefetches the spinlock *x for taking + + there is also PREFETCH_STRIDE which is the architecure-preferred + "lookahead" size for prefetching streamed operations. + +*/ + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(x) __builtin_prefetch(x) +#endif + +#ifndef ARCH_HAS_PREFETCHW +#define prefetchw(x) __builtin_prefetch(x,1) +#endif + +#ifndef ARCH_HAS_SPINLOCK_PREFETCH +#define spin_lock_prefetch(x) prefetchw(x) +#endif + +#ifndef PREFETCH_STRIDE +#define PREFETCH_STRIDE (4*L1_CACHE_BYTES) +#endif + +static inline void prefetch_range(void *addr, size_t len) +{ +#ifdef ARCH_HAS_PREFETCH + char *cp; + char *end = addr + len; + + for (cp = addr; cp < end; cp += PREFETCH_STRIDE) + prefetch(cp); +#endif +} + +#endif diff --git a/include/linux/printk.h b/include/linux/printk.h new file mode 100644 index 00000000..0525927f --- /dev/null +++ b/include/linux/printk.h @@ -0,0 +1,319 @@ +#ifndef __KERNEL_PRINTK__ +#define __KERNEL_PRINTK__ + +#include <linux/init.h> + +extern const char linux_banner[]; +extern const char linux_proc_banner[]; + +#define KERN_EMERG "<0>" /* system is unusable */ +#define KERN_ALERT "<1>" /* action must be taken immediately */ +#define KERN_CRIT "<2>" /* critical conditions */ +#define KERN_ERR "<3>" /* error conditions */ +#define KERN_WARNING "<4>" /* warning conditions */ +#define KERN_NOTICE "<5>" /* normal but significant condition */ +#define KERN_INFO "<6>" /* informational */ +#define KERN_DEBUG "<7>" /* debug-level messages */ + +/* Use the default kernel loglevel */ +#define KERN_DEFAULT "<d>" +/* + * Annotation for a "continued" line of log printout (only done after a + * line that had no enclosing \n). Only to be used by core/arch code + * during early bootup (a continued line is not SMP-safe otherwise). + */ +#define KERN_CONT "<c>" + +extern int console_printk[]; + +#define console_loglevel (console_printk[0]) +#define default_message_loglevel (console_printk[1]) +#define minimum_console_loglevel (console_printk[2]) +#define default_console_loglevel (console_printk[3]) + +static inline void console_silent(void) +{ + console_loglevel = 0; +} + +static inline void console_verbose(void) +{ + if (console_loglevel) + console_loglevel = 15; +} + +struct va_format { + const char *fmt; + va_list *va; +}; + +/* + * FW_BUG + * Add this to a message where you are sure the firmware is buggy or behaves + * really stupid or out of spec. Be aware that the responsible BIOS developer + * should be able to fix this issue or at least get a concrete idea of the + * problem by reading your message without the need of looking at the kernel + * code. + * + * Use it for definite and high priority BIOS bugs. + * + * FW_WARN + * Use it for not that clear (e.g. could the kernel messed up things already?) + * and medium priority BIOS bugs. + * + * FW_INFO + * Use this one if you want to tell the user or vendor about something + * suspicious, but generally harmless related to the firmware. + * + * Use it for information or very low priority BIOS bugs. + */ +#define FW_BUG "[Firmware Bug]: " +#define FW_WARN "[Firmware Warn]: " +#define FW_INFO "[Firmware Info]: " + +/* + * HW_ERR + * Add this to a message for hardware errors, so that user can report + * it to hardware vendor instead of LKML or software vendor. 
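In use, these prefixes are simply concatenated with an ordinary printk() level and format string; the messages below are invented examples only (HW_ERR is defined just after this comment):

	static void firmware_and_hardware_examples(void)
	{
		printk(KERN_ERR FW_BUG
		       "BIOS handed off the controller in an undefined state\n");
		printk(KERN_WARNING HW_ERR
		       "uncorrected memory error reported on DIMM 2\n");
	}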
+ */ +#define HW_ERR "[Hardware Error]: " + +/* + * Dummy printk for disabled debugging statements to use whilst maintaining + * gcc's format and side-effect checking. + */ +static inline __printf(1, 2) +int no_printk(const char *fmt, ...) +{ + return 0; +} + +extern asmlinkage __printf(1, 2) +void early_printk(const char *fmt, ...); + +extern int printk_needs_cpu(int cpu); +extern void printk_tick(void); + +#ifdef CONFIG_PRINTK +asmlinkage __printf(1, 0) +int vprintk(const char *fmt, va_list args); +asmlinkage __printf(1, 2) __cold +int printk(const char *fmt, ...); + +/* + * Special printk facility for scheduler use only, _DO_NOT_USE_ ! + */ +__printf(1, 2) __cold int printk_sched(const char *fmt, ...); + +/* + * Please don't use printk_ratelimit(), because it shares ratelimiting state + * with all other unrelated printk_ratelimit() callsites. Instead use + * printk_ratelimited() or plain old __ratelimit(). + */ +extern int __printk_ratelimit(const char *func); +#define printk_ratelimit() __printk_ratelimit(__func__) +extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec); + +extern int printk_delay_msec; +extern int dmesg_restrict; +extern int kptr_restrict; + +void log_buf_kexec_setup(void); +void __init setup_log_buf(int early); +#else +static inline __printf(1, 0) +int vprintk(const char *s, va_list args) +{ + return 0; +} +static inline __printf(1, 2) __cold +int printk(const char *s, ...) +{ + return 0; +} +static inline __printf(1, 2) __cold +int printk_sched(const char *s, ...) +{ + return 0; +} +static inline int printk_ratelimit(void) +{ + return 0; +} +static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec) +{ + return false; +} + +static inline void log_buf_kexec_setup(void) +{ +} + +static inline void setup_log_buf(int early) +{ +} +#endif + +extern void dump_stack(void) __cold; + +#ifndef pr_fmt +#define pr_fmt(fmt) fmt +#endif + +#define pr_emerg(fmt, ...) \ + printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert(fmt, ...) \ + printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit(fmt, ...) \ + printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err(fmt, ...) \ + printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warning(fmt, ...) \ + printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn pr_warning +#define pr_notice(fmt, ...) \ + printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info(fmt, ...) \ + printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) + +/* pr_devel() should produce zero code unless DEBUG is defined */ +#ifdef DEBUG +#define pr_devel(fmt, ...) \ + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_devel(fmt, ...) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +/* If you are writing a driver, please use dev_dbg instead */ +#if defined(CONFIG_DYNAMIC_DEBUG) +/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ +#define pr_debug(fmt, ...) \ + dynamic_pr_debug(fmt, ##__VA_ARGS__) +#elif defined(DEBUG) +#define pr_debug(fmt, ...) \ + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug(fmt, ...) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +/* + * Print a one-time message (analogous to WARN_ONCE() et al): + */ + +#ifdef CONFIG_PRINTK +#define printk_once(fmt, ...) 
\ +({ \ + static bool __print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + printk(fmt, ##__VA_ARGS__); \ + } \ +}) +#else +#define printk_once(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) +#endif + +#define pr_emerg_once(fmt, ...) \ + printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert_once(fmt, ...) \ + printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit_once(fmt, ...) \ + printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err_once(fmt, ...) \ + printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn_once(fmt, ...) \ + printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_notice_once(fmt, ...) \ + printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info_once(fmt, ...) \ + printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +#define pr_cont_once(fmt, ...) \ + printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__) +/* If you are writing a driver, please use dev_dbg instead */ +#if defined(DEBUG) +#define pr_debug_once(fmt, ...) \ + printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug_once(fmt, ...) \ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +/* + * ratelimited messages with local ratelimit_state, + * no local ratelimit_state used in the !PRINTK case + */ +#ifdef CONFIG_PRINTK +#define printk_ratelimited(fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + printk(fmt, ##__VA_ARGS__); \ +}) +#else +#define printk_ratelimited(fmt, ...) \ + no_printk(fmt, ##__VA_ARGS__) +#endif + +#define pr_emerg_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +#define pr_notice_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +/* no pr_cont_ratelimited, don't do that... */ +/* If you are writing a driver, please use dev_dbg instead */ +#if defined(DEBUG) +#define pr_debug_ratelimited(fmt, ...) \ + printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug_ratelimited(fmt, ...) 
\ + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#endif + +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +extern void hex_dump_to_buffer(const void *buf, size_t len, + int rowsize, int groupsize, + char *linebuf, size_t linebuflen, bool ascii); +#ifdef CONFIG_PRINTK +extern void print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, + const void *buf, size_t len); +#else +static inline void print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii) +{ +} +static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type, + const void *buf, size_t len) +{ +} + +#endif + +#endif diff --git a/include/linux/prio_heap.h b/include/linux/prio_heap.h new file mode 100644 index 00000000..08094350 --- /dev/null +++ b/include/linux/prio_heap.h @@ -0,0 +1,58 @@ +#ifndef _LINUX_PRIO_HEAP_H +#define _LINUX_PRIO_HEAP_H + +/* + * Simple insertion-only static-sized priority heap containing + * pointers, based on CLR, chapter 7 + */ + +#include <linux/gfp.h> + +/** + * struct ptr_heap - simple static-sized priority heap + * @ptrs - pointer to data area + * @max - max number of elements that can be stored in @ptrs + * @size - current number of valid elements in @ptrs (in the range 0..@size-1 + * @gt: comparison operator, which should implement "greater than" + */ +struct ptr_heap { + void **ptrs; + int max; + int size; + int (*gt)(void *, void *); +}; + +/** + * heap_init - initialize an empty heap with a given memory size + * @heap: the heap structure to be initialized + * @size: amount of memory to use in bytes + * @gfp_mask: mask to pass to kmalloc() + * @gt: comparison operator, which should implement "greater than" + */ +extern int heap_init(struct ptr_heap *heap, size_t size, gfp_t gfp_mask, + int (*gt)(void *, void *)); + +/** + * heap_free - release a heap's storage + * @heap: the heap structure whose data should be released + */ +void heap_free(struct ptr_heap *heap); + +/** + * heap_insert - insert a value into the heap and return any overflowed value + * @heap: the heap to be operated on + * @p: the pointer to be inserted + * + * Attempts to insert the given value into the priority heap. If the + * heap is full prior to the insertion, then the resulting heap will + * consist of the smallest @max elements of the original heap and the + * new element; the greatest element will be removed from the heap and + * returned. Note that the returned element will be the new element + * (i.e. no change to the heap) if the new element is greater than all + * elements currently in the heap. + */ +extern void *heap_insert(struct ptr_heap *heap, void *p); + + + +#endif /* _LINUX_PRIO_HEAP_H */ diff --git a/include/linux/prio_tree.h b/include/linux/prio_tree.h new file mode 100644 index 00000000..db04abb5 --- /dev/null +++ b/include/linux/prio_tree.h @@ -0,0 +1,120 @@ +#ifndef _LINUX_PRIO_TREE_H +#define _LINUX_PRIO_TREE_H + +/* + * K&R 2nd ed. A8.3 somewhat obliquely hints that initial sequences of struct + * fields with identical types should end up at the same location. We'll use + * this until we can scrap struct raw_prio_tree_node. + * + * Note: all this could be done more elegantly by using unnamed union/struct + * fields. 
However, gcc 2.95.3 and apparently also gcc 3.0.4 don't support this + * language extension. + */ + +struct raw_prio_tree_node { + struct prio_tree_node *left; + struct prio_tree_node *right; + struct prio_tree_node *parent; +}; + +struct prio_tree_node { + struct prio_tree_node *left; + struct prio_tree_node *right; + struct prio_tree_node *parent; + unsigned long start; + unsigned long last; /* last location _in_ interval */ +}; + +struct prio_tree_root { + struct prio_tree_node *prio_tree_node; + unsigned short index_bits; + unsigned short raw; + /* + * 0: nodes are of type struct prio_tree_node + * 1: nodes are of type raw_prio_tree_node + */ +}; + +struct prio_tree_iter { + struct prio_tree_node *cur; + unsigned long mask; + unsigned long value; + int size_level; + + struct prio_tree_root *root; + pgoff_t r_index; + pgoff_t h_index; +}; + +static inline void prio_tree_iter_init(struct prio_tree_iter *iter, + struct prio_tree_root *root, pgoff_t r_index, pgoff_t h_index) +{ + iter->root = root; + iter->r_index = r_index; + iter->h_index = h_index; + iter->cur = NULL; +} + +#define __INIT_PRIO_TREE_ROOT(ptr, _raw) \ +do { \ + (ptr)->prio_tree_node = NULL; \ + (ptr)->index_bits = 1; \ + (ptr)->raw = (_raw); \ +} while (0) + +#define INIT_PRIO_TREE_ROOT(ptr) __INIT_PRIO_TREE_ROOT(ptr, 0) +#define INIT_RAW_PRIO_TREE_ROOT(ptr) __INIT_PRIO_TREE_ROOT(ptr, 1) + +#define INIT_PRIO_TREE_NODE(ptr) \ +do { \ + (ptr)->left = (ptr)->right = (ptr)->parent = (ptr); \ +} while (0) + +#define INIT_PRIO_TREE_ITER(ptr) \ +do { \ + (ptr)->cur = NULL; \ + (ptr)->mask = 0UL; \ + (ptr)->value = 0UL; \ + (ptr)->size_level = 0; \ +} while (0) + +#define prio_tree_entry(ptr, type, member) \ + ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member))) + +static inline int prio_tree_empty(const struct prio_tree_root *root) +{ + return root->prio_tree_node == NULL; +} + +static inline int prio_tree_root(const struct prio_tree_node *node) +{ + return node->parent == node; +} + +static inline int prio_tree_left_empty(const struct prio_tree_node *node) +{ + return node->left == node; +} + +static inline int prio_tree_right_empty(const struct prio_tree_node *node) +{ + return node->right == node; +} + + +struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root, + struct prio_tree_node *old, struct prio_tree_node *node); +struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root, + struct prio_tree_node *node); +void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node); +struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter); + +#define raw_prio_tree_replace(root, old, node) \ + prio_tree_replace(root, (struct prio_tree_node *) (old), \ + (struct prio_tree_node *) (node)) +#define raw_prio_tree_insert(root, node) \ + prio_tree_insert(root, (struct prio_tree_node *) (node)) +#define raw_prio_tree_remove(root, node) \ + prio_tree_remove(root, (struct prio_tree_node *) (node)) + +#endif /* _LINUX_PRIO_TREE_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h new file mode 100644 index 00000000..85c50730 --- /dev/null +++ b/include/linux/proc_fs.h @@ -0,0 +1,293 @@ +#ifndef _LINUX_PROC_FS_H +#define _LINUX_PROC_FS_H + +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/spinlock.h> +#include <linux/magic.h> +#include <linux/atomic.h> + +struct net; +struct completion; +struct mm_struct; + +/* + * The proc filesystem constants/structures + */ + +/* + * Offset of the first process in the /proc root directory.. 
+ */ +#define FIRST_PROCESS_ENTRY 256 + +/* Worst case buffer size needed for holding an integer. */ +#define PROC_NUMBUF 13 + +/* + * We always define these enumerators + */ + +enum { + PROC_ROOT_INO = 1, +}; + +/* + * This is not completely implemented yet. The idea is to + * create an in-memory tree (like the actual /proc filesystem + * tree) of these proc_dir_entries, so that we can dynamically + * add new files to /proc. + * + * The "next" pointer creates a linked list of one /proc directory, + * while parent/subdir create the directory structure (every + * /proc file has a parent, but "subdir" is NULL for all + * non-directory entries). + */ + +typedef int (read_proc_t)(char *page, char **start, off_t off, + int count, int *eof, void *data); +typedef int (write_proc_t)(struct file *file, const char __user *buffer, + unsigned long count, void *data); + +struct proc_dir_entry { + unsigned int low_ino; + umode_t mode; + nlink_t nlink; + uid_t uid; + gid_t gid; + loff_t size; + const struct inode_operations *proc_iops; + /* + * NULL ->proc_fops means "PDE is going away RSN" or + * "PDE is just created". In either case, e.g. ->read_proc won't be + * called because it's too late or too early, respectively. + * + * If you're allocating ->proc_fops dynamically, save a pointer + * somewhere. + */ + const struct file_operations *proc_fops; + struct proc_dir_entry *next, *parent, *subdir; + void *data; + read_proc_t *read_proc; + write_proc_t *write_proc; + atomic_t count; /* use count */ + int pde_users; /* number of callers into module in progress */ + struct completion *pde_unload_completion; + struct list_head pde_openers; /* who did ->open, but not ->release */ + spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ + u8 namelen; + char name[]; +}; + +enum kcore_type { + KCORE_TEXT, + KCORE_VMALLOC, + KCORE_RAM, + KCORE_VMEMMAP, + KCORE_OTHER, +}; + +struct kcore_list { + struct list_head list; + unsigned long addr; + size_t size; + int type; +}; + +struct vmcore { + struct list_head list; + unsigned long long paddr; + unsigned long long size; + loff_t offset; +}; + +#ifdef CONFIG_PROC_FS + +extern void proc_root_init(void); + +void proc_flush_task(struct task_struct *task); + +extern struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode, + struct proc_dir_entry *parent); +struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, + struct proc_dir_entry *parent, + const struct file_operations *proc_fops, + void *data); +extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); + +struct pid_namespace; + +extern int pid_ns_prepare_proc(struct pid_namespace *ns); +extern void pid_ns_release_proc(struct pid_namespace *ns); + +/* + * proc_tty.c + */ +struct tty_driver; +extern void proc_tty_init(void); +extern void proc_tty_register_driver(struct tty_driver *driver); +extern void proc_tty_unregister_driver(struct tty_driver *driver); + +/* + * proc_devtree.c + */ +#ifdef CONFIG_PROC_DEVICETREE +struct device_node; +struct property; +extern void proc_device_tree_init(void); +extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *); +extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop); +extern void proc_device_tree_remove_prop(struct proc_dir_entry *pde, + struct property *prop); +extern void proc_device_tree_update_prop(struct proc_dir_entry *pde, + struct property *newprop, + struct property *oldprop); +#endif /* CONFIG_PROC_DEVICETREE */ + +extern struct 
proc_dir_entry *proc_symlink(const char *, + struct proc_dir_entry *, const char *); +extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *); +extern struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode, + struct proc_dir_entry *parent); + +static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode, + struct proc_dir_entry *parent, const struct file_operations *proc_fops) +{ + return proc_create_data(name, mode, parent, proc_fops, NULL); +} + +static inline struct proc_dir_entry *create_proc_read_entry(const char *name, + umode_t mode, struct proc_dir_entry *base, + read_proc_t *read_proc, void * data) +{ + struct proc_dir_entry *res=create_proc_entry(name,mode,base); + if (res) { + res->read_proc=read_proc; + res->data=data; + } + return res; +} + +extern struct proc_dir_entry *proc_net_fops_create(struct net *net, + const char *name, umode_t mode, const struct file_operations *fops); +extern void proc_net_remove(struct net *net, const char *name); +extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, + struct proc_dir_entry *parent); + +extern struct file *proc_ns_fget(int fd); + +#else + +#define proc_net_fops_create(net, name, mode, fops) ({ (void)(mode), NULL; }) +static inline void proc_net_remove(struct net *net, const char *name) {} + +static inline void proc_flush_task(struct task_struct *task) +{ +} + +static inline struct proc_dir_entry *create_proc_entry(const char *name, + umode_t mode, struct proc_dir_entry *parent) { return NULL; } +static inline struct proc_dir_entry *proc_create(const char *name, + umode_t mode, struct proc_dir_entry *parent, + const struct file_operations *proc_fops) +{ + return NULL; +} +static inline struct proc_dir_entry *proc_create_data(const char *name, + umode_t mode, struct proc_dir_entry *parent, + const struct file_operations *proc_fops, void *data) +{ + return NULL; +} +#define remove_proc_entry(name, parent) do {} while (0) + +static inline struct proc_dir_entry *proc_symlink(const char *name, + struct proc_dir_entry *parent,const char *dest) {return NULL;} +static inline struct proc_dir_entry *proc_mkdir(const char *name, + struct proc_dir_entry *parent) {return NULL;} +static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, + umode_t mode, struct proc_dir_entry *parent) { return NULL; } + +static inline struct proc_dir_entry *create_proc_read_entry(const char *name, + umode_t mode, struct proc_dir_entry *base, + read_proc_t *read_proc, void * data) { return NULL; } + +struct tty_driver; +static inline void proc_tty_register_driver(struct tty_driver *driver) {}; +static inline void proc_tty_unregister_driver(struct tty_driver *driver) {}; + +static inline int pid_ns_prepare_proc(struct pid_namespace *ns) +{ + return 0; +} + +static inline void pid_ns_release_proc(struct pid_namespace *ns) +{ +} + +static inline struct file *proc_ns_fget(int fd) +{ + return ERR_PTR(-EINVAL); +} + +#endif /* CONFIG_PROC_FS */ + +#if !defined(CONFIG_PROC_KCORE) +static inline void +kclist_add(struct kcore_list *new, void *addr, size_t size, int type) +{ +} +#else +extern void kclist_add(struct kcore_list *, void *, size_t, int type); +#endif + +struct nsproxy; +struct proc_ns_operations { + const char *name; + int type; + void *(*get)(struct task_struct *task); + void (*put)(void *ns); + int (*install)(struct nsproxy *nsproxy, void *ns); +}; +extern const struct proc_ns_operations netns_operations; +extern const struct proc_ns_operations utsns_operations; 
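To illustrate the proc_create()/proc_create_data() interface above, a minimal sketch of a module exposing a read-only /proc entry through the seq_file helpers that proc users of this era normally pair with it. The entry name "foo_example" and the show routine are hypothetical.

/* Sketch: create /proc/foo_example backed by a seq_file show routine. */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

static int foo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello from /proc/foo_example\n");
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_show, NULL);
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init foo_init(void)
{
	if (!proc_create("foo_example", 0444, NULL, &foo_fops))
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	remove_proc_entry("foo_example", NULL);
}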
+extern const struct proc_ns_operations ipcns_operations; + +union proc_op { + int (*proc_get_link)(struct dentry *, struct path *); + int (*proc_read)(struct task_struct *task, char *page); + int (*proc_show)(struct seq_file *m, + struct pid_namespace *ns, struct pid *pid, + struct task_struct *task); +}; + +struct ctl_table_header; +struct ctl_table; + +struct proc_inode { + struct pid *pid; + int fd; + union proc_op op; + struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + struct ctl_table *sysctl_entry; + void *ns; + const struct proc_ns_operations *ns_ops; + struct inode vfs_inode; +}; + +static inline struct proc_inode *PROC_I(const struct inode *inode) +{ + return container_of(inode, struct proc_inode, vfs_inode); +} + +static inline struct proc_dir_entry *PDE(const struct inode *inode) +{ + return PROC_I(inode)->pde; +} + +static inline struct net *PDE_NET(struct proc_dir_entry *pde) +{ + return pde->parent->data; +} + +#endif /* _LINUX_PROC_FS_H */ diff --git a/include/linux/profile.h b/include/linux/profile.h new file mode 100644 index 00000000..a0fc3227 --- /dev/null +++ b/include/linux/profile.h @@ -0,0 +1,150 @@ +#ifndef _LINUX_PROFILE_H +#define _LINUX_PROFILE_H + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/cpumask.h> +#include <linux/cache.h> + +#include <asm/errno.h> + +#define CPU_PROFILING 1 +#define SCHED_PROFILING 2 +#define SLEEP_PROFILING 3 +#define KVM_PROFILING 4 + +struct proc_dir_entry; +struct pt_regs; +struct notifier_block; + +#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) +void create_prof_cpu_mask(struct proc_dir_entry *de); +int create_proc_profile(void); +#else +static inline void create_prof_cpu_mask(struct proc_dir_entry *de) +{ +} + +static inline int create_proc_profile(void) +{ + return 0; +} +#endif + +enum profile_type { + PROFILE_TASK_EXIT, + PROFILE_MUNMAP +}; + +#ifdef CONFIG_PROFILING + +extern int prof_on __read_mostly; + +/* init basic kernel profiler */ +int profile_init(void); +int profile_setup(char *str); +void profile_tick(int type); + +/* + * Add multiple profiler hits to a given address: + */ +void profile_hits(int type, void *ip, unsigned int nr_hits); + +/* + * Single profiler hit: + */ +static inline void profile_hit(int type, void *ip) +{ + /* + * Speedup for the common (no profiling enabled) case: + */ + if (unlikely(prof_on == type)) + profile_hits(type, ip, 1); +} + +struct task_struct; +struct mm_struct; + +/* task is in do_exit() */ +void profile_task_exit(struct task_struct * task); + +/* task is dead, free task struct ? Returns 1 if + * the task was taken, 0 if the task should be freed. 
+ */ +int profile_handoff_task(struct task_struct * task); + +/* sys_munmap */ +void profile_munmap(unsigned long addr); + +int task_handoff_register(struct notifier_block * n); +int task_handoff_unregister(struct notifier_block * n); + +int profile_event_register(enum profile_type, struct notifier_block * n); +int profile_event_unregister(enum profile_type, struct notifier_block * n); + +int register_timer_hook(int (*hook)(struct pt_regs *)); +void unregister_timer_hook(int (*hook)(struct pt_regs *)); + +struct pt_regs; + +#else + +#define prof_on 0 + +static inline int profile_init(void) +{ + return 0; +} + +static inline void profile_tick(int type) +{ + return; +} + +static inline void profile_hits(int type, void *ip, unsigned int nr_hits) +{ + return; +} + +static inline void profile_hit(int type, void *ip) +{ + return; +} + +static inline int task_handoff_register(struct notifier_block * n) +{ + return -ENOSYS; +} + +static inline int task_handoff_unregister(struct notifier_block * n) +{ + return -ENOSYS; +} + +static inline int profile_event_register(enum profile_type t, struct notifier_block * n) +{ + return -ENOSYS; +} + +static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n) +{ + return -ENOSYS; +} + +#define profile_task_exit(a) do { } while (0) +#define profile_handoff_task(a) (0) +#define profile_munmap(a) do { } while (0) + +static inline int register_timer_hook(int (*hook)(struct pt_regs *)) +{ + return -ENOSYS; +} + +static inline void unregister_timer_hook(int (*hook)(struct pt_regs *)) +{ + return; +} + +#endif /* CONFIG_PROFILING */ + +#endif /* _LINUX_PROFILE_H */ diff --git a/include/linux/proportions.h b/include/linux/proportions.h new file mode 100644 index 00000000..26a8a4ed --- /dev/null +++ b/include/linux/proportions.h @@ -0,0 +1,136 @@ +/* + * FLoating proportions + * + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + * + * This file contains the public data structure and API definitions. + */ + +#ifndef _LINUX_PROPORTIONS_H +#define _LINUX_PROPORTIONS_H + +#include <linux/percpu_counter.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> + +struct prop_global { + /* + * The period over which we differentiate + * + * period = 2^shift + */ + int shift; + /* + * The total event counter aka 'time'. + * + * Treated as an unsigned long; the lower 'shift - 1' bits are the + * counter bits, the remaining upper bits the period counter. + */ + struct percpu_counter events; +}; + +/* + * global proportion descriptor + * + * this is needed to consitently flip prop_global structures. 
+ */ +struct prop_descriptor { + int index; + struct prop_global pg[2]; + struct mutex mutex; /* serialize the prop_global switch */ +}; + +int prop_descriptor_init(struct prop_descriptor *pd, int shift); +void prop_change_shift(struct prop_descriptor *pd, int new_shift); + +/* + * ----- PERCPU ------ + */ + +struct prop_local_percpu { + /* + * the local events counter + */ + struct percpu_counter events; + + /* + * snapshot of the last seen global state + */ + int shift; + unsigned long period; + raw_spinlock_t lock; /* protect the snapshot state */ +}; + +int prop_local_init_percpu(struct prop_local_percpu *pl); +void prop_local_destroy_percpu(struct prop_local_percpu *pl); +void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); +void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl, + long *numerator, long *denominator); + +static inline +void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) +{ + unsigned long flags; + + local_irq_save(flags); + __prop_inc_percpu(pd, pl); + local_irq_restore(flags); +} + +/* + * Limit the time part in order to ensure there are some bits left for the + * cycle counter and fraction multiply. + */ +#if BITS_PER_LONG == 32 +#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4) +#else +#define PROP_MAX_SHIFT (BITS_PER_LONG/2) +#endif + +#define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1) +#define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT) + +void __prop_inc_percpu_max(struct prop_descriptor *pd, + struct prop_local_percpu *pl, long frac); + + +/* + * ----- SINGLE ------ + */ + +struct prop_local_single { + /* + * the local events counter + */ + unsigned long events; + + /* + * snapshot of the last seen global state + * and a lock protecting this state + */ + unsigned long period; + int shift; + raw_spinlock_t lock; /* protect the snapshot state */ +}; + +#define INIT_PROP_LOCAL_SINGLE(name) \ +{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ +} + +int prop_local_init_single(struct prop_local_single *pl); +void prop_local_destroy_single(struct prop_local_single *pl); +void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl); +void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl, + long *numerator, long *denominator); + +static inline +void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl) +{ + unsigned long flags; + + local_irq_save(flags); + __prop_inc_single(pd, pl); + local_irq_restore(flags); +} + +#endif /* _LINUX_PROPORTIONS_H */ diff --git a/include/linux/pstore.h b/include/linux/pstore.h new file mode 100644 index 00000000..e1461e14 --- /dev/null +++ b/include/linux/pstore.h @@ -0,0 +1,65 @@ +/* + * Persistent Storage - pstore.h + * + * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com> + * + * This code is the generic layer to export data records from platform + * level persistent storage via a file system. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
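Before the pstore interface continues, a sketch of the per-CPU floating-proportions API from proportions.h above: one global descriptor, one local counter, events charged against the local, and the local's recent share read back as a fraction. The shift value and the "writer" notion are illustrative only.

/* Sketch of the percpu floating-proportions API above. */
#include <linux/proportions.h>

static struct prop_descriptor ex_prop;
static struct prop_local_percpu ex_writer;

static int __init prop_example_init(void)
{
	int err;

	err = prop_descriptor_init(&ex_prop, 10);	/* period = 2^10 events */
	if (err)
		return err;
	return prop_local_init_percpu(&ex_writer);
}

static void prop_example_event(void)
{
	/* Charge one event to this local; IRQs are disabled internally. */
	prop_inc_percpu(&ex_prop, &ex_writer);
}

static void prop_example_fraction(void)
{
	long num, den;

	/* Recent share of this local: num/den of all counted events. */
	prop_fraction_percpu(&ex_prop, &ex_writer, &num, &den);
}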
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _LINUX_PSTORE_H +#define _LINUX_PSTORE_H + +#include <linux/time.h> +#include <linux/kmsg_dump.h> + +/* types */ +enum pstore_type_id { + PSTORE_TYPE_DMESG = 0, + PSTORE_TYPE_MCE = 1, + PSTORE_TYPE_UNKNOWN = 255 +}; + +struct pstore_info { + struct module *owner; + char *name; + spinlock_t buf_lock; /* serialize access to 'buf' */ + char *buf; + size_t bufsize; + struct mutex read_mutex; /* serialize open/read/close */ + int (*open)(struct pstore_info *psi); + int (*close)(struct pstore_info *psi); + ssize_t (*read)(u64 *id, enum pstore_type_id *type, + struct timespec *time, char **buf, + struct pstore_info *psi); + int (*write)(enum pstore_type_id type, + enum kmsg_dump_reason reason, u64 *id, + unsigned int part, size_t size, struct pstore_info *psi); + int (*erase)(enum pstore_type_id type, u64 id, + struct pstore_info *psi); + void *data; +}; + +#ifdef CONFIG_PSTORE +extern int pstore_register(struct pstore_info *); +#else +static inline int +pstore_register(struct pstore_info *psi) +{ + return -ENODEV; +} +#endif + +#endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/pti.h b/include/linux/pti.h new file mode 100644 index 00000000..b3ea01a3 --- /dev/null +++ b/include/linux/pti.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) Intel 2011 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * The PTI (Parallel Trace Interface) driver directs trace data routed from + * various parts in the system out through the Intel Penwell PTI port and + * out of the mobile device for analysis with a debugging tool + * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, + * compact JTAG, standard. + * + * This header file will allow other parts of the OS to use the + * interface to write out it's contents for debugging a mobile system. + */ + +#ifndef PTI_H_ +#define PTI_H_ + +/* offset for last dword of any PTI message. Part of MIPI P1149.7 */ +#define PTI_LASTDWORD_DTS 0x30 + +/* basic structure used as a write address to the PTI HW */ +struct pti_masterchannel { + u8 master; + u8 channel; +}; + +/* the following functions are defined in misc/pti.c */ +void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); +struct pti_masterchannel *pti_request_masterchannel(u8 type, + const char *thread_name); +void pti_release_masterchannel(struct pti_masterchannel *mc); + +#endif /*PTI_H_*/ diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h new file mode 100644 index 00000000..1dc420ba --- /dev/null +++ b/include/linux/ptp_classify.h @@ -0,0 +1,140 @@ +/* + * PTP 1588 support + * + * This file implements a BPF that recognizes PTP event messages. 
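For the pstore backend interface above (struct pstore_info and pstore_register()), a minimal registration sketch. The callbacks are stubs, the buffer size is illustrative, and the lock/mutex initialization reflects what backends of this era typically do before registering; a real backend would persist psi->buf to platform storage in its write callback.

/* Sketch: register a hypothetical "example" pstore backend. */
#include <linux/pstore.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

static char ex_buf[1024];

static int ex_open(struct pstore_info *psi) { return 0; }
static int ex_close(struct pstore_info *psi) { return 0; }

static ssize_t ex_read(u64 *id, enum pstore_type_id *type,
		       struct timespec *time, char **buf,
		       struct pstore_info *psi)
{
	return 0;	/* no more records */
}

static int ex_write(enum pstore_type_id type, enum kmsg_dump_reason reason,
		    u64 *id, unsigned int part, size_t size,
		    struct pstore_info *psi)
{
	/* Persist 'size' bytes from psi->buf to platform storage here. */
	return 0;
}

static int ex_erase(enum pstore_type_id type, u64 id, struct pstore_info *psi)
{
	return 0;
}

static struct pstore_info ex_pstore = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.buf		= ex_buf,
	.bufsize	= sizeof(ex_buf),
	.open		= ex_open,
	.close		= ex_close,
	.read		= ex_read,
	.write		= ex_write,
	.erase		= ex_erase,
};

static int __init ex_pstore_init(void)
{
	spin_lock_init(&ex_pstore.buf_lock);
	mutex_init(&ex_pstore.read_mutex);
	return pstore_register(&ex_pstore);
}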
+ * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _PTP_CLASSIFY_H_ +#define _PTP_CLASSIFY_H_ + +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/filter.h> +#ifdef __KERNEL__ +#include <linux/in.h> +#else +#include <netinet/in.h> +#endif + +#define PTP_CLASS_NONE 0x00 /* not a PTP event message */ +#define PTP_CLASS_V1 0x01 /* protocol version 1 */ +#define PTP_CLASS_V2 0x02 /* protocol version 2 */ +#define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */ +#define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ +#define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ +#define PTP_CLASS_L2 0x30 /* event in a L2 packet */ +#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged L2 packet */ +#define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */ + +#define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4) +#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /*probably DNE*/ +#define PTP_CLASS_V2_IPV4 (PTP_CLASS_V2 | PTP_CLASS_IPV4) +#define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6) +#define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2) +#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) + +#define PTP_EV_PORT 319 +#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ + +#define OFF_ETYPE 12 +#define OFF_IHL 14 +#define OFF_FRAG 20 +#define OFF_PROTO4 23 +#define OFF_NEXT 6 +#define OFF_UDP_DST 2 + +#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */ +#define OFF_PTP_SEQUENCE_ID 30 +#define OFF_PTP_CONTROL 32 /* PTPv1 only */ + +#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2) + +#define IP6_HLEN 40 +#define UDP_HLEN 8 + +#define RELOFF_DST4 (ETH_HLEN + OFF_UDP_DST) +#define OFF_DST6 (ETH_HLEN + IP6_HLEN + OFF_UDP_DST) +#define OFF_PTP6 (ETH_HLEN + IP6_HLEN + UDP_HLEN) + +#define OP_AND (BPF_ALU | BPF_AND | BPF_K) +#define OP_JEQ (BPF_JMP | BPF_JEQ | BPF_K) +#define OP_JSET (BPF_JMP | BPF_JSET | BPF_K) +#define OP_LDB (BPF_LD | BPF_B | BPF_ABS) +#define OP_LDH (BPF_LD | BPF_H | BPF_ABS) +#define OP_LDHI (BPF_LD | BPF_H | BPF_IND) +#define OP_LDX (BPF_LDX | BPF_B | BPF_MSH) +#define OP_OR (BPF_ALU | BPF_OR | BPF_K) +#define OP_RETA (BPF_RET | BPF_A) +#define OP_RETK (BPF_RET | BPF_K) + +static inline int ptp_filter_init(struct sock_filter *f, int len) +{ + if (OP_LDH == f[0].code) + return sk_chk_filter(f, len); + else + return 0; +} + +#define PTP_FILTER \ + {OP_LDH, 0, 0, OFF_ETYPE }, /* */ \ + {OP_JEQ, 0, 12, ETH_P_IP }, /* f goto L20 */ \ + {OP_LDB, 0, 0, OFF_PROTO4 }, /* */ \ + {OP_JEQ, 0, 9, IPPROTO_UDP }, /* f goto L10 */ \ + {OP_LDH, 0, 0, OFF_FRAG }, /* */ \ + {OP_JSET, 7, 0, 0x1fff }, /* t goto L11 */ \ + {OP_LDX, 0, 0, OFF_IHL }, /* */ \ + {OP_LDHI, 0, 0, RELOFF_DST4 }, /* */ \ + {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L12 */ \ + 
{OP_LDHI, 0, 0, ETH_HLEN + UDP_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_IPV4 }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L1x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ +/*L20*/ {OP_JEQ, 0, 9, ETH_P_IPV6 }, /* f goto L40 */ \ + {OP_LDB, 0, 0, ETH_HLEN + OFF_NEXT }, /* */ \ + {OP_JEQ, 0, 6, IPPROTO_UDP }, /* f goto L30 */ \ + {OP_LDH, 0, 0, OFF_DST6 }, /* */ \ + {OP_JEQ, 0, 4, PTP_EV_PORT }, /* f goto L31 */ \ + {OP_LDH, 0, 0, OFF_PTP6 }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ +/*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \ + {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \ + {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \ + {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ + {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \ + {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \ + {OP_LDB, 0, 0, ETH_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ + {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \ + {OP_LDH, 0, 0, ETH_HLEN }, /* */ \ + {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ + {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \ + {OP_RETA, 0, 0, 0 }, /* */ \ +/*L6x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, + +#endif diff --git a/include/linux/ptp_clock.h b/include/linux/ptp_clock.h new file mode 100644 index 00000000..94e981f8 --- /dev/null +++ b/include/linux/ptp_clock.h @@ -0,0 +1,84 @@ +/* + * PTP 1588 clock support - user space interface + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _PTP_CLOCK_H_ +#define _PTP_CLOCK_H_ + +#include <linux/ioctl.h> +#include <linux/types.h> + +/* PTP_xxx bits, for the flags field within the request structures. */ +#define PTP_ENABLE_FEATURE (1<<0) +#define PTP_RISING_EDGE (1<<1) +#define PTP_FALLING_EDGE (1<<2) + +/* + * struct ptp_clock_time - represents a time value + * + * The sign of the seconds field applies to the whole value. The + * nanoseconds field is always unsigned. The reserved field is + * included for sub-nanosecond resolution, should the demand for + * this ever appear. + * + */ +struct ptp_clock_time { + __s64 sec; /* seconds */ + __u32 nsec; /* nanoseconds */ + __u32 reserved; +}; + +struct ptp_clock_caps { + int max_adj; /* Maximum frequency adjustment in parts per billon. */ + int n_alarm; /* Number of programmable alarms. */ + int n_ext_ts; /* Number of external time stamp channels. */ + int n_per_out; /* Number of programmable periodic signals. */ + int pps; /* Whether the clock supports a PPS callback. */ + int rsv[15]; /* Reserved for future use. 
*/ +}; + +struct ptp_extts_request { + unsigned int index; /* Which channel to configure. */ + unsigned int flags; /* Bit field for PTP_xxx flags. */ + unsigned int rsv[2]; /* Reserved for future use. */ +}; + +struct ptp_perout_request { + struct ptp_clock_time start; /* Absolute start time. */ + struct ptp_clock_time period; /* Desired period, zero means disable. */ + unsigned int index; /* Which channel to configure. */ + unsigned int flags; /* Reserved for future use. */ + unsigned int rsv[4]; /* Reserved for future use. */ +}; + +#define PTP_CLK_MAGIC '=' + +#define PTP_CLOCK_GETCAPS _IOR(PTP_CLK_MAGIC, 1, struct ptp_clock_caps) +#define PTP_EXTTS_REQUEST _IOW(PTP_CLK_MAGIC, 2, struct ptp_extts_request) +#define PTP_PEROUT_REQUEST _IOW(PTP_CLK_MAGIC, 3, struct ptp_perout_request) +#define PTP_ENABLE_PPS _IOW(PTP_CLK_MAGIC, 4, int) + +struct ptp_extts_event { + struct ptp_clock_time t; /* Time event occured. */ + unsigned int index; /* Which channel produced the event. */ + unsigned int flags; /* Reserved for future use. */ + unsigned int rsv[2]; /* Reserved for future use. */ +}; + +#endif diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h new file mode 100644 index 00000000..dd2e44fb --- /dev/null +++ b/include/linux/ptp_clock_kernel.h @@ -0,0 +1,139 @@ +/* + * PTP 1588 clock support + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _PTP_CLOCK_KERNEL_H_ +#define _PTP_CLOCK_KERNEL_H_ + +#include <linux/ptp_clock.h> + + +struct ptp_clock_request { + enum { + PTP_CLK_REQ_EXTTS, + PTP_CLK_REQ_PEROUT, + PTP_CLK_REQ_PPS, + } type; + union { + struct ptp_extts_request extts; + struct ptp_perout_request perout; + }; +}; + +/** + * struct ptp_clock_info - decribes a PTP hardware clock + * + * @owner: The clock driver should set to THIS_MODULE. + * @name: A short name to identify the clock. + * @max_adj: The maximum possible frequency adjustment, in parts per billon. + * @n_alarm: The number of programmable alarms. + * @n_ext_ts: The number of external time stamp channels. + * @n_per_out: The number of programmable periodic signals. + * @pps: Indicates whether the clock supports a PPS callback. + * + * clock operations + * + * @adjfreq: Adjusts the frequency of the hardware clock. + * parameter delta: Desired period change in parts per billion. + * + * @adjtime: Shifts the time of the hardware clock. + * parameter delta: Desired change in nanoseconds. + * + * @gettime: Reads the current time from the hardware clock. + * parameter ts: Holds the result. + * + * @settime: Set the current time on the hardware clock. + * parameter ts: Time value to set. + * + * @enable: Request driver to enable or disable an ancillary feature. + * parameter request: Desired resource to enable or disable. + * parameter on: Caller passes one to enable or zero to disable. 
+ * + * Drivers should embed their ptp_clock_info within a private + * structure, obtaining a reference to it using container_of(). + * + * The callbacks must all return zero on success, non-zero otherwise. + */ + +struct ptp_clock_info { + struct module *owner; + char name[16]; + s32 max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int pps; + int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); + int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); + int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts); + int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts); + int (*enable)(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on); +}; + +struct ptp_clock; + +/** + * ptp_clock_register() - register a PTP hardware clock driver + * + * @info: Structure describing the new clock. + */ + +extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info); + +/** + * ptp_clock_unregister() - unregister a PTP hardware clock driver + * + * @ptp: The clock to remove from service. + */ + +extern int ptp_clock_unregister(struct ptp_clock *ptp); + + +enum ptp_clock_events { + PTP_CLOCK_ALARM, + PTP_CLOCK_EXTTS, + PTP_CLOCK_PPS, +}; + +/** + * struct ptp_clock_event - decribes a PTP hardware clock event + * + * @type: One of the ptp_clock_events enumeration values. + * @index: Identifies the source of the event. + * @timestamp: When the event occured. + */ + +struct ptp_clock_event { + int type; + int index; + u64 timestamp; +}; + +/** + * ptp_clock_event() - notify the PTP layer about an event + * + * @ptp: The clock obtained from ptp_clock_register(). + * @event: Message structure describing the event. + */ + +extern void ptp_clock_event(struct ptp_clock *ptp, + struct ptp_clock_event *event); + +#endif diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h new file mode 100644 index 00000000..5c719627 --- /dev/null +++ b/include/linux/ptrace.h @@ -0,0 +1,416 @@ +#ifndef _LINUX_PTRACE_H +#define _LINUX_PTRACE_H +/* ptrace.h */ +/* structs and defines to help the user use the ptrace system call. */ + +/* has the defines to get at the registers. */ + +#define PTRACE_TRACEME 0 +#define PTRACE_PEEKTEXT 1 +#define PTRACE_PEEKDATA 2 +#define PTRACE_PEEKUSR 3 +#define PTRACE_POKETEXT 4 +#define PTRACE_POKEDATA 5 +#define PTRACE_POKEUSR 6 +#define PTRACE_CONT 7 +#define PTRACE_KILL 8 +#define PTRACE_SINGLESTEP 9 + +#define PTRACE_ATTACH 16 +#define PTRACE_DETACH 17 + +#define PTRACE_SYSCALL 24 + +/* 0x4200-0x4300 are reserved for architecture-independent additions. */ +#define PTRACE_SETOPTIONS 0x4200 +#define PTRACE_GETEVENTMSG 0x4201 +#define PTRACE_GETSIGINFO 0x4202 +#define PTRACE_SETSIGINFO 0x4203 + +/* + * Generic ptrace interface that exports the architecture specific regsets + * using the corresponding NT_* types (which are also used in the core dump). + * Please note that the NT_PRSTATUS note type in a core dump contains a full + * 'struct elf_prstatus'. But the user_regset for NT_PRSTATUS contains just the + * elf_gregset_t that is the pr_reg field of 'struct elf_prstatus'. For all the + * other user_regset flavors, the user_regset layout and the ELF core dump note + * payload are exactly the same layout. + * + * This interface usage is as follows: + * struct iovec iov = { buf, len}; + * + * ret = ptrace(PTRACE_GETREGSET/PTRACE_SETREGSET, pid, NT_XXX_TYPE, &iov); + * + * On the successful completion, iov.len will be updated by the kernel, + * specifying how much the kernel has written/read to/from the user's iov.buf. 
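Returning to ptp_clock_kernel.h above, a minimal sketch of a driver registering a PTP hardware clock. Every callback is a stub that a real driver would back with hardware access; the clock name and max_adj value are illustrative, and the error path assumes the ERR_PTR convention used by ptp_clock_register() in this era.

/* Sketch: register a hypothetical PTP hardware clock. */
#include <linux/ptp_clock_kernel.h>
#include <linux/module.h>
#include <linux/err.h>

static int ex_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { return 0; }
static int ex_adjtime(struct ptp_clock_info *ptp, s64 delta) { return 0; }

static int ex_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
	return 0;
}

static int ex_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
{
	return 0;
}

static int ex_enable(struct ptp_clock_info *ptp,
		     struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;	/* no ancillary features in this sketch */
}

static struct ptp_clock_info ex_caps = {
	.owner		= THIS_MODULE,
	.name		= "example clock",
	.max_adj	= 512000,	/* illustrative limit, in ppb */
	.n_ext_ts	= 0,
	.pps		= 0,
	.adjfreq	= ex_adjfreq,
	.adjtime	= ex_adjtime,
	.gettime	= ex_gettime,
	.settime	= ex_settime,
	.enable		= ex_enable,
};

static struct ptp_clock *ex_clock;

static int __init ex_ptp_init(void)
{
	ex_clock = ptp_clock_register(&ex_caps);
	return IS_ERR(ex_clock) ? PTR_ERR(ex_clock) : 0;
}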
+ */ +#define PTRACE_GETREGSET 0x4204 +#define PTRACE_SETREGSET 0x4205 + +#define PTRACE_SEIZE 0x4206 +#define PTRACE_INTERRUPT 0x4207 +#define PTRACE_LISTEN 0x4208 + +/* Wait extended result codes for the above trace options. */ +#define PTRACE_EVENT_FORK 1 +#define PTRACE_EVENT_VFORK 2 +#define PTRACE_EVENT_CLONE 3 +#define PTRACE_EVENT_EXEC 4 +#define PTRACE_EVENT_VFORK_DONE 5 +#define PTRACE_EVENT_EXIT 6 +/* Extended result codes which enabled by means other than options. */ +#define PTRACE_EVENT_STOP 128 + +/* Options set using PTRACE_SETOPTIONS or using PTRACE_SEIZE @data param */ +#define PTRACE_O_TRACESYSGOOD 1 +#define PTRACE_O_TRACEFORK (1 << PTRACE_EVENT_FORK) +#define PTRACE_O_TRACEVFORK (1 << PTRACE_EVENT_VFORK) +#define PTRACE_O_TRACECLONE (1 << PTRACE_EVENT_CLONE) +#define PTRACE_O_TRACEEXEC (1 << PTRACE_EVENT_EXEC) +#define PTRACE_O_TRACEVFORKDONE (1 << PTRACE_EVENT_VFORK_DONE) +#define PTRACE_O_TRACEEXIT (1 << PTRACE_EVENT_EXIT) + +#define PTRACE_O_MASK 0x0000007f + +#include <asm/ptrace.h> + +#ifdef __KERNEL__ +/* + * Ptrace flags + * + * The owner ship rules for task->ptrace which holds the ptrace + * flags is simple. When a task is running it owns it's task->ptrace + * flags. When the a task is stopped the ptracer owns task->ptrace. + */ + +#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ +#define PT_PTRACED 0x00000001 +#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ +#define PT_PTRACE_CAP 0x00000004 /* ptracer can follow suid-exec */ + +#define PT_OPT_FLAG_SHIFT 3 +/* PT_TRACE_* event enable flags */ +#define PT_EVENT_FLAG(event) (1 << (PT_OPT_FLAG_SHIFT + (event))) +#define PT_TRACESYSGOOD PT_EVENT_FLAG(0) +#define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK) +#define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK) +#define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE) +#define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC) +#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE) +#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT) + +/* single stepping state bits (used on ARM and PA-RISC) */ +#define PT_SINGLESTEP_BIT 31 +#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT) +#define PT_BLOCKSTEP_BIT 30 +#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT) + +#include <linux/compiler.h> /* For unlikely. */ +#include <linux/sched.h> /* For struct task_struct. */ +#include <linux/err.h> /* for IS_ERR_VALUE */ +#include <linux/bug.h> /* For BUG_ON. */ + + +extern long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data); +extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); +extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); +extern void ptrace_disable(struct task_struct *); +extern int ptrace_check_attach(struct task_struct *task, bool ignore_state); +extern int ptrace_request(struct task_struct *child, long request, + unsigned long addr, unsigned long data); +extern void ptrace_notify(int exit_code); +extern void __ptrace_link(struct task_struct *child, + struct task_struct *new_parent); +extern void __ptrace_unlink(struct task_struct *child); +extern void exit_ptrace(struct task_struct *tracer); +#define PTRACE_MODE_READ 0x01 +#define PTRACE_MODE_ATTACH 0x02 +#define PTRACE_MODE_NOAUDIT 0x04 +/* Returns 0 on success, -errno on denial. */ +extern int __ptrace_may_access(struct task_struct *task, unsigned int mode); +/* Returns true on success, false on denial. 
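To show the tracer-side view of the PTRACE_SETOPTIONS and PTRACE_EVENT_* machinery defined above, a user-space sketch with a hypothetical trace_pid() helper: attach to a process, enable exec and exit event reporting, and decode the event code from the wait status (the event number lands in status >> 16 after a ptrace stop).

/* User-space sketch: attach and enable exec/exit event reporting. */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>

int trace_pid(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_ATTACH, pid, 0, 0) == -1) {
		perror("PTRACE_ATTACH");
		return -1;
	}
	if (waitpid(pid, &status, 0) == -1)	/* wait for the attach stop */
		return -1;

	/* PTRACE_O_TRACEEXEC and PTRACE_O_TRACEEXIT as defined above. */
	if (ptrace(PTRACE_SETOPTIONS, pid, 0,
		   PTRACE_O_TRACEEXEC | PTRACE_O_TRACEEXIT) == -1)
		return -1;

	if (ptrace(PTRACE_CONT, pid, 0, 0) == -1)
		return -1;

	/* At the next stop, the PTRACE_EVENT_* code is in status >> 16. */
	if (waitpid(pid, &status, 0) == -1)
		return -1;
	if (WIFSTOPPED(status))
		printf("stopped, event=%d\n", status >> 16);
	return 0;
}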
*/ +extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); + +static inline int ptrace_reparented(struct task_struct *child) +{ + return !same_thread_group(child->real_parent, child->parent); +} + +static inline void ptrace_unlink(struct task_struct *child) +{ + if (unlikely(child->ptrace)) + __ptrace_unlink(child); +} + +int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, + unsigned long data); +int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, + unsigned long data); + +/** + * ptrace_parent - return the task that is tracing the given task + * @task: task to consider + * + * Returns %NULL if no one is tracing @task, or the &struct task_struct + * pointer to its tracer. + * + * Must called under rcu_read_lock(). The pointer returned might be kept + * live only by RCU. During exec, this may be called with task_lock() held + * on @task, still held from when check_unsafe_exec() was called. + */ +static inline struct task_struct *ptrace_parent(struct task_struct *task) +{ + if (unlikely(task->ptrace)) + return rcu_dereference(task->parent); + return NULL; +} + +/** + * ptrace_event_enabled - test whether a ptrace event is enabled + * @task: ptracee of interest + * @event: %PTRACE_EVENT_* to test + * + * Test whether @event is enabled for ptracee @task. + * + * Returns %true if @event is enabled, %false otherwise. + */ +static inline bool ptrace_event_enabled(struct task_struct *task, int event) +{ + return task->ptrace & PT_EVENT_FLAG(event); +} + +/** + * ptrace_event - possibly stop for a ptrace event notification + * @event: %PTRACE_EVENT_* value to report + * @message: value for %PTRACE_GETEVENTMSG to return + * + * Check whether @event is enabled and, if so, report @event and @message + * to the ptrace parent. + * + * Called without locks. + */ +static inline void ptrace_event(int event, unsigned long message) +{ + if (unlikely(ptrace_event_enabled(current, event))) { + current->ptrace_message = message; + ptrace_notify((event << 8) | SIGTRAP); + } else if (event == PTRACE_EVENT_EXEC) { + /* legacy EXEC report via SIGTRAP */ + if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED) + send_sig(SIGTRAP, current, 0); + } +} + +/** + * ptrace_init_task - initialize ptrace state for a new child + * @child: new child task + * @ptrace: true if child should be ptrace'd by parent's tracer + * + * This is called immediately after adding @child to its parent's children + * list. @ptrace is false in the normal case, and true to ptrace @child. + * + * Called with current's siglock and write_lock_irq(&tasklist_lock) held. + */ +static inline void ptrace_init_task(struct task_struct *child, bool ptrace) +{ + INIT_LIST_HEAD(&child->ptrace_entry); + INIT_LIST_HEAD(&child->ptraced); +#ifdef CONFIG_HAVE_HW_BREAKPOINT + atomic_set(&child->ptrace_bp_refcnt, 1); +#endif + child->jobctl = 0; + child->ptrace = 0; + child->parent = child->real_parent; + + if (unlikely(ptrace) && current->ptrace) { + child->ptrace = current->ptrace; + __ptrace_link(child, current->parent); + + if (child->ptrace & PT_SEIZED) + task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); + else + sigaddset(&child->pending.signal, SIGSTOP); + + set_tsk_thread_flag(child, TIF_SIGPENDING); + } +} + +/** + * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped + * @task: task in %EXIT_DEAD state + * + * Called with write_lock(&tasklist_lock) held. 
+ */ +static inline void ptrace_release_task(struct task_struct *task) +{ + BUG_ON(!list_empty(&task->ptraced)); + ptrace_unlink(task); + BUG_ON(!list_empty(&task->ptrace_entry)); +} + +#ifndef force_successful_syscall_return +/* + * System call handlers that, upon successful completion, need to return a + * negative value should call force_successful_syscall_return() right before + * returning. On architectures where the syscall convention provides for a + * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly + * others), this macro can be used to ensure that the error flag will not get + * set. On architectures which do not support a separate error flag, the macro + * is a no-op and the spurious error condition needs to be filtered out by some + * other means (e.g., in user-level, by passing an extra argument to the + * syscall handler, or something along those lines). + */ +#define force_successful_syscall_return() do { } while (0) +#endif + +#ifndef is_syscall_success +/* + * On most systems we can tell if a syscall is a success based on if the retval + * is an error value. On some systems like ia64 and powerpc they have different + * indicators of success/failure and must define their own. + */ +#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs)))) +#endif + +/* + * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__. + * + * These do-nothing inlines are used when the arch does not + * implement single-step. The kerneldoc comments are here + * to document the interface for all arch definitions. + */ + +#ifndef arch_has_single_step +/** + * arch_has_single_step - does this CPU support user-mode single-step? + * + * If this is defined, then there must be function declarations or + * inlines for user_enable_single_step() and user_disable_single_step(). + * arch_has_single_step() should evaluate to nonzero iff the machine + * supports instruction single-step for user mode. + * It can be a constant or it can test a CPU feature bit. + */ +#define arch_has_single_step() (0) + +/** + * user_enable_single_step - single-step in user-mode task + * @task: either current or a task stopped in %TASK_TRACED + * + * This can only be called when arch_has_single_step() has returned nonzero. + * Set @task so that when it returns to user mode, it will trap after the + * next single instruction executes. If arch_has_block_step() is defined, + * this must clear the effects of user_enable_block_step() too. + */ +static inline void user_enable_single_step(struct task_struct *task) +{ + BUG(); /* This can never be called. */ +} + +/** + * user_disable_single_step - cancel user-mode single-step + * @task: either current or a task stopped in %TASK_TRACED + * + * Clear @task of the effects of user_enable_single_step() and + * user_enable_block_step(). This can be called whether or not either + * of those was ever called on @task, and even if arch_has_single_step() + * returned zero. + */ +static inline void user_disable_single_step(struct task_struct *task) +{ +} +#else +extern void user_enable_single_step(struct task_struct *); +extern void user_disable_single_step(struct task_struct *); +#endif /* arch_has_single_step */ + +#ifndef arch_has_block_step +/** + * arch_has_block_step - does this CPU support user-mode block-step? + * + * If this is defined, then there must be a function declaration or inline + * for user_enable_block_step(), and arch_has_single_step() must be defined + * too. 
arch_has_block_step() should evaluate to nonzero iff the machine + * supports step-until-branch for user mode. It can be a constant or it + * can test a CPU feature bit. + */ +#define arch_has_block_step() (0) + +/** + * user_enable_block_step - step until branch in user-mode task + * @task: either current or a task stopped in %TASK_TRACED + * + * This can only be called when arch_has_block_step() has returned nonzero, + * and will never be called when single-instruction stepping is being used. + * Set @task so that when it returns to user mode, it will trap after the + * next branch or trap taken. + */ +static inline void user_enable_block_step(struct task_struct *task) +{ + BUG(); /* This can never be called. */ +} +#else +extern void user_enable_block_step(struct task_struct *); +#endif /* arch_has_block_step */ + +#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO +extern void user_single_step_siginfo(struct task_struct *tsk, + struct pt_regs *regs, siginfo_t *info); +#else +static inline void user_single_step_siginfo(struct task_struct *tsk, + struct pt_regs *regs, siginfo_t *info) +{ + memset(info, 0, sizeof(*info)); + info->si_signo = SIGTRAP; +} +#endif + +#ifndef arch_ptrace_stop_needed +/** + * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called + * @code: current->exit_code value ptrace will stop with + * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with + * + * This is called with the siglock held, to decide whether or not it's + * necessary to release the siglock and call arch_ptrace_stop() with the + * same @code and @info arguments. It can be defined to a constant if + * arch_ptrace_stop() is never required, or always is. On machines where + * this makes sense, it should be defined to a quick test to optimize out + * calling arch_ptrace_stop() when it would be superfluous. For example, + * if the thread has not been back to user mode since the last stop, the + * thread state might indicate that nothing needs to be done. + */ +#define arch_ptrace_stop_needed(code, info) (0) +#endif + +#ifndef arch_ptrace_stop +/** + * arch_ptrace_stop - Do machine-specific work before stopping for ptrace + * @code: current->exit_code value ptrace will stop with + * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with + * + * This is called with no locks held when arch_ptrace_stop_needed() has + * just returned nonzero. It is allowed to block, e.g. for user memory + * access. The arch can have machine-specific work to be done before + * ptrace stops. On ia64, register backing store gets written back to user + * memory here. Since this can be costly (requires dropping the siglock), + * we only do it when the arch requires it for this particular stop, as + * indicated by arch_ptrace_stop_needed(). 
+ */ +#define arch_ptrace_stop(code, info) do { } while (0) +#endif + +extern int task_current_syscall(struct task_struct *target, long *callno, + unsigned long args[6], unsigned int maxargs, + unsigned long *sp, unsigned long *pc); + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +extern int ptrace_get_breakpoints(struct task_struct *tsk); +extern void ptrace_put_breakpoints(struct task_struct *tsk); +#else +static inline void ptrace_put_breakpoints(struct task_struct *tsk) { } +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + +#endif /* __KERNEL */ + +#endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h new file mode 100644 index 00000000..7c775751 --- /dev/null +++ b/include/linux/pwm.h @@ -0,0 +1,31 @@ +#ifndef __LINUX_PWM_H +#define __LINUX_PWM_H + +struct pwm_device; + +/* + * pwm_request - request a PWM device + */ +struct pwm_device *pwm_request(int pwm_id, const char *label); + +/* + * pwm_free - free a PWM device + */ +void pwm_free(struct pwm_device *pwm); + +/* + * pwm_config - change a PWM device configuration + */ +int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns); + +/* + * pwm_enable - start a PWM output toggling + */ +int pwm_enable(struct pwm_device *pwm); + +/* + * pwm_disable - stop a PWM output toggling + */ +void pwm_disable(struct pwm_device *pwm); + +#endif /* __LINUX_PWM_H */ diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h new file mode 100644 index 00000000..412b70c7 --- /dev/null +++ b/include/linux/pwm_backlight.h @@ -0,0 +1,24 @@ +/* + * Generic PWM backlight driver data - see drivers/video/backlight/pwm_bl.c + */ +#ifndef __LINUX_PWM_BACKLIGHT_H +#define __LINUX_PWM_BACKLIGHT_H + +#include <linux/backlight.h> + +struct platform_pwm_backlight_data { + int pwm_id; + int invert; + unsigned int max_brightness; + unsigned int dft_brightness; + unsigned int lth_brightness; + unsigned int hth_brightness; + unsigned int pwm_period_ns; + int (*init)(struct device *dev); + int (*notify)(struct device *dev, int brightness); + void (*notify_after)(struct device *dev, int brightness); + void (*exit)(struct device *dev); + int (*check_fb)(struct device *dev, struct fb_info *info); +}; + +#endif diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h new file mode 100644 index 00000000..18d75e79 --- /dev/null +++ b/include/linux/pxa168_eth.h @@ -0,0 +1,30 @@ +/* + *pxa168 ethernet platform device data definition file. + */ +#ifndef __LINUX_PXA168_ETH_H +#define __LINUX_PXA168_ETH_H + +struct pxa168_eth_platform_data { + int port_number; + int phy_addr; + + /* + * If speed is 0, then speed and duplex are autonegotiated. + */ + int speed; /* 0, SPEED_10, SPEED_100 */ + int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ + + /* + * Override default RX/TX queue sizes if nonzero. + */ + int rx_queue_size; + int tx_queue_size; + + /* + * init callback is used for board specific initialization + * e.g on Aspenite its used to initialize the PHY transceiver. + */ + int (*init)(void); +}; + +#endif /* __LINUX_PXA168_ETH_H */ diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h new file mode 100644 index 00000000..44835fb3 --- /dev/null +++ b/include/linux/pxa2xx_ssp.h @@ -0,0 +1,209 @@ +/* + * pxa2xx_ssp.h + * + * Copyright (C) 2003 Russell King, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
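For the simple PWM API in pwm.h above, a minimal sketch: request a channel, program period and duty cycle in nanoseconds, then start it. The PWM id, label and timing values are illustrative, and the error check assumes an ERR_PTR-returning pwm_request() implementation; adjust it for platforms that return NULL instead.

/* Sketch: drive PWM channel 0 at 1 kHz with a 50% duty cycle. */
#include <linux/pwm.h>
#include <linux/err.h>

static struct pwm_device *ex_pwm;

static int ex_pwm_start(void)
{
	int ret;

	ex_pwm = pwm_request(0, "example");	/* PWM id 0 */
	if (IS_ERR(ex_pwm))
		return PTR_ERR(ex_pwm);

	/* 500 us high in a 1 ms period. */
	ret = pwm_config(ex_pwm, 500000, 1000000);
	if (ret)
		goto err_free;

	ret = pwm_enable(ex_pwm);
	if (ret)
		goto err_free;
	return 0;

err_free:
	pwm_free(ex_pwm);
	return ret;
}

static void ex_pwm_stop(void)
{
	pwm_disable(ex_pwm);
	pwm_free(ex_pwm);
}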
+ * + * This driver supports the following PXA CPU/SSP ports:- + * + * PXA250 SSP + * PXA255 SSP, NSSP + * PXA26x SSP, NSSP, ASSP + * PXA27x SSP1, SSP2, SSP3 + * PXA3xx SSP1, SSP2, SSP3, SSP4 + */ + +#ifndef __LINUX_SSP_H +#define __LINUX_SSP_H + +#include <linux/list.h> +#include <linux/io.h> + +/* + * SSP Serial Port Registers + * PXA250, PXA255, PXA26x and PXA27x SSP controllers are all slightly different. + * PXA255, PXA26x and PXA27x have extra ports, registers and bits. + */ + +#define SSCR0 (0x00) /* SSP Control Register 0 */ +#define SSCR1 (0x04) /* SSP Control Register 1 */ +#define SSSR (0x08) /* SSP Status Register */ +#define SSITR (0x0C) /* SSP Interrupt Test Register */ +#define SSDR (0x10) /* SSP Data Write/Data Read Register */ + +#define SSTO (0x28) /* SSP Time Out Register */ +#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */ +#define SSTSA (0x30) /* SSP Tx Timeslot Active */ +#define SSRSA (0x34) /* SSP Rx Timeslot Active */ +#define SSTSS (0x38) /* SSP Timeslot Status */ +#define SSACD (0x3C) /* SSP Audio Clock Divider */ +#define SSACDD (0x40) /* SSP Audio Clock Dither Divider */ + +/* Common PXA2xx bits first */ +#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */ +#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */ +#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */ +#define SSCR0_Motorola (0x0 << 4) /* Motorola's Serial Peripheral Interface (SPI) */ +#define SSCR0_TI (0x1 << 4) /* Texas Instruments' Synchronous Serial Protocol (SSP) */ +#define SSCR0_National (0x2 << 4) /* National Microwire */ +#define SSCR0_ECS (1 << 6) /* External clock select */ +#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */ +#define SSCR0_SCR(x) ((x) << 8) /* Serial Clock Rate (mask) */ + +/* PXA27x, PXA3xx */ +#define SSCR0_EDSS (1 << 20) /* Extended data size select */ +#define SSCR0_NCS (1 << 21) /* Network clock select */ +#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun interrupt mask */ +#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */ +#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */ +#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */ +#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */ +#define SSCR0_ACS (1 << 30) /* Audio clock select */ +#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */ + + +#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */ +#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */ +#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */ +#define SSCR1_SPO (1 << 3) /* Motorola SPI SSPSCLK polarity setting */ +#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */ +#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */ + +#define SSSR_ALT_FRM_MASK 3 /* Masks the SFRM signal number */ +#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */ +#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */ +#define SSSR_BSY (1 << 4) /* SSP Busy */ +#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */ +#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */ +#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */ + +#ifdef CONFIG_ARCH_PXA +#define RX_THRESH_DFLT 8 +#define TX_THRESH_DFLT 8 + +#define SSSR_TFL_MASK (0xf << 8) /* Transmit FIFO Level mask */ +#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */ + +#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */ +#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ +#define SSCR1_RFT (0x00003c00) /* Receive 
FIFO Threshold (mask) */ +#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ + +#else + +#define RX_THRESH_DFLT 2 +#define TX_THRESH_DFLT 2 + +#define SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */ +#define SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */ + +#define SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */ +#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */ +#define SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */ +#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ +#endif + +/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ +#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ +#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ +#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */ +#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */ +#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */ +#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */ +#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */ +#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */ +#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */ +#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */ +#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */ +#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */ +#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */ +#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */ +#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */ +#define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interrupt Enable */ +#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */ +#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */ +#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */ + +#define SSSR_BCE (1 << 23) /* Bit Count Error */ +#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */ +#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */ +#define SSSR_EOC (1 << 20) /* End Of Chain */ +#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */ +#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */ + + +#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */ +#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */ +#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */ +#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */ +#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */ +#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */ +#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */ +#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */ +#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */ + +/* PXA3xx */ +#define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */ +#define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */ +#define SSPSP_TIMING_MASK (0x7f8001f0) + +#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */ +#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ +#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ +#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */ + +enum pxa_ssp_type { + SSP_UNDEFINED = 0, + PXA25x_SSP, /* pxa 210, 250, 255, 26x */ + PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */ + PXA27x_SSP, + PXA168_SSP, + CE4100_SSP, +}; + +struct ssp_device { + struct platform_device *pdev; + struct list_head node; + + struct clk *clk; + void __iomem *mmio_base; + unsigned long phys_base; + + const char *label; + int port_id; + int type; + int 
use_count; + int irq; + int drcmr_rx; + int drcmr_tx; +}; + +/** + * pxa_ssp_write_reg - Write to a SSP register + * + * @dev: SSP device to access + * @reg: Register to write to + * @val: Value to be written. + */ +static inline void pxa_ssp_write_reg(struct ssp_device *dev, u32 reg, u32 val) +{ + __raw_writel(val, dev->mmio_base + reg); +} + +/** + * pxa_ssp_read_reg - Read from a SSP register + * + * @dev: SSP device to access + * @reg: Register to read from + */ +static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg) +{ + return __raw_readl(dev->mmio_base + reg); +} + +struct ssp_device *pxa_ssp_request(int port, const char *label); +void pxa_ssp_free(struct ssp_device *); +#endif diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h new file mode 100644 index 00000000..8b9aee1a --- /dev/null +++ b/include/linux/qnx4_fs.h @@ -0,0 +1,88 @@ +/* + * Name : qnx4_fs.h + * Author : Richard Frowijn + * Function : qnx4 global filesystem definitions + * History : 23-03-1998 created + */ +#ifndef _LINUX_QNX4_FS_H +#define _LINUX_QNX4_FS_H + +#include <linux/types.h> +#include <linux/qnxtypes.h> +#include <linux/magic.h> + +#define QNX4_ROOT_INO 1 + +#define QNX4_MAX_XTNTS_PER_XBLK 60 +/* for di_status */ +#define QNX4_FILE_USED 0x01 +#define QNX4_FILE_MODIFIED 0x02 +#define QNX4_FILE_BUSY 0x04 +#define QNX4_FILE_LINK 0x08 +#define QNX4_FILE_INODE 0x10 +#define QNX4_FILE_FSYSCLEAN 0x20 + +#define QNX4_I_MAP_SLOTS 8 +#define QNX4_Z_MAP_SLOTS 64 +#define QNX4_VALID_FS 0x0001 /* Clean fs. */ +#define QNX4_ERROR_FS 0x0002 /* fs has errors. */ +#define QNX4_BLOCK_SIZE 0x200 /* blocksize of 512 bytes */ +#define QNX4_BLOCK_SIZE_BITS 9 /* blocksize shift */ +#define QNX4_DIR_ENTRY_SIZE 0x040 /* dir entry size of 64 bytes */ +#define QNX4_DIR_ENTRY_SIZE_BITS 6 /* dir entry size shift */ +#define QNX4_XBLK_ENTRY_SIZE 0x200 /* xblk entry size */ +#define QNX4_INODES_PER_BLOCK 0x08 /* 512 / 64 */ + +/* for filenames */ +#define QNX4_SHORT_NAME_MAX 16 +#define QNX4_NAME_MAX 48 + +/* + * This is the original qnx4 inode layout on disk. 
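+ *
+ * Each entry below is QNX4_DIR_ENTRY_SIZE (64) bytes, so QNX4_INODES_PER_BLOCK
+ * (8) of them fit in one QNX4_BLOCK_SIZE (512 byte) block.  As an illustrative
+ * sketch (not part of the original header; "ino", "blk" and "off" are made-up
+ * names), the block-relative position of entry number ino follows directly
+ * from the shift constants above:
+ *
+ *	blk = ino >> (QNX4_BLOCK_SIZE_BITS - QNX4_DIR_ENTRY_SIZE_BITS);
+ *	off = (ino & (QNX4_INODES_PER_BLOCK - 1)) << QNX4_DIR_ENTRY_SIZE_BITS;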
+ */ +struct qnx4_inode_entry { + char di_fname[QNX4_SHORT_NAME_MAX]; + qnx4_off_t di_size; + qnx4_xtnt_t di_first_xtnt; + __le32 di_xblk; + __le32 di_ftime; + __le32 di_mtime; + __le32 di_atime; + __le32 di_ctime; + qnx4_nxtnt_t di_num_xtnts; + qnx4_mode_t di_mode; + qnx4_muid_t di_uid; + qnx4_mgid_t di_gid; + qnx4_nlink_t di_nlink; + __u8 di_zero[4]; + qnx4_ftype_t di_type; + __u8 di_status; +}; + +struct qnx4_link_info { + char dl_fname[QNX4_NAME_MAX]; + __le32 dl_inode_blk; + __u8 dl_inode_ndx; + __u8 dl_spare[10]; + __u8 dl_status; +}; + +struct qnx4_xblk { + __le32 xblk_next_xblk; + __le32 xblk_prev_xblk; + __u8 xblk_num_xtnts; + __u8 xblk_spare[3]; + __le32 xblk_num_blocks; + qnx4_xtnt_t xblk_xtnts[QNX4_MAX_XTNTS_PER_XBLK]; + char xblk_signature[8]; + qnx4_xtnt_t xblk_first_xtnt; +}; + +struct qnx4_super_block { + struct qnx4_inode_entry RootDir; + struct qnx4_inode_entry Inode; + struct qnx4_inode_entry Boot; + struct qnx4_inode_entry AltBoot; +}; + +#endif diff --git a/include/linux/qnx6_fs.h b/include/linux/qnx6_fs.h new file mode 100644 index 00000000..26049eab --- /dev/null +++ b/include/linux/qnx6_fs.h @@ -0,0 +1,134 @@ +/* + * Name : qnx6_fs.h + * Author : Kai Bankett + * Function : qnx6 global filesystem definitions + * History : 17-01-2012 created + */ +#ifndef _LINUX_QNX6_FS_H +#define _LINUX_QNX6_FS_H + +#include <linux/types.h> +#include <linux/magic.h> + +#define QNX6_ROOT_INO 1 + +/* for di_status */ +#define QNX6_FILE_DIRECTORY 0x01 +#define QNX6_FILE_DELETED 0x02 +#define QNX6_FILE_NORMAL 0x03 + +#define QNX6_SUPERBLOCK_SIZE 0x200 /* superblock always is 512 bytes */ +#define QNX6_SUPERBLOCK_AREA 0x1000 /* area reserved for superblock */ +#define QNX6_BOOTBLOCK_SIZE 0x2000 /* heading bootblock area */ +#define QNX6_DIR_ENTRY_SIZE 0x20 /* dir entry size of 32 bytes */ +#define QNX6_INODE_SIZE 0x80 /* each inode is 128 bytes */ +#define QNX6_INODE_SIZE_BITS 7 /* inode entry size shift */ + +#define QNX6_NO_DIRECT_POINTERS 16 /* 16 blockptrs in sbl/inode */ +#define QNX6_PTR_MAX_LEVELS 5 /* maximum indirect levels */ + +/* for filenames */ +#define QNX6_SHORT_NAME_MAX 27 +#define QNX6_LONG_NAME_MAX 510 + +/* list of mount options */ +#define QNX6_MOUNT_MMI_FS 0x010000 /* mount as Audi MMI 3G fs */ + +/* + * This is the original qnx6 inode layout on disk. + * Each inode is 128 byte long. + */ +struct qnx6_inode_entry { + __fs64 di_size; + __fs32 di_uid; + __fs32 di_gid; + __fs32 di_ftime; + __fs32 di_mtime; + __fs32 di_atime; + __fs32 di_ctime; + __fs16 di_mode; + __fs16 di_ext_mode; + __fs32 di_block_ptr[QNX6_NO_DIRECT_POINTERS]; + __u8 di_filelevels; + __u8 di_status; + __u8 di_unknown2[2]; + __fs32 di_zero2[6]; +}; + +/* + * Each directory entry is maximum 32 bytes long. + * If more characters or special characters required it is stored + * in the longfilenames structure. 
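+ *
+ * Hedged note (not stated in this header, but consistent with how the
+ * structures below fit together): a directory reader can treat de_size as
+ * the discriminator between the two entry forms, e.g.
+ *
+ *	if (de->de_size > QNX6_SHORT_NAME_MAX)
+ *		lfde = (struct qnx6_long_dir_entry *)de;
+ *
+ * with the name then fetched from the referenced qnx6_long_filename block;
+ * "de" and "lfde" are illustrative names only.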
+ */ +struct qnx6_dir_entry { + __fs32 de_inode; + __u8 de_size; + char de_fname[QNX6_SHORT_NAME_MAX]; +}; + +/* + * Longfilename direntries have a different structure + */ +struct qnx6_long_dir_entry { + __fs32 de_inode; + __u8 de_size; + __u8 de_unknown[3]; + __fs32 de_long_inode; + __fs32 de_checksum; +}; + +struct qnx6_long_filename { + __fs16 lf_size; + __u8 lf_fname[QNX6_LONG_NAME_MAX]; +}; + +struct qnx6_root_node { + __fs64 size; + __fs32 ptr[QNX6_NO_DIRECT_POINTERS]; + __u8 levels; + __u8 mode; + __u8 spare[6]; +}; + +struct qnx6_super_block { + __fs32 sb_magic; + __fs32 sb_checksum; + __fs64 sb_serial; + __fs32 sb_ctime; /* time the fs was created */ + __fs32 sb_atime; /* last access time */ + __fs32 sb_flags; + __fs16 sb_version1; /* filesystem version information */ + __fs16 sb_version2; /* filesystem version information */ + __u8 sb_volumeid[16]; + __fs32 sb_blocksize; + __fs32 sb_num_inodes; + __fs32 sb_free_inodes; + __fs32 sb_num_blocks; + __fs32 sb_free_blocks; + __fs32 sb_allocgroup; + struct qnx6_root_node Inode; + struct qnx6_root_node Bitmap; + struct qnx6_root_node Longfile; + struct qnx6_root_node Unknown; +}; + +/* Audi MMI 3G superblock layout is different to plain qnx6 */ +struct qnx6_mmi_super_block { + __fs32 sb_magic; + __fs32 sb_checksum; + __fs64 sb_serial; + __u8 sb_spare0[12]; + __u8 sb_id[12]; + __fs32 sb_blocksize; + __fs32 sb_num_inodes; + __fs32 sb_free_inodes; + __fs32 sb_num_blocks; + __fs32 sb_free_blocks; + __u8 sb_spare1[4]; + struct qnx6_root_node Inode; + struct qnx6_root_node Bitmap; + struct qnx6_root_node Longfile; + struct qnx6_root_node Unknown; +}; + +#endif diff --git a/include/linux/qnxtypes.h b/include/linux/qnxtypes.h new file mode 100644 index 00000000..bebbe5cc --- /dev/null +++ b/include/linux/qnxtypes.h @@ -0,0 +1,28 @@ +/* + * Name : qnxtypes.h + * Author : Richard Frowijn + * Function : standard qnx types + * History : 22-03-1998 created + * + */ + +#ifndef _QNX4TYPES_H +#define _QNX4TYPES_H + +#include <linux/types.h> + +typedef __le16 qnx4_nxtnt_t; +typedef __u8 qnx4_ftype_t; + +typedef struct { + __le32 xtnt_blk; + __le32 xtnt_size; +} qnx4_xtnt_t; + +typedef __le16 qnx4_mode_t; +typedef __le16 qnx4_muid_t; +typedef __le16 qnx4_mgid_t; +typedef __le32 qnx4_off_t; +typedef __le16 qnx4_nlink_t; + +#endif diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h new file mode 100644 index 00000000..bd466439 --- /dev/null +++ b/include/linux/quicklist.h @@ -0,0 +1,93 @@ +#ifndef LINUX_QUICKLIST_H +#define LINUX_QUICKLIST_H +/* + * Fast allocations and disposal of pages. Pages must be in the condition + * as needed after allocation when they are freed. Per cpu lists of pages + * are kept that only contain node local pages. + * + * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com> + */ +#include <linux/kernel.h> +#include <linux/gfp.h> +#include <linux/percpu.h> + +#ifdef CONFIG_QUICKLIST + +struct quicklist { + void *page; + int nr_pages; +}; + +DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; + +/* + * The two key functions quicklist_alloc and quicklist_free are inline so + * that they may be custom compiled for the platform. + * Specifying a NULL ctor can remove constructor support. Specifying + * a constant quicklist allows the determination of the exact address + * in the per cpu area. + * + * The fast patch in quicklist_alloc touched only a per cpu cacheline and + * the first cacheline of the page itself. There is minmal overhead involved. 
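+ *
+ * Illustrative usage (a sketch, not part of the original header), assuming
+ * an architecture that caches zeroed page-table pages in quicklist 0:
+ *
+ *	void *pgd = quicklist_alloc(0, GFP_KERNEL, NULL);
+ *	...
+ *	quicklist_free(0, NULL, pgd);
+ *	quicklist_trim(0, NULL, 25, 16);
+ *
+ * where the trim call shrinks the per-cpu cache back toward 25 spare pages,
+ * freeing at most 16 of them per invocation.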
+ */ +static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) +{ + struct quicklist *q; + void **p = NULL; + + q =&get_cpu_var(quicklist)[nr]; + p = q->page; + if (likely(p)) { + q->page = p[0]; + p[0] = NULL; + q->nr_pages--; + } + put_cpu_var(quicklist); + if (likely(p)) + return p; + + p = (void *)__get_free_page(flags | __GFP_ZERO); + if (ctor && p) + ctor(p); + return p; +} + +static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p, + struct page *page) +{ + struct quicklist *q; + + q = &get_cpu_var(quicklist)[nr]; + *(void **)p = q->page; + q->page = p; + q->nr_pages++; + put_cpu_var(quicklist); +} + +static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp) +{ + __quicklist_free(nr, dtor, pp, virt_to_page(pp)); +} + +static inline void quicklist_free_page(int nr, void (*dtor)(void *), + struct page *page) +{ + __quicklist_free(nr, dtor, page_address(page), page); +} + +void quicklist_trim(int nr, void (*dtor)(void *), + unsigned long min_pages, unsigned long max_free); + +unsigned long quicklist_total_size(void); + +#else + +static inline unsigned long quicklist_total_size(void) +{ + return 0; +} + +#endif + +#endif /* LINUX_QUICKLIST_H */ + diff --git a/include/linux/quota.h b/include/linux/quota.h new file mode 100644 index 00000000..c09fa042 --- /dev/null +++ b/include/linux/quota.h @@ -0,0 +1,423 @@ +/* + * Copyright (c) 1982, 1986 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Robert Elz at The University of Melbourne. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _LINUX_QUOTA_ +#define _LINUX_QUOTA_ + +#include <linux/errno.h> +#include <linux/types.h> + +#define __DQUOT_VERSION__ "dquot_6.5.2" + +#define MAXQUOTAS 2 +#define USRQUOTA 0 /* element used for user quotas */ +#define GRPQUOTA 1 /* element used for group quotas */ + +/* + * Definitions for the default names of the quotas files. 
+ */ +#define INITQFNAMES { \ + "user", /* USRQUOTA */ \ + "group", /* GRPQUOTA */ \ + "undefined", \ +}; + +/* + * Command definitions for the 'quotactl' system call. + * The commands are broken into a main command defined below + * and a subcommand that is used to convey the type of + * quota that is being manipulated (see above). + */ +#define SUBCMDMASK 0x00ff +#define SUBCMDSHIFT 8 +#define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK)) + +#define Q_SYNC 0x800001 /* sync disk copy of a filesystems quotas */ +#define Q_QUOTAON 0x800002 /* turn quotas on */ +#define Q_QUOTAOFF 0x800003 /* turn quotas off */ +#define Q_GETFMT 0x800004 /* get quota format used on given filesystem */ +#define Q_GETINFO 0x800005 /* get information about quota files */ +#define Q_SETINFO 0x800006 /* set information about quota files */ +#define Q_GETQUOTA 0x800007 /* get user quota structure */ +#define Q_SETQUOTA 0x800008 /* set user quota structure */ + +/* Quota format type IDs */ +#define QFMT_VFS_OLD 1 +#define QFMT_VFS_V0 2 +#define QFMT_OCFS2 3 +#define QFMT_VFS_V1 4 + +/* Size of block in which space limits are passed through the quota + * interface */ +#define QIF_DQBLKSIZE_BITS 10 +#define QIF_DQBLKSIZE (1 << QIF_DQBLKSIZE_BITS) + +/* + * Quota structure used for communication with userspace via quotactl + * Following flags are used to specify which fields are valid + */ +enum { + QIF_BLIMITS_B = 0, + QIF_SPACE_B, + QIF_ILIMITS_B, + QIF_INODES_B, + QIF_BTIME_B, + QIF_ITIME_B, +}; + +#define QIF_BLIMITS (1 << QIF_BLIMITS_B) +#define QIF_SPACE (1 << QIF_SPACE_B) +#define QIF_ILIMITS (1 << QIF_ILIMITS_B) +#define QIF_INODES (1 << QIF_INODES_B) +#define QIF_BTIME (1 << QIF_BTIME_B) +#define QIF_ITIME (1 << QIF_ITIME_B) +#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS) +#define QIF_USAGE (QIF_SPACE | QIF_INODES) +#define QIF_TIMES (QIF_BTIME | QIF_ITIME) +#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES) + +struct if_dqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; +}; + +/* + * Structure used for setting quota information about file via quotactl + * Following flags are used to specify which fields are valid + */ +#define IIF_BGRACE 1 +#define IIF_IGRACE 2 +#define IIF_FLAGS 4 +#define IIF_ALL (IIF_BGRACE | IIF_IGRACE | IIF_FLAGS) + +struct if_dqinfo { + __u64 dqi_bgrace; + __u64 dqi_igrace; + __u32 dqi_flags; + __u32 dqi_valid; +}; + +/* + * Definitions for quota netlink interface + */ +#define QUOTA_NL_NOWARN 0 +#define QUOTA_NL_IHARDWARN 1 /* Inode hardlimit reached */ +#define QUOTA_NL_ISOFTLONGWARN 2 /* Inode grace time expired */ +#define QUOTA_NL_ISOFTWARN 3 /* Inode softlimit reached */ +#define QUOTA_NL_BHARDWARN 4 /* Block hardlimit reached */ +#define QUOTA_NL_BSOFTLONGWARN 5 /* Block grace time expired */ +#define QUOTA_NL_BSOFTWARN 6 /* Block softlimit reached */ +#define QUOTA_NL_IHARDBELOW 7 /* Usage got below inode hardlimit */ +#define QUOTA_NL_ISOFTBELOW 8 /* Usage got below inode softlimit */ +#define QUOTA_NL_BHARDBELOW 9 /* Usage got below block hardlimit */ +#define QUOTA_NL_BSOFTBELOW 10 /* Usage got below block softlimit */ + +enum { + QUOTA_NL_C_UNSPEC, + QUOTA_NL_C_WARNING, + __QUOTA_NL_C_MAX, +}; +#define QUOTA_NL_C_MAX (__QUOTA_NL_C_MAX - 1) + +enum { + QUOTA_NL_A_UNSPEC, + QUOTA_NL_A_QTYPE, + QUOTA_NL_A_EXCESS_ID, + QUOTA_NL_A_WARNING, + QUOTA_NL_A_DEV_MAJOR, + QUOTA_NL_A_DEV_MINOR, + 
QUOTA_NL_A_CAUSED_ID, + __QUOTA_NL_A_MAX, +}; +#define QUOTA_NL_A_MAX (__QUOTA_NL_A_MAX - 1) + + +#ifdef __KERNEL__ +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/rwsem.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/percpu_counter.h> + +#include <linux/dqblk_xfs.h> +#include <linux/dqblk_v1.h> +#include <linux/dqblk_v2.h> + +#include <linux/atomic.h> + +typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ +typedef long long qsize_t; /* Type in which we store sizes */ + +extern spinlock_t dq_data_lock; + +/* Maximal numbers of writes for quota operation (insert/delete/update) + * (over VFS all formats) */ +#define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC) +#define DQUOT_INIT_REWRITE max(V1_INIT_REWRITE, V2_INIT_REWRITE) +#define DQUOT_DEL_ALLOC max(V1_DEL_ALLOC, V2_DEL_ALLOC) +#define DQUOT_DEL_REWRITE max(V1_DEL_REWRITE, V2_DEL_REWRITE) + +/* + * Data for one user/group kept in memory + */ +struct mem_dqblk { + qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ + qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */ + qsize_t dqb_curspace; /* current used space */ + qsize_t dqb_rsvspace; /* current reserved space for delalloc*/ + qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ + qsize_t dqb_isoftlimit; /* preferred inode limit */ + qsize_t dqb_curinodes; /* current # allocated inodes */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive inode use */ +}; + +/* + * Data for one quotafile kept in memory + */ +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; /* Id of the dqi_format - used when turning + * quotas on after remount RW */ + struct list_head dqi_dirty_list; /* List of dirty dquots */ + unsigned long dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + qsize_t dqi_maxblimit; + qsize_t dqi_maxilimit; + void *dqi_priv; +}; + +struct super_block; + +#define DQF_MASK 0xffff /* Mask for format specific flags */ +#define DQF_GETINFO_MASK 0x1ffff /* Mask for flags passed to userspace */ +#define DQF_SETINFO_MASK 0xffff /* Mask for flags modifiable from userspace */ +#define DQF_SYS_FILE_B 16 +#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B) /* Quota file stored as system file */ +#define DQF_INFO_DIRTY_B 31 +#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? 
*/ + +extern void mark_info_dirty(struct super_block *sb, int type); +static inline int info_dirty(struct mem_dqinfo *info) +{ + return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags); +} + +enum { + DQST_LOOKUPS, + DQST_DROPS, + DQST_READS, + DQST_WRITES, + DQST_CACHE_HITS, + DQST_ALLOC_DQUOTS, + DQST_FREE_DQUOTS, + DQST_SYNCS, + _DQST_DQSTAT_LAST +}; + +struct dqstats { + int stat[_DQST_DQSTAT_LAST]; + struct percpu_counter counter[_DQST_DQSTAT_LAST]; +}; + +extern struct dqstats *dqstats_pcpu; +extern struct dqstats dqstats; + +static inline void dqstats_inc(unsigned int type) +{ + percpu_counter_inc(&dqstats.counter[type]); +} + +static inline void dqstats_dec(unsigned int type) +{ + percpu_counter_dec(&dqstats.counter[type]); +} + +#define DQ_MOD_B 0 /* dquot modified since read */ +#define DQ_BLKS_B 1 /* uid/gid has been warned about blk limit */ +#define DQ_INODES_B 2 /* uid/gid has been warned about inode limit */ +#define DQ_FAKE_B 3 /* no limits only usage */ +#define DQ_READ_B 4 /* dquot was read into memory */ +#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */ +#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\ + * for the mask of entries set via SETQUOTA\ + * quotactl. They are set under dq_data_lock\ + * and the quota format handling dquot can\ + * clear them when it sees fit. */ + +struct dquot { + struct hlist_node dq_hash; /* Hash list in memory */ + struct list_head dq_inuse; /* List of all quotas */ + struct list_head dq_free; /* Free list element */ + struct list_head dq_dirty; /* List of dirty dquots */ + struct mutex dq_lock; /* dquot IO lock */ + atomic_t dq_count; /* Use count */ + wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */ + struct super_block *dq_sb; /* superblock this applies to */ + unsigned int dq_id; /* ID this applies to (uid, gid) */ + loff_t dq_off; /* Offset of dquot on disk */ + unsigned long dq_flags; /* See DQ_* */ + short dq_type; /* Type of quota */ + struct mem_dqblk dq_dqb; /* Diskquota usage */ +}; + +/* Operations which must be implemented by each quota format */ +struct quota_format_ops { + int (*check_quota_file)(struct super_block *sb, int type); /* Detect whether file is in our format */ + int (*read_file_info)(struct super_block *sb, int type); /* Read main info about file - called on quotaon() */ + int (*write_file_info)(struct super_block *sb, int type); /* Write main info about file */ + int (*free_file_info)(struct super_block *sb, int type); /* Called on quotaoff() */ + int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */ + int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */ + int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */ +}; + +/* Operations working with dquots */ +struct dquot_operations { + int (*write_dquot) (struct dquot *); /* Ordinary dquot write */ + struct dquot *(*alloc_dquot)(struct super_block *, int); /* Allocate memory for new dquot */ + void (*destroy_dquot)(struct dquot *); /* Free memory for dquot */ + int (*acquire_dquot) (struct dquot *); /* Quota is going to be created on disk */ + int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */ + int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */ + int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */ + /* get reserved quota for delayed alloc, value returned is managed by + * quota code only */ + qsize_t *(*get_reserved_space) (struct inode 
*); +}; + +struct path; + +/* Operations handling requests from userspace */ +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, struct path *); + int (*quota_on_meta)(struct super_block *, int, int); + int (*quota_off)(struct super_block *, int); + int (*quota_sync)(struct super_block *, int, int); + int (*get_info)(struct super_block *, int, struct if_dqinfo *); + int (*set_info)(struct super_block *, int, struct if_dqinfo *); + int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *); + int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *); + int (*get_xstate)(struct super_block *, struct fs_quota_stat *); + int (*set_xstate)(struct super_block *, unsigned int, int); +}; + +struct quota_format_type { + int qf_fmt_id; /* Quota format id */ + const struct quota_format_ops *qf_ops; /* Operations of format */ + struct module *qf_owner; /* Module implementing quota format */ + struct quota_format_type *qf_next; +}; + +/* Quota state flags - they actually come in two flavors - for users and groups */ +enum { + _DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */ + _DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */ + _DQUOT_SUSPENDED, /* User diskquotas are off, but + * we have necessary info in + * memory to turn them on */ + _DQUOT_STATE_FLAGS +}; +#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED) +#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED) +#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED) +#define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \ + DQUOT_SUSPENDED) +/* Other quota flags */ +#define DQUOT_STATE_LAST (_DQUOT_STATE_FLAGS * MAXQUOTAS) +#define DQUOT_QUOTA_SYS_FILE (1 << DQUOT_STATE_LAST) + /* Quota file is a special + * system file and user cannot + * touch it. 
Filesystem is + * responsible for setting + * S_NOQUOTA, S_NOATIME flags + */ +#define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1)) + /* Allow negative quota usage */ + +static inline unsigned int dquot_state_flag(unsigned int flags, int type) +{ + return flags << _DQUOT_STATE_FLAGS * type; +} + +static inline unsigned int dquot_generic_flag(unsigned int flags, int type) +{ + return (flags >> _DQUOT_STATE_FLAGS * type) & DQUOT_STATE_FLAGS; +} + +#ifdef CONFIG_QUOTA_NETLINK_INTERFACE +extern void quota_send_warning(short type, unsigned int id, dev_t dev, + const char warntype); +#else +static inline void quota_send_warning(short type, unsigned int id, dev_t dev, + const char warntype) +{ + return; +} +#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */ + +struct quota_info { + unsigned int flags; /* Flags for diskquotas on this device */ + struct mutex dqio_mutex; /* lock device while I/O in progress */ + struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */ + struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */ + struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ + struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ + const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ +}; + +int register_quota_format(struct quota_format_type *fmt); +void unregister_quota_format(struct quota_format_type *fmt); + +struct quota_module_name { + int qm_fmt_id; + char *qm_mod_name; +}; + +#define INIT_QUOTA_MODULE_NAMES {\ + {QFMT_VFS_OLD, "quota_v1"},\ + {QFMT_VFS_V0, "quota_v2"},\ + {0, NULL}} + +#endif /* __KERNEL__ */ +#endif /* _QUOTA_ */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h new file mode 100644 index 00000000..d93f95e6 --- /dev/null +++ b/include/linux/quotaops.h @@ -0,0 +1,360 @@ +/* + * Definitions for diskquota-operations. When diskquota is configured these + * macros expand to the right source-code. + * + * Author: Marco van Wieringen <mvw@planets.elm.net> + */ +#ifndef _LINUX_QUOTAOPS_ +#define _LINUX_QUOTAOPS_ + +#include <linux/fs.h> + +#define DQUOT_SPACE_WARN 0x1 +#define DQUOT_SPACE_RESERVE 0x2 +#define DQUOT_SPACE_NOFAIL 0x4 + +static inline struct quota_info *sb_dqopt(struct super_block *sb) +{ + return &sb->s_dquot; +} + +/* i_mutex must being held */ +static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) +{ + return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) || + (ia->ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) || + (ia->ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid); +} + +#if defined(CONFIG_QUOTA) + +#define quota_error(sb, fmt, args...) \ + __quota_error((sb), __func__, fmt , ## args) + +extern __printf(3, 4) +void __quota_error(struct super_block *sb, const char *func, + const char *fmt, ...); + +/* + * declaration of quota_function calls in kernel. 
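+ *
+ * As a hedged orientation sketch (not part of the original header), a
+ * filesystem typically calls dquot_initialize() when an inode is set up,
+ * brackets block allocation with the dquot_alloc_*() and dquot_free_*()
+ * helpers declared below, charges inodes via dquot_alloc_inode() and
+ * dquot_free_inode(), and uses dquot_transfer() from its setattr path
+ * when chown/chgrp moves usage to another id, e.g.
+ *
+ *	err = dquot_alloc_block(inode, nr_blocks);
+ *	if (err)
+ *		return err;
+ *	...
+ *	dquot_free_block(inode, nr_blocks);
+ *
+ * where "nr_blocks" is an illustrative name only.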
+ */ +void inode_add_rsv_space(struct inode *inode, qsize_t number); +void inode_claim_rsv_space(struct inode *inode, qsize_t number); +void inode_sub_rsv_space(struct inode *inode, qsize_t number); + +void dquot_initialize(struct inode *inode); +void dquot_drop(struct inode *inode); +struct dquot *dqget(struct super_block *sb, unsigned int id, int type); +void dqput(struct dquot *dquot); +int dquot_scan_active(struct super_block *sb, + int (*fn)(struct dquot *dquot, unsigned long priv), + unsigned long priv); +struct dquot *dquot_alloc(struct super_block *sb, int type); +void dquot_destroy(struct dquot *dquot); + +int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); +void __dquot_free_space(struct inode *inode, qsize_t number, int flags); + +int dquot_alloc_inode(const struct inode *inode); + +int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); +void dquot_free_inode(const struct inode *inode); + +int dquot_disable(struct super_block *sb, int type, unsigned int flags); +/* Suspend quotas on remount RO */ +static inline int dquot_suspend(struct super_block *sb, int type) +{ + return dquot_disable(sb, type, DQUOT_SUSPENDED); +} +int dquot_resume(struct super_block *sb, int type); + +int dquot_commit(struct dquot *dquot); +int dquot_acquire(struct dquot *dquot); +int dquot_release(struct dquot *dquot); +int dquot_commit_info(struct super_block *sb, int type); +int dquot_mark_dquot_dirty(struct dquot *dquot); + +int dquot_file_open(struct inode *inode, struct file *file); + +int dquot_enable(struct inode *inode, int type, int format_id, + unsigned int flags); +int dquot_quota_on(struct super_block *sb, int type, int format_id, + struct path *path); +int dquot_quota_on_mount(struct super_block *sb, char *qf_name, + int format_id, int type); +int dquot_quota_off(struct super_block *sb, int type); +int dquot_quota_sync(struct super_block *sb, int type, int wait); +int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); +int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); +int dquot_get_dqblk(struct super_block *sb, int type, qid_t id, + struct fs_disk_quota *di); +int dquot_set_dqblk(struct super_block *sb, int type, qid_t id, + struct fs_disk_quota *di); + +int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); +int dquot_transfer(struct inode *inode, struct iattr *iattr); + +static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) +{ + return sb_dqopt(sb)->info + type; +} + +/* + * Functions for checking status of quota + */ + +static inline bool sb_has_quota_usage_enabled(struct super_block *sb, int type) +{ + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_USAGE_ENABLED, type); +} + +static inline bool sb_has_quota_limits_enabled(struct super_block *sb, int type) +{ + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_LIMITS_ENABLED, type); +} + +static inline bool sb_has_quota_suspended(struct super_block *sb, int type) +{ + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_SUSPENDED, type); +} + +static inline unsigned sb_any_quota_suspended(struct super_block *sb) +{ + unsigned type, tmsk = 0; + for (type = 0; type < MAXQUOTAS; type++) + tmsk |= sb_has_quota_suspended(sb, type) << type; + return tmsk; +} + +/* Does kernel know about any quota information for given sb + type? 
*/ +static inline bool sb_has_quota_loaded(struct super_block *sb, int type) +{ + /* Currently if anything is on, then quota usage is on as well */ + return sb_has_quota_usage_enabled(sb, type); +} + +static inline unsigned sb_any_quota_loaded(struct super_block *sb) +{ + unsigned type, tmsk = 0; + for (type = 0; type < MAXQUOTAS; type++) + tmsk |= sb_has_quota_loaded(sb, type) << type; + return tmsk; +} + +static inline bool sb_has_quota_active(struct super_block *sb, int type) +{ + return sb_has_quota_loaded(sb, type) && + !sb_has_quota_suspended(sb, type); +} + +/* + * Operations supported for diskquotas. + */ +extern const struct dquot_operations dquot_operations; +extern const struct quotactl_ops dquot_quotactl_ops; + +#else + +static inline int sb_has_quota_usage_enabled(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_has_quota_limits_enabled(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_has_quota_suspended(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_suspended(struct super_block *sb) +{ + return 0; +} + +/* Does kernel know about any quota information for given sb + type? */ +static inline int sb_has_quota_loaded(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_loaded(struct super_block *sb) +{ + return 0; +} + +static inline int sb_has_quota_active(struct super_block *sb, int type) +{ + return 0; +} + +static inline void dquot_initialize(struct inode *inode) +{ +} + +static inline void dquot_drop(struct inode *inode) +{ +} + +static inline int dquot_alloc_inode(const struct inode *inode) +{ + return 0; +} + +static inline void dquot_free_inode(const struct inode *inode) +{ +} + +static inline int dquot_transfer(struct inode *inode, struct iattr *iattr) +{ + return 0; +} + +static inline int __dquot_alloc_space(struct inode *inode, qsize_t number, + int flags) +{ + if (!(flags & DQUOT_SPACE_RESERVE)) + inode_add_bytes(inode, number); + return 0; +} + +static inline void __dquot_free_space(struct inode *inode, qsize_t number, + int flags) +{ + if (!(flags & DQUOT_SPACE_RESERVE)) + inode_sub_bytes(inode, number); +} + +static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) +{ + inode_add_bytes(inode, number); + return 0; +} + +static inline int dquot_disable(struct super_block *sb, int type, + unsigned int flags) +{ + return 0; +} + +static inline int dquot_suspend(struct super_block *sb, int type) +{ + return 0; +} + +static inline int dquot_resume(struct super_block *sb, int type) +{ + return 0; +} + +#define dquot_file_open generic_file_open + +#endif /* CONFIG_QUOTA */ + +static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr) +{ + return __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN); +} + +static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr) +{ + __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL); + mark_inode_dirty_sync(inode); +} + +static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) +{ + int ret; + + ret = dquot_alloc_space_nodirty(inode, nr); + if (!ret) { + /* + * Mark inode fully dirty. Since we are allocating blocks, inode + * would become fully dirty soon anyway and it reportedly + * reduces lock contention. 
+ */ + mark_inode_dirty(inode); + } + return ret; +} + +static inline int dquot_alloc_block_nodirty(struct inode *inode, qsize_t nr) +{ + return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits); +} + +static inline void dquot_alloc_block_nofail(struct inode *inode, qsize_t nr) +{ + dquot_alloc_space_nofail(inode, nr << inode->i_blkbits); +} + +static inline int dquot_alloc_block(struct inode *inode, qsize_t nr) +{ + return dquot_alloc_space(inode, nr << inode->i_blkbits); +} + +static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr) +{ + return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0); +} + +static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr) +{ + int ret; + + ret = dquot_prealloc_block_nodirty(inode, nr); + if (!ret) + mark_inode_dirty_sync(inode); + return ret; +} + +static inline int dquot_reserve_block(struct inode *inode, qsize_t nr) +{ + return __dquot_alloc_space(inode, nr << inode->i_blkbits, + DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE); +} + +static inline int dquot_claim_block(struct inode *inode, qsize_t nr) +{ + int ret; + + ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits); + if (!ret) + mark_inode_dirty_sync(inode); + return ret; +} + +static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr) +{ + __dquot_free_space(inode, nr, 0); +} + +static inline void dquot_free_space(struct inode *inode, qsize_t nr) +{ + dquot_free_space_nodirty(inode, nr); + mark_inode_dirty_sync(inode); +} + +static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr) +{ + dquot_free_space_nodirty(inode, nr << inode->i_blkbits); +} + +static inline void dquot_free_block(struct inode *inode, qsize_t nr) +{ + dquot_free_space(inode, nr << inode->i_blkbits); +} + +static inline void dquot_release_reservation_block(struct inode *inode, + qsize_t nr) +{ + __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE); +} + +#endif /* _LINUX_QUOTAOPS_ */ diff --git a/include/linux/radeonfb.h b/include/linux/radeonfb.h new file mode 100644 index 00000000..8c4bbdec --- /dev/null +++ b/include/linux/radeonfb.h @@ -0,0 +1,15 @@ +#ifndef __LINUX_RADEONFB_H__ +#define __LINUX_RADEONFB_H__ + +#include <asm/ioctl.h> +#include <linux/types.h> + +#define ATY_RADEON_LCD_ON 0x00000001 +#define ATY_RADEON_CRT_ON 0x00000002 + + +#define FBIO_RADEON_GET_MIRROR _IOR('@', 3, size_t) +#define FBIO_RADEON_SET_MIRROR _IOW('@', 4, size_t) + +#endif + diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h new file mode 100644 index 00000000..ffc444c3 --- /dev/null +++ b/include/linux/radix-tree.h @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2001 Momchil Velikov + * Portions Copyright (C) 2001 Christoph Hellwig + * Copyright (C) 2006 Nick Piggin + * Copyright (C) 2012 Konstantin Khlebnikov + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#ifndef _LINUX_RADIX_TREE_H +#define _LINUX_RADIX_TREE_H + +#include <linux/preempt.h> +#include <linux/types.h> +#include <linux/bug.h> +#include <linux/kernel.h> +#include <linux/rcupdate.h> + +/* + * An indirect pointer (root->rnode pointing to a radix_tree_node, rather + * than a data item) is signalled by the low bit set in the root->rnode + * pointer. + * + * In this case root->height is > 0, but the indirect pointer tests are + * needed for RCU lookups (because root->height is unreliable). The only + * time callers need worry about this is when doing a lookup_slot under + * RCU. + * + * Indirect pointer in fact is also used to tag the last pointer of a node + * when it is shrunk, before we rcu free the node. See shrink code for + * details. + */ +#define RADIX_TREE_INDIRECT_PTR 1 +/* + * A common use of the radix tree is to store pointers to struct pages; + * but shmem/tmpfs needs also to store swap entries in the same tree: + * those are marked as exceptional entries to distinguish them. + * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it. + */ +#define RADIX_TREE_EXCEPTIONAL_ENTRY 2 +#define RADIX_TREE_EXCEPTIONAL_SHIFT 2 + +static inline int radix_tree_is_indirect_ptr(void *ptr) +{ + return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR); +} + +/*** radix-tree API starts here ***/ + +#define RADIX_TREE_MAX_TAGS 3 + +/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ +struct radix_tree_root { + unsigned int height; + gfp_t gfp_mask; + struct radix_tree_node __rcu *rnode; +}; + +#define RADIX_TREE_INIT(mask) { \ + .height = 0, \ + .gfp_mask = (mask), \ + .rnode = NULL, \ +} + +#define RADIX_TREE(name, mask) \ + struct radix_tree_root name = RADIX_TREE_INIT(mask) + +#define INIT_RADIX_TREE(root, mask) \ +do { \ + (root)->height = 0; \ + (root)->gfp_mask = (mask); \ + (root)->rnode = NULL; \ +} while (0) + +/** + * Radix-tree synchronization + * + * The radix-tree API requires that users provide all synchronisation (with + * specific exceptions, noted below). + * + * Synchronization of access to the data items being stored in the tree, and + * management of their lifetimes must be completely managed by API users. + * + * For API usage, in general, + * - any function _modifying_ the tree or tags (inserting or deleting + * items, setting or clearing tags) must exclude other modifications, and + * exclude any functions reading the tree. + * - any function _reading_ the tree or tags (looking up items or tags, + * gang lookups) must exclude modifications to the tree, but may occur + * concurrently with other readers. + * + * The notable exceptions to this rule are the following functions: + * radix_tree_lookup + * radix_tree_lookup_slot + * radix_tree_tag_get + * radix_tree_gang_lookup + * radix_tree_gang_lookup_slot + * radix_tree_gang_lookup_tag + * radix_tree_gang_lookup_tag_slot + * radix_tree_tagged + * + * The first 7 functions are able to be called locklessly, using RCU. The + * caller must ensure calls to these functions are made within rcu_read_lock() + * regions. Other readers (lock-free or otherwise) and modifications may be + * running concurrently. + * + * It is still required that the caller manage the synchronization and lifetimes + * of the items. 
So if RCU lock-free lookups are used, typically this would mean + * that the items have their own locks, or are amenable to lock-free access; and + * that the items are freed by RCU (or only freed after having been deleted from + * the radix tree *and* a synchronize_rcu() grace period). + * + * (Note, rcu_assign_pointer and rcu_dereference are not needed to control + * access to data items when inserting into or looking up from the radix tree) + * + * Note that the value returned by radix_tree_tag_get() may not be relied upon + * if only the RCU read lock is held. Functions to set/clear tags and to + * delete nodes running concurrently with it may affect its result such that + * two consecutive reads in the same locked section may return different + * values. If reliability is required, modification functions must also be + * excluded from concurrency. + * + * radix_tree_tagged is able to be called without locking or RCU. + */ + +/** + * radix_tree_deref_slot - dereference a slot + * @pslot: pointer to slot, returned by radix_tree_lookup_slot + * Returns: item that was stored in that slot with any direct pointer flag + * removed. + * + * For use with radix_tree_lookup_slot(). Caller must hold tree at least read + * locked across slot lookup and dereference. Not required if write lock is + * held (ie. items cannot be concurrently inserted). + * + * radix_tree_deref_retry must be used to confirm validity of the pointer if + * only the read lock is held. + */ +static inline void *radix_tree_deref_slot(void **pslot) +{ + return rcu_dereference(*pslot); +} + +/** + * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held + * @pslot: pointer to slot, returned by radix_tree_lookup_slot + * Returns: item that was stored in that slot with any direct pointer flag + * removed. + * + * Similar to radix_tree_deref_slot but only used during migration when a pages + * mapping is being moved. The caller does not hold the RCU read lock but it + * must hold the tree lock to prevent parallel updates. + */ +static inline void *radix_tree_deref_slot_protected(void **pslot, + spinlock_t *treelock) +{ + return rcu_dereference_protected(*pslot, lockdep_is_held(treelock)); +} + +/** + * radix_tree_deref_retry - check radix_tree_deref_slot + * @arg: pointer returned by radix_tree_deref_slot + * Returns: 0 if retry is not required, otherwise retry is required + * + * radix_tree_deref_retry must be used with radix_tree_deref_slot. + */ +static inline int radix_tree_deref_retry(void *arg) +{ + return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR); +} + +/** + * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry? + * @arg: value returned by radix_tree_deref_slot + * Returns: 0 if well-aligned pointer, non-0 if exceptional entry. + */ +static inline int radix_tree_exceptional_entry(void *arg) +{ + /* Not unlikely because radix_tree_exception often tested first */ + return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY; +} + +/** + * radix_tree_exception - radix_tree_deref_slot returned either exception? + * @arg: value returned by radix_tree_deref_slot + * Returns: 0 if well-aligned pointer, non-0 if either kind of exception. 
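+ *
+ * Illustrative lockless lookup (a sketch, not from the original header;
+ * "tree", "index" and the retry label are stand-ins) showing where this
+ * test is typically applied:
+ *
+ *	void **slot;
+ *	void *item;
+ *
+ *	rcu_read_lock();
+ *	slot = radix_tree_lookup_slot(&tree, index);
+ *	item = slot ? radix_tree_deref_slot(slot) : NULL;
+ *	if (radix_tree_exception(item)) {
+ *		if (radix_tree_deref_retry(item))
+ *			goto repeat;		the tree changed, look up again
+ *		else
+ *			handle the exceptional entry, e.g. a swap entry
+ *	}
+ *	rcu_read_unlock();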
+ */ +static inline int radix_tree_exception(void *arg) +{ + return unlikely((unsigned long)arg & + (RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY)); +} + +/** + * radix_tree_replace_slot - replace item in a slot + * @pslot: pointer to slot, returned by radix_tree_lookup_slot + * @item: new item to store in the slot. + * + * For use with radix_tree_lookup_slot(). Caller must hold tree write locked + * across slot lookup and replacement. + */ +static inline void radix_tree_replace_slot(void **pslot, void *item) +{ + BUG_ON(radix_tree_is_indirect_ptr(item)); + rcu_assign_pointer(*pslot, item); +} + +int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); +void *radix_tree_lookup(struct radix_tree_root *, unsigned long); +void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); +void *radix_tree_delete(struct radix_tree_root *, unsigned long); +unsigned int +radix_tree_gang_lookup(struct radix_tree_root *root, void **results, + unsigned long first_index, unsigned int max_items); +unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, + void ***results, unsigned long *indices, + unsigned long first_index, unsigned int max_items); +unsigned long radix_tree_next_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan); +unsigned long radix_tree_prev_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan); +int radix_tree_preload(gfp_t gfp_mask); +void radix_tree_init(void); +void *radix_tree_tag_set(struct radix_tree_root *root, + unsigned long index, unsigned int tag); +void *radix_tree_tag_clear(struct radix_tree_root *root, + unsigned long index, unsigned int tag); +int radix_tree_tag_get(struct radix_tree_root *root, + unsigned long index, unsigned int tag); +unsigned int +radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, + unsigned long first_index, unsigned int max_items, + unsigned int tag); +unsigned int +radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items, + unsigned int tag); +unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, + unsigned long *first_indexp, unsigned long last_index, + unsigned long nr_to_tag, + unsigned int fromtag, unsigned int totag); +int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); +unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); + +static inline void radix_tree_preload_end(void) +{ + preempt_enable(); +} + +/** + * struct radix_tree_iter - radix tree iterator state + * + * @index: index of current slot + * @next_index: next-to-last index for this chunk + * @tags: bit-mask for tag-iterating + * + * This radix tree iterator works in terms of "chunks" of slots. A chunk is a + * subinterval of slots contained within one radix tree leaf node. It is + * described by a pointer to its first slot and a struct radix_tree_iter + * which holds the chunk's position in the tree and its size. For tagged + * iteration radix_tree_iter also holds the slots' bit-mask for one chosen + * radix tree tag. 
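+ *
+ * Illustrative use of the iterator (a sketch, not part of the original
+ * header; process() is a stand-in), walking every non-empty slot under
+ * the RCU read lock:
+ *
+ *	struct radix_tree_iter iter;
+ *	void **slot;
+ *
+ *	rcu_read_lock();
+ *	radix_tree_for_each_slot(slot, &tree, &iter, 0)
+ *		process(radix_tree_deref_slot(slot), iter.index);
+ *	rcu_read_unlock();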
+ */ +struct radix_tree_iter { + unsigned long index; + unsigned long next_index; + unsigned long tags; +}; + +#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ +#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ +#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ + +/** + * radix_tree_iter_init - initialize radix tree iterator + * + * @iter: pointer to iterator state + * @start: iteration starting index + * Returns: NULL + */ +static __always_inline void ** +radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) +{ + /* + * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it + * in the case of a successful tagged chunk lookup. If the lookup was + * unsuccessful or non-tagged then nobody cares about ->tags. + * + * Set index to zero to bypass next_index overflow protection. + * See the comment in radix_tree_next_chunk() for details. + */ + iter->index = 0; + iter->next_index = start; + return NULL; +} + +/** + * radix_tree_next_chunk - find next chunk of slots for iteration + * + * @root: radix tree root + * @iter: iterator state + * @flags: RADIX_TREE_ITER_* flags and tag index + * Returns: pointer to chunk first slot, or NULL if there no more left + * + * This function looks up the next chunk in the radix tree starting from + * @iter->next_index. It returns a pointer to the chunk's first slot. + * Also it fills @iter with data about chunk: position in the tree (index), + * its end (next_index), and constructs a bit mask for tagged iterating (tags). + */ +void **radix_tree_next_chunk(struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned flags); + +/** + * radix_tree_chunk_size - get current chunk size + * + * @iter: pointer to radix tree iterator + * Returns: current chunk size + */ +static __always_inline unsigned +radix_tree_chunk_size(struct radix_tree_iter *iter) +{ + return iter->next_index - iter->index; +} + +/** + * radix_tree_next_slot - find next slot in chunk + * + * @slot: pointer to current slot + * @iter: pointer to interator state + * @flags: RADIX_TREE_ITER_*, should be constant + * Returns: pointer to next slot, or NULL if there no more left + * + * This function updates @iter->index in the case of a successful lookup. + * For tagged lookup it also eats @iter->tags. + */ +static __always_inline void ** +radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) +{ + if (flags & RADIX_TREE_ITER_TAGGED) { + iter->tags >>= 1; + if (likely(iter->tags & 1ul)) { + iter->index++; + return slot + 1; + } + if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) { + unsigned offset = __ffs(iter->tags); + + iter->tags >>= offset; + iter->index += offset + 1; + return slot + offset + 1; + } + } else { + unsigned size = radix_tree_chunk_size(iter) - 1; + + while (size--) { + slot++; + iter->index++; + if (likely(*slot)) + return slot; + if (flags & RADIX_TREE_ITER_CONTIG) { + /* forbid switching to the next chunk */ + iter->next_index = 0; + break; + } + } + } + return NULL; +} + +/** + * radix_tree_for_each_chunk - iterate over chunks + * + * @slot: the void** variable for pointer to chunk first slot + * @root: the struct radix_tree_root pointer + * @iter: the struct radix_tree_iter pointer + * @start: iteration starting index + * @flags: RADIX_TREE_ITER_* and tag index + * + * Locks can be released and reacquired between iterations. 
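+ *
+ * Nested with radix_tree_for_each_chunk_slot() below, an illustrative
+ * sketch (not part of the original header; process() and the iter/slot
+ * variables are stand-ins) looks like:
+ *
+ *	radix_tree_for_each_chunk(slot, &tree, &iter, 0, 0)
+ *		radix_tree_for_each_chunk_slot(slot, &iter, 0)
+ *			process(*slot, iter.index);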
+ */ +#define radix_tree_for_each_chunk(slot, root, iter, start, flags) \ + for (slot = radix_tree_iter_init(iter, start) ; \ + (slot = radix_tree_next_chunk(root, iter, flags)) ;) + +/** + * radix_tree_for_each_chunk_slot - iterate over slots in one chunk + * + * @slot: the void** variable, at the beginning points to chunk first slot + * @iter: the struct radix_tree_iter pointer + * @flags: RADIX_TREE_ITER_*, should be constant + * + * This macro is designed to be nested inside radix_tree_for_each_chunk(). + * @slot points to the radix tree slot, @iter->index contains its index. + */ +#define radix_tree_for_each_chunk_slot(slot, iter, flags) \ + for (; slot ; slot = radix_tree_next_slot(slot, iter, flags)) + +/** + * radix_tree_for_each_slot - iterate over non-empty slots + * + * @slot: the void** variable for pointer to slot + * @root: the struct radix_tree_root pointer + * @iter: the struct radix_tree_iter pointer + * @start: iteration starting index + * + * @slot points to radix tree slot, @iter->index contains its index. + */ +#define radix_tree_for_each_slot(slot, root, iter, start) \ + for (slot = radix_tree_iter_init(iter, start) ; \ + slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \ + slot = radix_tree_next_slot(slot, iter, 0)) + +/** + * radix_tree_for_each_contig - iterate over contiguous slots + * + * @slot: the void** variable for pointer to slot + * @root: the struct radix_tree_root pointer + * @iter: the struct radix_tree_iter pointer + * @start: iteration starting index + * + * @slot points to radix tree slot, @iter->index contains its index. + */ +#define radix_tree_for_each_contig(slot, root, iter, start) \ + for (slot = radix_tree_iter_init(iter, start) ; \ + slot || (slot = radix_tree_next_chunk(root, iter, \ + RADIX_TREE_ITER_CONTIG)) ; \ + slot = radix_tree_next_slot(slot, iter, \ + RADIX_TREE_ITER_CONTIG)) + +/** + * radix_tree_for_each_tagged - iterate over tagged slots + * + * @slot: the void** variable for pointer to slot + * @root: the struct radix_tree_root pointer + * @iter: the struct radix_tree_iter pointer + * @start: iteration starting index + * @tag: tag index + * + * @slot points to radix tree slot, @iter->index contains its index. + */ +#define radix_tree_for_each_tagged(slot, root, iter, start, tag) \ + for (slot = radix_tree_iter_init(iter, start) ; \ + slot || (slot = radix_tree_next_chunk(root, iter, \ + RADIX_TREE_ITER_TAGGED | tag)) ; \ + slot = radix_tree_next_slot(slot, iter, \ + RADIX_TREE_ITER_TAGGED)) + +#endif /* _LINUX_RADIX_TREE_H */ diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild new file mode 100644 index 00000000..2415a64c --- /dev/null +++ b/include/linux/raid/Kbuild @@ -0,0 +1,2 @@ +header-y += md_p.h +header-y += md_u.h diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h new file mode 100644 index 00000000..8c0a3adc --- /dev/null +++ b/include/linux/raid/md_p.h @@ -0,0 +1,290 @@ +/* + md_p.h : physical layout of Linux RAID devices + Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + You should have received a copy of the GNU General Public License + (for example /usr/src/linux/COPYING); if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#ifndef _MD_P_H +#define _MD_P_H + +#include <linux/types.h> + +/* + * RAID superblock. + * + * The RAID superblock maintains some statistics on each RAID configuration. + * Each real device in the RAID set contains it near the end of the device. + * Some of the ideas are copied from the ext2fs implementation. + * + * We currently use 4096 bytes as follows: + * + * word offset function + * + * 0 - 31 Constant generic RAID device information. + * 32 - 63 Generic state information. + * 64 - 127 Personality specific information. + * 128 - 511 12 32-words descriptors of the disks in the raid set. + * 512 - 911 Reserved. + * 912 - 1023 Disk specific descriptor. + */ + +/* + * If x is the real device size in bytes, we return an apparent size of: + * + * y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES + * + * and place the 4kB superblock at offset y. + */ +#define MD_RESERVED_BYTES (64 * 1024) +#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512) + +#define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS) + +#define MD_SB_BYTES 4096 +#define MD_SB_WORDS (MD_SB_BYTES / 4) +#define MD_SB_SECTORS (MD_SB_BYTES / 512) + +/* + * The following are counted in 32-bit words + */ +#define MD_SB_GENERIC_OFFSET 0 +#define MD_SB_PERSONALITY_OFFSET 64 +#define MD_SB_DISKS_OFFSET 128 +#define MD_SB_DESCRIPTOR_OFFSET 992 + +#define MD_SB_GENERIC_CONSTANT_WORDS 32 +#define MD_SB_GENERIC_STATE_WORDS 32 +#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS) +#define MD_SB_PERSONALITY_WORDS 64 +#define MD_SB_DESCRIPTOR_WORDS 32 +#define MD_SB_DISKS 27 +#define MD_SB_DISKS_WORDS (MD_SB_DISKS*MD_SB_DESCRIPTOR_WORDS) +#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS) +#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS) + +/* + * Device "operational" state bits + */ +#define MD_DISK_FAULTY 0 /* disk is faulty / operational */ +#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */ +#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */ +#define MD_DISK_REMOVED 3 /* disk is in sync with the raid set */ + +#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" is RAID1 config. + * read requests will only be sent here in + * dire need + */ + +typedef struct mdp_device_descriptor_s { + __u32 number; /* 0 Device number in the entire set */ + __u32 major; /* 1 Device major number */ + __u32 minor; /* 2 Device minor number */ + __u32 raid_disk; /* 3 The role of the device in the raid set */ + __u32 state; /* 4 Operational state */ + __u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5]; +} mdp_disk_t; + +#define MD_SB_MAGIC 0xa92b4efc + +/* + * Superblock state bits + */ +#define MD_SB_CLEAN 0 +#define MD_SB_ERRORS 1 + +#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */ + +/* + * Notes: + * - if an array is being reshaped (restriped) in order to change the + * the number of active devices in the array, 'raid_disks' will be + * the larger of the old and new numbers. 'delta_disks' will + * be the "new - old". So if +ve, raid_disks is the new value, and + * "raid_disks-delta_disks" is the old. If -ve, raid_disks is the + * old value and "raid_disks+delta_disks" is the new (smaller) value. 
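+ *
+ *   Worked example (illustrative only): growing a four-disk array to six
+ *   gives raid_disks == 6 and delta_disks == +2 while the reshape runs,
+ *   so the old geometry is raid_disks - delta_disks == 4; shrinking six
+ *   disks to four instead keeps raid_disks == 6 with delta_disks == -2,
+ *   and the new geometry is raid_disks + delta_disks == 4.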
+ */ + + +typedef struct mdp_superblock_s { + /* + * Constant generic information + */ + __u32 md_magic; /* 0 MD identifier */ + __u32 major_version; /* 1 major version to which the set conforms */ + __u32 minor_version; /* 2 minor version ... */ + __u32 patch_version; /* 3 patchlevel version ... */ + __u32 gvalid_words; /* 4 Number of used words in this section */ + __u32 set_uuid0; /* 5 Raid set identifier */ + __u32 ctime; /* 6 Creation time */ + __u32 level; /* 7 Raid personality */ + __u32 size; /* 8 Apparent size of each individual disk */ + __u32 nr_disks; /* 9 total disks in the raid set */ + __u32 raid_disks; /* 10 disks in a fully functional raid set */ + __u32 md_minor; /* 11 preferred MD minor device number */ + __u32 not_persistent; /* 12 does it have a persistent superblock */ + __u32 set_uuid1; /* 13 Raid set identifier #2 */ + __u32 set_uuid2; /* 14 Raid set identifier #3 */ + __u32 set_uuid3; /* 15 Raid set identifier #4 */ + __u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 16]; + + /* + * Generic state information + */ + __u32 utime; /* 0 Superblock update time */ + __u32 state; /* 1 State bits (clean, ...) */ + __u32 active_disks; /* 2 Number of currently active disks */ + __u32 working_disks; /* 3 Number of working disks */ + __u32 failed_disks; /* 4 Number of failed disks */ + __u32 spare_disks; /* 5 Number of spare disks */ + __u32 sb_csum; /* 6 checksum of the whole superblock */ +#ifdef __BIG_ENDIAN + __u32 events_hi; /* 7 high-order of superblock update count */ + __u32 events_lo; /* 8 low-order of superblock update count */ + __u32 cp_events_hi; /* 9 high-order of checkpoint update count */ + __u32 cp_events_lo; /* 10 low-order of checkpoint update count */ +#else + __u32 events_lo; /* 7 low-order of superblock update count */ + __u32 events_hi; /* 8 high-order of superblock update count */ + __u32 cp_events_lo; /* 9 low-order of checkpoint update count */ + __u32 cp_events_hi; /* 10 high-order of checkpoint update count */ +#endif + __u32 recovery_cp; /* 11 recovery checkpoint sector count */ + /* There are only valid for minor_version > 90 */ + __u64 reshape_position; /* 12,13 next address in array-space for reshape */ + __u32 new_level; /* 14 new level we are reshaping to */ + __u32 delta_disks; /* 15 change in number of raid_disks */ + __u32 new_layout; /* 16 new layout */ + __u32 new_chunk; /* 17 new chunk size (bytes) */ + __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18]; + + /* + * Personality information + */ + __u32 layout; /* 0 the array's physical layout */ + __u32 chunk_size; /* 1 chunk size in bytes */ + __u32 root_pv; /* 2 LV root PV */ + __u32 root_block; /* 3 LV root block */ + __u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 4]; + + /* + * Disks information + */ + mdp_disk_t disks[MD_SB_DISKS]; + + /* + * Reserved + */ + __u32 reserved[MD_SB_RESERVED_WORDS]; + + /* + * Active descriptor + */ + mdp_disk_t this_disk; + +} mdp_super_t; + +static inline __u64 md_event(mdp_super_t *sb) { + __u64 ev = sb->events_hi; + return (ev<<32)| sb->events_lo; +} + +#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1) + +/* + * The version-1 superblock : + * All numeric fields are little-endian. + * + * total size: 256 bytes plus 2 per device. + * 1K allows 384 devices. 
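+ * (as an illustrative cross-check of that figure: the fixed part is 256
+ * bytes and each dev_roles[] entry below is a __le16, so a 1K superblock
+ * leaves (1024 - 256) / 2 = 384 role slots)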
+ */ +struct mdp_superblock_1 { + /* constant array information - 128 bytes */ + __le32 magic; /* MD_SB_MAGIC: 0xa92b4efc - little endian */ + __le32 major_version; /* 1 */ + __le32 feature_map; /* bit 0 set if 'bitmap_offset' is meaningful */ + __le32 pad0; /* always set to 0 when writing */ + + __u8 set_uuid[16]; /* user-space generated. */ + char set_name[32]; /* set and interpreted by user-space */ + + __le64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/ + __le32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */ + __le32 layout; /* only for raid5 and raid10 currently */ + __le64 size; /* used size of component devices, in 512byte sectors */ + + __le32 chunksize; /* in 512byte sectors */ + __le32 raid_disks; + __le32 bitmap_offset; /* sectors after start of superblock that bitmap starts + * NOTE: signed, so bitmap can be before superblock + * only meaningful of feature_map[0] is set. + */ + + /* These are only valid with feature bit '4' */ + __le32 new_level; /* new level we are reshaping to */ + __le64 reshape_position; /* next address in array-space for reshape */ + __le32 delta_disks; /* change in number of raid_disks */ + __le32 new_layout; /* new layout */ + __le32 new_chunk; /* new chunk size (512byte sectors) */ + __u8 pad1[128-124]; /* set to 0 when written */ + + /* constant this-device information - 64 bytes */ + __le64 data_offset; /* sector start of data, often 0 */ + __le64 data_size; /* sectors in this device that can be used for data */ + __le64 super_offset; /* sector start of this superblock */ + __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */ + __le32 dev_number; /* permanent identifier of this device - not role in raid */ + __le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */ + __u8 device_uuid[16]; /* user-space setable, ignored by kernel */ + __u8 devflags; /* per-device flags. Only one defined...*/ +#define WriteMostly1 1 /* mask for writemostly flag in above */ + /* Bad block log. If there are any bad blocks the feature flag is set. + * If offset and size are non-zero, that space is reserved and available + */ + __u8 bblog_shift; /* shift from sectors to block size */ + __le16 bblog_size; /* number of sectors reserved for list */ + __le32 bblog_offset; /* sector offset from superblock to bblog, + * signed - not unsigned */ + + /* array state information - 64 bytes */ + __le64 utime; /* 40 bits second, 24 bits microseconds */ + __le64 events; /* incremented when superblock updated */ + __le64 resync_offset; /* data before this offset (from data_offset) known to be in sync */ + __le32 sb_csum; /* checksum up to devs[max_dev] */ + __le32 max_dev; /* size of devs[] array to consider */ + __u8 pad3[64-32]; /* set to 0 when writing */ + + /* device state information. Indexed by dev_number. + * 2 bytes per device + * Note there are no per-device state flags. State information is rolled + * into the 'roles' value. If a device is spare or faulty, then it doesn't + * have a meaningful role. + */ + __le16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */ +}; + +/* feature_map bits */ +#define MD_FEATURE_BITMAP_OFFSET 1 +#define MD_FEATURE_RECOVERY_OFFSET 2 /* recovery_offset is present and + * must be honoured + */ +#define MD_FEATURE_RESHAPE_ACTIVE 4 +#define MD_FEATURE_BAD_BLOCKS 8 /* badblock list is not empty */ +#define MD_FEATURE_REPLACEMENT 16 /* This device is replacing an + * active device with same 'role'. 
+ * 'recovery_offset' is also set. + */ +#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ + |MD_FEATURE_RECOVERY_OFFSET \ + |MD_FEATURE_RESHAPE_ACTIVE \ + |MD_FEATURE_BAD_BLOCKS \ + |MD_FEATURE_REPLACEMENT) + +#endif diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h new file mode 100644 index 00000000..fb1abb33 --- /dev/null +++ b/include/linux/raid/md_u.h @@ -0,0 +1,159 @@ +/* + md_u.h : user <=> kernel API between Linux raidtools and RAID drivers + Copyright (C) 1998 Ingo Molnar + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + You should have received a copy of the GNU General Public License + (for example /usr/src/linux/COPYING); if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef _MD_U_H +#define _MD_U_H + +/* + * Different major versions are not compatible. + * Different minor versions are only downward compatible. + * Different patchlevel versions are downward and upward compatible. + */ +#define MD_MAJOR_VERSION 0 +#define MD_MINOR_VERSION 90 +/* + * MD_PATCHLEVEL_VERSION indicates kernel functionality. + * >=1 means different superblock formats are selectable using SET_ARRAY_INFO + * and major_version/minor_version accordingly + * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT + * in the super status byte + * >=3 means that bitmap superblock version 4 is supported, which uses + * little-ending representation rather than host-endian + */ +#define MD_PATCHLEVEL_VERSION 3 + +/* ioctls */ + +/* status */ +#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t) +#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t) +#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t) +#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13) +#define RAID_AUTORUN _IO (MD_MAJOR, 0x14) +#define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t) + +/* configuration */ +#define CLEAR_ARRAY _IO (MD_MAJOR, 0x20) +#define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t) +#define HOT_REMOVE_DISK _IO (MD_MAJOR, 0x22) +#define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t) +#define SET_DISK_INFO _IO (MD_MAJOR, 0x24) +#define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25) +#define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26) +#define PROTECT_ARRAY _IO (MD_MAJOR, 0x27) +#define HOT_ADD_DISK _IO (MD_MAJOR, 0x28) +#define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29) +#define HOT_GENERATE_ERROR _IO (MD_MAJOR, 0x2a) +#define SET_BITMAP_FILE _IOW (MD_MAJOR, 0x2b, int) + +/* usage */ +#define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t) +/* 0x31 was START_ARRAY */ +#define STOP_ARRAY _IO (MD_MAJOR, 0x32) +#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) +#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) + +/* 63 partitions with the alternate major number (mdp) */ +#define MdpMinorShift 6 +#ifdef __KERNEL__ +extern int mdp_major; +#endif + +typedef struct mdu_version_s { + int major; + int minor; + int patchlevel; +} mdu_version_t; + +typedef struct mdu_array_info_s { + /* + * Generic constant information + */ + int major_version; + int minor_version; + int patch_version; + int ctime; + int level; + int size; + int nr_disks; + int raid_disks; + int md_minor; + int not_persistent; + + /* + * Generic state information + */ + int utime; /* 0 Superblock update time */ + int state; /* 1 State bits (clean, ...) 
*/ + int active_disks; /* 2 Number of currently active disks */ + int working_disks; /* 3 Number of working disks */ + int failed_disks; /* 4 Number of failed disks */ + int spare_disks; /* 5 Number of spare disks */ + + /* + * Personality information + */ + int layout; /* 0 the array's physical layout */ + int chunk_size; /* 1 chunk size in bytes */ + +} mdu_array_info_t; + +/* non-obvious values for 'level' */ +#define LEVEL_MULTIPATH (-4) +#define LEVEL_LINEAR (-1) +#define LEVEL_FAULTY (-5) + +/* we need a value for 'no level specified' and 0 + * means 'raid0', so we need something else. This is + * for internal use only + */ +#define LEVEL_NONE (-1000000) + +typedef struct mdu_disk_info_s { + /* + * configuration/status of one particular disk + */ + int number; + int major; + int minor; + int raid_disk; + int state; + +} mdu_disk_info_t; + +typedef struct mdu_start_info_s { + /* + * configuration/status of one particular disk + */ + int major; + int minor; + int raid_disk; + int state; + +} mdu_start_info_t; + +typedef struct mdu_bitmap_file_s +{ + char pathname[4096]; +} mdu_bitmap_file_t; + +typedef struct mdu_param_s +{ + int personality; /* 1,2,3,4 */ + int chunk_size; /* in bytes */ + int max_fault; /* unused for now */ +} mdu_param_t; + +#endif + diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h new file mode 100644 index 00000000..53272e98 --- /dev/null +++ b/include/linux/raid/pq.h @@ -0,0 +1,153 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2003 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Boston MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +#ifndef LINUX_RAID_RAID6_H +#define LINUX_RAID_RAID6_H + +#ifdef __KERNEL__ + +/* Set to 1 to use kernel-wide empty_zero_page */ +#define RAID6_USE_EMPTY_ZERO_PAGE 0 +#include <linux/blkdev.h> + +/* We need a pre-zeroed page... if we don't want to use the kernel-provided + one define it here */ +#if RAID6_USE_EMPTY_ZERO_PAGE +# define raid6_empty_zero_page empty_zero_page +#else +extern const char raid6_empty_zero_page[PAGE_SIZE]; +#endif + +#else /* ! 
__KERNEL__ */ +/* Used for testing in user space */ + +#include <errno.h> +#include <inttypes.h> +#include <limits.h> +#include <stddef.h> +#include <sys/mman.h> +#include <sys/types.h> + +/* Not standard, but glibc defines it */ +#define BITS_PER_LONG __WORDSIZE + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +#ifndef PAGE_SIZE +# define PAGE_SIZE 4096 +#endif +extern const char raid6_empty_zero_page[PAGE_SIZE]; + +#define __init +#define __exit +#define __attribute_const__ __attribute__((const)) +#define noinline __attribute__((noinline)) + +#define preempt_enable() +#define preempt_disable() +#define cpu_has_feature(x) 1 +#define enable_kernel_altivec() +#define disable_kernel_altivec() + +#define EXPORT_SYMBOL(sym) +#define EXPORT_SYMBOL_GPL(sym) +#define MODULE_LICENSE(licence) +#define MODULE_DESCRIPTION(desc) +#define subsys_initcall(x) +#define module_exit(x) +#endif /* __KERNEL__ */ + +/* Routine choices */ +struct raid6_calls { + void (*gen_syndrome)(int, size_t, void **); + int (*valid)(void); /* Returns 1 if this routine set is usable */ + const char *name; /* Name of this routine set */ + int prefer; /* Has special performance attribute */ +}; + +/* Selected algorithm */ +extern struct raid6_calls raid6_call; + +/* Various routine sets */ +extern const struct raid6_calls raid6_intx1; +extern const struct raid6_calls raid6_intx2; +extern const struct raid6_calls raid6_intx4; +extern const struct raid6_calls raid6_intx8; +extern const struct raid6_calls raid6_intx16; +extern const struct raid6_calls raid6_intx32; +extern const struct raid6_calls raid6_mmxx1; +extern const struct raid6_calls raid6_mmxx2; +extern const struct raid6_calls raid6_sse1x1; +extern const struct raid6_calls raid6_sse1x2; +extern const struct raid6_calls raid6_sse2x1; +extern const struct raid6_calls raid6_sse2x2; +extern const struct raid6_calls raid6_sse2x4; +extern const struct raid6_calls raid6_altivec1; +extern const struct raid6_calls raid6_altivec2; +extern const struct raid6_calls raid6_altivec4; +extern const struct raid6_calls raid6_altivec8; + +/* Algorithm list */ +extern const struct raid6_calls * const raid6_algos[]; +int raid6_select_algo(void); + +/* Return values from chk_syndrome */ +#define RAID6_OK 0 +#define RAID6_P_BAD 1 +#define RAID6_Q_BAD 2 +#define RAID6_PQ_BAD 3 + +/* Galois field tables */ +extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256))); +extern const u8 raid6_gfexp[256] __attribute__((aligned(256))); +extern const u8 raid6_gfinv[256] __attribute__((aligned(256))); +extern const u8 raid6_gfexi[256] __attribute__((aligned(256))); + +/* Recovery routines */ +void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs); +void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs); +void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs); + +/* Some definitions to allow code to be compiled for testing in userspace */ +#ifndef __KERNEL__ + +# define jiffies raid6_jiffies() +# define printk printf +# define GFP_KERNEL 0 +# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \ + PROT_READ|PROT_WRITE, \ + MAP_PRIVATE|MAP_ANONYMOUS,\ + 0, 0)) +# define free_pages(x, y) munmap((void *)(x), PAGE_SIZE << (y)) + +static inline void cpu_relax(void) +{ + /* Nothing */ +} + +#undef HZ +#define HZ 1000 +static inline uint32_t raid6_jiffies(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec*1000 + tv.tv_usec/1000; +} + 
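+
+/*
+ * Minimal usage sketch (illustrative; this wrapper is not part of the
+ * original interface): once raid6_select_algo() has chosen an
+ * implementation, P/Q syndromes are generated through raid6_call.
+ * By convention "ptrs" holds the data blocks first, then the P and Q
+ * destination blocks.
+ */
+static inline void raid6_example_gen_syndrome(int disks, size_t bytes,
+					      void **ptrs)
+{
+	/* ptrs[0 .. disks-3]: data, ptrs[disks-2]: P, ptrs[disks-1]: Q */
+	raid6_call.gen_syndrome(disks, bytes, ptrs);
+}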
+#endif /* ! __KERNEL__ */ + +#endif /* LINUX_RAID_RAID6_H */ diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h new file mode 100644 index 00000000..5a210959 --- /dev/null +++ b/include/linux/raid/xor.h @@ -0,0 +1,22 @@ +#ifndef _XOR_H +#define _XOR_H + +#define MAX_XOR_BLOCKS 4 + +extern void xor_blocks(unsigned int count, unsigned int bytes, + void *dest, void **srcs); + +struct xor_block_template { + struct xor_block_template *next; + const char *name; + int speed; + void (*do_2)(unsigned long, unsigned long *, unsigned long *); + void (*do_3)(unsigned long, unsigned long *, unsigned long *, + unsigned long *); + void (*do_4)(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *); + void (*do_5)(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *, unsigned long *); +}; + +#endif diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h new file mode 100644 index 00000000..31e1ff69 --- /dev/null +++ b/include/linux/raid_class.h @@ -0,0 +1,83 @@ +/* + * raid_class.h - a generic raid visualisation class + * + * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> + * + * This file is licensed under GPLv2 + */ +#include <linux/transport_class.h> + +struct raid_template { + struct transport_container raid_attrs; +}; + +struct raid_function_template { + void *cookie; + int (*is_raid)(struct device *); + void (*get_resync)(struct device *); + void (*get_state)(struct device *); +}; + +enum raid_state { + RAID_STATE_UNKNOWN = 0, + RAID_STATE_ACTIVE, + RAID_STATE_DEGRADED, + RAID_STATE_RESYNCING, + RAID_STATE_OFFLINE, +}; + +enum raid_level { + RAID_LEVEL_UNKNOWN = 0, + RAID_LEVEL_LINEAR, + RAID_LEVEL_0, + RAID_LEVEL_1, + RAID_LEVEL_10, + RAID_LEVEL_1E, + RAID_LEVEL_3, + RAID_LEVEL_4, + RAID_LEVEL_5, + RAID_LEVEL_50, + RAID_LEVEL_6, +}; + +struct raid_data { + struct list_head component_list; + int component_count; + enum raid_level level; + enum raid_state state; + int resync; +}; + +/* resync complete goes from 0 to this */ +#define RAID_MAX_RESYNC (10000) + +#define DEFINE_RAID_ATTRIBUTE(type, attr) \ +static inline void \ +raid_set_##attr(struct raid_template *r, struct device *dev, type value) { \ + struct device *device = \ + attribute_container_find_class_device(&r->raid_attrs.ac, dev);\ + struct raid_data *rd; \ + BUG_ON(!device); \ + rd = dev_get_drvdata(device); \ + rd->attr = value; \ +} \ +static inline type \ +raid_get_##attr(struct raid_template *r, struct device *dev) { \ + struct device *device = \ + attribute_container_find_class_device(&r->raid_attrs.ac, dev);\ + struct raid_data *rd; \ + BUG_ON(!device); \ + rd = dev_get_drvdata(device); \ + return rd->attr; \ +} + +DEFINE_RAID_ATTRIBUTE(enum raid_level, level) +DEFINE_RAID_ATTRIBUTE(int, resync) +DEFINE_RAID_ATTRIBUTE(enum raid_state, state) + +struct raid_template *raid_class_attach(struct raid_function_template *); +void raid_class_release(struct raid_template *); + +int __must_check raid_component_add(struct raid_template *, struct device *, + struct device *); + diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h new file mode 100644 index 00000000..5bf5500d --- /dev/null +++ b/include/linux/ramfs.h @@ -0,0 +1,26 @@ +#ifndef _LINUX_RAMFS_H +#define _LINUX_RAMFS_H + +struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, + umode_t mode, dev_t dev); +extern struct dentry *ramfs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data); + +#ifndef CONFIG_MMU 
+extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); +extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); + +extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); +#endif + +extern const struct file_operations ramfs_file_operations; +extern const struct vm_operations_struct generic_file_vm_ops; +extern int __init init_rootfs(void); + +int ramfs_fill_super(struct super_block *sb, void *data, int silent); + +#endif diff --git a/include/linux/ramoops.h b/include/linux/ramoops.h new file mode 100644 index 00000000..484fef81 --- /dev/null +++ b/include/linux/ramoops.h @@ -0,0 +1,17 @@ +#ifndef __RAMOOPS_H +#define __RAMOOPS_H + +/* + * Ramoops platform data + * @mem_size memory size for ramoops + * @mem_address physical memory address to contain ramoops + */ + +struct ramoops_platform_data { + unsigned long mem_size; + unsigned long mem_address; + unsigned long record_size; + int dump_oops; +}; + +#endif diff --git a/include/linux/random.h b/include/linux/random.h new file mode 100644 index 00000000..8f74538c --- /dev/null +++ b/include/linux/random.h @@ -0,0 +1,109 @@ +/* + * include/linux/random.h + * + * Include file for the random number generator. + */ + +#ifndef _LINUX_RANDOM_H +#define _LINUX_RANDOM_H + +#include <linux/types.h> +#include <linux/ioctl.h> +#include <linux/irqnr.h> + +/* ioctl()'s for the random number generator */ + +/* Get the entropy count. */ +#define RNDGETENTCNT _IOR( 'R', 0x00, int ) + +/* Add to (or subtract from) the entropy count. (Superuser only.) */ +#define RNDADDTOENTCNT _IOW( 'R', 0x01, int ) + +/* Get the contents of the entropy pool. (Superuser only.) */ +#define RNDGETPOOL _IOR( 'R', 0x02, int [2] ) + +/* + * Write bytes into the entropy pool and add to the entropy count. + * (Superuser only.) + */ +#define RNDADDENTROPY _IOW( 'R', 0x03, int [2] ) + +/* Clear entropy count to 0. (Superuser only.) */ +#define RNDZAPENTCNT _IO( 'R', 0x04 ) + +/* Clear the entropy pool and associated counters. (Superuser only.) */ +#define RNDCLEARPOOL _IO( 'R', 0x06 ) + +struct rand_pool_info { + int entropy_count; + int buf_size; + __u32 buf[0]; +}; + +struct rnd_state { + __u32 s1, s2, s3; +}; + +/* Exported functions */ + +#ifdef __KERNEL__ + +extern void rand_initialize_irq(int irq); + +extern void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value); +extern void add_interrupt_randomness(int irq); + +extern void get_random_bytes(void *buf, int nbytes); +void generate_random_uuid(unsigned char uuid_out[16]); + +#ifndef MODULE +extern const struct file_operations random_fops, urandom_fops; +#endif + +unsigned int get_random_int(void); +unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); + +u32 random32(void); +void srandom32(u32 seed); + +u32 prandom32(struct rnd_state *); + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom32_seed - set seed for prandom32(). + * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. 
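+ *
+ * Typical use (illustrative) is to seed the state once and then draw
+ * 32-bit pseudo-random values from it:
+ *
+ *	struct rnd_state state;
+ *	u32 r;
+ *
+ *	prandom32_seed(&state, seed_value);
+ *	r = prandom32(&state);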
+ */ +static inline void prandom32_seed(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 1); + state->s2 = __seed(i, 7); + state->s3 = __seed(i, 15); +} + +#ifdef CONFIG_ARCH_RANDOM +# include <asm/archrandom.h> +#else +static inline int arch_get_random_long(unsigned long *v) +{ + return 0; +} +static inline int arch_get_random_int(unsigned int *v) +{ + return 0; +} +#endif + +#endif /* __KERNEL___ */ + +#endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/range.h b/include/linux/range.h new file mode 100644 index 00000000..bd184a5d --- /dev/null +++ b/include/linux/range.h @@ -0,0 +1,30 @@ +#ifndef _LINUX_RANGE_H +#define _LINUX_RANGE_H + +struct range { + u64 start; + u64 end; +}; + +int add_range(struct range *range, int az, int nr_range, + u64 start, u64 end); + + +int add_range_with_merge(struct range *range, int az, int nr_range, + u64 start, u64 end); + +void subtract_range(struct range *range, int az, u64 start, u64 end); + +int clean_sort_range(struct range *range, int az); + +void sort_range(struct range *range, int nr_range); + +#define MAX_RESOURCE ((resource_size_t)~0) +static inline resource_size_t cap_resource(u64 val) +{ + if (val > MAX_RESOURCE) + return MAX_RESOURCE; + + return val; +} +#endif diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h new file mode 100644 index 00000000..e11ccb4c --- /dev/null +++ b/include/linux/ratelimit.h @@ -0,0 +1,84 @@ +#ifndef _LINUX_RATELIMIT_H +#define _LINUX_RATELIMIT_H + +#include <linux/param.h> +#include <linux/spinlock.h> + +#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) +#define DEFAULT_RATELIMIT_BURST 10 + +struct ratelimit_state { + raw_spinlock_t lock; /* protect the state */ + + int interval; + int burst; + int printed; + int missed; + unsigned long begin; +}; + +#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ + \ + struct ratelimit_state name = { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .interval = interval_init, \ + .burst = burst_init, \ + } + +static inline void ratelimit_state_init(struct ratelimit_state *rs, + int interval, int burst) +{ + raw_spin_lock_init(&rs->lock); + rs->interval = interval; + rs->burst = burst; + rs->printed = 0; + rs->missed = 0; + rs->begin = 0; +} + +extern struct ratelimit_state printk_ratelimit_state; + +extern int ___ratelimit(struct ratelimit_state *rs, const char *func); +#define __ratelimit(state) ___ratelimit(state, __func__) + +#ifdef CONFIG_PRINTK + +#define WARN_ON_RATELIMIT(condition, state) \ + WARN_ON((condition) && __ratelimit(state)) + +#define __WARN_RATELIMIT(condition, state, format...) \ +({ \ + int rtn = 0; \ + if (unlikely(__ratelimit(state))) \ + rtn = WARN(condition, format); \ + rtn; \ +}) + +#define WARN_RATELIMIT(condition, format...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + __WARN_RATELIMIT(condition, &_rs, format); \ +}) + +#else + +#define WARN_ON_RATELIMIT(condition, state) \ + WARN_ON(condition) + +#define __WARN_RATELIMIT(condition, state, format...) \ +({ \ + int rtn = WARN(condition, format); \ + rtn; \ +}) + +#define WARN_RATELIMIT(condition, format...) 
\ +({ \ + int rtn = WARN(condition, format); \ + rtn; \ +}) + +#endif + +#endif /* _LINUX_RATELIMIT_H */ diff --git a/include/linux/rational.h b/include/linux/rational.h new file mode 100644 index 00000000..4f532fcd --- /dev/null +++ b/include/linux/rational.h @@ -0,0 +1,19 @@ +/* + * rational fractions + * + * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com> + * + * helper functions when coping with rational numbers, + * e.g. when calculating optimum numerator/denominator pairs for + * pll configuration taking into account restricted register size + */ + +#ifndef _LINUX_RATIONAL_H +#define _LINUX_RATIONAL_H + +void rational_best_approximation( + unsigned long given_numerator, unsigned long given_denominator, + unsigned long max_numerator, unsigned long max_denominator, + unsigned long *best_numerator, unsigned long *best_denominator); + +#endif /* _LINUX_RATIONAL_H */ diff --git a/include/linux/raw.h b/include/linux/raw.h new file mode 100644 index 00000000..62d543e7 --- /dev/null +++ b/include/linux/raw.h @@ -0,0 +1,18 @@ +#ifndef __LINUX_RAW_H +#define __LINUX_RAW_H + +#include <linux/types.h> + +#define RAW_SETBIND _IO( 0xac, 0 ) +#define RAW_GETBIND _IO( 0xac, 1 ) + +struct raw_config_request +{ + int raw_minor; + __u64 block_major; + __u64 block_minor; +}; + +#define MAX_RAW_MINORS CONFIG_MAX_RAW_DEVS + +#endif /* __LINUX_RAW_H */ diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h new file mode 100644 index 00000000..033b507b --- /dev/null +++ b/include/linux/rbtree.h @@ -0,0 +1,177 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli <andrea@suse.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/include/linux/rbtree.h + + To use rbtrees you'll have to implement your own insert and search cores. + This will avoid us to use callbacks and to drop drammatically performances. + I know it's not the cleaner way, but in C (not in C++) to get + performances and genericity... + + Some example of insert and search follows here. The search is a plain + normal search over an ordered tree. The insert instead must be implemented + in two steps: First, the code must insert the element in order as a red leaf + in the tree, and then the support library function rb_insert_color() must + be called. Such function will do the not trivial work to rebalance the + rbtree, if necessary. 
+ +----------------------------------------------------------------------- +static inline struct page * rb_search_page_cache(struct inode * inode, + unsigned long offset) +{ + struct rb_node * n = inode->i_rb_page_cache.rb_node; + struct page * page; + + while (n) + { + page = rb_entry(n, struct page, rb_page_cache); + + if (offset < page->offset) + n = n->rb_left; + else if (offset > page->offset) + n = n->rb_right; + else + return page; + } + return NULL; +} + +static inline struct page * __rb_insert_page_cache(struct inode * inode, + unsigned long offset, + struct rb_node * node) +{ + struct rb_node ** p = &inode->i_rb_page_cache.rb_node; + struct rb_node * parent = NULL; + struct page * page; + + while (*p) + { + parent = *p; + page = rb_entry(parent, struct page, rb_page_cache); + + if (offset < page->offset) + p = &(*p)->rb_left; + else if (offset > page->offset) + p = &(*p)->rb_right; + else + return page; + } + + rb_link_node(node, parent, p); + + return NULL; +} + +static inline struct page * rb_insert_page_cache(struct inode * inode, + unsigned long offset, + struct rb_node * node) +{ + struct page * ret; + if ((ret = __rb_insert_page_cache(inode, offset, node))) + goto out; + rb_insert_color(node, &inode->i_rb_page_cache); + out: + return ret; +} +----------------------------------------------------------------------- +*/ + +#ifndef _LINUX_RBTREE_H +#define _LINUX_RBTREE_H + +#include <linux/kernel.h> +#include <linux/stddef.h> + +struct rb_node +{ + unsigned long rb_parent_color; +#define RB_RED 0 +#define RB_BLACK 1 + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + /* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root +{ + struct rb_node *rb_node; +}; + + +#define rb_parent(r) ((struct rb_node *)((r)->rb_parent_color & ~3)) +#define rb_color(r) ((r)->rb_parent_color & 1) +#define rb_is_red(r) (!rb_color(r)) +#define rb_is_black(r) rb_color(r) +#define rb_set_red(r) do { (r)->rb_parent_color &= ~1; } while (0) +#define rb_set_black(r) do { (r)->rb_parent_color |= 1; } while (0) + +static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) +{ + rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p; +} +static inline void rb_set_color(struct rb_node *rb, int color) +{ + rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; +} + +#define RB_ROOT (struct rb_root) { NULL, } +#define rb_entry(ptr, type, member) container_of(ptr, type, member) + +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) +#define RB_EMPTY_NODE(node) (rb_parent(node) == node) +#define RB_CLEAR_NODE(node) (rb_set_parent(node, node)) + +static inline void rb_init_node(struct rb_node *rb) +{ + rb->rb_parent_color = 0; + rb->rb_right = NULL; + rb->rb_left = NULL; + RB_CLEAR_NODE(rb); +} + +extern void rb_insert_color(struct rb_node *, struct rb_root *); +extern void rb_erase(struct rb_node *, struct rb_root *); + +typedef void (*rb_augment_f)(struct rb_node *node, void *data); + +extern void rb_augment_insert(struct rb_node *node, + rb_augment_f func, void *data); +extern struct rb_node *rb_augment_erase_begin(struct rb_node *node); +extern void rb_augment_erase_end(struct rb_node *node, + rb_augment_f func, void *data); + +/* Find logical next and previous nodes in a tree */ +extern struct rb_node *rb_next(const struct rb_node *); +extern struct rb_node *rb_prev(const struct rb_node *); +extern struct rb_node *rb_first(const struct rb_root *); +extern struct rb_node *rb_last(const struct rb_root 
*); + +/* Fast replacement of a single node without remove/rebalance/add/rebalance */ +extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); + +static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, + struct rb_node ** rb_link) +{ + node->rb_parent_color = (unsigned long )parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} + +#endif /* _LINUX_RBTREE_H */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h new file mode 100644 index 00000000..d0792908 --- /dev/null +++ b/include/linux/rculist.h @@ -0,0 +1,493 @@ +#ifndef _LINUX_RCULIST_H +#define _LINUX_RCULIST_H + +#ifdef __KERNEL__ + +/* + * RCU-protected list version + */ +#include <linux/list.h> +#include <linux/rcupdate.h> + +/* + * Why is there no list_empty_rcu()? Because list_empty() serves this + * purpose. The list_empty() function fetches the RCU-protected pointer + * and compares it to the address of the list head, but neither dereferences + * this pointer itself nor provides this pointer to the caller. Therefore, + * it is not necessary to use rcu_dereference(), so that list_empty() can + * be used anywhere you would want to use a list_empty_rcu(). + */ + +/* + * return the ->next pointer of a list_head in an rcu safe + * way, we must not access it directly + */ +#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add_rcu(struct list_head *new, + struct list_head *prev, struct list_head *next) +{ + new->next = next; + new->prev = prev; + rcu_assign_pointer(list_next_rcu(prev), new); + next->prev = new; +} + +/** + * list_add_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_add_rcu() + * or list_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + */ +static inline void list_add_rcu(struct list_head *new, struct list_head *head) +{ + __list_add_rcu(new, head, head->next); +} + +/** + * list_add_tail_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_add_tail_rcu() + * or list_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + */ +static inline void list_add_tail_rcu(struct list_head *new, + struct list_head *head) +{ + __list_add_rcu(new, head->prev, head); +} + +/** + * list_del_rcu - deletes entry from list without re-initialization + * @entry: the element to delete from the list. + * + * Note: list_empty() on entry does not return true after this, + * the entry is in an undefined state. 
It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_del_rcu() + * or list_add_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + * + * Note that the caller is not permitted to immediately free + * the newly deleted entry. Instead, either synchronize_rcu() + * or call_rcu() must be used to defer freeing until an RCU + * grace period has elapsed. + */ +static inline void list_del_rcu(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->prev = LIST_POISON2; +} + +/** + * hlist_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: list_unhashed() on the node return true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_add_head_rcu() or + * hlist_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_for_each_entry_rcu(). + */ +static inline void hlist_del_init_rcu(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + n->pprev = NULL; + } +} + +/** + * list_replace_rcu - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * The @old entry will be replaced with the @new entry atomically. + * Note: @old should not be empty. + */ +static inline void list_replace_rcu(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->prev = old->prev; + rcu_assign_pointer(list_next_rcu(new->prev), new); + new->next->prev = new; + old->prev = LIST_POISON2; +} + +/** + * list_splice_init_rcu - splice an RCU-protected list into an existing list. + * @list: the RCU-protected list to splice + * @head: the place in the list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + * + * @head can be RCU-read traversed concurrently with this function. + * + * Note that this function blocks. + * + * Important note: the caller must take whatever action is necessary to + * prevent any other updates to @head. In principle, it is possible + * to modify the list as soon as sync() begins execution. + * If this sort of thing becomes necessary, an alternative version + * based on call_rcu() could be created. But only if -really- + * needed -- there is no shortage of RCU API members. 
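+ *
+ * For example (an illustrative call with caller-provided list heads):
+ *
+ *	list_splice_init_rcu(&local_list, &global_list, synchronize_rcu);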
+ */ +static inline void list_splice_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + struct list_head *at = head->next; + + if (list_empty(list)) + return; + + /* "first" and "last" tracking list, so initialize it. */ + + INIT_LIST_HEAD(list); + + /* + * At this point, the list body still points to the source list. + * Wait for any readers to finish using the list before splicing + * the list body into the new list. Any new readers will see + * an empty list. + */ + + sync(); + + /* + * Readers are finished with the source list, so perform splice. + * The order is important if the new list is global and accessible + * to concurrent RCU readers. Note that RCU readers are not + * permitted to traverse the prev pointers without excluding + * this function. + */ + + last->next = at; + rcu_assign_pointer(list_next_rcu(head), first); + first->prev = head; + at->prev = last; +} + +/** + * list_entry_rcu - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). + */ +#define list_entry_rcu(ptr, type, member) \ + ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \ + container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ + }) + +/** + * list_first_entry_rcu - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). + */ +#define list_first_entry_rcu(ptr, type, member) \ + list_entry_rcu((ptr)->next, type, member) + +/** + * list_for_each_entry_rcu - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define list_for_each_entry_rcu(pos, head, member) \ + for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) + + +/** + * list_for_each_continue_rcu + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * Iterate over an rcu-protected list, continuing after current point. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define list_for_each_continue_rcu(pos, head) \ + for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ + (pos) != (head); \ + (pos) = rcu_dereference_raw(list_next_rcu(pos))) + +/** + * list_for_each_entry_continue_rcu - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. 
+ * @member: the name of the list_struct within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define list_for_each_entry_continue_rcu(pos, head, member) \ + for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) + +/** + * hlist_del_rcu - deletes entry from hash list without re-initialization + * @n: the element to delete from the hash list. + * + * Note: list_unhashed() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the hash list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry(). + */ +static inline void hlist_del_rcu(struct hlist_node *n) +{ + __hlist_del(n); + n->pprev = LIST_POISON2; +} + +/** + * hlist_replace_rcu - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * The @old entry will be replaced with the @new entry atomically. + */ +static inline void hlist_replace_rcu(struct hlist_node *old, + struct hlist_node *new) +{ + struct hlist_node *next = old->next; + + new->next = next; + new->pprev = old->pprev; + rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); + if (next) + new->next->pprev = &new->next; + old->pprev = LIST_POISON2; +} + +/* + * return the first or the next element in an RCU protected hlist + */ +#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) +#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) +#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) + +/** + * hlist_add_head_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_add_head_rcu(struct hlist_node *n, + struct hlist_head *h) +{ + struct hlist_node *first = h->first; + + n->next = first; + n->pprev = &h->first; + rcu_assign_pointer(hlist_first_rcu(h), n); + if (first) + first->pprev = &n->next; +} + +/** + * hlist_add_before_rcu + * @n: the new element to add to the hash list. + * @next: the existing element to add the new element before. + * + * Description: + * Adds the specified element to the specified hlist + * before the specified node while permitting racing traversals. 
+ * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. + */ +static inline void hlist_add_before_rcu(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + rcu_assign_pointer(hlist_pprev_rcu(n), n); + next->pprev = &n->next; +} + +/** + * hlist_add_after_rcu + * @prev: the existing element to add the new element after. + * @n: the new element to add to the hash list. + * + * Description: + * Adds the specified element to the specified hlist + * after the specified node while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. + */ +static inline void hlist_add_after_rcu(struct hlist_node *prev, + struct hlist_node *n) +{ + n->next = prev->next; + n->pprev = &prev->next; + rcu_assign_pointer(hlist_next_rcu(prev), n); + if (n->next) + n->next->pprev = &n->next; +} + +#define __hlist_for_each_rcu(pos, head) \ + for (pos = rcu_dereference(hlist_first_rcu(head)); \ + pos; \ + pos = rcu_dereference(hlist_next_rcu(pos))) + +/** + * hlist_for_each_entry_rcu - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ + for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference_raw(hlist_next_rcu(pos))) + +/** + * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ + for (pos = rcu_dereference_bh((head)->first); \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference_bh(pos->next)) + +/** + * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. 
+ */ +#define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ + for (pos = rcu_dereference((pos)->next); \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference(pos->next)) + +/** + * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ + for (pos = rcu_dereference_bh((pos)->next); \ + pos && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference_bh(pos->next)) + + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h new file mode 100644 index 00000000..cf1244fb --- /dev/null +++ b/include/linux/rculist_bl.h @@ -0,0 +1,128 @@ +#ifndef _LINUX_RCULIST_BL_H +#define _LINUX_RCULIST_BL_H + +/* + * RCU-protected bl list version. See include/linux/list_bl.h. + */ +#include <linux/list_bl.h> +#include <linux/rcupdate.h> + +static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h, + struct hlist_bl_node *n) +{ + LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); + LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != + LIST_BL_LOCKMASK); + rcu_assign_pointer(h->first, + (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK)); +} + +static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h) +{ + return (struct hlist_bl_node *) + ((unsigned long)rcu_dereference(h->first) & ~LIST_BL_LOCKMASK); +} + +/** + * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_bl_unhashed() on the node returns true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_bl_add_head_rcu() or + * hlist_bl_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_bl_for_each_entry_rcu(). + */ +static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n) +{ + if (!hlist_bl_unhashed(n)) { + __hlist_bl_del(n); + n->pprev = NULL; + } +} + +/** + * hlist_bl_del_rcu - deletes entry from hash list without re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_bl_unhashed() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the hash list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_bl_add_head_rcu() + * or hlist_bl_del_rcu(), running on this same list. 
+ * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_bl_for_each_entry(). + */ +static inline void hlist_bl_del_rcu(struct hlist_bl_node *n) +{ + __hlist_bl_del(n); + n->pprev = LIST_POISON2; +} + +/** + * hlist_bl_add_head_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist_bl, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_bl_add_head_rcu() + * or hlist_bl_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n, + struct hlist_bl_head *h) +{ + struct hlist_bl_node *first; + + /* don't need hlist_bl_first_rcu because we're under lock */ + first = hlist_bl_first(h); + + n->next = first; + if (first) + first->pprev = &n->next; + n->pprev = &h->first; + + /* need _rcu because we can have concurrent lock free readers */ + hlist_bl_set_first_rcu(h, n); +} +/** + * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_bl_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_bl_node within the struct. + * + */ +#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member) \ + for (pos = hlist_bl_first_rcu(head); \ + pos && \ + ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference_raw(pos->next)) + +#endif diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h new file mode 100644 index 00000000..2ae13714 --- /dev/null +++ b/include/linux/rculist_nulls.h @@ -0,0 +1,116 @@ +#ifndef _LINUX_RCULIST_NULLS_H +#define _LINUX_RCULIST_NULLS_H + +#ifdef __KERNEL__ + +/* + * RCU-protected list version + */ +#include <linux/list_nulls.h> +#include <linux/rcupdate.h> + +/** + * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_nulls_unhashed() on the node return true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_nulls_add_head_rcu() or + * hlist_nulls_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_nulls_for_each_entry_rcu(). 
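+ *
+ * (An hlist_nulls chain is terminated by an encoded "nulls" marker rather
+ * than by NULL -- see is_a_nulls() in list_nulls.h -- which is what lets a
+ * lockless reader notice that an entry it is traversing has been moved to
+ * a different chain.)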
+ */ +static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) +{ + if (!hlist_nulls_unhashed(n)) { + __hlist_nulls_del(n); + n->pprev = NULL; + } +} + +#define hlist_nulls_first_rcu(head) \ + (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) + +#define hlist_nulls_next_rcu(node) \ + (*((struct hlist_nulls_node __rcu __force **)&(node)->next)) + +/** + * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_nulls_unhashed() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the hash list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() + * or hlist_nulls_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_nulls_for_each_entry(). + */ +static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n) +{ + __hlist_nulls_del(n); + n->pprev = LIST_POISON2; +} + +/** + * hlist_nulls_add_head_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist_nulls, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() + * or hlist_nulls_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *first = h->first; + + n->next = first; + n->pprev = &h->first; + rcu_assign_pointer(hlist_nulls_first_rcu(h), n); + if (!is_a_nulls(first)) + first->pprev = &n->next; +} +/** + * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_nulls_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_nulls_node within the struct. + * + */ +#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ + for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ + (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) + +#endif +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h new file mode 100644 index 00000000..20fb776a --- /dev/null +++ b/include/linux/rcupdate.h @@ -0,0 +1,951 @@ +/* + * Read-Copy Update mechanism for mutual exclusion + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright IBM Corporation, 2001 + * + * Author: Dipankar Sarma <dipankar@in.ibm.com> + * + * Based on the original work by Paul McKenney <paulmck@us.ibm.com> + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. + * Papers: + * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf + * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) + * + * For detailed explanation of Read-Copy Update mechanism see - + * http://lse.sourceforge.net/locking/rcupdate.html + * + */ + +#ifndef __LINUX_RCUPDATE_H +#define __LINUX_RCUPDATE_H + +#include <linux/types.h> +#include <linux/cache.h> +#include <linux/spinlock.h> +#include <linux/threads.h> +#include <linux/cpumask.h> +#include <linux/seqlock.h> +#include <linux/lockdep.h> +#include <linux/completion.h> +#include <linux/debugobjects.h> +#include <linux/bug.h> +#include <linux/compiler.h> + +#ifdef CONFIG_RCU_TORTURE_TEST +extern int rcutorture_runnable; /* for sysctl */ +#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ + +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) +extern void rcutorture_record_test_transition(void); +extern void rcutorture_record_progress(unsigned long vernum); +extern void do_trace_rcu_torture_read(char *rcutorturename, + struct rcu_head *rhp); +#else +static inline void rcutorture_record_test_transition(void) +{ +} +static inline void rcutorture_record_progress(unsigned long vernum) +{ +} +#ifdef CONFIG_RCU_TRACE +extern void do_trace_rcu_torture_read(char *rcutorturename, + struct rcu_head *rhp); +#else +#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) +#endif +#endif + +#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) +#define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b)) +#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) +#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) + +/* Exported common interfaces */ + +#ifdef CONFIG_PREEMPT_RCU + +/** + * call_rcu() - Queue an RCU callback for invocation after a grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual callback function to be invoked after the grace period + * + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all pre-existing RCU read-side + * critical sections have completed. However, the callback function + * might well execute concurrently with RCU read-side critical sections + * that started after call_rcu() was invoked. RCU read-side critical + * sections are delimited by rcu_read_lock() and rcu_read_unlock(), + * and may be nested. + */ +extern void call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *head)); + +#else /* #ifdef CONFIG_PREEMPT_RCU */ + +/* In classic RCU, call_rcu() is just call_rcu_sched(). */ +#define call_rcu call_rcu_sched + +#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + +/** + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. 
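
As a concrete illustration of the call_rcu() contract documented above, the sketch below (struct foo and the helpers are hypothetical) unlinks an object from reader-visible data structures and defers its kfree() until all pre-existing readers have finished:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {				/* hypothetical RCU-protected object */
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *head)
{
	/* Runs after a grace period: no pre-existing reader can still see f. */
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);
}

static void foo_retire(struct foo *f)
{
	/* The caller has already unlinked f from all reader-visible lists. */
	call_rcu(&f->rcu, foo_reclaim);
}
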
+ * @func: actual callback function to be invoked after the grace period + * + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_bh() assumes + * that the read-side critical sections end on completion of a softirq + * handler. This means that read-side critical sections in process + * context must not be interrupted by softirqs. This interface is to be + * used when most of the read-side critical sections are in softirq context. + * RCU read-side critical sections are delimited by : + * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. + * OR + * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. + * These may be nested. + */ +extern void call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *head)); + +/** + * call_rcu_sched() - Queue an RCU for invocation after sched grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual callback function to be invoked after the grace period + * + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_sched() assumes + * that the read-side critical sections end on enabling of preemption + * or on voluntary preemption. + * RCU read-side critical sections are delimited by : + * - rcu_read_lock_sched() and rcu_read_unlock_sched(), + * OR + * anything that disables preemption. + * These may be nested. + */ +extern void call_rcu_sched(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)); + +extern void synchronize_sched(void); + +#ifdef CONFIG_PREEMPT_RCU + +extern void __rcu_read_lock(void); +extern void __rcu_read_unlock(void); +void synchronize_rcu(void); + +/* + * Defined as a macro as it is a very low level header included from + * areas that don't even know about current. This gives the rcu_read_lock() + * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. + */ +#define rcu_preempt_depth() (current->rcu_read_lock_nesting) + +#else /* #ifdef CONFIG_PREEMPT_RCU */ + +static inline void __rcu_read_lock(void) +{ + preempt_disable(); +} + +static inline void __rcu_read_unlock(void) +{ + preempt_enable(); +} + +static inline void synchronize_rcu(void) +{ + synchronize_sched(); +} + +static inline int rcu_preempt_depth(void) +{ + return 0; +} + +#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + +/* Internal to kernel */ +extern void rcu_sched_qs(int cpu); +extern void rcu_bh_qs(int cpu); +extern void rcu_check_callbacks(int cpu, int user); +struct notifier_block; +extern void rcu_idle_enter(void); +extern void rcu_idle_exit(void); +extern void rcu_irq_enter(void); +extern void rcu_irq_exit(void); + +/** + * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers + * @a: Code that RCU needs to pay attention to. + * + * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden + * in the inner idle loop, that is, between the rcu_idle_enter() and + * the rcu_idle_exit() -- RCU will happily ignore any such read-side + * critical sections. However, things like powertop need tracepoints + * in the inner idle loop. 
+ * + * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) + * will tell RCU that it needs to pay attending, invoke its argument + * (in this example, a call to the do_something_with_RCU() function), + * and then tell RCU to go back to ignoring this CPU. It is permissible + * to nest RCU_NONIDLE() wrappers, but the nesting level is currently + * quite limited. If deeper nesting is required, it will be necessary + * to adjust DYNTICK_TASK_NESTING_VALUE accordingly. + * + * This macro may be used from process-level code only. + */ +#define RCU_NONIDLE(a) \ + do { \ + rcu_idle_exit(); \ + do { a; } while (0); \ + rcu_idle_enter(); \ + } while (0) + +/* + * Infrastructure to implement the synchronize_() primitives in + * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. + */ + +typedef void call_rcu_func_t(struct rcu_head *head, + void (*func)(struct rcu_head *head)); +void wait_rcu_gp(call_rcu_func_t crf); + +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) +#include <linux/rcutree.h> +#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) +#include <linux/rcutiny.h> +#else +#error "Unknown RCU implementation specified to kernel configuration" +#endif + +/* + * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic + * initialization and destruction of rcu_head on the stack. rcu_head structures + * allocated dynamically in the heap or defined statically don't need any + * initialization. + */ +#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD +extern void init_rcu_head_on_stack(struct rcu_head *head); +extern void destroy_rcu_head_on_stack(struct rcu_head *head); +#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ +static inline void init_rcu_head_on_stack(struct rcu_head *head) +{ +} + +static inline void destroy_rcu_head_on_stack(struct rcu_head *head) +{ +} +#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ + +#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) +bool rcu_lockdep_current_cpu_online(void); +#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ +static inline bool rcu_lockdep_current_cpu_online(void) +{ + return 1; +} +#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +#ifdef CONFIG_PROVE_RCU +extern int rcu_is_cpu_idle(void); +#else /* !CONFIG_PROVE_RCU */ +static inline int rcu_is_cpu_idle(void) +{ + return 0; +} +#endif /* else !CONFIG_PROVE_RCU */ + +static inline void rcu_lock_acquire(struct lockdep_map *map) +{ + lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); +} + +static inline void rcu_lock_release(struct lockdep_map *map) +{ + lock_release(map, 1, _THIS_IP_); +} + +extern struct lockdep_map rcu_lock_map; +extern struct lockdep_map rcu_bh_lock_map; +extern struct lockdep_map rcu_sched_lock_map; +extern int debug_lockdep_rcu_enabled(void); + +/** + * rcu_read_lock_held() - might we be in RCU read-side critical section? + * + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU + * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, + * this assumes we are in an RCU read-side critical section unless it can + * prove otherwise. This is useful for debug checks in functions that + * require that they be called within an RCU read-side critical section. + * + * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot + * and while lockdep is disabled. 
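
For example, idle-loop code that wants to fire a tracepoint (tracepoints use RCU internally) could wrap just that one call; the function and tracepoint names in this sketch are hypothetical:

static void foo_arch_idle(void)
{
	/* Momentarily tell RCU to watch this CPU again for the trace call. */
	RCU_NONIDLE(trace_foo_cpu_idle());	/* hypothetical tracepoint */

	/* ... then actually enter the low-power state ... */
}
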
+ * + * Note that rcu_read_lock() and the matching rcu_read_unlock() must + * occur in the same context, for example, it is illegal to invoke + * rcu_read_unlock() in process context if the matching rcu_read_lock() + * was invoked from within an irq handler. + * + * Note that rcu_read_lock() is disallowed if the CPU is either idle or + * offline from an RCU perspective, so check for those as well. + */ +static inline int rcu_read_lock_held(void) +{ + if (!debug_lockdep_rcu_enabled()) + return 1; + if (rcu_is_cpu_idle()) + return 0; + if (!rcu_lockdep_current_cpu_online()) + return 0; + return lock_is_held(&rcu_lock_map); +} + +/* + * rcu_read_lock_bh_held() is defined out of line to avoid #include-file + * hell. + */ +extern int rcu_read_lock_bh_held(void); + +/** + * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? + * + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an + * RCU-sched read-side critical section. In absence of + * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side + * critical section unless it can prove otherwise. Note that disabling + * of preemption (including disabling irqs) counts as an RCU-sched + * read-side critical section. This is useful for debug checks in functions + * that required that they be called within an RCU-sched read-side + * critical section. + * + * Check debug_lockdep_rcu_enabled() to prevent false positives during boot + * and while lockdep is disabled. + * + * Note that if the CPU is in the idle loop from an RCU point of + * view (ie: that we are in the section between rcu_idle_enter() and + * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU + * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs + * that are in such a section, considering these as in extended quiescent + * state, so such a CPU is effectively never in an RCU read-side critical + * section regardless of what RCU primitives it invokes. This state of + * affairs is required --- we need to keep an RCU-free window in idle + * where the CPU may possibly enter into low power mode. This way we can + * notice an extended quiescent state to other CPUs that started a grace + * period. Otherwise we would delay any grace period as long as we run in + * the idle task. + * + * Similarly, we avoid claiming an SRCU read lock held if the current + * CPU is offline. 
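
A typical use of rcu_read_lock_held() is a sanity check inside a helper whose callers are required to be in an RCU read-side critical section; everything named foo in this sketch is hypothetical:

struct foo {
	int x;
};

static struct foo *foo_peek(struct foo __rcu **slot)
{
	/* Complain (via lockdep, at most once) if the caller forgot rcu_read_lock(). */
	WARN_ON_ONCE(!rcu_read_lock_held());

	return rcu_dereference(*slot);
}
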
+ */ +#ifdef CONFIG_PREEMPT_COUNT +static inline int rcu_read_lock_sched_held(void) +{ + int lockdep_opinion = 0; + + if (!debug_lockdep_rcu_enabled()) + return 1; + if (rcu_is_cpu_idle()) + return 0; + if (!rcu_lockdep_current_cpu_online()) + return 0; + if (debug_locks) + lockdep_opinion = lock_is_held(&rcu_sched_lock_map); + return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); +} +#else /* #ifdef CONFIG_PREEMPT_COUNT */ +static inline int rcu_read_lock_sched_held(void) +{ + return 1; +} +#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +# define rcu_lock_acquire(a) do { } while (0) +# define rcu_lock_release(a) do { } while (0) + +static inline int rcu_read_lock_held(void) +{ + return 1; +} + +static inline int rcu_read_lock_bh_held(void) +{ + return 1; +} + +#ifdef CONFIG_PREEMPT_COUNT +static inline int rcu_read_lock_sched_held(void) +{ + return preempt_count() != 0 || irqs_disabled(); +} +#else /* #ifdef CONFIG_PREEMPT_COUNT */ +static inline int rcu_read_lock_sched_held(void) +{ + return 1; +} +#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +#ifdef CONFIG_PROVE_RCU + +extern int rcu_my_thread_group_empty(void); + +/** + * rcu_lockdep_assert - emit lockdep splat if specified condition not met + * @c: condition to check + * @s: informative message + */ +#define rcu_lockdep_assert(c, s) \ + do { \ + static bool __section(.data.unlikely) __warned; \ + if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ + __warned = true; \ + lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ + } \ + } while (0) + +#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) +static inline void rcu_preempt_sleep_check(void) +{ + rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), + "Illegal context switch in RCU read-side " + "critical section"); +} +#else /* #ifdef CONFIG_PROVE_RCU */ +static inline void rcu_preempt_sleep_check(void) +{ +} +#endif /* #else #ifdef CONFIG_PROVE_RCU */ + +#define rcu_sleep_check() \ + do { \ + rcu_preempt_sleep_check(); \ + rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \ + "Illegal context switch in RCU-bh" \ + " read-side critical section"); \ + rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \ + "Illegal context switch in RCU-sched"\ + " read-side critical section"); \ + } while (0) + +#else /* #ifdef CONFIG_PROVE_RCU */ + +#define rcu_lockdep_assert(c, s) do { } while (0) +#define rcu_sleep_check() do { } while (0) + +#endif /* #else #ifdef CONFIG_PROVE_RCU */ + +/* + * Helper functions for rcu_dereference_check(), rcu_dereference_protected() + * and rcu_assign_pointer(). Some of these could be folded into their + * callers, but they are left separate in order to ease introduction of + * multiple flavors of pointers to match the multiple flavors of RCU + * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in + * the future. 
+ */ + +#ifdef __CHECKER__ +#define rcu_dereference_sparse(p, space) \ + ((void)(((typeof(*p) space *)p) == p)) +#else /* #ifdef __CHECKER__ */ +#define rcu_dereference_sparse(p, space) +#endif /* #else #ifdef __CHECKER__ */ + +#define __rcu_access_pointer(p, space) \ + ({ \ + typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ + rcu_dereference_sparse(p, space); \ + ((typeof(*p) __force __kernel *)(_________p1)); \ + }) +#define __rcu_dereference_check(p, c, space) \ + ({ \ + typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ + rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \ + " usage"); \ + rcu_dereference_sparse(p, space); \ + smp_read_barrier_depends(); \ + ((typeof(*p) __force __kernel *)(_________p1)); \ + }) +#define __rcu_dereference_protected(p, c, space) \ + ({ \ + rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \ + " usage"); \ + rcu_dereference_sparse(p, space); \ + ((typeof(*p) __force __kernel *)(p)); \ + }) + +#define __rcu_access_index(p, space) \ + ({ \ + typeof(p) _________p1 = ACCESS_ONCE(p); \ + rcu_dereference_sparse(p, space); \ + (_________p1); \ + }) +#define __rcu_dereference_index_check(p, c) \ + ({ \ + typeof(p) _________p1 = ACCESS_ONCE(p); \ + rcu_lockdep_assert(c, \ + "suspicious rcu_dereference_index_check()" \ + " usage"); \ + smp_read_barrier_depends(); \ + (_________p1); \ + }) +#define __rcu_assign_pointer(p, v, space) \ + ({ \ + smp_wmb(); \ + (p) = (typeof(*v) __force space *)(v); \ + }) + + +/** + * rcu_access_pointer() - fetch RCU pointer with no dereferencing + * @p: The pointer to read + * + * Return the value of the specified RCU-protected pointer, but omit the + * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful + * when the value of this pointer is accessed, but the pointer is not + * dereferenced, for example, when testing an RCU-protected pointer against + * NULL. Although rcu_access_pointer() may also be used in cases where + * update-side locks prevent the value of the pointer from changing, you + * should instead use rcu_dereference_protected() for this use case. + * + * It is also permissible to use rcu_access_pointer() when read-side + * access to the pointer was removed at least one grace period ago, as + * is the case in the context of the RCU callback that is freeing up + * the data, or after a synchronize_rcu() returns. This can be useful + * when tearing down multi-linked structures after a grace period + * has elapsed. + */ +#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) + +/** + * rcu_dereference_check() - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * Do an rcu_dereference(), but check that the conditions under which the + * dereference will take place are correct. Typically the conditions + * indicate the various locking conditions that should be held at that + * point. The check should return true if the conditions are satisfied. + * An implicit check for being in an RCU read-side critical section + * (rcu_read_lock()) is included. + * + * For example: + * + * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); + * + * could be used to indicate to lockdep that foo->bar may only be dereferenced + * if either rcu_read_lock() is held, or that the lock required to replace + * the bar struct at foo->bar is held. 
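
The difference between these accessors is easiest to see side by side. In this illustrative sketch (global_foo and foo_mutex are invented), rcu_access_pointer() suffices for a NULL test, while an update-side path holding the mutex uses rcu_dereference_protected() and so skips both ACCESS_ONCE() and the dependency barrier:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct foo;					/* hypothetical */

static struct foo __rcu *global_foo;		/* RCU-protected pointer */
static DEFINE_MUTEX(foo_mutex);			/* update-side lock */

static bool foo_is_installed(void)
{
	/* Only compared against NULL, never dereferenced. */
	return rcu_access_pointer(global_foo) != NULL;
}

static struct foo *foo_get_update_side(void)
{
	/* Safe without rcu_read_lock(): foo_mutex keeps updaters away. */
	return rcu_dereference_protected(global_foo,
					 lockdep_is_held(&foo_mutex));
}
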
+ * + * Note that the list of conditions may also include indications of when a lock + * need not be held, for example during initialisation or destruction of the + * target struct: + * + * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || + * atomic_read(&foo->usage) == 0); + * + * Inserts memory barriers on architectures that require them + * (currently only the Alpha), prevents the compiler from refetching + * (and from merging fetches), and, more importantly, documents exactly + * which pointers are protected by RCU and checks that the pointer is + * annotated as __rcu. + */ +#define rcu_dereference_check(p, c) \ + __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu) + +/** + * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * This is the RCU-bh counterpart to rcu_dereference_check(). + */ +#define rcu_dereference_bh_check(p, c) \ + __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu) + +/** + * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * This is the RCU-sched counterpart to rcu_dereference_check(). + */ +#define rcu_dereference_sched_check(p, c) \ + __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \ + __rcu) + +#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ + +/** + * rcu_access_index() - fetch RCU index with no dereferencing + * @p: The index to read + * + * Return the value of the specified RCU-protected index, but omit the + * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful + * when the value of this index is accessed, but the index is not + * dereferenced, for example, when testing an RCU-protected index against + * -1. Although rcu_access_index() may also be used in cases where + * update-side locks prevent the value of the index from changing, you + * should instead use rcu_dereference_index_protected() for this use case. + */ +#define rcu_access_index(p) __rcu_access_index((p), __rcu) + +/** + * rcu_dereference_index_check() - rcu_dereference for indices with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * Similar to rcu_dereference_check(), but omits the sparse checking. + * This allows rcu_dereference_index_check() to be used on integers, + * which can then be used as array indices. Attempting to use + * rcu_dereference_check() on an integer will give compiler warnings + * because the sparse address-space mechanism relies on dereferencing + * the RCU-protected pointer. Dereferencing integers is not something + * that even gcc will put up with. + * + * Note that this function does not implicitly check for RCU read-side + * critical sections. If this function gains lots of uses, it might + * make sense to provide versions for each flavor of RCU, but it does + * not make sense as of early 2010. 
+ */ +#define rcu_dereference_index_check(p, c) \ + __rcu_dereference_index_check((p), (c)) + +/** + * rcu_dereference_protected() - fetch RCU pointer when updates prevented + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * Return the value of the specified RCU-protected pointer, but omit + * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This + * is useful in cases where update-side locks prevent the value of the + * pointer from changing. Please note that this primitive does -not- + * prevent the compiler from repeating this reference or combining it + * with other references, so it should not be used without protection + * of appropriate locks. + * + * This function is only for update-side use. Using this function + * when protected only by rcu_read_lock() will result in infrequent + * but very ugly failures. + */ +#define rcu_dereference_protected(p, c) \ + __rcu_dereference_protected((p), (c), __rcu) + + +/** + * rcu_dereference() - fetch RCU-protected pointer for dereferencing + * @p: The pointer to read, prior to dereferencing + * + * This is a simple wrapper around rcu_dereference_check(). + */ +#define rcu_dereference(p) rcu_dereference_check(p, 0) + +/** + * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing + * @p: The pointer to read, prior to dereferencing + * + * Makes rcu_dereference_check() do the dirty work. + */ +#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) + +/** + * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing + * @p: The pointer to read, prior to dereferencing + * + * Makes rcu_dereference_check() do the dirty work. + */ +#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) + +/** + * rcu_read_lock() - mark the beginning of an RCU read-side critical section + * + * When synchronize_rcu() is invoked on one CPU while other CPUs + * are within RCU read-side critical sections, then the + * synchronize_rcu() is guaranteed to block until after all the other + * CPUs exit their critical sections. Similarly, if call_rcu() is invoked + * on one CPU while other CPUs are within RCU read-side critical + * sections, invocation of the corresponding RCU callback is deferred + * until after the all the other CPUs exit their critical sections. + * + * Note, however, that RCU callbacks are permitted to run concurrently + * with new RCU read-side critical sections. One way that this can happen + * is via the following sequence of events: (1) CPU 0 enters an RCU + * read-side critical section, (2) CPU 1 invokes call_rcu() to register + * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, + * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU + * callback is invoked. This is legal, because the RCU read-side critical + * section that was running concurrently with the call_rcu() (and which + * therefore might be referencing something that the corresponding RCU + * callback would free up) has completed before the corresponding + * RCU callback is invoked. + * + * RCU read-side critical sections may be nested. Any deferred actions + * will be deferred until the outermost RCU read-side critical section + * completes. + * + * You can avoid reading and understanding the next paragraph by + * following this rule: don't put anything in an rcu_read_lock() RCU + * read-side critical section that would block in a !PREEMPT kernel. + * But if you want the full story, read on! 
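
Putting the pieces together, the canonical publish/read/reclaim pattern that these accessors and the rcu_read_lock()/rcu_read_unlock() pair below are designed for looks roughly like this (gbl_foo and the helpers are illustrative, loosely following Documentation/RCU/whatisRCU.txt):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {				/* hypothetical shared object */
	int a;
};

static struct foo __rcu *gbl_foo;
static DEFINE_SPINLOCK(gbl_foo_lock);

/* Reader: may run on any CPU at any time and never blocks the updater. */
static int foo_get_a(void)
{
	struct foo *fp;
	int ret;

	rcu_read_lock();
	fp = rcu_dereference(gbl_foo);
	ret = fp ? fp->a : -1;
	rcu_read_unlock();
	return ret;
}

/* Updater: publishes a new version, then waits out pre-existing readers. */
static void foo_update_a(int new_a)
{
	struct foo *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (!newp)
		return;			/* error handling abbreviated */
	newp->a = new_a;

	spin_lock(&gbl_foo_lock);
	oldp = rcu_dereference_protected(gbl_foo,
					 lockdep_is_held(&gbl_foo_lock));
	rcu_assign_pointer(gbl_foo, newp);
	spin_unlock(&gbl_foo_lock);

	synchronize_rcu();		/* wait for all pre-existing readers */
	kfree(oldp);
}
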
+ * + * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it + * is illegal to block while in an RCU read-side critical section. In + * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU) + * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may + * be preempted, but explicit blocking is illegal. Finally, in preemptible + * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds, + * RCU read-side critical sections may be preempted and they may also + * block, but only when acquiring spinlocks that are subject to priority + * inheritance. + */ +static inline void rcu_read_lock(void) +{ + __rcu_read_lock(); + __acquire(RCU); + rcu_lock_acquire(&rcu_lock_map); + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "rcu_read_lock() used illegally while idle"); +} + +/* + * So where is rcu_write_lock()? It does not exist, as there is no + * way for writers to lock out RCU readers. This is a feature, not + * a bug -- this property is what provides RCU's performance benefits. + * Of course, writers must coordinate with each other. The normal + * spinlock primitives work well for this, but any other technique may be + * used as well. RCU does not care how the writers keep out of each + * others' way, as long as they do so. + */ + +/** + * rcu_read_unlock() - marks the end of an RCU read-side critical section. + * + * See rcu_read_lock() for more information. + */ +static inline void rcu_read_unlock(void) +{ + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "rcu_read_unlock() used illegally while idle"); + rcu_lock_release(&rcu_lock_map); + __release(RCU); + __rcu_read_unlock(); +} + +/** + * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section + * + * This is equivalent of rcu_read_lock(), but to be used when updates + * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since + * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a + * softirq handler to be a quiescent state, a process in RCU read-side + * critical section must be protected by disabling softirqs. Read-side + * critical sections in interrupt context can use just rcu_read_lock(), + * though this should at least be commented to avoid confusing people + * reading the code. + * + * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() + * must occur in the same context, for example, it is illegal to invoke + * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() + * was invoked from some other task. + */ +static inline void rcu_read_lock_bh(void) +{ + local_bh_disable(); + __acquire(RCU_BH); + rcu_lock_acquire(&rcu_bh_lock_map); + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "rcu_read_lock_bh() used illegally while idle"); +} + +/* + * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section + * + * See rcu_read_lock_bh() for more information. + */ +static inline void rcu_read_unlock_bh(void) +{ + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "rcu_read_unlock_bh() used illegally while idle"); + rcu_lock_release(&rcu_bh_lock_map); + __release(RCU_BH); + local_bh_enable(); +} + +/** + * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section + * + * This is equivalent of rcu_read_lock(), but to be used when updates + * are being done using call_rcu_sched() or synchronize_rcu_sched(). + * Read-side critical sections can also be introduced by anything that + * disables preemption, including local_irq_disable() and friends. 
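
The _bh flavor pairs a reader that must exclude softirq-driven updates with call_rcu_bh() on the update side. A minimal sketch, with all foo_* names invented and the list assumed to be modified only from softirq context:

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {				/* hypothetical list element */
	struct list_head list;
	struct rcu_head rcu;
};

static LIST_HEAD(foo_list);		/* written only from softirq */
static DEFINE_SPINLOCK(foo_list_lock);

static void foo_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Process-context reader racing with softirq updaters. */
static int foo_count(void)
{
	struct foo *f;
	int n = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(f, &foo_list, list)
		n++;
	rcu_read_unlock_bh();
	return n;
}

/* Softirq-context updater retires an entry using the bh flavor. */
static void foo_retire(struct foo *f)
{
	spin_lock(&foo_list_lock);
	list_del_rcu(&f->list);
	spin_unlock(&foo_list_lock);
	call_rcu_bh(&f->rcu, foo_free_cb);
}
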
+ * + * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() + * must occur in the same context, for example, it is illegal to invoke + * rcu_read_unlock_sched() from process context if the matching + * rcu_read_lock_sched() was invoked from an NMI handler. + */ +static inline void rcu_read_lock_sched(void) +{ + preempt_disable(); + __acquire(RCU_SCHED); + rcu_lock_acquire(&rcu_sched_lock_map); + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "rcu_read_lock_sched() used illegally while idle"); +} + +/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ +static inline notrace void rcu_read_lock_sched_notrace(void) +{ + preempt_disable_notrace(); + __acquire(RCU_SCHED); +} + +/* + * rcu_read_unlock_sched - marks the end of a RCU-classic critical section + * + * See rcu_read_lock_sched for more information. + */ +static inline void rcu_read_unlock_sched(void) +{ + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "rcu_read_unlock_sched() used illegally while idle"); + rcu_lock_release(&rcu_sched_lock_map); + __release(RCU_SCHED); + preempt_enable(); +} + +/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ +static inline notrace void rcu_read_unlock_sched_notrace(void) +{ + __release(RCU_SCHED); + preempt_enable_notrace(); +} + +/** + * rcu_assign_pointer() - assign to RCU-protected pointer + * @p: pointer to assign to + * @v: value to assign (publish) + * + * Assigns the specified value to the specified RCU-protected + * pointer, ensuring that any concurrent RCU readers will see + * any prior initialization. Returns the value assigned. + * + * Inserts memory barriers on architectures that require them + * (which is most of them), and also prevents the compiler from + * reordering the code that initializes the structure after the pointer + * assignment. More importantly, this call documents which pointers + * will be dereferenced by RCU read-side code. + * + * In some special cases, you may use RCU_INIT_POINTER() instead + * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due + * to the fact that it does not constrain either the CPU or the compiler. + * That said, using RCU_INIT_POINTER() when you should have used + * rcu_assign_pointer() is a very bad thing that results in + * impossible-to-diagnose memory corruption. So please be careful. + * See the RCU_INIT_POINTER() comment header for details. + */ +#define rcu_assign_pointer(p, v) \ + __rcu_assign_pointer((p), (v), __rcu) + +/** + * RCU_INIT_POINTER() - initialize an RCU protected pointer + * + * Initialize an RCU-protected pointer in special cases where readers + * do not need ordering constraints on the CPU or the compiler. These + * special cases are: + * + * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- + * 2. The caller has taken whatever steps are required to prevent + * RCU readers from concurrently accessing this pointer -or- + * 3. The referenced data structure has already been exposed to + * readers either at compile time or via rcu_assign_pointer() -and- + * a. You have not made -any- reader-visible changes to + * this structure since then -or- + * b. It is OK for readers accessing this structure from its + * new location to see the old state of the structure. (For + * example, the changes were to statistical counters or to + * other state where exact synchronization is not required.) + * + * Failure to follow these rules governing use of RCU_INIT_POINTER() will + * result in impossible-to-diagnose memory corruption. 
As in the structures + * will look OK in crash dumps, but any concurrent RCU readers might + * see pre-initialized values of the referenced data structure. So + * please be very careful how you use RCU_INIT_POINTER()!!! + * + * If you are creating an RCU-protected linked structure that is accessed + * by a single external-to-structure RCU-protected pointer, then you may + * use RCU_INIT_POINTER() to initialize the internal RCU-protected + * pointers, but you must use rcu_assign_pointer() to initialize the + * external-to-structure pointer -after- you have completely initialized + * the reader-accessible portions of the linked structure. + */ +#define RCU_INIT_POINTER(p, v) \ + p = (typeof(*v) __force __rcu *)(v) + +static __always_inline bool __is_kfree_rcu_offset(unsigned long offset) +{ + return offset < 4096; +} + +static __always_inline +void __kfree_rcu(struct rcu_head *head, unsigned long offset) +{ + typedef void (*rcu_callback)(struct rcu_head *); + + BUILD_BUG_ON(!__builtin_constant_p(offset)); + + /* See the kfree_rcu() header comment. */ + BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); + + kfree_call_rcu(head, (rcu_callback)offset); +} + +/** + * kfree_rcu() - kfree an object after a grace period. + * @ptr: pointer to kfree + * @rcu_head: the name of the struct rcu_head within the type of @ptr. + * + * Many rcu callbacks functions just call kfree() on the base structure. + * These functions are trivial, but their size adds up, and furthermore + * when they are used in a kernel module, that module must invoke the + * high-latency rcu_barrier() function at module-unload time. + * + * The kfree_rcu() function handles this issue. Rather than encoding a + * function address in the embedded rcu_head structure, kfree_rcu() instead + * encodes the offset of the rcu_head structure within the base structure. + * Because the functions are not allowed in the low-order 4096 bytes of + * kernel virtual memory, offsets up to 4095 bytes can be accommodated. + * If the offset is larger than 4095 bytes, a compile-time error will + * be generated in __kfree_rcu(). If this error is triggered, you can + * either fall back to use of call_rcu() or rearrange the structure to + * position the rcu_head structure into the first 4096 bytes. + * + * Note that the allowable offset might decrease in the future, for example, + * to allow something like kmem_cache_free_rcu(). + */ +#define kfree_rcu(ptr, rcu_head) \ + __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) + +#endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h new file mode 100644 index 00000000..e93df771 --- /dev/null +++ b/include/linux/rcutiny.h @@ -0,0 +1,171 @@ +/* + * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
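
For the common case where the RCU callback would only call kfree(), the two primitives above combine as in this sketch (struct foo is hypothetical, and the caller is assumed to already exclude concurrent updaters, which is why the second argument of rcu_dereference_protected() is simply 1):

struct foo {				/* hypothetical */
	int data;
	struct rcu_head rcu;		/* well within the first 4096 bytes */
};

static struct foo __rcu *foo_ptr;

static void foo_clear(void)
{
	struct foo *old = rcu_dereference_protected(foo_ptr, 1);

	/* NULLing the pointer, so RCU_INIT_POINTER() is sufficient (case 1). */
	RCU_INIT_POINTER(foo_ptr, NULL);

	if (old)
		kfree_rcu(old, rcu);	/* kfree(old) after a grace period */
}
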
+ * + * Copyright IBM Corporation, 2008 + * + * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU + */ +#ifndef __LINUX_TINY_H +#define __LINUX_TINY_H + +#include <linux/cache.h> + +static inline void rcu_init(void) +{ +} + +static inline void rcu_barrier_bh(void) +{ + wait_rcu_gp(call_rcu_bh); +} + +static inline void rcu_barrier_sched(void) +{ + wait_rcu_gp(call_rcu_sched); +} + +#ifdef CONFIG_TINY_RCU + +static inline void synchronize_rcu_expedited(void) +{ + synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ +} + +static inline void rcu_barrier(void) +{ + rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ +} + +#else /* #ifdef CONFIG_TINY_RCU */ + +void synchronize_rcu_expedited(void); + +static inline void rcu_barrier(void) +{ + wait_rcu_gp(call_rcu); +} + +#endif /* #else #ifdef CONFIG_TINY_RCU */ + +static inline void synchronize_rcu_bh(void) +{ + synchronize_sched(); +} + +static inline void synchronize_rcu_bh_expedited(void) +{ + synchronize_sched(); +} + +static inline void synchronize_sched_expedited(void) +{ + synchronize_sched(); +} + +static inline void kfree_call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)) +{ + call_rcu(head, func); +} + +#ifdef CONFIG_TINY_RCU + +static inline void rcu_preempt_note_context_switch(void) +{ +} + +static inline void exit_rcu(void) +{ +} + +static inline int rcu_needs_cpu(int cpu) +{ + return 0; +} + +#else /* #ifdef CONFIG_TINY_RCU */ + +void rcu_preempt_note_context_switch(void); +extern void exit_rcu(void); +int rcu_preempt_needs_cpu(void); + +static inline int rcu_needs_cpu(int cpu) +{ + return rcu_preempt_needs_cpu(); +} + +#endif /* #else #ifdef CONFIG_TINY_RCU */ + +static inline void rcu_note_context_switch(int cpu) +{ + rcu_sched_qs(cpu); + rcu_preempt_note_context_switch(); +} + +/* + * Take advantage of the fact that there is only one CPU, which + * allows us to ignore virtualization-based context switches. + */ +static inline void rcu_virt_note_context_switch(int cpu) +{ +} + +/* + * Return the number of grace periods. + */ +static inline long rcu_batches_completed(void) +{ + return 0; +} + +/* + * Return the number of bottom-half grace periods. + */ +static inline long rcu_batches_completed_bh(void) +{ + return 0; +} + +static inline void rcu_force_quiescent_state(void) +{ +} + +static inline void rcu_bh_force_quiescent_state(void) +{ +} + +static inline void rcu_sched_force_quiescent_state(void) +{ +} + +static inline void rcu_cpu_stall_reset(void) +{ +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern int rcu_scheduler_active __read_mostly; +extern void rcu_scheduler_starting(void); +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +static inline void rcu_scheduler_starting(void) +{ +} +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +#endif /* __LINUX_RCUTINY_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h new file mode 100644 index 00000000..e8ee5dd0 --- /dev/null +++ b/include/linux/rcutree.h @@ -0,0 +1,111 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (tree-based version) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
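
One place where rcu_barrier() matters even on the tiny implementation is module unload: a module that queued call_rcu() callbacks must wait for them before its code and data disappear. A hedged sketch, with the cleanup helper standing in for whatever the module actually does:

#include <linux/module.h>
#include <linux/rcupdate.h>

void foo_cleanup_and_stop_queueing(void);	/* hypothetical, defined elsewhere */

static void __exit foo_exit(void)
{
	/* Stop queueing new RCU callbacks from this module. */
	foo_cleanup_and_stop_queueing();

	/* Wait for every already-queued RCU callback to finish running. */
	rcu_barrier();
}
module_exit(foo_exit);
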
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright IBM Corporation, 2008 + * + * Author: Dipankar Sarma <dipankar@in.ibm.com> + * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm + * + * Based on the original work by Paul McKenney <paulmck@us.ibm.com> + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU + */ + +#ifndef __LINUX_RCUTREE_H +#define __LINUX_RCUTREE_H + +extern void rcu_init(void); +extern void rcu_note_context_switch(int cpu); +extern int rcu_needs_cpu(int cpu); +extern void rcu_cpu_stall_reset(void); + +/* + * Note a virtualization-based context switch. This is simply a + * wrapper around rcu_note_context_switch(), which allows TINY_RCU + * to save a few bytes. + */ +static inline void rcu_virt_note_context_switch(int cpu) +{ + rcu_note_context_switch(cpu); +} + +#ifdef CONFIG_TREE_PREEMPT_RCU + +extern void exit_rcu(void); + +#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + +static inline void exit_rcu(void) +{ +} + +#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ + +extern void synchronize_rcu_bh(void); +extern void synchronize_sched_expedited(void); +extern void synchronize_rcu_expedited(void); + +void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); + +/** + * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period + * + * Wait for an RCU-bh grace period to elapse, but use a "big hammer" + * approach to force the grace period to end quickly. This consumes + * significant time on all CPUs and is unfriendly to real-time workloads, + * so is thus not recommended for any sort of common-case code. In fact, + * if you are using synchronize_rcu_bh_expedited() in a loop, please + * restructure your code to batch your updates, and then use a single + * synchronize_rcu_bh() instead. + * + * Note that it is illegal to call this function while holding any lock + * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal + * to call this function from a CPU-hotplug notifier. Failing to observe + * these restriction will result in deadlock. + */ +static inline void synchronize_rcu_bh_expedited(void) +{ + synchronize_sched_expedited(); +} + +extern void rcu_barrier(void); +extern void rcu_barrier_bh(void); +extern void rcu_barrier_sched(void); + +extern unsigned long rcutorture_testseq; +extern unsigned long rcutorture_vernum; +extern long rcu_batches_completed(void); +extern long rcu_batches_completed_bh(void); +extern long rcu_batches_completed_sched(void); + +extern void rcu_force_quiescent_state(void); +extern void rcu_bh_force_quiescent_state(void); +extern void rcu_sched_force_quiescent_state(void); + +/* A context switch is a grace period for RCU-sched and RCU-bh. */ +static inline int rcu_blocking_is_gp(void) +{ + might_sleep(); /* Check for RCU read-side critical section. 
*/ + return num_online_cpus() == 1; +} + +extern void rcu_scheduler_starting(void); +extern int rcu_scheduler_active __read_mostly; + +#endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/rds.h b/include/linux/rds.h new file mode 100644 index 00000000..91950950 --- /dev/null +++ b/include/linux/rds.h @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2008 Oracle. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _LINUX_RDS_H +#define _LINUX_RDS_H + +#include <linux/types.h> + +#define RDS_IB_ABI_VERSION 0x301 + +/* + * setsockopt/getsockopt for SOL_RDS + */ +#define RDS_CANCEL_SENT_TO 1 +#define RDS_GET_MR 2 +#define RDS_FREE_MR 3 +/* deprecated: RDS_BARRIER 4 */ +#define RDS_RECVERR 5 +#define RDS_CONG_MONITOR 6 +#define RDS_GET_MR_FOR_DEST 7 + +/* + * Control message types for SOL_RDS. + * + * CMSG_RDMA_ARGS (sendmsg) + * Request a RDMA transfer to/from the specified + * memory ranges. + * The cmsg_data is a struct rds_rdma_args. + * RDS_CMSG_RDMA_DEST (recvmsg, sendmsg) + * Kernel informs application about intended + * source/destination of a RDMA transfer + * RDS_CMSG_RDMA_MAP (sendmsg) + * Application asks kernel to map the given + * memory range into a IB MR, and send the + * R_Key along in an RDS extension header. + * The cmsg_data is a struct rds_get_mr_args, + * the same as for the GET_MR setsockopt. + * RDS_CMSG_RDMA_STATUS (recvmsg) + * Returns the status of a completed RDMA operation. 
+ */ +#define RDS_CMSG_RDMA_ARGS 1 +#define RDS_CMSG_RDMA_DEST 2 +#define RDS_CMSG_RDMA_MAP 3 +#define RDS_CMSG_RDMA_STATUS 4 +#define RDS_CMSG_CONG_UPDATE 5 +#define RDS_CMSG_ATOMIC_FADD 6 +#define RDS_CMSG_ATOMIC_CSWP 7 +#define RDS_CMSG_MASKED_ATOMIC_FADD 8 +#define RDS_CMSG_MASKED_ATOMIC_CSWP 9 + +#define RDS_INFO_FIRST 10000 +#define RDS_INFO_COUNTERS 10000 +#define RDS_INFO_CONNECTIONS 10001 +/* 10002 aka RDS_INFO_FLOWS is deprecated */ +#define RDS_INFO_SEND_MESSAGES 10003 +#define RDS_INFO_RETRANS_MESSAGES 10004 +#define RDS_INFO_RECV_MESSAGES 10005 +#define RDS_INFO_SOCKETS 10006 +#define RDS_INFO_TCP_SOCKETS 10007 +#define RDS_INFO_IB_CONNECTIONS 10008 +#define RDS_INFO_CONNECTION_STATS 10009 +#define RDS_INFO_IWARP_CONNECTIONS 10010 +#define RDS_INFO_LAST 10010 + +struct rds_info_counter { + uint8_t name[32]; + uint64_t value; +} __attribute__((packed)); + +#define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 +#define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 +#define RDS_INFO_CONNECTION_FLAG_CONNECTED 0x04 + +#define TRANSNAMSIZ 16 + +struct rds_info_connection { + uint64_t next_tx_seq; + uint64_t next_rx_seq; + __be32 laddr; + __be32 faddr; + uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ + uint8_t flags; +} __attribute__((packed)); + +#define RDS_INFO_MESSAGE_FLAG_ACK 0x01 +#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 + +struct rds_info_message { + uint64_t seq; + uint32_t len; + __be32 laddr; + __be32 faddr; + __be16 lport; + __be16 fport; + uint8_t flags; +} __attribute__((packed)); + +struct rds_info_socket { + uint32_t sndbuf; + __be32 bound_addr; + __be32 connected_addr; + __be16 bound_port; + __be16 connected_port; + uint32_t rcvbuf; + uint64_t inum; +} __attribute__((packed)); + +struct rds_info_tcp_socket { + __be32 local_addr; + __be16 local_port; + __be32 peer_addr; + __be16 peer_port; + uint64_t hdr_rem; + uint64_t data_rem; + uint32_t last_sent_nxt; + uint32_t last_expected_una; + uint32_t last_seen_una; +} __attribute__((packed)); + +#define RDS_IB_GID_LEN 16 +struct rds_info_rdma_connection { + __be32 src_addr; + __be32 dst_addr; + uint8_t src_gid[RDS_IB_GID_LEN]; + uint8_t dst_gid[RDS_IB_GID_LEN]; + + uint32_t max_send_wr; + uint32_t max_recv_wr; + uint32_t max_send_sge; + uint32_t rdma_mr_max; + uint32_t rdma_mr_size; +}; + +/* + * Congestion monitoring. + * Congestion control in RDS happens at the host connection + * level by exchanging a bitmap marking congested ports. + * By default, a process sleeping in poll() is always woken + * up when the congestion map is updated. + * With explicit monitoring, an application can have more + * fine-grained control. + * The application installs a 64bit mask value in the socket, + * where each bit corresponds to a group of ports. + * When a congestion update arrives, RDS checks the set of + * ports that are now uncongested against the list bit mask + * installed in the socket, and if they overlap, we queue a + * cong_notification on the socket. + * + * To install the congestion monitor bitmask, use RDS_CONG_MONITOR + * with the 64bit mask. + * Congestion updates are received via RDS_CMSG_CONG_UPDATE + * control messages. + * + * The correspondence between bits and ports is + * 1 << (portnum % 64) + */ +#define RDS_CONG_MONITOR_SIZE 64 +#define RDS_CONG_MONITOR_BIT(port) (((unsigned int) port) % RDS_CONG_MONITOR_SIZE) +#define RDS_CONG_MONITOR_MASK(port) (1ULL << RDS_CONG_MONITOR_BIT(port)) + +/* + * RDMA related types + */ + +/* + * This encapsulates a remote memory location. 
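
From user space, the congestion-monitor machinery described above boils down to a single setsockopt() call. An illustrative sketch; SOL_RDS normally comes from <linux/socket.h>, and the numeric fallback below is believed to match the kernel's definition:

#include <stdint.h>
#include <sys/socket.h>
#include <linux/rds.h>

#ifndef SOL_RDS
#define SOL_RDS 276	/* fallback if the libc headers lack it */
#endif

static int rds_monitor_port(int fd, unsigned int port)
{
	uint64_t mask = RDS_CONG_MONITOR_MASK(port);

	/* RDS_CMSG_CONG_UPDATE messages will be delivered for this port group. */
	return setsockopt(fd, SOL_RDS, RDS_CONG_MONITOR, &mask, sizeof(mask));
}
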
+ * In the current implementation, it contains the R_Key + * of the remote memory region, and the offset into it + * (so that the application does not have to worry about + * alignment). + */ +typedef uint64_t rds_rdma_cookie_t; + +struct rds_iovec { + uint64_t addr; + uint64_t bytes; +}; + +struct rds_get_mr_args { + struct rds_iovec vec; + uint64_t cookie_addr; + uint64_t flags; +}; + +struct rds_get_mr_for_dest_args { + struct sockaddr_storage dest_addr; + struct rds_iovec vec; + uint64_t cookie_addr; + uint64_t flags; +}; + +struct rds_free_mr_args { + rds_rdma_cookie_t cookie; + uint64_t flags; +}; + +struct rds_rdma_args { + rds_rdma_cookie_t cookie; + struct rds_iovec remote_vec; + uint64_t local_vec_addr; + uint64_t nr_local; + uint64_t flags; + uint64_t user_token; +}; + +struct rds_atomic_args { + rds_rdma_cookie_t cookie; + uint64_t local_addr; + uint64_t remote_addr; + union { + struct { + uint64_t compare; + uint64_t swap; + } cswp; + struct { + uint64_t add; + } fadd; + struct { + uint64_t compare; + uint64_t swap; + uint64_t compare_mask; + uint64_t swap_mask; + } m_cswp; + struct { + uint64_t add; + uint64_t nocarry_mask; + } m_fadd; + }; + uint64_t flags; + uint64_t user_token; +}; + +struct rds_rdma_notify { + uint64_t user_token; + int32_t status; +}; + +#define RDS_RDMA_SUCCESS 0 +#define RDS_RDMA_REMOTE_ERROR 1 +#define RDS_RDMA_CANCELED 2 +#define RDS_RDMA_DROPPED 3 +#define RDS_RDMA_OTHER_ERROR 4 + +/* + * Common set of flags for all RDMA related structs + */ +#define RDS_RDMA_READWRITE 0x0001 +#define RDS_RDMA_FENCE 0x0002 /* use FENCE for immediate send */ +#define RDS_RDMA_INVALIDATE 0x0004 /* invalidate R_Key after freeing MR */ +#define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */ +#define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */ +#define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */ +#define RDS_RDMA_SILENT 0x0040 /* Do not interrupt remote */ + +#endif /* IB_RDS_H */ diff --git a/include/linux/reboot.h b/include/linux/reboot.h new file mode 100644 index 00000000..e0879a70 --- /dev/null +++ b/include/linux/reboot.h @@ -0,0 +1,89 @@ +#ifndef _LINUX_REBOOT_H +#define _LINUX_REBOOT_H + +/* + * Magic values required to use _reboot() system call. + */ + +#define LINUX_REBOOT_MAGIC1 0xfee1dead +#define LINUX_REBOOT_MAGIC2 672274793 +#define LINUX_REBOOT_MAGIC2A 85072278 +#define LINUX_REBOOT_MAGIC2B 369367448 +#define LINUX_REBOOT_MAGIC2C 537993216 + + +/* + * Commands accepted by the _reboot() system call. + * + * RESTART Restart system using default command and mode. + * HALT Stop OS and give system control to ROM monitor, if any. + * CAD_ON Ctrl-Alt-Del sequence causes RESTART command. + * CAD_OFF Ctrl-Alt-Del sequence sends SIGINT to init task. + * POWER_OFF Stop OS and remove all power from system, if possible. + * RESTART2 Restart system using given command string. + * SW_SUSPEND Suspend system using software suspend if compiled in. 
+ * KEXEC Restart system using a previously loaded Linux kernel + */ + +#define LINUX_REBOOT_CMD_RESTART 0x01234567 +#define LINUX_REBOOT_CMD_HALT 0xCDEF0123 +#define LINUX_REBOOT_CMD_CAD_ON 0x89ABCDEF +#define LINUX_REBOOT_CMD_CAD_OFF 0x00000000 +#define LINUX_REBOOT_CMD_POWER_OFF 0x4321FEDC +#define LINUX_REBOOT_CMD_RESTART2 0xA1B2C3D4 +#define LINUX_REBOOT_CMD_SW_SUSPEND 0xD000FCE2 +#define LINUX_REBOOT_CMD_KEXEC 0x45584543 + + +#ifdef __KERNEL__ + +#include <linux/notifier.h> + +#define SYS_DOWN 0x0001 /* Notify of system down */ +#define SYS_RESTART SYS_DOWN +#define SYS_HALT 0x0002 /* Notify of system halt */ +#define SYS_POWER_OFF 0x0003 /* Notify of system power off */ + +extern int register_reboot_notifier(struct notifier_block *); +extern int unregister_reboot_notifier(struct notifier_block *); + + +/* + * Architecture-specific implementations of sys_reboot commands. + */ + +extern void machine_restart(char *cmd); +extern void machine_halt(void); +extern void machine_power_off(void); + +extern void machine_shutdown(void); +struct pt_regs; +extern void machine_crash_shutdown(struct pt_regs *); + +/* + * Architecture independent implemenations of sys_reboot commands. + */ + +extern void kernel_restart_prepare(char *cmd); +extern void kernel_restart(char *cmd); +extern void kernel_halt(void); +extern void kernel_power_off(void); + +extern int C_A_D; /* for sysctl */ +void ctrl_alt_del(void); + +#define POWEROFF_CMD_PATH_LEN 256 +extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN]; + +extern int orderly_poweroff(bool force); + +/* + * Emergency restart, callable from an interrupt handler. + */ + +extern void emergency_restart(void); +#include <asm/emergency-restart.h> + +#endif + +#endif /* _LINUX_REBOOT_H */ diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h new file mode 100644 index 00000000..f9c90b33 --- /dev/null +++ b/include/linux/reciprocal_div.h @@ -0,0 +1,32 @@ +#ifndef _LINUX_RECIPROCAL_DIV_H +#define _LINUX_RECIPROCAL_DIV_H + +#include <linux/types.h> + +/* + * This file describes reciprocical division. + * + * This optimizes the (A/B) problem, when A and B are two u32 + * and B is a known value (but not known at compile time) + * + * The math principle used is : + * Let RECIPROCAL_VALUE(B) be (((1LL << 32) + (B - 1))/ B) + * Then A / B = (u32)(((u64)(A) * (R)) >> 32) + * + * This replaces a divide by a multiply (and a shift), and + * is generally less expensive in CPU cycles. + */ + +/* + * Computes the reciprocal value (R) for the value B of the divisor. + * Should not be called before each reciprocal_divide(), + * or else the performance is slower than a normal divide. + */ +extern u32 reciprocal_value(u32 B); + + +static inline u32 reciprocal_divide(u32 A, u32 R) +{ + return (u32)(((u64)A * R) >> 32); +} +#endif diff --git a/include/linux/regmap.h b/include/linux/regmap.h new file mode 100644 index 00000000..a90abb6b --- /dev/null +++ b/include/linux/regmap.h @@ -0,0 +1,332 @@ +#ifndef __LINUX_REGMAP_H +#define __LINUX_REGMAP_H + +/* + * Register map access API + * + * Copyright 2011 Wolfson Microelectronics plc + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
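
A typical reciprocal_div user computes the reciprocal once at setup time and then replaces both the divide and the modulo on the hot path, as in this illustrative sketch (struct foo_table is invented):

#include <linux/reciprocal_div.h>

struct foo_table {			/* hypothetical */
	u32 nbuckets;
	u32 nbuckets_recip;		/* cached reciprocal_value(nbuckets) */
};

static void foo_table_setup(struct foo_table *t, u32 nbuckets)
{
	t->nbuckets = nbuckets;
	t->nbuckets_recip = reciprocal_value(nbuckets);
}

static u32 foo_bucket(const struct foo_table *t, u32 hash)
{
	u32 q = reciprocal_divide(hash, t->nbuckets_recip);	/* hash / nbuckets */

	return hash - q * t->nbuckets;				/* hash % nbuckets */
}
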
+ */ + +#include <linux/list.h> + +struct module; +struct device; +struct i2c_client; +struct spi_device; +struct regmap; + +/* An enum of all the supported cache types */ +enum regcache_type { + REGCACHE_NONE, + REGCACHE_RBTREE, + REGCACHE_COMPRESSED +}; + +/** + * Default value for a register. We use an array of structs rather + * than a simple array as many modern devices have very sparse + * register maps. + * + * @reg: Register address. + * @def: Register default value. + */ +struct reg_default { + unsigned int reg; + unsigned int def; +}; + +#ifdef CONFIG_REGMAP + +/** + * Configuration for the register map of a device. + * + * @reg_bits: Number of bits in a register address, mandatory. + * @pad_bits: Number of bits of padding between register and value. + * @val_bits: Number of bits in a register value, mandatory. + * + * @writeable_reg: Optional callback returning true if the register + * can be written to. + * @readable_reg: Optional callback returning true if the register + * can be read from. + * @volatile_reg: Optional callback returning true if the register + * value can't be cached. + * @precious_reg: Optional callback returning true if the rgister + * should not be read outside of a call from the driver + * (eg, a clear on read interrupt status register). + * + * @max_register: Optional, specifies the maximum valid register index. + * @reg_defaults: Power on reset values for registers (for use with + * register cache support). + * @num_reg_defaults: Number of elements in reg_defaults. + * + * @read_flag_mask: Mask to be set in the top byte of the register when doing + * a read. + * @write_flag_mask: Mask to be set in the top byte of the register when doing + * a write. If both read_flag_mask and write_flag_mask are + * empty the regmap_bus default masks are used. + * + * @cache_type: The actual cache type. + * @reg_defaults_raw: Power on reset values for registers (for use with + * register cache support). + * @num_reg_defaults_raw: Number of elements in reg_defaults_raw. + */ +struct regmap_config { + int reg_bits; + int pad_bits; + int val_bits; + + bool (*writeable_reg)(struct device *dev, unsigned int reg); + bool (*readable_reg)(struct device *dev, unsigned int reg); + bool (*volatile_reg)(struct device *dev, unsigned int reg); + bool (*precious_reg)(struct device *dev, unsigned int reg); + + unsigned int max_register; + const struct reg_default *reg_defaults; + unsigned int num_reg_defaults; + enum regcache_type cache_type; + const void *reg_defaults_raw; + unsigned int num_reg_defaults_raw; + + u8 read_flag_mask; + u8 write_flag_mask; +}; + +typedef int (*regmap_hw_write)(struct device *dev, const void *data, + size_t count); +typedef int (*regmap_hw_gather_write)(struct device *dev, + const void *reg, size_t reg_len, + const void *val, size_t val_len); +typedef int (*regmap_hw_read)(struct device *dev, + const void *reg_buf, size_t reg_size, + void *val_buf, size_t val_size); + +/** + * Description of a hardware bus for the register map infrastructure. + * + * @write: Write operation. + * @gather_write: Write operation with split register/value, return -ENOTSUPP + * if not implemented on a given device. + * @read: Read operation. Data is returned in the buffer used to transmit + * data. + * @read_flag_mask: Mask to be set in the top byte of the register when doing + * a read. 
+ */ +struct regmap_bus { + regmap_hw_write write; + regmap_hw_gather_write gather_write; + regmap_hw_read read; + u8 read_flag_mask; +}; + +struct regmap *regmap_init(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config); +struct regmap *regmap_init_i2c(struct i2c_client *i2c, + const struct regmap_config *config); +struct regmap *regmap_init_spi(struct spi_device *dev, + const struct regmap_config *config); + +struct regmap *devm_regmap_init(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config); +struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, + const struct regmap_config *config); +struct regmap *devm_regmap_init_spi(struct spi_device *dev, + const struct regmap_config *config); + +void regmap_exit(struct regmap *map); +int regmap_reinit_cache(struct regmap *map, + const struct regmap_config *config); +int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); +int regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len); +int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, + size_t val_count); +int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); +int regmap_raw_read(struct regmap *map, unsigned int reg, + void *val, size_t val_len); +int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, + size_t val_count); +int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val); +int regmap_update_bits_check(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change); +int regmap_get_val_bytes(struct regmap *map); + +int regcache_sync(struct regmap *map); +int regcache_sync_region(struct regmap *map, unsigned int min, + unsigned int max); +void regcache_cache_only(struct regmap *map, bool enable); +void regcache_cache_bypass(struct regmap *map, bool enable); +void regcache_mark_dirty(struct regmap *map); + +int regmap_register_patch(struct regmap *map, const struct reg_default *regs, + int num_regs); + +/** + * Description of an IRQ for the generic regmap irq_chip. + * + * @reg_offset: Offset of the status/mask register within the bank + * @mask: Mask used to flag/control the register. + */ +struct regmap_irq { + unsigned int reg_offset; + unsigned int mask; +}; + +/** + * Description of a generic regmap irq_chip. This is not intended to + * handle every possible interrupt controller, but it should handle a + * substantial proportion of those that are found in the wild. + * + * @name: Descriptive name for IRQ controller. + * + * @status_base: Base status register address. + * @mask_base: Base mask register address. + * @ack_base: Base ack address. If zero then the chip is clear on read. + * + * @num_regs: Number of registers in each control bank. + * @irqs: Descriptors for individual IRQs. Interrupt numbers are + * assigned based on the index in the array of the interrupt. + * @num_irqs: Number of descriptors. 
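Building on the functions declared above, a device driver's probe() might create a regmap and flip a bit roughly as follows. The I2C device, register address and bit mask are hypothetical, the config reuses the example_regmap_config sketched earlier, and error handling is trimmed to the essentials.

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

#define EXAMPLE_REG_POWER       0x10    /* hypothetical power-control register */
#define EXAMPLE_PWR_EN          BIT(0)

static int example_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
{
        struct regmap *map;
        unsigned int val;
        int ret;

        map = devm_regmap_init_i2c(i2c, &example_regmap_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        /* Set the enable bit without disturbing the other bits. */
        ret = regmap_update_bits(map, EXAMPLE_REG_POWER,
                                 EXAMPLE_PWR_EN, EXAMPLE_PWR_EN);
        if (ret < 0)
                return ret;

        /* Read it back (served from the cache unless marked volatile). */
        return regmap_read(map, EXAMPLE_REG_POWER, &val);
}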
+ */ +struct regmap_irq_chip { + const char *name; + + unsigned int status_base; + unsigned int mask_base; + unsigned int ack_base; + + int num_regs; + + const struct regmap_irq *irqs; + int num_irqs; +}; + +struct regmap_irq_chip_data; + +int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, + int irq_base, struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); +void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); +int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data); + +#else + +/* + * These stubs should only ever be called by generic code which has + * regmap based facilities, if they ever get called at runtime + * something is going wrong and something probably needs to select + * REGMAP. + */ + +static inline int regmap_write(struct regmap *map, unsigned int reg, + unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_bulk_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_count) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_read(struct regmap *map, unsigned int reg, + unsigned int *val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_raw_read(struct regmap *map, unsigned int reg, + void *val, size_t val_len) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_bulk_read(struct regmap *map, unsigned int reg, + void *val, size_t val_count) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_update_bits_check(struct regmap *map, + unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_get_val_bytes(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regcache_sync(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regcache_sync_region(struct regmap *map, unsigned int min, + unsigned int max) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline void regcache_cache_only(struct regmap *map, bool enable) +{ + WARN_ONCE(1, "regmap API is disabled"); +} + +static inline void regcache_cache_bypass(struct regmap *map, bool enable) +{ + WARN_ONCE(1, "regmap API is disabled"); +} + +static inline void regcache_mark_dirty(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); +} + +static inline int regmap_register_patch(struct regmap *map, + const struct reg_default *regs, + int num_regs) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +#endif + +#endif diff --git a/include/linux/regset.h b/include/linux/regset.h new file mode 100644 index 00000000..8e0c9feb --- /dev/null +++ b/include/linux/regset.h @@ -0,0 +1,375 @@ +/* + * User-mode machine state access + * + * Copyright (C) 2007 Red Hat, Inc. All rights reserved. 
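Stepping back to the regmap_irq_chip declarations in regmap.h above (just before the !CONFIG_REGMAP stubs), a driver for a hypothetical PMIC with a single status/mask register pair might describe its interrupts like this; the register addresses, bit assignments and interrupt names are invented.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

#define EXAMPLE_INT_STATUS      0x20
#define EXAMPLE_INT_MASK        0x21

enum { EXAMPLE_IRQ_ALARM, EXAMPLE_IRQ_ONKEY };

static const struct regmap_irq example_irqs[] = {
        [EXAMPLE_IRQ_ALARM] = { .reg_offset = 0, .mask = BIT(0) },
        [EXAMPLE_IRQ_ONKEY] = { .reg_offset = 0, .mask = BIT(1) },
};

static struct regmap_irq_chip example_irq_chip = {
        .name        = "example-pmic",
        .status_base = EXAMPLE_INT_STATUS,
        .mask_base   = EXAMPLE_INT_MASK,
        .ack_base    = EXAMPLE_INT_STATUS,      /* hypothetical: ack by writing status */
        .num_regs    = 1,
        .irqs        = example_irqs,
        .num_irqs    = ARRAY_SIZE(example_irqs),
};

The table would then be handed to regmap_add_irq_chip() along with the chip's interrupt line, and regmap_irq_chip_get_base() used to map the individual sources to Linux interrupt numbers.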
+ * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * Red Hat Author: Roland McGrath. + */ + +#ifndef _LINUX_REGSET_H +#define _LINUX_REGSET_H 1 + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/bug.h> +#include <linux/uaccess.h> +struct task_struct; +struct user_regset; + + +/** + * user_regset_active_fn - type of @active function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * + * Return -%ENODEV if not available on the hardware found. + * Return %0 if no interesting state in this thread. + * Return >%0 number of @size units of interesting state. + * Any get call fetching state beyond that number will + * see the default initialization state for this data, + * so a caller that knows what the default state is need + * not copy it all out. + * This call is optional; the pointer is %NULL if there + * is no inexpensive check to yield a value < @n. + */ +typedef int user_regset_active_fn(struct task_struct *target, + const struct user_regset *regset); + +/** + * user_regset_get_fn - type of @get function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @pos: offset into the regset data to access, in bytes + * @count: amount of data to copy, in bytes + * @kbuf: if not %NULL, a kernel-space pointer to copy into + * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into + * + * Fetch register values. Return %0 on success; -%EIO or -%ENODEV + * are usual failure returns. The @pos and @count values are in + * bytes, but must be properly aligned. If @kbuf is non-null, that + * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then + * ubuf gives a userland pointer to access directly, and an -%EFAULT + * return value is possible. + */ +typedef int user_regset_get_fn(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf); + +/** + * user_regset_set_fn - type of @set function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @pos: offset into the regset data to access, in bytes + * @count: amount of data to copy, in bytes + * @kbuf: if not %NULL, a kernel-space pointer to copy from + * @ubuf: if @kbuf is %NULL, a user-space pointer to copy from + * + * Store register values. Return %0 on success; -%EIO or -%ENODEV + * are usual failure returns. The @pos and @count values are in + * bytes, but must be properly aligned. If @kbuf is non-null, that + * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then + * ubuf gives a userland pointer to access directly, and an -%EFAULT + * return value is possible. + */ +typedef int user_regset_set_fn(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf); + +/** + * user_regset_writeback_fn - type of @writeback function in &struct user_regset + * @target: thread being examined + * @regset: regset being examined + * @immediate: zero if writeback at completion of next context switch is OK + * + * This call is optional; usually the pointer is %NULL. 
When + * provided, there is some user memory associated with this regset's + * hardware, such as memory backing cached register data on register + * window machines; the regset's data controls what user memory is + * used (e.g. via the stack pointer value). + * + * Write register data back to user memory. If the @immediate flag + * is nonzero, it must be written to the user memory so uaccess or + * access_process_vm() can see it when this call returns; if zero, + * then it must be written back by the time the task completes a + * context switch (as synchronized with wait_task_inactive()). + * Return %0 on success or if there was nothing to do, -%EFAULT for + * a memory problem (bad stack pointer or whatever), or -%EIO for a + * hardware problem. + */ +typedef int user_regset_writeback_fn(struct task_struct *target, + const struct user_regset *regset, + int immediate); + +/** + * struct user_regset - accessible thread CPU state + * @n: Number of slots (registers). + * @size: Size in bytes of a slot (register). + * @align: Required alignment, in bytes. + * @bias: Bias from natural indexing. + * @core_note_type: ELF note @n_type value used in core dumps. + * @get: Function to fetch values. + * @set: Function to store values. + * @active: Function to report if regset is active, or %NULL. + * @writeback: Function to write data back to user memory, or %NULL. + * + * This data structure describes a machine resource we call a register set. + * This is part of the state of an individual thread, not necessarily + * actual CPU registers per se. A register set consists of a number of + * similar slots, given by @n. Each slot is @size bytes, and aligned to + * @align bytes (which is at least @size). + * + * These functions must be called only on the current thread or on a + * thread that is in %TASK_STOPPED or %TASK_TRACED state, that we are + * guaranteed will not be woken up and return to user mode, and that we + * have called wait_task_inactive() on. (The target thread always might + * wake up for SIGKILL while these functions are working, in which case + * that thread's user_regset state might be scrambled.) + * + * The @pos argument must be aligned according to @align; the @count + * argument must be a multiple of @size. These functions are not + * responsible for checking for invalid arguments. + * + * When there is a natural value to use as an index, @bias gives the + * difference between the natural index and the slot index for the + * register set. For example, x86 GDT segment descriptors form a regset; + * the segment selector produces a natural index, but only a subset of + * that index space is available as a regset (the TLS slots); subtracting + * @bias from a segment selector index value computes the regset slot. + * + * If nonzero, @core_note_type gives the n_type field (NT_* value) + * of the core file note in which this regset's data appears. + * NT_PRSTATUS is a special case in that the regset data starts at + * offsetof(struct elf_prstatus, pr_reg) into the note data; that is + * part of the per-machine ELF formats userland knows about. In + * other cases, the core file note contains exactly the whole regset + * (@n * @size) and nothing else. The core file note is normally + * omitted when there is an @active function and it returns zero. 
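As a sketch of the @get contract described above, an architecture might implement a general-purpose-register fetch with the user_regset_copyout() helper defined later in this header. The assumption that the saved user registers are reached through task_pt_regs() and copied out verbatim is illustrative only; the real layout is architecture specific.

#include <linux/regset.h>
#include <linux/sched.h>
#include <asm/processor.h>

static int example_gpr_get(struct task_struct *target,
                           const struct user_regset *regset,
                           unsigned int pos, unsigned int count,
                           void *kbuf, void __user *ubuf)
{
        /* task_pt_regs() is the usual way to reach the saved user registers
         * on most architectures; what exactly gets copied out is arch code. */
        const struct pt_regs *regs = task_pt_regs(target);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   regs, 0, sizeof(*regs));
}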
+ */ +struct user_regset { + user_regset_get_fn *get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +/** + * struct user_regset_view - available regsets + * @name: Identifier, e.g. UTS_MACHINE string. + * @regsets: Array of @n regsets available in this view. + * @n: Number of elements in @regsets. + * @e_machine: ELF header @e_machine %EM_* value written in core dumps. + * @e_flags: ELF header @e_flags value written in core dumps. + * @ei_osabi: ELF header @e_ident[%EI_OSABI] value written in core dumps. + * + * A regset view is a collection of regsets (&struct user_regset, + * above). This describes all the state of a thread that can be seen + * from a given architecture/ABI environment. More than one view might + * refer to the same &struct user_regset, or more than one regset + * might refer to the same machine-specific state in the thread. For + * example, a 32-bit thread's state could be examined from the 32-bit + * view or from the 64-bit view. Either method reaches the same thread + * register state, doing appropriate widening or truncation. + */ +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + +/* + * This is documented here rather than at the definition sites because its + * implementation is machine-dependent but its interface is universal. + */ +/** + * task_user_regset_view - Return the process's native regset view. + * @tsk: a thread of the process in question + * + * Return the &struct user_regset_view that is native for the given process. + * For example, what it would access when it called ptrace(). + * Throughout the life of the process, this only changes at exec. + */ +const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); + + +/* + * These are helpers for writing regset get/set functions in arch code. + * Because @start_pos and @end_pos are always compile-time constants, + * these are inlined into very little code though they look large. + * + * Use one or more calls sequentially for each chunk of regset data stored + * contiguously in memory. Call with constants for @start_pos and @end_pos, + * giving the range of byte positions in the regset that data corresponds + * to; @end_pos can be -1 if this chunk is at the end of the regset layout. + * Each call updates the arguments to point past its chunk. + */ + +static inline int user_regset_copyout(unsigned int *pos, unsigned int *count, + void **kbuf, + void __user **ubuf, const void *data, + const int start_pos, const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + data += *pos - start_pos; + if (*kbuf) { + memcpy(*kbuf, data, copy); + *kbuf += copy; + } else if (__copy_to_user(*ubuf, data, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, + const void **kbuf, + const void __user **ubuf, void *data, + const int start_pos, const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? 
*count + : min(*count, end_pos - *pos)); + data += *pos - start_pos; + if (*kbuf) { + memcpy(data, *kbuf, copy); + *kbuf += copy; + } else if (__copy_from_user(data, *ubuf, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +/* + * These two parallel the two above, but for portions of a regset layout + * that always read as all-zero or for which writes are ignored. + */ +static inline int user_regset_copyout_zero(unsigned int *pos, + unsigned int *count, + void **kbuf, void __user **ubuf, + const int start_pos, + const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + if (*kbuf) { + memset(*kbuf, 0, copy); + *kbuf += copy; + } else if (__clear_user(*ubuf, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +static inline int user_regset_copyin_ignore(unsigned int *pos, + unsigned int *count, + const void **kbuf, + const void __user **ubuf, + const int start_pos, + const int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + if (*kbuf) + *kbuf += copy; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +/** + * copy_regset_to_user - fetch a thread's user_regset data into user memory + * @target: thread to be examined + * @view: &struct user_regset_view describing user thread machine state + * @setno: index in @view->regsets + * @offset: offset into the regset data, in bytes + * @size: amount of data to copy, in bytes + * @data: user-mode pointer to copy into + */ +static inline int copy_regset_to_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, + unsigned int offset, unsigned int size, + void __user *data) +{ + const struct user_regset *regset = &view->regsets[setno]; + + if (!regset->get) + return -EOPNOTSUPP; + + if (!access_ok(VERIFY_WRITE, data, size)) + return -EFAULT; + + return regset->get(target, regset, offset, size, NULL, data); +} + +/** + * copy_regset_from_user - store into thread's user_regset data from user memory + * @target: thread to be examined + * @view: &struct user_regset_view describing user thread machine state + * @setno: index in @view->regsets + * @offset: offset into the regset data, in bytes + * @size: amount of data to copy, in bytes + * @data: user-mode pointer to copy from + */ +static inline int copy_regset_from_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, + unsigned int offset, unsigned int size, + const void __user *data) +{ + const struct user_regset *regset = &view->regsets[setno]; + + if (!regset->set) + return -EOPNOTSUPP; + + if (!access_ok(VERIFY_READ, data, size)) + return -EFAULT; + + return regset->set(target, regset, offset, size, NULL, data); +} + + +#endif /* <linux/regset.h> */ diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h new file mode 100644 index 00000000..7bd73bbd --- /dev/null +++ b/include/linux/regulator/ab8500.h @@ -0,0 +1,142 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * + * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson + * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson + */ + +#ifndef 
__LINUX_MFD_AB8500_REGULATOR_H +#define __LINUX_MFD_AB8500_REGULATOR_H + +/* AB8500 regulators */ +enum ab8500_regulator_id { + AB8500_LDO_AUX1, + AB8500_LDO_AUX2, + AB8500_LDO_AUX3, + AB8500_LDO_INTCORE, + AB8500_LDO_TVOUT, + AB8500_LDO_USB, + AB8500_LDO_AUDIO, + AB8500_LDO_ANAMIC1, + AB8500_LDO_ANAMIC2, + AB8500_LDO_DMIC, + AB8500_LDO_ANA, + AB8500_NUM_REGULATORS, +}; + +/* AB9450 regulators */ +enum ab9540_regulator_id { + AB9540_LDO_AUX1, + AB9540_LDO_AUX2, + AB9540_LDO_AUX3, + AB9540_LDO_AUX4, + AB9540_LDO_INTCORE, + AB9540_LDO_TVOUT, + AB9540_LDO_USB, + AB9540_LDO_AUDIO, + AB9540_LDO_ANAMIC1, + AB9540_LDO_ANAMIC2, + AB9540_LDO_DMIC, + AB9540_LDO_ANA, + AB9540_SYSCLKREQ_2, + AB9540_SYSCLKREQ_4, + AB9540_NUM_REGULATORS, +}; + +/* AB8500 and AB9540 register initialization */ +struct ab8500_regulator_reg_init { + int id; + u8 value; +}; + +#define INIT_REGULATOR_REGISTER(_id, _value) \ + { \ + .id = _id, \ + .value = _value, \ + } + +/* AB8500 registers */ +enum ab8500_regulator_reg { + AB8500_REGUREQUESTCTRL2, + AB8500_REGUREQUESTCTRL3, + AB8500_REGUREQUESTCTRL4, + AB8500_REGUSYSCLKREQ1HPVALID1, + AB8500_REGUSYSCLKREQ1HPVALID2, + AB8500_REGUHWHPREQ1VALID1, + AB8500_REGUHWHPREQ1VALID2, + AB8500_REGUHWHPREQ2VALID1, + AB8500_REGUHWHPREQ2VALID2, + AB8500_REGUSWHPREQVALID1, + AB8500_REGUSWHPREQVALID2, + AB8500_REGUSYSCLKREQVALID1, + AB8500_REGUSYSCLKREQVALID2, + AB8500_REGUMISC1, + AB8500_VAUDIOSUPPLY, + AB8500_REGUCTRL1VAMIC, + AB8500_VPLLVANAREGU, + AB8500_VREFDDR, + AB8500_EXTSUPPLYREGU, + AB8500_VAUX12REGU, + AB8500_VRF1VAUX3REGU, + AB8500_VAUX1SEL, + AB8500_VAUX2SEL, + AB8500_VRF1VAUX3SEL, + AB8500_REGUCTRL2SPARE, + AB8500_REGUCTRLDISCH, + AB8500_REGUCTRLDISCH2, + AB8500_VSMPS1SEL1, + AB8500_NUM_REGULATOR_REGISTERS, +}; + + +/* AB9540 registers */ +enum ab9540_regulator_reg { + AB9540_REGUREQUESTCTRL1, + AB9540_REGUREQUESTCTRL2, + AB9540_REGUREQUESTCTRL3, + AB9540_REGUREQUESTCTRL4, + AB9540_REGUSYSCLKREQ1HPVALID1, + AB9540_REGUSYSCLKREQ1HPVALID2, + AB9540_REGUHWHPREQ1VALID1, + AB9540_REGUHWHPREQ1VALID2, + AB9540_REGUHWHPREQ2VALID1, + AB9540_REGUHWHPREQ2VALID2, + AB9540_REGUSWHPREQVALID1, + AB9540_REGUSWHPREQVALID2, + AB9540_REGUSYSCLKREQVALID1, + AB9540_REGUSYSCLKREQVALID2, + AB9540_REGUVAUX4REQVALID, + AB9540_REGUMISC1, + AB9540_VAUDIOSUPPLY, + AB9540_REGUCTRL1VAMIC, + AB9540_VSMPS1REGU, + AB9540_VSMPS2REGU, + AB9540_VSMPS3REGU, /* NOTE! PRCMU register */ + AB9540_VPLLVANAREGU, + AB9540_EXTSUPPLYREGU, + AB9540_VAUX12REGU, + AB9540_VRF1VAUX3REGU, + AB9540_VSMPS1SEL1, + AB9540_VSMPS1SEL2, + AB9540_VSMPS1SEL3, + AB9540_VSMPS2SEL1, + AB9540_VSMPS2SEL2, + AB9540_VSMPS2SEL3, + AB9540_VSMPS3SEL1, /* NOTE! PRCMU register */ + AB9540_VSMPS3SEL2, /* NOTE! PRCMU register */ + AB9540_VAUX1SEL, + AB9540_VAUX2SEL, + AB9540_VRF1VAUX3SEL, + AB9540_REGUCTRL2SPARE, + AB9540_VAUX4REQCTRL, + AB9540_VAUX4REGU, + AB9540_VAUX4SEL, + AB9540_REGUCTRLDISCH, + AB9540_REGUCTRLDISCH2, + AB9540_REGUCTRLDISCH3, + AB9540_NUM_REGULATOR_REGISTERS, +}; + +#endif diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h new file mode 100644 index 00000000..4ed1b30a --- /dev/null +++ b/include/linux/regulator/consumer.h @@ -0,0 +1,349 @@ +/* + * consumer.h -- SoC Regulator consumer support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lrg@slimlogic.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
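Looking back at ab8500.h above, board code typically uses INIT_REGULATOR_REGISTER() to build a table of ab8500_regulator_reg_init entries; the register values below are made up purely to show the shape of such a table.

#include <linux/regulator/ab8500.h>

/* Hypothetical board settings; the values are illustrative only. */
static struct ab8500_regulator_reg_init example_ab8500_reg_init[] = {
        INIT_REGULATOR_REGISTER(AB8500_REGUMISC1,  0x00),
        INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL,   0x0c),
        INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01),
};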
+ * + * Regulator Consumer Interface. + * + * A Power Management Regulator framework for SoC based devices. + * Features:- + * o Voltage and current level control. + * o Operating mode control. + * o Regulator status. + * o sysfs entries for showing client devices and status + * + * EXPERIMENTAL FEATURES: + * Dynamic Regulator operating Mode Switching (DRMS) - allows regulators + * to use most efficient operating mode depending upon voltage and load and + * is transparent to client drivers. + * + * e.g. Devices x,y,z share regulator r. Device x and y draw 20mA each during + * IO and 1mA at idle. Device z draws 100mA when under load and 5mA when + * idling. Regulator r has > 90% efficiency in NORMAL mode at loads > 100mA + * but this drops rapidly to 60% when below 100mA. Regulator r has > 90% + * efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate + * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA. + * + */ + +#ifndef __LINUX_REGULATOR_CONSUMER_H_ +#define __LINUX_REGULATOR_CONSUMER_H_ + +struct device; +struct notifier_block; + +/* + * Regulator operating modes. + * + * Regulators can run in a variety of different operating modes depending on + * output load. This allows further system power savings by selecting the + * best (and most efficient) regulator mode for a desired load. + * + * Most drivers will only care about NORMAL. The modes below are generic and + * will probably not match the naming convention of your regulator data sheet + * but should match the use cases in the datasheet. + * + * In order of power efficiency (least efficient at top). + * + * Mode Description + * FAST Regulator can handle fast changes in it's load. + * e.g. useful in CPU voltage & frequency scaling where + * load can quickly increase with CPU frequency increases. + * + * NORMAL Normal regulator power supply mode. Most drivers will + * use this mode. + * + * IDLE Regulator runs in a more efficient mode for light + * loads. Can be used for devices that have a low power + * requirement during periods of inactivity. This mode + * may be more noisy than NORMAL and may not be able + * to handle fast load switching. + * + * STANDBY Regulator runs in the most efficient mode for very + * light loads. Can be used by devices when they are + * in a sleep/standby state. This mode is likely to be + * the most noisy and may not be able to handle fast load + * switching. + * + * NOTE: Most regulators will only support a subset of these modes. Some + * will only just support NORMAL. + * + * These modes can be OR'ed together to make up a mask of valid register modes. + */ + +#define REGULATOR_MODE_FAST 0x1 +#define REGULATOR_MODE_NORMAL 0x2 +#define REGULATOR_MODE_IDLE 0x4 +#define REGULATOR_MODE_STANDBY 0x8 + +/* + * Regulator notifier events. + * + * UNDER_VOLTAGE Regulator output is under voltage. + * OVER_CURRENT Regulator output current is too high. + * REGULATION_OUT Regulator output is out of regulation. + * FAIL Regulator output has failed. + * OVER_TEMP Regulator over temp. + * FORCE_DISABLE Regulator forcibly shut down by software. + * VOLTAGE_CHANGE Regulator voltage changed. + * DISABLE Regulator was disabled. + * + * NOTE: These events can be OR'ed together when passed into handler. 
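The consumer calls declared just below (regulator_get(), regulator_set_voltage(), regulator_enable() and friends) are normally used from a device driver along these lines; the supply name, voltage range and calling device are hypothetical, and error paths are abbreviated.

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_power_up(struct device *dev)
{
        struct regulator *vdd;
        int ret;

        vdd = regulator_get(dev, "vdd");        /* supply name from board data */
        if (IS_ERR(vdd))
                return PTR_ERR(vdd);

        /* Any voltage in this range is acceptable to the consumer. */
        ret = regulator_set_voltage(vdd, 1800000, 3300000);
        if (ret == 0)
                ret = regulator_enable(vdd);

        if (ret < 0)
                regulator_put(vdd);
        return ret;
}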
+ */ + +#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01 +#define REGULATOR_EVENT_OVER_CURRENT 0x02 +#define REGULATOR_EVENT_REGULATION_OUT 0x04 +#define REGULATOR_EVENT_FAIL 0x08 +#define REGULATOR_EVENT_OVER_TEMP 0x10 +#define REGULATOR_EVENT_FORCE_DISABLE 0x20 +#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 +#define REGULATOR_EVENT_DISABLE 0x80 + +struct regulator; + +/** + * struct regulator_bulk_data - Data used for bulk regulator operations. + * + * @supply: The name of the supply. Initialised by the user before + * using the bulk regulator APIs. + * @consumer: The regulator consumer for the supply. This will be managed + * by the bulk API. + * + * The regulator APIs provide a series of regulator_bulk_() API calls as + * a convenience to consumers which require multiple supplies. This + * structure is used to manage data for these calls. + */ +struct regulator_bulk_data { + const char *supply; + struct regulator *consumer; + + /* private: Internal use */ + int ret; +}; + +#if defined(CONFIG_REGULATOR) + +/* regulator get and put */ +struct regulator *__must_check regulator_get(struct device *dev, + const char *id); +struct regulator *__must_check devm_regulator_get(struct device *dev, + const char *id); +struct regulator *__must_check regulator_get_exclusive(struct device *dev, + const char *id); +void regulator_put(struct regulator *regulator); +void devm_regulator_put(struct regulator *regulator); + +/* regulator output control and status */ +int regulator_enable(struct regulator *regulator); +int regulator_disable(struct regulator *regulator); +int regulator_force_disable(struct regulator *regulator); +int regulator_is_enabled(struct regulator *regulator); +int regulator_disable_deferred(struct regulator *regulator, int ms); + +int regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int devm_regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_force_disable(int num_consumers, + struct regulator_bulk_data *consumers); +void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers); + +int regulator_count_voltages(struct regulator *regulator); +int regulator_list_voltage(struct regulator *regulator, unsigned selector); +int regulator_is_supported_voltage(struct regulator *regulator, + int min_uV, int max_uV); +int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); +int regulator_set_voltage_time(struct regulator *regulator, + int old_uV, int new_uV); +int regulator_get_voltage(struct regulator *regulator); +int regulator_sync_voltage(struct regulator *regulator); +int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA); +int regulator_get_current_limit(struct regulator *regulator); + +int regulator_set_mode(struct regulator *regulator, unsigned int mode); +unsigned int regulator_get_mode(struct regulator *regulator); +int regulator_set_optimum_mode(struct regulator *regulator, int load_uA); + +/* regulator notifier block */ +int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb); +int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb); + +/* driver data - core doesn't touch */ +void *regulator_get_drvdata(struct regulator *regulator); +void 
regulator_set_drvdata(struct regulator *regulator, void *data); + +#else + +/* + * Make sure client drivers will still build on systems with no software + * controllable voltage or current regulators. + */ +static inline struct regulator *__must_check regulator_get(struct device *dev, + const char *id) +{ + /* Nothing except the stubbed out regulator API should be + * looking at the value except to check if it is an error + * value. Drivers are free to handle NULL specifically by + * skipping all regulator API calls, but they don't have to. + * Drivers which don't, should make sure they properly handle + * corner cases of the API, such as regulator_get_voltage() + * returning 0. + */ + return NULL; +} + +static inline struct regulator *__must_check +devm_regulator_get(struct device *dev, const char *id) +{ + return NULL; +} + +static inline void regulator_put(struct regulator *regulator) +{ +} + +static inline void devm_regulator_put(struct regulator *regulator) +{ +} + +static inline int regulator_enable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_disable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_force_disable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_disable_deferred(struct regulator *regulator, + int ms) +{ + return 0; +} + +static inline int regulator_is_enabled(struct regulator *regulator) +{ + return 1; +} + +static inline int regulator_bulk_get(struct device *dev, + int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_force_disable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers) +{ +} + +static inline int regulator_set_voltage(struct regulator *regulator, + int min_uV, int max_uV) +{ + return 0; +} + +static inline int regulator_get_voltage(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA) +{ + return 0; +} + +static inline int regulator_get_current_limit(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_mode(struct regulator *regulator, + unsigned int mode) +{ + return 0; +} + +static inline unsigned int regulator_get_mode(struct regulator *regulator) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_set_optimum_mode(struct regulator *regulator, + int load_uA) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline void *regulator_get_drvdata(struct regulator *regulator) +{ + return NULL; +} + +static inline void regulator_set_drvdata(struct regulator *regulator, + void *data) +{ +} + +#endif + +#endif diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h new file mode 100644 
index 00000000..61206231 --- /dev/null +++ b/include/linux/regulator/db8500-prcmu.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * + * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson + * + * Interface to power domain regulators on DB8500 + */ + +#ifndef __REGULATOR_H__ +#define __REGULATOR_H__ + +/* Number of DB8500 regulators and regulator enumeration */ +enum db8500_regulator_id { + DB8500_REGULATOR_VAPE, + DB8500_REGULATOR_VARM, + DB8500_REGULATOR_VMODEM, + DB8500_REGULATOR_VPLL, + DB8500_REGULATOR_VSMPS1, + DB8500_REGULATOR_VSMPS2, + DB8500_REGULATOR_VSMPS3, + DB8500_REGULATOR_VRF1, + DB8500_REGULATOR_SWITCH_SVAMMDSP, + DB8500_REGULATOR_SWITCH_SVAMMDSPRET, + DB8500_REGULATOR_SWITCH_SVAPIPE, + DB8500_REGULATOR_SWITCH_SIAMMDSP, + DB8500_REGULATOR_SWITCH_SIAMMDSPRET, + DB8500_REGULATOR_SWITCH_SIAPIPE, + DB8500_REGULATOR_SWITCH_SGA, + DB8500_REGULATOR_SWITCH_B2R2_MCDE, + DB8500_REGULATOR_SWITCH_ESRAM12, + DB8500_REGULATOR_SWITCH_ESRAM12RET, + DB8500_REGULATOR_SWITCH_ESRAM34, + DB8500_REGULATOR_SWITCH_ESRAM34RET, + DB8500_NUM_REGULATORS +}; + +/* + * Exported interface for CPUIdle only. This function is called with all + * interrupts turned off. + */ +int power_state_active_is_enabled(void); + +#endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h new file mode 100644 index 00000000..fa8b55b8 --- /dev/null +++ b/include/linux/regulator/driver.h @@ -0,0 +1,229 @@ +/* + * driver.h -- SoC Regulator driver support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lrg@slimlogic.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Driver Interface. + */ + +#ifndef __LINUX_REGULATOR_DRIVER_H_ +#define __LINUX_REGULATOR_DRIVER_H_ + +#include <linux/device.h> +#include <linux/notifier.h> +#include <linux/regulator/consumer.h> + +struct regulator_dev; +struct regulator_init_data; + +enum regulator_status { + REGULATOR_STATUS_OFF, + REGULATOR_STATUS_ON, + REGULATOR_STATUS_ERROR, + /* fast/normal/idle/standby are flavors of "on" */ + REGULATOR_STATUS_FAST, + REGULATOR_STATUS_NORMAL, + REGULATOR_STATUS_IDLE, + REGULATOR_STATUS_STANDBY, +}; + +/** + * struct regulator_ops - regulator operations. + * + * @enable: Configure the regulator as enabled. + * @disable: Configure the regulator as disabled. + * @is_enabled: Return 1 if the regulator is enabled, 0 if not. + * May also return negative errno. + * + * @set_voltage: Set the voltage for the regulator within the range specified. + * The driver should select the voltage closest to min_uV. + * @set_voltage_sel: Set the voltage for the regulator using the specified + * selector. + * @get_voltage: Return the currently configured voltage for the regulator. + * @get_voltage_sel: Return the currently configured voltage selector for the + * regulator. + * @list_voltage: Return one of the supported voltages, in microvolts; zero + * if the selector indicates a voltage that is unusable on this system; + * or negative errno. Selectors range from zero to one less than + * regulator_desc.n_voltages. Voltages may be reported in any order. + * + * @set_current_limit: Configure a limit for a current-limited regulator. + * @get_current_limit: Get the configured limit for a current-limited regulator. 
+ * + * @set_mode: Set the configured operating mode for the regulator. + * @get_mode: Get the configured operating mode for the regulator. + * @get_status: Return actual (not as-configured) status of regulator, as a + * REGULATOR_STATUS value (or negative errno) + * @get_optimum_mode: Get the most efficient operating mode for the regulator + * when running with the specified parameters. + * + * @enable_time: Time taken for the regulator voltage output voltage to + * stabilise after being enabled, in microseconds. + * @set_voltage_time_sel: Time taken for the regulator voltage output voltage + * to stabilise after being set to a new value, in microseconds. + * The function provides the from and to voltage selector, the + * function should return the worst case. + * + * @set_suspend_voltage: Set the voltage for the regulator when the system + * is suspended. + * @set_suspend_enable: Mark the regulator as enabled when the system is + * suspended. + * @set_suspend_disable: Mark the regulator as disabled when the system is + * suspended. + * @set_suspend_mode: Set the operating mode for the regulator when the + * system is suspended. + * + * This struct describes regulator operations which can be implemented by + * regulator chip drivers. + */ +struct regulator_ops { + + /* enumerate supported voltages */ + int (*list_voltage) (struct regulator_dev *, unsigned selector); + + /* get/set regulator voltage */ + int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV, + unsigned *selector); + int (*set_voltage_sel) (struct regulator_dev *, unsigned selector); + int (*get_voltage) (struct regulator_dev *); + int (*get_voltage_sel) (struct regulator_dev *); + + /* get/set regulator current */ + int (*set_current_limit) (struct regulator_dev *, + int min_uA, int max_uA); + int (*get_current_limit) (struct regulator_dev *); + + /* enable/disable regulator */ + int (*enable) (struct regulator_dev *); + int (*disable) (struct regulator_dev *); + int (*is_enabled) (struct regulator_dev *); + + /* get/set regulator operating mode (defined in consumer.h) */ + int (*set_mode) (struct regulator_dev *, unsigned int mode); + unsigned int (*get_mode) (struct regulator_dev *); + + /* Time taken to enable or set voltage on the regulator */ + int (*enable_time) (struct regulator_dev *); + int (*set_voltage_time_sel) (struct regulator_dev *, + unsigned int old_selector, + unsigned int new_selector); + + /* report regulator status ... most other accessors report + * control inputs, this reports results of combining inputs + * from Linux (and other sources) with the actual load. + * returns REGULATOR_STATUS_* or negative errno. + */ + int (*get_status)(struct regulator_dev *); + + /* get most efficient regulator operating mode for load */ + unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, + int output_uV, int load_uA); + + /* the operations below are for configuration of regulator state when + * its parent PMIC enters a global STANDBY/HIBERNATE state */ + + /* set regulator suspend voltage */ + int (*set_suspend_voltage) (struct regulator_dev *, int uV); + + /* enable/disable regulator in suspend state */ + int (*set_suspend_enable) (struct regulator_dev *); + int (*set_suspend_disable) (struct regulator_dev *); + + /* set regulator suspend operating mode (defined in consumer.h) */ + int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); +}; + +/* + * Regulators can either control voltage or current. 
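To illustrate the ops table above, here is a skeleton of a voltage-regulator driver with a fixed selector table. The chip, its voltages and the empty register write are stand-ins, and only a minimal subset of operations is filled in; the regulator_desc it feeds into is declared further down in this header.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

/* Hypothetical LDO with four selectable output voltages. */
static const int example_voltages[] = { 1200000, 1800000, 2500000, 3300000 };

static int example_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
        if (selector >= ARRAY_SIZE(example_voltages))
                return -EINVAL;
        return example_voltages[selector];
}

static int example_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
        /* Write the selector to the chip here (bus access omitted). */
        return 0;
}

static struct regulator_ops example_ops = {
        .list_voltage    = example_list_voltage,
        .set_voltage_sel = example_set_voltage_sel,
};

static struct regulator_desc example_desc = {
        .name       = "example-ldo",
        .id         = 0,
        .n_voltages = ARRAY_SIZE(example_voltages),
        .ops        = &example_ops,
        .type       = REGULATOR_VOLTAGE,
        .owner      = THIS_MODULE,
};

The descriptor would then be passed to regulator_register() together with the board's regulator_init_data.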
+ */ +enum regulator_type { + REGULATOR_VOLTAGE, + REGULATOR_CURRENT, +}; + +/** + * struct regulator_desc - Regulator descriptor + * + * Each regulator registered with the core is described with a structure of + * this type. + * + * @name: Identifying name for the regulator. + * @supply_name: Identifying the regulator supply + * @id: Numerical identifier for the regulator. + * @n_voltages: Number of selectors available for ops.list_voltage(). + * @ops: Regulator operations table. + * @irq: Interrupt number for the regulator. + * @type: Indicates if the regulator is a voltage or current regulator. + * @owner: Module providing the regulator, used for refcounting. + */ +struct regulator_desc { + const char *name; + const char *supply_name; + int id; + unsigned n_voltages; + struct regulator_ops *ops; + int irq; + enum regulator_type type; + struct module *owner; +}; + +/* + * struct regulator_dev + * + * Voltage / Current regulator class device. One for each + * regulator. + * + * This should *not* be used directly by anything except the regulator + * core and notification injection (which should take the mutex and do + * no other direct access). + */ +struct regulator_dev { + struct regulator_desc *desc; + int exclusive; + u32 use_count; + u32 open_count; + + /* lists we belong to */ + struct list_head list; /* list of all regulators */ + + /* lists we own */ + struct list_head consumer_list; /* consumers we supply */ + + struct blocking_notifier_head notifier; + struct mutex mutex; /* consumer lock */ + struct module *owner; + struct device dev; + struct regulation_constraints *constraints; + struct regulator *supply; /* for tree */ + + struct delayed_work disable_work; + int deferred_disables; + + void *reg_data; /* regulator_dev data */ + + struct dentry *debugfs; +}; + +struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, + struct device *dev, const struct regulator_init_data *init_data, + void *driver_data, struct device_node *of_node); +void regulator_unregister(struct regulator_dev *rdev); + +int regulator_notifier_call_chain(struct regulator_dev *rdev, + unsigned long event, void *data); + +void *rdev_get_drvdata(struct regulator_dev *rdev); +struct device *rdev_get_dev(struct regulator_dev *rdev); +int rdev_get_id(struct regulator_dev *rdev); + +int regulator_mode_to_status(unsigned int); + +void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); + +#endif diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h new file mode 100644 index 00000000..936a7d8c --- /dev/null +++ b/include/linux/regulator/fixed.h @@ -0,0 +1,64 @@ +/* + * fixed.h + * + * Copyright 2008 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * Copyright (c) 2009 Nokia Corporation + * Roger Quadros <ext-roger.quadros@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ */ + +#ifndef __REGULATOR_FIXED_H +#define __REGULATOR_FIXED_H + +struct regulator_init_data; + +/** + * struct fixed_voltage_config - fixed_voltage_config structure + * @supply_name: Name of the regulator supply + * @microvolts: Output voltage of regulator + * @gpio: GPIO to use for enable control + * set to -EINVAL if not used + * @startup_delay: Start-up time in microseconds + * @enable_high: Polarity of enable GPIO + * 1 = Active high, 0 = Active low + * @enabled_at_boot: Whether regulator has been enabled at + * boot or not. 1 = Yes, 0 = No + * This is used to keep the regulator at + * the default state + * @init_data: regulator_init_data + * + * This structure contains fixed voltage regulator configuration + * information that must be passed by platform code to the fixed + * voltage regulator driver. + */ +struct fixed_voltage_config { + const char *supply_name; + int microvolts; + int gpio; + unsigned startup_delay; + unsigned enable_high:1; + unsigned enabled_at_boot:1; + struct regulator_init_data *init_data; +}; + +struct regulator_consumer_supply; + +#if IS_ENABLED(CONFIG_REGULATOR) +struct platform_device *regulator_register_fixed(int id, + struct regulator_consumer_supply *supplies, int num_supplies); +#else +static inline struct platform_device *regulator_register_fixed(int id, + struct regulator_consumer_supply *supplies, int num_supplies) +{ + return NULL; +} +#endif + +#endif diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h new file mode 100644 index 00000000..19fbd267 --- /dev/null +++ b/include/linux/regulator/gpio-regulator.h @@ -0,0 +1,87 @@ +/* + * gpio-regulator.h + * + * Copyright 2011 Heiko Stuebner <heiko@sntech.de> + * + * based on fixed.h + * + * Copyright 2008 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * Copyright (c) 2009 Nokia Corporation + * Roger Quadros <ext-roger.quadros@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + */ + +#ifndef __REGULATOR_GPIO_H +#define __REGULATOR_GPIO_H + +struct regulator_init_data; + +enum regulator_type; + +/** + * struct gpio_regulator_state - state description + * @value: microvolts or microamps + * @gpios: bitfield of gpio target-states for the value + * + * This structure describes a supported setting of the regulator + * and the necessary gpio-state to achieve it. + * + * The n-th bit in the bitfield describes the state of the n-th GPIO + * from the gpios-array defined in gpio_regulator_config below. + */ +struct gpio_regulator_state { + int value; + int gpios; +}; + +/** + * struct gpio_regulator_config - config structure + * @supply_name: Name of the regulator supply + * @enable_gpio: GPIO to use for enable control + * set to -EINVAL if not used + * @enable_high: Polarity of enable GPIO + * 1 = Active high, 0 = Active low + * @enabled_at_boot: Whether regulator has been enabled at + * boot or not. 
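Returning to fixed.h above, a board file typically describes a fixed rail like this; the supply name, GPIO number, delay and constraints are invented. The resulting structure is passed as platform_data to the fixed-voltage regulator platform device.

#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>

/* Hypothetical 3.3 V rail gated by GPIO 42; consumers may switch it. */
static struct regulator_init_data example_fixed_init_data = {
        .constraints = {
                .valid_ops_mask = REGULATOR_CHANGE_STATUS,
        },
};

static struct fixed_voltage_config example_fixed_config = {
        .supply_name     = "vcc-3v3",
        .microvolts      = 3300000,
        .gpio            = 42,
        .startup_delay   = 1000,        /* 1 ms */
        .enable_high     = 1,
        .enabled_at_boot = 1,
        .init_data       = &example_fixed_init_data,
};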
1 = Yes, 0 = No + * This is used to keep the regulator at + * the default state + * @startup_delay: Start-up time in microseconds + * @gpios: Array containing the gpios needed to control + * the setting of the regulator + * @nr_gpios: Number of gpios + * @states: Array of gpio_regulator_state entries describing + * the gpio state for specific voltages + * @nr_states: Number of states available + * @regulator_type: either REGULATOR_CURRENT or REGULATOR_VOLTAGE + * @init_data: regulator_init_data + * + * This structure contains gpio-voltage regulator configuration + * information that must be passed by platform code to the + * gpio-voltage regulator driver. + */ +struct gpio_regulator_config { + const char *supply_name; + + int enable_gpio; + unsigned enable_high:1; + unsigned enabled_at_boot:1; + unsigned startup_delay; + + struct gpio *gpios; + int nr_gpios; + + struct gpio_regulator_state *states; + int nr_states; + + enum regulator_type type; + struct regulator_init_data *init_data; +}; + +#endif diff --git a/include/linux/regulator/lp3971.h b/include/linux/regulator/lp3971.h new file mode 100644 index 00000000..61401649 --- /dev/null +++ b/include/linux/regulator/lp3971.h @@ -0,0 +1,51 @@ +/* + * National Semiconductors LP3971 PMIC chip client interface + * + * Copyright (C) 2009 Samsung Electronics + * Author: Marek Szyprowski <m.szyprowski@samsung.com> + * + * Based on wm8400.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_REGULATOR_LP3971_H +#define __LINUX_REGULATOR_LP3971_H + +#include <linux/regulator/machine.h> + +#define LP3971_LDO1 0 +#define LP3971_LDO2 1 +#define LP3971_LDO3 2 +#define LP3971_LDO4 3 +#define LP3971_LDO5 4 + +#define LP3971_DCDC1 5 +#define LP3971_DCDC2 6 +#define LP3971_DCDC3 7 + +#define LP3971_NUM_REGULATORS 8 + +struct lp3971_regulator_subdev { + int id; + struct regulator_init_data *initdata; +}; + +struct lp3971_platform_data { + int num_regulators; + struct lp3971_regulator_subdev *regulators; +}; + +#endif diff --git a/include/linux/regulator/lp3972.h b/include/linux/regulator/lp3972.h new file mode 100644 index 00000000..9bb7389b --- /dev/null +++ b/include/linux/regulator/lp3972.h @@ -0,0 +1,48 @@ +/* + * National Semiconductors LP3972 PMIC chip client interface + * + * Based on lp3971.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_REGULATOR_LP3972_H +#define __LINUX_REGULATOR_LP3972_H + +#include <linux/regulator/machine.h> + +#define LP3972_LDO1 0 +#define LP3972_LDO2 1 +#define LP3972_LDO3 2 +#define LP3972_LDO4 3 +#define LP3972_LDO5 4 + +#define LP3972_DCDC1 5 +#define LP3972_DCDC2 6 +#define LP3972_DCDC3 7 + +#define LP3972_NUM_REGULATORS 8 + +struct lp3972_regulator_subdev { + int id; + struct regulator_init_data *initdata; +}; + +struct lp3972_platform_data { + int num_regulators; + struct lp3972_regulator_subdev *regulators; +}; + +#endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h new file mode 100644 index 00000000..b0210844 --- /dev/null +++ b/include/linux/regulator/machine.h @@ -0,0 +1,201 @@ +/* + * machine.h -- SoC Regulator support, machine/board driver API. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lrg@slimlogic.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Machine/Board Interface. + */ + +#ifndef __LINUX_REGULATOR_MACHINE_H_ +#define __LINUX_REGULATOR_MACHINE_H_ + +#include <linux/regulator/consumer.h> +#include <linux/suspend.h> + +struct regulator; + +/* + * Regulator operation constraint flags. These flags are used to enable + * certain regulator operations and can be OR'ed together. + * + * VOLTAGE: Regulator output voltage can be changed by software on this + * board/machine. + * CURRENT: Regulator output current can be changed by software on this + * board/machine. + * MODE: Regulator operating mode can be changed by software on this + * board/machine. + * STATUS: Regulator can be enabled and disabled. + * DRMS: Dynamic Regulator Mode Switching is enabled for this regulator. + */ + +#define REGULATOR_CHANGE_VOLTAGE 0x1 +#define REGULATOR_CHANGE_CURRENT 0x2 +#define REGULATOR_CHANGE_MODE 0x4 +#define REGULATOR_CHANGE_STATUS 0x8 +#define REGULATOR_CHANGE_DRMS 0x10 + +/** + * struct regulator_state - regulator state during low power system states + * + * This describes a regulators state during a system wide low power + * state. One of enabled or disabled must be set for the + * configuration to be applied. + * + * @uV: Operating voltage during suspend. + * @mode: Operating mode during suspend. + * @enabled: Enabled during suspend. + * @disabled: Disabled during suspend. + */ +struct regulator_state { + int uV; /* suspend voltage */ + unsigned int mode; /* suspend regulator operating mode */ + int enabled; /* is regulator enabled in this suspend state */ + int disabled; /* is the regulator disbled in this suspend state */ +}; + +/** + * struct regulation_constraints - regulator operating constraints. + * + * This struct describes regulator and board/machine specific constraints. + * + * @name: Descriptive name for the constraints, used for display purposes. + * + * @min_uV: Smallest voltage consumers may set. + * @max_uV: Largest voltage consumers may set. + * @uV_offset: Offset applied to voltages from consumer to compensate for + * voltage drops. + * + * @min_uA: Smallest current consumers may set. + * @max_uA: Largest current consumers may set. 
+ * + * @valid_modes_mask: Mask of modes which may be configured by consumers. + * @valid_ops_mask: Operations which may be performed by consumers. + * + * @always_on: Set if the regulator should never be disabled. + * @boot_on: Set if the regulator is enabled when the system is initially + * started. If the regulator is not enabled by the hardware or + * bootloader then it will be enabled when the constraints are + * applied. + * @apply_uV: Apply the voltage constraint when initialising. + * + * @input_uV: Input voltage for regulator when supplied by another regulator. + * + * @state_disk: State for regulator when system is suspended in disk mode. + * @state_mem: State for regulator when system is suspended in mem mode. + * @state_standby: State for regulator when system is suspended in standby + * mode. + * @initial_state: Suspend state to set by default. + * @initial_mode: Mode to set at startup. + */ +struct regulation_constraints { + + const char *name; + + /* voltage output range (inclusive) - for voltage control */ + int min_uV; + int max_uV; + + int uV_offset; + + /* current output range (inclusive) - for current control */ + int min_uA; + int max_uA; + + /* valid regulator operating modes for this machine */ + unsigned int valid_modes_mask; + + /* valid operations for regulator on this machine */ + unsigned int valid_ops_mask; + + /* regulator input voltage - only if supply is another regulator */ + int input_uV; + + /* regulator suspend states for global PMIC STANDBY/HIBERNATE */ + struct regulator_state state_disk; + struct regulator_state state_mem; + struct regulator_state state_standby; + suspend_state_t initial_state; /* suspend state to set at init */ + + /* mode to set on startup */ + unsigned int initial_mode; + + /* constraint flags */ + unsigned always_on:1; /* regulator never off when system is on */ + unsigned boot_on:1; /* bootloader/firmware enabled regulator */ + unsigned apply_uV:1; /* apply uV constraint if min == max */ +}; + +/** + * struct regulator_consumer_supply - supply -> device mapping + * + * This maps a supply name to a device. Use of dev_name allows support for + * buses which make struct device available late such as I2C. + * + * @dev_name: Result of dev_name() for the consumer. + * @supply: Name for the supply. + */ +struct regulator_consumer_supply { + const char *dev_name; /* dev_name() for consumer */ + const char *supply; /* consumer supply - e.g. "vcc" */ +}; + +/* Initialize struct regulator_consumer_supply */ +#define REGULATOR_SUPPLY(_name, _dev_name) \ +{ \ + .supply = _name, \ + .dev_name = _dev_name, \ +} + +/** + * struct regulator_init_data - regulator platform initialisation data. + * + * Initialisation constraints, our supply and consumers supplies. + * + * @supply_regulator: Parent regulator. Specified using the regulator name + * as it appears in the name field in sysfs, which can + * be explicitly set using the constraints field 'name'. + * + * @constraints: Constraints. These must be specified for the regulator to + * be usable. + * @num_consumer_supplies: Number of consumer device supplies. + * @consumer_supplies: Consumer device supply configuration. + * + * @regulator_init: Callback invoked when the regulator has been registered. + * @driver_data: Data passed to regulator_init. 
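Pulling the machine.h pieces together, board code usually wires consumer mappings and constraints up roughly as follows, feeding the struct regulator_init_data defined immediately below; the consumer device names, supply names and voltage limits are invented.

#include <linux/kernel.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_ldo1_consumers[] = {
        REGULATOR_SUPPLY("vdd", "1-004b"),      /* hypothetical I2C sensor */
        REGULATOR_SUPPLY("vcc", "example-codec"),
};

static struct regulator_init_data example_ldo1_init_data = {
        .constraints = {
                .name           = "LDO1",
                .min_uV         = 1800000,
                .max_uV         = 3300000,
                .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
                                  REGULATOR_CHANGE_STATUS,
                .boot_on        = 1,
        },
        .num_consumer_supplies = ARRAY_SIZE(example_ldo1_consumers),
        .consumer_supplies     = example_ldo1_consumers,
};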
+ */ +struct regulator_init_data { + const char *supply_regulator; /* or NULL for system supply */ + + struct regulation_constraints constraints; + + int num_consumer_supplies; + struct regulator_consumer_supply *consumer_supplies; + + /* optional regulator machine specific init */ + int (*regulator_init)(void *driver_data); + void *driver_data; /* core does not touch this */ +}; + +int regulator_suspend_prepare(suspend_state_t state); +int regulator_suspend_finish(void); + +#ifdef CONFIG_REGULATOR +void regulator_has_full_constraints(void); +void regulator_use_dummy_regulator(void); +#else +static inline void regulator_has_full_constraints(void) +{ +} + +static inline void regulator_use_dummy_regulator(void) +{ +} +#endif + +#endif diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h new file mode 100644 index 00000000..de9a7fae --- /dev/null +++ b/include/linux/regulator/max1586.h @@ -0,0 +1,63 @@ +/* + * max1586.h -- Voltage regulation for the Maxim 1586 + * + * Copyright (C) 2008 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef REGULATOR_MAX1586 +#define REGULATOR_MAX1586 + +#include <linux/regulator/machine.h> + +#define MAX1586_V3 0 +#define MAX1586_V6 1 + +/* precalculated values for v3_gain */ +#define MAX1586_GAIN_NO_R24 1000000 /* 700000 .. 1475000 mV */ +#define MAX1586_GAIN_R24_3k32 1051098 /* 735768 .. 1550369 mV */ +#define MAX1586_GAIN_R24_5k11 1078648 /* 755053 .. 1591005 mV */ +#define MAX1586_GAIN_R24_7k5 1115432 /* 780802 .. 1645262 mV */ + +/** + * max1586_subdev_data - regulator data + * @id: regulator Id (either MAX1586_V3 or MAX1586_V6) + * @name: regulator cute name (example for V3: "vcc_core") + * @platform_data: regulator init data (constraints, supplies, ...) + */ +struct max1586_subdev_data { + int id; + char *name; + struct regulator_init_data *platform_data; +}; + +/** + * max1586_platform_data - platform data for max1586 + * @num_subdevs: number of regulators used (may be 1 or 2) + * @subdevs: regulator used + * At most, there will be a regulator for V3 and one for V6 voltages. + * @v3_gain: gain on the V3 voltage output multiplied by 1e6. + * This can be calculated as ((1 + R24/R25 + R24/185.5kOhm) * 1e6) + * for an external resistor configuration as described in the + * data sheet (R25=100kOhm). + */ +struct max1586_platform_data { + int num_subdevs; + struct max1586_subdev_data *subdevs; + int v3_gain; +}; + +#endif diff --git a/include/linux/regulator/max8649.h b/include/linux/regulator/max8649.h new file mode 100644 index 00000000..417d14ec --- /dev/null +++ b/include/linux/regulator/max8649.h @@ -0,0 +1,44 @@ +/* + * Interface of Maxim max8649 + * + * Copyright (C) 2009-2010 Marvell International Ltd. 
+ * Haojian Zhuang <haojian.zhuang@marvell.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_REGULATOR_MAX8649_H +#define __LINUX_REGULATOR_MAX8649_H + +#include <linux/regulator/machine.h> + +enum { + MAX8649_EXTCLK_26MHZ = 0, + MAX8649_EXTCLK_13MHZ, + MAX8649_EXTCLK_19MHZ, /* 19.2MHz */ +}; + +enum { + MAX8649_RAMP_32MV = 0, + MAX8649_RAMP_16MV, + MAX8649_RAMP_8MV, + MAX8649_RAMP_4MV, + MAX8649_RAMP_2MV, + MAX8649_RAMP_1MV, + MAX8649_RAMP_0_5MV, + MAX8649_RAMP_0_25MV, +}; + +struct max8649_platform_data { + struct regulator_init_data *regulator; + + unsigned mode:2; /* bit[1:0] = VID1,VID0 */ + unsigned extclk_freq:2; + unsigned extclk:1; + unsigned ramp_timing:3; + unsigned ramp_down:1; +}; + +#endif /* __LINUX_REGULATOR_MAX8649_H */ diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h new file mode 100644 index 00000000..99367636 --- /dev/null +++ b/include/linux/regulator/max8660.h @@ -0,0 +1,57 @@ +/* + * max8660.h -- Voltage regulation for the Maxim 8660/8661 + * + * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_REGULATOR_MAX8660_H +#define __LINUX_REGULATOR_MAX8660_H + +#include <linux/regulator/machine.h> + +enum { + MAX8660_V3, + MAX8660_V4, + MAX8660_V5, + MAX8660_V6, + MAX8660_V7, + MAX8660_V_END, +}; + +/** + * max8660_subdev_data - regulator subdev data + * @id: regulator id + * @name: regulator name + * @platform_data: regulator init data + */ +struct max8660_subdev_data { + int id; + char *name; + struct regulator_init_data *platform_data; +}; + +/** + * max8660_platform_data - platform data for max8660 + * @num_subdevs: number of regulators used + * @subdevs: pointer to regulators used + * @en34_is_high: if EN34 is driven high, regulators cannot be en-/disabled. + */ +struct max8660_platform_data { + int num_subdevs; + struct max8660_subdev_data *subdevs; + unsigned en34_is_high:1; +}; +#endif diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h new file mode 100644 index 00000000..45e42855 --- /dev/null +++ b/include/linux/regulator/max8952.h @@ -0,0 +1,135 @@ +/* + * max8952.h - Voltage regulation for the Maxim 8952 + * + * Copyright (C) 2010 Samsung Electrnoics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef REGULATOR_MAX8952 +#define REGULATOR_MAX8952 + +#include <linux/regulator/machine.h> + +enum { + MAX8952_DVS_MODE0, + MAX8952_DVS_MODE1, + MAX8952_DVS_MODE2, + MAX8952_DVS_MODE3, +}; + +enum { + MAX8952_DVS_770mV = 0, + MAX8952_DVS_780mV, + MAX8952_DVS_790mV, + MAX8952_DVS_800mV, + MAX8952_DVS_810mV, + MAX8952_DVS_820mV, + MAX8952_DVS_830mV, + MAX8952_DVS_840mV, + MAX8952_DVS_850mV, + MAX8952_DVS_860mV, + MAX8952_DVS_870mV, + MAX8952_DVS_880mV, + MAX8952_DVS_890mV, + MAX8952_DVS_900mV, + MAX8952_DVS_910mV, + MAX8952_DVS_920mV, + MAX8952_DVS_930mV, + MAX8952_DVS_940mV, + MAX8952_DVS_950mV, + MAX8952_DVS_960mV, + MAX8952_DVS_970mV, + MAX8952_DVS_980mV, + MAX8952_DVS_990mV, + MAX8952_DVS_1000mV, + MAX8952_DVS_1010mV, + MAX8952_DVS_1020mV, + MAX8952_DVS_1030mV, + MAX8952_DVS_1040mV, + MAX8952_DVS_1050mV, + MAX8952_DVS_1060mV, + MAX8952_DVS_1070mV, + MAX8952_DVS_1080mV, + MAX8952_DVS_1090mV, + MAX8952_DVS_1100mV, + MAX8952_DVS_1110mV, + MAX8952_DVS_1120mV, + MAX8952_DVS_1130mV, + MAX8952_DVS_1140mV, + MAX8952_DVS_1150mV, + MAX8952_DVS_1160mV, + MAX8952_DVS_1170mV, + MAX8952_DVS_1180mV, + MAX8952_DVS_1190mV, + MAX8952_DVS_1200mV, + MAX8952_DVS_1210mV, + MAX8952_DVS_1220mV, + MAX8952_DVS_1230mV, + MAX8952_DVS_1240mV, + MAX8952_DVS_1250mV, + MAX8952_DVS_1260mV, + MAX8952_DVS_1270mV, + MAX8952_DVS_1280mV, + MAX8952_DVS_1290mV, + MAX8952_DVS_1300mV, + MAX8952_DVS_1310mV, + MAX8952_DVS_1320mV, + MAX8952_DVS_1330mV, + MAX8952_DVS_1340mV, + MAX8952_DVS_1350mV, + MAX8952_DVS_1360mV, + MAX8952_DVS_1370mV, + MAX8952_DVS_1380mV, + MAX8952_DVS_1390mV, + MAX8952_DVS_1400mV, +}; + +enum { + MAX8952_SYNC_FREQ_26MHZ, /* Default */ + MAX8952_SYNC_FREQ_13MHZ, + MAX8952_SYNC_FREQ_19_2MHZ, +}; + +enum { + MAX8952_RAMP_32mV_us = 0, /* Default */ + MAX8952_RAMP_16mV_us, + MAX8952_RAMP_8mV_us, + MAX8952_RAMP_4mV_us, + MAX8952_RAMP_2mV_us, + MAX8952_RAMP_1mV_us, + MAX8952_RAMP_0_5mV_us, + MAX8952_RAMP_0_25mV_us, +}; + +#define MAX8952_NUM_DVS_MODE 4 + +struct max8952_platform_data { + int gpio_vid0; + int gpio_vid1; + int gpio_en; + + u8 default_mode; + u8 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */ + + u8 sync_freq; + u8 ramp_speed; + + struct regulator_init_data reg_data; +}; + + +#endif /* REGULATOR_MAX8952 */ diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h new file mode 100644 index 00000000..769704f2 --- /dev/null +++ b/include/linux/regulator/of_regulator.h @@ -0,0 +1,22 @@ +/* + * OpenFirmware regulator support routines + * + */ + +#ifndef __LINUX_OF_REG_H +#define __LINUX_OF_REG_H + +#if defined(CONFIG_OF) +extern struct regulator_init_data + *of_get_regulator_init_data(struct device *dev, + struct device_node *node); +#else +static inline struct regulator_init_data + *of_get_regulator_init_data(struct device *dev, + struct device_node *node) +{ + return NULL; +} +#endif /* CONFIG_OF */ + +#endif /* __LINUX_OF_REG_H */ diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h new file mode 100644 index 00000000..6a5c1b2c --- /dev/null +++ 
b/include/linux/regulator/tps62360.h @@ -0,0 +1,57 @@ +/* + * tps62360.h -- TI tps62360 + * + * Interface for regulator driver for TI TPS62360 Processor core supply + * + * Copyright (C) 2012 NVIDIA Corporation + + * Author: Laxman Dewangan <ldewangan@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef __LINUX_REGULATOR_TPS62360_H +#define __LINUX_REGULATOR_TPS62360_H + +#include <linux/regulator/machine.h> + +/* + * struct tps62360_regulator_platform_data - tps62360 regulator platform data. + * + * @reg_init_data: The regulator init data. + * @en_force_pwm: Enable force pwm or not. + * @en_discharge: Enable discharge the output capacitor via internal + * register. + * @en_internal_pulldn: internal pull down enable or not. + * @vsel0_gpio: Gpio number for vsel0. It should be -1 if this is tied with + * fixed logic. + * @vsel1_gpio: Gpio number for vsel1. It should be -1 if this is tied with + * fixed logic. + * @vsel0_def_state: Default state of vsel0. 1 if it is high else 0. + * @vsel1_def_state: Default state of vsel1. 1 if it is high else 0. + */ +struct tps62360_regulator_platform_data { + struct regulator_init_data reg_init_data; + bool en_force_pwm; + bool en_discharge; + bool en_internal_pulldn; + int vsel0_gpio; + int vsel1_gpio; + int vsel0_def_state; + int vsel1_def_state; +}; + +#endif /* __LINUX_REGULATOR_TPS62360_H */ diff --git a/include/linux/regulator/tps6507x.h b/include/linux/regulator/tps6507x.h new file mode 100644 index 00000000..4892f591 --- /dev/null +++ b/include/linux/regulator/tps6507x.h @@ -0,0 +1,32 @@ +/* + * tps6507x.h -- Voltage regulation for the Texas Instruments TPS6507X + * + * Copyright (C) 2010 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef REGULATOR_TPS6507X +#define REGULATOR_TPS6507X + +/** + * tps6507x_reg_platform_data - platform data for tps6507x + * @defdcdc_default: Defines whether DCDC high or the low register controls + * output voltage by default. Valid for DCDC2 and DCDC3 outputs only. 
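Returning briefly to the tps62360 platform data defined just above, a board with VSEL0 strapped in hardware and VSEL1 driven by a GPIO might fill it roughly as follows (a sketch only; the GPIO number and the voltage window are hypothetical):

static struct tps62360_regulator_platform_data board_tps62360_pdata = {
	.reg_init_data = {
		.constraints = {
			.min_uV		= 900000,
			.max_uV		= 1400000,
			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE |
					  REGULATOR_CHANGE_STATUS,
		},
	},
	.en_internal_pulldn	= true,
	.vsel0_gpio		= -1,	/* tied to fixed logic on the board */
	.vsel1_gpio		= 130,	/* hypothetical GPIO number */
	.vsel0_def_state	= 0,
	.vsel1_def_state	= 1,
};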
+ */ +struct tps6507x_reg_platform_data { + bool defdcdc_default; +}; + +#endif diff --git a/include/linux/regulator/userspace-consumer.h b/include/linux/regulator/userspace-consumer.h new file mode 100644 index 00000000..b4554ce9 --- /dev/null +++ b/include/linux/regulator/userspace-consumer.h @@ -0,0 +1,25 @@ +#ifndef __REGULATOR_PLATFORM_CONSUMER_H_ +#define __REGULATOR_PLATFORM_CONSUMER_H_ + +struct regulator_consumer_supply; + +/** + * struct regulator_userspace_consumer_data - line consumer + * initialisation data. + * + * @name: Name for the consumer line + * @num_supplies: Number of supplies feeding the line + * @supplies: Supplies configuration. + * @init_on: Set if the regulators supplying the line should be + * enabled during initialisation + */ +struct regulator_userspace_consumer_data { + const char *name; + + int num_supplies; + struct regulator_bulk_data *supplies; + + bool init_on; +}; + +#endif /* __REGULATOR_PLATFORM_CONSUMER_H_ */ diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h new file mode 100644 index 00000000..ea3700cd --- /dev/null +++ b/include/linux/reiserfs_fs.h @@ -0,0 +1,26 @@ +/* + * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for licensing and copyright details + */ +#ifndef _LINUX_REISER_FS_H +#define _LINUX_REISER_FS_H + +#include <linux/types.h> +#include <linux/magic.h> + +/* + * include/linux/reiser_fs.h + * + * Reiser File System constants and structures + * + */ + +/* ioctl's command */ +#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long) +/* define following flags to be the same as in ext2, so that chattr(1), + lsattr(1) will work with us. */ +#define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS +#define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS +#define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION +#define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION + +#endif /* _LINUX_REISER_FS_H */ diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h new file mode 100644 index 00000000..d8ce17c2 --- /dev/null +++ b/include/linux/reiserfs_xattr.h @@ -0,0 +1,24 @@ +/* + File: linux/reiserfs_xattr.h +*/ + +#ifndef _LINUX_REISERFS_XATTR_H +#define _LINUX_REISERFS_XATTR_H + +#include <linux/types.h> + +/* Magic value in header */ +#define REISERFS_XATTR_MAGIC 0x52465841 /* "RFXA" */ + +struct reiserfs_xattr_header { + __le32 h_magic; /* magic number for identification */ + __le32 h_hash; /* hash of the value */ +}; + +struct reiserfs_security_handle { + char *name; + void *value; + size_t length; +}; + +#endif /* _LINUX_REISERFS_XATTR_H */ diff --git a/include/linux/relay.h b/include/linux/relay.h new file mode 100644 index 00000000..91cacc34 --- /dev/null +++ b/include/linux/relay.h @@ -0,0 +1,292 @@ +/* + * linux/include/linux/relay.h + * + * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp + * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com) + * + * CONFIG_RELAY definitions and declarations + */ + +#ifndef _LINUX_RELAY_H +#define _LINUX_RELAY_H + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/wait.h> +#include <linux/list.h> +#include <linux/bug.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/kref.h> + +/* Needs a _much_ better name... 
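The FIX_SIZE() macro defined next rounds a byte count up to a whole number of pages (and to at least one page); relay_open() uses it, roughly, to derive a channel's alloc_size from subbuf_size * n_subbufs. A quick worked check, assuming PAGE_SIZE == 4096 (so PAGE_MASK == ~4095UL):

	FIX_SIZE(1)	/* == 4096 */
	FIX_SIZE(4096)	/* == 4096 */
	FIX_SIZE(4097)	/* == 8192 */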
*/ +#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE) + +/* + * Tracks changes to rchan/rchan_buf structs + */ +#define RELAYFS_CHANNEL_VERSION 7 + +/* + * Per-cpu relay channel buffer + */ +struct rchan_buf +{ + void *start; /* start of channel buffer */ + void *data; /* start of current sub-buffer */ + size_t offset; /* current offset into sub-buffer */ + size_t subbufs_produced; /* count of sub-buffers produced */ + size_t subbufs_consumed; /* count of sub-buffers consumed */ + struct rchan *chan; /* associated channel */ + wait_queue_head_t read_wait; /* reader wait queue */ + struct timer_list timer; /* reader wake-up timer */ + struct dentry *dentry; /* channel file dentry */ + struct kref kref; /* channel buffer refcount */ + struct page **page_array; /* array of current buffer pages */ + unsigned int page_count; /* number of current buffer pages */ + unsigned int finalized; /* buffer has been finalized */ + size_t *padding; /* padding counts per sub-buffer */ + size_t prev_padding; /* temporary variable */ + size_t bytes_consumed; /* bytes consumed in cur read subbuf */ + size_t early_bytes; /* bytes consumed before VFS inited */ + unsigned int cpu; /* this buf's cpu */ +} ____cacheline_aligned; + +/* + * Relay channel data structure + */ +struct rchan +{ + u32 version; /* the version of this struct */ + size_t subbuf_size; /* sub-buffer size */ + size_t n_subbufs; /* number of sub-buffers per buffer */ + size_t alloc_size; /* total buffer size allocated */ + struct rchan_callbacks *cb; /* client callbacks */ + struct kref kref; /* channel refcount */ + void *private_data; /* for user-defined data */ + size_t last_toobig; /* tried to log event > subbuf size */ + struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */ + int is_global; /* One global buffer ? */ + struct list_head list; /* for channel list */ + struct dentry *parent; /* parent dentry passed to open */ + int has_base_filename; /* has a filename associated? */ + char base_filename[NAME_MAX]; /* saved base filename */ +}; + +/* + * Relay channel client callbacks + */ +struct rchan_callbacks +{ + /* + * subbuf_start - called on buffer-switch to a new sub-buffer + * @buf: the channel buffer containing the new sub-buffer + * @subbuf: the start of the new sub-buffer + * @prev_subbuf: the start of the previous sub-buffer + * @prev_padding: unused space at the end of previous sub-buffer + * + * The client should return 1 to continue logging, 0 to stop + * logging. + * + * NOTE: subbuf_start will also be invoked when the buffer is + * created, so that the first sub-buffer can be initialized + * if necessary. In this case, prev_subbuf will be NULL. + * + * NOTE: the client can reserve bytes at the beginning of the new + * sub-buffer by calling subbuf_start_reserve() in this callback. 
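A minimal subbuf_start() implementation, in the spirit of the example in Documentation/filesystems/relay.txt, might look like this sketch (illustrative only); it gives no-overwrite behaviour by refusing to switch into a sub-buffer that readers have not consumed yet, and reserves a small per-sub-buffer header:

static int my_subbuf_start(struct rchan_buf *buf, void *subbuf,
			   void *prev_subbuf, size_t prev_padding)
{
	if (relay_buf_full(buf))
		return 0;	/* stop logging until readers catch up */

	/* reserve room for a per-sub-buffer header and clear it */
	subbuf_start_reserve(buf, sizeof(unsigned int));
	*((unsigned int *)subbuf) = 0;

	return 1;		/* continue logging into the new sub-buffer */
}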
+ */ + int (*subbuf_start) (struct rchan_buf *buf, + void *subbuf, + void *prev_subbuf, + size_t prev_padding); + + /* + * buf_mapped - relay buffer mmap notification + * @buf: the channel buffer + * @filp: relay file pointer + * + * Called when a relay file is successfully mmapped + */ + void (*buf_mapped)(struct rchan_buf *buf, + struct file *filp); + + /* + * buf_unmapped - relay buffer unmap notification + * @buf: the channel buffer + * @filp: relay file pointer + * + * Called when a relay file is successfully unmapped + */ + void (*buf_unmapped)(struct rchan_buf *buf, + struct file *filp); + /* + * create_buf_file - create file to represent a relay channel buffer + * @filename: the name of the file to create + * @parent: the parent of the file to create + * @mode: the mode of the file to create + * @buf: the channel buffer + * @is_global: outparam - set non-zero if the buffer should be global + * + * Called during relay_open(), once for each per-cpu buffer, + * to allow the client to create a file to be used to + * represent the corresponding channel buffer. If the file is + * created outside of relay, the parent must also exist in + * that filesystem. + * + * The callback should return the dentry of the file created + * to represent the relay buffer. + * + * Setting the is_global outparam to a non-zero value will + * cause relay_open() to create a single global buffer rather + * than the default set of per-cpu buffers. + * + * See Documentation/filesystems/relay.txt for more info. + */ + struct dentry *(*create_buf_file)(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global); + + /* + * remove_buf_file - remove file representing a relay channel buffer + * @dentry: the dentry of the file to remove + * + * Called during relay_close(), once for each per-cpu buffer, + * to allow the client to remove a file used to represent a + * channel buffer. + * + * The callback should return 0 if successful, negative if not. + */ + int (*remove_buf_file)(struct dentry *dentry); +}; + +/* + * CONFIG_RELAY kernel API, kernel/relay.c + */ + +struct rchan *relay_open(const char *base_filename, + struct dentry *parent, + size_t subbuf_size, + size_t n_subbufs, + struct rchan_callbacks *cb, + void *private_data); +extern int relay_late_setup_files(struct rchan *chan, + const char *base_filename, + struct dentry *parent); +extern void relay_close(struct rchan *chan); +extern void relay_flush(struct rchan *chan); +extern void relay_subbufs_consumed(struct rchan *chan, + unsigned int cpu, + size_t consumed); +extern void relay_reset(struct rchan *chan); +extern int relay_buf_full(struct rchan_buf *buf); + +extern size_t relay_switch_subbuf(struct rchan_buf *buf, + size_t length); + +/** + * relay_write - write data into the channel + * @chan: relay channel + * @data: data to be written + * @length: number of bytes to write + * + * Writes data into the current cpu's channel buffer. + * + * Protects the buffer by disabling interrupts. Use this + * if you might be logging from interrupt context. Try + * __relay_write() if you know you won't be logging from + * interrupt context. 
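Putting the pieces together, a client might create a channel and log records into it roughly as below (a sketch; my_subbuf_start is the callback sketched earlier, and the create_buf_file/remove_buf_file callbacks, typically debugfs backed, are assumed to be provided elsewhere):

static struct rchan_callbacks my_relay_callbacks = {
	.subbuf_start	 = my_subbuf_start,
	.create_buf_file = my_create_buf_file,	/* assumed elsewhere */
	.remove_buf_file = my_remove_buf_file,	/* assumed elsewhere */
};

static struct rchan *my_chan;

static int my_trace_init(void)
{
	/* 8 sub-buffers of 256 KiB each, one set per cpu */
	my_chan = relay_open("my_trace", NULL, 256 * 1024, 8,
			     &my_relay_callbacks, NULL);
	return my_chan ? 0 : -ENOMEM;
}

static void my_trace_event(const void *rec, size_t len)
{
	relay_write(my_chan, rec, len);	/* safe from interrupt context */
}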
+ */ +static inline void relay_write(struct rchan *chan, + const void *data, + size_t length) +{ + unsigned long flags; + struct rchan_buf *buf; + + local_irq_save(flags); + buf = chan->buf[smp_processor_id()]; + if (unlikely(buf->offset + length > chan->subbuf_size)) + length = relay_switch_subbuf(buf, length); + memcpy(buf->data + buf->offset, data, length); + buf->offset += length; + local_irq_restore(flags); +} + +/** + * __relay_write - write data into the channel + * @chan: relay channel + * @data: data to be written + * @length: number of bytes to write + * + * Writes data into the current cpu's channel buffer. + * + * Protects the buffer by disabling preemption. Use + * relay_write() if you might be logging from interrupt + * context. + */ +static inline void __relay_write(struct rchan *chan, + const void *data, + size_t length) +{ + struct rchan_buf *buf; + + buf = chan->buf[get_cpu()]; + if (unlikely(buf->offset + length > buf->chan->subbuf_size)) + length = relay_switch_subbuf(buf, length); + memcpy(buf->data + buf->offset, data, length); + buf->offset += length; + put_cpu(); +} + +/** + * relay_reserve - reserve slot in channel buffer + * @chan: relay channel + * @length: number of bytes to reserve + * + * Returns pointer to reserved slot, NULL if full. + * + * Reserves a slot in the current cpu's channel buffer. + * Does not protect the buffer at all - caller must provide + * appropriate synchronization. + */ +static inline void *relay_reserve(struct rchan *chan, size_t length) +{ + void *reserved; + struct rchan_buf *buf = chan->buf[smp_processor_id()]; + + if (unlikely(buf->offset + length > buf->chan->subbuf_size)) { + length = relay_switch_subbuf(buf, length); + if (!length) + return NULL; + } + reserved = buf->data + buf->offset; + buf->offset += length; + + return reserved; +} + +/** + * subbuf_start_reserve - reserve bytes at the start of a sub-buffer + * @buf: relay channel buffer + * @length: number of bytes to reserve + * + * Helper function used to reserve bytes at the beginning of + * a sub-buffer in the subbuf_start() callback. + */ +static inline void subbuf_start_reserve(struct rchan_buf *buf, + size_t length) +{ + BUG_ON(length >= buf->chan->subbuf_size - 1); + buf->offset = length; +} + +/* + * exported relay file operations, kernel/relay.c + */ +extern const struct file_operations relay_file_operations; + +#endif /* _LINUX_RELAY_H */ + diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h new file mode 100644 index 00000000..f1ffabb9 --- /dev/null +++ b/include/linux/remoteproc.h @@ -0,0 +1,478 @@ +/* + * Remote Processor Framework + * + * Copyright(c) 2011 Texas Instruments, Inc. + * Copyright(c) 2011 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Texas Instruments nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef REMOTEPROC_H +#define REMOTEPROC_H + +#include <linux/types.h> +#include <linux/kref.h> +#include <linux/klist.h> +#include <linux/mutex.h> +#include <linux/virtio.h> +#include <linux/completion.h> +#include <linux/idr.h> + +/** + * struct resource_table - firmware resource table header + * @ver: version number + * @num: number of resource entries + * @reserved: reserved (must be zero) + * @offset: array of offsets pointing at the various resource entries + * + * A resource table is essentially a list of system resources required + * by the remote processor. It may also include configuration entries. + * If needed, the remote processor firmware should contain this table + * as a dedicated ".resource_table" ELF section. + * + * Some resources entries are mere announcements, where the host is informed + * of specific remoteproc configuration. Other entries require the host to + * do something (e.g. allocate a system resource). Sometimes a negotiation + * is expected, where the firmware requests a resource, and once allocated, + * the host should provide back its details (e.g. address of an allocated + * memory region). + * + * The header of the resource table, as expressed by this structure, + * contains a version number (should we need to change this format in the + * future), the number of available resource entries, and their offsets + * in the table. + * + * Immediately following this header are the resource entries themselves, + * each of which begins with a resource entry header (as described below). + */ +struct resource_table { + u32 ver; + u32 num; + u32 reserved[2]; + u32 offset[0]; +} __packed; + +/** + * struct fw_rsc_hdr - firmware resource entry header + * @type: resource type + * @data: resource data + * + * Every resource entry begins with a 'struct fw_rsc_hdr' header providing + * its @type. The content of the entry itself will immediately follow + * this header, and it should be parsed according to the resource type. + */ +struct fw_rsc_hdr { + u32 type; + u8 data[0]; +} __packed; + +/** + * enum fw_resource_type - types of resource entries + * + * @RSC_CARVEOUT: request for allocation of a physically contiguous + * memory region. + * @RSC_DEVMEM: request to iommu_map a memory-based peripheral. + * @RSC_TRACE: announces the availability of a trace buffer into which + * the remote processor will be writing logs. + * @RSC_VDEV: declare support for a virtio device, and serve as its + * virtio header. + * @RSC_LAST: just keep this one at the end + * + * For more details regarding a specific resource type, please see its + * dedicated structure below. + * + * Please note that these values are used as indices to the rproc_handle_rsc + * lookup table, so please keep them sane. 
Moreover, @RSC_LAST is used to + * check the validity of an index before the lookup table is accessed, so + * please update it as needed. + */ +enum fw_resource_type { + RSC_CARVEOUT = 0, + RSC_DEVMEM = 1, + RSC_TRACE = 2, + RSC_VDEV = 3, + RSC_LAST = 4, +}; + +#define FW_RSC_ADDR_ANY (0xFFFFFFFFFFFFFFFF) + +/** + * struct fw_rsc_carveout - physically contiguous memory request + * @da: device address + * @pa: physical address + * @len: length (in bytes) + * @flags: iommu protection flags + * @reserved: reserved (must be zero) + * @name: human-readable name of the requested memory region + * + * This resource entry requests the host to allocate a physically contiguous + * memory region. + * + * These request entries should precede other firmware resource entries, + * as other entries might request placing other data objects inside + * these memory regions (e.g. data/code segments, trace resource entries, ...). + * + * Allocating memory this way helps utilizing the reserved physical memory + * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries + * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB + * pressure is important; it may have a substantial impact on performance. + * + * If the firmware is compiled with static addresses, then @da should specify + * the expected device address of this memory region. If @da is set to + * FW_RSC_ADDR_ANY, then the host will dynamically allocate it, and then + * overwrite @da with the dynamically allocated address. + * + * We will always use @da to negotiate the device addresses, even if it + * isn't using an iommu. In that case, though, it will obviously contain + * physical addresses. + * + * Some remote processors needs to know the allocated physical address + * even if they do use an iommu. This is needed, e.g., if they control + * hardware accelerators which access the physical memory directly (this + * is the case with OMAP4 for instance). In that case, the host will + * overwrite @pa with the dynamically allocated physical address. + * Generally we don't want to expose physical addresses if we don't have to + * (remote processors are generally _not_ trusted), so we might want to + * change this to happen _only_ when explicitly required by the hardware. + * + * @flags is used to provide IOMMU protection flags, and @name should + * (optionally) contain a human readable name of this carveout region + * (mainly for debugging purposes). + */ +struct fw_rsc_carveout { + u32 da; + u32 pa; + u32 len; + u32 flags; + u32 reserved; + u8 name[32]; +} __packed; + +/** + * struct fw_rsc_devmem - iommu mapping request + * @da: device address + * @pa: physical address + * @len: length (in bytes) + * @flags: iommu protection flags + * @reserved: reserved (must be zero) + * @name: human-readable name of the requested region to be mapped + * + * This resource entry requests the host to iommu map a physically contiguous + * memory region. This is needed in case the remote processor requires + * access to certain memory-based peripherals; _never_ use it to access + * regular memory. + * + * This is obviously only needed if the remote processor is accessing memory + * via an iommu. + * + * @da should specify the required device address, @pa should specify + * the physical address we want to map, @len should specify the size of + * the mapping and @flags is the IOMMU protection flags. As always, @name may + * (optionally) contain a human readable name of this mapping (mainly for + * debugging purposes). 
+ * + * Note: at this point we just "trust" those devmem entries to contain valid + * physical addresses, but this isn't safe and will be changed: eventually we + * want remoteproc implementations to provide us ranges of physical addresses + * the firmware is allowed to request, and not allow firmwares to request + * access to physical addresses that are outside those ranges. + */ +struct fw_rsc_devmem { + u32 da; + u32 pa; + u32 len; + u32 flags; + u32 reserved; + u8 name[32]; +} __packed; + +/** + * struct fw_rsc_trace - trace buffer declaration + * @da: device address + * @len: length (in bytes) + * @reserved: reserved (must be zero) + * @name: human-readable name of the trace buffer + * + * This resource entry provides the host information about a trace buffer + * into which the remote processor will write log messages. + * + * @da specifies the device address of the buffer, @len specifies + * its size, and @name may contain a human readable name of the trace buffer. + * + * After booting the remote processor, the trace buffers are exposed to the + * user via debugfs entries (called trace0, trace1, etc..). + */ +struct fw_rsc_trace { + u32 da; + u32 len; + u32 reserved; + u8 name[32]; +} __packed; + +/** + * struct fw_rsc_vdev_vring - vring descriptor entry + * @da: device address + * @align: the alignment between the consumer and producer parts of the vring + * @num: num of buffers supported by this vring (must be power of two) + * @notifyid is a unique rproc-wide notify index for this vring. This notify + * index is used when kicking a remote processor, to let it know that this + * vring is triggered. + * @reserved: reserved (must be zero) + * + * This descriptor is not a resource entry by itself; it is part of the + * vdev resource type (see below). + * + * Note that @da should either contain the device address where + * the remote processor is expecting the vring, or indicate that + * dynamically allocation of the vring's device address is supported. + */ +struct fw_rsc_vdev_vring { + u32 da; + u32 align; + u32 num; + u32 notifyid; + u32 reserved; +} __packed; + +/** + * struct fw_rsc_vdev - virtio device header + * @id: virtio device id (as in virtio_ids.h) + * @notifyid is a unique rproc-wide notify index for this vdev. This notify + * index is used when kicking a remote processor, to let it know that the + * status/features of this vdev have changes. + * @dfeatures specifies the virtio device features supported by the firmware + * @gfeatures is a place holder used by the host to write back the + * negotiated features that are supported by both sides. + * @config_len is the size of the virtio config space of this vdev. The config + * space lies in the resource table immediate after this vdev header. + * @status is a place holder where the host will indicate its virtio progress. + * @num_of_vrings indicates how many vrings are described in this vdev header + * @reserved: reserved (must be zero) + * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'. + * + * This resource is a virtio device header: it provides information about + * the vdev, and is then used by the host and its peer remote processors + * to negotiate and share certain virtio properties. + * + * By providing this resource entry, the firmware essentially asks remoteproc + * to statically allocate a vdev upon registration of the rproc (dynamic vdev + * allocation is not yet supported). 
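To make the table layout concrete, firmware might declare a static table with one carveout and one trace buffer roughly as in the sketch below (purely illustrative: the section name follows the convention described earlier, offsetof() comes from <stddef.h>, firmware builds would normally use their own fixed-width types, and all addresses, sizes and names are hypothetical):

struct my_resource_table {
	struct resource_table hdr;
	u32 offset[2];		/* offsets of the two entries below */

	u32 carveout_type;	/* RSC_CARVEOUT */
	struct fw_rsc_carveout carveout;

	u32 trace_type;		/* RSC_TRACE */
	struct fw_rsc_trace trace;
} __packed;

__attribute__((section(".resource_table")))
struct my_resource_table table = {
	.hdr = { .ver = 1, .num = 2 },
	.offset = {
		offsetof(struct my_resource_table, carveout_type),
		offsetof(struct my_resource_table, trace_type),
	},
	.carveout_type	= RSC_CARVEOUT,
	.carveout	= {
		.da = 0x00000000, .pa = 0x00000000,
		.len = 0x100000, .name = "text",
	},
	.trace_type	= RSC_TRACE,
	.trace		= {
		.da = 0x000ff000, .len = 0x1000, .name = "trace0",
	},
};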
+ * + * Note: unlike virtualization systems, the term 'host' here means + * the Linux side which is running remoteproc to control the remote + * processors. We use the name 'gfeatures' to comply with virtio's terms, + * though there isn't really any virtualized guest OS here: it's the host + * which is responsible for negotiating the final features. + * Yeah, it's a bit confusing. + * + * Note: immediately following this structure is the virtio config space for + * this vdev (which is specific to the vdev; for more info, read the virtio + * spec). the size of the config space is specified by @config_len. + */ +struct fw_rsc_vdev { + u32 id; + u32 notifyid; + u32 dfeatures; + u32 gfeatures; + u32 config_len; + u8 status; + u8 num_of_vrings; + u8 reserved[2]; + struct fw_rsc_vdev_vring vring[0]; +} __packed; + +/** + * struct rproc_mem_entry - memory entry descriptor + * @va: virtual address + * @dma: dma address + * @len: length, in bytes + * @da: device address + * @priv: associated data + * @node: list node + */ +struct rproc_mem_entry { + void *va; + dma_addr_t dma; + int len; + u32 da; + void *priv; + struct list_head node; +}; + +struct rproc; + +/** + * struct rproc_ops - platform-specific device handlers + * @start: power on the device and boot it + * @stop: power off the device + * @kick: kick a virtqueue (virtqueue id given as a parameter) + */ +struct rproc_ops { + int (*start)(struct rproc *rproc); + int (*stop)(struct rproc *rproc); + void (*kick)(struct rproc *rproc, int vqid); +}; + +/** + * enum rproc_state - remote processor states + * @RPROC_OFFLINE: device is powered off + * @RPROC_SUSPENDED: device is suspended; needs to be woken up to receive + * a message. + * @RPROC_RUNNING: device is up and running + * @RPROC_CRASHED: device has crashed; need to start recovery + * @RPROC_LAST: just keep this one at the end + * + * Please note that the values of these states are used as indices + * to rproc_state_string, a state-to-name lookup table, + * so please keep the two synchronized. @RPROC_LAST is used to check + * the validity of an index before the lookup table is accessed, so + * please update it as needed too. 
+ */ +enum rproc_state { + RPROC_OFFLINE = 0, + RPROC_SUSPENDED = 1, + RPROC_RUNNING = 2, + RPROC_CRASHED = 3, + RPROC_LAST = 4, +}; + +/** + * struct rproc - represents a physical remote processor device + * @node: klist node of this rproc object + * @domain: iommu domain + * @name: human readable name of the rproc + * @firmware: name of firmware file to be loaded + * @priv: private data which belongs to the platform-specific rproc module + * @ops: platform-specific start/stop rproc handlers + * @dev: underlying device + * @refcount: refcount of users that have a valid pointer to this rproc + * @power: refcount of users who need this rproc powered up + * @state: state of the device + * @lock: lock which protects concurrent manipulations of the rproc + * @dbg_dir: debugfs directory of this rproc device + * @traces: list of trace buffers + * @num_traces: number of trace buffers + * @carveouts: list of physically contiguous memory allocations + * @mappings: list of iommu mappings we initiated, needed on shutdown + * @firmware_loading_complete: marks e/o asynchronous firmware loading + * @bootaddr: address of first instruction to boot rproc with (optional) + * @rvdevs: list of remote virtio devices + * @notifyids: idr for dynamically assigning rproc-wide unique notify ids + */ +struct rproc { + struct klist_node node; + struct iommu_domain *domain; + const char *name; + const char *firmware; + void *priv; + const struct rproc_ops *ops; + struct device *dev; + struct kref refcount; + atomic_t power; + unsigned int state; + struct mutex lock; + struct dentry *dbg_dir; + struct list_head traces; + int num_traces; + struct list_head carveouts; + struct list_head mappings; + struct completion firmware_loading_complete; + u32 bootaddr; + struct list_head rvdevs; + struct idr notifyids; +}; + +/* we currently support only two vrings per rvdev */ +#define RVDEV_NUM_VRINGS 2 + +/** + * struct rproc_vring - remoteproc vring state + * @va: virtual address + * @dma: dma address + * @len: length, in bytes + * @da: device address + * @align: vring alignment + * @notifyid: rproc-specific unique vring index + * @rvdev: remote vdev + * @vq: the virtqueue of this vring + */ +struct rproc_vring { + void *va; + dma_addr_t dma; + int len; + u32 da; + u32 align; + int notifyid; + struct rproc_vdev *rvdev; + struct virtqueue *vq; +}; + +/** + * struct rproc_vdev - remoteproc state for a supported virtio device + * @node: list node + * @rproc: the rproc handle + * @vdev: the virio device + * @vring: the vrings for this vdev + * @dfeatures: virtio device features + * @gfeatures: virtio guest features + */ +struct rproc_vdev { + struct list_head node; + struct rproc *rproc; + struct virtio_device vdev; + struct rproc_vring vring[RVDEV_NUM_VRINGS]; + unsigned long dfeatures; + unsigned long gfeatures; +}; + +struct rproc *rproc_get_by_name(const char *name); +void rproc_put(struct rproc *rproc); + +struct rproc *rproc_alloc(struct device *dev, const char *name, + const struct rproc_ops *ops, + const char *firmware, int len); +void rproc_free(struct rproc *rproc); +int rproc_register(struct rproc *rproc); +int rproc_unregister(struct rproc *rproc); + +int rproc_boot(struct rproc *rproc); +void rproc_shutdown(struct rproc *rproc); + +static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) +{ + return container_of(vdev, struct rproc_vdev, vdev); +} + +static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) +{ + struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); + + return rvdev->rproc; +} + 
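For orientation, a platform-specific driver built on the API above would look roughly like the sketch below (illustrative only; the rproc name, firmware file name and the reset/mailbox details are hypothetical, and error handling is trimmed):

static int my_rproc_start(struct rproc *rproc)
{
	/* e.g. release the remote core from reset via a platform register */
	return 0;
}

static int my_rproc_stop(struct rproc *rproc)
{
	/* put the remote core back into reset */
	return 0;
}

static void my_rproc_kick(struct rproc *rproc, int vqid)
{
	/* poke a mailbox/doorbell with vqid so the remote checks the vring */
}

static const struct rproc_ops my_rproc_ops = {
	.start	= my_rproc_start,
	.stop	= my_rproc_stop,
	.kick	= my_rproc_kick,
};

static int my_rproc_probe(struct platform_device *pdev)
{
	struct rproc *rproc;
	int ret;

	rproc = rproc_alloc(&pdev->dev, "my-dsp", &my_rproc_ops,
			    "my-dsp-fw.elf", 0);
	if (!rproc)
		return -ENOMEM;

	platform_set_drvdata(pdev, rproc);

	ret = rproc_register(rproc);
	if (ret)
		rproc_free(rproc);
	return ret;
}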
+#endif /* REMOTEPROC_H */ diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h new file mode 100644 index 00000000..da81af08 --- /dev/null +++ b/include/linux/res_counter.h @@ -0,0 +1,226 @@ +#ifndef __RES_COUNTER_H__ +#define __RES_COUNTER_H__ + +/* + * Resource Counters + * Contain common data types and routines for resource accounting + * + * Copyright 2007 OpenVZ SWsoft Inc + * + * Author: Pavel Emelianov <xemul@openvz.org> + * + * See Documentation/cgroups/resource_counter.txt for more + * info about what this counter is. + */ + +#include <linux/cgroup.h> + +/* + * The core object. the cgroup that wishes to account for some + * resource may include this counter into its structures and use + * the helpers described beyond + */ + +struct res_counter { + /* + * the current resource consumption level + */ + unsigned long long usage; + /* + * the maximal value of the usage from the counter creation + */ + unsigned long long max_usage; + /* + * the limit that usage cannot exceed + */ + unsigned long long limit; + /* + * the limit that usage can be exceed + */ + unsigned long long soft_limit; + /* + * the number of unsuccessful attempts to consume the resource + */ + unsigned long long failcnt; + /* + * the lock to protect all of the above. + * the routines below consider this to be IRQ-safe + */ + spinlock_t lock; + /* + * Parent counter, used for hierarchial resource accounting + */ + struct res_counter *parent; +}; + +#define RESOURCE_MAX (unsigned long long)LLONG_MAX + +/** + * Helpers to interact with userspace + * res_counter_read_u64() - returns the value of the specified member. + * res_counter_read/_write - put/get the specified fields from the + * res_counter struct to/from the user + * + * @counter: the counter in question + * @member: the field to work with (see RES_xxx below) + * @buf: the buffer to opeate on,... + * @nbytes: its size... + * @pos: and the offset. + */ + +u64 res_counter_read_u64(struct res_counter *counter, int member); + +ssize_t res_counter_read(struct res_counter *counter, int member, + const char __user *buf, size_t nbytes, loff_t *pos, + int (*read_strategy)(unsigned long long val, char *s)); + +typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val); + +int res_counter_memparse_write_strategy(const char *buf, + unsigned long long *res); + +int res_counter_write(struct res_counter *counter, int member, + const char *buffer, write_strategy_fn write_strategy); + +/* + * the field descriptors. one for each member of res_counter + */ + +enum { + RES_USAGE, + RES_MAX_USAGE, + RES_LIMIT, + RES_FAILCNT, + RES_SOFT_LIMIT, +}; + +/* + * helpers for accounting + */ + +void res_counter_init(struct res_counter *counter, struct res_counter *parent); + +/* + * charge - try to consume more resource. + * + * @counter: the counter + * @val: the amount of the resource. each controller defines its own + * units, e.g. numbers, bytes, Kbytes, etc + * + * returns 0 on success and <0 if the counter->usage will exceed the + * counter->limit _locked call expects the counter->lock to be taken + * + * charge_nofail works the same, except that it charges the resource + * counter unconditionally, and returns < 0 if the after the current + * charge we are over limit. 
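For instance, a controller that accounts a resource in bytes might initialize, charge and uncharge its counter as in this sketch (struct my_cgroup and the helper names are hypothetical):

struct my_cgroup {
	struct res_counter res;
	/* ... other controller state ... */
};

static void my_cgroup_init(struct my_cgroup *cg, struct my_cgroup *parent)
{
	/* hook into the hierarchy so ancestors are charged as well */
	res_counter_init(&cg->res, parent ? &parent->res : NULL);
}

static int my_charge(struct my_cgroup *cg, unsigned long nr_bytes)
{
	struct res_counter *fail_at;
	int ret;

	ret = res_counter_charge(&cg->res, nr_bytes, &fail_at);
	if (ret < 0) {
		/* fail_at points at the counter (possibly an ancestor)
		 * whose limit would have been exceeded */
		return ret;
	}
	return 0;
}

static void my_uncharge(struct my_cgroup *cg, unsigned long nr_bytes)
{
	res_counter_uncharge(&cg->res, nr_bytes);
}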
+ */ + +int __must_check res_counter_charge_locked(struct res_counter *counter, + unsigned long val); +int __must_check res_counter_charge(struct res_counter *counter, + unsigned long val, struct res_counter **limit_fail_at); +int __must_check res_counter_charge_nofail(struct res_counter *counter, + unsigned long val, struct res_counter **limit_fail_at); + +/* + * uncharge - tell that some portion of the resource is released + * + * @counter: the counter + * @val: the amount of the resource + * + * these calls check for usage underflow and show a warning on the console + * _locked call expects the counter->lock to be taken + */ + +void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); +void res_counter_uncharge(struct res_counter *counter, unsigned long val); + +/** + * res_counter_margin - calculate chargeable space of a counter + * @cnt: the counter + * + * Returns the difference between the hard limit and the current usage + * of resource counter @cnt. + */ +static inline unsigned long long res_counter_margin(struct res_counter *cnt) +{ + unsigned long long margin; + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + if (cnt->limit > cnt->usage) + margin = cnt->limit - cnt->usage; + else + margin = 0; + spin_unlock_irqrestore(&cnt->lock, flags); + return margin; +} + +/** + * Get the difference between the usage and the soft limit + * @cnt: The counter + * + * Returns 0 if usage is less than or equal to soft limit + * The difference between usage and soft limit, otherwise. + */ +static inline unsigned long long +res_counter_soft_limit_excess(struct res_counter *cnt) +{ + unsigned long long excess; + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + if (cnt->usage <= cnt->soft_limit) + excess = 0; + else + excess = cnt->usage - cnt->soft_limit; + spin_unlock_irqrestore(&cnt->lock, flags); + return excess; +} + +static inline void res_counter_reset_max(struct res_counter *cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + cnt->max_usage = cnt->usage; + spin_unlock_irqrestore(&cnt->lock, flags); +} + +static inline void res_counter_reset_failcnt(struct res_counter *cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + cnt->failcnt = 0; + spin_unlock_irqrestore(&cnt->lock, flags); +} + +static inline int res_counter_set_limit(struct res_counter *cnt, + unsigned long long limit) +{ + unsigned long flags; + int ret = -EBUSY; + + spin_lock_irqsave(&cnt->lock, flags); + if (cnt->usage <= limit) { + cnt->limit = limit; + ret = 0; + } + spin_unlock_irqrestore(&cnt->lock, flags); + return ret; +} + +static inline int +res_counter_set_soft_limit(struct res_counter *cnt, + unsigned long long soft_limit) +{ + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + cnt->soft_limit = soft_limit; + spin_unlock_irqrestore(&cnt->lock, flags); + return 0; +} + +#endif diff --git a/include/linux/resource.h b/include/linux/resource.h new file mode 100644 index 00000000..d01c96c1 --- /dev/null +++ b/include/linux/resource.h @@ -0,0 +1,89 @@ +#ifndef _LINUX_RESOURCE_H +#define _LINUX_RESOURCE_H + +#include <linux/time.h> +#include <linux/types.h> + +/* + * Resource control/accounting header file for linux + */ + +/* + * Definition of struct rusage taken from BSD 4.3 Reno + * + * We don't support all of these yet, but we might as well have them.... + * Otherwise, each time we add new items, programs which depend on this + * structure will lose. This reduces the chances of that happening. 
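As a small in-kernel usage sketch of the limit API declared at the bottom of this header (the helper name is hypothetical; passing a NULL new_rlim to do_prlimit() only reads the current values):

static unsigned long my_get_stack_limit(struct task_struct *tsk)
{
	struct rlimit r;

	if (do_prlimit(tsk, RLIMIT_STACK, NULL, &r))
		return _STK_LIM;	/* fall back to the default */

	return r.rlim_cur;
}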
+ */ +#define RUSAGE_SELF 0 +#define RUSAGE_CHILDREN (-1) +#define RUSAGE_BOTH (-2) /* sys_wait4() uses this */ +#define RUSAGE_THREAD 1 /* only the calling thread */ + +struct rusage { + struct timeval ru_utime; /* user time used */ + struct timeval ru_stime; /* system time used */ + long ru_maxrss; /* maximum resident set size */ + long ru_ixrss; /* integral shared memory size */ + long ru_idrss; /* integral unshared data size */ + long ru_isrss; /* integral unshared stack size */ + long ru_minflt; /* page reclaims */ + long ru_majflt; /* page faults */ + long ru_nswap; /* swaps */ + long ru_inblock; /* block input operations */ + long ru_oublock; /* block output operations */ + long ru_msgsnd; /* messages sent */ + long ru_msgrcv; /* messages received */ + long ru_nsignals; /* signals received */ + long ru_nvcsw; /* voluntary context switches */ + long ru_nivcsw; /* involuntary " */ +}; + +struct rlimit { + unsigned long rlim_cur; + unsigned long rlim_max; +}; + +#define RLIM64_INFINITY (~0ULL) + +struct rlimit64 { + __u64 rlim_cur; + __u64 rlim_max; +}; + +#define PRIO_MIN (-20) +#define PRIO_MAX 20 + +#define PRIO_PROCESS 0 +#define PRIO_PGRP 1 +#define PRIO_USER 2 + +/* + * Limit the stack by to some sane default: root can always + * increase this limit if needed.. 8MB seems reasonable. + */ +#define _STK_LIM (8*1024*1024) + +/* + * GPG2 wants 64kB of mlocked memory, to make sure pass phrases + * and other sensitive information are never written to disk. + */ +#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024) + +/* + * Due to binary compatibility, the actual resource numbers + * may be different for different linux versions.. + */ +#include <asm/resource.h> + +#ifdef __KERNEL__ + +struct task_struct; + +int getrusage(struct task_struct *p, int who, struct rusage __user *ru); +int do_prlimit(struct task_struct *tsk, unsigned int resource, + struct rlimit *new_rlim, struct rlimit *old_rlim); + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h new file mode 100644 index 00000000..f31db236 --- /dev/null +++ b/include/linux/resume-trace.h @@ -0,0 +1,34 @@ +#ifndef RESUME_TRACE_H +#define RESUME_TRACE_H + +#ifdef CONFIG_PM_TRACE +#include <asm/resume-trace.h> +#include <linux/types.h> + +extern int pm_trace_enabled; + +static inline int pm_trace_is_enabled(void) +{ + return pm_trace_enabled; +} + +struct device; +extern void set_trace_device(struct device *); +extern void generate_resume_trace(const void *tracedata, unsigned int user); +extern int show_trace_dev_match(char *buf, size_t size); + +#define TRACE_DEVICE(dev) do { \ + if (pm_trace_enabled) \ + set_trace_device(dev); \ + } while(0) + +#else + +static inline int pm_trace_is_enabled(void) { return 0; } + +#define TRACE_DEVICE(dev) do { } while (0) +#define TRACE_RESUME(dev) do { } while (0) + +#endif + +#endif diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h new file mode 100644 index 00000000..4d09f6ea --- /dev/null +++ b/include/linux/rfkill-gpio.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2011, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + + +#ifndef __RFKILL_GPIO_H +#define __RFKILL_GPIO_H + +#include <linux/types.h> +#include <linux/rfkill.h> + +/** + * struct rfkill_gpio_platform_data - platform data for rfkill gpio device. + * for unused gpio's, the expected value is -1. + * @name: name for the gpio rf kill instance + * @reset_gpio: GPIO which is used for reseting rfkill switch + * @shutdown_gpio: GPIO which is used for shutdown of rfkill switch + * @power_clk_name: [optional] name of clk to turn off while blocked + * @gpio_runtime_close: clean up platform specific gpio configuration + * @gpio_runtime_setup: set up platform specific gpio configuration + */ + +struct rfkill_gpio_platform_data { + char *name; + int reset_gpio; + int shutdown_gpio; + const char *power_clk_name; + enum rfkill_type type; + void (*gpio_runtime_close)(struct platform_device *); + int (*gpio_runtime_setup)(struct platform_device *); +}; + +#endif /* __RFKILL_GPIO_H */ diff --git a/include/linux/rfkill-regulator.h b/include/linux/rfkill-regulator.h new file mode 100644 index 00000000..aca36bc8 --- /dev/null +++ b/include/linux/rfkill-regulator.h @@ -0,0 +1,48 @@ +/* + * rfkill-regulator.c - Regulator consumer driver for rfkill + * + * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com> + * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef __LINUX_RFKILL_REGULATOR_H +#define __LINUX_RFKILL_REGULATOR_H + +/* + * Use "vrfkill" as supply id when declaring the regulator consumer: + * + * static struct regulator_consumer_supply pcap_regulator_V6_consumers [] = { + * { .dev_name = "rfkill-regulator.0", .supply = "vrfkill" }, + * }; + * + * If you have several regulator driven rfkill, you can append a numerical id to + * .dev_name as done above, and use the same id when declaring the platform + * device: + * + * static struct rfkill_regulator_platform_data ezx_rfkill_bt_data = { + * .name = "ezx-bluetooth", + * .type = RFKILL_TYPE_BLUETOOTH, + * }; + * + * static struct platform_device a910_rfkill = { + * .name = "rfkill-regulator", + * .id = 0, + * .dev = { + * .platform_data = &ezx_rfkill_bt_data, + * }, + * }; + */ + +#include <linux/rfkill.h> + +struct rfkill_regulator_platform_data { + char *name; /* the name for the rfkill switch */ + enum rfkill_type type; /* the type as specified in rfkill.h */ +}; + +#endif /* __LINUX_RFKILL_REGULATOR_H */ diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h new file mode 100644 index 00000000..6fdf0273 --- /dev/null +++ b/include/linux/rfkill.h @@ -0,0 +1,359 @@ +#ifndef __RFKILL_H +#define __RFKILL_H + +/* + * Copyright (C) 2006 - 2007 Ivo van Doorn + * Copyright (C) 2007 Dmitry Torokhov + * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/types.h> + +/* define userspace visible states */ +#define RFKILL_STATE_SOFT_BLOCKED 0 +#define RFKILL_STATE_UNBLOCKED 1 +#define RFKILL_STATE_HARD_BLOCKED 2 + +/** + * enum rfkill_type - type of rfkill switch. + * + * @RFKILL_TYPE_ALL: toggles all switches (requests only - not a switch type) + * @RFKILL_TYPE_WLAN: switch is on a 802.11 wireless network device. + * @RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device. + * @RFKILL_TYPE_UWB: switch is on a ultra wideband device. + * @RFKILL_TYPE_WIMAX: switch is on a WiMAX device. + * @RFKILL_TYPE_WWAN: switch is on a wireless WAN device. + * @RFKILL_TYPE_GPS: switch is on a GPS device. + * @RFKILL_TYPE_FM: switch is on a FM radio device. 
+ * @NUM_RFKILL_TYPES: number of defined rfkill types + */ +enum rfkill_type { + RFKILL_TYPE_ALL = 0, + RFKILL_TYPE_WLAN, + RFKILL_TYPE_BLUETOOTH, + RFKILL_TYPE_UWB, + RFKILL_TYPE_WIMAX, + RFKILL_TYPE_WWAN, + RFKILL_TYPE_GPS, + RFKILL_TYPE_FM, + NUM_RFKILL_TYPES, +}; + +/** + * enum rfkill_operation - operation types + * @RFKILL_OP_ADD: a device was added + * @RFKILL_OP_DEL: a device was removed + * @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device + * @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all) + */ +enum rfkill_operation { + RFKILL_OP_ADD = 0, + RFKILL_OP_DEL, + RFKILL_OP_CHANGE, + RFKILL_OP_CHANGE_ALL, +}; + +/** + * struct rfkill_event - events for userspace on /dev/rfkill + * @idx: index of dev rfkill + * @type: type of the rfkill struct + * @op: operation code + * @hard: hard state (0/1) + * @soft: soft state (0/1) + * + * Structure used for userspace communication on /dev/rfkill, + * used for events from the kernel and control to the kernel. + */ +struct rfkill_event { + __u32 idx; + __u8 type; + __u8 op; + __u8 soft, hard; +} __attribute__((packed)); + +/* + * We are planning to be backward and forward compatible with changes + * to the event struct, by adding new, optional, members at the end. + * When reading an event (whether the kernel from userspace or vice + * versa) we need to accept anything that's at least as large as the + * version 1 event size, but might be able to accept other sizes in + * the future. + * + * One exception is the kernel -- we already have two event sizes in + * that we've made the 'hard' member optional since our only option + * is to ignore it anyway. + */ +#define RFKILL_EVENT_SIZE_V1 8 + +/* ioctl for turning off rfkill-input (if present) */ +#define RFKILL_IOC_MAGIC 'R' +#define RFKILL_IOC_NOINPUT 1 +#define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT) + +/* and that's all userspace gets */ +#ifdef __KERNEL__ +/* don't allow anyone to use these in the kernel */ +enum rfkill_user_states { + RFKILL_USER_STATE_SOFT_BLOCKED = RFKILL_STATE_SOFT_BLOCKED, + RFKILL_USER_STATE_UNBLOCKED = RFKILL_STATE_UNBLOCKED, + RFKILL_USER_STATE_HARD_BLOCKED = RFKILL_STATE_HARD_BLOCKED, +}; +#undef RFKILL_STATE_SOFT_BLOCKED +#undef RFKILL_STATE_UNBLOCKED +#undef RFKILL_STATE_HARD_BLOCKED + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/leds.h> +#include <linux/err.h> + +struct device; +/* this is opaque */ +struct rfkill; + +/** + * struct rfkill_ops - rfkill driver methods + * + * @poll: poll the rfkill block state(s) -- only assign this method + * when you need polling. When called, simply call one of the + * rfkill_set{,_hw,_sw}_state family of functions. If the hw + * is getting unblocked you need to take into account the return + * value of those functions to make sure the software block is + * properly used. + * @query: query the rfkill block state(s) and call exactly one of the + * rfkill_set{,_hw,_sw}_state family of functions. Assign this + * method if input events can cause hardware state changes to make + * the rfkill core query your driver before setting a requested + * block. + * @set_block: turn the transmitter on (blocked == false) or off + * (blocked == true) -- ignore and return 0 when hard blocked. + * This callback must be assigned. 
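+ *
+ * A minimal sketch of how a driver might wire these ops up, assuming a
+ * hypothetical foo_priv context and foo_radio_set_power() helper:
+ *
+ *	static int foo_rfkill_set_block(void *data, bool blocked)
+ *	{
+ *		struct foo_priv *foo = data;
+ *
+ *		return foo_radio_set_power(foo, !blocked);
+ *	}
+ *
+ *	static const struct rfkill_ops foo_rfkill_ops = {
+ *		.set_block	= foo_rfkill_set_block,
+ *	};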
+ */ +struct rfkill_ops { + void (*poll)(struct rfkill *rfkill, void *data); + void (*query)(struct rfkill *rfkill, void *data); + int (*set_block)(void *data, bool blocked); +}; + +#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) +/** + * rfkill_alloc - allocate rfkill structure + * @name: name of the struct -- the string is not copied internally + * @parent: device that has rf switch on it + * @type: type of the switch (RFKILL_TYPE_*) + * @ops: rfkill methods + * @ops_data: data passed to each method + * + * This function should be called by the transmitter driver to allocate an + * rfkill structure. Returns %NULL on failure. + */ +struct rfkill * __must_check rfkill_alloc(const char *name, + struct device *parent, + const enum rfkill_type type, + const struct rfkill_ops *ops, + void *ops_data); + +/** + * rfkill_register - Register a rfkill structure. + * @rfkill: rfkill structure to be registered + * + * This function should be called by the transmitter driver to register + * the rfkill structure. Before calling this function the driver needs + * to be ready to service method calls from rfkill. + * + * If rfkill_init_sw_state() is not called before registration, + * set_block() will be called to initialize the software blocked state + * to a default value. + * + * If the hardware blocked state is not set before registration, + * it is assumed to be unblocked. + */ +int __must_check rfkill_register(struct rfkill *rfkill); + +/** + * rfkill_pause_polling(struct rfkill *rfkill) + * + * Pause polling -- say transmitter is off for other reasons. + * NOTE: not necessary for suspend/resume -- in that case the + * core stops polling anyway + */ +void rfkill_pause_polling(struct rfkill *rfkill); + +/** + * rfkill_resume_polling(struct rfkill *rfkill) + * + * Pause polling -- say transmitter is off for other reasons. + * NOTE: not necessary for suspend/resume -- in that case the + * core stops polling anyway + */ +void rfkill_resume_polling(struct rfkill *rfkill); + + +/** + * rfkill_unregister - Unregister a rfkill structure. + * @rfkill: rfkill structure to be unregistered + * + * This function should be called by the network driver during device + * teardown to destroy rfkill structure. Until it returns, the driver + * needs to be able to service method calls. + */ +void rfkill_unregister(struct rfkill *rfkill); + +/** + * rfkill_destroy - free rfkill structure + * @rfkill: rfkill structure to be destroyed + * + * Destroys the rfkill structure. + */ +void rfkill_destroy(struct rfkill *rfkill); + +/** + * rfkill_set_hw_state - Set the internal rfkill hardware block state + * @rfkill: pointer to the rfkill class to modify. + * @state: the current hardware block state to set + * + * rfkill drivers that get events when the hard-blocked state changes + * use this function to notify the rfkill core (and through that also + * userspace) of the current state. They should also use this after + * resume if the state could have changed. + * + * You need not (but may) call this function if poll_state is assigned. + * + * This function can be called in any context, even from within rfkill + * callbacks. + * + * The function returns the combined block state (true if transmitter + * should be blocked) so that drivers need not keep track of the soft + * block state -- which they might not be able to. + */ +bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_set_sw_state - Set the internal rfkill software block state + * @rfkill: pointer to the rfkill class to modify. 
+ * @state: the current software block state to set + * + * rfkill drivers that get events when the soft-blocked state changes + * (yes, some platforms directly act on input but allow changing again) + * use this function to notify the rfkill core (and through that also + * userspace) of the current state. + * + * Drivers should also call this function after resume if the state has + * been changed by the user. This only makes sense for "persistent" + * devices (see rfkill_init_sw_state()). + * + * This function can be called in any context, even from within rfkill + * callbacks. + * + * The function returns the combined block state (true if transmitter + * should be blocked). + */ +bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_init_sw_state - Initialize persistent software block state + * @rfkill: pointer to the rfkill class to modify. + * @state: the current software block state to set + * + * rfkill drivers that preserve their software block state over power off + * use this function to notify the rfkill core (and through that also + * userspace) of their initial state. It should only be used before + * registration. + * + * In addition, it marks the device as "persistent", an attribute which + * can be read by userspace. Persistent devices are expected to preserve + * their own state when suspended. + */ +void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_set_states - Set the internal rfkill block states + * @rfkill: pointer to the rfkill class to modify. + * @sw: the current software block state to set + * @hw: the current hardware block state to set + * + * This function can be called in any context, even from within rfkill + * callbacks. + */ +void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); + +/** + * rfkill_blocked - query rfkill block + * + * @rfkill: rfkill struct to query + */ +bool rfkill_blocked(struct rfkill *rfkill); +#else /* !RFKILL */ +static inline struct rfkill * __must_check +rfkill_alloc(const char *name, + struct device *parent, + const enum rfkill_type type, + const struct rfkill_ops *ops, + void *ops_data) +{ + return ERR_PTR(-ENODEV); +} + +static inline int __must_check rfkill_register(struct rfkill *rfkill) +{ + if (rfkill == ERR_PTR(-ENODEV)) + return 0; + return -EINVAL; +} + +static inline void rfkill_pause_polling(struct rfkill *rfkill) +{ +} + +static inline void rfkill_resume_polling(struct rfkill *rfkill) +{ +} + +static inline void rfkill_unregister(struct rfkill *rfkill) +{ +} + +static inline void rfkill_destroy(struct rfkill *rfkill) +{ +} + +static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) +{ + return blocked; +} + +static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) +{ + return blocked; +} + +static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) +{ +} + +static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) +{ +} + +static inline bool rfkill_blocked(struct rfkill *rfkill) +{ + return false; +} +#endif /* RFKILL || RFKILL_MODULE */ + +#endif /* __KERNEL__ */ + +#endif /* RFKILL_H */ diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h new file mode 100644 index 00000000..7be2e88f --- /dev/null +++ b/include/linux/ring_buffer.h @@ -0,0 +1,191 @@ +#ifndef _LINUX_RING_BUFFER_H +#define _LINUX_RING_BUFFER_H + +#include <linux/kmemcheck.h> +#include <linux/mm.h> +#include <linux/seq_file.h> + +struct ring_buffer; +struct ring_buffer_iter; + +/* + * Don't refer 
to this struct directly, use functions below. + */ +struct ring_buffer_event { + kmemcheck_bitfield_begin(bitfield); + u32 type_len:5, time_delta:27; + kmemcheck_bitfield_end(bitfield); + + u32 array[]; +}; + +/** + * enum ring_buffer_type - internal ring buffer types + * + * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event + * If time_delta is 0: + * array is ignored + * size is variable depending on how much + * padding is needed + * If time_delta is non zero: + * array[0] holds the actual length + * size = 4 + length (bytes) + * + * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta + * array[0] = time delta (28 .. 59) + * size = 8 bytes + * + * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock + * array[0] = tv_nsec + * array[1..2] = tv_sec + * size = 16 bytes + * + * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX: + * Data record + * If type_len is zero: + * array[0] holds the actual length + * array[1..(length+3)/4] holds data + * size = 4 + length (bytes) + * else + * length = type_len << 2 + * array[0..(length+3)/4-1] holds data + * size = 4 + length (bytes) + */ +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING, + RINGBUF_TYPE_TIME_EXTEND, + /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ + RINGBUF_TYPE_TIME_STAMP, +}; + +unsigned ring_buffer_event_length(struct ring_buffer_event *event); +void *ring_buffer_event_data(struct ring_buffer_event *event); + +/* + * ring_buffer_discard_commit will remove an event that has not + * ben committed yet. If this is used, then ring_buffer_unlock_commit + * must not be called on the discarded event. This function + * will try to remove the event from the ring buffer completely + * if another event has not been written after it. + * + * Example use: + * + * if (some_condition) + * ring_buffer_discard_commit(buffer, event); + * else + * ring_buffer_unlock_commit(buffer, event); + */ +void ring_buffer_discard_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event); + +/* + * size is in bytes for each per CPU buffer. + */ +struct ring_buffer * +__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); + +/* + * Because the ring buffer is generic, if other users of the ring buffer get + * traced by ftrace, it can produce lockdep warnings. We need to keep each + * ring buffer's lock class separate. 
+ */ +#define ring_buffer_alloc(size, flags) \ +({ \ + static struct lock_class_key __key; \ + __ring_buffer_alloc((size), (flags), &__key); \ +}) + +void ring_buffer_free(struct ring_buffer *buffer); + +int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); + +void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val); + +struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, + unsigned long length); +int ring_buffer_unlock_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event); +int ring_buffer_write(struct ring_buffer *buffer, + unsigned long length, void *data); + +struct ring_buffer_event * +ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, + unsigned long *lost_events); +struct ring_buffer_event * +ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, + unsigned long *lost_events); + +struct ring_buffer_iter * +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu); +void ring_buffer_read_prepare_sync(void); +void ring_buffer_read_start(struct ring_buffer_iter *iter); +void ring_buffer_read_finish(struct ring_buffer_iter *iter); + +struct ring_buffer_event * +ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); +struct ring_buffer_event * +ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); +void ring_buffer_iter_reset(struct ring_buffer_iter *iter); +int ring_buffer_iter_empty(struct ring_buffer_iter *iter); + +unsigned long ring_buffer_size(struct ring_buffer *buffer); + +void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); +void ring_buffer_reset(struct ring_buffer *buffer); + +#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP +int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, + struct ring_buffer *buffer_b, int cpu); +#else +static inline int +ring_buffer_swap_cpu(struct ring_buffer *buffer_a, + struct ring_buffer *buffer_b, int cpu) +{ + return -ENODEV; +} +#endif + +int ring_buffer_empty(struct ring_buffer *buffer); +int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); + +void ring_buffer_record_disable(struct ring_buffer *buffer); +void ring_buffer_record_enable(struct ring_buffer *buffer); +void ring_buffer_record_off(struct ring_buffer *buffer); +void ring_buffer_record_on(struct ring_buffer *buffer); +int ring_buffer_record_is_on(struct ring_buffer *buffer); +void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); +void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); + +unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_entries(struct ring_buffer *buffer); +unsigned long ring_buffer_overruns(struct ring_buffer *buffer); +unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); + +u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); +void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, + int cpu, u64 *ts); +void ring_buffer_set_clock(struct ring_buffer *buffer, + u64 (*clock)(void)); + +size_t ring_buffer_page_len(void *page); + + +void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); +void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); +int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, + size_t len, int cpu, int full); 
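+
+/*
+ * Typical write-side usage, as an illustrative sketch only ("my_payload"
+ * and the error handling below are placeholders, not part of this API):
+ *
+ *	struct ring_buffer_event *event;
+ *	void *body;
+ *
+ *	event = ring_buffer_lock_reserve(buffer, sizeof(my_payload));
+ *	if (!event)
+ *		return -EBUSY;
+ *	body = ring_buffer_event_data(event);
+ *	memcpy(body, &my_payload, sizeof(my_payload));
+ *	ring_buffer_unlock_commit(buffer, event);
+ *
+ * Readers either consume events with ring_buffer_consume() or iterate
+ * without consuming via ring_buffer_read_prepare()/ring_buffer_read_start().
+ */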
+ +struct trace_seq; + +int ring_buffer_print_entry_header(struct trace_seq *s); +int ring_buffer_print_page_header(struct trace_seq *s); + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1 << 0, +}; + +#endif /* _LINUX_RING_BUFFER_H */ diff --git a/include/linux/rio.h b/include/linux/rio.h new file mode 100644 index 00000000..4d506111 --- /dev/null +++ b/include/linux/rio.h @@ -0,0 +1,405 @@ +/* + * RapidIO interconnect services + * (RapidIO Interconnect Specification, http://www.rapidio.org) + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_RIO_H +#define LINUX_RIO_H + +#include <linux/types.h> +#include <linux/ioport.h> +#include <linux/list.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/rio_regs.h> + +#define RIO_NO_HOPCOUNT -1 +#define RIO_INVALID_DESTID 0xffff + +#define RIO_MAX_MPORTS 8 +#define RIO_MAX_MPORT_RESOURCES 16 +#define RIO_MAX_DEV_RESOURCES 16 + +#define RIO_GLOBAL_TABLE 0xff /* Indicates access of a switch's + global routing table if it + has multiple (or per port) + tables */ + +#define RIO_INVALID_ROUTE 0xff /* Indicates that a route table + entry is invalid (no route + exists for the device ID) */ + +#define RIO_MAX_ROUTE_ENTRIES(size) (size ? (1 << 16) : (1 << 8)) +#define RIO_ANY_DESTID(size) (size ? 0xffff : 0xff) + +#define RIO_MAX_MBOX 4 +#define RIO_MAX_MSG_SIZE 0x1000 + +/* + * Error values that may be returned by RIO functions. + */ +#define RIO_SUCCESSFUL 0x00 +#define RIO_BAD_SIZE 0x81 + +/* + * For RIO devices, the region numbers are assigned this way: + * + * 0 RapidIO outbound doorbells + * 1-15 RapidIO memory regions + * + * For RIO master ports, the region number are assigned this way: + * + * 0 RapidIO inbound doorbells + * 1 RapidIO inbound mailboxes + * 1 RapidIO outbound mailboxes + */ +#define RIO_DOORBELL_RESOURCE 0 +#define RIO_INB_MBOX_RESOURCE 1 +#define RIO_OUTB_MBOX_RESOURCE 2 + +#define RIO_PW_MSG_SIZE 64 + +/* + * A component tag value (stored in the component tag CSR) is used as device's + * unique identifier assigned during enumeration. Besides being used for + * identifying switches (which do not have device ID register), it also is used + * by error management notification and therefore has to be assigned + * to endpoints as well. 
+ */ +#define RIO_CTAG_RESRVD 0xfffe0000 /* Reserved */ +#define RIO_CTAG_UDEVID 0x0001ffff /* Unique device identifier */ + +extern struct bus_type rio_bus_type; +extern struct device rio_bus; +extern struct list_head rio_devices; /* list of all devices */ + +struct rio_mport; +struct rio_dev; +union rio_pw_msg; + +/** + * struct rio_switch - RIO switch info + * @node: Node in global list of switches + * @switchid: Switch ID that is unique across a network + * @route_table: Copy of switch routing table + * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0 + * @add_entry: Callback for switch-specific route add function + * @get_entry: Callback for switch-specific route get function + * @clr_table: Callback for switch-specific clear route table function + * @set_domain: Callback for switch-specific domain setting function + * @get_domain: Callback for switch-specific domain get function + * @em_init: Callback for switch-specific error management init function + * @em_handle: Callback for switch-specific error management handler function + * @sw_sysfs: Callback that initializes switch-specific sysfs attributes + * @nextdev: Array of per-port pointers to the next attached device + */ +struct rio_switch { + struct list_head node; + u16 switchid; + u8 *route_table; + u32 port_ok; + int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port); + int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port); + int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table); + int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain); + int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain); + int (*em_init) (struct rio_dev *dev); + int (*em_handle) (struct rio_dev *dev, u8 swport); + int (*sw_sysfs) (struct rio_dev *dev, int create); + struct rio_dev *nextdev[0]; +}; + +/** + * struct rio_dev - RIO device info + * @global_list: Node in list of all RIO devices + * @net_list: Node in list of RIO devices in a network + * @net: Network this device is a part of + * @did: Device ID + * @vid: Vendor ID + * @device_rev: Device revision + * @asm_did: Assembly device ID + * @asm_vid: Assembly vendor ID + * @asm_rev: Assembly revision + * @efptr: Extended feature pointer + * @pef: Processing element features + * @swpinfo: Switch port info + * @src_ops: Source operation capabilities + * @dst_ops: Destination operation capabilities + * @comp_tag: RIO component tag + * @phys_efptr: RIO device extended features pointer + * @em_efptr: RIO Error Management features pointer + * @dma_mask: Mask of bits of RIO address this device implements + * @driver: Driver claiming this device + * @dev: Device model device + * @riores: RIO resources this device owns + * @pwcback: port-write callback function for this device + * @destid: Network destination ID (or associated destid for switch) + * @hopcount: Hopcount to this device + * @prev: Previous RIO device connected to the current one + * @rswitch: struct rio_switch (if valid for this device) + */ +struct rio_dev { + struct list_head global_list; /* node in list of all RIO devices */ + struct list_head net_list; /* node in per net list */ + struct rio_net *net; /* RIO net this device resides in */ + u16 did; + u16 vid; + u32 device_rev; + u16 asm_did; + u16 asm_vid; + u16 asm_rev; + u16 efptr; + u32 pef; + u32 swpinfo; + u32 src_ops; + u32 dst_ops; + u32 comp_tag; + u32 
phys_efptr; + u32 em_efptr; + u64 dma_mask; + struct rio_driver *driver; /* RIO driver claiming this device */ + struct device dev; /* LDM device structure */ + struct resource riores[RIO_MAX_DEV_RESOURCES]; + int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step); + u16 destid; + u8 hopcount; + struct rio_dev *prev; + struct rio_switch rswitch[0]; /* RIO switch info */ +}; + +#define rio_dev_g(n) list_entry(n, struct rio_dev, global_list) +#define rio_dev_f(n) list_entry(n, struct rio_dev, net_list) +#define to_rio_dev(n) container_of(n, struct rio_dev, dev) +#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0]) + +/** + * struct rio_msg - RIO message event + * @res: Mailbox resource + * @mcback: Message event callback + */ +struct rio_msg { + struct resource *res; + void (*mcback) (struct rio_mport * mport, void *dev_id, int mbox, int slot); +}; + +/** + * struct rio_dbell - RIO doorbell event + * @node: Node in list of doorbell events + * @res: Doorbell resource + * @dinb: Doorbell event callback + * @dev_id: Device specific pointer to pass on event + */ +struct rio_dbell { + struct list_head node; + struct resource *res; + void (*dinb) (struct rio_mport *mport, void *dev_id, u16 src, u16 dst, u16 info); + void *dev_id; +}; + +enum rio_phy_type { + RIO_PHY_PARALLEL, + RIO_PHY_SERIAL, +}; + +/** + * struct rio_mport - RIO master port info + * @dbells: List of doorbell events + * @node: Node in global list of master ports + * @nnode: Node in network list of master ports + * @iores: I/O mem resource that this master port interface owns + * @riores: RIO resources that this master port interfaces owns + * @inb_msg: RIO inbound message event descriptors + * @outb_msg: RIO outbound message event descriptors + * @host_deviceid: Host device ID associated with this master port + * @ops: configuration space functions + * @id: Port ID, unique among all ports + * @index: Port index, unique among all port interfaces of the same type + * @sys_size: RapidIO common transport system size + * @phy_type: RapidIO phy type + * @phys_efptr: RIO port extended features pointer + * @name: Port name string + * @priv: Master port private data + */ +struct rio_mport { + struct list_head dbells; /* list of doorbell events */ + struct list_head node; /* node in global list of ports */ + struct list_head nnode; /* node in net list of ports */ + struct resource iores; + struct resource riores[RIO_MAX_MPORT_RESOURCES]; + struct rio_msg inb_msg[RIO_MAX_MBOX]; + struct rio_msg outb_msg[RIO_MAX_MBOX]; + int host_deviceid; /* Host device ID */ + struct rio_ops *ops; /* low-level architecture-dependent routines */ + unsigned char id; /* port ID, unique among all ports */ + unsigned char index; /* port index, unique among all port + interfaces of the same type */ + unsigned int sys_size; /* RapidIO common transport system size. + * 0 - Small size. 256 devices. + * 1 - Large size, 65536 devices. 
+ */ + enum rio_phy_type phy_type; /* RapidIO phy type */ + u32 phys_efptr; + unsigned char name[40]; + void *priv; /* Master port private data */ +}; + +/** + * struct rio_net - RIO network info + * @node: Node in global list of RIO networks + * @devices: List of devices in this network + * @mports: List of master ports accessing this network + * @hport: Default port for accessing this network + * @id: RIO network ID + */ +struct rio_net { + struct list_head node; /* node in list of networks */ + struct list_head devices; /* list of devices in this net */ + struct list_head mports; /* list of ports accessing net */ + struct rio_mport *hport; /* primary port for accessing net */ + unsigned char id; /* RIO network ID */ +}; + +/* Definitions used by switch sysfs initialization callback */ +#define RIO_SW_SYSFS_CREATE 1 /* Create switch attributes */ +#define RIO_SW_SYSFS_REMOVE 0 /* Remove switch attributes */ + +/* Low-level architecture-dependent routines */ + +/** + * struct rio_ops - Low-level RIO configuration space operations + * @lcread: Callback to perform local (master port) read of config space. + * @lcwrite: Callback to perform local (master port) write of config space. + * @cread: Callback to perform network read of config space. + * @cwrite: Callback to perform network write of config space. + * @dsend: Callback to send a doorbell message. + * @pwenable: Callback to enable/disable port-write message handling. + * @open_outb_mbox: Callback to initialize outbound mailbox. + * @close_outb_mbox: Callback to shut down outbound mailbox. + * @open_inb_mbox: Callback to initialize inbound mailbox. + * @close_inb_mbox: Callback to shut down inbound mailbox. + * @add_outb_message: Callback to add a message to an outbound mailbox queue. + * @add_inb_buffer: Callback to add a buffer to an inbound mailbox queue. + * @get_inb_message: Callback to get a message from an inbound mailbox queue. 
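+ *
+ * A master port driver fills this in with its controller-specific accessors
+ * and points its struct rio_mport ->ops at the result before calling
+ * rio_register_mport(). A partial, purely illustrative sketch (the foo_*
+ * functions are hypothetical):
+ *
+ *	static struct rio_ops foo_rio_ops = {
+ *		.lcread		= foo_local_config_read,
+ *		.lcwrite	= foo_local_config_write,
+ *		.cread		= foo_config_read,
+ *		.cwrite		= foo_config_write,
+ *		.dsend		= foo_doorbell_send,
+ *	};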
+ */ +struct rio_ops { + int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len, + u32 *data); + int (*lcwrite) (struct rio_mport *mport, int index, u32 offset, int len, + u32 data); + int (*cread) (struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 *data); + int (*cwrite) (struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 data); + int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data); + int (*pwenable) (struct rio_mport *mport, int enable); + int (*open_outb_mbox)(struct rio_mport *mport, void *dev_id, + int mbox, int entries); + void (*close_outb_mbox)(struct rio_mport *mport, int mbox); + int (*open_inb_mbox)(struct rio_mport *mport, void *dev_id, + int mbox, int entries); + void (*close_inb_mbox)(struct rio_mport *mport, int mbox); + int (*add_outb_message)(struct rio_mport *mport, struct rio_dev *rdev, + int mbox, void *buffer, size_t len); + int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf); + void *(*get_inb_message)(struct rio_mport *mport, int mbox); +}; + +#define RIO_RESOURCE_MEM 0x00000100 +#define RIO_RESOURCE_DOORBELL 0x00000200 +#define RIO_RESOURCE_MAILBOX 0x00000400 + +#define RIO_RESOURCE_CACHEABLE 0x00010000 +#define RIO_RESOURCE_PCI 0x00020000 + +#define RIO_RESOURCE_BUSY 0x80000000 + +/** + * struct rio_driver - RIO driver info + * @node: Node in list of drivers + * @name: RIO driver name + * @id_table: RIO device ids to be associated with this driver + * @probe: RIO device inserted + * @remove: RIO device removed + * @suspend: RIO device suspended + * @resume: RIO device awakened + * @enable_wake: RIO device enable wake event + * @driver: LDM driver struct + * + * Provides info on a RIO device driver for insertion/removal and + * power management purposes. + */ +struct rio_driver { + struct list_head node; + char *name; + const struct rio_device_id *id_table; + int (*probe) (struct rio_dev * dev, const struct rio_device_id * id); + void (*remove) (struct rio_dev * dev); + int (*suspend) (struct rio_dev * dev, u32 state); + int (*resume) (struct rio_dev * dev); + int (*enable_wake) (struct rio_dev * dev, u32 state, int enable); + struct device_driver driver; +}; + +#define to_rio_driver(drv) container_of(drv,struct rio_driver, driver) + +/** + * struct rio_device_id - RIO device identifier + * @did: RIO device ID + * @vid: RIO vendor ID + * @asm_did: RIO assembly device ID + * @asm_vid: RIO assembly vendor ID + * + * Identifies a RIO device based on both the device/vendor IDs and + * the assembly device/vendor IDs. + */ +struct rio_device_id { + u16 did, vid; + u16 asm_did, asm_vid; +}; + +/** + * struct rio_switch_ops - Per-switch operations + * @vid: RIO vendor ID + * @did: RIO device ID + * @init_hook: Callback that performs switch device initialization + * + * Defines the operations that are necessary to initialize/control + * a particular RIO switch device. 
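+ *
+ * For illustration only, an entry for an IDT CPS-16 switch might look like
+ * this sketch (foo_cps_switch_init is a hypothetical init_hook):
+ *
+ *	static struct rio_switch_ops foo_cps_ops = {
+ *		.vid		= RIO_VID_IDT,
+ *		.did		= RIO_DID_IDTCPS16,
+ *		.init_hook	= foo_cps_switch_init,
+ *	};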
+ */ +struct rio_switch_ops { + u16 vid, did; + int (*init_hook) (struct rio_dev *rdev, int do_enum); +}; + +union rio_pw_msg { + struct { + u32 comptag; /* Component Tag CSR */ + u32 errdetect; /* Port N Error Detect CSR */ + u32 is_port; /* Implementation specific + PortID */ + u32 ltlerrdet; /* LTL Error Detect CSR */ + u32 padding[12]; + } em; + u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)]; +}; + +/* Architecture and hardware-specific functions */ +extern int rio_register_mport(struct rio_mport *); +extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); +extern void rio_close_inb_mbox(struct rio_mport *, int); +extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); +extern void rio_close_outb_mbox(struct rio_mport *, int); + +#endif /* LINUX_RIO_H */ diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h new file mode 100644 index 00000000..7f07470e --- /dev/null +++ b/include/linux/rio_drv.h @@ -0,0 +1,423 @@ +/* + * RapidIO driver services + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_RIO_DRV_H +#define LINUX_RIO_DRV_H + +#include <linux/types.h> +#include <linux/ioport.h> +#include <linux/list.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/rio.h> + +extern int __rio_local_read_config_32(struct rio_mport *port, u32 offset, + u32 * data); +extern int __rio_local_write_config_32(struct rio_mport *port, u32 offset, + u32 data); +extern int __rio_local_read_config_16(struct rio_mport *port, u32 offset, + u16 * data); +extern int __rio_local_write_config_16(struct rio_mport *port, u32 offset, + u16 data); +extern int __rio_local_read_config_8(struct rio_mport *port, u32 offset, + u8 * data); +extern int __rio_local_write_config_8(struct rio_mport *port, u32 offset, + u8 data); + +extern int rio_mport_read_config_32(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u32 * data); +extern int rio_mport_write_config_32(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u32 data); +extern int rio_mport_read_config_16(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u16 * data); +extern int rio_mport_write_config_16(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u16 data); +extern int rio_mport_read_config_8(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u8 * data); +extern int rio_mport_write_config_8(struct rio_mport *port, u16 destid, + u8 hopcount, u32 offset, u8 data); + +/** + * rio_local_read_config_32 - Read 32 bits from local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Pointer to read data into + * + * Reads 32 bits of data from the specified offset within the local + * device's configuration space. + */ +static inline int rio_local_read_config_32(struct rio_mport *port, u32 offset, + u32 * data) +{ + return __rio_local_read_config_32(port, offset, data); +} + +/** + * rio_local_write_config_32 - Write 32 bits to local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Data to be written + * + * Writes 32 bits of data to the specified offset within the local + * device's configuration space. 
+ */ +static inline int rio_local_write_config_32(struct rio_mport *port, u32 offset, + u32 data) +{ + return __rio_local_write_config_32(port, offset, data); +} + +/** + * rio_local_read_config_16 - Read 16 bits from local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Pointer to read data into + * + * Reads 16 bits of data from the specified offset within the local + * device's configuration space. + */ +static inline int rio_local_read_config_16(struct rio_mport *port, u32 offset, + u16 * data) +{ + return __rio_local_read_config_16(port, offset, data); +} + +/** + * rio_local_write_config_16 - Write 16 bits to local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Data to be written + * + * Writes 16 bits of data to the specified offset within the local + * device's configuration space. + */ + +static inline int rio_local_write_config_16(struct rio_mport *port, u32 offset, + u16 data) +{ + return __rio_local_write_config_16(port, offset, data); +} + +/** + * rio_local_read_config_8 - Read 8 bits from local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Pointer to read data into + * + * Reads 8 bits of data from the specified offset within the local + * device's configuration space. + */ +static inline int rio_local_read_config_8(struct rio_mport *port, u32 offset, + u8 * data) +{ + return __rio_local_read_config_8(port, offset, data); +} + +/** + * rio_local_write_config_8 - Write 8 bits to local configuration space + * @port: Master port + * @offset: Offset into local configuration space + * @data: Data to be written + * + * Writes 8 bits of data to the specified offset within the local + * device's configuration space. + */ +static inline int rio_local_write_config_8(struct rio_mport *port, u32 offset, + u8 data) +{ + return __rio_local_write_config_8(port, offset, data); +} + +/** + * rio_read_config_32 - Read 32 bits from configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Pointer to read data into + * + * Reads 32 bits of data from the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_read_config_32(struct rio_dev *rdev, u32 offset, + u32 * data) +{ + return rio_mport_read_config_32(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_write_config_32 - Write 32 bits to configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Data to be written + * + * Writes 32 bits of data to the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_write_config_32(struct rio_dev *rdev, u32 offset, + u32 data) +{ + return rio_mport_write_config_32(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_read_config_16 - Read 16 bits from configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Pointer to read data into + * + * Reads 16 bits of data from the specified offset within the + * RIO device's configuration space. 
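+ *
+ * The 32/16/8-bit accessors all follow the same pattern. As an illustrative
+ * sketch, a driver could test whether a device is a switch by reading its
+ * Processing Element Features CAR:
+ *
+ *	u32 pef;
+ *
+ *	if (rio_read_config_32(rdev, RIO_PEF_CAR, &pef) == 0 &&
+ *	    (pef & RIO_PEF_SWITCH))
+ *		pr_debug("%s is a switch\n", rio_name(rdev));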
+ */ +static inline int rio_read_config_16(struct rio_dev *rdev, u32 offset, + u16 * data) +{ + return rio_mport_read_config_16(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_write_config_16 - Write 16 bits to configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Data to be written + * + * Writes 16 bits of data to the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_write_config_16(struct rio_dev *rdev, u32 offset, + u16 data) +{ + return rio_mport_write_config_16(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_read_config_8 - Read 8 bits from configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Pointer to read data into + * + * Reads 8 bits of data from the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_read_config_8(struct rio_dev *rdev, u32 offset, u8 * data) +{ + return rio_mport_read_config_8(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +/** + * rio_write_config_8 - Write 8 bits to configuration space + * @rdev: RIO device + * @offset: Offset into device configuration space + * @data: Data to be written + * + * Writes 8 bits of data to the specified offset within the + * RIO device's configuration space. + */ +static inline int rio_write_config_8(struct rio_dev *rdev, u32 offset, u8 data) +{ + return rio_mport_write_config_8(rdev->net->hport, rdev->destid, + rdev->hopcount, offset, data); +}; + +extern int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, + u16 data); + +/** + * rio_send_doorbell - Send a doorbell message to a device + * @rdev: RIO device + * @data: Doorbell message data + * + * Send a doorbell message to a RIO device. The doorbell message + * has a 16-bit info field provided by the @data argument. + */ +static inline int rio_send_doorbell(struct rio_dev *rdev, u16 data) +{ + return rio_mport_send_doorbell(rdev->net->hport, rdev->destid, data); +}; + +/** + * rio_init_mbox_res - Initialize a RIO mailbox resource + * @res: resource struct + * @start: start of mailbox range + * @end: end of mailbox range + * + * This function is used to initialize the fields of a resource + * for use as a mailbox resource. It initializes a range of + * mailboxes using the start and end arguments. + */ +static inline void rio_init_mbox_res(struct resource *res, int start, int end) +{ + memset(res, 0, sizeof(struct resource)); + res->start = start; + res->end = end; + res->flags = RIO_RESOURCE_MAILBOX; +} + +/** + * rio_init_dbell_res - Initialize a RIO doorbell resource + * @res: resource struct + * @start: start of doorbell range + * @end: end of doorbell range + * + * This function is used to initialize the fields of a resource + * for use as a doorbell resource. It initializes a range of + * doorbell messages using the start and end arguments. + */ +static inline void rio_init_dbell_res(struct resource *res, u16 start, u16 end) +{ + memset(res, 0, sizeof(struct resource)); + res->start = start; + res->end = end; + res->flags = RIO_RESOURCE_DOORBELL; +} + +/** + * RIO_DEVICE - macro used to describe a specific RIO device + * @dev: the 16 bit RIO device ID + * @ven: the 16 bit RIO vendor ID + * + * This macro is used to create a struct rio_device_id that matches a + * specific device. The assembly vendor and assembly device fields + * will be set to %RIO_ANY_ID. 
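+ *
+ * A hypothetical driver's match table built with this macro might look like
+ * the following sketch, terminated by an all-zero entry:
+ *
+ *	static const struct rio_device_id foo_id_table[] = {
+ *		{ RIO_DEVICE(RIO_DID_TSI578, RIO_VID_TUNDRA) },
+ *		{ 0, }
+ *	};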
+ */ +#define RIO_DEVICE(dev,ven) \ + .did = (dev), .vid = (ven), \ + .asm_did = RIO_ANY_ID, .asm_vid = RIO_ANY_ID + +/* Mailbox management */ +extern int rio_request_outb_mbox(struct rio_mport *, void *, int, int, + void (*)(struct rio_mport *, void *,int, int)); +extern int rio_release_outb_mbox(struct rio_mport *, int); + +/** + * rio_add_outb_message - Add RIO message to an outbound mailbox queue + * @mport: RIO master port containing the outbound queue + * @rdev: RIO device the message is be sent to + * @mbox: The outbound mailbox queue + * @buffer: Pointer to the message buffer + * @len: Length of the message buffer + * + * Adds a RIO message buffer to an outbound mailbox queue for + * transmission. Returns 0 on success. + */ +static inline int rio_add_outb_message(struct rio_mport *mport, + struct rio_dev *rdev, int mbox, + void *buffer, size_t len) +{ + return mport->ops->add_outb_message(mport, rdev, mbox, + buffer, len); +} + +extern int rio_request_inb_mbox(struct rio_mport *, void *, int, int, + void (*)(struct rio_mport *, void *, int, int)); +extern int rio_release_inb_mbox(struct rio_mport *, int); + +/** + * rio_add_inb_buffer - Add buffer to an inbound mailbox queue + * @mport: Master port containing the inbound mailbox + * @mbox: The inbound mailbox number + * @buffer: Pointer to the message buffer + * + * Adds a buffer to an inbound mailbox queue for reception. Returns + * 0 on success. + */ +static inline int rio_add_inb_buffer(struct rio_mport *mport, int mbox, + void *buffer) +{ + return mport->ops->add_inb_buffer(mport, mbox, buffer); +} + +/** + * rio_get_inb_message - Get A RIO message from an inbound mailbox queue + * @mport: Master port containing the inbound mailbox + * @mbox: The inbound mailbox number + * + * Get a RIO message from an inbound mailbox queue. Returns 0 on success. + */ +static inline void *rio_get_inb_message(struct rio_mport *mport, int mbox) +{ + return mport->ops->get_inb_message(mport, mbox); +} + +/* Doorbell management */ +extern int rio_request_inb_dbell(struct rio_mport *, void *, u16, u16, + void (*)(struct rio_mport *, void *, u16, u16, u16)); +extern int rio_release_inb_dbell(struct rio_mport *, u16, u16); +extern struct resource *rio_request_outb_dbell(struct rio_dev *, u16, u16); +extern int rio_release_outb_dbell(struct rio_dev *, struct resource *); + +/* Memory region management */ +int rio_claim_resource(struct rio_dev *, int); +int rio_request_regions(struct rio_dev *, char *); +void rio_release_regions(struct rio_dev *); +int rio_request_region(struct rio_dev *, int, char *); +void rio_release_region(struct rio_dev *, int); + +/* Port-Write management */ +extern int rio_request_inb_pwrite(struct rio_dev *, + int (*)(struct rio_dev *, union rio_pw_msg*, int)); +extern int rio_release_inb_pwrite(struct rio_dev *); +extern int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg); + +/* LDM support */ +int rio_register_driver(struct rio_driver *); +void rio_unregister_driver(struct rio_driver *); +struct rio_dev *rio_dev_get(struct rio_dev *); +void rio_dev_put(struct rio_dev *); + +/** + * rio_name - Get the unique RIO device identifier + * @rdev: RIO device + * + * Get the unique RIO device identifier. Returns the device + * identifier string. + */ +static inline const char *rio_name(struct rio_dev *rdev) +{ + return dev_name(&rdev->dev); +} + +/** + * rio_get_drvdata - Get RIO driver specific data + * @rdev: RIO device + * + * Get RIO driver specific data. Returns a pointer to the + * driver specific data. 
+ */ +static inline void *rio_get_drvdata(struct rio_dev *rdev) +{ + return dev_get_drvdata(&rdev->dev); +} + +/** + * rio_set_drvdata - Set RIO driver specific data + * @rdev: RIO device + * @data: Pointer to driver specific data + * + * Set RIO driver specific data. device struct driver data pointer + * is set to the @data argument. + */ +static inline void rio_set_drvdata(struct rio_dev *rdev, void *data) +{ + dev_set_drvdata(&rdev->dev, data); +} + +/* Misc driver helpers */ +extern u16 rio_local_get_device_id(struct rio_mport *port); +extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); +extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did, + struct rio_dev *from); + +#endif /* LINUX_RIO_DRV_H */ diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h new file mode 100644 index 00000000..b66d13d1 --- /dev/null +++ b/include/linux/rio_ids.h @@ -0,0 +1,44 @@ +/* + * RapidIO devices + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_RIO_IDS_H +#define LINUX_RIO_IDS_H + +#define RIO_ANY_ID 0xffff + +#define RIO_VID_FREESCALE 0x0002 +#define RIO_DID_MPC8560 0x0003 + +#define RIO_VID_TUNDRA 0x000d +#define RIO_DID_TSI500 0x0500 +#define RIO_DID_TSI568 0x0568 +#define RIO_DID_TSI572 0x0572 +#define RIO_DID_TSI574 0x0574 +#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */ +#define RIO_DID_TSI577 0x0577 +#define RIO_DID_TSI578 0x0578 + +#define RIO_VID_IDT 0x0038 +#define RIO_DID_IDT70K200 0x0310 +#define RIO_DID_IDTCPS8 0x035c +#define RIO_DID_IDTCPS12 0x035d +#define RIO_DID_IDTCPS16 0x035b +#define RIO_DID_IDTCPS6Q 0x035f +#define RIO_DID_IDTCPS10Q 0x035e +#define RIO_DID_IDTCPS1848 0x0374 +#define RIO_DID_IDTCPS1432 0x0375 +#define RIO_DID_IDTCPS1616 0x0379 +#define RIO_DID_IDTVPS1616 0x0377 +#define RIO_DID_IDTSPS1616 0x0378 +#define RIO_DID_TSI721 0x80ab + +#endif /* LINUX_RIO_IDS_H */ diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h new file mode 100644 index 00000000..218168a2 --- /dev/null +++ b/include/linux/rio_regs.h @@ -0,0 +1,295 @@ +/* + * RapidIO register definitions + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef LINUX_RIO_REGS_H +#define LINUX_RIO_REGS_H + +/* + * In RapidIO, each device has a 16MB configuration space that is + * accessed via maintenance transactions. Portions of configuration + * space are standardized and/or reserved. 
+ */ +#define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO mainenance space */ + +#define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ +#define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ +#define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ +#define RIO_ASM_ID_MASK 0xffff0000 /* [I] Asm ID Mask */ +#define RIO_ASM_VEN_ID_MASK 0x0000ffff /* [I] Asm Vend Mask */ + +#define RIO_ASM_INFO_CAR 0x0c /* [I] Assembly Information CAR */ +#define RIO_ASM_REV_MASK 0xffff0000 /* [I] Asm Rev Mask */ +#define RIO_EXT_FTR_PTR_MASK 0x0000ffff /* [I] EF_PTR Mask */ + +#define RIO_PEF_CAR 0x10 /* [I] Processing Element Features CAR */ +#define RIO_PEF_BRIDGE 0x80000000 /* [I] Bridge */ +#define RIO_PEF_MEMORY 0x40000000 /* [I] MMIO */ +#define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */ +#define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */ +#define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */ +#define RIO_PEF_INB_MBOX 0x00f00000 /* [II, <= 1.2] Mailboxes */ +#define RIO_PEF_INB_MBOX0 0x00800000 /* [II, <= 1.2] Mailbox 0 */ +#define RIO_PEF_INB_MBOX1 0x00400000 /* [II, <= 1.2] Mailbox 1 */ +#define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */ +#define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */ +#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */ +#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ +#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ +#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ +#define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */ +#define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */ +#define RIO_PEF_ADDR_50 0x00000002 /* [I] 50 bits */ +#define RIO_PEF_ADDR_34 0x00000001 /* [I] 34 bits */ + +#define RIO_SWP_INFO_CAR 0x14 /* [I] Switch Port Information CAR */ +#define RIO_SWP_INFO_PORT_TOTAL_MASK 0x0000ff00 /* [I] Total number of ports */ +#define RIO_SWP_INFO_PORT_NUM_MASK 0x000000ff /* [I] Maintenance transaction port number */ +#define RIO_GET_TOTAL_PORTS(x) ((x & RIO_SWP_INFO_PORT_TOTAL_MASK) >> 8) +#define RIO_GET_PORT_NUM(x) (x & RIO_SWP_INFO_PORT_NUM_MASK) + +#define RIO_SRC_OPS_CAR 0x18 /* [I] Source Operations CAR */ +#define RIO_SRC_OPS_READ 0x00008000 /* [I] Read op */ +#define RIO_SRC_OPS_WRITE 0x00004000 /* [I] Write op */ +#define RIO_SRC_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */ +#define RIO_SRC_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */ +#define RIO_SRC_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */ +#define RIO_SRC_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */ +#define RIO_SRC_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */ +#define RIO_SRC_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */ +#define RIO_SRC_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */ +#define RIO_SRC_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */ +#define RIO_SRC_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ +#define RIO_SRC_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ + +#define RIO_DST_OPS_CAR 0x1c /* Destination Operations CAR */ +#define RIO_DST_OPS_READ 0x00008000 /* [I] Read op */ +#define RIO_DST_OPS_WRITE 0x00004000 /* [I] Write op */ +#define RIO_DST_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */ +#define RIO_DST_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */ +#define RIO_DST_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */ +#define RIO_DST_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */ +#define RIO_DST_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */ +#define RIO_DST_OPS_ATOMIC_INC 
0x00000080 /* [I] Atomic inc op */ +#define RIO_DST_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */ +#define RIO_DST_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */ +#define RIO_DST_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ +#define RIO_DST_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ + +#define RIO_OPS_READ 0x00008000 /* [I] Read op */ +#define RIO_OPS_WRITE 0x00004000 /* [I] Write op */ +#define RIO_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */ +#define RIO_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */ +#define RIO_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */ +#define RIO_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */ +#define RIO_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */ +#define RIO_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */ +#define RIO_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */ +#define RIO_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */ +#define RIO_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ +#define RIO_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ + + /* 0x20-0x30 *//* Reserved */ + +#define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */ +#define RIO_RT_MAX_DESTID 0x0000ffff + +#define RIO_MBOX_CSR 0x40 /* [II, <= 1.2] Mailbox CSR */ +#define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ +#define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */ +#define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */ +#define RIO_MBOX0_BUSY 0x10000000 /* [II] Mbox 0 busy */ +#define RIO_MBOX0_FAIL 0x08000000 /* [II] Mbox 0 fail */ +#define RIO_MBOX0_ERROR 0x04000000 /* [II] Mbox 0 error */ +#define RIO_MBOX1_AVAIL 0x00800000 /* [II] Mbox 1 avail */ +#define RIO_MBOX1_FULL 0x00200000 /* [II] Mbox 1 full */ +#define RIO_MBOX1_EMPTY 0x00200000 /* [II] Mbox 1 empty */ +#define RIO_MBOX1_BUSY 0x00100000 /* [II] Mbox 1 busy */ +#define RIO_MBOX1_FAIL 0x00080000 /* [II] Mbox 1 fail */ +#define RIO_MBOX1_ERROR 0x00040000 /* [II] Mbox 1 error */ +#define RIO_MBOX2_AVAIL 0x00008000 /* [II] Mbox 2 avail */ +#define RIO_MBOX2_FULL 0x00004000 /* [II] Mbox 2 full */ +#define RIO_MBOX2_EMPTY 0x00002000 /* [II] Mbox 2 empty */ +#define RIO_MBOX2_BUSY 0x00001000 /* [II] Mbox 2 busy */ +#define RIO_MBOX2_FAIL 0x00000800 /* [II] Mbox 2 fail */ +#define RIO_MBOX2_ERROR 0x00000400 /* [II] Mbox 2 error */ +#define RIO_MBOX3_AVAIL 0x00000080 /* [II] Mbox 3 avail */ +#define RIO_MBOX3_FULL 0x00000040 /* [II] Mbox 3 full */ +#define RIO_MBOX3_EMPTY 0x00000020 /* [II] Mbox 3 empty */ +#define RIO_MBOX3_BUSY 0x00000010 /* [II] Mbox 3 busy */ +#define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */ +#define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */ + +#define RIO_WRITE_PORT_CSR 0x44 /* [I, <= 1.2] Write Port CSR */ +#define RIO_DOORBELL_CSR 0x44 /* [II, <= 1.2] Doorbell CSR */ +#define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */ +#define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */ +#define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */ +#define RIO_DOORBELL_BUSY 0x10000000 /* [II] Doorbell busy */ +#define RIO_DOORBELL_FAILED 0x08000000 /* [II] Doorbell failed */ +#define RIO_DOORBELL_ERROR 0x04000000 /* [II] Doorbell error */ +#define RIO_WRITE_PORT_AVAILABLE 0x00000080 /* [I] Write Port Available */ +#define RIO_WRITE_PORT_FULL 0x00000040 /* [I] Write Port Full */ +#define RIO_WRITE_PORT_EMPTY 0x00000020 /* [I] Write Port Empty */ +#define RIO_WRITE_PORT_BUSY 0x00000010 /* [I] Write Port Busy */ +#define RIO_WRITE_PORT_FAILED 0x00000008 /* [I] Write Port Failed */ 
+#define RIO_WRITE_PORT_ERROR 0x00000004 /* [I] Write Port Error */ + + /* 0x48 *//* Reserved */ + +#define RIO_PELL_CTRL_CSR 0x4c /* [I] PE Logical Layer Control CSR */ +#define RIO_PELL_ADDR_66 0x00000004 /* [I] 66-bit addr */ +#define RIO_PELL_ADDR_50 0x00000002 /* [I] 50-bit addr */ +#define RIO_PELL_ADDR_34 0x00000001 /* [I] 34-bit addr */ + + /* 0x50-0x54 *//* Reserved */ + +#define RIO_LCSH_BA 0x58 /* [I] LCS High Base Address */ +#define RIO_LCSL_BA 0x5c /* [I] LCS Base Address */ + +#define RIO_DID_CSR 0x60 /* [III] Base Device ID CSR */ + + /* 0x64 *//* Reserved */ + +#define RIO_HOST_DID_LOCK_CSR 0x68 /* [III] Host Base Device ID Lock CSR */ +#define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */ + +#define RIO_STD_RTE_CONF_DESTID_SEL_CSR 0x70 +#define RIO_STD_RTE_CONF_EXTCFGEN 0x80000000 +#define RIO_STD_RTE_CONF_PORT_SEL_CSR 0x74 +#define RIO_STD_RTE_DEFAULT_PORT 0x78 + + /* 0x7c-0xf8 *//* Reserved */ + /* 0x100-0xfff8 *//* [I] Extended Features Space */ + /* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */ + +/* + * Extended Features Space is a configuration space area where + * functionality is mapped into extended feature blocks via a + * singly linked list of extended feature pointers (EFT_PTR). + * + * Each extended feature block can be identified/located in + * Extended Features Space by walking the extended feature + * list starting with the Extended Feature Pointer located + * in the Assembly Information CAR. + * + * Extended Feature Blocks (EFBs) are identified with an assigned + * EFB ID. Extended feature block offsets in the definitions are + * relative to the offset of the EFB within the Extended Features + * Space. + */ + +/* Helper macros to parse the Extended Feature Block header */ +#define RIO_EFB_PTR_MASK 0xffff0000 +#define RIO_EFB_ID_MASK 0x0000ffff +#define RIO_GET_BLOCK_PTR(x) ((x & RIO_EFB_PTR_MASK) >> 16) +#define RIO_GET_BLOCK_ID(x) (x & RIO_EFB_ID_MASK) + +/* Extended Feature Block IDs */ +#define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */ +#define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */ +#define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */ +#define RIO_EFB_SER_EP_ID_V13P 0x0001 /* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */ +#define RIO_EFB_SER_EP_REC_ID_V13P 0x0002 /* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */ +#define RIO_EFB_SER_EP_FREE_ID_V13P 0x0003 /* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */ +#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */ +#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */ +#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */ +#define RIO_EFB_SER_EP_FREC_ID 0x0009 /* [VI] LP/Serial EP Free Recovery Devices */ +#define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */ + +/* + * Physical 8/16 LP-LVDS + * ID=0x0001, Generic End Point Devices + * ID=0x0002, Generic End Point Devices, software assisted recovery option + * ID=0x0003, Generic End Point Free Devices + * + * Physical LP-Serial + * ID=0x0004, Generic End Point Devices + * ID=0x0005, Generic End Point Devices, software assisted recovery option + * ID=0x0006, Generic End Point Free Devices + */ +#define RIO_PORT_MNT_HEADER 0x0000 +#define RIO_PORT_REQ_CTL_CSR 0x0020 +#define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */ +#define RIO_PORT_LINKTO_CTL_CSR 0x0020 /* Serial */ +#define RIO_PORT_RSPTO_CTL_CSR 0x0024 /* Serial */ +#define 
RIO_PORT_GEN_CTL_CSR 0x003c +#define RIO_PORT_GEN_HOST 0x80000000 +#define RIO_PORT_GEN_MASTER 0x40000000 +#define RIO_PORT_GEN_DISCOVERED 0x20000000 +#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */ +#define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */ +#define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */ +#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */ +#define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */ +#define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */ +#define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */ +#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */ +#define RIO_PORT_N_ACK_CLEAR 0x80000000 +#define RIO_PORT_N_ACK_INBOUND 0x3f000000 +#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00 +#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f +#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20) +#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */ +#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */ +#define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */ +#define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004 +#define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002 +#define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001 +#define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20) +#define RIO_PORT_N_CTL_PWIDTH 0xc0000000 +#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000 +#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000 +#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001 +#define RIO_PORT_N_CTL_LOCKOUT 0x00000002 +#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000 +#define RIO_PORT_N_CTL_EN_TX_SER 0x00400000 +#define RIO_PORT_N_CTL_EN_RX_PAR 0x08000000 +#define RIO_PORT_N_CTL_EN_TX_PAR 0x40000000 + +/* + * Error Management Extensions (RapidIO 1.3+, Part 8) + * + * Extended Features Block ID=0x0007 + */ + +/* General EM Registers (Common for all Ports) */ + +#define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */ +#define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */ +#define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */ +#define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */ +#define REM_LTL_ERR_UNSOLR 0x00800000 /* Unsolicited Response */ +#define REM_LTL_ERR_UNSUPTR 0x00400000 /* Unsupported Transaction */ +#define REM_LTL_ERR_IMPSPEC 0x000000ff /* Implementation Specific */ +#define RIO_EM_LTL_HIADDR_CAP 0x010 /* Logical/Transport Layer High Address Capture CSR */ +#define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */ +#define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */ +#define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */ +#define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */ +#define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */ + +/* Per-Port EM Registers */ + +#define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */ +#define REM_PED_IMPL_SPEC 0x80000000 +#define REM_PED_LINK_TO 0x00000001 +#define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */ +#define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */ +#define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */ +#define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */ +#define RIO_EM_PN_PKT_CAP_2(x) (0x054 + x*0x40) /* Port N Packet Capture 2 CSR */ +#define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N 
Packet Capture 3 CSR */ +#define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */ +#define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */ + +#endif /* LINUX_RIO_REGS_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h new file mode 100644 index 00000000..fd07c454 --- /dev/null +++ b/include/linux/rmap.h @@ -0,0 +1,263 @@ +#ifndef _LINUX_RMAP_H +#define _LINUX_RMAP_H +/* + * Declarations for Reverse Mapping functions in mm/rmap.c + */ + +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/memcontrol.h> + +/* + * The anon_vma heads a list of private "related" vmas, to scan if + * an anonymous page pointing to this anon_vma needs to be unmapped: + * the vmas on the list will be related by forking, or by splitting. + * + * Since vmas come and go as they are split and merged (particularly + * in mprotect), the mapping field of an anonymous page cannot point + * directly to a vma: instead it points to an anon_vma, on whose list + * the related vmas can be easily linked or unlinked. + * + * After unlinking the last vma on the list, we must garbage collect + * the anon_vma object itself: we're guaranteed no page can be + * pointing to this anon_vma once its vma list is empty. + */ +struct anon_vma { + struct anon_vma *root; /* Root of this anon_vma tree */ + struct mutex mutex; /* Serialize access to vma list */ + /* + * The refcount is taken on an anon_vma when there is no + * guarantee that the vma of page tables will exist for + * the duration of the operation. A caller that takes + * the reference is responsible for clearing up the + * anon_vma if they are the last user on release + */ + atomic_t refcount; + + /* + * NOTE: the LSB of the head.next is set by + * mm_take_all_locks() _after_ taking the above lock. So the + * head must only be read/written after taking the above lock + * to be sure to see a valid next pointer. The LSB bit itself + * is serialized by a system wide lock only visible to + * mm_take_all_locks() (mm_all_locks_mutex). + */ + struct list_head head; /* Chain of private "related" vmas */ +}; + +/* + * The copy-on-write semantics of fork mean that an anon_vma + * can become associated with multiple processes. Furthermore, + * each child process will have its own anon_vma, where new + * pages for that process are instantiated. + * + * This structure allows us to find the anon_vmas associated + * with a VMA, or the VMAs associated with an anon_vma. + * The "same_vma" list contains the anon_vma_chains linking + * all the anon_vmas associated with this VMA. + * The "same_anon_vma" list contains the anon_vma_chains + * which link all the VMAs associated with this anon_vma. 
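 *
 * As a rough illustration only (a sketch of the usual pattern, not code
 * taken from mm/rmap.c; handle_one_vma() is a hypothetical callback), a
 * reverse-map walk visits every VMA attached to an anon_vma by iterating
 * the "same_anon_vma" chains with the anon_vma lock held:
 *
 *	struct anon_vma_chain *avc;
 *
 *	anon_vma_lock(anon_vma);
 *	list_for_each_entry(avc, &anon_vma->head, same_anon_vma)
 *		handle_one_vma(avc->vma);
 *	anon_vma_unlock(anon_vma);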
+ */ +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ + struct list_head same_anon_vma; /* locked by anon_vma->mutex */ +}; + +#ifdef CONFIG_MMU +static inline void get_anon_vma(struct anon_vma *anon_vma) +{ + atomic_inc(&anon_vma->refcount); +} + +void __put_anon_vma(struct anon_vma *anon_vma); + +static inline void put_anon_vma(struct anon_vma *anon_vma) +{ + if (atomic_dec_and_test(&anon_vma->refcount)) + __put_anon_vma(anon_vma); +} + +static inline struct anon_vma *page_anon_vma(struct page *page) +{ + if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != + PAGE_MAPPING_ANON) + return NULL; + return page_rmapping(page); +} + +static inline void vma_lock_anon_vma(struct vm_area_struct *vma) +{ + struct anon_vma *anon_vma = vma->anon_vma; + if (anon_vma) + mutex_lock(&anon_vma->root->mutex); +} + +static inline void vma_unlock_anon_vma(struct vm_area_struct *vma) +{ + struct anon_vma *anon_vma = vma->anon_vma; + if (anon_vma) + mutex_unlock(&anon_vma->root->mutex); +} + +static inline void anon_vma_lock(struct anon_vma *anon_vma) +{ + mutex_lock(&anon_vma->root->mutex); +} + +static inline void anon_vma_unlock(struct anon_vma *anon_vma) +{ + mutex_unlock(&anon_vma->root->mutex); +} + +/* + * anon_vma helper functions. + */ +void anon_vma_init(void); /* create anon_vma_cachep */ +int anon_vma_prepare(struct vm_area_struct *); +void unlink_anon_vmas(struct vm_area_struct *); +int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); +void anon_vma_moveto_tail(struct vm_area_struct *); +int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); + +static inline void anon_vma_merge(struct vm_area_struct *vma, + struct vm_area_struct *next) +{ + VM_BUG_ON(vma->anon_vma != next->anon_vma); + unlink_anon_vmas(next); +} + +struct anon_vma *page_get_anon_vma(struct page *page); + +/* + * rmap interfaces called when adding or removing pte of page + */ +void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); +void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); +void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long, int); +void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); +void page_add_file_rmap(struct page *); +void page_remove_rmap(struct page *); + +void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long); +void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *, + unsigned long); + +static inline void page_dup_rmap(struct page *page) +{ + atomic_inc(&page->_mapcount); +} + +/* + * Called from mm/vmscan.c to handle paging out + */ +int page_referenced(struct page *, int is_locked, + struct mem_cgroup *memcg, unsigned long *vm_flags); +int page_referenced_one(struct page *, struct vm_area_struct *, + unsigned long address, unsigned int *mapcount, unsigned long *vm_flags); + +enum ttu_flags { + TTU_UNMAP = 0, /* unmap mode */ + TTU_MIGRATION = 1, /* migration mode */ + TTU_MUNLOCK = 2, /* munlock mode */ + TTU_ACTION_MASK = 0xff, + + TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ + TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ + TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */ +}; +#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) + +bool is_vma_temporary_stack(struct vm_area_struct *vma); + +int try_to_unmap(struct page *, enum ttu_flags flags); +int try_to_unmap_one(struct page *, struct 
vm_area_struct *, + unsigned long address, enum ttu_flags flags); + +/* + * Called from mm/filemap_xip.c to unmap empty zero page + */ +pte_t *__page_check_address(struct page *, struct mm_struct *, + unsigned long, spinlock_t **, int); + +static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, + unsigned long address, + spinlock_t **ptlp, int sync) +{ + pte_t *ptep; + + __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, + ptlp, sync)); + return ptep; +} + +/* + * Used by swapoff to help locate where page is expected in vma. + */ +unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); + +/* + * Cleans the PTEs of shared mappings. + * (and since clean PTEs should also be readonly, write protects them too) + * + * returns the number of cleaned PTEs. + */ +int page_mkclean(struct page *); + +/* + * called in munlock()/munmap() path to check for other vmas holding + * the page mlocked. + */ +int try_to_munlock(struct page *); + +/* + * Called by memory-failure.c to kill processes. + */ +struct anon_vma *page_lock_anon_vma(struct page *page); +void page_unlock_anon_vma(struct anon_vma *anon_vma); +int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); + +/* + * Called by migrate.c to remove migration ptes, but might be used more later. + */ +int rmap_walk(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg); + +#else /* !CONFIG_MMU */ + +#define anon_vma_init() do {} while (0) +#define anon_vma_prepare(vma) (0) +#define anon_vma_link(vma) do {} while (0) + +static inline int page_referenced(struct page *page, int is_locked, + struct mem_cgroup *memcg, + unsigned long *vm_flags) +{ + *vm_flags = 0; + return 0; +} + +#define try_to_unmap(page, refs) SWAP_FAIL + +static inline int page_mkclean(struct page *page) +{ + return 0; +} + + +#endif /* CONFIG_MMU */ + +/* + * Return values of try_to_unmap + */ +#define SWAP_SUCCESS 0 +#define SWAP_AGAIN 1 +#define SWAP_FAIL 2 +#define SWAP_MLOCK 3 + +#endif /* _LINUX_RMAP_H */ diff --git a/include/linux/romfs_fs.h b/include/linux/romfs_fs.h new file mode 100644 index 00000000..5f57f93b --- /dev/null +++ b/include/linux/romfs_fs.h @@ -0,0 +1,59 @@ +#ifndef __LINUX_ROMFS_FS_H +#define __LINUX_ROMFS_FS_H + +#include <linux/types.h> +#include <linux/fs.h> + +/* The basic structures of the romfs filesystem */ + +#define ROMBSIZE BLOCK_SIZE +#define ROMBSBITS BLOCK_SIZE_BITS +#define ROMBMASK (ROMBSIZE-1) +#define ROMFS_MAGIC 0x7275 + +#define ROMFS_MAXFN 128 + +#define __mkw(h,l) (((h)&0x00ff)<< 8|((l)&0x00ff)) +#define __mkl(h,l) (((h)&0xffff)<<16|((l)&0xffff)) +#define __mk4(a,b,c,d) cpu_to_be32(__mkl(__mkw(a,b),__mkw(c,d))) +#define ROMSB_WORD0 __mk4('-','r','o','m') +#define ROMSB_WORD1 __mk4('1','f','s','-') + +/* On-disk "super block" */ + +struct romfs_super_block { + __be32 word0; + __be32 word1; + __be32 size; + __be32 checksum; + char name[0]; /* volume name */ +}; + +/* On disk inode */ + +struct romfs_inode { + __be32 next; /* low 4 bits see ROMFH_ */ + __be32 spec; + __be32 size; + __be32 checksum; + char name[0]; +}; + +#define ROMFH_TYPE 7 +#define ROMFH_HRD 0 +#define ROMFH_DIR 1 +#define ROMFH_REG 2 +#define ROMFH_SYM 3 +#define ROMFH_BLK 4 +#define ROMFH_CHR 5 +#define ROMFH_SCK 6 +#define ROMFH_FIF 7 +#define ROMFH_EXEC 8 + +/* Alignment */ + +#define ROMFH_SIZE 16 +#define ROMFH_PAD (ROMFH_SIZE-1) +#define ROMFH_MASK (~ROMFH_PAD) + +#endif diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h new 
file mode 100644 index 00000000..ed241aad --- /dev/null +++ b/include/linux/root_dev.h @@ -0,0 +1,23 @@ +#ifndef _ROOT_DEV_H_ +#define _ROOT_DEV_H_ + +#include <linux/major.h> +#include <linux/types.h> +#include <linux/kdev_t.h> + +enum { + Root_NFS = MKDEV(UNNAMED_MAJOR, 255), + Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0), + Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1), + Root_FD0 = MKDEV(FLOPPY_MAJOR, 0), + Root_HDA1 = MKDEV(IDE0_MAJOR, 1), + Root_HDA2 = MKDEV(IDE0_MAJOR, 2), + Root_SDA1 = MKDEV(SCSI_DISK0_MAJOR, 1), + Root_SDA2 = MKDEV(SCSI_DISK0_MAJOR, 2), + Root_HDC1 = MKDEV(IDE1_MAJOR, 1), + Root_SR0 = MKDEV(SCSI_CDROM_MAJOR, 0), +}; + +extern dev_t ROOT_DEV; + +#endif diff --git a/include/linux/rose.h b/include/linux/rose.h new file mode 100644 index 00000000..1fcfe958 --- /dev/null +++ b/include/linux/rose.h @@ -0,0 +1,90 @@ +/* + * These are the public elements of the Linux kernel Rose implementation. + * For kernel AX.25 see the file ax25.h. This file requires ax25.h for the + * definition of the ax25_address structure. + */ + +#ifndef ROSE_KERNEL_H +#define ROSE_KERNEL_H + +#include <linux/socket.h> +#include <linux/ax25.h> + +#define ROSE_MTU 251 + +#define ROSE_MAX_DIGIS 6 + +#define ROSE_DEFER 1 +#define ROSE_T1 2 +#define ROSE_T2 3 +#define ROSE_T3 4 +#define ROSE_IDLE 5 +#define ROSE_QBITINCL 6 +#define ROSE_HOLDBACK 7 + +#define SIOCRSGCAUSE (SIOCPROTOPRIVATE+0) +#define SIOCRSSCAUSE (SIOCPROTOPRIVATE+1) +#define SIOCRSL2CALL (SIOCPROTOPRIVATE+2) +#define SIOCRSSL2CALL (SIOCPROTOPRIVATE+2) +#define SIOCRSACCEPT (SIOCPROTOPRIVATE+3) +#define SIOCRSCLRRT (SIOCPROTOPRIVATE+4) +#define SIOCRSGL2CALL (SIOCPROTOPRIVATE+5) +#define SIOCRSGFACILITIES (SIOCPROTOPRIVATE+6) + +#define ROSE_DTE_ORIGINATED 0x00 +#define ROSE_NUMBER_BUSY 0x01 +#define ROSE_INVALID_FACILITY 0x03 +#define ROSE_NETWORK_CONGESTION 0x05 +#define ROSE_OUT_OF_ORDER 0x09 +#define ROSE_ACCESS_BARRED 0x0B +#define ROSE_NOT_OBTAINABLE 0x0D +#define ROSE_REMOTE_PROCEDURE 0x11 +#define ROSE_LOCAL_PROCEDURE 0x13 +#define ROSE_SHIP_ABSENT 0x39 + +typedef struct { + char rose_addr[5]; +} rose_address; + +struct sockaddr_rose { + __kernel_sa_family_t srose_family; + rose_address srose_addr; + ax25_address srose_call; + int srose_ndigis; + ax25_address srose_digi; +}; + +struct full_sockaddr_rose { + __kernel_sa_family_t srose_family; + rose_address srose_addr; + ax25_address srose_call; + unsigned int srose_ndigis; + ax25_address srose_digis[ROSE_MAX_DIGIS]; +}; + +struct rose_route_struct { + rose_address address; + unsigned short mask; + ax25_address neighbour; + char device[16]; + unsigned char ndigis; + ax25_address digipeaters[AX25_MAX_DIGIS]; +}; + +struct rose_cause_struct { + unsigned char cause; + unsigned char diagnostic; +}; + +struct rose_facilities_struct { + rose_address source_addr, dest_addr; + ax25_address source_call, dest_call; + unsigned char source_ndigis, dest_ndigis; + ax25_address source_digis[ROSE_MAX_DIGIS]; + ax25_address dest_digis[ROSE_MAX_DIGIS]; + unsigned int rand; + rose_address fail_addr; + ax25_address fail_call; +}; + +#endif diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h new file mode 100644 index 00000000..3f594dce --- /dev/null +++ b/include/linux/rotary_encoder.h @@ -0,0 +1,16 @@ +#ifndef __ROTARY_ENCODER_H__ +#define __ROTARY_ENCODER_H__ + +struct rotary_encoder_platform_data { + unsigned int steps; + unsigned int axis; + unsigned int gpio_a; + unsigned int gpio_b; + unsigned int inverted_a; + unsigned int inverted_b; + bool relative_axis; + bool rollover; + bool 
half_period; +}; + +#endif /* __ROTARY_ENCODER_H__ */ diff --git a/include/linux/route.h b/include/linux/route.h new file mode 100644 index 00000000..66007083 --- /dev/null +++ b/include/linux/route.h @@ -0,0 +1,69 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the IP router interface. + * + * Version: @(#)route.h 1.0.3 05/27/93 + * + * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988 + * for the purposes of compatibility only. + * + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * Changes: + * Mike McLagan : Routing by source + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_ROUTE_H +#define _LINUX_ROUTE_H + +#include <linux/if.h> +#include <linux/compiler.h> + +/* This structure gets passed by the SIOCADDRT and SIOCDELRT calls. */ +struct rtentry { + unsigned long rt_pad1; + struct sockaddr rt_dst; /* target address */ + struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ + struct sockaddr rt_genmask; /* target network mask (IP) */ + unsigned short rt_flags; + short rt_pad2; + unsigned long rt_pad3; + void *rt_pad4; + short rt_metric; /* +1 for binary compatibility! */ + char __user *rt_dev; /* forcing the device at add */ + unsigned long rt_mtu; /* per route MTU/Window */ +#ifndef __KERNEL__ +#define rt_mss rt_mtu /* Compatibility :-( */ +#endif + unsigned long rt_window; /* Window clamping */ + unsigned short rt_irtt; /* Initial RTT */ +}; + + +#define RTF_UP 0x0001 /* route usable */ +#define RTF_GATEWAY 0x0002 /* destination is a gateway */ +#define RTF_HOST 0x0004 /* host entry (net otherwise) */ +#define RTF_REINSTATE 0x0008 /* reinstate route after tmout */ +#define RTF_DYNAMIC 0x0010 /* created dyn. (by redirect) */ +#define RTF_MODIFIED 0x0020 /* modified dyn. (by redirect) */ +#define RTF_MTU 0x0040 /* specific MTU for this route */ +#define RTF_MSS RTF_MTU /* Compatibility :-( */ +#define RTF_WINDOW 0x0080 /* per route window clamping */ +#define RTF_IRTT 0x0100 /* Initial round trip time */ +#define RTF_REJECT 0x0200 /* Reject route */ + +/* + * <linux/ipv6_route.h> uses RTF values >= 64k + */ + + + +#endif /* _LINUX_ROUTE_H */ + diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h new file mode 100644 index 00000000..82a67390 --- /dev/null +++ b/include/linux/rpmsg.h @@ -0,0 +1,332 @@ +/* + * Remote processor messaging + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2011 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Texas Instruments nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LINUX_RPMSG_H +#define _LINUX_RPMSG_H + +#include <linux/types.h> +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/kref.h> +#include <linux/mutex.h> + +/* The feature bitmap for virtio rpmsg */ +#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */ + +/** + * struct rpmsg_hdr - common header for all rpmsg messages + * @src: source address + * @dst: destination address + * @reserved: reserved for future use + * @len: length of payload (in bytes) + * @flags: message flags + * @data: @len bytes of message payload data + * + * Every message sent(/received) on the rpmsg bus begins with this header. + */ +struct rpmsg_hdr { + u32 src; + u32 dst; + u32 reserved; + u16 len; + u16 flags; + u8 data[0]; +} __packed; + +/** + * struct rpmsg_ns_msg - dynamic name service announcement message + * @name: name of remote service that is published + * @addr: address of remote service that is published + * @flags: indicates whether service is created or destroyed + * + * This message is sent across to publish a new service, or announce + * about its removal. When we receive these messages, an appropriate + * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe() + * or ->remove() handler of the appropriate rpmsg driver will be invoked + * (if/as-soon-as one is registered). 
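 *
 * For illustration only (hypothetical driver code, not part of this
 * header): a driver is bound to channels created from such announcements
 * by advertising the service name it handles in its id table, e.g.
 *
 *	static struct rpmsg_device_id sample_id_table[] = {
 *		{ .name = "rpmsg-client-sample" },
 *		{ },
 *	};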
+ */ +struct rpmsg_ns_msg { + char name[RPMSG_NAME_SIZE]; + u32 addr; + u32 flags; +} __packed; + +/** + * enum rpmsg_ns_flags - dynamic name service announcement flags + * + * @RPMSG_NS_CREATE: a new remote service was just created + * @RPMSG_NS_DESTROY: a known remote service was just destroyed + */ +enum rpmsg_ns_flags { + RPMSG_NS_CREATE = 0, + RPMSG_NS_DESTROY = 1, +}; + +#define RPMSG_ADDR_ANY 0xFFFFFFFF + +struct virtproc_info; + +/** + * rpmsg_channel - devices that belong to the rpmsg bus are called channels + * @vrp: the remote processor this channel belongs to + * @dev: the device struct + * @id: device id (used to match between rpmsg drivers and devices) + * @src: local address + * @dst: destination address + * @ept: the rpmsg endpoint of this channel + * @announce: if set, rpmsg will announce the creation/removal of this channel + */ +struct rpmsg_channel { + struct virtproc_info *vrp; + struct device dev; + struct rpmsg_device_id id; + u32 src; + u32 dst; + struct rpmsg_endpoint *ept; + bool announce; +}; + +typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32); + +/** + * struct rpmsg_endpoint - binds a local rpmsg address to its user + * @rpdev: rpmsg channel device + * @refcount: when this drops to zero, the ept is deallocated + * @cb: rx callback handler + * @cb_lock: must be taken before accessing/changing @cb + * @addr: local rpmsg address + * @priv: private data for the driver's use + * + * In essence, an rpmsg endpoint represents a listener on the rpmsg bus, as + * it binds an rpmsg address with an rx callback handler. + * + * Simple rpmsg drivers shouldn't use this struct directly, because + * things just work: every rpmsg driver provides an rx callback upon + * registering to the bus, and that callback is then bound to its rpmsg + * address when the driver is probed. When relevant inbound messages arrive + * (i.e. messages which their dst address equals to the src address of + * the rpmsg channel), the driver's handler is invoked to process it. + * + * More complicated drivers though, that do need to allocate additional rpmsg + * addresses, and bind them to different rx callbacks, must explicitly + * create additional endpoints by themselves (see rpmsg_create_ept()). + */ +struct rpmsg_endpoint { + struct rpmsg_channel *rpdev; + struct kref refcount; + rpmsg_rx_cb_t cb; + struct mutex cb_lock; + u32 addr; + void *priv; +}; + +/** + * struct rpmsg_driver - rpmsg driver struct + * @drv: underlying device driver + * @id_table: rpmsg ids serviced by this driver + * @probe: invoked when a matching rpmsg channel (i.e. 
device) is found + * @remove: invoked when the rpmsg channel is removed + * @callback: invoked when an inbound message is received on the channel + */ +struct rpmsg_driver { + struct device_driver drv; + const struct rpmsg_device_id *id_table; + int (*probe)(struct rpmsg_channel *dev); + void (*remove)(struct rpmsg_channel *dev); + void (*callback)(struct rpmsg_channel *, void *, int, void *, u32); +}; + +int register_rpmsg_device(struct rpmsg_channel *dev); +void unregister_rpmsg_device(struct rpmsg_channel *dev); +int register_rpmsg_driver(struct rpmsg_driver *drv); +void unregister_rpmsg_driver(struct rpmsg_driver *drv); +void rpmsg_destroy_ept(struct rpmsg_endpoint *); +struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *, + rpmsg_rx_cb_t cb, void *priv, u32 addr); +int +rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool); + +/** + * rpmsg_send() - send a message across to the remote processor + * @rpdev: the rpmsg channel + * @data: payload of message + * @len: length of payload + * + * This function sends @data of length @len on the @rpdev channel. + * The message will be sent to the remote processor which the @rpdev + * channel belongs to, using @rpdev's source and destination addresses. + * In case there are no TX buffers available, the function will block until + * one becomes available, or a timeout of 15 seconds elapses. When the latter + * happens, -ERESTARTSYS is returned. + * + * Can only be called from process context (for now). + * + * Returns 0 on success and an appropriate error value on failure. + */ +static inline int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len) +{ + u32 src = rpdev->src, dst = rpdev->dst; + + return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true); +} + +/** + * rpmsg_sendto() - send a message across to the remote processor, specify dst + * @rpdev: the rpmsg channel + * @data: payload of message + * @len: length of payload + * @dst: destination address + * + * This function sends @data of length @len to the remote @dst address. + * The message will be sent to the remote processor which the @rpdev + * channel belongs to, using @rpdev's source address. + * In case there are no TX buffers available, the function will block until + * one becomes available, or a timeout of 15 seconds elapses. When the latter + * happens, -ERESTARTSYS is returned. + * + * Can only be called from process context (for now). + * + * Returns 0 on success and an appropriate error value on failure. + */ +static inline +int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst) +{ + u32 src = rpdev->src; + + return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true); +} + +/** + * rpmsg_send_offchannel() - send a message using explicit src/dst addresses + * @rpdev: the rpmsg channel + * @src: source address + * @dst: destination address + * @data: payload of message + * @len: length of payload + * + * This function sends @data of length @len to the remote @dst address, + * and uses @src as the source address. + * The message will be sent to the remote processor which the @rpdev + * channel belongs to. + * In case there are no TX buffers available, the function will block until + * one becomes available, or a timeout of 15 seconds elapses. When the latter + * happens, -ERESTARTSYS is returned. + * + * Can only be called from process context (for now). + * + * Returns 0 on success and an appropriate error value on failure. 
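 *
 * For illustration only (my_src, my_dst and msg are hypothetical values
 * owned by the caller), this whole family of send helpers is used the
 * same way, checking the return code:
 *
 *	int err;
 *
 *	err = rpmsg_send_offchannel(rpdev, my_src, my_dst, &msg, sizeof(msg));
 *	if (err)
 *		dev_err(&rpdev->dev, "send failed: %d\n", err);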
+ */ +static inline +int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, + void *data, int len) +{ + return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true); +} + +/** + * rpmsg_send() - send a message across to the remote processor + * @rpdev: the rpmsg channel + * @data: payload of message + * @len: length of payload + * + * This function sends @data of length @len on the @rpdev channel. + * The message will be sent to the remote processor which the @rpdev + * channel belongs to, using @rpdev's source and destination addresses. + * In case there are no TX buffers available, the function will immediately + * return -ENOMEM without waiting until one becomes available. + * + * Can only be called from process context (for now). + * + * Returns 0 on success and an appropriate error value on failure. + */ +static inline +int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len) +{ + u32 src = rpdev->src, dst = rpdev->dst; + + return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false); +} + +/** + * rpmsg_sendto() - send a message across to the remote processor, specify dst + * @rpdev: the rpmsg channel + * @data: payload of message + * @len: length of payload + * @dst: destination address + * + * This function sends @data of length @len to the remote @dst address. + * The message will be sent to the remote processor which the @rpdev + * channel belongs to, using @rpdev's source address. + * In case there are no TX buffers available, the function will immediately + * return -ENOMEM without waiting until one becomes available. + * + * Can only be called from process context (for now). + * + * Returns 0 on success and an appropriate error value on failure. + */ +static inline +int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst) +{ + u32 src = rpdev->src; + + return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false); +} + +/** + * rpmsg_send_offchannel() - send a message using explicit src/dst addresses + * @rpdev: the rpmsg channel + * @src: source address + * @dst: destination address + * @data: payload of message + * @len: length of payload + * + * This function sends @data of length @len to the remote @dst address, + * and uses @src as the source address. + * The message will be sent to the remote processor which the @rpdev + * channel belongs to. + * In case there are no TX buffers available, the function will immediately + * return -ENOMEM without waiting until one becomes available. + * + * Can only be called from process context (for now). + * + * Returns 0 on success and an appropriate error value on failure. 
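 *
 * For illustration only (retry_later() is a hypothetical back-off path):
 * because the rpmsg_trysend*() variants never wait for a TX buffer, a
 * caller that must not block can simply check for -ENOMEM:
 *
 *	if (rpmsg_trysend(rpdev, &msg, sizeof(msg)) == -ENOMEM)
 *		retry_later();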
+ */ +static inline +int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, + void *data, int len) +{ + return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false); +} + +#endif /* _LINUX_RPMSG_H */ diff --git a/include/linux/rslib.h b/include/linux/rslib.h new file mode 100644 index 00000000..746580c1 --- /dev/null +++ b/include/linux/rslib.h @@ -0,0 +1,109 @@ +/* + * include/linux/rslib.h + * + * Overview: + * Generic Reed Solomon encoder / decoder library + * + * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) + * + * RS code lifted from reed solomon library written by Phil Karn + * Copyright 2002 Phil Karn, KA9Q + * + * $Id: rslib.h,v 1.4 2005/11/07 11:14:52 gleixner Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _RSLIB_H_ +#define _RSLIB_H_ + +#include <linux/list.h> + +/** + * struct rs_control - rs control structure + * + * @mm: Bits per symbol + * @nn: Symbols per block (= (1<<mm)-1) + * @alpha_to: log lookup table + * @index_of: Antilog lookup table + * @genpoly: Generator polynomial + * @nroots: Number of generator roots = number of parity symbols + * @fcr: First consecutive root, index form + * @prim: Primitive element, index form + * @iprim: prim-th root of 1, index form + * @gfpoly: The primitive generator polynominal + * @gffunc: Function to generate the field, if non-canonical representation + * @users: Users of this structure + * @list: List entry for the rs control list +*/ +struct rs_control { + int mm; + int nn; + uint16_t *alpha_to; + uint16_t *index_of; + uint16_t *genpoly; + int nroots; + int fcr; + int prim; + int iprim; + int gfpoly; + int (*gffunc)(int); + int users; + struct list_head list; +}; + +/* General purpose RS codec, 8-bit data width, symbol width 1-15 bit */ +#ifdef CONFIG_REED_SOLOMON_ENC8 +int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par, + uint16_t invmsk); +#endif +#ifdef CONFIG_REED_SOLOMON_DEC8 +int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len, + uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, + uint16_t *corr); +#endif + +/* General purpose RS codec, 16-bit data width, symbol width 1-15 bit */ +#ifdef CONFIG_REED_SOLOMON_ENC16 +int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par, + uint16_t invmsk); +#endif +#ifdef CONFIG_REED_SOLOMON_DEC16 +int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len, + uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, + uint16_t *corr); +#endif + +/* Create or get a matching rs control structure */ +struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, + int nroots); +struct rs_control *init_rs_non_canonical(int symsize, int (*func)(int), + int fcr, int prim, int nroots); + +/* Release a rs control structure */ +void free_rs(struct rs_control *rs); + +/** modulo replacement for galois field arithmetics + * + * @rs: the rs control structure + * @x: the value to reduce + * + * where + * rs->mm = number of bits per symbol + * rs->nn = (2^rs->mm) - 1 + * + * Simple arithmetic modulo would return a wrong result for values + * >= 3 * rs->nn +*/ +static inline int rs_modnn(struct rs_control *rs, int x) +{ + while (x >= rs->nn) { + x -= rs->nn; + x = (x >> rs->mm) + (x & rs->nn); + } + return x; +} + +#endif diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h new file mode 
100644 index 00000000..e55d82ce --- /dev/null +++ b/include/linux/rtc-v3020.h @@ -0,0 +1,41 @@ +/* + * v3020.h - Registers definition and platform data structure for the v3020 RTC. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006, 8D Technologies inc. + */ +#ifndef __LINUX_V3020_H +#define __LINUX_V3020_H + +/* The v3020 has only one data pin but which one + * is used depends on the board. */ +struct v3020_platform_data { + int leftshift; /* (1<<(leftshift)) & readl() */ + + unsigned int use_gpio:1; + unsigned int gpio_cs; + unsigned int gpio_wr; + unsigned int gpio_rd; + unsigned int gpio_io; +}; + +#define V3020_STATUS_0 0x00 +#define V3020_STATUS_1 0x01 +#define V3020_SECONDS 0x02 +#define V3020_MINUTES 0x03 +#define V3020_HOURS 0x04 +#define V3020_MONTH_DAY 0x05 +#define V3020_MONTH 0x06 +#define V3020_YEAR 0x07 +#define V3020_WEEK_DAY 0x08 +#define V3020_WEEK 0x09 + +#define V3020_IS_COMMAND(val) ((val)>=0x0E) + +#define V3020_CMD_RAM2CLOCK 0x0E +#define V3020_CMD_CLOCK2RAM 0x0F + +#endif /* __LINUX_V3020_H */ diff --git a/include/linux/rtc.h b/include/linux/rtc.h new file mode 100644 index 00000000..fcabfb48 --- /dev/null +++ b/include/linux/rtc.h @@ -0,0 +1,284 @@ +/* + * Generic RTC interface. + * This version contains the part of the user interface to the Real Time Clock + * service. It is used with both the legacy mc146818 and also EFI + * Struct rtc_time and first 12 ioctl by Paul Gortmaker, 1996 - separated out + * from <linux/mc146818rtc.h> to this file for 2.4 kernels. + * + * Copyright (C) 1999 Hewlett-Packard Co. + * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com> + */ +#ifndef _LINUX_RTC_H_ +#define _LINUX_RTC_H_ + +/* + * The struct used to pass data via the following ioctl. Similar to the + * struct tm in <time.h>, but it needs to be here so that the kernel + * source is self contained, allowing cross-compiles, etc. etc. + */ + +struct rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +/* + * This data structure is inspired by the EFI (v0.92) wakeup + * alarm API. + */ +struct rtc_wkalrm { + unsigned char enabled; /* 0 = alarm disabled, 1 = alarm enabled */ + unsigned char pending; /* 0 = alarm not pending, 1 = alarm pending */ + struct rtc_time time; /* time the alarm is set to */ +}; + +/* + * Data structure to control PLL correction some better RTC feature + * pll_value is used to get or set current value of correction, + * the rest of the struct is used to query HW capabilities. + * This is modeled after the RTC used in Q40/Q60 computers but + * should be sufficiently flexible for other devices + * + * +ve pll_value means clock will run faster by + * pll_value*pll_posmult/pll_clock + * -ve pll_value means clock will run slower by + * pll_value*pll_negmult/pll_clock + */ + +struct rtc_pll_info { + int pll_ctrl; /* placeholder for fancier control */ + int pll_value; /* get/set correction value */ + int pll_max; /* max +ve (faster) adjustment value */ + int pll_min; /* max -ve (slower) adjustment value */ + int pll_posmult; /* factor for +ve correction */ + int pll_negmult; /* factor for -ve correction */ + long pll_clock; /* base PLL frequency */ +}; + +/* + * ioctl calls that are permitted to the /dev/rtc interface, if + * any of the RTC drivers are enabled. 
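 *
 * Purely as an illustration of this interface (a user-space sketch, not
 * part of the original header), reading the current time boils down to:
 *
 *	int fd = open("/dev/rtc0", O_RDONLY);
 *	struct rtc_time tm;
 *
 *	if (fd >= 0 && ioctl(fd, RTC_RD_TIME, &tm) == 0)
 *		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
 *		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 *		       tm.tm_hour, tm.tm_min, tm.tm_sec);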
+ */ + +#define RTC_AIE_ON _IO('p', 0x01) /* Alarm int. enable on */ +#define RTC_AIE_OFF _IO('p', 0x02) /* ... off */ +#define RTC_UIE_ON _IO('p', 0x03) /* Update int. enable on */ +#define RTC_UIE_OFF _IO('p', 0x04) /* ... off */ +#define RTC_PIE_ON _IO('p', 0x05) /* Periodic int. enable on */ +#define RTC_PIE_OFF _IO('p', 0x06) /* ... off */ +#define RTC_WIE_ON _IO('p', 0x0f) /* Watchdog int. enable on */ +#define RTC_WIE_OFF _IO('p', 0x10) /* ... off */ + +#define RTC_ALM_SET _IOW('p', 0x07, struct rtc_time) /* Set alarm time */ +#define RTC_ALM_READ _IOR('p', 0x08, struct rtc_time) /* Read alarm time */ +#define RTC_RD_TIME _IOR('p', 0x09, struct rtc_time) /* Read RTC time */ +#define RTC_SET_TIME _IOW('p', 0x0a, struct rtc_time) /* Set RTC time */ +#define RTC_IRQP_READ _IOR('p', 0x0b, unsigned long) /* Read IRQ rate */ +#define RTC_IRQP_SET _IOW('p', 0x0c, unsigned long) /* Set IRQ rate */ +#define RTC_EPOCH_READ _IOR('p', 0x0d, unsigned long) /* Read epoch */ +#define RTC_EPOCH_SET _IOW('p', 0x0e, unsigned long) /* Set epoch */ + +#define RTC_WKALM_SET _IOW('p', 0x0f, struct rtc_wkalrm)/* Set wakeup alarm*/ +#define RTC_WKALM_RD _IOR('p', 0x10, struct rtc_wkalrm)/* Get wakeup alarm*/ + +#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */ +#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ + +/* interrupt flags */ +#define RTC_IRQF 0x80 /* Any of the following is active */ +#define RTC_PF 0x40 /* Periodic interrupt */ +#define RTC_AF 0x20 /* Alarm interrupt */ +#define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */ + + +#define RTC_MAX_FREQ 8192 + +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/interrupt.h> + +extern int rtc_month_days(unsigned int month, unsigned int year); +extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); +extern int rtc_valid_tm(struct rtc_time *tm); +extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); +extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); +ktime_t rtc_tm_to_ktime(struct rtc_time tm); +struct rtc_time rtc_ktime_to_tm(ktime_t kt); + + +#include <linux/device.h> +#include <linux/seq_file.h> +#include <linux/cdev.h> +#include <linux/poll.h> +#include <linux/mutex.h> +#include <linux/timerqueue.h> +#include <linux/workqueue.h> + +extern struct class *rtc_class; + +/* + * For these RTC methods the device parameter is the physical device + * on whatever bus holds the hardware (I2C, Platform, SPI, etc), which + * was passed to rtc_device_register(). Its driver_data normally holds + * device state, including the rtc_device pointer for the RTC. + * + * Most of these methods are called with rtc_device.ops_lock held, + * through the rtc_*(struct rtc_device *, ...) calls. + * + * The (current) exceptions are mostly filesystem hooks: + * - the proc() hook for procfs + * - non-ioctl() chardev hooks: open(), release(), read_callback() + * + * REVISIT those periodic irq calls *do* have ops_lock when they're + * issued through ioctl() ... 
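 *
 * For illustration only (foo_rtc_read_time, foo_rtc_set_time and "foo-rtc"
 * are hypothetical), a driver typically fills in just the methods its
 * hardware supports and hands them to rtc_device_register():
 *
 *	static const struct rtc_class_ops foo_rtc_ops = {
 *		.read_time	= foo_rtc_read_time,
 *		.set_time	= foo_rtc_set_time,
 *	};
 *
 *	rtc = rtc_device_register("foo-rtc", &pdev->dev, &foo_rtc_ops,
 *				  THIS_MODULE);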
+ */ +struct rtc_class_ops { + int (*open)(struct device *); + void (*release)(struct device *); + int (*ioctl)(struct device *, unsigned int, unsigned long); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*set_mmss)(struct device *, unsigned long secs); + int (*read_callback)(struct device *, int data); + int (*alarm_irq_enable)(struct device *, unsigned int enabled); +}; + +#define RTC_DEVICE_NAME_SIZE 20 +typedef struct rtc_task { + void (*func)(void *private_data); + void *private_data; +} rtc_task_t; + + +struct rtc_timer { + struct rtc_task task; + struct timerqueue_node node; + ktime_t period; + int enabled; +}; + + +/* flags */ +#define RTC_DEV_BUSY 0 + +struct rtc_device +{ + struct device dev; + struct module *owner; + + int id; + char name[RTC_DEVICE_NAME_SIZE]; + + const struct rtc_class_ops *ops; + struct mutex ops_lock; + + struct cdev char_dev; + unsigned long flags; + + unsigned long irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + + struct rtc_task *irq_task; + spinlock_t irq_task_lock; + int irq_freq; + int max_user_freq; + + struct timerqueue_head timerqueue; + struct rtc_timer aie_timer; + struct rtc_timer uie_rtctimer; + struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ + int pie_enabled; + struct work_struct irqwork; + /* Some hardware can't support UIE mode */ + int uie_unsupported; + +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + struct work_struct uie_task; + struct timer_list uie_timer; + /* Those fields are protected by rtc->irq_lock */ + unsigned int oldsecs; + unsigned int uie_irq_active:1; + unsigned int stop_uie_polling:1; + unsigned int uie_task_active:1; + unsigned int uie_timer_active:1; +#endif +}; +#define to_rtc_device(d) container_of(d, struct rtc_device, dev) + +extern struct rtc_device *rtc_device_register(const char *name, + struct device *dev, + const struct rtc_class_ops *ops, + struct module *owner); +extern void rtc_device_unregister(struct rtc_device *rtc); + +extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); +extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); +extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); +int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); +extern int rtc_read_alarm(struct rtc_device *rtc, + struct rtc_wkalrm *alrm); +extern int rtc_set_alarm(struct rtc_device *rtc, + struct rtc_wkalrm *alrm); +extern int rtc_initialize_alarm(struct rtc_device *rtc, + struct rtc_wkalrm *alrm); +extern void rtc_update_irq(struct rtc_device *rtc, + unsigned long num, unsigned long events); + +extern struct rtc_device *rtc_class_open(char *name); +extern void rtc_class_close(struct rtc_device *rtc); + +extern int rtc_irq_register(struct rtc_device *rtc, + struct rtc_task *task); +extern void rtc_irq_unregister(struct rtc_device *rtc, + struct rtc_task *task); +extern int rtc_irq_set_state(struct rtc_device *rtc, + struct rtc_task *task, int enabled); +extern int rtc_irq_set_freq(struct rtc_device *rtc, + struct rtc_task *task, int freq); +extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, + 
unsigned int enabled); + +void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); +void rtc_aie_update_irq(void *private); +void rtc_uie_update_irq(void *private); +enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); + +int rtc_register(rtc_task_t *task); +int rtc_unregister(rtc_task_t *task); +int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); + +void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data); +int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer, + ktime_t expires, ktime_t period); +int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer); +void rtc_timer_do_work(struct work_struct *work); + +static inline bool is_leap_year(unsigned int year) +{ + return (!(year % 4) && (year % 100)) || !(year % 400); +} + +#ifdef CONFIG_RTC_HCTOSYS +extern int rtc_hctosys_ret; +#else +#define rtc_hctosys_ret -ENODEV +#endif + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_RTC_H_ */ diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h new file mode 100644 index 00000000..6fc96145 --- /dev/null +++ b/include/linux/rtc/m48t59.h @@ -0,0 +1,64 @@ +/* + * include/linux/rtc/m48t59.h + * + * Definitions for the platform data of m48t59 RTC chip driver. + * + * Copyright (c) 2007 Wind River Systems, Inc. + * + * Mark Zhan <rongkai.zhan@windriver.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_RTC_M48T59_H_ +#define _LINUX_RTC_M48T59_H_ + +/* + * M48T59 Register Offset + */ +#define M48T59_YEAR 0xf +#define M48T59_MONTH 0xe +#define M48T59_MDAY 0xd /* Day of Month */ +#define M48T59_WDAY 0xc /* Day of Week */ +#define M48T59_WDAY_CB 0x20 /* Century Bit */ +#define M48T59_WDAY_CEB 0x10 /* Century Enable Bit */ +#define M48T59_HOUR 0xb +#define M48T59_MIN 0xa +#define M48T59_SEC 0x9 +#define M48T59_CNTL 0x8 +#define M48T59_CNTL_READ 0x40 +#define M48T59_CNTL_WRITE 0x80 +#define M48T59_WATCHDOG 0x7 +#define M48T59_INTR 0x6 +#define M48T59_INTR_AFE 0x80 /* Alarm Interrupt Enable */ +#define M48T59_INTR_ABE 0x20 +#define M48T59_ALARM_DATE 0x5 +#define M48T59_ALARM_HOUR 0x4 +#define M48T59_ALARM_MIN 0x3 +#define M48T59_ALARM_SEC 0x2 +#define M48T59_UNUSED 0x1 +#define M48T59_FLAGS 0x0 +#define M48T59_FLAGS_WDT 0x80 /* watchdog timer expired */ +#define M48T59_FLAGS_AF 0x40 /* alarm */ +#define M48T59_FLAGS_BF 0x10 /* low battery */ + +#define M48T59RTC_TYPE_M48T59 0 /* to keep compatibility */ +#define M48T59RTC_TYPE_M48T02 1 +#define M48T59RTC_TYPE_M48T08 2 + +struct m48t59_plat_data { + /* The method to access M48T59 registers */ + void (*write_byte)(struct device *dev, u32 ofs, u8 val); + unsigned char (*read_byte)(struct device *dev, u32 ofs); + + int type; /* RTC model */ + + /* ioaddr mapped externally */ + void __iomem *ioaddr; + /* offset to RTC registers, automatically set according to the type */ + unsigned int offset; +}; + +#endif /* _LINUX_RTC_M48T59_H_ */ diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h new file mode 100644 index 00000000..2c92e1c8 --- /dev/null +++ b/include/linux/rtc/sirfsoc_rtciobrg.h @@ -0,0 +1,18 @@ +/* + * RTC I/O Bridge interfaces for CSR SiRFprimaII + * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module + * + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. 
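 *
 * For illustration only (FOO_RTC_REG and FOO_RTC_EN are a hypothetical
 * register offset and bit), RTC-domain registers are reached through the
 * bridge with an ordinary read-modify-write:
 *
 *	u32 val = sirfsoc_rtc_iobrg_readl(FOO_RTC_REG);
 *
 *	sirfsoc_rtc_iobrg_writel(val | FOO_RTC_EN, FOO_RTC_REG);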
+ */ +#ifndef _SIRFSOC_RTC_IOBRG_H_ +#define _SIRFSOC_RTC_IOBRG_H_ + +extern void sirfsoc_rtc_iobrg_besyncing(void); + +extern u32 sirfsoc_rtc_iobrg_readl(u32 addr); + +extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr); + +#endif diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h new file mode 100644 index 00000000..de171342 --- /dev/null +++ b/include/linux/rtmutex.h @@ -0,0 +1,109 @@ +/* + * RT Mutexes: blocking mutual exclusion locks with PI support + * + * started by Ingo Molnar and Thomas Gleixner: + * + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> + * + * This file contains the public data structure and API definitions. + */ + +#ifndef __LINUX_RT_MUTEX_H +#define __LINUX_RT_MUTEX_H + +#include <linux/linkage.h> +#include <linux/plist.h> +#include <linux/spinlock_types.h> + +extern int max_lock_depth; /* for sysctl */ + +/** + * The rt_mutex structure + * + * @wait_lock: spinlock to protect the structure + * @wait_list: pilist head to enqueue waiters in priority order + * @owner: the mutex owner + */ +struct rt_mutex { + raw_spinlock_t wait_lock; + struct plist_head wait_list; + struct task_struct *owner; +#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; + const char *name, *file; + int line; + void *magic; +#endif +}; + +struct rt_mutex_waiter; +struct hrtimer_sleeper; + +#ifdef CONFIG_DEBUG_RT_MUTEXES + extern int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len); + extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); +#else + static inline int rt_mutex_debug_check_no_locks_freed(const void *from, + unsigned long len) + { + return 0; + } +# define rt_mutex_debug_check_no_locks_held(task) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) + extern void rt_mutex_debug_task_free(struct task_struct *tsk); +#else +# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) +# define rt_mutex_debug_task_free(t) do { } while (0) +#endif + +#define __RT_MUTEX_INITIALIZER(mutexname) \ + { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \ + , .owner = NULL \ + __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} + +#define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) + +/** + * rt_mutex_is_locked - is the mutex locked + * @lock: the mutex to be queried + * + * Returns 1 if the mutex is locked, 0 if unlocked. 
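 *
 * For illustration only (my_lock and its critical section are
 * hypothetical), the API is used much like an ordinary mutex:
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	... critical section: rt_mutex_is_locked(&my_lock) now returns 1 ...
 *	rt_mutex_unlock(&my_lock);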
+ */ +static inline int rt_mutex_is_locked(struct rt_mutex *lock) +{ + return lock->owner != NULL; +} + +extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); +extern void rt_mutex_destroy(struct rt_mutex *lock); + +extern void rt_mutex_lock(struct rt_mutex *lock); +extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, + int detect_deadlock); +extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout, + int detect_deadlock); + +extern int rt_mutex_trylock(struct rt_mutex *lock); + +extern void rt_mutex_unlock(struct rt_mutex *lock); + +#ifdef CONFIG_RT_MUTEXES +# define INIT_RT_MUTEXES(tsk) \ + .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters), \ + INIT_RT_MUTEX_DEBUG(tsk) +#else +# define INIT_RT_MUTEXES(tsk) +#endif + +#endif diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h new file mode 100644 index 00000000..577592ea --- /dev/null +++ b/include/linux/rtnetlink.h @@ -0,0 +1,807 @@ +#ifndef __LINUX_RTNETLINK_H +#define __LINUX_RTNETLINK_H + +#include <linux/types.h> +#include <linux/netlink.h> +#include <linux/if_link.h> +#include <linux/if_addr.h> +#include <linux/neighbour.h> + +/* rtnetlink families. Values up to 127 are reserved for real address + * families, values above 128 may be used arbitrarily. + */ +#define RTNL_FAMILY_IPMR 128 +#define RTNL_FAMILY_IP6MR 129 +#define RTNL_FAMILY_MAX 129 + +/**** + * Routing/neighbour discovery messages. + ****/ + +/* Types of messages */ + +enum { + RTM_BASE = 16, +#define RTM_BASE RTM_BASE + + RTM_NEWLINK = 16, +#define RTM_NEWLINK RTM_NEWLINK + RTM_DELLINK, +#define RTM_DELLINK RTM_DELLINK + RTM_GETLINK, +#define RTM_GETLINK RTM_GETLINK + RTM_SETLINK, +#define RTM_SETLINK RTM_SETLINK + + RTM_NEWADDR = 20, +#define RTM_NEWADDR RTM_NEWADDR + RTM_DELADDR, +#define RTM_DELADDR RTM_DELADDR + RTM_GETADDR, +#define RTM_GETADDR RTM_GETADDR + + RTM_NEWROUTE = 24, +#define RTM_NEWROUTE RTM_NEWROUTE + RTM_DELROUTE, +#define RTM_DELROUTE RTM_DELROUTE + RTM_GETROUTE, +#define RTM_GETROUTE RTM_GETROUTE + + RTM_NEWNEIGH = 28, +#define RTM_NEWNEIGH RTM_NEWNEIGH + RTM_DELNEIGH, +#define RTM_DELNEIGH RTM_DELNEIGH + RTM_GETNEIGH, +#define RTM_GETNEIGH RTM_GETNEIGH + + RTM_NEWRULE = 32, +#define RTM_NEWRULE RTM_NEWRULE + RTM_DELRULE, +#define RTM_DELRULE RTM_DELRULE + RTM_GETRULE, +#define RTM_GETRULE RTM_GETRULE + + RTM_NEWQDISC = 36, +#define RTM_NEWQDISC RTM_NEWQDISC + RTM_DELQDISC, +#define RTM_DELQDISC RTM_DELQDISC + RTM_GETQDISC, +#define RTM_GETQDISC RTM_GETQDISC + + RTM_NEWTCLASS = 40, +#define RTM_NEWTCLASS RTM_NEWTCLASS + RTM_DELTCLASS, +#define RTM_DELTCLASS RTM_DELTCLASS + RTM_GETTCLASS, +#define RTM_GETTCLASS RTM_GETTCLASS + + RTM_NEWTFILTER = 44, +#define RTM_NEWTFILTER RTM_NEWTFILTER + RTM_DELTFILTER, +#define RTM_DELTFILTER RTM_DELTFILTER + RTM_GETTFILTER, +#define RTM_GETTFILTER RTM_GETTFILTER + + RTM_NEWACTION = 48, +#define RTM_NEWACTION RTM_NEWACTION + RTM_DELACTION, +#define RTM_DELACTION RTM_DELACTION + RTM_GETACTION, +#define RTM_GETACTION RTM_GETACTION + + RTM_NEWPREFIX = 52, +#define RTM_NEWPREFIX RTM_NEWPREFIX + + RTM_GETMULTICAST = 58, +#define RTM_GETMULTICAST RTM_GETMULTICAST + + RTM_GETANYCAST = 62, +#define RTM_GETANYCAST RTM_GETANYCAST + + RTM_NEWNEIGHTBL = 64, +#define RTM_NEWNEIGHTBL RTM_NEWNEIGHTBL + RTM_GETNEIGHTBL = 66, +#define RTM_GETNEIGHTBL RTM_GETNEIGHTBL + RTM_SETNEIGHTBL, +#define RTM_SETNEIGHTBL RTM_SETNEIGHTBL + + RTM_NEWNDUSEROPT = 68, +#define RTM_NEWNDUSEROPT RTM_NEWNDUSEROPT + + RTM_NEWADDRLABEL = 72, +#define RTM_NEWADDRLABEL RTM_NEWADDRLABEL + 
RTM_DELADDRLABEL, +#define RTM_DELADDRLABEL RTM_DELADDRLABEL + RTM_GETADDRLABEL, +#define RTM_GETADDRLABEL RTM_GETADDRLABEL + + RTM_GETDCB = 78, +#define RTM_GETDCB RTM_GETDCB + RTM_SETDCB, +#define RTM_SETDCB RTM_SETDCB + + __RTM_MAX, +#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) +}; + +#define RTM_NR_MSGTYPES (RTM_MAX + 1 - RTM_BASE) +#define RTM_NR_FAMILIES (RTM_NR_MSGTYPES >> 2) +#define RTM_FAM(cmd) (((cmd) - RTM_BASE) >> 2) + +/* + Generic structure for encapsulation of optional route information. + It is reminiscent of sockaddr, but with sa_family replaced + with attribute type. + */ + +struct rtattr { + unsigned short rta_len; + unsigned short rta_type; +}; + +/* Macros to handle rtattributes */ + +#define RTA_ALIGNTO 4 +#define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) ) +#define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \ + (rta)->rta_len >= sizeof(struct rtattr) && \ + (rta)->rta_len <= (len)) +#define RTA_NEXT(rta,attrlen) ((attrlen) -= RTA_ALIGN((rta)->rta_len), \ + (struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len))) +#define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len)) +#define RTA_SPACE(len) RTA_ALIGN(RTA_LENGTH(len)) +#define RTA_DATA(rta) ((void*)(((char*)(rta)) + RTA_LENGTH(0))) +#define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0)) + + + + +/****************************************************************************** + * Definitions used in routing table administration. + ****/ + +struct rtmsg { + unsigned char rtm_family; + unsigned char rtm_dst_len; + unsigned char rtm_src_len; + unsigned char rtm_tos; + + unsigned char rtm_table; /* Routing table id */ + unsigned char rtm_protocol; /* Routing protocol; see below */ + unsigned char rtm_scope; /* See below */ + unsigned char rtm_type; /* See below */ + + unsigned rtm_flags; +}; + +/* rtm_type */ + +enum { + RTN_UNSPEC, + RTN_UNICAST, /* Gateway or direct route */ + RTN_LOCAL, /* Accept locally */ + RTN_BROADCAST, /* Accept locally as broadcast, + send as broadcast */ + RTN_ANYCAST, /* Accept locally as broadcast, + but send as unicast */ + RTN_MULTICAST, /* Multicast route */ + RTN_BLACKHOLE, /* Drop */ + RTN_UNREACHABLE, /* Destination is unreachable */ + RTN_PROHIBIT, /* Administratively prohibited */ + RTN_THROW, /* Not in this table */ + RTN_NAT, /* Translate this address */ + RTN_XRESOLVE, /* Use external resolver */ + __RTN_MAX +}; + +#define RTN_MAX (__RTN_MAX - 1) + + +/* rtm_protocol */ + +#define RTPROT_UNSPEC 0 +#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects; + not used by current IPv4 */ +#define RTPROT_KERNEL 2 /* Route installed by kernel */ +#define RTPROT_BOOT 3 /* Route installed during boot */ +#define RTPROT_STATIC 4 /* Route installed by administrator */ + +/* Values of protocol >= RTPROT_STATIC are not interpreted by kernel; + they are just passed from user and back as is. + It will be used by hypothetical multiple routing daemons. + Note that protocol values should be standardized in order to + avoid conflicts. + */ + +#define RTPROT_GATED 8 /* Apparently, GateD */ +#define RTPROT_RA 9 /* RDISC/ND router advertisements */ +#define RTPROT_MRT 10 /* Merit MRT */ +#define RTPROT_ZEBRA 11 /* Zebra */ +#define RTPROT_BIRD 12 /* BIRD */ +#define RTPROT_DNROUTED 13 /* DECnet routing daemon */ +#define RTPROT_XORP 14 /* XORP */ +#define RTPROT_NTK 15 /* Netsukuku */ +#define RTPROT_DHCP 16 /* DHCP client */ + +/* rtm_scope + + Really it is not scope, but sort of distance to the destination. 
+ NOWHERE are reserved for not existing destinations, HOST is our + local addresses, LINK are destinations, located on directly attached + link and UNIVERSE is everywhere in the Universe. + + Intermediate values are also possible f.e. interior routes + could be assigned a value between UNIVERSE and LINK. +*/ + +enum rt_scope_t { + RT_SCOPE_UNIVERSE=0, +/* User defined values */ + RT_SCOPE_SITE=200, + RT_SCOPE_LINK=253, + RT_SCOPE_HOST=254, + RT_SCOPE_NOWHERE=255 +}; + +/* rtm_flags */ + +#define RTM_F_NOTIFY 0x100 /* Notify user of route change */ +#define RTM_F_CLONED 0x200 /* This route is cloned */ +#define RTM_F_EQUALIZE 0x400 /* Multipath equalizer: NI */ +#define RTM_F_PREFIX 0x800 /* Prefix addresses */ + +/* Reserved table identifiers */ + +enum rt_class_t { + RT_TABLE_UNSPEC=0, +/* User defined values */ + RT_TABLE_COMPAT=252, + RT_TABLE_DEFAULT=253, + RT_TABLE_MAIN=254, + RT_TABLE_LOCAL=255, + RT_TABLE_MAX=0xFFFFFFFF +}; + + +/* Routing message attributes */ + +enum rtattr_type_t { + RTA_UNSPEC, + RTA_DST, + RTA_SRC, + RTA_IIF, + RTA_OIF, + RTA_GATEWAY, + RTA_PRIORITY, + RTA_PREFSRC, + RTA_METRICS, + RTA_MULTIPATH, + RTA_PROTOINFO, /* no longer used */ + RTA_FLOW, + RTA_CACHEINFO, + RTA_SESSION, /* no longer used */ + RTA_MP_ALGO, /* no longer used */ + RTA_TABLE, + RTA_MARK, + __RTA_MAX +}; + +#define RTA_MAX (__RTA_MAX - 1) + +#define RTM_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct rtmsg)))) +#define RTM_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct rtmsg)) + +/* RTM_MULTIPATH --- array of struct rtnexthop. + * + * "struct rtnexthop" describes all necessary nexthop information, + * i.e. parameters of path to a destination via this nexthop. + * + * At the moment it is impossible to set different prefsrc, mtu, window + * and rtt for different paths from multipath. 
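+ *
+ * A minimal sketch of walking that array, assuming "mp" is the
+ * RTA_MULTIPATH attribute already located by the caller (the RTNH_*
+ * helpers are defined just below):
+ *
+ *     struct rtnexthop *nh = RTA_DATA(mp);
+ *     int len = RTA_PAYLOAD(mp);
+ *
+ *     while (RTNH_OK(nh, len)) {
+ *             ... nh->rtnh_ifindex, nh->rtnh_hops, RTNH_DATA(nh) ...
+ *             len -= RTNH_ALIGN(nh->rtnh_len);
+ *             nh = RTNH_NEXT(nh);
+ *     }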
+ */ + +struct rtnexthop { + unsigned short rtnh_len; + unsigned char rtnh_flags; + unsigned char rtnh_hops; + int rtnh_ifindex; +}; + +/* rtnh_flags */ + +#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */ +#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */ +#define RTNH_F_ONLINK 4 /* Gateway is forced on link */ + +/* Macros to handle hexthops */ + +#define RTNH_ALIGNTO 4 +#define RTNH_ALIGN(len) ( ((len)+RTNH_ALIGNTO-1) & ~(RTNH_ALIGNTO-1) ) +#define RTNH_OK(rtnh,len) ((rtnh)->rtnh_len >= sizeof(struct rtnexthop) && \ + ((int)(rtnh)->rtnh_len) <= (len)) +#define RTNH_NEXT(rtnh) ((struct rtnexthop*)(((char*)(rtnh)) + RTNH_ALIGN((rtnh)->rtnh_len))) +#define RTNH_LENGTH(len) (RTNH_ALIGN(sizeof(struct rtnexthop)) + (len)) +#define RTNH_SPACE(len) RTNH_ALIGN(RTNH_LENGTH(len)) +#define RTNH_DATA(rtnh) ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0))) + +/* RTM_CACHEINFO */ + +struct rta_cacheinfo { + __u32 rta_clntref; + __u32 rta_lastuse; + __s32 rta_expires; + __u32 rta_error; + __u32 rta_used; + +#define RTNETLINK_HAVE_PEERINFO 1 + __u32 rta_id; + __u32 rta_ts; + __u32 rta_tsage; +}; + +/* RTM_METRICS --- array of struct rtattr with types of RTAX_* */ + +enum { + RTAX_UNSPEC, +#define RTAX_UNSPEC RTAX_UNSPEC + RTAX_LOCK, +#define RTAX_LOCK RTAX_LOCK + RTAX_MTU, +#define RTAX_MTU RTAX_MTU + RTAX_WINDOW, +#define RTAX_WINDOW RTAX_WINDOW + RTAX_RTT, +#define RTAX_RTT RTAX_RTT + RTAX_RTTVAR, +#define RTAX_RTTVAR RTAX_RTTVAR + RTAX_SSTHRESH, +#define RTAX_SSTHRESH RTAX_SSTHRESH + RTAX_CWND, +#define RTAX_CWND RTAX_CWND + RTAX_ADVMSS, +#define RTAX_ADVMSS RTAX_ADVMSS + RTAX_REORDERING, +#define RTAX_REORDERING RTAX_REORDERING + RTAX_HOPLIMIT, +#define RTAX_HOPLIMIT RTAX_HOPLIMIT + RTAX_INITCWND, +#define RTAX_INITCWND RTAX_INITCWND + RTAX_FEATURES, +#define RTAX_FEATURES RTAX_FEATURES + RTAX_RTO_MIN, +#define RTAX_RTO_MIN RTAX_RTO_MIN + RTAX_INITRWND, +#define RTAX_INITRWND RTAX_INITRWND + __RTAX_MAX +}; + +#define RTAX_MAX (__RTAX_MAX - 1) + +#define RTAX_FEATURE_ECN 0x00000001 +#define RTAX_FEATURE_SACK 0x00000002 +#define RTAX_FEATURE_TIMESTAMP 0x00000004 +#define RTAX_FEATURE_ALLFRAG 0x00000008 + +struct rta_session { + __u8 proto; + __u8 pad1; + __u16 pad2; + + union { + struct { + __u16 sport; + __u16 dport; + } ports; + + struct { + __u8 type; + __u8 code; + __u16 ident; + } icmpt; + + __u32 spi; + } u; +}; + +/**** + * General form of address family dependent message. + ****/ + +struct rtgenmsg { + unsigned char rtgen_family; +}; + +/***************************************************************** + * Link layer specific messages. + ****/ + +/* struct ifinfomsg + * passes link level specific information, not dependent + * on network protocol. 
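+ *
+ * A minimal sketch of reading such a message, assuming "nlh" is a
+ * received nlmsghdr of type RTM_NEWLINK (the NLMSG_* helpers come from
+ * <linux/netlink.h>, IFLA_* attribute types from <linux/if_link.h>):
+ *
+ *     struct ifinfomsg *ifi = NLMSG_DATA(nlh);
+ *     struct rtattr *rta = (struct rtattr *)
+ *                     ((char *)ifi + NLMSG_ALIGN(sizeof(*ifi)));
+ *     int len = NLMSG_PAYLOAD(nlh, sizeof(*ifi));
+ *
+ *     for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len))
+ *             ... rta->rta_type is an IFLA_* value, RTA_DATA(rta) its payload ...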
+ */ + +struct ifinfomsg { + unsigned char ifi_family; + unsigned char __ifi_pad; + unsigned short ifi_type; /* ARPHRD_* */ + int ifi_index; /* Link index */ + unsigned ifi_flags; /* IFF_* flags */ + unsigned ifi_change; /* IFF_* change mask */ +}; + +/******************************************************************** + * prefix information + ****/ + +struct prefixmsg { + unsigned char prefix_family; + unsigned char prefix_pad1; + unsigned short prefix_pad2; + int prefix_ifindex; + unsigned char prefix_type; + unsigned char prefix_len; + unsigned char prefix_flags; + unsigned char prefix_pad3; +}; + +enum +{ + PREFIX_UNSPEC, + PREFIX_ADDRESS, + PREFIX_CACHEINFO, + __PREFIX_MAX +}; + +#define PREFIX_MAX (__PREFIX_MAX - 1) + +struct prefix_cacheinfo { + __u32 preferred_time; + __u32 valid_time; +}; + + +/***************************************************************** + * Traffic control messages. + ****/ + +struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + unsigned short tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + __u32 tcm_info; +}; + +enum { + TCA_UNSPEC, + TCA_KIND, + TCA_OPTIONS, + TCA_STATS, + TCA_XSTATS, + TCA_RATE, + TCA_FCNT, + TCA_STATS2, + TCA_STAB, + __TCA_MAX +}; + +#define TCA_MAX (__TCA_MAX - 1) + +#define TCA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcmsg)))) +#define TCA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcmsg)) + +/******************************************************************** + * Neighbor Discovery userland options + ****/ + +struct nduseroptmsg { + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + unsigned short nduseropt_opts_len; /* Total length of options */ + int nduseropt_ifindex; + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + unsigned short nduseropt_pad2; + unsigned int nduseropt_pad3; + /* Followed by one or more ND options */ +}; + +enum { + NDUSEROPT_UNSPEC, + NDUSEROPT_SRCADDR, + __NDUSEROPT_MAX +}; + +#define NDUSEROPT_MAX (__NDUSEROPT_MAX - 1) + +#ifndef __KERNEL__ +/* RTnetlink multicast groups - backwards compatibility for userspace */ +#define RTMGRP_LINK 1 +#define RTMGRP_NOTIFY 2 +#define RTMGRP_NEIGH 4 +#define RTMGRP_TC 8 + +#define RTMGRP_IPV4_IFADDR 0x10 +#define RTMGRP_IPV4_MROUTE 0x20 +#define RTMGRP_IPV4_ROUTE 0x40 +#define RTMGRP_IPV4_RULE 0x80 + +#define RTMGRP_IPV6_IFADDR 0x100 +#define RTMGRP_IPV6_MROUTE 0x200 +#define RTMGRP_IPV6_ROUTE 0x400 +#define RTMGRP_IPV6_IFINFO 0x800 + +#define RTMGRP_DECnet_IFADDR 0x1000 +#define RTMGRP_DECnet_ROUTE 0x4000 + +#define RTMGRP_IPV6_PREFIX 0x20000 +#endif + +/* RTnetlink multicast groups */ +enum rtnetlink_groups { + RTNLGRP_NONE, +#define RTNLGRP_NONE RTNLGRP_NONE + RTNLGRP_LINK, +#define RTNLGRP_LINK RTNLGRP_LINK + RTNLGRP_NOTIFY, +#define RTNLGRP_NOTIFY RTNLGRP_NOTIFY + RTNLGRP_NEIGH, +#define RTNLGRP_NEIGH RTNLGRP_NEIGH + RTNLGRP_TC, +#define RTNLGRP_TC RTNLGRP_TC + RTNLGRP_IPV4_IFADDR, +#define RTNLGRP_IPV4_IFADDR RTNLGRP_IPV4_IFADDR + RTNLGRP_IPV4_MROUTE, +#define RTNLGRP_IPV4_MROUTE RTNLGRP_IPV4_MROUTE + RTNLGRP_IPV4_ROUTE, +#define RTNLGRP_IPV4_ROUTE RTNLGRP_IPV4_ROUTE + RTNLGRP_IPV4_RULE, +#define RTNLGRP_IPV4_RULE RTNLGRP_IPV4_RULE + RTNLGRP_IPV6_IFADDR, +#define RTNLGRP_IPV6_IFADDR RTNLGRP_IPV6_IFADDR + RTNLGRP_IPV6_MROUTE, +#define RTNLGRP_IPV6_MROUTE RTNLGRP_IPV6_MROUTE + RTNLGRP_IPV6_ROUTE, +#define RTNLGRP_IPV6_ROUTE RTNLGRP_IPV6_ROUTE + RTNLGRP_IPV6_IFINFO, +#define RTNLGRP_IPV6_IFINFO RTNLGRP_IPV6_IFINFO + RTNLGRP_DECnet_IFADDR, +#define RTNLGRP_DECnet_IFADDR 
RTNLGRP_DECnet_IFADDR + RTNLGRP_NOP2, + RTNLGRP_DECnet_ROUTE, +#define RTNLGRP_DECnet_ROUTE RTNLGRP_DECnet_ROUTE + RTNLGRP_DECnet_RULE, +#define RTNLGRP_DECnet_RULE RTNLGRP_DECnet_RULE + RTNLGRP_NOP4, + RTNLGRP_IPV6_PREFIX, +#define RTNLGRP_IPV6_PREFIX RTNLGRP_IPV6_PREFIX + RTNLGRP_IPV6_RULE, +#define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE + RTNLGRP_ND_USEROPT, +#define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT + RTNLGRP_PHONET_IFADDR, +#define RTNLGRP_PHONET_IFADDR RTNLGRP_PHONET_IFADDR + RTNLGRP_PHONET_ROUTE, +#define RTNLGRP_PHONET_ROUTE RTNLGRP_PHONET_ROUTE + RTNLGRP_DCB, +#define RTNLGRP_DCB RTNLGRP_DCB + __RTNLGRP_MAX +}; +#define RTNLGRP_MAX (__RTNLGRP_MAX - 1) + +/* TC action piece */ +struct tcamsg { + unsigned char tca_family; + unsigned char tca__pad1; + unsigned short tca__pad2; +}; +#define TA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcamsg)))) +#define TA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcamsg)) +#define TCA_ACT_TAB 1 /* attr type must be >=1 */ +#define TCAA_MAX 1 + +/* New extended info filters for IFLA_EXT_MASK */ +#define RTEXT_FILTER_VF (1 << 0) + +/* End of information exported to user level */ + +#ifdef __KERNEL__ + +#include <linux/mutex.h> +#include <linux/netdevice.h> + +static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str) +{ + int len = strlen(str) + 1; + return len > rta->rta_len || memcmp(RTA_DATA(rta), str, len); +} + +extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); +extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); +extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, + u32 group, struct nlmsghdr *nlh, gfp_t flags); +extern void rtnl_set_sk_err(struct net *net, u32 group, int error); +extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); +extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, + u32 id, u32 ts, u32 tsage, long expires, + u32 error); + +extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data); + +#define RTA_PUT(skb, attrtype, attrlen, data) \ +({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \ + goto rtattr_failure; \ + __rta_fill(skb, attrtype, attrlen, data); }) + +#define RTA_APPEND(skb, attrlen, data) \ +({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \ + goto rtattr_failure; \ + memcpy(skb_put(skb, attrlen), data, attrlen); }) + +#define RTA_PUT_NOHDR(skb, attrlen, data) \ +({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \ + memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \ + RTA_ALIGN(attrlen) - attrlen); }) + +#define RTA_PUT_U8(skb, attrtype, value) \ +({ u8 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); }) + +#define RTA_PUT_U16(skb, attrtype, value) \ +({ u16 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); }) + +#define RTA_PUT_U32(skb, attrtype, value) \ +({ u32 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); }) + +#define RTA_PUT_U64(skb, attrtype, value) \ +({ u64 _tmp = (value); \ + RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); }) + +#define RTA_PUT_SECS(skb, attrtype, value) \ + RTA_PUT_U64(skb, attrtype, (value) / HZ) + +#define RTA_PUT_MSECS(skb, attrtype, value) \ + RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value)) + +#define RTA_PUT_STRING(skb, attrtype, value) \ + RTA_PUT(skb, attrtype, strlen(value) + 1, value) + +#define RTA_PUT_FLAG(skb, attrtype) \ + RTA_PUT(skb, attrtype, 0, NULL); + +#define RTA_NEST(skb, type) \ +({ struct rtattr 
*__start = (struct rtattr *)skb_tail_pointer(skb); \ + RTA_PUT(skb, type, 0, NULL); \ + __start; }) + +#define RTA_NEST_END(skb, start) \ +({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ + (skb)->len; }) + +#define RTA_NEST_COMPAT(skb, type, attrlen, data) \ +({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \ + RTA_PUT(skb, type, attrlen, data); \ + RTA_NEST(skb, type); \ + __start; }) + +#define RTA_NEST_COMPAT_END(skb, start) \ +({ struct rtattr *__nest = (void *)(start) + NLMSG_ALIGN((start)->rta_len); \ + (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ + RTA_NEST_END(skb, __nest); \ + (skb)->len; }) + +#define RTA_NEST_CANCEL(skb, start) \ +({ if (start) \ + skb_trim(skb, (unsigned char *) (start) - (skb)->data); \ + -1; }) + +#define RTA_GET_U8(rta) \ +({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \ + goto rtattr_failure; \ + *(u8 *) RTA_DATA(rta); }) + +#define RTA_GET_U16(rta) \ +({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \ + goto rtattr_failure; \ + *(u16 *) RTA_DATA(rta); }) + +#define RTA_GET_U32(rta) \ +({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \ + goto rtattr_failure; \ + *(u32 *) RTA_DATA(rta); }) + +#define RTA_GET_U64(rta) \ +({ u64 _tmp; \ + if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \ + goto rtattr_failure; \ + memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \ + _tmp; }) + +#define RTA_GET_FLAG(rta) (!!(rta)) + +#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ) +#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta))) + +static inline struct rtattr * +__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen) +{ + struct rtattr *rta; + int size = RTA_LENGTH(attrlen); + + rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size)); + rta->rta_type = attrtype; + rta->rta_len = size; + memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size); + return rta; +} + +#define __RTA_PUT(skb, attrtype, attrlen) \ +({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \ + goto rtattr_failure; \ + __rta_reserve(skb, attrtype, attrlen); }) + +extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change); + +/* RTNL is used as a global lock for all changes to network configuration */ +extern void rtnl_lock(void); +extern void rtnl_unlock(void); +extern int rtnl_trylock(void); +extern int rtnl_is_locked(void); +#ifdef CONFIG_PROVE_LOCKING +extern int lockdep_rtnl_is_held(void); +#endif /* #ifdef CONFIG_PROVE_LOCKING */ + +/** + * rcu_dereference_rtnl - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * + * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() + * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference() + */ +#define rcu_dereference_rtnl(p) \ + rcu_dereference_check(p, lockdep_rtnl_is_held()) + +/** + * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL + * @p: The pointer to read, prior to dereferencing + * + * Return the value of the specified RCU-protected pointer, but omit + * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because + * caller holds RTNL. 
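+ *
+ * As a rough sketch, a read-side path holding only rcu_read_lock() would
+ * use rcu_dereference_rtnl():
+ *
+ *     rcu_read_lock();
+ *     q = rcu_dereference_rtnl(dev->ingress_queue);
+ *     ...
+ *     rcu_read_unlock();
+ *
+ * whereas an update-side path that already holds rtnl_lock() can use the
+ * cheaper form:
+ *
+ *     q = rtnl_dereference(dev->ingress_queue);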
+ */ +#define rtnl_dereference(p) \ + rcu_dereference_protected(p, lockdep_rtnl_is_held()) + +static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) +{ + return rtnl_dereference(dev->ingress_queue); +} + +extern struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); + +extern void rtnetlink_init(void); +extern void __rtnl_unlock(void); + +#define ASSERT_RTNL() do { \ + if (unlikely(!rtnl_is_locked())) { \ + printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \ + __FILE__, __LINE__); \ + dump_stack(); \ + } \ +} while(0) + +static inline u32 rtm_get_table(struct rtattr **rta, u8 table) +{ + return RTA_GET_U32(rta[RTA_TABLE-1]); +rtattr_failure: + return table; +} + +#endif /* __KERNEL__ */ + + +#endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h new file mode 100644 index 00000000..bc2994ed --- /dev/null +++ b/include/linux/rwlock.h @@ -0,0 +1,125 @@ +#ifndef __LINUX_RWLOCK_H +#define __LINUX_RWLOCK_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +/* + * rwlock related methods + * + * split out from spinlock.h + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key); +# define rwlock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __rwlock_init((lock), #lock, &__key); \ +} while (0) +#else +# define rwlock_init(lock) \ + do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock); +#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) + extern int do_raw_read_trylock(rwlock_t *lock); + extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock); + extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock); +#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock) + extern int do_raw_write_trylock(rwlock_t *lock); + extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock); +#else +# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0) +# define do_raw_read_lock_flags(lock, flags) \ + do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) +# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) +# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) +# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0) +# define do_raw_write_lock_flags(lock, flags) \ + do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) +# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) +# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) +#endif + +#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) +#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock) + +/* + * Define the various rw_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various + * methods are defined as nops in the case they are not required. 
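+ *
+ * A minimal usage sketch ("example_lock" is an arbitrary name;
+ * DEFINE_RWLOCK comes from rwlock_types.h):
+ *
+ *     static DEFINE_RWLOCK(example_lock);
+ *     unsigned long flags;
+ *
+ *     read_lock(&example_lock);       ... readers may run concurrently ...
+ *     read_unlock(&example_lock);
+ *
+ *     write_lock_irqsave(&example_lock, flags);   ... exclusive, IRQs masked ...
+ *     write_unlock_irqrestore(&example_lock, flags);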
+ */ +#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) +#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) + +#define write_lock(lock) _raw_write_lock(lock) +#define read_lock(lock) _raw_read_lock(lock) + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_read_lock_irqsave(lock); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_write_lock_irqsave(lock); \ + } while (0) + +#else + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_read_lock_irqsave(lock, flags); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_write_lock_irqsave(lock, flags); \ + } while (0) + +#endif + +#define read_lock_irq(lock) _raw_read_lock_irq(lock) +#define read_lock_bh(lock) _raw_read_lock_bh(lock) +#define write_lock_irq(lock) _raw_write_lock_irq(lock) +#define write_lock_bh(lock) _raw_write_lock_bh(lock) +#define read_unlock(lock) _raw_read_unlock(lock) +#define write_unlock(lock) _raw_write_unlock(lock) +#define read_unlock_irq(lock) _raw_read_unlock_irq(lock) +#define write_unlock_irq(lock) _raw_write_unlock_irq(lock) + +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_read_unlock_irqrestore(lock, flags); \ + } while (0) +#define read_unlock_bh(lock) _raw_read_unlock_bh(lock) + +#define write_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_write_unlock_irqrestore(lock, flags); \ + } while (0) +#define write_unlock_bh(lock) _raw_write_unlock_bh(lock) + +#define write_trylock_irqsave(lock, flags) \ +({ \ + local_irq_save(flags); \ + write_trylock(lock) ? \ + 1 : ({ local_irq_restore(flags); 0; }); \ +}) + +#endif /* __LINUX_RWLOCK_H */ diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h new file mode 100644 index 00000000..9c9f0495 --- /dev/null +++ b/include/linux/rwlock_api_smp.h @@ -0,0 +1,282 @@ +#ifndef __LINUX_RWLOCK_API_SMP_H +#define __LINUX_RWLOCK_API_SMP_H + +#ifndef __LINUX_SPINLOCK_API_SMP_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/rwlock_api_smp.h + * + * spinlock API declarations on SMP (and debug) + * (implemented in kernel/spinlock.c) + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). 
+ */ + +void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); +void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock); +unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) + __acquires(lock); +unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) + __acquires(lock); +int __lockfunc _raw_read_trylock(rwlock_t *lock); +int __lockfunc _raw_write_trylock(rwlock_t *lock); +void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock); +void __lockfunc +_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) + __releases(lock); +void __lockfunc +_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) + __releases(lock); + +#ifdef CONFIG_INLINE_READ_LOCK +#define _raw_read_lock(lock) __raw_read_lock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK +#define _raw_write_lock(lock) __raw_write_lock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_BH +#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_BH +#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_IRQ +#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ +#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE +#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE +#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_READ_TRYLOCK +#define _raw_read_trylock(lock) __raw_read_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_TRYLOCK +#define _raw_write_trylock(lock) __raw_write_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK +#define _raw_read_unlock(lock) __raw_read_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK +#define _raw_write_unlock(lock) __raw_write_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_BH +#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH +#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ +#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ +#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE +#define _raw_read_unlock_irqrestore(lock, flags) \ + __raw_read_unlock_irqrestore(lock, flags) +#endif + +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE +#define _raw_write_unlock_irqrestore(lock, flags) \ + __raw_write_unlock_irqrestore(lock, flags) +#endif + +static inline int __raw_read_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (do_raw_read_trylock(lock)) { + rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + 
return 0; +} + +static inline int __raw_write_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (do_raw_write_trylock(lock)) { + rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +/* + * If lockdep is enabled then we use the non-preemption spin-ops + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * not re-enabled during lock-acquire (which the preempt-spin-ops do): + */ +#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) + +static inline void __raw_read_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); +} + +static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock, + do_raw_read_lock_flags, &flags); + return flags; +} + +static inline void __raw_read_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); +} + +static inline void __raw_read_lock_bh(rwlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); +} + +static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock, + do_raw_write_lock_flags, &flags); + return flags; +} + +static inline void __raw_write_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); +} + +static inline void __raw_write_lock_bh(rwlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); +} + +static inline void __raw_write_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); +} + +#endif /* CONFIG_PREEMPT */ + +static inline void __raw_write_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + preempt_enable(); +} + +static inline void __raw_read_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + preempt_enable(); +} + +static inline void +__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __raw_read_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __raw_read_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_read_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, + unsigned long flags) +{ + 
rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __raw_write_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __raw_write_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + do_raw_write_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +#endif /* __LINUX_RWLOCK_API_SMP_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h new file mode 100644 index 00000000..cc0072e9 --- /dev/null +++ b/include/linux/rwlock_types.h @@ -0,0 +1,48 @@ +#ifndef __LINUX_RWLOCK_TYPES_H +#define __LINUX_RWLOCK_TYPES_H + +/* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ +typedef struct { + arch_rwlock_t raw_lock; +#ifdef CONFIG_GENERIC_LOCKBREAK + unsigned int break_lock; +#endif +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#define RWLOCK_MAGIC 0xdeaf1eed + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ + .magic = RWLOCK_MAGIC, \ + .owner = SPINLOCK_OWNER_INIT, \ + .owner_cpu = -1, \ + RW_DEP_MAP_INIT(lockname) } +#else +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ + RW_DEP_MAP_INIT(lockname) } +#endif + +#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) + +#endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h new file mode 100644 index 00000000..d5b13bc0 --- /dev/null +++ b/include/linux/rwsem-spinlock.h @@ -0,0 +1,45 @@ +/* rwsem-spinlock.h: fallback C implementation + * + * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
+ * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de> + * - Derived also from comments by Linus + */ + +#ifndef _LINUX_RWSEM_SPINLOCK_H +#define _LINUX_RWSEM_SPINLOCK_H + +#ifndef _LINUX_RWSEM_H +#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead" +#endif + +#ifdef __KERNEL__ +/* + * the rw-semaphore definition + * - if activity is 0 then there are no active readers or writers + * - if activity is +ve then that is the number of active readers + * - if activity is -1 then there is one active writer + * - if wait_list is not empty, then there are processes waiting for the semaphore + */ +struct rw_semaphore { + __s32 activity; + raw_spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define RWSEM_UNLOCKED_VALUE 0x00000000 + +extern void __down_read(struct rw_semaphore *sem); +extern int __down_read_trylock(struct rw_semaphore *sem); +extern void __down_write(struct rw_semaphore *sem); +extern void __down_write_nested(struct rw_semaphore *sem, int subclass); +extern int __down_write_trylock(struct rw_semaphore *sem); +extern void __up_read(struct rw_semaphore *sem); +extern void __up_write(struct rw_semaphore *sem); +extern void __downgrade_write(struct rw_semaphore *sem); +extern int rwsem_is_locked(struct rw_semaphore *sem); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_RWSEM_SPINLOCK_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h new file mode 100644 index 00000000..54bd7cd7 --- /dev/null +++ b/include/linux/rwsem.h @@ -0,0 +1,133 @@ +/* rwsem.h: R/W semaphores, public interface + * + * Written by David Howells (dhowells@redhat.com). + * Derived from asm-i386/semaphore.h + */ + +#ifndef _LINUX_RWSEM_H +#define _LINUX_RWSEM_H + +#include <linux/linkage.h> + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/spinlock.h> + +#include <linux/atomic.h> + +struct rw_semaphore; + +#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +#include <linux/rwsem-spinlock.h> /* use a generic implementation */ +#else +/* All arch specific implementations share the same struct */ +struct rw_semaphore { + long count; + raw_spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); + +/* Include the arch specific part */ +#include <asm/rwsem.h> + +/* In all implementations count != 0 means locked */ +static inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return sem->count != 0; +} + +#endif + +/* Common initializer macros and functions */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, \ + __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ + LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + 
__init_rwsem((sem), #sem, &__key); \ +} while (0) + +/* + * lock for reading + */ +extern void down_read(struct rw_semaphore *sem); + +/* + * trylock for reading -- returns 1 if successful, 0 if contention + */ +extern int down_read_trylock(struct rw_semaphore *sem); + +/* + * lock for writing + */ +extern void down_write(struct rw_semaphore *sem); + +/* + * trylock for writing -- returns 1 if successful, 0 if contention + */ +extern int down_write_trylock(struct rw_semaphore *sem); + +/* + * release a read lock + */ +extern void up_read(struct rw_semaphore *sem); + +/* + * release a write lock + */ +extern void up_write(struct rw_semaphore *sem); + +/* + * downgrade write lock to read lock + */ +extern void downgrade_write(struct rw_semaphore *sem); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/* + * nested locking. NOTE: rwsems are not allowed to recurse + * (which occurs if the same task tries to acquire the same + * lock instance multiple times), but multiple locks of the + * same lock class might be taken, if the order of the locks + * is always the same. This ordering rule can be expressed + * to lockdep via the _nested() APIs, but enumerating the + * subclasses that are used. (If the nesting relationship is + * static then another method for expressing nested locking is + * the explicit definition of lock class keys and the use of + * lockdep_set_class() at lock initialization time. + * See Documentation/lockdep-design.txt for more details.) + */ +extern void down_read_nested(struct rw_semaphore *sem, int subclass); +extern void down_write_nested(struct rw_semaphore *sem, int subclass); +#else +# define down_read_nested(sem, subclass) down_read(sem) +# define down_write_nested(sem, subclass) down_write(sem) +#endif + +#endif /* _LINUX_RWSEM_H */ diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h new file mode 100644 index 00000000..a53915cd --- /dev/null +++ b/include/linux/rxrpc.h @@ -0,0 +1,69 @@ +/* AF_RXRPC parameters + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_RXRPC_H +#define _LINUX_RXRPC_H + +#include <linux/in.h> +#include <linux/in6.h> + +/* + * RxRPC socket address + */ +struct sockaddr_rxrpc { + sa_family_t srx_family; /* address family */ + u16 srx_service; /* service desired */ + u16 transport_type; /* type of transport socket (SOCK_DGRAM) */ + u16 transport_len; /* length of transport address */ + union { + sa_family_t family; /* transport address family */ + struct sockaddr_in sin; /* IPv4 transport address */ + struct sockaddr_in6 sin6; /* IPv6 transport address */ + } transport; +}; + +/* + * RxRPC socket options + */ +#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */ +#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ +#define RXRPC_EXCLUSIVE_CONNECTION 3 /* [clnt] use exclusive RxRPC connection */ +#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ + +/* + * RxRPC control messages + * - terminal messages mean that a user call ID tag can be recycled + */ +#define RXRPC_USER_CALL_ID 1 /* user call ID specifier */ +#define RXRPC_ABORT 2 /* abort request / notification [terminal] */ +#define RXRPC_ACK 3 /* [Server] RPC op final ACK received [terminal] */ +#define RXRPC_NET_ERROR 5 /* network error received [terminal] */ +#define RXRPC_BUSY 6 /* server busy received [terminal] */ +#define RXRPC_LOCAL_ERROR 7 /* local error generated [terminal] */ +#define RXRPC_NEW_CALL 8 /* [Server] new incoming call notification */ +#define RXRPC_ACCEPT 9 /* [Server] accept request */ + +/* + * RxRPC security levels + */ +#define RXRPC_SECURITY_PLAIN 0 /* plain secure-checksummed packets only */ +#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */ +#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */ + +/* + * RxRPC security indices + */ +#define RXRPC_SECURITY_NONE 0 /* no security protocol */ +#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */ +#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */ +#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */ + +#endif /* _LINUX_RXRPC_H */ diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h new file mode 100644 index 00000000..99dadbff --- /dev/null +++ b/include/linux/s3c_adc_battery.h @@ -0,0 +1,41 @@ +#ifndef _S3C_ADC_BATTERY_H +#define _S3C_ADC_BATTERY_H + +struct s3c_adc_bat_thresh { + int volt; /* mV */ + int cur; /* mA */ + int level; /* percent */ +}; + +struct s3c_adc_bat_pdata { + int (*init)(void); + void (*exit)(void); + void (*enable_charger)(void); + void (*disable_charger)(void); + + int gpio_charge_finished; + int gpio_inverted; + + const struct s3c_adc_bat_thresh *lut_noac; + unsigned int lut_noac_cnt; + const struct s3c_adc_bat_thresh *lut_acin; + unsigned int lut_acin_cnt; + + const unsigned int volt_channel; + const unsigned int current_channel; + const unsigned int backup_volt_channel; + + const unsigned int volt_samples; + const unsigned int current_samples; + const unsigned int backup_volt_samples; + + const unsigned int volt_mult; + const unsigned int current_mult; + const unsigned int backup_volt_mult; + const unsigned int internal_impedance; + + const unsigned int backup_volt_max; + const unsigned int backup_volt_min; +}; + +#endif diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h new file mode 100644 index 00000000..65839a58 --- /dev/null +++ b/include/linux/sa11x0-dma.h @@ -0,0 +1,24 @@ +/* + * SA11x0 DMA Engine support + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under 
the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_SA11X0_DMA_H +#define __LINUX_SA11X0_DMA_H + +struct dma_chan; + +#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE) +bool sa11x0_dma_filter_fn(struct dma_chan *, void *); +#else +static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d) +{ + return false; +} +#endif + +#endif diff --git a/include/linux/sc26198.h b/include/linux/sc26198.h new file mode 100644 index 00000000..7ca35aba --- /dev/null +++ b/include/linux/sc26198.h @@ -0,0 +1,533 @@ +/*****************************************************************************/ + +/* + * sc26198.h -- SC26198 UART hardware info. + * + * Copyright (C) 1995-1998 Stallion Technologies + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*****************************************************************************/ +#ifndef _SC26198_H +#define _SC26198_H +/*****************************************************************************/ + +/* + * Define the number of async ports per sc26198 uart device. + */ +#define SC26198_PORTS 8 + +/* + * Baud rate timing clocks. All derived from a master 14.7456 MHz clock. + */ +#define SC26198_MASTERCLOCK 14745600L +#define SC26198_DCLK (SC26198_MASTERCLOCK) +#define SC26198_CCLK (SC26198_MASTERCLOCK / 2) +#define SC26198_BCLK (SC26198_MASTERCLOCK / 4) + +/* + * Define internal FIFO sizes for the 26198 ports. + */ +#define SC26198_TXFIFOSIZE 16 +#define SC26198_RXFIFOSIZE 16 + +/*****************************************************************************/ + +/* + * Global register definitions. These registers are global to each 26198 + * device, not specific ports on it. + */ +#define TSTR 0x0d +#define GCCR 0x0f +#define ICR 0x1b +#define WDTRCR 0x1d +#define IVR 0x1f +#define BRGTRUA 0x84 +#define GPOSR 0x87 +#define GPOC 0x8b +#define UCIR 0x8c +#define CIR 0x8c +#define BRGTRUB 0x8d +#define GRXFIFO 0x8e +#define GTXFIFO 0x8e +#define GCCR2 0x8f +#define BRGTRLA 0x94 +#define GPOR 0x97 +#define GPOD 0x9b +#define BRGTCR 0x9c +#define GICR 0x9c +#define BRGTRLB 0x9d +#define GIBCR 0x9d +#define GITR 0x9f + +/* + * Per port channel registers. These are the register offsets within + * the port address space, so need to have the port address (0 to 7) + * inserted in bit positions 4:6. + */ +#define MR0 0x00 +#define MR1 0x01 +#define IOPCR 0x02 +#define BCRBRK 0x03 +#define BCRCOS 0x04 +#define BCRX 0x06 +#define BCRA 0x07 +#define XONCR 0x08 +#define XOFFCR 0x09 +#define ARCR 0x0a +#define RXCSR 0x0c +#define TXCSR 0x0e +#define MR2 0x80 +#define SR 0x81 +#define SCCR 0x81 +#define ISR 0x82 +#define IMR 0x82 +#define TXFIFO 0x83 +#define RXFIFO 0x83 +#define IPR 0x84 +#define IOPIOR 0x85 +#define XISR 0x86 + +/* + * For any given port calculate the address to use to access a specified + * register. 
This is only used for unusual access, mostly this is done + * through the assembler access routines. + */ +#define SC26198_PORTREG(port,reg) ((((port) & 0x07) << 4) | (reg)) + +/*****************************************************************************/ + +/* + * Global configuration control register bit definitions. + */ +#define GCCR_NOACK 0x00 +#define GCCR_IVRACK 0x02 +#define GCCR_IVRCHANACK 0x04 +#define GCCR_IVRTYPCHANACK 0x06 +#define GCCR_ASYNCCYCLE 0x00 +#define GCCR_SYNCCYCLE 0x40 + +/*****************************************************************************/ + +/* + * Mode register 0 bit definitions. + */ +#define MR0_ADDRNONE 0x00 +#define MR0_AUTOWAKE 0x01 +#define MR0_AUTODOZE 0x02 +#define MR0_AUTOWAKEDOZE 0x03 +#define MR0_SWFNONE 0x00 +#define MR0_SWFTX 0x04 +#define MR0_SWFRX 0x08 +#define MR0_SWFRXTX 0x0c +#define MR0_TXMASK 0x30 +#define MR0_TXEMPTY 0x00 +#define MR0_TXHIGH 0x10 +#define MR0_TXHALF 0x20 +#define MR0_TXRDY 0x00 +#define MR0_ADDRNT 0x00 +#define MR0_ADDRT 0x40 +#define MR0_SWFNT 0x00 +#define MR0_SWFT 0x80 + +/* + * Mode register 1 bit definitions. + */ +#define MR1_CS5 0x00 +#define MR1_CS6 0x01 +#define MR1_CS7 0x02 +#define MR1_CS8 0x03 +#define MR1_PAREVEN 0x00 +#define MR1_PARODD 0x04 +#define MR1_PARENB 0x00 +#define MR1_PARFORCE 0x08 +#define MR1_PARNONE 0x10 +#define MR1_PARSPECIAL 0x18 +#define MR1_ERRCHAR 0x00 +#define MR1_ERRBLOCK 0x20 +#define MR1_ISRUNMASKED 0x00 +#define MR1_ISRMASKED 0x40 +#define MR1_AUTORTS 0x80 + +/* + * Mode register 2 bit definitions. + */ +#define MR2_STOP1 0x00 +#define MR2_STOP15 0x01 +#define MR2_STOP2 0x02 +#define MR2_STOP916 0x03 +#define MR2_RXFIFORDY 0x00 +#define MR2_RXFIFOHALF 0x04 +#define MR2_RXFIFOHIGH 0x08 +#define MR2_RXFIFOFULL 0x0c +#define MR2_AUTOCTS 0x10 +#define MR2_TXRTS 0x20 +#define MR2_MODENORM 0x00 +#define MR2_MODEAUTOECHO 0x40 +#define MR2_MODELOOP 0x80 +#define MR2_MODEREMECHO 0xc0 + +/*****************************************************************************/ + +/* + * Baud Rate Generator (BRG) selector values. + */ +#define BRG_50 0x00 +#define BRG_75 0x01 +#define BRG_150 0x02 +#define BRG_200 0x03 +#define BRG_300 0x04 +#define BRG_450 0x05 +#define BRG_600 0x06 +#define BRG_900 0x07 +#define BRG_1200 0x08 +#define BRG_1800 0x09 +#define BRG_2400 0x0a +#define BRG_3600 0x0b +#define BRG_4800 0x0c +#define BRG_7200 0x0d +#define BRG_9600 0x0e +#define BRG_14400 0x0f +#define BRG_19200 0x10 +#define BRG_28200 0x11 +#define BRG_38400 0x12 +#define BRG_57600 0x13 +#define BRG_115200 0x14 +#define BRG_230400 0x15 +#define BRG_GIN0 0x16 +#define BRG_GIN1 0x17 +#define BRG_CT0 0x18 +#define BRG_CT1 0x19 +#define BRG_RX2TX316 0x1b +#define BRG_RX2TX31 0x1c + +#define SC26198_MAXBAUD 921600 + +/*****************************************************************************/ + +/* + * Command register command definitions. 
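+ *
+ * As a rough sketch, a driver issues one of these commands by writing the
+ * value to the channel command register (SCCR above); "sc26198_setreg"
+ * here is a hypothetical accessor, not part of this header:
+ *
+ *     sc26198_setreg(port, SCCR, CR_RXRESET);
+ *
+ * Note that the command encodings leave bits 0-1 clear, so CR_RXENABLE
+ * and CR_TXENABLE can be or'ed into the value that is written.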
+ */ +#define CR_NULL 0x04 +#define CR_ADDRNORMAL 0x0c +#define CR_RXRESET 0x14 +#define CR_TXRESET 0x1c +#define CR_CLEARRXERR 0x24 +#define CR_BREAKRESET 0x2c +#define CR_TXSTARTBREAK 0x34 +#define CR_TXSTOPBREAK 0x3c +#define CR_RTSON 0x44 +#define CR_RTSOFF 0x4c +#define CR_ADDRINIT 0x5c +#define CR_RXERRBLOCK 0x6c +#define CR_TXSENDXON 0x84 +#define CR_TXSENDXOFF 0x8c +#define CR_GANGXONSET 0x94 +#define CR_GANGXOFFSET 0x9c +#define CR_GANGXONINIT 0xa4 +#define CR_GANGXOFFINIT 0xac +#define CR_HOSTXON 0xb4 +#define CR_HOSTXOFF 0xbc +#define CR_CANCELXOFF 0xc4 +#define CR_ADDRRESET 0xdc +#define CR_RESETALLPORTS 0xf4 +#define CR_RESETALL 0xfc + +#define CR_RXENABLE 0x01 +#define CR_TXENABLE 0x02 + +/*****************************************************************************/ + +/* + * Channel status register. + */ +#define SR_RXRDY 0x01 +#define SR_RXFULL 0x02 +#define SR_TXRDY 0x04 +#define SR_TXEMPTY 0x08 +#define SR_RXOVERRUN 0x10 +#define SR_RXPARITY 0x20 +#define SR_RXFRAMING 0x40 +#define SR_RXBREAK 0x80 + +#define SR_RXERRS (SR_RXPARITY | SR_RXFRAMING | SR_RXOVERRUN) + +/*****************************************************************************/ + +/* + * Interrupt status register and interrupt mask register bit definitions. + */ +#define IR_TXRDY 0x01 +#define IR_RXRDY 0x02 +#define IR_RXBREAK 0x04 +#define IR_XONXOFF 0x10 +#define IR_ADDRRECOG 0x20 +#define IR_RXWATCHDOG 0x40 +#define IR_IOPORT 0x80 + +/*****************************************************************************/ + +/* + * Interrupt vector register field definitions. + */ +#define IVR_CHANMASK 0x07 +#define IVR_TYPEMASK 0x18 +#define IVR_CONSTMASK 0xc0 + +#define IVR_RXDATA 0x10 +#define IVR_RXBADDATA 0x18 +#define IVR_TXDATA 0x08 +#define IVR_OTHER 0x00 + +/*****************************************************************************/ + +/* + * BRG timer control register bit definitions. + */ +#define BRGCTCR_DISABCLK0 0x00 +#define BRGCTCR_ENABCLK0 0x08 +#define BRGCTCR_DISABCLK1 0x00 +#define BRGCTCR_ENABCLK1 0x80 + +#define BRGCTCR_0SCLK16 0x00 +#define BRGCTCR_0SCLK32 0x01 +#define BRGCTCR_0SCLK64 0x02 +#define BRGCTCR_0SCLK128 0x03 +#define BRGCTCR_0X1 0x04 +#define BRGCTCR_0X12 0x05 +#define BRGCTCR_0IO1A 0x06 +#define BRGCTCR_0GIN0 0x07 + +#define BRGCTCR_1SCLK16 0x00 +#define BRGCTCR_1SCLK32 0x10 +#define BRGCTCR_1SCLK64 0x20 +#define BRGCTCR_1SCLK128 0x30 +#define BRGCTCR_1X1 0x40 +#define BRGCTCR_1X12 0x50 +#define BRGCTCR_1IO1B 0x60 +#define BRGCTCR_1GIN1 0x70 + +/*****************************************************************************/ + +/* + * Watch dog timer enable register. + */ +#define WDTRCR_ENABALL 0xff + +/*****************************************************************************/ + +/* + * XON/XOFF interrupt status register. + */ +#define XISR_TXCHARMASK 0x03 +#define XISR_TXCHARNORMAL 0x00 +#define XISR_TXWAIT 0x01 +#define XISR_TXXOFFPEND 0x02 +#define XISR_TXXONPEND 0x03 + +#define XISR_TXFLOWMASK 0x0c +#define XISR_TXNORMAL 0x00 +#define XISR_TXSTOPPEND 0x04 +#define XISR_TXSTARTED 0x08 +#define XISR_TXSTOPPED 0x0c + +#define XISR_RXFLOWMASK 0x30 +#define XISR_RXFLOWNONE 0x00 +#define XISR_RXXONSENT 0x10 +#define XISR_RXXOFFSENT 0x20 + +#define XISR_RXXONGOT 0x40 +#define XISR_RXXOFFGOT 0x80 + +/*****************************************************************************/ + +/* + * Current interrupt register. 
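+ *
+ * As a rough sketch, an interrupt handler would read this register once
+ * and decode it with the masks below ("cir" being the value read):
+ *
+ *     channel = cir & CIR_CHANMASK;
+ *     type    = cir & CIR_TYPEMASK;
+ *     subtype = cir & CIR_SUBTYPEMASK;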
+ */ +#define CIR_TYPEMASK 0xc0 +#define CIR_TYPEOTHER 0x00 +#define CIR_TYPETX 0x40 +#define CIR_TYPERXGOOD 0x80 +#define CIR_TYPERXBAD 0xc0 + +#define CIR_RXDATA 0x80 +#define CIR_RXBADDATA 0x40 +#define CIR_TXDATA 0x40 + +#define CIR_CHANMASK 0x07 +#define CIR_CNTMASK 0x38 + +#define CIR_SUBTYPEMASK 0x38 +#define CIR_SUBNONE 0x00 +#define CIR_SUBCOS 0x08 +#define CIR_SUBADDR 0x10 +#define CIR_SUBXONXOFF 0x18 +#define CIR_SUBBREAK 0x28 + +/*****************************************************************************/ + +/* + * Global interrupting channel register. + */ +#define GICR_CHANMASK 0x07 + +/*****************************************************************************/ + +/* + * Global interrupting byte count register. + */ +#define GICR_COUNTMASK 0x0f + +/*****************************************************************************/ + +/* + * Global interrupting type register. + */ +#define GITR_RXMASK 0xc0 +#define GITR_RXNONE 0x00 +#define GITR_RXBADDATA 0x80 +#define GITR_RXGOODDATA 0xc0 +#define GITR_TXDATA 0x20 + +#define GITR_SUBTYPEMASK 0x07 +#define GITR_SUBNONE 0x00 +#define GITR_SUBCOS 0x01 +#define GITR_SUBADDR 0x02 +#define GITR_SUBXONXOFF 0x03 +#define GITR_SUBBREAK 0x05 + +/*****************************************************************************/ + +/* + * Input port change register. + */ +#define IPR_CTS 0x01 +#define IPR_DTR 0x02 +#define IPR_RTS 0x04 +#define IPR_DCD 0x08 +#define IPR_CTSCHANGE 0x10 +#define IPR_DTRCHANGE 0x20 +#define IPR_RTSCHANGE 0x40 +#define IPR_DCDCHANGE 0x80 + +#define IPR_CHANGEMASK 0xf0 + +/*****************************************************************************/ + +/* + * IO port interrupt and output register. + */ +#define IOPR_CTS 0x01 +#define IOPR_DTR 0x02 +#define IOPR_RTS 0x04 +#define IOPR_DCD 0x08 +#define IOPR_CTSCOS 0x10 +#define IOPR_DTRCOS 0x20 +#define IOPR_RTSCOS 0x40 +#define IOPR_DCDCOS 0x80 + +/*****************************************************************************/ + +/* + * IO port configuration register. + */ +#define IOPCR_SETCTS 0x00 +#define IOPCR_SETDTR 0x04 +#define IOPCR_SETRTS 0x10 +#define IOPCR_SETDCD 0x00 + +#define IOPCR_SETSIGS (IOPCR_SETRTS | IOPCR_SETRTS | IOPCR_SETDTR | IOPCR_SETDCD) + +/*****************************************************************************/ + +/* + * General purpose output select register. + */ +#define GPORS_TXC1XA 0x08 +#define GPORS_TXC16XA 0x09 +#define GPORS_RXC16XA 0x0a +#define GPORS_TXC16XB 0x0b +#define GPORS_GPOR3 0x0c +#define GPORS_GPOR2 0x0d +#define GPORS_GPOR1 0x0e +#define GPORS_GPOR0 0x0f + +/*****************************************************************************/ + +/* + * General purpose output register. + */ +#define GPOR_0 0x01 +#define GPOR_1 0x02 +#define GPOR_2 0x04 +#define GPOR_3 0x08 + +/*****************************************************************************/ + +/* + * General purpose output clock register. + */ +#define GPORC_0NONE 0x00 +#define GPORC_0GIN0 0x01 +#define GPORC_0GIN1 0x02 +#define GPORC_0IO3A 0x02 + +#define GPORC_1NONE 0x00 +#define GPORC_1GIN0 0x04 +#define GPORC_1GIN1 0x08 +#define GPORC_1IO3C 0x0c + +#define GPORC_2NONE 0x00 +#define GPORC_2GIN0 0x10 +#define GPORC_2GIN1 0x20 +#define GPORC_2IO3E 0x20 + +#define GPORC_3NONE 0x00 +#define GPORC_3GIN0 0x40 +#define GPORC_3GIN1 0x80 +#define GPORC_3IO3G 0xc0 + +/*****************************************************************************/ + +/* + * General purpose output data register. 
+ */ +#define GPOD_0MASK 0x03 +#define GPOD_0SET1 0x00 +#define GPOD_0SET0 0x01 +#define GPOD_0SETR0 0x02 +#define GPOD_0SETIO3B 0x03 + +#define GPOD_1MASK 0x0c +#define GPOD_1SET1 0x00 +#define GPOD_1SET0 0x04 +#define GPOD_1SETR0 0x08 +#define GPOD_1SETIO3D 0x0c + +#define GPOD_2MASK 0x30 +#define GPOD_2SET1 0x00 +#define GPOD_2SET0 0x10 +#define GPOD_2SETR0 0x20 +#define GPOD_2SETIO3F 0x30 + +#define GPOD_3MASK 0xc0 +#define GPOD_3SET1 0x00 +#define GPOD_3SET0 0x40 +#define GPOD_3SETR0 0x80 +#define GPOD_3SETIO3H 0xc0 + +/*****************************************************************************/ +#endif diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h new file mode 100644 index 00000000..ac9586da --- /dev/null +++ b/include/linux/scatterlist.h @@ -0,0 +1,269 @@ +#ifndef _LINUX_SCATTERLIST_H +#define _LINUX_SCATTERLIST_H + +#include <linux/string.h> +#include <linux/bug.h> +#include <linux/mm.h> + +#include <asm/types.h> +#include <asm/scatterlist.h> +#include <asm/io.h> + +struct sg_table { + struct scatterlist *sgl; /* the list */ + unsigned int nents; /* number of mapped entries */ + unsigned int orig_nents; /* original size of list */ +}; + +/* + * Notes on SG table design. + * + * Architectures must provide an unsigned long page_link field in the + * scatterlist struct. We use that to place the page pointer AND encode + * information about the sg table as well. The two lower bits are reserved + * for this information. + * + * If bit 0 is set, then the page_link contains a pointer to the next sg + * table list. Otherwise the next entry is at sg + 1. + * + * If bit 1 is set, then this sg entry is the last element in a list. + * + * See sg_next(). + * + */ + +#define SG_MAGIC 0x87654321 + +/* + * We overload the LSB of the page pointer to indicate whether it's + * a valid sg entry, or whether it points to the start of a new scatterlist. + * Those low bits are there for everyone! (thanks mason :-) + */ +#define sg_is_chain(sg) ((sg)->page_link & 0x01) +#define sg_is_last(sg) ((sg)->page_link & 0x02) +#define sg_chain_ptr(sg) \ + ((struct scatterlist *) ((sg)->page_link & ~0x03)) + +/** + * sg_assign_page - Assign a given page to an SG entry + * @sg: SG entry + * @page: The page + * + * Description: + * Assign page to sg entry. Also see sg_set_page(), the most commonly used + * variant. + * + **/ +static inline void sg_assign_page(struct scatterlist *sg, struct page *page) +{ + unsigned long page_link = sg->page_link & 0x3; + + /* + * In order for the low bit stealing approach to work, pages + * must be aligned at a 32-bit boundary as a minimum. + */ + BUG_ON((unsigned long) page & 0x03); +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); + BUG_ON(sg_is_chain(sg)); +#endif + sg->page_link = page_link | (unsigned long) page; +} + +/** + * sg_set_page - Set sg entry to point at given page + * @sg: SG entry + * @page: The page + * @len: Length of data + * @offset: Offset into page + * + * Description: + * Use this function to set an sg entry pointing at a page, never assign + * the page directly. We encode sg table information in the lower bits + * of the page pointer. See sg_page() for looking up the page belonging + * to an sg entry. 
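+ *
+ * A minimal usage sketch, assuming "page", "len" and "offset" are
+ * supplied by the caller:
+ *
+ *     struct scatterlist sgl[1];
+ *
+ *     sg_init_table(sgl, 1);
+ *     sg_set_page(&sgl[0], page, len, offset);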
+ * + **/ +static inline void sg_set_page(struct scatterlist *sg, struct page *page, + unsigned int len, unsigned int offset) +{ + sg_assign_page(sg, page); + sg->offset = offset; + sg->length = len; +} + +static inline struct page *sg_page(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); + BUG_ON(sg_is_chain(sg)); +#endif + return (struct page *)((sg)->page_link & ~0x3); +} + +/** + * sg_set_buf - Set sg entry to point at given data + * @sg: SG entry + * @buf: Data + * @buflen: Data length + * + **/ +static inline void sg_set_buf(struct scatterlist *sg, const void *buf, + unsigned int buflen) +{ + sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); +} + +/* + * Loop over each sg element, following the pointer to a new list if necessary + */ +#define for_each_sg(sglist, sg, nr, __i) \ + for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) + +/** + * sg_chain - Chain two sglists together + * @prv: First scatterlist + * @prv_nents: Number of entries in prv + * @sgl: Second scatterlist + * + * Description: + * Links @prv@ and @sgl@ together, to form a longer scatterlist. + * + **/ +static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, + struct scatterlist *sgl) +{ +#ifndef ARCH_HAS_SG_CHAIN + BUG(); +#endif + + /* + * offset and length are unused for chain entry. Clear them. + */ + prv[prv_nents - 1].offset = 0; + prv[prv_nents - 1].length = 0; + + /* + * Set lowest bit to indicate a link pointer, and make sure to clear + * the termination bit if it happens to be set. + */ + prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02; +} + +/** + * sg_mark_end - Mark the end of the scatterlist + * @sg: SG entryScatterlist + * + * Description: + * Marks the passed in sg entry as the termination point for the sg + * table. A call to sg_next() on this entry will return NULL. + * + **/ +static inline void sg_mark_end(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif + /* + * Set termination bit, clear potential chain bit + */ + sg->page_link |= 0x02; + sg->page_link &= ~0x01; +} + +/** + * sg_phys - Return physical address of an sg entry + * @sg: SG entry + * + * Description: + * This calls page_to_phys() on the page in this sg entry, and adds the + * sg offset. The caller must know that it is legal to call page_to_phys() + * on the sg page. + * + **/ +static inline dma_addr_t sg_phys(struct scatterlist *sg) +{ + return page_to_phys(sg_page(sg)) + sg->offset; +} + +/** + * sg_virt - Return virtual address of an sg entry + * @sg: SG entry + * + * Description: + * This calls page_address() on the page in this sg entry, and adds the + * sg offset. The caller must know that the sg page has a valid virtual + * mapping. 
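+ *
+ * Combined with for_each_sg(), this gives a simple walk over a table
+ * whose pages are known to be mapped ("table" is assumed to be a
+ * struct sg_table set up by the caller):
+ *
+ *     struct scatterlist *sg;
+ *     int i;
+ *
+ *     for_each_sg(table->sgl, sg, table->nents, i)
+ *             memset(sg_virt(sg), 0, sg->length);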
+ * + **/ +static inline void *sg_virt(struct scatterlist *sg) +{ + return page_address(sg_page(sg)) + sg->offset; +} + +struct scatterlist *sg_next(struct scatterlist *); +struct scatterlist *sg_last(struct scatterlist *s, unsigned int); +void sg_init_table(struct scatterlist *, unsigned int); +void sg_init_one(struct scatterlist *, const void *, unsigned int); + +typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); +typedef void (sg_free_fn)(struct scatterlist *, unsigned int); + +void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *); +void sg_free_table(struct sg_table *); +int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t, + sg_alloc_fn *); +int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); + +size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen); +size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen); + +/* + * Maximum number of entries that will be allocated in one piece, if + * a list larger than this is required then chaining will be utilized. + */ +#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) + + +/* + * Mapping sg iterator + * + * Iterates over sg entries mapping page-by-page. On each successful + * iteration, @miter->page points to the mapped page and + * @miter->length bytes of data can be accessed at @miter->addr. As + * long as an interation is enclosed between start and stop, the user + * is free to choose control structure and when to stop. + * + * @miter->consumed is set to @miter->length on each iteration. It + * can be adjusted if the user can't consume all the bytes in one go. + * Also, a stopped iteration can be resumed by calling next on it. + * This is useful when iteration needs to release all resources and + * continue later (e.g. at the next interrupt). 
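/*
 * Editorial sketch, not part of the header: allocating an sg table for a
 * single buffer, bouncing a few bytes out of it with the copy helpers
 * declared above, and walking it with the mapping iterator declared just
 * below. Kernel context is assumed and error handling is kept minimal;
 * example_sg_table() and its arguments are hypothetical.
 */
#include <linux/scatterlist.h>
#include <linux/printk.h>

static int example_sg_table(void *buf, unsigned int len)
{
	struct sg_table table;
	struct sg_mapping_iter miter;
	char snippet[16];
	int ret;

	ret = sg_alloc_table(&table, 1, GFP_KERNEL);
	if (ret)
		return ret;

	sg_set_buf(table.sgl, buf, len);

	/* note: the second argument of the copy helpers is nents, not bytes */
	sg_copy_to_buffer(table.sgl, table.nents, snippet, sizeof(snippet));

	/* walk the list page by page through the mapping iterator */
	sg_miter_start(&miter, table.sgl, table.nents, SG_MITER_FROM_SG);
	while (sg_miter_next(&miter))
		pr_info("mapped %zu bytes at %p\n", miter.length, miter.addr);
	sg_miter_stop(&miter);

	sg_free_table(&table);
	return 0;
}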
+ */ + +#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ +#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */ +#define SG_MITER_FROM_SG (1 << 2) /* nop */ + +struct sg_mapping_iter { + /* the following three fields can be accessed directly */ + struct page *page; /* currently mapped page */ + void *addr; /* pointer to the mapped area */ + size_t length; /* length of the mapped area */ + size_t consumed; /* number of consumed bytes */ + + /* these are internal states, keep away */ + struct scatterlist *__sg; /* current entry */ + unsigned int __nents; /* nr of remaining entries */ + unsigned int __offset; /* offset within sg */ + unsigned int __flags; +}; + +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, + unsigned int nents, unsigned int flags); +bool sg_miter_next(struct sg_mapping_iter *miter); +void sg_miter_stop(struct sg_mapping_iter *miter); + +#endif /* _LINUX_SCATTERLIST_H */ diff --git a/include/linux/scc.h b/include/linux/scc.h new file mode 100644 index 00000000..3495bd95 --- /dev/null +++ b/include/linux/scc.h @@ -0,0 +1,252 @@ +/* $Id: scc.h,v 1.29 1997/04/02 14:56:45 jreuter Exp jreuter $ */ + +#ifndef _SCC_H +#define _SCC_H + + +/* selection of hardware types */ + +#define PA0HZP 0x00 /* hardware type for PA0HZP SCC card and compatible */ +#define EAGLE 0x01 /* hardware type for EAGLE card */ +#define PC100 0x02 /* hardware type for PC100 card */ +#define PRIMUS 0x04 /* hardware type for PRIMUS-PC (DG9BL) card */ +#define DRSI 0x08 /* hardware type for DRSI PC*Packet card */ +#define BAYCOM 0x10 /* hardware type for BayCom (U)SCC */ + +/* DEV ioctl() commands */ + +enum SCC_ioctl_cmds { + SIOCSCCRESERVED = SIOCDEVPRIVATE, + SIOCSCCCFG, + SIOCSCCINI, + SIOCSCCCHANINI, + SIOCSCCSMEM, + SIOCSCCGKISS, + SIOCSCCSKISS, + SIOCSCCGSTAT, + SIOCSCCCAL +}; + +/* Device parameter control (from WAMPES) */ + +enum L1_params { + PARAM_DATA, + PARAM_TXDELAY, + PARAM_PERSIST, + PARAM_SLOTTIME, + PARAM_TXTAIL, + PARAM_FULLDUP, + PARAM_SOFTDCD, /* was: PARAM_HW */ + PARAM_MUTE, /* ??? */ + PARAM_DTR, + PARAM_RTS, + PARAM_SPEED, + PARAM_ENDDELAY, /* ??? */ + PARAM_GROUP, + PARAM_IDLE, + PARAM_MIN, + PARAM_MAXKEY, + PARAM_WAIT, + PARAM_MAXDEFER, + PARAM_TX, + PARAM_HWEVENT = 31, + PARAM_RETURN = 255 /* reset kiss mode */ +}; + +/* fulldup parameter */ + +enum FULLDUP_modes { + KISS_DUPLEX_HALF, /* normal CSMA operation */ + KISS_DUPLEX_FULL, /* fullduplex, key down trx after transmission */ + KISS_DUPLEX_LINK, /* fullduplex, key down trx after 'idletime' sec */ + KISS_DUPLEX_OPTIMA /* fullduplex, let the protocol layer control the hw */ +}; + +/* misc. 
parameters */ + +#define TIMER_OFF 65535U /* to switch off timers */ +#define NO_SUCH_PARAM 65534U /* param not implemented */ + +/* HWEVENT parameter */ + +enum HWEVENT_opts { + HWEV_DCD_ON, + HWEV_DCD_OFF, + HWEV_ALL_SENT +}; + +/* channel grouping */ + +#define RXGROUP 0100 /* if set, only tx when all channels clear */ +#define TXGROUP 0200 /* if set, don't transmit simultaneously */ + +/* Tx/Rx clock sources */ + +enum CLOCK_sources { + CLK_DPLL, /* normal halfduplex operation */ + CLK_EXTERNAL, /* external clocking (G3RUH/DF9IC modems) */ + CLK_DIVIDER, /* Rx = DPLL, Tx = divider (fullduplex with */ + /* modems without clock regeneration */ + CLK_BRG /* experimental fullduplex mode with DPLL/BRG for */ + /* MODEMs without clock recovery */ +}; + +/* Tx state */ + +enum TX_state { + TXS_IDLE, /* Transmitter off, no data pending */ + TXS_BUSY, /* waiting for permission to send / tailtime */ + TXS_ACTIVE, /* Transmitter on, sending data */ + TXS_NEWFRAME, /* reset CRC and send (next) frame */ + TXS_IDLE2, /* Transmitter on, no data pending */ + TXS_WAIT, /* Waiting for Mintime to expire */ + TXS_TIMEOUT /* We had a transmission timeout */ +}; + +typedef unsigned long io_port; /* type definition for an 'io port address' */ + +/* SCC statistical information */ + +struct scc_stat { + long rxints; /* Receiver interrupts */ + long txints; /* Transmitter interrupts */ + long exints; /* External/status interrupts */ + long spints; /* Special receiver interrupts */ + + long txframes; /* Packets sent */ + long rxframes; /* Number of Frames Actually Received */ + long rxerrs; /* CRC Errors */ + long txerrs; /* KISS errors */ + + unsigned int nospace; /* "Out of buffers" */ + unsigned int rx_over; /* Receiver Overruns */ + unsigned int tx_under; /* Transmitter Underruns */ + + unsigned int tx_state; /* Transmitter state */ + int tx_queued; /* tx frames enqueued */ + + unsigned int maxqueue; /* allocated tx_buffers */ + unsigned int bufsize; /* used buffersize */ +}; + +struct scc_modem { + long speed; /* Line speed, bps */ + char clocksrc; /* 0 = DPLL, 1 = external, 2 = divider */ + char nrz; /* NRZ instead of NRZI */ +}; + +struct scc_kiss_cmd { + int command; /* one of the KISS-Commands defined above */ + unsigned param; /* KISS-Param */ +}; + +struct scc_hw_config { + io_port data_a; /* data port channel A */ + io_port ctrl_a; /* control port channel A */ + io_port data_b; /* data port channel B */ + io_port ctrl_b; /* control port channel B */ + io_port vector_latch; /* INTACK-Latch (#) */ + io_port special; /* special function port */ + + int irq; /* irq */ + long clock; /* clock */ + char option; /* command for function port */ + + char brand; /* hardware type */ + char escc; /* use ext. features of a 8580/85180/85280 */ +}; + +/* (#) only one INTACK latch allowed. 
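/*
 * Editorial sketch, not part of scc.h: how a user-space configuration tool
 * might set one KISS parameter on an SCC port. The transport is an
 * assumption to be checked against the driver: classic tools pass a
 * struct scc_kiss_cmd through ifreq.ifr_data with the SIOCSCCSKISS ioctl
 * on an ordinary socket. set_txdelay() and its arguments are hypothetical.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/scc.h>

static int set_txdelay(int fd, const char *port, unsigned int tenths_ms)
{
	struct scc_kiss_cmd cmd = {
		.command = PARAM_TXDELAY,
		.param   = tenths_ms,	/* driver convention: 10 ms units */
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, port, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cmd;

	return ioctl(fd, SIOCSCCSKISS, &ifr);	/* fd: any AF_INET socket */
}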
*/ + + +struct scc_mem_config { + unsigned int dummy; + unsigned int bufsize; +}; + +struct scc_calibrate { + unsigned int time; + unsigned char pattern; +}; + +#ifdef __KERNEL__ + +enum {TX_OFF, TX_ON}; /* command for scc_key_trx() */ + +/* Vector masks in RR2B */ + +#define VECTOR_MASK 0x06 +#define TXINT 0x00 +#define EXINT 0x02 +#define RXINT 0x04 +#define SPINT 0x06 + +#ifdef CONFIG_SCC_DELAY +#define Inb(port) inb_p(port) +#define Outb(port, val) outb_p(val, port) +#else +#define Inb(port) inb(port) +#define Outb(port, val) outb(val, port) +#endif + +/* SCC channel control structure for KISS */ + +struct scc_kiss { + unsigned char txdelay; /* Transmit Delay 10 ms/cnt */ + unsigned char persist; /* Persistence (0-255) as a % */ + unsigned char slottime; /* Delay to wait on persistence hit */ + unsigned char tailtime; /* Delay after last byte written */ + unsigned char fulldup; /* Full Duplex mode 0=CSMA 1=DUP 2=ALWAYS KEYED */ + unsigned char waittime; /* Waittime before any transmit attempt */ + unsigned int maxkeyup; /* Maximum time to transmit (seconds) */ + unsigned int mintime; /* Minimal offtime after MAXKEYUP timeout (seconds) */ + unsigned int idletime; /* Maximum idle time in ALWAYS KEYED mode (seconds) */ + unsigned int maxdefer; /* Timer for CSMA channel busy limit */ + unsigned char tx_inhibit; /* Transmit is not allowed when set */ + unsigned char group; /* Group ID for AX.25 TX interlocking */ + unsigned char mode; /* 'normal' or 'hwctrl' mode (unused) */ + unsigned char softdcd; /* Use DPLL instead of DCD pin for carrier detect */ +}; + + +/* SCC channel structure */ + +struct scc_channel { + int init; /* channel exists? */ + + struct net_device *dev; /* link to device control structure */ + struct net_device_stats dev_stat;/* device statistics */ + + char brand; /* manufacturer of the board */ + long clock; /* used clock */ + + io_port ctrl; /* I/O address of CONTROL register */ + io_port data; /* I/O address of DATA register */ + io_port special; /* I/O address of special function port */ + int irq; /* Number of Interrupt */ + + char option; + char enhanced; /* Enhanced SCC support */ + + unsigned char wreg[16]; /* Copy of last written value in WRx */ + unsigned char status; /* Copy of R0 at last external interrupt */ + unsigned char dcd; /* DCD status */ + + struct scc_kiss kiss; /* control structure for KISS params */ + struct scc_stat stat; /* statistical information */ + struct scc_modem modem; /* modem information */ + + struct sk_buff_head tx_queue; /* next tx buffer */ + struct sk_buff *rx_buff; /* pointer to frame currently received */ + struct sk_buff *tx_buff; /* pointer to frame currently transmitted */ + + /* Timer */ + struct timer_list tx_t; /* tx timer for this channel */ + struct timer_list tx_wdog; /* tx watchdogs */ + + /* Channel lock */ + spinlock_t lock; /* Channel guard lock */ +}; + +#endif /* defined(__KERNEL__) */ +#endif /* defined(_SCC_H) */ diff --git a/include/linux/sched.h b/include/linux/sched.h new file mode 100644 index 00000000..5fb37196 --- /dev/null +++ b/include/linux/sched.h @@ -0,0 +1,2826 @@ +#ifndef _LINUX_SCHED_H +#define _LINUX_SCHED_H + +/* + * cloning flags: + */ +#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */ +#define CLONE_VM 0x00000100 /* set if VM shared between processes */ +#define CLONE_FS 0x00000200 /* set if fs info shared between processes */ +#define CLONE_FILES 0x00000400 /* set if open files shared between processes */ +#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked 
signals shared */ +#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */ +#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ +#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ +#define CLONE_THREAD 0x00010000 /* Same thread group? */ +#define CLONE_NEWNS 0x00020000 /* New namespace group? */ +#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */ +#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */ +#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */ +#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */ +#define CLONE_DETACHED 0x00400000 /* Unused, ignored */ +#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */ +#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ +/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state) + and is now available for re-use. */ +#define CLONE_NEWUTS 0x04000000 /* New utsname group? */ +#define CLONE_NEWIPC 0x08000000 /* New ipcs */ +#define CLONE_NEWUSER 0x10000000 /* New user namespace */ +#define CLONE_NEWPID 0x20000000 /* New pid namespace */ +#define CLONE_NEWNET 0x40000000 /* New network namespace */ +#define CLONE_IO 0x80000000 /* Clone io context */ + +/* + * Scheduling policies + */ +#define SCHED_NORMAL 0 +#define SCHED_FIFO 1 +#define SCHED_RR 2 +#define SCHED_BATCH 3 +/* SCHED_ISO: reserved but not implemented yet */ +#define SCHED_IDLE 5 +/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ +#define SCHED_RESET_ON_FORK 0x40000000 + +#ifdef __KERNEL__ + +struct sched_param { + int sched_priority; +}; + +#include <asm/param.h> /* for HZ */ + +#include <linux/capability.h> +#include <linux/threads.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/timex.h> +#include <linux/jiffies.h> +#include <linux/rbtree.h> +#include <linux/thread_info.h> +#include <linux/cpumask.h> +#include <linux/errno.h> +#include <linux/nodemask.h> +#include <linux/mm_types.h> + +#include <asm/page.h> +#include <asm/ptrace.h> +#include <asm/cputime.h> + +#include <linux/smp.h> +#include <linux/sem.h> +#include <linux/signal.h> +#include <linux/compiler.h> +#include <linux/completion.h> +#include <linux/pid.h> +#include <linux/percpu.h> +#include <linux/topology.h> +#include <linux/proportions.h> +#include <linux/seccomp.h> +#include <linux/rcupdate.h> +#include <linux/rculist.h> +#include <linux/rtmutex.h> + +#include <linux/time.h> +#include <linux/param.h> +#include <linux/resource.h> +#include <linux/timer.h> +#include <linux/hrtimer.h> +#include <linux/task_io_accounting.h> +#include <linux/latencytop.h> +#include <linux/cred.h> +#include <linux/llist.h> + +#include <asm/processor.h> + +struct exec_domain; +struct futex_pi_state; +struct robust_list_head; +struct bio_list; +struct fs_struct; +struct perf_event_context; +struct blk_plug; + +/* + * List of flags we want to share for kernel threads, + * if only because they are not used by them anyway. + */ +#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND) + +/* + * These are the constant used to fake the fixed-point load-average + * counting. 
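/*
 * Editorial sketch, not part of sched.h: a minimal user-space clone() call
 * combining the flags above the way a thread library does, sharing the VM,
 * filesystem info, open files and signal handlers with the parent and
 * placing the child in the same thread group. The sleep() at the end is a
 * crude stand-in for proper synchronisation.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int child_fn(void *arg)
{
	printf("child running in the parent's address space, arg=%s\n",
	       (const char *)arg);
	return 0;
}

int main(void)
{
	const int flags = CLONE_VM | CLONE_FS | CLONE_FILES |
			  CLONE_SIGHAND | CLONE_THREAD;
	char *stack = malloc(64 * 1024);

	if (!stack)
		return 1;
	/* the stack grows down on most architectures: pass the top of the area */
	if (clone(child_fn, stack + 64 * 1024, flags, "hello") == -1) {
		perror("clone");
		return 1;
	}
	sleep(1);	/* give the child time to run before the group exits */
	return 0;
}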
Some notes: + * - 11 bit fractions expand to 22 bits by the multiplies: this gives + * a load-average precision of 10 bits integer + 11 bits fractional + * - if you want to count load-averages more often, you need more + * precision, or rounding will get you. With 2-second counting freq, + * the EXP_n values would be 1981, 2034 and 2043 if still using only + * 11 bit fractions. + */ +extern unsigned long avenrun[]; /* Load averages */ +extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); + +#define FSHIFT 11 /* nr of bits of precision */ +#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ +#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ +#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ +#define EXP_5 2014 /* 1/exp(5sec/5min) */ +#define EXP_15 2037 /* 1/exp(5sec/15min) */ + +#define CALC_LOAD(load,exp,n) \ + load *= exp; \ + load += n*(FIXED_1-exp); \ + load >>= FSHIFT; + +extern unsigned long total_forks; +extern int nr_threads; +DECLARE_PER_CPU(unsigned long, process_counts); +extern int nr_processes(void); +extern unsigned long nr_running(void); +extern unsigned long nr_uninterruptible(void); +extern unsigned long nr_iowait(void); +extern unsigned long nr_iowait_cpu(int cpu); +extern unsigned long this_cpu_load(void); + + +extern void calc_global_load(unsigned long ticks); + +extern unsigned long get_parent_ip(unsigned long addr); + +struct seq_file; +struct cfs_rq; +struct task_group; +#ifdef CONFIG_SCHED_DEBUG +extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); +extern void proc_sched_set_task(struct task_struct *p); +extern void +print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); +#else +static inline void +proc_sched_show_task(struct task_struct *p, struct seq_file *m) +{ +} +static inline void proc_sched_set_task(struct task_struct *p) +{ +} +static inline void +print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) +{ +} +#endif + +/* + * Task state bitmask. NOTE! These bits are also + * encoded in fs/proc/array.c: get_task_state(). + * + * We have two separate sets of flags: task->state + * is about runnability, while task->exit_state are + * about the task exiting. Confusing, but this way + * modifying one set can't modify the other one by + * mistake. 
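/*
 * Editorial sketch, not part of sched.h: a stand-alone demonstration of the
 * fixed-point averaging that CALC_LOAD() performs. One runnable task
 * (n = 1 * FIXED_1) is fed in on every 5-second tick, and the 1-minute
 * average converges towards 1.0. The constants are copied from the header
 * above; the loop is the macro written out as an expression.
 */
#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1 << FSHIFT)
#define EXP_1   1884			/* 1/exp(5sec/1min) in fixed point */

int main(void)
{
	unsigned long avenrun_1 = 0;	/* 1-minute average, fixed point */
	unsigned long n = 1 * FIXED_1;	/* one runnable task */
	int tick;

	for (tick = 1; tick <= 36; tick++) {	/* 36 ticks = 3 minutes */
		avenrun_1 = (avenrun_1 * EXP_1 + n * (FIXED_1 - EXP_1)) >> FSHIFT;
		if (tick % 12 == 0)
			printf("after %d min: load %lu.%02lu\n", tick / 12,
			       avenrun_1 >> FSHIFT,
			       ((avenrun_1 & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;
}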
+ */ +#define TASK_RUNNING 0 +#define TASK_INTERRUPTIBLE 1 +#define TASK_UNINTERRUPTIBLE 2 +#define __TASK_STOPPED 4 +#define __TASK_TRACED 8 +/* in tsk->exit_state */ +#define EXIT_ZOMBIE 16 +#define EXIT_DEAD 32 +/* in tsk->state again */ +#define TASK_DEAD 64 +#define TASK_WAKEKILL 128 +#define TASK_WAKING 256 +#define TASK_STATE_MAX 512 + +#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" + +extern char ___assert_task_state[1 - 2*!!( + sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; + +/* Convenience macros for the sake of set_task_state */ +#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) +#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) +#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) + +/* Convenience macros for the sake of wake_up */ +#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) +#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) + +/* get_task_state() */ +#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED) + +#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) +#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +#define task_is_dead(task) ((task)->exit_state != 0) +#define task_is_stopped_or_traced(task) \ + ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) +#define task_contributes_to_load(task) \ + ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0) + +#define __set_task_state(tsk, state_value) \ + do { (tsk)->state = (state_value); } while (0) +#define set_task_state(tsk, state_value) \ + set_mb((tsk)->state, (state_value)) + +/* + * set_current_state() includes a barrier so that the write of current->state + * is correctly serialised wrt the caller's subsequent test of whether to + * actually sleep: + * + * set_current_state(TASK_UNINTERRUPTIBLE); + * if (do_i_need_to_sleep()) + * schedule(); + * + * If the caller does not need such serialisation then use __set_current_state() + */ +#define __set_current_state(state_value) \ + do { current->state = (state_value); } while (0) +#define set_current_state(state_value) \ + set_mb(current->state, (state_value)) + +/* Task command name length */ +#define TASK_COMM_LEN 16 + +#include <linux/spinlock.h> + +/* + * This serializes "schedule()" and also protects + * the run-queue from deletions/modifications (but + * _adding_ to the beginning of the run-queue has + * a separate lock). + */ +extern rwlock_t tasklist_lock; +extern spinlock_t mmlist_lock; + +struct task_struct; + +#ifdef CONFIG_PROVE_RCU +extern int lockdep_tasklist_lock_is_held(void); +#endif /* #ifdef CONFIG_PROVE_RCU */ + +extern void sched_init(void); +extern void sched_init_smp(void); +extern asmlinkage void schedule_tail(struct task_struct *prev); +extern void init_idle(struct task_struct *idle, int cpu); +extern void init_idle_bootup_task(struct task_struct *idle); + +extern int runqueue_is_locked(int cpu); + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) +extern void select_nohz_load_balancer(int stop_tick); +extern void set_cpu_sd_state_idle(void); +extern int get_nohz_timer_target(void); +#else +static inline void select_nohz_load_balancer(int stop_tick) { } +static inline void set_cpu_sd_state_idle(void) { } +#endif + +/* + * Only dump TASK_* tasks. 
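/*
 * Editorial sketch, not part of sched.h: the sleep pattern that the
 * set_current_state() comment above describes, spelled out. 'my_cond' is a
 * hypothetical flag set by a waker that also calls wake_up_process() on
 * this task; real code normally goes through the wait-queue helpers, which
 * wrap exactly this sequence.
 */
static void example_sleep_until(volatile int *my_cond)
{
	while (!*my_cond) {
		set_current_state(TASK_INTERRUPTIBLE);	/* state write + barrier */
		if (*my_cond)				/* re-check after the barrier */
			break;
		schedule();				/* sleep until woken */
	}
	__set_current_state(TASK_RUNNING);
}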
(0 for all tasks) + */ +extern void show_state_filter(unsigned long state_filter); + +static inline void show_state(void) +{ + show_state_filter(0); +} + +extern void show_regs(struct pt_regs *); + +/* + * TASK is a pointer to the task whose backtrace we want to see (or NULL for current + * task), SP is the stack pointer of the first frame that should be shown in the back + * trace (or NULL if the entire call-chain of the task should be shown). + */ +extern void show_stack(struct task_struct *task, unsigned long *sp); + +void io_schedule(void); +long io_schedule_timeout(long timeout); + +extern void cpu_init (void); +extern void trap_init(void); +extern void update_process_times(int user); +extern void scheduler_tick(void); + +extern void sched_show_task(struct task_struct *p); + +#ifdef CONFIG_LOCKUP_DETECTOR +extern void touch_softlockup_watchdog(void); +extern void touch_softlockup_watchdog_sync(void); +extern void touch_all_softlockup_watchdogs(void); +extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +extern unsigned int softlockup_panic; +void lockup_detector_init(void); +#else +static inline void touch_softlockup_watchdog(void) +{ +} +static inline void touch_softlockup_watchdog_sync(void) +{ +} +static inline void touch_all_softlockup_watchdogs(void) +{ +} +static inline void lockup_detector_init(void) +{ +} +#endif + +#ifdef CONFIG_DETECT_HUNG_TASK +extern unsigned int sysctl_hung_task_panic; +extern unsigned long sysctl_hung_task_check_count; +extern unsigned long sysctl_hung_task_timeout_secs; +extern unsigned long sysctl_hung_task_warnings; +extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +#else +/* Avoid need for ifdefs elsewhere in the code */ +enum { sysctl_hung_task_timeout_secs = 0 }; +#endif + +/* Attach to any functions which should be ignored in wchan output. */ +#define __sched __attribute__((__section__(".sched.text"))) + +/* Linker adds these: start and end of __sched functions */ +extern char __sched_text_start[], __sched_text_end[]; + +/* Is this address in the __sched functions? */ +extern int in_sched_functions(unsigned long addr); + +#define MAX_SCHEDULE_TIMEOUT LONG_MAX +extern signed long schedule_timeout(signed long timeout); +extern signed long schedule_timeout_interruptible(signed long timeout); +extern signed long schedule_timeout_killable(signed long timeout); +extern signed long schedule_timeout_uninterruptible(signed long timeout); +asmlinkage void schedule(void); +extern void schedule_preempt_disabled(void); +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); + +struct nsproxy; +struct user_namespace; + +/* + * Default maximum number of active map areas, this limits the number of vmas + * per mm struct. Users can overwrite this number by sysctl but there is a + * problem. + * + * When a program's coredump is generated as ELF format, a section is created + * per a vma. In ELF, the number of sections is represented in unsigned short. + * This means the number of sections should be smaller than 65535 at coredump. + * Because the kernel adds some informative sections to a image of program at + * generating coredump, we need some margin. The number of extra sections is + * 1-3 now and depends on arch. We use "5" as safe margin, here. 
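/*
 * Editorial sketch, not part of sched.h: the usual way the timeout variants
 * above are used, here inside a hypothetical kernel thread that does one
 * unit of work and then sleeps for roughly 100 ms. kthread_should_stop()
 * and msecs_to_jiffies() come from <linux/kthread.h> and <linux/jiffies.h>.
 */
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static int example_poll_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work here */
		schedule_timeout_interruptible(msecs_to_jiffies(100));
	}
	return 0;
}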
+ */ +#define MAPCOUNT_ELF_CORE_MARGIN (5) +#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + +extern int sysctl_max_map_count; + +#include <linux/aio.h> + +#ifdef CONFIG_MMU +extern void arch_pick_mmap_layout(struct mm_struct *mm); +extern unsigned long +arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +extern unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +extern void arch_unmap_area(struct mm_struct *, unsigned long); +extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); +#else +static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} +#endif + + +extern void set_dumpable(struct mm_struct *mm, int value); +extern int get_dumpable(struct mm_struct *mm); + +/* mm flags */ +/* dumpable bits */ +#define MMF_DUMPABLE 0 /* core dump is permitted */ +#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */ + +#define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) + +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_ELF_HEADERS 6 +#define MMF_DUMP_HUGETLB_PRIVATE 7 +#define MMF_DUMP_HUGETLB_SHARED 8 + +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 7 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ + (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) + +#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS +# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) +#else +# define MMF_DUMP_MASK_DEFAULT_ELF 0 +#endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) + +struct sighand_struct { + atomic_t count; + struct k_sigaction action[_NSIG]; + spinlock_t siglock; + wait_queue_head_t signalfd_wqh; +}; + +struct pacct_struct { + int ac_flag; + long ac_exitcode; + unsigned long ac_mem; + cputime_t ac_utime, ac_stime; + unsigned long ac_minflt, ac_majflt; +}; + +struct cpu_itimer { + cputime_t expires; + cputime_t incr; + u32 error; + u32 incr_error; +}; + +/** + * struct task_cputime - collected CPU time counts + * @utime: time spent in user mode, in &cputime_t units + * @stime: time spent in kernel mode, in &cputime_t units + * @sum_exec_runtime: total time spent on the CPU, in nanoseconds + * + * This structure groups together three kinds of CPU time that are + * tracked for threads and thread groups. Most things considering + * CPU time want to group these counts together and treat all three + * of them in parallel. + */ +struct task_cputime { + cputime_t utime; + cputime_t stime; + unsigned long long sum_exec_runtime; +}; +/* Alternate field names when used to cache expirations. */ +#define prof_exp stime +#define virt_exp utime +#define sched_exp sum_exec_runtime + +#define INIT_CPUTIME \ + (struct task_cputime) { \ + .utime = 0, \ + .stime = 0, \ + .sum_exec_runtime = 0, \ + } + +/* + * Disable preemption until the scheduler is running. + * Reset by start_kernel()->sched_init()->init_idle(). 
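/*
 * Editorial sketch, not part of sched.h: the MMF_DUMP_* filter bits above
 * are what /proc/<pid>/coredump_filter exposes, shifted down by
 * MMF_DUMP_FILTER_SHIFT. This user-space snippet opts the current process
 * into dumping file-backed private mappings; the bit position is an
 * assumption derived from the defines above (MMF_DUMP_MAPPED_PRIVATE - 2).
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/coredump_filter", "r+");
	unsigned int filter = 0;

	if (!f)
		return 1;
	if (fscanf(f, "%x", &filter) != 1) {
		fclose(f);
		return 1;
	}
	filter |= 1U << (4 - 2);	/* MMF_DUMP_MAPPED_PRIVATE - MMF_DUMP_FILTER_SHIFT */
	rewind(f);
	fprintf(f, "%x\n", filter);
	fclose(f);
	printf("coredump_filter now %#x\n", filter);
	return 0;
}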
+ * + * We include PREEMPT_ACTIVE to avoid cond_resched() from working + * before the scheduler is active -- see should_resched(). + */ +#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE) + +/** + * struct thread_group_cputimer - thread group interval timer counts + * @cputime: thread group interval timers. + * @running: non-zero when there are timers running and + * @cputime receives updates. + * @lock: lock for fields in this struct. + * + * This structure contains the version of task_cputime, above, that is + * used for thread group CPU timer calculations. + */ +struct thread_group_cputimer { + struct task_cputime cputime; + int running; + raw_spinlock_t lock; +}; + +#include <linux/rwsem.h> +struct autogroup; + +/* + * NOTE! "signal_struct" does not have its own + * locking, because a shared signal_struct always + * implies a shared sighand_struct, so locking + * sighand_struct is always a proper superset of + * the locking of signal_struct. + */ +struct signal_struct { + atomic_t sigcnt; + atomic_t live; + int nr_threads; + + wait_queue_head_t wait_chldexit; /* for wait4() */ + + /* current thread group signal load-balancing target: */ + struct task_struct *curr_target; + + /* shared signal handling: */ + struct sigpending shared_pending; + + /* thread group exit support */ + int group_exit_code; + /* overloaded: + * - notify group_exit_task when ->count is equal to notify_count + * - everyone except group_exit_task is stopped during signal delivery + * of fatal signals, group_exit_task processes the signal. + */ + int notify_count; + struct task_struct *group_exit_task; + + /* thread group stop support, overloads group_exit_code too */ + int group_stop_count; + unsigned int flags; /* see SIGNAL_* flags below */ + + /* + * PR_SET_CHILD_SUBREAPER marks a process, like a service + * manager, to re-parent orphan (double-forking) child processes + * to this process instead of 'init'. The service manager is + * able to receive SIGCHLD signals and is able to investigate + * the process until it calls wait(). All children of this + * process will inherit a flag if they should look for a + * child_subreaper process at exit. + */ + unsigned int is_child_subreaper:1; + unsigned int has_child_subreaper:1; + + /* POSIX.1b Interval Timers */ + struct list_head posix_timers; + + /* ITIMER_REAL timer for the process */ + struct hrtimer real_timer; + struct pid *leader_pid; + ktime_t it_real_incr; + + /* + * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use + * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these + * values are defined to 0 and 1 respectively + */ + struct cpu_itimer it[2]; + + /* + * Thread group totals for process CPU timers. + * See thread_group_cputimer(), et al, for details. + */ + struct thread_group_cputimer cputimer; + + /* Earliest-expiration cache. */ + struct task_cputime cputime_expires; + + struct list_head cpu_timers[3]; + + struct pid *tty_old_pgrp; + + /* boolean value for session group leader */ + int leader; + + struct tty_struct *tty; /* NULL if no tty */ + +#ifdef CONFIG_SCHED_AUTOGROUP + struct autogroup *autogroup; +#endif + /* + * Cumulative resource counters for dead threads in the group, + * and for reaped dead child processes forked by this group. + * Live threads maintain their own counters and add to these + * in __exit_signal, except for the group leader. 
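/*
 * Editorial sketch, not part of sched.h: the it[2] array above backs the
 * process-wide ITIMER_PROF/ITIMER_VIRTUAL timers. From user space they are
 * armed with setitimer(); this one-shot example asks for SIGVTALRM after
 * the process has consumed about 250 ms of user-mode CPU time.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

static volatile sig_atomic_t fired;

static void on_vtalrm(int sig)
{
	(void)sig;
	fired = 1;
}

int main(void)
{
	struct itimerval it = {
		.it_value = { .tv_sec = 0, .tv_usec = 250000 },	/* first expiry */
		/* .it_interval left zero: one-shot timer */
	};

	signal(SIGVTALRM, on_vtalrm);
	setitimer(ITIMER_VIRTUAL, &it, NULL);

	while (!fired)
		;	/* burn user-mode CPU so the virtual timer advances */

	puts("ITIMER_VIRTUAL expired after ~250 ms of CPU time");
	return 0;
}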
+ */ + cputime_t utime, stime, cutime, cstime; + cputime_t gtime; + cputime_t cgtime; +#ifndef CONFIG_VIRT_CPU_ACCOUNTING + cputime_t prev_utime, prev_stime; +#endif + unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; + unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; + unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; + struct task_io_accounting ioac; + + /* + * Cumulative ns of schedule CPU time fo dead threads in the + * group, not including a zombie group leader, (This only differs + * from jiffies_to_ns(utime + stime) if sched_clock uses something + * other than jiffies.) + */ + unsigned long long sum_sched_runtime; + + /* + * We don't bother to synchronize most readers of this at all, + * because there is no reader checking a limit that actually needs + * to get both rlim_cur and rlim_max atomically, and either one + * alone is a single word that can safely be read normally. + * getrlimit/setrlimit use task_lock(current->group_leader) to + * protect this instead of the siglock, because they really + * have no need to disable irqs. + */ + struct rlimit rlim[RLIM_NLIMITS]; + +#ifdef CONFIG_BSD_PROCESS_ACCT + struct pacct_struct pacct; /* per-process accounting information */ +#endif +#ifdef CONFIG_TASKSTATS + struct taskstats *stats; +#endif +#ifdef CONFIG_AUDIT + unsigned audit_tty; + struct tty_audit_buf *tty_audit_buf; +#endif +#ifdef CONFIG_CGROUPS + /* + * group_rwsem prevents new tasks from entering the threadgroup and + * member tasks from exiting,a more specifically, setting of + * PF_EXITING. fork and exit paths are protected with this rwsem + * using threadgroup_change_begin/end(). Users which require + * threadgroup to remain stable should use threadgroup_[un]lock() + * which also takes care of exec path. Currently, cgroup is the + * only user. + */ + struct rw_semaphore group_rwsem; +#endif + + int oom_adj; /* OOM kill score adjustment (bit shift) */ + int oom_score_adj; /* OOM kill score adjustment */ + int oom_score_adj_min; /* OOM kill score adjustment minimum value. + * Only settable by CAP_SYS_RESOURCE. */ + + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably. ptrace) */ +}; + +/* Context switch must be unlocked if interrupts are to be enabled */ +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW +# define __ARCH_WANT_UNLOCKED_CTXSW +#endif + +/* + * Bits in flags field of signal_struct. + */ +#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ +/* + * Pending notifications to parent. + */ +#define SIGNAL_CLD_STOPPED 0x00000010 +#define SIGNAL_CLD_CONTINUED 0x00000020 +#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) + +#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ + +/* If true, all threads except ->group_exit_task have pending SIGKILL */ +static inline int signal_group_exit(const struct signal_struct *sig) +{ + return (sig->flags & SIGNAL_GROUP_EXIT) || + (sig->group_exit_task != NULL); +} + +/* + * Some day this will be a full-fledged user tracking system.. + */ +struct user_struct { + atomic_t __count; /* reference count */ + atomic_t processes; /* How many processes does this user have? */ + atomic_t files; /* How many open files does this user have? */ + atomic_t sigpending; /* How many pending signals does this user have? 
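/*
 * Editorial sketch, not part of sched.h: the rlim[] array above is what
 * getrlimit()/setrlimit() read and write, under the locking rules the
 * comment describes. From user space, raising the soft open-file limit up
 * to the hard limit looks like this.
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
		return 1;
	printf("RLIMIT_NOFILE: soft=%llu hard=%llu\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_max);

	rl.rlim_cur = rl.rlim_max;	/* the soft limit may be raised to the hard limit */
	if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
		perror("setrlimit");
	return 0;
}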
*/ +#ifdef CONFIG_INOTIFY_USER + atomic_t inotify_watches; /* How many inotify watches does this user have? */ + atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ +#endif +#ifdef CONFIG_FANOTIFY + atomic_t fanotify_listeners; +#endif +#ifdef CONFIG_EPOLL + atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ +#endif +#ifdef CONFIG_POSIX_MQUEUE + /* protected by mq_lock */ + unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ +#endif + unsigned long locked_shm; /* How many pages of mlocked shm ? */ + +#ifdef CONFIG_KEYS + struct key *uid_keyring; /* UID specific keyring */ + struct key *session_keyring; /* UID's default session keyring */ +#endif + + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + uid_t uid; + struct user_namespace *user_ns; + +#ifdef CONFIG_PERF_EVENTS + atomic_long_t locked_vm; +#endif +}; + +extern int uids_sysfs_init(void); + +extern struct user_struct *find_user(uid_t); + +extern struct user_struct root_user; +#define INIT_USER (&root_user) + + +struct backing_dev_info; +struct reclaim_state; + +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) +struct sched_info { + /* cumulative counters */ + unsigned long pcount; /* # of times run on this cpu */ + unsigned long long run_delay; /* time spent waiting on a runqueue */ + + /* timestamps */ + unsigned long long last_arrival,/* when we last ran on a cpu */ + last_queued; /* when we were last queued to run */ +}; +#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ + +#ifdef CONFIG_TASK_DELAY_ACCT +struct task_delay_info { + spinlock_t lock; + unsigned int flags; /* Private per-task flags */ + + /* For each stat XXX, add following, aligned appropriately + * + * struct timespec XXX_start, XXX_end; + * u64 XXX_delay; + * u32 XXX_count; + * + * Atomicity of updates to XXX_delay, XXX_count protected by + * single lock above (split into XXX_lock if contention is an issue). + */ + + /* + * XXX_count is incremented on every XXX operation, the delay + * associated with the operation is added to XXX_delay. + * XXX_delay contains the accumulated delay time in nanoseconds. + */ + struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ + u64 blkio_delay; /* wait for sync block io completion */ + u64 swapin_delay; /* wait for swapin block io completion */ + u32 blkio_count; /* total count of the number of sync block */ + /* io operations performed */ + u32 swapin_count; /* total count of the number of swapin block */ + /* io operations performed */ + + struct timespec freepages_start, freepages_end; + u64 freepages_delay; /* wait for memory reclaim */ + u32 freepages_count; /* total count of memory reclaim */ +}; +#endif /* CONFIG_TASK_DELAY_ACCT */ + +static inline int sched_info_on(void) +{ +#ifdef CONFIG_SCHEDSTATS + return 1; +#elif defined(CONFIG_TASK_DELAY_ACCT) + extern int delayacct_on; + return delayacct_on; +#else + return 0; +#endif +} + +enum cpu_idle_type { + CPU_IDLE, + CPU_NOT_IDLE, + CPU_NEWLY_IDLE, + CPU_MAX_IDLE_TYPES +}; + +/* + * Increase resolution of nice-level calculations for 64-bit architectures. + * The extra resolution improves shares distribution and load balancing of + * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup + * hierarchies, especially on larger systems. This is not a user-visible change + * and does not change the user-interface for setting shares/weights. 
+ * + * We increase resolution only if we have enough bits to allow this increased + * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution + * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the + * increased costs. + */ +#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */ +# define SCHED_LOAD_RESOLUTION 10 +# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION) +# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION) +#else +# define SCHED_LOAD_RESOLUTION 0 +# define scale_load(w) (w) +# define scale_load_down(w) (w) +#endif + +#define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION) +#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) + +/* + * Increase resolution of cpu_power calculations + */ +#define SCHED_POWER_SHIFT 10 +#define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT) + +/* + * sched-domains (multiprocessor balancing) declarations: + */ +#ifdef CONFIG_SMP +#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */ +#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ +#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ +#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ +#define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */ +#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ +#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ +#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ +#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ + +enum powersavings_balance_level { + POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ + POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package + * first for long running threads + */ + POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle + * cpu package for power savings + */ + MAX_POWERSAVINGS_BALANCE_LEVELS +}; + +extern int sched_mc_power_savings, sched_smt_power_savings; + +static inline int sd_balance_for_mc_power(void) +{ + if (sched_smt_power_savings) + return SD_POWERSAVINGS_BALANCE; + + if (!sched_mc_power_savings) + return SD_PREFER_SIBLING; + + return 0; +} + +static inline int sd_balance_for_package_power(void) +{ + if (sched_mc_power_savings | sched_smt_power_savings) + return SD_POWERSAVINGS_BALANCE; + + return SD_PREFER_SIBLING; +} + +extern int __weak arch_sd_sibiling_asym_packing(void); + +/* + * Optimise SD flags for power savings: + * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings. + * Keep default SD flags if sched_{smt,mc}_power_saving=0 + */ + +static inline int sd_power_saving_flags(void) +{ + if (sched_mc_power_savings | sched_smt_power_savings) + return SD_BALANCE_NEWIDLE; + + return 0; +} + +struct sched_group_power { + atomic_t ref; + /* + * CPU power of this group, SCHED_LOAD_SCALE being max power for a + * single CPU. + */ + unsigned int power, power_orig; + unsigned long next_update; + /* + * Number of busy cpus in this group. 
+ */ + atomic_t nr_busy_cpus; +}; + +struct sched_group { + struct sched_group *next; /* Must be a circular list */ + atomic_t ref; + + unsigned int group_weight; + struct sched_group_power *sgp; + + /* + * The CPUs this group covers. + * + * NOTE: this field is variable length. (Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + */ + unsigned long cpumask[0]; +}; + +static inline struct cpumask *sched_group_cpus(struct sched_group *sg) +{ + return to_cpumask(sg->cpumask); +} + +/** + * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. + * @group: The group whose first cpu is to be returned. + */ +static inline unsigned int group_first_cpu(struct sched_group *group) +{ + return cpumask_first(sched_group_cpus(group)); +} + +struct sched_domain_attr { + int relax_domain_level; +}; + +#define SD_ATTR_INIT (struct sched_domain_attr) { \ + .relax_domain_level = -1, \ +} + +extern int sched_domain_level_max; + +struct sched_domain { + /* These fields must be setup */ + struct sched_domain *parent; /* top domain must be null terminated */ + struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_group *groups; /* the balancing groups of the domain */ + unsigned long min_interval; /* Minimum balance interval ms */ + unsigned long max_interval; /* Maximum balance interval ms */ + unsigned int busy_factor; /* less balancing by factor if busy */ + unsigned int imbalance_pct; /* No balance until over watermark */ + unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int busy_idx; + unsigned int idle_idx; + unsigned int newidle_idx; + unsigned int wake_idx; + unsigned int forkexec_idx; + unsigned int smt_gain; + int flags; /* See SD_* */ + int level; + + /* Runtime fields. */ + unsigned long last_balance; /* init to jiffies. units in jiffies */ + unsigned int balance_interval; /* initialise to 1. units in ms. */ + unsigned int nr_balance_failed; /* initialise to 0 */ + + u64 last_update; + +#ifdef CONFIG_SCHEDSTATS + /* load_balance() stats */ + unsigned int lb_count[CPU_MAX_IDLE_TYPES]; + unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; + unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; + unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; + + /* Active load balancing */ + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + + /* SD_BALANCE_EXEC stats */ + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + + /* SD_BALANCE_FORK stats */ + unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + + /* try_to_wake_up() stats */ + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; +#endif +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif + union { + void *private; /* used during construction */ + struct rcu_head rcu; /* used during destruction */ + }; + + unsigned int span_weight; + /* + * Span of all CPUs in this domain. + * + * NOTE: this field is variable length. 
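/*
 * Editorial sketch, not part of sched.h: the cpumask[0]/span[0] members
 * above use the classic "allocate extra space after the struct" idiom.
 * A stand-alone illustration with a hypothetical struct shows how the
 * trailing array is sized at allocation time rather than in the type.
 */
#include <stdio.h>
#include <stdlib.h>

struct group {
	unsigned int weight;
	unsigned long mask[];		/* flexible array member */
};

static struct group *group_alloc(size_t mask_words)
{
	return calloc(1, sizeof(struct group) + mask_words * sizeof(unsigned long));
}

int main(void)
{
	struct group *g = group_alloc(4);	/* room for 4 words of CPU bits */

	if (!g)
		return 1;
	g->mask[3] = 1UL;			/* mark one CPU in the last word */
	printf("group at %p, mask word 3 = %#lx\n", (void *)g, g->mask[3]);
	free(g);
	return 0;
}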
(Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + */ + unsigned long span[0]; +}; + +static inline struct cpumask *sched_domain_span(struct sched_domain *sd) +{ + return to_cpumask(sd->span); +} + +extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new); + +/* Allocate an array of sched domains, for partition_sched_domains(). */ +cpumask_var_t *alloc_sched_domains(unsigned int ndoms); +void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); + +/* Test a flag in parent sched domain */ +static inline int test_sd_parent(struct sched_domain *sd, int flag) +{ + if (sd->parent && (sd->parent->flags & flag)) + return 1; + + return 0; +} + +unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu); +unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu); + +bool cpus_share_cache(int this_cpu, int that_cpu); + +#else /* CONFIG_SMP */ + +struct sched_domain_attr; + +static inline void +partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ +} + +static inline bool cpus_share_cache(int this_cpu, int that_cpu) +{ + return true; +} + +#endif /* !CONFIG_SMP */ + + +struct io_context; /* See blkdev.h */ + + +#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK +extern void prefetch_stack(struct task_struct *t); +#else +static inline void prefetch_stack(struct task_struct *t) { } +#endif + +struct audit_context; /* See audit.c */ +struct mempolicy; +struct pipe_inode_info; +struct uts_namespace; + +struct rq; +struct sched_domain; + +/* + * wake flags + */ +#define WF_SYNC 0x01 /* waker goes to sleep after wakup */ +#define WF_FORK 0x02 /* child wakeup after fork */ +#define WF_MIGRATED 0x04 /* internal use, task got migrated */ + +#define ENQUEUE_WAKEUP 1 +#define ENQUEUE_HEAD 2 +#ifdef CONFIG_SMP +#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */ +#else +#define ENQUEUE_WAKING 0 +#endif + +#define DEQUEUE_SLEEP 1 + +struct sched_class { + const struct sched_class *next; + + void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); + void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); + void (*yield_task) (struct rq *rq); + bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); + + void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); + + struct task_struct * (*pick_next_task) (struct rq *rq); + void (*put_prev_task) (struct rq *rq, struct task_struct *p); + +#ifdef CONFIG_SMP + int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); + + void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); + void (*post_schedule) (struct rq *this_rq); + void (*task_waking) (struct task_struct *task); + void (*task_woken) (struct rq *this_rq, struct task_struct *task); + + void (*set_cpus_allowed)(struct task_struct *p, + const struct cpumask *newmask); + + void (*rq_online)(struct rq *rq); + void (*rq_offline)(struct rq *rq); +#endif + + void (*set_curr_task) (struct rq *rq); + void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); + void (*task_fork) (struct task_struct *p); + + void (*switched_from) (struct rq *this_rq, struct task_struct *task); + void (*switched_to) (struct rq *this_rq, struct task_struct *task); + void (*prio_changed) (struct rq *this_rq, struct task_struct *task, + int oldprio); + + unsigned int (*get_rr_interval) (struct rq *rq, 
+ struct task_struct *task); + +#ifdef CONFIG_FAIR_GROUP_SCHED + void (*task_move_group) (struct task_struct *p, int on_rq); +#endif +}; + +struct load_weight { + unsigned long weight, inv_weight; +}; + +#ifdef CONFIG_SCHEDSTATS +struct sched_statistics { + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; +}; +#endif + +struct sched_entity { + struct load_weight load; /* for load-balancing */ + struct rb_node run_node; + struct list_head group_node; + unsigned int on_rq; + + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; + + u64 nr_migrations; + +#ifdef CONFIG_SCHEDSTATS + struct sched_statistics statistics; +#endif + +#ifdef CONFIG_FAIR_GROUP_SCHED + struct sched_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct cfs_rq *cfs_rq; + /* rq "owned" by this entity/group: */ + struct cfs_rq *my_q; +#endif +}; + +struct sched_rt_entity { + struct list_head run_list; + unsigned long timeout; + unsigned int time_slice; + int nr_cpus_allowed; + + struct sched_rt_entity *back; +#ifdef CONFIG_RT_GROUP_SCHED + struct sched_rt_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct rt_rq *rt_rq; + /* rq "owned" by this entity/group: */ + struct rt_rq *my_q; +#endif +}; + +/* + * default timeslice is 100 msecs (used only for SCHED_RR tasks). + * Timeslices get refilled after they expire. + */ +#define RR_TIMESLICE (100 * HZ / 1000) + +struct rcu_node; + +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_sw_context, + perf_nr_task_contexts, +}; + +struct task_struct { + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ + void *stack; + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ + unsigned int ptrace; + +#ifdef CONFIG_SMP + struct llist_node wake_entry; + int on_cpu; +#endif + int on_rq; + + int prio, static_prio, normal_prio; + unsigned int rt_priority; + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; + +#ifdef CONFIG_PREEMPT_NOTIFIERS + /* list of struct preempt_notifier: */ + struct hlist_head preempt_notifiers; +#endif + + /* + * fpu_counter contains the number of consecutive context switches + * that the FPU is used. If this is over a threshold, the lazy fpu + * saving becomes unlazy to save the trap. 
This is an unsigned char + * so that after 256 times the counter wraps and the behavior turns + * lazy again; this to deal with bursty apps that only use FPU for + * a short time + */ + unsigned char fpu_counter; +#ifdef CONFIG_BLK_DEV_IO_TRACE + unsigned int btrace_seq; +#endif + + unsigned int policy; + cpumask_t cpus_allowed; + +#ifdef CONFIG_PREEMPT_RCU + int rcu_read_lock_nesting; + char rcu_read_unlock_special; + struct list_head rcu_node_entry; +#endif /* #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_TREE_PREEMPT_RCU + struct rcu_node *rcu_blocked_node; +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ +#ifdef CONFIG_RCU_BOOST + struct rt_mutex *rcu_boost_mutex; +#endif /* #ifdef CONFIG_RCU_BOOST */ + +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) + struct sched_info sched_info; +#endif + + struct list_head tasks; +#ifdef CONFIG_SMP + struct plist_node pushable_tasks; +#endif + + struct mm_struct *mm, *active_mm; +#ifdef CONFIG_COMPAT_BRK + unsigned brk_randomized:1; +#endif +#if defined(SPLIT_RSS_COUNTING) + struct task_rss_stat rss_stat; +#endif +/* task state */ + int exit_state; + int exit_code, exit_signal; + int pdeath_signal; /* The signal sent when the parent dies */ + unsigned int jobctl; /* JOBCTL_*, siglock protected */ + /* ??? */ + unsigned int personality; + unsigned did_exec:1; + unsigned in_execve:1; /* Tell the LSMs that the process is doing an + * execve */ + unsigned in_iowait:1; + + + /* Revert to default priority/policy when forking */ + unsigned sched_reset_on_fork:1; + unsigned sched_contributes_to_load:1; + +#ifdef CONFIG_GENERIC_HARDIRQS + /* IRQ handler threads */ + unsigned irq_thread:1; +#endif + + pid_t pid; + pid_t tgid; + +#ifdef CONFIG_CC_STACKPROTECTOR + /* Canary value for the -fstack-protector gcc feature */ + unsigned long stack_canary; +#endif + + /* + * pointers to (original) parent process, youngest child, younger sibling, + * older sibling, respectively. (p->father can be replaced with + * p->real_parent->pid) + */ + struct task_struct __rcu *real_parent; /* real parent process */ + struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ + /* + * children/sibling forms the list of my natural children + */ + struct list_head children; /* list of my children */ + struct list_head sibling; /* linkage in my parent's children list */ + struct task_struct *group_leader; /* threadgroup leader */ + + /* + * ptraced is the list of tasks this task is using ptrace on. + * This includes both natural children and PTRACE_ATTACH targets. + * p->ptrace_entry is p's link on the p->parent->ptraced list. + */ + struct list_head ptraced; + struct list_head ptrace_entry; + + /* PID/PID hash table linkage. 
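/*
 * Editorial sketch, not part of sched.h: walking the children/sibling lists
 * described above. The list head lives in the parent, each child is linked
 * in through its own ->sibling member, and the walk is protected here with
 * tasklist_lock (RCU would also do). example_print_children() is
 * hypothetical; list_for_each_entry() comes from <linux/list.h>.
 */
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/printk.h>

static void example_print_children(struct task_struct *parent)
{
	struct task_struct *child;

	read_lock(&tasklist_lock);
	list_for_each_entry(child, &parent->children, sibling)
		pr_info("child %d (%s)\n", child->pid, child->comm);
	read_unlock(&tasklist_lock);
}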
*/ + struct pid_link pids[PIDTYPE_MAX]; + struct list_head thread_group; + + struct completion *vfork_done; /* for vfork() */ + int __user *set_child_tid; /* CLONE_CHILD_SETTID */ + int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + + cputime_t utime, stime, utimescaled, stimescaled; + cputime_t gtime; +#ifndef CONFIG_VIRT_CPU_ACCOUNTING + cputime_t prev_utime, prev_stime; +#endif + unsigned long nvcsw, nivcsw; /* context switch counts */ + struct timespec start_time; /* monotonic time */ + struct timespec real_start_time; /* boot based time */ +/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ + unsigned long min_flt, maj_flt; + + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; + +/* process credentials */ + const struct cred __rcu *real_cred; /* objective and real subjective task + * credentials (COW) */ + const struct cred __rcu *cred; /* effective (overridable) subjective task + * credentials (COW) */ + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ + + char comm[TASK_COMM_LEN]; /* executable name excluding path + - access with [gs]et_task_comm (which lock + it with task_lock()) + - initialized normally by setup_new_exec */ +/* file system info */ + int link_count, total_link_count; +#ifdef CONFIG_SYSVIPC +/* ipc stuff */ + struct sysv_sem sysvsem; +#endif +#ifdef CONFIG_DETECT_HUNG_TASK +/* hung task detection */ + unsigned long last_switch_count; +#endif +/* CPU-specific state of this task */ + struct thread_struct thread; +/* filesystem information */ + struct fs_struct *fs; +/* open file information */ + struct files_struct *files; +/* namespaces */ + struct nsproxy *nsproxy; +/* signal handlers */ + struct signal_struct *signal; + struct sighand_struct *sighand; + + sigset_t blocked, real_blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ + struct sigpending pending; + + unsigned long sas_ss_sp; + size_t sas_ss_size; + int (*notifier)(void *priv); + void *notifier_data; + sigset_t *notifier_mask; + struct audit_context *audit_context; +#ifdef CONFIG_AUDITSYSCALL + uid_t loginuid; + unsigned int sessionid; +#endif + seccomp_t seccomp; + +/* Thread group tracking */ + u32 parent_exec_id; + u32 self_exec_id; +/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, + * mempolicy */ + spinlock_t alloc_lock; + + /* Protection of the PI data structures: */ + raw_spinlock_t pi_lock; + +#ifdef CONFIG_RT_MUTEXES + /* PI waiters blocked on a rt_mutex held by this task */ + struct plist_head pi_waiters; + /* Deadlock detection and priority inheritance handling */ + struct rt_mutex_waiter *pi_blocked_on; +#endif + +#ifdef CONFIG_DEBUG_MUTEXES + /* mutex deadlock detection */ + struct mutex_waiter *blocked_on; +#endif +#ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + unsigned long hardirq_enable_ip; + unsigned long hardirq_disable_ip; + unsigned int hardirq_enable_event; + unsigned int hardirq_disable_event; + int hardirqs_enabled; + int hardirq_context; + unsigned long softirq_disable_ip; + unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; + unsigned int softirq_enable_event; + int softirqs_enabled; + int softirq_context; +#endif +#ifdef CONFIG_LOCKDEP +# define MAX_LOCK_DEPTH 48UL + u64 curr_chain_key; + int lockdep_depth; + unsigned int lockdep_recursion; + struct held_lock held_locks[MAX_LOCK_DEPTH]; + gfp_t lockdep_reclaim_gfp; +#endif + +/* journalling filesystem info */ + void *journal_info; + +/* 
stacked block device info */ + struct bio_list *bio_list; + +#ifdef CONFIG_BLOCK +/* stack plugging */ + struct blk_plug *plug; +#endif + +/* VM state */ + struct reclaim_state *reclaim_state; + + struct backing_dev_info *backing_dev_info; + + struct io_context *io_context; + + unsigned long ptrace_message; + siginfo_t *last_siginfo; /* For ptrace use. */ + struct task_io_accounting ioac; +#if defined(CONFIG_TASK_XACCT) + u64 acct_rss_mem1; /* accumulated rss usage */ + u64 acct_vm_mem1; /* accumulated virtual memory usage */ + cputime_t acct_timexpd; /* stime + utime since last update */ +#endif +#ifdef CONFIG_CPUSETS + nodemask_t mems_allowed; /* Protected by alloc_lock */ + seqcount_t mems_allowed_seq; /* Seqence no to catch updates */ + int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; +#endif +#ifdef CONFIG_CGROUPS + /* Control Group info protected by css_set_lock */ + struct css_set __rcu *cgroups; + /* cg_list protected by css_set_lock and tsk->alloc_lock */ + struct list_head cg_list; +#endif +#ifdef CONFIG_FUTEX + struct robust_list_head __user *robust_list; +#ifdef CONFIG_COMPAT + struct compat_robust_list_head __user *compat_robust_list; +#endif + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; +#endif +#ifdef CONFIG_PERF_EVENTS + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; + struct mutex perf_event_mutex; + struct list_head perf_event_list; +#endif +#ifdef CONFIG_NUMA + struct mempolicy *mempolicy; /* Protected by alloc_lock */ + short il_next; + short pref_node_fork; +#endif + struct rcu_head rcu; + + /* + * cache last used pipe for splice + */ + struct pipe_inode_info *splice_pipe; +#ifdef CONFIG_TASK_DELAY_ACCT + struct task_delay_info *delays; +#endif +#ifdef CONFIG_FAULT_INJECTION + int make_it_fail; +#endif + /* + * when (nr_dirtied >= nr_dirtied_pause), it's time to call + * balance_dirty_pages() for some dirty throttling pause + */ + int nr_dirtied; + int nr_dirtied_pause; + unsigned long dirty_paused_when; /* start of a write-and-pause period */ + +#ifdef CONFIG_LATENCYTOP + int latency_record_count; + struct latency_record latency_record[LT_SAVECOUNT]; +#endif + /* + * time slack values; these are used to round up poll() and + * select() etc timeout values. These are in nanoseconds. + */ + unsigned long timer_slack_ns; + unsigned long default_timer_slack_ns; + + struct list_head *scm_work_list; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* Index of current stored address in ret_stack */ + int curr_ret_stack; + /* Stack of return addresses for return function tracing */ + struct ftrace_ret_stack *ret_stack; + /* time stamp for last schedule */ + unsigned long long ftrace_timestamp; + /* + * Number of functions that haven't been traced + * because of depth overrun. 
+ */ + atomic_t trace_overrun; + /* Pause for the tracing */ + atomic_t tracing_graph_pause; +#endif +#ifdef CONFIG_TRACING + /* state flags for use by tracers */ + unsigned long trace; + /* bitmask and counter of trace recursion */ + unsigned long trace_recursion; +#endif /* CONFIG_TRACING */ +#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ + struct memcg_batch_info { + int do_batch; /* incremented when batch uncharge started */ + struct mem_cgroup *memcg; /* target memcg of uncharge */ + unsigned long nr_pages; /* uncharged usage */ + unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ + } memcg_batch; +#endif +#ifdef CONFIG_HAVE_HW_BREAKPOINT + atomic_t ptrace_bp_refcnt; +#endif +}; + +/* Future-safe accessor for struct task_struct's cpus_allowed. */ +#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) + +/* + * Priority of a process goes from 0..MAX_PRIO-1, valid RT + * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH + * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority + * values are inverted: lower p->prio value means higher priority. + * + * The MAX_USER_RT_PRIO value allows the actual maximum + * RT priority to be separate from the value exported to + * user-space. This allows kernel threads to set their + * priority to a value higher than any user task. Note: + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. + */ + +#define MAX_USER_RT_PRIO 100 +#define MAX_RT_PRIO MAX_USER_RT_PRIO + +#define MAX_PRIO (MAX_RT_PRIO + 40) +#define DEFAULT_PRIO (MAX_RT_PRIO + 20) + +static inline int rt_prio(int prio) +{ + if (unlikely(prio < MAX_RT_PRIO)) + return 1; + return 0; +} + +static inline int rt_task(struct task_struct *p) +{ + return rt_prio(p->prio); +} + +static inline struct pid *task_pid(struct task_struct *task) +{ + return task->pids[PIDTYPE_PID].pid; +} + +static inline struct pid *task_tgid(struct task_struct *task) +{ + return task->group_leader->pids[PIDTYPE_PID].pid; +} + +/* + * Without tasklist or rcu lock it is not safe to dereference + * the result of task_pgrp/task_session even if task == current, + * we can race with another thread doing sys_setsid/sys_setpgid. + */ +static inline struct pid *task_pgrp(struct task_struct *task) +{ + return task->group_leader->pids[PIDTYPE_PGID].pid; +} + +static inline struct pid *task_session(struct task_struct *task) +{ + return task->group_leader->pids[PIDTYPE_SID].pid; +} + +struct pid_namespace; + +/* + * the helpers to get the task's different pids as they are seen + * from various namespaces + * + * task_xid_nr() : global id, i.e. the id seen from the init namespace; + * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of + * current. 
+ * task_xid_nr_ns() : id seen from the ns specified; + * + * set_task_vxid() : assigns a virtual id to a task; + * + * see also pid_nr() etc in include/linux/pid.h + */ +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, + struct pid_namespace *ns); + +static inline pid_t task_pid_nr(struct task_struct *tsk) +{ + return tsk->pid; +} + +static inline pid_t task_pid_nr_ns(struct task_struct *tsk, + struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); +} + +static inline pid_t task_pid_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); +} + + +static inline pid_t task_tgid_nr(struct task_struct *tsk) +{ + return tsk->tgid; +} + +pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); + +static inline pid_t task_tgid_vnr(struct task_struct *tsk) +{ + return pid_vnr(task_tgid(tsk)); +} + + +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, + struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); +} + +static inline pid_t task_pgrp_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); +} + + +static inline pid_t task_session_nr_ns(struct task_struct *tsk, + struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); +} + +static inline pid_t task_session_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); +} + +/* obsolete, do not use */ +static inline pid_t task_pgrp_nr(struct task_struct *tsk) +{ + return task_pgrp_nr_ns(tsk, &init_pid_ns); +} + +/** + * pid_alive - check that a task structure is not stale + * @p: Task structure to be checked. + * + * Test if a process is not yet dead (at most zombie state) + * If pid_alive fails, then pointers within the task structure + * can be stale and must not be dereferenced. + */ +static inline int pid_alive(struct task_struct *p) +{ + return p->pids[PIDTYPE_PID].pid != NULL; +} + +/** + * is_global_init - check if a task structure is init + * @tsk: Task structure to be checked. + * + * Check if a task structure is the first user space task the kernel created. + */ +static inline int is_global_init(struct task_struct *tsk) +{ + return tsk->pid == 1; +} + +/* + * is_container_init: + * check whether in the task is init in its own pid namespace. 
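+ *
+ * (Informal sketch: such a task sees itself as pid 1, i.e.
+ * task_pid_vnr(tsk) == 1, even though task_pid_nr(tsk) -- the id
+ * seen from the initial namespace -- will normally differ.)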
+ */ +extern int is_container_init(struct task_struct *tsk); + +extern struct pid *cad_pid; + +extern void free_task(struct task_struct *tsk); +#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + +extern void __put_task_struct(struct task_struct *t); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); +} + +extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); +extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); + +extern int task_free_register(struct notifier_block *n); +extern int task_free_unregister(struct notifier_block *n); + +/* + * Per process flags + */ +#define PF_EXITING 0x00000004 /* getting shut down */ +#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ +#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ +#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ +#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ +#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ +#define PF_DUMPCORE 0x00000200 /* dumped core */ +#define PF_SIGNALED 0x00000400 /* killed by a signal */ +#define PF_MEMALLOC 0x00000800 /* Allocating memory */ +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ +#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ +#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ +#define PF_FROZEN 0x00010000 /* frozen for system suspend */ +#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ +#define PF_KSWAPD 0x00040000 /* I am kswapd */ +#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ +#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ +#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ +#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ +#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ +#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ +#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ + +/* + * Only the _current_ task can read/write to tsk->flags, but other + * tasks can access tsk->flags in readonly mode for example + * with tsk_used_math (like during threaded core dumping). + * There is however an exception to this rule during ptrace + * or during fork: the ptracer task is allowed to write to the + * child->flags of its traced child (same goes for fork, the parent + * can write to the child->flags), because we're guaranteed the + * child is not running and in turn not changing child->flags + * at the same time the parent does it. + */ +#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) +#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) +#define clear_used_math() clear_stopped_child_used_math(current) +#define set_used_math() set_stopped_child_used_math(current) +#define conditional_stopped_child_used_math(condition, child) \ + do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? 
PF_USED_MATH : 0; } while (0) +#define conditional_used_math(condition) \ + conditional_stopped_child_used_math(condition, current) +#define copy_to_stopped_child_used_math(child) \ + do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) +/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ +#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) +#define used_math() tsk_used_math(current) + +/* + * task->jobctl flags + */ +#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ + +#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ +#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ +#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ +#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ +#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ +#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ +#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ + +#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) +#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) +#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) +#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) +#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) +#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) +#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) + +#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) + +extern bool task_set_jobctl_pending(struct task_struct *task, + unsigned int mask); +extern void task_clear_jobctl_trapping(struct task_struct *task); +extern void task_clear_jobctl_pending(struct task_struct *task, + unsigned int mask); + +#ifdef CONFIG_PREEMPT_RCU + +#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ +#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ + +static inline void rcu_copy_process(struct task_struct *p) +{ + p->rcu_read_lock_nesting = 0; + p->rcu_read_unlock_special = 0; +#ifdef CONFIG_TREE_PREEMPT_RCU + p->rcu_blocked_node = NULL; +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ +#ifdef CONFIG_RCU_BOOST + p->rcu_boost_mutex = NULL; +#endif /* #ifdef CONFIG_RCU_BOOST */ + INIT_LIST_HEAD(&p->rcu_node_entry); +} + +#else + +static inline void rcu_copy_process(struct task_struct *p) +{ +} + +#endif + +#ifdef CONFIG_SMP +extern void do_set_cpus_allowed(struct task_struct *p, + const struct cpumask *new_mask); + +extern int set_cpus_allowed_ptr(struct task_struct *p, + const struct cpumask *new_mask); +#else +static inline void do_set_cpus_allowed(struct task_struct *p, + const struct cpumask *new_mask) +{ +} +static inline int set_cpus_allowed_ptr(struct task_struct *p, + const struct cpumask *new_mask) +{ + if (!cpumask_test_cpu(0, new_mask)) + return -EINVAL; + return 0; +} +#endif + +#ifndef CONFIG_CPUMASK_OFFSTACK +static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) +{ + return set_cpus_allowed_ptr(p, &new_mask); +} +#endif + +/* + * Do not use outside of architecture code which knows its limitations. + * + * sched_clock() has no promise of monotonicity or bounded drift between + * CPUs, use (which you should not) requires disabling IRQs. + * + * Please use one of the three interfaces below. 
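+ *
+ * Illustrative use (a sketch, not taken from this file): code that
+ * just needs a nanosecond timestamp from process context would do
+ *
+ *     u64 now = local_clock();
+ *
+ * local_clock() and cpu_clock(cpu) wrap sched_clock_cpu() and do
+ * their own IRQ handling, so callers need not disable interrupts
+ * themselves.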
+ */ +extern unsigned long long notrace sched_clock(void); +/* + * See the comment in kernel/sched_clock.c + */ +extern u64 cpu_clock(int cpu); +extern u64 local_clock(void); +extern u64 sched_clock_cpu(int cpu); + + +extern void sched_clock_init(void); + +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +static inline void sched_clock_tick(void) +{ +} + +static inline void sched_clock_idle_sleep_event(void) +{ +} + +static inline void sched_clock_idle_wakeup_event(u64 delta_ns) +{ +} +#else +/* + * Architectures can set this to 1 if they have specified + * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, + * but then during bootup it turns out that sched_clock() + * is reliable after all: + */ +extern int sched_clock_stable; + +extern void sched_clock_tick(void); +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(u64 delta_ns); +#endif + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * An i/f to runtime opt-in for irq time accounting based off of sched_clock. + * The reason for this explicit opt-in is not to have perf penalty with + * slow sched_clocks. + */ +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +#else +static inline void enable_sched_clock_irqtime(void) {} +static inline void disable_sched_clock_irqtime(void) {} +#endif + +extern unsigned long long +task_sched_runtime(struct task_struct *task); + +/* sched_exec is called by processes performing an exec */ +#ifdef CONFIG_SMP +extern void sched_exec(void); +#else +#define sched_exec() {} +#endif + +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(u64 delta_ns); + +#ifdef CONFIG_HOTPLUG_CPU +extern void idle_task_exit(void); +#else +static inline void idle_task_exit(void) {} +#endif + +#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) +extern void wake_up_idle_cpu(int cpu); +#else +static inline void wake_up_idle_cpu(int cpu) { } +#endif + +extern unsigned int sysctl_sched_latency; +extern unsigned int sysctl_sched_min_granularity; +extern unsigned int sysctl_sched_wakeup_granularity; +extern unsigned int sysctl_sched_child_runs_first; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE, + SCHED_TUNABLESCALING_LOG, + SCHED_TUNABLESCALING_LINEAR, + SCHED_TUNABLESCALING_END, +}; +extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; + +#ifdef CONFIG_SCHED_DEBUG +extern unsigned int sysctl_sched_migration_cost; +extern unsigned int sysctl_sched_nr_migrate; +extern unsigned int sysctl_sched_time_avg; +extern unsigned int sysctl_timer_migration; +extern unsigned int sysctl_sched_shares_window; + +int sched_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, + loff_t *ppos); +#endif +#ifdef CONFIG_SCHED_DEBUG +static inline unsigned int get_sysctl_timer_migration(void) +{ + return sysctl_timer_migration; +} +#else +static inline unsigned int get_sysctl_timer_migration(void) +{ + return 1; +} +#endif +extern unsigned int sysctl_sched_rt_period; +extern int sysctl_sched_rt_runtime; + +int sched_rt_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +#ifdef CONFIG_SCHED_AUTOGROUP +extern unsigned int sysctl_sched_autogroup_enabled; + +extern void sched_autogroup_create_attach(struct task_struct *p); +extern void sched_autogroup_detach(struct task_struct *p); +extern void sched_autogroup_fork(struct signal_struct *sig); +extern void sched_autogroup_exit(struct signal_struct *sig); +#ifdef CONFIG_PROC_FS +extern 
void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); +extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); +#endif +#else +static inline void sched_autogroup_create_attach(struct task_struct *p) { } +static inline void sched_autogroup_detach(struct task_struct *p) { } +static inline void sched_autogroup_fork(struct signal_struct *sig) { } +static inline void sched_autogroup_exit(struct signal_struct *sig) { } +#endif + +#ifdef CONFIG_CFS_BANDWIDTH +extern unsigned int sysctl_sched_cfs_bandwidth_slice; +#endif + +#ifdef CONFIG_RT_MUTEXES +extern int rt_mutex_getprio(struct task_struct *p); +extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern void rt_mutex_adjust_pi(struct task_struct *p); +static inline bool tsk_is_pi_blocked(struct task_struct *tsk) +{ + return tsk->pi_blocked_on != NULL; +} +#else +static inline int rt_mutex_getprio(struct task_struct *p) +{ + return p->normal_prio; +} +# define rt_mutex_adjust_pi(p) do { } while (0) +static inline bool tsk_is_pi_blocked(struct task_struct *tsk) +{ + return false; +} +#endif + +extern bool yield_to(struct task_struct *p, bool preempt); +extern void set_user_nice(struct task_struct *p, long nice); +extern int task_prio(const struct task_struct *p); +extern int task_nice(const struct task_struct *p); +extern int can_nice(const struct task_struct *p, const int nice); +extern int task_curr(const struct task_struct *p); +extern int idle_cpu(int cpu); +extern int sched_setscheduler(struct task_struct *, int, + const struct sched_param *); +extern int sched_setscheduler_nocheck(struct task_struct *, int, + const struct sched_param *); +extern struct task_struct *idle_task(int cpu); +/** + * is_idle_task - is the specified task an idle task? + * @p: the task in question. + */ +static inline bool is_idle_task(const struct task_struct *p) +{ + return p->pid == 0; +} +extern struct task_struct *curr_task(int cpu); +extern void set_curr_task(int cpu, struct task_struct *p); + +void yield(void); + +/* + * The default (Linux) execution domain. + */ +extern struct exec_domain default_exec_domain; + +union thread_union { + struct thread_info thread_info; + unsigned long stack[THREAD_SIZE/sizeof(long)]; +}; + +#ifndef __HAVE_ARCH_KSTACK_END +static inline int kstack_end(void *addr) +{ + /* Reliable end of stack detection: + * Some APM bios versions misalign the stack + */ + return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); +} +#endif + +extern union thread_union init_thread_union; +extern struct task_struct init_task; + +extern struct mm_struct init_mm; + +extern struct pid_namespace init_pid_ns; + +/* + * find a task by one of its numerical ids + * + * find_task_by_pid_ns(): + * finds a task by its pid in the specified namespace + * find_task_by_vpid(): + * finds a task by its virtual pid + * + * see also find_vpid() etc in include/linux/pid.h + */ + +extern struct task_struct *find_task_by_vpid(pid_t nr); +extern struct task_struct *find_task_by_pid_ns(pid_t nr, + struct pid_namespace *ns); + +extern void __set_special_pids(struct pid *pid); + +/* per-UID process charging. 
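+ * (Usage note, informal: alloc_uid()/get_uid() take a reference on
+ * the per-user structure and must be balanced by free_uid(), which
+ * drops __count and releases the entry on the last put.)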
*/ +extern struct user_struct * alloc_uid(struct user_namespace *, uid_t); +static inline struct user_struct *get_uid(struct user_struct *u) +{ + atomic_inc(&u->__count); + return u; +} +extern void free_uid(struct user_struct *); +extern void release_uids(struct user_namespace *ns); + +#include <asm/current.h> + +extern void xtime_update(unsigned long ticks); + +extern int wake_up_state(struct task_struct *tsk, unsigned int state); +extern int wake_up_process(struct task_struct *tsk); +extern void wake_up_new_task(struct task_struct *tsk); +#ifdef CONFIG_SMP + extern void kick_process(struct task_struct *tsk); +#else + static inline void kick_process(struct task_struct *tsk) { } +#endif +extern void sched_fork(struct task_struct *p); +extern void sched_dead(struct task_struct *p); + +extern void proc_caches_init(void); +extern void flush_signals(struct task_struct *); +extern void __flush_signals(struct task_struct *); +extern void ignore_signals(struct task_struct *); +extern void flush_signal_handlers(struct task_struct *, int force_default); +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); + +static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&tsk->sighand->siglock, flags); + ret = dequeue_signal(tsk, mask, info); + spin_unlock_irqrestore(&tsk->sighand->siglock, flags); + + return ret; +} + +extern void block_all_signals(int (*notifier)(void *priv), void *priv, + sigset_t *mask); +extern void unblock_all_signals(void); +extern void release_task(struct task_struct * p); +extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern int force_sigsegv(int, struct task_struct *); +extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); +extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, + const struct cred *, u32); +extern int kill_pgrp(struct pid *pid, int sig, int priv); +extern int kill_pid(struct pid *pid, int sig, int priv); +extern int kill_proc_info(int, struct siginfo *, pid_t); +extern __must_check bool do_notify_parent(struct task_struct *, int); +extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); +extern void force_sig(int, struct task_struct *); +extern int send_sig(int, struct task_struct *, int); +extern int zap_other_threads(struct task_struct *p); +extern struct sigqueue *sigqueue_alloc(void); +extern void sigqueue_free(struct sigqueue *); +extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); +extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); +extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long); + +static inline int kill_cad_pid(int sig, int priv) +{ + return kill_pid(cad_pid, sig, priv); +} + +/* These can be the second arg to send_sig_info/send_group_sig_info. */ +#define SEND_SIG_NOINFO ((struct siginfo *) 0) +#define SEND_SIG_PRIV ((struct siginfo *) 1) +#define SEND_SIG_FORCED ((struct siginfo *) 2) + +/* + * True if we are on the alternate signal stack. 
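+ *
+ * Boundary behaviour, spelled out for the usual downward-growing
+ * case: sp == sas_ss_sp + sas_ss_size still counts as on the
+ * alternate stack (nothing has been pushed yet), while
+ * sp == sas_ss_sp does not; the CONFIG_STACK_GROWSUP variant below
+ * mirrors this at the opposite end.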
+ */ +static inline int on_sig_stack(unsigned long sp) +{ +#ifdef CONFIG_STACK_GROWSUP + return sp >= current->sas_ss_sp && + sp - current->sas_ss_sp < current->sas_ss_size; +#else + return sp > current->sas_ss_sp && + sp - current->sas_ss_sp <= current->sas_ss_size; +#endif +} + +static inline int sas_ss_flags(unsigned long sp) +{ + return (current->sas_ss_size == 0 ? SS_DISABLE + : on_sig_stack(sp) ? SS_ONSTACK : 0); +} + +/* + * Routines for handling mm_structs + */ +extern struct mm_struct * mm_alloc(void); + +/* mmdrop drops the mm and the page tables */ +extern void __mmdrop(struct mm_struct *); +static inline void mmdrop(struct mm_struct * mm) +{ + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); +} + +/* mmput gets rid of the mappings and all user-space */ +extern void mmput(struct mm_struct *); +/* Grab a reference to a task's mm, if it is not already going away */ +extern struct mm_struct *get_task_mm(struct task_struct *task); +/* + * Grab a reference to a task's mm, if it is not already going away + * and ptrace_may_access with the mode parameter passed to it + * succeeds. + */ +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); +/* Remove the current tasks stale references to the old mm_struct */ +extern void mm_release(struct task_struct *, struct mm_struct *); +/* Allocate a new mm structure and copy contents from tsk->mm */ +extern struct mm_struct *dup_mm(struct task_struct *tsk); + +extern int copy_thread(unsigned long, unsigned long, unsigned long, + struct task_struct *, struct pt_regs *); +extern void flush_thread(void); +extern void exit_thread(void); + +extern void exit_files(struct task_struct *); +extern void __cleanup_sighand(struct sighand_struct *); + +extern void exit_itimers(struct signal_struct *); +extern void flush_itimer_signals(void); + +extern void do_group_exit(int); + +extern void daemonize(const char *, ...); +extern int allow_signal(int); +extern int disallow_signal(int); + +extern int do_execve(const char *, + const char __user * const __user *, + const char __user * const __user *, struct pt_regs *); +extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); +struct task_struct *fork_idle(int); + +extern void set_task_comm(struct task_struct *tsk, char *from); +extern char *get_task_comm(char *to, struct task_struct *tsk); + +#ifdef CONFIG_SMP +void scheduler_ipi(void); +extern unsigned long wait_task_inactive(struct task_struct *, long match_state); +#else +static inline void scheduler_ipi(void) { } +static inline unsigned long wait_task_inactive(struct task_struct *p, + long match_state) +{ + return 1; +} +#endif + +#define next_task(p) \ + list_entry_rcu((p)->tasks.next, struct task_struct, tasks) + +#define for_each_process(p) \ + for (p = &init_task ; (p = next_task(p)) != &init_task ; ) + +extern bool current_is_single_threaded(void); + +/* + * Careful: do_each_thread/while_each_thread is a double loop so + * 'break' will not work as expected - use goto instead. 
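+ *
+ * Typical usage (illustrative sketch; interesting() stands in for
+ * whatever per-thread test the caller needs):
+ *
+ *     struct task_struct *g, *t;
+ *
+ *     read_lock(&tasklist_lock);
+ *     do_each_thread(g, t) {
+ *         if (interesting(t))
+ *             goto found;
+ *     } while_each_thread(g, t);
+ * found:
+ *     read_unlock(&tasklist_lock);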
+ */ +#define do_each_thread(g, t) \ + for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do + +#define while_each_thread(g, t) \ + while ((t = next_thread(t)) != g) + +static inline int get_nr_threads(struct task_struct *tsk) +{ + return tsk->signal->nr_threads; +} + +static inline bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} + +/* Do to the insanities of de_thread it is possible for a process + * to have the pid of the thread group leader without actually being + * the thread group leader. For iteration through the pids in proc + * all we care about is that we have a task with the appropriate + * pid, we don't actually care if we have the right task. + */ +static inline int has_group_leader_pid(struct task_struct *p) +{ + return p->pid == p->tgid; +} + +static inline +int same_thread_group(struct task_struct *p1, struct task_struct *p2) +{ + return p1->tgid == p2->tgid; +} + +static inline struct task_struct *next_thread(const struct task_struct *p) +{ + return list_entry_rcu(p->thread_group.next, + struct task_struct, thread_group); +} + +static inline int thread_group_empty(struct task_struct *p) +{ + return list_empty(&p->thread_group); +} + +#define delay_group_leader(p) \ + (thread_group_leader(p) && !thread_group_empty(p)) + +/* + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring + * subscriptions and synchronises with wait4(). Also used in procfs. Also + * pins the final release of task.io_context. Also protects ->cpuset and + * ->cgroup.subsys[]. And ->vfork_done. + * + * Nests both inside and outside of read_lock(&tasklist_lock). + * It must not be nested with write_lock_irq(&tasklist_lock), + * neither inside nor outside. + */ +static inline void task_lock(struct task_struct *p) +{ + spin_lock(&p->alloc_lock); +} + +static inline void task_unlock(struct task_struct *p) +{ + spin_unlock(&p->alloc_lock); +} + +extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, + unsigned long *flags); + +static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + struct sighand_struct *ret; + + ret = __lock_task_sighand(tsk, flags); + (void)__cond_lock(&tsk->sighand->siglock, ret); + return ret; +} + +static inline void unlock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); +} + +#ifdef CONFIG_CGROUPS +static inline void threadgroup_change_begin(struct task_struct *tsk) +{ + down_read(&tsk->signal->group_rwsem); +} +static inline void threadgroup_change_end(struct task_struct *tsk) +{ + up_read(&tsk->signal->group_rwsem); +} + +/** + * threadgroup_lock - lock threadgroup + * @tsk: member task of the threadgroup to lock + * + * Lock the threadgroup @tsk belongs to. No new task is allowed to enter + * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or + * perform exec. This is useful for cases where the threadgroup needs to + * stay stable across blockable operations. + * + * fork and exit paths explicitly call threadgroup_change_{begin|end}() for + * synchronization. While held, no new task will be added to threadgroup + * and no existing live task will have its PF_EXITING set. + * + * During exec, a task goes and puts its thread group through unusual + * changes. After de-threading, exclusive access is assumed to resources + * which are usually shared by tasks in the same group - e.g. sighand may + * be replaced with a new one. 
Also, the exec'ing task takes over group + * leader role including its pid. Exclude these changes while locked by + * grabbing cred_guard_mutex which is used to synchronize exec path. + */ +static inline void threadgroup_lock(struct task_struct *tsk) +{ + /* + * exec uses exit for de-threading nesting group_rwsem inside + * cred_guard_mutex. Grab cred_guard_mutex first. + */ + mutex_lock(&tsk->signal->cred_guard_mutex); + down_write(&tsk->signal->group_rwsem); +} + +/** + * threadgroup_unlock - unlock threadgroup + * @tsk: member task of the threadgroup to unlock + * + * Reverse threadgroup_lock(). + */ +static inline void threadgroup_unlock(struct task_struct *tsk) +{ + up_write(&tsk->signal->group_rwsem); + mutex_unlock(&tsk->signal->cred_guard_mutex); +} +#else +static inline void threadgroup_change_begin(struct task_struct *tsk) {} +static inline void threadgroup_change_end(struct task_struct *tsk) {} +static inline void threadgroup_lock(struct task_struct *tsk) {} +static inline void threadgroup_unlock(struct task_struct *tsk) {} +#endif + +#ifndef __HAVE_THREAD_FUNCTIONS + +#define task_thread_info(task) ((struct thread_info *)(task)->stack) +#define task_stack_page(task) ((task)->stack) + +static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) +{ + *task_thread_info(p) = *task_thread_info(org); + task_thread_info(p)->task = p; +} + +static inline unsigned long *end_of_stack(struct task_struct *p) +{ + return (unsigned long *)(task_thread_info(p) + 1); +} + +#endif + +static inline int object_is_on_stack(void *obj) +{ + void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); +} + +extern void thread_info_cache_init(void); + +#ifdef CONFIG_DEBUG_STACK_USAGE +static inline unsigned long stack_not_used(struct task_struct *p) +{ + unsigned long *n = end_of_stack(p); + + do { /* Skip over canary */ + n++; + } while (!*n); + + return (unsigned long)n - (unsigned long)end_of_stack(p); +} +#endif + +/* set thread flags in other task's structures + * - see asm/thread_info.h for TIF_xxxx flags available + */ +static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + set_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + clear_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + return test_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline void set_tsk_need_resched(struct task_struct *tsk) +{ + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); +} + +static inline void clear_tsk_need_resched(struct task_struct *tsk) +{ + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); +} + +static inline int test_tsk_need_resched(struct task_struct *tsk) +{ + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); +} + +static inline int restart_syscall(void) +{ + set_tsk_thread_flag(current, TIF_SIGPENDING); + return -ERESTARTNOINTR; +} + +static inline int signal_pending(struct task_struct *p) +{ + return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); +} + +static inline int __fatal_signal_pending(struct task_struct *p) +{ + return 
unlikely(sigismember(&p->pending.signal, SIGKILL)); +} + +static inline int fatal_signal_pending(struct task_struct *p) +{ + return signal_pending(p) && __fatal_signal_pending(p); +} + +static inline int signal_pending_state(long state, struct task_struct *p) +{ + if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) + return 0; + if (!signal_pending(p)) + return 0; + + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); +} + +static inline int need_resched(void) +{ + return unlikely(test_thread_flag(TIF_NEED_RESCHED)); +} + +/* + * cond_resched() and cond_resched_lock(): latency reduction via + * explicit rescheduling in places that are safe. The return + * value indicates whether a reschedule was done in fact. + * cond_resched_lock() will drop the spinlock before scheduling, + * cond_resched_softirq() will enable bhs before scheduling. + */ +extern int _cond_resched(void); + +#define cond_resched() ({ \ + __might_sleep(__FILE__, __LINE__, 0); \ + _cond_resched(); \ +}) + +extern int __cond_resched_lock(spinlock_t *lock); + +#ifdef CONFIG_PREEMPT_COUNT +#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET +#else +#define PREEMPT_LOCK_OFFSET 0 +#endif + +#define cond_resched_lock(lock) ({ \ + __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + __cond_resched_lock(lock); \ +}) + +extern int __cond_resched_softirq(void); + +#define cond_resched_softirq() ({ \ + __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ +}) + +/* + * Does a critical section need to be broken due to another + * task waiting?: (technically does not depend on CONFIG_PREEMPT, + * but a general need for low latency) + */ +static inline int spin_needbreak(spinlock_t *lock) +{ +#ifdef CONFIG_PREEMPT + return spin_is_contended(lock); +#else + return 0; +#endif +} + +/* + * Thread group CPU time accounting. + */ +void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); + +static inline void thread_group_cputime_init(struct signal_struct *sig) +{ + raw_spin_lock_init(&sig->cputimer.lock); +} + +/* + * Reevaluate whether the task has signals pending delivery. + * Wake the task if so. + * This is required every time the blocked sigset_t changes. + * callers must hold sighand->siglock. + */ +extern void recalc_sigpending_and_wake(struct task_struct *t); +extern void recalc_sigpending(void); + +extern void signal_wake_up(struct task_struct *t, int resume_stopped); + +/* + * Wrappers for p->thread_info->cpu access. No-op on UP. 
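+ *
+ * (Informal: task_cpu(p) may be read locklessly for reporting --
+ * e.g. /proc exposes it as the CPU the task last ran on -- but
+ * set_task_cpu() is reserved for the scheduler core, which calls
+ * it under the appropriate runqueue/pi locks.)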
+ */ +#ifdef CONFIG_SMP + +static inline unsigned int task_cpu(const struct task_struct *p) +{ + return task_thread_info(p)->cpu; +} + +extern void set_task_cpu(struct task_struct *p, unsigned int cpu); + +#else + +static inline unsigned int task_cpu(const struct task_struct *p) +{ + return 0; +} + +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) +{ +} + +#endif /* CONFIG_SMP */ + +extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); +extern long sched_getaffinity(pid_t pid, struct cpumask *mask); + +extern void normalize_rt_tasks(void); + +#ifdef CONFIG_CGROUP_SCHED + +extern struct task_group root_task_group; + +extern struct task_group *sched_create_group(struct task_group *parent); +extern void sched_destroy_group(struct task_group *tg); +extern void sched_move_task(struct task_struct *tsk); +#ifdef CONFIG_FAIR_GROUP_SCHED +extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); +extern unsigned long sched_group_shares(struct task_group *tg); +#endif +#ifdef CONFIG_RT_GROUP_SCHED +extern int sched_group_set_rt_runtime(struct task_group *tg, + long rt_runtime_us); +extern long sched_group_rt_runtime(struct task_group *tg); +extern int sched_group_set_rt_period(struct task_group *tg, + long rt_period_us); +extern long sched_group_rt_period(struct task_group *tg); +extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); +#endif +#endif + +extern int task_can_switch_user(struct user_struct *up, + struct task_struct *tsk); + +#ifdef CONFIG_TASK_XACCT +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.rchar += amt; +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.wchar += amt; +} + +static inline void inc_syscr(struct task_struct *tsk) +{ + tsk->ioac.syscr++; +} + +static inline void inc_syscw(struct task_struct *tsk) +{ + tsk->ioac.syscw++; +} +#else +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void inc_syscr(struct task_struct *tsk) +{ +} + +static inline void inc_syscw(struct task_struct *tsk) +{ +} +#endif + +#ifndef TASK_SIZE_OF +#define TASK_SIZE_OF(tsk) TASK_SIZE +#endif + +#ifdef CONFIG_MM_OWNER +extern void mm_update_next_owner(struct mm_struct *mm); +extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); +#else +static inline void mm_update_next_owner(struct mm_struct *mm) +{ +} + +static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) +{ +} +#endif /* CONFIG_MM_OWNER */ + +static inline unsigned long task_rlimit(const struct task_struct *tsk, + unsigned int limit) +{ + return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); +} + +static inline unsigned long task_rlimit_max(const struct task_struct *tsk, + unsigned int limit) +{ + return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); +} + +static inline unsigned long rlimit(unsigned int limit) +{ + return task_rlimit(current, limit); +} + +static inline unsigned long rlimit_max(unsigned int limit) +{ + return task_rlimit_max(current, limit); +} + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h new file mode 100644 index 00000000..899fbb48 --- /dev/null +++ b/include/linux/screen_info.h @@ -0,0 +1,84 @@ +#ifndef _SCREEN_INFO_H +#define _SCREEN_INFO_H + +#include <linux/types.h> + +/* + * These are set up by the setup-routine at boot-time: + */ + +struct 
screen_info { + __u8 orig_x; /* 0x00 */ + __u8 orig_y; /* 0x01 */ + __u16 ext_mem_k; /* 0x02 */ + __u16 orig_video_page; /* 0x04 */ + __u8 orig_video_mode; /* 0x06 */ + __u8 orig_video_cols; /* 0x07 */ + __u8 flags; /* 0x08 */ + __u8 unused2; /* 0x09 */ + __u16 orig_video_ega_bx;/* 0x0a */ + __u16 unused3; /* 0x0c */ + __u8 orig_video_lines; /* 0x0e */ + __u8 orig_video_isVGA; /* 0x0f */ + __u16 orig_video_points;/* 0x10 */ + + /* VESA graphic mode -- linear frame buffer */ + __u16 lfb_width; /* 0x12 */ + __u16 lfb_height; /* 0x14 */ + __u16 lfb_depth; /* 0x16 */ + __u32 lfb_base; /* 0x18 */ + __u32 lfb_size; /* 0x1c */ + __u16 cl_magic, cl_offset; /* 0x20 */ + __u16 lfb_linelength; /* 0x24 */ + __u8 red_size; /* 0x26 */ + __u8 red_pos; /* 0x27 */ + __u8 green_size; /* 0x28 */ + __u8 green_pos; /* 0x29 */ + __u8 blue_size; /* 0x2a */ + __u8 blue_pos; /* 0x2b */ + __u8 rsvd_size; /* 0x2c */ + __u8 rsvd_pos; /* 0x2d */ + __u16 vesapm_seg; /* 0x2e */ + __u16 vesapm_off; /* 0x30 */ + __u16 pages; /* 0x32 */ + __u16 vesa_attributes; /* 0x34 */ + __u32 capabilities; /* 0x36 */ + __u8 _reserved[6]; /* 0x3a */ +} __attribute__((packed)); + +#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */ +#define VIDEO_TYPE_CGA 0x11 /* CGA Display */ +#define VIDEO_TYPE_EGAM 0x20 /* EGA/VGA in Monochrome Mode */ +#define VIDEO_TYPE_EGAC 0x21 /* EGA in Color Mode */ +#define VIDEO_TYPE_VGAC 0x22 /* VGA+ in Color Mode */ +#define VIDEO_TYPE_VLFB 0x23 /* VESA VGA in graphic mode */ + +#define VIDEO_TYPE_PICA_S3 0x30 /* ACER PICA-61 local S3 video */ +#define VIDEO_TYPE_MIPS_G364 0x31 /* MIPS Magnum 4000 G364 video */ +#define VIDEO_TYPE_SGI 0x33 /* Various SGI graphics hardware */ + +#define VIDEO_TYPE_TGAC 0x40 /* DEC TGA */ + +#define VIDEO_TYPE_SUN 0x50 /* Sun frame buffer. */ +#define VIDEO_TYPE_SUNPCI 0x51 /* Sun PCI based frame buffer. */ + +#define VIDEO_TYPE_PMAC 0x60 /* PowerMacintosh frame buffer. */ + +#define VIDEO_TYPE_EFI 0x70 /* EFI graphic mode */ + +#define VIDEO_FLAGS_NOCURSOR (1 << 0) /* The video mode has no cursor set */ + +#ifdef __KERNEL__ +extern struct screen_info screen_info; + +#define ORIG_X (screen_info.orig_x) +#define ORIG_Y (screen_info.orig_y) +#define ORIG_VIDEO_MODE (screen_info.orig_video_mode) +#define ORIG_VIDEO_COLS (screen_info.orig_video_cols) +#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx) +#define ORIG_VIDEO_LINES (screen_info.orig_video_lines) +#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA) +#define ORIG_VIDEO_POINTS (screen_info.orig_video_points) +#endif /* __KERNEL__ */ + +#endif /* _SCREEN_INFO_H */ diff --git a/include/linux/sctp.h b/include/linux/sctp.h new file mode 100644 index 00000000..c11a2870 --- /dev/null +++ b/include/linux/sctp.h @@ -0,0 +1,711 @@ +/* SCTP kernel reference Implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001 Intel Corp. + * Copyright (c) 2001 Nokia, Inc. + * Copyright (c) 2001 La Monte H.P. Yarroll + * + * This file is part of the SCTP kernel reference Implementation + * + * Various protocol defined structures. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, write to + * the Free Software Foundation, 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <lksctp-developerst@lists.sourceforge.net> + * + * Or submit a bug report through the following website: + * http://www.sf.net/projects/lksctp + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Jon Grimm <jgrimm@us.ibm.com> + * Xingang Guo <xingang.guo@intel.com> + * randall@sctp.chicago.il.us + * kmorneau@cisco.com + * qxie1@email.mot.com + * Sridhar Samudrala <sri@us.ibm.com> + * Kevin Gao <kevin.gao@intel.com> + * + * Any bugs reported given to us we will try to fix... any fixes shared will + * be incorporated into the next SCTP release. + */ +#ifndef __LINUX_SCTP_H__ +#define __LINUX_SCTP_H__ + +#include <linux/in.h> /* We need in_addr. */ +#include <linux/in6.h> /* We need in6_addr. */ + + +/* Section 3.1. SCTP Common Header Format */ +typedef struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +} __packed sctp_sctphdr_t; + +#ifdef __KERNEL__ +#include <linux/skbuff.h> + +static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) +{ + return (struct sctphdr *)skb_transport_header(skb); +} +#endif + +/* Section 3.2. Chunk Field Descriptions. */ +typedef struct sctp_chunkhdr { + __u8 type; + __u8 flags; + __be16 length; +} __packed sctp_chunkhdr_t; + + +/* Section 3.2. Chunk Type Values. + * [Chunk Type] identifies the type of information contained in the Chunk + * Value field. It takes a value from 0 to 254. The value of 255 is + * reserved for future use as an extension field. + */ +typedef enum { + SCTP_CID_DATA = 0, + SCTP_CID_INIT = 1, + SCTP_CID_INIT_ACK = 2, + SCTP_CID_SACK = 3, + SCTP_CID_HEARTBEAT = 4, + SCTP_CID_HEARTBEAT_ACK = 5, + SCTP_CID_ABORT = 6, + SCTP_CID_SHUTDOWN = 7, + SCTP_CID_SHUTDOWN_ACK = 8, + SCTP_CID_ERROR = 9, + SCTP_CID_COOKIE_ECHO = 10, + SCTP_CID_COOKIE_ACK = 11, + SCTP_CID_ECN_ECNE = 12, + SCTP_CID_ECN_CWR = 13, + SCTP_CID_SHUTDOWN_COMPLETE = 14, + + /* AUTH Extension Section 4.1 */ + SCTP_CID_AUTH = 0x0F, + + /* PR-SCTP Sec 3.2 */ + SCTP_CID_FWD_TSN = 0xC0, + + /* Use hex, as defined in ADDIP sec. 3.1 */ + SCTP_CID_ASCONF = 0xC1, + SCTP_CID_ASCONF_ACK = 0x80, +} sctp_cid_t; /* enum */ + + +/* Section 3.2 + * Chunk Types are encoded such that the highest-order two bits specify + * the action that must be taken if the processing endpoint does not + * recognize the Chunk Type. + */ +typedef enum { + SCTP_CID_ACTION_DISCARD = 0x00, + SCTP_CID_ACTION_DISCARD_ERR = 0x40, + SCTP_CID_ACTION_SKIP = 0x80, + SCTP_CID_ACTION_SKIP_ERR = 0xc0, +} sctp_cid_action_t; + +enum { SCTP_CID_ACTION_MASK = 0xc0, }; + +/* This flag is used in Chunk Flags for ABORT and SHUTDOWN COMPLETE. + * + * 3.3.7 Abort Association (ABORT) (6): + * The T bit is set to 0 if the sender had a TCB that it destroyed. + * If the sender did not have a TCB it should set this bit to 1. 
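+ *
+ * (Illustrative check, using the helper defined further below:
+ * sctp_test_T_bit(chunk) is non-zero when the peer sent the ABORT
+ * or SHUTDOWN COMPLETE without a TCB, which changes how its
+ * Verification Tag must be validated, per Section 8.5.1.)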
+ */ +enum { SCTP_CHUNK_FLAG_T = 0x01 }; + +/* + * Set the T bit + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 14 |Reserved |T| Length = 4 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Chunk Flags: 8 bits + * + * Reserved: 7 bits + * Set to 0 on transmit and ignored on receipt. + * + * T bit: 1 bit + * The T bit is set to 0 if the sender had a TCB that it destroyed. If + * the sender did NOT have a TCB it should set this bit to 1. + * + * Note: Special rules apply to this chunk for verification, please + * see Section 8.5.1 for details. + */ + +#define sctp_test_T_bit(c) ((c)->chunk_hdr->flags & SCTP_CHUNK_FLAG_T) + +/* RFC 2960 + * Section 3.2.1 Optional/Variable-length Parmaeter Format. + */ + +typedef struct sctp_paramhdr { + __be16 type; + __be16 length; +} __packed sctp_paramhdr_t; + +typedef enum { + + /* RFC 2960 Section 3.3.5 */ + SCTP_PARAM_HEARTBEAT_INFO = cpu_to_be16(1), + /* RFC 2960 Section 3.3.2.1 */ + SCTP_PARAM_IPV4_ADDRESS = cpu_to_be16(5), + SCTP_PARAM_IPV6_ADDRESS = cpu_to_be16(6), + SCTP_PARAM_STATE_COOKIE = cpu_to_be16(7), + SCTP_PARAM_UNRECOGNIZED_PARAMETERS = cpu_to_be16(8), + SCTP_PARAM_COOKIE_PRESERVATIVE = cpu_to_be16(9), + SCTP_PARAM_HOST_NAME_ADDRESS = cpu_to_be16(11), + SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = cpu_to_be16(12), + SCTP_PARAM_ECN_CAPABLE = cpu_to_be16(0x8000), + + /* AUTH Extension Section 3 */ + SCTP_PARAM_RANDOM = cpu_to_be16(0x8002), + SCTP_PARAM_CHUNKS = cpu_to_be16(0x8003), + SCTP_PARAM_HMAC_ALGO = cpu_to_be16(0x8004), + + /* Add-IP: Supported Extensions, Section 4.2 */ + SCTP_PARAM_SUPPORTED_EXT = cpu_to_be16(0x8008), + + /* PR-SCTP Sec 3.1 */ + SCTP_PARAM_FWD_TSN_SUPPORT = cpu_to_be16(0xc000), + + /* Add-IP Extension. Section 3.2 */ + SCTP_PARAM_ADD_IP = cpu_to_be16(0xc001), + SCTP_PARAM_DEL_IP = cpu_to_be16(0xc002), + SCTP_PARAM_ERR_CAUSE = cpu_to_be16(0xc003), + SCTP_PARAM_SET_PRIMARY = cpu_to_be16(0xc004), + SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005), + SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006), + +} sctp_param_t; /* enum */ + + +/* RFC 2960 Section 3.2.1 + * The Parameter Types are encoded such that the highest-order two bits + * specify the action that must be taken if the processing endpoint does + * not recognize the Parameter Type. + * + */ +typedef enum { + SCTP_PARAM_ACTION_DISCARD = cpu_to_be16(0x0000), + SCTP_PARAM_ACTION_DISCARD_ERR = cpu_to_be16(0x4000), + SCTP_PARAM_ACTION_SKIP = cpu_to_be16(0x8000), + SCTP_PARAM_ACTION_SKIP_ERR = cpu_to_be16(0xc000), +} sctp_param_action_t; + +enum { SCTP_PARAM_ACTION_MASK = cpu_to_be16(0xc000), }; + +/* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */ + +typedef struct sctp_datahdr { + __be32 tsn; + __be16 stream; + __be16 ssn; + __be32 ppid; + __u8 payload[0]; +} __packed sctp_datahdr_t; + +typedef struct sctp_data_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_datahdr_t data_hdr; +} __packed sctp_data_chunk_t; + +/* DATA Chuck Specific Flags */ +enum { + SCTP_DATA_MIDDLE_FRAG = 0x00, + SCTP_DATA_LAST_FRAG = 0x01, + SCTP_DATA_FIRST_FRAG = 0x02, + SCTP_DATA_NOT_FRAG = 0x03, + SCTP_DATA_UNORDERED = 0x04, + SCTP_DATA_SACK_IMM = 0x08, +}; +enum { SCTP_DATA_FRAG_MASK = 0x03, }; + + +/* RFC 2960 Section 3.3.2 Initiation (INIT) (1) + * + * This chunk is used to initiate a SCTP association between two + * endpoints. 
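+ *
+ * Parsing sketch (informal): the fixed header below is followed by
+ * a list of TLV parameters, each introduced by an sctp_paramhdr_t;
+ * a walker advances by the parameter's length rounded up to a
+ * 4-byte boundary until the chunk length is exhausted.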
+ */ +typedef struct sctp_inithdr { + __be32 init_tag; + __be32 a_rwnd; + __be16 num_outbound_streams; + __be16 num_inbound_streams; + __be32 initial_tsn; + __u8 params[0]; +} __packed sctp_inithdr_t; + +typedef struct sctp_init_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_inithdr_t init_hdr; +} __packed sctp_init_chunk_t; + + +/* Section 3.3.2.1. IPv4 Address Parameter (5) */ +typedef struct sctp_ipv4addr_param { + sctp_paramhdr_t param_hdr; + struct in_addr addr; +} __packed sctp_ipv4addr_param_t; + +/* Section 3.3.2.1. IPv6 Address Parameter (6) */ +typedef struct sctp_ipv6addr_param { + sctp_paramhdr_t param_hdr; + struct in6_addr addr; +} __packed sctp_ipv6addr_param_t; + +/* Section 3.3.2.1 Cookie Preservative (9) */ +typedef struct sctp_cookie_preserve_param { + sctp_paramhdr_t param_hdr; + __be32 lifespan_increment; +} __packed sctp_cookie_preserve_param_t; + +/* Section 3.3.2.1 Host Name Address (11) */ +typedef struct sctp_hostname_param { + sctp_paramhdr_t param_hdr; + uint8_t hostname[0]; +} __packed sctp_hostname_param_t; + +/* Section 3.3.2.1 Supported Address Types (12) */ +typedef struct sctp_supported_addrs_param { + sctp_paramhdr_t param_hdr; + __be16 types[0]; +} __packed sctp_supported_addrs_param_t; + +/* Appendix A. ECN Capable (32768) */ +typedef struct sctp_ecn_capable_param { + sctp_paramhdr_t param_hdr; +} __packed sctp_ecn_capable_param_t; + +/* ADDIP Section 3.2.6 Adaptation Layer Indication */ +typedef struct sctp_adaptation_ind_param { + struct sctp_paramhdr param_hdr; + __be32 adaptation_ind; +} __packed sctp_adaptation_ind_param_t; + +/* ADDIP Section 4.2.7 Supported Extensions Parameter */ +typedef struct sctp_supported_ext_param { + struct sctp_paramhdr param_hdr; + __u8 chunks[0]; +} __packed sctp_supported_ext_param_t; + +/* AUTH Section 3.1 Random */ +typedef struct sctp_random_param { + sctp_paramhdr_t param_hdr; + __u8 random_val[0]; +} __packed sctp_random_param_t; + +/* AUTH Section 3.2 Chunk List */ +typedef struct sctp_chunks_param { + sctp_paramhdr_t param_hdr; + __u8 chunks[0]; +} __packed sctp_chunks_param_t; + +/* AUTH Section 3.3 HMAC Algorithm */ +typedef struct sctp_hmac_algo_param { + sctp_paramhdr_t param_hdr; + __be16 hmac_ids[0]; +} __packed sctp_hmac_algo_param_t; + +/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): + * The INIT ACK chunk is used to acknowledge the initiation of an SCTP + * association. + */ +typedef sctp_init_chunk_t sctp_initack_chunk_t; + +/* Section 3.3.3.1 State Cookie (7) */ +typedef struct sctp_cookie_param { + sctp_paramhdr_t p; + __u8 body[0]; +} __packed sctp_cookie_param_t; + +/* Section 3.3.3.1 Unrecognized Parameters (8) */ +typedef struct sctp_unrecognized_param { + sctp_paramhdr_t param_hdr; + sctp_paramhdr_t unrecognized; +} __packed sctp_unrecognized_param_t; + + + +/* + * 3.3.4 Selective Acknowledgement (SACK) (3): + * + * This chunk is sent to the peer endpoint to acknowledge received DATA + * chunks and to inform the peer endpoint of gaps in the received + * subsequences of DATA chunks as represented by their TSNs. 
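+ *
+ * Worked example (informative): with Cumulative TSN Ack = 100 and a
+ * single gap block {start = 2, end = 3}, the sender learns that
+ * TSNs 102 and 103 arrived while TSN 101 is still missing; gap
+ * block offsets are relative to the Cumulative TSN Ack point.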
+ */ + +typedef struct sctp_gap_ack_block { + __be16 start; + __be16 end; +} __packed sctp_gap_ack_block_t; + +typedef __be32 sctp_dup_tsn_t; + +typedef union { + sctp_gap_ack_block_t gab; + sctp_dup_tsn_t dup; +} sctp_sack_variable_t; + +typedef struct sctp_sackhdr { + __be32 cum_tsn_ack; + __be32 a_rwnd; + __be16 num_gap_ack_blocks; + __be16 num_dup_tsns; + sctp_sack_variable_t variable[0]; +} __packed sctp_sackhdr_t; + +typedef struct sctp_sack_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_sackhdr_t sack_hdr; +} __packed sctp_sack_chunk_t; + + +/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): + * + * An endpoint should send this chunk to its peer endpoint to probe the + * reachability of a particular destination transport address defined in + * the present association. + */ + +typedef struct sctp_heartbeathdr { + sctp_paramhdr_t info; +} __packed sctp_heartbeathdr_t; + +typedef struct sctp_heartbeat_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_heartbeathdr_t hb_hdr; +} __packed sctp_heartbeat_chunk_t; + + +/* For the abort and shutdown ACK we must carry the init tag in the + * common header. Just the common header is all that is needed with a + * chunk descriptor. + */ +typedef struct sctp_abort_chunk { + sctp_chunkhdr_t uh; +} __packed sctp_abort_chunk_t; + + +/* For the graceful shutdown we must carry the tag (in common header) + * and the highest consecutive acking value. + */ +typedef struct sctp_shutdownhdr { + __be32 cum_tsn_ack; +} __packed sctp_shutdownhdr_t; + +struct sctp_shutdown_chunk_t { + sctp_chunkhdr_t chunk_hdr; + sctp_shutdownhdr_t shutdown_hdr; +} __packed; + +/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */ + +typedef struct sctp_errhdr { + __be16 cause; + __be16 length; + __u8 variable[0]; +} __packed sctp_errhdr_t; + +typedef struct sctp_operr_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_errhdr_t err_hdr; +} __packed sctp_operr_chunk_t; + +/* RFC 2960 3.3.10 - Operation Error + * + * Cause Code: 16 bits (unsigned integer) + * + * Defines the type of error conditions being reported. + * Cause Code + * Value Cause Code + * --------- ---------------- + * 1 Invalid Stream Identifier + * 2 Missing Mandatory Parameter + * 3 Stale Cookie Error + * 4 Out of Resource + * 5 Unresolvable Address + * 6 Unrecognized Chunk Type + * 7 Invalid Mandatory Parameter + * 8 Unrecognized Parameters + * 9 No User Data + * 10 Cookie Received While Shutting Down + */ +typedef enum { + + SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00), + SCTP_ERROR_INV_STRM = cpu_to_be16(0x01), + SCTP_ERROR_MISS_PARAM = cpu_to_be16(0x02), + SCTP_ERROR_STALE_COOKIE = cpu_to_be16(0x03), + SCTP_ERROR_NO_RESOURCE = cpu_to_be16(0x04), + SCTP_ERROR_DNS_FAILED = cpu_to_be16(0x05), + SCTP_ERROR_UNKNOWN_CHUNK = cpu_to_be16(0x06), + SCTP_ERROR_INV_PARAM = cpu_to_be16(0x07), + SCTP_ERROR_UNKNOWN_PARAM = cpu_to_be16(0x08), + SCTP_ERROR_NO_DATA = cpu_to_be16(0x09), + SCTP_ERROR_COOKIE_IN_SHUTDOWN = cpu_to_be16(0x0a), + + + /* SCTP Implementation Guide: + * 11 Restart of an association with new addresses + * 12 User Initiated Abort + * 13 Protocol Violation + */ + + SCTP_ERROR_RESTART = cpu_to_be16(0x0b), + SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c), + SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d), + + /* ADDIP Section 3.3 New Error Causes + * + * Four new Error Causes are added to the SCTP Operational Errors, + * primarily for use in the ASCONF-ACK chunk. + * + * Value Cause Code + * --------- ---------------- + * 0x00A0 Request to Delete Last Remaining IP Address. 
+ * 0x00A1 Operation Refused Due to Resource Shortage. + * 0x00A2 Request to Delete Source IP Address. + * 0x00A3 Association Aborted due to illegal ASCONF-ACK + * 0x00A4 Request refused - no authorization. + */ + SCTP_ERROR_DEL_LAST_IP = cpu_to_be16(0x00A0), + SCTP_ERROR_RSRC_LOW = cpu_to_be16(0x00A1), + SCTP_ERROR_DEL_SRC_IP = cpu_to_be16(0x00A2), + SCTP_ERROR_ASCONF_ACK = cpu_to_be16(0x00A3), + SCTP_ERROR_REQ_REFUSED = cpu_to_be16(0x00A4), + + /* AUTH Section 4. New Error Cause + * + * This section defines a new error cause that will be sent if an AUTH + * chunk is received with an unsupported HMAC identifier. + * illustrates the new error cause. + * + * Cause Code Error Cause Name + * -------------------------------------------------------------- + * 0x0105 Unsupported HMAC Identifier + */ + SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105) +} sctp_error_t; + + + +/* RFC 2960. Appendix A. Explicit Congestion Notification. + * Explicit Congestion Notification Echo (ECNE) (12) + */ +typedef struct sctp_ecnehdr { + __be32 lowest_tsn; +} sctp_ecnehdr_t; + +typedef struct sctp_ecne_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_ecnehdr_t ence_hdr; +} __packed sctp_ecne_chunk_t; + +/* RFC 2960. Appendix A. Explicit Congestion Notification. + * Congestion Window Reduced (CWR) (13) + */ +typedef struct sctp_cwrhdr { + __be32 lowest_tsn; +} sctp_cwrhdr_t; + +typedef struct sctp_cwr_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_cwrhdr_t cwr_hdr; +} __packed sctp_cwr_chunk_t; + +/* PR-SCTP + * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) + * + * Forward Cumulative TSN chunk has the following format: + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 192 | Flags = 0x00 | Length = Variable | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | New Cumulative TSN | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Stream-1 | Stream Sequence-1 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * \ / + * / \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Stream-N | Stream Sequence-N | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Chunk Flags: + * + * Set to all zeros on transmit and ignored on receipt. + * + * New Cumulative TSN: 32 bit u_int + * + * This indicates the new cumulative TSN to the data receiver. Upon + * the reception of this value, the data receiver MUST consider + * any missing TSNs earlier than or equal to this value as received + * and stop reporting them as gaps in any subsequent SACKs. + * + * Stream-N: 16 bit u_int + * + * This field holds a stream number that was skipped by this + * FWD-TSN. + * + * Stream Sequence-N: 16 bit u_int + * This field holds the sequence number associated with the stream + * that was skipped. The stream sequence field holds the largest stream + * sequence number in this stream being skipped. The receiver of + * the FWD-TSN's can use the Stream-N and Stream Sequence-N fields + * to enable delivery of any stranded TSN's that remain on the stream + * re-ordering queues. This field MUST NOT report TSN's corresponding + * to DATA chunk that are marked as unordered. For ordered DATA + * chunks this field MUST be filled in. 
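+ *
+ * Worked example (informative): New Cumulative TSN = 115 with one
+ * skip entry {stream = 2, ssn = 9} tells the receiver to stop
+ * reporting TSNs up to 115 as missing and that ordered delivery on
+ * stream 2 may proceed past stream sequence 9.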
+ */ +struct sctp_fwdtsn_skip { + __be16 stream; + __be16 ssn; +} __packed; + +struct sctp_fwdtsn_hdr { + __be32 new_cum_tsn; + struct sctp_fwdtsn_skip skip[0]; +} __packed; + +struct sctp_fwdtsn_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_fwdtsn_hdr fwdtsn_hdr; +} __packed; + + +/* ADDIP + * Section 3.1.1 Address Configuration Change Chunk (ASCONF) + * + * Serial Number: 32 bits (unsigned integer) + * This value represents a Serial Number for the ASCONF Chunk. The + * valid range of Serial Number is from 0 to 2^32-1. + * Serial Numbers wrap back to 0 after reaching 2^32 -1. + * + * Address Parameter: 8 or 20 bytes (depending on type) + * The address is an address of the sender of the ASCONF chunk, + * the address MUST be considered part of the association by the + * peer endpoint. This field may be used by the receiver of the + * ASCONF to help in finding the association. This parameter MUST + * be present in every ASCONF message i.e. it is a mandatory TLV + * parameter. + * + * ASCONF Parameter: TLV format + * Each Address configuration change is represented by a TLV + * parameter as defined in Section 3.2. One or more requests may + * be present in an ASCONF Chunk. + * + * Section 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK) + * + * Serial Number: 32 bits (unsigned integer) + * This value represents the Serial Number for the received ASCONF + * Chunk that is acknowledged by this chunk. This value is copied + * from the received ASCONF Chunk. + * + * ASCONF Parameter Response: TLV format + * The ASCONF Parameter Response is used in the ASCONF-ACK to + * report status of ASCONF processing. + */ +typedef struct sctp_addip_param { + sctp_paramhdr_t param_hdr; + __be32 crr_id; +} __packed sctp_addip_param_t; + +typedef struct sctp_addiphdr { + __be32 serial; + __u8 params[0]; +} __packed sctp_addiphdr_t; + +typedef struct sctp_addip_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_addiphdr_t addip_hdr; +} __packed sctp_addip_chunk_t; + +/* AUTH + * Section 4.1 Authentication Chunk (AUTH) + * + * This chunk is used to hold the result of the HMAC calculation. + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 0x0F | Flags=0 | Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Shared Key Identifier | HMAC Identifier | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * \ HMAC / + * / \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Type: 1 byte (unsigned integer) + * This value MUST be set to 0x0F for all AUTH-chunks. + * + * Flags: 1 byte (unsigned integer) + * Set to zero on transmit and ignored on receipt. + * + * Length: 2 bytes (unsigned integer) + * This value holds the length of the HMAC in bytes plus 8. + * + * Shared Key Identifier: 2 bytes (unsigned integer) + * This value describes which endpoint pair shared key is used. + * + * HMAC Identifier: 2 bytes (unsigned integer) + * This value describes which message digest is being used. Table 2 + * shows the currently defined values. + * + * The following Table 2 shows the currently defined values for HMAC + * identifiers. 
+ * + * +-----------------+--------------------------+ + * | HMAC Identifier | Message Digest Algorithm | + * +-----------------+--------------------------+ + * | 0 | Reserved | + * | 1 | SHA-1 defined in [8] | + * | 2 | Reserved | + * | 3 | SHA-256 defined in [8] | + * +-----------------+--------------------------+ + * + * + * HMAC: n bytes (unsigned integer) This hold the result of the HMAC + * calculation. + */ +typedef struct sctp_authhdr { + __be16 shkey_id; + __be16 hmac_id; + __u8 hmac[0]; +} __packed sctp_authhdr_t; + +typedef struct sctp_auth_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_authhdr_t auth_hdr; +} __packed sctp_auth_chunk_t; + +#endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/scx200.h b/include/linux/scx200.h new file mode 100644 index 00000000..de466e11 --- /dev/null +++ b/include/linux/scx200.h @@ -0,0 +1,51 @@ +/* linux/include/linux/scx200.h + + Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> + + Defines for the National Semiconductor SCx200 Processors +*/ + +/* Interesting stuff for the National Semiconductor SCx200 CPU */ + +extern unsigned scx200_cb_base; + +#define scx200_cb_present() (scx200_cb_base!=0) + +/* F0 PCI Header/Bridge Configuration Registers */ +#define SCx200_DOCCS_BASE 0x78 /* DOCCS Base Address Register */ +#define SCx200_DOCCS_CTRL 0x7c /* DOCCS Control Register */ + +/* GPIO Register Block */ +#define SCx200_GPIO_SIZE 0x2c /* Size of GPIO register block */ + +/* General Configuration Block */ +#define SCx200_CB_BASE_FIXED 0x9000 /* Base fixed at 0x9000 according to errata? */ + +/* Watchdog Timer */ +#define SCx200_WDT_OFFSET 0x00 /* offset within configuration block */ +#define SCx200_WDT_SIZE 0x05 /* size */ + +#define SCx200_WDT_WDTO 0x00 /* Time-Out Register */ +#define SCx200_WDT_WDCNFG 0x02 /* Configuration Register */ +#define SCx200_WDT_WDSTS 0x04 /* Status Register */ +#define SCx200_WDT_WDSTS_WDOVF (1<<0) /* Overflow bit */ + +/* High Resolution Timer */ +#define SCx200_TIMER_OFFSET 0x08 +#define SCx200_TIMER_SIZE 0x06 + +/* Clock Generators */ +#define SCx200_CLOCKGEN_OFFSET 0x10 +#define SCx200_CLOCKGEN_SIZE 0x10 + +/* Pin Multiplexing and Miscellaneous Configuration Registers */ +#define SCx200_MISC_OFFSET 0x30 +#define SCx200_MISC_SIZE 0x10 + +#define SCx200_PMR 0x30 /* Pin Multiplexing Register */ +#define SCx200_MCR 0x34 /* Miscellaneous Configuration Register */ +#define SCx200_INTSEL 0x38 /* Interrupt Selection Register */ +#define SCx200_IID 0x3c /* IA On a Chip Identification Number Reg */ +#define SCx200_REV 0x3d /* Revision Register */ +#define SCx200_CBA 0x3e /* Configuration Base Address Register */ +#define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */ diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h new file mode 100644 index 00000000..ece4e553 --- /dev/null +++ b/include/linux/scx200_gpio.h @@ -0,0 +1,88 @@ +u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear); + +extern unsigned scx200_gpio_base; +extern unsigned long scx200_gpio_shadow[2]; +extern struct nsc_gpio_ops scx200_gpio_ops; + +#define scx200_gpio_present() (scx200_gpio_base!=0) + +/* Definitions to make sure I do the same thing in all functions */ +#define __SCx200_GPIO_BANK unsigned bank = index>>5 +#define __SCx200_GPIO_IOADDR unsigned short ioaddr = scx200_gpio_base+0x10*bank +#define __SCx200_GPIO_SHADOW unsigned long *shadow = scx200_gpio_shadow+bank +#define __SCx200_GPIO_INDEX index &= 31 + +#define __SCx200_GPIO_OUT __asm__ __volatile__("outsl":"=mS" (shadow):"d" 
(ioaddr), "0" (shadow)) + +/* returns the value of the GPIO pin */ + +static inline int scx200_gpio_get(unsigned index) { + __SCx200_GPIO_BANK; + __SCx200_GPIO_IOADDR + 0x04; + __SCx200_GPIO_INDEX; + + return (inl(ioaddr) & (1<<index)) ? 1 : 0; +} + +/* return the value driven on the GPIO signal (the value that will be + driven if the GPIO is configured as an output, it might not be the + state of the GPIO right now if the GPIO is configured as an input) */ + +static inline int scx200_gpio_current(unsigned index) { + __SCx200_GPIO_BANK; + __SCx200_GPIO_INDEX; + + return (scx200_gpio_shadow[bank] & (1<<index)) ? 1 : 0; +} + +/* drive the GPIO signal high */ + +static inline void scx200_gpio_set_high(unsigned index) { + __SCx200_GPIO_BANK; + __SCx200_GPIO_IOADDR; + __SCx200_GPIO_SHADOW; + __SCx200_GPIO_INDEX; + set_bit(index, shadow); /* __set_bit()? */ + __SCx200_GPIO_OUT; +} + +/* drive the GPIO signal low */ + +static inline void scx200_gpio_set_low(unsigned index) { + __SCx200_GPIO_BANK; + __SCx200_GPIO_IOADDR; + __SCx200_GPIO_SHADOW; + __SCx200_GPIO_INDEX; + clear_bit(index, shadow); /* __clear_bit()? */ + __SCx200_GPIO_OUT; +} + +/* drive the GPIO signal to state */ + +static inline void scx200_gpio_set(unsigned index, int state) { + __SCx200_GPIO_BANK; + __SCx200_GPIO_IOADDR; + __SCx200_GPIO_SHADOW; + __SCx200_GPIO_INDEX; + if (state) + set_bit(index, shadow); + else + clear_bit(index, shadow); + __SCx200_GPIO_OUT; +} + +/* toggle the GPIO signal */ +static inline void scx200_gpio_change(unsigned index) { + __SCx200_GPIO_BANK; + __SCx200_GPIO_IOADDR; + __SCx200_GPIO_SHADOW; + __SCx200_GPIO_INDEX; + change_bit(index, shadow); + __SCx200_GPIO_OUT; +} + +#undef __SCx200_GPIO_BANK +#undef __SCx200_GPIO_IOADDR +#undef __SCx200_GPIO_SHADOW +#undef __SCx200_GPIO_INDEX +#undef __SCx200_GPIO_OUT diff --git a/include/linux/sdla.h b/include/linux/sdla.h new file mode 100644 index 00000000..9995c7fc --- /dev/null +++ b/include/linux/sdla.h @@ -0,0 +1,335 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the Frame relay interface. + * + * Version: @(#)if_ifrad.h 0.20 13 Apr 96 + * + * Author: Mike McLagan <mike.mclagan@linux.org> + * + * Changes: + * 0.15 Mike McLagan Structure packing + * + * 0.20 Mike McLagan New flags for S508 buffer handling + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
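+ *
+ * (Editor's note, illustrative only and unrelated to SDLA itself: the
+ * scx200_gpio helpers declared in scx200_gpio.h above are normally used
+ * by first testing scx200_gpio_present() and then driving or reading a
+ * pin, e.g.
+ *
+ *	if (scx200_gpio_present()) {
+ *		scx200_gpio_set_high(7);
+ *		state = scx200_gpio_get(7);
+ *	}
+ *
+ * where GPIO number 7 is a made-up example; the __SCx200_GPIO_* shadow
+ * macros those helpers share are internal bookkeeping, which is why the
+ * header #undefs them at the end.)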
+ */ + +#ifndef SDLA_H +#define SDLA_H + +/* adapter type */ +#define SDLA_TYPES +#define SDLA_S502A 5020 +#define SDLA_S502E 5021 +#define SDLA_S503 5030 +#define SDLA_S507 5070 +#define SDLA_S508 5080 +#define SDLA_S509 5090 +#define SDLA_UNKNOWN -1 + +/* port selection flags for the S508 */ +#define SDLA_S508_PORT_V35 0x00 +#define SDLA_S508_PORT_RS232 0x02 + +/* Z80 CPU speeds */ +#define SDLA_CPU_3M 0x00 +#define SDLA_CPU_5M 0x01 +#define SDLA_CPU_7M 0x02 +#define SDLA_CPU_8M 0x03 +#define SDLA_CPU_10M 0x04 +#define SDLA_CPU_16M 0x05 +#define SDLA_CPU_12M 0x06 + +/* some private IOCTLs */ +#define SDLA_IDENTIFY (FRAD_LAST_IOCTL + 1) +#define SDLA_CPUSPEED (FRAD_LAST_IOCTL + 2) +#define SDLA_PROTOCOL (FRAD_LAST_IOCTL + 3) + +#define SDLA_CLEARMEM (FRAD_LAST_IOCTL + 4) +#define SDLA_WRITEMEM (FRAD_LAST_IOCTL + 5) +#define SDLA_READMEM (FRAD_LAST_IOCTL + 6) + +struct sdla_mem { + int addr; + int len; + void __user *data; +}; + +#define SDLA_START (FRAD_LAST_IOCTL + 7) +#define SDLA_STOP (FRAD_LAST_IOCTL + 8) + +/* some offsets in the Z80's memory space */ +#define SDLA_NMIADDR 0x0000 +#define SDLA_CONF_ADDR 0x0010 +#define SDLA_S502A_NMIADDR 0x0066 +#define SDLA_CODE_BASEADDR 0x0100 +#define SDLA_WINDOW_SIZE 0x2000 +#define SDLA_ADDR_MASK 0x1FFF + +/* largest handleable block of data */ +#define SDLA_MAX_DATA 4080 +#define SDLA_MAX_MTU 4072 /* MAX_DATA - sizeof(fradhdr) */ +#define SDLA_MAX_DLCI 24 + +/* this should be the same as frad_conf */ +struct sdla_conf { + short station; + short config; + short kbaud; + short clocking; + short max_frm; + short T391; + short T392; + short N391; + short N392; + short N393; + short CIR_fwd; + short Bc_fwd; + short Be_fwd; + short CIR_bwd; + short Bc_bwd; + short Be_bwd; +}; + +/* this should be the same as dlci_conf */ +struct sdla_dlci_conf { + short config; + short CIR_fwd; + short Bc_fwd; + short Be_fwd; + short CIR_bwd; + short Bc_bwd; + short Be_bwd; + short Tc_fwd; + short Tc_bwd; + short Tf_max; + short Tb_max; +}; + +#ifdef __KERNEL__ + +/* important Z80 window addresses */ +#define SDLA_CONTROL_WND 0xE000 + +#define SDLA_502_CMD_BUF 0xEF60 +#define SDLA_502_RCV_BUF 0xA900 +#define SDLA_502_TXN_AVAIL 0xFFF1 +#define SDLA_502_RCV_AVAIL 0xFFF2 +#define SDLA_502_EVENT_FLAGS 0xFFF3 +#define SDLA_502_MDM_STATUS 0xFFF4 +#define SDLA_502_IRQ_INTERFACE 0xFFFD +#define SDLA_502_IRQ_PERMISSION 0xFFFE +#define SDLA_502_DATA_OFS 0x0010 + +#define SDLA_508_CMD_BUF 0xE000 +#define SDLA_508_TXBUF_INFO 0xF100 +#define SDLA_508_RXBUF_INFO 0xF120 +#define SDLA_508_EVENT_FLAGS 0xF003 +#define SDLA_508_MDM_STATUS 0xF004 +#define SDLA_508_IRQ_INTERFACE 0xF010 +#define SDLA_508_IRQ_PERMISSION 0xF011 +#define SDLA_508_TSE_OFFSET 0xF012 + +/* Event flags */ +#define SDLA_EVENT_STATUS 0x01 +#define SDLA_EVENT_DLCI_STATUS 0x02 +#define SDLA_EVENT_BAD_DLCI 0x04 +#define SDLA_EVENT_LINK_DOWN 0x40 + +/* IRQ Trigger flags */ +#define SDLA_INTR_RX 0x01 +#define SDLA_INTR_TX 0x02 +#define SDLA_INTR_MODEM 0x04 +#define SDLA_INTR_COMPLETE 0x08 +#define SDLA_INTR_STATUS 0x10 +#define SDLA_INTR_TIMER 0x20 + +/* DLCI status bits */ +#define SDLA_DLCI_DELETED 0x01 +#define SDLA_DLCI_ACTIVE 0x02 +#define SDLA_DLCI_WAITING 0x04 +#define SDLA_DLCI_NEW 0x08 +#define SDLA_DLCI_INCLUDED 0x40 + +/* valid command codes */ +#define SDLA_INFORMATION_WRITE 0x01 +#define SDLA_INFORMATION_READ 0x02 +#define SDLA_ISSUE_IN_CHANNEL_SIGNAL 0x03 +#define SDLA_SET_DLCI_CONFIGURATION 0x10 +#define SDLA_READ_DLCI_CONFIGURATION 0x11 +#define SDLA_DISABLE_COMMUNICATIONS 0x12 +#define 
SDLA_ENABLE_COMMUNICATIONS 0x13 +#define SDLA_READ_DLC_STATUS 0x14 +#define SDLA_READ_DLC_STATISTICS 0x15 +#define SDLA_FLUSH_DLC_STATISTICS 0x16 +#define SDLA_LIST_ACTIVE_DLCI 0x17 +#define SDLA_FLUSH_INFORMATION_BUFFERS 0x18 +#define SDLA_ADD_DLCI 0x20 +#define SDLA_DELETE_DLCI 0x21 +#define SDLA_ACTIVATE_DLCI 0x22 +#define SDLA_DEACTIVATE_DLCI 0x23 +#define SDLA_READ_MODEM_STATUS 0x30 +#define SDLA_SET_MODEM_STATUS 0x31 +#define SDLA_READ_COMMS_ERR_STATS 0x32 +#define SDLA_FLUSH_COMMS_ERR_STATS 0x33 +#define SDLA_READ_CODE_VERSION 0x40 +#define SDLA_SET_IRQ_TRIGGER 0x50 +#define SDLA_GET_IRQ_TRIGGER 0x51 + +/* In channel signal types */ +#define SDLA_ICS_LINK_VERIFY 0x02 +#define SDLA_ICS_STATUS_ENQ 0x03 + +/* modem status flags */ +#define SDLA_MODEM_DTR_HIGH 0x01 +#define SDLA_MODEM_RTS_HIGH 0x02 +#define SDLA_MODEM_DCD_HIGH 0x08 +#define SDLA_MODEM_CTS_HIGH 0x20 + +/* used for RET_MODEM interpretation */ +#define SDLA_MODEM_DCD_LOW 0x01 +#define SDLA_MODEM_CTS_LOW 0x02 + +/* return codes */ +#define SDLA_RET_OK 0x00 +#define SDLA_RET_COMMUNICATIONS 0x01 +#define SDLA_RET_CHANNEL_INACTIVE 0x02 +#define SDLA_RET_DLCI_INACTIVE 0x03 +#define SDLA_RET_DLCI_CONFIG 0x04 +#define SDLA_RET_BUF_TOO_BIG 0x05 +#define SDLA_RET_NO_DATA 0x05 +#define SDLA_RET_BUF_OVERSIZE 0x06 +#define SDLA_RET_CIR_OVERFLOW 0x07 +#define SDLA_RET_NO_BUFS 0x08 +#define SDLA_RET_TIMEOUT 0x0A +#define SDLA_RET_MODEM 0x10 +#define SDLA_RET_CHANNEL_OFF 0x11 +#define SDLA_RET_CHANNEL_ON 0x12 +#define SDLA_RET_DLCI_STATUS 0x13 +#define SDLA_RET_DLCI_UNKNOWN 0x14 +#define SDLA_RET_COMMAND_INVALID 0x1F + +/* Configuration flags */ +#define SDLA_DIRECT_RECV 0x0080 +#define SDLA_TX_NO_EXCEPT 0x0020 +#define SDLA_NO_ICF_MSGS 0x1000 +#define SDLA_TX50_RX50 0x0000 +#define SDLA_TX70_RX30 0x2000 +#define SDLA_TX30_RX70 0x4000 + +/* IRQ selection flags */ +#define SDLA_IRQ_RECEIVE 0x01 +#define SDLA_IRQ_TRANSMIT 0x02 +#define SDLA_IRQ_MODEM_STAT 0x04 +#define SDLA_IRQ_COMMAND 0x08 +#define SDLA_IRQ_CHANNEL 0x10 +#define SDLA_IRQ_TIMER 0x20 + +/* definitions for PC memory mapping */ +#define SDLA_8K_WINDOW 0x01 +#define SDLA_S502_SEG_A 0x10 +#define SDLA_S502_SEG_C 0x20 +#define SDLA_S502_SEG_D 0x00 +#define SDLA_S502_SEG_E 0x30 +#define SDLA_S507_SEG_A 0x00 +#define SDLA_S507_SEG_B 0x40 +#define SDLA_S507_SEG_C 0x80 +#define SDLA_S507_SEG_E 0xC0 +#define SDLA_S508_SEG_A 0x00 +#define SDLA_S508_SEG_C 0x10 +#define SDLA_S508_SEG_D 0x08 +#define SDLA_S508_SEG_E 0x18 + +/* SDLA adapter port constants */ +#define SDLA_IO_EXTENTS 0x04 + +#define SDLA_REG_CONTROL 0x00 +#define SDLA_REG_PC_WINDOW 0x01 /* offset for PC window select latch */ +#define SDLA_REG_Z80_WINDOW 0x02 /* offset for Z80 window select latch */ +#define SDLA_REG_Z80_CONTROL 0x03 /* offset for Z80 control latch */ + +#define SDLA_S502_STS 0x00 /* status reg for 502, 502E, 507 */ +#define SDLA_S508_GNRL 0x00 /* general purp. 
reg for 508 */ +#define SDLA_S508_STS 0x01 /* status reg for 508 */ +#define SDLA_S508_IDR 0x02 /* ID reg for 508 */ + +/* control register flags */ +#define SDLA_S502A_START 0x00 /* start the CPU */ +#define SDLA_S502A_INTREQ 0x02 +#define SDLA_S502A_INTEN 0x04 +#define SDLA_S502A_HALT 0x08 /* halt the CPU */ +#define SDLA_S502A_NMI 0x10 /* issue an NMI to the CPU */ + +#define SDLA_S502E_CPUEN 0x01 +#define SDLA_S502E_ENABLE 0x02 +#define SDLA_S502E_INTACK 0x04 + +#define SDLA_S507_ENABLE 0x01 +#define SDLA_S507_IRQ3 0x00 +#define SDLA_S507_IRQ4 0x20 +#define SDLA_S507_IRQ5 0x40 +#define SDLA_S507_IRQ7 0x60 +#define SDLA_S507_IRQ10 0x80 +#define SDLA_S507_IRQ11 0xA0 +#define SDLA_S507_IRQ12 0xC0 +#define SDLA_S507_IRQ15 0xE0 + +#define SDLA_HALT 0x00 +#define SDLA_CPUEN 0x02 +#define SDLA_MEMEN 0x04 +#define SDLA_S507_EPROMWR 0x08 +#define SDLA_S507_EPROMCLK 0x10 +#define SDLA_S508_INTRQ 0x08 +#define SDLA_S508_INTEN 0x10 + +struct sdla_cmd { + char opp_flag; + char cmd; + short length; + char retval; + short dlci; + char flags; + short rxlost_int; + long rxlost_app; + char reserve[2]; + char data[SDLA_MAX_DATA]; /* transfer data buffer */ +} __attribute__((packed)); + +struct intr_info { + char flags; + short txlen; + char irq; + char flags2; + short timeout; +} __attribute__((packed)); + +/* found in the 508's control window at RXBUF_INFO */ +struct buf_info { + unsigned short rse_num; + unsigned long rse_base; + unsigned long rse_next; + unsigned long buf_base; + unsigned short reserved; + unsigned long buf_top; +} __attribute__((packed)); + +/* structure pointed to by rse_base in RXBUF_INFO struct */ +struct buf_entry { + char opp_flag; + short length; + short dlci; + char flags; + short timestamp; + short reserved[2]; + long buf_addr; +} __attribute__((packed)); + +#endif + +#endif diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h new file mode 100644 index 00000000..cc7a4e9c --- /dev/null +++ b/include/linux/seccomp.h @@ -0,0 +1,52 @@ +#ifndef _LINUX_SECCOMP_H +#define _LINUX_SECCOMP_H + + +#ifdef CONFIG_SECCOMP + +#include <linux/thread_info.h> +#include <asm/seccomp.h> + +typedef struct { int mode; } seccomp_t; + +extern void __secure_computing(int); +static inline void secure_computing(int this_syscall) +{ + if (unlikely(test_thread_flag(TIF_SECCOMP))) + __secure_computing(this_syscall); +} + +extern long prctl_get_seccomp(void); +extern long prctl_set_seccomp(unsigned long); + +static inline int seccomp_mode(seccomp_t *s) +{ + return s->mode; +} + +#else /* CONFIG_SECCOMP */ + +#include <linux/errno.h> + +typedef struct { } seccomp_t; + +#define secure_computing(x) do { } while (0) + +static inline long prctl_get_seccomp(void) +{ + return -EINVAL; +} + +static inline long prctl_set_seccomp(unsigned long arg2) +{ + return -EINVAL; +} + +static inline int seccomp_mode(seccomp_t *s) +{ + return 0; +} + +#endif /* CONFIG_SECCOMP */ + +#endif /* _LINUX_SECCOMP_H */ diff --git a/include/linux/securebits.h b/include/linux/securebits.h new file mode 100644 index 00000000..33406174 --- /dev/null +++ b/include/linux/securebits.h @@ -0,0 +1,54 @@ +#ifndef _LINUX_SECUREBITS_H +#define _LINUX_SECUREBITS_H 1 + +/* Each securesetting is implemented using two bits. One bit specifies + whether the setting is on or off. The other bit specify whether the + setting is locked or not. A setting which is locked cannot be + changed from user-level. 
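+ *
+ * (Editor's note on seccomp.h above, illustrative only: with
+ * CONFIG_SECCOMP enabled a thread opts in from userspace via
+ *
+ *	prctl(PR_SET_SECCOMP, 1);	mode 1 is "strict" seccomp
+ *
+ * after which the architecture's syscall entry path calls
+ * secure_computing(), which only does real work once TIF_SECCOMP is
+ * set for that thread.)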
 */
+#define issecure_mask(X) (1 << (X))
+#ifdef __KERNEL__
+#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
+#endif
+
+#define SECUREBITS_DEFAULT 0x00000000
+
+/* When set UID 0 has no special privileges. When unset, we support
+ inheritance of root-permissions and suid-root executables under
+ compatibility mode. We raise the effective and inheritable bitmasks
+ *of the executable file* if the effective uid of the new process is
+ 0. If the real uid is 0, we raise the effective (legacy) bit of the
+ executable file. */
+#define SECURE_NOROOT 0
+#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */
+
+#define SECBIT_NOROOT (issecure_mask(SECURE_NOROOT))
+#define SECBIT_NOROOT_LOCKED (issecure_mask(SECURE_NOROOT_LOCKED))
+
+/* When set, setuid to/from uid 0 does not trigger capability-"fixup".
+ When unset, to provide compatibility with old programs relying on
+ set*uid to gain/lose privilege, transitions to/from uid 0 cause
+ capabilities to be gained/lost. */
+#define SECURE_NO_SETUID_FIXUP 2
+#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */
+
+#define SECBIT_NO_SETUID_FIXUP (issecure_mask(SECURE_NO_SETUID_FIXUP))
+#define SECBIT_NO_SETUID_FIXUP_LOCKED \
+ (issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED))
+
+/* When set, a process can retain its capabilities even after
+ transitioning to a non-root user (the set-uid fixup suppressed by
+ bit 2). Bit-4 is cleared when a process calls exec(); setting both
+ bits 4 and 5 creates a barrier through exec so that no exec()'d
+ child can use this feature again. */
+#define SECURE_KEEP_CAPS 4
+#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */
+
+#define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS))
+#define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED))
+
+#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
+ issecure_mask(SECURE_NO_SETUID_FIXUP) | \
+ issecure_mask(SECURE_KEEP_CAPS))
+#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1)
+
+#endif /* !_LINUX_SECUREBITS_H */
diff --git a/include/linux/security.h b/include/linux/security.h
new file mode 100644
index 00000000..673afbb8
--- /dev/null
+++ b/include/linux/security.h
@@ -0,0 +1,3025 @@
+/*
+ * Linux Security plug
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
+ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Due to this file being licensed under the GPL there is controversy over
+ * whether this permits you to write a module that #includes this file
+ * without placing your module under the GPL. Please consult a lawyer for
+ * advice before doing this.
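+ *
+ * (Editor's note on securebits.h above, illustrative only: each setting
+ * occupies an even bit with its lock in the next odd bit, so e.g.
+ * issecure_mask(SECURE_KEEP_CAPS) is 1 << 4 == 0x10 and its lock bit is
+ * 0x20.  A process with CAP_SETPCAP applies the SECBIT_* masks with
+ * prctl(), e.g.
+ *
+ *	prctl(PR_SET_SECUREBITS,
+ *	      SECBIT_KEEP_CAPS | SECBIT_KEEP_CAPS_LOCKED);
+ *
+ * which turns keep-caps on and locks it against further changes.)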
+ * + */ + +#ifndef __LINUX_SECURITY_H +#define __LINUX_SECURITY_H + +#include <linux/key.h> +#include <linux/capability.h> +#include <linux/slab.h> +#include <linux/err.h> + +struct linux_binprm; +struct cred; +struct rlimit; +struct siginfo; +struct sem_array; +struct sembuf; +struct kern_ipc_perm; +struct audit_context; +struct super_block; +struct inode; +struct dentry; +struct file; +struct vfsmount; +struct path; +struct qstr; +struct nameidata; +struct iattr; +struct fown_struct; +struct file_operations; +struct shmid_kernel; +struct msg_msg; +struct msg_queue; +struct xattr; +struct xfrm_sec_ctx; +struct mm_struct; + +/* Maximum number of letters for an LSM name string */ +#define SECURITY_NAME_MAX 10 + +/* If capable should audit the security request */ +#define SECURITY_CAP_NOAUDIT 0 +#define SECURITY_CAP_AUDIT 1 + +struct ctl_table; +struct audit_krule; +struct user_namespace; +struct timezone; + +/* + * These functions are in security/capability.c and are used + * as the default capabilities functions + */ +extern int cap_capable(const struct cred *cred, struct user_namespace *ns, + int cap, int audit); +extern int cap_settime(const struct timespec *ts, const struct timezone *tz); +extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); +extern int cap_ptrace_traceme(struct task_struct *parent); +extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); +extern int cap_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); +extern int cap_bprm_set_creds(struct linux_binprm *bprm); +extern int cap_bprm_secureexec(struct linux_binprm *bprm); +extern int cap_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +extern int cap_inode_removexattr(struct dentry *dentry, const char *name); +extern int cap_inode_need_killpriv(struct dentry *dentry); +extern int cap_inode_killpriv(struct dentry *dentry); +extern int cap_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags, + unsigned long addr, unsigned long addr_only); +extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); +extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); +extern int cap_task_setscheduler(struct task_struct *p); +extern int cap_task_setioprio(struct task_struct *p, int ioprio); +extern int cap_task_setnice(struct task_struct *p, int nice); +extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); + +struct msghdr; +struct sk_buff; +struct sock; +struct sockaddr; +struct socket; +struct flowi; +struct dst_entry; +struct xfrm_selector; +struct xfrm_policy; +struct xfrm_state; +struct xfrm_user_sec_ctx; +struct seq_file; + +extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); + +void reset_security_ops(void); + +#ifdef CONFIG_MMU +extern unsigned long mmap_min_addr; +extern unsigned long dac_mmap_min_addr; +#else +#define dac_mmap_min_addr 0UL +#endif + +/* + * Values used in the task_security_ops calls + */ +/* setuid or setgid, id0 == uid or gid */ +#define LSM_SETID_ID 1 + +/* setreuid or setregid, id0 == real, id1 == eff */ +#define LSM_SETID_RE 2 + +/* setresuid or setresgid, id0 == real, id1 == eff, uid2 == saved */ +#define LSM_SETID_RES 4 + +/* setfsuid or setfsgid, id0 == fsuid or fsgid */ +#define LSM_SETID_FS 8 
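+
+/*
+ * Editor's illustrative sketch -- not part of the original header.  The
+ * cap_*() prototypes above are the default capability implementations,
+ * and a security module with nothing extra to enforce for a given hook
+ * simply delegates to them.  The function names below are hypothetical.
+ */
+#if 0	/* example only, not built */
+static int example_task_setnice(struct task_struct *p, int nice)
+{
+	/* no module-specific policy; apply the commoncap default */
+	return cap_task_setnice(p, nice);
+}
+
+static int example_bprm_set_creds(struct linux_binprm *bprm)
+{
+	return cap_bprm_set_creds(bprm);
+}
+#endif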
+ +/* forward declares to avoid warnings */ +struct sched_param; +struct request_sock; + +/* bprm->unsafe reasons */ +#define LSM_UNSAFE_SHARE 1 +#define LSM_UNSAFE_PTRACE 2 +#define LSM_UNSAFE_PTRACE_CAP 4 + +#ifdef CONFIG_MMU +extern int mmap_min_addr_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +#endif + +/* security_inode_init_security callback function to write xattrs */ +typedef int (*initxattrs) (struct inode *inode, + const struct xattr *xattr_array, void *fs_data); + +#ifdef CONFIG_SECURITY + +struct security_mnt_opts { + char **mnt_opts; + int *mnt_opts_flags; + int num_mnt_opts; +}; + +static inline void security_init_mnt_opts(struct security_mnt_opts *opts) +{ + opts->mnt_opts = NULL; + opts->mnt_opts_flags = NULL; + opts->num_mnt_opts = 0; +} + +static inline void security_free_mnt_opts(struct security_mnt_opts *opts) +{ + int i; + if (opts->mnt_opts) + for (i = 0; i < opts->num_mnt_opts; i++) + kfree(opts->mnt_opts[i]); + kfree(opts->mnt_opts); + opts->mnt_opts = NULL; + kfree(opts->mnt_opts_flags); + opts->mnt_opts_flags = NULL; + opts->num_mnt_opts = 0; +} + +/** + * struct security_operations - main security structure + * + * Security module identifier. + * + * @name: + * A string that acts as a unique identifier for the LSM with max number + * of characters = SECURITY_NAME_MAX. + * + * Security hooks for program execution operations. + * + * @bprm_set_creds: + * Save security information in the bprm->security field, typically based + * on information about the bprm->file, for later use by the apply_creds + * hook. This hook may also optionally check permissions (e.g. for + * transitions between security domains). + * This hook may be called multiple times during a single execve, e.g. for + * interpreters. The hook can tell whether it has already been called by + * checking to see if @bprm->security is non-NULL. If so, then the hook + * may decide either to retain the security information saved earlier or + * to replace it. + * @bprm contains the linux_binprm structure. + * Return 0 if the hook is successful and permission is granted. + * @bprm_check_security: + * This hook mediates the point when a search for a binary handler will + * begin. It allows a check the @bprm->security value which is set in the + * preceding set_creds call. The primary difference from set_creds is + * that the argv list and envp list are reliably available in @bprm. This + * hook may be called multiple times during a single execve; and in each + * pass set_creds is called first. + * @bprm contains the linux_binprm structure. + * Return 0 if the hook is successful and permission is granted. + * @bprm_committing_creds: + * Prepare to install the new security attributes of a process being + * transformed by an execve operation, based on the old credentials + * pointed to by @current->cred and the information set in @bprm->cred by + * the bprm_set_creds hook. @bprm points to the linux_binprm structure. + * This hook is a good place to perform state changes on the process such + * as closing open file descriptors to which access will no longer be + * granted when the attributes are changed. This is called immediately + * before commit_creds(). + * @bprm_committed_creds: + * Tidy up after the installation of the new security attributes of a + * process being transformed by an execve operation. The new credentials + * have, by this point, been set to @current->cred. @bprm points to the + * linux_binprm structure. 
This hook is a good place to perform state + * changes on the process such as clearing out non-inheritable signal + * state. This is called immediately after commit_creds(). + * @bprm_secureexec: + * Return a boolean value (0 or 1) indicating whether a "secure exec" + * is required. The flag is passed in the auxiliary table + * on the initial stack to the ELF interpreter to indicate whether libc + * should enable secure mode. + * @bprm contains the linux_binprm structure. + * + * Security hooks for filesystem operations. + * + * @sb_alloc_security: + * Allocate and attach a security structure to the sb->s_security field. + * The s_security field is initialized to NULL when the structure is + * allocated. + * @sb contains the super_block structure to be modified. + * Return 0 if operation was successful. + * @sb_free_security: + * Deallocate and clear the sb->s_security field. + * @sb contains the super_block structure to be modified. + * @sb_statfs: + * Check permission before obtaining filesystem statistics for the @mnt + * mountpoint. + * @dentry is a handle on the superblock for the filesystem. + * Return 0 if permission is granted. + * @sb_mount: + * Check permission before an object specified by @dev_name is mounted on + * the mount point named by @nd. For an ordinary mount, @dev_name + * identifies a device if the file system type requires a device. For a + * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a + * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the + * pathname of the object being mounted. + * @dev_name contains the name for object being mounted. + * @path contains the path for mount point object. + * @type contains the filesystem type. + * @flags contains the mount flags. + * @data contains the filesystem-specific data. + * Return 0 if permission is granted. + * @sb_copy_data: + * Allow mount option data to be copied prior to parsing by the filesystem, + * so that the security module can extract security-specific mount + * options cleanly (a filesystem may modify the data e.g. with strsep()). + * This also allows the original mount data to be stripped of security- + * specific options to avoid having to make filesystems aware of them. + * @type the type of filesystem being mounted. + * @orig the original mount data copied from userspace. + * @copy copied data which will be passed to the security module. + * Returns 0 if the copy was successful. + * @sb_remount: + * Extracts security system specific mount options and verifies no changes + * are being made to those options. + * @sb superblock being remounted + * @data contains the filesystem-specific data. + * Return 0 if permission is granted. + * @sb_umount: + * Check permission before the @mnt file system is unmounted. + * @mnt contains the mounted file system. + * @flags contains the unmount flags, e.g. MNT_FORCE. + * Return 0 if permission is granted. + * @sb_pivotroot: + * Check permission before pivoting the root filesystem. + * @old_path contains the path for the new location of the current root (put_old). + * @new_path contains the path for the new root (new_root). + * Return 0 if permission is granted. 
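+ *
+ * (Editor's illustrative aside, not part of the original text: a module
+ * implementing one of the superblock hooks above provides a function
+ * shaped by the parameters documented for that hook; for @sb_umount,
+ * roughly
+ *
+ *	static int example_sb_umount(struct vfsmount *mnt, int flags)
+ *	{
+ *		if ((flags & MNT_FORCE) && !capable(CAP_SYS_ADMIN))
+ *			return -EPERM;
+ *		return 0;
+ *	}
+ *
+ * The authoritative prototypes are the members of struct
+ * security_operations later in this header; the policy shown is a
+ * made-up example.)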
+ * @sb_set_mnt_opts: + * Set the security relevant mount options used for a superblock + * @sb the superblock to set security mount options for + * @opts binary data structure containing all lsm mount data + * @sb_clone_mnt_opts: + * Copy all security options from a given superblock to another + * @oldsb old superblock which contain information to clone + * @newsb new superblock which needs filled in + * @sb_parse_opts_str: + * Parse a string of security data filling in the opts structure + * @options string containing all mount options known by the LSM + * @opts binary data structure usable by the LSM + * + * Security hooks for inode operations. + * + * @inode_alloc_security: + * Allocate and attach a security structure to @inode->i_security. The + * i_security field is initialized to NULL when the inode structure is + * allocated. + * @inode contains the inode structure. + * Return 0 if operation was successful. + * @inode_free_security: + * @inode contains the inode structure. + * Deallocate the inode security structure and set @inode->i_security to + * NULL. + * @inode_init_security: + * Obtain the security attribute name suffix and value to set on a newly + * created inode and set up the incore security field for the new inode. + * This hook is called by the fs code as part of the inode creation + * transaction and provides for atomic labeling of the inode, unlike + * the post_create/mkdir/... hooks called by the VFS. The hook function + * is expected to allocate the name and value via kmalloc, with the caller + * being responsible for calling kfree after using them. + * If the security module does not use security attributes or does + * not wish to put a security attribute on this particular inode, + * then it should return -EOPNOTSUPP to skip this processing. + * @inode contains the inode structure of the newly created inode. + * @dir contains the inode structure of the parent directory. + * @qstr contains the last path component of the new object + * @name will be set to the allocated name suffix (e.g. selinux). + * @value will be set to the allocated attribute value. + * @len will be set to the length of the value. + * Returns 0 if @name and @value have been successfully set, + * -EOPNOTSUPP if no security attribute is needed, or + * -ENOMEM on memory allocation failure. + * @inode_create: + * Check permission to create a regular file. + * @dir contains inode structure of the parent of the new file. + * @dentry contains the dentry structure for the file to be created. + * @mode contains the file mode of the file to be created. + * Return 0 if permission is granted. + * @inode_link: + * Check permission before creating a new hard link to a file. + * @old_dentry contains the dentry structure for an existing link to the file. + * @dir contains the inode structure of the parent directory of the new link. + * @new_dentry contains the dentry structure for the new link. + * Return 0 if permission is granted. + * @path_link: + * Check permission before creating a new hard link to a file. + * @old_dentry contains the dentry structure for an existing link + * to the file. + * @new_dir contains the path structure of the parent directory of + * the new link. + * @new_dentry contains the dentry structure for the new link. + * Return 0 if permission is granted. + * @inode_unlink: + * Check the permission to remove a hard link to a file. + * @dir contains the inode structure of parent directory of the file. + * @dentry contains the dentry structure for file to be unlinked. 
+ * Return 0 if permission is granted. + * @path_unlink: + * Check the permission to remove a hard link to a file. + * @dir contains the path structure of parent directory of the file. + * @dentry contains the dentry structure for file to be unlinked. + * Return 0 if permission is granted. + * @inode_symlink: + * Check the permission to create a symbolic link to a file. + * @dir contains the inode structure of parent directory of the symbolic link. + * @dentry contains the dentry structure of the symbolic link. + * @old_name contains the pathname of file. + * Return 0 if permission is granted. + * @path_symlink: + * Check the permission to create a symbolic link to a file. + * @dir contains the path structure of parent directory of + * the symbolic link. + * @dentry contains the dentry structure of the symbolic link. + * @old_name contains the pathname of file. + * Return 0 if permission is granted. + * @inode_mkdir: + * Check permissions to create a new directory in the existing directory + * associated with inode structure @dir. + * @dir contains the inode structure of parent of the directory to be created. + * @dentry contains the dentry structure of new directory. + * @mode contains the mode of new directory. + * Return 0 if permission is granted. + * @path_mkdir: + * Check permissions to create a new directory in the existing directory + * associated with path structure @path. + * @dir contains the path structure of parent of the directory + * to be created. + * @dentry contains the dentry structure of new directory. + * @mode contains the mode of new directory. + * Return 0 if permission is granted. + * @inode_rmdir: + * Check the permission to remove a directory. + * @dir contains the inode structure of parent of the directory to be removed. + * @dentry contains the dentry structure of directory to be removed. + * Return 0 if permission is granted. + * @path_rmdir: + * Check the permission to remove a directory. + * @dir contains the path structure of parent of the directory to be + * removed. + * @dentry contains the dentry structure of directory to be removed. + * Return 0 if permission is granted. + * @inode_mknod: + * Check permissions when creating a special file (or a socket or a fifo + * file created via the mknod system call). Note that if mknod operation + * is being done for a regular file, then the create hook will be called + * and not this hook. + * @dir contains the inode structure of parent of the new file. + * @dentry contains the dentry structure of the new file. + * @mode contains the mode of the new file. + * @dev contains the device number. + * Return 0 if permission is granted. + * @path_mknod: + * Check permissions when creating a file. Note that this hook is called + * even if mknod operation is being done for a regular file. + * @dir contains the path structure of parent of the new file. + * @dentry contains the dentry structure of the new file. + * @mode contains the mode of the new file. + * @dev contains the undecoded device number. Use new_decode_dev() to get + * the decoded device number. + * Return 0 if permission is granted. + * @inode_rename: + * Check for permission to rename a file or directory. + * @old_dir contains the inode structure for parent of the old link. + * @old_dentry contains the dentry structure of the old link. + * @new_dir contains the inode structure for parent of the new link. + * @new_dentry contains the dentry structure of the new link. + * Return 0 if permission is granted. 
+ * @path_rename: + * Check for permission to rename a file or directory. + * @old_dir contains the path structure for parent of the old link. + * @old_dentry contains the dentry structure of the old link. + * @new_dir contains the path structure for parent of the new link. + * @new_dentry contains the dentry structure of the new link. + * Return 0 if permission is granted. + * @path_chmod: + * Check for permission to change DAC's permission of a file or directory. + * @dentry contains the dentry structure. + * @mnt contains the vfsmnt structure. + * @mode contains DAC's mode. + * Return 0 if permission is granted. + * @path_chown: + * Check for permission to change owner/group of a file or directory. + * @path contains the path structure. + * @uid contains new owner's ID. + * @gid contains new group's ID. + * Return 0 if permission is granted. + * @path_chroot: + * Check for permission to change root directory. + * @path contains the path structure. + * Return 0 if permission is granted. + * @inode_readlink: + * Check the permission to read the symbolic link. + * @dentry contains the dentry structure for the file link. + * Return 0 if permission is granted. + * @inode_follow_link: + * Check permission to follow a symbolic link when looking up a pathname. + * @dentry contains the dentry structure for the link. + * @nd contains the nameidata structure for the parent directory. + * Return 0 if permission is granted. + * @inode_permission: + * Check permission before accessing an inode. This hook is called by the + * existing Linux permission function, so a security module can use it to + * provide additional checking for existing Linux permission checks. + * Notice that this hook is called when a file is opened (as well as many + * other operations), whereas the file_security_ops permission hook is + * called when the actual read/write operations are performed. + * @inode contains the inode structure to check. + * @mask contains the permission mask. + * Return 0 if permission is granted. + * @inode_setattr: + * Check permission before setting file attributes. Note that the kernel + * call to notify_change is performed from several locations, whenever + * file attributes change (such as when a file is truncated, chown/chmod + * operations, transferring disk quotas, etc). + * @dentry contains the dentry structure for the file. + * @attr is the iattr structure containing the new file attributes. + * Return 0 if permission is granted. + * @path_truncate: + * Check permission before truncating a file. + * @path contains the path structure for the file. + * Return 0 if permission is granted. + * @inode_getattr: + * Check permission before obtaining file attributes. + * @mnt is the vfsmount where the dentry was looked up + * @dentry contains the dentry structure for the file. + * Return 0 if permission is granted. + * @inode_setxattr: + * Check permission before setting the extended attributes + * @value identified by @name for @dentry. + * Return 0 if permission is granted. + * @inode_post_setxattr: + * Update inode security field after successful setxattr operation. + * @value identified by @name for @dentry. + * @inode_getxattr: + * Check permission before obtaining the extended attributes + * identified by @name for @dentry. + * Return 0 if permission is granted. + * @inode_listxattr: + * Check permission before obtaining the list of extended attribute + * names for @dentry. + * Return 0 if permission is granted. 
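+ *
+ * (Editor's illustrative aside, not part of the original text: the
+ * @inode_init_security hook above works together with the initxattrs
+ * callback type declared earlier in this header; a filesystem passes
+ * such a callback to security_inode_init_security() so the attributes
+ * chosen by the LSM get written out atomically, roughly
+ *
+ *	static int example_initxattrs(struct inode *inode,
+ *				      const struct xattr *xattr_array,
+ *				      void *fs_data)
+ *	{
+ *		const struct xattr *xattr;
+ *
+ *		for (xattr = xattr_array; xattr->name != NULL; xattr++)
+ *			write_one_xattr(inode, xattr->name, xattr->value,
+ *					xattr->value_len, fs_data);
+ *		return 0;
+ *	}
+ *
+ * where write_one_xattr() stands in for the filesystem's own setxattr
+ * path and the array is terminated by an entry with a NULL name.)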
+ * @inode_removexattr: + * Check permission before removing the extended attribute + * identified by @name for @dentry. + * Return 0 if permission is granted. + * @inode_getsecurity: + * Retrieve a copy of the extended attribute representation of the + * security label associated with @name for @inode via @buffer. Note that + * @name is the remainder of the attribute name after the security prefix + * has been removed. @alloc is used to specify of the call should return a + * value via the buffer or just the value length Return size of buffer on + * success. + * @inode_setsecurity: + * Set the security label associated with @name for @inode from the + * extended attribute value @value. @size indicates the size of the + * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. + * Note that @name is the remainder of the attribute name after the + * security. prefix has been removed. + * Return 0 on success. + * @inode_listsecurity: + * Copy the extended attribute names for the security labels + * associated with @inode into @buffer. The maximum size of @buffer + * is specified by @buffer_size. @buffer may be NULL to request + * the size of the buffer required. + * Returns number of bytes used/required on success. + * @inode_need_killpriv: + * Called when an inode has been changed. + * @dentry is the dentry being changed. + * Return <0 on error to abort the inode change operation. + * Return 0 if inode_killpriv does not need to be called. + * Return >0 if inode_killpriv does need to be called. + * @inode_killpriv: + * The setuid bit is being removed. Remove similar security labels. + * Called with the dentry->d_inode->i_mutex held. + * @dentry is the dentry being changed. + * Return 0 on success. If error is returned, then the operation + * causing setuid bit removal is failed. + * @inode_getsecid: + * Get the secid associated with the node. + * @inode contains a pointer to the inode. + * @secid contains a pointer to the location where result will be saved. + * In case of failure, @secid will be set to zero. + * + * Security hooks for file operations + * + * @file_permission: + * Check file permissions before accessing an open file. This hook is + * called by various operations that read or write files. A security + * module can use this hook to perform additional checking on these + * operations, e.g. to revalidate permissions on use to support privilege + * bracketing or policy changes. Notice that this hook is used when the + * actual read/write operations are performed, whereas the + * inode_security_ops hook is called when a file is opened (as well as + * many other operations). + * Caveat: Although this hook can be used to revalidate permissions for + * various system call operations that read or write files, it does not + * address the revalidation of permissions for memory-mapped files. + * Security modules must handle this separately if they need such + * revalidation. + * @file contains the file structure being accessed. + * @mask contains the requested permissions. + * Return 0 if permission is granted. + * @file_alloc_security: + * Allocate and attach a security structure to the file->f_security field. + * The security field is initialized to NULL when the structure is first + * created. + * @file contains the file structure to secure. + * Return 0 if the hook is successful and permission is granted. + * @file_free_security: + * Deallocate and free any security structures stored in file->f_security. + * @file contains the file structure being modified. 
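+ *
+ * (Editor's illustrative aside, not part of the original text: as the
+ * @file_permission description above notes, this hook runs when reads
+ * and writes are actually performed rather than at open time, so it is
+ * where use-time revalidation lives; a minimal hook following the
+ * documented parameters looks roughly like
+ *
+ *	static int example_file_permission(struct file *file, int mask)
+ *	{
+ *		if ((mask & MAY_WRITE) && example_policy_denies_write(file))
+ *			return -EACCES;
+ *		return 0;
+ *	}
+ *
+ * where example_policy_denies_write() is a stand-in for whatever policy
+ * the module enforces.)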
+ * @file_ioctl: + * @file contains the file structure. + * @cmd contains the operation to perform. + * @arg contains the operational arguments. + * Check permission for an ioctl operation on @file. Note that @arg + * sometimes represents a user space pointer; in other cases, it may be a + * simple integer value. When @arg represents a user space pointer, it + * should never be used by the security module. + * Return 0 if permission is granted. + * @file_mmap : + * Check permissions for a mmap operation. The @file may be NULL, e.g. + * if mapping anonymous memory. + * @file contains the file structure for file to map (may be NULL). + * @reqprot contains the protection requested by the application. + * @prot contains the protection that will be applied by the kernel. + * @flags contains the operational flags. + * @addr contains virtual address that will be used for the operation. + * @addr_only contains a boolean: 0 if file-backed VMA, otherwise 1. + * Return 0 if permission is granted. + * @file_mprotect: + * Check permissions before changing memory access permissions. + * @vma contains the memory region to modify. + * @reqprot contains the protection requested by the application. + * @prot contains the protection that will be applied by the kernel. + * Return 0 if permission is granted. + * @file_lock: + * Check permission before performing file locking operations. + * Note: this hook mediates both flock and fcntl style locks. + * @file contains the file structure. + * @cmd contains the posix-translated lock operation to perform + * (e.g. F_RDLCK, F_WRLCK). + * Return 0 if permission is granted. + * @file_fcntl: + * Check permission before allowing the file operation specified by @cmd + * from being performed on the file @file. Note that @arg sometimes + * represents a user space pointer; in other cases, it may be a simple + * integer value. When @arg represents a user space pointer, it should + * never be used by the security module. + * @file contains the file structure. + * @cmd contains the operation to be performed. + * @arg contains the operational arguments. + * Return 0 if permission is granted. + * @file_set_fowner: + * Save owner security information (typically from current->security) in + * file->f_security for later use by the send_sigiotask hook. + * @file contains the file structure to update. + * Return 0 on success. + * @file_send_sigiotask: + * Check permission for the file owner @fown to send SIGIO or SIGURG to the + * process @tsk. Note that this hook is sometimes called from interrupt. + * Note that the fown_struct, @fown, is never outside the context of a + * struct file, so the file structure (and associated security information) + * can always be obtained: + * container_of(fown, struct file, f_owner) + * @tsk contains the structure of task receiving signal. + * @fown contains the file owner information. + * @sig is the signal that will be sent. When 0, kernel sends SIGIO. + * Return 0 if permission is granted. + * @file_receive: + * This hook allows security modules to control the ability of a process + * to receive an open file descriptor via socket IPC. + * @file contains the file structure being received. + * Return 0 if permission is granted. + * + * Security hook for dentry + * + * @dentry_open + * Save open-time permission checking state for later use upon + * file_permission, and recheck access if anything has changed + * since inode_permission. + * + * Security hooks for task operations. 
+ * + * @task_create: + * Check permission before creating a child process. See the clone(2) + * manual page for definitions of the @clone_flags. + * @clone_flags contains the flags indicating what should be shared. + * Return 0 if permission is granted. + * @task_free: + * @task task being freed + * Handle release of task-related resources. (Note that this can be called + * from interrupt context.) + * @cred_alloc_blank: + * @cred points to the credentials. + * @gfp indicates the atomicity of any memory allocations. + * Only allocate sufficient memory and attach to @cred such that + * cred_transfer() will not get ENOMEM. + * @cred_free: + * @cred points to the credentials. + * Deallocate and clear the cred->security field in a set of credentials. + * @cred_prepare: + * @new points to the new credentials. + * @old points to the original credentials. + * @gfp indicates the atomicity of any memory allocations. + * Prepare a new set of credentials by copying the data from the old set. + * @cred_transfer: + * @new points to the new credentials. + * @old points to the original credentials. + * Transfer data from original creds to new creds + * @kernel_act_as: + * Set the credentials for a kernel service to act as (subjective context). + * @new points to the credentials to be modified. + * @secid specifies the security ID to be set + * The current task must be the one that nominated @secid. + * Return 0 if successful. + * @kernel_create_files_as: + * Set the file creation context in a set of credentials to be the same as + * the objective context of the specified inode. + * @new points to the credentials to be modified. + * @inode points to the inode to use as a reference. + * The current task must be the one that nominated @inode. + * Return 0 if successful. + * @kernel_module_request: + * Ability to trigger the kernel to automatically upcall to userspace for + * userspace to load a kernel module with the given name. + * @kmod_name name of the module requested by the kernel + * Return 0 if successful. + * @task_fix_setuid: + * Update the module's state after setting one or more of the user + * identity attributes of the current process. The @flags parameter + * indicates which of the set*uid system calls invoked this hook. If + * @new is the set of credentials that will be installed. Modifications + * should be made to this rather than to @current->cred. + * @old is the set of credentials that are being replaces + * @flags contains one of the LSM_SETID_* values. + * Return 0 on success. + * @task_setpgid: + * Check permission before setting the process group identifier of the + * process @p to @pgid. + * @p contains the task_struct for process being modified. + * @pgid contains the new pgid. + * Return 0 if permission is granted. + * @task_getpgid: + * Check permission before getting the process group identifier of the + * process @p. + * @p contains the task_struct for the process. + * Return 0 if permission is granted. + * @task_getsid: + * Check permission before getting the session identifier of the process + * @p. + * @p contains the task_struct for the process. + * Return 0 if permission is granted. + * @task_getsecid: + * Retrieve the security identifier of the process @p. + * @p contains the task_struct for the process and place is into @secid. + * In case of failure, @secid will be set to zero. + * + * @task_setnice: + * Check permission before setting the nice value of @p to @nice. + * @p contains the task_struct of process. + * @nice contains the new nice value. 
+ * Return 0 if permission is granted.
+ * @task_setioprio:
+ * Check permission before setting the ioprio value of @p to @ioprio.
+ * @p contains the task_struct of process.
+ * @ioprio contains the new ioprio value.
+ * Return 0 if permission is granted.
+ * @task_getioprio:
+ * Check permission before getting the ioprio value of @p.
+ * @p contains the task_struct of process.
+ * Return 0 if permission is granted.
+ * @task_setrlimit:
+ * Check permission before setting the resource limits of the current
+ * process for @resource to @new_rlim. The old resource limit values can
+ * be examined by dereferencing (current->signal->rlim + resource).
+ * @resource contains the resource whose limit is being set.
+ * @new_rlim contains the new limits for @resource.
+ * Return 0 if permission is granted.
+ * @task_setscheduler:
+ * Check permission before setting scheduling policy and/or parameters of
+ * process @p based on @policy and @lp.
+ * @p contains the task_struct for process.
+ * @policy contains the scheduling policy.
+ * @lp contains the scheduling parameters.
+ * Return 0 if permission is granted.
+ * @task_getscheduler:
+ * Check permission before obtaining scheduling information for process
+ * @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_movememory:
+ * Check permission before moving memory owned by process @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_kill:
+ * Check permission before sending signal @sig to @p. @info can be NULL,
+ * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
+ * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
+ * from the kernel and should typically be permitted.
+ * SIGIO signals are handled separately by the send_sigiotask hook in
+ * file_security_ops.
+ * @p contains the task_struct for process.
+ * @info contains the signal information.
+ * @sig contains the signal value.
+ * @secid contains the sid of the process where the signal originated.
+ * Return 0 if permission is granted.
+ * @task_wait:
+ * Check permission before allowing a process to reap a child process @p
+ * and collect its status information.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_prctl:
+ * Check permission before performing a process control operation on the
+ * current process.
+ * @option contains the operation.
+ * @arg2 contains an argument.
+ * @arg3 contains an argument.
+ * @arg4 contains an argument.
+ * @arg5 contains an argument.
+ * Return -ENOSYS if no-one wanted to handle this op, any other value to
+ * cause prctl() to return immediately with that value.
+ * @task_to_inode:
+ * Set the security attributes for an inode based on an associated task's
+ * security attributes, e.g. for /proc/pid inodes.
+ * @p contains the task_struct for the task.
+ * @inode contains the inode structure for the inode.
+ *
+ * Security hooks for Netlink messaging.
+ *
+ * @netlink_send:
+ * Save security information for a netlink message so that permission
+ * checking can be performed when the message is processed. The security
+ * information can be saved using the eff_cap field of the
+ * netlink_skb_parms structure. Also may be used to provide fine
+ * grained control over message transmission.
+ * @sk associated sock of task sending the message.
+ * @skb contains the sk_buff structure for the netlink message.
+ * Return 0 if the information was successfully saved and message
+ * is allowed to be transmitted.
+ *
+ * Security hooks for Unix domain networking.
+ *
+ * @unix_stream_connect:
+ * Check permissions before establishing a Unix domain stream connection
+ * between @sock and @other.
+ * @sock contains the sock structure.
+ * @other contains the peer sock structure.
+ * @newsk contains the new sock structure.
+ * Return 0 if permission is granted.
+ * @unix_may_send:
+ * Check permissions before connecting or sending datagrams from @sock to
+ * @other.
+ * @sock contains the socket structure.
+ * @other contains the peer socket structure.
+ * Return 0 if permission is granted.
+ *
+ * The @unix_stream_connect and @unix_may_send hooks were necessary because
+ * Linux provides an alternative to the conventional file name space for Unix
+ * domain sockets. Whereas binding and connecting to sockets in the file name
+ * space is mediated by the typical file permissions (and caught by the mknod
+ * and permission hooks in inode_security_ops), binding and connecting to
+ * sockets in the abstract name space is completely unmediated. Sufficient
+ * control of Unix domain sockets in the abstract name space isn't possible
+ * using only the socket layer hooks, since we need to know the actual target
+ * socket, which is not looked up until we are inside the af_unix code.
+ *
+ * Security hooks for socket operations.
+ *
+ * @socket_create:
+ * Check permissions prior to creating a new socket.
+ * @family contains the requested protocol family.
+ * @type contains the requested communications type.
+ * @protocol contains the requested protocol.
+ * @kern set to 1 if a kernel socket.
+ * Return 0 if permission is granted.
+ * @socket_post_create:
+ * This hook allows a module to update or allocate a per-socket security
+ * structure. Note that the security field was not added directly to the
+ * socket structure, but rather, the socket security information is stored
+ * in the associated inode. Typically, the inode alloc_security hook will
+ * allocate and attach security information to
+ * sock->inode->i_security. This hook may be used to update the
+ * sock->inode->i_security field with additional information that wasn't
+ * available when the inode was allocated.
+ * @sock contains the newly created socket structure.
+ * @family contains the requested protocol family.
+ * @type contains the requested communications type.
+ * @protocol contains the requested protocol.
+ * @kern set to 1 if a kernel socket.
+ * @socket_bind:
+ * Check permission before socket protocol layer bind operation is
+ * performed and the socket @sock is bound to the address specified in the
+ * @address parameter.
+ * @sock contains the socket structure.
+ * @address contains the address to bind to.
+ * @addrlen contains the length of address.
+ * Return 0 if permission is granted.
+ * @socket_connect:
+ * Check permission before socket protocol layer connect operation
+ * attempts to connect socket @sock to a remote address, @address.
+ * @sock contains the socket structure.
+ * @address contains the address of remote endpoint.
+ * @addrlen contains the length of address.
+ * Return 0 if permission is granted.
+ * @socket_listen:
+ * Check permission before socket protocol layer listen operation.
+ * @sock contains the socket structure.
+ * @backlog contains the maximum length for the pending connection queue.
+ * Return 0 if permission is granted.
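As a concrete illustration of the socket hooks documented above: every hook in this file is supplied through a single struct security_operations that the module hands to register_security(). The sketch below is purely illustrative and not part of this header; the module name "example" and its toy policy (refusing connect(2) to IPv4 port 7) are invented, but the socket_connect signature, security_module_enable()/register_security() and security_fixup_ops() follow the declarations later in this file.

/*
 * Hypothetical example module -- not part of security.h.  It wires one
 * socket hook into a security_operations table and registers it.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/socket.h>
#include <linux/in.h>

#ifdef CONFIG_SECURITY_NETWORK
/* Toy policy: refuse connect(2) to IPv4 port 7; 0 means "no objection". */
static int example_socket_connect(struct socket *sock,
				  struct sockaddr *address, int addrlen)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)address;

	if (address->sa_family == AF_INET &&
	    addrlen >= (int)sizeof(struct sockaddr_in) &&
	    ntohs(sin->sin_port) == 7)
		return -EPERM;
	return 0;
}
#endif

static struct security_operations example_ops = {
	.name		= "example",
#ifdef CONFIG_SECURITY_NETWORK
	.socket_connect	= example_socket_connect,
#endif
};

static int __init example_lsm_init(void)
{
	/* Only one LSM can be active; security_module_enable() arbitrates. */
	if (!security_module_enable(&example_ops))
		return 0;
	return register_security(&example_ops);
}
security_initcall(example_lsm_init);

Hooks left NULL in the table are filled in with the default, mostly capability-based implementations by security_fixup_ops(), so a module only needs to provide the hooks it actually cares about.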
+ * @socket_accept: + * Check permission before accepting a new connection. Note that the new + * socket, @newsock, has been created and some information copied to it, + * but the accept operation has not actually been performed. + * @sock contains the listening socket structure. + * @newsock contains the newly created server socket for connection. + * Return 0 if permission is granted. + * @socket_sendmsg: + * Check permission before transmitting a message to another socket. + * @sock contains the socket structure. + * @msg contains the message to be transmitted. + * @size contains the size of message. + * Return 0 if permission is granted. + * @socket_recvmsg: + * Check permission before receiving a message from a socket. + * @sock contains the socket structure. + * @msg contains the message structure. + * @size contains the size of message structure. + * @flags contains the operational flags. + * Return 0 if permission is granted. + * @socket_getsockname: + * Check permission before the local address (name) of the socket object + * @sock is retrieved. + * @sock contains the socket structure. + * Return 0 if permission is granted. + * @socket_getpeername: + * Check permission before the remote address (name) of a socket object + * @sock is retrieved. + * @sock contains the socket structure. + * Return 0 if permission is granted. + * @socket_getsockopt: + * Check permissions before retrieving the options associated with socket + * @sock. + * @sock contains the socket structure. + * @level contains the protocol level to retrieve option from. + * @optname contains the name of option to retrieve. + * Return 0 if permission is granted. + * @socket_setsockopt: + * Check permissions before setting the options associated with socket + * @sock. + * @sock contains the socket structure. + * @level contains the protocol level to set options for. + * @optname contains the name of the option to set. + * Return 0 if permission is granted. + * @socket_shutdown: + * Checks permission before all or part of a connection on the socket + * @sock is shut down. + * @sock contains the socket structure. + * @how contains the flag indicating how future sends and receives are handled. + * Return 0 if permission is granted. + * @socket_sock_rcv_skb: + * Check permissions on incoming network packets. This hook is distinct + * from Netfilter's IP input hooks since it is the first time that the + * incoming sk_buff @skb has been associated with a particular socket, @sk. + * Must not sleep inside this hook because some callers hold spinlocks. + * @sk contains the sock (not socket) associated with the incoming sk_buff. + * @skb contains the incoming network data. + * @socket_getpeersec_stream: + * This hook allows the security module to provide peer socket security + * state for unix or connected tcp sockets to userspace via getsockopt + * SO_GETPEERSEC. For tcp sockets this can be meaningful if the + * socket is associated with an ipsec SA. + * @sock is the local socket. + * @optval userspace memory where the security state is to be copied. + * @optlen userspace int where the module should copy the actual length + * of the security state. + * @len as input is the maximum length to copy to userspace provided + * by the caller. + * Return 0 if all is well, otherwise, typical getsockopt return + * values. + * @socket_getpeersec_dgram: + * This hook allows the security module to provide peer socket security + * state for udp sockets on a per-packet basis to userspace via + * getsockopt SO_GETPEERSEC. 
The application must first have indicated + * the IP_PASSSEC option via getsockopt. It can then retrieve the + * security state returned by this hook for a packet via the SCM_SECURITY + * ancillary message type. + * @skb is the skbuff for the packet being queried + * @secdata is a pointer to a buffer in which to copy the security data + * @seclen is the maximum length for @secdata + * Return 0 on success, error on failure. + * @sk_alloc_security: + * Allocate and attach a security structure to the sk->sk_security field, + * which is used to copy security attributes between local stream sockets. + * @sk_free_security: + * Deallocate security structure. + * @sk_clone_security: + * Clone/copy security structure. + * @sk_getsecid: + * Retrieve the LSM-specific secid for the sock to enable caching of network + * authorizations. + * @sock_graft: + * Sets the socket's isec sid to the sock's sid. + * @inet_conn_request: + * Sets the openreq's sid to socket's sid with MLS portion taken from peer sid. + * @inet_csk_clone: + * Sets the new child socket's sid to the openreq sid. + * @inet_conn_established: + * Sets the connection's peersid to the secmark on skb. + * @secmark_relabel_packet: + * check if the process should be allowed to relabel packets to the given secid + * @security_secmark_refcount_inc + * tells the LSM to increment the number of secmark labeling rules loaded + * @security_secmark_refcount_dec + * tells the LSM to decrement the number of secmark labeling rules loaded + * @req_classify_flow: + * Sets the flow's sid to the openreq sid. + * @tun_dev_create: + * Check permissions prior to creating a new TUN device. + * @tun_dev_post_create: + * This hook allows a module to update or allocate a per-socket security + * structure. + * @sk contains the newly created sock structure. + * @tun_dev_attach: + * Check permissions prior to attaching to a persistent TUN device. This + * hook can also be used by the module to update any security state + * associated with the TUN device's sock structure. + * @sk contains the existing sock structure. + * + * Security hooks for XFRM operations. + * + * @xfrm_policy_alloc_security: + * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy + * Database used by the XFRM system. + * @sec_ctx contains the security context information being provided by + * the user-level policy update program (e.g., setkey). + * Allocate a security structure to the xp->security field; the security + * field is initialized to NULL when the xfrm_policy is allocated. + * Return 0 if operation was successful (memory to allocate, legal context) + * @xfrm_policy_clone_security: + * @old_ctx contains an existing xfrm_sec_ctx. + * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. + * Allocate a security structure in new_ctxp that contains the + * information from the old_ctx structure. + * Return 0 if operation was successful (memory to allocate). + * @xfrm_policy_free_security: + * @ctx contains the xfrm_sec_ctx + * Deallocate xp->security. + * @xfrm_policy_delete_security: + * @ctx contains the xfrm_sec_ctx. + * Authorize deletion of xp->security. + * @xfrm_state_alloc_security: + * @x contains the xfrm_state being added to the Security Association + * Database by the XFRM system. + * @sec_ctx contains the security context information being provided by + * the user-level SA generation program (e.g., setkey or racoon). + * @secid contains the secid from which to take the mls portion of the context. 
+ * Allocate a security structure to the x->security field; the security + * field is initialized to NULL when the xfrm_state is allocated. Set the + * context to correspond to either sec_ctx or polsec, with the mls portion + * taken from secid in the latter case. + * Return 0 if operation was successful (memory to allocate, legal context). + * @xfrm_state_free_security: + * @x contains the xfrm_state. + * Deallocate x->security. + * @xfrm_state_delete_security: + * @x contains the xfrm_state. + * Authorize deletion of x->security. + * @xfrm_policy_lookup: + * @ctx contains the xfrm_sec_ctx for which the access control is being + * checked. + * @fl_secid contains the flow security label that is used to authorize + * access to the policy xp. + * @dir contains the direction of the flow (input or output). + * Check permission when a flow selects a xfrm_policy for processing + * XFRMs on a packet. The hook is called when selecting either a + * per-socket policy or a generic xfrm policy. + * Return 0 if permission is granted, -ESRCH otherwise, or -errno + * on other errors. + * @xfrm_state_pol_flow_match: + * @x contains the state to match. + * @xp contains the policy to check for a match. + * @fl contains the flow to check for a match. + * Return 1 if there is a match. + * @xfrm_decode_session: + * @skb points to skb to decode. + * @secid points to the flow key secid to set. + * @ckall says if all xfrms used should be checked for same secid. + * Return 0 if ckall is zero or all xfrms used have the same secid. + * + * Security hooks affecting all Key Management operations + * + * @key_alloc: + * Permit allocation of a key and assign security data. Note that key does + * not have a serial number assigned at this point. + * @key points to the key. + * @flags is the allocation flags + * Return 0 if permission is granted, -ve error otherwise. + * @key_free: + * Notification of destruction; free security data. + * @key points to the key. + * No return value. + * @key_permission: + * See whether a specific operational right is granted to a process on a + * key. + * @key_ref refers to the key (key pointer + possession attribute bit). + * @cred points to the credentials to provide the context against which to + * evaluate the security data on the key. + * @perm describes the combination of permissions required of this key. + * Return 0 if permission is granted, -ve error otherwise. + * @key_getsecurity: + * Get a textual representation of the security context attached to a key + * for the purposes of honouring KEYCTL_GETSECURITY. This function + * allocates the storage for the NUL-terminated string and the caller + * should free it. + * @key points to the key to be queried. + * @_buffer points to a pointer that should be set to point to the + * resulting string (if no label or an error occurs). + * Return the length of the string (including terminating NUL) or -ve if + * an error. + * May also return 0 (and a NULL buffer pointer) if there is no label. + * + * Security hooks affecting all System V IPC operations. + * + * @ipc_permission: + * Check permissions for access to IPC + * @ipcp contains the kernel IPC permission structure + * @flag contains the desired (requested) permission set + * Return 0 if permission is granted. + * @ipc_getsecid: + * Get the secid associated with the ipc object. + * @ipcp contains the kernel IPC permission structure. + * @secid contains a pointer to the location where result will be saved. + * In case of failure, @secid will be set to zero. 
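To make the @key_getsecurity contract above concrete, here is a hedged sketch of one possible implementation. It assumes an invented per-key blob (struct example_key_security with ctx/ctxlen fields) that the module's @key_alloc hook stashed in key->security, and follows the documented rules: hand back a freshly allocated, NUL-terminated string that the caller frees, return its length including the NUL, or return 0 with a NULL buffer when the key carries no label.

#include <linux/errno.h>
#include <linux/key.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical per-key security blob -- not a real kernel structure. */
struct example_key_security {
	char		*ctx;		/* NUL-terminated label, may be NULL */
	unsigned int	ctxlen;		/* strlen(ctx) */
};

static int example_key_getsecurity(struct key *key, char **_buffer)
{
	struct example_key_security *ksec = key->security;
	char *copy;

	if (!ksec || !ksec->ctx) {
		*_buffer = NULL;	/* no label attached to this key */
		return 0;
	}

	copy = kstrdup(ksec->ctx, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	*_buffer = copy;		/* caller is responsible for kfree() */
	return ksec->ctxlen + 1;	/* length including the terminating NUL */
}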
+ * + * Security hooks for individual messages held in System V IPC message queues + * @msg_msg_alloc_security: + * Allocate and attach a security structure to the msg->security field. + * The security field is initialized to NULL when the structure is first + * created. + * @msg contains the message structure to be modified. + * Return 0 if operation was successful and permission is granted. + * @msg_msg_free_security: + * Deallocate the security structure for this message. + * @msg contains the message structure to be modified. + * + * Security hooks for System V IPC Message Queues + * + * @msg_queue_alloc_security: + * Allocate and attach a security structure to the + * msq->q_perm.security field. The security field is initialized to + * NULL when the structure is first created. + * @msq contains the message queue structure to be modified. + * Return 0 if operation was successful and permission is granted. + * @msg_queue_free_security: + * Deallocate security structure for this message queue. + * @msq contains the message queue structure to be modified. + * @msg_queue_associate: + * Check permission when a message queue is requested through the + * msgget system call. This hook is only called when returning the + * message queue identifier for an existing message queue, not when a + * new message queue is created. + * @msq contains the message queue to act upon. + * @msqflg contains the operation control flags. + * Return 0 if permission is granted. + * @msg_queue_msgctl: + * Check permission when a message control operation specified by @cmd + * is to be performed on the message queue @msq. + * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO. + * @msq contains the message queue to act upon. May be NULL. + * @cmd contains the operation to be performed. + * Return 0 if permission is granted. + * @msg_queue_msgsnd: + * Check permission before a message, @msg, is enqueued on the message + * queue, @msq. + * @msq contains the message queue to send message to. + * @msg contains the message to be enqueued. + * @msqflg contains operational flags. + * Return 0 if permission is granted. + * @msg_queue_msgrcv: + * Check permission before a message, @msg, is removed from the message + * queue, @msq. The @target task structure contains a pointer to the + * process that will be receiving the message (not equal to the current + * process when inline receives are being performed). + * @msq contains the message queue to retrieve message from. + * @msg contains the message destination. + * @target contains the task structure for recipient process. + * @type contains the type of message requested. + * @mode contains the operational flags. + * Return 0 if permission is granted. + * + * Security hooks for System V Shared Memory Segments + * + * @shm_alloc_security: + * Allocate and attach a security structure to the shp->shm_perm.security + * field. The security field is initialized to NULL when the structure is + * first created. + * @shp contains the shared memory structure to be modified. + * Return 0 if operation was successful and permission is granted. + * @shm_free_security: + * Deallocate the security struct for this memory segment. + * @shp contains the shared memory structure to be modified. + * @shm_associate: + * Check permission when a shared memory region is requested through the + * shmget system call. This hook is only called when returning the shared + * memory region identifier for an existing region, not when a new shared + * memory region is created. 
+ * @shp contains the shared memory structure to be modified.
+ * @shmflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @shm_shmctl:
+ * Check permission when a shared memory control operation specified by
+ * @cmd is to be performed on the shared memory region @shp.
+ * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
+ * @shp contains shared memory structure to be modified.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @shm_shmat:
+ * Check permissions prior to allowing the shmat system call to attach the
+ * shared memory segment @shp to the data segment of the calling process.
+ * The attaching address is specified by @shmaddr.
+ * @shp contains the shared memory structure to be modified.
+ * @shmaddr contains the address to attach memory region to.
+ * @shmflg contains the operational flags.
+ * Return 0 if permission is granted.
+ *
+ * Security hooks for System V Semaphores
+ *
+ * @sem_alloc_security:
+ * Allocate and attach a security structure to the sma->sem_perm.security
+ * field. The security field is initialized to NULL when the structure is
+ * first created.
+ * @sma contains the semaphore structure.
+ * Return 0 if operation was successful and permission is granted.
+ * @sem_free_security:
+ * Deallocate security struct for this semaphore.
+ * @sma contains the semaphore structure.
+ * @sem_associate:
+ * Check permission when a semaphore is requested through the semget
+ * system call. This hook is only called when returning the semaphore
+ * identifier for an existing semaphore, not when a new one must be
+ * created.
+ * @sma contains the semaphore structure.
+ * @semflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @sem_semctl:
+ * Check permission when a semaphore operation specified by @cmd is to be
+ * performed on the semaphore @sma. The @sma may be NULL, e.g. for
+ * IPC_INFO or SEM_INFO.
+ * @sma contains the semaphore structure. May be NULL.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @sem_semop:
+ * Check permissions before performing operations on members of the
+ * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
+ * may be modified.
+ * @sma contains the semaphore structure.
+ * @sops contains the operations to perform.
+ * @nsops contains the number of operations to perform.
+ * @alter contains the flag indicating whether changes are to be made.
+ * Return 0 if permission is granted.
+ *
+ * @ptrace_access_check:
+ * Check permission before allowing the current process to trace the
+ * @child process.
+ * Security modules may also want to perform a process tracing check
+ * during an execve in the bprm_set_creds hook of binprm_security_ops
+ * if the process is being traced and its security attributes would be
+ * changed by the execve.
+ * @child contains the task_struct structure for the target process.
+ * @mode contains the PTRACE_MODE flags indicating the form of access.
+ * Return 0 if permission is granted.
+ * @ptrace_traceme:
+ * Check that the @parent process has sufficient permission to trace the
+ * current process before allowing the current process to present itself
+ * to the @parent process for tracing.
+ * The parent process will still have to undergo the ptrace_access_check
+ * checks before it is allowed to trace this one.
+ * @parent contains the task_struct structure for debugger process. + * Return 0 if permission is granted. + * @capget: + * Get the @effective, @inheritable, and @permitted capability sets for + * the @target process. The hook may also perform permission checking to + * determine if the current process is allowed to see the capability sets + * of the @target process. + * @target contains the task_struct structure for target process. + * @effective contains the effective capability set. + * @inheritable contains the inheritable capability set. + * @permitted contains the permitted capability set. + * Return 0 if the capability sets were successfully obtained. + * @capset: + * Set the @effective, @inheritable, and @permitted capability sets for + * the current process. + * @new contains the new credentials structure for target process. + * @old contains the current credentials structure for target process. + * @effective contains the effective capability set. + * @inheritable contains the inheritable capability set. + * @permitted contains the permitted capability set. + * Return 0 and update @new if permission is granted. + * @capable: + * Check whether the @tsk process has the @cap capability in the indicated + * credentials. + * @cred contains the credentials to use. + * @ns contains the user namespace we want the capability in + * @cap contains the capability <include/linux/capability.h>. + * @audit: Whether to write an audit message or not + * Return 0 if the capability is granted for @tsk. + * @syslog: + * Check permission before accessing the kernel message ring or changing + * logging to the console. + * See the syslog(2) manual page for an explanation of the @type values. + * @type contains the type of action. + * @from_file indicates the context of action (if it came from /proc). + * Return 0 if permission is granted. + * @settime: + * Check permission to change the system time. + * struct timespec and timezone are defined in include/linux/time.h + * @ts contains new time + * @tz contains new timezone + * Return 0 if permission is granted. + * @vm_enough_memory: + * Check permissions for allocating a new virtual mapping. + * @mm contains the mm struct it is being added to. + * @pages contains the number of pages. + * Return 0 if permission is granted. + * + * @secid_to_secctx: + * Convert secid to security context. If secdata is NULL the length of + * the result will be returned in seclen, but no secdata will be returned. + * This does mean that the length could change between calls to check the + * length and the next call which actually allocates and returns the secdata. + * @secid contains the security ID. + * @secdata contains the pointer that stores the converted security context. + * @seclen pointer which contains the length of the data + * @secctx_to_secid: + * Convert security context to secid. + * @secid contains the pointer to the generated security ID. + * @secdata contains the security context. + * + * @release_secctx: + * Release the security context. + * @secdata contains the security context. + * @seclen contains the length of the security context. + * + * Security hooks for Audit + * + * @audit_rule_init: + * Allocate and initialize an LSM audit rule structure. + * @field contains the required Audit action. Fields flags are defined in include/linux/audit.h + * @op contains the operator the rule uses. + * @rulestr contains the context where the rule will be applied to. + * @lsmrule contains a pointer to receive the result. 
+ * Return 0 if @lsmrule has been successfully set, + * -EINVAL in case of an invalid rule. + * + * @audit_rule_known: + * Specifies whether given @rule contains any fields related to current LSM. + * @rule contains the audit rule of interest. + * Return 1 in case of relation found, 0 otherwise. + * + * @audit_rule_match: + * Determine if given @secid matches a rule previously approved + * by @audit_rule_known. + * @secid contains the security id in question. + * @field contains the field which relates to current LSM. + * @op contains the operator that will be used for matching. + * @rule points to the audit rule that will be checked against. + * @actx points to the audit context associated with the check. + * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. + * + * @audit_rule_free: + * Deallocate the LSM audit rule structure previously allocated by + * audit_rule_init. + * @rule contains the allocated rule + * + * @inode_notifysecctx: + * Notify the security module of what the security context of an inode + * should be. Initializes the incore security context managed by the + * security module for this inode. Example usage: NFS client invokes + * this hook to initialize the security context in its incore inode to the + * value provided by the server for the file when the server returned the + * file's attributes to the client. + * + * Must be called with inode->i_mutex locked. + * + * @inode we wish to set the security context of. + * @ctx contains the string which we wish to set in the inode. + * @ctxlen contains the length of @ctx. + * + * @inode_setsecctx: + * Change the security context of an inode. Updates the + * incore security context managed by the security module and invokes the + * fs code as needed (via __vfs_setxattr_noperm) to update any backing + * xattrs that represent the context. Example usage: NFS server invokes + * this hook to change the security context in its incore inode and on the + * backing filesystem to a value provided by the client on a SETATTR + * operation. + * + * Must be called with inode->i_mutex locked. + * + * @dentry contains the inode we wish to set the security context of. + * @ctx contains the string which we wish to set in the inode. + * @ctxlen contains the length of @ctx. + * + * @inode_getsecctx: + * Returns a string containing all relevant security context information + * + * @inode we wish to get the security context of. + * @ctx is a pointer in which to place the allocated security context. + * @ctxlen points to the place to put the length of @ctx. + * This is the main security structure. 
+ */ +struct security_operations { + char name[SECURITY_NAME_MAX + 1]; + + int (*ptrace_access_check) (struct task_struct *child, unsigned int mode); + int (*ptrace_traceme) (struct task_struct *parent); + int (*capget) (struct task_struct *target, + kernel_cap_t *effective, + kernel_cap_t *inheritable, kernel_cap_t *permitted); + int (*capset) (struct cred *new, + const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); + int (*capable) (const struct cred *cred, struct user_namespace *ns, + int cap, int audit); + int (*quotactl) (int cmds, int type, int id, struct super_block *sb); + int (*quota_on) (struct dentry *dentry); + int (*syslog) (int type); + int (*settime) (const struct timespec *ts, const struct timezone *tz); + int (*vm_enough_memory) (struct mm_struct *mm, long pages); + + int (*bprm_set_creds) (struct linux_binprm *bprm); + int (*bprm_check_security) (struct linux_binprm *bprm); + int (*bprm_secureexec) (struct linux_binprm *bprm); + void (*bprm_committing_creds) (struct linux_binprm *bprm); + void (*bprm_committed_creds) (struct linux_binprm *bprm); + + int (*sb_alloc_security) (struct super_block *sb); + void (*sb_free_security) (struct super_block *sb); + int (*sb_copy_data) (char *orig, char *copy); + int (*sb_remount) (struct super_block *sb, void *data); + int (*sb_kern_mount) (struct super_block *sb, int flags, void *data); + int (*sb_show_options) (struct seq_file *m, struct super_block *sb); + int (*sb_statfs) (struct dentry *dentry); + int (*sb_mount) (char *dev_name, struct path *path, + char *type, unsigned long flags, void *data); + int (*sb_umount) (struct vfsmount *mnt, int flags); + int (*sb_pivotroot) (struct path *old_path, + struct path *new_path); + int (*sb_set_mnt_opts) (struct super_block *sb, + struct security_mnt_opts *opts); + void (*sb_clone_mnt_opts) (const struct super_block *oldsb, + struct super_block *newsb); + int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts); + +#ifdef CONFIG_SECURITY_PATH + int (*path_unlink) (struct path *dir, struct dentry *dentry); + int (*path_mkdir) (struct path *dir, struct dentry *dentry, umode_t mode); + int (*path_rmdir) (struct path *dir, struct dentry *dentry); + int (*path_mknod) (struct path *dir, struct dentry *dentry, umode_t mode, + unsigned int dev); + int (*path_truncate) (struct path *path); + int (*path_symlink) (struct path *dir, struct dentry *dentry, + const char *old_name); + int (*path_link) (struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry); + int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry); + int (*path_chmod) (struct path *path, umode_t mode); + int (*path_chown) (struct path *path, uid_t uid, gid_t gid); + int (*path_chroot) (struct path *path); +#endif + + int (*inode_alloc_security) (struct inode *inode); + void (*inode_free_security) (struct inode *inode); + int (*inode_init_security) (struct inode *inode, struct inode *dir, + const struct qstr *qstr, char **name, + void **value, size_t *len); + int (*inode_create) (struct inode *dir, + struct dentry *dentry, umode_t mode); + int (*inode_link) (struct dentry *old_dentry, + struct inode *dir, struct dentry *new_dentry); + int (*inode_unlink) (struct inode *dir, struct dentry *dentry); + int (*inode_symlink) (struct inode *dir, + struct dentry *dentry, const char *old_name); + int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode); + 
int (*inode_rmdir) (struct inode *dir, struct dentry *dentry); + int (*inode_mknod) (struct inode *dir, struct dentry *dentry, + umode_t mode, dev_t dev); + int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry); + int (*inode_readlink) (struct dentry *dentry); + int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); + int (*inode_permission) (struct inode *inode, int mask); + int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); + int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); + int (*inode_setxattr) (struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); + void (*inode_post_setxattr) (struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); + int (*inode_getxattr) (struct dentry *dentry, const char *name); + int (*inode_listxattr) (struct dentry *dentry); + int (*inode_removexattr) (struct dentry *dentry, const char *name); + int (*inode_need_killpriv) (struct dentry *dentry); + int (*inode_killpriv) (struct dentry *dentry); + int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc); + int (*inode_setsecurity) (struct inode *inode, const char *name, const void *value, size_t size, int flags); + int (*inode_listsecurity) (struct inode *inode, char *buffer, size_t buffer_size); + void (*inode_getsecid) (const struct inode *inode, u32 *secid); + + int (*file_permission) (struct file *file, int mask); + int (*file_alloc_security) (struct file *file); + void (*file_free_security) (struct file *file); + int (*file_ioctl) (struct file *file, unsigned int cmd, + unsigned long arg); + int (*file_mmap) (struct file *file, + unsigned long reqprot, unsigned long prot, + unsigned long flags, unsigned long addr, + unsigned long addr_only); + int (*file_mprotect) (struct vm_area_struct *vma, + unsigned long reqprot, + unsigned long prot); + int (*file_lock) (struct file *file, unsigned int cmd); + int (*file_fcntl) (struct file *file, unsigned int cmd, + unsigned long arg); + int (*file_set_fowner) (struct file *file); + int (*file_send_sigiotask) (struct task_struct *tsk, + struct fown_struct *fown, int sig); + int (*file_receive) (struct file *file); + int (*dentry_open) (struct file *file, const struct cred *cred); + + int (*task_create) (unsigned long clone_flags); + void (*task_free) (struct task_struct *task); + int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp); + void (*cred_free) (struct cred *cred); + int (*cred_prepare)(struct cred *new, const struct cred *old, + gfp_t gfp); + void (*cred_transfer)(struct cred *new, const struct cred *old); + int (*kernel_act_as)(struct cred *new, u32 secid); + int (*kernel_create_files_as)(struct cred *new, struct inode *inode); + int (*kernel_module_request)(char *kmod_name); + int (*task_fix_setuid) (struct cred *new, const struct cred *old, + int flags); + int (*task_setpgid) (struct task_struct *p, pid_t pgid); + int (*task_getpgid) (struct task_struct *p); + int (*task_getsid) (struct task_struct *p); + void (*task_getsecid) (struct task_struct *p, u32 *secid); + int (*task_setnice) (struct task_struct *p, int nice); + int (*task_setioprio) (struct task_struct *p, int ioprio); + int (*task_getioprio) (struct task_struct *p); + int (*task_setrlimit) (struct task_struct *p, unsigned int resource, + struct rlimit *new_rlim); + int (*task_setscheduler) (struct task_struct *p); + int (*task_getscheduler) (struct task_struct *p); + int 
(*task_movememory) (struct task_struct *p); + int (*task_kill) (struct task_struct *p, + struct siginfo *info, int sig, u32 secid); + int (*task_wait) (struct task_struct *p); + int (*task_prctl) (int option, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5); + void (*task_to_inode) (struct task_struct *p, struct inode *inode); + + int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag); + void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid); + + int (*msg_msg_alloc_security) (struct msg_msg *msg); + void (*msg_msg_free_security) (struct msg_msg *msg); + + int (*msg_queue_alloc_security) (struct msg_queue *msq); + void (*msg_queue_free_security) (struct msg_queue *msq); + int (*msg_queue_associate) (struct msg_queue *msq, int msqflg); + int (*msg_queue_msgctl) (struct msg_queue *msq, int cmd); + int (*msg_queue_msgsnd) (struct msg_queue *msq, + struct msg_msg *msg, int msqflg); + int (*msg_queue_msgrcv) (struct msg_queue *msq, + struct msg_msg *msg, + struct task_struct *target, + long type, int mode); + + int (*shm_alloc_security) (struct shmid_kernel *shp); + void (*shm_free_security) (struct shmid_kernel *shp); + int (*shm_associate) (struct shmid_kernel *shp, int shmflg); + int (*shm_shmctl) (struct shmid_kernel *shp, int cmd); + int (*shm_shmat) (struct shmid_kernel *shp, + char __user *shmaddr, int shmflg); + + int (*sem_alloc_security) (struct sem_array *sma); + void (*sem_free_security) (struct sem_array *sma); + int (*sem_associate) (struct sem_array *sma, int semflg); + int (*sem_semctl) (struct sem_array *sma, int cmd); + int (*sem_semop) (struct sem_array *sma, + struct sembuf *sops, unsigned nsops, int alter); + + int (*netlink_send) (struct sock *sk, struct sk_buff *skb); + + void (*d_instantiate) (struct dentry *dentry, struct inode *inode); + + int (*getprocattr) (struct task_struct *p, char *name, char **value); + int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size); + int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen); + int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid); + void (*release_secctx) (char *secdata, u32 seclen); + + int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); + int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); + int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); + +#ifdef CONFIG_SECURITY_NETWORK + int (*unix_stream_connect) (struct sock *sock, struct sock *other, struct sock *newsk); + int (*unix_may_send) (struct socket *sock, struct socket *other); + + int (*socket_create) (int family, int type, int protocol, int kern); + int (*socket_post_create) (struct socket *sock, int family, + int type, int protocol, int kern); + int (*socket_bind) (struct socket *sock, + struct sockaddr *address, int addrlen); + int (*socket_connect) (struct socket *sock, + struct sockaddr *address, int addrlen); + int (*socket_listen) (struct socket *sock, int backlog); + int (*socket_accept) (struct socket *sock, struct socket *newsock); + int (*socket_sendmsg) (struct socket *sock, + struct msghdr *msg, int size); + int (*socket_recvmsg) (struct socket *sock, + struct msghdr *msg, int size, int flags); + int (*socket_getsockname) (struct socket *sock); + int (*socket_getpeername) (struct socket *sock); + int (*socket_getsockopt) (struct socket *sock, int level, int optname); + int (*socket_setsockopt) (struct socket *sock, int level, int optname); + int (*socket_shutdown) (struct socket *sock, int how); + int 
(*socket_sock_rcv_skb) (struct sock *sk, struct sk_buff *skb); + int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len); + int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid); + int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); + void (*sk_free_security) (struct sock *sk); + void (*sk_clone_security) (const struct sock *sk, struct sock *newsk); + void (*sk_getsecid) (struct sock *sk, u32 *secid); + void (*sock_graft) (struct sock *sk, struct socket *parent); + int (*inet_conn_request) (struct sock *sk, struct sk_buff *skb, + struct request_sock *req); + void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); + void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); + int (*secmark_relabel_packet) (u32 secid); + void (*secmark_refcount_inc) (void); + void (*secmark_refcount_dec) (void); + void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); + int (*tun_dev_create)(void); + void (*tun_dev_post_create)(struct sock *sk); + int (*tun_dev_attach)(struct sock *sk); +#endif /* CONFIG_SECURITY_NETWORK */ + +#ifdef CONFIG_SECURITY_NETWORK_XFRM + int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx); + int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); + void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); + int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); + int (*xfrm_state_alloc_security) (struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx, + u32 secid); + void (*xfrm_state_free_security) (struct xfrm_state *x); + int (*xfrm_state_delete_security) (struct xfrm_state *x); + int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); + int (*xfrm_state_pol_flow_match) (struct xfrm_state *x, + struct xfrm_policy *xp, + const struct flowi *fl); + int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall); +#endif /* CONFIG_SECURITY_NETWORK_XFRM */ + + /* key management security hooks */ +#ifdef CONFIG_KEYS + int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags); + void (*key_free) (struct key *key); + int (*key_permission) (key_ref_t key_ref, + const struct cred *cred, + key_perm_t perm); + int (*key_getsecurity)(struct key *key, char **_buffer); +#endif /* CONFIG_KEYS */ + +#ifdef CONFIG_AUDIT + int (*audit_rule_init) (u32 field, u32 op, char *rulestr, void **lsmrule); + int (*audit_rule_known) (struct audit_krule *krule); + int (*audit_rule_match) (u32 secid, u32 field, u32 op, void *lsmrule, + struct audit_context *actx); + void (*audit_rule_free) (void *lsmrule); +#endif /* CONFIG_AUDIT */ +}; + +/* prototypes */ +extern int security_init(void); +extern int security_module_enable(struct security_operations *ops); +extern int register_security(struct security_operations *ops); +extern void __init security_fixup_ops(struct security_operations *ops); + + +/* Security operations */ +int security_ptrace_access_check(struct task_struct *child, unsigned int mode); +int security_ptrace_traceme(struct task_struct *parent); +int security_capget(struct task_struct *target, + kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted); +int security_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); +int security_capable(const struct cred *cred, 
struct user_namespace *ns, + int cap); +int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns, + int cap); +int security_quotactl(int cmds, int type, int id, struct super_block *sb); +int security_quota_on(struct dentry *dentry); +int security_syslog(int type); +int security_settime(const struct timespec *ts, const struct timezone *tz); +int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); +int security_bprm_set_creds(struct linux_binprm *bprm); +int security_bprm_check(struct linux_binprm *bprm); +void security_bprm_committing_creds(struct linux_binprm *bprm); +void security_bprm_committed_creds(struct linux_binprm *bprm); +int security_bprm_secureexec(struct linux_binprm *bprm); +int security_sb_alloc(struct super_block *sb); +void security_sb_free(struct super_block *sb); +int security_sb_copy_data(char *orig, char *copy); +int security_sb_remount(struct super_block *sb, void *data); +int security_sb_kern_mount(struct super_block *sb, int flags, void *data); +int security_sb_show_options(struct seq_file *m, struct super_block *sb); +int security_sb_statfs(struct dentry *dentry); +int security_sb_mount(char *dev_name, struct path *path, + char *type, unsigned long flags, void *data); +int security_sb_umount(struct vfsmount *mnt, int flags); +int security_sb_pivotroot(struct path *old_path, struct path *new_path); +int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts); +void security_sb_clone_mnt_opts(const struct super_block *oldsb, + struct super_block *newsb); +int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts); + +int security_inode_alloc(struct inode *inode); +void security_inode_free(struct inode *inode); +int security_inode_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr, + initxattrs initxattrs, void *fs_data); +int security_old_inode_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr, char **name, + void **value, size_t *len); +int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode); +int security_inode_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *new_dentry); +int security_inode_unlink(struct inode *dir, struct dentry *dentry); +int security_inode_symlink(struct inode *dir, struct dentry *dentry, + const char *old_name); +int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); +int security_inode_rmdir(struct inode *dir, struct dentry *dentry); +int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev); +int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry); +int security_inode_readlink(struct dentry *dentry); +int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); +int security_inode_permission(struct inode *inode, int mask); +int security_inode_setattr(struct dentry *dentry, struct iattr *attr); +int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); +int security_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +void security_inode_post_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +int security_inode_getxattr(struct dentry *dentry, const char *name); +int security_inode_listxattr(struct dentry *dentry); +int security_inode_removexattr(struct dentry *dentry, const char *name); +int 
security_inode_need_killpriv(struct dentry *dentry); +int security_inode_killpriv(struct dentry *dentry); +int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc); +int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); +int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); +void security_inode_getsecid(const struct inode *inode, u32 *secid); +int security_file_permission(struct file *file, int mask); +int security_file_alloc(struct file *file); +void security_file_free(struct file *file); +int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +int security_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags, + unsigned long addr, unsigned long addr_only); +int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, + unsigned long prot); +int security_file_lock(struct file *file, unsigned int cmd); +int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg); +int security_file_set_fowner(struct file *file); +int security_file_send_sigiotask(struct task_struct *tsk, + struct fown_struct *fown, int sig); +int security_file_receive(struct file *file); +int security_dentry_open(struct file *file, const struct cred *cred); +int security_task_create(unsigned long clone_flags); +void security_task_free(struct task_struct *task); +int security_cred_alloc_blank(struct cred *cred, gfp_t gfp); +void security_cred_free(struct cred *cred); +int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); +void security_transfer_creds(struct cred *new, const struct cred *old); +int security_kernel_act_as(struct cred *new, u32 secid); +int security_kernel_create_files_as(struct cred *new, struct inode *inode); +int security_kernel_module_request(char *kmod_name); +int security_task_fix_setuid(struct cred *new, const struct cred *old, + int flags); +int security_task_setpgid(struct task_struct *p, pid_t pgid); +int security_task_getpgid(struct task_struct *p); +int security_task_getsid(struct task_struct *p); +void security_task_getsecid(struct task_struct *p, u32 *secid); +int security_task_setnice(struct task_struct *p, int nice); +int security_task_setioprio(struct task_struct *p, int ioprio); +int security_task_getioprio(struct task_struct *p); +int security_task_setrlimit(struct task_struct *p, unsigned int resource, + struct rlimit *new_rlim); +int security_task_setscheduler(struct task_struct *p); +int security_task_getscheduler(struct task_struct *p); +int security_task_movememory(struct task_struct *p); +int security_task_kill(struct task_struct *p, struct siginfo *info, + int sig, u32 secid); +int security_task_wait(struct task_struct *p); +int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); +void security_task_to_inode(struct task_struct *p, struct inode *inode); +int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); +void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); +int security_msg_msg_alloc(struct msg_msg *msg); +void security_msg_msg_free(struct msg_msg *msg); +int security_msg_queue_alloc(struct msg_queue *msq); +void security_msg_queue_free(struct msg_queue *msq); +int security_msg_queue_associate(struct msg_queue *msq, int msqflg); +int security_msg_queue_msgctl(struct msg_queue *msq, int cmd); +int 
security_msg_queue_msgsnd(struct msg_queue *msq, + struct msg_msg *msg, int msqflg); +int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, + struct task_struct *target, long type, int mode); +int security_shm_alloc(struct shmid_kernel *shp); +void security_shm_free(struct shmid_kernel *shp); +int security_shm_associate(struct shmid_kernel *shp, int shmflg); +int security_shm_shmctl(struct shmid_kernel *shp, int cmd); +int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg); +int security_sem_alloc(struct sem_array *sma); +void security_sem_free(struct sem_array *sma); +int security_sem_associate(struct sem_array *sma, int semflg); +int security_sem_semctl(struct sem_array *sma, int cmd); +int security_sem_semop(struct sem_array *sma, struct sembuf *sops, + unsigned nsops, int alter); +void security_d_instantiate(struct dentry *dentry, struct inode *inode); +int security_getprocattr(struct task_struct *p, char *name, char **value); +int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size); +int security_netlink_send(struct sock *sk, struct sk_buff *skb); +int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); +int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); +void security_release_secctx(char *secdata, u32 seclen); + +int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); +int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); +int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); +#else /* CONFIG_SECURITY */ +struct security_mnt_opts { +}; + +static inline void security_init_mnt_opts(struct security_mnt_opts *opts) +{ +} + +static inline void security_free_mnt_opts(struct security_mnt_opts *opts) +{ +} + +/* + * This is the default capabilities functionality. Most of these functions + * are just stubbed out, but a few must call the proper capable code. 
+ */ + +static inline int security_init(void) +{ + return 0; +} + +static inline int security_ptrace_access_check(struct task_struct *child, + unsigned int mode) +{ + return cap_ptrace_access_check(child, mode); +} + +static inline int security_ptrace_traceme(struct task_struct *parent) +{ + return cap_ptrace_traceme(parent); +} + +static inline int security_capget(struct task_struct *target, + kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted) +{ + return cap_capget(target, effective, inheritable, permitted); +} + +static inline int security_capset(struct cred *new, + const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted) +{ + return cap_capset(new, old, effective, inheritable, permitted); +} + +static inline int security_capable(const struct cred *cred, + struct user_namespace *ns, int cap) +{ + return cap_capable(cred, ns, cap, SECURITY_CAP_AUDIT); +} + +static inline int security_capable_noaudit(const struct cred *cred, + struct user_namespace *ns, int cap) { + return cap_capable(cred, ns, cap, SECURITY_CAP_NOAUDIT); +} + +static inline int security_quotactl(int cmds, int type, int id, + struct super_block *sb) +{ + return 0; +} + +static inline int security_quota_on(struct dentry *dentry) +{ + return 0; +} + +static inline int security_syslog(int type) +{ + return 0; +} + +static inline int security_settime(const struct timespec *ts, + const struct timezone *tz) +{ + return cap_settime(ts, tz); +} + +static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) +{ + return cap_vm_enough_memory(mm, pages); +} + +static inline int security_bprm_set_creds(struct linux_binprm *bprm) +{ + return cap_bprm_set_creds(bprm); +} + +static inline int security_bprm_check(struct linux_binprm *bprm) +{ + return 0; +} + +static inline void security_bprm_committing_creds(struct linux_binprm *bprm) +{ +} + +static inline void security_bprm_committed_creds(struct linux_binprm *bprm) +{ +} + +static inline int security_bprm_secureexec(struct linux_binprm *bprm) +{ + return cap_bprm_secureexec(bprm); +} + +static inline int security_sb_alloc(struct super_block *sb) +{ + return 0; +} + +static inline void security_sb_free(struct super_block *sb) +{ } + +static inline int security_sb_copy_data(char *orig, char *copy) +{ + return 0; +} + +static inline int security_sb_remount(struct super_block *sb, void *data) +{ + return 0; +} + +static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data) +{ + return 0; +} + +static inline int security_sb_show_options(struct seq_file *m, + struct super_block *sb) +{ + return 0; +} + +static inline int security_sb_statfs(struct dentry *dentry) +{ + return 0; +} + +static inline int security_sb_mount(char *dev_name, struct path *path, + char *type, unsigned long flags, + void *data) +{ + return 0; +} + +static inline int security_sb_umount(struct vfsmount *mnt, int flags) +{ + return 0; +} + +static inline int security_sb_pivotroot(struct path *old_path, + struct path *new_path) +{ + return 0; +} + +static inline int security_sb_set_mnt_opts(struct super_block *sb, + struct security_mnt_opts *opts) +{ + return 0; +} + +static inline void security_sb_clone_mnt_opts(const struct super_block *oldsb, + struct super_block *newsb) +{ } + +static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) +{ + return 0; +} + +static inline int security_inode_alloc(struct inode *inode) +{ + return 0; +} + +static 
inline void security_inode_free(struct inode *inode) +{ } + +static inline int security_inode_init_security(struct inode *inode, + struct inode *dir, + const struct qstr *qstr, + const initxattrs initxattrs, + void *fs_data) +{ + return 0; +} + +static inline int security_old_inode_init_security(struct inode *inode, + struct inode *dir, + const struct qstr *qstr, + char **name, void **value, + size_t *len) +{ + return -EOPNOTSUPP; +} + +static inline int security_inode_create(struct inode *dir, + struct dentry *dentry, + umode_t mode) +{ + return 0; +} + +static inline int security_inode_link(struct dentry *old_dentry, + struct inode *dir, + struct dentry *new_dentry) +{ + return 0; +} + +static inline int security_inode_unlink(struct inode *dir, + struct dentry *dentry) +{ + return 0; +} + +static inline int security_inode_symlink(struct inode *dir, + struct dentry *dentry, + const char *old_name) +{ + return 0; +} + +static inline int security_inode_mkdir(struct inode *dir, + struct dentry *dentry, + int mode) +{ + return 0; +} + +static inline int security_inode_rmdir(struct inode *dir, + struct dentry *dentry) +{ + return 0; +} + +static inline int security_inode_mknod(struct inode *dir, + struct dentry *dentry, + int mode, dev_t dev) +{ + return 0; +} + +static inline int security_inode_rename(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry) +{ + return 0; +} + +static inline int security_inode_readlink(struct dentry *dentry) +{ + return 0; +} + +static inline int security_inode_follow_link(struct dentry *dentry, + struct nameidata *nd) +{ + return 0; +} + +static inline int security_inode_permission(struct inode *inode, int mask) +{ + return 0; +} + +static inline int security_inode_setattr(struct dentry *dentry, + struct iattr *attr) +{ + return 0; +} + +static inline int security_inode_getattr(struct vfsmount *mnt, + struct dentry *dentry) +{ + return 0; +} + +static inline int security_inode_setxattr(struct dentry *dentry, + const char *name, const void *value, size_t size, int flags) +{ + return cap_inode_setxattr(dentry, name, value, size, flags); +} + +static inline void security_inode_post_setxattr(struct dentry *dentry, + const char *name, const void *value, size_t size, int flags) +{ } + +static inline int security_inode_getxattr(struct dentry *dentry, + const char *name) +{ + return 0; +} + +static inline int security_inode_listxattr(struct dentry *dentry) +{ + return 0; +} + +static inline int security_inode_removexattr(struct dentry *dentry, + const char *name) +{ + return cap_inode_removexattr(dentry, name); +} + +static inline int security_inode_need_killpriv(struct dentry *dentry) +{ + return cap_inode_need_killpriv(dentry); +} + +static inline int security_inode_killpriv(struct dentry *dentry) +{ + return cap_inode_killpriv(dentry); +} + +static inline int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc) +{ + return -EOPNOTSUPP; +} + +static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) +{ + return -EOPNOTSUPP; +} + +static inline int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) +{ + return 0; +} + +static inline void security_inode_getsecid(const struct inode *inode, u32 *secid) +{ + *secid = 0; +} + +static inline int security_file_permission(struct file *file, int mask) +{ + return 0; +} + +static inline int security_file_alloc(struct file *file) +{ 
+ return 0; +} + +static inline void security_file_free(struct file *file) +{ } + +static inline int security_file_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return 0; +} + +static inline int security_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, + unsigned long flags, + unsigned long addr, + unsigned long addr_only) +{ + return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); +} + +static inline int security_file_mprotect(struct vm_area_struct *vma, + unsigned long reqprot, + unsigned long prot) +{ + return 0; +} + +static inline int security_file_lock(struct file *file, unsigned int cmd) +{ + return 0; +} + +static inline int security_file_fcntl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return 0; +} + +static inline int security_file_set_fowner(struct file *file) +{ + return 0; +} + +static inline int security_file_send_sigiotask(struct task_struct *tsk, + struct fown_struct *fown, + int sig) +{ + return 0; +} + +static inline int security_file_receive(struct file *file) +{ + return 0; +} + +static inline int security_dentry_open(struct file *file, + const struct cred *cred) +{ + return 0; +} + +static inline int security_task_create(unsigned long clone_flags) +{ + return 0; +} + +static inline void security_task_free(struct task_struct *task) +{ } + +static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) +{ + return 0; +} + +static inline void security_cred_free(struct cred *cred) +{ } + +static inline int security_prepare_creds(struct cred *new, + const struct cred *old, + gfp_t gfp) +{ + return 0; +} + +static inline void security_transfer_creds(struct cred *new, + const struct cred *old) +{ +} + +static inline int security_kernel_act_as(struct cred *cred, u32 secid) +{ + return 0; +} + +static inline int security_kernel_create_files_as(struct cred *cred, + struct inode *inode) +{ + return 0; +} + +static inline int security_kernel_module_request(char *kmod_name) +{ + return 0; +} + +static inline int security_task_fix_setuid(struct cred *new, + const struct cred *old, + int flags) +{ + return cap_task_fix_setuid(new, old, flags); +} + +static inline int security_task_setpgid(struct task_struct *p, pid_t pgid) +{ + return 0; +} + +static inline int security_task_getpgid(struct task_struct *p) +{ + return 0; +} + +static inline int security_task_getsid(struct task_struct *p) +{ + return 0; +} + +static inline void security_task_getsecid(struct task_struct *p, u32 *secid) +{ + *secid = 0; +} + +static inline int security_task_setnice(struct task_struct *p, int nice) +{ + return cap_task_setnice(p, nice); +} + +static inline int security_task_setioprio(struct task_struct *p, int ioprio) +{ + return cap_task_setioprio(p, ioprio); +} + +static inline int security_task_getioprio(struct task_struct *p) +{ + return 0; +} + +static inline int security_task_setrlimit(struct task_struct *p, + unsigned int resource, + struct rlimit *new_rlim) +{ + return 0; +} + +static inline int security_task_setscheduler(struct task_struct *p) +{ + return cap_task_setscheduler(p); +} + +static inline int security_task_getscheduler(struct task_struct *p) +{ + return 0; +} + +static inline int security_task_movememory(struct task_struct *p) +{ + return 0; +} + +static inline int security_task_kill(struct task_struct *p, + struct siginfo *info, int sig, + u32 secid) +{ + return 0; +} + +static inline int security_task_wait(struct task_struct *p) +{ + return 0; +} + +static inline int security_task_prctl(int 
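+/*
+ * Illustrative call site (not part of the original header): core code checks
+ * these hooks before acting, and with CONFIG_SECURITY=n the stubs above
+ * collapse to the cap_* defaults or a plain "return 0".  A sketch of an
+ * ioctl path:
+ *
+ *	int err = security_file_ioctl(file, cmd, arg);
+ *	if (err)
+ *		return err;
+ *	... carry out the ioctl ...
+ */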
option, unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5) +{ + return cap_task_prctl(option, arg2, arg3, arg4, arg5); +} + +static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) +{ } + +static inline int security_ipc_permission(struct kern_ipc_perm *ipcp, + short flag) +{ + return 0; +} + +static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid) +{ + *secid = 0; +} + +static inline int security_msg_msg_alloc(struct msg_msg *msg) +{ + return 0; +} + +static inline void security_msg_msg_free(struct msg_msg *msg) +{ } + +static inline int security_msg_queue_alloc(struct msg_queue *msq) +{ + return 0; +} + +static inline void security_msg_queue_free(struct msg_queue *msq) +{ } + +static inline int security_msg_queue_associate(struct msg_queue *msq, + int msqflg) +{ + return 0; +} + +static inline int security_msg_queue_msgctl(struct msg_queue *msq, int cmd) +{ + return 0; +} + +static inline int security_msg_queue_msgsnd(struct msg_queue *msq, + struct msg_msg *msg, int msqflg) +{ + return 0; +} + +static inline int security_msg_queue_msgrcv(struct msg_queue *msq, + struct msg_msg *msg, + struct task_struct *target, + long type, int mode) +{ + return 0; +} + +static inline int security_shm_alloc(struct shmid_kernel *shp) +{ + return 0; +} + +static inline void security_shm_free(struct shmid_kernel *shp) +{ } + +static inline int security_shm_associate(struct shmid_kernel *shp, + int shmflg) +{ + return 0; +} + +static inline int security_shm_shmctl(struct shmid_kernel *shp, int cmd) +{ + return 0; +} + +static inline int security_shm_shmat(struct shmid_kernel *shp, + char __user *shmaddr, int shmflg) +{ + return 0; +} + +static inline int security_sem_alloc(struct sem_array *sma) +{ + return 0; +} + +static inline void security_sem_free(struct sem_array *sma) +{ } + +static inline int security_sem_associate(struct sem_array *sma, int semflg) +{ + return 0; +} + +static inline int security_sem_semctl(struct sem_array *sma, int cmd) +{ + return 0; +} + +static inline int security_sem_semop(struct sem_array *sma, + struct sembuf *sops, unsigned nsops, + int alter) +{ + return 0; +} + +static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode) +{ } + +static inline int security_getprocattr(struct task_struct *p, char *name, char **value) +{ + return -EINVAL; +} + +static inline int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size) +{ + return -EINVAL; +} + +static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb) +{ + return cap_netlink_send(sk, skb); +} + +static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +{ + return -EOPNOTSUPP; +} + +static inline int security_secctx_to_secid(const char *secdata, + u32 seclen, + u32 *secid) +{ + return -EOPNOTSUPP; +} + +static inline void security_release_secctx(char *secdata, u32 seclen) +{ +} + +static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) +{ + return -EOPNOTSUPP; +} +static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) +{ + return -EOPNOTSUPP; +} +static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_SECURITY */ + +#ifdef CONFIG_SECURITY_NETWORK + +int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk); +int security_unix_may_send(struct socket *sock, struct 
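+/*
+ * Illustrative sketch (not part of the original header): code that turns a
+ * secid into a printable context is expected to pair the two calls above:
+ *
+ *	char *ctx;
+ *	u32 len;
+ *
+ *	if (!security_secid_to_secctx(secid, &ctx, &len)) {
+ *		... log or copy the len bytes at ctx ...
+ *		security_release_secctx(ctx, len);
+ *	}
+ *
+ * With CONFIG_SECURITY=n the lookup simply fails with -EOPNOTSUPP.
+ */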
socket *other); +int security_socket_create(int family, int type, int protocol, int kern); +int security_socket_post_create(struct socket *sock, int family, + int type, int protocol, int kern); +int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen); +int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen); +int security_socket_listen(struct socket *sock, int backlog); +int security_socket_accept(struct socket *sock, struct socket *newsock); +int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size); +int security_socket_recvmsg(struct socket *sock, struct msghdr *msg, + int size, int flags); +int security_socket_getsockname(struct socket *sock); +int security_socket_getpeername(struct socket *sock); +int security_socket_getsockopt(struct socket *sock, int level, int optname); +int security_socket_setsockopt(struct socket *sock, int level, int optname); +int security_socket_shutdown(struct socket *sock, int how); +int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb); +int security_socket_getpeersec_stream(struct socket *sock, char __user *optval, + int __user *optlen, unsigned len); +int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid); +int security_sk_alloc(struct sock *sk, int family, gfp_t priority); +void security_sk_free(struct sock *sk); +void security_sk_clone(const struct sock *sk, struct sock *newsk); +void security_sk_classify_flow(struct sock *sk, struct flowi *fl); +void security_req_classify_flow(const struct request_sock *req, struct flowi *fl); +void security_sock_graft(struct sock*sk, struct socket *parent); +int security_inet_conn_request(struct sock *sk, + struct sk_buff *skb, struct request_sock *req); +void security_inet_csk_clone(struct sock *newsk, + const struct request_sock *req); +void security_inet_conn_established(struct sock *sk, + struct sk_buff *skb); +int security_secmark_relabel_packet(u32 secid); +void security_secmark_refcount_inc(void); +void security_secmark_refcount_dec(void); +int security_tun_dev_create(void); +void security_tun_dev_post_create(struct sock *sk); +int security_tun_dev_attach(struct sock *sk); + +#else /* CONFIG_SECURITY_NETWORK */ +static inline int security_unix_stream_connect(struct sock *sock, + struct sock *other, + struct sock *newsk) +{ + return 0; +} + +static inline int security_unix_may_send(struct socket *sock, + struct socket *other) +{ + return 0; +} + +static inline int security_socket_create(int family, int type, + int protocol, int kern) +{ + return 0; +} + +static inline int security_socket_post_create(struct socket *sock, + int family, + int type, + int protocol, int kern) +{ + return 0; +} + +static inline int security_socket_bind(struct socket *sock, + struct sockaddr *address, + int addrlen) +{ + return 0; +} + +static inline int security_socket_connect(struct socket *sock, + struct sockaddr *address, + int addrlen) +{ + return 0; +} + +static inline int security_socket_listen(struct socket *sock, int backlog) +{ + return 0; +} + +static inline int security_socket_accept(struct socket *sock, + struct socket *newsock) +{ + return 0; +} + +static inline int security_socket_sendmsg(struct socket *sock, + struct msghdr *msg, int size) +{ + return 0; +} + +static inline int security_socket_recvmsg(struct socket *sock, + struct msghdr *msg, int size, + int flags) +{ + return 0; +} + +static inline int security_socket_getsockname(struct socket *sock) +{ + return 0; +} + +static inline int 
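+/*
+ * Illustrative sketch (not part of the original header): the socket hooks
+ * are called from the socket system calls just before the protocol handler,
+ * roughly:
+ *
+ *	err = security_socket_bind(sock, (struct sockaddr *)&addr, addrlen);
+ *	if (err)
+ *		return err;
+ *	err = sock->ops->bind(sock, (struct sockaddr *)&addr, addrlen);
+ */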
security_socket_getpeername(struct socket *sock) +{ + return 0; +} + +static inline int security_socket_getsockopt(struct socket *sock, + int level, int optname) +{ + return 0; +} + +static inline int security_socket_setsockopt(struct socket *sock, + int level, int optname) +{ + return 0; +} + +static inline int security_socket_shutdown(struct socket *sock, int how) +{ + return 0; +} +static inline int security_sock_rcv_skb(struct sock *sk, + struct sk_buff *skb) +{ + return 0; +} + +static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval, + int __user *optlen, unsigned len) +{ + return -ENOPROTOOPT; +} + +static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) +{ + return -ENOPROTOOPT; +} + +static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) +{ + return 0; +} + +static inline void security_sk_free(struct sock *sk) +{ +} + +static inline void security_sk_clone(const struct sock *sk, struct sock *newsk) +{ +} + +static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl) +{ +} + +static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) +{ +} + +static inline void security_sock_graft(struct sock *sk, struct socket *parent) +{ +} + +static inline int security_inet_conn_request(struct sock *sk, + struct sk_buff *skb, struct request_sock *req) +{ + return 0; +} + +static inline void security_inet_csk_clone(struct sock *newsk, + const struct request_sock *req) +{ +} + +static inline void security_inet_conn_established(struct sock *sk, + struct sk_buff *skb) +{ +} + +static inline int security_secmark_relabel_packet(u32 secid) +{ + return 0; +} + +static inline void security_secmark_refcount_inc(void) +{ +} + +static inline void security_secmark_refcount_dec(void) +{ +} + +static inline int security_tun_dev_create(void) +{ + return 0; +} + +static inline void security_tun_dev_post_create(struct sock *sk) +{ +} + +static inline int security_tun_dev_attach(struct sock *sk) +{ + return 0; +} +#endif /* CONFIG_SECURITY_NETWORK */ + +#ifdef CONFIG_SECURITY_NETWORK_XFRM + +int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx); +int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); +void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); +int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); +int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx); +int security_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid); +int security_xfrm_state_delete(struct xfrm_state *x); +void security_xfrm_state_free(struct xfrm_state *x); +int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); +int security_xfrm_state_pol_flow_match(struct xfrm_state *x, + struct xfrm_policy *xp, + const struct flowi *fl); +int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid); +void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); + +#else /* CONFIG_SECURITY_NETWORK_XFRM */ + +static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) +{ + return 0; +} + +static inline int security_xfrm_policy_clone(struct xfrm_sec_ctx *old, struct xfrm_sec_ctx **new_ctxp) +{ + return 0; +} + +static inline void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx) +{ +} + +static inline int security_xfrm_policy_delete(struct 
xfrm_sec_ctx *ctx) +{ + return 0; +} + +static inline int security_xfrm_state_alloc(struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx) +{ + return 0; +} + +static inline int security_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid) +{ + return 0; +} + +static inline void security_xfrm_state_free(struct xfrm_state *x) +{ +} + +static inline int security_xfrm_state_delete(struct xfrm_state *x) +{ + return 0; +} + +static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) +{ + return 0; +} + +static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x, + struct xfrm_policy *xp, const struct flowi *fl) +{ + return 1; +} + +static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) +{ + return 0; +} + +static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) +{ +} + +#endif /* CONFIG_SECURITY_NETWORK_XFRM */ + +#ifdef CONFIG_SECURITY_PATH +int security_path_unlink(struct path *dir, struct dentry *dentry); +int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode); +int security_path_rmdir(struct path *dir, struct dentry *dentry); +int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode, + unsigned int dev); +int security_path_truncate(struct path *path); +int security_path_symlink(struct path *dir, struct dentry *dentry, + const char *old_name); +int security_path_link(struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry); +int security_path_rename(struct path *old_dir, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry); +int security_path_chmod(struct path *path, umode_t mode); +int security_path_chown(struct path *path, uid_t uid, gid_t gid); +int security_path_chroot(struct path *path); +#else /* CONFIG_SECURITY_PATH */ +static inline int security_path_unlink(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mkdir(struct path *dir, struct dentry *dentry, + umode_t mode) +{ + return 0; +} + +static inline int security_path_rmdir(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mknod(struct path *dir, struct dentry *dentry, + umode_t mode, unsigned int dev) +{ + return 0; +} + +static inline int security_path_truncate(struct path *path) +{ + return 0; +} + +static inline int security_path_symlink(struct path *dir, struct dentry *dentry, + const char *old_name) +{ + return 0; +} + +static inline int security_path_link(struct dentry *old_dentry, + struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} + +static inline int security_path_rename(struct path *old_dir, + struct dentry *old_dentry, + struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} + +static inline int security_path_chmod(struct path *path, umode_t mode) +{ + return 0; +} + +static inline int security_path_chown(struct path *path, uid_t uid, gid_t gid) +{ + return 0; +} + +static inline int security_path_chroot(struct path *path) +{ + return 0; +} +#endif /* CONFIG_SECURITY_PATH */ + +#ifdef CONFIG_KEYS +#ifdef CONFIG_SECURITY + +int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); +void security_key_free(struct key *key); +int security_key_permission(key_ref_t key_ref, + const struct cred *cred, key_perm_t perm); +int security_key_getsecurity(struct key *key, char **_buffer); + +#else + +static inline int security_key_alloc(struct key *key, + 
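+/*
+ * Illustrative sketch (not part of the original header): the path-based
+ * hooks above are consulted with the parent's <vfsmount, dentry> pair right
+ * before the matching VFS operation, e.g. for mkdir:
+ *
+ *	error = security_path_mkdir(&path, dentry, mode);
+ *	if (!error)
+ *		error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ */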
const struct cred *cred, + unsigned long flags) +{ + return 0; +} + +static inline void security_key_free(struct key *key) +{ +} + +static inline int security_key_permission(key_ref_t key_ref, + const struct cred *cred, + key_perm_t perm) +{ + return 0; +} + +static inline int security_key_getsecurity(struct key *key, char **_buffer) +{ + *_buffer = NULL; + return 0; +} + +#endif +#endif /* CONFIG_KEYS */ + +#ifdef CONFIG_AUDIT +#ifdef CONFIG_SECURITY +int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); +int security_audit_rule_known(struct audit_krule *krule); +int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule, + struct audit_context *actx); +void security_audit_rule_free(void *lsmrule); + +#else + +static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr, + void **lsmrule) +{ + return 0; +} + +static inline int security_audit_rule_known(struct audit_krule *krule) +{ + return 0; +} + +static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, + void *lsmrule, struct audit_context *actx) +{ + return 0; +} + +static inline void security_audit_rule_free(void *lsmrule) +{ } + +#endif /* CONFIG_SECURITY */ +#endif /* CONFIG_AUDIT */ + +#ifdef CONFIG_SECURITYFS + +extern struct dentry *securityfs_create_file(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); +extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); +extern void securityfs_remove(struct dentry *dentry); + +#else /* CONFIG_SECURITYFS */ + +static inline struct dentry *securityfs_create_dir(const char *name, + struct dentry *parent) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *securityfs_create_file(const char *name, + umode_t mode, + struct dentry *parent, + void *data, + const struct file_operations *fops) +{ + return ERR_PTR(-ENODEV); +} + +static inline void securityfs_remove(struct dentry *dentry) +{} + +#endif + +#ifdef CONFIG_SECURITY + +static inline char *alloc_secdata(void) +{ + return (char *)get_zeroed_page(GFP_KERNEL); +} + +static inline void free_secdata(void *secdata) +{ + free_page((unsigned long)secdata); +} + +#else + +static inline char *alloc_secdata(void) +{ + return (char *)1; +} + +static inline void free_secdata(void *secdata) +{ } +#endif /* CONFIG_SECURITY */ + +#endif /* ! 
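+ *
+ * Illustrative securityfs usage (not part of the original header; "mylsm"
+ * and mylsm_fops are hypothetical):
+ *
+ *	struct dentry *dir, *file;
+ *
+ *	dir = securityfs_create_dir("mylsm", NULL);
+ *	if (IS_ERR(dir))
+ *		return PTR_ERR(dir);
+ *	file = securityfs_create_file("policy", 0600, dir, NULL, &mylsm_fops);
+ *	if (IS_ERR(file)) {
+ *		securityfs_remove(dir);
+ *		return PTR_ERR(file);
+ *	}
+ *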
__LINUX_SECURITY_H */ + diff --git a/include/linux/selection.h b/include/linux/selection.h new file mode 100644 index 00000000..85193aa8 --- /dev/null +++ b/include/linux/selection.h @@ -0,0 +1,44 @@ +/* + * selection.h + * + * Interface between console.c, tty_io.c, vt.c, vc_screen.c and selection.c + */ + +#ifndef _LINUX_SELECTION_H_ +#define _LINUX_SELECTION_H_ + +#include <linux/tiocl.h> +#include <linux/vt_buffer.h> + +struct tty_struct; + +extern struct vc_data *sel_cons; +struct tty_struct; + +extern void clear_selection(void); +extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty); +extern int paste_selection(struct tty_struct *tty); +extern int sel_loadlut(char __user *p); +extern int mouse_reporting(void); +extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry); + +extern int console_blanked; + +extern unsigned char color_table[]; +extern int default_red[]; +extern int default_grn[]; +extern int default_blu[]; + +extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed); +extern u16 screen_glyph(struct vc_data *vc, int offset); +extern void complement_pos(struct vc_data *vc, int offset); +extern void invert_screen(struct vc_data *vc, int offset, int count, int shift); + +extern void getconsxy(struct vc_data *vc, unsigned char *p); +extern void putconsxy(struct vc_data *vc, unsigned char *p); + +extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org); +extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org); +extern void vcs_scr_updated(struct vc_data *vc); + +#endif diff --git a/include/linux/selinux.h b/include/linux/selinux.h new file mode 100644 index 00000000..44f45961 --- /dev/null +++ b/include/linux/selinux.h @@ -0,0 +1,35 @@ +/* + * SELinux services exported to the rest of the kernel. + * + * Author: James Morris <jmorris@redhat.com> + * + * Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com> + * Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com> + * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, + * as published by the Free Software Foundation. + */ +#ifndef _LINUX_SELINUX_H +#define _LINUX_SELINUX_H + +struct selinux_audit_rule; +struct audit_context; +struct kern_ipc_perm; + +#ifdef CONFIG_SECURITY_SELINUX + +/** + * selinux_is_enabled - is SELinux enabled? + */ +bool selinux_is_enabled(void); +#else + +static inline bool selinux_is_enabled(void) +{ + return false; +} +#endif /* CONFIG_SECURITY_SELINUX */ + +#endif /* _LINUX_SELINUX_H */ diff --git a/include/linux/selinux_netlink.h b/include/linux/selinux_netlink.h new file mode 100644 index 00000000..d2397977 --- /dev/null +++ b/include/linux/selinux_netlink.h @@ -0,0 +1,50 @@ +/* + * Netlink event notifications for SELinux. + * + * Author: James Morris <jmorris@redhat.com> + * + * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, + * as published by the Free Software Foundation. + */ +#ifndef _LINUX_SELINUX_NETLINK_H +#define _LINUX_SELINUX_NETLINK_H + +#include <linux/types.h> + +/* Message types. 
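+ *
+ * Illustrative userspace sketch (not part of the original header): a monitor
+ * can receive these events on a NETLINK_SELINUX socket subscribed to the AVC
+ * multicast group; error handling is omitted:
+ *
+ *	int fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_SELINUX);
+ *	struct sockaddr_nl addr = { .nl_family = AF_NETLINK,
+ *				    .nl_groups = SELNL_GRP_AVC };
+ *
+ *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+ *
+ * A SELNL_MSG_SETENFORCE message carries struct selnl_msg_setenforce below.
+ *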
*/ +#define SELNL_MSG_BASE 0x10 +enum { + SELNL_MSG_SETENFORCE = SELNL_MSG_BASE, + SELNL_MSG_POLICYLOAD, + SELNL_MSG_MAX +}; + +#ifndef __KERNEL__ +/* Multicast groups - backwards compatiblility for userspace */ +#define SELNL_GRP_NONE 0x00000000 +#define SELNL_GRP_AVC 0x00000001 /* AVC notifications */ +#define SELNL_GRP_ALL 0xffffffff +#endif + +enum selinux_nlgroups { + SELNLGRP_NONE, +#define SELNLGRP_NONE SELNLGRP_NONE + SELNLGRP_AVC, +#define SELNLGRP_AVC SELNLGRP_AVC + __SELNLGRP_MAX +}; +#define SELNLGRP_MAX (__SELNLGRP_MAX - 1) + +/* Message structures */ +struct selnl_msg_setenforce { + __s32 val; +}; + +struct selnl_msg_policyload { + __u32 seqno; +}; + +#endif /* _LINUX_SELINUX_NETLINK_H */ diff --git a/include/linux/sem.h b/include/linux/sem.h new file mode 100644 index 00000000..10d6b226 --- /dev/null +++ b/include/linux/sem.h @@ -0,0 +1,127 @@ +#ifndef _LINUX_SEM_H +#define _LINUX_SEM_H + +#include <linux/ipc.h> + +/* semop flags */ +#define SEM_UNDO 0x1000 /* undo the operation on exit */ + +/* semctl Command Definitions. */ +#define GETPID 11 /* get sempid */ +#define GETVAL 12 /* get semval */ +#define GETALL 13 /* get all semval's */ +#define GETNCNT 14 /* get semncnt */ +#define GETZCNT 15 /* get semzcnt */ +#define SETVAL 16 /* set semval */ +#define SETALL 17 /* set all semval's */ + +/* ipcs ctl cmds */ +#define SEM_STAT 18 +#define SEM_INFO 19 + +/* Obsolete, used only for backwards compatibility and libc5 compiles */ +struct semid_ds { + struct ipc_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + __kernel_time_t sem_ctime; /* last change time */ + struct sem *sem_base; /* ptr to first semaphore in array */ + struct sem_queue *sem_pending; /* pending operations to be processed */ + struct sem_queue **sem_pending_last; /* last pending operation */ + struct sem_undo *undo; /* undo requests on this array */ + unsigned short sem_nsems; /* no. of semaphores in array */ +}; + +/* Include the definition of semid64_ds */ +#include <asm/sembuf.h> + +/* semop system calls takes an array of these. */ +struct sembuf { + unsigned short sem_num; /* semaphore index in array */ + short sem_op; /* semaphore operation */ + short sem_flg; /* operation flags */ +}; + +/* arg for semctl system calls. 
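+ *
+ * Illustrative userspace sketch (not part of the original header): create a
+ * one-semaphore set, initialise it with SETVAL and take it with semop();
+ * error handling is omitted, and note that glibc expects the program to
+ * provide its own definition of union semun:
+ *
+ *	union semun arg;
+ *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
+ *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
+ *
+ *	arg.val = 1;
+ *	semctl(id, 0, SETVAL, arg);
+ *	semop(id, &op, 1);
+ *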
*/ +union semun { + int val; /* value for SETVAL */ + struct semid_ds __user *buf; /* buffer for IPC_STAT & IPC_SET */ + unsigned short __user *array; /* array for GETALL & SETALL */ + struct seminfo __user *__buf; /* buffer for IPC_INFO */ + void __user *__pad; +}; + +struct seminfo { + int semmap; + int semmni; + int semmns; + int semmnu; + int semmsl; + int semopm; + int semume; + int semusz; + int semvmx; + int semaem; +}; + +#define SEMMNI 128 /* <= IPCMNI max # of semaphore identifiers */ +#define SEMMSL 250 /* <= 8 000 max num of semaphores per id */ +#define SEMMNS (SEMMNI*SEMMSL) /* <= INT_MAX max # of semaphores in system */ +#define SEMOPM 32 /* <= 1 000 max num of ops per semop call */ +#define SEMVMX 32767 /* <= 32767 semaphore maximum value */ +#define SEMAEM SEMVMX /* adjust on exit max value */ + +/* unused */ +#define SEMUME SEMOPM /* max num of undo entries per process */ +#define SEMMNU SEMMNS /* num of undo structures system wide */ +#define SEMMAP SEMMNS /* # of entries in semaphore map */ +#define SEMUSZ 20 /* sizeof struct sem_undo */ + +#ifdef __KERNEL__ +#include <linux/atomic.h> +#include <linux/rcupdate.h> +#include <linux/cache.h> + +struct task_struct; + +/* One sem_array data structure for each set of semaphores in the system. */ +struct sem_array { + struct kern_ipc_perm ____cacheline_aligned_in_smp + sem_perm; /* permissions .. see ipc.h */ + time_t sem_otime; /* last semop time */ + time_t sem_ctime; /* last change time */ + struct sem *sem_base; /* ptr to first semaphore in array */ + struct list_head sem_pending; /* pending operations to be processed */ + struct list_head list_id; /* undo requests on this array */ + int sem_nsems; /* no. of semaphores in array */ + int complex_count; /* pending complex operations */ +}; + +#ifdef CONFIG_SYSVIPC + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk); +extern void exit_sem(struct task_struct *tsk); + +#else + +struct sysv_sem { + /* empty */ +}; + +static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) +{ + return 0; +} + +static inline void exit_sem(struct task_struct *tsk) +{ + return; +} +#endif + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SEM_H */ diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h new file mode 100644 index 00000000..dc368b8c --- /dev/null +++ b/include/linux/semaphore.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2008 Intel Corporation + * Author: Matthew Wilcox <willy@linux.intel.com> + * + * Distributed under the terms of the GNU GPL, version 2 + * + * Please see kernel/semaphore.c for documentation of these functions + */ +#ifndef __LINUX_SEMAPHORE_H +#define __LINUX_SEMAPHORE_H + +#include <linux/list.h> +#include <linux/spinlock.h> + +/* Please don't access any members of this structure directly */ +struct semaphore { + raw_spinlock_t lock; + unsigned int count; + struct list_head wait_list; +}; + +#define __SEMAPHORE_INITIALIZER(name, n) \ +{ \ + .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \ + .count = n, \ + .wait_list = LIST_HEAD_INIT((name).wait_list), \ +} + +#define DEFINE_SEMAPHORE(name) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) + +static inline void sema_init(struct semaphore *sem, int val) +{ + static struct lock_class_key __key; + *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); + lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); +} + +extern void down(struct semaphore *sem); +extern int 
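+/*
+ * Illustrative usage (not part of the original header): a binary semaphore
+ * guarding a driver's device state, taken interruptibly; mydev_sem is
+ * hypothetical:
+ *
+ *	static DEFINE_SEMAPHORE(mydev_sem);
+ *
+ *	if (down_interruptible(&mydev_sem))
+ *		return -ERESTARTSYS;
+ *	... touch the hardware ...
+ *	up(&mydev_sem);
+ */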
__must_check down_interruptible(struct semaphore *sem); +extern int __must_check down_killable(struct semaphore *sem); +extern int __must_check down_trylock(struct semaphore *sem); +extern int __must_check down_timeout(struct semaphore *sem, long jiffies); +extern void up(struct semaphore *sem); + +#endif /* __LINUX_SEMAPHORE_H */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h new file mode 100644 index 00000000..fc61854f --- /dev/null +++ b/include/linux/seq_file.h @@ -0,0 +1,160 @@ +#ifndef _LINUX_SEQ_FILE_H +#define _LINUX_SEQ_FILE_H + +#include <linux/types.h> +#include <linux/string.h> +#include <linux/bug.h> +#include <linux/mutex.h> +#include <linux/cpumask.h> +#include <linux/nodemask.h> + +struct seq_operations; +struct file; +struct path; +struct inode; +struct dentry; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + loff_t index; + loff_t read_pos; + u64 version; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + void *private; +}; + +struct seq_operations { + void * (*start) (struct seq_file *m, loff_t *pos); + void (*stop) (struct seq_file *m, void *v); + void * (*next) (struct seq_file *m, void *v, loff_t *pos); + int (*show) (struct seq_file *m, void *v); +}; + +#define SEQ_SKIP 1 + +/** + * seq_get_buf - get buffer to write arbitrary data to + * @m: the seq_file handle + * @bufp: the beginning of the buffer is stored here + * + * Return the number of bytes available in the buffer, or zero if + * there's no space. + */ +static inline size_t seq_get_buf(struct seq_file *m, char **bufp) +{ + BUG_ON(m->count > m->size); + if (m->count < m->size) + *bufp = m->buf + m->count; + else + *bufp = NULL; + + return m->size - m->count; +} + +/** + * seq_commit - commit data to the buffer + * @m: the seq_file handle + * @num: the number of bytes to commit + * + * Commit @num bytes of data written to a buffer previously acquired + * by seq_buf_get. To signal an error condition, or that the data + * didn't fit in the available space, pass a negative @num value. 
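+ *
+ * Illustrative pairing with seq_get_buf() (not part of the original header;
+ * "value" is hypothetical):
+ *
+ *	char *p;
+ *	size_t size = seq_get_buf(m, &p);
+ *	int len = size ? scnprintf(p, size, "%u\n", value) : -1;
+ *
+ *	seq_commit(m, len);
+ *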
+ */ +static inline void seq_commit(struct seq_file *m, int num) +{ + if (num < 0) { + m->count = m->size; + } else { + BUG_ON(m->count + num > m->size); + m->count += num; + } +} + +char *mangle_path(char *s, const char *p, const char *esc); +int seq_open(struct file *, const struct seq_operations *); +ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); +loff_t seq_lseek(struct file *, loff_t, int); +int seq_release(struct inode *, struct file *); +int seq_escape(struct seq_file *, const char *, const char *); +int seq_putc(struct seq_file *m, char c); +int seq_puts(struct seq_file *m, const char *s); +int seq_write(struct seq_file *seq, const void *data, size_t len); + +__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...); + +int seq_path(struct seq_file *, const struct path *, const char *); +int seq_dentry(struct seq_file *, struct dentry *, const char *); +int seq_path_root(struct seq_file *m, const struct path *path, + const struct path *root, const char *esc); +int seq_bitmap(struct seq_file *m, const unsigned long *bits, + unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) +{ + return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids); +} + +static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) +{ + return seq_bitmap(m, mask->bits, MAX_NUMNODES); +} + +int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, + unsigned int nr_bits); + +static inline int seq_cpumask_list(struct seq_file *m, + const struct cpumask *mask) +{ + return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids); +} + +static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask) +{ + return seq_bitmap_list(m, mask->bits, MAX_NUMNODES); +} + +int single_open(struct file *, int (*)(struct seq_file *, void *), void *); +int single_release(struct inode *, struct file *); +void *__seq_open_private(struct file *, const struct seq_operations *, int); +int seq_open_private(struct file *, const struct seq_operations *, int); +int seq_release_private(struct inode *, struct file *); +int seq_put_decimal_ull(struct seq_file *m, char delimiter, + unsigned long long num); +int seq_put_decimal_ll(struct seq_file *m, char delimiter, + long long num); + +#define SEQ_START_TOKEN ((void *)1) +/* + * Helpers for iteration over list_head-s in seq_files + */ + +extern struct list_head *seq_list_start(struct list_head *head, + loff_t pos); +extern struct list_head *seq_list_start_head(struct list_head *head, + loff_t pos); +extern struct list_head *seq_list_next(void *v, struct list_head *head, + loff_t *ppos); + +/* + * Helpers for iteration over hlist_head-s in seq_files + */ + +extern struct hlist_node *seq_hlist_start(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, + loff_t *ppos); + +extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_next_rcu(void *v, + struct hlist_head *head, + loff_t *ppos); +#endif diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h new file mode 100644 index 00000000..32c89bbe --- /dev/null +++ b/include/linux/seq_file_net.h @@ -0,0 +1,30 @@ +#ifndef __SEQ_FILE_NET_H__ +#define __SEQ_FILE_NET_H__ + +#include <linux/seq_file.h> + +struct net; +extern struct net 
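+/*
+ * Illustrative single_open() pattern for the seq_file API above (not part of
+ * the original headers; foo_show, foo_open and foo_state are hypothetical):
+ *
+ *	static int foo_show(struct seq_file *m, void *v)
+ *	{
+ *		seq_printf(m, "state: %d\n", foo_state);
+ *		return 0;
+ *	}
+ *
+ *	static int foo_open(struct inode *inode, struct file *file)
+ *	{
+ *		return single_open(file, foo_show, NULL);
+ *	}
+ *
+ *	static const struct file_operations foo_fops = {
+ *		.open		= foo_open,
+ *		.read		= seq_read,
+ *		.llseek		= seq_lseek,
+ *		.release	= single_release,
+ *	};
+ */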
init_net; + +struct seq_net_private { +#ifdef CONFIG_NET_NS + struct net *net; +#endif +}; + +int seq_open_net(struct inode *, struct file *, + const struct seq_operations *, int); +int single_open_net(struct inode *, struct file *file, + int (*show)(struct seq_file *, void *)); +int seq_release_net(struct inode *, struct file *); +int single_release_net(struct inode *, struct file *); +static inline struct net *seq_file_net(struct seq_file *seq) +{ +#ifdef CONFIG_NET_NS + return ((struct seq_net_private *)seq->private)->net; +#else + return &init_net; +#endif +} + +#endif diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h new file mode 100644 index 00000000..600060e2 --- /dev/null +++ b/include/linux/seqlock.h @@ -0,0 +1,282 @@ +#ifndef __LINUX_SEQLOCK_H +#define __LINUX_SEQLOCK_H +/* + * Reader/writer consistent mechanism without starving writers. This type of + * lock for data where the reader wants a consistent set of information + * and is willing to retry if the information changes. Readers never + * block but they may have to retry if a writer is in + * progress. Writers do not wait for readers. + * + * This is not as cache friendly as brlock. Also, this will not work + * for data that contains pointers, because any writer could + * invalidate a pointer that a reader was following. + * + * Expected reader usage: + * do { + * seq = read_seqbegin(&foo); + * ... + * } while (read_seqretry(&foo, seq)); + * + * + * On non-SMP the spin locks disappear but the writer still needs + * to increment the sequence variables because an interrupt routine could + * change the state of the data. + * + * Based on x86_64 vsyscall gettimeofday + * by Keith Owens and Andrea Arcangeli + */ + +#include <linux/spinlock.h> +#include <linux/preempt.h> +#include <asm/processor.h> + +typedef struct { + unsigned sequence; + spinlock_t lock; +} seqlock_t; + +/* + * These macros triggered gcc-3.x compile-time problems. We think these are + * OK now. Be cautious. + */ +#define __SEQLOCK_UNLOCKED(lockname) \ + { 0, __SPIN_LOCK_UNLOCKED(lockname) } + +#define seqlock_init(x) \ + do { \ + (x)->sequence = 0; \ + spin_lock_init(&(x)->lock); \ + } while (0) + +#define DEFINE_SEQLOCK(x) \ + seqlock_t x = __SEQLOCK_UNLOCKED(x) + +/* Lock out other writers and update the count. + * Acts like a normal spin_lock/unlock. + * Don't need preempt_disable() because that is in the spin_lock already. + */ +static inline void write_seqlock(seqlock_t *sl) +{ + spin_lock(&sl->lock); + ++sl->sequence; + smp_wmb(); +} + +static inline void write_sequnlock(seqlock_t *sl) +{ + smp_wmb(); + sl->sequence++; + spin_unlock(&sl->lock); +} + +static inline int write_tryseqlock(seqlock_t *sl) +{ + int ret = spin_trylock(&sl->lock); + + if (ret) { + ++sl->sequence; + smp_wmb(); + } + return ret; +} + +/* Start of read calculation -- fetch last complete writer token */ +static __always_inline unsigned read_seqbegin(const seqlock_t *sl) +{ + unsigned ret; + +repeat: + ret = ACCESS_ONCE(sl->sequence); + if (unlikely(ret & 1)) { + cpu_relax(); + goto repeat; + } + smp_rmb(); + + return ret; +} + +/* + * Test if reader processed invalid data. + * + * If sequence value changed then writer changed data while in section. + */ +static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start) +{ + smp_rmb(); + + return unlikely(sl->sequence != start); +} + + +/* + * Version using sequence counter only. 
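+ * For the spinlock-backed seqlock_t defined above, an illustrative (not from
+ * the original header) writer/reader pair is:
+ *
+ *	static DEFINE_SEQLOCK(foo_lock);
+ *
+ *	writer:
+ *		write_seqlock(&foo_lock);
+ *		... update the protected data ...
+ *		write_sequnlock(&foo_lock);
+ *
+ *	reader:
+ *		do {
+ *			seq = read_seqbegin(&foo_lock);
+ *			... copy the protected data out ...
+ *		} while (read_seqretry(&foo_lock, seq));
+ *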
+ * This can be used when code has its own mutex protecting the + * updating starting before the write_seqcountbeqin() and ending + * after the write_seqcount_end(). + */ + +typedef struct seqcount { + unsigned sequence; +} seqcount_t; + +#define SEQCNT_ZERO { 0 } +#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0) + +/** + * __read_seqcount_begin - begin a seq-read critical section (without barrier) + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() + * barrier. Callers should ensure that smp_rmb() or equivalent ordering is + * provided before actually loading any of the variables that are to be + * protected in this critical section. + * + * Use carefully, only in critical code, and comment how the barrier is + * provided. + */ +static inline unsigned __read_seqcount_begin(const seqcount_t *s) +{ + unsigned ret; + +repeat: + ret = ACCESS_ONCE(s->sequence); + if (unlikely(ret & 1)) { + cpu_relax(); + goto repeat; + } + return ret; +} + +/** + * read_seqcount_begin - begin a seq-read critical section + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * read_seqcount_begin opens a read critical section of the given seqcount. + * Validity of the critical section is tested by checking read_seqcount_retry + * function. + */ +static inline unsigned read_seqcount_begin(const seqcount_t *s) +{ + unsigned ret = __read_seqcount_begin(s); + smp_rmb(); + return ret; +} + +/** + * raw_seqcount_begin - begin a seq-read critical section + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * raw_seqcount_begin opens a read critical section of the given seqcount. + * Validity of the critical section is tested by checking read_seqcount_retry + * function. + * + * Unlike read_seqcount_begin(), this function will not wait for the count + * to stabilize. If a writer is active when we begin, we will fail the + * read_seqcount_retry() instead of stabilizing at the beginning of the + * critical section. + */ +static inline unsigned raw_seqcount_begin(const seqcount_t *s) +{ + unsigned ret = ACCESS_ONCE(s->sequence); + smp_rmb(); + return ret & ~1; +} + +/** + * __read_seqcount_retry - end a seq-read critical section (without barrier) + * @s: pointer to seqcount_t + * @start: count, from read_seqcount_begin + * Returns: 1 if retry is required, else 0 + * + * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() + * barrier. Callers should ensure that smp_rmb() or equivalent ordering is + * provided before actually loading any of the variables that are to be + * protected in this critical section. + * + * Use carefully, only in critical code, and comment how the barrier is + * provided. + */ +static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) +{ + return unlikely(s->sequence != start); +} + +/** + * read_seqcount_retry - end a seq-read critical section + * @s: pointer to seqcount_t + * @start: count, from read_seqcount_begin + * Returns: 1 if retry is required, else 0 + * + * read_seqcount_retry closes a read critical section of the given seqcount. + * If the critical section was invalid, it must be ignored (and typically + * retried). 
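+ *
+ * Illustrative pairing (not part of the original header; foo_seq and
+ * foo_mutex are hypothetical), with the write side serialised by the
+ * caller's own mutex:
+ *
+ *	writer:
+ *		mutex_lock(&foo_mutex);
+ *		write_seqcount_begin(&foo_seq);
+ *		... update the protected data ...
+ *		write_seqcount_end(&foo_seq);
+ *		mutex_unlock(&foo_mutex);
+ *
+ *	reader:
+ *		do {
+ *			start = read_seqcount_begin(&foo_seq);
+ *			... copy the protected data out ...
+ *		} while (read_seqcount_retry(&foo_seq, start));
+ *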
+ */ +static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) +{ + smp_rmb(); + + return __read_seqcount_retry(s, start); +} + + +/* + * Sequence counter only version assumes that callers are using their + * own mutexing. + */ +static inline void write_seqcount_begin(seqcount_t *s) +{ + s->sequence++; + smp_wmb(); +} + +static inline void write_seqcount_end(seqcount_t *s) +{ + smp_wmb(); + s->sequence++; +} + +/** + * write_seqcount_barrier - invalidate in-progress read-side seq operations + * @s: pointer to seqcount_t + * + * After write_seqcount_barrier, no read-side seq operations will complete + * successfully and see data older than this. + */ +static inline void write_seqcount_barrier(seqcount_t *s) +{ + smp_wmb(); + s->sequence+=2; +} + +/* + * Possible sw/hw IRQ protected versions of the interfaces. + */ +#define write_seqlock_irqsave(lock, flags) \ + do { local_irq_save(flags); write_seqlock(lock); } while (0) +#define write_seqlock_irq(lock) \ + do { local_irq_disable(); write_seqlock(lock); } while (0) +#define write_seqlock_bh(lock) \ + do { local_bh_disable(); write_seqlock(lock); } while (0) + +#define write_sequnlock_irqrestore(lock, flags) \ + do { write_sequnlock(lock); local_irq_restore(flags); } while(0) +#define write_sequnlock_irq(lock) \ + do { write_sequnlock(lock); local_irq_enable(); } while(0) +#define write_sequnlock_bh(lock) \ + do { write_sequnlock(lock); local_bh_enable(); } while(0) + +#define read_seqbegin_irqsave(lock, flags) \ + ({ local_irq_save(flags); read_seqbegin(lock); }) + +#define read_seqretry_irqrestore(lock, iv, flags) \ + ({ \ + int ret = read_seqretry(lock, iv); \ + local_irq_restore(flags); \ + ret; \ + }) + +#endif /* __LINUX_SEQLOCK_H */ diff --git a/include/linux/serial.h b/include/linux/serial.h new file mode 100644 index 00000000..441980ec --- /dev/null +++ b/include/linux/serial.h @@ -0,0 +1,227 @@ +/* + * include/linux/serial.h + * + * Copyright (C) 1992 by Theodore Ts'o. + * + * Redistribution of this file is permitted under the terms of the GNU + * Public License (GPL) + */ + +#ifndef _LINUX_SERIAL_H +#define _LINUX_SERIAL_H + +#include <linux/types.h> + +#ifdef __KERNEL__ +#include <asm/page.h> + +/* + * Counters of the input lines (CTS, DSR, RI, CD) interrupts + */ + +struct async_icount { + __u32 cts, dsr, rng, dcd, tx, rx; + __u32 frame, parity, overrun, brk; + __u32 buf_overrun; +}; + +/* + * The size of the serial xmit buffer is 1 page, or 4096 bytes + */ +#define SERIAL_XMIT_SIZE PAGE_SIZE + +#endif + +struct serial_struct { + int type; + int line; + unsigned int port; + int irq; + int flags; + int xmit_fifo_size; + int custom_divisor; + int baud_base; + unsigned short close_delay; + char io_type; + char reserved_char[1]; + int hub6; + unsigned short closing_wait; /* time to wait before closing */ + unsigned short closing_wait2; /* no longer used... */ + unsigned char *iomem_base; + unsigned short iomem_reg_shift; + unsigned int port_high; + unsigned long iomap_base; /* cookie passed into ioremap */ +}; + +/* + * For the close wait times, 0 means wait forever for serial port to + * flush its output. 65535 means don't wait at all. + */ +#define ASYNC_CLOSING_WAIT_INF 0 +#define ASYNC_CLOSING_WAIT_NONE 65535 + +/* + * These are the supported serial types. 
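+ *
+ * Illustrative userspace sketch (not part of the original header): the type
+ * of an open port is reported in serial_struct.type by TIOCGSERIAL, e.g.
+ *
+ *	struct serial_struct ss;
+ *
+ *	if (ioctl(fd, TIOCGSERIAL, &ss) == 0 && ss.type == PORT_16550A)
+ *		... the UART has a usable FIFO ...
+ *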
+ */ +#define PORT_UNKNOWN 0 +#define PORT_8250 1 +#define PORT_16450 2 +#define PORT_16550 3 +#define PORT_16550A 4 +#define PORT_CIRRUS 5 /* usurped by cyclades.c */ +#define PORT_16650 6 +#define PORT_16650V2 7 +#define PORT_16750 8 +#define PORT_STARTECH 9 /* usurped by cyclades.c */ +#define PORT_16C950 10 /* Oxford Semiconductor */ +#define PORT_16654 11 +#define PORT_16850 12 +#define PORT_RSA 13 /* RSA-DV II/S card */ +#define PORT_MAX 13 + +#define SERIAL_IO_PORT 0 +#define SERIAL_IO_HUB6 1 +#define SERIAL_IO_MEM 2 + +struct serial_uart_config { + char *name; + int dfl_xmit_fifo_size; + int flags; +}; + +#define UART_CLEAR_FIFO 0x01 +#define UART_USE_FIFO 0x02 +#define UART_STARTECH 0x04 +#define UART_NATSEMI 0x08 + +/* + * Definitions for async_struct (and serial_struct) flags field + * + * Define ASYNCB_* for convenient use with {test,set,clear}_bit. + */ +#define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes + * on the callout port */ +#define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */ +#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */ +#define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */ +#define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */ +#define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */ +#define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */ +#define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during + * autoconfiguration */ +#define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */ +#define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */ +#define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */ +#define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */ +#define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */ +#define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */ +#define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety + * checks. Note: can be dangerous! 
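+ *
+ * Illustrative use of the ASYNCB_* bit numbers (not part of the original
+ * header): as noted above they are meant for the atomic bitops on a
+ * driver's unsigned long flags word (port here is hypothetical, e.g. a
+ * struct tty_port):
+ *
+ *	set_bit(ASYNCB_INITIALIZED, &port->flags);
+ *	if (test_bit(ASYNCB_CLOSING, &port->flags))
+ *		...
+ *	clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
+ *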
*/ +#define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */ +#define ASYNCB_LAST_USER 15 + +/* Internal flags used only by kernel */ +#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ +#define ASYNCB_SUSPENDED 30 /* Serial port is suspended */ +#define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */ +#define ASYNCB_BOOT_AUTOCONF 28 /* Autoconfigure port on bootup */ +#define ASYNCB_CLOSING 27 /* Serial port is closing */ +#define ASYNCB_CTS_FLOW 26 /* Do CTS flow control */ +#define ASYNCB_CHECK_CD 25 /* i.e., CLOCAL */ +#define ASYNCB_SHARE_IRQ 24 /* for multifunction cards, no longer used */ +#define ASYNCB_CONS_FLOW 23 /* flow control for console */ +#define ASYNCB_BOOT_ONLYMCA 22 /* Probe only if MCA bus */ +#define ASYNCB_FIRST_KERNEL 22 + +#define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY) +#define ASYNC_SUSPENDED (1U << ASYNCB_SUSPENDED) +#define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT) +#define ASYNC_SAK (1U << ASYNCB_SAK) +#define ASYNC_SPLIT_TERMIOS (1U << ASYNCB_SPLIT_TERMIOS) +#define ASYNC_SPD_HI (1U << ASYNCB_SPD_HI) +#define ASYNC_SPD_VHI (1U << ASYNCB_SPD_VHI) +#define ASYNC_SKIP_TEST (1U << ASYNCB_SKIP_TEST) +#define ASYNC_AUTO_IRQ (1U << ASYNCB_AUTO_IRQ) +#define ASYNC_SESSION_LOCKOUT (1U << ASYNCB_SESSION_LOCKOUT) +#define ASYNC_PGRP_LOCKOUT (1U << ASYNCB_PGRP_LOCKOUT) +#define ASYNC_CALLOUT_NOHUP (1U << ASYNCB_CALLOUT_NOHUP) +#define ASYNC_HARDPPS_CD (1U << ASYNCB_HARDPPS_CD) +#define ASYNC_SPD_SHI (1U << ASYNCB_SPD_SHI) +#define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY) +#define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART) +#define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE) + +#define ASYNC_FLAGS ((1U << (ASYNCB_LAST_USER + 1)) - 1) +#define ASYNC_USR_MASK (ASYNC_SPD_MASK|ASYNC_CALLOUT_NOHUP| \ + ASYNC_LOW_LATENCY) +#define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI) +#define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI) +#define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI) + +#define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED) +#define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE) +#define ASYNC_BOOT_AUTOCONF (1U << ASYNCB_BOOT_AUTOCONF) +#define ASYNC_CLOSING (1U << ASYNCB_CLOSING) +#define ASYNC_CTS_FLOW (1U << ASYNCB_CTS_FLOW) +#define ASYNC_CHECK_CD (1U << ASYNCB_CHECK_CD) +#define ASYNC_SHARE_IRQ (1U << ASYNCB_SHARE_IRQ) +#define ASYNC_CONS_FLOW (1U << ASYNCB_CONS_FLOW) +#define ASYNC_BOOT_ONLYMCA (1U << ASYNCB_BOOT_ONLYMCA) +#define ASYNC_INTERNAL_FLAGS (~((1U << ASYNCB_FIRST_KERNEL) - 1)) + +/* + * Multiport serial configuration structure --- external structure + */ +struct serial_multiport_struct { + int irq; + int port1; + unsigned char mask1, match1; + int port2; + unsigned char mask2, match2; + int port3; + unsigned char mask3, match3; + int port4; + unsigned char mask4, match4; + int port_monitor; + int reserved[32]; +}; + +/* + * Serial input interrupt line counters -- external structure + * Four lines can interrupt: CTS, DSR, RI, DCD + */ +struct serial_icounter_struct { + int cts, dsr, rng, dcd; + int rx, tx; + int frame, overrun, parity, brk; + int buf_overrun; + int reserved[9]; +}; + +/* + * Serial interface for controlling RS485 settings on chips with suitable + * support. Set with TIOCSRS485 and get with TIOCGRS485 if supported by your + * platform. The set function returns the new state, with any unsupported bits + * reverted appropriately. 
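+ *
+ * Illustrative userspace sketch (not part of the original header); error
+ * handling omitted:
+ *
+ *	struct serial_rs485 rs485conf;
+ *
+ *	memset(&rs485conf, 0, sizeof(rs485conf));
+ *	rs485conf.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
+ *	rs485conf.delay_rts_after_send = 1;	(milliseconds)
+ *	if (ioctl(fd, TIOCSRS485, &rs485conf) < 0)
+ *		... the driver does not support RS485 ...
+ *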
+ */ + +struct serial_rs485 { + __u32 flags; /* RS485 feature flags */ +#define SER_RS485_ENABLED (1 << 0) /* If enabled */ +#define SER_RS485_RTS_ON_SEND (1 << 1) /* Logical level for + RTS pin when + sending */ +#define SER_RS485_RTS_AFTER_SEND (1 << 2) /* Logical level for + RTS pin after sent*/ +#define SER_RS485_RX_DURING_TX (1 << 4) + __u32 delay_rts_before_send; /* Delay before send (milliseconds) */ + __u32 delay_rts_after_send; /* Delay after send (milliseconds) */ + __u32 padding[5]; /* Memory is cheap, new structs + are a royal PITA .. */ +}; + +#ifdef __KERNEL__ +#include <linux/compiler.h> + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SERIAL_H */ diff --git a/include/linux/serial167.h b/include/linux/serial167.h new file mode 100644 index 00000000..59c81b70 --- /dev/null +++ b/include/linux/serial167.h @@ -0,0 +1,157 @@ +/* + * serial167.h + * + * Richard Hirst [richard@sleepie.demon.co.uk] + * + * Based on cyclades.h + */ + +struct cyclades_monitor { + unsigned long int_count; + unsigned long char_count; + unsigned long char_max; + unsigned long char_last; +}; + +/* + * This is our internal structure for each serial port's state. + * + * Many fields are paralleled by the structure used by the serial_struct + * structure. + * + * For definitions of the flags field, see tty.h + */ + +struct cyclades_port { + int magic; + int type; + int card; + int line; + int flags; /* defined in tty.h */ + struct tty_struct *tty; + int read_status_mask; + int timeout; + int xmit_fifo_size; + int cor1,cor2,cor3,cor4,cor5,cor6,cor7; + int tbpr,tco,rbpr,rco; + int ignore_status_mask; + int close_delay; + int IER; /* Interrupt Enable Register */ + unsigned long last_active; + int count; /* # of fd on device */ + int x_char; /* to be pushed out ASAP */ + int x_break; + int blocked_open; /* # of blocked opens */ + unsigned char *xmit_buf; + int xmit_head; + int xmit_tail; + int xmit_cnt; + int default_threshold; + int default_timeout; + wait_queue_head_t open_wait; + wait_queue_head_t close_wait; + struct cyclades_monitor mon; +}; + +#define CYCLADES_MAGIC 0x4359 + +#define CYGETMON 0x435901 +#define CYGETTHRESH 0x435902 +#define CYSETTHRESH 0x435903 +#define CYGETDEFTHRESH 0x435904 +#define CYSETDEFTHRESH 0x435905 +#define CYGETTIMEOUT 0x435906 +#define CYSETTIMEOUT 0x435907 +#define CYGETDEFTIMEOUT 0x435908 +#define CYSETDEFTIMEOUT 0x435909 + +#define CyMaxChipsPerCard 1 + +/**** cd2401 registers ****/ + +#define CyGFRCR (0x81) +#define CyCCR (0x13) +#define CyCLR_CHAN (0x40) +#define CyINIT_CHAN (0x20) +#define CyCHIP_RESET (0x10) +#define CyENB_XMTR (0x08) +#define CyDIS_XMTR (0x04) +#define CyENB_RCVR (0x02) +#define CyDIS_RCVR (0x01) +#define CyCAR (0xee) +#define CyIER (0x11) +#define CyMdmCh (0x80) +#define CyRxExc (0x20) +#define CyRxData (0x08) +#define CyTxMpty (0x02) +#define CyTxRdy (0x01) +#define CyLICR (0x26) +#define CyRISR (0x89) +#define CyTIMEOUT (0x80) +#define CySPECHAR (0x70) +#define CyOVERRUN (0x08) +#define CyPARITY (0x04) +#define CyFRAME (0x02) +#define CyBREAK (0x01) +#define CyREOIR (0x84) +#define CyTEOIR (0x85) +#define CyMEOIR (0x86) +#define CyNOTRANS (0x08) +#define CyRFOC (0x30) +#define CyRDR (0xf8) +#define CyTDR (0xf8) +#define CyMISR (0x8b) +#define CyRISR (0x89) +#define CyTISR (0x8a) +#define CyMSVR1 (0xde) +#define CyMSVR2 (0xdf) +#define CyDSR (0x80) +#define CyDCD (0x40) +#define CyCTS (0x20) +#define CyDTR (0x02) +#define CyRTS (0x01) +#define CyRTPRL (0x25) +#define CyRTPRH (0x24) +#define CyCOR1 (0x10) +#define CyPARITY_NONE (0x00) +#define CyPARITY_E 
(0x40) +#define CyPARITY_O (0xC0) +#define Cy_5_BITS (0x04) +#define Cy_6_BITS (0x05) +#define Cy_7_BITS (0x06) +#define Cy_8_BITS (0x07) +#define CyCOR2 (0x17) +#define CyETC (0x20) +#define CyCtsAE (0x02) +#define CyCOR3 (0x16) +#define Cy_1_STOP (0x02) +#define Cy_2_STOP (0x04) +#define CyCOR4 (0x15) +#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */ +#define CyCOR5 (0x14) +#define CyCOR6 (0x18) +#define CyCOR7 (0x07) +#define CyRBPR (0xcb) +#define CyRCOR (0xc8) +#define CyTBPR (0xc3) +#define CyTCOR (0xc0) +#define CySCHR1 (0x1f) +#define CySCHR2 (0x1e) +#define CyTPR (0xda) +#define CyPILR1 (0xe3) +#define CyPILR2 (0xe0) +#define CyPILR3 (0xe1) +#define CyCMR (0x1b) +#define CyASYNC (0x02) +#define CyLICR (0x26) +#define CyLIVR (0x09) +#define CySCRL (0x23) +#define CySCRH (0x22) +#define CyTFTC (0x80) + + +/* max number of chars in the FIFO */ + +#define CyMAX_CHAR_FIFO 12 + +/***************************************************************************/ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h new file mode 100644 index 00000000..8f012f8a --- /dev/null +++ b/include/linux/serial_8250.h @@ -0,0 +1,95 @@ +/* + * linux/include/linux/serial_8250.h + * + * Copyright (C) 2004 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef _LINUX_SERIAL_8250_H +#define _LINUX_SERIAL_8250_H + +#include <linux/serial_core.h> +#include <linux/platform_device.h> + +/* + * This is the platform device platform_data structure + */ +struct plat_serial8250_port { + unsigned long iobase; /* io base address */ + void __iomem *membase; /* ioremap cookie or NULL */ + resource_size_t mapbase; /* resource base */ + unsigned int irq; /* interrupt number */ + unsigned long irqflags; /* request_irq flags */ + unsigned int uartclk; /* UART clock rate */ + void *private_data; + unsigned char regshift; /* register shift */ + unsigned char iotype; /* UPIO_* */ + unsigned char hub6; + upf_t flags; /* UPF_* flags */ + unsigned int type; /* If UPF_FIXED_TYPE */ + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, + struct ktermios *new, + struct ktermios *old); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int state, + unsigned old); +}; + +/* + * Allocate 8250 platform device IDs. Nothing is implied by + * the numbering here, except for the legacy entry being -1. + */ +enum { + PLAT8250_DEV_LEGACY = -1, + PLAT8250_DEV_PLATFORM, + PLAT8250_DEV_PLATFORM1, + PLAT8250_DEV_PLATFORM2, + PLAT8250_DEV_FOURPORT, + PLAT8250_DEV_ACCENT, + PLAT8250_DEV_BOCA, + PLAT8250_DEV_EXAR_ST16C554, + PLAT8250_DEV_HUB6, + PLAT8250_DEV_MCA, + PLAT8250_DEV_AU1X00, + PLAT8250_DEV_SM501, +}; + +/* + * This should be used by drivers which want to register + * their own 8250 ports without registering their own + * platform device. Using these will make your driver + * dependent on the 8250 driver. 
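+ *
+ * Illustrative sketch (not part of the original header; the values are made
+ * up and UPIO_PORT/UPF_BOOT_AUTOCONF come from serial_core.h):
+ *
+ *	struct uart_port port;
+ *	int line;
+ *
+ *	memset(&port, 0, sizeof(port));
+ *	port.iobase  = 0x3f8;
+ *	port.irq     = 4;
+ *	port.uartclk = 1843200;
+ *	port.iotype  = UPIO_PORT;
+ *	port.flags   = UPF_BOOT_AUTOCONF;
+ *	line = serial8250_register_port(&port);
+ *	...
+ *	serial8250_unregister_port(line);
+ *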
+ */ +struct uart_port; +struct uart_8250_port; + +int serial8250_register_port(struct uart_port *); +void serial8250_unregister_port(int line); +void serial8250_suspend_port(int line); +void serial8250_resume_port(int line); + +extern int early_serial_setup(struct uart_port *port); + +extern int serial8250_find_port(struct uart_port *p); +extern int serial8250_find_port_for_earlycon(void); +extern int setup_early_serial8250_console(char *cmdline); +extern void serial8250_do_set_termios(struct uart_port *port, + struct ktermios *termios, struct ktermios *old); +extern void serial8250_do_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate); +extern int fsl8250_handle_irq(struct uart_port *port); +int serial8250_handle_irq(struct uart_port *port, unsigned int iir); +unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr); +void serial8250_tx_chars(struct uart_8250_port *up); +unsigned int serial8250_modem_status(struct uart_8250_port *up); + +extern void serial8250_set_isa_configurator(void (*v) + (int port, struct uart_port *up, + unsigned short *capabilities)); + +#endif diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h new file mode 100644 index 00000000..828c8037 --- /dev/null +++ b/include/linux/serial_core.h @@ -0,0 +1,560 @@ +/* + * linux/drivers/char/serial_core.h + * + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef LINUX_SERIAL_CORE_H +#define LINUX_SERIAL_CORE_H + +#include <linux/serial.h> + +/* + * The type definitions. These are from Ted Ts'o's serial.h + */ +#define PORT_UNKNOWN 0 +#define PORT_8250 1 +#define PORT_16450 2 +#define PORT_16550 3 +#define PORT_16550A 4 +#define PORT_CIRRUS 5 +#define PORT_16650 6 +#define PORT_16650V2 7 +#define PORT_16750 8 +#define PORT_STARTECH 9 +#define PORT_16C950 10 +#define PORT_16654 11 +#define PORT_16850 12 +#define PORT_RSA 13 +#define PORT_NS16550A 14 +#define PORT_XSCALE 15 +#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ +#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ +#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ +#define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */ +#define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */ +#define PORT_XR17D15X 21 /* Exar XR17D15x UART */ +#define PORT_MAX_8250 21 /* max port ID */ + +/* + * ARM specific type numbers. These are not currently guaranteed + * to be implemented, and will change in the future. These are + * separate so any additions to the old serial.c that occur before + * we are merged can be easily merged here. + */ +#define PORT_PXA 31 +#define PORT_AMBA 32 +#define PORT_CLPS711X 33 +#define PORT_SA1100 34 +#define PORT_UART00 35 +#define PORT_21285 37 + +/* Sparc type numbers. 
*/ +#define PORT_SUNZILOG 38 +#define PORT_SUNSAB 39 + +/* DEC */ +#define PORT_DZ 46 +#define PORT_ZS 47 + +/* Parisc type numbers. */ +#define PORT_MUX 48 + +/* Atmel AT91 / AT32 SoC */ +#define PORT_ATMEL 49 + +/* Macintosh Zilog type numbers */ +#define PORT_MAC_ZILOG 50 /* m68k : not yet implemented */ +#define PORT_PMAC_ZILOG 51 + +/* SH-SCI */ +#define PORT_SCI 52 +#define PORT_SCIF 53 +#define PORT_IRDA 54 + +/* Samsung S3C2410 SoC and derivatives thereof */ +#define PORT_S3C2410 55 + +/* SGI IP22 aka Indy / Challenge S / Indigo 2 */ +#define PORT_IP22ZILOG 56 + +/* Sharp LH7a40x -- an ARM9 SoC series */ +#define PORT_LH7A40X 57 + +/* PPC CPM type number */ +#define PORT_CPM 58 + +/* MPC52xx (and MPC512x) type numbers */ +#define PORT_MPC52xx 59 + +/* IBM icom */ +#define PORT_ICOM 60 + +/* Samsung S3C2440 SoC */ +#define PORT_S3C2440 61 + +/* Motorola i.MX SoC */ +#define PORT_IMX 62 + +/* Marvell MPSC */ +#define PORT_MPSC 63 + +/* TXX9 type number */ +#define PORT_TXX9 64 + +/* NEC VR4100 series SIU/DSIU */ +#define PORT_VR41XX_SIU 65 +#define PORT_VR41XX_DSIU 66 + +/* Samsung S3C2400 SoC */ +#define PORT_S3C2400 67 + +/* M32R SIO */ +#define PORT_M32R_SIO 68 + +/*Digi jsm */ +#define PORT_JSM 69 + +#define PORT_PNX8XXX 70 + +/* Hilscher netx */ +#define PORT_NETX 71 + +/* SUN4V Hypervisor Console */ +#define PORT_SUNHV 72 + +#define PORT_S3C2412 73 + +/* Xilinx uartlite */ +#define PORT_UARTLITE 74 + +/* Blackfin bf5xx */ +#define PORT_BFIN 75 + +/* Micrel KS8695 */ +#define PORT_KS8695 76 + +/* Broadcom SB1250, etc. SOC */ +#define PORT_SB1250_DUART 77 + +/* Freescale ColdFire */ +#define PORT_MCF 78 + +/* Blackfin SPORT */ +#define PORT_BFIN_SPORT 79 + +/* MN10300 on-chip UART numbers */ +#define PORT_MN10300 80 +#define PORT_MN10300_CTS 81 + +#define PORT_SC26XX 82 + +/* SH-SCI */ +#define PORT_SCIFA 83 + +#define PORT_S3C6400 84 + +/* NWPSERIAL */ +#define PORT_NWPSERIAL 85 + +/* MAX3100 */ +#define PORT_MAX3100 86 + +/* Timberdale UART */ +#define PORT_TIMBUART 87 + +/* Qualcomm MSM SoCs */ +#define PORT_MSM 88 + +/* BCM63xx family SoCs */ +#define PORT_BCM63XX 89 + +/* Aeroflex Gaisler GRLIB APBUART */ +#define PORT_APBUART 90 + +/* Altera UARTs */ +#define PORT_ALTERA_JTAGUART 91 +#define PORT_ALTERA_UART 92 + +/* SH-SCI */ +#define PORT_SCIFB 93 + +/* MAX3107 */ +#define PORT_MAX3107 94 + +/* High Speed UART for Medfield */ +#define PORT_MFD 95 + +/* TI OMAP-UART */ +#define PORT_OMAP 96 + +/* VIA VT8500 SoC */ +#define PORT_VT8500 97 + +/* Xilinx PSS UART */ +#define PORT_XUARTPS 98 + +/* Atheros AR933X SoC */ +#define PORT_AR933X 99 + +/* Energy Micro efm32 SoC */ +#define PORT_EFMUART 100 + +#ifdef __KERNEL__ + +#include <linux/compiler.h> +#include <linux/interrupt.h> +#include <linux/circ_buf.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/tty.h> +#include <linux/mutex.h> +#include <linux/sysrq.h> +#include <linux/pps_kernel.h> + +struct uart_port; +struct serial_struct; +struct device; + +/* + * This structure describes all the operations that can be + * done on the physical hardware. 
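+ *
+ * A low-level driver normally provides a static instance of this and
+ * points uart_port->ops at it; a partial sketch (the foo_* callbacks
+ * are assumed names, not part of this interface):
+ *
+ *	static const struct uart_ops foo_pops = {
+ *		.tx_empty	= foo_tx_empty,
+ *		.set_mctrl	= foo_set_mctrl,
+ *		.get_mctrl	= foo_get_mctrl,
+ *		.start_tx	= foo_start_tx,
+ *		.stop_tx	= foo_stop_tx,
+ *		.stop_rx	= foo_stop_rx,
+ *		.startup	= foo_startup,
+ *		.shutdown	= foo_shutdown,
+ *		.set_termios	= foo_set_termios,
+ *		.type		= foo_type,
+ *		.request_port	= foo_request_port,
+ *		.release_port	= foo_release_port,
+ *		.config_port	= foo_config_port,
+ *	};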
+ */ +struct uart_ops { + unsigned int (*tx_empty)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int mctrl); + unsigned int (*get_mctrl)(struct uart_port *); + void (*stop_tx)(struct uart_port *); + void (*start_tx)(struct uart_port *); + void (*send_xchar)(struct uart_port *, char ch); + void (*stop_rx)(struct uart_port *); + void (*enable_ms)(struct uart_port *); + void (*break_ctl)(struct uart_port *, int ctl); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*flush_buffer)(struct uart_port *); + void (*set_termios)(struct uart_port *, struct ktermios *new, + struct ktermios *old); + void (*set_ldisc)(struct uart_port *, int new); + void (*pm)(struct uart_port *, unsigned int state, + unsigned int oldstate); + int (*set_wake)(struct uart_port *, unsigned int state); + void (*wake_peer)(struct uart_port *); + + /* + * Return a string describing the type of the port + */ + const char *(*type)(struct uart_port *); + + /* + * Release IO and memory resources used by the port. + * This includes iounmap if necessary. + */ + void (*release_port)(struct uart_port *); + + /* + * Request IO and memory resources used by the port. + * This includes iomapping the port if necessary. + */ + int (*request_port)(struct uart_port *); + void (*config_port)(struct uart_port *, int); + int (*verify_port)(struct uart_port *, struct serial_struct *); + int (*ioctl)(struct uart_port *, unsigned int, unsigned long); +#ifdef CONFIG_CONSOLE_POLL + void (*poll_put_char)(struct uart_port *, unsigned char); + int (*poll_get_char)(struct uart_port *); +#endif +}; + +#define NO_POLL_CHAR 0x00ff0000 +#define UART_CONFIG_TYPE (1 << 0) +#define UART_CONFIG_IRQ (1 << 1) + +struct uart_icount { + __u32 cts; + __u32 dsr; + __u32 rng; + __u32 dcd; + __u32 rx; + __u32 tx; + __u32 frame; + __u32 overrun; + __u32 parity; + __u32 brk; + __u32 buf_overrun; +}; + +typedef unsigned int __bitwise__ upf_t; + +struct uart_port { + spinlock_t lock; /* port lock */ + unsigned long iobase; /* in/out[bwl] */ + unsigned char __iomem *membase; /* read/write[bwl] */ + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, + struct ktermios *new, + struct ktermios *old); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int state, + unsigned int old); + unsigned int irq; /* irq number */ + unsigned long irqflags; /* irq flags */ + unsigned int uartclk; /* base uart clock */ + unsigned int fifosize; /* tx fifo size */ + unsigned char x_char; /* xon/xoff char */ + unsigned char regshift; /* reg offset shift */ + unsigned char iotype; /* io access style */ + unsigned char unused1; + +#define UPIO_PORT (0) +#define UPIO_HUB6 (1) +#define UPIO_MEM (2) +#define UPIO_MEM32 (3) +#define UPIO_AU (4) /* Au1x00 type IO */ +#define UPIO_TSI (5) /* Tsi108/109 type IO */ +#define UPIO_RM9000 (6) /* RM9000 type IO */ + + unsigned int read_status_mask; /* driver specific */ + unsigned int ignore_status_mask; /* driver specific */ + struct uart_state *state; /* pointer to parent state */ + struct uart_icount icount; /* statistics */ + + struct console *cons; /* struct console, if any */ +#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ) + unsigned long sysrq; /* sysrq timeout */ +#endif + + upf_t flags; + +#define UPF_FOURPORT ((__force upf_t) (1 << 1)) +#define UPF_SAK ((__force upf_t) (1 << 2)) +#define UPF_SPD_MASK ((__force upf_t) (0x1030)) +#define 
UPF_SPD_HI ((__force upf_t) (0x0010)) +#define UPF_SPD_VHI ((__force upf_t) (0x0020)) +#define UPF_SPD_CUST ((__force upf_t) (0x0030)) +#define UPF_SPD_SHI ((__force upf_t) (0x1000)) +#define UPF_SPD_WARP ((__force upf_t) (0x1010)) +#define UPF_SKIP_TEST ((__force upf_t) (1 << 6)) +#define UPF_AUTO_IRQ ((__force upf_t) (1 << 7)) +#define UPF_HARDPPS_CD ((__force upf_t) (1 << 11)) +#define UPF_LOW_LATENCY ((__force upf_t) (1 << 13)) +#define UPF_BUGGY_UART ((__force upf_t) (1 << 14)) +#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) +#define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) +#define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) +#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) +#define UPF_EXAR_EFR ((__force upf_t) (1 << 25)) +#define UPF_BUG_THRE ((__force upf_t) (1 << 26)) +/* The exact UART type is known and should not be probed. */ +#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27)) +#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28)) +#define UPF_FIXED_PORT ((__force upf_t) (1 << 29)) +#define UPF_DEAD ((__force upf_t) (1 << 30)) +#define UPF_IOREMAP ((__force upf_t) (1 << 31)) + +#define UPF_CHANGE_MASK ((__force upf_t) (0x17fff)) +#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) + + unsigned int mctrl; /* current modem ctrl settings */ + unsigned int timeout; /* character-based timeout */ + unsigned int type; /* port type */ + const struct uart_ops *ops; + unsigned int custom_divisor; + unsigned int line; /* port index */ + resource_size_t mapbase; /* for ioremap */ + struct device *dev; /* parent device */ + unsigned char hub6; /* this should be in the 8250 driver */ + unsigned char suspended; + unsigned char irq_wake; + unsigned char unused[2]; + void *private_data; /* generic platform data pointer */ +}; + +static inline int serial_port_in(struct uart_port *up, int offset) +{ + return up->serial_in(up, offset); +} + +static inline void serial_port_out(struct uart_port *up, int offset, int value) +{ + up->serial_out(up, offset, value); +} + +/* + * This is the state information which is persistent across opens. + */ +struct uart_state { + struct tty_port port; + + int pm_state; + struct circ_buf xmit; + + struct uart_port *uart_port; +}; + +#define UART_XMIT_SIZE PAGE_SIZE + + +/* number of characters left in xmit buffer before we ask for more */ +#define WAKEUP_CHARS 256 + +struct module; +struct tty_driver; + +struct uart_driver { + struct module *owner; + const char *driver_name; + const char *dev_name; + int major; + int minor; + int nr; + struct console *cons; + + /* + * these are private; the low level driver should not + * touch these; they should be initialised to NULL + */ + struct uart_state *state; + struct tty_driver *tty_driver; +}; + +void uart_write_wakeup(struct uart_port *port); + +/* + * Baud rate helpers. + */ +void uart_update_timeout(struct uart_port *port, unsigned int cflag, + unsigned int baud); +unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, + struct ktermios *old, unsigned int min, + unsigned int max); +unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); + +/* Base timer interval for polling */ +static inline int uart_poll_timeout(struct uart_port *port) +{ + int timeout = port->timeout; + + return timeout > 6 ? (timeout / 2 - 2) : 1; +} + +/* + * Console helpers. 
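+ *
+ * A serial console's setup() callback typically combines these, e.g.
+ * (a sketch; foo_ports and the default line settings are assumptions):
+ *
+ *	static int foo_console_setup(struct console *co, char *options)
+ *	{
+ *		struct uart_port *port = &foo_ports[co->index];
+ *		int baud = 9600, bits = 8, parity = 'n', flow = 'n';
+ *
+ *		if (options)
+ *			uart_parse_options(options, &baud, &parity, &bits, &flow);
+ *		return uart_set_options(port, co, baud, parity, bits, flow);
+ *	}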
+ */ +struct uart_port *uart_get_console(struct uart_port *ports, int nr, + struct console *c); +void uart_parse_options(char *options, int *baud, int *parity, int *bits, + int *flow); +int uart_set_options(struct uart_port *port, struct console *co, int baud, + int parity, int bits, int flow); +struct tty_driver *uart_console_device(struct console *co, int *index); +void uart_console_write(struct uart_port *port, const char *s, + unsigned int count, + void (*putchar)(struct uart_port *, int)); + +/* + * Port/driver registration/removal + */ +int uart_register_driver(struct uart_driver *uart); +void uart_unregister_driver(struct uart_driver *uart); +int uart_add_one_port(struct uart_driver *reg, struct uart_port *port); +int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port); +int uart_match_port(struct uart_port *port1, struct uart_port *port2); + +/* + * Power Management + */ +int uart_suspend_port(struct uart_driver *reg, struct uart_port *port); +int uart_resume_port(struct uart_driver *reg, struct uart_port *port); + +#define uart_circ_empty(circ) ((circ)->head == (circ)->tail) +#define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0) + +#define uart_circ_chars_pending(circ) \ + (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE)) + +#define uart_circ_chars_free(circ) \ + (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) + +static inline int uart_tx_stopped(struct uart_port *port) +{ + struct tty_struct *tty = port->state->port.tty; + if(tty->stopped || tty->hw_stopped) + return 1; + return 0; +} + +/* + * The following are helper functions for the low level drivers. + */ + +extern void uart_handle_dcd_change(struct uart_port *uport, + unsigned int status); +extern void uart_handle_cts_change(struct uart_port *uport, + unsigned int status); + +extern void uart_insert_char(struct uart_port *port, unsigned int status, + unsigned int overrun, unsigned int ch, unsigned int flag); + +#ifdef SUPPORT_SYSRQ +static inline int +uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) +{ + if (port->sysrq) { + if (ch && time_before(jiffies, port->sysrq)) { + handle_sysrq(ch); + port->sysrq = 0; + return 1; + } + port->sysrq = 0; + } + return 0; +} +#else +#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; }) +#endif + +/* + * We do the SysRQ and SAK checking like this... + */ +static inline int uart_handle_break(struct uart_port *port) +{ + struct uart_state *state = port->state; +#ifdef SUPPORT_SYSRQ + if (port->cons && port->cons->index == port->line) { + if (!port->sysrq) { + port->sysrq = jiffies + HZ*5; + return 1; + } + port->sysrq = 0; + } +#endif + if (port->flags & UPF_SAK) + do_SAK(state->port.tty); + return 0; +} + +/* + * UART_ENABLE_MS - determine if port should enable modem status irqs + */ +#define UART_ENABLE_MS(port,cflag) ((port)->flags & UPF_HARDPPS_CD || \ + (cflag) & CRTSCTS || \ + !((cflag) & CLOCAL)) + +#endif + +#endif /* LINUX_SERIAL_CORE_H */ diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h new file mode 100644 index 00000000..4976befb --- /dev/null +++ b/include/linux/serial_max3100.h @@ -0,0 +1,52 @@ +/* + * + * Copyright (C) 2007 Christian Pellegrin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + + +#ifndef _LINUX_SERIAL_MAX3100_H +#define _LINUX_SERIAL_MAX3100_H 1 + + +/** + * struct plat_max3100 - MAX3100 SPI UART platform data + * @loopback: force MAX3100 in loopback + * @crystal: 1 for 3.6864 Mhz, 0 for 1.8432 + * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook + * called on suspend and resume to activate it. + * @poll_time: poll time for CTS signal in ms, 0 disables (so no hw + * flow ctrl is possible but you have less CPU usage) + * + * You should use this structure in your machine description to specify + * how the MAX3100 is connected. Example: + * + * static struct plat_max3100 max3100_plat_data = { + * .loopback = 0, + * .crystal = 0, + * .poll_time = 100, + * }; + * + * static struct spi_board_info spi_board_info[] = { + * { + * .modalias = "max3100", + * .platform_data = &max3100_plat_data, + * .irq = IRQ_EINT12, + * .max_speed_hz = 5*1000*1000, + * .chip_select = 0, + * }, + * }; + * + **/ +struct plat_max3100 { + int loopback; + int crystal; + void (*max3100_hw_suspend) (int suspend); + int poll_time; +}; + +#endif diff --git a/include/linux/serial_mfd.h b/include/linux/serial_mfd.h new file mode 100644 index 00000000..2b071e0b --- /dev/null +++ b/include/linux/serial_mfd.h @@ -0,0 +1,47 @@ +#ifndef _SERIAL_MFD_H_ +#define _SERIAL_MFD_H_ + +/* HW register offset definition */ +#define UART_FOR 0x08 +#define UART_PS 0x0C +#define UART_MUL 0x0D +#define UART_DIV 0x0E + +#define HSU_GBL_IEN 0x0 +#define HSU_GBL_IST 0x4 + +#define HSU_GBL_INT_BIT_PORT0 0x0 +#define HSU_GBL_INT_BIT_PORT1 0x1 +#define HSU_GBL_INT_BIT_PORT2 0x2 +#define HSU_GBL_INT_BIT_IRI 0x3 +#define HSU_GBL_INT_BIT_HDLC 0x4 +#define HSU_GBL_INT_BIT_DMA 0x5 + +#define HSU_GBL_ISR 0x8 +#define HSU_GBL_DMASR 0x400 +#define HSU_GBL_DMAISR 0x404 + +#define HSU_PORT_REG_OFFSET 0x80 +#define HSU_PORT0_REG_OFFSET 0x80 +#define HSU_PORT1_REG_OFFSET 0x100 +#define HSU_PORT2_REG_OFFSET 0x180 +#define HSU_PORT_REG_LENGTH 0x80 + +#define HSU_DMA_CHANS_REG_OFFSET 0x500 +#define HSU_DMA_CHANS_REG_LENGTH 0x40 + +#define HSU_CH_SR 0x0 /* channel status reg */ +#define HSU_CH_CR 0x4 /* control reg */ +#define HSU_CH_DCR 0x8 /* descriptor control reg */ +#define HSU_CH_BSR 0x10 /* max fifo buffer size reg */ +#define HSU_CH_MOTSR 0x14 /* minimum ocp transfer size */ +#define HSU_CH_D0SAR 0x20 /* desc 0 start addr */ +#define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */ +#define HSU_CH_D1SAR 0x28 +#define HSU_CH_D1TSR 0x2C +#define HSU_CH_D2SAR 0x30 +#define HSU_CH_D2TSR 0x34 +#define HSU_CH_D3SAR 0x38 +#define HSU_CH_D3TSR 0x3C + +#endif diff --git a/include/linux/serial_pnx8xxx.h b/include/linux/serial_pnx8xxx.h new file mode 100644 index 00000000..79ad87b0 --- /dev/null +++ b/include/linux/serial_pnx8xxx.h @@ -0,0 +1,80 @@ +/* + * Embedded Alley Solutions, source@embeddedalley.com. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_SERIAL_PNX8XXX_H +#define _LINUX_SERIAL_PNX8XXX_H + +#include <linux/serial_core.h> + +#define PNX8XXX_NR_PORTS 2 + +struct pnx8xxx_port { + struct uart_port port; + struct timer_list timer; + unsigned int old_status; +}; + +/* register offsets */ +#define PNX8XXX_LCR 0 +#define PNX8XXX_MCR 0x004 +#define PNX8XXX_BAUD 0x008 +#define PNX8XXX_CFG 0x00c +#define PNX8XXX_FIFO 0x028 +#define PNX8XXX_ISTAT 0xfe0 +#define PNX8XXX_IEN 0xfe4 +#define PNX8XXX_ICLR 0xfe8 +#define PNX8XXX_ISET 0xfec +#define PNX8XXX_PD 0xff4 +#define PNX8XXX_MID 0xffc + +#define PNX8XXX_UART_LCR_TXBREAK (1<<30) +#define PNX8XXX_UART_LCR_PAREVN 0x10000000 +#define PNX8XXX_UART_LCR_PAREN 0x08000000 +#define PNX8XXX_UART_LCR_2STOPB 0x04000000 +#define PNX8XXX_UART_LCR_8BIT 0x01000000 +#define PNX8XXX_UART_LCR_TX_RST 0x00040000 +#define PNX8XXX_UART_LCR_RX_RST 0x00020000 +#define PNX8XXX_UART_LCR_RX_NEXT 0x00010000 + +#define PNX8XXX_UART_MCR_SCR 0xFF000000 +#define PNX8XXX_UART_MCR_DCD 0x00800000 +#define PNX8XXX_UART_MCR_CTS 0x00100000 +#define PNX8XXX_UART_MCR_LOOP 0x00000010 +#define PNX8XXX_UART_MCR_RTS 0x00000002 +#define PNX8XXX_UART_MCR_DTR 0x00000001 + +#define PNX8XXX_UART_INT_TX 0x00000080 +#define PNX8XXX_UART_INT_EMPTY 0x00000040 +#define PNX8XXX_UART_INT_RCVTO 0x00000020 +#define PNX8XXX_UART_INT_RX 0x00000010 +#define PNX8XXX_UART_INT_RXOVRN 0x00000008 +#define PNX8XXX_UART_INT_FRERR 0x00000004 +#define PNX8XXX_UART_INT_BREAK 0x00000002 +#define PNX8XXX_UART_INT_PARITY 0x00000001 +#define PNX8XXX_UART_INT_ALLRX 0x0000003F +#define PNX8XXX_UART_INT_ALLTX 0x000000C0 + +#define PNX8XXX_UART_FIFO_TXFIFO 0x001F0000 +#define PNX8XXX_UART_FIFO_TXFIFO_STA (0x1f<<16) +#define PNX8XXX_UART_FIFO_RXBRK 0x00008000 +#define PNX8XXX_UART_FIFO_RXFE 0x00004000 +#define PNX8XXX_UART_FIFO_RXPAR 0x00002000 +#define PNX8XXX_UART_FIFO_RXFIFO 0x00001F00 +#define PNX8XXX_UART_FIFO_RBRTHR 0x000000FF + +#endif diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h new file mode 100644 index 00000000..8ce70d76 --- /dev/null +++ b/include/linux/serial_reg.h @@ -0,0 +1,367 @@ +/* + * include/linux/serial_reg.h + * + * Copyright (C) 1992, 1994 by Theodore Ts'o. + * + * Redistribution of this file is permitted under the terms of the GNU + * Public License (GPL) + * + * These are the UART port assignments, expressed as offsets from the base + * register. These assignments should hold for any serial port based on + * a 8250, 16450, or 16550(A). + */ + +#ifndef _LINUX_SERIAL_REG_H +#define _LINUX_SERIAL_REG_H + +/* + * DLAB=0 + */ +#define UART_RX 0 /* In: Receive buffer */ +#define UART_TX 0 /* Out: Transmit buffer */ + +#define UART_IER 1 /* Out: Interrupt Enable Register */ +#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */ +#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */ +#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */ +#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */ +/* + * Sleep mode for ST16650 and TI16750. 
For the ST16650, EFR[4]=1 + */ +#define UART_IERX_SLEEP 0x10 /* Enable sleep mode */ + +#define UART_IIR 2 /* In: Interrupt ID Register */ +#define UART_IIR_NO_INT 0x01 /* No interrupts pending */ +#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */ +#define UART_IIR_MSI 0x00 /* Modem status interrupt */ +#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */ +#define UART_IIR_RDI 0x04 /* Receiver data interrupt */ +#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */ + +#define UART_IIR_BUSY 0x07 /* DesignWare APB Busy Detect */ + +#define UART_FCR 2 /* Out: FIFO Control Register */ +#define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */ +#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */ +#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */ +#define UART_FCR_DMA_SELECT 0x08 /* For DMA applications */ +/* + * Note: The FIFO trigger levels are chip specific: + * RX:76 = 00 01 10 11 TX:54 = 00 01 10 11 + * PC16550D: 1 4 8 14 xx xx xx xx + * TI16C550A: 1 4 8 14 xx xx xx xx + * TI16C550C: 1 4 8 14 xx xx xx xx + * ST16C550: 1 4 8 14 xx xx xx xx + * ST16C650: 8 16 24 28 16 8 24 30 PORT_16650V2 + * NS16C552: 1 4 8 14 xx xx xx xx + * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654 + * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750 + * TI16C752: 8 16 56 60 8 16 32 56 + * Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA + */ +#define UART_FCR_R_TRIG_00 0x00 +#define UART_FCR_R_TRIG_01 0x40 +#define UART_FCR_R_TRIG_10 0x80 +#define UART_FCR_R_TRIG_11 0xc0 +#define UART_FCR_T_TRIG_00 0x00 +#define UART_FCR_T_TRIG_01 0x10 +#define UART_FCR_T_TRIG_10 0x20 +#define UART_FCR_T_TRIG_11 0x30 + +#define UART_FCR_TRIGGER_MASK 0xC0 /* Mask for the FIFO trigger range */ +#define UART_FCR_TRIGGER_1 0x00 /* Mask for trigger set at 1 */ +#define UART_FCR_TRIGGER_4 0x40 /* Mask for trigger set at 4 */ +#define UART_FCR_TRIGGER_8 0x80 /* Mask for trigger set at 8 */ +#define UART_FCR_TRIGGER_14 0xC0 /* Mask for trigger set at 14 */ +/* 16650 definitions */ +#define UART_FCR6_R_TRIGGER_8 0x00 /* Mask for receive trigger set at 1 */ +#define UART_FCR6_R_TRIGGER_16 0x40 /* Mask for receive trigger set at 4 */ +#define UART_FCR6_R_TRIGGER_24 0x80 /* Mask for receive trigger set at 8 */ +#define UART_FCR6_R_TRIGGER_28 0xC0 /* Mask for receive trigger set at 14 */ +#define UART_FCR6_T_TRIGGER_16 0x00 /* Mask for transmit trigger set at 16 */ +#define UART_FCR6_T_TRIGGER_8 0x10 /* Mask for transmit trigger set at 8 */ +#define UART_FCR6_T_TRIGGER_24 0x20 /* Mask for transmit trigger set at 24 */ +#define UART_FCR6_T_TRIGGER_30 0x30 /* Mask for transmit trigger set at 30 */ +#define UART_FCR7_64BYTE 0x20 /* Go into 64 byte mode (TI16C750) */ + +#define UART_LCR 3 /* Out: Line Control Register */ +/* + * Note: if the word length is 5 bits (UART_LCR_WLEN5), then setting + * UART_LCR_STOP will select 1.5 stop bits, not 2 stop bits. + */ +#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ +#define UART_LCR_SBC 0x40 /* Set break control */ +#define UART_LCR_SPAR 0x20 /* Stick parity (?) */ +#define UART_LCR_EPAR 0x10 /* Even parity select */ +#define UART_LCR_PARITY 0x08 /* Parity Enable */ +#define UART_LCR_STOP 0x04 /* Stop bits: 0=1 bit, 1=2 bits */ +#define UART_LCR_WLEN5 0x00 /* Wordlength: 5 bits */ +#define UART_LCR_WLEN6 0x01 /* Wordlength: 6 bits */ +#define UART_LCR_WLEN7 0x02 /* Wordlength: 7 bits */ +#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */ + +/* + * Access to some registers depends on register access / configuration + * mode. 
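+ *
+ * For example, reaching the extended feature register of a 16C650
+ * compatible part usually means switching LCR to configuration mode B
+ * and restoring it afterwards; roughly (a sketch using the
+ * serial_port_out() accessor from serial_core.h, with lcr holding the
+ * previously read LCR value):
+ *
+ *	serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+ *	serial_port_out(port, UART_EFR, UART_EFR_ECB);
+ *	serial_port_out(port, UART_LCR, lcr);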
+ */ +#define UART_LCR_CONF_MODE_A UART_LCR_DLAB /* Configutation mode A */ +#define UART_LCR_CONF_MODE_B 0xBF /* Configutation mode B */ + +#define UART_MCR 4 /* Out: Modem Control Register */ +#define UART_MCR_CLKSEL 0x80 /* Divide clock by 4 (TI16C752, EFR[4]=1) */ +#define UART_MCR_TCRTLR 0x40 /* Access TCR/TLR (TI16C752, EFR[4]=1) */ +#define UART_MCR_XONANY 0x20 /* Enable Xon Any (TI16C752, EFR[4]=1) */ +#define UART_MCR_AFE 0x20 /* Enable auto-RTS/CTS (TI16C550C/TI16C750) */ +#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */ +#define UART_MCR_OUT2 0x08 /* Out2 complement */ +#define UART_MCR_OUT1 0x04 /* Out1 complement */ +#define UART_MCR_RTS 0x02 /* RTS complement */ +#define UART_MCR_DTR 0x01 /* DTR complement */ + +#define UART_LSR 5 /* In: Line Status Register */ +#define UART_LSR_FIFOE 0x80 /* Fifo error */ +#define UART_LSR_TEMT 0x40 /* Transmitter empty */ +#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ +#define UART_LSR_BI 0x10 /* Break interrupt indicator */ +#define UART_LSR_FE 0x08 /* Frame error indicator */ +#define UART_LSR_PE 0x04 /* Parity error indicator */ +#define UART_LSR_OE 0x02 /* Overrun error indicator */ +#define UART_LSR_DR 0x01 /* Receiver data ready */ +#define UART_LSR_BRK_ERROR_BITS 0x1E /* BI, FE, PE, OE bits */ + +#define UART_MSR 6 /* In: Modem Status Register */ +#define UART_MSR_DCD 0x80 /* Data Carrier Detect */ +#define UART_MSR_RI 0x40 /* Ring Indicator */ +#define UART_MSR_DSR 0x20 /* Data Set Ready */ +#define UART_MSR_CTS 0x10 /* Clear to Send */ +#define UART_MSR_DDCD 0x08 /* Delta DCD */ +#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */ +#define UART_MSR_DDSR 0x02 /* Delta DSR */ +#define UART_MSR_DCTS 0x01 /* Delta CTS */ +#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! 
*/ + +#define UART_SCR 7 /* I/O: Scratch Register */ + +/* + * DLAB=1 + */ +#define UART_DLL 0 /* Out: Divisor Latch Low */ +#define UART_DLM 1 /* Out: Divisor Latch High */ + +/* + * LCR=0xBF (or DLAB=1 for 16C660) + */ +#define UART_EFR 2 /* I/O: Extended Features Register */ +#define UART_XR_EFR 9 /* I/O: Extended Features Register (XR17D15x) */ +#define UART_EFR_CTS 0x80 /* CTS flow control */ +#define UART_EFR_RTS 0x40 /* RTS flow control */ +#define UART_EFR_SCD 0x20 /* Special character detect */ +#define UART_EFR_ECB 0x10 /* Enhanced control bit */ +/* + * the low four bits control software flow control + */ + +/* + * LCR=0xBF, TI16C752, ST16650, ST16650A, ST16654 + */ +#define UART_XON1 4 /* I/O: Xon character 1 */ +#define UART_XON2 5 /* I/O: Xon character 2 */ +#define UART_XOFF1 6 /* I/O: Xoff character 1 */ +#define UART_XOFF2 7 /* I/O: Xoff character 2 */ + +/* + * EFR[4]=1 MCR[6]=1, TI16C752 + */ +#define UART_TI752_TCR 6 /* I/O: transmission control register */ +#define UART_TI752_TLR 7 /* I/O: trigger level register */ + +/* + * LCR=0xBF, XR16C85x + */ +#define UART_TRG 0 /* FCTR bit 7 selects Rx or Tx + * In: Fifo count + * Out: Fifo custom trigger levels */ +/* + * These are the definitions for the Programmable Trigger Register + */ +#define UART_TRG_1 0x01 +#define UART_TRG_4 0x04 +#define UART_TRG_8 0x08 +#define UART_TRG_16 0x10 +#define UART_TRG_32 0x20 +#define UART_TRG_64 0x40 +#define UART_TRG_96 0x60 +#define UART_TRG_120 0x78 +#define UART_TRG_128 0x80 + +#define UART_FCTR 1 /* Feature Control Register */ +#define UART_FCTR_RTS_NODELAY 0x00 /* RTS flow control delay */ +#define UART_FCTR_RTS_4DELAY 0x01 +#define UART_FCTR_RTS_6DELAY 0x02 +#define UART_FCTR_RTS_8DELAY 0x03 +#define UART_FCTR_IRDA 0x04 /* IrDa data encode select */ +#define UART_FCTR_TX_INT 0x08 /* Tx interrupt type select */ +#define UART_FCTR_TRGA 0x00 /* Tx/Rx 550 trigger table select */ +#define UART_FCTR_TRGB 0x10 /* Tx/Rx 650 trigger table select */ +#define UART_FCTR_TRGC 0x20 /* Tx/Rx 654 trigger table select */ +#define UART_FCTR_TRGD 0x30 /* Tx/Rx 850 programmable trigger select */ +#define UART_FCTR_SCR_SWAP 0x40 /* Scratch pad register swap */ +#define UART_FCTR_RX 0x00 /* Programmable trigger mode select */ +#define UART_FCTR_TX 0x80 /* Programmable trigger mode select */ + +/* + * LCR=0xBF, FCTR[6]=1 + */ +#define UART_EMSR 7 /* Extended Mode Select Register */ +#define UART_EMSR_FIFO_COUNT 0x01 /* Rx/Tx select */ +#define UART_EMSR_ALT_COUNT 0x02 /* Alternating count select */ + +/* + * The Intel XScale on-chip UARTs define these bits + */ +#define UART_IER_DMAE 0x80 /* DMA Requests Enable */ +#define UART_IER_UUE 0x40 /* UART Unit Enable */ +#define UART_IER_NRZE 0x20 /* NRZ coding Enable */ +#define UART_IER_RTOIE 0x10 /* Receiver Time Out Interrupt Enable */ + +#define UART_IIR_TOD 0x08 /* Character Timeout Indication Detected */ + +#define UART_FCR_PXAR1 0x00 /* receive FIFO threshold = 1 */ +#define UART_FCR_PXAR8 0x40 /* receive FIFO threshold = 8 */ +#define UART_FCR_PXAR16 0x80 /* receive FIFO threshold = 16 */ +#define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */ + +/* + * Intel MID on-chip HSU (High Speed UART) defined bits + */ +#define UART_FCR_HSU_64_1B 0x00 /* receive FIFO treshold = 1 */ +#define UART_FCR_HSU_64_16B 0x40 /* receive FIFO treshold = 16 */ +#define UART_FCR_HSU_64_32B 0x80 /* receive FIFO treshold = 32 */ +#define UART_FCR_HSU_64_56B 0xc0 /* receive FIFO treshold = 56 */ + +#define UART_FCR_HSU_16_1B 0x00 /* receive FIFO treshold = 1 */ 
+#define UART_FCR_HSU_16_4B 0x40 /* receive FIFO treshold = 4 */ +#define UART_FCR_HSU_16_8B 0x80 /* receive FIFO treshold = 8 */ +#define UART_FCR_HSU_16_14B 0xc0 /* receive FIFO treshold = 14 */ + +#define UART_FCR_HSU_64B_FIFO 0x20 /* chose 64 bytes FIFO */ +#define UART_FCR_HSU_16B_FIFO 0x00 /* chose 16 bytes FIFO */ + +#define UART_FCR_HALF_EMPT_TXI 0x00 /* trigger TX_EMPT IRQ for half empty */ +#define UART_FCR_FULL_EMPT_TXI 0x08 /* trigger TX_EMPT IRQ for full empty */ + +/* + * These register definitions are for the 16C950 + */ +#define UART_ASR 0x01 /* Additional Status Register */ +#define UART_RFL 0x03 /* Receiver FIFO level */ +#define UART_TFL 0x04 /* Transmitter FIFO level */ +#define UART_ICR 0x05 /* Index Control Register */ + +/* The 16950 ICR registers */ +#define UART_ACR 0x00 /* Additional Control Register */ +#define UART_CPR 0x01 /* Clock Prescalar Register */ +#define UART_TCR 0x02 /* Times Clock Register */ +#define UART_CKS 0x03 /* Clock Select Register */ +#define UART_TTL 0x04 /* Transmitter Interrupt Trigger Level */ +#define UART_RTL 0x05 /* Receiver Interrupt Trigger Level */ +#define UART_FCL 0x06 /* Flow Control Level Lower */ +#define UART_FCH 0x07 /* Flow Control Level Higher */ +#define UART_ID1 0x08 /* ID #1 */ +#define UART_ID2 0x09 /* ID #2 */ +#define UART_ID3 0x0A /* ID #3 */ +#define UART_REV 0x0B /* Revision */ +#define UART_CSR 0x0C /* Channel Software Reset */ +#define UART_NMR 0x0D /* Nine-bit Mode Register */ +#define UART_CTR 0xFF + +/* + * The 16C950 Additional Control Register + */ +#define UART_ACR_RXDIS 0x01 /* Receiver disable */ +#define UART_ACR_TXDIS 0x02 /* Transmitter disable */ +#define UART_ACR_DSRFC 0x04 /* DSR Flow Control */ +#define UART_ACR_TLENB 0x20 /* 950 trigger levels enable */ +#define UART_ACR_ICRRD 0x40 /* ICR Read enable */ +#define UART_ACR_ASREN 0x80 /* Additional status enable */ + + + +/* + * These definitions are for the RSA-DV II/S card, from + * + * Kiyokazu SUTO <suto@ks-and-ks.ne.jp> + */ + +#define UART_RSA_BASE (-8) + +#define UART_RSA_MSR ((UART_RSA_BASE) + 0) /* I/O: Mode Select Register */ + +#define UART_RSA_MSR_SWAP (1 << 0) /* Swap low/high 8 bytes in I/O port addr */ +#define UART_RSA_MSR_FIFO (1 << 2) /* Enable the external FIFO */ +#define UART_RSA_MSR_FLOW (1 << 3) /* Enable the auto RTS/CTS flow control */ +#define UART_RSA_MSR_ITYP (1 << 4) /* Level (1) / Edge triger (0) */ + +#define UART_RSA_IER ((UART_RSA_BASE) + 1) /* I/O: Interrupt Enable Register */ + +#define UART_RSA_IER_Rx_FIFO_H (1 << 0) /* Enable Rx FIFO half full int. */ +#define UART_RSA_IER_Tx_FIFO_H (1 << 1) /* Enable Tx FIFO half full int. */ +#define UART_RSA_IER_Tx_FIFO_E (1 << 2) /* Enable Tx FIFO empty int. 
*/ +#define UART_RSA_IER_Rx_TOUT (1 << 3) /* Enable char receive timeout int */ +#define UART_RSA_IER_TIMER (1 << 4) /* Enable timer interrupt */ + +#define UART_RSA_SRR ((UART_RSA_BASE) + 2) /* IN: Status Read Register */ + +#define UART_RSA_SRR_Tx_FIFO_NEMP (1 << 0) /* Tx FIFO is not empty (1) */ +#define UART_RSA_SRR_Tx_FIFO_NHFL (1 << 1) /* Tx FIFO is not half full (1) */ +#define UART_RSA_SRR_Tx_FIFO_NFUL (1 << 2) /* Tx FIFO is not full (1) */ +#define UART_RSA_SRR_Rx_FIFO_NEMP (1 << 3) /* Rx FIFO is not empty (1) */ +#define UART_RSA_SRR_Rx_FIFO_NHFL (1 << 4) /* Rx FIFO is not half full (1) */ +#define UART_RSA_SRR_Rx_FIFO_NFUL (1 << 5) /* Rx FIFO is not full (1) */ +#define UART_RSA_SRR_Rx_TOUT (1 << 6) /* Character reception timeout occurred (1) */ +#define UART_RSA_SRR_TIMER (1 << 7) /* Timer interrupt occurred */ + +#define UART_RSA_FRR ((UART_RSA_BASE) + 2) /* OUT: FIFO Reset Register */ + +#define UART_RSA_TIVSR ((UART_RSA_BASE) + 3) /* I/O: Timer Interval Value Set Register */ + +#define UART_RSA_TCR ((UART_RSA_BASE) + 4) /* OUT: Timer Control Register */ + +#define UART_RSA_TCR_SWITCH (1 << 0) /* Timer on */ + +/* + * The RSA DSV/II board has two fixed clock frequencies. One is the + * standard rate, and the other is 8 times faster. + */ +#define SERIAL_RSA_BAUD_BASE (921600) +#define SERIAL_RSA_BAUD_BASE_LO (SERIAL_RSA_BAUD_BASE / 8) + +/* + * Extra serial register definitions for the internal UARTs + * in TI OMAP processors. + */ +#define UART_OMAP_MDR1 0x08 /* Mode definition register */ +#define UART_OMAP_MDR2 0x09 /* Mode definition register 2 */ +#define UART_OMAP_SCR 0x10 /* Supplementary control register */ +#define UART_OMAP_SSR 0x11 /* Supplementary status register */ +#define UART_OMAP_EBLR 0x12 /* BOF length register */ +#define UART_OMAP_OSC_12M_SEL 0x13 /* OMAP1510 12MHz osc select */ +#define UART_OMAP_MVER 0x14 /* Module version register */ +#define UART_OMAP_SYSC 0x15 /* System configuration register */ +#define UART_OMAP_SYSS 0x16 /* System status register */ +#define UART_OMAP_WER 0x17 /* Wake-up enable register */ + +/* + * These are the definitions for the MDR1 register + */ +#define UART_OMAP_MDR1_16X_MODE 0x00 /* UART 16x mode */ +#define UART_OMAP_MDR1_SIR_MODE 0x01 /* SIR mode */ +#define UART_OMAP_MDR1_16X_ABAUD_MODE 0x02 /* UART 16x auto-baud */ +#define UART_OMAP_MDR1_13X_MODE 0x03 /* UART 13x mode */ +#define UART_OMAP_MDR1_MIR_MODE 0x04 /* MIR mode */ +#define UART_OMAP_MDR1_FIR_MODE 0x05 /* FIR mode */ +#define UART_OMAP_MDR1_CIR_MODE 0x06 /* CIR mode */ +#define UART_OMAP_MDR1_DISABLE 0x07 /* Disable (default state) */ + +#endif /* _LINUX_SERIAL_REG_H */ + diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h new file mode 100644 index 00000000..78779074 --- /dev/null +++ b/include/linux/serial_sci.h @@ -0,0 +1,161 @@ +#ifndef __LINUX_SERIAL_SCI_H +#define __LINUX_SERIAL_SCI_H + +#include <linux/serial_core.h> +#include <linux/sh_dma.h> + +/* + * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) + */ + +#define SCIx_NOT_SUPPORTED (-1) + +enum { + SCBRR_ALGO_1, /* ((clk + 16 * bps) / (16 * bps) - 1) */ + SCBRR_ALGO_2, /* ((clk + 16 * bps) / (32 * bps) - 1) */ + SCBRR_ALGO_3, /* (((clk * 2) + 16 * bps) / (16 * bps) - 1) */ + SCBRR_ALGO_4, /* (((clk * 2) + 16 * bps) / (32 * bps) - 1) */ + SCBRR_ALGO_5, /* (((clk * 1000 / 32) / bps) - 1) */ +}; + +#define SCSCR_TIE (1 << 7) +#define SCSCR_RIE (1 << 6) +#define SCSCR_TE (1 << 5) +#define SCSCR_RE (1 << 4) +#define SCSCR_REIE (1 << 3) /* not supported by all 
parts */ +#define SCSCR_TOIE (1 << 2) /* not supported by all parts */ +#define SCSCR_CKE1 (1 << 1) +#define SCSCR_CKE0 (1 << 0) + +/* SCxSR SCI */ +#define SCI_TDRE 0x80 +#define SCI_RDRF 0x40 +#define SCI_ORER 0x20 +#define SCI_FER 0x10 +#define SCI_PER 0x08 +#define SCI_TEND 0x04 + +#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER) + +/* SCxSR SCIF */ +#define SCIF_ER 0x0080 +#define SCIF_TEND 0x0040 +#define SCIF_TDFE 0x0020 +#define SCIF_BRK 0x0010 +#define SCIF_FER 0x0008 +#define SCIF_PER 0x0004 +#define SCIF_RDF 0x0002 +#define SCIF_DR 0x0001 + +#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK) + +/* SCSPTR, optional */ +#define SCSPTR_RTSIO (1 << 7) +#define SCSPTR_CTSIO (1 << 5) + +/* Offsets into the sci_port->irqs array */ +enum { + SCIx_ERI_IRQ, + SCIx_RXI_IRQ, + SCIx_TXI_IRQ, + SCIx_BRI_IRQ, + SCIx_NR_IRQS, + + SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */ +}; + +/* Offsets into the sci_port->gpios array */ +enum { + SCIx_SCK, + SCIx_RXD, + SCIx_TXD, + SCIx_CTS, + SCIx_RTS, + + SCIx_NR_FNS, +}; + +enum { + SCIx_PROBE_REGTYPE, + + SCIx_SCI_REGTYPE, + SCIx_IRDA_REGTYPE, + SCIx_SCIFA_REGTYPE, + SCIx_SCIFB_REGTYPE, + SCIx_SH2_SCIF_FIFODATA_REGTYPE, + SCIx_SH3_SCIF_REGTYPE, + SCIx_SH4_SCIF_REGTYPE, + SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, + SCIx_SH4_SCIF_FIFODATA_REGTYPE, + SCIx_SH7705_SCIF_REGTYPE, + + SCIx_NR_REGTYPES, +}; + +#define SCIx_IRQ_MUXED(irq) \ +{ \ + [SCIx_ERI_IRQ] = (irq), \ + [SCIx_RXI_IRQ] = (irq), \ + [SCIx_TXI_IRQ] = (irq), \ + [SCIx_BRI_IRQ] = (irq), \ +} + +#define SCIx_IRQ_IS_MUXED(port) \ + ((port)->cfg->irqs[SCIx_ERI_IRQ] == \ + (port)->cfg->irqs[SCIx_RXI_IRQ]) || \ + ((port)->cfg->irqs[SCIx_ERI_IRQ] && \ + !(port)->cfg->irqs[SCIx_RXI_IRQ]) +/* + * SCI register subset common for all port types. + * Not all registers will exist on all parts. + */ +enum { + SCSMR, SCBRR, SCSCR, SCxSR, + SCFCR, SCFDR, SCxTDR, SCxRDR, + SCLSR, SCTFDR, SCRFDR, SCSPTR, + + SCIx_NR_REGS, +}; + +struct device; + +struct plat_sci_port_ops { + void (*init_pins)(struct uart_port *, unsigned int cflag); +}; + +/* + * Port-specific capabilities + */ +#define SCIx_HAVE_RTSCTS (1 << 0) + +/* + * Platform device specific platform_data struct + */ +struct plat_sci_port { + unsigned long mapbase; /* resource base */ + unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */ + unsigned int gpios[SCIx_NR_FNS]; /* SCK, RXD, TXD, CTS, RTS */ + unsigned int type; /* SCI / SCIF / IRDA */ + upf_t flags; /* UPF_* flags */ + unsigned long capabilities; /* Port features/capabilities */ + + unsigned int scbrr_algo_id; /* SCBRR calculation algo */ + unsigned int scscr; /* SCSCR initialization */ + + /* + * Platform overrides if necessary, defaults otherwise. + */ + int overrun_bit; + unsigned int error_mask; + + int port_reg; + unsigned char regshift; + unsigned char regtype; + + struct plat_sci_port_ops *ops; + + unsigned int dma_slave_tx; + unsigned int dma_slave_rx; +}; + +#endif /* __LINUX_SERIAL_SCI_H */ diff --git a/include/linux/serio.h b/include/linux/serio.h new file mode 100644 index 00000000..ca82861b --- /dev/null +++ b/include/linux/serio.h @@ -0,0 +1,206 @@ +#ifndef _SERIO_H +#define _SERIO_H + +/* + * Copyright (C) 1999-2002 Vojtech Pavlik +* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/ioctl.h> + +#define SPIOCSTYPE _IOW('q', 0x01, unsigned long) + +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +struct serio { + void *port_data; + + char name[32]; + char phys[32]; + + bool manual_bind; + + struct serio_device_id id; + + spinlock_t lock; /* protects critical sections from port's interrupt handler */ + + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); + void (*close)(struct serio *); + int (*start)(struct serio *); + void (*stop)(struct serio *); + + struct serio *parent; + struct list_head child_node; /* Entry in parent->children list */ + struct list_head children; + unsigned int depth; /* level of nesting in serio hierarchy */ + + struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */ + struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */ + + struct device dev; + + struct list_head node; +}; +#define to_serio_port(d) container_of(d, struct serio, dev) + +struct serio_driver { + const char *description; + + const struct serio_device_id *id_table; + bool manual_bind; + + void (*write_wakeup)(struct serio *); + irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); + int (*connect)(struct serio *, struct serio_driver *drv); + int (*reconnect)(struct serio *); + void (*disconnect)(struct serio *); + void (*cleanup)(struct serio *); + + struct device_driver driver; +}; +#define to_serio_driver(d) container_of(d, struct serio_driver, driver) + +int serio_open(struct serio *serio, struct serio_driver *drv); +void serio_close(struct serio *serio); +void serio_rescan(struct serio *serio); +void serio_reconnect(struct serio *serio); +irqreturn_t serio_interrupt(struct serio *serio, unsigned char data, unsigned int flags); + +void __serio_register_port(struct serio *serio, struct module *owner); + +/* use a define to avoid include chaining to get THIS_MODULE */ +#define serio_register_port(serio) \ + __serio_register_port(serio, THIS_MODULE) + +void serio_unregister_port(struct serio *serio); +void serio_unregister_child_port(struct serio *serio); + +int __must_check __serio_register_driver(struct serio_driver *drv, + struct module *owner, const char *mod_name); + +/* use a define to avoid include chaining to get THIS_MODULE & friends */ +#define serio_register_driver(drv) \ + __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME) + +void serio_unregister_driver(struct serio_driver *drv); + +static inline int serio_write(struct serio *serio, unsigned char data) +{ + if (serio->write) + return serio->write(serio, data); + else + return -1; +} + +static inline void serio_drv_write_wakeup(struct serio *serio) +{ + if (serio->drv && serio->drv->write_wakeup) + serio->drv->write_wakeup(serio); +} + +/* + * Use the following functions to manipulate serio's per-port + * driver-specific data. 
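+ *
+ * A driver's connect() handler typically allocates its state and
+ * stashes it here; a sketch (struct foo_data is an assumed name):
+ *
+ *	static int foo_connect(struct serio *serio, struct serio_driver *drv)
+ *	{
+ *		struct foo_data *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ *		int err;
+ *
+ *		if (!priv)
+ *			return -ENOMEM;
+ *		serio_set_drvdata(serio, priv);
+ *		err = serio_open(serio, drv);
+ *		if (err)
+ *			kfree(priv);
+ *		return err;
+ *	}
+ *
+ * disconnect() then fetches the same pointer back with
+ * serio_get_drvdata() before freeing it.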
+ */ +static inline void *serio_get_drvdata(struct serio *serio) +{ + return dev_get_drvdata(&serio->dev); +} + +static inline void serio_set_drvdata(struct serio *serio, void *data) +{ + dev_set_drvdata(&serio->dev, data); +} + +/* + * Use the following functions to protect critical sections in + * driver code from port's interrupt handler + */ +static inline void serio_pause_rx(struct serio *serio) +{ + spin_lock_irq(&serio->lock); +} + +static inline void serio_continue_rx(struct serio *serio) +{ + spin_unlock_irq(&serio->lock); +} + +#endif + +/* + * bit masks for use in "interrupt" flags (3rd argument) + */ +#define SERIO_TIMEOUT 1 +#define SERIO_PARITY 2 +#define SERIO_FRAME 4 + +/* + * Serio types + */ +#define SERIO_XT 0x00 +#define SERIO_8042 0x01 +#define SERIO_RS232 0x02 +#define SERIO_HIL_MLC 0x03 +#define SERIO_PS_PSTHRU 0x05 +#define SERIO_8042_XL 0x06 + +/* + * Serio protocols + */ +#define SERIO_UNKNOWN 0x00 +#define SERIO_MSC 0x01 +#define SERIO_SUN 0x02 +#define SERIO_MS 0x03 +#define SERIO_MP 0x04 +#define SERIO_MZ 0x05 +#define SERIO_MZP 0x06 +#define SERIO_MZPP 0x07 +#define SERIO_VSXXXAA 0x08 +#define SERIO_SUNKBD 0x10 +#define SERIO_WARRIOR 0x18 +#define SERIO_SPACEORB 0x19 +#define SERIO_MAGELLAN 0x1a +#define SERIO_SPACEBALL 0x1b +#define SERIO_GUNZE 0x1c +#define SERIO_IFORCE 0x1d +#define SERIO_STINGER 0x1e +#define SERIO_NEWTON 0x1f +#define SERIO_STOWAWAY 0x20 +#define SERIO_H3600 0x21 +#define SERIO_PS2SER 0x22 +#define SERIO_TWIDKBD 0x23 +#define SERIO_TWIDJOY 0x24 +#define SERIO_HIL 0x25 +#define SERIO_SNES232 0x26 +#define SERIO_SEMTECH 0x27 +#define SERIO_LKKBD 0x28 +#define SERIO_ELO 0x29 +#define SERIO_MICROTOUCH 0x30 +#define SERIO_PENMOUNT 0x31 +#define SERIO_TOUCHRIGHT 0x32 +#define SERIO_TOUCHWIN 0x33 +#define SERIO_TAOSEVM 0x34 +#define SERIO_FUJITSU 0x35 +#define SERIO_ZHENHUA 0x36 +#define SERIO_INEXIO 0x37 +#define SERIO_TOUCHIT213 0x38 +#define SERIO_W8001 0x39 +#define SERIO_DYNAPRO 0x3a +#define SERIO_HAMPSHIRE 0x3b +#define SERIO_PS2MULT 0x3c +#define SERIO_TSC40 0x3d + +#endif diff --git a/include/linux/sfi.h b/include/linux/sfi.h new file mode 100644 index 00000000..fe817918 --- /dev/null +++ b/include/linux/sfi.h @@ -0,0 +1,206 @@ +/* sfi.h Simple Firmware Interface */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _LINUX_SFI_H +#define _LINUX_SFI_H + +/* Table signatures reserved by the SFI specification */ +#define SFI_SIG_SYST "SYST" +#define SFI_SIG_FREQ "FREQ" +#define SFI_SIG_IDLE "IDLE" +#define SFI_SIG_CPUS "CPUS" +#define SFI_SIG_MTMR "MTMR" +#define SFI_SIG_MRTC "MRTC" +#define SFI_SIG_MMAP "MMAP" +#define SFI_SIG_APIC "APIC" +#define SFI_SIG_XSDT "XSDT" +#define SFI_SIG_WAKE "WAKE" +#define SFI_SIG_DEVS "DEVS" +#define SFI_SIG_GPIO "GPIO" + +#define SFI_SIGNATURE_SIZE 4 +#define SFI_OEM_ID_SIZE 6 +#define SFI_OEM_TABLE_ID_SIZE 8 + +#define SFI_NAME_LEN 16 + +#define SFI_SYST_SEARCH_BEGIN 0x000E0000 +#define SFI_SYST_SEARCH_END 0x000FFFFF + +#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \ + ((ptable->header.len - sizeof(struct sfi_table_header)) / \ + (sizeof(entry_type))) +/* + * Table structures must be byte-packed to match the SFI specification, + * as they are provided by the BIOS. 
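+ *
+ * A handler registered through sfi_table_parse() can then walk the
+ * entries with SFI_GET_NUM_ENTRIES(); a sketch (the parser name is an
+ * assumption):
+ *
+ *	static int __init foo_parse_cpus(struct sfi_table_header *table)
+ *	{
+ *		struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
+ *		struct sfi_cpu_table_entry *pentry =
+ *			(struct sfi_cpu_table_entry *)sb->pentry;
+ *		int i, num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
+ *
+ *		for (i = 0; i < num; i++, pentry++)
+ *			pr_info("APIC id 0x%08x\n", pentry->apic_id);
+ *		return 0;
+ *	}
+ *
+ *	sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, foo_parse_cpus);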
+ */ +struct sfi_table_header { + char sig[SFI_SIGNATURE_SIZE]; + u32 len; + u8 rev; + u8 csum; + char oem_id[SFI_OEM_ID_SIZE]; + char oem_table_id[SFI_OEM_TABLE_ID_SIZE]; +} __packed; + +struct sfi_table_simple { + struct sfi_table_header header; + u64 pentry[1]; +} __packed; + +/* Comply with UEFI spec 2.1 */ +struct sfi_mem_entry { + u32 type; + u64 phys_start; + u64 virt_start; + u64 pages; + u64 attrib; +} __packed; + +struct sfi_cpu_table_entry { + u32 apic_id; +} __packed; + +struct sfi_cstate_table_entry { + u32 hint; /* MWAIT hint */ + u32 latency; /* latency in ms */ +} __packed; + +struct sfi_apic_table_entry { + u64 phys_addr; /* phy base addr for APIC reg */ +} __packed; + +struct sfi_freq_table_entry { + u32 freq_mhz; /* in MHZ */ + u32 latency; /* transition latency in ms */ + u32 ctrl_val; /* value to write to PERF_CTL */ +} __packed; + +struct sfi_wake_table_entry { + u64 phys_addr; /* pointer to where the wake vector locates */ +} __packed; + +struct sfi_timer_table_entry { + u64 phys_addr; /* phy base addr for the timer */ + u32 freq_hz; /* in HZ */ + u32 irq; +} __packed; + +struct sfi_rtc_table_entry { + u64 phys_addr; /* phy base addr for the RTC */ + u32 irq; +} __packed; + +struct sfi_device_table_entry { + u8 type; /* bus type, I2C, SPI or ...*/ +#define SFI_DEV_TYPE_SPI 0 +#define SFI_DEV_TYPE_I2C 1 +#define SFI_DEV_TYPE_UART 2 +#define SFI_DEV_TYPE_HSI 3 +#define SFI_DEV_TYPE_IPC 4 + + u8 host_num; /* attached to host 0, 1...*/ + u16 addr; + u8 irq; + u32 max_freq; + char name[SFI_NAME_LEN]; +} __packed; + +struct sfi_gpio_table_entry { + char controller_name[SFI_NAME_LEN]; + u16 pin_no; + char pin_name[SFI_NAME_LEN]; +} __packed; + +typedef int (*sfi_table_handler) (struct sfi_table_header *table); + +#ifdef CONFIG_SFI +extern void __init sfi_init(void); +extern int __init sfi_platform_init(void); +extern void __init sfi_init_late(void); +extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id, + sfi_table_handler handler); + +extern int sfi_disabled; +static inline void disable_sfi(void) +{ + sfi_disabled = 1; +} + +#else /* !CONFIG_SFI */ + +static inline void sfi_init(void) +{ +} + +static inline void sfi_init_late(void) +{ +} + +#define sfi_disabled 0 + +static inline int sfi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + sfi_table_handler handler) +{ + return -1; +} + +#endif /* !CONFIG_SFI */ + +#endif /*_LINUX_SFI_H*/ diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h new file mode 100644 index 00000000..c4a5a8cd --- /dev/null +++ b/include/linux/sfi_acpi.h @@ -0,0 +1,93 @@ +/* sfi.h Simple Firmware Interface */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _LINUX_SFI_ACPI_H +#define _LINUX_SFI_ACPI_H + +#ifdef CONFIG_SFI +#include <acpi/acpi.h> /* struct acpi_table_header */ + +extern int sfi_acpi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + int (*handler)(struct acpi_table_header *)); + +static inline int acpi_sfi_table_parse(char *signature, + int (*handler)(struct acpi_table_header *)) +{ + if (!acpi_table_parse(signature, handler)) + return 0; + + return sfi_acpi_table_parse(signature, NULL, NULL, handler); +} +#else /* !CONFIG_SFI */ + +static inline int sfi_acpi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + int (*handler)(struct acpi_table_header *)) +{ + return -1; +} + +static inline int acpi_sfi_table_parse(char *signature, + int (*handler)(struct acpi_table_header *)) +{ + return acpi_table_parse(signature, handler); +} +#endif /* !CONFIG_SFI */ + +#endif /*_LINUX_SFI_ACPI_H*/ diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h new file mode 100644 index 00000000..0a9d8f2a --- /dev/null +++ b/include/linux/sh_clk.h @@ -0,0 +1,161 @@ +#ifndef __SH_CLOCK_H +#define __SH_CLOCK_H + +#include <linux/list.h> +#include <linux/seq_file.h> +#include <linux/cpufreq.h> +#include <linux/types.h> +#include <linux/kref.h> +#include <linux/clk.h> +#include <linux/err.h> + +struct clk; + +struct clk_mapping { + phys_addr_t phys; + void __iomem *base; + unsigned long len; + struct kref ref; +}; + + +struct sh_clk_ops { +#ifdef CONFIG_SH_CLK_CPG_LEGACY + void (*init)(struct clk *clk); +#endif + int (*enable)(struct clk *clk); + void (*disable)(struct clk *clk); + unsigned long (*recalc)(struct clk *clk); + int (*set_rate)(struct clk *clk, unsigned long rate); + int (*set_parent)(struct clk *clk, struct clk *parent); + long (*round_rate)(struct clk *clk, unsigned long rate); +}; + +struct clk { + struct list_head node; + struct clk *parent; + struct clk 
**parent_table; /* list of parents to */ + unsigned short parent_num; /* choose between */ + unsigned char src_shift; /* source clock field in the */ + unsigned char src_width; /* configuration register */ + struct sh_clk_ops *ops; + + struct list_head children; + struct list_head sibling; /* node for children */ + + int usecount; + + unsigned long rate; + unsigned long flags; + + void __iomem *enable_reg; + unsigned int enable_bit; + void __iomem *mapped_reg; + + unsigned long arch_flags; + void *priv; + struct clk_mapping *mapping; + struct cpufreq_frequency_table *freq_table; + unsigned int nr_freqs; +}; + +#define CLK_ENABLE_ON_INIT (1 << 0) + +/* drivers/sh/clk.c */ +unsigned long followparent_recalc(struct clk *); +void recalculate_root_clocks(void); +void propagate_rate(struct clk *); +int clk_reparent(struct clk *child, struct clk *parent); +int clk_register(struct clk *); +void clk_unregister(struct clk *); +void clk_enable_init_clocks(void); + +struct clk_div_mult_table { + unsigned int *divisors; + unsigned int nr_divisors; + unsigned int *multipliers; + unsigned int nr_multipliers; +}; + +struct cpufreq_frequency_table; +void clk_rate_table_build(struct clk *clk, + struct cpufreq_frequency_table *freq_table, + int nr_freqs, + struct clk_div_mult_table *src_table, + unsigned long *bitmap); + +long clk_rate_table_round(struct clk *clk, + struct cpufreq_frequency_table *freq_table, + unsigned long rate); + +int clk_rate_table_find(struct clk *clk, + struct cpufreq_frequency_table *freq_table, + unsigned long rate); + +long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, + unsigned int div_max, unsigned long rate); + +long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min, + unsigned int mult_max, unsigned long rate); + +long clk_round_parent(struct clk *clk, unsigned long target, + unsigned long *best_freq, unsigned long *parent_freq, + unsigned int div_min, unsigned int div_max); + +#define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \ +{ \ + .parent = _parent, \ + .enable_reg = (void __iomem *)_enable_reg, \ + .enable_bit = _enable_bit, \ + .flags = _flags, \ +} + +int sh_clk_mstp32_register(struct clk *clks, int nr); + +#define SH_CLK_DIV4(_parent, _reg, _shift, _div_bitmap, _flags) \ +{ \ + .parent = _parent, \ + .enable_reg = (void __iomem *)_reg, \ + .enable_bit = _shift, \ + .arch_flags = _div_bitmap, \ + .flags = _flags, \ +} + +struct clk_div4_table { + struct clk_div_mult_table *div_mult_table; + void (*kick)(struct clk *clk); +}; + +int sh_clk_div4_register(struct clk *clks, int nr, + struct clk_div4_table *table); +int sh_clk_div4_enable_register(struct clk *clks, int nr, + struct clk_div4_table *table); +int sh_clk_div4_reparent_register(struct clk *clks, int nr, + struct clk_div4_table *table); + +#define SH_CLK_DIV6_EXT(_reg, _flags, _parents, \ + _num_parents, _src_shift, _src_width) \ +{ \ + .enable_reg = (void __iomem *)_reg, \ + .flags = _flags, \ + .parent_table = _parents, \ + .parent_num = _num_parents, \ + .src_shift = _src_shift, \ + .src_width = _src_width, \ +} + +#define SH_CLK_DIV6(_parent, _reg, _flags) \ +{ \ + .parent = _parent, \ + .enable_reg = (void __iomem *)_reg, \ + .flags = _flags, \ +} + +int sh_clk_div6_register(struct clk *clks, int nr); +int sh_clk_div6_reparent_register(struct clk *clks, int nr); + +#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk } +#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk } +#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = 
_did, .clk = _clk } + +#endif /* __SH_CLOCK_H */ diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h new file mode 100644 index 00000000..425450b9 --- /dev/null +++ b/include/linux/sh_dma.h @@ -0,0 +1,113 @@ +/* + * Header for the new SH dmaengine driver + * + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef SH_DMA_H +#define SH_DMA_H + +#include <linux/list.h> +#include <linux/dmaengine.h> + +/* Used by slave DMA clients to request DMA to/from a specific peripheral */ +struct sh_dmae_slave { + unsigned int slave_id; /* Set by the platform */ + struct device *dma_dev; /* Set by the platform */ + const struct sh_dmae_slave_config *config; /* Set by the driver */ +}; + +struct sh_dmae_regs { + u32 sar; /* SAR / source address */ + u32 dar; /* DAR / destination address */ + u32 tcr; /* TCR / transfer count */ +}; + +struct sh_desc { + struct sh_dmae_regs hw; + struct list_head node; + struct dma_async_tx_descriptor async_tx; + enum dma_transfer_direction direction; + dma_cookie_t cookie; + size_t partial; + int chunks; + int mark; +}; + +struct sh_dmae_slave_config { + unsigned int slave_id; + dma_addr_t addr; + u32 chcr; + char mid_rid; +}; + +struct sh_dmae_channel { + unsigned int offset; + unsigned int dmars; + unsigned int dmars_bit; + unsigned int chclr_offset; +}; + +struct sh_dmae_pdata { + const struct sh_dmae_slave_config *slave; + int slave_num; + const struct sh_dmae_channel *channel; + int channel_num; + unsigned int ts_low_shift; + unsigned int ts_low_mask; + unsigned int ts_high_shift; + unsigned int ts_high_mask; + const unsigned int *ts_shift; + int ts_shift_num; + u16 dmaor_init; + unsigned int chcr_offset; + u32 chcr_ie_bit; + + unsigned int dmaor_is_32bit:1; + unsigned int needs_tend_set:1; + unsigned int no_dmars:1; + unsigned int chclr_present:1; + unsigned int slave_only:1; +}; + +/* DMA register */ +#define SAR 0x00 +#define DAR 0x04 +#define TCR 0x08 +#define CHCR 0x0C +#define DMAOR 0x40 + +#define TEND 0x18 /* USB-DMAC */ + +/* DMAOR definitions */ +#define DMAOR_AE 0x00000004 +#define DMAOR_NMIF 0x00000002 +#define DMAOR_DME 0x00000001 + +/* Definitions for the SuperH DMAC */ +#define REQ_L 0x00000000 +#define REQ_E 0x00080000 +#define RACK_H 0x00000000 +#define RACK_L 0x00040000 +#define ACK_R 0x00000000 +#define ACK_W 0x00020000 +#define ACK_H 0x00000000 +#define ACK_L 0x00010000 +#define DM_INC 0x00004000 +#define DM_DEC 0x00008000 +#define DM_FIX 0x0000c000 +#define SM_INC 0x00001000 +#define SM_DEC 0x00002000 +#define SM_FIX 0x00003000 +#define RS_IN 0x00000200 +#define RS_OUT 0x00000300 +#define TS_BLK 0x00000040 +#define TM_BUR 0x00000020 +#define CHCR_DE 0x00000001 +#define CHCR_TE 0x00000002 +#define CHCR_IE 0x00000004 + +#endif diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h new file mode 100644 index 00000000..b17d765d --- /dev/null +++ b/include/linux/sh_eth.h @@ -0,0 +1,26 @@ +#ifndef __ASM_SH_ETH_H__ +#define __ASM_SH_ETH_H__ + +#include <linux/phy.h> + +enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; +enum { + SH_ETH_REG_GIGABIT, + SH_ETH_REG_FAST_SH4, + SH_ETH_REG_FAST_SH3_SH2 +}; + +struct sh_eth_plat_data { + int phy; + int edmac_endian; + int register_type; + phy_interface_t phy_interface; + void (*set_mdio_gate)(void *addr); + + unsigned char mac_addr[6]; + unsigned no_ether_link:1; + unsigned 
ether_link_active_low:1; + unsigned needs_init:1; +}; + +#endif diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h new file mode 100644 index 00000000..6aed0805 --- /dev/null +++ b/include/linux/sh_intc.h @@ -0,0 +1,150 @@ +#ifndef __SH_INTC_H +#define __SH_INTC_H + +#include <linux/ioport.h> + +#ifdef CONFIG_SUPERH +#define INTC_NR_IRQS 512 +#else +#define INTC_NR_IRQS 1024 +#endif + +/* + * Convert back and forth between INTEVT and IRQ values. + */ +#ifdef CONFIG_CPU_HAS_INTEVT +#define evt2irq(evt) (((evt) >> 5) - 16) +#define irq2evt(irq) (((irq) + 16) << 5) +#else +#define evt2irq(evt) (evt) +#define irq2evt(irq) (irq) +#endif + +typedef unsigned char intc_enum; + +struct intc_vect { + intc_enum enum_id; + unsigned short vect; +}; + +#define INTC_VECT(enum_id, vect) { enum_id, vect } +#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq)) + +struct intc_group { + intc_enum enum_id; + intc_enum enum_ids[32]; +}; + +#define INTC_GROUP(enum_id, ids...) { enum_id, { ids } } + +struct intc_subgroup { + unsigned long reg, reg_width; + intc_enum parent_id; + intc_enum enum_ids[32]; +}; + +struct intc_mask_reg { + unsigned long set_reg, clr_reg, reg_width; + intc_enum enum_ids[32]; +#ifdef CONFIG_INTC_BALANCING + unsigned long dist_reg; +#endif +#ifdef CONFIG_SMP + unsigned long smp; +#endif +}; + +struct intc_prio_reg { + unsigned long set_reg, clr_reg, reg_width, field_width; + intc_enum enum_ids[16]; +#ifdef CONFIG_SMP + unsigned long smp; +#endif +}; + +struct intc_sense_reg { + unsigned long reg, reg_width, field_width; + intc_enum enum_ids[16]; +}; + +#ifdef CONFIG_INTC_BALANCING +#define INTC_SMP_BALANCING(reg) .dist_reg = (reg) +#else +#define INTC_SMP_BALANCING(reg) +#endif + +#ifdef CONFIG_SMP +#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8) +#else +#define INTC_SMP(stride, nr) +#endif + +struct intc_hw_desc { + struct intc_vect *vectors; + unsigned int nr_vectors; + struct intc_group *groups; + unsigned int nr_groups; + struct intc_mask_reg *mask_regs; + unsigned int nr_mask_regs; + struct intc_prio_reg *prio_regs; + unsigned int nr_prio_regs; + struct intc_sense_reg *sense_regs; + unsigned int nr_sense_regs; + struct intc_mask_reg *ack_regs; + unsigned int nr_ack_regs; + struct intc_subgroup *subgroups; + unsigned int nr_subgroups; +}; + +#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 
0 : sizeof(a)/sizeof(*a) + +#define INTC_HW_DESC(vectors, groups, mask_regs, \ + prio_regs, sense_regs, ack_regs) \ +{ \ + _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \ + _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \ + _INTC_ARRAY(sense_regs), _INTC_ARRAY(ack_regs), \ +} + +struct intc_desc { + char *name; + struct resource *resource; + unsigned int num_resources; + intc_enum force_enable; + intc_enum force_disable; + bool skip_syscore_suspend; + struct intc_hw_desc hw; +}; + +#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \ + mask_regs, prio_regs, sense_regs) \ +struct intc_desc symbol __initdata = { \ + .name = chipname, \ + .hw = INTC_HW_DESC(vectors, groups, mask_regs, \ + prio_regs, sense_regs, NULL), \ +} + +#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \ + mask_regs, prio_regs, sense_regs, ack_regs) \ +struct intc_desc symbol __initdata = { \ + .name = chipname, \ + .hw = INTC_HW_DESC(vectors, groups, mask_regs, \ + prio_regs, sense_regs, ack_regs), \ +} + +int register_intc_controller(struct intc_desc *desc); +void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs); +int intc_set_priority(unsigned int irq, unsigned int prio); +int intc_irq_lookup(const char *chipname, intc_enum enum_id); +void intc_finalize(void); + +#ifdef CONFIG_INTC_USERIMASK +int register_intc_userimask(unsigned long addr); +#else +static inline int register_intc_userimask(unsigned long addr) +{ + return 0; +} +#endif + +#endif /* __SH_INTC_H */ diff --git a/include/linux/sh_pfc.h b/include/linux/sh_pfc.h new file mode 100644 index 00000000..5c15aed9 --- /dev/null +++ b/include/linux/sh_pfc.h @@ -0,0 +1,203 @@ +/* + * SuperH Pin Function Controller Support + * + * Copyright (c) 2008 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef __SH_PFC_H +#define __SH_PFC_H + +#include <asm-generic/gpio.h> + +typedef unsigned short pinmux_enum_t; +typedef unsigned short pinmux_flag_t; + +#define PINMUX_TYPE_NONE 0 +#define PINMUX_TYPE_FUNCTION 1 +#define PINMUX_TYPE_GPIO 2 +#define PINMUX_TYPE_OUTPUT 3 +#define PINMUX_TYPE_INPUT 4 +#define PINMUX_TYPE_INPUT_PULLUP 5 +#define PINMUX_TYPE_INPUT_PULLDOWN 6 + +#define PINMUX_FLAG_TYPE (0x7) +#define PINMUX_FLAG_WANT_PULLUP (1 << 3) +#define PINMUX_FLAG_WANT_PULLDOWN (1 << 4) + +#define PINMUX_FLAG_DBIT_SHIFT 5 +#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT) +#define PINMUX_FLAG_DREG_SHIFT 10 +#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT) + +struct pinmux_gpio { + pinmux_enum_t enum_id; + pinmux_flag_t flags; +}; + +#define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark } +#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0 + +struct pinmux_cfg_reg { + unsigned long reg, reg_width, field_width; + unsigned long *cnt; + pinmux_enum_t *enum_ids; + unsigned long *var_field_width; +}; + +#define PINMUX_CFG_REG(name, r, r_width, f_width) \ + .reg = r, .reg_width = r_width, .field_width = f_width, \ + .cnt = (unsigned long [r_width / f_width]) {}, \ + .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) + +#define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) 
\ + .reg = r, .reg_width = r_width, \ + .cnt = (unsigned long [r_width]) {}, \ + .var_field_width = (unsigned long [r_width]) { var_fw0, var_fwn, 0 }, \ + .enum_ids = (pinmux_enum_t []) + +struct pinmux_data_reg { + unsigned long reg, reg_width, reg_shadow; + pinmux_enum_t *enum_ids; + void __iomem *mapped_reg; +}; + +#define PINMUX_DATA_REG(name, r, r_width) \ + .reg = r, .reg_width = r_width, \ + .enum_ids = (pinmux_enum_t [r_width]) \ + +struct pinmux_irq { + int irq; + pinmux_enum_t *enum_ids; +}; + +#define PINMUX_IRQ(irq_nr, ids...) \ + { .irq = irq_nr, .enum_ids = (pinmux_enum_t []) { ids, 0 } } \ + +struct pinmux_range { + pinmux_enum_t begin; + pinmux_enum_t end; + pinmux_enum_t force; +}; + +struct pfc_window { + phys_addr_t phys; + void __iomem *virt; + unsigned long size; +}; + +struct pinmux_info { + char *name; + pinmux_enum_t reserved_id; + struct pinmux_range data; + struct pinmux_range input; + struct pinmux_range input_pd; + struct pinmux_range input_pu; + struct pinmux_range output; + struct pinmux_range mark; + struct pinmux_range function; + + unsigned first_gpio, last_gpio; + + struct pinmux_gpio *gpios; + struct pinmux_cfg_reg *cfg_regs; + struct pinmux_data_reg *data_regs; + + pinmux_enum_t *gpio_data; + unsigned int gpio_data_size; + + struct pinmux_irq *gpio_irq; + unsigned int gpio_irq_size; + + struct resource *resource; + unsigned int num_resources; + struct pfc_window *window; + + unsigned long unlock_reg; + + struct gpio_chip chip; +}; + +int register_pinmux(struct pinmux_info *pip); +int unregister_pinmux(struct pinmux_info *pip); + +/* helper macro for port */ +#define PORT_1(fn, pfx, sfx) fn(pfx, sfx) + +#define PORT_10(fn, pfx, sfx) \ + PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \ + PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \ + PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \ + PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \ + PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx) + +#define PORT_90(fn, pfx, sfx) \ + PORT_10(fn, pfx##1, sfx), PORT_10(fn, pfx##2, sfx), \ + PORT_10(fn, pfx##3, sfx), PORT_10(fn, pfx##4, sfx), \ + PORT_10(fn, pfx##5, sfx), PORT_10(fn, pfx##6, sfx), \ + PORT_10(fn, pfx##7, sfx), PORT_10(fn, pfx##8, sfx), \ + PORT_10(fn, pfx##9, sfx) + +#define _PORT_ALL(pfx, sfx) pfx##_##sfx +#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA) +#define PORT_ALL(str) CPU_ALL_PORT(_PORT_ALL, PORT, str) +#define GPIO_PORT_ALL() CPU_ALL_PORT(_GPIO_PORT, , unused) +#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK) + +/* helper macro for pinmux_enum_t */ +#define PORT_DATA_I(nr) \ + PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN) + +#define PORT_DATA_I_PD(nr) \ + PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ + PORT##nr##_IN, PORT##nr##_IN_PD) + +#define PORT_DATA_I_PU(nr) \ + PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ + PORT##nr##_IN, PORT##nr##_IN_PU) + +#define PORT_DATA_I_PU_PD(nr) \ + PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ + PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU) + +#define PORT_DATA_O(nr) \ + PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT) + +#define PORT_DATA_IO(nr) \ + PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \ + PORT##nr##_IN) + +#define PORT_DATA_IO_PD(nr) \ + PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \ + PORT##nr##_IN, PORT##nr##_IN_PD) + +#define PORT_DATA_IO_PU(nr) \ + PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \ + PORT##nr##_IN, PORT##nr##_IN_PU) + +#define PORT_DATA_IO_PU_PD(nr) \ + 
PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \ + PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU) + +/* helper macro for top 4 bits in PORTnCR */ +#define _PCRH(in, in_pd, in_pu, out) \ + 0, (out), (in), 0, \ + 0, 0, 0, 0, \ + 0, 0, (in_pd), 0, \ + 0, 0, (in_pu), 0 + +#define PORTCR(nr, reg) \ + { \ + PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \ + _PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \ + PORT##nr##_IN_PU, PORT##nr##_OUT), \ + PORT##nr##_FN0, PORT##nr##_FN1, \ + PORT##nr##_FN2, PORT##nr##_FN3, \ + PORT##nr##_FN4, PORT##nr##_FN5, \ + PORT##nr##_FN6, PORT##nr##_FN7 } \ + } + +#endif /* __SH_PFC_H */ diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h new file mode 100644 index 00000000..4d9dcd13 --- /dev/null +++ b/include/linux/sh_timer.h @@ -0,0 +1,12 @@ +#ifndef __SH_TIMER_H__ +#define __SH_TIMER_H__ + +struct sh_timer_config { + char *name; + long channel_offset; + int timer_bit; + unsigned long clockevent_rating; + unsigned long clocksource_rating; +}; + +#endif /* __SH_TIMER_H__ */ diff --git a/include/linux/sha256.h b/include/linux/sha256.h new file mode 100644 index 00000000..cf883a2e --- /dev/null +++ b/include/linux/sha256.h @@ -0,0 +1,25 @@ +#ifndef _SHA256_H +#define _SHA256_H + +#ifndef uint8 +#define uint8 unsigned char +#endif + +#ifndef uint32 +#define uint32 unsigned long int +#endif + +typedef struct +{ + uint32 total[2]; + uint32 state[8]; + uint8 buffer[64]; +} +sha256_context; + +void sha256_starts( sha256_context *ctx ); +void sha256_update( sha256_context *ctx, uint8 *input, uint32 length ); +void sha256_finish( sha256_context *ctx, uint8 digest[32] ); + +#endif /* sha256.h */ + diff --git a/include/linux/shm.h b/include/linux/shm.h new file mode 100644 index 00000000..92808b86 --- /dev/null +++ b/include/linux/shm.h @@ -0,0 +1,130 @@ +#ifndef _LINUX_SHM_H_ +#define _LINUX_SHM_H_ + +#include <linux/ipc.h> +#include <linux/errno.h> +#ifdef __KERNEL__ +#include <asm/page.h> +#else +#include <unistd.h> +#endif + +/* + * SHMMAX, SHMMNI and SHMALL are upper limits are defaults which can + * be increased by sysctl + */ + +#define SHMMAX 0x2000000 /* max shared seg size (bytes) */ +#define SHMMIN 1 /* min shared seg size (bytes) */ +#define SHMMNI 4096 /* max num of segs system wide */ +#ifdef __KERNEL__ +#define SHMALL (SHMMAX/PAGE_SIZE*(SHMMNI/16)) /* max shm system wide (pages) */ +#else +#define SHMALL (SHMMAX/getpagesize()*(SHMMNI/16)) +#endif +#define SHMSEG SHMMNI /* max shared segs per process */ + +#ifdef __KERNEL__ +#include <asm/shmparam.h> +#endif + +/* Obsolete, used only for backwards compatibility and libc5 compiles */ +struct shmid_ds { + struct ipc_perm shm_perm; /* operation perms */ + int shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + __kernel_time_t shm_dtime; /* last detach time */ + __kernel_time_t shm_ctime; /* last change time */ + __kernel_ipc_pid_t shm_cpid; /* pid of creator */ + __kernel_ipc_pid_t shm_lpid; /* pid of last operator */ + unsigned short shm_nattch; /* no. 
of current attaches */ + unsigned short shm_unused; /* compatibility */ + void *shm_unused2; /* ditto - used by DIPC */ + void *shm_unused3; /* unused */ +}; + +/* Include the definition of shmid64_ds and shminfo64 */ +#include <asm/shmbuf.h> + +/* permission flag for shmget */ +#define SHM_R 0400 /* or S_IRUGO from <linux/stat.h> */ +#define SHM_W 0200 /* or S_IWUGO from <linux/stat.h> */ + +/* mode for attach */ +#define SHM_RDONLY 010000 /* read-only access */ +#define SHM_RND 020000 /* round attach address to SHMLBA boundary */ +#define SHM_REMAP 040000 /* take-over region on attach */ +#define SHM_EXEC 0100000 /* execution access */ + +/* super user shmctl commands */ +#define SHM_LOCK 11 +#define SHM_UNLOCK 12 + +/* ipcs ctl commands */ +#define SHM_STAT 13 +#define SHM_INFO 14 + +/* Obsolete, used only for backwards compatibility */ +struct shminfo { + int shmmax; + int shmmin; + int shmmni; + int shmseg; + int shmall; +}; + +struct shm_info { + int used_ids; + unsigned long shm_tot; /* total allocated shm */ + unsigned long shm_rss; /* total resident shm */ + unsigned long shm_swp; /* total swapped shm */ + unsigned long swap_attempts; + unsigned long swap_successes; +}; + +#ifdef __KERNEL__ +struct shmid_kernel /* private to the kernel */ +{ + struct kern_ipc_perm shm_perm; + struct file * shm_file; + unsigned long shm_nattch; + unsigned long shm_segsz; + time_t shm_atim; + time_t shm_dtim; + time_t shm_ctim; + pid_t shm_cprid; + pid_t shm_lprid; + struct user_struct *mlock_user; + + /* The task created the shm object. NULL if the task is dead. */ + struct task_struct *shm_creator; +}; + +/* shm_mode upper byte flags */ +#define SHM_DEST 01000 /* segment will be destroyed on last detach */ +#define SHM_LOCKED 02000 /* segment will not be swapped */ +#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */ +#define SHM_NORESERVE 010000 /* don't check for reservations */ + +#ifdef CONFIG_SYSVIPC +long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr); +extern int is_file_shm_hugepages(struct file *file); +extern void exit_shm(struct task_struct *task); +#else +static inline long do_shmat(int shmid, char __user *shmaddr, + int shmflg, unsigned long *addr) +{ + return -ENOSYS; +} +static inline int is_file_shm_hugepages(struct file *file) +{ + return 0; +} +static inline void exit_shm(struct task_struct *task) +{ +} +#endif + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SHM_H_ */ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h new file mode 100644 index 00000000..79ab2555 --- /dev/null +++ b/include/linux/shmem_fs.h @@ -0,0 +1,64 @@ +#ifndef __SHMEM_FS_H +#define __SHMEM_FS_H + +#include <linux/swap.h> +#include <linux/mempolicy.h> +#include <linux/pagemap.h> +#include <linux/percpu_counter.h> + +/* inode in-kernel data */ + +struct shmem_inode_info { + spinlock_t lock; + unsigned long flags; + unsigned long alloced; /* data pages alloced to file */ + union { + unsigned long swapped; /* subtotal assigned to swap */ + char *symlink; /* unswappable short symlink */ + }; + struct shared_policy policy; /* NUMA memory alloc policy */ + struct list_head swaplist; /* chain of maybes on swap */ + struct list_head xattr_list; /* list of shmem_xattr */ + struct inode vfs_inode; +}; + +struct shmem_sb_info { + unsigned long max_blocks; /* How many blocks are allowed */ + struct percpu_counter used_blocks; /* How many are allocated */ + unsigned long max_inodes; /* How many inodes are allowed */ + unsigned long free_inodes; /* How many are left for 
allocation */ + spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ + uid_t uid; /* Mount uid for root directory */ + gid_t gid; /* Mount gid for root directory */ + umode_t mode; /* Mount mode for root directory */ + struct mempolicy *mpol; /* default memory policy for mappings */ +}; + +static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) +{ + return container_of(inode, struct shmem_inode_info, vfs_inode); +} + +/* + * Functions in mm/shmem.c called directly from elsewhere: + */ +extern int shmem_init(void); +extern int shmem_fill_super(struct super_block *sb, void *data, int silent); +extern struct file *shmem_file_setup(const char *name, + loff_t size, unsigned long flags); +extern int shmem_zero_setup(struct vm_area_struct *); +extern int shmem_lock(struct file *file, int lock, struct user_struct *user); +extern void shmem_unlock_mapping(struct address_space *mapping); +extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); +extern int shmem_unuse(swp_entry_t entry, struct page *page); + +static inline struct page *shmem_read_mapping_page( + struct address_space *mapping, pgoff_t index) +{ + return shmem_read_mapping_page_gfp(mapping, index, + mapping_gfp_mask(mapping)); +} + +#endif diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h new file mode 100644 index 00000000..07ceb97d --- /dev/null +++ b/include/linux/shrinker.h @@ -0,0 +1,43 @@ +#ifndef _LINUX_SHRINKER_H +#define _LINUX_SHRINKER_H + +/* + * This struct is used to pass information from page reclaim to the shrinkers. + * We consolidate the values for easier extention later. + */ +struct shrink_control { + gfp_t gfp_mask; + + /* How many slab objects shrinker() should scan and try to reclaim */ + unsigned long nr_to_scan; +}; + +/* + * A callback you can register to apply pressure to ageable caches. + * + * 'sc' is passed shrink_control which includes a count 'nr_to_scan' + * and a 'gfpmask'. It should look through the least-recently-used + * 'nr_to_scan' entries and attempt to free them up. It should return + * the number of objects which remain in the cache. If it returns -1, it means + * it cannot do any scanning at this time (eg. there is a risk of deadlock). + * The callback must not return -1 if nr_to_scan is zero. + * + * The 'gfpmask' refers to the allocation we are currently trying to + * fulfil. + * + * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is + * querying the cache size, so a fastpath for that case is appropriate. + */ +struct shrinker { + int (*shrink)(struct shrinker *, struct shrink_control *sc); + int seeks; /* seeks to recreate an obj */ + long batch; /* reclaim batch size, 0 = default */ + + /* These are for internal use */ + struct list_head list; + atomic_long_t nr_in_batch; /* objs pending delete */ +}; +#define DEFAULT_SEEKS 2 /* A good number if you don't know better. 
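The shrinker callback contract described above is easiest to see in a registration sketch. In the fragment below, my_cache_count() and my_cache_evict() are hypothetical stand-ins for a real cache's bookkeeping, not kernel APIs; the shrinker itself is registered with register_shrinker(), declared just below.

#include <linux/shrinker.h>

static int my_cache_count(void)
{
        return 0;       /* stand-in: report the cache's current object count */
}

static void my_cache_evict(unsigned long nr)
{
        /* stand-in: free up to nr objects; a real cache would also honour
         * the caller's gfp_mask before sleeping or allocating. */
}

static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
{
        if (sc->nr_to_scan)
                my_cache_evict(sc->nr_to_scan);

        /* With nr_to_scan == 0 this is just the VM asking for the cache size. */
        return my_cache_count();
}

static struct shrinker my_cache_shrinker = {
        .shrink = my_cache_shrink,
        .seeks  = DEFAULT_SEEKS,
};

/* At cache init:     register_shrinker(&my_cache_shrinker);
 * at cache teardown: unregister_shrinker(&my_cache_shrinker). */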
*/ +extern void register_shrinker(struct shrinker *); +extern void unregister_shrinker(struct shrinker *); +#endif diff --git a/include/linux/sht15.h b/include/linux/sht15.h new file mode 100644 index 00000000..f85c7c52 --- /dev/null +++ b/include/linux/sht15.h @@ -0,0 +1,34 @@ +/* + * sht15.h - support for the SHT15 Temperature and Humidity Sensor + * + * Copyright (c) 2009 Jonathan Cameron + * + * Copyright (c) 2007 Wouter Horre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * For further information, see the Documentation/hwmon/sht15 file. + */ + +/** + * struct sht15_platform_data - sht15 connectivity info + * @gpio_data: no. of gpio to which bidirectional data line is + * connected. + * @gpio_sck: no. of gpio to which the data clock is connected. + * @supply_mv: supply voltage in mv. Overridden by regulator if + * available. + * @checksum: flag to indicate the checksum should be validated. + * @no_otp_reload: flag to indicate no reload from OTP. + * @low_resolution: flag to indicate the temp/humidity resolution to use. + */ +struct sht15_platform_data { + int gpio_data; + int gpio_sck; + int supply_mv; + bool checksum; + bool no_otp_reload; + bool low_resolution; +}; + diff --git a/include/linux/signal.h b/include/linux/signal.h new file mode 100644 index 00000000..7987ce74 --- /dev/null +++ b/include/linux/signal.h @@ -0,0 +1,391 @@ +#ifndef _LINUX_SIGNAL_H +#define _LINUX_SIGNAL_H + +#include <asm/signal.h> +#include <asm/siginfo.h> + +#ifdef __KERNEL__ +#include <linux/list.h> + +struct task_struct; + +/* for sysctl */ +extern int print_fatal_signals; +/* + * Real Time signals may be queued. + */ + +struct sigqueue { + struct list_head list; + int flags; + siginfo_t info; + struct user_struct *user; +}; + +/* flags values. */ +#define SIGQUEUE_PREALLOC 1 + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +/* + * Define some primitives to manipulate sigset_t. + */ + +#ifndef __HAVE_ARCH_SIG_BITOPS +#include <linux/bitops.h> + +/* We don't use <linux/bitops.h> for these because there is no need to + be atomic. 
*/ +static inline void sigaddset(sigset_t *set, int _sig) +{ + unsigned long sig = _sig - 1; + if (_NSIG_WORDS == 1) + set->sig[0] |= 1UL << sig; + else + set->sig[sig / _NSIG_BPW] |= 1UL << (sig % _NSIG_BPW); +} + +static inline void sigdelset(sigset_t *set, int _sig) +{ + unsigned long sig = _sig - 1; + if (_NSIG_WORDS == 1) + set->sig[0] &= ~(1UL << sig); + else + set->sig[sig / _NSIG_BPW] &= ~(1UL << (sig % _NSIG_BPW)); +} + +static inline int sigismember(sigset_t *set, int _sig) +{ + unsigned long sig = _sig - 1; + if (_NSIG_WORDS == 1) + return 1 & (set->sig[0] >> sig); + else + return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); +} + +static inline int sigfindinword(unsigned long word) +{ + return ffz(~word); +} + +#endif /* __HAVE_ARCH_SIG_BITOPS */ + +static inline int sigisemptyset(sigset_t *set) +{ + extern void _NSIG_WORDS_is_unsupported_size(void); + switch (_NSIG_WORDS) { + case 4: + return (set->sig[3] | set->sig[2] | + set->sig[1] | set->sig[0]) == 0; + case 2: + return (set->sig[1] | set->sig[0]) == 0; + case 1: + return set->sig[0] == 0; + default: + _NSIG_WORDS_is_unsupported_size(); + return 0; + } +} + +#define sigmask(sig) (1UL << ((sig) - 1)) + +#ifndef __HAVE_ARCH_SIG_SETOPS +#include <linux/string.h> + +#define _SIG_SET_BINOP(name, op) \ +static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ +{ \ + extern void _NSIG_WORDS_is_unsupported_size(void); \ + unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \ + \ + switch (_NSIG_WORDS) { \ + case 4: \ + a3 = a->sig[3]; a2 = a->sig[2]; \ + b3 = b->sig[3]; b2 = b->sig[2]; \ + r->sig[3] = op(a3, b3); \ + r->sig[2] = op(a2, b2); \ + case 2: \ + a1 = a->sig[1]; b1 = b->sig[1]; \ + r->sig[1] = op(a1, b1); \ + case 1: \ + a0 = a->sig[0]; b0 = b->sig[0]; \ + r->sig[0] = op(a0, b0); \ + break; \ + default: \ + _NSIG_WORDS_is_unsupported_size(); \ + } \ +} + +#define _sig_or(x,y) ((x) | (y)) +_SIG_SET_BINOP(sigorsets, _sig_or) + +#define _sig_and(x,y) ((x) & (y)) +_SIG_SET_BINOP(sigandsets, _sig_and) + +#define _sig_andn(x,y) ((x) & ~(y)) +_SIG_SET_BINOP(sigandnsets, _sig_andn) + +#undef _SIG_SET_BINOP +#undef _sig_or +#undef _sig_and +#undef _sig_andn + +#define _SIG_SET_OP(name, op) \ +static inline void name(sigset_t *set) \ +{ \ + extern void _NSIG_WORDS_is_unsupported_size(void); \ + \ + switch (_NSIG_WORDS) { \ + case 4: set->sig[3] = op(set->sig[3]); \ + set->sig[2] = op(set->sig[2]); \ + case 2: set->sig[1] = op(set->sig[1]); \ + case 1: set->sig[0] = op(set->sig[0]); \ + break; \ + default: \ + _NSIG_WORDS_is_unsupported_size(); \ + } \ +} + +#define _sig_not(x) (~(x)) +_SIG_SET_OP(signotset, _sig_not) + +#undef _SIG_SET_OP +#undef _sig_not + +static inline void sigemptyset(sigset_t *set) +{ + switch (_NSIG_WORDS) { + default: + memset(set, 0, sizeof(sigset_t)); + break; + case 2: set->sig[1] = 0; + case 1: set->sig[0] = 0; + break; + } +} + +static inline void sigfillset(sigset_t *set) +{ + switch (_NSIG_WORDS) { + default: + memset(set, -1, sizeof(sigset_t)); + break; + case 2: set->sig[1] = -1; + case 1: set->sig[0] = -1; + break; + } +} + +/* Some extensions for manipulating the low 32 signals in particular. 
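A short sketch of how these primitives compose in kernel code, using only the helpers defined in this header; example_stop_signal_pending() is illustrative, not an existing function.

#include <linux/signal.h>

static int example_stop_signal_pending(struct sigpending *pending)
{
        sigset_t stop_sigs;

        /* Build the set of job-control stop signals. */
        sigemptyset(&stop_sigs);
        sigaddset(&stop_sigs, SIGSTOP);
        sigaddset(&stop_sigs, SIGTSTP);
        sigaddset(&stop_sigs, SIGTTIN);
        sigaddset(&stop_sigs, SIGTTOU);

        /* Intersect with the pending set; the result lands in stop_sigs. */
        sigandsets(&stop_sigs, &stop_sigs, &pending->signal);

        /* Non-zero if any stop signal is currently pending. */
        return !sigisemptyset(&stop_sigs);
}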
*/ + +static inline void sigaddsetmask(sigset_t *set, unsigned long mask) +{ + set->sig[0] |= mask; +} + +static inline void sigdelsetmask(sigset_t *set, unsigned long mask) +{ + set->sig[0] &= ~mask; +} + +static inline int sigtestsetmask(sigset_t *set, unsigned long mask) +{ + return (set->sig[0] & mask) != 0; +} + +static inline void siginitset(sigset_t *set, unsigned long mask) +{ + set->sig[0] = mask; + switch (_NSIG_WORDS) { + default: + memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1)); + break; + case 2: set->sig[1] = 0; + case 1: ; + } +} + +static inline void siginitsetinv(sigset_t *set, unsigned long mask) +{ + set->sig[0] = ~mask; + switch (_NSIG_WORDS) { + default: + memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1)); + break; + case 2: set->sig[1] = -1; + case 1: ; + } +} + +#endif /* __HAVE_ARCH_SIG_SETOPS */ + +static inline void init_sigpending(struct sigpending *sig) +{ + sigemptyset(&sig->signal); + INIT_LIST_HEAD(&sig->list); +} + +extern void flush_sigqueue(struct sigpending *queue); + +/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ +static inline int valid_signal(unsigned long sig) +{ + return sig <= _NSIG ? 1 : 0; +} + +struct timespec; +struct pt_regs; + +extern int next_signal(struct sigpending *pending, sigset_t *mask); +extern int do_send_sig_info(int sig, struct siginfo *info, + struct task_struct *p, bool group); +extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); +extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); +extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, + siginfo_t *info); +extern long do_sigpending(void __user *, unsigned long); +extern int do_sigtimedwait(const sigset_t *, siginfo_t *, + const struct timespec *); +extern int sigprocmask(int, sigset_t *, sigset_t *); +extern void set_current_blocked(const sigset_t *); +extern int show_unhandled_signals; + +extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); +extern void block_sigmask(struct k_sigaction *ka, int signr); +extern void exit_signals(struct task_struct *tsk); + +extern struct kmem_cache *sighand_cachep; + +int unhandled_signal(struct task_struct *tsk, int sig); + +/* + * In POSIX a signal is sent either to a specific thread (Linux task) + * or to the process as a whole (Linux thread group). How the signal + * is sent determines whether it's to one thread or the whole group, + * which determines which signal mask(s) are involved in blocking it + * from being delivered until later. When the signal is delivered, + * either it's caught or ignored by a user handler or it has a default + * effect that applies to the whole thread group (POSIX process). + * + * The possible effects an unblocked signal set to SIG_DFL can have are: + * ignore - Nothing Happens + * terminate - kill the process, i.e. all threads in the group, + * similar to exit_group. The group leader (only) reports + * WIFSIGNALED status to its parent. + * coredump - write a core dump file describing all threads using + * the same mm and then kill all those threads + * stop - stop all the threads in the group, i.e. TASK_STOPPED state + * + * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. + * Other signals when not blocked and set to SIG_DFL behaves as follows. + * The job control signals also have other special effects. 
+ * + * +--------------------+------------------+ + * | POSIX signal | default action | + * +--------------------+------------------+ + * | SIGHUP | terminate | + * | SIGINT | terminate | + * | SIGQUIT | coredump | + * | SIGILL | coredump | + * | SIGTRAP | coredump | + * | SIGABRT/SIGIOT | coredump | + * | SIGBUS | coredump | + * | SIGFPE | coredump | + * | SIGKILL | terminate(+) | + * | SIGUSR1 | terminate | + * | SIGSEGV | coredump | + * | SIGUSR2 | terminate | + * | SIGPIPE | terminate | + * | SIGALRM | terminate | + * | SIGTERM | terminate | + * | SIGCHLD | ignore | + * | SIGCONT | ignore(*) | + * | SIGSTOP | stop(*)(+) | + * | SIGTSTP | stop(*) | + * | SIGTTIN | stop(*) | + * | SIGTTOU | stop(*) | + * | SIGURG | ignore | + * | SIGXCPU | coredump | + * | SIGXFSZ | coredump | + * | SIGVTALRM | terminate | + * | SIGPROF | terminate | + * | SIGPOLL/SIGIO | terminate | + * | SIGSYS/SIGUNUSED | coredump | + * | SIGSTKFLT | terminate | + * | SIGWINCH | ignore | + * | SIGPWR | terminate | + * | SIGRTMIN-SIGRTMAX | terminate | + * +--------------------+------------------+ + * | non-POSIX signal | default action | + * +--------------------+------------------+ + * | SIGEMT | coredump | + * +--------------------+------------------+ + * + * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". + * (*) Special job control effects: + * When SIGCONT is sent, it resumes the process (all threads in the group) + * from TASK_STOPPED state and also clears any pending/queued stop signals + * (any of those marked with "stop(*)"). This happens regardless of blocking, + * catching, or ignoring SIGCONT. When any stop signal is sent, it clears + * any pending/queued SIGCONT signals; this happens regardless of blocking, + * catching, or ignored the stop signal, though (except for SIGSTOP) the + * default action of stopping the process may happen later or never. 
+ */ + +#ifdef SIGEMT +#define SIGEMT_MASK rt_sigmask(SIGEMT) +#else +#define SIGEMT_MASK 0 +#endif + +#if SIGRTMIN > BITS_PER_LONG +#define rt_sigmask(sig) (1ULL << ((sig)-1)) +#else +#define rt_sigmask(sig) sigmask(sig) +#endif +#define siginmask(sig, mask) (rt_sigmask(sig) & (mask)) + +#define SIG_KERNEL_ONLY_MASK (\ + rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) + +#define SIG_KERNEL_STOP_MASK (\ + rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \ + rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) ) + +#define SIG_KERNEL_COREDUMP_MASK (\ + rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \ + rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \ + rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \ + rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \ + rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \ + SIGEMT_MASK ) + +#define SIG_KERNEL_IGNORE_MASK (\ + rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \ + rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) ) + +#define sig_kernel_only(sig) \ + (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK)) +#define sig_kernel_coredump(sig) \ + (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK)) +#define sig_kernel_ignore(sig) \ + (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK)) +#define sig_kernel_stop(sig) \ + (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK)) + +#define sig_user_defined(t, signr) \ + (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ + ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) + +#define sig_fatal(t, signr) \ + (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ + (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) + +void signals_init(void); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SIGNAL_H */ diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h new file mode 100644 index 00000000..247399b2 --- /dev/null +++ b/include/linux/signalfd.h @@ -0,0 +1,76 @@ +/* + * include/linux/signalfd.h + * + * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> + * + */ + +#ifndef _LINUX_SIGNALFD_H +#define _LINUX_SIGNALFD_H + +#include <linux/types.h> +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> + +/* Flags for signalfd4. */ +#define SFD_CLOEXEC O_CLOEXEC +#define SFD_NONBLOCK O_NONBLOCK + +struct signalfd_siginfo { + __u32 ssi_signo; + __s32 ssi_errno; + __s32 ssi_code; + __u32 ssi_pid; + __u32 ssi_uid; + __s32 ssi_fd; + __u32 ssi_tid; + __u32 ssi_band; + __u32 ssi_overrun; + __u32 ssi_trapno; + __s32 ssi_status; + __s32 ssi_int; + __u64 ssi_ptr; + __u64 ssi_utime; + __u64 ssi_stime; + __u64 ssi_addr; + __u16 ssi_addr_lsb; + + /* + * Pad strcture to 128 bytes. Remember to update the + * pad size when you add new members. We use a fixed + * size structure to avoid compatibility problems with + * future versions, and we leave extra space for additional + * members. We use fixed size members because this strcture + * comes out of a read(2) and we really don't want to have + * a compat on read(2). + */ + __u8 __pad[46]; +}; + + +#ifdef __KERNEL__ + +#ifdef CONFIG_SIGNALFD + +/* + * Deliver the signal to listening signalfd. 
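The SFD_* flags and struct signalfd_siginfo above form the userspace ABI; they are normally consumed through the glibc signalfd(2) wrapper in <sys/signalfd.h>. A minimal userspace sketch (the sigemptyset()/sigaddset() calls here are the libc versions, not the kernel inlines from <linux/signal.h> above):

#include <sys/signalfd.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        sigset_t mask;
        struct signalfd_siginfo ssi;
        int fd;

        /* Block SIGINT so it is delivered via the descriptor, not a handler. */
        sigemptyset(&mask);
        sigaddset(&mask, SIGINT);
        sigprocmask(SIG_BLOCK, &mask, NULL);

        fd = signalfd(-1, &mask, SFD_CLOEXEC);
        if (fd < 0)
                return 1;

        /* Each read() yields whole 128-byte signalfd_siginfo records. */
        if (read(fd, &ssi, sizeof(ssi)) == (ssize_t)sizeof(ssi))
                printf("got signal %u from pid %u\n", ssi.ssi_signo, ssi.ssi_pid);

        close(fd);
        return 0;
}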
+ */ +static inline void signalfd_notify(struct task_struct *tsk, int sig) +{ + if (unlikely(waitqueue_active(&tsk->sighand->signalfd_wqh))) + wake_up(&tsk->sighand->signalfd_wqh); +} + +extern void signalfd_cleanup(struct sighand_struct *sighand); + +#else /* CONFIG_SIGNALFD */ + +static inline void signalfd_notify(struct task_struct *tsk, int sig) { } + +static inline void signalfd_cleanup(struct sighand_struct *sighand) { } + +#endif /* CONFIG_SIGNALFD */ + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SIGNALFD_H */ diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h new file mode 100644 index 00000000..29d95933 --- /dev/null +++ b/include/linux/sirfsoc_dma.h @@ -0,0 +1,6 @@ +#ifndef _SIRFSOC_DMA_H_ +#define _SIRFSOC_DMA_H_ + +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id); + +#endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h new file mode 100644 index 00000000..c1bae8df --- /dev/null +++ b/include/linux/skbuff.h @@ -0,0 +1,2559 @@ +/* + * Definitions for the 'struct sk_buff' memory handlers. + * + * Authors: + * Alan Cox, <gw4pts@gw4pts.ampr.org> + * Florian La Roche, <rzsfl@rz.uni-sb.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_SKBUFF_H +#define _LINUX_SKBUFF_H + +#include <linux/kernel.h> +#include <linux/kmemcheck.h> +#include <linux/compiler.h> +#include <linux/time.h> +#include <linux/bug.h> +#include <linux/cache.h> + +#include <linux/atomic.h> +#include <asm/types.h> +#include <linux/spinlock.h> +#include <linux/net.h> +#include <linux/textsearch.h> +#include <net/checksum.h> +#include <linux/rcupdate.h> +#include <linux/dmaengine.h> +#include <linux/hrtimer.h> +#include <linux/dma-mapping.h> +#include <linux/netdev_features.h> + +/* Don't change this without changing skb_csum_unnecessary! */ +#define CHECKSUM_NONE 0 +#define CHECKSUM_UNNECESSARY 1 +#define CHECKSUM_COMPLETE 2 +#define CHECKSUM_PARTIAL 3 + +#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ + ~(SMP_CACHE_BYTES - 1)) +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define SKB_MAX_ORDER(X, ORDER) \ + SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) +#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) +#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2)) + +/* return minimum truesize of one skb containing X bytes of data */ +#define SKB_TRUESIZE(X) ((X) + \ + SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +/* A. Checksumming of received packets by device. + * + * NONE: device failed to checksum this packet. + * skb->csum is undefined. + * + * UNNECESSARY: device parsed packet and wouldbe verified checksum. + * skb->csum is undefined. + * It is bad option, but, unfortunately, many of vendors do this. + * Apparently with secret goal to sell you new device, when you + * will add new protocol to your host. F.e. IPv6. 8) + * + * COMPLETE: the most generic way. Device supplied checksum of _all_ + * the packet as seen by netif_rx in skb->csum. + * NOTE: Even if device supports only some protocols, but + * is able to produce some skb->csum, it MUST use COMPLETE, + * not UNNECESSARY. + * + * PARTIAL: identical to the case for output below. 
This may occur + * on a packet received directly from another Linux OS, e.g., + * a virtualised Linux kernel on the same host. The packet can + * be treated in the same way as UNNECESSARY except that on + * output (i.e., forwarding) the checksum must be filled in + * by the OS or the hardware. + * + * B. Checksumming on output. + * + * NONE: skb is checksummed by protocol or csum is not required. + * + * PARTIAL: device is required to csum packet as seen by hard_start_xmit + * from skb->csum_start to the end and to record the checksum + * at skb->csum_start + skb->csum_offset. + * + * Device must show its capabilities in dev->features, set + * at device setup time. + * NETIF_F_HW_CSUM - it is clever device, it is able to checksum + * everything. + * NETIF_F_IP_CSUM - device is dumb. It is able to csum only + * TCP/UDP over IPv4. Sigh. Vendors like this + * way by an unknown reason. Though, see comment above + * about CHECKSUM_UNNECESSARY. 8) + * NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead. + * + * UNNECESSARY: device will do per protocol specific csum. Protocol drivers + * that do not want net to perform the checksum calculation should use + * this flag in their outgoing skbs. + * NETIF_F_FCOE_CRC this indicates the device can do FCoE FC CRC + * offload. Correspondingly, the FCoE protocol driver + * stack should use CHECKSUM_UNNECESSARY. + * + * Any questions? No questions, good. --ANK + */ + +struct net_device; +struct scatterlist; +struct pipe_inode_info; + +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +struct nf_conntrack { + atomic_t use; +}; +#endif + +#ifdef CONFIG_BRIDGE_NETFILTER +struct nf_bridge_info { + atomic_t use; + struct net_device *physindev; + struct net_device *physoutdev; + unsigned int mask; + unsigned long data[32 / sizeof(unsigned long)]; +}; +#endif + +struct sk_buff_head { + /* These two members must be first. */ + struct sk_buff *next; + struct sk_buff *prev; + + __u32 qlen; + spinlock_t lock; +}; + +struct sk_buff; + +/* To allow 64K frame to be packed as single skb without frag_list we + * require 64K/PAGE_SIZE pages plus 1 additional page to allow for + * buffers which do not start on a page boundary. + * + * Since GRO uses frags we allocate at least 16 regardless of page + * size. + */ +#if (65536/PAGE_SIZE + 1) < 16 +#define MAX_SKB_FRAGS 16UL +#else +#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) +#endif + +typedef struct skb_frag_struct skb_frag_t; + +struct skb_frag_struct { + struct { + struct page *p; + } page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; + __u32 size; +#else + __u16 page_offset; + __u16 size; +#endif +}; + +static inline unsigned int skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} + +static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) +{ + frag->size = size; +} + +static inline void skb_frag_size_add(skb_frag_t *frag, int delta) +{ + frag->size += delta; +} + +static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} + +#define HAVE_HW_TIME_STAMP + +/** + * struct skb_shared_hwtstamps - hardware time stamps + * @hwtstamp: hardware time stamp transformed into duration + * since arbitrary point in time + * @syststamp: hwtstamp transformed to system time base + * + * Software time stamps generated by ktime_get_real() are stored in + * skb->tstamp. 
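The CHECKSUM_* rules in section A above reduce to a few assignments in a driver's receive path. A hypothetical fragment, assuming the driver already knows what its hardware verified; example_rx_mark_checksum() is not a real driver function.

#include <linux/skbuff.h>

static void example_rx_mark_checksum(struct sk_buff *skb, bool hw_verified,
                                     bool have_full_csum, __wsum hw_csum)
{
        if (hw_verified) {
                /* The device validated the checksum of a protocol it knows. */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if (have_full_csum) {
                /* The device summed the whole packet; the stack verifies it. */
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = hw_csum;
        } else {
                /* No hardware help; software checksumming as needed. */
                skb->ip_summed = CHECKSUM_NONE;
        }
}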
The relation between the different kinds of time + * stamps is as follows: + * + * syststamp and tstamp can be compared against each other in + * arbitrary combinations. The accuracy of a + * syststamp/tstamp/"syststamp from other device" comparison is + * limited by the accuracy of the transformation into system time + * base. This depends on the device driver and its underlying + * hardware. + * + * hwtstamps can only be compared against other hwtstamps from + * the same device. + * + * This structure is attached to packets as part of the + * &skb_shared_info. Use skb_hwtstamps() to get a pointer. + */ +struct skb_shared_hwtstamps { + ktime_t hwtstamp; + ktime_t syststamp; +}; + +/* Definitions for tx_flags in struct skb_shared_info */ +enum { + /* generate hardware time stamp */ + SKBTX_HW_TSTAMP = 1 << 0, + + /* generate software time stamp */ + SKBTX_SW_TSTAMP = 1 << 1, + + /* device driver is going to provide hardware time stamp */ + SKBTX_IN_PROGRESS = 1 << 2, + + /* device driver supports TX zero-copy buffers */ + SKBTX_DEV_ZEROCOPY = 1 << 3, + + /* generate wifi status information (where possible) */ + SKBTX_WIFI_STATUS = 1 << 4, +}; + +/* + * The callback notifies userspace to release buffers when skb DMA is done in + * lower device, the skb last reference should be 0 when calling this. + * The ctx field is used to track device context. + * The desc field is used to track userspace buffer index. + */ +struct ubuf_info { + void (*callback)(struct ubuf_info *); + void *ctx; + unsigned long desc; +}; + +/* This data is invariant across clones and lives at + * the end of the header data, ie. at skb->end. + */ +struct skb_shared_info { + unsigned char nr_frags; + __u8 tx_flags; + unsigned short gso_size; + /* Warning: this field is not always filled in (UFO)! */ + unsigned short gso_segs; + unsigned short gso_type; + struct sk_buff *frag_list; + struct skb_shared_hwtstamps hwtstamps; + __be32 ip6_frag_id; + + /* + * Warning : all fields before dataref are cleared in __alloc_skb() + */ + atomic_t dataref; + + /* Intermediate layers must ensure that destructor_arg + * remains valid until skb destructor */ + void * destructor_arg; + + /* must be last field, see pskb_expand_head() */ + skb_frag_t frags[MAX_SKB_FRAGS]; +}; + +/* We divide dataref into two halves. The higher 16 bits hold references + * to the payload part of skb->data. The lower 16 bits hold references to + * the entire skb->data. A clone of a headerless skb holds the length of + * the header in skb->hdr_len. + * + * All users must obey the rule that the skb->data reference count must be + * greater than or equal to the payload reference count. + * + * Holding a reference to the payload part means that the user does not + * care about modifications to the header part of skb->data. + */ +#define SKB_DATAREF_SHIFT 16 +#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) + + +enum { + SKB_FCLONE_UNAVAILABLE, + SKB_FCLONE_ORIG, + SKB_FCLONE_CLONE, +}; + +enum { + SKB_GSO_TCPV4 = 1 << 0, + SKB_GSO_UDP = 1 << 1, + + /* This indicates the skb is from an untrusted source. */ + SKB_GSO_DODGY = 1 << 2, + + /* This indicates the tcp segment has CWR set. 
*/ + SKB_GSO_TCP_ECN = 1 << 3, + + SKB_GSO_TCPV6 = 1 << 4, + + SKB_GSO_FCOE = 1 << 5, +}; + +#if BITS_PER_LONG > 32 +#define NET_SKBUFF_DATA_USES_OFFSET 1 +#endif + +#ifdef NET_SKBUFF_DATA_USES_OFFSET +typedef unsigned int sk_buff_data_t; +#else +typedef unsigned char *sk_buff_data_t; +#endif + +#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \ + defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE) +#define NET_SKBUFF_NF_DEFRAG_NEEDED 1 +#endif + +/** + * struct sk_buff - socket buffer + * @next: Next buffer in list + * @prev: Previous buffer in list + * @tstamp: Time we arrived + * @sk: Socket we are owned by + * @dev: Device we arrived on/are leaving by + * @cb: Control buffer. Free for use by every layer. Put private vars here + * @_skb_refdst: destination entry (with norefcount bit) + * @sp: the security path, used for xfrm + * @len: Length of actual data + * @data_len: Data length + * @mac_len: Length of link layer header + * @hdr_len: writable header length of cloned skb + * @csum: Checksum (must include start/offset pair) + * @csum_start: Offset from skb->head where checksumming should start + * @csum_offset: Offset from csum_start where checksum should be stored + * @priority: Packet queueing priority + * @local_df: allow local fragmentation + * @cloned: Head may be cloned (check refcnt to be sure) + * @ip_summed: Driver fed us an IP checksum + * @nohdr: Payload reference only, must not modify header + * @nfctinfo: Relationship of this skb to the connection + * @pkt_type: Packet class + * @fclone: skbuff clone status + * @ipvs_property: skbuff is owned by ipvs + * @peeked: this packet has been seen already, so stats have been + * done for it, don't do them again + * @nf_trace: netfilter packet trace flag + * @protocol: Packet protocol from driver + * @destructor: Destruct function + * @nfct: Associated connection, if any + * @nfct_reasm: netfilter conntrack re-assembly pointer + * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c + * @skb_iif: ifindex of device we arrived on + * @tc_index: Traffic control index + * @tc_verd: traffic control verdict + * @rxhash: the packet hash computed on receive + * @queue_mapping: Queue mapping for multiqueue devices + * @ndisc_nodetype: router type (from link layer) + * @ooo_okay: allow the mapping of a socket to a queue to be changed + * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport + * ports. + * @wifi_acked_valid: wifi_acked was set + * @wifi_acked: whether frame was acked on wifi or not + * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS + * @dma_cookie: a cookie to one of several possible DMA operations + * done by skb DMA functions + * @secmark: security marking + * @mark: Generic packet mark + * @dropcount: total number of sk_receive_queue overflows + * @vlan_tci: vlan tag control information + * @transport_header: Transport layer header + * @network_header: Network layer header + * @mac_header: Link layer header + * @tail: Tail pointer + * @end: End pointer + * @head: Head of buffer + * @data: Data head pointer + * @truesize: Buffer size + * @users: User count - see {datagram,tcp}.c + */ + +struct sk_buff { + /* These two members must be first. */ + struct sk_buff *next; + struct sk_buff *prev; + + ktime_t tstamp; + + struct sock *sk; + struct net_device *dev; + + /* + * This is the control buffer. It is free to use for every + * layer. Please put your private variables there. 
If you + * want to keep them across layers you have to do a skb_clone() + * first. This is owned by whoever has the skb queued ATM. + */ + char cb[48] __aligned(8); + + unsigned long _skb_refdst; +#ifdef CONFIG_XFRM + struct sec_path *sp; +#endif + unsigned int len, + data_len; + __u16 mac_len, + hdr_len; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + kmemcheck_bitfield_begin(flags1); + __u8 local_df:1, + cloned:1, + ip_summed:2, + nohdr:1, + nfctinfo:3; + __u8 pkt_type:3, + fclone:2, + ipvs_property:1, + peeked:1, + nf_trace:1; + kmemcheck_bitfield_end(flags1); + __be16 protocol; + + void (*destructor)(struct sk_buff *skb); +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + struct nf_conntrack *nfct; +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED + struct sk_buff *nfct_reasm; +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + struct nf_bridge_info *nf_bridge; +#endif + + int skb_iif; + + __u32 rxhash; + + __u16 vlan_tci; + +#ifdef CONFIG_NET_SCHED + __u16 tc_index; /* traffic control index */ +#ifdef CONFIG_NET_CLS_ACT + __u16 tc_verd; /* traffic control verdict */ +#endif +#endif + + __u16 queue_mapping; + kmemcheck_bitfield_begin(flags2); +#ifdef CONFIG_IPV6_NDISC_NODETYPE + __u8 ndisc_nodetype:2; +#endif + __u8 ooo_okay:1; + __u8 l4_rxhash:1; + __u8 wifi_acked_valid:1; + __u8 wifi_acked:1; + __u8 no_fcs:1; + /* 9/11 bit hole (depending on ndisc_nodetype presence) */ + kmemcheck_bitfield_end(flags2); + +#ifdef CONFIG_NET_DMA + dma_cookie_t dma_cookie; +#endif +#ifdef CONFIG_NETWORK_SECMARK + __u32 secmark; +#endif + union { + __u32 mark; + __u32 dropcount; + __u32 avail_size; + }; + + sk_buff_data_t transport_header; + sk_buff_data_t network_header; + sk_buff_data_t mac_header; + /* These elements must be at the end, see alloc_skb() for details. */ + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head, + *data; + unsigned int truesize; + atomic_t users; +}; + +#ifdef __KERNEL__ +/* + * Handling routines are only of interest to the kernel + */ +#include <linux/slab.h> + + +/* + * skb might have a dst pointer attached, refcounted or not. + * _skb_refdst low order bit is set if refcount was _not_ taken + */ +#define SKB_DST_NOREF 1UL +#define SKB_DST_PTRMASK ~(SKB_DST_NOREF) + +/** + * skb_dst - returns skb dst_entry + * @skb: buffer + * + * Returns skb dst_entry, regardless of reference taken or not. 
+ */ +static inline struct dst_entry *skb_dst(const struct sk_buff *skb) +{ + /* If refdst was not refcounted, check we still are in a + * rcu_read_lock section + */ + WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && + !rcu_read_lock_held() && + !rcu_read_lock_bh_held()); + return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); +} + +/** + * skb_dst_set - sets skb dst + * @skb: buffer + * @dst: dst entry + * + * Sets skb dst, assuming a reference was taken on dst and should + * be released by skb_dst_drop() + */ +static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) +{ + skb->_skb_refdst = (unsigned long)dst; +} + +extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst); + +/** + * skb_dst_is_noref - Test if skb dst isn't refcounted + * @skb: buffer + */ +static inline bool skb_dst_is_noref(const struct sk_buff *skb) +{ + return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); +} + +static inline struct rtable *skb_rtable(const struct sk_buff *skb) +{ + return (struct rtable *)skb_dst(skb); +} + +extern void kfree_skb(struct sk_buff *skb); +extern void consume_skb(struct sk_buff *skb); +extern void __kfree_skb(struct sk_buff *skb); +extern struct sk_buff *__alloc_skb(unsigned int size, + gfp_t priority, int fclone, int node); +extern struct sk_buff *build_skb(void *data); +static inline struct sk_buff *alloc_skb(unsigned int size, + gfp_t priority) +{ + return __alloc_skb(size, priority, 0, NUMA_NO_NODE); +} + +static inline struct sk_buff *alloc_skb_fclone(unsigned int size, + gfp_t priority) +{ + return __alloc_skb(size, priority, 1, NUMA_NO_NODE); +} + +extern void skb_recycle(struct sk_buff *skb); +extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); + +extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); +extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); +extern struct sk_buff *skb_clone(struct sk_buff *skb, + gfp_t priority); +extern struct sk_buff *skb_copy(const struct sk_buff *skb, + gfp_t priority); +extern struct sk_buff *__pskb_copy(struct sk_buff *skb, + int headroom, gfp_t gfp_mask); + +extern int pskb_expand_head(struct sk_buff *skb, + int nhead, int ntail, + gfp_t gfp_mask); +extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, + unsigned int headroom); +extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, + int newheadroom, int newtailroom, + gfp_t priority); +extern int skb_to_sgvec(struct sk_buff *skb, + struct scatterlist *sg, int offset, + int len); +extern int skb_cow_data(struct sk_buff *skb, int tailbits, + struct sk_buff **trailer); +extern int skb_pad(struct sk_buff *skb, int pad); +#define dev_kfree_skb(a) consume_skb(a) + +extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, + int getfrag(void *from, char *to, int offset, + int len,int odd, struct sk_buff *skb), + void *from, int length); + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; +}; + +extern void skb_prepare_seq_read(struct sk_buff *skb, + unsigned int from, unsigned int to, + struct skb_seq_state *st); +extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data, + struct skb_seq_state *st); +extern void skb_abort_seq_read(struct skb_seq_state *st); + +extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, + unsigned int to, struct ts_config *config, + struct ts_state *state); + +extern void 
__skb_get_rxhash(struct sk_buff *skb); +static inline __u32 skb_get_rxhash(struct sk_buff *skb) +{ + if (!skb->rxhash) + __skb_get_rxhash(skb); + + return skb->rxhash; +} + +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) +{ + return skb->head + skb->end; +} +#else +static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) +{ + return skb->end; +} +#endif + +/* Internal */ +#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) + +static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) +{ + return &skb_shinfo(skb)->hwtstamps; +} + +/** + * skb_queue_empty - check if a queue is empty + * @list: queue head + * + * Returns true if the queue is empty, false otherwise. + */ +static inline int skb_queue_empty(const struct sk_buff_head *list) +{ + return list->next == (struct sk_buff *)list; +} + +/** + * skb_queue_is_last - check if skb is the last entry in the queue + * @list: queue head + * @skb: buffer + * + * Returns true if @skb is the last buffer on the list. + */ +static inline bool skb_queue_is_last(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return skb->next == (struct sk_buff *)list; +} + +/** + * skb_queue_is_first - check if skb is the first entry in the queue + * @list: queue head + * @skb: buffer + * + * Returns true if @skb is the first buffer on the list. + */ +static inline bool skb_queue_is_first(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return skb->prev == (struct sk_buff *)list; +} + +/** + * skb_queue_next - return the next packet in the queue + * @list: queue head + * @skb: current buffer + * + * Return the next packet in @list after @skb. It is only valid to + * call this if skb_queue_is_last() evaluates to false. + */ +static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + /* This BUG_ON may seem severe, but if we just return then we + * are going to dereference garbage. + */ + BUG_ON(skb_queue_is_last(list, skb)); + return skb->next; +} + +/** + * skb_queue_prev - return the prev packet in the queue + * @list: queue head + * @skb: current buffer + * + * Return the prev packet in @list before @skb. It is only valid to + * call this if skb_queue_is_first() evaluates to false. + */ +static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + /* This BUG_ON may seem severe, but if we just return then we + * are going to dereference garbage. + */ + BUG_ON(skb_queue_is_first(list, skb)); + return skb->prev; +} + +/** + * skb_get - reference buffer + * @skb: buffer to reference + * + * Makes another reference to a socket buffer and returns a pointer + * to the buffer. + */ +static inline struct sk_buff *skb_get(struct sk_buff *skb) +{ + atomic_inc(&skb->users); + return skb; +} + +/* + * If users == 1, we are the only owner and are can avoid redundant + * atomic change. + */ + +/** + * skb_cloned - is the buffer a clone + * @skb: buffer to check + * + * Returns true if the buffer was generated with skb_clone() and is + * one of multiple shared copies of the buffer. Cloned buffers are + * shared data so must not be written to under normal circumstances. 
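An illustrative sketch (not from the original header) of the usual copy-before-write pattern built on skb_cloned(), defined just below: a path that may hold a clone expands the head to get a private copy before writing to the payload. The function name is hypothetical.

static int example_make_writable(struct sk_buff *skb)
{
        /* Clones share their data area; take a private copy first. */
        if (skb_cloned(skb))
                return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
        return 0;
}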
+ */ +static inline int skb_cloned(const struct sk_buff *skb) +{ + return skb->cloned && + (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; +} + +/** + * skb_header_cloned - is the header a clone + * @skb: buffer to check + * + * Returns true if modifying the header part of the buffer requires + * the data to be copied. + */ +static inline int skb_header_cloned(const struct sk_buff *skb) +{ + int dataref; + + if (!skb->cloned) + return 0; + + dataref = atomic_read(&skb_shinfo(skb)->dataref); + dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); + return dataref != 1; +} + +/** + * skb_header_release - release reference to header + * @skb: buffer to operate on + * + * Drop a reference to the header part of the buffer. This is done + * by acquiring a payload reference. You must not read from the header + * part of skb->data after this. + */ +static inline void skb_header_release(struct sk_buff *skb) +{ + BUG_ON(skb->nohdr); + skb->nohdr = 1; + atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); +} + +/** + * skb_shared - is the buffer shared + * @skb: buffer to check + * + * Returns true if more than one person has a reference to this + * buffer. + */ +static inline int skb_shared(const struct sk_buff *skb) +{ + return atomic_read(&skb->users) != 1; +} + +/** + * skb_share_check - check if buffer is shared and if so clone it + * @skb: buffer to check + * @pri: priority for memory allocation + * + * If the buffer is shared the buffer is cloned and the old copy + * drops a reference. A new clone with a single reference is returned. + * If the buffer is not shared the original buffer is returned. When + * being called from interrupt status or with spinlocks held pri must + * be GFP_ATOMIC. + * + * NULL is returned on a memory allocation failure. + */ +static inline struct sk_buff *skb_share_check(struct sk_buff *skb, + gfp_t pri) +{ + might_sleep_if(pri & __GFP_WAIT); + if (skb_shared(skb)) { + struct sk_buff *nskb = skb_clone(skb, pri); + kfree_skb(skb); + skb = nskb; + } + return skb; +} + +/* + * Copy shared buffers into a new sk_buff. We effectively do COW on + * packets to handle cases where we have a local reader and forward + * and a couple of other messy ones. The normal one is tcpdumping + * a packet thats being forwarded. + */ + +/** + * skb_unshare - make a copy of a shared buffer + * @skb: buffer to check + * @pri: priority for memory allocation + * + * If the socket buffer is a clone then this function creates a new + * copy of the data, drops a reference count on the old copy and returns + * the new copy with the reference count at 1. If the buffer is not a clone + * the original buffer is returned. When called with a spinlock held or + * from interrupt state @pri must be %GFP_ATOMIC + * + * %NULL is returned on a memory allocation failure. + */ +static inline struct sk_buff *skb_unshare(struct sk_buff *skb, + gfp_t pri) +{ + might_sleep_if(pri & __GFP_WAIT); + if (skb_cloned(skb)) { + struct sk_buff *nskb = skb_copy(skb, pri); + kfree_skb(skb); /* Free our shared copy */ + skb = nskb; + } + return skb; +} + +/** + * skb_peek - peek at the head of an &sk_buff_head + * @list_: list to peek at + * + * Peek an &sk_buff. Unlike most other operations you _MUST_ + * be careful with this one. A peek leaves the buffer on the + * list and someone else may run off with it. You must hold + * the appropriate locks or have a private queue to do this. + * + * Returns %NULL for an empty list or a pointer to the head element. 
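A short editorial sketch of skb_peek() used under the queue's own lock, which is the kind of protection the comment above asks for; the function name is hypothetical.

static unsigned int example_head_len(struct sk_buff_head *q)
{
        struct sk_buff *skb;
        unsigned int len = 0;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        skb = skb_peek(q);              /* buffer stays on the queue */
        if (skb)
                len = skb->len;
        spin_unlock_irqrestore(&q->lock, flags);

        return len;
}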
+ * The reference count is not incremented and the reference is therefore + * volatile. Use with caution. + */ +static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) +{ + struct sk_buff *list = ((const struct sk_buff *)list_)->next; + if (list == (struct sk_buff *)list_) + list = NULL; + return list; +} + +/** + * skb_peek_next - peek skb following the given one from a queue + * @skb: skb to start from + * @list_: list to peek at + * + * Returns %NULL when the end of the list is met or a pointer to the + * next element. The reference count is not incremented and the + * reference is therefore volatile. Use with caution. + */ +static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, + const struct sk_buff_head *list_) +{ + struct sk_buff *next = skb->next; + if (next == (struct sk_buff *)list_) + next = NULL; + return next; +} + +/** + * skb_peek_tail - peek at the tail of an &sk_buff_head + * @list_: list to peek at + * + * Peek an &sk_buff. Unlike most other operations you _MUST_ + * be careful with this one. A peek leaves the buffer on the + * list and someone else may run off with it. You must hold + * the appropriate locks or have a private queue to do this. + * + * Returns %NULL for an empty list or a pointer to the tail element. + * The reference count is not incremented and the reference is therefore + * volatile. Use with caution. + */ +static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) +{ + struct sk_buff *list = ((const struct sk_buff *)list_)->prev; + if (list == (struct sk_buff *)list_) + list = NULL; + return list; +} + +/** + * skb_queue_len - get queue length + * @list_: list to measure + * + * Return the length of an &sk_buff queue. + */ +static inline __u32 skb_queue_len(const struct sk_buff_head *list_) +{ + return list_->qlen; +} + +/** + * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head + * @list: queue to initialize + * + * This initializes only the list and queue length aspects of + * an sk_buff_head object. This allows to initialize the list + * aspects of an sk_buff_head without reinitializing things like + * the spinlock. It can also be used for on-stack sk_buff_head + * objects where the spinlock is known to not be used. + */ +static inline void __skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} + +/* + * This function creates a split out lock class for each invocation; + * this is needed for now since a whole lot of users of the skb-queue + * infrastructure in drivers have different locking usage (in hardirq) + * than the networking core (in softirq only). In the long run either the + * network layer or drivers should need annotation to consolidate the + * main types of usage into 3 classes. + */ +static inline void skb_queue_head_init(struct sk_buff_head *list) +{ + spin_lock_init(&list->lock); + __skb_queue_head_init(list); +} + +static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) +{ + skb_queue_head_init(list); + lockdep_set_class(&list->lock, class); +} + +/* + * Insert an sk_buff on a list. + * + * The "__skb_xxxx()" functions are the non-atomic ones that + * can only be called with interrupts disabled. 
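As an editorial illustration of the locking rule described above (not part of the file): the locked helpers can be called directly, while the __skb_*() forms rely on the caller for exclusion, typically via the queue's own spinlock. Function names are hypothetical.

static void example_enqueue(struct sk_buff_head *q, struct sk_buff *skb)
{
        /* Locked variant: usable where concurrent access is possible. */
        skb_queue_tail(q, skb);
}

static void example_enqueue_unlocked(struct sk_buff_head *q, struct sk_buff *skb)
{
        unsigned long flags;

        /* Non-atomic variant: the caller provides the exclusion. */
        spin_lock_irqsave(&q->lock, flags);
        __skb_queue_tail(q, skb);
        spin_unlock_irqrestore(&q->lock, flags);
}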
+ */ +extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); +static inline void __skb_insert(struct sk_buff *newsk, + struct sk_buff *prev, struct sk_buff *next, + struct sk_buff_head *list) +{ + newsk->next = next; + newsk->prev = prev; + next->prev = prev->next = newsk; + list->qlen++; +} + +static inline void __skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *next) +{ + struct sk_buff *first = list->next; + struct sk_buff *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * skb_queue_splice - join two skb lists, this is designed for stacks + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + } +} + +/** + * skb_queue_splice_init - join two skb lists and reinitialise the emptied list + * @list: the new list to add + * @head: the place to add it in the first list + * + * The list at @list is reinitialised + */ +static inline void skb_queue_splice_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} + +/** + * skb_queue_splice_tail - join two skb lists, each list being a queue + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice_tail(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + } +} + +/** + * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list + * @list: the new list to add + * @head: the place to add it in the first list + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void skb_queue_splice_tail_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} + +/** + * __skb_queue_after - queue a buffer at the list head + * @list: list to use + * @prev: place after this buffer + * @newsk: buffer to queue + * + * Queue a buffer int the middle of a list. This function takes no locks + * and you must therefore hold required locks before calling it. + * + * A buffer cannot be placed on two lists at the same time. + */ +static inline void __skb_queue_after(struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *newsk) +{ + __skb_insert(newsk, prev, prev->next, list); +} + +extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, + struct sk_buff_head *list); + +static inline void __skb_queue_before(struct sk_buff_head *list, + struct sk_buff *next, + struct sk_buff *newsk) +{ + __skb_insert(newsk, next->prev, next, list); +} + +/** + * __skb_queue_head - queue a buffer at the list head + * @list: list to use + * @newsk: buffer to queue + * + * Queue a buffer at the start of a list. This function takes no locks + * and you must therefore hold required locks before calling it. + * + * A buffer cannot be placed on two lists at the same time. 
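An editorial sketch combining the on-stack initialiser and the splice helpers documented above (not part of the header): buffers are gathered on a private list without locking and then moved to a shared queue under a single lock acquisition. Names are hypothetical.

static void example_collect_and_splice(struct sk_buff_head *shared,
                                       struct sk_buff **skbs, int n)
{
        struct sk_buff_head tmp;
        int i;

        __skb_queue_head_init(&tmp);    /* on-stack list, lock unused */
        for (i = 0; i < n; i++)
                __skb_queue_tail(&tmp, skbs[i]);

        spin_lock_bh(&shared->lock);
        skb_queue_splice_tail_init(&tmp, shared);
        spin_unlock_bh(&shared->lock);
}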
+ */ +extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); +static inline void __skb_queue_head(struct sk_buff_head *list, + struct sk_buff *newsk) +{ + __skb_queue_after(list, (struct sk_buff *)list, newsk); +} + +/** + * __skb_queue_tail - queue a buffer at the list tail + * @list: list to use + * @newsk: buffer to queue + * + * Queue a buffer at the end of a list. This function takes no locks + * and you must therefore hold required locks before calling it. + * + * A buffer cannot be placed on two lists at the same time. + */ +extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); +static inline void __skb_queue_tail(struct sk_buff_head *list, + struct sk_buff *newsk) +{ + __skb_queue_before(list, (struct sk_buff *)list, newsk); +} + +/* + * remove sk_buff from list. _Must_ be called atomically, and with + * the list known.. + */ +extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); +static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) +{ + struct sk_buff *next, *prev; + + list->qlen--; + next = skb->next; + prev = skb->prev; + skb->next = skb->prev = NULL; + next->prev = prev; + prev->next = next; +} + +/** + * __skb_dequeue - remove from the head of the queue + * @list: list to dequeue from + * + * Remove the head of the list. This function does not take any locks + * so must be used with appropriate locks held only. The head item is + * returned or %NULL if the list is empty. + */ +extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); +static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) +{ + struct sk_buff *skb = skb_peek(list); + if (skb) + __skb_unlink(skb, list); + return skb; +} + +/** + * __skb_dequeue_tail - remove from the tail of the queue + * @list: list to dequeue from + * + * Remove the tail of the list. This function does not take any locks + * so must be used with appropriate locks held only. The tail item is + * returned or %NULL if the list is empty. + */ +extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); +static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) +{ + struct sk_buff *skb = skb_peek_tail(list); + if (skb) + __skb_unlink(skb, list); + return skb; +} + + +static inline bool skb_is_nonlinear(const struct sk_buff *skb) +{ + return skb->data_len; +} + +static inline unsigned int skb_headlen(const struct sk_buff *skb) +{ + return skb->len - skb->data_len; +} + +static inline int skb_pagelen(const struct sk_buff *skb) +{ + int i, len = 0; + + for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) + len += skb_frag_size(&skb_shinfo(skb)->frags[i]); + return len + skb_headlen(skb); +} + +/** + * __skb_fill_page_desc - initialise a paged fragment in an skb + * @skb: buffer containing fragment to be initialised + * @i: paged fragment index to initialise + * @page: the page to use for this fragment + * @off: the offset to the data with @page + * @size: the length of the data + * + * Initialises the @i'th fragment of @skb to point to &size bytes at + * offset @off within @page. + * + * Does not take any additional reference on the fragment. 
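A sketch of typical use (editorial addition): attaching one page as the first paged fragment with skb_fill_page_desc(), defined just below, and doing the length accounting that the frag helpers deliberately leave to the caller. The function name is hypothetical and the truesize adjustment assumes a full page was allocated for the fragment.

static void example_attach_page(struct sk_buff *skb, struct page *page,
                                unsigned int len)
{
        skb_fill_page_desc(skb, 0, page, 0, len);

        /* The frag helpers do not touch the skb length accounting. */
        skb->len      += len;
        skb->data_len += len;
        skb->truesize += PAGE_SIZE;
}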
+ */ +static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, + struct page *page, int off, int size) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + frag->page.p = page; + frag->page_offset = off; + skb_frag_size_set(frag, size); +} + +/** + * skb_fill_page_desc - initialise a paged fragment in an skb + * @skb: buffer containing fragment to be initialised + * @i: paged fragment index to initialise + * @page: the page to use for this fragment + * @off: the offset to the data with @page + * @size: the length of the data + * + * As per __skb_fill_page_desc() -- initialises the @i'th fragment of + * @skb to point to &size bytes at offset @off within @page. In + * addition updates @skb such that @i is the last fragment. + * + * Does not take any additional reference on the fragment. + */ +static inline void skb_fill_page_desc(struct sk_buff *skb, int i, + struct page *page, int off, int size) +{ + __skb_fill_page_desc(skb, i, page, off, size); + skb_shinfo(skb)->nr_frags = i + 1; +} + +extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize); + +#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) +#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) +#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) + +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) +{ + return skb->head + skb->tail; +} + +static inline void skb_reset_tail_pointer(struct sk_buff *skb) +{ + skb->tail = skb->data - skb->head; +} + +static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) +{ + skb_reset_tail_pointer(skb); + skb->tail += offset; +} +#else /* NET_SKBUFF_DATA_USES_OFFSET */ +static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) +{ + return skb->tail; +} + +static inline void skb_reset_tail_pointer(struct sk_buff *skb) +{ + skb->tail = skb->data; +} + +static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) +{ + skb->tail = skb->data + offset; +} + +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + +/* + * Add data to an sk_buff + */ +extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len); +static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) +{ + unsigned char *tmp = skb_tail_pointer(skb); + SKB_LINEAR_ASSERT(skb); + skb->tail += len; + skb->len += len; + return tmp; +} + +extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len); +static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) +{ + skb->data -= len; + skb->len += len; + return skb->data; +} + +extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); +static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) +{ + skb->len -= len; + BUG_ON(skb->len < skb->data_len); + return skb->data += len; +} + +static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len) +{ + return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); +} + +extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); + +static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) +{ + if (len > skb_headlen(skb) && + !__pskb_pull_tail(skb, len - skb_headlen(skb))) + return NULL; + skb->len -= len; + return skb->data += len; +} + +static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) +{ + return unlikely(len > skb->len) ? 
NULL : __pskb_pull(skb, len); +} + +static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) +{ + if (likely(len <= skb_headlen(skb))) + return 1; + if (unlikely(len > skb->len)) + return 0; + return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; +} + +/** + * skb_headroom - bytes at buffer head + * @skb: buffer to check + * + * Return the number of bytes of free space at the head of an &sk_buff. + */ +static inline unsigned int skb_headroom(const struct sk_buff *skb) +{ + return skb->data - skb->head; +} + +/** + * skb_tailroom - bytes at buffer end + * @skb: buffer to check + * + * Return the number of bytes of free space at the tail of an sk_buff + */ +static inline int skb_tailroom(const struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; +} + +/** + * skb_availroom - bytes at buffer end + * @skb: buffer to check + * + * Return the number of bytes of free space at the tail of an sk_buff + * allocated by sk_stream_alloc() + */ +static inline int skb_availroom(const struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len; +} + +/** + * skb_reserve - adjust headroom + * @skb: buffer to alter + * @len: bytes to move + * + * Increase the headroom of an empty &sk_buff by reducing the tail + * room. This is only allowed for an empty buffer. + */ +static inline void skb_reserve(struct sk_buff *skb, int len) +{ + skb->data += len; + skb->tail += len; +} + +static inline void skb_reset_mac_len(struct sk_buff *skb) +{ + skb->mac_len = skb->network_header - skb->mac_header; +} + +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_transport_header(const struct sk_buff *skb) +{ + return skb->head + skb->transport_header; +} + +static inline void skb_reset_transport_header(struct sk_buff *skb) +{ + skb->transport_header = skb->data - skb->head; +} + +static inline void skb_set_transport_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_transport_header(skb); + skb->transport_header += offset; +} + +static inline unsigned char *skb_network_header(const struct sk_buff *skb) +{ + return skb->head + skb->network_header; +} + +static inline void skb_reset_network_header(struct sk_buff *skb) +{ + skb->network_header = skb->data - skb->head; +} + +static inline void skb_set_network_header(struct sk_buff *skb, const int offset) +{ + skb_reset_network_header(skb); + skb->network_header += offset; +} + +static inline unsigned char *skb_mac_header(const struct sk_buff *skb) +{ + return skb->head + skb->mac_header; +} + +static inline int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac_header != ~0U; +} + +static inline void skb_reset_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->data - skb->head; +} + +static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) +{ + skb_reset_mac_header(skb); + skb->mac_header += offset; +} + +#else /* NET_SKBUFF_DATA_USES_OFFSET */ + +static inline unsigned char *skb_transport_header(const struct sk_buff *skb) +{ + return skb->transport_header; +} + +static inline void skb_reset_transport_header(struct sk_buff *skb) +{ + skb->transport_header = skb->data; +} + +static inline void skb_set_transport_header(struct sk_buff *skb, + const int offset) +{ + skb->transport_header = skb->data + offset; +} + +static inline unsigned char *skb_network_header(const struct sk_buff *skb) +{ + return skb->network_header; +} + +static inline void skb_reset_network_header(struct sk_buff *skb) +{ + skb->network_header = 
skb->data; +} + +static inline void skb_set_network_header(struct sk_buff *skb, const int offset) +{ + skb->network_header = skb->data + offset; +} + +static inline unsigned char *skb_mac_header(const struct sk_buff *skb) +{ + return skb->mac_header; +} + +static inline int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac_header != NULL; +} + +static inline void skb_reset_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->data; +} + +static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) +{ + skb->mac_header = skb->data + offset; +} +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + +static inline void skb_mac_header_rebuild(struct sk_buff *skb) +{ + if (skb_mac_header_was_set(skb)) { + const unsigned char *old_mac = skb_mac_header(skb); + + skb_set_mac_header(skb, -skb->mac_len); + memmove(skb_mac_header(skb), old_mac, skb->mac_len); + } +} + +static inline int skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} + +static inline int skb_transport_offset(const struct sk_buff *skb) +{ + return skb_transport_header(skb) - skb->data; +} + +static inline u32 skb_network_header_len(const struct sk_buff *skb) +{ + return skb->transport_header - skb->network_header; +} + +static inline int skb_network_offset(const struct sk_buff *skb) +{ + return skb_network_header(skb) - skb->data; +} + +static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) +{ + return pskb_may_pull(skb, skb_network_offset(skb) + len); +} + +/* + * CPUs often take a performance hit when accessing unaligned memory + * locations. The actual performance hit varies, it can be small if the + * hardware handles it or large if we have to take an exception and fix it + * in software. + * + * Since an ethernet header is 14 bytes network drivers often end up with + * the IP header at an unaligned offset. The IP header can be aligned by + * shifting the start of the packet by 2 bytes. Drivers should do this + * with: + * + * skb_reserve(skb, NET_IP_ALIGN); + * + * The downside to this alignment of the IP header is that the DMA is now + * unaligned. On some architectures the cost of an unaligned DMA is high + * and this cost outweighs the gains made by aligning the IP header. + * + * Since this trade off varies between architectures, we allow NET_IP_ALIGN + * to be overridden. + */ +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +/* + * The networking layer reserves some headroom in skb data (via + * dev_alloc_skb). This is used to avoid having to reallocate skb data when + * the header has to grow. In the default case, if the header has to grow + * 32 bytes or less we avoid the reallocation. + * + * Unfortunately this headroom changes the DMA alignment of the resulting + * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive + * on some architectures. An architecture can override this value, + * perhaps setting it to a cacheline in size (since that will maintain + * cacheline alignment of the DMA). It must be a power of 2. + * + * Various parts of the networking layer expect at least 32 bytes of + * headroom, you should not reduce this. + * + * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) + * to reduce average number of cache lines per packet. 
+ * get_rps_cpus() for example only access one 64 bytes aligned block : + * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) + */ +#ifndef NET_SKB_PAD +#define NET_SKB_PAD max(32, L1_CACHE_BYTES) +#endif + +extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); + +static inline void __skb_trim(struct sk_buff *skb, unsigned int len) +{ + if (unlikely(skb_is_nonlinear(skb))) { + WARN_ON(1); + return; + } + skb->len = len; + skb_set_tail_pointer(skb, len); +} + +extern void skb_trim(struct sk_buff *skb, unsigned int len); + +static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) +{ + if (skb->data_len) + return ___pskb_trim(skb, len); + __skb_trim(skb, len); + return 0; +} + +static inline int pskb_trim(struct sk_buff *skb, unsigned int len) +{ + return (len < skb->len) ? __pskb_trim(skb, len) : 0; +} + +/** + * pskb_trim_unique - remove end from a paged unique (not cloned) buffer + * @skb: buffer to alter + * @len: new length + * + * This is identical to pskb_trim except that the caller knows that + * the skb is not cloned so we should never get an error due to out- + * of-memory. + */ +static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) +{ + int err = pskb_trim(skb, len); + BUG_ON(err); +} + +/** + * skb_orphan - orphan a buffer + * @skb: buffer to orphan + * + * If a buffer currently has an owner then we call the owner's + * destructor function and make the @skb unowned. The buffer continues + * to exist but is no longer charged to its former owner. + */ +static inline void skb_orphan(struct sk_buff *skb) +{ + if (skb->destructor) + skb->destructor(skb); + skb->destructor = NULL; + skb->sk = NULL; +} + +/** + * __skb_queue_purge - empty a list + * @list: list to empty + * + * Delete all buffers on an &sk_buff list. Each buffer is removed from + * the list and one reference dropped. This function does not take the + * list lock and the caller must hold the relevant locks to use it. + */ +extern void skb_queue_purge(struct sk_buff_head *list); +static inline void __skb_queue_purge(struct sk_buff_head *list) +{ + struct sk_buff *skb; + while ((skb = __skb_dequeue(list)) != NULL) + kfree_skb(skb); +} + +/** + * __dev_alloc_skb - allocate an skbuff for receiving + * @length: length to allocate + * @gfp_mask: get_free_pages mask, passed to alloc_skb + * + * Allocate a new &sk_buff and assign it a usage count of one. The + * buffer has unspecified headroom built in. Users should allocate + * the headroom they think they need without accounting for the + * built in space. The built in space is used for optimisations. + * + * %NULL is returned if there is no free memory. + */ +static inline struct sk_buff *__dev_alloc_skb(unsigned int length, + gfp_t gfp_mask) +{ + struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); + if (likely(skb)) + skb_reserve(skb, NET_SKB_PAD); + return skb; +} + +extern struct sk_buff *dev_alloc_skb(unsigned int length); + +extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, + unsigned int length, gfp_t gfp_mask); + +/** + * netdev_alloc_skb - allocate an skbuff for rx on a specific device + * @dev: network device to receive on + * @length: length to allocate + * + * Allocate a new &sk_buff and assign it a usage count of one. The + * buffer has unspecified headroom built in. Users should allocate + * the headroom they think they need without accounting for the + * built in space. The built in space is used for optimisations. + * + * %NULL is returned if there is no free memory. 
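An illustrative receive-path sketch (editorial, not part of the header), assuming an Ethernet driver that copies a frame of rx_len bytes out of a device buffer; eth_type_trans() comes from <linux/etherdevice.h> and the function name is hypothetical.

static void example_rx(struct net_device *dev, const void *buf,
                       unsigned int rx_len)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(dev, rx_len);
        if (unlikely(!skb)) {
                dev->stats.rx_dropped++;
                return;
        }

        memcpy(skb_put(skb, rx_len), buf, rx_len);
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
}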
Although this function
+ * allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+               unsigned int length)
+{
+       return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+}
+
+static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
+               unsigned int length, gfp_t gfp)
+{
+       struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
+
+       if (NET_IP_ALIGN && skb)
+               skb_reserve(skb, NET_IP_ALIGN);
+       return skb;
+}
+
+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
+               unsigned int length)
+{
+       return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
+}
+
+/**
+ * skb_frag_page - retrieve the page referred to by a paged fragment
+ * @frag: the paged fragment
+ *
+ * Returns the &struct page associated with @frag.
+ */
+static inline struct page *skb_frag_page(const skb_frag_t *frag)
+{
+       return frag->page.p;
+}
+
+/**
+ * __skb_frag_ref - take an additional reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Takes an additional reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_ref(skb_frag_t *frag)
+{
+       get_page(skb_frag_page(frag));
+}
+
+/**
+ * skb_frag_ref - take an additional reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset.
+ *
+ * Takes an additional reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_ref(struct sk_buff *skb, int f)
+{
+       __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+}
+
+/**
+ * __skb_frag_unref - release a reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Releases a reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_unref(skb_frag_t *frag)
+{
+       put_page(skb_frag_page(frag));
+}
+
+/**
+ * skb_frag_unref - release a reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset
+ *
+ * Releases a reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_unref(struct sk_buff *skb, int f)
+{
+       __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
+}
+
+/**
+ * skb_frag_address - gets the address of the data contained in a paged fragment
+ * @frag: the paged fragment buffer
+ *
+ * Returns the address of the data within @frag. The page must already
+ * be mapped.
+ */
+static inline void *skb_frag_address(const skb_frag_t *frag)
+{
+       return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+
+/**
+ * skb_frag_address_safe - gets the address of the data contained in a paged fragment
+ * @frag: the paged fragment buffer
+ *
+ * Returns the address of the data within @frag. Checks that the page
+ * is mapped and returns %NULL otherwise.
+ */
+static inline void *skb_frag_address_safe(const skb_frag_t *frag)
+{
+       void *ptr = page_address(skb_frag_page(frag));
+       if (unlikely(!ptr))
+               return NULL;
+
+       return ptr + frag->page_offset;
+}
+
+/**
+ * __skb_frag_set_page - sets the page contained in a paged fragment
+ * @frag: the paged fragment
+ * @page: the page to set
+ *
+ * Sets the fragment @frag to contain @page.
+ */
+static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
+{
+       frag->page.p = page;
+}
+
+/**
+ * skb_frag_set_page - sets the page contained in a paged fragment of an skb
+ * @skb: the buffer
+ * @f: the fragment offset
+ * @page: the page to set
+ *
+ * Sets the @f'th fragment of @skb to contain @page.
+ */ +static inline void skb_frag_set_page(struct sk_buff *skb, int f, + struct page *page) +{ + __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); +} + +/** + * skb_frag_dma_map - maps a paged fragment via the DMA API + * @dev: the device to map the fragment to + * @frag: the paged fragment to map + * @offset: the offset within the fragment (starting at the + * fragment's own offset) + * @size: the number of bytes to map + * @dir: the direction of the mapping (%PCI_DMA_*) + * + * Maps the page associated with @frag to @device. + */ +static inline dma_addr_t skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} + +static inline struct sk_buff *pskb_copy(struct sk_buff *skb, + gfp_t gfp_mask) +{ + return __pskb_copy(skb, skb_headroom(skb), gfp_mask); +} + +/** + * skb_clone_writable - is the header of a clone writable + * @skb: buffer to check + * @len: length up to which to write + * + * Returns true if modifying the header part of the cloned buffer + * does not requires the data to be copied. + */ +static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) +{ + return !skb_header_cloned(skb) && + skb_headroom(skb) + len <= skb->hdr_len; +} + +static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, + int cloned) +{ + int delta = 0; + + if (headroom > skb_headroom(skb)) + delta = headroom - skb_headroom(skb); + + if (delta || cloned) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} + +/** + * skb_cow - copy header of skb when it is required + * @skb: buffer to cow + * @headroom: needed headroom + * + * If the skb passed lacks sufficient headroom or its data part + * is shared, data is reallocated. If reallocation fails, an error + * is returned and original skb is not changed. + * + * The result is skb with writable area skb->head...skb->tail + * and at least @headroom of space at head. + */ +static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) +{ + return __skb_cow(skb, headroom, skb_cloned(skb)); +} + +/** + * skb_cow_head - skb_cow but only making the head writable + * @skb: buffer to cow + * @headroom: needed headroom + * + * This function is identical to skb_cow except that we replace the + * skb_cloned check by skb_header_cloned. It should be used when + * you only need to push on some header and do not need to modify + * the data. + */ +static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + return __skb_cow(skb, headroom, skb_header_cloned(skb)); +} + +/** + * skb_padto - pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
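A short editorial sketch of the point made above: because skb_padto() frees the skb on failure, a transmit routine must not touch the buffer again and simply reports the packet as handled. ETH_ZLEN comes from <linux/if_ether.h>; the function name is hypothetical.

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* On failure the skb has already been freed; do not free it again. */
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;

        /* ... hand the padded frame to the hardware ... */
        return NETDEV_TX_OK;
}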
+ */ + +static inline int skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if (likely(size >= len)) + return 0; + return skb_pad(skb, len - size); +} + +static inline int skb_add_data(struct sk_buff *skb, + char __user *from, int copy) +{ + const int off = skb->len; + + if (skb->ip_summed == CHECKSUM_NONE) { + int err = 0; + __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy), + copy, 0, &err); + if (!err) { + skb->csum = csum_block_add(skb->csum, csum, off); + return 0; + } + } else if (!copy_from_user(skb_put(skb, copy), from, copy)) + return 0; + + __skb_trim(skb, off); + return -EFAULT; +} + +static inline int skb_can_coalesce(struct sk_buff *skb, int i, + const struct page *page, int off) +{ + if (i) { + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + + return page == skb_frag_page(frag) && + off == frag->page_offset + skb_frag_size(frag); + } + return 0; +} + +static inline int __skb_linearize(struct sk_buff *skb) +{ + return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; +} + +/** + * skb_linearize - convert paged skb to linear one + * @skb: buffer to linarize + * + * If there is no free memory -ENOMEM is returned, otherwise zero + * is returned and the old skb data released. + */ +static inline int skb_linearize(struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; +} + +/** + * skb_linearize_cow - make sure skb is linear and writable + * @skb: buffer to process + * + * If there is no free memory -ENOMEM is returned, otherwise zero + * is returned and the old skb data released. + */ +static inline int skb_linearize_cow(struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) || skb_cloned(skb) ? + __skb_linearize(skb) : 0; +} + +/** + * skb_postpull_rcsum - update checksum for received skb after pull + * @skb: buffer to update + * @start: start of data before pull + * @len: length of data pulled + * + * After doing a pull on a received packet, you need to call this to + * update the CHECKSUM_COMPLETE checksum, or set ip_summed to + * CHECKSUM_NONE so that it can be recomputed from scratch. + */ + +static inline void skb_postpull_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); +} + +unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); + +/** + * pskb_trim_rcsum - trim received skb and update checksum + * @skb: buffer to trim + * @len: new length + * + * This is exactly the same as pskb_trim except that it ensures the + * checksum of received packets are still valid after the operation. 
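An editorial sketch of a typical caller of the checksum-aware helpers above (not part of the header): stripping a validated outer header and then trimming trailing padding while keeping a CHECKSUM_COMPLETE value usable. Names and the error code are illustrative.

static int example_decap(struct sk_buff *skb, unsigned int hlen,
                         unsigned int inner_len)
{
        const void *outer;

        if (!pskb_may_pull(skb, hlen))
                return -EINVAL;

        outer = skb->data;
        __skb_pull(skb, hlen);
        skb_postpull_rcsum(skb, outer, hlen);

        /* Drop any trailing padding beyond the inner payload. */
        return pskb_trim_rcsum(skb, inner_len);
}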
+ */ + +static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (likely(len >= skb->len)) + return 0; + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + return __pskb_trim(skb, len); +} + +#define skb_queue_walk(queue, skb) \ + for (skb = (queue)->next; \ + skb != (struct sk_buff *)(queue); \ + skb = skb->next) + +#define skb_queue_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->next, tmp = skb->next; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->next) + +#define skb_queue_walk_from(queue, skb) \ + for (; skb != (struct sk_buff *)(queue); \ + skb = skb->next) + +#define skb_queue_walk_from_safe(queue, skb, tmp) \ + for (tmp = skb->next; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->next) + +#define skb_queue_reverse_walk(queue, skb) \ + for (skb = (queue)->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = skb->prev) + +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) + +#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \ + for (tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) + +static inline bool skb_has_frag_list(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->frag_list != NULL; +} + +static inline void skb_frag_list_init(struct sk_buff *skb) +{ + skb_shinfo(skb)->frag_list = NULL; +} + +static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag) +{ + frag->next = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = frag; +} + +#define skb_walk_frags(skb, iter) \ + for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) + +extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, + int *peeked, int *off, int *err); +extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, + int noblock, int *err); +extern unsigned int datagram_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); +extern int skb_copy_datagram_iovec(const struct sk_buff *from, + int offset, struct iovec *to, + int size); +extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, + int hlen, + struct iovec *iov); +extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, + int offset, + const struct iovec *from, + int from_offset, + int len); +extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, + int offset, + const struct iovec *to, + int to_offset, + int size); +extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); +extern void skb_free_datagram_locked(struct sock *sk, + struct sk_buff *skb); +extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, + unsigned int flags); +extern __wsum skb_checksum(const struct sk_buff *skb, int offset, + int len, __wsum csum); +extern int skb_copy_bits(const struct sk_buff *skb, int offset, + void *to, int len); +extern int skb_store_bits(struct sk_buff *skb, int offset, + const void *from, int len); +extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, + int offset, u8 *to, int len, + __wsum csum); +extern int skb_splice_bits(struct sk_buff *skb, + unsigned int offset, + struct pipe_inode_info *pipe, + unsigned int len, + unsigned int flags); +extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); +extern void skb_split(struct sk_buff *skb, + struct sk_buff *skb1, const u32 len); +extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, + int 
shiftlen); + +extern struct sk_buff *skb_segment(struct sk_buff *skb, + netdev_features_t features); + +static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +} + +static inline void skb_copy_from_linear_data(const struct sk_buff *skb, + void *to, + const unsigned int len) +{ + memcpy(to, skb->data, len); +} + +static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, + const int offset, void *to, + const unsigned int len) +{ + memcpy(to, skb->data + offset, len); +} + +static inline void skb_copy_to_linear_data(struct sk_buff *skb, + const void *from, + const unsigned int len) +{ + memcpy(skb->data, from, len); +} + +static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, + const int offset, + const void *from, + const unsigned int len) +{ + memcpy(skb->data + offset, from, len); +} + +extern void skb_init(void); + +static inline ktime_t skb_get_ktime(const struct sk_buff *skb) +{ + return skb->tstamp; +} + +/** + * skb_get_timestamp - get timestamp from a skb + * @skb: skb to get stamp from + * @stamp: pointer to struct timeval to store stamp in + * + * Timestamps are stored in the skb as offsets to a base timestamp. + * This function converts the offset back to a struct timeval and stores + * it in stamp. + */ +static inline void skb_get_timestamp(const struct sk_buff *skb, + struct timeval *stamp) +{ + *stamp = ktime_to_timeval(skb->tstamp); +} + +static inline void skb_get_timestampns(const struct sk_buff *skb, + struct timespec *stamp) +{ + *stamp = ktime_to_timespec(skb->tstamp); +} + +static inline void __net_timestamp(struct sk_buff *skb) +{ + skb->tstamp = ktime_get_real(); +} + +static inline ktime_t net_timedelta(ktime_t t) +{ + return ktime_sub(ktime_get_real(), t); +} + +static inline ktime_t net_invalid_timestamp(void) +{ + return ktime_set(0, 0); +} + +extern void skb_timestamping_init(void); + +#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING + +extern void skb_clone_tx_timestamp(struct sk_buff *skb); +extern bool skb_defer_rx_timestamp(struct sk_buff *skb); + +#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ + +static inline void skb_clone_tx_timestamp(struct sk_buff *skb) +{ +} + +static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) +{ + return false; +} + +#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ + +/** + * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps + * + * PHY drivers may accept clones of transmitted packets for + * timestamping via their phy_driver.txtstamp method. These drivers + * must call this function to return the skb back to the stack, with + * or without a timestamp. 
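A minimal sketch (editorial, not part of the header) of a PHY driver completing a deferred transmit timestamp, assuming it has already converted the hardware stamp to nanoseconds; the function name is hypothetical.

static void example_phy_txtstamp_done(struct sk_buff *clone, u64 ns)
{
        struct skb_shared_hwtstamps hwtstamps;

        memset(&hwtstamps, 0, sizeof(hwtstamps));
        hwtstamps.hwtstamp = ns_to_ktime(ns);
        skb_complete_tx_timestamp(clone, &hwtstamps);
}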
+ * + * @skb: clone of the the original outgoing packet + * @hwtstamps: hardware time stamps, may be NULL if not available + * + */ +void skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); + +/** + * skb_tstamp_tx - queue clone of skb with send time stamps + * @orig_skb: the original outgoing packet + * @hwtstamps: hardware time stamps, may be NULL if not available + * + * If the skb has a socket associated, then this function clones the + * skb (thus sharing the actual data and optional structures), stores + * the optional hardware time stamping information (if non NULL) or + * generates a software time stamp (otherwise), then queues the clone + * to the error queue of the socket. Errors are silently ignored. + */ +extern void skb_tstamp_tx(struct sk_buff *orig_skb, + struct skb_shared_hwtstamps *hwtstamps); + +static inline void sw_tx_timestamp(struct sk_buff *skb) +{ + if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && + !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) + skb_tstamp_tx(skb, NULL); +} + +/** + * skb_tx_timestamp() - Driver hook for transmit timestamping + * + * Ethernet MAC Drivers should call this function in their hard_xmit() + * function immediately before giving the sk_buff to the MAC hardware. + * + * @skb: A socket buffer. + */ +static inline void skb_tx_timestamp(struct sk_buff *skb) +{ + skb_clone_tx_timestamp(skb); + sw_tx_timestamp(skb); +} + +/** + * skb_complete_wifi_ack - deliver skb with wifi status + * + * @skb: the original outgoing packet + * @acked: ack status + * + */ +void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); + +extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); +extern __sum16 __skb_checksum_complete(struct sk_buff *skb); + +static inline int skb_csum_unnecessary(const struct sk_buff *skb) +{ + return skb->ip_summed & CHECKSUM_UNNECESSARY; +} + +/** + * skb_checksum_complete - Calculate checksum of an entire packet + * @skb: packet to process + * + * This function calculates the checksum over the entire packet plus + * the value of skb->csum. The latter can be used to supply the + * checksum of a pseudo header as used by TCP/UDP. It returns the + * checksum. + * + * For protocols that contain complete checksums such as ICMP/TCP/UDP, + * this function can be used to verify that checksum on received + * packets. In that case the function should return zero if the + * checksum is correct. In particular, this function will return zero + * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the + * hardware has already verified the correctness of the checksum. + */ +static inline __sum16 skb_checksum_complete(struct sk_buff *skb) +{ + return skb_csum_unnecessary(skb) ? 
+ 0 : __skb_checksum_complete(skb); +} + +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +extern void nf_conntrack_destroy(struct nf_conntrack *nfct); +static inline void nf_conntrack_put(struct nf_conntrack *nfct) +{ + if (nfct && atomic_dec_and_test(&nfct->use)) + nf_conntrack_destroy(nfct); +} +static inline void nf_conntrack_get(struct nf_conntrack *nfct) +{ + if (nfct) + atomic_inc(&nfct->use); +} +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED +static inline void nf_conntrack_get_reasm(struct sk_buff *skb) +{ + if (skb) + atomic_inc(&skb->users); +} +static inline void nf_conntrack_put_reasm(struct sk_buff *skb) +{ + if (skb) + kfree_skb(skb); +} +#endif +#ifdef CONFIG_BRIDGE_NETFILTER +static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) +{ + if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) + kfree(nf_bridge); +} +static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) +{ + if (nf_bridge) + atomic_inc(&nf_bridge->use); +} +#endif /* CONFIG_BRIDGE_NETFILTER */ +static inline void nf_reset(struct sk_buff *skb) +{ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(skb->nfct); + skb->nfct = NULL; +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED + nf_conntrack_put_reasm(skb->nfct_reasm); + skb->nfct_reasm = NULL; +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + nf_bridge_put(skb->nf_bridge); + skb->nf_bridge = NULL; +#endif +} + +/* Note: This doesn't put any conntrack and bridge info in dst. */ +static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) +{ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + dst->nfct = src->nfct; + nf_conntrack_get(src->nfct); + dst->nfctinfo = src->nfctinfo; +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED + dst->nfct_reasm = src->nfct_reasm; + nf_conntrack_get_reasm(src->nfct_reasm); +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + dst->nf_bridge = src->nf_bridge; + nf_bridge_get(src->nf_bridge); +#endif +} + +static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) +{ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(dst->nfct); +#endif +#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED + nf_conntrack_put_reasm(dst->nfct_reasm); +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + nf_bridge_put(dst->nf_bridge); +#endif + __nf_copy(dst, src); +} + +#ifdef CONFIG_NETWORK_SECMARK +static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) +{ + to->secmark = from->secmark; +} + +static inline void skb_init_secmark(struct sk_buff *skb) +{ + skb->secmark = 0; +} +#else +static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) +{ } + +static inline void skb_init_secmark(struct sk_buff *skb) +{ } +#endif + +static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) +{ + skb->queue_mapping = queue_mapping; +} + +static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) +{ + return skb->queue_mapping; +} + +static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) +{ + to->queue_mapping = from->queue_mapping; +} + +static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) +{ + skb->queue_mapping = rx_queue + 1; +} + +static inline u16 skb_get_rx_queue(const struct sk_buff *skb) +{ + return skb->queue_mapping - 1; +} + +static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) +{ + return skb->queue_mapping != 0; +} + +extern u16 __skb_tx_hash(const struct net_device *dev, + const 
struct sk_buff *skb, + unsigned int num_tx_queues); + +#ifdef CONFIG_XFRM +static inline struct sec_path *skb_sec_path(struct sk_buff *skb) +{ + return skb->sp; +} +#else +static inline struct sec_path *skb_sec_path(struct sk_buff *skb) +{ + return NULL; +} +#endif + +static inline bool skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} + +static inline bool skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} + +extern void __skb_warn_lro_forwarding(const struct sk_buff *skb); + +static inline bool skb_warn_if_lro(const struct sk_buff *skb) +{ + /* LRO sets gso_size but not gso_type, whereas if GSO is really + * wanted then gso_type will be set. */ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + + if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && + unlikely(shinfo->gso_type == 0)) { + __skb_warn_lro_forwarding(skb); + return true; + } + return false; +} + +static inline void skb_forward_csum(struct sk_buff *skb) +{ + /* Unfortunately we don't support this one. Any brave souls? */ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; +} + +/** + * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE + * @skb: skb to check + * + * fresh skbs have their ip_summed set to CHECKSUM_NONE. + * Instead of forcing ip_summed to CHECKSUM_NONE, we can + * use this helper, to document places where we make this assertion. + */ +static inline void skb_checksum_none_assert(const struct sk_buff *skb) +{ +#ifdef DEBUG + BUG_ON(skb->ip_summed != CHECKSUM_NONE); +#endif +} + +bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); + +static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size) +{ + if (irqs_disabled()) + return false; + + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) + return false; + + if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) + return false; + + skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); + if (skb_end_pointer(skb) - skb->head < skb_size) + return false; + + if (skb_shared(skb) || skb_cloned(skb)) + return false; + + return true; +} +#endif /* __KERNEL__ */ +#endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h new file mode 100644 index 00000000..a595dce6 --- /dev/null +++ b/include/linux/slab.h @@ -0,0 +1,367 @@ +/* + * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). + * + * (C) SGI 2006, Christoph Lameter + * Cleaned up and restructured to ease the addition of alternative + * implementations of SLAB allocators. + */ + +#ifndef _LINUX_SLAB_H +#define _LINUX_SLAB_H + +#include <linux/gfp.h> +#include <linux/types.h> + +/* + * Flags to pass to kmem_cache_create(). + * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. + */ +#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ +#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ +#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ +#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ +#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ +#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ +#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ +/* + * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS! + * + * This delays freeing the SLAB page by a grace period, it does _NOT_ + * delay object freeing. 
This means that if you do kmem_cache_free() + * that memory location is free to be reused at any time. Thus it may + * be possible to see another object there in the same RCU grace period. + * + * This feature only ensures the memory location backing the object + * stays valid, the trick to using this is relying on an independent + * object validation pass. Something like: + * + * rcu_read_lock() + * again: + * obj = lockless_lookup(key); + * if (obj) { + * if (!try_get_ref(obj)) // might fail for free objects + * goto again; + * + * if (obj->key != key) { // not the object we expected + * put_ref(obj); + * goto again; + * } + * } + * rcu_read_unlock(); + * + * See also the comment on struct slab_rcu in mm/slab.c. + */ +#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ +#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ +#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ + +/* Flag to prevent checks on free */ +#ifdef CONFIG_DEBUG_OBJECTS +# define SLAB_DEBUG_OBJECTS 0x00400000UL +#else +# define SLAB_DEBUG_OBJECTS 0x00000000UL +#endif + +#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ + +/* Don't track use of uninitialized memory */ +#ifdef CONFIG_KMEMCHECK +# define SLAB_NOTRACK 0x01000000UL +#else +# define SLAB_NOTRACK 0x00000000UL +#endif +#ifdef CONFIG_FAILSLAB +# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ +#else +# define SLAB_FAILSLAB 0x00000000UL +#endif + +/* The following flags affect the page allocator grouping pages by mobility */ +#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ +#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ +/* + * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. + * + * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. + * + * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. + * Both make kfree a no-op. + */ +#define ZERO_SIZE_PTR ((void *)16) + +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ + (unsigned long)ZERO_SIZE_PTR) + +/* + * struct kmem_cache related prototypes + */ +void __init kmem_cache_init(void); +int slab_is_available(void); + +struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, + unsigned long, + void (*)(void *)); +void kmem_cache_destroy(struct kmem_cache *); +int kmem_cache_shrink(struct kmem_cache *); +void kmem_cache_free(struct kmem_cache *, void *); +unsigned int kmem_cache_size(struct kmem_cache *); + +/* + * Please use this macro to create slab caches. Simply specify the + * name of the structure and maybe some flags that are listed above. + * + * The alignment of the struct determines object alignment. If you + * f.e. add ____cacheline_aligned_in_smp to the struct declaration + * then the objects will be properly aligned in SMP configurations. + */ +#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ + sizeof(struct __struct), __alignof__(struct __struct),\ + (__flags), NULL) + +/* + * The largest kmalloc size supported by the slab allocators is + * 32 megabyte (2^25) or the maximum allocatable page order if that is + * less than 32 MB. + * + * WARNING: Its not easy to increase this value since the allocators have + * to do various tricks to work around compiler limitations in order to + * ensure proper constant folding. + */ +#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? 
\ + (MAX_ORDER + PAGE_SHIFT - 1) : 25) + +#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH) +#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT) + +/* + * Some archs want to perform DMA into kmalloc caches and need a guaranteed + * alignment larger than the alignment of a 64-bit integer. + * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that. + */ +#ifdef ARCH_DMA_MINALIGN +#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN +#else +#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) +#endif + +/* + * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. + * Intended for arches that get misalignment faults even for 64 bit integer + * aligned buffers. + */ +#ifndef ARCH_SLAB_MINALIGN +#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) +#endif + +/* + * Common kmalloc functions provided by all allocators + */ +void * __must_check __krealloc(const void *, size_t, gfp_t); +void * __must_check krealloc(const void *, size_t, gfp_t); +void kfree(const void *); +void kzfree(const void *); +size_t ksize(const void *); + +/* + * Allocator specific definitions. These are mainly used to establish optimized + * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by + * selecting the appropriate general cache at compile time. + * + * Allocators must define at least: + * + * kmem_cache_alloc() + * __kmalloc() + * kmalloc() + * + * Those wishing to support NUMA must also define: + * + * kmem_cache_alloc_node() + * kmalloc_node() + * + * See each allocator definition file for additional comments and + * implementation notes. + */ +#ifdef CONFIG_SLUB +#include <linux/slub_def.h> +#elif defined(CONFIG_SLOB) +#include <linux/slob_def.h> +#else +#include <linux/slab_def.h> +#endif + +/** + * kmalloc_array - allocate memory for an array. + * @n: number of elements. + * @size: element size. + * @flags: the type of memory to allocate. + * + * The @flags argument may be one of: + * + * %GFP_USER - Allocate memory on behalf of user. May sleep. + * + * %GFP_KERNEL - Allocate normal kernel ram. May sleep. + * + * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools. + * For example, use this inside interrupt handlers. + * + * %GFP_HIGHUSER - Allocate pages from high memory. + * + * %GFP_NOIO - Do not do any I/O at all while trying to get memory. + * + * %GFP_NOFS - Do not make any fs calls while trying to get memory. + * + * %GFP_NOWAIT - Allocation will not sleep. + * + * %GFP_THISNODE - Allocate node-local memory only. + * + * %GFP_DMA - Allocation suitable for DMA. + * Should only be used for kmalloc() caches. Otherwise, use a + * slab created with SLAB_DMA. + * + * Also it is possible to set different flags by OR'ing + * in one or more of the following additional @flags: + * + * %__GFP_COLD - Request cache-cold pages instead of + * trying to return cache-warm pages. + * + * %__GFP_HIGH - This allocation has high priority and may use emergency pools. + * + * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail + * (think twice before using). + * + * %__GFP_NORETRY - If memory is not immediately available, + * then give up at once. + * + * %__GFP_NOWARN - If allocation fails, don't issue any warnings. + * + * %__GFP_REPEAT - If allocation fails initially, try once more before failing. + * + * There are other flags available as well, but these are not intended + * for general use, and so are not documented here. For a full list of + * potential flags, always refer to linux/gfp.h. 
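 *
 * Purely as an illustration (not part of the original documentation), a
 * typical call site allocating an array of a hypothetical struct foo with
 * a hypothetical element count nelems looks like:
 *
 *	struct foo *arr = kmalloc_array(nelems, sizeof(*arr), GFP_KERNEL);
 *	if (!arr)
 *		return -ENOMEM;
 *
 * kmalloc_array() returns NULL both on allocation failure and when
 * n * size would overflow.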
+ */ +static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) +{ + if (size != 0 && n > ULONG_MAX / size) + return NULL; + return __kmalloc(n * size, flags); +} + +/** + * kcalloc - allocate memory for an array. The memory is set to zero. + * @n: number of elements. + * @size: element size. + * @flags: the type of memory to allocate (see kmalloc). + */ +static inline void *kcalloc(size_t n, size_t size, gfp_t flags) +{ + return kmalloc_array(n, size, flags | __GFP_ZERO); +} + +#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB) +/** + * kmalloc_node - allocate memory from a specific node + * @size: how many bytes of memory are required. + * @flags: the type of memory to allocate (see kcalloc). + * @node: node to allocate from. + * + * kmalloc() for non-local nodes, used to allocate from a specific node + * if available. Equivalent to kmalloc() in the non-NUMA single-node + * case. + */ +static inline void *kmalloc_node(size_t size, gfp_t flags, int node) +{ + return kmalloc(size, flags); +} + +static inline void *__kmalloc_node(size_t size, gfp_t flags, int node) +{ + return __kmalloc(size, flags); +} + +void *kmem_cache_alloc(struct kmem_cache *, gfp_t); + +static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, + gfp_t flags, int node) +{ + return kmem_cache_alloc(cachep, flags); +} +#endif /* !CONFIG_NUMA && !CONFIG_SLOB */ + +/* + * kmalloc_track_caller is a special version of kmalloc that records the + * calling function of the routine calling it for slab leak tracking instead + * of just the calling function (confusing, eh?). + * It's useful when the call to kmalloc comes from a widely-used standard + * allocator where we care about the real place the memory allocation + * request comes from. + */ +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ + (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); +#define kmalloc_track_caller(size, flags) \ + __kmalloc_track_caller(size, flags, _RET_IP_) +#else +#define kmalloc_track_caller(size, flags) \ + __kmalloc(size, flags) +#endif /* DEBUG_SLAB */ + +#ifdef CONFIG_NUMA +/* + * kmalloc_node_track_caller is a special version of kmalloc_node that + * records the calling function of the routine calling it for slab leak + * tracking instead of just the calling function (confusing, eh?). + * It's useful when the call to kmalloc_node comes from a widely-used + * standard allocator where we care about the real place the memory + * allocation request comes from. + */ +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ + (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); +#define kmalloc_node_track_caller(size, flags, node) \ + __kmalloc_node_track_caller(size, flags, node, \ + _RET_IP_) +#else +#define kmalloc_node_track_caller(size, flags, node) \ + __kmalloc_node(size, flags, node) +#endif + +#else /* CONFIG_NUMA */ + +#define kmalloc_node_track_caller(size, flags, node) \ + kmalloc_track_caller(size, flags) + +#endif /* CONFIG_NUMA */ + +/* + * Shortcuts + */ +static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) +{ + return kmem_cache_alloc(k, flags | __GFP_ZERO); +} + +/** + * kzalloc - allocate memory. The memory is set to zero. + * @size: how many bytes of memory are required. + * @flags: the type of memory to allocate (see kmalloc). 
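 *
 * Illustrative sketch only (struct foo is a hypothetical caller-side type);
 * every field of the returned object starts out zeroed:
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;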
+ */ +static inline void *kzalloc(size_t size, gfp_t flags) +{ + return kmalloc(size, flags | __GFP_ZERO); +} + +/** + * kzalloc_node - allocate zeroed memory from a particular memory node. + * @size: how many bytes of memory are required. + * @flags: the type of memory to allocate (see kmalloc). + * @node: memory node from which to allocate + */ +static inline void *kzalloc_node(size_t size, gfp_t flags, int node) +{ + return kmalloc_node(size, flags | __GFP_ZERO, node); +} + +void __init kmem_cache_init_late(void); + +#endif /* _LINUX_SLAB_H */ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h new file mode 100644 index 00000000..fbd1117f --- /dev/null +++ b/include/linux/slab_def.h @@ -0,0 +1,215 @@ +#ifndef _LINUX_SLAB_DEF_H +#define _LINUX_SLAB_DEF_H + +/* + * Definitions unique to the original Linux SLAB allocator. + * + * What we provide here is a way to optimize the frequent kmalloc + * calls in the kernel by selecting the appropriate general cache + * if kmalloc was called with a size that can be established at + * compile time. + */ + +#include <linux/init.h> +#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ +#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ +#include <linux/compiler.h> + +/* + * struct kmem_cache + * + * manages a cache. + */ + +struct kmem_cache { +/* 1) Cache tunables. Protected by cache_chain_mutex */ + unsigned int batchcount; + unsigned int limit; + unsigned int shared; + + unsigned int buffer_size; + u32 reciprocal_buffer_size; +/* 2) touched by every alloc & free from the backend */ + + unsigned int flags; /* constant flags */ + unsigned int num; /* # of objs per slab */ + +/* 3) cache_grow/shrink */ + /* order of pgs per slab (2^n) */ + unsigned int gfporder; + + /* force GFP flags, e.g. GFP_DMA */ + gfp_t gfpflags; + + size_t colour; /* cache colouring range */ + unsigned int colour_off; /* colour offset */ + struct kmem_cache *slabp_cache; + unsigned int slab_size; + unsigned int dflags; /* dynamic flags */ + + /* constructor func */ + void (*ctor)(void *obj); + +/* 4) cache creation/removal */ + const char *name; + struct list_head next; + +/* 5) statistics */ +#ifdef CONFIG_DEBUG_SLAB + unsigned long num_active; + unsigned long num_allocations; + unsigned long high_mark; + unsigned long grown; + unsigned long reaped; + unsigned long errors; + unsigned long max_freeable; + unsigned long node_allocs; + unsigned long node_frees; + unsigned long node_overflow; + atomic_t allochit; + atomic_t allocmiss; + atomic_t freehit; + atomic_t freemiss; + + /* + * If debugging is enabled, then the allocator can add additional + * fields and/or padding to every object. buffer_size contains the total + * object size including these internal fields, the following two + * variables contain the offset to the user object and its size. + */ + int obj_offset; + int obj_size; +#endif /* CONFIG_DEBUG_SLAB */ + +/* 6) per-cpu/per-node data, touched during every alloc/free */ + /* + * We put array[] at the end of kmem_cache, because we want to size + * this array to nr_cpu_ids slots instead of NR_CPUS + * (see kmem_cache_init()) + * We still use [NR_CPUS] and not [1] or [0] because cache_cache + * is statically defined, so we reserve the max number of cpus. + */ + struct kmem_list3 **nodelists; + struct array_cache *array[NR_CPUS]; + /* + * Do not add fields after array[] + */ +}; + +/* Size description struct for general caches. 
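 * Each entry pairs a size threshold (cs_size) with the general cache that
 * backs kmalloc() allocations up to that size, plus a GFP_DMA variant when
 * CONFIG_ZONE_DMA is enabled. The inline kmalloc() below walks
 * <linux/kmalloc_sizes.h> to pick the first entry that fits a
 * compile-time-constant size.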
*/ +struct cache_sizes { + size_t cs_size; + struct kmem_cache *cs_cachep; +#ifdef CONFIG_ZONE_DMA + struct kmem_cache *cs_dmacachep; +#endif +}; +extern struct cache_sizes malloc_sizes[]; + +void *kmem_cache_alloc(struct kmem_cache *, gfp_t); +void *__kmalloc(size_t size, gfp_t flags); + +#ifdef CONFIG_TRACING +extern void *kmem_cache_alloc_trace(size_t size, + struct kmem_cache *cachep, gfp_t flags); +extern size_t slab_buffer_size(struct kmem_cache *cachep); +#else +static __always_inline void * +kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags) +{ + return kmem_cache_alloc(cachep, flags); +} +static inline size_t slab_buffer_size(struct kmem_cache *cachep) +{ + return 0; +} +#endif + +static __always_inline void *kmalloc(size_t size, gfp_t flags) +{ + struct kmem_cache *cachep; + void *ret; + + if (__builtin_constant_p(size)) { + int i = 0; + + if (!size) + return ZERO_SIZE_PTR; + +#define CACHE(x) \ + if (size <= x) \ + goto found; \ + else \ + i++; +#include <linux/kmalloc_sizes.h> +#undef CACHE + return NULL; +found: +#ifdef CONFIG_ZONE_DMA + if (flags & GFP_DMA) + cachep = malloc_sizes[i].cs_dmacachep; + else +#endif + cachep = malloc_sizes[i].cs_cachep; + + ret = kmem_cache_alloc_trace(size, cachep, flags); + + return ret; + } + return __kmalloc(size, flags); +} + +#ifdef CONFIG_NUMA +extern void *__kmalloc_node(size_t size, gfp_t flags, int node); +extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); + +#ifdef CONFIG_TRACING +extern void *kmem_cache_alloc_node_trace(size_t size, + struct kmem_cache *cachep, + gfp_t flags, + int nodeid); +#else +static __always_inline void * +kmem_cache_alloc_node_trace(size_t size, + struct kmem_cache *cachep, + gfp_t flags, + int nodeid) +{ + return kmem_cache_alloc_node(cachep, flags, nodeid); +} +#endif + +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) +{ + struct kmem_cache *cachep; + + if (__builtin_constant_p(size)) { + int i = 0; + + if (!size) + return ZERO_SIZE_PTR; + +#define CACHE(x) \ + if (size <= x) \ + goto found; \ + else \ + i++; +#include <linux/kmalloc_sizes.h> +#undef CACHE + return NULL; +found: +#ifdef CONFIG_ZONE_DMA + if (flags & GFP_DMA) + cachep = malloc_sizes[i].cs_dmacachep; + else +#endif + cachep = malloc_sizes[i].cs_cachep; + + return kmem_cache_alloc_node_trace(size, cachep, flags, node); + } + return __kmalloc_node(size, flags, node); +} + +#endif /* CONFIG_NUMA */ + +#endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h new file mode 100644 index 00000000..0ec00b39 --- /dev/null +++ b/include/linux/slob_def.h @@ -0,0 +1,37 @@ +#ifndef __LINUX_SLOB_DEF_H +#define __LINUX_SLOB_DEF_H + +void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); + +static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, + gfp_t flags) +{ + return kmem_cache_alloc_node(cachep, flags, -1); +} + +void *__kmalloc_node(size_t size, gfp_t flags, int node); + +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) +{ + return __kmalloc_node(size, flags, node); +} + +/** + * kmalloc - allocate memory + * @size: how many bytes of memory are required. + * @flags: the type of memory to allocate (see kcalloc). + * + * kmalloc is the normal method of allocating memory + * in the kernel. 
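 *
 * As an illustration only (buf and len are hypothetical caller-side names):
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;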
+ */ +static __always_inline void *kmalloc(size_t size, gfp_t flags) +{ + return __kmalloc_node(size, flags, -1); +} + +static __always_inline void *__kmalloc(size_t size, gfp_t flags) +{ + return kmalloc(size, flags); +} + +#endif /* __LINUX_SLOB_DEF_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h new file mode 100644 index 00000000..c2f8c8bc --- /dev/null +++ b/include/linux/slub_def.h @@ -0,0 +1,319 @@ +#ifndef _LINUX_SLUB_DEF_H +#define _LINUX_SLUB_DEF_H + +/* + * SLUB : A Slab allocator without object queues. + * + * (C) 2007 SGI, Christoph Lameter + */ +#include <linux/types.h> +#include <linux/gfp.h> +#include <linux/bug.h> +#include <linux/workqueue.h> +#include <linux/kobject.h> + +#include <linux/kmemleak.h> + +enum stat_item { + ALLOC_FASTPATH, /* Allocation from cpu slab */ + ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ + FREE_FASTPATH, /* Free to cpu slub */ + FREE_SLOWPATH, /* Freeing not to cpu slab */ + FREE_FROZEN, /* Freeing to frozen slab */ + FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ + FREE_REMOVE_PARTIAL, /* Freeing removes last object */ + ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */ + ALLOC_SLAB, /* Cpu slab acquired from page allocator */ + ALLOC_REFILL, /* Refill cpu slab from slab freelist */ + ALLOC_NODE_MISMATCH, /* Switching cpu slab */ + FREE_SLAB, /* Slab freed to the page allocator */ + CPUSLAB_FLUSH, /* Abandoning of the cpu slab */ + DEACTIVATE_FULL, /* Cpu slab was full when deactivated */ + DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */ + DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */ + DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */ + DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */ + DEACTIVATE_BYPASS, /* Implicit deactivation */ + ORDER_FALLBACK, /* Number of times fallback was necessary */ + CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */ + CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */ + CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */ + CPU_PARTIAL_FREE, /* Refill cpu partial on free */ + CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */ + CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ + NR_SLUB_STAT_ITEMS }; + +struct kmem_cache_cpu { + void **freelist; /* Pointer to next available object */ + unsigned long tid; /* Globally unique transaction id */ + struct page *page; /* The slab from which we are allocating */ + struct page *partial; /* Partially allocated frozen slabs */ + int node; /* The node of the page (or -1 for debug) */ +#ifdef CONFIG_SLUB_STATS + unsigned stat[NR_SLUB_STAT_ITEMS]; +#endif +}; + +struct kmem_cache_node { + spinlock_t list_lock; /* Protect partial list and nr_partial */ + unsigned long nr_partial; + struct list_head partial; +#ifdef CONFIG_SLUB_DEBUG + atomic_long_t nr_slabs; + atomic_long_t total_objects; + struct list_head full; +#endif +}; + +/* + * Word size structure that can be atomically updated or read and that + * contains both the order and the number of objects that a slab of the + * given order would contain. + */ +struct kmem_cache_order_objects { + unsigned long x; +}; + +/* + * Slab cache management. 
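 * One struct kmem_cache exists per cache. The per-cpu fast-path state
 * (freelist, transaction id, current slab page) lives in cpu_slab, while
 * the per-node partial lists hang off the node[] array.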
+ */ +struct kmem_cache { + struct kmem_cache_cpu __percpu *cpu_slab; + /* Used for retriving partial slabs etc */ + unsigned long flags; + unsigned long min_partial; + int size; /* The size of an object including meta data */ + int objsize; /* The size of an object without meta data */ + int offset; /* Free pointer offset. */ + int cpu_partial; /* Number of per cpu partial objects to keep around */ + struct kmem_cache_order_objects oo; + + /* Allocation and freeing of slabs */ + struct kmem_cache_order_objects max; + struct kmem_cache_order_objects min; + gfp_t allocflags; /* gfp flags to use on each alloc */ + int refcount; /* Refcount for slab cache destroy */ + void (*ctor)(void *); + int inuse; /* Offset to metadata */ + int align; /* Alignment */ + int reserved; /* Reserved bytes at the end of slabs */ + const char *name; /* Name (only for display!) */ + struct list_head list; /* List of slab caches */ +#ifdef CONFIG_SYSFS + struct kobject kobj; /* For sysfs */ +#endif + +#ifdef CONFIG_NUMA + /* + * Defragmentation by allocating from a remote node. + */ + int remote_node_defrag_ratio; +#endif + struct kmem_cache_node *node[MAX_NUMNODES]; +}; + +/* + * Kmalloc subsystem. + */ +#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8 +#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN +#else +#define KMALLOC_MIN_SIZE 8 +#endif + +#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) + +/* + * Maximum kmalloc object size handled by SLUB. Larger object allocations + * are passed through to the page allocator. The page allocator "fastpath" + * is relatively slow so we need this value sufficiently high so that + * performance critical objects are allocated through the SLUB fastpath. + * + * This should be dropped to PAGE_SIZE / 2 once the page allocator + * "fastpath" becomes competitive with the slab allocator fastpaths. + */ +#define SLUB_MAX_SIZE (2 * PAGE_SIZE) + +#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2) + +#ifdef CONFIG_ZONE_DMA +#define SLUB_DMA __GFP_DMA +#else +/* Disable DMA functionality */ +#define SLUB_DMA (__force gfp_t)0 +#endif + +/* + * We keep the general caches in an array of slab caches that are used for + * 2^x bytes of allocations. + */ +extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; + +/* + * Sorry that the following has to be that ugly but some versions of GCC + * have trouble with constant propagation and loops. + */ +static __always_inline int kmalloc_index(size_t size) +{ + if (!size) + return 0; + + if (size <= KMALLOC_MIN_SIZE) + return KMALLOC_SHIFT_LOW; + + if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96) + return 1; + if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192) + return 2; + if (size <= 8) return 3; + if (size <= 16) return 4; + if (size <= 32) return 5; + if (size <= 64) return 6; + if (size <= 128) return 7; + if (size <= 256) return 8; + if (size <= 512) return 9; + if (size <= 1024) return 10; + if (size <= 2 * 1024) return 11; + if (size <= 4 * 1024) return 12; +/* + * The following is only needed to support architectures with a larger page + * size than 4k. We need to support 2 * PAGE_SIZE here. So for a 64k page + * size we would have to go up to 128k. 
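 *
 * Worked example (added for illustration): on a 4 KiB page kernel, a 6 KiB
 * request falls through the "size <= 4 * 1024" test above and matches
 * "size <= 8 * 1024" below, so kmalloc_index() returns 13, selecting the
 * 2^13 = 8 KiB general cache.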
+ */ + if (size <= 8 * 1024) return 13; + if (size <= 16 * 1024) return 14; + if (size <= 32 * 1024) return 15; + if (size <= 64 * 1024) return 16; + if (size <= 128 * 1024) return 17; + if (size <= 256 * 1024) return 18; + if (size <= 512 * 1024) return 19; + if (size <= 1024 * 1024) return 20; + if (size <= 2 * 1024 * 1024) return 21; + BUG(); + return -1; /* Will never be reached */ + +/* + * What we really wanted to do and cannot do because of compiler issues is: + * int i; + * for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) + * if (size <= (1 << i)) + * return i; + */ +} + +/* + * Find the slab cache for a given combination of allocation flags and size. + * + * This ought to end up with a global pointer to the right cache + * in kmalloc_caches. + */ +static __always_inline struct kmem_cache *kmalloc_slab(size_t size) +{ + int index = kmalloc_index(size); + + if (index == 0) + return NULL; + + return kmalloc_caches[index]; +} + +void *kmem_cache_alloc(struct kmem_cache *, gfp_t); +void *__kmalloc(size_t size, gfp_t flags); + +static __always_inline void * +kmalloc_order(size_t size, gfp_t flags, unsigned int order) +{ + void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); + kmemleak_alloc(ret, size, 1, flags); + return ret; +} + +/** + * Calling this on allocated memory will check that the memory + * is expected to be in use, and print warnings if not. + */ +#ifdef CONFIG_SLUB_DEBUG +extern bool verify_mem_not_deleted(const void *x); +#else +static inline bool verify_mem_not_deleted(const void *x) +{ + return true; +} +#endif + +#ifdef CONFIG_TRACING +extern void * +kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size); +extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); +#else +static __always_inline void * +kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) +{ + return kmem_cache_alloc(s, gfpflags); +} + +static __always_inline void * +kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) +{ + return kmalloc_order(size, flags, order); +} +#endif + +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) +{ + unsigned int order = get_order(size); + return kmalloc_order_trace(size, flags, order); +} + +static __always_inline void *kmalloc(size_t size, gfp_t flags) +{ + if (__builtin_constant_p(size)) { + if (size > SLUB_MAX_SIZE) + return kmalloc_large(size, flags); + + if (!(flags & SLUB_DMA)) { + struct kmem_cache *s = kmalloc_slab(size); + + if (!s) + return ZERO_SIZE_PTR; + + return kmem_cache_alloc_trace(s, flags, size); + } + } + return __kmalloc(size, flags); +} + +#ifdef CONFIG_NUMA +void *__kmalloc_node(size_t size, gfp_t flags, int node); +void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); + +#ifdef CONFIG_TRACING +extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, + gfp_t gfpflags, + int node, size_t size); +#else +static __always_inline void * +kmem_cache_alloc_node_trace(struct kmem_cache *s, + gfp_t gfpflags, + int node, size_t size) +{ + return kmem_cache_alloc_node(s, gfpflags, node); +} +#endif + +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) +{ + if (__builtin_constant_p(size) && + size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { + struct kmem_cache *s = kmalloc_slab(size); + + if (!s) + return ZERO_SIZE_PTR; + + return kmem_cache_alloc_node_trace(s, flags, node, size); + } + return __kmalloc_node(size, flags, node); +} +#endif + +#endif /* _LINUX_SLUB_DEF_H */ diff --git 
a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h new file mode 100644 index 00000000..67ed2c54 --- /dev/null +++ b/include/linux/sm501-regs.h @@ -0,0 +1,388 @@ +/* sm501-regs.h + * + * Copyright 2006 Simtec Electronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Silicon Motion SM501 register definitions +*/ + +/* System Configuration area */ +/* System config base */ +#define SM501_SYS_CONFIG (0x000000) + +/* config 1 */ +#define SM501_SYSTEM_CONTROL (0x000000) + +#define SM501_SYSCTRL_PANEL_TRISTATE (1<<0) +#define SM501_SYSCTRL_MEM_TRISTATE (1<<1) +#define SM501_SYSCTRL_CRT_TRISTATE (1<<2) + +#define SM501_SYSCTRL_PCI_SLAVE_BURST_MASK (3<<4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_1 (0<<4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_2 (1<<4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_4 (2<<4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_8 (3<<4) + +#define SM501_SYSCTRL_PCI_CLOCK_RUN_EN (1<<6) +#define SM501_SYSCTRL_PCI_RETRY_DISABLE (1<<7) +#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11) +#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15) + +#define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19) + +/* miscellaneous control */ + +#define SM501_MISC_CONTROL (0x000004) + +#define SM501_MISC_BUS_SH (0x0) +#define SM501_MISC_BUS_PCI (0x1) +#define SM501_MISC_BUS_XSCALE (0x2) +#define SM501_MISC_BUS_NEC (0x6) +#define SM501_MISC_BUS_MASK (0x7) + +#define SM501_MISC_VR_62MB (1<<3) +#define SM501_MISC_CDR_RESET (1<<7) +#define SM501_MISC_USB_LB (1<<8) +#define SM501_MISC_USB_SLAVE (1<<9) +#define SM501_MISC_BL_1 (1<<10) +#define SM501_MISC_MC (1<<11) +#define SM501_MISC_DAC_POWER (1<<12) +#define SM501_MISC_IRQ_INVERT (1<<16) +#define SM501_MISC_SH (1<<17) + +#define SM501_MISC_HOLD_EMPTY (0<<18) +#define SM501_MISC_HOLD_8 (1<<18) +#define SM501_MISC_HOLD_16 (2<<18) +#define SM501_MISC_HOLD_24 (3<<18) +#define SM501_MISC_HOLD_32 (4<<18) +#define SM501_MISC_HOLD_MASK (7<<18) + +#define SM501_MISC_FREQ_12 (1<<24) +#define SM501_MISC_PNL_24BIT (1<<25) +#define SM501_MISC_8051_LE (1<<26) + + + +#define SM501_GPIO31_0_CONTROL (0x000008) +#define SM501_GPIO63_32_CONTROL (0x00000C) +#define SM501_DRAM_CONTROL (0x000010) + +/* command list */ +#define SM501_ARBTRTN_CONTROL (0x000014) + +/* command list */ +#define SM501_COMMAND_LIST_STATUS (0x000024) + +/* interrupt debug */ +#define SM501_RAW_IRQ_STATUS (0x000028) +#define SM501_RAW_IRQ_CLEAR (0x000028) +#define SM501_IRQ_STATUS (0x00002C) +#define SM501_IRQ_MASK (0x000030) +#define SM501_DEBUG_CONTROL (0x000034) + +/* power management */ +#define SM501_POWERMODE_P2X_SRC (1<<29) +#define SM501_POWERMODE_V2X_SRC (1<<20) +#define SM501_POWERMODE_M_SRC (1<<12) +#define SM501_POWERMODE_M1_SRC (1<<4) + +#define SM501_CURRENT_GATE (0x000038) +#define SM501_CURRENT_CLOCK (0x00003C) +#define SM501_POWER_MODE_0_GATE (0x000040) +#define SM501_POWER_MODE_0_CLOCK (0x000044) +#define SM501_POWER_MODE_1_GATE (0x000048) +#define SM501_POWER_MODE_1_CLOCK (0x00004C) +#define SM501_SLEEP_MODE_GATE (0x000050) +#define SM501_POWER_MODE_CONTROL (0x000054) + +/* power gates for units within the 501 */ +#define SM501_GATE_HOST (0) +#define SM501_GATE_MEMORY (1) +#define SM501_GATE_DISPLAY (2) +#define SM501_GATE_2D_ENGINE (3) +#define SM501_GATE_CSC (4) +#define SM501_GATE_ZVPORT (5) +#define SM501_GATE_GPIO (6) +#define SM501_GATE_UART0 (7) +#define SM501_GATE_UART1 (8) +#define SM501_GATE_SSP (10) +#define SM501_GATE_USB_HOST (11) 
+#define SM501_GATE_USB_GADGET (12) +#define SM501_GATE_UCONTROLLER (17) +#define SM501_GATE_AC97 (18) + +/* panel clock */ +#define SM501_CLOCK_P2XCLK (24) +/* crt clock */ +#define SM501_CLOCK_V2XCLK (16) +/* main clock */ +#define SM501_CLOCK_MCLK (8) +/* SDRAM controller clock */ +#define SM501_CLOCK_M1XCLK (0) + +/* config 2 */ +#define SM501_PCI_MASTER_BASE (0x000058) +#define SM501_ENDIAN_CONTROL (0x00005C) +#define SM501_DEVICEID (0x000060) +/* 0x050100A0 */ + +#define SM501_DEVICEID_SM501 (0x05010000) +#define SM501_DEVICEID_IDMASK (0xffff0000) +#define SM501_DEVICEID_REVMASK (0x000000ff) + +#define SM501_PLLCLOCK_COUNT (0x000064) +#define SM501_MISC_TIMING (0x000068) +#define SM501_CURRENT_SDRAM_CLOCK (0x00006C) + +#define SM501_PROGRAMMABLE_PLL_CONTROL (0x000074) + +/* GPIO base */ +#define SM501_GPIO (0x010000) +#define SM501_GPIO_DATA_LOW (0x00) +#define SM501_GPIO_DATA_HIGH (0x04) +#define SM501_GPIO_DDR_LOW (0x08) +#define SM501_GPIO_DDR_HIGH (0x0C) +#define SM501_GPIO_IRQ_SETUP (0x10) +#define SM501_GPIO_IRQ_STATUS (0x14) +#define SM501_GPIO_IRQ_RESET (0x14) + +/* I2C controller base */ +#define SM501_I2C (0x010040) +#define SM501_I2C_BYTE_COUNT (0x00) +#define SM501_I2C_CONTROL (0x01) +#define SM501_I2C_STATUS (0x02) +#define SM501_I2C_RESET (0x02) +#define SM501_I2C_SLAVE_ADDRESS (0x03) +#define SM501_I2C_DATA (0x04) + +/* SSP base */ +#define SM501_SSP (0x020000) + +/* Uart 0 base */ +#define SM501_UART0 (0x030000) + +/* Uart 1 base */ +#define SM501_UART1 (0x030020) + +/* USB host port base */ +#define SM501_USB_HOST (0x040000) + +/* USB slave/gadget base */ +#define SM501_USB_GADGET (0x060000) + +/* USB slave/gadget data port base */ +#define SM501_USB_GADGET_DATA (0x070000) + +/* Display controller/video engine base */ +#define SM501_DC (0x080000) + +/* common defines for the SM501 address registers */ +#define SM501_ADDR_FLIP (1<<31) +#define SM501_ADDR_EXT (1<<27) +#define SM501_ADDR_CS1 (1<<26) +#define SM501_ADDR_MASK (0x3f << 26) + +#define SM501_FIFO_MASK (0x3 << 16) +#define SM501_FIFO_1 (0x0 << 16) +#define SM501_FIFO_3 (0x1 << 16) +#define SM501_FIFO_7 (0x2 << 16) +#define SM501_FIFO_11 (0x3 << 16) + +/* common registers for panel and the crt */ +#define SM501_OFF_DC_H_TOT (0x000) +#define SM501_OFF_DC_V_TOT (0x008) +#define SM501_OFF_DC_H_SYNC (0x004) +#define SM501_OFF_DC_V_SYNC (0x00C) + +#define SM501_DC_PANEL_CONTROL (0x000) + +#define SM501_DC_PANEL_CONTROL_FPEN (1<<27) +#define SM501_DC_PANEL_CONTROL_BIAS (1<<26) +#define SM501_DC_PANEL_CONTROL_DATA (1<<25) +#define SM501_DC_PANEL_CONTROL_VDD (1<<24) +#define SM501_DC_PANEL_CONTROL_DP (1<<23) + +#define SM501_DC_PANEL_CONTROL_TFT_888 (0<<21) +#define SM501_DC_PANEL_CONTROL_TFT_333 (1<<21) +#define SM501_DC_PANEL_CONTROL_TFT_444 (2<<21) + +#define SM501_DC_PANEL_CONTROL_DE (1<<20) + +#define SM501_DC_PANEL_CONTROL_LCD_TFT (0<<18) +#define SM501_DC_PANEL_CONTROL_LCD_STN8 (1<<18) +#define SM501_DC_PANEL_CONTROL_LCD_STN12 (2<<18) + +#define SM501_DC_PANEL_CONTROL_CP (1<<14) +#define SM501_DC_PANEL_CONTROL_VSP (1<<13) +#define SM501_DC_PANEL_CONTROL_HSP (1<<12) +#define SM501_DC_PANEL_CONTROL_CK (1<<9) +#define SM501_DC_PANEL_CONTROL_TE (1<<8) +#define SM501_DC_PANEL_CONTROL_VPD (1<<7) +#define SM501_DC_PANEL_CONTROL_VP (1<<6) +#define SM501_DC_PANEL_CONTROL_HPD (1<<5) +#define SM501_DC_PANEL_CONTROL_HP (1<<4) +#define SM501_DC_PANEL_CONTROL_GAMMA (1<<3) +#define SM501_DC_PANEL_CONTROL_EN (1<<2) + +#define SM501_DC_PANEL_CONTROL_8BPP (0<<0) +#define SM501_DC_PANEL_CONTROL_16BPP (1<<0) +#define 
SM501_DC_PANEL_CONTROL_32BPP (2<<0) + + +#define SM501_DC_PANEL_PANNING_CONTROL (0x004) +#define SM501_DC_PANEL_COLOR_KEY (0x008) +#define SM501_DC_PANEL_FB_ADDR (0x00C) +#define SM501_DC_PANEL_FB_OFFSET (0x010) +#define SM501_DC_PANEL_FB_WIDTH (0x014) +#define SM501_DC_PANEL_FB_HEIGHT (0x018) +#define SM501_DC_PANEL_TL_LOC (0x01C) +#define SM501_DC_PANEL_BR_LOC (0x020) +#define SM501_DC_PANEL_H_TOT (0x024) +#define SM501_DC_PANEL_H_SYNC (0x028) +#define SM501_DC_PANEL_V_TOT (0x02C) +#define SM501_DC_PANEL_V_SYNC (0x030) +#define SM501_DC_PANEL_CUR_LINE (0x034) + +#define SM501_DC_VIDEO_CONTROL (0x040) +#define SM501_DC_VIDEO_FB0_ADDR (0x044) +#define SM501_DC_VIDEO_FB_WIDTH (0x048) +#define SM501_DC_VIDEO_FB0_LAST_ADDR (0x04C) +#define SM501_DC_VIDEO_TL_LOC (0x050) +#define SM501_DC_VIDEO_BR_LOC (0x054) +#define SM501_DC_VIDEO_SCALE (0x058) +#define SM501_DC_VIDEO_INIT_SCALE (0x05C) +#define SM501_DC_VIDEO_YUV_CONSTANTS (0x060) +#define SM501_DC_VIDEO_FB1_ADDR (0x064) +#define SM501_DC_VIDEO_FB1_LAST_ADDR (0x068) + +#define SM501_DC_VIDEO_ALPHA_CONTROL (0x080) +#define SM501_DC_VIDEO_ALPHA_FB_ADDR (0x084) +#define SM501_DC_VIDEO_ALPHA_FB_OFFSET (0x088) +#define SM501_DC_VIDEO_ALPHA_FB_LAST_ADDR (0x08C) +#define SM501_DC_VIDEO_ALPHA_TL_LOC (0x090) +#define SM501_DC_VIDEO_ALPHA_BR_LOC (0x094) +#define SM501_DC_VIDEO_ALPHA_SCALE (0x098) +#define SM501_DC_VIDEO_ALPHA_INIT_SCALE (0x09C) +#define SM501_DC_VIDEO_ALPHA_CHROMA_KEY (0x0A0) +#define SM501_DC_VIDEO_ALPHA_COLOR_LOOKUP (0x0A4) + +#define SM501_DC_PANEL_HWC_BASE (0x0F0) +#define SM501_DC_PANEL_HWC_ADDR (0x0F0) +#define SM501_DC_PANEL_HWC_LOC (0x0F4) +#define SM501_DC_PANEL_HWC_COLOR_1_2 (0x0F8) +#define SM501_DC_PANEL_HWC_COLOR_3 (0x0FC) + +#define SM501_HWC_EN (1<<31) + +#define SM501_OFF_HWC_ADDR (0x00) +#define SM501_OFF_HWC_LOC (0x04) +#define SM501_OFF_HWC_COLOR_1_2 (0x08) +#define SM501_OFF_HWC_COLOR_3 (0x0C) + +#define SM501_DC_ALPHA_CONTROL (0x100) +#define SM501_DC_ALPHA_FB_ADDR (0x104) +#define SM501_DC_ALPHA_FB_OFFSET (0x108) +#define SM501_DC_ALPHA_TL_LOC (0x10C) +#define SM501_DC_ALPHA_BR_LOC (0x110) +#define SM501_DC_ALPHA_CHROMA_KEY (0x114) +#define SM501_DC_ALPHA_COLOR_LOOKUP (0x118) + +#define SM501_DC_CRT_CONTROL (0x200) + +#define SM501_DC_CRT_CONTROL_TVP (1<<15) +#define SM501_DC_CRT_CONTROL_CP (1<<14) +#define SM501_DC_CRT_CONTROL_VSP (1<<13) +#define SM501_DC_CRT_CONTROL_HSP (1<<12) +#define SM501_DC_CRT_CONTROL_VS (1<<11) +#define SM501_DC_CRT_CONTROL_BLANK (1<<10) +#define SM501_DC_CRT_CONTROL_SEL (1<<9) +#define SM501_DC_CRT_CONTROL_TE (1<<8) +#define SM501_DC_CRT_CONTROL_PIXEL_MASK (0xF << 4) +#define SM501_DC_CRT_CONTROL_GAMMA (1<<3) +#define SM501_DC_CRT_CONTROL_ENABLE (1<<2) + +#define SM501_DC_CRT_CONTROL_8BPP (0<<0) +#define SM501_DC_CRT_CONTROL_16BPP (1<<0) +#define SM501_DC_CRT_CONTROL_32BPP (2<<0) + +#define SM501_DC_CRT_FB_ADDR (0x204) +#define SM501_DC_CRT_FB_OFFSET (0x208) +#define SM501_DC_CRT_H_TOT (0x20C) +#define SM501_DC_CRT_H_SYNC (0x210) +#define SM501_DC_CRT_V_TOT (0x214) +#define SM501_DC_CRT_V_SYNC (0x218) +#define SM501_DC_CRT_SIGNATURE_ANALYZER (0x21C) +#define SM501_DC_CRT_CUR_LINE (0x220) +#define SM501_DC_CRT_MONITOR_DETECT (0x224) + +#define SM501_DC_CRT_HWC_BASE (0x230) +#define SM501_DC_CRT_HWC_ADDR (0x230) +#define SM501_DC_CRT_HWC_LOC (0x234) +#define SM501_DC_CRT_HWC_COLOR_1_2 (0x238) +#define SM501_DC_CRT_HWC_COLOR_3 (0x23C) + +#define SM501_DC_PANEL_PALETTE (0x400) + +#define SM501_DC_VIDEO_PALETTE (0x800) + +#define SM501_DC_CRT_PALETTE (0xC00) + +/* Zoom Video port base */ 
+#define SM501_ZVPORT (0x090000) + +/* AC97/I2S base */ +#define SM501_AC97 (0x0A0000) + +/* 8051 micro controller base */ +#define SM501_UCONTROLLER (0x0B0000) + +/* 8051 micro controller SRAM base */ +#define SM501_UCONTROLLER_SRAM (0x0C0000) + +/* DMA base */ +#define SM501_DMA (0x0D0000) + +/* 2d engine base */ +#define SM501_2D_ENGINE (0x100000) +#define SM501_2D_SOURCE (0x00) +#define SM501_2D_DESTINATION (0x04) +#define SM501_2D_DIMENSION (0x08) +#define SM501_2D_CONTROL (0x0C) +#define SM501_2D_PITCH (0x10) +#define SM501_2D_FOREGROUND (0x14) +#define SM501_2D_BACKGROUND (0x18) +#define SM501_2D_STRETCH (0x1C) +#define SM501_2D_COLOR_COMPARE (0x20) +#define SM501_2D_COLOR_COMPARE_MASK (0x24) +#define SM501_2D_MASK (0x28) +#define SM501_2D_CLIP_TL (0x2C) +#define SM501_2D_CLIP_BR (0x30) +#define SM501_2D_MONO_PATTERN_LOW (0x34) +#define SM501_2D_MONO_PATTERN_HIGH (0x38) +#define SM501_2D_WINDOW_WIDTH (0x3C) +#define SM501_2D_SOURCE_BASE (0x40) +#define SM501_2D_DESTINATION_BASE (0x44) +#define SM501_2D_ALPHA (0x48) +#define SM501_2D_WRAP (0x4C) +#define SM501_2D_STATUS (0x50) + +#define SM501_CSC_Y_SOURCE_BASE (0xC8) +#define SM501_CSC_CONSTANTS (0xCC) +#define SM501_CSC_Y_SOURCE_X (0xD0) +#define SM501_CSC_Y_SOURCE_Y (0xD4) +#define SM501_CSC_U_SOURCE_BASE (0xD8) +#define SM501_CSC_V_SOURCE_BASE (0xDC) +#define SM501_CSC_SOURCE_DIMENSION (0xE0) +#define SM501_CSC_SOURCE_PITCH (0xE4) +#define SM501_CSC_DESTINATION (0xE8) +#define SM501_CSC_DESTINATION_DIMENSION (0xEC) +#define SM501_CSC_DESTINATION_PITCH (0xF0) +#define SM501_CSC_SCALE_FACTOR (0xF4) +#define SM501_CSC_DESTINATION_BASE (0xF8) +#define SM501_CSC_CONTROL (0xFC) + +/* 2d engine data port base */ +#define SM501_2D_ENGINE_DATA (0x110000) diff --git a/include/linux/sm501.h b/include/linux/sm501.h new file mode 100644 index 00000000..02fde50a --- /dev/null +++ b/include/linux/sm501.h @@ -0,0 +1,182 @@ +/* include/linux/sm501.h + * + * Copyright (c) 2006 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * Vincent Sanders <vince@simtec.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +extern int sm501_unit_power(struct device *dev, + unsigned int unit, unsigned int to); + +extern unsigned long sm501_set_clock(struct device *dev, + int clksrc, unsigned long freq); + +extern unsigned long sm501_find_clock(struct device *dev, + int clksrc, unsigned long req_freq); + +/* sm501_misc_control + * + * Modify the SM501's MISC_CONTROL register +*/ + +extern int sm501_misc_control(struct device *dev, + unsigned long set, unsigned long clear); + +/* sm501_modify_reg + * + * Modify a register in the SM501 which may be shared with other + * drivers. 
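 *
 * For example (illustrative only; "dev" is whichever SM501 core device
 * pointer the caller holds), a subdriver could set the DAC power bit in
 * SM501_MISC_CONTROL, as defined in sm501-regs.h, without touching any
 * other bits:
 *
 *	sm501_modify_reg(dev, SM501_MISC_CONTROL, SM501_MISC_DAC_POWER, 0);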
+*/ + +extern unsigned long sm501_modify_reg(struct device *dev, + unsigned long reg, + unsigned long set, + unsigned long clear); + + +/* Platform data definitions */ + +#define SM501FB_FLAG_USE_INIT_MODE (1<<0) +#define SM501FB_FLAG_DISABLE_AT_EXIT (1<<1) +#define SM501FB_FLAG_USE_HWCURSOR (1<<2) +#define SM501FB_FLAG_USE_HWACCEL (1<<3) +#define SM501FB_FLAG_PANEL_NO_FPEN (1<<4) +#define SM501FB_FLAG_PANEL_NO_VBIASEN (1<<5) +#define SM501FB_FLAG_PANEL_INV_FPEN (1<<6) +#define SM501FB_FLAG_PANEL_INV_VBIASEN (1<<7) + +struct sm501_platdata_fbsub { + struct fb_videomode *def_mode; + unsigned int def_bpp; + unsigned long max_mem; + unsigned int flags; +}; + +enum sm501_fb_routing { + SM501_FB_OWN = 0, /* CRT=>CRT, Panel=>Panel */ + SM501_FB_CRT_PANEL = 1, /* Panel=>CRT, Panel=>Panel */ +}; + +/* sm501_platdata_fb flag field bit definitions */ + +#define SM501_FBPD_SWAP_FB_ENDIAN (1<<0) /* need to endian swap */ + +/* sm501_platdata_fb + * + * configuration data for the framebuffer driver +*/ + +struct sm501_platdata_fb { + enum sm501_fb_routing fb_route; + unsigned int flags; + struct sm501_platdata_fbsub *fb_crt; + struct sm501_platdata_fbsub *fb_pnl; +}; + +/* gpio i2c + * + * Note, we have to pass in the bus number, as the number used will be + * passed to the i2c-gpio driver's platform_device.id, subsequently used + * to register the i2c bus. +*/ + +struct sm501_platdata_gpio_i2c { + unsigned int bus_num; + unsigned int pin_sda; + unsigned int pin_scl; + int udelay; + int timeout; +}; + +/* sm501_initdata + * + * use for initialising values that may not have been setup + * before the driver is loaded. +*/ + +struct sm501_reg_init { + unsigned long set; + unsigned long mask; +}; + +#define SM501_USE_USB_HOST (1<<0) +#define SM501_USE_USB_SLAVE (1<<1) +#define SM501_USE_SSP0 (1<<2) +#define SM501_USE_SSP1 (1<<3) +#define SM501_USE_UART0 (1<<4) +#define SM501_USE_UART1 (1<<5) +#define SM501_USE_FBACCEL (1<<6) +#define SM501_USE_AC97 (1<<7) +#define SM501_USE_I2S (1<<8) +#define SM501_USE_GPIO (1<<9) + +#define SM501_USE_ALL (0xffffffff) + +struct sm501_initdata { + struct sm501_reg_init gpio_low; + struct sm501_reg_init gpio_high; + struct sm501_reg_init misc_timing; + struct sm501_reg_init misc_control; + + unsigned long devices; + unsigned long mclk; /* non-zero to modify */ + unsigned long m1xclk; /* non-zero to modify */ +}; + +/* sm501_init_gpio + * + * default gpio settings +*/ + +struct sm501_init_gpio { + struct sm501_reg_init gpio_data_low; + struct sm501_reg_init gpio_data_high; + struct sm501_reg_init gpio_ddr_low; + struct sm501_reg_init gpio_ddr_high; +}; + +#define SM501_FLAG_SUSPEND_OFF (1<<4) + +/* sm501_platdata + * + * This is passed with the platform device to allow the board + * to control the behaviour of the SM501 driver(s) which attach + * to the device. 
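 *
 * A board support file might, purely as a sketch, provide something like:
 *
 *	static struct sm501_platdata board_sm501_pdata = {
 *		.init	= &board_sm501_initdata,
 *		.fb	= &board_sm501_fb_pdata,
 *	};
 *
 * where the board_* initdata and framebuffer structures are hypothetical
 * and would be filled in from the definitions above.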
+ * +*/ + +struct sm501_platdata { + struct sm501_initdata *init; + struct sm501_init_gpio *init_gpiop; + struct sm501_platdata_fb *fb; + + int flags; + int gpio_base; + + int (*get_power)(struct device *dev); + int (*set_power)(struct device *dev, unsigned int on); + + struct sm501_platdata_gpio_i2c *gpio_i2c; + unsigned int gpio_i2c_nr; +}; + +#if defined(CONFIG_PPC32) +#define smc501_readl(addr) ioread32be((addr)) +#define smc501_writel(val, addr) iowrite32be((val), (addr)) +#else +#define smc501_readl(addr) readl(addr) +#define smc501_writel(val, addr) writel(val, addr) +#endif diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h new file mode 100644 index 00000000..521f3714 --- /dev/null +++ b/include/linux/smc911x.h @@ -0,0 +1,13 @@ +#ifndef __SMC911X_H__ +#define __SMC911X_H__ + +#define SMC911X_USE_16BIT (1 << 0) +#define SMC911X_USE_32BIT (1 << 1) + +struct smc911x_platdata { + unsigned long flags; + unsigned long irq_flags; /* IRQF_... */ + int irq_polarity; +}; + +#endif /* __SMC911X_H__ */ diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h new file mode 100644 index 00000000..76199b75 --- /dev/null +++ b/include/linux/smc91x.h @@ -0,0 +1,34 @@ +#ifndef __SMC91X_H__ +#define __SMC91X_H__ + +#define SMC91X_USE_8BIT (1 << 0) +#define SMC91X_USE_16BIT (1 << 1) +#define SMC91X_USE_32BIT (1 << 2) + +#define SMC91X_NOWAIT (1 << 3) + +/* two bits for IO_SHIFT, let's hope later designs will keep this sane */ +#define SMC91X_IO_SHIFT_0 (0 << 4) +#define SMC91X_IO_SHIFT_1 (1 << 4) +#define SMC91X_IO_SHIFT_2 (2 << 4) +#define SMC91X_IO_SHIFT_3 (3 << 4) +#define SMC91X_IO_SHIFT(x) (((x) >> 4) & 0x3) + +#define SMC91X_USE_DMA (1 << 6) + +#define RPC_LED_100_10 (0x00) /* LED = 100Mbps OR's with 10Mbps link detect */ +#define RPC_LED_RES (0x01) /* LED = Reserved */ +#define RPC_LED_10 (0x02) /* LED = 10Mbps link detect */ +#define RPC_LED_FD (0x03) /* LED = Full Duplex Mode */ +#define RPC_LED_TX_RX (0x04) /* LED = TX or RX packet occurred */ +#define RPC_LED_100 (0x05) /* LED = 100Mbps link detect */ +#define RPC_LED_TX (0x06) /* LED = TX packet occurred */ +#define RPC_LED_RX (0x07) /* LED = RX packet occurred */ + +struct smc91x_platdata { + unsigned long flags; + unsigned char leda; + unsigned char ledb; +}; + +#endif /* __SMC91X_H__ */ diff --git a/include/linux/smp.h b/include/linux/smp.h new file mode 100644 index 00000000..10530d92 --- /dev/null +++ b/include/linux/smp.h @@ -0,0 +1,230 @@ +#ifndef __LINUX_SMP_H +#define __LINUX_SMP_H + +/* + * Generic SMP support + * Alan Cox. <alan@redhat.com> + */ + +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/cpumask.h> +#include <linux/init.h> + +extern void cpu_idle(void); + +typedef void (*smp_call_func_t)(void *info); +struct call_single_data { + struct list_head list; + smp_call_func_t func; + void *info; + u16 flags; + u16 priv; +}; + +/* total number of cpus in this system (may exceed NR_CPUS) */ +extern unsigned int total_cpus; + +int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, + int wait); + +#ifdef CONFIG_SMP + +#include <linux/preempt.h> +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/thread_info.h> +#include <asm/smp.h> + +/* + * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. 
+ * (defined in asm header): + */ + +/* + * stops all CPUs but the current one: + */ +extern void smp_send_stop(void); + +/* + * sends a 'reschedule' event to another CPU: + */ +extern void smp_send_reschedule(int cpu); + + +/* + * Prepare machine for booting other CPUs. + */ +extern void smp_prepare_cpus(unsigned int max_cpus); + +/* + * Bring a CPU up + */ +extern int __cpu_up(unsigned int cpunum); + +/* + * Final polishing of CPUs + */ +extern void smp_cpus_done(unsigned int max_cpus); + +/* + * Call a function on all other processors + */ +int smp_call_function(smp_call_func_t func, void *info, int wait); +void smp_call_function_many(const struct cpumask *mask, + smp_call_func_t func, void *info, bool wait); + +void __smp_call_function_single(int cpuid, struct call_single_data *data, + int wait); + +int smp_call_function_any(const struct cpumask *mask, + smp_call_func_t func, void *info, int wait); + +/* + * Generic and arch helpers + */ +#ifdef CONFIG_USE_GENERIC_SMP_HELPERS +void __init call_function_init(void); +void generic_smp_call_function_single_interrupt(void); +void generic_smp_call_function_interrupt(void); +void ipi_call_lock(void); +void ipi_call_unlock(void); +void ipi_call_lock_irq(void); +void ipi_call_unlock_irq(void); +#else +static inline void call_function_init(void) { } +#endif + +/* + * Call a function on all processors + */ +int on_each_cpu(smp_call_func_t func, void *info, int wait); + +/* + * Call a function on processors specified by mask, which might include + * the local one. + */ +void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, + void *info, bool wait); + +/* + * Call a function on each processor for which the supplied function + * cond_func returns a positive value. This may include the local + * processor. + */ +void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags); + +/* + * Mark the boot cpu "online" so that it can call console drivers in + * printk() and can access its per-cpu storage. + */ +void smp_prepare_boot_cpu(void); + +extern unsigned int setup_max_cpus; +extern void __init setup_nr_cpu_ids(void); +extern void __init smp_init(void); + +#else /* !SMP */ + +static inline void smp_send_stop(void) { } + +/* + * These macros fold the SMP functionality into a single CPU system + */ +#define raw_smp_processor_id() 0 +static inline int up_smp_call_function(smp_call_func_t func, void *info) +{ + return 0; +} +#define smp_call_function(func, info, wait) \ + (up_smp_call_function(func, info)) +#define on_each_cpu(func,info,wait) \ + ({ \ + local_irq_disable(); \ + func(info); \ + local_irq_enable(); \ + 0; \ + }) +/* + * Note we still need to test the mask even for UP + * because we actually can get an empty mask from + * code that on SMP might call us without the local + * CPU in the mask. + */ +#define on_each_cpu_mask(mask, func, info, wait) \ + do { \ + if (cpumask_test_cpu(0, (mask))) { \ + local_irq_disable(); \ + (func)(info); \ + local_irq_enable(); \ + } \ + } while (0) +/* + * Preemption is disabled here to make sure the cond_func is called under the + * same condtions in UP and SMP. 
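 *
 * Usage sketch (both callbacks are hypothetical): run a flush function only
 * on CPUs whose per-cpu queue is non-empty, waiting for completion:
 *
 *	on_each_cpu_cond(queue_nonempty, queue_flush, NULL, true, GFP_KERNEL);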
+ */ +#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\ + do { \ + void *__info = (info); \ + preempt_disable(); \ + if ((cond_func)(0, __info)) { \ + local_irq_disable(); \ + (func)(__info); \ + local_irq_enable(); \ + } \ + preempt_enable(); \ + } while (0) + +static inline void smp_send_reschedule(int cpu) { } +#define num_booting_cpus() 1 +#define smp_prepare_boot_cpu() do {} while (0) +#define smp_call_function_many(mask, func, info, wait) \ + (up_smp_call_function(func, info)) +static inline void call_function_init(void) { } + +static inline int +smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, + void *info, int wait) +{ + return smp_call_function_single(0, func, info, wait); +} + +#endif /* !SMP */ + +/* + * smp_processor_id(): get the current CPU ID. + * + * if DEBUG_PREEMPT is enabled then we check whether it is + * used in a preemption-safe way. (smp_processor_id() is safe + * if it's used in a preemption-off critical section, or in + * a thread that is bound to the current CPU.) + * + * NOTE: raw_smp_processor_id() is for internal use only + * (smp_processor_id() is the preferred variant), but in rare + * instances it might also be used to turn off false positives + * (i.e. smp_processor_id() use that the debugging code reports but + * which use for some reason is legal). Don't use this to hack around + * the warning message, as your code might not work under PREEMPT. + */ +#ifdef CONFIG_DEBUG_PREEMPT + extern unsigned int debug_smp_processor_id(void); +# define smp_processor_id() debug_smp_processor_id() +#else +# define smp_processor_id() raw_smp_processor_id() +#endif + +#define get_cpu() ({ preempt_disable(); smp_processor_id(); }) +#define put_cpu() preempt_enable() + +/* + * Callback to arch code if there's nosmp or maxcpus=0 on the + * boot command line: + */ +extern void arch_disable_smp_support(void); + +void smp_setup_processor_id(void); + +#endif /* __LINUX_SMP_H */ diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h new file mode 100644 index 00000000..4dde70e7 --- /dev/null +++ b/include/linux/smsc911x.h @@ -0,0 +1,62 @@ +/*************************************************************************** + * + * Copyright (C) 2004-2008 SMSC + * Copyright (C) 2005-2008 ARM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + ***************************************************************************/ +#ifndef __LINUX_SMSC911X_H__ +#define __LINUX_SMSC911X_H__ + +#include <linux/phy.h> + +/* platform_device configuration data, should be assigned to + * the platform_device's dev.platform_data */ +struct smsc911x_platform_config { + unsigned int irq_polarity; + unsigned int irq_type; + unsigned int flags; + unsigned int shift; + phy_interface_t phy_interface; + unsigned char mac[6]; +}; + +/* Constants for platform_device irq polarity configuration */ +#define SMSC911X_IRQ_POLARITY_ACTIVE_LOW 0 +#define SMSC911X_IRQ_POLARITY_ACTIVE_HIGH 1 + +/* Constants for platform_device irq type configuration */ +#define SMSC911X_IRQ_TYPE_OPEN_DRAIN 0 +#define SMSC911X_IRQ_TYPE_PUSH_PULL 1 + +/* Constants for flags */ +#define SMSC911X_USE_16BIT (BIT(0)) +#define SMSC911X_USE_32BIT (BIT(1)) +#define SMSC911X_FORCE_INTERNAL_PHY (BIT(2)) +#define SMSC911X_FORCE_EXTERNAL_PHY (BIT(3)) +#define SMSC911X_SAVE_MAC_ADDRESS (BIT(4)) + +/* + * SMSC911X_SWAP_FIFO: + * Enables software byte swap for fifo data. Should only be used as a + * "last resort" in the case of big endian mode on boards with incorrectly + * routed data bus to older devices such as LAN9118. Newer devices such as + * LAN9221 can handle this in hardware, there are registers to control + * this swapping but the driver doesn't currently use them. + */ +#define SMSC911X_SWAP_FIFO (BIT(5)) + +#endif /* __LINUX_SMSC911X_H__ */ diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h new file mode 100644 index 00000000..ce718cbc --- /dev/null +++ b/include/linux/smscphy.h @@ -0,0 +1,25 @@ +#ifndef __LINUX_SMSCPHY_H__ +#define __LINUX_SMSCPHY_H__ + +#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ +#define MII_LAN83C185_IM 30 /* Interrupt Mask */ +#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */ + +#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ +#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ +#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */ +#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */ +#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */ +#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */ +#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */ + +#define MII_LAN83C185_ISF_INT_ALL (0x0e) + +#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ + (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \ + MII_LAN83C185_ISF_INT7) + +#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */ +#define MII_LAN83C185_ENERGYON (1 << 1) /* ENERGYON */ + +#endif /* __LINUX_SMSCPHY_H__ */ diff --git a/include/linux/snmp.h b/include/linux/snmp.h new file mode 100644 index 00000000..2e68f5ba --- /dev/null +++ b/include/linux/snmp.h @@ -0,0 +1,273 @@ +/* + * Definitions for MIBs + * + * Author: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org> + */ + +#ifndef _LINUX_SNMP_H +#define _LINUX_SNMP_H + +/* ipstats mib definitions */ +/* + * RFC 1213: MIB-II + * RFC 2011 (updates 1213): SNMPv2-MIB-IP + * RFC 2863: Interfaces Group MIB + * RFC 2465: IPv6 MIB: General Group + * draft-ietf-ipv6-rfc2011-update-10.txt: MIB for IP: IP Statistics Tables + */ +enum +{ + IPSTATS_MIB_NUM = 0, + IPSTATS_MIB_INPKTS, /* InReceives */ + IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */ + IPSTATS_MIB_INTOOBIGERRORS, /* InTooBigErrors */ + IPSTATS_MIB_INNOROUTES, /* InNoRoutes */ + IPSTATS_MIB_INADDRERRORS, /* InAddrErrors */ + IPSTATS_MIB_INUNKNOWNPROTOS, /* InUnknownProtos */ + 
IPSTATS_MIB_INTRUNCATEDPKTS, /* InTruncatedPkts */ + IPSTATS_MIB_INDISCARDS, /* InDiscards */ + IPSTATS_MIB_INDELIVERS, /* InDelivers */ + IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */ + IPSTATS_MIB_OUTPKTS, /* OutRequests */ + IPSTATS_MIB_OUTDISCARDS, /* OutDiscards */ + IPSTATS_MIB_OUTNOROUTES, /* OutNoRoutes */ + IPSTATS_MIB_REASMTIMEOUT, /* ReasmTimeout */ + IPSTATS_MIB_REASMREQDS, /* ReasmReqds */ + IPSTATS_MIB_REASMOKS, /* ReasmOKs */ + IPSTATS_MIB_REASMFAILS, /* ReasmFails */ + IPSTATS_MIB_FRAGOKS, /* FragOKs */ + IPSTATS_MIB_FRAGFAILS, /* FragFails */ + IPSTATS_MIB_FRAGCREATES, /* FragCreates */ + IPSTATS_MIB_INMCASTPKTS, /* InMcastPkts */ + IPSTATS_MIB_OUTMCASTPKTS, /* OutMcastPkts */ + IPSTATS_MIB_INBCASTPKTS, /* InBcastPkts */ + IPSTATS_MIB_OUTBCASTPKTS, /* OutBcastPkts */ + IPSTATS_MIB_INOCTETS, /* InOctets */ + IPSTATS_MIB_OUTOCTETS, /* OutOctets */ + IPSTATS_MIB_INMCASTOCTETS, /* InMcastOctets */ + IPSTATS_MIB_OUTMCASTOCTETS, /* OutMcastOctets */ + IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */ + IPSTATS_MIB_OUTBCASTOCTETS, /* OutBcastOctets */ + __IPSTATS_MIB_MAX +}; + +/* icmp mib definitions */ +/* + * RFC 1213: MIB-II ICMP Group + * RFC 2011 (updates 1213): SNMPv2 MIB for IP: ICMP group + */ +enum +{ + ICMP_MIB_NUM = 0, + ICMP_MIB_INMSGS, /* InMsgs */ + ICMP_MIB_INERRORS, /* InErrors */ + ICMP_MIB_INDESTUNREACHS, /* InDestUnreachs */ + ICMP_MIB_INTIMEEXCDS, /* InTimeExcds */ + ICMP_MIB_INPARMPROBS, /* InParmProbs */ + ICMP_MIB_INSRCQUENCHS, /* InSrcQuenchs */ + ICMP_MIB_INREDIRECTS, /* InRedirects */ + ICMP_MIB_INECHOS, /* InEchos */ + ICMP_MIB_INECHOREPS, /* InEchoReps */ + ICMP_MIB_INTIMESTAMPS, /* InTimestamps */ + ICMP_MIB_INTIMESTAMPREPS, /* InTimestampReps */ + ICMP_MIB_INADDRMASKS, /* InAddrMasks */ + ICMP_MIB_INADDRMASKREPS, /* InAddrMaskReps */ + ICMP_MIB_OUTMSGS, /* OutMsgs */ + ICMP_MIB_OUTERRORS, /* OutErrors */ + ICMP_MIB_OUTDESTUNREACHS, /* OutDestUnreachs */ + ICMP_MIB_OUTTIMEEXCDS, /* OutTimeExcds */ + ICMP_MIB_OUTPARMPROBS, /* OutParmProbs */ + ICMP_MIB_OUTSRCQUENCHS, /* OutSrcQuenchs */ + ICMP_MIB_OUTREDIRECTS, /* OutRedirects */ + ICMP_MIB_OUTECHOS, /* OutEchos */ + ICMP_MIB_OUTECHOREPS, /* OutEchoReps */ + ICMP_MIB_OUTTIMESTAMPS, /* OutTimestamps */ + ICMP_MIB_OUTTIMESTAMPREPS, /* OutTimestampReps */ + ICMP_MIB_OUTADDRMASKS, /* OutAddrMasks */ + ICMP_MIB_OUTADDRMASKREPS, /* OutAddrMaskReps */ + __ICMP_MIB_MAX +}; + +#define __ICMPMSG_MIB_MAX 512 /* Out+In for all 8-bit ICMP types */ + +/* icmp6 mib definitions */ +/* + * RFC 2466: ICMPv6-MIB + */ +enum +{ + ICMP6_MIB_NUM = 0, + ICMP6_MIB_INMSGS, /* InMsgs */ + ICMP6_MIB_INERRORS, /* InErrors */ + ICMP6_MIB_OUTMSGS, /* OutMsgs */ + ICMP6_MIB_OUTERRORS, /* OutErrors */ + __ICMP6_MIB_MAX +}; + +#define __ICMP6MSG_MIB_MAX 512 /* Out+In for all 8-bit ICMPv6 types */ + +/* tcp mib definitions */ +/* + * RFC 1213: MIB-II TCP group + * RFC 2012 (updates 1213): SNMPv2-MIB-TCP + */ +enum +{ + TCP_MIB_NUM = 0, + TCP_MIB_RTOALGORITHM, /* RtoAlgorithm */ + TCP_MIB_RTOMIN, /* RtoMin */ + TCP_MIB_RTOMAX, /* RtoMax */ + TCP_MIB_MAXCONN, /* MaxConn */ + TCP_MIB_ACTIVEOPENS, /* ActiveOpens */ + TCP_MIB_PASSIVEOPENS, /* PassiveOpens */ + TCP_MIB_ATTEMPTFAILS, /* AttemptFails */ + TCP_MIB_ESTABRESETS, /* EstabResets */ + TCP_MIB_CURRESTAB, /* CurrEstab */ + TCP_MIB_INSEGS, /* InSegs */ + TCP_MIB_OUTSEGS, /* OutSegs */ + TCP_MIB_RETRANSSEGS, /* RetransSegs */ + TCP_MIB_INERRS, /* InErrs */ + TCP_MIB_OUTRSTS, /* OutRsts */ + __TCP_MIB_MAX +}; + +/* udp mib definitions */ +/* + * RFC 1213: MIB-II UDP group + * RFC 
2013 (updates 1213): SNMPv2-MIB-UDP + */ +enum +{ + UDP_MIB_NUM = 0, + UDP_MIB_INDATAGRAMS, /* InDatagrams */ + UDP_MIB_NOPORTS, /* NoPorts */ + UDP_MIB_INERRORS, /* InErrors */ + UDP_MIB_OUTDATAGRAMS, /* OutDatagrams */ + UDP_MIB_RCVBUFERRORS, /* RcvbufErrors */ + UDP_MIB_SNDBUFERRORS, /* SndbufErrors */ + __UDP_MIB_MAX +}; + +/* linux mib definitions */ +enum +{ + LINUX_MIB_NUM = 0, + LINUX_MIB_SYNCOOKIESSENT, /* SyncookiesSent */ + LINUX_MIB_SYNCOOKIESRECV, /* SyncookiesRecv */ + LINUX_MIB_SYNCOOKIESFAILED, /* SyncookiesFailed */ + LINUX_MIB_EMBRYONICRSTS, /* EmbryonicRsts */ + LINUX_MIB_PRUNECALLED, /* PruneCalled */ + LINUX_MIB_RCVPRUNED, /* RcvPruned */ + LINUX_MIB_OFOPRUNED, /* OfoPruned */ + LINUX_MIB_OUTOFWINDOWICMPS, /* OutOfWindowIcmps */ + LINUX_MIB_LOCKDROPPEDICMPS, /* LockDroppedIcmps */ + LINUX_MIB_ARPFILTER, /* ArpFilter */ + LINUX_MIB_TIMEWAITED, /* TimeWaited */ + LINUX_MIB_TIMEWAITRECYCLED, /* TimeWaitRecycled */ + LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */ + LINUX_MIB_PAWSPASSIVEREJECTED, /* PAWSPassiveRejected */ + LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */ + LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */ + LINUX_MIB_DELAYEDACKS, /* DelayedACKs */ + LINUX_MIB_DELAYEDACKLOCKED, /* DelayedACKLocked */ + LINUX_MIB_DELAYEDACKLOST, /* DelayedACKLost */ + LINUX_MIB_LISTENOVERFLOWS, /* ListenOverflows */ + LINUX_MIB_LISTENDROPS, /* ListenDrops */ + LINUX_MIB_TCPPREQUEUED, /* TCPPrequeued */ + LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, /* TCPDirectCopyFromBacklog */ + LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, /* TCPDirectCopyFromPrequeue */ + LINUX_MIB_TCPPREQUEUEDROPPED, /* TCPPrequeueDropped */ + LINUX_MIB_TCPHPHITS, /* TCPHPHits */ + LINUX_MIB_TCPHPHITSTOUSER, /* TCPHPHitsToUser */ + LINUX_MIB_TCPPUREACKS, /* TCPPureAcks */ + LINUX_MIB_TCPHPACKS, /* TCPHPAcks */ + LINUX_MIB_TCPRENORECOVERY, /* TCPRenoRecovery */ + LINUX_MIB_TCPSACKRECOVERY, /* TCPSackRecovery */ + LINUX_MIB_TCPSACKRENEGING, /* TCPSACKReneging */ + LINUX_MIB_TCPFACKREORDER, /* TCPFACKReorder */ + LINUX_MIB_TCPSACKREORDER, /* TCPSACKReorder */ + LINUX_MIB_TCPRENOREORDER, /* TCPRenoReorder */ + LINUX_MIB_TCPTSREORDER, /* TCPTSReorder */ + LINUX_MIB_TCPFULLUNDO, /* TCPFullUndo */ + LINUX_MIB_TCPPARTIALUNDO, /* TCPPartialUndo */ + LINUX_MIB_TCPDSACKUNDO, /* TCPDSACKUndo */ + LINUX_MIB_TCPLOSSUNDO, /* TCPLossUndo */ + LINUX_MIB_TCPLOSTRETRANSMIT, /* TCPLostRetransmit */ + LINUX_MIB_TCPRENOFAILURES, /* TCPRenoFailures */ + LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */ + LINUX_MIB_TCPLOSSFAILURES, /* TCPLossFailures */ + LINUX_MIB_TCPFASTRETRANS, /* TCPFastRetrans */ + LINUX_MIB_TCPFORWARDRETRANS, /* TCPForwardRetrans */ + LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */ + LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */ + LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */ + LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */ + LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */ + LINUX_MIB_TCPRCVCOLLAPSED, /* TCPRcvCollapsed */ + LINUX_MIB_TCPDSACKOLDSENT, /* TCPDSACKOldSent */ + LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */ + LINUX_MIB_TCPDSACKRECV, /* TCPDSACKRecv */ + LINUX_MIB_TCPDSACKOFORECV, /* TCPDSACKOfoRecv */ + LINUX_MIB_TCPABORTONSYN, /* TCPAbortOnSyn */ + LINUX_MIB_TCPABORTONDATA, /* TCPAbortOnData */ + LINUX_MIB_TCPABORTONCLOSE, /* TCPAbortOnClose */ + LINUX_MIB_TCPABORTONMEMORY, /* TCPAbortOnMemory */ + LINUX_MIB_TCPABORTONTIMEOUT, /* TCPAbortOnTimeout */ + LINUX_MIB_TCPABORTONLINGER, /* TCPAbortOnLinger */ + LINUX_MIB_TCPABORTFAILED, /* TCPAbortFailed */ + 
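/*
+ * Editorial aside, not part of the original header: these LINUX_MIB_*
+ * counters are Linux-specific extensions to the standard MIBs; the label
+ * in each entry's comment is the column name printed on the "TcpExt:"
+ * lines of /proc/net/netstat, e.g.:
+ *
+ *	TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed ...
+ */ +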
LINUX_MIB_TCPMEMORYPRESSURES, /* TCPMemoryPressures */ + LINUX_MIB_TCPSACKDISCARD, /* TCPSACKDiscard */ + LINUX_MIB_TCPDSACKIGNOREDOLD, /* TCPSACKIgnoredOld */ + LINUX_MIB_TCPDSACKIGNOREDNOUNDO, /* TCPSACKIgnoredNoUndo */ + LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ + LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */ + LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */ + LINUX_MIB_SACKSHIFTED, + LINUX_MIB_SACKMERGED, + LINUX_MIB_SACKSHIFTFALLBACK, + LINUX_MIB_TCPBACKLOGDROP, + LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ + LINUX_MIB_TCPDEFERACCEPTDROP, + LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ + LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ + LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */ + LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */ + LINUX_MIB_TCPRETRANSFAIL, /* TCPRetransFail */ + LINUX_MIB_TCPRCVCOALESCE, /* TCPRcvCoalesce */ + __LINUX_MIB_MAX +}; + +/* linux Xfrm mib definitions */ +enum +{ + LINUX_MIB_XFRMNUM = 0, + LINUX_MIB_XFRMINERROR, /* XfrmInError */ + LINUX_MIB_XFRMINBUFFERERROR, /* XfrmInBufferError */ + LINUX_MIB_XFRMINHDRERROR, /* XfrmInHdrError */ + LINUX_MIB_XFRMINNOSTATES, /* XfrmInNoStates */ + LINUX_MIB_XFRMINSTATEPROTOERROR, /* XfrmInStateProtoError */ + LINUX_MIB_XFRMINSTATEMODEERROR, /* XfrmInStateModeError */ + LINUX_MIB_XFRMINSTATESEQERROR, /* XfrmInStateSeqError */ + LINUX_MIB_XFRMINSTATEEXPIRED, /* XfrmInStateExpired */ + LINUX_MIB_XFRMINSTATEMISMATCH, /* XfrmInStateMismatch */ + LINUX_MIB_XFRMINSTATEINVALID, /* XfrmInStateInvalid */ + LINUX_MIB_XFRMINTMPLMISMATCH, /* XfrmInTmplMismatch */ + LINUX_MIB_XFRMINNOPOLS, /* XfrmInNoPols */ + LINUX_MIB_XFRMINPOLBLOCK, /* XfrmInPolBlock */ + LINUX_MIB_XFRMINPOLERROR, /* XfrmInPolError */ + LINUX_MIB_XFRMOUTERROR, /* XfrmOutError */ + LINUX_MIB_XFRMOUTBUNDLEGENERROR, /* XfrmOutBundleGenError */ + LINUX_MIB_XFRMOUTBUNDLECHECKERROR, /* XfrmOutBundleCheckError */ + LINUX_MIB_XFRMOUTNOSTATES, /* XfrmOutNoStates */ + LINUX_MIB_XFRMOUTSTATEPROTOERROR, /* XfrmOutStateProtoError */ + LINUX_MIB_XFRMOUTSTATEMODEERROR, /* XfrmOutStateModeError */ + LINUX_MIB_XFRMOUTSTATESEQERROR, /* XfrmOutStateSeqError */ + LINUX_MIB_XFRMOUTSTATEEXPIRED, /* XfrmOutStateExpired */ + LINUX_MIB_XFRMOUTPOLBLOCK, /* XfrmOutPolBlock */ + LINUX_MIB_XFRMOUTPOLDEAD, /* XfrmOutPolDead */ + LINUX_MIB_XFRMOUTPOLERROR, /* XfrmOutPolError */ + LINUX_MIB_XFRMFWDHDRERROR, /* XfrmFwdHdrError*/ + __LINUX_MIB_XFRMMAX +}; + +#endif /* _LINUX_SNMP_H */ diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h new file mode 100644 index 00000000..251729a4 --- /dev/null +++ b/include/linux/sock_diag.h @@ -0,0 +1,48 @@ +#ifndef __SOCK_DIAG_H__ +#define __SOCK_DIAG_H__ + +#include <linux/types.h> + +#define SOCK_DIAG_BY_FAMILY 20 + +struct sock_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; +}; + +enum { + SK_MEMINFO_RMEM_ALLOC, + SK_MEMINFO_RCVBUF, + SK_MEMINFO_WMEM_ALLOC, + SK_MEMINFO_SNDBUF, + SK_MEMINFO_FWD_ALLOC, + SK_MEMINFO_WMEM_QUEUED, + SK_MEMINFO_OPTMEM, + + SK_MEMINFO_VARS, +}; + +#ifdef __KERNEL__ +struct sk_buff; +struct nlmsghdr; +struct sock; + +struct sock_diag_handler { + __u8 family; + int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); +}; + +int sock_diag_register(struct sock_diag_handler *h); +void sock_diag_unregister(struct sock_diag_handler *h); + +void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); +void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); + +int sock_diag_check_cookie(void *sk, __u32 
*cookie); +void sock_diag_save_cookie(void *sk, __u32 *cookie); + +int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); + +extern struct sock *sock_diag_nlsk; +#endif /* KERNEL */ +#endif diff --git a/include/linux/socket.h b/include/linux/socket.h new file mode 100644 index 00000000..b84bbd48 --- /dev/null +++ b/include/linux/socket.h @@ -0,0 +1,343 @@ +#ifndef _LINUX_SOCKET_H +#define _LINUX_SOCKET_H + +/* + * Desired design of maximum size and alignment (see RFC2553) + */ +#define _K_SS_MAXSIZE 128 /* Implementation specific max size */ +#define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *)) + /* Implementation specific desired alignment */ + +typedef unsigned short __kernel_sa_family_t; + +struct __kernel_sockaddr_storage { + __kernel_sa_family_t ss_family; /* address family */ + /* Following field(s) are implementation specific */ + char __data[_K_SS_MAXSIZE - sizeof(unsigned short)]; + /* space to achieve desired size, */ + /* _SS_MAXSIZE value minus size of ss_family */ +} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ + +#ifdef __KERNEL__ + +#include <asm/socket.h> /* arch-dependent defines */ +#include <linux/sockios.h> /* the SIOCxxx I/O controls */ +#include <linux/uio.h> /* iovec support */ +#include <linux/types.h> /* pid_t */ +#include <linux/compiler.h> /* __user */ + +struct pid; +struct cred; + +#define __sockaddr_check_size(size) \ + BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) + +#ifdef CONFIG_PROC_FS +struct seq_file; +extern void socket_seq_show(struct seq_file *seq); +#endif + +typedef __kernel_sa_family_t sa_family_t; + +/* + * 1003.1g requires sa_family_t and that sa_data is char. + */ + +struct sockaddr { + sa_family_t sa_family; /* address family, AF_xxx */ + char sa_data[14]; /* 14 bytes of protocol address */ +}; + +struct linger { + int l_onoff; /* Linger active */ + int l_linger; /* How long to linger for */ +}; + +#define sockaddr_storage __kernel_sockaddr_storage + +/* + * As we do 4.4BSD message passing we use a 4.4BSD message passing + * system, not 4.3. Thus msg_accrights(len) are now missing. They + * belong in an obscure libc emulation or the bin. 
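+ *
+ * Illustrative sketch (an editorial addition, not part of the original
+ * header): userspace typically fills the struct msghdr defined below
+ * before calling sendmsg(2).  A minimal, hypothetical fragment, with
+ * "daddr" a struct sockaddr_in and all error handling omitted:
+ *
+ *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
+ *	struct msghdr msg = {
+ *		.msg_name	= &daddr,
+ *		.msg_namelen	= sizeof(daddr),
+ *		.msg_iov	= &iov,
+ *		.msg_iovlen	= 1,
+ *	};
+ *	sendmsg(fd, &msg, 0);
+ *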
+ */ + +struct msghdr { + void * msg_name; /* Socket name */ + int msg_namelen; /* Length of name */ + struct iovec * msg_iov; /* Data blocks */ + __kernel_size_t msg_iovlen; /* Number of blocks */ + void * msg_control; /* Per protocol magic (eg BSD file descriptor passing) */ + __kernel_size_t msg_controllen; /* Length of cmsg list */ + unsigned msg_flags; +}; + +/* For recvmmsg/sendmmsg */ +struct mmsghdr { + struct msghdr msg_hdr; + unsigned msg_len; +}; + +/* + * POSIX 1003.1g - ancillary data object information + * Ancillary data consits of a sequence of pairs of + * (cmsghdr, cmsg_data[]) + */ + +struct cmsghdr { + __kernel_size_t cmsg_len; /* data byte count, including hdr */ + int cmsg_level; /* originating protocol */ + int cmsg_type; /* protocol-specific type */ +}; + +/* + * Ancillary data object information MACROS + * Table 5-14 of POSIX 1003.1g + */ + +#define __CMSG_NXTHDR(ctl, len, cmsg) __cmsg_nxthdr((ctl),(len),(cmsg)) +#define CMSG_NXTHDR(mhdr, cmsg) cmsg_nxthdr((mhdr), (cmsg)) + +#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) ) + +#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr)))) +#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len)) +#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len)) + +#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \ + (struct cmsghdr *)(ctl) : \ + (struct cmsghdr *)NULL) +#define CMSG_FIRSTHDR(msg) __CMSG_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen) +#define CMSG_OK(mhdr, cmsg) ((cmsg)->cmsg_len >= sizeof(struct cmsghdr) && \ + (cmsg)->cmsg_len <= (unsigned long) \ + ((mhdr)->msg_controllen - \ + ((char *)(cmsg) - (char *)(mhdr)->msg_control))) + +/* + * Get the next cmsg header + * + * PLEASE, do not touch this function. If you think, that it is + * incorrect, grep kernel sources and think about consequences + * before trying to improve it. + * + * Now it always returns valid, not truncated ancillary object + * HEADER. But caller still MUST check, that cmsg->cmsg_len is + * inside range, given by msg->msg_controllen before using + * ancillary object DATA. --ANK (980731) + */ + +static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, + struct cmsghdr *__cmsg) +{ + struct cmsghdr * __ptr; + + __ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) + CMSG_ALIGN(__cmsg->cmsg_len)); + if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size) + return (struct cmsghdr *)0; + + return __ptr; +} + +static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) +{ + return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); +} + +/* "Socket"-level control message types: */ + +#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */ +#define SCM_CREDENTIALS 0x02 /* rw: struct ucred */ +#define SCM_SECURITY 0x03 /* rw: security label */ + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; + +/* Supported address families. 
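+ *
+ * (Editorial illustration, not part of the original header.)  The AF_*
+ * values below are passed as the first argument to socket(2), e.g.
+ * socket(AF_INET, SOCK_STREAM, 0) for a TCP/IPv4 socket.  As a further
+ * sketch, ancillary data received on an AF_UNIX socket is walked with
+ * the CMSG_* macros defined above; hypothetical, error handling omitted:
+ *
+ *	struct cmsghdr *cmsg;
+ *	recvmsg(fd, &msg, 0);
+ *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
+ *		if (cmsg->cmsg_level == SOL_SOCKET &&
+ *		    cmsg->cmsg_type == SCM_RIGHTS)
+ *			passed_fd = *(int *)CMSG_DATA(cmsg);
+ *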
*/ +#define AF_UNSPEC 0 +#define AF_UNIX 1 /* Unix domain sockets */ +#define AF_LOCAL 1 /* POSIX name for AF_UNIX */ +#define AF_INET 2 /* Internet IP Protocol */ +#define AF_AX25 3 /* Amateur Radio AX.25 */ +#define AF_IPX 4 /* Novell IPX */ +#define AF_APPLETALK 5 /* AppleTalk DDP */ +#define AF_NETROM 6 /* Amateur Radio NET/ROM */ +#define AF_BRIDGE 7 /* Multiprotocol bridge */ +#define AF_ATMPVC 8 /* ATM PVCs */ +#define AF_X25 9 /* Reserved for X.25 project */ +#define AF_INET6 10 /* IP version 6 */ +#define AF_ROSE 11 /* Amateur Radio X.25 PLP */ +#define AF_DECnet 12 /* Reserved for DECnet project */ +#define AF_NETBEUI 13 /* Reserved for 802.2LLC project*/ +#define AF_SECURITY 14 /* Security callback pseudo AF */ +#define AF_KEY 15 /* PF_KEY key management API */ +#define AF_NETLINK 16 +#define AF_ROUTE AF_NETLINK /* Alias to emulate 4.4BSD */ +#define AF_PACKET 17 /* Packet family */ +#define AF_ASH 18 /* Ash */ +#define AF_ECONET 19 /* Acorn Econet */ +#define AF_ATMSVC 20 /* ATM SVCs */ +#define AF_RDS 21 /* RDS sockets */ +#define AF_SNA 22 /* Linux SNA Project (nutters!) */ +#define AF_IRDA 23 /* IRDA sockets */ +#define AF_PPPOX 24 /* PPPoX sockets */ +#define AF_WANPIPE 25 /* Wanpipe API Sockets */ +#define AF_LLC 26 /* Linux LLC */ +#define AF_CAN 29 /* Controller Area Network */ +#define AF_TIPC 30 /* TIPC sockets */ +#define AF_BLUETOOTH 31 /* Bluetooth sockets */ +#define AF_IUCV 32 /* IUCV sockets */ +#define AF_RXRPC 33 /* RxRPC sockets */ +#define AF_ISDN 34 /* mISDN sockets */ +#define AF_PHONET 35 /* Phonet sockets */ +#define AF_IEEE802154 36 /* IEEE802154 sockets */ +#define AF_CAIF 37 /* CAIF sockets */ +#define AF_ALG 38 /* Algorithm sockets */ +#define AF_NFC 39 /* NFC sockets */ +#define AF_MAX 40 /* For now.. */ + +/* Protocol families, same as address families. */ +#define PF_UNSPEC AF_UNSPEC +#define PF_UNIX AF_UNIX +#define PF_LOCAL AF_LOCAL +#define PF_INET AF_INET +#define PF_AX25 AF_AX25 +#define PF_IPX AF_IPX +#define PF_APPLETALK AF_APPLETALK +#define PF_NETROM AF_NETROM +#define PF_BRIDGE AF_BRIDGE +#define PF_ATMPVC AF_ATMPVC +#define PF_X25 AF_X25 +#define PF_INET6 AF_INET6 +#define PF_ROSE AF_ROSE +#define PF_DECnet AF_DECnet +#define PF_NETBEUI AF_NETBEUI +#define PF_SECURITY AF_SECURITY +#define PF_KEY AF_KEY +#define PF_NETLINK AF_NETLINK +#define PF_ROUTE AF_ROUTE +#define PF_PACKET AF_PACKET +#define PF_ASH AF_ASH +#define PF_ECONET AF_ECONET +#define PF_ATMSVC AF_ATMSVC +#define PF_RDS AF_RDS +#define PF_SNA AF_SNA +#define PF_IRDA AF_IRDA +#define PF_PPPOX AF_PPPOX +#define PF_WANPIPE AF_WANPIPE +#define PF_LLC AF_LLC +#define PF_CAN AF_CAN +#define PF_TIPC AF_TIPC +#define PF_BLUETOOTH AF_BLUETOOTH +#define PF_IUCV AF_IUCV +#define PF_RXRPC AF_RXRPC +#define PF_ISDN AF_ISDN +#define PF_PHONET AF_PHONET +#define PF_IEEE802154 AF_IEEE802154 +#define PF_CAIF AF_CAIF +#define PF_ALG AF_ALG +#define PF_NFC AF_NFC +#define PF_MAX AF_MAX + +/* Maximum queue length specifiable by listen. */ +#define SOMAXCONN 128 + +/* Flags we can use with send/ and recv. + Added those for 1003.1g not all are supported yet + */ + +#define MSG_OOB 1 +#define MSG_PEEK 2 +#define MSG_DONTROUTE 4 +#define MSG_TRYHARD 4 /* Synonym for MSG_DONTROUTE for DECnet */ +#define MSG_CTRUNC 8 +#define MSG_PROBE 0x10 /* Do not send. Only probe path f.e. 
for MTU */ +#define MSG_TRUNC 0x20 +#define MSG_DONTWAIT 0x40 /* Nonblocking io */ +#define MSG_EOR 0x80 /* End of record */ +#define MSG_WAITALL 0x100 /* Wait for a full request */ +#define MSG_FIN 0x200 +#define MSG_SYN 0x400 +#define MSG_CONFIRM 0x800 /* Confirm path validity */ +#define MSG_RST 0x1000 +#define MSG_ERRQUEUE 0x2000 /* Fetch message from error queue */ +#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ +#define MSG_MORE 0x8000 /* Sender will send more */ +#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ +#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ +#define MSG_EOF MSG_FIN + +#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file + descriptor received through + SCM_RIGHTS */ +#if defined(CONFIG_COMPAT) +#define MSG_CMSG_COMPAT 0x80000000 /* This message needs 32 bit fixups */ +#else +#define MSG_CMSG_COMPAT 0 /* We never have 32 bit fixups */ +#endif + + +/* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */ +#define SOL_IP 0 +/* #define SOL_ICMP 1 No-no-no! Due to Linux :-) we cannot use SOL_ICMP=1 */ +#define SOL_TCP 6 +#define SOL_UDP 17 +#define SOL_IPV6 41 +#define SOL_ICMPV6 58 +#define SOL_SCTP 132 +#define SOL_UDPLITE 136 /* UDP-Lite (RFC 3828) */ +#define SOL_RAW 255 +#define SOL_IPX 256 +#define SOL_AX25 257 +#define SOL_ATALK 258 +#define SOL_NETROM 259 +#define SOL_ROSE 260 +#define SOL_DECNET 261 +#define SOL_X25 262 +#define SOL_PACKET 263 +#define SOL_ATM 264 /* ATM layer (cell level) */ +#define SOL_AAL 265 /* ATM Adaption Layer (packet level) */ +#define SOL_IRDA 266 +#define SOL_NETBEUI 267 +#define SOL_LLC 268 +#define SOL_DCCP 269 +#define SOL_NETLINK 270 +#define SOL_TIPC 271 +#define SOL_RXRPC 272 +#define SOL_PPPOL2TP 273 +#define SOL_BLUETOOTH 274 +#define SOL_PNPIPE 275 +#define SOL_RDS 276 +#define SOL_IUCV 277 +#define SOL_CAIF 278 +#define SOL_ALG 279 + +/* IPX options */ +#define IPX_TYPE 1 + +extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred); + +extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); +extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, + int offset, int len); +extern int csum_partial_copy_fromiovecend(unsigned char *kdata, + struct iovec *iov, + int offset, + unsigned int len, __wsum *csump); + +extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); +extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); +extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, + int offset, int len); +extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); +extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); + +struct timespec; + +extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, + unsigned int flags, struct timespec *timeout); +extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, + unsigned int vlen, unsigned int flags); +#endif /* not kernel and not glibc */ +#endif /* _LINUX_SOCKET_H */ diff --git a/include/linux/sockios.h b/include/linux/sockios.h new file mode 100644 index 00000000..f7ffe36d --- /dev/null +++ b/include/linux/sockios.h @@ -0,0 +1,149 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. 
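+ *
+ *		(Editorial aside, not part of the original header: the SOL_*
+ *		option levels defined at the end of socket.h above are used
+ *		with setsockopt(2)/getsockopt(2); for example, hypothetically:
+ *
+ *			int one = 1;
+ *			setsockopt(fd, SOL_TCP, TCP_NODELAY, &one, sizeof(one));
+ *
+ *		where TCP_NODELAY comes from <linux/tcp.h>.)
+ *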
+ * + * Definitions of the socket-level I/O control calls. + * + * Version: @(#)sockios.h 1.0.2 03/09/93 + * + * Authors: Ross Biro + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_SOCKIOS_H +#define _LINUX_SOCKIOS_H + +#include <asm/sockios.h> + +/* Linux-specific socket ioctls */ +#define SIOCINQ FIONREAD +#define SIOCOUTQ TIOCOUTQ /* output queue size (not sent + not acked) */ + +/* Routing table calls. */ +#define SIOCADDRT 0x890B /* add routing table entry */ +#define SIOCDELRT 0x890C /* delete routing table entry */ +#define SIOCRTMSG 0x890D /* call to routing system */ + +/* Socket configuration controls. */ +#define SIOCGIFNAME 0x8910 /* get iface name */ +#define SIOCSIFLINK 0x8911 /* set iface channel */ +#define SIOCGIFCONF 0x8912 /* get iface list */ +#define SIOCGIFFLAGS 0x8913 /* get flags */ +#define SIOCSIFFLAGS 0x8914 /* set flags */ +#define SIOCGIFADDR 0x8915 /* get PA address */ +#define SIOCSIFADDR 0x8916 /* set PA address */ +#define SIOCGIFDSTADDR 0x8917 /* get remote PA address */ +#define SIOCSIFDSTADDR 0x8918 /* set remote PA address */ +#define SIOCGIFBRDADDR 0x8919 /* get broadcast PA address */ +#define SIOCSIFBRDADDR 0x891a /* set broadcast PA address */ +#define SIOCGIFNETMASK 0x891b /* get network PA mask */ +#define SIOCSIFNETMASK 0x891c /* set network PA mask */ +#define SIOCGIFMETRIC 0x891d /* get metric */ +#define SIOCSIFMETRIC 0x891e /* set metric */ +#define SIOCGIFMEM 0x891f /* get memory address (BSD) */ +#define SIOCSIFMEM 0x8920 /* set memory address (BSD) */ +#define SIOCGIFMTU 0x8921 /* get MTU size */ +#define SIOCSIFMTU 0x8922 /* set MTU size */ +#define SIOCSIFNAME 0x8923 /* set interface name */ +#define SIOCSIFHWADDR 0x8924 /* set hardware address */ +#define SIOCGIFENCAP 0x8925 /* get/set encapsulations */ +#define SIOCSIFENCAP 0x8926 +#define SIOCGIFHWADDR 0x8927 /* Get hardware address */ +#define SIOCGIFSLAVE 0x8929 /* Driver slaving support */ +#define SIOCSIFSLAVE 0x8930 +#define SIOCADDMULTI 0x8931 /* Multicast address lists */ +#define SIOCDELMULTI 0x8932 +#define SIOCGIFINDEX 0x8933 /* name -> if_index mapping */ +#define SIOGIFINDEX SIOCGIFINDEX /* misprint compatibility :-) */ +#define SIOCSIFPFLAGS 0x8934 /* set/get extended flags set */ +#define SIOCGIFPFLAGS 0x8935 +#define SIOCDIFADDR 0x8936 /* delete PA address */ +#define SIOCSIFHWBROADCAST 0x8937 /* set hardware broadcast addr */ +#define SIOCGIFCOUNT 0x8938 /* get number of devices */ +#define SIOCKILLADDR 0x8939 /* kill sockets with this local addr */ + +#define SIOCGIFBR 0x8940 /* Bridging support */ +#define SIOCSIFBR 0x8941 /* Set bridging options */ + +#define SIOCGIFTXQLEN 0x8942 /* Get the tx queue length */ +#define SIOCSIFTXQLEN 0x8943 /* Set the tx queue length */ + +/* SIOCGIFDIVERT was: 0x8944 Frame diversion support */ +/* SIOCSIFDIVERT was: 0x8945 Set frame diversion options */ + +#define SIOCETHTOOL 0x8946 /* Ethtool interface */ + +#define SIOCGMIIPHY 0x8947 /* Get address of MII PHY in use. */ +#define SIOCGMIIREG 0x8948 /* Read MII PHY register. */ +#define SIOCSMIIREG 0x8949 /* Write MII PHY register. */ + +#define SIOCWANDEV 0x894A /* get/set netdev parameters */ + +#define SIOCOUTQNSD 0x894B /* output queue size (not sent only) */ + +/* ARP cache control calls. 
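+ *
+ * (Editorial illustration, not part of the original header: the interface
+ * ioctls above, SIOCGIFADDR and friends, take a struct ifreq from
+ * <linux/if.h> and may be issued on any socket descriptor.  A minimal,
+ * hypothetical sketch with error handling omitted:
+ *
+ *	struct ifreq ifr;
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *	ioctl(sockfd, SIOCGIFADDR, &ifr);
+ *
+ * after which ifr.ifr_addr holds the interface's primary address.)
+ *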
*/ + /* 0x8950 - 0x8952 * obsolete calls, don't re-use */ +#define SIOCDARP 0x8953 /* delete ARP table entry */ +#define SIOCGARP 0x8954 /* get ARP table entry */ +#define SIOCSARP 0x8955 /* set ARP table entry */ + +/* RARP cache control calls. */ +#define SIOCDRARP 0x8960 /* delete RARP table entry */ +#define SIOCGRARP 0x8961 /* get RARP table entry */ +#define SIOCSRARP 0x8962 /* set RARP table entry */ + +/* Driver configuration calls */ + +#define SIOCGIFMAP 0x8970 /* Get device parameters */ +#define SIOCSIFMAP 0x8971 /* Set device parameters */ + +/* DLCI configuration calls */ + +#define SIOCADDDLCI 0x8980 /* Create new DLCI device */ +#define SIOCDELDLCI 0x8981 /* Delete DLCI device */ + +#define SIOCGIFVLAN 0x8982 /* 802.1Q VLAN support */ +#define SIOCSIFVLAN 0x8983 /* Set 802.1Q VLAN options */ + +/* bonding calls */ + +#define SIOCBONDENSLAVE 0x8990 /* enslave a device to the bond */ +#define SIOCBONDRELEASE 0x8991 /* release a slave from the bond*/ +#define SIOCBONDSETHWADDR 0x8992 /* set the hw addr of the bond */ +#define SIOCBONDSLAVEINFOQUERY 0x8993 /* rtn info about slave state */ +#define SIOCBONDINFOQUERY 0x8994 /* rtn info about bond state */ +#define SIOCBONDCHANGEACTIVE 0x8995 /* update to a new active slave */ + +/* bridge calls */ +#define SIOCBRADDBR 0x89a0 /* create new bridge device */ +#define SIOCBRDELBR 0x89a1 /* remove bridge device */ +#define SIOCBRADDIF 0x89a2 /* add interface to bridge */ +#define SIOCBRDELIF 0x89a3 /* remove interface from bridge */ + +/* hardware time stamping: parameters in linux/net_tstamp.h */ +#define SIOCSHWTSTAMP 0x89b0 + +/* Device private ioctl calls */ + +/* + * These 16 ioctls are available to devices via the do_ioctl() device + * vector. Each device should include this file and redefine these names + * as their own. Because these are device dependent it is a good idea + * _NOT_ to issue them to random objects and hope. 
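+ *
+ * For instance (an editorial illustration, not from the original header),
+ * a driver would typically carve out its own names like:
+ *
+ *	#define MYDRV_IOC_GET_FOO	(SIOCDEVPRIVATE + 0)
+ *	#define MYDRV_IOC_SET_FOO	(SIOCDEVPRIVATE + 1)
+ *
+ * where the MYDRV_* names are purely hypothetical.
+ *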
+ * + * THESE IOCTLS ARE _DEPRECATED_ AND WILL DISAPPEAR IN 2.5.X -DaveM + */ + +#define SIOCDEVPRIVATE 0x89F0 /* to 89FF */ + +/* + * These 16 ioctl calls are protocol private + */ + +#define SIOCPROTOPRIVATE 0x89E0 /* to 89EF */ +#endif /* _LINUX_SOCKIOS_H */ diff --git a/include/linux/som.h b/include/linux/som.h new file mode 100644 index 00000000..166594e4 --- /dev/null +++ b/include/linux/som.h @@ -0,0 +1,154 @@ +#ifndef _LINUX_SOM_H +#define _LINUX_SOM_H + +/* File format definition for SOM executables / shared libraries */ + +/* we need struct timespec */ +#include <linux/time.h> + +#define SOM_PAGESIZE 4096 + +/* this is the SOM header */ +struct som_hdr { + short system_id; /* magic number - system */ + short a_magic; /* magic number - file type */ + unsigned int version_id; /* versiod ID: YYMMDDHH */ + struct timespec file_time; /* system clock */ + unsigned int entry_space; /* space for entry point */ + unsigned int entry_subspace; /* subspace for entry point */ + unsigned int entry_offset; /* offset of entry point */ + unsigned int aux_header_location; /* auxiliary header location */ + unsigned int aux_header_size; /* auxiliary header size */ + unsigned int som_length; /* length of entire SOM */ + unsigned int presumed_dp; /* compiler's DP value */ + unsigned int space_location; /* space dictionary location */ + unsigned int space_total; /* number of space entries */ + unsigned int subspace_location; /* subspace entries location */ + unsigned int subspace_total; /* number of subspace entries */ + unsigned int loader_fixup_location; /* MPE/iX loader fixup */ + unsigned int loader_fixup_total; /* number of fixup records */ + unsigned int space_strings_location; /* (sub)space names */ + unsigned int space_strings_size; /* size of strings area */ + unsigned int init_array_location; /* reserved */ + unsigned int init_array_total; /* reserved */ + unsigned int compiler_location; /* module dictionary */ + unsigned int compiler_total; /* number of modules */ + unsigned int symbol_location; /* symbol dictionary */ + unsigned int symbol_total; /* number of symbols */ + unsigned int fixup_request_location; /* fixup requests */ + unsigned int fixup_request_total; /* number of fixup requests */ + unsigned int symbol_strings_location;/* module & symbol names area */ + unsigned int symbol_strings_size; /* size of strings area */ + unsigned int unloadable_sp_location; /* unloadable spaces location */ + unsigned int unloadable_sp_size; /* size of data */ + unsigned int checksum; +}; + +/* values for system_id */ + +#define SOM_SID_PARISC_1_0 0x020b +#define SOM_SID_PARISC_1_1 0x0210 +#define SOM_SID_PARISC_2_0 0x0214 + +/* values for a_magic */ + +#define SOM_LIB_EXEC 0x0104 +#define SOM_RELOCATABLE 0x0106 +#define SOM_EXEC_NONSHARE 0x0107 +#define SOM_EXEC_SHARE 0x0108 +#define SOM_EXEC_DEMAND 0x010B +#define SOM_LIB_DYN 0x010D +#define SOM_LIB_SHARE 0x010E +#define SOM_LIB_RELOC 0x0619 + +/* values for version_id. Decimal not hex, yes. Grr. */ + +#define SOM_ID_OLD 85082112 +#define SOM_ID_NEW 87102412 + +struct aux_id { + unsigned int mandatory :1; /* the linker must understand this */ + unsigned int copy :1; /* Must be copied by the linker */ + unsigned int append :1; /* Must be merged by the linker */ + unsigned int ignore :1; /* Discard section if unknown */ + unsigned int reserved :12; + unsigned int type :16; /* Header type */ + unsigned int length; /* length of _following_ data */ +}; + +/* The Exec Auxiliary Header. Called The HP-UX Header within HP apparently. 
*/ +struct som_exec_auxhdr { + struct aux_id som_auxhdr; + int exec_tsize; /* Text size in bytes */ + int exec_tmem; /* Address to load text at */ + int exec_tfile; /* Location of text in file */ + int exec_dsize; /* Data size in bytes */ + int exec_dmem; /* Address to load data at */ + int exec_dfile; /* Location of data in file */ + int exec_bsize; /* Uninitialised data (bss) */ + int exec_entry; /* Address to start executing */ + int exec_flags; /* loader flags */ + int exec_bfill; /* initialisation value for bss */ +}; + +/* Oh, the things people do to avoid casts. Shame it'll break with gcc's + * new aliasing rules really. + */ +union name_pt { + char * n_name; + unsigned int n_strx; +}; + +/* The Space Dictionary */ +struct space_dictionary_record { + union name_pt name; /* index to subspace name */ + unsigned int is_loadable :1; /* loadable */ + unsigned int is_defined :1; /* defined within file */ + unsigned int is_private :1; /* not sharable */ + unsigned int has_intermediate_code :1; /* contains intermediate code */ + unsigned int is_tspecific :1; /* thread specific */ + unsigned int reserved :11; /* for future expansion */ + unsigned int sort_key :8; /* for linker */ + unsigned int reserved2 :8; /* for future expansion */ + + int space_number; /* index */ + int subspace_index; /* index into subspace dict */ + unsigned int subspace_quantity; /* number of subspaces */ + int loader_fix_index; /* for loader */ + unsigned int loader_fix_quantity; /* for loader */ + int init_pointer_index; /* data pointer array index */ + unsigned int init_pointer_quantity; /* number of data pointers */ +}; + +/* The Subspace Dictionary */ +struct subspace_dictionary_record { + int space_index; + unsigned int access_control_bits :7; + unsigned int memory_resident :1; + unsigned int dup_common :1; + unsigned int is_common :1; + unsigned int quadrant :2; + unsigned int initially_frozen :1; + unsigned int is_first :1; + unsigned int code_only :1; + unsigned int sort_key :8; + unsigned int replicate_init :1; + unsigned int continuation :1; + unsigned int is_tspecific :1; + unsigned int is_comdat :1; + unsigned int reserved :4; + + int file_loc_init_value; + unsigned int initialization_length; + unsigned int subspace_start; + unsigned int subspace_length; + + unsigned int reserved2 :5; + unsigned int alignment :27; + + union name_pt name; + int fixup_request_index; + unsigned int fixup_request_quantity; +}; + +#endif /* _LINUX_SOM_H */ diff --git a/include/linux/sonet.h b/include/linux/sonet.h new file mode 100644 index 00000000..de8832dd --- /dev/null +++ b/include/linux/sonet.h @@ -0,0 +1,75 @@ +/* sonet.h - SONET/SHD physical layer control */ + +/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef LINUX_SONET_H +#define LINUX_SONET_H + +#define __SONET_ITEMS \ + __HANDLE_ITEM(section_bip); /* section parity errors (B1) */ \ + __HANDLE_ITEM(line_bip); /* line parity errors (B2) */ \ + __HANDLE_ITEM(path_bip); /* path parity errors (B3) */ \ + __HANDLE_ITEM(line_febe); /* line parity errors at remote */ \ + __HANDLE_ITEM(path_febe); /* path parity errors at remote */ \ + __HANDLE_ITEM(corr_hcs); /* correctable header errors */ \ + __HANDLE_ITEM(uncorr_hcs); /* uncorrectable header errors */ \ + __HANDLE_ITEM(tx_cells); /* cells sent */ \ + __HANDLE_ITEM(rx_cells); /* cells received */ + +struct sonet_stats { +#define __HANDLE_ITEM(i) int i + __SONET_ITEMS +#undef __HANDLE_ITEM +} __attribute__ ((packed)); + + +#define SONET_GETSTAT _IOR('a',ATMIOC_PHYTYP,struct sonet_stats) + /* get 
statistics */ +#define SONET_GETSTATZ _IOR('a',ATMIOC_PHYTYP+1,struct sonet_stats) + /* ... and zero counters */ +#define SONET_SETDIAG _IOWR('a',ATMIOC_PHYTYP+2,int) + /* set error insertion */ +#define SONET_CLRDIAG _IOWR('a',ATMIOC_PHYTYP+3,int) + /* clear error insertion */ +#define SONET_GETDIAG _IOR('a',ATMIOC_PHYTYP+4,int) + /* query error insertion */ +#define SONET_SETFRAMING _IOW('a',ATMIOC_PHYTYP+5,int) + /* set framing mode (SONET/SDH) */ +#define SONET_GETFRAMING _IOR('a',ATMIOC_PHYTYP+6,int) + /* get framing mode */ +#define SONET_GETFRSENSE _IOR('a',ATMIOC_PHYTYP+7, \ + unsigned char[SONET_FRSENSE_SIZE]) /* get framing sense information */ + +#define SONET_INS_SBIP 1 /* section BIP */ +#define SONET_INS_LBIP 2 /* line BIP */ +#define SONET_INS_PBIP 4 /* path BIP */ +#define SONET_INS_FRAME 8 /* out of frame */ +#define SONET_INS_LOS 16 /* set line to zero */ +#define SONET_INS_LAIS 32 /* line alarm indication signal */ +#define SONET_INS_PAIS 64 /* path alarm indication signal */ +#define SONET_INS_HCS 128 /* insert HCS error */ + +#define SONET_FRAME_SONET 0 /* SONET STS-3 framing */ +#define SONET_FRAME_SDH 1 /* SDH STM-1 framing */ + +#define SONET_FRSENSE_SIZE 6 /* C1[3],H1[3] (0xff for unknown) */ + + +#ifdef __KERNEL__ + +#include <linux/atomic.h> + +struct k_sonet_stats { +#define __HANDLE_ITEM(i) atomic_t i + __SONET_ITEMS +#undef __HANDLE_ITEM +}; + +extern void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to); +extern void sonet_subtract_stats(struct k_sonet_stats *from, + struct sonet_stats *to); + +#endif + +#endif diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h new file mode 100644 index 00000000..e2e036d9 --- /dev/null +++ b/include/linux/sony-laptop.h @@ -0,0 +1,34 @@ +#ifndef _SONYLAPTOP_H_ +#define _SONYLAPTOP_H_ + +#include <linux/types.h> + +#ifdef __KERNEL__ + +/* used only for communication between v4l and sony-laptop */ + +#define SONY_PIC_COMMAND_GETCAMERA 1 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERA 2 +#define SONY_PIC_COMMAND_GETCAMERABRIGHTNESS 3 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERABRIGHTNESS 4 +#define SONY_PIC_COMMAND_GETCAMERACONTRAST 5 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERACONTRAST 6 +#define SONY_PIC_COMMAND_GETCAMERAHUE 7 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERAHUE 8 +#define SONY_PIC_COMMAND_GETCAMERACOLOR 9 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERACOLOR 10 +#define SONY_PIC_COMMAND_GETCAMERASHARPNESS 11 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERASHARPNESS 12 +#define SONY_PIC_COMMAND_GETCAMERAPICTURE 13 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERAPICTURE 14 +#define SONY_PIC_COMMAND_GETCAMERAAGC 15 /* obsolete */ +#define SONY_PIC_COMMAND_SETCAMERAAGC 16 +#define SONY_PIC_COMMAND_GETCAMERADIRECTION 17 /* obsolete */ +#define SONY_PIC_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */ +#define SONY_PIC_COMMAND_GETCAMERAREVISION 19 /* obsolete */ + +int sony_pic_camera_command(int command, u8 value); + +#endif /* __KERNEL__ */ + +#endif /* _SONYLAPTOP_H_ */ diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h new file mode 100644 index 00000000..c0f87da7 --- /dev/null +++ b/include/linux/sonypi.h @@ -0,0 +1,171 @@ +/* + * Sony Programmable I/O Control Device driver for VAIO + * + * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net> + * + * Copyright (C) 2005 Narayanan R S <nars@kadamba.org> + + * Copyright (C) 2001-2002 Alcôve <www.alcove.com> + * + * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au> + * + * Copyright (C) 
2001 Junichi Morita <jun1m@mars.dti.ne.jp> + * + * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp> + * + * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> + * + * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef _SONYPI_H_ +#define _SONYPI_H_ + +#include <linux/types.h> + +/* events the user application reading /dev/sonypi can use */ + +#define SONYPI_EVENT_IGNORE 0 +#define SONYPI_EVENT_JOGDIAL_DOWN 1 +#define SONYPI_EVENT_JOGDIAL_UP 2 +#define SONYPI_EVENT_JOGDIAL_DOWN_PRESSED 3 +#define SONYPI_EVENT_JOGDIAL_UP_PRESSED 4 +#define SONYPI_EVENT_JOGDIAL_PRESSED 5 +#define SONYPI_EVENT_JOGDIAL_RELEASED 6 /* obsolete */ +#define SONYPI_EVENT_CAPTURE_PRESSED 7 +#define SONYPI_EVENT_CAPTURE_RELEASED 8 /* obsolete */ +#define SONYPI_EVENT_CAPTURE_PARTIALPRESSED 9 +#define SONYPI_EVENT_CAPTURE_PARTIALRELEASED 10 +#define SONYPI_EVENT_FNKEY_ESC 11 +#define SONYPI_EVENT_FNKEY_F1 12 +#define SONYPI_EVENT_FNKEY_F2 13 +#define SONYPI_EVENT_FNKEY_F3 14 +#define SONYPI_EVENT_FNKEY_F4 15 +#define SONYPI_EVENT_FNKEY_F5 16 +#define SONYPI_EVENT_FNKEY_F6 17 +#define SONYPI_EVENT_FNKEY_F7 18 +#define SONYPI_EVENT_FNKEY_F8 19 +#define SONYPI_EVENT_FNKEY_F9 20 +#define SONYPI_EVENT_FNKEY_F10 21 +#define SONYPI_EVENT_FNKEY_F11 22 +#define SONYPI_EVENT_FNKEY_F12 23 +#define SONYPI_EVENT_FNKEY_1 24 +#define SONYPI_EVENT_FNKEY_2 25 +#define SONYPI_EVENT_FNKEY_D 26 +#define SONYPI_EVENT_FNKEY_E 27 +#define SONYPI_EVENT_FNKEY_F 28 +#define SONYPI_EVENT_FNKEY_S 29 +#define SONYPI_EVENT_FNKEY_B 30 +#define SONYPI_EVENT_BLUETOOTH_PRESSED 31 +#define SONYPI_EVENT_PKEY_P1 32 +#define SONYPI_EVENT_PKEY_P2 33 +#define SONYPI_EVENT_PKEY_P3 34 +#define SONYPI_EVENT_BACK_PRESSED 35 +#define SONYPI_EVENT_LID_CLOSED 36 +#define SONYPI_EVENT_LID_OPENED 37 +#define SONYPI_EVENT_BLUETOOTH_ON 38 +#define SONYPI_EVENT_BLUETOOTH_OFF 39 +#define SONYPI_EVENT_HELP_PRESSED 40 +#define SONYPI_EVENT_FNKEY_ONLY 41 +#define SONYPI_EVENT_JOGDIAL_FAST_DOWN 42 +#define SONYPI_EVENT_JOGDIAL_FAST_UP 43 +#define SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED 44 +#define SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED 45 +#define SONYPI_EVENT_JOGDIAL_VFAST_DOWN 46 +#define SONYPI_EVENT_JOGDIAL_VFAST_UP 47 +#define SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED 48 +#define SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED 49 +#define SONYPI_EVENT_ZOOM_PRESSED 50 +#define SONYPI_EVENT_THUMBPHRASE_PRESSED 51 +#define SONYPI_EVENT_MEYE_FACE 52 +#define SONYPI_EVENT_MEYE_OPPOSITE 53 +#define SONYPI_EVENT_MEMORYSTICK_INSERT 54 +#define SONYPI_EVENT_MEMORYSTICK_EJECT 55 +#define SONYPI_EVENT_ANYBUTTON_RELEASED 56 +#define SONYPI_EVENT_BATTERY_INSERT 57 +#define SONYPI_EVENT_BATTERY_REMOVE 58 +#define SONYPI_EVENT_FNKEY_RELEASED 59 +#define SONYPI_EVENT_WIRELESS_ON 60 +#define SONYPI_EVENT_WIRELESS_OFF 61 +#define 
SONYPI_EVENT_ZOOM_IN_PRESSED 62 +#define SONYPI_EVENT_ZOOM_OUT_PRESSED 63 +#define SONYPI_EVENT_CD_EJECT_PRESSED 64 +#define SONYPI_EVENT_MODEKEY_PRESSED 65 +#define SONYPI_EVENT_PKEY_P4 66 +#define SONYPI_EVENT_PKEY_P5 67 +#define SONYPI_EVENT_SETTINGKEY_PRESSED 68 +#define SONYPI_EVENT_VOLUME_INC_PRESSED 69 +#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70 +#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71 +#define SONYPI_EVENT_MEDIA_PRESSED 72 +#define SONYPI_EVENT_VENDOR_PRESSED 73 + +/* get/set brightness */ +#define SONYPI_IOCGBRT _IOR('v', 0, __u8) +#define SONYPI_IOCSBRT _IOW('v', 0, __u8) + +/* get battery full capacity/remaining capacity */ +#define SONYPI_IOCGBAT1CAP _IOR('v', 2, __u16) +#define SONYPI_IOCGBAT1REM _IOR('v', 3, __u16) +#define SONYPI_IOCGBAT2CAP _IOR('v', 4, __u16) +#define SONYPI_IOCGBAT2REM _IOR('v', 5, __u16) + +/* get battery flags: battery1/battery2/ac adapter present */ +#define SONYPI_BFLAGS_B1 0x01 +#define SONYPI_BFLAGS_B2 0x02 +#define SONYPI_BFLAGS_AC 0x04 +#define SONYPI_IOCGBATFLAGS _IOR('v', 7, __u8) + +/* get/set bluetooth subsystem state on/off */ +#define SONYPI_IOCGBLUE _IOR('v', 8, __u8) +#define SONYPI_IOCSBLUE _IOW('v', 9, __u8) + +/* get/set fan state on/off */ +#define SONYPI_IOCGFAN _IOR('v', 10, __u8) +#define SONYPI_IOCSFAN _IOW('v', 11, __u8) + +/* get temperature (C) */ +#define SONYPI_IOCGTEMP _IOR('v', 12, __u8) + +#ifdef __KERNEL__ + +/* used only for communication between v4l and sonypi */ + +#define SONYPI_COMMAND_GETCAMERA 1 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERA 2 +#define SONYPI_COMMAND_GETCAMERABRIGHTNESS 3 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERABRIGHTNESS 4 +#define SONYPI_COMMAND_GETCAMERACONTRAST 5 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERACONTRAST 6 +#define SONYPI_COMMAND_GETCAMERAHUE 7 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERAHUE 8 +#define SONYPI_COMMAND_GETCAMERACOLOR 9 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERACOLOR 10 +#define SONYPI_COMMAND_GETCAMERASHARPNESS 11 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERASHARPNESS 12 +#define SONYPI_COMMAND_GETCAMERAPICTURE 13 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERAPICTURE 14 +#define SONYPI_COMMAND_GETCAMERAAGC 15 /* obsolete */ +#define SONYPI_COMMAND_SETCAMERAAGC 16 +#define SONYPI_COMMAND_GETCAMERADIRECTION 17 /* obsolete */ +#define SONYPI_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */ +#define SONYPI_COMMAND_GETCAMERAREVISION 19 /* obsolete */ + +#endif /* __KERNEL__ */ + +#endif /* _SONYPI_H_ */ diff --git a/include/linux/sort.h b/include/linux/sort.h new file mode 100644 index 00000000..d534da2b --- /dev/null +++ b/include/linux/sort.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_SORT_H +#define _LINUX_SORT_H + +#include <linux/types.h> + +void sort(void *base, size_t num, size_t size, + int (*cmp)(const void *, const void *), + void (*swap)(void *, void *, int)); + +#endif diff --git a/include/linux/sound.h b/include/linux/sound.h new file mode 100644 index 00000000..fae20ba0 --- /dev/null +++ b/include/linux/sound.h @@ -0,0 +1,48 @@ +#ifndef _LINUX_SOUND_H +#define _LINUX_SOUND_H + +/* + * Minor numbers for the sound driver. 
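+ *
+ * (Editorial note, not part of the original header: these are the OSS
+ * minor numbers on character major 14; each additional card shifts them
+ * by 16, so for example /dev/dsp is minor 3 and /dev/dsp1 is minor 19.)
+ *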
+ */ + +#include <linux/fs.h> + +#define SND_DEV_CTL 0 /* Control port /dev/mixer */ +#define SND_DEV_SEQ 1 /* Sequencer output /dev/sequencer (FM + synthesizer and MIDI output) */ +#define SND_DEV_MIDIN 2 /* Raw midi access */ +#define SND_DEV_DSP 3 /* Digitized voice /dev/dsp */ +#define SND_DEV_AUDIO 4 /* Sparc compatible /dev/audio */ +#define SND_DEV_DSP16 5 /* Like /dev/dsp but 16 bits/sample */ +/* #define SND_DEV_STATUS 6 */ /* /dev/sndstat (obsolete) */ +#define SND_DEV_UNUSED 6 +#define SND_DEV_AWFM 7 /* Reserved */ +#define SND_DEV_SEQ2 8 /* /dev/sequencer, level 2 interface */ +/* #define SND_DEV_SNDPROC 9 */ /* /dev/sndproc for programmable devices (not used) */ +/* #define SND_DEV_DMMIDI 9 */ +#define SND_DEV_SYNTH 9 /* Raw synth access /dev/synth (same as /dev/dmfm) */ +#define SND_DEV_DMFM 10 /* Raw synth access /dev/dmfm */ +#define SND_DEV_UNKNOWN11 11 +#define SND_DEV_ADSP 12 /* Like /dev/dsp (obsolete) */ +#define SND_DEV_AMIDI 13 /* Like /dev/midi (obsolete) */ +#define SND_DEV_ADMMIDI 14 /* Like /dev/dmmidi (onsolete) */ + +#ifdef __KERNEL__ +/* + * Sound core interface functions + */ + +struct device; +extern int register_sound_special(const struct file_operations *fops, int unit); +extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev); +extern int register_sound_mixer(const struct file_operations *fops, int dev); +extern int register_sound_midi(const struct file_operations *fops, int dev); +extern int register_sound_dsp(const struct file_operations *fops, int dev); + +extern void unregister_sound_special(int unit); +extern void unregister_sound_mixer(int unit); +extern void unregister_sound_midi(int unit); +extern void unregister_sound_dsp(int unit); +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SOUND_H */ diff --git a/include/linux/soundcard.h b/include/linux/soundcard.h new file mode 100644 index 00000000..dfcf86f0 --- /dev/null +++ b/include/linux/soundcard.h @@ -0,0 +1,1292 @@ +#ifndef SOUNDCARD_H +#define SOUNDCARD_H +/* + * Copyright by Hannu Savolainen 1993-1997 + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 2. + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +/* + * OSS interface version. With versions earlier than 3.6 this value is + * an integer with value less than 361. 
In versions 3.6 and later + * it's a six digit hexadecimal value. For example value + * of 0x030600 represents OSS version 3.6.0. + * Use ioctl(fd, OSS_GETVERSION, &int) to get the version number of + * the currently active driver. + */ +#define SOUND_VERSION 0x030802 +#define OPEN_SOUND_SYSTEM + +/* In Linux we need to be prepared for cross compiling */ +#include <linux/ioctl.h> + +/* Endian macros. */ +#ifdef __KERNEL__ +# include <asm/byteorder.h> +#else +# include <endian.h> +#endif + +/* + * Supported card ID numbers (Should be somewhere else?) + */ + +#define SNDCARD_ADLIB 1 +#define SNDCARD_SB 2 +#define SNDCARD_PAS 3 +#define SNDCARD_GUS 4 +#define SNDCARD_MPU401 5 +#define SNDCARD_SB16 6 +#define SNDCARD_SB16MIDI 7 +#define SNDCARD_UART6850 8 +#define SNDCARD_GUS16 9 +#define SNDCARD_MSS 10 +#define SNDCARD_PSS 11 +#define SNDCARD_SSCAPE 12 +#define SNDCARD_PSS_MPU 13 +#define SNDCARD_PSS_MSS 14 +#define SNDCARD_SSCAPE_MSS 15 +#define SNDCARD_TRXPRO 16 +#define SNDCARD_TRXPRO_SB 17 +#define SNDCARD_TRXPRO_MPU 18 +#define SNDCARD_MAD16 19 +#define SNDCARD_MAD16_MPU 20 +#define SNDCARD_CS4232 21 +#define SNDCARD_CS4232_MPU 22 +#define SNDCARD_MAUI 23 +#define SNDCARD_PSEUDO_MSS 24 +#define SNDCARD_GUSPNP 25 +#define SNDCARD_UART401 26 +/* Sound card numbers 27 to N are reserved. Don't add more numbers here. */ + +/*********************************** + * IOCTL Commands for /dev/sequencer + */ + +#ifndef _SIOWR +#if defined(_IOWR) && (defined(_AIX) || (!defined(sun) && !defined(sparc) && !defined(__sparc__) && !defined(__INCioctlh) && !defined(__Lynx__))) +/* Use already defined ioctl defines if they exist (except with Sun or Sparc) */ +#define SIOCPARM_MASK IOCPARM_MASK +#define SIOC_VOID IOC_VOID +#define SIOC_OUT IOC_OUT +#define SIOC_IN IOC_IN +#define SIOC_INOUT IOC_INOUT +#define _SIOC_SIZE _IOC_SIZE +#define _SIOC_DIR _IOC_DIR +#define _SIOC_NONE _IOC_NONE +#define _SIOC_READ _IOC_READ +#define _SIOC_WRITE _IOC_WRITE +#define _SIO _IO +#define _SIOR _IOR +#define _SIOW _IOW +#define _SIOWR _IOWR +#else + +/* Ioctl's have the command encoded in the lower word, + * and the size of any in or out parameters in the upper + * word. The high 2 bits of the upper word are used + * to encode the in/out status of the parameter; for now + * we restrict parameters to at most 8191 bytes. 
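+ *
+ * As a worked illustration (an editorial addition, not from the original
+ * header), with the definitions below
+ *
+ *	_SIOR('P', 11, int)
+ *
+ * expands to
+ *
+ *	SIOC_OUT | ((sizeof(int) & SIOCPARM_MASK) << 16) | ('P' << 8) | 11
+ *
+ * i.e. a "copy out" ioctl in the 'P' group, command 11, returning an int
+ * (this is SNDCTL_DSP_GETFMTS further down in this file).
+ *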
+ */ +/* #define SIOCTYPE (0xff<<8) */ +#define SIOCPARM_MASK 0x1fff /* parameters must be < 8192 bytes */ +#define SIOC_VOID 0x00000000 /* no parameters */ +#define SIOC_OUT 0x20000000 /* copy out parameters */ +#define SIOC_IN 0x40000000 /* copy in parameters */ +#define SIOC_INOUT (SIOC_IN|SIOC_OUT) +/* the 0x20000000 is so we can distinguish new ioctl's from old */ +#define _SIO(x,y) ((int)(SIOC_VOID|(x<<8)|y)) +#define _SIOR(x,y,t) ((int)(SIOC_OUT|((sizeof(t)&SIOCPARM_MASK)<<16)|(x<<8)|y)) +#define _SIOW(x,y,t) ((int)(SIOC_IN|((sizeof(t)&SIOCPARM_MASK)<<16)|(x<<8)|y)) +/* this should be _SIORW, but stdio got there first */ +#define _SIOWR(x,y,t) ((int)(SIOC_INOUT|((sizeof(t)&SIOCPARM_MASK)<<16)|(x<<8)|y)) +#define _SIOC_SIZE(x) ((x>>16)&SIOCPARM_MASK) +#define _SIOC_DIR(x) (x & 0xf0000000) +#define _SIOC_NONE SIOC_VOID +#define _SIOC_READ SIOC_OUT +#define _SIOC_WRITE SIOC_IN +# endif /* _IOWR */ +#endif /* !_SIOWR */ + +#define SNDCTL_SEQ_RESET _SIO ('Q', 0) +#define SNDCTL_SEQ_SYNC _SIO ('Q', 1) +#define SNDCTL_SYNTH_INFO _SIOWR('Q', 2, struct synth_info) +#define SNDCTL_SEQ_CTRLRATE _SIOWR('Q', 3, int) /* Set/get timer resolution (HZ) */ +#define SNDCTL_SEQ_GETOUTCOUNT _SIOR ('Q', 4, int) +#define SNDCTL_SEQ_GETINCOUNT _SIOR ('Q', 5, int) +#define SNDCTL_SEQ_PERCMODE _SIOW ('Q', 6, int) +#define SNDCTL_FM_LOAD_INSTR _SIOW ('Q', 7, struct sbi_instrument) /* Obsolete. Don't use!!!!!! */ +#define SNDCTL_SEQ_TESTMIDI _SIOW ('Q', 8, int) +#define SNDCTL_SEQ_RESETSAMPLES _SIOW ('Q', 9, int) +#define SNDCTL_SEQ_NRSYNTHS _SIOR ('Q',10, int) +#define SNDCTL_SEQ_NRMIDIS _SIOR ('Q',11, int) +#define SNDCTL_MIDI_INFO _SIOWR('Q',12, struct midi_info) +#define SNDCTL_SEQ_THRESHOLD _SIOW ('Q',13, int) +#define SNDCTL_SYNTH_MEMAVL _SIOWR('Q',14, int) /* in=dev#, out=memsize */ +#define SNDCTL_FM_4OP_ENABLE _SIOW ('Q',15, int) /* in=dev# */ +#define SNDCTL_SEQ_PANIC _SIO ('Q',17) +#define SNDCTL_SEQ_OUTOFBAND _SIOW ('Q',18, struct seq_event_rec) +#define SNDCTL_SEQ_GETTIME _SIOR ('Q',19, int) +#define SNDCTL_SYNTH_ID _SIOWR('Q',20, struct synth_info) +#define SNDCTL_SYNTH_CONTROL _SIOWR('Q',21, struct synth_control) +#define SNDCTL_SYNTH_REMOVESAMPLE _SIOWR('Q',22, struct remove_sample) + +typedef struct synth_control +{ + int devno; /* Synthesizer # */ + char data[4000]; /* Device spesific command/data record */ +}synth_control; + +typedef struct remove_sample +{ + int devno; /* Synthesizer # */ + int bankno; /* MIDI bank # (0=General MIDI) */ + int instrno; /* MIDI instrument number */ +} remove_sample; + +typedef struct seq_event_rec { + unsigned char arr[8]; +} seq_event_rec; + +#define SNDCTL_TMR_TIMEBASE _SIOWR('T', 1, int) +#define SNDCTL_TMR_START _SIO ('T', 2) +#define SNDCTL_TMR_STOP _SIO ('T', 3) +#define SNDCTL_TMR_CONTINUE _SIO ('T', 4) +#define SNDCTL_TMR_TEMPO _SIOWR('T', 5, int) +#define SNDCTL_TMR_SOURCE _SIOWR('T', 6, int) +# define TMR_INTERNAL 0x00000001 +# define TMR_EXTERNAL 0x00000002 +# define TMR_MODE_MIDI 0x00000010 +# define TMR_MODE_FSK 0x00000020 +# define TMR_MODE_CLS 0x00000040 +# define TMR_MODE_SMPTE 0x00000080 +#define SNDCTL_TMR_METRONOME _SIOW ('T', 7, int) +#define SNDCTL_TMR_SELECT _SIOW ('T', 8, int) + +/* + * Some big endian/little endian handling macros + */ + +#define _LINUX_PATCHKEY_H_INDIRECT +#include <linux/patchkey.h> +#undef _LINUX_PATCHKEY_H_INDIRECT + +#if defined(__KERNEL__) +# if defined(__BIG_ENDIAN) +# define AFMT_S16_NE AFMT_S16_BE +# elif defined(__LITTLE_ENDIAN) +# define AFMT_S16_NE AFMT_S16_LE +# else +# error "could not determine byte 
order" +# endif +#else +# if defined(__BYTE_ORDER) +# if __BYTE_ORDER == __BIG_ENDIAN +# define AFMT_S16_NE AFMT_S16_BE +# elif __BYTE_ORDER == __LITTLE_ENDIAN +# define AFMT_S16_NE AFMT_S16_LE +# else +# error "could not determine byte order" +# endif +# endif +#endif + +/* + * Sample loading mechanism for internal synthesizers (/dev/sequencer) + * The following patch_info structure has been designed to support + * Gravis UltraSound. It tries to be universal format for uploading + * sample based patches but is probably too limited. + * + * (PBD) As Hannu guessed, the GUS structure is too limited for + * the WaveFront, but this is the right place for a constant definition. + */ + +struct patch_info { + unsigned short key; /* Use WAVE_PATCH here */ +#define WAVE_PATCH _PATCHKEY(0x04) +#define GUS_PATCH WAVE_PATCH +#define WAVEFRONT_PATCH _PATCHKEY(0x06) + + short device_no; /* Synthesizer number */ + short instr_no; /* Midi pgm# */ + + unsigned int mode; +/* + * The least significant byte has the same format than the GUS .PAT + * files + */ +#define WAVE_16_BITS 0x01 /* bit 0 = 8 or 16 bit wave data. */ +#define WAVE_UNSIGNED 0x02 /* bit 1 = Signed - Unsigned data. */ +#define WAVE_LOOPING 0x04 /* bit 2 = looping enabled-1. */ +#define WAVE_BIDIR_LOOP 0x08 /* bit 3 = Set is bidirectional looping. */ +#define WAVE_LOOP_BACK 0x10 /* bit 4 = Set is looping backward. */ +#define WAVE_SUSTAIN_ON 0x20 /* bit 5 = Turn sustaining on. (Env. pts. 3)*/ +#define WAVE_ENVELOPES 0x40 /* bit 6 = Enable envelopes - 1 */ +#define WAVE_FAST_RELEASE 0x80 /* bit 7 = Shut off immediately after note off */ + /* (use the env_rate/env_offs fields). */ +/* Linux specific bits */ +#define WAVE_VIBRATO 0x00010000 /* The vibrato info is valid */ +#define WAVE_TREMOLO 0x00020000 /* The tremolo info is valid */ +#define WAVE_SCALE 0x00040000 /* The scaling info is valid */ +#define WAVE_FRACTIONS 0x00080000 /* Fraction information is valid */ +/* Reserved bits */ +#define WAVE_ROM 0x40000000 /* For future use */ +#define WAVE_MULAW 0x20000000 /* For future use */ +/* Other bits must be zeroed */ + + int len; /* Size of the wave data in bytes */ + int loop_start, loop_end; /* Byte offsets from the beginning */ + +/* + * The base_freq and base_note fields are used when computing the + * playback speed for a note. The base_note defines the tone frequency + * which is heard if the sample is played using the base_freq as the + * playback speed. + * + * The low_note and high_note fields define the minimum and maximum note + * frequencies for which this sample is valid. It is possible to define + * more than one samples for an instrument number at the same time. The + * low_note and high_note fields are used to select the most suitable one. + * + * The fields base_note, high_note and low_note should contain + * the note frequency multiplied by 1000. For example value for the + * middle A is 440*1000. + */ + + unsigned int base_freq; + unsigned int base_note; + unsigned int high_note; + unsigned int low_note; + int panning; /* -128=left, 127=right */ + int detuning; + +/* New fields introduced in version 1.99.5 */ + + /* Envelope. Enabled by mode bit WAVE_ENVELOPES */ + unsigned char env_rate[ 6 ]; /* GUS HW ramping rate */ + unsigned char env_offset[ 6 ]; /* 255 == 100% */ + + /* + * The tremolo, vibrato and scale info are not supported yet. 
+ * Enable by setting the mode bits WAVE_TREMOLO, WAVE_VIBRATO or + * WAVE_SCALE + */ + + unsigned char tremolo_sweep; + unsigned char tremolo_rate; + unsigned char tremolo_depth; + + unsigned char vibrato_sweep; + unsigned char vibrato_rate; + unsigned char vibrato_depth; + + int scale_frequency; + unsigned int scale_factor; /* from 0 to 2048 or 0 to 2 */ + + int volume; + int fractions; + int reserved1; + int spare[2]; + char data[1]; /* The waveform data starts here */ + }; + +struct sysex_info { + short key; /* Use SYSEX_PATCH or MAUI_PATCH here */ +#define SYSEX_PATCH _PATCHKEY(0x05) +#define MAUI_PATCH _PATCHKEY(0x06) + short device_no; /* Synthesizer number */ + int len; /* Size of the sysex data in bytes */ + unsigned char data[1]; /* Sysex data starts here */ + }; + +/* + * /dev/sequencer input events. + * + * The data written to the /dev/sequencer is a stream of events. Events + * are records of 4 or 8 bytes. The first byte defines the size. + * Any number of events can be written with a write call. There + * is a set of macros for sending these events. Use these macros if you + * want to maximize portability of your program. + * + * Events SEQ_WAIT, SEQ_MIDIPUTC and SEQ_ECHO. Are also input events. + * (All input events are currently 4 bytes long. Be prepared to support + * 8 byte events also. If you receive any event having first byte >= 128, + * it's a 8 byte event. + * + * The events are documented at the end of this file. + * + * Normal events (4 bytes) + * There is also a 8 byte version of most of the 4 byte events. The + * 8 byte one is recommended. + */ +#define SEQ_NOTEOFF 0 +#define SEQ_FMNOTEOFF SEQ_NOTEOFF /* Just old name */ +#define SEQ_NOTEON 1 +#define SEQ_FMNOTEON SEQ_NOTEON +#define SEQ_WAIT TMR_WAIT_ABS +#define SEQ_PGMCHANGE 3 +#define SEQ_FMPGMCHANGE SEQ_PGMCHANGE +#define SEQ_SYNCTIMER TMR_START +#define SEQ_MIDIPUTC 5 +#define SEQ_DRUMON 6 /*** OBSOLETE ***/ +#define SEQ_DRUMOFF 7 /*** OBSOLETE ***/ +#define SEQ_ECHO TMR_ECHO /* For synching programs with output */ +#define SEQ_AFTERTOUCH 9 +#define SEQ_CONTROLLER 10 + +/******************************************* + * Midi controller numbers + ******************************************* + * Controllers 0 to 31 (0x00 to 0x1f) and + * 32 to 63 (0x20 to 0x3f) are continuous + * controllers. + * In the MIDI 1.0 these controllers are sent using + * two messages. Controller numbers 0 to 31 are used + * to send the MSB and the controller numbers 32 to 63 + * are for the LSB. Note that just 7 bits are used in MIDI bytes. + */ + +#define CTL_BANK_SELECT 0x00 +#define CTL_MODWHEEL 0x01 +#define CTL_BREATH 0x02 +/* undefined 0x03 */ +#define CTL_FOOT 0x04 +#define CTL_PORTAMENTO_TIME 0x05 +#define CTL_DATA_ENTRY 0x06 +#define CTL_MAIN_VOLUME 0x07 +#define CTL_BALANCE 0x08 +/* undefined 0x09 */ +#define CTL_PAN 0x0a +#define CTL_EXPRESSION 0x0b +/* undefined 0x0c */ +/* undefined 0x0d */ +/* undefined 0x0e */ +/* undefined 0x0f */ +#define CTL_GENERAL_PURPOSE1 0x10 +#define CTL_GENERAL_PURPOSE2 0x11 +#define CTL_GENERAL_PURPOSE3 0x12 +#define CTL_GENERAL_PURPOSE4 0x13 +/* undefined 0x14 - 0x1f */ + +/* undefined 0x20 */ +/* The controller numbers 0x21 to 0x3f are reserved for the */ +/* least significant bytes of the controllers 0x00 to 0x1f. */ +/* These controllers are not recognised by the driver. */ + +/* Controllers 64 to 69 (0x40 to 0x45) are on/off switches. 
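+ *
+ * (Editorial example for the MSB/LSB pairing described above: a coarse
+ * channel-volume change is sent on CTL_MAIN_VOLUME (0x07); the matching
+ * fine-resolution LSB would arrive on controller 0x27 = 0x07 + 0x20,
+ * which this driver ignores.)
+ *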
*/ +/* 0=OFF and 127=ON (intermediate values are possible) */ +#define CTL_DAMPER_PEDAL 0x40 +#define CTL_SUSTAIN 0x40 /* Alias */ +#define CTL_HOLD 0x40 /* Alias */ +#define CTL_PORTAMENTO 0x41 +#define CTL_SOSTENUTO 0x42 +#define CTL_SOFT_PEDAL 0x43 +/* undefined 0x44 */ +#define CTL_HOLD2 0x45 +/* undefined 0x46 - 0x4f */ + +#define CTL_GENERAL_PURPOSE5 0x50 +#define CTL_GENERAL_PURPOSE6 0x51 +#define CTL_GENERAL_PURPOSE7 0x52 +#define CTL_GENERAL_PURPOSE8 0x53 +/* undefined 0x54 - 0x5a */ +#define CTL_EXT_EFF_DEPTH 0x5b +#define CTL_TREMOLO_DEPTH 0x5c +#define CTL_CHORUS_DEPTH 0x5d +#define CTL_DETUNE_DEPTH 0x5e +#define CTL_CELESTE_DEPTH 0x5e /* Alias for the above one */ +#define CTL_PHASER_DEPTH 0x5f +#define CTL_DATA_INCREMENT 0x60 +#define CTL_DATA_DECREMENT 0x61 +#define CTL_NONREG_PARM_NUM_LSB 0x62 +#define CTL_NONREG_PARM_NUM_MSB 0x63 +#define CTL_REGIST_PARM_NUM_LSB 0x64 +#define CTL_REGIST_PARM_NUM_MSB 0x65 +/* undefined 0x66 - 0x78 */ +/* reserved 0x79 - 0x7f */ + +/* Pseudo controllers (not midi compatible) */ +#define CTRL_PITCH_BENDER 255 +#define CTRL_PITCH_BENDER_RANGE 254 +#define CTRL_EXPRESSION 253 /* Obsolete */ +#define CTRL_MAIN_VOLUME 252 /* Obsolete */ +#define SEQ_BALANCE 11 +#define SEQ_VOLMODE 12 + +/* + * Volume mode decides how volumes are used + */ + +#define VOL_METHOD_ADAGIO 1 +#define VOL_METHOD_LINEAR 2 + +/* + * Note! SEQ_WAIT, SEQ_MIDIPUTC and SEQ_ECHO are used also as + * input events. + */ + +/* + * Event codes 0xf0 to 0xfc are reserved for future extensions. + */ + +#define SEQ_FULLSIZE 0xfd /* Long events */ +/* + * SEQ_FULLSIZE events are used for loading patches/samples to the + * synthesizer devices. These events are passed directly to the driver + * of the associated synthesizer device. There is no limit to the size + * of the extended events. These events are not queued but executed + * immediately when the write() is called (execution can take several + * seconds of time). + * + * When a SEQ_FULLSIZE message is written to the device, it must + * be written using exactly one write() call. Other events cannot + * be mixed to the same write. + * + * For FM synths (YM3812/OPL3) use struct sbi_instrument and write it to the + * /dev/sequencer. Don't write other data together with the instrument structure + * Set the key field of the structure to FM_PATCH. The device field is used to + * route the patch to the corresponding device. + * + * For wave table use struct patch_info. Initialize the key field + * to WAVE_PATCH. + */ +#define SEQ_PRIVATE 0xfe /* Low level HW dependent events (8 bytes) */ +#define SEQ_EXTENDED 0xff /* Extended events (8 bytes) OBSOLETE */ + +/* + * Record for FM patches + */ + +typedef unsigned char sbi_instr_data[32]; + +struct sbi_instrument { + unsigned short key; /* FM_PATCH or OPL3_PATCH */ +#define FM_PATCH _PATCHKEY(0x01) +#define OPL3_PATCH _PATCHKEY(0x03) + short device; /* Synth# (0-4) */ + int channel; /* Program# to be initialized */ + sbi_instr_data operators; /* Register settings for operator cells (.SBI format) */ + }; + +struct synth_info { /* Read only */ + char name[30]; + int device; /* 0-N. 
INITIALIZE BEFORE CALLING */ + int synth_type; +#define SYNTH_TYPE_FM 0 +#define SYNTH_TYPE_SAMPLE 1 +#define SYNTH_TYPE_MIDI 2 /* Midi interface */ + + int synth_subtype; +#define FM_TYPE_ADLIB 0x00 +#define FM_TYPE_OPL3 0x01 +#define MIDI_TYPE_MPU401 0x401 + +#define SAMPLE_TYPE_BASIC 0x10 +#define SAMPLE_TYPE_GUS SAMPLE_TYPE_BASIC +#define SAMPLE_TYPE_WAVEFRONT 0x11 + + int perc_mode; /* No longer supported */ + int nr_voices; + int nr_drums; /* Obsolete field */ + int instr_bank_size; + unsigned int capabilities; +#define SYNTH_CAP_PERCMODE 0x00000001 /* No longer used */ +#define SYNTH_CAP_OPL3 0x00000002 /* Set if OPL3 supported */ +#define SYNTH_CAP_INPUT 0x00000004 /* Input (MIDI) device */ + int dummies[19]; /* Reserve space */ + }; + +struct sound_timer_info { + char name[32]; + int caps; + }; + +#define MIDI_CAP_MPU401 1 /* MPU-401 intelligent mode */ + +struct midi_info { + char name[30]; + int device; /* 0-N. INITIALIZE BEFORE CALLING */ + unsigned int capabilities; /* To be defined later */ + int dev_type; + int dummies[18]; /* Reserve space */ + }; + +/******************************************** + * ioctl commands for the /dev/midi## + */ +typedef struct { + unsigned char cmd; + char nr_args, nr_returns; + unsigned char data[30]; + } mpu_command_rec; + +#define SNDCTL_MIDI_PRETIME _SIOWR('m', 0, int) +#define SNDCTL_MIDI_MPUMODE _SIOWR('m', 1, int) +#define SNDCTL_MIDI_MPUCMD _SIOWR('m', 2, mpu_command_rec) + +/******************************************** + * IOCTL commands for /dev/dsp and /dev/audio + */ + +#define SNDCTL_DSP_RESET _SIO ('P', 0) +#define SNDCTL_DSP_SYNC _SIO ('P', 1) +#define SNDCTL_DSP_SPEED _SIOWR('P', 2, int) +#define SNDCTL_DSP_STEREO _SIOWR('P', 3, int) +#define SNDCTL_DSP_GETBLKSIZE _SIOWR('P', 4, int) +#define SNDCTL_DSP_SAMPLESIZE SNDCTL_DSP_SETFMT +#define SNDCTL_DSP_CHANNELS _SIOWR('P', 6, int) +#define SOUND_PCM_WRITE_CHANNELS SNDCTL_DSP_CHANNELS +#define SOUND_PCM_WRITE_FILTER _SIOWR('P', 7, int) +#define SNDCTL_DSP_POST _SIO ('P', 8) +#define SNDCTL_DSP_SUBDIVIDE _SIOWR('P', 9, int) +#define SNDCTL_DSP_SETFRAGMENT _SIOWR('P',10, int) + +/* Audio data formats (Note! U8=8 and S16_LE=16 for compatibility) */ +#define SNDCTL_DSP_GETFMTS _SIOR ('P',11, int) /* Returns a mask */ +#define SNDCTL_DSP_SETFMT _SIOWR('P',5, int) /* Selects ONE fmt*/ +# define AFMT_QUERY 0x00000000 /* Return current fmt */ +# define AFMT_MU_LAW 0x00000001 +# define AFMT_A_LAW 0x00000002 +# define AFMT_IMA_ADPCM 0x00000004 +# define AFMT_U8 0x00000008 +# define AFMT_S16_LE 0x00000010 /* Little endian signed 16*/ +# define AFMT_S16_BE 0x00000020 /* Big endian signed 16 */ +# define AFMT_S8 0x00000040 +# define AFMT_U16_LE 0x00000080 /* Little endian U16 */ +# define AFMT_U16_BE 0x00000100 /* Big endian U16 */ +# define AFMT_MPEG 0x00000200 /* MPEG (2) audio */ +# define AFMT_AC3 0x00000400 /* Dolby Digital AC3 */ + +/* + * Buffer status queries. + */ +typedef struct audio_buf_info { + int fragments; /* # of available fragments (partially usend ones not counted) */ + int fragstotal; /* Total # of fragments allocated */ + int fragsize; /* Size of a fragment in bytes */ + + int bytes; /* Available space in bytes (includes partially used fragments) */ + /* Note! 
'bytes' could be more than fragments*fragsize */ + } audio_buf_info; + +#define SNDCTL_DSP_GETOSPACE _SIOR ('P',12, audio_buf_info) +#define SNDCTL_DSP_GETISPACE _SIOR ('P',13, audio_buf_info) +#define SNDCTL_DSP_NONBLOCK _SIO ('P',14) +#define SNDCTL_DSP_GETCAPS _SIOR ('P',15, int) +# define DSP_CAP_REVISION 0x000000ff /* Bits for revision level (0 to 255) */ +# define DSP_CAP_DUPLEX 0x00000100 /* Full duplex record/playback */ +# define DSP_CAP_REALTIME 0x00000200 /* Real time capability */ +# define DSP_CAP_BATCH 0x00000400 /* Device has some kind of */ + /* internal buffers which may */ + /* cause some delays and */ + /* decrease precision of timing */ +# define DSP_CAP_COPROC 0x00000800 /* Has a coprocessor */ + /* Sometimes it's a DSP */ + /* but usually not */ +# define DSP_CAP_TRIGGER 0x00001000 /* Supports SETTRIGGER */ +# define DSP_CAP_MMAP 0x00002000 /* Supports mmap() */ +# define DSP_CAP_MULTI 0x00004000 /* support multiple open */ +# define DSP_CAP_BIND 0x00008000 /* channel binding to front/rear/cneter/lfe */ + + +#define SNDCTL_DSP_GETTRIGGER _SIOR ('P',16, int) +#define SNDCTL_DSP_SETTRIGGER _SIOW ('P',16, int) +# define PCM_ENABLE_INPUT 0x00000001 +# define PCM_ENABLE_OUTPUT 0x00000002 + +typedef struct count_info { + int bytes; /* Total # of bytes processed */ + int blocks; /* # of fragment transitions since last time */ + int ptr; /* Current DMA pointer value */ + } count_info; + +#define SNDCTL_DSP_GETIPTR _SIOR ('P',17, count_info) +#define SNDCTL_DSP_GETOPTR _SIOR ('P',18, count_info) + +typedef struct buffmem_desc { + unsigned *buffer; + int size; + } buffmem_desc; +#define SNDCTL_DSP_MAPINBUF _SIOR ('P', 19, buffmem_desc) +#define SNDCTL_DSP_MAPOUTBUF _SIOR ('P', 20, buffmem_desc) +#define SNDCTL_DSP_SETSYNCRO _SIO ('P', 21) +#define SNDCTL_DSP_SETDUPLEX _SIO ('P', 22) +#define SNDCTL_DSP_GETODELAY _SIOR ('P', 23, int) + +#define SNDCTL_DSP_GETCHANNELMASK _SIOWR('P', 64, int) +#define SNDCTL_DSP_BIND_CHANNEL _SIOWR('P', 65, int) +# define DSP_BIND_QUERY 0x00000000 +# define DSP_BIND_FRONT 0x00000001 +# define DSP_BIND_SURR 0x00000002 +# define DSP_BIND_CENTER_LFE 0x00000004 +# define DSP_BIND_HANDSET 0x00000008 +# define DSP_BIND_MIC 0x00000010 +# define DSP_BIND_MODEM1 0x00000020 +# define DSP_BIND_MODEM2 0x00000040 +# define DSP_BIND_I2S 0x00000080 +# define DSP_BIND_SPDIF 0x00000100 + +#define SNDCTL_DSP_SETSPDIF _SIOW ('P', 66, int) +#define SNDCTL_DSP_GETSPDIF _SIOR ('P', 67, int) +# define SPDIF_PRO 0x0001 +# define SPDIF_N_AUD 0x0002 +# define SPDIF_COPY 0x0004 +# define SPDIF_PRE 0x0008 +# define SPDIF_CC 0x07f0 +# define SPDIF_L 0x0800 +# define SPDIF_DRS 0x4000 +# define SPDIF_V 0x8000 + +/* + * Application's profile defines the way how playback underrun situations should be handled. + * + * APF_NORMAL (the default) and APF_NETWORK make the driver to cleanup the + * playback buffer whenever an underrun occurs. This consumes some time + * prevents looping the existing buffer. + * APF_CPUINTENS is intended to be set by CPU intensive applications which + * are likely to run out of time occasionally. In this mode the buffer cleanup is + * disabled which saves CPU time but also let's the previous buffer content to + * be played during the "pause" after the underrun. 
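Most of the ioctls above are used together when a program opens the PCM device. The following user-space sketch is one plausible ordering (device path, sample rate and the open_dsp() name are illustrative, not mandated by this header); it ends with an SNDCTL_DSP_GETOSPACE query into the audio_buf_info structure defined above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>      /* conventional user-space include for these definitions */

int open_dsp(void)
{
        int fd = open("/dev/dsp", O_WRONLY);
        int fmt = AFMT_S16_LE, channels = 2, rate = 44100;
        audio_buf_info info;

        if (fd < 0)
                return -1;
        /* set the sample format first, then channel count, then speed */
        if (ioctl(fd, SNDCTL_DSP_SETFMT, &fmt) == -1 ||
            ioctl(fd, SNDCTL_DSP_CHANNELS, &channels) == -1 ||
            ioctl(fd, SNDCTL_DSP_SPEED, &rate) == -1) {
                close(fd);
                return -1;
        }
        if (ioctl(fd, SNDCTL_DSP_GETOSPACE, &info) == 0)
                printf("%d fragments of %d bytes\n", info.fragstotal, info.fragsize);
        return fd;
}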
+ */ +#define SNDCTL_DSP_PROFILE _SIOW ('P', 23, int) +#define APF_NORMAL 0 /* Normal applications */ +#define APF_NETWORK 1 /* Underruns probably caused by an "external" delay */ +#define APF_CPUINTENS 2 /* Underruns probably caused by "overheating" the CPU */ + +#define SOUND_PCM_READ_RATE _SIOR ('P', 2, int) +#define SOUND_PCM_READ_CHANNELS _SIOR ('P', 6, int) +#define SOUND_PCM_READ_BITS _SIOR ('P', 5, int) +#define SOUND_PCM_READ_FILTER _SIOR ('P', 7, int) + +/* Some alias names */ +#define SOUND_PCM_WRITE_BITS SNDCTL_DSP_SETFMT +#define SOUND_PCM_WRITE_RATE SNDCTL_DSP_SPEED +#define SOUND_PCM_POST SNDCTL_DSP_POST +#define SOUND_PCM_RESET SNDCTL_DSP_RESET +#define SOUND_PCM_SYNC SNDCTL_DSP_SYNC +#define SOUND_PCM_SUBDIVIDE SNDCTL_DSP_SUBDIVIDE +#define SOUND_PCM_SETFRAGMENT SNDCTL_DSP_SETFRAGMENT +#define SOUND_PCM_GETFMTS SNDCTL_DSP_GETFMTS +#define SOUND_PCM_SETFMT SNDCTL_DSP_SETFMT +#define SOUND_PCM_GETOSPACE SNDCTL_DSP_GETOSPACE +#define SOUND_PCM_GETISPACE SNDCTL_DSP_GETISPACE +#define SOUND_PCM_NONBLOCK SNDCTL_DSP_NONBLOCK +#define SOUND_PCM_GETCAPS SNDCTL_DSP_GETCAPS +#define SOUND_PCM_GETTRIGGER SNDCTL_DSP_GETTRIGGER +#define SOUND_PCM_SETTRIGGER SNDCTL_DSP_SETTRIGGER +#define SOUND_PCM_SETSYNCRO SNDCTL_DSP_SETSYNCRO +#define SOUND_PCM_GETIPTR SNDCTL_DSP_GETIPTR +#define SOUND_PCM_GETOPTR SNDCTL_DSP_GETOPTR +#define SOUND_PCM_MAPINBUF SNDCTL_DSP_MAPINBUF +#define SOUND_PCM_MAPOUTBUF SNDCTL_DSP_MAPOUTBUF + +/* + * ioctl calls to be used in communication with coprocessors and + * DSP chips. + */ + +typedef struct copr_buffer { + int command; /* Set to 0 if not used */ + int flags; +#define CPF_NONE 0x0000 +#define CPF_FIRST 0x0001 /* First block */ +#define CPF_LAST 0x0002 /* Last block */ + int len; + int offs; /* If required by the device (0 if not used) */ + + unsigned char data[4000]; /* NOTE! 4000 is not 4k */ + } copr_buffer; + +typedef struct copr_debug_buf { + int command; /* Used internally. Set to 0 */ + int parm1; + int parm2; + int flags; + int len; /* Length of data in bytes */ + } copr_debug_buf; + +typedef struct copr_msg { + int len; + unsigned char data[4000]; + } copr_msg; + +#define SNDCTL_COPR_RESET _SIO ('C', 0) +#define SNDCTL_COPR_LOAD _SIOWR('C', 1, copr_buffer) +#define SNDCTL_COPR_RDATA _SIOWR('C', 2, copr_debug_buf) +#define SNDCTL_COPR_RCODE _SIOWR('C', 3, copr_debug_buf) +#define SNDCTL_COPR_WDATA _SIOW ('C', 4, copr_debug_buf) +#define SNDCTL_COPR_WCODE _SIOW ('C', 5, copr_debug_buf) +#define SNDCTL_COPR_RUN _SIOWR('C', 6, copr_debug_buf) +#define SNDCTL_COPR_HALT _SIOWR('C', 7, copr_debug_buf) +#define SNDCTL_COPR_SENDMSG _SIOWR('C', 8, copr_msg) +#define SNDCTL_COPR_RCVMSG _SIOR ('C', 9, copr_msg) + +/********************************************* + * IOCTL commands for /dev/mixer + */ + +/* + * Mixer devices + * + * There can be up to 20 different analog mixer channels. The + * SOUND_MIXER_NRDEVICES gives the currently supported maximum. + * The SOUND_MIXER_READ_DEVMASK returns a bitmask which tells + * the devices supported by the particular mixer. 
+ */ + +#define SOUND_MIXER_NRDEVICES 25 +#define SOUND_MIXER_VOLUME 0 +#define SOUND_MIXER_BASS 1 +#define SOUND_MIXER_TREBLE 2 +#define SOUND_MIXER_SYNTH 3 +#define SOUND_MIXER_PCM 4 +#define SOUND_MIXER_SPEAKER 5 +#define SOUND_MIXER_LINE 6 +#define SOUND_MIXER_MIC 7 +#define SOUND_MIXER_CD 8 +#define SOUND_MIXER_IMIX 9 /* Recording monitor */ +#define SOUND_MIXER_ALTPCM 10 +#define SOUND_MIXER_RECLEV 11 /* Recording level */ +#define SOUND_MIXER_IGAIN 12 /* Input gain */ +#define SOUND_MIXER_OGAIN 13 /* Output gain */ +/* + * The AD1848 codec and compatibles have three line level inputs + * (line, aux1 and aux2). Since each card manufacturer have assigned + * different meanings to these inputs, it's inpractical to assign + * specific meanings (line, cd, synth etc.) to them. + */ +#define SOUND_MIXER_LINE1 14 /* Input source 1 (aux1) */ +#define SOUND_MIXER_LINE2 15 /* Input source 2 (aux2) */ +#define SOUND_MIXER_LINE3 16 /* Input source 3 (line) */ +#define SOUND_MIXER_DIGITAL1 17 /* Digital (input) 1 */ +#define SOUND_MIXER_DIGITAL2 18 /* Digital (input) 2 */ +#define SOUND_MIXER_DIGITAL3 19 /* Digital (input) 3 */ +#define SOUND_MIXER_PHONEIN 20 /* Phone input */ +#define SOUND_MIXER_PHONEOUT 21 /* Phone output */ +#define SOUND_MIXER_VIDEO 22 /* Video/TV (audio) in */ +#define SOUND_MIXER_RADIO 23 /* Radio in */ +#define SOUND_MIXER_MONITOR 24 /* Monitor (usually mic) volume */ + +/* Some on/off settings (SOUND_SPECIAL_MIN - SOUND_SPECIAL_MAX) */ +/* Not counted to SOUND_MIXER_NRDEVICES, but use the same number space */ +#define SOUND_ONOFF_MIN 28 +#define SOUND_ONOFF_MAX 30 + +/* Note! Number 31 cannot be used since the sign bit is reserved */ +#define SOUND_MIXER_NONE 31 + +/* + * The following unsupported macros are no longer functional. + * Use SOUND_MIXER_PRIVATE# macros in future. 
+ */ +#define SOUND_MIXER_ENHANCE SOUND_MIXER_NONE +#define SOUND_MIXER_MUTE SOUND_MIXER_NONE +#define SOUND_MIXER_LOUD SOUND_MIXER_NONE + + +#define SOUND_DEVICE_LABELS {"Vol ", "Bass ", "Trebl", "Synth", "Pcm ", "Spkr ", "Line ", \ + "Mic ", "CD ", "Mix ", "Pcm2 ", "Rec ", "IGain", "OGain", \ + "Line1", "Line2", "Line3", "Digital1", "Digital2", "Digital3", \ + "PhoneIn", "PhoneOut", "Video", "Radio", "Monitor"} + +#define SOUND_DEVICE_NAMES {"vol", "bass", "treble", "synth", "pcm", "speaker", "line", \ + "mic", "cd", "mix", "pcm2", "rec", "igain", "ogain", \ + "line1", "line2", "line3", "dig1", "dig2", "dig3", \ + "phin", "phout", "video", "radio", "monitor"} + +/* Device bitmask identifiers */ + +#define SOUND_MIXER_RECSRC 0xff /* Arg contains a bit for each recording source */ +#define SOUND_MIXER_DEVMASK 0xfe /* Arg contains a bit for each supported device */ +#define SOUND_MIXER_RECMASK 0xfd /* Arg contains a bit for each supported recording source */ +#define SOUND_MIXER_CAPS 0xfc +# define SOUND_CAP_EXCL_INPUT 0x00000001 /* Only one recording source at a time */ +#define SOUND_MIXER_STEREODEVS 0xfb /* Mixer channels supporting stereo */ +#define SOUND_MIXER_OUTSRC 0xfa /* Arg contains a bit for each input source to output */ +#define SOUND_MIXER_OUTMASK 0xf9 /* Arg contains a bit for each supported input source to output */ + +/* Device mask bits */ + +#define SOUND_MASK_VOLUME (1 << SOUND_MIXER_VOLUME) +#define SOUND_MASK_BASS (1 << SOUND_MIXER_BASS) +#define SOUND_MASK_TREBLE (1 << SOUND_MIXER_TREBLE) +#define SOUND_MASK_SYNTH (1 << SOUND_MIXER_SYNTH) +#define SOUND_MASK_PCM (1 << SOUND_MIXER_PCM) +#define SOUND_MASK_SPEAKER (1 << SOUND_MIXER_SPEAKER) +#define SOUND_MASK_LINE (1 << SOUND_MIXER_LINE) +#define SOUND_MASK_MIC (1 << SOUND_MIXER_MIC) +#define SOUND_MASK_CD (1 << SOUND_MIXER_CD) +#define SOUND_MASK_IMIX (1 << SOUND_MIXER_IMIX) +#define SOUND_MASK_ALTPCM (1 << SOUND_MIXER_ALTPCM) +#define SOUND_MASK_RECLEV (1 << SOUND_MIXER_RECLEV) +#define SOUND_MASK_IGAIN (1 << SOUND_MIXER_IGAIN) +#define SOUND_MASK_OGAIN (1 << SOUND_MIXER_OGAIN) +#define SOUND_MASK_LINE1 (1 << SOUND_MIXER_LINE1) +#define SOUND_MASK_LINE2 (1 << SOUND_MIXER_LINE2) +#define SOUND_MASK_LINE3 (1 << SOUND_MIXER_LINE3) +#define SOUND_MASK_DIGITAL1 (1 << SOUND_MIXER_DIGITAL1) +#define SOUND_MASK_DIGITAL2 (1 << SOUND_MIXER_DIGITAL2) +#define SOUND_MASK_DIGITAL3 (1 << SOUND_MIXER_DIGITAL3) +#define SOUND_MASK_PHONEIN (1 << SOUND_MIXER_PHONEIN) +#define SOUND_MASK_PHONEOUT (1 << SOUND_MIXER_PHONEOUT) +#define SOUND_MASK_RADIO (1 << SOUND_MIXER_RADIO) +#define SOUND_MASK_VIDEO (1 << SOUND_MIXER_VIDEO) +#define SOUND_MASK_MONITOR (1 << SOUND_MIXER_MONITOR) + +/* Obsolete macros */ +#define SOUND_MASK_MUTE (1 << SOUND_MIXER_MUTE) +#define SOUND_MASK_ENHANCE (1 << SOUND_MIXER_ENHANCE) +#define SOUND_MASK_LOUD (1 << SOUND_MIXER_LOUD) + +#define MIXER_READ(dev) _SIOR('M', dev, int) +#define SOUND_MIXER_READ_VOLUME MIXER_READ(SOUND_MIXER_VOLUME) +#define SOUND_MIXER_READ_BASS MIXER_READ(SOUND_MIXER_BASS) +#define SOUND_MIXER_READ_TREBLE MIXER_READ(SOUND_MIXER_TREBLE) +#define SOUND_MIXER_READ_SYNTH MIXER_READ(SOUND_MIXER_SYNTH) +#define SOUND_MIXER_READ_PCM MIXER_READ(SOUND_MIXER_PCM) +#define SOUND_MIXER_READ_SPEAKER MIXER_READ(SOUND_MIXER_SPEAKER) +#define SOUND_MIXER_READ_LINE MIXER_READ(SOUND_MIXER_LINE) +#define SOUND_MIXER_READ_MIC MIXER_READ(SOUND_MIXER_MIC) +#define SOUND_MIXER_READ_CD MIXER_READ(SOUND_MIXER_CD) +#define SOUND_MIXER_READ_IMIX MIXER_READ(SOUND_MIXER_IMIX) +#define SOUND_MIXER_READ_ALTPCM 
MIXER_READ(SOUND_MIXER_ALTPCM) +#define SOUND_MIXER_READ_RECLEV MIXER_READ(SOUND_MIXER_RECLEV) +#define SOUND_MIXER_READ_IGAIN MIXER_READ(SOUND_MIXER_IGAIN) +#define SOUND_MIXER_READ_OGAIN MIXER_READ(SOUND_MIXER_OGAIN) +#define SOUND_MIXER_READ_LINE1 MIXER_READ(SOUND_MIXER_LINE1) +#define SOUND_MIXER_READ_LINE2 MIXER_READ(SOUND_MIXER_LINE2) +#define SOUND_MIXER_READ_LINE3 MIXER_READ(SOUND_MIXER_LINE3) + +/* Obsolete macros */ +#define SOUND_MIXER_READ_MUTE MIXER_READ(SOUND_MIXER_MUTE) +#define SOUND_MIXER_READ_ENHANCE MIXER_READ(SOUND_MIXER_ENHANCE) +#define SOUND_MIXER_READ_LOUD MIXER_READ(SOUND_MIXER_LOUD) + +#define SOUND_MIXER_READ_RECSRC MIXER_READ(SOUND_MIXER_RECSRC) +#define SOUND_MIXER_READ_DEVMASK MIXER_READ(SOUND_MIXER_DEVMASK) +#define SOUND_MIXER_READ_RECMASK MIXER_READ(SOUND_MIXER_RECMASK) +#define SOUND_MIXER_READ_STEREODEVS MIXER_READ(SOUND_MIXER_STEREODEVS) +#define SOUND_MIXER_READ_CAPS MIXER_READ(SOUND_MIXER_CAPS) + +#define MIXER_WRITE(dev) _SIOWR('M', dev, int) +#define SOUND_MIXER_WRITE_VOLUME MIXER_WRITE(SOUND_MIXER_VOLUME) +#define SOUND_MIXER_WRITE_BASS MIXER_WRITE(SOUND_MIXER_BASS) +#define SOUND_MIXER_WRITE_TREBLE MIXER_WRITE(SOUND_MIXER_TREBLE) +#define SOUND_MIXER_WRITE_SYNTH MIXER_WRITE(SOUND_MIXER_SYNTH) +#define SOUND_MIXER_WRITE_PCM MIXER_WRITE(SOUND_MIXER_PCM) +#define SOUND_MIXER_WRITE_SPEAKER MIXER_WRITE(SOUND_MIXER_SPEAKER) +#define SOUND_MIXER_WRITE_LINE MIXER_WRITE(SOUND_MIXER_LINE) +#define SOUND_MIXER_WRITE_MIC MIXER_WRITE(SOUND_MIXER_MIC) +#define SOUND_MIXER_WRITE_CD MIXER_WRITE(SOUND_MIXER_CD) +#define SOUND_MIXER_WRITE_IMIX MIXER_WRITE(SOUND_MIXER_IMIX) +#define SOUND_MIXER_WRITE_ALTPCM MIXER_WRITE(SOUND_MIXER_ALTPCM) +#define SOUND_MIXER_WRITE_RECLEV MIXER_WRITE(SOUND_MIXER_RECLEV) +#define SOUND_MIXER_WRITE_IGAIN MIXER_WRITE(SOUND_MIXER_IGAIN) +#define SOUND_MIXER_WRITE_OGAIN MIXER_WRITE(SOUND_MIXER_OGAIN) +#define SOUND_MIXER_WRITE_LINE1 MIXER_WRITE(SOUND_MIXER_LINE1) +#define SOUND_MIXER_WRITE_LINE2 MIXER_WRITE(SOUND_MIXER_LINE2) +#define SOUND_MIXER_WRITE_LINE3 MIXER_WRITE(SOUND_MIXER_LINE3) + +/* Obsolete macros */ +#define SOUND_MIXER_WRITE_MUTE MIXER_WRITE(SOUND_MIXER_MUTE) +#define SOUND_MIXER_WRITE_ENHANCE MIXER_WRITE(SOUND_MIXER_ENHANCE) +#define SOUND_MIXER_WRITE_LOUD MIXER_WRITE(SOUND_MIXER_LOUD) + +#define SOUND_MIXER_WRITE_RECSRC MIXER_WRITE(SOUND_MIXER_RECSRC) + +typedef struct mixer_info +{ + char id[16]; + char name[32]; + int modify_counter; + int fillers[10]; +} mixer_info; + +typedef struct _old_mixer_info /* Obsolete */ +{ + char id[16]; + char name[32]; +} _old_mixer_info; + +#define SOUND_MIXER_INFO _SIOR ('M', 101, mixer_info) +#define SOUND_OLD_MIXER_INFO _SIOR ('M', 101, _old_mixer_info) + +/* + * A mechanism for accessing "proprietary" mixer features. This method + * permits passing 128 bytes of arbitrary data between a mixer application + * and the mixer driver. Interpretation of the record is defined by + * the particular mixer driver. + */ +typedef unsigned char mixer_record[128]; + +#define SOUND_MIXER_ACCESS _SIOWR('M', 102, mixer_record) + +/* + * Two ioctls for special souncard function + */ +#define SOUND_MIXER_AGC _SIOWR('M', 103, int) +#define SOUND_MIXER_3DSE _SIOWR('M', 104, int) + +/* + * The SOUND_MIXER_PRIVATE# commands can be redefined by low level drivers. + * These features can be used when accessing device specific features. 
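Before the private commands below, it may help to see the generic MIXER_READ/MIXER_WRITE macros above in use. The usual OSS convention (not spelled out in this header) is that the int argument carries the left-channel percentage in the low byte and the right-channel percentage in the next byte; the function name below is illustrative only.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>

/* Set the master volume; left/right are percentages (0..100). */
int set_master_volume(int left, int right)
{
        int fd = open("/dev/mixer", O_WRONLY);
        int vol = (left & 0xff) | ((right & 0xff) << 8);
        int ret;

        if (fd < 0)
                return -1;
        ret = ioctl(fd, SOUND_MIXER_WRITE_VOLUME, &vol);
        close(fd);
        return ret;
}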
+ */ +#define SOUND_MIXER_PRIVATE1 _SIOWR('M', 111, int) +#define SOUND_MIXER_PRIVATE2 _SIOWR('M', 112, int) +#define SOUND_MIXER_PRIVATE3 _SIOWR('M', 113, int) +#define SOUND_MIXER_PRIVATE4 _SIOWR('M', 114, int) +#define SOUND_MIXER_PRIVATE5 _SIOWR('M', 115, int) + +/* + * SOUND_MIXER_GETLEVELS and SOUND_MIXER_SETLEVELS calls can be used + * for querying current mixer settings from the driver and for loading + * default volume settings _prior_ activating the mixer (loading + * doesn't affect current state of the mixer hardware). These calls + * are for internal use only. + */ + +typedef struct mixer_vol_table { + int num; /* Index to volume table */ + char name[32]; + int levels[32]; +} mixer_vol_table; + +#define SOUND_MIXER_GETLEVELS _SIOWR('M', 116, mixer_vol_table) +#define SOUND_MIXER_SETLEVELS _SIOWR('M', 117, mixer_vol_table) + +/* + * An ioctl for identifying the driver version. It will return value + * of the SOUND_VERSION macro used when compiling the driver. + * This call was introduced in OSS version 3.6 and it will not work + * with earlier versions (returns EINVAL). + */ +#define OSS_GETVERSION _SIOR ('M', 118, int) + +/* + * Level 2 event types for /dev/sequencer + */ + +/* + * The 4 most significant bits of byte 0 specify the class of + * the event: + * + * 0x8X = system level events, + * 0x9X = device/port specific events, event[1] = device/port, + * The last 4 bits give the subtype: + * 0x02 = Channel event (event[3] = chn). + * 0x01 = note event (event[4] = note). + * (0x01 is not used alone but always with bit 0x02). + * event[2] = MIDI message code (0x80=note off etc.) + * + */ + +#define EV_SEQ_LOCAL 0x80 +#define EV_TIMING 0x81 +#define EV_CHN_COMMON 0x92 +#define EV_CHN_VOICE 0x93 +#define EV_SYSEX 0x94 +/* + * Event types 200 to 220 are reserved for application use. + * These numbers will not be used by the driver. + */ + +/* + * Events for event type EV_CHN_VOICE + */ + +#define MIDI_NOTEOFF 0x80 +#define MIDI_NOTEON 0x90 +#define MIDI_KEY_PRESSURE 0xA0 + +/* + * Events for event type EV_CHN_COMMON + */ + +#define MIDI_CTL_CHANGE 0xB0 +#define MIDI_PGM_CHANGE 0xC0 +#define MIDI_CHN_PRESSURE 0xD0 +#define MIDI_PITCH_BEND 0xE0 + +#define MIDI_SYSTEM_PREFIX 0xF0 + +/* + * Timer event types + */ +#define TMR_WAIT_REL 1 /* Time relative to the prev time */ +#define TMR_WAIT_ABS 2 /* Absolute time since TMR_START */ +#define TMR_STOP 3 +#define TMR_START 4 +#define TMR_CONTINUE 5 +#define TMR_TEMPO 6 +#define TMR_ECHO 8 +#define TMR_CLOCK 9 /* MIDI clock */ +#define TMR_SPP 10 /* Song position pointer */ +#define TMR_TIMESIG 11 /* Time signature */ + +/* + * Local event types + */ +#define LOCL_STARTAUDIO 1 + +#if !defined(__KERNEL__) || defined(USE_SEQ_MACROS) +/* + * Some convenience macros to simplify programming of the + * /dev/sequencer interface + * + * This is a legacy interface for applications written against + * the OSSlib-3.8 style interface. It is no longer possible + * to actually link against OSSlib with this header, but we + * still provide these macros for programs using them. + * + * If you want to use OSSlib, it is recommended that you get + * the GPL version of OSS-4.x and build against that version + * of the header. + * + * We redefine the extern keyword so that make headers_check + * does not complain about SEQ_USE_EXTBUF. 
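A minimal program built on the convenience macros below looks roughly like this; the device path, note number and tick count are arbitrary, and the seqbuf_dump() body mirrors the sample implementation quoted further down in this header.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/soundcard.h>

SEQ_DEFINEBUF(2048);            /* provides _seqbuf, _seqbuflen and _seqbufptr */
int seqfd;

void seqbuf_dump(void)          /* the macros require this function */
{
        if (_seqbufptr && write(seqfd, _seqbuf, _seqbufptr) == -1) {
                perror("write /dev/sequencer");
                exit(1);
        }
        _seqbufptr = 0;
}

int main(void)
{
        seqfd = open("/dev/sequencer", O_WRONLY);
        if (seqfd < 0)
                return 1;
        SEQ_START_TIMER();
        SEQ_START_NOTE(0, 0, 60, 100);  /* synth 0, channel 0, middle C */
        SEQ_DELTA_TIME(96);             /* wait 96 ticks */
        SEQ_STOP_NOTE(0, 0, 60, 100);
        SEQ_DUMPBUF();                  /* flush the queued events */
        close(seqfd);
        return 0;
}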
+ */ +#define SEQ_DECLAREBUF() SEQ_USE_EXTBUF() + +void seqbuf_dump(void); /* This function must be provided by programs */ + +#define SEQ_PM_DEFINES int __foo_bar___ + +#define SEQ_LOAD_GMINSTR(dev, instr) +#define SEQ_LOAD_GMDRUM(dev, drum) + +#define _SEQ_EXTERN extern +#define SEQ_USE_EXTBUF() \ + _SEQ_EXTERN unsigned char _seqbuf[]; \ + _SEQ_EXTERN int _seqbuflen; _SEQ_EXTERN int _seqbufptr + +#ifndef USE_SIMPLE_MACROS +/* Sample seqbuf_dump() implementation: + * + * SEQ_DEFINEBUF (2048); -- Defines a buffer for 2048 bytes + * + * int seqfd; -- The file descriptor for /dev/sequencer. + * + * void + * seqbuf_dump () + * { + * if (_seqbufptr) + * if (write (seqfd, _seqbuf, _seqbufptr) == -1) + * { + * perror ("write /dev/sequencer"); + * exit (-1); + * } + * _seqbufptr = 0; + * } + */ + +#define SEQ_DEFINEBUF(len) unsigned char _seqbuf[len]; int _seqbuflen = len;int _seqbufptr = 0 +#define _SEQ_NEEDBUF(len) if ((_seqbufptr+(len)) > _seqbuflen) seqbuf_dump() +#define _SEQ_ADVBUF(len) _seqbufptr += len +#define SEQ_DUMPBUF seqbuf_dump +#else +/* + * This variation of the sequencer macros is used just to format one event + * using fixed buffer. + * + * The program using the macro library must define the following macros before + * using this library. + * + * #define _seqbuf name of the buffer (unsigned char[]) + * #define _SEQ_ADVBUF(len) If the applic needs to know the exact + * size of the event, this macro can be used. + * Otherwise this must be defined as empty. + * #define _seqbufptr Define the name of index variable or 0 if + * not required. + */ +#define _SEQ_NEEDBUF(len) /* empty */ +#endif + +#define SEQ_VOLUME_MODE(dev, mode) {_SEQ_NEEDBUF(8);\ + _seqbuf[_seqbufptr] = SEQ_EXTENDED;\ + _seqbuf[_seqbufptr+1] = SEQ_VOLMODE;\ + _seqbuf[_seqbufptr+2] = (dev);\ + _seqbuf[_seqbufptr+3] = (mode);\ + _seqbuf[_seqbufptr+4] = 0;\ + _seqbuf[_seqbufptr+5] = 0;\ + _seqbuf[_seqbufptr+6] = 0;\ + _seqbuf[_seqbufptr+7] = 0;\ + _SEQ_ADVBUF(8);} + +/* + * Midi voice messages + */ + +#define _CHN_VOICE(dev, event, chn, note, parm) \ + {_SEQ_NEEDBUF(8);\ + _seqbuf[_seqbufptr] = EV_CHN_VOICE;\ + _seqbuf[_seqbufptr+1] = (dev);\ + _seqbuf[_seqbufptr+2] = (event);\ + _seqbuf[_seqbufptr+3] = (chn);\ + _seqbuf[_seqbufptr+4] = (note);\ + _seqbuf[_seqbufptr+5] = (parm);\ + _seqbuf[_seqbufptr+6] = (0);\ + _seqbuf[_seqbufptr+7] = 0;\ + _SEQ_ADVBUF(8);} + +#define SEQ_START_NOTE(dev, chn, note, vol) \ + _CHN_VOICE(dev, MIDI_NOTEON, chn, note, vol) + +#define SEQ_STOP_NOTE(dev, chn, note, vol) \ + _CHN_VOICE(dev, MIDI_NOTEOFF, chn, note, vol) + +#define SEQ_KEY_PRESSURE(dev, chn, note, pressure) \ + _CHN_VOICE(dev, MIDI_KEY_PRESSURE, chn, note, pressure) + +/* + * Midi channel messages + */ + +#define _CHN_COMMON(dev, event, chn, p1, p2, w14) \ + {_SEQ_NEEDBUF(8);\ + _seqbuf[_seqbufptr] = EV_CHN_COMMON;\ + _seqbuf[_seqbufptr+1] = (dev);\ + _seqbuf[_seqbufptr+2] = (event);\ + _seqbuf[_seqbufptr+3] = (chn);\ + _seqbuf[_seqbufptr+4] = (p1);\ + _seqbuf[_seqbufptr+5] = (p2);\ + *(short *)&_seqbuf[_seqbufptr+6] = (w14);\ + _SEQ_ADVBUF(8);} +/* + * SEQ_SYSEX permits sending of sysex messages. (It may look that it permits + * sending any MIDI bytes but it's absolutely not possible. Trying to do + * so _will_ cause problems with MPU401 intelligent mode). + * + * Sysex messages are sent in blocks of 1 to 6 bytes. Longer messages must be + * sent by calling SEQ_SYSEX() several times (there must be no other events + * between them). 
First sysex fragment must have 0xf0 in the first byte + * and the last byte (buf[len-1] of the last fragment must be 0xf7. No byte + * between these sysex start and end markers cannot be larger than 0x7f. Also + * lengths of each fragments (except the last one) must be 6. + * + * Breaking the above rules may work with some MIDI ports but is likely to + * cause fatal problems with some other devices (such as MPU401). + */ +#define SEQ_SYSEX(dev, buf, len) \ + {int ii, ll=(len); \ + unsigned char *bufp=buf;\ + if (ll>6)ll=6;\ + _SEQ_NEEDBUF(8);\ + _seqbuf[_seqbufptr] = EV_SYSEX;\ + _seqbuf[_seqbufptr+1] = (dev);\ + for(ii=0;ii<ll;ii++)\ + _seqbuf[_seqbufptr+ii+2] = bufp[ii];\ + for(ii=ll;ii<6;ii++)\ + _seqbuf[_seqbufptr+ii+2] = 0xff;\ + _SEQ_ADVBUF(8);} + +#define SEQ_CHN_PRESSURE(dev, chn, pressure) \ + _CHN_COMMON(dev, MIDI_CHN_PRESSURE, chn, pressure, 0, 0) + +#define SEQ_SET_PATCH SEQ_PGM_CHANGE +#define SEQ_PGM_CHANGE(dev, chn, patch) \ + _CHN_COMMON(dev, MIDI_PGM_CHANGE, chn, patch, 0, 0) + +#define SEQ_CONTROL(dev, chn, controller, value) \ + _CHN_COMMON(dev, MIDI_CTL_CHANGE, chn, controller, 0, value) + +#define SEQ_BENDER(dev, chn, value) \ + _CHN_COMMON(dev, MIDI_PITCH_BEND, chn, 0, 0, value) + + +#define SEQ_V2_X_CONTROL(dev, voice, controller, value) {_SEQ_NEEDBUF(8);\ + _seqbuf[_seqbufptr] = SEQ_EXTENDED;\ + _seqbuf[_seqbufptr+1] = SEQ_CONTROLLER;\ + _seqbuf[_seqbufptr+2] = (dev);\ + _seqbuf[_seqbufptr+3] = (voice);\ + _seqbuf[_seqbufptr+4] = (controller);\ + _seqbuf[_seqbufptr+5] = ((value)&0xff);\ + _seqbuf[_seqbufptr+6] = ((value>>8)&0xff);\ + _seqbuf[_seqbufptr+7] = 0;\ + _SEQ_ADVBUF(8);} +/* + * The following 5 macros are incorrectly implemented and obsolete. + * Use SEQ_BENDER and SEQ_CONTROL (with proper controller) instead. + */ +#define SEQ_PITCHBEND(dev, voice, value) SEQ_V2_X_CONTROL(dev, voice, CTRL_PITCH_BENDER, value) +#define SEQ_BENDER_RANGE(dev, voice, value) SEQ_V2_X_CONTROL(dev, voice, CTRL_PITCH_BENDER_RANGE, value) +#define SEQ_EXPRESSION(dev, voice, value) SEQ_CONTROL(dev, voice, CTL_EXPRESSION, value*128) +#define SEQ_MAIN_VOLUME(dev, voice, value) SEQ_CONTROL(dev, voice, CTL_MAIN_VOLUME, (value*16383)/100) +#define SEQ_PANNING(dev, voice, pos) SEQ_CONTROL(dev, voice, CTL_PAN, (pos+128) / 2) + +/* + * Timing and synchronization macros + */ + +#define _TIMER_EVENT(ev, parm) {_SEQ_NEEDBUF(8);\ + _seqbuf[_seqbufptr+0] = EV_TIMING; \ + _seqbuf[_seqbufptr+1] = (ev); \ + _seqbuf[_seqbufptr+2] = 0;\ + _seqbuf[_seqbufptr+3] = 0;\ + *(unsigned int *)&_seqbuf[_seqbufptr+4] = (parm); \ + _SEQ_ADVBUF(8);} + +#define SEQ_START_TIMER() _TIMER_EVENT(TMR_START, 0) +#define SEQ_STOP_TIMER() _TIMER_EVENT(TMR_STOP, 0) +#define SEQ_CONTINUE_TIMER() _TIMER_EVENT(TMR_CONTINUE, 0) +#define SEQ_WAIT_TIME(ticks) _TIMER_EVENT(TMR_WAIT_ABS, ticks) +#define SEQ_DELTA_TIME(ticks) _TIMER_EVENT(TMR_WAIT_REL, ticks) +#define SEQ_ECHO_BACK(key) _TIMER_EVENT(TMR_ECHO, key) +#define SEQ_SET_TEMPO(value) _TIMER_EVENT(TMR_TEMPO, value) +#define SEQ_SONGPOS(pos) _TIMER_EVENT(TMR_SPP, pos) +#define SEQ_TIME_SIGNATURE(sig) _TIMER_EVENT(TMR_TIMESIG, sig) + +/* + * Local control events + */ + +#define _LOCAL_EVENT(ev, parm) {_SEQ_NEEDBUF(8);\ + _seqbuf[_seqbufptr+0] = EV_SEQ_LOCAL; \ + _seqbuf[_seqbufptr+1] = (ev); \ + _seqbuf[_seqbufptr+2] = 0;\ + _seqbuf[_seqbufptr+3] = 0;\ + *(unsigned int *)&_seqbuf[_seqbufptr+4] = (parm); \ + _SEQ_ADVBUF(8);} + +#define SEQ_PLAYAUDIO(devmask) _LOCAL_EVENT(LOCL_STARTAUDIO, devmask) +/* + * Events for the level 1 interface only + */ + +#define 
SEQ_MIDIOUT(device, byte) {_SEQ_NEEDBUF(4);\ + _seqbuf[_seqbufptr] = SEQ_MIDIPUTC;\ + _seqbuf[_seqbufptr+1] = (byte);\ + _seqbuf[_seqbufptr+2] = (device);\ + _seqbuf[_seqbufptr+3] = 0;\ + _SEQ_ADVBUF(4);} + +/* + * Patch loading. + */ +#define SEQ_WRPATCH(patchx, len) \ + {if (_seqbufptr) SEQ_DUMPBUF();\ + if (write(seqfd, (char*)(patchx), len)==-1) \ + perror("Write patch: /dev/sequencer");} +#define SEQ_WRPATCH2(patchx, len) \ + (SEQ_DUMPBUF(), write(seqfd, (char*)(patchx), len)) + +#endif +#endif diff --git a/include/linux/spi/74x164.h b/include/linux/spi/74x164.h new file mode 100644 index 00000000..0aa6acc7 --- /dev/null +++ b/include/linux/spi/74x164.h @@ -0,0 +1,9 @@ +#ifndef LINUX_SPI_74X164_H +#define LINUX_SPI_74X164_H + +struct gen_74x164_chip_platform_data { + /* number assigned to the first GPIO */ + unsigned base; +}; + +#endif diff --git a/include/linux/spi/Kbuild b/include/linux/spi/Kbuild new file mode 100644 index 00000000..d375a082 --- /dev/null +++ b/include/linux/spi/Kbuild @@ -0,0 +1 @@ +header-y += spidev.h diff --git a/include/linux/spi/ad7877.h b/include/linux/spi/ad7877.h new file mode 100644 index 00000000..cdbed816 --- /dev/null +++ b/include/linux/spi/ad7877.h @@ -0,0 +1,24 @@ +/* linux/spi/ad7877.h */ + +/* Touchscreen characteristics vary between boards and models. The + * platform_data for the device's "struct device" holds this information. + * + * It's OK if the min/max values are zero. + */ +struct ad7877_platform_data { + u16 model; /* 7877 */ + u16 vref_delay_usecs; /* 0 for external vref; etc */ + u16 x_plate_ohms; + u16 y_plate_ohms; + + u16 x_min, x_max; + u16 y_min, y_max; + u16 pressure_min, pressure_max; + + u8 stopacq_polarity; /* 1 = Active HIGH, 0 = Active LOW */ + u8 first_conversion_delay; /* 0 = 0.5us, 1 = 128us, 2 = 1ms, 3 = 8ms */ + u8 acquisition_time; /* 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */ + u8 averaging; /* 0 = 1, 1 = 4, 2 = 8, 3 = 16 */ + u8 pen_down_acc_interval; /* 0 = covert once, 1 = every 0.5 ms, + 2 = ever 1 ms, 3 = every 8 ms,*/ +}; diff --git a/include/linux/spi/ad7879.h b/include/linux/spi/ad7879.h new file mode 100644 index 00000000..6334cee1 --- /dev/null +++ b/include/linux/spi/ad7879.h @@ -0,0 +1,39 @@ +/* linux/spi/ad7879.h */ + +/* Touchscreen characteristics vary between boards and models. The + * platform_data for the device's "struct device" holds this information. + * + * It's OK if the min/max values are zero. + */ +struct ad7879_platform_data { + u16 model; /* 7879 */ + u16 x_plate_ohms; + u16 x_min, x_max; + u16 y_min, y_max; + u16 pressure_min, pressure_max; + + /* [0..255] 0=OFF Starts at 1=550us and goes + * all the way to 9.440ms in steps of 35us. + */ + u8 pen_down_acc_interval; + /* [0..15] Starts at 0=128us and goes all the + * way to 4.096ms in steps of 128us. + */ + u8 first_conversion_delay; + /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */ + u8 acquisition_time; + /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */ + u8 averaging; + /* [0..3] Perform X measurements 0 = OFF, + * 1 = 4, 2 = 8, 3 = 16 (median > averaging) + */ + u8 median; + /* 1 = AUX/VBAT/GPIO export GPIO to gpiolib + * requires CONFIG_GPIOLIB + */ + bool gpio_export; + /* identifies the first GPIO number handled by this chip; + * or, if negative, requests dynamic ID allocation. 
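As with the ad7877 structure above, board code normally fills in this structure and hands it over as platform_data. The sketch below is purely illustrative (the board_ad7879_ts name and every value are placeholders); the field set matches this declaration, including the gpio_base member that closes it just below.

static struct ad7879_platform_data board_ad7879_ts = {
        .model                  = 7879,
        .x_plate_ohms           = 620,
        .pressure_max           = 10000,
        .first_conversion_delay = 3,            /* in 128 us steps, see above */
        .acquisition_time       = 1,            /* 4 us */
        .averaging              = 1,            /* 4 samples */
        .median                 = 2,            /* 8 measurements */
        .pen_down_acc_interval  = 1,
        .gpio_export            = false,
        .gpio_base              = -1,           /* ask gpiolib for dynamic numbering */
};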
+ */ + s32 gpio_base; +}; diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h new file mode 100644 index 00000000..c64de9dd --- /dev/null +++ b/include/linux/spi/ads7846.h @@ -0,0 +1,60 @@ +/* linux/spi/ads7846.h */ + +/* Touchscreen characteristics vary between boards and models. The + * platform_data for the device's "struct device" holds this information. + * + * It's OK if the min/max values are zero. + */ +enum ads7846_filter { + ADS7846_FILTER_OK, + ADS7846_FILTER_REPEAT, + ADS7846_FILTER_IGNORE, +}; + +struct ads7846_platform_data { + u16 model; /* 7843, 7845, 7846, 7873. */ + u16 vref_delay_usecs; /* 0 for external vref; etc */ + u16 vref_mv; /* external vref value, milliVolts + * ads7846: if 0, use internal vref */ + bool keep_vref_on; /* set to keep vref on for differential + * measurements as well */ + bool swap_xy; /* swap x and y axes */ + + /* Settling time of the analog signals; a function of Vcc and the + * capacitance on the X/Y drivers. If set to non-zero, two samples + * are taken with settle_delay us apart, and the second one is used. + * ~150 uSec with 0.01uF caps. + */ + u16 settle_delay_usecs; + + /* If set to non-zero, after samples are taken this delay is applied + * and penirq is rechecked, to help avoid false events. This value + * is affected by the material used to build the touch layer. + */ + u16 penirq_recheck_delay_usecs; + + u16 x_plate_ohms; + u16 y_plate_ohms; + + u16 x_min, x_max; + u16 y_min, y_max; + u16 pressure_min, pressure_max; + + u16 debounce_max; /* max number of additional readings + * per sample */ + u16 debounce_tol; /* tolerance used for filtering */ + u16 debounce_rep; /* additional consecutive good readings + * required after the first two */ + int gpio_pendown; /* the GPIO used to decide the pendown + * state if get_pendown_state == NULL + */ + int (*get_pendown_state)(void); + int (*filter_init) (const struct ads7846_platform_data *pdata, + void **filter_data); + int (*filter) (void *filter_data, int data_idx, int *val); + void (*filter_cleanup)(void *filter_data); + void (*wait_for_sync)(void); + bool wakeup; + unsigned long irq_flags; +}; + diff --git a/include/linux/spi/at73c213.h b/include/linux/spi/at73c213.h new file mode 100644 index 00000000..0f20a70e --- /dev/null +++ b/include/linux/spi/at73c213.h @@ -0,0 +1,25 @@ +/* + * Board-specific data used to set up AT73c213 audio DAC driver. + */ + +#ifndef __LINUX_SPI_AT73C213_H +#define __LINUX_SPI_AT73C213_H + +/** + * at73c213_board_info - how the external DAC is wired to the device. + * + * @ssc_id: SSC platform_driver id the DAC shall use to stream the audio. + * @dac_clk: the external clock used to provide master clock to the DAC. + * @shortname: a short discription for the DAC, seen by userspace tools. + * + * This struct contains the configuration of the hardware connection to the + * external DAC. The DAC needs a master clock and a I2S audio stream. It also + * provides a name which is used to identify it in userspace tools. 
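Stepping back to the ads7846 structure above: boards usually tie it to a specific chip select through spi_board_info and register it at init time. In the sketch below the bus number, chip select, GPIO number, IRQ, speed and all board_* names are placeholders; spi_register_board_info() comes from <linux/spi/spi.h>.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>

static struct ads7846_platform_data board_ts_info = {
        .model          = 7846,
        .x_plate_ohms   = 180,
        .pressure_max   = 255,
        .debounce_max   = 10,
        .debounce_tol   = 3,
        .debounce_rep   = 1,
        .gpio_pendown   = 40,           /* illustrative GPIO number */
};

static struct spi_board_info board_spi_devices[] __initdata = {
        {
                .modalias       = "ads7846",
                .bus_num        = 1,
                .chip_select    = 0,
                .max_speed_hz   = 1500000,      /* keep well under the part's limit */
                .irq            = 0,            /* fill in the pendown IRQ for the board */
                .platform_data  = &board_ts_info,
        },
};

static void __init board_register_spi(void)
{
        spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
}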
+ */ +struct at73c213_board_info { + int ssc_id; + struct clk *dac_clk; + char shortname[32]; +}; + +#endif /* __LINUX_SPI_AT73C213_H */ diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h new file mode 100644 index 00000000..6692b341 --- /dev/null +++ b/include/linux/spi/corgi_lcd.h @@ -0,0 +1,20 @@ +#ifndef __LINUX_SPI_CORGI_LCD_H +#define __LINUX_SPI_CORGI_LCD_H + +#define CORGI_LCD_MODE_QVGA 1 +#define CORGI_LCD_MODE_VGA 2 + +struct corgi_lcd_platform_data { + int init_mode; + int max_intensity; + int default_intensity; + int limit_mask; + + int gpio_backlight_on; /* -1 if n/a */ + int gpio_backlight_cont; /* -1 if n/a */ + + void (*notify)(int intensity); + void (*kick_battery)(void); +}; + +#endif /* __LINUX_SPI_CORGI_LCD_H */ diff --git a/include/linux/spi/ds1305.h b/include/linux/spi/ds1305.h new file mode 100644 index 00000000..287ec830 --- /dev/null +++ b/include/linux/spi/ds1305.h @@ -0,0 +1,35 @@ +#ifndef __LINUX_SPI_DS1305_H +#define __LINUX_SPI_DS1305_H + +/* + * One-time configuration for ds1305 and ds1306 RTC chips. + * + * Put a pointer to this in spi_board_info.platform_data if you want to + * be sure that Linux (re)initializes this as needed ... after losing + * backup power, and potentially on the first boot. + */ +struct ds1305_platform_data { + + /* Trickle charge configuration: it's OK to leave out the MAGIC + * bitmask; mask in either DS1 or DS2, and then one of 2K/4k/8K. + */ +#define DS1305_TRICKLE_MAGIC 0xa0 +#define DS1305_TRICKLE_DS2 0x08 /* two diodes */ +#define DS1305_TRICKLE_DS1 0x04 /* one diode */ +#define DS1305_TRICKLE_2K 0x01 /* 2 KOhm resistance */ +#define DS1305_TRICKLE_4K 0x02 /* 4 KOhm resistance */ +#define DS1305_TRICKLE_8K 0x03 /* 8 KOhm resistance */ + u8 trickle; + + /* set only on ds1306 parts */ + bool is_ds1306; + + /* ds1306 only: enable 1 Hz output */ + bool en_1hz; + + /* REVISIT: the driver currently expects nINT0 to be wired + * as the alarm IRQ. ALM1 may also need to be set up ... + */ +}; + +#endif /* __LINUX_SPI_DS1305_H */ diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h new file mode 100644 index 00000000..306e7b1c --- /dev/null +++ b/include/linux/spi/eeprom.h @@ -0,0 +1,28 @@ +#ifndef __LINUX_SPI_EEPROM_H +#define __LINUX_SPI_EEPROM_H + +#include <linux/memory.h> + +/* + * Put one of these structures in platform_data for SPI EEPROMS handled + * by the "at25" driver. On SPI, most EEPROMS understand the same core + * command set. If you need to support EEPROMs that don't yet fit, add + * flags to support those protocol options. These values all come from + * the chip datasheets. 
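For instance, a board with a 32 KiB AT25-style part might describe it roughly as follows; the part name, size and page size are illustrative, and the fields are the ones declared just below. The structure would then be referenced from an spi_board_info entry whose modalias is "at25".

#include <linux/spi/eeprom.h>

static struct spi_eeprom board_eeprom = {
        .byte_len       = 32 * 1024,
        .name           = "at25256",    /* must fit in the 10-byte name field */
        .page_size      = 64,
        .flags          = EE_ADDR2,     /* 16 bit addressing */
};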
+ */ +struct spi_eeprom { + u32 byte_len; + char name[10]; + u16 page_size; /* for writes */ + u16 flags; +#define EE_ADDR1 0x0001 /* 8 bit addrs */ +#define EE_ADDR2 0x0002 /* 16 bit addrs */ +#define EE_ADDR3 0x0004 /* 24 bit addrs */ +#define EE_READONLY 0x0008 /* disallow writes */ + + /* for exporting this chip's data to other kernel code */ + void (*setup)(struct memory_accessor *mem, void *context); + void *context; +}; + +#endif /* __LINUX_SPI_EEPROM_H */ diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h new file mode 100644 index 00000000..3f22932e --- /dev/null +++ b/include/linux/spi/flash.h @@ -0,0 +1,31 @@ +#ifndef LINUX_SPI_FLASH_H +#define LINUX_SPI_FLASH_H + +struct mtd_partition; + +/** + * struct flash_platform_data: board-specific flash data + * @name: optional flash device name (eg, as used with mtdparts=) + * @parts: optional array of mtd_partitions for static partitioning + * @nr_parts: number of mtd_partitions for static partitoning + * @type: optional flash device type (e.g. m25p80 vs m25p64), for use + * with chips that can't be queried for JEDEC or other IDs + * + * Board init code (in arch/.../mach-xxx/board-yyy.c files) can + * provide information about SPI flash parts (such as DataFlash) to + * help set up the device and its appropriate default partitioning. + * + * Note that for DataFlash, sizes for pages, blocks, and sectors are + * rarely powers of two; and partitions should be sector-aligned. + */ +struct flash_platform_data { + char *name; + struct mtd_partition *parts; + unsigned int nr_parts; + + char *type; + + /* we'll likely add more ... use JEDEC IDs, etc */ +}; + +#endif diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h new file mode 100644 index 00000000..394fec9e --- /dev/null +++ b/include/linux/spi/ifx_modem.h @@ -0,0 +1,19 @@ +#ifndef LINUX_IFX_MODEM_H +#define LINUX_IFX_MODEM_H + +struct ifx_modem_platform_data { + unsigned short rst_out; /* modem reset out */ + unsigned short pwr_on; /* power on */ + unsigned short rst_pmu; /* reset modem */ + unsigned short tx_pwr; /* modem power threshold */ + unsigned short srdy; /* SRDY */ + unsigned short mrdy; /* MRDY */ + unsigned char modem_type; /* Modem type */ + unsigned long max_hz; /* max SPI frequency */ + unsigned short use_dma:1; /* spi protocol driver supplies + dma-able addrs */ +}; +#define IFX_MODEM_6160 1 +#define IFX_MODEM_6260 2 + +#endif diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h new file mode 100644 index 00000000..bc8677c8 --- /dev/null +++ b/include/linux/spi/l4f00242t03.h @@ -0,0 +1,29 @@ +/* + * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD + * + * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> + * Based on Marek Vasut work in lms283gf05.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ +#define _INCLUDE_LINUX_SPI_L4F00242T03_H_ + +struct l4f00242t03_pdata { + unsigned int reset_gpio; + unsigned int data_enable_gpio; +}; + +#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */ diff --git a/include/linux/spi/libertas_spi.h b/include/linux/spi/libertas_spi.h new file mode 100644 index 00000000..1b5d5384 --- /dev/null +++ b/include/linux/spi/libertas_spi.h @@ -0,0 +1,29 @@ +/* + * board-specific data for the libertas_spi driver. + * + * Copyright 2008 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ +#ifndef _LIBERTAS_SPI_H_ +#define _LIBERTAS_SPI_H_ + +struct spi_device; + +struct libertas_spi_platform_data { + /* There are two ways to read data from the WLAN module's SPI + * interface. Setting 0 or 1 here controls which one is used. + * + * Usually you want to set use_dummy_writes = 1. + * However, if that doesn't work or if you are using a slow SPI clock + * speed, you may want to use 0 here. */ + u16 use_dummy_writes; + + /* Board specific setup/teardown */ + int (*setup)(struct spi_device *spi); + int (*teardown)(struct spi_device *spi); +}; +#endif diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h new file mode 100644 index 00000000..555d254e --- /dev/null +++ b/include/linux/spi/lms283gf05.h @@ -0,0 +1,28 @@ +/* + * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD + * + * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ +#define _INCLUDE_LINUX_SPI_LMS283GF05_H_ + +struct lms283gf05_pdata { + unsigned long reset_gpio; + bool reset_inverted; +}; + +#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */ diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h new file mode 100644 index 00000000..bcaa2f76 --- /dev/null +++ b/include/linux/spi/max7301.h @@ -0,0 +1,35 @@ +#ifndef LINUX_SPI_MAX7301_H +#define LINUX_SPI_MAX7301_H + +#include <linux/gpio.h> + +/* + * Some registers must be read back to modify. 
+ * To save time we cache them here in memory + */ +struct max7301 { + struct mutex lock; + u8 port_config[8]; /* field 0 is unused */ + u32 out_level; /* cached output levels */ + u32 input_pullup_active; + struct gpio_chip chip; + struct device *dev; + int (*write)(struct device *dev, unsigned int reg, unsigned int val); + int (*read)(struct device *dev, unsigned int reg); +}; + +struct max7301_platform_data { + /* number assigned to the first GPIO */ + unsigned base; + /* + * bitmask controlling the pullup configuration, + * + * _note_ the 4 lowest bits are unused, because the first 4 + * ports of the controller are not used, too. + */ + u32 input_pullup_active; +}; + +extern int __max730x_remove(struct device *dev); +extern int __max730x_probe(struct max7301 *ts); +#endif diff --git a/include/linux/spi/mc33880.h b/include/linux/spi/mc33880.h new file mode 100644 index 00000000..82ffccd6 --- /dev/null +++ b/include/linux/spi/mc33880.h @@ -0,0 +1,10 @@ +#ifndef LINUX_SPI_MC33880_H +#define LINUX_SPI_MC33880_H + +struct mc33880_platform_data { + /* number assigned to the first GPIO */ + unsigned base; +}; + +#endif + diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h new file mode 100644 index 00000000..2d676d5a --- /dev/null +++ b/include/linux/spi/mcp23s08.h @@ -0,0 +1,25 @@ + +/* FIXME driver should be able to handle IRQs... */ + +struct mcp23s08_chip_info { + bool is_present; /* true if populated */ + unsigned pullups; /* BIT(x) means enable pullup x */ +}; + +struct mcp23s08_platform_data { + /* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI + * chipselect, each providing 1 gpio_chip instance with 8 gpios. + * For mpc23s17, up to 8 slaves (numbered 0..7) can share one SPI + * chipselect, each providing 1 gpio_chip (port A + port B) with + * 16 gpios. + */ + struct mcp23s08_chip_info chip[8]; + + /* "base" is the number of the first GPIO. Dynamic assignment is + * not currently supported, and even if there are gaps in chip + * addressing the GPIO numbers are sequential .. so for example + * if only slaves 0 and 3 are present, their GPIOs range from + * base to base+15 (or base+31 for s17 variant). + */ + unsigned base; +}; diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h new file mode 100644 index 00000000..32be8dbd --- /dev/null +++ b/include/linux/spi/mmc_spi.h @@ -0,0 +1,57 @@ +#ifndef __LINUX_SPI_MMC_SPI_H +#define __LINUX_SPI_MMC_SPI_H + +#include <linux/spi/spi.h> +#include <linux/interrupt.h> + +struct device; +struct mmc_host; + +/* Put this in platform_data of a device being used to manage an MMC/SD + * card slot. (Modeled after PXA mmc glue; see that for usage examples.) + * + * REVISIT This is not a spi-specific notion. Any card slot should be + * able to handle it. If the MMC core doesn't adopt this kind of notion, + * switch the "struct device *" parameters over to "struct spi_device *". + */ +struct mmc_spi_platform_data { + /* driver activation and (optional) card detect irq hookup */ + int (*init)(struct device *, + irqreturn_t (*)(int, void *), + void *); + void (*exit)(struct device *, void *); + + /* sense switch on sd cards */ + int (*get_ro)(struct device *); + + /* + * If board does not use CD interrupts, driver can optimize polling + * using this function. + */ + int (*get_cd)(struct device *); + + /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). 
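Looking back at the mcp23s08_platform_data above: a board with a single expander at chip address 0 might describe it along these lines. The base GPIO number, the pull-up mask and the board_gpio_expander name are placeholders.

#include <linux/bitops.h>
#include <linux/spi/mcp23s08.h>

static struct mcp23s08_platform_data board_gpio_expander = {
        .chip[0] = {
                .is_present     = true,
                .pullups        = BIT(4) | BIT(5),      /* enable pull-ups on two inputs */
        },
        .base = 128,            /* number of the expander's first GPIO */
};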
*/ + unsigned long caps; + + /* how long to debounce card detect, in msecs */ + u16 detect_delay; + + /* power management */ + u16 powerup_msecs; /* delay of up to 250 msec */ + u32 ocr_mask; /* available voltages */ + void (*setpower)(struct device *, unsigned int maskval); +}; + +#ifdef CONFIG_OF +extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi); +extern void mmc_spi_put_pdata(struct spi_device *spi); +#else +static inline struct mmc_spi_platform_data * +mmc_spi_get_pdata(struct spi_device *spi) +{ + return spi->dev.platform_data; +} +static inline void mmc_spi_put_pdata(struct spi_device *spi) {} +#endif /* CONFIG_OF */ + +#endif /* __LINUX_SPI_MMC_SPI_H */ diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h new file mode 100644 index 00000000..b4d9fa6f --- /dev/null +++ b/include/linux/spi/orion_spi.h @@ -0,0 +1,17 @@ +/* + * orion_spi.h + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LINUX_SPI_ORION_SPI_H +#define __LINUX_SPI_ORION_SPI_H + +struct orion_spi_info { + u32 tclk; /* no <linux/clk.h> support yet */ +}; + + +#endif diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h new file mode 100644 index 00000000..d3e1075f --- /dev/null +++ b/include/linux/spi/pxa2xx_spi.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef __linux_pxa2xx_spi_h +#define __linux_pxa2xx_spi_h + +#include <linux/pxa2xx_ssp.h> + +#define PXA2XX_CS_ASSERT (0x01) +#define PXA2XX_CS_DEASSERT (0x02) + +/* device.platform_data for SSP controller devices */ +struct pxa2xx_spi_master { + u32 clock_enable; + u16 num_chipselect; + u8 enable_dma; +}; + +/* spi_board_info.controller_data for SPI slave devices, + * copied to spi_device.platform_data ... mostly for dma tuning + */ +struct pxa2xx_spi_chip { + u8 tx_threshold; + u8 rx_threshold; + u8 dma_burst_size; + u32 timeout; + u8 enable_loopback; + int gpio_cs; + void (*cs_control)(u32 command); +}; + +#ifdef CONFIG_ARCH_PXA + +#include <linux/clk.h> +#include <mach/dma.h> + +extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); + +#else +/* + * This is the implemtation for CE4100 on x86. ARM defines them in mach/ or + * plat/ include path. + * The CE4100 does not provide DMA support. This bits are here to let the driver + * compile and will never be used. Maybe we get DMA support at a later point in + * time. 
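On a PXA board (the CONFIG_ARCH_PXA branch above), the two platform structures are typically filled in by board code along these lines; the SSP port number, thresholds, chip-select value and board_* names are placeholders. The chip structure is normally attached to a device through spi_board_info.controller_data, as the comment above notes.

#include <linux/spi/pxa2xx_spi.h>

static struct pxa2xx_spi_master board_ssp1_master_info = {
        .num_chipselect = 1,
        .enable_dma     = 1,
        /* .clock_enable is the SoC-specific clock-enable value, omitted here */
};

static struct pxa2xx_spi_chip board_spi_chip_info = {
        .tx_threshold   = 8,
        .rx_threshold   = 8,
        .dma_burst_size = 8,
        .gpio_cs        = -1,           /* use the controller's own chip select */
};

static void board_init_spi(void)
{
        pxa2xx_set_spi_info(1, &board_ssp1_master_info);        /* SSP port 1 */
}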
+ */ + +#define DCSR(n) (n) +#define DSADR(n) (n) +#define DTADR(n) (n) +#define DCMD(n) (n) +#define DRCMR(n) (n) + +#define DCSR_RUN (1 << 31) /* Run Bit */ +#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch */ +#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable */ +#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ +#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ +#define DCSR_ENDINTR (1 << 2) /* End Interrupt */ +#define DCSR_STARTINTR (1 << 1) /* Start Interrupt */ +#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt */ + +#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable */ +#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ +#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ +#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ +#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ +#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ +#define DCSR_EORINTR (1 << 9) /* The end of Receive */ + +#define DRCMR_MAPVLD (1 << 7) /* Map Valid */ +#define DRCMR_CHLNUM 0x1f /* mask for Channel Number */ + +#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor */ +#define DDADR_STOP (1 << 0) /* Stop */ + +#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ +#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ +#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ +#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ +#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ +#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ +#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ +#define DCMD_BURST8 (1 << 16) /* 8 byte burst */ +#define DCMD_BURST16 (2 << 16) /* 16 byte burst */ +#define DCMD_BURST32 (3 << 16) /* 32 byte burst */ +#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ +#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ +#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ +#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ + +/* + * Descriptor structure for PXA's DMA engine + * Note: this structure must always be aligned to a 16-byte boundary. + */ + +typedef enum { + DMA_PRIO_HIGH = 0, + DMA_PRIO_MEDIUM = 1, + DMA_PRIO_LOW = 2 +} pxa_dma_prio; + +/* + * DMA registration + */ + +static inline int pxa_request_dma(char *name, + pxa_dma_prio prio, + void (*irq_handler)(int, void *), + void *data) +{ + return -ENODEV; +} + +static inline void pxa_free_dma(int dma_ch) +{ +} + +/* + * The CE4100 does not have the clk framework implemented and SPI clock can + * not be switched on/off or the divider changed. + */ +static inline void clk_disable(struct clk *clk) +{ +} + +static inline int clk_enable(struct clk *clk) +{ + return 0; +} + +static inline unsigned long clk_get_rate(struct clk *clk) +{ + return 3686400; +} + +#endif +#endif diff --git a/include/linux/spi/s3c24xx.h b/include/linux/spi/s3c24xx.h new file mode 100644 index 00000000..c23b923e --- /dev/null +++ b/include/linux/spi/s3c24xx.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2006 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * + * S3C2410 - SPI Controller platform_device info + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#ifndef __LINUX_SPI_S3C24XX_H +#define __LINUX_SPI_S3C24XX_H __FILE__ + +struct s3c2410_spi_info { + int pin_cs; /* simple gpio cs */ + unsigned int num_cs; /* total chipselects */ + int bus_num; /* bus number to use. */ + + unsigned int use_fiq:1; /* use fiq */ + + void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable); + void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); +}; + +#endif /* __LINUX_SPI_S3C24XX_H */ diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h new file mode 100644 index 00000000..a1121f87 --- /dev/null +++ b/include/linux/spi/sh_hspi.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2011 Kuninori Morimoto + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef SH_HSPI_H +#define SH_HSPI_H + +struct sh_hspi_info { +}; + +#endif diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h new file mode 100644 index 00000000..2e8db3d2 --- /dev/null +++ b/include/linux/spi/sh_msiof.h @@ -0,0 +1,10 @@ +#ifndef __SPI_SH_MSIOF_H__ +#define __SPI_SH_MSIOF_H__ + +struct sh_msiof_spi_info { + int tx_fifo_override; + int rx_fifo_override; + u16 num_chipselect; +}; + +#endif /* __SPI_SH_MSIOF_H__ */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h new file mode 100644 index 00000000..fa702aeb --- /dev/null +++ b/include/linux/spi/spi.h @@ -0,0 +1,859 @@ +/* + * Copyright (C) 2005 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_SPI_H +#define __LINUX_SPI_H + +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/slab.h> +#include <linux/kthread.h> + +/* + * INTERFACES between SPI master-side drivers and SPI infrastructure. + * (There's no SPI slave support for Linux yet...) + */ +extern struct bus_type spi_bus_type; + +/** + * struct spi_device - Master side proxy for an SPI slave device + * @dev: Driver model representation of the device. + * @master: SPI controller used with the device. + * @max_speed_hz: Maximum clock rate to be used with this chip + * (on this board); may be changed by the device's driver. + * The spi_transfer.speed_hz can override this for each transfer. + * @chip_select: Chipselect, distinguishing chips handled by @master. 
+ * @mode: The spi mode defines how data is clocked out and in. + * This may be changed by the device's driver. + * The "active low" default for chipselect mode can be overridden + * (by specifying SPI_CS_HIGH) as can the "MSB first" default for + * each word in a transfer (by specifying SPI_LSB_FIRST). + * @bits_per_word: Data transfers involve one or more words; word sizes + * like eight or 12 bits are common. In-memory wordsizes are + * powers of two bytes (e.g. 20 bit samples use 32 bits). + * This may be changed by the device's driver, or left at the + * default (0) indicating protocol words are eight bit bytes. + * The spi_transfer.bits_per_word can override this for each transfer. + * @irq: Negative, or the number passed to request_irq() to receive + * interrupts from this device. + * @controller_state: Controller's runtime state + * @controller_data: Board-specific definitions for controller, such as + * FIFO initialization parameters; from board_info.controller_data + * @modalias: Name of the driver to use with this device, or an alias + * for that name. This appears in the sysfs "modalias" attribute + * for driver coldplugging, and in uevents used for hotplugging + * + * A @spi_device is used to interchange data between an SPI slave + * (usually a discrete chip) and CPU memory. + * + * In @dev, the platform_data is used to hold information about this + * device that's meaningful to the device's protocol driver, but not + * to its controller. One example might be an identifier for a chip + * variant with slightly different functionality; another might be + * information about how this particular board wires the chip's pins. + */ +struct spi_device { + struct device dev; + struct spi_master *master; + u32 max_speed_hz; + u8 chip_select; + u8 mode; +#define SPI_CPHA 0x01 /* clock phase */ +#define SPI_CPOL 0x02 /* clock polarity */ +#define SPI_MODE_0 (0|0) /* (original MicroWire) */ +#define SPI_MODE_1 (0|SPI_CPHA) +#define SPI_MODE_2 (SPI_CPOL|0) +#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) +#define SPI_CS_HIGH 0x04 /* chipselect active high? */ +#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ +#define SPI_3WIRE 0x10 /* SI/SO signals shared */ +#define SPI_LOOP 0x20 /* loopback mode */ +#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */ +#define SPI_READY 0x80 /* slave pulls low to pause */ + u8 bits_per_word; + int irq; + void *controller_state; + void *controller_data; + char modalias[SPI_NAME_SIZE]; + + /* + * likely need more hooks for more protocol options affecting how + * the controller talks to each chip, like: + * - memory packing (12 bit samples into low bits, others zeroed) + * - priority + * - drop chipselect after each word + * - chipselect delays + * - ... + */ +}; + +static inline struct spi_device *to_spi_device(struct device *dev) +{ + return dev ? container_of(dev, struct spi_device, dev) : NULL; +} + +/* most drivers won't need to care about device refcounting */ +static inline struct spi_device *spi_dev_get(struct spi_device *spi) +{ + return (spi && get_device(&spi->dev)) ? 
spi : NULL; +} + +static inline void spi_dev_put(struct spi_device *spi) +{ + if (spi) + put_device(&spi->dev); +} + +/* ctldata is for the bus_master driver's runtime state */ +static inline void *spi_get_ctldata(struct spi_device *spi) +{ + return spi->controller_state; +} + +static inline void spi_set_ctldata(struct spi_device *spi, void *state) +{ + spi->controller_state = state; +} + +/* device driver data */ + +static inline void spi_set_drvdata(struct spi_device *spi, void *data) +{ + dev_set_drvdata(&spi->dev, data); +} + +static inline void *spi_get_drvdata(struct spi_device *spi) +{ + return dev_get_drvdata(&spi->dev); +} + +struct spi_message; + + + +/** + * struct spi_driver - Host side "protocol" driver + * @id_table: List of SPI devices supported by this driver + * @probe: Binds this driver to the spi device. Drivers can verify + * that the device is actually present, and may need to configure + * characteristics (such as bits_per_word) which weren't needed for + * the initial configuration done during system setup. + * @remove: Unbinds this driver from the spi device + * @shutdown: Standard shutdown callback used during system state + * transitions such as powerdown/halt and kexec + * @suspend: Standard suspend callback used during system state transitions + * @resume: Standard resume callback used during system state transitions + * @driver: SPI device drivers should initialize the name and owner + * field of this structure. + * + * This represents the kind of device driver that uses SPI messages to + * interact with the hardware at the other end of a SPI link. It's called + * a "protocol" driver because it works through messages rather than talking + * directly to SPI hardware (which is what the underlying SPI controller + * driver does to pass those messages). These protocols are defined in the + * specification for the device(s) supported by the driver. + * + * As a rule, those device protocols represent the lowest level interface + * supported by a driver, and it will support upper level interfaces too. + * Examples of such upper levels include frameworks like MTD, networking, + * MMC, RTC, filesystem character device nodes, and hardware monitoring. + */ +struct spi_driver { + const struct spi_device_id *id_table; + int (*probe)(struct spi_device *spi); + int (*remove)(struct spi_device *spi); + void (*shutdown)(struct spi_device *spi); + int (*suspend)(struct spi_device *spi, pm_message_t mesg); + int (*resume)(struct spi_device *spi); + struct device_driver driver; +}; + +static inline struct spi_driver *to_spi_driver(struct device_driver *drv) +{ + return drv ? container_of(drv, struct spi_driver, driver) : NULL; +} + +extern int spi_register_driver(struct spi_driver *sdrv); + +/** + * spi_unregister_driver - reverse effect of spi_register_driver + * @sdrv: the driver to unregister + * Context: can sleep + */ +static inline void spi_unregister_driver(struct spi_driver *sdrv) +{ + if (sdrv) + driver_unregister(&sdrv->driver); +} + +/** + * module_spi_driver() - Helper macro for registering a SPI driver + * @__spi_driver: spi_driver struct + * + * Helper macro for SPI drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. 
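For illustration, a minimal protocol driver built around this macro might look like the sketch below. The "example-chip" name and its trivial probe() logic are hypothetical; only the spi_driver plumbing itself follows the declarations above.

/* Minimal protocol-driver sketch (hypothetical "example-chip" slave). */
#include <linux/module.h>
#include <linux/spi/spi.h>

static int example_chip_probe(struct spi_device *spi)
{
	/* adjust per-chip settings, then let the controller validate them */
	spi->bits_per_word = 8;
	spi->mode = SPI_MODE_0;
	return spi_setup(spi);
}

static int example_chip_remove(struct spi_device *spi)
{
	/* nothing allocated in probe(), so nothing to release */
	return 0;
}

static const struct spi_device_id example_chip_ids[] = {
	{ "example-chip", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, example_chip_ids);

static struct spi_driver example_chip_driver = {
	.driver = {
		.name	= "example-chip",
		.owner	= THIS_MODULE,
	},
	.id_table	= example_chip_ids,
	.probe		= example_chip_probe,
	.remove		= example_chip_remove,
};
module_spi_driver(example_chip_driver);

MODULE_LICENSE("GPL");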
Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_spi_driver(__spi_driver) \ + module_driver(__spi_driver, spi_register_driver, \ + spi_unregister_driver) + +/** + * struct spi_master - interface to SPI master controller + * @dev: device interface to this driver + * @list: link with the global spi_master list + * @bus_num: board-specific (and often SOC-specific) identifier for a + * given SPI controller. + * @num_chipselect: chipselects are used to distinguish individual + * SPI slaves, and are numbered from zero to num_chipselects. + * each slave has a chipselect signal, but it's common that not + * every chipselect is connected to a slave. + * @dma_alignment: SPI controller constraint on DMA buffers alignment. + * @mode_bits: flags understood by this controller driver + * @flags: other constraints relevant to this driver + * @bus_lock_spinlock: spinlock for SPI bus locking + * @bus_lock_mutex: mutex for SPI bus locking + * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use + * @setup: updates the device mode and clocking records used by a + * device's SPI controller; protocol code may call this. This + * must fail if an unrecognized or unsupported mode is requested. + * It's always safe to call this unless transfers are pending on + * the device whose settings are being modified. + * @transfer: adds a message to the controller's transfer queue. + * @cleanup: frees controller-specific state + * @queued: whether this master is providing an internal message queue + * @kworker: thread struct for message pump + * @kworker_task: pointer to task for message pump kworker thread + * @pump_messages: work struct for scheduling work to the message pump + * @queue_lock: spinlock to syncronise access to message queue + * @queue: message queue + * @cur_msg: the currently in-flight message + * @busy: message pump is busy + * @running: message pump is running + * @rt: whether this queue is set to run as a realtime task + * @prepare_transfer_hardware: a message will soon arrive from the queue + * so the subsystem requests the driver to prepare the transfer hardware + * by issuing this call + * @transfer_one_message: the subsystem calls the driver to transfer a single + * message while queuing transfers that arrive in the meantime. When the + * driver is finished with this message, it must call + * spi_finalize_current_message() so the subsystem can issue the next + * transfer + * @unprepare_transfer_hardware: there are currently no more messages on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * + * Each SPI master controller can communicate with one or more @spi_device + * children. These make a small bus, sharing MOSI, MISO and SCK signals + * but not chip select signals. Each device may be configured to use a + * different clock rate, since those shared signals are ignored unless + * the chip is selected. + * + * The driver for an SPI controller manages access to those devices through + * a queue of spi_message transactions, copying data between CPU memory and + * an SPI slave device. For each such message it queues, it calls the + * message's completion function when the transaction completes. + */ +struct spi_master { + struct device dev; + + struct list_head list; + + /* other than negative (== assign one dynamically), bus_num is fully + * board-specific. usually that simplifies to being SOC-specific. 
+ * example: one SOC has three SPI controllers, numbered 0..2, + * and one board's schematics might show it using SPI-2. software + * would normally use bus_num=2 for that controller. + */ + s16 bus_num; + + /* chipselects will be integral to many controllers; some others + * might use board-specific GPIOs. + */ + u16 num_chipselect; + + /* some SPI controllers pose alignment requirements on DMAable + * buffers; let protocol drivers know about these requirements. + */ + u16 dma_alignment; + + /* spi_device.mode flags understood by this controller driver */ + u16 mode_bits; + + /* other constraints relevant to this driver */ + u16 flags; +#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ +#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ +#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ + + /* lock and mutex for SPI bus locking */ + spinlock_t bus_lock_spinlock; + struct mutex bus_lock_mutex; + + /* flag indicating that the SPI bus is locked for exclusive use */ + bool bus_lock_flag; + + /* Setup mode and clock, etc (spi driver may call many times). + * + * IMPORTANT: this may be called when transfers to another + * device are active. DO NOT UPDATE SHARED REGISTERS in ways + * which could break those transfers. + */ + int (*setup)(struct spi_device *spi); + + /* bidirectional bulk transfers + * + * + The transfer() method may not sleep; its main role is + * just to add the message to the queue. + * + For now there's no remove-from-queue operation, or + * any other request management + * + To a given spi_device, message queueing is pure fifo + * + * + The master's main job is to process its message queue, + * selecting a chip then transferring data + * + If there are multiple spi_device children, the i/o queue + * arbitration algorithm is unspecified (round robin, fifo, + * priority, reservations, preemption, etc) + * + * + Chipselect stays active during the entire message + * (unless modified by spi_transfer.cs_change != 0). + * + The message transfers use clock and SPI mode parameters + * previously established by setup() for this device + */ + int (*transfer)(struct spi_device *spi, + struct spi_message *mesg); + + /* called on release() to free memory provided by spi_master */ + void (*cleanup)(struct spi_device *spi); + + /* + * These hooks are for drivers that want to use the generic + * master transfer queueing mechanism. If these are used, the + * transfer() function above must NOT be specified by the driver. + * Over time we expect SPI drivers to be phased over to this API. 
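A controller driver opting into that queued API might be structured like the sketch below. The example_spi_priv state and the example_hw_do_transfer() helper are placeholders for real hardware programming; only the spi_master hooks themselves come from the declarations in this header.

/* Controller-driver sketch using the queued message pump (hypothetical hardware). */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_spi_priv {
	void __iomem	*regs;		/* hypothetical register window */
};

/* placeholder: program the (imaginary) FIFO/DMA hardware for one transfer */
static int example_hw_do_transfer(struct spi_master *master,
				  struct spi_device *spi,
				  struct spi_transfer *xfer)
{
	return 0;
}

static int example_transfer_one_message(struct spi_master *master,
					 struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = example_hw_do_transfer(master, msg->spi, xfer);
		if (ret)
			break;
		msg->actual_length += xfer->len;
	}

	msg->status = ret;
	spi_finalize_current_message(master);	/* lets the core issue the next message */
	return ret;
}

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(struct example_spi_priv));
	if (!master)
		return -ENOMEM;

	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->transfer_one_message = example_transfer_one_message;
	/* transfer() is left NULL, so the core's generic message queue is used */

	ret = spi_register_master(master);
	if (ret)
		spi_master_put(master);
	return ret;
}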
+ */ + bool queued; + struct kthread_worker kworker; + struct task_struct *kworker_task; + struct kthread_work pump_messages; + spinlock_t queue_lock; + struct list_head queue; + struct spi_message *cur_msg; + bool busy; + bool running; + bool rt; + + int (*prepare_transfer_hardware)(struct spi_master *master); + int (*transfer_one_message)(struct spi_master *master, + struct spi_message *mesg); + int (*unprepare_transfer_hardware)(struct spi_master *master); +}; + +static inline void *spi_master_get_devdata(struct spi_master *master) +{ + return dev_get_drvdata(&master->dev); +} + +static inline void spi_master_set_devdata(struct spi_master *master, void *data) +{ + dev_set_drvdata(&master->dev, data); +} + +static inline struct spi_master *spi_master_get(struct spi_master *master) +{ + if (!master || !get_device(&master->dev)) + return NULL; + return master; +} + +static inline void spi_master_put(struct spi_master *master) +{ + if (master) + put_device(&master->dev); +} + +/* PM calls that need to be issued by the driver */ +extern int spi_master_suspend(struct spi_master *master); +extern int spi_master_resume(struct spi_master *master); + +/* Calls the driver make to interact with the message queue */ +extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); +extern void spi_finalize_current_message(struct spi_master *master); + +/* the spi driver core manages memory for the spi_master classdev */ +extern struct spi_master * +spi_alloc_master(struct device *host, unsigned size); + +extern int spi_register_master(struct spi_master *master); +extern void spi_unregister_master(struct spi_master *master); + +extern struct spi_master *spi_busnum_to_master(u16 busnum); + +/*---------------------------------------------------------------------------*/ + +/* + * I/O INTERFACE between SPI controller and protocol drivers + * + * Protocol drivers use a queue of spi_messages, each transferring data + * between the controller and memory buffers. + * + * The spi_messages themselves consist of a series of read+write transfer + * segments. Those segments always read the same number of bits as they + * write; but one or the other is easily ignored by passing a null buffer + * pointer. (This is unlike most types of I/O API, because SPI hardware + * is full duplex.) + * + * NOTE: Allocation of spi_transfer and spi_message memory is entirely + * up to the protocol driver, which guarantees the integrity of both (as + * well as the data buffers) for as long as the message is queued. + */ + +/** + * struct spi_transfer - a read/write buffer pair + * @tx_buf: data to be written (dma-safe memory), or NULL + * @rx_buf: data to be read (dma-safe memory), or NULL + * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped + * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped + * @len: size of rx and tx buffers (in bytes) + * @speed_hz: Select a speed other than the device default for this + * transfer. If 0 the default (from @spi_device) is used. + * @bits_per_word: select a bits_per_word other than the device default + * for this transfer. If 0 the default (from @spi_device) is used. + * @cs_change: affects chipselect after this transfer completes + * @delay_usecs: microseconds to delay after this transfer before + * (optionally) changing the chipselect status, then starting + * the next transfer or completing this @spi_message. 
+ * @transfer_list: transfers are sequenced through @spi_message.transfers + * + * SPI transfers always write the same number of bytes as they read. + * Protocol drivers should always provide @rx_buf and/or @tx_buf. + * In some cases, they may also want to provide DMA addresses for + * the data being transferred; that may reduce overhead, when the + * underlying driver uses dma. + * + * If the transmit buffer is null, zeroes will be shifted out + * while filling @rx_buf. If the receive buffer is null, the data + * shifted in will be discarded. Only "len" bytes shift out (or in). + * It's an error to try to shift out a partial word. (For example, by + * shifting out three bytes with word size of sixteen or twenty bits; + * the former uses two bytes per word, the latter uses four bytes.) + * + * In-memory data values are always in native CPU byte order, translated + * from the wire byte order (big-endian except with SPI_LSB_FIRST). So + * for example when bits_per_word is sixteen, buffers are 2N bytes long + * (@len = 2N) and hold N sixteen bit words in CPU byte order. + * + * When the word size of the SPI transfer is not a power-of-two multiple + * of eight bits, those in-memory words include extra bits. In-memory + * words are always seen by protocol drivers as right-justified, so the + * undefined (rx) or unused (tx) bits are always the most significant bits. + * + * All SPI transfers start with the relevant chipselect active. Normally + * it stays selected until after the last transfer in a message. Drivers + * can affect the chipselect signal using cs_change. + * + * (i) If the transfer isn't the last one in the message, this flag is + * used to make the chipselect briefly go inactive in the middle of the + * message. Toggling chipselect in this way may be needed to terminate + * a chip command, letting a single spi_message perform all of a group of + * chip transactions together. + * + * (ii) When the transfer is the last one in the message, the chip may + * stay selected until the next transfer. On multi-device SPI busses + * with nothing blocking messages going to other devices, this is just + * a performance hint; starting a message to another device deselects + * this one. But in other cases, this can be used to ensure correctness. + * Some devices need protocol transactions to be built from a series of + * spi_message submissions, where the content of one message is determined + * by the results of previous messages and where the whole transaction + * ends when the chipselect goes inactive. + * + * The code that submits an spi_message (and its spi_transfers) + * to the lower layers is responsible for managing its memory. + * Zero-initialize every field you don't set up explicitly, to + * insulate against future API updates. After you submit a message + * and its transfers, ignore them until its completion callback. + */ +struct spi_transfer { + /* it's ok if tx_buf == rx_buf (right?)
+ * for MicroWire, one buffer must be null + * buffers must work with dma_*map_single() calls, unless + * spi_message.is_dma_mapped reports a pre-existing mapping + */ + const void *tx_buf; + void *rx_buf; + unsigned len; + + dma_addr_t tx_dma; + dma_addr_t rx_dma; + + unsigned cs_change:1; + u8 bits_per_word; + u16 delay_usecs; + u32 speed_hz; + + struct list_head transfer_list; +}; + +/** + * struct spi_message - one multi-segment SPI transaction + * @transfers: list of transfer segments in this transaction + * @spi: SPI device to which the transaction is queued + * @is_dma_mapped: if true, the caller provided both dma and cpu virtual + * addresses for each transfer buffer + * @complete: called to report transaction completions + * @context: the argument to complete() when it's called + * @actual_length: the total number of bytes that were transferred in all + * successful segments + * @status: zero for success, else negative errno + * @queue: for use by whichever driver currently owns the message + * @state: for use by whichever driver currently owns the message + * + * A @spi_message is used to execute an atomic sequence of data transfers, + * each represented by a struct spi_transfer. The sequence is "atomic" + * in the sense that no other spi_message may use that SPI bus until that + * sequence completes. On some systems, many such sequences can execute as + * a single programmed DMA transfer. On all systems, these messages are + * queued, and might complete after transactions to other devices. Messages + * sent to a given spi_device are always executed in FIFO order. + * + * The code that submits an spi_message (and its spi_transfers) + * to the lower layers is responsible for managing its memory. + * Zero-initialize every field you don't set up explicitly, to + * insulate against future API updates. After you submit a message + * and its transfers, ignore them until its completion callback. + */ +struct spi_message { + struct list_head transfers; + + struct spi_device *spi; + + unsigned is_dma_mapped:1; + + /* REVISIT: we might want a flag affecting the behavior of the + * last transfer ... allowing things like "read 16 bit length L" + * immediately followed by "read L bytes". Basically imposing + * a specific message scheduling algorithm. + * + * Some controller drivers (message-at-a-time queue processing) + * could provide that as their default scheduling algorithm. But + * others (with multi-message pipelines) could need a flag to + * tell them about such special cases. + */ + + /* completion is reported through a callback */ + void (*complete)(void *context); + void *context; + unsigned actual_length; + int status; + + /* for optional use by whatever driver currently owns the + * spi_message ... between calls to spi_async and then later + * complete(), that's the spi_master controller driver. + */ + struct list_head queue; + void *state; +}; + +static inline void spi_message_init(struct spi_message *m) +{ + memset(m, 0, sizeof *m); + INIT_LIST_HEAD(&m->transfers); +} + +static inline void +spi_message_add_tail(struct spi_transfer *t, struct spi_message *m) +{ + list_add_tail(&t->transfer_list, &m->transfers); +} + +static inline void +spi_transfer_del(struct spi_transfer *t) +{ + list_del(&t->transfer_list); +} + +/* It's fine to embed message and transaction structures in other data + * structures so long as you don't free them while they're in use.
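Putting spi_transfer, spi_message and the helpers just defined together, a protocol driver could issue a command-then-read sequence as in the sketch below. The opcode convention and lengths are made up; note the comment about DMA-safe buffers, which the documentation above requires for real drivers.

/* Command + response as one message, built from two transfers (illustrative). */
static int example_read_block(struct spi_device *spi, u8 opcode,
			      void *buf, size_t len)
{
	/*
	 * For simplicity these buffers live on the stack; a production
	 * driver should use DMA-safe (heap) memory for tx_buf/rx_buf.
	 */
	struct spi_transfer cmd = {
		.tx_buf	= &opcode,
		.len	= 1,
	};
	struct spi_transfer data = {
		.rx_buf	= buf,
		.len	= len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&cmd, &msg);
	spi_message_add_tail(&data, &msg);

	/* chipselect stays asserted across both transfers, then is released */
	return spi_sync(spi, &msg);
}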
+ */ + +static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags) +{ + struct spi_message *m; + + m = kzalloc(sizeof(struct spi_message) + + ntrans * sizeof(struct spi_transfer), + flags); + if (m) { + unsigned i; + struct spi_transfer *t = (struct spi_transfer *)(m + 1); + + INIT_LIST_HEAD(&m->transfers); + for (i = 0; i < ntrans; i++, t++) + spi_message_add_tail(t, m); + } + return m; +} + +static inline void spi_message_free(struct spi_message *m) +{ + kfree(m); +} + +extern int spi_setup(struct spi_device *spi); +extern int spi_async(struct spi_device *spi, struct spi_message *message); +extern int spi_async_locked(struct spi_device *spi, + struct spi_message *message); + +/*---------------------------------------------------------------------------*/ + +/* All these synchronous SPI transfer routines are utilities layered + * over the core async transfer primitive. Here, "synchronous" means + * they will sleep uninterruptibly until the async transfer completes. + */ + +extern int spi_sync(struct spi_device *spi, struct spi_message *message); +extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message); +extern int spi_bus_lock(struct spi_master *master); +extern int spi_bus_unlock(struct spi_master *master); + +/** + * spi_write - SPI synchronous write + * @spi: device to which data will be written + * @buf: data buffer + * @len: data buffer size + * Context: can sleep + * + * This writes the buffer and returns zero or a negative error code. + * Callable only from contexts that can sleep. + */ +static inline int +spi_write(struct spi_device *spi, const void *buf, size_t len) +{ + struct spi_transfer t = { + .tx_buf = buf, + .len = len, + }; + struct spi_message m; + + spi_message_init(&m); + spi_message_add_tail(&t, &m); + return spi_sync(spi, &m); +} + +/** + * spi_read - SPI synchronous read + * @spi: device from which data will be read + * @buf: data buffer + * @len: data buffer size + * Context: can sleep + * + * This reads the buffer and returns zero or a negative error code. + * Callable only from contexts that can sleep. + */ +static inline int +spi_read(struct spi_device *spi, void *buf, size_t len) +{ + struct spi_transfer t = { + .rx_buf = buf, + .len = len, + }; + struct spi_message m; + + spi_message_init(&m); + spi_message_add_tail(&t, &m); + return spi_sync(spi, &m); +} + +/* this copies txbuf and rxbuf data; for small transfers only! */ +extern int spi_write_then_read(struct spi_device *spi, + const void *txbuf, unsigned n_tx, + void *rxbuf, unsigned n_rx); + +/** + * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read + * @spi: device with which data will be exchanged + * @cmd: command to be written before data is read back + * Context: can sleep + * + * This returns the (unsigned) eight bit number returned by the + * device, or else a negative error code. Callable only from + * contexts that can sleep. + */ +static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) +{ + ssize_t status; + u8 result; + + status = spi_write_then_read(spi, &cmd, 1, &result, 1); + + /* return negative errno or unsigned value */ + return (status < 0) ? status : result; +} + +/** + * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read + * @spi: device with which data will be exchanged + * @cmd: command to be written before data is read back + * Context: can sleep + * + * This returns the (unsigned) sixteen bit number returned by the + * device, or else a negative error code. Callable only from + * contexts that can sleep. 
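As a usage illustration of these synchronous helpers, register access on a hypothetical chip (whose read command is assumed to be the register address with the top bit set) can avoid hand-built messages entirely. spi_write_then_read() copies the buffers, so stack storage is fine here.

/* Register access via the convenience helpers (hypothetical chip protocol). */
static int example_read_reg(struct spi_device *spi, u8 reg)
{
	/* returns the register value (0..255) or a negative errno */
	return spi_w8r8(spi, reg | 0x80);	/* 0x80 = assumed "read" flag */
}

static int example_write_reg(struct spi_device *spi, u8 reg, u8 val)
{
	u8 tx[2] = { reg, val };

	return spi_write_then_read(spi, tx, 2, NULL, 0);
}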
+ * + * The number is returned in wire-order, which is at least sometimes + * big-endian. + */ +static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) +{ + ssize_t status; + u16 result; + + status = spi_write_then_read(spi, &cmd, 1, (u8 *) &result, 2); + + /* return negative errno or unsigned value */ + return (status < 0) ? status : result; +} + +/*---------------------------------------------------------------------------*/ + +/* + * INTERFACE between board init code and SPI infrastructure. + * + * No SPI driver ever sees these SPI device table segments, but + * it's how the SPI core (or adapters that get hotplugged) grows + * the driver model tree. + * + * As a rule, SPI devices can't be probed. Instead, board init code + * provides a table listing the devices which are present, with enough + * information to bind and set up the device's driver. There's basic + * support for nonstatic configurations too; enough to handle adding + * parport adapters, or microcontrollers acting as USB-to-SPI bridges. + */ + +/** + * struct spi_board_info - board-specific template for a SPI device + * @modalias: Initializes spi_device.modalias; identifies the driver. + * @platform_data: Initializes spi_device.platform_data; the particular + * data stored there is driver-specific. + * @controller_data: Initializes spi_device.controller_data; some + * controllers need hints about hardware setup, e.g. for DMA. + * @irq: Initializes spi_device.irq; depends on how the board is wired. + * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits + * from the chip datasheet and board-specific signal quality issues. + * @bus_num: Identifies which spi_master parents the spi_device; unused + * by spi_new_device(), and otherwise depends on board wiring. + * @chip_select: Initializes spi_device.chip_select; depends on how + * the board is wired. + * @mode: Initializes spi_device.mode; based on the chip datasheet, board + * wiring (some devices support both 3WIRE and standard modes), and + * possibly presence of an inverter in the chipselect path. + * + * When adding new SPI devices to the device tree, these structures serve + * as a partial device template. They hold information which can't always + * be determined by drivers. Information that probe() can establish (such + * as the default transfer wordsize) is not included here. + * + * These structures are used in two places. Their primary role is to + * be stored in tables of board-specific device descriptors, which are + * declared early in board initialization and then used (much later) to + * populate a controller's device tree after the that controller's driver + * initializes. A secondary (and atypical) role is as a parameter to + * spi_new_device() call, which happens after those controller drivers + * are active in some dynamic board configuration models. + */ +struct spi_board_info { + /* the device name and module name are coupled, like platform_bus; + * "modalias" is normally the driver name. + * + * platform_data goes to spi_device.dev.platform_data, + * controller_data goes to spi_device.controller_data, + * irq is copied too + */ + char modalias[SPI_NAME_SIZE]; + const void *platform_data; + void *controller_data; + int irq; + + /* slower signaling on noisy or low voltage boards */ + u32 max_speed_hz; + + + /* bus_num is board specific and matches the bus_num of some + * spi_master that will probably be registered later. + * + * chip_select reflects how this chip is wired to that master; + * it's less than num_chipselect. 
+ */ + u16 bus_num; + u16 chip_select; + + /* mode becomes spi_device.mode, and is essential for chips + * where the default of SPI_CS_HIGH = 0 is wrong. + */ + u8 mode; + + /* ... may need additional spi_device chip config data here. + * avoid stuff protocol drivers can set; but include stuff + * needed to behave without being bound to a driver: + * - quirks like clock rate mattering when not selected + */ +}; + +#ifdef CONFIG_SPI +extern int +spi_register_board_info(struct spi_board_info const *info, unsigned n); +#else +/* board init code may ignore whether SPI is configured or not */ +static inline int +spi_register_board_info(struct spi_board_info const *info, unsigned n) + { return 0; } +#endif + + +/* If you're hotplugging an adapter with devices (parport, usb, etc) + * use spi_new_device() to describe each device. You can also call + * spi_unregister_device() to start making that device vanish, but + * normally that would be handled by spi_unregister_master(). + * + * You can also use spi_alloc_device() and spi_add_device() to use a two + * stage registration sequence for each spi_device. This gives the caller + * some more control over the spi_device structure before it is registered, + * but requires that caller to initialize fields that would otherwise + * be defined using the board info. + */ +extern struct spi_device * +spi_alloc_device(struct spi_master *master); + +extern int +spi_add_device(struct spi_device *spi); + +extern struct spi_device * +spi_new_device(struct spi_master *, struct spi_board_info *); + +static inline void +spi_unregister_device(struct spi_device *spi) +{ + if (spi) + device_unregister(&spi->dev); +} + +extern const struct spi_device_id * +spi_get_device_id(const struct spi_device *sdev); + +#endif /* __LINUX_SPI_H */ diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h new file mode 100644 index 00000000..f987a2be --- /dev/null +++ b/include/linux/spi/spi_bitbang.h @@ -0,0 +1,52 @@ +#ifndef __SPI_BITBANG_H +#define __SPI_BITBANG_H + +#include <linux/workqueue.h> + +struct spi_bitbang { + struct workqueue_struct *workqueue; + struct work_struct work; + + spinlock_t lock; + struct list_head queue; + u8 busy; + u8 use_dma; + u8 flags; /* extra spi->mode support */ + + struct spi_master *master; + + /* setup_transfer() changes clock and/or wordsize to match settings + * for this transfer; zeroes restore defaults from spi_device. + */ + int (*setup_transfer)(struct spi_device *spi, + struct spi_transfer *t); + + void (*chipselect)(struct spi_device *spi, int is_on); +#define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */ +#define BITBANG_CS_INACTIVE 0 + + /* txrx_bufs() may handle dma mapping for transfers that don't + * already have one (transfer.{tx,rx}_dma is zero), or use PIO + */ + int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t); + + /* txrx_word[SPI_MODE_*]() just looks like a shift register */ + u32 (*txrx_word[4])(struct spi_device *spi, + unsigned nsecs, + u32 word, u8 bits); +}; + +/* you can call these default bitbang->master methods from your custom + * methods, if you like. 
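Returning to the hotplug helpers documented above, an adapter driver that discovers its slaves at runtime might register one with spi_new_device() as sketched here; the "example-sensor" device and its parameters are assumptions.

/* Dynamic registration sketch for a hotplugged adapter (hypothetical slave). */
static struct spi_device *example_attach_slave(struct spi_master *master)
{
	struct spi_board_info info = {
		.modalias	= "example-sensor",
		.max_speed_hz	= 500000,
		.chip_select	= 1,
		.mode		= SPI_MODE_3,
	};

	/* bus_num is ignored here; the new device hangs off the given master */
	return spi_new_device(master, &info);
}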
+ */ +extern int spi_bitbang_setup(struct spi_device *spi); +extern void spi_bitbang_cleanup(struct spi_device *spi); +extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m); +extern int spi_bitbang_setup_transfer(struct spi_device *spi, + struct spi_transfer *t); + +/* start or stop queue processing */ +extern int spi_bitbang_start(struct spi_bitbang *spi); +extern int spi_bitbang_stop(struct spi_bitbang *spi); + +#endif /* __SPI_BITBANG_H */ diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h new file mode 100644 index 00000000..369b3d7d --- /dev/null +++ b/include/linux/spi/spi_gpio.h @@ -0,0 +1,71 @@ +#ifndef __LINUX_SPI_GPIO_H +#define __LINUX_SPI_GPIO_H + +/* + * For each bitbanged SPI bus, set up a platform_device node with: + * - name "spi_gpio" + * - id the same as the SPI bus number it implements + * - dev.platform data pointing to a struct spi_gpio_platform_data + * + * Or, see the driver code for information about speedups that are + * possible on platforms that support inlined access for GPIOs (no + * spi_gpio_platform_data is used). + * + * Use spi_board_info with these busses in the usual way, being sure + * that the controller_data being the GPIO used for each device's + * chipselect: + * + * static struct spi_board_info ... [] = { + * ... + * // this slave uses GPIO 42 for its chipselect + * .controller_data = (void *) 42, + * ... + * // this one uses GPIO 86 for its chipselect + * .controller_data = (void *) 86, + * ... + * }; + * + * If chipselect is not used (there's only one device on the bus), assign + * SPI_GPIO_NO_CHIPSELECT to the controller_data: + * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT; + * + * If the MISO or MOSI pin is not available then it should be set to + * SPI_GPIO_NO_MISO or SPI_GPIO_NO_MOSI. + * + * If the bitbanged bus is later switched to a "native" controller, + * that platform_device and controller_data should be removed. + */ + +#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l) +#define SPI_GPIO_NO_MISO ((unsigned long)-1l) +#define SPI_GPIO_NO_MOSI ((unsigned long)-1l) + +/** + * struct spi_gpio_platform_data - parameter for bitbanged SPI master + * @sck: number of the GPIO used for clock output + * @mosi: number of the GPIO used for Master Output, Slave In (MOSI) data + * @miso: number of the GPIO used for Master Input, Slave Output (MISO) data + * @num_chipselect: how many slaves to allow + * + * All GPIO signals used with the SPI bus managed through this driver + * (chipselects, MOSI, MISO, SCK) must be configured as GPIOs, instead + * of some alternate function. + * + * It can be convenient to use this driver with pins that have alternate + * functions associated with a "native" SPI controller if a driver for that + * controller is not available, or is missing important functionality. + * + * On platforms which can do so, configure MISO with a weak pullup unless + * there's an external pullup on that signal. That saves power by avoiding + * floating signals. (A weak pulldown would save power too, but many + * drivers expect to see all-ones data as the no slave "response".) 
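Tying the notes above together, a board might describe one bitbanged bus and its single slave as below. The GPIO numbers, bus id and the "example-display" slave are assumptions; the only fixed conventions are the "spi_gpio" name, the id-as-bus-number rule, and the chipselect GPIO passed through controller_data.

/* Bitbanged-bus board sketch (hypothetical GPIO numbers and slave). */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>

static struct spi_gpio_platform_data example_spi_gpio_pdata = {
	.sck		= 30,
	.mosi		= 31,
	.miso		= 32,
	.num_chipselect	= 1,
};

static struct platform_device example_spi_gpio_device = {
	.name	= "spi_gpio",
	.id	= 2,			/* becomes SPI bus number 2 */
	.dev	= {
		.platform_data = &example_spi_gpio_pdata,
	},
};

static struct spi_board_info example_spi_gpio_slaves[] __initdata = {
	{
		.modalias	 = "example-display",
		.controller_data = (void *)42,	/* GPIO 42 drives this chipselect */
		.max_speed_hz	 = 100000,
		.bus_num	 = 2,
		.chip_select	 = 0,
		.mode		 = SPI_MODE_0,
	},
};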
+ */ +struct spi_gpio_platform_data { + unsigned sck; + unsigned mosi; + unsigned miso; + + u16 num_chipselect; +}; + +#endif /* __LINUX_SPI_GPIO_H */ diff --git a/include/linux/spi/spi_oc_tiny.h b/include/linux/spi/spi_oc_tiny.h new file mode 100644 index 00000000..1ac529cf --- /dev/null +++ b/include/linux/spi/spi_oc_tiny.h @@ -0,0 +1,20 @@ +#ifndef _LINUX_SPI_SPI_OC_TINY_H +#define _LINUX_SPI_SPI_OC_TINY_H + +/** + * struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI + * @freq: input clock freq to the core. + * @baudwidth: baud rate divider width of the core. + * @gpio_cs_count: number of gpio pins used for chipselect. + * @gpio_cs: array of gpio pins used for chipselect. + * + * freq and baudwidth are used only if the divider is programmable. + */ +struct tiny_spi_platform_data { + unsigned int freq; + unsigned int baudwidth; + unsigned int gpio_cs_count; + int *gpio_cs; +}; + +#endif /* _LINUX_SPI_SPI_OC_TINY_H */ diff --git a/include/linux/spi/spidev.h b/include/linux/spi/spidev.h new file mode 100644 index 00000000..52d9ed01 --- /dev/null +++ b/include/linux/spi/spidev.h @@ -0,0 +1,131 @@ +/* + * include/linux/spi/spidev.h + * + * Copyright (C) 2006 SWAPP + * Andrea Paterniani <a.paterniani@swapp-eng.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef SPIDEV_H +#define SPIDEV_H + +#include <linux/types.h> + +/* User space versions of kernel symbols for SPI clocking modes, + * matching <linux/spi/spi.h> + */ + +#define SPI_CPHA 0x01 +#define SPI_CPOL 0x02 + +#define SPI_MODE_0 (0|0) +#define SPI_MODE_1 (0|SPI_CPHA) +#define SPI_MODE_2 (SPI_CPOL|0) +#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) + +#define SPI_CS_HIGH 0x04 +#define SPI_LSB_FIRST 0x08 +#define SPI_3WIRE 0x10 +#define SPI_LOOP 0x20 +#define SPI_NO_CS 0x40 +#define SPI_READY 0x80 + +/*---------------------------------------------------------------------------*/ + +/* IOCTL commands */ + +#define SPI_IOC_MAGIC 'k' + +/** + * struct spi_ioc_transfer - describes a single SPI transfer + * @tx_buf: Holds pointer to userspace buffer with transmit data, or null. + * If no data is provided, zeroes are shifted out. + * @rx_buf: Holds pointer to userspace buffer for receive data, or null. + * @len: Length of tx and rx buffers, in bytes. + * @speed_hz: Temporary override of the device's bitrate. + * @bits_per_word: Temporary override of the device's wordsize. + * @delay_usecs: If nonzero, how long to delay after the last bit transfer + * before optionally deselecting the device before the next transfer. + * @cs_change: True to deselect device before starting the next transfer. 
+ * + * This structure is mapped directly to the kernel spi_transfer structure; + * the fields have the same meanings, except of course that the pointers + * are in a different address space (and may be of different sizes in some + * cases, such as 32-bit i386 userspace over a 64-bit x86_64 kernel). + * Zero-initialize the structure, including currently unused fields, to + * accommodate potential future updates. + * + * SPI_IOC_MESSAGE gives userspace the equivalent of kernel spi_sync(). + * Pass it an array of related transfers, they'll execute together. + * Each transfer may be half duplex (either direction) or full duplex. + * + * struct spi_ioc_transfer mesg[4]; + * ... + * status = ioctl(fd, SPI_IOC_MESSAGE(4), mesg); + * + * So for example one transfer might send a nine bit command (right aligned + * in a 16-bit word), the next could read a block of 8-bit data before + * terminating that command by temporarily deselecting the chip; the next + * could send a different nine bit command (re-selecting the chip), and the + * last transfer might write some register values. + */ +struct spi_ioc_transfer { + __u64 tx_buf; + __u64 rx_buf; + + __u32 len; + __u32 speed_hz; + + __u16 delay_usecs; + __u8 bits_per_word; + __u8 cs_change; + __u32 pad; + + /* If the contents of 'struct spi_ioc_transfer' ever change + * incompatibly, then the ioctl number (currently 0) must change; + * ioctls with constant size fields get a bit more in the way of + * error checking than ones (like this) where that field varies. + * + * NOTE: struct layout is the same in 64bit and 32bit userspace. + */ +}; + +/* not all platforms use <asm-generic/ioctl.h> or _IOC_TYPECHECK() ... */ +#define SPI_MSGSIZE(N) \ + ((((N)*(sizeof (struct spi_ioc_transfer))) < (1 << _IOC_SIZEBITS)) \ + ? ((N)*(sizeof (struct spi_ioc_transfer))) : 0) +#define SPI_IOC_MESSAGE(N) _IOW(SPI_IOC_MAGIC, 0, char[SPI_MSGSIZE(N)]) + + +/* Read / Write of SPI mode (SPI_MODE_0..SPI_MODE_3) */ +#define SPI_IOC_RD_MODE _IOR(SPI_IOC_MAGIC, 1, __u8) +#define SPI_IOC_WR_MODE _IOW(SPI_IOC_MAGIC, 1, __u8) + +/* Read / Write SPI bit justification */ +#define SPI_IOC_RD_LSB_FIRST _IOR(SPI_IOC_MAGIC, 2, __u8) +#define SPI_IOC_WR_LSB_FIRST _IOW(SPI_IOC_MAGIC, 2, __u8) + +/* Read / Write SPI device word length (1..N) */ +#define SPI_IOC_RD_BITS_PER_WORD _IOR(SPI_IOC_MAGIC, 3, __u8) +#define SPI_IOC_WR_BITS_PER_WORD _IOW(SPI_IOC_MAGIC, 3, __u8) + +/* Read / Write SPI device default max speed hz */ +#define SPI_IOC_RD_MAX_SPEED_HZ _IOR(SPI_IOC_MAGIC, 4, __u32) +#define SPI_IOC_WR_MAX_SPEED_HZ _IOW(SPI_IOC_MAGIC, 4, __u32) + + + +#endif /* SPIDEV_H */ diff --git a/include/linux/spi/tdo24m.h b/include/linux/spi/tdo24m.h new file mode 100644 index 00000000..7572d4e1 --- /dev/null +++ b/include/linux/spi/tdo24m.h @@ -0,0 +1,13 @@ +#ifndef __TDO24M_H__ +#define __TDO24M_H__ + +enum tdo24m_model { + TDO24M, + TDO35S, +}; + +struct tdo24m_platform_data { + enum tdo24m_model model; +}; + +#endif /* __TDO24M_H__ */ diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h new file mode 100644 index 00000000..60b59187 --- /dev/null +++ b/include/linux/spi/tle62x0.h @@ -0,0 +1,24 @@ +/* + * tle62x0.h - platform glue to Infineon TLE62x0 driver chips + * + * Copyright 2007 Simtec Electronics + * Ben Dooks <ben@simtec.co.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
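Stepping back to the spidev ioctls defined just above, a userspace client typically opens the character device, sets the mode, and submits spi_ioc_transfer arrays. The sketch below assumes /dev/spidev0.0 exists and uses an arbitrary 3-byte command; only the ioctl names come from this header.

/* Userspace sketch: one full-duplex transfer via spidev (assumed /dev/spidev0.0). */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/spi/spidev.h>

static int example_spidev_xfer(void)
{
	uint8_t mode = SPI_MODE_0;
	uint8_t tx[3] = { 0x9f, 0x00, 0x00 };	/* arbitrary example command */
	uint8_t rx[3] = { 0 };
	struct spi_ioc_transfer xfer;
	int fd, ret;

	fd = open("/dev/spidev0.0", O_RDWR);
	if (fd < 0)
		return -1;

	if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0) {
		close(fd);
		return -1;
	}

	memset(&xfer, 0, sizeof(xfer));		/* zero unused and padding fields */
	xfer.tx_buf = (unsigned long)tx;
	xfer.rx_buf = (unsigned long)rx;
	xfer.len = sizeof(tx);
	xfer.speed_hz = 1000000;		/* per-transfer bitrate override */

	ret = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
	close(fd);
	return ret < 0 ? -1 : 0;
}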
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +struct tle62x0_pdata { + unsigned int init_state; + unsigned int gpio_count; +}; diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h new file mode 100644 index 00000000..d9b0c842 --- /dev/null +++ b/include/linux/spi/tsc2005.h @@ -0,0 +1,41 @@ +/* + * This file is part of TSC2005 touchscreen driver + * + * Copyright (C) 2009-2010 Nokia Corporation + * + * Contact: Aaro Koskinen <aaro.koskinen@nokia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _LINUX_SPI_TSC2005_H +#define _LINUX_SPI_TSC2005_H + +#include <linux/types.h> + +struct tsc2005_platform_data { + int ts_pressure_max; + int ts_pressure_fudge; + int ts_x_max; + int ts_x_fudge; + int ts_y_max; + int ts_y_fudge; + int ts_x_plate_ohm; + unsigned int esd_timeout_ms; + void (*set_reset)(bool enable); +}; + +#endif diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h new file mode 100644 index 00000000..6f172788 --- /dev/null +++ b/include/linux/spi/xilinx_spi.h @@ -0,0 +1,20 @@ +#ifndef __LINUX_SPI_XILINX_SPI_H +#define __LINUX_SPI_XILINX_SPI_H + +/** + * struct xspi_platform_data - Platform data of the Xilinx SPI driver + * @num_chipselect: Number of chip select by the IP. + * @little_endian: If registers should be accessed little endian or not. + * @bits_per_word: Number of bits per word. + * @devices: Devices to add when the driver is probed. + * @num_devices: Number of devices in the devices array. + */ +struct xspi_platform_data { + u16 num_chipselect; + bool little_endian; + u8 bits_per_word; + struct spi_board_info *devices; + u8 num_devices; +}; + +#endif /* __LINUX_SPI_XILINX_SPI_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h new file mode 100644 index 00000000..7d537ced --- /dev/null +++ b/include/linux/spinlock.h @@ -0,0 +1,397 @@ +#ifndef __LINUX_SPINLOCK_H +#define __LINUX_SPINLOCK_H + +/* + * include/linux/spinlock.h - generic spinlock/rwlock declarations + * + * here's the role of the various spinlock/rwlock related include files: + * + * on SMP builds: + * + * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the + * initializers + * + * linux/spinlock_types.h: + * defines the generic type and initializers + * + * asm/spinlock.h: contains the arch_spin_*()/etc. 
lowlevel + * implementations, mostly inline assembly code + * + * (also included on UP-debug builds:) + * + * linux/spinlock_api_smp.h: + * contains the prototypes for the _spin_*() APIs. + * + * linux/spinlock.h: builds the final spin_*() APIs. + * + * on UP builds: + * + * linux/spinlock_type_up.h: + * contains the generic, simplified UP spinlock type. + * (which is an empty structure on non-debug builds) + * + * linux/spinlock_types.h: + * defines the generic type and initializers + * + * linux/spinlock_up.h: + * contains the arch_spin_*()/etc. version of UP + * builds. (which are NOPs on non-debug, non-preempt + * builds) + * + * (included on UP-non-debug builds:) + * + * linux/spinlock_api_up.h: + * builds the _spin_*() APIs. + * + * linux/spinlock.h: builds the final spin_*() APIs. + */ + +#include <linux/typecheck.h> +#include <linux/preempt.h> +#include <linux/linkage.h> +#include <linux/compiler.h> +#include <linux/irqflags.h> +#include <linux/thread_info.h> +#include <linux/kernel.h> +#include <linux/stringify.h> +#include <linux/bottom_half.h> +#include <asm/barrier.h> + + +/* + * Must define these before including other files, inline functions need them + */ +#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME + +#define LOCK_SECTION_START(extra) \ + ".subsection 1\n\t" \ + extra \ + ".ifndef " LOCK_SECTION_NAME "\n\t" \ + LOCK_SECTION_NAME ":\n\t" \ + ".endif\n" + +#define LOCK_SECTION_END \ + ".previous\n\t" + +#define __lockfunc __attribute__((section(".spinlock.text"))) + +/* + * Pull the arch_spinlock_t and arch_rwlock_t definitions: + */ +#include <linux/spinlock_types.h> + +/* + * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them): + */ +#ifdef CONFIG_SMP +# include <asm/spinlock.h> +#else +# include <linux/spinlock_up.h> +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key); +# define raw_spin_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __raw_spin_lock_init((lock), #lock, &__key); \ +} while (0) + +#else +# define raw_spin_lock_init(lock) \ + do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) +#endif + +#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) + +#ifdef CONFIG_GENERIC_LOCKBREAK +#define raw_spin_is_contended(lock) ((lock)->break_lock) +#else + +#ifdef arch_spin_is_contended +#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) +#else +#define raw_spin_is_contended(lock) (((void)(lock), 0)) +#endif /*arch_spin_is_contended*/ +#endif + +/* The lock does not imply full memory barrier. */ +#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK +static inline void smp_mb__after_lock(void) { smp_mb(); } +#endif + +/** + * raw_spin_unlock_wait - wait until the spinlock gets unlocked + * @lock: the spinlock in question. 
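Before the raw API continues below, it may help to see how the spinlock_t consumers (the spin_*() wrappers defined later in this header) are typically used; the example_dev structure here is made up purely for illustration.

/* Usage sketch: protecting driver state with a spinlock (illustrative only). */
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t	lock;
	unsigned int	pending;
};

static void example_dev_init(struct example_dev *dev)
{
	spin_lock_init(&dev->lock);
	dev->pending = 0;
}

/* may run concurrently with an interrupt handler, hence the irqsave variant */
static void example_dev_add_work(struct example_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->pending++;
	spin_unlock_irqrestore(&dev->lock, flags);
}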
+ */ +#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); +#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) + extern int do_raw_spin_trylock(raw_spinlock_t *lock); + extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); +#else +static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) +{ + __acquire(lock); + arch_spin_lock(&lock->raw_lock); +} + +static inline void +do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) +{ + __acquire(lock); + arch_spin_lock_flags(&lock->raw_lock, *flags); +} + +static inline int do_raw_spin_trylock(raw_spinlock_t *lock) +{ + return arch_spin_trylock(&(lock)->raw_lock); +} + +static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) +{ + arch_spin_unlock(&lock->raw_lock); + __release(lock); +} +#endif + +/* + * Define the various spin_lock methods. Note we define these + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The + * various methods are defined as nops in the case they are not + * required. + */ +#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) + +#define raw_spin_lock(lock) _raw_spin_lock(lock) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define raw_spin_lock_nested(lock, subclass) \ + _raw_spin_lock_nested(lock, subclass) + +# define raw_spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ + _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) +#else +# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) +# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) +#endif + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + +#define raw_spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_spin_lock_irqsave(lock); \ + } while (0) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ + } while (0) +#else +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_spin_lock_irqsave(lock); \ + } while (0) +#endif + +#else + +#define raw_spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_spin_lock_irqsave(lock, flags); \ + } while (0) + +#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ + raw_spin_lock_irqsave(lock, flags) + +#endif + +#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) +#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) +#define raw_spin_unlock(lock) _raw_spin_unlock(lock) +#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) + +#define raw_spin_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _raw_spin_unlock_irqrestore(lock, flags); \ + } while (0) +#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) + +#define raw_spin_trylock_bh(lock) \ + __cond_lock(lock, _raw_spin_trylock_bh(lock)) + +#define raw_spin_trylock_irq(lock) \ +({ \ + local_irq_disable(); \ + raw_spin_trylock(lock) ? \ + 1 : ({ local_irq_enable(); 0; }); \ +}) + +#define raw_spin_trylock_irqsave(lock, flags) \ +({ \ + local_irq_save(flags); \ + raw_spin_trylock(lock) ? 
\ + 1 : ({ local_irq_restore(flags); 0; }); \ +}) + +/** + * raw_spin_can_lock - would raw_spin_trylock() succeed? + * @lock: the spinlock in question. + */ +#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) + +/* Include rwlock functions */ +#include <linux/rwlock.h> + +/* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: + */ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +# include <linux/spinlock_api_smp.h> +#else +# include <linux/spinlock_api_up.h> +#endif + +/* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ + +static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) +{ + return &lock->rlock; +} + +#define spin_lock_init(_lock) \ +do { \ + spinlock_check(_lock); \ + raw_spin_lock_init(&(_lock)->rlock); \ +} while (0) + +static inline void spin_lock(spinlock_t *lock) +{ + raw_spin_lock(&lock->rlock); +} + +static inline void spin_lock_bh(spinlock_t *lock) +{ + raw_spin_lock_bh(&lock->rlock); +} + +static inline int spin_trylock(spinlock_t *lock) +{ + return raw_spin_trylock(&lock->rlock); +} + +#define spin_lock_nested(lock, subclass) \ +do { \ + raw_spin_lock_nested(spinlock_check(lock), subclass); \ +} while (0) + +#define spin_lock_nest_lock(lock, nest_lock) \ +do { \ + raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ +} while (0) + +static inline void spin_lock_irq(spinlock_t *lock) +{ + raw_spin_lock_irq(&lock->rlock); +} + +#define spin_lock_irqsave(lock, flags) \ +do { \ + raw_spin_lock_irqsave(spinlock_check(lock), flags); \ +} while (0) + +#define spin_lock_irqsave_nested(lock, flags, subclass) \ +do { \ + raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ +} while (0) + +static inline void spin_unlock(spinlock_t *lock) +{ + raw_spin_unlock(&lock->rlock); +} + +static inline void spin_unlock_bh(spinlock_t *lock) +{ + raw_spin_unlock_bh(&lock->rlock); +} + +static inline void spin_unlock_irq(spinlock_t *lock) +{ + raw_spin_unlock_irq(&lock->rlock); +} + +static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&lock->rlock, flags); +} + +static inline int spin_trylock_bh(spinlock_t *lock) +{ + return raw_spin_trylock_bh(&lock->rlock); +} + +static inline int spin_trylock_irq(spinlock_t *lock) +{ + return raw_spin_trylock_irq(&lock->rlock); +} + +#define spin_trylock_irqsave(lock, flags) \ +({ \ + raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ +}) + +static inline void spin_unlock_wait(spinlock_t *lock) +{ + raw_spin_unlock_wait(&lock->rlock); +} + +static inline int spin_is_locked(spinlock_t *lock) +{ + return raw_spin_is_locked(&lock->rlock); +} + +static inline int spin_is_contended(spinlock_t *lock) +{ + return raw_spin_is_contended(&lock->rlock); +} + +static inline int spin_can_lock(spinlock_t *lock) +{ + return raw_spin_can_lock(&lock->rlock); +} + +#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) + +/* + * Pull the atomic_t declaration: + * (asm-mips/atomic.h needs above definitions) + */ +#include <linux/atomic.h> +/** + * atomic_dec_and_lock - lock on reaching reference count zero + * @atomic: the atomic counter + * @lock: the spinlock in question + * + * Decrements @atomic by 1. If the result is 0, returns true and locks + * @lock. Returns false for all other cases. 
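The canonical consumer of this helper is a reference-count release path, where only the final decrement needs to take the lock that guards the containing list. The example_obj type and its list are fictitious; the pattern itself follows the documentation above.

/* Refcount-release sketch using atomic_dec_and_lock() (fictitious object type). */
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj {
	atomic_t		refcnt;
	struct list_head	node;
};

/* guards the list that example_obj instances live on */
static DEFINE_SPINLOCK(example_list_lock);

static void example_obj_put(struct example_obj *obj)
{
	/*
	 * Only the decrement that reaches zero acquires the lock; every
	 * other caller drops its reference without any locking cost.
	 */
	if (atomic_dec_and_lock(&obj->refcnt, &example_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&example_list_lock);
		kfree(obj);
	}
}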
+ */ +extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); +#define atomic_dec_and_lock(atomic, lock) \ + __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) + +#endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h new file mode 100644 index 00000000..51df117a --- /dev/null +++ b/include/linux/spinlock_api_smp.h @@ -0,0 +1,196 @@ +#ifndef __LINUX_SPINLOCK_API_SMP_H +#define __LINUX_SPINLOCK_API_SMP_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/spinlock_api_smp.h + * + * spinlock API declarations on SMP (and debug) + * (implemented in kernel/spinlock.c) + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ + +int in_lock_functions(unsigned long addr); + +#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) + +void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); +void __lockfunc +_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); +void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); +void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) + __acquires(lock); + +unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) + __acquires(lock); +unsigned long __lockfunc +_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); +int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock); +int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock); +void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); +void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); +void __lockfunc +_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) + __releases(lock); + +#ifdef CONFIG_INLINE_SPIN_LOCK +#define _raw_spin_lock(lock) __raw_spin_lock(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_LOCK_BH +#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ +#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE +#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_TRYLOCK +#define _raw_spin_trylock(lock) __raw_spin_trylock(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH +#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock) +#endif + +#ifndef CONFIG_UNINLINE_SPIN_UNLOCK +#define _raw_spin_unlock(lock) __raw_spin_unlock(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH +#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ +#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock) +#endif + +#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE +#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags) +#endif + +static inline int __raw_spin_trylock(raw_spinlock_t *lock) +{ + preempt_disable(); + if (do_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +/* + * If lockdep is enabled then we use the non-preemption spin-ops + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * not re-enabled during lock-acquire (which the 
preempt-spin-ops do): + */ +#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) + +static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + /* + * On lockdep we dont want the hand-coded irq-enable of + * do_raw_spin_lock_flags() code, because lockdep assumes + * that interrupts are not re-enabled during lock-acquire: + */ +#ifdef CONFIG_LOCKDEP + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +#else + do_raw_spin_lock_flags(lock, &flags); +#endif + return flags; +} + +static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +} + +static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +} + +static inline void __raw_spin_lock(raw_spinlock_t *lock) +{ + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +} + +#endif /* CONFIG_PREEMPT */ + +static inline void __raw_spin_unlock(raw_spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + do_raw_spin_unlock(lock); + preempt_enable(); +} + +static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, + unsigned long flags) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + do_raw_spin_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + do_raw_spin_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + do_raw_spin_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + if (do_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); + return 0; +} + +#include <linux/rwlock_api_smp.h> + +#endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h new file mode 100644 index 00000000..af1f4722 --- /dev/null +++ b/include/linux/spinlock_api_up.h @@ -0,0 +1,85 @@ +#ifndef __LINUX_SPINLOCK_API_UP_H +#define __LINUX_SPINLOCK_API_UP_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/spinlock_api_up.h + * + * spinlock API implementation on UP-nondebug (inlined implementation) + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). 
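/*
 * Illustrative sketch, not part of the original header: typical use of the
 * spin_lock_init()/spin_lock_irqsave() wrappers declared above when data is
 * shared with an interrupt handler. "struct foo_dev" is hypothetical.
 */
struct foo_dev {
	spinlock_t	lock;
	unsigned long	pending;
};

static void foo_dev_init(struct foo_dev *fd)
{
	spin_lock_init(&fd->lock);
	fd->pending = 0;
}

static void foo_dev_mark_pending(struct foo_dev *fd, unsigned long bits)
{
	unsigned long flags;

	/* Excludes both other CPUs and local interrupts while we update. */
	spin_lock_irqsave(&fd->lock, flags);
	fd->pending |= bits;
	spin_unlock_irqrestore(&fd->lock, flags);
}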
+ */ + +#define in_lock_functions(ADDR) 0 + +#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0) + +/* + * In the UP-nondebug case there's no real locking going on, so the + * only thing we have to do is to keep the preempt counts and irq + * flags straight, to suppress compiler warnings of unused lock + * variables, and to add the proper checker annotations: + */ +#define __LOCK(lock) \ + do { preempt_disable(); __acquire(lock); (void)(lock); } while (0) + +#define __LOCK_BH(lock) \ + do { local_bh_disable(); __LOCK(lock); } while (0) + +#define __LOCK_IRQ(lock) \ + do { local_irq_disable(); __LOCK(lock); } while (0) + +#define __LOCK_IRQSAVE(lock, flags) \ + do { local_irq_save(flags); __LOCK(lock); } while (0) + +#define __UNLOCK(lock) \ + do { preempt_enable(); __release(lock); (void)(lock); } while (0) + +#define __UNLOCK_BH(lock) \ + do { preempt_enable_no_resched(); local_bh_enable(); \ + __release(lock); (void)(lock); } while (0) + +#define __UNLOCK_IRQ(lock) \ + do { local_irq_enable(); __UNLOCK(lock); } while (0) + +#define __UNLOCK_IRQRESTORE(lock, flags) \ + do { local_irq_restore(flags); __UNLOCK(lock); } while (0) + +#define _raw_spin_lock(lock) __LOCK(lock) +#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) +#define _raw_read_lock(lock) __LOCK(lock) +#define _raw_write_lock(lock) __LOCK(lock) +#define _raw_spin_lock_bh(lock) __LOCK_BH(lock) +#define _raw_read_lock_bh(lock) __LOCK_BH(lock) +#define _raw_write_lock_bh(lock) __LOCK_BH(lock) +#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock) +#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) +#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; }) +#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) +#define _raw_spin_unlock(lock) __UNLOCK(lock) +#define _raw_read_unlock(lock) __UNLOCK(lock) +#define _raw_write_unlock(lock) __UNLOCK(lock) +#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock) +#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define _raw_spin_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_read_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) +#define _raw_write_unlock_irqrestore(lock, flags) \ + __UNLOCK_IRQRESTORE(lock, flags) + +#endif /* __LINUX_SPINLOCK_API_UP_H */ diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h new file mode 100644 index 00000000..73548eb1 --- /dev/null +++ b/include/linux/spinlock_types.h @@ -0,0 +1,88 @@ +#ifndef __LINUX_SPINLOCK_TYPES_H +#define __LINUX_SPINLOCK_TYPES_H + +/* + * include/linux/spinlock_types.h - generic spinlock type definitions + * and initializers + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). 
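/*
 * Illustrative sketch, not part of the original header: spin_trylock() for
 * opportunistic work that can simply be skipped when the lock is contended.
 * "struct foo_stats" is hypothetical.
 */
static void foo_stats_bump(struct foo_stats *st)
{
	if (!spin_trylock(&st->lock))
		return;		/* contended: drop this sample */
	st->samples++;
	spin_unlock(&st->lock);
}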
+ */ + +#if defined(CONFIG_SMP) +# include <asm/spinlock_types.h> +#else +# include <linux/spinlock_types_up.h> +#endif + +#include <linux/lockdep.h> + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_GENERIC_LOCKBREAK + unsigned int break_lock; +#endif +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +typedef struct spinlock { + union { + struct raw_spinlock rlock; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lockdep_map dep_map; + }; +#endif + }; +} spinlock_t; + +#define __SPIN_LOCK_INITIALIZER(lockname) \ + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + +#define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) + +#include <linux/rwlock_types.h> + +#endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h new file mode 100644 index 00000000..c09b6407 --- /dev/null +++ b/include/linux/spinlock_types_up.h @@ -0,0 +1,37 @@ +#ifndef __LINUX_SPINLOCK_TYPES_UP_H +#define __LINUX_SPINLOCK_TYPES_UP_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +/* + * include/linux/spinlock_types_up.h - spinlock type definitions for UP + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + */ + +#ifdef CONFIG_DEBUG_SPINLOCK + +typedef struct { + volatile unsigned int slock; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } + +#else + +typedef struct { } arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { } + +#endif + +typedef struct { + /* no debug version on UP */ +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { } + +#endif /* __LINUX_SPINLOCK_TYPES_UP_H */ diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h new file mode 100644 index 00000000..a26e2fb6 --- /dev/null +++ b/include/linux/spinlock_up.h @@ -0,0 +1,78 @@ +#ifndef __LINUX_SPINLOCK_UP_H +#define __LINUX_SPINLOCK_UP_H + +#ifndef __LINUX_SPINLOCK_H +# error "please don't include this file directly" +#endif + +#include <asm/processor.h> /* for cpu_relax() */ + +/* + * include/linux/spinlock_up.h - UP-debug version of spinlocks. + * + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar + * Released under the General Public License (GPL). + * + * In the debug case, 1 means unlocked, 0 means locked. 
(the values + * are inverted, to catch initialization bugs) + * + * No atomicity anywhere, we are on UP. + */ + +#ifdef CONFIG_DEBUG_SPINLOCK +#define arch_spin_is_locked(x) ((x)->slock == 0) + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + lock->slock = 0; +} + +static inline void +arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +{ + local_irq_save(flags); + lock->slock = 0; +} + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + char oldval = lock->slock; + + lock->slock = 0; + + return oldval > 0; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + lock->slock = 1; +} + +/* + * Read-write spinlocks. No debug version. + */ +#define arch_read_lock(lock) do { (void)(lock); } while (0) +#define arch_write_lock(lock) do { (void)(lock); } while (0) +#define arch_read_trylock(lock) ({ (void)(lock); 1; }) +#define arch_write_trylock(lock) ({ (void)(lock); 1; }) +#define arch_read_unlock(lock) do { (void)(lock); } while (0) +#define arch_write_unlock(lock) do { (void)(lock); } while (0) + +#else /* DEBUG_SPINLOCK */ +#define arch_spin_is_locked(lock) ((void)(lock), 0) +/* for sched.c and kernel_lock.c: */ +# define arch_spin_lock(lock) do { (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) +# define arch_spin_unlock(lock) do { (void)(lock); } while (0) +# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) +#endif /* DEBUG_SPINLOCK */ + +#define arch_spin_is_contended(lock) (((void)(lock), 0)) + +#define arch_read_can_lock(lock) (((void)(lock), 1)) +#define arch_write_can_lock(lock) (((void)(lock), 1)) + +#define arch_spin_unlock_wait(lock) \ + do { cpu_relax(); } while (arch_spin_is_locked(lock)) + +#endif /* __LINUX_SPINLOCK_UP_H */ diff --git a/include/linux/splice.h b/include/linux/splice.h new file mode 100644 index 00000000..09a545a7 --- /dev/null +++ b/include/linux/splice.h @@ -0,0 +1,94 @@ +/* + * Function declerations and data structures related to the splice + * implementation. 
+ * + * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com> + * + */ +#ifndef SPLICE_H +#define SPLICE_H + +#include <linux/pipe_fs_i.h> + +/* + * Flags passed in from splice/tee/vmsplice + */ +#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ +#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ + /* we may still block on the fd we splice */ + /* from/to, of course */ +#define SPLICE_F_MORE (0x04) /* expect more data */ +#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */ + +/* + * Passed to the actors + */ +struct splice_desc { + unsigned int len, total_len; /* current and remaining length */ + unsigned int flags; /* splice flags */ + /* + * actor() private data + */ + union { + void __user *userptr; /* memory to write to */ + struct file *file; /* file to read/write */ + void *data; /* cookie */ + } u; + loff_t pos; /* file position */ + size_t num_spliced; /* number of bytes already spliced */ + bool need_wakeup; /* need to wake up writer */ +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + unsigned long private; +}; + +/* + * Passed to splice_to_pipe + */ +struct splice_pipe_desc { + struct page **pages; /* page map */ + struct partial_page *partial; /* pages[] may not be contig */ + int nr_pages; /* number of populated pages in map */ + unsigned int nr_pages_max; /* pages[] & partial[] arrays size */ + unsigned int flags; /* splice flags */ + const struct pipe_buf_operations *ops;/* ops associated with output pipe */ + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *, + struct splice_desc *); +typedef int (splice_direct_actor)(struct pipe_inode_info *, + struct splice_desc *); + +extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *, + loff_t *, size_t, unsigned int, + splice_actor *); +extern ssize_t __splice_from_pipe(struct pipe_inode_info *, + struct splice_desc *, splice_actor *); +extern int splice_from_pipe_feed(struct pipe_inode_info *, struct splice_desc *, + splice_actor *); +extern int splice_from_pipe_next(struct pipe_inode_info *, + struct splice_desc *); +extern void splice_from_pipe_begin(struct splice_desc *); +extern void splice_from_pipe_end(struct pipe_inode_info *, + struct splice_desc *); +extern int pipe_to_file(struct pipe_inode_info *, struct pipe_buffer *, + struct splice_desc *); + +extern ssize_t splice_to_pipe(struct pipe_inode_info *, + struct splice_pipe_desc *); +extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, + splice_direct_actor *); + +/* + * for dynamic pipe sizing + */ +extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *); +extern void splice_shrink_spd(struct splice_pipe_desc *); +extern void spd_release_page(struct splice_pipe_desc *, unsigned int); + +extern const struct pipe_buf_operations page_cache_pipe_buf_ops; +#endif diff --git a/include/linux/srcu.h b/include/linux/srcu.h new file mode 100644 index 00000000..d3d5fa54 --- /dev/null +++ b/include/linux/srcu.h @@ -0,0 +1,239 @@ +/* + * Sleepable Read-Copy Update mechanism for mutual exclusion + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
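/*
 * Illustrative sketch, not part of the original header: how the splice
 * declarations above usually fit together. A splice_write path wraps
 * splice_from_pipe() around a per-buffer actor; foo_consume_buf() and the
 * other foo_* names are hypothetical.
 */
static int foo_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		     struct splice_desc *sd)
{
	/* Consume up to sd->len bytes of 'buf'; return bytes consumed or -errno. */
	return foo_consume_buf(sd->u.file, buf, sd->len);
}

static ssize_t foo_splice_write(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, foo_actor);
}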
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2006 + * + * Author: Paul McKenney <paulmck@us.ibm.com> + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU/ *.txt + * + */ + +#ifndef _LINUX_SRCU_H +#define _LINUX_SRCU_H + +#include <linux/mutex.h> +#include <linux/rcupdate.h> + +struct srcu_struct_array { + int c[2]; +}; + +struct srcu_struct { + int completed; + struct srcu_struct_array __percpu *per_cpu_ref; + struct mutex mutex; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +}; + +#ifndef CONFIG_PREEMPT +#define srcu_barrier() barrier() +#else /* #ifndef CONFIG_PREEMPT */ +#define srcu_barrier() +#endif /* #else #ifndef CONFIG_PREEMPT */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +int __init_srcu_struct(struct srcu_struct *sp, const char *name, + struct lock_class_key *key); + +#define init_srcu_struct(sp) \ +({ \ + static struct lock_class_key __srcu_key; \ + \ + __init_srcu_struct((sp), #sp, &__srcu_key); \ +}) + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +int init_srcu_struct(struct srcu_struct *sp); + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +void cleanup_srcu_struct(struct srcu_struct *sp); +int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp); +void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); +void synchronize_srcu(struct srcu_struct *sp); +void synchronize_srcu_expedited(struct srcu_struct *sp); +long srcu_batches_completed(struct srcu_struct *sp); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +/** + * srcu_read_lock_held - might we be in SRCU read-side critical section? + * + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU + * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, + * this assumes we are in an SRCU read-side critical section unless it can + * prove otherwise. + * + * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot + * and while lockdep is disabled. + * + * Note that if the CPU is in the idle loop from an RCU point of view + * (ie: that we are in the section between rcu_idle_enter() and + * rcu_idle_exit()) then srcu_read_lock_held() returns false even if + * the CPU did an srcu_read_lock(). The reason for this is that RCU + * ignores CPUs that are in such a section, considering these as in + * extended quiescent state, so such a CPU is effectively never in an + * RCU read-side critical section regardless of what RCU primitives it + * invokes. This state of affairs is required --- we need to keep an + * RCU-free window in idle where the CPU may possibly enter into low + * power mode. This way we can notice an extended quiescent state to + * other CPUs that started a grace period. Otherwise we would delay any + * grace period as long as we run in the idle task. + * + * Similarly, we avoid claiming an SRCU read lock held if the current + * CPU is offline. 
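/*
 * Illustrative sketch, not part of the original header: the usual SRCU
 * pattern, using the srcu_read_lock()/srcu_dereference() helpers declared a
 * little further below. Readers bracket their access; the updater publishes a
 * new version and waits for all pre-existing readers before freeing the old
 * one. "foo_srcu", "foo_ptr" and "struct foo" are hypothetical.
 */
static struct srcu_struct foo_srcu;	/* init_srcu_struct(&foo_srcu) at setup */
static struct foo __rcu *foo_ptr;

static int foo_read_val(void)
{
	int idx, val;

	idx = srcu_read_lock(&foo_srcu);
	val = srcu_dereference(foo_ptr, &foo_srcu)->val;	/* assumed non-NULL */
	srcu_read_unlock(&foo_srcu, idx);
	return val;
}

static void foo_update(struct foo *newp)
{
	struct foo *old = rcu_dereference_protected(foo_ptr, 1);

	rcu_assign_pointer(foo_ptr, newp);
	synchronize_srcu(&foo_srcu);	/* wait for readers of 'old' to finish */
	kfree(old);
}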
+ */ +static inline int srcu_read_lock_held(struct srcu_struct *sp) +{ + if (!debug_lockdep_rcu_enabled()) + return 1; + if (rcu_is_cpu_idle()) + return 0; + if (!rcu_lockdep_current_cpu_online()) + return 0; + return lock_is_held(&sp->dep_map); +} + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +static inline int srcu_read_lock_held(struct srcu_struct *sp) +{ + return 1; +} + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +/** + * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing + * @p: the pointer to fetch and protect for later dereferencing + * @sp: pointer to the srcu_struct, which is used to check that we + * really are in an SRCU read-side critical section. + * @c: condition to check for update-side use + * + * If PROVE_RCU is enabled, invoking this outside of an RCU read-side + * critical section will result in an RCU-lockdep splat, unless @c evaluates + * to 1. The @c argument will normally be a logical expression containing + * lockdep_is_held() calls. + */ +#define srcu_dereference_check(p, sp, c) \ + __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu) + +/** + * srcu_dereference - fetch SRCU-protected pointer for later dereferencing + * @p: the pointer to fetch and protect for later dereferencing + * @sp: pointer to the srcu_struct, which is used to check that we + * really are in an SRCU read-side critical section. + * + * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU + * is enabled, invoking this outside of an RCU read-side critical + * section will result in an RCU-lockdep splat. + */ +#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) + +/** + * srcu_read_lock - register a new reader for an SRCU-protected structure. + * @sp: srcu_struct in which to register the new reader. + * + * Enter an SRCU read-side critical section. Note that SRCU read-side + * critical sections may be nested. However, it is illegal to + * call anything that waits on an SRCU grace period for the same + * srcu_struct, whether directly or indirectly. Please note that + * one way to indirectly wait on an SRCU grace period is to acquire + * a mutex that is held elsewhere while calling synchronize_srcu() or + * synchronize_srcu_expedited(). + * + * Note that srcu_read_lock() and the matching srcu_read_unlock() must + * occur in the same context, for example, it is illegal to invoke + * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() + * was invoked in process context. + */ +static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) +{ + int retval = __srcu_read_lock(sp); + + rcu_lock_acquire(&(sp)->dep_map); + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "srcu_read_lock() used illegally while idle"); + return retval; +} + +/** + * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. + * @sp: srcu_struct in which to unregister the old reader. + * @idx: return value from corresponding srcu_read_lock(). + * + * Exit an SRCU read-side critical section. + */ +static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) + __releases(sp) +{ + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "srcu_read_unlock() used illegally while idle"); + rcu_lock_release(&(sp)->dep_map); + __srcu_read_unlock(sp, idx); +} + +/** + * srcu_read_lock_raw - register a new reader for an SRCU-protected structure. + * @sp: srcu_struct in which to register the new reader. + * + * Enter an SRCU read-side critical section. Similar to srcu_read_lock(), + * but avoids the RCU-lockdep checking. 
This means that it is legal to + * use srcu_read_lock_raw() in one context, for example, in an exception + * handler, and then have the matching srcu_read_unlock_raw() in another + * context, for example in the task that took the exception. + * + * However, the entire SRCU read-side critical section must reside within a + * single task. For example, beware of using srcu_read_lock_raw() in + * a device interrupt handler and srcu_read_unlock() in the interrupted + * task: This will not work if interrupts are threaded. + */ +static inline int srcu_read_lock_raw(struct srcu_struct *sp) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __srcu_read_lock(sp); + local_irq_restore(flags); + return ret; +} + +/** + * srcu_read_unlock_raw - unregister reader from an SRCU-protected structure. + * @sp: srcu_struct in which to unregister the old reader. + * @idx: return value from corresponding srcu_read_lock_raw(). + * + * Exit an SRCU read-side critical section without lockdep-RCU checking. + * See srcu_read_lock_raw() for more details. + */ +static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx) +{ + unsigned long flags; + + local_irq_save(flags); + __srcu_read_unlock(sp, idx); + local_irq_restore(flags); +} + +#endif diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h new file mode 100644 index 00000000..d2768318 --- /dev/null +++ b/include/linux/ssb/ssb.h @@ -0,0 +1,625 @@ +#ifndef LINUX_SSB_H_ +#define LINUX_SSB_H_ + +#include <linux/device.h> +#include <linux/list.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/pci.h> +#include <linux/mod_devicetable.h> +#include <linux/dma-mapping.h> + +#include <linux/ssb/ssb_regs.h> + + +struct pcmcia_device; +struct ssb_bus; +struct ssb_driver; + +struct ssb_sprom_core_pwr_info { + u8 itssi_2g, itssi_5g; + u8 maxpwr_2g, maxpwr_5gl, maxpwr_5g, maxpwr_5gh; + u16 pa_2g[4], pa_5gl[4], pa_5g[4], pa_5gh[4]; +}; + +struct ssb_sprom { + u8 revision; + u8 il0mac[6]; /* MAC address for 802.11b/g */ + u8 et0mac[6]; /* MAC address for Ethernet */ + u8 et1mac[6]; /* MAC address for 802.11a */ + u8 et0phyaddr; /* MII address for enet0 */ + u8 et1phyaddr; /* MII address for enet1 */ + u8 et0mdcport; /* MDIO for enet0 */ + u8 et1mdcport; /* MDIO for enet1 */ + u16 board_rev; /* Board revision number from SPROM. */ + u16 board_num; /* Board number from SPROM. */ + u16 board_type; /* Board type from SPROM. 
*/ + u8 country_code; /* Country Code */ + char alpha2[2]; /* Country Code as two chars like EU or US */ + u8 leddc_on_time; /* LED Powersave Duty Cycle On Count */ + u8 leddc_off_time; /* LED Powersave Duty Cycle Off Count */ + u8 ant_available_a; /* 2GHz antenna available bits (up to 4) */ + u8 ant_available_bg; /* 5GHz antenna available bits (up to 4) */ + u16 pa0b0; + u16 pa0b1; + u16 pa0b2; + u16 pa1b0; + u16 pa1b1; + u16 pa1b2; + u16 pa1lob0; + u16 pa1lob1; + u16 pa1lob2; + u16 pa1hib0; + u16 pa1hib1; + u16 pa1hib2; + u8 gpio0; /* GPIO pin 0 */ + u8 gpio1; /* GPIO pin 1 */ + u8 gpio2; /* GPIO pin 2 */ + u8 gpio3; /* GPIO pin 3 */ + u8 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */ + u8 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */ + u8 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */ + u8 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */ + u8 itssi_a; /* Idle TSSI Target for A-PHY */ + u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */ + u8 tri2g; /* 2.4GHz TX isolation */ + u8 tri5gl; /* 5.2GHz TX isolation */ + u8 tri5g; /* 5.3GHz TX isolation */ + u8 tri5gh; /* 5.8GHz TX isolation */ + u8 txpid2g[4]; /* 2GHz TX power index */ + u8 txpid5gl[4]; /* 4.9 - 5.1GHz TX power index */ + u8 txpid5g[4]; /* 5.1 - 5.5GHz TX power index */ + u8 txpid5gh[4]; /* 5.5 - ...GHz TX power index */ + s8 rxpo2g; /* 2GHz RX power offset */ + s8 rxpo5g; /* 5GHz RX power offset */ + u8 rssisav2g; /* 2GHz RSSI params */ + u8 rssismc2g; + u8 rssismf2g; + u8 bxa2g; /* 2GHz BX arch */ + u8 rssisav5g; /* 5GHz RSSI params */ + u8 rssismc5g; + u8 rssismf5g; + u8 bxa5g; /* 5GHz BX arch */ + u16 cck2gpo; /* CCK power offset */ + u32 ofdm2gpo; /* 2.4GHz OFDM power offset */ + u32 ofdm5glpo; /* 5.2GHz OFDM power offset */ + u32 ofdm5gpo; /* 5.3GHz OFDM power offset */ + u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */ + u16 boardflags_lo; /* Board flags (bits 0-15) */ + u16 boardflags_hi; /* Board flags (bits 16-31) */ + u16 boardflags2_lo; /* Board flags (bits 32-47) */ + u16 boardflags2_hi; /* Board flags (bits 48-63) */ + /* TODO store board flags in a single u64 */ + + struct ssb_sprom_core_pwr_info core_pwr_info[4]; + + /* Antenna gain values for up to 4 antennas + * on each band. Values in dBm/4 (Q5.2). Negative gain means the + * loss in the connectors is bigger than the gain. 
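/*
 * Illustrative sketch, not part of the original header: the antenna_gain
 * values above are stored in quarter steps (value/4, Q5.2), so a consumer
 * converts to whole units by dividing by four. "sprom" is a hypothetical
 * pointer to a filled-in SPROM.
 */
static inline s8 foo_ant0_gain(const struct ssb_sprom *sprom)
{
	return sprom->antenna_gain.a0 / 4;	/* quarter steps -> whole units */
}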
*/ + struct { + s8 a0, a1, a2, a3; + } antenna_gain; + + struct { + struct { + u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; + } ghz2; + struct { + u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; + } ghz5; + } fem; + + u16 mcs2gpo[8]; + u16 mcs5gpo[8]; + u16 mcs5glpo[8]; + u16 mcs5ghpo[8]; + u8 opo; + + u8 rxgainerr2ga[3]; + u8 rxgainerr5gla[3]; + u8 rxgainerr5gma[3]; + u8 rxgainerr5gha[3]; + u8 rxgainerr5gua[3]; + + u8 noiselvl2ga[3]; + u8 noiselvl5gla[3]; + u8 noiselvl5gma[3]; + u8 noiselvl5gha[3]; + u8 noiselvl5gua[3]; + + u8 regrev; + u8 txchain; + u8 rxchain; + u8 antswitch; + u16 cddpo; + u16 stbcpo; + u16 bw40po; + u16 bwduppo; + + u8 tempthresh; + u8 tempoffset; + u16 rawtempsense; + u8 measpower; + u8 tempsense_slope; + u8 tempcorrx; + u8 tempsense_option; + u8 freqoffset_corr; + u8 iqcal_swp_dis; + u8 hw_iqcal_en; + u8 elna2g; + u8 elna5g; + u8 phycal_tempdelta; + u8 temps_period; + u8 temps_hysteresis; + u8 measpower1; + u8 measpower2; + u8 pcieingress_war; + + /* power per rate from sromrev 9 */ + u16 cckbw202gpo; + u16 cckbw20ul2gpo; + u32 legofdmbw202gpo; + u32 legofdmbw20ul2gpo; + u32 legofdmbw205glpo; + u32 legofdmbw20ul5glpo; + u32 legofdmbw205gmpo; + u32 legofdmbw20ul5gmpo; + u32 legofdmbw205ghpo; + u32 legofdmbw20ul5ghpo; + u32 mcsbw202gpo; + u32 mcsbw20ul2gpo; + u32 mcsbw402gpo; + u32 mcsbw205glpo; + u32 mcsbw20ul5glpo; + u32 mcsbw405glpo; + u32 mcsbw205gmpo; + u32 mcsbw20ul5gmpo; + u32 mcsbw405gmpo; + u32 mcsbw205ghpo; + u32 mcsbw20ul5ghpo; + u32 mcsbw405ghpo; + u16 mcs32po; + u16 legofdm40duppo; + u8 sar2g; + u8 sar5g; +}; + +/* Information about the PCB the circuitry is soldered on. */ +struct ssb_boardinfo { + u16 vendor; + u16 type; + u8 rev; +}; + + +struct ssb_device; +/* Lowlevel read/write operations on the device MMIO. + * Internal, don't use that outside of ssb. */ +struct ssb_bus_ops { + u8 (*read8)(struct ssb_device *dev, u16 offset); + u16 (*read16)(struct ssb_device *dev, u16 offset); + u32 (*read32)(struct ssb_device *dev, u16 offset); + void (*write8)(struct ssb_device *dev, u16 offset, u8 value); + void (*write16)(struct ssb_device *dev, u16 offset, u16 value); + void (*write32)(struct ssb_device *dev, u16 offset, u32 value); +#ifdef CONFIG_SSB_BLOCKIO + void (*block_read)(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width); + void (*block_write)(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width); +#endif +}; + + +/* Core-ID values. 
*/ +#define SSB_DEV_CHIPCOMMON 0x800 +#define SSB_DEV_ILINE20 0x801 +#define SSB_DEV_SDRAM 0x803 +#define SSB_DEV_PCI 0x804 +#define SSB_DEV_MIPS 0x805 +#define SSB_DEV_ETHERNET 0x806 +#define SSB_DEV_V90 0x807 +#define SSB_DEV_USB11_HOSTDEV 0x808 +#define SSB_DEV_ADSL 0x809 +#define SSB_DEV_ILINE100 0x80A +#define SSB_DEV_IPSEC 0x80B +#define SSB_DEV_PCMCIA 0x80D +#define SSB_DEV_INTERNAL_MEM 0x80E +#define SSB_DEV_MEMC_SDRAM 0x80F +#define SSB_DEV_EXTIF 0x811 +#define SSB_DEV_80211 0x812 +#define SSB_DEV_MIPS_3302 0x816 +#define SSB_DEV_USB11_HOST 0x817 +#define SSB_DEV_USB11_DEV 0x818 +#define SSB_DEV_USB20_HOST 0x819 +#define SSB_DEV_USB20_DEV 0x81A +#define SSB_DEV_SDIO_HOST 0x81B +#define SSB_DEV_ROBOSWITCH 0x81C +#define SSB_DEV_PARA_ATA 0x81D +#define SSB_DEV_SATA_XORDMA 0x81E +#define SSB_DEV_ETHERNET_GBIT 0x81F +#define SSB_DEV_PCIE 0x820 +#define SSB_DEV_MIMO_PHY 0x821 +#define SSB_DEV_SRAM_CTRLR 0x822 +#define SSB_DEV_MINI_MACPHY 0x823 +#define SSB_DEV_ARM_1176 0x824 +#define SSB_DEV_ARM_7TDMI 0x825 + +/* Vendor-ID values */ +#define SSB_VENDOR_BROADCOM 0x4243 + +/* Some kernel subsystems poke with dev->drvdata, so we must use the + * following ugly workaround to get from struct device to struct ssb_device */ +struct __ssb_dev_wrapper { + struct device dev; + struct ssb_device *sdev; +}; + +struct ssb_device { + /* Having a copy of the ops pointer in each dev struct + * is an optimization. */ + const struct ssb_bus_ops *ops; + + struct device *dev, *dma_dev; + + struct ssb_bus *bus; + struct ssb_device_id id; + + u8 core_index; + unsigned int irq; + + /* Internal-only stuff follows. */ + void *drvdata; /* Per-device data */ + void *devtypedata; /* Per-devicetype (eg 802.11) data */ +}; + +/* Go from struct device to struct ssb_device. */ +static inline +struct ssb_device * dev_to_ssb_dev(struct device *dev) +{ + struct __ssb_dev_wrapper *wrap; + wrap = container_of(dev, struct __ssb_dev_wrapper, dev); + return wrap->sdev; +} + +/* Device specific user data */ +static inline +void ssb_set_drvdata(struct ssb_device *dev, void *data) +{ + dev->drvdata = data; +} +static inline +void * ssb_get_drvdata(struct ssb_device *dev) +{ + return dev->drvdata; +} + +/* Devicetype specific user data. 
This is per device-type (not per device) */ +void ssb_set_devtypedata(struct ssb_device *dev, void *data); +static inline +void * ssb_get_devtypedata(struct ssb_device *dev) +{ + return dev->devtypedata; +} + + +struct ssb_driver { + const char *name; + const struct ssb_device_id *id_table; + + int (*probe)(struct ssb_device *dev, const struct ssb_device_id *id); + void (*remove)(struct ssb_device *dev); + int (*suspend)(struct ssb_device *dev, pm_message_t state); + int (*resume)(struct ssb_device *dev); + void (*shutdown)(struct ssb_device *dev); + + struct device_driver drv; +}; +#define drv_to_ssb_drv(_drv) container_of(_drv, struct ssb_driver, drv) + +extern int __ssb_driver_register(struct ssb_driver *drv, struct module *owner); +#define ssb_driver_register(drv) \ + __ssb_driver_register(drv, THIS_MODULE) + +extern void ssb_driver_unregister(struct ssb_driver *drv); + + + + +enum ssb_bustype { + SSB_BUSTYPE_SSB, /* This SSB bus is the system bus */ + SSB_BUSTYPE_PCI, /* SSB is connected to PCI bus */ + SSB_BUSTYPE_PCMCIA, /* SSB is connected to PCMCIA bus */ + SSB_BUSTYPE_SDIO, /* SSB is connected to SDIO bus */ +}; + +/* board_vendor */ +#define SSB_BOARDVENDOR_BCM 0x14E4 /* Broadcom */ +#define SSB_BOARDVENDOR_DELL 0x1028 /* Dell */ +#define SSB_BOARDVENDOR_HP 0x0E11 /* HP */ +/* board_type */ +#define SSB_BOARD_BCM94306MP 0x0418 +#define SSB_BOARD_BCM4309G 0x0421 +#define SSB_BOARD_BCM4306CB 0x0417 +#define SSB_BOARD_BCM4309MP 0x040C +#define SSB_BOARD_MP4318 0x044A +#define SSB_BOARD_BU4306 0x0416 +#define SSB_BOARD_BU4309 0x040A +/* chip_package */ +#define SSB_CHIPPACK_BCM4712S 1 /* Small 200pin 4712 */ +#define SSB_CHIPPACK_BCM4712M 2 /* Medium 225pin 4712 */ +#define SSB_CHIPPACK_BCM4712L 0 /* Large 340pin 4712 */ + +#include <linux/ssb/ssb_driver_chipcommon.h> +#include <linux/ssb/ssb_driver_mips.h> +#include <linux/ssb/ssb_driver_extif.h> +#include <linux/ssb/ssb_driver_pci.h> + +struct ssb_bus { + /* The MMIO area. */ + void __iomem *mmio; + + const struct ssb_bus_ops *ops; + + /* The core currently mapped into the MMIO window. + * Not valid on all host-buses. So don't use outside of SSB. */ + struct ssb_device *mapped_device; + union { + /* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */ + u8 mapped_pcmcia_seg; + /* Current SSB base address window for SDIO. */ + u32 sdio_sbaddr; + }; + /* Lock for core and segment switching. + * On PCMCIA-host busses this is used to protect the whole MMIO access. */ + spinlock_t bar_lock; + + /* The host-bus this backplane is running on. */ + enum ssb_bustype bustype; + /* Pointers to the host-bus. Check bustype before using any of these pointers. */ + union { + /* Pointer to the PCI bus (only valid if bustype == SSB_BUSTYPE_PCI). */ + struct pci_dev *host_pci; + /* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */ + struct pcmcia_device *host_pcmcia; + /* Pointer to the SDIO device (only if bustype == SSB_BUSTYPE_SDIO). */ + struct sdio_func *host_sdio; + }; + + /* See enum ssb_quirks */ + unsigned int quirks; + +#ifdef CONFIG_SSB_SPROM + /* Mutex to protect the SPROM writing. */ + struct mutex sprom_mutex; +#endif + + /* ID information about the Chip. */ + u16 chip_id; + u8 chip_rev; + u16 sprom_offset; + u16 sprom_size; /* number of words in sprom */ + u8 chip_package; + + /* List of devices (cores) on the backplane. */ + struct ssb_device devices[SSB_MAX_NR_CORES]; + u8 nr_devices; + + /* Software ID number for this bus. 
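/*
 * Illustrative sketch, not part of the original header: the shape of a driver
 * built on the hooks above. All foo_* names are hypothetical; the id-table
 * fields follow struct ssb_device_id from <linux/mod_devicetable.h>.
 */
static const struct ssb_device_id foo_ssb_tbl[] = {
	{ .vendor = SSB_VENDOR_BROADCOM, .coreid = SSB_DEV_80211, .revision = 5 },
	{ },
};

static int foo_probe(struct ssb_device *dev, const struct ssb_device_id *id)
{
	struct foo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	ssb_set_drvdata(dev, priv);
	return 0;
}

static void foo_remove(struct ssb_device *dev)
{
	kfree(ssb_get_drvdata(dev));
}

static struct ssb_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ssb_tbl,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
/* Registered from module init with ssb_driver_register(&foo_driver). */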
*/ + unsigned int busnumber; + + /* The ChipCommon device (if available). */ + struct ssb_chipcommon chipco; + /* The PCI-core device (if available). */ + struct ssb_pcicore pcicore; + /* The MIPS-core device (if available). */ + struct ssb_mipscore mipscore; + /* The EXTif-core device (if available). */ + struct ssb_extif extif; + + /* The following structure elements are not available in early + * SSB initialization. Though, they are available for regular + * registered drivers at any stage. So be careful when + * using them in the ssb core code. */ + + /* ID information about the PCB. */ + struct ssb_boardinfo boardinfo; + /* Contents of the SPROM. */ + struct ssb_sprom sprom; + /* If the board has a cardbus slot, this is set to true. */ + bool has_cardbus_slot; + +#ifdef CONFIG_SSB_EMBEDDED + /* Lock for GPIO register access. */ + spinlock_t gpio_lock; +#endif /* EMBEDDED */ + + /* Internal-only stuff follows. Do not touch. */ + struct list_head list; +#ifdef CONFIG_SSB_DEBUG + /* Is the bus already powered up? */ + bool powered_up; + int power_warn_count; +#endif /* DEBUG */ +}; + +enum ssb_quirks { + /* SDIO connected card requires performing a read after writing a 32-bit value */ + SSB_QUIRK_SDIO_READ_AFTER_WRITE32 = (1 << 0), +}; + +/* The initialization-invariants. */ +struct ssb_init_invariants { + /* Versioning information about the PCB. */ + struct ssb_boardinfo boardinfo; + /* The SPROM information. That's either stored in an + * EEPROM or NVRAM on the board. */ + struct ssb_sprom sprom; + /* If the board has a cardbus slot, this is set to true. */ + bool has_cardbus_slot; +}; +/* Type of function to fetch the invariants. */ +typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus, + struct ssb_init_invariants *iv); + +/* Register a SSB system bus. get_invariants() is called after the + * basic system devices are initialized. + * The invariants are usually fetched from some NVRAM. + * Put the invariants into the struct pointed to by iv. */ +extern int ssb_bus_ssbbus_register(struct ssb_bus *bus, + unsigned long baseaddr, + ssb_invariants_func_t get_invariants); +#ifdef CONFIG_SSB_PCIHOST +extern int ssb_bus_pcibus_register(struct ssb_bus *bus, + struct pci_dev *host_pci); +#endif /* CONFIG_SSB_PCIHOST */ +#ifdef CONFIG_SSB_PCMCIAHOST +extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus, + struct pcmcia_device *pcmcia_dev, + unsigned long baseaddr); +#endif /* CONFIG_SSB_PCMCIAHOST */ +#ifdef CONFIG_SSB_SDIOHOST +extern int ssb_bus_sdiobus_register(struct ssb_bus *bus, + struct sdio_func *sdio_func, + unsigned int quirks); +#endif /* CONFIG_SSB_SDIOHOST */ + + +extern void ssb_bus_unregister(struct ssb_bus *bus); + +/* Does the device have an SPROM? */ +extern bool ssb_is_sprom_available(struct ssb_bus *bus); + +/* Set a fallback SPROM. + * See kdoc at the function definition for complete documentation. */ +extern int ssb_arch_register_fallback_sprom( + int (*sprom_callback)(struct ssb_bus *bus, + struct ssb_sprom *out)); + +/* Suspend a SSB bus. + * Call this from the parent bus suspend routine. */ +extern int ssb_bus_suspend(struct ssb_bus *bus); +/* Resume a SSB bus. + * Call this from the parent bus resume routine. */ +extern int ssb_bus_resume(struct ssb_bus *bus); + +extern u32 ssb_clockspeed(struct ssb_bus *bus); + +/* Is the device enabled in hardware? */ +int ssb_device_is_enabled(struct ssb_device *dev); +/* Enable a device and pass device-specific SSB_TMSLOW flags. + * If no device-specific flags are available, use 0. 
*/ +void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags); +/* Disable a device in hardware and pass SSB_TMSLOW flags (if any). */ +void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags); + + +/* Device MMIO register read/write functions. */ +static inline u8 ssb_read8(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read8(dev, offset); +} +static inline u16 ssb_read16(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read16(dev, offset); +} +static inline u32 ssb_read32(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read32(dev, offset); +} +static inline void ssb_write8(struct ssb_device *dev, u16 offset, u8 value) +{ + dev->ops->write8(dev, offset, value); +} +static inline void ssb_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + dev->ops->write16(dev, offset, value); +} +static inline void ssb_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + dev->ops->write32(dev, offset, value); +} +#ifdef CONFIG_SSB_BLOCKIO +static inline void ssb_block_read(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + dev->ops->block_read(dev, buffer, count, offset, reg_width); +} + +static inline void ssb_block_write(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + dev->ops->block_write(dev, buffer, count, offset, reg_width); +} +#endif /* CONFIG_SSB_BLOCKIO */ + + +/* The SSB DMA API. Use this API for any DMA operation on the device. + * This API basically is a wrapper that calls the correct DMA API for + * the host device type the SSB device is attached to. */ + +/* Translation (routing) bits that need to be ORed to DMA + * addresses before they are given to a device. */ +extern u32 ssb_dma_translation(struct ssb_device *dev); +#define SSB_DMA_TRANSLATION_MASK 0xC0000000 +#define SSB_DMA_TRANSLATION_SHIFT 30 + +static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) +{ +#ifdef CONFIG_SSB_DEBUG + printk(KERN_ERR "SSB: BUG! Calling DMA API for " + "unsupported bustype %d\n", dev->bus->bustype); +#endif /* DEBUG */ +} + +#ifdef CONFIG_SSB_PCIHOST +/* PCI-host wrapper driver */ +extern int ssb_pcihost_register(struct pci_driver *driver); +static inline void ssb_pcihost_unregister(struct pci_driver *driver) +{ + pci_unregister_driver(driver); +} + +static inline +void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state) +{ + if (sdev->bus->bustype == SSB_BUSTYPE_PCI) + pci_set_power_state(sdev->bus->host_pci, state); +} +#else +static inline void ssb_pcihost_unregister(struct pci_driver *driver) +{ +} + +static inline +void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state) +{ +} +#endif /* CONFIG_SSB_PCIHOST */ + + +/* If a driver is shutdown or suspended, call this to signal + * that the bus may be completely powered down. SSB will decide, + * if it's really time to power down the bus, based on if there + * are other devices that want to run. */ +extern int ssb_bus_may_powerdown(struct ssb_bus *bus); +/* Before initializing and enabling a device, call this to power-up the bus. + * If you want to allow use of dynamic-power-control, pass the flag. + * Otherwise static always-on powercontrol will be used. */ +extern int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl); + +extern void ssb_commit_settings(struct ssb_bus *bus); + +/* Various helper functions */ +extern u32 ssb_admatch_base(u32 adm); +extern u32 ssb_admatch_size(u32 adm); + +/* PCI device mapping and fixup routines. 
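/*
 * Illustrative sketch, not part of the original header: powering up the bus,
 * enabling a core and touching its registers with the accessors above.
 * FOO_CTL, FOO_CTL_RUN and the overall flow are hypothetical.
 */
#define FOO_CTL		0x0120		/* hypothetical core register offset */
#define FOO_CTL_RUN	0x00000001

static int foo_start(struct ssb_device *dev)
{
	int err;

	err = ssb_bus_powerup(dev->bus, true);	/* allow dynamic power control */
	if (err)
		return err;

	if (!ssb_device_is_enabled(dev))
		ssb_device_enable(dev, 0);	/* no core-specific TMSLOW flags */

	ssb_write32(dev, FOO_CTL, ssb_read32(dev, FOO_CTL) | FOO_CTL_RUN);
	return 0;
}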
+ * Called from the architecture pcibios init code. + * These are only available on SSB_EMBEDDED configurations. */ +#ifdef CONFIG_SSB_EMBEDDED +int ssb_pcibios_plat_dev_init(struct pci_dev *dev); +int ssb_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); +#endif /* CONFIG_SSB_EMBEDDED */ + +#endif /* LINUX_SSB_H_ */ diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h new file mode 100644 index 00000000..1a6b0045 --- /dev/null +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -0,0 +1,665 @@ +#ifndef LINUX_SSB_CHIPCO_H_ +#define LINUX_SSB_CHIPCO_H_ + +/* SonicsSiliconBackplane CHIPCOMMON core hardware definitions + * + * The chipcommon core provides chip identification, SB control, + * jtag, 0/1/2 uarts, clock frequency control, a watchdog interrupt timer, + * gpio interface, extbus, and support for serial and parallel flashes. + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, Michael Buesch <m@bues.ch> + * + * Licensed under the GPL version 2. See COPYING for details. + */ + +/** ChipCommon core registers. **/ + +#define SSB_CHIPCO_CHIPID 0x0000 +#define SSB_CHIPCO_IDMASK 0x0000FFFF +#define SSB_CHIPCO_REVMASK 0x000F0000 +#define SSB_CHIPCO_REVSHIFT 16 +#define SSB_CHIPCO_PACKMASK 0x00F00000 +#define SSB_CHIPCO_PACKSHIFT 20 +#define SSB_CHIPCO_NRCORESMASK 0x0F000000 +#define SSB_CHIPCO_NRCORESSHIFT 24 +#define SSB_CHIPCO_CAP 0x0004 /* Capabilities */ +#define SSB_CHIPCO_CAP_NRUART 0x00000003 /* # of UARTs */ +#define SSB_CHIPCO_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */ +#define SSB_CHIPCO_CAP_UARTCLK 0x00000018 /* UART clock select */ +#define SSB_CHIPCO_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */ +#define SSB_CHIPCO_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */ +#define SSB_CHIPCO_CAP_EXTBUS 0x000000C0 /* External buses present */ +#define SSB_CHIPCO_CAP_FLASHT 0x00000700 /* Flash Type */ +#define SSB_CHIPCO_FLASHT_NONE 0x00000000 /* No flash */ +#define SSB_CHIPCO_FLASHT_STSER 0x00000100 /* ST serial flash */ +#define SSB_CHIPCO_FLASHT_ATSER 0x00000200 /* Atmel serial flash */ +#define SSB_CHIPCO_FLASHT_PARA 0x00000700 /* Parallel flash */ +#define SSB_CHIPCO_CAP_PLLT 0x00038000 /* PLL Type */ +#define SSB_PLLTYPE_NONE 0x00000000 +#define SSB_PLLTYPE_1 0x00010000 /* 48Mhz base, 3 dividers */ +#define SSB_PLLTYPE_2 0x00020000 /* 48Mhz, 4 dividers */ +#define SSB_PLLTYPE_3 0x00030000 /* 25Mhz, 2 dividers */ +#define SSB_PLLTYPE_4 0x00008000 /* 48Mhz, 4 dividers */ +#define SSB_PLLTYPE_5 0x00018000 /* 25Mhz, 4 dividers */ +#define SSB_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */ +#define SSB_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */ +#define SSB_CHIPCO_CAP_PCTL 0x00040000 /* Power Control */ +#define SSB_CHIPCO_CAP_OTPS 0x00380000 /* OTP size */ +#define SSB_CHIPCO_CAP_OTPS_SHIFT 19 +#define SSB_CHIPCO_CAP_OTPS_BASE 5 +#define SSB_CHIPCO_CAP_JTAGM 0x00400000 /* JTAG master present */ +#define SSB_CHIPCO_CAP_BROM 0x00800000 /* Internal boot ROM active */ +#define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */ +#define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */ +#define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */ +#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */ +#define SSB_CHIPCO_CORECTL 0x0008 +#define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */ +#define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ +#define SSB_CHIPCO_CORECTL_UARTCLKEN 0x00000008 /* 
UART clock enable (rev >= 21) */ +#define SSB_CHIPCO_BIST 0x000C +#define SSB_CHIPCO_OTPS 0x0010 /* OTP status */ +#define SSB_CHIPCO_OTPS_PROGFAIL 0x80000000 +#define SSB_CHIPCO_OTPS_PROTECT 0x00000007 +#define SSB_CHIPCO_OTPS_HW_PROTECT 0x00000001 +#define SSB_CHIPCO_OTPS_SW_PROTECT 0x00000002 +#define SSB_CHIPCO_OTPS_CID_PROTECT 0x00000004 +#define SSB_CHIPCO_OTPC 0x0014 /* OTP control */ +#define SSB_CHIPCO_OTPC_RECWAIT 0xFF000000 +#define SSB_CHIPCO_OTPC_PROGWAIT 0x00FFFF00 +#define SSB_CHIPCO_OTPC_PRW_SHIFT 8 +#define SSB_CHIPCO_OTPC_MAXFAIL 0x00000038 +#define SSB_CHIPCO_OTPC_VSEL 0x00000006 +#define SSB_CHIPCO_OTPC_SELVL 0x00000001 +#define SSB_CHIPCO_OTPP 0x0018 /* OTP prog */ +#define SSB_CHIPCO_OTPP_COL 0x000000FF +#define SSB_CHIPCO_OTPP_ROW 0x0000FF00 +#define SSB_CHIPCO_OTPP_ROW_SHIFT 8 +#define SSB_CHIPCO_OTPP_READERR 0x10000000 +#define SSB_CHIPCO_OTPP_VALUE 0x20000000 +#define SSB_CHIPCO_OTPP_READ 0x40000000 +#define SSB_CHIPCO_OTPP_START 0x80000000 +#define SSB_CHIPCO_OTPP_BUSY 0x80000000 +#define SSB_CHIPCO_IRQSTAT 0x0020 +#define SSB_CHIPCO_IRQMASK 0x0024 +#define SSB_CHIPCO_IRQ_GPIO 0x00000001 /* gpio intr */ +#define SSB_CHIPCO_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */ +#define SSB_CHIPCO_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */ +#define SSB_CHIPCO_CHIPCTL 0x0028 /* Rev >= 11 only */ +#define SSB_CHIPCO_CHIPSTAT 0x002C /* Rev >= 11 only */ +#define SSB_CHIPCO_JCMD 0x0030 /* Rev >= 10 only */ +#define SSB_CHIPCO_JCMD_START 0x80000000 +#define SSB_CHIPCO_JCMD_BUSY 0x80000000 +#define SSB_CHIPCO_JCMD_PAUSE 0x40000000 +#define SSB_CHIPCO_JCMD0_ACC_MASK 0x0000F000 +#define SSB_CHIPCO_JCMD0_ACC_IRDR 0x00000000 +#define SSB_CHIPCO_JCMD0_ACC_DR 0x00001000 +#define SSB_CHIPCO_JCMD0_ACC_IR 0x00002000 +#define SSB_CHIPCO_JCMD0_ACC_RESET 0x00003000 +#define SSB_CHIPCO_JCMD0_ACC_IRPDR 0x00004000 +#define SSB_CHIPCO_JCMD0_ACC_PDR 0x00005000 +#define SSB_CHIPCO_JCMD0_IRW_MASK 0x00000F00 +#define SSB_CHIPCO_JCMD_ACC_MASK 0x000F0000 /* Changes for corerev 11 */ +#define SSB_CHIPCO_JCMD_ACC_IRDR 0x00000000 +#define SSB_CHIPCO_JCMD_ACC_DR 0x00010000 +#define SSB_CHIPCO_JCMD_ACC_IR 0x00020000 +#define SSB_CHIPCO_JCMD_ACC_RESET 0x00030000 +#define SSB_CHIPCO_JCMD_ACC_IRPDR 0x00040000 +#define SSB_CHIPCO_JCMD_ACC_PDR 0x00050000 +#define SSB_CHIPCO_JCMD_IRW_MASK 0x00001F00 +#define SSB_CHIPCO_JCMD_IRW_SHIFT 8 +#define SSB_CHIPCO_JCMD_DRW_MASK 0x0000003F +#define SSB_CHIPCO_JIR 0x0034 /* Rev >= 10 only */ +#define SSB_CHIPCO_JDR 0x0038 /* Rev >= 10 only */ +#define SSB_CHIPCO_JCTL 0x003C /* Rev >= 10 only */ +#define SSB_CHIPCO_JCTL_FORCE_CLK 4 /* Force clock */ +#define SSB_CHIPCO_JCTL_EXT_EN 2 /* Enable external targets */ +#define SSB_CHIPCO_JCTL_EN 1 /* Enable Jtag master */ +#define SSB_CHIPCO_FLASHCTL 0x0040 +#define SSB_CHIPCO_FLASHCTL_START 0x80000000 +#define SSB_CHIPCO_FLASHCTL_BUSY SSB_CHIPCO_FLASHCTL_START +#define SSB_CHIPCO_FLASHADDR 0x0044 +#define SSB_CHIPCO_FLASHDATA 0x0048 +#define SSB_CHIPCO_BCAST_ADDR 0x0050 +#define SSB_CHIPCO_BCAST_DATA 0x0054 +#define SSB_CHIPCO_GPIOPULLUP 0x0058 /* Rev >= 20 only */ +#define SSB_CHIPCO_GPIOPULLDOWN 0x005C /* Rev >= 20 only */ +#define SSB_CHIPCO_GPIOIN 0x0060 +#define SSB_CHIPCO_GPIOOUT 0x0064 +#define SSB_CHIPCO_GPIOOUTEN 0x0068 +#define SSB_CHIPCO_GPIOCTL 0x006C +#define SSB_CHIPCO_GPIOPOL 0x0070 +#define SSB_CHIPCO_GPIOIRQ 0x0074 +#define SSB_CHIPCO_WATCHDOG 0x0080 +#define SSB_CHIPCO_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */ +#define SSB_CHIPCO_GPIOTIMER_OFFTIME 0x0000FFFF +#define 
SSB_CHIPCO_GPIOTIMER_OFFTIME_SHIFT 0 +#define SSB_CHIPCO_GPIOTIMER_ONTIME 0xFFFF0000 +#define SSB_CHIPCO_GPIOTIMER_ONTIME_SHIFT 16 +#define SSB_CHIPCO_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */ +#define SSB_CHIPCO_CLOCK_N 0x0090 +#define SSB_CHIPCO_CLOCK_SB 0x0094 +#define SSB_CHIPCO_CLOCK_PCI 0x0098 +#define SSB_CHIPCO_CLOCK_M2 0x009C +#define SSB_CHIPCO_CLOCK_MIPS 0x00A0 +#define SSB_CHIPCO_CLKDIV 0x00A4 /* Rev >= 3 only */ +#define SSB_CHIPCO_CLKDIV_SFLASH 0x0F000000 +#define SSB_CHIPCO_CLKDIV_SFLASH_SHIFT 24 +#define SSB_CHIPCO_CLKDIV_OTP 0x000F0000 +#define SSB_CHIPCO_CLKDIV_OTP_SHIFT 16 +#define SSB_CHIPCO_CLKDIV_JTAG 0x00000F00 +#define SSB_CHIPCO_CLKDIV_JTAG_SHIFT 8 +#define SSB_CHIPCO_CLKDIV_UART 0x000000FF +#define SSB_CHIPCO_PLLONDELAY 0x00B0 /* Rev >= 4 only */ +#define SSB_CHIPCO_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ +#define SSB_CHIPCO_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */ +#define SSB_CHIPCO_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */ +#define SSB_CHIPCO_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define SSB_CHIPCO_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */ +#define SSB_CHIPCO_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */ +#define SSB_CHIPCO_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */ +#define SSB_CHIPCO_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */ +#define SSB_CHIPCO_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ +#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ +#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV_SHIFT 16 +#define SSB_CHIPCO_SYSCLKCTL 0x00C0 /* Rev >= 3 only */ +#define SSB_CHIPCO_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */ +#define SSB_CHIPCO_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */ +#define SSB_CHIPCO_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */ +#define SSB_CHIPCO_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set */ +#define SSB_CHIPCO_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */ +#define SSB_CHIPCO_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */ +#define SSB_CHIPCO_SYSCLKCTL_CLKDIV_SHIFT 16 +#define SSB_CHIPCO_CLKSTSTR 0x00C4 /* Rev >= 3 only */ +#define SSB_CHIPCO_PCMCIA_CFG 0x0100 +#define SSB_CHIPCO_PCMCIA_MEMWAIT 0x0104 +#define SSB_CHIPCO_PCMCIA_ATTRWAIT 0x0108 +#define SSB_CHIPCO_PCMCIA_IOWAIT 0x010C +#define SSB_CHIPCO_IDE_CFG 0x0110 +#define SSB_CHIPCO_IDE_MEMWAIT 0x0114 +#define SSB_CHIPCO_IDE_ATTRWAIT 0x0118 +#define SSB_CHIPCO_IDE_IOWAIT 0x011C +#define SSB_CHIPCO_PROG_CFG 0x0120 +#define SSB_CHIPCO_PROG_WAITCNT 0x0124 +#define SSB_CHIPCO_FLASH_CFG 0x0128 +#define SSB_CHIPCO_FLASH_WAITCNT 0x012C +#define SSB_CHIPCO_CLKCTLST 0x01E0 /* Clock control and status (rev >= 20) */ +#define SSB_CHIPCO_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ +#define SSB_CHIPCO_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ +#define SSB_CHIPCO_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */ +#define SSB_CHIPCO_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ 
+#define SSB_CHIPCO_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ +#define SSB_CHIPCO_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ +#define SSB_CHIPCO_CLKCTLST_HAVEALP 0x00010000 /* ALP available */ +#define SSB_CHIPCO_CLKCTLST_HAVEHT 0x00020000 /* HT available */ +#define SSB_CHIPCO_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */ +#define SSB_CHIPCO_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ +#define SSB_CHIPCO_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ +#define SSB_CHIPCO_UART0_DATA 0x0300 +#define SSB_CHIPCO_UART0_IMR 0x0304 +#define SSB_CHIPCO_UART0_FCR 0x0308 +#define SSB_CHIPCO_UART0_LCR 0x030C +#define SSB_CHIPCO_UART0_MCR 0x0310 +#define SSB_CHIPCO_UART0_LSR 0x0314 +#define SSB_CHIPCO_UART0_MSR 0x0318 +#define SSB_CHIPCO_UART0_SCRATCH 0x031C +#define SSB_CHIPCO_UART1_DATA 0x0400 +#define SSB_CHIPCO_UART1_IMR 0x0404 +#define SSB_CHIPCO_UART1_FCR 0x0408 +#define SSB_CHIPCO_UART1_LCR 0x040C +#define SSB_CHIPCO_UART1_MCR 0x0410 +#define SSB_CHIPCO_UART1_LSR 0x0414 +#define SSB_CHIPCO_UART1_MSR 0x0418 +#define SSB_CHIPCO_UART1_SCRATCH 0x041C +/* PMU registers (rev >= 20) */ +#define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ +#define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ +#define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 +#define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ +#define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ +#define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ +#define SSB_CHIPCO_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */ +#define SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT 2 +#define SSB_CHIPCO_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */ +#define SSB_CHIPCO_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */ +#define SSB_CHIPCO_PMU_CAP 0x0604 /* PMU capabilities */ +#define SSB_CHIPCO_PMU_CAP_REVISION 0x000000FF /* Revision mask */ +#define SSB_CHIPCO_PMU_STAT 0x0608 /* PMU status */ +#define SSB_CHIPCO_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ +#define SSB_CHIPCO_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? 
*/ +#define SSB_CHIPCO_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ +#define SSB_CHIPCO_PMU_STAT_HAVEHT 0x00000004 /* HT available */ +#define SSB_CHIPCO_PMU_STAT_RESINIT 0x00000003 /* Res init */ +#define SSB_CHIPCO_PMU_RES_STAT 0x060C /* PMU res status */ +#define SSB_CHIPCO_PMU_RES_PEND 0x0610 /* PMU res pending */ +#define SSB_CHIPCO_PMU_TIMER 0x0614 /* PMU timer */ +#define SSB_CHIPCO_PMU_MINRES_MSK 0x0618 /* PMU min res mask */ +#define SSB_CHIPCO_PMU_MAXRES_MSK 0x061C /* PMU max res mask */ +#define SSB_CHIPCO_PMU_RES_TABSEL 0x0620 /* PMU res table sel */ +#define SSB_CHIPCO_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */ +#define SSB_CHIPCO_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */ +#define SSB_CHIPCO_PMU_RES_TIMER 0x062C /* PMU res timer */ +#define SSB_CHIPCO_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */ +#define SSB_CHIPCO_PMU_WATCHDOG 0x0634 /* PMU watchdog */ +#define SSB_CHIPCO_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ +#define SSB_CHIPCO_PMU_RES_REQT 0x0644 /* PMU res req timer */ +#define SSB_CHIPCO_PMU_RES_REQM 0x0648 /* PMU res req mask */ +#define SSB_CHIPCO_CHIPCTL_ADDR 0x0650 +#define SSB_CHIPCO_CHIPCTL_DATA 0x0654 +#define SSB_CHIPCO_REGCTL_ADDR 0x0658 +#define SSB_CHIPCO_REGCTL_DATA 0x065C +#define SSB_CHIPCO_PLLCTL_ADDR 0x0660 +#define SSB_CHIPCO_PLLCTL_DATA 0x0664 + + + +/** PMU PLL registers */ + +/* PMU rev 0 PLL registers */ +#define SSB_PMU0_PLLCTL0 0 +#define SSB_PMU0_PLLCTL0_PDIV_MSK 0x00000001 +#define SSB_PMU0_PLLCTL0_PDIV_FREQ 25000 /* kHz */ +#define SSB_PMU0_PLLCTL1 1 +#define SSB_PMU0_PLLCTL1_WILD_IMSK 0xF0000000 /* Wild int mask (low nibble) */ +#define SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT 28 +#define SSB_PMU0_PLLCTL1_WILD_FMSK 0x0FFFFF00 /* Wild frac mask */ +#define SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT 8 +#define SSB_PMU0_PLLCTL1_STOPMOD 0x00000040 /* Stop mod */ +#define SSB_PMU0_PLLCTL2 2 +#define SSB_PMU0_PLLCTL2_WILD_IMSKHI 0x0000000F /* Wild int mask (high nibble) */ +#define SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT 0 + +/* PMU rev 1 PLL registers */ +#define SSB_PMU1_PLLCTL0 0 +#define SSB_PMU1_PLLCTL0_P1DIV 0x00F00000 /* P1 div */ +#define SSB_PMU1_PLLCTL0_P1DIV_SHIFT 20 +#define SSB_PMU1_PLLCTL0_P2DIV 0x0F000000 /* P2 div */ +#define SSB_PMU1_PLLCTL0_P2DIV_SHIFT 24 +#define SSB_PMU1_PLLCTL1 1 +#define SSB_PMU1_PLLCTL1_M1DIV 0x000000FF /* M1 div */ +#define SSB_PMU1_PLLCTL1_M1DIV_SHIFT 0 +#define SSB_PMU1_PLLCTL1_M2DIV 0x0000FF00 /* M2 div */ +#define SSB_PMU1_PLLCTL1_M2DIV_SHIFT 8 +#define SSB_PMU1_PLLCTL1_M3DIV 0x00FF0000 /* M3 div */ +#define SSB_PMU1_PLLCTL1_M3DIV_SHIFT 16 +#define SSB_PMU1_PLLCTL1_M4DIV 0xFF000000 /* M4 div */ +#define SSB_PMU1_PLLCTL1_M4DIV_SHIFT 24 +#define SSB_PMU1_PLLCTL2 2 +#define SSB_PMU1_PLLCTL2_M5DIV 0x000000FF /* M5 div */ +#define SSB_PMU1_PLLCTL2_M5DIV_SHIFT 0 +#define SSB_PMU1_PLLCTL2_M6DIV 0x0000FF00 /* M6 div */ +#define SSB_PMU1_PLLCTL2_M6DIV_SHIFT 8 +#define SSB_PMU1_PLLCTL2_NDIVMODE 0x000E0000 /* NDIV mode */ +#define SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT 17 +#define SSB_PMU1_PLLCTL2_NDIVINT 0x1FF00000 /* NDIV int */ +#define SSB_PMU1_PLLCTL2_NDIVINT_SHIFT 20 +#define SSB_PMU1_PLLCTL3 3 +#define SSB_PMU1_PLLCTL3_NDIVFRAC 0x00FFFFFF /* NDIV frac */ +#define SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT 0 +#define SSB_PMU1_PLLCTL4 4 +#define SSB_PMU1_PLLCTL5 5 +#define SSB_PMU1_PLLCTL5_CLKDRV 0xFFFFFF00 /* clk drv */ +#define SSB_PMU1_PLLCTL5_CLKDRV_SHIFT 8 + +/* BCM4312 PLL resource numbers. 
*/ +#define SSB_PMURES_4312_SWITCHER_BURST 0 +#define SSB_PMURES_4312_SWITCHER_PWM 1 +#define SSB_PMURES_4312_PA_REF_LDO 2 +#define SSB_PMURES_4312_CORE_LDO_BURST 3 +#define SSB_PMURES_4312_CORE_LDO_PWM 4 +#define SSB_PMURES_4312_RADIO_LDO 5 +#define SSB_PMURES_4312_ILP_REQUEST 6 +#define SSB_PMURES_4312_BG_FILTBYP 7 +#define SSB_PMURES_4312_TX_FILTBYP 8 +#define SSB_PMURES_4312_RX_FILTBYP 9 +#define SSB_PMURES_4312_XTAL_PU 10 +#define SSB_PMURES_4312_ALP_AVAIL 11 +#define SSB_PMURES_4312_BB_PLL_FILTBYP 12 +#define SSB_PMURES_4312_RF_PLL_FILTBYP 13 +#define SSB_PMURES_4312_HT_AVAIL 14 + +/* BCM4325 PLL resource numbers. */ +#define SSB_PMURES_4325_BUCK_BOOST_BURST 0 +#define SSB_PMURES_4325_CBUCK_BURST 1 +#define SSB_PMURES_4325_CBUCK_PWM 2 +#define SSB_PMURES_4325_CLDO_CBUCK_BURST 3 +#define SSB_PMURES_4325_CLDO_CBUCK_PWM 4 +#define SSB_PMURES_4325_BUCK_BOOST_PWM 5 +#define SSB_PMURES_4325_ILP_REQUEST 6 +#define SSB_PMURES_4325_ABUCK_BURST 7 +#define SSB_PMURES_4325_ABUCK_PWM 8 +#define SSB_PMURES_4325_LNLDO1_PU 9 +#define SSB_PMURES_4325_LNLDO2_PU 10 +#define SSB_PMURES_4325_LNLDO3_PU 11 +#define SSB_PMURES_4325_LNLDO4_PU 12 +#define SSB_PMURES_4325_XTAL_PU 13 +#define SSB_PMURES_4325_ALP_AVAIL 14 +#define SSB_PMURES_4325_RX_PWRSW_PU 15 +#define SSB_PMURES_4325_TX_PWRSW_PU 16 +#define SSB_PMURES_4325_RFPLL_PWRSW_PU 17 +#define SSB_PMURES_4325_LOGEN_PWRSW_PU 18 +#define SSB_PMURES_4325_AFE_PWRSW_PU 19 +#define SSB_PMURES_4325_BBPLL_PWRSW_PU 20 +#define SSB_PMURES_4325_HT_AVAIL 21 + +/* BCM4328 PLL resource numbers. */ +#define SSB_PMURES_4328_EXT_SWITCHER_PWM 0 +#define SSB_PMURES_4328_BB_SWITCHER_PWM 1 +#define SSB_PMURES_4328_BB_SWITCHER_BURST 2 +#define SSB_PMURES_4328_BB_EXT_SWITCHER_BURST 3 +#define SSB_PMURES_4328_ILP_REQUEST 4 +#define SSB_PMURES_4328_RADIO_SWITCHER_PWM 5 +#define SSB_PMURES_4328_RADIO_SWITCHER_BURST 6 +#define SSB_PMURES_4328_ROM_SWITCH 7 +#define SSB_PMURES_4328_PA_REF_LDO 8 +#define SSB_PMURES_4328_RADIO_LDO 9 +#define SSB_PMURES_4328_AFE_LDO 10 +#define SSB_PMURES_4328_PLL_LDO 11 +#define SSB_PMURES_4328_BG_FILTBYP 12 +#define SSB_PMURES_4328_TX_FILTBYP 13 +#define SSB_PMURES_4328_RX_FILTBYP 14 +#define SSB_PMURES_4328_XTAL_PU 15 +#define SSB_PMURES_4328_XTAL_EN 16 +#define SSB_PMURES_4328_BB_PLL_FILTBYP 17 +#define SSB_PMURES_4328_RF_PLL_FILTBYP 18 +#define SSB_PMURES_4328_BB_PLL_PU 19 + +/* BCM5354 PLL resource numbers. */ +#define SSB_PMURES_5354_EXT_SWITCHER_PWM 0 +#define SSB_PMURES_5354_BB_SWITCHER_PWM 1 +#define SSB_PMURES_5354_BB_SWITCHER_BURST 2 +#define SSB_PMURES_5354_BB_EXT_SWITCHER_BURST 3 +#define SSB_PMURES_5354_ILP_REQUEST 4 +#define SSB_PMURES_5354_RADIO_SWITCHER_PWM 5 +#define SSB_PMURES_5354_RADIO_SWITCHER_BURST 6 +#define SSB_PMURES_5354_ROM_SWITCH 7 +#define SSB_PMURES_5354_PA_REF_LDO 8 +#define SSB_PMURES_5354_RADIO_LDO 9 +#define SSB_PMURES_5354_AFE_LDO 10 +#define SSB_PMURES_5354_PLL_LDO 11 +#define SSB_PMURES_5354_BG_FILTBYP 12 +#define SSB_PMURES_5354_TX_FILTBYP 13 +#define SSB_PMURES_5354_RX_FILTBYP 14 +#define SSB_PMURES_5354_XTAL_PU 15 +#define SSB_PMURES_5354_XTAL_EN 16 +#define SSB_PMURES_5354_BB_PLL_FILTBYP 17 +#define SSB_PMURES_5354_RF_PLL_FILTBYP 18 +#define SSB_PMURES_5354_BB_PLL_PU 19 + + + +/** Chip specific Chip-Status register contents. */ +#define SSB_CHIPCO_CHST_4322_SPROM_EXISTS 0x00000040 /* SPROM present */ +#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003 +#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. 
CIS, no SPROM */ +#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ +#define SSB_CHIPCO_CHST_4325_OTP_SEL 2 /* OTP is powered up, no SPROM */ +#define SSB_CHIPCO_CHST_4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */ +#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE 0x00000004 +#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE_SHIFT 2 +#define SSB_CHIPCO_CHST_4325_RCAL_VALID 0x00000008 +#define SSB_CHIPCO_CHST_4325_RCAL_VALID_SHIFT 3 +#define SSB_CHIPCO_CHST_4325_RCAL_VALUE 0x000001F0 +#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4 +#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */ + +/** Macros to determine SPROM presence based on Chip-Status register. */ +#define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \ + ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ + SSB_CHIPCO_CHST_4325_OTP_SEL) +#define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \ + (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS) +#define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \ + (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ + SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \ + ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ + SSB_CHIPCO_CHST_4325_OTP_SEL)) + + + +/** Clockcontrol masks and values **/ + +/* SSB_CHIPCO_CLOCK_N */ +#define SSB_CHIPCO_CLK_N1 0x0000003F /* n1 control */ +#define SSB_CHIPCO_CLK_N2 0x00003F00 /* n2 control */ +#define SSB_CHIPCO_CLK_N2_SHIFT 8 +#define SSB_CHIPCO_CLK_PLLC 0x000F0000 /* pll control */ +#define SSB_CHIPCO_CLK_PLLC_SHIFT 16 + +/* SSB_CHIPCO_CLOCK_SB/PCI/UART */ +#define SSB_CHIPCO_CLK_M1 0x0000003F /* m1 control */ +#define SSB_CHIPCO_CLK_M2 0x00003F00 /* m2 control */ +#define SSB_CHIPCO_CLK_M2_SHIFT 8 +#define SSB_CHIPCO_CLK_M3 0x003F0000 /* m3 control */ +#define SSB_CHIPCO_CLK_M3_SHIFT 16 +#define SSB_CHIPCO_CLK_MC 0x1F000000 /* mux control */ +#define SSB_CHIPCO_CLK_MC_SHIFT 24 + +/* N3M Clock control magic field values */ +#define SSB_CHIPCO_CLK_F6_2 0x02 /* A factor of 2 in */ +#define SSB_CHIPCO_CLK_F6_3 0x03 /* 6-bit fields like */ +#define SSB_CHIPCO_CLK_F6_4 0x05 /* N1, M1 or M3 */ +#define SSB_CHIPCO_CLK_F6_5 0x09 +#define SSB_CHIPCO_CLK_F6_6 0x11 +#define SSB_CHIPCO_CLK_F6_7 0x21 + +#define SSB_CHIPCO_CLK_F5_BIAS 5 /* 5-bit fields get this added */ + +#define SSB_CHIPCO_CLK_MC_BYPASS 0x08 +#define SSB_CHIPCO_CLK_MC_M1 0x04 +#define SSB_CHIPCO_CLK_MC_M1M2 0x02 +#define SSB_CHIPCO_CLK_MC_M1M2M3 0x01 +#define SSB_CHIPCO_CLK_MC_M1M3 0x11 + +/* Type 2 Clock control magic field values */ +#define SSB_CHIPCO_CLK_T2_BIAS 2 /* n1, n2, m1 & m3 bias */ +#define SSB_CHIPCO_CLK_T2M2_BIAS 3 /* m2 bias */ + +#define SSB_CHIPCO_CLK_T2MC_M1BYP 1 +#define SSB_CHIPCO_CLK_T2MC_M2BYP 2 +#define SSB_CHIPCO_CLK_T2MC_M3BYP 4 + +/* Type 6 Clock control magic field values */ +#define SSB_CHIPCO_CLK_T6_MMASK 1 /* bits of interest in m */ +#define SSB_CHIPCO_CLK_T6_M0 120000000 /* sb clock for m = 0 */ +#define SSB_CHIPCO_CLK_T6_M1 100000000 /* sb clock for m = 1 */ +#define SSB_CHIPCO_CLK_SB2MIPS_T6(sb) (2 * (sb)) + +/* Common clock base */ +#define SSB_CHIPCO_CLK_BASE1 24000000 /* Half the clock freq */ +#define SSB_CHIPCO_CLK_BASE2 12500000 /* Alternate crystal on some PLL's */ + +/* Clock control values for 200Mhz in 5350 */ +#define SSB_CHIPCO_CLK_5350_N 0x0311 +#define SSB_CHIPCO_CLK_5350_M 0x04020009 + + +/** Bits in the config registers **/ + +#define SSB_CHIPCO_CFG_EN 0x0001 /* Enable */ +#define SSB_CHIPCO_CFG_EXTM 0x000E /* Extif Mode */ +#define SSB_CHIPCO_CFG_EXTM_ASYNC 0x0002 /* Async/Parallel flash */ +#define 
SSB_CHIPCO_CFG_EXTM_SYNC 0x0004 /* Synchronous */ +#define SSB_CHIPCO_CFG_EXTM_PCMCIA 0x0008 /* PCMCIA */ +#define SSB_CHIPCO_CFG_EXTM_IDE 0x000A /* IDE */ +#define SSB_CHIPCO_CFG_DS16 0x0010 /* Data size, 0=8bit, 1=16bit */ +#define SSB_CHIPCO_CFG_CLKDIV 0x0060 /* Sync: Clock divisor */ +#define SSB_CHIPCO_CFG_CLKEN 0x0080 /* Sync: Clock enable */ +#define SSB_CHIPCO_CFG_BSTRO 0x0100 /* Sync: Size/Bytestrobe */ + + +/** Flash-specific control/status values */ + +/* flashcontrol opcodes for ST flashes */ +#define SSB_CHIPCO_FLASHCTL_ST_WREN 0x0006 /* Write Enable */ +#define SSB_CHIPCO_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */ +#define SSB_CHIPCO_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */ +#define SSB_CHIPCO_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */ +#define SSB_CHIPCO_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */ +#define SSB_CHIPCO_FLASHCTL_ST_PP 0x0302 /* Page Program */ +#define SSB_CHIPCO_FLASHCTL_ST_SE 0x02D8 /* Sector Erase */ +#define SSB_CHIPCO_FLASHCTL_ST_BE 0x00C7 /* Bulk Erase */ +#define SSB_CHIPCO_FLASHCTL_ST_DP 0x00B9 /* Deep Power-down */ +#define SSB_CHIPCO_FLASHCTL_ST_RSIG 0x03AB /* Read Electronic Signature */ + +/* Status register bits for ST flashes */ +#define SSB_CHIPCO_FLASHSTA_ST_WIP 0x01 /* Write In Progress */ +#define SSB_CHIPCO_FLASHSTA_ST_WEL 0x02 /* Write Enable Latch */ +#define SSB_CHIPCO_FLASHSTA_ST_BP 0x1C /* Block Protect */ +#define SSB_CHIPCO_FLASHSTA_ST_BP_SHIFT 2 +#define SSB_CHIPCO_FLASHSTA_ST_SRWD 0x80 /* Status Register Write Disable */ + +/* flashcontrol opcodes for Atmel flashes */ +#define SSB_CHIPCO_FLASHCTL_AT_READ 0x07E8 +#define SSB_CHIPCO_FLASHCTL_AT_PAGE_READ 0x07D2 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_READ /* FIXME */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_READ /* FIXME */ +#define SSB_CHIPCO_FLASHCTL_AT_STATUS 0x01D7 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRITE 0x0384 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRITE 0x0387 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_ERASE_PRGM 0x0283 /* Erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_ERASE_PRGM 0x0286 /* Erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_PROGRAM 0x0288 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_PROGRAM 0x0289 +#define SSB_CHIPCO_FLASHCTL_AT_PAGE_ERASE 0x0281 +#define SSB_CHIPCO_FLASHCTL_AT_BLOCK_ERASE 0x0250 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRER_PRGM 0x0382 /* Write erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRER_PRGM 0x0385 /* Write erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_LOAD 0x0253 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_LOAD 0x0255 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_COMPARE 0x0260 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_COMPARE 0x0261 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_REPROGRAM 0x0258 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for Atmel flashes */ +#define SSB_CHIPCO_FLASHSTA_AT_READY 0x80 +#define SSB_CHIPCO_FLASHSTA_AT_MISMATCH 0x40 +#define SSB_CHIPCO_FLASHSTA_AT_ID 0x38 +#define SSB_CHIPCO_FLASHSTA_AT_ID_SHIFT 3 + + +/** OTP **/ + +/* OTP regions */ +#define SSB_CHIPCO_OTP_HW_REGION SSB_CHIPCO_OTPS_HW_PROTECT +#define SSB_CHIPCO_OTP_SW_REGION SSB_CHIPCO_OTPS_SW_PROTECT +#define SSB_CHIPCO_OTP_CID_REGION SSB_CHIPCO_OTPS_CID_PROTECT + +/* OTP regions (Byte offsets from otp size) */ +#define SSB_CHIPCO_OTP_SWLIM_OFF (-8) +#define SSB_CHIPCO_OTP_CIDBASE_OFF 0 +#define SSB_CHIPCO_OTP_CIDLIM_OFF 8 + +/* Predefined OTP words (Word offset from otp size) */ +#define SSB_CHIPCO_OTP_BOUNDARY_OFF (-4) +#define SSB_CHIPCO_OTP_HWSIGN_OFF (-3) +#define SSB_CHIPCO_OTP_SWSIGN_OFF (-2) +#define 
SSB_CHIPCO_OTP_CIDSIGN_OFF (-1) + +#define SSB_CHIPCO_OTP_CID_OFF 0 +#define SSB_CHIPCO_OTP_PKG_OFF 1 +#define SSB_CHIPCO_OTP_FID_OFF 2 +#define SSB_CHIPCO_OTP_RSV_OFF 3 +#define SSB_CHIPCO_OTP_LIM_OFF 4 + +#define SSB_CHIPCO_OTP_SIGNATURE 0x578A +#define SSB_CHIPCO_OTP_MAGIC 0x4E56 + + +struct ssb_device; +struct ssb_serial_port; + +/* Data for the PMU, if available. + * Check availability with ((struct ssb_chipcommon)->capabilities & SSB_CHIPCO_CAP_PMU) + */ +struct ssb_chipcommon_pmu { + u8 rev; /* PMU revision */ + u32 crystalfreq; /* The active crystal frequency (in kHz) */ +}; + +struct ssb_chipcommon { + struct ssb_device *dev; + u32 capabilities; + u32 status; + /* Fast Powerup Delay constant */ + u16 fast_pwrup_delay; + struct ssb_chipcommon_pmu pmu; +}; + +static inline bool ssb_chipco_available(struct ssb_chipcommon *cc) +{ + return (cc->dev != NULL); +} + +/* Register access */ +#define chipco_read32(cc, offset) ssb_read32((cc)->dev, offset) +#define chipco_write32(cc, offset, val) ssb_write32((cc)->dev, offset, val) + +#define chipco_mask32(cc, offset, mask) \ + chipco_write32(cc, offset, chipco_read32(cc, offset) & (mask)) +#define chipco_set32(cc, offset, set) \ + chipco_write32(cc, offset, chipco_read32(cc, offset) | (set)) +#define chipco_maskset32(cc, offset, mask, set) \ + chipco_write32(cc, offset, (chipco_read32(cc, offset) & (mask)) | (set)) + +extern void ssb_chipcommon_init(struct ssb_chipcommon *cc); + +extern void ssb_chipco_suspend(struct ssb_chipcommon *cc); +extern void ssb_chipco_resume(struct ssb_chipcommon *cc); + +extern void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m); +extern void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m); +extern void ssb_chipco_timing_init(struct ssb_chipcommon *cc, + unsigned long ns_per_cycle); + +enum ssb_clkmode { + SSB_CLKMODE_SLOW, + SSB_CLKMODE_FAST, + SSB_CLKMODE_DYNAMIC, +}; + +extern void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, + enum ssb_clkmode mode); + +extern void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, + u32 ticks); + +void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value); + +u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask); + +/* Chipcommon GPIO pin access. 
*/ +u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask); +u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value); + +#ifdef CONFIG_SSB_SERIAL +extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc, + struct ssb_serial_port *ports); +#endif /* CONFIG_SSB_SERIAL */ + +/* PMU support */ +extern void ssb_pmu_init(struct ssb_chipcommon *cc); + +enum ssb_pmu_ldo_volt_id { + LDO_PAREF = 0, + LDO_VOLT1, + LDO_VOLT2, + LDO_VOLT3, +}; + +void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, + enum ssb_pmu_ldo_volt_id id, u32 voltage); +void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); + +#endif /* LINUX_SSB_CHIPCO_H_ */ diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h new file mode 100644 index 00000000..91161f0a --- /dev/null +++ b/include/linux/ssb/ssb_driver_extif.h @@ -0,0 +1,214 @@ +/* + * Hardware-specific External Interface I/O core definitions + * for the BCM47xx family of SiliconBackplane-based chips. + * + * The External Interface core supports a total of three external chip selects + * supporting external interfaces. One of the external chip selects is + * used for Flash, one is used for PCMCIA, and the other may be + * programmed to support either a synchronous interface or an + * asynchronous interface. The asynchronous interface can be used to + * support external devices such as UARTs and the BCM2019 Bluetooth + * baseband processor. + * The external interface core also contains 2 on-chip 16550 UARTs, clock + * frequency control, a watchdog interrupt timer, and a GPIO interface. + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, Michael Buesch + * + * Licensed under the GPL version 2. See COPYING for details. + */ +#ifndef LINUX_SSB_EXTIFCORE_H_ +#define LINUX_SSB_EXTIFCORE_H_ + +/* external interface address space */ +#define SSB_EXTIF_PCMCIA_MEMBASE(x) (x) +#define SSB_EXTIF_PCMCIA_IOBASE(x) ((x) + 0x100000) +#define SSB_EXTIF_PCMCIA_CFGBASE(x) ((x) + 0x200000) +#define SSB_EXTIF_CFGIF_BASE(x) ((x) + 0x800000) +#define SSB_EXTIF_FLASH_BASE(x) ((x) + 0xc00000) + +#define SSB_EXTIF_NR_GPIOOUT 5 +/* GPIO NOTE: + * The multiple instances of output and output enable registers + * are present to allow driver software for multiple cores to control + * gpio outputs without needing to share a single register pair. + * Use the following helper macro to get a register offset value. 
+ */ +#define SSB_EXTIF_GPIO_OUT(index) ({ \ + BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ + SSB_EXTIF_GPIO_OUT_BASE + ((index) * 8); \ + }) +#define SSB_EXTIF_GPIO_OUTEN(index) ({ \ + BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ + SSB_EXTIF_GPIO_OUTEN_BASE + ((index) * 8); \ + }) + +/** EXTIF core registers **/ + +#define SSB_EXTIF_CTL 0x0000 +#define SSB_EXTIF_CTL_UARTEN (1 << 0) /* UART enable */ +#define SSB_EXTIF_EXTSTAT 0x0004 +#define SSB_EXTIF_EXTSTAT_EMODE (1 << 0) /* Endian mode (ro) */ +#define SSB_EXTIF_EXTSTAT_EIRQPIN (1 << 1) /* External interrupt pin (ro) */ +#define SSB_EXTIF_EXTSTAT_GPIOIRQPIN (1 << 2) /* GPIO interrupt pin (ro) */ +#define SSB_EXTIF_PCMCIA_CFG 0x0010 +#define SSB_EXTIF_PCMCIA_MEMWAIT 0x0014 +#define SSB_EXTIF_PCMCIA_ATTRWAIT 0x0018 +#define SSB_EXTIF_PCMCIA_IOWAIT 0x001C +#define SSB_EXTIF_PROG_CFG 0x0020 +#define SSB_EXTIF_PROG_WAITCNT 0x0024 +#define SSB_EXTIF_FLASH_CFG 0x0028 +#define SSB_EXTIF_FLASH_WAITCNT 0x002C +#define SSB_EXTIF_WATCHDOG 0x0040 +#define SSB_EXTIF_CLOCK_N 0x0044 +#define SSB_EXTIF_CLOCK_SB 0x0048 +#define SSB_EXTIF_CLOCK_PCI 0x004C +#define SSB_EXTIF_CLOCK_MII 0x0050 +#define SSB_EXTIF_GPIO_IN 0x0060 +#define SSB_EXTIF_GPIO_OUT_BASE 0x0064 +#define SSB_EXTIF_GPIO_OUTEN_BASE 0x0068 +#define SSB_EXTIF_EJTAG_OUTEN 0x0090 +#define SSB_EXTIF_GPIO_INTPOL 0x0094 +#define SSB_EXTIF_GPIO_INTMASK 0x0098 +#define SSB_EXTIF_UART_DATA 0x0300 +#define SSB_EXTIF_UART_TIMER 0x0310 +#define SSB_EXTIF_UART_FCR 0x0320 +#define SSB_EXTIF_UART_LCR 0x0330 +#define SSB_EXTIF_UART_MCR 0x0340 +#define SSB_EXTIF_UART_LSR 0x0350 +#define SSB_EXTIF_UART_MSR 0x0360 +#define SSB_EXTIF_UART_SCRATCH 0x0370 + + + + +/* pcmcia/prog/flash_config */ +#define SSB_EXTCFG_EN (1 << 0) /* enable */ +#define SSB_EXTCFG_MODE 0xE /* mode */ +#define SSB_EXTCFG_MODE_SHIFT 1 +#define SSB_EXTCFG_MODE_FLASH 0x0 /* flash/asynchronous mode */ +#define SSB_EXTCFG_MODE_SYNC 0x2 /* synchronous mode */ +#define SSB_EXTCFG_MODE_PCMCIA 0x4 /* pcmcia mode */ +#define SSB_EXTCFG_DS16 (1 << 4) /* destsize: 0=8bit, 1=16bit */ +#define SSB_EXTCFG_BSWAP (1 << 5) /* byteswap */ +#define SSB_EXTCFG_CLKDIV 0xC0 /* clock divider */ +#define SSB_EXTCFG_CLKDIV_SHIFT 6 +#define SSB_EXTCFG_CLKDIV_2 0x0 /* backplane/2 */ +#define SSB_EXTCFG_CLKDIV_3 0x40 /* backplane/3 */ +#define SSB_EXTCFG_CLKDIV_4 0x80 /* backplane/4 */ +#define SSB_EXTCFG_CLKEN (1 << 8) /* clock enable */ +#define SSB_EXTCFG_STROBE (1 << 9) /* size/bytestrobe (synch only) */ + +/* pcmcia_memwait */ +#define SSB_PCMCIA_MEMW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_MEMW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_MEMW_1_SHIFT 8 +#define SSB_PCMCIA_MEMW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_MEMW_2_SHIFT 16 +#define SSB_PCMCIA_MEMW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_MEMW_3_SHIFT 24 + +/* pcmcia_attrwait */ +#define SSB_PCMCIA_ATTW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_ATTW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_ATTW_1_SHIFT 8 +#define SSB_PCMCIA_ATTW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_ATTW_2_SHIFT 16 +#define SSB_PCMCIA_ATTW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_ATTW_3_SHIFT 24 + +/* pcmcia_iowait */ +#define SSB_PCMCIA_IOW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_IOW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_IOW_1_SHIFT 8 +#define SSB_PCMCIA_IOW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_IOW_2_SHIFT 16 +#define SSB_PCMCIA_IOW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_IOW_3_SHIFT 24 + +/* prog_waitcount */ 
+#define SSB_PROG_WCNT_0 0x0000001F /* waitcount0 */ +#define SSB_PROG_WCNT_1 0x00001F00 /* waitcount1 */ +#define SSB_PROG_WCNT_1_SHIFT 8 +#define SSB_PROG_WCNT_2 0x001F0000 /* waitcount2 */ +#define SSB_PROG_WCNT_2_SHIFT 16 +#define SSB_PROG_WCNT_3 0x1F000000 /* waitcount3 */ +#define SSB_PROG_WCNT_3_SHIFT 24 + +#define SSB_PROG_W0 0x0000000C +#define SSB_PROG_W1 0x00000A00 +#define SSB_PROG_W2 0x00020000 +#define SSB_PROG_W3 0x01000000 + +/* flash_waitcount */ +#define SSB_FLASH_WCNT_0 0x0000001F /* waitcount0 */ +#define SSB_FLASH_WCNT_1 0x00001F00 /* waitcount1 */ +#define SSB_FLASH_WCNT_1_SHIFT 8 +#define SSB_FLASH_WCNT_2 0x001F0000 /* waitcount2 */ +#define SSB_FLASH_WCNT_2_SHIFT 16 +#define SSB_FLASH_WCNT_3 0x1F000000 /* waitcount3 */ +#define SSB_FLASH_WCNT_3_SHIFT 24 + +/* watchdog */ +#define SSB_EXTIF_WATCHDOG_CLK 48000000 /* Hz */ + + + +#ifdef CONFIG_SSB_DRIVER_EXTIF + +struct ssb_extif { + struct ssb_device *dev; +}; + +static inline bool ssb_extif_available(struct ssb_extif *extif) +{ + return (extif->dev != NULL); +} + +extern void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *plltype, u32 *n, u32 *m); + +extern void ssb_extif_timing_init(struct ssb_extif *extif, + unsigned long ns); + +extern void ssb_extif_watchdog_timer_set(struct ssb_extif *extif, + u32 ticks); + +/* Extif GPIO pin access */ +u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask); +u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value); +u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value); +u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value); +u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value); + +#ifdef CONFIG_SSB_SERIAL +extern int ssb_extif_serial_init(struct ssb_extif *extif, + struct ssb_serial_port *ports); +#endif /* CONFIG_SSB_SERIAL */ + + +#else /* CONFIG_SSB_DRIVER_EXTIF */ +/* extif disabled */ + +struct ssb_extif { +}; + +static inline bool ssb_extif_available(struct ssb_extif *extif) +{ + return 0; +} + +static inline +void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *plltype, u32 *n, u32 *m) +{ +} + +static inline +void ssb_extif_watchdog_timer_set(struct ssb_extif *extif, + u32 ticks) +{ +} + +#endif /* CONFIG_SSB_DRIVER_EXTIF */ +#endif /* LINUX_SSB_EXTIFCORE_H_ */ diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h new file mode 100644 index 00000000..6b05dcd9 --- /dev/null +++ b/include/linux/ssb/ssb_driver_gige.h @@ -0,0 +1,180 @@ +#ifndef LINUX_SSB_DRIVER_GIGE_H_ +#define LINUX_SSB_DRIVER_GIGE_H_ + +#include <linux/ssb/ssb.h> +#include <linux/bug.h> +#include <linux/pci.h> +#include <linux/spinlock.h> + + +#ifdef CONFIG_SSB_DRIVER_GIGE + + +#define SSB_GIGE_PCIIO 0x0000 /* PCI I/O Registers (1024 bytes) */ +#define SSB_GIGE_RESERVED 0x0400 /* Reserved (1024 bytes) */ +#define SSB_GIGE_PCICFG 0x0800 /* PCI config space (256 bytes) */ +#define SSB_GIGE_SHIM_FLUSHSTAT 0x0C00 /* PCI to OCP: Flush status control (32bit) */ +#define SSB_GIGE_SHIM_FLUSHRDA 0x0C04 /* PCI to OCP: Flush read address (32bit) */ +#define SSB_GIGE_SHIM_FLUSHTO 0x0C08 /* PCI to OCP: Flush timeout counter (32bit) */ +#define SSB_GIGE_SHIM_BARRIER 0x0C0C /* PCI to OCP: Barrier register (32bit) */ +#define SSB_GIGE_SHIM_MAOCPSI 0x0C10 /* PCI to OCP: MaocpSI Control (32bit) */ +#define SSB_GIGE_SHIM_SIOCPMA 0x0C14 /* PCI to OCP: SiocpMa Control (32bit) */ + +/* TM Status High flags */ +#define SSB_GIGE_TMSHIGH_RGMII 0x00010000 /* Have an RGMII PHY-bus */ +/* TM Status Low flags 
*/ +#define SSB_GIGE_TMSLOW_TXBYPASS 0x00080000 /* TX bypass (no delay) */ +#define SSB_GIGE_TMSLOW_RXBYPASS 0x00100000 /* RX bypass (no delay) */ +#define SSB_GIGE_TMSLOW_DLLEN 0x01000000 /* Enable DLL controls */ + +/* Boardflags (low) */ +#define SSB_GIGE_BFL_ROBOSWITCH 0x0010 + + +#define SSB_GIGE_MEM_RES_NAME "SSB Broadcom 47xx GigE memory" +#define SSB_GIGE_IO_RES_NAME "SSB Broadcom 47xx GigE I/O" + +struct ssb_gige { + struct ssb_device *dev; + + spinlock_t lock; + + /* True, if the device has an RGMII bus. + * False, if the device has a GMII bus. */ + bool has_rgmii; + + /* The PCI controller device. */ + struct pci_controller pci_controller; + struct pci_ops pci_ops; + struct resource mem_resource; + struct resource io_resource; +}; + +/* Check whether a PCI device is a SSB Gigabit Ethernet core. */ +extern bool pdev_is_ssb_gige_core(struct pci_dev *pdev); + +/* Convert a pci_dev pointer to a ssb_gige pointer. */ +static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) +{ + if (!pdev_is_ssb_gige_core(pdev)) + return NULL; + return container_of(pdev->bus->ops, struct ssb_gige, pci_ops); +} + +/* Returns whether the PHY is connected by an RGMII bus. */ +static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + return (dev ? dev->has_rgmii : 0); +} + +/* Returns whether we have a Roboswitch. */ +static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (dev) + return !!(dev->dev->bus->sprom.boardflags_lo & + SSB_GIGE_BFL_ROBOSWITCH); + return 0; +} + +/* Returns whether we can only do one DMA at once. */ +static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (dev) + return ((dev->dev->bus->chip_id == 0x4785) && + (dev->dev->bus->chip_rev < 2)); + return 0; +} + +/* Returns whether we must flush posted writes. */ +static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (dev) + return (dev->dev->bus->chip_id == 0x4785); + return 0; +} + +#ifdef CONFIG_BCM47XX +#include <asm/mach-bcm47xx/nvram.h> +/* Get the device MAC address */ +static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) +{ + char buf[20]; + if (nvram_getenv("et0macaddr", buf, sizeof(buf)) < 0) + return; + nvram_parse_macaddr(buf, macaddr); +} +#else +static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) +{ +} +#endif + +extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, + struct pci_dev *pdev); +extern int ssb_gige_map_irq(struct ssb_device *sdev, + const struct pci_dev *pdev); + +/* The GigE driver is not a standalone module, because we don't have support + * for unregistering the driver. So we could not unload the module anyway. */ +extern int ssb_gige_init(void); +static inline void ssb_gige_exit(void) +{ + /* Currently we can not unregister the GigE driver, + * because we can not unregister the PCI bridge. 
*/ + BUG(); +} + + +#else /* CONFIG_SSB_DRIVER_GIGE */ +/* Gigabit Ethernet driver disabled */ + + +static inline int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, + struct pci_dev *pdev) +{ + return -ENOSYS; +} +static inline int ssb_gige_map_irq(struct ssb_device *sdev, + const struct pci_dev *pdev) +{ + return -ENOSYS; +} +static inline int ssb_gige_init(void) +{ + return 0; +} +static inline void ssb_gige_exit(void) +{ +} + +static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev) +{ + return 0; +} +static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) +{ + return NULL; +} +static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) +{ + return 0; +} +static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) +{ + return 0; +} +static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) +{ + return 0; +} +static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) +{ + return 0; +} + +#endif /* CONFIG_SSB_DRIVER_GIGE */ +#endif /* LINUX_SSB_DRIVER_GIGE_H_ */ diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h new file mode 100644 index 00000000..5f44e974 --- /dev/null +++ b/include/linux/ssb/ssb_driver_mips.h @@ -0,0 +1,46 @@ +#ifndef LINUX_SSB_MIPSCORE_H_ +#define LINUX_SSB_MIPSCORE_H_ + +#ifdef CONFIG_SSB_DRIVER_MIPS + +struct ssb_device; + +struct ssb_serial_port { + void *regs; + unsigned long clockspeed; + unsigned int irq; + unsigned int baud_base; + unsigned int reg_shift; +}; + + +struct ssb_mipscore { + struct ssb_device *dev; + + int nr_serial_ports; + struct ssb_serial_port serial_ports[4]; + + u8 flash_buswidth; + u32 flash_window; + u32 flash_window_size; +}; + +extern void ssb_mipscore_init(struct ssb_mipscore *mcore); +extern u32 ssb_cpu_clock(struct ssb_mipscore *mcore); + +extern unsigned int ssb_mips_irq(struct ssb_device *dev); + + +#else /* CONFIG_SSB_DRIVER_MIPS */ + +struct ssb_mipscore { +}; + +static inline +void ssb_mipscore_init(struct ssb_mipscore *mcore) +{ +} + +#endif /* CONFIG_SSB_DRIVER_MIPS */ + +#endif /* LINUX_SSB_MIPSCORE_H_ */ diff --git a/include/linux/ssb/ssb_driver_pci.h b/include/linux/ssb/ssb_driver_pci.h new file mode 100644 index 00000000..41e330e5 --- /dev/null +++ b/include/linux/ssb/ssb_driver_pci.h @@ -0,0 +1,130 @@ +#ifndef LINUX_SSB_PCICORE_H_ +#define LINUX_SSB_PCICORE_H_ + +#include <linux/types.h> + +struct pci_dev; + + +#ifdef CONFIG_SSB_DRIVER_PCICORE + +/* PCI core registers. 
*/ +#define SSB_PCICORE_CTL 0x0000 /* PCI Control */ +#define SSB_PCICORE_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */ +#define SSB_PCICORE_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */ +#define SSB_PCICORE_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */ +#define SSB_PCICORE_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */ +#define SSB_PCICORE_ARBCTL 0x0010 /* PCI Arbiter Control */ +#define SSB_PCICORE_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */ +#define SSB_PCICORE_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */ +#define SSB_PCICORE_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */ +#define SSB_PCICORE_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */ +#define SSB_PCICORE_ARBCTL_PARKID_4710 0x00000002 /* 4710 */ +#define SSB_PCICORE_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */ +#define SSB_PCICORE_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */ +#define SSB_PCICORE_ISTAT 0x0020 /* Interrupt status */ +#define SSB_PCICORE_ISTAT_INTA 0x00000001 /* PCI INTA# */ +#define SSB_PCICORE_ISTAT_INTB 0x00000002 /* PCI INTB# */ +#define SSB_PCICORE_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */ +#define SSB_PCICORE_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */ +#define SSB_PCICORE_ISTAT_PME 0x00000010 /* PCI PME# */ +#define SSB_PCICORE_IMASK 0x0024 /* Interrupt mask */ +#define SSB_PCICORE_IMASK_INTA 0x00000001 /* PCI INTA# */ +#define SSB_PCICORE_IMASK_INTB 0x00000002 /* PCI INTB# */ +#define SSB_PCICORE_IMASK_SERR 0x00000004 /* PCI SERR# */ +#define SSB_PCICORE_IMASK_PERR 0x00000008 /* PCI PERR# */ +#define SSB_PCICORE_IMASK_PME 0x00000010 /* PCI PME# */ +#define SSB_PCICORE_MBOX 0x0028 /* Backplane to PCI Mailbox */ +#define SSB_PCICORE_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */ +#define SSB_PCICORE_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */ +#define SSB_PCICORE_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */ +#define SSB_PCICORE_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */ +#define SSB_PCICORE_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */ +#define SSB_PCICORE_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */ +#define SSB_PCICORE_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */ +#define SSB_PCICORE_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */ +#define SSB_PCICORE_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */ +#define SSB_PCICORE_BCAST_ADDR_MASK 0x000000FF +#define SSB_PCICORE_BCAST_DATA 0x0054 /* Backplane Broadcast Data */ +#define SSB_PCICORE_GPIO_IN 0x0060 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_OUT 0x0064 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_ENABLE 0x0068 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_CTL 0x006C /* rev >= 2 only */ +#define SSB_PCICORE_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */ +#define SSB_PCICORE_SBTOPCI0_MASK 0xFC000000 +#define SSB_PCICORE_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */ +#define SSB_PCICORE_SBTOPCI1_MASK 0xFC000000 +#define SSB_PCICORE_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */ +#define SSB_PCICORE_SBTOPCI2_MASK 0xC0000000 +#define SSB_PCICORE_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */ +#define SSB_PCICORE_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */ +#define SSB_PCICORE_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */ +#define SSB_PCICORE_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */ +#define SSB_PCICORE_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */ + +/* SBtoPCIx */ +#define 
SSB_PCICORE_SBTOPCI_MEM 0x00000000 +#define SSB_PCICORE_SBTOPCI_IO 0x00000001 +#define SSB_PCICORE_SBTOPCI_CFG0 0x00000002 +#define SSB_PCICORE_SBTOPCI_CFG1 0x00000003 +#define SSB_PCICORE_SBTOPCI_PREF 0x00000004 /* Prefetch enable */ +#define SSB_PCICORE_SBTOPCI_BURST 0x00000008 /* Burst enable */ +#define SSB_PCICORE_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */ +#define SSB_PCICORE_SBTOPCI_RC 0x00000030 /* Read Command mask (rev >= 11) */ +#define SSB_PCICORE_SBTOPCI_RC_READ 0x00000000 /* Memory read */ +#define SSB_PCICORE_SBTOPCI_RC_READL 0x00000010 /* Memory read line */ +#define SSB_PCICORE_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */ + + +/* PCIcore specific boardflags */ +#define SSB_PCICORE_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ + + +struct ssb_pcicore { + struct ssb_device *dev; + u8 setup_done:1; + u8 hostmode:1; + u8 cardbusmode:1; +}; + +extern void ssb_pcicore_init(struct ssb_pcicore *pc); + +/* Enable IRQ routing for a specific device */ +extern int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev); + +int ssb_pcicore_plat_dev_init(struct pci_dev *d); +int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); + + +#else /* CONFIG_SSB_DRIVER_PCICORE */ + + +struct ssb_pcicore { +}; + +static inline +void ssb_pcicore_init(struct ssb_pcicore *pc) +{ +} + +static inline +int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev) +{ + return 0; +} + +static inline +int ssb_pcicore_plat_dev_init(struct pci_dev *d) +{ + return -ENODEV; +} +static inline +int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return -ENODEV; +} + +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +#endif /* LINUX_SSB_PCICORE_H_ */ diff --git a/include/linux/ssb/ssb_embedded.h b/include/linux/ssb/ssb_embedded.h new file mode 100644 index 00000000..8d8dedff --- /dev/null +++ b/include/linux/ssb/ssb_embedded.h @@ -0,0 +1,18 @@ +#ifndef LINUX_SSB_EMBEDDED_H_ +#define LINUX_SSB_EMBEDDED_H_ + +#include <linux/types.h> +#include <linux/ssb/ssb.h> + + +extern int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks); + +/* Generic GPIO API */ +u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask); +u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_control(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value); + +#endif /* LINUX_SSB_EMBEDDED_H_ */ diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h new file mode 100644 index 00000000..40b1ef85 --- /dev/null +++ b/include/linux/ssb/ssb_regs.h @@ -0,0 +1,594 @@ +#ifndef LINUX_SSB_REGS_H_ +#define LINUX_SSB_REGS_H_ + + +/* SiliconBackplane Address Map. + * All regions may not exist on all chips. 
+ */ +#define SSB_SDRAM_BASE 0x00000000U /* Physical SDRAM */ +#define SSB_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SSB_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SSB_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */ +#define SSB_ENUM_BASE 0x18000000U /* Enumeration space base */ +#define SSB_ENUM_LIMIT 0x18010000U /* Enumeration space limit */ + +#define SSB_FLASH2 0x1c000000U /* Flash Region 2 (region 1 shadowed here) */ +#define SSB_FLASH2_SZ 0x02000000U /* Size of Flash Region 2 */ + +#define SSB_EXTIF_BASE 0x1f000000U /* External Interface region base address */ +#define SSB_FLASH1 0x1fc00000U /* Flash Region 1 */ +#define SSB_FLASH1_SZ 0x00400000U /* Size of Flash Region 1 */ + +#define SSB_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SSB_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */ +#define SSB_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), low 32 bits */ +#define SSB_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */ +#define SSB_EUART (SSB_EXTIF_BASE + 0x00800000) +#define SSB_LED (SSB_EXTIF_BASE + 0x00900000) + + +/* Enumeration space constants */ +#define SSB_CORE_SIZE 0x1000 /* Size of a core MMIO area */ +#define SSB_MAX_NR_CORES ((SSB_ENUM_LIMIT - SSB_ENUM_BASE) / SSB_CORE_SIZE) + + +/* mips address */ +#define SSB_EJTAG 0xff200000 /* MIPS EJTAG space (2M) */ + + +/* SSB PCI config space registers. */ +#define SSB_PMCSR 0x44 +#define SSB_PE 0x100 +#define SSB_BAR0_WIN 0x80 /* Backplane address space 0 */ +#define SSB_BAR1_WIN 0x84 /* Backplane address space 1 */ +#define SSB_SPROMCTL 0x88 /* SPROM control */ +#define SSB_SPROMCTL_WE 0x10 /* SPROM write enable */ +#define SSB_BAR1_CONTROL 0x8c /* Address space 1 burst control */ +#define SSB_PCI_IRQS 0x90 /* PCI interrupts */ +#define SSB_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */ +#define SSB_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */ +#define SSB_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */ +#define SSB_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */ +#define SSB_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */ +#define SSB_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define SSB_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define SSB_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ +#define SSB_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ + + +#define SSB_BAR0_MAX_RETRIES 50 + +/* Silicon backplane configuration register definitions */ +#define SSB_IPSFLAG 0x0F08 +#define SSB_IPSFLAG_IRQ1 0x0000003F /* which sbflags get routed to mips interrupt 1 */ +#define SSB_IPSFLAG_IRQ1_SHIFT 0 +#define SSB_IPSFLAG_IRQ2 0x00003F00 /* which sbflags get routed to mips interrupt 2 */ +#define SSB_IPSFLAG_IRQ2_SHIFT 8 +#define SSB_IPSFLAG_IRQ3 0x003F0000 /* which sbflags get routed to mips interrupt 3 */ +#define SSB_IPSFLAG_IRQ3_SHIFT 16 +#define SSB_IPSFLAG_IRQ4 0x3F000000 /* which sbflags get routed to mips interrupt 4 */ +#define SSB_IPSFLAG_IRQ4_SHIFT 24 +#define SSB_TPSFLAG 0x0F18 +#define SSB_TPSFLAG_BPFLAG 0x0000003F /* Backplane flag # */ +#define SSB_TPSFLAG_ALWAYSIRQ 0x00000040 /* IRQ is always sent on the Backplane */ +#define SSB_TMERRLOGA 0x0F48 +#define SSB_TMERRLOG 0x0F50 +#define SSB_ADMATCH3 0x0F60 +#define SSB_ADMATCH2 0x0F68 +#define SSB_ADMATCH1 0x0F70 +#define SSB_IMSTATE 
0x0F90 /* SB Initiator Agent State */ +#define SSB_IMSTATE_PC 0x0000000f /* Pipe Count */ +#define SSB_IMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */ +#define SSB_IMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */ +#define SSB_IMSTATE_AP_TS 0x00000010 /* Use timeslices only */ +#define SSB_IMSTATE_AP_TK 0x00000020 /* Use token only */ +#define SSB_IMSTATE_AP_RSV 0x00000030 /* Reserved */ +#define SSB_IMSTATE_IBE 0x00020000 /* In Band Error */ +#define SSB_IMSTATE_TO 0x00040000 /* Timeout */ +#define SSB_IMSTATE_BUSY 0x01800000 /* Busy (Backplane rev >= 2.3 only) */ +#define SSB_IMSTATE_REJECT 0x02000000 /* Reject (Backplane rev >= 2.3 only) */ +#define SSB_INTVEC 0x0F94 /* SB Interrupt Mask */ +#define SSB_INTVEC_PCI 0x00000001 /* Enable interrupts for PCI */ +#define SSB_INTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */ +#define SSB_INTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */ +#define SSB_INTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */ +#define SSB_INTVEC_USB 0x00000010 /* Enable interrupts for usb */ +#define SSB_INTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */ +#define SSB_INTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */ +#define SSB_TMSLOW 0x0F98 /* SB Target State Low */ +#define SSB_TMSLOW_RESET 0x00000001 /* Reset */ +#define SSB_TMSLOW_REJECT 0x00000002 /* Reject (Standard Backplane) */ +#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ +#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ +#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ +#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ +#define SSB_TMSLOW_BE 0x80000000 /* BIST Enable */ +#define SSB_TMSHIGH 0x0F9C /* SB Target State High */ +#define SSB_TMSHIGH_SERR 0x00000001 /* S-error */ +#define SSB_TMSHIGH_INT 0x00000002 /* Interrupt */ +#define SSB_TMSHIGH_BUSY 0x00000004 /* Busy */ +#define SSB_TMSHIGH_TO 0x00000020 /* Timeout. 
Backplane rev >= 2.3 only */ +#define SSB_TMSHIGH_COREFL 0x1FFF0000 /* Core specific flags */ +#define SSB_TMSHIGH_COREFL_SHIFT 16 +#define SSB_TMSHIGH_DMA64 0x10000000 /* 64bit DMA supported */ +#define SSB_TMSHIGH_GCR 0x20000000 /* Gated Clock Request */ +#define SSB_TMSHIGH_BISTF 0x40000000 /* BIST Failed */ +#define SSB_TMSHIGH_BISTD 0x80000000 /* BIST Done */ +#define SSB_BWA0 0x0FA0 +#define SSB_IMCFGLO 0x0FA8 +#define SSB_IMCFGLO_SERTO 0x00000007 /* Service timeout */ +#define SSB_IMCFGLO_REQTO 0x00000070 /* Request timeout */ +#define SSB_IMCFGLO_REQTO_SHIFT 4 +#define SSB_IMCFGLO_CONNID 0x00FF0000 /* Connection ID */ +#define SSB_IMCFGLO_CONNID_SHIFT 16 +#define SSB_IMCFGHI 0x0FAC +#define SSB_ADMATCH0 0x0FB0 +#define SSB_TMCFGLO 0x0FB8 +#define SSB_TMCFGHI 0x0FBC +#define SSB_BCONFIG 0x0FC0 +#define SSB_BSTATE 0x0FC8 +#define SSB_ACTCFG 0x0FD8 +#define SSB_FLAGST 0x0FE8 +#define SSB_IDLOW 0x0FF8 +#define SSB_IDLOW_CFGSP 0x00000003 /* Config Space */ +#define SSB_IDLOW_ADDRNGE 0x00000038 /* Address Ranges supported */ +#define SSB_IDLOW_ADDRNGE_SHIFT 3 +#define SSB_IDLOW_SYNC 0x00000040 +#define SSB_IDLOW_INITIATOR 0x00000080 +#define SSB_IDLOW_MIBL 0x00000F00 /* Minimum Backplane latency */ +#define SSB_IDLOW_MIBL_SHIFT 8 +#define SSB_IDLOW_MABL 0x0000F000 /* Maximum Backplane latency */ +#define SSB_IDLOW_MABL_SHIFT 12 +#define SSB_IDLOW_TIF 0x00010000 /* This Initiator is first */ +#define SSB_IDLOW_CCW 0x000C0000 /* Cycle counter width */ +#define SSB_IDLOW_CCW_SHIFT 18 +#define SSB_IDLOW_TPT 0x00F00000 /* Target ports */ +#define SSB_IDLOW_TPT_SHIFT 20 +#define SSB_IDLOW_INITP 0x0F000000 /* Initiator ports */ +#define SSB_IDLOW_INITP_SHIFT 24 +#define SSB_IDLOW_SSBREV 0xF0000000 /* Sonics Backplane Revision code */ +#define SSB_IDLOW_SSBREV_22 0x00000000 /* <= 2.2 */ +#define SSB_IDLOW_SSBREV_23 0x10000000 /* 2.3 */ +#define SSB_IDLOW_SSBREV_24 0x40000000 /* ?? Found in BCM4328 */ +#define SSB_IDLOW_SSBREV_25 0x50000000 /* ?? Not Found yet */ +#define SSB_IDLOW_SSBREV_26 0x60000000 /* ?? Found in some BCM4311/2 */ +#define SSB_IDLOW_SSBREV_27 0x70000000 /* ?? Found in some BCM4311/2 */ +#define SSB_IDHIGH 0x0FFC /* SB Identification High */ +#define SSB_IDHIGH_RCLO 0x0000000F /* Revision Code (low part) */ +#define SSB_IDHIGH_CC 0x00008FF0 /* Core Code */ +#define SSB_IDHIGH_CC_SHIFT 4 +#define SSB_IDHIGH_RCHI 0x00007000 /* Revision Code (high part) */ +#define SSB_IDHIGH_RCHI_SHIFT 8 /* yes, shift 8 is right */ +#define SSB_IDHIGH_VC 0xFFFF0000 /* Vendor Code */ +#define SSB_IDHIGH_VC_SHIFT 16 + +/* SPROM shadow area. If not otherwise noted, fields are + * two bytes wide. Note that the SPROM can _only_ be read + * in two-byte quantities. 
+ */ +#define SSB_SPROMSIZE_WORDS 64 +#define SSB_SPROMSIZE_BYTES (SSB_SPROMSIZE_WORDS * sizeof(u16)) +#define SSB_SPROMSIZE_WORDS_R123 64 +#define SSB_SPROMSIZE_WORDS_R4 220 +#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) +#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) +#define SSB_SPROM_BASE1 0x1000 +#define SSB_SPROM_BASE31 0x0800 +#define SSB_SPROM_REVISION 0x007E +#define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */ +#define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */ +#define SSB_SPROM_REVISION_CRC_SHIFT 8 + +/* SPROM Revision 1 */ +#define SSB_SPROM1_SPID 0x0004 /* Subsystem Product ID for PCI */ +#define SSB_SPROM1_SVID 0x0006 /* Subsystem Vendor ID for PCI */ +#define SSB_SPROM1_PID 0x0008 /* Product ID for PCI */ +#define SSB_SPROM1_IL0MAC 0x0048 /* 6 bytes MAC address for 802.11b/g */ +#define SSB_SPROM1_ET0MAC 0x004E /* 6 bytes MAC address for Ethernet */ +#define SSB_SPROM1_ET1MAC 0x0054 /* 6 bytes MAC address for 802.11a */ +#define SSB_SPROM1_ETHPHY 0x005A /* Ethernet PHY settings */ +#define SSB_SPROM1_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ +#define SSB_SPROM1_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ +#define SSB_SPROM1_ETHPHY_ET1A_SHIFT 5 +#define SSB_SPROM1_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ +#define SSB_SPROM1_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ +#define SSB_SPROM1_BINF 0x005C /* Board info */ +#define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */ +#define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */ +#define SSB_SPROM1_BINF_CCODE_SHIFT 8 +#define SSB_SPROM1_BINF_ANTBG 0x3000 /* Available B-PHY and G-PHY antennas */ +#define SSB_SPROM1_BINF_ANTBG_SHIFT 12 +#define SSB_SPROM1_BINF_ANTA 0xC000 /* Available A-PHY antennas */ +#define SSB_SPROM1_BINF_ANTA_SHIFT 14 +#define SSB_SPROM1_PA0B0 0x005E +#define SSB_SPROM1_PA0B1 0x0060 +#define SSB_SPROM1_PA0B2 0x0062 +#define SSB_SPROM1_GPIOA 0x0064 /* General Purpose IO pins 0 and 1 */ +#define SSB_SPROM1_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM1_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM1_GPIOA_P1_SHIFT 8 +#define SSB_SPROM1_GPIOB 0x0066 /* General Purpuse IO pins 2 and 3 */ +#define SSB_SPROM1_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM1_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM1_GPIOB_P3_SHIFT 8 +#define SSB_SPROM1_MAXPWR 0x0068 /* Power Amplifier Max Power */ +#define SSB_SPROM1_MAXPWR_BG 0x00FF /* B-PHY and G-PHY (in dBm Q5.2) */ +#define SSB_SPROM1_MAXPWR_A 0xFF00 /* A-PHY (in dBm Q5.2) */ +#define SSB_SPROM1_MAXPWR_A_SHIFT 8 +#define SSB_SPROM1_PA1B0 0x006A +#define SSB_SPROM1_PA1B1 0x006C +#define SSB_SPROM1_PA1B2 0x006E +#define SSB_SPROM1_ITSSI 0x0070 /* Idle TSSI Target */ +#define SSB_SPROM1_ITSSI_BG 0x00FF /* B-PHY and G-PHY*/ +#define SSB_SPROM1_ITSSI_A 0xFF00 /* A-PHY */ +#define SSB_SPROM1_ITSSI_A_SHIFT 8 +#define SSB_SPROM1_BFLLO 0x0072 /* Boardflags (low 16 bits) */ +#define SSB_SPROM1_AGAIN 0x0074 /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM1_AGAIN_BG 0x00FF /* B-PHY and G-PHY */ +#define SSB_SPROM1_AGAIN_BG_SHIFT 0 +#define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */ +#define SSB_SPROM1_AGAIN_A_SHIFT 8 + +/* SPROM Revision 2 (inherits from rev 1) */ +#define SSB_SPROM2_BFLHI 0x0038 /* Boardflags (high 16 bits) */ +#define SSB_SPROM2_MAXP_A 0x003A /* A-PHY Max Power */ +#define SSB_SPROM2_MAXP_A_HI 0x00FF /* Max Power High */ +#define SSB_SPROM2_MAXP_A_LO 0xFF00 /* Max Power Low */ +#define SSB_SPROM2_MAXP_A_LO_SHIFT 8 +#define SSB_SPROM2_PA1LOB0 0x003C /* A-PHY PowerAmplifier Low Settings 
*/ +#define SSB_SPROM2_PA1LOB1 0x003E /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1LOB2 0x0040 /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1HIB0 0x0042 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_PA1HIB1 0x0044 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_PA1HIB2 0x0046 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_OPO 0x0078 /* OFDM Power Offset from CCK Level */ +#define SSB_SPROM2_OPO_VALUE 0x00FF +#define SSB_SPROM2_OPO_UNUSED 0xFF00 +#define SSB_SPROM2_CCODE 0x007C /* Two char Country Code */ + +/* SPROM Revision 3 (inherits most data from rev 2) */ +#define SSB_SPROM3_OFDMAPO 0x002C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_OFDMALPO 0x0030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_OFDMAHPO 0x0034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_GPIOLDC 0x0042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */ +#define SSB_SPROM3_GPIOLDC_OFF 0x0000FF00 /* Off Count */ +#define SSB_SPROM3_GPIOLDC_OFF_SHIFT 8 +#define SSB_SPROM3_GPIOLDC_ON 0x00FF0000 /* On Count */ +#define SSB_SPROM3_GPIOLDC_ON_SHIFT 16 +#define SSB_SPROM3_IL0MAC 0x004A /* 6 bytes MAC address for 802.11b/g */ +#define SSB_SPROM3_CCKPO 0x0078 /* CCK Power Offset */ +#define SSB_SPROM3_CCKPO_1M 0x000F /* 1M Rate PO */ +#define SSB_SPROM3_CCKPO_2M 0x00F0 /* 2M Rate PO */ +#define SSB_SPROM3_CCKPO_2M_SHIFT 4 +#define SSB_SPROM3_CCKPO_55M 0x0F00 /* 5.5M Rate PO */ +#define SSB_SPROM3_CCKPO_55M_SHIFT 8 +#define SSB_SPROM3_CCKPO_11M 0xF000 /* 11M Rate PO */ +#define SSB_SPROM3_CCKPO_11M_SHIFT 12 +#define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */ + +/* SPROM Revision 4 */ +#define SSB_SPROM4_BFLLO 0x0044 /* Boardflags (low 16 bits) */ +#define SSB_SPROM4_BFLHI 0x0046 /* Board Flags Hi */ +#define SSB_SPROM4_BFL2LO 0x0048 /* Board flags 2 (low 16 bits) */ +#define SSB_SPROM4_BFL2HI 0x004A /* Board flags 2 Hi */ +#define SSB_SPROM4_IL0MAC 0x004C /* 6 byte MAC address for a/b/g/n */ +#define SSB_SPROM4_CCODE 0x0052 /* Country Code (2 bytes) */ +#define SSB_SPROM4_GPIOA 0x0056 /* Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM4_GPIOA_P1_SHIFT 8 +#define SSB_SPROM4_GPIOB 0x0058 /* Gen. Purpose IO # 2 and 3 */ +#define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM4_GPIOB_P3_SHIFT 8 +#define SSB_SPROM4_ETHPHY 0x005A /* Ethernet PHY settings ?? 
*/ +#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ +#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ +#define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5 +#define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ +#define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ +#define SSB_SPROM4_ANTAVAIL 0x005D /* Antenna available bitfields */ +#define SSB_SPROM4_ANTAVAIL_A 0x00FF /* A-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_A_SHIFT 0 +#define SSB_SPROM4_ANTAVAIL_BG 0xFF00 /* B-PHY and G-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 8 +#define SSB_SPROM4_AGAIN01 0x005E /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */ +#define SSB_SPROM4_AGAIN0_SHIFT 0 +#define SSB_SPROM4_AGAIN1 0xFF00 /* Antenna 1 */ +#define SSB_SPROM4_AGAIN1_SHIFT 8 +#define SSB_SPROM4_AGAIN23 0x0060 +#define SSB_SPROM4_AGAIN2 0x00FF /* Antenna 2 */ +#define SSB_SPROM4_AGAIN2_SHIFT 0 +#define SSB_SPROM4_AGAIN3 0xFF00 /* Antenna 3 */ +#define SSB_SPROM4_AGAIN3_SHIFT 8 +#define SSB_SPROM4_TXPID2G01 0x0062 /* TX Power Index 2GHz */ +#define SSB_SPROM4_TXPID2G0 0x00FF +#define SSB_SPROM4_TXPID2G0_SHIFT 0 +#define SSB_SPROM4_TXPID2G1 0xFF00 +#define SSB_SPROM4_TXPID2G1_SHIFT 8 +#define SSB_SPROM4_TXPID2G23 0x0064 /* TX Power Index 2GHz */ +#define SSB_SPROM4_TXPID2G2 0x00FF +#define SSB_SPROM4_TXPID2G2_SHIFT 0 +#define SSB_SPROM4_TXPID2G3 0xFF00 +#define SSB_SPROM4_TXPID2G3_SHIFT 8 +#define SSB_SPROM4_TXPID5G01 0x0066 /* TX Power Index 5GHz middle subband */ +#define SSB_SPROM4_TXPID5G0 0x00FF +#define SSB_SPROM4_TXPID5G0_SHIFT 0 +#define SSB_SPROM4_TXPID5G1 0xFF00 +#define SSB_SPROM4_TXPID5G1_SHIFT 8 +#define SSB_SPROM4_TXPID5G23 0x0068 /* TX Power Index 5GHz middle subband */ +#define SSB_SPROM4_TXPID5G2 0x00FF +#define SSB_SPROM4_TXPID5G2_SHIFT 0 +#define SSB_SPROM4_TXPID5G3 0xFF00 +#define SSB_SPROM4_TXPID5G3_SHIFT 8 +#define SSB_SPROM4_TXPID5GL01 0x006A /* TX Power Index 5GHz low subband */ +#define SSB_SPROM4_TXPID5GL0 0x00FF +#define SSB_SPROM4_TXPID5GL0_SHIFT 0 +#define SSB_SPROM4_TXPID5GL1 0xFF00 +#define SSB_SPROM4_TXPID5GL1_SHIFT 8 +#define SSB_SPROM4_TXPID5GL23 0x006C /* TX Power Index 5GHz low subband */ +#define SSB_SPROM4_TXPID5GL2 0x00FF +#define SSB_SPROM4_TXPID5GL2_SHIFT 0 +#define SSB_SPROM4_TXPID5GL3 0xFF00 +#define SSB_SPROM4_TXPID5GL3_SHIFT 8 +#define SSB_SPROM4_TXPID5GH01 0x006E /* TX Power Index 5GHz high subband */ +#define SSB_SPROM4_TXPID5GH0 0x00FF +#define SSB_SPROM4_TXPID5GH0_SHIFT 0 +#define SSB_SPROM4_TXPID5GH1 0xFF00 +#define SSB_SPROM4_TXPID5GH1_SHIFT 8 +#define SSB_SPROM4_TXPID5GH23 0x0070 /* TX Power Index 5GHz high subband */ +#define SSB_SPROM4_TXPID5GH2 0x00FF +#define SSB_SPROM4_TXPID5GH2_SHIFT 0 +#define SSB_SPROM4_TXPID5GH3 0xFF00 +#define SSB_SPROM4_TXPID5GH3_SHIFT 8 +#define SSB_SPROM4_MAXP_BG 0x0080 /* Max Power BG in path 1 */ +#define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */ +#define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ +#define SSB_SPROM4_ITSSI_BG_SHIFT 8 +#define SSB_SPROM4_MAXP_A 0x008A /* Max Power A in path 1 */ +#define SSB_SPROM4_MAXP_A_MASK 0x00FF /* Mask for Max Power A */ +#define SSB_SPROM4_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ +#define SSB_SPROM4_ITSSI_A_SHIFT 8 +#define SSB_SPROM4_PA0B0 0x0082 /* The paXbY locations are */ +#define SSB_SPROM4_PA0B1 0x0084 /* only guesses */ +#define SSB_SPROM4_PA0B2 0x0086 +#define SSB_SPROM4_PA1B0 0x008E +#define SSB_SPROM4_PA1B1 0x0090 +#define SSB_SPROM4_PA1B2 0x0092 + +/* SPROM Revision 5 (inherits most data from 
rev 4) */ +#define SSB_SPROM5_CCODE 0x0044 /* Country Code (2 bytes) */ +#define SSB_SPROM5_BFLLO 0x004A /* Boardflags (low 16 bits) */ +#define SSB_SPROM5_BFLHI 0x004C /* Board Flags Hi */ +#define SSB_SPROM5_BFL2LO 0x004E /* Board flags 2 (low 16 bits) */ +#define SSB_SPROM5_BFL2HI 0x0050 /* Board flags 2 Hi */ +#define SSB_SPROM5_IL0MAC 0x0052 /* 6 byte MAC address for a/b/g/n */ +#define SSB_SPROM5_GPIOA 0x0076 /* Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM5_GPIOA_P1_SHIFT 8 +#define SSB_SPROM5_GPIOB 0x0078 /* Gen. Purpose IO # 2 and 3 */ +#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM5_GPIOB_P3_SHIFT 8 + +/* SPROM Revision 8 */ +#define SSB_SPROM8_BOARDREV 0x0082 /* Board revision */ +#define SSB_SPROM8_BFLLO 0x0084 /* Board flags (bits 0-15) */ +#define SSB_SPROM8_BFLHI 0x0086 /* Board flags (bits 16-31) */ +#define SSB_SPROM8_BFL2LO 0x0088 /* Board flags (bits 32-47) */ +#define SSB_SPROM8_BFL2HI 0x008A /* Board flags (bits 48-63) */ +#define SSB_SPROM8_IL0MAC 0x008C /* 6 byte MAC address */ +#define SSB_SPROM8_CCODE 0x0092 /* 2 byte country code */ +#define SSB_SPROM8_GPIOA 0x0096 /*Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM8_GPIOA_P1_SHIFT 8 +#define SSB_SPROM8_GPIOB 0x0098 /* Gen. Purpose IO # 2 and 3 */ +#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM8_GPIOB_P3_SHIFT 8 +#define SSB_SPROM8_ANTAVAIL 0x009C /* Antenna available bitfields*/ +#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */ +#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8 +#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */ +#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0 +#define SSB_SPROM8_AGAIN01 0x009E /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */ +#define SSB_SPROM8_AGAIN0_SHIFT 0 +#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */ +#define SSB_SPROM8_AGAIN1_SHIFT 8 +#define SSB_SPROM8_AGAIN23 0x00A0 +#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */ +#define SSB_SPROM8_AGAIN2_SHIFT 0 +#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */ +#define SSB_SPROM8_AGAIN3_SHIFT 8 +#define SSB_SPROM8_RSSIPARM2G 0x00A4 /* RSSI params for 2GHz */ +#define SSB_SPROM8_RSSISMF2G 0x000F +#define SSB_SPROM8_RSSISMC2G 0x00F0 +#define SSB_SPROM8_RSSISMC2G_SHIFT 4 +#define SSB_SPROM8_RSSISAV2G 0x0700 +#define SSB_SPROM8_RSSISAV2G_SHIFT 8 +#define SSB_SPROM8_BXA2G 0x1800 +#define SSB_SPROM8_BXA2G_SHIFT 11 +#define SSB_SPROM8_RSSIPARM5G 0x00A6 /* RSSI params for 5GHz */ +#define SSB_SPROM8_RSSISMF5G 0x000F +#define SSB_SPROM8_RSSISMC5G 0x00F0 +#define SSB_SPROM8_RSSISMC5G_SHIFT 4 +#define SSB_SPROM8_RSSISAV5G 0x0700 +#define SSB_SPROM8_RSSISAV5G_SHIFT 8 +#define SSB_SPROM8_BXA5G 0x1800 +#define SSB_SPROM8_BXA5G_SHIFT 11 +#define SSB_SPROM8_TRI25G 0x00A8 /* TX isolation 2.4&5.3GHz */ +#define SSB_SPROM8_TRI2G 0x00FF /* TX isolation 2.4GHz */ +#define SSB_SPROM8_TRI5G 0xFF00 /* TX isolation 5.3GHz */ +#define SSB_SPROM8_TRI5G_SHIFT 8 +#define SSB_SPROM8_TRI5GHL 0x00AA /* TX isolation 5.2/5.8GHz */ +#define SSB_SPROM8_TRI5GL 0x00FF /* TX isolation 5.2GHz */ +#define SSB_SPROM8_TRI5GH 0xFF00 /* TX isolation 5.8GHz */ +#define SSB_SPROM8_TRI5GH_SHIFT 8 +#define SSB_SPROM8_RXPO 0x00AC /* RX power offsets */ +#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ 
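The rev 8 antenna-gain words above pack two byte-wide fields into each 16-bit SPROM word. A standalone sketch (illustrative only, not from the patch; constants copied from the definitions above, SPROM word value hypothetical) of unpacking SSB_SPROM8_AGAIN01, reading the "dBm Q5.2" comment literally as two fractional bits (whole dBm = raw / 4):

#include <stdint.h>
#include <stdio.h>

/* Copied from the SPROM revision 8 definitions above. */
#define SSB_SPROM8_AGAIN0		0x00FF	/* Antenna 0 */
#define SSB_SPROM8_AGAIN0_SHIFT		0
#define SSB_SPROM8_AGAIN1		0xFF00	/* Antenna 1 */
#define SSB_SPROM8_AGAIN1_SHIFT		8

int main(void)
{
	uint16_t again01 = 0x0A08;	/* hypothetical word at SSB_SPROM8_AGAIN01 */
	unsigned int a0 = (again01 & SSB_SPROM8_AGAIN0) >> SSB_SPROM8_AGAIN0_SHIFT;
	unsigned int a1 = (again01 & SSB_SPROM8_AGAIN1) >> SSB_SPROM8_AGAIN1_SHIFT;

	/* Q5.2 interpretation (assumption, not spelled out in the header):
	 * two fractional bits, so divide the raw byte by 4 for dBm. */
	printf("antenna 0 gain: %u.%02u dBm\n", a0 >> 2, (a0 & 3) * 25);
	printf("antenna 1 gain: %u.%02u dBm\n", a1 >> 2, (a1 & 3) * 25);
	return 0;
}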
+#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ +#define SSB_SPROM8_RXPO5G_SHIFT 8 +#define SSB_SPROM8_FEM2G 0x00AE +#define SSB_SPROM8_FEM5G 0x00B0 +#define SSB_SROM8_FEM_TSSIPOS 0x0001 +#define SSB_SROM8_FEM_TSSIPOS_SHIFT 0 +#define SSB_SROM8_FEM_EXTPA_GAIN 0x0006 +#define SSB_SROM8_FEM_EXTPA_GAIN_SHIFT 1 +#define SSB_SROM8_FEM_PDET_RANGE 0x00F8 +#define SSB_SROM8_FEM_PDET_RANGE_SHIFT 3 +#define SSB_SROM8_FEM_TR_ISO 0x0700 +#define SSB_SROM8_FEM_TR_ISO_SHIFT 8 +#define SSB_SROM8_FEM_ANTSWLUT 0xF800 +#define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11 +#define SSB_SPROM8_THERMAL 0x00B2 +#define SSB_SPROM8_MPWR_RAWTS 0x00B4 +#define SSB_SPROM8_TS_SLP_OPT_CORRX 0x00B6 +#define SSB_SPROM8_FOC_HWIQ_IQSWP 0x00B8 +#define SSB_SPROM8_PHYCAL_TEMPDELTA 0x00BA + +/* There are 4 blocks with power info sharing the same layout */ +#define SSB_SROM8_PWR_INFO_CORE0 0x00C0 +#define SSB_SROM8_PWR_INFO_CORE1 0x00E0 +#define SSB_SROM8_PWR_INFO_CORE2 0x0100 +#define SSB_SROM8_PWR_INFO_CORE3 0x0120 + +#define SSB_SROM8_2G_MAXP_ITSSI 0x00 +#define SSB_SPROM8_2G_MAXP 0x00FF +#define SSB_SPROM8_2G_ITSSI 0xFF00 +#define SSB_SPROM8_2G_ITSSI_SHIFT 8 +#define SSB_SROM8_2G_PA_0 0x02 /* 2GHz power amp settings */ +#define SSB_SROM8_2G_PA_1 0x04 +#define SSB_SROM8_2G_PA_2 0x06 +#define SSB_SROM8_5G_MAXP_ITSSI 0x08 /* 5GHz ITSSI and 5.3GHz Max Power */ +#define SSB_SPROM8_5G_MAXP 0x00FF +#define SSB_SPROM8_5G_ITSSI 0xFF00 +#define SSB_SPROM8_5G_ITSSI_SHIFT 8 +#define SSB_SPROM8_5GHL_MAXP 0x0A /* 5.2GHz and 5.8GHz Max Power */ +#define SSB_SPROM8_5GH_MAXP 0x00FF +#define SSB_SPROM8_5GL_MAXP 0xFF00 +#define SSB_SPROM8_5GL_MAXP_SHIFT 8 +#define SSB_SROM8_5G_PA_0 0x0C /* 5.3GHz power amp settings */ +#define SSB_SROM8_5G_PA_1 0x0E +#define SSB_SROM8_5G_PA_2 0x10 +#define SSB_SROM8_5GL_PA_0 0x12 /* 5.2GHz power amp settings */ +#define SSB_SROM8_5GL_PA_1 0x14 +#define SSB_SROM8_5GL_PA_2 0x16 +#define SSB_SROM8_5GH_PA_0 0x18 /* 5.8GHz power amp settings */ +#define SSB_SROM8_5GH_PA_1 0x1A +#define SSB_SROM8_5GH_PA_2 0x1C + +/* TODO: Make it deprecated */ +#define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */ +#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ +#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ +#define SSB_SPROM8_ITSSI_BG_SHIFT 8 +#define SSB_SPROM8_PA0B0 0x00C2 /* 2GHz power amp settings */ +#define SSB_SPROM8_PA0B1 0x00C4 +#define SSB_SPROM8_PA0B2 0x00C6 +#define SSB_SPROM8_MAXP_A 0x00C8 /* Max Power 5.3GHz */ +#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power 5.3GHz */ +#define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ +#define SSB_SPROM8_ITSSI_A_SHIFT 8 +#define SSB_SPROM8_MAXP_AHL 0x00CA /* Max Power 5.2/5.8GHz */ +#define SSB_SPROM8_MAXP_AH_MASK 0x00FF /* Mask for Max Power 5.8GHz */ +#define SSB_SPROM8_MAXP_AL_MASK 0xFF00 /* Mask for Max Power 5.2GHz */ +#define SSB_SPROM8_MAXP_AL_SHIFT 8 +#define SSB_SPROM8_PA1B0 0x00CC /* 5.3GHz power amp settings */ +#define SSB_SPROM8_PA1B1 0x00CE +#define SSB_SPROM8_PA1B2 0x00D0 +#define SSB_SPROM8_PA1LOB0 0x00D2 /* 5.2GHz power amp settings */ +#define SSB_SPROM8_PA1LOB1 0x00D4 +#define SSB_SPROM8_PA1LOB2 0x00D6 +#define SSB_SPROM8_PA1HIB0 0x00D8 /* 5.8GHz power amp settings */ +#define SSB_SPROM8_PA1HIB1 0x00DA +#define SSB_SPROM8_PA1HIB2 0x00DC + +#define SSB_SPROM8_CCK2GPO 0x0140 /* CCK power offset */ +#define SSB_SPROM8_OFDM2GPO 0x0142 /* 2.4GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GPO 0x0146 /* 5.3GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GLPO 0x014A /* 5.2GHz OFDM power 
offset */ +#define SSB_SPROM8_OFDM5GHPO 0x014E /* 5.8GHz OFDM power offset */ + +/* Values for boardflags_lo read from SPROM */ +#define SSB_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */ +#define SSB_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */ +#define SSB_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */ +#define SSB_BFL_RSSI 0x0008 /* software calculates nrssi slope. */ +#define SSB_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */ +#define SSB_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */ +#define SSB_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */ +#define SSB_BFL_ENETADM 0x0080 /* has ADMtek switch */ +#define SSB_BFL_ENETVLAN 0x0100 /* can do vlan */ +#define SSB_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */ +#define SSB_BFL_NOPCI 0x0400 /* board leaves PCI floating */ +#define SSB_BFL_FEM 0x0800 /* supports the Front End Module */ +#define SSB_BFL_EXTLNA 0x1000 /* has an external LNA */ +#define SSB_BFL_HGPA 0x2000 /* had high gain PA */ +#define SSB_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */ +#define SSB_BFL_ALTIQ 0x8000 /* alternate I/Q settings */ + +/* Values for boardflags_hi read from SPROM */ +#define SSB_BFH_NOPA 0x0001 /* has no PA */ +#define SSB_BFH_RSSIINV 0x0002 /* RSSI uses positive slope (not TSSI) */ +#define SSB_BFH_PAREF 0x0004 /* uses the PARef LDO */ +#define SSB_BFH_3TSWITCH 0x0008 /* uses a triple throw switch shared with bluetooth */ +#define SSB_BFH_PHASESHIFT 0x0010 /* can support phase shifter */ +#define SSB_BFH_BUCKBOOST 0x0020 /* has buck/booster */ +#define SSB_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna with bluetooth */ + +/* Values for boardflags2_lo read from SPROM */ +#define SSB_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */ +#define SSB_BFL2_APLL_WAR 0x0002 /* alternative A-band PLL settings implemented */ +#define SSB_BFL2_TXPWRCTRL_EN 0x0004 /* permits enabling TX Power Control */ +#define SSB_BFL2_2X4_DIV 0x0008 /* 2x4 diversity switch */ +#define SSB_BFL2_5G_PWRGAIN 0x0010 /* supports 5G band power gain */ +#define SSB_BFL2_PCIEWAR_OVR 0x0020 /* overrides ASPM and Clkreq settings */ +#define SSB_BFL2_CAESERS_BRD 0x0040 /* is Caesers board (unused) */ +#define SSB_BFL2_BTC3WIRE 0x0080 /* used 3-wire bluetooth coexist */ +#define SSB_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */ +#define SSB_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */ +#define SSB_BFL2_GPLL_WAR 0x0400 /* altenative G-band PLL settings implemented */ + +/* Values for SSB_SPROM1_BINF_CCODE */ +enum { + SSB_SPROM1CCODE_WORLD = 0, + SSB_SPROM1CCODE_THAILAND, + SSB_SPROM1CCODE_ISRAEL, + SSB_SPROM1CCODE_JORDAN, + SSB_SPROM1CCODE_CHINA, + SSB_SPROM1CCODE_JAPAN, + SSB_SPROM1CCODE_USA_CANADA_ANZ, + SSB_SPROM1CCODE_EUROPE, + SSB_SPROM1CCODE_USA_LOW, + SSB_SPROM1CCODE_JAPAN_HIGH, + SSB_SPROM1CCODE_ALL, + SSB_SPROM1CCODE_NONE, +}; + +/* Address-Match values and masks (SSB_ADMATCHxxx) */ +#define SSB_ADM_TYPE 0x00000003 /* Address type */ +#define SSB_ADM_TYPE0 0 +#define SSB_ADM_TYPE1 1 +#define SSB_ADM_TYPE2 2 +#define SSB_ADM_AD64 0x00000004 +#define SSB_ADM_SZ0 0x000000F8 /* Type0 size */ +#define SSB_ADM_SZ0_SHIFT 3 +#define SSB_ADM_SZ1 0x000001F8 /* Type1 size */ +#define SSB_ADM_SZ1_SHIFT 3 +#define SSB_ADM_SZ2 0x000001F8 /* Type2 size */ +#define SSB_ADM_SZ2_SHIFT 3 +#define SSB_ADM_EN 0x00000400 /* Enable */ +#define SSB_ADM_NEG 0x00000800 /* Negative decode */ +#define SSB_ADM_BASE0 0xFFFFFF00 /* Type0 base address */ 
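/*
 * Illustrative sketch, not part of ssb_regs.h: decoding an enabled Type0
 * address-match descriptor with the SSB_ADM_* masks above.  The function
 * name and the exact interpretation of the size field are assumptions;
 * treat this as the mask/shift pattern only, not as the hardware spec.
 */
static inline int ssb_admatch_decode_type0(u32 adm, u32 *base, u32 *size_exp)
{
	if ((adm & SSB_ADM_TYPE) != SSB_ADM_TYPE0 || !(adm & SSB_ADM_EN))
		return -1;			/* not an enabled Type0 entry */
	*base = adm & SSB_ADM_BASE0;		/* base address bits kept in place */
	*size_exp = (adm & SSB_ADM_SZ0) >> SSB_ADM_SZ0_SHIFT;
	return 0;
}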
+#define SSB_ADM_BASE0_SHIFT 8 +#define SSB_ADM_BASE1 0xFFFFF000 /* Type1 base address for the core */ +#define SSB_ADM_BASE1_SHIFT 12 +#define SSB_ADM_BASE2 0xFFFF0000 /* Type2 base address for the core */ +#define SSB_ADM_BASE2_SHIFT 16 + + +#endif /* LINUX_SSB_REGS_H_ */ diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h new file mode 100644 index 00000000..6f3e54c7 --- /dev/null +++ b/include/linux/stackprotector.h @@ -0,0 +1,16 @@ +#ifndef _LINUX_STACKPROTECTOR_H +#define _LINUX_STACKPROTECTOR_H 1 + +#include <linux/compiler.h> +#include <linux/sched.h> +#include <linux/random.h> + +#ifdef CONFIG_CC_STACKPROTECTOR +# include <asm/stackprotector.h> +#else +static inline void boot_init_stack_canary(void) +{ +} +#endif + +#endif diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h new file mode 100644 index 00000000..115b570e --- /dev/null +++ b/include/linux/stacktrace.h @@ -0,0 +1,37 @@ +#ifndef __LINUX_STACKTRACE_H +#define __LINUX_STACKTRACE_H + +struct task_struct; +struct pt_regs; + +#ifdef CONFIG_STACKTRACE +struct task_struct; + +struct stack_trace { + unsigned int nr_entries, max_entries; + unsigned long *entries; + int skip; /* input argument: How many entries to skip */ +}; + +extern void save_stack_trace(struct stack_trace *trace); +extern void save_stack_trace_regs(struct pt_regs *regs, + struct stack_trace *trace); +extern void save_stack_trace_tsk(struct task_struct *tsk, + struct stack_trace *trace); + +extern void print_stack_trace(struct stack_trace *trace, int spaces); + +#ifdef CONFIG_USER_STACKTRACE_SUPPORT +extern void save_stack_trace_user(struct stack_trace *trace); +#else +# define save_stack_trace_user(trace) do { } while (0) +#endif + +#else +# define save_stack_trace(trace) do { } while (0) +# define save_stack_trace_tsk(tsk, trace) do { } while (0) +# define save_stack_trace_user(trace) do { } while (0) +# define print_stack_trace(trace, spaces) do { } while (0) +#endif + +#endif diff --git a/include/linux/stallion.h b/include/linux/stallion.h new file mode 100644 index 00000000..336af33c --- /dev/null +++ b/include/linux/stallion.h @@ -0,0 +1,147 @@ +/*****************************************************************************/ + +/* + * stallion.h -- stallion multiport serial driver. + * + * Copyright (C) 1996-1998 Stallion Technologies + * Copyright (C) 1994-1996 Greg Ungerer. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*****************************************************************************/ +#ifndef _STALLION_H +#define _STALLION_H +/*****************************************************************************/ + +/* + * Define important driver constants here. 
+ */ +#define STL_MAXBRDS 4 +#define STL_MAXPANELS 4 +#define STL_MAXBANKS 8 +#define STL_PORTSPERPANEL 16 +#define STL_MAXPORTS 64 +#define STL_MAXDEVS (STL_MAXBRDS * STL_MAXPORTS) + + +/* + * Define a set of structures to hold all the board/panel/port info + * for our ports. These will be dynamically allocated as required. + */ + +/* + * Define a ring queue structure for each port. This will hold the + * TX data waiting to be output. Characters are fed into this buffer + * from the line discipline (or even direct from user space!) and + * then fed into the UARTs during interrupts. Will use a classic ring + * queue here for this. The good thing about this type of ring queue + * is that the head and tail pointers can be updated without interrupt + * protection - since "write" code only needs to change the head, and + * interrupt code only needs to change the tail. + */ +struct stlrq { + char *buf; + char *head; + char *tail; +}; + +/* + * Port, panel and board structures to hold status info about each. + * The board structure contains pointers to structures for each panel + * connected to it, and in turn each panel structure contains pointers + * for each port structure for each port on that panel. Note that + * the port structure also contains the board and panel number that it + * is associated with, this makes it (fairly) easy to get back to the + * board/panel info for a port. + */ +struct stlport { + unsigned long magic; + struct tty_port port; + unsigned int portnr; + unsigned int panelnr; + unsigned int brdnr; + int ioaddr; + int uartaddr; + unsigned int pagenr; + unsigned long istate; + int baud_base; + int custom_divisor; + int close_delay; + int closing_wait; + int openwaitcnt; + int brklen; + unsigned int sigs; + unsigned int rxignoremsk; + unsigned int rxmarkmsk; + unsigned int imr; + unsigned int crenable; + unsigned long clk; + unsigned long hwid; + void *uartp; + comstats_t stats; + struct stlrq tx; +}; + +struct stlpanel { + unsigned long magic; + unsigned int panelnr; + unsigned int brdnr; + unsigned int pagenr; + unsigned int nrports; + int iobase; + void *uartp; + void (*isr)(struct stlpanel *panelp, unsigned int iobase); + unsigned int hwid; + unsigned int ackmask; + struct stlport *ports[STL_PORTSPERPANEL]; +}; + +struct stlbrd { + unsigned long magic; + unsigned int brdnr; + unsigned int brdtype; + unsigned int state; + unsigned int nrpanels; + unsigned int nrports; + unsigned int nrbnks; + int irq; + int irqtype; + int (*isr)(struct stlbrd *brdp); + unsigned int ioaddr1; + unsigned int ioaddr2; + unsigned int iosize1; + unsigned int iosize2; + unsigned int iostatus; + unsigned int ioctrl; + unsigned int ioctrlval; + unsigned int hwid; + unsigned long clk; + unsigned int bnkpageaddr[STL_MAXBANKS]; + unsigned int bnkstataddr[STL_MAXBANKS]; + struct stlpanel *bnk2panel[STL_MAXBANKS]; + struct stlpanel *panels[STL_MAXPANELS]; +}; + + +/* + * Define MAGIC numbers used for above structures. 
+ */ +#define STL_PORTMAGIC 0x5a7182c9 +#define STL_PANELMAGIC 0x7ef621a1 +#define STL_BOARDMAGIC 0xa2267f52 + +/*****************************************************************************/ +#endif diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h new file mode 100644 index 00000000..d3e5f275 --- /dev/null +++ b/include/linux/start_kernel.h @@ -0,0 +1,12 @@ +#ifndef _LINUX_START_KERNEL_H +#define _LINUX_START_KERNEL_H + +#include <linux/linkage.h> +#include <linux/init.h> + +/* Define the prototype for start_kernel here, rather than cluttering + up something else. */ + +extern asmlinkage void __init start_kernel(void); + +#endif /* _LINUX_START_KERNEL_H */ diff --git a/include/linux/stat.h b/include/linux/stat.h new file mode 100644 index 00000000..611c398d --- /dev/null +++ b/include/linux/stat.h @@ -0,0 +1,80 @@ +#ifndef _LINUX_STAT_H +#define _LINUX_STAT_H + +#ifdef __KERNEL__ + +#include <asm/stat.h> + +#endif + +#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) + +#define S_IFMT 00170000 +#define S_IFSOCK 0140000 +#define S_IFLNK 0120000 +#define S_IFREG 0100000 +#define S_IFBLK 0060000 +#define S_IFDIR 0040000 +#define S_IFCHR 0020000 +#define S_IFIFO 0010000 +#define S_ISUID 0004000 +#define S_ISGID 0002000 +#define S_ISVTX 0001000 + +#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK) +#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) +#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) +#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK) +#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO) +#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK) + +#define S_IRWXU 00700 +#define S_IRUSR 00400 +#define S_IWUSR 00200 +#define S_IXUSR 00100 + +#define S_IRWXG 00070 +#define S_IRGRP 00040 +#define S_IWGRP 00020 +#define S_IXGRP 00010 + +#define S_IRWXO 00007 +#define S_IROTH 00004 +#define S_IWOTH 00002 +#define S_IXOTH 00001 + +#endif + +#ifdef __KERNEL__ +#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO) +#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO) +#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH) +#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH) +#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH) + +#define UTIME_NOW ((1l << 30) - 1l) +#define UTIME_OMIT ((1l << 30) - 2l) + +#include <linux/types.h> +#include <linux/time.h> + +struct kstat { + u64 ino; + dev_t dev; + umode_t mode; + unsigned int nlink; + uid_t uid; + gid_t gid; + dev_t rdev; + loff_t size; + struct timespec atime; + struct timespec mtime; + struct timespec ctime; + unsigned long blksize; + unsigned long long blocks; +}; + +#endif + +#endif diff --git a/include/linux/statfs.h b/include/linux/statfs.h new file mode 100644 index 00000000..0166d320 --- /dev/null +++ b/include/linux/statfs.h @@ -0,0 +1,43 @@ +#ifndef _LINUX_STATFS_H +#define _LINUX_STATFS_H + +#include <linux/types.h> +#include <asm/statfs.h> + +struct kstatfs { + long f_type; + long f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + __kernel_fsid_t f_fsid; + long f_namelen; + long f_frsize; + long f_flags; + long f_spare[4]; +}; + +/* + * Definitions for the flag in f_flag. + * + * Generally these flags are equivalent to the MS_ flags used in the mount + * ABI. The exception is ST_VALID which has the same value as MS_REMOUNT + * which doesn't make any sense for statfs. 
+ */ +#define ST_RDONLY 0x0001 /* mount read-only */ +#define ST_NOSUID 0x0002 /* ignore suid and sgid bits */ +#define ST_NODEV 0x0004 /* disallow access to device special files */ +#define ST_NOEXEC 0x0008 /* disallow program execution */ +#define ST_SYNCHRONOUS 0x0010 /* writes are synced at once */ +#define ST_VALID 0x0020 /* f_flags support is implemented */ +#define ST_MANDLOCK 0x0040 /* allow mandatory locks on an FS */ +/* 0x0080 used for ST_WRITE in glibc */ +/* 0x0100 used for ST_APPEND in glibc */ +/* 0x0200 used for ST_IMMUTABLE in glibc */ +#define ST_NOATIME 0x0400 /* do not update access times */ +#define ST_NODIRATIME 0x0800 /* do not update directory access times */ +#define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */ + +#endif diff --git a/include/linux/static_key.h b/include/linux/static_key.h new file mode 100644 index 00000000..27bd3f8a --- /dev/null +++ b/include/linux/static_key.h @@ -0,0 +1 @@ +#include <linux/jump_label.h> diff --git a/include/linux/stddef.h b/include/linux/stddef.h new file mode 100644 index 00000000..1747b678 --- /dev/null +++ b/include/linux/stddef.h @@ -0,0 +1,24 @@ +#ifndef _LINUX_STDDEF_H +#define _LINUX_STDDEF_H + +#include <linux/compiler.h> + +#ifdef __KERNEL__ + +#undef NULL +#define NULL ((void *)0) + +enum { + false = 0, + true = 1 +}; + +#undef offsetof +#ifdef __compiler_offsetof +#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER) +#else +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#endif +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h new file mode 100644 index 00000000..0dddc9e4 --- /dev/null +++ b/include/linux/stmmac.h @@ -0,0 +1,61 @@ +/******************************************************************************* + + Header file for stmmac platform data + + Copyright (C) 2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#ifndef __STMMAC_PLATFORM_DATA +#define __STMMAC_PLATFORM_DATA + +#include <linux/platform_device.h> + +/* Platfrom data for platform device structure's platform_data field */ + +struct stmmac_mdio_bus_data { + int bus_id; + int (*phy_reset)(void *priv); + unsigned int phy_mask; + int *irqs; + int probed_phy_irq; +}; + +struct plat_stmmacenet_data { + int bus_id; + int phy_addr; + int interface; + struct stmmac_mdio_bus_data *mdio_bus_data; + int pbl; + int clk_csr; + int has_gmac; + int enh_desc; + int tx_coe; + int bugged_jumbo; + int pmt; + int force_sf_dma_mode; + void (*fix_mac_speed)(void *priv, unsigned int speed); + void (*bus_setup)(void __iomem *ioaddr); + int (*init)(struct platform_device *pdev); + void (*exit)(struct platform_device *pdev); + void *custom_cfg; + void *bsp_priv; +}; +#endif diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h new file mode 100644 index 00000000..3b5e910d --- /dev/null +++ b/include/linux/stop_machine.h @@ -0,0 +1,156 @@ +#ifndef _LINUX_STOP_MACHINE +#define _LINUX_STOP_MACHINE + +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/smp.h> +#include <linux/list.h> + +/* + * stop_cpu[s]() is simplistic per-cpu maximum priority cpu + * monopolization mechanism. The caller can specify a non-sleeping + * function to be executed on a single or multiple cpus preempting all + * other processes and monopolizing those cpus until it finishes. + * + * Resources for this mechanism are preallocated when a cpu is brought + * up and requests are guaranteed to be served as long as the target + * cpus are online. + */ +typedef int (*cpu_stop_fn_t)(void *arg); + +#ifdef CONFIG_SMP + +struct cpu_stop_work { + struct list_head list; /* cpu_stopper->works */ + cpu_stop_fn_t fn; + void *arg; + struct cpu_stop_done *done; +}; + +int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); +void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, + struct cpu_stop_work *work_buf); +int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); +int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); + +#else /* CONFIG_SMP */ + +#include <linux/workqueue.h> + +struct cpu_stop_work { + struct work_struct work; + cpu_stop_fn_t fn; + void *arg; +}; + +static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) +{ + int ret = -ENOENT; + preempt_disable(); + if (cpu == smp_processor_id()) + ret = fn(arg); + preempt_enable(); + return ret; +} + +static void stop_one_cpu_nowait_workfn(struct work_struct *work) +{ + struct cpu_stop_work *stwork = + container_of(work, struct cpu_stop_work, work); + preempt_disable(); + stwork->fn(stwork->arg); + preempt_enable(); +} + +static inline void stop_one_cpu_nowait(unsigned int cpu, + cpu_stop_fn_t fn, void *arg, + struct cpu_stop_work *work_buf) +{ + if (cpu == smp_processor_id()) { + INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn); + work_buf->fn = fn; + work_buf->arg = arg; + schedule_work(&work_buf->work); + } +} + +static inline int stop_cpus(const struct cpumask *cpumask, + cpu_stop_fn_t fn, void *arg) +{ + if (cpumask_test_cpu(raw_smp_processor_id(), cpumask)) + return stop_one_cpu(raw_smp_processor_id(), fn, arg); + return -ENOENT; +} + +static inline int try_stop_cpus(const struct cpumask *cpumask, + cpu_stop_fn_t fn, void *arg) +{ + return stop_cpus(cpumask, fn, arg); +} + 
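/*
 * Illustrative sketch, not part of stop_machine.h: a cpu_stop_fn_t callback
 * runs on the target CPU with preemption disabled, and its return value is
 * handed back to the caller of stop_one_cpu().  Both the callback and the
 * call site below are hypothetical examples of the API declared above.
 */
static int clear_scratch_word(void *arg)
{
	unsigned long *scratch = arg;	/* caller-supplied word to reset */

	*scratch = 0;
	return 0;
}

/* call site: err = stop_one_cpu(cpu, clear_scratch_word, &scratch); */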
+#endif /* CONFIG_SMP */ + +/* + * stop_machine "Bogolock": stop the entire machine, disable + * interrupts. This is a very heavy lock, which is equivalent to + * grabbing every spinlock (and more). So the "read" side to such a + * lock is anything which disables preemption. + */ +#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) + +/** + * stop_machine: freeze the machine on all CPUs and run this function + * @fn: the function to run + * @data: the data ptr for the @fn() + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) + * + * Description: This causes a thread to be scheduled on every cpu, + * each of which disables interrupts. The result is that no one is + * holding a spinlock or inside any other preempt-disabled region when + * @fn() runs. + * + * This can be thought of as a very heavy write lock, equivalent to + * grabbing every spinlock in the kernel. */ +int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); + +/** + * __stop_machine: freeze the machine on all CPUs and run this function + * @fn: the function to run + * @data: the data ptr for the @fn + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) + * + * Description: This is a special version of the above, which assumes cpus + * won't come or go while it's being called. Used by hotplug cpu. + */ +int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); + +int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, + const struct cpumask *cpus); + +#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ + +static inline int __stop_machine(int (*fn)(void *), void *data, + const struct cpumask *cpus) +{ + unsigned long flags; + int ret; + local_irq_save(flags); + ret = fn(data); + local_irq_restore(flags); + return ret; +} + +static inline int stop_machine(int (*fn)(void *), void *data, + const struct cpumask *cpus) +{ + return __stop_machine(fn, data, cpus); +} + +static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, + const struct cpumask *cpus) +{ + return __stop_machine(fn, data, cpus); +} + +#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ +#endif /* _LINUX_STOP_MACHINE */ diff --git a/include/linux/string.h b/include/linux/string.h new file mode 100644 index 00000000..e033564f --- /dev/null +++ b/include/linux/string.h @@ -0,0 +1,148 @@ +#ifndef _LINUX_STRING_H_ +#define _LINUX_STRING_H_ + +/* We don't want strings.h stuff being used by user stuff by accident */ + +#ifndef __KERNEL__ +#include <string.h> +#else + +#include <linux/compiler.h> /* for inline */ +#include <linux/types.h> /* for size_t */ +#include <linux/stddef.h> /* for NULL */ +#include <stdarg.h> + +extern char *strndup_user(const char __user *, long); +extern void *memdup_user(const void __user *, size_t); + +/* + * Include machine specific inline routines + */ +#include <asm/string.h> + +#ifndef __HAVE_ARCH_STRCPY +extern char * strcpy(char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRNCPY +extern char * strncpy(char *,const char *, __kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRLCPY +size_t strlcpy(char *, const char *, size_t); +#endif +#ifndef __HAVE_ARCH_STRCAT +extern char * strcat(char *, const char *); +#endif +#ifndef __HAVE_ARCH_STRNCAT +extern char * strncat(char *, const char *, __kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRLCAT +extern size_t strlcat(char *, const char *, __kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRCMP +extern int strcmp(const char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRNCMP +extern int strncmp(const char 
*,const char *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRNICMP +extern int strnicmp(const char *, const char *, __kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRCASECMP +extern int strcasecmp(const char *s1, const char *s2); +#endif +#ifndef __HAVE_ARCH_STRNCASECMP +extern int strncasecmp(const char *s1, const char *s2, size_t n); +#endif +#ifndef __HAVE_ARCH_STRCHR +extern char * strchr(const char *,int); +#endif +#ifndef __HAVE_ARCH_STRNCHR +extern char * strnchr(const char *, size_t, int); +#endif +#ifndef __HAVE_ARCH_STRRCHR +extern char * strrchr(const char *,int); +#endif +extern char * __must_check skip_spaces(const char *); + +extern char *strim(char *); + +static inline __must_check char *strstrip(char *str) +{ + return strim(str); +} + +#ifndef __HAVE_ARCH_STRSTR +extern char * strstr(const char *, const char *); +#endif +#ifndef __HAVE_ARCH_STRNSTR +extern char * strnstr(const char *, const char *, size_t); +#endif +#ifndef __HAVE_ARCH_STRLEN +extern __kernel_size_t strlen(const char *); +#endif +#ifndef __HAVE_ARCH_STRNLEN +extern __kernel_size_t strnlen(const char *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRPBRK +extern char * strpbrk(const char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRSEP +extern char * strsep(char **,const char *); +#endif +#ifndef __HAVE_ARCH_STRSPN +extern __kernel_size_t strspn(const char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRCSPN +extern __kernel_size_t strcspn(const char *,const char *); +#endif + +#ifndef __HAVE_ARCH_MEMSET +extern void * memset(void *,int,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCPY +extern void * memcpy(void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMMOVE +extern void * memmove(void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMSCAN +extern void * memscan(void *,int,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCMP +extern int memcmp(const void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCHR +extern void * memchr(const void *,int,__kernel_size_t); +#endif +void *memchr_inv(const void *s, int c, size_t n); + +extern char *kstrdup(const char *s, gfp_t gfp); +extern char *kstrndup(const char *s, size_t len, gfp_t gfp); +extern void *kmemdup(const void *src, size_t len, gfp_t gfp); + +extern char **argv_split(gfp_t gfp, const char *str, int *argcp); +extern void argv_free(char **argv); + +extern bool sysfs_streq(const char *s1, const char *s2); +extern int strtobool(const char *s, bool *res); + +#ifdef CONFIG_BINARY_PRINTF +int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); +int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); +int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); +#endif + +extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, + const void *from, size_t available); + +/** + * strstarts - does @str start with @prefix? + * @str: string to examine + * @prefix: prefix to look for. 
+ */ +static inline bool strstarts(const char *str, const char *prefix) +{ + return strncmp(str, prefix, strlen(prefix)) == 0; +} +#endif +#endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h new file mode 100644 index 00000000..a3eb2f65 --- /dev/null +++ b/include/linux/string_helpers.h @@ -0,0 +1,16 @@ +#ifndef _LINUX_STRING_HELPERS_H_ +#define _LINUX_STRING_HELPERS_H_ + +#include <linux/types.h> + +/* Descriptions of the types of units to + * print in */ +enum string_size_units { + STRING_UNITS_10, /* use powers of 10^3 (standard SI) */ + STRING_UNITS_2, /* use binary powers of 2^10 */ +}; + +int string_get_size(u64 size, enum string_size_units units, + char *buf, int len); + +#endif diff --git a/include/linux/stringify.h b/include/linux/stringify.h new file mode 100644 index 00000000..841cec8e --- /dev/null +++ b/include/linux/stringify.h @@ -0,0 +1,12 @@ +#ifndef __LINUX_STRINGIFY_H +#define __LINUX_STRINGIFY_H + +/* Indirect stringification. Doing two levels allows the parameter to be a + * macro itself. For example, compile with -DFOO=bar, __stringify(FOO) + * converts to "bar". + */ + +#define __stringify_1(x...) #x +#define __stringify(x...) __stringify_1(x) + +#endif /* !__LINUX_STRINGIFY_H */ diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h new file mode 100644 index 00000000..bd9be9f5 --- /dev/null +++ b/include/linux/sungem_phy.h @@ -0,0 +1,132 @@ +#ifndef __SUNGEM_PHY_H__ +#define __SUNGEM_PHY_H__ + +struct mii_phy; + +/* Operations supported by any kind of PHY */ +struct mii_phy_ops +{ + int (*init)(struct mii_phy *phy); + int (*suspend)(struct mii_phy *phy); + int (*setup_aneg)(struct mii_phy *phy, u32 advertise); + int (*setup_forced)(struct mii_phy *phy, int speed, int fd); + int (*poll_link)(struct mii_phy *phy); + int (*read_link)(struct mii_phy *phy); + int (*enable_fiber)(struct mii_phy *phy, int autoneg); +}; + +/* Structure used to statically define an mii/gii based PHY */ +struct mii_phy_def +{ + u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ + u32 phy_id_mask; /* Significant bits */ + u32 features; /* Ethtool SUPPORTED_* defines */ + int magic_aneg; /* Autoneg does all speed test for us */ + const char* name; + const struct mii_phy_ops* ops; +}; + +enum { + BCM54XX_COPPER, + BCM54XX_FIBER, + BCM54XX_GBIC, + BCM54XX_SGMII, + BCM54XX_UNKNOWN, +}; + +/* An instance of a PHY, partially borrowed from mii_if_info */ +struct mii_phy +{ + struct mii_phy_def* def; + u32 advertising; + int mii_id; + + /* 1: autoneg enabled, 0: disabled */ + int autoneg; + + /* forced speed & duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + + /* Provided by host chip */ + struct net_device *dev; + int (*mdio_read) (struct net_device *dev, int mii_id, int reg); + void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val); + void *platform_data; +}; + +/* Pass in a struct mii_phy with dev, mdio_read and mdio_write + * filled, the remaining fields will be filled on return + */ +extern int sungem_phy_probe(struct mii_phy *phy, int mii_id); + + +/* MII definitions missing from mii.h */ + +#define BMCR_SPD2 0x0040 /* Gigabit enable (bcm54xx) */ +#define LPA_PAUSE 0x0400 + +/* More PHY registers (model specific) */ + +/* MII BCM5201 MULTIPHY interrupt register */ +#define MII_BCM5201_INTERRUPT 0x1A +#define MII_BCM5201_INTERRUPT_INTENABLE 0x4000 + +#define MII_BCM5201_AUXMODE2 0x1B +#define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008 + +#define 
MII_BCM5201_MULTIPHY 0x1E + +/* MII BCM5201 MULTIPHY register bits */ +#define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002 +#define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008 + +/* MII BCM5221 Additional registers */ +#define MII_BCM5221_TEST 0x1f +#define MII_BCM5221_TEST_ENABLE_SHADOWS 0x0080 +#define MII_BCM5221_SHDOW_AUX_STAT2 0x1b +#define MII_BCM5221_SHDOW_AUX_STAT2_APD 0x0020 +#define MII_BCM5221_SHDOW_AUX_MODE4 0x1a +#define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001 +#define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004 + +/* MII BCM5241 Additional registers */ +#define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008 + +/* MII BCM5400 1000-BASET Control register */ +#define MII_BCM5400_GB_CONTROL 0x09 +#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200 + +/* MII BCM5400 AUXCONTROL register */ +#define MII_BCM5400_AUXCONTROL 0x18 +#define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004 + +/* MII BCM5400 AUXSTATUS register */ +#define MII_BCM5400_AUXSTATUS 0x19 +#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700 +#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8 + +/* 1000BT control (Marvell & BCM54xx at least) */ +#define MII_1000BASETCONTROL 0x09 +#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 +#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100 + +/* Marvell 88E1011 PHY control */ +#define MII_M1011_PHY_SPEC_CONTROL 0x10 +#define MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX 0x20 +#define MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX 0x40 + +/* Marvell 88E1011 PHY status */ +#define MII_M1011_PHY_SPEC_STATUS 0x11 +#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000 +#define MII_M1011_PHY_SPEC_STATUS_100 0x4000 +#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 +#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 +#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 +#define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE 0x0008 +#define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE 0x0004 + +#endif /* __SUNGEM_PHY_H__ */ diff --git a/include/linux/sunrpc/Kbuild b/include/linux/sunrpc/Kbuild new file mode 100644 index 00000000..98df2116 --- /dev/null +++ b/include/linux/sunrpc/Kbuild @@ -0,0 +1 @@ +header-y += debug.h diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h new file mode 100644 index 00000000..492a36d7 --- /dev/null +++ b/include/linux/sunrpc/auth.h @@ -0,0 +1,162 @@ +/* + * linux/include/linux/sunrpc/auth.h + * + * Declarations for the RPC client authentication machinery. 
+ * + * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_SUNRPC_AUTH_H +#define _LINUX_SUNRPC_AUTH_H + +#ifdef __KERNEL__ + +#include <linux/sunrpc/sched.h> +#include <linux/sunrpc/msg_prot.h> +#include <linux/sunrpc/xdr.h> + +#include <linux/atomic.h> +#include <linux/rcupdate.h> + +/* size of the nodename buffer */ +#define UNX_MAXNODENAME 32 + +/* Work around the lack of a VFS credential */ +struct auth_cred { + uid_t uid; + gid_t gid; + struct group_info *group_info; + const char *principal; + unsigned char machine_cred : 1; +}; + +/* + * Client user credentials + */ +struct rpc_auth; +struct rpc_credops; +struct rpc_cred { + struct hlist_node cr_hash; /* hash chain */ + struct list_head cr_lru; /* lru garbage collection */ + struct rcu_head cr_rcu; + struct rpc_auth * cr_auth; + const struct rpc_credops *cr_ops; +#ifdef RPC_DEBUG + unsigned long cr_magic; /* 0x0f4aa4f0 */ +#endif + unsigned long cr_expire; /* when to gc */ + unsigned long cr_flags; /* various flags */ + atomic_t cr_count; /* ref count */ + + uid_t cr_uid; + + /* per-flavor data */ +}; +#define RPCAUTH_CRED_NEW 0 +#define RPCAUTH_CRED_UPTODATE 1 +#define RPCAUTH_CRED_HASHED 2 +#define RPCAUTH_CRED_NEGATIVE 3 + +#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 + +/* + * Client authentication handle + */ +struct rpc_cred_cache; +struct rpc_authops; +struct rpc_auth { + unsigned int au_cslack; /* call cred size estimate */ + /* guess at number of u32's auth adds before + * reply data; normally the verifier size: */ + unsigned int au_rslack; + /* for gss, used to calculate au_rslack: */ + unsigned int au_verfsize; + + unsigned int au_flags; /* various flags */ + const struct rpc_authops *au_ops; /* operations */ + rpc_authflavor_t au_flavor; /* pseudoflavor (note may + * differ from the flavor in + * au_ops->au_flavor in gss + * case) */ + atomic_t au_count; /* Reference counter */ + + struct rpc_cred_cache * au_credcache; + /* per-flavor data */ +}; + +/* Flags for rpcauth_lookupcred() */ +#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */ + +/* + * Client authentication ops + */ +struct rpc_authops { + struct module *owner; + rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */ + char * au_name; + struct rpc_auth * (*create)(struct rpc_clnt *, rpc_authflavor_t); + void (*destroy)(struct rpc_auth *); + + struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); + struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int); + int (*pipes_create)(struct rpc_auth *); + void (*pipes_destroy)(struct rpc_auth *); +}; + +struct rpc_credops { + const char * cr_name; /* Name of the auth flavour */ + int (*cr_init)(struct rpc_auth *, struct rpc_cred *); + void (*crdestroy)(struct rpc_cred *); + + int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); + struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int); + __be32 * (*crmarshal)(struct rpc_task *, __be32 *); + int (*crrefresh)(struct rpc_task *); + __be32 * (*crvalidate)(struct rpc_task *, __be32 *); + int (*crwrap_req)(struct rpc_task *, kxdreproc_t, + void *, __be32 *, void *); + int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t, + void *, __be32 *, void *); +}; + +extern const struct rpc_authops authunix_ops; +extern const struct rpc_authops authnull_ops; + +int __init rpc_init_authunix(void); +int __init rpc_init_generic_auth(void); +int __init rpcauth_init_module(void); +void rpcauth_remove_module(void); +void rpc_destroy_generic_auth(void); +void rpc_destroy_authunix(void); + +struct 
rpc_cred * rpc_lookup_cred(void); +struct rpc_cred * rpc_lookup_machine_cred(const char *service_name); +int rpcauth_register(const struct rpc_authops *); +int rpcauth_unregister(const struct rpc_authops *); +struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *); +void rpcauth_release(struct rpc_auth *); +struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); +void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); +struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); +struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); +void put_rpccred(struct rpc_cred *); +__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); +__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); +int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj); +int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj); +int rpcauth_refreshcred(struct rpc_task *); +void rpcauth_invalcred(struct rpc_task *); +int rpcauth_uptodatecred(struct rpc_task *); +int rpcauth_init_credcache(struct rpc_auth *); +void rpcauth_destroy_credcache(struct rpc_auth *); +void rpcauth_clear_credcache(struct rpc_cred_cache *); + +static inline +struct rpc_cred * get_rpccred(struct rpc_cred *cred) +{ + atomic_inc(&cred->cr_count); + return cred; +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_AUTH_H */ diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h new file mode 100644 index 00000000..f1cfd4c8 --- /dev/null +++ b/include/linux/sunrpc/auth_gss.h @@ -0,0 +1,91 @@ +/* + * linux/include/linux/sunrpc/auth_gss.h + * + * Declarations for RPCSEC_GSS + * + * Dug Song <dugsong@monkey.org> + * Andy Adamson <andros@umich.edu> + * Bruce Fields <bfields@umich.edu> + * Copyright (c) 2000 The Regents of the University of Michigan + */ + +#ifndef _LINUX_SUNRPC_AUTH_GSS_H +#define _LINUX_SUNRPC_AUTH_GSS_H + +#ifdef __KERNEL__ +#include <linux/sunrpc/auth.h> +#include <linux/sunrpc/svc.h> +#include <linux/sunrpc/gss_api.h> + +#define RPC_GSS_VERSION 1 + +#define MAXSEQ 0x80000000 /* maximum legal sequence number, from rfc 2203 */ + +enum rpc_gss_proc { + RPC_GSS_PROC_DATA = 0, + RPC_GSS_PROC_INIT = 1, + RPC_GSS_PROC_CONTINUE_INIT = 2, + RPC_GSS_PROC_DESTROY = 3 +}; + +enum rpc_gss_svc { + RPC_GSS_SVC_NONE = 1, + RPC_GSS_SVC_INTEGRITY = 2, + RPC_GSS_SVC_PRIVACY = 3 +}; + +/* on-the-wire gss cred: */ +struct rpc_gss_wire_cred { + u32 gc_v; /* version */ + u32 gc_proc; /* control procedure */ + u32 gc_seq; /* sequence number */ + u32 gc_svc; /* service */ + struct xdr_netobj gc_ctx; /* context handle */ +}; + +/* on-the-wire gss verifier: */ +struct rpc_gss_wire_verf { + u32 gv_flavor; + struct xdr_netobj gv_verf; +}; + +/* return from gss NULL PROC init sec context */ +struct rpc_gss_init_res { + struct xdr_netobj gr_ctx; /* context handle */ + u32 gr_major; /* major status */ + u32 gr_minor; /* minor status */ + u32 gr_win; /* sequence window */ + struct xdr_netobj gr_token; /* token */ +}; + +/* The gss_cl_ctx struct holds all the information the rpcsec_gss client + * code needs to know about a single security context. In particular, + * gc_gss_ctx is the context handle that is used to do gss-api calls, while + * gc_wire_ctx is the context handle that is used to identify the context on + * the wire when communicating with a server. 
*/ + +struct gss_cl_ctx { + atomic_t count; + enum rpc_gss_proc gc_proc; + u32 gc_seq; + spinlock_t gc_seq_lock; + struct gss_ctx __rcu *gc_gss_ctx; + struct xdr_netobj gc_wire_ctx; + u32 gc_win; + unsigned long gc_expiry; + struct rcu_head gc_rcu; +}; + +struct gss_upcall_msg; +struct gss_cred { + struct rpc_cred gc_base; + enum rpc_gss_svc gc_service; + struct gss_cl_ctx __rcu *gc_ctx; + struct gss_upcall_msg *gc_upcall; + const char *gc_principal; + unsigned long gc_upcall_timestamp; +}; + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_AUTH_GSS_H */ + diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h new file mode 100644 index 00000000..969c0a67 --- /dev/null +++ b/include/linux/sunrpc/bc_xprt.h @@ -0,0 +1,67 @@ +/****************************************************************************** + +(c) 2008 NetApp. All Rights Reserved. + +NetApp provides this source code under the GPL v2 License. +The GPL v2 license is available at +http://opensource.org/licenses/gpl-license.php. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ + +/* + * Functions to create and manage the backchannel + */ + +#ifndef _LINUX_SUNRPC_BC_XPRT_H +#define _LINUX_SUNRPC_BC_XPRT_H + +#include <linux/sunrpc/svcsock.h> +#include <linux/sunrpc/xprt.h> +#include <linux/sunrpc/sched.h> + +#ifdef CONFIG_SUNRPC_BACKCHANNEL +struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt); +void xprt_free_bc_request(struct rpc_rqst *req); +int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); +void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); +int bc_send(struct rpc_rqst *req); + +/* + * Determine if a shared backchannel is in use + */ +static inline int svc_is_backchannel(const struct svc_rqst *rqstp) +{ + if (rqstp->rq_server->sv_bc_xprt) + return 1; + return 0; +} +#else /* CONFIG_SUNRPC_BACKCHANNEL */ +static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, + unsigned int min_reqs) +{ + return 0; +} + +static inline int svc_is_backchannel(const struct svc_rqst *rqstp) +{ + return 0; +} + +static inline void xprt_free_bc_request(struct rpc_rqst *req) +{ +} +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ +#endif /* _LINUX_SUNRPC_BC_XPRT_H */ + diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h new file mode 100644 index 00000000..f5fd6160 --- /dev/null +++ b/include/linux/sunrpc/cache.h @@ -0,0 +1,263 @@ +/* + * include/linux/sunrpc/cache.h + * + * Generic code for various authentication-related caches + * used by sunrpc clients and servers. + * + * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au> + * + * Released under terms in GPL version 2. See COPYING. 
+ * + */ + +#ifndef _LINUX_SUNRPC_CACHE_H_ +#define _LINUX_SUNRPC_CACHE_H_ + +#include <linux/kref.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <linux/proc_fs.h> + +/* + * Each cache requires: + * - A 'struct cache_detail' which contains information specific to the cache + * for common code to use. + * - An item structure that must contain a "struct cache_head" + * - A lookup function defined using DefineCacheLookup + * - A 'put' function that can release a cache item. It will only + * be called after cache_put has succeed, so there are guarantee + * to be no references. + * - A function to calculate a hash of an item's key. + * + * as well as assorted code fragments (e.g. compare keys) and numbers + * (e.g. hash size, goal_age, etc). + * + * Each cache must be registered so that it can be cleaned regularly. + * When the cache is unregistered, it is flushed completely. + * + * Entries have a ref count and a 'hashed' flag which counts the existence + * in the hash table. + * We only expire entries when refcount is zero. + * Existence in the cache is counted the refcount. + */ + +/* Every cache item has a common header that is used + * for expiring and refreshing entries. + * + */ +struct cache_head { + struct cache_head * next; + time_t expiry_time; /* After time time, don't use the data */ + time_t last_refresh; /* If CACHE_PENDING, this is when upcall + * was sent, else this is when update was received + */ + struct kref ref; + unsigned long flags; +}; +#define CACHE_VALID 0 /* Entry contains valid data */ +#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */ +#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/ + +#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ + +struct cache_detail_procfs { + struct proc_dir_entry *proc_ent; + struct proc_dir_entry *flush_ent, *channel_ent, *content_ent; +}; + +struct cache_detail_pipefs { + struct dentry *dir; +}; + +struct cache_detail { + struct module * owner; + int hash_size; + struct cache_head ** hash_table; + rwlock_t hash_lock; + + atomic_t inuse; /* active user-space update or lookup */ + + char *name; + void (*cache_put)(struct kref *); + + int (*cache_upcall)(struct cache_detail *, + struct cache_head *); + + int (*cache_parse)(struct cache_detail *, + char *buf, int len); + + int (*cache_show)(struct seq_file *m, + struct cache_detail *cd, + struct cache_head *h); + void (*warn_no_listener)(struct cache_detail *cd, + int has_died); + + struct cache_head * (*alloc)(void); + int (*match)(struct cache_head *orig, struct cache_head *new); + void (*init)(struct cache_head *orig, struct cache_head *new); + void (*update)(struct cache_head *orig, struct cache_head *new); + + /* fields below this comment are for internal use + * and should not be touched by cache owners + */ + time_t flush_time; /* flush all cache items with last_refresh + * earlier than this */ + struct list_head others; + time_t nextcheck; + int entries; + + /* fields for communication over channel */ + struct list_head queue; + + atomic_t readers; /* how many time is /chennel open */ + time_t last_close; /* if no readers, when did last close */ + time_t last_warn; /* when we last warned about no readers */ + + union { + struct cache_detail_procfs procfs; + struct cache_detail_pipefs pipefs; + } u; + struct net *net; +}; + + +/* this must be embedded in any request structure that + * identifies an object that will want a callback on + * a cache fill + */ +struct 
cache_req { + struct cache_deferred_req *(*defer)(struct cache_req *req); + int thread_wait; /* How long (jiffies) we can block the + * current thread to wait for updates. + */ +}; +/* this must be embedded in a deferred_request that is being + * delayed awaiting cache-fill + */ +struct cache_deferred_req { + struct hlist_node hash; /* on hash chain */ + struct list_head recent; /* on fifo */ + struct cache_head *item; /* cache item we wait on */ + void *owner; /* we might need to discard all defered requests + * owned by someone */ + void (*revisit)(struct cache_deferred_req *req, + int too_many); +}; + + +extern const struct file_operations cache_file_operations_pipefs; +extern const struct file_operations content_file_operations_pipefs; +extern const struct file_operations cache_flush_operations_pipefs; + +extern struct cache_head * +sunrpc_cache_lookup(struct cache_detail *detail, + struct cache_head *key, int hash); +extern struct cache_head * +sunrpc_cache_update(struct cache_detail *detail, + struct cache_head *new, struct cache_head *old, int hash); + +extern int +sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h, + void (*cache_request)(struct cache_detail *, + struct cache_head *, + char **, + int *)); + + +extern void cache_clean_deferred(void *owner); + +static inline struct cache_head *cache_get(struct cache_head *h) +{ + kref_get(&h->ref); + return h; +} + + +static inline void cache_put(struct cache_head *h, struct cache_detail *cd) +{ + if (atomic_read(&h->ref.refcount) <= 2 && + h->expiry_time < cd->nextcheck) + cd->nextcheck = h->expiry_time; + kref_put(&h->ref, cd->cache_put); +} + +static inline int cache_valid(struct cache_head *h) +{ + /* If an item has been unhashed pending removal when + * the refcount drops to 0, the expiry_time will be + * set to 0. We don't want to consider such items + * valid in this context even though CACHE_VALID is + * set. + */ + return (h->expiry_time != 0 && test_bit(CACHE_VALID, &h->flags)); +} + +extern int cache_check(struct cache_detail *detail, + struct cache_head *h, struct cache_req *rqstp); +extern void cache_flush(void); +extern void cache_purge(struct cache_detail *detail); +#define NEVER (0x7FFFFFFF) +extern void __init cache_initialize(void); +extern int cache_register_net(struct cache_detail *cd, struct net *net); +extern void cache_unregister_net(struct cache_detail *cd, struct net *net); + +extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net); +extern void cache_destroy_net(struct cache_detail *cd, struct net *net); + +extern void sunrpc_init_cache_detail(struct cache_detail *cd); +extern void sunrpc_destroy_cache_detail(struct cache_detail *cd); +extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, + umode_t, struct cache_detail *); +extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); + +extern void qword_add(char **bpp, int *lp, char *str); +extern void qword_addhex(char **bpp, int *lp, char *buf, int blen); +extern int qword_get(char **bpp, char *dest, int bufsize); + +static inline int get_int(char **bpp, int *anint) +{ + char buf[50]; + char *ep; + int rv; + int len = qword_get(bpp, buf, 50); + if (len < 0) return -EINVAL; + if (len ==0) return -ENOENT; + rv = simple_strtol(buf, &ep, 0); + if (*ep) return -EINVAL; + *anint = rv; + return 0; +} + +/* + * timestamps kept in the cache are expressed in seconds + * since boot. This is the best for measuring differences in + * real time. 
+ */ +static inline time_t seconds_since_boot(void) +{ + struct timespec boot; + getboottime(&boot); + return get_seconds() - boot.tv_sec; +} + +static inline time_t convert_to_wallclock(time_t sinceboot) +{ + struct timespec boot; + getboottime(&boot); + return boot.tv_sec + sinceboot; +} + +static inline time_t get_expiry(char **bpp) +{ + int rv; + struct timespec boot; + + if (get_int(bpp, &rv)) + return 0; + if (rv < 0) + return 0; + getboottime(&boot); + return rv - boot.tv_sec; +} + +#endif /* _LINUX_SUNRPC_CACHE_H_ */ diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h new file mode 100644 index 00000000..523547ec --- /dev/null +++ b/include/linux/sunrpc/clnt.h @@ -0,0 +1,319 @@ +/* + * linux/include/linux/sunrpc/clnt.h + * + * Declarations for the high-level RPC client interface + * + * Copyright (C) 1995, 1996, Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_SUNRPC_CLNT_H +#define _LINUX_SUNRPC_CLNT_H + +#include <linux/types.h> +#include <linux/socket.h> +#include <linux/in.h> +#include <linux/in6.h> + +#include <linux/sunrpc/msg_prot.h> +#include <linux/sunrpc/sched.h> +#include <linux/sunrpc/xprt.h> +#include <linux/sunrpc/auth.h> +#include <linux/sunrpc/stats.h> +#include <linux/sunrpc/xdr.h> +#include <linux/sunrpc/timer.h> +#include <asm/signal.h> +#include <linux/path.h> +#include <net/ipv6.h> + +struct rpc_inode; + +/* + * The high-level client handle + */ +struct rpc_clnt { + atomic_t cl_count; /* Number of references */ + struct list_head cl_clients; /* Global list of clients */ + struct list_head cl_tasks; /* List of tasks */ + spinlock_t cl_lock; /* spinlock */ + struct rpc_xprt __rcu * cl_xprt; /* transport */ + struct rpc_procinfo * cl_procinfo; /* procedure info */ + u32 cl_prog, /* RPC program number */ + cl_vers, /* RPC version number */ + cl_maxproc; /* max procedure number */ + + const char * cl_protname; /* protocol name */ + struct rpc_auth * cl_auth; /* authenticator */ + struct rpc_stat * cl_stats; /* per-program statistics */ + struct rpc_iostats * cl_metrics; /* per-client statistics */ + + unsigned int cl_softrtry : 1,/* soft timeouts */ + cl_discrtry : 1,/* disconnect before retry */ + cl_autobind : 1,/* use getport() */ + cl_chatty : 1;/* be verbose */ + + struct rpc_rtt * cl_rtt; /* RTO estimator data */ + const struct rpc_timeout *cl_timeout; /* Timeout strategy */ + + int cl_nodelen; /* nodename length */ + char cl_nodename[UNX_MAXNODENAME]; + struct dentry * cl_dentry; + struct rpc_clnt * cl_parent; /* Points to parent of clones */ + struct rpc_rtt cl_rtt_default; + struct rpc_timeout cl_timeout_default; + const struct rpc_program *cl_program; + char *cl_principal; /* target to authenticate to */ +}; + +/* + * General RPC program info + */ +#define RPC_MAXVERSION 4 +struct rpc_program { + const char * name; /* protocol name */ + u32 number; /* program number */ + unsigned int nrvers; /* number of versions */ + const struct rpc_version ** version; /* version array */ + struct rpc_stat * stats; /* statistics */ + const char * pipe_dir_name; /* path to rpc_pipefs dir */ +}; + +struct rpc_version { + u32 number; /* version number */ + unsigned int nrprocs; /* number of procs */ + struct rpc_procinfo * procs; /* procedure array */ +}; + +/* + * Procedure information + */ +struct rpc_procinfo { + u32 p_proc; /* RPC procedure number */ + kxdreproc_t p_encode; /* XDR encode function */ + kxdrdproc_t p_decode; /* XDR decode function */ + unsigned int p_arglen; /* argument hdr length (u32) */ + unsigned int p_replen; /* reply hdr 
length (u32) */ + unsigned int p_count; /* call count */ + unsigned int p_timer; /* Which RTT timer to use */ + u32 p_statidx; /* Which procedure to account */ + const char * p_name; /* name of procedure */ +}; + +#ifdef __KERNEL__ + +struct rpc_create_args { + struct net *net; + int protocol; + struct sockaddr *address; + size_t addrsize; + struct sockaddr *saddress; + const struct rpc_timeout *timeout; + const char *servername; + const struct rpc_program *program; + u32 prognumber; /* overrides program->number */ + u32 version; + rpc_authflavor_t authflavor; + unsigned long flags; + char *client_name; + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ +}; + +/* Values for "flags" field */ +#define RPC_CLNT_CREATE_HARDRTRY (1UL << 0) +#define RPC_CLNT_CREATE_AUTOBIND (1UL << 2) +#define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3) +#define RPC_CLNT_CREATE_NOPING (1UL << 4) +#define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) +#define RPC_CLNT_CREATE_QUIET (1UL << 6) + +struct rpc_clnt *rpc_create(struct rpc_create_args *args); +struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, + const struct rpc_program *, u32); +void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt); +struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); +void rpc_shutdown_client(struct rpc_clnt *); +void rpc_release_client(struct rpc_clnt *); +void rpc_task_release_client(struct rpc_task *); + +int rpcb_create_local(struct net *); +void rpcb_put_local(struct net *); +int rpcb_register(struct net *, u32, u32, int, unsigned short); +int rpcb_v4_register(struct net *net, const u32 program, + const u32 version, + const struct sockaddr *address, + const char *netid); +void rpcb_getport_async(struct rpc_task *); + +void rpc_call_start(struct rpc_task *); +int rpc_call_async(struct rpc_clnt *clnt, + const struct rpc_message *msg, int flags, + const struct rpc_call_ops *tk_ops, + void *calldata); +int rpc_call_sync(struct rpc_clnt *clnt, + const struct rpc_message *msg, int flags); +struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, + int flags); +int rpc_restart_call_prepare(struct rpc_task *); +int rpc_restart_call(struct rpc_task *); +void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); +int rpc_protocol(struct rpc_clnt *); +struct net * rpc_net_ns(struct rpc_clnt *); +size_t rpc_max_payload(struct rpc_clnt *); +void rpc_force_rebind(struct rpc_clnt *); +size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); +const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); +int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); + +size_t rpc_ntop(const struct sockaddr *, char *, const size_t); +size_t rpc_pton(struct net *, const char *, const size_t, + struct sockaddr *, const size_t); +char * rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t); +size_t rpc_uaddr2sockaddr(struct net *, const char *, const size_t, + struct sockaddr *, const size_t); + +static inline unsigned short rpc_get_port(const struct sockaddr *sap) +{ + switch (sap->sa_family) { + case AF_INET: + return ntohs(((struct sockaddr_in *)sap)->sin_port); + case AF_INET6: + return ntohs(((struct sockaddr_in6 *)sap)->sin6_port); + } + return 0; +} + +static inline void rpc_set_port(struct sockaddr *sap, + const unsigned short port) +{ + switch (sap->sa_family) { + case AF_INET: + ((struct sockaddr_in *)sap)->sin_port = htons(port); + break; + case AF_INET6: + ((struct sockaddr_in6 *)sap)->sin6_port = htons(port); + break; + } +} + +#define IPV6_SCOPE_DELIMITER '%' +#define 
IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn") + +static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; + const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; + + return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; +} + +static inline bool __rpc_copy_addr4(struct sockaddr *dst, + const struct sockaddr *src) +{ + const struct sockaddr_in *ssin = (struct sockaddr_in *) src; + struct sockaddr_in *dsin = (struct sockaddr_in *) dst; + + dsin->sin_family = ssin->sin_family; + dsin->sin_addr.s_addr = ssin->sin_addr.s_addr; + return true; +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; + const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; + + if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr)) + return false; + else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL) + return sin1->sin6_scope_id == sin2->sin6_scope_id; + + return true; +} + +static inline bool __rpc_copy_addr6(struct sockaddr *dst, + const struct sockaddr *src) +{ + const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src; + struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst; + + dsin6->sin6_family = ssin6->sin6_family; + dsin6->sin6_addr = ssin6->sin6_addr; + return true; +} +#else /* !(IS_ENABLED(CONFIG_IPV6) */ +static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + return false; +} + +static inline bool __rpc_copy_addr6(struct sockaddr *dst, + const struct sockaddr *src) +{ + return false; +} +#endif /* !(IS_ENABLED(CONFIG_IPV6) */ + +/** + * rpc_cmp_addr - compare the address portion of two sockaddrs. + * @sap1: first sockaddr + * @sap2: second sockaddr + * + * Just compares the family and address portion. Ignores port, scope, etc. + * Returns true if the addrs are equal, false if they aren't. + */ +static inline bool rpc_cmp_addr(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + if (sap1->sa_family == sap2->sa_family) { + switch (sap1->sa_family) { + case AF_INET: + return __rpc_cmp_addr4(sap1, sap2); + case AF_INET6: + return __rpc_cmp_addr6(sap1, sap2); + } + } + return false; +} + +/** + * rpc_copy_addr - copy the address portion of one sockaddr to another + * @dst: destination sockaddr + * @src: source sockaddr + * + * Just copies the address portion and family. Ignores port, scope, etc. + * Caller is responsible for making certain that dst is large enough to hold + * the address in src. Returns true if address family is supported. Returns + * false otherwise. + */ +static inline bool rpc_copy_addr(struct sockaddr *dst, + const struct sockaddr *src) +{ + switch (src->sa_family) { + case AF_INET: + return __rpc_copy_addr4(dst, src); + case AF_INET6: + return __rpc_copy_addr6(dst, src); + } + return false; +} + +/** + * rpc_get_scope_id - return scopeid for a given sockaddr + * @sa: sockaddr to get scopeid from + * + * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if + * not an AF_INET6 address. 
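/*
 * [Editor's sketch -- not part of this patch. A minimal illustration of
 *  how the sockaddr helpers above are meant to be combined; the function
 *  name and its error convention are hypothetical.]
 */
#include <linux/string.h>
#include <linux/sunrpc/clnt.h>

static bool example_clone_addr(struct sockaddr_storage *dst,
			       const struct sockaddr *src,
			       unsigned short port)
{
	struct sockaddr *dsa = (struct sockaddr *)dst;

	memset(dst, 0, sizeof(*dst));
	if (!rpc_copy_addr(dsa, src))	/* unsupported address family */
		return false;
	rpc_set_port(dsa, port);	/* ports are not copied by rpc_copy_addr() */
	/*
	 * rpc_cmp_addr() ignores the port just set; a link-local IPv6
	 * scope id is not copied above, so it is compared against the
	 * zeroed dst here.
	 */
	return rpc_cmp_addr(dsa, src);
}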
+ */ +static inline u32 rpc_get_scope_id(const struct sockaddr *sa) +{ + if (sa->sa_family != AF_INET6) + return 0; + + return ((struct sockaddr_in6 *) sa)->sin6_scope_id; +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h new file mode 100644 index 00000000..a76cc20d --- /dev/null +++ b/include/linux/sunrpc/debug.h @@ -0,0 +1,110 @@ +/* + * linux/include/linux/sunrpc/debug.h + * + * Debugging support for sunrpc module + * + * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_SUNRPC_DEBUG_H_ +#define _LINUX_SUNRPC_DEBUG_H_ + +/* + * RPC debug facilities + */ +#define RPCDBG_XPRT 0x0001 +#define RPCDBG_CALL 0x0002 +#define RPCDBG_DEBUG 0x0004 +#define RPCDBG_NFS 0x0008 +#define RPCDBG_AUTH 0x0010 +#define RPCDBG_BIND 0x0020 +#define RPCDBG_SCHED 0x0040 +#define RPCDBG_TRANS 0x0080 +#define RPCDBG_SVCXPRT 0x0100 +#define RPCDBG_SVCDSP 0x0200 +#define RPCDBG_MISC 0x0400 +#define RPCDBG_CACHE 0x0800 +#define RPCDBG_ALL 0x7fff + +#ifdef __KERNEL__ + +/* + * Enable RPC debugging/profiling. + */ +#ifdef CONFIG_SUNRPC_DEBUG +#define RPC_DEBUG +#endif +#ifdef CONFIG_TRACEPOINTS +#define RPC_TRACEPOINTS +#endif +/* #define RPC_PROFILE */ + +/* + * Debugging macros etc + */ +#ifdef RPC_DEBUG +extern unsigned int rpc_debug; +extern unsigned int nfs_debug; +extern unsigned int nfsd_debug; +extern unsigned int nlm_debug; +#endif + +#define dprintk(args...) dfprintk(FACILITY, ## args) +#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) + +#undef ifdebug +#ifdef RPC_DEBUG +# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) + +# define dfprintk(fac, args...) \ + do { \ + ifdebug(fac) \ + printk(KERN_DEFAULT args); \ + } while (0) + +# define dfprintk_rcu(fac, args...) \ + do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_DEFAULT args); \ + rcu_read_unlock(); \ + } \ + } while (0) + +# define RPC_IFDEBUG(x) x +#else +# define ifdebug(fac) if (0) +# define dfprintk(fac, args...) do {} while (0) +# define dfprintk_rcu(fac, args...) do {} while (0) +# define RPC_IFDEBUG(x) +#endif + +/* + * Sysctl interface for RPC debugging + */ +#ifdef RPC_DEBUG +void rpc_register_sysctl(void); +void rpc_unregister_sysctl(void); +#endif + +#endif /* __KERNEL__ */ + +/* + * Declarations for the sysctl debug interface, which allows to read or + * change the debug flags for rpc, nfs, nfsd, and lockd. Since the sunrpc + * module currently registers its sysctl table dynamically, the sysctl path + * for module FOO is <CTL_SUNRPC, CTL_FOODEBUG>. + */ + +enum { + CTL_RPCDEBUG = 1, + CTL_NFSDEBUG, + CTL_NFSDDEBUG, + CTL_NLMDEBUG, + CTL_SLOTTABLE_UDP, + CTL_SLOTTABLE_TCP, + CTL_MIN_RESVPORT, + CTL_MAX_RESVPORT, +}; + +#endif /* _LINUX_SUNRPC_DEBUG_H_ */ diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h new file mode 100644 index 00000000..332da61c --- /dev/null +++ b/include/linux/sunrpc/gss_api.h @@ -0,0 +1,141 @@ +/* + * linux/include/linux/sunrpc/gss_api.h + * + * Somewhat simplified version of the gss api. 
+ * + * Dug Song <dugsong@monkey.org> + * Andy Adamson <andros@umich.edu> + * Bruce Fields <bfields@umich.edu> + * Copyright (c) 2000 The Regents of the University of Michigan + */ + +#ifndef _LINUX_SUNRPC_GSS_API_H +#define _LINUX_SUNRPC_GSS_API_H + +#ifdef __KERNEL__ +#include <linux/sunrpc/xdr.h> +#include <linux/uio.h> + +/* The mechanism-independent gss-api context: */ +struct gss_ctx { + struct gss_api_mech *mech_type; + void *internal_ctx_id; +}; + +#define GSS_C_NO_BUFFER ((struct xdr_netobj) 0) +#define GSS_C_NO_CONTEXT ((struct gss_ctx *) 0) +#define GSS_C_NULL_OID ((struct xdr_netobj) 0) + +/*XXX arbitrary length - is this set somewhere? */ +#define GSS_OID_MAX_LEN 32 + +/* gss-api prototypes; note that these are somewhat simplified versions of + * the prototypes specified in RFC 2744. */ +int gss_import_sec_context( + const void* input_token, + size_t bufsize, + struct gss_api_mech *mech, + struct gss_ctx **ctx_id, + gfp_t gfp_mask); +u32 gss_get_mic( + struct gss_ctx *ctx_id, + struct xdr_buf *message, + struct xdr_netobj *mic_token); +u32 gss_verify_mic( + struct gss_ctx *ctx_id, + struct xdr_buf *message, + struct xdr_netobj *mic_token); +u32 gss_wrap( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *outbuf, + struct page **inpages); +u32 gss_unwrap( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *inbuf); +u32 gss_delete_sec_context( + struct gss_ctx **ctx_id); + +u32 gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 service); +u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor); +char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service); + +struct pf_desc { + u32 pseudoflavor; + u32 service; + char *name; + char *auth_domain_name; +}; + +/* Different mechanisms (e.g., krb5 or spkm3) may implement gss-api, and + * mechanisms may be dynamically registered or unregistered by modules. */ + +/* Each mechanism is described by the following struct: */ +struct gss_api_mech { + struct list_head gm_list; + struct module *gm_owner; + struct xdr_netobj gm_oid; + char *gm_name; + const struct gss_api_ops *gm_ops; + /* pseudoflavors supported by this mechanism: */ + int gm_pf_num; + struct pf_desc * gm_pfs; + /* Should the following be a callback operation instead? */ + const char *gm_upcall_enctypes; +}; + +/* and must provide the following operations: */ +struct gss_api_ops { + int (*gss_import_sec_context)( + const void *input_token, + size_t bufsize, + struct gss_ctx *ctx_id, + gfp_t gfp_mask); + u32 (*gss_get_mic)( + struct gss_ctx *ctx_id, + struct xdr_buf *message, + struct xdr_netobj *mic_token); + u32 (*gss_verify_mic)( + struct gss_ctx *ctx_id, + struct xdr_buf *message, + struct xdr_netobj *mic_token); + u32 (*gss_wrap)( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *outbuf, + struct page **inpages); + u32 (*gss_unwrap)( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *buf); + void (*gss_delete_sec_context)( + void *internal_ctx_id); +}; + +int gss_mech_register(struct gss_api_mech *); +void gss_mech_unregister(struct gss_api_mech *); + +/* returns a mechanism descriptor given an OID, and increments the mechanism's + * reference count. */ +struct gss_api_mech * gss_mech_get_by_OID(struct xdr_netobj *); + +/* Returns a reference to a mechanism, given a name like "krb5" etc. */ +struct gss_api_mech *gss_mech_get_by_name(const char *); + +/* Similar, but get by pseudoflavor. 
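/*
 * [Editor's sketch -- not part of this patch. Shows the get/put pairing
 *  required by the lookup helpers in this header; the function name and
 *  error value are hypothetical.]
 */
#include <linux/errno.h>
#include <linux/sunrpc/gss_api.h>

static int example_probe_krb5(void)
{
	struct gss_api_mech *mech;

	mech = gss_mech_get_by_name("krb5");	/* takes a reference on success */
	if (!mech)
		return -EOPNOTSUPP;
	/* ... inspect mech->gm_ops, mech->gm_pfs, etc. ... */
	gss_mech_put(mech);			/* every successful get needs a put */
	return 0;
}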
*/ +struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32); + +/* Fill in an array with a list of supported pseudoflavors */ +int gss_mech_list_pseudoflavors(u32 *); + +/* Just increments the mechanism's reference count and returns its input: */ +struct gss_api_mech * gss_mech_get(struct gss_api_mech *); + +/* For every successful gss_mech_get or gss_mech_get_by_* call there must be a + * corresponding call to gss_mech_put. */ +void gss_mech_put(struct gss_api_mech *); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_GSS_API_H */ + diff --git a/include/linux/sunrpc/gss_asn1.h b/include/linux/sunrpc/gss_asn1.h new file mode 100644 index 00000000..3ccecd0a --- /dev/null +++ b/include/linux/sunrpc/gss_asn1.h @@ -0,0 +1,81 @@ +/* + * linux/include/linux/sunrpc/gss_asn1.h + * + * minimal asn1 for generic encoding/decoding of gss tokens + * + * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h, + * lib/gssapi/krb5/gssapiP_krb5.h, and others + * + * Copyright (c) 2000 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson <andros@umich.edu> + */ + +/* + * Copyright 1995 by the Massachusetts Institute of Technology. + * All Rights Reserved. + * + * Export of this software from the United States of America may + * require a specific license from the United States Government. + * It is the responsibility of any person or organization contemplating + * export to obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of M.I.T. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. Furthermore if you modify this software you must label + * your software as modified software and not distribute it in such a + * fashion that it might be confused with the original M.I.T. software. + * M.I.T. makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. 
+ * + */ + + +#include <linux/sunrpc/gss_api.h> + +#define SIZEOF_INT 4 + +/* from gssapi_err_generic.h */ +#define G_BAD_SERVICE_NAME (-2045022976L) +#define G_BAD_STRING_UID (-2045022975L) +#define G_NOUSER (-2045022974L) +#define G_VALIDATE_FAILED (-2045022973L) +#define G_BUFFER_ALLOC (-2045022972L) +#define G_BAD_MSG_CTX (-2045022971L) +#define G_WRONG_SIZE (-2045022970L) +#define G_BAD_USAGE (-2045022969L) +#define G_UNKNOWN_QOP (-2045022968L) +#define G_NO_HOSTNAME (-2045022967L) +#define G_BAD_HOSTNAME (-2045022966L) +#define G_WRONG_MECH (-2045022965L) +#define G_BAD_TOK_HEADER (-2045022964L) +#define G_BAD_DIRECTION (-2045022963L) +#define G_TOK_TRUNC (-2045022962L) +#define G_REFLECT (-2045022961L) +#define G_WRONG_TOKID (-2045022960L) + +#define g_OID_equal(o1,o2) \ + (((o1)->len == (o2)->len) && \ + (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0)) + +u32 g_verify_token_header( + struct xdr_netobj *mech, + int *body_size, + unsigned char **buf_in, + int toksize); + +int g_token_size( + struct xdr_netobj *mech, + unsigned int body_size); + +void g_make_token_header( + struct xdr_netobj *mech, + int body_size, + unsigned char **buf); diff --git a/include/linux/sunrpc/gss_err.h b/include/linux/sunrpc/gss_err.h new file mode 100644 index 00000000..a6807867 --- /dev/null +++ b/include/linux/sunrpc/gss_err.h @@ -0,0 +1,167 @@ +/* + * linux/include/sunrpc/gss_err.h + * + * Adapted from MIT Kerberos 5-1.2.1 include/gssapi/gssapi.h + * + * Copyright (c) 2002 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson <andros@umich.edu> + */ + +/* + * Copyright 1993 by OpenVision Technologies, Inc. + * + * Permission to use, copy, modify, distribute, and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appears in all copies and + * that both that copyright notice and this permission notice appear in + * supporting documentation, and that the name of OpenVision not be used + * in advertising or publicity pertaining to distribution of the software + * without specific, written prior permission. OpenVision makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. + * + * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF + * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR + * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_SUNRPC_GSS_ERR_H +#define _LINUX_SUNRPC_GSS_ERR_H + +#ifdef __KERNEL__ + +typedef unsigned int OM_uint32; + +/* + * Flag bits for context-level services. 
+ */ +#define GSS_C_DELEG_FLAG 1 +#define GSS_C_MUTUAL_FLAG 2 +#define GSS_C_REPLAY_FLAG 4 +#define GSS_C_SEQUENCE_FLAG 8 +#define GSS_C_CONF_FLAG 16 +#define GSS_C_INTEG_FLAG 32 +#define GSS_C_ANON_FLAG 64 +#define GSS_C_PROT_READY_FLAG 128 +#define GSS_C_TRANS_FLAG 256 + +/* + * Credential usage options + */ +#define GSS_C_BOTH 0 +#define GSS_C_INITIATE 1 +#define GSS_C_ACCEPT 2 + +/* + * Status code types for gss_display_status + */ +#define GSS_C_GSS_CODE 1 +#define GSS_C_MECH_CODE 2 + + +/* + * Expiration time of 2^32-1 seconds means infinite lifetime for a + * credential or security context + */ +#define GSS_C_INDEFINITE ((OM_uint32) 0xfffffffful) + + +/* Major status codes */ + +#define GSS_S_COMPLETE 0 + +/* + * Some "helper" definitions to make the status code macros obvious. + */ +#define GSS_C_CALLING_ERROR_OFFSET 24 +#define GSS_C_ROUTINE_ERROR_OFFSET 16 +#define GSS_C_SUPPLEMENTARY_OFFSET 0 +#define GSS_C_CALLING_ERROR_MASK ((OM_uint32) 0377ul) +#define GSS_C_ROUTINE_ERROR_MASK ((OM_uint32) 0377ul) +#define GSS_C_SUPPLEMENTARY_MASK ((OM_uint32) 0177777ul) + +/* + * The macros that test status codes for error conditions. Note that the + * GSS_ERROR() macro has changed slightly from the V1 GSSAPI so that it now + * evaluates its argument only once. + */ +#define GSS_CALLING_ERROR(x) \ + ((x) & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET)) +#define GSS_ROUTINE_ERROR(x) \ + ((x) & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)) +#define GSS_SUPPLEMENTARY_INFO(x) \ + ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET)) +#define GSS_ERROR(x) \ + ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \ + (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))) + +/* + * Now the actual status code definitions + */ + +/* + * Calling errors: + */ +#define GSS_S_CALL_INACCESSIBLE_READ \ + (((OM_uint32) 1ul) << GSS_C_CALLING_ERROR_OFFSET) +#define GSS_S_CALL_INACCESSIBLE_WRITE \ + (((OM_uint32) 2ul) << GSS_C_CALLING_ERROR_OFFSET) +#define GSS_S_CALL_BAD_STRUCTURE \ + (((OM_uint32) 3ul) << GSS_C_CALLING_ERROR_OFFSET) + +/* + * Routine errors: + */ +#define GSS_S_BAD_MECH (((OM_uint32) 1ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_NAME (((OM_uint32) 2ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_NAMETYPE (((OM_uint32) 3ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_BINDINGS (((OM_uint32) 4ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_STATUS (((OM_uint32) 5ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_SIG (((OM_uint32) 6ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_NO_CRED (((OM_uint32) 7ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_NO_CONTEXT (((OM_uint32) 8ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_DEFECTIVE_TOKEN (((OM_uint32) 9ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_DEFECTIVE_CREDENTIAL \ + (((OM_uint32) 10ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_CREDENTIALS_EXPIRED \ + (((OM_uint32) 11ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_CONTEXT_EXPIRED \ + (((OM_uint32) 12ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_FAILURE (((OM_uint32) 13ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_BAD_QOP (((OM_uint32) 14ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_UNAUTHORIZED (((OM_uint32) 15ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_UNAVAILABLE (((OM_uint32) 16ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_DUPLICATE_ELEMENT \ + (((OM_uint32) 17ul) << GSS_C_ROUTINE_ERROR_OFFSET) +#define GSS_S_NAME_NOT_MN \ + (((OM_uint32) 18ul) << 
GSS_C_ROUTINE_ERROR_OFFSET) + +/* + * Supplementary info bits: + */ +#define GSS_S_CONTINUE_NEEDED (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 0)) +#define GSS_S_DUPLICATE_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 1)) +#define GSS_S_OLD_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 2)) +#define GSS_S_UNSEQ_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 3)) +#define GSS_S_GAP_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 4)) + +/* XXXX these are not part of the GSSAPI C bindings! (but should be) */ + +#define GSS_CALLING_ERROR_FIELD(x) \ + (((x) >> GSS_C_CALLING_ERROR_OFFSET) & GSS_C_CALLING_ERROR_MASK) +#define GSS_ROUTINE_ERROR_FIELD(x) \ + (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK) +#define GSS_SUPPLEMENTARY_INFO_FIELD(x) \ + (((x) >> GSS_C_SUPPLEMENTARY_OFFSET) & GSS_C_SUPPLEMENTARY_MASK) + +/* XXXX This is a necessary evil until the spec is fixed */ +#define GSS_S_CRED_UNAVAIL GSS_S_FAILURE + +#endif /* __KERNEL__ */ +#endif /* __LINUX_SUNRPC_GSS_ERR_H */ diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h new file mode 100644 index 00000000..5af2931c --- /dev/null +++ b/include/linux/sunrpc/gss_krb5.h @@ -0,0 +1,331 @@ +/* + * linux/include/linux/sunrpc/gss_krb5_types.h + * + * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h, + * lib/gssapi/krb5/gssapiP_krb5.h, and others + * + * Copyright (c) 2000-2008 The Regents of the University of Michigan. + * All rights reserved. + * + * Andy Adamson <andros@umich.edu> + * Bruce Fields <bfields@umich.edu> + */ + +/* + * Copyright 1995 by the Massachusetts Institute of Technology. + * All Rights Reserved. + * + * Export of this software from the United States of America may + * require a specific license from the United States Government. + * It is the responsibility of any person or organization contemplating + * export to obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of M.I.T. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. Furthermore if you modify this software you must label + * your software as modified software and not distribute it in such a + * fashion that it might be confused with the original M.I.T. software. + * M.I.T. makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. 
+ * + */ + +#include <linux/crypto.h> +#include <linux/sunrpc/auth_gss.h> +#include <linux/sunrpc/gss_err.h> +#include <linux/sunrpc/gss_asn1.h> + +/* Length of constant used in key derivation */ +#define GSS_KRB5_K5CLENGTH (5) + +/* Maximum key length (in bytes) for the supported crypto algorithms*/ +#define GSS_KRB5_MAX_KEYLEN (32) + +/* Maximum checksum function output for the supported crypto algorithms */ +#define GSS_KRB5_MAX_CKSUM_LEN (20) + +/* Maximum blocksize for the supported crypto algorithms */ +#define GSS_KRB5_MAX_BLOCKSIZE (16) + +struct krb5_ctx; + +struct gss_krb5_enctype { + const u32 etype; /* encryption (key) type */ + const u32 ctype; /* checksum type */ + const char *name; /* "friendly" name */ + const char *encrypt_name; /* crypto encrypt name */ + const char *cksum_name; /* crypto checksum name */ + const u16 signalg; /* signing algorithm */ + const u16 sealalg; /* sealing algorithm */ + const u32 blocksize; /* encryption blocksize */ + const u32 conflen; /* confounder length + (normally the same as + the blocksize) */ + const u32 cksumlength; /* checksum length */ + const u32 keyed_cksum; /* is it a keyed cksum? */ + const u32 keybytes; /* raw key len, in bytes */ + const u32 keylength; /* final key len, in bytes */ + u32 (*encrypt) (struct crypto_blkcipher *tfm, + void *iv, void *in, void *out, + int length); /* encryption function */ + u32 (*decrypt) (struct crypto_blkcipher *tfm, + void *iv, void *in, void *out, + int length); /* decryption function */ + u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *in, + struct xdr_netobj *out); /* complete key generation */ + u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, int ec, + struct page **pages); /* v2 encryption function */ + u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, u32 *headskip, + u32 *tailskip); /* v2 decryption function */ +}; + +/* krb5_ctx flags definitions */ +#define KRB5_CTX_FLAG_INITIATOR 0x00000001 +#define KRB5_CTX_FLAG_CFX 0x00000002 +#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004 + +struct krb5_ctx { + int initiate; /* 1 = initiating, 0 = accepting */ + u32 enctype; + u32 flags; + const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ + struct crypto_blkcipher *enc; + struct crypto_blkcipher *seq; + struct crypto_blkcipher *acceptor_enc; + struct crypto_blkcipher *initiator_enc; + struct crypto_blkcipher *acceptor_enc_aux; + struct crypto_blkcipher *initiator_enc_aux; + u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ + u8 cksum[GSS_KRB5_MAX_KEYLEN]; + s32 endtime; + u32 seq_send; + u64 seq_send64; + struct xdr_netobj mech_used; + u8 initiator_sign[GSS_KRB5_MAX_KEYLEN]; + u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN]; + u8 initiator_seal[GSS_KRB5_MAX_KEYLEN]; + u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN]; + u8 initiator_integ[GSS_KRB5_MAX_KEYLEN]; + u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN]; +}; + +extern spinlock_t krb5_seq_lock; + +/* The length of the Kerberos GSS token header */ +#define GSS_KRB5_TOK_HDR_LEN (16) + +#define KG_TOK_MIC_MSG 0x0101 +#define KG_TOK_WRAP_MSG 0x0201 + +#define KG2_TOK_INITIAL 0x0101 +#define KG2_TOK_RESPONSE 0x0202 +#define KG2_TOK_MIC 0x0404 +#define KG2_TOK_WRAP 0x0504 + +#define KG2_TOKEN_FLAG_SENTBYACCEPTOR 0x01 +#define KG2_TOKEN_FLAG_SEALED 0x02 +#define KG2_TOKEN_FLAG_ACCEPTORSUBKEY 0x04 + +#define KG2_RESP_FLAG_ERROR 0x0001 +#define KG2_RESP_FLAG_DELEG_OK 0x0002 + +enum sgn_alg { + SGN_ALG_DES_MAC_MD5 = 0x0000, + SGN_ALG_MD2_5 = 0x0001, + SGN_ALG_DES_MAC = 0x0002, + SGN_ALG_3 = 
0x0003, /* not published */ + SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; no support */ + SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004 +}; +enum seal_alg { + SEAL_ALG_NONE = 0xffff, + SEAL_ALG_DES = 0x0000, + SEAL_ALG_1 = 0x0001, /* not published */ + SEAL_ALG_MICROSOFT_RC4 = 0x0010,/* microsoft w2k; no support */ + SEAL_ALG_DES3KD = 0x0002 +}; + +#define CKSUMTYPE_CRC32 0x0001 +#define CKSUMTYPE_RSA_MD4 0x0002 +#define CKSUMTYPE_RSA_MD4_DES 0x0003 +#define CKSUMTYPE_DESCBC 0x0004 +#define CKSUMTYPE_RSA_MD5 0x0007 +#define CKSUMTYPE_RSA_MD5_DES 0x0008 +#define CKSUMTYPE_NIST_SHA 0x0009 +#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c +#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f +#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010 +#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */ + +/* from gssapi_err_krb5.h */ +#define KG_CCACHE_NOMATCH (39756032L) +#define KG_KEYTAB_NOMATCH (39756033L) +#define KG_TGT_MISSING (39756034L) +#define KG_NO_SUBKEY (39756035L) +#define KG_CONTEXT_ESTABLISHED (39756036L) +#define KG_BAD_SIGN_TYPE (39756037L) +#define KG_BAD_LENGTH (39756038L) +#define KG_CTX_INCOMPLETE (39756039L) +#define KG_CONTEXT (39756040L) +#define KG_CRED (39756041L) +#define KG_ENC_DESC (39756042L) +#define KG_BAD_SEQ (39756043L) +#define KG_EMPTY_CCACHE (39756044L) +#define KG_NO_CTYPES (39756045L) + +/* per Kerberos v5 protocol spec crypto types from the wire. + * these get mapped to linux kernel crypto routines. + */ +#define ENCTYPE_NULL 0x0000 +#define ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */ +#define ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */ +#define ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */ +#define ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */ +/* XXX deprecated? */ +#define ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */ +#define ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */ +#define ENCTYPE_DES_HMAC_SHA1 0x0008 +#define ENCTYPE_DES3_CBC_SHA1 0x0010 +#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011 +#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012 +#define ENCTYPE_ARCFOUR_HMAC 0x0017 +#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018 +#define ENCTYPE_UNKNOWN 0x01ff + +/* + * Constants used for key derivation + */ +/* for 3DES */ +#define KG_USAGE_SEAL (22) +#define KG_USAGE_SIGN (23) +#define KG_USAGE_SEQ (24) + +/* from rfc3961 */ +#define KEY_USAGE_SEED_CHECKSUM (0x99) +#define KEY_USAGE_SEED_ENCRYPTION (0xAA) +#define KEY_USAGE_SEED_INTEGRITY (0x55) + +/* from rfc4121 */ +#define KG_USAGE_ACCEPTOR_SEAL (22) +#define KG_USAGE_ACCEPTOR_SIGN (23) +#define KG_USAGE_INITIATOR_SEAL (24) +#define KG_USAGE_INITIATOR_SIGN (25) + +/* + * This compile-time check verifies that we will not exceed the + * slack space allotted by the client and server auth_gss code + * before they call gss_wrap(). 
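/*
 * [Editor's note -- worked example, not part of this patch. With the
 *  constants defined earlier in this header (GSS_KRB5_TOK_HDR_LEN = 16,
 *  GSS_KRB5_MAX_CKSUM_LEN = 20, GSS_KRB5_MAX_BLOCKSIZE = 16), the macro
 *  defined just below works out to
 *
 *	16 + 20 + 16 + 16 + 16 + 20 + (4 + 4) + 16 + 20 = 148 bytes
 *
 *  of worst-case expansion. A hypothetical compile-time check:]
 */
#include <linux/bug.h>
#include <linux/sunrpc/gss_krb5.h>

static inline void example_check_krb5_slack(void)
{
	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED != 148);
}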
+ */ +#define GSS_KRB5_MAX_SLACK_NEEDED \ + (GSS_KRB5_TOK_HDR_LEN /* gss token header */ \ + + GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \ + + GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \ + + GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \ + + GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\ + + GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \ + + 4 + 4 /* RPC verifier */ \ + + GSS_KRB5_TOK_HDR_LEN \ + + GSS_KRB5_MAX_CKSUM_LEN) + +u32 +make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *cksumkey, + unsigned int usage, struct xdr_netobj *cksumout); + +u32 +make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *key, + unsigned int usage, struct xdr_netobj *cksum); + +u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *, + struct xdr_netobj *); + +u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *, + struct xdr_netobj *); + +u32 +gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, + struct xdr_buf *outbuf, struct page **pages); + +u32 +gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, + struct xdr_buf *buf); + + +u32 +krb5_encrypt(struct crypto_blkcipher *key, + void *iv, void *in, void *out, int length); + +u32 +krb5_decrypt(struct crypto_blkcipher *key, + void *iv, void *in, void *out, int length); + +int +gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf, + int offset, struct page **pages); + +int +gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf, + int offset); + +s32 +krb5_make_seq_num(struct krb5_ctx *kctx, + struct crypto_blkcipher *key, + int direction, + u32 seqnum, unsigned char *cksum, unsigned char *buf); + +s32 +krb5_get_seq_num(struct krb5_ctx *kctx, + unsigned char *cksum, + unsigned char *buf, int *direction, u32 *seqnum); + +int +xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen); + +u32 +krb5_derive_key(const struct gss_krb5_enctype *gk5e, + const struct xdr_netobj *inkey, + struct xdr_netobj *outkey, + const struct xdr_netobj *in_constant, + gfp_t gfp_mask); + +u32 +gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *randombits, + struct xdr_netobj *key); + +u32 +gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *randombits, + struct xdr_netobj *key); + +u32 +gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, int ec, + struct page **pages); + +u32 +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, u32 *plainoffset, + u32 *plainlen); + +int +krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, + struct crypto_blkcipher *cipher, + unsigned char *cksum); + +int +krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, + struct crypto_blkcipher *cipher, + s32 seqnum); +void +gss_krb5_make_confounder(char *p, u32 conflen); diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h new file mode 100644 index 00000000..ec6234ee --- /dev/null +++ b/include/linux/sunrpc/gss_krb5_enctypes.h @@ -0,0 +1,4 @@ +/* + * Dumb way to share this static piece of information with nfsd + */ +#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h new file mode 100644 index 00000000..1565bbe8 --- /dev/null +++ b/include/linux/sunrpc/metrics.h @@ -0,0 +1,92 @@ +/* + * linux/include/linux/sunrpc/metrics.h + * + * Declarations for RPC client per-operation metrics + * + * Copyright (C) 
2005 Chuck Lever <cel@netapp.com> + * + * RPC client per-operation statistics provide latency and retry + * information about each type of RPC procedure in a given RPC program. + * These statistics are not for detailed problem diagnosis, but simply + * to indicate whether the problem is local or remote. + * + * These counters are not meant to be human-readable, but are meant to be + * integrated into system monitoring tools such as "sar" and "iostat". As + * such, the counters are sampled by the tools over time, and are never + * zeroed after a file system is mounted. Moving averages can be computed + * by the tools by taking the difference between two instantaneous samples + * and dividing that by the time between the samples. + * + * The counters are maintained in a single array per RPC client, indexed + * by procedure number. There is no need to maintain separate counter + * arrays per-CPU because these counters are always modified behind locks. + */ + +#ifndef _LINUX_SUNRPC_METRICS_H +#define _LINUX_SUNRPC_METRICS_H + +#include <linux/seq_file.h> +#include <linux/ktime.h> + +#define RPC_IOSTATS_VERS "1.0" + +struct rpc_iostats { + /* + * These counters give an idea about how many request + * transmissions are required, on average, to complete that + * particular procedure. Some procedures may require more + * than one transmission because the server is unresponsive, + * the client is retransmitting too aggressively, or the + * requests are large and the network is congested. + */ + unsigned long om_ops, /* count of operations */ + om_ntrans, /* count of RPC transmissions */ + om_timeouts; /* count of major timeouts */ + + /* + * These count how many bytes are sent and received for a + * given RPC procedure type. This indicates how much load a + * particular procedure is putting on the network. These + * counts include the RPC and ULP headers, and the request + * payload. + */ + unsigned long long om_bytes_sent, /* count of bytes out */ + om_bytes_recv; /* count of bytes in */ + + /* + * The length of time an RPC request waits in queue before + * transmission, the network + server latency of the request, + * and the total time the request spent from init to release + * are measured. 
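/*
 * [Editor's sketch -- not part of this patch. The "difference of two
 *  samples" rule described above, written out; names are hypothetical,
 *  and the samples would come from om_ops in struct rpc_iostats.]
 */
static inline unsigned long example_ops_per_sec(unsigned long ops_then,
						unsigned long ops_now,
						unsigned long secs_between)
{
	if (!secs_between)
		return 0;
	/* counters are never zeroed, so a rate is always a sample delta */
	return (ops_now - ops_then) / secs_between;
}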
+ */ + ktime_t om_queue, /* queued for xmit */ + om_rtt, /* RPC RTT */ + om_execute; /* RPC execution */ +} ____cacheline_aligned; + +struct rpc_task; +struct rpc_clnt; + +/* + * EXPORTed functions for managing rpc_iostats structures + */ + +#ifdef CONFIG_PROC_FS + +struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *); +void rpc_count_iostats(const struct rpc_task *, + struct rpc_iostats *); +void rpc_print_iostats(struct seq_file *, struct rpc_clnt *); +void rpc_free_iostats(struct rpc_iostats *); + +#else /* CONFIG_PROC_FS */ + +static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; } +static inline void rpc_count_iostats(const struct rpc_task *task, + struct rpc_iostats *stats) {} +static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {} +static inline void rpc_free_iostats(struct rpc_iostats *stats) {} + +#endif /* CONFIG_PROC_FS */ + +#endif /* _LINUX_SUNRPC_METRICS_H */ diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h new file mode 100644 index 00000000..c68a1479 --- /dev/null +++ b/include/linux/sunrpc/msg_prot.h @@ -0,0 +1,211 @@ +/* + * linux/include/linux/sunrpc/msg_prot.h + * + * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_SUNRPC_MSGPROT_H_ +#define _LINUX_SUNRPC_MSGPROT_H_ + +#ifdef __KERNEL__ /* user programs should get these from the rpc header files */ + +#define RPC_VERSION 2 + +/* size of an XDR encoding unit in bytes, i.e. 32bit */ +#define XDR_UNIT (4) + +/* spec defines authentication flavor as an unsigned 32 bit integer */ +typedef u32 rpc_authflavor_t; + +enum rpc_auth_flavors { + RPC_AUTH_NULL = 0, + RPC_AUTH_UNIX = 1, + RPC_AUTH_SHORT = 2, + RPC_AUTH_DES = 3, + RPC_AUTH_KRB = 4, + RPC_AUTH_GSS = 6, + RPC_AUTH_MAXFLAVOR = 8, + /* pseudoflavors: */ + RPC_AUTH_GSS_KRB5 = 390003, + RPC_AUTH_GSS_KRB5I = 390004, + RPC_AUTH_GSS_KRB5P = 390005, + RPC_AUTH_GSS_LKEY = 390006, + RPC_AUTH_GSS_LKEYI = 390007, + RPC_AUTH_GSS_LKEYP = 390008, + RPC_AUTH_GSS_SPKM = 390009, + RPC_AUTH_GSS_SPKMI = 390010, + RPC_AUTH_GSS_SPKMP = 390011, +}; + +/* Maximum size (in bytes) of an rpc credential or verifier */ +#define RPC_MAX_AUTH_SIZE (400) + +enum rpc_msg_type { + RPC_CALL = 0, + RPC_REPLY = 1 +}; + +enum rpc_reply_stat { + RPC_MSG_ACCEPTED = 0, + RPC_MSG_DENIED = 1 +}; + +enum rpc_accept_stat { + RPC_SUCCESS = 0, + RPC_PROG_UNAVAIL = 1, + RPC_PROG_MISMATCH = 2, + RPC_PROC_UNAVAIL = 3, + RPC_GARBAGE_ARGS = 4, + RPC_SYSTEM_ERR = 5, + /* internal use only */ + RPC_DROP_REPLY = 60000, +}; + +enum rpc_reject_stat { + RPC_MISMATCH = 0, + RPC_AUTH_ERROR = 1 +}; + +enum rpc_auth_stat { + RPC_AUTH_OK = 0, + RPC_AUTH_BADCRED = 1, + RPC_AUTH_REJECTEDCRED = 2, + RPC_AUTH_BADVERF = 3, + RPC_AUTH_REJECTEDVERF = 4, + RPC_AUTH_TOOWEAK = 5, + /* RPCSEC_GSS errors */ + RPCSEC_GSS_CREDPROBLEM = 13, + RPCSEC_GSS_CTXPROBLEM = 14 +}; + +#define RPC_MAXNETNAMELEN 256 + +/* + * From RFC 1831: + * + * "A record is composed of one or more record fragments. A record + * fragment is a four-byte header followed by 0 to (2**31) - 1 bytes of + * fragment data. The bytes encode an unsigned binary number; as with + * XDR integers, the byte order is from highest to lowest. The number + * encodes two values -- a boolean which indicates whether the fragment + * is the last fragment of the record (bit value 1 implies the fragment + * is the last fragment) and a 31-bit unsigned binary value which is the + * length in bytes of the fragment's data. 
The boolean value is the + * highest-order bit of the header; the length is the 31 low-order bits. + * (Note that this record specification is NOT in XDR standard form!)" + * + * The Linux RPC client always sends its requests in a single record + * fragment, limiting the maximum payload size for stream transports to + * 2GB. + */ + +typedef __be32 rpc_fraghdr; + +#define RPC_LAST_STREAM_FRAGMENT (1U << 31) +#define RPC_FRAGMENT_SIZE_MASK (~RPC_LAST_STREAM_FRAGMENT) +#define RPC_MAX_FRAGMENT_SIZE ((1U << 31) - 1) + +/* + * RPC call and reply header size as number of 32bit words (verifier + * size computed separately, see below) + */ +#define RPC_CALLHDRSIZE (6) +#define RPC_REPHDRSIZE (4) + + +/* + * Maximum RPC header size, including authentication, + * as number of 32bit words (see RFCs 1831, 1832). + * + * xid 1 xdr unit = 4 bytes + * mtype 1 + * rpc_version 1 + * program 1 + * prog_version 1 + * procedure 1 + * cred { + * flavor 1 + * length 1 + * body<RPC_MAX_AUTH_SIZE> 100 xdr units = 400 bytes + * } + * verf { + * flavor 1 + * length 1 + * body<RPC_MAX_AUTH_SIZE> 100 xdr units = 400 bytes + * } + * TOTAL 210 xdr units = 840 bytes + */ +#define RPC_MAX_HEADER_WITH_AUTH \ + (RPC_CALLHDRSIZE + 2*(2+RPC_MAX_AUTH_SIZE/4)) + +/* + * RFC1833/RFC3530 rpcbind (v3+) well-known netid's. + */ +#define RPCBIND_NETID_UDP "udp" +#define RPCBIND_NETID_TCP "tcp" +#define RPCBIND_NETID_UDP6 "udp6" +#define RPCBIND_NETID_TCP6 "tcp6" +#define RPCBIND_NETID_LOCAL "local" + +/* + * Note that RFC 1833 does not put any size restrictions on the + * netid string, but all currently defined netid's fit in 4 bytes. + */ +#define RPCBIND_MAXNETIDLEN (4u) + +/* + * Universal addresses are introduced in RFC 1833 and further spelled + * out in RFC 3530. RPCBIND_MAXUADDRLEN defines a maximum byte length + * of a universal address for use in allocating buffers and character + * arrays. + * + * Quoting RFC 3530, section 2.2: + * + * For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the + * US-ASCII string: + * + * h1.h2.h3.h4.p1.p2 + * + * The prefix, "h1.h2.h3.h4", is the standard textual form for + * representing an IPv4 address, which is always four octets long. + * Assuming big-endian ordering, h1, h2, h3, and h4, are respectively, + * the first through fourth octets each converted to ASCII-decimal. + * Assuming big-endian ordering, p1 and p2 are, respectively, the first + * and second octets each converted to ASCII-decimal. For example, if a + * host, in big-endian order, has an address of 0x0A010307 and there is + * a service listening on, in big endian order, port 0x020F (decimal + * 527), then the complete universal address is "10.1.3.7.2.15". + * + * ... + * + * For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the + * US-ASCII string: + * + * x1:x2:x3:x4:x5:x6:x7:x8.p1.p2 + * + * The suffix "p1.p2" is the service port, and is computed the same way + * as with universal addresses for TCP and UDP over IPv4. The prefix, + * "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for + * representing an IPv6 address as defined in Section 2.2 of [RFC2373]. + * Additionally, the two alternative forms specified in Section 2.2 of + * [RFC2373] are also acceptable. 
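/*
 * [Editor's sketch -- not part of this patch. Formats the IPv4 universal
 *  address form quoted above ("h1.h2.h3.h4.p1.p2"); the function name is
 *  hypothetical, and buf should be at least RPCBIND_MAXUADDR4LEN bytes,
 *  as defined just below.]
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/in.h>

static int example_format_uaddr4(char *buf, size_t len,
				 const struct sockaddr_in *sin)
{
	const u8 *ap = (const u8 *)&sin->sin_addr.s_addr;
	const u8 *pp = (const u8 *)&sin->sin_port;	/* already big-endian */

	/* e.g. 10.1.3.7, port 527 (0x020f) -> "10.1.3.7.2.15" */
	return snprintf(buf, len, "%u.%u.%u.%u.%u.%u",
			ap[0], ap[1], ap[2], ap[3], pp[0], pp[1]);
}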
+ */ + +#include <linux/inet.h> + +/* Maximum size of the port number part of a universal address */ +#define RPCBIND_MAXUADDRPLEN sizeof(".255.255") + +/* Maximum size of an IPv4 universal address */ +#define RPCBIND_MAXUADDR4LEN \ + (INET_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN) + +/* Maximum size of an IPv6 universal address */ +#define RPCBIND_MAXUADDR6LEN \ + (INET6_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN) + +/* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */ +#define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SUNRPC_MSGPROT_H_ */ diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h new file mode 100644 index 00000000..a7b422b3 --- /dev/null +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -0,0 +1,97 @@ +#ifndef _LINUX_SUNRPC_RPC_PIPE_FS_H +#define _LINUX_SUNRPC_RPC_PIPE_FS_H + +#ifdef __KERNEL__ + +#include <linux/workqueue.h> + +struct rpc_pipe_msg { + struct list_head list; + void *data; + size_t len; + size_t copied; + int errno; +}; + +struct rpc_pipe_ops { + ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t); + ssize_t (*downcall)(struct file *, const char __user *, size_t); + void (*release_pipe)(struct inode *); + int (*open_pipe)(struct inode *); + void (*destroy_msg)(struct rpc_pipe_msg *); +}; + +struct rpc_pipe { + struct list_head pipe; + struct list_head in_upcall; + struct list_head in_downcall; + int pipelen; + int nreaders; + int nwriters; +#define RPC_PIPE_WAIT_FOR_OPEN 1 + int flags; + struct delayed_work queue_timeout; + const struct rpc_pipe_ops *ops; + spinlock_t lock; + struct dentry *dentry; +}; + +struct rpc_inode { + struct inode vfs_inode; + void *private; + struct rpc_pipe *pipe; + wait_queue_head_t waitq; +}; + +static inline struct rpc_inode * +RPC_I(struct inode *inode) +{ + return container_of(inode, struct rpc_inode, vfs_inode); +} + +enum { + SUNRPC_PIPEFS_NFS_PRIO, + SUNRPC_PIPEFS_RPC_PRIO, +}; + +extern int rpc_pipefs_notifier_register(struct notifier_block *); +extern void rpc_pipefs_notifier_unregister(struct notifier_block *); + +enum { + RPC_PIPEFS_MOUNT, + RPC_PIPEFS_UMOUNT, +}; + +extern struct dentry *rpc_d_lookup_sb(const struct super_block *sb, + const unsigned char *dir_name); +extern void rpc_pipefs_init_net(struct net *net); +extern struct super_block *rpc_get_sb_net(const struct net *net); +extern void rpc_put_sb_net(const struct net *net); + +extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, + char __user *, size_t); +extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *); + +struct rpc_clnt; +extern struct dentry *rpc_create_client_dir(struct dentry *, struct qstr *, struct rpc_clnt *); +extern int rpc_remove_client_dir(struct dentry *); + +struct cache_detail; +extern struct dentry *rpc_create_cache_dir(struct dentry *, + struct qstr *, + umode_t umode, + struct cache_detail *); +extern void rpc_remove_cache_dir(struct dentry *); + +extern int rpc_rmdir(struct dentry *dentry); + +struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags); +void rpc_destroy_pipe_data(struct rpc_pipe *pipe); +extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *, + struct rpc_pipe *); +extern int rpc_unlink(struct dentry *); +extern int register_rpc_pipefs(void); +extern void unregister_rpc_pipefs(void); + +#endif +#endif diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h new file mode 100644 index 00000000..b78f16b1 --- /dev/null +++ 
b/include/linux/sunrpc/rpc_rdma.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the BSD-type + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * Neither the name of the Network Appliance, Inc. nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LINUX_SUNRPC_RPC_RDMA_H +#define _LINUX_SUNRPC_RPC_RDMA_H + +#include <linux/types.h> + +struct rpcrdma_segment { + __be32 rs_handle; /* Registered memory handle */ + __be32 rs_length; /* Length of the chunk in bytes */ + __be64 rs_offset; /* Chunk virtual address or offset */ +}; + +/* + * read chunk(s), encoded as a linked list. + */ +struct rpcrdma_read_chunk { + __be32 rc_discrim; /* 1 indicates presence */ + __be32 rc_position; /* Position in XDR stream */ + struct rpcrdma_segment rc_target; +}; + +/* + * write chunk, and reply chunk. + */ +struct rpcrdma_write_chunk { + struct rpcrdma_segment wc_target; +}; + +/* + * write chunk(s), encoded as a counted array. 
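/*
 * [Editor's sketch -- not part of this patch. Walking the read chunk list
 *  defined above: entries follow one another in the header, and a zero
 *  discriminator terminates the list. The helper name is hypothetical.]
 */
#include <linux/sunrpc/rpc_rdma.h>

static inline unsigned int example_count_read_chunks(const struct rpcrdma_read_chunk *ch)
{
	unsigned int n = 0;

	while (ch->rc_discrim) {	/* rc_discrim == 1 marks another entry */
		n++;
		ch++;
	}
	return n;
}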
+ */ +struct rpcrdma_write_array { + __be32 wc_discrim; /* 1 indicates presence */ + __be32 wc_nchunks; /* Array count */ + struct rpcrdma_write_chunk wc_array[0]; +}; + +struct rpcrdma_msg { + __be32 rm_xid; /* Mirrors the RPC header xid */ + __be32 rm_vers; /* Version of this protocol */ + __be32 rm_credit; /* Buffers requested/granted */ + __be32 rm_type; /* Type of message (enum rpcrdma_proc) */ + union { + + struct { /* no chunks */ + __be32 rm_empty[3]; /* 3 empty chunk lists */ + } rm_nochunks; + + struct { /* no chunks and padded */ + __be32 rm_align; /* Padding alignment */ + __be32 rm_thresh; /* Padding threshold */ + __be32 rm_pempty[3]; /* 3 empty chunk lists */ + } rm_padded; + + __be32 rm_chunks[0]; /* read, write and reply chunks */ + + } rm_body; +}; + +#define RPCRDMA_HDRLEN_MIN 28 + +enum rpcrdma_errcode { + ERR_VERS = 1, + ERR_CHUNK = 2 +}; + +struct rpcrdma_err_vers { + uint32_t rdma_vers_low; /* Version range supported by peer */ + uint32_t rdma_vers_high; +}; + +enum rpcrdma_proc { + RDMA_MSG = 0, /* An RPC call or reply msg */ + RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */ + RDMA_MSGP = 2, /* An RPC call or reply msg with padding */ + RDMA_DONE = 3, /* Client signals reply completion */ + RDMA_ERROR = 4 /* An RPC RDMA encoding error */ +}; + +#endif /* _LINUX_SUNRPC_RPC_RDMA_H */ diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h new file mode 100644 index 00000000..dc0c3cc3 --- /dev/null +++ b/include/linux/sunrpc/sched.h @@ -0,0 +1,292 @@ +/* + * linux/include/linux/sunrpc/sched.h + * + * Scheduling primitives for kernel Sun RPC. + * + * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_SUNRPC_SCHED_H_ +#define _LINUX_SUNRPC_SCHED_H_ + +#include <linux/timer.h> +#include <linux/ktime.h> +#include <linux/sunrpc/types.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/workqueue.h> +#include <linux/sunrpc/xdr.h> + +/* + * This is the actual RPC procedure call info. 
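/*
 * [Editor's sketch -- not part of this patch. Shows how the declarations
 *  in the remainder of this header combine for an async call: fill in an
 *  rpc_message and an rpc_task_setup, hand them to rpc_run_task(), then
 *  drop the task reference. This is roughly the pattern rpc_call_async()
 *  in clnt.h wraps; all example_* names are hypothetical.]
 */
#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static void example_call_done(struct rpc_task *task, void *calldata)
{
	/* task->tk_status carries the result of the last operation */
}

static const struct rpc_call_ops example_call_ops = {
	.rpc_call_done	= example_call_done,
};

static int example_async_call(struct rpc_clnt *clnt, struct rpc_procinfo *proc,
			      void *argp, void *resp)
{
	struct rpc_message msg = {
		.rpc_proc	= proc,
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	struct rpc_task_setup setup = {
		.rpc_client	= clnt,
		.rpc_message	= &msg,
		.callback_ops	= &example_call_ops,
		.flags		= RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	task = rpc_run_task(&setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);	/* rpciod keeps its own reference while running */
	return 0;
}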
+ */ +struct rpc_procinfo; +struct rpc_message { + struct rpc_procinfo * rpc_proc; /* Procedure information */ + void * rpc_argp; /* Arguments */ + void * rpc_resp; /* Result */ + struct rpc_cred * rpc_cred; /* Credentials */ +}; + +struct rpc_call_ops; +struct rpc_wait_queue; +struct rpc_wait { + struct list_head list; /* wait queue links */ + struct list_head links; /* Links to related tasks */ + struct list_head timer_list; /* Timer list */ + unsigned long expires; +}; + +/* + * This is the RPC task struct + */ +struct rpc_task { + atomic_t tk_count; /* Reference count */ + struct list_head tk_task; /* global list of tasks */ + struct rpc_clnt * tk_client; /* RPC client */ + struct rpc_rqst * tk_rqstp; /* RPC request */ + + /* + * RPC call state + */ + struct rpc_message tk_msg; /* RPC call info */ + + /* + * callback to be executed after waking up + * action next procedure for async tasks + * tk_ops caller callbacks + */ + void (*tk_callback)(struct rpc_task *); + void (*tk_action)(struct rpc_task *); + const struct rpc_call_ops *tk_ops; + void * tk_calldata; + + unsigned long tk_timeout; /* timeout for rpc_sleep() */ + unsigned long tk_runstate; /* Task run status */ + struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could + * be any workqueue + */ + struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */ + union { + struct work_struct tk_work; /* Async task work queue */ + struct rpc_wait tk_wait; /* RPC wait */ + } u; + + ktime_t tk_start; /* RPC task init timestamp */ + + pid_t tk_owner; /* Process id for batching tasks */ + int tk_status; /* result of last operation */ + unsigned short tk_flags; /* misc flags */ + unsigned short tk_timeouts; /* maj timeouts */ + +#ifdef RPC_DEBUG + unsigned short tk_pid; /* debugging aid */ +#endif + unsigned char tk_priority : 2,/* Task priority */ + tk_garb_retry : 2, + tk_cred_retry : 2, + tk_rebind_retry : 2; +}; +#define tk_xprt tk_client->cl_xprt + +/* support walking a list of tasks on a wait queue */ +#define task_for_each(task, pos, head) \ + list_for_each(pos, head) \ + if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1) + +#define task_for_first(task, head) \ + if (!list_empty(head) && \ + ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1)) + +typedef void (*rpc_action)(struct rpc_task *); + +struct rpc_call_ops { + void (*rpc_call_prepare)(struct rpc_task *, void *); + void (*rpc_call_done)(struct rpc_task *, void *); + void (*rpc_count_stats)(struct rpc_task *, void *); + void (*rpc_release)(void *); +}; + +struct rpc_task_setup { + struct rpc_task *task; + struct rpc_clnt *rpc_client; + const struct rpc_message *rpc_message; + const struct rpc_call_ops *callback_ops; + void *callback_data; + struct workqueue_struct *workqueue; + unsigned short flags; + signed char priority; +}; + +/* + * RPC task flags + */ +#define RPC_TASK_ASYNC 0x0001 /* is an async task */ +#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */ +#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ +#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ +#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ +#define RPC_TASK_KILLED 0x0100 /* task was killed */ +#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ +#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ +#define RPC_TASK_SENT 0x0800 /* message was sent */ +#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ + +#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) +#define RPC_IS_SWAPPER(t) ((t)->tk_flags & 
RPC_TASK_SWAPPER) +#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) +#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) +#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) +#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) +#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) + +#define RPC_TASK_RUNNING 0 +#define RPC_TASK_QUEUED 1 +#define RPC_TASK_ACTIVE 2 + +#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) +#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) +#define rpc_test_and_set_running(t) \ + test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) +#define rpc_clear_running(t) \ + do { \ + smp_mb__before_clear_bit(); \ + clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \ + smp_mb__after_clear_bit(); \ + } while (0) + +#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) +#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) +#define rpc_clear_queued(t) \ + do { \ + smp_mb__before_clear_bit(); \ + clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \ + smp_mb__after_clear_bit(); \ + } while (0) + +#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) + +/* + * Task priorities. + * Note: if you change these, you must also change + * the task initialization definitions below. + */ +#define RPC_PRIORITY_LOW (-1) +#define RPC_PRIORITY_NORMAL (0) +#define RPC_PRIORITY_HIGH (1) +#define RPC_PRIORITY_PRIVILEGED (2) +#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) + +struct rpc_timer { + struct timer_list timer; + struct list_head list; + unsigned long expires; +}; + +/* + * RPC synchronization objects + */ +struct rpc_wait_queue { + spinlock_t lock; + struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ + pid_t owner; /* process id of last task serviced */ + unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ + unsigned char priority; /* current priority */ + unsigned char count; /* # task groups remaining serviced so far */ + unsigned char nr; /* # tasks remaining for cookie */ + unsigned short qlen; /* total # tasks waiting in queue */ + struct rpc_timer timer_list; +#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) + const char * name; +#endif +}; + +/* + * This is the # requests to send consecutively + * from a single cookie. The aim is to improve + * performance of NFS operations such as read/write. 
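/*
 * [Editor's note -- worked example, not part of this patch. With the
 *  values above, RPC_NR_PRIORITY = 1 + 2 - (-1) = 4, so a priority wait
 *  queue keeps four task lists; rpc_task_set_priority() (later in this
 *  header) stores prio - RPC_PRIORITY_LOW, i.e. 0..3, which is why
 *  rpc_task.tk_priority fits in a 2-bit field. A hypothetical
 *  compile-time check:]
 */
#include <linux/bug.h>
#include <linux/sunrpc/sched.h>

static inline void example_check_rpc_priorities(void)
{
	BUILD_BUG_ON(RPC_NR_PRIORITY != 4);
	BUILD_BUG_ON(RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW > 3);
}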
+ */ +#define RPC_BATCH_COUNT 16 +#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) + +/* + * Function prototypes + */ +struct rpc_task *rpc_new_task(const struct rpc_task_setup *); +struct rpc_task *rpc_run_task(const struct rpc_task_setup *); +struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, + const struct rpc_call_ops *ops); +void rpc_put_task(struct rpc_task *); +void rpc_put_task_async(struct rpc_task *); +void rpc_exit_task(struct rpc_task *); +void rpc_exit(struct rpc_task *, int); +void rpc_release_calldata(const struct rpc_call_ops *, void *); +void rpc_killall_tasks(struct rpc_clnt *); +void rpc_execute(struct rpc_task *); +void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *); +void rpc_init_wait_queue(struct rpc_wait_queue *, const char *); +void rpc_destroy_wait_queue(struct rpc_wait_queue *); +void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, + rpc_action action); +void rpc_sleep_on_priority(struct rpc_wait_queue *, + struct rpc_task *, + rpc_action action, + int priority); +void rpc_wake_up_queued_task(struct rpc_wait_queue *, + struct rpc_task *); +void rpc_wake_up(struct rpc_wait_queue *); +struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); +struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *, + bool (*)(struct rpc_task *, void *), + void *); +void rpc_wake_up_status(struct rpc_wait_queue *, int); +int rpc_queue_empty(struct rpc_wait_queue *); +void rpc_delay(struct rpc_task *, unsigned long); +void * rpc_malloc(struct rpc_task *, size_t); +void rpc_free(void *); +int rpciod_up(void); +void rpciod_down(void); +int __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *)); +#ifdef RPC_DEBUG +struct net; +void rpc_show_tasks(struct net *); +#endif +int rpc_init_mempool(void); +void rpc_destroy_mempool(void); +extern struct workqueue_struct *rpciod_workqueue; +void rpc_prepare_task(struct rpc_task *task); + +static inline int rpc_wait_for_completion_task(struct rpc_task *task) +{ + return __rpc_wait_for_completion_task(task, NULL); +} + +static inline void rpc_task_set_priority(struct rpc_task *task, unsigned char prio) +{ + task->tk_priority = prio - RPC_PRIORITY_LOW; +} + +static inline int rpc_task_has_priority(struct rpc_task *task, unsigned char prio) +{ + return (task->tk_priority + RPC_PRIORITY_LOW == prio); +} + +#if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS) +static inline const char * rpc_qname(const struct rpc_wait_queue *q) +{ + return ((q && q->name) ? 
q->name : "unknown"); +} + +static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, + const char *name) +{ + q->name = name; +} +#else +static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, + const char *name) +{ +} +#endif + +#endif /* _LINUX_SUNRPC_SCHED_H_ */ diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h new file mode 100644 index 00000000..edc64219 --- /dev/null +++ b/include/linux/sunrpc/stats.h @@ -0,0 +1,84 @@ +/* + * linux/include/linux/sunrpc/stats.h + * + * Client statistics collection for SUN RPC + * + * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_SUNRPC_STATS_H +#define _LINUX_SUNRPC_STATS_H + +#include <linux/proc_fs.h> + +struct rpc_stat { + const struct rpc_program *program; + + unsigned int netcnt, + netudpcnt, + nettcpcnt, + nettcpconn, + netreconn; + unsigned int rpccnt, + rpcretrans, + rpcauthrefresh, + rpcgarbage; +}; + +struct svc_stat { + struct svc_program * program; + + unsigned int netcnt, + netudpcnt, + nettcpcnt, + nettcpconn; + unsigned int rpccnt, + rpcbadfmt, + rpcbadauth, + rpcbadclnt; +}; + +struct net; +#ifdef CONFIG_PROC_FS +int rpc_proc_init(struct net *); +void rpc_proc_exit(struct net *); +#else +static inline int rpc_proc_init(struct net *net) +{ + return 0; +} + +static inline void rpc_proc_exit(struct net *net) +{ +} +#endif + +#ifdef MODULE +void rpc_modcount(struct inode *, int); +#endif + +#ifdef CONFIG_PROC_FS +struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *); +void rpc_proc_unregister(struct net *,const char *); +void rpc_proc_zero(const struct rpc_program *); +struct proc_dir_entry * svc_proc_register(struct net *, struct svc_stat *, + const struct file_operations *); +void svc_proc_unregister(struct net *, const char *); + +void svc_seq_show(struct seq_file *, + const struct svc_stat *); +#else + +static inline struct proc_dir_entry *rpc_proc_register(struct net *net, struct rpc_stat *s) { return NULL; } +static inline void rpc_proc_unregister(struct net *net, const char *p) {} +static inline void rpc_proc_zero(const struct rpc_program *p) {} + +static inline struct proc_dir_entry *svc_proc_register(struct net *net, struct svc_stat *s, + const struct file_operations *f) { return NULL; } +static inline void svc_proc_unregister(struct net *net, const char *p) {} + +static inline void svc_seq_show(struct seq_file *seq, + const struct svc_stat *st) {} +#endif + +#endif /* _LINUX_SUNRPC_STATS_H */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h new file mode 100644 index 00000000..2b43e021 --- /dev/null +++ b/include/linux/sunrpc/svc.h @@ -0,0 +1,461 @@ +/* + * linux/include/linux/sunrpc/svc.h + * + * RPC server declarations. + * + * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> + */ + + +#ifndef SUNRPC_SVC_H +#define SUNRPC_SVC_H + +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/sunrpc/types.h> +#include <linux/sunrpc/xdr.h> +#include <linux/sunrpc/auth.h> +#include <linux/sunrpc/svcauth.h> +#include <linux/wait.h> +#include <linux/mm.h> + +/* + * This is the RPC server thread function prototype + */ +typedef int (*svc_thread_fn)(void *); + +/* statistics for svc_pool structures */ +struct svc_pool_stats { + unsigned long packets; + unsigned long sockets_queued; + unsigned long threads_woken; + unsigned long threads_timedout; +}; + +/* + * + * RPC service thread pool. + * + * Pool of threads and temporary sockets. 
Generally there is only + * a single one of these per RPC service, but on NUMA machines those + * services that can benefit from it (i.e. nfs but not lockd) will + * have one pool per NUMA node. This optimisation reduces cross- + * node traffic on multi-node NUMA NFS servers. + */ +struct svc_pool { + unsigned int sp_id; /* pool id; also node id on NUMA */ + spinlock_t sp_lock; /* protects all fields */ + struct list_head sp_threads; /* idle server threads */ + struct list_head sp_sockets; /* pending sockets */ + unsigned int sp_nrthreads; /* # of threads in pool */ + struct list_head sp_all_threads; /* all server threads */ + struct svc_pool_stats sp_stats; /* statistics on pool operation */ +} ____cacheline_aligned_in_smp; + +/* + * RPC service. + * + * An RPC service is a ``daemon,'' possibly multithreaded, which + * receives and processes incoming RPC messages. + * It has one or more transport sockets associated with it, and maintains + * a list of idle threads waiting for input. + * + * We currently do not support more than one RPC program per daemon. + */ +struct svc_serv { + struct svc_program * sv_program; /* RPC program */ + struct svc_stat * sv_stats; /* RPC statistics */ + spinlock_t sv_lock; + unsigned int sv_nrthreads; /* # of server threads */ + unsigned int sv_maxconn; /* max connections allowed or + * '0' causing max to be based + * on number of threads. */ + + unsigned int sv_max_payload; /* datagram payload size */ + unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */ + unsigned int sv_xdrsize; /* XDR buffer size */ + struct list_head sv_permsocks; /* all permanent sockets */ + struct list_head sv_tempsocks; /* all temporary sockets */ + int sv_tmpcnt; /* count of temporary sockets */ + struct timer_list sv_temptimer; /* timer for aging temporary sockets */ + + char * sv_name; /* service name */ + + unsigned int sv_nrpools; /* number of thread pools */ + struct svc_pool * sv_pools; /* array of thread pools */ + + void (*sv_shutdown)(struct svc_serv *serv, + struct net *net); + /* Callback to use when last thread + * exits. + */ + + struct module * sv_module; /* optional module to count when + * adding threads */ + svc_thread_fn sv_function; /* main function for threads */ +#if defined(CONFIG_SUNRPC_BACKCHANNEL) + struct list_head sv_cb_list; /* queue for callback requests + * that arrive over the same + * connection */ + spinlock_t sv_cb_lock; /* protects the svc_cb_list */ + wait_queue_head_t sv_cb_waitq; /* sleep here if there are no + * entries in the svc_cb_list */ + struct svc_xprt *sv_bc_xprt; /* callback on fore channel */ +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ +}; + +/* + * We use sv_nrthreads as a reference count. svc_destroy() drops + * this refcount, so we need to bump it up around operations that + * change the number of threads. Horrible, but there it is. + * Should be called with the BKL held. + */ +static inline void svc_get(struct svc_serv *serv) +{ + serv->sv_nrthreads++; +} + +/* + * Maximum payload size supported by a kernel RPC server. + * This is use to determine the max number of pages nfsd is + * willing to return in a single READ operation. + * + * These happen to all be powers of 2, which is not strictly + * necessary but helps enforce the real limitation, which is + * that they should be multiples of PAGE_CACHE_SIZE. + * + * For UDP transports, a block plus NFS,RPC, and UDP headers + * has to fit into the IP datagram limit of 64K. The largest + * feasible number for all known page sizes is probably 48K, + * but we choose 32K here. 
This is the same as the historical + * Linux limit; someone who cares more about NFS/UDP performance + * can test a larger number. + * + * For TCP transports we have more freedom. A size of 1MB is + * chosen to match the client limit. Other OSes are known to + * have larger limits, but those numbers are probably beyond + * the point of diminishing returns. + */ +#define RPCSVC_MAXPAYLOAD (1*1024*1024u) +#define RPCSVC_MAXPAYLOAD_TCP RPCSVC_MAXPAYLOAD +#define RPCSVC_MAXPAYLOAD_UDP (32*1024u) + +extern u32 svc_max_payload(const struct svc_rqst *rqstp); + +/* + * RPC Requests and replies are stored in one or more pages. + * We maintain an array of pages for each server thread. + * Requests are copied into these pages as they arrive. Remaining + * pages are available to write the reply into. + * + * Pages are sent using ->sendpage so each server thread needs to + * allocate more to replace those used in sending. To help keep track + * of these pages we have a receive list where all pages initially live, + * and a send list where pages are moved to when they are to be part + * of a reply. + * + * We use xdr_buf for holding responses as it fits well with NFS + * read responses (that have a header, and some data pages, and possibly + * a tail) and means we can share some client side routines. + * + * The xdr_buf.head kvec always points to the first page in the rq_*pages + * list. The xdr_buf.pages pointer points to the second page on that + * list. xdr_buf.tail points to the end of the first page. + * This assumes that the non-page part of an rpc reply will fit + * in a page - NFSd ensures this. lockd also has no trouble. + * + * Each request/reply pair can have at most one "payload", plus two pages, + * one for the request, and one for the reply. + * When using ->sendfile to return read data, we might need one extra page + * if the request is not page-aligned. So add another '1'. + */ +#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \ + + 2 + 1) + +static inline u32 svc_getnl(struct kvec *iov) +{ + __be32 val, *vp; + vp = iov->iov_base; + val = *vp++; + iov->iov_base = (void*)vp; + iov->iov_len -= sizeof(__be32); + return ntohl(val); +} + +static inline void svc_putnl(struct kvec *iov, u32 val) +{ + __be32 *vp = iov->iov_base + iov->iov_len; + *vp = htonl(val); + iov->iov_len += sizeof(__be32); +} + +static inline __be32 svc_getu32(struct kvec *iov) +{ + __be32 val, *vp; + vp = iov->iov_base; + val = *vp++; + iov->iov_base = (void*)vp; + iov->iov_len -= sizeof(__be32); + return val; +} + +static inline void svc_ungetu32(struct kvec *iov) +{ + __be32 *vp = (__be32 *)iov->iov_base; + iov->iov_base = (void *)(vp - 1); + iov->iov_len += sizeof(*vp); +} + +static inline void svc_putu32(struct kvec *iov, __be32 val) +{ + __be32 *vp = iov->iov_base + iov->iov_len; + *vp = val; + iov->iov_len += sizeof(__be32); +} + +/* + * The context of a single thread, including the request currently being + * processed.
+ */ +struct svc_rqst { + struct list_head rq_list; /* idle list */ + struct list_head rq_all; /* all threads list */ + struct svc_xprt * rq_xprt; /* transport ptr */ + + struct sockaddr_storage rq_addr; /* peer address */ + size_t rq_addrlen; + struct sockaddr_storage rq_daddr; /* dest addr of request + * - reply from here */ + size_t rq_daddrlen; + + struct svc_serv * rq_server; /* RPC service definition */ + struct svc_pool * rq_pool; /* thread pool */ + struct svc_procedure * rq_procinfo; /* procedure info */ + struct auth_ops * rq_authop; /* authentication flavour */ + u32 rq_flavor; /* pseudoflavor */ + struct svc_cred rq_cred; /* auth info */ + void * rq_xprt_ctxt; /* transport specific context ptr */ + struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ + int rq_usedeferral; /* use deferral */ + + size_t rq_xprt_hlen; /* xprt header len */ + struct xdr_buf rq_arg; + struct xdr_buf rq_res; + struct page * rq_pages[RPCSVC_MAXPAGES]; + struct page * *rq_respages; /* points into rq_pages */ + int rq_resused; /* number of pages used for result */ + + struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ + + __be32 rq_xid; /* transmission id */ + u32 rq_prog; /* program number */ + u32 rq_vers; /* program version */ + u32 rq_proc; /* procedure number */ + u32 rq_prot; /* IP protocol */ + unsigned short + rq_secure : 1; /* secure port */ + + void * rq_argp; /* decoded arguments */ + void * rq_resp; /* xdr'd results */ + void * rq_auth_data; /* flavor-specific data */ + + int rq_reserved; /* space on socket outq + * reserved for this request + */ + + struct cache_req rq_chandle; /* handle passed to caches for + * request delaying + */ + bool rq_dropme; + /* Catering to nfsd */ + struct auth_domain * rq_client; /* RPC peer info */ + struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ + int rq_cachetype; + struct svc_cacherep * rq_cacherep; /* cache info */ + int rq_splice_ok; /* turned off in gss privacy + * to prevent encrypting page + * cache pages */ + wait_queue_head_t rq_wait; /* synchronization */ + struct task_struct *rq_task; /* service thread */ +}; + +/* + * Rigorous type checking on sockaddr type conversions + */ +static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst) +{ + return (struct sockaddr_in *) &rqst->rq_addr; +} + +static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst) +{ + return (struct sockaddr_in6 *) &rqst->rq_addr; +} + +static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst) +{ + return (struct sockaddr *) &rqst->rq_addr; +} + +static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst) +{ + return (struct sockaddr_in *) &rqst->rq_daddr; +} + +static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst) +{ + return (struct sockaddr_in6 *) &rqst->rq_daddr; +} + +static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst) +{ + return (struct sockaddr *) &rqst->rq_daddr; +} + +/* + * Check buffer bounds after decoding arguments + */ +static inline int +xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p) +{ + char *cp = (char *)p; + struct kvec *vec = &rqstp->rq_arg.head[0]; + return cp >= (char*)vec->iov_base + && cp <= (char*)vec->iov_base + vec->iov_len; +} + +static inline int +xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p) +{ + struct kvec *vec = &rqstp->rq_res.head[0]; + char *cp = (char*)p; + + vec->iov_len = cp - (char*)vec->iov_base; + + return vec->iov_len <= PAGE_SIZE; +} + +static inline void 
svc_free_res_pages(struct svc_rqst *rqstp) +{ + while (rqstp->rq_resused) { + struct page **pp = (rqstp->rq_respages + + --rqstp->rq_resused); + if (*pp) { + put_page(*pp); + *pp = NULL; + } + } +} + +struct svc_deferred_req { + u32 prot; /* protocol (UDP or TCP) */ + struct svc_xprt *xprt; + struct sockaddr_storage addr; /* where reply must go */ + size_t addrlen; + struct sockaddr_storage daddr; /* where reply must come from */ + size_t daddrlen; + struct cache_deferred_req handle; + size_t xprt_hlen; + int argslen; + __be32 args[0]; +}; + +/* + * List of RPC programs on the same transport endpoint + */ +struct svc_program { + struct svc_program * pg_next; /* other programs (same xprt) */ + u32 pg_prog; /* program number */ + unsigned int pg_lovers; /* lowest version */ + unsigned int pg_hivers; /* highest version */ + unsigned int pg_nvers; /* number of versions */ + struct svc_version ** pg_vers; /* version array */ + char * pg_name; /* service name */ + char * pg_class; /* class name: services sharing authentication */ + struct svc_stat * pg_stats; /* rpc statistics */ + int (*pg_authenticate)(struct svc_rqst *); +}; + +/* + * RPC program version + */ +struct svc_version { + u32 vs_vers; /* version number */ + u32 vs_nproc; /* number of procedures */ + struct svc_procedure * vs_proc; /* per-procedure info */ + u32 vs_xdrsize; /* xdrsize needed for this version */ + + unsigned int vs_hidden : 1; /* Don't register with portmapper. + * Only used for nfsacl so far. */ + + /* Override dispatch function (e.g. when caching replies). + * A return value of 0 means drop the request. + * vs_dispatch == NULL means use default dispatcher. + */ + int (*vs_dispatch)(struct svc_rqst *, __be32 *); +}; + +/* + * RPC procedure info + */ +typedef __be32 (*svc_procfunc)(struct svc_rqst *, void *argp, void *resp); +struct svc_procedure { + svc_procfunc pc_func; /* process the request */ + kxdrproc_t pc_decode; /* XDR decode args */ + kxdrproc_t pc_encode; /* XDR encode result */ + kxdrproc_t pc_release; /* XDR free result */ + unsigned int pc_argsize; /* argument struct size */ + unsigned int pc_ressize; /* result struct size */ + unsigned int pc_count; /* call count */ + unsigned int pc_cachetype; /* cache info (NFS) */ + unsigned int pc_xdrressize; /* maximum size of XDR reply */ +}; + +/* + * Function prototypes.
+ */ +int svc_rpcb_setup(struct svc_serv *serv, struct net *net); +void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); +int svc_bind(struct svc_serv *serv, struct net *net); +struct svc_serv *svc_create(struct svc_program *, unsigned int, + void (*shutdown)(struct svc_serv *, struct net *net)); +struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, + struct svc_pool *pool, int node); +void svc_exit_thread(struct svc_rqst *); +struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, + void (*shutdown)(struct svc_serv *, struct net *net), + svc_thread_fn, struct module *); +int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); +int svc_pool_stats_open(struct svc_serv *serv, struct file *file); +void svc_destroy(struct svc_serv *); +void svc_shutdown_net(struct svc_serv *, struct net *); +int svc_process(struct svc_rqst *); +int bc_svc_process(struct svc_serv *, struct rpc_rqst *, + struct svc_rqst *); +int svc_register(const struct svc_serv *, struct net *, const int, + const unsigned short, const unsigned short); + +void svc_wake_up(struct svc_serv *); +void svc_reserve(struct svc_rqst *rqstp, int space); +struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); +char * svc_print_addr(struct svc_rqst *, char *, size_t); + +#define RPC_MAX_ADDRBUFLEN (63U) + +/* + * When we want to reduce the size of the reserved space in the response + * buffer, we need to take into account the size of any checksum data that + * may be at the end of the packet. This is difficult to determine exactly + * for all cases without actually generating the checksum, so we just use a + * static value. + */ +static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space) +{ + int added_space = 0; + + if (rqstp->rq_authop->flavour) + added_space = RPC_MAX_AUTH_SIZE; + svc_reserve(rqstp, space + added_space); +} + +#endif /* SUNRPC_SVC_H */ diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h new file mode 100644 index 00000000..0b8e3e6b --- /dev/null +++ b/include/linux/sunrpc/svc_rdma.h @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the BSD-type + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * Neither the name of the Network Appliance, Inc. nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Tom Tucker <tom@opengridcomputing.com> + */ + +#ifndef SVC_RDMA_H +#define SVC_RDMA_H +#include <linux/sunrpc/xdr.h> +#include <linux/sunrpc/svcsock.h> +#include <linux/sunrpc/rpc_rdma.h> +#include <rdma/ib_verbs.h> +#include <rdma/rdma_cm.h> +#define SVCRDMA_DEBUG + +/* RPC/RDMA parameters and stats */ +extern unsigned int svcrdma_ord; +extern unsigned int svcrdma_max_requests; +extern unsigned int svcrdma_max_req_size; + +extern atomic_t rdma_stat_recv; +extern atomic_t rdma_stat_read; +extern atomic_t rdma_stat_write; +extern atomic_t rdma_stat_sq_starve; +extern atomic_t rdma_stat_rq_starve; +extern atomic_t rdma_stat_rq_poll; +extern atomic_t rdma_stat_rq_prod; +extern atomic_t rdma_stat_sq_poll; +extern atomic_t rdma_stat_sq_prod; + +#define RPCRDMA_VERSION 1 + +/* + * Contexts are built when an RDMA request is created and are a + * record of the resources that can be recovered when the request + * completes. + */ +struct svc_rdma_op_ctxt { + struct svc_rdma_op_ctxt *read_hdr; + struct svc_rdma_fastreg_mr *frmr; + int hdr_count; + struct xdr_buf arg; + struct list_head dto_q; + enum ib_wr_opcode wr_op; + enum ib_wc_status wc_status; + u32 byte_len; + struct svcxprt_rdma *xprt; + unsigned long flags; + enum dma_data_direction direction; + int count; + struct ib_sge sge[RPCSVC_MAXPAGES]; + struct page *pages[RPCSVC_MAXPAGES]; +}; + +/* + * NFS_ requests are mapped on the client side by the chunk lists in + * the RPCRDMA header. During the fetching of the RPC from the client + * and the writing of the reply to the client, the memory in the + * client and the memory in the server must be mapped as contiguous + * vaddr/len for access by the hardware. These data strucures keep + * these mappings. + * + * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the + * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the + * 'ch' field maps the read-list of the RPCRDMA header to the 'sge' + * mapping of the reply. + */ +struct svc_rdma_chunk_sge { + int start; /* sge no for this chunk */ + int count; /* sge count for this chunk */ +}; +struct svc_rdma_fastreg_mr { + struct ib_mr *mr; + void *kva; + struct ib_fast_reg_page_list *page_list; + int page_list_len; + unsigned long access_flags; + unsigned long map_len; + enum dma_data_direction direction; + struct list_head frmr_list; +}; +struct svc_rdma_req_map { + struct svc_rdma_fastreg_mr *frmr; + unsigned long count; + union { + struct kvec sge[RPCSVC_MAXPAGES]; + struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES]; + }; +}; +#define RDMACTXT_F_FAST_UNREG 1 +#define RDMACTXT_F_LAST_CTXT 2 + +#define SVCRDMA_DEVCAP_FAST_REG 1 /* fast mr registration */ +#define SVCRDMA_DEVCAP_READ_W_INV 2 /* read w/ invalidate */ + +struct svcxprt_rdma { + struct svc_xprt sc_xprt; /* SVC transport structure */ + struct rdma_cm_id *sc_cm_id; /* RDMA connection id */ + struct list_head sc_accept_q; /* Conn. 
waiting accept */ + int sc_ord; /* RDMA read limit */ + int sc_max_sge; + + int sc_sq_depth; /* Depth of SQ */ + atomic_t sc_sq_count; /* Number of SQ WR on queue */ + + int sc_max_requests; /* Depth of RQ */ + int sc_max_req_size; /* Size of each RQ WR buf */ + + struct ib_pd *sc_pd; + + atomic_t sc_dma_used; + atomic_t sc_ctxt_used; + struct list_head sc_rq_dto_q; + spinlock_t sc_rq_dto_lock; + struct ib_qp *sc_qp; + struct ib_cq *sc_rq_cq; + struct ib_cq *sc_sq_cq; + struct ib_mr *sc_phys_mr; /* MR for server memory */ + u32 sc_dev_caps; /* distilled device caps */ + u32 sc_dma_lkey; /* local dma key */ + unsigned int sc_frmr_pg_list_len; + struct list_head sc_frmr_q; + spinlock_t sc_frmr_q_lock; + + spinlock_t sc_lock; /* transport lock */ + + wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ + unsigned long sc_flags; + struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */ + struct list_head sc_read_complete_q; + struct work_struct sc_work; +}; +/* sc_flags */ +#define RDMAXPRT_RQ_PENDING 1 +#define RDMAXPRT_SQ_PENDING 2 +#define RDMAXPRT_CONN_PENDING 3 + +#define RPCRDMA_LISTEN_BACKLOG 10 +/* The default ORD value is based on two outstanding full-size writes with a + * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ +#define RPCRDMA_ORD (64/4) +#define RPCRDMA_SQ_DEPTH_MULT 8 +#define RPCRDMA_MAX_THREADS 16 +#define RPCRDMA_MAX_REQUESTS 16 +#define RPCRDMA_MAX_REQ_SIZE 4096 + +/* svc_rdma_marshal.c */ +extern void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *, + int *, int *); +extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); +extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *); +extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, + struct rpcrdma_msg *, + enum rpcrdma_errcode, u32 *); +extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); +extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); +extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, + __be32, __be64, u32); +extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *, + struct rpcrdma_msg *, + struct rpcrdma_msg *, + enum rpcrdma_proc); +extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *); + +/* svc_rdma_recvfrom.c */ +extern int svc_rdma_recvfrom(struct svc_rqst *); + +/* svc_rdma_sendto.c */ +extern int svc_rdma_sendto(struct svc_rqst *); + +/* svc_rdma_transport.c */ +extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); +extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, + enum rpcrdma_errcode); +struct page *svc_rdma_get_page(void); +extern int svc_rdma_post_recv(struct svcxprt_rdma *); +extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); +extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); +extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); +extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt); +extern struct svc_rdma_req_map *svc_rdma_get_req_map(void); +extern void svc_rdma_put_req_map(struct svc_rdma_req_map *); +extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *); +extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *); +extern void svc_rdma_put_frmr(struct svcxprt_rdma *, + struct svc_rdma_fastreg_mr *); +extern void svc_sq_reap(struct svcxprt_rdma *); +extern void svc_rq_reap(struct svcxprt_rdma *); +extern struct svc_xprt_class svc_rdma_class; +extern void 
svc_rdma_prep_reply_hdr(struct svc_rqst *); + +/* svc_rdma.c */ +extern int svc_rdma_init(void); +extern void svc_rdma_cleanup(void); + +/* + * Returns the address of the first read chunk or <nul> if no read chunk is + * present + */ +static inline struct rpcrdma_read_chunk * +svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp) +{ + struct rpcrdma_read_chunk *ch = + (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; + + if (ch->rc_discrim == 0) + return NULL; + + return ch; +} + +/* + * Returns the address of the first read write array element or <nul> if no + * write array list is present + */ +static inline struct rpcrdma_write_array * +svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp) +{ + if (rmsgp->rm_body.rm_chunks[0] != 0 + || rmsgp->rm_body.rm_chunks[1] == 0) + return NULL; + + return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1]; +} + +/* + * Returns the address of the first reply array element or <nul> if no + * reply array is present + */ +static inline struct rpcrdma_write_array * +svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp) +{ + struct rpcrdma_read_chunk *rch; + struct rpcrdma_write_array *wr_ary; + struct rpcrdma_write_array *rp_ary; + + /* XXX: Need to fix when reply list may occur with read-list and/or + * write list */ + if (rmsgp->rm_body.rm_chunks[0] != 0 || + rmsgp->rm_body.rm_chunks[1] != 0) + return NULL; + + rch = svc_rdma_get_read_chunk(rmsgp); + if (rch) { + while (rch->rc_discrim) + rch++; + + /* The reply list follows an empty write array located + * at 'rc_position' here. The reply array is at rc_target. + */ + rp_ary = (struct rpcrdma_write_array *)&rch->rc_target; + + goto found_it; + } + + wr_ary = svc_rdma_get_write_array(rmsgp); + if (wr_ary) { + rp_ary = (struct rpcrdma_write_array *) + &wr_ary-> + wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length; + + goto found_it; + } + + /* No read list, no write list */ + rp_ary = (struct rpcrdma_write_array *) + &rmsgp->rm_body.rm_chunks[2]; + + found_it: + if (rp_ary->wc_discrim == 0) + return NULL; + + return rp_ary; +} +#endif diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h new file mode 100644 index 00000000..b3f64b12 --- /dev/null +++ b/include/linux/sunrpc/svc_xprt.h @@ -0,0 +1,208 @@ +/* + * linux/include/linux/sunrpc/svc_xprt.h + * + * RPC server transport I/O + */ + +#ifndef SUNRPC_SVC_XPRT_H +#define SUNRPC_SVC_XPRT_H + +#include <linux/sunrpc/svc.h> + +struct module; + +struct svc_xprt_ops { + struct svc_xprt *(*xpo_create)(struct svc_serv *, + struct net *net, + struct sockaddr *, int, + int); + struct svc_xprt *(*xpo_accept)(struct svc_xprt *); + int (*xpo_has_wspace)(struct svc_xprt *); + int (*xpo_recvfrom)(struct svc_rqst *); + void (*xpo_prep_reply_hdr)(struct svc_rqst *); + int (*xpo_sendto)(struct svc_rqst *); + void (*xpo_release_rqst)(struct svc_rqst *); + void (*xpo_detach)(struct svc_xprt *); + void (*xpo_free)(struct svc_xprt *); +}; + +struct svc_xprt_class { + const char *xcl_name; + struct module *xcl_owner; + struct svc_xprt_ops *xcl_ops; + struct list_head xcl_list; + u32 xcl_max_payload; +}; + +/* + * This is embedded in an object that wants a callback before deleting + * an xprt; intended for use by NFSv4.1, which needs to know when a + * client's tcp connection (and hence possibly a backchannel) goes away. 
+ */ +struct svc_xpt_user { + struct list_head list; + void (*callback)(struct svc_xpt_user *); +}; + +struct svc_xprt { + struct svc_xprt_class *xpt_class; + struct svc_xprt_ops *xpt_ops; + struct kref xpt_ref; + struct list_head xpt_list; + struct list_head xpt_ready; + unsigned long xpt_flags; +#define XPT_BUSY 0 /* enqueued/receiving */ +#define XPT_CONN 1 /* conn pending */ +#define XPT_CLOSE 2 /* dead or dying */ +#define XPT_DATA 3 /* data pending */ +#define XPT_TEMP 4 /* connected transport */ +#define XPT_DEAD 6 /* transport closed */ +#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */ +#define XPT_DEFERRED 8 /* deferred request pending */ +#define XPT_OLD 9 /* used for xprt aging mark+sweep */ +#define XPT_DETACHED 10 /* detached from tempsocks list */ +#define XPT_LISTENER 11 /* listening endpoint */ +#define XPT_CACHE_AUTH 12 /* cache auth info */ + + struct svc_serv *xpt_server; /* service for transport */ + atomic_t xpt_reserved; /* space on outq that is rsvd */ + struct mutex xpt_mutex; /* to serialize sending data */ + spinlock_t xpt_lock; /* protects sk_deferred + * and xpt_auth_cache */ + void *xpt_auth_cache;/* auth cache */ + struct list_head xpt_deferred; /* deferred requests that need + * to be revisited */ + struct sockaddr_storage xpt_local; /* local address */ + size_t xpt_locallen; /* length of address */ + struct sockaddr_storage xpt_remote; /* remote peer's address */ + size_t xpt_remotelen; /* length of address */ + struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ + struct list_head xpt_users; /* callbacks on free */ + + struct net *xpt_net; + struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ +}; + +static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) +{ + spin_lock(&xpt->xpt_lock); + list_del_init(&u->list); + spin_unlock(&xpt->xpt_lock); +} + +static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) +{ + spin_lock(&xpt->xpt_lock); + if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) { + /* + * The connection is about to be deleted soon (or, + * worse, may already be deleted--in which case we've + * already notified the xpt_users).
+ */ + spin_unlock(&xpt->xpt_lock); + return -ENOTCONN; + } + list_add(&u->list, &xpt->xpt_users); + spin_unlock(&xpt->xpt_lock); + return 0; +} + +int svc_reg_xprt_class(struct svc_xprt_class *); +void svc_unreg_xprt_class(struct svc_xprt_class *); +void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, + struct svc_serv *); +int svc_create_xprt(struct svc_serv *, const char *, struct net *, + const int, const unsigned short, int); +void svc_xprt_enqueue(struct svc_xprt *xprt); +void svc_xprt_received(struct svc_xprt *); +void svc_xprt_put(struct svc_xprt *xprt); +void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); +void svc_close_xprt(struct svc_xprt *xprt); +int svc_port_is_privileged(struct sockaddr *sin); +int svc_print_xprts(char *buf, int maxlen); +struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, + struct net *net, const sa_family_t af, + const unsigned short port); +int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen); + +static inline void svc_xprt_get(struct svc_xprt *xprt) +{ + kref_get(&xprt->xpt_ref); +} +static inline void svc_xprt_set_local(struct svc_xprt *xprt, + const struct sockaddr *sa, + const size_t salen) +{ + memcpy(&xprt->xpt_local, sa, salen); + xprt->xpt_locallen = salen; +} +static inline void svc_xprt_set_remote(struct svc_xprt *xprt, + const struct sockaddr *sa, + const size_t salen) +{ + memcpy(&xprt->xpt_remote, sa, salen); + xprt->xpt_remotelen = salen; +} +static inline unsigned short svc_addr_port(const struct sockaddr *sa) +{ + const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; + const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa; + + switch (sa->sa_family) { + case AF_INET: + return ntohs(sin->sin_port); + case AF_INET6: + return ntohs(sin6->sin6_port); + } + + return 0; +} + +static inline size_t svc_addr_len(const struct sockaddr *sa) +{ + switch (sa->sa_family) { + case AF_INET: + return sizeof(struct sockaddr_in); + case AF_INET6: + return sizeof(struct sockaddr_in6); + } + + return 0; +} + +static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt) +{ + return svc_addr_port((const struct sockaddr *)&xprt->xpt_local); +} + +static inline unsigned short svc_xprt_remote_port(const struct svc_xprt *xprt) +{ + return svc_addr_port((const struct sockaddr *)&xprt->xpt_remote); +} + +static inline char *__svc_print_addr(const struct sockaddr *addr, + char *buf, const size_t len) +{ + const struct sockaddr_in *sin = (const struct sockaddr_in *)addr; + const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr; + + switch (addr->sa_family) { + case AF_INET: + snprintf(buf, len, "%pI4, port=%u", &sin->sin_addr, + ntohs(sin->sin_port)); + break; + + case AF_INET6: + snprintf(buf, len, "%pI6, port=%u", + &sin6->sin6_addr, + ntohs(sin6->sin6_port)); + break; + + default: + snprintf(buf, len, "unknown address type: %d", addr->sa_family); + break; + } + + return buf; +} +#endif /* SUNRPC_SVC_XPRT_H */ diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h new file mode 100644 index 00000000..548790e9 --- /dev/null +++ b/include/linux/sunrpc/svcauth.h @@ -0,0 +1,180 @@ +/* + * linux/include/linux/sunrpc/svcauth.h + * + * RPC server-side authentication stuff. 
+ * + * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_SUNRPC_SVCAUTH_H_ +#define _LINUX_SUNRPC_SVCAUTH_H_ + +#ifdef __KERNEL__ + +#include <linux/string.h> +#include <linux/sunrpc/msg_prot.h> +#include <linux/sunrpc/cache.h> +#include <linux/hash.h> + +#define SVC_CRED_NGROUPS 32 +struct svc_cred { + uid_t cr_uid; + gid_t cr_gid; + struct group_info *cr_group_info; +}; + +struct svc_rqst; /* forward decl */ +struct in6_addr; + +/* Authentication is done in the context of a domain. + * + * Currently, the nfs server uses the auth_domain to stand + * for the "client" listed in /etc/exports. + * + * More generally, a domain might represent a group of clients using + * a common mechanism for authentication and having a common mapping + * between local identity (uid) and network identity. All clients + * in a domain have similar general access rights. Each domain can + * contain multiple principals which will have different specific rights + * based on normal Discretionary Access Control. + * + * A domain is created by an authentication flavour module based on name + * only. Userspace then fills in detail on demand. + * + * In the case of auth_unix and auth_null, the auth_domain is also + * associated with entries in another cache representing the mapping + * of ip addresses to the given client. + */ +struct auth_domain { + struct kref ref; + struct hlist_node hash; + char *name; + struct auth_ops *flavour; +}; + +/* + * Each authentication flavour registers an auth_ops + * structure. + * name is simply the name. + * flavour gives the auth flavour. It determines where the flavour is registered + * accept() is given a request and should verify it. + * It should inspect the authenticator and verifier, and possibly the data. + * If there is a problem with the authentication *authp should be set. + * The return value of accept() can indicate: + * OK - authorised. client and credential are set in rqstp. + * reqbuf points to arguments + * resbuf points to good place for results. verifier + * is (probably) already in place. Certainly space is + * reserved for it. + * DROP - simply drop the request. It may have been deferred + * GARBAGE - rpc garbage_args error + * SYSERR - rpc system_err error + * DENIED - authp holds reason for denial. + * COMPLETE - the reply is encoded already and ready to be sent; no + * further processing is necessary. (This is used for processing + * null procedure calls which are used to set up encryption + * contexts.) + * + * accept is passed the proc number so that it can accept NULL rpc requests + * even if it cannot authenticate the client (as is sometimes appropriate). + * + * release() is given a request after the procedure has been run. + * It should sign/encrypt the results if needed + * It should return: + * OK - the resbuf is ready to be sent + * DROP - the reply should be quietly dropped + * DENIED - authp holds a reason for MSG_DENIED + * SYSERR - rpc system_err + * + * domain_release() + * This call releases a domain. + * set_client() + * Given a pending request (struct svc_rqst), finds and assigns + * an appropriate 'auth_domain' as the client.
+ */ +struct auth_ops { + char * name; + struct module *owner; + int flavour; + int (*accept)(struct svc_rqst *rq, __be32 *authp); + int (*release)(struct svc_rqst *rq); + void (*domain_release)(struct auth_domain *); + int (*set_client)(struct svc_rqst *rq); +}; + +#define SVC_GARBAGE 1 +#define SVC_SYSERR 2 +#define SVC_VALID 3 +#define SVC_NEGATIVE 4 +#define SVC_OK 5 +#define SVC_DROP 6 +#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely + * lost so if there is a tcp connection, it + * should be closed + */ +#define SVC_DENIED 8 +#define SVC_PENDING 9 +#define SVC_COMPLETE 10 + +struct svc_xprt; + +extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp); +extern int svc_authorise(struct svc_rqst *rqstp); +extern int svc_set_client(struct svc_rqst *rqstp); +extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops); +extern void svc_auth_unregister(rpc_authflavor_t flavor); + +extern struct auth_domain *unix_domain_find(char *name); +extern void auth_domain_put(struct auth_domain *item); +extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom); +extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new); +extern struct auth_domain *auth_domain_find(char *name); +extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr); +extern int auth_unix_forget_old(struct auth_domain *dom); +extern void svcauth_unix_purge(void); +extern void svcauth_unix_info_release(struct svc_xprt *xpt); +extern int svcauth_unix_set_client(struct svc_rqst *rqstp); + +extern int unix_gid_cache_create(struct net *net); +extern void unix_gid_cache_destroy(struct net *net); + +static inline unsigned long hash_str(char *name, int bits) +{ + unsigned long hash = 0; + unsigned long l = 0; + int len = 0; + unsigned char c; + do { + if (unlikely(!(c = *name++))) { + c = (char)len; len = -1; + } + l = (l << 8) | c; + len++; + if ((len & (BITS_PER_LONG/8-1))==0) + hash = hash_long(hash^l, BITS_PER_LONG); + } while (len); + return hash >> (BITS_PER_LONG - bits); +} + +static inline unsigned long hash_mem(char *buf, int length, int bits) +{ + unsigned long hash = 0; + unsigned long l = 0; + int len = 0; + unsigned char c; + do { + if (len == length) { + c = (char)len; len = -1; + } else + c = *buf++; + l = (l << 8) | c; + len++; + if ((len & (BITS_PER_LONG/8-1))==0) + hash = hash_long(hash^l, BITS_PER_LONG); + } while (len); + return hash >> (BITS_PER_LONG - bits); +} + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SUNRPC_SVCAUTH_H_ */ diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h new file mode 100644 index 00000000..7c32daa0 --- /dev/null +++ b/include/linux/sunrpc/svcauth_gss.h @@ -0,0 +1,28 @@ +/* + * linux/include/linux/sunrpc/svcauth_gss.h + * + * Bruce Fields <bfields@umich.edu> + * Copyright (c) 2002 The Regents of the University of Michigan + */ + +#ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H +#define _LINUX_SUNRPC_SVCAUTH_GSS_H + +#ifdef __KERNEL__ +#include <linux/sched.h> +#include <linux/sunrpc/types.h> +#include <linux/sunrpc/xdr.h> +#include <linux/sunrpc/svcauth.h> +#include <linux/sunrpc/svcsock.h> +#include <linux/sunrpc/auth_gss.h> + +int gss_svc_init(void); +void gss_svc_shutdown(void); +int gss_svc_init_net(struct net *net); +void gss_svc_shutdown_net(struct net *net); +int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); +u32 svcauth_gss_flavor(struct auth_domain *dom); +char *svc_gss_principal(struct svc_rqst *); + +#endif /* 
__KERNEL__ */ +#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h new file mode 100644 index 00000000..cb4ac69e --- /dev/null +++ b/include/linux/sunrpc/svcsock.h @@ -0,0 +1,59 @@ +/* + * linux/include/linux/sunrpc/svcsock.h + * + * RPC server socket I/O. + * + * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef SUNRPC_SVCSOCK_H +#define SUNRPC_SVCSOCK_H + +#include <linux/sunrpc/svc.h> +#include <linux/sunrpc/svc_xprt.h> + +/* + * RPC server socket. + */ +struct svc_sock { + struct svc_xprt sk_xprt; + struct socket * sk_sock; /* berkeley socket layer */ + struct sock * sk_sk; /* INET layer */ + + /* We keep the old state_change and data_ready CB's here */ + void (*sk_ostate)(struct sock *); + void (*sk_odata)(struct sock *, int bytes); + void (*sk_owspace)(struct sock *); + + /* private TCP part */ + u32 sk_reclen; /* length of record */ + u32 sk_tcplen; /* current read length */ + struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */ +}; + +/* + * Function prototypes. + */ +void svc_close_net(struct svc_serv *, struct net *); +int svc_recv(struct svc_rqst *, long); +int svc_send(struct svc_rqst *); +void svc_drop(struct svc_rqst *); +void svc_sock_update_bufs(struct svc_serv *serv); +int svc_sock_names(struct svc_serv *serv, char *buf, + const size_t buflen, + const char *toclose); +int svc_addsock(struct svc_serv *serv, const int fd, + char *name_return, const size_t len); +void svc_init_xprt_sock(void); +void svc_cleanup_xprt_sock(void); +struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot); +void svc_sock_destroy(struct svc_xprt *); + +/* + * svc_makesock socket characteristics + */ +#define SVC_SOCK_DEFAULTS (0U) +#define SVC_SOCK_ANONYMOUS (1U << 0) /* don't register with pmap */ +#define SVC_SOCK_TEMPORARY (1U << 1) /* flag socket as temporary */ + +#endif /* SUNRPC_SVCSOCK_H */ diff --git a/include/linux/sunrpc/timer.h b/include/linux/sunrpc/timer.h new file mode 100644 index 00000000..697d6e69 --- /dev/null +++ b/include/linux/sunrpc/timer.h @@ -0,0 +1,49 @@ +/* + * linux/include/linux/sunrpc/timer.h + * + * Declarations for the RPC transport timer. 
+ * + * Copyright (C) 2002 Trond Myklebust <trond.myklebust@fys.uio.no> + */ + +#ifndef _LINUX_SUNRPC_TIMER_H +#define _LINUX_SUNRPC_TIMER_H + +#include <linux/atomic.h> + +struct rpc_rtt { + unsigned long timeo; /* default timeout value */ + unsigned long srtt[5]; /* smoothed round trip time << 3 */ + unsigned long sdrtt[5]; /* smoothed medium deviation of RTT */ + int ntimeouts[5]; /* Number of timeouts for the last request */ +}; + + +extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo); +extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m); +extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer); + +static inline void rpc_set_timeo(struct rpc_rtt *rt, int timer, int ntimeo) +{ + int *t; + if (!timer) + return; + t = &rt->ntimeouts[timer-1]; + if (ntimeo < *t) { + if (*t > 0) + (*t)--; + } else { + if (ntimeo > 8) + ntimeo = 8; + *t = ntimeo; + } +} + +static inline int rpc_ntimeo(struct rpc_rtt *rt, int timer) +{ + if (!timer) + return 0; + return rt->ntimeouts[timer-1]; +} + +#endif /* _LINUX_SUNRPC_TIMER_H */ diff --git a/include/linux/sunrpc/types.h b/include/linux/sunrpc/types.h new file mode 100644 index 00000000..d222f475 --- /dev/null +++ b/include/linux/sunrpc/types.h @@ -0,0 +1,22 @@ +/* + * linux/include/linux/sunrpc/types.h + * + * Generic types and misc stuff for RPC. + * + * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_SUNRPC_TYPES_H_ +#define _LINUX_SUNRPC_TYPES_H_ + +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/sunrpc/debug.h> +#include <linux/list.h> + +/* + * Shorthands + */ +#define signalled() (signal_pending(current)) + +#endif /* _LINUX_SUNRPC_TYPES_H_ */ diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h new file mode 100644 index 00000000..af70af33 --- /dev/null +++ b/include/linux/sunrpc/xdr.h @@ -0,0 +1,231 @@ +/* + * XDR standard data types and function declarations + * + * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de> + * + * Based on: + * RFC 4506 "XDR: External Data Representation Standard", May 2006 + */ + +#ifndef _SUNRPC_XDR_H_ +#define _SUNRPC_XDR_H_ + +#ifdef __KERNEL__ + +#include <linux/uio.h> +#include <asm/byteorder.h> +#include <asm/unaligned.h> +#include <linux/scatterlist.h> + +/* + * Buffer adjustment + */ +#define XDR_QUADLEN(l) (((l) + 3) >> 2) + +/* + * Generic opaque `network object.' At the kernel level, this type + * is used only by lockd. + */ +#define XDR_MAX_NETOBJ 1024 +struct xdr_netobj { + unsigned int len; + u8 * data; +}; + +/* + * This is the legacy generic XDR function. rqstp is either a rpc_rqst + * (client side) or svc_rqst pointer (server side). + * Encode functions always assume there's enough room in the buffer. + */ +typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj); + +/* + * Basic structure for transmission/reception of a client XDR message. + * Features a header (for a linear buffer containing RPC headers + * and the data payload for short messages), and then an array of + * pages. + * The tail iovec allows you to append data after the page array. Its + * main interest is for appending padding to the pages in order to + * satisfy the int_32-alignment requirements in RFC1832. + * + * For the future, we might want to string several of these together + * in a list if anybody wants to make use of NFSv4 COMPOUND + * operations and/or has a need for scatter/gather involving pages. 
+ */ +struct xdr_buf { + struct kvec head[1], /* RPC header + non-page data */ + tail[1]; /* Appended after page data */ + + struct page ** pages; /* Array of contiguous pages */ + unsigned int page_base, /* Start of page data */ + page_len, /* Length of page data */ + flags; /* Flags for data disposition */ +#define XDRBUF_READ 0x01 /* target of file read */ +#define XDRBUF_WRITE 0x02 /* source of file write */ + + unsigned int buflen, /* Total length of storage buffer */ + len; /* Length of XDR encoded message */ +}; + +/* + * pre-xdr'ed macros. + */ + +#define xdr_zero cpu_to_be32(0) +#define xdr_one cpu_to_be32(1) +#define xdr_two cpu_to_be32(2) + +#define rpc_success cpu_to_be32(RPC_SUCCESS) +#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) +#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) +#define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL) +#define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS) +#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) +#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) + +#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) +#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) +#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED) +#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF) +#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF) +#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) +#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) +#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) +#define rpc_autherr_oldseqnum cpu_to_be32(101) + +/* + * Miscellaneous XDR helper functions + */ +__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int len); +__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int len); +__be32 *xdr_encode_string(__be32 *p, const char *s); +__be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp, + unsigned int maxlen); +__be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *); +__be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *); + +void xdr_encode_pages(struct xdr_buf *, struct page **, unsigned int, + unsigned int); +void xdr_inline_pages(struct xdr_buf *, unsigned int, + struct page **, unsigned int, unsigned int); +void xdr_terminate_string(struct xdr_buf *, const u32); + +static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len) +{ + return xdr_encode_opaque(p, s, len); +} + +/* + * Decode 64bit quantities (NFSv3 support) + */ +static inline __be32 * +xdr_encode_hyper(__be32 *p, __u64 val) +{ + put_unaligned_be64(val, p); + return p + 2; +} + +static inline __be32 * +xdr_decode_hyper(__be32 *p, __u64 *valp) +{ + *valp = get_unaligned_be64(p); + return p + 2; +} + +static inline __be32 * +xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len) +{ + memcpy(ptr, p, len); + return p + XDR_QUADLEN(len); +} + +/* + * Adjust kvec to reflect end of xdr'ed data (RPC client XDR) + */ +static inline int +xdr_adjust_iovec(struct kvec *iov, __be32 *p) +{ + return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base); +} + +/* + * XDR buffer helper functions + */ +extern void xdr_shift_buf(struct xdr_buf *, size_t); +extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); +extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); +extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int); +extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); +extern int 
write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); + +/* + * Helper structure for copying from an sk_buff. + */ +struct xdr_skb_reader { + struct sk_buff *skb; + unsigned int offset; + size_t count; + __wsum csum; +}; + +typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len); + +size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len); +extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); +extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, + struct xdr_skb_reader *, xdr_skb_read_actor); + +extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); +extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); + +struct xdr_array2_desc; +typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem); +struct xdr_array2_desc { + unsigned int elem_size; + unsigned int array_len; + unsigned int array_maxlen; + xdr_xcode_elem_t xcode; +}; + +extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, + struct xdr_array2_desc *desc); +extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, + struct xdr_array2_desc *desc); +extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase, + size_t len); + +/* + * Provide some simple tools for XDR buffer overflow-checking etc. + */ +struct xdr_stream { + __be32 *p; /* start of available buffer */ + struct xdr_buf *buf; /* XDR buffer to read/write */ + + __be32 *end; /* end of available buffer space */ + struct kvec *iov; /* pointer to the current kvec */ + struct kvec scratch; /* Scratch buffer */ + struct page **page_ptr; /* pointer to the current page */ +}; + +/* + * These are the xdr_stream style generic XDR encode and decode functions. + */ +typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj); +typedef int (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj); + +extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); +extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); +extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, + unsigned int base, unsigned int len); +extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); +extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, + struct page **pages, unsigned int len); +extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen); +extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); +extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len); +extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); +extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); + +#endif /* __KERNEL__ */ + +#endif /* _SUNRPC_XDR_H_ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h new file mode 100644 index 00000000..77d278de --- /dev/null +++ b/include/linux/sunrpc/xprt.h @@ -0,0 +1,404 @@ +/* + * linux/include/linux/sunrpc/xprt.h + * + * Declarations for the RPC transport interface. 
+ * + * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> + */ + +#ifndef _LINUX_SUNRPC_XPRT_H +#define _LINUX_SUNRPC_XPRT_H + +#include <linux/uio.h> +#include <linux/socket.h> +#include <linux/in.h> +#include <linux/ktime.h> +#include <linux/sunrpc/sched.h> +#include <linux/sunrpc/xdr.h> +#include <linux/sunrpc/msg_prot.h> + +#ifdef __KERNEL__ + +#define RPC_MIN_SLOT_TABLE (2U) +#define RPC_DEF_SLOT_TABLE (16U) +#define RPC_MAX_SLOT_TABLE_LIMIT (65536U) +#define RPC_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE_LIMIT + +/* + * This describes a timeout strategy + */ +struct rpc_timeout { + unsigned long to_initval, /* initial timeout */ + to_maxval, /* max timeout */ + to_increment; /* if !exponential */ + unsigned int to_retries; /* max # of retries */ + unsigned char to_exponential; +}; + +enum rpc_display_format_t { + RPC_DISPLAY_ADDR = 0, + RPC_DISPLAY_PORT, + RPC_DISPLAY_PROTO, + RPC_DISPLAY_HEX_ADDR, + RPC_DISPLAY_HEX_PORT, + RPC_DISPLAY_NETID, + RPC_DISPLAY_MAX, +}; + +struct rpc_task; +struct rpc_xprt; +struct seq_file; + +/* + * This describes a complete RPC request + */ +struct rpc_rqst { + /* + * This is the user-visible part + */ + struct rpc_xprt * rq_xprt; /* RPC client */ + struct xdr_buf rq_snd_buf; /* send buffer */ + struct xdr_buf rq_rcv_buf; /* recv buffer */ + + /* + * This is the private part + */ + struct rpc_task * rq_task; /* RPC task data */ + struct rpc_cred * rq_cred; /* Bound cred */ + __be32 rq_xid; /* request XID */ + int rq_cong; /* has incremented xprt->cong */ + u32 rq_seqno; /* gss seq no. used on req. */ + int rq_enc_pages_num; + struct page **rq_enc_pages; /* scratch pages for use by + gss privacy code */ + void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ + struct list_head rq_list; + + __u32 * rq_buffer; /* XDR encode buffer */ + size_t rq_callsize, + rq_rcvsize; + size_t rq_xmit_bytes_sent; /* total bytes sent */ + size_t rq_reply_bytes_recvd; /* total reply bytes */ + /* received */ + + struct xdr_buf rq_private_buf; /* The receive buffer + * used in the softirq. 
+ */ + unsigned long rq_majortimeo; /* major timeout alarm */ + unsigned long rq_timeout; /* Current timeout value */ + ktime_t rq_rtt; /* round-trip time */ + unsigned int rq_retries; /* # of retries */ + unsigned int rq_connect_cookie; + /* A cookie used to track the + state of the transport + connection */ + + /* + * Partial send handling + */ + u32 rq_bytes_sent; /* Bytes we have sent */ + + ktime_t rq_xtime; /* transmit time stamp */ + int rq_ntrans; + +#if defined(CONFIG_SUNRPC_BACKCHANNEL) + struct list_head rq_bc_list; /* Callback service list */ + unsigned long rq_bc_pa_state; /* Backchannel prealloc state */ + struct list_head rq_bc_pa_list; /* Backchannel prealloc list */ +#endif /* CONFIG_SUNRPC_BACKCHANEL */ +}; +#define rq_svec rq_snd_buf.head +#define rq_slen rq_snd_buf.len + +struct rpc_xprt_ops { + void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); + int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); + void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); + void (*rpcbind)(struct rpc_task *task); + void (*set_port)(struct rpc_xprt *xprt, unsigned short port); + void (*connect)(struct rpc_task *task); + void * (*buf_alloc)(struct rpc_task *task, size_t size); + void (*buf_free)(void *buffer); + int (*send_request)(struct rpc_task *task); + void (*set_retrans_timeout)(struct rpc_task *task); + void (*timer)(struct rpc_task *task); + void (*release_request)(struct rpc_task *task); + void (*close)(struct rpc_xprt *xprt); + void (*destroy)(struct rpc_xprt *xprt); + void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); +}; + +/* + * RPC transport identifiers + * + * To preserve compatibility with the historical use of raw IP protocol + * id's for transport selection, UDP and TCP identifiers are specified + * with the previous values. No such restriction exists for new transports, + * except that they may not collide with these values (17 and 6, + * respectively). 
+ */ +#define XPRT_TRANSPORT_BC (1 << 31) +enum xprt_transports { + XPRT_TRANSPORT_UDP = IPPROTO_UDP, + XPRT_TRANSPORT_TCP = IPPROTO_TCP, + XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, + XPRT_TRANSPORT_RDMA = 256, + XPRT_TRANSPORT_LOCAL = 257, +}; + +struct rpc_xprt { + atomic_t count; /* Reference count */ + struct rpc_xprt_ops * ops; /* transport methods */ + + const struct rpc_timeout *timeout; /* timeout parms */ + struct sockaddr_storage addr; /* server address */ + size_t addrlen; /* size of server address */ + int prot; /* IP protocol */ + + unsigned long cong; /* current congestion */ + unsigned long cwnd; /* congestion window */ + + size_t max_payload; /* largest RPC payload size, + in bytes */ + unsigned int tsh_size; /* size of transport specific + header */ + + struct rpc_wait_queue binding; /* requests waiting on rpcbind */ + struct rpc_wait_queue sending; /* requests waiting to send */ + struct rpc_wait_queue pending; /* requests in flight */ + struct rpc_wait_queue backlog; /* waiting for slot */ + struct list_head free; /* free slots */ + unsigned int max_reqs; /* max number of slots */ + unsigned int min_reqs; /* min number of slots */ + atomic_t num_reqs; /* total slots */ + unsigned long state; /* transport state */ + unsigned char shutdown : 1, /* being shut down */ + resvport : 1; /* use a reserved port */ + unsigned int bind_index; /* bind function index */ + + /* + * Connection of transports + */ + unsigned long bind_timeout, + reestablish_timeout; + unsigned int connect_cookie; /* A cookie that gets bumped + every time the transport + is reconnected */ + + /* + * Disconnection of idle transports + */ + struct work_struct task_cleanup; + struct timer_list timer; + unsigned long last_used, + idle_timeout; + + /* + * Send stuff + */ + spinlock_t transport_lock; /* lock transport info */ + spinlock_t reserve_lock; /* lock slot table */ + u32 xid; /* Next XID value to use */ + struct rpc_task * snd_task; /* Task blocked in send */ + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ +#if defined(CONFIG_SUNRPC_BACKCHANNEL) + struct svc_serv *bc_serv; /* The RPC service which will */ + /* process the callback */ + unsigned int bc_alloc_count; /* Total number of preallocs */ + spinlock_t bc_pa_lock; /* Protects the preallocated + * items */ + struct list_head bc_pa_list; /* List of preallocated + * backchannel rpc_rqst's */ +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ + struct list_head recv; + + struct { + unsigned long bind_count, /* total number of binds */ + connect_count, /* total number of connects */ + connect_start, /* connect start timestamp */ + connect_time, /* jiffies waiting for connect */ + sends, /* how many complete requests */ + recvs, /* how many complete requests */ + bad_xids, /* lookup_rqst didn't find XID */ + max_slots; /* max rpc_slots used */ + + unsigned long long req_u, /* average requests on the wire */ + bklog_u, /* backlog queue utilization */ + sending_u, /* send q utilization */ + pending_u; /* pend q utilization */ + } stat; + + struct net *xprt_net; + const char *servername; + const char *address_strings[RPC_DISPLAY_MAX]; +}; + +#if defined(CONFIG_SUNRPC_BACKCHANNEL) +/* + * Backchannel flags + */ +#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */ + /* buffer in use */ +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ + +#if defined(CONFIG_SUNRPC_BACKCHANNEL) +static inline int bc_prealloc(struct rpc_rqst *req) +{ + return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); +} +#else +static inline int bc_prealloc(struct rpc_rqst 
*req) +{ + return 0; +} +#endif /* CONFIG_SUNRPC_BACKCHANNEL */ + +struct xprt_create { + int ident; /* XPRT_TRANSPORT identifier */ + struct net * net; + struct sockaddr * srcaddr; /* optional local address */ + struct sockaddr * dstaddr; /* remote peer address */ + size_t addrlen; + const char *servername; + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ +}; + +struct xprt_class { + struct list_head list; + int ident; /* XPRT_TRANSPORT identifier */ + struct rpc_xprt * (*setup)(struct xprt_create *); + struct module *owner; + char name[32]; +}; + +/* + * Generic internal transport functions + */ +struct rpc_xprt *xprt_create_transport(struct xprt_create *args); +void xprt_connect(struct rpc_task *task); +void xprt_reserve(struct rpc_task *task); +int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); +int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); +int xprt_prepare_transmit(struct rpc_task *task); +void xprt_transmit(struct rpc_task *task); +void xprt_end_transmit(struct rpc_task *task); +int xprt_adjust_timeout(struct rpc_rqst *req); +void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_release(struct rpc_task *task); +struct rpc_xprt * xprt_get(struct rpc_xprt *xprt); +void xprt_put(struct rpc_xprt *xprt); +struct rpc_xprt * xprt_alloc(struct net *net, size_t size, + unsigned int num_prealloc, + unsigned int max_req); +void xprt_free(struct rpc_xprt *); + +static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p) +{ + return p + xprt->tsh_size; +} + +/* + * Transport switch helper functions + */ +int xprt_register_transport(struct xprt_class *type); +int xprt_unregister_transport(struct xprt_class *type); +int xprt_load_transport(const char *); +void xprt_set_retrans_timeout_def(struct rpc_task *task); +void xprt_set_retrans_timeout_rtt(struct rpc_task *task); +void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); +void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action); +void xprt_write_space(struct rpc_xprt *xprt); +void xprt_adjust_cwnd(struct rpc_task *task, int result); +struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); +void xprt_complete_rqst(struct rpc_task *task, int copied); +void xprt_release_rqst_cong(struct rpc_task *task); +void xprt_disconnect_done(struct rpc_xprt *xprt); +void xprt_force_disconnect(struct rpc_xprt *xprt); +void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); + +/* + * Reserved bit positions in xprt->state + */ +#define XPRT_LOCKED (0) +#define XPRT_CONNECTED (1) +#define XPRT_CONNECTING (2) +#define XPRT_CLOSE_WAIT (3) +#define XPRT_BOUND (4) +#define XPRT_BINDING (5) +#define XPRT_CLOSING (6) +#define XPRT_CONNECTION_ABORT (7) +#define XPRT_CONNECTION_CLOSE (8) + +static inline void xprt_set_connected(struct rpc_xprt *xprt) +{ + set_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline void xprt_clear_connected(struct rpc_xprt *xprt) +{ + clear_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline int xprt_connected(struct rpc_xprt *xprt) +{ + return test_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt) +{ + return test_and_set_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt) +{ + return test_and_clear_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline void 
xprt_clear_connecting(struct rpc_xprt *xprt) +{ + smp_mb__before_clear_bit(); + clear_bit(XPRT_CONNECTING, &xprt->state); + smp_mb__after_clear_bit(); +} + +static inline int xprt_connecting(struct rpc_xprt *xprt) +{ + return test_bit(XPRT_CONNECTING, &xprt->state); +} + +static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt) +{ + return test_and_set_bit(XPRT_CONNECTING, &xprt->state); +} + +static inline void xprt_set_bound(struct rpc_xprt *xprt) +{ + test_and_set_bit(XPRT_BOUND, &xprt->state); +} + +static inline int xprt_bound(struct rpc_xprt *xprt) +{ + return test_bit(XPRT_BOUND, &xprt->state); +} + +static inline void xprt_clear_bound(struct rpc_xprt *xprt) +{ + clear_bit(XPRT_BOUND, &xprt->state); +} + +static inline void xprt_clear_binding(struct rpc_xprt *xprt) +{ + smp_mb__before_clear_bit(); + clear_bit(XPRT_BINDING, &xprt->state); + smp_mb__after_clear_bit(); +} + +static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) +{ + return test_and_set_bit(XPRT_BINDING, &xprt->state); +} + +#endif /* __KERNEL__*/ + +#endif /* _LINUX_SUNRPC_XPRT_H */ diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h new file mode 100644 index 00000000..c2f04e1a --- /dev/null +++ b/include/linux/sunrpc/xprtrdma.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the BSD-type + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * Neither the name of the Network Appliance, Inc. nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LINUX_SUNRPC_XPRTRDMA_H +#define _LINUX_SUNRPC_XPRTRDMA_H + +/* + * rpcbind (v3+) RDMA netid. + */ +#define RPCBIND_NETID_RDMA "rdma" + +/* + * Constants. Max RPC/NFS header is big enough to account for + * additional marshaling buffers passed down by Linux client. 
+ * + * RDMA header is currently fixed max size, and is big enough for a + * fully-chunked NFS message (read chunks are the largest). Note only + * a single chunk type per message is supported currently. + */ +#define RPCRDMA_MIN_SLOT_TABLE (2U) +#define RPCRDMA_DEF_SLOT_TABLE (32U) +#define RPCRDMA_MAX_SLOT_TABLE (256U) + +#define RPCRDMA_DEF_INLINE (1024) /* default inline max */ + +#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */ + +/* memory registration strategies */ +#define RPCRDMA_PERSISTENT_REGISTRATION (1) + +enum rpcrdma_memreg { + RPCRDMA_BOUNCEBUFFERS = 0, + RPCRDMA_REGISTER, + RPCRDMA_MEMWINDOWS, + RPCRDMA_MEMWINDOWS_ASYNC, + RPCRDMA_MTHCAFMR, + RPCRDMA_FRMR, + RPCRDMA_ALLPHYSICAL, + RPCRDMA_LAST +}; + +#endif /* _LINUX_SUNRPC_XPRTRDMA_H */ diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h new file mode 100644 index 00000000..1ad36cc2 --- /dev/null +++ b/include/linux/sunrpc/xprtsock.h @@ -0,0 +1,22 @@ +/* + * linux/include/linux/sunrpc/xprtsock.h + * + * Declarations for the RPC transport socket provider. + */ + +#ifndef _LINUX_SUNRPC_XPRTSOCK_H +#define _LINUX_SUNRPC_XPRTSOCK_H + +#ifdef __KERNEL__ + +int init_socket_xprt(void); +void cleanup_socket_xprt(void); + +#define RPC_MIN_RESVPORT (1U) +#define RPC_MAX_RESVPORT (65535U) +#define RPC_DEF_MIN_RESVPORT (665U) +#define RPC_DEF_MAX_RESVPORT (1023U) + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/sunserialcore.h b/include/linux/sunserialcore.h new file mode 100644 index 00000000..68e7430b --- /dev/null +++ b/include/linux/sunserialcore.h @@ -0,0 +1,33 @@ +/* sunserialcore.h + * + * Generic SUN serial/kbd/ms layer. Based entirely + * upon drivers/sbus/char/sunserial.h which is: + * + * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) + * + * Port to new UART layer is: + * + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + */ + +#ifndef _SERIAL_SUN_H +#define _SERIAL_SUN_H + +/* Serial keyboard defines for L1-A processing... */ +#define SUNKBD_RESET 0xff +#define SUNKBD_L1 0x01 +#define SUNKBD_UP 0x80 +#define SUNKBD_A 0x4d + +extern unsigned int suncore_mouse_baud_cflag_next(unsigned int, int *); +extern int suncore_mouse_baud_detection(unsigned char, int); + +extern int sunserial_register_minors(struct uart_driver *, int); +extern void sunserial_unregister_minors(struct uart_driver *, int); + +extern int sunserial_console_match(struct console *, struct device_node *, + struct uart_driver *, int, bool); +extern void sunserial_console_termios(struct console *, + struct device_node *); + +#endif /* !(_SERIAL_SUN_H) */ diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h new file mode 100644 index 00000000..17ea468f --- /dev/null +++ b/include/linux/superhyway.h @@ -0,0 +1,107 @@ +/* + * include/linux/superhyway.h + * + * SuperHyway Bus definitions + * + * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#ifndef __LINUX_SUPERHYWAY_H +#define __LINUX_SUPERHYWAY_H + +#include <linux/device.h> + +/* + * SuperHyway IDs + */ +#define SUPERHYWAY_DEVICE_ID_SH5_DMAC 0x0183 + +struct superhyway_vcr_info { + u8 perr_flags; /* P-port Error flags */ + u8 merr_flags; /* Module Error flags */ + u16 mod_vers; /* Module Version */ + u16 mod_id; /* Module ID */ + u8 bot_mb; /* Bottom Memory block */ + u8 top_mb; /* Top Memory block */ +}; + +struct superhyway_ops { + int (*read_vcr)(unsigned long base, struct superhyway_vcr_info *vcr); + int (*write_vcr)(unsigned long base, struct superhyway_vcr_info vcr); +}; + +struct superhyway_bus { + struct superhyway_ops *ops; +}; + +extern struct superhyway_bus superhyway_channels[]; + +struct superhyway_device_id { + unsigned int id; + unsigned long driver_data; +}; + +struct superhyway_device; +extern struct bus_type superhyway_bus_type; + +struct superhyway_driver { + char *name; + + const struct superhyway_device_id *id_table; + struct device_driver drv; + + int (*probe)(struct superhyway_device *dev, const struct superhyway_device_id *id); + void (*remove)(struct superhyway_device *dev); +}; + +#define to_superhyway_driver(d) container_of((d), struct superhyway_driver, drv) + +struct superhyway_device { + char name[32]; + + struct device dev; + + struct superhyway_device_id id; + struct superhyway_driver *drv; + struct superhyway_bus *bus; + + int num_resources; + struct resource *resource; + struct superhyway_vcr_info vcr; +}; + +#define to_superhyway_device(d) container_of((d), struct superhyway_device, dev) + +#define superhyway_get_drvdata(d) dev_get_drvdata(&(d)->dev) +#define superhyway_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) + +static inline int +superhyway_read_vcr(struct superhyway_device *dev, unsigned long base, + struct superhyway_vcr_info *vcr) +{ + return dev->bus->ops->read_vcr(base, vcr); +} + +static inline int +superhyway_write_vcr(struct superhyway_device *dev, unsigned long base, + struct superhyway_vcr_info vcr) +{ + return dev->bus->ops->write_vcr(base, vcr); +} + +extern int superhyway_scan_bus(struct superhyway_bus *); + +/* drivers/sh/superhyway/superhyway.c */ +int superhyway_register_driver(struct superhyway_driver *); +void superhyway_unregister_driver(struct superhyway_driver *); +int superhyway_add_device(unsigned long base, struct superhyway_device *, struct superhyway_bus *); +int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices); + +/* drivers/sh/superhyway/superhyway-sysfs.c */ +extern struct device_attribute superhyway_dev_attrs[]; + +#endif /* __LINUX_SUPERHYWAY_H */ + diff --git a/include/linux/suspend.h b/include/linux/suspend.h new file mode 100644 index 00000000..a79a28f5 --- /dev/null +++ b/include/linux/suspend.h @@ -0,0 +1,456 @@ +#ifndef _LINUX_SUSPEND_H +#define _LINUX_SUSPEND_H + +#include <linux/swap.h> +#include <linux/notifier.h> +#include <linux/init.h> +#include <linux/pm.h> +#include <linux/mm.h> +#include <linux/freezer.h> +#include <asm/errno.h> + +#ifdef CONFIG_VT +extern void pm_set_vt_switch(int); +#else +static inline void pm_set_vt_switch(int do_switch) +{ +} +#endif + +#ifdef CONFIG_VT_CONSOLE_SLEEP +extern int pm_prepare_console(void); +extern void pm_restore_console(void); +#else +static inline int pm_prepare_console(void) +{ + return 0; +} + +static inline void pm_restore_console(void) +{ +} +#endif + +typedef int __bitwise suspend_state_t; + +#define PM_SUSPEND_ON ((__force suspend_state_t) 0) +#define PM_SUSPEND_STANDBY ((__force 
suspend_state_t) 1) +#define PM_SUSPEND_MEM ((__force suspend_state_t) 3) +#define PM_SUSPEND_MAX ((__force suspend_state_t) 4) + +enum suspend_stat_step { + SUSPEND_FREEZE = 1, + SUSPEND_PREPARE, + SUSPEND_SUSPEND, + SUSPEND_SUSPEND_LATE, + SUSPEND_SUSPEND_NOIRQ, + SUSPEND_RESUME_NOIRQ, + SUSPEND_RESUME_EARLY, + SUSPEND_RESUME +}; + +struct suspend_stats { + int success; + int fail; + int failed_freeze; + int failed_prepare; + int failed_suspend; + int failed_suspend_late; + int failed_suspend_noirq; + int failed_resume; + int failed_resume_early; + int failed_resume_noirq; +#define REC_FAILED_NUM 2 + int last_failed_dev; + char failed_devs[REC_FAILED_NUM][40]; + int last_failed_errno; + int errno[REC_FAILED_NUM]; + int last_failed_step; + enum suspend_stat_step failed_steps[REC_FAILED_NUM]; +}; + +extern struct suspend_stats suspend_stats; + +static inline void dpm_save_failed_dev(const char *name) +{ + strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev], + name, + sizeof(suspend_stats.failed_devs[0])); + suspend_stats.last_failed_dev++; + suspend_stats.last_failed_dev %= REC_FAILED_NUM; +} + +static inline void dpm_save_failed_errno(int err) +{ + suspend_stats.errno[suspend_stats.last_failed_errno] = err; + suspend_stats.last_failed_errno++; + suspend_stats.last_failed_errno %= REC_FAILED_NUM; +} + +static inline void dpm_save_failed_step(enum suspend_stat_step step) +{ + suspend_stats.failed_steps[suspend_stats.last_failed_step] = step; + suspend_stats.last_failed_step++; + suspend_stats.last_failed_step %= REC_FAILED_NUM; +} + +/** + * struct platform_suspend_ops - Callbacks for managing platform dependent + * system sleep states. + * + * @valid: Callback to determine if given system sleep state is supported by + * the platform. + * Valid (ie. supported) states are advertised in /sys/power/state. Note + * that it still may be impossible to enter given system sleep state if the + * conditions aren't right. + * There is the %suspend_valid_only_mem function available that can be + * assigned to this if the platform only supports mem sleep. + * + * @begin: Initialise a transition to given system sleep state. + * @begin() is executed right prior to suspending devices. The information + * conveyed to the platform code by @begin() should be disregarded by it as + * soon as @end() is executed. If @begin() fails (ie. returns nonzero), + * @prepare(), @enter() and @finish() will not be called by the PM core. + * This callback is optional. However, if it is implemented, the argument + * passed to @enter() is redundant and should be ignored. + * + * @prepare: Prepare the platform for entering the system sleep state indicated + * by @begin(). + * @prepare() is called right after devices have been suspended (ie. the + * appropriate .suspend() method has been executed for each device) and + * before device drivers' late suspend callbacks are executed. It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@prepare_late(), @enter(), + * and @wake() will not be called in that case). + * + * @prepare_late: Finish preparing the platform for entering the system sleep + * state indicated by @begin(). + * @prepare_late is called before disabling nonboot CPUs and after + * device drivers' late suspend callbacks have been executed. It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@enter() will not be + * executed). 
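The suspend_stats counters and the dpm_save_failed_*() helpers defined above are intended to be updated at the point where a sleep transition fails. A condensed sketch of that pattern (the call to suspend_devices_and_enter() stands in for a hypothetical call site and is not declared in this header):

	int error = suspend_devices_and_enter(state);
	if (error) {
		suspend_stats.fail++;
		dpm_save_failed_errno(error);
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else {
		suspend_stats.success++;
	}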
+ * + * @enter: Enter the system sleep state indicated by @begin() or represented by + * the argument if @begin() is not implemented. + * This callback is mandatory. It returns 0 on success or a negative + * error code otherwise, in which case the system cannot enter the desired + * sleep state. + * + * @wake: Called when the system has just left a sleep state, right after + * the nonboot CPUs have been enabled and before device drivers' early + * resume callbacks are executed. + * This callback is optional, but should be implemented by the platforms + * that implement @prepare_late(). If implemented, it is always called + * after @prepare_late and @enter(), even if one of them fails. + * + * @finish: Finish wake-up of the platform. + * @finish is called right prior to calling device drivers' regular suspend + * callbacks. + * This callback is optional, but should be implemented by the platforms + * that implement @prepare(). If implemented, it is always called after + * @enter() and @wake(), even if any of them fails. It is executed after + * a failing @prepare. + * + * @suspend_again: Returns whether the system should suspend again (true) or + * not (false). If the platform wants to poll sensors or execute some + * code during suspended without invoking userspace and most of devices, + * suspend_again callback is the place assuming that periodic-wakeup or + * alarm-wakeup is already setup. This allows to execute some codes while + * being kept suspended in the view of userland and devices. + * + * @end: Called by the PM core right after resuming devices, to indicate to + * the platform that the system has returned to the working state or + * the transition to the sleep state has been aborted. + * This callback is optional, but should be implemented by the platforms + * that implement @begin(). Accordingly, platforms implementing @begin() + * should also provide a @end() which cleans up transitions aborted before + * @enter(). + * + * @recover: Recover the platform from a suspend failure. + * Called by the PM core if the suspending of devices fails. + * This callback is optional and should only be implemented by platforms + * which require special recovery actions in that situation. + */ +struct platform_suspend_ops { + int (*valid)(suspend_state_t state); + int (*begin)(suspend_state_t state); + int (*prepare)(void); + int (*prepare_late)(void); + int (*enter)(suspend_state_t state); + void (*wake)(void); + void (*finish)(void); + bool (*suspend_again)(void); + void (*end)(void); + void (*recover)(void); +}; + +#ifdef CONFIG_SUSPEND +/** + * suspend_set_ops - set platform dependent suspend operations + * @ops: The new suspend operations to set. + */ +extern void suspend_set_ops(const struct platform_suspend_ops *ops); +extern int suspend_valid_only_mem(suspend_state_t state); + +/** + * arch_suspend_disable_irqs - disable IRQs for suspend + * + * Disables IRQs (in the default case). This is a weak symbol in the common + * code and thus allows architectures to override it if more needs to be + * done. Not called for suspend to disk. + */ +extern void arch_suspend_disable_irqs(void); + +/** + * arch_suspend_enable_irqs - enable IRQs after suspend + * + * Enables IRQs (in the default case). This is a weak symbol in the common + * code and thus allows architectures to override it if more needs to be + * done. Not called for suspend to disk. 
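Tying the platform_suspend_ops contract and suspend_set_ops() together, a minimal hypothetical platform might register only @valid and the mandatory @enter; the foo_* names below are invented for the example and suspend_valid_only_mem() is the helper declared in this header:

	static int foo_suspend_enter(suspend_state_t state)
	{
		/* program the (hypothetical) platform's sleep registers here */
		return 0;
	}

	static const struct platform_suspend_ops foo_suspend_ops = {
		.valid	= suspend_valid_only_mem,	/* only PM_SUSPEND_MEM is supported */
		.enter	= foo_suspend_enter,
	};

	static int __init foo_pm_init(void)
	{
		suspend_set_ops(&foo_suspend_ops);
		return 0;
	}
	arch_initcall(foo_pm_init);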
+ */ +extern void arch_suspend_enable_irqs(void); + +extern int pm_suspend(suspend_state_t state); +#else /* !CONFIG_SUSPEND */ +#define suspend_valid_only_mem NULL + +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} +static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } +#endif /* !CONFIG_SUSPEND */ + +/* struct pbe is used for creating lists of pages that should be restored + * atomically during the resume from disk, because the page frames they have + * occupied before the suspend are in use. + */ +struct pbe { + void *address; /* address of the copy */ + void *orig_address; /* original address of a page */ + struct pbe *next; +}; + +/* mm/page_alloc.c */ +extern void mark_free_pages(struct zone *zone); + +/** + * struct platform_hibernation_ops - hibernation platform support + * + * The methods in this structure allow a platform to carry out special + * operations required by it during a hibernation transition. + * + * All the methods below, except for @recover(), must be implemented. + * + * @begin: Tell the platform driver that we're starting hibernation. + * Called right after shrinking memory and before freezing devices. + * + * @end: Called by the PM core right after resuming devices, to indicate to + * the platform that the system has returned to the working state. + * + * @pre_snapshot: Prepare the platform for creating the hibernation image. + * Called right after devices have been frozen and before the nonboot + * CPUs are disabled (runs with IRQs on). + * + * @finish: Restore the previous state of the platform after the hibernation + * image has been created *or* put the platform into the normal operation + * mode after the hibernation (the same method is executed in both cases). + * Called right after the nonboot CPUs have been enabled and before + * thawing devices (runs with IRQs on). + * + * @prepare: Prepare the platform for entering the low power state. + * Called right after the hibernation image has been saved and before + * devices are prepared for entering the low power state. + * + * @enter: Put the system into the low power state after the hibernation image + * has been saved to disk. + * Called after the nonboot CPUs have been disabled and all of the low + * level devices have been shut down (runs with IRQs off). + * + * @leave: Perform the first stage of the cleanup after the system sleep state + * indicated by @set_target() has been left. + * Called right after the control has been passed from the boot kernel to + * the image kernel, before the nonboot CPUs are enabled and before devices + * are resumed. Executed with interrupts disabled. + * + * @pre_restore: Prepare system for the restoration from a hibernation image. + * Called right after devices have been frozen and before the nonboot + * CPUs are disabled (runs with IRQs on). + * + * @restore_cleanup: Clean up after a failing image restoration. + * Called right after the nonboot CPUs have been enabled and before + * thawing devices (runs with IRQs on). + * + * @recover: Recover the platform from a failure to suspend devices. + * Called by the PM core if the suspending of devices during hibernation + * fails. This callback is optional and should only be implemented by + * platforms which require special recovery actions in that situation. 
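A platform wiring up the hibernation callbacks documented above registers them through hibernation_set_ops(), declared further below. A hedged sketch with invented foo_hib_* callbacks whose bodies are elided; note that every method except @recover must be provided:

	static const struct platform_hibernation_ops foo_hibernation_ops = {
		.begin		= foo_hib_begin,	/* invented callbacks; bodies elided */
		.end		= foo_hib_end,
		.pre_snapshot	= foo_hib_pre_snapshot,
		.finish		= foo_hib_finish,
		.prepare	= foo_hib_prepare,
		.enter		= foo_hib_enter,
		.leave		= foo_hib_leave,
		.pre_restore	= foo_hib_pre_restore,
		.restore_cleanup = foo_hib_restore_cleanup,
		/* .recover deliberately left NULL: it is the one optional method */
	};

	/* from the platform's init code: */
	hibernation_set_ops(&foo_hibernation_ops);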
+ */ +struct platform_hibernation_ops { + int (*begin)(void); + void (*end)(void); + int (*pre_snapshot)(void); + void (*finish)(void); + int (*prepare)(void); + int (*enter)(void); + void (*leave)(void); + int (*pre_restore)(void); + void (*restore_cleanup)(void); + void (*recover)(void); +}; + +#ifdef CONFIG_HIBERNATION +/* kernel/power/snapshot.c */ +extern void __register_nosave_region(unsigned long b, unsigned long e, int km); +static inline void __init register_nosave_region(unsigned long b, unsigned long e) +{ + __register_nosave_region(b, e, 0); +} +static inline void __init register_nosave_region_late(unsigned long b, unsigned long e) +{ + __register_nosave_region(b, e, 1); +} +extern int swsusp_page_is_forbidden(struct page *); +extern void swsusp_set_page_free(struct page *); +extern void swsusp_unset_page_free(struct page *); +extern unsigned long get_safe_page(gfp_t gfp_mask); + +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); +extern int hibernate(void); +extern bool system_entering_hibernation(void); +#else /* CONFIG_HIBERNATION */ +static inline void register_nosave_region(unsigned long b, unsigned long e) {} +static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} +static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } +static inline void swsusp_set_page_free(struct page *p) {} +static inline void swsusp_unset_page_free(struct page *p) {} + +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {} +static inline int hibernate(void) { return -ENOSYS; } +static inline bool system_entering_hibernation(void) { return false; } +#endif /* CONFIG_HIBERNATION */ + +/* Hibernation and suspend events */ +#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ +#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ +#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */ +#define PM_POST_SUSPEND 0x0004 /* Suspend finished */ +#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */ +#define PM_POST_RESTORE 0x0006 /* Restore failed */ +#define PM_HIBERNATION_FINISH 0x0007 +extern struct mutex pm_mutex; + +#ifdef CONFIG_PM_SLEEP +void save_processor_state(void); +void restore_processor_state(void); + +/* kernel/power/main.c */ +extern int register_pm_notifier(struct notifier_block *nb); +extern int unregister_pm_notifier(struct notifier_block *nb); + +#define pm_notifier(fn, pri) { \ + static struct notifier_block fn##_nb = \ + { .notifier_call = fn, .priority = pri }; \ + register_pm_notifier(&fn##_nb); \ +} + +/* drivers/base/power/wakeup.c */ +extern bool events_check_enabled; + +extern bool pm_wakeup_pending(void); +extern bool pm_get_wakeup_count(unsigned int *count, bool block); +extern bool pm_save_wakeup_count(unsigned int count); +extern void pm_wakep_autosleep_enabled(bool set); + +static inline void lock_system_sleep(void) +{ + current->flags |= PF_FREEZER_SKIP; + mutex_lock(&pm_mutex); +} + +static inline void unlock_system_sleep(void) +{ + /* + * Don't use freezer_count() because we don't want the call to + * try_to_freeze() here. + * + * Reason: + * Fundamentally, we just don't need it, because freezing condition + * doesn't come into effect until we release the pm_mutex lock, + * since the freezer always works with pm_mutex held. + * + * More importantly, in the case of hibernation, + * unlock_system_sleep() gets called in snapshot_read() and + * snapshot_write() when the freezing condition is still in effect. 
+ * Which means, if we use try_to_freeze() here, it would make them + * enter the refrigerator, thus causing hibernation to lockup. + */ + current->flags &= ~PF_FREEZER_SKIP; + mutex_unlock(&pm_mutex); +} + +#else /* !CONFIG_PM_SLEEP */ + +static inline int register_pm_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int unregister_pm_notifier(struct notifier_block *nb) +{ + return 0; +} + +#define pm_notifier(fn, pri) do { (void)(fn); } while (0) + +static inline bool pm_wakeup_pending(void) { return false; } + +static inline void lock_system_sleep(void) {} +static inline void unlock_system_sleep(void) {} + +#endif /* !CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_AUTOSLEEP + +/* kernel/power/autosleep.c */ +void queue_up_suspend_work(void); + +#else /* !CONFIG_PM_AUTOSLEEP */ + +static inline void queue_up_suspend_work(void) {} + +#endif /* !CONFIG_PM_AUTOSLEEP */ + +#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS +/* + * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture + * to save/restore additional information to/from the array of page + * frame numbers in the hibernation image. For s390 this is used to + * save and restore the storage key for each page that is included + * in the hibernation image. + */ +unsigned long page_key_additional_pages(unsigned long pages); +int page_key_alloc(unsigned long pages); +void page_key_free(void); +void page_key_read(unsigned long *pfn); +void page_key_memorize(unsigned long *pfn); +void page_key_write(void *address); + +#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ + +static inline unsigned long page_key_additional_pages(unsigned long pages) +{ + return 0; +} + +static inline int page_key_alloc(unsigned long pages) +{ + return 0; +} + +static inline void page_key_free(void) {} +static inline void page_key_read(unsigned long *pfn) {} +static inline void page_key_memorize(unsigned long *pfn) {} +static inline void page_key_write(void *address) {} + +#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ + +#endif /* _LINUX_SUSPEND_H */ diff --git a/include/linux/suspend_ioctls.h b/include/linux/suspend_ioctls.h new file mode 100644 index 00000000..0b303829 --- /dev/null +++ b/include/linux/suspend_ioctls.h @@ -0,0 +1,33 @@ +#ifndef _LINUX_SUSPEND_IOCTLS_H +#define _LINUX_SUSPEND_IOCTLS_H + +#include <linux/types.h> +/* + * This structure is used to pass the values needed for the identification + * of the resume swap area from a user space to the kernel via the + * SNAPSHOT_SET_SWAP_AREA ioctl + */ +struct resume_swap_area { + __kernel_loff_t offset; + __u32 dev; +} __attribute__((packed)); + +#define SNAPSHOT_IOC_MAGIC '3' +#define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1) +#define SNAPSHOT_UNFREEZE _IO(SNAPSHOT_IOC_MAGIC, 2) +#define SNAPSHOT_ATOMIC_RESTORE _IO(SNAPSHOT_IOC_MAGIC, 4) +#define SNAPSHOT_FREE _IO(SNAPSHOT_IOC_MAGIC, 5) +#define SNAPSHOT_FREE_SWAP_PAGES _IO(SNAPSHOT_IOC_MAGIC, 9) +#define SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11) +#define SNAPSHOT_SET_SWAP_AREA _IOW(SNAPSHOT_IOC_MAGIC, 13, \ + struct resume_swap_area) +#define SNAPSHOT_GET_IMAGE_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 14, __kernel_loff_t) +#define SNAPSHOT_PLATFORM_SUPPORT _IO(SNAPSHOT_IOC_MAGIC, 15) +#define SNAPSHOT_POWER_OFF _IO(SNAPSHOT_IOC_MAGIC, 16) +#define SNAPSHOT_CREATE_IMAGE _IOW(SNAPSHOT_IOC_MAGIC, 17, int) +#define SNAPSHOT_PREF_IMAGE_SIZE _IO(SNAPSHOT_IOC_MAGIC, 18) +#define SNAPSHOT_AVAIL_SWAP_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 19, __kernel_loff_t) +#define SNAPSHOT_ALLOC_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 20, __kernel_loff_t) +#define SNAPSHOT_IOC_MAXNR 20 + +#endif /* 
_LINUX_SUSPEND_IOCTLS_H */ diff --git a/include/linux/svga.h b/include/linux/svga.h new file mode 100644 index 00000000..bfa68e83 --- /dev/null +++ b/include/linux/svga.h @@ -0,0 +1,124 @@ +#ifndef _LINUX_SVGA_H +#define _LINUX_SVGA_H + +#include <linux/pci.h> +#include <video/vga.h> + +/* Terminator for register set */ + +#define VGA_REGSET_END_VAL 0xFF +#define VGA_REGSET_END {VGA_REGSET_END_VAL, 0, 0} + +struct vga_regset { + u8 regnum; + u8 lowbit; + u8 highbit; +}; + +/* ------------------------------------------------------------------------- */ + +#define SVGA_FORMAT_END_VAL 0xFFFF +#define SVGA_FORMAT_END {SVGA_FORMAT_END_VAL, {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, 0, 0, 0, 0, 0, 0} + +struct svga_fb_format { + /* var part */ + u32 bits_per_pixel; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + u32 nonstd; + /* fix part */ + u32 type; + u32 type_aux; + u32 visual; + u32 xpanstep; + u32 xresstep; +}; + +struct svga_timing_regs { + const struct vga_regset *h_total_regs; + const struct vga_regset *h_display_regs; + const struct vga_regset *h_blank_start_regs; + const struct vga_regset *h_blank_end_regs; + const struct vga_regset *h_sync_start_regs; + const struct vga_regset *h_sync_end_regs; + + const struct vga_regset *v_total_regs; + const struct vga_regset *v_display_regs; + const struct vga_regset *v_blank_start_regs; + const struct vga_regset *v_blank_end_regs; + const struct vga_regset *v_sync_start_regs; + const struct vga_regset *v_sync_end_regs; +}; + +struct svga_pll { + u16 m_min; + u16 m_max; + u16 n_min; + u16 n_max; + u16 r_min; + u16 r_max; /* r_max < 32 */ + u32 f_vco_min; + u32 f_vco_max; + u32 f_base; +}; + + +/* Write a value to the attribute register */ + +static inline void svga_wattr(void __iomem *regbase, u8 index, u8 data) +{ + vga_r(regbase, VGA_IS1_RC); + vga_w(regbase, VGA_ATT_IW, index); + vga_w(regbase, VGA_ATT_W, data); +} + +/* Write a value to a sequence register with a mask */ + +static inline void svga_wseq_mask(void __iomem *regbase, u8 index, u8 data, u8 mask) +{ + vga_wseq(regbase, index, (data & mask) | (vga_rseq(regbase, index) & ~mask)); +} + +/* Write a value to a CRT register with a mask */ + +static inline void svga_wcrt_mask(void __iomem *regbase, u8 index, u8 data, u8 mask) +{ + vga_wcrt(regbase, index, (data & mask) | (vga_rcrt(regbase, index) & ~mask)); +} + +static inline int svga_primary_device(struct pci_dev *dev) +{ + u16 flags; + pci_read_config_word(dev, PCI_COMMAND, &flags); + return (flags & PCI_COMMAND_IO); +} + + +void svga_wcrt_multi(void __iomem *regbase, const struct vga_regset *regset, u32 value); +void svga_wseq_multi(void __iomem *regbase, const struct vga_regset *regset, u32 value); + +void svga_set_default_gfx_regs(void __iomem *regbase); +void svga_set_default_atc_regs(void __iomem *regbase); +void svga_set_default_seq_regs(void __iomem *regbase); +void svga_set_default_crt_regs(void __iomem *regbase); +void svga_set_textmode_vga_regs(void __iomem *regbase); + +void svga_settile(struct fb_info *info, struct fb_tilemap *map); +void svga_tilecopy(struct fb_info *info, struct fb_tilearea *area); +void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect); +void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit); +void svga_tilecursor(void __iomem *regbase, struct fb_info *info, struct fb_tilecursor *cursor); +int svga_get_tilemax(struct fb_info *info); +void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps, + struct 
fb_var_screeninfo *var); + +int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node); +int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node); +void svga_set_timings(void __iomem *regbase, const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, u32 hmul, u32 hdiv, u32 vmul, u32 vdiv, u32 hborder, int node); + +int svga_match_format(const struct svga_fb_format *frm, struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix); + +#endif /* _LINUX_SVGA_H */ + diff --git a/include/linux/sw_sync.h b/include/linux/sw_sync.h new file mode 100644 index 00000000..bd6f2089 --- /dev/null +++ b/include/linux/sw_sync.h @@ -0,0 +1,58 @@ +/* + * include/linux/sw_sync.h + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SW_SYNC_H +#define _LINUX_SW_SYNC_H + +#include <linux/types.h> + +#ifdef __KERNEL__ + +#include <linux/sync.h> + +struct sw_sync_timeline { + struct sync_timeline obj; + + u32 value; +}; + +struct sw_sync_pt { + struct sync_pt pt; + + u32 value; +}; + +struct sw_sync_timeline *sw_sync_timeline_create(const char *name); +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); + +#endif /* __KERNEL __ */ + +struct sw_sync_create_fence_data { + __u32 value; + char name[32]; + __s32 fence; /* fd of new fence */ +}; + +#define SW_SYNC_IOC_MAGIC 'W' + +#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\ + struct sw_sync_create_fence_data) +#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) + + +#endif /* _LINUX_SW_SYNC_H */ diff --git a/include/linux/swab.h b/include/linux/swab.h new file mode 100644 index 00000000..ea0c02fd --- /dev/null +++ b/include/linux/swab.h @@ -0,0 +1,299 @@ +#ifndef _LINUX_SWAB_H +#define _LINUX_SWAB_H + +#include <linux/types.h> +#include <linux/compiler.h> +#include <asm/swab.h> + +/* + * casts are necessary for constants, because we never know how for sure + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
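Before the definitions, a quick feel for what the byte- and word-swapping helpers declared below produce (illustrative values only; the macros fold to constants whenever the argument is a compile-time constant):

	__u32 full   = __swab32(0x12345678);	/* 0x78563412: all four bytes reversed */
	__u32 words  = __swahw32(0x12340000);	/* 0x00001234: the two 16-bit halves exchanged */
	__u32 halves = __swahb32(0x12345678);	/* 0x34127856: bytes swapped within each half */
	__u16 small  = __swab16(0x1234);	/* 0x3412, folded at compile time */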
+ */ +#define ___constant_swab16(x) ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8))) + +#define ___constant_swab32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24))) + +#define ___constant_swab64(x) ((__u64)( \ + (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) + +#define ___constant_swahw32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ + (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) + +#define ___constant_swahb32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ + (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) + +/* + * Implement the following as inlines, but define the interface using + * macros to allow constant folding when possible: + * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 + */ + +static inline __attribute_const__ __u16 __fswab16(__u16 val) +{ +#ifdef __arch_swab16 + return __arch_swab16(val); +#else + return ___constant_swab16(val); +#endif +} + +static inline __attribute_const__ __u32 __fswab32(__u32 val) +{ +#ifdef __arch_swab32 + return __arch_swab32(val); +#else + return ___constant_swab32(val); +#endif +} + +static inline __attribute_const__ __u64 __fswab64(__u64 val) +{ +#ifdef __arch_swab64 + return __arch_swab64(val); +#elif defined(__SWAB_64_THRU_32__) + __u32 h = val >> 32; + __u32 l = val & ((1ULL << 32) - 1); + return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h))); +#else + return ___constant_swab64(val); +#endif +} + +static inline __attribute_const__ __u32 __fswahw32(__u32 val) +{ +#ifdef __arch_swahw32 + return __arch_swahw32(val); +#else + return ___constant_swahw32(val); +#endif +} + +static inline __attribute_const__ __u32 __fswahb32(__u32 val) +{ +#ifdef __arch_swahb32 + return __arch_swahb32(val); +#else + return ___constant_swahb32(val); +#endif +} + +/** + * __swab16 - return a byteswapped 16-bit value + * @x: value to byteswap + */ +#define __swab16(x) \ + (__builtin_constant_p((__u16)(x)) ? \ + ___constant_swab16(x) : \ + __fswab16(x)) + +/** + * __swab32 - return a byteswapped 32-bit value + * @x: value to byteswap + */ +#define __swab32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + ___constant_swab32(x) : \ + __fswab32(x)) + +/** + * __swab64 - return a byteswapped 64-bit value + * @x: value to byteswap + */ +#define __swab64(x) \ + (__builtin_constant_p((__u64)(x)) ? \ + ___constant_swab64(x) : \ + __fswab64(x)) + +/** + * __swahw32 - return a word-swapped 32-bit value + * @x: value to wordswap + * + * __swahw32(0x12340000) is 0x00001234 + */ +#define __swahw32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + ___constant_swahw32(x) : \ + __fswahw32(x)) + +/** + * __swahb32 - return a high and low byte-swapped 32-bit value + * @x: value to byteswap + * + * __swahb32(0x12345678) is 0x34127856 + */ +#define __swahb32(x) \ + (__builtin_constant_p((__u32)(x)) ? 
\ + ___constant_swahb32(x) : \ + __fswahb32(x)) + +/** + * __swab16p - return a byteswapped 16-bit value from a pointer + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline __u16 __swab16p(const __u16 *p) +{ +#ifdef __arch_swab16p + return __arch_swab16p(p); +#else + return __swab16(*p); +#endif +} + +/** + * __swab32p - return a byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline __u32 __swab32p(const __u32 *p) +{ +#ifdef __arch_swab32p + return __arch_swab32p(p); +#else + return __swab32(*p); +#endif +} + +/** + * __swab64p - return a byteswapped 64-bit value from a pointer + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline __u64 __swab64p(const __u64 *p) +{ +#ifdef __arch_swab64p + return __arch_swab64p(p); +#else + return __swab64(*p); +#endif +} + +/** + * __swahw32p - return a wordswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping. + */ +static inline __u32 __swahw32p(const __u32 *p) +{ +#ifdef __arch_swahw32p + return __arch_swahw32p(p); +#else + return __swahw32(*p); +#endif +} + +/** + * __swahb32p - return a high and low byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high/low byteswapping. + */ +static inline __u32 __swahb32p(const __u32 *p) +{ +#ifdef __arch_swahb32p + return __arch_swahb32p(p); +#else + return __swahb32(*p); +#endif +} + +/** + * __swab16s - byteswap a 16-bit value in-place + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline void __swab16s(__u16 *p) +{ +#ifdef __arch_swab16s + __arch_swab16s(p); +#else + *p = __swab16p(p); +#endif +} +/** + * __swab32s - byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline void __swab32s(__u32 *p) +{ +#ifdef __arch_swab32s + __arch_swab32s(p); +#else + *p = __swab32p(p); +#endif +} + +/** + * __swab64s - byteswap a 64-bit value in-place + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline void __swab64s(__u64 *p) +{ +#ifdef __arch_swab64s + __arch_swab64s(p); +#else + *p = __swab64p(p); +#endif +} + +/** + * __swahw32s - wordswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping + */ +static inline void __swahw32s(__u32 *p) +{ +#ifdef __arch_swahw32s + __arch_swahw32s(p); +#else + *p = __swahw32p(p); +#endif +} + +/** + * __swahb32s - high and low byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high and low byte swapping + */ +static inline void __swahb32s(__u32 *p) +{ +#ifdef __arch_swahb32s + __arch_swahb32s(p); +#else + *p = __swahb32p(p); +#endif +} + +#ifdef __KERNEL__ +# define swab16 __swab16 +# define swab32 __swab32 +# define swab64 __swab64 +# define swahw32 __swahw32 +# define swahb32 __swahb32 +# define swab16p __swab16p +# define swab32p __swab32p +# define swab64p __swab64p +# define swahw32p __swahw32p +# define swahb32p __swahb32p +# define swab16s __swab16s +# define swab32s __swab32s +# define swab64s __swab64s +# define swahw32s __swahw32s +# define swahb32s __swahb32s +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SWAB_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h new file mode 100644 index 00000000..b1fd5c79 --- /dev/null +++ b/include/linux/swap.h @@ -0,0 +1,512 @@ +#ifndef 
_LINUX_SWAP_H +#define _LINUX_SWAP_H + +#include <linux/spinlock.h> +#include <linux/linkage.h> +#include <linux/mmzone.h> +#include <linux/list.h> +#include <linux/memcontrol.h> +#include <linux/sched.h> +#include <linux/node.h> + +#include <linux/atomic.h> +#include <asm/page.h> + +struct notifier_block; + +struct bio; + +#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ +#define SWAP_FLAG_PRIO_MASK 0x7fff +#define SWAP_FLAG_PRIO_SHIFT 0 +#define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */ + +#define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \ + SWAP_FLAG_DISCARD) + +static inline int current_is_kswapd(void) +{ + return current->flags & PF_KSWAPD; +} + +/* + * MAX_SWAPFILES defines the maximum number of swaptypes: things which can + * be swapped to. The swap type and the offset into that swap type are + * encoded into pte's and into pgoff_t's in the swapcache. Using five bits + * for the type means that the maximum number of swapcache pages is 27 bits + * on 32-bit-pgoff_t architectures. And that assumes that the architecture packs + * the type/offset into the pte as 5/27 as well. + */ +#define MAX_SWAPFILES_SHIFT 5 + +/* + * Use some of the swap files numbers for other purposes. This + * is a convenient way to hook into the VM to trigger special + * actions on faults. + */ + +/* + * NUMA node memory migration support + */ +#ifdef CONFIG_MIGRATION +#define SWP_MIGRATION_NUM 2 +#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM) +#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1) +#else +#define SWP_MIGRATION_NUM 0 +#endif + +/* + * Handling of hardware poisoned pages with memory corruption. + */ +#ifdef CONFIG_MEMORY_FAILURE +#define SWP_HWPOISON_NUM 1 +#define SWP_HWPOISON MAX_SWAPFILES +#else +#define SWP_HWPOISON_NUM 0 +#endif + +#define MAX_SWAPFILES \ + ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM) + +/* + * Magic header for a swap area. The first part of the union is + * what the swap magic looks like for the old (limited to 128MB) + * swap area format, the second part of the union adds - in the + * old reserved area - some extra information. Note that the first + * kilobyte is reserved for boot loader or disk label stuff... + * + * Having the magic at the end of the PAGE_SIZE makes detecting swap + * areas somewhat tricky on machines that support multiple page sizes. + * For 2.5 we'll probably want to move the magic to just beyond the + * bootbits... + */ +union swap_header { + struct { + char reserved[PAGE_SIZE - 10]; + char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */ + } magic; + struct { + char bootbits[1024]; /* Space for disklabel etc. */ + __u32 version; + __u32 last_page; + __u32 nr_badpages; + unsigned char sws_uuid[16]; + unsigned char sws_volume[16]; + __u32 padding[117]; + __u32 badpages[1]; + } info; +}; + + /* A swap entry has to fit into a "unsigned long", as + * the entry is hidden in the "index" field of the + * swapper address space. + */ +typedef struct { + unsigned long val; +} swp_entry_t; + +/* + * current->reclaim_state points to one of these when a task is running + * memory reclaim + */ +struct reclaim_state { + unsigned long reclaimed_slab; +}; + +#ifdef __KERNEL__ + +struct address_space; +struct sysinfo; +struct writeback_control; +struct zone; + +/* + * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of + * disk blocks. A list of swap extents maps the entire swapfile. 
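Working the MAX_SWAPFILES arithmetic above through once, for a kernel with both CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE enabled (an illustrative calculation that follows directly from the definitions, not text from the header):

	MAX_SWAPFILES       = (1 << 5) - 2 - 1 = 29	/* types 0..28 back real swap areas */
	SWP_HWPOISON        = 29
	SWP_MIGRATION_READ  = 29 + 1 = 30
	SWP_MIGRATION_WRITE = 29 + 1 + 1 = 31

With 5 of the bits of a 32-bit pgoff_t taken by the type, 27 bits remain for the swap offset, i.e. up to 2^27 swapcache pages per type (about 512 GiB of swap with 4 KiB pages).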
(Where the + * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart + * from setup, they're handled identically. + * + * We always assume that blocks are of size PAGE_SIZE. + */ +struct swap_extent { + struct list_head list; + pgoff_t start_page; + pgoff_t nr_pages; + sector_t start_block; +}; + +/* + * Max bad pages in the new format.. + */ +#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x) +#define MAX_SWAP_BADPAGES \ + ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)) + +enum { + SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ + SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ + SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */ + SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ + SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ + SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ + SWP_BLKDEV = (1 << 6), /* its a block device */ + /* add others here before... */ + SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ +}; + +#define SWAP_CLUSTER_MAX 32 +#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX + +/* + * Ratio between the present memory in the zone and the "gap" that + * we're allowing kswapd to shrink in addition to the per-zone high + * wmark, even for zones that already have the high wmark satisfied, + * in order to provide better per-zone lru behavior. We are ok to + * spend not more than 1% of the memory for this zone balancing "gap". + */ +#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100 + +#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ +#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ +#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ +#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */ +#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */ +#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */ + +/* + * The in-memory structure used to track swap areas. + */ +struct swap_info_struct { + unsigned long flags; /* SWP_USED etc: see above */ + signed short prio; /* swap priority of this type */ + signed char type; /* strange name for an index */ + signed char next; /* next type on the swap list */ + unsigned int max; /* extent of the swap_map */ + unsigned char *swap_map; /* vmalloc'ed array of usage counts */ + unsigned int lowest_bit; /* index of first free in swap_map */ + unsigned int highest_bit; /* index of last free in swap_map */ + unsigned int pages; /* total of usable pages of swap */ + unsigned int inuse_pages; /* number of those currently in use */ + unsigned int cluster_next; /* likely index for next allocation */ + unsigned int cluster_nr; /* countdown to next cluster search */ + unsigned int lowest_alloc; /* while preparing discard cluster */ + unsigned int highest_alloc; /* while preparing discard cluster */ + struct swap_extent *curr_swap_extent; + struct swap_extent first_swap_extent; + struct block_device *bdev; /* swap device or bdev of swap file */ + struct file *swap_file; /* seldom referenced */ + unsigned int old_block_size; /* seldom referenced */ +}; + +struct swap_list_t { + int head; /* head of priority-ordered swapfile list */ + int next; /* swapfile to be used next */ +}; + +/* Swap 50% full? Release swapcache more aggressively.. 
*/ +#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) + +/* linux/mm/page_alloc.c */ +extern unsigned long totalram_pages; +extern unsigned long totalreserve_pages; +extern unsigned long dirty_balance_reserve; +extern unsigned int nr_free_buffer_pages(void); +extern unsigned int nr_free_pagecache_pages(void); + +/* Definition of global_page_state not available yet */ +#define nr_free_pages() global_page_state(NR_FREE_PAGES) + + +/* linux/mm/swap.c */ +extern void __lru_cache_add(struct page *, enum lru_list lru); +extern void lru_cache_add_lru(struct page *, enum lru_list lru); +extern void lru_add_page_tail(struct zone* zone, + struct page *page, struct page *page_tail); +extern void activate_page(struct page *); +extern void mark_page_accessed(struct page *); +extern void lru_add_drain(void); +extern void lru_add_drain_cpu(int cpu); +extern int lru_add_drain_all(void); +extern void rotate_reclaimable_page(struct page *page); +extern void deactivate_page(struct page *page); +extern void swap_setup(void); + +extern void add_page_to_unevictable_list(struct page *page); + +/** + * lru_cache_add: add a page to the page lists + * @page: the page to add + */ +static inline void lru_cache_add_anon(struct page *page) +{ + __lru_cache_add(page, LRU_INACTIVE_ANON); +} + +static inline void lru_cache_add_file(struct page *page) +{ + __lru_cache_add(page, LRU_INACTIVE_FILE); +} + +/* linux/mm/vmscan.c */ +extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, + gfp_t gfp_mask, nodemask_t *mask); +extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file); +extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, + gfp_t gfp_mask, bool noswap); +extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, + gfp_t gfp_mask, bool noswap, + struct zone *zone, + unsigned long *nr_scanned); +extern unsigned long shrink_all_memory(unsigned long nr_pages); +extern int vm_swappiness; +extern int remove_mapping(struct address_space *mapping, struct page *page); +extern long vm_total_pages; + +#ifdef CONFIG_NUMA +extern int zone_reclaim_mode; +extern int sysctl_min_unmapped_ratio; +extern int sysctl_min_slab_ratio; +extern int zone_reclaim(struct zone *, gfp_t, unsigned int); +#else +#define zone_reclaim_mode 0 +static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) +{ + return 0; +} +#endif + +extern int page_evictable(struct page *page, struct vm_area_struct *vma); +extern void check_move_unevictable_pages(struct page **, int nr_pages); + +extern unsigned long scan_unevictable_pages; +extern int scan_unevictable_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +#ifdef CONFIG_NUMA +extern int scan_unevictable_register_node(struct node *node); +extern void scan_unevictable_unregister_node(struct node *node); +#else +static inline int scan_unevictable_register_node(struct node *node) +{ + return 0; +} +static inline void scan_unevictable_unregister_node(struct node *node) +{ +} +#endif + +extern int kswapd_run(int nid); +extern void kswapd_stop(int nid); +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +extern int mem_cgroup_swappiness(struct mem_cgroup *mem); +#else +static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) +{ + return vm_swappiness; +} +#endif +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +extern void mem_cgroup_uncharge_swap(swp_entry_t ent); +#else +static inline void mem_cgroup_uncharge_swap(swp_entry_t ent) +{ +} +#endif +#ifdef CONFIG_SWAP +/* linux/mm/page_io.c */ +extern int 
swap_readpage(struct page *); +extern int swap_writepage(struct page *page, struct writeback_control *wbc); +extern void end_swap_bio_read(struct bio *bio, int err); + +/* linux/mm/swap_state.c */ +extern struct address_space swapper_space; +#define total_swapcache_pages swapper_space.nrpages +extern void show_swap_cache_info(void); +extern int add_to_swap(struct page *); +extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); +extern void __delete_from_swap_cache(struct page *); +extern void delete_from_swap_cache(struct page *); +extern void free_page_and_swap_cache(struct page *); +extern void free_pages_and_swap_cache(struct page **, int); +extern struct page *lookup_swap_cache(swp_entry_t); +extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, + struct vm_area_struct *vma, unsigned long addr); +extern struct page *swapin_readahead(swp_entry_t, gfp_t, + struct vm_area_struct *vma, unsigned long addr); + +/* linux/mm/swapfile.c */ +extern long nr_swap_pages; +extern long total_swap_pages; +extern void si_swapinfo(struct sysinfo *); +extern swp_entry_t get_swap_page(void); +extern swp_entry_t get_swap_page_of_type(int); +extern int add_swap_count_continuation(swp_entry_t, gfp_t); +extern void swap_shmem_alloc(swp_entry_t); +extern int swap_duplicate(swp_entry_t); +extern int swapcache_prepare(swp_entry_t); +extern void swap_free(swp_entry_t); +extern void swapcache_free(swp_entry_t, struct page *page); +extern int free_swap_and_cache(swp_entry_t); +extern int swap_type_of(dev_t, sector_t, struct block_device **); +extern unsigned int count_swap_pages(int, int); +extern sector_t map_swap_page(struct page *, struct block_device **); +extern sector_t swapdev_block(int, pgoff_t); +extern int reuse_swap_page(struct page *); +extern int try_to_free_swap(struct page *); +struct backing_dev_info; + +/* linux/mm/thrash.c */ +extern struct mm_struct *swap_token_mm; +extern void grab_swap_token(struct mm_struct *); +extern void __put_swap_token(struct mm_struct *); +extern void disable_swap_token(struct mem_cgroup *memcg); + +static inline int has_swap_token(struct mm_struct *mm) +{ + return (mm == swap_token_mm); +} + +static inline void put_swap_token(struct mm_struct *mm) +{ + if (has_swap_token(mm)) + __put_swap_token(mm); +} + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +extern void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout); +extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep); +#else +static inline void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) +{ +} +#endif + +#else /* CONFIG_SWAP */ + +#define nr_swap_pages 0L +#define total_swap_pages 0L +#define total_swapcache_pages 0UL + +#define si_swapinfo(val) \ + do { (val)->freeswap = (val)->totalswap = 0; } while (0) +/* only sparc can not include linux/pagemap.h in this file + * so leave page_cache_release and release_pages undeclared... 
*/ +#define free_page_and_swap_cache(page) \ + page_cache_release(page) +#define free_pages_and_swap_cache(pages, nr) \ + release_pages((pages), (nr), 0); + +static inline void show_swap_cache_info(void) +{ +} + +#define free_swap_and_cache(swp) is_migration_entry(swp) +#define swapcache_prepare(swp) is_migration_entry(swp) + +static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) +{ + return 0; +} + +static inline void swap_shmem_alloc(swp_entry_t swp) +{ +} + +static inline int swap_duplicate(swp_entry_t swp) +{ + return 0; +} + +static inline void swap_free(swp_entry_t swp) +{ +} + +static inline void swapcache_free(swp_entry_t swp, struct page *page) +{ +} + +static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, + struct vm_area_struct *vma, unsigned long addr) +{ + return NULL; +} + +static inline int swap_writepage(struct page *p, struct writeback_control *wbc) +{ + return 0; +} + +static inline struct page *lookup_swap_cache(swp_entry_t swp) +{ + return NULL; +} + +static inline int add_to_swap(struct page *page) +{ + return 0; +} + +static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, + gfp_t gfp_mask) +{ + return -1; +} + +static inline void __delete_from_swap_cache(struct page *page) +{ +} + +static inline void delete_from_swap_cache(struct page *page) +{ +} + +#define reuse_swap_page(page) (page_mapcount(page) == 1) + +static inline int try_to_free_swap(struct page *page) +{ + return 0; +} + +static inline swp_entry_t get_swap_page(void) +{ + swp_entry_t entry; + entry.val = 0; + return entry; +} + +/* linux/mm/thrash.c */ +static inline void put_swap_token(struct mm_struct *mm) +{ +} + +static inline void grab_swap_token(struct mm_struct *mm) +{ +} + +static inline int has_swap_token(struct mm_struct *mm) +{ + return 0; +} + +static inline void disable_swap_token(struct mem_cgroup *memcg) +{ +} + +static inline void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) +{ +} + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +static inline int +mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep) +{ + return 0; +} +#endif + +#endif /* CONFIG_SWAP */ +#endif /* __KERNEL__*/ +#endif /* _LINUX_SWAP_H */ diff --git a/include/linux/swapops.h b/include/linux/swapops.h new file mode 100644 index 00000000..47ead515 --- /dev/null +++ b/include/linux/swapops.h @@ -0,0 +1,197 @@ +#ifndef _LINUX_SWAPOPS_H +#define _LINUX_SWAPOPS_H + +#include <linux/radix-tree.h> +#include <linux/bug.h> + +/* + * swapcache pages are stored in the swapper_space radix tree. We want to + * get good packing density in that tree, so the index should be dense in + * the low-order bits. + * + * We arrange the `type' and `offset' fields so that `type' is at the seven + * high-order bits of the swp_entry_t and `offset' is right-aligned in the + * remaining bits. Although `type' itself needs only five bits, we allow for + * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry(). + * + * swp_entry_t's are *never* stored anywhere in their arch-dependent format. 
+ */ +#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ + (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) +#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) + +/* + * Store a type+offset into a swp_entry_t in an arch-independent format + */ +static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) +{ + swp_entry_t ret; + + ret.val = (type << SWP_TYPE_SHIFT(ret)) | + (offset & SWP_OFFSET_MASK(ret)); + return ret; +} + +/* + * Extract the `type' field from a swp_entry_t. The swp_entry_t is in + * arch-independent format + */ +static inline unsigned swp_type(swp_entry_t entry) +{ + return (entry.val >> SWP_TYPE_SHIFT(entry)); +} + +/* + * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in + * arch-independent format + */ +static inline pgoff_t swp_offset(swp_entry_t entry) +{ + return entry.val & SWP_OFFSET_MASK(entry); +} + +#ifdef CONFIG_MMU +/* check whether a pte points to a swap entry */ +static inline int is_swap_pte(pte_t pte) +{ + return !pte_none(pte) && !pte_present(pte) && !pte_file(pte); +} +#endif + +/* + * Convert the arch-dependent pte representation of a swp_entry_t into an + * arch-independent swp_entry_t. + */ +static inline swp_entry_t pte_to_swp_entry(pte_t pte) +{ + swp_entry_t arch_entry; + + BUG_ON(pte_file(pte)); + arch_entry = __pte_to_swp_entry(pte); + return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); +} + +/* + * Convert the arch-independent representation of a swp_entry_t into the + * arch-dependent pte representation. + */ +static inline pte_t swp_entry_to_pte(swp_entry_t entry) +{ + swp_entry_t arch_entry; + + arch_entry = __swp_entry(swp_type(entry), swp_offset(entry)); + BUG_ON(pte_file(__swp_entry_to_pte(arch_entry))); + return __swp_entry_to_pte(arch_entry); +} + +static inline swp_entry_t radix_to_swp_entry(void *arg) +{ + swp_entry_t entry; + + entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT; + return entry; +} + +static inline void *swp_to_radix_entry(swp_entry_t entry) +{ + unsigned long value; + + value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT; + return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY); +} + +#ifdef CONFIG_MIGRATION +static inline swp_entry_t make_migration_entry(struct page *page, int write) +{ + BUG_ON(!PageLocked(page)); + return swp_entry(write ? 
SWP_MIGRATION_WRITE : SWP_MIGRATION_READ, + page_to_pfn(page)); +} + +static inline int is_migration_entry(swp_entry_t entry) +{ + return unlikely(swp_type(entry) == SWP_MIGRATION_READ || + swp_type(entry) == SWP_MIGRATION_WRITE); +} + +static inline int is_write_migration_entry(swp_entry_t entry) +{ + return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); +} + +static inline struct page *migration_entry_to_page(swp_entry_t entry) +{ + struct page *p = pfn_to_page(swp_offset(entry)); + /* + * Any use of migration entries may only occur while the + * corresponding page is locked + */ + BUG_ON(!PageLocked(p)); + return p; +} + +static inline void make_migration_entry_read(swp_entry_t *entry) +{ + *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); +} + +extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, + unsigned long address); +#else + +#define make_migration_entry(page, write) swp_entry(0, 0) +static inline int is_migration_entry(swp_entry_t swp) +{ + return 0; +} +#define migration_entry_to_page(swp) NULL +static inline void make_migration_entry_read(swp_entry_t *entryp) { } +static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, + unsigned long address) { } +static inline int is_write_migration_entry(swp_entry_t entry) +{ + return 0; +} + +#endif + +#ifdef CONFIG_MEMORY_FAILURE +/* + * Support for hardware poisoned pages + */ +static inline swp_entry_t make_hwpoison_entry(struct page *page) +{ + BUG_ON(!PageLocked(page)); + return swp_entry(SWP_HWPOISON, page_to_pfn(page)); +} + +static inline int is_hwpoison_entry(swp_entry_t entry) +{ + return swp_type(entry) == SWP_HWPOISON; +} +#else + +static inline swp_entry_t make_hwpoison_entry(struct page *page) +{ + return swp_entry(0, 0); +} + +static inline int is_hwpoison_entry(swp_entry_t swp) +{ + return 0; +} +#endif + +#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) +static inline int non_swap_entry(swp_entry_t entry) +{ + return swp_type(entry) >= MAX_SWAPFILES; +} +#else +static inline int non_swap_entry(swp_entry_t entry) +{ + return 0; +} +#endif + +#endif /* _LINUX_SWAPOPS_H */ diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h new file mode 100644 index 00000000..e872526f --- /dev/null +++ b/include/linux/swiotlb.h @@ -0,0 +1,113 @@ +#ifndef __LINUX_SWIOTLB_H +#define __LINUX_SWIOTLB_H + +#include <linux/types.h> + +struct device; +struct dma_attrs; +struct scatterlist; + +extern int swiotlb_force; + +/* + * Maximum allowable number of contiguous slabs to map, + * must be a power of 2. What is the appropriate value ? + * The complexity of {map,unmap}_single is linearly dependent on this value. + */ +#define IO_TLB_SEGSIZE 128 + +/* + * log of the size of each IO TLB slab. The number of slabs is command line + * controllable. 
+ */ +#define IO_TLB_SHIFT 11 + +extern void swiotlb_init(int verbose); +extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); +extern unsigned long swiotlb_nr_tbl(void); + +/* + * Enumeration for sync targets + */ +enum dma_sync_target { + SYNC_FOR_CPU = 0, + SYNC_FOR_DEVICE = 1, +}; +extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, + phys_addr_t phys, size_t size, + enum dma_data_direction dir); + +extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, + size_t size, enum dma_data_direction dir); + +extern void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, + size_t size, enum dma_data_direction dir, + enum dma_sync_target target); + +/* Accessory functions. */ +extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, + enum dma_data_direction dir); + +extern void +*swiotlb_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags); + +extern void +swiotlb_free_coherent(struct device *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle); + +extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs); +extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs); + +extern int +swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, + enum dma_data_direction dir); + +extern void +swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, + enum dma_data_direction dir); + +extern int +swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, + enum dma_data_direction dir, struct dma_attrs *attrs); + +extern void +swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, + struct dma_attrs *attrs); + +extern void +swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir); + +extern void +swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); + +extern void +swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir); + +extern void +swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); + +extern int +swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); + +extern int +swiotlb_dma_supported(struct device *hwdev, u64 mask); + +#ifdef CONFIG_SWIOTLB +extern void __init swiotlb_free(void); +#else +static inline void swiotlb_free(void) { } +#endif + +extern void swiotlb_print_info(void); +#endif /* __LINUX_SWIOTLB_H */ diff --git a/include/linux/switch.h b/include/linux/switch.h new file mode 100644 index 00000000..3e4c748e --- /dev/null +++ b/include/linux/switch.h @@ -0,0 +1,53 @@ +/* + * Switch class driver + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * +*/ + +#ifndef __LINUX_SWITCH_H__ +#define __LINUX_SWITCH_H__ + +struct switch_dev { + const char *name; + struct device *dev; + int index; + int state; + + ssize_t (*print_name)(struct switch_dev *sdev, char *buf); + ssize_t (*print_state)(struct switch_dev *sdev, char *buf); +}; + +struct gpio_switch_platform_data { + const char *name; + unsigned gpio; + + /* if NULL, switch_dev.name will be printed */ + const char *name_on; + const char *name_off; + /* if NULL, "0" or "1" will be printed */ + const char *state_on; + const char *state_off; +}; + +extern int switch_dev_register(struct switch_dev *sdev); +extern void switch_dev_unregister(struct switch_dev *sdev); + +static inline int switch_get_state(struct switch_dev *sdev) +{ + return sdev->state; +} + +extern void switch_set_state(struct switch_dev *sdev, int state); + +#endif /* __LINUX_SWITCH_H__ */ diff --git a/include/linux/synaptics_i2c_rmi.h b/include/linux/synaptics_i2c_rmi.h new file mode 100644 index 00000000..5539cc52 --- /dev/null +++ b/include/linux/synaptics_i2c_rmi.h @@ -0,0 +1,55 @@ +/* + * include/linux/synaptics_i2c_rmi.h - platform data structure for f75375s sensor + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SYNAPTICS_I2C_RMI_H +#define _LINUX_SYNAPTICS_I2C_RMI_H + +#define SYNAPTICS_I2C_RMI_NAME "synaptics-rmi-ts" + +enum { + SYNAPTICS_FLIP_X = 1UL << 0, + SYNAPTICS_FLIP_Y = 1UL << 1, + SYNAPTICS_SWAP_XY = 1UL << 2, + SYNAPTICS_SNAP_TO_INACTIVE_EDGE = 1UL << 3, +}; + +struct synaptics_i2c_rmi_platform_data { + uint32_t version; /* Use this entry for panels with */ + /* (major << 8 | minor) version or above. */ + /* If non-zero another array entry follows */ + int (*power)(int on); /* Only valid in first array entry */ + uint32_t flags; + unsigned long irqflags; + uint32_t inactive_left; /* 0x10000 = screen width */ + uint32_t inactive_right; /* 0x10000 = screen width */ + uint32_t inactive_top; /* 0x10000 = screen height */ + uint32_t inactive_bottom; /* 0x10000 = screen height */ + uint32_t snap_left_on; /* 0x10000 = screen width */ + uint32_t snap_left_off; /* 0x10000 = screen width */ + uint32_t snap_right_on; /* 0x10000 = screen width */ + uint32_t snap_right_off; /* 0x10000 = screen width */ + uint32_t snap_top_on; /* 0x10000 = screen height */ + uint32_t snap_top_off; /* 0x10000 = screen height */ + uint32_t snap_bottom_on; /* 0x10000 = screen height */ + uint32_t snap_bottom_off; /* 0x10000 = screen height */ + uint32_t fuzz_x; /* 0x10000 = screen width */ + uint32_t fuzz_y; /* 0x10000 = screen height */ + int fuzz_p; + int fuzz_w; + int8_t sensitivity_adjust; +}; + +#endif /* _LINUX_SYNAPTICS_I2C_RMI_H */ diff --git a/include/linux/sync.h b/include/linux/sync.h new file mode 100644 index 00000000..38ea986d --- /dev/null +++ b/include/linux/sync.h @@ -0,0 +1,426 @@ +/* + * include/linux/sync.h + * + * Copyright (C) 2012 Google, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SYNC_H +#define _LINUX_SYNC_H + +#include <linux/types.h> +#ifdef __KERNEL__ + +#include <linux/kref.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/wait.h> + +struct sync_timeline; +struct sync_pt; +struct sync_fence; + +/** + * struct sync_timeline_ops - sync object implementation ops + * @driver_name: name of the implentation + * @dup: duplicate a sync_pt + * @has_signaled: returns: + * 1 if pt has signaled + * 0 if pt has not signaled + * <0 on error + * @compare: returns: + * 1 if b will signal before a + * 0 if a and b will signal at the same time + * -1 if a will signabl before b + * @free_pt: called before sync_pt is freed + * @release_obj: called before sync_timeline is freed + * @print_obj: deprecated + * @print_pt: deprecated + * @fill_driver_data: write implmentation specific driver data to data. + * should return an error if there is not enough room + * as specified by size. This information is returned + * to userspace by SYNC_IOC_FENCE_INFO. + * @timeline_value_str: fill str with the value of the sync_timeline's counter + * @pt_value_str: fill str with the value of the sync_pt + */ +struct sync_timeline_ops { + const char *driver_name; + + /* required */ + struct sync_pt *(*dup)(struct sync_pt *pt); + + /* required */ + int (*has_signaled)(struct sync_pt *pt); + + /* required */ + int (*compare)(struct sync_pt *a, struct sync_pt *b); + + /* optional */ + void (*free_pt)(struct sync_pt *sync_pt); + + /* optional */ + void (*release_obj)(struct sync_timeline *sync_timeline); + + /* deprecated */ + void (*print_obj)(struct seq_file *s, + struct sync_timeline *sync_timeline); + + /* deprecated */ + void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt); + + /* optional */ + int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size); + + /* optional */ + void (*timeline_value_str)(struct sync_timeline *timeline, char *str, + int size); + + /* optional */ + void (*pt_value_str)(struct sync_pt *pt, char *str, int size); +}; + +/** + * struct sync_timeline - sync object + * @kref: reference count on fence. + * @ops: ops that define the implementaiton of the sync_timeline + * @name: name of the sync_timeline. 
Useful for debugging + * @destoryed: set when sync_timeline is destroyed + * @child_list_head: list of children sync_pts for this sync_timeline + * @child_list_lock: lock protecting @child_list_head, destroyed, and + * sync_pt.status + * @active_list_head: list of active (unsignaled/errored) sync_pts + * @sync_timeline_list: membership in global sync_timeline_list + */ +struct sync_timeline { + struct kref kref; + const struct sync_timeline_ops *ops; + char name[32]; + + /* protected by child_list_lock */ + bool destroyed; + + struct list_head child_list_head; + spinlock_t child_list_lock; + + struct list_head active_list_head; + spinlock_t active_list_lock; + + struct list_head sync_timeline_list; +}; + +/** + * struct sync_pt - sync point + * @parent: sync_timeline to which this sync_pt belongs + * @child_list: membership in sync_timeline.child_list_head + * @active_list: membership in sync_timeline.active_list_head + * @signaled_list: membership in temorary signaled_list on stack + * @fence: sync_fence to which the sync_pt belongs + * @pt_list: membership in sync_fence.pt_list_head + * @status: 1: signaled, 0:active, <0: error + * @timestamp: time which sync_pt status transitioned from active to + * singaled or error. + */ +struct sync_pt { + struct sync_timeline *parent; + struct list_head child_list; + + struct list_head active_list; + struct list_head signaled_list; + + struct sync_fence *fence; + struct list_head pt_list; + + /* protected by parent->active_list_lock */ + int status; + + ktime_t timestamp; +}; + +/** + * struct sync_fence - sync fence + * @file: file representing this fence + * @kref: referenace count on fence. + * @name: name of sync_fence. Useful for debugging + * @pt_list_head: list of sync_pts in ths fence. immutable once fence + * is created + * @waiter_list_head: list of asynchronous waiters on this fence + * @waiter_list_lock: lock protecting @waiter_list_head and @status + * @status: 1: signaled, 0:active, <0: error + * + * @wq: wait queue for fence signaling + * @sync_fence_list: membership in global fence list + */ +struct sync_fence { + struct file *file; + struct kref kref; + char name[32]; + + /* this list is immutable once the fence is created */ + struct list_head pt_list_head; + + struct list_head waiter_list_head; + spinlock_t waiter_list_lock; /* also protects status */ + int status; + + wait_queue_head_t wq; + + struct list_head sync_fence_list; +}; + +struct sync_fence_waiter; +typedef void (*sync_callback_t)(struct sync_fence *fence, + struct sync_fence_waiter *waiter); + +/** + * struct sync_fence_waiter - metadata for asynchronous waiter on a fence + * @waiter_list: membership in sync_fence.waiter_list_head + * @callback: function pointer to call when fence signals + * @callback_data: pointer to pass to @callback + */ +struct sync_fence_waiter { + struct list_head waiter_list; + + sync_callback_t callback; +}; + +static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, + sync_callback_t callback) +{ + waiter->callback = callback; +} + +/* + * API for sync_timeline implementers + */ + +/** + * sync_timeline_create() - creates a sync object + * @ops: specifies the implemention ops for the object + * @size: size to allocate for this obj + * @name: sync_timeline name + * + * Creates a new sync_timeline which will use the implemetation specified by + * @ops. @size bytes will be allocated allowing for implemntation specific + * data to be kept after the generic sync_timeline stuct. 
+ */ +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, + int size, const char *name); + +/** + * sync_timeline_destory() - destorys a sync object + * @obj: sync_timeline to destroy + * + * A sync implemntation should call this when the @obj is going away + * (i.e. module unload.) @obj won't actually be freed until all its childern + * sync_pts are freed. + */ +void sync_timeline_destroy(struct sync_timeline *obj); + +/** + * sync_timeline_signal() - signal a status change on a sync_timeline + * @obj: sync_timeline to signal + * + * A sync implemntation should call this any time one of it's sync_pts + * has signaled or has an error condition. + */ +void sync_timeline_signal(struct sync_timeline *obj); + +/** + * sync_pt_create() - creates a sync pt + * @parent: sync_pt's parent sync_timeline + * @size: size to allocate for this pt + * + * Creates a new sync_pt as a chiled of @parent. @size bytes will be + * allocated allowing for implemntation specific data to be kept after + * the generic sync_timeline struct. + */ +struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size); + +/** + * sync_pt_free() - frees a sync pt + * @pt: sync_pt to free + * + * This should only be called on sync_pts which have been created but + * not added to a fence. + */ +void sync_pt_free(struct sync_pt *pt); + +/** + * sync_fence_create() - creates a sync fence + * @name: name of fence to create + * @pt: sync_pt to add to the fence + * + * Creates a fence containg @pt. Once this is called, the fence takes + * ownership of @pt. + */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt); + +/* + * API for sync_fence consumers + */ + +/** + * sync_fence_merge() - merge two fences + * @name: name of new fence + * @a: fence a + * @b: fence b + * + * Creates a new fence which contains copies of all the sync_pts in both + * @a and @b. @a and @b remain valid, independent fences. + */ +struct sync_fence *sync_fence_merge(const char *name, + struct sync_fence *a, struct sync_fence *b); + +/** + * sync_fence_fdget() - get a fence from an fd + * @fd: fd referencing a fence + * + * Ensures @fd references a valid fence, increments the refcount of the backing + * file, and returns the fence. + */ +struct sync_fence *sync_fence_fdget(int fd); + +/** + * sync_fence_put() - puts a refernnce of a sync fence + * @fence: fence to put + * + * Puts a reference on @fence. If this is the last reference, the fence and + * all it's sync_pts will be freed + */ +void sync_fence_put(struct sync_fence *fence); + +/** + * sync_fence_install() - installs a fence into a file descriptor + * @fence: fence to instal + * @fd: file descriptor in which to install the fence + * + * Installs @fence into @fd. @fd's should be acquired through get_unused_fd(). + */ +void sync_fence_install(struct sync_fence *fence, int fd); + +/** + * sync_fence_wait_async() - registers and async wait on the fence + * @fence: fence to wait on + * @waiter: waiter callback struck + * + * Returns 1 if @fence has already signaled. + * + * Registers a callback to be called when @fence signals or has an error. + * @waiter should be initialized with sync_fence_waiter_init(). + */ +int sync_fence_wait_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter); + +/** + * sync_fence_cancel_async() - cancels an async wait + * @fence: fence to wait on + * @waiter: waiter callback struck + * + * returns 0 if waiter was removed from fence's async waiter list. 
+ * returns -ENOENT if waiter was not found on fence's async waiter list. + * + * Cancels a previously registered async wait. Will fail gracefully if + * @waiter was never registered or if @fence has already signaled @waiter. + */ +int sync_fence_cancel_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter); + +/** + * sync_fence_wait() - wait on fence + * @fence: fence to wait on + * @tiemout: timeout in ms + * + * Wait for @fence to be signaled or have an error. Waits indefinitely + * if @timeout < 0 + */ +int sync_fence_wait(struct sync_fence *fence, long timeout); + +#endif /* __KERNEL__ */ + +/** + * struct sync_merge_data - data passed to merge ioctl + * @fd2: file descriptor of second fence + * @name: name of new fence + * @fence: returns the fd of the new fence to userspace + */ +struct sync_merge_data { + __s32 fd2; /* fd of second fence */ + char name[32]; /* name of new fence */ + __s32 fence; /* fd on newly created fence */ +}; + +/** + * struct sync_pt_info - detailed sync_pt information + * @len: length of sync_pt_info including any driver_data + * @obj_name: name of parent sync_timeline + * @driver_name: name of driver implmenting the parent + * @status: status of the sync_pt 0:active 1:signaled <0:error + * @timestamp_ns: timestamp of status change in nanoseconds + * @driver_data: any driver dependant data + */ +struct sync_pt_info { + __u32 len; + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u64 timestamp_ns; + + __u8 driver_data[0]; +}; + +/** + * struct sync_fence_info_data - data returned from fence info ioctl + * @len: ioctl caller writes the size of the buffer its passing in. + * ioctl returns length of sync_fence_data reutnred to userspace + * including pt_info. + * @name: name of fence + * @status: status of fence. 1: signaled 0:active <0:error + * @pt_info: a sync_pt_info struct for every sync_pt in the fence + */ +struct sync_fence_info_data { + __u32 len; + char name[32]; + __s32 status; + + __u8 pt_info[0]; +}; + +#define SYNC_IOC_MAGIC '>' + +/** + * DOC: SYNC_IOC_WAIT - wait for a fence to signal + * + * pass timeout in milliseconds. Waits indefinitely timeout < 0. + */ +#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32) + +/** + * DOC: SYNC_IOC_MERGE - merge two fences + * + * Takes a struct sync_merge_data. Creates a new fence containing copies of + * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the + * new fence's fd in sync_merge_data.fence + */ +#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data) + +/** + * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence + * + * Takes a struct sync_fence_info_data with extra space allocated for pt_info. + * Caller should write the size of the buffer into len. On return, len is + * updated to reflect the total size of the sync_fence_info_data including + * pt_info. + * + * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. + * To itterate over the sync_pt_infos, use the sync_pt_info.len field. 
+ */ +#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\ + struct sync_fence_info_data) + +#endif /* _LINUX_SYNC_H */ diff --git a/include/linux/synclink.h b/include/linux/synclink.h new file mode 100644 index 00000000..2e7d81c4 --- /dev/null +++ b/include/linux/synclink.h @@ -0,0 +1,323 @@ +/* + * SyncLink Multiprotocol Serial Adapter Driver + * + * $Id: synclink.h,v 3.14 2006/07/17 20:15:43 paulkf Exp $ + * + * Copyright (C) 1998-2000 by Microgate Corporation + * + * Redistribution of this file is permitted under + * the terms of the GNU Public License (GPL) + */ + +#ifndef _SYNCLINK_H_ +#define _SYNCLINK_H_ +#define SYNCLINK_H_VERSION 3.6 + +#include <linux/types.h> + +#define BIT0 0x0001 +#define BIT1 0x0002 +#define BIT2 0x0004 +#define BIT3 0x0008 +#define BIT4 0x0010 +#define BIT5 0x0020 +#define BIT6 0x0040 +#define BIT7 0x0080 +#define BIT8 0x0100 +#define BIT9 0x0200 +#define BIT10 0x0400 +#define BIT11 0x0800 +#define BIT12 0x1000 +#define BIT13 0x2000 +#define BIT14 0x4000 +#define BIT15 0x8000 +#define BIT16 0x00010000 +#define BIT17 0x00020000 +#define BIT18 0x00040000 +#define BIT19 0x00080000 +#define BIT20 0x00100000 +#define BIT21 0x00200000 +#define BIT22 0x00400000 +#define BIT23 0x00800000 +#define BIT24 0x01000000 +#define BIT25 0x02000000 +#define BIT26 0x04000000 +#define BIT27 0x08000000 +#define BIT28 0x10000000 +#define BIT29 0x20000000 +#define BIT30 0x40000000 +#define BIT31 0x80000000 + + +#define HDLC_MAX_FRAME_SIZE 65535 +#define MAX_ASYNC_TRANSMIT 4096 +#define MAX_ASYNC_BUFFER_SIZE 4096 + +#define ASYNC_PARITY_NONE 0 +#define ASYNC_PARITY_EVEN 1 +#define ASYNC_PARITY_ODD 2 +#define ASYNC_PARITY_SPACE 3 + +#define HDLC_FLAG_UNDERRUN_ABORT7 0x0000 +#define HDLC_FLAG_UNDERRUN_ABORT15 0x0001 +#define HDLC_FLAG_UNDERRUN_FLAG 0x0002 +#define HDLC_FLAG_UNDERRUN_CRC 0x0004 +#define HDLC_FLAG_SHARE_ZERO 0x0010 +#define HDLC_FLAG_AUTO_CTS 0x0020 +#define HDLC_FLAG_AUTO_DCD 0x0040 +#define HDLC_FLAG_AUTO_RTS 0x0080 +#define HDLC_FLAG_RXC_DPLL 0x0100 +#define HDLC_FLAG_RXC_BRG 0x0200 +#define HDLC_FLAG_RXC_TXCPIN 0x8000 +#define HDLC_FLAG_RXC_RXCPIN 0x0000 +#define HDLC_FLAG_TXC_DPLL 0x0400 +#define HDLC_FLAG_TXC_BRG 0x0800 +#define HDLC_FLAG_TXC_TXCPIN 0x0000 +#define HDLC_FLAG_TXC_RXCPIN 0x0008 +#define HDLC_FLAG_DPLL_DIV8 0x1000 +#define HDLC_FLAG_DPLL_DIV16 0x2000 +#define HDLC_FLAG_DPLL_DIV32 0x0000 +#define HDLC_FLAG_HDLC_LOOPMODE 0x4000 + +#define HDLC_CRC_NONE 0 +#define HDLC_CRC_16_CCITT 1 +#define HDLC_CRC_32_CCITT 2 +#define HDLC_CRC_MASK 0x00ff +#define HDLC_CRC_RETURN_EX 0x8000 + +#define RX_OK 0 +#define RX_CRC_ERROR 1 + +#define HDLC_TXIDLE_FLAGS 0 +#define HDLC_TXIDLE_ALT_ZEROS_ONES 1 +#define HDLC_TXIDLE_ZEROS 2 +#define HDLC_TXIDLE_ONES 3 +#define HDLC_TXIDLE_ALT_MARK_SPACE 4 +#define HDLC_TXIDLE_SPACE 5 +#define HDLC_TXIDLE_MARK 6 +#define HDLC_TXIDLE_CUSTOM_8 0x10000000 +#define HDLC_TXIDLE_CUSTOM_16 0x20000000 + +#define HDLC_ENCODING_NRZ 0 +#define HDLC_ENCODING_NRZB 1 +#define HDLC_ENCODING_NRZI_MARK 2 +#define HDLC_ENCODING_NRZI_SPACE 3 +#define HDLC_ENCODING_NRZI HDLC_ENCODING_NRZI_SPACE +#define HDLC_ENCODING_BIPHASE_MARK 4 +#define HDLC_ENCODING_BIPHASE_SPACE 5 +#define HDLC_ENCODING_BIPHASE_LEVEL 6 +#define HDLC_ENCODING_DIFF_BIPHASE_LEVEL 7 + +#define HDLC_PREAMBLE_LENGTH_8BITS 0 +#define HDLC_PREAMBLE_LENGTH_16BITS 1 +#define HDLC_PREAMBLE_LENGTH_32BITS 2 +#define HDLC_PREAMBLE_LENGTH_64BITS 3 + +#define HDLC_PREAMBLE_PATTERN_NONE 0 +#define HDLC_PREAMBLE_PATTERN_ZEROS 1 +#define HDLC_PREAMBLE_PATTERN_FLAGS 2 +#define 
HDLC_PREAMBLE_PATTERN_10 3 +#define HDLC_PREAMBLE_PATTERN_01 4 +#define HDLC_PREAMBLE_PATTERN_ONES 5 + +#define MGSL_MODE_ASYNC 1 +#define MGSL_MODE_HDLC 2 +#define MGSL_MODE_MONOSYNC 3 +#define MGSL_MODE_BISYNC 4 +#define MGSL_MODE_RAW 6 +#define MGSL_MODE_BASE_CLOCK 7 +#define MGSL_MODE_XSYNC 8 + +#define MGSL_BUS_TYPE_ISA 1 +#define MGSL_BUS_TYPE_EISA 2 +#define MGSL_BUS_TYPE_PCI 5 + +#define MGSL_INTERFACE_MASK 0xf +#define MGSL_INTERFACE_DISABLE 0 +#define MGSL_INTERFACE_RS232 1 +#define MGSL_INTERFACE_V35 2 +#define MGSL_INTERFACE_RS422 3 +#define MGSL_INTERFACE_RTS_EN 0x10 +#define MGSL_INTERFACE_LL 0x20 +#define MGSL_INTERFACE_RL 0x40 +#define MGSL_INTERFACE_MSB_FIRST 0x80 + +typedef struct _MGSL_PARAMS +{ + /* Common */ + + unsigned long mode; /* Asynchronous or HDLC */ + unsigned char loopback; /* internal loopback mode */ + + /* HDLC Only */ + + unsigned short flags; + unsigned char encoding; /* NRZ, NRZI, etc. */ + unsigned long clock_speed; /* external clock speed in bits per second */ + unsigned char addr_filter; /* receive HDLC address filter, 0xFF = disable */ + unsigned short crc_type; /* None, CRC16-CCITT, or CRC32-CCITT */ + unsigned char preamble_length; + unsigned char preamble; + + /* Async Only */ + + unsigned long data_rate; /* bits per second */ + unsigned char data_bits; /* 7 or 8 data bits */ + unsigned char stop_bits; /* 1 or 2 stop bits */ + unsigned char parity; /* none, even, or odd */ + +} MGSL_PARAMS, *PMGSL_PARAMS; + +#define MICROGATE_VENDOR_ID 0x13c0 +#define SYNCLINK_DEVICE_ID 0x0010 +#define MGSCC_DEVICE_ID 0x0020 +#define SYNCLINK_SCA_DEVICE_ID 0x0030 +#define SYNCLINK_GT_DEVICE_ID 0x0070 +#define SYNCLINK_GT4_DEVICE_ID 0x0080 +#define SYNCLINK_AC_DEVICE_ID 0x0090 +#define SYNCLINK_GT2_DEVICE_ID 0x00A0 +#define MGSL_MAX_SERIAL_NUMBER 30 + +/* +** device diagnostics status +*/ + +#define DiagStatus_OK 0 +#define DiagStatus_AddressFailure 1 +#define DiagStatus_AddressConflict 2 +#define DiagStatus_IrqFailure 3 +#define DiagStatus_IrqConflict 4 +#define DiagStatus_DmaFailure 5 +#define DiagStatus_DmaConflict 6 +#define DiagStatus_PciAdapterNotFound 7 +#define DiagStatus_CantAssignPciResources 8 +#define DiagStatus_CantAssignPciMemAddr 9 +#define DiagStatus_CantAssignPciIoAddr 10 +#define DiagStatus_CantAssignPciIrq 11 +#define DiagStatus_MemoryError 12 + +#define SerialSignal_DCD 0x01 /* Data Carrier Detect */ +#define SerialSignal_TXD 0x02 /* Transmit Data */ +#define SerialSignal_RI 0x04 /* Ring Indicator */ +#define SerialSignal_RXD 0x08 /* Receive Data */ +#define SerialSignal_CTS 0x10 /* Clear to Send */ +#define SerialSignal_RTS 0x20 /* Request to Send */ +#define SerialSignal_DSR 0x40 /* Data Set Ready */ +#define SerialSignal_DTR 0x80 /* Data Terminal Ready */ + + +/* + * Counters of the input lines (CTS, DSR, RI, CD) interrupts + */ +struct mgsl_icount { + __u32 cts, dsr, rng, dcd, tx, rx; + __u32 frame, parity, overrun, brk; + __u32 buf_overrun; + __u32 txok; + __u32 txunder; + __u32 txabort; + __u32 txtimeout; + __u32 rxshort; + __u32 rxlong; + __u32 rxabort; + __u32 rxover; + __u32 rxcrc; + __u32 rxok; + __u32 exithunt; + __u32 rxidle; +}; + +struct gpio_desc { + __u32 state; + __u32 smask; + __u32 dir; + __u32 dmask; +}; + +#define DEBUG_LEVEL_DATA 1 +#define DEBUG_LEVEL_ERROR 2 +#define DEBUG_LEVEL_INFO 3 +#define DEBUG_LEVEL_BH 4 +#define DEBUG_LEVEL_ISR 5 + +/* +** Event bit flags for use with MgslWaitEvent +*/ + +#define MgslEvent_DsrActive 0x0001 +#define MgslEvent_DsrInactive 0x0002 +#define MgslEvent_Dsr 0x0003 +#define 
MgslEvent_CtsActive 0x0004 +#define MgslEvent_CtsInactive 0x0008 +#define MgslEvent_Cts 0x000c +#define MgslEvent_DcdActive 0x0010 +#define MgslEvent_DcdInactive 0x0020 +#define MgslEvent_Dcd 0x0030 +#define MgslEvent_RiActive 0x0040 +#define MgslEvent_RiInactive 0x0080 +#define MgslEvent_Ri 0x00c0 +#define MgslEvent_ExitHuntMode 0x0100 +#define MgslEvent_IdleReceived 0x0200 + +/* Private IOCTL codes: + * + * MGSL_IOCSPARAMS set MGSL_PARAMS structure values + * MGSL_IOCGPARAMS get current MGSL_PARAMS structure values + * MGSL_IOCSTXIDLE set current transmit idle mode + * MGSL_IOCGTXIDLE get current transmit idle mode + * MGSL_IOCTXENABLE enable or disable transmitter + * MGSL_IOCRXENABLE enable or disable receiver + * MGSL_IOCTXABORT abort transmitting frame (HDLC) + * MGSL_IOCGSTATS return current statistics + * MGSL_IOCWAITEVENT wait for specified event to occur + * MGSL_LOOPTXDONE transmit in HDLC LoopMode done + * MGSL_IOCSIF set the serial interface type + * MGSL_IOCGIF get the serial interface type + */ +#define MGSL_MAGIC_IOC 'm' +#define MGSL_IOCSPARAMS _IOW(MGSL_MAGIC_IOC,0,struct _MGSL_PARAMS) +#define MGSL_IOCGPARAMS _IOR(MGSL_MAGIC_IOC,1,struct _MGSL_PARAMS) +#define MGSL_IOCSTXIDLE _IO(MGSL_MAGIC_IOC,2) +#define MGSL_IOCGTXIDLE _IO(MGSL_MAGIC_IOC,3) +#define MGSL_IOCTXENABLE _IO(MGSL_MAGIC_IOC,4) +#define MGSL_IOCRXENABLE _IO(MGSL_MAGIC_IOC,5) +#define MGSL_IOCTXABORT _IO(MGSL_MAGIC_IOC,6) +#define MGSL_IOCGSTATS _IO(MGSL_MAGIC_IOC,7) +#define MGSL_IOCWAITEVENT _IOWR(MGSL_MAGIC_IOC,8,int) +#define MGSL_IOCCLRMODCOUNT _IO(MGSL_MAGIC_IOC,15) +#define MGSL_IOCLOOPTXDONE _IO(MGSL_MAGIC_IOC,9) +#define MGSL_IOCSIF _IO(MGSL_MAGIC_IOC,10) +#define MGSL_IOCGIF _IO(MGSL_MAGIC_IOC,11) +#define MGSL_IOCSGPIO _IOW(MGSL_MAGIC_IOC,16,struct gpio_desc) +#define MGSL_IOCGGPIO _IOR(MGSL_MAGIC_IOC,17,struct gpio_desc) +#define MGSL_IOCWAITGPIO _IOWR(MGSL_MAGIC_IOC,18,struct gpio_desc) +#define MGSL_IOCSXSYNC _IO(MGSL_MAGIC_IOC, 19) +#define MGSL_IOCGXSYNC _IO(MGSL_MAGIC_IOC, 20) +#define MGSL_IOCSXCTRL _IO(MGSL_MAGIC_IOC, 21) +#define MGSL_IOCGXCTRL _IO(MGSL_MAGIC_IOC, 22) + +#ifdef __KERNEL__ +/* provide 32 bit ioctl compatibility on 64 bit systems */ +#ifdef CONFIG_COMPAT +#include <linux/compat.h> +struct MGSL_PARAMS32 { + compat_ulong_t mode; + unsigned char loopback; + unsigned short flags; + unsigned char encoding; + compat_ulong_t clock_speed; + unsigned char addr_filter; + unsigned short crc_type; + unsigned char preamble_length; + unsigned char preamble; + compat_ulong_t data_rate; + unsigned char data_bits; + unsigned char stop_bits; + unsigned char parity; +}; +#define MGSL_IOCSPARAMS32 _IOW(MGSL_MAGIC_IOC,0,struct MGSL_PARAMS32) +#define MGSL_IOCGPARAMS32 _IOR(MGSL_MAGIC_IOC,1,struct MGSL_PARAMS32) +#endif +#endif + +#endif /* _SYNCLINK_H_ */ diff --git a/include/linux/sys.h b/include/linux/sys.h new file mode 100644 index 00000000..daa6008b --- /dev/null +++ b/include/linux/sys.h @@ -0,0 +1,29 @@ +#ifndef _LINUX_SYS_H +#define _LINUX_SYS_H + +/* + * This file is no longer used or needed + */ + +/* + * These are system calls that will be removed at some time + * due to newer versions existing.. + * (please be careful - ibcs2 may need some of these). 
+ */ +#ifdef notdef +#define _sys_waitpid _sys_old_syscall /* _sys_wait4 */ +#define _sys_olduname _sys_old_syscall /* _sys_newuname */ +#define _sys_uname _sys_old_syscall /* _sys_newuname */ +#define _sys_stat _sys_old_syscall /* _sys_newstat */ +#define _sys_fstat _sys_old_syscall /* _sys_newfstat */ +#define _sys_lstat _sys_old_syscall /* _sys_newlstat */ +#define _sys_signal _sys_old_syscall /* _sys_sigaction */ +#define _sys_sgetmask _sys_old_syscall /* _sys_sigprocmask */ +#define _sys_ssetmask _sys_old_syscall /* _sys_sigprocmask */ +#endif + +/* + * These are system calls that haven't been implemented yet + * but have an entry in the table for future expansion.. + */ +#endif diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h new file mode 100644 index 00000000..2739ccb6 --- /dev/null +++ b/include/linux/sys_soc.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2 + */ +#ifndef __SOC_BUS_H +#define __SOC_BUS_H + +#include <linux/device.h> + +struct soc_device_attribute { + const char *machine; + const char *family; + const char *revision; + const char *soc_id; +}; + +/** + * soc_device_register - register SoC as a device + * @soc_plat_dev_attr: Attributes passed from platform to be attributed to a SoC + */ +struct soc_device *soc_device_register( + struct soc_device_attribute *soc_plat_dev_attr); + +/** + * soc_device_unregister - unregister SoC device + * @dev: SoC device to be unregistered + */ +void soc_device_unregister(struct soc_device *soc_dev); + +/** + * soc_device_to_device - helper function to fetch struct device + * @soc: Previously registered SoC device container + */ +struct device *soc_device_to_device(struct soc_device *soc); + +#endif /* __SOC_BUS_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h new file mode 100644 index 00000000..3de3acb8 --- /dev/null +++ b/include/linux/syscalls.h @@ -0,0 +1,861 @@ +/* + * syscalls.h - Linux syscall interfaces (non-arch-specific) + * + * Copyright (c) 2004 Randy Dunlap + * Copyright (c) 2004 Open Source Development Labs + * + * This file is released under the GPLv2. + * See the file COPYING for more details. 
+ */ + +#ifndef _LINUX_SYSCALLS_H +#define _LINUX_SYSCALLS_H + +struct epoll_event; +struct iattr; +struct inode; +struct iocb; +struct io_event; +struct iovec; +struct itimerspec; +struct itimerval; +struct kexec_segment; +struct linux_dirent; +struct linux_dirent64; +struct list_head; +struct mmap_arg_struct; +struct msgbuf; +struct msghdr; +struct mmsghdr; +struct msqid_ds; +struct new_utsname; +struct nfsctl_arg; +struct __old_kernel_stat; +struct oldold_utsname; +struct old_utsname; +struct pollfd; +struct rlimit; +struct rlimit64; +struct rusage; +struct sched_param; +struct sel_arg_struct; +struct semaphore; +struct sembuf; +struct shmid_ds; +struct sockaddr; +struct stat; +struct stat64; +struct statfs; +struct statfs64; +struct __sysctl_args; +struct sysinfo; +struct timespec; +struct timeval; +struct timex; +struct timezone; +struct tms; +struct utimbuf; +struct mq_attr; +struct compat_stat; +struct compat_timeval; +struct robust_list_head; +struct getcpu_cache; +struct old_linux_dirent; +struct perf_event_attr; +struct file_handle; + +#include <linux/types.h> +#include <linux/aio_abi.h> +#include <linux/capability.h> +#include <linux/list.h> +#include <linux/bug.h> +#include <linux/sem.h> +#include <asm/siginfo.h> +#include <asm/signal.h> +#include <linux/unistd.h> +#include <linux/quota.h> +#include <linux/key.h> +#include <trace/syscall.h> + +#define __SC_DECL1(t1, a1) t1 a1 +#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) +#define __SC_DECL3(t3, a3, ...) t3 a3, __SC_DECL2(__VA_ARGS__) +#define __SC_DECL4(t4, a4, ...) t4 a4, __SC_DECL3(__VA_ARGS__) +#define __SC_DECL5(t5, a5, ...) t5 a5, __SC_DECL4(__VA_ARGS__) +#define __SC_DECL6(t6, a6, ...) t6 a6, __SC_DECL5(__VA_ARGS__) + +#define __SC_LONG1(t1, a1) long a1 +#define __SC_LONG2(t2, a2, ...) long a2, __SC_LONG1(__VA_ARGS__) +#define __SC_LONG3(t3, a3, ...) long a3, __SC_LONG2(__VA_ARGS__) +#define __SC_LONG4(t4, a4, ...) long a4, __SC_LONG3(__VA_ARGS__) +#define __SC_LONG5(t5, a5, ...) long a5, __SC_LONG4(__VA_ARGS__) +#define __SC_LONG6(t6, a6, ...) long a6, __SC_LONG5(__VA_ARGS__) + +#define __SC_CAST1(t1, a1) (t1) a1 +#define __SC_CAST2(t2, a2, ...) (t2) a2, __SC_CAST1(__VA_ARGS__) +#define __SC_CAST3(t3, a3, ...) (t3) a3, __SC_CAST2(__VA_ARGS__) +#define __SC_CAST4(t4, a4, ...) (t4) a4, __SC_CAST3(__VA_ARGS__) +#define __SC_CAST5(t5, a5, ...) (t5) a5, __SC_CAST4(__VA_ARGS__) +#define __SC_CAST6(t6, a6, ...) (t6) a6, __SC_CAST5(__VA_ARGS__) + +#define __SC_TEST(type) BUILD_BUG_ON(sizeof(type) > sizeof(long)) +#define __SC_TEST1(t1, a1) __SC_TEST(t1) +#define __SC_TEST2(t2, a2, ...) __SC_TEST(t2); __SC_TEST1(__VA_ARGS__) +#define __SC_TEST3(t3, a3, ...) __SC_TEST(t3); __SC_TEST2(__VA_ARGS__) +#define __SC_TEST4(t4, a4, ...) __SC_TEST(t4); __SC_TEST3(__VA_ARGS__) +#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) +#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) + +#ifdef CONFIG_FTRACE_SYSCALLS +#define __SC_STR_ADECL1(t, a) #a +#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) +#define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__) +#define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__) +#define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__) +#define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__) + +#define __SC_STR_TDECL1(t, a) #t +#define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__) +#define __SC_STR_TDECL3(t, a, ...) 
#t, __SC_STR_TDECL2(__VA_ARGS__) +#define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__) +#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) +#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) + +extern struct ftrace_event_class event_class_syscall_enter; +extern struct ftrace_event_class event_class_syscall_exit; +extern struct trace_event_functions enter_syscall_print_funcs; +extern struct trace_event_functions exit_syscall_print_funcs; + +#define SYSCALL_TRACE_ENTER_EVENT(sname) \ + static struct syscall_metadata __syscall_meta_##sname; \ + static struct ftrace_event_call __used \ + event_enter_##sname = { \ + .name = "sys_enter"#sname, \ + .class = &event_class_syscall_enter, \ + .event.funcs = &enter_syscall_print_funcs, \ + .data = (void *)&__syscall_meta_##sname,\ + .flags = TRACE_EVENT_FL_CAP_ANY, \ + }; \ + static struct ftrace_event_call __used \ + __attribute__((section("_ftrace_events"))) \ + *__event_enter_##sname = &event_enter_##sname; + +#define SYSCALL_TRACE_EXIT_EVENT(sname) \ + static struct syscall_metadata __syscall_meta_##sname; \ + static struct ftrace_event_call __used \ + event_exit_##sname = { \ + .name = "sys_exit"#sname, \ + .class = &event_class_syscall_exit, \ + .event.funcs = &exit_syscall_print_funcs, \ + .data = (void *)&__syscall_meta_##sname,\ + .flags = TRACE_EVENT_FL_CAP_ANY, \ + }; \ + static struct ftrace_event_call __used \ + __attribute__((section("_ftrace_events"))) \ + *__event_exit_##sname = &event_exit_##sname; + +#define SYSCALL_METADATA(sname, nb) \ + SYSCALL_TRACE_ENTER_EVENT(sname); \ + SYSCALL_TRACE_EXIT_EVENT(sname); \ + static struct syscall_metadata __used \ + __syscall_meta_##sname = { \ + .name = "sys"#sname, \ + .syscall_nr = -1, /* Filled in at boot */ \ + .nb_args = nb, \ + .types = types_##sname, \ + .args = args_##sname, \ + .enter_event = &event_enter_##sname, \ + .exit_event = &event_exit_##sname, \ + .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \ + }; \ + static struct syscall_metadata __used \ + __attribute__((section("__syscalls_metadata"))) \ + *__p_syscall_meta_##sname = &__syscall_meta_##sname; + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_TRACE_ENTER_EVENT(_##sname); \ + SYSCALL_TRACE_EXIT_EVENT(_##sname); \ + static struct syscall_metadata __used \ + __syscall_meta__##sname = { \ + .name = "sys_"#sname, \ + .syscall_nr = -1, /* Filled in at boot */ \ + .nb_args = 0, \ + .enter_event = &event_enter__##sname, \ + .exit_event = &event_exit__##sname, \ + .enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \ + }; \ + static struct syscall_metadata __used \ + __attribute__((section("__syscalls_metadata"))) \ + *__p_syscall_meta_##sname = &__syscall_meta__##sname; \ + asmlinkage long sys_##sname(void) +#else +#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) +#endif + +#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE4(name, ...) SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE6(name, ...) SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) + +#ifdef CONFIG_PPC64 +#define SYSCALL_ALIAS(alias, name) \ + asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \ + "\t.globl ." #alias "\n\t.set ." #alias ", ." 
#name) +#else +#if defined(CONFIG_ALPHA) || defined(CONFIG_MIPS) +#define SYSCALL_ALIAS(alias, name) \ + asm ( #alias " = " #name "\n\t.globl " #alias) +#else +#define SYSCALL_ALIAS(alias, name) \ + asm ("\t.globl " #alias "\n\t.set " #alias ", " #name) +#endif +#endif + +#ifdef CONFIG_FTRACE_SYSCALLS +#define SYSCALL_DEFINEx(x, sname, ...) \ + static const char *types_##sname[] = { \ + __SC_STR_TDECL##x(__VA_ARGS__) \ + }; \ + static const char *args_##sname[] = { \ + __SC_STR_ADECL##x(__VA_ARGS__) \ + }; \ + SYSCALL_METADATA(sname, x); \ + __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) +#else +#define SYSCALL_DEFINEx(x, sname, ...) \ + __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) +#endif + +#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS + +#define SYSCALL_DEFINE(name) static inline long SYSC_##name + +#define __SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ + static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ + asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ + { \ + __SC_TEST##x(__VA_ARGS__); \ + return (long) SYSC##name(__SC_CAST##x(__VA_ARGS__)); \ + } \ + SYSCALL_ALIAS(sys##name, SyS##name); \ + static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)) + +#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ + +#define SYSCALL_DEFINE(name) asmlinkage long sys_##name +#define __SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) + +#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ + +asmlinkage long sys_time(time_t __user *tloc); +asmlinkage long sys_stime(time_t __user *tptr); +asmlinkage long sys_gettimeofday(struct timeval __user *tv, + struct timezone __user *tz); +asmlinkage long sys_settimeofday(struct timeval __user *tv, + struct timezone __user *tz); +asmlinkage long sys_adjtimex(struct timex __user *txc_p); + +asmlinkage long sys_times(struct tms __user *tbuf); + +asmlinkage long sys_gettid(void); +asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp); +asmlinkage long sys_alarm(unsigned int seconds); +asmlinkage long sys_getpid(void); +asmlinkage long sys_getppid(void); +asmlinkage long sys_getuid(void); +asmlinkage long sys_geteuid(void); +asmlinkage long sys_getgid(void); +asmlinkage long sys_getegid(void); +asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid); +asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid); +asmlinkage long sys_getpgid(pid_t pid); +asmlinkage long sys_getpgrp(void); +asmlinkage long sys_getsid(pid_t pid); +asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist); + +asmlinkage long sys_setregid(gid_t rgid, gid_t egid); +asmlinkage long sys_setgid(gid_t gid); +asmlinkage long sys_setreuid(uid_t ruid, uid_t euid); +asmlinkage long sys_setuid(uid_t uid); +asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); +asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); +asmlinkage long sys_setfsuid(uid_t uid); +asmlinkage long sys_setfsgid(gid_t gid); +asmlinkage long sys_setpgid(pid_t pid, pid_t pgid); +asmlinkage long sys_setsid(void); +asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist); + +asmlinkage long sys_acct(const char __user *name); +asmlinkage long sys_capget(cap_user_header_t header, + cap_user_data_t dataptr); +asmlinkage long sys_capset(cap_user_header_t header, + const cap_user_data_t data); +asmlinkage long sys_personality(unsigned int personality); + +asmlinkage long sys_sigpending(old_sigset_t __user *set); +asmlinkage long 
sys_sigprocmask(int how, old_sigset_t __user *set, + old_sigset_t __user *oset); +asmlinkage long sys_getitimer(int which, struct itimerval __user *value); +asmlinkage long sys_setitimer(int which, + struct itimerval __user *value, + struct itimerval __user *ovalue); +asmlinkage long sys_timer_create(clockid_t which_clock, + struct sigevent __user *timer_event_spec, + timer_t __user * created_timer_id); +asmlinkage long sys_timer_gettime(timer_t timer_id, + struct itimerspec __user *setting); +asmlinkage long sys_timer_getoverrun(timer_t timer_id); +asmlinkage long sys_timer_settime(timer_t timer_id, int flags, + const struct itimerspec __user *new_setting, + struct itimerspec __user *old_setting); +asmlinkage long sys_timer_delete(timer_t timer_id); +asmlinkage long sys_clock_settime(clockid_t which_clock, + const struct timespec __user *tp); +asmlinkage long sys_clock_gettime(clockid_t which_clock, + struct timespec __user *tp); +asmlinkage long sys_clock_adjtime(clockid_t which_clock, + struct timex __user *tx); +asmlinkage long sys_clock_getres(clockid_t which_clock, + struct timespec __user *tp); +asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags, + const struct timespec __user *rqtp, + struct timespec __user *rmtp); + +asmlinkage long sys_nice(int increment); +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, + struct sched_param __user *param); +asmlinkage long sys_sched_setparam(pid_t pid, + struct sched_param __user *param); +asmlinkage long sys_sched_getscheduler(pid_t pid); +asmlinkage long sys_sched_getparam(pid_t pid, + struct sched_param __user *param); +asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr); +asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long __user *user_mask_ptr); +asmlinkage long sys_sched_yield(void); +asmlinkage long sys_sched_get_priority_max(int policy); +asmlinkage long sys_sched_get_priority_min(int policy); +asmlinkage long sys_sched_rr_get_interval(pid_t pid, + struct timespec __user *interval); +asmlinkage long sys_setpriority(int which, int who, int niceval); +asmlinkage long sys_getpriority(int which, int who); + +asmlinkage long sys_shutdown(int, int); +asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, + void __user *arg); +asmlinkage long sys_restart_syscall(void); +asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, + struct kexec_segment __user *segments, + unsigned long flags); + +asmlinkage long sys_exit(int error_code); +asmlinkage long sys_exit_group(int error_code); +asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, + int options, struct rusage __user *ru); +asmlinkage long sys_waitid(int which, pid_t pid, + struct siginfo __user *infop, + int options, struct rusage __user *ru); +asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options); +asmlinkage long sys_set_tid_address(int __user *tidptr); +asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, + struct timespec __user *utime, u32 __user *uaddr2, + u32 val3); + +asmlinkage long sys_init_module(void __user *umod, unsigned long len, + const char __user *uargs); +asmlinkage long sys_delete_module(const char __user *name_user, + unsigned int flags); + +asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set, + sigset_t __user *oset, size_t sigsetsize); +asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize); +asmlinkage long sys_rt_sigtimedwait(const sigset_t __user 
*uthese, + siginfo_t __user *uinfo, + const struct timespec __user *uts, + size_t sigsetsize); +asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, + siginfo_t __user *uinfo); +asmlinkage long sys_kill(int pid, int sig); +asmlinkage long sys_tgkill(int tgid, int pid, int sig); +asmlinkage long sys_tkill(int pid, int sig); +asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo); +asmlinkage long sys_sgetmask(void); +asmlinkage long sys_ssetmask(int newmask); +asmlinkage long sys_signal(int sig, __sighandler_t handler); +asmlinkage long sys_pause(void); + +asmlinkage long sys_sync(void); +asmlinkage long sys_fsync(unsigned int fd); +asmlinkage long sys_fdatasync(unsigned int fd); +asmlinkage long sys_bdflush(int func, long data); +asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name, + char __user *type, unsigned long flags, + void __user *data); +asmlinkage long sys_umount(char __user *name, int flags); +asmlinkage long sys_oldumount(char __user *name); +asmlinkage long sys_truncate(const char __user *path, long length); +asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length); +asmlinkage long sys_stat(const char __user *filename, + struct __old_kernel_stat __user *statbuf); +asmlinkage long sys_statfs(const char __user * path, + struct statfs __user *buf); +asmlinkage long sys_statfs64(const char __user *path, size_t sz, + struct statfs64 __user *buf); +asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf); +asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, + struct statfs64 __user *buf); +asmlinkage long sys_lstat(const char __user *filename, + struct __old_kernel_stat __user *statbuf); +asmlinkage long sys_fstat(unsigned int fd, + struct __old_kernel_stat __user *statbuf); +asmlinkage long sys_newstat(const char __user *filename, + struct stat __user *statbuf); +asmlinkage long sys_newlstat(const char __user *filename, + struct stat __user *statbuf); +asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf); +asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf); +#if BITS_PER_LONG == 32 +asmlinkage long sys_stat64(const char __user *filename, + struct stat64 __user *statbuf); +asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf); +asmlinkage long sys_lstat64(const char __user *filename, + struct stat64 __user *statbuf); +asmlinkage long sys_truncate64(const char __user *path, loff_t length); +asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); +#endif + +asmlinkage long sys_setxattr(const char __user *path, const char __user *name, + const void __user *value, size_t size, int flags); +asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name, + const void __user *value, size_t size, int flags); +asmlinkage long sys_fsetxattr(int fd, const char __user *name, + const void __user *value, size_t size, int flags); +asmlinkage long sys_getxattr(const char __user *path, const char __user *name, + void __user *value, size_t size); +asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name, + void __user *value, size_t size); +asmlinkage long sys_fgetxattr(int fd, const char __user *name, + void __user *value, size_t size); +asmlinkage long sys_listxattr(const char __user *path, char __user *list, + size_t size); +asmlinkage long sys_llistxattr(const char __user *path, char __user *list, + size_t size); +asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size); +asmlinkage 
long sys_removexattr(const char __user *path, + const char __user *name); +asmlinkage long sys_lremovexattr(const char __user *path, + const char __user *name); +asmlinkage long sys_fremovexattr(int fd, const char __user *name); + +asmlinkage long sys_brk(unsigned long brk); +asmlinkage long sys_mprotect(unsigned long start, size_t len, + unsigned long prot); +asmlinkage long sys_mremap(unsigned long addr, + unsigned long old_len, unsigned long new_len, + unsigned long flags, unsigned long new_addr); +asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, + unsigned long prot, unsigned long pgoff, + unsigned long flags); +asmlinkage long sys_msync(unsigned long start, size_t len, int flags); +asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice); +asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); +asmlinkage long sys_munmap(unsigned long addr, size_t len); +asmlinkage long sys_mlock(unsigned long start, size_t len); +asmlinkage long sys_munlock(unsigned long start, size_t len); +asmlinkage long sys_mlockall(int flags); +asmlinkage long sys_munlockall(void); +asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); +asmlinkage long sys_mincore(unsigned long start, size_t len, + unsigned char __user * vec); + +asmlinkage long sys_pivot_root(const char __user *new_root, + const char __user *put_old); +asmlinkage long sys_chroot(const char __user *filename); +asmlinkage long sys_mknod(const char __user *filename, umode_t mode, + unsigned dev); +asmlinkage long sys_link(const char __user *oldname, + const char __user *newname); +asmlinkage long sys_symlink(const char __user *old, const char __user *new); +asmlinkage long sys_unlink(const char __user *pathname); +asmlinkage long sys_rename(const char __user *oldname, + const char __user *newname); +asmlinkage long sys_chmod(const char __user *filename, umode_t mode); +asmlinkage long sys_fchmod(unsigned int fd, umode_t mode); + +asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); +#if BITS_PER_LONG == 32 +asmlinkage long sys_fcntl64(unsigned int fd, + unsigned int cmd, unsigned long arg); +#endif +asmlinkage long sys_pipe(int __user *fildes); +asmlinkage long sys_pipe2(int __user *fildes, int flags); +asmlinkage long sys_dup(unsigned int fildes); +asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); +asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); +asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); +asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, + unsigned long arg); +asmlinkage long sys_flock(unsigned int fd, unsigned int cmd); +asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t __user *ctx); +asmlinkage long sys_io_destroy(aio_context_t ctx); +asmlinkage long sys_io_getevents(aio_context_t ctx_id, + long min_nr, + long nr, + struct io_event __user *events, + struct timespec __user *timeout); +asmlinkage long sys_io_submit(aio_context_t, long, + struct iocb __user * __user *); +asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, + struct io_event __user *result); +asmlinkage long sys_sendfile(int out_fd, int in_fd, + off_t __user *offset, size_t count); +asmlinkage long sys_sendfile64(int out_fd, int in_fd, + loff_t __user *offset, size_t count); +asmlinkage long sys_readlink(const char __user *path, + char __user *buf, int bufsiz); +asmlinkage long sys_creat(const char __user *pathname, umode_t mode); 
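/*
 * Illustrative usage sketch (assumed userspace code, not part of this header):
 * the asmlinkage prototypes above are the kernel-side entry points; userspace
 * normally reaches them through syscall(2) or a library wrapper. The sketch
 * below drives sys_io_setup()/sys_io_submit()/sys_io_getevents() directly for
 * one buffered read; the helper name aio_read_demo() is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static int aio_read_demo(const char *path)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;

	/* sys_io_setup(): create a context that can hold one in-flight request */
	if (syscall(SYS_io_setup, 1, &ctx) < 0)
		return -1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes     = fd;
	cb.aio_buf        = (unsigned long)buf;
	cb.aio_nbytes     = sizeof(buf);
	cb.aio_offset     = 0;

	/* sys_io_submit() queues the read, sys_io_getevents() reaps completion */
	if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)
		return -1;
	if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) != 1)
		return -1;

	printf("read %lld bytes\n", (long long)ev.res);
	syscall(SYS_io_destroy, ctx);
	close(fd);
	return 0;
}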
+asmlinkage long sys_open(const char __user *filename, + int flags, umode_t mode); +asmlinkage long sys_close(unsigned int fd); +asmlinkage long sys_access(const char __user *filename, int mode); +asmlinkage long sys_vhangup(void); +asmlinkage long sys_chown(const char __user *filename, + uid_t user, gid_t group); +asmlinkage long sys_lchown(const char __user *filename, + uid_t user, gid_t group); +asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); +#ifdef CONFIG_UID16 +asmlinkage long sys_chown16(const char __user *filename, + old_uid_t user, old_gid_t group); +asmlinkage long sys_lchown16(const char __user *filename, + old_uid_t user, old_gid_t group); +asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group); +asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid); +asmlinkage long sys_setgid16(old_gid_t gid); +asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid); +asmlinkage long sys_setuid16(old_uid_t uid); +asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid); +asmlinkage long sys_getresuid16(old_uid_t __user *ruid, + old_uid_t __user *euid, old_uid_t __user *suid); +asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid); +asmlinkage long sys_getresgid16(old_gid_t __user *rgid, + old_gid_t __user *egid, old_gid_t __user *sgid); +asmlinkage long sys_setfsuid16(old_uid_t uid); +asmlinkage long sys_setfsgid16(old_gid_t gid); +asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist); +asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist); +asmlinkage long sys_getuid16(void); +asmlinkage long sys_geteuid16(void); +asmlinkage long sys_getgid16(void); +asmlinkage long sys_getegid16(void); +#endif + +asmlinkage long sys_utime(char __user *filename, + struct utimbuf __user *times); +asmlinkage long sys_utimes(char __user *filename, + struct timeval __user *utimes); +asmlinkage long sys_lseek(unsigned int fd, off_t offset, + unsigned int origin); +asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high, + unsigned long offset_low, loff_t __user *result, + unsigned int origin); +asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count); +asmlinkage long sys_readahead(int fd, loff_t offset, size_t count); +asmlinkage long sys_readv(unsigned long fd, + const struct iovec __user *vec, + unsigned long vlen); +asmlinkage long sys_write(unsigned int fd, const char __user *buf, + size_t count); +asmlinkage long sys_writev(unsigned long fd, + const struct iovec __user *vec, + unsigned long vlen); +asmlinkage long sys_pread64(unsigned int fd, char __user *buf, + size_t count, loff_t pos); +asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, + size_t count, loff_t pos); +asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); +asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); +asmlinkage long sys_getcwd(char __user *buf, unsigned long size); +asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode); +asmlinkage long sys_chdir(const char __user *filename); +asmlinkage long sys_fchdir(unsigned int fd); +asmlinkage long sys_rmdir(const char __user *pathname); +asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len); +asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, + qid_t 
id, void __user *addr); +asmlinkage long sys_getdents(unsigned int fd, + struct linux_dirent __user *dirent, + unsigned int count); +asmlinkage long sys_getdents64(unsigned int fd, + struct linux_dirent64 __user *dirent, + unsigned int count); + +asmlinkage long sys_setsockopt(int fd, int level, int optname, + char __user *optval, int optlen); +asmlinkage long sys_getsockopt(int fd, int level, int optname, + char __user *optval, int __user *optlen); +asmlinkage long sys_bind(int, struct sockaddr __user *, int); +asmlinkage long sys_connect(int, struct sockaddr __user *, int); +asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int); +asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_send(int, void __user *, size_t, unsigned); +asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, + struct sockaddr __user *, int); +asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, + unsigned int vlen, unsigned flags); +asmlinkage long sys_recv(int, void __user *, size_t, unsigned); +asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, + struct sockaddr __user *, int __user *); +asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, + unsigned int vlen, unsigned flags, + struct timespec __user *timeout); +asmlinkage long sys_socket(int, int, int); +asmlinkage long sys_socketpair(int, int, int, int __user *); +asmlinkage long sys_socketcall(int call, unsigned long __user *args); +asmlinkage long sys_listen(int, int); +asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, + int timeout); +asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, + fd_set __user *exp, struct timeval __user *tvp); +asmlinkage long sys_old_select(struct sel_arg_struct __user *arg); +asmlinkage long sys_epoll_create(int size); +asmlinkage long sys_epoll_create1(int flags); +asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, + struct epoll_event __user *event); +asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, + int maxevents, int timeout); +asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events, + int maxevents, int timeout, + const sigset_t __user *sigmask, + size_t sigsetsize); +asmlinkage long sys_gethostname(char __user *name, int len); +asmlinkage long sys_sethostname(char __user *name, int len); +asmlinkage long sys_setdomainname(char __user *name, int len); +asmlinkage long sys_newuname(struct new_utsname __user *name); +asmlinkage long sys_uname(struct old_utsname __user *); +asmlinkage long sys_olduname(struct oldold_utsname __user *); + +asmlinkage long sys_getrlimit(unsigned int resource, + struct rlimit __user *rlim); +#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64)) +asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim); +#endif +asmlinkage long sys_setrlimit(unsigned int resource, + struct rlimit __user *rlim); +asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource, + const struct rlimit64 __user *new_rlim, + struct rlimit64 __user *old_rlim); +asmlinkage long sys_getrusage(int who, struct rusage __user *ru); +asmlinkage long sys_umask(int mask); + 
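/*
 * Illustrative usage sketch (assumed userspace code, not part of this header):
 * the resource-limit prototypes above back the getrlimit(2)/setrlimit(2)/
 * prlimit(2) wrappers; depending on the libc these typically end up in
 * sys_getrlimit()/sys_setrlimit() or sys_prlimit64(). The helper name
 * raise_nofile_demo() is hypothetical.
 */
#include <stdio.h>
#include <sys/resource.h>

static int raise_nofile_demo(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) < 0)
		return -1;
	rl.rlim_cur = rl.rlim_max;	/* raise the soft limit up to the hard limit */
	if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
		return -1;
	printf("RLIMIT_NOFILE soft limit is now %llu\n",
	       (unsigned long long)rl.rlim_cur);
	return 0;
}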
+asmlinkage long sys_msgget(key_t key, int msgflg); +asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp, + size_t msgsz, int msgflg); +asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, + size_t msgsz, long msgtyp, int msgflg); +asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); + +asmlinkage long sys_semget(key_t key, int nsems, int semflg); +asmlinkage long sys_semop(int semid, struct sembuf __user *sops, + unsigned nsops); +asmlinkage long sys_semctl(int semid, int semnum, int cmd, union semun arg); +asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops, + unsigned nsops, + const struct timespec __user *timeout); +asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); +asmlinkage long sys_shmget(key_t key, size_t size, int flag); +asmlinkage long sys_shmdt(char __user *shmaddr); +asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); +asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, + unsigned long third, void __user *ptr, long fifth); + +asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr); +asmlinkage long sys_mq_unlink(const char __user *name); +asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout); +asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout); +asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification); +asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat); + +asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn); +asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, + void __user *buf); +asmlinkage long sys_pciconfig_write(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, + void __user *buf); + +asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); +asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags); +asmlinkage long sys_swapoff(const char __user *specialfile); +asmlinkage long sys_sysctl(struct __sysctl_args __user *args); +asmlinkage long sys_sysinfo(struct sysinfo __user *info); +asmlinkage long sys_sysfs(int option, + unsigned long arg1, unsigned long arg2); +asmlinkage long sys_syslog(int type, char __user *buf, int len); +asmlinkage long sys_uselib(const char __user *library); +asmlinkage long sys_ni_syscall(void); +asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, + unsigned long data); + +asmlinkage long sys_add_key(const char __user *_type, + const char __user *_description, + const void __user *_payload, + size_t plen, + key_serial_t destringid); + +asmlinkage long sys_request_key(const char __user *_type, + const char __user *_description, + const char __user *_callout_info, + key_serial_t destringid); + +asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5); + +asmlinkage long sys_ioprio_set(int which, int who, int ioprio); +asmlinkage long sys_ioprio_get(int which, int who); +asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask, + unsigned long maxnode); +asmlinkage 
long sys_migrate_pages(pid_t pid, unsigned long maxnode, + const unsigned long __user *from, + const unsigned long __user *to); +asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, + const void __user * __user *pages, + const int __user *nodes, + int __user *status, + int flags); +asmlinkage long sys_mbind(unsigned long start, unsigned long len, + unsigned long mode, + unsigned long __user *nmask, + unsigned long maxnode, + unsigned flags); +asmlinkage long sys_get_mempolicy(int __user *policy, + unsigned long __user *nmask, + unsigned long maxnode, + unsigned long addr, unsigned long flags); + +asmlinkage long sys_inotify_init(void); +asmlinkage long sys_inotify_init1(int flags); +asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, + u32 mask); +asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd); + +asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, + __u32 __user *ustatus); +asmlinkage long sys_spu_create(const char __user *name, + unsigned int flags, umode_t mode, int fd); + +asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode, + unsigned dev); +asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode); +asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag); +asmlinkage long sys_symlinkat(const char __user * oldname, + int newdfd, const char __user * newname); +asmlinkage long sys_linkat(int olddfd, const char __user *oldname, + int newdfd, const char __user *newname, int flags); +asmlinkage long sys_renameat(int olddfd, const char __user * oldname, + int newdfd, const char __user * newname); +asmlinkage long sys_futimesat(int dfd, const char __user *filename, + struct timeval __user *utimes); +asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode); +asmlinkage long sys_fchmodat(int dfd, const char __user * filename, + umode_t mode); +asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user, + gid_t group, int flag); +asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, + umode_t mode); +asmlinkage long sys_newfstatat(int dfd, const char __user *filename, + struct stat __user *statbuf, int flag); +asmlinkage long sys_fstatat64(int dfd, const char __user *filename, + struct stat64 __user *statbuf, int flag); +asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf, + int bufsiz); +asmlinkage long sys_utimensat(int dfd, const char __user *filename, + struct timespec __user *utimes, int flags); +asmlinkage long sys_unshare(unsigned long unshare_flags); + +asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, + int fd_out, loff_t __user *off_out, + size_t len, unsigned int flags); + +asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, + unsigned long nr_segs, unsigned int flags); + +asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags); + +asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, + unsigned int flags); +asmlinkage long sys_sync_file_range2(int fd, unsigned int flags, + loff_t offset, loff_t nbytes); +asmlinkage long sys_get_robust_list(int pid, + struct robust_list_head __user * __user *head_ptr, + size_t __user *len_ptr); +asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, + size_t len); +asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); +asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask); 
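/*
 * Illustrative usage sketch (assumed userspace code, not part of this header):
 * sys_signalfd() above (and sys_signalfd4(), declared next) deliver blocked
 * signals through a file descriptor instead of an asynchronous handler. The
 * sketch blocks SIGINT and reads one delivery synchronously; the helper name
 * signalfd_demo() is hypothetical, and glibc's signalfd() wrapper typically
 * lands in sys_signalfd4() on current kernels.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/signalfd.h>

static int signalfd_demo(void)
{
	sigset_t mask;
	struct signalfd_siginfo ssi;
	int sfd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	/* block normal delivery so the signal is only reported via the fd */
	if (sigprocmask(SIG_BLOCK, &mask, NULL) < 0)
		return -1;

	sfd = signalfd(-1, &mask, 0);
	if (sfd < 0)
		return -1;

	if (read(sfd, &ssi, sizeof(ssi)) == (ssize_t)sizeof(ssi))
		printf("got signal %u from pid %u\n", ssi.ssi_signo, ssi.ssi_pid);

	close(sfd);
	return 0;
}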
+asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags); +asmlinkage long sys_timerfd_create(int clockid, int flags); +asmlinkage long sys_timerfd_settime(int ufd, int flags, + const struct itimerspec __user *utmr, + struct itimerspec __user *otmr); +asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); +asmlinkage long sys_eventfd(unsigned int count); +asmlinkage long sys_eventfd2(unsigned int count, int flags); +asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); +asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int); +asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, + fd_set __user *, struct timespec __user *, + void __user *); +asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, + struct timespec __user *, const sigset_t __user *, + size_t); +asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags); +asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, + u64 mask, int fd, + const char __user *pathname); +asmlinkage long sys_syncfs(int fd); + +int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]); + + +asmlinkage long sys_perf_event_open( + struct perf_event_attr __user *attr_uptr, + pid_t pid, int cpu, int group_fd, unsigned long flags); + +asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg); +asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name, + struct file_handle __user *handle, + int __user *mnt_id, int flag); +asmlinkage long sys_open_by_handle_at(int mountdirfd, + struct file_handle __user *handle, + int flags); +asmlinkage long sys_setns(int fd, int nstype); +asmlinkage long sys_process_vm_readv(pid_t pid, + const struct iovec __user *lvec, + unsigned long liovcnt, + const struct iovec __user *rvec, + unsigned long riovcnt, + unsigned long flags); +asmlinkage long sys_process_vm_writev(pid_t pid, + const struct iovec __user *lvec, + unsigned long liovcnt, + const struct iovec __user *rvec, + unsigned long riovcnt, + unsigned long flags); + +#endif diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h new file mode 100644 index 00000000..27b3b0bc --- /dev/null +++ b/include/linux/syscore_ops.h @@ -0,0 +1,29 @@ +/* + * syscore_ops.h - System core operations. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. + * + * This file is released under the GPLv2. 
+ */ + +#ifndef _LINUX_SYSCORE_OPS_H +#define _LINUX_SYSCORE_OPS_H + +#include <linux/list.h> + +struct syscore_ops { + struct list_head node; + int (*suspend)(void); + void (*resume)(void); + void (*shutdown)(void); +}; + +extern void register_syscore_ops(struct syscore_ops *ops); +extern void unregister_syscore_ops(struct syscore_ops *ops); +#ifdef CONFIG_PM_SLEEP +extern int syscore_suspend(void); +extern void syscore_resume(void); +#endif +extern void syscore_shutdown(void); + +#endif diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h new file mode 100644 index 00000000..c34b4c82 --- /dev/null +++ b/include/linux/sysctl.h @@ -0,0 +1,1127 @@ +/* + * sysctl.h: General linux system control interface + * + * Begun 24 March 1995, Stephen Tweedie + * + **************************************************************** + **************************************************************** + ** + ** WARNING: + ** The values in this file are exported to user space via + ** the sysctl() binary interface. Do *NOT* change the + ** numbering of any existing values here, and do not change + ** any numbers within any one set of values. If you have to + ** redefine an existing interface, use a new number for it. + ** The kernel will then return -ENOTDIR to any application using + ** the old binary interface. + ** + **************************************************************** + **************************************************************** + */ + +#ifndef _LINUX_SYSCTL_H +#define _LINUX_SYSCTL_H + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/compiler.h> + +struct completion; + +#define CTL_MAXNAME 10 /* how many path components do we allow in a + call to sysctl? In other words, what is + the largest acceptable value for the nlen + member of a struct __sysctl_args to have? 
*/ + +struct __sysctl_args { + int __user *name; + int nlen; + void __user *oldval; + size_t __user *oldlenp; + void __user *newval; + size_t newlen; + unsigned long __unused[4]; +}; + +/* Define sysctl names first */ + +/* Top-level names: */ + +enum +{ + CTL_KERN=1, /* General kernel info and control */ + CTL_VM=2, /* VM management */ + CTL_NET=3, /* Networking */ + CTL_PROC=4, /* removal breaks strace(1) compilation */ + CTL_FS=5, /* Filesystems */ + CTL_DEBUG=6, /* Debugging */ + CTL_DEV=7, /* Devices */ + CTL_BUS=8, /* Busses */ + CTL_ABI=9, /* Binary emulation */ + CTL_CPU=10, /* CPU stuff (speed scaling, etc) */ + CTL_ARLAN=254, /* arlan wireless driver */ + CTL_S390DBF=5677, /* s390 debug */ + CTL_SUNRPC=7249, /* sunrpc debug */ + CTL_PM=9899, /* frv power management */ + CTL_FRV=9898, /* frv specific sysctls */ +}; + +/* CTL_BUS names: */ +enum +{ + CTL_BUS_ISA=1 /* ISA */ +}; + +/* /proc/sys/fs/inotify/ */ +enum +{ + INOTIFY_MAX_USER_INSTANCES=1, /* max instances per user */ + INOTIFY_MAX_USER_WATCHES=2, /* max watches per user */ + INOTIFY_MAX_QUEUED_EVENTS=3 /* max queued events per instance */ +}; + +/* CTL_KERN names: */ +enum +{ + KERN_OSTYPE=1, /* string: system version */ + KERN_OSRELEASE=2, /* string: system release */ + KERN_OSREV=3, /* int: system revision */ + KERN_VERSION=4, /* string: compile time info */ + KERN_SECUREMASK=5, /* struct: maximum rights mask */ + KERN_PROF=6, /* table: profiling information */ + KERN_NODENAME=7, /* string: hostname */ + KERN_DOMAINNAME=8, /* string: domainname */ + + KERN_PANIC=15, /* int: panic timeout */ + KERN_REALROOTDEV=16, /* real root device to mount after initrd */ + + KERN_SPARC_REBOOT=21, /* reboot command on Sparc */ + KERN_CTLALTDEL=22, /* int: allow ctl-alt-del to reboot */ + KERN_PRINTK=23, /* struct: control printk logging parameters */ + KERN_NAMETRANS=24, /* Name translation */ + KERN_PPC_HTABRECLAIM=25, /* turn htab reclaimation on/off on PPC */ + KERN_PPC_ZEROPAGED=26, /* turn idle page zeroing on/off on PPC */ + KERN_PPC_POWERSAVE_NAP=27, /* use nap mode for power saving */ + KERN_MODPROBE=28, /* string: modprobe path */ + KERN_SG_BIG_BUFF=29, /* int: sg driver reserved buffer size */ + KERN_ACCT=30, /* BSD process accounting parameters */ + KERN_PPC_L2CR=31, /* l2cr register on PPC */ + + KERN_RTSIGNR=32, /* Number of rt sigs queued */ + KERN_RTSIGMAX=33, /* Max queuable */ + + KERN_SHMMAX=34, /* long: Maximum shared memory segment */ + KERN_MSGMAX=35, /* int: Maximum size of a messege */ + KERN_MSGMNB=36, /* int: Maximum message queue size */ + KERN_MSGPOOL=37, /* int: Maximum system message pool size */ + KERN_SYSRQ=38, /* int: Sysreq enable */ + KERN_MAX_THREADS=39, /* int: Maximum nr of threads in the system */ + KERN_RANDOM=40, /* Random driver */ + KERN_SHMALL=41, /* int: Maximum size of shared memory */ + KERN_MSGMNI=42, /* int: msg queue identifiers */ + KERN_SEM=43, /* struct: sysv semaphore limits */ + KERN_SPARC_STOP_A=44, /* int: Sparc Stop-A enable */ + KERN_SHMMNI=45, /* int: shm array identifiers */ + KERN_OVERFLOWUID=46, /* int: overflow UID */ + KERN_OVERFLOWGID=47, /* int: overflow GID */ + KERN_SHMPATH=48, /* string: path to shm fs */ + KERN_HOTPLUG=49, /* string: path to uevent helper (deprecated) */ + KERN_IEEE_EMULATION_WARNINGS=50, /* int: unimplemented ieee instructions */ + KERN_S390_USER_DEBUG_LOGGING=51, /* int: dumps of user faults */ + KERN_CORE_USES_PID=52, /* int: use core or core.%pid */ + KERN_TAINTED=53, /* int: various kernel tainted flags */ + KERN_CADPID=54, /* int: PID of the 
process to notify on CAD */ + KERN_PIDMAX=55, /* int: PID # limit */ + KERN_CORE_PATTERN=56, /* string: pattern for core-file names */ + KERN_PANIC_ON_OOPS=57, /* int: whether we will panic on an oops */ + KERN_HPPA_PWRSW=58, /* int: hppa soft-power enable */ + KERN_HPPA_UNALIGNED=59, /* int: hppa unaligned-trap enable */ + KERN_PRINTK_RATELIMIT=60, /* int: tune printk ratelimiting */ + KERN_PRINTK_RATELIMIT_BURST=61, /* int: tune printk ratelimiting */ + KERN_PTY=62, /* dir: pty driver */ + KERN_NGROUPS_MAX=63, /* int: NGROUPS_MAX */ + KERN_SPARC_SCONS_PWROFF=64, /* int: serial console power-off halt */ + KERN_HZ_TIMER=65, /* int: hz timer on or off */ + KERN_UNKNOWN_NMI_PANIC=66, /* int: unknown nmi panic flag */ + KERN_BOOTLOADER_TYPE=67, /* int: boot loader type */ + KERN_RANDOMIZE=68, /* int: randomize virtual address space */ + KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */ + KERN_SPIN_RETRY=70, /* int: number of spinlock retries */ + KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */ + KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */ + KERN_COMPAT_LOG=73, /* int: print compat layer messages */ + KERN_MAX_LOCK_DEPTH=74, /* int: rtmutex's maximum lock depth */ + KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */ + KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ +}; + + + +/* CTL_VM names: */ +enum +{ + VM_UNUSED1=1, /* was: struct: Set vm swapping control */ + VM_UNUSED2=2, /* was; int: Linear or sqrt() swapout for hogs */ + VM_UNUSED3=3, /* was: struct: Set free page thresholds */ + VM_UNUSED4=4, /* Spare */ + VM_OVERCOMMIT_MEMORY=5, /* Turn off the virtual memory safety limit */ + VM_UNUSED5=6, /* was: struct: Set buffer memory thresholds */ + VM_UNUSED7=7, /* was: struct: Set cache memory thresholds */ + VM_UNUSED8=8, /* was: struct: Control kswapd behaviour */ + VM_UNUSED9=9, /* was: struct: Set page table cache parameters */ + VM_PAGE_CLUSTER=10, /* int: set number of pages to swap together */ + VM_DIRTY_BACKGROUND=11, /* dirty_background_ratio */ + VM_DIRTY_RATIO=12, /* dirty_ratio */ + VM_DIRTY_WB_CS=13, /* dirty_writeback_centisecs */ + VM_DIRTY_EXPIRE_CS=14, /* dirty_expire_centisecs */ + VM_NR_PDFLUSH_THREADS=15, /* nr_pdflush_threads */ + VM_OVERCOMMIT_RATIO=16, /* percent of RAM to allow overcommit in */ + VM_PAGEBUF=17, /* struct: Control pagebuf parameters */ + VM_HUGETLB_PAGES=18, /* int: Number of available Huge Pages */ + VM_SWAPPINESS=19, /* Tendency to steal mapped memory */ + VM_LOWMEM_RESERVE_RATIO=20,/* reservation ratio for lower memory zones */ + VM_MIN_FREE_KBYTES=21, /* Minimum free kilobytes to maintain */ + VM_MAX_MAP_COUNT=22, /* int: Maximum number of mmaps/address-space */ + VM_LAPTOP_MODE=23, /* vm laptop mode */ + VM_BLOCK_DUMP=24, /* block dump mode */ + VM_HUGETLB_GROUP=25, /* permitted hugetlb group */ + VM_VFS_CACHE_PRESSURE=26, /* dcache/icache reclaim pressure */ + VM_LEGACY_VA_LAYOUT=27, /* legacy/compatibility virtual address space layout */ + VM_SWAP_TOKEN_TIMEOUT=28, /* default time for token time out */ + VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */ + VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */ + VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */ + VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */ + VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ + VM_VDSO_ENABLED=34, /* map VDSO into new processes? 
*/ + VM_MIN_SLAB=35, /* Percent pages ignored by zone reclaim */ +}; + + +/* CTL_NET names: */ +enum +{ + NET_CORE=1, + NET_ETHER=2, + NET_802=3, + NET_UNIX=4, + NET_IPV4=5, + NET_IPX=6, + NET_ATALK=7, + NET_NETROM=8, + NET_AX25=9, + NET_BRIDGE=10, + NET_ROSE=11, + NET_IPV6=12, + NET_X25=13, + NET_TR=14, + NET_DECNET=15, + NET_ECONET=16, + NET_SCTP=17, + NET_LLC=18, + NET_NETFILTER=19, + NET_DCCP=20, + NET_IRDA=412, +}; + +/* /proc/sys/kernel/random */ +enum +{ + RANDOM_POOLSIZE=1, + RANDOM_ENTROPY_COUNT=2, + RANDOM_READ_THRESH=3, + RANDOM_WRITE_THRESH=4, + RANDOM_BOOT_ID=5, + RANDOM_UUID=6 +}; + +/* /proc/sys/kernel/pty */ +enum +{ + PTY_MAX=1, + PTY_NR=2 +}; + +/* /proc/sys/bus/isa */ +enum +{ + BUS_ISA_MEM_BASE=1, + BUS_ISA_PORT_BASE=2, + BUS_ISA_PORT_SHIFT=3 +}; + +/* /proc/sys/net/core */ +enum +{ + NET_CORE_WMEM_MAX=1, + NET_CORE_RMEM_MAX=2, + NET_CORE_WMEM_DEFAULT=3, + NET_CORE_RMEM_DEFAULT=4, +/* was NET_CORE_DESTROY_DELAY */ + NET_CORE_MAX_BACKLOG=6, + NET_CORE_FASTROUTE=7, + NET_CORE_MSG_COST=8, + NET_CORE_MSG_BURST=9, + NET_CORE_OPTMEM_MAX=10, + NET_CORE_HOT_LIST_LENGTH=11, + NET_CORE_DIVERT_VERSION=12, + NET_CORE_NO_CONG_THRESH=13, + NET_CORE_NO_CONG=14, + NET_CORE_LO_CONG=15, + NET_CORE_MOD_CONG=16, + NET_CORE_DEV_WEIGHT=17, + NET_CORE_SOMAXCONN=18, + NET_CORE_BUDGET=19, + NET_CORE_AEVENT_ETIME=20, + NET_CORE_AEVENT_RSEQTH=21, + NET_CORE_WARNINGS=22, +}; + +/* /proc/sys/net/ethernet */ + +/* /proc/sys/net/802 */ + +/* /proc/sys/net/unix */ + +enum +{ + NET_UNIX_DESTROY_DELAY=1, + NET_UNIX_DELETE_DELAY=2, + NET_UNIX_MAX_DGRAM_QLEN=3, +}; + +/* /proc/sys/net/netfilter */ +enum +{ + NET_NF_CONNTRACK_MAX=1, + NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2, + NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3, + NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4, + NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5, + NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6, + NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7, + NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8, + NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9, + NET_NF_CONNTRACK_UDP_TIMEOUT=10, + NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11, + NET_NF_CONNTRACK_ICMP_TIMEOUT=12, + NET_NF_CONNTRACK_GENERIC_TIMEOUT=13, + NET_NF_CONNTRACK_BUCKETS=14, + NET_NF_CONNTRACK_LOG_INVALID=15, + NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16, + NET_NF_CONNTRACK_TCP_LOOSE=17, + NET_NF_CONNTRACK_TCP_BE_LIBERAL=18, + NET_NF_CONNTRACK_TCP_MAX_RETRANS=19, + NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20, + NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21, + NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22, + NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23, + NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24, + NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25, + NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26, + NET_NF_CONNTRACK_COUNT=27, + NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28, + NET_NF_CONNTRACK_FRAG6_TIMEOUT=29, + NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30, + NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31, + NET_NF_CONNTRACK_CHECKSUM=32, +}; + +/* /proc/sys/net/ipv4 */ +enum +{ + /* v2.0 compatibile variables */ + NET_IPV4_FORWARD=8, + NET_IPV4_DYNADDR=9, + + NET_IPV4_CONF=16, + NET_IPV4_NEIGH=17, + NET_IPV4_ROUTE=18, + NET_IPV4_FIB_HASH=19, + NET_IPV4_NETFILTER=20, + + NET_IPV4_TCP_TIMESTAMPS=33, + NET_IPV4_TCP_WINDOW_SCALING=34, + NET_IPV4_TCP_SACK=35, + NET_IPV4_TCP_RETRANS_COLLAPSE=36, + NET_IPV4_DEFAULT_TTL=37, + NET_IPV4_AUTOCONFIG=38, + NET_IPV4_NO_PMTU_DISC=39, + NET_IPV4_TCP_SYN_RETRIES=40, + NET_IPV4_IPFRAG_HIGH_THRESH=41, + NET_IPV4_IPFRAG_LOW_THRESH=42, + NET_IPV4_IPFRAG_TIME=43, + NET_IPV4_TCP_MAX_KA_PROBES=44, + 
NET_IPV4_TCP_KEEPALIVE_TIME=45, + NET_IPV4_TCP_KEEPALIVE_PROBES=46, + NET_IPV4_TCP_RETRIES1=47, + NET_IPV4_TCP_RETRIES2=48, + NET_IPV4_TCP_FIN_TIMEOUT=49, + NET_IPV4_IP_MASQ_DEBUG=50, + NET_TCP_SYNCOOKIES=51, + NET_TCP_STDURG=52, + NET_TCP_RFC1337=53, + NET_TCP_SYN_TAILDROP=54, + NET_TCP_MAX_SYN_BACKLOG=55, + NET_IPV4_LOCAL_PORT_RANGE=56, + NET_IPV4_ICMP_ECHO_IGNORE_ALL=57, + NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58, + NET_IPV4_ICMP_SOURCEQUENCH_RATE=59, + NET_IPV4_ICMP_DESTUNREACH_RATE=60, + NET_IPV4_ICMP_TIMEEXCEED_RATE=61, + NET_IPV4_ICMP_PARAMPROB_RATE=62, + NET_IPV4_ICMP_ECHOREPLY_RATE=63, + NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64, + NET_IPV4_IGMP_MAX_MEMBERSHIPS=65, + NET_TCP_TW_RECYCLE=66, + NET_IPV4_ALWAYS_DEFRAG=67, + NET_IPV4_TCP_KEEPALIVE_INTVL=68, + NET_IPV4_INET_PEER_THRESHOLD=69, + NET_IPV4_INET_PEER_MINTTL=70, + NET_IPV4_INET_PEER_MAXTTL=71, + NET_IPV4_INET_PEER_GC_MINTIME=72, + NET_IPV4_INET_PEER_GC_MAXTIME=73, + NET_TCP_ORPHAN_RETRIES=74, + NET_TCP_ABORT_ON_OVERFLOW=75, + NET_TCP_SYNACK_RETRIES=76, + NET_TCP_MAX_ORPHANS=77, + NET_TCP_MAX_TW_BUCKETS=78, + NET_TCP_FACK=79, + NET_TCP_REORDERING=80, + NET_TCP_ECN=81, + NET_TCP_DSACK=82, + NET_TCP_MEM=83, + NET_TCP_WMEM=84, + NET_TCP_RMEM=85, + NET_TCP_APP_WIN=86, + NET_TCP_ADV_WIN_SCALE=87, + NET_IPV4_NONLOCAL_BIND=88, + NET_IPV4_ICMP_RATELIMIT=89, + NET_IPV4_ICMP_RATEMASK=90, + NET_TCP_TW_REUSE=91, + NET_TCP_FRTO=92, + NET_TCP_LOW_LATENCY=93, + NET_IPV4_IPFRAG_SECRET_INTERVAL=94, + NET_IPV4_IGMP_MAX_MSF=96, + NET_TCP_NO_METRICS_SAVE=97, + NET_TCP_DEFAULT_WIN_SCALE=105, + NET_TCP_MODERATE_RCVBUF=106, + NET_TCP_TSO_WIN_DIVISOR=107, + NET_TCP_BIC_BETA=108, + NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109, + NET_TCP_CONG_CONTROL=110, + NET_TCP_ABC=111, + NET_IPV4_IPFRAG_MAX_DIST=112, + NET_TCP_MTU_PROBING=113, + NET_TCP_BASE_MSS=114, + NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115, + NET_TCP_DMA_COPYBREAK=116, + NET_TCP_SLOW_START_AFTER_IDLE=117, + NET_CIPSOV4_CACHE_ENABLE=118, + NET_CIPSOV4_CACHE_BUCKET_SIZE=119, + NET_CIPSOV4_RBM_OPTFMT=120, + NET_CIPSOV4_RBM_STRICTVALID=121, + NET_TCP_AVAIL_CONG_CONTROL=122, + NET_TCP_ALLOWED_CONG_CONTROL=123, + NET_TCP_MAX_SSTHRESH=124, + NET_TCP_FRTO_RESPONSE=125, +}; + +enum { + NET_IPV4_ROUTE_FLUSH=1, + NET_IPV4_ROUTE_MIN_DELAY=2, /* obsolete since 2.6.25 */ + NET_IPV4_ROUTE_MAX_DELAY=3, /* obsolete since 2.6.25 */ + NET_IPV4_ROUTE_GC_THRESH=4, + NET_IPV4_ROUTE_MAX_SIZE=5, + NET_IPV4_ROUTE_GC_MIN_INTERVAL=6, + NET_IPV4_ROUTE_GC_TIMEOUT=7, + NET_IPV4_ROUTE_GC_INTERVAL=8, /* obsolete since 2.6.38 */ + NET_IPV4_ROUTE_REDIRECT_LOAD=9, + NET_IPV4_ROUTE_REDIRECT_NUMBER=10, + NET_IPV4_ROUTE_REDIRECT_SILENCE=11, + NET_IPV4_ROUTE_ERROR_COST=12, + NET_IPV4_ROUTE_ERROR_BURST=13, + NET_IPV4_ROUTE_GC_ELASTICITY=14, + NET_IPV4_ROUTE_MTU_EXPIRES=15, + NET_IPV4_ROUTE_MIN_PMTU=16, + NET_IPV4_ROUTE_MIN_ADVMSS=17, + NET_IPV4_ROUTE_SECRET_INTERVAL=18, + NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS=19, +}; + +enum +{ + NET_PROTO_CONF_ALL=-2, + NET_PROTO_CONF_DEFAULT=-3 + + /* And device ifindices ... 
*/ +}; + +enum +{ + NET_IPV4_CONF_FORWARDING=1, + NET_IPV4_CONF_MC_FORWARDING=2, + NET_IPV4_CONF_PROXY_ARP=3, + NET_IPV4_CONF_ACCEPT_REDIRECTS=4, + NET_IPV4_CONF_SECURE_REDIRECTS=5, + NET_IPV4_CONF_SEND_REDIRECTS=6, + NET_IPV4_CONF_SHARED_MEDIA=7, + NET_IPV4_CONF_RP_FILTER=8, + NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9, + NET_IPV4_CONF_BOOTP_RELAY=10, + NET_IPV4_CONF_LOG_MARTIANS=11, + NET_IPV4_CONF_TAG=12, + NET_IPV4_CONF_ARPFILTER=13, + NET_IPV4_CONF_MEDIUM_ID=14, + NET_IPV4_CONF_NOXFRM=15, + NET_IPV4_CONF_NOPOLICY=16, + NET_IPV4_CONF_FORCE_IGMP_VERSION=17, + NET_IPV4_CONF_ARP_ANNOUNCE=18, + NET_IPV4_CONF_ARP_IGNORE=19, + NET_IPV4_CONF_PROMOTE_SECONDARIES=20, + NET_IPV4_CONF_ARP_ACCEPT=21, + NET_IPV4_CONF_ARP_NOTIFY=22, +}; + +/* /proc/sys/net/ipv4/netfilter */ +enum +{ + NET_IPV4_NF_CONNTRACK_MAX=1, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9, + NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10, + NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11, + NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12, + NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13, + NET_IPV4_NF_CONNTRACK_BUCKETS=14, + NET_IPV4_NF_CONNTRACK_LOG_INVALID=15, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16, + NET_IPV4_NF_CONNTRACK_TCP_LOOSE=17, + NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL=18, + NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS=19, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26, + NET_IPV4_NF_CONNTRACK_COUNT=27, + NET_IPV4_NF_CONNTRACK_CHECKSUM=28, +}; + +/* /proc/sys/net/ipv6 */ +enum { + NET_IPV6_CONF=16, + NET_IPV6_NEIGH=17, + NET_IPV6_ROUTE=18, + NET_IPV6_ICMP=19, + NET_IPV6_BINDV6ONLY=20, + NET_IPV6_IP6FRAG_HIGH_THRESH=21, + NET_IPV6_IP6FRAG_LOW_THRESH=22, + NET_IPV6_IP6FRAG_TIME=23, + NET_IPV6_IP6FRAG_SECRET_INTERVAL=24, + NET_IPV6_MLD_MAX_MSF=25, +}; + +enum { + NET_IPV6_ROUTE_FLUSH=1, + NET_IPV6_ROUTE_GC_THRESH=2, + NET_IPV6_ROUTE_MAX_SIZE=3, + NET_IPV6_ROUTE_GC_MIN_INTERVAL=4, + NET_IPV6_ROUTE_GC_TIMEOUT=5, + NET_IPV6_ROUTE_GC_INTERVAL=6, + NET_IPV6_ROUTE_GC_ELASTICITY=7, + NET_IPV6_ROUTE_MTU_EXPIRES=8, + NET_IPV6_ROUTE_MIN_ADVMSS=9, + NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS=10 +}; + +enum { + NET_IPV6_FORWARDING=1, + NET_IPV6_HOP_LIMIT=2, + NET_IPV6_MTU=3, + NET_IPV6_ACCEPT_RA=4, + NET_IPV6_ACCEPT_REDIRECTS=5, + NET_IPV6_AUTOCONF=6, + NET_IPV6_DAD_TRANSMITS=7, + NET_IPV6_RTR_SOLICITS=8, + NET_IPV6_RTR_SOLICIT_INTERVAL=9, + NET_IPV6_RTR_SOLICIT_DELAY=10, + NET_IPV6_USE_TEMPADDR=11, + NET_IPV6_TEMP_VALID_LFT=12, + NET_IPV6_TEMP_PREFERED_LFT=13, + NET_IPV6_REGEN_MAX_RETRY=14, + NET_IPV6_MAX_DESYNC_FACTOR=15, + NET_IPV6_MAX_ADDRESSES=16, + NET_IPV6_FORCE_MLD_VERSION=17, + NET_IPV6_ACCEPT_RA_DEFRTR=18, + NET_IPV6_ACCEPT_RA_PINFO=19, + NET_IPV6_ACCEPT_RA_RTR_PREF=20, + NET_IPV6_RTR_PROBE_INTERVAL=21, + NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22, + NET_IPV6_PROXY_NDP=23, + NET_IPV6_ACCEPT_SOURCE_ROUTE=25, + __NET_IPV6_MAX +}; + +/* /proc/sys/net/ipv6/icmp */ +enum { + NET_IPV6_ICMP_RATELIMIT=1 +}; + +/* 
/proc/sys/net/<protocol>/neigh/<dev> */ +enum { + NET_NEIGH_MCAST_SOLICIT=1, + NET_NEIGH_UCAST_SOLICIT=2, + NET_NEIGH_APP_SOLICIT=3, + NET_NEIGH_RETRANS_TIME=4, + NET_NEIGH_REACHABLE_TIME=5, + NET_NEIGH_DELAY_PROBE_TIME=6, + NET_NEIGH_GC_STALE_TIME=7, + NET_NEIGH_UNRES_QLEN=8, + NET_NEIGH_PROXY_QLEN=9, + NET_NEIGH_ANYCAST_DELAY=10, + NET_NEIGH_PROXY_DELAY=11, + NET_NEIGH_LOCKTIME=12, + NET_NEIGH_GC_INTERVAL=13, + NET_NEIGH_GC_THRESH1=14, + NET_NEIGH_GC_THRESH2=15, + NET_NEIGH_GC_THRESH3=16, + NET_NEIGH_RETRANS_TIME_MS=17, + NET_NEIGH_REACHABLE_TIME_MS=18, +}; + +/* /proc/sys/net/dccp */ +enum { + NET_DCCP_DEFAULT=1, +}; + +/* /proc/sys/net/ipx */ +enum { + NET_IPX_PPROP_BROADCASTING=1, + NET_IPX_FORWARDING=2 +}; + +/* /proc/sys/net/llc */ +enum { + NET_LLC2=1, + NET_LLC_STATION=2, +}; + +/* /proc/sys/net/llc/llc2 */ +enum { + NET_LLC2_TIMEOUT=1, +}; + +/* /proc/sys/net/llc/station */ +enum { + NET_LLC_STATION_ACK_TIMEOUT=1, +}; + +/* /proc/sys/net/llc/llc2/timeout */ +enum { + NET_LLC2_ACK_TIMEOUT=1, + NET_LLC2_P_TIMEOUT=2, + NET_LLC2_REJ_TIMEOUT=3, + NET_LLC2_BUSY_TIMEOUT=4, +}; + +/* /proc/sys/net/appletalk */ +enum { + NET_ATALK_AARP_EXPIRY_TIME=1, + NET_ATALK_AARP_TICK_TIME=2, + NET_ATALK_AARP_RETRANSMIT_LIMIT=3, + NET_ATALK_AARP_RESOLVE_TIME=4 +}; + + +/* /proc/sys/net/netrom */ +enum { + NET_NETROM_DEFAULT_PATH_QUALITY=1, + NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2, + NET_NETROM_NETWORK_TTL_INITIALISER=3, + NET_NETROM_TRANSPORT_TIMEOUT=4, + NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5, + NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6, + NET_NETROM_TRANSPORT_BUSY_DELAY=7, + NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8, + NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9, + NET_NETROM_ROUTING_CONTROL=10, + NET_NETROM_LINK_FAILS_COUNT=11, + NET_NETROM_RESET=12 +}; + +/* /proc/sys/net/ax25 */ +enum { + NET_AX25_IP_DEFAULT_MODE=1, + NET_AX25_DEFAULT_MODE=2, + NET_AX25_BACKOFF_TYPE=3, + NET_AX25_CONNECT_MODE=4, + NET_AX25_STANDARD_WINDOW=5, + NET_AX25_EXTENDED_WINDOW=6, + NET_AX25_T1_TIMEOUT=7, + NET_AX25_T2_TIMEOUT=8, + NET_AX25_T3_TIMEOUT=9, + NET_AX25_IDLE_TIMEOUT=10, + NET_AX25_N2=11, + NET_AX25_PACLEN=12, + NET_AX25_PROTOCOL=13, + NET_AX25_DAMA_SLAVE_TIMEOUT=14 +}; + +/* /proc/sys/net/rose */ +enum { + NET_ROSE_RESTART_REQUEST_TIMEOUT=1, + NET_ROSE_CALL_REQUEST_TIMEOUT=2, + NET_ROSE_RESET_REQUEST_TIMEOUT=3, + NET_ROSE_CLEAR_REQUEST_TIMEOUT=4, + NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5, + NET_ROSE_ROUTING_CONTROL=6, + NET_ROSE_LINK_FAIL_TIMEOUT=7, + NET_ROSE_MAX_VCS=8, + NET_ROSE_WINDOW_SIZE=9, + NET_ROSE_NO_ACTIVITY_TIMEOUT=10 +}; + +/* /proc/sys/net/x25 */ +enum { + NET_X25_RESTART_REQUEST_TIMEOUT=1, + NET_X25_CALL_REQUEST_TIMEOUT=2, + NET_X25_RESET_REQUEST_TIMEOUT=3, + NET_X25_CLEAR_REQUEST_TIMEOUT=4, + NET_X25_ACK_HOLD_BACK_TIMEOUT=5, + NET_X25_FORWARD=6 +}; + +/* /proc/sys/net/token-ring */ +enum +{ + NET_TR_RIF_TIMEOUT=1 +}; + +/* /proc/sys/net/decnet/ */ +enum { + NET_DECNET_NODE_TYPE = 1, + NET_DECNET_NODE_ADDRESS = 2, + NET_DECNET_NODE_NAME = 3, + NET_DECNET_DEFAULT_DEVICE = 4, + NET_DECNET_TIME_WAIT = 5, + NET_DECNET_DN_COUNT = 6, + NET_DECNET_DI_COUNT = 7, + NET_DECNET_DR_COUNT = 8, + NET_DECNET_DST_GC_INTERVAL = 9, + NET_DECNET_CONF = 10, + NET_DECNET_NO_FC_MAX_CWND = 11, + NET_DECNET_MEM = 12, + NET_DECNET_RMEM = 13, + NET_DECNET_WMEM = 14, + NET_DECNET_DEBUG_LEVEL = 255 +}; + +/* /proc/sys/net/decnet/conf/<dev> */ +enum { + NET_DECNET_CONF_LOOPBACK = -2, + NET_DECNET_CONF_DDCMP = -3, + NET_DECNET_CONF_PPP = -4, + NET_DECNET_CONF_X25 = -5, + NET_DECNET_CONF_GRE = -6, + NET_DECNET_CONF_ETHER = -7 + + 
/* ... and ifindex of devices */ +}; + +/* /proc/sys/net/decnet/conf/<dev>/ */ +enum { + NET_DECNET_CONF_DEV_PRIORITY = 1, + NET_DECNET_CONF_DEV_T1 = 2, + NET_DECNET_CONF_DEV_T2 = 3, + NET_DECNET_CONF_DEV_T3 = 4, + NET_DECNET_CONF_DEV_FORWARDING = 5, + NET_DECNET_CONF_DEV_BLKSIZE = 6, + NET_DECNET_CONF_DEV_STATE = 7 +}; + +/* /proc/sys/net/sctp */ +enum { + NET_SCTP_RTO_INITIAL = 1, + NET_SCTP_RTO_MIN = 2, + NET_SCTP_RTO_MAX = 3, + NET_SCTP_RTO_ALPHA = 4, + NET_SCTP_RTO_BETA = 5, + NET_SCTP_VALID_COOKIE_LIFE = 6, + NET_SCTP_ASSOCIATION_MAX_RETRANS = 7, + NET_SCTP_PATH_MAX_RETRANS = 8, + NET_SCTP_MAX_INIT_RETRANSMITS = 9, + NET_SCTP_HB_INTERVAL = 10, + NET_SCTP_PRESERVE_ENABLE = 11, + NET_SCTP_MAX_BURST = 12, + NET_SCTP_ADDIP_ENABLE = 13, + NET_SCTP_PRSCTP_ENABLE = 14, + NET_SCTP_SNDBUF_POLICY = 15, + NET_SCTP_SACK_TIMEOUT = 16, + NET_SCTP_RCVBUF_POLICY = 17, +}; + +/* /proc/sys/net/bridge */ +enum { + NET_BRIDGE_NF_CALL_ARPTABLES = 1, + NET_BRIDGE_NF_CALL_IPTABLES = 2, + NET_BRIDGE_NF_CALL_IP6TABLES = 3, + NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4, + NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5, +}; + +/* proc/sys/net/irda */ +enum { + NET_IRDA_DISCOVERY=1, + NET_IRDA_DEVNAME=2, + NET_IRDA_DEBUG=3, + NET_IRDA_FAST_POLL=4, + NET_IRDA_DISCOVERY_SLOTS=5, + NET_IRDA_DISCOVERY_TIMEOUT=6, + NET_IRDA_SLOT_TIMEOUT=7, + NET_IRDA_MAX_BAUD_RATE=8, + NET_IRDA_MIN_TX_TURN_TIME=9, + NET_IRDA_MAX_TX_DATA_SIZE=10, + NET_IRDA_MAX_TX_WINDOW=11, + NET_IRDA_MAX_NOREPLY_TIME=12, + NET_IRDA_WARN_NOREPLY_TIME=13, + NET_IRDA_LAP_KEEPALIVE_TIME=14, +}; + + +/* CTL_FS names: */ +enum +{ + FS_NRINODE=1, /* int:current number of allocated inodes */ + FS_STATINODE=2, + FS_MAXINODE=3, /* int:maximum number of inodes that can be allocated */ + FS_NRDQUOT=4, /* int:current number of allocated dquots */ + FS_MAXDQUOT=5, /* int:maximum number of dquots that can be allocated */ + FS_NRFILE=6, /* int:current number of allocated filedescriptors */ + FS_MAXFILE=7, /* int:maximum number of filedescriptors that can be allocated */ + FS_DENTRY=8, + FS_NRSUPER=9, /* int:current number of allocated super_blocks */ + FS_MAXSUPER=10, /* int:maximum number of super_blocks that can be allocated */ + FS_OVERFLOWUID=11, /* int: overflow UID */ + FS_OVERFLOWGID=12, /* int: overflow GID */ + FS_LEASES=13, /* int: leases enabled */ + FS_DIR_NOTIFY=14, /* int: directory notification enabled */ + FS_LEASE_TIME=15, /* int: maximum time to wait for a lease break */ + FS_DQSTATS=16, /* disc quota usage statistics and control */ + FS_XFS=17, /* struct: control xfs parameters */ + FS_AIO_NR=18, /* current system-wide number of aio requests */ + FS_AIO_MAX_NR=19, /* system-wide maximum number of aio requests */ + FS_INOTIFY=20, /* inotify submenu */ + FS_OCFS2=988, /* ocfs2 */ +}; + +/* /proc/sys/fs/quota/ */ +enum { + FS_DQ_LOOKUPS = 1, + FS_DQ_DROPS = 2, + FS_DQ_READS = 3, + FS_DQ_WRITES = 4, + FS_DQ_CACHE_HITS = 5, + FS_DQ_ALLOCATED = 6, + FS_DQ_FREE = 7, + FS_DQ_SYNCS = 8, + FS_DQ_WARNINGS = 9, +}; + +/* CTL_DEBUG names: */ + +/* CTL_DEV names: */ +enum { + DEV_CDROM=1, + DEV_HWMON=2, + DEV_PARPORT=3, + DEV_RAID=4, + DEV_MAC_HID=5, + DEV_SCSI=6, + DEV_IPMI=7, +}; + +/* /proc/sys/dev/cdrom */ +enum { + DEV_CDROM_INFO=1, + DEV_CDROM_AUTOCLOSE=2, + DEV_CDROM_AUTOEJECT=3, + DEV_CDROM_DEBUG=4, + DEV_CDROM_LOCK=5, + DEV_CDROM_CHECK_MEDIA=6 +}; + +/* /proc/sys/dev/parport */ +enum { + DEV_PARPORT_DEFAULT=-3 +}; + +/* /proc/sys/dev/raid */ +enum { + DEV_RAID_SPEED_LIMIT_MIN=1, + DEV_RAID_SPEED_LIMIT_MAX=2 +}; + +/* /proc/sys/dev/parport/default */ +enum { + 
DEV_PARPORT_DEFAULT_TIMESLICE=1, + DEV_PARPORT_DEFAULT_SPINTIME=2 +}; + +/* /proc/sys/dev/parport/parport n */ +enum { + DEV_PARPORT_SPINTIME=1, + DEV_PARPORT_BASE_ADDR=2, + DEV_PARPORT_IRQ=3, + DEV_PARPORT_DMA=4, + DEV_PARPORT_MODES=5, + DEV_PARPORT_DEVICES=6, + DEV_PARPORT_AUTOPROBE=16 +}; + +/* /proc/sys/dev/parport/parport n/devices/ */ +enum { + DEV_PARPORT_DEVICES_ACTIVE=-3, +}; + +/* /proc/sys/dev/parport/parport n/devices/device n */ +enum { + DEV_PARPORT_DEVICE_TIMESLICE=1, +}; + +/* /proc/sys/dev/mac_hid */ +enum { + DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1, + DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2, + DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3, + DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4, + DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5, + DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6 +}; + +/* /proc/sys/dev/scsi */ +enum { + DEV_SCSI_LOGGING_LEVEL=1, +}; + +/* /proc/sys/dev/ipmi */ +enum { + DEV_IPMI_POWEROFF_POWERCYCLE=1, +}; + +/* /proc/sys/abi */ +enum +{ + ABI_DEFHANDLER_COFF=1, /* default handler for coff binaries */ + ABI_DEFHANDLER_ELF=2, /* default handler for ELF binaries */ + ABI_DEFHANDLER_LCALL7=3,/* default handler for procs using lcall7 */ + ABI_DEFHANDLER_LIBCSO=4,/* default handler for an libc.so ELF interp */ + ABI_TRACE=5, /* tracing flags */ + ABI_FAKE_UTSNAME=6, /* fake target utsname information */ +}; + +#ifdef __KERNEL__ +#include <linux/list.h> +#include <linux/rcupdate.h> +#include <linux/wait.h> +#include <linux/rbtree.h> + +/* For the /proc/sys support */ +struct ctl_table; +struct nsproxy; +struct ctl_table_root; +struct ctl_table_header; +struct ctl_dir; + +typedef struct ctl_table ctl_table; + +typedef int proc_handler (struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + +extern int proc_dostring(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_minmax(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_jiffies(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_doulongvec_minmax(struct ctl_table *, int, + void __user *, size_t *, loff_t *); +extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, + void __user *, size_t *, loff_t *); +extern int proc_do_large_bitmap(struct ctl_table *, int, + void __user *, size_t *, loff_t *); + +/* + * Register a set of sysctl names by calling register_sysctl_table + * with an initialised array of struct ctl_table's. An entry with + * NULL procname terminates the table. table->de will be + * set up by the registration and need not be initialised in advance. + * + * sysctl names can be mirrored automatically under /proc/sys. The + * procname supplied controls /proc naming. + * + * The table's mode will be honoured both for sys_sysctl(2) and + * proc-fs access. + * + * Leaf nodes in the sysctl tree will be represented by a single file + * under /proc; non-leaf nodes will be represented by directories. A + * null procname disables /proc mirroring at this node. + * + * sysctl(2) can automatically manage read and write requests through + * the sysctl table. 
The data and maxlen fields of the ctl_table + * struct enable minimal validation of the values being written to be + * performed, and the mode field allows minimal authentication. + * + * There must be a proc_handler routine for any terminal nodes + * mirrored under /proc/sys (non-terminals are handled by a built-in + * directory handler). Several default handlers are available to + * cover common cases. + */ + +/* Support for userspace poll() to watch for changes */ +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +static inline void *proc_sys_poll_event(struct ctl_table_poll *poll) +{ + return (void *)(unsigned long)atomic_read(&poll->event); +} + +#define __CTL_TABLE_POLL_INITIALIZER(name) { \ + .event = ATOMIC_INIT(0), \ + .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait) } + +#define DEFINE_CTL_TABLE_POLL(name) \ + struct ctl_table_poll name = __CTL_TABLE_POLL_INITIALIZER(name) + +/* A sysctl table is an array of struct ctl_table: */ +struct ctl_table +{ + const char *procname; /* Text ID for /proc/sys, or zero */ + void *data; + int maxlen; + umode_t mode; + struct ctl_table *child; /* Deprecated */ + proc_handler *proc_handler; /* Callback for text formatting */ + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +}; + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + +/* struct ctl_table_header is used to maintain dynamic lists of + struct ctl_table trees. */ +struct ctl_table_header +{ + union { + struct { + struct ctl_table *ctl_table; + int used; + int count; + int nreg; + }; + struct rcu_head rcu; + }; + struct completion *unregistering; + struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; +}; + +struct ctl_dir { + /* Header must be at the start of ctl_dir */ + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set *(*lookup)(struct ctl_table_root *root, + struct nsproxy *namespaces); + int (*permissions)(struct ctl_table_root *root, + struct nsproxy *namespaces, struct ctl_table *table); +}; + +/* struct ctl_path describes where in the hierarchy a table is added */ +struct ctl_path { + const char *procname; +}; + +#ifdef CONFIG_SYSCTL + +void proc_sys_poll_notify(struct ctl_table_poll *poll); + +extern void setup_sysctl_set(struct ctl_table_set *p, + struct ctl_table_root *root, + int (*is_seen)(struct ctl_table_set *)); +extern void retire_sysctl_set(struct ctl_table_set *set); + +void register_sysctl_root(struct ctl_table_root *root); +struct ctl_table_header *__register_sysctl_table( + struct ctl_table_set *set, + const char *path, struct ctl_table *table); +struct ctl_table_header *__register_sysctl_paths( + struct ctl_table_set *set, + const struct ctl_path *path, struct ctl_table *table); +struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table); +struct ctl_table_header *register_sysctl_table(struct ctl_table * table); +struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, + struct ctl_table *table); + +void unregister_sysctl_table(struct ctl_table_header * table); + +extern int sysctl_init(void); +#else /* CONFIG_SYSCTL */ +static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table) +{ + return NULL; +} + +static inline struct ctl_table_header 
*register_sysctl_paths( + const struct ctl_path *path, struct ctl_table *table) +{ + return NULL; +} + +static inline void unregister_sysctl_table(struct ctl_table_header * table) +{ +} + +static inline void setup_sysctl_set(struct ctl_table_set *p, + struct ctl_table_root *root, + int (*is_seen)(struct ctl_table_set *)) +{ +} + +#endif /* CONFIG_SYSCTL */ + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SYSCTL_H */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h new file mode 100644 index 00000000..0010009b --- /dev/null +++ b/include/linux/sysfs.h @@ -0,0 +1,346 @@ +/* + * sysfs.h - definitions for the device driver filesystem + * + * Copyright (c) 2001,2002 Patrick Mochel + * Copyright (c) 2004 Silicon Graphics, Inc. + * Copyright (c) 2007 SUSE Linux Products GmbH + * Copyright (c) 2007 Tejun Heo <teheo@suse.de> + * + * Please see Documentation/filesystems/sysfs.txt for more information. + */ + +#ifndef _SYSFS_H_ +#define _SYSFS_H_ + +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/kobject_ns.h> +#include <linux/atomic.h> + +struct kobject; +struct module; +enum kobj_ns_type; + +struct attribute { + const char *name; + umode_t mode; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lock_class_key *key; + struct lock_class_key skey; +#endif +}; + +/** + * sysfs_attr_init - initialize a dynamically allocated sysfs attribute + * @attr: struct attribute to initialize + * + * Initialize a dynamically allocated struct attribute so we can + * make lockdep happy. This is a new requirement for attributes + * and initially this is only needed when lockdep is enabled. + * Lockdep gives a nice error when your attribute is added to + * sysfs if you don't have this. + */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ +do { \ + static struct lock_class_key __key; \ + \ + (attr)->key = &__key; \ +} while(0) +#else +#define sysfs_attr_init(attr) do {} while(0) +#endif + +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, + struct attribute *, int); + struct attribute **attrs; +}; + + + +/** + * Use these macros to make defining attributes easier. See include/linux/device.h + * for examples.. + */ + +#define __ATTR(_name,_mode,_show,_store) { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +} + +#define __ATTR_RO(_name) { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = _name##_show, \ +} + +#define __ATTR_NULL { .attr = { .name = NULL } } + +#define attr_name(_attr) (_attr).attr.name + +struct file; +struct vm_area_struct; + +struct bin_attribute { + struct attribute attr; + size_t size; + void *private; + ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, + char *, loff_t, size_t); + ssize_t (*write)(struct file *,struct kobject *, struct bin_attribute *, + char *, loff_t, size_t); + int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, + struct vm_area_struct *vma); +}; + +/** + * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute + * @attr: struct bin_attribute to initialize + * + * Initialize a dynamically allocated struct bin_attribute so we + * can make lockdep happy. This is a new requirement for + * attributes and initially this is only needed when lockdep is + * enabled. Lockdep gives a nice error when your attribute is + * added to sysfs if you don't have this. 
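The same initialisation rule holds for regular attributes allocated at runtime; a sketch of that case (everything named example_* is illustrative, and <linux/kobject.h> plus <linux/slab.h> are assumed to be available to the caller):

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "hello\n");
}

static int example_add_attr(struct kobject *kobj)
{
        struct kobj_attribute *ka;

        ka = kzalloc(sizeof(*ka), GFP_KERNEL);
        if (!ka)
                return -ENOMEM;

        sysfs_attr_init(&ka->attr);     /* must come before sysfs_create_file() */
        ka->attr.name = "example";
        ka->attr.mode = 0444;
        ka->show = example_show;

        return sysfs_create_file(kobj, &ka->attr);
}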
+ */ +#define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr) + +struct sysfs_ops { + ssize_t (*show)(struct kobject *, struct attribute *,char *); + ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t); + const void *(*namespace)(struct kobject *, const struct attribute *); +}; + +struct sysfs_dirent; + +#ifdef CONFIG_SYSFS + +int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), + void *data, struct module *owner); + +int __must_check sysfs_create_dir(struct kobject *kobj); +void sysfs_remove_dir(struct kobject *kobj); +int __must_check sysfs_rename_dir(struct kobject *kobj, const char *new_name); +int __must_check sysfs_move_dir(struct kobject *kobj, + struct kobject *new_parent_kobj); + +int __must_check sysfs_create_file(struct kobject *kobj, + const struct attribute *attr); +int __must_check sysfs_create_files(struct kobject *kobj, + const struct attribute **attr); +int __must_check sysfs_chmod_file(struct kobject *kobj, + const struct attribute *attr, umode_t mode); +void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); +void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); + +int __must_check sysfs_create_bin_file(struct kobject *kobj, + const struct bin_attribute *attr); +void sysfs_remove_bin_file(struct kobject *kobj, + const struct bin_attribute *attr); + +int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, + const char *name); +int __must_check sysfs_create_link_nowarn(struct kobject *kobj, + struct kobject *target, + const char *name); +void sysfs_remove_link(struct kobject *kobj, const char *name); + +int sysfs_rename_link(struct kobject *kobj, struct kobject *target, + const char *old_name, const char *new_name); + +void sysfs_delete_link(struct kobject *dir, struct kobject *targ, + const char *name); + +int __must_check sysfs_create_group(struct kobject *kobj, + const struct attribute_group *grp); +int sysfs_update_group(struct kobject *kobj, + const struct attribute_group *grp); +void sysfs_remove_group(struct kobject *kobj, + const struct attribute_group *grp); +int sysfs_add_file_to_group(struct kobject *kobj, + const struct attribute *attr, const char *group); +void sysfs_remove_file_from_group(struct kobject *kobj, + const struct attribute *attr, const char *group); +int sysfs_merge_group(struct kobject *kobj, + const struct attribute_group *grp); +void sysfs_unmerge_group(struct kobject *kobj, + const struct attribute_group *grp); + +void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); +void sysfs_notify_dirent(struct sysfs_dirent *sd); +struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, + const void *ns, + const unsigned char *name); +struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); +void sysfs_put(struct sysfs_dirent *sd); + +int __must_check sysfs_init(void); + +#else /* CONFIG_SYSFS */ + +static inline int sysfs_schedule_callback(struct kobject *kobj, + void (*func)(void *), void *data, struct module *owner) +{ + return -ENOSYS; +} + +static inline int sysfs_create_dir(struct kobject *kobj) +{ + return 0; +} + +static inline void sysfs_remove_dir(struct kobject *kobj) +{ +} + +static inline int sysfs_rename_dir(struct kobject *kobj, const char *new_name) +{ + return 0; +} + +static inline int sysfs_move_dir(struct kobject *kobj, + struct kobject *new_parent_kobj) +{ + return 0; +} + +static inline int sysfs_create_file(struct kobject *kobj, + const struct attribute *attr) +{ + return 
0; +} + +static inline int sysfs_create_files(struct kobject *kobj, + const struct attribute **attr) +{ + return 0; +} + +static inline int sysfs_chmod_file(struct kobject *kobj, + const struct attribute *attr, umode_t mode) +{ + return 0; +} + +static inline void sysfs_remove_file(struct kobject *kobj, + const struct attribute *attr) +{ +} + +static inline void sysfs_remove_files(struct kobject *kobj, + const struct attribute **attr) +{ +} + +static inline int sysfs_create_bin_file(struct kobject *kobj, + const struct bin_attribute *attr) +{ + return 0; +} + +static inline void sysfs_remove_bin_file(struct kobject *kobj, + const struct bin_attribute *attr) +{ +} + +static inline int sysfs_create_link(struct kobject *kobj, + struct kobject *target, const char *name) +{ + return 0; +} + +static inline int sysfs_create_link_nowarn(struct kobject *kobj, + struct kobject *target, + const char *name) +{ + return 0; +} + +static inline void sysfs_remove_link(struct kobject *kobj, const char *name) +{ +} + +static inline int sysfs_rename_link(struct kobject *k, struct kobject *t, + const char *old_name, const char *new_name) +{ + return 0; +} + +static inline void sysfs_delete_link(struct kobject *k, struct kobject *t, + const char *name) +{ +} + +static inline int sysfs_create_group(struct kobject *kobj, + const struct attribute_group *grp) +{ + return 0; +} + +static inline int sysfs_update_group(struct kobject *kobj, + const struct attribute_group *grp) +{ + return 0; +} + +static inline void sysfs_remove_group(struct kobject *kobj, + const struct attribute_group *grp) +{ +} + +static inline int sysfs_add_file_to_group(struct kobject *kobj, + const struct attribute *attr, const char *group) +{ + return 0; +} + +static inline void sysfs_remove_file_from_group(struct kobject *kobj, + const struct attribute *attr, const char *group) +{ +} + +static inline int sysfs_merge_group(struct kobject *kobj, + const struct attribute_group *grp) +{ + return 0; +} + +static inline void sysfs_unmerge_group(struct kobject *kobj, + const struct attribute_group *grp) +{ +} + +static inline void sysfs_notify(struct kobject *kobj, const char *dir, + const char *attr) +{ +} +static inline void sysfs_notify_dirent(struct sysfs_dirent *sd) +{ +} +static inline +struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, + const void *ns, + const unsigned char *name) +{ + return NULL; +} +static inline struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd) +{ + return NULL; +} +static inline void sysfs_put(struct sysfs_dirent *sd) +{ +} + +static inline int __must_check sysfs_init(void) +{ + return 0; +} + +#endif /* CONFIG_SYSFS */ + +#endif /* _SYSFS_H_ */ diff --git a/include/linux/sysinfo.h b/include/linux/sysinfo.h new file mode 100644 index 00000000..934335a2 --- /dev/null +++ b/include/linux/sysinfo.h @@ -0,0 +1,24 @@ +#ifndef _LINUX_SYSINFO_H +#define _LINUX_SYSINFO_H + +#include <linux/types.h> + +#define SI_LOAD_SHIFT 16 +struct sysinfo { + __kernel_long_t uptime; /* Seconds since boot */ + __kernel_ulong_t loads[3]; /* 1, 5, and 15 minute load averages */ + __kernel_ulong_t totalram; /* Total usable main memory size */ + __kernel_ulong_t freeram; /* Available memory size */ + __kernel_ulong_t sharedram; /* Amount of shared memory */ + __kernel_ulong_t bufferram; /* Memory used by buffers */ + __kernel_ulong_t totalswap; /* Total swap space size */ + __kernel_ulong_t freeswap; /* swap space still available */ + __u16 procs; /* Number of current processes */ + __u16 pad; /* Explicit padding for 
m68k */ + __kernel_ulong_t totalhigh; /* Total high memory size */ + __kernel_ulong_t freehigh; /* Available high memory size */ + __u32 mem_unit; /* Memory unit size in bytes */ + char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)]; /* Padding: libc5 uses this.. */ +}; + +#endif /* _LINUX_SYSINFO_H */ diff --git a/include/linux/syslog.h b/include/linux/syslog.h new file mode 100644 index 00000000..38911391 --- /dev/null +++ b/include/linux/syslog.h @@ -0,0 +1,52 @@ +/* Syslog internals + * + * Copyright 2010 Canonical, Ltd. + * Author: Kees Cook <kees.cook@canonical.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _LINUX_SYSLOG_H +#define _LINUX_SYSLOG_H + +/* Close the log. Currently a NOP. */ +#define SYSLOG_ACTION_CLOSE 0 +/* Open the log. Currently a NOP. */ +#define SYSLOG_ACTION_OPEN 1 +/* Read from the log. */ +#define SYSLOG_ACTION_READ 2 +/* Read all messages remaining in the ring buffer. */ +#define SYSLOG_ACTION_READ_ALL 3 +/* Read and clear all messages remaining in the ring buffer */ +#define SYSLOG_ACTION_READ_CLEAR 4 +/* Clear ring buffer. */ +#define SYSLOG_ACTION_CLEAR 5 +/* Disable printk's to console */ +#define SYSLOG_ACTION_CONSOLE_OFF 6 +/* Enable printk's to console */ +#define SYSLOG_ACTION_CONSOLE_ON 7 +/* Set level of messages printed to console */ +#define SYSLOG_ACTION_CONSOLE_LEVEL 8 +/* Return number of unread characters in the log buffer */ +#define SYSLOG_ACTION_SIZE_UNREAD 9 +/* Return size of the log buffer */ +#define SYSLOG_ACTION_SIZE_BUFFER 10 + +#define SYSLOG_FROM_CALL 0 +#define SYSLOG_FROM_FILE 1 + +int do_syslog(int type, char __user *buf, int count, bool from_file); + +#endif /* _LINUX_SYSLOG_H */ diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h new file mode 100644 index 00000000..7faf933c --- /dev/null +++ b/include/linux/sysrq.h @@ -0,0 +1,78 @@ +/* -*- linux-c -*- + * + * $Id: sysrq.h,v 1.3 1997/07/17 11:54:33 mj Exp $ + * + * Linux Magic System Request Key Hacks + * + * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> + * + * (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com> + * overhauled to use key registration + * based upon discusions in irc://irc.openprojects.net/#kernelnewbies + */ + +#ifndef _LINUX_SYSRQ_H +#define _LINUX_SYSRQ_H + +#include <linux/errno.h> +#include <linux/types.h> + +/* Enable/disable SYSRQ support by default (0==no, 1==yes). 
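For orientation, a driver hooks an extra key through the sysrq_key_op structure and register_sysrq_key() declared further down in this header; a sketch in which the key 'x', the messages and the handler body are all assumptions:

static void sysrq_handle_example(int key)
{
        pr_info("sysrq: example handler invoked (key %d)\n", key);
}

static struct sysrq_key_op sysrq_example_op = {
        .handler        = sysrq_handle_example,
        .help_msg       = "example(x)",
        .action_msg     = "Running example handler",
        .enable_mask    = SYSRQ_ENABLE_KEYBOARD,
};

static int __init example_sysrq_init(void)
{
        /* a non-zero return means the key slot was not available */
        return register_sysrq_key('x', &sysrq_example_op);
}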
*/ +#define SYSRQ_DEFAULT_ENABLE 1 + +/* Possible values of bitmask for enabling sysrq functions */ +/* 0x0001 is reserved for enable everything */ +#define SYSRQ_ENABLE_LOG 0x0002 +#define SYSRQ_ENABLE_KEYBOARD 0x0004 +#define SYSRQ_ENABLE_DUMP 0x0008 +#define SYSRQ_ENABLE_SYNC 0x0010 +#define SYSRQ_ENABLE_REMOUNT 0x0020 +#define SYSRQ_ENABLE_SIGNAL 0x0040 +#define SYSRQ_ENABLE_BOOT 0x0080 +#define SYSRQ_ENABLE_RTNICE 0x0100 + +struct sysrq_key_op { + void (*handler)(int); + char *help_msg; + char *action_msg; + int enable_mask; +}; + +#ifdef CONFIG_MAGIC_SYSRQ + +/* Generic SysRq interface -- you may call it from any device driver, supplying + * ASCII code of the key, pointer to registers and kbd/tty structs (if they + * are available -- else NULL's). + */ + +void handle_sysrq(int key); +void __handle_sysrq(int key, bool check_mask); +int register_sysrq_key(int key, struct sysrq_key_op *op); +int unregister_sysrq_key(int key, struct sysrq_key_op *op); +struct sysrq_key_op *__sysrq_get_key_op(int key); + +int sysrq_toggle_support(int enable_mask); + +#else + +static inline void handle_sysrq(int key) +{ +} + +static inline void __handle_sysrq(int key, bool check_mask) +{ +} + +static inline int register_sysrq_key(int key, struct sysrq_key_op *op) +{ + return -EINVAL; +} + +static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op) +{ + return -EINVAL; +} + +#endif + +#endif /* _LINUX_SYSRQ_H */ diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h new file mode 100644 index 00000000..e47d6d90 --- /dev/null +++ b/include/linux/sysv_fs.h @@ -0,0 +1,213 @@ +#ifndef _LINUX_SYSV_FS_H +#define _LINUX_SYSV_FS_H + +#define __packed2__ __attribute__((packed, aligned(2))) + + +#ifndef __KERNEL__ +typedef u16 __fs16; +typedef u32 __fs16; +#endif + +/* inode numbers are 16 bit */ +typedef __fs16 sysv_ino_t; + +/* Block numbers are 24 bit, sometimes stored in 32 bit. + On Coherent FS, they are always stored in PDP-11 manner: the least + significant 16 bits come last. */ +typedef __fs32 sysv_zone_t; + +/* 0 is non-existent */ +#define SYSV_BADBL_INO 1 /* inode of bad blocks file */ +#define SYSV_ROOT_INO 2 /* inode of root directory */ + + +/* Xenix super-block data on disk */ +#define XENIX_NICINOD 100 /* number of inode cache entries */ +#define XENIX_NICFREE 100 /* number of free block list chunk entries */ +struct xenix_super_block { + __fs16 s_isize; /* index of first data zone */ + __fs32 s_fsize __packed2__; /* total number of zones of this fs */ + /* the start of the free block list: */ + __fs16 s_nfree; /* number of free blocks in s_free, <= XENIX_NICFREE */ + sysv_zone_t s_free[XENIX_NICFREE]; /* first free block list chunk */ + /* the cache of free inodes: */ + __fs16 s_ninode; /* number of free inodes in s_inode, <= XENIX_NICINOD */ + sysv_ino_t s_inode[XENIX_NICINOD]; /* some free inodes */ + /* locks, not used by Linux: */ + char s_flock; /* lock during free block list manipulation */ + char s_ilock; /* lock during inode cache manipulation */ + char s_fmod; /* super-block modified flag */ + char s_ronly; /* flag whether fs is mounted read-only */ + __fs32 s_time __packed2__; /* time of last super block update */ + __fs32 s_tfree __packed2__; /* total number of free zones */ + __fs16 s_tinode; /* total number of free inodes */ + __fs16 s_dinfo[4]; /* device information ?? 
*/ + char s_fname[6]; /* file system volume name */ + char s_fpack[6]; /* file system pack name */ + char s_clean; /* set to 0x46 when filesystem is properly unmounted */ + char s_fill[371]; + s32 s_magic; /* version of file system */ + __fs32 s_type; /* type of file system: 1 for 512 byte blocks + 2 for 1024 byte blocks + 3 for 2048 byte blocks */ + +}; + +/* + * SystemV FS comes in two variants: + * sysv2: System V Release 2 (e.g. Microport), structure elements aligned(2). + * sysv4: System V Release 4 (e.g. Consensys), structure elements aligned(4). + */ +#define SYSV_NICINOD 100 /* number of inode cache entries */ +#define SYSV_NICFREE 50 /* number of free block list chunk entries */ + +/* SystemV4 super-block data on disk */ +struct sysv4_super_block { + __fs16 s_isize; /* index of first data zone */ + u16 s_pad0; + __fs32 s_fsize; /* total number of zones of this fs */ + /* the start of the free block list: */ + __fs16 s_nfree; /* number of free blocks in s_free, <= SYSV_NICFREE */ + u16 s_pad1; + sysv_zone_t s_free[SYSV_NICFREE]; /* first free block list chunk */ + /* the cache of free inodes: */ + __fs16 s_ninode; /* number of free inodes in s_inode, <= SYSV_NICINOD */ + u16 s_pad2; + sysv_ino_t s_inode[SYSV_NICINOD]; /* some free inodes */ + /* locks, not used by Linux: */ + char s_flock; /* lock during free block list manipulation */ + char s_ilock; /* lock during inode cache manipulation */ + char s_fmod; /* super-block modified flag */ + char s_ronly; /* flag whether fs is mounted read-only */ + __fs32 s_time; /* time of last super block update */ + __fs16 s_dinfo[4]; /* device information ?? */ + __fs32 s_tfree; /* total number of free zones */ + __fs16 s_tinode; /* total number of free inodes */ + u16 s_pad3; + char s_fname[6]; /* file system volume name */ + char s_fpack[6]; /* file system pack name */ + s32 s_fill[12]; + __fs32 s_state; /* file system state: 0x7c269d38-s_time means clean */ + s32 s_magic; /* version of file system */ + __fs32 s_type; /* type of file system: 1 for 512 byte blocks + 2 for 1024 byte blocks */ +}; + +/* SystemV2 super-block data on disk */ +struct sysv2_super_block { + __fs16 s_isize; /* index of first data zone */ + __fs32 s_fsize __packed2__; /* total number of zones of this fs */ + /* the start of the free block list: */ + __fs16 s_nfree; /* number of free blocks in s_free, <= SYSV_NICFREE */ + sysv_zone_t s_free[SYSV_NICFREE]; /* first free block list chunk */ + /* the cache of free inodes: */ + __fs16 s_ninode; /* number of free inodes in s_inode, <= SYSV_NICINOD */ + sysv_ino_t s_inode[SYSV_NICINOD]; /* some free inodes */ + /* locks, not used by Linux: */ + char s_flock; /* lock during free block list manipulation */ + char s_ilock; /* lock during inode cache manipulation */ + char s_fmod; /* super-block modified flag */ + char s_ronly; /* flag whether fs is mounted read-only */ + __fs32 s_time __packed2__; /* time of last super block update */ + __fs16 s_dinfo[4]; /* device information ?? 
*/ + __fs32 s_tfree __packed2__; /* total number of free zones */ + __fs16 s_tinode; /* total number of free inodes */ + char s_fname[6]; /* file system volume name */ + char s_fpack[6]; /* file system pack name */ + s32 s_fill[14]; + __fs32 s_state; /* file system state: 0xcb096f43 means clean */ + s32 s_magic; /* version of file system */ + __fs32 s_type; /* type of file system: 1 for 512 byte blocks + 2 for 1024 byte blocks */ +}; + +/* V7 super-block data on disk */ +#define V7_NICINOD 100 /* number of inode cache entries */ +#define V7_NICFREE 50 /* number of free block list chunk entries */ +struct v7_super_block { + __fs16 s_isize; /* index of first data zone */ + __fs32 s_fsize __packed2__; /* total number of zones of this fs */ + /* the start of the free block list: */ + __fs16 s_nfree; /* number of free blocks in s_free, <= V7_NICFREE */ + sysv_zone_t s_free[V7_NICFREE]; /* first free block list chunk */ + /* the cache of free inodes: */ + __fs16 s_ninode; /* number of free inodes in s_inode, <= V7_NICINOD */ + sysv_ino_t s_inode[V7_NICINOD]; /* some free inodes */ + /* locks, not used by Linux or V7: */ + char s_flock; /* lock during free block list manipulation */ + char s_ilock; /* lock during inode cache manipulation */ + char s_fmod; /* super-block modified flag */ + char s_ronly; /* flag whether fs is mounted read-only */ + __fs32 s_time __packed2__; /* time of last super block update */ + /* the following fields are not maintained by V7: */ + __fs32 s_tfree __packed2__; /* total number of free zones */ + __fs16 s_tinode; /* total number of free inodes */ + __fs16 s_m; /* interleave factor */ + __fs16 s_n; /* interleave factor */ + char s_fname[6]; /* file system name */ + char s_fpack[6]; /* file system pack name */ +}; +/* Constants to aid sanity checking */ +/* This is not a hard limit, nor enforced by v7 kernel. It's actually just + * the limit used by Seventh Edition's ls, though is high enough to assume + * that no reasonable file system would have that much entries in root + * directory. Thus, if we see anything higher, we just probably got the + * endiannes wrong. */ +#define V7_NFILES 1024 +/* The disk addresses are three-byte (despite direct block addresses being + * aligned word-wise in inode). If the most significant byte is non-zero, + * something is most likely wrong (not a filesystem, bad bytesex). 
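A sketch of the kind of plausibility check these constants enable, using the V7_MAXSIZE constant defined just below (the helper itself is an assumption, not something this header provides):

/* Reject zone counts whose top byte is set: either this is not a V7
 * filesystem or the superblock was read with the wrong byte order. */
static int v7_fsize_plausible(__u32 fsize)
{
        return fsize != 0 && fsize <= V7_MAXSIZE;
}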
*/ +#define V7_MAXSIZE 0x00ffffff + +/* Coherent super-block data on disk */ +#define COH_NICINOD 100 /* number of inode cache entries */ +#define COH_NICFREE 64 /* number of free block list chunk entries */ +struct coh_super_block { + __fs16 s_isize; /* index of first data zone */ + __fs32 s_fsize __packed2__; /* total number of zones of this fs */ + /* the start of the free block list: */ + __fs16 s_nfree; /* number of free blocks in s_free, <= COH_NICFREE */ + sysv_zone_t s_free[COH_NICFREE] __packed2__; /* first free block list chunk */ + /* the cache of free inodes: */ + __fs16 s_ninode; /* number of free inodes in s_inode, <= COH_NICINOD */ + sysv_ino_t s_inode[COH_NICINOD]; /* some free inodes */ + /* locks, not used by Linux: */ + char s_flock; /* lock during free block list manipulation */ + char s_ilock; /* lock during inode cache manipulation */ + char s_fmod; /* super-block modified flag */ + char s_ronly; /* flag whether fs is mounted read-only */ + __fs32 s_time __packed2__; /* time of last super block update */ + __fs32 s_tfree __packed2__; /* total number of free zones */ + __fs16 s_tinode; /* total number of free inodes */ + __fs16 s_interleave_m; /* interleave factor */ + __fs16 s_interleave_n; + char s_fname[6]; /* file system volume name */ + char s_fpack[6]; /* file system pack name */ + __fs32 s_unique; /* zero, not used */ +}; + +/* SystemV/Coherent inode data on disk */ +struct sysv_inode { + __fs16 i_mode; + __fs16 i_nlink; + __fs16 i_uid; + __fs16 i_gid; + __fs32 i_size; + u8 i_data[3*(10+1+1+1)]; + u8 i_gen; + __fs32 i_atime; /* time of last access */ + __fs32 i_mtime; /* time of last modification */ + __fs32 i_ctime; /* time of creation */ +}; + +/* SystemV/Coherent directory entry on disk */ +#define SYSV_NAMELEN 14 /* max size of name in struct sysv_dir_entry */ +struct sysv_dir_entry { + sysv_ino_t inode; + char name[SYSV_NAMELEN]; /* up to 14 characters, the rest are zeroes */ +}; + +#define SYSV_DIRSIZE sizeof(struct sysv_dir_entry) /* size of every directory entry */ + +#endif /* _LINUX_SYSV_FS_H */ diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h new file mode 100644 index 00000000..bdf855c2 --- /dev/null +++ b/include/linux/task_io_accounting.h @@ -0,0 +1,45 @@ +/* + * task_io_accounting: a structure which is used for recording a single task's + * IO statistics. + * + * Don't include this header file directly - it is designed to be dragged in via + * sched.h. + * + * Blame Andrew Morton for all this. + */ + +struct task_io_accounting { +#ifdef CONFIG_TASK_XACCT + /* bytes read */ + u64 rchar; + /* bytes written */ + u64 wchar; + /* # of read syscalls */ + u64 syscr; + /* # of write syscalls */ + u64 syscw; +#endif /* CONFIG_TASK_XACCT */ + +#ifdef CONFIG_TASK_IO_ACCOUNTING + /* + * The number of bytes which this task has caused to be read from + * storage. + */ + u64 read_bytes; + + /* + * The number of bytes which this task has caused, or shall cause to be + * written to disk. + */ + u64 write_bytes; + + /* + * A task can cause "negative" IO too. If this task truncates some + * dirty pagecache, some IO which another task has been accounted for + * (in its write_bytes) will not be happening. We _could_ just + * subtract that from the truncating task's write_bytes, but there is + * information loss in doing that. 
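A sketch of how a truncation path records this instead, via the task_io_account_cancelled_write() helper declared in task_io_accounting_ops.h further below (the wrapper function and its caller are assumptions, and the usual mm headers are assumed to be included):

static void example_cancel_dirty_page(struct page *page)
{
        /* the page's earlier write accounting is "taken back" here,
         * without subtracting from anybody's write_bytes */
        if (PageDirty(page))
                task_io_account_cancelled_write(PAGE_SIZE);
}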
+ */ + u64 cancelled_write_bytes; +#endif /* CONFIG_TASK_IO_ACCOUNTING */ +}; diff --git a/include/linux/task_io_accounting_ops.h b/include/linux/task_io_accounting_ops.h new file mode 100644 index 00000000..4d090f9e --- /dev/null +++ b/include/linux/task_io_accounting_ops.h @@ -0,0 +1,113 @@ +/* + * Task I/O accounting operations + */ +#ifndef __TASK_IO_ACCOUNTING_OPS_INCLUDED +#define __TASK_IO_ACCOUNTING_OPS_INCLUDED + +#include <linux/sched.h> + +#ifdef CONFIG_TASK_IO_ACCOUNTING +static inline void task_io_account_read(size_t bytes) +{ + current->ioac.read_bytes += bytes; +} + +/* + * We approximate number of blocks, because we account bytes only. + * A 'block' is 512 bytes + */ +static inline unsigned long task_io_get_inblock(const struct task_struct *p) +{ + return p->ioac.read_bytes >> 9; +} + +static inline void task_io_account_write(size_t bytes) +{ + current->ioac.write_bytes += bytes; +} + +/* + * We approximate number of blocks, because we account bytes only. + * A 'block' is 512 bytes + */ +static inline unsigned long task_io_get_oublock(const struct task_struct *p) +{ + return p->ioac.write_bytes >> 9; +} + +static inline void task_io_account_cancelled_write(size_t bytes) +{ + current->ioac.cancelled_write_bytes += bytes; +} + +static inline void task_io_accounting_init(struct task_io_accounting *ioac) +{ + memset(ioac, 0, sizeof(*ioac)); +} + +static inline void task_blk_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ + dst->read_bytes += src->read_bytes; + dst->write_bytes += src->write_bytes; + dst->cancelled_write_bytes += src->cancelled_write_bytes; +} + +#else + +static inline void task_io_account_read(size_t bytes) +{ +} + +static inline unsigned long task_io_get_inblock(const struct task_struct *p) +{ + return 0; +} + +static inline void task_io_account_write(size_t bytes) +{ +} + +static inline unsigned long task_io_get_oublock(const struct task_struct *p) +{ + return 0; +} + +static inline void task_io_account_cancelled_write(size_t bytes) +{ +} + +static inline void task_io_accounting_init(struct task_io_accounting *ioac) +{ +} + +static inline void task_blk_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ +} + +#endif /* CONFIG_TASK_IO_ACCOUNTING */ + +#ifdef CONFIG_TASK_XACCT +static inline void task_chr_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ + dst->rchar += src->rchar; + dst->wchar += src->wchar; + dst->syscr += src->syscr; + dst->syscw += src->syscw; +} +#else +static inline void task_chr_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ +} +#endif /* CONFIG_TASK_XACCT */ + +static inline void task_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ + task_chr_io_accounting_add(dst, src); + task_blk_io_accounting_add(dst, src); +} +#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */ diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h new file mode 100644 index 00000000..2466e550 --- /dev/null +++ b/include/linux/taskstats.h @@ -0,0 +1,213 @@ +/* taskstats.h - exporting per-task statistics + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * (C) Balbir Singh, IBM Corp. 2006 + * (C) Jay Lan, SGI, 2006 + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + */ + +#ifndef _LINUX_TASKSTATS_H +#define _LINUX_TASKSTATS_H + +#include <linux/types.h> + +/* Format for per-task data returned to userland when + * - a task exits + * - listener requests stats for a task + * + * The struct is versioned. Newer versions should only add fields to + * the bottom of the struct to maintain backward compatibility. + * + * + * To add new fields + * a) bump up TASKSTATS_VERSION + * b) add comment indicating new version number at end of struct + * c) add new fields after version comment; maintain 64-bit alignment + */ + + +#define TASKSTATS_VERSION 8 +#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN + * in linux/sched.h */ + +struct taskstats { + + /* The version number of this struct. This field is always set to + * TAKSTATS_VERSION, which is defined in <linux/taskstats.h>. + * Each time the struct is changed, the value should be incremented. + */ + __u16 version; + __u32 ac_exitcode; /* Exit status */ + + /* The accounting flags of a task as defined in <linux/acct.h> + * Defined values are AFORK, ASU, ACOMPAT, ACORE, and AXSIG. + */ + __u8 ac_flag; /* Record flags */ + __u8 ac_nice; /* task_nice */ + + /* Delay accounting fields start + * + * All values, until comment "Delay accounting fields end" are + * available only if delay accounting is enabled, even though the last + * few fields are not delays + * + * xxx_count is the number of delay values recorded + * xxx_delay_total is the corresponding cumulative delay in nanoseconds + * + * xxx_delay_total wraps around to zero on overflow + * xxx_count incremented regardless of overflow + */ + + /* Delay waiting for cpu, while runnable + * count, delay_total NOT updated atomically + */ + __u64 cpu_count __attribute__((aligned(8))); + __u64 cpu_delay_total; + + /* Following four fields atomically updated using task->delays->lock */ + + /* Delay waiting for synchronous block I/O to complete + * does not account for delays in I/O submission + */ + __u64 blkio_count; + __u64 blkio_delay_total; + + /* Delay waiting for page fault I/O (swap in only) */ + __u64 swapin_count; + __u64 swapin_delay_total; + + /* cpu "wall-clock" running time + * On some architectures, value will adjust for cpu time stolen + * from the kernel in involuntary waits due to virtualization. + * Value is cumulative, in nanoseconds, without a corresponding count + * and wraps around to zero silently on overflow + */ + __u64 cpu_run_real_total; + + /* cpu "virtual" running time + * Uses time intervals seen by the kernel i.e. no adjustment + * for kernel's involuntary waits due to virtualization. 
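(As an aside on the xxx_count/xxx_delay_total pairs described above: a userspace consumer that has received a struct taskstats over the taskstats netlink interface would typically derive mean delays as in this userspace-style sketch; the helper and function names are assumptions.)

#include <stdio.h>
#include <linux/taskstats.h>

static __u64 mean_delay_ns(__u64 delay_total, __u64 count)
{
        return count ? delay_total / count : 0;
}

static void report_mean_delays(const struct taskstats *ts)
{
        /* average nanoseconds per recorded delay event */
        printf("cpu wait: %llu ns, blkio wait: %llu ns\n",
               (unsigned long long)mean_delay_ns(ts->cpu_delay_total, ts->cpu_count),
               (unsigned long long)mean_delay_ns(ts->blkio_delay_total, ts->blkio_count));
}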
+ * Value is cumulative, in nanoseconds, without a corresponding count + * and wraps around to zero silently on overflow + */ + __u64 cpu_run_virtual_total; + /* Delay accounting fields end */ + /* version 1 ends here */ + + /* Basic Accounting Fields start */ + char ac_comm[TS_COMM_LEN]; /* Command name */ + __u8 ac_sched __attribute__((aligned(8))); + /* Scheduling discipline */ + __u8 ac_pad[3]; + __u32 ac_uid __attribute__((aligned(8))); + /* User ID */ + __u32 ac_gid; /* Group ID */ + __u32 ac_pid; /* Process ID */ + __u32 ac_ppid; /* Parent process ID */ + __u32 ac_btime; /* Begin time [sec since 1970] */ + __u64 ac_etime __attribute__((aligned(8))); + /* Elapsed time [usec] */ + __u64 ac_utime; /* User CPU time [usec] */ + __u64 ac_stime; /* SYstem CPU time [usec] */ + __u64 ac_minflt; /* Minor Page Fault Count */ + __u64 ac_majflt; /* Major Page Fault Count */ + /* Basic Accounting Fields end */ + + /* Extended accounting fields start */ + /* Accumulated RSS usage in duration of a task, in MBytes-usecs. + * The current rss usage is added to this counter every time + * a tick is charged to a task's system time. So, at the end we + * will have memory usage multiplied by system time. Thus an + * average usage per system time unit can be calculated. + */ + __u64 coremem; /* accumulated RSS usage in MB-usec */ + /* Accumulated virtual memory usage in duration of a task. + * Same as acct_rss_mem1 above except that we keep track of VM usage. + */ + __u64 virtmem; /* accumulated VM usage in MB-usec */ + + /* High watermark of RSS and virtual memory usage in duration of + * a task, in KBytes. + */ + __u64 hiwater_rss; /* High-watermark of RSS usage, in KB */ + __u64 hiwater_vm; /* High-water VM usage, in KB */ + + /* The following four fields are I/O statistics of a task. */ + __u64 read_char; /* bytes read */ + __u64 write_char; /* bytes written */ + __u64 read_syscalls; /* read syscalls */ + __u64 write_syscalls; /* write syscalls */ + /* Extended accounting fields end */ + +#define TASKSTATS_HAS_IO_ACCOUNTING + /* Per-task storage I/O accounting starts */ + __u64 read_bytes; /* bytes of read I/O */ + __u64 write_bytes; /* bytes of write I/O */ + __u64 cancelled_write_bytes; /* bytes of cancelled write I/O */ + + __u64 nvcsw; /* voluntary_ctxt_switches */ + __u64 nivcsw; /* nonvoluntary_ctxt_switches */ + + /* time accounting for SMT machines */ + __u64 ac_utimescaled; /* utime scaled on frequency etc */ + __u64 ac_stimescaled; /* stime scaled on frequency etc */ + __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */ + + /* Delay waiting for memory reclaim */ + __u64 freepages_count; + __u64 freepages_delay_total; +}; + + +/* + * Commands sent from userspace + * Not versioned. 
New commands should only be inserted at the enum's end + * prior to __TASKSTATS_CMD_MAX + */ + +enum { + TASKSTATS_CMD_UNSPEC = 0, /* Reserved */ + TASKSTATS_CMD_GET, /* user->kernel request/get-response */ + TASKSTATS_CMD_NEW, /* kernel->user event */ + __TASKSTATS_CMD_MAX, +}; + +#define TASKSTATS_CMD_MAX (__TASKSTATS_CMD_MAX - 1) + +enum { + TASKSTATS_TYPE_UNSPEC = 0, /* Reserved */ + TASKSTATS_TYPE_PID, /* Process id */ + TASKSTATS_TYPE_TGID, /* Thread group id */ + TASKSTATS_TYPE_STATS, /* taskstats structure */ + TASKSTATS_TYPE_AGGR_PID, /* contains pid + stats */ + TASKSTATS_TYPE_AGGR_TGID, /* contains tgid + stats */ + TASKSTATS_TYPE_NULL, /* contains nothing */ + __TASKSTATS_TYPE_MAX, +}; + +#define TASKSTATS_TYPE_MAX (__TASKSTATS_TYPE_MAX - 1) + +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID, + TASKSTATS_CMD_ATTR_TGID, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK, + __TASKSTATS_CMD_ATTR_MAX, +}; + +#define TASKSTATS_CMD_ATTR_MAX (__TASKSTATS_CMD_ATTR_MAX - 1) + +/* NETLINK_GENERIC related info */ + +#define TASKSTATS_GENL_NAME "TASKSTATS" +#define TASKSTATS_GENL_VERSION 0x1 + +#endif /* _LINUX_TASKSTATS_H */ diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h new file mode 100644 index 00000000..58de6edf --- /dev/null +++ b/include/linux/taskstats_kern.h @@ -0,0 +1,36 @@ +/* taskstats_kern.h - kernel header for per-task statistics interface + * + * Copyright (C) Shailabh Nagar, IBM Corp. 2006 + * (C) Balbir Singh, IBM Corp. 2006 + */ + +#ifndef _LINUX_TASKSTATS_KERN_H +#define _LINUX_TASKSTATS_KERN_H + +#include <linux/taskstats.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#ifdef CONFIG_TASKSTATS +extern struct kmem_cache *taskstats_cache; +extern struct mutex taskstats_exit_mutex; + +static inline void taskstats_tgid_free(struct signal_struct *sig) +{ + if (sig->stats) + kmem_cache_free(taskstats_cache, sig->stats); +} + +extern void taskstats_exit(struct task_struct *, int group_dead); +extern void taskstats_init_early(void); +#else +static inline void taskstats_exit(struct task_struct *tsk, int group_dead) +{} +static inline void taskstats_tgid_free(struct signal_struct *sig) +{} +static inline void taskstats_init_early(void) +{} +#endif /* CONFIG_TASKSTATS */ + +#endif + diff --git a/include/linux/tboot.h b/include/linux/tboot.h new file mode 100644 index 00000000..c75128be --- /dev/null +++ b/include/linux/tboot.h @@ -0,0 +1,162 @@ +/* + * tboot.h: shared data structure with tboot and kernel and functions + * used by kernel for runtime support of Intel(R) Trusted + * Execution Technology + * + * Copyright (c) 2006-2009, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#ifndef _LINUX_TBOOT_H +#define _LINUX_TBOOT_H + +/* these must have the values from 0-5 in this order */ +enum { + TB_SHUTDOWN_REBOOT = 0, + TB_SHUTDOWN_S5, + TB_SHUTDOWN_S4, + TB_SHUTDOWN_S3, + TB_SHUTDOWN_HALT, + TB_SHUTDOWN_WFS +}; + +#ifdef CONFIG_INTEL_TXT +#include <acpi/acpi.h> +/* used to communicate between tboot and the launched kernel */ + +#define TB_KEY_SIZE 64 /* 512 bits */ + +#define MAX_TB_MAC_REGIONS 32 + +struct tboot_mac_region { + u64 start; /* must be 64 byte -aligned */ + u32 size; /* must be 64 byte -granular */ +} __packed; + +/* GAS - Generic Address Structure (ACPI 2.0+) */ +struct tboot_acpi_generic_address { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 address; +} __packed; + +/* + * combines Sx info from FADT and FACS tables per ACPI 2.0+ spec + * (http://www.acpi.info/) + */ +struct tboot_acpi_sleep_info { + struct tboot_acpi_generic_address pm1a_cnt_blk; + struct tboot_acpi_generic_address pm1b_cnt_blk; + struct tboot_acpi_generic_address pm1a_evt_blk; + struct tboot_acpi_generic_address pm1b_evt_blk; + u16 pm1a_cnt_val; + u16 pm1b_cnt_val; + u64 wakeup_vector; + u32 vector_width; + u64 kernel_s3_resume_vector; +} __packed; + +/* + * shared memory page used for communication between tboot and kernel + */ +struct tboot { + /* + * version 3+ fields: + */ + + /* TBOOT_UUID */ + u8 uuid[16]; + + /* version number: 5 is current */ + u32 version; + + /* physical addr of tb_log_t log */ + u32 log_addr; + + /* + * physical addr of entry point for tboot shutdown and + * type of shutdown (TB_SHUTDOWN_*) being requested + */ + u32 shutdown_entry; + u32 shutdown_type; + + /* kernel-specified ACPI info for Sx shutdown */ + struct tboot_acpi_sleep_info acpi_sinfo; + + /* tboot location in memory (physical) */ + u32 tboot_base; + u32 tboot_size; + + /* memory regions (phys addrs) for tboot to MAC on S3 */ + u8 num_mac_regions; + struct tboot_mac_region mac_regions[MAX_TB_MAC_REGIONS]; + + + /* + * version 4+ fields: + */ + + /* symmetric key for use by kernel; will be encrypted on S3 */ + u8 s3_key[TB_KEY_SIZE]; + + + /* + * version 5+ fields: + */ + + /* used to 4byte-align num_in_wfs */ + u8 reserved_align[3]; + + /* number of processors in wait-for-SIPI */ + u32 num_in_wfs; +} __packed; + +/* + * UUID for tboot data struct to facilitate matching + * defined as {663C8DFF-E8B3-4b82-AABF-19EA4D057A08} by tboot, which is + * represented as {} in the char array used here + */ +#define TBOOT_UUID {0xff, 0x8d, 0x3c, 0x66, 0xb3, 0xe8, 0x82, 0x4b, 0xbf,\ + 0xaa, 0x19, 0xea, 0x4d, 0x5, 0x7a, 0x8} + +extern struct tboot *tboot; + +static inline int tboot_enabled(void) +{ + return tboot != NULL; +} + +extern void tboot_probe(void); +extern void tboot_shutdown(u32 shutdown_type); +extern struct acpi_table_header *tboot_get_dmar_table( + struct acpi_table_header *dmar_tbl); +extern int tboot_force_iommu(void); + +#else + +#define tboot_enabled() 0 +#define tboot_probe() do { } while (0) +#define tboot_shutdown(shutdown_type) do { } while (0) +#define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \ + do { } while (0) +#define tboot_get_dmar_table(dmar_tbl) (dmar_tbl) +#define tboot_force_iommu() 0 + +#endif /* !CONFIG_INTEL_TXT */ + +#endif /* _LINUX_TBOOT_H */ diff --git a/include/linux/tc.h b/include/linux/tc.h new file mode 100644 index 00000000..f92511e5 --- /dev/null +++ b/include/linux/tc.h @@ -0,0 +1,141 @@ +/* + * Interface to the TURBOchannel related routines. 
+ * + * Copyright (c) 1998 Harald Koerfgen + * Copyright (c) 2005 James Simmons + * Copyright (c) 2006 Maciej W. Rozycki + * + * Based on: + * + * "TURBOchannel Firmware Specification", EK-TCAAD-FS-004 + * + * from Digital Equipment Corporation. + * + * This file is subject to the terms and conditions of the GNU + * General Public License. See the file "COPYING" in the main + * directory of this archive for more details. + */ +#ifndef _LINUX_TC_H +#define _LINUX_TC_H + +#include <linux/compiler.h> +#include <linux/device.h> +#include <linux/ioport.h> +#include <linux/types.h> + +/* + * Offsets for the ROM header locations for TURBOchannel cards. + */ +#define TC_OLDCARD 0x3c0000 +#define TC_NEWCARD 0x000000 + +#define TC_ROM_WIDTH 0x3e0 +#define TC_ROM_STRIDE 0x3e4 +#define TC_ROM_SIZE 0x3e8 +#define TC_SLOT_SIZE 0x3ec +#define TC_PATTERN0 0x3f0 +#define TC_PATTERN1 0x3f4 +#define TC_PATTERN2 0x3f8 +#define TC_PATTERN3 0x3fc +#define TC_FIRM_VER 0x400 +#define TC_VENDOR 0x420 +#define TC_MODULE 0x440 +#define TC_FIRM_TYPE 0x460 +#define TC_FLAGS 0x470 +#define TC_ROM_OBJECTS 0x480 + +/* + * Information obtained through the get_tcinfo() PROM call. + */ +struct tcinfo { + s32 revision; /* Hardware revision level. */ + s32 clk_period; /* Clock period in nanoseconds. */ + s32 slot_size; /* Slot size in megabytes. */ + s32 io_timeout; /* I/O timeout in cycles. */ + s32 dma_range; /* DMA address range in megabytes. */ + s32 max_dma_burst; /* Maximum DMA burst length. */ + s32 parity; /* System module supports TC parity. */ + s32 reserved[4]; +}; + +/* + * TURBOchannel bus. + */ +struct tc_bus { + struct list_head devices; /* List of devices on this bus. */ + struct resource resource[2]; /* Address space routed to this bus. */ + + struct device dev; + char name[13]; + resource_size_t slot_base; + resource_size_t ext_slot_base; + resource_size_t ext_slot_size; + int num_tcslots; + struct tcinfo info; +}; + +/* + * TURBOchannel device. + */ +struct tc_dev { + struct list_head node; /* Node in list of all TC devices. */ + struct tc_bus *bus; /* Bus this device is on. */ + struct tc_driver *driver; /* Which driver has allocated this + device. */ + struct device dev; /* Generic device interface. */ + struct resource resource; /* Address space of this device. */ + char vendor[9]; + char name[9]; + char firmware[9]; + int interrupt; + int slot; +}; + +#define to_tc_dev(n) container_of(n, struct tc_dev, dev) + +struct tc_device_id { + char vendor[9]; + char name[9]; +}; + +/* + * TURBOchannel driver. + */ +struct tc_driver { + struct list_head node; + const struct tc_device_id *id_table; + struct device_driver driver; +}; + +#define to_tc_driver(drv) container_of(drv, struct tc_driver, driver) + +/* + * Return TURBOchannel clock frequency in Hz. + */ +static inline unsigned long tc_get_speed(struct tc_bus *tbus) +{ + return 100000 * (10000 / (unsigned long)tbus->info.clk_period); +} + +#ifdef CONFIG_TC + +extern struct bus_type tc_bus_type; + +extern int tc_register_driver(struct tc_driver *tdrv); +extern void tc_unregister_driver(struct tc_driver *tdrv); + +#else /* !CONFIG_TC */ + +static inline int tc_register_driver(struct tc_driver *tdrv) { return 0; } +static inline void tc_unregister_driver(struct tc_driver *tdrv) { } + +#endif /* CONFIG_TC */ + +/* + * These have to be provided by the architecture. 
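For context, a minimal TURBOchannel driver built on the structures declared above could look like this (sketch: the example_* names are assumptions, and the ID strings are only meant to show the fixed-width, space-padded vendor/name matching):

static const struct tc_device_id example_tc_ids[] = {
        { "DEC     ", "PMAD-AA " },     /* 8-character, space-padded strings */
        { }
};

static struct tc_driver example_tc_driver = {
        .id_table       = example_tc_ids,
        .driver         = {
                .name   = "example_tc",
        },
};

static int __init example_tc_init(void)
{
        return tc_register_driver(&example_tc_driver);
}

static void __exit example_tc_exit(void)
{
        tc_unregister_driver(&example_tc_driver);
}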
+ */ +extern int tc_preadb(u8 *valp, void __iomem *addr); +extern int tc_bus_get_info(struct tc_bus *tbus); +extern void tc_device_get_irq(struct tc_dev *tdev); + +#endif /* _LINUX_TC_H */ diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild new file mode 100644 index 00000000..67b501c3 --- /dev/null +++ b/include/linux/tc_act/Kbuild @@ -0,0 +1,7 @@ +header-y += tc_gact.h +header-y += tc_ipt.h +header-y += tc_mirred.h +header-y += tc_pedit.h +header-y += tc_nat.h +header-y += tc_skbedit.h +header-y += tc_csum.h diff --git a/include/linux/tc_act/tc_csum.h b/include/linux/tc_act/tc_csum.h new file mode 100644 index 00000000..a047c49a --- /dev/null +++ b/include/linux/tc_act/tc_csum.h @@ -0,0 +1,32 @@ +#ifndef __LINUX_TC_CSUM_H +#define __LINUX_TC_CSUM_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +#define TCA_ACT_CSUM 16 + +enum { + TCA_CSUM_UNSPEC, + TCA_CSUM_PARMS, + TCA_CSUM_TM, + __TCA_CSUM_MAX +}; +#define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1) + +enum { + TCA_CSUM_UPDATE_FLAG_IPV4HDR = 1, + TCA_CSUM_UPDATE_FLAG_ICMP = 2, + TCA_CSUM_UPDATE_FLAG_IGMP = 4, + TCA_CSUM_UPDATE_FLAG_TCP = 8, + TCA_CSUM_UPDATE_FLAG_UDP = 16, + TCA_CSUM_UPDATE_FLAG_UDPLITE = 32 +}; + +struct tc_csum { + tc_gen; + + __u32 update_flags; +}; + +#endif /* __LINUX_TC_CSUM_H */ diff --git a/include/linux/tc_act/tc_defact.h b/include/linux/tc_act/tc_defact.h new file mode 100644 index 00000000..6f65d07c --- /dev/null +++ b/include/linux/tc_act/tc_defact.h @@ -0,0 +1,19 @@ +#ifndef __LINUX_TC_DEF_H +#define __LINUX_TC_DEF_H + +#include <linux/pkt_cls.h> + +struct tc_defact { + tc_gen; +}; + +enum { + TCA_DEF_UNSPEC, + TCA_DEF_TM, + TCA_DEF_PARMS, + TCA_DEF_DATA, + __TCA_DEF_MAX +}; +#define TCA_DEF_MAX (__TCA_DEF_MAX - 1) + +#endif diff --git a/include/linux/tc_act/tc_gact.h b/include/linux/tc_act/tc_gact.h new file mode 100644 index 00000000..f7bf94ee --- /dev/null +++ b/include/linux/tc_act/tc_gact.h @@ -0,0 +1,32 @@ +#ifndef __LINUX_TC_GACT_H +#define __LINUX_TC_GACT_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +#define TCA_ACT_GACT 5 +struct tc_gact { + tc_gen; + +}; + +struct tc_gact_p { +#define PGACT_NONE 0 +#define PGACT_NETRAND 1 +#define PGACT_DETERM 2 +#define MAX_RAND (PGACT_DETERM + 1 ) + __u16 ptype; + __u16 pval; + int paction; +}; + +enum { + TCA_GACT_UNSPEC, + TCA_GACT_TM, + TCA_GACT_PARMS, + TCA_GACT_PROB, + __TCA_GACT_MAX +}; +#define TCA_GACT_MAX (__TCA_GACT_MAX - 1) + +#endif diff --git a/include/linux/tc_act/tc_ipt.h b/include/linux/tc_act/tc_ipt.h new file mode 100644 index 00000000..a2335563 --- /dev/null +++ b/include/linux/tc_act/tc_ipt.h @@ -0,0 +1,20 @@ +#ifndef __LINUX_TC_IPT_H +#define __LINUX_TC_IPT_H + +#include <linux/pkt_cls.h> + +#define TCA_ACT_IPT 6 + +enum { + TCA_IPT_UNSPEC, + TCA_IPT_TABLE, + TCA_IPT_HOOK, + TCA_IPT_INDEX, + TCA_IPT_CNT, + TCA_IPT_TM, + TCA_IPT_TARG, + __TCA_IPT_MAX +}; +#define TCA_IPT_MAX (__TCA_IPT_MAX - 1) + +#endif diff --git a/include/linux/tc_act/tc_mirred.h b/include/linux/tc_act/tc_mirred.h new file mode 100644 index 00000000..7561750e --- /dev/null +++ b/include/linux/tc_act/tc_mirred.h @@ -0,0 +1,27 @@ +#ifndef __LINUX_TC_MIR_H +#define __LINUX_TC_MIR_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +#define TCA_ACT_MIRRED 8 +#define TCA_EGRESS_REDIR 1 /* packet redirect to EGRESS*/ +#define TCA_EGRESS_MIRROR 2 /* mirror packet to EGRESS */ +#define TCA_INGRESS_REDIR 3 /* packet redirect to INGRESS*/ +#define TCA_INGRESS_MIRROR 4 /* mirror packet to INGRESS */ + +struct tc_mirred { + tc_gen; + int 
eaction; /* one of IN/EGRESS_MIRROR/REDIR */ + __u32 ifindex; /* ifindex of egress port */ +}; + +enum { + TCA_MIRRED_UNSPEC, + TCA_MIRRED_TM, + TCA_MIRRED_PARMS, + __TCA_MIRRED_MAX +}; +#define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1) + +#endif diff --git a/include/linux/tc_act/tc_nat.h b/include/linux/tc_act/tc_nat.h new file mode 100644 index 00000000..6663aeba --- /dev/null +++ b/include/linux/tc_act/tc_nat.h @@ -0,0 +1,27 @@ +#ifndef __LINUX_TC_NAT_H +#define __LINUX_TC_NAT_H + +#include <linux/pkt_cls.h> +#include <linux/types.h> + +#define TCA_ACT_NAT 9 + +enum { + TCA_NAT_UNSPEC, + TCA_NAT_PARMS, + TCA_NAT_TM, + __TCA_NAT_MAX +}; +#define TCA_NAT_MAX (__TCA_NAT_MAX - 1) + +#define TCA_NAT_FLAG_EGRESS 1 + +struct tc_nat { + tc_gen; + __be32 old_addr; + __be32 new_addr; + __be32 mask; + __u32 flags; +}; + +#endif diff --git a/include/linux/tc_act/tc_pedit.h b/include/linux/tc_act/tc_pedit.h new file mode 100644 index 00000000..716cfabc --- /dev/null +++ b/include/linux/tc_act/tc_pedit.h @@ -0,0 +1,34 @@ +#ifndef __LINUX_TC_PED_H +#define __LINUX_TC_PED_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +#define TCA_ACT_PEDIT 7 + +enum { + TCA_PEDIT_UNSPEC, + TCA_PEDIT_TM, + TCA_PEDIT_PARMS, + __TCA_PEDIT_MAX +}; +#define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1) + +struct tc_pedit_key { + __u32 mask; /* AND */ + __u32 val; /*XOR */ + __u32 off; /*offset */ + __u32 at; + __u32 offmask; + __u32 shift; +}; + +struct tc_pedit_sel { + tc_gen; + unsigned char nkeys; + unsigned char flags; + struct tc_pedit_key keys[0]; +}; +#define tc_pedit tc_pedit_sel + +#endif diff --git a/include/linux/tc_act/tc_skbedit.h b/include/linux/tc_act/tc_skbedit.h new file mode 100644 index 00000000..7a2e910a --- /dev/null +++ b/include/linux/tc_act/tc_skbedit.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2008, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. 
+ * + * Author: Alexander Duyck <alexander.h.duyck@intel.com> + */ + +#ifndef __LINUX_TC_SKBEDIT_H +#define __LINUX_TC_SKBEDIT_H + +#include <linux/pkt_cls.h> + +#define TCA_ACT_SKBEDIT 11 + +#define SKBEDIT_F_PRIORITY 0x1 +#define SKBEDIT_F_QUEUE_MAPPING 0x2 +#define SKBEDIT_F_MARK 0x4 + +struct tc_skbedit { + tc_gen; +}; + +enum { + TCA_SKBEDIT_UNSPEC, + TCA_SKBEDIT_TM, + TCA_SKBEDIT_PARMS, + TCA_SKBEDIT_PRIORITY, + TCA_SKBEDIT_QUEUE_MAPPING, + TCA_SKBEDIT_MARK, + __TCA_SKBEDIT_MAX +}; +#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1) + +#endif diff --git a/include/linux/tc_ematch/Kbuild b/include/linux/tc_ematch/Kbuild new file mode 100644 index 00000000..4a58a1c3 --- /dev/null +++ b/include/linux/tc_ematch/Kbuild @@ -0,0 +1,4 @@ +header-y += tc_em_cmp.h +header-y += tc_em_meta.h +header-y += tc_em_nbyte.h +header-y += tc_em_text.h diff --git a/include/linux/tc_ematch/tc_em_cmp.h b/include/linux/tc_ematch/tc_em_cmp.h new file mode 100644 index 00000000..f34bb1ba --- /dev/null +++ b/include/linux/tc_ematch/tc_em_cmp.h @@ -0,0 +1,25 @@ +#ifndef __LINUX_TC_EM_CMP_H +#define __LINUX_TC_EM_CMP_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +struct tcf_em_cmp { + __u32 val; + __u32 mask; + __u16 off; + __u8 align:4; + __u8 flags:4; + __u8 layer:4; + __u8 opnd:4; +}; + +enum { + TCF_EM_ALIGN_U8 = 1, + TCF_EM_ALIGN_U16 = 2, + TCF_EM_ALIGN_U32 = 4 +}; + +#define TCF_EM_CMP_TRANS 1 + +#endif diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h new file mode 100644 index 00000000..b11f8ce2 --- /dev/null +++ b/include/linux/tc_ematch/tc_em_meta.h @@ -0,0 +1,92 @@ +#ifndef __LINUX_TC_EM_META_H +#define __LINUX_TC_EM_META_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +enum { + TCA_EM_META_UNSPEC, + TCA_EM_META_HDR, + TCA_EM_META_LVALUE, + TCA_EM_META_RVALUE, + __TCA_EM_META_MAX +}; +#define TCA_EM_META_MAX (__TCA_EM_META_MAX - 1) + +struct tcf_meta_val { + __u16 kind; + __u8 shift; + __u8 op; +}; + +#define TCF_META_TYPE_MASK (0xf << 12) +#define TCF_META_TYPE(kind) (((kind) & TCF_META_TYPE_MASK) >> 12) +#define TCF_META_ID_MASK 0x7ff +#define TCF_META_ID(kind) ((kind) & TCF_META_ID_MASK) + +enum { + TCF_META_TYPE_VAR, + TCF_META_TYPE_INT, + __TCF_META_TYPE_MAX +}; +#define TCF_META_TYPE_MAX (__TCF_META_TYPE_MAX - 1) + +enum { + TCF_META_ID_VALUE, + TCF_META_ID_RANDOM, + TCF_META_ID_LOADAVG_0, + TCF_META_ID_LOADAVG_1, + TCF_META_ID_LOADAVG_2, + TCF_META_ID_DEV, + TCF_META_ID_PRIORITY, + TCF_META_ID_PROTOCOL, + TCF_META_ID_PKTTYPE, + TCF_META_ID_PKTLEN, + TCF_META_ID_DATALEN, + TCF_META_ID_MACLEN, + TCF_META_ID_NFMARK, + TCF_META_ID_TCINDEX, + TCF_META_ID_RTCLASSID, + TCF_META_ID_RTIIF, + TCF_META_ID_SK_FAMILY, + TCF_META_ID_SK_STATE, + TCF_META_ID_SK_REUSE, + TCF_META_ID_SK_BOUND_IF, + TCF_META_ID_SK_REFCNT, + TCF_META_ID_SK_SHUTDOWN, + TCF_META_ID_SK_PROTO, + TCF_META_ID_SK_TYPE, + TCF_META_ID_SK_RCVBUF, + TCF_META_ID_SK_RMEM_ALLOC, + TCF_META_ID_SK_WMEM_ALLOC, + TCF_META_ID_SK_OMEM_ALLOC, + TCF_META_ID_SK_WMEM_QUEUED, + TCF_META_ID_SK_RCV_QLEN, + TCF_META_ID_SK_SND_QLEN, + TCF_META_ID_SK_ERR_QLEN, + TCF_META_ID_SK_FORWARD_ALLOCS, + TCF_META_ID_SK_SNDBUF, + TCF_META_ID_SK_ALLOCS, + __TCF_META_ID_SK_ROUTE_CAPS, /* unimplemented but in ABI already */ + TCF_META_ID_SK_HASH, + TCF_META_ID_SK_LINGERTIME, + TCF_META_ID_SK_ACK_BACKLOG, + TCF_META_ID_SK_MAX_ACK_BACKLOG, + TCF_META_ID_SK_PRIO, + TCF_META_ID_SK_RCVLOWAT, + TCF_META_ID_SK_RCVTIMEO, + TCF_META_ID_SK_SNDTIMEO, + TCF_META_ID_SK_SENDMSG_OFF, + TCF_META_ID_SK_WRITE_PENDING, + 
TCF_META_ID_VLAN_TAG, + TCF_META_ID_RXHASH, + __TCF_META_ID_MAX +}; +#define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1) + +struct tcf_meta_hdr { + struct tcf_meta_val left; + struct tcf_meta_val right; +}; + +#endif diff --git a/include/linux/tc_ematch/tc_em_nbyte.h b/include/linux/tc_ematch/tc_em_nbyte.h new file mode 100644 index 00000000..7172cfb9 --- /dev/null +++ b/include/linux/tc_ematch/tc_em_nbyte.h @@ -0,0 +1,13 @@ +#ifndef __LINUX_TC_EM_NBYTE_H +#define __LINUX_TC_EM_NBYTE_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +struct tcf_em_nbyte { + __u16 off; + __u16 len:12; + __u8 layer:4; +}; + +#endif diff --git a/include/linux/tc_ematch/tc_em_text.h b/include/linux/tc_ematch/tc_em_text.h new file mode 100644 index 00000000..5aac4045 --- /dev/null +++ b/include/linux/tc_ematch/tc_em_text.h @@ -0,0 +1,19 @@ +#ifndef __LINUX_TC_EM_TEXT_H +#define __LINUX_TC_EM_TEXT_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +#define TC_EM_TEXT_ALGOSIZ 16 + +struct tcf_em_text { + char algo[TC_EM_TEXT_ALGOSIZ]; + __u16 from_offset; + __u16 to_offset; + __u16 pattern_len; + __u8 from_layer:4; + __u8 to_layer:4; + __u8 pad; +}; + +#endif diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h new file mode 100644 index 00000000..7bd266f3 --- /dev/null +++ b/include/linux/tca6416_keypad.h @@ -0,0 +1,34 @@ +/* + * tca6416 keypad platform support + * + * Copyright (C) 2010 Texas Instruments + * + * Author: Sriramakrishnan <srk@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _TCA6416_KEYS_H +#define _TCA6416_KEYS_H + +#include <linux/types.h> + +struct tca6416_button { + /* Configuration parameters */ + int code; /* input event code (KEY_*, SW_*) */ + int active_low; + int type; /* input event type (EV_KEY, EV_SW) */ +}; + +struct tca6416_keys_platform_data { + struct tca6416_button *buttons; + int nbuttons; + unsigned int rep:1; /* enable input subsystem auto repeat */ + uint16_t pinmask; + uint16_t invert; + int irq_is_gpio; + int use_polling; /* use polling if Interrupt is not connected*/ +}; +#endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h new file mode 100644 index 00000000..b6c62d29 --- /dev/null +++ b/include/linux/tcp.h @@ -0,0 +1,505 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the TCP protocol. + * + * Version: @(#)tcp.h 1.0.2 04/28/93 + * + * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _LINUX_TCP_H +#define _LINUX_TCP_H + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/socket.h> + +struct tcphdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u16 res1:4, + doff:4, + fin:1, + syn:1, + rst:1, + psh:1, + ack:1, + urg:1, + ece:1, + cwr:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u16 doff:4, + res1:4, + cwr:1, + ece:1, + urg:1, + ack:1, + psh:1, + rst:1, + syn:1, + fin:1; +#else +#error "Adjust your <asm/byteorder.h> defines" +#endif + __be16 window; + __sum16 check; + __be16 urg_ptr; +}; + +/* + * The union cast uses a gcc extension to avoid aliasing problems + * (union is compatible to any of its members) + * This means this part of the code is -fstrict-aliasing safe now. + */ +union tcp_word_hdr { + struct tcphdr hdr; + __be32 words[5]; +}; + +#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) + +enum { + TCP_FLAG_CWR = __cpu_to_be32(0x00800000), + TCP_FLAG_ECE = __cpu_to_be32(0x00400000), + TCP_FLAG_URG = __cpu_to_be32(0x00200000), + TCP_FLAG_ACK = __cpu_to_be32(0x00100000), + TCP_FLAG_PSH = __cpu_to_be32(0x00080000), + TCP_FLAG_RST = __cpu_to_be32(0x00040000), + TCP_FLAG_SYN = __cpu_to_be32(0x00020000), + TCP_FLAG_FIN = __cpu_to_be32(0x00010000), + TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000), + TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000) +}; + +/* + * TCP general constants + */ +#define TCP_MSS_DEFAULT 536U /* IPv4 (RFC1122, RFC2581) */ +#define TCP_MSS_DESIRED 1220U /* IPv6 (tunneled), EDNS0 (RFC3226) */ + +/* TCP socket options */ +#define TCP_NODELAY 1 /* Turn off Nagle's algorithm. */ +#define TCP_MAXSEG 2 /* Limit MSS */ +#define TCP_CORK 3 /* Never send partially complete segments */ +#define TCP_KEEPIDLE 4 /* Start keeplives after this period */ +#define TCP_KEEPINTVL 5 /* Interval between keepalives */ +#define TCP_KEEPCNT 6 /* Number of keepalives before death */ +#define TCP_SYNCNT 7 /* Number of SYN retransmits */ +#define TCP_LINGER2 8 /* Life time of orphaned FIN-WAIT-2 state */ +#define TCP_DEFER_ACCEPT 9 /* Wake up listener only when data arrive */ +#define TCP_WINDOW_CLAMP 10 /* Bound advertised window */ +#define TCP_INFO 11 /* Information about this connection. */ +#define TCP_QUICKACK 12 /* Block/reenable quick acks */ +#define TCP_CONGESTION 13 /* Congestion control algorithm */ +#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */ +#define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */ +#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ +#define TCP_THIN_DUPACK 17 /* Fast retrans. 
after 1 dupack */ +#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ + +/* for TCP_INFO socket option */ +#define TCPI_OPT_TIMESTAMPS 1 +#define TCPI_OPT_SACK 2 +#define TCPI_OPT_WSCALE 4 +#define TCPI_OPT_ECN 8 /* ECN was negociated at TCP session init */ +#define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */ + +enum tcp_ca_state { + TCP_CA_Open = 0, +#define TCPF_CA_Open (1<<TCP_CA_Open) + TCP_CA_Disorder = 1, +#define TCPF_CA_Disorder (1<<TCP_CA_Disorder) + TCP_CA_CWR = 2, +#define TCPF_CA_CWR (1<<TCP_CA_CWR) + TCP_CA_Recovery = 3, +#define TCPF_CA_Recovery (1<<TCP_CA_Recovery) + TCP_CA_Loss = 4 +#define TCPF_CA_Loss (1<<TCP_CA_Loss) +}; + +struct tcp_info { + __u8 tcpi_state; + __u8 tcpi_ca_state; + __u8 tcpi_retransmits; + __u8 tcpi_probes; + __u8 tcpi_backoff; + __u8 tcpi_options; + __u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4; + + __u32 tcpi_rto; + __u32 tcpi_ato; + __u32 tcpi_snd_mss; + __u32 tcpi_rcv_mss; + + __u32 tcpi_unacked; + __u32 tcpi_sacked; + __u32 tcpi_lost; + __u32 tcpi_retrans; + __u32 tcpi_fackets; + + /* Times. */ + __u32 tcpi_last_data_sent; + __u32 tcpi_last_ack_sent; /* Not remembered, sorry. */ + __u32 tcpi_last_data_recv; + __u32 tcpi_last_ack_recv; + + /* Metrics. */ + __u32 tcpi_pmtu; + __u32 tcpi_rcv_ssthresh; + __u32 tcpi_rtt; + __u32 tcpi_rttvar; + __u32 tcpi_snd_ssthresh; + __u32 tcpi_snd_cwnd; + __u32 tcpi_advmss; + __u32 tcpi_reordering; + + __u32 tcpi_rcv_rtt; + __u32 tcpi_rcv_space; + + __u32 tcpi_total_retrans; +}; + +/* for TCP_MD5SIG socket option */ +#define TCP_MD5SIG_MAXKEYLEN 80 + +struct tcp_md5sig { + struct __kernel_sockaddr_storage tcpm_addr; /* address associated */ + __u16 __tcpm_pad1; /* zero */ + __u16 tcpm_keylen; /* key length */ + __u32 __tcpm_pad2; /* zero */ + __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */ +}; + +/* for TCP_COOKIE_TRANSACTIONS (TCPCT) socket option */ +#define TCP_COOKIE_MIN 8 /* 64-bits */ +#define TCP_COOKIE_MAX 16 /* 128-bits */ +#define TCP_COOKIE_PAIR_SIZE (2*TCP_COOKIE_MAX) + +/* Flags for both getsockopt and setsockopt */ +#define TCP_COOKIE_IN_ALWAYS (1 << 0) /* Discard SYN without cookie */ +#define TCP_COOKIE_OUT_NEVER (1 << 1) /* Prohibit outgoing cookies, + * supercedes everything. */ + +/* Flags for getsockopt */ +#define TCP_S_DATA_IN (1 << 2) /* Was data received? */ +#define TCP_S_DATA_OUT (1 << 3) /* Was data sent? */ + +/* TCP_COOKIE_TRANSACTIONS data */ +struct tcp_cookie_transactions { + __u16 tcpct_flags; /* see above */ + __u8 __tcpct_pad1; /* zero */ + __u8 tcpct_cookie_desired; /* bytes */ + __u16 tcpct_s_data_desired; /* bytes of variable data */ + __u16 tcpct_used; /* bytes in value */ + __u8 tcpct_value[TCP_MSS_DEFAULT]; +}; + +#ifdef __KERNEL__ + +#include <linux/skbuff.h> +#include <linux/dmaengine.h> +#include <net/sock.h> +#include <net/inet_connection_sock.h> +#include <net/inet_timewait_sock.h> + +static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) +{ + return (struct tcphdr *)skb_transport_header(skb); +} + +static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) +{ + return tcp_hdr(skb)->doff * 4; +} + +static inline unsigned int tcp_optlen(const struct sk_buff *skb) +{ + return (tcp_hdr(skb)->doff - 5) * 4; +} + +/* This defines a selective acknowledgement block. 
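
To make the TCP_INFO option and struct tcp_info above concrete, here is a small user-space sketch that queries a connected socket through the glibc mirror of these definitions in <netinet/tcp.h>; only a few fields are printed and error handling is kept minimal.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>	/* user-space view of TCP_INFO / struct tcp_info */

/* Print smoothed RTT and retransmit counters for a connected TCP socket. */
static int print_tcp_info(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) < 0) {
		perror("getsockopt(TCP_INFO)");
		return -1;
	}

	printf("srtt=%u us rttvar=%u us retrans=%u total_retrans=%u\n",
	       info.tcpi_rtt, info.tcpi_rttvar,
	       info.tcpi_retrans, info.tcpi_total_retrans);
	return 0;
}
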
*/ +struct tcp_sack_block_wire { + __be32 start_seq; + __be32 end_seq; +}; + +struct tcp_sack_block { + u32 start_seq; + u32 end_seq; +}; + +/*These are used to set the sack_ok field in struct tcp_options_received */ +#define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */ +#define TCP_FACK_ENABLED (1 << 1) /*1 = FACK is enabled locally*/ +#define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/ + +struct tcp_options_received { +/* PAWS/RTTM data */ + long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ + u32 ts_recent; /* Time stamp to echo next */ + u32 rcv_tsval; /* Time stamp value */ + u32 rcv_tsecr; /* Time stamp echo reply */ + u16 saw_tstamp : 1, /* Saw TIMESTAMP on last packet */ + tstamp_ok : 1, /* TIMESTAMP seen on SYN packet */ + dsack : 1, /* D-SACK is scheduled */ + wscale_ok : 1, /* Wscale seen on SYN packet */ + sack_ok : 4, /* SACK seen on SYN packet */ + snd_wscale : 4, /* Window scaling received from sender */ + rcv_wscale : 4; /* Window scaling to send to receiver */ + u8 cookie_plus:6, /* bytes in authenticator/cookie option */ + cookie_out_never:1, + cookie_in_always:1; + u8 num_sacks; /* Number of SACK blocks */ + u16 user_mss; /* mss requested by user in ioctl */ + u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ +}; + +static inline void tcp_clear_options(struct tcp_options_received *rx_opt) +{ + rx_opt->tstamp_ok = rx_opt->sack_ok = 0; + rx_opt->wscale_ok = rx_opt->snd_wscale = 0; + rx_opt->cookie_plus = 0; +} + +/* This is the max number of SACKS that we'll generate and process. It's safe + * to increase this, although since: + * size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8) + * only four options will fit in a standard TCP header */ +#define TCP_NUM_SACKS 4 + +struct tcp_cookie_values; +struct tcp_request_sock_ops; + +struct tcp_request_sock { + struct inet_request_sock req; +#ifdef CONFIG_TCP_MD5SIG + /* Only used by TCP MD5 Signature so far. */ + const struct tcp_request_sock_ops *af_specific; +#endif + u32 rcv_isn; + u32 snt_isn; + u32 snt_synack; /* synack sent time */ +}; + +static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) +{ + return (struct tcp_request_sock *)req; +} + +struct tcp_sock { + /* inet_connection_sock has to be the first member of tcp_sock */ + struct inet_connection_sock inet_conn; + u16 tcp_header_len; /* Bytes of tcp header to send */ + u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ + +/* + * Header prediction flags + * 0x5?10 << 16 + snd_wnd in net byte order + */ + __be32 pred_flags; + +/* + * RFC793 variables by their proper names. This means you can + * read the code and the spec side by side (and laugh ...) + * See RFC793 and RFC1122. The RFC writes these in capitals. 
+ */ + u32 rcv_nxt; /* What we want to receive next */ + u32 copied_seq; /* Head of yet unread data */ + u32 rcv_wup; /* rcv_nxt on last window update sent */ + u32 snd_nxt; /* Next sequence we send */ + + u32 snd_una; /* First byte we want an ack for */ + u32 snd_sml; /* Last byte of the most recently transmitted small packet */ + u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ + u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ + + /* Data for direct copy to user */ + struct { + struct sk_buff_head prequeue; + struct task_struct *task; + struct iovec *iov; + int memory; + int len; +#ifdef CONFIG_NET_DMA + /* members for async copy */ + struct dma_chan *dma_chan; + int wakeup; + struct dma_pinned_list *pinned_list; + dma_cookie_t dma_cookie; +#endif + } ucopy; + + u32 snd_wl1; /* Sequence for window update */ + u32 snd_wnd; /* The window we expect to receive */ + u32 max_window; /* Maximal window ever seen from peer */ + u32 mss_cache; /* Cached effective mss, not including SACKS */ + + u32 window_clamp; /* Maximal window to advertise */ + u32 rcv_ssthresh; /* Current window clamp */ + + u32 frto_highmark; /* snd_nxt when RTO occurred */ + u16 advmss; /* Advertised MSS */ + u8 frto_counter; /* Number of new acks after RTO */ + u8 nonagle : 4,/* Disable Nagle algorithm? */ + thin_lto : 1,/* Use linear timeouts for thin streams */ + thin_dupack : 1,/* Fast retransmit on first dupack */ + unused : 2; + +/* RTT measurement */ + u32 srtt; /* smoothed round trip time << 3 */ + u32 mdev; /* medium deviation */ + u32 mdev_max; /* maximal mdev for the last rtt period */ + u32 rttvar; /* smoothed mdev_max */ + u32 rtt_seq; /* sequence number to update rttvar */ + + u32 packets_out; /* Packets which are "in flight" */ + u32 retrans_out; /* Retransmitted packets out */ + + u16 urg_data; /* Saved octet of OOB data and control flags */ + u8 ecn_flags; /* ECN status bits. */ + u8 reordering; /* Packet reordering metric. */ + u32 snd_up; /* Urgent pointer */ + + u8 keepalive_probes; /* num of allowed keep alive probes */ +/* + * Options received (usually on last packet, some only on SYN packets). + */ + struct tcp_options_received rx_opt; + +/* + * Slow start and congestion control (see also Nagle, and Karn & Partridge) + */ + u32 snd_ssthresh; /* Slow start size threshold */ + u32 snd_cwnd; /* Sending congestion window */ + u32 snd_cwnd_cnt; /* Linear increase counter */ + u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */ + u32 snd_cwnd_used; + u32 snd_cwnd_stamp; + u32 prior_cwnd; /* Congestion window at start of Recovery. */ + u32 prr_delivered; /* Number of newly delivered packets to + * receiver in Recovery. */ + u32 prr_out; /* Total number of pkts sent during Recovery. 
*/ + + u32 rcv_wnd; /* Current receiver window */ + u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ + u32 pushed_seq; /* Last pushed seq, required to talk to windows */ + u32 lost_out; /* Lost packets */ + u32 sacked_out; /* SACK'd packets */ + u32 fackets_out; /* FACK'd packets */ + u32 tso_deferred; + u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */ + + /* from STCP, retrans queue hinting */ + struct sk_buff* lost_skb_hint; + struct sk_buff *scoreboard_skb_hint; + struct sk_buff *retransmit_skb_hint; + + struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ + + /* SACKs data, these 2 need to be together (see tcp_build_and_update_options) */ + struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ + struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/ + + struct tcp_sack_block recv_sack_cache[4]; + + struct sk_buff *highest_sack; /* skb just after the highest + * skb with SACKed bit set + * (validity guaranteed only if + * sacked_out > 0) + */ + + int lost_cnt_hint; + u32 retransmit_high; /* L-bits may be on up to this seqno */ + + u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ + + u32 prior_ssthresh; /* ssthresh saved at recovery start */ + u32 high_seq; /* snd_nxt at onset of congestion */ + + u32 retrans_stamp; /* Timestamp of the last retransmit, + * also used in SYN-SENT to remember stamp of + * the first SYN. */ + u32 undo_marker; /* tracking retrans started here. */ + int undo_retrans; /* number of undoable retransmissions. */ + u32 total_retrans; /* Total retransmits for entire connection */ + + u32 urg_seq; /* Seq of received urgent pointer */ + unsigned int keepalive_time; /* time before keep alive takes place */ + unsigned int keepalive_intvl; /* time interval between keep alive probes */ + + int linger2; + +/* Receiver side RTT estimation */ + struct { + u32 rtt; + u32 seq; + u32 time; + } rcv_rtt_est; + +/* Receiver queue space */ + struct { + int space; + u32 seq; + u32 time; + } rcvq_space; + +/* TCP-specific MTU probe information. */ + struct { + u32 probe_seq_start; + u32 probe_seq_end; + } mtu_probe; + +#ifdef CONFIG_TCP_MD5SIG +/* TCP AF-Specific parts; only used by MD5 Signature support so far */ + const struct tcp_sock_af_ops *af_specific; + +/* TCP MD5 Signature Option information */ + struct tcp_md5sig_info __rcu *md5sig_info; +#endif + + /* When the cookie options are generated and exchanged, then this + * object holds a reference to them (cookie_values->kref). Also + * contains related tcp_cookie_transactions fields. + */ + struct tcp_cookie_values *cookie_values; +}; + +static inline struct tcp_sock *tcp_sk(const struct sock *sk) +{ + return (struct tcp_sock *)sk; +} + +struct tcp_timewait_sock { + struct inet_timewait_sock tw_sk; + u32 tw_rcv_nxt; + u32 tw_snd_nxt; + u32 tw_rcv_wnd; + u32 tw_ts_recent; + long tw_ts_recent_stamp; +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *tw_md5_key; +#endif + /* Few sockets in timewait have cookies; in that case, then this + * object holds a reference to them (tw_cookie_values->kref). 
+ */ + struct tcp_cookie_values *tw_cookie_values; +}; + +static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) +{ + return (struct tcp_timewait_sock *)sk; +} + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_TCP_H */ diff --git a/include/linux/telephony.h b/include/linux/telephony.h new file mode 100644 index 00000000..f63afe33 --- /dev/null +++ b/include/linux/telephony.h @@ -0,0 +1,262 @@ +/****************************************************************************** + * + * telephony.h + * + * Basic Linux Telephony Interface + * + * (c) Copyright 1999-2001 Quicknet Technologies, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Authors: Ed Okerson, <eokerson@quicknet.net> + * Greg Herlein, <gherlein@quicknet.net> + * + * Contributors: Alan Cox, <alan@lxorguk.ukuu.org.uk> + * David W. Erhart, <derhart@quicknet.net> + * + * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT + * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF QUICKNET + * TECHNOLOGIES, INC. HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * QUICKNET TECHNOLOGIES, INC. SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION + * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + * + *****************************************************************************/ + +#ifndef TELEPHONY_H +#define TELEPHONY_H + +#define TELEPHONY_VERSION 3013 + +#define PHONE_VENDOR_IXJ 1 +#define PHONE_VENDOR_QUICKNET PHONE_VENDOR_IXJ +#define PHONE_VENDOR_VOICETRONIX 2 +#define PHONE_VENDOR_ACULAB 3 +#define PHONE_VENDOR_DIGI 4 +#define PHONE_VENDOR_FRANKLIN 5 + +/****************************************************************************** + * Vendor Summary Information Area + * + * Quicknet Technologies, Inc. - makes low density analog telephony cards + * with audio compression, POTS and PSTN interfaces (www.quicknet.net) + * + * (other vendors following this API shuld add a short description of + * the telephony products they support under Linux) + * + *****************************************************************************/ +#define QTI_PHONEJACK 100 +#define QTI_LINEJACK 300 +#define QTI_PHONEJACK_LITE 400 +#define QTI_PHONEJACK_PCI 500 +#define QTI_PHONECARD 600 + +/****************************************************************************** +* +* The capabilities ioctls can inform you of the capabilities of each phone +* device installed in your system. The PHONECTL_CAPABILITIES ioctl +* returns an integer value indicating the number of capabilities the +* device has. The PHONECTL_CAPABILITIES_LIST will fill an array of +* capability structs with all of its capabilities. The +* PHONECTL_CAPABILITIES_CHECK takes a single capability struct and returns +* a TRUE if the device has that capability, otherwise it returns false. 
+* +******************************************************************************/ +typedef enum { + vendor = 0, + device, + port, + codec, + dsp +} phone_cap; + +struct phone_capability { + char desc[80]; + phone_cap captype; + int cap; + int handle; +}; + +typedef enum { + pots = 0, + pstn, + handset, + speaker +} phone_ports; + +#define PHONE_CAPABILITIES _IO ('q', 0x80) +#define PHONE_CAPABILITIES_LIST _IOR ('q', 0x81, struct phone_capability *) +#define PHONE_CAPABILITIES_CHECK _IOW ('q', 0x82, struct phone_capability *) + +typedef struct { + char month[3]; + char day[3]; + char hour[3]; + char min[3]; + int numlen; + char number[11]; + int namelen; + char name[80]; +} PHONE_CID; + +#define PHONE_RING _IO ('q', 0x83) +#define PHONE_HOOKSTATE _IO ('q', 0x84) +#define PHONE_MAXRINGS _IOW ('q', 0x85, char) +#define PHONE_RING_CADENCE _IOW ('q', 0x86, short) +#define OLD_PHONE_RING_START _IO ('q', 0x87) +#define PHONE_RING_START _IOW ('q', 0x87, PHONE_CID *) +#define PHONE_RING_STOP _IO ('q', 0x88) + +#define USA_RING_CADENCE 0xC0C0 + +#define PHONE_REC_CODEC _IOW ('q', 0x89, int) +#define PHONE_REC_START _IO ('q', 0x8A) +#define PHONE_REC_STOP _IO ('q', 0x8B) +#define PHONE_REC_DEPTH _IOW ('q', 0x8C, int) +#define PHONE_FRAME _IOW ('q', 0x8D, int) +#define PHONE_REC_VOLUME _IOW ('q', 0x8E, int) +#define PHONE_REC_VOLUME_LINEAR _IOW ('q', 0xDB, int) +#define PHONE_REC_LEVEL _IO ('q', 0x8F) + +#define PHONE_PLAY_CODEC _IOW ('q', 0x90, int) +#define PHONE_PLAY_START _IO ('q', 0x91) +#define PHONE_PLAY_STOP _IO ('q', 0x92) +#define PHONE_PLAY_DEPTH _IOW ('q', 0x93, int) +#define PHONE_PLAY_VOLUME _IOW ('q', 0x94, int) +#define PHONE_PLAY_VOLUME_LINEAR _IOW ('q', 0xDC, int) +#define PHONE_PLAY_LEVEL _IO ('q', 0x95) +#define PHONE_DTMF_READY _IOR ('q', 0x96, int) +#define PHONE_GET_DTMF _IOR ('q', 0x97, int) +#define PHONE_GET_DTMF_ASCII _IOR ('q', 0x98, int) +#define PHONE_DTMF_OOB _IOW ('q', 0x99, int) +#define PHONE_EXCEPTION _IOR ('q', 0x9A, int) +#define PHONE_PLAY_TONE _IOW ('q', 0x9B, char) +#define PHONE_SET_TONE_ON_TIME _IOW ('q', 0x9C, int) +#define PHONE_SET_TONE_OFF_TIME _IOW ('q', 0x9D, int) +#define PHONE_GET_TONE_ON_TIME _IO ('q', 0x9E) +#define PHONE_GET_TONE_OFF_TIME _IO ('q', 0x9F) +#define PHONE_GET_TONE_STATE _IO ('q', 0xA0) +#define PHONE_BUSY _IO ('q', 0xA1) +#define PHONE_RINGBACK _IO ('q', 0xA2) +#define PHONE_DIALTONE _IO ('q', 0xA3) +#define PHONE_CPT_STOP _IO ('q', 0xA4) + +#define PHONE_PSTN_SET_STATE _IOW ('q', 0xA4, int) +#define PHONE_PSTN_GET_STATE _IO ('q', 0xA5) + +#define PSTN_ON_HOOK 0 +#define PSTN_RINGING 1 +#define PSTN_OFF_HOOK 2 +#define PSTN_PULSE_DIAL 3 + +/****************************************************************************** +* +* The wink duration is tunable with this ioctl. The default wink duration +* is 320ms. You do not need to use this ioctl if you do not require a +* different wink duration. 
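
A short user-space sketch of how the PHONE_* ioctls above are driven; the /dev/phone0 path is an assumption, and the way state is reported back (here via the ioctl return value) can vary between drivers.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/telephony.h>

int main(void)
{
	int fd, hook;

	fd = open("/dev/phone0", O_RDWR);	/* illustrative device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the driver for the current hook switch state. */
	hook = ioctl(fd, PHONE_HOOKSTATE);
	printf("hookstate: %d\n", hook);

	/* Ring the attached handset; cadence and duration are driver specific. */
	ioctl(fd, PHONE_RING);

	close(fd);
	return 0;
}
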
+* +******************************************************************************/ +#define PHONE_WINK_DURATION _IOW ('q', 0xA6, int) +#define PHONE_WINK _IOW ('q', 0xAA, int) + +/****************************************************************************** +* +* Codec Definitions +* +******************************************************************************/ +typedef enum { + G723_63 = 1, + G723_53 = 2, + TS85 = 3, + TS48 = 4, + TS41 = 5, + G728 = 6, + G729 = 7, + ULAW = 8, + ALAW = 9, + LINEAR16 = 10, + LINEAR8 = 11, + WSS = 12, + G729B = 13 +} phone_codec; + +struct phone_codec_data +{ + phone_codec type; + unsigned short buf_min, buf_opt, buf_max; +}; + +#define PHONE_QUERY_CODEC _IOWR ('q', 0xA7, struct phone_codec_data *) +#define PHONE_PSTN_LINETEST _IO ('q', 0xA8) + +/****************************************************************************** +* +* This controls the VAD/CNG functionality of G.723.1. The driver will +* always pass full size frames, any unused bytes will be padded with zeros, +* and frames passed to the driver should also be padded with zeros. The +* frame type is encoded in the least significant two bits of the first +* WORD of the frame as follows: +* +* bits 1-0 Frame Type Data Rate Significant Words +* 00 0 G.723.1 6.3 12 +* 01 1 G.723.1 5.3 10 +* 10 2 VAD/CNG 2 +* 11 3 Repeat last CNG 2 bits +* +******************************************************************************/ +#define PHONE_VAD _IOW ('q', 0xA9, int) + + +/****************************************************************************** +* +* The exception structure allows us to multiplex multiple events onto the +* select() exception set. If any of these flags are set select() will +* return with a positive indication on the exception set. The dtmf_ready +* bit indicates if there is data waiting in the DTMF buffer. The +* hookstate bit is set if there is a change in hookstate status, it does not +* indicate the current state of the hookswitch. The pstn_ring bit +* indicates that the DAA on a LineJACK card has detected ring voltage on +* the PSTN port. The caller_id bit indicates that caller_id data has been +* received and is available. The pstn_wink bit indicates that the DAA on +* the LineJACK has received a wink from the telco switch. The f0, f1, f2 +* and f3 bits indicate that the filter has been triggered by detecting the +* frequency programmed into that filter. +* +* The remaining bits should be set to zero. They will become defined over time +* for other interface cards and their needs. 
+* +******************************************************************************/ +struct phone_except +{ + unsigned int dtmf_ready:1; + unsigned int hookstate:1; + unsigned int pstn_ring:1; + unsigned int caller_id:1; + unsigned int pstn_wink:1; + unsigned int f0:1; + unsigned int f1:1; + unsigned int f2:1; + unsigned int f3:1; + unsigned int flash:1; + unsigned int fc0:1; + unsigned int fc1:1; + unsigned int fc2:1; + unsigned int fc3:1; + unsigned int reserved:18; +}; + +union telephony_exception { + struct phone_except bits; + unsigned int bytes; +}; + + +#endif /* TELEPHONY_H */ + diff --git a/include/linux/termios.h b/include/linux/termios.h new file mode 100644 index 00000000..2acd0c1f --- /dev/null +++ b/include/linux/termios.h @@ -0,0 +1,22 @@ +#ifndef _LINUX_TERMIOS_H +#define _LINUX_TERMIOS_H + +#include <linux/types.h> +#include <asm/termios.h> + +#define NFF 5 + +struct termiox +{ + __u16 x_hflag; + __u16 x_cflag; + __u16 x_rflag[NFF]; + __u16 x_sflag; +}; + +#define RTSXOFF 0x0001 /* RTS flow control on input */ +#define CTSXON 0x0002 /* CTS flow control on output */ +#define DTRXOFF 0x0004 /* DTR flow control on input */ +#define DSRXON 0x0008 /* DCD flow control on output */ + +#endif diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h new file mode 100644 index 00000000..cfaee869 --- /dev/null +++ b/include/linux/textsearch.h @@ -0,0 +1,178 @@ +#ifndef __LINUX_TEXTSEARCH_H +#define __LINUX_TEXTSEARCH_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/slab.h> + +struct module; + +struct ts_config; + +#define TS_AUTOLOAD 1 /* Automatically load textsearch modules when needed */ +#define TS_IGNORECASE 2 /* Searches string case insensitively */ + +/** + * struct ts_state - search state + * @offset: offset for next match + * @cb: control buffer, for persistent variables of get_next_block() + */ +struct ts_state +{ + unsigned int offset; + char cb[40]; +}; + +/** + * struct ts_ops - search module operations + * @name: name of search algorithm + * @init: initialization function to prepare a search + * @find: find the next occurrence of the pattern + * @destroy: destroy algorithm specific parts of a search configuration + * @get_pattern: return head of pattern + * @get_pattern_len: return length of pattern + * @owner: module reference to algorithm + */ +struct ts_ops +{ + const char *name; + struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); + unsigned int (*find)(struct ts_config *, + struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +/** + * struct ts_config - search configuration + * @ops: operations of chosen algorithm + * @flags: flags + * @get_next_block: callback to fetch the next block to search in + * @finish: callback to finalize a search + */ +struct ts_config +{ + struct ts_ops *ops; + int flags; + + /** + * get_next_block - fetch next block of data + * @consumed: number of bytes consumed by the caller + * @dst: destination buffer + * @conf: search configuration + * @state: search state + * + * Called repeatedly until 0 is returned. Must assign the + * head of the next block of data to &*dst and return the length + * of the block or 0 if at the end. consumed == 0 indicates + * a new search. May store/read persistent values in state->cb. 
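
Alongside the callback contract described above, it may help to see the typical calling sequence for this API (textsearch_prepare(), textsearch_find_continuous() and textsearch_destroy() are declared further down in this header); a minimal sketch, assuming GFP_KERNEL context and that the "kmp" algorithm module can be loaded via TS_AUTOLOAD.

#include <linux/err.h>
#include <linux/string.h>
#include <linux/textsearch.h>

/* Look for "chicken" in a flat buffer; returns the offset or UINT_MAX. */
static unsigned int demo_find_chicken(const char *buf, unsigned int len)
{
	static const char pattern[] = "chicken";
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, sizeof(pattern) - 1,
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = textsearch_find_continuous(conf, &state, buf, len);
	textsearch_destroy(conf);
	return pos;
}
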
+ */ + unsigned int (*get_next_block)(unsigned int consumed, + const u8 **dst, + struct ts_config *conf, + struct ts_state *state); + + /** + * finish - finalize/clean a series of get_next_block() calls + * @conf: search configuration + * @state: search state + * + * Called after the last use of get_next_block(), may be used + * to cleanup any leftovers. + */ + void (*finish)(struct ts_config *conf, + struct ts_state *state); +}; + +/** + * textsearch_next - continue searching for a pattern + * @conf: search configuration + * @state: search state + * + * Continues a search looking for more occurrences of the pattern. + * textsearch_find() must be called to find the first occurrence + * in order to reset the state. + * + * Returns the position of the next occurrence of the pattern or + * UINT_MAX if not match was found. + */ +static inline unsigned int textsearch_next(struct ts_config *conf, + struct ts_state *state) +{ + unsigned int ret = conf->ops->find(conf, state); + + if (conf->finish) + conf->finish(conf, state); + + return ret; +} + +/** + * textsearch_find - start searching for a pattern + * @conf: search configuration + * @state: search state + * + * Returns the position of first occurrence of the pattern or + * UINT_MAX if no match was found. + */ +static inline unsigned int textsearch_find(struct ts_config *conf, + struct ts_state *state) +{ + state->offset = 0; + return textsearch_next(conf, state); +} + +/** + * textsearch_get_pattern - return head of the pattern + * @conf: search configuration + */ +static inline void *textsearch_get_pattern(struct ts_config *conf) +{ + return conf->ops->get_pattern(conf); +} + +/** + * textsearch_get_pattern_len - return length of the pattern + * @conf: search configuration + */ +static inline unsigned int textsearch_get_pattern_len(struct ts_config *conf) +{ + return conf->ops->get_pattern_len(conf); +} + +extern int textsearch_register(struct ts_ops *); +extern int textsearch_unregister(struct ts_ops *); +extern struct ts_config *textsearch_prepare(const char *, const void *, + unsigned int, gfp_t, int); +extern void textsearch_destroy(struct ts_config *conf); +extern unsigned int textsearch_find_continuous(struct ts_config *, + struct ts_state *, + const void *, unsigned int); + + +#define TS_PRIV_ALIGNTO 8 +#define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) + +static inline struct ts_config *alloc_ts_config(size_t payload, + gfp_t gfp_mask) +{ + struct ts_config *conf; + + conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask); + if (conf == NULL) + return ERR_PTR(-ENOMEM); + + return conf; +} + +static inline void *ts_config_priv(struct ts_config *conf) +{ + return ((u8 *) conf + TS_PRIV_ALIGN(sizeof(struct ts_config))); +} + +#endif diff --git a/include/linux/textsearch_fsm.h b/include/linux/textsearch_fsm.h new file mode 100644 index 00000000..fdfa078c --- /dev/null +++ b/include/linux/textsearch_fsm.h @@ -0,0 +1,48 @@ +#ifndef __LINUX_TEXTSEARCH_FSM_H +#define __LINUX_TEXTSEARCH_FSM_H + +#include <linux/types.h> + +enum { + TS_FSM_SPECIFIC, /* specific character */ + TS_FSM_WILDCARD, /* any character */ + TS_FSM_DIGIT, /* isdigit() */ + TS_FSM_XDIGIT, /* isxdigit() */ + TS_FSM_PRINT, /* isprint() */ + TS_FSM_ALPHA, /* isalpha() */ + TS_FSM_ALNUM, /* isalnum() */ + TS_FSM_ASCII, /* isascii() */ + TS_FSM_CNTRL, /* iscntrl() */ + TS_FSM_GRAPH, /* isgraph() */ + TS_FSM_LOWER, /* islower() */ + TS_FSM_UPPER, /* isupper() */ + TS_FSM_PUNCT, /* ispunct() */ + TS_FSM_SPACE, /* isspace() */ + 
__TS_FSM_TYPE_MAX, +}; +#define TS_FSM_TYPE_MAX (__TS_FSM_TYPE_MAX - 1) + +enum { + TS_FSM_SINGLE, /* 1 occurrence */ + TS_FSM_PERHAPS, /* 1 or 0 occurrence */ + TS_FSM_ANY, /* 0..n occurrences */ + TS_FSM_MULTI, /* 1..n occurrences */ + TS_FSM_HEAD_IGNORE, /* 0..n ignored occurrences at head */ + __TS_FSM_RECUR_MAX, +}; +#define TS_FSM_RECUR_MAX (__TS_FSM_RECUR_MAX - 1) + +/** + * struct ts_fsm_token - state machine token (state) + * @type: type of token + * @recur: number of recurrences + * @value: character value for TS_FSM_SPECIFIC + */ +struct ts_fsm_token +{ + __u16 type; + __u8 recur; + __u8 value; +}; + +#endif diff --git a/include/linux/tfrc.h b/include/linux/tfrc.h new file mode 100644 index 00000000..8a8462b4 --- /dev/null +++ b/include/linux/tfrc.h @@ -0,0 +1,55 @@ +#ifndef _LINUX_TFRC_H_ +#define _LINUX_TFRC_H_ +/* + * TFRC - Data Structures for the TCP-Friendly Rate Control congestion + * control mechanism as specified in RFC 3448. + * + * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> + * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> + * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#include <linux/types.h> + +/** tfrc_rx_info - TFRC Receiver Data Structure + * + * @tfrcrx_x_recv: receiver estimate of sending rate (3.2.2) + * @tfrcrx_rtt: round-trip-time (communicated by sender) + * @tfrcrx_p: current estimate of loss event rate (3.2.2) + */ +struct tfrc_rx_info { + __u32 tfrcrx_x_recv; + __u32 tfrcrx_rtt; + __u32 tfrcrx_p; +}; + +/** tfrc_tx_info - TFRC Sender Data Structure + * + * @tfrctx_x: computed transmit rate (4.3 (4)) + * @tfrctx_x_recv: receiver estimate of send rate (4.3) + * @tfrctx_x_calc: return value of throughput equation (3.1) + * @tfrctx_rtt: (moving average) estimate of RTT (4.3) + * @tfrctx_p: current loss event rate (5.4) + * @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3) + * @tfrctx_ipi: inter-packet interval (4.6) + * + * Note: X and X_recv are both maintained in units of 64 * bytes/second. This + * enables a finer resolution of sending rates and avoids problems with + * integer arithmetic; u32 is not sufficient as scaling consumes 6 bits. + */ +struct tfrc_tx_info { + __u64 tfrctx_x; + __u64 tfrctx_x_recv; + __u32 tfrctx_x_calc; + __u32 tfrctx_rtt; + __u32 tfrctx_p; + __u32 tfrctx_rto; + __u32 tfrctx_ipi; +}; + +#endif /* _LINUX_TFRC_H_ */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h new file mode 100644 index 00000000..796f1ff0 --- /dev/null +++ b/include/linux/thermal.h @@ -0,0 +1,163 @@ +/* + * thermal.h ($Revision: 0 $) + * + * Copyright (C) 2008 Intel Corp + * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com> + * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
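
Given the note in tfrc.h above that tfrctx_x and tfrctx_x_recv are kept in units of 64 * bytes/second, a conversion helper for reporting could look like this; the helper itself is not part of the header.

#include <linux/tfrc.h>

/* Undo the 64x scaling used for tfrctx_x / tfrctx_x_recv. */
static inline __u64 tfrc_x_to_bytes_per_sec(__u64 scaled_x)
{
	return scaled_x >> 6;		/* scaled units are 64 * bytes/second */
}
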
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#ifndef __THERMAL_H__ +#define __THERMAL_H__ + +#include <linux/idr.h> +#include <linux/device.h> +#include <linux/workqueue.h> + +struct thermal_zone_device; +struct thermal_cooling_device; + +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED, +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE, + THERMAL_TRIP_HOT, + THERMAL_TRIP_CRITICAL, +}; + +struct thermal_zone_device_ops { + int (*bind) (struct thermal_zone_device *, + struct thermal_cooling_device *); + int (*unbind) (struct thermal_zone_device *, + struct thermal_cooling_device *); + int (*get_temp) (struct thermal_zone_device *, unsigned long *); + int (*get_mode) (struct thermal_zone_device *, + enum thermal_device_mode *); + int (*set_mode) (struct thermal_zone_device *, + enum thermal_device_mode); + int (*get_trip_type) (struct thermal_zone_device *, int, + enum thermal_trip_type *); + int (*get_trip_temp) (struct thermal_zone_device *, int, + unsigned long *); + int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); + int (*notify) (struct thermal_zone_device *, int, + enum thermal_trip_type); +}; + +struct thermal_cooling_device_ops { + int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); + int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); + int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); +}; + +#define THERMAL_TRIPS_NONE -1 +#define THERMAL_MAX_TRIPS 12 +#define THERMAL_NAME_LENGTH 20 +struct thermal_cooling_device { + int id; + char type[THERMAL_NAME_LENGTH]; + struct device device; + void *devdata; + const struct thermal_cooling_device_ops *ops; + struct list_head node; +}; + +#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? 
\ + ((long)t-2732+5)/10 : ((long)t-2732-5)/10) +#define CELSIUS_TO_KELVIN(t) ((t)*10+2732) + +struct thermal_zone_device { + int id; + char type[THERMAL_NAME_LENGTH]; + struct device device; + void *devdata; + int trips; + int tc1; + int tc2; + int passive_delay; + int polling_delay; + int last_temperature; + bool passive; + unsigned int forced_passive; + const struct thermal_zone_device_ops *ops; + struct list_head cooling_devices; + struct idr idr; + struct mutex lock; /* protect cooling devices list */ + struct list_head node; + struct delayed_work poll_queue; +}; +/* Adding event notification support elements */ +#define THERMAL_GENL_FAMILY_NAME "thermal_event" +#define THERMAL_GENL_VERSION 0x01 +#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group" + +enum events { + THERMAL_AUX0, + THERMAL_AUX1, + THERMAL_CRITICAL, + THERMAL_DEV_FAULT, +}; + +struct thermal_genl_event { + u32 orig; + enum events event; +}; +/* attributes of thermal_genl_family */ +enum { + THERMAL_GENL_ATTR_UNSPEC, + THERMAL_GENL_ATTR_EVENT, + __THERMAL_GENL_ATTR_MAX, +}; +#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) + +/* commands supported by the thermal_genl_family */ +enum { + THERMAL_GENL_CMD_UNSPEC, + THERMAL_GENL_CMD_EVENT, + __THERMAL_GENL_CMD_MAX, +}; +#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) + +struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, + const struct thermal_zone_device_ops *, int tc1, int tc2, + int passive_freq, int polling_freq); +void thermal_zone_device_unregister(struct thermal_zone_device *); + +int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, + struct thermal_cooling_device *); +int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, + struct thermal_cooling_device *); +void thermal_zone_device_update(struct thermal_zone_device *); +struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, + const struct thermal_cooling_device_ops *); +void thermal_cooling_device_unregister(struct thermal_cooling_device *); + +#ifdef CONFIG_NET +extern int thermal_generate_netlink_event(u32 orig, enum events event); +#else +static inline int thermal_generate_netlink_event(u32 orig, enum events event) +{ + return 0; +} +#endif + +#endif /* __THERMAL_H__ */ diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h new file mode 100644 index 00000000..8d03f079 --- /dev/null +++ b/include/linux/thread_info.h @@ -0,0 +1,128 @@ +/* thread_info.h: common low-level thread information accessors + * + * Copyright (C) 2002 David Howells (dhowells@redhat.com) + * - Incorporating suggestions made by Linus Torvalds + */ + +#ifndef _LINUX_THREAD_INFO_H +#define _LINUX_THREAD_INFO_H + +#include <linux/types.h> + +struct timespec; +struct compat_timespec; + +/* + * System call restart block. 
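
Tying the thermal.h pieces above together, a minimal sensor driver might register a zone roughly as follows; the zone name, the constant temperature and the 1000 ms polling interval are placeholders, and ERR_PTR-style failure reporting from the register call is assumed.

#include <linux/err.h>
#include <linux/thermal.h>

/* Report a fixed temperature in millidegrees Celsius (placeholder). */
static int demo_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
{
	*temp = 45000;
	return 0;
}

static const struct thermal_zone_device_ops demo_tz_ops = {
	.get_temp = demo_get_temp,
};

static char demo_tz_type[] = "demo_sensor";
static struct thermal_zone_device *demo_tz;

static int demo_thermal_init(void)
{
	/* no trip points, no passive cooling parameters, poll every 1000 ms */
	demo_tz = thermal_zone_device_register(demo_tz_type, 0, NULL,
					       &demo_tz_ops, 0, 0, 0, 1000);
	if (IS_ERR(demo_tz))
		return PTR_ERR(demo_tz);
	return 0;
}

static void demo_thermal_exit(void)
{
	thermal_zone_device_unregister(demo_tz);
}
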
+ */ +struct restart_block { + long (*fn)(struct restart_block *); + union { + /* For futex_wait and futex_wait_requeue_pi */ + struct { + u32 __user *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 __user *uaddr2; + } futex; + /* For nanosleep */ + struct { + clockid_t clockid; + struct timespec __user *rmtp; +#ifdef CONFIG_COMPAT + struct compat_timespec __user *compat_rmtp; +#endif + u64 expires; + } nanosleep; + /* For poll */ + struct { + struct pollfd __user *ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; + }; +}; + +extern long do_no_restart_syscall(struct restart_block *parm); + +#include <linux/bitops.h> +#include <asm/thread_info.h> + +#ifdef __KERNEL__ + +/* + * flag set/clear/test wrappers + * - pass TIF_xxxx constants to these functions + */ + +static inline void set_ti_thread_flag(struct thread_info *ti, int flag) +{ + set_bit(flag, (unsigned long *)&ti->flags); +} + +static inline void clear_ti_thread_flag(struct thread_info *ti, int flag) +{ + clear_bit(flag, (unsigned long *)&ti->flags); +} + +static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) +{ + return test_and_set_bit(flag, (unsigned long *)&ti->flags); +} + +static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) +{ + return test_and_clear_bit(flag, (unsigned long *)&ti->flags); +} + +static inline int test_ti_thread_flag(struct thread_info *ti, int flag) +{ + return test_bit(flag, (unsigned long *)&ti->flags); +} + +#define set_thread_flag(flag) \ + set_ti_thread_flag(current_thread_info(), flag) +#define clear_thread_flag(flag) \ + clear_ti_thread_flag(current_thread_info(), flag) +#define test_and_set_thread_flag(flag) \ + test_and_set_ti_thread_flag(current_thread_info(), flag) +#define test_and_clear_thread_flag(flag) \ + test_and_clear_ti_thread_flag(current_thread_info(), flag) +#define test_thread_flag(flag) \ + test_ti_thread_flag(current_thread_info(), flag) + +#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED) +#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED) + +#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK +/* + * An arch can define its own version of set_restore_sigmask() to get the + * job done however works, with or without TIF_RESTORE_SIGMASK. + */ +#define HAVE_SET_RESTORE_SIGMASK 1 + +/** + * set_restore_sigmask() - make sure saved_sigmask processing gets done + * + * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code + * will run before returning to user mode, to process the flag. For + * all callers, TIF_SIGPENDING is already set or it's no harm to set + * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the + * arch code will notice on return to user mode, in case those bits + * are scarce. We set TIF_SIGPENDING here to ensure that the arch + * signal code always gets run when TIF_RESTORE_SIGMASK is set. + */ +static inline void set_restore_sigmask(void) +{ + set_thread_flag(TIF_RESTORE_SIGMASK); + set_thread_flag(TIF_SIGPENDING); +} +#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */ + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_THREAD_INFO_H */ diff --git a/include/linux/threads.h b/include/linux/threads.h new file mode 100644 index 00000000..383ab959 --- /dev/null +++ b/include/linux/threads.h @@ -0,0 +1,45 @@ +#ifndef _LINUX_THREADS_H +#define _LINUX_THREADS_H + + +/* + * The default limit for the nr of threads is now in + * /proc/sys/kernel/threads-max. 
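
As a small illustration of the flag wrappers above (TIF_NEED_RESCHED is defined per architecture in <asm/thread_info.h>, so treat this strictly as a sketch):

#include <linux/thread_info.h>

/* True when the current task has a reschedule request pending. */
static inline int demo_resched_pending(void)
{
	return test_thread_flag(TIF_NEED_RESCHED);
}
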
+ */ + +/* + * Maximum supported processors. Setting this smaller saves quite a + * bit of memory. Use nr_cpu_ids instead of this except for static bitmaps. + */ +#ifndef CONFIG_NR_CPUS +/* FIXME: This should be fixed in the arch's Kconfig */ +#define CONFIG_NR_CPUS 1 +#endif + +/* Places which use this should consider cpumask_var_t. */ +#define NR_CPUS CONFIG_NR_CPUS + +#define MIN_THREADS_LEFT_FOR_ROOT 4 + +/* + * This controls the default maximum pid allocated to a process + */ +#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000) + +/* + * A maximum of 4 million PIDs should be enough for a while. + * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.] + */ +#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ + (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) + +/* + * Define a minimum number of pids per cpu. Heuristically based + * on original pid max of 32k for 32 cpus. Also, increase the + * minimum settable value for pid_max on the running system based + * on similar defaults. See kernel/pid.c:pidmap_init() for details. + */ +#define PIDS_PER_CPU_DEFAULT 1024 +#define PIDS_PER_CPU_MIN 8 + +#endif diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h new file mode 100644 index 00000000..2ef4385d --- /dev/null +++ b/include/linux/ti_wilink_st.h @@ -0,0 +1,448 @@ +/* + * Shared Transport Header file + * To be included by the protocol stack drivers for + * Texas Instruments BT,FM and GPS combo chip drivers + * and also serves the sub-modules of the shared transport driver. + * + * Copyright (C) 2009-2010 Texas Instruments + * Author: Pavan Savoy <pavan_savoy@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef TI_WILINK_ST_H +#define TI_WILINK_ST_H + +/** + * enum proto-type - The protocol on WiLink chips which share a + * common physical interface like UART. + */ +enum proto_type { + ST_BT, + ST_FM, + ST_GPS, + ST_MAX_CHANNELS = 16, +}; + +/** + * struct st_proto_s - Per Protocol structure from BT/FM/GPS to ST + * @type: type of the protocol being registered among the + * available proto_type(BT, FM, GPS the protocol which share TTY). + * @recv: the receiver callback pointing to a function in the + * protocol drivers called by the ST driver upon receiving + * relevant data. + * @match_packet: reserved for future use, to make ST more generic + * @reg_complete_cb: callback handler pointing to a function in protocol + * handler called by ST when the pending registrations are complete. + * The registrations are marked pending, in situations when fw + * download is in progress. + * @write: pointer to function in ST provided to protocol drivers from ST, + * to be made use when protocol drivers have data to send to TTY. + * @priv_data: privdate data holder for the protocol drivers, sent + * from the protocol drivers during registration, and sent back on + * reg_complete_cb and recv. 
+ * @chnl_id: channel id the protocol driver is interested in, the channel + * id is nothing but the 1st byte of the packet in UART frame. + * @max_frame_size: size of the largest frame the protocol can receive. + * @hdr_len: length of the header structure of the protocol. + * @offset_len_in_hdr: this provides the offset of the length field in the + * header structure of the protocol header, to assist ST to know + * how much to receive, if the data is split across UART frames. + * @len_size: whether the length field inside the header is 2 bytes + * or 1 byte. + * @reserve: the number of bytes ST needs to reserve in the skb being + * prepared for the protocol driver. + */ +struct st_proto_s { + enum proto_type type; + long (*recv) (void *, struct sk_buff *); + unsigned char (*match_packet) (const unsigned char *data); + void (*reg_complete_cb) (void *, char data); + long (*write) (struct sk_buff *skb); + void *priv_data; + + unsigned char chnl_id; + unsigned short max_frame_size; + unsigned char hdr_len; + unsigned char offset_len_in_hdr; + unsigned char len_size; + unsigned char reserve; +}; + +extern long st_register(struct st_proto_s *); +extern long st_unregister(struct st_proto_s *); + + +/* + * header information used by st_core.c + */ + +/* states of protocol list */ +#define ST_NOTEMPTY 1 +#define ST_EMPTY 0 + +/* + * possible st_states + */ +#define ST_INITIALIZING 1 +#define ST_REG_IN_PROGRESS 2 +#define ST_REG_PENDING 3 +#define ST_WAITING_FOR_RESP 4 + +/** + * struct st_data_s - ST core internal structure + * @st_state: different states of ST like initializing, registration + * in progress, this is mainly used to return relevant err codes + * when protocol drivers are registering. It is also used to track + * the recv function, as in during fw download only HCI events + * can occur , where as during other times other events CH8, CH9 + * can occur. + * @tty: tty provided by the TTY core for line disciplines. + * @tx_skb: If for some reason the tty's write returns lesser bytes written + * then to maintain the rest of data to be written on next instance. + * This needs to be protected, hence the lock inside wakeup func. + * @tx_state: if the data is being written onto the TTY and protocol driver + * wants to send more, queue up data and mark that there is + * more data to send. + * @list: the list of protocols registered, only MAX can exist, one protocol + * can register only once. + * @rx_state: states to be maintained inside st's tty receive + * @rx_count: count to be maintained inside st's tty receieve + * @rx_skb: the skb where all data for a protocol gets accumulated, + * since tty might not call receive when a complete event packet + * is received, the states, count and the skb needs to be maintained. + * @rx_chnl: the channel ID for which the data is getting accumalated for. + * @txq: the list of skbs which needs to be sent onto the TTY. + * @tx_waitq: if the chip is not in AWAKE state, the skbs needs to be queued + * up in here, PM(WAKEUP_IND) data needs to be sent and then the skbs + * from waitq can be moved onto the txq. + * Needs locking too. + * @lock: the lock to protect skbs, queues, and ST states. + * @protos_registered: count of the protocols registered, also when 0 the + * chip enable gpio can be toggled, and when it changes to 1 the fw + * needs to be downloaded to initialize chip side ST. + * @ll_state: the various PM states the chip can be, the states are notified + * to us, when the chip sends relevant PM packets(SLEEP_IND, WAKE_IND). 
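
A sketch of how a protocol driver fills in struct st_proto_s above and registers with the shared transport core; the channel id, frame size and callback bodies are placeholders, not values mandated by this header.

#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>

/* Receive callback: the ST core hands us a complete channel packet. */
static long demo_st_recv(void *priv_data, struct sk_buff *skb)
{
	kfree_skb(skb);		/* a real driver would queue this to its stack */
	return 0;
}

/* Called when a registration left pending (e.g. during fw download) completes. */
static void demo_st_reg_complete(void *priv_data, char data)
{
}

static struct st_proto_s demo_st_proto = {
	.type		 = ST_BT,
	.chnl_id	 = 0x04,	/* illustrative: first byte of the UART frame */
	.max_frame_size	 = 1024,	/* illustrative upper bound */
	.recv		 = demo_st_recv,
	.reg_complete_cb = demo_st_reg_complete,
	.priv_data	 = NULL,
};

/* From the driver's probe/remove paths:
 *	long err = st_register(&demo_st_proto);
 *	...
 *	st_unregister(&demo_st_proto);
 */
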
+ * @kim_data: reference to the parent encapsulating structure. + * + */ +struct st_data_s { + unsigned long st_state; + struct sk_buff *tx_skb; +#define ST_TX_SENDING 1 +#define ST_TX_WAKEUP 2 + unsigned long tx_state; + struct st_proto_s *list[ST_MAX_CHANNELS]; + bool is_registered[ST_MAX_CHANNELS]; + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; + unsigned char rx_chnl; + struct sk_buff_head txq, tx_waitq; + spinlock_t lock; + unsigned char protos_registered; + unsigned long ll_state; + void *kim_data; + struct tty_struct *tty; +}; + +/* + * wrapper around tty->ops->write_room to check + * availability during firmware download + */ +int st_get_uart_wr_room(struct st_data_s *st_gdata); +/** + * st_int_write - + * point this to tty->driver->write or tty->ops->write + * depending upon the kernel version + */ +int st_int_write(struct st_data_s*, const unsigned char*, int); + +/** + * st_write - + * internal write function, passed onto protocol drivers + * via the write function ptr of protocol struct + */ +long st_write(struct sk_buff *); + +/* function to be called from ST-LL */ +void st_ll_send_frame(enum proto_type, struct sk_buff *); + +/* internal wake up function */ +void st_tx_wakeup(struct st_data_s *st_data); + +/* init, exit entry funcs called from KIM */ +int st_core_init(struct st_data_s **); +void st_core_exit(struct st_data_s *); + +/* ask for reference from KIM */ +void st_kim_ref(struct st_data_s **, int); + +#define GPS_STUB_TEST +#ifdef GPS_STUB_TEST +int gps_chrdrv_stub_write(const unsigned char*, int); +void gps_chrdrv_stub_init(void); +#endif + +/* + * header information used by st_kim.c + */ + +/* time in msec to wait for + * line discipline to be installed + */ +#define LDISC_TIME 1000 +#define CMD_RESP_TIME 800 +#define CMD_WR_TIME 5000 +#define MAKEWORD(a, b) ((unsigned short)(((unsigned char)(a)) \ + | ((unsigned short)((unsigned char)(b))) << 8)) + +#define GPIO_HIGH 1 +#define GPIO_LOW 0 + +/* the Power-On-Reset logic, requires to attempt + * to download firmware onto chip more than once + * since the self-test for chip takes a while + */ +#define POR_RETRY_COUNT 5 + +/** + * struct chip_version - save the chip version + */ +struct chip_version { + unsigned short full; + unsigned short chip; + unsigned short min_ver; + unsigned short maj_ver; +}; + +#define UART_DEV_NAME_LEN 32 +/** + * struct kim_data_s - the KIM internal data, embedded as the + * platform's drv data. One for each ST device in the system. + * @uim_pid: KIM needs to communicate with UIM to request to install + * the ldisc by opening UART when protocol drivers register. + * @kim_pdev: the platform device added in one of the board-XX.c file + * in arch/XX/ directory, 1 for each ST device. + * @kim_rcvd: completion handler to notify when data was received, + * mainly used during fw download, which involves multiple send/wait + * for each of the HCI-VS commands. + * @ldisc_installed: completion handler to notify that the UIM accepted + * the request to install ldisc, notify from tty_open which suggests + * the ldisc was properly installed. + * @resp_buffer: data buffer for the .bts fw file name. + * @fw_entry: firmware class struct to request/release the fw. + * @rx_state: the rx state for kim's receive func during fw download. + * @rx_count: the rx count for the kim's receive func during fw download. + * @rx_skb: all of fw data might not come at once, and hence data storage for + * whole of the fw response, only HCI_EVENTs and hence diff from ST's + * response. 
+ * @core_data: ST core's data, which mainly is the tty's disc_data + * @version: chip version available via a sysfs entry. + * + */ +struct kim_data_s { + long uim_pid; + struct platform_device *kim_pdev; + struct completion kim_rcvd, ldisc_installed; + char resp_buffer[30]; + const struct firmware *fw_entry; + long nshutdown; + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; + struct st_data_s *core_data; + struct chip_version version; + unsigned char ldisc_install; + unsigned char dev_name[UART_DEV_NAME_LEN]; + unsigned char flow_cntrl; + unsigned long baud_rate; +}; + +/** + * functions called when 1 of the protocol drivers gets + * registered, these need to communicate with UIM to request + * ldisc installed, read chip_version, download relevant fw + */ +long st_kim_start(void *); +long st_kim_stop(void *); + +void st_kim_recv(void *, const unsigned char *, long count); +void st_kim_complete(void *); +void kim_st_list_protocols(struct st_data_s *, void *); + +/* + * BTS headers + */ +#define ACTION_SEND_COMMAND 1 +#define ACTION_WAIT_EVENT 2 +#define ACTION_SERIAL 3 +#define ACTION_DELAY 4 +#define ACTION_RUN_SCRIPT 5 +#define ACTION_REMARKS 6 + +/** + * struct bts_header - the fw file is NOT binary which can + * be sent onto TTY as is. The .bts is more a script + * file which has different types of actions. + * Each such action needs to be parsed by the KIM and + * relevant procedure to be called. + */ +struct bts_header { + u32 magic; + u32 version; + u8 future[24]; + u8 actions[0]; +} __attribute__ ((packed)); + +/** + * struct bts_action - Each .bts action has its own type of + * data. + */ +struct bts_action { + u16 type; + u16 size; + u8 data[0]; +} __attribute__ ((packed)); + +struct bts_action_send { + u8 data[0]; +} __attribute__ ((packed)); + +struct bts_action_wait { + u32 msec; + u32 size; + u8 data[0]; +} __attribute__ ((packed)); + +struct bts_action_delay { + u32 msec; +} __attribute__ ((packed)); + +struct bts_action_serial { + u32 baud; + u32 flow_control; +} __attribute__ ((packed)); + +/** + * struct hci_command - the HCI-VS for intrepreting + * the change baud rate of host-side UART, which + * needs to be ignored, since UIM would do that + * when it receives request from KIM for ldisc installation. + */ +struct hci_command { + u8 prefix; + u16 opcode; + u8 plen; + u32 speed; +} __attribute__ ((packed)); + +/* + * header information used by st_ll.c + */ + +/* ST LL receiver states */ +#define ST_W4_PACKET_TYPE 0 +#define ST_W4_HEADER 1 +#define ST_W4_DATA 2 + +/* ST LL state machines */ +#define ST_LL_ASLEEP 0 +#define ST_LL_ASLEEP_TO_AWAKE 1 +#define ST_LL_AWAKE 2 +#define ST_LL_AWAKE_TO_ASLEEP 3 +#define ST_LL_INVALID 4 + +/* different PM notifications coming from chip */ +#define LL_SLEEP_IND 0x30 +#define LL_SLEEP_ACK 0x31 +#define LL_WAKE_UP_IND 0x32 +#define LL_WAKE_UP_ACK 0x33 + +/* initialize and de-init ST LL */ +long st_ll_init(struct st_data_s *); +long st_ll_deinit(struct st_data_s *); + +/** + * enable/disable ST LL along with KIM start/stop + * called by ST Core + */ +void st_ll_enable(struct st_data_s *); +void st_ll_disable(struct st_data_s *); + +/** + * various funcs used by ST core to set/get the various PM states + * of the chip. 
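
The .bts layout described by struct bts_header and struct bts_action above can be walked record by record; a rough sketch, with only a basic length check and no validation of the magic or version fields.

#include <linux/kernel.h>
#include <linux/ti_wilink_st.h>

/* Iterate over the action records that follow struct bts_header. */
static void demo_walk_bts(const u8 *fw, size_t len)
{
	const struct bts_header *hdr = (const struct bts_header *)fw;
	const u8 *p, *end;

	if (len < sizeof(*hdr))
		return;

	p = hdr->actions;
	end = fw + len;

	while (p + sizeof(struct bts_action) <= end) {
		const struct bts_action *act = (const struct bts_action *)p;

		if (act->type == ACTION_SEND_COMMAND) {
			/* act->data, act->size bytes: would go out on the UART */
		} else if (act->type == ACTION_WAIT_EVENT) {
			/* wait for the matching response before continuing */
		}

		p += sizeof(struct bts_action) + act->size;
	}
}
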
+ */ +unsigned long st_ll_getstate(struct st_data_s *); +unsigned long st_ll_sleep_state(struct st_data_s *, unsigned char); +void st_ll_wakeup(struct st_data_s *); + +/* + * header information used by st_core.c for FM and GPS + * packet parsing, the bluetooth headers are already available + * at net/bluetooth/ + */ + +struct fm_event_hdr { + u8 plen; +} __attribute__ ((packed)); + +#define FM_MAX_FRAME_SIZE 0xFF /* TODO: */ +#define FM_EVENT_HDR_SIZE 1 /* size of fm_event_hdr */ +#define ST_FM_CH8_PKT 0x8 + +/* gps stuff */ +struct gps_event_hdr { + u8 opcode; + u16 plen; +} __attribute__ ((packed)); + +/** + * struct ti_st_plat_data - platform data shared between ST driver and + * platform specific board file which adds the ST device. + * @nshutdown_gpio: Host's GPIO line to which chip's BT_EN is connected. + * @dev_name: The UART/TTY name to which chip is interfaced. (eg: /dev/ttyS1) + * @flow_cntrl: Should always be 1, since UART's CTS/RTS is used for PM + * purposes. + * @baud_rate: The baud rate supported by the Host UART controller, this will + * be shared across with the chip via a HCI VS command from User-Space Init + * Mgr application. + * @suspend: + * @resume: legacy PM routines hooked to platform specific board file, so as + * to take chip-host interface specific action. + * @chip_enable: + * @chip_disable: Platform/Interface specific mux mode setting, GPIO + * configuring, Host side PM disabling etc.. can be done here. + * @chip_asleep: + * @chip_awake: Chip specific deep sleep states is communicated to Host + * specific board-xx.c to take actions such as cut UART clocks when chip + * asleep or run host faster when chip awake etc.. + * + */ +struct ti_st_plat_data { + long nshutdown_gpio; + unsigned char dev_name[UART_DEV_NAME_LEN]; /* uart name */ + unsigned char flow_cntrl; /* flow control flag */ + unsigned long baud_rate; + int (*suspend)(struct platform_device *, pm_message_t); + int (*resume)(struct platform_device *); + int (*chip_enable) (struct kim_data_s *); + int (*chip_disable) (struct kim_data_s *); + int (*chip_asleep) (struct kim_data_s *); + int (*chip_awake) (struct kim_data_s *); +}; + +#endif /* TI_WILINK_ST_H */ diff --git a/include/linux/tick.h b/include/linux/tick.h new file mode 100644 index 00000000..ab8be90b --- /dev/null +++ b/include/linux/tick.h @@ -0,0 +1,145 @@ +/* linux/include/linux/tick.h + * + * This file contains the structure definitions for tick related functions + * + */ +#ifndef _LINUX_TICK_H +#define _LINUX_TICK_H + +#include <linux/clockchips.h> +#include <linux/irqflags.h> + +#ifdef CONFIG_GENERIC_CLOCKEVENTS + +enum tick_device_mode { + TICKDEV_MODE_PERIODIC, + TICKDEV_MODE_ONESHOT, +}; + +struct tick_device { + struct clock_event_device *evtdev; + enum tick_device_mode mode; +}; + +enum tick_nohz_mode { + NOHZ_MODE_INACTIVE, + NOHZ_MODE_LOWRES, + NOHZ_MODE_HIGHRES, +}; + +/** + * struct tick_sched - sched tick emulation and no idle tick control/stats + * @sched_timer: hrtimer to schedule the periodic tick in high + * resolution mode + * @idle_tick: Store the last idle tick expiry time when the tick + * timer is modified for idle sleeps. 
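
For completeness, struct ti_st_plat_data above is normally supplied from a board file together with a platform device; the GPIO number, tty name, baud rate and the "kim" device name below are assumptions for illustration.

#include <linux/platform_device.h>
#include <linux/ti_wilink_st.h>

static struct ti_st_plat_data demo_wilink_pdata = {
	.nshutdown_gpio	= 137,			/* BT_EN line, board specific */
	.dev_name	= "/dev/ttyO1",		/* UART the chip is wired to */
	.flow_cntrl	= 1,			/* CTS/RTS used for PM */
	.baud_rate	= 3000000,
};

static struct platform_device demo_wilink_device = {
	.name	= "kim",	/* assumed to match the shared transport KIM driver */
	.id	= -1,
	.dev	= {
		.platform_data = &demo_wilink_pdata,
	},
};

/* platform_device_register(&demo_wilink_device) from the board init code. */
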
This is necessary + * to resume the tick timer operation in the timeline + * when the CPU returns from idle + * @tick_stopped: Indicator that the idle tick has been stopped + * @idle_jiffies: jiffies at the entry to idle for idle time accounting + * @idle_calls: Total number of idle calls + * @idle_sleeps: Number of idle calls, where the sched tick was stopped + * @idle_entrytime: Time when the idle call was entered + * @idle_waketime: Time when the idle was interrupted + * @idle_exittime: Time when the idle state was left + * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped + * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding + * @sleep_length: Duration of the current idle sleep + * @do_timer_lst: CPU was the last one doing do_timer before going idle + */ +struct tick_sched { + struct hrtimer sched_timer; + unsigned long check_clocks; + enum tick_nohz_mode nohz_mode; + ktime_t idle_tick; + int inidle; + int tick_stopped; + unsigned long idle_jiffies; + unsigned long idle_calls; + unsigned long idle_sleeps; + int idle_active; + ktime_t idle_entrytime; + ktime_t idle_waketime; + ktime_t idle_exittime; + ktime_t idle_sleeptime; + ktime_t iowait_sleeptime; + ktime_t sleep_length; + unsigned long last_jiffies; + unsigned long next_jiffies; + ktime_t idle_expires; + int do_timer_last; +}; + +extern void __init tick_init(void); +extern int tick_is_oneshot_available(void); +extern struct tick_device *tick_get_device(int cpu); + +# ifdef CONFIG_HIGH_RES_TIMERS +extern int tick_init_highres(void); +extern int tick_program_event(ktime_t expires, int force); +extern void tick_setup_sched_timer(void); +# endif + +# if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS +extern void tick_cancel_sched_timer(int cpu); +# else +static inline void tick_cancel_sched_timer(int cpu) { } +# endif + +# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +extern struct tick_device *tick_get_broadcast_device(void); +extern struct cpumask *tick_get_broadcast_mask(void); + +# ifdef CONFIG_TICK_ONESHOT +extern struct cpumask *tick_get_broadcast_oneshot_mask(void); +# endif + +# endif /* BROADCAST */ + +# ifdef CONFIG_TICK_ONESHOT +extern void tick_clock_notify(void); +extern int tick_check_oneshot_change(int allow_nohz); +extern struct tick_sched *tick_get_tick_sched(int cpu); +extern void tick_check_idle(int cpu); +extern int tick_oneshot_mode_active(void); +# ifndef arch_needs_cpu +# define arch_needs_cpu(cpu) (0) +# endif +# else +static inline void tick_clock_notify(void) { } +static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } +static inline void tick_check_idle(int cpu) { } +static inline int tick_oneshot_mode_active(void) { return 0; } +# endif + +#else /* CONFIG_GENERIC_CLOCKEVENTS */ +static inline void tick_init(void) { } +static inline void tick_cancel_sched_timer(int cpu) { } +static inline void tick_clock_notify(void) { } +static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } +static inline void tick_check_idle(int cpu) { } +static inline int tick_oneshot_mode_active(void) { return 0; } +#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ + +# ifdef CONFIG_NO_HZ +extern void tick_nohz_idle_enter(void); +extern void tick_nohz_idle_exit(void); +extern void tick_nohz_irq_exit(void); +extern ktime_t tick_nohz_get_sleep_length(void); +extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); +extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); +# else +static inline void 
tick_nohz_idle_enter(void) { } +static inline void tick_nohz_idle_exit(void) { } + +static inline ktime_t tick_nohz_get_sleep_length(void) +{ + ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; + + return len; +} +static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } +static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } +# endif /* !NO_HZ */ + +#endif diff --git a/include/linux/tifm.h b/include/linux/tifm.h new file mode 100644 index 00000000..848c0f39 --- /dev/null +++ b/include/linux/tifm.h @@ -0,0 +1,164 @@ +/* + * tifm.h - TI FlashMedia driver + * + * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef _TIFM_H +#define _TIFM_H + +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/workqueue.h> + +/* Host registers (relative to pci base address): */ +enum { + FM_SET_INTERRUPT_ENABLE = 0x008, + FM_CLEAR_INTERRUPT_ENABLE = 0x00c, + FM_INTERRUPT_STATUS = 0x014 +}; + +/* Socket registers (relative to socket base address): */ +enum { + SOCK_CONTROL = 0x004, + SOCK_PRESENT_STATE = 0x008, + SOCK_DMA_ADDRESS = 0x00c, + SOCK_DMA_CONTROL = 0x010, + SOCK_DMA_FIFO_INT_ENABLE_SET = 0x014, + SOCK_DMA_FIFO_INT_ENABLE_CLEAR = 0x018, + SOCK_DMA_FIFO_STATUS = 0x020, + SOCK_FIFO_CONTROL = 0x024, + SOCK_FIFO_PAGE_SIZE = 0x028, + SOCK_MMCSD_COMMAND = 0x104, + SOCK_MMCSD_ARG_LOW = 0x108, + SOCK_MMCSD_ARG_HIGH = 0x10c, + SOCK_MMCSD_CONFIG = 0x110, + SOCK_MMCSD_STATUS = 0x114, + SOCK_MMCSD_INT_ENABLE = 0x118, + SOCK_MMCSD_COMMAND_TO = 0x11c, + SOCK_MMCSD_DATA_TO = 0x120, + SOCK_MMCSD_DATA = 0x124, + SOCK_MMCSD_BLOCK_LEN = 0x128, + SOCK_MMCSD_NUM_BLOCKS = 0x12c, + SOCK_MMCSD_BUFFER_CONFIG = 0x130, + SOCK_MMCSD_SPI_CONFIG = 0x134, + SOCK_MMCSD_SDIO_MODE_CONFIG = 0x138, + SOCK_MMCSD_RESPONSE = 0x144, + SOCK_MMCSD_SDIO_SR = 0x164, + SOCK_MMCSD_SYSTEM_CONTROL = 0x168, + SOCK_MMCSD_SYSTEM_STATUS = 0x16c, + SOCK_MS_COMMAND = 0x184, + SOCK_MS_DATA = 0x188, + SOCK_MS_STATUS = 0x18c, + SOCK_MS_SYSTEM = 0x190, + SOCK_FIFO_ACCESS = 0x200 +}; + +#define TIFM_CTRL_LED 0x00000040 +#define TIFM_CTRL_FAST_CLK 0x00000100 +#define TIFM_CTRL_POWER_MASK 0x00000007 + +#define TIFM_SOCK_STATE_OCCUPIED 0x00000008 +#define TIFM_SOCK_STATE_POWERED 0x00000080 + +#define TIFM_FIFO_ENABLE 0x00000001 +#define TIFM_FIFO_READY 0x00000001 +#define TIFM_FIFO_MORE 0x00000008 +#define TIFM_FIFO_INT_SETALL 0x0000ffff +#define TIFM_FIFO_INTMASK 0x00000005 + +#define TIFM_DMA_RESET 0x00000002 +#define TIFM_DMA_TX 0x00008000 +#define TIFM_DMA_EN 0x00000001 +#define TIFM_DMA_TSIZE 0x0000007f + +#define TIFM_TYPE_XD 1 +#define TIFM_TYPE_MS 2 +#define TIFM_TYPE_SD 3 + +struct tifm_device_id { + unsigned char type; +}; + +struct tifm_driver; +struct tifm_dev { + char __iomem *addr; + spinlock_t lock; + unsigned char type; + unsigned int socket_id; + + void (*card_event)(struct tifm_dev *sock); + void (*data_event)(struct tifm_dev *sock); + + struct device dev; +}; + +struct tifm_driver { + struct tifm_device_id *id_table; + int (*probe)(struct tifm_dev *dev); + void (*remove)(struct tifm_dev *dev); + int (*suspend)(struct tifm_dev *dev, + pm_message_t state); + int (*resume)(struct tifm_dev *dev); + + struct device_driver driver; +}; + +struct tifm_adapter { + char __iomem *addr; + spinlock_t lock; + unsigned int irq_status; + unsigned 
int socket_change_set; + unsigned int id; + unsigned int num_sockets; + struct completion *finish_me; + + struct work_struct media_switcher; + struct device dev; + + void (*eject)(struct tifm_adapter *fm, + struct tifm_dev *sock); + int (*has_ms_pif)(struct tifm_adapter *fm, + struct tifm_dev *sock); + + struct tifm_dev *sockets[0]; +}; + +struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, + struct device *dev); +int tifm_add_adapter(struct tifm_adapter *fm); +void tifm_remove_adapter(struct tifm_adapter *fm); +void tifm_free_adapter(struct tifm_adapter *fm); + +void tifm_free_device(struct device *dev); +struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id, + unsigned char type); + +int tifm_register_driver(struct tifm_driver *drv); +void tifm_unregister_driver(struct tifm_driver *drv); +void tifm_eject(struct tifm_dev *sock); +int tifm_has_ms_pif(struct tifm_dev *sock); +int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, + int direction); +void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, + int direction); +void tifm_queue_work(struct work_struct *work); + +static inline void *tifm_get_drvdata(struct tifm_dev *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void tifm_set_drvdata(struct tifm_dev *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +#endif diff --git a/include/linux/timb_dma.h b/include/linux/timb_dma.h new file mode 100644 index 00000000..bb043e97 --- /dev/null +++ b/include/linux/timb_dma.h @@ -0,0 +1,55 @@ +/* + * timb_dma.h timberdale FPGA DMA driver defines + * Copyright (c) 2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* Supports: + * Timberdale FPGA DMA engine + */ + +#ifndef _LINUX_TIMB_DMA_H +#define _LINUX_TIMB_DMA_H + +/** + * struct timb_dma_platform_data_channel - Description of each individual + * DMA channel for the timberdale DMA driver + * @rx: true if this channel handles data in the direction to + * the CPU. + * @bytes_per_line: Number of bytes per line, this is specific for channels + * handling video data. For other channels this shall be left to 0. + * @descriptors: Number of descriptors to allocate for this channel. + * @descriptor_elements: Number of elements in each descriptor. + * + */ +struct timb_dma_platform_data_channel { + bool rx; + unsigned int bytes_per_line; + unsigned int descriptors; + unsigned int descriptor_elements; +}; + +/** + * struct timb_dma_platform_data - Platform data of the timberdale DMA driver + * @nr_channels: Number of defined channels in the channels array. + * @channels: Definition of the each channel. 
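For illustration only, a board file might describe two channels of the structure defined just below like this; the variable name and all values are invented and do not come from any real timberdale configuration.

static struct timb_dma_platform_data example_timbdma_pdata = {
	.nr_channels = 2,
	.channels = {
		{
			/* channel 0: video capture, data flowing to the CPU */
			.rx = true,
			.bytes_per_line = 1440,
			.descriptors = 2,
			.descriptor_elements = 16,
		},
		{
			/* channel 1: transmit direction, not video */
			.rx = false,
			.descriptors = 2,
			.descriptor_elements = 16,
		},
	},
};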
+ * + */ +struct timb_dma_platform_data { + unsigned nr_channels; + struct timb_dma_platform_data_channel channels[32]; +}; + +#endif diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h new file mode 100644 index 00000000..ce456eaa --- /dev/null +++ b/include/linux/timb_gpio.h @@ -0,0 +1,37 @@ +/* + * timb_gpio.h timberdale FPGA GPIO driver, platform data definition + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _LINUX_TIMB_GPIO_H +#define _LINUX_TIMB_GPIO_H + +/** + * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver + * @gpio_base The number of the first GPIO pin, set to -1 for + * dynamic number allocation. + * @nr_pins Number of pins that is supported by the hardware (1-32) + * @irq_base If IRQ is supported by the hardware, this is the base + * number of IRQ:s. One IRQ per pin will be used. Set to + * -1 if IRQ:s is not supported. + */ +struct timbgpio_platform_data { + int gpio_base; + int nr_pins; + int irq_base; +}; + +#endif diff --git a/include/linux/time.h b/include/linux/time.h new file mode 100644 index 00000000..33a92ead --- /dev/null +++ b/include/linux/time.h @@ -0,0 +1,314 @@ +#ifndef _LINUX_TIME_H +#define _LINUX_TIME_H + +#include <linux/types.h> + +#ifdef __KERNEL__ +# include <linux/cache.h> +# include <linux/seqlock.h> +# include <linux/math64.h> +#endif + +#ifndef _STRUCT_TIMESPEC +#define _STRUCT_TIMESPEC +struct timespec { + __kernel_time_t tv_sec; /* seconds */ + long tv_nsec; /* nanoseconds */ +}; +#endif + +struct timeval { + __kernel_time_t tv_sec; /* seconds */ + __kernel_suseconds_t tv_usec; /* microseconds */ +}; + +struct timezone { + int tz_minuteswest; /* minutes west of Greenwich */ + int tz_dsttime; /* type of dst correction */ +}; + +#ifdef __KERNEL__ + +extern struct timezone sys_tz; + +/* Parameters used to convert the timespec values: */ +#define MSEC_PER_SEC 1000L +#define USEC_PER_MSEC 1000L +#define NSEC_PER_USEC 1000L +#define NSEC_PER_MSEC 1000000L +#define USEC_PER_SEC 1000000L +#define NSEC_PER_SEC 1000000000L +#define FSEC_PER_SEC 1000000000000000LL + +#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) + +static inline int timespec_equal(const struct timespec *a, + const struct timespec *b) +{ + return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); +} + +/* + * lhs < rhs: return <0 + * lhs == rhs: return 0 + * lhs > rhs: return >0 + */ +static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_nsec - rhs->tv_nsec; +} + +static inline int timeval_compare(const struct timeval *lhs, const struct timeval *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_usec - rhs->tv_usec; +} + +extern unsigned long mktime(const unsigned int 
year, const unsigned int mon, + const unsigned int day, const unsigned int hour, + const unsigned int min, const unsigned int sec); + +extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); + +/* + * timespec_add_safe assumes both values are positive and checks + * for overflow. It will return TIME_T_MAX if the reutrn would be + * smaller then either of the arguments. + */ +extern struct timespec timespec_add_safe(const struct timespec lhs, + const struct timespec rhs); + + +static inline struct timespec timespec_add(struct timespec lhs, + struct timespec rhs) +{ + struct timespec ts_delta; + set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, + lhs.tv_nsec + rhs.tv_nsec); + return ts_delta; +} + +/* + * sub = lhs - rhs, in normalized form + */ +static inline struct timespec timespec_sub(struct timespec lhs, + struct timespec rhs) +{ + struct timespec ts_delta; + set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, + lhs.tv_nsec - rhs.tv_nsec); + return ts_delta; +} + +/* + * Returns true if the timespec is norm, false if denorm: + */ +#define timespec_valid(ts) \ + (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) + +extern void read_persistent_clock(struct timespec *ts); +extern void read_boot_clock(struct timespec *ts); +extern int update_persistent_clock(struct timespec now); +void timekeeping_init(void); +extern int timekeeping_suspended; + +unsigned long get_seconds(void); +struct timespec current_kernel_time(void); +struct timespec __current_kernel_time(void); /* does not take xtime_lock */ +struct timespec get_monotonic_coarse(void); +void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, + struct timespec *wtom, struct timespec *sleep); +void timekeeping_inject_sleeptime(struct timespec *delta); + +#define CURRENT_TIME (current_kernel_time()) +#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) + +/* Some architectures do not supply their own clocksource. + * This is mainly the case in architectures that get their + * inter-tick times by reading the counter on their interval + * timer. Since these timers wrap every tick, they're not really + * useful as clocksources. Wrapping them to act like one is possible + * but not very efficient. So we provide a callout these arches + * can implement for use with the jiffies clocksource to provide + * finer then tick granular time. 
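Returning to the timespec helpers above, a small illustrative fragment (not from the header) that times an operation with getnstimeofday(), declared a little further down, plus timespec_sub() and timespec_to_ns(); pr_info() is assumed to be available from linux/kernel.h.

static void example_measure(void)
{
	struct timespec start, end, delta;

	getnstimeofday(&start);
	/* ... work being measured ... */
	getnstimeofday(&end);

	delta = timespec_sub(end, start);
	pr_info("took %lld ns\n", (long long)timespec_to_ns(&delta));
}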
+ */ +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET +extern u32 arch_gettimeoffset(void); +#else +static inline u32 arch_gettimeoffset(void) { return 0; } +#endif + +extern void do_gettimeofday(struct timeval *tv); +extern int do_settimeofday(const struct timespec *tv); +extern int do_sys_settimeofday(const struct timespec *tv, + const struct timezone *tz); +#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) +extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); +struct itimerval; +extern int do_setitimer(int which, struct itimerval *value, + struct itimerval *ovalue); +extern unsigned int alarm_setitimer(unsigned int seconds); +extern int do_getitimer(int which, struct itimerval *value); +extern void getnstimeofday(struct timespec *tv); +extern void getrawmonotonic(struct timespec *ts); +extern void getnstime_raw_and_real(struct timespec *ts_raw, + struct timespec *ts_real); +extern void getboottime(struct timespec *ts); +extern void monotonic_to_bootbased(struct timespec *ts); +extern void get_monotonic_boottime(struct timespec *ts); + +extern struct timespec timespec_trunc(struct timespec t, unsigned gran); +extern int timekeeping_valid_for_hres(void); +extern u64 timekeeping_max_deferment(void); +extern void timekeeping_leap_insert(int leapsecond); +extern int timekeeping_inject_offset(struct timespec *ts); + +struct tms; +extern void do_sys_times(struct tms *); + +/* + * Similar to the struct tm in userspace <time.h>, but it needs to be here so + * that the kernel source is self contained. + */ +struct tm { + /* + * the number of seconds after the minute, normally in the range + * 0 to 59, but can be up to 60 to allow for leap seconds + */ + int tm_sec; + /* the number of minutes after the hour, in the range 0 to 59*/ + int tm_min; + /* the number of hours past midnight, in the range 0 to 23 */ + int tm_hour; + /* the day of the month, in the range 1 to 31 */ + int tm_mday; + /* the number of months since January, in the range 0 to 11 */ + int tm_mon; + /* the number of years since 1900 */ + long tm_year; + /* the number of days since Sunday, in the range 0 to 6 */ + int tm_wday; + /* the number of days since January 1, in the range 0 to 365 */ + int tm_yday; +}; + +void time_to_tm(time_t totalsecs, int offset, struct tm *result); + +/** + * timespec_to_ns - Convert timespec to nanoseconds + * @ts: pointer to the timespec variable to be converted + * + * Returns the scalar nanosecond representation of the timespec + * parameter. + */ +static inline s64 timespec_to_ns(const struct timespec *ts) +{ + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; +} + +/** + * timeval_to_ns - Convert timeval to nanoseconds + * @ts: pointer to the timeval variable to be converted + * + * Returns the scalar nanosecond representation of the timeval + * parameter. + */ +static inline s64 timeval_to_ns(const struct timeval *tv) +{ + return ((s64) tv->tv_sec * NSEC_PER_SEC) + + tv->tv_usec * NSEC_PER_USEC; +} + +/** + * ns_to_timespec - Convert nanoseconds to timespec + * @nsec: the nanoseconds value to be converted + * + * Returns the timespec representation of the nsec parameter. + */ +extern struct timespec ns_to_timespec(const s64 nsec); + +/** + * ns_to_timeval - Convert nanoseconds to timeval + * @nsec: the nanoseconds value to be converted + * + * Returns the timeval representation of the nsec parameter. 
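A quick illustrative round trip through the conversion helpers documented here; the values in the comments follow directly from the definitions above.

static void example_ns_conversions(void)
{
	s64 ns = 1500000000LL;				/* 1.5 s */
	struct timespec ts = ns_to_timespec(ns);	/* { .tv_sec = 1, .tv_nsec = 500000000 } */
	struct timeval tv = ns_to_timeval(ns);		/* { .tv_sec = 1, .tv_usec = 500000 } */

	/* and back again: both print 1500000000 */
	pr_info("%lld %lld\n", (long long)timespec_to_ns(&ts),
		(long long)timeval_to_ns(&tv));
}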
+ */ +extern struct timeval ns_to_timeval(const s64 nsec); + +/** + * timespec_add_ns - Adds nanoseconds to a timespec + * @a: pointer to timespec to be incremented + * @ns: unsigned nanoseconds value to be added + * + * This must always be inlined because its used from the x86-64 vdso, + * which cannot call other kernel functions. + */ +static __always_inline void timespec_add_ns(struct timespec *a, u64 ns) +{ + a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); + a->tv_nsec = ns; +} + +#endif /* __KERNEL__ */ + +#define NFDBITS __NFDBITS + +#define FD_SETSIZE __FD_SETSIZE +#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp) +#define FD_CLR(fd,fdsetp) __FD_CLR(fd,fdsetp) +#define FD_ISSET(fd,fdsetp) __FD_ISSET(fd,fdsetp) +#define FD_ZERO(fdsetp) __FD_ZERO(fdsetp) + +/* + * Names of the interval timers, and structure + * defining a timer setting: + */ +#define ITIMER_REAL 0 +#define ITIMER_VIRTUAL 1 +#define ITIMER_PROF 2 + +struct itimerspec { + struct timespec it_interval; /* timer period */ + struct timespec it_value; /* timer expiration */ +}; + +struct itimerval { + struct timeval it_interval; /* timer interval */ + struct timeval it_value; /* current value */ +}; + +/* + * The IDs of the various system clocks (for POSIX.1b interval timers): + */ +#define CLOCK_REALTIME 0 +#define CLOCK_MONOTONIC 1 +#define CLOCK_PROCESS_CPUTIME_ID 2 +#define CLOCK_THREAD_CPUTIME_ID 3 +#define CLOCK_MONOTONIC_RAW 4 +#define CLOCK_REALTIME_COARSE 5 +#define CLOCK_MONOTONIC_COARSE 6 +#define CLOCK_BOOTTIME 7 +#define CLOCK_REALTIME_ALARM 8 +#define CLOCK_BOOTTIME_ALARM 9 + +/* + * The IDs of various hardware clocks: + */ +#define CLOCK_SGI_CYCLE 10 +#define MAX_CLOCKS 16 +#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC) +#define CLOCKS_MONO CLOCK_MONOTONIC + +/* + * The various flags for setting POSIX.1b interval timers: + */ +#define TIMER_ABSTIME 0x01 + +#endif diff --git a/include/linux/timecompare.h b/include/linux/timecompare.h new file mode 100644 index 00000000..546e2234 --- /dev/null +++ b/include/linux/timecompare.h @@ -0,0 +1,125 @@ +/* + * Utility code which helps transforming between two different time + * bases, called "source" and "target" time in this code. + * + * Source time has to be provided via the timecounter API while target + * time is accessed via a function callback whose prototype + * intentionally matches ktime_get() and ktime_get_real(). These + * interfaces where chosen like this so that the code serves its + * initial purpose without additional glue code. + * + * This purpose is synchronizing a hardware clock in a NIC with system + * time, in order to implement the Precision Time Protocol (PTP, + * IEEE1588) with more accurate hardware assisted time stamping. In + * that context only synchronization against system time (= + * ktime_get_real()) is currently needed. But this utility code might + * become useful in other situations, which is why it was written as + * general purpose utility code. + * + * The source timecounter is assumed to return monotonically + * increasing time (but this code does its best to compensate if that + * is not the case) whereas target time may jump. + * + * The target time corresponding to a source time is determined by + * reading target time, reading source time, reading target time + * again, then assuming that average target time corresponds to source + * time. 
In other words, the assumption is that reading the source + * time is slow and involves equal time for sending the request and + * receiving the reply, whereas reading target time is assumed to be + * fast. + * + * Copyright (C) 2009 Intel Corporation. + * Author: Patrick Ohly <patrick.ohly@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef _LINUX_TIMECOMPARE_H +#define _LINUX_TIMECOMPARE_H + +#include <linux/clocksource.h> +#include <linux/ktime.h> + +/** + * struct timecompare - stores state and configuration for the two clocks + * + * Initialize to zero, then set source/target/num_samples. + * + * Transformation between source time and target time is done with: + * target_time = source_time + offset + + * (source_time - last_update) * skew / + * TIMECOMPARE_SKEW_RESOLUTION + * + * @source: used to get source time stamps via timecounter_read() + * @target: function returning target time (for example, ktime_get + * for monotonic time, or ktime_get_real for wall clock) + * @num_samples: number of times that source time and target time are to + * be compared when determining their offset + * @offset: (target time - source time) at the time of the last update + * @skew: average (target time - source time) / delta source time * + * TIMECOMPARE_SKEW_RESOLUTION + * @last_update: last source time stamp when time offset was measured + */ +struct timecompare { + struct timecounter *source; + ktime_t (*target)(void); + int num_samples; + + s64 offset; + s64 skew; + u64 last_update; +}; + +/** + * timecompare_transform - transform source time stamp into target time base + * @sync: context for time sync + * @source_tstamp: the result of timecounter_read() or + * timecounter_cyc2time() + */ +extern ktime_t timecompare_transform(struct timecompare *sync, + u64 source_tstamp); + +/** + * timecompare_offset - measure current (target time - source time) offset + * @sync: context for time sync + * @offset: average offset during sample period returned here + * @source_tstamp: average source time during sample period returned here + * + * Returns number of samples used. Might be zero (= no result) in the + * unlikely case that target time was monotonically decreasing for all + * samples (= broken). + */ +extern int timecompare_offset(struct timecompare *sync, + s64 *offset, + u64 *source_tstamp); + +extern void __timecompare_update(struct timecompare *sync, + u64 source_tstamp); + +/** + * timecompare_update - update offset and skew by measuring current offset + * @sync: context for time sync + * @source_tstamp: the result of timecounter_read() or + * timecounter_cyc2time(), pass zero to force update + * + * Updates are only done at most once per second. 
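A rough usage sketch, assuming a NIC driver that already maintains a timecounter (nic_tc here) and passes in source-domain nanoseconds obtained via timecounter_read()/timecounter_cyc2time(); none of these names come from this header, and error handling is omitted.

static struct timecounter nic_tc;		/* maintained by the NIC driver */

static struct timecompare nic_sync = {
	.source      = &nic_tc,
	.target      = ktime_get_real,		/* sync against wall-clock time */
	.num_samples = 10,
};

static ktime_t nic_stamp_to_ktime(u64 source_ns)
{
	/* refresh offset/skew (rate limited to once per second), then map */
	timecompare_update(&nic_sync, timecounter_read(&nic_tc));
	return timecompare_transform(&nic_sync, source_ns);
}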
+ */ +static inline void timecompare_update(struct timecompare *sync, + u64 source_tstamp) +{ + if (!source_tstamp || + (s64)(source_tstamp - sync->last_update) >= NSEC_PER_SEC) + __timecompare_update(sync, source_tstamp); +} + +#endif /* _LINUX_TIMECOMPARE_H */ diff --git a/include/linux/timer.h b/include/linux/timer.h new file mode 100644 index 00000000..6abd9138 --- /dev/null +++ b/include/linux/timer.h @@ -0,0 +1,302 @@ +#ifndef _LINUX_TIMER_H +#define _LINUX_TIMER_H + +#include <linux/list.h> +#include <linux/ktime.h> +#include <linux/stddef.h> +#include <linux/debugobjects.h> +#include <linux/stringify.h> + +struct tvec_base; + +struct timer_list { + /* + * All fields that change during normal runtime grouped to the + * same cacheline + */ + struct list_head entry; + unsigned long expires; + struct tvec_base *base; + + void (*function)(unsigned long); + unsigned long data; + + int slack; + +#ifdef CONFIG_TIMER_STATS + int start_pid; + void *start_site; + char start_comm[16]; +#endif +#ifdef CONFIG_LOCKDEP + struct lockdep_map lockdep_map; +#endif +}; + +extern struct tvec_base boot_tvec_bases; + +#ifdef CONFIG_LOCKDEP +/* + * NB: because we have to copy the lockdep_map, setting the lockdep_map key + * (second argument) here is required, otherwise it could be initialised to + * the copy of the lockdep_map later! We use the pointer to and the string + * "<file>:<line>" as the key resp. the name of the lockdep_map. + */ +#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \ + .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn), +#else +#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) +#endif + +/* + * Note that all tvec_bases are 2 byte aligned and lower bit of + * base in timer_list is guaranteed to be zero. Use the LSB to + * indicate whether the timer is deferrable. + * + * A deferrable timer will work normally when the system is busy, but + * will not cause a CPU to come out of idle just to service it; instead, + * the timer will be serviced when the CPU eventually wakes up with a + * subsequent non-deferrable timer. 
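A minimal usage sketch with invented names: arm a one-shot timer roughly one second out and tear it down again; jiffies and HZ are assumed from linux/jiffies.h. A deferrable timer would instead be prepared with init_timer_deferrable() and have its .function/.data assigned by hand.

static void example_timer_fn(unsigned long data)
{
	/* runs in softirq context on expiry */
}

static struct timer_list example_timer;

static void example_start(void)
{
	setup_timer(&example_timer, example_timer_fn, 0);
	mod_timer(&example_timer, jiffies + HZ);	/* ~1 second from now */
}

static void example_stop(void)
{
	del_timer_sync(&example_timer);		/* also waits for a running handler */
}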
+ */ +#define TBASE_DEFERRABLE_FLAG (0x1) + +#define TIMER_INITIALIZER(_function, _expires, _data) { \ + .entry = { .prev = TIMER_ENTRY_STATIC }, \ + .function = (_function), \ + .expires = (_expires), \ + .data = (_data), \ + .base = &boot_tvec_bases, \ + .slack = -1, \ + __TIMER_LOCKDEP_MAP_INITIALIZER( \ + __FILE__ ":" __stringify(__LINE__)) \ + } + +#define TBASE_MAKE_DEFERRED(ptr) ((struct tvec_base *) \ + ((unsigned char *)(ptr) + TBASE_DEFERRABLE_FLAG)) + +#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) {\ + .entry = { .prev = TIMER_ENTRY_STATIC }, \ + .function = (_function), \ + .expires = (_expires), \ + .data = (_data), \ + .base = TBASE_MAKE_DEFERRED(&boot_tvec_bases), \ + __TIMER_LOCKDEP_MAP_INITIALIZER( \ + __FILE__ ":" __stringify(__LINE__)) \ + } + +#define DEFINE_TIMER(_name, _function, _expires, _data) \ + struct timer_list _name = \ + TIMER_INITIALIZER(_function, _expires, _data) + +void init_timer_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key); +void init_timer_deferrable_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key); + +#ifdef CONFIG_LOCKDEP +#define init_timer(timer) \ + do { \ + static struct lock_class_key __key; \ + init_timer_key((timer), #timer, &__key); \ + } while (0) + +#define init_timer_deferrable(timer) \ + do { \ + static struct lock_class_key __key; \ + init_timer_deferrable_key((timer), #timer, &__key); \ + } while (0) + +#define init_timer_on_stack(timer) \ + do { \ + static struct lock_class_key __key; \ + init_timer_on_stack_key((timer), #timer, &__key); \ + } while (0) + +#define setup_timer(timer, fn, data) \ + do { \ + static struct lock_class_key __key; \ + setup_timer_key((timer), #timer, &__key, (fn), (data));\ + } while (0) + +#define setup_timer_on_stack(timer, fn, data) \ + do { \ + static struct lock_class_key __key; \ + setup_timer_on_stack_key((timer), #timer, &__key, \ + (fn), (data)); \ + } while (0) +#define setup_deferrable_timer_on_stack(timer, fn, data) \ + do { \ + static struct lock_class_key __key; \ + setup_deferrable_timer_on_stack_key((timer), #timer, \ + &__key, (fn), \ + (data)); \ + } while (0) +#else +#define init_timer(timer)\ + init_timer_key((timer), NULL, NULL) +#define init_timer_deferrable(timer)\ + init_timer_deferrable_key((timer), NULL, NULL) +#define init_timer_on_stack(timer)\ + init_timer_on_stack_key((timer), NULL, NULL) +#define setup_timer(timer, fn, data)\ + setup_timer_key((timer), NULL, NULL, (fn), (data)) +#define setup_timer_on_stack(timer, fn, data)\ + setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) +#define setup_deferrable_timer_on_stack(timer, fn, data)\ + setup_deferrable_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) +#endif + +#ifdef CONFIG_DEBUG_OBJECTS_TIMERS +extern void init_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key); +extern void destroy_timer_on_stack(struct timer_list *timer); +#else +static inline void destroy_timer_on_stack(struct timer_list *timer) { } +static inline void init_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key) +{ + init_timer_key(timer, name, key); +} +#endif + +static inline void setup_timer_key(struct timer_list * timer, + const char *name, + struct lock_class_key *key, + void (*function)(unsigned long), + unsigned long data) +{ + timer->function = function; + timer->data = data; + init_timer_key(timer, name, key); +} + +static inline void setup_timer_on_stack_key(struct 
timer_list *timer, + const char *name, + struct lock_class_key *key, + void (*function)(unsigned long), + unsigned long data) +{ + timer->function = function; + timer->data = data; + init_timer_on_stack_key(timer, name, key); +} + +extern void setup_deferrable_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key, + void (*function)(unsigned long), + unsigned long data); + +/** + * timer_pending - is a timer pending? + * @timer: the timer in question + * + * timer_pending will tell whether a given timer is currently pending, + * or not. Callers must ensure serialization wrt. other operations done + * to this timer, eg. interrupt contexts, or other CPUs on SMP. + * + * return value: 1 if the timer is pending, 0 if not. + */ +static inline int timer_pending(const struct timer_list * timer) +{ + return timer->entry.next != NULL; +} + +extern void add_timer_on(struct timer_list *timer, int cpu); +extern int del_timer(struct timer_list * timer); +extern int mod_timer(struct timer_list *timer, unsigned long expires); +extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); +extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); + +extern void set_timer_slack(struct timer_list *time, int slack_hz); + +#define TIMER_NOT_PINNED 0 +#define TIMER_PINNED 1 +/* + * The jiffies value which is added to now, when there is no timer + * in the timer wheel: + */ +#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) + +/* + * Return when the next timer-wheel timeout occurs (in absolute jiffies), + * locks the timer base and does the comparison against the given + * jiffie. + */ +extern unsigned long get_next_timer_interrupt(unsigned long now); + +/* + * Timer-statistics info: + */ +#ifdef CONFIG_TIMER_STATS + +extern int timer_stats_active; + +#define TIMER_STATS_FLAG_DEFERRABLE 0x1 + +extern void init_timer_stats(void); + +extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, + void *timerf, char *comm, + unsigned int timer_flag); + +extern void __timer_stats_timer_set_start_info(struct timer_list *timer, + void *addr); + +static inline void timer_stats_timer_set_start_info(struct timer_list *timer) +{ + if (likely(!timer_stats_active)) + return; + __timer_stats_timer_set_start_info(timer, __builtin_return_address(0)); +} + +static inline void timer_stats_timer_clear_start_info(struct timer_list *timer) +{ + timer->start_site = NULL; +} +#else +static inline void init_timer_stats(void) +{ +} + +static inline void timer_stats_timer_set_start_info(struct timer_list *timer) +{ +} + +static inline void timer_stats_timer_clear_start_info(struct timer_list *timer) +{ +} +#endif + +extern void add_timer(struct timer_list *timer); + +extern int try_to_del_timer_sync(struct timer_list *timer); + +#ifdef CONFIG_SMP + extern int del_timer_sync(struct timer_list *timer); +#else +# define del_timer_sync(t) del_timer(t) +#endif + +#define del_singleshot_timer_sync(t) del_timer_sync(t) + +extern void init_timers(void); +extern void run_local_timers(void); +struct hrtimer; +extern enum hrtimer_restart it_real_fn(struct hrtimer *); + +unsigned long __round_jiffies(unsigned long j, int cpu); +unsigned long __round_jiffies_relative(unsigned long j, int cpu); +unsigned long round_jiffies(unsigned long j); +unsigned long round_jiffies_relative(unsigned long j); + +unsigned long __round_jiffies_up(unsigned long j, int cpu); +unsigned long __round_jiffies_up_relative(unsigned long j, int cpu); +unsigned long 
round_jiffies_up(unsigned long j); +unsigned long round_jiffies_up_relative(unsigned long j); + +#endif diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h new file mode 100644 index 00000000..d3b57fa1 --- /dev/null +++ b/include/linux/timerfd.h @@ -0,0 +1,32 @@ +/* + * include/linux/timerfd.h + * + * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> + * + */ + +#ifndef _LINUX_TIMERFD_H +#define _LINUX_TIMERFD_H + +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> + +/* + * CAREFUL: Check include/asm-generic/fcntl.h when defining + * new flags, since they might collide with O_* ones. We want + * to re-use O_* flags that couldn't possibly have a meaning + * from eventfd, in order to leave a free define-space for + * shared O_* flags. + */ +#define TFD_TIMER_ABSTIME (1 << 0) +#define TFD_TIMER_CANCEL_ON_SET (1 << 1) +#define TFD_CLOEXEC O_CLOEXEC +#define TFD_NONBLOCK O_NONBLOCK + +#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK) +/* Flags for timerfd_create. */ +#define TFD_CREATE_FLAGS TFD_SHARED_FCNTL_FLAGS +/* Flags for timerfd_settime. */ +#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET) + +#endif /* _LINUX_TIMERFD_H */ diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h new file mode 100644 index 00000000..3e08a1c8 --- /dev/null +++ b/include/linux/timeriomem-rng.h @@ -0,0 +1,21 @@ +/* + * linux/include/linux/timeriomem-rng.h + * + * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/completion.h> + +struct timeriomem_rng_data { + struct completion completion; + unsigned int present:1; + + void __iomem *address; + + /* measures in usecs */ + unsigned int period; +}; diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h new file mode 100644 index 00000000..50887274 --- /dev/null +++ b/include/linux/timerqueue.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_TIMERQUEUE_H +#define _LINUX_TIMERQUEUE_H + +#include <linux/rbtree.h> +#include <linux/ktime.h> + + +struct timerqueue_node { + struct rb_node node; + ktime_t expires; +}; + +struct timerqueue_head { + struct rb_root head; + struct timerqueue_node *next; +}; + + +extern void timerqueue_add(struct timerqueue_head *head, + struct timerqueue_node *node); +extern void timerqueue_del(struct timerqueue_head *head, + struct timerqueue_node *node); +extern struct timerqueue_node *timerqueue_iterate_next( + struct timerqueue_node *node); + +/** + * timerqueue_getnext - Returns the timer with the earliest expiration time + * + * @head: head of timerqueue + * + * Returns a pointer to the timer node that has the + * earliest expiration time. 
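An illustrative sequence using the timerqueue operations above together with the init helpers defined just below; the nodes live on the stack purely for demonstration.

static void example_timerqueue(void)
{
	struct timerqueue_head q;
	struct timerqueue_node a, b;

	timerqueue_init_head(&q);
	timerqueue_init(&a);
	timerqueue_init(&b);
	a.expires = ktime_set(5, 0);		/* expires at 5 s */
	b.expires = ktime_set(2, 0);		/* expires at 2 s */

	timerqueue_add(&q, &a);
	timerqueue_add(&q, &b);
	/* timerqueue_getnext(&q) == &b, the earliest deadline */
	timerqueue_del(&q, &b);
	/* timerqueue_getnext(&q) == &a */
}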
+ */ +static inline +struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) +{ + return head->next; +} + +static inline void timerqueue_init(struct timerqueue_node *node) +{ + rb_init_node(&node->node); +} + +static inline void timerqueue_init_head(struct timerqueue_head *head) +{ + head->head = RB_ROOT; + head->next = NULL; +} +#endif /* _LINUX_TIMERQUEUE_H */ diff --git a/include/linux/times.h b/include/linux/times.h new file mode 100644 index 00000000..87b62615 --- /dev/null +++ b/include/linux/times.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_TIMES_H +#define _LINUX_TIMES_H + +#include <linux/types.h> + +struct tms { + __kernel_clock_t tms_utime; + __kernel_clock_t tms_stime; + __kernel_clock_t tms_cutime; + __kernel_clock_t tms_cstime; +}; + +#endif diff --git a/include/linux/timex.h b/include/linux/timex.h new file mode 100644 index 00000000..99bc88b1 --- /dev/null +++ b/include/linux/timex.h @@ -0,0 +1,266 @@ +/***************************************************************************** + * * + * Copyright (c) David L. Mills 1993 * + * * + * Permission to use, copy, modify, and distribute this software and its * + * documentation for any purpose and without fee is hereby granted, provided * + * that the above copyright notice appears in all copies and that both the * + * copyright notice and this permission notice appear in supporting * + * documentation, and that the name University of Delaware not be used in * + * advertising or publicity pertaining to distribution of the software * + * without specific, written prior permission. The University of Delaware * + * makes no representations about the suitability this software for any * + * purpose. It is provided "as is" without express or implied warranty. * + * * + *****************************************************************************/ + +/* + * Modification history timex.h + * + * 29 Dec 97 Russell King + * Moved CLOCK_TICK_RATE, CLOCK_TICK_FACTOR and FINETUNE to asm/timex.h + * for ARM machines + * + * 9 Jan 97 Adrian Sun + * Shifted LATCH define to allow access to alpha machines. + * + * 26 Sep 94 David L. Mills + * Added defines for hybrid phase/frequency-lock loop. + * + * 19 Mar 94 David L. Mills + * Moved defines from kernel routines to header file and added new + * defines for PPS phase-lock loop. + * + * 20 Feb 94 David L. Mills + * Revised status codes and structures for external clock and PPS + * signal discipline. + * + * 28 Nov 93 David L. Mills + * Adjusted parameters to improve stability and increase poll + * interval. + * + * 17 Sep 93 David L. 
Mills + * Created file $NTP/include/sys/timex.h + * 07 Oct 93 Torsten Duwe + * Derived linux/timex.h + * 1995-08-13 Torsten Duwe + * kernel PLL updated to 1994-12-13 specs (rfc-1589) + * 1997-08-30 Ulrich Windl + * Added new constant NTP_PHASE_LIMIT + * 2004-08-12 Christoph Lameter + * Reworked time interpolation logic + */ +#ifndef _LINUX_TIMEX_H +#define _LINUX_TIMEX_H + +#include <linux/time.h> + +#define NTP_API 4 /* NTP API version */ + +/* + * syscall interface - used (mainly by NTP daemon) + * to discipline kernel clock oscillator + */ +struct timex { + unsigned int modes; /* mode selector */ + long offset; /* time offset (usec) */ + long freq; /* frequency offset (scaled ppm) */ + long maxerror; /* maximum error (usec) */ + long esterror; /* estimated error (usec) */ + int status; /* clock command/status */ + long constant; /* pll time constant */ + long precision; /* clock precision (usec) (read only) */ + long tolerance; /* clock frequency tolerance (ppm) + * (read only) + */ + struct timeval time; /* (read only, except for ADJ_SETOFFSET) */ + long tick; /* (modified) usecs between clock ticks */ + + long ppsfreq; /* pps frequency (scaled ppm) (ro) */ + long jitter; /* pps jitter (us) (ro) */ + int shift; /* interval duration (s) (shift) (ro) */ + long stabil; /* pps stability (scaled ppm) (ro) */ + long jitcnt; /* jitter limit exceeded (ro) */ + long calcnt; /* calibration intervals (ro) */ + long errcnt; /* calibration errors (ro) */ + long stbcnt; /* stability limit exceeded (ro) */ + + int tai; /* TAI offset (ro) */ + + int :32; int :32; int :32; int :32; + int :32; int :32; int :32; int :32; + int :32; int :32; int :32; +}; + +/* + * Mode codes (timex.mode) + */ +#define ADJ_OFFSET 0x0001 /* time offset */ +#define ADJ_FREQUENCY 0x0002 /* frequency offset */ +#define ADJ_MAXERROR 0x0004 /* maximum time error */ +#define ADJ_ESTERROR 0x0008 /* estimated time error */ +#define ADJ_STATUS 0x0010 /* clock status */ +#define ADJ_TIMECONST 0x0020 /* pll time constant */ +#define ADJ_TAI 0x0080 /* set TAI offset */ +#define ADJ_SETOFFSET 0x0100 /* add 'time' to current time */ +#define ADJ_MICRO 0x1000 /* select microsecond resolution */ +#define ADJ_NANO 0x2000 /* select nanosecond resolution */ +#define ADJ_TICK 0x4000 /* tick value */ + +#ifdef __KERNEL__ +#define ADJ_ADJTIME 0x8000 /* switch between adjtime/adjtimex modes */ +#define ADJ_OFFSET_SINGLESHOT 0x0001 /* old-fashioned adjtime */ +#define ADJ_OFFSET_READONLY 0x2000 /* read-only adjtime */ +#else +#define ADJ_OFFSET_SINGLESHOT 0x8001 /* old-fashioned adjtime */ +#define ADJ_OFFSET_SS_READ 0xa001 /* read-only adjtime */ +#endif + +/* NTP userland likes the MOD_ prefix better */ +#define MOD_OFFSET ADJ_OFFSET +#define MOD_FREQUENCY ADJ_FREQUENCY +#define MOD_MAXERROR ADJ_MAXERROR +#define MOD_ESTERROR ADJ_ESTERROR +#define MOD_STATUS ADJ_STATUS +#define MOD_TIMECONST ADJ_TIMECONST +#define MOD_TAI ADJ_TAI +#define MOD_MICRO ADJ_MICRO +#define MOD_NANO ADJ_NANO + + +/* + * Status codes (timex.status) + */ +#define STA_PLL 0x0001 /* enable PLL updates (rw) */ +#define STA_PPSFREQ 0x0002 /* enable PPS freq discipline (rw) */ +#define STA_PPSTIME 0x0004 /* enable PPS time discipline (rw) */ +#define STA_FLL 0x0008 /* select frequency-lock mode (rw) */ + +#define STA_INS 0x0010 /* insert leap (rw) */ +#define STA_DEL 0x0020 /* delete leap (rw) */ +#define STA_UNSYNC 0x0040 /* clock unsynchronized (rw) */ +#define STA_FREQHOLD 0x0080 /* hold frequency (rw) */ + +#define STA_PPSSIGNAL 0x0100 /* PPS signal present (ro) */ 
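From user space these mode and status values come back through adjtimex(2); a small illustrative check (error handling trimmed, glibc exposes the call and struct timex via sys/timex.h):

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* no modifications, query only */
	int state = adjtimex(&tx);		/* TIME_OK, TIME_ERROR, ... */

	if (state == TIME_ERROR || (tx.status & STA_UNSYNC))
		printf("clock is not synchronized\n");
	else
		printf("offset %ld (usec, or nsec if STA_NANO), freq %ld scaled ppm\n",
		       tx.offset, tx.freq);
	return 0;
}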
+#define STA_PPSJITTER 0x0200 /* PPS signal jitter exceeded (ro) */ +#define STA_PPSWANDER 0x0400 /* PPS signal wander exceeded (ro) */ +#define STA_PPSERROR 0x0800 /* PPS signal calibration error (ro) */ + +#define STA_CLOCKERR 0x1000 /* clock hardware fault (ro) */ +#define STA_NANO 0x2000 /* resolution (0 = us, 1 = ns) (ro) */ +#define STA_MODE 0x4000 /* mode (0 = PLL, 1 = FLL) (ro) */ +#define STA_CLK 0x8000 /* clock source (0 = A, 1 = B) (ro) */ + +/* read-only bits */ +#define STA_RONLY (STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | \ + STA_PPSERROR | STA_CLOCKERR | STA_NANO | STA_MODE | STA_CLK) + +/* + * Clock states (time_state) + */ +#define TIME_OK 0 /* clock synchronized, no leap second */ +#define TIME_INS 1 /* insert leap second */ +#define TIME_DEL 2 /* delete leap second */ +#define TIME_OOP 3 /* leap second in progress */ +#define TIME_WAIT 4 /* leap second has occurred */ +#define TIME_ERROR 5 /* clock not synchronized */ +#define TIME_BAD TIME_ERROR /* bw compat */ + +#ifdef __KERNEL__ +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/param.h> + +#include <asm/timex.h> + +/* + * SHIFT_PLL is used as a dampening factor to define how much we + * adjust the frequency correction for a given offset in PLL mode. + * It also used in dampening the offset correction, to define how + * much of the current value in time_offset we correct for each + * second. Changing this value changes the stiffness of the ntp + * adjustment code. A lower value makes it more flexible, reducing + * NTP convergence time. A higher value makes it stiffer, increasing + * convergence time, but making the clock more stable. + * + * In David Mills' nanokernel reference implementation SHIFT_PLL is 4. + * However this seems to increase convergence time much too long. + * + * https://lists.ntp.org/pipermail/hackers/2008-January/003487.html + * + * In the above mailing list discussion, it seems the value of 4 + * was appropriate for other Unix systems with HZ=100, and that + * SHIFT_PLL should be decreased as HZ increases. However, Linux's + * clock steering implementation is HZ independent. + * + * Through experimentation, a SHIFT_PLL value of 2 was found to allow + * for fast convergence (very similar to the NTPv3 code used prior to + * v2.6.19), with good clock stability. + * + * + * SHIFT_FLL is used as a dampening factor to define how much we + * adjust the frequency correction for a given offset in FLL mode. + * In David Mills' nanokernel reference implementation SHIFT_FLL is 2. + * + * MAXTC establishes the maximum time constant of the PLL. + */ +#define SHIFT_PLL 2 /* PLL frequency factor (shift) */ +#define SHIFT_FLL 2 /* FLL frequency factor (shift) */ +#define MAXTC 10 /* maximum time constant (shift) */ + +/* + * SHIFT_USEC defines the scaling (shift) of the time_freq and + * time_tolerance variables, which represent the current frequency + * offset and maximum frequency tolerance. 
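Put concretely, the frequency values above are plain parts-per-million shifted left by SHIFT_USEC (defined just below as 16); the hypothetical helper here only makes that encoding explicit.

static inline long ppm_to_scaled_ppm(long ppm)
{
	return ppm << SHIFT_USEC;	/* e.g. 5 ppm -> 5 * 65536 = 327680 */
}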
+ */ +#define SHIFT_USEC 16 /* frequency offset scale (shift) */ +#define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) +#define PPM_SCALE_INV_SHIFT 19 +#define PPM_SCALE_INV ((1LL << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ + PPM_SCALE + 1) + +#define MAXPHASE 500000000L /* max phase error (ns) */ +#define MAXFREQ 500000 /* max frequency error (ns/s) */ +#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT) +#define MINSEC 256 /* min interval between updates (s) */ +#define MAXSEC 2048 /* max interval between updates (s) */ +#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ + +/* + * kernel variables + * Note: maximum error = NTP synch distance = dispersion + delay / 2; + * estimated error = NTP dispersion. + */ +extern unsigned long tick_usec; /* USER_HZ period (usec) */ +extern unsigned long tick_nsec; /* ACTHZ period (nsec) */ + +extern void ntp_init(void); +extern void ntp_clear(void); + +/* Required to safely shift negative values */ +#define shift_right(x, s) ({ \ + __typeof__(x) __x = (x); \ + __typeof__(s) __s = (s); \ + __x < 0 ? -(-__x >> __s) : __x >> __s; \ +}) + +#define NTP_SCALE_SHIFT 32 + +#define NTP_INTERVAL_FREQ (HZ) +#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) + +/* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */ +extern u64 ntp_tick_length(void); + +extern int second_overflow(unsigned long secs); +extern int do_adjtimex(struct timex *); +extern void hardpps(const struct timespec *, const struct timespec *); + +int read_current_timer(unsigned long *timer_val); + +/* The clock frequency of the i8253/i8254 PIT */ +#define PIT_TICK_RATE 1193182ul + +#endif /* KERNEL */ + +#endif /* LINUX_TIMEX_H */ diff --git a/include/linux/tiocl.h b/include/linux/tiocl.h new file mode 100644 index 00000000..4756862c --- /dev/null +++ b/include/linux/tiocl.h @@ -0,0 +1,39 @@ +#ifndef _LINUX_TIOCL_H +#define _LINUX_TIOCL_H + +#define TIOCL_SETSEL 2 /* set a selection */ +#define TIOCL_SELCHAR 0 /* select characters */ +#define TIOCL_SELWORD 1 /* select whole words */ +#define TIOCL_SELLINE 2 /* select whole lines */ +#define TIOCL_SELPOINTER 3 /* show the pointer */ +#define TIOCL_SELCLEAR 4 /* clear visibility of selection */ +#define TIOCL_SELMOUSEREPORT 16 /* report beginning of selection */ +#define TIOCL_SELBUTTONMASK 15 /* button mask for report */ +/* selection extent */ +struct tiocl_selection { + unsigned short xs; /* X start */ + unsigned short ys; /* Y start */ + unsigned short xe; /* X end */ + unsigned short ye; /* Y end */ + unsigned short sel_mode; /* selection mode */ +}; + +#define TIOCL_PASTESEL 3 /* paste previous selection */ +#define TIOCL_UNBLANKSCREEN 4 /* unblank screen */ + +#define TIOCL_SELLOADLUT 5 + /* set characters to be considered alphabetic when selecting */ + /* u32[8] bit array, 4 bytes-aligned with type */ + +/* these two don't return a value: they write it back in the type */ +#define TIOCL_GETSHIFTSTATE 6 /* write shift state */ +#define TIOCL_GETMOUSEREPORTING 7 /* write whether mouse event are reported */ +#define TIOCL_SETVESABLANK 10 /* set vesa blanking mode */ +#define TIOCL_SETKMSGREDIRECT 11 /* restrict kernel messages to a vt */ +#define TIOCL_GETFGCONSOLE 12 /* get foreground vt */ +#define TIOCL_SCROLLCONSOLE 13 /* scroll console */ +#define TIOCL_BLANKSCREEN 14 /* keep screen blank even if a key is pressed */ +#define TIOCL_BLANKEDSCREEN 15 /* return which vt was blanked */ +#define TIOCL_GETKMSGREDIRECT 17 /* get the vt the kernel messages 
are restricted to */ + +#endif /* _LINUX_TIOCL_H */ diff --git a/include/linux/tipc.h b/include/linux/tipc.h new file mode 100644 index 00000000..f2d90091 --- /dev/null +++ b/include/linux/tipc.h @@ -0,0 +1,209 @@ +/* + * include/linux/tipc.h: Include file for TIPC socket interface + * + * Copyright (c) 2003-2006, Ericsson AB + * Copyright (c) 2005, 2010-2011, Wind River Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _LINUX_TIPC_H_ +#define _LINUX_TIPC_H_ + +#include <linux/types.h> + +/* + * TIPC addressing primitives + */ + +struct tipc_portid { + __u32 ref; + __u32 node; +}; + +struct tipc_name { + __u32 type; + __u32 instance; +}; + +struct tipc_name_seq { + __u32 type; + __u32 lower; + __u32 upper; +}; + +static inline __u32 tipc_addr(unsigned int zone, + unsigned int cluster, + unsigned int node) +{ + return (zone << 24) | (cluster << 12) | node; +} + +static inline unsigned int tipc_zone(__u32 addr) +{ + return addr >> 24; +} + +static inline unsigned int tipc_cluster(__u32 addr) +{ + return (addr >> 12) & 0xfff; +} + +static inline unsigned int tipc_node(__u32 addr) +{ + return addr & 0xfff; +} + +/* + * Application-accessible port name types + */ + +#define TIPC_CFG_SRV 0 /* configuration service name type */ +#define TIPC_TOP_SRV 1 /* topology service name type */ +#define TIPC_RESERVED_TYPES 64 /* lowest user-publishable name type */ + +/* + * Publication scopes when binding port names and port name sequences + */ + +#define TIPC_ZONE_SCOPE 1 +#define TIPC_CLUSTER_SCOPE 2 +#define TIPC_NODE_SCOPE 3 + +/* + * Limiting values for messages + */ + +#define TIPC_MAX_USER_MSG_SIZE 66000U + +/* + * Message importance levels + */ + +#define TIPC_LOW_IMPORTANCE 0 +#define TIPC_MEDIUM_IMPORTANCE 1 +#define TIPC_HIGH_IMPORTANCE 2 +#define TIPC_CRITICAL_IMPORTANCE 3 + +/* + * Msg rejection/connection shutdown reasons + */ + +#define TIPC_OK 0 +#define TIPC_ERR_NO_NAME 1 +#define TIPC_ERR_NO_PORT 2 +#define TIPC_ERR_NO_NODE 3 +#define TIPC_ERR_OVERLOAD 4 +#define TIPC_CONN_SHUTDOWN 5 + +/* + * TIPC topology subscription service definitions + */ + +#define TIPC_SUB_PORTS 0x01 /* filter for port availability */ +#define TIPC_SUB_SERVICE 0x02 /* filter for service availability */ +#define TIPC_SUB_CANCEL 0x04 /* cancel a subscription */ + +#define TIPC_WAIT_FOREVER (~0) /* timeout for permanent subscription */ + +struct tipc_subscr { + struct tipc_name_seq seq; /* name sequence of interest */ + __u32 timeout; /* subscription duration (in ms) */ + __u32 filter; /* bitmask of filter options */ + char usr_handle[8]; /* available for subscriber use */ +}; + +#define TIPC_PUBLISHED 1 /* publication event */ +#define TIPC_WITHDRAWN 2 /* withdraw event */ +#define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */ + +struct tipc_event { + __u32 event; /* event type */ + __u32 found_lower; /* matching name seq instances */ + __u32 found_upper; /* " " " " */ + struct tipc_portid port; /* associated port */ + struct tipc_subscr s; /* associated subscription */ +}; + +/* + * Socket API + */ + +#ifndef AF_TIPC +#define AF_TIPC 30 +#endif + +#ifndef PF_TIPC +#define PF_TIPC AF_TIPC +#endif + +#ifndef SOL_TIPC +#define SOL_TIPC 271 +#endif + +#define TIPC_ADDR_NAMESEQ 1 +#define TIPC_ADDR_MCAST 1 +#define TIPC_ADDR_NAME 2 +#define TIPC_ADDR_ID 3 + +struct sockaddr_tipc { + unsigned short family; + unsigned char addrtype; + signed char scope; + union { + struct tipc_portid id; + struct tipc_name_seq nameseq; + struct { + struct tipc_name name; + __u32 domain; + } name; + } addr; +}; + +/* + * Ancillary data objects supported by recvmsg() + */ + +#define TIPC_ERRINFO 1 /* error info */ +#define TIPC_RETDATA 2 /* returned data */ +#define TIPC_DESTNAME 3 /* destination name */ + +/* + * TIPC-specific socket option values + */ + +#define TIPC_IMPORTANCE 127 /* Default: TIPC_LOW_IMPORTANCE */ +#define TIPC_SRC_DROPPABLE 128 /* Default: based on socket type */ +#define TIPC_DEST_DROPPABLE 129 /* Default: 
based on socket type */ +#define TIPC_CONN_TIMEOUT 130 /* Default: 8000 (ms) */ +#define TIPC_NODE_RECVQ_DEPTH 131 /* Default: none (read only) */ +#define TIPC_SOCK_RECVQ_DEPTH 132 /* Default: none (read only) */ + +#endif diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h new file mode 100644 index 00000000..9730b0e5 --- /dev/null +++ b/include/linux/tipc_config.h @@ -0,0 +1,395 @@ +/* + * include/linux/tipc_config.h: Include file for TIPC configuration interface + * + * Copyright (c) 2003-2006, Ericsson AB + * Copyright (c) 2005-2007, 2010-2011, Wind River Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LINUX_TIPC_CONFIG_H_ +#define _LINUX_TIPC_CONFIG_H_ + +#include <linux/types.h> +#include <linux/string.h> +#include <asm/byteorder.h> + +#ifndef __KERNEL__ +#include <arpa/inet.h> /* for ntohs etc. */ +#endif + +/* + * Configuration + * + * All configuration management messaging involves sending a request message + * to the TIPC configuration service on a node, which sends a reply message + * back. (In the future multi-message replies may be supported.) + * + * Both request and reply messages consist of a transport header and payload. + * The transport header contains info about the desired operation; + * the payload consists of zero or more type/length/value (TLV) items + * which specify parameters or results for the operation. + * + * For many operations, the request and reply messages have a fixed number + * of TLVs (usually zero or one); however, some reply messages may return + * a variable number of TLVs. A failed request is denoted by the presence + * of an "error string" TLV in the reply message instead of the TLV(s) the + * reply should contain if the request succeeds. + */ + +/* + * Public commands: + * May be issued by any process. 
+ * Accepted by own node, or by remote node only if remote management enabled. + */ + +#define TIPC_CMD_NOOP 0x0000 /* tx none, rx none */ +#define TIPC_CMD_GET_NODES 0x0001 /* tx net_addr, rx node_info(s) */ +#define TIPC_CMD_GET_MEDIA_NAMES 0x0002 /* tx none, rx media_name(s) */ +#define TIPC_CMD_GET_BEARER_NAMES 0x0003 /* tx none, rx bearer_name(s) */ +#define TIPC_CMD_GET_LINKS 0x0004 /* tx net_addr, rx link_info(s) */ +#define TIPC_CMD_SHOW_NAME_TABLE 0x0005 /* tx name_tbl_query, rx ultra_string */ +#define TIPC_CMD_SHOW_PORTS 0x0006 /* tx none, rx ultra_string */ +#define TIPC_CMD_SHOW_LINK_STATS 0x000B /* tx link_name, rx ultra_string */ +#define TIPC_CMD_SHOW_STATS 0x000F /* tx unsigned, rx ultra_string */ + +/* + * Protected commands: + * May only be issued by "network administration capable" process. + * Accepted by own node, or by remote node only if remote management enabled + * and this node is zone manager. + */ + +#define TIPC_CMD_GET_REMOTE_MNG 0x4003 /* tx none, rx unsigned */ +#define TIPC_CMD_GET_MAX_PORTS 0x4004 /* tx none, rx unsigned */ +#define TIPC_CMD_GET_MAX_PUBL 0x4005 /* tx none, rx unsigned */ +#define TIPC_CMD_GET_MAX_SUBSCR 0x4006 /* tx none, rx unsigned */ +#define TIPC_CMD_GET_MAX_ZONES 0x4007 /* obsoleted */ +#define TIPC_CMD_GET_MAX_CLUSTERS 0x4008 /* obsoleted */ +#define TIPC_CMD_GET_MAX_NODES 0x4009 /* obsoleted */ +#define TIPC_CMD_GET_MAX_SLAVES 0x400A /* obsoleted */ +#define TIPC_CMD_GET_NETID 0x400B /* tx none, rx unsigned */ + +#define TIPC_CMD_ENABLE_BEARER 0x4101 /* tx bearer_config, rx none */ +#define TIPC_CMD_DISABLE_BEARER 0x4102 /* tx bearer_name, rx none */ +#define TIPC_CMD_SET_LINK_TOL 0x4107 /* tx link_config, rx none */ +#define TIPC_CMD_SET_LINK_PRI 0x4108 /* tx link_config, rx none */ +#define TIPC_CMD_SET_LINK_WINDOW 0x4109 /* tx link_config, rx none */ +#define TIPC_CMD_SET_LOG_SIZE 0x410A /* tx unsigned, rx none */ +#define TIPC_CMD_DUMP_LOG 0x410B /* tx none, rx ultra_string */ +#define TIPC_CMD_RESET_LINK_STATS 0x410C /* tx link_name, rx none */ + +/* + * Private commands: + * May only be issued by "network administration capable" process. + * Accepted by own node only; cannot be used on a remote node. + */ + +#define TIPC_CMD_SET_NODE_ADDR 0x8001 /* tx net_addr, rx none */ +#define TIPC_CMD_SET_REMOTE_MNG 0x8003 /* tx unsigned, rx none */ +#define TIPC_CMD_SET_MAX_PORTS 0x8004 /* tx unsigned, rx none */ +#define TIPC_CMD_SET_MAX_PUBL 0x8005 /* tx unsigned, rx none */ +#define TIPC_CMD_SET_MAX_SUBSCR 0x8006 /* tx unsigned, rx none */ +#define TIPC_CMD_SET_MAX_ZONES 0x8007 /* obsoleted */ +#define TIPC_CMD_SET_MAX_CLUSTERS 0x8008 /* obsoleted */ +#define TIPC_CMD_SET_MAX_NODES 0x8009 /* obsoleted */ +#define TIPC_CMD_SET_MAX_SLAVES 0x800A /* obsoleted */ +#define TIPC_CMD_SET_NETID 0x800B /* tx unsigned, rx none */ + +/* + * Reserved commands: + * May not be issued by any process. + * Used internally by TIPC. 
+ */ + +#define TIPC_CMD_NOT_NET_ADMIN 0xC001 /* tx none, rx none */ + +/* + * TLV types defined for TIPC + */ + +#define TIPC_TLV_NONE 0 /* no TLV present */ +#define TIPC_TLV_VOID 1 /* empty TLV (0 data bytes)*/ +#define TIPC_TLV_UNSIGNED 2 /* 32-bit integer */ +#define TIPC_TLV_STRING 3 /* char[128] (max) */ +#define TIPC_TLV_LARGE_STRING 4 /* char[2048] (max) */ +#define TIPC_TLV_ULTRA_STRING 5 /* char[32768] (max) */ + +#define TIPC_TLV_ERROR_STRING 16 /* char[128] containing "error code" */ +#define TIPC_TLV_NET_ADDR 17 /* 32-bit integer denoting <Z.C.N> */ +#define TIPC_TLV_MEDIA_NAME 18 /* char[TIPC_MAX_MEDIA_NAME] */ +#define TIPC_TLV_BEARER_NAME 19 /* char[TIPC_MAX_BEARER_NAME] */ +#define TIPC_TLV_LINK_NAME 20 /* char[TIPC_MAX_LINK_NAME] */ +#define TIPC_TLV_NODE_INFO 21 /* struct tipc_node_info */ +#define TIPC_TLV_LINK_INFO 22 /* struct tipc_link_info */ +#define TIPC_TLV_BEARER_CONFIG 23 /* struct tipc_bearer_config */ +#define TIPC_TLV_LINK_CONFIG 24 /* struct tipc_link_config */ +#define TIPC_TLV_NAME_TBL_QUERY 25 /* struct tipc_name_table_query */ +#define TIPC_TLV_PORT_REF 26 /* 32-bit port reference */ + +/* + * Maximum sizes of TIPC bearer-related names (including terminating NUL) + */ + +#define TIPC_MAX_MEDIA_NAME 16 /* format = media */ +#define TIPC_MAX_IF_NAME 16 /* format = interface */ +#define TIPC_MAX_BEARER_NAME 32 /* format = media:interface */ +#define TIPC_MAX_LINK_NAME 60 /* format = Z.C.N:interface-Z.C.N:interface */ + +/* + * Link priority limits (min, default, max, media default) + */ + +#define TIPC_MIN_LINK_PRI 0 +#define TIPC_DEF_LINK_PRI 10 +#define TIPC_MAX_LINK_PRI 31 +#define TIPC_MEDIA_LINK_PRI (TIPC_MAX_LINK_PRI + 1) + +/* + * Link tolerance limits (min, default, max), in ms + */ + +#define TIPC_MIN_LINK_TOL 50 +#define TIPC_DEF_LINK_TOL 1500 +#define TIPC_MAX_LINK_TOL 30000 + +#if (TIPC_MIN_LINK_TOL < 16) +#error "TIPC_MIN_LINK_TOL is too small (abort limit may be NaN)" +#endif + +/* + * Link window limits (min, default, max), in packets + */ + +#define TIPC_MIN_LINK_WIN 16 +#define TIPC_DEF_LINK_WIN 50 +#define TIPC_MAX_LINK_WIN 150 + + +struct tipc_node_info { + __be32 addr; /* network address of node */ + __be32 up; /* 0=down, 1= up */ +}; + +struct tipc_link_info { + __be32 dest; /* network address of peer node */ + __be32 up; /* 0=down, 1=up */ + char str[TIPC_MAX_LINK_NAME]; /* link name */ +}; + +struct tipc_bearer_config { + __be32 priority; /* Range [1,31]. Override per link */ + __be32 disc_domain; /* <Z.C.N> describing desired nodes */ + char name[TIPC_MAX_BEARER_NAME]; +}; + +struct tipc_link_config { + __be32 value; + char name[TIPC_MAX_LINK_NAME]; +}; + +#define TIPC_NTQ_ALLTYPES 0x80000000 + +struct tipc_name_table_query { + __be32 depth; /* 1:type, 2:+name info, 3:+port info, 4+:+debug info */ + __be32 type; /* {t,l,u} info ignored if high bit of "depth" is set */ + __be32 lowbound; /* (i.e. displays all entries of name table) */ + __be32 upbound; +}; + +/* + * The error string TLV is a null-terminated string describing the cause + * of the request failure. To simplify error processing (and to save space) + * the first character of the string can be a special error code character + * (lying by the range 0x80 to 0xFF) which represents a pre-defined reason. 
+ */ + +#define TIPC_CFG_TLV_ERROR "\x80" /* request contains incorrect TLV(s) */ +#define TIPC_CFG_NOT_NET_ADMIN "\x81" /* must be network administrator */ +#define TIPC_CFG_NOT_ZONE_MSTR "\x82" /* must be zone master */ +#define TIPC_CFG_NO_REMOTE "\x83" /* remote management not enabled */ +#define TIPC_CFG_NOT_SUPPORTED "\x84" /* request is not supported by TIPC */ +#define TIPC_CFG_INVALID_VALUE "\x85" /* request has invalid argument value */ + +/* + * A TLV consists of a descriptor, followed by the TLV value. + * TLV descriptor fields are stored in network byte order; + * TLV values must also be stored in network byte order (where applicable). + * TLV descriptors must be aligned to addresses which are multiple of 4, + * so up to 3 bytes of padding may exist at the end of the TLV value area. + * There must not be any padding between the TLV descriptor and its value. + */ + +struct tlv_desc { + __be16 tlv_len; /* TLV length (descriptor + value) */ + __be16 tlv_type; /* TLV identifier */ +}; + +#define TLV_ALIGNTO 4 + +#define TLV_ALIGN(datalen) (((datalen)+(TLV_ALIGNTO-1)) & ~(TLV_ALIGNTO-1)) +#define TLV_LENGTH(datalen) (sizeof(struct tlv_desc) + (datalen)) +#define TLV_SPACE(datalen) (TLV_ALIGN(TLV_LENGTH(datalen))) +#define TLV_DATA(tlv) ((void *)((char *)(tlv) + TLV_LENGTH(0))) + +static inline int TLV_OK(const void *tlv, __u16 space) +{ + /* + * Would also like to check that "tlv" is a multiple of 4, + * but don't know how to do this in a portable way. + * - Tried doing (!(tlv & (TLV_ALIGNTO-1))), but GCC compiler + * won't allow binary "&" with a pointer. + * - Tried casting "tlv" to integer type, but causes warning about size + * mismatch when pointer is bigger than chosen type (int, long, ...). + */ + + return (space >= TLV_SPACE(0)) && + (ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space); +} + +static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type) +{ + return TLV_OK(tlv, space) && + (ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type); +} + +static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len) +{ + struct tlv_desc *tlv_ptr; + int tlv_len; + + tlv_len = TLV_LENGTH(len); + tlv_ptr = (struct tlv_desc *)tlv; + tlv_ptr->tlv_type = htons(type); + tlv_ptr->tlv_len = htons(tlv_len); + if (len && data) + memcpy(TLV_DATA(tlv_ptr), data, tlv_len); + return TLV_SPACE(len); +} + +/* + * A TLV list descriptor simplifies processing of messages + * containing multiple TLVs. + */ + +struct tlv_list_desc { + struct tlv_desc *tlv_ptr; /* ptr to current TLV */ + __u32 tlv_space; /* # bytes from curr TLV to list end */ +}; + +static inline void TLV_LIST_INIT(struct tlv_list_desc *list, + void *data, __u32 space) +{ + list->tlv_ptr = (struct tlv_desc *)data; + list->tlv_space = space; +} + +static inline int TLV_LIST_EMPTY(struct tlv_list_desc *list) +{ + return (list->tlv_space == 0); +} + +static inline int TLV_LIST_CHECK(struct tlv_list_desc *list, __u16 exp_type) +{ + return TLV_CHECK(list->tlv_ptr, list->tlv_space, exp_type); +} + +static inline void *TLV_LIST_DATA(struct tlv_list_desc *list) +{ + return TLV_DATA(list->tlv_ptr); +} + +static inline void TLV_LIST_STEP(struct tlv_list_desc *list) +{ + __u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len)); + + list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space); + list->tlv_space -= tlv_space; +} + +/* + * Configuration messages exchanged via NETLINK_GENERIC use the following + * family id, name, version and command. 
+ */ +#define TIPC_GENL_NAME "TIPC" +#define TIPC_GENL_VERSION 0x1 +#define TIPC_GENL_CMD 0x1 + +/* + * TIPC specific header used in NETLINK_GENERIC requests. + */ +struct tipc_genlmsghdr { + __u32 dest; /* Destination address */ + __u16 cmd; /* Command */ + __u16 reserved; /* Unused */ +}; + +#define TIPC_GENL_HDRLEN NLMSG_ALIGN(sizeof(struct tipc_genlmsghdr)) + +/* + * Configuration messages exchanged via TIPC sockets use the TIPC configuration + * message header, which is defined below. This structure is analogous + * to the Netlink message header, but fields are stored in network byte order + * and no padding is permitted between the header and the message data + * that follows. + */ + +struct tipc_cfg_msg_hdr { + __be32 tcm_len; /* Message length (including header) */ + __be16 tcm_type; /* Command type */ + __be16 tcm_flags; /* Additional flags */ + char tcm_reserved[8]; /* Unused */ +}; + +#define TCM_F_REQUEST 0x1 /* Flag: Request message */ +#define TCM_F_MORE 0x2 /* Flag: Message to be continued */ + +#define TCM_ALIGN(datalen) (((datalen)+3) & ~3) +#define TCM_LENGTH(datalen) (sizeof(struct tipc_cfg_msg_hdr) + datalen) +#define TCM_SPACE(datalen) (TCM_ALIGN(TCM_LENGTH(datalen))) +#define TCM_DATA(tcm_hdr) ((void *)((char *)(tcm_hdr) + TCM_LENGTH(0))) + +static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags, + void *data, __u16 data_len) +{ + struct tipc_cfg_msg_hdr *tcm_hdr; + int msg_len; + + msg_len = TCM_LENGTH(data_len); + tcm_hdr = (struct tipc_cfg_msg_hdr *)msg; + tcm_hdr->tcm_len = htonl(msg_len); + tcm_hdr->tcm_type = htons(cmd); + tcm_hdr->tcm_flags = htons(flags); + if (data_len && data) + memcpy(TCM_DATA(msg), data, data_len); + return TCM_SPACE(data_len); +} + +#endif diff --git a/include/linux/topology.h b/include/linux/topology.h new file mode 100644 index 00000000..e26db031 --- /dev/null +++ b/include/linux/topology.h @@ -0,0 +1,335 @@ +/* + * include/linux/topology.h + * + * Written by: Matthew Dobson, IBM Corporation + * + * Copyright (C) 2002, IBM Corp. + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Send feedback to <colpatch@us.ibm.com> + */ +#ifndef _LINUX_TOPOLOGY_H +#define _LINUX_TOPOLOGY_H + +#include <linux/cpumask.h> +#include <linux/bitops.h> +#include <linux/mmzone.h> +#include <linux/smp.h> +#include <linux/percpu.h> +#include <asm/topology.h> + +#ifndef node_has_online_mem +#define node_has_online_mem(nid) (1) +#endif + +#ifndef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#define for_each_node_with_cpus(node) \ + for_each_online_node(node) \ + if (nr_cpus_node(node)) + +int arch_update_cpu_topology(void); + +/* Conform to ACPI 2.0 SLIT distance definitions */ +#define LOCAL_DISTANCE 10 +#define REMOTE_DISTANCE 20 +#ifndef node_distance +#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE) +#endif +#ifndef RECLAIM_DISTANCE +/* + * If the distance between nodes in a system is larger than RECLAIM_DISTANCE + * (in whatever arch specific measurement units returned by node_distance()) + * then switch on zone reclaim on boot. + */ +#define RECLAIM_DISTANCE 30 +#endif +#ifndef PENALTY_FOR_NODE_WITH_CPUS +#define PENALTY_FOR_NODE_WITH_CPUS (1) +#endif + +/* + * Below are the 3 major initializers used in building sched_domains: + * SD_SIBLING_INIT, for SMT domains + * SD_CPU_INIT, for SMP domains + * SD_NODE_INIT, for NUMA domains + * + * Any architecture that cares to do any tuning to these values should do so + * by defining their own arch-specific initializer in include/asm/topology.h. + * A definition there will automagically override these default initializers + * and allow arch-specific performance tuning of sched_domains. + * (Only non-zero and non-null fields need be specified.) + */ + +#ifdef CONFIG_SCHED_SMT +/* MCD - Do we really need this? It is always on if CONFIG_SCHED_SMT is, + * so can't we drop this in favor of CONFIG_SCHED_SMT? + */ +#define ARCH_HAS_SCHED_WAKE_IDLE +/* Common values for SMT siblings */ +#ifndef SD_SIBLING_INIT +#define SD_SIBLING_INIT (struct sched_domain) { \ + .min_interval = 1, \ + .max_interval = 2, \ + .busy_factor = 64, \ + .imbalance_pct = 110, \ + \ + .flags = 1*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 1*SD_BALANCE_EXEC \ + | 1*SD_BALANCE_FORK \ + | 0*SD_BALANCE_WAKE \ + | 1*SD_WAKE_AFFINE \ + | 1*SD_SHARE_CPUPOWER \ + | 0*SD_POWERSAVINGS_BALANCE \ + | 1*SD_SHARE_PKG_RESOURCES \ + | 0*SD_SERIALIZE \ + | 0*SD_PREFER_SIBLING \ + | arch_sd_sibling_asym_packing() \ + , \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .smt_gain = 1178, /* 15% */ \ +} +#endif +#endif /* CONFIG_SCHED_SMT */ + +#ifdef CONFIG_SCHED_MC +/* Common values for MC siblings. 
for now mostly derived from SD_CPU_INIT */ +#ifndef SD_MC_INIT +#define SD_MC_INIT (struct sched_domain) { \ + .min_interval = 1, \ + .max_interval = 4, \ + .busy_factor = 64, \ + .imbalance_pct = 125, \ + .cache_nice_tries = 1, \ + .busy_idx = 2, \ + .wake_idx = 0, \ + .forkexec_idx = 0, \ + \ + .flags = 1*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 1*SD_BALANCE_EXEC \ + | 1*SD_BALANCE_FORK \ + | 0*SD_BALANCE_WAKE \ + | 1*SD_WAKE_AFFINE \ + | 0*SD_PREFER_LOCAL \ + | 0*SD_SHARE_CPUPOWER \ + | 1*SD_SHARE_PKG_RESOURCES \ + | 0*SD_SERIALIZE \ + | sd_balance_for_mc_power() \ + | sd_power_saving_flags() \ + , \ + .last_balance = jiffies, \ + .balance_interval = 1, \ +} +#endif +#endif /* CONFIG_SCHED_MC */ + +/* Common values for CPUs */ +#ifndef SD_CPU_INIT +#define SD_CPU_INIT (struct sched_domain) { \ + .min_interval = 1, \ + .max_interval = 4, \ + .busy_factor = 64, \ + .imbalance_pct = 125, \ + .cache_nice_tries = 1, \ + .busy_idx = 2, \ + .idle_idx = 1, \ + .newidle_idx = 0, \ + .wake_idx = 0, \ + .forkexec_idx = 0, \ + \ + .flags = 1*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 1*SD_BALANCE_EXEC \ + | 1*SD_BALANCE_FORK \ + | 0*SD_BALANCE_WAKE \ + | 1*SD_WAKE_AFFINE \ + | 0*SD_PREFER_LOCAL \ + | 0*SD_SHARE_CPUPOWER \ + | 0*SD_SHARE_PKG_RESOURCES \ + | 0*SD_SERIALIZE \ + | sd_balance_for_package_power() \ + | sd_power_saving_flags() \ + , \ + .last_balance = jiffies, \ + .balance_interval = 1, \ +} +#endif + +/* sched_domains SD_ALLNODES_INIT for NUMA machines */ +#define SD_ALLNODES_INIT (struct sched_domain) { \ + .min_interval = 64, \ + .max_interval = 64*num_online_cpus(), \ + .busy_factor = 128, \ + .imbalance_pct = 133, \ + .cache_nice_tries = 1, \ + .busy_idx = 3, \ + .idle_idx = 3, \ + .flags = 1*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 0*SD_BALANCE_EXEC \ + | 0*SD_BALANCE_FORK \ + | 0*SD_BALANCE_WAKE \ + | 0*SD_WAKE_AFFINE \ + | 0*SD_SHARE_CPUPOWER \ + | 0*SD_POWERSAVINGS_BALANCE \ + | 0*SD_SHARE_PKG_RESOURCES \ + | 1*SD_SERIALIZE \ + | 0*SD_PREFER_SIBLING \ + , \ + .last_balance = jiffies, \ + .balance_interval = 64, \ +} + +#ifndef SD_NODES_PER_DOMAIN +#define SD_NODES_PER_DOMAIN 16 +#endif + +#ifdef CONFIG_SCHED_BOOK +#ifndef SD_BOOK_INIT +#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!! +#endif +#endif /* CONFIG_SCHED_BOOK */ + +#ifdef CONFIG_NUMA +#ifndef SD_NODE_INIT +#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! +#endif + +#endif /* CONFIG_NUMA */ + +#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID +DECLARE_PER_CPU(int, numa_node); + +#ifndef numa_node_id +/* Returns the number of the current Node. */ +static inline int numa_node_id(void) +{ + return __this_cpu_read(numa_node); +} +#endif + +#ifndef cpu_to_node +static inline int cpu_to_node(int cpu) +{ + return per_cpu(numa_node, cpu); +} +#endif + +#ifndef set_numa_node +static inline void set_numa_node(int node) +{ + percpu_write(numa_node, node); +} +#endif + +#ifndef set_cpu_numa_node +static inline void set_cpu_numa_node(int cpu, int node) +{ + per_cpu(numa_node, cpu) = node; +} +#endif + +#else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */ + +/* Returns the number of the current Node. */ +#ifndef numa_node_id +static inline int numa_node_id(void) +{ + return cpu_to_node(raw_smp_processor_id()); +} +#endif + +#endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */ + +#ifdef CONFIG_HAVE_MEMORYLESS_NODES + +/* + * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. + * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 
+ * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). + */ +DECLARE_PER_CPU(int, _numa_mem_); + +#ifndef set_numa_mem +static inline void set_numa_mem(int node) +{ + percpu_write(_numa_mem_, node); +} +#endif + +#ifndef numa_mem_id +/* Returns the number of the nearest Node with memory */ +static inline int numa_mem_id(void) +{ + return __this_cpu_read(_numa_mem_); +} +#endif + +#ifndef cpu_to_mem +static inline int cpu_to_mem(int cpu) +{ + return per_cpu(_numa_mem_, cpu); +} +#endif + +#ifndef set_cpu_numa_mem +static inline void set_cpu_numa_mem(int cpu, int node) +{ + per_cpu(_numa_mem_, cpu) = node; +} +#endif + +#else /* !CONFIG_HAVE_MEMORYLESS_NODES */ + +#ifndef numa_mem_id +/* Returns the number of the nearest Node with memory */ +static inline int numa_mem_id(void) +{ + return numa_node_id(); +} +#endif + +#ifndef cpu_to_mem +static inline int cpu_to_mem(int cpu) +{ + return cpu_to_node(cpu); +} +#endif + +#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */ + +#ifndef topology_physical_package_id +#define topology_physical_package_id(cpu) ((void)(cpu), -1) +#endif +#ifndef topology_core_id +#define topology_core_id(cpu) ((void)(cpu), 0) +#endif +#ifndef topology_thread_cpumask +#define topology_thread_cpumask(cpu) cpumask_of(cpu) +#endif +#ifndef topology_core_cpumask +#define topology_core_cpumask(cpu) cpumask_of(cpu) +#endif + +#endif /* _LINUX_TOPOLOGY_H */ diff --git a/include/linux/toshiba.h b/include/linux/toshiba.h new file mode 100644 index 00000000..772dedbc --- /dev/null +++ b/include/linux/toshiba.h @@ -0,0 +1,40 @@ +/* toshiba.h -- Linux driver for accessing the SMM on Toshiba laptops + * + * Copyright (c) 1996-2000 Jonathan A. Buzzard (jonathan@buzzard.org.uk) + * + * Thanks to Juergen Heinzl <juergen@monocerus.demon.co.uk> for the pointers + * on making sure the structure is aligned and packed. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + */ + +#ifndef _LINUX_TOSHIBA_H +#define _LINUX_TOSHIBA_H + +#define TOSH_PROC "/proc/toshiba" +#define TOSH_DEVICE "/dev/toshiba" +#define TOSH_SMM _IOWR('t', 0x90, int) /* broken: meant 24 bytes */ + +typedef struct { + unsigned int eax; + unsigned int ebx __attribute__ ((packed)); + unsigned int ecx __attribute__ ((packed)); + unsigned int edx __attribute__ ((packed)); + unsigned int esi __attribute__ ((packed)); + unsigned int edi __attribute__ ((packed)); +} SMMRegisters; + +#ifdef __KERNEL__ +int tosh_smm(SMMRegisters *regs); +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/tpm.h b/include/linux/tpm.h new file mode 100644 index 00000000..fdc718ab --- /dev/null +++ b/include/linux/tpm.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2004,2007,2008 IBM Corporation + * + * Authors: + * Leendert van Doorn <leendert@watson.ibm.com> + * Dave Safford <safford@watson.ibm.com> + * Reiner Sailer <sailer@watson.ibm.com> + * Kylene Hall <kjhall@us.ibm.com> + * Debora Velarde <dvelarde@us.ibm.com> + * + * Maintained by: <tpmdd_devel@lists.sourceforge.net> + * + * Device driver for TCG/TCPA TPM (trusted platform module). 
+ * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ +#ifndef __LINUX_TPM_H__ +#define __LINUX_TPM_H__ + +/* + * Chip num is this value or a valid tpm idx + */ +#define TPM_ANY_NUM 0xFFFF + +#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) + +extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); +extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); +extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); +#else +static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { + return -ENODEV; +} +static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { + return -ENODEV; +} +static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) { + return -ENODEV; +} +#endif +#endif diff --git a/include/linux/tpm_command.h b/include/linux/tpm_command.h new file mode 100644 index 00000000..727512e2 --- /dev/null +++ b/include/linux/tpm_command.h @@ -0,0 +1,28 @@ +#ifndef __LINUX_TPM_COMMAND_H__ +#define __LINUX_TPM_COMMAND_H__ + +/* + * TPM Command constants from specifications at + * http://www.trustedcomputinggroup.org + */ + +/* Command TAGS */ +#define TPM_TAG_RQU_COMMAND 193 +#define TPM_TAG_RQU_AUTH1_COMMAND 194 +#define TPM_TAG_RQU_AUTH2_COMMAND 195 +#define TPM_TAG_RSP_COMMAND 196 +#define TPM_TAG_RSP_AUTH1_COMMAND 197 +#define TPM_TAG_RSP_AUTH2_COMMAND 198 + +/* Command Ordinals */ +#define TPM_ORD_GETRANDOM 70 +#define TPM_ORD_OSAP 11 +#define TPM_ORD_OIAP 10 +#define TPM_ORD_SEAL 23 +#define TPM_ORD_UNSEAL 24 + +/* Other constants */ +#define SRKHANDLE 0x40000000 +#define TPM_NONCE_SIZE 20 + +#endif diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h new file mode 100644 index 00000000..4eb49023 --- /dev/null +++ b/include/linux/trace_clock.h @@ -0,0 +1,20 @@ +#ifndef _LINUX_TRACE_CLOCK_H +#define _LINUX_TRACE_CLOCK_H + +/* + * 3 trace clock variants, with differing scalability/precision + * tradeoffs: + * + * - local: CPU-local trace clock + * - medium: scalable global clock with some jitter + * - global: globally monotonic, serialized clock + */ +#include <linux/compiler.h> +#include <linux/types.h> + +extern u64 notrace trace_clock_local(void); +extern u64 notrace trace_clock(void); +extern u64 notrace trace_clock_global(void); +extern u64 notrace trace_clock_counter(void); + +#endif /* _LINUX_TRACE_CLOCK_H */ diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h new file mode 100644 index 00000000..a32d86ec --- /dev/null +++ b/include/linux/trace_seq.h @@ -0,0 +1,97 @@ +#ifndef _LINUX_TRACE_SEQ_H +#define _LINUX_TRACE_SEQ_H + +#include <linux/fs.h> + +#include <asm/page.h> + +/* + * Trace sequences are used to allow a function to call several other functions + * to create a string of data to use (up to a max of PAGE_SIZE). + */ + +struct trace_seq { + unsigned char buffer[PAGE_SIZE]; + unsigned int len; + unsigned int readpos; + int full; +}; + +static inline void +trace_seq_init(struct trace_seq *s) +{ + s->len = 0; + s->readpos = 0; + s->full = 0; +} + +/* + * Currently only defined when tracing is enabled. 
+ */ +#ifdef CONFIG_TRACING +extern __printf(2, 3) +int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); +extern __printf(2, 0) +int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); +extern int +trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); +extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); +extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, + size_t cnt); +extern int trace_seq_puts(struct trace_seq *s, const char *str); +extern int trace_seq_putc(struct trace_seq *s, unsigned char c); +extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); +extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, + size_t len); +extern void *trace_seq_reserve(struct trace_seq *s, size_t len); +extern int trace_seq_path(struct trace_seq *s, const struct path *path); + +#else /* CONFIG_TRACING */ +static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) +{ + return 0; +} +static inline int +trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) +{ + return 0; +} + +static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) +{ + return 0; +} +static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, + size_t cnt) +{ + return 0; +} +static inline int trace_seq_puts(struct trace_seq *s, const char *str) +{ + return 0; +} +static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) +{ + return 0; +} +static inline int +trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) +{ + return 0; +} +static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, + size_t len) +{ + return 0; +} +static inline void *trace_seq_reserve(struct trace_seq *s, size_t len) +{ + return NULL; +} +static inline int trace_seq_path(struct trace_seq *s, const struct path *path) +{ + return 0; +} +#endif /* CONFIG_TRACING */ + +#endif /* _LINUX_TRACE_SEQ_H */ diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h new file mode 100644 index 00000000..51bd91d9 --- /dev/null +++ b/include/linux/tracehook.h @@ -0,0 +1,190 @@ +/* + * Tracing hooks + * + * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * This file defines hook entry points called by core code where + * user tracing/debugging support might need to do something. These + * entry points are called tracehook_*(). Each hook declared below + * has a detailed kerneldoc comment giving the context (locking et + * al) from which it is called, and the meaning of its return value. + * + * Each function here typically has only one call site, so it is ok + * to have some nontrivial tracehook_*() inlines. In all cases, the + * fast path when no tracing is enabled should be very short. + * + * The purpose of this file and the tracehook_* layer is to consolidate + * the interface that the kernel core and arch code uses to enable any + * user debugging or tracing facility (such as ptrace). The interfaces + * here are carefully documented so that maintainers of core and arch + * code do not need to think about the implementation details of the + * tracing facilities. 
Likewise, maintainers of the tracing code do not + * need to understand all the calling core or arch code in detail, just + * documented circumstances of each call, such as locking conditions. + * + * If the calling core code changes so that locking is different, then + * it is ok to change the interface documented here. The maintainer of + * core code changing should notify the maintainers of the tracing code + * that they need to work out the change. + * + * Some tracehook_*() inlines take arguments that the current tracing + * implementations might not necessarily use. These function signatures + * are chosen to pass in all the information that is on hand in the + * caller and might conceivably be relevant to a tracer, so that the + * core code won't have to be updated when tracing adds more features. + * If a call site changes so that some of those parameters are no longer + * already on hand without extra work, then the tracehook_* interface + * can change so there is no make-work burden on the core code. The + * maintainer of core code changing should notify the maintainers of the + * tracing code that they need to work out the change. + */ + +#ifndef _LINUX_TRACEHOOK_H +#define _LINUX_TRACEHOOK_H 1 + +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <linux/security.h> +struct linux_binprm; + +/* + * ptrace report for syscall entry and exit looks identical. + */ +static inline int ptrace_report_syscall(struct pt_regs *regs) +{ + int ptrace = current->ptrace; + + if (!(ptrace & PT_PTRACED)) + return 0; + + ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } + + return fatal_signal_pending(current); +} + +/** + * tracehook_report_syscall_entry - task is about to attempt a system call + * @regs: user register state of current task + * + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just entered the kernel for a system call. + * Full user register state is available here. Changing the values + * in @regs can affect the system call number and arguments to be tried. + * It is safe to block here, preventing the system call from beginning. + * + * Returns zero normally, or nonzero if the calling arch code should abort + * the system call. That must prevent normal entry so no system call is + * made. If @task ever returns to user mode after this, its register state + * is unspecified, but should be something harmless like an %ENOSYS error + * return. It should preserve enough information so that syscall_rollback() + * can work (see asm-generic/syscall.h). + * + * Called without locks, just after entering kernel mode. + */ +static inline __must_check int tracehook_report_syscall_entry( + struct pt_regs *regs) +{ + return ptrace_report_syscall(regs); +} + +/** + * tracehook_report_syscall_exit - task has just finished a system call + * @regs: user register state of current task + * @step: nonzero if simulating single-step or block-step + * + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just finished an attempted system call. Full + * user register state is available here. It is safe to block here, + * preventing signals from being processed. 
+ * + * If @step is nonzero, this report is also in lieu of the normal + * trap that would follow the system call instruction because + * user_enable_block_step() or user_enable_single_step() was used. + * In this case, %TIF_SYSCALL_TRACE might not be set. + * + * Called without locks, just before checking for pending signals. + */ +static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) +{ + if (step) { + siginfo_t info; + user_single_step_siginfo(current, regs, &info); + force_sig_info(SIGTRAP, &info, current); + return; + } + + ptrace_report_syscall(regs); +} + +/** + * tracehook_signal_handler - signal handler setup is complete + * @sig: number of signal being delivered + * @info: siginfo_t of signal being delivered + * @ka: sigaction setting that chose the handler + * @regs: user register state + * @stepping: nonzero if debugger single-step or block-step in use + * + * Called by the arch code after a signal handler has been set up. + * Register and stack state reflects the user handler about to run. + * Signal mask changes have already been made. + * + * Called without locks, shortly before returning to user mode + * (or handling more signals). + */ +static inline void tracehook_signal_handler(int sig, siginfo_t *info, + const struct k_sigaction *ka, + struct pt_regs *regs, int stepping) +{ + if (stepping) + ptrace_notify(SIGTRAP); +} + +#ifdef TIF_NOTIFY_RESUME +/** + * set_notify_resume - cause tracehook_notify_resume() to be called + * @task: task that will call tracehook_notify_resume() + * + * Calling this arranges that @task will call tracehook_notify_resume() + * before returning to user mode. If it's already running in user mode, + * it will enter the kernel and call tracehook_notify_resume() soon. + * If it's blocked, it will not be woken. + */ +static inline void set_notify_resume(struct task_struct *task) +{ + if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) + kick_process(task); +} + +/** + * tracehook_notify_resume - report when about to return to user mode + * @regs: user-mode registers of @current task + * + * This is called when %TIF_NOTIFY_RESUME has been set. Now we are + * about to return to user mode, and the user state in @regs can be + * inspected or adjusted. The caller in arch code has cleared + * %TIF_NOTIFY_RESUME before the call. If the flag gets set again + * asynchronously, this will be called again before we return to + * user mode. + * + * Called without locks. + */ +static inline void tracehook_notify_resume(struct pt_regs *regs) +{ +} +#endif /* TIF_NOTIFY_RESUME */ + +#endif /* <linux/tracehook.h> */ diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h new file mode 100644 index 00000000..bd96ecd0 --- /dev/null +++ b/include/linux/tracepoint.h @@ -0,0 +1,392 @@ +#ifndef _LINUX_TRACEPOINT_H +#define _LINUX_TRACEPOINT_H + +/* + * Kernel Tracepoint API. + * + * See Documentation/trace/tracepoints.txt. + * + * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> + * + * Heavily inspired from the Linux Kernel Markers. + * + * This file is released under the GPLv2. + * See the file COPYING for more details. 
+ */ + +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/rcupdate.h> +#include <linux/static_key.h> + +struct module; +struct tracepoint; + +struct tracepoint_func { + void *func; + void *data; +}; + +struct tracepoint { + const char *name; /* Tracepoint name */ + struct static_key key; + void (*regfunc)(void); + void (*unregfunc)(void); + struct tracepoint_func __rcu *funcs; +}; + +/* + * Connect a probe to a tracepoint. + * Internal API, should not be used directly. + */ +extern int tracepoint_probe_register(const char *name, void *probe, void *data); + +/* + * Disconnect a probe from a tracepoint. + * Internal API, should not be used directly. + */ +extern int +tracepoint_probe_unregister(const char *name, void *probe, void *data); + +extern int tracepoint_probe_register_noupdate(const char *name, void *probe, + void *data); +extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe, + void *data); +extern void tracepoint_probe_update_all(void); + +#ifdef CONFIG_MODULES +struct tp_module { + struct list_head list; + unsigned int num_tracepoints; + struct tracepoint * const *tracepoints_ptrs; +}; +#endif /* CONFIG_MODULES */ + +struct tracepoint_iter { +#ifdef CONFIG_MODULES + struct tp_module *module; +#endif /* CONFIG_MODULES */ + struct tracepoint * const *tracepoint; +}; + +extern void tracepoint_iter_start(struct tracepoint_iter *iter); +extern void tracepoint_iter_next(struct tracepoint_iter *iter); +extern void tracepoint_iter_stop(struct tracepoint_iter *iter); +extern void tracepoint_iter_reset(struct tracepoint_iter *iter); + +/* + * tracepoint_synchronize_unregister must be called between the last tracepoint + * probe unregistration and the end of module exit to make sure there is no + * caller executing a probe when it is freed. + */ +static inline void tracepoint_synchronize_unregister(void) +{ + synchronize_sched(); +} + +#define PARAMS(args...) args + +#endif /* _LINUX_TRACEPOINT_H */ + +/* + * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include + * file ifdef protection. + * This is due to the way trace events work. If a file includes two + * trace event headers under one "CREATE_TRACE_POINTS" the first include + * will override the TRACE_EVENT and break the second include. + */ + +#ifndef DECLARE_TRACE + +#define TP_PROTO(args...) args +#define TP_ARGS(args...) args +#define TP_CONDITION(args...) args + +#ifdef CONFIG_TRACEPOINTS + +/* + * it_func[0] is never NULL because there is at least one element in the array + * when the array itself is non NULL. + * + * Note, the proto and args passed in includes "__data" as the first parameter. + * The reason for this is to handle the "void" prototype. If a tracepoint + * has a "void" prototype, then it is invalid to declare a function + * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just + * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". 
+ */ +#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \ + do { \ + struct tracepoint_func *it_func_ptr; \ + void *it_func; \ + void *__data; \ + \ + if (!(cond)) \ + return; \ + prercu; \ + rcu_read_lock_sched_notrace(); \ + it_func_ptr = rcu_dereference_sched((tp)->funcs); \ + if (it_func_ptr) { \ + do { \ + it_func = (it_func_ptr)->func; \ + __data = (it_func_ptr)->data; \ + ((void(*)(proto))(it_func))(args); \ + } while ((++it_func_ptr)->func); \ + } \ + rcu_read_unlock_sched_notrace(); \ + postrcu; \ + } while (0) + +/* + * Make sure the alignment of the structure in the __tracepoints section will + * not add unwanted padding between the beginning of the section and the + * structure. Force alignment to the same alignment as the section start. + */ +#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ + extern struct tracepoint __tracepoint_##name; \ + static inline void trace_##name(proto) \ + { \ + if (static_key_false(&__tracepoint_##name.key)) \ + __DO_TRACE(&__tracepoint_##name, \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args), \ + TP_CONDITION(cond),,); \ + } \ + static inline void trace_##name##_rcuidle(proto) \ + { \ + if (static_branch(&__tracepoint_##name.key)) \ + __DO_TRACE(&__tracepoint_##name, \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args), \ + TP_CONDITION(cond), \ + rcu_idle_exit(), \ + rcu_idle_enter()); \ + } \ + static inline int \ + register_trace_##name(void (*probe)(data_proto), void *data) \ + { \ + return tracepoint_probe_register(#name, (void *)probe, \ + data); \ + } \ + static inline int \ + unregister_trace_##name(void (*probe)(data_proto), void *data) \ + { \ + return tracepoint_probe_unregister(#name, (void *)probe, \ + data); \ + } \ + static inline void \ + check_trace_callback_type_##name(void (*cb)(data_proto)) \ + { \ + } + +/* + * We have no guarantee that gcc and the linker won't up-align the tracepoint + * structures, so we create an array of pointers that will be used for iteration + * on the tracepoints. + */ +#define DEFINE_TRACE_FN(name, reg, unreg) \ + static const char __tpstrtab_##name[] \ + __attribute__((section("__tracepoints_strings"))) = #name; \ + struct tracepoint __tracepoint_##name \ + __attribute__((section("__tracepoints"))) = \ + { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\ + static struct tracepoint * const __tracepoint_ptr_##name __used \ + __attribute__((section("__tracepoints_ptrs"))) = \ + &__tracepoint_##name; + +#define DEFINE_TRACE(name) \ + DEFINE_TRACE_FN(name, NULL, NULL); + +#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ + EXPORT_SYMBOL_GPL(__tracepoint_##name) +#define EXPORT_TRACEPOINT_SYMBOL(name) \ + EXPORT_SYMBOL(__tracepoint_##name) + +#else /* !CONFIG_TRACEPOINTS */ +#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ + static inline void trace_##name(proto) \ + { } \ + static inline void trace_##name##_rcuidle(proto) \ + { } \ + static inline int \ + register_trace_##name(void (*probe)(data_proto), \ + void *data) \ + { \ + return -ENOSYS; \ + } \ + static inline int \ + unregister_trace_##name(void (*probe)(data_proto), \ + void *data) \ + { \ + return -ENOSYS; \ + } \ + static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \ + { \ + } + +#define DEFINE_TRACE_FN(name, reg, unreg) +#define DEFINE_TRACE(name) +#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) +#define EXPORT_TRACEPOINT_SYMBOL(name) + +#endif /* CONFIG_TRACEPOINTS */ + +/* + * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype + * (void). 
"void" is a special value in a function prototype and can + * not be combined with other arguments. Since the DECLARE_TRACE() + * macro adds a data element at the beginning of the prototype, + * we need a way to differentiate "(void *data, proto)" from + * "(void *data, void)". The second prototype is invalid. + * + * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype + * and "void *__data" as the callback prototype. + * + * DECLARE_TRACE() passes "proto" as the tracepoint protoype and + * "void *__data, proto" as the callback prototype. + */ +#define DECLARE_TRACE_NOARGS(name) \ + __DECLARE_TRACE(name, void, , 1, void *__data, __data) + +#define DECLARE_TRACE(name, proto, args) \ + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + +#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + +#define TRACE_EVENT_FLAGS(event, flag) + +#endif /* DECLARE_TRACE */ + +#ifndef TRACE_EVENT +/* + * For use with the TRACE_EVENT macro: + * + * We define a tracepoint, its arguments, its printk format + * and its 'fast binay record' layout. + * + * Firstly, name your tracepoint via TRACE_EVENT(name : the + * 'subsystem_event' notation is fine. + * + * Think about this whole construct as the + * 'trace_sched_switch() function' from now on. + * + * + * TRACE_EVENT(sched_switch, + * + * * + * * A function has a regular function arguments + * * prototype, declare it via TP_PROTO(): + * * + * + * TP_PROTO(struct rq *rq, struct task_struct *prev, + * struct task_struct *next), + * + * * + * * Define the call signature of the 'function'. + * * (Design sidenote: we use this instead of a + * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.) + * * + * + * TP_ARGS(rq, prev, next), + * + * * + * * Fast binary tracing: define the trace record via + * * TP_STRUCT__entry(). You can think about it like a + * * regular C structure local variable definition. + * * + * * This is how the trace record is structured and will + * * be saved into the ring buffer. These are the fields + * * that will be exposed to user-space in + * * /sys/kernel/debug/tracing/events/<*>/format. + * * + * * The declared 'local variable' is called '__entry' + * * + * * __field(pid_t, prev_prid) is equivalent to a standard declariton: + * * + * * pid_t prev_pid; + * * + * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to: + * * + * * char prev_comm[TASK_COMM_LEN]; + * * + * + * TP_STRUCT__entry( + * __array( char, prev_comm, TASK_COMM_LEN ) + * __field( pid_t, prev_pid ) + * __field( int, prev_prio ) + * __array( char, next_comm, TASK_COMM_LEN ) + * __field( pid_t, next_pid ) + * __field( int, next_prio ) + * ), + * + * * + * * Assign the entry into the trace record, by embedding + * * a full C statement block into TP_fast_assign(). You + * * can refer to the trace record as '__entry' - + * * otherwise you can put arbitrary C code in here. + * * + * * Note: this C code will execute every time a trace event + * * happens, on an active tracepoint. + * * + * + * TP_fast_assign( + * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); + * __entry->prev_pid = prev->pid; + * __entry->prev_prio = prev->prio; + * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); + * __entry->next_pid = next->pid; + * __entry->next_prio = next->prio; + * ), + * + * * + * * Formatted output of a trace record via TP_printk(). 
+ * * This is how the tracepoint will appear under ftrace + * * plugins that make use of this tracepoint. + * * + * * (raw-binary tracing won't actually perform this step.) + * * + * + * TP_printk("task %s:%d [%d] ==> %s:%d [%d]", + * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, + * __entry->next_comm, __entry->next_pid, __entry->next_prio), + * + * ); + * + * This macro construct is thus used for the regular printk format + * tracing setup, it is used to construct a function pointer based + * tracepoint callback (this is used by programmatic plugins and + * can also be used by generic instrumentation like SystemTap), and + * it is also used to expose a structured trace record in + * /sys/kernel/debug/tracing/events/. + * + * A set of (un)registration functions can be passed to the variant + * TRACE_EVENT_FN to perform any (un)registration work. + */ + +#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) +#define DEFINE_EVENT(template, name, proto, args) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) +#define DEFINE_EVENT_CONDITION(template, name, proto, \ + args, cond) \ + DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ + PARAMS(args), PARAMS(cond)) + +#define TRACE_EVENT(name, proto, args, struct, assign, print) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) +#define TRACE_EVENT_FN(name, proto, args, struct, \ + assign, print, reg, unreg) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) +#define TRACE_EVENT_CONDITION(name, proto, args, cond, \ + struct, assign, print) \ + DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ + PARAMS(args), PARAMS(cond)) + +#define TRACE_EVENT_FLAGS(event, flag) + +#endif /* ifdef TRACE_EVENT (see note above) */ diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h new file mode 100644 index 00000000..11087cdd --- /dev/null +++ b/include/linux/transport_class.h @@ -0,0 +1,102 @@ +/* + * transport_class.h - a generic container for all transport classes + * + * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> + * + * This file is licensed under GPLv2 + */ + +#ifndef _TRANSPORT_CLASS_H_ +#define _TRANSPORT_CLASS_H_ + +#include <linux/device.h> +#include <linux/bug.h> +#include <linux/attribute_container.h> + +struct transport_container; + +struct transport_class { + struct class class; + int (*setup)(struct transport_container *, struct device *, + struct device *); + int (*configure)(struct transport_container *, struct device *, + struct device *); + int (*remove)(struct transport_container *, struct device *, + struct device *); +}; + +#define DECLARE_TRANSPORT_CLASS(cls, nm, su, rm, cfg) \ +struct transport_class cls = { \ + .class = { \ + .name = nm, \ + }, \ + .setup = su, \ + .remove = rm, \ + .configure = cfg, \ +} + + +struct anon_transport_class { + struct transport_class tclass; + struct attribute_container container; +}; + +#define DECLARE_ANON_TRANSPORT_CLASS(cls, mtch, cfg) \ +struct anon_transport_class cls = { \ + .tclass = { \ + .configure = cfg, \ + }, \ + . 
container = { \ + .match = mtch, \ + }, \ +} + +#define class_to_transport_class(x) \ + container_of(x, struct transport_class, class) + +struct transport_container { + struct attribute_container ac; + const struct attribute_group *statistics; +}; + +#define attribute_container_to_transport_container(x) \ + container_of(x, struct transport_container, ac) + +void transport_remove_device(struct device *); +void transport_add_device(struct device *); +void transport_setup_device(struct device *); +void transport_configure_device(struct device *); +void transport_destroy_device(struct device *); + +static inline void +transport_register_device(struct device *dev) +{ + transport_setup_device(dev); + transport_add_device(dev); +} + +static inline void +transport_unregister_device(struct device *dev) +{ + transport_remove_device(dev); + transport_destroy_device(dev); +} + +static inline int transport_container_register(struct transport_container *tc) +{ + return attribute_container_register(&tc->ac); +} + +static inline void transport_container_unregister(struct transport_container *tc) +{ + if (unlikely(attribute_container_unregister(&tc->ac))) + BUG(); +} + +int transport_class_register(struct transport_class *); +int anon_transport_class_register(struct anon_transport_class *); +void transport_class_unregister(struct transport_class *); +void anon_transport_class_unregister(struct anon_transport_class *); + + +#endif diff --git a/include/linux/trdevice.h b/include/linux/trdevice.h new file mode 100644 index 00000000..bfc84a7a --- /dev/null +++ b/include/linux/trdevice.h @@ -0,0 +1,37 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. NET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Token-ring handlers. + * + * Version: @(#)eth.h 1.0.4 05/13/93 + * + * Authors: Ross Biro + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * Relocated to include/linux where it belongs by Alan Cox + * <gw4pts@gw4pts.ampr.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * WARNING: This move may well be temporary. This file will get merged with others RSN. 
+ * + */ +#ifndef _LINUX_TRDEVICE_H +#define _LINUX_TRDEVICE_H + + +#include <linux/if_tr.h> + +#ifdef __KERNEL__ +extern __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev); +extern void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh, struct net_device *dev); +extern struct net_device *alloc_trdev(int sizeof_priv); + +#endif + +#endif /* _LINUX_TRDEVICE_H */ diff --git a/include/linux/tsacct_kern.h b/include/linux/tsacct_kern.h new file mode 100644 index 00000000..7e50ac79 --- /dev/null +++ b/include/linux/tsacct_kern.h @@ -0,0 +1,34 @@ +/* + * tsacct_kern.h - kernel header for system accounting over taskstats interface + * + * Copyright (C) Jay Lan SGI + */ + +#ifndef _LINUX_TSACCT_KERN_H +#define _LINUX_TSACCT_KERN_H + +#include <linux/taskstats.h> + +#ifdef CONFIG_TASKSTATS +extern void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk); +#else +static inline void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) +{} +#endif /* CONFIG_TASKSTATS */ + +#ifdef CONFIG_TASK_XACCT +extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p); +extern void acct_update_integrals(struct task_struct *tsk); +extern void acct_clear_integrals(struct task_struct *tsk); +#else +static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) +{} +static inline void acct_update_integrals(struct task_struct *tsk) +{} +static inline void acct_clear_integrals(struct task_struct *tsk) +{} +#endif /* CONFIG_TASK_XACCT */ + +#endif + + diff --git a/include/linux/tty.h b/include/linux/tty.h new file mode 100644 index 00000000..9f47ab54 --- /dev/null +++ b/include/linux/tty.h @@ -0,0 +1,672 @@ +#ifndef _LINUX_TTY_H +#define _LINUX_TTY_H + +/* + * 'tty.h' defines some structures used by tty_io.c and some defines. + */ + +#define NR_LDISCS 30 + +/* line disciplines */ +#define N_TTY 0 +#define N_SLIP 1 +#define N_MOUSE 2 +#define N_PPP 3 +#define N_STRIP 4 +#define N_AX25 5 +#define N_X25 6 /* X.25 async */ +#define N_6PACK 7 +#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */ +#define N_R3964 9 /* Reserved for Simatic R3964 module */ +#define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ +#define N_IRDA 11 /* Linux IrDa - http://irda.sourceforge.net/ */ +#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data */ + /* cards about SMS messages */ +#define N_HDLC 13 /* synchronous HDLC */ +#define N_SYNC_PPP 14 /* synchronous PPP */ +#define N_HCI 15 /* Bluetooth HCI UART */ +#define N_GIGASET_M101 16 /* Siemens Gigaset M101 serial DECT adapter */ +#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */ +#define N_PPS 18 /* Pulse per Second */ +#define N_V253 19 /* Codec control over voice modem */ +#define N_CAIF 20 /* CAIF protocol for talking to modems */ +#define N_GSM0710 21 /* GSM 0710 Mux */ +#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */ +#define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */ +#define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */ + +#ifdef __KERNEL__ +#include <linux/fs.h> +#include <linux/major.h> +#include <linux/termios.h> +#include <linux/workqueue.h> +#include <linux/tty_driver.h> +#include <linux/tty_ldisc.h> +#include <linux/mutex.h> + + + +/* + * (Note: the *_driver.minor_start values 1, 64, 128, 192 are + * hardcoded at present.) 
+ */ +#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ +#define NR_UNIX98_PTY_RESERVE 1024 /* Default reserve for main devpts */ +#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ + +/* + * This character is the same as _POSIX_VDISABLE: it cannot be used as + * a c_cc[] character, but indicates that a particular special character + * isn't in use (eg VINTR has no character etc) + */ +#define __DISABLED_CHAR '\0' + +struct tty_buffer { + struct tty_buffer *next; + char *char_buf_ptr; + unsigned char *flag_buf_ptr; + int used; + int size; + int commit; + int read; + /* Data points here */ + unsigned long data[0]; +}; + +/* + * We default to dicing tty buffer allocations to this many characters + * in order to avoid multiple page allocations. We know the size of + * tty_buffer itself but it must also be taken into account that the + * the buffer is 256 byte aligned. See tty_buffer_find for the allocation + * logic this must match + */ + +#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF) + + +struct tty_bufhead { + struct work_struct work; + spinlock_t lock; + struct tty_buffer *head; /* Queue head */ + struct tty_buffer *tail; /* Active buffer */ + struct tty_buffer *free; /* Free queue head */ + int memory_used; /* Buffer space used excluding + free queue */ +}; +/* + * When a break, frame error, or parity error happens, these codes are + * stuffed into the flags buffer. + */ +#define TTY_NORMAL 0 +#define TTY_BREAK 1 +#define TTY_FRAME 2 +#define TTY_PARITY 3 +#define TTY_OVERRUN 4 + +#define INTR_CHAR(tty) ((tty)->termios->c_cc[VINTR]) +#define QUIT_CHAR(tty) ((tty)->termios->c_cc[VQUIT]) +#define ERASE_CHAR(tty) ((tty)->termios->c_cc[VERASE]) +#define KILL_CHAR(tty) ((tty)->termios->c_cc[VKILL]) +#define EOF_CHAR(tty) ((tty)->termios->c_cc[VEOF]) +#define TIME_CHAR(tty) ((tty)->termios->c_cc[VTIME]) +#define MIN_CHAR(tty) ((tty)->termios->c_cc[VMIN]) +#define SWTC_CHAR(tty) ((tty)->termios->c_cc[VSWTC]) +#define START_CHAR(tty) ((tty)->termios->c_cc[VSTART]) +#define STOP_CHAR(tty) ((tty)->termios->c_cc[VSTOP]) +#define SUSP_CHAR(tty) ((tty)->termios->c_cc[VSUSP]) +#define EOL_CHAR(tty) ((tty)->termios->c_cc[VEOL]) +#define REPRINT_CHAR(tty) ((tty)->termios->c_cc[VREPRINT]) +#define DISCARD_CHAR(tty) ((tty)->termios->c_cc[VDISCARD]) +#define WERASE_CHAR(tty) ((tty)->termios->c_cc[VWERASE]) +#define LNEXT_CHAR(tty) ((tty)->termios->c_cc[VLNEXT]) +#define EOL2_CHAR(tty) ((tty)->termios->c_cc[VEOL2]) + +#define _I_FLAG(tty, f) ((tty)->termios->c_iflag & (f)) +#define _O_FLAG(tty, f) ((tty)->termios->c_oflag & (f)) +#define _C_FLAG(tty, f) ((tty)->termios->c_cflag & (f)) +#define _L_FLAG(tty, f) ((tty)->termios->c_lflag & (f)) + +#define I_IGNBRK(tty) _I_FLAG((tty), IGNBRK) +#define I_BRKINT(tty) _I_FLAG((tty), BRKINT) +#define I_IGNPAR(tty) _I_FLAG((tty), IGNPAR) +#define I_PARMRK(tty) _I_FLAG((tty), PARMRK) +#define I_INPCK(tty) _I_FLAG((tty), INPCK) +#define I_ISTRIP(tty) _I_FLAG((tty), ISTRIP) +#define I_INLCR(tty) _I_FLAG((tty), INLCR) +#define I_IGNCR(tty) _I_FLAG((tty), IGNCR) +#define I_ICRNL(tty) _I_FLAG((tty), ICRNL) +#define I_IUCLC(tty) _I_FLAG((tty), IUCLC) +#define I_IXON(tty) _I_FLAG((tty), IXON) +#define I_IXANY(tty) _I_FLAG((tty), IXANY) +#define I_IXOFF(tty) _I_FLAG((tty), IXOFF) +#define I_IMAXBEL(tty) _I_FLAG((tty), IMAXBEL) +#define I_IUTF8(tty) _I_FLAG((tty), IUTF8) + +#define O_OPOST(tty) _O_FLAG((tty), OPOST) +#define O_OLCUC(tty) _O_FLAG((tty), OLCUC) +#define O_ONLCR(tty) _O_FLAG((tty), ONLCR) 
+#define O_OCRNL(tty) _O_FLAG((tty), OCRNL) +#define O_ONOCR(tty) _O_FLAG((tty), ONOCR) +#define O_ONLRET(tty) _O_FLAG((tty), ONLRET) +#define O_OFILL(tty) _O_FLAG((tty), OFILL) +#define O_OFDEL(tty) _O_FLAG((tty), OFDEL) +#define O_NLDLY(tty) _O_FLAG((tty), NLDLY) +#define O_CRDLY(tty) _O_FLAG((tty), CRDLY) +#define O_TABDLY(tty) _O_FLAG((tty), TABDLY) +#define O_BSDLY(tty) _O_FLAG((tty), BSDLY) +#define O_VTDLY(tty) _O_FLAG((tty), VTDLY) +#define O_FFDLY(tty) _O_FLAG((tty), FFDLY) + +#define C_BAUD(tty) _C_FLAG((tty), CBAUD) +#define C_CSIZE(tty) _C_FLAG((tty), CSIZE) +#define C_CSTOPB(tty) _C_FLAG((tty), CSTOPB) +#define C_CREAD(tty) _C_FLAG((tty), CREAD) +#define C_PARENB(tty) _C_FLAG((tty), PARENB) +#define C_PARODD(tty) _C_FLAG((tty), PARODD) +#define C_HUPCL(tty) _C_FLAG((tty), HUPCL) +#define C_CLOCAL(tty) _C_FLAG((tty), CLOCAL) +#define C_CIBAUD(tty) _C_FLAG((tty), CIBAUD) +#define C_CRTSCTS(tty) _C_FLAG((tty), CRTSCTS) + +#define L_ISIG(tty) _L_FLAG((tty), ISIG) +#define L_ICANON(tty) _L_FLAG((tty), ICANON) +#define L_XCASE(tty) _L_FLAG((tty), XCASE) +#define L_ECHO(tty) _L_FLAG((tty), ECHO) +#define L_ECHOE(tty) _L_FLAG((tty), ECHOE) +#define L_ECHOK(tty) _L_FLAG((tty), ECHOK) +#define L_ECHONL(tty) _L_FLAG((tty), ECHONL) +#define L_NOFLSH(tty) _L_FLAG((tty), NOFLSH) +#define L_TOSTOP(tty) _L_FLAG((tty), TOSTOP) +#define L_ECHOCTL(tty) _L_FLAG((tty), ECHOCTL) +#define L_ECHOPRT(tty) _L_FLAG((tty), ECHOPRT) +#define L_ECHOKE(tty) _L_FLAG((tty), ECHOKE) +#define L_FLUSHO(tty) _L_FLAG((tty), FLUSHO) +#define L_PENDIN(tty) _L_FLAG((tty), PENDIN) +#define L_IEXTEN(tty) _L_FLAG((tty), IEXTEN) +#define L_EXTPROC(tty) _L_FLAG((tty), EXTPROC) + +struct device; +struct signal_struct; + +/* + * Port level information. Each device keeps its own port level information + * so provide a common structure for those ports wanting to use common support + * routines. + * + * The tty port has a different lifetime to the tty so must be kept apart. + * In addition be careful as tty -> port mappings are valid for the life + * of the tty object but in many cases port -> tty mappings are valid only + * until a hangup so don't use the wrong path. + */ + +struct tty_port; + +struct tty_port_operations { + /* Return 1 if the carrier is raised */ + int (*carrier_raised)(struct tty_port *port); + /* Control the DTR line */ + void (*dtr_rts)(struct tty_port *port, int raise); + /* Called when the last close completes or a hangup finishes + IFF the port was initialized. Do not use to free resources. 
Called + under the port mutex to serialize against activate/shutdowns */ + void (*shutdown)(struct tty_port *port); + void (*drop)(struct tty_port *port); + /* Called under the port mutex from tty_port_open, serialized using + the port mutex */ + /* FIXME: long term getting the tty argument *out* of this would be + good for consoles */ + int (*activate)(struct tty_port *port, struct tty_struct *tty); + /* Called on the final put of a port */ + void (*destruct)(struct tty_port *port); +}; + +struct tty_port { + struct tty_struct *tty; /* Back pointer */ + const struct tty_port_operations *ops; /* Port operations */ + spinlock_t lock; /* Lock protecting tty field */ + int blocked_open; /* Waiting to open */ + int count; /* Usage count */ + wait_queue_head_t open_wait; /* Open waiters */ + wait_queue_head_t close_wait; /* Close waiters */ + wait_queue_head_t delta_msr_wait; /* Modem status change */ + unsigned long flags; /* TTY flags ASY_*/ + unsigned char console:1; /* port is a console */ + struct mutex mutex; /* Locking */ + struct mutex buf_mutex; /* Buffer alloc lock */ + unsigned char *xmit_buf; /* Optional buffer */ + unsigned int close_delay; /* Close port delay */ + unsigned int closing_wait; /* Delay for output */ + int drain_delay; /* Set to zero if no pure time + based drain is needed else + set to size of fifo */ + struct kref kref; /* Ref counter */ +}; + +/* + * Where all of the state associated with a tty is kept while the tty + * is open. Since the termios state should be kept even if the tty + * has been closed --- for things like the baud rate, etc --- it is + * not stored here, but rather a pointer to the real state is stored + * here. Possible the winsize structure should have the same + * treatment, but (1) the default 80x24 is usually right and (2) it's + * most often used by a windowing system, which will set the correct + * size each time the window is created or resized anyway. + * - TYT, 9/14/92 + */ + +struct tty_operations; + +struct tty_struct { + int magic; + struct kref kref; + struct device *dev; + struct tty_driver *driver; + const struct tty_operations *ops; + int index; + + /* Protects ldisc changes: Lock tty not pty */ + struct mutex ldisc_mutex; + struct tty_ldisc *ldisc; + + struct mutex termios_mutex; + spinlock_t ctrl_lock; + /* Termios values are protected by the termios mutex */ + struct ktermios *termios, *termios_locked; + struct termiox *termiox; /* May be NULL for unsupported */ + char name[64]; + struct pid *pgrp; /* Protected by ctrl lock */ + struct pid *session; + unsigned long flags; + int count; + struct winsize winsize; /* termios mutex */ + unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1; + unsigned char low_latency:1, warned:1; + unsigned char ctrl_status; /* ctrl_lock */ + unsigned int receive_room; /* Bytes free for queue */ + + struct tty_struct *link; + struct fasync_struct *fasync; + struct tty_bufhead buf; /* Locked internally */ + int alt_speed; /* For magic substitution of 38400 bps */ + wait_queue_head_t write_wait; + wait_queue_head_t read_wait; + struct work_struct hangup_work; + void *disc_data; + void *driver_data; + struct list_head tty_files; + +#define N_TTY_BUF_SIZE 4096 + + /* + * The following is data for the N_TTY line discipline. For + * historical reasons, this is included in the tty structure. + * Mostly locked by the BKL. 
+ */ + unsigned int column; + unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1; + unsigned char closing:1; + unsigned char echo_overrun:1; + unsigned short minimum_to_wake; + unsigned long overrun_time; + int num_overrun; + unsigned long process_char_map[256/(8*sizeof(unsigned long))]; + char *read_buf; + int read_head; + int read_tail; + int read_cnt; + unsigned long read_flags[N_TTY_BUF_SIZE/(8*sizeof(unsigned long))]; + unsigned char *echo_buf; + unsigned int echo_pos; + unsigned int echo_cnt; + int canon_data; + unsigned long canon_head; + unsigned int canon_column; + struct mutex atomic_read_lock; + struct mutex atomic_write_lock; + struct mutex output_lock; + struct mutex echo_lock; + unsigned char *write_buf; + int write_cnt; + spinlock_t read_lock; + /* If the tty has a pending do_SAK, queue it here - akpm */ + struct work_struct SAK_work; + struct tty_port *port; +}; + +/* Each of a tty's open files has private_data pointing to tty_file_private */ +struct tty_file_private { + struct tty_struct *tty; + struct file *file; + struct list_head list; +}; + +/* tty magic number */ +#define TTY_MAGIC 0x5401 + +/* + * These bits are used in the flags field of the tty structure. + * + * So that interrupts won't be able to mess up the queues, + * copy_to_cooked must be atomic with respect to itself, as must + * tty->write. Thus, you must use the inline functions set_bit() and + * clear_bit() to make things atomic. + */ +#define TTY_THROTTLED 0 /* Call unthrottle() at threshold min */ +#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */ +#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */ +#define TTY_EXCLUSIVE 3 /* Exclusive open mode */ +#define TTY_DEBUG 4 /* Debugging */ +#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ +#define TTY_PUSH 6 /* n_tty private */ +#define TTY_CLOSING 7 /* ->close() in progress */ +#define TTY_LDISC 9 /* Line discipline attached */ +#define TTY_LDISC_CHANGING 10 /* Line discipline changing */ +#define TTY_LDISC_OPEN 11 /* Line discipline is open */ +#define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ +#define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ +#define TTY_PTY_LOCK 16 /* pty private */ +#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ +#define TTY_HUPPED 18 /* Post driver->hangup() */ +#define TTY_FLUSHING 19 /* Flushing to ldisc in progress */ +#define TTY_FLUSHPENDING 20 /* Queued buffer flush pending */ +#define TTY_HUPPING 21 /* ->hangup() in progress */ + +#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) + +extern void tty_write_flush(struct tty_struct *); + +extern struct ktermios tty_std_termios; + +extern void console_init(void); +extern int vcs_init(void); + +extern struct class *tty_class; + +/** + * tty_kref_get - get a tty reference + * @tty: tty device + * + * Return a new reference to a tty object. 
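A minimal sketch of the reference and flag pattern described above, using hypothetical ex_* names: tty_kref_get()/tty_kref_put() bracket the work, and the TTY_* flag bits are only ever touched with the atomic bit helpers.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/tty.h>

static void ex_flag_and_ref(struct tty_struct *tty)
{
        struct tty_struct *held = tty_kref_get(tty);    /* NULL in, NULL out */

        if (!held)
                return;

        /* Ask the driver to call the ldisc's write_wakeup() when room frees up. */
        set_bit(TTY_DO_WRITE_WAKEUP, &held->flags);

        if (test_bit(TTY_IO_ERROR, &held->flags))
                pr_info("%s: tty is in an error state\n", held->name);

        clear_bit(TTY_DO_WRITE_WAKEUP, &held->flags);
        tty_kref_put(held);                             /* balance tty_kref_get() */
}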
The caller must hold + * sufficient locks/counts to ensure that their existing reference cannot + * go away + */ + +static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) +{ + if (tty) + kref_get(&tty->kref); + return tty; +} +extern void tty_kref_put(struct tty_struct *tty); + +extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, + const char *routine); +extern char *tty_name(struct tty_struct *tty, char *buf); +extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); +extern int tty_check_change(struct tty_struct *tty); +extern void stop_tty(struct tty_struct *tty); +extern void start_tty(struct tty_struct *tty); +extern int tty_register_driver(struct tty_driver *driver); +extern int tty_unregister_driver(struct tty_driver *driver); +extern struct device *tty_register_device(struct tty_driver *driver, + unsigned index, struct device *dev); +extern void tty_unregister_device(struct tty_driver *driver, unsigned index); +extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp, + int buflen); +extern void tty_write_message(struct tty_struct *tty, char *msg); +extern int tty_put_char(struct tty_struct *tty, unsigned char c); +extern int tty_chars_in_buffer(struct tty_struct *tty); +extern int tty_write_room(struct tty_struct *tty); +extern void tty_driver_flush_buffer(struct tty_struct *tty); +extern void tty_throttle(struct tty_struct *tty); +extern void tty_unthrottle(struct tty_struct *tty); +extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); +extern void tty_driver_remove_tty(struct tty_driver *driver, + struct tty_struct *tty); +extern void tty_shutdown(struct tty_struct *tty); +extern void tty_free_termios(struct tty_struct *tty); +extern int is_current_pgrp_orphaned(void); +extern struct pid *tty_get_pgrp(struct tty_struct *tty); +extern int is_ignored(int sig); +extern int tty_signal(int sig, struct tty_struct *tty); +extern void tty_hangup(struct tty_struct *tty); +extern void tty_vhangup(struct tty_struct *tty); +extern void tty_vhangup_locked(struct tty_struct *tty); +extern void tty_vhangup_self(void); +extern void tty_unhangup(struct file *filp); +extern int tty_hung_up_p(struct file *filp); +extern void do_SAK(struct tty_struct *tty); +extern void __do_SAK(struct tty_struct *tty); +extern void disassociate_ctty(int priv); +extern void no_tty(void); +extern void tty_flip_buffer_push(struct tty_struct *tty); +extern void tty_flush_to_ldisc(struct tty_struct *tty); +extern void tty_buffer_free_all(struct tty_struct *tty); +extern void tty_buffer_flush(struct tty_struct *tty); +extern void tty_buffer_init(struct tty_struct *tty); +extern speed_t tty_get_baud_rate(struct tty_struct *tty); +extern speed_t tty_termios_baud_rate(struct ktermios *termios); +extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); +extern void tty_termios_encode_baud_rate(struct ktermios *termios, + speed_t ibaud, speed_t obaud); +extern void tty_encode_baud_rate(struct tty_struct *tty, + speed_t ibaud, speed_t obaud); +extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); +extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); +extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); + +extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); +extern void tty_ldisc_deref(struct tty_ldisc *); +extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); +extern void tty_ldisc_hangup(struct tty_struct *tty); +extern const struct 
file_operations tty_ldiscs_proc_fops; + +extern void tty_wakeup(struct tty_struct *tty); +extern void tty_ldisc_flush(struct tty_struct *tty); + +extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg); +extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg); +extern dev_t tty_devnum(struct tty_struct *tty); +extern void proc_clear_tty(struct task_struct *p); +extern struct tty_struct *get_current_tty(void); +extern void tty_default_fops(struct file_operations *fops); +extern struct tty_struct *alloc_tty_struct(void); +extern int tty_alloc_file(struct file *file); +extern void tty_add_file(struct tty_struct *tty, struct file *file); +extern void tty_free_file(struct file *file); +extern void free_tty_struct(struct tty_struct *tty); +extern void initialize_tty_struct(struct tty_struct *tty, + struct tty_driver *driver, int idx); +extern void deinitialize_tty_struct(struct tty_struct *tty); +extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); +extern int tty_release(struct inode *inode, struct file *filp); +extern int tty_init_termios(struct tty_struct *tty); +extern int tty_standard_install(struct tty_driver *driver, + struct tty_struct *tty); + +extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); +extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); + +extern struct mutex tty_mutex; +extern spinlock_t tty_files_lock; + +extern void tty_write_unlock(struct tty_struct *tty); +extern int tty_write_lock(struct tty_struct *tty, int ndelay); +#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) + +extern void tty_port_init(struct tty_port *port); +extern int tty_port_alloc_xmit_buf(struct tty_port *port); +extern void tty_port_free_xmit_buf(struct tty_port *port); +extern void tty_port_put(struct tty_port *port); + +static inline struct tty_port *tty_port_get(struct tty_port *port) +{ + if (port) + kref_get(&port->kref); + return port; +} + +extern struct tty_struct *tty_port_tty_get(struct tty_port *port); +extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); +extern int tty_port_carrier_raised(struct tty_port *port); +extern void tty_port_raise_dtr_rts(struct tty_port *port); +extern void tty_port_lower_dtr_rts(struct tty_port *port); +extern void tty_port_hangup(struct tty_port *port); +extern int tty_port_block_til_ready(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern int tty_port_close_start(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); +extern void tty_port_close(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern int tty_port_open(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +static inline int tty_port_users(struct tty_port *port) +{ + return port->count + port->blocked_open; +} + +extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); +extern int tty_unregister_ldisc(int disc); +extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); +extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); +extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty); +extern void tty_ldisc_init(struct tty_struct *tty); +extern void tty_ldisc_deinit(struct tty_struct *tty); +extern void tty_ldisc_begin(void); +/* This 
last one is just for the tty layer internals and shouldn't be used elsewhere */ +extern void tty_ldisc_enable(struct tty_struct *tty); + + +/* n_tty.c */ +extern struct tty_ldisc_ops tty_ldisc_N_TTY; +extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); + +/* tty_audit.c */ +#ifdef CONFIG_AUDIT +extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, + size_t size); +extern void tty_audit_exit(void); +extern void tty_audit_fork(struct signal_struct *sig); +extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); +extern void tty_audit_push(struct tty_struct *tty); +extern int tty_audit_push_task(struct task_struct *tsk, + uid_t loginuid, u32 sessionid); +#else +static inline void tty_audit_add_data(struct tty_struct *tty, + unsigned char *data, size_t size) +{ +} +static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) +{ +} +static inline void tty_audit_exit(void) +{ +} +static inline void tty_audit_fork(struct signal_struct *sig) +{ +} +static inline void tty_audit_push(struct tty_struct *tty) +{ +} +static inline int tty_audit_push_task(struct task_struct *tsk, + uid_t loginuid, u32 sessionid) +{ + return 0; +} +#endif + +/* tty_io.c */ +extern int __init tty_init(void); + +/* tty_ioctl.c */ +extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg); +extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg); + +/* serial.c */ + +extern void serial_console_init(void); + +/* pcxx.c */ + +extern int pcxe_open(struct tty_struct *tty, struct file *filp); + +/* vt.c */ + +extern int vt_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg); + +extern long vt_compat_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg); + +/* tty_mutex.c */ +/* functions for preparation of BKL removal */ +extern void __lockfunc tty_lock(void) __acquires(tty_lock); +extern void __lockfunc tty_unlock(void) __releases(tty_lock); + +/* + * this shall be called only from where BTM is held (like close) + * + * We need this to ensure nobody waits for us to finish while we are waiting. + * Without this we were encountering system stalls. + * + * This should be indeed removed with BTM removal later. + * + * Locking: BTM required. Nobody is allowed to hold port->mutex. + */ +static inline void tty_wait_until_sent_from_close(struct tty_struct *tty, + long timeout) +{ + tty_unlock(); /* tty->ops->close holds the BTM, drop it while waiting */ + tty_wait_until_sent(tty, timeout); + tty_lock(); +} + +/* + * wait_event_interruptible_tty -- wait for a condition with the tty lock held + * + * The condition we are waiting for might take a long time to + * become true, or might depend on another thread taking the + * BTM. In either case, we need to drop the BTM to guarantee + * forward progress. This is a leftover from the conversion + * from the BKL and should eventually get removed as the BTM + * falls out of use. + * + * Do not use in new code. 
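A sketch of how a single-port driver might wire up the tty_port helpers declared above. The ex_* names and the always-asserted carrier are assumptions; the point is that the driver's open/close methods simply delegate to tty_port_open()/tty_port_close(), which handle blocking opens, usage counts and DTR/RTS through the supplied tty_port_operations.

#include <linux/tty.h>

/* Hypothetical single-port device state. */
struct ex_uart {
        struct tty_port port;
};

static struct ex_uart ex_dev;

static int ex_carrier_raised(struct tty_port *port)
{
        return 1;                       /* pretend DCD is always asserted */
}

static void ex_dtr_rts(struct tty_port *port, int raise)
{
        /* drive the real DTR/RTS lines here */
}

static const struct tty_port_operations ex_port_ops = {
        .carrier_raised = ex_carrier_raised,
        .dtr_rts        = ex_dtr_rts,
};

static int ex_open(struct tty_struct *tty, struct file *filp)
{
        /* blocking open, usage counting and DTR/RTS handled via ex_port_ops */
        return tty_port_open(&ex_dev.port, tty, filp);
}

static void ex_close(struct tty_struct *tty, struct file *filp)
{
        tty_port_close(&ex_dev.port, tty, filp);
}

static void ex_port_setup(void)
{
        tty_port_init(&ex_dev.port);
        ex_dev.port.ops = &ex_port_ops;
}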
+ */ +#define wait_event_interruptible_tty(wq, condition) \ +({ \ + int __ret = 0; \ + if (!(condition)) { \ + __wait_event_interruptible_tty(wq, condition, __ret); \ + } \ + __ret; \ +}) + +#define __wait_event_interruptible_tty(wq, condition, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + tty_unlock(); \ + schedule(); \ + tty_lock(); \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + finish_wait(&wq, &__wait); \ +} while (0) + + +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h new file mode 100644 index 00000000..6e6dbb74 --- /dev/null +++ b/include/linux/tty_driver.h @@ -0,0 +1,412 @@ +#ifndef _LINUX_TTY_DRIVER_H +#define _LINUX_TTY_DRIVER_H + +/* + * This structure defines the interface between the low-level tty + * driver and the tty routines. The following routines can be + * defined; unless noted otherwise, they are optional, and can be + * filled in with a null pointer. + * + * struct tty_struct * (*lookup)(struct tty_driver *self, int idx) + * + * Return the tty device corresponding to idx, NULL if there is not + * one currently in use and an ERR_PTR value on error. Called under + * tty_mutex (for now!) + * + * Optional method. Default behaviour is to use the ttys array + * + * int (*install)(struct tty_driver *self, struct tty_struct *tty) + * + * Install a new tty into the tty driver internal tables. Used in + * conjunction with lookup and remove methods. + * + * Optional method. Default behaviour is to use the ttys array + * + * void (*remove)(struct tty_driver *self, struct tty_struct *tty) + * + * Remove a closed tty from the tty driver internal tables. Used in + * conjunction with lookup and remove methods. + * + * Optional method. Default behaviour is to use the ttys array + * + * int (*open)(struct tty_struct * tty, struct file * filp); + * + * This routine is called when a particular tty device is opened. + * This routine is mandatory; if this routine is not filled in, + * the attempted open will fail with ENODEV. + * + * Required method. + * + * void (*close)(struct tty_struct * tty, struct file * filp); + * + * This routine is called when a particular tty device is closed. + * + * Required method. + * + * void (*shutdown)(struct tty_struct * tty); + * + * This routine is called synchronously when a particular tty device + * is closed for the last time freeing up the resources. + * Note that tty_shutdown() is not called if ops->shutdown is defined. + * This means one is responsible to take care of calling ops->remove (e.g. + * via tty_driver_remove_tty) and releasing tty->termios. + * Note that this hook may be called from *all* the contexts where one + * uses tty refcounting (e.g. tty_port_tty_get). + * + * + * void (*cleanup)(struct tty_struct * tty); + * + * This routine is called asynchronously when a particular tty device + * is closed for the last time freeing up the resources. This is + * actually the second part of shutdown for routines that might sleep. + * + * + * int (*write)(struct tty_struct * tty, + * const unsigned char *buf, int count); + * + * This routine is called by the kernel to write a series of + * characters to the tty device. The characters may come from + * user space or kernel space. This routine will return the + * number of characters actually accepted for writing. + * + * Optional: Required for writable devices. 
+ * + * int (*put_char)(struct tty_struct *tty, unsigned char ch); + * + * This routine is called by the kernel to write a single + * character to the tty device. If the kernel uses this routine, + * it must call the flush_chars() routine (if defined) when it is + * done stuffing characters into the driver. If there is no room + * in the queue, the character is ignored. + * + * Optional: Kernel will use the write method if not provided. + * + * Note: Do not call this function directly, call tty_put_char + * + * void (*flush_chars)(struct tty_struct *tty); + * + * This routine is called by the kernel after it has written a + * series of characters to the tty device using put_char(). + * + * Optional: + * + * Note: Do not call this function directly, call tty_driver_flush_chars + * + * int (*write_room)(struct tty_struct *tty); + * + * This routine returns the numbers of characters the tty driver + * will accept for queuing to be written. This number is subject + * to change as output buffers get emptied, or if the output flow + * control is acted. + * + * Required if write method is provided else not needed. + * + * Note: Do not call this function directly, call tty_write_room + * + * int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); + * + * This routine allows the tty driver to implement + * device-specific ioctls. If the ioctl number passed in cmd + * is not recognized by the driver, it should return ENOIOCTLCMD. + * + * Optional + * + * long (*compat_ioctl)(struct tty_struct *tty,, + * unsigned int cmd, unsigned long arg); + * + * implement ioctl processing for 32 bit process on 64 bit system + * + * Optional + * + * void (*set_termios)(struct tty_struct *tty, struct ktermios * old); + * + * This routine allows the tty driver to be notified when + * device's termios settings have changed. + * + * Optional: Called under the termios lock + * + * + * void (*set_ldisc)(struct tty_struct *tty); + * + * This routine allows the tty driver to be notified when the + * device's termios settings have changed. + * + * Optional: Called under BKL (currently) + * + * void (*throttle)(struct tty_struct * tty); + * + * This routine notifies the tty driver that input buffers for + * the line discipline are close to full, and it should somehow + * signal that no more characters should be sent to the tty. + * + * Optional: Always invoke via tty_throttle(), called under the + * termios lock. + * + * void (*unthrottle)(struct tty_struct * tty); + * + * This routine notifies the tty drivers that it should signals + * that characters can now be sent to the tty without fear of + * overrunning the input buffers of the line disciplines. + * + * Optional: Always invoke via tty_unthrottle(), called under the + * termios lock. + * + * void (*stop)(struct tty_struct *tty); + * + * This routine notifies the tty driver that it should stop + * outputting characters to the tty device. + * + * Optional: + * + * Note: Call stop_tty not this method. + * + * void (*start)(struct tty_struct *tty); + * + * This routine notifies the tty driver that it resume sending + * characters to the tty device. + * + * Optional: + * + * Note: Call start_tty not this method. + * + * void (*hangup)(struct tty_struct *tty); + * + * This routine notifies the tty driver that it should hang up the + * tty device. + * + * Optional: + * + * int (*break_ctl)(struct tty_struct *tty, int state); + * + * This optional routine requests the tty driver to turn on or + * off BREAK status on the RS-232 port. 
If state is -1, + * then the BREAK status should be turned on; if state is 0, then + * BREAK should be turned off. + * + * If this routine is implemented, the high-level tty driver will + * handle the following ioctls: TCSBRK, TCSBRKP, TIOCSBRK, + * TIOCCBRK. + * + * If the driver sets TTY_DRIVER_HARDWARE_BREAK then the interface + * will also be called with actual times and the hardware is expected + * to do the delay work itself. 0 and -1 are still used for on/off. + * + * Optional: Required for TCSBRK/BRKP/etc handling. + * + * void (*wait_until_sent)(struct tty_struct *tty, int timeout); + * + * This routine waits until the device has written out all of the + * characters in its transmitter FIFO. + * + * Optional: If not provided the device is assumed to have no FIFO + * + * Note: Usually correct to call tty_wait_until_sent + * + * void (*send_xchar)(struct tty_struct *tty, char ch); + * + * This routine is used to send a high-priority XON/XOFF + * character to the device. + * + * Optional: If not provided then the write method is called under + * the atomic write lock to keep it serialized with the ldisc. + * + * int (*resize)(struct tty_struct *tty, struct winsize *ws) + * + * Called when a termios request is issued which changes the + * requested terminal geometry. + * + * Optional: the default action is to update the termios structure + * without error. This is usually the correct behaviour. Drivers should + * not force errors here if they are not resizable objects (eg a serial + * line). See tty_do_resize() if you need to wrap the standard method + * in your own logic - the usual case. + * + * void (*set_termiox)(struct tty_struct *tty, struct termiox *new); + * + * Called when the device receives a termiox based ioctl. Passes down + * the requested data from user space. This method will not be invoked + * unless the tty also has a valid tty->termiox pointer. + * + * Optional: Called under the termios lock + * + * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount); + * + * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel + * structure to complete. This method is optional and will only be called + * if provided (otherwise EINVAL will be returned). 
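A skeleton of how a driver might fill in a few of these methods (the struct itself is defined just below). The ex_* names and the 64-byte FIFO are placeholders; open/close would normally delegate to the tty_port helpers as in the earlier sketch.

#include <linux/tty.h>
#include <linux/tty_driver.h>

#define EX_FIFO_ROOM    64      /* pretend hardware FIFO depth */

static int ex_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
        if (count > EX_FIFO_ROOM)
                count = EX_FIFO_ROOM;
        /* ...copy 'count' bytes to the hardware here... */
        return count;                   /* bytes actually accepted */
}

static int ex_write_room(struct tty_struct *tty)
{
        return EX_FIFO_ROOM;            /* bytes the driver can queue right now */
}

static void ex_set_termios(struct tty_struct *tty, struct ktermios *old)
{
        speed_t baud = tty_get_baud_rate(tty);  /* helper declared in tty.h */
        int hwflow = C_CRTSCTS(tty) != 0;       /* termios macros from tty.h */
        int swflow = I_IXON(tty) != 0;

        /* program the UART: divisor from 'baud', flow control from the flags */
        (void)baud; (void)hwflow; (void)swflow;
}

static const struct tty_operations ex_tty_ops = {
        /* .open/.close would usually call tty_port_open()/tty_port_close() */
        .write          = ex_write,
        .write_room     = ex_write_room,
        .set_termios    = ex_set_termios,
};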
+ */ + +#include <linux/export.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/cdev.h> +#include <linux/termios.h> + +struct tty_struct; +struct tty_driver; +struct serial_icounter_struct; + +struct tty_operations { + struct tty_struct * (*lookup)(struct tty_driver *driver, + struct inode *inode, int idx); + int (*install)(struct tty_driver *driver, struct tty_struct *tty); + void (*remove)(struct tty_driver *driver, struct tty_struct *tty); + int (*open)(struct tty_struct * tty, struct file * filp); + void (*close)(struct tty_struct * tty, struct file * filp); + void (*shutdown)(struct tty_struct *tty); + void (*cleanup)(struct tty_struct *tty); + int (*write)(struct tty_struct * tty, + const unsigned char *buf, int count); + int (*put_char)(struct tty_struct *tty, unsigned char ch); + void (*flush_chars)(struct tty_struct *tty); + int (*write_room)(struct tty_struct *tty); + int (*chars_in_buffer)(struct tty_struct *tty); + int (*ioctl)(struct tty_struct *tty, + unsigned int cmd, unsigned long arg); + long (*compat_ioctl)(struct tty_struct *tty, + unsigned int cmd, unsigned long arg); + void (*set_termios)(struct tty_struct *tty, struct ktermios * old); + void (*throttle)(struct tty_struct * tty); + void (*unthrottle)(struct tty_struct * tty); + void (*stop)(struct tty_struct *tty); + void (*start)(struct tty_struct *tty); + void (*hangup)(struct tty_struct *tty); + int (*break_ctl)(struct tty_struct *tty, int state); + void (*flush_buffer)(struct tty_struct *tty); + void (*set_ldisc)(struct tty_struct *tty); + void (*wait_until_sent)(struct tty_struct *tty, int timeout); + void (*send_xchar)(struct tty_struct *tty, char ch); + int (*tiocmget)(struct tty_struct *tty); + int (*tiocmset)(struct tty_struct *tty, + unsigned int set, unsigned int clear); + int (*resize)(struct tty_struct *tty, struct winsize *ws); + int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); + int (*get_icount)(struct tty_struct *tty, + struct serial_icounter_struct *icount); +#ifdef CONFIG_CONSOLE_POLL + int (*poll_init)(struct tty_driver *driver, int line, char *options); + int (*poll_get_char)(struct tty_driver *driver, int line); + void (*poll_put_char)(struct tty_driver *driver, int line, char ch); +#endif + const struct file_operations *proc_fops; +}; + +struct tty_driver { + int magic; /* magic number for this structure */ + struct kref kref; /* Reference management */ + struct cdev cdev; + struct module *owner; + const char *driver_name; + const char *name; + int name_base; /* offset of printed name */ + int major; /* major device number */ + int minor_start; /* start of minor device number */ + int num; /* number of devices allocated */ + short type; /* type of tty driver */ + short subtype; /* subtype of tty driver */ + struct ktermios init_termios; /* Initial termios */ + int flags; /* tty driver flags */ + struct proc_dir_entry *proc_entry; /* /proc fs entry */ + struct tty_driver *other; /* only used for the PTY driver */ + + /* + * Pointer to the tty data structures + */ + struct tty_struct **ttys; + struct ktermios **termios; + void *driver_state; + + /* + * Driver methods + */ + + const struct tty_operations *ops; + struct list_head tty_drivers; +}; + +extern struct list_head tty_drivers; + +extern struct tty_driver *__alloc_tty_driver(int lines, struct module *owner); +extern void put_tty_driver(struct tty_driver *driver); +extern void tty_set_operations(struct tty_driver *driver, + const struct tty_operations *op); +extern struct tty_driver 
*tty_find_polling_driver(char *name, int *line); + +extern void tty_driver_kref_put(struct tty_driver *driver); + +#define alloc_tty_driver(lines) __alloc_tty_driver(lines, THIS_MODULE) + +static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) +{ + kref_get(&d->kref); + return d; +} + +/* tty driver magic number */ +#define TTY_DRIVER_MAGIC 0x5402 + +/* + * tty driver flags + * + * TTY_DRIVER_RESET_TERMIOS --- requests the tty layer to reset the + * termios setting when the last process has closed the device. + * Used for PTY's, in particular. + * + * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will + * guarantee never not to set any special character handling + * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || + * !INPCK)). That is, if there is no reason for the driver to + * send notifications of parity and break characters up to the + * line driver, it won't do so. This allows the line driver to + * optimize for this case if this flag is set. (Note that there + * is also a promise, if the above case is true, not to signal + * overruns, either.) + * + * TTY_DRIVER_DYNAMIC_DEV --- if set, the individual tty devices need + * to be registered with a call to tty_register_device() when the + * device is found in the system and unregistered with a call to + * tty_unregister_device() so the devices will be show up + * properly in sysfs. If not set, driver->num entries will be + * created by the tty core in sysfs when tty_register_driver() is + * called. This is to be used by drivers that have tty devices + * that can appear and disappear while the main tty driver is + * registered with the tty core. + * + * TTY_DRIVER_DEVPTS_MEM -- don't use the standard arrays, instead + * use dynamic memory keyed through the devpts filesystem. This + * is only applicable to the pty driver. + * + * TTY_DRIVER_HARDWARE_BREAK -- hardware handles break signals. Pass + * the requested timeout to the caller instead of using a simple + * on/off interface. 
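Putting the pieces together, a module might allocate and register a driver roughly as follows. The names "ex_serial"/"ttyEX" and ex_tty_ops (from the sketch above) are placeholders, and major 0 asks the core to pick a major number.

#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

#define EX_NR_PORTS     4

static struct tty_driver *ex_driver;

static int __init ex_serial_init(void)
{
        int ret;

        ex_driver = alloc_tty_driver(EX_NR_PORTS);
        if (!ex_driver)
                return -ENOMEM;

        ex_driver->driver_name  = "ex_serial";
        ex_driver->name         = "ttyEX";              /* devices ttyEX0..ttyEX3 */
        ex_driver->major        = 0;                    /* 0: let the core pick one */
        ex_driver->type         = TTY_DRIVER_TYPE_SERIAL;
        ex_driver->subtype      = SERIAL_TYPE_NORMAL;
        ex_driver->init_termios = tty_std_termios;
        ex_driver->flags        = TTY_DRIVER_REAL_RAW;
        tty_set_operations(ex_driver, &ex_tty_ops);     /* ops from the sketch above */

        ret = tty_register_driver(ex_driver);
        if (ret)
                put_tty_driver(ex_driver);
        return ret;
}

static void __exit ex_serial_exit(void)
{
        tty_unregister_driver(ex_driver);
        put_tty_driver(ex_driver);
}

module_init(ex_serial_init);
module_exit(ex_serial_exit);
MODULE_LICENSE("GPL");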
+ * + */ +#define TTY_DRIVER_INSTALLED 0x0001 +#define TTY_DRIVER_RESET_TERMIOS 0x0002 +#define TTY_DRIVER_REAL_RAW 0x0004 +#define TTY_DRIVER_DYNAMIC_DEV 0x0008 +#define TTY_DRIVER_DEVPTS_MEM 0x0010 +#define TTY_DRIVER_HARDWARE_BREAK 0x0020 + +/* tty driver types */ +#define TTY_DRIVER_TYPE_SYSTEM 0x0001 +#define TTY_DRIVER_TYPE_CONSOLE 0x0002 +#define TTY_DRIVER_TYPE_SERIAL 0x0003 +#define TTY_DRIVER_TYPE_PTY 0x0004 +#define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */ +#define TTY_DRIVER_TYPE_SYSCONS 0x0006 + +/* system subtypes (magic, used by tty_io.c) */ +#define SYSTEM_TYPE_TTY 0x0001 +#define SYSTEM_TYPE_CONSOLE 0x0002 +#define SYSTEM_TYPE_SYSCONS 0x0003 +#define SYSTEM_TYPE_SYSPTMX 0x0004 + +/* pty subtypes (magic, used by tty_io.c) */ +#define PTY_TYPE_MASTER 0x0001 +#define PTY_TYPE_SLAVE 0x0002 + +/* serial subtype definitions */ +#define SERIAL_TYPE_NORMAL 1 + +#endif /* #ifdef _LINUX_TTY_DRIVER_H */ diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h new file mode 100644 index 00000000..9239d033 --- /dev/null +++ b/include/linux/tty_flip.h @@ -0,0 +1,28 @@ +#ifndef _LINUX_TTY_FLIP_H +#define _LINUX_TTY_FLIP_H + +extern int tty_buffer_request_room(struct tty_struct *tty, size_t size); +extern int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size); +extern int tty_insert_flip_string_fixed_flag(struct tty_struct *tty, const unsigned char *chars, char flag, size_t size); +extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size); +extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size); +void tty_schedule_flip(struct tty_struct *tty); + +static inline int tty_insert_flip_char(struct tty_struct *tty, + unsigned char ch, char flag) +{ + struct tty_buffer *tb = tty->buf.tail; + if (tb && tb->used < tb->size) { + tb->flag_buf_ptr[tb->used] = flag; + tb->char_buf_ptr[tb->used++] = ch; + return 1; + } + return tty_insert_flip_string_flags(tty, &ch, &flag, 1); +} + +static inline int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, size_t size) +{ + return tty_insert_flip_string_fixed_flag(tty, chars, TTY_NORMAL, size); +} + +#endif /* _LINUX_TTY_FLIP_H */ diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h new file mode 100644 index 00000000..ff7dc086 --- /dev/null +++ b/include/linux/tty_ldisc.h @@ -0,0 +1,166 @@ +#ifndef _LINUX_TTY_LDISC_H +#define _LINUX_TTY_LDISC_H + +/* + * This structure defines the interface between the tty line discipline + * implementation and the tty routines. The following routines can be + * defined; unless noted otherwise, they are optional, and can be + * filled in with a null pointer. + * + * int (*open)(struct tty_struct *); + * + * This function is called when the line discipline is associated + * with the tty. The line discipline can use this as an + * opportunity to initialize any state needed by the ldisc routines. + * + * void (*close)(struct tty_struct *); + * + * This function is called when the line discipline is being + * shutdown, either because the tty is being closed or because + * the tty is being changed to use a new line discipline + * + * void (*flush_buffer)(struct tty_struct *tty); + * + * This function instructs the line discipline to clear its + * buffers of any input characters it may have queued to be + * delivered to the user mode process. 
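The flip-buffer helpers declared in tty_flip.h above are typically used from a driver's receive path along the following lines; ex_rx() and its caller are hypothetical.

#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Called from a (hypothetical) receive interrupt with 'count' bytes in 'buf'. */
static void ex_rx(struct tty_struct *tty, const unsigned char *buf, int count)
{
        int copied;

        /* queue normal data; every byte gets an implicit TTY_NORMAL flag */
        copied = tty_insert_flip_string(tty, buf, count);
        if (copied < count)
                pr_warn("ex_rx: dropped %d bytes\n", count - copied);

        /* a byte received with a parity error would be queued like this */
        tty_insert_flip_char(tty, 0, TTY_PARITY);

        /* hand everything queued so far to the line discipline */
        tty_flip_buffer_push(tty);
}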
+ * + * ssize_t (*chars_in_buffer)(struct tty_struct *tty); + * + * This function returns the number of input characters the line + * discipline may have queued up to be delivered to the user mode + * process. + * + * ssize_t (*read)(struct tty_struct * tty, struct file * file, + * unsigned char * buf, size_t nr); + * + * This function is called when the user requests to read from + * the tty. The line discipline will return whatever characters + * it has buffered up for the user. If this function is not + * defined, the user will receive an EIO error. + * + * ssize_t (*write)(struct tty_struct * tty, struct file * file, + * const unsigned char * buf, size_t nr); + * + * This function is called when the user requests to write to the + * tty. The line discipline will deliver the characters to the + * low-level tty device for transmission, optionally performing + * some processing on the characters first. If this function is + * not defined, the user will receive an EIO error. + * + * int (*ioctl)(struct tty_struct * tty, struct file * file, + * unsigned int cmd, unsigned long arg); + * + * This function is called when the user requests an ioctl which + * is not handled by the tty layer or the low-level tty driver. + * It is intended for ioctls which affect line discpline + * operation. Note that the search order for ioctls is (1) tty + * layer, (2) tty low-level driver, (3) line discpline. So a + * low-level driver can "grab" an ioctl request before the line + * discpline has a chance to see it. + * + * long (*compat_ioctl)(struct tty_struct * tty, struct file * file, + * unsigned int cmd, unsigned long arg); + * + * Process ioctl calls from 32-bit process on 64-bit system + * + * void (*set_termios)(struct tty_struct *tty, struct ktermios * old); + * + * This function notifies the line discpline that a change has + * been made to the termios structure. + * + * int (*poll)(struct tty_struct * tty, struct file * file, + * poll_table *wait); + * + * This function is called when a user attempts to select/poll on a + * tty device. It is solely the responsibility of the line + * discipline to handle poll requests. + * + * void (*receive_buf)(struct tty_struct *, const unsigned char *cp, + * char *fp, int count); + * + * This function is called by the low-level tty driver to send + * characters received by the hardware to the line discpline for + * processing. <cp> is a pointer to the buffer of input + * character received by the device. <fp> is a pointer to a + * pointer of flag bytes which indicate whether a character was + * received with a parity error, etc. + * + * void (*write_wakeup)(struct tty_struct *); + * + * This function is called by the low-level tty driver to signal + * that line discpline should try to send more characters to the + * low-level driver for transmission. If the line discpline does + * not have any more data to send, it can just return. + * + * int (*hangup)(struct tty_struct *) + * + * Called on a hangup. Tells the discipline that it should + * cease I/O to the tty driver. Can sleep. The driver should + * seek to perform this action quickly but should wait until + * any pending driver I/O is completed. + * + * void (*dcd_change)(struct tty_struct *tty, unsigned int status, + * struct pps_event_time *ts) + * + * Tells the discipline that the DCD pin has changed its status and + * the relative timestamp. Pointer ts cannot be NULL. 
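A bare-bones line discipline built on these hooks might look like the sketch below. The slot number N_EXAMPLE and the ex_* names are assumptions, not an allocated discipline; the struct tty_ldisc_ops it fills in is defined just after this comment.

#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_ldisc.h>

#define N_EXAMPLE 25    /* hypothetical unused slot below NR_LDISCS */

static int ex_ldisc_open(struct tty_struct *tty)
{
        tty->receive_room = 65536;      /* how much receive_buf() may be fed */
        return 0;
}

static void ex_ldisc_close(struct tty_struct *tty)
{
}

static void ex_ldisc_receive_buf(struct tty_struct *tty,
                                 const unsigned char *cp, char *fp, int count)
{
        /* 'fp' carries one TTY_NORMAL/TTY_PARITY/... flag per byte in 'cp' */
}

static void ex_ldisc_write_wakeup(struct tty_struct *tty)
{
        /* the driver has room again; send more via tty->ops->write() */
}

static struct tty_ldisc_ops ex_ldisc_ops = {
        .magic          = TTY_LDISC_MAGIC,
        .name           = "n_example",
        .owner          = THIS_MODULE,
        .open           = ex_ldisc_open,
        .close          = ex_ldisc_close,
        .receive_buf    = ex_ldisc_receive_buf,
        .write_wakeup   = ex_ldisc_write_wakeup,
};

static int __init ex_ldisc_init(void)
{
        return tty_register_ldisc(N_EXAMPLE, &ex_ldisc_ops);
}

static void __exit ex_ldisc_exit(void)
{
        tty_unregister_ldisc(N_EXAMPLE);
}

module_init(ex_ldisc_init);
module_exit(ex_ldisc_exit);
MODULE_LICENSE("GPL");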
+ */ + +#include <linux/fs.h> +#include <linux/wait.h> +#include <linux/pps_kernel.h> + +struct tty_ldisc_ops { + int magic; + char *name; + int num; + int flags; + + /* + * The following routines are called from above. + */ + int (*open)(struct tty_struct *); + void (*close)(struct tty_struct *); + void (*flush_buffer)(struct tty_struct *tty); + ssize_t (*chars_in_buffer)(struct tty_struct *tty); + ssize_t (*read)(struct tty_struct * tty, struct file * file, + unsigned char __user * buf, size_t nr); + ssize_t (*write)(struct tty_struct * tty, struct file * file, + const unsigned char * buf, size_t nr); + int (*ioctl)(struct tty_struct * tty, struct file * file, + unsigned int cmd, unsigned long arg); + long (*compat_ioctl)(struct tty_struct * tty, struct file * file, + unsigned int cmd, unsigned long arg); + void (*set_termios)(struct tty_struct *tty, struct ktermios * old); + unsigned int (*poll)(struct tty_struct *, struct file *, + struct poll_table_struct *); + int (*hangup)(struct tty_struct *tty); + + /* + * The following routines are called from below. + */ + void (*receive_buf)(struct tty_struct *, const unsigned char *cp, + char *fp, int count); + void (*write_wakeup)(struct tty_struct *); + void (*dcd_change)(struct tty_struct *, unsigned int, + struct pps_event_time *); + + struct module *owner; + + int refcount; +}; + +struct tty_ldisc { + struct tty_ldisc_ops *ops; + atomic_t users; +}; + +#define TTY_LDISC_MAGIC 0x5403 + +#define LDISC_FLAG_DEFINED 0x00000001 + +#define MODULE_ALIAS_LDISC(ldisc) \ + MODULE_ALIAS("tty-ldisc-" __stringify(ldisc)) + +#endif /* _LINUX_TTY_LDISC_H */ diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h new file mode 100644 index 00000000..eb5b74a5 --- /dev/null +++ b/include/linux/typecheck.h @@ -0,0 +1,24 @@ +#ifndef TYPECHECK_H_INCLUDED +#define TYPECHECK_H_INCLUDED + +/* + * Check at compile time that something is of a particular type. + * Always evaluates to 1 so you may use it easily in comparisons. + */ +#define typecheck(type,x) \ +({ type __dummy; \ + typeof(x) __dummy2; \ + (void)(&__dummy == &__dummy2); \ + 1; \ +}) + +/* + * Check at compile time that 'function' is a certain type, or is a pointer + * to that type (needs to use typedef for the function type.) 
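As a small illustration, the typecheck() macro defined above is how helpers such as the jiffies comparison macros pin their arguments to a specific type; ex_time_after() below is a hypothetical equivalent.

#include <linux/typecheck.h>

/* Warn at compile time if either timestamp is not an unsigned long. */
static inline int ex_time_after(unsigned long a, unsigned long b)
{
        typecheck(unsigned long, a);    /* evaluates to 1, warns on mismatch */
        typecheck(unsigned long, b);
        return (long)(b - a) < 0;
}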
+ */ +#define typecheck_fn(type,function) \ +({ typeof(type) __tmp = function; \ + (void)__tmp; \ +}) + +#endif /* TYPECHECK_H_INCLUDED */ diff --git a/include/linux/types.h b/include/linux/types.h new file mode 100644 index 00000000..7f480db6 --- /dev/null +++ b/include/linux/types.h @@ -0,0 +1,260 @@ +#ifndef _LINUX_TYPES_H +#define _LINUX_TYPES_H + +#include <asm/types.h> + +#ifndef __ASSEMBLY__ +#ifdef __KERNEL__ + +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] +#else +#ifndef __EXPORTED_HEADERS__ +#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" +#endif /* __EXPORTED_HEADERS__ */ +#endif + +#include <linux/posix_types.h> + +#ifdef __KERNEL__ + +typedef __u32 __kernel_dev_t; + +typedef __kernel_fd_set fd_set; +typedef __kernel_dev_t dev_t; +typedef __kernel_ino_t ino_t; +typedef __kernel_mode_t mode_t; +typedef unsigned short umode_t; +typedef __kernel_nlink_t nlink_t; +typedef __kernel_off_t off_t; +typedef __kernel_pid_t pid_t; +typedef __kernel_daddr_t daddr_t; +typedef __kernel_key_t key_t; +typedef __kernel_suseconds_t suseconds_t; +typedef __kernel_timer_t timer_t; +typedef __kernel_clockid_t clockid_t; +typedef __kernel_mqd_t mqd_t; + +typedef _Bool bool; + +typedef __kernel_uid32_t uid_t; +typedef __kernel_gid32_t gid_t; +typedef __kernel_uid16_t uid16_t; +typedef __kernel_gid16_t gid16_t; + +typedef unsigned long uintptr_t; + +#ifdef CONFIG_UID16 +/* This is defined by include/asm-{arch}/posix_types.h */ +typedef __kernel_old_uid_t old_uid_t; +typedef __kernel_old_gid_t old_gid_t; +#endif /* CONFIG_UID16 */ + +#if defined(__GNUC__) +typedef __kernel_loff_t loff_t; +#endif + +/* + * The following typedefs are also protected by individual ifdefs for + * historical reasons: + */ +#ifndef _SIZE_T +#define _SIZE_T +typedef __kernel_size_t size_t; +#endif + +#ifndef _SSIZE_T +#define _SSIZE_T +typedef __kernel_ssize_t ssize_t; +#endif + +#ifndef _PTRDIFF_T +#define _PTRDIFF_T +typedef __kernel_ptrdiff_t ptrdiff_t; +#endif + +#ifndef _TIME_T +#define _TIME_T +typedef __kernel_time_t time_t; +#endif + +#ifndef _CLOCK_T +#define _CLOCK_T +typedef __kernel_clock_t clock_t; +#endif + +#ifndef _CADDR_T +#define _CADDR_T +typedef __kernel_caddr_t caddr_t; +#endif + +/* bsd */ +typedef unsigned char u_char; +typedef unsigned short u_short; +typedef unsigned int u_int; +typedef unsigned long u_long; + +/* sysv */ +typedef unsigned char unchar; +typedef unsigned short ushort; +typedef unsigned int uint; +typedef unsigned long ulong; + +#ifndef __BIT_TYPES_DEFINED__ +#define __BIT_TYPES_DEFINED__ + +typedef __u8 u_int8_t; +typedef __s8 int8_t; +typedef __u16 u_int16_t; +typedef __s16 int16_t; +typedef __u32 u_int32_t; +typedef __s32 int32_t; + +#endif /* !(__BIT_TYPES_DEFINED__) */ + +typedef __u8 uint8_t; +typedef __u16 uint16_t; +typedef __u32 uint32_t; + +#if defined(__GNUC__) +typedef __u64 uint64_t; +typedef __u64 u_int64_t; +typedef __s64 int64_t; +#endif + +/* this is a special 64bit data type that is 8-byte aligned */ +#define aligned_u64 __u64 __attribute__((aligned(8))) +#define aligned_be64 __be64 __attribute__((aligned(8))) +#define aligned_le64 __le64 __attribute__((aligned(8))) + +/** + * The type used for indexing onto a disc or disc partition. + * + * Linux always considers sectors to be 512 bytes long independently + * of the devices real block size. + * + * blkcnt_t is the type of the inode's block count. 
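As a brief illustration of the DECLARE_BITMAP() helper defined at the top of this header, a hypothetical driver could track 200 busy channels with the usual atomic bit operations.

#include <linux/bitops.h>
#include <linux/types.h>

/* DECLARE_BITMAP sizes the array to BITS_TO_LONGS(200) unsigned longs. */
struct ex_device {
        DECLARE_BITMAP(channel_busy, 200);
};

static bool ex_claim_channel(struct ex_device *dev, unsigned int ch)
{
        return !test_and_set_bit(ch, dev->channel_busy);
}

static void ex_release_channel(struct ex_device *dev, unsigned int ch)
{
        clear_bit(ch, dev->channel_busy);
}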
+ */ +#ifdef CONFIG_LBDAF +typedef u64 sector_t; +typedef u64 blkcnt_t; +#else +typedef unsigned long sector_t; +typedef unsigned long blkcnt_t; +#endif + +/* + * The type of an index into the pagecache. Use a #define so asm/types.h + * can override it. + */ +#ifndef pgoff_t +#define pgoff_t unsigned long +#endif + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +typedef u64 dma_addr_t; +#else +typedef u32 dma_addr_t; +#endif /* dma_addr_t */ + +#endif /* __KERNEL__ */ + +/* + * Below are truly Linux-specific types that should never collide with + * any application/library that wants linux/types.h. + */ + +#ifdef __CHECKER__ +#define __bitwise__ __attribute__((bitwise)) +#else +#define __bitwise__ +#endif +#ifdef __CHECK_ENDIAN__ +#define __bitwise __bitwise__ +#else +#define __bitwise +#endif + +typedef __u16 __bitwise __le16; +typedef __u16 __bitwise __be16; +typedef __u32 __bitwise __le32; +typedef __u32 __bitwise __be32; +typedef __u64 __bitwise __le64; +typedef __u64 __bitwise __be64; + +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; + +/* + * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid + * common 32/64-bit compat problems. + * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other + * architectures) and to 8-byte boundaries on 64-bit architectures. The new + * aligned_64 type enforces 8-byte alignment so that structs containing + * aligned_64 values have the same alignment on 32-bit and 64-bit architectures. + * No conversions are necessary between 32-bit user-space and a 64-bit kernel. + */ +#define __aligned_u64 __u64 __attribute__((aligned(8))) +#define __aligned_be64 __be64 __attribute__((aligned(8))) +#define __aligned_le64 __le64 __attribute__((aligned(8))) + +#ifdef __KERNEL__ +typedef unsigned __bitwise__ gfp_t; +typedef unsigned __bitwise__ fmode_t; + +#ifdef CONFIG_PHYS_ADDR_T_64BIT +typedef u64 phys_addr_t; +#else +typedef u32 phys_addr_t; +#endif + +typedef phys_addr_t resource_size_t; + +/* + * This type is the placeholder for a hardware interrupt number. It has to be + * big enough to enclose whatever representation is used by a given platform. + */ +typedef unsigned long irq_hw_number_t; + +typedef struct { + int counter; +} atomic_t; + +#ifdef CONFIG_64BIT +typedef struct { + long counter; +} atomic64_t; +#endif + +struct list_head { + struct list_head *next, *prev; +}; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +struct ustat { + __kernel_daddr_t f_tfree; + __kernel_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +/** + * struct rcu_head - callback structure for use with RCU + * @next: next update requests in a list + * @func: actual update function to call after the grace period. + */ +struct rcu_head { + struct rcu_head *next; + void (*func)(struct rcu_head *head); +}; + +#endif /* __KERNEL__ */ +#endif /* __ASSEMBLY__ */ +#endif /* _LINUX_TYPES_H */ diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h new file mode 100644 index 00000000..8da8c4e8 --- /dev/null +++ b/include/linux/u64_stats_sync.h @@ -0,0 +1,140 @@ +#ifndef _LINUX_U64_STATS_SYNC_H +#define _LINUX_U64_STATS_SYNC_H + +/* + * To properly implement 64bits network statistics on 32bit and 64bit hosts, + * we provide a synchronization point, that is a noop on 64bit or UP kernels. + * + * Key points : + * 1) Use a seqcount on SMP 32bits, with low overhead. + * 2) Whole thing is a noop on 64bit arches or UP kernels. 
+ * 3) Write side must ensure mutual exclusion or one seqcount update could + * be lost, thus blocking readers forever. + * If this synchronization point is not a mutex, but a spinlock or + * spinlock_bh() or disable_bh() : + * 3.1) Write side should not sleep. + * 3.2) Write side should not allow preemption. + * 3.3) If applicable, interrupts should be disabled. + * + * 4) If reader fetches several counters, there is no guarantee the whole values + * are consistent (remember point 1) : this is a noop on 64bit arches anyway) + * + * 5) readers are allowed to sleep or be preempted/interrupted : They perform + * pure reads. But if they have to fetch many values, it's better to not allow + * preemptions/interruptions to avoid many retries. + * + * 6) If counter might be written by an interrupt, readers should block interrupts. + * (On UP, there is no seqcount_t protection, a reader allowing interrupts could + * read partial values) + * + * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and + * u64_stats_fetch_retry_bh() helpers + * + * Usage : + * + * Stats producer (writer) should use following template granted it already got + * an exclusive access to counters (a lock is already taken, or per cpu + * data is used [in a non preemptable context]) + * + * spin_lock_bh(...) or other synchronization to get exclusive access + * ... + * u64_stats_update_begin(&stats->syncp); + * stats->bytes64 += len; // non atomic operation + * stats->packets64++; // non atomic operation + * u64_stats_update_end(&stats->syncp); + * + * While a consumer (reader) should use following template to get consistent + * snapshot for each variable (but no guarantee on several ones) + * + * u64 tbytes, tpackets; + * unsigned int start; + * + * do { + * start = u64_stats_fetch_begin(&stats->syncp); + * tbytes = stats->bytes64; // non atomic operation + * tpackets = stats->packets64; // non atomic operation + * } while (u64_stats_fetch_retry(&stats->syncp, start)); + * + * + * Example of use in drivers/net/loopback.c, using per_cpu containers, + * in BH disabled context. + */ +#include <linux/seqlock.h> + +struct u64_stats_sync { +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + seqcount_t seq; +#endif +}; + +static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + write_seqcount_begin(&syncp->seq); +#endif +} + +static inline void u64_stats_update_end(struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + write_seqcount_end(&syncp->seq); +#endif +} + +static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_begin(&syncp->seq); +#else +#if BITS_PER_LONG==32 + preempt_disable(); +#endif + return 0; +#endif +} + +static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, + unsigned int start) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_retry(&syncp->seq, start); +#else +#if BITS_PER_LONG==32 + preempt_enable(); +#endif + return false; +#endif +} + +/* + * In case softirq handlers can update u64 counters, readers can use following helpers + * - SMP 32bit arches use seqcount protection, irq safe. + * - UP 32bit must disable BH. + * - 64bit have no problem atomically reading u64 values, irq safe. 
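A concrete (hypothetical) counter structure following the writer/reader template above might look like this; the spinlock stands in for whatever writer-side exclusion the driver already has.

#include <linux/spinlock.h>
#include <linux/u64_stats_sync.h>

struct ex_stats {
        u64                     rx_bytes;
        u64                     rx_packets;
        struct u64_stats_sync   syncp;
        spinlock_t              lock;   /* writer exclusion; spin_lock_init() at setup */
};

static void ex_stats_add(struct ex_stats *s, unsigned int len)
{
        spin_lock_bh(&s->lock);                 /* exclusive writer access */
        u64_stats_update_begin(&s->syncp);
        s->rx_bytes += len;                     /* plain, non-atomic updates */
        s->rx_packets++;
        u64_stats_update_end(&s->syncp);
        spin_unlock_bh(&s->lock);
}

static void ex_stats_read(struct ex_stats *s, u64 *bytes, u64 *packets)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *bytes = s->rx_bytes;           /* each value is a consistent snapshot */
                *packets = s->rx_packets;
        } while (u64_stats_fetch_retry(&s->syncp, start));
}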
+ */ +static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_begin(&syncp->seq); +#else +#if BITS_PER_LONG==32 + local_bh_disable(); +#endif + return 0; +#endif +} + +static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp, + unsigned int start) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + return read_seqcount_retry(&syncp->seq, start); +#else +#if BITS_PER_LONG==32 + local_bh_enable(); +#endif + return false; +#endif +} + +#endif /* _LINUX_U64_STATS_SYNC_H */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h new file mode 100644 index 00000000..5ca0951e --- /dev/null +++ b/include/linux/uaccess.h @@ -0,0 +1,111 @@ +#ifndef __LINUX_UACCESS_H__ +#define __LINUX_UACCESS_H__ + +#include <linux/preempt.h> +#include <asm/uaccess.h> + +/* + * These routines enable/disable the pagefault handler in that + * it will not take any locks and go straight to the fixup table. + * + * They have great resemblance to the preempt_disable/enable calls + * and in fact they are identical; this is because currently there is + * no other way to make the pagefault handlers do this. So we do + * disable preemption but we don't necessarily care about that. + */ +static inline void pagefault_disable(void) +{ + inc_preempt_count(); + /* + * make sure to have issued the store before a pagefault + * can hit. + */ + barrier(); +} + +static inline void pagefault_enable(void) +{ + /* + * make sure to issue those last loads/stores before enabling + * the pagefault handler again. + */ + barrier(); + dec_preempt_count(); + /* + * make sure we do.. + */ + barrier(); + preempt_check_resched(); +} + +#ifndef ARCH_HAS_NOCACHE_UACCESS + +static inline unsigned long __copy_from_user_inatomic_nocache(void *to, + const void __user *from, unsigned long n) +{ + return __copy_from_user_inatomic(to, from, n); +} + +static inline unsigned long __copy_from_user_nocache(void *to, + const void __user *from, unsigned long n) +{ + return __copy_from_user(to, from, n); +} + +#endif /* ARCH_HAS_NOCACHE_UACCESS */ + +/** + * probe_kernel_address(): safely attempt to read from a location + * @addr: address to read from - its type is type typeof(retval)* + * @retval: read into this variable + * + * Safely read from address @addr into variable @revtal. If a kernel fault + * happens, handle that and return -EFAULT. + * We ensure that the __get_user() is executed in atomic context so that + * do_page_fault() doesn't attempt to take mmap_sem. This makes + * probe_kernel_address() suitable for use within regions where the caller + * already holds mmap_sem, or other locks which nest inside mmap_sem. + * This must be a macro because __get_user() needs to know the types of the + * args. + * + * We don't include enough header files to be able to do the set_fs(). We + * require that the probe_kernel_address() caller will do that. + */ +#define probe_kernel_address(addr, retval) \ + ({ \ + long ret; \ + mm_segment_t old_fs = get_fs(); \ + \ + set_fs(KERNEL_DS); \ + pagefault_disable(); \ + ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ + pagefault_enable(); \ + set_fs(old_fs); \ + ret; \ + }) + +/* + * probe_kernel_read(): safely attempt to read from a location + * @dst: pointer to the buffer that shall take the data + * @src: address to read from + * @size: size of the data chunk + * + * Safely read from address @src to the buffer at @dst. 
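As a sketch of the probe_kernel_read() interface documented here, a debugging helper could peek at a possibly bad kernel pointer without risking a fault-induced oops; ex_dump_word() is hypothetical.

#include <linux/kernel.h>
#include <linux/uaccess.h>

static void ex_dump_word(const void *addr)
{
        unsigned long word;

        /* returns -EFAULT instead of faulting if 'addr' is unmapped */
        if (probe_kernel_read(&word, addr, sizeof(word)))
                pr_info("%p: unreadable\n", addr);
        else
                pr_info("%p: %08lx\n", addr, word);
}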
If a kernel fault + * happens, handle that and return -EFAULT. + */ +extern long probe_kernel_read(void *dst, const void *src, size_t size); +extern long __probe_kernel_read(void *dst, const void *src, size_t size); + +/* + * probe_kernel_write(): safely attempt to write to a location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); +extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); + +#endif /* __LINUX_UACCESS_H__ */ diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h new file mode 100644 index 00000000..d21b33c4 --- /dev/null +++ b/include/linux/ucb1400.h @@ -0,0 +1,171 @@ +/* + * Register definitions and functions for: + * Philips UCB1400 driver + * + * Based on ucb1400_ts: + * Author: Nicolas Pitre + * Created: September 25, 2006 + * Copyright: MontaVista Software, Inc. + * + * Spliting done by: Marek Vasut <marek.vasut@gmail.com> + * If something doesn't work and it worked before spliting, e-mail me, + * dont bother Nicolas please ;-) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This code is heavily based on ucb1x00-*.c copyrighted by Russell King + * covering the UCB1100, UCB1200 and UCB1300.. Support for the UCB1400 has + * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request. + */ + +#ifndef _LINUX__UCB1400_H +#define _LINUX__UCB1400_H + +#include <sound/ac97_codec.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> + +/* + * UCB1400 AC-link registers + */ + +#define UCB_IO_DATA 0x5a +#define UCB_IO_DIR 0x5c +#define UCB_IE_RIS 0x5e +#define UCB_IE_FAL 0x60 +#define UCB_IE_STATUS 0x62 +#define UCB_IE_CLEAR 0x62 +#define UCB_IE_ADC (1 << 11) +#define UCB_IE_TSPX (1 << 12) + +#define UCB_TS_CR 0x64 +#define UCB_TS_CR_TSMX_POW (1 << 0) +#define UCB_TS_CR_TSPX_POW (1 << 1) +#define UCB_TS_CR_TSMY_POW (1 << 2) +#define UCB_TS_CR_TSPY_POW (1 << 3) +#define UCB_TS_CR_TSMX_GND (1 << 4) +#define UCB_TS_CR_TSPX_GND (1 << 5) +#define UCB_TS_CR_TSMY_GND (1 << 6) +#define UCB_TS_CR_TSPY_GND (1 << 7) +#define UCB_TS_CR_MODE_INT (0 << 8) +#define UCB_TS_CR_MODE_PRES (1 << 8) +#define UCB_TS_CR_MODE_POS (2 << 8) +#define UCB_TS_CR_BIAS_ENA (1 << 11) +#define UCB_TS_CR_TSPX_LOW (1 << 12) +#define UCB_TS_CR_TSMX_LOW (1 << 13) + +#define UCB_ADC_CR 0x66 +#define UCB_ADC_SYNC_ENA (1 << 0) +#define UCB_ADC_VREFBYP_CON (1 << 1) +#define UCB_ADC_INP_TSPX (0 << 2) +#define UCB_ADC_INP_TSMX (1 << 2) +#define UCB_ADC_INP_TSPY (2 << 2) +#define UCB_ADC_INP_TSMY (3 << 2) +#define UCB_ADC_INP_AD0 (4 << 2) +#define UCB_ADC_INP_AD1 (5 << 2) +#define UCB_ADC_INP_AD2 (6 << 2) +#define UCB_ADC_INP_AD3 (7 << 2) +#define UCB_ADC_EXT_REF (1 << 5) +#define UCB_ADC_START (1 << 7) +#define UCB_ADC_ENA (1 << 15) + +#define UCB_ADC_DATA 0x68 +#define UCB_ADC_DAT_VALID (1 << 15) + +#define UCB_FCSR 0x6c +#define UCB_FCSR_AVE (1 << 12) + +#define UCB_ADC_DAT_MASK 0x3ff + +#define UCB_ID 0x7e +#define UCB_ID_1400 0x4304 + +struct ucb1400_gpio_data { + int gpio_offset; + int (*gpio_setup)(struct device *dev, int ngpio); + int (*gpio_teardown)(struct device *dev, int ngpio); +}; + +struct ucb1400_gpio { 
+ struct gpio_chip gc; + struct snd_ac97 *ac97; +}; + +struct ucb1400_ts { + struct input_dev *ts_idev; + int id; + int irq; + struct snd_ac97 *ac97; + wait_queue_head_t ts_wait; + bool stopped; +}; + +struct ucb1400 { + struct platform_device *ucb1400_ts; + struct platform_device *ucb1400_gpio; +}; + +struct ucb1400_pdata { + int irq; +}; + +static inline u16 ucb1400_reg_read(struct snd_ac97 *ac97, u16 reg) +{ + return ac97->bus->ops->read(ac97, reg); +} + +static inline void ucb1400_reg_write(struct snd_ac97 *ac97, u16 reg, u16 val) +{ + ac97->bus->ops->write(ac97, reg, val); +} + +static inline u16 ucb1400_gpio_get_value(struct snd_ac97 *ac97, u16 gpio) +{ + return ucb1400_reg_read(ac97, UCB_IO_DATA) & (1 << gpio); +} + +static inline void ucb1400_gpio_set_value(struct snd_ac97 *ac97, u16 gpio, + u16 val) +{ + ucb1400_reg_write(ac97, UCB_IO_DATA, val ? + ucb1400_reg_read(ac97, UCB_IO_DATA) | (1 << gpio) : + ucb1400_reg_read(ac97, UCB_IO_DATA) & ~(1 << gpio)); +} + +static inline u16 ucb1400_gpio_get_direction(struct snd_ac97 *ac97, u16 gpio) +{ + return ucb1400_reg_read(ac97, UCB_IO_DIR) & (1 << gpio); +} + +static inline void ucb1400_gpio_set_direction(struct snd_ac97 *ac97, u16 gpio, + u16 dir) +{ + ucb1400_reg_write(ac97, UCB_IO_DIR, dir ? + ucb1400_reg_read(ac97, UCB_IO_DIR) | (1 << gpio) : + ucb1400_reg_read(ac97, UCB_IO_DIR) & ~(1 << gpio)); +} + +static inline void ucb1400_adc_enable(struct snd_ac97 *ac97) +{ + ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA); +} + +static inline void ucb1400_adc_disable(struct snd_ac97 *ac97) +{ + ucb1400_reg_write(ac97, UCB_ADC_CR, 0); +} + + +unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, + int adcsync); + +#ifdef CONFIG_GPIO_UCB1400 +void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data); +#else +static inline void ucb1400_gpio_set_data(struct ucb1400_gpio_data *data) {} +#endif + +#endif diff --git a/include/linux/udf_fs_i.h b/include/linux/udf_fs_i.h new file mode 100644 index 00000000..35369659 --- /dev/null +++ b/include/linux/udf_fs_i.h @@ -0,0 +1,21 @@ +/* + * udf_fs_i.h + * + * This file is intended for the Linux kernel/module. + * + * COPYRIGHT + * This file is distributed under the terms of the GNU General Public + * License (GPL). Copies of the GPL can be obtained from: + * ftp://prep.ai.mit.edu/pub/gnu/GPL + * Each contributing author retains all rights to their own work. + */ +#ifndef _UDF_FS_I_H +#define _UDF_FS_I_H 1 + +/* exported IOCTLs, we have 'l', 0x40-0x7f */ +#define UDF_GETEASIZE _IOR('l', 0x40, int) +#define UDF_GETEABLOCK _IOR('l', 0x41, void *) +#define UDF_GETVOLIDENT _IOR('l', 0x42, void *) +#define UDF_RELOCATE_BLOCKS _IOWR('l', 0x43, long) + +#endif /* _UDF_FS_I_H */ diff --git a/include/linux/udp.h b/include/linux/udp.h new file mode 100644 index 00000000..03f72a2b --- /dev/null +++ b/include/linux/udp.h @@ -0,0 +1,101 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the UDP protocol. + * + * Version: @(#)udp.h 1.0.2 04/28/93 + * + * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */
+#ifndef _LINUX_UDP_H
+#define _LINUX_UDP_H
+
+#include <linux/types.h>
+
+struct udphdr {
+ __be16 source;
+ __be16 dest;
+ __be16 len;
+ __sum16 check;
+};
+
+/* UDP socket options */
+#define UDP_CORK 1 /* Never send partially complete segments */
+#define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */
+
+/* UDP encapsulation types */
+#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
+#define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-06 */
+#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
+
+#ifdef __KERNEL__
+#include <net/inet_sock.h>
+#include <linux/skbuff.h>
+#include <net/netns/hash.h>
+
+static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
+{
+ return (struct udphdr *)skb_transport_header(skb);
+}
+
+#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
+
+static inline int udp_hashfn(struct net *net, unsigned num, unsigned mask)
+{
+ return (num + net_hash_mix(net)) & mask;
+}
+
+struct udp_sock {
+ /* inet_sock has to be the first member */
+ struct inet_sock inet;
+#define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
+#define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
+#define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
+ int pending; /* Any pending frames ? */
+ unsigned int corkflag; /* Cork is required */
+ __u16 encap_type; /* Is this an Encapsulation socket? */
+ /*
+ * Following member retains the information to create a UDP header
+ * when the socket is uncorked.
+ */
+ __u16 len; /* total length of pending frames */
+ /*
+ * Fields specific to UDP-Lite.
+ */
+ __u16 pcslen;
+ __u16 pcrlen;
+/* indicator bits used by pcflag: */
+#define UDPLITE_BIT 0x1 /* set by udplite proto init function */
+#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
+#define UDPLITE_RECV_CC 0x4 /* set via udplite setsockopt */
+ __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
+ __u8 unused[3];
+ /*
+ * For encapsulation sockets.
+ */
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+};
+
+static inline struct udp_sock *udp_sk(const struct sock *sk)
+{
+ return (struct udp_sock *)sk;
+}
+
+#define udp_portaddr_for_each_entry(__sk, node, list) \
+ hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
+
+#define udp_portaddr_for_each_entry_rcu(__sk, node, list) \
+ hlist_nulls_for_each_entry_rcu(__sk, node, list, __sk_common.skc_portaddr_node)
+
+#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
+
+#endif
+
+#endif /* _LINUX_UDP_H */
diff --git a/include/linux/uhid.h b/include/linux/uhid.h
new file mode 100644
index 00000000..9c6974f1
--- /dev/null
+++ b/include/linux/uhid.h
@@ -0,0 +1,104 @@
+#ifndef __UHID_H_
+#define __UHID_H_
+
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Public header for user-space communication. We try to keep every structure
+ * aligned but to be safe we also use __attribute__((__packed__)). Therefore,
+ * the communication should be ABI compatible even between architectures.
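+ *
+ * A minimal user-space sketch of the intended flow (the "/dev/uhid" node
+ * name is an assumption here; it is provided by the uhid character device
+ * driver, not by this header). rd_desc and rd_desc_size stand for the HID
+ * report descriptor of the device being created:
+ *
+ *	struct uhid_event ev = { .type = UHID_CREATE };
+ *	strcpy((char *)ev.u.create.name, "example-device");
+ *	ev.u.create.rd_data = rd_desc;
+ *	ev.u.create.rd_size = rd_desc_size;
+ *	fd = open("/dev/uhid", O_RDWR);
+ *	write(fd, &ev, sizeof(ev));
+ *
+ * Input reports are then injected by writing further events with
+ * ev.type = UHID_INPUT and ev.u.input filled in.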
+ */
+
+#include <linux/input.h>
+#include <linux/types.h>
+
+enum uhid_event_type {
+ UHID_CREATE,
+ UHID_DESTROY,
+ UHID_START,
+ UHID_STOP,
+ UHID_OPEN,
+ UHID_CLOSE,
+ UHID_OUTPUT,
+ UHID_OUTPUT_EV,
+ UHID_INPUT,
+ UHID_FEATURE,
+ UHID_FEATURE_ANSWER,
+};
+
+struct uhid_create_req {
+ __u8 name[128];
+ __u8 phys[64];
+ __u8 uniq[64];
+ __u8 __user *rd_data;
+ __u16 rd_size;
+
+ __u16 bus;
+ __u32 vendor;
+ __u32 product;
+ __u32 version;
+ __u32 country;
+} __attribute__((__packed__));
+
+#define UHID_DATA_MAX 4096
+
+enum uhid_report_type {
+ UHID_FEATURE_REPORT,
+ UHID_OUTPUT_REPORT,
+ UHID_INPUT_REPORT,
+};
+
+struct uhid_input_req {
+ __u8 data[UHID_DATA_MAX];
+ __u16 size;
+} __attribute__((__packed__));
+
+struct uhid_output_req {
+ __u8 data[UHID_DATA_MAX];
+ __u16 size;
+ __u8 rtype;
+} __attribute__((__packed__));
+
+struct uhid_output_ev_req {
+ __u16 type;
+ __u16 code;
+ __s32 value;
+} __attribute__((__packed__));
+
+struct uhid_feature_req {
+ __u32 id;
+ __u8 rnum;
+ __u8 rtype;
+} __attribute__((__packed__));
+
+struct uhid_feature_answer_req {
+ __u32 id;
+ __u16 err;
+ __u16 size;
+ __u8 data[UHID_DATA_MAX];
+} __attribute__((__packed__));
+
+struct uhid_event {
+ __u32 type;
+
+ union {
+ struct uhid_create_req create;
+ struct uhid_input_req input;
+ struct uhid_output_req output;
+ struct uhid_output_ev_req output_ev;
+ struct uhid_feature_req feature;
+ struct uhid_feature_answer_req feature_answer;
+ } u;
+} __attribute__((__packed__));
+
+#endif /* __UHID_H_ */
diff --git a/include/linux/uid_stat.h b/include/linux/uid_stat.h
new file mode 100644
index 00000000..6bd6c4e5
--- /dev/null
+++ b/include/linux/uid_stat.h
@@ -0,0 +1,29 @@
+/* include/linux/uid_stat.h
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __uid_stat_h
+#define __uid_stat_h
+
+/* Contains definitions for resource tracking per uid. */
+
+#ifdef CONFIG_UID_STAT
+int uid_stat_tcp_snd(uid_t uid, int size);
+int uid_stat_tcp_rcv(uid_t uid, int size);
+#else
+#define uid_stat_tcp_snd(uid, size) do {} while (0)
+#define uid_stat_tcp_rcv(uid, size) do {} while (0)
+#endif
+
+#endif /* __uid_stat_h */
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
new file mode 100644
index 00000000..2aa2881b
--- /dev/null
+++ b/include/linux/uinput.h
@@ -0,0 +1,176 @@
+#ifndef __UINPUT_H_
+#define __UINPUT_H_
+/*
+ * User level driver support for input subsystem
+ *
+ * Heavily based on evdev.c by Vojtech Pavlik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> + * + * Changes/Revisions: + * 0.3 24/05/2006 (Anssi Hannula <anssi.hannulagmail.com>) + * - update ff support for the changes in kernel interface + * - add UINPUT_VERSION + * 0.2 16/10/2004 (Micah Dowty <micah@navi.cx>) + * - added force feedback support + * - added UI_SET_PHYS + * 0.1 20/06/2002 + * - first public version + */ + +#include <linux/input.h> + +#define UINPUT_VERSION 3 + +#ifdef __KERNEL__ +#define UINPUT_NAME "uinput" +#define UINPUT_BUFFER_SIZE 16 +#define UINPUT_NUM_REQUESTS 16 + +enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED }; + +struct uinput_request { + int id; + int code; /* UI_FF_UPLOAD, UI_FF_ERASE */ + + int retval; + struct completion done; + + union { + int effect_id; + struct { + struct ff_effect *effect; + struct ff_effect *old; + } upload; + } u; +}; + +struct uinput_device { + struct input_dev *dev; + struct mutex mutex; + enum uinput_state state; + wait_queue_head_t waitq; + unsigned char ready; + unsigned char head; + unsigned char tail; + struct input_event buff[UINPUT_BUFFER_SIZE]; + unsigned int ff_effects_max; + + struct uinput_request *requests[UINPUT_NUM_REQUESTS]; + wait_queue_head_t requests_waitq; + spinlock_t requests_lock; +}; +#endif /* __KERNEL__ */ + +struct uinput_ff_upload { + int request_id; + int retval; + struct ff_effect effect; + struct ff_effect old; +}; + +struct uinput_ff_erase { + int request_id; + int retval; + int effect_id; +}; + +/* ioctl */ +#define UINPUT_IOCTL_BASE 'U' +#define UI_DEV_CREATE _IO(UINPUT_IOCTL_BASE, 1) +#define UI_DEV_DESTROY _IO(UINPUT_IOCTL_BASE, 2) + +#define UI_SET_EVBIT _IOW(UINPUT_IOCTL_BASE, 100, int) +#define UI_SET_KEYBIT _IOW(UINPUT_IOCTL_BASE, 101, int) +#define UI_SET_RELBIT _IOW(UINPUT_IOCTL_BASE, 102, int) +#define UI_SET_ABSBIT _IOW(UINPUT_IOCTL_BASE, 103, int) +#define UI_SET_MSCBIT _IOW(UINPUT_IOCTL_BASE, 104, int) +#define UI_SET_LEDBIT _IOW(UINPUT_IOCTL_BASE, 105, int) +#define UI_SET_SNDBIT _IOW(UINPUT_IOCTL_BASE, 106, int) +#define UI_SET_FFBIT _IOW(UINPUT_IOCTL_BASE, 107, int) +#define UI_SET_PHYS _IOW(UINPUT_IOCTL_BASE, 108, char*) +#define UI_SET_SWBIT _IOW(UINPUT_IOCTL_BASE, 109, int) +#define UI_SET_PROPBIT _IOW(UINPUT_IOCTL_BASE, 110, int) + +#define UI_BEGIN_FF_UPLOAD _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload) +#define UI_END_FF_UPLOAD _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload) +#define UI_BEGIN_FF_ERASE _IOWR(UINPUT_IOCTL_BASE, 202, struct uinput_ff_erase) +#define UI_END_FF_ERASE _IOW(UINPUT_IOCTL_BASE, 203, struct uinput_ff_erase) + +/* + * To write a force-feedback-capable driver, the upload_effect + * and erase_effect callbacks in input_dev must be implemented. + * The uinput driver will generate a fake input event when one of + * these callbacks are invoked. The userspace code then uses + * ioctls to retrieve additional parameters and send the return code. + * The callback blocks until this return code is sent. + * + * The described callback mechanism is only used if ff_effects_max + * is set. + * + * To implement upload_effect(): + * 1. Wait for an event with type == EV_UINPUT and code == UI_FF_UPLOAD. + * A request ID will be given in 'value'. + * 2. 
Allocate a uinput_ff_upload struct, fill in request_id with
+ * the 'value' from the EV_UINPUT event.
+ * 3. Issue a UI_BEGIN_FF_UPLOAD ioctl, giving it the
+ * uinput_ff_upload struct. It will be filled in with the
+ * ff_effects passed to upload_effect().
+ * 4. Perform the effect upload, and place a return code back into
+ * the uinput_ff_upload struct.
+ * 5. Issue a UI_END_FF_UPLOAD ioctl, also giving it the
+ * uinput_ff_upload struct. This will complete execution
+ * of our upload_effect() handler.
+ *
+ * To implement erase_effect():
+ * 1. Wait for an event with type == EV_UINPUT and code == UI_FF_ERASE.
+ * A request ID will be given in 'value'.
+ * 2. Allocate a uinput_ff_erase struct, fill in request_id with
+ * the 'value' from the EV_UINPUT event.
+ * 3. Issue a UI_BEGIN_FF_ERASE ioctl, giving it the
+ * uinput_ff_erase struct. It will be filled in with the
+ * effect ID passed to erase_effect().
+ * 4. Perform the effect erasure, and place a return code back
+ * into the uinput_ff_erase struct.
+ * 5. Issue a UI_END_FF_ERASE ioctl, also giving it the
+ * uinput_ff_erase struct. This will complete execution
+ * of our erase_effect() handler.
+ */
+
+/*
+ * This is the new event type, used only by uinput.
+ * 'code' is UI_FF_UPLOAD or UI_FF_ERASE, and 'value'
+ * is the unique request ID. This number was picked
+ * arbitrarily, above EV_MAX (since the input system
+ * never sees it) but in the range of a 16-bit int.
+ */
+#define EV_UINPUT 0x0101
+#define UI_FF_UPLOAD 1
+#define UI_FF_ERASE 2
+
+#define UINPUT_MAX_NAME_SIZE 80
+struct uinput_user_dev {
+ char name[UINPUT_MAX_NAME_SIZE];
+ struct input_id id;
+ int ff_effects_max;
+ int absmax[ABS_CNT];
+ int absmin[ABS_CNT];
+ int absfuzz[ABS_CNT];
+ int absflat[ABS_CNT];
+};
+#endif /* __UINPUT_H_ */
+
diff --git a/include/linux/uio.h b/include/linux/uio.h
new file mode 100644
index 00000000..98c11432
--- /dev/null
+++ b/include/linux/uio.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_UIO_H
+#define __LINUX_UIO_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/*
+ * Berkeley style UIO structures - Alan Cox 1994.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct iovec
+{
+ void __user *iov_base; /* BSD uses caddr_t (1003.1g requires void *) */
+ __kernel_size_t iov_len; /* Must be size_t (1003.1g) */
+};
+
+/*
+ * UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
+ */
+
+#define UIO_FASTIOV 8
+#define UIO_MAXIOV 1024
+
+#ifdef __KERNEL__
+
+struct kvec {
+ void *iov_base; /* and that should *never* hold a userland pointer */
+ size_t iov_len;
+};
+
+/*
+ * Total number of bytes covered by an iovec.
+ *
+ * NOTE that it is not safe to use this function until all the iovec's
+ * segment lengths have been validated, because the individual lengths can
+ * overflow a size_t when added together.
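+ *
+ * A minimal usage sketch, assuming each segment length has already been
+ * validated by the caller:
+ *
+ *	size_t total = iov_length(iov, nr_segs);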
+ */ +static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs) +{ + unsigned long seg; + size_t ret = 0; + + for (seg = 0; seg < nr_segs; seg++) + ret += iov[seg].iov_len; + return ret; +} + +unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); +#endif + +#endif diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h new file mode 100644 index 00000000..1ad47244 --- /dev/null +++ b/include/linux/uio_driver.h @@ -0,0 +1,128 @@ +/* + * include/linux/uio_driver.h + * + * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> + * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> + * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de> + * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> + * + * Userspace IO driver. + * + * Licensed under the GPLv2 only. + */ + +#ifndef _UIO_DRIVER_H_ +#define _UIO_DRIVER_H_ + +#include <linux/fs.h> +#include <linux/interrupt.h> + +struct module; +struct uio_map; + +/** + * struct uio_mem - description of a UIO memory region + * @name: name of the memory region for identification + * @addr: address of the device's memory (phys_addr is used since + * addr can be logical, virtual, or physical & phys_addr_t + * should always be large enough to handle any of the + * address types) + * @size: size of IO + * @memtype: type of memory addr points to + * @internal_addr: ioremap-ped version of addr, for driver internal use + * @map: for use by the UIO core only. + */ +struct uio_mem { + const char *name; + phys_addr_t addr; + unsigned long size; + int memtype; + void __iomem *internal_addr; + struct uio_map *map; +}; + +#define MAX_UIO_MAPS 5 + +struct uio_portio; + +/** + * struct uio_port - description of a UIO port region + * @name: name of the port region for identification + * @start: start of port region + * @size: size of port region + * @porttype: type of port (see UIO_PORT_* below) + * @portio: for use by the UIO core only. 
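+ *
+ * A driver describes a region by filling one of the entries embedded in
+ * its struct uio_info (declared below); for example, a sketch with
+ * made-up values:
+ *
+ *	info->port[0].name = "serial-io";
+ *	info->port[0].start = 0x3f8;
+ *	info->port[0].size = 8;
+ *	info->port[0].porttype = UIO_PORT_X86;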
+ */ +struct uio_port { + const char *name; + unsigned long start; + unsigned long size; + int porttype; + struct uio_portio *portio; +}; + +#define MAX_UIO_PORT_REGIONS 5 + +struct uio_device; + +/** + * struct uio_info - UIO device capabilities + * @uio_dev: the UIO device this info belongs to + * @name: device name + * @version: device driver version + * @mem: list of mappable memory regions, size==0 for end of list + * @port: list of port regions, size==0 for end of list + * @irq: interrupt number or UIO_IRQ_CUSTOM + * @irq_flags: flags for request_irq() + * @priv: optional private data + * @handler: the device's irq handler + * @mmap: mmap operation for this uio device + * @open: open operation for this uio device + * @release: release operation for this uio device + * @irqcontrol: disable/enable irqs when 0/1 is written to /dev/uioX + */ +struct uio_info { + struct uio_device *uio_dev; + const char *name; + const char *version; + struct uio_mem mem[MAX_UIO_MAPS]; + struct uio_port port[MAX_UIO_PORT_REGIONS]; + long irq; + unsigned long irq_flags; + void *priv; + irqreturn_t (*handler)(int irq, struct uio_info *dev_info); + int (*mmap)(struct uio_info *info, struct vm_area_struct *vma); + int (*open)(struct uio_info *info, struct inode *inode); + int (*release)(struct uio_info *info, struct inode *inode); + int (*irqcontrol)(struct uio_info *info, s32 irq_on); +}; + +extern int __must_check + __uio_register_device(struct module *owner, + struct device *parent, + struct uio_info *info); + +/* use a define to avoid include chaining to get THIS_MODULE */ +#define uio_register_device(parent, info) \ + __uio_register_device(THIS_MODULE, parent, info) + +extern void uio_unregister_device(struct uio_info *info); +extern void uio_event_notify(struct uio_info *info); + +/* defines for uio_info->irq */ +#define UIO_IRQ_CUSTOM -1 +#define UIO_IRQ_NONE 0 + +/* defines for uio_mem->memtype */ +#define UIO_MEM_NONE 0 +#define UIO_MEM_PHYS 1 +#define UIO_MEM_LOGICAL 2 +#define UIO_MEM_VIRTUAL 3 + +/* defines for uio_port->porttype */ +#define UIO_PORT_NONE 0 +#define UIO_PORT_X86 1 +#define UIO_PORT_GPIO 2 +#define UIO_PORT_OTHER 3 + +#endif /* _LINUX_UIO_DRIVER_H_ */ diff --git a/include/linux/ultrasound.h b/include/linux/ultrasound.h new file mode 100644 index 00000000..71339dc5 --- /dev/null +++ b/include/linux/ultrasound.h @@ -0,0 +1,103 @@ +#ifndef _ULTRASOUND_H_ +#define _ULTRASOUND_H_ +/* + * ultrasound.h - Macros for programming the Gravis Ultrasound + * These macros are extremely device dependent + * and not portable. + */ +/* + * Copyright (C) by Hannu Savolainen 1993-1997 + * + * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) + * Version 2 (June 1991). See the "COPYING" file distributed with this software + * for more info. + */ + + +/* + * Private events for Gravis Ultrasound (GUS) + * + * Format: + * byte 0 - SEQ_PRIVATE (0xfe) + * byte 1 - Synthesizer device number (0-N) + * byte 2 - Command (see below) + * byte 3 - Voice number (0-31) + * bytes 4 and 5 - parameter P1 (unsigned short) + * bytes 6 and 7 - parameter P2 (unsigned short) + * + * Commands: + * Each command affects one voice defined in byte 3. + * Unused parameters (P1 and/or P2 *MUST* be initialized to zero). + * _GUS_NUMVOICES - Sets max. number of concurrent voices (P1=14-31, default 16) + * _GUS_VOICESAMPLE- ************ OBSOLETE ************* + * _GUS_VOICEON - Starts voice (P1=voice mode) + * _GUS_VOICEOFF - Stops voice (no parameters) + * _GUS_VOICEFADE - Stops the voice smoothly. 
+ * _GUS_VOICEMODE - Alters the voice mode, don't start or stop voice (P1=voice mode) + * _GUS_VOICEBALA - Sets voice balance (P1, 0=left, 7=middle and 15=right, default 7) + * _GUS_VOICEFREQ - Sets voice (sample) playback frequency (P1=Hz) + * _GUS_VOICEVOL - Sets voice volume (P1=volume, 0xfff=max, 0xeff=half, 0x000=off) + * _GUS_VOICEVOL2 - Sets voice volume (P1=volume, 0xfff=max, 0xeff=half, 0x000=off) + * (Like GUS_VOICEVOL but doesn't change the hw + * volume. It just updates volume in the voice table). + * + * _GUS_RAMPRANGE - Sets limits for volume ramping (P1=low volume, P2=high volume) + * _GUS_RAMPRATE - Sets the speed for volume ramping (P1=scale, P2=rate) + * _GUS_RAMPMODE - Sets the volume ramping mode (P1=ramping mode) + * _GUS_RAMPON - Starts volume ramping (no parameters) + * _GUS_RAMPOFF - Stops volume ramping (no parameters) + * _GUS_VOLUME_SCALE - Changes the volume calculation constants + * for all voices. + */ + +#define _GUS_NUMVOICES 0x00 +#define _GUS_VOICESAMPLE 0x01 /* OBSOLETE */ +#define _GUS_VOICEON 0x02 +#define _GUS_VOICEOFF 0x03 +#define _GUS_VOICEMODE 0x04 +#define _GUS_VOICEBALA 0x05 +#define _GUS_VOICEFREQ 0x06 +#define _GUS_VOICEVOL 0x07 +#define _GUS_RAMPRANGE 0x08 +#define _GUS_RAMPRATE 0x09 +#define _GUS_RAMPMODE 0x0a +#define _GUS_RAMPON 0x0b +#define _GUS_RAMPOFF 0x0c +#define _GUS_VOICEFADE 0x0d +#define _GUS_VOLUME_SCALE 0x0e +#define _GUS_VOICEVOL2 0x0f +#define _GUS_VOICE_POS 0x10 + +/* + * GUS API macros + */ + +#define _GUS_CMD(chn, voice, cmd, p1, p2) \ + {_SEQ_NEEDBUF(8); _seqbuf[_seqbufptr] = SEQ_PRIVATE;\ + _seqbuf[_seqbufptr+1] = (chn); _seqbuf[_seqbufptr+2] = cmd;\ + _seqbuf[_seqbufptr+3] = voice;\ + *(unsigned short*)&_seqbuf[_seqbufptr+4] = p1;\ + *(unsigned short*)&_seqbuf[_seqbufptr+6] = p2;\ + _SEQ_ADVBUF(8);} + +#define GUS_NUMVOICES(chn, p1) _GUS_CMD(chn, 0, _GUS_NUMVOICES, (p1), 0) +#define GUS_VOICESAMPLE(chn, voice, p1) _GUS_CMD(chn, voice, _GUS_VOICESAMPLE, (p1), 0) /* OBSOLETE */ +#define GUS_VOICEON(chn, voice, p1) _GUS_CMD(chn, voice, _GUS_VOICEON, (p1), 0) +#define GUS_VOICEOFF(chn, voice) _GUS_CMD(chn, voice, _GUS_VOICEOFF, 0, 0) +#define GUS_VOICEFADE(chn, voice) _GUS_CMD(chn, voice, _GUS_VOICEFADE, 0, 0) +#define GUS_VOICEMODE(chn, voice, p1) _GUS_CMD(chn, voice, _GUS_VOICEMODE, (p1), 0) +#define GUS_VOICEBALA(chn, voice, p1) _GUS_CMD(chn, voice, _GUS_VOICEBALA, (p1), 0) +#define GUS_VOICEFREQ(chn, voice, p) _GUS_CMD(chn, voice, _GUS_VOICEFREQ, \ + (p) & 0xffff, ((p) >> 16) & 0xffff) +#define GUS_VOICEVOL(chn, voice, p1) _GUS_CMD(chn, voice, _GUS_VOICEVOL, (p1), 0) +#define GUS_VOICEVOL2(chn, voice, p1) _GUS_CMD(chn, voice, _GUS_VOICEVOL2, (p1), 0) +#define GUS_RAMPRANGE(chn, voice, low, high) _GUS_CMD(chn, voice, _GUS_RAMPRANGE, (low), (high)) +#define GUS_RAMPRATE(chn, voice, p1, p2) _GUS_CMD(chn, voice, _GUS_RAMPRATE, (p1), (p2)) +#define GUS_RAMPMODE(chn, voice, p1) _GUS_CMD(chn, voice, _GUS_RAMPMODE, (p1), 0) +#define GUS_RAMPON(chn, voice, p1) _GUS_CMD(chn, voice, _GUS_RAMPON, (p1), 0) +#define GUS_RAMPOFF(chn, voice) _GUS_CMD(chn, voice, _GUS_RAMPOFF, 0, 0) +#define GUS_VOLUME_SCALE(chn, voice, p1, p2) _GUS_CMD(chn, voice, _GUS_VOLUME_SCALE, (p1), (p2)) +#define GUS_VOICE_POS(chn, voice, p) _GUS_CMD(chn, voice, _GUS_VOICE_POS, \ + (p) & 0xffff, ((p) >> 16) & 0xffff) + +#endif diff --git a/include/linux/un.h b/include/linux/un.h new file mode 100644 index 00000000..3ed3e46c --- /dev/null +++ b/include/linux/un.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_UN_H +#define _LINUX_UN_H + +#include <linux/socket.h> + +#define 
UNIX_PATH_MAX 108 + +struct sockaddr_un { + __kernel_sa_family_t sun_family; /* AF_UNIX */ + char sun_path[UNIX_PATH_MAX]; /* pathname */ +}; + +#endif /* _LINUX_UN_H */ diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h new file mode 100644 index 00000000..99c1b4d2 --- /dev/null +++ b/include/linux/unaligned/access_ok.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_UNALIGNED_ACCESS_OK_H +#define _LINUX_UNALIGNED_ACCESS_OK_H + +#include <linux/kernel.h> +#include <asm/byteorder.h> + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpup((__le16 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpup((__le32 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpup((__le64 *)p); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpup((__be16 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpup((__be32 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpup((__be64 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + *((__le16 *)p) = cpu_to_le16(val); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + *((__le32 *)p) = cpu_to_le32(val); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + *((__le64 *)p) = cpu_to_le64(val); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + *((__be16 *)p) = cpu_to_be16(val); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + *((__be32 *)p) = cpu_to_be32(val); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + *((__be64 *)p) = cpu_to_be64(val); +} + +#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */ diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h new file mode 100644 index 00000000..9356b242 --- /dev/null +++ b/include/linux/unaligned/be_byteshift.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H +#define _LINUX_UNALIGNED_BE_BYTESHIFT_H + +#include <linux/types.h> + +static inline u16 __get_unaligned_be16(const u8 *p) +{ + return p[0] << 8 | p[1]; +} + +static inline u32 __get_unaligned_be32(const u8 *p) +{ + return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; +} + +static inline u64 __get_unaligned_be64(const u8 *p) +{ + return (u64)__get_unaligned_be32(p) << 32 | + __get_unaligned_be32(p + 4); +} + +static inline void __put_unaligned_be16(u16 val, u8 *p) +{ + *p++ = val >> 8; + *p++ = val; +} + +static inline void __put_unaligned_be32(u32 val, u8 *p) +{ + __put_unaligned_be16(val >> 16, p); + __put_unaligned_be16(val, p + 2); +} + +static inline void __put_unaligned_be64(u64 val, u8 *p) +{ + __put_unaligned_be32(val >> 32, p); + __put_unaligned_be32(val, p + 4); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_be16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_be32((const u8 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return __get_unaligned_be64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_be16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_be32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_be64(val, p); +} + +#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */ diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h new file mode 
100644 index 00000000..c2a76c5c --- /dev/null +++ b/include/linux/unaligned/be_memmove.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H +#define _LINUX_UNALIGNED_BE_MEMMOVE_H + +#include <linux/unaligned/memmove.h> + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_memmove16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_memmove32((const u8 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return __get_unaligned_memmove64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_memmove16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_memmove32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_memmove64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h new file mode 100644 index 00000000..13241583 --- /dev/null +++ b/include/linux/unaligned/be_struct.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_BE_STRUCT_H +#define _LINUX_UNALIGNED_BE_STRUCT_H + +#include <linux/unaligned/packed_struct.h> + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_cpu16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_cpu32((const u8 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return __get_unaligned_cpu64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_cpu16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_cpu32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_cpu64(val, p); +} + +#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */ diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h new file mode 100644 index 00000000..02d97ff3 --- /dev/null +++ b/include/linux/unaligned/generic.h @@ -0,0 +1,68 @@ +#ifndef _LINUX_UNALIGNED_GENERIC_H +#define _LINUX_UNALIGNED_GENERIC_H + +/* + * Cause a link-time error if we try an unaligned access other than + * 1,2,4 or 8 bytes long + */ +extern void __bad_unaligned_access_size(void); + +#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __put_unaligned_le(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_le16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_le32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_le64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } 
\ + (void)0; }) + +#define __put_unaligned_be(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_be16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_be32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_be64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#endif /* _LINUX_UNALIGNED_GENERIC_H */ diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h new file mode 100644 index 00000000..be376fb7 --- /dev/null +++ b/include/linux/unaligned/le_byteshift.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H +#define _LINUX_UNALIGNED_LE_BYTESHIFT_H + +#include <linux/types.h> + +static inline u16 __get_unaligned_le16(const u8 *p) +{ + return p[0] | p[1] << 8; +} + +static inline u32 __get_unaligned_le32(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; +} + +static inline u64 __get_unaligned_le64(const u8 *p) +{ + return (u64)__get_unaligned_le32(p + 4) << 32 | + __get_unaligned_le32(p); +} + +static inline void __put_unaligned_le16(u16 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; +} + +static inline void __put_unaligned_le32(u32 val, u8 *p) +{ + __put_unaligned_le16(val >> 16, p + 2); + __put_unaligned_le16(val, p); +} + +static inline void __put_unaligned_le64(u64 val, u8 *p) +{ + __put_unaligned_le32(val >> 32, p + 4); + __put_unaligned_le32(val, p); +} + +static inline u16 get_unaligned_le16(const void *p) +{ + return __get_unaligned_le16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_le32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_le64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_le16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_le32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_le64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */ diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h new file mode 100644 index 00000000..269849be --- /dev/null +++ b/include/linux/unaligned/le_memmove.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H +#define _LINUX_UNALIGNED_LE_MEMMOVE_H + +#include <linux/unaligned/memmove.h> + +static inline u16 get_unaligned_le16(const void *p) +{ + return __get_unaligned_memmove16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_memmove32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_memmove64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_memmove16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_memmove32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_memmove64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h new file mode 100644 index 00000000..088c4572 --- /dev/null +++ b/include/linux/unaligned/le_struct.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_LE_STRUCT_H +#define _LINUX_UNALIGNED_LE_STRUCT_H + +#include 
<linux/unaligned/packed_struct.h> + +static inline u16 get_unaligned_le16(const void *p) +{ + return __get_unaligned_cpu16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_cpu32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_cpu64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_cpu16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_cpu32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_cpu64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */ diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h new file mode 100644 index 00000000..eeb5a779 --- /dev/null +++ b/include/linux/unaligned/memmove.h @@ -0,0 +1,45 @@ +#ifndef _LINUX_UNALIGNED_MEMMOVE_H +#define _LINUX_UNALIGNED_MEMMOVE_H + +#include <linux/kernel.h> +#include <linux/string.h> + +/* Use memmove here, so gcc does not insert a __builtin_memcpy. */ + +static inline u16 __get_unaligned_memmove16(const void *p) +{ + u16 tmp; + memmove(&tmp, p, 2); + return tmp; +} + +static inline u32 __get_unaligned_memmove32(const void *p) +{ + u32 tmp; + memmove(&tmp, p, 4); + return tmp; +} + +static inline u64 __get_unaligned_memmove64(const void *p) +{ + u64 tmp; + memmove(&tmp, p, 8); + return tmp; +} + +static inline void __put_unaligned_memmove16(u16 val, void *p) +{ + memmove(p, &val, 2); +} + +static inline void __put_unaligned_memmove32(u32 val, void *p) +{ + memmove(p, &val, 4); +} + +static inline void __put_unaligned_memmove64(u64 val, void *p) +{ + memmove(p, &val, 8); +} + +#endif /* _LINUX_UNALIGNED_MEMMOVE_H */ diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h new file mode 100644 index 00000000..c0d817de --- /dev/null +++ b/include/linux/unaligned/packed_struct.h @@ -0,0 +1,46 @@ +#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H +#define _LINUX_UNALIGNED_PACKED_STRUCT_H + +#include <linux/kernel.h> + +struct __una_u16 { u16 x; } __packed; +struct __una_u32 { u32 x; } __packed; +struct __una_u64 { u64 x; } __packed; + +static inline u16 __get_unaligned_cpu16(const void *p) +{ + const struct __una_u16 *ptr = (const struct __una_u16 *)p; + return ptr->x; +} + +static inline u32 __get_unaligned_cpu32(const void *p) +{ + const struct __una_u32 *ptr = (const struct __una_u32 *)p; + return ptr->x; +} + +static inline u64 __get_unaligned_cpu64(const void *p) +{ + const struct __una_u64 *ptr = (const struct __una_u64 *)p; + return ptr->x; +} + +static inline void __put_unaligned_cpu16(u16 val, void *p) +{ + struct __una_u16 *ptr = (struct __una_u16 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu32(u32 val, void *p) +{ + struct __una_u32 *ptr = (struct __una_u32 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu64(u64 val, void *p) +{ + struct __una_u64 *ptr = (struct __una_u64 *)p; + ptr->x = val; +} + +#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */ diff --git a/include/linux/unistd.h b/include/linux/unistd.h new file mode 100644 index 00000000..aa8d5b5e --- /dev/null +++ b/include/linux/unistd.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_UNISTD_H_ +#define _LINUX_UNISTD_H_ + +/* + * Include machine specific syscall numbers + */ +#include <asm/unistd.h> + +#endif /* _LINUX_UNISTD_H_ */ diff --git a/include/linux/unix_diag.h b/include/linux/unix_diag.h new file mode 100644 index 00000000..b1d2bf16 --- /dev/null 
+++ b/include/linux/unix_diag.h @@ -0,0 +1,54 @@ +#ifndef __UNIX_DIAG_H__ +#define __UNIX_DIAG_H__ + +#include <linux/types.h> + +struct unix_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; + __u16 pad; + __u32 udiag_states; + __u32 udiag_ino; + __u32 udiag_show; + __u32 udiag_cookie[2]; +}; + +#define UDIAG_SHOW_NAME 0x00000001 /* show name (not path) */ +#define UDIAG_SHOW_VFS 0x00000002 /* show VFS inode info */ +#define UDIAG_SHOW_PEER 0x00000004 /* show peer socket info */ +#define UDIAG_SHOW_ICONS 0x00000008 /* show pending connections */ +#define UDIAG_SHOW_RQLEN 0x00000010 /* show skb receive queue len */ +#define UDIAG_SHOW_MEMINFO 0x00000020 /* show memory info of a socket */ + +struct unix_diag_msg { + __u8 udiag_family; + __u8 udiag_type; + __u8 udiag_state; + __u8 pad; + + __u32 udiag_ino; + __u32 udiag_cookie[2]; +}; + +enum { + UNIX_DIAG_NAME, + UNIX_DIAG_VFS, + UNIX_DIAG_PEER, + UNIX_DIAG_ICONS, + UNIX_DIAG_RQLEN, + UNIX_DIAG_MEMINFO, + + UNIX_DIAG_MAX, +}; + +struct unix_diag_vfs { + __u32 udiag_vfs_ino; + __u32 udiag_vfs_dev; +}; + +struct unix_diag_rqlen { + __u32 udiag_rqueue; + __u32 udiag_wqueue; +}; + +#endif diff --git a/include/linux/usb.h b/include/linux/usb.h new file mode 100644 index 00000000..90f35bbe --- /dev/null +++ b/include/linux/usb.h @@ -0,0 +1,1669 @@ +#ifndef __LINUX_USB_H +#define __LINUX_USB_H + +#include <linux/mod_devicetable.h> +#include <linux/usb/ch9.h> + +#define USB_MAJOR 180 +#define USB_DEVICE_MAJOR 189 + + +#ifdef __KERNEL__ + +#include <linux/errno.h> /* for -ENODEV */ +#include <linux/delay.h> /* for mdelay() */ +#include <linux/interrupt.h> /* for in_interrupt() */ +#include <linux/list.h> /* for struct list_head */ +#include <linux/kref.h> /* for struct kref */ +#include <linux/device.h> /* for struct device */ +#include <linux/fs.h> /* for struct file_operations */ +#include <linux/completion.h> /* for struct completion */ +#include <linux/sched.h> /* for current && schedule_timeout */ +#include <linux/mutex.h> /* for struct mutex */ +#include <linux/pm_runtime.h> /* for runtime PM */ + +struct usb_device; +struct usb_driver; +struct wusb_dev; + +/*-------------------------------------------------------------------------*/ + +/* + * Host-side wrappers for standard USB descriptors ... these are parsed + * from the data provided by devices. Parsing turns them from a flat + * sequence of descriptors into a hierarchy: + * + * - devices have one (usually) or more configs; + * - configs have one (often) or more interfaces; + * - interfaces have one (usually) or more settings; + * - each interface setting has zero or (usually) more endpoints. + * - a SuperSpeed endpoint has a companion descriptor + * + * And there might be other descriptors mixed in with those. + * + * Devices may also have class-specific or vendor-specific descriptors. 
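+ *
+ * A sketch of walking that hierarchy from an active configuration, using
+ * only the structures declared below (error handling omitted):
+ *
+ *	struct usb_host_config *cfg = udev->actconfig;
+ *	struct usb_interface *intf = cfg->interface[0];
+ *	struct usb_host_interface *alt = intf->cur_altsetting;
+ *	struct usb_host_endpoint *ep = &alt->endpoint[0];
+ *
+ * where ep->desc is the raw endpoint descriptor for the first endpoint of
+ * the currently selected alternate setting.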
+ */ + +struct ep_device; + +/** + * struct usb_host_endpoint - host-side endpoint descriptor and queue + * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder + * @ss_ep_comp: SuperSpeed companion descriptor for this endpoint + * @urb_list: urbs queued to this endpoint; maintained by usbcore + * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH) + * with one or more transfer descriptors (TDs) per urb + * @ep_dev: ep_device for sysfs info + * @extra: descriptors following this endpoint in the configuration + * @extralen: how many bytes of "extra" are valid + * @enabled: URBs may be submitted to this endpoint + * + * USB requests are always queued to a given endpoint, identified by a + * descriptor within an active interface in a given USB configuration. + */ +struct usb_host_endpoint { + struct usb_endpoint_descriptor desc; + struct usb_ss_ep_comp_descriptor ss_ep_comp; + struct list_head urb_list; + void *hcpriv; + struct ep_device *ep_dev; /* For sysfs info */ + + unsigned char *extra; /* Extra descriptors */ + int extralen; + int enabled; +}; + +/* host-side wrapper for one interface setting's parsed descriptors */ +struct usb_host_interface { + struct usb_interface_descriptor desc; + + /* array of desc.bNumEndpoint endpoints associated with this + * interface setting. these will be in no particular order. + */ + struct usb_host_endpoint *endpoint; + + char *string; /* iInterface string, if present */ + unsigned char *extra; /* Extra descriptors */ + int extralen; +}; + +enum usb_interface_condition { + USB_INTERFACE_UNBOUND = 0, + USB_INTERFACE_BINDING, + USB_INTERFACE_BOUND, + USB_INTERFACE_UNBINDING, +}; + +/** + * struct usb_interface - what usb device drivers talk to + * @altsetting: array of interface structures, one for each alternate + * setting that may be selected. Each one includes a set of + * endpoint configurations. They will be in no particular order. + * @cur_altsetting: the current altsetting. + * @num_altsetting: number of altsettings defined. + * @intf_assoc: interface association descriptor + * @minor: the minor number assigned to this interface, if this + * interface is bound to a driver that uses the USB major number. + * If this interface does not use the USB major, this field should + * be unused. The driver should set this value in the probe() + * function of the driver, after it has been assigned a minor + * number from the USB core by calling usb_register_dev(). + * @condition: binding state of the interface: not bound, binding + * (in probe()), bound to a driver, or unbinding (in disconnect()) + * @sysfs_files_created: sysfs attributes exist + * @ep_devs_created: endpoint child pseudo-devices exist + * @unregistering: flag set when the interface is being unregistered + * @needs_remote_wakeup: flag set when the driver requires remote-wakeup + * capability during autosuspend. + * @needs_altsetting0: flag set when a set-interface request for altsetting 0 + * has been deferred. + * @needs_binding: flag set when the driver should be re-probed or unbound + * following a reset or suspend operation it doesn't support. + * @dev: driver model's view of this device + * @usb_dev: if an interface is bound to the USB major, this will point + * to the sysfs representation for that device. + * @pm_usage_cnt: PM usage counter for this interface + * @reset_ws: Used for scheduling resets from atomic context. 
+ * @reset_running: set to 1 if the interface is currently running a + * queued reset so that usb_cancel_queued_reset() doesn't try to + * remove from the workqueue when running inside the worker + * thread. See __usb_queue_reset_device(). + * @resetting_device: USB core reset the device, so use alt setting 0 as + * current; needs bandwidth alloc after reset. + * + * USB device drivers attach to interfaces on a physical device. Each + * interface encapsulates a single high level function, such as feeding + * an audio stream to a speaker or reporting a change in a volume control. + * Many USB devices only have one interface. The protocol used to talk to + * an interface's endpoints can be defined in a usb "class" specification, + * or by a product's vendor. The (default) control endpoint is part of + * every interface, but is never listed among the interface's descriptors. + * + * The driver that is bound to the interface can use standard driver model + * calls such as dev_get_drvdata() on the dev member of this structure. + * + * Each interface may have alternate settings. The initial configuration + * of a device sets altsetting 0, but the device driver can change + * that setting using usb_set_interface(). Alternate settings are often + * used to control the use of periodic endpoints, such as by having + * different endpoints use different amounts of reserved USB bandwidth. + * All standards-conformant USB devices that use isochronous endpoints + * will use them in non-default settings. + * + * The USB specification says that alternate setting numbers must run from + * 0 to one less than the total number of alternate settings. But some + * devices manage to mess this up, and the structures aren't necessarily + * stored in numerical order anyhow. Use usb_altnum_to_altsetting() to + * look up an alternate setting in the altsetting array based on its number. 
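+ *
+ * For example, a driver that wants alternate setting 1 of its interface
+ * might do something like this in probe() (a sketch, without error
+ * handling):
+ *
+ *	struct usb_host_interface *alt = usb_altnum_to_altsetting(intf, 1);
+ *	if (alt)
+ *		usb_set_interface(interface_to_usbdev(intf),
+ *				  alt->desc.bInterfaceNumber, 1);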
+ */ +struct usb_interface { + /* array of alternate settings for this interface, + * stored in no particular order */ + struct usb_host_interface *altsetting; + + struct usb_host_interface *cur_altsetting; /* the currently + * active alternate setting */ + unsigned num_altsetting; /* number of alternate settings */ + + /* If there is an interface association descriptor then it will list + * the associated interfaces */ + struct usb_interface_assoc_descriptor *intf_assoc; + + int minor; /* minor number this interface is + * bound to */ + enum usb_interface_condition condition; /* state of binding */ + unsigned sysfs_files_created:1; /* the sysfs attributes exist */ + unsigned ep_devs_created:1; /* endpoint "devices" exist */ + unsigned unregistering:1; /* unregistration is in progress */ + unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ + unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ + unsigned needs_binding:1; /* needs delayed unbind/rebind */ + unsigned reset_running:1; + unsigned resetting_device:1; /* true: bandwidth alloc after reset */ + + struct device dev; /* interface specific device info */ + struct device *usb_dev; + atomic_t pm_usage_cnt; /* usage counter for autosuspend */ + struct work_struct reset_ws; /* for resets in atomic context */ +}; +#define to_usb_interface(d) container_of(d, struct usb_interface, dev) + +static inline void *usb_get_intfdata(struct usb_interface *intf) +{ + return dev_get_drvdata(&intf->dev); +} + +static inline void usb_set_intfdata(struct usb_interface *intf, void *data) +{ + dev_set_drvdata(&intf->dev, data); +} + +struct usb_interface *usb_get_intf(struct usb_interface *intf); +void usb_put_intf(struct usb_interface *intf); + +/* this maximum is arbitrary */ +#define USB_MAXINTERFACES 32 +#define USB_MAXIADS (USB_MAXINTERFACES/2) + +/** + * struct usb_interface_cache - long-term representation of a device interface + * @num_altsetting: number of altsettings defined. + * @ref: reference counter. + * @altsetting: variable-length array of interface structures, one for + * each alternate setting that may be selected. Each one includes a + * set of endpoint configurations. They will be in no particular order. + * + * These structures persist for the lifetime of a usb_device, unlike + * struct usb_interface (which persists only as long as its configuration + * is installed). The altsetting arrays can be accessed through these + * structures at any time, permitting comparison of configurations and + * providing support for the /proc/bus/usb/devices pseudo-file. + */ +struct usb_interface_cache { + unsigned num_altsetting; /* number of alternate settings */ + struct kref ref; /* reference counter */ + + /* variable-length array of alternate settings for this interface, + * stored in no particular order */ + struct usb_host_interface altsetting[0]; +}; +#define ref_to_usb_interface_cache(r) \ + container_of(r, struct usb_interface_cache, ref) +#define altsetting_to_usb_interface_cache(a) \ + container_of(a, struct usb_interface_cache, altsetting[0]) + +/** + * struct usb_host_config - representation of a device's configuration + * @desc: the device's configuration descriptor. + * @string: pointer to the cached version of the iConfiguration string, if + * present for this configuration. + * @intf_assoc: list of any interface association descriptors in this config + * @interface: array of pointers to usb_interface structures, one for each + * interface in the configuration. 
The number of interfaces is stored
+ * in desc.bNumInterfaces. These pointers are valid only while the
+ * configuration is active.
+ * @intf_cache: array of pointers to usb_interface_cache structures, one
+ * for each interface in the configuration. These structures exist
+ * for the entire life of the device.
+ * @extra: pointer to buffer containing all extra descriptors associated
+ * with this configuration (those preceding the first interface
+ * descriptor).
+ * @extralen: length of the extra descriptors buffer.
+ *
+ * USB devices may have multiple configurations, but only one can be active
+ * at any time. Each encapsulates a different operational environment;
+ * for example, a dual-speed device would have separate configurations for
+ * full-speed and high-speed operation. The number of configurations
+ * available is stored in the device descriptor as bNumConfigurations.
+ *
+ * A configuration can contain multiple interfaces. Each corresponds to
+ * a different function of the USB device, and all are available whenever
+ * the configuration is active. The USB standard says that interfaces
+ * are supposed to be numbered from 0 to desc.bNumInterfaces-1, but a lot
+ * of devices get this wrong. In addition, the interface array is not
+ * guaranteed to be sorted in numerical order. Use usb_ifnum_to_if() to
+ * look up an interface entry based on its number.
+ *
+ * Device drivers should not attempt to activate configurations. The choice
+ * of which configuration to install is a policy decision based on such
+ * considerations as available power, functionality provided, and the user's
+ * desires (expressed through userspace tools). However, drivers can call
+ * usb_reset_configuration() to reinitialize the current configuration and
+ * all its interfaces.
+ */
+struct usb_host_config {
+ struct usb_config_descriptor desc;
+
+ char *string; /* iConfiguration string, if present */
+
+ /* List of any Interface Association Descriptors in this
+ * configuration. */
+ struct usb_interface_assoc_descriptor *intf_assoc[USB_MAXIADS];
+
+ /* the interfaces associated with this configuration,
+ * stored in no particular order */
+ struct usb_interface *interface[USB_MAXINTERFACES];
+
+ /* Interface information available even when this is not the
+ * active configuration */
+ struct usb_interface_cache *intf_cache[USB_MAXINTERFACES];
+
+ unsigned char *extra; /* Extra descriptors */
+ int extralen;
+};
+
+/* USB2.0 and USB3.0 device BOS descriptor set */
+struct usb_host_bos {
+ struct usb_bos_descriptor *desc;
+
+ /* wireless cap descriptor is handled by wusb */
+ struct usb_ext_cap_descriptor *ext_cap;
+ struct usb_ss_cap_descriptor *ss_cap;
+ struct usb_ss_container_id_descriptor *ss_id;
+};
+
+int __usb_get_extra_descriptor(char *buffer, unsigned size,
+ unsigned char type, void **ptr);
+#define usb_get_extra_descriptor(ifpoint, type, ptr) \
+ __usb_get_extra_descriptor((ifpoint)->extra, \
+ (ifpoint)->extralen, \
+ type, (void **)ptr)
+
+/* ----------------------------------------------------------------------- */
+
+/* USB device number allocation bitmap */
+struct usb_devmap {
+ unsigned long devicemap[128 / (8*sizeof(unsigned long))];
+};
+
+/*
+ * Allocated per bus (tree of devices) we have:
+ */
+struct usb_bus {
+ struct device *controller; /* host/master side hardware */
+ int busnum; /* Bus number (in order of reg) */
+ const char *bus_name; /* stable id (PCI slot_name etc) */
+ u8 uses_dma; /* Does the host controller use DMA?
*/ + u8 uses_pio_for_control; /* + * Does the host controller use PIO + * for control transfers? + */ + u8 otg_port; /* 0, or number of OTG/HNP port */ + unsigned is_b_host:1; /* true during some HNP roleswitches */ + unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */ + unsigned sg_tablesize; /* 0 or largest number of sg list entries */ + + int devnum_next; /* Next open device number in + * round-robin allocation */ + + struct usb_devmap devmap; /* device address allocation map */ + struct usb_device *root_hub; /* Root hub */ + struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ + struct list_head bus_list; /* list of busses */ + + int bandwidth_allocated; /* on this bus: how much of the time + * reserved for periodic (intr/iso) + * requests is used, on average? + * Units: microseconds/frame. + * Limits: Full/low speed reserve 90%, + * while high speed reserves 80%. + */ + int bandwidth_int_reqs; /* number of Interrupt requests */ + int bandwidth_isoc_reqs; /* number of Isoc. requests */ + +#ifdef CONFIG_USB_DEVICEFS + struct dentry *usbfs_dentry; /* usbfs dentry entry for the bus */ +#endif + +#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) + struct mon_bus *mon_bus; /* non-null when associated */ + int monitored; /* non-zero when monitored */ +#endif +}; + +/* ----------------------------------------------------------------------- */ + +/* This is arbitrary. + * From USB 2.0 spec Table 11-13, offset 7, a hub can + * have up to 255 ports. The most yet reported is 10. + * + * Current Wireless USB host hardware (Intel i1480 for example) allows + * up to 22 devices to connect. Upcoming hardware might raise that + * limit. Because the arrays need to add a bit for hub status data, we + * do 31, so plus one evens out to four bytes. + */ +#define USB_MAXCHILDREN (31) + +struct usb_tt; + +enum usb_device_removable { + USB_DEVICE_REMOVABLE_UNKNOWN = 0, + USB_DEVICE_REMOVABLE, + USB_DEVICE_FIXED, +}; + +/** + * struct usb_device - kernel's representation of a USB device + * @devnum: device number; address on a USB bus + * @devpath: device ID string for use in messages (e.g., /port/...) + * @route: tree topology hex string for use with xHCI + * @state: device state: configured, not attached, etc. + * @speed: device speed: high/full/low (or error) + * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub + * @ttport: device port on that tt hub + * @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints + * @parent: our hub, unless we're the root + * @bus: bus we're part of + * @ep0: endpoint 0 data (default control pipe) + * @dev: generic device interface + * @descriptor: USB device descriptor + * @bos: USB device BOS descriptor set + * @config: all of the device's configs + * @actconfig: the active configuration + * @ep_in: array of IN endpoints + * @ep_out: array of OUT endpoints + * @rawdescriptors: raw descriptors for each config + * @bus_mA: Current available from the bus + * @portnum: parent port number (origin 1) + * @level: number of USB hub ancestors + * @can_submit: URBs may be submitted + * @persist_enabled: USB_PERSIST enabled for this device + * @have_langid: whether string_langid is valid + * @authorized: policy has said we can use it; + * (user space) policy determines if we authorize this device to be + * used or not. By default, wired USB devices are authorized. + * WUSB devices are not, until we authorize them from user space. 
+ * FIXME -- complete doc + * @authenticated: Crypto authentication passed + * @wusb: device is Wireless USB + * @lpm_capable: device supports LPM + * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM + * @usb2_hw_lpm_enabled: USB2 hardware LPM enabled + * @string_langid: language ID for strings + * @product: iProduct string, if present (static) + * @manufacturer: iManufacturer string, if present (static) + * @serial: iSerialNumber string, if present (static) + * @filelist: usbfs files that are open to this device + * @usb_classdev: USB class device that was created for usbfs device + * access from userspace + * @usbfs_dentry: usbfs dentry entry for the device + * @maxchild: number of ports if hub + * @children: child devices - USB devices that are attached to this hub + * @quirks: quirks of the whole device + * @urbnum: number of URBs submitted for the whole device + * @active_duration: total time device is not suspended + * @connect_time: time device was first connected + * @do_remote_wakeup: remote wakeup should be enabled + * @reset_resume: needs reset instead of resume + * @wusb_dev: if this is a Wireless USB device, link to the WUSB + * specific data for the device. + * @slot_id: Slot ID assigned by xHCI + * @removable: Device can be physically removed from this port + * + * Notes: + * Usbcore drivers should not set usbdev->state directly. Instead use + * usb_set_device_state(). + */ +struct usb_device { + int devnum; + char devpath[16]; + u32 route; + enum usb_device_state state; + enum usb_device_speed speed; + + struct usb_tt *tt; + int ttport; + + unsigned int toggle[2]; + + struct usb_device *parent; + struct usb_bus *bus; + struct usb_host_endpoint ep0; + + struct device dev; + + struct usb_device_descriptor descriptor; + struct usb_host_bos *bos; + struct usb_host_config *config; + + struct usb_host_config *actconfig; + struct usb_host_endpoint *ep_in[16]; + struct usb_host_endpoint *ep_out[16]; + + char **rawdescriptors; + + unsigned short bus_mA; + u8 portnum; + u8 level; + + unsigned can_submit:1; + unsigned persist_enabled:1; + unsigned have_langid:1; + unsigned authorized:1; + unsigned authenticated:1; + unsigned wusb:1; + unsigned lpm_capable:1; + unsigned usb2_hw_lpm_capable:1; + unsigned usb2_hw_lpm_enabled:1; + int string_langid; + + /* static strings from the device */ + char *product; + char *manufacturer; + char *serial; + + struct list_head filelist; +#ifdef CONFIG_USB_DEVICE_CLASS + struct device *usb_classdev; +#endif +#ifdef CONFIG_USB_DEVICEFS + struct dentry *usbfs_dentry; +#endif + + int maxchild; + struct usb_device **children; + + u32 quirks; + atomic_t urbnum; + + unsigned long active_duration; + +#ifdef CONFIG_PM + unsigned long connect_time; + + unsigned do_remote_wakeup:1; + unsigned reset_resume:1; +#endif + struct wusb_dev *wusb_dev; + int slot_id; + enum usb_device_removable removable; +}; +#define to_usb_device(d) container_of(d, struct usb_device, dev) + +static inline struct usb_device *interface_to_usbdev(struct usb_interface *intf) +{ + return to_usb_device(intf->dev.parent); +} + +extern struct usb_device *usb_get_dev(struct usb_device *dev); +extern void usb_put_dev(struct usb_device *dev); + +/* USB device locking */ +#define usb_lock_device(udev) device_lock(&(udev)->dev) +#define usb_unlock_device(udev) device_unlock(&(udev)->dev) +#define usb_trylock_device(udev) device_trylock(&(udev)->dev) +extern int usb_lock_device_for_reset(struct usb_device *udev, + const struct usb_interface *iface); + +/* USB port reset for device 
reinitialization */ +extern int usb_reset_device(struct usb_device *dev); +extern void usb_queue_reset_device(struct usb_interface *dev); + + +/* USB autosuspend and autoresume */ +#ifdef CONFIG_USB_SUSPEND +extern void usb_enable_autosuspend(struct usb_device *udev); +extern void usb_disable_autosuspend(struct usb_device *udev); + +extern int usb_autopm_get_interface(struct usb_interface *intf); +extern void usb_autopm_put_interface(struct usb_interface *intf); +extern int usb_autopm_get_interface_async(struct usb_interface *intf); +extern void usb_autopm_put_interface_async(struct usb_interface *intf); +extern void usb_autopm_get_interface_no_resume(struct usb_interface *intf); +extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf); + +static inline void usb_mark_last_busy(struct usb_device *udev) +{ + pm_runtime_mark_last_busy(&udev->dev); +} + +#else + +static inline int usb_enable_autosuspend(struct usb_device *udev) +{ return 0; } +static inline int usb_disable_autosuspend(struct usb_device *udev) +{ return 0; } + +static inline int usb_autopm_get_interface(struct usb_interface *intf) +{ return 0; } +static inline int usb_autopm_get_interface_async(struct usb_interface *intf) +{ return 0; } + +static inline void usb_autopm_put_interface(struct usb_interface *intf) +{ } +static inline void usb_autopm_put_interface_async(struct usb_interface *intf) +{ } +static inline void usb_autopm_get_interface_no_resume( + struct usb_interface *intf) +{ } +static inline void usb_autopm_put_interface_no_suspend( + struct usb_interface *intf) +{ } +static inline void usb_mark_last_busy(struct usb_device *udev) +{ } +#endif + +/*-------------------------------------------------------------------------*/ + +/* for drivers using iso endpoints */ +extern int usb_get_current_frame_number(struct usb_device *usb_dev); + +/* Sets up a group of bulk endpoints to support multiple stream IDs. */ +extern int usb_alloc_streams(struct usb_interface *interface, + struct usb_host_endpoint **eps, unsigned int num_eps, + unsigned int num_streams, gfp_t mem_flags); + +/* Reverts a group of bulk endpoints back to not using stream IDs. */ +extern void usb_free_streams(struct usb_interface *interface, + struct usb_host_endpoint **eps, unsigned int num_eps, + gfp_t mem_flags); + +/* used these for multi-interface device registration */ +extern int usb_driver_claim_interface(struct usb_driver *driver, + struct usb_interface *iface, void *priv); + +/** + * usb_interface_claimed - returns true iff an interface is claimed + * @iface: the interface being checked + * + * Returns true (nonzero) iff the interface is claimed, else false (zero). + * Callers must own the driver model's usb bus readlock. So driver + * probe() entries don't need extra locking, but other call contexts + * may need to explicitly claim that lock. 
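The runtime-PM helpers declared above (usb_autopm_get_interface() and friends) are normally wrapped around driver I/O so the core knows when the device must stay resumed. A minimal sketch, assuming a hypothetical skel driver and that <linux/usb.h> is included:

static int skel_do_io(struct usb_interface *intf)
{
	int retval;

	/* resume the device (if suspended) and hold a usage reference */
	retval = usb_autopm_get_interface(intf);
	if (retval)
		return retval;

	/* ... submit URBs or issue synchronous transfers here ... */

	/* drop the reference; the core may autosuspend the device again */
	usb_autopm_put_interface(intf);
	return 0;
}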
+ * + */ +static inline int usb_interface_claimed(struct usb_interface *iface) +{ + return (iface->dev.driver != NULL); +} + +extern void usb_driver_release_interface(struct usb_driver *driver, + struct usb_interface *iface); +const struct usb_device_id *usb_match_id(struct usb_interface *interface, + const struct usb_device_id *id); +extern int usb_match_one_id(struct usb_interface *interface, + const struct usb_device_id *id); + +extern struct usb_interface *usb_find_interface(struct usb_driver *drv, + int minor); +extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, + unsigned ifnum); +extern struct usb_host_interface *usb_altnum_to_altsetting( + const struct usb_interface *intf, unsigned int altnum); +extern struct usb_host_interface *usb_find_alt_setting( + struct usb_host_config *config, + unsigned int iface_num, + unsigned int alt_num); + + +/** + * usb_make_path - returns stable device path in the usb tree + * @dev: the device whose path is being constructed + * @buf: where to put the string + * @size: how big is "buf"? + * + * Returns length of the string (> 0) or negative if size was too small. + * + * This identifier is intended to be "stable", reflecting physical paths in + * hardware such as physical bus addresses for host controllers or ports on + * USB hubs. That makes it stay the same until systems are physically + * reconfigured, by re-cabling a tree of USB devices or by moving USB host + * controllers. Adding and removing devices, including virtual root hubs + * in host controller driver modules, does not change these path identifiers; + * neither does rebooting or re-enumerating. These are more useful identifiers + * than changeable ("unstable") ones like bus numbers or device addresses. + * + * With a partial exception for devices connected to USB 2.0 root hubs, these + * identifiers are also predictable. So long as the device tree isn't changed, + * plugging any USB device into a given hub port always gives it the same path. + * Because of the use of "companion" controllers, devices connected to ports on + * USB 2.0 root hubs (EHCI host controllers) will get one path ID if they are + * high speed, and a different one if they are full or low speed. + */ +static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size) +{ + int actual; + actual = snprintf(buf, size, "usb-%s-%s", dev->bus->bus_name, + dev->devpath); + return (actual >= (int)size) ? -1 : actual; +} + +/*-------------------------------------------------------------------------*/ + +#define USB_DEVICE_ID_MATCH_DEVICE \ + (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT) +#define USB_DEVICE_ID_MATCH_DEV_RANGE \ + (USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI) +#define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ + (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE) +#define USB_DEVICE_ID_MATCH_DEV_INFO \ + (USB_DEVICE_ID_MATCH_DEV_CLASS | \ + USB_DEVICE_ID_MATCH_DEV_SUBCLASS | \ + USB_DEVICE_ID_MATCH_DEV_PROTOCOL) +#define USB_DEVICE_ID_MATCH_INT_INFO \ + (USB_DEVICE_ID_MATCH_INT_CLASS | \ + USB_DEVICE_ID_MATCH_INT_SUBCLASS | \ + USB_DEVICE_ID_MATCH_INT_PROTOCOL) + +/** + * USB_DEVICE - macro used to describe a specific usb device + * @vend: the 16 bit USB Vendor ID + * @prod: the 16 bit USB Product ID + * + * This macro is used to create a struct usb_device_id that matches a + * specific device. 
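As an illustration, a driver's match table built from this macro usually looks like the sketch below; the vendor/product values and the skel_table name are invented, and MODULE_DEVICE_TABLE() requires <linux/module.h>:

static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);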
+ */ +#define USB_DEVICE(vend, prod) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \ + .idVendor = (vend), \ + .idProduct = (prod) +/** + * USB_DEVICE_VER - describe a specific usb device with a version range + * @vend: the 16 bit USB Vendor ID + * @prod: the 16 bit USB Product ID + * @lo: the bcdDevice_lo value + * @hi: the bcdDevice_hi value + * + * This macro is used to create a struct usb_device_id that matches a + * specific device, with a version range. + */ +#define USB_DEVICE_VER(vend, prod, lo, hi) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bcdDevice_lo = (lo), \ + .bcdDevice_hi = (hi) + +/** + * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol + * @vend: the 16 bit USB Vendor ID + * @prod: the 16 bit USB Product ID + * @pr: bInterfaceProtocol value + * + * This macro is used to create a struct usb_device_id that matches a + * specific interface protocol of devices. + */ +#define USB_DEVICE_INTERFACE_PROTOCOL(vend, prod, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ + USB_DEVICE_ID_MATCH_INT_PROTOCOL, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bInterfaceProtocol = (pr) + +/** + * USB_DEVICE_INFO - macro used to describe a class of usb devices + * @cl: bDeviceClass value + * @sc: bDeviceSubClass value + * @pr: bDeviceProtocol value + * + * This macro is used to create a struct usb_device_id that matches a + * specific class of devices. + */ +#define USB_DEVICE_INFO(cl, sc, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_DEV_INFO, \ + .bDeviceClass = (cl), \ + .bDeviceSubClass = (sc), \ + .bDeviceProtocol = (pr) + +/** + * USB_INTERFACE_INFO - macro used to describe a class of usb interfaces + * @cl: bInterfaceClass value + * @sc: bInterfaceSubClass value + * @pr: bInterfaceProtocol value + * + * This macro is used to create a struct usb_device_id that matches a + * specific class of interfaces. + */ +#define USB_INTERFACE_INFO(cl, sc, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO, \ + .bInterfaceClass = (cl), \ + .bInterfaceSubClass = (sc), \ + .bInterfaceProtocol = (pr) + +/** + * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces + * @vend: the 16 bit USB Vendor ID + * @prod: the 16 bit USB Product ID + * @cl: bInterfaceClass value + * @sc: bInterfaceSubClass value + * @pr: bInterfaceProtocol value + * + * This macro is used to create a struct usb_device_id that matches a + * specific device with a specific class of interfaces. + * + * This is especially useful when explicitly matching devices that have + * vendor specific bDeviceClass values, but standards-compliant interfaces. + */ +#define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ + | USB_DEVICE_ID_MATCH_DEVICE, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bInterfaceClass = (cl), \ + .bInterfaceSubClass = (sc), \ + .bInterfaceProtocol = (pr) + +/* ----------------------------------------------------------------------- */ + +/* Stuff for dynamic usb ids */ +struct usb_dynids { + spinlock_t lock; + struct list_head list; +}; + +struct usb_dynid { + struct list_head node; + struct usb_device_id id; +}; + +extern ssize_t usb_store_new_id(struct usb_dynids *dynids, + struct device_driver *driver, + const char *buf, size_t count); + +/** + * struct usbdrv_wrap - wrapper for driver-model structure + * @driver: The driver-model core driver structure. 
+ * @for_devices: Non-zero for device drivers, 0 for interface drivers. + */ +struct usbdrv_wrap { + struct device_driver driver; + int for_devices; +}; + +/** + * struct usb_driver - identifies USB interface driver to usbcore + * @name: The driver name should be unique among USB drivers, + * and should normally be the same as the module name. + * @probe: Called to see if the driver is willing to manage a particular + * interface on a device. If it is, probe returns zero and uses + * usb_set_intfdata() to associate driver-specific data with the + * interface. It may also use usb_set_interface() to specify the + * appropriate altsetting. If unwilling to manage the interface, + * return -ENODEV, if genuine IO errors occurred, an appropriate + * negative errno value. + * @disconnect: Called when the interface is no longer accessible, usually + * because its device has been (or is being) disconnected or the + * driver module is being unloaded. + * @unlocked_ioctl: Used for drivers that want to talk to userspace through + * the "usbfs" filesystem. This lets devices provide ways to + * expose information to user space regardless of where they + * do (or don't) show up otherwise in the filesystem. + * @suspend: Called when the device is going to be suspended by the system. + * @resume: Called when the device is being resumed by the system. + * @reset_resume: Called when the suspended device has been reset instead + * of being resumed. + * @pre_reset: Called by usb_reset_device() when the device is about to be + * reset. This routine must not return until the driver has no active + * URBs for the device, and no more URBs may be submitted until the + * post_reset method is called. + * @post_reset: Called by usb_reset_device() after the device + * has been reset + * @id_table: USB drivers use ID table to support hotplugging. + * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set + * or your driver's probe function will never get called. + * @dynids: used internally to hold the list of dynamically added device + * ids for this driver. + * @drvwrap: Driver-model core structure wrapper. + * @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be + * added to this driver by preventing the sysfs file from being created. + * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend + * for interfaces bound to this driver. + * @soft_unbind: if set to 1, the USB core will not kill URBs and disable + * endpoints before calling the driver's disconnect method. + * + * USB interface drivers must provide a name, probe() and disconnect() + * methods, and an id_table. Other driver fields are optional. + * + * The id_table is used in hotplugging. It holds a set of descriptors, + * and specialized data may be associated with each entry. That table + * is used by both user and kernel mode hotplugging support. + * + * The probe() and disconnect() methods are called in a context where + * they can sleep, but they should avoid abusing the privilege. Most + * work to connect to a device should be done when the device is opened, + * and undone at the last close. The disconnect code needs to address + * concurrency issues with respect to open() and close() methods, as + * well as forcing all pending I/O requests to complete (by unlinking + * them as necessary, and blocking until the unlinks complete). 
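Putting the required fields together, a minimal interface driver might be sketched as follows (the skel_* names are hypothetical, skel_table is the match table shown earlier, and error handling is trimmed):

static int skel_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	dev_info(&intf->dev, "bound to %04x:%04x\n",
		 le16_to_cpu(udev->descriptor.idVendor),
		 le16_to_cpu(udev->descriptor.idProduct));
	usb_set_intfdata(intf, udev);		/* stash per-interface state */
	return 0;
}

static void skel_disconnect(struct usb_interface *intf)
{
	usb_set_intfdata(intf, NULL);
}

static struct usb_driver skel_driver = {
	.name		= "skel",
	.probe		= skel_probe,
	.disconnect	= skel_disconnect,
	.id_table	= skel_table,
};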
+ */ +struct usb_driver { + const char *name; + + int (*probe) (struct usb_interface *intf, + const struct usb_device_id *id); + + void (*disconnect) (struct usb_interface *intf); + + int (*unlocked_ioctl) (struct usb_interface *intf, unsigned int code, + void *buf); + + int (*suspend) (struct usb_interface *intf, pm_message_t message); + int (*resume) (struct usb_interface *intf); + int (*reset_resume)(struct usb_interface *intf); + + int (*pre_reset)(struct usb_interface *intf); + int (*post_reset)(struct usb_interface *intf); + + const struct usb_device_id *id_table; + + struct usb_dynids dynids; + struct usbdrv_wrap drvwrap; + unsigned int no_dynamic_id:1; + unsigned int supports_autosuspend:1; + unsigned int soft_unbind:1; +}; +#define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver) + +/** + * struct usb_device_driver - identifies USB device driver to usbcore + * @name: The driver name should be unique among USB drivers, + * and should normally be the same as the module name. + * @probe: Called to see if the driver is willing to manage a particular + * device. If it is, probe returns zero and uses dev_set_drvdata() + * to associate driver-specific data with the device. If unwilling + * to manage the device, return a negative errno value. + * @disconnect: Called when the device is no longer accessible, usually + * because it has been (or is being) disconnected or the driver's + * module is being unloaded. + * @suspend: Called when the device is going to be suspended by the system. + * @resume: Called when the device is being resumed by the system. + * @drvwrap: Driver-model core structure wrapper. + * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend + * for devices bound to this driver. + * + * USB drivers must provide all the fields listed above except drvwrap. + */ +struct usb_device_driver { + const char *name; + + int (*probe) (struct usb_device *udev); + void (*disconnect) (struct usb_device *udev); + + int (*suspend) (struct usb_device *udev, pm_message_t message); + int (*resume) (struct usb_device *udev, pm_message_t message); + struct usbdrv_wrap drvwrap; + unsigned int supports_autosuspend:1; +}; +#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \ + drvwrap.driver) + +extern struct bus_type usb_bus_type; + +/** + * struct usb_class_driver - identifies a USB driver that wants to use the USB major number + * @name: the usb class device name for this driver. Will show up in sysfs. + * @devnode: Callback to provide a naming hint for a possible + * device node to create. + * @fops: pointer to the struct file_operations of this driver. + * @minor_base: the start of the minor range for this driver. + * + * This structure is used for the usb_register_dev() and + * usb_unregister_dev() functions, to consolidate a number of the + * parameters used for them. + */ +struct usb_class_driver { + char *name; + char *(*devnode)(struct device *dev, umode_t *mode); + const struct file_operations *fops; + int minor_base; +}; + +/* + * use these in module_init()/module_exit() + * and don't forget MODULE_DEVICE_TABLE(usb, ...) 
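For the hypothetical skel_driver above, the classic registration boilerplate looks like this; as described below, module_usb_driver() collapses the same pattern into a single line:

static int __init skel_init(void)
{
	return usb_register(&skel_driver);
}
module_init(skel_init);

static void __exit skel_exit(void)
{
	usb_deregister(&skel_driver);
}
module_exit(skel_exit);

MODULE_LICENSE("GPL");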
+ */ +extern int usb_register_driver(struct usb_driver *, struct module *, + const char *); + +/* use a define to avoid include chaining to get THIS_MODULE & friends */ +#define usb_register(driver) \ + usb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) + +extern void usb_deregister(struct usb_driver *); + +/** + * module_usb_driver() - Helper macro for registering a USB driver + * @__usb_driver: usb_driver struct + * + * Helper macro for USB drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_usb_driver(__usb_driver) \ + module_driver(__usb_driver, usb_register, \ + usb_deregister) + +extern int usb_register_device_driver(struct usb_device_driver *, + struct module *); +extern void usb_deregister_device_driver(struct usb_device_driver *); + +extern int usb_register_dev(struct usb_interface *intf, + struct usb_class_driver *class_driver); +extern void usb_deregister_dev(struct usb_interface *intf, + struct usb_class_driver *class_driver); + +extern int usb_disabled(void); + +/* ----------------------------------------------------------------------- */ + +/* + * URB support, for asynchronous request completions + */ + +/* + * urb->transfer_flags: + * + * Note: URB_DIR_IN/OUT is automatically set in usb_submit_urb(). + */ +#define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */ +#define URB_ISO_ASAP 0x0002 /* iso-only, urb->start_frame + * ignored */ +#define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ +#define URB_NO_FSBR 0x0020 /* UHCI-specific */ +#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */ +#define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt + * needed */ +#define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */ + +/* The following flags are used internally by usbcore and HCDs */ +#define URB_DIR_IN 0x0200 /* Transfer from device to host */ +#define URB_DIR_OUT 0 +#define URB_DIR_MASK URB_DIR_IN + +#define URB_DMA_MAP_SINGLE 0x00010000 /* Non-scatter-gather mapping */ +#define URB_DMA_MAP_PAGE 0x00020000 /* HCD-unsupported S-G */ +#define URB_DMA_MAP_SG 0x00040000 /* HCD-supported S-G */ +#define URB_MAP_LOCAL 0x00080000 /* HCD-local-memory mapping */ +#define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */ +#define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */ +#define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */ +#define URB_ALIGNED_TEMP_BUFFER 0x00800000 /* Temp buffer was alloc'd */ +/*{CharlesTu, 2010.08.26, for test mode */ +#define URB_HCD_DRIVER_TEST 0xFFFF /* Do NOT hand back or free this URB */ +/*CharlesTu}*/ + +struct usb_iso_packet_descriptor { + unsigned int offset; + unsigned int length; /* expected length */ + unsigned int actual_length; + int status; +}; + +struct urb; + +struct usb_anchor { + struct list_head urb_list; + wait_queue_head_t wait; + spinlock_t lock; + unsigned int poisoned:1; +}; + +static inline void init_usb_anchor(struct usb_anchor *anchor) +{ + INIT_LIST_HEAD(&anchor->urb_list); + init_waitqueue_head(&anchor->wait); + spin_lock_init(&anchor->lock); +} + +typedef void (*usb_complete_t)(struct urb *); + +/** + * struct urb - USB Request Block + * @urb_list: For use by current owner of the URB. + * @anchor_list: membership in the list of an anchor + * @anchor: to anchor URBs to a common mooring + * @ep: Points to the endpoint's data structure. 
Will eventually + * replace @pipe. + * @pipe: Holds endpoint number, direction, type, and more. + * Create these values with the eight macros available; + * usb_{snd,rcv}TYPEpipe(dev,endpoint), where the TYPE is "ctrl" + * (control), "bulk", "int" (interrupt), or "iso" (isochronous). + * For example usb_sndbulkpipe() or usb_rcvintpipe(). Endpoint + * numbers range from zero to fifteen. Note that "in" endpoint two + * is a different endpoint (and pipe) from "out" endpoint two. + * The current configuration controls the existence, type, and + * maximum packet size of any given endpoint. + * @stream_id: the endpoint's stream ID for bulk streams + * @dev: Identifies the USB device to perform the request. + * @status: This is read in non-iso completion functions to get the + * status of the particular request. ISO requests only use it + * to tell whether the URB was unlinked; detailed status for + * each frame is in the fields of the iso_frame-desc. + * @transfer_flags: A variety of flags may be used to affect how URB + * submission, unlinking, or operation are handled. Different + * kinds of URB can use different flags. + * @transfer_buffer: This identifies the buffer to (or from) which the I/O + * request will be performed unless URB_NO_TRANSFER_DMA_MAP is set + * (however, do not leave garbage in transfer_buffer even then). + * This buffer must be suitable for DMA; allocate it with + * kmalloc() or equivalent. For transfers to "in" endpoints, contents + * of this buffer will be modified. This buffer is used for the data + * stage of control transfers. + * @transfer_dma: When transfer_flags includes URB_NO_TRANSFER_DMA_MAP, + * the device driver is saying that it provided this DMA address, + * which the host controller driver should use in preference to the + * transfer_buffer. + * @sg: scatter gather buffer list + * @num_mapped_sgs: (internal) number of mapped sg entries + * @num_sgs: number of entries in the sg list + * @transfer_buffer_length: How big is transfer_buffer. The transfer may + * be broken up into chunks according to the current maximum packet + * size for the endpoint, which is a function of the configuration + * and is encoded in the pipe. When the length is zero, neither + * transfer_buffer nor transfer_dma is used. + * @actual_length: This is read in non-iso completion functions, and + * it tells how many bytes (out of transfer_buffer_length) were + * transferred. It will normally be the same as requested, unless + * either an error was reported or a short read was performed. + * The URB_SHORT_NOT_OK transfer flag may be used to make such + * short reads be reported as errors. + * @setup_packet: Only used for control transfers, this points to eight bytes + * of setup data. Control transfers always start by sending this data + * to the device. Then transfer_buffer is read or written, if needed. + * @setup_dma: DMA pointer for the setup packet. The caller must not use + * this field; setup_packet must point to a valid buffer. + * @start_frame: Returns the initial frame for isochronous transfers. + * @number_of_packets: Lists the number of ISO transfer buffers. + * @interval: Specifies the polling interval for interrupt or isochronous + * transfers. The units are frames (milliseconds) for full and low + * speed devices, and microframes (1/8 millisecond) for highspeed + * and SuperSpeed devices. + * @error_count: Returns the number of ISO transfers that reported errors. + * @context: For use in completion functions. This normally points to + * request-specific driver context. 
+ * @complete: Completion handler. This URB is passed as the parameter to the + * completion function. The completion function may then do what + * it likes with the URB, including resubmitting or freeing it. + * @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to + * collect the transfer status for each buffer. + * + * This structure identifies USB transfer requests. URBs must be allocated by + * calling usb_alloc_urb() and freed with a call to usb_free_urb(). + * Initialization may be done using various usb_fill_*_urb() functions. URBs + * are submitted using usb_submit_urb(), and pending requests may be canceled + * using usb_unlink_urb() or usb_kill_urb(). + * + * Data Transfer Buffers: + * + * Normally drivers provide I/O buffers allocated with kmalloc() or otherwise + * taken from the general page pool. That is provided by transfer_buffer + * (control requests also use setup_packet), and host controller drivers + * perform a dma mapping (and unmapping) for each buffer transferred. Those + * mapping operations can be expensive on some platforms (perhaps using a dma + * bounce buffer or talking to an IOMMU), + * although they're cheap on commodity x86 and ppc hardware. + * + * Alternatively, drivers may pass the URB_NO_TRANSFER_DMA_MAP transfer flag, + * which tells the host controller driver that no such mapping is needed for + * the transfer_buffer since + * the device driver is DMA-aware. For example, a device driver might + * allocate a DMA buffer with usb_alloc_coherent() or call usb_buffer_map(). + * When this transfer flag is provided, host controller drivers will + * attempt to use the dma address found in the transfer_dma + * field rather than determining a dma address themselves. + * + * Note that transfer_buffer must still be set if the controller + * does not support DMA (as indicated by bus.uses_dma) and when talking + * to the root hub. If you have to transfer between a highmem zone and the device + * on such a controller, create a bounce buffer or bail out with an error. + * If transfer_buffer cannot be set (is in highmem) and the controller is DMA + * capable, assign NULL to it, so that usbmon knows not to use the value. + * The setup_packet must always be set, so it cannot be located in highmem. + * + * Initialization: + * + * All URBs submitted must initialize the dev, pipe, transfer_flags (may be + * zero), and complete fields. All URBs must also initialize + * transfer_buffer and transfer_buffer_length. They may provide the + * URB_SHORT_NOT_OK transfer flag, indicating that short reads are + * to be treated as errors; that flag is invalid for write requests. + * + * Bulk URBs may + * use the URB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers + * should always terminate with a short packet, even if it means adding an + * extra zero length packet. + * + * Control URBs must provide a valid pointer in the setup_packet field. + * Unlike the transfer_buffer, the setup_packet may not be mapped for DMA + * beforehand. + * + * Interrupt URBs must provide an interval, saying how often (in milliseconds + * or, for highspeed devices, 125 microsecond units) + * to poll for transfers. After the URB has been submitted, the interval + * field reflects how the transfer was actually scheduled. + * The polling interval may be more frequent than requested. + * For example, some controllers have a maximum interval of 32 milliseconds, + * while others support intervals of up to 1024 milliseconds. + * Isochronous URBs also have transfer intervals.
(Note that for isochronous + * endpoints, as well as high speed interrupt endpoints, the encoding of + * the transfer interval in the endpoint descriptor is logarithmic. + * Device drivers must convert that value to linear units themselves.) + * + * Isochronous URBs normally use the URB_ISO_ASAP transfer flag, telling + * the host controller to schedule the transfer as soon as bandwidth + * utilization allows, and then set start_frame to reflect the actual frame + * selected during submission. Otherwise drivers must specify the start_frame + * and handle the case where the transfer can't begin then. However, drivers + * won't know how bandwidth is currently allocated, and while they can + * find the current frame using usb_get_current_frame_number () they can't + * know the range for that frame number. (Ranges for frame counter values + * are HC-specific, and can go from 256 to 65536 frames from "now".) + * + * Isochronous URBs have a different data transfer model, in part because + * the quality of service is only "best effort". Callers provide specially + * allocated URBs, with number_of_packets worth of iso_frame_desc structures + * at the end. Each such packet is an individual ISO transfer. Isochronous + * URBs are normally queued, submitted by drivers to arrange that + * transfers are at least double buffered, and then explicitly resubmitted + * in completion handlers, so + * that data (such as audio or video) streams at as constant a rate as the + * host controller scheduler can support. + * + * Completion Callbacks: + * + * The completion callback is made in_interrupt(), and one of the first + * things that a completion handler should do is check the status field. + * The status field is provided for all URBs. It is used to report + * unlinked URBs, and status for all non-ISO transfers. It should not + * be examined before the URB is returned to the completion handler. + * + * The context field is normally used to link URBs back to the relevant + * driver or request state. + * + * When the completion callback is invoked for non-isochronous URBs, the + * actual_length field tells how many bytes were transferred. This field + * is updated even when the URB terminated with an error or was unlinked. + * + * ISO transfer status is reported in the status and actual_length fields + * of the iso_frame_desc array, and the number of errors is reported in + * error_count. Completion callbacks for ISO transfers will normally + * (re)submit URBs to ensure a constant transfer rate. + * + * Note that even fields marked "public" should not be touched by the driver + * when the urb is owned by the hcd, that is, since the call to + * usb_submit_urb() till the entry into the completion routine. 
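To make the lifecycle concrete, here is a minimal bulk OUT submission sketch loosely modeled on the kernel's usb-skeleton example (the skel_* names and the endpoint number are hypothetical; the buffer is assumed to come from kmalloc() so it is DMA-able):

static void skel_write_complete(struct urb *urb)
{
	if (urb->status)
		dev_err(&urb->dev->dev, "bulk write failed: %d\n", urb->status);
	/* URB_FREE_BUFFER was set, so the core frees the kmalloc()ed
	 * transfer buffer together with the URB */
}

static int skel_write(struct usb_device *udev, void *data, int len)
{
	struct urb *urb;
	int retval;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev, 2),	/* hypothetical endpoint */
			  data, len, skel_write_complete, NULL);
	urb->transfer_flags |= URB_FREE_BUFFER;

	retval = usb_submit_urb(urb, GFP_KERNEL);
	usb_free_urb(urb);	/* drop our reference; the core keeps its own
				 * until the completion handler has run */
	return retval;
}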
+ */ +struct urb { + /* private: usb core and host controller only fields in the urb */ + struct kref kref; /* reference count of the URB */ + void *hcpriv; /* private data for host controller */ + atomic_t use_count; /* concurrent submissions counter */ + atomic_t reject; /* submissions will fail */ + int unlinked; /* unlink error code */ + + /* public: documented fields in the urb that can be used by drivers */ + struct list_head urb_list; /* list head for use by the urb's + * current owner */ + struct list_head anchor_list; /* the URB may be anchored */ + struct usb_anchor *anchor; + struct usb_device *dev; /* (in) pointer to associated device */ + struct usb_host_endpoint *ep; /* (internal) pointer to endpoint */ + unsigned int pipe; /* (in) pipe information */ + unsigned int stream_id; /* (in) stream ID */ + int status; /* (return) non-ISO status */ + unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/ + void *transfer_buffer; /* (in) associated data buffer */ + dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ + struct scatterlist *sg; /* (in) scatter gather buffer list */ + int num_mapped_sgs; /* (internal) mapped sg entries */ + int num_sgs; /* (in) number of entries in the sg list */ + u32 transfer_buffer_length; /* (in) data buffer length */ + u32 actual_length; /* (return) actual transfer length */ + unsigned char *setup_packet; /* (in) setup packet (control only) */ + dma_addr_t setup_dma; /* (in) dma addr for setup_packet */ + int start_frame; /* (modify) start frame (ISO) */ + int number_of_packets; /* (in) number of ISO packets */ + int interval; /* (modify) transfer interval + * (INT/ISO) */ + int error_count; /* (return) number of ISO errors */ + void *context; /* (in) context for completion */ + usb_complete_t complete; /* (in) completion routine */ + struct usb_iso_packet_descriptor iso_frame_desc[0]; + /* (in) ISO ONLY */ +}; + +/* ----------------------------------------------------------------------- */ + +/** + * usb_fill_control_urb - initializes a control urb + * @urb: pointer to the urb to initialize. + * @dev: pointer to the struct usb_device for this urb. + * @pipe: the endpoint pipe + * @setup_packet: pointer to the setup_packet buffer + * @transfer_buffer: pointer to the transfer buffer + * @buffer_length: length of the transfer buffer + * @complete_fn: pointer to the usb_complete_t function + * @context: what to set the urb context to. + * + * Initializes a control urb with the proper information needed to submit + * it to a device. + */ +static inline void usb_fill_control_urb(struct urb *urb, + struct usb_device *dev, + unsigned int pipe, + unsigned char *setup_packet, + void *transfer_buffer, + int buffer_length, + usb_complete_t complete_fn, + void *context) +{ + urb->dev = dev; + urb->pipe = pipe; + urb->setup_packet = setup_packet; + urb->transfer_buffer = transfer_buffer; + urb->transfer_buffer_length = buffer_length; + urb->complete = complete_fn; + urb->context = context; +} + +/** + * usb_fill_bulk_urb - macro to help initialize a bulk urb + * @urb: pointer to the urb to initialize. + * @dev: pointer to the struct usb_device for this urb. + * @pipe: the endpoint pipe + * @transfer_buffer: pointer to the transfer buffer + * @buffer_length: length of the transfer buffer + * @complete_fn: pointer to the usb_complete_t function + * @context: what to set the urb context to. + * + * Initializes a bulk urb with the proper information needed to submit it + * to a device. 
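A corresponding sketch for usb_fill_control_urb() above, showing an asynchronous vendor IN request (the request code 0x01 and the skel_* name are invented; kmalloc()/kfree() need <linux/slab.h>, and the URB is assumed to have been allocated with usb_alloc_urb()):

static int skel_vendor_read(struct usb_device *udev, struct urb *urb,
			    void *buf, __u16 len, usb_complete_t done)
{
	struct usb_ctrlrequest *setup;
	int retval;

	setup = kmalloc(sizeof(*setup), GFP_KERNEL);	/* may be DMA-mapped */
	if (!setup)
		return -ENOMEM;

	setup->bRequestType = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
	setup->bRequest = 0x01;
	setup->wValue = cpu_to_le16(0);
	setup->wIndex = cpu_to_le16(0);
	setup->wLength = cpu_to_le16(len);

	/* setup is passed as context so the completion handler can kfree() it */
	usb_fill_control_urb(urb, udev, usb_rcvctrlpipe(udev, 0),
			     (unsigned char *)setup, buf, len, done, setup);

	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval)
		kfree(setup);	/* completion will not run on failure */
	return retval;
}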
+ */ +static inline void usb_fill_bulk_urb(struct urb *urb, + struct usb_device *dev, + unsigned int pipe, + void *transfer_buffer, + int buffer_length, + usb_complete_t complete_fn, + void *context) +{ + urb->dev = dev; + urb->pipe = pipe; + urb->transfer_buffer = transfer_buffer; + urb->transfer_buffer_length = buffer_length; + urb->complete = complete_fn; + urb->context = context; +} + +/** + * usb_fill_int_urb - macro to help initialize an interrupt urb + * @urb: pointer to the urb to initialize. + * @dev: pointer to the struct usb_device for this urb. + * @pipe: the endpoint pipe + * @transfer_buffer: pointer to the transfer buffer + * @buffer_length: length of the transfer buffer + * @complete_fn: pointer to the usb_complete_t function + * @context: what to set the urb context to. + * @interval: what to set the urb interval to, encoded like + * the endpoint descriptor's bInterval value. + * + * Initializes an interrupt urb with the proper information needed to submit + * it to a device. + * + * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic + * encoding of the endpoint interval, and express polling intervals in + * microframes (eight per millisecond) rather than in frames (one per + * millisecond). + * + * Wireless USB also uses the logarithmic encoding, but specifies it in units of + * 128us instead of 125us. For Wireless USB devices, the interval is passed + * through to the host controller, rather than being translated into microframe + * units. + */ +static inline void usb_fill_int_urb(struct urb *urb, + struct usb_device *dev, + unsigned int pipe, + void *transfer_buffer, + int buffer_length, + usb_complete_t complete_fn, + void *context, + int interval) +{ + urb->dev = dev; + urb->pipe = pipe; + urb->transfer_buffer = transfer_buffer; + urb->transfer_buffer_length = buffer_length; + urb->complete = complete_fn; + urb->context = context; + if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) + urb->interval = 1 << (interval - 1); + else + urb->interval = interval; + urb->start_frame = -1; +} + +extern void usb_init_urb(struct urb *urb); +extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags); +extern void usb_free_urb(struct urb *urb); +#define usb_put_urb usb_free_urb +extern struct urb *usb_get_urb(struct urb *urb); +extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags); +extern int usb_unlink_urb(struct urb *urb); +extern void usb_kill_urb(struct urb *urb); +extern void usb_poison_urb(struct urb *urb); +extern void usb_unpoison_urb(struct urb *urb); +extern void usb_block_urb(struct urb *urb); +extern void usb_kill_anchored_urbs(struct usb_anchor *anchor); +extern void usb_poison_anchored_urbs(struct usb_anchor *anchor); +extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor); +extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor); +extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor); +extern void usb_unanchor_urb(struct urb *urb); +extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor, + unsigned int timeout); +extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor); +extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor); +extern int usb_anchor_empty(struct usb_anchor *anchor); + +#define usb_unblock_urb usb_unpoison_urb + +/** + * usb_urb_dir_in - check if an URB describes an IN transfer + * @urb: URB to be checked + * + * Returns 1 if @urb describes an IN transfer (device-to-host), + * otherwise 0.
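As an illustration of usb_fill_int_urb() above, a submission sketch that takes the polling interval straight from the endpoint descriptor, which is exactly the encoding the interval conversion expects (the skel_* name is hypothetical; the endpoint would typically come from the interface's current altsetting):

static int skel_start_int_in(struct usb_device *udev, struct urb *urb,
			     struct usb_host_endpoint *ep,
			     void *buf, int len, usb_complete_t done)
{
	usb_fill_int_urb(urb, udev,
			 usb_rcvintpipe(udev, ep->desc.bEndpointAddress),
			 buf, len, done, NULL, ep->desc.bInterval);
	return usb_submit_urb(urb, GFP_KERNEL);
}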
+ */ +static inline int usb_urb_dir_in(struct urb *urb) +{ + return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN; +} + +/** + * usb_urb_dir_out - check if an URB describes an OUT transfer + * @urb: URB to be checked + * + * Returns 1 if @urb describes an OUT transfer (host-to-device), + * otherwise 0. + */ +static inline int usb_urb_dir_out(struct urb *urb) +{ + return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT; +} + +void *usb_alloc_coherent(struct usb_device *dev, size_t size, + gfp_t mem_flags, dma_addr_t *dma); +void usb_free_coherent(struct usb_device *dev, size_t size, + void *addr, dma_addr_t dma); + +#if 0 +struct urb *usb_buffer_map(struct urb *urb); +void usb_buffer_dmasync(struct urb *urb); +void usb_buffer_unmap(struct urb *urb); +#endif + +struct scatterlist; +int usb_buffer_map_sg(const struct usb_device *dev, int is_in, + struct scatterlist *sg, int nents); +#if 0 +void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in, + struct scatterlist *sg, int n_hw_ents); +#endif +void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, + struct scatterlist *sg, int n_hw_ents); + +/*-------------------------------------------------------------------* + * SYNCHRONOUS CALL SUPPORT * + *-------------------------------------------------------------------*/ + +extern int usb_control_msg(struct usb_device *dev, unsigned int pipe, + __u8 request, __u8 requesttype, __u16 value, __u16 index, + void *data, __u16 size, int timeout); +extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, + void *data, int len, int *actual_length, int timeout); +extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, + void *data, int len, int *actual_length, + int timeout); + +/* wrappers around usb_control_msg() for the most common standard requests */ +extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype, + unsigned char descindex, void *buf, int size); +extern int usb_get_status(struct usb_device *dev, + int type, int target, void *data); +extern int usb_string(struct usb_device *dev, int index, + char *buf, size_t size); + +/* wrappers that also update important state inside usbcore */ +extern int usb_clear_halt(struct usb_device *dev, int pipe); +extern int usb_reset_configuration(struct usb_device *dev); +extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate); +extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr); + +/* this request isn't really synchronous, but it belongs with the others */ +extern int usb_driver_set_configuration(struct usb_device *udev, int config); + +/* + * timeouts, in milliseconds, used for sending/receiving control messages + * they typically complete within a few frames (msec) after they're issued + * USB identifies 5 second timeouts, maybe more in a few cases, and a few + * slow devices (like some MGE Ellipse UPSes) actually push that limit. + */ +#define USB_CTRL_GET_TIMEOUT 5000 +#define USB_CTRL_SET_TIMEOUT 5000 + + +/** + * struct usb_sg_request - support for scatter/gather I/O + * @status: zero indicates success, else negative errno + * @bytes: counts bytes transferred. + * + * These requests are initialized using usb_sg_init(), and then are used + * as request handles passed to usb_sg_wait() or usb_sg_cancel(). Most + * members of the request object aren't for driver access. + * + * The status and bytecount values are valid only after usb_sg_wait() + * returns. 
If the status is zero, then the bytecount matches the total + * from the request. + * + * After an error completion, drivers may need to clear a halt condition + * on the endpoint. + */ +struct usb_sg_request { + int status; + size_t bytes; + + /* private: + * members below are private to usbcore, + * and are not provided for driver access! + */ + spinlock_t lock; + + struct usb_device *dev; + int pipe; + + int entries; + struct urb **urbs; + + int count; + struct completion complete; +}; + +int usb_sg_init( + struct usb_sg_request *io, + struct usb_device *dev, + unsigned pipe, + unsigned period, + struct scatterlist *sg, + int nents, + size_t length, + gfp_t mem_flags +); +void usb_sg_cancel(struct usb_sg_request *io); +void usb_sg_wait(struct usb_sg_request *io); + + +/* ----------------------------------------------------------------------- */ + +/* + * For various legacy reasons, Linux has a small cookie that's paired with + * a struct usb_device to identify an endpoint queue. Queue characteristics + * are defined by the endpoint's descriptor. This cookie is called a "pipe", + * an unsigned int encoded as: + * + * - direction: bit 7 (0 = Host-to-Device [Out], + * 1 = Device-to-Host [In] ... + * like endpoint bEndpointAddress) + * - device address: bits 8-14 ... bit positions known to uhci-hcd + * - endpoint: bits 15-18 ... bit positions known to uhci-hcd + * - pipe type: bits 30-31 (00 = isochronous, 01 = interrupt, + * 10 = control, 11 = bulk) + * + * Given the device address and endpoint descriptor, pipes are redundant. + */ + +/* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */ +/* (yet ... they're the values used by usbfs) */ +#define PIPE_ISOCHRONOUS 0 +#define PIPE_INTERRUPT 1 +#define PIPE_CONTROL 2 +#define PIPE_BULK 3 + +#define usb_pipein(pipe) ((pipe) & USB_DIR_IN) +#define usb_pipeout(pipe) (!usb_pipein(pipe)) + +#define usb_pipedevice(pipe) (((pipe) >> 8) & 0x7f) +#define usb_pipeendpoint(pipe) (((pipe) >> 15) & 0xf) + +#define usb_pipetype(pipe) (((pipe) >> 30) & 3) +#define usb_pipeisoc(pipe) (usb_pipetype((pipe)) == PIPE_ISOCHRONOUS) +#define usb_pipeint(pipe) (usb_pipetype((pipe)) == PIPE_INTERRUPT) +#define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL) +#define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK) + +static inline unsigned int __create_pipe(struct usb_device *dev, + unsigned int endpoint) +{ + return (dev->devnum << 8) | (endpoint << 15); +} + +/* Create various pipes... */ +#define usb_sndctrlpipe(dev, endpoint) \ + ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint)) +#define usb_rcvctrlpipe(dev, endpoint) \ + ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) +#define usb_sndisocpipe(dev, endpoint) \ + ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint)) +#define usb_rcvisocpipe(dev, endpoint) \ + ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) +#define usb_sndbulkpipe(dev, endpoint) \ + ((PIPE_BULK << 30) | __create_pipe(dev, endpoint)) +#define usb_rcvbulkpipe(dev, endpoint) \ + ((PIPE_BULK << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) +#define usb_sndintpipe(dev, endpoint) \ + ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint)) +#define usb_rcvintpipe(dev, endpoint) \ + ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) + +static inline struct usb_host_endpoint * +usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe) +{ + struct usb_host_endpoint **eps; + eps = usb_pipein(pipe) ? 
dev->ep_in : dev->ep_out; + return eps[usb_pipeendpoint(pipe)]; +} + +/*-------------------------------------------------------------------------*/ + +static inline __u16 +usb_maxpacket(struct usb_device *udev, int pipe, int is_out) +{ + struct usb_host_endpoint *ep; + unsigned epnum = usb_pipeendpoint(pipe); + + if (is_out) { + WARN_ON(usb_pipein(pipe)); + ep = udev->ep_out[epnum]; + } else { + WARN_ON(usb_pipeout(pipe)); + ep = udev->ep_in[epnum]; + } + if (!ep) + return 0; + + /* NOTE: only 0x07ff bits are for packet size... */ + return usb_endpoint_maxp(&ep->desc); +} + +/* ----------------------------------------------------------------------- */ + +/* translate USB error codes to codes user space understands */ +static inline int usb_translate_errors(int error_code) +{ + switch (error_code) { + case 0: + case -ENOMEM: + case -ENODEV: + return error_code; + default: + return -EIO; + } +} + +/* Events from the usb core */ +#define USB_DEVICE_ADD 0x0001 +#define USB_DEVICE_REMOVE 0x0002 +#define USB_BUS_ADD 0x0003 +#define USB_BUS_REMOVE 0x0004 +extern void usb_register_notify(struct notifier_block *nb); +extern void usb_unregister_notify(struct notifier_block *nb); + +#ifdef DEBUG +#define dbg(format, arg...) \ + printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg) +#else +#define dbg(format, arg...) \ +do { \ + if (0) \ + printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \ +} while (0) +#endif + +#define err(format, arg...) \ + printk(KERN_ERR KBUILD_MODNAME ": " format "\n", ##arg) + +/* debugfs stuff */ +extern struct dentry *usb_debug_root; + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild new file mode 100644 index 00000000..b607f353 --- /dev/null +++ b/include/linux/usb/Kbuild @@ -0,0 +1,10 @@ +header-y += audio.h +header-y += cdc.h +header-y += ch9.h +header-y += ch11.h +header-y += functionfs.h +header-y += gadgetfs.h +header-y += midi.h +header-y += g_printer.h +header-y += tmc.h +header-y += video.h diff --git a/include/linux/usb/association.h b/include/linux/usb/association.h new file mode 100644 index 00000000..0a4a18b3 --- /dev/null +++ b/include/linux/usb/association.h @@ -0,0 +1,150 @@ +/* + * Wireless USB - Cable Based Association + * + * Copyright (C) 2006 Intel Corporation + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + */ +#ifndef __LINUX_USB_ASSOCIATION_H +#define __LINUX_USB_ASSOCIATION_H + + +/* + * Association attributes + * + * Association Models Supplement to WUSB 1.0 T[3-1] + * + * Each field in the structures has it's ID, it's length and then the + * value. This is the actual definition of the field's ID and its + * length. 
+ */ +struct wusb_am_attr { + __u8 id; + __u8 len; +}; + +/* Different fields defined by the spec */ +#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) } +#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) } +#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) } +#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) } +#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) } +#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */ +#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */ +#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) } +#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) } +#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) } +#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) } + +/* CBAF Control Requests (AMS1.0[T4-1] */ +enum { + CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01, + CBAF_REQ_GET_ASSOCIATION_REQUEST, + CBAF_REQ_SET_ASSOCIATION_RESPONSE +}; + +/* + * CBAF USB-interface defitions + * + * No altsettings, one optional interrupt endpoint. + */ +enum { + CBAF_IFACECLASS = 0xef, + CBAF_IFACESUBCLASS = 0x03, + CBAF_IFACEPROTOCOL = 0x01, +}; + +/* Association Information (AMS1.0[T4-3]) */ +struct wusb_cbaf_assoc_info { + __le16 Length; + __u8 NumAssociationRequests; + __le16 Flags; + __u8 AssociationRequestsArray[]; +} __attribute__((packed)); + +/* Association Request (AMS1.0[T4-4]) */ +struct wusb_cbaf_assoc_request { + __u8 AssociationDataIndex; + __u8 Reserved; + __le16 AssociationTypeId; + __le16 AssociationSubTypeId; + __le32 AssociationTypeInfoSize; +} __attribute__((packed)); + +enum { + AR_TYPE_WUSB = 0x0001, + AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000, + AR_TYPE_WUSB_ASSOCIATE = 0x0001, +}; + +/* Association Attribute header (AMS1.0[3.8]) */ +struct wusb_cbaf_attr_hdr { + __le16 id; + __le16 len; +} __attribute__((packed)); + +/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */ +struct wusb_cbaf_host_info { + struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; + __le16 AssociationTypeId; + struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; + __le16 AssociationSubTypeId; + struct wusb_cbaf_attr_hdr CHID_hdr; + struct wusb_ckhdid CHID; + struct wusb_cbaf_attr_hdr LangID_hdr; + __le16 LangID; + struct wusb_cbaf_attr_hdr HostFriendlyName_hdr; + __u8 HostFriendlyName[]; +} __attribute__((packed)); + +/* Device Info (AMS1.0[T4-8]) + * + * I still don't get this tag'n'header stuff for each goddamn + * field... 
+ */ +struct wusb_cbaf_device_info { + struct wusb_cbaf_attr_hdr Length_hdr; + __le32 Length; + struct wusb_cbaf_attr_hdr CDID_hdr; + struct wusb_ckhdid CDID; + struct wusb_cbaf_attr_hdr BandGroups_hdr; + __le16 BandGroups; + struct wusb_cbaf_attr_hdr LangID_hdr; + __le16 LangID; + struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr; + __u8 DeviceFriendlyName[]; +} __attribute__((packed)); + +/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */ +struct wusb_cbaf_cc_data { + struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; + __le16 AssociationTypeId; + struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; + __le16 AssociationSubTypeId; + struct wusb_cbaf_attr_hdr Length_hdr; + __le32 Length; + struct wusb_cbaf_attr_hdr ConnectionContext_hdr; + struct wusb_ckhdid CHID; + struct wusb_ckhdid CDID; + struct wusb_ckhdid CK; + struct wusb_cbaf_attr_hdr BandGroups_hdr; + __le16 BandGroups; +} __attribute__((packed)); + +/* CC_DATA - Failure case (AMS1.0[T4-10]) */ +struct wusb_cbaf_cc_data_fail { + struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; + __le16 AssociationTypeId; + struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; + __le16 AssociationSubTypeId; + struct wusb_cbaf_attr_hdr Length_hdr; + __le16 Length; + struct wusb_cbaf_attr_hdr AssociationStatus_hdr; + __u32 AssociationStatus; +} __attribute__((packed)); + +#endif /* __LINUX_USB_ASSOCIATION_H */ diff --git a/include/linux/usb/atmel_usba_udc.h b/include/linux/usb/atmel_usba_udc.h new file mode 100644 index 00000000..ba99af27 --- /dev/null +++ b/include/linux/usb/atmel_usba_udc.h @@ -0,0 +1,23 @@ +/* + * Platform data definitions for Atmel USBA gadget driver. + */ +#ifndef __LINUX_USB_USBA_H +#define __LINUX_USB_USBA_H + +struct usba_ep_data { + char *name; + int index; + int fifo_size; + int nr_banks; + int can_dma; + int can_isoc; +}; + +struct usba_platform_data { + int vbus_pin; + int vbus_pin_inverted; + int num_ep; + struct usba_ep_data ep[0]; +}; + +#endif /* __LINUX_USB_USBA_H */ diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h new file mode 100644 index 00000000..ed130531 --- /dev/null +++ b/include/linux/usb/audio-v2.h @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2010 Daniel Mack <daniel@caiaq.de> + * + * This software is distributed under the terms of the GNU General Public + * License ("GPL") version 2, as published by the Free Software Foundation. + * + * This file holds USB constants and structures defined + * by the USB Device Class Definition for Audio Devices in version 2.0. + * Comments below reference relevant sections of the documents contained + * in http://www.usb.org/developers/devclass_docs/Audio2.0_final.zip + */ + +#ifndef __LINUX_USB_AUDIO_V2_H +#define __LINUX_USB_AUDIO_V2_H + +#include <linux/types.h> + +/* v1.0 and v2.0 of this standard have many things in common. For the rest + * of the definitions, please refer to audio.h */ + +/* + * bmControl field decoders + * + * From the USB Audio spec v2.0: + * + * bmaControls() is a (ch+1)-element array of 4-byte bitmaps, + * each containing a set of bit pairs. If a Control is present, + * it must be Host readable. If a certain Control is not + * present then the bit pair must be set to 0b00. + * If a Control is present but read-only, the bit pair must be + * set to 0b01. If a Control is also Host programmable, the bit + * pair must be set to 0b11. The value 0b10 is not allowed. 
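For example, with bmaControls = 0x0000000d (bit pairs, low to high: 0b01, 0b11, 0b00), control 0 is read-only, control 1 is also host programmable, and control 2 is absent. A small sketch using the accessors defined just below:

static bool uac2_control_is_read_write(u32 bmControls, u8 control)
{
	/* both bits of the pair must be set (0b11) */
	return uac2_control_is_readable(bmControls, control) &&
	       uac2_control_is_writeable(bmControls, control);
}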
+ * + */ + +static inline bool uac2_control_is_readable(u32 bmControls, u8 control) +{ + return (bmControls >> (control * 2)) & 0x1; +} + +static inline bool uac2_control_is_writeable(u32 bmControls, u8 control) +{ + return (bmControls >> (control * 2)) & 0x2; +} + +/* 4.7.2 Class-Specific AC Interface Descriptor */ +struct uac2_ac_header_descriptor { + __u8 bLength; /* 9 */ + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ + __le16 bcdADC; /* 0x0200 */ + __u8 bCategory; + __le16 wTotalLength; /* includes Unit and Terminal desc. */ + __u8 bmControls; +} __packed; + +/* 2.3.1.6 Type I Format Type Descriptor (Frmts20 final.pdf)*/ +struct uac2_format_type_i_descriptor { + __u8 bLength; /* in bytes: 6 */ + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* FORMAT_TYPE */ + __u8 bFormatType; /* FORMAT_TYPE_1 */ + __u8 bSubslotSize; /* {1,2,3,4} */ + __u8 bBitResolution; +} __packed; + +/* 4.7.2.1 Clock Source Descriptor */ + +struct uac_clock_source_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bClockID; + __u8 bmAttributes; + __u8 bmControls; + __u8 bAssocTerminal; + __u8 iClockSource; +} __attribute__((packed)); + +/* bmAttribute fields */ +#define UAC_CLOCK_SOURCE_TYPE_EXT 0x0 +#define UAC_CLOCK_SOURCE_TYPE_INT_FIXED 0x1 +#define UAC_CLOCK_SOURCE_TYPE_INT_VAR 0x2 +#define UAC_CLOCK_SOURCE_TYPE_INT_PROG 0x3 +#define UAC_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 2) + +/* 4.7.2.2 Clock Source Descriptor */ + +struct uac_clock_selector_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bClockID; + __u8 bNrInPins; + __u8 baCSourceID[]; + /* bmControls, bAssocTerminal and iClockSource omitted */ +} __attribute__((packed)); + +/* 4.7.2.3 Clock Multiplier Descriptor */ + +struct uac_clock_multiplier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bClockID; + __u8 bCSourceID; + __u8 bmControls; + __u8 iClockMultiplier; +} __attribute__((packed)); + +/* 4.7.2.4 Input terminal descriptor */ + +struct uac2_input_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 bCSourceID; + __u8 bNrChannels; + __u32 bmChannelConfig; + __u8 iChannelNames; + __u16 bmControls; + __u8 iTerminal; +} __attribute__((packed)); + +/* 4.7.2.5 Output terminal descriptor */ + +struct uac2_output_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 bSourceID; + __u8 bCSourceID; + __u16 bmControls; + __u8 iTerminal; +} __attribute__((packed)); + + + +/* 4.7.2.8 Feature Unit Descriptor */ + +struct uac2_feature_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bUnitID; + __u8 bSourceID; + /* bmaControls is actually u32, + * but u8 is needed for the hybrid parser */ + __u8 bmaControls[0]; /* variable length */ +} __attribute__((packed)); + +/* 4.9.2 Class-Specific AS Interface Descriptor */ + +struct uac2_as_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bTerminalLink; + __u8 bmControls; + __u8 bFormatType; + __u32 bmFormats; + __u8 bNrChannels; + __u32 bmChannelConfig; + __u8 iChannelNames; +} __attribute__((packed)); + +/* 4.10.1.2 Class-Specific AS Isochronous Audio Data Endpoint Descriptor */ + +struct uac2_iso_endpoint_descriptor { + __u8 bLength; 
/* in bytes: 8 */ + __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ + __u8 bDescriptorSubtype; /* EP_GENERAL */ + __u8 bmAttributes; + __u8 bmControls; + __u8 bLockDelayUnits; + __le16 wLockDelay; +} __attribute__((packed)); + +#define UAC2_CONTROL_PITCH (3 << 0) +#define UAC2_CONTROL_DATA_OVERRUN (3 << 2) +#define UAC2_CONTROL_DATA_UNDERRUN (3 << 4) + +/* 6.1 Interrupt Data Message */ + +#define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0) +#define UAC2_INTERRUPT_DATA_MSG_EP (1 << 1) + +struct uac2_interrupt_data_msg { + __u8 bInfo; + __u8 bAttribute; + __le16 wValue; + __le16 wIndex; +} __attribute__((packed)); + +/* A.7 Audio Function Category Codes */ +#define UAC2_FUNCTION_SUBCLASS_UNDEFINED 0x00 +#define UAC2_FUNCTION_DESKTOP_SPEAKER 0x01 +#define UAC2_FUNCTION_HOME_THEATER 0x02 +#define UAC2_FUNCTION_MICROPHONE 0x03 +#define UAC2_FUNCTION_HEADSET 0x04 +#define UAC2_FUNCTION_TELEPHONE 0x05 +#define UAC2_FUNCTION_CONVERTER 0x06 +#define UAC2_FUNCTION_SOUND_RECORDER 0x07 +#define UAC2_FUNCTION_IO_BOX 0x08 +#define UAC2_FUNCTION_MUSICAL_INSTRUMENT 0x09 +#define UAC2_FUNCTION_PRO_AUDIO 0x0a +#define UAC2_FUNCTION_AUDIO_VIDEO 0x0b +#define UAC2_FUNCTION_CONTROL_PANEL 0x0c +#define UAC2_FUNCTION_OTHER 0xff + +/* A.9 Audio Class-Specific AC Interface Descriptor Subtypes */ +/* see audio.h for the rest, which is identical to v1 */ +#define UAC2_EFFECT_UNIT 0x07 +#define UAC2_PROCESSING_UNIT_V2 0x08 +#define UAC2_EXTENSION_UNIT_V2 0x09 +#define UAC2_CLOCK_SOURCE 0x0a +#define UAC2_CLOCK_SELECTOR 0x0b +#define UAC2_CLOCK_MULTIPLIER 0x0c +#define UAC2_SAMPLE_RATE_CONVERTER 0x0d + +/* A.10 Audio Class-Specific AS Interface Descriptor Subtypes */ +/* see audio.h for the rest, which is identical to v1 */ +#define UAC2_ENCODER 0x03 +#define UAC2_DECODER 0x04 + +/* A.11 Effect Unit Effect Types */ +#define UAC2_EFFECT_UNDEFINED 0x00 +#define UAC2_EFFECT_PARAM_EQ 0x01 +#define UAC2_EFFECT_REVERB 0x02 +#define UAC2_EFFECT_MOD_DELAY 0x03 +#define UAC2_EFFECT_DYN_RANGE_COMP 0x04 + +/* A.12 Processing Unit Process Types */ +#define UAC2_PROCESS_UNDEFINED 0x00 +#define UAC2_PROCESS_UP_DOWNMIX 0x01 +#define UAC2_PROCESS_DOLBY_PROLOCIC 0x02 +#define UAC2_PROCESS_STEREO_EXTENDER 0x03 + +/* A.14 Audio Class-Specific Request Codes */ +#define UAC2_CS_CUR 0x01 +#define UAC2_CS_RANGE 0x02 +#define UAC2_CS_MEM 0x03 + +/* A.15 Encoder Type Codes */ +#define UAC2_ENCODER_UNDEFINED 0x00 +#define UAC2_ENCODER_OTHER 0x01 +#define UAC2_ENCODER_MPEG 0x02 +#define UAC2_ENCODER_AC3 0x03 +#define UAC2_ENCODER_WMA 0x04 +#define UAC2_ENCODER_DTS 0x05 + +/* A.16 Decoder Type Codes */ +#define UAC2_DECODER_UNDEFINED 0x00 +#define UAC2_DECODER_OTHER 0x01 +#define UAC2_DECODER_MPEG 0x02 +#define UAC2_DECODER_AC3 0x03 +#define UAC2_DECODER_WMA 0x04 +#define UAC2_DECODER_DTS 0x05 + +/* A.17.1 Clock Source Control Selectors */ +#define UAC2_CS_UNDEFINED 0x00 +#define UAC2_CS_CONTROL_SAM_FREQ 0x01 +#define UAC2_CS_CONTROL_CLOCK_VALID 0x02 + +/* A.17.2 Clock Selector Control Selectors */ +#define UAC2_CX_UNDEFINED 0x00 +#define UAC2_CX_CLOCK_SELECTOR 0x01 + +/* A.17.3 Clock Multiplier Control Selectors */ +#define UAC2_CM_UNDEFINED 0x00 +#define UAC2_CM_NUMERATOR 0x01 +#define UAC2_CM_DENOMINTATOR 0x02 + +/* A.17.4 Terminal Control Selectors */ +#define UAC2_TE_UNDEFINED 0x00 +#define UAC2_TE_COPY_PROTECT 0x01 +#define UAC2_TE_CONNECTOR 0x02 +#define UAC2_TE_OVERLOAD 0x03 +#define UAC2_TE_CLUSTER 0x04 +#define UAC2_TE_UNDERFLOW 0x05 +#define UAC2_TE_OVERFLOW 0x06 +#define UAC2_TE_LATENCY 0x07 + +/* A.17.5 Mixer Control Selectors */ 
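/*
 * Editorial sketch (not part of this header or patch): how a driver might
 * combine the uac2_control_is_readable()/uac2_control_is_writeable() helpers
 * defined above with a Clock Source descriptor.  bmControls packs two bits
 * per control and the helpers take a zero-based index, so the one-based
 * A.17.1 selectors are decremented first.  The helper name is illustrative
 * and the descriptor pointer is assumed to come from the caller's own
 * descriptor parsing.
 */
static inline bool uac2_clock_rate_is_programmable(
		const struct uac_clock_source_descriptor *cs)
{
	return uac2_control_is_readable(cs->bmControls,
					UAC2_CS_CONTROL_SAM_FREQ - 1) &&
	       uac2_control_is_writeable(cs->bmControls,
					 UAC2_CS_CONTROL_SAM_FREQ - 1);
}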
+#define UAC2_MU_UNDEFINED 0x00 +#define UAC2_MU_MIXER 0x01 +#define UAC2_MU_CLUSTER 0x02 +#define UAC2_MU_UNDERFLOW 0x03 +#define UAC2_MU_OVERFLOW 0x04 +#define UAC2_MU_LATENCY 0x05 + +/* A.17.6 Selector Control Selectors */ +#define UAC2_SU_UNDEFINED 0x00 +#define UAC2_SU_SELECTOR 0x01 +#define UAC2_SU_LATENCY 0x02 + +/* A.17.7 Feature Unit Control Selectors */ +/* see audio.h for the rest, which is identical to v1 */ +#define UAC2_FU_INPUT_GAIN 0x0b +#define UAC2_FU_INPUT_GAIN_PAD 0x0c +#define UAC2_FU_PHASE_INVERTER 0x0d +#define UAC2_FU_UNDERFLOW 0x0e +#define UAC2_FU_OVERFLOW 0x0f +#define UAC2_FU_LATENCY 0x10 + +/* A.17.8.1 Parametric Equalizer Section Effect Unit Control Selectors */ +#define UAC2_PE_UNDEFINED 0x00 +#define UAC2_PE_ENABLE 0x01 +#define UAC2_PE_CENTERFREQ 0x02 +#define UAC2_PE_QFACTOR 0x03 +#define UAC2_PE_GAIN 0x04 +#define UAC2_PE_UNDERFLOW 0x05 +#define UAC2_PE_OVERFLOW 0x06 +#define UAC2_PE_LATENCY 0x07 + +/* A.17.8.2 Reverberation Effect Unit Control Selectors */ +#define UAC2_RV_UNDEFINED 0x00 +#define UAC2_RV_ENABLE 0x01 +#define UAC2_RV_TYPE 0x02 +#define UAC2_RV_LEVEL 0x03 +#define UAC2_RV_TIME 0x04 +#define UAC2_RV_FEEDBACK 0x05 +#define UAC2_RV_PREDELAY 0x06 +#define UAC2_RV_DENSITY 0x07 +#define UAC2_RV_HIFREQ_ROLLOFF 0x08 +#define UAC2_RV_UNDERFLOW 0x09 +#define UAC2_RV_OVERFLOW 0x0a +#define UAC2_RV_LATENCY 0x0b + +/* A.17.8.3 Modulation Delay Effect Control Selectors */ +#define UAC2_MD_UNDEFINED 0x00 +#define UAC2_MD_ENABLE 0x01 +#define UAC2_MD_BALANCE 0x02 +#define UAC2_MD_RATE 0x03 +#define UAC2_MD_DEPTH 0x04 +#define UAC2_MD_TIME 0x05 +#define UAC2_MD_FEEDBACK 0x06 +#define UAC2_MD_UNDERFLOW 0x07 +#define UAC2_MD_OVERFLOW 0x08 +#define UAC2_MD_LATENCY 0x09 + +/* A.17.8.4 Dynamic Range Compressor Effect Unit Control Selectors */ +#define UAC2_DR_UNDEFINED 0x00 +#define UAC2_DR_ENABLE 0x01 +#define UAC2_DR_COMPRESSION_RATE 0x02 +#define UAC2_DR_MAXAMPL 0x03 +#define UAC2_DR_THRESHOLD 0x04 +#define UAC2_DR_ATTACK_TIME 0x05 +#define UAC2_DR_RELEASE_TIME 0x06 +#define UAC2_DR_UNDEFLOW 0x07 +#define UAC2_DR_OVERFLOW 0x08 +#define UAC2_DR_LATENCY 0x09 + +/* A.17.9.1 Up/Down-mix Processing Unit Control Selectors */ +#define UAC2_UD_UNDEFINED 0x00 +#define UAC2_UD_ENABLE 0x01 +#define UAC2_UD_MODE_SELECT 0x02 +#define UAC2_UD_CLUSTER 0x03 +#define UAC2_UD_UNDERFLOW 0x04 +#define UAC2_UD_OVERFLOW 0x05 +#define UAC2_UD_LATENCY 0x06 + +/* A.17.9.2 Dolby Prologic[tm] Processing Unit Control Selectors */ +#define UAC2_DP_UNDEFINED 0x00 +#define UAC2_DP_ENABLE 0x01 +#define UAC2_DP_MODE_SELECT 0x02 +#define UAC2_DP_CLUSTER 0x03 +#define UAC2_DP_UNDERFFLOW 0x04 +#define UAC2_DP_OVERFLOW 0x05 +#define UAC2_DP_LATENCY 0x06 + +/* A.17.9.3 Stereo Expander Processing Unit Control Selectors */ +#define UAC2_ST_EXT_UNDEFINED 0x00 +#define UAC2_ST_EXT_ENABLE 0x01 +#define UAC2_ST_EXT_WIDTH 0x02 +#define UAC2_ST_EXT_UNDEFLOW 0x03 +#define UAC2_ST_EXT_OVERFLOW 0x04 +#define UAC2_ST_EXT_LATENCY 0x05 + +/* A.17.10 Extension Unit Control Selectors */ +#define UAC2_XU_UNDEFINED 0x00 +#define UAC2_XU_ENABLE 0x01 +#define UAC2_XU_CLUSTER 0x02 +#define UAC2_XU_UNDERFLOW 0x03 +#define UAC2_XU_OVERFLOW 0x04 +#define UAC2_XU_LATENCY 0x05 + +/* A.17.11 AudioStreaming Interface Control Selectors */ +#define UAC2_AS_UNDEFINED 0x00 +#define UAC2_AS_ACT_ALT_SETTING 0x01 +#define UAC2_AS_VAL_ALT_SETTINGS 0x02 +#define UAC2_AS_AUDIO_DATA_FORMAT 0x03 + +/* A.17.12 Encoder Control Selectors */ +#define UAC2_EN_UNDEFINED 0x00 +#define UAC2_EN_BIT_RATE 0x01 +#define UAC2_EN_QUALITY 0x02 
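/*
 * Editorial sketch (not part of this header or patch): field packing for a
 * UAC2 class-specific request addressed to an Audio Control entity, as used
 * with the A.14 request codes (UAC2_CS_CUR/UAC2_CS_RANGE) and the control
 * selectors above.  Per the UAC2 request layout, wValue carries the control
 * selector in the high byte and the channel number in the low byte, while
 * wIndex carries the entity ID in the high byte and the AC interface number
 * in the low byte.  The helper names are illustrative only.
 */
static inline __u16 uac2_ctrl_wvalue(__u8 control_selector, __u8 channel)
{
	return (control_selector << 8) | channel;
}

static inline __u16 uac2_ctrl_windex(__u8 entity_id, __u8 ac_interface)
{
	return (entity_id << 8) | ac_interface;
}

/*
 * Example (hypothetical IDs): reading the current sampling frequency of a
 * clock source with entity ID 0x10 on AC interface 0 would use
 * bRequest = UAC2_CS_CUR,
 * wValue  = uac2_ctrl_wvalue(UAC2_CS_CONTROL_SAM_FREQ, 0) and
 * wIndex  = uac2_ctrl_windex(0x10, 0), with a 4-byte parameter block.
 */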
+#define UAC2_EN_VBR 0x03 +#define UAC2_EN_TYPE 0x04 +#define UAC2_EN_UNDERFLOW 0x05 +#define UAC2_EN_OVERFLOW 0x06 +#define UAC2_EN_ENCODER_ERROR 0x07 +#define UAC2_EN_PARAM1 0x08 +#define UAC2_EN_PARAM2 0x09 +#define UAC2_EN_PARAM3 0x0a +#define UAC2_EN_PARAM4 0x0b +#define UAC2_EN_PARAM5 0x0c +#define UAC2_EN_PARAM6 0x0d +#define UAC2_EN_PARAM7 0x0e +#define UAC2_EN_PARAM8 0x0f + +/* A.17.13.1 MPEG Decoder Control Selectors */ +#define UAC2_MPEG_UNDEFINED 0x00 +#define UAC2_MPEG_DUAL_CHANNEL 0x01 +#define UAC2_MPEG_SECOND_STEREO 0x02 +#define UAC2_MPEG_MULTILINGUAL 0x03 +#define UAC2_MPEG_DYN_RANGE 0x04 +#define UAC2_MPEG_SCALING 0x05 +#define UAC2_MPEG_HILO_SCALING 0x06 +#define UAC2_MPEG_UNDERFLOW 0x07 +#define UAC2_MPEG_OVERFLOW 0x08 +#define UAC2_MPEG_DECODER_ERROR 0x09 + +/* A17.13.2 AC3 Decoder Control Selectors */ +#define UAC2_AC3_UNDEFINED 0x00 +#define UAC2_AC3_MODE 0x01 +#define UAC2_AC3_DYN_RANGE 0x02 +#define UAC2_AC3_SCALING 0x03 +#define UAC2_AC3_HILO_SCALING 0x04 +#define UAC2_AC3_UNDERFLOW 0x05 +#define UAC2_AC3_OVERFLOW 0x06 +#define UAC2_AC3_DECODER_ERROR 0x07 + +/* A17.13.3 WMA Decoder Control Selectors */ +#define UAC2_WMA_UNDEFINED 0x00 +#define UAC2_WMA_UNDERFLOW 0x01 +#define UAC2_WMA_OVERFLOW 0x02 +#define UAC2_WMA_DECODER_ERROR 0x03 + +/* A17.13.4 DTS Decoder Control Selectors */ +#define UAC2_DTS_UNDEFINED 0x00 +#define UAC2_DTS_UNDERFLOW 0x01 +#define UAC2_DTS_OVERFLOW 0x02 +#define UAC2_DTS_DECODER_ERROR 0x03 + +/* A17.14 Endpoint Control Selectors */ +#define UAC2_EP_CS_UNDEFINED 0x00 +#define UAC2_EP_CS_PITCH 0x01 +#define UAC2_EP_CS_DATA_OVERRUN 0x02 +#define UAC2_EP_CS_DATA_UNDERRUN 0x03 + +#endif /* __LINUX_USB_AUDIO_V2_H */ + diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h new file mode 100644 index 00000000..a54b8255 --- /dev/null +++ b/include/linux/usb/audio.h @@ -0,0 +1,566 @@ +/* + * <linux/usb/audio.h> -- USB Audio definitions. + * + * Copyright (C) 2006 Thumtronics Pty Ltd. + * Developed for Thumtronics by Grey Innovation + * Ben Williamson <ben.williamson@greyinnovation.com> + * + * This software is distributed under the terms of the GNU General Public + * License ("GPL") version 2, as published by the Free Software Foundation. + * + * This file holds USB constants and structures defined + * by the USB Device Class Definition for Audio Devices. + * Comments below reference relevant sections of that document: + * + * http://www.usb.org/developers/devclass_docs/audio10.pdf + * + * Types and defines in this file are either specific to version 1.0 of + * this standard or common for newer versions. 
+ */ + +#ifndef __LINUX_USB_AUDIO_H +#define __LINUX_USB_AUDIO_H + +#include <linux/types.h> + +/* bInterfaceProtocol values to denote the version of the standard used */ +#define UAC_VERSION_1 0x00 +#define UAC_VERSION_2 0x20 + +/* A.2 Audio Interface Subclass Codes */ +#define USB_SUBCLASS_AUDIOCONTROL 0x01 +#define USB_SUBCLASS_AUDIOSTREAMING 0x02 +#define USB_SUBCLASS_MIDISTREAMING 0x03 + +/* A.5 Audio Class-Specific AC Interface Descriptor Subtypes */ +#define UAC_HEADER 0x01 +#define UAC_INPUT_TERMINAL 0x02 +#define UAC_OUTPUT_TERMINAL 0x03 +#define UAC_MIXER_UNIT 0x04 +#define UAC_SELECTOR_UNIT 0x05 +#define UAC_FEATURE_UNIT 0x06 +#define UAC1_PROCESSING_UNIT 0x07 +#define UAC1_EXTENSION_UNIT 0x08 + +/* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */ +#define UAC_AS_GENERAL 0x01 +#define UAC_FORMAT_TYPE 0x02 +#define UAC_FORMAT_SPECIFIC 0x03 + +/* A.7 Processing Unit Process Types */ +#define UAC_PROCESS_UNDEFINED 0x00 +#define UAC_PROCESS_UP_DOWNMIX 0x01 +#define UAC_PROCESS_DOLBY_PROLOGIC 0x02 +#define UAC_PROCESS_STEREO_EXTENDER 0x03 +#define UAC_PROCESS_REVERB 0x04 +#define UAC_PROCESS_CHORUS 0x05 +#define UAC_PROCESS_DYN_RANGE_COMP 0x06 + +/* A.8 Audio Class-Specific Endpoint Descriptor Subtypes */ +#define UAC_EP_GENERAL 0x01 + +/* A.9 Audio Class-Specific Request Codes */ +#define UAC_SET_ 0x00 +#define UAC_GET_ 0x80 + +#define UAC__CUR 0x1 +#define UAC__MIN 0x2 +#define UAC__MAX 0x3 +#define UAC__RES 0x4 +#define UAC__MEM 0x5 + +#define UAC_SET_CUR (UAC_SET_ | UAC__CUR) +#define UAC_GET_CUR (UAC_GET_ | UAC__CUR) +#define UAC_SET_MIN (UAC_SET_ | UAC__MIN) +#define UAC_GET_MIN (UAC_GET_ | UAC__MIN) +#define UAC_SET_MAX (UAC_SET_ | UAC__MAX) +#define UAC_GET_MAX (UAC_GET_ | UAC__MAX) +#define UAC_SET_RES (UAC_SET_ | UAC__RES) +#define UAC_GET_RES (UAC_GET_ | UAC__RES) +#define UAC_SET_MEM (UAC_SET_ | UAC__MEM) +#define UAC_GET_MEM (UAC_GET_ | UAC__MEM) + +#define UAC_GET_STAT 0xff + +/* A.10 Control Selector Codes */ + +/* A.10.1 Terminal Control Selectors */ +#define UAC_TERM_COPY_PROTECT 0x01 + +/* A.10.2 Feature Unit Control Selectors */ +#define UAC_FU_MUTE 0x01 +#define UAC_FU_VOLUME 0x02 +#define UAC_FU_BASS 0x03 +#define UAC_FU_MID 0x04 +#define UAC_FU_TREBLE 0x05 +#define UAC_FU_GRAPHIC_EQUALIZER 0x06 +#define UAC_FU_AUTOMATIC_GAIN 0x07 +#define UAC_FU_DELAY 0x08 +#define UAC_FU_BASS_BOOST 0x09 +#define UAC_FU_LOUDNESS 0x0a + +#define UAC_CONTROL_BIT(CS) (1 << ((CS) - 1)) + +/* A.10.3.1 Up/Down-mix Processing Unit Controls Selectors */ +#define UAC_UD_ENABLE 0x01 +#define UAC_UD_MODE_SELECT 0x02 + +/* A.10.3.2 Dolby Prologic (tm) Processing Unit Controls Selectors */ +#define UAC_DP_ENABLE 0x01 +#define UAC_DP_MODE_SELECT 0x02 + +/* A.10.3.3 3D Stereo Extender Processing Unit Control Selectors */ +#define UAC_3D_ENABLE 0x01 +#define UAC_3D_SPACE 0x02 + +/* A.10.3.4 Reverberation Processing Unit Control Selectors */ +#define UAC_REVERB_ENABLE 0x01 +#define UAC_REVERB_LEVEL 0x02 +#define UAC_REVERB_TIME 0x03 +#define UAC_REVERB_FEEDBACK 0x04 + +/* A.10.3.5 Chorus Processing Unit Control Selectors */ +#define UAC_CHORUS_ENABLE 0x01 +#define UAC_CHORUS_LEVEL 0x02 +#define UAC_CHORUS_RATE 0x03 +#define UAC_CHORUS_DEPTH 0x04 + +/* A.10.3.6 Dynamic Range Compressor Unit Control Selectors */ +#define UAC_DCR_ENABLE 0x01 +#define UAC_DCR_RATE 0x02 +#define UAC_DCR_MAXAMPL 0x03 +#define UAC_DCR_THRESHOLD 0x04 +#define UAC_DCR_ATTACK_TIME 0x05 +#define UAC_DCR_RELEASE_TIME 0x06 + +/* A.10.4 Extension Unit Control Selectors */ +#define UAC_XU_ENABLE 0x01 + +/* MIDI - 
A.1 MS Class-Specific Interface Descriptor Subtypes */ +#define UAC_MS_HEADER 0x01 +#define UAC_MIDI_IN_JACK 0x02 +#define UAC_MIDI_OUT_JACK 0x03 + +/* MIDI - A.1 MS Class-Specific Endpoint Descriptor Subtypes */ +#define UAC_MS_GENERAL 0x01 + +/* Terminals - 2.1 USB Terminal Types */ +#define UAC_TERMINAL_UNDEFINED 0x100 +#define UAC_TERMINAL_STREAMING 0x101 +#define UAC_TERMINAL_VENDOR_SPEC 0x1FF + +/* Terminal Control Selectors */ +/* 4.3.2 Class-Specific AC Interface Descriptor */ +struct uac1_ac_header_descriptor { + __u8 bLength; /* 8 + n */ + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ + __le16 bcdADC; /* 0x0100 */ + __le16 wTotalLength; /* includes Unit and Terminal desc. */ + __u8 bInCollection; /* n */ + __u8 baInterfaceNr[]; /* [n] */ +} __attribute__ ((packed)); + +#define UAC_DT_AC_HEADER_SIZE(n) (8 + (n)) + +/* As above, but more useful for defining your own descriptors: */ +#define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \ +struct uac1_ac_header_descriptor_##n { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __le16 bcdADC; \ + __le16 wTotalLength; \ + __u8 bInCollection; \ + __u8 baInterfaceNr[n]; \ +} __attribute__ ((packed)) + +/* 4.3.2.1 Input Terminal Descriptor */ +struct uac_input_terminal_descriptor { + __u8 bLength; /* in bytes: 12 */ + __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ + __u8 bDescriptorSubtype; /* INPUT_TERMINAL descriptor subtype */ + __u8 bTerminalID; /* Constant uniquely terminal ID */ + __le16 wTerminalType; /* USB Audio Terminal Types */ + __u8 bAssocTerminal; /* ID of the Output Terminal associated */ + __u8 bNrChannels; /* Number of logical output channels */ + __le16 wChannelConfig; + __u8 iChannelNames; + __u8 iTerminal; +} __attribute__ ((packed)); + +#define UAC_DT_INPUT_TERMINAL_SIZE 12 + +/* Terminals - 2.2 Input Terminal Types */ +#define UAC_INPUT_TERMINAL_UNDEFINED 0x200 +#define UAC_INPUT_TERMINAL_MICROPHONE 0x201 +#define UAC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202 +#define UAC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203 +#define UAC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204 +#define UAC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205 +#define UAC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206 + +/* Terminals - control selectors */ + +#define UAC_TERMINAL_CS_COPY_PROTECT_CONTROL 0x01 + +/* 4.3.2.2 Output Terminal Descriptor */ +struct uac1_output_terminal_descriptor { + __u8 bLength; /* in bytes: 9 */ + __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ + __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */ + __u8 bTerminalID; /* Constant uniquely terminal ID */ + __le16 wTerminalType; /* USB Audio Terminal Types */ + __u8 bAssocTerminal; /* ID of the Input Terminal associated */ + __u8 bSourceID; /* ID of the connected Unit or Terminal*/ + __u8 iTerminal; +} __attribute__ ((packed)); + +#define UAC_DT_OUTPUT_TERMINAL_SIZE 9 + +/* Terminals - 2.3 Output Terminal Types */ +#define UAC_OUTPUT_TERMINAL_UNDEFINED 0x300 +#define UAC_OUTPUT_TERMINAL_SPEAKER 0x301 +#define UAC_OUTPUT_TERMINAL_HEADPHONES 0x302 +#define UAC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303 +#define UAC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304 +#define UAC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305 +#define UAC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306 +#define UAC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307 + +/* Set bControlSize = 2 as default setting */ +#define UAC_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2) + +/* As above, but more useful for defining your own descriptors: 
*/ +#define DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(ch) \ +struct uac_feature_unit_descriptor_##ch { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bUnitID; \ + __u8 bSourceID; \ + __u8 bControlSize; \ + __le16 bmaControls[ch + 1]; \ + __u8 iFeature; \ +} __attribute__ ((packed)) + +/* 4.3.2.3 Mixer Unit Descriptor */ +struct uac_mixer_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bUnitID; + __u8 bNrInPins; + __u8 baSourceID[]; +} __attribute__ ((packed)); + +static inline __u8 uac_mixer_unit_bNrChannels(struct uac_mixer_unit_descriptor *desc) +{ + return desc->baSourceID[desc->bNrInPins]; +} + +static inline __u32 uac_mixer_unit_wChannelConfig(struct uac_mixer_unit_descriptor *desc, + int protocol) +{ + if (protocol == UAC_VERSION_1) + return (desc->baSourceID[desc->bNrInPins + 2] << 8) | + desc->baSourceID[desc->bNrInPins + 1]; + else + return (desc->baSourceID[desc->bNrInPins + 4] << 24) | + (desc->baSourceID[desc->bNrInPins + 3] << 16) | + (desc->baSourceID[desc->bNrInPins + 2] << 8) | + (desc->baSourceID[desc->bNrInPins + 1]); +} + +static inline __u8 uac_mixer_unit_iChannelNames(struct uac_mixer_unit_descriptor *desc, + int protocol) +{ + return (protocol == UAC_VERSION_1) ? + desc->baSourceID[desc->bNrInPins + 3] : + desc->baSourceID[desc->bNrInPins + 5]; +} + +static inline __u8 *uac_mixer_unit_bmControls(struct uac_mixer_unit_descriptor *desc, + int protocol) +{ + return (protocol == UAC_VERSION_1) ? + &desc->baSourceID[desc->bNrInPins + 4] : + &desc->baSourceID[desc->bNrInPins + 6]; +} + +static inline __u8 uac_mixer_unit_iMixer(struct uac_mixer_unit_descriptor *desc) +{ + __u8 *raw = (__u8 *) desc; + return raw[desc->bLength - 1]; +} + +/* 4.3.2.4 Selector Unit Descriptor */ +struct uac_selector_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bUintID; + __u8 bNrInPins; + __u8 baSourceID[]; +} __attribute__ ((packed)); + +static inline __u8 uac_selector_unit_iSelector(struct uac_selector_unit_descriptor *desc) +{ + __u8 *raw = (__u8 *) desc; + return raw[desc->bLength - 1]; +} + +/* 4.3.2.5 Feature Unit Descriptor */ +struct uac_feature_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bUnitID; + __u8 bSourceID; + __u8 bControlSize; + __u8 bmaControls[0]; /* variable length */ +} __attribute__((packed)); + +static inline __u8 uac_feature_unit_iFeature(struct uac_feature_unit_descriptor *desc) +{ + __u8 *raw = (__u8 *) desc; + return raw[desc->bLength - 1]; +} + +/* 4.3.2.6 Processing Unit Descriptors */ +struct uac_processing_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bUnitID; + __u16 wProcessType; + __u8 bNrInPins; + __u8 baSourceID[]; +} __attribute__ ((packed)); + +static inline __u8 uac_processing_unit_bNrChannels(struct uac_processing_unit_descriptor *desc) +{ + return desc->baSourceID[desc->bNrInPins]; +} + +static inline __u32 uac_processing_unit_wChannelConfig(struct uac_processing_unit_descriptor *desc, + int protocol) +{ + if (protocol == UAC_VERSION_1) + return (desc->baSourceID[desc->bNrInPins + 2] << 8) | + desc->baSourceID[desc->bNrInPins + 1]; + else + return (desc->baSourceID[desc->bNrInPins + 4] << 24) | + (desc->baSourceID[desc->bNrInPins + 3] << 16) | + (desc->baSourceID[desc->bNrInPins + 2] << 8) | + (desc->baSourceID[desc->bNrInPins + 1]); +} + +static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_descriptor *desc, + int 
protocol) +{ + return (protocol == UAC_VERSION_1) ? + desc->baSourceID[desc->bNrInPins + 3] : + desc->baSourceID[desc->bNrInPins + 5]; +} + +static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc, + int protocol) +{ + return (protocol == UAC_VERSION_1) ? + desc->baSourceID[desc->bNrInPins + 4] : + desc->baSourceID[desc->bNrInPins + 6]; +} + +static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, + int protocol) +{ + return (protocol == UAC_VERSION_1) ? + &desc->baSourceID[desc->bNrInPins + 5] : + &desc->baSourceID[desc->bNrInPins + 7]; +} + +static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, + int protocol) +{ + __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); + return desc->baSourceID[desc->bNrInPins + control_size]; +} + +static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, + int protocol) +{ + __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); + return &desc->baSourceID[desc->bNrInPins + control_size + 1]; +} + +/* 4.5.2 Class-Specific AS Interface Descriptor */ +struct uac1_as_header_descriptor { + __u8 bLength; /* in bytes: 7 */ + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* AS_GENERAL */ + __u8 bTerminalLink; /* Terminal ID of connected Terminal */ + __u8 bDelay; /* Delay introduced by the data path */ + __le16 wFormatTag; /* The Audio Data Format */ +} __attribute__ ((packed)); + +#define UAC_DT_AS_HEADER_SIZE 7 + +/* Formats - A.1.1 Audio Data Format Type I Codes */ +#define UAC_FORMAT_TYPE_I_UNDEFINED 0x0 +#define UAC_FORMAT_TYPE_I_PCM 0x1 +#define UAC_FORMAT_TYPE_I_PCM8 0x2 +#define UAC_FORMAT_TYPE_I_IEEE_FLOAT 0x3 +#define UAC_FORMAT_TYPE_I_ALAW 0x4 +#define UAC_FORMAT_TYPE_I_MULAW 0x5 + +struct uac_format_type_i_continuous_descriptor { + __u8 bLength; /* in bytes: 8 + (ns * 3) */ + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* FORMAT_TYPE */ + __u8 bFormatType; /* FORMAT_TYPE_1 */ + __u8 bNrChannels; /* physical channels in the stream */ + __u8 bSubframeSize; /* */ + __u8 bBitResolution; + __u8 bSamFreqType; + __u8 tLowerSamFreq[3]; + __u8 tUpperSamFreq[3]; +} __attribute__ ((packed)); + +#define UAC_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14 + +struct uac_format_type_i_discrete_descriptor { + __u8 bLength; /* in bytes: 8 + (ns * 3) */ + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* FORMAT_TYPE */ + __u8 bFormatType; /* FORMAT_TYPE_1 */ + __u8 bNrChannels; /* physical channels in the stream */ + __u8 bSubframeSize; /* */ + __u8 bBitResolution; + __u8 bSamFreqType; + __u8 tSamFreq[][3]; +} __attribute__ ((packed)); + +#define DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(n) \ +struct uac_format_type_i_discrete_descriptor_##n { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bFormatType; \ + __u8 bNrChannels; \ + __u8 bSubframeSize; \ + __u8 bBitResolution; \ + __u8 bSamFreqType; \ + __u8 tSamFreq[n][3]; \ +} __attribute__ ((packed)) + +#define UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3)) + +struct uac_format_type_i_ext_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bFormatType; + __u8 bSubslotSize; + __u8 bBitResolution; + __u8 bHeaderLength; + __u8 bControlSize; + __u8 bSideBandProtocol; +} __attribute__((packed)); + +/* Formats - Audio Data Format Type I Codes */ + +#define UAC_FORMAT_TYPE_II_MPEG 0x1001 
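/*
 * Editorial sketch (not part of this header or patch): the tSamFreq[],
 * tLowerSamFreq and tUpperSamFreq fields in the Type I format descriptors
 * above hold sampling rates as 3-byte little-endian values.  A minimal
 * decoder, assuming the caller has already validated bLength against
 * bSamFreqType; the function name is illustrative only.
 */
static inline unsigned int uac_sample_rate(const __u8 freq[3])
{
	return freq[0] | (freq[1] << 8) | (freq[2] << 16);
}

/*
 * For a discrete descriptor, rate i (0 <= i < bSamFreqType) would be
 * uac_sample_rate(fmt->tSamFreq[i]); bSamFreqType == 0 denotes the
 * continuous case, which uses the lower/upper bound fields instead.
 */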
+#define UAC_FORMAT_TYPE_II_AC3 0x1002 + +struct uac_format_type_ii_discrete_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bFormatType; + __le16 wMaxBitRate; + __le16 wSamplesPerFrame; + __u8 bSamFreqType; + __u8 tSamFreq[][3]; +} __attribute__((packed)); + +struct uac_format_type_ii_ext_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bFormatType; + __u16 wMaxBitRate; + __u16 wSamplesPerFrame; + __u8 bHeaderLength; + __u8 bSideBandProtocol; +} __attribute__((packed)); + +/* type III */ +#define UAC_FORMAT_TYPE_III_IEC1937_AC3 0x2001 +#define UAC_FORMAT_TYPE_III_IEC1937_MPEG1_LAYER1 0x2002 +#define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_NOEXT 0x2003 +#define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_EXT 0x2004 +#define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_LAYER1_LS 0x2005 +#define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_LAYER23_LS 0x2006 + +/* Formats - A.2 Format Type Codes */ +#define UAC_FORMAT_TYPE_UNDEFINED 0x0 +#define UAC_FORMAT_TYPE_I 0x1 +#define UAC_FORMAT_TYPE_II 0x2 +#define UAC_FORMAT_TYPE_III 0x3 +#define UAC_EXT_FORMAT_TYPE_I 0x81 +#define UAC_EXT_FORMAT_TYPE_II 0x82 +#define UAC_EXT_FORMAT_TYPE_III 0x83 + +struct uac_iso_endpoint_descriptor { + __u8 bLength; /* in bytes: 7 */ + __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ + __u8 bDescriptorSubtype; /* EP_GENERAL */ + __u8 bmAttributes; + __u8 bLockDelayUnits; + __le16 wLockDelay; +} __attribute__((packed)); +#define UAC_ISO_ENDPOINT_DESC_SIZE 7 + +#define UAC_EP_CS_ATTR_SAMPLE_RATE 0x01 +#define UAC_EP_CS_ATTR_PITCH_CONTROL 0x02 +#define UAC_EP_CS_ATTR_FILL_MAX 0x80 + +/* status word format (3.7.1.1) */ + +#define UAC1_STATUS_TYPE_ORIG_MASK 0x0f +#define UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF 0x0 +#define UAC1_STATUS_TYPE_ORIG_AUDIO_STREAM_IF 0x1 +#define UAC1_STATUS_TYPE_ORIG_AUDIO_STREAM_EP 0x2 + +#define UAC1_STATUS_TYPE_IRQ_PENDING (1 << 7) +#define UAC1_STATUS_TYPE_MEM_CHANGED (1 << 6) + +struct uac1_status_word { + __u8 bStatusType; + __u8 bOriginator; +} __attribute__((packed)); + +#ifdef __KERNEL__ + +struct usb_audio_control { + struct list_head list; + const char *name; + u8 type; + int data[5]; + int (*set)(struct usb_audio_control *con, u8 cmd, int value); + int (*get)(struct usb_audio_control *con, u8 cmd); +}; + +struct usb_audio_control_selector { + struct list_head list; + struct list_head control; + u8 id; + const char *name; + u8 type; + struct usb_descriptor_header *desc; +}; + +#endif /* __KERNEL__ */ + +#endif /* __LINUX_USB_AUDIO_H */ diff --git a/include/linux/usb/c67x00.h b/include/linux/usb/c67x00.h new file mode 100644 index 00000000..83c6b454 --- /dev/null +++ b/include/linux/usb/c67x00.h @@ -0,0 +1,48 @@ +/* + * usb_c67x00.h: platform definitions for the Cypress C67X00 USB chip + * + * Copyright (C) 2006-2008 Barco N.V. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301 USA. + */ + +#ifndef _LINUX_USB_C67X00_H +#define _LINUX_USB_C67X00_H + +/* SIE configuration */ +#define C67X00_SIE_UNUSED 0 +#define C67X00_SIE_HOST 1 +#define C67X00_SIE_PERIPHERAL_A 2 /* peripheral on A port */ +#define C67X00_SIE_PERIPHERAL_B 3 /* peripheral on B port */ + +#define c67x00_sie_config(config, n) (((config)>>(4*(n)))&0x3) + +#define C67X00_SIE1_UNUSED (C67X00_SIE_UNUSED << 0) +#define C67X00_SIE1_HOST (C67X00_SIE_HOST << 0) +#define C67X00_SIE1_PERIPHERAL_A (C67X00_SIE_PERIPHERAL_A << 0) +#define C67X00_SIE1_PERIPHERAL_B (C67X00_SIE_PERIPHERAL_B << 0) + +#define C67X00_SIE2_UNUSED (C67X00_SIE_UNUSED << 4) +#define C67X00_SIE2_HOST (C67X00_SIE_HOST << 4) +#define C67X00_SIE2_PERIPHERAL_A (C67X00_SIE_PERIPHERAL_A << 4) +#define C67X00_SIE2_PERIPHERAL_B (C67X00_SIE_PERIPHERAL_B << 4) + +struct c67x00_platform_data { + int sie_config; /* SIEs config (C67X00_SIEx_*) */ + unsigned long hpi_regstep; /* Step between HPI registers */ +}; + +#endif /* _LINUX_USB_C67X00_H */ diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h new file mode 100644 index 00000000..719c3326 --- /dev/null +++ b/include/linux/usb/cdc-wdm.h @@ -0,0 +1,19 @@ +/* + * USB CDC Device Management subdriver + * + * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ + +#ifndef __LINUX_USB_CDC_WDM_H +#define __LINUX_USB_CDC_WDM_H + +extern struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf, + struct usb_endpoint_descriptor *ep, + int bufsize, + int (*manage_power)(struct usb_interface *, int)); + +#endif /* __LINUX_USB_CDC_WDM_H */ diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h new file mode 100644 index 00000000..81a92793 --- /dev/null +++ b/include/linux/usb/cdc.h @@ -0,0 +1,412 @@ +/* + * USB Communications Device Class (CDC) definitions + * + * CDC says how to talk to lots of different types of network adapters, + * notably ethernet adapters and various modems. It's used mostly with + * firmware based USB peripherals. + */ + +#ifndef __LINUX_USB_CDC_H +#define __LINUX_USB_CDC_H + +#include <linux/types.h> + +#define USB_CDC_SUBCLASS_ACM 0x02 +#define USB_CDC_SUBCLASS_ETHERNET 0x06 +#define USB_CDC_SUBCLASS_WHCM 0x08 +#define USB_CDC_SUBCLASS_DMM 0x09 +#define USB_CDC_SUBCLASS_MDLM 0x0a +#define USB_CDC_SUBCLASS_OBEX 0x0b +#define USB_CDC_SUBCLASS_EEM 0x0c +#define USB_CDC_SUBCLASS_NCM 0x0d + +#define USB_CDC_PROTO_NONE 0 + +#define USB_CDC_ACM_PROTO_AT_V25TER 1 +#define USB_CDC_ACM_PROTO_AT_PCCA101 2 +#define USB_CDC_ACM_PROTO_AT_PCCA101_WAKE 3 +#define USB_CDC_ACM_PROTO_AT_GSM 4 +#define USB_CDC_ACM_PROTO_AT_3G 5 +#define USB_CDC_ACM_PROTO_AT_CDMA 6 +#define USB_CDC_ACM_PROTO_VENDOR 0xff + +#define USB_CDC_PROTO_EEM 7 + +#define USB_CDC_NCM_PROTO_NTB 1 + +/*-------------------------------------------------------------------------*/ + +/* + * Class-Specific descriptors ... 
there are a couple dozen of them + */ + +#define USB_CDC_HEADER_TYPE 0x00 /* header_desc */ +#define USB_CDC_CALL_MANAGEMENT_TYPE 0x01 /* call_mgmt_descriptor */ +#define USB_CDC_ACM_TYPE 0x02 /* acm_descriptor */ +#define USB_CDC_UNION_TYPE 0x06 /* union_desc */ +#define USB_CDC_COUNTRY_TYPE 0x07 +#define USB_CDC_NETWORK_TERMINAL_TYPE 0x0a /* network_terminal_desc */ +#define USB_CDC_ETHERNET_TYPE 0x0f /* ether_desc */ +#define USB_CDC_WHCM_TYPE 0x11 +#define USB_CDC_MDLM_TYPE 0x12 /* mdlm_desc */ +#define USB_CDC_MDLM_DETAIL_TYPE 0x13 /* mdlm_detail_desc */ +#define USB_CDC_DMM_TYPE 0x14 +#define USB_CDC_OBEX_TYPE 0x15 +#define USB_CDC_NCM_TYPE 0x1a + +/* "Header Functional Descriptor" from CDC spec 5.2.3.1 */ +struct usb_cdc_header_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __le16 bcdCDC; +} __attribute__ ((packed)); + +/* "Call Management Descriptor" from CDC spec 5.2.3.2 */ +struct usb_cdc_call_mgmt_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 bmCapabilities; +#define USB_CDC_CALL_MGMT_CAP_CALL_MGMT 0x01 +#define USB_CDC_CALL_MGMT_CAP_DATA_INTF 0x02 + + __u8 bDataInterface; +} __attribute__ ((packed)); + +/* "Abstract Control Management Descriptor" from CDC spec 5.2.3.3 */ +struct usb_cdc_acm_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 bmCapabilities; +} __attribute__ ((packed)); + +/* capabilities from 5.2.3.3 */ + +#define USB_CDC_COMM_FEATURE 0x01 +#define USB_CDC_CAP_LINE 0x02 +#define USB_CDC_CAP_BRK 0x04 +#define USB_CDC_CAP_NOTIFY 0x08 + +/* "Union Functional Descriptor" from CDC spec 5.2.3.8 */ +struct usb_cdc_union_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 bMasterInterface0; + __u8 bSlaveInterface0; + /* ... and there could be other slave interfaces */ +} __attribute__ ((packed)); + +/* "Country Selection Functional Descriptor" from CDC spec 5.2.3.9 */ +struct usb_cdc_country_functional_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 iCountryCodeRelDate; + __le16 wCountyCode0; + /* ... 
and there can be a lot of country codes */ +} __attribute__ ((packed)); + +/* "Network Channel Terminal Functional Descriptor" from CDC spec 5.2.3.11 */ +struct usb_cdc_network_terminal_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 bEntityId; + __u8 iName; + __u8 bChannelIndex; + __u8 bPhysicalInterface; +} __attribute__ ((packed)); + +/* "Ethernet Networking Functional Descriptor" from CDC spec 5.2.3.16 */ +struct usb_cdc_ether_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 iMACAddress; + __le32 bmEthernetStatistics; + __le16 wMaxSegmentSize; + __le16 wNumberMCFilters; + __u8 bNumberPowerFilters; +} __attribute__ ((packed)); + +/* "Telephone Control Model Functional Descriptor" from CDC WMC spec 6.3..3 */ +struct usb_cdc_dmm_desc { + __u8 bFunctionLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u16 bcdVersion; + __le16 wMaxCommand; +} __attribute__ ((packed)); + +/* "MDLM Functional Descriptor" from CDC WMC spec 6.7.2.3 */ +struct usb_cdc_mdlm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __le16 bcdVersion; + __u8 bGUID[16]; +} __attribute__ ((packed)); + +/* "MDLM Detail Functional Descriptor" from CDC WMC spec 6.7.2.4 */ +struct usb_cdc_mdlm_detail_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + /* type is associated with mdlm_desc.bGUID */ + __u8 bGuidDescriptorType; + __u8 bDetailData[0]; +} __attribute__ ((packed)); + +/* "OBEX Control Model Functional Descriptor" */ +struct usb_cdc_obex_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __le16 bcdVersion; +} __attribute__ ((packed)); + +/* "NCM Control Model Functional Descriptor" */ +struct usb_cdc_ncm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __le16 bcdNcmVersion; + __u8 bmNetworkCapabilities; +} __attribute__ ((packed)); +/*-------------------------------------------------------------------------*/ + +/* + * Class-Specific Control Requests (6.2) + * + * section 3.6.2.1 table 4 has the ACM profile, for modems. + * section 3.8.2 table 10 has the ethernet profile. + * + * Microsoft's RNDIS stack for Ethernet is a vendor-specific CDC ACM variant, + * heavily dependent on the encapsulated (proprietary) command mechanism. 
+ */ + +#define USB_CDC_SEND_ENCAPSULATED_COMMAND 0x00 +#define USB_CDC_GET_ENCAPSULATED_RESPONSE 0x01 +#define USB_CDC_REQ_SET_LINE_CODING 0x20 +#define USB_CDC_REQ_GET_LINE_CODING 0x21 +#define USB_CDC_REQ_SET_CONTROL_LINE_STATE 0x22 +#define USB_CDC_REQ_SEND_BREAK 0x23 +#define USB_CDC_SET_ETHERNET_MULTICAST_FILTERS 0x40 +#define USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER 0x41 +#define USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER 0x42 +#define USB_CDC_SET_ETHERNET_PACKET_FILTER 0x43 +#define USB_CDC_GET_ETHERNET_STATISTIC 0x44 +#define USB_CDC_GET_NTB_PARAMETERS 0x80 +#define USB_CDC_GET_NET_ADDRESS 0x81 +#define USB_CDC_SET_NET_ADDRESS 0x82 +#define USB_CDC_GET_NTB_FORMAT 0x83 +#define USB_CDC_SET_NTB_FORMAT 0x84 +#define USB_CDC_GET_NTB_INPUT_SIZE 0x85 +#define USB_CDC_SET_NTB_INPUT_SIZE 0x86 +#define USB_CDC_GET_MAX_DATAGRAM_SIZE 0x87 +#define USB_CDC_SET_MAX_DATAGRAM_SIZE 0x88 +#define USB_CDC_GET_CRC_MODE 0x89 +#define USB_CDC_SET_CRC_MODE 0x8a + +/* Line Coding Structure from CDC spec 6.2.13 */ +struct usb_cdc_line_coding { + __le32 dwDTERate; + __u8 bCharFormat; +#define USB_CDC_1_STOP_BITS 0 +#define USB_CDC_1_5_STOP_BITS 1 +#define USB_CDC_2_STOP_BITS 2 + + __u8 bParityType; +#define USB_CDC_NO_PARITY 0 +#define USB_CDC_ODD_PARITY 1 +#define USB_CDC_EVEN_PARITY 2 +#define USB_CDC_MARK_PARITY 3 +#define USB_CDC_SPACE_PARITY 4 + + __u8 bDataBits; +} __attribute__ ((packed)); + +/* table 62; bits in multicast filter */ +#define USB_CDC_PACKET_TYPE_PROMISCUOUS (1 << 0) +#define USB_CDC_PACKET_TYPE_ALL_MULTICAST (1 << 1) /* no filter */ +#define USB_CDC_PACKET_TYPE_DIRECTED (1 << 2) +#define USB_CDC_PACKET_TYPE_BROADCAST (1 << 3) +#define USB_CDC_PACKET_TYPE_MULTICAST (1 << 4) /* filtered */ + + +/*-------------------------------------------------------------------------*/ + +/* + * Class-Specific Notifications (6.3) sent by interrupt transfers + * + * section 3.8.2 table 11 of the CDC spec lists Ethernet notifications + * section 3.6.2.1 table 5 specifies ACM notifications, accepted by RNDIS + * RNDIS also defines its own bit-incompatible notifications + */ + +#define USB_CDC_NOTIFY_NETWORK_CONNECTION 0x00 +#define USB_CDC_NOTIFY_RESPONSE_AVAILABLE 0x01 +#define USB_CDC_NOTIFY_SERIAL_STATE 0x20 +#define USB_CDC_NOTIFY_SPEED_CHANGE 0x2a + +struct usb_cdc_notification { + __u8 bmRequestType; + __u8 bNotificationType; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +} __attribute__ ((packed)); + +struct usb_cdc_speed_change { + __le32 DLBitRRate; /* contains the downlink bit rate (IN pipe) */ + __le32 ULBitRate; /* contains the uplink bit rate (OUT pipe) */ +} __attribute__ ((packed)); + +/*-------------------------------------------------------------------------*/ + +/* + * Class Specific structures and constants + * + * CDC NCM NTB parameters structure, CDC NCM subclass 6.2.1 + * + */ + +struct usb_cdc_ncm_ntb_parameters { + __le16 wLength; + __le16 bmNtbFormatsSupported; + __le32 dwNtbInMaxSize; + __le16 wNdpInDivisor; + __le16 wNdpInPayloadRemainder; + __le16 wNdpInAlignment; + __le16 wPadding1; + __le32 dwNtbOutMaxSize; + __le16 wNdpOutDivisor; + __le16 wNdpOutPayloadRemainder; + __le16 wNdpOutAlignment; + __le16 wNtbOutMaxDatagrams; +} __attribute__ ((packed)); + +/* + * CDC NCM transfer headers, CDC NCM subclass 3.2 + */ + +#define USB_CDC_NCM_NTH16_SIGN 0x484D434E /* NCMH */ +#define USB_CDC_NCM_NTH32_SIGN 0x686D636E /* ncmh */ + +struct usb_cdc_ncm_nth16 { + __le32 dwSignature; + __le16 wHeaderLength; + __le16 wSequence; + __le16 wBlockLength; + __le16 wNdpIndex; +} __attribute__ 
((packed)); + +struct usb_cdc_ncm_nth32 { + __le32 dwSignature; + __le16 wHeaderLength; + __le16 wSequence; + __le32 dwBlockLength; + __le32 dwNdpIndex; +} __attribute__ ((packed)); + +/* + * CDC NCM datagram pointers, CDC NCM subclass 3.3 + */ + +#define USB_CDC_NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */ +#define USB_CDC_NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */ +#define USB_CDC_NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */ +#define USB_CDC_NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */ + +/* 16-bit NCM Datagram Pointer Entry */ +struct usb_cdc_ncm_dpe16 { + __le16 wDatagramIndex; + __le16 wDatagramLength; +} __attribute__((__packed__)); + +/* 16-bit NCM Datagram Pointer Table */ +struct usb_cdc_ncm_ndp16 { + __le32 dwSignature; + __le16 wLength; + __le16 wNextNdpIndex; + struct usb_cdc_ncm_dpe16 dpe16[0]; +} __attribute__ ((packed)); + +/* 32-bit NCM Datagram Pointer Entry */ +struct usb_cdc_ncm_dpe32 { + __le32 dwDatagramIndex; + __le32 dwDatagramLength; +} __attribute__((__packed__)); + +/* 32-bit NCM Datagram Pointer Table */ +struct usb_cdc_ncm_ndp32 { + __le32 dwSignature; + __le16 wLength; + __le16 wReserved6; + __le32 dwNextNdpIndex; + __le32 dwReserved12; + struct usb_cdc_ncm_dpe32 dpe32[0]; +} __attribute__ ((packed)); + +/* CDC NCM subclass 3.2.1 and 3.2.2 */ +#define USB_CDC_NCM_NDP16_INDEX_MIN 0x000C +#define USB_CDC_NCM_NDP32_INDEX_MIN 0x0010 + +/* CDC NCM subclass 3.3.3 Datagram Formatting */ +#define USB_CDC_NCM_DATAGRAM_FORMAT_CRC 0x30 +#define USB_CDC_NCM_DATAGRAM_FORMAT_NOCRC 0X31 + +/* CDC NCM subclass 4.2 NCM Communications Interface Protocol Code */ +#define USB_CDC_NCM_PROTO_CODE_NO_ENCAP_COMMANDS 0x00 +#define USB_CDC_NCM_PROTO_CODE_EXTERN_PROTO 0xFE + +/* CDC NCM subclass 5.2.1 NCM Functional Descriptor, bmNetworkCapabilities */ +#define USB_CDC_NCM_NCAP_ETH_FILTER (1 << 0) +#define USB_CDC_NCM_NCAP_NET_ADDRESS (1 << 1) +#define USB_CDC_NCM_NCAP_ENCAP_COMMAND (1 << 2) +#define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE (1 << 3) +#define USB_CDC_NCM_NCAP_CRC_MODE (1 << 4) +#define USB_CDC_NCM_NCAP_NTB_INPUT_SIZE (1 << 5) + +/* CDC NCM subclass Table 6-3: NTB Parameter Structure */ +#define USB_CDC_NCM_NTB16_SUPPORTED (1 << 0) +#define USB_CDC_NCM_NTB32_SUPPORTED (1 << 1) + +/* CDC NCM subclass Table 6-3: NTB Parameter Structure */ +#define USB_CDC_NCM_NDP_ALIGN_MIN_SIZE 0x04 +#define USB_CDC_NCM_NTB_MAX_LENGTH 0x1C + +/* CDC NCM subclass 6.2.5 SetNtbFormat */ +#define USB_CDC_NCM_NTB16_FORMAT 0x00 +#define USB_CDC_NCM_NTB32_FORMAT 0x01 + +/* CDC NCM subclass 6.2.7 SetNtbInputSize */ +#define USB_CDC_NCM_NTB_MIN_IN_SIZE 2048 +#define USB_CDC_NCM_NTB_MIN_OUT_SIZE 2048 + +/* NTB Input Size Structure */ +struct usb_cdc_ncm_ndp_input_size { + __le32 dwNtbInMaxSize; + __le16 wNtbInMaxDatagrams; + __le16 wReserved; +} __attribute__ ((packed)); + +/* CDC NCM subclass 6.2.11 SetCrcMode */ +#define USB_CDC_NCM_CRC_NOT_APPENDED 0x00 +#define USB_CDC_NCM_CRC_APPENDED 0x01 + +#endif /* __LINUX_USB_CDC_H */ diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h new file mode 100644 index 00000000..f1d26b60 --- /dev/null +++ b/include/linux/usb/ch11.h @@ -0,0 +1,264 @@ +/* + * This file holds Hub protocol constants and data structures that are + * defined in chapter 11 (Hub Specification) of the USB 2.0 specification. + * + * It is used/shared between the USB core, the HCDs and couple of other USB + * drivers. 
+ */ + +#ifndef __LINUX_CH11_H +#define __LINUX_CH11_H + +#include <linux/types.h> /* __u8 etc */ + +/* + * Hub request types + */ + +#define USB_RT_HUB (USB_TYPE_CLASS | USB_RECIP_DEVICE) +#define USB_RT_PORT (USB_TYPE_CLASS | USB_RECIP_OTHER) + +/* + * Hub class requests + * See USB 2.0 spec Table 11-16 + */ +#define HUB_CLEAR_TT_BUFFER 8 +#define HUB_RESET_TT 9 +#define HUB_GET_TT_STATE 10 +#define HUB_STOP_TT 11 + +/* + * Hub class additional requests defined by USB 3.0 spec + * See USB 3.0 spec Table 10-6 + */ +#define HUB_SET_DEPTH 12 +#define HUB_GET_PORT_ERR_COUNT 13 + +/* + * Hub Class feature numbers + * See USB 2.0 spec Table 11-17 + */ +#define C_HUB_LOCAL_POWER 0 +#define C_HUB_OVER_CURRENT 1 + +/* + * Port feature numbers + * See USB 2.0 spec Table 11-17 + */ +#define USB_PORT_FEAT_CONNECTION 0 +#define USB_PORT_FEAT_ENABLE 1 +#define USB_PORT_FEAT_SUSPEND 2 /* L2 suspend */ +#define USB_PORT_FEAT_OVER_CURRENT 3 +#define USB_PORT_FEAT_RESET 4 +#define USB_PORT_FEAT_L1 5 /* L1 suspend */ +#define USB_PORT_FEAT_POWER 8 +#define USB_PORT_FEAT_LOWSPEED 9 /* Should never be used */ +#define USB_PORT_FEAT_C_CONNECTION 16 +#define USB_PORT_FEAT_C_ENABLE 17 +#define USB_PORT_FEAT_C_SUSPEND 18 +#define USB_PORT_FEAT_C_OVER_CURRENT 19 +#define USB_PORT_FEAT_C_RESET 20 +#define USB_PORT_FEAT_TEST 21 +#define USB_PORT_FEAT_INDICATOR 22 +#define USB_PORT_FEAT_C_PORT_L1 23 + +/* + * Port feature selectors added by USB 3.0 spec. + * See USB 3.0 spec Table 10-7 + */ +#define USB_PORT_FEAT_LINK_STATE 5 +#define USB_PORT_FEAT_U1_TIMEOUT 23 +#define USB_PORT_FEAT_U2_TIMEOUT 24 +#define USB_PORT_FEAT_C_PORT_LINK_STATE 25 +#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26 +#define USB_PORT_FEAT_REMOTE_WAKE_MASK 27 +#define USB_PORT_FEAT_BH_PORT_RESET 28 +#define USB_PORT_FEAT_C_BH_PORT_RESET 29 +#define USB_PORT_FEAT_FORCE_LINKPM_ACCEPT 30 + +/* USB 3.0 hub remote wake mask bits, see table 10-14 */ +#define USB_PORT_FEAT_REMOTE_WAKE_CONNECT (1 << 8) +#define USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT (1 << 9) +#define USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT (1 << 10) + +/* + * Hub Status and Hub Change results + * See USB 2.0 spec Table 11-19 and Table 11-20 + */ +struct usb_port_status { + __le16 wPortStatus; + __le16 wPortChange; +} __attribute__ ((packed)); + +/* + * wPortStatus bit field + * See USB 2.0 spec Table 11-21 + */ +#define USB_PORT_STAT_CONNECTION 0x0001 +#define USB_PORT_STAT_ENABLE 0x0002 +#define USB_PORT_STAT_SUSPEND 0x0004 +#define USB_PORT_STAT_OVERCURRENT 0x0008 +#define USB_PORT_STAT_RESET 0x0010 +#define USB_PORT_STAT_L1 0x0020 +/* bits 6 to 7 are reserved */ +#define USB_PORT_STAT_POWER 0x0100 +#define USB_PORT_STAT_LOW_SPEED 0x0200 +#define USB_PORT_STAT_HIGH_SPEED 0x0400 +#define USB_PORT_STAT_TEST 0x0800 +#define USB_PORT_STAT_INDICATOR 0x1000 +/* bits 13 to 15 are reserved */ + +/* + * Additions to wPortStatus bit field from USB 3.0 + * See USB 3.0 spec Table 10-10 + */ +#define USB_PORT_STAT_LINK_STATE 0x01e0 +#define USB_SS_PORT_STAT_POWER 0x0200 +#define USB_SS_PORT_STAT_SPEED 0x1c00 +#define USB_PORT_STAT_SPEED_5GBPS 0x0000 +/* Valid only if port is enabled */ +/* Bits that are the same from USB 2.0 */ +#define USB_SS_PORT_STAT_MASK (USB_PORT_STAT_CONNECTION | \ + USB_PORT_STAT_ENABLE | \ + USB_PORT_STAT_OVERCURRENT | \ + USB_PORT_STAT_RESET) + +/* + * Definitions for PORT_LINK_STATE values + * (bits 5-8) in wPortStatus + */ +#define USB_SS_PORT_LS_U0 0x0000 +#define USB_SS_PORT_LS_U1 0x0020 +#define USB_SS_PORT_LS_U2 0x0040 +#define USB_SS_PORT_LS_U3 0x0060 +#define 
USB_SS_PORT_LS_SS_DISABLED 0x0080 +#define USB_SS_PORT_LS_RX_DETECT 0x00a0 +#define USB_SS_PORT_LS_SS_INACTIVE 0x00c0 +#define USB_SS_PORT_LS_POLLING 0x00e0 +#define USB_SS_PORT_LS_RECOVERY 0x0100 +#define USB_SS_PORT_LS_HOT_RESET 0x0120 +#define USB_SS_PORT_LS_COMP_MOD 0x0140 +#define USB_SS_PORT_LS_LOOPBACK 0x0160 + +/* + * wPortChange bit field + * See USB 2.0 spec Table 11-22 and USB 2.0 LPM ECN Table-4.10 + * Bits 0 to 5 shown, bits 6 to 15 are reserved + */ +#define USB_PORT_STAT_C_CONNECTION 0x0001 +#define USB_PORT_STAT_C_ENABLE 0x0002 +#define USB_PORT_STAT_C_SUSPEND 0x0004 +#define USB_PORT_STAT_C_OVERCURRENT 0x0008 +#define USB_PORT_STAT_C_RESET 0x0010 +#define USB_PORT_STAT_C_L1 0x0020 +/* + * USB 3.0 wPortChange bit fields + * See USB 3.0 spec Table 10-11 + */ +#define USB_PORT_STAT_C_BH_RESET 0x0020 +#define USB_PORT_STAT_C_LINK_STATE 0x0040 +#define USB_PORT_STAT_C_CONFIG_ERROR 0x0080 + +/* + * wHubCharacteristics (masks) + * See USB 2.0 spec Table 11-13, offset 3 + */ +#define HUB_CHAR_LPSM 0x0003 /* Logical Power Switching Mode mask */ +#define HUB_CHAR_COMMON_LPSM 0x0000 /* All ports power control at once */ +#define HUB_CHAR_INDV_PORT_LPSM 0x0001 /* per-port power control */ +#define HUB_CHAR_NO_LPSM 0x0002 /* no power switching */ + +#define HUB_CHAR_COMPOUND 0x0004 /* hub is part of a compound device */ + +#define HUB_CHAR_OCPM 0x0018 /* Over-Current Protection Mode mask */ +#define HUB_CHAR_COMMON_OCPM 0x0000 /* All ports Over-Current reporting */ +#define HUB_CHAR_INDV_PORT_OCPM 0x0008 /* per-port Over-current reporting */ +#define HUB_CHAR_NO_OCPM 0x0010 /* No Over-current Protection support */ + +#define HUB_CHAR_TTTT 0x0060 /* TT Think Time mask */ +#define HUB_CHAR_PORTIND 0x0080 /* per-port indicators (LEDs) */ + +struct usb_hub_status { + __le16 wHubStatus; + __le16 wHubChange; +} __attribute__ ((packed)); + +/* + * Hub Status & Hub Change bit masks + * See USB 2.0 spec Table 11-19 and Table 11-20 + * Bits 0 and 1 for wHubStatus and wHubChange + * Bits 2 to 15 are reserved for both + */ +#define HUB_STATUS_LOCAL_POWER 0x0001 +#define HUB_STATUS_OVERCURRENT 0x0002 +#define HUB_CHANGE_LOCAL_POWER 0x0001 +#define HUB_CHANGE_OVERCURRENT 0x0002 + + +/* + * Hub descriptor + * See USB 2.0 spec Table 11-13 + */ + +#define USB_DT_HUB (USB_TYPE_CLASS | 0x09) +#define USB_DT_SS_HUB (USB_TYPE_CLASS | 0x0a) +#define USB_DT_HUB_NONVAR_SIZE 7 +#define USB_DT_SS_HUB_SIZE 12 + +/* + * Hub Device descriptor + * USB Hub class device protocols + */ + +#define USB_HUB_PR_FS 0 /* Full speed hub */ +#define USB_HUB_PR_HS_NO_TT 0 /* Hi-speed hub without TT */ +#define USB_HUB_PR_HS_SINGLE_TT 1 /* Hi-speed hub with single TT */ +#define USB_HUB_PR_HS_MULTI_TT 2 /* Hi-speed hub with multiple TT */ +#define USB_HUB_PR_SS 3 /* Super speed hub */ + +struct usb_hub_descriptor { + __u8 bDescLength; + __u8 bDescriptorType; + __u8 bNbrPorts; + __le16 wHubCharacteristics; + __u8 bPwrOn2PwrGood; + __u8 bHubContrCurrent; + + /* 2.0 and 3.0 hubs differ here */ + union { + struct { + /* add 1 bit for hub status change; round to bytes */ + __u8 DeviceRemovable[(USB_MAXCHILDREN + 1 + 7) / 8]; + __u8 PortPwrCtrlMask[(USB_MAXCHILDREN + 1 + 7) / 8]; + } __attribute__ ((packed)) hs; + + struct { + __u8 bHubHdrDecLat; + __u16 wHubDelay; + __u16 DeviceRemovable; + } __attribute__ ((packed)) ss; + } u; +} __attribute__ ((packed)); + +/* port indicator status selectors, tables 11-7 and 11-25 */ +#define HUB_LED_AUTO 0 +#define HUB_LED_AMBER 1 +#define HUB_LED_GREEN 2 +#define HUB_LED_OFF 3 + +enum 
hub_led_mode { + INDICATOR_AUTO = 0, + INDICATOR_CYCLE, + /* software blinks for attention: software, hardware, reserved */ + INDICATOR_GREEN_BLINK, INDICATOR_GREEN_BLINK_OFF, + INDICATOR_AMBER_BLINK, INDICATOR_AMBER_BLINK_OFF, + INDICATOR_ALT_BLINK, INDICATOR_ALT_BLINK_OFF +} __attribute__ ((packed)); + +/* Transaction Translator Think Times, in bits */ +#define HUB_TTTT_8_BITS 0x00 +#define HUB_TTTT_16_BITS 0x20 +#define HUB_TTTT_24_BITS 0x40 +#define HUB_TTTT_32_BITS 0x60 + +#endif /* __LINUX_CH11_H */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h new file mode 100644 index 00000000..af21f311 --- /dev/null +++ b/include/linux/usb/ch9.h @@ -0,0 +1,946 @@ +/* + * This file holds USB constants and structures that are needed for + * USB device APIs. These are used by the USB device model, which is + * defined in chapter 9 of the USB 2.0 specification and in the + * Wireless USB 1.0 (spread around). Linux has several APIs in C that + * need these: + * + * - the master/host side Linux-USB kernel driver API; + * - the "usbfs" user space API; and + * - the Linux "gadget" slave/device/peripheral side driver API. + * + * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems + * act either as a USB master/host or as a USB slave/device. That means + * the master and slave side APIs benefit from working well together. + * + * There's also "Wireless USB", using low power short range radios for + * peripheral interconnection but otherwise building on the USB framework. + * + * Note all descriptors are declared '__attribute__((packed))' so that: + * + * [a] they never get padded, either internally (USB spec writers + * probably handled that) or externally; + * + * [b] so that accessing bigger-than-a-bytes fields will never + * generate bus errors on any platform, even when the location of + * its descriptor inside a bundle isn't "naturally aligned", and + * + * [c] for consistency, removing all doubt even when it appears to + * someone that the two other points are non-issues for that + * particular descriptor type. + */ + +#ifndef __LINUX_USB_CH9_H +#define __LINUX_USB_CH9_H + +#include <linux/types.h> /* __u8 etc */ +#include <asm/byteorder.h> /* le16_to_cpu */ + +/*-------------------------------------------------------------------------*/ + +/* CONTROL REQUEST SUPPORT */ + +/* + * USB directions + * + * This bit flag is used in endpoint descriptors' bEndpointAddress field. + * It's also one of three fields in control requests bRequestType. + */ +#define USB_DIR_OUT 0 /* to device */ +#define USB_DIR_IN 0x80 /* to host */ + +/* + * USB types, the second of three bRequestType fields + */ +#define USB_TYPE_MASK (0x03 << 5) +#define USB_TYPE_STANDARD (0x00 << 5) +#define USB_TYPE_CLASS (0x01 << 5) +#define USB_TYPE_VENDOR (0x02 << 5) +#define USB_TYPE_RESERVED (0x03 << 5) + +/* + * USB recipients, the third of three bRequestType fields + */ +#define USB_RECIP_MASK 0x1f +#define USB_RECIP_DEVICE 0x00 +#define USB_RECIP_INTERFACE 0x01 +#define USB_RECIP_ENDPOINT 0x02 +#define USB_RECIP_OTHER 0x03 +/* From Wireless USB 1.0 */ +#define USB_RECIP_PORT 0x04 +#define USB_RECIP_RPIPE 0x05 + +/* + * Standard requests, for the bRequest field of a SETUP packet. + * + * These are qualified by the bRequestType field, so that for example + * TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved + * by a GET_STATUS request. 
+ */ +#define USB_REQ_GET_STATUS 0x00 +#define USB_REQ_CLEAR_FEATURE 0x01 +#define USB_REQ_SET_FEATURE 0x03 +#define USB_REQ_SET_ADDRESS 0x05 +#define USB_REQ_GET_DESCRIPTOR 0x06 +#define USB_REQ_SET_DESCRIPTOR 0x07 +#define USB_REQ_GET_CONFIGURATION 0x08 +#define USB_REQ_SET_CONFIGURATION 0x09 +#define USB_REQ_GET_INTERFACE 0x0A +#define USB_REQ_SET_INTERFACE 0x0B +#define USB_REQ_SYNCH_FRAME 0x0C + +#define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */ +#define USB_REQ_GET_ENCRYPTION 0x0E +#define USB_REQ_RPIPE_ABORT 0x0E +#define USB_REQ_SET_HANDSHAKE 0x0F +#define USB_REQ_RPIPE_RESET 0x0F +#define USB_REQ_GET_HANDSHAKE 0x10 +#define USB_REQ_SET_CONNECTION 0x11 +#define USB_REQ_SET_SECURITY_DATA 0x12 +#define USB_REQ_GET_SECURITY_DATA 0x13 +#define USB_REQ_SET_WUSB_DATA 0x14 +#define USB_REQ_LOOPBACK_DATA_WRITE 0x15 +#define USB_REQ_LOOPBACK_DATA_READ 0x16 +#define USB_REQ_SET_INTERFACE_DS 0x17 + +/* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command, + * used by hubs to put ports into a new L1 suspend state, except that it + * forgot to define its number ... + */ + +/* + * USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and + * are read as a bit array returned by USB_REQ_GET_STATUS. (So there + * are at most sixteen features of each type.) Hubs may also support a + * new USB_REQ_TEST_AND_SET_FEATURE to put ports into L1 suspend. + */ +#define USB_DEVICE_SELF_POWERED 0 /* (read only) */ +#define USB_DEVICE_REMOTE_WAKEUP 1 /* dev may initiate wakeup */ +#define USB_DEVICE_TEST_MODE 2 /* (wired high speed only) */ +#define USB_DEVICE_BATTERY 2 /* (wireless) */ +#define USB_DEVICE_B_HNP_ENABLE 3 /* (otg) dev may initiate HNP */ +#define USB_DEVICE_WUSB_DEVICE 3 /* (wireless)*/ +#define USB_DEVICE_A_HNP_SUPPORT 4 /* (otg) RH port supports HNP */ +#define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */ +#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ + +/* + * Test Mode Selectors + * See USB 2.0 spec Table 9-7 + */ +#define TEST_J 1 +#define TEST_K 2 +#define TEST_SE0_NAK 3 +#define TEST_PACKET 4 +#define TEST_FORCE_EN 5 + +/* + * New Feature Selectors as added by USB 3.0 + * See USB 3.0 spec Table 9-6 + */ +#define USB_DEVICE_U1_ENABLE 48 /* dev may initiate U1 transition */ +#define USB_DEVICE_U2_ENABLE 49 /* dev may initiate U2 transition */ +#define USB_DEVICE_LTM_ENABLE 50 /* dev may send LTM */ +#define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */ + +#define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00 +/* + * Suspend Options, Table 9-7 USB 3.0 spec + */ +#define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) +#define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) + +#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ + +/* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ +#define USB_DEV_STAT_U1_ENABLED 2 /* transition into U1 state */ +#define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */ +#define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */ + +/** + * struct usb_ctrlrequest - SETUP data for a USB device control request + * @bRequestType: matches the USB bmRequestType field + * @bRequest: matches the USB bRequest field + * @wValue: matches the USB wValue field (le16 byte order) + * @wIndex: matches the USB wIndex field (le16 byte order) + * @wLength: matches the USB wLength field (le16 byte order) + * + * This structure is used to send control requests to a USB device. It matches + * the different fields of the USB 2.0 Spec section 9.3, table 9-2. 
See the + * USB spec for a fuller description of the different fields, and what they are + * used for. + * + * Note that the driver for any interface can issue control requests. + * For most devices, interfaces don't coordinate with each other, so + * such requests may be made at any time. + */ +struct usb_ctrlrequest { + __u8 bRequestType; + __u8 bRequest; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +} __attribute__ ((packed)); + +/*-------------------------------------------------------------------------*/ + +/* + * STANDARD DESCRIPTORS ... as returned by GET_DESCRIPTOR, or + * (rarely) accepted by SET_DESCRIPTOR. + * + * Note that all multi-byte values here are encoded in little endian + * byte order "on the wire". Within the kernel and when exposed + * through the Linux-USB APIs, they are not converted to cpu byte + * order; it is the responsibility of the client code to do this. + * The single exception is when device and configuration descriptors (but + * not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD); + * in this case the fields are converted to host endianness by the kernel. + */ + +/* + * Descriptor types ... USB 2.0 spec table 9.5 + */ +#define USB_DT_DEVICE 0x01 +#define USB_DT_CONFIG 0x02 +#define USB_DT_STRING 0x03 +#define USB_DT_INTERFACE 0x04 +#define USB_DT_ENDPOINT 0x05 +#define USB_DT_DEVICE_QUALIFIER 0x06 +#define USB_DT_OTHER_SPEED_CONFIG 0x07 +#define USB_DT_INTERFACE_POWER 0x08 +/* these are from a minor usb 2.0 revision (ECN) */ +#define USB_DT_OTG 0x09 +#define USB_DT_DEBUG 0x0a +#define USB_DT_INTERFACE_ASSOCIATION 0x0b +/* these are from the Wireless USB spec */ +#define USB_DT_SECURITY 0x0c +#define USB_DT_KEY 0x0d +#define USB_DT_ENCRYPTION_TYPE 0x0e +#define USB_DT_BOS 0x0f +#define USB_DT_DEVICE_CAPABILITY 0x10 +#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11 +#define USB_DT_WIRE_ADAPTER 0x21 +#define USB_DT_RPIPE 0x22 +#define USB_DT_CS_RADIO_CONTROL 0x23 +/* From the T10 UAS specification */ +#define USB_DT_PIPE_USAGE 0x24 +/* From the USB 3.0 spec */ +#define USB_DT_SS_ENDPOINT_COMP 0x30 + +/* Conventional codes for class-specific descriptors. The convention is + * defined in the USB "Common Class" Spec (3.11). Individual class specs + * are authoritative for their usage, not the "common class" writeup. 
+ */ +#define USB_DT_CS_DEVICE (USB_TYPE_CLASS | USB_DT_DEVICE) +#define USB_DT_CS_CONFIG (USB_TYPE_CLASS | USB_DT_CONFIG) +#define USB_DT_CS_STRING (USB_TYPE_CLASS | USB_DT_STRING) +#define USB_DT_CS_INTERFACE (USB_TYPE_CLASS | USB_DT_INTERFACE) +#define USB_DT_CS_ENDPOINT (USB_TYPE_CLASS | USB_DT_ENDPOINT) + +/* All standard descriptors have these 2 fields at the beginning */ +struct usb_descriptor_header { + __u8 bLength; + __u8 bDescriptorType; +} __attribute__ ((packed)); + + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_DEVICE: Device descriptor */ +struct usb_device_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __le16 idVendor; + __le16 idProduct; + __le16 bcdDevice; + __u8 iManufacturer; + __u8 iProduct; + __u8 iSerialNumber; + __u8 bNumConfigurations; +} __attribute__ ((packed)); + +#define USB_DT_DEVICE_SIZE 18 + + +/* + * Device and/or Interface Class codes + * as found in bDeviceClass or bInterfaceClass + * and defined by www.usb.org documents + */ +#define USB_CLASS_PER_INTERFACE 0 /* for DeviceClass */ +#define USB_CLASS_AUDIO 1 +#define USB_CLASS_COMM 2 +#define USB_CLASS_HID 3 +#define USB_CLASS_PHYSICAL 5 +#define USB_CLASS_STILL_IMAGE 6 +#define USB_CLASS_PRINTER 7 +#define USB_CLASS_MASS_STORAGE 8 +#define USB_CLASS_HUB 9 +#define USB_CLASS_CDC_DATA 0x0a +#define USB_CLASS_CSCID 0x0b /* chip+ smart card */ +#define USB_CLASS_CONTENT_SEC 0x0d /* content security */ +#define USB_CLASS_VIDEO 0x0e +#define USB_CLASS_WIRELESS_CONTROLLER 0xe0 +#define USB_CLASS_MISC 0xef +#define USB_CLASS_APP_SPEC 0xfe +#define USB_CLASS_VENDOR_SPEC 0xff + +#define USB_SUBCLASS_VENDOR_SPEC 0xff + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_CONFIG: Configuration descriptor information. + * + * USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the + * descriptor type is different. Highspeed-capable devices can look + * different depending on what speed they're currently running. Only + * devices with a USB_DT_DEVICE_QUALIFIER have any OTHER_SPEED_CONFIG + * descriptors. + */ +struct usb_config_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 wTotalLength; + __u8 bNumInterfaces; + __u8 bConfigurationValue; + __u8 iConfiguration; + __u8 bmAttributes; + __u8 bMaxPower; +} __attribute__ ((packed)); + +#define USB_DT_CONFIG_SIZE 9 + +/* from config descriptor bmAttributes */ +#define USB_CONFIG_ATT_ONE (1 << 7) /* must be set */ +#define USB_CONFIG_ATT_SELFPOWER (1 << 6) /* self powered */ +#define USB_CONFIG_ATT_WAKEUP (1 << 5) /* can wakeup */ +#define USB_CONFIG_ATT_BATTERY (1 << 4) /* battery powered */ + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_STRING: String descriptor */ +struct usb_string_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 wData[1]; /* UTF-16LE encoded */ +} __attribute__ ((packed)); + +/* note that "string" zero is special, it holds language codes that + * the device supports, not Unicode characters. 
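+ *
+ * For example, a device that only supports US English returns a string
+ * descriptor zero whose single wData element is the language ID 0x0409
+ * (little endian on the wire); hosts then pass that ID in wIndex when
+ * fetching the remaining string descriptors.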
+ */ + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_INTERFACE: Interface descriptor */ +struct usb_interface_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bInterfaceNumber; + __u8 bAlternateSetting; + __u8 bNumEndpoints; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 iInterface; +} __attribute__ ((packed)); + +#define USB_DT_INTERFACE_SIZE 9 + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_ENDPOINT: Endpoint descriptor */ +struct usb_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bEndpointAddress; + __u8 bmAttributes; + __le16 wMaxPacketSize; + __u8 bInterval; + + /* NOTE: these two are _only_ in audio endpoints. */ + /* use USB_DT_ENDPOINT*_SIZE in bLength, not sizeof. */ + __u8 bRefresh; + __u8 bSynchAddress; +} __attribute__ ((packed)); + +#define USB_DT_ENDPOINT_SIZE 7 +#define USB_DT_ENDPOINT_AUDIO_SIZE 9 /* Audio extension */ + + +/* + * Endpoints + */ +#define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */ +#define USB_ENDPOINT_DIR_MASK 0x80 + +#define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */ +#define USB_ENDPOINT_XFER_CONTROL 0 +#define USB_ENDPOINT_XFER_ISOC 1 +#define USB_ENDPOINT_XFER_BULK 2 +#define USB_ENDPOINT_XFER_INT 3 +#define USB_ENDPOINT_MAX_ADJUSTABLE 0x80 + +#define USB_ENDPOINT_SYNCTYPE 0x0c +#define USB_ENDPOINT_SYNC_NONE (0 << 2) +#define USB_ENDPOINT_SYNC_ASYNC (1 << 2) +#define USB_ENDPOINT_SYNC_ADAPTIVE (2 << 2) +#define USB_ENDPOINT_SYNC_SYNC (3 << 2) + +#define USB_ENDPOINT_USAGE_MASK 0x30 +#define USB_ENDPOINT_USAGE_DATA 0x00 +#define USB_ENDPOINT_USAGE_FEEDBACK 0x10 +#define USB_ENDPOINT_USAGE_IMPLICIT_FB 0x20 /* Implicit feedback Data endpoint */ + +/*-------------------------------------------------------------------------*/ + +/** + * usb_endpoint_num - get the endpoint's number + * @epd: endpoint to be checked + * + * Returns @epd's number: 0 to 15. + */ +static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd) +{ + return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; +} + +/** + * usb_endpoint_type - get the endpoint's transfer type + * @epd: endpoint to be checked + * + * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according + * to @epd's transfer type. + */ +static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd) +{ + return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; +} + +/** + * usb_endpoint_dir_in - check if the endpoint has IN direction + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type IN, otherwise it returns false. + */ +static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN); +} + +/** + * usb_endpoint_dir_out - check if the endpoint has OUT direction + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type OUT, otherwise it returns false. + */ +static inline int usb_endpoint_dir_out( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT); +} + +/** + * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type bulk, otherwise it returns false. 
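+ *
+ * For illustration: an endpoint descriptor with bEndpointAddress 0x81
+ * and bmAttributes 0x02 describes a bulk IN endpoint, so this helper
+ * returns true for it, usb_endpoint_num() returns 1 and
+ * usb_endpoint_dir_in() is also true.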
+ */ +static inline int usb_endpoint_xfer_bulk( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_BULK); +} + +/** + * usb_endpoint_xfer_control - check if the endpoint has control transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type control, otherwise it returns false. + */ +static inline int usb_endpoint_xfer_control( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_CONTROL); +} + +/** + * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type interrupt, otherwise it returns + * false. + */ +static inline int usb_endpoint_xfer_int( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_INT); +} + +/** + * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type isochronous, otherwise it returns + * false. + */ +static inline int usb_endpoint_xfer_isoc( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_ISOC); +} + +/** + * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN + * @epd: endpoint to be checked + * + * Returns true if the endpoint has bulk transfer type and IN direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_bulk_in( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd); +} + +/** + * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT + * @epd: endpoint to be checked + * + * Returns true if the endpoint has bulk transfer type and OUT direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_bulk_out( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd); +} + +/** + * usb_endpoint_is_int_in - check if the endpoint is interrupt IN + * @epd: endpoint to be checked + * + * Returns true if the endpoint has interrupt transfer type and IN direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_int_in( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd); +} + +/** + * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT + * @epd: endpoint to be checked + * + * Returns true if the endpoint has interrupt transfer type and OUT direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_int_out( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd); +} + +/** + * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN + * @epd: endpoint to be checked + * + * Returns true if the endpoint has isochronous transfer type and IN direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_isoc_in( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd); +} + +/** + * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT + * @epd: endpoint to be checked + * + * Returns true if the endpoint has isochronous transfer type and OUT direction, + * otherwise it returns false. 
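+ *
+ * As a usage sketch (hypothetical driver code; the variable names are
+ * illustrative), the usb_endpoint_is_*() helpers are typically combined
+ * while walking an altsetting's endpoint descriptors:
+ *
+ *	if (usb_endpoint_is_bulk_in(epd))
+ *		bulk_in = epd;
+ *	else if (usb_endpoint_is_bulk_out(epd))
+ *		bulk_out = epd;
+ *	else if (usb_endpoint_is_int_in(epd))
+ *		int_in = epd;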
+ */ +static inline int usb_endpoint_is_isoc_out( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd); +} + +/** + * usb_endpoint_maxp - get endpoint's max packet size + * @epd: endpoint to be checked + * + * Returns @epd's max packet + */ +static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd) +{ + return __le16_to_cpu(epd->wMaxPacketSize); +} + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */ +struct usb_ss_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bMaxBurst; + __u8 bmAttributes; + __le16 wBytesPerInterval; +} __attribute__ ((packed)); + +#define USB_DT_SS_EP_COMP_SIZE 6 + +/* Bits 4:0 of bmAttributes if this is a bulk endpoint */ +static inline int +usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp) +{ + int max_streams; + + if (!comp) + return 0; + + max_streams = comp->bmAttributes & 0x1f; + + if (!max_streams) + return 0; + + max_streams = 1 << max_streams; + + return max_streams; +} + +/* Bits 1:0 of bmAttributes if this is an isoc endpoint */ +#define USB_SS_MULT(p) (1 + ((p) & 0x3)) + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */ +struct usb_qualifier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __u8 bNumConfigurations; + __u8 bRESERVED; +} __attribute__ ((packed)); + + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_OTG (from OTG 1.0a supplement) */ +struct usb_otg_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bmAttributes; /* support for HNP, SRP, etc */ +} __attribute__ ((packed)); + +/* from usb_otg_descriptor.bmAttributes */ +#define USB_OTG_SRP (1 << 0) +#define USB_OTG_HNP (1 << 1) /* swap host/device roles */ + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_DEBUG: for special highspeed devices, replacing serial console */ +struct usb_debug_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + /* bulk endpoints with 8 byte maxpacket */ + __u8 bDebugInEndpoint; + __u8 bDebugOutEndpoint; +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_INTERFACE_ASSOCIATION: groups interfaces */ +struct usb_interface_assoc_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bFirstInterface; + __u8 bInterfaceCount; + __u8 bFunctionClass; + __u8 bFunctionSubClass; + __u8 bFunctionProtocol; + __u8 iFunction; +} __attribute__ ((packed)); + + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_SECURITY: group of wireless security descriptors, including + * encryption types available for setting up a CC/association. + */ +struct usb_security_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 wTotalLength; + __u8 bNumEncryptionTypes; +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_KEY: used with {GET,SET}_SECURITY_DATA; only public keys + * may be retrieved. 
+ */ +struct usb_key_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 tTKID[3]; + __u8 bReserved; + __u8 bKeyData[0]; +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_ENCRYPTION_TYPE: bundled in DT_SECURITY groups */ +struct usb_encryption_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bEncryptionType; +#define USB_ENC_TYPE_UNSECURE 0 +#define USB_ENC_TYPE_WIRED 1 /* non-wireless mode */ +#define USB_ENC_TYPE_CCM_1 2 /* aes128/cbc session */ +#define USB_ENC_TYPE_RSA_1 3 /* rsa3072/sha1 auth */ + __u8 bEncryptionValue; /* use in SET_ENCRYPTION */ + __u8 bAuthKeyIndex; +} __attribute__((packed)); + + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_BOS: group of device-level capabilities */ +struct usb_bos_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 wTotalLength; + __u8 bNumDeviceCaps; +} __attribute__((packed)); + +#define USB_DT_BOS_SIZE 5 +/*-------------------------------------------------------------------------*/ + +/* USB_DT_DEVICE_CAPABILITY: grouped with BOS */ +struct usb_dev_cap_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +} __attribute__((packed)); + +#define USB_CAP_TYPE_WIRELESS_USB 1 + +struct usb_wireless_cap_descriptor { /* Ultra Wide Band */ + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + + __u8 bmAttributes; +#define USB_WIRELESS_P2P_DRD (1 << 1) +#define USB_WIRELESS_BEACON_MASK (3 << 2) +#define USB_WIRELESS_BEACON_SELF (1 << 2) +#define USB_WIRELESS_BEACON_DIRECTED (2 << 2) +#define USB_WIRELESS_BEACON_NONE (3 << 2) + __le16 wPHYRates; /* bit rates, Mbps */ +#define USB_WIRELESS_PHY_53 (1 << 0) /* always set */ +#define USB_WIRELESS_PHY_80 (1 << 1) +#define USB_WIRELESS_PHY_107 (1 << 2) /* always set */ +#define USB_WIRELESS_PHY_160 (1 << 3) +#define USB_WIRELESS_PHY_200 (1 << 4) /* always set */ +#define USB_WIRELESS_PHY_320 (1 << 5) +#define USB_WIRELESS_PHY_400 (1 << 6) +#define USB_WIRELESS_PHY_480 (1 << 7) + __u8 bmTFITXPowerInfo; /* TFI power levels */ + __u8 bmFFITXPowerInfo; /* FFI power levels */ + __le16 bmBandGroup; + __u8 bReserved; +} __attribute__((packed)); + +/* USB 2.0 Extension descriptor */ +#define USB_CAP_TYPE_EXT 2 + +struct usb_ext_cap_descriptor { /* Link Power Management */ + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __le32 bmAttributes; +#define USB_LPM_SUPPORT (1 << 1) /* supports LPM */ +#define USB_BESL_SUPPORT (1 << 2) /* supports BESL */ +#define USB_BESL_BASELINE_VALID (1 << 3) /* Baseline BESL valid*/ +#define USB_BESL_DEEP_VALID (1 << 4) /* Deep BESL valid */ +#define USB_GET_BESL_BASELINE(p) (((p) & (0xf << 8)) >> 8) +#define USB_GET_BESL_DEEP(p) (((p) & (0xf << 12)) >> 12) +} __attribute__((packed)); + +#define USB_DT_USB_EXT_CAP_SIZE 7 + +/* + * SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB + * specific device level capabilities + */ +#define USB_SS_CAP_TYPE 3 +struct usb_ss_cap_descriptor { /* Link Power Management */ + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bmAttributes; +#define USB_LTM_SUPPORT (1 << 1) /* supports LTM */ + __le16 wSpeedSupported; +#define USB_LOW_SPEED_OPERATION (1) /* Low speed operation */ +#define USB_FULL_SPEED_OPERATION (1 << 1) /* Full speed operation */ +#define USB_HIGH_SPEED_OPERATION (1 << 2) /* High speed operation */ +#define USB_5GBPS_OPERATION (1 << 3) /* Operation at 5Gbps */ + __u8 
bFunctionalitySupport; + __u8 bU1devExitLat; + __le16 bU2DevExitLat; +} __attribute__((packed)); + +#define USB_DT_USB_SS_CAP_SIZE 10 + +/* + * Container ID Capability descriptor: Defines the instance unique ID used to + * identify the instance across all operating modes + */ +#define CONTAINER_ID_TYPE 4 +struct usb_ss_container_id_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __u8 ContainerID[16]; /* 128-bit number */ +} __attribute__((packed)); + +#define USB_DT_USB_SS_CONTN_ID_SIZE 20 +/*-------------------------------------------------------------------------*/ + +/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with + * each endpoint descriptor for a wireless device + */ +struct usb_wireless_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bMaxBurst; + __u8 bMaxSequence; + __le16 wMaxStreamDelay; + __le16 wOverTheAirPacketSize; + __u8 bOverTheAirInterval; + __u8 bmCompAttributes; +#define USB_ENDPOINT_SWITCH_MASK 0x03 /* in bmCompAttributes */ +#define USB_ENDPOINT_SWITCH_NO 0 +#define USB_ENDPOINT_SWITCH_SWITCH 1 +#define USB_ENDPOINT_SWITCH_SCALE 2 +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_REQ_SET_HANDSHAKE is a four-way handshake used between a wireless + * host and a device for connection set up, mutual authentication, and + * exchanging short lived session keys. The handshake depends on a CC. + */ +struct usb_handshake { + __u8 bMessageNumber; + __u8 bStatus; + __u8 tTKID[3]; + __u8 bReserved; + __u8 CDID[16]; + __u8 nonce[16]; + __u8 MIC[8]; +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_REQ_SET_CONNECTION modifies or revokes a connection context (CC). + * A CC may also be set up using non-wireless secure channels (including + * wired USB!), and some devices may support CCs with multiple hosts. + */ +struct usb_connection_context { + __u8 CHID[16]; /* persistent host id */ + __u8 CDID[16]; /* device id (unique w/in host context) */ + __u8 CK[16]; /* connection key */ +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB 2.0 defines three speeds, here's how Linux identifies them */ + +enum usb_device_speed { + USB_SPEED_UNKNOWN = 0, /* enumerating */ + USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */ + USB_SPEED_HIGH, /* usb 2.0 */ + USB_SPEED_WIRELESS, /* wireless (usb 2.5) */ + USB_SPEED_SUPER, /* usb 3.0 */ +}; + +#ifdef __KERNEL__ + +/** + * usb_speed_string() - Returns human readable-name of the speed. + * @speed: The speed to return human-readable name for. If it's not + * any of the speeds defined in usb_device_speed enum, string for + * USB_SPEED_UNKNOWN will be returned. + */ +extern const char *usb_speed_string(enum usb_device_speed speed); + +#endif + +enum usb_device_state { + /* NOTATTACHED isn't in the USB spec, and this state acts + * the same as ATTACHED ... but it's clearer this way. 
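+	 *
+	 * For example, a newly attached device normally walks through
+	 * ATTACHED, POWERED, DEFAULT (after bus reset), ADDRESS (after
+	 * SET_ADDRESS) and finally CONFIGURED (after SET_CONFIGURATION).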
+ */ + USB_STATE_NOTATTACHED = 0, + + /* chapter 9 and authentication (wireless) device states */ + USB_STATE_ATTACHED, + USB_STATE_POWERED, /* wired */ + USB_STATE_RECONNECTING, /* auth */ + USB_STATE_UNAUTHENTICATED, /* auth */ + USB_STATE_DEFAULT, /* limited function */ + USB_STATE_ADDRESS, + USB_STATE_CONFIGURED, /* most functions */ + + USB_STATE_SUSPENDED + + /* NOTE: there are actually four different SUSPENDED + * states, returning to POWERED, DEFAULT, ADDRESS, or + * CONFIGURED respectively when SOF tokens flow again. + * At this level there's no difference between L1 and L2 + * suspend states. (L2 being original USB 1.1 suspend.) + */ +}; + +/*-------------------------------------------------------------------------*/ + +/* + * As per USB compliance update, a device that is actively drawing + * more than 100mA from USB must report itself as bus-powered in + * the GetStatus(DEVICE) call. + * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34 + */ +#define USB_SELF_POWER_VBUS_MAX_DRAW 100 + +#endif /* __LINUX_USB_CH9_H */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h new file mode 100644 index 00000000..6938a860 --- /dev/null +++ b/include/linux/usb/composite.h @@ -0,0 +1,388 @@ +/* + * composite.h -- framework for usb gadgets which are composite devices + * + * Copyright (C) 2006-2008 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __LINUX_USB_COMPOSITE_H +#define __LINUX_USB_COMPOSITE_H + +/* + * This framework is an optional layer on top of the USB Gadget interface, + * making it easier to build (a) Composite devices, supporting multiple + * functions within any single configuration, and (b) Multi-configuration + * devices, also supporting multiple functions but without necessarily + * having more than one function per configuration. + * + * Example: a device with a single configuration supporting both network + * link and mass storage functions is a composite device. Those functions + * might alternatively be packaged in individual configurations, but in + * the composite model the host can use both functions at the same time. + */ + +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> + +/* + * USB function drivers should return USB_GADGET_DELAYED_STATUS if they + * wish to delay the data/status stages of the control transfer till they + * are ready. The control transfer will then be kept from completing till + * all the function drivers that requested for USB_GADGET_DELAYED_STAUS + * invoke usb_composite_setup_continue(). + */ +#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */ + +struct usb_configuration; + +/** + * struct usb_function - describes one function of a configuration + * @name: For diagnostics, identifies the function. 
+ * @strings: tables of strings, keyed by identifiers assigned during bind() + * and by language IDs provided in control requests + * @descriptors: Table of full (or low) speed descriptors, using interface and + * string identifiers assigned during @bind(). If this pointer is null, + * the function will not be available at full speed (or at low speed). + * @hs_descriptors: Table of high speed descriptors, using interface and + * string identifiers assigned during @bind(). If this pointer is null, + * the function will not be available at high speed. + * @ss_descriptors: Table of super speed descriptors, using interface and + * string identifiers assigned during @bind(). If this + * pointer is null after initialization, the function will not + * be available at super speed. + * @config: assigned when @usb_add_function() is called; this is the + * configuration with which this function is associated. + * @bind: Before the gadget can register, all of its functions bind() to the + * available resources including string and interface identifiers used + * in interface or class descriptors; endpoints; I/O buffers; and so on. + * @unbind: Reverses @bind; called as a side effect of unregistering the + * driver which added this function. + * @set_alt: (REQUIRED) Reconfigures altsettings; function drivers may + * initialize usb_ep.driver_data at this time (when it is used). + * Note that setting an interface to its current altsetting resets + * interface state, and that all interfaces have a disabled state. + * @get_alt: Returns the active altsetting. If this is not provided, + * then only altsetting zero is supported. + * @disable: (REQUIRED) Indicates the function should be disabled. Reasons + * include host resetting or reconfiguring the gadget, and disconnection. + * @setup: Used for interface-specific control requests. + * @suspend: Notifies functions when the host stops sending USB traffic. + * @resume: Notifies functions when the host restarts USB traffic. + * @get_status: Returns function status as a reply to + * GetStatus() request when the recipient is Interface. + * @func_suspend: callback to be called when + * SetFeature(FUNCTION_SUSPEND) is received + * + * A single USB function uses one or more interfaces, and should in most + * cases support operation at both full and high speeds. Each function is + * associated by @usb_add_function() with one configuration; that function + * causes @bind() to be called so resources can be allocated as part of + * setting up a gadget driver. Those resources include endpoints, which + * should be allocated using @usb_ep_autoconfig(). + * + * To support dual speed operation, a function driver provides descriptors + * for both high and full speed operation. Except in rare cases that don't + * involve bulk endpoints, each speed needs different endpoint descriptors. + * + * Function drivers choose their own strategies for managing instance data. + * The simplest strategy just declares it "static", which means the function + * can only be activated once. If the function needs to be exposed in more + * than one configuration at a given speed, it needs to support multiple + * usb_function structures (one for each configuration). + * + * A more complex strategy might encapsulate a @usb_function structure inside + * a driver-specific instance structure to allow multiple activations.
An + * example of multiple activations might be a CDC ACM function that supports + * two or more distinct instances within the same configuration, providing + * several independent logical data links to a USB host. + */ +struct usb_function { + const char *name; + struct usb_gadget_strings **strings; + struct usb_descriptor_header **descriptors; + struct usb_descriptor_header **hs_descriptors; + struct usb_descriptor_header **ss_descriptors; + + struct usb_configuration *config; + + /* REVISIT: bind() functions can be marked __init, which + * makes trouble for section mismatch analysis. See if + * we can't restructure things to avoid mismatching. + * Related: unbind() may kfree() but bind() won't... + */ + + /* configuration management: bind/unbind */ + int (*bind)(struct usb_configuration *, + struct usb_function *); + void (*unbind)(struct usb_configuration *, + struct usb_function *); + + /* runtime state management */ + int (*set_alt)(struct usb_function *, + unsigned interface, unsigned alt); + int (*get_alt)(struct usb_function *, + unsigned interface); + void (*disable)(struct usb_function *); + int (*setup)(struct usb_function *, + const struct usb_ctrlrequest *); + void (*suspend)(struct usb_function *); + void (*resume)(struct usb_function *); + + /* USB 3.0 additions */ + int (*get_status)(struct usb_function *); + int (*func_suspend)(struct usb_function *, + u8 suspend_opt); + /* private: */ + /* internals */ + struct list_head list; + DECLARE_BITMAP(endpoints, 32); +}; + +int usb_add_function(struct usb_configuration *, struct usb_function *); + +int usb_function_deactivate(struct usb_function *); +int usb_function_activate(struct usb_function *); + +int usb_interface_id(struct usb_configuration *, struct usb_function *); + +int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, + struct usb_ep *_ep); + +#define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */ + +/** + * struct usb_configuration - represents one gadget configuration + * @label: For diagnostics, describes the configuration. + * @strings: Tables of strings, keyed by identifiers assigned during @bind() + * and by language IDs provided in control requests. + * @descriptors: Table of descriptors preceding all function descriptors. + * Examples include OTG and vendor-specific descriptors. + * @unbind: Reverses @bind; called as a side effect of unregistering the + * driver which added this configuration. + * @setup: Used to delegate control requests that aren't handled by standard + * device infrastructure or directed at a specific interface. + * @bConfigurationValue: Copied into configuration descriptor. + * @iConfiguration: Copied into configuration descriptor. + * @bmAttributes: Copied into configuration descriptor. + * @bMaxPower: Copied into configuration descriptor. + * @cdev: assigned by @usb_add_config() before calling @bind(); this is + * the device associated with this configuration. + * + * Configurations are building blocks for gadget drivers structured around + * function drivers. Simple USB gadgets require only one function and one + * configuration, and handle dual-speed hardware by always providing the same + * functionality. Slightly more complex gadgets may have more than one + * single-function configuration at a given speed; or have configurations + * that only work at one speed. + * + * Composite devices are, by definition, ones with configurations which + * include more than one function. 
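+ *
+ * A minimal sketch (hypothetical gadget code; the names are illustrative
+ * only) of declaring such a configuration and adding it to a composite
+ * device:
+ *
+ *	static struct usb_configuration my_config = {
+ *		.label = "my config",
+ *		.bConfigurationValue = 1,
+ *		.bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+ *	};
+ *
+ *	status = usb_add_config(cdev, &my_config, my_config_bind);
+ *
+ * where my_config_bind() is the bind callback that in turn calls
+ * usb_add_function() for each function in this configuration.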
+ * + * The lifecycle of a usb_configuration includes allocation, initialization + * of the fields described above, and calling @usb_add_config() to set up + * internal data and bind it to a specific device. The configuration's + * @bind() method is then used to initialize all the functions and then + * call @usb_add_function() for them. + * + * Those functions would normally be independent of each other, but that's + * not mandatory. CDC WMC devices are an example where functions often + * depend on other functions, with some functions subsidiary to others. + * Such interdependency may be managed in any way, so long as all of the + * descriptors complete by the time the composite driver returns from + * its bind() routine. + */ +struct usb_configuration { + const char *label; + struct usb_gadget_strings **strings; + const struct usb_descriptor_header **descriptors; + + /* REVISIT: bind() functions can be marked __init, which + * makes trouble for section mismatch analysis. See if + * we can't restructure things to avoid mismatching... + */ + + /* configuration management: unbind/setup */ + void (*unbind)(struct usb_configuration *); + int (*setup)(struct usb_configuration *, + const struct usb_ctrlrequest *); + + /* fields in the config descriptor */ + u8 bConfigurationValue; + u8 iConfiguration; + u8 bmAttributes; + u8 bMaxPower; + + struct usb_composite_dev *cdev; + + /* private: */ + /* internals */ + struct list_head list; + struct list_head functions; + u8 next_interface_id; + unsigned superspeed:1; + unsigned highspeed:1; + unsigned fullspeed:1; + struct usb_function *interface[MAX_CONFIG_INTERFACES]; +}; + +int usb_add_config(struct usb_composite_dev *, + struct usb_configuration *, + int (*)(struct usb_configuration *)); + +int usb_remove_config(struct usb_composite_dev *, + struct usb_configuration *); + +/** + * struct usb_composite_driver - groups configurations into a gadget + * @name: For diagnostics, identifies the driver. + * @iProduct: Used as iProduct override if @dev->iProduct is not set. + * If NULL value of @name is taken. + * @iManufacturer: Used as iManufacturer override if @dev->iManufacturer is + * not set. If NULL a default "<system> <release> with <udc>" value + * will be used. + * @dev: Template descriptor for the device, including default device + * identifiers. + * @strings: tables of strings, keyed by identifiers assigned during bind() + * and language IDs provided in control requests + * @max_speed: Highest speed the driver supports. + * @needs_serial: set to 1 if the gadget needs userspace to provide + * a serial number. If one is not provided, warning will be printed. + * @unbind: Reverses bind; called as a side effect of unregistering + * this driver. + * @disconnect: optional driver disconnect method + * @suspend: Notifies when the host stops sending USB traffic, + * after function notifications + * @resume: Notifies configuration when the host restarts USB traffic, + * before function notifications + * + * Devices default to reporting self powered operation. Devices which rely + * on bus powered operation should report this in their @bind() method. + * + * Before returning from bind, various fields in the template descriptor + * may be overridden. These include the idVendor/idProduct/bcdDevice values + * normally to bind the appropriate host side driver, and the three strings + * (iManufacturer, iProduct, iSerialNumber) normally used to provide user + * meaningful device identifiers. 
(The strings will not be defined unless + * they are defined in @dev and @strings.) The correct ep0 maxpacket size + * is also reported, as defined by the underlying controller driver. + */ +struct usb_composite_driver { + const char *name; + const char *iProduct; + const char *iManufacturer; + const struct usb_device_descriptor *dev; + struct usb_gadget_strings **strings; + enum usb_device_speed max_speed; + unsigned needs_serial:1; + + int (*unbind)(struct usb_composite_dev *); + + void (*disconnect)(struct usb_composite_dev *); + + /* global suspend hooks */ + void (*suspend)(struct usb_composite_dev *); + void (*resume)(struct usb_composite_dev *); +}; + +extern int usb_composite_probe(struct usb_composite_driver *driver, + int (*bind)(struct usb_composite_dev *cdev)); +extern void usb_composite_unregister(struct usb_composite_driver *driver); +extern void usb_composite_setup_continue(struct usb_composite_dev *cdev); + + +/** + * struct usb_composite_device - represents one composite usb gadget + * @gadget: read-only, abstracts the gadget's usb peripheral controller + * @req: used for control responses; buffer is pre-allocated + * @bufsiz: size of buffer pre-allocated in @req + * @config: the currently active configuration + * + * One of these devices is allocated and initialized before the + * associated device driver's bind() is called. + * + * OPEN ISSUE: it appears that some WUSB devices will need to be + * built by combining a normal (wired) gadget with a wireless one. + * This revision of the gadget framework should probably try to make + * sure doing that won't hurt too much. + * + * One notion for how to handle Wireless USB devices involves: + * (a) a second gadget here, discovery mechanism TBD, but likely + * needing separate "register/unregister WUSB gadget" calls; + * (b) updates to usb_gadget to include flags "is it wireless", + * "is it wired", plus (presumably in a wrapper structure) + * bandgroup and PHY info; + * (c) presumably a wireless_ep wrapping a usb_ep, and reporting + * wireless-specific parameters like maxburst and maxsequence; + * (d) configurations that are specific to wireless links; + * (e) function drivers that understand wireless configs and will + * support wireless for (additional) function instances; + * (f) a function to support association setup (like CBAF), not + * necessarily requiring a wireless adapter; + * (g) composite device setup that can create one or more wireless + * configs, including appropriate association setup support; + * (h) more, TBD. + */ +struct usb_composite_dev { + struct usb_gadget *gadget; + struct usb_request *req; + unsigned bufsiz; + + struct usb_configuration *config; + + /* private: */ + /* internals */ + unsigned int suspended:1; + struct usb_device_descriptor desc; + struct list_head configs; + struct usb_composite_driver *driver; + u8 next_string_id; + u8 manufacturer_override; + u8 product_override; + u8 serial_override; + + /* the gadget driver won't enable the data pullup + * while the deactivation count is nonzero. + */ + unsigned deactivations; + + /* the composite driver won't complete the control transfer's + * data/status stages till delayed_status is zero. 
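+	 *
+	 * As an illustrative sketch, a function's setup() callback that
+	 * cannot finish a control request immediately returns
+	 * USB_GADGET_DELAYED_STATUS, and later, once its data is ready,
+	 * calls usb_composite_setup_continue(cdev) to let the postponed
+	 * data/status stage proceed.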
+ */ + int delayed_status; + + /* protects deactivations and delayed_status counts*/ + spinlock_t lock; +}; + +extern int usb_string_id(struct usb_composite_dev *c); +extern int usb_string_ids_tab(struct usb_composite_dev *c, + struct usb_string *str); +extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); + + +/* messaging utils */ +#define DBG(d, fmt, args...) \ + dev_dbg(&(d)->gadget->dev , fmt , ## args) +#define VDBG(d, fmt, args...) \ + dev_vdbg(&(d)->gadget->dev , fmt , ## args) +#define ERROR(d, fmt, args...) \ + dev_err(&(d)->gadget->dev , fmt , ## args) +#define WARNING(d, fmt, args...) \ + dev_warn(&(d)->gadget->dev , fmt , ## args) +#define INFO(d, fmt, args...) \ + dev_info(&(d)->gadget->dev , fmt , ## args) + +#endif /* __LINUX_USB_COMPOSITE_H */ diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h new file mode 100644 index 00000000..7cc95ee3 --- /dev/null +++ b/include/linux/usb/ehci_def.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2001-2002 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_USB_EHCI_DEF_H +#define __LINUX_USB_EHCI_DEF_H + +/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ + +/* Section 2.2 Host Controller Capability Registers */ +struct ehci_caps { + /* these fields are specified as 8 and 16 bit registers, + * but some hosts can't perform 8 or 16 bit PCI accesses. + * some hosts treat caplength and hciversion as parts of a 32-bit + * register, others treat them as two separate registers, this + * affects the memory map for big endian controllers. + */ + u32 hc_capbase; +#define HC_LENGTH(ehci, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \ + (ehci_big_endian_capbase(ehci) ? 24 : 0))) +#define HC_VERSION(ehci, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \ + (ehci_big_endian_capbase(ehci) ? 0 : 16))) + u32 hcs_params; /* HCSPARAMS - offset 0x4 */ +#define HCS_DEBUG_PORT(p) (((p)>>20)&0xf) /* bits 23:20, debug port? 
*/ +#define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */ +#define HCS_N_CC(p) (((p)>>12)&0xf) /* bits 15:12, #companion HCs */ +#define HCS_N_PCC(p) (((p)>>8)&0xf) /* bits 11:8, ports per CC */ +#define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */ +#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */ +#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ + + u32 hcc_params; /* HCCPARAMS - offset 0x8 */ +/* EHCI 1.1 addendum */ +#define HCC_32FRAME_PERIODIC_LIST(p) ((p)&(1 << 19)) +#define HCC_PER_PORT_CHANGE_EVENT(p) ((p)&(1 << 18)) +#define HCC_LPM(p) ((p)&(1 << 17)) +#define HCC_HW_PREFETCH(p) ((p)&(1 << 16)) + +#define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */ +#define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */ +#define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */ +#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */ +#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/ +#define HCC_64BIT_ADDR(p) ((p)&(1)) /* true: can use 64-bit addr */ + u8 portroute[8]; /* nibbles for routing - offset 0xC */ +}; + + +/* Section 2.3 Host Controller Operational Registers */ +struct ehci_regs { + + /* USBCMD: offset 0x00 */ + u32 command; + +/* EHCI 1.1 addendum */ +#define CMD_HIRD (0xf<<24) /* host initiated resume duration */ +#define CMD_PPCEE (1<<15) /* per port change event enable */ +#define CMD_FSP (1<<14) /* fully synchronized prefetch */ +#define CMD_ASPE (1<<13) /* async schedule prefetch enable */ +#define CMD_PSPE (1<<12) /* periodic schedule prefetch enable */ +/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */ +#define CMD_PARK (1<<11) /* enable "park" on async qh */ +#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ +#define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */ +#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */ +#define CMD_ASE (1<<5) /* async schedule enable */ +#define CMD_PSE (1<<4) /* periodic schedule enable */ +/* 3:2 is periodic frame list size */ +#define CMD_RESET (1<<1) /* reset HC not bus */ +#define CMD_RUN (1<<0) /* start/stop HC */ + + /* USBSTS: offset 0x04 */ + u32 status; +#define STS_PPCE_MASK (0xff<<16) /* Per-Port change event 1-16 */ +#define STS_ASS (1<<15) /* Async Schedule Status */ +#define STS_PSS (1<<14) /* Periodic Schedule Status */ +#define STS_RECL (1<<13) /* Reclamation */ +#define STS_HALT (1<<12) /* Not running (any reason) */ +/* some bits reserved */ + /* these STS_* flags are also intr_enable bits (USBINTR) */ +#define STS_IAA (1<<5) /* Interrupted on async advance */ +#define STS_FATAL (1<<4) /* such as some PCI access errors */ +#define STS_FLR (1<<3) /* frame list rolled over */ +#define STS_PCD (1<<2) /* port change detect */ +#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */ +#define STS_INT (1<<0) /* "normal" completion (short, ...) 
*/ + + /* USBINTR: offset 0x08 */ + u32 intr_enable; + + /* FRINDEX: offset 0x0C */ + u32 frame_index; /* current microframe number */ + /* CTRLDSSEGMENT: offset 0x10 */ + u32 segment; /* address bits 63:32 if needed */ + /* PERIODICLISTBASE: offset 0x14 */ + u32 frame_list; /* points to periodic list */ + /* ASYNCLISTADDR: offset 0x18 */ + u32 async_next; /* address of next async queue head */ + + u32 reserved[9]; + + /* CONFIGFLAG: offset 0x40 */ + u32 configured_flag; +#define FLAG_CF (1<<0) /* true: we'll support "high speed" */ + + /* PORTSC: offset 0x44 */ + u32 port_status[0]; /* up to N_PORTS */ +/* EHCI 1.1 addendum */ +#define PORTSC_SUSPEND_STS_ACK 0 +#define PORTSC_SUSPEND_STS_NYET 1 +#define PORTSC_SUSPEND_STS_STALL 2 +#define PORTSC_SUSPEND_STS_ERR 3 + +#define PORT_DEV_ADDR (0x7f<<25) /* device address */ +#define PORT_SSTS (0x3<<23) /* suspend status */ +/* 31:23 reserved */ +#define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */ +#define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */ +#define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */ +/* 19:16 for port testing */ +#define PORT_TEST(x) (((x)&0xf)<<16) /* Port Test Control */ +#define PORT_TEST_PKT PORT_TEST(0x4) /* Port Test Control - packet test */ +#define PORT_TEST_FORCE PORT_TEST(0x5) /* Port Test Control - force enable */ +#define PORT_LED_OFF (0<<14) +#define PORT_LED_AMBER (1<<14) +#define PORT_LED_GREEN (2<<14) +#define PORT_LED_MASK (3<<14) +#define PORT_OWNER (1<<13) /* true: companion hc owns this port */ +#define PORT_POWER (1<<12) /* true: has power (see PPC) */ +#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ +/* 11:10 for detecting lowspeed devices (reset vs release ownership) */ +/* 9 reserved */ +#define PORT_LPM (1<<9) /* LPM transaction */ +#define PORT_RESET (1<<8) /* reset port */ +#define PORT_SUSPEND (1<<7) /* suspend port */ +#define PORT_RESUME (1<<6) /* resume it */ +#define PORT_OCC (1<<5) /* over current change */ +#define PORT_OC (1<<4) /* over current active */ +#define PORT_PEC (1<<3) /* port enable change */ +#define PORT_PE (1<<2) /* port enable */ +#define PORT_CSC (1<<1) /* connect status change */ +#define PORT_CONNECT (1<<0) /* device connected */ +#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) +}; + +#define USBMODE 0x68 /* USB Device mode */ +#define USBMODE_SDIS (1<<3) /* Stream disable */ +#define USBMODE_BE (1<<2) /* BE/LE endianness select */ +#define USBMODE_CM_HC (3<<0) /* host controller mode */ +#define USBMODE_CM_IDLE (0<<0) /* idle state */ + +/* Moorestown has some non-standard registers, partially due to the fact that + * its EHCI controller has both TT and LPM support. HOSTPCx are extensions to + * PORTSCx + */ +#define HOSTPC0 0x84 /* HOSTPC extension */ +#define HOSTPC_PHCD (1<<22) /* Phy clock disable */ +#define HOSTPC_PSPD (3<<25) /* Port speed detection */ +#define USBMODE_EX 0xc8 /* USB Device mode extension */ +#define USBMODE_EX_VBPS (1<<5) /* VBus Power Select On */ +#define USBMODE_EX_HC (3<<0) /* host controller mode */ +#define TXFILLTUNING 0x24 /* TX FIFO Tuning register */ +#define TXFIFO_DEFAULT (8<<16) /* FIFO burst threshold 8 */ + +/* Appendix C, Debug port ... intended for use with special "debug devices" + * that can help if there's no serial console. (nonstandard enumeration.) 
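+ *
+ * For illustration (a hypothetical host-driver sketch, assuming the
+ * capability registers are already mapped as "caps"), the debug port
+ * number and the port count are both decoded from HCSPARAMS:
+ *
+ *	u32 params = readl(&caps->hcs_params);
+ *	unsigned n_ports = HCS_N_PORTS(params);
+ *	unsigned dbg_port = HCS_DEBUG_PORT(params);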
+ */ +struct ehci_dbg_port { + u32 control; +#define DBGP_OWNER (1<<30) +#define DBGP_ENABLED (1<<28) +#define DBGP_DONE (1<<16) +#define DBGP_INUSE (1<<10) +#define DBGP_ERRCODE(x) (((x)>>7)&0x07) +# define DBGP_ERR_BAD 1 +# define DBGP_ERR_SIGNAL 2 +#define DBGP_ERROR (1<<6) +#define DBGP_GO (1<<5) +#define DBGP_OUT (1<<4) +#define DBGP_LEN(x) (((x)>>0)&0x0f) + u32 pids; +#define DBGP_PID_GET(x) (((x)>>16)&0xff) +#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) + u32 data03; + u32 data47; + u32 address; +#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) +}; + +#ifdef CONFIG_EARLY_PRINTK_DBGP +#include <linux/init.h> +extern int __init early_dbgp_init(char *s); +extern struct console early_dbgp_console; +#endif /* CONFIG_EARLY_PRINTK_DBGP */ + +#ifdef CONFIG_EARLY_PRINTK_DBGP +/* Call backs from ehci host driver to ehci debug driver */ +extern int dbgp_external_startup(void); +extern int dbgp_reset_prep(void); +#else +static inline int dbgp_reset_prep(void) +{ + return 1; +} +static inline int dbgp_external_startup(void) +{ + return -1; +} +#endif + +#endif /* __LINUX_USB_EHCI_DEF_H */ diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h new file mode 100644 index 00000000..1894f42f --- /dev/null +++ b/include/linux/usb/ehci_pdriver.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __USB_CORE_EHCI_PDRIVER_H +#define __USB_CORE_EHCI_PDRIVER_H + +/** + * struct usb_ehci_pdata - platform_data for generic ehci driver + * + * @caps_offset: offset of the EHCI Capability Registers to the start of + * the io memory region provided to the driver. + * @has_tt: set to 1 if TT is integrated in root hub. + * @port_power_on: set to 1 if the controller needs a power up after + * initialization. + * @port_power_off: set to 1 if the controller needs to be powered down + * after initialization. + * + * These are general configuration options for the EHCI controller. All of + * these options are activating more or less workarounds for some hardware. + */ +struct usb_ehci_pdata { + int caps_offset; + unsigned has_tt:1; + unsigned has_synopsys_hc_bug:1; + unsigned big_endian_desc:1; + unsigned big_endian_mmio:1; + unsigned port_power_on:1; + unsigned port_power_off:1; +}; + +#endif /* __USB_CORE_EHCI_PDRIVER_H */ diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h new file mode 100644 index 00000000..61ebe0aa --- /dev/null +++ b/include/linux/usb/f_accessory.h @@ -0,0 +1,146 @@ +/* + * Gadget Function Driver for Android USB accessories + * + * Copyright (C) 2011 Google, Inc. 
+ * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_USB_F_ACCESSORY_H +#define __LINUX_USB_F_ACCESSORY_H + +/* Use Google Vendor ID when in accessory mode */ +#define USB_ACCESSORY_VENDOR_ID 0x18D1 + + +/* Product ID to use when in accessory mode */ +#define USB_ACCESSORY_PRODUCT_ID 0x2D00 + +/* Product ID to use when in accessory mode and adb is enabled */ +#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01 + +/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */ +#define ACCESSORY_STRING_MANUFACTURER 0 +#define ACCESSORY_STRING_MODEL 1 +#define ACCESSORY_STRING_DESCRIPTION 2 +#define ACCESSORY_STRING_VERSION 3 +#define ACCESSORY_STRING_URI 4 +#define ACCESSORY_STRING_SERIAL 5 + +/* Control request for retrieving device's protocol version + * + * requestType: USB_DIR_IN | USB_TYPE_VENDOR + * request: ACCESSORY_GET_PROTOCOL + * value: 0 + * index: 0 + * data version number (16 bits little endian) + * 1 for original accessory support + * 2 adds HID and device to host audio support + */ +#define ACCESSORY_GET_PROTOCOL 51 + +/* Control request for host to send a string to the device + * + * requestType: USB_DIR_OUT | USB_TYPE_VENDOR + * request: ACCESSORY_SEND_STRING + * value: 0 + * index: string ID + * data zero terminated UTF8 string + * + * The device can later retrieve these strings via the + * ACCESSORY_GET_STRING_* ioctls + */ +#define ACCESSORY_SEND_STRING 52 + +/* Control request for starting device in accessory mode. + * The host sends this after setting all its strings to the device. + * + * requestType: USB_DIR_OUT | USB_TYPE_VENDOR + * request: ACCESSORY_START + * value: 0 + * index: 0 + * data none + */ +#define ACCESSORY_START 53 + +/* Control request for registering a HID device. + * Upon registering, a unique ID is sent by the accessory in the + * value parameter. This ID will be used for future commands for + * the device + * + * requestType: USB_DIR_OUT | USB_TYPE_VENDOR + * request: ACCESSORY_REGISTER_HID_DEVICE + * value: Accessory assigned ID for the HID device + * index: total length of the HID report descriptor + * data none + */ +#define ACCESSORY_REGISTER_HID 54 + +/* Control request for unregistering a HID device. + * + * requestType: USB_DIR_OUT | USB_TYPE_VENDOR + * request: ACCESSORY_REGISTER_HID + * value: Accessory assigned ID for the HID device + * index: 0 + * data none + */ +#define ACCESSORY_UNREGISTER_HID 55 + +/* Control request for sending the HID report descriptor. + * If the HID descriptor is longer than the endpoint zero max packet size, + * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC + * commands. The data for the descriptor must be sent sequentially + * if multiple packets are needed. 
+ * + * requestType: USB_DIR_OUT | USB_TYPE_VENDOR + * request: ACCESSORY_SET_HID_REPORT_DESC + * value: Accessory assigned ID for the HID device + * index: offset of data in descriptor + * (needed when HID descriptor is too big for one packet) + * data the HID report descriptor + */ +#define ACCESSORY_SET_HID_REPORT_DESC 56 + +/* Control request for sending HID events. + * + * requestType: USB_DIR_OUT | USB_TYPE_VENDOR + * request: ACCESSORY_SEND_HID_EVENT + * value: Accessory assigned ID for the HID device + * index: 0 + * data the HID report for the event + */ +#define ACCESSORY_SEND_HID_EVENT 57 + +/* Control request for setting the audio mode. + * + * requestType: USB_DIR_OUT | USB_TYPE_VENDOR + * request: ACCESSORY_SET_AUDIO_MODE + * value: 0 - no audio + * 1 - device to host, 44100 16-bit stereo PCM + * index: 0 + * data none + */ +#define ACCESSORY_SET_AUDIO_MODE 58 + +/* ioctls for retrieving strings set by the host */ +#define ACCESSORY_GET_STRING_MANUFACTURER _IOW('M', 1, char[256]) +#define ACCESSORY_GET_STRING_MODEL _IOW('M', 2, char[256]) +#define ACCESSORY_GET_STRING_DESCRIPTION _IOW('M', 3, char[256]) +#define ACCESSORY_GET_STRING_VERSION _IOW('M', 4, char[256]) +#define ACCESSORY_GET_STRING_URI _IOW('M', 5, char[256]) +#define ACCESSORY_GET_STRING_SERIAL _IOW('M', 6, char[256]) +/* returns 1 if there is a start request pending */ +#define ACCESSORY_IS_START_REQUESTED _IO('M', 7) +/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */ +#define ACCESSORY_GET_AUDIO_MODE _IO('M', 8) + +#endif /* __LINUX_USB_F_ACCESSORY_H */ diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h new file mode 100644 index 00000000..72a432e2 --- /dev/null +++ b/include/linux/usb/f_mtp.h @@ -0,0 +1,75 @@ +/* + * Gadget Function Driver for MTP + * + * Copyright (C) 2010 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_USB_F_MTP_H +#define __LINUX_USB_F_MTP_H + +#include <linux/ioctl.h> + +#ifdef __KERNEL__ + +struct mtp_data_header { + /* length of packet, including this header */ + uint32_t length; + /* container type (2 for data packet) */ + uint16_t type; + /* MTP command code */ + uint16_t command; + /* MTP transaction ID */ + uint32_t transaction_id; +}; + +#endif /* __KERNEL__ */ + +struct mtp_file_range { + /* file descriptor for file to transfer */ + int fd; + /* offset in file for start of transfer */ + loff_t offset; + /* number of bytes to transfer */ + int64_t length; + /* MTP command ID for data header, + * used only for MTP_SEND_FILE_WITH_HEADER + */ + uint16_t command; + /* MTP transaction ID for data header, + * used only for MTP_SEND_FILE_WITH_HEADER + */ + uint32_t transaction_id; +}; + +struct mtp_event { + /* size of the event */ + size_t length; + /* event data to send */ + void *data; +}; + +/* Sends the specified file range to the host */ +#define MTP_SEND_FILE _IOW('M', 0, struct mtp_file_range) +/* Receives data from the host and writes it to a file. + * The file is created if it does not exist. 
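+ *
+ * As a usage sketch (hypothetical userspace code; "mtp_fd", "file_fd"
+ * and "nbytes" are illustrative names), the MTP daemon fills a
+ * struct mtp_file_range and passes it to the driver:
+ *
+ *	struct mtp_file_range mfr = {
+ *		.fd = file_fd,
+ *		.offset = 0,
+ *		.length = nbytes,
+ *	};
+ *	ioctl(mtp_fd, MTP_RECEIVE_FILE, &mfr);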
+ */ +#define MTP_RECEIVE_FILE _IOW('M', 1, struct mtp_file_range) +/* Sends an event to the host via the interrupt endpoint */ +#define MTP_SEND_EVENT _IOW('M', 3, struct mtp_event) +/* Sends the specified file range to the host, + * with a 12 byte MTP data packet header at the beginning. + */ +#define MTP_SEND_FILE_WITH_HEADER _IOW('M', 4, struct mtp_file_range) + +#endif /* __LINUX_USB_F_MTP_H */ diff --git a/include/linux/usb/functionfs.h b/include/linux/usb/functionfs.h new file mode 100644 index 00000000..7587ef93 --- /dev/null +++ b/include/linux/usb/functionfs.h @@ -0,0 +1,199 @@ +#ifndef __LINUX_FUNCTIONFS_H__ +#define __LINUX_FUNCTIONFS_H__ 1 + + +#include <linux/types.h> +#include <linux/ioctl.h> + +#include <linux/usb/ch9.h> + + +enum { + FUNCTIONFS_DESCRIPTORS_MAGIC = 1, + FUNCTIONFS_STRINGS_MAGIC = 2 +}; + + +#ifndef __KERNEL__ + +/* Descriptor of an non-audio endpoint */ +struct usb_endpoint_descriptor_no_audio { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bEndpointAddress; + __u8 bmAttributes; + __le16 wMaxPacketSize; + __u8 bInterval; +} __attribute__((packed)); + + +/* + * All numbers must be in little endian order. + */ + +struct usb_functionfs_descs_head { + __le32 magic; + __le32 length; + __le32 fs_count; + __le32 hs_count; +} __attribute__((packed)); + +/* + * Descriptors format: + * + * | off | name | type | description | + * |-----+-----------+--------------+--------------------------------------| + * | 0 | magic | LE32 | FUNCTIONFS_{FS,HS}_DESCRIPTORS_MAGIC | + * | 4 | length | LE32 | length of the whole data chunk | + * | 8 | fs_count | LE32 | number of full-speed descriptors | + * | 12 | hs_count | LE32 | number of high-speed descriptors | + * | 16 | fs_descrs | Descriptor[] | list of full-speed descriptors | + * | | hs_descrs | Descriptor[] | list of high-speed descriptors | + * + * descs are just valid USB descriptors and have the following format: + * + * | off | name | type | description | + * |-----+-----------------+------+--------------------------| + * | 0 | bLength | U8 | length of the descriptor | + * | 1 | bDescriptorType | U8 | descriptor type | + * | 2 | payload | | descriptor's payload | + */ + +struct usb_functionfs_strings_head { + __le32 magic; + __le32 length; + __le32 str_count; + __le32 lang_count; +} __attribute__((packed)); + +/* + * Strings format: + * + * | off | name | type | description | + * |-----+------------+-----------------------+----------------------------| + * | 0 | magic | LE32 | FUNCTIONFS_STRINGS_MAGIC | + * | 4 | length | LE32 | length of the data chunk | + * | 8 | str_count | LE32 | number of strings | + * | 12 | lang_count | LE32 | number of languages | + * | 16 | stringtab | StringTab[lang_count] | table of strings per lang | + * + * For each language there is one stringtab entry (ie. there are lang_count + * stringtab entires). Each StringTab has following format: + * + * | off | name | type | description | + * |-----+---------+-------------------+------------------------------------| + * | 0 | lang | LE16 | language code | + * | 2 | strings | String[str_count] | array of strings in given language | + * + * For each string there is one strings entry (ie. there are str_count + * string entries). Each String is a NUL terminated string encoded in + * UTF-8. + */ + +#endif + + +/* + * Events are delivered on the ep0 file descriptor, when the user mode driver + * reads from this file descriptor after writing the descriptors. Don't + * stop polling this descriptor. 
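+ *
+ * A minimal sketch of that user-mode loop (illustrative only; "ep0_fd"
+ * is the ep0 file opened by the user mode driver, and handle_event()
+ * is a hypothetical helper):
+ *
+ *	struct usb_functionfs_event event;
+ *
+ *	while (read(ep0_fd, &event, sizeof(event)) == sizeof(event))
+ *		handle_event(&event);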
+ */ + +enum usb_functionfs_event_type { + FUNCTIONFS_BIND, + FUNCTIONFS_UNBIND, + + FUNCTIONFS_ENABLE, + FUNCTIONFS_DISABLE, + + FUNCTIONFS_SETUP, + + FUNCTIONFS_SUSPEND, + FUNCTIONFS_RESUME +}; + +/* NOTE: this structure must stay the same size and layout on + * both 32-bit and 64-bit kernels. + */ +struct usb_functionfs_event { + union { + /* SETUP: packet; DATA phase i/o precedes next event + *(setup.bmRequestType & USB_DIR_IN) flags direction */ + struct usb_ctrlrequest setup; + } __attribute__((packed)) u; + + /* enum usb_functionfs_event_type */ + __u8 type; + __u8 _pad[3]; +} __attribute__((packed)); + + +/* Endpoint ioctls */ +/* The same as in gadgetfs */ + +/* IN transfers may be reported to the gadget driver as complete + * when the fifo is loaded, before the host reads the data; + * OUT transfers may be reported to the host's "client" driver as + * complete when they're sitting in the FIFO unread. + * THIS returns how many bytes are "unclaimed" in the endpoint fifo + * (needed for precise fault handling, when the hardware allows it) + */ +#define FUNCTIONFS_FIFO_STATUS _IO('g', 1) + +/* discards any unclaimed data in the fifo. */ +#define FUNCTIONFS_FIFO_FLUSH _IO('g', 2) + +/* resets endpoint halt+toggle; used to implement set_interface. + * some hardware (like pxa2xx) can't support this. + */ +#define FUNCTIONFS_CLEAR_HALT _IO('g', 3) + +/* Specific for functionfs */ + +/* + * Returns reverse mapping of an interface. Called on EP0. If there + * is no such interface returns -EDOM. If function is not active + * returns -ENODEV. + */ +#define FUNCTIONFS_INTERFACE_REVMAP _IO('g', 128) + +/* + * Returns real bEndpointAddress of an endpoint. If function is not + * active returns -ENODEV. + */ +#define FUNCTIONFS_ENDPOINT_REVMAP _IO('g', 129) + + +#ifdef __KERNEL__ + +struct ffs_data; +struct usb_composite_dev; +struct usb_configuration; + + +static int functionfs_init(void) __attribute__((warn_unused_result)); +static void functionfs_cleanup(void); + +static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) + __attribute__((warn_unused_result, nonnull)); +static void functionfs_unbind(struct ffs_data *ffs) + __attribute__((nonnull)); + +static int functionfs_bind_config(struct usb_composite_dev *cdev, + struct usb_configuration *c, + struct ffs_data *ffs) + __attribute__((warn_unused_result, nonnull)); + + +static int functionfs_ready_callback(struct ffs_data *ffs) + __attribute__((warn_unused_result, nonnull)); +static void functionfs_closed_callback(struct ffs_data *ffs) + __attribute__((nonnull)); +static int functionfs_check_dev_callback(const char *dev_name) + __attribute__((warn_unused_result, nonnull)); + + +#endif + +#endif diff --git a/include/linux/usb/g_hid.h b/include/linux/usb/g_hid.h new file mode 100644 index 00000000..50f5745d --- /dev/null +++ b/include/linux/usb/g_hid.h @@ -0,0 +1,32 @@ +/* + * g_hid.h -- Header file for USB HID gadget driver + * + * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_USB_G_HID_H +#define __LINUX_USB_G_HID_H + +struct hidg_func_descriptor { + unsigned char subclass; + unsigned char protocol; + unsigned short report_length; + unsigned short report_desc_length; + unsigned char report_desc[]; +}; + +#endif /* __LINUX_USB_G_HID_H */ diff --git a/include/linux/usb/g_printer.h b/include/linux/usb/g_printer.h new file mode 100644 index 00000000..6178fde5 --- /dev/null +++ b/include/linux/usb/g_printer.h @@ -0,0 +1,35 @@ +/* + * g_printer.h -- Header file for USB Printer gadget driver + * + * Copyright (C) 2007 Craig W. Nadler + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_USB_G_PRINTER_H +#define __LINUX_USB_G_PRINTER_H + +#define PRINTER_NOT_ERROR 0x08 +#define PRINTER_SELECTED 0x10 +#define PRINTER_PAPER_EMPTY 0x20 + +/* The 'g' code is also used by gadgetfs ioctl requests. + * Don't add any colliding codes to either driver, and keep + * them in unique ranges (size 0x20 for now). + */ +#define GADGET_GET_PRINTER_STATUS _IOR('g', 0x21, unsigned char) +#define GADGET_SET_PRINTER_STATUS _IOWR('g', 0x22, unsigned char) + +#endif /* __LINUX_USB_G_PRINTER_H */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h new file mode 100644 index 00000000..9517466a --- /dev/null +++ b/include/linux/usb/gadget.h @@ -0,0 +1,975 @@ +/* + * <linux/usb/gadget.h> + * + * We call the USB code inside a Linux-based peripheral device a "gadget" + * driver, except for the hardware-specific bus glue. One USB host can + * master many USB gadgets, but the gadgets are only slaved to one host. + * + * + * (C) Copyright 2002-2004 by David Brownell + * All Rights Reserved. + * + * This software is licensed under the GNU GPL version 2. + */ + +#ifndef __LINUX_USB_GADGET_H +#define __LINUX_USB_GADGET_H + +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/scatterlist.h> +#include <linux/types.h> +#include <linux/usb/ch9.h> + +struct usb_ep; + +/** + * struct usb_request - describes one i/o request + * @buf: Buffer used for data. Always provide this; some controllers + * only use PIO, or don't use DMA for some endpoints. + * @dma: DMA address corresponding to 'buf'. If you don't set this + * field, and the usb controller needs one, it is responsible + * for mapping and unmapping the buffer. + * @sg: a scatterlist for SG-capable controllers. 
+ * @num_sgs: number of SG entries + * @num_mapped_sgs: number of SG entries mapped to DMA (internal) + * @length: Length of that data + * @stream_id: The stream id, when USB3.0 bulk streams are being used + * @no_interrupt: If true, hints that no completion irq is needed. + * Helpful sometimes with deep request queues that are handled + * directly by DMA controllers. + * @zero: If true, when writing data, makes the last packet be "short" + * by adding a zero length packet as needed; + * @short_not_ok: When reading data, makes short packets be + * treated as errors (queue stops advancing till cleanup). + * @complete: Function called when request completes, so this request and + * its buffer may be re-used. The function will always be called with + * interrupts disabled, and it must not sleep. + * Reads terminate with a short packet, or when the buffer fills, + * whichever comes first. When writes terminate, some data bytes + * will usually still be in flight (often in a hardware fifo). + * Errors (for reads or writes) stop the queue from advancing + * until the completion function returns, so that any transfers + * invalidated by the error may first be dequeued. + * @context: For use by the completion callback + * @list: For use by the gadget driver. + * @status: Reports completion code, zero or a negative errno. + * Normally, faults block the transfer queue from advancing until + * the completion callback returns. + * Code "-ESHUTDOWN" indicates completion caused by device disconnect, + * or when the driver disabled the endpoint. + * @actual: Reports bytes transferred to/from the buffer. For reads (OUT + * transfers) this may be less than the requested length. If the + * short_not_ok flag is set, short reads are treated as errors + * even when status otherwise indicates successful completion. + * Note that for writes (IN transfers) some data bytes may still + * reside in a device-side FIFO when the request is reported as + * complete. + * + * These are allocated/freed through the endpoint they're used with. The + * hardware's driver can add extra per-request data to the memory it returns, + * which often avoids separate memory allocations (potential failures), + * later when the request is queued. + * + * Request flags affect request handling, such as whether a zero length + * packet is written (the "zero" flag), whether a short read should be + * treated as an error (blocking request queue advance, the "short_not_ok" + * flag), or hinting that an interrupt is not required (the "no_interrupt" + * flag, for use with deep request queues). + * + * Bulk endpoints can use any size buffers, and can also be used for interrupt + * transfers. interrupt-only endpoints can be much less functional. + * + * NOTE: this is analogous to 'struct urb' on the host side, except that + * it's thinner and promotes more pre-allocation. + */ + +struct usb_request { + void *buf; + unsigned length; + dma_addr_t dma; + + struct scatterlist *sg; + unsigned num_sgs; + unsigned num_mapped_sgs; + + unsigned stream_id:16; + unsigned no_interrupt:1; + unsigned zero:1; + unsigned short_not_ok:1; + + void (*complete)(struct usb_ep *ep, + struct usb_request *req); + void *context; + struct list_head list; + + int status; + unsigned actual; +}; + +/*-------------------------------------------------------------------------*/ + +/* endpoint-specific parts of the api to the usb controller hardware. + * unlike the urb model, (de)multiplexing layers are not required. 
+ * (so this api could slash overhead if used on the host side...) + * + * note that device side usb controllers commonly differ in how many + * endpoints they support, as well as their capabilities. + */ +struct usb_ep_ops { + int (*enable) (struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc); + int (*disable) (struct usb_ep *ep); + + struct usb_request *(*alloc_request) (struct usb_ep *ep, + gfp_t gfp_flags); + void (*free_request) (struct usb_ep *ep, struct usb_request *req); + + int (*queue) (struct usb_ep *ep, struct usb_request *req, + gfp_t gfp_flags); + int (*dequeue) (struct usb_ep *ep, struct usb_request *req); + + int (*set_halt) (struct usb_ep *ep, int value); + int (*set_wedge) (struct usb_ep *ep); + + int (*fifo_status) (struct usb_ep *ep); + void (*fifo_flush) (struct usb_ep *ep); +}; + +/** + * struct usb_ep - device side representation of USB endpoint + * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk" + * @ops: Function pointers used to access hardware-specific operations. + * @ep_list:the gadget's ep_list holds all of its endpoints + * @maxpacket:The maximum packet size used on this endpoint. The initial + * value can sometimes be reduced (hardware allowing), according to + * the endpoint descriptor used to configure the endpoint. + * @max_streams: The maximum number of streams supported + * by this EP (0 - 16, actual number is 2^n) + * @mult: multiplier, 'mult' value for SS Isoc EPs + * @maxburst: the maximum number of bursts supported by this EP (for usb3) + * @driver_data:for use by the gadget driver. + * @address: used to identify the endpoint when finding descriptor that + * matches connection speed + * @desc: endpoint descriptor. This pointer is set before the endpoint is + * enabled and remains valid until the endpoint is disabled. + * @comp_desc: In case of SuperSpeed support, this is the endpoint companion + * descriptor that is used to configure the endpoint + * + * the bus controller driver lists all the general purpose endpoints in + * gadget->ep_list. the control endpoint (gadget->ep0) is not in that list, + * and is accessed only in response to a driver setup() callback. + */ +struct usb_ep { + void *driver_data; + + const char *name; + const struct usb_ep_ops *ops; + struct list_head ep_list; + unsigned maxpacket:16; + unsigned max_streams:16; + unsigned mult:2; + unsigned maxburst:5; + u8 address; + const struct usb_endpoint_descriptor *desc; + const struct usb_ss_ep_comp_descriptor *comp_desc; +}; + +/*-------------------------------------------------------------------------*/ + +/** + * usb_ep_enable - configure endpoint, making it usable + * @ep:the endpoint being configured. may not be the endpoint named "ep0". + * drivers discover endpoints through the ep_list of a usb_gadget. + * + * When configurations are set, or when interface settings change, the driver + * will enable or disable the relevant endpoints. while it is enabled, an + * endpoint may be used for i/o until the driver receives a disconnect() from + * the host or until the endpoint is disabled. + * + * the ep0 implementation (which calls this routine) must ensure that the + * hardware capabilities of each endpoint match the descriptor provided + * for it. for example, an endpoint named "ep2in-bulk" would be usable + * for interrupt transfers as well as bulk, but it likely couldn't be used + * for iso transfers or for endpoint 14. some endpoints are fully + * configurable, with more generic names like "ep-a". 
(remember that for + * USB, "in" means "towards the USB master".) + * + * returns zero, or a negative error code. + */ +static inline int usb_ep_enable(struct usb_ep *ep) +{ + return ep->ops->enable(ep, ep->desc); +} + +/** + * usb_ep_disable - endpoint is no longer usable + * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0". + * + * no other task may be using this endpoint when this is called. + * any pending and uncompleted requests will complete with status + * indicating disconnect (-ESHUTDOWN) before this call returns. + * gadget drivers must call usb_ep_enable() again before queueing + * requests to the endpoint. + * + * returns zero, or a negative error code. + */ +static inline int usb_ep_disable(struct usb_ep *ep) +{ + return ep->ops->disable(ep); +} + +/** + * usb_ep_alloc_request - allocate a request object to use with this endpoint + * @ep:the endpoint to be used with with the request + * @gfp_flags:GFP_* flags to use + * + * Request objects must be allocated with this call, since they normally + * need controller-specific setup and may even need endpoint-specific + * resources such as allocation of DMA descriptors. + * Requests may be submitted with usb_ep_queue(), and receive a single + * completion callback. Free requests with usb_ep_free_request(), when + * they are no longer needed. + * + * Returns the request, or null if one could not be allocated. + */ +static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, + gfp_t gfp_flags) +{ + return ep->ops->alloc_request(ep, gfp_flags); +} + +/** + * usb_ep_free_request - frees a request object + * @ep:the endpoint associated with the request + * @req:the request being freed + * + * Reverses the effect of usb_ep_alloc_request(). + * Caller guarantees the request is not queued, and that it will + * no longer be requeued (or otherwise used). + */ +static inline void usb_ep_free_request(struct usb_ep *ep, + struct usb_request *req) +{ + ep->ops->free_request(ep, req); +} + +/** + * usb_ep_queue - queues (submits) an I/O request to an endpoint. + * @ep:the endpoint associated with the request + * @req:the request being submitted + * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't + * pre-allocate all necessary memory with the request. + * + * This tells the device controller to perform the specified request through + * that endpoint (reading or writing a buffer). When the request completes, + * including being canceled by usb_ep_dequeue(), the request's completion + * routine is called to return the request to the driver. Any endpoint + * (except control endpoints like ep0) may have more than one transfer + * request queued; they complete in FIFO order. Once a gadget driver + * submits a request, that request may not be examined or modified until it + * is given back to that driver through the completion callback. + * + * Each request is turned into one or more packets. The controller driver + * never merges adjacent requests into the same packet. OUT transfers + * will sometimes use data that's already buffered in the hardware. + * Drivers can rely on the fact that the first byte of the request's buffer + * always corresponds to the first byte of some USB packet, for both + * IN and OUT transfers. + * + * Bulk endpoints can queue any amount of data; the transfer is packetized + * automatically. The last packet will be short if the request doesn't fill it + * out completely. 
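[Editorial example] Because usb_ep_enable() takes only the endpoint, the function driver publishes the chosen descriptor through ep->desc first. An illustrative kernel-side sketch of that pattern (the helper name is hypothetical, not taken from any in-tree function):

/* Hypothetical: bind the speed-appropriate descriptor to the endpoint and
 * enable it; usb_ep_enable() reads ep->desc internally. */
static int example_start_ep(struct usb_ep *ep,
			    const struct usb_endpoint_descriptor *desc)
{
	int ret;

	ep->desc = desc;
	ret = usb_ep_enable(ep);
	if (ret)
		pr_err("can't enable %s --> %d\n", ep->name, ret);
	return ret;
}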
Zero length packets (ZLPs) should be avoided in portable + * protocols since not all usb hardware can successfully handle zero length + * packets. (ZLPs may be explicitly written, and may be implicitly written if + * the request 'zero' flag is set.) Bulk endpoints may also be used + * for interrupt transfers; but the reverse is not true, and some endpoints + * won't support every interrupt transfer. (Such as 768 byte packets.) + * + * Interrupt-only endpoints are less functional than bulk endpoints, for + * example by not supporting queueing or not handling buffers that are + * larger than the endpoint's maxpacket size. They may also treat data + * toggle differently. + * + * Control endpoints ... after getting a setup() callback, the driver queues + * one response (even if it would be zero length). That enables the + * status ack, after transferring data as specified in the response. Setup + * functions may return negative error codes to generate protocol stalls. + * (Note that some USB device controllers disallow protocol stall responses + * in some cases.) When control responses are deferred (the response is + * written after the setup callback returns), then usb_ep_set_halt() may be + * used on ep0 to trigger protocol stalls. Depending on the controller, + * it may not be possible to trigger a status-stage protocol stall when the + * data stage is over, that is, from within the response's completion + * routine. + * + * For periodic endpoints, like interrupt or isochronous ones, the usb host + * arranges to poll once per interval, and the gadget driver usually will + * have queued some data to transfer at that time. + * + * Returns zero, or a negative error code. Endpoints that are not enabled + * report errors; errors will also be + * reported when the usb peripheral is disconnected. + */ +static inline int usb_ep_queue(struct usb_ep *ep, + struct usb_request *req, gfp_t gfp_flags) +{ + return ep->ops->queue(ep, req, gfp_flags); +} + +/** + * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint + * @ep:the endpoint associated with the request + * @req:the request being canceled + * + * if the request is still active on the endpoint, it is dequeued and its + * completion routine is called (with status -ECONNRESET); else a negative + * error code is returned. + * + * note that some hardware can't clear out write fifos (to unlink the request + * at the head of the queue) except as part of disconnecting from usb. such + * restrictions prevent drivers from supporting configuration changes, + * even to configuration zero (a "chapter 9" requirement). + */ +static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req) +{ + return ep->ops->dequeue(ep, req); +} + +/** + * usb_ep_set_halt - sets the endpoint halt feature. + * @ep: the non-isochronous endpoint being stalled + * + * Use this to stall an endpoint, perhaps as an error report. + * Except for control endpoints, + * the endpoint stays halted (will not stream any data) until the host + * clears this feature; drivers may need to empty the endpoint's request + * queue first, to make sure no inappropriate transfers happen. + * + * Note that while an endpoint CLEAR_FEATURE will be invisible to the + * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the + * current altsetting, see usb_ep_clear_halt(). When switching altsettings, + * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints. + * + * Returns zero, or a negative error code. 
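[Editorial example] The alloc/queue/complete cycle described above, reduced to an illustrative bulk-IN transmit path. This is a kernel-side sketch: every example_* name is hypothetical, GFP_ATOMIC is an assumption about calling context, and error handling is minimal.

/* Hypothetical: copy the payload, queue it, and let the completion callback
 * release the buffer and the request. */
static void example_tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status)
		pr_debug("%s: transfer ended with %d\n", ep->name, req->status);
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

static int example_tx(struct usb_ep *in_ep, const void *data, unsigned len)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(in_ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = kmemdup(data, len, GFP_ATOMIC);
	if (!req->buf) {
		usb_ep_free_request(in_ep, req);
		return -ENOMEM;
	}
	req->length = len;
	req->zero = 1;		/* close the transfer with a short packet/ZLP */
	req->complete = example_tx_complete;

	return usb_ep_queue(in_ep, req, GFP_ATOMIC);
}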
On success, this call sets + * underlying hardware state that blocks data transfers. + * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any + * transfer requests are still queued, or if the controller hardware + * (usually a FIFO) still holds bytes that the host hasn't collected. + */ +static inline int usb_ep_set_halt(struct usb_ep *ep) +{ + return ep->ops->set_halt(ep, 1); +} + +/** + * usb_ep_clear_halt - clears endpoint halt, and resets toggle + * @ep:the bulk or interrupt endpoint being reset + * + * Use this when responding to the standard usb "set interface" request, + * for endpoints that aren't reconfigured, after clearing any other state + * in the endpoint's i/o queue. + * + * Returns zero, or a negative error code. On success, this call clears + * the underlying hardware state reflecting endpoint halt and data toggle. + * Note that some hardware can't support this request (like pxa2xx_udc), + * and accordingly can't correctly implement interface altsettings. + */ +static inline int usb_ep_clear_halt(struct usb_ep *ep) +{ + return ep->ops->set_halt(ep, 0); +} + +/** + * usb_ep_set_wedge - sets the halt feature and ignores clear requests + * @ep: the endpoint being wedged + * + * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT) + * requests. If the gadget driver clears the halt status, it will + * automatically unwedge the endpoint. + * + * Returns zero on success, else negative errno. + */ +static inline int +usb_ep_set_wedge(struct usb_ep *ep) +{ + if (ep->ops->set_wedge) + return ep->ops->set_wedge(ep); + else + return ep->ops->set_halt(ep, 1); +} + +/** + * usb_ep_fifo_status - returns number of bytes in fifo, or error + * @ep: the endpoint whose fifo status is being checked. + * + * FIFO endpoints may have "unclaimed data" in them in certain cases, + * such as after aborted transfers. Hosts may not have collected all + * the IN data written by the gadget driver (and reported by a request + * completion). The gadget driver may not have collected all the data + * written OUT to it by the host. Drivers that need precise handling for + * fault reporting or recovery may need to use this call. + * + * This returns the number of such bytes in the fifo, or a negative + * errno if the endpoint doesn't use a FIFO or doesn't support such + * precise handling. + */ +static inline int usb_ep_fifo_status(struct usb_ep *ep) +{ + if (ep->ops->fifo_status) + return ep->ops->fifo_status(ep); + else + return -EOPNOTSUPP; +} + +/** + * usb_ep_fifo_flush - flushes contents of a fifo + * @ep: the endpoint whose fifo is being flushed. + * + * This call may be used to flush the "unclaimed data" that may exist in + * an endpoint fifo after abnormal transaction terminations. The call + * must never be used except when endpoint is not being used for any + * protocol translation. + */ +static inline void usb_ep_fifo_flush(struct usb_ep *ep) +{ + if (ep->ops->fifo_flush) + ep->ops->fifo_flush(ep); +} + + +/*-------------------------------------------------------------------------*/ + +struct usb_dcd_config_params { + __u8 bU1devExitLat; /* U1 Device exit Latency */ +#define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */ + __le16 bU2DevExitLat; /* U2 Device exit Latency */ +#define USB_DEFAULT_U2_DEV_EXIT_LAT 0x1F4 /* Less then 500 microsec */ +}; + + +struct usb_gadget; +struct usb_gadget_driver; + +/* the rest of the api to the controller hardware: device operations, + * which don't involve endpoints (or i/o). 
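[Editorial example] The halt and FIFO helpers above are typically combined when a function driver wants to report a protocol error back to the host. A hedged sketch of that pattern (not taken from any in-tree driver):

/* Hypothetical: abandon whatever is sitting in the endpoint's fifo, then
 * stall the endpoint until the host issues CLEAR_FEATURE(ENDPOINT_HALT). */
static void example_report_error(struct usb_ep *ep)
{
	usb_ep_fifo_flush(ep);
	if (usb_ep_set_halt(ep) == -EAGAIN)
		pr_debug("%s: halt deferred, requests still queued\n", ep->name);
}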
+ */ +struct usb_gadget_ops { + int (*get_frame)(struct usb_gadget *); + int (*wakeup)(struct usb_gadget *); + int (*set_selfpowered) (struct usb_gadget *, int is_selfpowered); + int (*vbus_session) (struct usb_gadget *, int is_active); + int (*vbus_draw) (struct usb_gadget *, unsigned mA); + int (*pullup) (struct usb_gadget *, int is_on); + int (*ioctl)(struct usb_gadget *, + unsigned code, unsigned long param); + void (*get_config_params)(struct usb_dcd_config_params *); + int (*udc_start)(struct usb_gadget *, + struct usb_gadget_driver *); + int (*udc_stop)(struct usb_gadget *, + struct usb_gadget_driver *); + + /* Those two are deprecated */ + int (*start)(struct usb_gadget_driver *, + int (*bind)(struct usb_gadget *)); + int (*stop)(struct usb_gadget_driver *); +}; + +/** + * struct usb_gadget - represents a usb slave device + * @ops: Function pointers used to access hardware-specific operations. + * @ep0: Endpoint zero, used when reading or writing responses to + * driver setup() requests + * @ep_list: List of other endpoints supported by the device. + * @speed: Speed of current connection to USB host. + * @max_speed: Maximal speed the UDC can handle. UDC must support this + * and all slower speeds. + * @sg_supported: true if we can handle scatter-gather + * @is_otg: True if the USB device port uses a Mini-AB jack, so that the + * gadget driver must provide a USB OTG descriptor. + * @is_a_peripheral: False unless is_otg, the "A" end of a USB cable + * is in the Mini-AB jack, and HNP has been used to switch roles + * so that the "A" device currently acts as A-Peripheral, not A-Host. + * @a_hnp_support: OTG device feature flag, indicating that the A-Host + * supports HNP at this port. + * @a_alt_hnp_support: OTG device feature flag, indicating that the A-Host + * only supports HNP on a different root port. + * @b_hnp_enable: OTG device feature flag, indicating that the A-Host + * enabled HNP support. + * @name: Identifies the controller hardware type. Used in diagnostics + * and sometimes configuration. + * @dev: Driver model state for this abstract device. + * + * Gadgets have a mostly-portable "gadget driver" implementing device + * functions, handling all usb configurations and interfaces. Gadget + * drivers talk to hardware-specific code indirectly, through ops vectors. + * That insulates the gadget driver from hardware details, and packages + * the hardware endpoints through generic i/o queues. The "usb_gadget" + * and "usb_ep" interfaces provide that insulation from the hardware. + * + * Except for the driver data, all fields in this structure are + * read-only to the gadget driver. That driver data is part of the + * "driver model" infrastructure in 2.6 (and later) kernels, and for + * earlier systems is grouped in a similar structure that's not known + * to the rest of the kernel. + * + * Values of the three OTG device feature flags are updated before the + * setup() call corresponding to USB_REQ_SET_CONFIGURATION, and before + * driver suspend() calls. They are valid only when is_otg, and when the + * device is acting as a B-Peripheral (so is_a_peripheral is false). 
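[Editorial example] A UDC driver exposes its hardware through the ops vector defined above. A minimal, hypothetical sketch of how such a table might be populated; the example_udc_* functions stand in for real register accessors:

static int example_udc_get_frame(struct usb_gadget *gadget)
{
	return 0;	/* a real UDC reads its frame counter register here */
}

static int example_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	/* a real UDC toggles the D+ pullup (soft connect) in hardware */
	return 0;
}

static const struct usb_gadget_ops example_udc_ops = {
	.get_frame = example_udc_get_frame,
	.pullup	   = example_udc_pullup,
	/* .udc_start/.udc_stop would hook this controller into udc-core */
};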
+ */ +struct usb_gadget { + /* readonly to gadget driver */ + const struct usb_gadget_ops *ops; + struct usb_ep *ep0; + struct list_head ep_list; /* of usb_ep */ + enum usb_device_speed speed; + enum usb_device_speed max_speed; + unsigned sg_supported:1; + unsigned is_otg:1; + unsigned is_a_peripheral:1; + unsigned b_hnp_enable:1; + unsigned a_hnp_support:1; + unsigned a_alt_hnp_support:1; + const char *name; + struct device dev; +}; + +static inline void set_gadget_data(struct usb_gadget *gadget, void *data) + { dev_set_drvdata(&gadget->dev, data); } +static inline void *get_gadget_data(struct usb_gadget *gadget) + { return dev_get_drvdata(&gadget->dev); } +static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) +{ + return container_of(dev, struct usb_gadget, dev); +} + +/* iterates the non-control endpoints; 'tmp' is a struct usb_ep pointer */ +#define gadget_for_each_ep(tmp, gadget) \ + list_for_each_entry(tmp, &(gadget)->ep_list, ep_list) + + +/** + * gadget_is_dualspeed - return true iff the hardware handles high speed + * @g: controller that might support both high and full speeds + */ +static inline int gadget_is_dualspeed(struct usb_gadget *g) +{ +#ifdef CONFIG_USB_GADGET_DUALSPEED + /* runtime test would check "g->max_speed" ... that might be + * useful to work around hardware bugs, but is mostly pointless + */ + return 1; +#else + return 0; +#endif +} + +/** + * gadget_is_superspeed() - return true if the hardware handles + * supperspeed + * @g: controller that might support supper speed + */ +static inline int gadget_is_superspeed(struct usb_gadget *g) +{ +#ifdef CONFIG_USB_GADGET_SUPERSPEED + /* + * runtime test would check "g->max_speed" ... that might be + * useful to work around hardware bugs, but is mostly pointless + */ + return 1; +#else + return 0; +#endif +} + +/** + * gadget_is_otg - return true iff the hardware is OTG-ready + * @g: controller that might have a Mini-AB connector + * + * This is a runtime test, since kernels with a USB-OTG stack sometimes + * run on boards which only have a Mini-B (or Mini-A) connector. + */ +static inline int gadget_is_otg(struct usb_gadget *g) +{ +#ifdef CONFIG_USB_OTG + return g->is_otg; +#else + return 0; +#endif +} + +/** + * usb_gadget_frame_number - returns the current frame number + * @gadget: controller that reports the frame number + * + * Returns the usb frame number, normally eleven bits from a SOF packet, + * or negative errno if this device doesn't support this capability. + */ +static inline int usb_gadget_frame_number(struct usb_gadget *gadget) +{ + return gadget->ops->get_frame(gadget); +} + +/** + * usb_gadget_wakeup - tries to wake up the host connected to this gadget + * @gadget: controller used to wake up the host + * + * Returns zero on success, else negative error code if the hardware + * doesn't support such attempts, or its support has not been enabled + * by the usb host. Drivers must return device descriptors that report + * their ability to support this, or hosts won't enable it. + * + * This may also try to use SRP to wake the host and start enumeration, + * even if OTG isn't otherwise in use. OTG devices may also start + * remote wakeup even when hosts don't explicitly enable it. + */ +static inline int usb_gadget_wakeup(struct usb_gadget *gadget) +{ + if (!gadget->ops->wakeup) + return -EOPNOTSUPP; + return gadget->ops->wakeup(gadget); +} + +/** + * usb_gadget_set_selfpowered - sets the device selfpowered feature. 
+ * @gadget:the device being declared as self-powered + * + * this affects the device status reported by the hardware driver + * to reflect that it now has a local power supply. + * + * returns zero on success, else negative errno. + */ +static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget) +{ + if (!gadget->ops->set_selfpowered) + return -EOPNOTSUPP; + return gadget->ops->set_selfpowered(gadget, 1); +} + +/** + * usb_gadget_clear_selfpowered - clear the device selfpowered feature. + * @gadget:the device being declared as bus-powered + * + * this affects the device status reported by the hardware driver. + * some hardware may not support bus-powered operation, in which + * case this feature's value can never change. + * + * returns zero on success, else negative errno. + */ +static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget) +{ + if (!gadget->ops->set_selfpowered) + return -EOPNOTSUPP; + return gadget->ops->set_selfpowered(gadget, 0); +} + +/** + * usb_gadget_vbus_connect - Notify controller that VBUS is powered + * @gadget:The device which now has VBUS power. + * Context: can sleep + * + * This call is used by a driver for an external transceiver (or GPIO) + * that detects a VBUS power session starting. Common responses include + * resuming the controller, activating the D+ (or D-) pullup to let the + * host detect that a USB device is attached, and starting to draw power + * (8mA or possibly more, especially after SET_CONFIGURATION). + * + * Returns zero on success, else negative errno. + */ +static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget) +{ + if (!gadget->ops->vbus_session) + return -EOPNOTSUPP; + return gadget->ops->vbus_session(gadget, 1); +} + +/** + * usb_gadget_vbus_draw - constrain controller's VBUS power usage + * @gadget:The device whose VBUS usage is being described + * @mA:How much current to draw, in milliAmperes. This should be twice + * the value listed in the configuration descriptor bMaxPower field. + * + * This call is used by gadget drivers during SET_CONFIGURATION calls, + * reporting how much power the device may consume. For example, this + * could affect how quickly batteries are recharged. + * + * Returns zero on success, else negative errno. + */ +static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) +{ + if (!gadget->ops->vbus_draw) + return -EOPNOTSUPP; + return gadget->ops->vbus_draw(gadget, mA); +} + +/** + * usb_gadget_vbus_disconnect - notify controller about VBUS session end + * @gadget:the device whose VBUS supply is being described + * Context: can sleep + * + * This call is used by a driver for an external transceiver (or GPIO) + * that detects a VBUS power session ending. Common responses include + * reversing everything done in usb_gadget_vbus_connect(). + * + * Returns zero on success, else negative errno. + */ +static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) +{ + if (!gadget->ops->vbus_session) + return -EOPNOTSUPP; + return gadget->ops->vbus_session(gadget, 0); +} + +/** + * usb_gadget_connect - software-controlled connect to USB host + * @gadget:the peripheral being connected + * + * Enables the D+ (or potentially D-) pullup. The host will start + * enumerating this gadget when the pullup is active and a VBUS session + * is active (the link is powered). This pullup is always enabled unless + * usb_gadget_disconnect() has been used to disable it. + * + * Returns zero on success, else negative errno. 
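[Editorial example] The vbus_session and vbus_draw wrappers above are what an external transceiver (or GPIO comparator) driver calls when it detects a session change. A hedged sketch of that call sequence; the helper name is invented:

/* Hypothetical: called from a transceiver driver when its VBUS comparator
 * changes state. */
static void example_vbus_changed(struct usb_gadget *gadget, bool vbus_present)
{
	if (vbus_present) {
		/* session started: attach, then draw unconfigured-level power */
		usb_gadget_vbus_connect(gadget);
		usb_gadget_vbus_draw(gadget, 100);
	} else {
		usb_gadget_vbus_disconnect(gadget);
	}
}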
+ */ +static inline int usb_gadget_connect(struct usb_gadget *gadget) +{ + if (!gadget->ops->pullup) + return -EOPNOTSUPP; + return gadget->ops->pullup(gadget, 1); +} + +/** + * usb_gadget_disconnect - software-controlled disconnect from USB host + * @gadget:the peripheral being disconnected + * + * Disables the D+ (or potentially D-) pullup, which the host may see + * as a disconnect (when a VBUS session is active). Not all systems + * support software pullup controls. + * + * This routine may be used during the gadget driver bind() call to prevent + * the peripheral from ever being visible to the USB host, unless later + * usb_gadget_connect() is called. For example, user mode components may + * need to be activated before the system can talk to hosts. + * + * Returns zero on success, else negative errno. + */ +static inline int usb_gadget_disconnect(struct usb_gadget *gadget) +{ + if (!gadget->ops->pullup) + return -EOPNOTSUPP; + return gadget->ops->pullup(gadget, 0); +} + + +/*-------------------------------------------------------------------------*/ + +/** + * struct usb_gadget_driver - driver for usb 'slave' devices + * @function: String describing the gadget's function + * @max_speed: Highest speed the driver handles. + * @setup: Invoked for ep0 control requests that aren't handled by + * the hardware level driver. Most calls must be handled by + * the gadget driver, including descriptor and configuration + * management. The 16 bit members of the setup data are in + * USB byte order. Called in_interrupt; this may not sleep. Driver + * queues a response to ep0, or returns negative to stall. + * @disconnect: Invoked after all transfers have been stopped, + * when the host is disconnected. May be called in_interrupt; this + * may not sleep. Some devices can't detect disconnect, so this might + * not be called except as part of controller shutdown. + * @unbind: Invoked when the driver is unbound from a gadget, + * usually from rmmod (after a disconnect is reported). + * Called in a context that permits sleeping. + * @suspend: Invoked on USB suspend. May be called in_interrupt. + * @resume: Invoked on USB resume. May be called in_interrupt. + * @driver: Driver model state for this driver. + * + * Devices are disabled till a gadget driver successfully bind()s, which + * means the driver will handle setup() requests needed to enumerate (and + * meet "chapter 9" requirements) then do some useful work. + * + * If gadget->is_otg is true, the gadget driver must provide an OTG + * descriptor during enumeration, or else fail the bind() call. In such + * cases, no USB traffic may flow until both bind() returns without + * having called usb_gadget_disconnect(), and the USB host stack has + * initialized. + * + * Drivers use hardware-specific knowledge to configure the usb hardware. + * endpoint addressing is only one of several hardware characteristics that + * are in descriptors the ep0 implementation returns from setup() calls. + * + * Except for ep0 implementation, most driver code shouldn't need change to + * run on top of different usb controllers. It'll use endpoints set up by + * that ep0 implementation. + * + * The usb controller driver handles a few standard usb requests. Those + * include set_address, and feature flags for devices, interfaces, and + * endpoints (the get_status, set_feature, and clear_feature requests). 
+ * + * Accordingly, the driver's setup() callback must always implement all + * get_descriptor requests, returning at least a device descriptor and + * a configuration descriptor. Drivers must make sure the endpoint + * descriptors match any hardware constraints. Some hardware also constrains + * other descriptors. (The pxa250 allows only configurations 1, 2, or 3). + * + * The driver's setup() callback must also implement set_configuration, + * and should also implement set_interface, get_configuration, and + * get_interface. Setting a configuration (or interface) is where + * endpoints should be activated or (config 0) shut down. + * + * (Note that only the default control endpoint is supported. Neither + * hosts nor devices generally support control traffic except to ep0.) + * + * Most devices will ignore USB suspend/resume operations, and so will + * not provide those callbacks. However, some may need to change modes + * when the host is not longer directing those activities. For example, + * local controls (buttons, dials, etc) may need to be re-enabled since + * the (remote) host can't do that any longer; or an error state might + * be cleared, to make the device behave identically whether or not + * power is maintained. + */ +struct usb_gadget_driver { + char *function; + enum usb_device_speed max_speed; + void (*unbind)(struct usb_gadget *); + int (*setup)(struct usb_gadget *, + const struct usb_ctrlrequest *); + void (*disconnect)(struct usb_gadget *); + void (*suspend)(struct usb_gadget *); + void (*resume)(struct usb_gadget *); + + /* FIXME support safe rmmod */ + struct device_driver driver; +}; + + + +/*-------------------------------------------------------------------------*/ + +/* driver modules register and unregister, as usual. + * these calls must be made in a context that can sleep. + * + * these will usually be implemented directly by the hardware-dependent + * usb bus interface driver, which will only support a single driver. + */ + +/** + * usb_gadget_probe_driver - probe a gadget driver + * @driver: the driver being registered + * @bind: the driver's bind callback + * Context: can sleep + * + * Call this in your gadget driver's module initialization function, + * to tell the underlying usb controller driver about your driver. + * The @bind() function will be called to bind it to a gadget before this + * registration call returns. It's expected that the @bind() function will + * be in init sections. + */ +int usb_gadget_probe_driver(struct usb_gadget_driver *driver, + int (*bind)(struct usb_gadget *)); + +/** + * usb_gadget_unregister_driver - unregister a gadget driver + * @driver:the driver being unregistered + * Context: can sleep + * + * Call this in your gadget driver's module cleanup function, + * to tell the underlying usb controller that your driver is + * going away. If the controller is connected to a USB host, + * it will first disconnect(). The driver is also requested + * to unbind() and clean up any device state, before this procedure + * finally returns. It's expected that the unbind() functions + * will in in exit sections, so may not be linked in some kernels. 
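[Editorial example] Putting the registration calls above together, a skeletal gadget driver module might look like the following hedged sketch. All example_* symbols are hypothetical, and the real descriptor and endpoint work that belongs in bind() and setup() is elided.

#include <linux/module.h>
#include <linux/usb/gadget.h>

static int __init example_bind(struct usb_gadget *gadget)
{
	/* a real driver claims endpoints and prepares descriptors here */
	set_gadget_data(gadget, NULL);
	return 0;
}

static void example_unbind(struct usb_gadget *gadget)
{
}

static int example_setup(struct usb_gadget *gadget,
			 const struct usb_ctrlrequest *ctrl)
{
	/* a real driver answers the chapter 9 requests; stalling everything
	 * keeps the sketch short */
	return -EOPNOTSUPP;
}

static void example_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver example_driver = {
	.function	= "example",
	.max_speed	= USB_SPEED_HIGH,
	.unbind		= example_unbind,
	.setup		= example_setup,
	.disconnect	= example_disconnect,
	.driver		= {
		.name	= "example_gadget",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return usb_gadget_probe_driver(&example_driver, example_bind);
}
module_init(example_init);

static void __exit example_exit(void)
{
	usb_gadget_unregister_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");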
+ */ +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver); + +extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); +extern void usb_del_gadget_udc(struct usb_gadget *gadget); + +/*-------------------------------------------------------------------------*/ + +/* utility to simplify dealing with string descriptors */ + +/** + * struct usb_string - wraps a C string and its USB id + * @id:the (nonzero) ID for this string + * @s:the string, in UTF-8 encoding + * + * If you're using usb_gadget_get_string(), use this to wrap a string + * together with its ID. + */ +struct usb_string { + u8 id; + const char *s; +}; + +/** + * struct usb_gadget_strings - a set of USB strings in a given language + * @language:identifies the strings' language (0x0409 for en-us) + * @strings:array of strings with their ids + * + * If you're using usb_gadget_get_string(), use this to wrap all the + * strings for a given language. + */ +struct usb_gadget_strings { + u16 language; /* 0x0409 for en-us */ + struct usb_string *strings; +}; + +/* put descriptor for string with that id into buf (buflen >= 256) */ +int usb_gadget_get_string(struct usb_gadget_strings *table, int id, u8 *buf); + +/*-------------------------------------------------------------------------*/ + +/* utility to simplify managing config descriptors */ + +/* write vector of descriptors into buffer */ +int usb_descriptor_fillbuf(void *, unsigned, + const struct usb_descriptor_header **); + +/* build config descriptor from single descriptor vector */ +int usb_gadget_config_buf(const struct usb_config_descriptor *config, + void *buf, unsigned buflen, const struct usb_descriptor_header **desc); + +/* copy a NULL-terminated vector of descriptors */ +struct usb_descriptor_header **usb_copy_descriptors( + struct usb_descriptor_header **); + +/** + * usb_free_descriptors - free descriptors returned by usb_copy_descriptors() + * @v: vector of descriptors + */ +static inline void usb_free_descriptors(struct usb_descriptor_header **v) +{ + kfree(v); +} + +/*-------------------------------------------------------------------------*/ + +/* utility to simplify map/unmap of usb_requests to/from DMA */ + +extern int usb_gadget_map_request(struct usb_gadget *gadget, + struct usb_request *req, int is_in); + +extern void usb_gadget_unmap_request(struct usb_gadget *gadget, + struct usb_request *req, int is_in); + +/*-------------------------------------------------------------------------*/ + +/* utility wrapping a simple endpoint selection policy */ + +extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *, + struct usb_endpoint_descriptor *); + + +extern struct usb_ep *usb_ep_autoconfig_ss(struct usb_gadget *, + struct usb_endpoint_descriptor *, + struct usb_ss_ep_comp_descriptor *); + +extern void usb_ep_autoconfig_reset(struct usb_gadget *); + +#endif /* __LINUX_USB_GADGET_H */ diff --git a/include/linux/usb/gadgetfs.h b/include/linux/usb/gadgetfs.h new file mode 100644 index 00000000..0bb12e0d --- /dev/null +++ b/include/linux/usb/gadgetfs.h @@ -0,0 +1,88 @@ +/* + * Filesystem based user-mode API to USB Gadget controller hardware + * + * Other than ep0 operations, most things are done by read() and write() + * on endpoint files found in one directory. They are configured by + * writing descriptors, and then may be used for normal stream style + * i/o requests. When ep0 is configured, the device can enumerate; + * when it's closed, the device disconnects from usb. Operations on + * ep0 require ioctl() operations. 
+ * + * Configuration and device descriptors get written to /dev/gadget/$CHIP, + * which may then be used to read usb_gadgetfs_event structs. The driver + * may activate endpoints as it handles SET_CONFIGURATION setup events, + * or earlier; writing endpoint descriptors to /dev/gadget/$ENDPOINT + * then performing data transfers by reading or writing. + */ + +#ifndef __LINUX_USB_GADGETFS_H +#define __LINUX_USB_GADGETFS_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +#include <linux/usb/ch9.h> + +/* + * Events are delivered on the ep0 file descriptor, when the user mode driver + * reads from this file descriptor after writing the descriptors. Don't + * stop polling this descriptor. + */ + +enum usb_gadgetfs_event_type { + GADGETFS_NOP = 0, + + GADGETFS_CONNECT, + GADGETFS_DISCONNECT, + GADGETFS_SETUP, + GADGETFS_SUSPEND, + /* and likely more ! */ +}; + +/* NOTE: this structure must stay the same size and layout on + * both 32-bit and 64-bit kernels. + */ +struct usb_gadgetfs_event { + union { + /* NOP, DISCONNECT, SUSPEND: nothing + * ... some hardware can't report disconnection + */ + + /* CONNECT: just the speed */ + enum usb_device_speed speed; + + /* SETUP: packet; DATA phase i/o precedes next event + *(setup.bmRequestType & USB_DIR_IN) flags direction + * ... includes SET_CONFIGURATION, SET_INTERFACE + */ + struct usb_ctrlrequest setup; + } u; + enum usb_gadgetfs_event_type type; +}; + + +/* The 'g' code is also used by printer gadget ioctl requests. + * Don't add any colliding codes to either driver, and keep + * them in unique ranges (size 0x20 for now). + */ + +/* endpoint ioctls */ + +/* IN transfers may be reported to the gadget driver as complete + * when the fifo is loaded, before the host reads the data; + * OUT transfers may be reported to the host's "client" driver as + * complete when they're sitting in the FIFO unread. + * THIS returns how many bytes are "unclaimed" in the endpoint fifo + * (needed for precise fault handling, when the hardware allows it) + */ +#define GADGETFS_FIFO_STATUS _IO('g', 1) + +/* discards any unclaimed data in the fifo. */ +#define GADGETFS_FIFO_FLUSH _IO('g', 2) + +/* resets endpoint halt+toggle; used to implement set_interface. + * some hardware (like pxa2xx) can't support this. + */ +#define GADGETFS_CLEAR_HALT _IO('g', 3) + +#endif /* __LINUX_USB_GADGETFS_H */ diff --git a/include/linux/usb/gpio_vbus.h b/include/linux/usb/gpio_vbus.h new file mode 100644 index 00000000..d9f03ccc --- /dev/null +++ b/include/linux/usb/gpio_vbus.h @@ -0,0 +1,30 @@ +/* + * A simple GPIO VBUS sensing driver for B peripheral only devices + * with internal transceivers. + * Optionally D+ pullup can be controlled by a second GPIO. + * + * Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +/** + * struct gpio_vbus_mach_info - configuration for gpio_vbus + * @gpio_vbus: VBUS sensing GPIO + * @gpio_pullup: optional D+ or D- pullup GPIO (else negative/invalid) + * @gpio_vbus_inverted: true if gpio_vbus is active low + * @gpio_pullup_inverted: true if gpio_pullup is active low + * + * The VBUS sensing GPIO should have a pulldown, which will normally be + * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a + * value the GPIO detects as active. Some systems will use comparators. 
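[Editorial example] Board support code hands the configuration described above to the driver as platform data. A hypothetical sketch; the GPIO numbers are made up, and binding by the platform device name "gpio-vbus" is an assumption about the matching driver:

#include <linux/platform_device.h>
#include <linux/usb/gpio_vbus.h>

static struct gpio_vbus_mach_info example_vbus_info = {
	.gpio_vbus		= 21,	/* VBUS divider/comparator output */
	.gpio_pullup		= 22,	/* D+ pullup enable */
	.gpio_vbus_inverted	= false,
	.gpio_pullup_inverted	= false,
};

static struct platform_device example_vbus_device = {
	.name	= "gpio-vbus",
	.id	= -1,
	.dev	= {
		.platform_data = &example_vbus_info,
	},
};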
+ */ +struct gpio_vbus_mach_info { + int gpio_vbus; + int gpio_pullup; + bool gpio_vbus_inverted; + bool gpio_pullup_inverted; +}; diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h new file mode 100644 index 00000000..030f3610 --- /dev/null +++ b/include/linux/usb/hcd.h @@ -0,0 +1,696 @@ +/* + * Copyright (c) 2001-2002 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __USB_CORE_HCD_H +#define __USB_CORE_HCD_H + +#ifdef __KERNEL__ + +#include <linux/rwsem.h> + +#define MAX_TOPO_LEVEL 6 + +/* This file contains declarations of usbcore internals that are mostly + * used or exposed by Host Controller Drivers. + */ + +/* + * USB Packet IDs (PIDs) + */ +#define USB_PID_EXT 0xf0 /* USB 2.0 LPM ECN */ +#define USB_PID_OUT 0xe1 +#define USB_PID_ACK 0xd2 +#define USB_PID_DATA0 0xc3 +#define USB_PID_PING 0xb4 /* USB 2.0 */ +#define USB_PID_SOF 0xa5 +#define USB_PID_NYET 0x96 /* USB 2.0 */ +#define USB_PID_DATA2 0x87 /* USB 2.0 */ +#define USB_PID_SPLIT 0x78 /* USB 2.0 */ +#define USB_PID_IN 0x69 +#define USB_PID_NAK 0x5a +#define USB_PID_DATA1 0x4b +#define USB_PID_PREAMBLE 0x3c /* Token mode */ +#define USB_PID_ERR 0x3c /* USB 2.0: handshake mode */ +#define USB_PID_SETUP 0x2d +#define USB_PID_STALL 0x1e +#define USB_PID_MDATA 0x0f /* USB 2.0 */ + +/*-------------------------------------------------------------------------*/ + +/* + * USB Host Controller Driver (usb_hcd) framework + * + * Since "struct usb_bus" is so thin, you can't share much code in it. + * This framework is a layer over that, and should be more sharable. + * + * @authorized_default: Specifies if new devices are authorized to + * connect by default or they require explicit + * user space authorization; this bit is settable + * through /sys/class/usb_host/X/authorized_default. + * For the rest is RO, so we don't lock to r/w it. + */ + +/*-------------------------------------------------------------------------*/ + +struct usb_hcd { + + /* + * housekeeping + */ + struct usb_bus self; /* hcd is-a bus */ + struct kref kref; /* reference counter */ + + const char *product_desc; /* product/vendor string */ + int speed; /* Speed for this roothub. + * May be different from + * hcd->driver->flags & HCD_MASK + */ + char irq_descr[24]; /* driver + bus # */ + + struct timer_list rh_timer; /* drives root-hub polling */ + struct urb *status_urb; /* the current status urb */ +#ifdef CONFIG_USB_SUSPEND + struct work_struct wakeup_work; /* for remote wakeup */ +#endif + + /* + * hardware info/state + */ + const struct hc_driver *driver; /* hw-specific hooks */ + + /* Flags that need to be manipulated atomically because they can + * change while the host controller is running. Always use + * set_bit() or clear_bit() to change their values. 
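[Editorial example] A short illustration of the atomic-flag convention just described; the helper name is invented, and the HCD_FLAG_* bit numbers are the ones defined below:

/* Hypothetical: switch the root hub from timer polling to interrupt-driven
 * status reporting, clearing any poll that was still pending. */
static void example_stop_rh_polling(struct usb_hcd *hcd)
{
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	clear_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
}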
+ */ + unsigned long flags; +#define HCD_FLAG_HW_ACCESSIBLE 0 /* at full power */ +#define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ +#define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ +#define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ +#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ +#define HCD_FLAG_DEAD 6 /* controller has died? */ + + /* The flags can be tested using these macros; they are likely to + * be slightly faster than test_bit(). + */ +#define HCD_HW_ACCESSIBLE(hcd) ((hcd)->flags & (1U << HCD_FLAG_HW_ACCESSIBLE)) +#define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) +#define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) +#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) +#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) +#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) + + /* Flags that get set only during HCD registration or removal. */ + unsigned rh_registered:1;/* is root hub registered? */ + unsigned rh_pollable:1; /* may we poll the root hub? */ + unsigned msix_enabled:1; /* driver has MSI-X enabled? */ + + /* The next flag is a stopgap, to be removed when all the HCDs + * support the new root-hub polling mechanism. */ + unsigned uses_new_polling:1; + unsigned wireless:1; /* Wireless USB HCD */ + unsigned authorized_default:1; + unsigned has_tt:1; /* Integrated TT in root hub */ + + unsigned int irq; /* irq allocated */ + void __iomem *regs; /* device memory/io */ + u64 rsrc_start; /* memory/io resource start */ + u64 rsrc_len; /* memory/io resource length */ + unsigned power_budget; /* in mA, 0 = no limit */ + + /* bandwidth_mutex should be taken before adding or removing + * any new bus bandwidth constraints: + * 1. Before adding a configuration for a new device. + * 2. Before removing the configuration to put the device into + * the addressed state. + * 3. Before selecting a different configuration. + * 4. Before selecting an alternate interface setting. + * + * bandwidth_mutex should be dropped after a successful control message + * to the device, or resetting the bandwidth after a failed attempt. + */ + struct mutex *bandwidth_mutex; + struct usb_hcd *shared_hcd; + struct usb_hcd *primary_hcd; + + +#define HCD_BUFFER_POOLS 4 + struct dma_pool *pool[HCD_BUFFER_POOLS]; + + int state; +# define __ACTIVE 0x01 +# define __SUSPEND 0x04 +# define __TRANSIENT 0x80 + +# define HC_STATE_HALT 0 +# define HC_STATE_RUNNING (__ACTIVE) +# define HC_STATE_QUIESCING (__SUSPEND|__TRANSIENT|__ACTIVE) +# define HC_STATE_RESUMING (__SUSPEND|__TRANSIENT) +# define HC_STATE_SUSPENDED (__SUSPEND) + +#define HC_IS_RUNNING(state) ((state) & __ACTIVE) +#define HC_IS_SUSPENDED(state) ((state) & __SUSPEND) + + /* more shared queuing code would be good; it should support + * smarter scheduling, handle transaction translators, etc; + * input size of periodic table to an interrupt scheduler. + * (ohci 32, uhci 1024, ehci 256/512/1024). + */ + + /* The HC driver's private data is stored at the end of + * this structure. + */ + unsigned long hcd_priv[0] + __attribute__ ((aligned(sizeof(s64)))); +}; + +/* 2.4 does this a bit differently ... 
*/ +static inline struct usb_bus *hcd_to_bus(struct usb_hcd *hcd) +{ + return &hcd->self; +} + +static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus) +{ + return container_of(bus, struct usb_hcd, self); +} + +struct hcd_timeout { /* timeouts we allocate */ + struct list_head timeout_list; + struct timer_list timer; +}; + +/*-------------------------------------------------------------------------*/ + + +struct hc_driver { + const char *description; /* "ehci-hcd" etc */ + const char *product_desc; /* product/vendor string */ + size_t hcd_priv_size; /* size of private data */ + + /* irq handler */ + irqreturn_t (*irq) (struct usb_hcd *hcd); + + int flags; +#define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */ +#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */ +#define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ +#define HCD_USB11 0x0010 /* USB 1.1 */ +#define HCD_USB2 0x0020 /* USB 2.0 */ +#define HCD_USB3 0x0040 /* USB 3.0 */ +#define HCD_MASK 0x0070 + + /* called to init HCD and root hub */ + int (*reset) (struct usb_hcd *hcd); + int (*start) (struct usb_hcd *hcd); + + /* NOTE: these suspend/resume calls relate to the HC as + * a whole, not just the root hub; they're for PCI bus glue. + */ + /* called after suspending the hub, before entering D3 etc */ + int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup); + + /* called after entering D0 (etc), before resuming the hub */ + int (*pci_resume)(struct usb_hcd *hcd, bool hibernated); + + /* cleanly make HCD stop writing memory and doing I/O */ + void (*stop) (struct usb_hcd *hcd); + + /* shutdown HCD */ + void (*shutdown) (struct usb_hcd *hcd); + + /* return current frame number */ + int (*get_frame_number) (struct usb_hcd *hcd); + + /* manage i/o requests, device state */ + int (*urb_enqueue)(struct usb_hcd *hcd, + struct urb *urb, gfp_t mem_flags); + int (*urb_dequeue)(struct usb_hcd *hcd, + struct urb *urb, int status); + + /* + * (optional) these hooks allow an HCD to override the default DMA + * mapping and unmapping routines. In general, they shouldn't be + * necessary unless the host controller has special DMA requirements, + * such as alignment contraints. If these are not specified, the + * general usb_hcd_(un)?map_urb_for_dma functions will be used instead + * (and it may be a good idea to call these functions in your HCD + * implementation) + */ + int (*map_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags); + void (*unmap_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb); + + /* hw synch, freeing endpoint resources that urb_dequeue can't */ + void (*endpoint_disable)(struct usb_hcd *hcd, + struct usb_host_endpoint *ep); + + /* (optional) reset any endpoint state such as sequence number + and current window */ + void (*endpoint_reset)(struct usb_hcd *hcd, + struct usb_host_endpoint *ep); + + /* root hub support */ + int (*hub_status_data) (struct usb_hcd *hcd, char *buf); + int (*hub_control) (struct usb_hcd *hcd, + u16 typeReq, u16 wValue, u16 wIndex, + char *buf, u16 wLength); + int (*bus_suspend)(struct usb_hcd *); + int (*bus_resume)(struct usb_hcd *); + int (*start_port_reset)(struct usb_hcd *, unsigned port_num); + + /* force handover of high-speed port to full-speed companion */ + void (*relinquish_port)(struct usb_hcd *, int); + /* has a port been handed over to a companion? 
*/ + int (*port_handed_over)(struct usb_hcd *, int); + + /* CLEAR_TT_BUFFER completion callback */ + void (*clear_tt_buffer_complete)(struct usb_hcd *, + struct usb_host_endpoint *); + + /* xHCI specific functions */ + /* Called by usb_alloc_dev to alloc HC device structures */ + int (*alloc_dev)(struct usb_hcd *, struct usb_device *); + /* Called by usb_disconnect to free HC device structures */ + void (*free_dev)(struct usb_hcd *, struct usb_device *); + /* Change a group of bulk endpoints to support multiple stream IDs */ + int (*alloc_streams)(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + unsigned int num_streams, gfp_t mem_flags); + /* Reverts a group of bulk endpoints back to not using stream IDs. + * Can fail if we run out of memory. + */ + int (*free_streams)(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint **eps, unsigned int num_eps, + gfp_t mem_flags); + + /* Bandwidth computation functions */ + /* Note that add_endpoint() can only be called once per endpoint before + * check_bandwidth() or reset_bandwidth() must be called. + * drop_endpoint() can only be called once per endpoint also. + * A call to xhci_drop_endpoint() followed by a call to + * xhci_add_endpoint() will add the endpoint to the schedule with + * possibly new parameters denoted by a different endpoint descriptor + * in usb_host_endpoint. A call to xhci_add_endpoint() followed by a + * call to xhci_drop_endpoint() is not allowed. + */ + /* Allocate endpoint resources and add them to a new schedule */ + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, + struct usb_host_endpoint *); + /* Drop an endpoint from a new schedule */ + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, + struct usb_host_endpoint *); + /* Check that a new hardware configuration, set using + * endpoint_enable and endpoint_disable, does not exceed bus + * bandwidth. This must be called before any set configuration + * or set interface requests are sent to the device. + */ + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + /* Reset the device schedule to the last known good schedule, + * which was set from a previous successful call to + * check_bandwidth(). This reverts any add_endpoint() and + * drop_endpoint() calls since that last successful call. + * Used for when a check_bandwidth() call fails due to resource + * or bandwidth constraints. + */ + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + /* Returns the hardware-chosen device address */ + int (*address_device)(struct usb_hcd *, struct usb_device *udev); + /* Notifies the HCD after a hub descriptor is fetched. + * Will block. 
+ */ + int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags); + int (*reset_device)(struct usb_hcd *, struct usb_device *); + /* Notifies the HCD after a device is connected and its + * address is set + */ + int (*update_device)(struct usb_hcd *, struct usb_device *); + int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); +}; + +extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); +extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, + int status); +extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb); + +extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags); +extern int usb_hcd_unlink_urb(struct urb *urb, int status); +extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, + int status); +extern int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags); +extern void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *); +extern void usb_hcd_unmap_urb_for_dma(struct usb_hcd *, struct urb *); +extern void usb_hcd_flush_endpoint(struct usb_device *udev, + struct usb_host_endpoint *ep); +extern void usb_hcd_disable_endpoint(struct usb_device *udev, + struct usb_host_endpoint *ep); +extern void usb_hcd_reset_endpoint(struct usb_device *udev, + struct usb_host_endpoint *ep); +extern void usb_hcd_synchronize_unlinks(struct usb_device *udev); +extern int usb_hcd_alloc_bandwidth(struct usb_device *udev, + struct usb_host_config *new_config, + struct usb_host_interface *old_alt, + struct usb_host_interface *new_alt); +extern int usb_hcd_get_frame_number(struct usb_device *udev); + +extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, + struct device *dev, const char *bus_name); +extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, + struct device *dev, const char *bus_name, + struct usb_hcd *shared_hcd); +extern struct usb_hcd *usb_get_hcd(struct usb_hcd *hcd); +extern void usb_put_hcd(struct usb_hcd *hcd); +extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd); +extern int usb_add_hcd(struct usb_hcd *hcd, + unsigned int irqnum, unsigned long irqflags); +extern void usb_remove_hcd(struct usb_hcd *hcd); + +struct platform_device; +extern void usb_hcd_platform_shutdown(struct platform_device *dev); + +#ifdef CONFIG_PCI +struct pci_dev; +struct pci_device_id; +extern int usb_hcd_pci_probe(struct pci_dev *dev, + const struct pci_device_id *id); +extern void usb_hcd_pci_remove(struct pci_dev *dev); +extern void usb_hcd_pci_shutdown(struct pci_dev *dev); + +#ifdef CONFIG_PM_SLEEP +extern const struct dev_pm_ops usb_hcd_pci_pm_ops; +#endif +#endif /* CONFIG_PCI */ + +/* pci-ish (pdev null is ok) buffer alloc/mapping support */ +int hcd_buffer_create(struct usb_hcd *hcd); +void hcd_buffer_destroy(struct usb_hcd *hcd); + +void *hcd_buffer_alloc(struct usb_bus *bus, size_t size, + gfp_t mem_flags, dma_addr_t *dma); +void hcd_buffer_free(struct usb_bus *bus, size_t size, + void *addr, dma_addr_t dma); + +/* generic bus glue, needed for host controllers that don't use PCI */ +extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); + +extern void usb_hc_died(struct usb_hcd *hcd); +extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); +extern void usb_wakeup_notification(struct usb_device *hdev, + unsigned int portnum); + +/* The D0/D1 toggle bits ... 
USE WITH CAUTION (they're almost hcd-internal) */ +#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) +#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) +#define usb_settoggle(dev, ep, out, bit) \ + ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \ + ((bit) << (ep))) + +/* -------------------------------------------------------------------------- */ + +/* Enumeration is only for the hub driver, or HCD virtual root hubs */ +extern struct usb_device *usb_alloc_dev(struct usb_device *parent, + struct usb_bus *, unsigned port); +extern int usb_new_device(struct usb_device *dev); +extern void usb_disconnect(struct usb_device **); + +extern int usb_get_configuration(struct usb_device *dev); +extern void usb_destroy_configuration(struct usb_device *dev); + +/*-------------------------------------------------------------------------*/ + +/* + * HCD Root Hub support + */ + +#include <linux/usb/ch11.h> + +/* + * As of USB 2.0, full/low speed devices are segregated into trees. + * One type grows from USB 1.1 host controllers (OHCI, UHCI etc). + * The other type grows from high speed hubs when they connect to + * full/low speed devices using "Transaction Translators" (TTs). + * + * TTs should only be known to the hub driver, and high speed bus + * drivers (only EHCI for now). They affect periodic scheduling and + * sometimes control/bulk error recovery. + */ + +struct usb_device; + +struct usb_tt { + struct usb_device *hub; /* upstream highspeed hub */ + int multi; /* true means one TT per port */ + unsigned think_time; /* think time in ns */ + + /* for control/bulk error recovery (CLEAR_TT_BUFFER) */ + spinlock_t lock; + struct list_head clear_list; /* of usb_tt_clear */ + struct work_struct clear_work; +}; + +struct usb_tt_clear { + struct list_head clear_list; + unsigned tt; + u16 devinfo; + struct usb_hcd *hcd; + struct usb_host_endpoint *ep; +}; + +extern int usb_hub_clear_tt_buffer(struct urb *urb); +extern void usb_ep0_reinit(struct usb_device *); + +/*{CharlesTu, 2010.08.26, for test mode */ +#define USB_PORT_FEAT_INDICATOR 22 + +/* + * Hub Port Test Mode Selector Codes + * See USB 2.0 spec Table 11-24 + */ +#define USB_PORT_TEST_J 0x01 +#define USB_PORT_TEST_K 0x02 +#define USB_PORT_TEST_SE0_NAK 0x03 +#define USB_PORT_TEST_PACKET 0x04 +#define USB_PORT_TEST_FORCE_ENABLE 0x05 + +/* + * This is used for the Hi-Speed Host Electrical Tests + * on the root hub. See USB 2.0 spec 7.1.20 and the + * Embedded High-speed Host Electrical Test Procedure. 
+ */ +#define USB_PORT_TEST_SINGLE_STEP_SET_FEATURE 0x00 +/*CharelsTu} */ + +/* (shifted) direction/type/recipient from the USB 2.0 spec, table 9.2 */ +#define DeviceRequest \ + ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) +#define DeviceOutRequest \ + ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) + +#define InterfaceRequest \ + ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) + +#define EndpointRequest \ + ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) +#define EndpointOutRequest \ + ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) + +/* class requests from the USB 2.0 hub spec, table 11-15 */ +/* GetBusState and SetHubDescriptor are optional, omitted */ +#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE) +#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE) +#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR) +#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS) +#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS) +#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE) +#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE) + + +/*-------------------------------------------------------------------------*/ + +/* class requests from USB 3.0 hub spec, table 10-5 */ +#define SetHubDepth (0x3000 | HUB_SET_DEPTH) +#define GetPortErrorCount (0x8000 | HUB_GET_PORT_ERR_COUNT) + +/* + * Generic bandwidth allocation constants/support + */ +#define FRAME_TIME_USECS 1000L +#define BitTime(bytecount) (7 * 8 * bytecount / 6) /* with integer truncation */ + /* Trying not to use worst-case bit-stuffing + * of (7/6 * 8 * bytecount) = 9.33 * bytecount */ + /* bytecount = data payload byte count */ + +#define NS_TO_US(ns) ((ns + 500L) / 1000L) + /* convert & round nanoseconds to microseconds */ + + +/* + * Full/low speed bandwidth allocation constants/support. + */ +#define BW_HOST_DELAY 1000L /* nanoseconds */ +#define BW_HUB_LS_SETUP 333L /* nanoseconds */ + /* 4 full-speed bit times (est.) */ + +#define FRAME_TIME_BITS 12000L /* frame = 1 millisecond */ +#define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L) +#define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L) + +/* + * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed + * ISO is a bit less, no ACK ... 
from USB 2.0 spec, 5.11.3 (and needed + * to preallocate bandwidth) + */ +#define USB2_HOST_DELAY 5 /* nsec, guess */ +#define HS_NSECS(bytes) (((55 * 8 * 2083) \ + + (2083UL * (3 + BitTime(bytes))))/1000 \ + + USB2_HOST_DELAY) +#define HS_NSECS_ISO(bytes) (((38 * 8 * 2083) \ + + (2083UL * (3 + BitTime(bytes))))/1000 \ + + USB2_HOST_DELAY) +#define HS_USECS(bytes) NS_TO_US(HS_NSECS(bytes)) +#define HS_USECS_ISO(bytes) NS_TO_US(HS_NSECS_ISO(bytes)) + +extern long usb_calc_bus_time(int speed, int is_input, + int isoc, int bytecount); + +/*-------------------------------------------------------------------------*/ + +extern void usb_set_device_state(struct usb_device *udev, + enum usb_device_state new_state); + +/*-------------------------------------------------------------------------*/ + +/* exported only within usbcore */ + +extern struct list_head usb_bus_list; +extern struct mutex usb_bus_list_lock; +extern wait_queue_head_t usb_kill_urb_queue; + +extern int usb_find_interface_driver(struct usb_device *dev, + struct usb_interface *interface); + +#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) + +#ifdef CONFIG_PM +extern void usb_root_hub_lost_power(struct usb_device *rhdev); +extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); +extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); +#endif /* CONFIG_PM */ + +#ifdef CONFIG_USB_SUSPEND +extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); +#else +static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) +{ + return; +} +#endif /* CONFIG_USB_SUSPEND */ + + +/* + * USB device fs stuff + */ + +#ifdef CONFIG_USB_DEVICEFS + +/* + * these are expected to be called from the USB core/hub thread + * with the kernel lock held + */ +extern void usbfs_update_special(void); +extern int usbfs_init(void); +extern void usbfs_cleanup(void); + +#else /* CONFIG_USB_DEVICEFS */ + +static inline void usbfs_update_special(void) {} +static inline int usbfs_init(void) { return 0; } +static inline void usbfs_cleanup(void) { } + +#endif /* CONFIG_USB_DEVICEFS */ + +/*-------------------------------------------------------------------------*/ + +#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) + +struct usb_mon_operations { + void (*urb_submit)(struct usb_bus *bus, struct urb *urb); + void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err); + void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status); + /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */ +}; + +extern struct usb_mon_operations *mon_ops; + +static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) +{ + if (bus->monitored) + (*mon_ops->urb_submit)(bus, urb); +} + +static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb, + int error) +{ + if (bus->monitored) + (*mon_ops->urb_submit_error)(bus, urb, error); +} + +static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, + int status) +{ + if (bus->monitored) + (*mon_ops->urb_complete)(bus, urb, status); +} + +int usb_mon_register(struct usb_mon_operations *ops); +void usb_mon_deregister(void); + +#else + +static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) {} +static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb, + int error) {} +static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, + int status) {} + +#endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */ + 
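The usbmon hooks declared above are the whole contract between usbcore and a bus monitor: a monitor fills in a struct usb_mon_operations, hands it to usb_mon_register(), and from then on the inline usbmon_urb_*() wrappers forward submit/complete traffic for every bus whose monitored flag is set. The sketch below shows what a minimal client of that interface could look like, assuming a kernel built with CONFIG_USB_MON; the demo_* names are invented for illustration only (the in-tree user of these hooks is the usbmon driver), and error handling is kept to the bare minimum.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

/* Hypothetical callbacks: just trace the URB traffic handed over by usbcore. */
static void demo_urb_submit(struct usb_bus *bus, struct urb *urb)
{
	pr_debug("%s: urb %p submitted\n", bus->bus_name, urb);
}

static void demo_urb_submit_error(struct usb_bus *bus, struct urb *urb, int err)
{
	pr_debug("%s: urb %p rejected, err %d\n", bus->bus_name, urb, err);
}

static void demo_urb_complete(struct usb_bus *bus, struct urb *urb, int status)
{
	pr_debug("%s: urb %p completed, status %d\n", bus->bus_name, urb, status);
}

static struct usb_mon_operations demo_mon_ops = {
	.urb_submit		= demo_urb_submit,
	.urb_submit_error	= demo_urb_submit_error,
	.urb_complete		= demo_urb_complete,
};

static int __init demo_mon_init(void)
{
	/* usbcore keeps a single mon_ops pointer, so registration can fail
	 * if another monitor (normally usbmon) is already loaded. */
	return usb_mon_register(&demo_mon_ops);
}

static void __exit demo_mon_exit(void)
{
	usb_mon_deregister();
}

module_init(demo_mon_init);
module_exit(demo_mon_exit);
MODULE_LICENSE("GPL");

Only one operations table is accepted at a time, and the wrappers only dereference it for buses that are actually being monitored, so the hooks cost a single flag test on the hot path when no monitor is attached.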
+/*-------------------------------------------------------------------------*/ + +/* random stuff */ + +#define RUN_CONTEXT (in_irq() ? "in_irq" \ + : (in_interrupt() ? "in_interrupt" : "can sleep")) + + +/* This rwsem is for use only by the hub driver and ehci-hcd. + * Nobody else should touch it. + */ +extern struct rw_semaphore ehci_cf_port_reset_rwsem; + +/* Keep track of which host controller drivers are loaded */ +#define USB_UHCI_LOADED 0 +#define USB_OHCI_LOADED 1 +#define USB_EHCI_LOADED 2 +extern unsigned long usb_hcds_loaded; + +#endif /* __KERNEL__ */ + +#endif /* __USB_CORE_HCD_H */ diff --git a/include/linux/usb/input.h b/include/linux/usb/input.h new file mode 100644 index 00000000..0e010b22 --- /dev/null +++ b/include/linux/usb/input.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2005 Dmitry Torokhov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef __LINUX_USB_INPUT_H +#define __LINUX_USB_INPUT_H + +#include <linux/usb.h> +#include <linux/input.h> +#include <asm/byteorder.h> + +static inline void +usb_to_input_id(const struct usb_device *dev, struct input_id *id) +{ + id->bustype = BUS_USB; + id->vendor = le16_to_cpu(dev->descriptor.idVendor); + id->product = le16_to_cpu(dev->descriptor.idProduct); + id->version = le16_to_cpu(dev->descriptor.bcdDevice); +} + +#endif /* __LINUX_USB_INPUT_H */ diff --git a/include/linux/usb/intel_mid_otg.h b/include/linux/usb/intel_mid_otg.h new file mode 100644 index 00000000..756cf554 --- /dev/null +++ b/include/linux/usb/intel_mid_otg.h @@ -0,0 +1,180 @@ +/* + * Intel MID (Langwell/Penwell) USB OTG Transceiver driver + * Copyright (C) 2008 - 2010, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#ifndef __INTEL_MID_OTG_H +#define __INTEL_MID_OTG_H + +#include <linux/pm.h> +#include <linux/usb/otg.h> +#include <linux/notifier.h> + +struct intel_mid_otg_xceiv; + +/* This is a common data structure for Intel MID platform to + * save values of the OTG state machine */ +struct otg_hsm { + /* Input */ + int a_bus_resume; + int a_bus_suspend; + int a_conn; + int a_sess_vld; + int a_srp_det; + int a_vbus_vld; + int b_bus_resume; + int b_bus_suspend; + int b_conn; + int b_se0_srp; + int b_ssend_srp; + int b_sess_end; + int b_sess_vld; + int id; +/* id values */ +#define ID_B 0x05 +#define ID_A 0x04 +#define ID_ACA_C 0x03 +#define ID_ACA_B 0x02 +#define ID_ACA_A 0x01 + int power_up; + int adp_change; + int test_device; + + /* Internal variables */ + int a_set_b_hnp_en; + int b_srp_done; + int b_hnp_enable; + int hnp_poll_enable; + + /* Timeout indicator for timers */ + int a_wait_vrise_tmout; + int a_wait_bcon_tmout; + int a_aidl_bdis_tmout; + int a_bidl_adis_tmout; + int a_bidl_adis_tmr; + int a_wait_vfall_tmout; + int b_ase0_brst_tmout; + int b_bus_suspend_tmout; + int b_srp_init_tmout; + int b_srp_fail_tmout; + int b_srp_fail_tmr; + int b_adp_sense_tmout; + + /* Informative variables */ + int a_bus_drop; + int a_bus_req; + int a_clr_err; + int b_bus_req; + int a_suspend_req; + int b_bus_suspend_vld; + + /* Output */ + int drv_vbus; + int loc_conn; + int loc_sof; + + /* Others */ + int vbus_srp_up; +}; + +/* must provide ULPI access function to read/write registers implemented in + * ULPI address space */ +struct iotg_ulpi_access_ops { + int (*read)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val); + int (*write)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val); +}; + +#define OTG_A_DEVICE 0x0 +#define OTG_B_DEVICE 0x1 + +/* + * the Intel MID (Langwell/Penwell) otg transceiver driver needs to interact + * with device and host drivers to implement the USB OTG related feature. More + * function members are added based on usb_phy data structure for this + * purpose. 
+ */ +struct intel_mid_otg_xceiv { + struct usb_phy otg; + struct otg_hsm hsm; + + /* base address */ + void __iomem *base; + + /* ops to access ulpi */ + struct iotg_ulpi_access_ops ulpi_ops; + + /* atomic notifier for interrupt context */ + struct atomic_notifier_head iotg_notifier; + + /* start/stop USB Host function */ + int (*start_host)(struct intel_mid_otg_xceiv *iotg); + int (*stop_host)(struct intel_mid_otg_xceiv *iotg); + + /* start/stop USB Peripheral function */ + int (*start_peripheral)(struct intel_mid_otg_xceiv *iotg); + int (*stop_peripheral)(struct intel_mid_otg_xceiv *iotg); + + /* start/stop ADP sense/probe function */ + int (*set_adp_probe)(struct intel_mid_otg_xceiv *iotg, + bool enabled, int dev); + int (*set_adp_sense)(struct intel_mid_otg_xceiv *iotg, + bool enabled); + +#ifdef CONFIG_PM + /* suspend/resume USB host function */ + int (*suspend_host)(struct intel_mid_otg_xceiv *iotg, + pm_message_t message); + int (*resume_host)(struct intel_mid_otg_xceiv *iotg); + + int (*suspend_peripheral)(struct intel_mid_otg_xceiv *iotg, + pm_message_t message); + int (*resume_peripheral)(struct intel_mid_otg_xceiv *iotg); +#endif + +}; +static inline +struct intel_mid_otg_xceiv *otg_to_mid_xceiv(struct usb_phy *otg) +{ + return container_of(otg, struct intel_mid_otg_xceiv, otg); +} + +#define MID_OTG_NOTIFY_CONNECT 0x0001 +#define MID_OTG_NOTIFY_DISCONN 0x0002 +#define MID_OTG_NOTIFY_HSUSPEND 0x0003 +#define MID_OTG_NOTIFY_HRESUME 0x0004 +#define MID_OTG_NOTIFY_CSUSPEND 0x0005 +#define MID_OTG_NOTIFY_CRESUME 0x0006 +#define MID_OTG_NOTIFY_HOSTADD 0x0007 +#define MID_OTG_NOTIFY_HOSTREMOVE 0x0008 +#define MID_OTG_NOTIFY_CLIENTADD 0x0009 +#define MID_OTG_NOTIFY_CLIENTREMOVE 0x000a + +static inline int +intel_mid_otg_register_notifier(struct intel_mid_otg_xceiv *iotg, + struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&iotg->iotg_notifier, nb); +} + +static inline void +intel_mid_otg_unregister_notifier(struct intel_mid_otg_xceiv *iotg, + struct notifier_block *nb) +{ + atomic_notifier_chain_unregister(&iotg->iotg_notifier, nb); +} + +#endif /* __INTEL_MID_OTG_H */ diff --git a/include/linux/usb/iowarrior.h b/include/linux/usb/iowarrior.h new file mode 100644 index 00000000..4fd6513d --- /dev/null +++ b/include/linux/usb/iowarrior.h @@ -0,0 +1,42 @@ +#ifndef __LINUX_USB_IOWARRIOR_H +#define __LINUX_USB_IOWARRIOR_H + +#define CODEMERCS_MAGIC_NUMBER 0xC0 /* like COde Mercenaries */ + +/* Define the ioctl commands for reading and writing data */ +#define IOW_WRITE _IOW(CODEMERCS_MAGIC_NUMBER, 1, __u8 *) +#define IOW_READ _IOW(CODEMERCS_MAGIC_NUMBER, 2, __u8 *) + +/* + A struct for available device info which is read + with the ioctl IOW_GETINFO. + To be compatible with 2.4 userspace which didn't have an easy way to get + this information. +*/ +struct iowarrior_info { + /* vendor id : supposed to be USB_VENDOR_ID_CODEMERCS in all cases */ + __u32 vendor; + /* product id : depends on type of chip (USB_DEVICE_ID_CODEMERCS_X) */ + __u32 product; + /* the serial number of our chip (if a serial-number is not available + * this is empty string) */ + __u8 serial[9]; + /* revision number of the chip */ + __u32 revision; + /* USB-speed of the device (0=UNKNOWN, 1=LOW, 2=FULL 3=HIGH) */ + __u32 speed; + /* power consumption of the device in mA */ + __u32 power; + /* the number of the endpoint */ + __u32 if_num; + /* size of the data-packets on this interface */ + __u32 report_size; +}; + +/* + Get some device-information (product-id , serial-number etc.) 
+ in order to identify a chip. +*/ +#define IOW_GETINFO _IOR(CODEMERCS_MAGIC_NUMBER, 3, struct iowarrior_info) + +#endif /* __LINUX_USB_IOWARRIOR_H */ diff --git a/include/linux/usb/irda.h b/include/linux/usb/irda.h new file mode 100644 index 00000000..e345ceaf --- /dev/null +++ b/include/linux/usb/irda.h @@ -0,0 +1,151 @@ +/* + * USB IrDA Bridge Device Definition + */ + +#ifndef __LINUX_USB_IRDA_H +#define __LINUX_USB_IRDA_H + +/* This device should use Application-specific class */ + +#define USB_SUBCLASS_IRDA 0x02 + +/*-------------------------------------------------------------------------*/ + +/* Class-Specific requests (bRequest field) */ + +#define USB_REQ_CS_IRDA_RECEIVING 1 +#define USB_REQ_CS_IRDA_CHECK_MEDIA_BUSY 3 +#define USB_REQ_CS_IRDA_RATE_SNIFF 4 +#define USB_REQ_CS_IRDA_UNICAST_LIST 5 +#define USB_REQ_CS_IRDA_GET_CLASS_DESC 6 + +/*-------------------------------------------------------------------------*/ + +/* Class-Specific descriptor */ + +#define USB_DT_CS_IRDA 0x21 + +/*-------------------------------------------------------------------------*/ + +/* Data sizes */ + +#define USB_IRDA_DS_2048 (1 << 5) +#define USB_IRDA_DS_1024 (1 << 4) +#define USB_IRDA_DS_512 (1 << 3) +#define USB_IRDA_DS_256 (1 << 2) +#define USB_IRDA_DS_128 (1 << 1) +#define USB_IRDA_DS_64 (1 << 0) + +/* Window sizes */ + +#define USB_IRDA_WS_7 (1 << 6) +#define USB_IRDA_WS_6 (1 << 5) +#define USB_IRDA_WS_5 (1 << 4) +#define USB_IRDA_WS_4 (1 << 3) +#define USB_IRDA_WS_3 (1 << 2) +#define USB_IRDA_WS_2 (1 << 1) +#define USB_IRDA_WS_1 (1 << 0) + +/* Min turnaround times in usecs */ + +#define USB_IRDA_MTT_0 (1 << 7) +#define USB_IRDA_MTT_10 (1 << 6) +#define USB_IRDA_MTT_50 (1 << 5) +#define USB_IRDA_MTT_100 (1 << 4) +#define USB_IRDA_MTT_500 (1 << 3) +#define USB_IRDA_MTT_1000 (1 << 2) +#define USB_IRDA_MTT_5000 (1 << 1) +#define USB_IRDA_MTT_10000 (1 << 0) + +/* Baud rates */ + +#define USB_IRDA_BR_4000000 (1 << 8) +#define USB_IRDA_BR_1152000 (1 << 7) +#define USB_IRDA_BR_576000 (1 << 6) +#define USB_IRDA_BR_115200 (1 << 5) +#define USB_IRDA_BR_57600 (1 << 4) +#define USB_IRDA_BR_38400 (1 << 3) +#define USB_IRDA_BR_19200 (1 << 2) +#define USB_IRDA_BR_9600 (1 << 1) +#define USB_IRDA_BR_2400 (1 << 0) + +/* Additional BOFs */ + +#define USB_IRDA_AB_0 (1 << 7) +#define USB_IRDA_AB_1 (1 << 6) +#define USB_IRDA_AB_2 (1 << 5) +#define USB_IRDA_AB_3 (1 << 4) +#define USB_IRDA_AB_6 (1 << 3) +#define USB_IRDA_AB_12 (1 << 2) +#define USB_IRDA_AB_24 (1 << 1) +#define USB_IRDA_AB_48 (1 << 0) + +/* IRDA Rate Sniff */ + +#define USB_IRDA_RATE_SNIFF 1 + +/*-------------------------------------------------------------------------*/ + +struct usb_irda_cs_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 bcdSpecRevision; + __u8 bmDataSize; + __u8 bmWindowSize; + __u8 bmMinTurnaroundTime; + __le16 wBaudRate; + __u8 bmAdditionalBOFs; + __u8 bIrdaRateSniff; + __u8 bMaxUnicastList; +} __attribute__ ((packed)); + +/*-------------------------------------------------------------------------*/ + +/* Data Format */ + +#define USB_IRDA_STATUS_MEDIA_BUSY (1 << 7) + +/* The following is a 4-bit value used for both + * inbound and outbound headers: + * + * 0 - speed ignored + * 1 - 2400 bps + * 2 - 9600 bps + * 3 - 19200 bps + * 4 - 38400 bps + * 5 - 57600 bps + * 6 - 115200 bps + * 7 - 576000 bps + * 8 - 1.152 Mbps + * 9 - 5 mbps + * 10..15 - Reserved + */ +#define USB_IRDA_STATUS_LINK_SPEED 0x0f + +/* The following is a 4-bit value used only for + * outbound header: + * + * 0 - No change (BOF ignored) + * 1 - 48 
BOFs + * 2 - 24 BOFs + * 3 - 12 BOFs + * 4 - 6 BOFs + * 5 - 3 BOFs + * 6 - 2 BOFs + * 7 - 1 BOFs + * 8 - 0 BOFs + * 9..15 - Reserved + */ +#define USB_IRDA_EXTRA_BOFS 0xf0 + +struct usb_irda_inbound_header { + __u8 bmStatus; +}; + +struct usb_irda_outbound_header { + __u8 bmChange; +}; + +#endif /* __LINUX_USB_IRDA_H */ + diff --git a/include/linux/usb/isp116x.h b/include/linux/usb/isp116x.h new file mode 100644 index 00000000..96ca114e --- /dev/null +++ b/include/linux/usb/isp116x.h @@ -0,0 +1,33 @@ +/* + * Board initialization code should put one of these into dev->platform_data + * and place the isp116x onto platform_bus. + */ + +#ifndef __LINUX_USB_ISP116X_H +#define __LINUX_USB_ISP116X_H + +struct isp116x_platform_data { + /* Enable internal resistors on downstream ports */ + unsigned sel15Kres:1; + /* On-chip overcurrent detection */ + unsigned oc_enable:1; + /* INT output polarity */ + unsigned int_act_high:1; + /* INT edge or level triggered */ + unsigned int_edge_triggered:1; + /* Enable wakeup by devices on usb bus (e.g. wakeup + by attachment/detachment or by device activity + such as moving a mouse). When chosen, this option + prevents stopping internal clock, increasing + thereby power consumption in suspended state. */ + unsigned remote_wakeup_enable:1; + /* Inter-io delay (ns). The chip is picky about access timings; it + expects at least: + 150ns delay between consecutive accesses to DATA_REG, + 300ns delay between access to ADDR_REG and DATA_REG + OE, WE MUST NOT be changed during these intervals + */ + void (*delay) (struct device *dev, int delay); +}; + +#endif /* __LINUX_USB_ISP116X_H */ diff --git a/include/linux/usb/isp1362.h b/include/linux/usb/isp1362.h new file mode 100644 index 00000000..642684bb --- /dev/null +++ b/include/linux/usb/isp1362.h @@ -0,0 +1,46 @@ +/* + * board initialization code should put one of these into dev->platform_data + * and place the isp1362 onto platform_bus. + */ + +#ifndef __LINUX_USB_ISP1362_H__ +#define __LINUX_USB_ISP1362_H__ + +struct isp1362_platform_data { + /* Enable internal pulldown resistors on downstream ports */ + unsigned sel15Kres:1; + /* Clock cannot be stopped */ + unsigned clknotstop:1; + /* On-chip overcurrent protection */ + unsigned oc_enable:1; + /* INT output polarity */ + unsigned int_act_high:1; + /* INT edge or level triggered */ + unsigned int_edge_triggered:1; + /* DREQ output polarity */ + unsigned dreq_act_high:1; + /* DACK input polarity */ + unsigned dack_act_high:1; + /* chip can be resumed via H_WAKEUP pin */ + unsigned remote_wakeup_connected:1; + /* Switch or not to switch (keep always powered) */ + unsigned no_power_switching:1; + /* Ganged port power switching (0) or individual port power switching (1) */ + unsigned power_switching_mode:1; + /* Given port_power, msec/2 after power on till power good */ + u8 potpg; + /* Hardware reset set/clear */ + void (*reset) (struct device *dev, int set); + /* Clock start/stop */ + void (*clock) (struct device *dev, int start); + /* Inter-io delay (ns). The chip is picky about access timings; it + * expects at least: + * 110ns delay between consecutive accesses to DATA_REG, + * 300ns delay between access to ADDR_REG and DATA_REG (registers) + * 462ns delay between access to ADDR_REG and DATA_REG (buffer memory) + * WE MUST NOT be activated during these intervals (even without CS!) 
+ */ + void (*delay) (struct device *dev, unsigned int delay); +}; + +#endif diff --git a/include/linux/usb/isp1760.h b/include/linux/usb/isp1760.h new file mode 100644 index 00000000..de7de53c --- /dev/null +++ b/include/linux/usb/isp1760.h @@ -0,0 +1,18 @@ +/* + * board initialization should put one of these into dev->platform_data + * and place the isp1760 onto platform_bus named "isp1760-hcd". + */ + +#ifndef __LINUX_USB_ISP1760_H +#define __LINUX_USB_ISP1760_H + +struct isp1760_platform_data { + unsigned is_isp1761:1; /* Chip is ISP1761 */ + unsigned bus_width_16:1; /* 16/32-bit data bus width */ + unsigned port1_otg:1; /* Port 1 supports OTG */ + unsigned analog_oc:1; /* Analog overcurrent */ + unsigned dack_polarity_high:1; /* DACK active high */ + unsigned dreq_polarity_high:1; /* DREQ active high */ +}; + +#endif /* __LINUX_USB_ISP1760_H */ diff --git a/include/linux/usb/langwell_udc.h b/include/linux/usb/langwell_udc.h new file mode 100644 index 00000000..2d2d1bba --- /dev/null +++ b/include/linux/usb/langwell_udc.h @@ -0,0 +1,310 @@ +/* + * Intel Langwell USB Device Controller driver + * Copyright (C) 2008-2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ *
+ */
+
+#ifndef __LANGWELL_UDC_H
+#define __LANGWELL_UDC_H
+
+
+/* MACRO defines */
+#define CAP_REG_OFFSET 0x0
+#define OP_REG_OFFSET 0x28
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#define DQH_ALIGNMENT 2048
+#define DTD_ALIGNMENT 64
+#define DMA_BOUNDARY 4096
+
+#define EP0_MAX_PKT_SIZE 64
+#define EP_DIR_IN 1
+#define EP_DIR_OUT 0
+
+#define FLUSH_TIMEOUT 1000
+#define RESET_TIMEOUT 1000
+#define SETUPSTAT_TIMEOUT 100
+#define PRIME_TIMEOUT 100
+
+
+/* device memory space registers */
+
+/* Capability Registers, BAR0 + CAP_REG_OFFSET */
+struct langwell_cap_regs {
+ /* offset: 0x0 */
+ u8 caplength; /* offset of Operational Register */
+ u8 _reserved3;
+ u16 hciversion; /* H: BCD encoding of host version */
+ u32 hcsparams; /* H: host port steering logic capability */
+ u32 hccparams; /* H: host multiple mode control capability */
+#define HCC_LEN BIT(17) /* Link power management (LPM) capability */
+ u8 _reserved4[0x20-0xc];
+ /* offset: 0x20 */
+ u16 dciversion; /* BCD encoding of device version */
+ u8 _reserved5[0x24-0x22];
+ u32 dccparams; /* overall device controller capability */
+#define HOSTCAP BIT(8) /* host capable */
+#define DEVCAP BIT(7) /* device capable */
+#define DEN(d) \
+ (((d)>>0)&0x1f) /* bits 4:0, device endpoint number */
+} __attribute__ ((packed));
+
+
+/* Operational Registers, BAR0 + OP_REG_OFFSET */
+struct langwell_op_regs {
+ /* offset: 0x28 */
+ u32 extsts;
+#define EXTS_TI1 BIT(4) /* general purpose timer interrupt 1 */
+#define EXTS_TI0 BIT(3) /* general purpose timer interrupt 0 */
+#define EXTS_UPI BIT(2) /* USB host periodic interrupt */
+#define EXTS_UAI BIT(1) /* USB host asynchronous interrupt */
+#define EXTS_NAKI BIT(0) /* NAK interrupt */
+ u32 extintr;
+#define EXTI_TIE1 BIT(4) /* general purpose timer interrupt enable 1 */
+#define EXTI_TIE0 BIT(3) /* general purpose timer interrupt enable 0 */
+#define EXTI_UPIE BIT(2) /* USB host periodic interrupt enable */
+#define EXTI_UAIE BIT(1) /* USB host asynchronous interrupt enable */
+#define EXTI_NAKE BIT(0) /* NAK interrupt enable */
+ /* offset: 0x30 */
+ u32 usbcmd;
+#define CMD_HIRD(u) \
+ (((u)>>24)&0xf) /* bits 27:24, host init resume duration */
+#define CMD_ITC(u) \
+ (((u)>>16)&0xff) /* bits 23:16, interrupt threshold control */
+#define CMD_PPE BIT(15) /* per-port change events enable */
+#define CMD_ATDTW BIT(14) /* add dTD tripwire */
+#define CMD_SUTW BIT(13) /* setup tripwire */
+#define CMD_ASPE BIT(11) /* asynchronous schedule park mode enable */
+#define CMD_FS2 BIT(10) /* frame list size */
+#define CMD_ASP1 BIT(9) /* asynchronous schedule park mode count */
+#define CMD_ASP0 BIT(8)
+#define CMD_LR BIT(7) /* light host/device controller reset */
+#define CMD_IAA BIT(6) /* interrupt on async advance doorbell */
+#define CMD_ASE BIT(5) /* asynchronous schedule enable */
+#define CMD_PSE BIT(4) /* periodic schedule enable */
+#define CMD_FS1 BIT(3)
+#define CMD_FS0 BIT(2)
+#define CMD_RST BIT(1) /* controller reset */
+#define CMD_RUNSTOP BIT(0) /* run/stop */
+ u32 usbsts;
+#define STS_PPCI(u) \
+ (((u)>>16)&0xffff) /* bits 31:16, port-n change detect */
+#define STS_AS BIT(15) /* asynchronous schedule status */
+#define STS_PS BIT(14) /* periodic schedule status */
+#define STS_RCL BIT(13) /* reclamation */
+#define STS_HCH BIT(12) /* HC halted */
+#define STS_ULPII BIT(10) /* ULPI interrupt */
+#define STS_SLI BIT(8) /* DC suspend */
+#define STS_SRI BIT(7) /* SOF received */
+#define STS_URI BIT(6) /* USB reset received */
+#define STS_AAI BIT(5) /*
interrupt on async advance */
+#define STS_SEI BIT(4) /* system error */
+#define STS_FRI BIT(3) /* frame list rollover */
+#define STS_PCI BIT(2) /* port change detect */
+#define STS_UEI BIT(1) /* USB error interrupt */
+#define STS_UI BIT(0) /* USB interrupt */
+ u32 usbintr;
+/* bits 31:16, per-port interrupt enable */
+#define INTR_PPCE(u) (((u)>>16)&0xffff)
+#define INTR_ULPIE BIT(10) /* ULPI enable */
+#define INTR_SLE BIT(8) /* DC sleep/suspend enable */
+#define INTR_SRE BIT(7) /* SOF received enable */
+#define INTR_URE BIT(6) /* USB reset enable */
+#define INTR_AAE BIT(5) /* interrupt on async advance enable */
+#define INTR_SEE BIT(4) /* system error enable */
+#define INTR_FRE BIT(3) /* frame list rollover enable */
+#define INTR_PCE BIT(2) /* port change detect enable */
+#define INTR_UEE BIT(1) /* USB error interrupt enable */
+#define INTR_UE BIT(0) /* USB interrupt enable */
+ u32 frindex; /* frame index */
+#define FRINDEX_MASK (0x3fff << 0)
+ u32 ctrldssegment; /* not used */
+ u32 deviceaddr;
+#define USBADR_SHIFT 25
+#define USBADR(d) \
+ (((d)>>25)&0x7f) /* bits 31:25, device address */
+#define USBADR_MASK (0x7f << 25)
+#define USBADRA BIT(24) /* device address advance */
+ u32 endpointlistaddr;/* endpoint list top memory address */
+/* bits 31:11, endpoint list pointer */
+#define EPBASE(d) (((d)>>11)&0x1fffff)
+#define ENDPOINTLISTADDR_MASK (0x1fffff << 11)
+ u32 ttctrl; /* H: TT operation, not used */
+ /* offset: 0x50 */
+ u32 burstsize; /* burst size of data movement */
+#define TXPBURST(b) \
+ (((b)>>8)&0xff) /* bits 15:8, TX burst length */
+#define RXPBURST(b) \
+ (((b)>>0)&0xff) /* bits 7:0, RX burst length */
+ u32 txfilltuning; /* TX tuning */
+ u32 txttfilltuning; /* H: TX TT tuning */
+ u32 ic_usb; /* control the IC_USB FS/LS transceiver */
+ /* offset: 0x60 */
+ u32 ulpi_viewport; /* indirect access to ULPI PHY */
+#define ULPIWU BIT(31) /* ULPI wakeup */
+#define ULPIRUN BIT(30) /* ULPI read/write run */
+#define ULPIRW BIT(29) /* ULPI read/write control */
+#define ULPISS BIT(27) /* ULPI sync state */
+#define ULPIPORT(u) \
+ (((u)>>24)&7) /* bits 26:24, ULPI port number */
+#define ULPIADDR(u) \
+ (((u)>>16)&0xff) /* bits 23:16, ULPI data address */
+#define ULPIDATRD(u) \
+ (((u)>>8)&0xff) /* bits 15:8, ULPI data read */
+#define ULPIDATWR(u) \
+ (((u)>>0)&0xff) /* bits 7:0, ULPI data write */
+ u8 _reserved6[0x70-0x64];
+ /* offset: 0x70 */
+ u32 configflag; /* H: not used */
+ u32 portsc1; /* port status */
+#define DA(p) \
+ (((p)>>25)&0x7f) /* bits 31:25, device address */
+#define PORTS_SSTS (BIT(24) | BIT(23)) /* suspend status */
+#define PORTS_WKOC BIT(22) /* wake on over-current enable */
+#define PORTS_WKDS BIT(21) /* wake on disconnect enable */
+#define PORTS_WKCN BIT(20) /* wake on connect enable */
+#define PORTS_PTC(p) (((p)>>16)&0xf) /* bits 19:16, port test control */
+#define PORTS_PIC (BIT(15) | BIT(14)) /* port indicator control */
+#define PORTS_PO BIT(13) /* port owner */
+#define PORTS_PP BIT(12) /* port power */
+#define PORTS_LS (BIT(11) | BIT(10)) /* line status */
+#define PORTS_SLP BIT(9) /* suspend using L1 */
+#define PORTS_PR BIT(8) /* port reset */
+#define PORTS_SUSP BIT(7) /* suspend */
+#define PORTS_FPR BIT(6) /* force port resume */
+#define PORTS_OCC BIT(5) /* over-current change */
+#define PORTS_OCA BIT(4) /* over-current active */
+#define PORTS_PEC BIT(3) /* port enable/disable change */
+#define PORTS_PE BIT(2) /* port enable/disable */
+#define PORTS_CSC BIT(1) /* connect status change */
+#define
PORTS_CCS BIT(0) /* current connect status */ + u8 _reserved7[0xb4-0x78]; + /* offset: 0xb4 */ + u32 devlc; /* control LPM and each USB port behavior */ +/* bits 31:29, parallel transceiver select */ +#define LPM_PTS(d) (((d)>>29)&7) +#define LPM_STS BIT(28) /* serial transceiver select */ +#define LPM_PTW BIT(27) /* parallel transceiver width */ +#define LPM_PSPD(d) (((d)>>25)&3) /* bits 26:25, port speed */ +#define LPM_PSPD_MASK (BIT(26) | BIT(25)) +#define LPM_SPEED_FULL 0 +#define LPM_SPEED_LOW 1 +#define LPM_SPEED_HIGH 2 +#define LPM_SRT BIT(24) /* shorten reset time */ +#define LPM_PFSC BIT(23) /* port force full speed connect */ +#define LPM_PHCD BIT(22) /* PHY low power suspend clock disable */ +#define LPM_STL BIT(16) /* STALL reply to LPM token */ +#define LPM_BA(d) \ + (((d)>>1)&0x7ff) /* bits 11:1, BmAttributes */ +#define LPM_NYT_ACK BIT(0) /* NYET/ACK reply to LPM token */ + u8 _reserved8[0xf4-0xb8]; + /* offset: 0xf4 */ + u32 otgsc; /* On-The-Go status and control */ +#define OTGSC_DPIE BIT(30) /* data pulse interrupt enable */ +#define OTGSC_MSE BIT(29) /* 1 ms timer interrupt enable */ +#define OTGSC_BSEIE BIT(28) /* B session end interrupt enable */ +#define OTGSC_BSVIE BIT(27) /* B session valid interrupt enable */ +#define OTGSC_ASVIE BIT(26) /* A session valid interrupt enable */ +#define OTGSC_AVVIE BIT(25) /* A VBUS valid interrupt enable */ +#define OTGSC_IDIE BIT(24) /* USB ID interrupt enable */ +#define OTGSC_DPIS BIT(22) /* data pulse interrupt status */ +#define OTGSC_MSS BIT(21) /* 1 ms timer interrupt status */ +#define OTGSC_BSEIS BIT(20) /* B session end interrupt status */ +#define OTGSC_BSVIS BIT(19) /* B session valid interrupt status */ +#define OTGSC_ASVIS BIT(18) /* A session valid interrupt status */ +#define OTGSC_AVVIS BIT(17) /* A VBUS valid interrupt status */ +#define OTGSC_IDIS BIT(16) /* USB ID interrupt status */ +#define OTGSC_DPS BIT(14) /* data bus pulsing status */ +#define OTGSC_MST BIT(13) /* 1 ms timer toggle */ +#define OTGSC_BSE BIT(12) /* B session end */ +#define OTGSC_BSV BIT(11) /* B session valid */ +#define OTGSC_ASV BIT(10) /* A session valid */ +#define OTGSC_AVV BIT(9) /* A VBUS valid */ +#define OTGSC_USBID BIT(8) /* USB ID */ +#define OTGSC_HABA BIT(7) /* hw assist B-disconnect to A-connect */ +#define OTGSC_HADP BIT(6) /* hw assist data pulse */ +#define OTGSC_IDPU BIT(5) /* ID pullup */ +#define OTGSC_DP BIT(4) /* data pulsing */ +#define OTGSC_OT BIT(3) /* OTG termination */ +#define OTGSC_HAAR BIT(2) /* hw assist auto reset */ +#define OTGSC_VC BIT(1) /* VBUS charge */ +#define OTGSC_VD BIT(0) /* VBUS discharge */ + u32 usbmode; +#define MODE_VBPS BIT(5) /* R/W VBUS power select */ +#define MODE_SDIS BIT(4) /* R/W stream disable mode */ +#define MODE_SLOM BIT(3) /* R/W setup lockout mode */ +#define MODE_ENSE BIT(2) /* endian select */ +#define MODE_CM(u) (((u)>>0)&3) /* bits 1:0, controller mode */ +#define MODE_IDLE 0 +#define MODE_DEVICE 2 +#define MODE_HOST 3 + u8 _reserved9[0x100-0xfc]; + /* offset: 0x100 */ + u32 endptnak; +#define EPTN(e) \ + (((e)>>16)&0xffff) /* bits 31:16, TX endpoint NAK */ +#define EPRN(e) \ + (((e)>>0)&0xffff) /* bits 15:0, RX endpoint NAK */ + u32 endptnaken; +#define EPTNE(e) \ + (((e)>>16)&0xffff) /* bits 31:16, TX endpoint NAK enable */ +#define EPRNE(e) \ + (((e)>>0)&0xffff) /* bits 15:0, RX endpoint NAK enable */ + u32 endptsetupstat; +#define SETUPSTAT_MASK (0xffff << 0) /* bits 15:0 */ +#define EP0SETUPSTAT_MASK 1 + u32 endptprime; +/* bits 31:16, prime endpoint transmit buffer */ 
+#define PETB(e) (((e)>>16)&0xffff) +/* bits 15:0, prime endpoint receive buffer */ +#define PERB(e) (((e)>>0)&0xffff) + /* offset: 0x110 */ + u32 endptflush; +/* bits 31:16, flush endpoint transmit buffer */ +#define FETB(e) (((e)>>16)&0xffff) +/* bits 15:0, flush endpoint receive buffer */ +#define FERB(e) (((e)>>0)&0xffff) + u32 endptstat; +/* bits 31:16, endpoint transmit buffer ready */ +#define ETBR(e) (((e)>>16)&0xffff) +/* bits 15:0, endpoint receive buffer ready */ +#define ERBR(e) (((e)>>0)&0xffff) + u32 endptcomplete; +/* bits 31:16, endpoint transmit complete event */ +#define ETCE(e) (((e)>>16)&0xffff) +/* bits 15:0, endpoint receive complete event */ +#define ERCE(e) (((e)>>0)&0xffff) + /* offset: 0x11c */ + u32 endptctrl[16]; +#define EPCTRL_TXE BIT(23) /* TX endpoint enable */ +#define EPCTRL_TXR BIT(22) /* TX data toggle reset */ +#define EPCTRL_TXI BIT(21) /* TX data toggle inhibit */ +#define EPCTRL_TXT(e) (((e)>>18)&3) /* bits 19:18, TX endpoint type */ +#define EPCTRL_TXT_SHIFT 18 +#define EPCTRL_TXD BIT(17) /* TX endpoint data source */ +#define EPCTRL_TXS BIT(16) /* TX endpoint STALL */ +#define EPCTRL_RXE BIT(7) /* RX endpoint enable */ +#define EPCTRL_RXR BIT(6) /* RX data toggle reset */ +#define EPCTRL_RXI BIT(5) /* RX data toggle inhibit */ +#define EPCTRL_RXT(e) (((e)>>2)&3) /* bits 3:2, RX endpoint type */ +#define EPCTRL_RXT_SHIFT 2 /* bits 19:18, TX endpoint type */ +#define EPCTRL_RXD BIT(1) /* RX endpoint data sink */ +#define EPCTRL_RXS BIT(0) /* RX endpoint STALL */ +} __attribute__ ((packed)); + +#endif /* __LANGWELL_UDC_H */ + diff --git a/include/linux/usb/m66592.h b/include/linux/usb/m66592.h new file mode 100644 index 00000000..a4ba31ab --- /dev/null +++ b/include/linux/usb/m66592.h @@ -0,0 +1,46 @@ +/* + * M66592 driver platform data + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __LINUX_USB_M66592_H +#define __LINUX_USB_M66592_H + +#define M66592_PLATDATA_XTAL_12MHZ 0x01 +#define M66592_PLATDATA_XTAL_24MHZ 0x02 +#define M66592_PLATDATA_XTAL_48MHZ 0x03 + +struct m66592_platdata { + /* one = on chip controller, zero = external controller */ + unsigned on_chip:1; + + /* one = big endian, zero = little endian */ + unsigned endian:1; + + /* (external controller only) M66592_PLATDATA_XTAL_nnMHZ */ + unsigned xtal:2; + + /* (external controller only) one = 3.3V, zero = 1.5V */ + unsigned vif:1; + + /* (external controller only) set one = WR0_N shorted to WR1_N */ + unsigned wr0_shorted_to_wr1:1; +}; + +#endif /* __LINUX_USB_M66592_H */ + diff --git a/include/linux/usb/midi.h b/include/linux/usb/midi.h new file mode 100644 index 00000000..c8c52e3c --- /dev/null +++ b/include/linux/usb/midi.h @@ -0,0 +1,112 @@ +/* + * <linux/usb/midi.h> -- USB MIDI definitions. + * + * Copyright (C) 2006 Thumtronics Pty Ltd. 
+ * Developed for Thumtronics by Grey Innovation + * Ben Williamson <ben.williamson@greyinnovation.com> + * + * This software is distributed under the terms of the GNU General Public + * License ("GPL") version 2, as published by the Free Software Foundation. + * + * This file holds USB constants and structures defined + * by the USB Device Class Definition for MIDI Devices. + * Comments below reference relevant sections of that document: + * + * http://www.usb.org/developers/devclass_docs/midi10.pdf + */ + +#ifndef __LINUX_USB_MIDI_H +#define __LINUX_USB_MIDI_H + +#include <linux/types.h> + +/* A.1 MS Class-Specific Interface Descriptor Subtypes */ +#define USB_MS_HEADER 0x01 +#define USB_MS_MIDI_IN_JACK 0x02 +#define USB_MS_MIDI_OUT_JACK 0x03 +#define USB_MS_ELEMENT 0x04 + +/* A.2 MS Class-Specific Endpoint Descriptor Subtypes */ +#define USB_MS_GENERAL 0x01 + +/* A.3 MS MIDI IN and OUT Jack Types */ +#define USB_MS_EMBEDDED 0x01 +#define USB_MS_EXTERNAL 0x02 + +/* 6.1.2.1 Class-Specific MS Interface Header Descriptor */ +struct usb_ms_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __le16 bcdMSC; + __le16 wTotalLength; +} __attribute__ ((packed)); + +#define USB_DT_MS_HEADER_SIZE 7 + +/* 6.1.2.2 MIDI IN Jack Descriptor */ +struct usb_midi_in_jack_descriptor { + __u8 bLength; + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* USB_MS_MIDI_IN_JACK */ + __u8 bJackType; /* USB_MS_EMBEDDED/EXTERNAL */ + __u8 bJackID; + __u8 iJack; +} __attribute__ ((packed)); + +#define USB_DT_MIDI_IN_SIZE 6 + +struct usb_midi_source_pin { + __u8 baSourceID; + __u8 baSourcePin; +} __attribute__ ((packed)); + +/* 6.1.2.3 MIDI OUT Jack Descriptor */ +struct usb_midi_out_jack_descriptor { + __u8 bLength; + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* USB_MS_MIDI_OUT_JACK */ + __u8 bJackType; /* USB_MS_EMBEDDED/EXTERNAL */ + __u8 bJackID; + __u8 bNrInputPins; /* p */ + struct usb_midi_source_pin pins[]; /* [p] */ + /*__u8 iJack; -- omitted due to variable-sized pins[] */ +} __attribute__ ((packed)); + +#define USB_DT_MIDI_OUT_SIZE(p) (7 + 2 * (p)) + +/* As above, but more useful for defining your own descriptors: */ +#define DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(p) \ +struct usb_midi_out_jack_descriptor_##p { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bJackType; \ + __u8 bJackID; \ + __u8 bNrInputPins; \ + struct usb_midi_source_pin pins[p]; \ + __u8 iJack; \ +} __attribute__ ((packed)) + +/* 6.2.2 Class-Specific MS Bulk Data Endpoint Descriptor */ +struct usb_ms_endpoint_descriptor { + __u8 bLength; /* 4+n */ + __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ + __u8 bDescriptorSubtype; /* USB_MS_GENERAL */ + __u8 bNumEmbMIDIJack; /* n */ + __u8 baAssocJackID[]; /* [n] */ +} __attribute__ ((packed)); + +#define USB_DT_MS_ENDPOINT_SIZE(n) (4 + (n)) + +/* As above, but more useful for defining your own descriptors: */ +#define DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(n) \ +struct usb_ms_endpoint_descriptor_##n { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bNumEmbMIDIJack; \ + __u8 baAssocJackID[n]; \ +} __attribute__ ((packed)) + +#endif /* __LINUX_USB_MIDI_H */ diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h new file mode 100644 index 00000000..22a396c1 --- /dev/null +++ b/include/linux/usb/msm_hsusb.h @@ -0,0 +1,185 @@ +/* linux/include/asm-arm/arch-msm/hsusb.h + * + * Copyright (C) 2008 Google, Inc. 
+ * Author: Brian Swetland <swetland@google.com>
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_HSUSB_H
+#define __ASM_ARCH_MSM_HSUSB_H
+
+#include <linux/types.h>
+#include <linux/usb/otg.h>
+
+/**
+ * Supported USB modes
+ *
+ * USB_PERIPHERAL Only peripheral mode is supported.
+ * USB_HOST Only host mode is supported.
+ * USB_OTG OTG mode is supported.
+ *
+ */
+enum usb_mode_type {
+ USB_NONE = 0,
+ USB_PERIPHERAL,
+ USB_HOST,
+ USB_OTG,
+};
+
+/**
+ * OTG control
+ *
+ * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host
+ * only configuration.
+ * OTG_PHY_CONTROL Id/VBUS notifications come from USB PHY.
+ * OTG_PMIC_CONTROL Id/VBUS notifications come from PMIC hardware.
+ * OTG_USER_CONTROL Id/VBUS notifications come from the user via sysfs.
+ *
+ */
+enum otg_control_type {
+ OTG_NO_CONTROL = 0,
+ OTG_PHY_CONTROL,
+ OTG_PMIC_CONTROL,
+ OTG_USER_CONTROL,
+};
+
+/**
+ * PHY used in
+ *
+ * INVALID_PHY Unsupported PHY
+ * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY
+ * SNPS_28NM_INTEGRATED_PHY Synopsys 28nm integrated PHY
+ *
+ */
+enum msm_usb_phy_type {
+ INVALID_PHY = 0,
+ CI_45NM_INTEGRATED_PHY,
+ SNPS_28NM_INTEGRATED_PHY,
+};
+
+#define IDEV_CHG_MAX 1500
+#define IUNIT 100
+
+/**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ *
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+};
+
+/**
+ * USB charger types
+ *
+ * USB_INVALID_CHARGER Invalid USB charger.
+ * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
+ * on USB2.0 compliant host/hub.
+ * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
+ * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
+ * IDEV_CHG_MAX can be drawn irrespective of USB state.
+ *
+ */
+enum usb_chg_type {
+ USB_INVALID_CHARGER = 0,
+ USB_SDP_CHARGER,
+ USB_DCP_CHARGER,
+ USB_CDP_CHARGER,
+};
+
+/**
+ * struct msm_otg_platform_data - platform device data
+ * for msm_otg driver.
+ * @phy_init_seq: PHY configuration sequence. val, reg pairs
+ * terminated by -1.
+ * @vbus_power: VBUS power on/off routine.
+ * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
+ * @mode: Supported mode (OTG/peripheral/host).
+ * @otg_control: OTG switch controlled by user/Id pin
+ * @default_mode: Default operational mode. Applicable only if
+ * OTG switch is controlled by user.
+ * @pclk_src_name: pclk is derived from ebi1_usb_clk in case of 7x27 and 8k + * dfab_usb_hs_clk in case of 8660 and 8960. + */ +struct msm_otg_platform_data { + int *phy_init_seq; + void (*vbus_power)(bool on); + unsigned power_budget; + enum usb_mode_type mode; + enum otg_control_type otg_control; + enum usb_mode_type default_mode; + enum msm_usb_phy_type phy_type; + void (*setup_gpio)(enum usb_otg_state state); + char *pclk_src_name; +}; + +/** + * struct msm_otg: OTG driver data. Shared by HCD and DCD. + * @otg: USB OTG Transceiver structure. + * @pdata: otg device platform data. + * @irq: IRQ number assigned for HSUSB controller. + * @clk: clock struct of usb_hs_clk. + * @pclk: clock struct of usb_hs_pclk. + * @pclk_src: pclk source for voting. + * @phy_reset_clk: clock struct of usb_phy_clk. + * @core_clk: clock struct of usb_hs_core_clk. + * @regs: ioremapped register base address. + * @inputs: OTG state machine inputs(Id, SessValid etc). + * @sm_work: OTG state machine work. + * @in_lpm: indicates low power mode (LPM) state. + * @async_int: Async interrupt arrived. + * @cur_power: The amount of mA available from downstream port. + * @chg_work: Charger detection work. + * @chg_state: The state of charger detection process. + * @chg_type: The type of charger attached. + * @dcd_retires: The retry count used to track Data contact + * detection process. + */ +struct msm_otg { + struct usb_phy phy; + struct msm_otg_platform_data *pdata; + int irq; + struct clk *clk; + struct clk *pclk; + struct clk *pclk_src; + struct clk *phy_reset_clk; + struct clk *core_clk; + void __iomem *regs; +#define ID 0 +#define B_SESS_VLD 1 + unsigned long inputs; + struct work_struct sm_work; + atomic_t in_lpm; + int async_int; + unsigned cur_power; + struct delayed_work chg_work; + enum usb_chg_state chg_state; + enum usb_chg_type chg_type; + u8 dcd_retries; +}; + +#endif diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h new file mode 100644 index 00000000..6e97a2d3 --- /dev/null +++ b/include/linux/usb/msm_hsusb_hw.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland <swetland@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__ +#define __LINUX_USB_GADGET_MSM72K_UDC_H__ + +#define USB_AHBBURST (MSM_USB_BASE + 0x0090) +#define USB_AHBMODE (MSM_USB_BASE + 0x0098) +#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ + +#define USB_USBCMD (MSM_USB_BASE + 0x0140) +#define USB_PORTSC (MSM_USB_BASE + 0x0184) +#define USB_OTGSC (MSM_USB_BASE + 0x01A4) +#define USB_USBMODE (MSM_USB_BASE + 0x01A8) +#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240) + +#define USBCMD_RESET 2 +#define USB_USBINTR (MSM_USB_BASE + 0x0148) + +#define PORTSC_PHCD (1 << 23) /* phy suspend mode */ +#define PORTSC_PTS_MASK (3 << 30) +#define PORTSC_PTS_ULPI (3 << 30) + +#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170) +#define ULPI_RUN (1 << 30) +#define ULPI_WRITE (1 << 29) +#define ULPI_READ (0 << 29) +#define ULPI_ADDR(n) (((n) & 255) << 16) +#define ULPI_DATA(n) ((n) & 255) +#define ULPI_DATA_READ(n) (((n) >> 8) & 255) + +#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */ +#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */ +#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */ + +/* OTG definitions */ +#define OTGSC_INTSTS_MASK (0x7f << 16) +#define OTGSC_ID (1 << 8) +#define OTGSC_BSV (1 << 11) +#define OTGSC_IDIS (1 << 16) +#define OTGSC_BSVIS (1 << 19) +#define OTGSC_IDIE (1 << 24) +#define OTGSC_BSVIE (1 << 27) + +#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h new file mode 100644 index 00000000..eb505250 --- /dev/null +++ b/include/linux/usb/musb.h @@ -0,0 +1,152 @@ +/* + * This is used to for host and peripheral modes of the driver for + * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. + * + * Board initialization should put one of these into dev->platform_data, + * probably on some platform_device named "musb-hdrc". It encapsulates + * key configuration differences between boards. + */ + +#ifndef __LINUX_USB_MUSB_H +#define __LINUX_USB_MUSB_H + +/* The USB role is defined by the connector used on the board, so long as + * standards are being followed. (Developer boards sometimes won't.) 
+ */ +enum musb_mode { + MUSB_UNDEFINED = 0, + MUSB_HOST, /* A or Mini-A connector */ + MUSB_PERIPHERAL, /* B or Mini-B connector */ + MUSB_OTG /* Mini-AB connector */ +}; + +struct clk; + +enum musb_fifo_style { + FIFO_RXTX, + FIFO_TX, + FIFO_RX +} __attribute__ ((packed)); + +enum musb_buf_mode { + BUF_SINGLE, + BUF_DOUBLE +} __attribute__ ((packed)); + +struct musb_fifo_cfg { + u8 hw_ep_num; + enum musb_fifo_style style; + enum musb_buf_mode mode; + u16 maxpacket; +}; + +#define MUSB_EP_FIFO(ep, st, m, pkt) \ +{ \ + .hw_ep_num = ep, \ + .style = st, \ + .mode = m, \ + .maxpacket = pkt, \ +} + +#define MUSB_EP_FIFO_SINGLE(ep, st, pkt) \ + MUSB_EP_FIFO(ep, st, BUF_SINGLE, pkt) + +#define MUSB_EP_FIFO_DOUBLE(ep, st, pkt) \ + MUSB_EP_FIFO(ep, st, BUF_DOUBLE, pkt) + +struct musb_hdrc_eps_bits { + const char name[16]; + u8 bits; +}; + +struct musb_hdrc_config { + struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */ + unsigned fifo_cfg_size; /* size of the fifo configuration */ + + /* MUSB configuration-specific details */ + unsigned multipoint:1; /* multipoint device */ + unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */ + unsigned soft_con:1 __deprecated; /* soft connect required */ + unsigned utm_16:1 __deprecated; /* utm data witdh is 16 bits */ + unsigned big_endian:1; /* true if CPU uses big-endian */ + unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ + unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ + unsigned high_iso_tx:1; /* Tx ep required for HB iso */ + unsigned high_iso_rx:1; /* Rx ep required for HD iso */ + unsigned dma:1 __deprecated; /* supports DMA */ + unsigned vendor_req:1 __deprecated; /* vendor registers required */ + + u8 num_eps; /* number of endpoints _with_ ep0 */ + u8 dma_channels __deprecated; /* number of dma channels */ + u8 dyn_fifo_size; /* dynamic size in bytes */ + u8 vendor_ctrl __deprecated; /* vendor control reg width */ + u8 vendor_stat __deprecated; /* vendor status reg witdh */ + u8 dma_req_chan __deprecated; /* bitmask for required dma channels */ + u8 ram_bits; /* ram address size */ + + struct musb_hdrc_eps_bits *eps_bits __deprecated; +#ifdef CONFIG_BLACKFIN + /* A GPIO controlling VRSEL in Blackfin */ + unsigned int gpio_vrsel; + unsigned int gpio_vrsel_active; + /* musb CLKIN in Blackfin in MHZ */ + unsigned char clkin; +#endif + +}; + +struct musb_hdrc_platform_data { + /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ + u8 mode; + + /* for clk_get() */ + const char *clock; + + /* (HOST or OTG) switch VBUS on/off */ + int (*set_vbus)(struct device *dev, int is_on); + + /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ + u8 power; + + /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ + u8 min_power; + + /* (HOST or OTG) msec/2 after VBUS on till power good */ + u8 potpgt; + + /* (HOST or OTG) program PHY for external Vbus */ + unsigned extvbus:1; + + /* Power the device on or off */ + int (*set_power)(int state); + + /* MUSB configuration-specific details */ + struct musb_hdrc_config *config; + + /* Architecture specific board data */ + void *board_data; + + /* Platform specific struct musb_ops pointer */ + const void *platform_ops; +}; + + +/* TUSB 6010 support */ + +#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ +#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ +#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ + +#ifdef CONFIG_ARCH_OMAP2 + +extern int __init tusb6010_setup_interface( + struct musb_hdrc_platform_data *data, + 
unsigned ps_refclk, unsigned waitpin, + unsigned async_cs, unsigned sync_cs, + unsigned irq, unsigned dmachan); + +extern int tusb6010_platform_retime(unsigned is_refclk); + +#endif /* OMAP2 */ + +#endif /* __LINUX_USB_MUSB_H */ diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h new file mode 100644 index 00000000..148b8fa5 --- /dev/null +++ b/include/linux/usb/net2280.h @@ -0,0 +1,443 @@ +/* + * NetChip 2280 high/full speed USB device controller. + * Unlike many such controllers, this one talks PCI. + * + * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) + * Copyright (C) 2003 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_USB_NET2280_H +#define __LINUX_USB_NET2280_H + +/*-------------------------------------------------------------------------*/ + +/* NET2280 MEMORY MAPPED REGISTERS + * + * The register layout came from the chip documentation, and the bit + * number definitions were extracted from chip specification. + * + * Use the shift operator ('<<') to build bit masks, with readl/writel + * to access the registers through PCI. + */ + +/* main registers, BAR0 + 0x0000 */ +struct net2280_regs { + /* offset 0x0000 */ + u32 devinit; +#define LOCAL_CLOCK_FREQUENCY 8 +#define FORCE_PCI_RESET 7 +#define PCI_ID 6 +#define PCI_ENABLE 5 +#define FIFO_SOFT_RESET 4 +#define CFG_SOFT_RESET 3 +#define PCI_SOFT_RESET 2 +#define USB_SOFT_RESET 1 +#define M8051_RESET 0 + u32 eectl; +#define EEPROM_ADDRESS_WIDTH 23 +#define EEPROM_CHIP_SELECT_ACTIVE 22 +#define EEPROM_PRESENT 21 +#define EEPROM_VALID 20 +#define EEPROM_BUSY 19 +#define EEPROM_CHIP_SELECT_ENABLE 18 +#define EEPROM_BYTE_READ_START 17 +#define EEPROM_BYTE_WRITE_START 16 +#define EEPROM_READ_DATA 8 +#define EEPROM_WRITE_DATA 0 + u32 eeclkfreq; + u32 _unused0; + /* offset 0x0010 */ + + u32 pciirqenb0; /* interrupt PCI master ... 
*/ +#define SETUP_PACKET_INTERRUPT_ENABLE 7 +#define ENDPOINT_F_INTERRUPT_ENABLE 6 +#define ENDPOINT_E_INTERRUPT_ENABLE 5 +#define ENDPOINT_D_INTERRUPT_ENABLE 4 +#define ENDPOINT_C_INTERRUPT_ENABLE 3 +#define ENDPOINT_B_INTERRUPT_ENABLE 2 +#define ENDPOINT_A_INTERRUPT_ENABLE 1 +#define ENDPOINT_0_INTERRUPT_ENABLE 0 + u32 pciirqenb1; +#define PCI_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + u32 cpu_irqenb0; /* ... or onboard 8051 */ +#define SETUP_PACKET_INTERRUPT_ENABLE 7 +#define ENDPOINT_F_INTERRUPT_ENABLE 6 +#define ENDPOINT_E_INTERRUPT_ENABLE 5 +#define ENDPOINT_D_INTERRUPT_ENABLE 4 +#define ENDPOINT_C_INTERRUPT_ENABLE 3 +#define ENDPOINT_B_INTERRUPT_ENABLE 2 +#define ENDPOINT_A_INTERRUPT_ENABLE 1 +#define ENDPOINT_0_INTERRUPT_ENABLE 0 + u32 cpu_irqenb1; +#define CPU_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_INTA_INTERRUPT_ENABLE 24 +#define PCI_PME_INTERRUPT_ENABLE 23 +#define PCI_SERR_INTERRUPT_ENABLE 22 +#define PCI_PERR_INTERRUPT_ENABLE 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + + /* offset 0x0020 */ + u32 _unused1; + u32 usbirqenb1; +#define USB_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_INTA_INTERRUPT_ENABLE 24 +#define PCI_PME_INTERRUPT_ENABLE 23 +#define PCI_SERR_INTERRUPT_ENABLE 22 +#define PCI_PERR_INTERRUPT_ENABLE 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 
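+/*
+ * Illustrative sketch, not part of the original header: these constants are
+ * bit numbers rather than masks, so (as noted above) callers build masks
+ * with the shift operator and go through readl/writel.  A hypothetical
+ * driver with "regs" mapped to this register block might enable VBUS
+ * interrupts roughly like this:
+ *
+ *	u32 tmp = readl(&regs->usbirqenb1);
+ *
+ *	tmp |= (1 << USB_INTERRUPT_ENABLE) | (1 << VBUS_INTERRUPT_ENABLE);
+ *	writel(tmp, &regs->usbirqenb1);
+ */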
+#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + u32 irqstat0; +#define INTA_ASSERTED 12 +#define SETUP_PACKET_INTERRUPT 7 +#define ENDPOINT_F_INTERRUPT 6 +#define ENDPOINT_E_INTERRUPT 5 +#define ENDPOINT_D_INTERRUPT 4 +#define ENDPOINT_C_INTERRUPT 3 +#define ENDPOINT_B_INTERRUPT 2 +#define ENDPOINT_A_INTERRUPT 1 +#define ENDPOINT_0_INTERRUPT 0 + u32 irqstat1; +#define POWER_STATE_CHANGE_INTERRUPT 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT 26 +#define PCI_PARITY_ERROR_INTERRUPT 25 +#define PCI_INTA_INTERRUPT 24 +#define PCI_PME_INTERRUPT 23 +#define PCI_SERR_INTERRUPT 22 +#define PCI_PERR_INTERRUPT 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19 +#define PCI_RETRY_ABORT_INTERRUPT 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16 +#define SOF_DOWN_INTERRUPT 14 +#define GPIO_INTERRUPT 13 +#define DMA_D_INTERRUPT 12 +#define DMA_C_INTERRUPT 11 +#define DMA_B_INTERRUPT 10 +#define DMA_A_INTERRUPT 9 +#define EEPROM_DONE_INTERRUPT 8 +#define VBUS_INTERRUPT 7 +#define CONTROL_STATUS_INTERRUPT 6 +#define ROOT_PORT_RESET_INTERRUPT 4 +#define SUSPEND_REQUEST_INTERRUPT 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT 2 +#define RESUME_INTERRUPT 1 +#define SOF_INTERRUPT 0 + /* offset 0x0030 */ + u32 idxaddr; + u32 idxdata; + u32 fifoctl; +#define PCI_BASE2_RANGE 16 +#define IGNORE_FIFO_AVAILABILITY 3 +#define PCI_BASE2_SELECT 2 +#define FIFO_CONFIGURATION_SELECT 0 + u32 _unused2; + /* offset 0x0040 */ + u32 memaddr; +#define START 28 +#define DIRECTION 27 +#define FIFO_DIAGNOSTIC_SELECT 24 +#define MEMORY_ADDRESS 0 + u32 memdata0; + u32 memdata1; + u32 _unused3; + /* offset 0x0050 */ + u32 gpioctl; +#define GPIO3_LED_SELECT 12 +#define GPIO3_INTERRUPT_ENABLE 11 +#define GPIO2_INTERRUPT_ENABLE 10 +#define GPIO1_INTERRUPT_ENABLE 9 +#define GPIO0_INTERRUPT_ENABLE 8 +#define GPIO3_OUTPUT_ENABLE 7 +#define GPIO2_OUTPUT_ENABLE 6 +#define GPIO1_OUTPUT_ENABLE 5 +#define GPIO0_OUTPUT_ENABLE 4 +#define GPIO3_DATA 3 +#define GPIO2_DATA 2 +#define GPIO1_DATA 1 +#define GPIO0_DATA 0 + u32 gpiostat; +#define GPIO3_INTERRUPT 3 +#define GPIO2_INTERRUPT 2 +#define GPIO1_INTERRUPT 1 +#define GPIO0_INTERRUPT 0 +} __attribute__ ((packed)); + +/* usb control, BAR0 + 0x0080 */ +struct net2280_usb_regs { + /* offset 0x0080 */ + u32 stdrsp; +#define STALL_UNSUPPORTED_REQUESTS 31 +#define SET_TEST_MODE 16 +#define GET_OTHER_SPEED_CONFIGURATION 15 +#define GET_DEVICE_QUALIFIER 14 +#define SET_ADDRESS 13 +#define ENDPOINT_SET_CLEAR_HALT 12 +#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11 +#define GET_STRING_DESCRIPTOR_2 10 +#define GET_STRING_DESCRIPTOR_1 9 +#define GET_STRING_DESCRIPTOR_0 8 +#define GET_SET_INTERFACE 6 +#define GET_SET_CONFIGURATION 5 +#define GET_CONFIGURATION_DESCRIPTOR 4 +#define GET_DEVICE_DESCRIPTOR 3 +#define GET_ENDPOINT_STATUS 2 +#define GET_INTERFACE_STATUS 1 +#define GET_DEVICE_STATUS 0 + u32 prodvendid; +#define PRODUCT_ID 16 +#define VENDOR_ID 0 + u32 relnum; + u32 usbctl; +#define SERIAL_NUMBER_INDEX 16 +#define PRODUCT_ID_STRING_ENABLE 13 +#define VENDOR_ID_STRING_ENABLE 12 +#define USB_ROOT_PORT_WAKEUP_ENABLE 11 +#define VBUS_PIN 10 +#define TIMED_DISCONNECT 9 +#define SUSPEND_IMMEDIATELY 7 +#define SELF_POWERED_USB_DEVICE 6 +#define REMOTE_WAKEUP_SUPPORT 5 +#define PME_POLARITY 4 +#define USB_DETECT_ENABLE 3 +#define PME_WAKEUP_ENABLE 2 +#define DEVICE_REMOTE_WAKEUP_ENABLE 1 +#define 
SELF_POWERED_STATUS 0 + /* offset 0x0090 */ + u32 usbstat; +#define HIGH_SPEED 7 +#define FULL_SPEED 6 +#define GENERATE_RESUME 5 +#define GENERATE_DEVICE_REMOTE_WAKEUP 4 + u32 xcvrdiag; +#define FORCE_HIGH_SPEED_MODE 31 +#define FORCE_FULL_SPEED_MODE 30 +#define USB_TEST_MODE 24 +#define LINE_STATE 16 +#define TRANSCEIVER_OPERATION_MODE 2 +#define TRANSCEIVER_SELECT 1 +#define TERMINATION_SELECT 0 + u32 setup0123; + u32 setup4567; + /* offset 0x0090 */ + u32 _unused0; + u32 ouraddr; +#define FORCE_IMMEDIATE 7 +#define OUR_USB_ADDRESS 0 + u32 ourconfig; +} __attribute__ ((packed)); + +/* pci control, BAR0 + 0x0100 */ +struct net2280_pci_regs { + /* offset 0x0100 */ + u32 pcimstctl; +#define PCI_ARBITER_PARK_SELECT 13 +#define PCI_MULTI LEVEL_ARBITER 12 +#define PCI_RETRY_ABORT_ENABLE 11 +#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10 +#define DMA_READ_MULTIPLE_ENABLE 9 +#define DMA_READ_LINE_ENABLE 8 +#define PCI_MASTER_COMMAND_SELECT 6 +#define MEM_READ_OR_WRITE 0 +#define IO_READ_OR_WRITE 1 +#define CFG_READ_OR_WRITE 2 +#define PCI_MASTER_START 5 +#define PCI_MASTER_READ_WRITE 4 +#define PCI_MASTER_WRITE 0 +#define PCI_MASTER_READ 1 +#define PCI_MASTER_BYTE_WRITE_ENABLES 0 + u32 pcimstaddr; + u32 pcimstdata; + u32 pcimststat; +#define PCI_ARBITER_CLEAR 2 +#define PCI_EXTERNAL_ARBITER 1 +#define PCI_HOST_MODE 0 +} __attribute__ ((packed)); + +/* dma control, BAR0 + 0x0180 ... array of four structs like this, + * for channels 0..3. see also struct net2280_dma: descriptor + * that can be loaded into some of these registers. + */ +struct net2280_dma_regs { /* [11.7] */ + /* offset 0x0180, 0x01a0, 0x01c0, 0x01e0, */ + u32 dmactl; +#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25 +#define DMA_CLEAR_COUNT_ENABLE 21 +#define DESCRIPTOR_POLLING_RATE 19 +#define POLL_CONTINUOUS 0 +#define POLL_1_USEC 1 +#define POLL_100_USEC 2 +#define POLL_1_MSEC 3 +#define DMA_VALID_BIT_POLLING_ENABLE 18 +#define DMA_VALID_BIT_ENABLE 17 +#define DMA_SCATTER_GATHER_ENABLE 16 +#define DMA_OUT_AUTO_START_ENABLE 4 +#define DMA_PREEMPT_ENABLE 3 +#define DMA_FIFO_VALIDATE 2 +#define DMA_ENABLE 1 +#define DMA_ADDRESS_HOLD 0 + u32 dmastat; +#define DMA_ABORT_DONE_INTERRUPT 27 +#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25 +#define DMA_TRANSACTION_DONE_INTERRUPT 24 +#define DMA_ABORT 1 +#define DMA_START 0 + u32 _unused0[2]; + /* offset 0x0190, 0x01b0, 0x01d0, 0x01f0, */ + u32 dmacount; +#define VALID_BIT 31 +#define DMA_DIRECTION 30 +#define DMA_DONE_INTERRUPT_ENABLE 29 +#define END_OF_CHAIN 28 +#define DMA_BYTE_COUNT_MASK ((1<<24)-1) +#define DMA_BYTE_COUNT 0 + u32 dmaaddr; + u32 dmadesc; + u32 _unused1; +} __attribute__ ((packed)); + +/* dedicated endpoint registers, BAR0 + 0x0200 */ + +struct net2280_dep_regs { /* [11.8] */ + /* offset 0x0200, 0x0210, 0x220, 0x230, 0x240 */ + u32 dep_cfg; + /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */ + u32 dep_rsp; + u32 _unused[2]; +} __attribute__ ((packed)); + +/* configurable endpoint registers, BAR0 + 0x0300 ... 
array of seven structs + * like this, for ep0 then the configurable endpoints A..F + * ep0 reserved for control; E and F have only 64 bytes of fifo + */ +struct net2280_ep_regs { /* [11.9] */ + /* offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 */ + u32 ep_cfg; +#define ENDPOINT_BYTE_COUNT 16 +#define ENDPOINT_ENABLE 10 +#define ENDPOINT_TYPE 8 +#define ENDPOINT_DIRECTION 7 +#define ENDPOINT_NUMBER 0 + u32 ep_rsp; +#define SET_NAK_OUT_PACKETS 15 +#define SET_EP_HIDE_STATUS_PHASE 14 +#define SET_EP_FORCE_CRC_ERROR 13 +#define SET_INTERRUPT_MODE 12 +#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11 +#define SET_NAK_OUT_PACKETS_MODE 10 +#define SET_ENDPOINT_TOGGLE 9 +#define SET_ENDPOINT_HALT 8 +#define CLEAR_NAK_OUT_PACKETS 7 +#define CLEAR_EP_HIDE_STATUS_PHASE 6 +#define CLEAR_EP_FORCE_CRC_ERROR 5 +#define CLEAR_INTERRUPT_MODE 4 +#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3 +#define CLEAR_NAK_OUT_PACKETS_MODE 2 +#define CLEAR_ENDPOINT_TOGGLE 1 +#define CLEAR_ENDPOINT_HALT 0 + u32 ep_irqenb; +#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5 +#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3 +#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2 +#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1 +#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0 + u32 ep_stat; +#define FIFO_VALID_COUNT 24 +#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22 +#define TIMEOUT 21 +#define USB_STALL_SENT 20 +#define USB_IN_NAK_SENT 19 +#define USB_IN_ACK_RCVD 18 +#define USB_OUT_PING_NAK_SENT 17 +#define USB_OUT_ACK_SENT 16 +#define FIFO_OVERFLOW 13 +#define FIFO_UNDERFLOW 12 +#define FIFO_FULL 11 +#define FIFO_EMPTY 10 +#define FIFO_FLUSH 9 +#define SHORT_PACKET_OUT_DONE_INTERRUPT 6 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5 +#define NAK_OUT_PACKETS 4 +#define DATA_PACKET_RECEIVED_INTERRUPT 3 +#define DATA_PACKET_TRANSMITTED_INTERRUPT 2 +#define DATA_OUT_PING_TOKEN_INTERRUPT 1 +#define DATA_IN_TOKEN_INTERRUPT 0 + /* offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0 */ + u32 ep_avail; + u32 ep_data; + u32 _unused0[2]; +} __attribute__ ((packed)); + +#endif /* __LINUX_USB_NET2280_H */ diff --git a/include/linux/usb/ohci_pdriver.h b/include/linux/usb/ohci_pdriver.h new file mode 100644 index 00000000..2808f2a9 --- /dev/null +++ b/include/linux/usb/ohci_pdriver.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __USB_CORE_OHCI_PDRIVER_H +#define __USB_CORE_OHCI_PDRIVER_H + +/** + * struct usb_ohci_pdata - platform_data for generic ohci driver + * + * @big_endian_desc: BE descriptors + * @big_endian_mmio: BE registers + * @no_big_frame_no: no big endian frame_no shift + * + * These are general configuration options for the OHCI controller. 
All of + * these options are activating more or less workarounds for some hardware. + */ +struct usb_ohci_pdata { + unsigned big_endian_desc:1; + unsigned big_endian_mmio:1; + unsigned no_big_frame_no:1; +}; + +#endif /* __USB_CORE_OHCI_PDRIVER_H */ diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h new file mode 100644 index 00000000..38ab3f46 --- /dev/null +++ b/include/linux/usb/otg.h @@ -0,0 +1,279 @@ +/* USB OTG (On The Go) defines */ +/* + * + * These APIs may be used between USB controllers. USB device drivers + * (for either host or peripheral roles) don't use these calls; they + * continue to use just usb_device and usb_gadget. + */ + +#ifndef __LINUX_USB_OTG_H +#define __LINUX_USB_OTG_H + +#include <linux/notifier.h> + +/* OTG defines lots of enumeration states before device reset */ +enum usb_otg_state { + OTG_STATE_UNDEFINED = 0, + + /* single-role peripheral, and dual-role default-b */ + OTG_STATE_B_IDLE, + OTG_STATE_B_SRP_INIT, + OTG_STATE_B_PERIPHERAL, + + /* extra dual-role default-b states */ + OTG_STATE_B_WAIT_ACON, + OTG_STATE_B_HOST, + + /* dual-role default-a */ + OTG_STATE_A_IDLE, + OTG_STATE_A_WAIT_VRISE, + OTG_STATE_A_WAIT_BCON, + OTG_STATE_A_HOST, + OTG_STATE_A_SUSPEND, + OTG_STATE_A_PERIPHERAL, + OTG_STATE_A_WAIT_VFALL, + OTG_STATE_A_VBUS_ERR, +}; + +enum usb_phy_events { + USB_EVENT_NONE, /* no events or cable disconnected */ + USB_EVENT_VBUS, /* vbus valid event */ + USB_EVENT_ID, /* id was grounded */ + USB_EVENT_CHARGER, /* usb dedicated charger */ + USB_EVENT_ENUMERATED, /* gadget driver enumerated */ +}; + +struct usb_phy; + +/* for transceivers connected thru an ULPI interface, the user must + * provide access ops + */ +struct usb_phy_io_ops { + int (*read)(struct usb_phy *x, u32 reg); + int (*write)(struct usb_phy *x, u32 val, u32 reg); +}; + +struct usb_otg { + u8 default_a; + + struct usb_phy *phy; + struct usb_bus *host; + struct usb_gadget *gadget; + + /* bind/unbind the host controller */ + int (*set_host)(struct usb_otg *otg, struct usb_bus *host); + + /* bind/unbind the peripheral controller */ + int (*set_peripheral)(struct usb_otg *otg, + struct usb_gadget *gadget); + + /* effective for A-peripheral, ignored for B devices */ + int (*set_vbus)(struct usb_otg *otg, bool enabled); + + /* for B devices only: start session with A-Host */ + int (*start_srp)(struct usb_otg *otg); + + /* start or continue HNP role switch */ + int (*start_hnp)(struct usb_otg *otg); + +}; + +/* + * the otg driver needs to interact with both device side and host side + * usb controllers. it decides which controller is active at a given + * moment, using the transceiver, ID signal, HNP and sometimes static + * configuration information (including "board isn't wired for otg"). 
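+ *
+ * For illustration only (not part of the original header): a host controller
+ * driver that wants to hand its bus over to such a transceiver typically
+ * does something along these lines, using the helpers declared further down
+ * in this file ("hcd" being the driver's struct usb_hcd), and drops the
+ * reference with usb_put_transceiver() when it is done:
+ *
+ *	struct usb_phy *phy = usb_get_transceiver();
+ *
+ *	if (phy && phy->otg)
+ *		otg_set_host(phy->otg, &hcd->self);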
+ */ +struct usb_phy { + struct device *dev; + const char *label; + unsigned int flags; + + enum usb_otg_state state; + enum usb_phy_events last_event; + + struct usb_otg *otg; + + struct device *io_dev; + struct usb_phy_io_ops *io_ops; + void __iomem *io_priv; + + /* for notification of usb_phy_events */ + struct atomic_notifier_head notifier; + + /* to pass extra port status to the root hub */ + u16 port_status; + u16 port_change; + + /* initialize/shutdown the OTG controller */ + int (*init)(struct usb_phy *x); + void (*shutdown)(struct usb_phy *x); + + /* effective for B devices, ignored for A-peripheral */ + int (*set_power)(struct usb_phy *x, + unsigned mA); + + /* for non-OTG B devices: set transceiver into suspend mode */ + int (*set_suspend)(struct usb_phy *x, + int suspend); + +}; + + +/* for board-specific init logic */ +extern int usb_set_transceiver(struct usb_phy *); + +#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE)) +/* sometimes transceivers are accessed only through e.g. ULPI */ +extern void usb_nop_xceiv_register(void); +extern void usb_nop_xceiv_unregister(void); +#else +static inline void usb_nop_xceiv_register(void) +{ +} + +static inline void usb_nop_xceiv_unregister(void) +{ +} +#endif + +/* helpers for direct access thru low-level io interface */ +static inline int usb_phy_io_read(struct usb_phy *x, u32 reg) +{ + if (x->io_ops && x->io_ops->read) + return x->io_ops->read(x, reg); + + return -EINVAL; +} + +static inline int usb_phy_io_write(struct usb_phy *x, u32 val, u32 reg) +{ + if (x->io_ops && x->io_ops->write) + return x->io_ops->write(x, val, reg); + + return -EINVAL; +} + +static inline int +usb_phy_init(struct usb_phy *x) +{ + if (x->init) + return x->init(x); + + return 0; +} + +static inline void +usb_phy_shutdown(struct usb_phy *x) +{ + if (x->shutdown) + x->shutdown(x); +} + +/* for usb host and peripheral controller drivers */ +#ifdef CONFIG_USB_OTG_UTILS +extern struct usb_phy *usb_get_transceiver(void); +extern void usb_put_transceiver(struct usb_phy *); +extern const char *otg_state_string(enum usb_otg_state state); +#else +static inline struct usb_phy *usb_get_transceiver(void) +{ + return NULL; +} + +static inline void usb_put_transceiver(struct usb_phy *x) +{ +} + +static inline const char *otg_state_string(enum usb_otg_state state) +{ + return NULL; +} +#endif + +/* Context: can sleep */ +static inline int +otg_start_hnp(struct usb_otg *otg) +{ + if (otg && otg->start_hnp) + return otg->start_hnp(otg); + + return -ENOTSUPP; +} + +/* Context: can sleep */ +static inline int +otg_set_vbus(struct usb_otg *otg, bool enabled) +{ + if (otg && otg->set_vbus) + return otg->set_vbus(otg, enabled); + + return -ENOTSUPP; +} + +/* for HCDs */ +static inline int +otg_set_host(struct usb_otg *otg, struct usb_bus *host) +{ + if (otg && otg->set_host) + return otg->set_host(otg, host); + + return -ENOTSUPP; +} + +/* for usb peripheral controller drivers */ + +/* Context: can sleep */ +static inline int +otg_set_peripheral(struct usb_otg *otg, struct usb_gadget *periph) +{ + if (otg && otg->set_peripheral) + return otg->set_peripheral(otg, periph); + + return -ENOTSUPP; +} + +static inline int +usb_phy_set_power(struct usb_phy *x, unsigned mA) +{ + if (x && x->set_power) + return x->set_power(x, mA); + return 0; +} + +/* Context: can sleep */ +static inline int +usb_phy_set_suspend(struct usb_phy *x, int suspend) +{ + if (x->set_suspend != NULL) + return x->set_suspend(x, suspend); + else + return 0; +} + +static 
inline int +otg_start_srp(struct usb_otg *otg) +{ + if (otg && otg->start_srp) + return otg->start_srp(otg); + + return -ENOTSUPP; +} + +/* notifiers */ +static inline int +usb_register_notifier(struct usb_phy *x, struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&x->notifier, nb); +} + +static inline void +usb_unregister_notifier(struct usb_phy *x, struct notifier_block *nb) +{ + atomic_notifier_chain_unregister(&x->notifier, nb); +} + +/* for OTG controller drivers (and maybe other stuff) */ +extern int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num); + +#endif /* __LINUX_USB_OTG_H */ diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h new file mode 100644 index 00000000..3e93de7e --- /dev/null +++ b/include/linux/usb/quirks.h @@ -0,0 +1,33 @@ +/* + * This file holds the definitions of quirks found in USB devices. + * Only quirks that affect the whole device, not an interface, + * belong here. + */ + +#ifndef __LINUX_USB_QUIRKS_H +#define __LINUX_USB_QUIRKS_H + +/* string descriptors must not be fetched using a 255-byte read */ +#define USB_QUIRK_STRING_FETCH_255 0x00000001 + +/* device can't resume correctly so reset it instead */ +#define USB_QUIRK_RESET_RESUME 0x00000002 + +/* device can't handle Set-Interface requests */ +#define USB_QUIRK_NO_SET_INTF 0x00000004 + +/* device can't handle its Configuration or Interface strings */ +#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008 + +/*device will morph if reset, don't use reset for handling errors */ +#define USB_QUIRK_RESET_MORPHS 0x00000010 + +/* device has more interface descriptions than the bNumInterfaces count, + and can't handle talking to these interfaces */ +#define USB_QUIRK_HONOR_BNUMINTERFACES 0x00000020 + +/* device needs a pause during initialization, after we read the device + descriptor */ +#define USB_QUIRK_DELAY_INIT 0x00000040 + +#endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h new file mode 100644 index 00000000..55805f9d --- /dev/null +++ b/include/linux/usb/r8a66597.h @@ -0,0 +1,481 @@ +/* + * R8A66597 driver platform data + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __LINUX_USB_R8A66597_H +#define __LINUX_USB_R8A66597_H + +#define R8A66597_PLATDATA_XTAL_12MHZ 0x01 +#define R8A66597_PLATDATA_XTAL_24MHZ 0x02 +#define R8A66597_PLATDATA_XTAL_48MHZ 0x03 + +struct r8a66597_platdata { + /* This callback can control port power instead of DVSTCTR register. 
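+	 *
+	 * An illustrative board-side implementation, not from this header
+	 * (the GPIO number is a made-up example), can be as simple as:
+	 *
+	 *	static void board_usb_port_power(int port, int power)
+	 *	{
+	 *		gpio_set_value(GPIO_USB_PWR_EN, power);
+	 *	}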
*/ + void (*port_power)(int port, int power); + + /* This parameter is for BUSWAIT */ + u16 buswait; + + /* set one = on chip controller, set zero = external controller */ + unsigned on_chip:1; + + /* (external controller only) set R8A66597_PLATDATA_XTAL_nnMHZ */ + unsigned xtal:2; + + /* set one = 3.3V, set zero = 1.5V */ + unsigned vif:1; + + /* set one = big endian, set zero = little endian */ + unsigned endian:1; + + /* (external controller only) set one = WR0_N shorted to WR1_N */ + unsigned wr0_shorted_to_wr1:1; + + /* set one = using SUDMAC */ + unsigned sudmac:1; +}; + +/* Register definitions */ +#define SYSCFG0 0x00 +#define SYSCFG1 0x02 +#define SYSSTS0 0x04 +#define SYSSTS1 0x06 +#define DVSTCTR0 0x08 +#define DVSTCTR1 0x0A +#define TESTMODE 0x0C +#define PINCFG 0x0E +#define DMA0CFG 0x10 +#define DMA1CFG 0x12 +#define CFIFO 0x14 +#define D0FIFO 0x18 +#define D1FIFO 0x1C +#define CFIFOSEL 0x20 +#define CFIFOCTR 0x22 +#define CFIFOSIE 0x24 +#define D0FIFOSEL 0x28 +#define D0FIFOCTR 0x2A +#define D1FIFOSEL 0x2C +#define D1FIFOCTR 0x2E +#define INTENB0 0x30 +#define INTENB1 0x32 +#define INTENB2 0x34 +#define BRDYENB 0x36 +#define NRDYENB 0x38 +#define BEMPENB 0x3A +#define SOFCFG 0x3C +#define INTSTS0 0x40 +#define INTSTS1 0x42 +#define INTSTS2 0x44 +#define BRDYSTS 0x46 +#define NRDYSTS 0x48 +#define BEMPSTS 0x4A +#define FRMNUM 0x4C +#define UFRMNUM 0x4E +#define USBADDR 0x50 +#define USBREQ 0x54 +#define USBVAL 0x56 +#define USBINDX 0x58 +#define USBLENG 0x5A +#define DCPCFG 0x5C +#define DCPMAXP 0x5E +#define DCPCTR 0x60 +#define PIPESEL 0x64 +#define PIPECFG 0x68 +#define PIPEBUF 0x6A +#define PIPEMAXP 0x6C +#define PIPEPERI 0x6E +#define PIPE1CTR 0x70 +#define PIPE2CTR 0x72 +#define PIPE3CTR 0x74 +#define PIPE4CTR 0x76 +#define PIPE5CTR 0x78 +#define PIPE6CTR 0x7A +#define PIPE7CTR 0x7C +#define PIPE8CTR 0x7E +#define PIPE9CTR 0x80 +#define PIPE1TRE 0x90 +#define PIPE1TRN 0x92 +#define PIPE2TRE 0x94 +#define PIPE2TRN 0x96 +#define PIPE3TRE 0x98 +#define PIPE3TRN 0x9A +#define PIPE4TRE 0x9C +#define PIPE4TRN 0x9E +#define PIPE5TRE 0xA0 +#define PIPE5TRN 0xA2 +#define DEVADD0 0xD0 +#define DEVADD1 0xD2 +#define DEVADD2 0xD4 +#define DEVADD3 0xD6 +#define DEVADD4 0xD8 +#define DEVADD5 0xDA +#define DEVADD6 0xDC +#define DEVADD7 0xDE +#define DEVADD8 0xE0 +#define DEVADD9 0xE2 +#define DEVADDA 0xE4 + +/* System Configuration Control Register */ +#define XTAL 0xC000 /* b15-14: Crystal selection */ +#define XTAL48 0x8000 /* 48MHz */ +#define XTAL24 0x4000 /* 24MHz */ +#define XTAL12 0x0000 /* 12MHz */ +#define XCKE 0x2000 /* b13: External clock enable */ +#define PLLC 0x0800 /* b11: PLL control */ +#define SCKE 0x0400 /* b10: USB clock enable */ +#define PCSDIS 0x0200 /* b9: not CS wakeup */ +#define LPSME 0x0100 /* b8: Low power sleep mode */ +#define HSE 0x0080 /* b7: Hi-speed enable */ +#define DCFM 0x0040 /* b6: Controller function select */ +#define DRPD 0x0020 /* b5: D+/- pull down control */ +#define DPRPU 0x0010 /* b4: D+ pull up control */ +#define USBE 0x0001 /* b0: USB module operation enable */ + +/* System Configuration Status Register */ +#define OVCBIT 0x8000 /* b15-14: Over-current bit */ +#define OVCMON 0xC000 /* b15-14: Over-current monitor */ +#define SOFEA 0x0020 /* b5: SOF monitor */ +#define IDMON 0x0004 /* b3: ID-pin monitor */ +#define LNST 0x0003 /* b1-0: D+, D- line status */ +#define SE1 0x0003 /* SE1 */ +#define FS_KSTS 0x0002 /* Full-Speed K State */ +#define FS_JSTS 0x0001 /* Full-Speed J State */ +#define LS_JSTS 0x0002 /* Low-Speed J State */ 
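+/*
+ * Illustrative helper, not part of the original header: the status macros
+ * above are plain bit masks over the 16-bit registers, so a caller holding
+ * a SYSSTS0 value can classify the D+/D- line state like this (the helper
+ * name is made up for the example):
+ */
+static inline int r8a66597_line_is_fs_j(u16 syssts0)
+{
+	/* full-speed idle: J state on D+/D- */
+	return (syssts0 & LNST) == FS_JSTS;
+}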
+#define LS_KSTS 0x0001 /* Low-Speed K State */ +#define SE0 0x0000 /* SE0 */ + +/* Device State Control Register */ +#define EXTLP0 0x0400 /* b10: External port */ +#define VBOUT 0x0200 /* b9: VBUS output */ +#define WKUP 0x0100 /* b8: Remote wakeup */ +#define RWUPE 0x0080 /* b7: Remote wakeup sense */ +#define USBRST 0x0040 /* b6: USB reset enable */ +#define RESUME 0x0020 /* b5: Resume enable */ +#define UACT 0x0010 /* b4: USB bus enable */ +#define RHST 0x0007 /* b1-0: Reset handshake status */ +#define HSPROC 0x0004 /* HS handshake is processing */ +#define HSMODE 0x0003 /* Hi-Speed mode */ +#define FSMODE 0x0002 /* Full-Speed mode */ +#define LSMODE 0x0001 /* Low-Speed mode */ +#define UNDECID 0x0000 /* Undecided */ + +/* Test Mode Register */ +#define UTST 0x000F /* b3-0: Test select */ +#define H_TST_PACKET 0x000C /* HOST TEST Packet */ +#define H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ +#define H_TST_K 0x000A /* HOST TEST K */ +#define H_TST_J 0x0009 /* HOST TEST J */ +#define H_TST_NORMAL 0x0000 /* HOST Normal Mode */ +#define P_TST_PACKET 0x0004 /* PERI TEST Packet */ +#define P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ +#define P_TST_K 0x0002 /* PERI TEST K */ +#define P_TST_J 0x0001 /* PERI TEST J */ +#define P_TST_NORMAL 0x0000 /* PERI Normal Mode */ + +/* Data Pin Configuration Register */ +#define LDRV 0x8000 /* b15: Drive Current Adjust */ +#define VIF1 0x0000 /* VIF = 1.8V */ +#define VIF3 0x8000 /* VIF = 3.3V */ +#define INTA 0x0001 /* b1: USB INT-pin active */ + +/* DMAx Pin Configuration Register */ +#define DREQA 0x4000 /* b14: Dreq active select */ +#define BURST 0x2000 /* b13: Burst mode */ +#define DACKA 0x0400 /* b10: Dack active select */ +#define DFORM 0x0380 /* b9-7: DMA mode select */ +#define CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ +#define CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ +#define CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ +#define SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ +#define DENDA 0x0040 /* b6: Dend active select */ +#define PKTM 0x0020 /* b5: Packet mode */ +#define DENDE 0x0010 /* b4: Dend enable */ +#define OBUS 0x0004 /* b2: OUTbus mode */ + +/* CFIFO/DxFIFO Port Select Register */ +#define RCNT 0x8000 /* b15: Read count mode */ +#define REW 0x4000 /* b14: Buffer rewind */ +#define DCLRM 0x2000 /* b13: DMA buffer clear mode */ +#define DREQE 0x1000 /* b12: DREQ output enable */ +#define MBW_8 0x0000 /* 8bit */ +#define MBW_16 0x0400 /* 16bit */ +#define MBW_32 0x0800 /* 32bit */ +#define BIGEND 0x0100 /* b8: Big endian mode */ +#define BYTE_LITTLE 0x0000 /* little dendian */ +#define BYTE_BIG 0x0100 /* big endifan */ +#define ISEL 0x0020 /* b5: DCP FIFO port direction select */ +#define CURPIPE 0x000F /* b2-0: PIPE select */ + +/* CFIFO/DxFIFO Port Control Register */ +#define BVAL 0x8000 /* b15: Buffer valid flag */ +#define BCLR 0x4000 /* b14: Buffer clear */ +#define FRDY 0x2000 /* b13: FIFO ready */ +#define DTLN 0x0FFF /* b11-0: FIFO received data length */ + +/* Interrupt Enable Register 0 */ +#define VBSE 0x8000 /* b15: VBUS interrupt */ +#define RSME 0x4000 /* b14: Resume interrupt */ +#define SOFE 0x2000 /* b13: Frame update interrupt */ +#define DVSE 0x1000 /* b12: Device state transition interrupt */ +#define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ +#define BEMPE 0x0400 /* b10: Buffer empty interrupt */ +#define NRDYE 0x0200 /* b9: Buffer not ready interrupt */ +#define BRDYE 0x0100 /* b8: Buffer ready interrupt */ + +/* Interrupt Enable 
Register 1 */ +#define OVRCRE 0x8000 /* b15: Over-current interrupt */ +#define BCHGE 0x4000 /* b14: USB us chenge interrupt */ +#define DTCHE 0x1000 /* b12: Detach sense interrupt */ +#define ATTCHE 0x0800 /* b11: Attach sense interrupt */ +#define EOFERRE 0x0040 /* b6: EOF error interrupt */ +#define SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ +#define SACKE 0x0010 /* b4: SETUP ACK interrupt */ + +/* BRDY Interrupt Enable/Status Register */ +#define BRDY9 0x0200 /* b9: PIPE9 */ +#define BRDY8 0x0100 /* b8: PIPE8 */ +#define BRDY7 0x0080 /* b7: PIPE7 */ +#define BRDY6 0x0040 /* b6: PIPE6 */ +#define BRDY5 0x0020 /* b5: PIPE5 */ +#define BRDY4 0x0010 /* b4: PIPE4 */ +#define BRDY3 0x0008 /* b3: PIPE3 */ +#define BRDY2 0x0004 /* b2: PIPE2 */ +#define BRDY1 0x0002 /* b1: PIPE1 */ +#define BRDY0 0x0001 /* b1: PIPE0 */ + +/* NRDY Interrupt Enable/Status Register */ +#define NRDY9 0x0200 /* b9: PIPE9 */ +#define NRDY8 0x0100 /* b8: PIPE8 */ +#define NRDY7 0x0080 /* b7: PIPE7 */ +#define NRDY6 0x0040 /* b6: PIPE6 */ +#define NRDY5 0x0020 /* b5: PIPE5 */ +#define NRDY4 0x0010 /* b4: PIPE4 */ +#define NRDY3 0x0008 /* b3: PIPE3 */ +#define NRDY2 0x0004 /* b2: PIPE2 */ +#define NRDY1 0x0002 /* b1: PIPE1 */ +#define NRDY0 0x0001 /* b1: PIPE0 */ + +/* BEMP Interrupt Enable/Status Register */ +#define BEMP9 0x0200 /* b9: PIPE9 */ +#define BEMP8 0x0100 /* b8: PIPE8 */ +#define BEMP7 0x0080 /* b7: PIPE7 */ +#define BEMP6 0x0040 /* b6: PIPE6 */ +#define BEMP5 0x0020 /* b5: PIPE5 */ +#define BEMP4 0x0010 /* b4: PIPE4 */ +#define BEMP3 0x0008 /* b3: PIPE3 */ +#define BEMP2 0x0004 /* b2: PIPE2 */ +#define BEMP1 0x0002 /* b1: PIPE1 */ +#define BEMP0 0x0001 /* b0: PIPE0 */ + +/* SOF Pin Configuration Register */ +#define TRNENSEL 0x0100 /* b8: Select transaction enable period */ +#define BRDYM 0x0040 /* b6: BRDY clear timing */ +#define INTL 0x0020 /* b5: Interrupt sense select */ +#define EDGESTS 0x0010 /* b4: */ +#define SOFMODE 0x000C /* b3-2: SOF pin select */ +#define SOF_125US 0x0008 /* SOF OUT 125us Frame Signal */ +#define SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ +#define SOF_DISABLE 0x0000 /* SOF OUT Disable */ + +/* Interrupt Status Register 0 */ +#define VBINT 0x8000 /* b15: VBUS interrupt */ +#define RESM 0x4000 /* b14: Resume interrupt */ +#define SOFR 0x2000 /* b13: SOF frame update interrupt */ +#define DVST 0x1000 /* b12: Device state transition interrupt */ +#define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ +#define BEMP 0x0400 /* b10: Buffer empty interrupt */ +#define NRDY 0x0200 /* b9: Buffer not ready interrupt */ +#define BRDY 0x0100 /* b8: Buffer ready interrupt */ +#define VBSTS 0x0080 /* b7: VBUS input port */ +#define DVSQ 0x0070 /* b6-4: Device state */ +#define DS_SPD_CNFG 0x0070 /* Suspend Configured */ +#define DS_SPD_ADDR 0x0060 /* Suspend Address */ +#define DS_SPD_DFLT 0x0050 /* Suspend Default */ +#define DS_SPD_POWR 0x0040 /* Suspend Powered */ +#define DS_SUSP 0x0040 /* Suspend */ +#define DS_CNFG 0x0030 /* Configured */ +#define DS_ADDS 0x0020 /* Address */ +#define DS_DFLT 0x0010 /* Default */ +#define DS_POWR 0x0000 /* Powered */ +#define DVSQS 0x0030 /* b5-4: Device state */ +#define VALID 0x0008 /* b3: Setup packet detected flag */ +#define CTSQ 0x0007 /* b2-0: Control transfer stage */ +#define CS_SQER 0x0006 /* Sequence error */ +#define CS_WRND 0x0005 /* Control write nodata status stage */ +#define CS_WRSS 0x0004 /* Control write status stage */ +#define CS_WRDS 0x0003 /* Control write data stage */ +#define CS_RDSS 0x0002 /* Control 
read status stage */ +#define CS_RDDS 0x0001 /* Control read data stage */ +#define CS_IDST 0x0000 /* Idle or setup stage */ + +/* Interrupt Status Register 1 */ +#define OVRCR 0x8000 /* b15: Over-current interrupt */ +#define BCHG 0x4000 /* b14: USB bus chenge interrupt */ +#define DTCH 0x1000 /* b12: Detach sense interrupt */ +#define ATTCH 0x0800 /* b11: Attach sense interrupt */ +#define EOFERR 0x0040 /* b6: EOF-error interrupt */ +#define SIGN 0x0020 /* b5: Setup ignore interrupt */ +#define SACK 0x0010 /* b4: Setup acknowledge interrupt */ + +/* Frame Number Register */ +#define OVRN 0x8000 /* b15: Overrun error */ +#define CRCE 0x4000 /* b14: Received data error */ +#define FRNM 0x07FF /* b10-0: Frame number */ + +/* Micro Frame Number Register */ +#define UFRNM 0x0007 /* b2-0: Micro frame number */ + +/* Default Control Pipe Maxpacket Size Register */ +/* Pipe Maxpacket Size Register */ +#define DEVSEL 0xF000 /* b15-14: Device address select */ +#define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ + +/* Default Control Pipe Control Register */ +#define BSTS 0x8000 /* b15: Buffer status */ +#define SUREQ 0x4000 /* b14: Send USB request */ +#define CSCLR 0x2000 /* b13: complete-split status clear */ +#define CSSTS 0x1000 /* b12: complete-split status */ +#define SUREQCLR 0x0800 /* b11: stop setup request */ +#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ +#define SQSET 0x0080 /* b7: Sequence toggle bit set */ +#define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ +#define PBUSY 0x0020 /* b5: pipe busy */ +#define PINGE 0x0010 /* b4: ping enable */ +#define CCPL 0x0004 /* b2: Enable control transfer complete */ +#define PID 0x0003 /* b1-0: Response PID */ +#define PID_STALL11 0x0003 /* STALL */ +#define PID_STALL 0x0002 /* STALL */ +#define PID_BUF 0x0001 /* BUF */ +#define PID_NAK 0x0000 /* NAK */ + +/* Pipe Window Select Register */ +#define PIPENM 0x0007 /* b2-0: Pipe select */ + +/* Pipe Configuration Register */ +#define R8A66597_TYP 0xC000 /* b15-14: Transfer type */ +#define R8A66597_ISO 0xC000 /* Isochronous */ +#define R8A66597_INT 0x8000 /* Interrupt */ +#define R8A66597_BULK 0x4000 /* Bulk */ +#define R8A66597_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */ +#define R8A66597_DBLB 0x0200 /* b9: Double buffer mode select */ +#define R8A66597_CNTMD 0x0100 /* b8: Continuous transfer mode select */ +#define R8A66597_SHTNAK 0x0080 /* b7: Transfer end NAK */ +#define R8A66597_DIR 0x0010 /* b4: Transfer direction select */ +#define R8A66597_EPNUM 0x000F /* b3-0: Eendpoint number select */ + +/* Pipe Buffer Configuration Register */ +#define BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ +#define BUFNMB 0x007F /* b6-0: Pipe buffer number */ +#define PIPE0BUF 256 +#define PIPExBUF 64 + +/* Pipe Maxpacket Size Register */ +#define MXPS 0x07FF /* b10-0: Maxpacket size */ + +/* Pipe Cycle Configuration Register */ +#define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ +#define IITV 0x0007 /* b2-0: Isochronous interval */ + +/* Pipex Control Register */ +#define BSTS 0x8000 /* b15: Buffer status */ +#define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ +#define CSCLR 0x2000 /* b13: complete-split status clear */ +#define CSSTS 0x1000 /* b12: complete-split status */ +#define ATREPM 0x0400 /* b10: Auto repeat mode */ +#define ACLRM 0x0200 /* b9: Out buffer auto clear mode */ +#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ +#define SQSET 0x0080 /* b7: Sequence toggle bit set */ +#define SQMON 0x0040 /* 
b6: Sequence toggle bit monitor */ +#define PBUSY 0x0020 /* b5: pipe busy */ +#define PID 0x0003 /* b1-0: Response PID */ + +/* PIPExTRE */ +#define TRENB 0x0200 /* b9: Transaction counter enable */ +#define TRCLR 0x0100 /* b8: Transaction counter clear */ + +/* PIPExTRN */ +#define TRNCNT 0xFFFF /* b15-0: Transaction counter */ + +/* DEVADDx */ +#define UPPHUB 0x7800 +#define HUBPORT 0x0700 +#define USBSPD 0x00C0 +#define RTPORT 0x0001 + +/* SUDMAC registers */ +#define CH0CFG 0x00 +#define CH1CFG 0x04 +#define CH0BA 0x10 +#define CH1BA 0x14 +#define CH0BBC 0x18 +#define CH1BBC 0x1C +#define CH0CA 0x20 +#define CH1CA 0x24 +#define CH0CBC 0x28 +#define CH1CBC 0x2C +#define CH0DEN 0x30 +#define CH1DEN 0x34 +#define DSTSCLR 0x38 +#define DBUFCTRL 0x3C +#define DINTCTRL 0x40 +#define DINTSTS 0x44 +#define DINTSTSCLR 0x48 +#define CH0SHCTRL 0x50 +#define CH1SHCTRL 0x54 + +/* SUDMAC Configuration Registers */ +#define SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ +#define RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ +#define LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ + +/* DMA Enable Registers */ +#define DEN 0x0001 /* b1: DMA Transfer Enable */ + +/* DMA Status Clear Register */ +#define CH1STCLR 0x0002 /* b2: Ch1 DMA Status Clear */ +#define CH0STCLR 0x0001 /* b1: Ch0 DMA Status Clear */ + +/* DMA Buffer Control Register */ +#define CH1BUFW 0x0200 /* b9: Ch1 DMA Buffer Data Transfer Enable */ +#define CH0BUFW 0x0100 /* b8: Ch0 DMA Buffer Data Transfer Enable */ +#define CH1BUFS 0x0002 /* b2: Ch1 DMA Buffer Data Status */ +#define CH0BUFS 0x0001 /* b1: Ch0 DMA Buffer Data Status */ + +/* DMA Interrupt Control Register */ +#define CH1ERRE 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Enable */ +#define CH0ERRE 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Enable */ +#define CH1ENDE 0x0002 /* b2: Ch1 DMA Transfer End Int Enable */ +#define CH0ENDE 0x0001 /* b1: Ch0 DMA Transfer End Int Enable */ + +/* DMA Interrupt Status Register */ +#define CH1ERRS 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Status */ +#define CH0ERRS 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Status */ +#define CH1ENDS 0x0002 /* b2: Ch1 DMA Transfer End Int Status */ +#define CH0ENDS 0x0001 /* b1: Ch0 DMA Transfer End Int Status */ + +/* DMA Interrupt Status Clear Register */ +#define CH1ERRC 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Stat Clear */ +#define CH0ERRC 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Stat Clear */ +#define CH1ENDC 0x0002 /* b2: Ch1 DMA Transfer End Int Stat Clear */ +#define CH0ENDC 0x0001 /* b1: Ch0 DMA Transfer End Int Stat Clear */ + +#endif /* __LINUX_USB_R8A66597_H */ + diff --git a/include/linux/usb/rawbulk.h b/include/linux/usb/rawbulk.h new file mode 100755 index 00000000..05a22b10 --- /dev/null +++ b/include/linux/usb/rawbulk.h @@ -0,0 +1,166 @@ +/* + * Rawbulk Driver from VIA Telecom + * + * Copyright (C) 2011 VIA Telecom, Inc. + * Author: Karfield Chen (kfchen@via-telecom.com) + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __RAWBULK_H__ +#define __RAWBULK_H__ + +#include <linux/usb.h> +#include <linux/usb/ch9.h> +#include <linux/usb/composite.h> +#include <linux/usb/gadget.h> +#include <linux/wakelock.h> +#include <linux/device.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> + +#define INTF_DESC 0 +#define BULKIN_DESC 1 +#define BULKOUT_DESC 2 +#define MAX_DESC_ARRAY 4 + +#define NAME_BUFFSIZE 64 +#define MAX_ATTRIBUTES 10 + +#define MAX_TTY_RX 4 +#define MAX_TTY_RX_PACKAGE 512 +#define MAX_TTY_TX 8 +#define MAX_TTY_TX_PACKAGE 64 + +struct rawbulk_function { + int transfer_id; + const char *longname; + const char *shortname; + struct device *dev; + + /* Controls */ + spinlock_t lock; + int enable: 1; + int activated: 1; /* set when usb enabled */ + int tty_opened: 1; + + struct work_struct activator; /* asynic transaction starter */ + + struct wake_lock keep_awake; + + /* USB Gadget related */ + struct usb_function function; + struct usb_composite_dev *cdev; + struct usb_ep *bulk_out, *bulk_in; + + int rts_state; /* Handshaking pins (outputs) */ + int dtr_state; + int cts_state; /* Handshaking pins (inputs) */ + int dsr_state; + int dcd_state; + int ri_state; + + /* TTY related */ + struct tty_struct *tty; + int tty_minor; + struct tty_port port; + spinlock_t tx_lock; + struct list_head tx_free; + struct list_head tx_inproc; + spinlock_t rx_lock; + struct list_head rx_free; + struct list_head rx_inproc; + struct list_head rx_throttled; + unsigned int last_pushed; + + /* Transfer Controls */ + int nups; + int ndowns; + int upsz; + int downsz; + int splitsz; + int autoreconn; + int pushable; /* Set to use push-way for upstream */ + + /* Descriptors and Strings */ + struct usb_descriptor_header *fs_descs[MAX_DESC_ARRAY]; + struct usb_descriptor_header *hs_descs[MAX_DESC_ARRAY]; + struct usb_string string_defs[2]; + struct usb_gadget_strings string_table; + struct usb_gadget_strings *strings[2]; + struct usb_interface_descriptor interface; + struct usb_endpoint_descriptor fs_bulkin_endpoint; + struct usb_endpoint_descriptor hs_bulkin_endpoint; + struct usb_endpoint_descriptor fs_bulkout_endpoint; + struct usb_endpoint_descriptor hs_bulkout_endpoint; + + /* Sysfs Accesses */ + int max_attrs; + struct device_attribute attr[MAX_ATTRIBUTES]; +}; + +enum transfer_id { + RAWBULK_TID_MODEM, + RAWBULK_TID_ETS, + RAWBULK_TID_AT, + RAWBULK_TID_PCV, + RAWBULK_TID_GPS, + _MAX_TID +}; + +#define RAWBULK_INCEPT_FLAG_ENABLE 0x01 +#define RAWBULK_INCEPT_FLAG_PUSH_WAY 0x02 +typedef int (*rawbulk_intercept_t)(struct usb_interface *interface, unsigned int flags); +typedef void (*rawbulk_autoreconn_callback_t)(int transfer_id); + +struct rawbulk_function *rawbulk_lookup_function(int transfer_id); + +/* rawbulk tty io */ +int rawbulk_register_tty(struct rawbulk_function *fn); +void rawbulk_unregister_tty(struct rawbulk_function *fn); + +int rawbulk_tty_stop_io(struct rawbulk_function *fn); +int rawbulk_tty_start_io(struct rawbulk_function *fn); +int rawbulk_tty_alloc_request(struct rawbulk_function *fn); +void rawbulk_tty_free_request(struct rawbulk_function *fn); + +/* bind/unbind for gadget */ +int rawbulk_bind_function(int transfer_id, struct usb_function *function, + struct usb_ep *bulk_out, struct usb_ep *bulk_in, + rawbulk_autoreconn_callback_t autoreconn_callback); +void rawbulk_unbind_function(int trasfer_id); +int rawbulk_check_enable(struct rawbulk_function *fn); +void rawbulk_disable_function(struct rawbulk_function *fn); + +/* bind/unbind host interfaces */ +int 
rawbulk_bind_host_interface(struct usb_interface *interface, + rawbulk_intercept_t inceptor); +void rawbulk_unbind_host_interface(struct usb_interface *interface); + +/* operations for transactions */ +int rawbulk_start_transactions(int transfer_id, int nups, int ndowns, + int upsz, int downsz, int splitsz, int pushable); +void rawbulk_stop_transactions(int transfer_id); +int rawbulk_halt_transactions(int transfer_id); +int rawbulk_resume_transactions(int transfer_id); + +int rawbulk_suspend_host_interface(int transfer_id, pm_message_t message); +int rawbulk_resume_host_interface(int transfer_id); +int rawbulk_push_upstream_buffer(int transfer_id, const void *buffer, + unsigned int length); + +int rawbulk_forward_ctrlrequest(const struct usb_ctrlrequest *ctrl); + +/* statics and state */ +int rawbulk_transfer_statistics(int transfer_id, char *buf); +int rawbulk_transfer_state(int transfer_id); + +#endif /* __RAWBULK_HEADER_FILE__ */ diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h new file mode 100644 index 00000000..547e59cc --- /dev/null +++ b/include/linux/usb/renesas_usbhs.h @@ -0,0 +1,196 @@ +/* + * Renesas USB + * + * Copyright (C) 2011 Renesas Solutions Corp. + * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ +#ifndef RENESAS_USB_H +#define RENESAS_USB_H +#include <linux/platform_device.h> +#include <linux/usb/ch9.h> + +/* + * module type + * + * it will be return value from get_id + */ +enum { + USBHS_HOST = 0, + USBHS_GADGET, + USBHS_MAX, +}; + +/* + * callback functions table for driver + * + * These functions are called from platform for driver. + * Callback function's pointer will be set before + * renesas_usbhs_platform_callback :: hardware_init was called + */ +struct renesas_usbhs_driver_callback { + int (*notify_hotplug)(struct platform_device *pdev); +}; + +/* + * callback functions for platform + * + * These functions are called from driver for platform + */ +struct renesas_usbhs_platform_callback { + + /* + * option: + * + * Hardware init function for platform. + * it is called when driver was probed. + */ + int (*hardware_init)(struct platform_device *pdev); + + /* + * option: + * + * Hardware exit function for platform. + * it is called when driver was removed + */ + void (*hardware_exit)(struct platform_device *pdev); + + /* + * option: + * + * for board specific clock control + */ + void (*power_ctrl)(struct platform_device *pdev, + void __iomem *base, int enable); + + /* + * option: + * + * Phy reset for platform + */ + void (*phy_reset)(struct platform_device *pdev); + + /* + * get USB ID function + * - USBHS_HOST + * - USBHS_GADGET + */ + int (*get_id)(struct platform_device *pdev); + + /* + * get VBUS status function. + */ + int (*get_vbus)(struct platform_device *pdev); + + /* + * option: + * + * VBUS control is needed for Host + */ + int (*set_vbus)(struct platform_device *pdev, int enable); +}; + +/* + * parameters for renesas usbhs + * + * some register needs USB chip specific parameters. 
+ * This struct show it to driver + */ +struct renesas_usbhs_driver_param { + /* + * pipe settings + */ + u32 *pipe_type; /* array of USB_ENDPOINT_XFER_xxx (from ep0) */ + int pipe_size; /* pipe_type array size */ + + /* + * option: + * + * for BUSWAIT :: BWAIT + * see + * renesas_usbhs/common.c :: usbhsc_set_buswait() + * */ + int buswait_bwait; + + /* + * option: + * + * delay time from notify_hotplug callback + */ + int detection_delay; /* msec */ + + /* + * option: + * + * dma id for dmaengine + */ + int d0_tx_id; + int d0_rx_id; + int d1_tx_id; + int d1_rx_id; + + /* + * option: + * + * pio <--> dma border. + */ + int pio_dma_border; /* default is 64byte */ + + /* + * option: + */ + u32 has_otg:1; /* for controlling PWEN/EXTLP */ + u32 has_sudmac:1; /* for SUDMAC */ +}; + +/* + * option: + * + * platform information for renesas_usbhs driver. + */ +struct renesas_usbhs_platform_info { + /* + * option: + * + * platform set these functions before + * call platform_add_devices if needed + */ + struct renesas_usbhs_platform_callback platform_callback; + + /* + * driver set these callback functions pointer. + * platform can use it on callback functions + */ + struct renesas_usbhs_driver_callback driver_callback; + + /* + * option: + * + * driver use these param for some register + */ + struct renesas_usbhs_driver_param driver_param; +}; + +/* + * macro for platform + */ +#define renesas_usbhs_get_info(pdev)\ + ((struct renesas_usbhs_platform_info *)(pdev)->dev.platform_data) + +#define renesas_usbhs_call_notify_hotplug(pdev) \ + ({ \ + struct renesas_usbhs_driver_callback *dc; \ + dc = &(renesas_usbhs_get_info(pdev)->driver_callback); \ + if (dc && dc->notify_hotplug) \ + dc->notify_hotplug(pdev); \ + }) +#endif /* RENESAS_USB_H */ diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h new file mode 100644 index 00000000..88fceb71 --- /dev/null +++ b/include/linux/usb/rndis_host.h @@ -0,0 +1,272 @@ +/* + * Host Side support for RNDIS Networking Links + * Copyright (C) 2005 by David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_USB_RNDIS_HOST_H +#define __LINUX_USB_RNDIS_HOST_H + +/* + * CONTROL uses CDC "encapsulated commands" with funky notifications. + * - control-out: SEND_ENCAPSULATED + * - interrupt-in: RESPONSE_AVAILABLE + * - control-in: GET_ENCAPSULATED + * + * We'll try to ignore the RESPONSE_AVAILABLE notifications. + * + * REVISIT some RNDIS implementations seem to have curious issues still + * to be resolved. + */ +struct rndis_msg_hdr { + __le32 msg_type; /* RNDIS_MSG_* */ + __le32 msg_len; + /* followed by data that varies between messages */ + __le32 request_id; + __le32 status; + /* ... 
and more */ +} __attribute__ ((packed)); + +/* MS-Windows uses this strange size, but RNDIS spec says 1024 minimum */ +#define CONTROL_BUFFER_SIZE 1025 + +/* RNDIS defines an (absurdly huge) 10 second control timeout, + * but ActiveSync seems to use a more usual 5 second timeout + * (which matches the USB 2.0 spec). + */ +#define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000) + +#define RNDIS_MSG_COMPLETION cpu_to_le32(0x80000000) + +/* codes for "msg_type" field of rndis messages; + * only the data channel uses packet messages (maybe batched); + * everything else goes on the control channel. + */ +#define RNDIS_MSG_PACKET cpu_to_le32(0x00000001) /* 1-N packets */ +#define RNDIS_MSG_INIT cpu_to_le32(0x00000002) +#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_HALT cpu_to_le32(0x00000003) +#define RNDIS_MSG_QUERY cpu_to_le32(0x00000004) +#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_SET cpu_to_le32(0x00000005) +#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_RESET cpu_to_le32(0x00000006) +#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_INDICATE cpu_to_le32(0x00000007) +#define RNDIS_MSG_KEEPALIVE cpu_to_le32(0x00000008) +#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION) + +/* codes for "status" field of completion messages */ +#define RNDIS_STATUS_SUCCESS cpu_to_le32(0x00000000) +#define RNDIS_STATUS_FAILURE cpu_to_le32(0xc0000001) +#define RNDIS_STATUS_INVALID_DATA cpu_to_le32(0xc0010015) +#define RNDIS_STATUS_NOT_SUPPORTED cpu_to_le32(0xc00000bb) +#define RNDIS_STATUS_MEDIA_CONNECT cpu_to_le32(0x4001000b) +#define RNDIS_STATUS_MEDIA_DISCONNECT cpu_to_le32(0x4001000c) +#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION cpu_to_le32(0x40010012) + +/* codes for OID_GEN_PHYSICAL_MEDIUM */ +#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED cpu_to_le32(0x00000000) +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN cpu_to_le32(0x00000001) +#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM cpu_to_le32(0x00000002) +#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE cpu_to_le32(0x00000003) +#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE cpu_to_le32(0x00000004) +#define RNDIS_PHYSICAL_MEDIUM_DSL cpu_to_le32(0x00000005) +#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL cpu_to_le32(0x00000006) +#define RNDIS_PHYSICAL_MEDIUM_1394 cpu_to_le32(0x00000007) +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN cpu_to_le32(0x00000008) +#define RNDIS_PHYSICAL_MEDIUM_MAX cpu_to_le32(0x00000009) + +struct rndis_data_hdr { + __le32 msg_type; /* RNDIS_MSG_PACKET */ + __le32 msg_len; /* rndis_data_hdr + data_len + pad */ + __le32 data_offset; /* 36 -- right after header */ + __le32 data_len; /* ... 
real packet size */ + + __le32 oob_data_offset; /* zero */ + __le32 oob_data_len; /* zero */ + __le32 num_oob; /* zero */ + __le32 packet_data_offset; /* zero */ + + __le32 packet_data_len; /* zero */ + __le32 vc_handle; /* zero */ + __le32 reserved; /* zero */ +} __attribute__ ((packed)); + +struct rndis_init { /* OUT */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_INIT */ + __le32 msg_len; /* 24 */ + __le32 request_id; + __le32 major_version; /* of rndis (1.0) */ + __le32 minor_version; + __le32 max_transfer_size; +} __attribute__ ((packed)); + +struct rndis_init_c { /* IN */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_INIT_C */ + __le32 msg_len; + __le32 request_id; + __le32 status; + __le32 major_version; /* of rndis (1.0) */ + __le32 minor_version; + __le32 device_flags; + __le32 medium; /* zero == 802.3 */ + __le32 max_packets_per_message; + __le32 max_transfer_size; + __le32 packet_alignment; /* max 7; (1<<n) bytes */ + __le32 af_list_offset; /* zero */ + __le32 af_list_size; /* zero */ +} __attribute__ ((packed)); + +struct rndis_halt { /* OUT (no reply) */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_HALT */ + __le32 msg_len; + __le32 request_id; +} __attribute__ ((packed)); + +struct rndis_query { /* OUT */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_QUERY */ + __le32 msg_len; + __le32 request_id; + __le32 oid; + __le32 len; + __le32 offset; +/*?*/ __le32 handle; /* zero */ +} __attribute__ ((packed)); + +struct rndis_query_c { /* IN */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_QUERY_C */ + __le32 msg_len; + __le32 request_id; + __le32 status; + __le32 len; + __le32 offset; +} __attribute__ ((packed)); + +struct rndis_set { /* OUT */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_SET */ + __le32 msg_len; + __le32 request_id; + __le32 oid; + __le32 len; + __le32 offset; +/*?*/ __le32 handle; /* zero */ +} __attribute__ ((packed)); + +struct rndis_set_c { /* IN */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_SET_C */ + __le32 msg_len; + __le32 request_id; + __le32 status; +} __attribute__ ((packed)); + +struct rndis_reset { /* IN */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_RESET */ + __le32 msg_len; + __le32 reserved; +} __attribute__ ((packed)); + +struct rndis_reset_c { /* OUT */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_RESET_C */ + __le32 msg_len; + __le32 status; + __le32 addressing_lost; +} __attribute__ ((packed)); + +struct rndis_indicate { /* IN (unrequested) */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_INDICATE */ + __le32 msg_len; + __le32 status; + __le32 length; + __le32 offset; +/**/ __le32 diag_status; + __le32 error_offset; +/**/ __le32 message; +} __attribute__ ((packed)); + +struct rndis_keepalive { /* OUT (optionally IN) */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_KEEPALIVE */ + __le32 msg_len; + __le32 request_id; +} __attribute__ ((packed)); + +struct rndis_keepalive_c { /* IN (optionally OUT) */ + /* header and: */ + __le32 msg_type; /* RNDIS_MSG_KEEPALIVE_C */ + __le32 msg_len; + __le32 request_id; + __le32 status; +} __attribute__ ((packed)); + +/* NOTE: about 30 OIDs are "mandatory" for peripherals to support ... and + * there are gobs more that may optionally be supported. We'll avoid as much + * of that mess as possible. 
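As an illustration of the control-channel framing above, the sketch below fills in a struct rndis_init before handing it to rndis_command() (declared a few lines further down in this header). The example_rndis_init name is hypothetical, request_id handling is left to the submission helper, and CONTROL_BUFFER_SIZE is used as the advertised transfer size purely for illustration.

/* Sketch only: build an RNDIS INITIALIZE message in a preallocated
 * CONTROL_BUFFER_SIZE buffer and submit it over the control channel. */
static int example_rndis_init(struct usbnet *dev, struct rndis_msg_hdr *buf)
{
        struct rndis_init *init = (void *)buf;

        memset(init, 0, sizeof(*init));
        init->msg_type = RNDIS_MSG_INIT;        /* already little-endian */
        init->msg_len = cpu_to_le32(sizeof(*init));     /* 24 bytes */
        /* request_id is filled in by the submission helper in this sketch */
        init->major_version = cpu_to_le32(1);   /* RNDIS 1.0 */
        init->minor_version = cpu_to_le32(0);
        /* arbitrary for the sketch; real drivers advertise their rx size */
        init->max_transfer_size = cpu_to_le32(CONTROL_BUFFER_SIZE);

        return rndis_command(dev, buf, CONTROL_BUFFER_SIZE);
}

The completion arrives as a struct rndis_init_c carrying the same request_id; matching on that id and comparing status with RNDIS_STATUS_SUCCESS is the caller's responsibility.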
+ */ +#define OID_802_3_PERMANENT_ADDRESS cpu_to_le32(0x01010101) +#define OID_GEN_MAXIMUM_FRAME_SIZE cpu_to_le32(0x00010106) +#define OID_GEN_CURRENT_PACKET_FILTER cpu_to_le32(0x0001010e) +#define OID_GEN_PHYSICAL_MEDIUM cpu_to_le32(0x00010202) + +/* packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */ +#define RNDIS_PACKET_TYPE_DIRECTED cpu_to_le32(0x00000001) +#define RNDIS_PACKET_TYPE_MULTICAST cpu_to_le32(0x00000002) +#define RNDIS_PACKET_TYPE_ALL_MULTICAST cpu_to_le32(0x00000004) +#define RNDIS_PACKET_TYPE_BROADCAST cpu_to_le32(0x00000008) +#define RNDIS_PACKET_TYPE_SOURCE_ROUTING cpu_to_le32(0x00000010) +#define RNDIS_PACKET_TYPE_PROMISCUOUS cpu_to_le32(0x00000020) +#define RNDIS_PACKET_TYPE_SMT cpu_to_le32(0x00000040) +#define RNDIS_PACKET_TYPE_ALL_LOCAL cpu_to_le32(0x00000080) +#define RNDIS_PACKET_TYPE_GROUP cpu_to_le32(0x00001000) +#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL cpu_to_le32(0x00002000) +#define RNDIS_PACKET_TYPE_FUNCTIONAL cpu_to_le32(0x00004000) +#define RNDIS_PACKET_TYPE_MAC_FRAME cpu_to_le32(0x00008000) + +/* default filter used with RNDIS devices */ +#define RNDIS_DEFAULT_FILTER ( \ + RNDIS_PACKET_TYPE_DIRECTED | \ + RNDIS_PACKET_TYPE_BROADCAST | \ + RNDIS_PACKET_TYPE_ALL_MULTICAST | \ + RNDIS_PACKET_TYPE_PROMISCUOUS) + +/* Flags to require specific physical medium type for generic_rndis_bind() */ +#define FLAG_RNDIS_PHYM_NOT_WIRELESS 0x0001 +#define FLAG_RNDIS_PHYM_WIRELESS 0x0002 + +/* Flags for driver_info::data */ +#define RNDIS_DRIVER_DATA_POLL_STATUS 1 /* poll status before control */ + +extern void rndis_status(struct usbnet *dev, struct urb *urb); +extern int +rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen); +extern int +generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags); +extern void rndis_unbind(struct usbnet *dev, struct usb_interface *intf); +extern int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb); +extern struct sk_buff * +rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags); + +#endif /* __LINUX_USB_RNDIS_HOST_H */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h new file mode 100644 index 00000000..47428388 --- /dev/null +++ b/include/linux/usb/serial.h @@ -0,0 +1,417 @@ +/* + * USB Serial Converter stuff + * + * Copyright (C) 1999 - 2005 + * Greg Kroah-Hartman (greg@kroah.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + */ + +#ifndef __LINUX_USB_SERIAL_H +#define __LINUX_USB_SERIAL_H + +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/sysrq.h> +#include <linux/kfifo.h> + +#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ +#define SERIAL_TTY_MINORS 254 /* loads of devices :) */ +#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */ + +/* The maximum number of ports one device can grab at once */ +#define MAX_NUM_PORTS 8 + +/* parity check flag */ +#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) + +/* USB serial flags */ +#define USB_SERIAL_WRITE_BUSY 0 + +/** + * usb_serial_port: structure for the specific ports of a device. + * @serial: pointer back to the struct usb_serial owner of this port. + * @port: pointer to the corresponding tty_port for this port. + * @lock: spinlock to grab when updating portions of this structure. + * @number: the number of the port (the minor number). 
+ * @interrupt_in_buffer: pointer to the interrupt in buffer for this port. + * @interrupt_in_urb: pointer to the interrupt in struct urb for this port. + * @interrupt_in_endpointAddress: endpoint address for the interrupt in pipe + * for this port. + * @interrupt_out_buffer: pointer to the interrupt out buffer for this port. + * @interrupt_out_size: the size of the interrupt_out_buffer, in bytes. + * @interrupt_out_urb: pointer to the interrupt out struct urb for this port. + * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe + * for this port. + * @bulk_in_buffer: pointer to the bulk in buffer for this port. + * @bulk_in_size: the size of the bulk_in_buffer, in bytes. + * @read_urb: pointer to the bulk in struct urb for this port. + * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this + * port. + * @bulk_in_buffers: pointers to the bulk in buffers for this port + * @read_urbs: pointers to the bulk in urbs for this port + * @read_urbs_free: status bitmap the for bulk in urbs + * @bulk_out_buffer: pointer to the bulk out buffer for this port. + * @bulk_out_size: the size of the bulk_out_buffer, in bytes. + * @write_urb: pointer to the bulk out struct urb for this port. + * @write_fifo: kfifo used to buffer outgoing data + * @bulk_out_buffers: pointers to the bulk out buffers for this port + * @write_urbs: pointers to the bulk out urbs for this port + * @write_urbs_free: status bitmap the for bulk out urbs + * @tx_bytes: number of bytes currently in host stack queues + * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this + * port. + * @flags: usb serial port flags + * @write_wait: a wait_queue_head_t used by the port. + * @work: work queue entry for the line discipline waking up. + * @throttled: nonzero if the read urb is inactive to throttle the device + * @throttle_req: nonzero if the tty wants to throttle us + * @dev: pointer to the serial device + * + * This structure is used by the usb-serial core and drivers for the specific + * ports of a device. 
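The structure itself follows immediately below. As a hedged illustration of the @throttled/@throttle_req bookkeeping documented above, a single-read-URB port driver might wire up its throttle hooks roughly as in this sketch; the example_* names are hypothetical, error handling is omitted, and a multi-URB driver would operate on @read_urbs/@read_urbs_free instead.

/* Sketch: cooperate with tty flow control using the throttle flags
 * described above (single read URB, error handling omitted). */
static void example_throttle(struct tty_struct *tty)
{
        struct usb_serial_port *port = tty->driver_data;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        port->throttle_req = 1;         /* read path stops resubmitting */
        spin_unlock_irqrestore(&port->lock, flags);
}

static void example_unthrottle(struct tty_struct *tty)
{
        struct usb_serial_port *port = tty->driver_data;
        int was_throttled;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        was_throttled = port->throttled;
        port->throttled = port->throttle_req = 0;
        spin_unlock_irqrestore(&port->lock, flags);

        if (was_throttled)
                usb_submit_urb(port->read_urb, GFP_KERNEL); /* restart reads */
}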
+ */ +struct usb_serial_port { + struct usb_serial *serial; + struct tty_port port; + spinlock_t lock; + unsigned char number; + + unsigned char *interrupt_in_buffer; + struct urb *interrupt_in_urb; + __u8 interrupt_in_endpointAddress; + + unsigned char *interrupt_out_buffer; + int interrupt_out_size; + struct urb *interrupt_out_urb; + __u8 interrupt_out_endpointAddress; + + unsigned char *bulk_in_buffer; + int bulk_in_size; + struct urb *read_urb; + __u8 bulk_in_endpointAddress; + + unsigned char *bulk_in_buffers[2]; + struct urb *read_urbs[2]; + unsigned long read_urbs_free; + + unsigned char *bulk_out_buffer; + int bulk_out_size; + struct urb *write_urb; + struct kfifo write_fifo; + + unsigned char *bulk_out_buffers[2]; + struct urb *write_urbs[2]; + unsigned long write_urbs_free; + __u8 bulk_out_endpointAddress; + + int tx_bytes; + + unsigned long flags; + wait_queue_head_t write_wait; + struct work_struct work; + char throttled; + char throttle_req; + unsigned long sysrq; /* sysrq timeout */ + struct device dev; +}; +#define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev) + +/* get and set the port private data pointer helper functions */ +static inline void *usb_get_serial_port_data(struct usb_serial_port *port) +{ + return dev_get_drvdata(&port->dev); +} + +static inline void usb_set_serial_port_data(struct usb_serial_port *port, + void *data) +{ + dev_set_drvdata(&port->dev, data); +} + +/** + * usb_serial - structure used by the usb-serial core for a device + * @dev: pointer to the struct usb_device for this device + * @type: pointer to the struct usb_serial_driver for this device + * @interface: pointer to the struct usb_interface for this device + * @minor: the starting minor number for this device + * @num_ports: the number of ports this device has + * @num_interrupt_in: number of interrupt in endpoints we have + * @num_interrupt_out: number of interrupt out endpoints we have + * @num_bulk_in: number of bulk in endpoints we have + * @num_bulk_out: number of bulk out endpoints we have + * @port: array of struct usb_serial_port structures for the different ports. + * @private: place to put any driver specific information that is needed. The + * usb-serial driver is required to manage this data, the usb-serial core + * will not touch this. Use usb_get_serial_data() and + * usb_set_serial_data() to access this. + */ +struct usb_serial { + struct usb_device *dev; + struct usb_serial_driver *type; + struct usb_interface *interface; + unsigned char disconnected:1; + unsigned char suspending:1; + unsigned char attached:1; + unsigned char minor; + unsigned char num_ports; + unsigned char num_port_pointers; + char num_interrupt_in; + char num_interrupt_out; + char num_bulk_in; + char num_bulk_out; + struct usb_serial_port *port[MAX_NUM_PORTS]; + struct kref kref; + struct mutex disc_mutex; + void *private; +}; +#define to_usb_serial(d) container_of(d, struct usb_serial, kref) + +/* get and set the serial private data pointer helper functions */ +static inline void *usb_get_serial_data(struct usb_serial *serial) +{ + return serial->private; +} + +static inline void usb_set_serial_data(struct usb_serial *serial, void *data) +{ + serial->private = data; +} + +/** + * usb_serial_driver - describes a usb serial driver + * @description: pointer to a string that describes this driver. This string + * used in the syslog messages when a device is inserted or removed. 
+ * @id_table: pointer to a list of usb_device_id structures that define all + * of the devices this structure can support. + * @num_ports: the number of different ports this device will have. + * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer + * (0 = end-point size) + * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) + * @calc_num_ports: pointer to a function to determine how many ports this + * device has dynamically. It will be called after the probe() + * callback is called, but before attach() + * @probe: pointer to the driver's probe function. + * This will be called when the device is inserted into the system, + * but before the device has been fully initialized by the usb_serial + * subsystem. Use this function to download any firmware to the device, + * or any other early initialization that might be needed. + * Return 0 to continue on with the initialization sequence. Anything + * else will abort it. + * @attach: pointer to the driver's attach function. + * This will be called when the struct usb_serial structure is fully set + * set up. Do any local initialization of the device, or any private + * memory structure allocation at this point in time. + * @disconnect: pointer to the driver's disconnect function. This will be + * called when the device is unplugged or unbound from the driver. + * @release: pointer to the driver's release function. This will be called + * when the usb_serial data structure is about to be destroyed. + * @usb_driver: pointer to the struct usb_driver that controls this + * device. This is necessary to allow dynamic ids to be added to + * the driver from sysfs. + * + * This structure is defines a USB Serial driver. It provides all of + * the information that the USB serial core code needs. If the function + * pointers are defined, then the USB serial core code will call them when + * the corresponding tty port functions are called. If they are not + * called, the generic serial function will be used instead. + * + * The driver.owner field should be set to the module owner of this driver. + * The driver.name field should be set to the name of this driver (remember + * it will show up in sysfs, so it needs to be short and to the point. + * Using the module name is a good idea.) 
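As a hedged sketch of the @attach/@release contract just described (struct usb_serial_driver itself is defined right below), a driver can allocate its per-device state in attach() and stash it with the usb_set_serial_data() helper defined earlier in this header. Everything named example_* is hypothetical.

/* Sketch: per-device private state managed across attach()/release(). */
struct example_serial_private {
        int configured;
};

static int example_attach(struct usb_serial *serial)
{
        struct example_serial_private *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        usb_set_serial_data(serial, priv);
        return 0;       /* 0 lets the port setup continue */
}

static void example_release(struct usb_serial *serial)
{
        kfree(usb_get_serial_data(serial));
}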
+ */ +struct usb_serial_driver { + const char *description; + const struct usb_device_id *id_table; + char num_ports; + + struct list_head driver_list; + struct device_driver driver; + struct usb_driver *usb_driver; + struct usb_dynids dynids; + + size_t bulk_in_size; + size_t bulk_out_size; + + int (*probe)(struct usb_serial *serial, const struct usb_device_id *id); + int (*attach)(struct usb_serial *serial); + int (*calc_num_ports) (struct usb_serial *serial); + + void (*disconnect)(struct usb_serial *serial); + void (*release)(struct usb_serial *serial); + + int (*port_probe)(struct usb_serial_port *port); + int (*port_remove)(struct usb_serial_port *port); + + int (*suspend)(struct usb_serial *serial, pm_message_t message); + int (*resume)(struct usb_serial *serial); + + /* serial function calls */ + /* Called by console and by the tty layer */ + int (*open)(struct tty_struct *tty, struct usb_serial_port *port); + void (*close)(struct usb_serial_port *port); + int (*write)(struct tty_struct *tty, struct usb_serial_port *port, + const unsigned char *buf, int count); + /* Called only by the tty layer */ + int (*write_room)(struct tty_struct *tty); + int (*ioctl)(struct tty_struct *tty, + unsigned int cmd, unsigned long arg); + void (*set_termios)(struct tty_struct *tty, + struct usb_serial_port *port, struct ktermios *old); + void (*break_ctl)(struct tty_struct *tty, int break_state); + int (*chars_in_buffer)(struct tty_struct *tty); + void (*throttle)(struct tty_struct *tty); + void (*unthrottle)(struct tty_struct *tty); + int (*tiocmget)(struct tty_struct *tty); + int (*tiocmset)(struct tty_struct *tty, + unsigned int set, unsigned int clear); + int (*get_icount)(struct tty_struct *tty, + struct serial_icounter_struct *icount); + /* Called by the tty layer for port level work. 
There may or may not + be an attached tty at this point */ + void (*dtr_rts)(struct usb_serial_port *port, int on); + int (*carrier_raised)(struct usb_serial_port *port); + /* Called by the usb serial hooks to allow the user to rework the + termios state */ + void (*init_termios)(struct tty_struct *tty); + /* USB events */ + void (*read_int_callback)(struct urb *urb); + void (*write_int_callback)(struct urb *urb); + void (*read_bulk_callback)(struct urb *urb); + void (*write_bulk_callback)(struct urb *urb); + /* Called by the generic read bulk callback */ + void (*process_read_urb)(struct urb *urb); + /* Called by the generic write implementation */ + int (*prepare_write_buffer)(struct usb_serial_port *port, + void *dest, size_t size); +}; +#define to_usb_serial_driver(d) \ + container_of(d, struct usb_serial_driver, driver) + +extern int usb_serial_register_drivers(struct usb_driver *udriver, + struct usb_serial_driver * const serial_drivers[]); +extern void usb_serial_deregister_drivers(struct usb_driver *udriver, + struct usb_serial_driver * const serial_drivers[]); +extern void usb_serial_port_softint(struct usb_serial_port *port); + +extern int usb_serial_probe(struct usb_interface *iface, + const struct usb_device_id *id); +extern void usb_serial_disconnect(struct usb_interface *iface); + +extern int usb_serial_suspend(struct usb_interface *intf, pm_message_t message); +extern int usb_serial_resume(struct usb_interface *intf); + +extern int ezusb_writememory(struct usb_serial *serial, int address, + unsigned char *data, int length, __u8 bRequest); +extern int ezusb_set_reset(struct usb_serial *serial, unsigned char reset_bit); + +/* USB Serial console functions */ +#ifdef CONFIG_USB_SERIAL_CONSOLE +extern void usb_serial_console_init(int debug, int minor); +extern void usb_serial_console_exit(void); +extern void usb_serial_console_disconnect(struct usb_serial *serial); +#else +static inline void usb_serial_console_init(int debug, int minor) { } +static inline void usb_serial_console_exit(void) { } +static inline void usb_serial_console_disconnect(struct usb_serial *serial) {} +#endif + +/* Functions needed by other parts of the usbserial core */ +extern struct usb_serial *usb_serial_get_by_index(unsigned int minor); +extern void usb_serial_put(struct usb_serial *serial); +extern int usb_serial_generic_open(struct tty_struct *tty, + struct usb_serial_port *port); +extern int usb_serial_generic_write(struct tty_struct *tty, + struct usb_serial_port *port, const unsigned char *buf, int count); +extern void usb_serial_generic_close(struct usb_serial_port *port); +extern int usb_serial_generic_resume(struct usb_serial *serial); +extern int usb_serial_generic_write_room(struct tty_struct *tty); +extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); +extern void usb_serial_generic_read_bulk_callback(struct urb *urb); +extern void usb_serial_generic_write_bulk_callback(struct urb *urb); +extern void usb_serial_generic_throttle(struct tty_struct *tty); +extern void usb_serial_generic_unthrottle(struct tty_struct *tty); +extern void usb_serial_generic_disconnect(struct usb_serial *serial); +extern void usb_serial_generic_release(struct usb_serial *serial); +extern int usb_serial_generic_register(int debug); +extern void usb_serial_generic_deregister(void); +extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, + gfp_t mem_flags); +extern void usb_serial_generic_process_read_urb(struct urb *urb); +extern int usb_serial_generic_prepare_write_buffer(struct 
usb_serial_port *port, + void *dest, size_t size); +extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port, + unsigned int ch); +extern int usb_serial_handle_break(struct usb_serial_port *port); +extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, + struct tty_struct *tty, + unsigned int status); + + +extern int usb_serial_bus_register(struct usb_serial_driver *device); +extern void usb_serial_bus_deregister(struct usb_serial_driver *device); + +extern struct usb_serial_driver usb_serial_generic_device; +extern struct bus_type usb_serial_bus_type; +extern struct tty_driver *usb_serial_tty_driver; + +static inline void usb_serial_debug_data(int debug, + struct device *dev, + const char *function, int size, + const unsigned char *data) +{ + int i; + + if (debug) { + dev_printk(KERN_DEBUG, dev, "%s - length = %d, data = ", + function, size); + for (i = 0; i < size; ++i) + printk("%.2x ", data[i]); + printk("\n"); + } +} + +/* Use our own dbg macro */ +#undef dbg +#define dbg(format, arg...) \ +do { \ + if (debug) \ + printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \ +} while (0) + +/* + * Macro for reporting errors in write path to avoid inifinite loop + * when port is used as a console. + */ +#define dev_err_console(usport, fmt, ...) \ +do { \ + static bool __print_once; \ + struct usb_serial_port *__port = (usport); \ + \ + if (!__port->port.console || !__print_once) { \ + __print_once = true; \ + dev_err(&__port->dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) + +/* + * module_usb_serial_driver() - Helper macro for registering a USB Serial driver + * @__usb_driver: usb_driver struct to register + * @__serial_drivers: list of usb_serial drivers to register + * + * Helper macro for USB serial drivers which do not do anything special + * in module init/exit. This eliminates a lot of boilerplate. Each + * module may only use this macro once, and calling it replaces + * module_init() and module_exit() + * + */ +#define module_usb_serial_driver(__usb_driver, __serial_drivers) \ + module_driver(__usb_driver, usb_serial_register_drivers, \ + usb_serial_deregister_drivers, __serial_drivers) + +#endif /* __LINUX_USB_SERIAL_H */ + diff --git a/include/linux/usb/sl811.h b/include/linux/usb/sl811.h new file mode 100644 index 00000000..3afe4d16 --- /dev/null +++ b/include/linux/usb/sl811.h @@ -0,0 +1,29 @@ +/* + * board initialization should put one of these into dev->platform_data + * and place the sl811hs onto platform_bus named "sl811-hcd". 
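A short, hedged example of such board code follows. The structure members used (potpg, power, port_power) are the ones declared just below; the GPIO number, the example_* names and the platform device layout are assumptions made for the sketch.

/* Board-support sketch: hand the "sl811-hcd" platform device its
 * platform data.  The GPIO and device id are board-specific guesses. */
#define EXAMPLE_GPIO_USB_PWR    42      /* hypothetical power-enable GPIO */

static void example_sl811_port_power(struct device *dev, int is_on)
{
        gpio_set_value(EXAMPLE_GPIO_USB_PWR, is_on);
}

static struct sl811_platform_data example_sl811_data = {
        .potpg          = 10,   /* 20 ms from power-on to power-good */
        .power          = 250,  /* 500 mA available on the port */
        .port_power     = example_sl811_port_power,
};

static struct platform_device example_sl811_device = {
        .name   = "sl811-hcd",
        .id     = -1,
        .dev    = {
                .platform_data = &example_sl811_data,
        },
};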
+ */ + +#ifndef __LINUX_USB_SL811_H +#define __LINUX_USB_SL811_H + +struct sl811_platform_data { + unsigned can_wakeup:1; + + /* given port_power, msec/2 after power on till power good */ + u8 potpg; + + /* mA/2 power supplied on this port (max = default = 250) */ + u8 power; + + /* sl811 relies on an external source of VBUS current */ + void (*port_power)(struct device *dev, int is_on); + + /* pulse sl811 nRST (probably with a GPIO) */ + void (*reset)(struct device *dev); + + /* some boards need something like these: */ + /* int (*check_overcurrent)(struct device *dev); */ + /* void (*clock_enable)(struct device *dev, int is_on); */ +}; + +#endif /* __LINUX_USB_SL811_H */ diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h new file mode 100644 index 00000000..cb33fff2 --- /dev/null +++ b/include/linux/usb/storage.h @@ -0,0 +1,86 @@ +#ifndef __LINUX_USB_STORAGE_H +#define __LINUX_USB_STORAGE_H + +/* + * linux/usb/storage.h + * + * Copyright Matthew Wilcox for Intel Corp, 2010 + * + * This file contains definitions taken from the + * USB Mass Storage Class Specification Overview + * + * Distributed under the terms of the GNU GPL, version two. + */ + +/* Storage subclass codes */ + +#define USB_SC_RBC 0x01 /* Typically, flash devices */ +#define USB_SC_8020 0x02 /* CD-ROM */ +#define USB_SC_QIC 0x03 /* QIC-157 Tapes */ +#define USB_SC_UFI 0x04 /* Floppy */ +#define USB_SC_8070 0x05 /* Removable media */ +#define USB_SC_SCSI 0x06 /* Transparent */ +#define USB_SC_LOCKABLE 0x07 /* Password-protected */ + +#define USB_SC_ISD200 0xf0 /* ISD200 ATA */ +#define USB_SC_CYP_ATACB 0xf1 /* Cypress ATACB */ +#define USB_SC_DEVICE 0xff /* Use device's value */ + +/* Storage protocol codes */ + +#define USB_PR_CBI 0x00 /* Control/Bulk/Interrupt */ +#define USB_PR_CB 0x01 /* Control/Bulk w/o interrupt */ +#define USB_PR_BULK 0x50 /* bulk only */ +#define USB_PR_UAS 0x62 /* USB Attached SCSI */ + +#define USB_PR_USBAT 0x80 /* SCM-ATAPI bridge */ +#define USB_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */ +#define USB_PR_SDDR55 0x82 /* SDDR-55 (made up) */ +#define USB_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */ +#define USB_PR_FREECOM 0xf1 /* Freecom */ +#define USB_PR_DATAFAB 0xf2 /* Datafab chipsets */ +#define USB_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */ +#define USB_PR_ALAUDA 0xf4 /* Alauda chipsets */ +#define USB_PR_KARMA 0xf5 /* Rio Karma */ + +#define USB_PR_DEVICE 0xff /* Use device's value */ + + /* + * Bulk only data structures + */ + +/* command block wrapper */ +struct bulk_cb_wrap { + __le32 Signature; /* contains 'USBC' */ + __u32 Tag; /* unique per command id */ + __le32 DataTransferLength; /* size of data */ + __u8 Flags; /* direction in bit 0 */ + __u8 Lun; /* LUN normally 0 */ + __u8 Length; /* of of the CDB */ + __u8 CDB[16]; /* max command */ +}; + +#define US_BULK_CB_WRAP_LEN 31 +#define US_BULK_CB_SIGN 0x43425355 /*spells out USBC */ +#define US_BULK_FLAG_IN (1 << 7) +#define US_BULK_FLAG_OUT 0 + +/* command status wrapper */ +struct bulk_cs_wrap { + __le32 Signature; /* should = 'USBS' */ + __u32 Tag; /* same as original command */ + __le32 Residue; /* amount not transferred */ + __u8 Status; /* see below */ +}; + +#define US_BULK_CS_WRAP_LEN 13 +#define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */ +#define US_BULK_STAT_OK 0 +#define US_BULK_STAT_FAIL 1 +#define US_BULK_STAT_PHASE 2 + +/* bulk-only class specific requests */ +#define US_BULK_RESET_REQUEST 0xff +#define US_BULK_GET_MAX_LUN 0xfe + +#endif diff --git a/include/linux/usb/tmc.h 
b/include/linux/usb/tmc.h new file mode 100644 index 00000000..c045ae12 --- /dev/null +++ b/include/linux/usb/tmc.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2007 Stefan Kopp, Gechingen, Germany + * Copyright (C) 2008 Novell, Inc. + * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de> + * + * This file holds USB constants defined by the USB Device Class + * Definition for Test and Measurement devices published by the USB-IF. + * + * It also has the ioctl definitions for the usbtmc kernel driver that + * userspace needs to know about. + */ + +#ifndef __LINUX_USB_TMC_H +#define __LINUX_USB_TMC_H + +/* USB TMC status values */ +#define USBTMC_STATUS_SUCCESS 0x01 +#define USBTMC_STATUS_PENDING 0x02 +#define USBTMC_STATUS_FAILED 0x80 +#define USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS 0x81 +#define USBTMC_STATUS_SPLIT_NOT_IN_PROGRESS 0x82 +#define USBTMC_STATUS_SPLIT_IN_PROGRESS 0x83 + +/* USB TMC requests values */ +#define USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT 1 +#define USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS 2 +#define USBTMC_REQUEST_INITIATE_ABORT_BULK_IN 3 +#define USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS 4 +#define USBTMC_REQUEST_INITIATE_CLEAR 5 +#define USBTMC_REQUEST_CHECK_CLEAR_STATUS 6 +#define USBTMC_REQUEST_GET_CAPABILITIES 7 +#define USBTMC_REQUEST_INDICATOR_PULSE 64 + +/* Request values for USBTMC driver's ioctl entry point */ +#define USBTMC_IOC_NR 91 +#define USBTMC_IOCTL_INDICATOR_PULSE _IO(USBTMC_IOC_NR, 1) +#define USBTMC_IOCTL_CLEAR _IO(USBTMC_IOC_NR, 2) +#define USBTMC_IOCTL_ABORT_BULK_OUT _IO(USBTMC_IOC_NR, 3) +#define USBTMC_IOCTL_ABORT_BULK_IN _IO(USBTMC_IOC_NR, 4) +#define USBTMC_IOCTL_CLEAR_OUT_HALT _IO(USBTMC_IOC_NR, 6) +#define USBTMC_IOCTL_CLEAR_IN_HALT _IO(USBTMC_IOC_NR, 7) + +#endif diff --git a/include/linux/usb/uas.h b/include/linux/usb/uas.h new file mode 100644 index 00000000..9a988e41 --- /dev/null +++ b/include/linux/usb/uas.h @@ -0,0 +1,69 @@ +#ifndef __USB_UAS_H__ +#define __USB_UAS_H__ + +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> + +/* Common header for all IUs */ +struct iu { + __u8 iu_id; + __u8 rsvd1; + __be16 tag; +}; + +enum { + IU_ID_COMMAND = 0x01, + IU_ID_STATUS = 0x03, + IU_ID_RESPONSE = 0x04, + IU_ID_TASK_MGMT = 0x05, + IU_ID_READ_READY = 0x06, + IU_ID_WRITE_READY = 0x07, +}; + +struct command_iu { + __u8 iu_id; + __u8 rsvd1; + __be16 tag; + __u8 prio_attr; + __u8 rsvd5; + __u8 len; + __u8 rsvd7; + struct scsi_lun lun; + __u8 cdb[16]; /* XXX: Overflow-checking tools may misunderstand */ +}; + +/* + * Also used for the Read Ready and Write Ready IUs since they have the + * same first four bytes + */ +struct sense_iu { + __u8 iu_id; + __u8 rsvd1; + __be16 tag; + __be16 status_qual; + __u8 status; + __u8 rsvd7[7]; + __be16 len; + __u8 sense[SCSI_SENSE_BUFFERSIZE]; +}; + +struct usb_pipe_usage_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bPipeID; + __u8 Reserved; +} __attribute__((__packed__)); + +enum { + CMD_PIPE_ID = 1, + STATUS_PIPE_ID = 2, + DATA_IN_PIPE_ID = 3, + DATA_OUT_PIPE_ID = 4, + + UAS_SIMPLE_TAG = 0, + UAS_HEAD_TAG = 1, + UAS_ORDERED_TAG = 2, + UAS_ACA = 4, +}; +#endif diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h new file mode 100644 index 00000000..6f033a41 --- /dev/null +++ b/include/linux/usb/ulpi.h @@ -0,0 +1,192 @@ +/* + * ulpi.h -- ULPI defines and function prorotypes + * + * Copyright (C) 2010 Nokia Corporation + * + * This software is distributed under the terms of the GNU General + * Public License ("GPL") as published by the Free Software Foundation, + * version 2 of that 
License. + */ + +#ifndef __LINUX_USB_ULPI_H +#define __LINUX_USB_ULPI_H + +#include <linux/usb/otg.h> +/*-------------------------------------------------------------------------*/ + +/* + * ULPI Flags + */ +#define ULPI_OTG_ID_PULLUP (1 << 0) +#define ULPI_OTG_DP_PULLDOWN_DIS (1 << 1) +#define ULPI_OTG_DM_PULLDOWN_DIS (1 << 2) +#define ULPI_OTG_DISCHRGVBUS (1 << 3) +#define ULPI_OTG_CHRGVBUS (1 << 4) +#define ULPI_OTG_DRVVBUS (1 << 5) +#define ULPI_OTG_DRVVBUS_EXT (1 << 6) +#define ULPI_OTG_EXTVBUSIND (1 << 7) + +#define ULPI_IC_6PIN_SERIAL (1 << 8) +#define ULPI_IC_3PIN_SERIAL (1 << 9) +#define ULPI_IC_CARKIT (1 << 10) +#define ULPI_IC_CLKSUSPM (1 << 11) +#define ULPI_IC_AUTORESUME (1 << 12) +#define ULPI_IC_EXTVBUS_INDINV (1 << 13) +#define ULPI_IC_IND_PASSTHRU (1 << 14) +#define ULPI_IC_PROTECT_DIS (1 << 15) + +#define ULPI_FC_HS (1 << 16) +#define ULPI_FC_FS (1 << 17) +#define ULPI_FC_LS (1 << 18) +#define ULPI_FC_FS4LS (1 << 19) +#define ULPI_FC_TERMSEL (1 << 20) +#define ULPI_FC_OP_NORM (1 << 21) +#define ULPI_FC_OP_NODRV (1 << 22) +#define ULPI_FC_OP_DIS_NRZI (1 << 23) +#define ULPI_FC_OP_NSYNC_NEOP (1 << 24) +#define ULPI_FC_RST (1 << 25) +#define ULPI_FC_SUSPM (1 << 26) + +/*-------------------------------------------------------------------------*/ + +/* + * Macros for Set and Clear + * See ULPI 1.1 specification to find the registers with Set and Clear offsets + */ +#define ULPI_SET(a) (a + 1) +#define ULPI_CLR(a) (a + 2) + +/*-------------------------------------------------------------------------*/ + +/* + * Register Map + */ +#define ULPI_VENDOR_ID_LOW 0x00 +#define ULPI_VENDOR_ID_HIGH 0x01 +#define ULPI_PRODUCT_ID_LOW 0x02 +#define ULPI_PRODUCT_ID_HIGH 0x03 +#define ULPI_FUNC_CTRL 0x04 +#define ULPI_IFC_CTRL 0x07 +#define ULPI_OTG_CTRL 0x0a +#define ULPI_USB_INT_EN_RISE 0x0d +#define ULPI_USB_INT_EN_FALL 0x10 +#define ULPI_USB_INT_STS 0x13 +#define ULPI_USB_INT_LATCH 0x14 +#define ULPI_DEBUG 0x15 +#define ULPI_SCRATCH 0x16 +/* Optional Carkit Registers */ +#define ULPI_CARCIT_CTRL 0x19 +#define ULPI_CARCIT_INT_DELAY 0x1c +#define ULPI_CARCIT_INT_EN 0x1d +#define ULPI_CARCIT_INT_STS 0x20 +#define ULPI_CARCIT_INT_LATCH 0x21 +#define ULPI_CARCIT_PLS_CTRL 0x22 +/* Other Optional Registers */ +#define ULPI_TX_POS_WIDTH 0x25 +#define ULPI_TX_NEG_WIDTH 0x26 +#define ULPI_POLARITY_RECOVERY 0x27 +/* Access Extended Register Set */ +#define ULPI_ACCESS_EXTENDED 0x2f +/* Vendor Specific */ +#define ULPI_VENDOR_SPECIFIC 0x30 +/* Extended Registers */ +#define ULPI_EXT_VENDOR_SPECIFIC 0x80 + +/*-------------------------------------------------------------------------*/ + +/* + * Register Bits + */ + +/* Function Control */ +#define ULPI_FUNC_CTRL_XCVRSEL (1 << 0) +#define ULPI_FUNC_CTRL_XCVRSEL_MASK (3 << 0) +#define ULPI_FUNC_CTRL_HIGH_SPEED (0 << 0) +#define ULPI_FUNC_CTRL_FULL_SPEED (1 << 0) +#define ULPI_FUNC_CTRL_LOW_SPEED (2 << 0) +#define ULPI_FUNC_CTRL_FS4LS (3 << 0) +#define ULPI_FUNC_CTRL_TERMSELECT (1 << 2) +#define ULPI_FUNC_CTRL_OPMODE (1 << 3) +#define ULPI_FUNC_CTRL_OPMODE_MASK (3 << 3) +#define ULPI_FUNC_CTRL_OPMODE_NORMAL (0 << 3) +#define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (1 << 3) +#define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (2 << 3) +#define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (3 << 3) +#define ULPI_FUNC_CTRL_RESET (1 << 5) +#define ULPI_FUNC_CTRL_SUSPENDM (1 << 6) + +/* Interface Control */ +#define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE (1 << 0) +#define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE (1 << 1) +#define ULPI_IFC_CTRL_CARKITMODE (1 << 2) +#define ULPI_IFC_CTRL_CLOCKSUSPENDM 
(1 << 3) +#define ULPI_IFC_CTRL_AUTORESUME (1 << 4) +#define ULPI_IFC_CTRL_EXTERNAL_VBUS (1 << 5) +#define ULPI_IFC_CTRL_PASSTHRU (1 << 6) +#define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE (1 << 7) + +/* OTG Control */ +#define ULPI_OTG_CTRL_ID_PULLUP (1 << 0) +#define ULPI_OTG_CTRL_DP_PULLDOWN (1 << 1) +#define ULPI_OTG_CTRL_DM_PULLDOWN (1 << 2) +#define ULPI_OTG_CTRL_DISCHRGVBUS (1 << 3) +#define ULPI_OTG_CTRL_CHRGVBUS (1 << 4) +#define ULPI_OTG_CTRL_DRVVBUS (1 << 5) +#define ULPI_OTG_CTRL_DRVVBUS_EXT (1 << 6) +#define ULPI_OTG_CTRL_EXTVBUSIND (1 << 7) + +/* USB Interrupt Enable Rising, + * USB Interrupt Enable Falling, + * USB Interrupt Status and + * USB Interrupt Latch + */ +#define ULPI_INT_HOST_DISCONNECT (1 << 0) +#define ULPI_INT_VBUS_VALID (1 << 1) +#define ULPI_INT_SESS_VALID (1 << 2) +#define ULPI_INT_SESS_END (1 << 3) +#define ULPI_INT_IDGRD (1 << 4) + +/* Debug */ +#define ULPI_DEBUG_LINESTATE0 (1 << 0) +#define ULPI_DEBUG_LINESTATE1 (1 << 1) + +/* Carkit Control */ +#define ULPI_CARKIT_CTRL_CARKITPWR (1 << 0) +#define ULPI_CARKIT_CTRL_IDGNDDRV (1 << 1) +#define ULPI_CARKIT_CTRL_TXDEN (1 << 2) +#define ULPI_CARKIT_CTRL_RXDEN (1 << 3) +#define ULPI_CARKIT_CTRL_SPKLEFTEN (1 << 4) +#define ULPI_CARKIT_CTRL_SPKRIGHTEN (1 << 5) +#define ULPI_CARKIT_CTRL_MICEN (1 << 6) + +/* Carkit Interrupt Enable */ +#define ULPI_CARKIT_INT_EN_IDFLOAT_RISE (1 << 0) +#define ULPI_CARKIT_INT_EN_IDFLOAT_FALL (1 << 1) +#define ULPI_CARKIT_INT_EN_CARINTDET (1 << 2) +#define ULPI_CARKIT_INT_EN_DP_RISE (1 << 3) +#define ULPI_CARKIT_INT_EN_DP_FALL (1 << 4) + +/* Carkit Interrupt Status and + * Carkit Interrupt Latch + */ +#define ULPI_CARKIT_INT_IDFLOAT (1 << 0) +#define ULPI_CARKIT_INT_CARINTDET (1 << 1) +#define ULPI_CARKIT_INT_DP (1 << 2) + +/* Carkit Pulse Control*/ +#define ULPI_CARKIT_PLS_CTRL_TXPLSEN (1 << 0) +#define ULPI_CARKIT_PLS_CTRL_RXPLSEN (1 << 1) +#define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN (1 << 2) +#define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN (1 << 3) + +/*-------------------------------------------------------------------------*/ + +struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, + unsigned int flags); + +#ifdef CONFIG_USB_ULPI_VIEWPORT +/* access ops for controllers with a viewport register */ +extern struct usb_phy_io_ops ulpi_viewport_access_ops; +#endif + +#endif /* __LINUX_USB_ULPI_H */ diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h new file mode 100644 index 00000000..76f43964 --- /dev/null +++ b/include/linux/usb/usbnet.h @@ -0,0 +1,232 @@ +/* + * USB Networking Link Interface + * + * Copyright (C) 2000-2005 by David Brownell <dbrownell@users.sourceforge.net> + * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
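Returning briefly to the ULPI definitions above: the write-1-to-set / write-1-to-clear register convention is easiest to see in code. In the sketch below only the ULPI_* offsets, the ULPI_SET()/ULPI_CLR() helpers and the function-control bits come from this header; example_ulpi_write() is a hypothetical stand-in for whatever viewport or serial accessor the controller glue actually provides.

/* Hypothetical accessor provided by controller glue code (not part of
 * this header): write an 8-bit value to a ULPI register address. */
extern void example_ulpi_write(u8 val, u8 reg);

/* Sketch: switch the PHY's OPMODE to non-driving and back, using the
 * set/clear shadow registers described above. */
static void example_ulpi_set_nondriving(int enable)
{
        if (enable)
                /* write 1s to the "set" address to set those bits */
                example_ulpi_write(ULPI_FUNC_CTRL_OPMODE_NONDRIVING,
                                   ULPI_SET(ULPI_FUNC_CTRL));
        else
                /* write 1s to the "clear" address to clear the field */
                example_ulpi_write(ULPI_FUNC_CTRL_OPMODE_MASK,
                                   ULPI_CLR(ULPI_FUNC_CTRL));
}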
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_USB_USBNET_H +#define __LINUX_USB_USBNET_H + +/* interface from usbnet core to each USB networking link we handle */ +struct usbnet { + /* housekeeping */ + struct usb_device *udev; + struct usb_interface *intf; + struct driver_info *driver_info; + const char *driver_name; + void *driver_priv; + wait_queue_head_t *wait; + struct mutex phy_mutex; + unsigned char suspend_count; + + /* i/o info: pipes etc */ + unsigned in, out; + struct usb_host_endpoint *status; + unsigned maxpacket; + struct timer_list delay; + + /* protocol/interface state */ + struct net_device *net; + int msg_enable; + unsigned long data[5]; + u32 xid; + u32 hard_mtu; /* count any extra framing */ + size_t rx_urb_size; /* size for rx urbs */ + struct mii_if_info mii; + + /* various kinds of pending driver work */ + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct sk_buff_head done; + struct sk_buff_head rxq_pause; + struct urb *interrupt; + struct usb_anchor deferred; + struct tasklet_struct bh; + + struct work_struct kevent; + unsigned long flags; +# define EVENT_TX_HALT 0 +# define EVENT_RX_HALT 1 +# define EVENT_RX_MEMORY 2 +# define EVENT_STS_SPLIT 3 +# define EVENT_LINK_RESET 4 +# define EVENT_RX_PAUSED 5 +# define EVENT_DEV_WAKING 6 +# define EVENT_DEV_ASLEEP 7 +# define EVENT_DEV_OPEN 8 +}; + +static inline struct usb_driver *driver_of(struct usb_interface *intf) +{ + return to_usb_driver(intf->dev.driver); +} + +/* interface from the device/framing level "minidriver" to core */ +struct driver_info { + char *description; + + int flags; +/* framing is CDC Ethernet, not writing ZLPs (hw issues), or optionally: */ +#define FLAG_FRAMING_NC 0x0001 /* guard against device dropouts */ +#define FLAG_FRAMING_GL 0x0002 /* genelink batches packets */ +#define FLAG_FRAMING_Z 0x0004 /* zaurus adds a trailer */ +#define FLAG_FRAMING_RN 0x0008 /* RNDIS batches, plus huge header */ + +#define FLAG_NO_SETINT 0x0010 /* device can't set_interface() */ +#define FLAG_ETHER 0x0020 /* maybe use "eth%d" names */ + +#define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */ +#define FLAG_WLAN 0x0080 /* use "wlan%d" names */ +#define FLAG_AVOID_UNLINK_URBS 0x0100 /* don't unlink urbs at usbnet_stop() */ +#define FLAG_SEND_ZLP 0x0200 /* hw requires ZLPs are sent */ +#define FLAG_WWAN 0x0400 /* use "wwan%d" names */ + +#define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */ + +#define FLAG_POINTTOPOINT 0x1000 /* possibly use "usb%d" names */ + +/* + * Indicates to usbnet, that USB driver accumulates multiple IP packets. + * Affects statistic (counters) and short packet handling. + */ +#define FLAG_MULTI_PACKET 0x2000 +#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ + + /* init device ... can sleep, or cause probe() failure */ + int (*bind)(struct usbnet *, struct usb_interface *); + + /* cleanup device ... can sleep, but can't fail */ + void (*unbind)(struct usbnet *, struct usb_interface *); + + /* reset device ... can sleep */ + int (*reset)(struct usbnet *); + + /* stop device ... can sleep */ + int (*stop)(struct usbnet *); + + /* see if peer is connected ... 
can sleep */ + int (*check_connect)(struct usbnet *); + + /* (dis)activate runtime power management */ + int (*manage_power)(struct usbnet *, int); + + /* for status polling */ + void (*status)(struct usbnet *, struct urb *); + + /* link reset handling, called from defer_kevent */ + int (*link_reset)(struct usbnet *); + + /* fixup rx packet (strip framing) */ + int (*rx_fixup)(struct usbnet *dev, struct sk_buff *skb); + + /* fixup tx packet (add framing) */ + struct sk_buff *(*tx_fixup)(struct usbnet *dev, + struct sk_buff *skb, gfp_t flags); + + /* early initialization code, can sleep. This is for minidrivers + * having 'subminidrivers' that need to do extra initialization + * right after minidriver have initialized hardware. */ + int (*early_init)(struct usbnet *dev); + + /* called by minidriver when receiving indication */ + void (*indication)(struct usbnet *dev, void *ind, int indlen); + + /* for new devices, use the descriptor-reading code instead */ + int in; /* rx endpoint */ + int out; /* tx endpoint */ + + unsigned long data; /* Misc driver specific data */ +}; + +/* Minidrivers are just drivers using the "usbnet" core as a powerful + * network-specific subroutine library ... that happens to do pretty + * much everything except custom framing and chip-specific stuff. + */ +extern int usbnet_probe(struct usb_interface *, const struct usb_device_id *); +extern int usbnet_suspend(struct usb_interface *, pm_message_t); +extern int usbnet_resume(struct usb_interface *); +extern void usbnet_disconnect(struct usb_interface *); + + +/* Drivers that reuse some of the standard USB CDC infrastructure + * (notably, using multiple interfaces according to the CDC + * union descriptor) get some helper code. + */ +struct cdc_state { + struct usb_cdc_header_desc *header; + struct usb_cdc_union_desc *u; + struct usb_cdc_ether_desc *ether; + struct usb_interface *control; + struct usb_interface *data; +}; + +extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); +extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); +extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); +extern void usbnet_cdc_status(struct usbnet *, struct urb *); + +/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */ +#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \ + |USB_CDC_PACKET_TYPE_ALL_MULTICAST \ + |USB_CDC_PACKET_TYPE_PROMISCUOUS \ + |USB_CDC_PACKET_TYPE_DIRECTED) + + +/* we record the state for each of our queued skbs */ +enum skb_state { + illegal = 0, + tx_start, tx_done, + rx_start, rx_done, rx_cleanup, + unlink_start +}; + +struct skb_data { /* skb->cb is one of these */ + struct urb *urb; + struct usbnet *dev; + enum skb_state state; + size_t length; +}; + +extern int usbnet_open(struct net_device *net); +extern int usbnet_stop(struct net_device *net); +extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, + struct net_device *net); +extern void usbnet_tx_timeout(struct net_device *net); +extern int usbnet_change_mtu(struct net_device *net, int new_mtu); + +extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *); +extern int usbnet_get_ethernet_addr(struct usbnet *, int); +extern void usbnet_defer_kevent(struct usbnet *, int); +extern void usbnet_skb_return(struct usbnet *, struct sk_buff *); +extern void usbnet_unlink_rx_urbs(struct usbnet *); + +extern void usbnet_pause_rx(struct usbnet *); +extern void usbnet_resume_rx(struct usbnet *); +extern void usbnet_purge_paused_rxq(struct usbnet *); + +extern 
int usbnet_get_settings(struct net_device *net, + struct ethtool_cmd *cmd); +extern int usbnet_set_settings(struct net_device *net, + struct ethtool_cmd *cmd); +extern u32 usbnet_get_link(struct net_device *net); +extern u32 usbnet_get_msglevel(struct net_device *); +extern void usbnet_set_msglevel(struct net_device *, u32); +extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); +extern int usbnet_nway_reset(struct net_device *net); + +#endif /* __LINUX_USB_USBNET_H */ diff --git a/include/linux/usb/video.h b/include/linux/usb/video.h new file mode 100644 index 00000000..3b3b95e0 --- /dev/null +++ b/include/linux/usb/video.h @@ -0,0 +1,568 @@ +/* + * USB Video Class definitions. + * + * Copyright (C) 2009 Laurent Pinchart <laurent.pinchart@skynet.be> + * + * This file holds USB constants and structures defined by the USB Device + * Class Definition for Video Devices. Unless otherwise stated, comments + * below reference relevant sections of the USB Video Class 1.1 specification + * available at + * + * http://www.usb.org/developers/devclass_docs/USB_Video_Class_1_1.zip + */ + +#ifndef __LINUX_USB_VIDEO_H +#define __LINUX_USB_VIDEO_H + +#include <linux/types.h> + +/* -------------------------------------------------------------------------- + * UVC constants + */ + +/* A.2. Video Interface Subclass Codes */ +#define UVC_SC_UNDEFINED 0x00 +#define UVC_SC_VIDEOCONTROL 0x01 +#define UVC_SC_VIDEOSTREAMING 0x02 +#define UVC_SC_VIDEO_INTERFACE_COLLECTION 0x03 + +/* A.3. Video Interface Protocol Codes */ +#define UVC_PC_PROTOCOL_UNDEFINED 0x00 + +/* A.5. Video Class-Specific VC Interface Descriptor Subtypes */ +#define UVC_VC_DESCRIPTOR_UNDEFINED 0x00 +#define UVC_VC_HEADER 0x01 +#define UVC_VC_INPUT_TERMINAL 0x02 +#define UVC_VC_OUTPUT_TERMINAL 0x03 +#define UVC_VC_SELECTOR_UNIT 0x04 +#define UVC_VC_PROCESSING_UNIT 0x05 +#define UVC_VC_EXTENSION_UNIT 0x06 + +/* A.6. Video Class-Specific VS Interface Descriptor Subtypes */ +#define UVC_VS_UNDEFINED 0x00 +#define UVC_VS_INPUT_HEADER 0x01 +#define UVC_VS_OUTPUT_HEADER 0x02 +#define UVC_VS_STILL_IMAGE_FRAME 0x03 +#define UVC_VS_FORMAT_UNCOMPRESSED 0x04 +#define UVC_VS_FRAME_UNCOMPRESSED 0x05 +#define UVC_VS_FORMAT_MJPEG 0x06 +#define UVC_VS_FRAME_MJPEG 0x07 +#define UVC_VS_FORMAT_MPEG2TS 0x0a +#define UVC_VS_FORMAT_DV 0x0c +#define UVC_VS_COLORFORMAT 0x0d +#define UVC_VS_FORMAT_FRAME_BASED 0x10 +#define UVC_VS_FRAME_FRAME_BASED 0x11 +#define UVC_VS_FORMAT_STREAM_BASED 0x12 + +/* A.7. Video Class-Specific Endpoint Descriptor Subtypes */ +#define UVC_EP_UNDEFINED 0x00 +#define UVC_EP_GENERAL 0x01 +#define UVC_EP_ENDPOINT 0x02 +#define UVC_EP_INTERRUPT 0x03 + +/* A.8. Video Class-Specific Request Codes */ +#define UVC_RC_UNDEFINED 0x00 +#define UVC_SET_CUR 0x01 +#define UVC_GET_CUR 0x81 +#define UVC_GET_MIN 0x82 +#define UVC_GET_MAX 0x83 +#define UVC_GET_RES 0x84 +#define UVC_GET_LEN 0x85 +#define UVC_GET_INFO 0x86 +#define UVC_GET_DEF 0x87 + +/* A.9.1. VideoControl Interface Control Selectors */ +#define UVC_VC_CONTROL_UNDEFINED 0x00 +#define UVC_VC_VIDEO_POWER_MODE_CONTROL 0x01 +#define UVC_VC_REQUEST_ERROR_CODE_CONTROL 0x02 + +/* A.9.2. Terminal Control Selectors */ +#define UVC_TE_CONTROL_UNDEFINED 0x00 + +/* A.9.3. Selector Unit Control Selectors */ +#define UVC_SU_CONTROL_UNDEFINED 0x00 +#define UVC_SU_INPUT_SELECT_CONTROL 0x01 + +/* A.9.4. 
Camera Terminal Control Selectors */ +#define UVC_CT_CONTROL_UNDEFINED 0x00 +#define UVC_CT_SCANNING_MODE_CONTROL 0x01 +#define UVC_CT_AE_MODE_CONTROL 0x02 +#define UVC_CT_AE_PRIORITY_CONTROL 0x03 +#define UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL 0x04 +#define UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL 0x05 +#define UVC_CT_FOCUS_ABSOLUTE_CONTROL 0x06 +#define UVC_CT_FOCUS_RELATIVE_CONTROL 0x07 +#define UVC_CT_FOCUS_AUTO_CONTROL 0x08 +#define UVC_CT_IRIS_ABSOLUTE_CONTROL 0x09 +#define UVC_CT_IRIS_RELATIVE_CONTROL 0x0a +#define UVC_CT_ZOOM_ABSOLUTE_CONTROL 0x0b +#define UVC_CT_ZOOM_RELATIVE_CONTROL 0x0c +#define UVC_CT_PANTILT_ABSOLUTE_CONTROL 0x0d +#define UVC_CT_PANTILT_RELATIVE_CONTROL 0x0e +#define UVC_CT_ROLL_ABSOLUTE_CONTROL 0x0f +#define UVC_CT_ROLL_RELATIVE_CONTROL 0x10 +#define UVC_CT_PRIVACY_CONTROL 0x11 + +/* A.9.5. Processing Unit Control Selectors */ +#define UVC_PU_CONTROL_UNDEFINED 0x00 +#define UVC_PU_BACKLIGHT_COMPENSATION_CONTROL 0x01 +#define UVC_PU_BRIGHTNESS_CONTROL 0x02 +#define UVC_PU_CONTRAST_CONTROL 0x03 +#define UVC_PU_GAIN_CONTROL 0x04 +#define UVC_PU_POWER_LINE_FREQUENCY_CONTROL 0x05 +#define UVC_PU_HUE_CONTROL 0x06 +#define UVC_PU_SATURATION_CONTROL 0x07 +#define UVC_PU_SHARPNESS_CONTROL 0x08 +#define UVC_PU_GAMMA_CONTROL 0x09 +#define UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL 0x0a +#define UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL 0x0b +#define UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL 0x0c +#define UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL 0x0d +#define UVC_PU_DIGITAL_MULTIPLIER_CONTROL 0x0e +#define UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL 0x0f +#define UVC_PU_HUE_AUTO_CONTROL 0x10 +#define UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL 0x11 +#define UVC_PU_ANALOG_LOCK_STATUS_CONTROL 0x12 + +/* A.9.7. VideoStreaming Interface Control Selectors */ +#define UVC_VS_CONTROL_UNDEFINED 0x00 +#define UVC_VS_PROBE_CONTROL 0x01 +#define UVC_VS_COMMIT_CONTROL 0x02 +#define UVC_VS_STILL_PROBE_CONTROL 0x03 +#define UVC_VS_STILL_COMMIT_CONTROL 0x04 +#define UVC_VS_STILL_IMAGE_TRIGGER_CONTROL 0x05 +#define UVC_VS_STREAM_ERROR_CODE_CONTROL 0x06 +#define UVC_VS_GENERATE_KEY_FRAME_CONTROL 0x07 +#define UVC_VS_UPDATE_FRAME_SEGMENT_CONTROL 0x08 +#define UVC_VS_SYNC_DELAY_CONTROL 0x09 + +/* B.1. USB Terminal Types */ +#define UVC_TT_VENDOR_SPECIFIC 0x0100 +#define UVC_TT_STREAMING 0x0101 + +/* B.2. Input Terminal Types */ +#define UVC_ITT_VENDOR_SPECIFIC 0x0200 +#define UVC_ITT_CAMERA 0x0201 +#define UVC_ITT_MEDIA_TRANSPORT_INPUT 0x0202 + +/* B.3. Output Terminal Types */ +#define UVC_OTT_VENDOR_SPECIFIC 0x0300 +#define UVC_OTT_DISPLAY 0x0301 +#define UVC_OTT_MEDIA_TRANSPORT_OUTPUT 0x0302 + +/* B.4. External Terminal Types */ +#define UVC_EXTERNAL_VENDOR_SPECIFIC 0x0400 +#define UVC_COMPOSITE_CONNECTOR 0x0401 +#define UVC_SVIDEO_CONNECTOR 0x0402 +#define UVC_COMPONENT_CONNECTOR 0x0403 + +/* 2.4.2.2. Status Packet Type */ +#define UVC_STATUS_TYPE_CONTROL 1 +#define UVC_STATUS_TYPE_STREAMING 2 + +/* 2.4.3.3. Payload Header Information */ +#define UVC_STREAM_EOH (1 << 7) +#define UVC_STREAM_ERR (1 << 6) +#define UVC_STREAM_STI (1 << 5) +#define UVC_STREAM_RES (1 << 4) +#define UVC_STREAM_SCR (1 << 3) +#define UVC_STREAM_PTS (1 << 2) +#define UVC_STREAM_EOF (1 << 1) +#define UVC_STREAM_FID (1 << 0) + +/* 4.1.2. 
Control Capabilities */ +#define UVC_CONTROL_CAP_GET (1 << 0) +#define UVC_CONTROL_CAP_SET (1 << 1) +#define UVC_CONTROL_CAP_DISABLED (1 << 2) +#define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3) +#define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4) + +/* ------------------------------------------------------------------------ + * UVC structures + */ + +/* All UVC descriptors have these 3 fields at the beginning */ +struct uvc_descriptor_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; +} __attribute__((packed)); + +/* 3.7.2. Video Control Interface Header Descriptor */ +struct uvc_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u16 bcdUVC; + __u16 wTotalLength; + __u32 dwClockFrequency; + __u8 bInCollection; + __u8 baInterfaceNr[]; +} __attribute__((__packed__)); + +#define UVC_DT_HEADER_SIZE(n) (12+(n)) + +#define UVC_HEADER_DESCRIPTOR(n) \ + uvc_header_descriptor_##n + +#define DECLARE_UVC_HEADER_DESCRIPTOR(n) \ +struct UVC_HEADER_DESCRIPTOR(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u16 bcdUVC; \ + __u16 wTotalLength; \ + __u32 dwClockFrequency; \ + __u8 bInCollection; \ + __u8 baInterfaceNr[n]; \ +} __attribute__ ((packed)) + +/* 3.7.2.1. Input Terminal Descriptor */ +struct uvc_input_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 iTerminal; +} __attribute__((__packed__)); + +#define UVC_DT_INPUT_TERMINAL_SIZE 8 + +/* 3.7.2.2. Output Terminal Descriptor */ +struct uvc_output_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 bSourceID; + __u8 iTerminal; +} __attribute__((__packed__)); + +#define UVC_DT_OUTPUT_TERMINAL_SIZE 9 + +/* 3.7.2.3. Camera Terminal Descriptor */ +struct uvc_camera_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 iTerminal; + __u16 wObjectiveFocalLengthMin; + __u16 wObjectiveFocalLengthMax; + __u16 wOcularFocalLength; + __u8 bControlSize; + __u8 bmControls[3]; +} __attribute__((__packed__)); + +#define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n)) + +/* 3.7.2.4. Selector Unit Descriptor */ +struct uvc_selector_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 bNrInPins; + __u8 baSourceID[0]; + __u8 iSelector; +} __attribute__((__packed__)); + +#define UVC_DT_SELECTOR_UNIT_SIZE(n) (6+(n)) + +#define UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ + uvc_selector_unit_descriptor_##n + +#define DECLARE_UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ +struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bUnitID; \ + __u8 bNrInPins; \ + __u8 baSourceID[n]; \ + __u8 iSelector; \ +} __attribute__ ((packed)) + +/* 3.7.2.5. Processing Unit Descriptor */ +struct uvc_processing_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 bSourceID; + __u16 wMaxMultiplier; + __u8 bControlSize; + __u8 bmControls[2]; + __u8 iProcessing; +} __attribute__((__packed__)); + +#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n)) + +/* 3.7.2.6. 
Extension Unit Descriptor */ +struct uvc_extension_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 guidExtensionCode[16]; + __u8 bNumControls; + __u8 bNrInPins; + __u8 baSourceID[0]; + __u8 bControlSize; + __u8 bmControls[0]; + __u8 iExtension; +} __attribute__((__packed__)); + +#define UVC_DT_EXTENSION_UNIT_SIZE(p, n) (24+(p)+(n)) + +#define UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ + uvc_extension_unit_descriptor_##p_##n + +#define DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ +struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bUnitID; \ + __u8 guidExtensionCode[16]; \ + __u8 bNumControls; \ + __u8 bNrInPins; \ + __u8 baSourceID[p]; \ + __u8 bControlSize; \ + __u8 bmControls[n]; \ + __u8 iExtension; \ +} __attribute__ ((packed)) + +/* 3.8.2.2. Video Control Interrupt Endpoint Descriptor */ +struct uvc_control_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u16 wMaxTransferSize; +} __attribute__((__packed__)); + +#define UVC_DT_CONTROL_ENDPOINT_SIZE 5 + +/* 3.9.2.1. Input Header Descriptor */ +struct uvc_input_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bNumFormats; + __u16 wTotalLength; + __u8 bEndpointAddress; + __u8 bmInfo; + __u8 bTerminalLink; + __u8 bStillCaptureMethod; + __u8 bTriggerSupport; + __u8 bTriggerUsage; + __u8 bControlSize; + __u8 bmaControls[]; +} __attribute__((__packed__)); + +#define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p)) + +#define UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ + uvc_input_header_descriptor_##n_##p + +#define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ +struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bNumFormats; \ + __u16 wTotalLength; \ + __u8 bEndpointAddress; \ + __u8 bmInfo; \ + __u8 bTerminalLink; \ + __u8 bStillCaptureMethod; \ + __u8 bTriggerSupport; \ + __u8 bTriggerUsage; \ + __u8 bControlSize; \ + __u8 bmaControls[p][n]; \ +} __attribute__ ((packed)) + +/* 3.9.2.2. Output Header Descriptor */ +struct uvc_output_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bNumFormats; + __u16 wTotalLength; + __u8 bEndpointAddress; + __u8 bTerminalLink; + __u8 bControlSize; + __u8 bmaControls[]; +} __attribute__((__packed__)); + +#define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p)) + +#define UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ + uvc_output_header_descriptor_##n_##p + +#define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ +struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bNumFormats; \ + __u16 wTotalLength; \ + __u8 bEndpointAddress; \ + __u8 bTerminalLink; \ + __u8 bControlSize; \ + __u8 bmaControls[p][n]; \ +} __attribute__ ((packed)) + +/* 3.9.2.6. Color matching descriptor */ +struct uvc_color_matching_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bColorPrimaries; + __u8 bTransferCharacteristics; + __u8 bMatrixCoefficients; +} __attribute__((__packed__)); + +#define UVC_DT_COLOR_MATCHING_SIZE 6 + +/* 4.3.1.1. 
Video Probe and Commit Controls */ +struct uvc_streaming_control { + __u16 bmHint; + __u8 bFormatIndex; + __u8 bFrameIndex; + __u32 dwFrameInterval; + __u16 wKeyFrameRate; + __u16 wPFrameRate; + __u16 wCompQuality; + __u16 wCompWindowSize; + __u16 wDelay; + __u32 dwMaxVideoFrameSize; + __u32 dwMaxPayloadTransferSize; + __u32 dwClockFrequency; + __u8 bmFramingInfo; + __u8 bPreferedVersion; + __u8 bMinVersion; + __u8 bMaxVersion; +} __attribute__((__packed__)); + +/* Uncompressed Payload - 3.1.1. Uncompressed Video Format Descriptor */ +struct uvc_format_uncompressed { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFormatIndex; + __u8 bNumFrameDescriptors; + __u8 guidFormat[16]; + __u8 bBitsPerPixel; + __u8 bDefaultFrameIndex; + __u8 bAspectRatioX; + __u8 bAspectRatioY; + __u8 bmInterfaceFlags; + __u8 bCopyProtect; +} __attribute__((__packed__)); + +#define UVC_DT_FORMAT_UNCOMPRESSED_SIZE 27 + +/* Uncompressed Payload - 3.1.2. Uncompressed Video Frame Descriptor */ +struct uvc_frame_uncompressed { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFrameIndex; + __u8 bmCapabilities; + __u16 wWidth; + __u16 wHeight; + __u32 dwMinBitRate; + __u32 dwMaxBitRate; + __u32 dwMaxVideoFrameBufferSize; + __u32 dwDefaultFrameInterval; + __u8 bFrameIntervalType; + __u32 dwFrameInterval[]; +} __attribute__((__packed__)); + +#define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n)) + +#define UVC_FRAME_UNCOMPRESSED(n) \ + uvc_frame_uncompressed_##n + +#define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \ +struct UVC_FRAME_UNCOMPRESSED(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bFrameIndex; \ + __u8 bmCapabilities; \ + __u16 wWidth; \ + __u16 wHeight; \ + __u32 dwMinBitRate; \ + __u32 dwMaxBitRate; \ + __u32 dwMaxVideoFrameBufferSize; \ + __u32 dwDefaultFrameInterval; \ + __u8 bFrameIntervalType; \ + __u32 dwFrameInterval[n]; \ +} __attribute__ ((packed)) + +/* MJPEG Payload - 3.1.1. MJPEG Video Format Descriptor */ +struct uvc_format_mjpeg { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFormatIndex; + __u8 bNumFrameDescriptors; + __u8 bmFlags; + __u8 bDefaultFrameIndex; + __u8 bAspectRatioX; + __u8 bAspectRatioY; + __u8 bmInterfaceFlags; + __u8 bCopyProtect; +} __attribute__((__packed__)); + +#define UVC_DT_FORMAT_MJPEG_SIZE 11 + +/* MJPEG Payload - 3.1.2. 
MJPEG Video Frame Descriptor */ +struct uvc_frame_mjpeg { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFrameIndex; + __u8 bmCapabilities; + __u16 wWidth; + __u16 wHeight; + __u32 dwMinBitRate; + __u32 dwMaxBitRate; + __u32 dwMaxVideoFrameBufferSize; + __u32 dwDefaultFrameInterval; + __u8 bFrameIntervalType; + __u32 dwFrameInterval[]; +} __attribute__((__packed__)); + +#define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n)) + +#define UVC_FRAME_MJPEG(n) \ + uvc_frame_mjpeg_##n + +#define DECLARE_UVC_FRAME_MJPEG(n) \ +struct UVC_FRAME_MJPEG(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bFrameIndex; \ + __u8 bmCapabilities; \ + __u16 wWidth; \ + __u16 wHeight; \ + __u32 dwMinBitRate; \ + __u32 dwMaxBitRate; \ + __u32 dwMaxVideoFrameBufferSize; \ + __u32 dwDefaultFrameInterval; \ + __u8 bFrameIntervalType; \ + __u32 dwFrameInterval[n]; \ +} __attribute__ ((packed)) + +#endif /* __LINUX_USB_VIDEO_H */ + diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h new file mode 100644 index 00000000..f9dec37f --- /dev/null +++ b/include/linux/usb/wusb-wa.h @@ -0,0 +1,272 @@ +/* + * Wireless USB Wire Adapter constants and structures. + * + * Copyright (C) 2005-2006 Intel Corporation. + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * FIXME: organize properly, group logically + * + * All the event structures are defined in uwb/spec.h, as they are + * common to the WHCI and WUSB radio control interfaces. 
+ * + * References: + * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8 + */ +#ifndef __LINUX_USB_WUSB_WA_H +#define __LINUX_USB_WUSB_WA_H + +/** + * Radio Command Request for the Radio Control Interface + * + * Radio Control Interface command and event codes are the same as + * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_* + */ +enum { + WA_EXEC_RC_CMD = 40, /* Radio Control command Request */ +}; + +/* Wireless Adapter Requests ([WUSB] table 8-51) */ +enum { + WUSB_REQ_ADD_MMC_IE = 20, + WUSB_REQ_REMOVE_MMC_IE = 21, + WUSB_REQ_SET_NUM_DNTS = 22, + WUSB_REQ_SET_CLUSTER_ID = 23, + WUSB_REQ_SET_DEV_INFO = 24, + WUSB_REQ_GET_TIME = 25, + WUSB_REQ_SET_STREAM_IDX = 26, + WUSB_REQ_SET_WUSB_MAS = 27, + WUSB_REQ_CHAN_STOP = 28, +}; + + +/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */ +enum { + WUSB_TIME_ADJ = 0, + WUSB_TIME_BPST = 1, + WUSB_TIME_WUSB = 2, +}; + +enum { + WA_ENABLE = 0x01, + WA_RESET = 0x02, + RPIPE_PAUSE = 0x1, +}; + +/* Responses from Get Status request ([WUSB] section 8.3.1.6) */ +enum { + WA_STATUS_ENABLED = 0x01, + WA_STATUS_RESETTING = 0x02 +}; + +enum rpipe_crs { + RPIPE_CRS_CTL = 0x01, + RPIPE_CRS_ISO = 0x02, + RPIPE_CRS_BULK = 0x04, + RPIPE_CRS_INTR = 0x08 +}; + +/** + * RPipe descriptor ([WUSB] section 8.5.2.11) + * + * FIXME: explain rpipes + */ +struct usb_rpipe_descriptor { + u8 bLength; + u8 bDescriptorType; + __le16 wRPipeIndex; + __le16 wRequests; + __le16 wBlocks; /* rw if 0 */ + __le16 wMaxPacketSize; /* rw? */ + u8 bHSHubAddress; /* reserved: 0 */ + u8 bHSHubPort; /* ??? FIXME ??? */ + u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */ + u8 bDeviceAddress; /* rw: Target device address */ + u8 bEndpointAddress; /* rw: Target EP address */ + u8 bDataSequence; /* ro: Current Data sequence */ + __le32 dwCurrentWindow; /* ro */ + u8 bMaxDataSequence; /* ro?: max supported seq */ + u8 bInterval; /* rw: */ + u8 bOverTheAirInterval; /* rw: */ + u8 bmAttribute; /* ro? */ + u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */ + u8 bmRetryOptions; /* rw? */ + __le16 wNumTransactionErrors; /* rw */ +} __attribute__ ((packed)); + +/** + * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4) + * + * These are the notifications coming on the notification endpoint of + * an HWA and a DWA. + */ +enum wa_notif_type { + DWA_NOTIF_RWAKE = 0x91, + DWA_NOTIF_PORTSTATUS = 0x92, + WA_NOTIF_TRANSFER = 0x93, + HWA_NOTIF_BPST_ADJ = 0x94, + HWA_NOTIF_DN = 0x95, +}; + +/** + * Wire Adapter notification header + * + * Notifications coming from a wire adapter use a common header + * defined in [WUSB] sections 8.4.5 & 8.5.4. + */ +struct wa_notif_hdr { + u8 bLength; + u8 bNotifyType; /* enum wa_notif_type */ +} __attribute__((packed)); + +/** + * HWA DN Received notification [(WUSB] section 8.5.4.2) + * + * The DNData is specified in WUSB1.0[7.6]. For each device + * notification we received, we just need to dispatch it. + * + * @dndata: this is really an array of notifications, but all start + * with the same header. 
+ */ +struct hwa_notif_dn { + struct wa_notif_hdr hdr; + u8 bSourceDeviceAddr; /* from errata 2005/07 */ + u8 bmAttributes; + struct wusb_dn_hdr dndata[]; +} __attribute__((packed)); + +/* [WUSB] section 8.3.3 */ +enum wa_xfer_type { + WA_XFER_TYPE_CTL = 0x80, + WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */ + WA_XFER_TYPE_ISO = 0x82, + WA_XFER_RESULT = 0x83, + WA_XFER_ABORT = 0x84, +}; + +/* [WUSB] section 8.3.3 */ +struct wa_xfer_hdr { + u8 bLength; /* 0x18 */ + u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */ + __le16 wRPipe; /* RPipe index */ + __le32 dwTransferID; /* Host-assigned ID */ + __le32 dwTransferLength; /* Length of data to xfer */ + u8 bTransferSegment; +} __attribute__((packed)); + +struct wa_xfer_ctl { + struct wa_xfer_hdr hdr; + u8 bmAttribute; + __le16 wReserved; + struct usb_ctrlrequest baSetupData; +} __attribute__((packed)); + +struct wa_xfer_bi { + struct wa_xfer_hdr hdr; + u8 bReserved; + __le16 wReserved; +} __attribute__((packed)); + +struct wa_xfer_hwaiso { + struct wa_xfer_hdr hdr; + u8 bReserved; + __le16 wPresentationTime; + __le32 dwNumOfPackets; + /* FIXME: u8 pktdata[]? */ +} __attribute__((packed)); + +/* [WUSB] section 8.3.3.5 */ +struct wa_xfer_abort { + u8 bLength; + u8 bRequestType; + __le16 wRPipe; /* RPipe index */ + __le32 dwTransferID; /* Host-assigned ID */ +} __attribute__((packed)); + +/** + * WA Transfer Complete notification ([WUSB] section 8.3.3.3) + * + */ +struct wa_notif_xfer { + struct wa_notif_hdr hdr; + u8 bEndpoint; + u8 Reserved; +} __attribute__((packed)); + +/** Transfer result basic codes [WUSB] table 8-15 */ +enum { + WA_XFER_STATUS_SUCCESS, + WA_XFER_STATUS_HALTED, + WA_XFER_STATUS_DATA_BUFFER_ERROR, + WA_XFER_STATUS_BABBLE, + WA_XFER_RESERVED, + WA_XFER_STATUS_NOT_FOUND, + WA_XFER_STATUS_INSUFFICIENT_RESOURCE, + WA_XFER_STATUS_TRANSACTION_ERROR, + WA_XFER_STATUS_ABORTED, + WA_XFER_STATUS_RPIPE_NOT_READY, + WA_XFER_INVALID_FORMAT, + WA_XFER_UNEXPECTED_SEGMENT_NUMBER, + WA_XFER_STATUS_RPIPE_TYPE_MISMATCH, +}; + +/** [WUSB] section 8.3.3.4 */ +struct wa_xfer_result { + struct wa_notif_hdr hdr; + __le32 dwTransferID; + __le32 dwTransferLength; + u8 bTransferSegment; + u8 bTransferStatus; + __le32 dwNumOfPackets; +} __attribute__((packed)); + +/** + * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7). + * + * NOTE: u16 fields are read Little Endian from the hardware. + * + * @bNumPorts is the original max number of devices that the host can + * connect; we might chop this so the stack can handle + * it. In case you need to access it, use wusbhc->ports_max + * if it is a Wireless USB WA. + */ +struct usb_wa_descriptor { + u8 bLength; + u8 bDescriptorType; + u16 bcdWAVersion; + u8 bNumPorts; /* don't use!! 
*/ + u8 bmAttributes; /* Reserved == 0 */ + u16 wNumRPipes; + u16 wRPipeMaxBlock; + u8 bRPipeBlockSize; + u8 bPwrOn2PwrGood; + u8 bNumMMCIEs; + u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */ +} __attribute__((packed)); + +/** + * HWA Device Information Buffer (WUSB1.0[T8.54]) + */ +struct hwa_dev_info { + u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */ + u8 bDeviceAddress; + __le16 wPHYRates; + u8 bmDeviceAttribute; +} __attribute__((packed)); + +#endif /* #ifndef __LINUX_USB_WUSB_WA_H */ diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h new file mode 100644 index 00000000..0c4d4ca3 --- /dev/null +++ b/include/linux/usb/wusb.h @@ -0,0 +1,375 @@ +/* + * Wireless USB Standard Definitions + * Event Size Tables + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * FIXME: organize properly, group logically + * + * All the event structures are defined in uwb/spec.h, as they are + * common to the WHCI and WUSB radio control interfaces. + */ + +#ifndef __WUSB_H__ +#define __WUSB_H__ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/uwb/spec.h> +#include <linux/usb/ch9.h> +#include <linux/param.h> + +/** + * WUSB Information Element header + * + * I don't know why, they decided to make it different to the MBOA MAC + * IE Header; beats me. + */ +struct wuie_hdr { + u8 bLength; + u8 bIEIdentifier; +} __attribute__((packed)); + +enum { + WUIE_ID_WCTA = 0x80, + WUIE_ID_CONNECTACK, + WUIE_ID_HOST_INFO, + WUIE_ID_CHANGE_ANNOUNCE, + WUIE_ID_DEVICE_DISCONNECT, + WUIE_ID_HOST_DISCONNECT, + WUIE_ID_KEEP_ALIVE = 0x89, + WUIE_ID_ISOCH_DISCARD, + WUIE_ID_RESET_DEVICE, +}; + +/** + * Maximum number of array elements in a WUSB IE. + * + * WUSB1.0[7.5 before table 7-38] says that in WUSB IEs that + * are "arrays" have to limited to 4 elements. So we define it + * like that to ease up and submit only the neeed size. + */ +#define WUIE_ELT_MAX 4 + +/** + * Wrapper for the data that defines a CHID, a CDID or a CK + * + * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of + * data. In order to avoid confusion and enforce types, we wrap it. + * + * Make it packed, as we use it in some hw definitions. + */ +struct wusb_ckhdid { + u8 data[16]; +} __attribute__((packed)); + +static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; + +#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) + +/** + * WUSB IE: Host Information (WUSB1.0[7.5.2]) + * + * Used to provide information about the host to the Wireless USB + * devices in range (CHID can be used as an ASCII string). 
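+ *
+ * As a rough illustration (not lifted from any driver; 'our_chid' stands
+ * for the host's 16-byte CHID, and the attributes word also carries other
+ * bits, see WUSB1.0[7.5.2]), the IE could be filled in like this:
+ *
+ *	struct wuie_host_info hi = {
+ *		.hdr.bLength       = sizeof(hi),
+ *		.hdr.bIEIdentifier = WUIE_ID_HOST_INFO,
+ *		.attributes        = cpu_to_le16(WUIE_HI_CAP_ALL),
+ *		.CHID              = our_chid,
+ *	};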
+ */
+struct wuie_host_info {
+	struct wuie_hdr hdr;
+	__le16 attributes;
+	struct wusb_ckhdid CHID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Connect Ack (WUSB1.0[7.5.1])
+ *
+ * Used to acknowledge device connect requests. See note for
+ * WUIE_ELT_MAX.
+ */
+struct wuie_connect_ack {
+	struct wuie_hdr hdr;
+	struct {
+		struct wusb_ckhdid CDID;
+		u8 bDeviceAddress;	/* 0 means unused */
+		u8 bReserved;
+	} blk[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE Host Information Element, Connect Availability
+ *
+ * WUSB1.0[7.5.2], bmAttributes description
+ */
+enum {
+	WUIE_HI_CAP_RECONNECT = 0,
+	WUIE_HI_CAP_LIMITED,
+	WUIE_HI_CAP_RESERVED,
+	WUIE_HI_CAP_ALL,
+};
+
+/**
+ * WUSB IE: Channel Stop (WUSB1.0[7.5.8])
+ *
+ * Tells devices the host is going to stop sending MMCs and will disappear.
+ */
+struct wuie_channel_stop {
+	struct wuie_hdr hdr;
+	u8 attributes;
+	u8 timestamp[3];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Keepalive (WUSB1.0[7.5.9])
+ *
+ * Ask device(s) to send keepalives.
+ */
+struct wuie_keep_alive {
+	struct wuie_hdr hdr;
+	u8 bDeviceAddress[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Reset device (WUSB1.0[7.5.11])
+ *
+ * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
+ * use it for one at a time...
+ *
+ * In any case, this request is a wee bit silly: why don't they target
+ * by address??
+ */
+struct wuie_reset {
+	struct wuie_hdr hdr;
+	struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Disconnect device (WUSB1.0[7.5.11])
+ *
+ * Tell device to disconnect; we can fit 4 addresses, but we only use
+ * it for one at a time...
+ */
+struct wuie_disconnect {
+	struct wuie_hdr hdr;
+	u8 bDeviceAddress;
+	u8 padding;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Host disconnect ([WUSB] section 7.5.5)
+ *
+ * Tells all connected devices to disconnect.
+ */ +struct wuie_host_disconnect { + struct wuie_hdr hdr; +} __attribute__((packed)); + +/** + * WUSB Device Notification header (WUSB1.0[7.6]) + */ +struct wusb_dn_hdr { + u8 bType; + u8 notifdata[]; +} __attribute__((packed)); + +/** Device Notification codes (WUSB1.0[Table 7-54]) */ +enum WUSB_DN { + WUSB_DN_CONNECT = 0x01, + WUSB_DN_DISCONNECT = 0x02, + WUSB_DN_EPRDY = 0x03, + WUSB_DN_MASAVAILCHANGED = 0x04, + WUSB_DN_RWAKE = 0x05, + WUSB_DN_SLEEP = 0x06, + WUSB_DN_ALIVE = 0x07, +}; + +/** WUSB Device Notification Connect */ +struct wusb_dn_connect { + struct wusb_dn_hdr hdr; + __le16 attributes; + struct wusb_ckhdid CDID; +} __attribute__((packed)); + +static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn) +{ + return le16_to_cpu(dn->attributes) & 0xff; +} + +static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn) +{ + return (le16_to_cpu(dn->attributes) >> 8) & 0x1; +} + +static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn) +{ + return (le16_to_cpu(dn->attributes) >> 9) & 0x03; +} + +/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */ +struct wusb_dn_alive { + struct wusb_dn_hdr hdr; +} __attribute__((packed)); + +/** Device is disconnecting (WUSB1.0[7.6.2]) */ +struct wusb_dn_disconnect { + struct wusb_dn_hdr hdr; +} __attribute__((packed)); + +/* General constants */ +enum { + WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */ +}; + +static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size, + const struct wusb_ckhdid *ckhdid) +{ + return scnprintf(pr_ckhdid, size, + "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx " + "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx", + ckhdid->data[0], ckhdid->data[1], + ckhdid->data[2], ckhdid->data[3], + ckhdid->data[4], ckhdid->data[5], + ckhdid->data[6], ckhdid->data[7], + ckhdid->data[8], ckhdid->data[9], + ckhdid->data[10], ckhdid->data[11], + ckhdid->data[12], ckhdid->data[13], + ckhdid->data[14], ckhdid->data[15]); +} + +/* + * WUSB Crypto stuff (WUSB1.0[6]) + */ + +extern const char *wusb_et_name(u8); + +/** + * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for + * the host or the device. + */ +static inline u8 wusb_key_index(int index, int type, int originator) +{ + return (originator << 6) | (type << 4) | index; +} + +#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */ +#define WUSB_KEY_INDEX_TYPE_ASSOC 1 +#define WUSB_KEY_INDEX_TYPE_GTK 2 +#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 +#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 + +/* A CCM Nonce, defined in WUSB1.0[6.4.1] */ +struct aes_ccm_nonce { + u8 sfn[6]; /* Little Endian */ + u8 tkid[3]; /* LE */ + struct uwb_dev_addr dest_addr; + struct uwb_dev_addr src_addr; +} __attribute__((packed)); + +/* A CCM operation label, defined on WUSB1.0[6.5.x] */ +struct aes_ccm_label { + u8 data[14]; +} __attribute__((packed)); + +/* + * Input to the key derivation sequence defined in + * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the + * PRF function. + */ +struct wusb_keydvt_in { + u8 hnonce[16]; + u8 dnonce[16]; +} __attribute__((packed)); + +/* + * Output from the key derivation sequence defined in + * WUSB1.0[6.5.1]. 
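+ *
+ * A minimal usage sketch (error handling trimmed; 'ck' is assumed to be
+ * the 16-byte connection key, 'ccm_n' a CCM nonce prepared per
+ * WUSB1.0[6.5.1] and 'hnonce'/'dnonce' the handshake nonces, all
+ * obtained elsewhere):
+ *
+ *	struct wusb_keydvt_in dvt_in;
+ *	struct wusb_keydvt_out dvt_out;
+ *	int result;
+ *
+ *	memcpy(dvt_in.hnonce, hnonce, sizeof(dvt_in.hnonce));
+ *	memcpy(dvt_in.dnonce, dnonce, sizeof(dvt_in.dnonce));
+ *	result = wusb_key_derive(&dvt_out, ck, &ccm_n, &dvt_in);
+ *	if (result < 0)
+ *		return result;
+ *
+ * On success, dvt_out.kck and dvt_out.ptk hold the derived KCK and PTK.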
+ */ +struct wusb_keydvt_out { + u8 kck[16]; + u8 ptk[16]; +} __attribute__((packed)); + +/* Pseudo Random Function WUSB1.0[6.5] */ +extern int wusb_crypto_init(void); +extern void wusb_crypto_exit(void); +extern ssize_t wusb_prf(void *out, size_t out_size, + const u8 key[16], const struct aes_ccm_nonce *_n, + const struct aes_ccm_label *a, + const void *b, size_t blen, size_t len); + +static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16], + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, + const void *b, size_t blen) +{ + return wusb_prf(out, out_size, key, n, a, b, blen, 64); +} + +static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16], + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, + const void *b, size_t blen) +{ + return wusb_prf(out, out_size, key, n, a, b, blen, 128); +} + +static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16], + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, + const void *b, size_t blen) +{ + return wusb_prf(out, out_size, key, n, a, b, blen, 256); +} + +/* Key derivation WUSB1.0[6.5.1] */ +static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out, + const u8 key[16], + const struct aes_ccm_nonce *n, + const struct wusb_keydvt_in *keydvt_in) +{ + const struct aes_ccm_label a = { .data = "Pair-wise keys" }; + return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a, + keydvt_in, sizeof(*keydvt_in)); +} + +/* + * Out-of-band MIC Generation WUSB1.0[6.5.2] + * + * Compute the MIC over @key, @n and @hs and place it in @mic_out. + * + * @mic_out: Where to place the 8 byte MIC tag + * @key: KCK from the derivation process + * @n: CCM nonce, n->sfn == 0, TKID as established in the + * process. + * @hs: Handshake struct for phase 2 of the 4-way. + * hs->bStatus and hs->bReserved are zero. + * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2] + * hs->dest_addr is the device's USB address padded with 0 + * hs->src_addr is the hosts's UWB device address + * hs->mic is ignored (as we compute that value). + */ +static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16], + const struct aes_ccm_nonce *n, + const struct usb_handshake *hs) +{ + const struct aes_ccm_label a = { .data = "out-of-bandMIC" }; + return wusb_prf_64(mic_out, 8, key, n, &a, + hs, sizeof(*hs) - sizeof(hs->MIC)); +} + +#endif /* #ifndef __WUSB_H__ */ diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h new file mode 100644 index 00000000..17df3600 --- /dev/null +++ b/include/linux/usb_usual.h @@ -0,0 +1,102 @@ +/* + * Interface to the libusual. + * + * Copyright (c) 2005 Pete Zaitcev <zaitcev@redhat.com> + * Copyright (c) 1999-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net) + * Copyright (c) 1999 Michael Gee (michael@linuxspecific.com) + */ + +#ifndef __LINUX_USB_USUAL_H +#define __LINUX_USB_USUAL_H + + +/* We should do this for cleanliness... But other usb_foo.h do not do this. */ +/* #include <linux/usb.h> */ + +/* + * The flags field, which we store in usb_device_id.driver_info. + * It is compatible with the old usb-storage flags in lower 24 bits. + */ + +/* + * Static flag definitions. We use this roundabout technique so that the + * proc_info() routine can automatically display a message for each flag. 
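+ *
+ * For example, the list below can be expanded a second time to map each
+ * flag back to a printable name (only a sketch; no such helper is
+ * declared in this header):
+ *
+ *	#define US_FLAG(name, value)	case US_FL_##name: return #name;
+ *	static const char *us_flag_string(unsigned int flag)
+ *	{
+ *		switch (flag) {
+ *		US_DO_ALL_FLAGS
+ *		default:
+ *			return "unknown";
+ *		}
+ *	}
+ *	#undef US_FLAG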
+ */ +#define US_DO_ALL_FLAGS \ + US_FLAG(SINGLE_LUN, 0x00000001) \ + /* allow access to only LUN 0 */ \ + US_FLAG(NEED_OVERRIDE, 0x00000002) \ + /* unusual_devs entry is necessary */ \ + US_FLAG(SCM_MULT_TARG, 0x00000004) \ + /* supports multiple targets */ \ + US_FLAG(FIX_INQUIRY, 0x00000008) \ + /* INQUIRY response needs faking */ \ + US_FLAG(FIX_CAPACITY, 0x00000010) \ + /* READ CAPACITY response too big */ \ + US_FLAG(IGNORE_RESIDUE, 0x00000020) \ + /* reported residue is wrong */ \ + US_FLAG(BULK32, 0x00000040) \ + /* Uses 32-byte CBW length */ \ + US_FLAG(NOT_LOCKABLE, 0x00000080) \ + /* PREVENT/ALLOW not supported */ \ + US_FLAG(GO_SLOW, 0x00000100) \ + /* Need delay after Command phase */ \ + US_FLAG(NO_WP_DETECT, 0x00000200) \ + /* Don't check for write-protect */ \ + US_FLAG(MAX_SECTORS_64, 0x00000400) \ + /* Sets max_sectors to 64 */ \ + US_FLAG(IGNORE_DEVICE, 0x00000800) \ + /* Don't claim device */ \ + US_FLAG(CAPACITY_HEURISTICS, 0x00001000) \ + /* sometimes sizes is too big */ \ + US_FLAG(MAX_SECTORS_MIN,0x00002000) \ + /* Sets max_sectors to arch min */ \ + US_FLAG(BULK_IGNORE_TAG,0x00004000) \ + /* Ignore tag mismatch in bulk operations */ \ + US_FLAG(SANE_SENSE, 0x00008000) \ + /* Sane Sense (> 18 bytes) */ \ + US_FLAG(CAPACITY_OK, 0x00010000) \ + /* READ CAPACITY response is correct */ \ + US_FLAG(BAD_SENSE, 0x00020000) \ + /* Bad Sense (never more than 18 bytes) */ \ + US_FLAG(NO_READ_DISC_INFO, 0x00040000) \ + /* cannot handle READ_DISC_INFO */ \ + US_FLAG(NO_READ_CAPACITY_16, 0x00080000) \ + /* cannot handle READ_CAPACITY_16 */ \ + US_FLAG(INITIAL_READ10, 0x00100000) \ + /* Initial READ(10) (and others) must be retried */ + +#define US_FLAG(name, value) US_FL_##name = value , +enum { US_DO_ALL_FLAGS }; +#undef US_FLAG + +/* + * The bias field for libusual and friends. + */ +#define USB_US_TYPE_NONE 0 +#define USB_US_TYPE_STOR 1 /* usb-storage */ +#define USB_US_TYPE_UB 2 /* ub */ + +#define USB_US_TYPE(flags) (((flags) >> 24) & 0xFF) +#define USB_US_ORIG_FLAGS(flags) ((flags) & 0x00FFFFFF) + +#include <linux/usb/storage.h> + +/* + */ +extern int usb_usual_ignore_device(struct usb_interface *intf); +extern struct usb_device_id usb_storage_usb_ids[]; + +#ifdef CONFIG_USB_LIBUSUAL + +extern void usb_usual_set_present(int type); +extern void usb_usual_clear_present(int type); +extern int usb_usual_check_type(const struct usb_device_id *, int type); +#else + +#define usb_usual_set_present(t) do { } while(0) +#define usb_usual_clear_present(t) do { } while(0) +#define usb_usual_check_type(id, t) (0) +#endif /* CONFIG_USB_LIBUSUAL */ + +#endif /* __LINUX_USB_USUAL_H */ diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h new file mode 100644 index 00000000..15591d2e --- /dev/null +++ b/include/linux/usbdevice_fs.h @@ -0,0 +1,207 @@ +/*****************************************************************************/ + +/* + * usbdevice_fs.h -- USB device file system. + * + * Copyright (C) 2000 + * Thomas Sailer (sailer@ife.ee.ethz.ch) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * History: + * 0.1 04.01.2000 Created + */ + +/*****************************************************************************/ + +#ifndef _LINUX_USBDEVICE_FS_H +#define _LINUX_USBDEVICE_FS_H + +#include <linux/types.h> +#include <linux/magic.h> + +/* --------------------------------------------------------------------- */ + +/* usbdevfs ioctl codes */ + +struct usbdevfs_ctrltransfer { + __u8 bRequestType; + __u8 bRequest; + __u16 wValue; + __u16 wIndex; + __u16 wLength; + __u32 timeout; /* in milliseconds */ + void __user *data; +}; + +struct usbdevfs_bulktransfer { + unsigned int ep; + unsigned int len; + unsigned int timeout; /* in milliseconds */ + void __user *data; +}; + +struct usbdevfs_setinterface { + unsigned int interface; + unsigned int altsetting; +}; + +struct usbdevfs_disconnectsignal { + unsigned int signr; + void __user *context; +}; + +#define USBDEVFS_MAXDRIVERNAME 255 + +struct usbdevfs_getdriver { + unsigned int interface; + char driver[USBDEVFS_MAXDRIVERNAME + 1]; +}; + +struct usbdevfs_connectinfo { + unsigned int devnum; + unsigned char slow; +}; + +#define USBDEVFS_URB_SHORT_NOT_OK 0x01 +#define USBDEVFS_URB_ISO_ASAP 0x02 +#define USBDEVFS_URB_BULK_CONTINUATION 0x04 +#define USBDEVFS_URB_NO_FSBR 0x20 +#define USBDEVFS_URB_ZERO_PACKET 0x40 +#define USBDEVFS_URB_NO_INTERRUPT 0x80 + +#define USBDEVFS_URB_TYPE_ISO 0 +#define USBDEVFS_URB_TYPE_INTERRUPT 1 +#define USBDEVFS_URB_TYPE_CONTROL 2 +#define USBDEVFS_URB_TYPE_BULK 3 + +struct usbdevfs_iso_packet_desc { + unsigned int length; + unsigned int actual_length; + unsigned int status; +}; + +struct usbdevfs_urb { + unsigned char type; + unsigned char endpoint; + int status; + unsigned int flags; + void __user *buffer; + int buffer_length; + int actual_length; + int start_frame; + int number_of_packets; + int error_count; + unsigned int signr; /* signal to be sent on completion, + or 0 if none should be sent. */ + void __user *usercontext; + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; +}; + +/* ioctls for talking directly to drivers */ +struct usbdevfs_ioctl { + int ifno; /* interface 0..N ; negative numbers reserved */ + int ioctl_code; /* MUST encode size + direction of data so the + * macros in <asm/ioctl.h> give correct values */ + void __user *data; /* param buffer (in, or out) */ +}; + +/* You can do most things with hubs just through control messages, + * except find out what device connects to what port. */ +struct usbdevfs_hub_portinfo { + char nports; /* number of downstream ports in this hub */ + char port [127]; /* e.g. 
port 3 connects to device 27 */ +}; + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT +#include <linux/compat.h> + +struct usbdevfs_ctrltransfer32 { + u8 bRequestType; + u8 bRequest; + u16 wValue; + u16 wIndex; + u16 wLength; + u32 timeout; /* in milliseconds */ + compat_caddr_t data; +}; + +struct usbdevfs_bulktransfer32 { + compat_uint_t ep; + compat_uint_t len; + compat_uint_t timeout; /* in milliseconds */ + compat_caddr_t data; +}; + +struct usbdevfs_disconnectsignal32 { + compat_int_t signr; + compat_caddr_t context; +}; + +struct usbdevfs_urb32 { + unsigned char type; + unsigned char endpoint; + compat_int_t status; + compat_uint_t flags; + compat_caddr_t buffer; + compat_int_t buffer_length; + compat_int_t actual_length; + compat_int_t start_frame; + compat_int_t number_of_packets; + compat_int_t error_count; + compat_uint_t signr; + compat_caddr_t usercontext; /* unused */ + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; +}; + +struct usbdevfs_ioctl32 { + s32 ifno; + s32 ioctl_code; + compat_caddr_t data; +}; +#endif +#endif /* __KERNEL__ */ + +#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer) +#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32) +#define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer) +#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32) +#define USBDEVFS_RESETEP _IOR('U', 3, unsigned int) +#define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface) +#define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int) +#define USBDEVFS_GETDRIVER _IOW('U', 8, struct usbdevfs_getdriver) +#define USBDEVFS_SUBMITURB _IOR('U', 10, struct usbdevfs_urb) +#define USBDEVFS_SUBMITURB32 _IOR('U', 10, struct usbdevfs_urb32) +#define USBDEVFS_DISCARDURB _IO('U', 11) +#define USBDEVFS_REAPURB _IOW('U', 12, void *) +#define USBDEVFS_REAPURB32 _IOW('U', 12, __u32) +#define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *) +#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32) +#define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal) +#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32) +#define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int) +#define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int) +#define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo) +#define USBDEVFS_IOCTL _IOWR('U', 18, struct usbdevfs_ioctl) +#define USBDEVFS_IOCTL32 _IOWR('U', 18, struct usbdevfs_ioctl32) +#define USBDEVFS_HUB_PORTINFO _IOR('U', 19, struct usbdevfs_hub_portinfo) +#define USBDEVFS_RESET _IO('U', 20) +#define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int) +#define USBDEVFS_DISCONNECT _IO('U', 22) +#define USBDEVFS_CONNECT _IO('U', 23) +#define USBDEVFS_CLAIM_PORT _IOR('U', 24, unsigned int) +#define USBDEVFS_RELEASE_PORT _IOR('U', 25, unsigned int) +#endif /* _LINUX_USBDEVICE_FS_H */ diff --git a/include/linux/user-return-notifier.h b/include/linux/user-return-notifier.h new file mode 100644 index 00000000..9c4a445b --- /dev/null +++ b/include/linux/user-return-notifier.h @@ -0,0 +1,49 @@ +#ifndef _LINUX_USER_RETURN_NOTIFIER_H +#define _LINUX_USER_RETURN_NOTIFIER_H + +#ifdef CONFIG_USER_RETURN_NOTIFIER + +#include <linux/list.h> +#include <linux/sched.h> + +struct user_return_notifier { + void (*on_user_return)(struct user_return_notifier *urn); + struct hlist_node link; +}; + + +void user_return_notifier_register(struct user_return_notifier *urn); +void user_return_notifier_unregister(struct user_return_notifier *urn); + +static inline void 
propagate_user_return_notify(struct task_struct *prev, + struct task_struct *next) +{ + if (test_tsk_thread_flag(prev, TIF_USER_RETURN_NOTIFY)) { + clear_tsk_thread_flag(prev, TIF_USER_RETURN_NOTIFY); + set_tsk_thread_flag(next, TIF_USER_RETURN_NOTIFY); + } +} + +void fire_user_return_notifiers(void); + +static inline void clear_user_return_notifier(struct task_struct *p) +{ + clear_tsk_thread_flag(p, TIF_USER_RETURN_NOTIFY); +} + +#else + +struct user_return_notifier {}; + +static inline void propagate_user_return_notify(struct task_struct *prev, + struct task_struct *next) +{ +} + +static inline void fire_user_return_notifiers(void) {} + +static inline void clear_user_return_notifier(struct task_struct *p) {} + +#endif + +#endif diff --git a/include/linux/user.h b/include/linux/user.h new file mode 100644 index 00000000..68daf840 --- /dev/null +++ b/include/linux/user.h @@ -0,0 +1 @@ +#include <asm/user.h> diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h new file mode 100644 index 00000000..faf46794 --- /dev/null +++ b/include/linux/user_namespace.h @@ -0,0 +1,71 @@ +#ifndef _LINUX_USER_NAMESPACE_H +#define _LINUX_USER_NAMESPACE_H + +#include <linux/kref.h> +#include <linux/nsproxy.h> +#include <linux/sched.h> +#include <linux/err.h> + +#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7) +#define UIDHASH_SZ (1 << UIDHASH_BITS) + +struct user_namespace { + struct kref kref; + struct hlist_head uidhash_table[UIDHASH_SZ]; + struct user_struct *creator; + struct work_struct destroyer; +}; + +extern struct user_namespace init_user_ns; + +#ifdef CONFIG_USER_NS + +static inline struct user_namespace *get_user_ns(struct user_namespace *ns) +{ + if (ns) + kref_get(&ns->kref); + return ns; +} + +extern int create_user_ns(struct cred *new); +extern void free_user_ns(struct kref *kref); + +static inline void put_user_ns(struct user_namespace *ns) +{ + if (ns) + kref_put(&ns->kref, free_user_ns); +} + +uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid); +gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid); + +#else + +static inline struct user_namespace *get_user_ns(struct user_namespace *ns) +{ + return &init_user_ns; +} + +static inline int create_user_ns(struct cred *new) +{ + return -EINVAL; +} + +static inline void put_user_ns(struct user_namespace *ns) +{ +} + +static inline uid_t user_ns_map_uid(struct user_namespace *to, + const struct cred *cred, uid_t uid) +{ + return uid; +} +static inline gid_t user_ns_map_gid(struct user_namespace *to, + const struct cred *cred, gid_t gid) +{ + return gid; +} + +#endif + +#endif /* _LINUX_USER_H */ diff --git a/include/linux/utime.h b/include/linux/utime.h new file mode 100644 index 00000000..5cdf673a --- /dev/null +++ b/include/linux/utime.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_UTIME_H +#define _LINUX_UTIME_H + +#include <linux/types.h> + +struct utimbuf { + __kernel_time_t actime; + __kernel_time_t modtime; +}; + +#endif diff --git a/include/linux/uts.h b/include/linux/uts.h new file mode 100644 index 00000000..6ddbd863 --- /dev/null +++ b/include/linux/uts.h @@ -0,0 +1,19 @@ +#ifndef _LINUX_UTS_H +#define _LINUX_UTS_H + +/* + * Defines for what uname() should return + */ +#ifndef UTS_SYSNAME +#define UTS_SYSNAME "Linux" +#endif + +#ifndef UTS_NODENAME +#define UTS_NODENAME CONFIG_DEFAULT_HOSTNAME /* set by sethostname() */ +#endif + +#ifndef UTS_DOMAINNAME +#define UTS_DOMAINNAME "(none)" /* set by setdomainname() */ +#endif + +#endif diff --git 
a/include/linux/utsname.h b/include/linux/utsname.h new file mode 100644 index 00000000..c714ed75 --- /dev/null +++ b/include/linux/utsname.h @@ -0,0 +1,113 @@ +#ifndef _LINUX_UTSNAME_H +#define _LINUX_UTSNAME_H + +#define __OLD_UTS_LEN 8 + +struct oldold_utsname { + char sysname[9]; + char nodename[9]; + char release[9]; + char version[9]; + char machine[9]; +}; + +#define __NEW_UTS_LEN 64 + +struct old_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; +}; + +struct new_utsname { + char sysname[__NEW_UTS_LEN + 1]; + char nodename[__NEW_UTS_LEN + 1]; + char release[__NEW_UTS_LEN + 1]; + char version[__NEW_UTS_LEN + 1]; + char machine[__NEW_UTS_LEN + 1]; + char domainname[__NEW_UTS_LEN + 1]; +}; + +#ifdef __KERNEL__ + +#include <linux/sched.h> +#include <linux/kref.h> +#include <linux/nsproxy.h> +#include <linux/err.h> + +enum uts_proc { + UTS_PROC_OSTYPE, + UTS_PROC_OSRELEASE, + UTS_PROC_VERSION, + UTS_PROC_HOSTNAME, + UTS_PROC_DOMAINNAME, +}; + +struct user_namespace; +extern struct user_namespace init_user_ns; + +struct uts_namespace { + struct kref kref; + struct new_utsname name; + struct user_namespace *user_ns; +}; +extern struct uts_namespace init_uts_ns; + +#ifdef CONFIG_UTS_NS +static inline void get_uts_ns(struct uts_namespace *ns) +{ + kref_get(&ns->kref); +} + +extern struct uts_namespace *copy_utsname(unsigned long flags, + struct task_struct *tsk); +extern void free_uts_ns(struct kref *kref); + +static inline void put_uts_ns(struct uts_namespace *ns) +{ + kref_put(&ns->kref, free_uts_ns); +} +#else +static inline void get_uts_ns(struct uts_namespace *ns) +{ +} + +static inline void put_uts_ns(struct uts_namespace *ns) +{ +} + +static inline struct uts_namespace *copy_utsname(unsigned long flags, + struct task_struct *tsk) +{ + if (flags & CLONE_NEWUTS) + return ERR_PTR(-EINVAL); + + return tsk->nsproxy->uts_ns; +} +#endif + +#ifdef CONFIG_PROC_SYSCTL +extern void uts_proc_notify(enum uts_proc proc); +#else +static inline void uts_proc_notify(enum uts_proc proc) +{ +} +#endif + +static inline struct new_utsname *utsname(void) +{ + return ¤t->nsproxy->uts_ns->name; +} + +static inline struct new_utsname *init_utsname(void) +{ + return &init_uts_ns.name; +} + +extern struct rw_semaphore uts_sem; + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_UTSNAME_H */ diff --git a/include/linux/uuid.h b/include/linux/uuid.h new file mode 100644 index 00000000..5b7efbfc --- /dev/null +++ b/include/linux/uuid.h @@ -0,0 +1,70 @@ +/* + * UUID/GUID definition + * + * Copyright (C) 2010, Intel Corp. + * Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_UUID_H_ +#define _LINUX_UUID_H_ + +#include <linux/types.h> +#include <linux/string.h> + +typedef struct { + __u8 b[16]; +} uuid_le; + +typedef struct { + __u8 b[16]; +} uuid_be; + +#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_le) \ +{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ + (b) & 0xff, ((b) >> 8) & 0xff, \ + (c) & 0xff, ((c) >> 8) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_be) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +#define NULL_UUID_LE \ + UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00) + +#define NULL_UUID_BE \ + UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00) + +static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2) +{ + return memcmp(&u1, &u2, sizeof(uuid_le)); +} + +static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2) +{ + return memcmp(&u1, &u2, sizeof(uuid_be)); +} + +extern void uuid_le_gen(uuid_le *u); +extern void uuid_be_gen(uuid_be *u); + +#endif diff --git a/include/linux/uvcvideo.h b/include/linux/uvcvideo.h new file mode 100644 index 00000000..f46a53f0 --- /dev/null +++ b/include/linux/uvcvideo.h @@ -0,0 +1,69 @@ +#ifndef __LINUX_UVCVIDEO_H_ +#define __LINUX_UVCVIDEO_H_ + +#include <linux/ioctl.h> +#include <linux/types.h> + +/* + * Dynamic controls + */ + +/* Data types for UVC control data */ +#define UVC_CTRL_DATA_TYPE_RAW 0 +#define UVC_CTRL_DATA_TYPE_SIGNED 1 +#define UVC_CTRL_DATA_TYPE_UNSIGNED 2 +#define UVC_CTRL_DATA_TYPE_BOOLEAN 3 +#define UVC_CTRL_DATA_TYPE_ENUM 4 +#define UVC_CTRL_DATA_TYPE_BITMASK 5 + +/* Control flags */ +#define UVC_CTRL_FLAG_SET_CUR (1 << 0) +#define UVC_CTRL_FLAG_GET_CUR (1 << 1) +#define UVC_CTRL_FLAG_GET_MIN (1 << 2) +#define UVC_CTRL_FLAG_GET_MAX (1 << 3) +#define UVC_CTRL_FLAG_GET_RES (1 << 4) +#define UVC_CTRL_FLAG_GET_DEF (1 << 5) +/* Control should be saved at suspend and restored at resume. */ +#define UVC_CTRL_FLAG_RESTORE (1 << 6) +/* Control can be updated by the camera. 
*/ +#define UVC_CTRL_FLAG_AUTO_UPDATE (1 << 7) + +#define UVC_CTRL_FLAG_GET_RANGE \ + (UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_MIN | \ + UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | \ + UVC_CTRL_FLAG_GET_DEF) + +struct uvc_menu_info { + __u32 value; + __u8 name[32]; +}; + +struct uvc_xu_control_mapping { + __u32 id; + __u8 name[32]; + __u8 entity[16]; + __u8 selector; + + __u8 size; + __u8 offset; + __u32 v4l2_type; + __u32 data_type; + + struct uvc_menu_info __user *menu_info; + __u32 menu_count; + + __u32 reserved[4]; +}; + +struct uvc_xu_control_query { + __u8 unit; + __u8 selector; + __u8 query; + __u16 size; + __u8 __user *data; +}; + +#define UVCIOC_CTRL_MAP _IOWR('u', 0x20, struct uvc_xu_control_mapping) +#define UVCIOC_CTRL_QUERY _IOWR('u', 0x21, struct uvc_xu_control_query) + +#endif diff --git a/include/linux/uwb.h b/include/linux/uwb.h new file mode 100644 index 00000000..7dbbee97 --- /dev/null +++ b/include/linux/uwb.h @@ -0,0 +1,831 @@ +/* + * Ultra Wide Band + * UWB API + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: doc: overview of the API, different parts and pointers + */ + +#ifndef __LINUX__UWB_H__ +#define __LINUX__UWB_H__ + +#include <linux/limits.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/timer.h> +#include <linux/wait.h> +#include <linux/workqueue.h> +#include <linux/uwb/spec.h> +#include <asm/page.h> + +struct uwb_dev; +struct uwb_beca_e; +struct uwb_rc; +struct uwb_rsv; +struct uwb_dbg; + +/** + * struct uwb_dev - a UWB Device + * @rc: UWB Radio Controller that discovered the device (kind of its + * parent). + * @bce: a beacon cache entry for this device; or NULL if the device + * is a local radio controller. + * @mac_addr: the EUI-48 address of this device. + * @dev_addr: the current DevAddr used by this device. + * @beacon_slot: the slot number the beacon is using. + * @streams: bitmap of streams allocated to reservations targeted at + * this device. For an RC, this is the streams allocated for + * reservations targeted at DevAddrs. + * + * A UWB device may either by a neighbor or part of a local radio + * controller. + */ +struct uwb_dev { + struct mutex mutex; + struct list_head list_node; + struct device dev; + struct uwb_rc *rc; /* radio controller */ + struct uwb_beca_e *bce; /* Beacon Cache Entry */ + + struct uwb_mac_addr mac_addr; + struct uwb_dev_addr dev_addr; + int beacon_slot; + DECLARE_BITMAP(streams, UWB_NUM_STREAMS); + DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); +}; +#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) + +/** + * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs + * + * RC[CE]Bs have a 'context ID' field that matches the command with + * the event received to confirm it. 
+ * + * Maximum number of context IDs + */ +enum { UWB_RC_CTX_MAX = 256 }; + + +/** Notification chain head for UWB generated events to listeners */ +struct uwb_notifs_chain { + struct list_head list; + struct mutex mutex; +}; + +/* Beacon cache list */ +struct uwb_beca { + struct list_head list; + size_t entries; + struct mutex mutex; +}; + +/* Event handling thread. */ +struct uwbd { + int pid; + struct task_struct *task; + wait_queue_head_t wq; + struct list_head event_list; + spinlock_t event_list_lock; +}; + +/** + * struct uwb_mas_bm - a bitmap of all MAS in a superframe + * @bm: a bitmap of length #UWB_NUM_MAS + */ +struct uwb_mas_bm { + DECLARE_BITMAP(bm, UWB_NUM_MAS); + DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); + int safe; + int unsafe; +}; + +/** + * uwb_rsv_state - UWB Reservation state. + * + * NONE - reservation is not active (no DRP IE being transmitted). + * + * Owner reservation states: + * + * INITIATED - owner has sent an initial DRP request. + * PENDING - target responded with pending Reason Code. + * MODIFIED - reservation manager is modifying an established + * reservation with a different MAS allocation. + * ESTABLISHED - the reservation has been successfully negotiated. + * + * Target reservation states: + * + * DENIED - request is denied. + * ACCEPTED - request is accepted. + * PENDING - PAL has yet to make a decision to whether to accept or + * deny. + * + * FIXME: further target states TBD. + */ +enum uwb_rsv_state { + UWB_RSV_STATE_NONE = 0, + UWB_RSV_STATE_O_INITIATED, + UWB_RSV_STATE_O_PENDING, + UWB_RSV_STATE_O_MODIFIED, + UWB_RSV_STATE_O_ESTABLISHED, + UWB_RSV_STATE_O_TO_BE_MOVED, + UWB_RSV_STATE_O_MOVE_EXPANDING, + UWB_RSV_STATE_O_MOVE_COMBINING, + UWB_RSV_STATE_O_MOVE_REDUCING, + UWB_RSV_STATE_T_ACCEPTED, + UWB_RSV_STATE_T_DENIED, + UWB_RSV_STATE_T_CONFLICT, + UWB_RSV_STATE_T_PENDING, + UWB_RSV_STATE_T_EXPANDING_ACCEPTED, + UWB_RSV_STATE_T_EXPANDING_CONFLICT, + UWB_RSV_STATE_T_EXPANDING_PENDING, + UWB_RSV_STATE_T_EXPANDING_DENIED, + UWB_RSV_STATE_T_RESIZED, + + UWB_RSV_STATE_LAST, +}; + +enum uwb_rsv_target_type { + UWB_RSV_TARGET_DEV, + UWB_RSV_TARGET_DEVADDR, +}; + +/** + * struct uwb_rsv_target - the target of a reservation. + * + * Reservations unicast and targeted at a single device + * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a + * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR). + */ +struct uwb_rsv_target { + enum uwb_rsv_target_type type; + union { + struct uwb_dev *dev; + struct uwb_dev_addr devaddr; + }; +}; + +struct uwb_rsv_move { + struct uwb_mas_bm final_mas; + struct uwb_ie_drp *companion_drp_ie; + struct uwb_mas_bm companion_mas; +}; + +/* + * Number of streams reserved for reservations targeted at DevAddrs. 
+ */
+#define UWB_NUM_GLOBAL_STREAMS 1
+
+typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
+
+/**
+ * struct uwb_rsv - a DRP reservation
+ *
+ * Data structure management:
+ *
+ * @rc: the radio controller this reservation is for
+ *      (as target or owner)
+ * @rc_node: a list node for the RC
+ * @pal_node: a list node for the PAL
+ *
+ * Owner and target parameters:
+ *
+ * @owner: the UWB device owning this reservation
+ * @target: the target UWB device
+ * @type: reservation type
+ *
+ * Owner parameters:
+ *
+ * @max_mas: maximum number of MAS
+ * @min_mas: minimum number of MAS
+ * @sparsity: owner selected sparsity
+ * @is_multicast: true iff multicast
+ *
+ * @callback: callback function when the reservation completes
+ * @pal_priv: private data for the PAL making the reservation
+ *
+ * Reservation status:
+ *
+ * @status: negotiation status
+ * @stream: stream index allocated for this reservation
+ * @tiebreaker: conflict tiebreaker for this reservation
+ * @mas: reserved MAS
+ * @drp_ie: the DRP IE
+ * @ie_valid: true iff the DRP IE matches the reservation parameters
+ *
+ * DRP reservations are uniquely identified by the owner, target and
+ * stream index. However, when using a DevAddr as a target (e.g., for
+ * a WUSB cluster reservation) the responses may be received from
+ * devices with different DevAddrs. In this case, reservations are
+ * uniquely identified by just the stream index. A number of stream
+ * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
+ */
+struct uwb_rsv {
+	struct uwb_rc *rc;
+	struct list_head rc_node;
+	struct list_head pal_node;
+	struct kref kref;
+
+	struct uwb_dev *owner;
+	struct uwb_rsv_target target;
+	enum uwb_drp_type type;
+	int max_mas;
+	int min_mas;
+	int max_interval;
+	bool is_multicast;
+
+	uwb_rsv_cb_f callback;
+	void *pal_priv;
+
+	enum uwb_rsv_state state;
+	bool needs_release_companion_mas;
+	u8 stream;
+	u8 tiebreaker;
+	struct uwb_mas_bm mas;
+	struct uwb_ie_drp *drp_ie;
+	struct uwb_rsv_move mv;
+	bool ie_valid;
+	struct timer_list timer;
+	struct work_struct handle_timeout_work;
+};
+
+static const
+struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };
+
+static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
+{
+	bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
+}
+
+/**
+ * struct uwb_drp_avail - a radio controller's view of MAS usage
+ * @global: MAS unused by neighbors (excluding reservations targeted
+ *          or owned by the local radio controller) or the beacon period
+ * @local: MAS unused by local established reservations
+ * @pending: MAS unused by local pending reservations
+ * @ie: DRP Availability IE to be included in the beacon
+ * @ie_valid: true iff @ie is valid and does not need to be regenerated from
+ *            @global and @local
+ *
+ * Each radio controller maintains a view of MAS usage or
+ * availability. MAS available for a new reservation are determined
+ * from the intersection of @global, @local, and @pending.
+ *
+ * The radio controller must transmit a DRP Availability IE that's the
+ * intersection of @global and @local.
+ *
+ * A set bit indicates the MAS is unused and available.
+ *
+ * rc->rsvs_mutex should be held before accessing this data structure.
+ *
+ * [ECMA-368] section 17.4.3.
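+ *
+ * Purely as an illustration of that intersection (assuming @da points at
+ * this structure and rc->rsvs_mutex is already held), the MAS usable by a
+ * new reservation could be computed with the generic bitmap helpers:
+ *
+ *	DECLARE_BITMAP(avail, UWB_NUM_MAS);
+ *
+ *	bitmap_and(avail, da->global, da->local, UWB_NUM_MAS);
+ *	bitmap_and(avail, avail, da->pending, UWB_NUM_MAS);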
+ */ +struct uwb_drp_avail { + DECLARE_BITMAP(global, UWB_NUM_MAS); + DECLARE_BITMAP(local, UWB_NUM_MAS); + DECLARE_BITMAP(pending, UWB_NUM_MAS); + struct uwb_ie_drp_avail ie; + bool ie_valid; +}; + +struct uwb_drp_backoff_win { + u8 window; + u8 n; + int total_expired; + struct timer_list timer; + bool can_reserve_extra_mases; +}; + +const char *uwb_rsv_state_str(enum uwb_rsv_state state); +const char *uwb_rsv_type_str(enum uwb_drp_type type); + +struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, + void *pal_priv); +void uwb_rsv_destroy(struct uwb_rsv *rsv); + +int uwb_rsv_establish(struct uwb_rsv *rsv); +int uwb_rsv_modify(struct uwb_rsv *rsv, + int max_mas, int min_mas, int sparsity); +void uwb_rsv_terminate(struct uwb_rsv *rsv); + +void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); + +void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); + +/** + * Radio Control Interface instance + * + * + * Life cycle rules: those of the UWB Device. + * + * @index: an index number for this radio controller, as used in the + * device name. + * @version: version of protocol supported by this device + * @priv: Backend implementation; rw with uwb_dev.dev.sem taken. + * @cmd: Backend implementation to execute commands; rw and call + * only with uwb_dev.dev.sem taken. + * @reset: Hardware reset of radio controller and any PAL controllers. + * @filter: Backend implementation to manipulate data to and from device + * to be compliant to specification assumed by driver (WHCI + * 0.95). + * + * uwb_dev.dev.mutex is used to execute commands and update + * the corresponding structures; can't use a spinlock + * because rc->cmd() can sleep. + * @ies: This is a dynamically allocated array cacheing the + * IEs (settable by the host) that the beacon of this + * radio controller is currently sending. + * + * In reality, we store here the full command we set to + * the radio controller (which is basically a command + * prefix followed by all the IEs the beacon currently + * contains). This way we don't have to realloc and + * memcpy when setting it. + * + * We set this up in uwb_rc_ie_setup(), where we alloc + * this struct, call get_ie() [so we know which IEs are + * currently being sent, if any]. + * + * @ies_capacity:Amount of space (in bytes) allocated in @ies. The + * amount used is given by sizeof(*ies) plus ies->wIELength + * (which is a little endian quantity all the time). 
+ * @ies_mutex: protect the IE cache + * @dbg: information for the debug interface + */ +struct uwb_rc { + struct uwb_dev uwb_dev; + int index; + u16 version; + + struct module *owner; + void *priv; + int (*start)(struct uwb_rc *rc); + void (*stop)(struct uwb_rc *rc); + int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t); + int (*reset)(struct uwb_rc *rc); + int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *); + int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t, + size_t *, size_t *); + + spinlock_t neh_lock; /* protects neh_* and ctx_* */ + struct list_head neh_list; /* Open NE handles */ + unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)]; + u8 ctx_roll; + + int beaconing; /* Beaconing state [channel number] */ + int beaconing_forced; + int scanning; + enum uwb_scan_type scan_type:3; + unsigned ready:1; + struct uwb_notifs_chain notifs_chain; + struct uwb_beca uwb_beca; + + struct uwbd uwbd; + + struct uwb_drp_backoff_win bow; + struct uwb_drp_avail drp_avail; + struct list_head reservations; + struct list_head cnflt_alien_list; + struct uwb_mas_bm cnflt_alien_bitmap; + struct mutex rsvs_mutex; + spinlock_t rsvs_lock; + struct workqueue_struct *rsv_workq; + + struct delayed_work rsv_update_work; + struct delayed_work rsv_alien_bp_work; + int set_drp_ie_pending; + struct mutex ies_mutex; + struct uwb_rc_cmd_set_ie *ies; + size_t ies_capacity; + + struct list_head pals; + int active_pals; + + struct uwb_dbg *dbg; +}; + + +/** + * struct uwb_pal - a UWB PAL + * @name: descriptive name for this PAL (wusbhc, wlp, etc.). + * @device: a device for the PAL. Used to link the PAL and the radio + * controller in sysfs. + * @rc: the radio controller the PAL uses. + * @channel_changed: called when the channel used by the radio changes. + * A channel of -1 means the channel has been stopped. + * @new_rsv: called when a peer requests a reservation (may be NULL if + * the PAL cannot accept reservation requests). + * @channel: channel being used by the PAL; 0 if the PAL isn't using + * the radio; -1 if the PAL wishes to use the radio but + * cannot. + * @debugfs_dir: a debugfs directory which the PAL can use for its own + * debugfs files. + * + * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB + * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). + * + * The PALs using a radio controller must register themselves to + * permit the UWB stack to coordinate usage of the radio between the + * various PALs or to allow PALs to response to certain requests from + * peers. + * + * A struct uwb_pal should be embedded in a containing structure + * belonging to the PAL and initialized with uwb_pal_init()). Fields + * should be set appropriately by the PAL before registering the PAL + * with uwb_pal_register(). 
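+ *
+ * A rough sketch of that sequence (the containing structure and the
+ * 'example_*', 'ep' and 'rc' names are made up for illustration):
+ *
+ *	struct example_pal {
+ *		struct uwb_pal pal;
+ *		...
+ *	};
+ *
+ *	uwb_pal_init(&ep->pal);
+ *	ep->pal.name = "example";
+ *	ep->pal.device = parent_dev;
+ *	ep->pal.rc = rc;
+ *	ep->pal.channel_changed = example_channel_changed;
+ *	ep->pal.new_rsv = example_new_rsv;
+ *	err = uwb_pal_register(&ep->pal);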
+ */ +struct uwb_pal { + struct list_head node; + const char *name; + struct device *device; + struct uwb_rc *rc; + + void (*channel_changed)(struct uwb_pal *pal, int channel); + void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); + + int channel; + struct dentry *debugfs_dir; +}; + +void uwb_pal_init(struct uwb_pal *pal); +int uwb_pal_register(struct uwb_pal *pal); +void uwb_pal_unregister(struct uwb_pal *pal); + +int uwb_radio_start(struct uwb_pal *pal); +void uwb_radio_stop(struct uwb_pal *pal); + +/* + * General public API + * + * This API can be used by UWB device drivers or by those implementing + * UWB Radio Controllers + */ +struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, + const struct uwb_dev_addr *devaddr); +struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *); +static inline void uwb_dev_get(struct uwb_dev *uwb_dev) +{ + get_device(&uwb_dev->dev); +} +static inline void uwb_dev_put(struct uwb_dev *uwb_dev) +{ + put_device(&uwb_dev->dev); +} +struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev); + +/** + * Callback function for 'uwb_{dev,rc}_foreach()'. + * + * @dev: Linux device instance + * 'uwb_dev = container_of(dev, struct uwb_dev, dev)' + * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'. + * + * @returns: 0 to continue the iterations, any other val to stop + * iterating and return the value to the caller of + * _foreach(). + */ +typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv); +int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv); + +struct uwb_rc *uwb_rc_alloc(void); +struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *); +struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *); +void uwb_rc_put(struct uwb_rc *rc); + +typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg, + struct uwb_rceb *reply, ssize_t reply_size); + +int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + u8 expected_type, u16 expected_event, + uwb_rc_cmd_cb_f cb, void *arg); +ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + struct uwb_rceb *reply, size_t reply_size); +ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + u8 expected_type, u16 expected_event, + struct uwb_rceb **preply); + +size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); + +int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *); +int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *); +int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *); +int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *); +int __uwb_mac_addr_assigned_check(struct device *, void *); +int __uwb_dev_addr_assigned_check(struct device *, void *); + +/* Print in @buf a pretty repr of @addr */ +static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size, + const struct uwb_dev_addr *addr) +{ + return __uwb_addr_print(buf, buf_size, addr->data, 0); +} + +/* Print in @buf a pretty repr of @addr */ +static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size, + const struct uwb_mac_addr *addr) +{ + return __uwb_addr_print(buf, buf_size, addr->data, 1); +} + +/* @returns 0 if device addresses @addr2 and @addr1 are equal */ +static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1, + const struct uwb_dev_addr *addr2) +{ + return memcmp(addr1, addr2, sizeof(*addr1)); +} + +/* @returns 0 if MAC 
addresses @addr2 and @addr1 are equal */ +static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1, + const struct uwb_mac_addr *addr2) +{ + return memcmp(addr1, addr2, sizeof(*addr1)); +} + +/* @returns !0 if a MAC @addr is a broadcast address */ +static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr) +{ + struct uwb_mac_addr bcast = { + .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } + }; + return !uwb_mac_addr_cmp(addr, &bcast); +} + +/* @returns !0 if a MAC @addr is all zeroes*/ +static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr) +{ + struct uwb_mac_addr unset = { + .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } + }; + return !uwb_mac_addr_cmp(addr, &unset); +} + +/* @returns !0 if the address is in use. */ +static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc, + struct uwb_dev_addr *addr) +{ + return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr); +} + +/* + * UWB Radio Controller API + * + * This API is used (in addition to the general API) to implement UWB + * Radio Controllers. + */ +void uwb_rc_init(struct uwb_rc *); +int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv); +void uwb_rc_rm(struct uwb_rc *); +void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); +void uwb_rc_neh_error(struct uwb_rc *, int); +void uwb_rc_reset_all(struct uwb_rc *rc); +void uwb_rc_pre_reset(struct uwb_rc *rc); +int uwb_rc_post_reset(struct uwb_rc *rc); + +/** + * uwb_rsv_is_owner - is the owner of this reservation the RC? + * @rsv: the reservation + */ +static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) +{ + return rsv->owner == &rsv->rc->uwb_dev; +} + +/** + * enum uwb_notifs - UWB events that can be passed to any listeners + * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. + * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. + * + * Higher layers can register callback functions with the radio + * controller using uwb_notifs_register(). The radio controller + * maintains a list of all registered handlers and will notify all + * nodes when an event occurs. + */ +enum uwb_notifs { + UWB_NOTIF_ONAIR, + UWB_NOTIF_OFFAIR, +}; + +/* Callback function registered with UWB */ +struct uwb_notifs_handler { + struct list_head list_node; + void (*cb)(void *, struct uwb_dev *, enum uwb_notifs); + void *data; +}; + +int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *); +int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *); + + +/** + * UWB radio controller Event Size Entry (for creating entry tables) + * + * WUSB and WHCI define events and notifications, and they might have + * fixed or variable size. + * + * Each event/notification has a size which is not necessarily known + * in advance based on the event code. As well, vendor specific + * events/notifications will have a size impossible to determine + * unless we know about the device's specific details. + * + * It was way too smart of the spec writers not to think that it would + * be impossible for a generic driver to skip over vendor specific + * events/notifications if there are no LENGTH fields in the HEADER of + * each message...the transaction size cannot be counted on as the + * spec does not forbid to pack more than one event in a single + * transaction. + * + * Thus, we guess sizes with tables (or for events, when you know the + * size ahead of time you can use uwb_rc_neh_extra_size*()). We + * register tables with the known events and their sizes, and then we + * traverse those tables. 
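To make the table idea concrete, a hypothetical driver that knows about three vendor events (IDs, sizes and vendor/product values are invented; the entry format and registration call are the ones defined just below) might do:

static const struct uwb_est_entry my_est[] = {
	[0] = { .size = sizeof(struct uwb_rceb) },
	[1] = { .size = sizeof(struct uwb_rceb) + 2 },
	/* event 2: a 16-bit length field starts at payload offset 4 */
	[2] = { .size = sizeof(struct uwb_rceb), .offset = 5,
		.type = UWB_EST_16 },
};

static int my_est_setup(void)
{
	/* type/code/vendor/product values are placeholders */
	return uwb_est_register(0, 0, 0x1234, 0x5678,
				my_est, ARRAY_SIZE(my_est));
}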
For those with variable length, we provide a + * way to lookup the size inside the event/notification's + * payload. This allows device-specific event size tables to be + * registered. + * + * @size: Size of the payload + * + * @offset: if != 0, at offset @offset-1 starts a field with a length + * that has to be added to @size. The format of the field is + * given by @type. + * + * @type: Type and length of the offset field. Most common is LE 16 + * bits (that's why that is zero); others are there mostly to + * cover for bugs and weirdos. + */ +struct uwb_est_entry { + size_t size; + unsigned offset; + enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type; +}; + +int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product, + const struct uwb_est_entry *, size_t entries); +int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product, + const struct uwb_est_entry *, size_t entries); +ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, + size_t len); + +/* -- Misc */ + +enum { + EDC_MAX_ERRORS = 10, + EDC_ERROR_TIMEFRAME = HZ, +}; + +/* error density counter */ +struct edc { + unsigned long timestart; + u16 errorcount; +}; + +static inline +void edc_init(struct edc *edc) +{ + edc->timestart = jiffies; +} + +/* Called when an error occurred. + * This is way to determine if the number of acceptable errors per time + * period has been exceeded. It is not accurate as there are cases in which + * this scheme will not work, for example if there are periodic occurrences + * of errors that straddle updates to the start time. This scheme is + * sufficient for our usage. + * + * @returns 1 if maximum acceptable errors per timeframe has been exceeded. + */ +static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe) +{ + unsigned long now; + + now = jiffies; + if (now - err_hist->timestart > timeframe) { + err_hist->errorcount = 1; + err_hist->timestart = now; + } else if (++err_hist->errorcount > max_err) { + err_hist->errorcount = 0; + err_hist->timestart = now; + return 1; + } + return 0; +} + + +/* Information Element handling */ + +struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); +int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); +int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); + +/* + * Transmission statistics + * + * UWB uses LQI and RSSI (one byte values) for reporting radio signal + * strength and line quality indication. We do quick and dirty + * averages of those. They are signed values, btw. + * + * For 8 bit quantities, we keep the min, the max, an accumulator + * (@sigma) and a # of samples. When @samples gets to 255, we compute + * the average (@sigma / @samples), place it in @sigma and reset + * @samples to 1 (so we use it as the first sample). + * + * Now, statistically speaking, probably I am kicking the kidneys of + * some books I have in my shelves collecting dust, but I just want to + * get an approx, not the Nobel. + * + * LOCKING: there is no locking per se, but we try to keep a lockless + * schema. Only _add_samples() modifies the values--as long as you + * have other locking on top that makes sure that no two calls of + * _add_sample() happen at the same time, then we are fine. Now, for + * resetting the values we just set @samples to 0 and that makes the + * next _add_sample() to start with defaults. Reading the values in + * _show() currently can race, so you need to make sure the calls are + * under the same lock that protects calls to _add_sample(). 
FIXME: + * currently unlocked (It is not ultraprecise but does the trick. Bite + * me). + */ +struct stats { + s8 min, max; + s16 sigma; + atomic_t samples; +}; + +static inline +void stats_init(struct stats *stats) +{ + atomic_set(&stats->samples, 0); + wmb(); +} + +static inline +void stats_add_sample(struct stats *stats, s8 sample) +{ + s8 min, max; + s16 sigma; + unsigned samples = atomic_read(&stats->samples); + if (samples == 0) { /* it was zero before, so we initialize */ + min = 127; + max = -128; + sigma = 0; + } else { + min = stats->min; + max = stats->max; + sigma = stats->sigma; + } + + if (sample < min) /* compute new values */ + min = sample; + else if (sample > max) + max = sample; + sigma += sample; + + stats->min = min; /* commit */ + stats->max = max; + stats->sigma = sigma; + if (atomic_add_return(1, &stats->samples) > 255) { + /* wrapped around! reset */ + stats->sigma = sigma / 256; + atomic_set(&stats->samples, 1); + } +} + +static inline ssize_t stats_show(struct stats *stats, char *buf) +{ + int min, max, avg; + int samples = atomic_read(&stats->samples); + if (samples == 0) + min = max = avg = 0; + else { + min = stats->min; + max = stats->max; + avg = stats->sigma / samples; + } + return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg); +} + +static inline ssize_t stats_store(struct stats *stats, const char *buf, + size_t size) +{ + stats_init(stats); + return size; +} + +#endif /* #ifndef __LINUX__UWB_H__ */ diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h new file mode 100644 index 00000000..8da004e2 --- /dev/null +++ b/include/linux/uwb/debug-cmd.h @@ -0,0 +1,68 @@ +/* + * Ultra Wide Band + * Debug interface commands + * + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __LINUX__UWB__DEBUG_CMD_H__ +#define __LINUX__UWB__DEBUG_CMD_H__ + +#include <linux/types.h> + +/* + * Debug interface commands + * + * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation. + * + * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation. 
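For a feel of how this interface is driven, a test tool might fill in the command structure defined below along these lines (all values, including the target address, are made up; the structure would then be written to the radio controller's debugfs command file):

static const struct uwb_dbg_cmd my_cmd = {
	.type = UWB_DBG_CMD_RSV_ESTABLISH,
	.rsv_establish = {
		.target       = { 0x32, 0x01, 0x00, 0x00, 0x00, 0x00 },
		.type         = 2,	/* DRP reservation type, e.g. "hard" */
		.max_mas      = 256,
		.min_mas      = 16,
		.max_interval = 2,
	},
};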
+ */ + +enum uwb_dbg_cmd_type { + UWB_DBG_CMD_RSV_ESTABLISH = 1, + UWB_DBG_CMD_RSV_TERMINATE = 2, + UWB_DBG_CMD_IE_ADD = 3, + UWB_DBG_CMD_IE_RM = 4, + UWB_DBG_CMD_RADIO_START = 5, + UWB_DBG_CMD_RADIO_STOP = 6, +}; + +struct uwb_dbg_cmd_rsv_establish { + __u8 target[6]; + __u8 type; + __u16 max_mas; + __u16 min_mas; + __u8 max_interval; +}; + +struct uwb_dbg_cmd_rsv_terminate { + int index; +}; + +struct uwb_dbg_cmd_ie { + __u8 data[128]; + int len; +}; + +struct uwb_dbg_cmd { + __u32 type; + union { + struct uwb_dbg_cmd_rsv_establish rsv_establish; + struct uwb_dbg_cmd_rsv_terminate rsv_terminate; + struct uwb_dbg_cmd_ie ie_add; + struct uwb_dbg_cmd_ie ie_rm; + }; +}; + +#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */ diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h new file mode 100644 index 00000000..b52e44f1 --- /dev/null +++ b/include/linux/uwb/spec.h @@ -0,0 +1,780 @@ +/* + * Ultra Wide Band + * UWB Standard definitions + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * All these definitions are based on the ECMA-368 standard. + * + * Note all definitions are Little Endian in the wire, and we will + * convert them to host order before operating on the bitfields (that + * yes, we use extensively). + */ + +#ifndef __LINUX__UWB_SPEC_H__ +#define __LINUX__UWB_SPEC_H__ + +#include <linux/types.h> +#include <linux/bitmap.h> + +#define i1480_FW 0x00000303 +/* #define i1480_FW 0x00000302 */ + +/** + * Number of Medium Access Slots in a superframe. + * + * UWB divides time in SuperFrames, each one divided in 256 pieces, or + * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the + * basic bandwidth allocation unit in UWB. + */ +enum { UWB_NUM_MAS = 256 }; + +/** + * Number of Zones in superframe. + * + * UWB divides the superframe into zones with numbering starting from BPST. + * See MBOA MAC[16.8.6] + */ +enum { UWB_NUM_ZONES = 16 }; + +/* + * Number of MAS in a zone. + */ +#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) + +/* + * Number of MAS required before a row can be considered available. + */ +#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) + +/* + * Number of streams per DRP reservation between a pair of devices. + * + * [ECMA-368] section 16.8.6. + */ +enum { UWB_NUM_STREAMS = 8 }; + +/* + * mMasLength + * + * The length of a MAS in microseconds. + * + * [ECMA-368] section 17.16. + */ +enum { UWB_MAS_LENGTH_US = 256 }; + +/* + * mBeaconSlotLength + * + * The length of the beacon slot in microseconds. + * + * [ECMA-368] section 17.16 + */ +enum { UWB_BEACON_SLOT_LENGTH_US = 85 }; + +/* + * mMaxLostBeacons + * + * The number beacons missing in consecutive superframes before a + * device can be considered as unreachable. 
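(For scale: a superframe is UWB_NUM_MAS x UWB_MAS_LENGTH_US = 256 x 256 microseconds = 65.536 ms, so losing three consecutive beacons corresponds to roughly 200 ms of silence before the device is written off.)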
+ * + * [ECMA-368] section 17.16 + */ +enum { UWB_MAX_LOST_BEACONS = 3 }; + +/* + * mDRPBackOffWinMin + * + * The minimum number of superframes to wait before trying to reserve + * extra MAS. + * + * [ECMA-368] section 17.16 + */ +enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; + +/* + * mDRPBackOffWinMax + * + * The maximum number of superframes to wait before trying to reserve + * extra MAS. + * + * [ECMA-368] section 17.16 + */ +enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; + +/* + * Length of a superframe in microseconds. + */ +#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) + +/** + * UWB MAC address + * + * It is *imperative* that this struct is exactly 6 packed bytes (as + * it is also used to define headers sent down and up the wire/radio). + */ +struct uwb_mac_addr { + u8 data[6]; +} __attribute__((packed)); + + +/** + * UWB device address + * + * It is *imperative* that this struct is exactly 6 packed bytes (as + * it is also used to define headers sent down and up the wire/radio). + */ +struct uwb_dev_addr { + u8 data[2]; +} __attribute__((packed)); + + +/** + * Types of UWB addresses + * + * Order matters (by size). + */ +enum uwb_addr_type { + UWB_ADDR_DEV = 0, + UWB_ADDR_MAC = 1, +}; + + +/** Size of a char buffer for printing a MAC/device address */ +enum { UWB_ADDR_STRSIZE = 32 }; + + +/** UWB WiMedia protocol IDs. */ +enum uwb_prid { + UWB_PRID_WLP_RESERVED = 0x0000, + UWB_PRID_WLP = 0x0001, + UWB_PRID_WUSB_BOT = 0x0010, + UWB_PRID_WUSB = 0x0010, + UWB_PRID_WUSB_TOP = 0x001F, +}; + + +/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */ +enum uwb_phy_rate { + UWB_PHY_RATE_53 = 0, + UWB_PHY_RATE_80, + UWB_PHY_RATE_106, + UWB_PHY_RATE_160, + UWB_PHY_RATE_200, + UWB_PHY_RATE_320, + UWB_PHY_RATE_400, + UWB_PHY_RATE_480, + UWB_PHY_RATE_INVALID +}; + + +/** + * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78]) + */ +enum uwb_scan_type { + UWB_SCAN_ONLY = 0, + UWB_SCAN_OUTSIDE_BP, + UWB_SCAN_WHILE_INACTIVE, + UWB_SCAN_DISABLED, + UWB_SCAN_ONLY_STARTTIME, + UWB_SCAN_TOP +}; + + +/** ACK Policy types (MBOA MAC[7.2.1.3]) */ +enum uwb_ack_pol { + UWB_ACK_NO = 0, + UWB_ACK_INM = 1, + UWB_ACK_B = 2, + UWB_ACK_B_REQ = 3, +}; + + +/** DRP reservation types ([ECMA-368 table 106) */ +enum uwb_drp_type { + UWB_DRP_TYPE_ALIEN_BP = 0, + UWB_DRP_TYPE_HARD, + UWB_DRP_TYPE_SOFT, + UWB_DRP_TYPE_PRIVATE, + UWB_DRP_TYPE_PCA, +}; + + +/** DRP Reason Codes ([ECMA-368] table 107) */ +enum uwb_drp_reason { + UWB_DRP_REASON_ACCEPTED = 0, + UWB_DRP_REASON_CONFLICT, + UWB_DRP_REASON_PENDING, + UWB_DRP_REASON_DENIED, + UWB_DRP_REASON_MODIFIED, +}; + +/** Relinquish Request Reason Codes ([ECMA-368] table 113) */ +enum uwb_relinquish_req_reason { + UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, + UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, +}; + +/** + * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) + */ +enum uwb_drp_notif_reason { + UWB_DRP_NOTIF_DRP_IE_RCVD = 0, + UWB_DRP_NOTIF_CONFLICT, + UWB_DRP_NOTIF_TERMINATE, +}; + + +/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */ +struct uwb_drp_alloc { + __le16 zone_bm; + __le16 mas_bm; +} __attribute__((packed)); + + +/** General MAC Header format (ECMA-368[16.2]) */ +struct uwb_mac_frame_hdr { + __le16 Frame_Control; + struct uwb_dev_addr DestAddr; + struct uwb_dev_addr SrcAddr; + __le16 Sequence_Control; + __le16 Access_Information; +} __attribute__((packed)); + + +/** + * uwb_beacon_frame - a beacon frame including MAC headers + * + * [ECMA] section 16.3. 
+ */ +struct uwb_beacon_frame { + struct uwb_mac_frame_hdr hdr; + struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */ + u8 Beacon_Slot_Number; + u8 Device_Control; + u8 IEData[]; +} __attribute__((packed)); + + +/** Information Element codes (MBOA MAC[T54]) */ +enum uwb_ie { + UWB_PCA_AVAILABILITY = 2, + UWB_IE_DRP_AVAILABILITY = 8, + UWB_IE_DRP = 9, + UWB_BP_SWITCH_IE = 11, + UWB_MAC_CAPABILITIES_IE = 12, + UWB_PHY_CAPABILITIES_IE = 13, + UWB_APP_SPEC_PROBE_IE = 15, + UWB_IDENTIFICATION_IE = 19, + UWB_MASTER_KEY_ID_IE = 20, + UWB_RELINQUISH_REQUEST_IE = 21, + UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ + UWB_APP_SPEC_IE = 255, +}; + + +/** + * Header common to all Information Elements (IEs) + */ +struct uwb_ie_hdr { + u8 element_id; /* enum uwb_ie */ + u8 length; +} __attribute__((packed)); + + +/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */ +struct uwb_ie_drp { + struct uwb_ie_hdr hdr; + __le16 drp_control; + struct uwb_dev_addr dev_addr; + struct uwb_drp_alloc allocs[]; +} __attribute__((packed)); + +static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 0) & 0x7; +} + +static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 3) & 0x7; +} + +static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 6) & 0x7; +} + +static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 9) & 0x1; +} + +static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 10) & 0x1; +} + +static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 11) & 0x1; +} + +static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 12) & 0x1; +} + +static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x7 << 0)) | (type << 0); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie, + enum uwb_drp_reason reason_code) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (ie->drp_control & ~(0x7 << 6)) | (reason_code << 6); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 9)) | (status << 9); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe) +{ + u16 drp_control = 
le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12); + ie->drp_control = cpu_to_le16(drp_control); +} + +/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */ +struct uwb_ie_drp_avail { + struct uwb_ie_hdr hdr; + DECLARE_BITMAP(bmp, UWB_NUM_MAS); +} __attribute__((packed)); + +/* Relinqish Request IE ([ECMA-368] section 16.8.19). */ +struct uwb_relinquish_request_ie { + struct uwb_ie_hdr hdr; + __le16 relinquish_req_control; + struct uwb_dev_addr dev_addr; + struct uwb_drp_alloc allocs[]; +} __attribute__((packed)); + +static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie) +{ + return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf; +} + +static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie, + int reason_code) +{ + u16 ctrl = le16_to_cpu(ie->relinquish_req_control); + ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0); + ie->relinquish_req_control = cpu_to_le16(ctrl); +} + +/** + * The Vendor ID is set to an OUI that indicates the vendor of the device. + * ECMA-368 [16.8.10] + */ +struct uwb_vendor_id { + u8 data[3]; +} __attribute__((packed)); + +/** + * The device type ID + * FIXME: clarify what this means + * ECMA-368 [16.8.10] + */ +struct uwb_device_type_id { + u8 data[3]; +} __attribute__((packed)); + + +/** + * UWB device information types + * ECMA-368 [16.8.10] + */ +enum uwb_dev_info_type { + UWB_DEV_INFO_VENDOR_ID = 0, + UWB_DEV_INFO_VENDOR_TYPE, + UWB_DEV_INFO_NAME, +}; + +/** + * UWB device information found in Identification IE + * ECMA-368 [16.8.10] + */ +struct uwb_dev_info { + u8 type; /* enum uwb_dev_info_type */ + u8 length; + u8 data[]; +} __attribute__((packed)); + +/** + * UWB Identification IE + * ECMA-368 [16.8.10] + */ +struct uwb_identification_ie { + struct uwb_ie_hdr hdr; + struct uwb_dev_info info[]; +} __attribute__((packed)); + +/* + * UWB Radio Controller + * + * These definitions are common to the Radio Control layers as + * exported by the WUSB1.0 HWA and WHCI interfaces. 
+ */ + +/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */ +struct uwb_rccb { + u8 bCommandType; /* enum hwa_cet */ + __le16 wCommand; /* Command code */ + u8 bCommandContext; /* Context ID */ +} __attribute__((packed)); + + +/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */ +struct uwb_rceb { + u8 bEventType; /* enum hwa_cet */ + __le16 wEvent; /* Event code */ + u8 bEventContext; /* Context ID */ +} __attribute__((packed)); + + +enum { + UWB_RC_CET_GENERAL = 0, /* General Command/Event type */ + UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */ +}; + +/* Commands to the radio controller */ +enum uwb_rc_cmd { + UWB_RC_CMD_CHANNEL_CHANGE = 16, + UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */ + UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */ + UWB_RC_CMD_RESET = 19, + UWB_RC_CMD_SCAN = 20, /* Scan management */ + UWB_RC_CMD_SET_BEACON_FILTER = 21, + UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */ + UWB_RC_CMD_SET_IE = 23, /* Information Element management */ + UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24, + UWB_RC_CMD_SET_TX_POWER = 25, + UWB_RC_CMD_SLEEP = 26, + UWB_RC_CMD_START_BEACON = 27, + UWB_RC_CMD_STOP_BEACON = 28, + UWB_RC_CMD_BP_MERGE = 29, + UWB_RC_CMD_SEND_COMMAND_FRAME = 30, + UWB_RC_CMD_SET_ASIE_NOTIF = 31, +}; + +/* Notifications from the radio controller */ +enum uwb_rc_evt { + UWB_RC_EVT_IE_RCV = 0, + UWB_RC_EVT_BEACON = 1, + UWB_RC_EVT_BEACON_SIZE = 2, + UWB_RC_EVT_BPOIE_CHANGE = 3, + UWB_RC_EVT_BP_SLOT_CHANGE = 4, + UWB_RC_EVT_BP_SWITCH_IE_RCV = 5, + UWB_RC_EVT_DEV_ADDR_CONFLICT = 6, + UWB_RC_EVT_DRP_AVAIL = 7, + UWB_RC_EVT_DRP = 8, + UWB_RC_EVT_BP_SWITCH_STATUS = 9, + UWB_RC_EVT_CMD_FRAME_RCV = 10, + UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11, + /* Events (command responses) use the same code as the command */ + UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535, +}; + +enum uwb_rc_extended_type_1_cmd { + UWB_RC_SET_DAA_ENERGY_MASK = 32, + UWB_RC_SET_NOTIFICATION_FILTER_EX = 33, +}; + +enum uwb_rc_extended_type_1_evt { + UWB_RC_DAA_ENERGY_DETECTED = 0, +}; + +/* Radio Control Result Code. [WHCI] table 3-3. */ +enum { + UWB_RC_RES_SUCCESS = 0, + UWB_RC_RES_FAIL, + UWB_RC_RES_FAIL_HARDWARE, + UWB_RC_RES_FAIL_NO_SLOTS, + UWB_RC_RES_FAIL_BEACON_TOO_LARGE, + UWB_RC_RES_FAIL_INVALID_PARAMETER, + UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL, + UWB_RC_RES_FAIL_INVALID_IE_DATA, + UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED, + UWB_RC_RES_FAIL_CANCELLED, + UWB_RC_RES_FAIL_INVALID_STATE, + UWB_RC_RES_FAIL_INVALID_SIZE, + UWB_RC_RES_FAIL_ACK_NOT_RECEIVED, + UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF, + UWB_RC_RES_FAIL_TIME_OUT = 255, +}; + +/* Confirm event. [WHCI] section 3.1.3.1 etc. */ +struct uwb_rc_evt_confirm { + struct uwb_rceb rceb; + u8 bResultCode; +} __attribute__((packed)); + +/* Device Address Management event. [WHCI] section 3.1.3.2. */ +struct uwb_rc_evt_dev_addr_mgmt { + struct uwb_rceb rceb; + u8 baAddr[6]; + u8 bResultCode; +} __attribute__((packed)); + + +/* Get IE Event. [WHCI] section 3.1.3.3. */ +struct uwb_rc_evt_get_ie { + struct uwb_rceb rceb; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* Set DRP IE Event. [WHCI] section 3.1.3.7. */ +struct uwb_rc_evt_set_drp_ie { + struct uwb_rceb rceb; + __le16 wRemainingSpace; + u8 bResultCode; +} __attribute__((packed)); + +/* Set IE Event. [WHCI] section 3.1.3.8. */ +struct uwb_rc_evt_set_ie { + struct uwb_rceb rceb; + __le16 RemainingSpace; + u8 bResultCode; +} __attribute__((packed)); + +/* Scan command. [WHCI] 3.1.3.5. 
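As a rough illustration of how such a command block is filled in and submitted with uwb_rc_cmd(), using the structure defined just below (a sketch only: the way the expected confirm event is primed and the error handling are simplified):

static int my_start_scan(struct uwb_rc *rc, u8 channel)
{
	struct uwb_rc_cmd_scan cmd;
	struct uwb_rc_evt_confirm reply;
	ssize_t ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd.rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN);
	cmd.bChannelNumber = channel;
	cmd.bScanState = UWB_SCAN_ONLY;

	/* tell the command layer which confirm event to wait for */
	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
	reply.rceb.wEvent = cpu_to_le16(UWB_RC_CMD_SCAN);

	ret = uwb_rc_cmd(rc, "SCAN", &cmd.rccb, sizeof(cmd),
			 &reply.rceb, sizeof(reply));
	if (ret < 0)
		return ret;
	return reply.bResultCode == UWB_RC_RES_SUCCESS ? 0 : -EIO;
}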
*/ +struct uwb_rc_cmd_scan { + struct uwb_rccb rccb; + u8 bChannelNumber; + u8 bScanState; + __le16 wStartTime; +} __attribute__((packed)); + +/* Set DRP IE command. [WHCI] section 3.1.3.7. */ +struct uwb_rc_cmd_set_drp_ie { + struct uwb_rccb rccb; + __le16 wIELength; + struct uwb_ie_drp IEData[]; +} __attribute__((packed)); + +/* Set IE command. [WHCI] section 3.1.3.8. */ +struct uwb_rc_cmd_set_ie { + struct uwb_rccb rccb; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */ +struct uwb_rc_evt_set_daa_energy_mask { + struct uwb_rceb rceb; + __le16 wLength; + u8 result; +} __attribute__((packed)); + +/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */ +struct uwb_rc_evt_set_notification_filter_ex { + struct uwb_rceb rceb; + __le16 wLength; + u8 result; +} __attribute__((packed)); + +/* IE Received notification. [WHCI] section 3.1.4.1. */ +struct uwb_rc_evt_ie_rcv { + struct uwb_rceb rceb; + struct uwb_dev_addr SrcAddr; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* Type of the received beacon. [WHCI] section 3.1.4.2. */ +enum uwb_rc_beacon_type { + UWB_RC_BEACON_TYPE_SCAN = 0, + UWB_RC_BEACON_TYPE_NEIGHBOR, + UWB_RC_BEACON_TYPE_OL_ALIEN, + UWB_RC_BEACON_TYPE_NOL_ALIEN, +}; + +/* Beacon received notification. [WHCI] 3.1.4.2. */ +struct uwb_rc_evt_beacon { + struct uwb_rceb rceb; + u8 bChannelNumber; + u8 bBeaconType; + __le16 wBPSTOffset; + u8 bLQI; + u8 bRSSI; + __le16 wBeaconInfoLength; + u8 BeaconInfo[]; +} __attribute__((packed)); + + +/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */ +struct uwb_rc_evt_beacon_size { + struct uwb_rceb rceb; + __le16 wNewBeaconSize; +} __attribute__((packed)); + + +/* BPOIE Change notification. [WHCI] section 3.1.4.4. */ +struct uwb_rc_evt_bpoie_change { + struct uwb_rceb rceb; + __le16 wBPOIELength; + u8 BPOIE[]; +} __attribute__((packed)); + + +/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */ +struct uwb_rc_evt_bp_slot_change { + struct uwb_rceb rceb; + u8 slot_info; +} __attribute__((packed)); + +static inline int uwb_rc_evt_bp_slot_change_slot_num( + const struct uwb_rc_evt_bp_slot_change *evt) +{ + return evt->slot_info & 0x7f; +} + +static inline int uwb_rc_evt_bp_slot_change_no_slot( + const struct uwb_rc_evt_bp_slot_change *evt) +{ + return (evt->slot_info & 0x80) >> 7; +} + +/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */ +struct uwb_rc_evt_bp_switch_ie_rcv { + struct uwb_rceb rceb; + struct uwb_dev_addr wSrcAddr; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */ +struct uwb_rc_evt_dev_addr_conflict { + struct uwb_rceb rceb; +} __attribute__((packed)); + +/* DRP notification. [WHCI] section 3.1.4.9. */ +struct uwb_rc_evt_drp { + struct uwb_rceb rceb; + struct uwb_dev_addr src_addr; + u8 reason; + u8 beacon_slot_number; + __le16 ie_length; + u8 ie_data[]; +} __attribute__((packed)); + +static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt) +{ + return evt->reason & 0x0f; +} + + +/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */ +struct uwb_rc_evt_drp_avail { + struct uwb_rceb rceb; + DECLARE_BITMAP(bmp, UWB_NUM_MAS); +} __attribute__((packed)); + +/* BP switch status notification. [WHCI] section 3.1.4.10. 
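As an aside on how these notification structures are consumed, a handler for the DRP notification defined above might start out like this (a sketch; dispatch, locking and IE walking are omitted):

static void my_handle_drp_notif(struct uwb_rc *rc,
				struct uwb_rc_evt_drp *drp)
{
	size_t ielen = le16_to_cpu(drp->ie_length);

	switch (uwb_rc_evt_drp_reason(drp)) {
	case UWB_DRP_NOTIF_DRP_IE_RCVD:
		/* walk the ielen bytes of drp->ie_data with uwb_ie_next() */
		break;
	case UWB_DRP_NOTIF_CONFLICT:
	case UWB_DRP_NOTIF_TERMINATE:
		break;
	}
}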
*/ +struct uwb_rc_evt_bp_switch_status { + struct uwb_rceb rceb; + u8 status; + u8 slot_offset; + __le16 bpst_offset; + u8 move_countdown; +} __attribute__((packed)); + +/* Command Frame Received notification. [WHCI] section 3.1.4.11. */ +struct uwb_rc_evt_cmd_frame_rcv { + struct uwb_rceb rceb; + __le16 receive_time; + struct uwb_dev_addr wSrcAddr; + struct uwb_dev_addr wDstAddr; + __le16 control; + __le16 reserved; + __le16 dataLength; + u8 data[]; +} __attribute__((packed)); + +/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */ +struct uwb_rc_evt_channel_change_ie_rcv { + struct uwb_rceb rceb; + struct uwb_dev_addr wSrcAddr; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */ +struct uwb_rc_evt_daa_energy_detected { + struct uwb_rceb rceb; + __le16 wLength; + u8 bandID; + u8 reserved; + u8 toneBmp[16]; +} __attribute__((packed)); + + +/** + * Radio Control Interface Class Descriptor + * + * WUSB 1.0 [8.6.1.2] + */ +struct uwb_rc_control_intf_class_desc { + u8 bLength; + u8 bDescriptorType; + __le16 bcdRCIVersion; +} __attribute__((packed)); + +#endif /* #ifndef __LINUX__UWB_SPEC_H__ */ diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h new file mode 100644 index 00000000..891d1d5f --- /dev/null +++ b/include/linux/uwb/umc.h @@ -0,0 +1,195 @@ +/* + * UWB Multi-interface Controller support. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This file is released under the GPLv2 + * + * UMC (UWB Multi-interface Controller) capabilities (e.g., radio + * controller, host controller) are presented as devices on the "umc" + * bus. + * + * The radio controller is not strictly a UMC capability but it's + * useful to present it as such. + * + * References: + * + * [WHCI] Wireless Host Controller Interface Specification for + * Certified Wireless Universal Serial Bus, revision 0.95. + * + * How this works is kind of convoluted but simple. The whci.ko driver + * loads when WHCI devices are detected. These WHCI devices expose + * many devices in the same PCI function (they couldn't have reused + * functions, no), so for each PCI function that exposes these many + * devices, whci ceates a umc_dev [whci_probe() -> whci_add_cap()] + * with umc_device_create() and adds it to the bus with + * umc_device_register(). + * + * umc_device_register() calls device_register() which will push the + * bus management code to load your UMC driver's somehting_probe() + * that you have registered for that capability code. + * + * Now when the WHCI device is removed, whci_remove() will go over + * each umc_dev assigned to each of the PCI function's capabilities + * and through whci_del_cap() call umc_device_unregister() each + * created umc_dev. Of course, if you are bound to the device, your + * driver's something_remove() will be called. + */ + +#ifndef _LINUX_UWB_UMC_H_ +#define _LINUX_UWB_UMC_H_ + +#include <linux/device.h> +#include <linux/pci.h> + +/* + * UMC capability IDs. + * + * 0x00 is reserved so use it for the radio controller device. + * + * [WHCI] table 2-8 + */ +#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */ +#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */ + +/** + * struct umc_dev - UMC capability device + * + * @version: version of the specification this capability conforms to. + * @cap_id: capability ID. + * @bar: PCI Bar (64 bit) where the resource lies + * @resource: register space resource. + * @irq: interrupt line. 
+ */ +struct umc_dev { + u16 version; + u8 cap_id; + u8 bar; + struct resource resource; + unsigned irq; + struct device dev; +}; + +#define to_umc_dev(d) container_of(d, struct umc_dev, dev) + +/** + * struct umc_driver - UMC capability driver + * @cap_id: supported capability ID. + * @match: driver specific capability matching function. + * @match_data: driver specific data for match() (e.g., a + * table of pci_device_id's if umc_match_pci_id() is used). + */ +struct umc_driver { + char *name; + u8 cap_id; + int (*match)(struct umc_driver *, struct umc_dev *); + const void *match_data; + + int (*probe)(struct umc_dev *); + void (*remove)(struct umc_dev *); + int (*suspend)(struct umc_dev *, pm_message_t state); + int (*resume)(struct umc_dev *); + int (*pre_reset)(struct umc_dev *); + int (*post_reset)(struct umc_dev *); + + struct device_driver driver; +}; + +#define to_umc_driver(d) container_of(d, struct umc_driver, driver) + +extern struct bus_type umc_bus_type; + +struct umc_dev *umc_device_create(struct device *parent, int n); +int __must_check umc_device_register(struct umc_dev *umc); +void umc_device_unregister(struct umc_dev *umc); + +int __must_check __umc_driver_register(struct umc_driver *umc_drv, + struct module *mod, + const char *mod_name); + +/** + * umc_driver_register - register a UMC capabiltity driver. + * @umc_drv: pointer to the driver. + */ +#define umc_driver_register(umc_drv) \ + __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME) + +void umc_driver_unregister(struct umc_driver *umc_drv); + +/* + * Utility function you can use to match (umc_driver->match) against a + * null-terminated array of 'struct pci_device_id' in + * umc_driver->match_data. + */ +int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc); + +/** + * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none + * @umc_dev: UMC device whose parent PCI device we are looking for + * + * DIRTY!!! DON'T RELY ON THIS + * + * FIXME: This is as dirty as it gets, but we need some way to check + * the correct type of umc_dev->parent (so that for example, we can + * cast to pci_dev). Casting to pci_dev is necessary because at some + * point we need to request resources from the device. Mapping is + * easily over come (ioremap and stuff are bus agnostic), but hooking + * up to some error handlers (such as pci error handlers) might need + * this. + * + * THIS might (probably will) be removed in the future, so don't count + * on it. + */ +static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev) +{ + struct pci_dev *pci_dev = NULL; + if (umc_dev->dev.parent->bus == &pci_bus_type) + pci_dev = to_pci_dev(umc_dev->dev.parent); + return pci_dev; +} + +/** + * umc_dev_get() - reference a UMC device. + * @umc_dev: Pointer to UMC device. + * + * NOTE: we are assuming in this whole scheme that the parent device + * is referenced at _probe() time and unreferenced at _remove() + * time by the parent's subsystem. + */ +static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev) +{ + get_device(&umc_dev->dev); + return umc_dev; +} + +/** + * umc_dev_put() - unreference a UMC device. + * @umc_dev: Pointer to UMC device. + */ +static inline void umc_dev_put(struct umc_dev *umc_dev) +{ + put_device(&umc_dev->dev); +} + +/** + * umc_set_drvdata - set UMC device's driver data. + * @umc_dev: Pointer to UMC device. + * @data: Data to set. 
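To sketch how a capability driver plugs into this (names invented; the allocation is only there to motivate the drvdata helpers defined here and just below):

struct my_cap {
	struct umc_dev *umc;
	/* ... capability specific state ... */
};

static int my_cap_probe(struct umc_dev *umc)
{
	struct my_cap *cap = kzalloc(sizeof(*cap), GFP_KERNEL);

	if (!cap)
		return -ENOMEM;
	cap->umc = umc;
	umc_set_drvdata(umc, cap);	/* recovered via umc_get_drvdata() */
	return 0;
}

static void my_cap_remove(struct umc_dev *umc)
{
	kfree(umc_get_drvdata(umc));
}

static struct umc_driver my_cap_driver = {
	.name   = "my_cap",
	.cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
	.probe  = my_cap_probe,
	.remove = my_cap_remove,
};
/* module init would then call umc_driver_register(&my_cap_driver) */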
+ */ +static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data) +{ + dev_set_drvdata(&umc_dev->dev, data); +} + +/** + * umc_get_drvdata - recover UMC device's driver data. + * @umc_dev: Pointer to UMC device. + */ +static inline void *umc_get_drvdata(struct umc_dev *umc_dev) +{ + return dev_get_drvdata(&umc_dev->dev); +} + +int umc_controller_reset(struct umc_dev *umc); + +#endif /* #ifndef _LINUX_UWB_UMC_H_ */ diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h new file mode 100644 index 00000000..915ec230 --- /dev/null +++ b/include/linux/uwb/whci.h @@ -0,0 +1,117 @@ +/* + * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * + * References: + * [WHCI] Wireless Host Controller Interface Specification for + * Certified Wireless Universal Serial Bus, revision 0.95. + */ +#ifndef _LINUX_UWB_WHCI_H_ +#define _LINUX_UWB_WHCI_H_ + +#include <linux/pci.h> + +/* + * UWB interface capability registers (offsets from UWBBASE) + * + * [WHCI] section 2.2 + */ +#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */ +# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull) +#define UWBCAPDATA(n) (8*(n)) +# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull) +# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull) +# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull) +# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32)) +# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull) + +/* Size of the WHCI capability data (including the RC capability) for + a device with n capabilities. 
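To show how these fields are pulled apart in practice, a sketch using the 64-bit read helper defined further down in this header (the exact capability indexing follows [WHCI] section 2.2; error handling is omitted):

static void my_enumerate_caps(void __iomem *uwb_base)
{
	u64 capinfo = le_readq(uwb_base + UWBCAPINFO);
	unsigned int n_caps = UWBCAPINFO_TO_N_CAPS(capinfo);
	unsigned int n;

	for (n = 1; n <= n_caps; n++) {
		u64 capdata = le_readq(uwb_base + UWBCAPDATA(n));

		/* create a umc_dev for UWBCAPDATA_TO_CAP_ID(capdata) using
		 * UWBCAPDATA_TO_BAR/OFFSET/SIZE(capdata) ... */
	}
}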
*/ +#define UWBCAPDATA_SIZE(n) (8 + 8*(n)) + + +/* + * URC registers (offsets from URCBASE) + * + * [WHCI] section 2.3 + */ +#define URCCMD 0x00 +# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */ +# define URCCMD_RS (1 << 30) /* Run/Stop */ +# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */ +# define URCCMD_ACTIVE (1 << 15) /* Command is active */ +# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */ +# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */ +#define URCSTS 0x04 +# define URCSTS_EPS (1 << 17) /* Event Processing Status */ +# define URCSTS_HALTED (1 << 16) /* RC halted */ +# define URCSTS_HSE (1 << 10) /* Host System Error...fried */ +# define URCSTS_ER (1 << 9) /* Event Ready */ +# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */ +# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */ +# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */ +#define URCINTR 0x08 +# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */ +#define URCCMDADDR 0x10 +#define URCEVTADDR 0x18 +# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */ + + +/** Write 32 bit @value to little endian register at @addr */ +static inline +void le_writel(u32 value, void __iomem *addr) +{ + iowrite32(value, addr); +} + + +/** Read from 32 bit little endian register at @addr */ +static inline +u32 le_readl(void __iomem *addr) +{ + return ioread32(addr); +} + + +/** Write 64 bit @value to little endian register at @addr */ +static inline +void le_writeq(u64 value, void __iomem *addr) +{ + iowrite32(value, addr); + iowrite32(value >> 32, addr + 4); +} + + +/** Read from 64 bit little endian register at @addr */ +static inline +u64 le_readq(void __iomem *addr) +{ + u64 value; + value = ioread32(addr); + value |= (u64)ioread32(addr + 4) << 32; + return value; +} + +extern int whci_wait_for(struct device *dev, u32 __iomem *reg, + u32 mask, u32 result, + unsigned long max_ms, const char *tag); + +#endif /* #ifndef _LINUX_UWB_WHCI_H_ */ diff --git a/include/linux/v4l2-mediabus.h b/include/linux/v4l2-mediabus.h new file mode 100644 index 00000000..5ea7f753 --- /dev/null +++ b/include/linux/v4l2-mediabus.h @@ -0,0 +1,114 @@ +/* + * Media Bus API header + * + * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_V4L2_MEDIABUS_H +#define __LINUX_V4L2_MEDIABUS_H + +#include <linux/types.h> +#include <linux/videodev2.h> + +/* + * These pixel codes uniquely identify data formats on the media bus. Mostly + * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is + * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the + * data format is fixed. Additionally, "2X8" means that one pixel is transferred + * in two 8-bit samples, "BE" or "LE" specify in which order those samples are + * transferred over the bus: "LE" means that the least significant bits are + * transferred first, "BE" means that the most significant bits are transferred + * first, and "PADHI" and "PADLO" define which bits - low or high, in the + * incomplete high byte, are filled with padding bits. + * + * The pixel codes are grouped by type, bus_width, bits per component, samples + * per pixel and order of subsamples. 
Numerical values are sorted using generic + * numerical sort order (8 thus comes before 10). + * + * As their value can't change when a new pixel code is inserted in the + * enumeration, the pixel codes are explicitly given a numerical value. The next + * free values for each category are listed below, update them when inserting + * new pixel codes. + */ +enum v4l2_mbus_pixelcode { + V4L2_MBUS_FMT_FIXED = 0x0001, + + /* RGB - next is 0x1009 */ + V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE = 0x1001, + V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE = 0x1002, + V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE = 0x1003, + V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE = 0x1004, + V4L2_MBUS_FMT_BGR565_2X8_BE = 0x1005, + V4L2_MBUS_FMT_BGR565_2X8_LE = 0x1006, + V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007, + V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008, + + /* YUV (including grey) - next is 0x2014 */ + V4L2_MBUS_FMT_Y8_1X8 = 0x2001, + V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002, + V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003, + V4L2_MBUS_FMT_YUYV8_1_5X8 = 0x2004, + V4L2_MBUS_FMT_YVYU8_1_5X8 = 0x2005, + V4L2_MBUS_FMT_UYVY8_2X8 = 0x2006, + V4L2_MBUS_FMT_VYUY8_2X8 = 0x2007, + V4L2_MBUS_FMT_YUYV8_2X8 = 0x2008, + V4L2_MBUS_FMT_YVYU8_2X8 = 0x2009, + V4L2_MBUS_FMT_Y10_1X10 = 0x200a, + V4L2_MBUS_FMT_YUYV10_2X10 = 0x200b, + V4L2_MBUS_FMT_YVYU10_2X10 = 0x200c, + V4L2_MBUS_FMT_Y12_1X12 = 0x2013, + V4L2_MBUS_FMT_UYVY8_1X16 = 0x200f, + V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010, + V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011, + V4L2_MBUS_FMT_YVYU8_1X16 = 0x2012, + V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d, + V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e, + + /* Bayer - next is 0x3015 */ + V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001, + V4L2_MBUS_FMT_SGBRG8_1X8 = 0x3013, + V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002, + V4L2_MBUS_FMT_SRGGB8_1X8 = 0x3014, + V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b, + V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c, + V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009, + V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8 = 0x300d, + V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE = 0x3003, + V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE = 0x3004, + V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE = 0x3005, + V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE = 0x3006, + V4L2_MBUS_FMT_SBGGR10_1X10 = 0x3007, + V4L2_MBUS_FMT_SGBRG10_1X10 = 0x300e, + V4L2_MBUS_FMT_SGRBG10_1X10 = 0x300a, + V4L2_MBUS_FMT_SRGGB10_1X10 = 0x300f, + V4L2_MBUS_FMT_SBGGR12_1X12 = 0x3008, + V4L2_MBUS_FMT_SGBRG12_1X12 = 0x3010, + V4L2_MBUS_FMT_SGRBG12_1X12 = 0x3011, + V4L2_MBUS_FMT_SRGGB12_1X12 = 0x3012, + + /* JPEG compressed formats - next is 0x4002 */ + V4L2_MBUS_FMT_JPEG_1X8 = 0x4001, +}; + +/** + * struct v4l2_mbus_framefmt - frame format on the media bus + * @width: frame width + * @height: frame height + * @code: data format code (from enum v4l2_mbus_pixelcode) + * @field: used interlacing type (from enum v4l2_field) + * @colorspace: colorspace of the data (from enum v4l2_colorspace) + */ +struct v4l2_mbus_framefmt { + __u32 width; + __u32 height; + __u32 code; + __u32 field; + __u32 colorspace; + __u32 reserved[7]; +}; + +#endif diff --git a/include/linux/v4l2-subdev.h b/include/linux/v4l2-subdev.h new file mode 100644 index 00000000..ed29cbbe --- /dev/null +++ b/include/linux/v4l2-subdev.h @@ -0,0 +1,141 @@ +/* + * V4L2 subdev userspace API + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * Sakari Ailus <sakari.ailus@iki.fi> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_V4L2_SUBDEV_H +#define __LINUX_V4L2_SUBDEV_H + +#include <linux/ioctl.h> +#include <linux/types.h> +#include <linux/v4l2-mediabus.h> + +/** + * enum v4l2_subdev_format_whence - Media bus format type + * @V4L2_SUBDEV_FORMAT_TRY: try format, for negotiation only + * @V4L2_SUBDEV_FORMAT_ACTIVE: active format, applied to the device + */ +enum v4l2_subdev_format_whence { + V4L2_SUBDEV_FORMAT_TRY = 0, + V4L2_SUBDEV_FORMAT_ACTIVE = 1, +}; + +/** + * struct v4l2_subdev_format - Pad-level media bus format + * @which: format type (from enum v4l2_subdev_format_whence) + * @pad: pad number, as reported by the media API + * @format: media bus format (format code and frame size) + */ +struct v4l2_subdev_format { + __u32 which; + __u32 pad; + struct v4l2_mbus_framefmt format; + __u32 reserved[8]; +}; + +/** + * struct v4l2_subdev_crop - Pad-level crop settings + * @which: format type (from enum v4l2_subdev_format_whence) + * @pad: pad number, as reported by the media API + * @rect: pad crop rectangle boundaries + */ +struct v4l2_subdev_crop { + __u32 which; + __u32 pad; + struct v4l2_rect rect; + __u32 reserved[8]; +}; + +/** + * struct v4l2_subdev_mbus_code_enum - Media bus format enumeration + * @pad: pad number, as reported by the media API + * @index: format index during enumeration + * @code: format code (from enum v4l2_mbus_pixelcode) + */ +struct v4l2_subdev_mbus_code_enum { + __u32 pad; + __u32 index; + __u32 code; + __u32 reserved[9]; +}; + +/** + * struct v4l2_subdev_frame_size_enum - Media bus format enumeration + * @pad: pad number, as reported by the media API + * @index: format index during enumeration + * @code: format code (from enum v4l2_mbus_pixelcode) + */ +struct v4l2_subdev_frame_size_enum { + __u32 index; + __u32 pad; + __u32 code; + __u32 min_width; + __u32 max_width; + __u32 min_height; + __u32 max_height; + __u32 reserved[9]; +}; + +/** + * struct v4l2_subdev_frame_interval - Pad-level frame rate + * @pad: pad number, as reported by the media API + * @interval: frame interval in seconds + */ +struct v4l2_subdev_frame_interval { + __u32 pad; + struct v4l2_fract interval; + __u32 reserved[9]; +}; + +/** + * struct v4l2_subdev_frame_interval_enum - Frame interval enumeration + * @pad: pad number, as reported by the media API + * @index: frame interval index during enumeration + * @code: format code (from enum v4l2_mbus_pixelcode) + * @width: frame width in pixels + * @height: frame height in pixels + * @interval: frame interval in seconds + */ +struct v4l2_subdev_frame_interval_enum { + __u32 index; + __u32 pad; + __u32 code; + __u32 width; + __u32 height; + struct v4l2_fract interval; + __u32 reserved[9]; +}; + +#define VIDIOC_SUBDEV_G_FMT _IOWR('V', 4, struct v4l2_subdev_format) +#define VIDIOC_SUBDEV_S_FMT _IOWR('V', 5, struct v4l2_subdev_format) +#define VIDIOC_SUBDEV_G_FRAME_INTERVAL \ + _IOWR('V', 21, struct v4l2_subdev_frame_interval) +#define VIDIOC_SUBDEV_S_FRAME_INTERVAL \ + _IOWR('V', 22, struct v4l2_subdev_frame_interval) +#define VIDIOC_SUBDEV_ENUM_MBUS_CODE \ + _IOWR('V', 2, struct 
v4l2_subdev_mbus_code_enum) +#define VIDIOC_SUBDEV_ENUM_FRAME_SIZE \ + _IOWR('V', 74, struct v4l2_subdev_frame_size_enum) +#define VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL \ + _IOWR('V', 75, struct v4l2_subdev_frame_interval_enum) +#define VIDIOC_SUBDEV_G_CROP _IOWR('V', 59, struct v4l2_subdev_crop) +#define VIDIOC_SUBDEV_S_CROP _IOWR('V', 60, struct v4l2_subdev_crop) + +#endif diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h new file mode 100644 index 00000000..6f8fbcf1 --- /dev/null +++ b/include/linux/vermagic.h @@ -0,0 +1,33 @@ +#include <generated/utsrelease.h> + +/* Simply sanity version stamp for modules. */ +#ifdef CONFIG_SMP +#define MODULE_VERMAGIC_SMP "SMP " +#else +#define MODULE_VERMAGIC_SMP "" +#endif +#ifdef CONFIG_PREEMPT +#define MODULE_VERMAGIC_PREEMPT "preempt " +#else +#define MODULE_VERMAGIC_PREEMPT "" +#endif +#ifdef CONFIG_MODULE_UNLOAD +#define MODULE_VERMAGIC_MODULE_UNLOAD "mod_unload " +#else +#define MODULE_VERMAGIC_MODULE_UNLOAD "" +#endif +#ifdef CONFIG_MODVERSIONS +#define MODULE_VERMAGIC_MODVERSIONS "modversions " +#else +#define MODULE_VERMAGIC_MODVERSIONS "" +#endif +#ifndef MODULE_ARCH_VERMAGIC +#define MODULE_ARCH_VERMAGIC "" +#endif + +#define VERMAGIC_STRING \ + UTS_RELEASE " " \ + MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ + MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ + MODULE_ARCH_VERMAGIC + diff --git a/include/linux/veth.h b/include/linux/veth.h new file mode 100644 index 00000000..3354c1eb --- /dev/null +++ b/include/linux/veth.h @@ -0,0 +1,12 @@ +#ifndef __NET_VETH_H_ +#define __NET_VETH_H_ + +enum { + VETH_INFO_UNSPEC, + VETH_INFO_PEER, + + __VETH_INFO_MAX +#define VETH_INFO_MAX (__VETH_INFO_MAX - 1) +}; + +#endif diff --git a/include/linux/vfs.h b/include/linux/vfs.h new file mode 100644 index 00000000..e701d054 --- /dev/null +++ b/include/linux/vfs.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_VFS_H +#define _LINUX_VFS_H + +#include <linux/statfs.h> + +#endif diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h new file mode 100644 index 00000000..4b9a7f59 --- /dev/null +++ b/include/linux/vga_switcheroo.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2010 Red Hat Inc. 
+ * Author : Dave Airlie <airlied@redhat.com> + * + * Licensed under GPLv2 + * + * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs + */ + +#include <linux/fb.h> + +enum vga_switcheroo_state { + VGA_SWITCHEROO_OFF, + VGA_SWITCHEROO_ON, +}; + +enum vga_switcheroo_client_id { + VGA_SWITCHEROO_IGD, + VGA_SWITCHEROO_DIS, + VGA_SWITCHEROO_MAX_CLIENTS, +}; + +struct vga_switcheroo_handler { + int (*switchto)(enum vga_switcheroo_client_id id); + int (*power_state)(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state); + int (*init)(void); + int (*get_client_id)(struct pci_dev *pdev); +}; + + +#if defined(CONFIG_VGA_SWITCHEROO) +void vga_switcheroo_unregister_client(struct pci_dev *dev); +int vga_switcheroo_register_client(struct pci_dev *dev, + void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state), + void (*reprobe)(struct pci_dev *dev), + bool (*can_switch)(struct pci_dev *dev)); + +void vga_switcheroo_client_fb_set(struct pci_dev *dev, + struct fb_info *info); + +int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler); +void vga_switcheroo_unregister_handler(void); + +int vga_switcheroo_process_delayed_switch(void); + +#else + +static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} +static inline int vga_switcheroo_register_client(struct pci_dev *dev, + void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state), + void (*reprobe)(struct pci_dev *dev), + bool (*can_switch)(struct pci_dev *dev)) { return 0; } +static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} +static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } +static inline void vga_switcheroo_unregister_handler(void) {} +static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } + +#endif diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h new file mode 100644 index 00000000..b572f80b --- /dev/null +++ b/include/linux/vgaarb.h @@ -0,0 +1,244 @@ +/* + * The VGA aribiter manages VGA space routing and VGA resource decode to + * allow multiple VGA devices to be used in a system in a safe way. + * + * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> + * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> + * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef LINUX_VGA_H +#define LINUX_VGA_H + + +/* Legacy VGA regions */ +#define VGA_RSRC_NONE 0x00 +#define VGA_RSRC_LEGACY_IO 0x01 +#define VGA_RSRC_LEGACY_MEM 0x02 +#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM) +/* Non-legacy access */ +#define VGA_RSRC_NORMAL_IO 0x04 +#define VGA_RSRC_NORMAL_MEM 0x08 + +/* Passing that instead of a pci_dev to use the system "default" + * device, that is the one used by vgacon. Archs will probably + * have to provide their own vga_default_device(); + */ +#define VGA_DEFAULT_DEVICE (NULL) + +struct pci_dev; + +/* For use by clients */ + +/** + * vga_set_legacy_decoding + * + * @pdev: pci device of the VGA card + * @decodes: bit mask of what legacy regions the card decodes + * + * Indicates to the arbiter if the card decodes legacy VGA IOs, + * legacy VGA Memory, both, or none. All cards default to both, + * the card driver (fbdev for example) should tell the arbiter + * if it has disabled legacy decoding, so the card can be left + * out of the arbitration process (and can be safe to take + * interrupts at any time. + */ +extern void vga_set_legacy_decoding(struct pci_dev *pdev, + unsigned int decodes); + +/** + * vga_get - acquire & locks VGA resources + * + * @pdev: pci device of the VGA card or NULL for the system default + * @rsrc: bit mask of resources to acquire and lock + * @interruptible: blocking should be interruptible by signals ? + * + * This function acquires VGA resources for the given + * card and mark those resources locked. If the resource requested + * are "normal" (and not legacy) resources, the arbiter will first check + * wether the card is doing legacy decoding for that type of resource. If + * yes, the lock is "converted" into a legacy resource lock. + * The arbiter will first look for all VGA cards that might conflict + * and disable their IOs and/or Memory access, including VGA forwarding + * on P2P bridges if necessary, so that the requested resources can + * be used. Then, the card is marked as locking these resources and + * the IO and/or Memory accesse are enabled on the card (including + * VGA forwarding on parent P2P bridges if any). + * This function will block if some conflicting card is already locking + * one of the required resources (or any resource on a different bus + * segment, since P2P bridges don't differenciate VGA memory and IO + * afaik). You can indicate wether this blocking should be interruptible + * by a signal (for userland interface) or not. + * Must not be called at interrupt time or in atomic context. + * If the card already owns the resources, the function succeeds. 
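A typical client sequence, as a sketch (the helpers used are the shortcuts declared below; what is done while holding the lock is up to the driver):

static int my_touch_legacy_vga(struct pci_dev *pdev)
{
	int err;

	err = vga_get_interruptible(pdev,
			VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	if (err)
		return err;	/* e.g. interrupted while waiting for the lock */

	/* ... safely poke the legacy VGA IO/memory ranges here ... */

	vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	return 0;
}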
+ * Nested calls are supported (a per-resource counter is maintained) + */ + +#if defined(CONFIG_VGA_ARB) +extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); +#else +static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; } +#endif + +/** + * vga_get_interruptible + * + * Shortcut to vga_get + */ + +static inline int vga_get_interruptible(struct pci_dev *pdev, + unsigned int rsrc) +{ + return vga_get(pdev, rsrc, 1); +} + +/** + * vga_get_uninterruptible + * + * Shortcut to vga_get + */ + +static inline int vga_get_uninterruptible(struct pci_dev *pdev, + unsigned int rsrc) +{ + return vga_get(pdev, rsrc, 0); +} + +/** + * vga_tryget - try to acquire & lock legacy VGA resources + * + * @pdev: pci device of VGA card or NULL for system default + * @rsrc: bit mask of resources to acquire and lock + * + * This function performs the same operation as vga_get(), but + * will return an error (-EBUSY) instead of blocking if the resources + * are already locked by another card. It can be called in any context. + */ + +#if defined(CONFIG_VGA_ARB) +extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); +#else +static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; } +#endif + +/** + * vga_put - release lock on legacy VGA resources + * + * @pdev: pci device of VGA card or NULL for system default + * @rsrc: bit mask of resources to release + * + * This function releases resources previously locked by vga_get() + * or vga_tryget(). The resources aren't disabled right away, so + * that a subsequent vga_get() on the same card will succeed + * immediately. Resources have a counter, so locks are only + * released if the counter reaches 0. + */ + +#if defined(CONFIG_VGA_ARB) +extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); +#else +#define vga_put(pdev, rsrc) +#endif + + +/** + * vga_default_device + * + * This can be defined by the platform. The default implementation + * is rather dumb and will probably only work properly on single + * vga card setups and/or x86 platforms. + * + * If your VGA default device is not PCI, you'll have to return + * NULL here. In this case, I assume it will not conflict with + * any PCI card. If this is not true, I'll have to define two arch + * hooks for enabling/disabling the VGA default device if that is + * possible. This may be a problem with real _ISA_ VGA cards, in + * addition to a PCI one. I don't know at this point how to deal + * with that card. Can their IOs be disabled at all? If not, then + * I suppose it's a matter of having the proper arch hook telling + * us about it, so we basically never allow anybody to succeed at a + * vga_get()... + */ + +#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE +extern struct pci_dev *vga_default_device(void); +#endif + +/** + * vga_conflicts + * + * Architectures should define this if they have several + * independent PCI domains that can afford concurrent VGA + * decoding. + */ + +#ifndef __ARCH_HAS_VGA_CONFLICT +static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) +{ + return 1; +} +#endif + +/** + * vga_client_register + * + * @pdev: pci device of the VGA client + * @cookie: client cookie to be used in callbacks + * @irq_set_state: irq state change callback + * @set_vga_decode: vga decode change callback + * + * return value: 0 on success, -1 on failure + * Register a client with the VGA arbitration logic. + * + * Clients have two callback mechanisms they can use.
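Before the callback details that continue below, a minimal usage sketch of the locking API declared above may help. This is an editorial illustration, not part of the header; the device pointer and the register programming in the middle are placeholders, and the counted-lock behaviour is the one described in the vga_get()/vga_put() comments.

        /* Illustrative only: bracket legacy VGA register access with the arbiter lock. */
        static int example_poke_legacy_vga(struct pci_dev *pdev)
        {
                int rc;

                /* Block (interruptibly) until legacy I/O and memory are ours. */
                rc = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
                if (rc)
                        return rc;      /* a signal interrupted the wait */

                /* ... program legacy VGA registers here ... */

                /* The lock is counted; this drops our reference so the arbiter
                 * can re-enable conflicting cards. */
                vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
                return 0;
        }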
+ * irq enable/disable callback - + * If a client can't disable its GPU's VGA resources, then we + * need to be able to ask it to turn off its irqs when we + * turn off its mem and io decoding. + * set_vga_decode - + * If a client can disable its GPU VGA resource, it will + * get a callback from this to set the encode/decode state. + * + * Rationale: we cannot disable VGA decode resources unconditionally; + * some single-GPU laptops seem to require ACPI or BIOS access to the + * VGA registers to control things like backlights etc. + * Hopefully newer multi-GPU laptops do something saner, and desktops + * won't have any special ACPI for this. + * The driver will get a callback when VGA arbitration is first used + * by userspace, since some older X servers have issues. + */ +#if defined(CONFIG_VGA_ARB) +int vga_client_register(struct pci_dev *pdev, void *cookie, + void (*irq_set_state)(void *cookie, bool state), + unsigned int (*set_vga_decode)(void *cookie, bool state)); +#else +static inline int vga_client_register(struct pci_dev *pdev, void *cookie, + void (*irq_set_state)(void *cookie, bool state), + unsigned int (*set_vga_decode)(void *cookie, bool state)) +{ + return 0; +} +#endif + +#endif /* LINUX_VGA_H */ diff --git a/include/linux/vhost.h b/include/linux/vhost.h new file mode 100644 index 00000000..e847f1e3 --- /dev/null +++ b/include/linux/vhost.h @@ -0,0 +1,130 @@ +#ifndef _LINUX_VHOST_H +#define _LINUX_VHOST_H +/* Userspace interface for in-kernel virtio accelerators. */ + +/* vhost is used to reduce the number of system calls involved in virtio. + * + * Existing virtio net code is used in the guest without modification. + * + * This header includes the interface used by the userspace hypervisor for + * device configuration. + */ + +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/ioctl.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> + +struct vhost_vring_state { + unsigned int index; + unsigned int num; +}; + +struct vhost_vring_file { + unsigned int index; + int fd; /* Pass -1 to unbind from file. */ + +}; + +struct vhost_vring_addr { + unsigned int index; + /* Option flags. */ + unsigned int flags; + /* Flag values: */ + /* Whether log address is valid. If set enables logging. */ +#define VHOST_VRING_F_LOG 0 + + /* Start of array of descriptors (virtually contiguous) */ + __u64 desc_user_addr; + /* Used structure address. Must be 32 bit aligned */ + __u64 used_user_addr; + /* Available structure address. Must be 16 bit aligned */ + __u64 avail_user_addr; + /* Logging support. */ + /* Log writes to used structure, at offset calculated from specified + * address. Address must be 32 bit aligned. */ + __u64 log_guest_addr; +}; + +struct vhost_memory_region { + __u64 guest_phys_addr; + __u64 memory_size; /* bytes */ + __u64 userspace_addr; + __u64 flags_padding; /* No flags are currently specified. */ +}; + +/* All region addresses and sizes must be 4K aligned. */ +#define VHOST_PAGE_SIZE 0x1000 + +struct vhost_memory { + __u32 nregions; + __u32 padding; + struct vhost_memory_region regions[0]; +}; + +/* ioctls */ + +#define VHOST_VIRTIO 0xAF + +/* Features bitmask for forward compatibility. Transport bits are used for + * vhost specific features. */ +#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64) +#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64) + +/* Set current process as the (exclusive) owner of this file descriptor. This + * must be called before any other vhost command.
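Since ordering matters here, a rough sketch of the setup sequence a userspace hypervisor might issue follows. It is an editorial illustration using the vring and backend ioctls defined in the rest of this header; the file descriptors, ring size and addresses are placeholders, <sys/ioctl.h> is assumed, and real code must check every ioctl return value.

        /* Illustrative only: configure virtqueue 0 of a vhost-net device. */
        static void example_vhost_setup(int vhost_fd, int tap_fd, int kick_fd, int call_fd,
                                        struct vhost_memory *mem, struct vhost_vring_addr *addr)
        {
                struct vhost_vring_state num     = { .index = 0, .num = 256 }; /* ring size */
                struct vhost_vring_state base    = { .index = 0, .num = 0 };   /* first avail index */
                struct vhost_vring_file  kick    = { .index = 0, .fd = kick_fd };
                struct vhost_vring_file  call    = { .index = 0, .fd = call_fd };
                struct vhost_vring_file  backend = { .index = 0, .fd = tap_fd };

                ioctl(vhost_fd, VHOST_SET_OWNER);                 /* claim the fd first */
                ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);        /* guest memory layout */
                ioctl(vhost_fd, VHOST_SET_VRING_NUM, &num);
                ioctl(vhost_fd, VHOST_SET_VRING_BASE, &base);
                ioctl(vhost_fd, VHOST_SET_VRING_ADDR, addr);      /* desc/used/avail addresses */
                ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);     /* eventfd: guest -> host */
                ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);     /* eventfd: host -> guest */
                ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend); /* attach the tap/raw socket */
        }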
Further calls to + * VHOST_OWNER_SET fail until VHOST_OWNER_RESET is called. */ +#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01) +/* Give up ownership, and reset the device to default values. + * Allows subsequent call to VHOST_OWNER_SET to succeed. */ +#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02) + +/* Set up/modify memory layout */ +#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory) + +/* Write logging setup. */ +/* Memory writes can optionally be logged by setting bit at an offset + * (calculated from the physical address) from specified log base. + * The bit is set using an atomic 32 bit operation. */ +/* Set base address for logging. */ +#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64) +/* Specify an eventfd file descriptor to signal on log write. */ +#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int) + +/* Ring setup. */ +/* Set number of descriptors in ring. This parameter can not + * be modified while ring is running (bound to a device). */ +#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state) +/* Set addresses for the ring. */ +#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr) +/* Base value where queue looks for available descriptors */ +#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state) +/* Get accessor: reads index, writes value in num */ +#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state) + +/* The following ioctls use eventfd file descriptors to signal and poll + * for events. */ + +/* Set eventfd to poll for added buffers */ +#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file) +/* Set eventfd to signal when buffers have beed used */ +#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file) +/* Set eventfd to signal an error */ +#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file) + +/* VHOST_NET specific defines */ + +/* Attach virtio net ring to a raw socket, or tap device. + * The socket must be already bound to an ethernet device, this device will be + * used for transmit. Pass fd -1 to unbind from the socket and the transmit + * device. This can be used to stop the ring (e.g. for migration). */ +#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file) + +/* Feature bits */ +/* Log all write descriptors. Can be changed while device is active. */ +#define VHOST_F_LOG_ALL 26 +/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */ +#define VHOST_NET_F_VIRTIO_NET_HDR 27 + +#endif diff --git a/include/linux/via-core.h b/include/linux/via-core.h new file mode 100644 index 00000000..9c21cdf3 --- /dev/null +++ b/include/linux/via-core.h @@ -0,0 +1,236 @@ +/* + * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. + * Copyright 2009-2010 Jonathan Corbet <corbet@lwn.net> + * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; + * either version 2, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even + * the implied warranty of MERCHANTABILITY or FITNESS FOR + * A PARTICULAR PURPOSE.See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __VIA_CORE_H__ +#define __VIA_CORE_H__ +#include <linux/types.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/pci.h> + +/* + * A description of each known serial I2C/GPIO port. + */ +enum via_port_type { + VIA_PORT_NONE = 0, + VIA_PORT_I2C, + VIA_PORT_GPIO, +}; + +enum via_port_mode { + VIA_MODE_OFF = 0, + VIA_MODE_I2C, /* Used as I2C port */ + VIA_MODE_GPIO, /* Two GPIO ports */ +}; + +enum viafb_i2c_adap { + VIA_PORT_26 = 0, + VIA_PORT_31, + VIA_PORT_25, + VIA_PORT_2C, + VIA_PORT_3D, +}; +#define VIAFB_NUM_PORTS 5 + +struct via_port_cfg { + enum via_port_type type; + enum via_port_mode mode; + u16 io_port; + u8 ioport_index; +}; + +/* + * Allow subdevs to register suspend/resume hooks. + */ +#ifdef CONFIG_PM +struct viafb_pm_hooks { + struct list_head list; + int (*suspend)(void *private); + int (*resume)(void *private); + void *private; +}; + +void viafb_pm_register(struct viafb_pm_hooks *hooks); +void viafb_pm_unregister(struct viafb_pm_hooks *hooks); +#endif /* CONFIG_PM */ + +/* + * This is the global viafb "device" containing stuff needed by + * all subdevs. + */ +struct viafb_dev { + struct pci_dev *pdev; + int chip_type; + struct via_port_cfg *port_cfg; + /* + * Spinlock for access to device registers. Not yet + * globally used. + */ + spinlock_t reg_lock; + /* + * The framebuffer MMIO region. Little, if anything, touches + * this memory directly, and certainly nothing outside of the + * framebuffer device itself. We *do* have to be able to allocate + * chunks of this memory for other devices, though. + */ + unsigned long fbmem_start; + long fbmem_len; + void __iomem *fbmem; +#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE) + long camera_fbmem_offset; + long camera_fbmem_size; +#endif + /* + * The MMIO region for device registers. + */ + unsigned long engine_start; + unsigned long engine_len; + void __iomem *engine_mmio; + +}; + +/* + * Interrupt management. + */ + +void viafb_irq_enable(u32 mask); +void viafb_irq_disable(u32 mask); + +/* + * The global interrupt control register and its bits. + */ +#define VDE_INTERRUPT 0x200 /* Video interrupt flags/masks */ +#define VDE_I_DVISENSE 0x00000001 /* DVI sense int status */ +#define VDE_I_VBLANK 0x00000002 /* Vertical blank status */ +#define VDE_I_MCCFI 0x00000004 /* MCE compl. 
frame int status */ +#define VDE_I_VSYNC 0x00000008 /* VGA VSYNC int status */ +#define VDE_I_DMA0DDONE 0x00000010 /* DMA 0 descr done */ +#define VDE_I_DMA0TDONE 0x00000020 /* DMA 0 transfer done */ +#define VDE_I_DMA1DDONE 0x00000040 /* DMA 1 descr done */ +#define VDE_I_DMA1TDONE 0x00000080 /* DMA 1 transfer done */ +#define VDE_I_C1AV 0x00000100 /* Cap Eng 1 act vid end */ +#define VDE_I_HQV0 0x00000200 /* First HQV engine */ +#define VDE_I_HQV1 0x00000400 /* Second HQV engine */ +#define VDE_I_HQV1EN 0x00000800 /* Second HQV engine enable */ +#define VDE_I_C0AV 0x00001000 /* Cap Eng 0 act vid end */ +#define VDE_I_C0VBI 0x00002000 /* Cap Eng 0 VBI end */ +#define VDE_I_C1VBI 0x00004000 /* Cap Eng 1 VBI end */ +#define VDE_I_VSYNC2 0x00008000 /* Sec. Disp. VSYNC */ +#define VDE_I_DVISNSEN 0x00010000 /* DVI sense enable */ +#define VDE_I_VSYNC2EN 0x00020000 /* Sec Disp VSYNC enable */ +#define VDE_I_MCCFIEN 0x00040000 /* MC comp frame int mask enable */ +#define VDE_I_VSYNCEN 0x00080000 /* VSYNC enable */ +#define VDE_I_DMA0DDEN 0x00100000 /* DMA 0 descr done enable */ +#define VDE_I_DMA0TDEN 0x00200000 /* DMA 0 trans done enable */ +#define VDE_I_DMA1DDEN 0x00400000 /* DMA 1 descr done enable */ +#define VDE_I_DMA1TDEN 0x00800000 /* DMA 1 trans done enable */ +#define VDE_I_C1AVEN 0x01000000 /* cap 1 act vid end enable */ +#define VDE_I_HQV0EN 0x02000000 /* First hqv engine enable */ +#define VDE_I_C1VBIEN 0x04000000 /* Cap 1 VBI end enable */ +#define VDE_I_LVDSSI 0x08000000 /* LVDS sense interrupt */ +#define VDE_I_C0AVEN 0x10000000 /* Cap 0 act vid end enable */ +#define VDE_I_C0VBIEN 0x20000000 /* Cap 0 VBI end enable */ +#define VDE_I_LVDSSIEN 0x40000000 /* LVDS Sense enable */ +#define VDE_I_ENABLE 0x80000000 /* Global interrupt enable */ + +#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE) +/* + * DMA management. + */ +int viafb_request_dma(void); +void viafb_release_dma(void); +/* void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len); */ +int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg); + +/* + * DMA Controller registers. + */ +#define VDMA_MR0 0xe00 /* Mod reg 0 */ +#define VDMA_MR_CHAIN 0x01 /* Chaining mode */ +#define VDMA_MR_TDIE 0x02 /* Transfer done int enable */ +#define VDMA_CSR0 0xe04 /* Control/status */ +#define VDMA_C_ENABLE 0x01 /* DMA Enable */ +#define VDMA_C_START 0x02 /* Start a transfer */ +#define VDMA_C_ABORT 0x04 /* Abort a transfer */ +#define VDMA_C_DONE 0x08 /* Transfer is done */ +#define VDMA_MARL0 0xe20 /* Mem addr low */ +#define VDMA_MARH0 0xe24 /* Mem addr high */ +#define VDMA_DAR0 0xe28 /* Device address */ +#define VDMA_DQWCR0 0xe2c /* Count (16-byte) */ +#define VDMA_TMR0 0xe30 /* Tile mode reg */ +#define VDMA_DPRL0 0xe34 /* Not sure */ +#define VDMA_DPR_IN 0x08 /* Inbound transfer to FB */ +#define VDMA_DPRH0 0xe38 +#define VDMA_PMR0 (0xe00 + 0x134) /* Pitch mode */ + +/* + * Useful stuff that probably belongs somewhere global. + */ +#define VGA_WIDTH 640 +#define VGA_HEIGHT 480 +#endif /* CONFIG_VIDEO_VIA_CAMERA */ + +/* + * Indexed port operations. Note that these are all multi-op + * functions; every invocation will be racy if you're not holding + * reg_lock. 
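To make the locking requirement concrete, a short editorial sketch follows. It uses the via_write_reg_mask() helper and the VIASR port defined just below; the viafb_dev pointer stands for wherever the driver keeps its shared state, and the register index and bit value are arbitrary placeholders.

        /* Illustrative only: set one bit in an indexed sequencer register. */
        static void example_set_sr_bit(struct viafb_dev *vdev)
        {
                unsigned long flags;

                /* The index write and the data read-modify-write must not
                 * interleave with another user of the same index/data port pair. */
                spin_lock_irqsave(&vdev->reg_lock, flags);
                via_write_reg_mask(VIASR, 0x1a, 0x08, 0x08);
                spin_unlock_irqrestore(&vdev->reg_lock, flags);
        }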
+ */ + +#define VIAStatus 0x3DA /* Non-indexed port */ +#define VIACR 0x3D4 +#define VIASR 0x3C4 +#define VIAGR 0x3CE +#define VIAAR 0x3C0 + +static inline u8 via_read_reg(u16 port, u8 index) +{ + outb(index, port); + return inb(port + 1); +} + +static inline void via_write_reg(u16 port, u8 index, u8 data) +{ + outb(index, port); + outb(data, port + 1); +} + +static inline void via_write_reg_mask(u16 port, u8 index, u8 data, u8 mask) +{ + u8 old; + + outb(index, port); + old = inb(port + 1); + outb((data & mask) | (old & ~mask), port + 1); +} + +#define VIA_MISC_REG_READ 0x03CC +#define VIA_MISC_REG_WRITE 0x03C2 + +static inline void via_write_misc_reg_mask(u8 data, u8 mask) +{ + u8 old = inb(VIA_MISC_REG_READ); + outb((data & mask) | (old & ~mask), VIA_MISC_REG_WRITE); +} + + +#endif /* __VIA_CORE_H__ */ diff --git a/include/linux/via-gpio.h b/include/linux/via-gpio.h new file mode 100644 index 00000000..8281aea3 --- /dev/null +++ b/include/linux/via-gpio.h @@ -0,0 +1,14 @@ +/* + * Support for viafb GPIO ports. + * + * Copyright 2009 Jonathan Corbet <corbet@lwn.net> + * Distributable under version 2 of the GNU General Public License. + */ + +#ifndef __VIA_GPIO_H__ +#define __VIA_GPIO_H__ + +extern int viafb_gpio_lookup(const char *name); +extern int viafb_gpio_init(void); +extern void viafb_gpio_exit(void); +#endif diff --git a/include/linux/via.h b/include/linux/via.h new file mode 100644 index 00000000..86ae3bcd --- /dev/null +++ b/include/linux/via.h @@ -0,0 +1,22 @@ +/* Miscellaneous definitions for VIA chipsets + Currently used only by drivers/parport/parport_pc.c */ + +/* Values for SuperIO function select configuration register */ +#define VIA_FUNCTION_PARPORT_SPP 0x00 +#define VIA_FUNCTION_PARPORT_ECP 0x01 +#define VIA_FUNCTION_PARPORT_EPP 0x02 +#define VIA_FUNCTION_PARPORT_DISABLE 0x03 +#define VIA_FUNCTION_PROBE 0xFF /* Special magic value to be used in code, not to be written into chip */ + +/* Bits for parallel port mode configuration register */ +#define VIA_PARPORT_ECPEPP 0X20 +#define VIA_PARPORT_BIDIR 0x80 + +/* VIA configuration registers */ +#define VIA_CONFIG_INDEX 0x3F0 +#define VIA_CONFIG_DATA 0x3F1 + +/* Mask for parallel port IRQ bits (in ISA PnP IRQ routing register 1) */ +#define VIA_IRQCONTROL_PARALLEL 0xF0 +/* Mask for parallel port DMA bits (in ISA PnP DMA routing register) */ +#define VIA_DMACONTROL_PARALLEL 0x0C diff --git a/include/linux/via_i2c.h b/include/linux/via_i2c.h new file mode 100644 index 00000000..44532e46 --- /dev/null +++ b/include/linux/via_i2c.h @@ -0,0 +1,42 @@ +/* + * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. + + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; + * either version 2, or (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even + * the implied warranty of MERCHANTABILITY or FITNESS FOR + * A PARTICULAR PURPOSE.See the GNU General Public License + * for more details. + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef __VIA_I2C_H__ +#define __VIA_I2C_H__ + +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> + +struct via_i2c_stuff { + u16 i2c_port; /* GPIO or I2C port */ + u16 is_active; /* Being used as I2C? */ + struct i2c_adapter adapter; + struct i2c_algo_bit_data algo; +}; + + +int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata); +int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data); +int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len); +struct i2c_adapter *viafb_find_i2c_adapter(enum viafb_i2c_adap which); + +extern int viafb_i2c_init(void); +extern void viafb_i2c_exit(void); +#endif /* __VIA_I2C_H__ */ diff --git a/include/linux/video_output.h b/include/linux/video_output.h new file mode 100644 index 00000000..ed5cdeb3 --- /dev/null +++ b/include/linux/video_output.h @@ -0,0 +1,57 @@ +/* + * + * Copyright (C) 2006 Luming Yu <luming.yu@intel.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#ifndef _LINUX_VIDEO_OUTPUT_H +#define _LINUX_VIDEO_OUTPUT_H +#include <linux/device.h> +#include <linux/err.h> +struct output_device; +struct output_properties { + int (*set_state)(struct output_device *); + int (*get_status)(struct output_device *); +}; +struct output_device { + int request_state; + struct output_properties *props; + struct device dev; +}; +#define to_output_device(obj) container_of(obj, struct output_device, dev) +#if defined(CONFIG_VIDEO_OUTPUT_CONTROL) || defined(CONFIG_VIDEO_OUTPUT_CONTROL_MODULE) +struct output_device *video_output_register(const char *name, + struct device *dev, + void *devdata, + struct output_properties *op); +void video_output_unregister(struct output_device *dev); +#else +static struct output_device *video_output_register(const char *name, + struct device *dev, + void *devdata, + struct output_properties *op) +{ + return ERR_PTR(-ENODEV); +} +static void video_output_unregister(struct output_device *dev) +{ + return; +} +#endif +#endif diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h new file mode 100644 index 00000000..c9c9a468 --- /dev/null +++ b/include/linux/videodev2.h @@ -0,0 +1,2416 @@ +/* + * Video for Linux Two header file + * + * Copyright (C) 1999-2007 the contributors + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Alternatively you can redistribute this file under the terms of the + * BSD license as stated below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Header file for v4l or V4L2 drivers and applications + * with public API. + * All kernel-specific stuff were moved to media/v4l2-dev.h, so + * no #if __KERNEL tests are allowed here + * + * See http://linuxtv.org for more info + * + * Author: Bill Dirks <bill@thedirks.org> + * Justin Schoeman + * Hans Verkuil <hverkuil@xs4all.nl> + * et al. + */ +#ifndef __LINUX_VIDEODEV2_H +#define __LINUX_VIDEODEV2_H + +#ifdef __KERNEL__ +#include <linux/time.h> /* need struct timeval */ +#else +#include <sys/time.h> +#endif +#include <linux/compiler.h> +#include <linux/ioctl.h> +#include <linux/types.h> + +/* + * Common stuff for both V4L1 and V4L2 + * Moved from videodev.h + */ +#define VIDEO_MAX_FRAME 32 +#define VIDEO_MAX_PLANES 8 + +#ifndef __KERNEL__ + +/* These defines are V4L1 specific and should not be used with the V4L2 API! + They will be removed from this header in the future. 
*/ + +#define VID_TYPE_CAPTURE 1 /* Can capture */ +#define VID_TYPE_TUNER 2 /* Can tune */ +#define VID_TYPE_TELETEXT 4 /* Does teletext */ +#define VID_TYPE_OVERLAY 8 /* Overlay onto frame buffer */ +#define VID_TYPE_CHROMAKEY 16 /* Overlay by chromakey */ +#define VID_TYPE_CLIPPING 32 /* Can clip */ +#define VID_TYPE_FRAMERAM 64 /* Uses the frame buffer memory */ +#define VID_TYPE_SCALES 128 /* Scalable */ +#define VID_TYPE_MONOCHROME 256 /* Monochrome only */ +#define VID_TYPE_SUBCAPTURE 512 /* Can capture subareas of the image */ +#define VID_TYPE_MPEG_DECODER 1024 /* Can decode MPEG streams */ +#define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */ +#define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */ +#define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */ +#endif + +/* + * M I S C E L L A N E O U S + */ + +/* Four-character-code (FOURCC) */ +#define v4l2_fourcc(a, b, c, d)\ + ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24)) + +/* + * E N U M S + */ +enum v4l2_field { + V4L2_FIELD_ANY = 0, /* driver can choose from none, + top, bottom, interlaced + depending on whatever it thinks + is approximate ... */ + V4L2_FIELD_NONE = 1, /* this device has no fields ... */ + V4L2_FIELD_TOP = 2, /* top field only */ + V4L2_FIELD_BOTTOM = 3, /* bottom field only */ + V4L2_FIELD_INTERLACED = 4, /* both fields interlaced */ + V4L2_FIELD_SEQ_TB = 5, /* both fields sequential into one + buffer, top-bottom order */ + V4L2_FIELD_SEQ_BT = 6, /* same as above + bottom-top order */ + V4L2_FIELD_ALTERNATE = 7, /* both fields alternating into + separate buffers */ + V4L2_FIELD_INTERLACED_TB = 8, /* both fields interlaced, top field + first and the top field is + transmitted first */ + V4L2_FIELD_INTERLACED_BT = 9, /* both fields interlaced, top field + first and the bottom field is + transmitted first */ +}; +#define V4L2_FIELD_HAS_TOP(field) \ + ((field) == V4L2_FIELD_TOP ||\ + (field) == V4L2_FIELD_INTERLACED ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT ||\ + (field) == V4L2_FIELD_SEQ_TB ||\ + (field) == V4L2_FIELD_SEQ_BT) +#define V4L2_FIELD_HAS_BOTTOM(field) \ + ((field) == V4L2_FIELD_BOTTOM ||\ + (field) == V4L2_FIELD_INTERLACED ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT ||\ + (field) == V4L2_FIELD_SEQ_TB ||\ + (field) == V4L2_FIELD_SEQ_BT) +#define V4L2_FIELD_HAS_BOTH(field) \ + ((field) == V4L2_FIELD_INTERLACED ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT ||\ + (field) == V4L2_FIELD_SEQ_TB ||\ + (field) == V4L2_FIELD_SEQ_BT) + +enum v4l2_buf_type { + V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, + V4L2_BUF_TYPE_VIDEO_OUTPUT = 2, + V4L2_BUF_TYPE_VIDEO_OVERLAY = 3, + V4L2_BUF_TYPE_VBI_CAPTURE = 4, + V4L2_BUF_TYPE_VBI_OUTPUT = 5, + V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6, + V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7, +#if 1 + /* Experimental */ + V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8, +#endif + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10, + V4L2_BUF_TYPE_PRIVATE = 0x80, +}; + +#define V4L2_TYPE_IS_MULTIPLANAR(type) \ + ((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE \ + || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + +#define V4L2_TYPE_IS_OUTPUT(type) \ + ((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT \ + || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE \ + || (type) == V4L2_BUF_TYPE_VIDEO_OVERLAY \ + || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \ + || (type) == V4L2_BUF_TYPE_VBI_OUTPUT \ + || (type) == 
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) + +enum v4l2_tuner_type { + V4L2_TUNER_RADIO = 1, + V4L2_TUNER_ANALOG_TV = 2, + V4L2_TUNER_DIGITAL_TV = 3, +}; + +enum v4l2_memory { + V4L2_MEMORY_MMAP = 1, + V4L2_MEMORY_USERPTR = 2, + V4L2_MEMORY_OVERLAY = 3, +}; + +/* see also http://vektor.theorem.ca/graphics/ycbcr/ */ +enum v4l2_colorspace { + /* ITU-R 601 -- broadcast NTSC/PAL */ + V4L2_COLORSPACE_SMPTE170M = 1, + + /* 1125-Line (US) HDTV */ + V4L2_COLORSPACE_SMPTE240M = 2, + + /* HD and modern captures. */ + V4L2_COLORSPACE_REC709 = 3, + + /* broken BT878 extents (601, luma range 16-253 instead of 16-235) */ + V4L2_COLORSPACE_BT878 = 4, + + /* These should be useful. Assume 601 extents. */ + V4L2_COLORSPACE_470_SYSTEM_M = 5, + V4L2_COLORSPACE_470_SYSTEM_BG = 6, + + /* I know there will be cameras that send this. So, this is + * unspecified chromaticities and full 0-255 on each of the + * Y'CbCr components + */ + V4L2_COLORSPACE_JPEG = 7, + + /* For RGB colourspaces, this is probably a good start. */ + V4L2_COLORSPACE_SRGB = 8, +}; + +enum v4l2_priority { + V4L2_PRIORITY_UNSET = 0, /* not initialized */ + V4L2_PRIORITY_BACKGROUND = 1, + V4L2_PRIORITY_INTERACTIVE = 2, + V4L2_PRIORITY_RECORD = 3, + V4L2_PRIORITY_DEFAULT = V4L2_PRIORITY_INTERACTIVE, +}; + +struct v4l2_rect { + __s32 left; + __s32 top; + __s32 width; + __s32 height; +}; + +struct v4l2_fract { + __u32 numerator; + __u32 denominator; +}; + +/** + * struct v4l2_capability - Describes V4L2 device caps returned by VIDIOC_QUERYCAP + * + * @driver: name of the driver module (e.g. "bttv") + * @card: name of the card (e.g. "Hauppauge WinTV") + * @bus_info: name of the bus (e.g. "PCI:" + pci_name(pci_dev) ) + * @version: KERNEL_VERSION + * @capabilities: capabilities of the physical device as a whole + * @device_caps: capabilities accessed via this particular device (node) + * @reserved: reserved fields for future extensions + */ +struct v4l2_capability { + __u8 driver[16]; + __u8 card[32]; + __u8 bus_info[32]; + __u32 version; + __u32 capabilities; + __u32 device_caps; + __u32 reserved[3]; +}; + +/* Values for 'capabilities' field */ +#define V4L2_CAP_VIDEO_CAPTURE 0x00000001 /* Is a video capture device */ +#define V4L2_CAP_VIDEO_OUTPUT 0x00000002 /* Is a video output device */ +#define V4L2_CAP_VIDEO_OVERLAY 0x00000004 /* Can do video overlay */ +#define V4L2_CAP_VBI_CAPTURE 0x00000010 /* Is a raw VBI capture device */ +#define V4L2_CAP_VBI_OUTPUT 0x00000020 /* Is a raw VBI output device */ +#define V4L2_CAP_SLICED_VBI_CAPTURE 0x00000040 /* Is a sliced VBI capture device */ +#define V4L2_CAP_SLICED_VBI_OUTPUT 0x00000080 /* Is a sliced VBI output device */ +#define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */ +#define V4L2_CAP_VIDEO_OUTPUT_OVERLAY 0x00000200 /* Can do video output overlay */ +#define V4L2_CAP_HW_FREQ_SEEK 0x00000400 /* Can do hardware frequency seek */ +#define V4L2_CAP_RDS_OUTPUT 0x00000800 /* Is an RDS encoder */ + +/* Is a video capture device that supports multiplanar formats */ +#define V4L2_CAP_VIDEO_CAPTURE_MPLANE 0x00001000 +/* Is a video output device that supports multiplanar formats */ +#define V4L2_CAP_VIDEO_OUTPUT_MPLANE 0x00002000 + +#define V4L2_CAP_TUNER 0x00010000 /* has a tuner */ +#define V4L2_CAP_AUDIO 0x00020000 /* has audio support */ +#define V4L2_CAP_RADIO 0x00040000 /* is a radio device */ +#define V4L2_CAP_MODULATOR 0x00080000 /* has a modulator */ + +#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */ +#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */ +#define 
V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */ + +#define V4L2_CAP_DEVICE_CAPS 0x80000000 /* sets device capabilities field */ + +/* + * V I D E O I M A G E F O R M A T + */ +struct v4l2_pix_format { + __u32 width; + __u32 height; + __u32 pixelformat; + enum v4l2_field field; + __u32 bytesperline; /* for padding, zero if unused */ + __u32 sizeimage; + enum v4l2_colorspace colorspace; + __u32 priv; /* private data, depends on pixelformat */ +}; + +/* Pixel format FOURCC depth Description */ + +/* RGB formats */ +#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */ +#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */ +#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ +#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ +#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ +#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ +#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */ +#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ +#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */ +#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */ + +/* Grey formats */ +#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ +#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */ +#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */ +#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ +#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */ +#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ + +/* Grey bit-packed formats */ +#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */ + +/* Palette formats */ +#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */ + +/* Luminance+Chrominance formats */ +#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */ +#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ +#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */ +#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ +#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ +#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ +#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */ +#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */ +#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */ +#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */ +#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ +#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */ +#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */ +#define 
V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */ +#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */ + +/* two planes -- one Y, one Cr + Cb interleaved */ +#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ +#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */ +#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */ +#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */ +#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */ + +/* two non contiguous planes - one Y, one Cr + Cb interleaved */ +#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */ + +/* three non contiguous planes - Y, Cb, Cr */ +#define V4L2_PIX_FMT_YUV420M v4l2_fourcc('Y', 'M', '1', '2') /* 12 YUV420 planar */ + +/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */ +#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */ +#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */ +#define V4L2_PIX_FMT_SBGGR12 v4l2_fourcc('B', 'G', '1', '2') /* 12 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */ + /* 10bit raw bayer DPCM compressed to 8 bits */ +#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0') + /* + * 10bit raw bayer, expanded to 16 bits + * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb... + */ +#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. 
*/ + +/* compressed formats */ +#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */ +#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */ +#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */ +#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 Multiplexed */ +#define V4L2_PIX_FMT_H264 v4l2_fourcc('H', '2', '6', '4') /* H264 with start codes */ +#define V4L2_PIX_FMT_H264_NO_SC v4l2_fourcc('A', 'V', 'C', '1') /* H264 without start codes */ +#define V4L2_PIX_FMT_H263 v4l2_fourcc('H', '2', '6', '3') /* H263 */ +#define V4L2_PIX_FMT_MPEG1 v4l2_fourcc('M', 'P', 'G', '1') /* MPEG-1 ES */ +#define V4L2_PIX_FMT_MPEG2 v4l2_fourcc('M', 'P', 'G', '2') /* MPEG-2 ES */ +#define V4L2_PIX_FMT_MPEG4 v4l2_fourcc('M', 'P', 'G', '4') /* MPEG-4 ES */ +#define V4L2_PIX_FMT_XVID v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid */ +#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */ +#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */ + +/* Vendor-specific formats */ +#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */ +#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ +#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ +#define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */ +#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ +#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ +#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ +#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */ +#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */ +#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ +#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ +#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_JL2005BCD v4l2_fourcc('J', 'L', '2', '0') /* compressed RGGB bayer */ +#define V4L2_PIX_FMT_SN9C2028 v4l2_fourcc('S', 'O', 'N', 'X') /* compressed GBRG bayer */ +#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */ +#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */ +#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */ +#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */ +#define V4L2_PIX_FMT_STV0680 v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */ +#define V4L2_PIX_FMT_TM6000 v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */ +#define V4L2_PIX_FMT_CIT_YYVYUY v4l2_fourcc('C', 'I', 'T', 'V') /* one line of Y then 1 line of VYUY */ +#define V4L2_PIX_FMT_KONICA420 v4l2_fourcc('K', 'O', 'N', 'I') /* YUV420 planar in blocks of 256 pixels */ +#define V4L2_PIX_FMT_JPGL v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */ +#define V4L2_PIX_FMT_SE401 v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */ + +/* + * F O R M A T E N U M E R A T I O N + */ +struct v4l2_fmtdesc { + __u32 index; /* Format number */ + enum v4l2_buf_type type; /* buffer type */ + __u32 flags; + __u8 description[32]; /* Description string */ + __u32 pixelformat; /* Format 
fourcc */ + __u32 reserved[4]; +}; + +#define V4L2_FMT_FLAG_COMPRESSED 0x0001 +#define V4L2_FMT_FLAG_EMULATED 0x0002 + +#if 1 + /* Experimental Frame Size and frame rate enumeration */ +/* + * F R A M E S I Z E E N U M E R A T I O N + */ +enum v4l2_frmsizetypes { + V4L2_FRMSIZE_TYPE_DISCRETE = 1, + V4L2_FRMSIZE_TYPE_CONTINUOUS = 2, + V4L2_FRMSIZE_TYPE_STEPWISE = 3, +}; + +struct v4l2_frmsize_discrete { + __u32 width; /* Frame width [pixel] */ + __u32 height; /* Frame height [pixel] */ +}; + +struct v4l2_frmsize_stepwise { + __u32 min_width; /* Minimum frame width [pixel] */ + __u32 max_width; /* Maximum frame width [pixel] */ + __u32 step_width; /* Frame width step size [pixel] */ + __u32 min_height; /* Minimum frame height [pixel] */ + __u32 max_height; /* Maximum frame height [pixel] */ + __u32 step_height; /* Frame height step size [pixel] */ +}; + +struct v4l2_frmsizeenum { + __u32 index; /* Frame size number */ + __u32 pixel_format; /* Pixel format */ + __u32 type; /* Frame size type the device supports. */ + + union { /* Frame size */ + struct v4l2_frmsize_discrete discrete; + struct v4l2_frmsize_stepwise stepwise; + }; + + __u32 reserved[2]; /* Reserved space for future use */ +}; + +/* + * F R A M E R A T E E N U M E R A T I O N + */ +enum v4l2_frmivaltypes { + V4L2_FRMIVAL_TYPE_DISCRETE = 1, + V4L2_FRMIVAL_TYPE_CONTINUOUS = 2, + V4L2_FRMIVAL_TYPE_STEPWISE = 3, +}; + +struct v4l2_frmival_stepwise { + struct v4l2_fract min; /* Minimum frame interval [s] */ + struct v4l2_fract max; /* Maximum frame interval [s] */ + struct v4l2_fract step; /* Frame interval step size [s] */ +}; + +struct v4l2_frmivalenum { + __u32 index; /* Frame format index */ + __u32 pixel_format; /* Pixel format */ + __u32 width; /* Frame width */ + __u32 height; /* Frame height */ + __u32 type; /* Frame interval type the device supports. */ + + union { /* Frame interval */ + struct v4l2_fract discrete; + struct v4l2_frmival_stepwise stepwise; + }; + + __u32 reserved[2]; /* Reserved space for future use */ +}; +#endif + +/* + * T I M E C O D E + */ +struct v4l2_timecode { + __u32 type; + __u32 flags; + __u8 frames; + __u8 seconds; + __u8 minutes; + __u8 hours; + __u8 userbits[4]; +}; + +/* Type */ +#define V4L2_TC_TYPE_24FPS 1 +#define V4L2_TC_TYPE_25FPS 2 +#define V4L2_TC_TYPE_30FPS 3 +#define V4L2_TC_TYPE_50FPS 4 +#define V4L2_TC_TYPE_60FPS 5 + +/* Flags */ +#define V4L2_TC_FLAG_DROPFRAME 0x0001 /* "drop-frame" mode */ +#define V4L2_TC_FLAG_COLORFRAME 0x0002 +#define V4L2_TC_USERBITS_field 0x000C +#define V4L2_TC_USERBITS_USERDEFINED 0x0000 +#define V4L2_TC_USERBITS_8BITCHARS 0x0008 +/* The above is based on SMPTE timecodes */ + +struct v4l2_jpegcompression { + int quality; + + int APPn; /* Number of APP segment to be written, + * must be 0..15 */ + int APP_len; /* Length of data in JPEG APPn segment */ + char APP_data[60]; /* Data in the JPEG APPn segment. */ + + int COM_len; /* Length of data in JPEG COM segment */ + char COM_data[60]; /* Data in JPEG COM segment */ + + __u32 jpeg_markers; /* Which markers should go into the JPEG + * output. Unless you exactly know what + * you do, leave them untouched. + * Inluding less markers will make the + * resulting code smaller, but there will + * be fewer applications which can read it. + * The presence of the APP and COM marker + * is influenced by APP_len and COM_len + * ONLY, not by this property! 
*/ + +#define V4L2_JPEG_MARKER_DHT (1<<3) /* Define Huffman Tables */ +#define V4L2_JPEG_MARKER_DQT (1<<4) /* Define Quantization Tables */ +#define V4L2_JPEG_MARKER_DRI (1<<5) /* Define Restart Interval */ +#define V4L2_JPEG_MARKER_COM (1<<6) /* Comment segment */ +#define V4L2_JPEG_MARKER_APP (1<<7) /* App segment, driver will + * allways use APP0 */ +}; + +/* + * M E M O R Y - M A P P I N G B U F F E R S + */ +struct v4l2_requestbuffers { + __u32 count; + enum v4l2_buf_type type; + enum v4l2_memory memory; + __u32 reserved[2]; +}; + +/** + * struct v4l2_plane - plane info for multi-planar buffers + * @bytesused: number of bytes occupied by data in the plane (payload) + * @length: size of this plane (NOT the payload) in bytes + * @mem_offset: when memory in the associated struct v4l2_buffer is + * V4L2_MEMORY_MMAP, equals the offset from the start of + * the device memory for this plane (or is a "cookie" that + * should be passed to mmap() called on the video node) + * @userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer + * pointing to this plane + * @data_offset: offset in the plane to the start of data; usually 0, + * unless there is a header in front of the data + * + * Multi-planar buffers consist of one or more planes, e.g. an YCbCr buffer + * with two planes can have one plane for Y, and another for interleaved CbCr + * components. Each plane can reside in a separate memory buffer, or even in + * a completely separate memory node (e.g. in embedded devices). + */ +struct v4l2_plane { + __u32 bytesused; + __u32 length; + union { + __u32 mem_offset; + unsigned long userptr; + } m; + __u32 data_offset; + __u32 reserved[11]; +}; + +/** + * struct v4l2_buffer - video buffer info + * @index: id number of the buffer + * @type: buffer type (type == *_MPLANE for multiplanar buffers) + * @bytesused: number of bytes occupied by data in the buffer (payload); + * unused (set to 0) for multiplanar buffers + * @flags: buffer informational flags + * @field: field order of the image in the buffer + * @timestamp: frame timestamp + * @timecode: frame timecode + * @sequence: sequence count of this frame + * @memory: the method, in which the actual video data is passed + * @offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP; + * offset from the start of the device memory for this plane, + * (or a "cookie" that should be passed to mmap() as offset) + * @userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR; + * a userspace pointer pointing to this buffer + * @planes: for multiplanar buffers; userspace pointer to the array of plane + * info structs for this buffer + * @length: size in bytes of the buffer (NOT its payload) for single-plane + * buffers (when type != *_MPLANE); number of elements in the + * planes array for multi-plane buffers + * @input: input number from which the video data has has been captured + * + * Contains data exchanged by application and driver using one of the Streaming + * I/O methods. 
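Because this structure is the core of the streaming ABI, a compressed sketch of the usual mmap() capture cycle is included here for orientation. It is an editorial illustration (the structure itself follows immediately below); the VIDIOC_* ioctl request codes are defined near the end of this header, <string.h> and <sys/ioctl.h> are assumed, and the mmap() call and all error checking are elided.

        /* Illustrative only: allocate, queue and dequeue MMAP capture buffers. */
        static void example_capture_one_frame(int fd)
        {
                struct v4l2_requestbuffers req = {
                        .count  = 4,
                        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
                        .memory = V4L2_MEMORY_MMAP,
                };
                enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                struct v4l2_buffer buf;
                unsigned int i;

                ioctl(fd, VIDIOC_REQBUFS, &req);          /* allocate driver buffers */
                for (i = 0; i < req.count; i++) {
                        memset(&buf, 0, sizeof(buf));
                        buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                        buf.memory = V4L2_MEMORY_MMAP;
                        buf.index  = i;
                        ioctl(fd, VIDIOC_QUERYBUF, &buf); /* learn m.offset and length */
                        /* mmap(NULL, buf.length, PROT_READ, MAP_SHARED, fd, buf.m.offset) */
                        ioctl(fd, VIDIOC_QBUF, &buf);     /* hand the buffer to the driver */
                }
                ioctl(fd, VIDIOC_STREAMON, &type);

                memset(&buf, 0, sizeof(buf));
                buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory = V4L2_MEMORY_MMAP;
                ioctl(fd, VIDIOC_DQBUF, &buf);  /* blocks until a filled buffer is ready */
                /* buf.index names the mapping holding the frame, buf.bytesused its size */
        }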
+ */ +struct v4l2_buffer { + __u32 index; + enum v4l2_buf_type type; + __u32 bytesused; + __u32 flags; + enum v4l2_field field; + struct timeval timestamp; + struct v4l2_timecode timecode; + __u32 sequence; + + /* memory location */ + enum v4l2_memory memory; + union { + __u32 offset; + unsigned long userptr; + struct v4l2_plane *planes; + } m; + __u32 length; + __u32 input; + __u32 reserved; +}; + +/* Flags for 'flags' field */ +#define V4L2_BUF_FLAG_MAPPED 0x0001 /* Buffer is mapped (flag) */ +#define V4L2_BUF_FLAG_QUEUED 0x0002 /* Buffer is queued for processing */ +#define V4L2_BUF_FLAG_DONE 0x0004 /* Buffer is ready */ +#define V4L2_BUF_FLAG_KEYFRAME 0x0008 /* Image is a keyframe (I-frame) */ +#define V4L2_BUF_FLAG_PFRAME 0x0010 /* Image is a P-frame */ +#define V4L2_BUF_FLAG_BFRAME 0x0020 /* Image is a B-frame */ +/* Buffer is ready, but the data contained within is corrupted. */ +#define V4L2_BUF_FLAG_ERROR 0x0040 +#define V4L2_BUF_FLAG_TIMECODE 0x0100 /* timecode field is valid */ +#define V4L2_BUF_FLAG_INPUT 0x0200 /* input field is valid */ +#define V4L2_BUF_FLAG_PREPARED 0x0400 /* Buffer is prepared for queuing */ +/* Cache handling flags */ +#define V4L2_BUF_FLAG_NO_CACHE_INVALIDATE 0x0800 +#define V4L2_BUF_FLAG_NO_CACHE_CLEAN 0x1000 + +/* + * O V E R L A Y P R E V I E W + */ +struct v4l2_framebuffer { + __u32 capability; + __u32 flags; +/* FIXME: in theory we should pass something like PCI device + memory + * region + offset instead of some physical address */ + void *base; + struct v4l2_pix_format fmt; +}; +/* Flags for the 'capability' field. Read only */ +#define V4L2_FBUF_CAP_EXTERNOVERLAY 0x0001 +#define V4L2_FBUF_CAP_CHROMAKEY 0x0002 +#define V4L2_FBUF_CAP_LIST_CLIPPING 0x0004 +#define V4L2_FBUF_CAP_BITMAP_CLIPPING 0x0008 +#define V4L2_FBUF_CAP_LOCAL_ALPHA 0x0010 +#define V4L2_FBUF_CAP_GLOBAL_ALPHA 0x0020 +#define V4L2_FBUF_CAP_LOCAL_INV_ALPHA 0x0040 +#define V4L2_FBUF_CAP_SRC_CHROMAKEY 0x0080 +/* Flags for the 'flags' field. 
*/ +#define V4L2_FBUF_FLAG_PRIMARY 0x0001 +#define V4L2_FBUF_FLAG_OVERLAY 0x0002 +#define V4L2_FBUF_FLAG_CHROMAKEY 0x0004 +#define V4L2_FBUF_FLAG_LOCAL_ALPHA 0x0008 +#define V4L2_FBUF_FLAG_GLOBAL_ALPHA 0x0010 +#define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA 0x0020 +#define V4L2_FBUF_FLAG_SRC_CHROMAKEY 0x0040 + +struct v4l2_clip { + struct v4l2_rect c; + struct v4l2_clip __user *next; +}; + +struct v4l2_window { + struct v4l2_rect w; + enum v4l2_field field; + __u32 chromakey; + struct v4l2_clip __user *clips; + __u32 clipcount; + void __user *bitmap; + __u8 global_alpha; +}; + +/* + * C A P T U R E P A R A M E T E R S + */ +struct v4l2_captureparm { + __u32 capability; /* Supported modes */ + __u32 capturemode; /* Current mode */ + struct v4l2_fract timeperframe; /* Time per frame in .1us units */ + __u32 extendedmode; /* Driver-specific extensions */ + __u32 readbuffers; /* # of buffers for read */ + __u32 reserved[4]; +}; + +/* Flags for 'capability' and 'capturemode' fields */ +#define V4L2_MODE_HIGHQUALITY 0x0001 /* High quality imaging mode */ +#define V4L2_CAP_TIMEPERFRAME 0x1000 /* timeperframe field is supported */ + +struct v4l2_outputparm { + __u32 capability; /* Supported modes */ + __u32 outputmode; /* Current mode */ + struct v4l2_fract timeperframe; /* Time per frame in seconds */ + __u32 extendedmode; /* Driver-specific extensions */ + __u32 writebuffers; /* # of buffers for write */ + __u32 reserved[4]; +}; + +/* + * I N P U T I M A G E C R O P P I N G + */ +struct v4l2_cropcap { + enum v4l2_buf_type type; + struct v4l2_rect bounds; + struct v4l2_rect defrect; + struct v4l2_fract pixelaspect; +}; + +struct v4l2_crop { + enum v4l2_buf_type type; + struct v4l2_rect c; +}; + +/* Hints for adjustments of selection rectangle */ +#define V4L2_SEL_FLAG_GE 0x00000001 +#define V4L2_SEL_FLAG_LE 0x00000002 + +/* Selection targets */ + +/* Current cropping area */ +#define V4L2_SEL_TGT_CROP_ACTIVE 0x0000 +/* Default cropping area */ +#define V4L2_SEL_TGT_CROP_DEFAULT 0x0001 +/* Cropping bounds */ +#define V4L2_SEL_TGT_CROP_BOUNDS 0x0002 +/* Current composing area */ +#define V4L2_SEL_TGT_COMPOSE_ACTIVE 0x0100 +/* Default composing area */ +#define V4L2_SEL_TGT_COMPOSE_DEFAULT 0x0101 +/* Composing bounds */ +#define V4L2_SEL_TGT_COMPOSE_BOUNDS 0x0102 +/* Current composing area plus all padding pixels */ +#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103 + +/** + * struct v4l2_selection - selection info + * @type: buffer type (do not use *_MPLANE types) + * @target: selection target, used to choose one of possible rectangles + * @flags: constraints flags + * @r: coordinates of selection window + * @reserved: for future use, rounds structure size to 64 bytes, set to zero + * + * Hardware may use multiple helper windows to process a video stream. + * The structure is used to exchange this selection areas between + * an application and a driver. 
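For orientation, a brief editorial sketch of how an application fills the structure defined just below; VIDIOC_G_SELECTION/VIDIOC_S_SELECTION are declared with the other ioctl request codes later in this header, and the rectangle values are arbitrary.

        /* Illustrative only: ask the driver to crop the capture stream. */
        static int example_set_crop(int fd)
        {
                struct v4l2_selection sel = {
                        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
                        .target = V4L2_SEL_TGT_CROP_ACTIVE,
                        .flags  = V4L2_SEL_FLAG_GE,     /* result may be grown, not shrunk */
                        .r      = { .left = 320, .top = 120, .width = 640, .height = 480 },
                };

                /* On success sel.r holds the rectangle the driver actually applied. */
                return ioctl(fd, VIDIOC_S_SELECTION, &sel);
        }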
+ */ +struct v4l2_selection { + __u32 type; + __u32 target; + __u32 flags; + struct v4l2_rect r; + __u32 reserved[9]; +}; + + +/* + * A N A L O G V I D E O S T A N D A R D + */ + +typedef __u64 v4l2_std_id; + +/* one bit for each */ +#define V4L2_STD_PAL_B ((v4l2_std_id)0x00000001) +#define V4L2_STD_PAL_B1 ((v4l2_std_id)0x00000002) +#define V4L2_STD_PAL_G ((v4l2_std_id)0x00000004) +#define V4L2_STD_PAL_H ((v4l2_std_id)0x00000008) +#define V4L2_STD_PAL_I ((v4l2_std_id)0x00000010) +#define V4L2_STD_PAL_D ((v4l2_std_id)0x00000020) +#define V4L2_STD_PAL_D1 ((v4l2_std_id)0x00000040) +#define V4L2_STD_PAL_K ((v4l2_std_id)0x00000080) + +#define V4L2_STD_PAL_M ((v4l2_std_id)0x00000100) +#define V4L2_STD_PAL_N ((v4l2_std_id)0x00000200) +#define V4L2_STD_PAL_Nc ((v4l2_std_id)0x00000400) +#define V4L2_STD_PAL_60 ((v4l2_std_id)0x00000800) + +#define V4L2_STD_NTSC_M ((v4l2_std_id)0x00001000) /* BTSC */ +#define V4L2_STD_NTSC_M_JP ((v4l2_std_id)0x00002000) /* EIA-J */ +#define V4L2_STD_NTSC_443 ((v4l2_std_id)0x00004000) +#define V4L2_STD_NTSC_M_KR ((v4l2_std_id)0x00008000) /* FM A2 */ + +#define V4L2_STD_SECAM_B ((v4l2_std_id)0x00010000) +#define V4L2_STD_SECAM_D ((v4l2_std_id)0x00020000) +#define V4L2_STD_SECAM_G ((v4l2_std_id)0x00040000) +#define V4L2_STD_SECAM_H ((v4l2_std_id)0x00080000) +#define V4L2_STD_SECAM_K ((v4l2_std_id)0x00100000) +#define V4L2_STD_SECAM_K1 ((v4l2_std_id)0x00200000) +#define V4L2_STD_SECAM_L ((v4l2_std_id)0x00400000) +#define V4L2_STD_SECAM_LC ((v4l2_std_id)0x00800000) + +/* ATSC/HDTV */ +#define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000) +#define V4L2_STD_ATSC_16_VSB ((v4l2_std_id)0x02000000) + +/* FIXME: + Although std_id is 64 bits, there is an issue on PPC32 architecture that + makes switch(__u64) to break. So, there's a hack on v4l2-common.c rounding + this value to 32 bits. + As, currently, the max value is for V4L2_STD_ATSC_16_VSB (30 bits wide), + it should work fine. However, if needed to add more than two standards, + v4l2-common.c should be fixed. + */ + +/* + * Some macros to merge video standards in order to make live easier for the + * drivers and V4L2 applications + */ + +/* + * "Common" NTSC/M - It should be noticed that V4L2_STD_NTSC_443 is + * Missing here. + */ +#define V4L2_STD_NTSC (V4L2_STD_NTSC_M |\ + V4L2_STD_NTSC_M_JP |\ + V4L2_STD_NTSC_M_KR) +/* Secam macros */ +#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\ + V4L2_STD_SECAM_K |\ + V4L2_STD_SECAM_K1) +/* All Secam Standards */ +#define V4L2_STD_SECAM (V4L2_STD_SECAM_B |\ + V4L2_STD_SECAM_G |\ + V4L2_STD_SECAM_H |\ + V4L2_STD_SECAM_DK |\ + V4L2_STD_SECAM_L |\ + V4L2_STD_SECAM_LC) +/* PAL macros */ +#define V4L2_STD_PAL_BG (V4L2_STD_PAL_B |\ + V4L2_STD_PAL_B1 |\ + V4L2_STD_PAL_G) +#define V4L2_STD_PAL_DK (V4L2_STD_PAL_D |\ + V4L2_STD_PAL_D1 |\ + V4L2_STD_PAL_K) +/* + * "Common" PAL - This macro is there to be compatible with the old + * V4L1 concept of "PAL": /BGDKHI. 
+ * Several PAL standards are mising here: /M, /N and /Nc + */ +#define V4L2_STD_PAL (V4L2_STD_PAL_BG |\ + V4L2_STD_PAL_DK |\ + V4L2_STD_PAL_H |\ + V4L2_STD_PAL_I) +/* Chroma "agnostic" standards */ +#define V4L2_STD_B (V4L2_STD_PAL_B |\ + V4L2_STD_PAL_B1 |\ + V4L2_STD_SECAM_B) +#define V4L2_STD_G (V4L2_STD_PAL_G |\ + V4L2_STD_SECAM_G) +#define V4L2_STD_H (V4L2_STD_PAL_H |\ + V4L2_STD_SECAM_H) +#define V4L2_STD_L (V4L2_STD_SECAM_L |\ + V4L2_STD_SECAM_LC) +#define V4L2_STD_GH (V4L2_STD_G |\ + V4L2_STD_H) +#define V4L2_STD_DK (V4L2_STD_PAL_DK |\ + V4L2_STD_SECAM_DK) +#define V4L2_STD_BG (V4L2_STD_B |\ + V4L2_STD_G) +#define V4L2_STD_MN (V4L2_STD_PAL_M |\ + V4L2_STD_PAL_N |\ + V4L2_STD_PAL_Nc |\ + V4L2_STD_NTSC) + +/* Standards where MTS/BTSC stereo could be found */ +#define V4L2_STD_MTS (V4L2_STD_NTSC_M |\ + V4L2_STD_PAL_M |\ + V4L2_STD_PAL_N |\ + V4L2_STD_PAL_Nc) + +/* Standards for Countries with 60Hz Line frequency */ +#define V4L2_STD_525_60 (V4L2_STD_PAL_M |\ + V4L2_STD_PAL_60 |\ + V4L2_STD_NTSC |\ + V4L2_STD_NTSC_443) +/* Standards for Countries with 50Hz Line frequency */ +#define V4L2_STD_625_50 (V4L2_STD_PAL |\ + V4L2_STD_PAL_N |\ + V4L2_STD_PAL_Nc |\ + V4L2_STD_SECAM) + +#define V4L2_STD_ATSC (V4L2_STD_ATSC_8_VSB |\ + V4L2_STD_ATSC_16_VSB) +/* Macros with none and all analog standards */ +#define V4L2_STD_UNKNOWN 0 +#define V4L2_STD_ALL (V4L2_STD_525_60 |\ + V4L2_STD_625_50) + +struct v4l2_standard { + __u32 index; + v4l2_std_id id; + __u8 name[24]; + struct v4l2_fract frameperiod; /* Frames, not fields */ + __u32 framelines; + __u32 reserved[4]; +}; + +/* + * V I D E O T I M I N G S D V P R E S E T + */ +struct v4l2_dv_preset { + __u32 preset; + __u32 reserved[4]; +}; + +/* + * D V P R E S E T S E N U M E R A T I O N + */ +struct v4l2_dv_enum_preset { + __u32 index; + __u32 preset; + __u8 name[32]; /* Name of the preset timing */ + __u32 width; + __u32 height; + __u32 reserved[4]; +}; + +/* + * D V P R E S E T V A L U E S + */ +#define V4L2_DV_INVALID 0 +#define V4L2_DV_480P59_94 1 /* BT.1362 */ +#define V4L2_DV_576P50 2 /* BT.1362 */ +#define V4L2_DV_720P24 3 /* SMPTE 296M */ +#define V4L2_DV_720P25 4 /* SMPTE 296M */ +#define V4L2_DV_720P30 5 /* SMPTE 296M */ +#define V4L2_DV_720P50 6 /* SMPTE 296M */ +#define V4L2_DV_720P59_94 7 /* SMPTE 274M */ +#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */ +#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */ +#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */ +#define V4L2_DV_1080I25 11 /* BT.1120 */ +#define V4L2_DV_1080I50 12 /* SMPTE 296M */ +#define V4L2_DV_1080I60 13 /* SMPTE 296M */ +#define V4L2_DV_1080P24 14 /* SMPTE 296M */ +#define V4L2_DV_1080P25 15 /* SMPTE 296M */ +#define V4L2_DV_1080P30 16 /* SMPTE 296M */ +#define V4L2_DV_1080P50 17 /* BT.1120 */ +#define V4L2_DV_1080P60 18 /* BT.1120 */ + +/* + * D V B T T I M I N G S + */ + +/* BT.656/BT.1120 timing data */ +struct v4l2_bt_timings { + __u32 width; /* width in pixels */ + __u32 height; /* height in lines */ + __u32 interlaced; /* Interlaced or progressive */ + __u32 polarities; /* Positive or negative polarity */ + __u64 pixelclock; /* Pixel clock in HZ. Ex. 
74.25MHz->74250000 */ + __u32 hfrontporch; /* Horizpontal front porch in pixels */ + __u32 hsync; /* Horizontal Sync length in pixels */ + __u32 hbackporch; /* Horizontal back porch in pixels */ + __u32 vfrontporch; /* Vertical front porch in pixels */ + __u32 vsync; /* Vertical Sync length in lines */ + __u32 vbackporch; /* Vertical back porch in lines */ + __u32 il_vfrontporch; /* Vertical front porch for bottom field of + * interlaced field formats + */ + __u32 il_vsync; /* Vertical sync length for bottom field of + * interlaced field formats + */ + __u32 il_vbackporch; /* Vertical back porch for bottom field of + * interlaced field formats + */ + __u32 reserved[16]; +} __attribute__ ((packed)); + +/* Interlaced or progressive format */ +#define V4L2_DV_PROGRESSIVE 0 +#define V4L2_DV_INTERLACED 1 + +/* Polarities. If bit is not set, it is assumed to be negative polarity */ +#define V4L2_DV_VSYNC_POS_POL 0x00000001 +#define V4L2_DV_HSYNC_POS_POL 0x00000002 + + +/* DV timings */ +struct v4l2_dv_timings { + __u32 type; + union { + struct v4l2_bt_timings bt; + __u32 reserved[32]; + }; +} __attribute__ ((packed)); + +/* Values for the type field */ +#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */ + +/* + * V I D E O I N P U T S + */ +struct v4l2_input { + __u32 index; /* Which input */ + __u8 name[32]; /* Label */ + __u32 type; /* Type of input */ + __u32 audioset; /* Associated audios (bitfield) */ + __u32 tuner; /* Associated tuner */ + v4l2_std_id std; + __u32 status; + __u32 capabilities; + __u32 reserved[3]; +}; + +/* Values for the 'type' field */ +#define V4L2_INPUT_TYPE_TUNER 1 +#define V4L2_INPUT_TYPE_CAMERA 2 + +/* field 'status' - general */ +#define V4L2_IN_ST_NO_POWER 0x00000001 /* Attached device is off */ +#define V4L2_IN_ST_NO_SIGNAL 0x00000002 +#define V4L2_IN_ST_NO_COLOR 0x00000004 + +/* field 'status' - sensor orientation */ +/* If sensor is mounted upside down set both bits */ +#define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */ +#define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */ + +/* field 'status' - analog */ +#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */ +#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */ + +/* field 'status' - digital */ +#define V4L2_IN_ST_NO_SYNC 0x00010000 /* No synchronization lock */ +#define V4L2_IN_ST_NO_EQU 0x00020000 /* No equalizer lock */ +#define V4L2_IN_ST_NO_CARRIER 0x00040000 /* Carrier recovery failed */ + +/* field 'status' - VCR and set-top box */ +#define V4L2_IN_ST_MACROVISION 0x01000000 /* Macrovision detected */ +#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */ +#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */ + +/* capabilities flags */ +#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */ +#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ +#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */ + +/* + * V I D E O O U T P U T S + */ +struct v4l2_output { + __u32 index; /* Which output */ + __u8 name[32]; /* Label */ + __u32 type; /* Type of output */ + __u32 audioset; /* Associated audios (bitfield) */ + __u32 modulator; /* Associated modulator */ + v4l2_std_id std; + __u32 capabilities; + __u32 reserved[3]; +}; +/* Values for the 'type' field */ +#define V4L2_OUTPUT_TYPE_MODULATOR 1 +#define V4L2_OUTPUT_TYPE_ANALOG 2 +#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3 + +/* capabilities flags */ +#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */ 
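
To make the input descriptor above concrete, here is a small userspace sketch (the /dev/video0 path is an assumption and error handling is trimmed) that walks the inputs with VIDIOC_ENUMINPUT, defined near the end of this header, and prints the name, the type and one of the status bits:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        int fd = open("/dev/video0", O_RDWR);
        struct v4l2_input in;

        if (fd < 0)
                return 1;
        memset(&in, 0, sizeof(in));
        /* Increment .index until the driver rejects it (end of list) */
        while (ioctl(fd, VIDIOC_ENUMINPUT, &in) == 0) {
                printf("input %u: %s (%s)%s\n", in.index, (char *)in.name,
                       in.type == V4L2_INPUT_TYPE_TUNER ? "tuner" : "camera",
                       (in.status & V4L2_IN_ST_NO_SIGNAL) ? ", no signal" : "");
                in.index++;
        }
        return 0;
}
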
+#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ +#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */ + +/* + * C O N T R O L S + */ +struct v4l2_control { + __u32 id; + __s32 value; +}; + +struct v4l2_ext_control { + __u32 id; + __u32 size; + __u32 reserved2[1]; + union { + __s32 value; + __s64 value64; + char *string; + }; +} __attribute__ ((packed)); + +struct v4l2_ext_controls { + __u32 ctrl_class; + __u32 count; + __u32 error_idx; + __u32 reserved[2]; + struct v4l2_ext_control *controls; +}; + +/* Values for ctrl_class field */ +#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */ +#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */ +#define V4L2_CTRL_CLASS_CAMERA 0x009a0000 /* Camera class controls */ +#define V4L2_CTRL_CLASS_FM_TX 0x009b0000 /* FM Modulator control class */ +#define V4L2_CTRL_CLASS_FLASH 0x009c0000 /* Camera flash controls */ +#define V4L2_CTRL_CLASS_JPEG 0x009d0000 /* JPEG-compression controls */ + +#define V4L2_CTRL_ID_MASK (0x0fffffff) +#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL) +#define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000) + +enum v4l2_ctrl_type { + V4L2_CTRL_TYPE_INTEGER = 1, + V4L2_CTRL_TYPE_BOOLEAN = 2, + V4L2_CTRL_TYPE_MENU = 3, + V4L2_CTRL_TYPE_BUTTON = 4, + V4L2_CTRL_TYPE_INTEGER64 = 5, + V4L2_CTRL_TYPE_CTRL_CLASS = 6, + V4L2_CTRL_TYPE_STRING = 7, + V4L2_CTRL_TYPE_BITMASK = 8, +}; + +/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ +struct v4l2_queryctrl { + __u32 id; + enum v4l2_ctrl_type type; + __u8 name[32]; /* Whatever */ + __s32 minimum; /* Note signedness */ + __s32 maximum; + __s32 step; + __s32 default_value; + __u32 flags; + __u32 reserved[2]; +}; + +/* Used in the VIDIOC_QUERYMENU ioctl for querying menu items */ +struct v4l2_querymenu { + __u32 id; + __u32 index; + __u8 name[32]; /* Whatever */ + __u32 reserved; +}; + +/* Control flags */ +#define V4L2_CTRL_FLAG_DISABLED 0x0001 +#define V4L2_CTRL_FLAG_GRABBED 0x0002 +#define V4L2_CTRL_FLAG_READ_ONLY 0x0004 +#define V4L2_CTRL_FLAG_UPDATE 0x0008 +#define V4L2_CTRL_FLAG_INACTIVE 0x0010 +#define V4L2_CTRL_FLAG_SLIDER 0x0020 +#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040 +#define V4L2_CTRL_FLAG_VOLATILE 0x0080 + +/* Query flag, to be ORed with the control ID */ +#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000 + +/* User-class control IDs defined by V4L2 */ +#define V4L2_CID_MAX_CTRLS 1024 +#define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900) +#define V4L2_CID_USER_BASE V4L2_CID_BASE +/* IDs reserved for driver specific controls */ +#define V4L2_CID_PRIVATE_BASE 0x08000000 + +#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1) +#define V4L2_CID_BRIGHTNESS (V4L2_CID_BASE+0) +#define V4L2_CID_CONTRAST (V4L2_CID_BASE+1) +#define V4L2_CID_SATURATION (V4L2_CID_BASE+2) +#define V4L2_CID_HUE (V4L2_CID_BASE+3) +#define V4L2_CID_AUDIO_VOLUME (V4L2_CID_BASE+5) +#define V4L2_CID_AUDIO_BALANCE (V4L2_CID_BASE+6) +#define V4L2_CID_AUDIO_BASS (V4L2_CID_BASE+7) +#define V4L2_CID_AUDIO_TREBLE (V4L2_CID_BASE+8) +#define V4L2_CID_AUDIO_MUTE (V4L2_CID_BASE+9) +#define V4L2_CID_AUDIO_LOUDNESS (V4L2_CID_BASE+10) +#define V4L2_CID_BLACK_LEVEL (V4L2_CID_BASE+11) /* Deprecated */ +#define V4L2_CID_AUTO_WHITE_BALANCE (V4L2_CID_BASE+12) +#define V4L2_CID_DO_WHITE_BALANCE (V4L2_CID_BASE+13) +#define V4L2_CID_RED_BALANCE (V4L2_CID_BASE+14) +#define V4L2_CID_BLUE_BALANCE (V4L2_CID_BASE+15) +#define V4L2_CID_GAMMA (V4L2_CID_BASE+16) +#define V4L2_CID_WHITENESS (V4L2_CID_GAMMA) /* Deprecated */ +#define V4L2_CID_EXPOSURE 
(V4L2_CID_BASE+17) +#define V4L2_CID_AUTOGAIN (V4L2_CID_BASE+18) +#define V4L2_CID_GAIN (V4L2_CID_BASE+19) +#define V4L2_CID_HFLIP (V4L2_CID_BASE+20) +#define V4L2_CID_VFLIP (V4L2_CID_BASE+21) + +/* Deprecated; use V4L2_CID_PAN_RESET and V4L2_CID_TILT_RESET */ +#define V4L2_CID_HCENTER (V4L2_CID_BASE+22) +#define V4L2_CID_VCENTER (V4L2_CID_BASE+23) + +#define V4L2_CID_POWER_LINE_FREQUENCY (V4L2_CID_BASE+24) +enum v4l2_power_line_frequency { + V4L2_CID_POWER_LINE_FREQUENCY_DISABLED = 0, + V4L2_CID_POWER_LINE_FREQUENCY_50HZ = 1, + V4L2_CID_POWER_LINE_FREQUENCY_60HZ = 2, + V4L2_CID_POWER_LINE_FREQUENCY_AUTO = 3, +}; +#define V4L2_CID_HUE_AUTO (V4L2_CID_BASE+25) +#define V4L2_CID_WHITE_BALANCE_TEMPERATURE (V4L2_CID_BASE+26) +#define V4L2_CID_SHARPNESS (V4L2_CID_BASE+27) +#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28) +#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29) +#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30) +#define V4L2_CID_COLORFX (V4L2_CID_BASE+31) +enum v4l2_colorfx { + V4L2_COLORFX_NONE = 0, + V4L2_COLORFX_BW = 1, + V4L2_COLORFX_SEPIA = 2, + V4L2_COLORFX_NEGATIVE = 3, + V4L2_COLORFX_EMBOSS = 4, + V4L2_COLORFX_SKETCH = 5, + V4L2_COLORFX_SKY_BLUE = 6, + V4L2_COLORFX_GRASS_GREEN = 7, + V4L2_COLORFX_SKIN_WHITEN = 8, + V4L2_COLORFX_VIVID = 9, +}; +#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32) +#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33) + +#define V4L2_CID_ROTATE (V4L2_CID_BASE+34) +#define V4L2_CID_BG_COLOR (V4L2_CID_BASE+35) + +#define V4L2_CID_CHROMA_GAIN (V4L2_CID_BASE+36) + +#define V4L2_CID_ILLUMINATORS_1 (V4L2_CID_BASE+37) +#define V4L2_CID_ILLUMINATORS_2 (V4L2_CID_BASE+38) + +#define V4L2_CID_MIN_BUFFERS_FOR_CAPTURE (V4L2_CID_BASE+39) +#define V4L2_CID_MIN_BUFFERS_FOR_OUTPUT (V4L2_CID_BASE+40) + +#define V4L2_CID_ALPHA_COMPONENT (V4L2_CID_BASE+41) + +/* last CID + 1 */ +#define V4L2_CID_LASTP1 (V4L2_CID_BASE+42) + +/* MPEG-class control IDs defined by V4L2 */ +#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) +#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1) + +/* MPEG streams, specific to multiplexed streams */ +#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0) +enum v4l2_mpeg_stream_type { + V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */ + V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */ + V4L2_MPEG_STREAM_TYPE_MPEG1_SS = 2, /* MPEG-1 system stream */ + V4L2_MPEG_STREAM_TYPE_MPEG2_DVD = 3, /* MPEG-2 DVD-compatible stream */ + V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */ + V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */ +}; +#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1) +#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2) +#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3) +#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4) +#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5) +#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6) +#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7) +enum v4l2_mpeg_stream_vbi_fmt { + V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */ + V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */ +}; + +/* MPEG audio controls specific to multiplexed streams */ +#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100) +enum v4l2_mpeg_audio_sampling_freq { + V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0, + V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1, + V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2, +}; +#define 
V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101) +enum v4l2_mpeg_audio_encoding { + V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0, + V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1, + V4L2_MPEG_AUDIO_ENCODING_LAYER_3 = 2, + V4L2_MPEG_AUDIO_ENCODING_AAC = 3, + V4L2_MPEG_AUDIO_ENCODING_AC3 = 4, +}; +#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102) +enum v4l2_mpeg_audio_l1_bitrate { + V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0, + V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1, + V4L2_MPEG_AUDIO_L1_BITRATE_96K = 2, + V4L2_MPEG_AUDIO_L1_BITRATE_128K = 3, + V4L2_MPEG_AUDIO_L1_BITRATE_160K = 4, + V4L2_MPEG_AUDIO_L1_BITRATE_192K = 5, + V4L2_MPEG_AUDIO_L1_BITRATE_224K = 6, + V4L2_MPEG_AUDIO_L1_BITRATE_256K = 7, + V4L2_MPEG_AUDIO_L1_BITRATE_288K = 8, + V4L2_MPEG_AUDIO_L1_BITRATE_320K = 9, + V4L2_MPEG_AUDIO_L1_BITRATE_352K = 10, + V4L2_MPEG_AUDIO_L1_BITRATE_384K = 11, + V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12, + V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13, +}; +#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103) +enum v4l2_mpeg_audio_l2_bitrate { + V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0, + V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1, + V4L2_MPEG_AUDIO_L2_BITRATE_56K = 2, + V4L2_MPEG_AUDIO_L2_BITRATE_64K = 3, + V4L2_MPEG_AUDIO_L2_BITRATE_80K = 4, + V4L2_MPEG_AUDIO_L2_BITRATE_96K = 5, + V4L2_MPEG_AUDIO_L2_BITRATE_112K = 6, + V4L2_MPEG_AUDIO_L2_BITRATE_128K = 7, + V4L2_MPEG_AUDIO_L2_BITRATE_160K = 8, + V4L2_MPEG_AUDIO_L2_BITRATE_192K = 9, + V4L2_MPEG_AUDIO_L2_BITRATE_224K = 10, + V4L2_MPEG_AUDIO_L2_BITRATE_256K = 11, + V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12, + V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13, +}; +#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104) +enum v4l2_mpeg_audio_l3_bitrate { + V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0, + V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1, + V4L2_MPEG_AUDIO_L3_BITRATE_48K = 2, + V4L2_MPEG_AUDIO_L3_BITRATE_56K = 3, + V4L2_MPEG_AUDIO_L3_BITRATE_64K = 4, + V4L2_MPEG_AUDIO_L3_BITRATE_80K = 5, + V4L2_MPEG_AUDIO_L3_BITRATE_96K = 6, + V4L2_MPEG_AUDIO_L3_BITRATE_112K = 7, + V4L2_MPEG_AUDIO_L3_BITRATE_128K = 8, + V4L2_MPEG_AUDIO_L3_BITRATE_160K = 9, + V4L2_MPEG_AUDIO_L3_BITRATE_192K = 10, + V4L2_MPEG_AUDIO_L3_BITRATE_224K = 11, + V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12, + V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13, +}; +#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105) +enum v4l2_mpeg_audio_mode { + V4L2_MPEG_AUDIO_MODE_STEREO = 0, + V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1, + V4L2_MPEG_AUDIO_MODE_DUAL = 2, + V4L2_MPEG_AUDIO_MODE_MONO = 3, +}; +#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106) +enum v4l2_mpeg_audio_mode_extension { + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3, +}; +#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107) +enum v4l2_mpeg_audio_emphasis { + V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0, + V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1, + V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2, +}; +#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108) +enum v4l2_mpeg_audio_crc { + V4L2_MPEG_AUDIO_CRC_NONE = 0, + V4L2_MPEG_AUDIO_CRC_CRC16 = 1, +}; +#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109) +#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_MPEG_BASE+110) +#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_MPEG_BASE+111) +enum v4l2_mpeg_audio_ac3_bitrate { + V4L2_MPEG_AUDIO_AC3_BITRATE_32K = 0, + V4L2_MPEG_AUDIO_AC3_BITRATE_40K = 1, + V4L2_MPEG_AUDIO_AC3_BITRATE_48K = 2, + V4L2_MPEG_AUDIO_AC3_BITRATE_56K = 3, + 
V4L2_MPEG_AUDIO_AC3_BITRATE_64K = 4, + V4L2_MPEG_AUDIO_AC3_BITRATE_80K = 5, + V4L2_MPEG_AUDIO_AC3_BITRATE_96K = 6, + V4L2_MPEG_AUDIO_AC3_BITRATE_112K = 7, + V4L2_MPEG_AUDIO_AC3_BITRATE_128K = 8, + V4L2_MPEG_AUDIO_AC3_BITRATE_160K = 9, + V4L2_MPEG_AUDIO_AC3_BITRATE_192K = 10, + V4L2_MPEG_AUDIO_AC3_BITRATE_224K = 11, + V4L2_MPEG_AUDIO_AC3_BITRATE_256K = 12, + V4L2_MPEG_AUDIO_AC3_BITRATE_320K = 13, + V4L2_MPEG_AUDIO_AC3_BITRATE_384K = 14, + V4L2_MPEG_AUDIO_AC3_BITRATE_448K = 15, + V4L2_MPEG_AUDIO_AC3_BITRATE_512K = 16, + V4L2_MPEG_AUDIO_AC3_BITRATE_576K = 17, + V4L2_MPEG_AUDIO_AC3_BITRATE_640K = 18, +}; +#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK (V4L2_CID_MPEG_BASE+112) +enum v4l2_mpeg_audio_dec_playback { + V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO = 0, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO = 1, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT = 2, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT = 3, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO = 4, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO = 5, +}; +#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113) + +/* MPEG video controls specific to multiplexed streams */ +#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200) +enum v4l2_mpeg_video_encoding { + V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0, + V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1, + V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2, +}; +#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201) +enum v4l2_mpeg_video_aspect { + V4L2_MPEG_VIDEO_ASPECT_1x1 = 0, + V4L2_MPEG_VIDEO_ASPECT_4x3 = 1, + V4L2_MPEG_VIDEO_ASPECT_16x9 = 2, + V4L2_MPEG_VIDEO_ASPECT_221x100 = 3, +}; +#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202) +#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203) +#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204) +#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205) +#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206) +enum v4l2_mpeg_video_bitrate_mode { + V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0, + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1, +}; +#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207) +#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208) +#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209) +#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210) +#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) +#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_MPEG_BASE+212) +#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_MPEG_BASE+213) +#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_MPEG_BASE+214) +#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (V4L2_CID_MPEG_BASE+215) +#define V4L2_CID_MPEG_VIDEO_HEADER_MODE (V4L2_CID_MPEG_BASE+216) +enum v4l2_mpeg_video_header_mode { + V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE = 0, + V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME = 1, + +}; +#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (V4L2_CID_MPEG_BASE+217) +#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (V4L2_CID_MPEG_BASE+218) +#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (V4L2_CID_MPEG_BASE+219) +#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (V4L2_CID_MPEG_BASE+220) +#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (V4L2_CID_MPEG_BASE+221) +enum v4l2_mpeg_video_multi_slice_mode { + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE = 0, + V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB = 1, + V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES = 2, +}; +#define V4L2_CID_MPEG_VIDEO_VBV_SIZE (V4L2_CID_MPEG_BASE+222) +#define V4L2_CID_MPEG_VIDEO_DEC_PTS (V4L2_CID_MPEG_BASE+223) +#define 
V4L2_CID_MPEG_VIDEO_DEC_FRAME (V4L2_CID_MPEG_BASE+224) + +#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300) +#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301) +#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302) +#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP (V4L2_CID_MPEG_BASE+303) +#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP (V4L2_CID_MPEG_BASE+304) +#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (V4L2_CID_MPEG_BASE+350) +#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (V4L2_CID_MPEG_BASE+351) +#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (V4L2_CID_MPEG_BASE+352) +#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP (V4L2_CID_MPEG_BASE+353) +#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP (V4L2_CID_MPEG_BASE+354) +#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (V4L2_CID_MPEG_BASE+355) +#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (V4L2_CID_MPEG_BASE+356) +#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE (V4L2_CID_MPEG_BASE+357) +enum v4l2_mpeg_video_h264_entropy_mode { + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC = 0, + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC = 1, +}; +#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (V4L2_CID_MPEG_BASE+358) +#define V4L2_CID_MPEG_VIDEO_H264_LEVEL (V4L2_CID_MPEG_BASE+359) +enum v4l2_mpeg_video_h264_level { + V4L2_MPEG_VIDEO_H264_LEVEL_1_0 = 0, + V4L2_MPEG_VIDEO_H264_LEVEL_1B = 1, + V4L2_MPEG_VIDEO_H264_LEVEL_1_1 = 2, + V4L2_MPEG_VIDEO_H264_LEVEL_1_2 = 3, + V4L2_MPEG_VIDEO_H264_LEVEL_1_3 = 4, + V4L2_MPEG_VIDEO_H264_LEVEL_2_0 = 5, + V4L2_MPEG_VIDEO_H264_LEVEL_2_1 = 6, + V4L2_MPEG_VIDEO_H264_LEVEL_2_2 = 7, + V4L2_MPEG_VIDEO_H264_LEVEL_3_0 = 8, + V4L2_MPEG_VIDEO_H264_LEVEL_3_1 = 9, + V4L2_MPEG_VIDEO_H264_LEVEL_3_2 = 10, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0 = 11, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1 = 12, + V4L2_MPEG_VIDEO_H264_LEVEL_4_2 = 13, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0 = 14, + V4L2_MPEG_VIDEO_H264_LEVEL_5_1 = 15, +}; +#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (V4L2_CID_MPEG_BASE+360) +#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (V4L2_CID_MPEG_BASE+361) +#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE (V4L2_CID_MPEG_BASE+362) +enum v4l2_mpeg_video_h264_loop_filter_mode { + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED = 0, + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED = 1, + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2, +}; +#define V4L2_CID_MPEG_VIDEO_H264_PROFILE (V4L2_CID_MPEG_BASE+363) +enum v4l2_mpeg_video_h264_profile { + V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE = 0, + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE = 1, + V4L2_MPEG_VIDEO_H264_PROFILE_MAIN = 2, + V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED = 3, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH = 4, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10 = 5, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422 = 6, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE = 7, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA = 8, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA = 9, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA = 10, + V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA = 11, + V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE = 12, + V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH = 13, + V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA = 14, + V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH = 15, + V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH = 16, +}; +#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_MPEG_BASE+364) +#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_MPEG_BASE+365) +#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (V4L2_CID_MPEG_BASE+366) +#define 
V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC (V4L2_CID_MPEG_BASE+367) +enum v4l2_mpeg_video_h264_vui_sar_idc { + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED = 0, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 = 1, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11 = 2, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11 = 3, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11 = 4, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33 = 5, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11 = 6, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11 = 7, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11 = 8, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33 = 9, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11 = 10, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11 = 11, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33 = 12, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99 = 13, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3 = 14, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2 = 15, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 = 16, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED = 17, +}; +#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_MPEG_BASE+400) +#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_MPEG_BASE+401) +#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_MPEG_BASE+402) +#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (V4L2_CID_MPEG_BASE+403) +#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (V4L2_CID_MPEG_BASE+404) +#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (V4L2_CID_MPEG_BASE+405) +enum v4l2_mpeg_video_mpeg4_level { + V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 = 0, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B = 1, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 = 2, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 = 3, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 = 4, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B = 5, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 = 6, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 = 7, +}; +#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE (V4L2_CID_MPEG_BASE+406) +enum v4l2_mpeg_video_mpeg4_profile { + V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE = 0, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE = 1, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE = 2, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE = 3, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY = 4, +}; +#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (V4L2_CID_MPEG_BASE+407) + +/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */ +#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) +#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0) +enum v4l2_mpeg_cx2341x_video_spatial_filter_mode { + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0, + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1) +#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2) +enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type { + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT = 2, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3) +enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type { + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0, + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+4) +enum v4l2_mpeg_cx2341x_video_temporal_filter_mode { + 
V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0, + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5) +#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6) +enum v4l2_mpeg_cx2341x_video_median_filter_type { + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT = 2, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7) +#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8) +#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+9) +#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10) +#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11) + +/* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */ +#define V4L2_CID_MPEG_MFC51_BASE (V4L2_CTRL_CLASS_MPEG | 0x1100) + +#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (V4L2_CID_MPEG_MFC51_BASE+0) +#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (V4L2_CID_MPEG_MFC51_BASE+1) +#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE (V4L2_CID_MPEG_MFC51_BASE+2) +enum v4l2_mpeg_mfc51_video_frame_skip_mode { + V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED = 0, + V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1, + V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2, +}; +#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE (V4L2_CID_MPEG_MFC51_BASE+3) +enum v4l2_mpeg_mfc51_video_force_frame_type { + V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED = 0, + V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME = 1, + V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED = 2, +}; +#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING (V4L2_CID_MPEG_MFC51_BASE+4) +#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (V4L2_CID_MPEG_MFC51_BASE+5) +#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (V4L2_CID_MPEG_MFC51_BASE+6) +#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (V4L2_CID_MPEG_MFC51_BASE+7) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (V4L2_CID_MPEG_MFC51_BASE+50) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (V4L2_CID_MPEG_MFC51_BASE+51) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (V4L2_CID_MPEG_MFC51_BASE+52) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_MPEG_MFC51_BASE+53) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_MPEG_MFC51_BASE+54) + +/* Camera class control IDs */ +#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900) +#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1) + +#define V4L2_CID_EXPOSURE_AUTO (V4L2_CID_CAMERA_CLASS_BASE+1) +enum v4l2_exposure_auto_type { + V4L2_EXPOSURE_AUTO = 0, + V4L2_EXPOSURE_MANUAL = 1, + V4L2_EXPOSURE_SHUTTER_PRIORITY = 2, + V4L2_EXPOSURE_APERTURE_PRIORITY = 3 +}; +#define V4L2_CID_EXPOSURE_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+2) +#define V4L2_CID_EXPOSURE_AUTO_PRIORITY (V4L2_CID_CAMERA_CLASS_BASE+3) + +#define V4L2_CID_PAN_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+4) +#define V4L2_CID_TILT_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+5) +#define V4L2_CID_PAN_RESET (V4L2_CID_CAMERA_CLASS_BASE+6) +#define V4L2_CID_TILT_RESET 
(V4L2_CID_CAMERA_CLASS_BASE+7) + +#define V4L2_CID_PAN_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+8) +#define V4L2_CID_TILT_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+9) + +#define V4L2_CID_FOCUS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+10) +#define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11) +#define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12) + +#define V4L2_CID_ZOOM_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+13) +#define V4L2_CID_ZOOM_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+14) +#define V4L2_CID_ZOOM_CONTINUOUS (V4L2_CID_CAMERA_CLASS_BASE+15) + +#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16) + +#define V4L2_CID_IRIS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+17) +#define V4L2_CID_IRIS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+18) + +/* FM Modulator class control IDs */ +#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900) +#define V4L2_CID_FM_TX_CLASS (V4L2_CTRL_CLASS_FM_TX | 1) + +#define V4L2_CID_RDS_TX_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 1) +#define V4L2_CID_RDS_TX_PI (V4L2_CID_FM_TX_CLASS_BASE + 2) +#define V4L2_CID_RDS_TX_PTY (V4L2_CID_FM_TX_CLASS_BASE + 3) +#define V4L2_CID_RDS_TX_PS_NAME (V4L2_CID_FM_TX_CLASS_BASE + 5) +#define V4L2_CID_RDS_TX_RADIO_TEXT (V4L2_CID_FM_TX_CLASS_BASE + 6) + +#define V4L2_CID_AUDIO_LIMITER_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 64) +#define V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (V4L2_CID_FM_TX_CLASS_BASE + 65) +#define V4L2_CID_AUDIO_LIMITER_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 66) + +#define V4L2_CID_AUDIO_COMPRESSION_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 80) +#define V4L2_CID_AUDIO_COMPRESSION_GAIN (V4L2_CID_FM_TX_CLASS_BASE + 81) +#define V4L2_CID_AUDIO_COMPRESSION_THRESHOLD (V4L2_CID_FM_TX_CLASS_BASE + 82) +#define V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME (V4L2_CID_FM_TX_CLASS_BASE + 83) +#define V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME (V4L2_CID_FM_TX_CLASS_BASE + 84) + +#define V4L2_CID_PILOT_TONE_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 96) +#define V4L2_CID_PILOT_TONE_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 97) +#define V4L2_CID_PILOT_TONE_FREQUENCY (V4L2_CID_FM_TX_CLASS_BASE + 98) + +#define V4L2_CID_TUNE_PREEMPHASIS (V4L2_CID_FM_TX_CLASS_BASE + 112) +enum v4l2_preemphasis { + V4L2_PREEMPHASIS_DISABLED = 0, + V4L2_PREEMPHASIS_50_uS = 1, + V4L2_PREEMPHASIS_75_uS = 2, +}; +#define V4L2_CID_TUNE_POWER_LEVEL (V4L2_CID_FM_TX_CLASS_BASE + 113) +#define V4L2_CID_TUNE_ANTENNA_CAPACITOR (V4L2_CID_FM_TX_CLASS_BASE + 114) + +/* Flash and privacy (indicator) light controls */ +#define V4L2_CID_FLASH_CLASS_BASE (V4L2_CTRL_CLASS_FLASH | 0x900) +#define V4L2_CID_FLASH_CLASS (V4L2_CTRL_CLASS_FLASH | 1) + +#define V4L2_CID_FLASH_LED_MODE (V4L2_CID_FLASH_CLASS_BASE + 1) +enum v4l2_flash_led_mode { + V4L2_FLASH_LED_MODE_NONE, + V4L2_FLASH_LED_MODE_FLASH, + V4L2_FLASH_LED_MODE_TORCH, +}; + +#define V4L2_CID_FLASH_STROBE_SOURCE (V4L2_CID_FLASH_CLASS_BASE + 2) +enum v4l2_flash_strobe_source { + V4L2_FLASH_STROBE_SOURCE_SOFTWARE, + V4L2_FLASH_STROBE_SOURCE_EXTERNAL, +}; + +#define V4L2_CID_FLASH_STROBE (V4L2_CID_FLASH_CLASS_BASE + 3) +#define V4L2_CID_FLASH_STROBE_STOP (V4L2_CID_FLASH_CLASS_BASE + 4) +#define V4L2_CID_FLASH_STROBE_STATUS (V4L2_CID_FLASH_CLASS_BASE + 5) + +#define V4L2_CID_FLASH_TIMEOUT (V4L2_CID_FLASH_CLASS_BASE + 6) +#define V4L2_CID_FLASH_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 7) +#define V4L2_CID_FLASH_TORCH_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 8) +#define V4L2_CID_FLASH_INDICATOR_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 9) + +#define V4L2_CID_FLASH_FAULT (V4L2_CID_FLASH_CLASS_BASE + 10) +#define V4L2_FLASH_FAULT_OVER_VOLTAGE (1 << 0) +#define 
V4L2_FLASH_FAULT_TIMEOUT (1 << 1) +#define V4L2_FLASH_FAULT_OVER_TEMPERATURE (1 << 2) +#define V4L2_FLASH_FAULT_SHORT_CIRCUIT (1 << 3) +#define V4L2_FLASH_FAULT_OVER_CURRENT (1 << 4) +#define V4L2_FLASH_FAULT_INDICATOR (1 << 5) + +#define V4L2_CID_FLASH_CHARGE (V4L2_CID_FLASH_CLASS_BASE + 11) +#define V4L2_CID_FLASH_READY (V4L2_CID_FLASH_CLASS_BASE + 12) + +/* JPEG-class control IDs defined by V4L2 */ +#define V4L2_CID_JPEG_CLASS_BASE (V4L2_CTRL_CLASS_JPEG | 0x900) +#define V4L2_CID_JPEG_CLASS (V4L2_CTRL_CLASS_JPEG | 1) + +#define V4L2_CID_JPEG_CHROMA_SUBSAMPLING (V4L2_CID_JPEG_CLASS_BASE + 1) +enum v4l2_jpeg_chroma_subsampling { + V4L2_JPEG_CHROMA_SUBSAMPLING_444 = 0, + V4L2_JPEG_CHROMA_SUBSAMPLING_422 = 1, + V4L2_JPEG_CHROMA_SUBSAMPLING_420 = 2, + V4L2_JPEG_CHROMA_SUBSAMPLING_411 = 3, + V4L2_JPEG_CHROMA_SUBSAMPLING_410 = 4, + V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY = 5, +}; +#define V4L2_CID_JPEG_RESTART_INTERVAL (V4L2_CID_JPEG_CLASS_BASE + 2) +#define V4L2_CID_JPEG_COMPRESSION_QUALITY (V4L2_CID_JPEG_CLASS_BASE + 3) + +#define V4L2_CID_JPEG_ACTIVE_MARKER (V4L2_CID_JPEG_CLASS_BASE + 4) +#define V4L2_JPEG_ACTIVE_MARKER_APP0 (1 << 0) +#define V4L2_JPEG_ACTIVE_MARKER_APP1 (1 << 1) +#define V4L2_JPEG_ACTIVE_MARKER_COM (1 << 16) +#define V4L2_JPEG_ACTIVE_MARKER_DQT (1 << 17) +#define V4L2_JPEG_ACTIVE_MARKER_DHT (1 << 18) + +/* + * T U N I N G + */ +struct v4l2_tuner { + __u32 index; + __u8 name[32]; + enum v4l2_tuner_type type; + __u32 capability; + __u32 rangelow; + __u32 rangehigh; + __u32 rxsubchans; + __u32 audmode; + __s32 signal; + __s32 afc; + __u32 reserved[4]; +}; + +struct v4l2_modulator { + __u32 index; + __u8 name[32]; + __u32 capability; + __u32 rangelow; + __u32 rangehigh; + __u32 txsubchans; + __u32 reserved[4]; +}; + +/* Flags for the 'capability' field */ +#define V4L2_TUNER_CAP_LOW 0x0001 +#define V4L2_TUNER_CAP_NORM 0x0002 +#define V4L2_TUNER_CAP_STEREO 0x0010 +#define V4L2_TUNER_CAP_LANG2 0x0020 +#define V4L2_TUNER_CAP_SAP 0x0020 +#define V4L2_TUNER_CAP_LANG1 0x0040 +#define V4L2_TUNER_CAP_RDS 0x0080 +#define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100 +#define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200 + +/* Flags for the 'rxsubchans' field */ +#define V4L2_TUNER_SUB_MONO 0x0001 +#define V4L2_TUNER_SUB_STEREO 0x0002 +#define V4L2_TUNER_SUB_LANG2 0x0004 +#define V4L2_TUNER_SUB_SAP 0x0004 +#define V4L2_TUNER_SUB_LANG1 0x0008 +#define V4L2_TUNER_SUB_RDS 0x0010 + +/* Values for the 'audmode' field */ +#define V4L2_TUNER_MODE_MONO 0x0000 +#define V4L2_TUNER_MODE_STEREO 0x0001 +#define V4L2_TUNER_MODE_LANG2 0x0002 +#define V4L2_TUNER_MODE_SAP 0x0002 +#define V4L2_TUNER_MODE_LANG1 0x0003 +#define V4L2_TUNER_MODE_LANG1_LANG2 0x0004 + +struct v4l2_frequency { + __u32 tuner; + enum v4l2_tuner_type type; + __u32 frequency; + __u32 reserved[8]; +}; + +struct v4l2_hw_freq_seek { + __u32 tuner; + enum v4l2_tuner_type type; + __u32 seek_upward; + __u32 wrap_around; + __u32 spacing; + __u32 reserved[7]; +}; + +/* + * R D S + */ + +struct v4l2_rds_data { + __u8 lsb; + __u8 msb; + __u8 block; +} __attribute__ ((packed)); + +#define V4L2_RDS_BLOCK_MSK 0x7 +#define V4L2_RDS_BLOCK_A 0 +#define V4L2_RDS_BLOCK_B 1 +#define V4L2_RDS_BLOCK_C 2 +#define V4L2_RDS_BLOCK_D 3 +#define V4L2_RDS_BLOCK_C_ALT 4 +#define V4L2_RDS_BLOCK_INVALID 7 + +#define V4L2_RDS_BLOCK_CORRECTED 0x40 +#define V4L2_RDS_BLOCK_ERROR 0x80 + +/* + * A U D I O + */ +struct v4l2_audio { + __u32 index; + __u8 name[32]; + __u32 capability; + __u32 mode; + __u32 reserved[2]; +}; + +/* Flags for the 'capability' field */ +#define V4L2_AUDCAP_STEREO 
0x00001 +#define V4L2_AUDCAP_AVL 0x00002 + +/* Flags for the 'mode' field */ +#define V4L2_AUDMODE_AVL 0x00001 + +struct v4l2_audioout { + __u32 index; + __u8 name[32]; + __u32 capability; + __u32 mode; + __u32 reserved[2]; +}; + +/* + * M P E G S E R V I C E S + * + * NOTE: EXPERIMENTAL API + */ +#if 1 +#define V4L2_ENC_IDX_FRAME_I (0) +#define V4L2_ENC_IDX_FRAME_P (1) +#define V4L2_ENC_IDX_FRAME_B (2) +#define V4L2_ENC_IDX_FRAME_MASK (0xf) + +struct v4l2_enc_idx_entry { + __u64 offset; + __u64 pts; + __u32 length; + __u32 flags; + __u32 reserved[2]; +}; + +#define V4L2_ENC_IDX_ENTRIES (64) +struct v4l2_enc_idx { + __u32 entries; + __u32 entries_cap; + __u32 reserved[4]; + struct v4l2_enc_idx_entry entry[V4L2_ENC_IDX_ENTRIES]; +}; + + +#define V4L2_ENC_CMD_START (0) +#define V4L2_ENC_CMD_STOP (1) +#define V4L2_ENC_CMD_PAUSE (2) +#define V4L2_ENC_CMD_RESUME (3) + +/* Flags for V4L2_ENC_CMD_STOP */ +#define V4L2_ENC_CMD_STOP_AT_GOP_END (1 << 0) + +struct v4l2_encoder_cmd { + __u32 cmd; + __u32 flags; + union { + struct { + __u32 data[8]; + } raw; + }; +}; + +/* Decoder commands */ +#define V4L2_DEC_CMD_START (0) +#define V4L2_DEC_CMD_STOP (1) +#define V4L2_DEC_CMD_PAUSE (2) +#define V4L2_DEC_CMD_RESUME (3) + +/* Flags for V4L2_DEC_CMD_START */ +#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0) + +/* Flags for V4L2_DEC_CMD_PAUSE */ +#define V4L2_DEC_CMD_PAUSE_TO_BLACK (1 << 0) + +/* Flags for V4L2_DEC_CMD_STOP */ +#define V4L2_DEC_CMD_STOP_TO_BLACK (1 << 0) +#define V4L2_DEC_CMD_STOP_IMMEDIATELY (1 << 1) + +/* Play format requirements (returned by the driver): */ + +/* The decoder has no special format requirements */ +#define V4L2_DEC_START_FMT_NONE (0) +/* The decoder requires full GOPs */ +#define V4L2_DEC_START_FMT_GOP (1) + +/* The structure must be zeroed before use by the application + This ensures it can be extended safely in the future. */ +struct v4l2_decoder_cmd { + __u32 cmd; + __u32 flags; + union { + struct { + __u64 pts; + } stop; + + struct { + /* 0 or 1000 specifies normal speed, + 1 specifies forward single stepping, + -1 specifies backward single stepping, + >1: playback at speed/1000 of the normal speed, + <-1: reverse playback at (-speed/1000) of the normal speed. */ + __s32 speed; + __u32 format; + } start; + + struct { + __u32 data[16]; + } raw; + }; +}; +#endif + + +/* + * D A T A S E R V I C E S ( V B I ) + * + * Data services API by Michael Schimek + */ + +/* Raw VBI */ +struct v4l2_vbi_format { + __u32 sampling_rate; /* in 1 Hz */ + __u32 offset; + __u32 samples_per_line; + __u32 sample_format; /* V4L2_PIX_FMT_* */ + __s32 start[2]; + __u32 count[2]; + __u32 flags; /* V4L2_VBI_* */ + __u32 reserved[2]; /* must be zero */ +}; + +/* VBI flags */ +#define V4L2_VBI_UNSYNC (1 << 0) +#define V4L2_VBI_INTERLACED (1 << 1) + +/* Sliced VBI + * + * This implements is a proposal V4L2 API to allow SLICED VBI + * required for some hardware encoders. It should change without + * notice in the definitive implementation. + */ + +struct v4l2_sliced_vbi_format { + __u16 service_set; + /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field + service_lines[1][...] 
specifies lines 0-23 (1-23 used) of the second field + (equals frame lines 313-336 for 625 line video + standards, 263-286 for 525 line standards) */ + __u16 service_lines[2][24]; + __u32 io_size; + __u32 reserved[2]; /* must be zero */ +}; + +/* Teletext World System Teletext + (WST), defined on ITU-R BT.653-2 */ +#define V4L2_SLICED_TELETEXT_B (0x0001) +/* Video Program System, defined on ETS 300 231*/ +#define V4L2_SLICED_VPS (0x0400) +/* Closed Caption, defined on EIA-608 */ +#define V4L2_SLICED_CAPTION_525 (0x1000) +/* Wide Screen System, defined on ITU-R BT1119.1 */ +#define V4L2_SLICED_WSS_625 (0x4000) + +#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525) +#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625) + +struct v4l2_sliced_vbi_cap { + __u16 service_set; + /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field + service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field + (equals frame lines 313-336 for 625 line video + standards, 263-286 for 525 line standards) */ + __u16 service_lines[2][24]; + enum v4l2_buf_type type; + __u32 reserved[3]; /* must be 0 */ +}; + +struct v4l2_sliced_vbi_data { + __u32 id; + __u32 field; /* 0: first field, 1: second field */ + __u32 line; /* 1-23 */ + __u32 reserved; /* must be 0 */ + __u8 data[48]; +}; + +/* + * Sliced VBI data inserted into MPEG Streams + */ + +/* + * V4L2_MPEG_STREAM_VBI_FMT_IVTV: + * + * Structure of payload contained in an MPEG 2 Private Stream 1 PES Packet in an + * MPEG-2 Program Pack that contains V4L2_MPEG_STREAM_VBI_FMT_IVTV Sliced VBI + * data + * + * Note, the MPEG-2 Program Pack and Private Stream 1 PES packet header + * definitions are not included here. See the MPEG-2 specifications for details + * on these headers. 
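
As a concrete illustration of the sliced VBI negotiation described above: the application proposes a service_set via VIDIOC_S_FMT and the driver writes back the services and io_size it can actually deliver. A hedged userspace sketch, where the /dev/vbi0 path is an assumption and error handling is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        int fd = open("/dev/vbi0", O_RDWR);
        struct v4l2_format fmt;

        if (fd < 0)
                return 1;
        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
        /* Ask for Teletext B and WSS on a 625-line input */
        fmt.fmt.sliced.service_set = V4L2_SLICED_TELETEXT_B | V4L2_SLICED_WSS_625;
        if (ioctl(fd, VIDIOC_S_FMT, &fmt) == 0)
                printf("granted services: 0x%04x, io_size %u\n",
                       fmt.fmt.sliced.service_set, fmt.fmt.sliced.io_size);
        return 0;
}
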
+ */ + +/* Line type IDs */ +#define V4L2_MPEG_VBI_IVTV_TELETEXT_B (1) +#define V4L2_MPEG_VBI_IVTV_CAPTION_525 (4) +#define V4L2_MPEG_VBI_IVTV_WSS_625 (5) +#define V4L2_MPEG_VBI_IVTV_VPS (7) + +struct v4l2_mpeg_vbi_itv0_line { + __u8 id; /* One of V4L2_MPEG_VBI_IVTV_* above */ + __u8 data[42]; /* Sliced VBI data for the line */ +} __attribute__ ((packed)); + +struct v4l2_mpeg_vbi_itv0 { + __le32 linemask[2]; /* Bitmasks of VBI service lines present */ + struct v4l2_mpeg_vbi_itv0_line line[35]; +} __attribute__ ((packed)); + +struct v4l2_mpeg_vbi_ITV0 { + struct v4l2_mpeg_vbi_itv0_line line[36]; +} __attribute__ ((packed)); + +#define V4L2_MPEG_VBI_IVTV_MAGIC0 "itv0" +#define V4L2_MPEG_VBI_IVTV_MAGIC1 "ITV0" + +struct v4l2_mpeg_vbi_fmt_ivtv { + __u8 magic[4]; + union { + struct v4l2_mpeg_vbi_itv0 itv0; + struct v4l2_mpeg_vbi_ITV0 ITV0; + }; +} __attribute__ ((packed)); + +/* + * A G G R E G A T E S T R U C T U R E S + */ + +/** + * struct v4l2_plane_pix_format - additional, per-plane format definition + * @sizeimage: maximum size in bytes required for data, for which + * this plane will be used + * @bytesperline: distance in bytes between the leftmost pixels in two + * adjacent lines + */ +struct v4l2_plane_pix_format { + __u32 sizeimage; + __u16 bytesperline; + __u16 reserved[7]; +} __attribute__ ((packed)); + +/** + * struct v4l2_pix_format_mplane - multiplanar format definition + * @width: image width in pixels + * @height: image height in pixels + * @pixelformat: little endian four character code (fourcc) + * @field: field order (for interlaced video) + * @colorspace: supplemental to pixelformat + * @plane_fmt: per-plane information + * @num_planes: number of planes for this format + */ +struct v4l2_pix_format_mplane { + __u32 width; + __u32 height; + __u32 pixelformat; + enum v4l2_field field; + enum v4l2_colorspace colorspace; + + struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES]; + __u8 num_planes; + __u8 reserved[11]; +} __attribute__ ((packed)); + +/** + * struct v4l2_format - stream data format + * @type: type of the data stream + * @pix: definition of an image format + * @pix_mp: definition of a multiplanar image format + * @win: definition of an overlaid image + * @vbi: raw VBI capture or output parameters + * @sliced: sliced VBI capture or output parameters + * @raw_data: placeholder for future extensions and custom formats + */ +struct v4l2_format { + enum v4l2_buf_type type; + union { + struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */ + struct v4l2_pix_format_mplane pix_mp; /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */ + struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */ + struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */ + struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */ + __u8 raw_data[200]; /* user-defined */ + } fmt; +}; + +/* Stream type-dependent parameters + */ +struct v4l2_streamparm { + enum v4l2_buf_type type; + union { + struct v4l2_captureparm capture; + struct v4l2_outputparm output; + __u8 raw_data[200]; /* user-defined */ + } parm; +}; + +/* + * E V E N T S + */ + +#define V4L2_EVENT_ALL 0 +#define V4L2_EVENT_VSYNC 1 +#define V4L2_EVENT_EOS 2 +#define V4L2_EVENT_CTRL 3 +#define V4L2_EVENT_FRAME_SYNC 4 +#define V4L2_EVENT_PRIVATE_START 0x08000000 + +/* Payload for V4L2_EVENT_VSYNC */ +struct v4l2_event_vsync { + /* Can be V4L2_FIELD_ANY, _NONE, _TOP or _BOTTOM */ + __u8 field; +} __attribute__ ((packed)); + +/* Payload for V4L2_EVENT_CTRL */ +#define V4L2_EVENT_CTRL_CH_VALUE (1 << 0) +#define 
V4L2_EVENT_CTRL_CH_FLAGS (1 << 1) + +struct v4l2_event_ctrl { + __u32 changes; + __u32 type; + union { + __s32 value; + __s64 value64; + }; + __u32 flags; + __s32 minimum; + __s32 maximum; + __s32 step; + __s32 default_value; +}; + +struct v4l2_event_frame_sync { + __u32 frame_sequence; +}; + +struct v4l2_event { + __u32 type; + union { + struct v4l2_event_vsync vsync; + struct v4l2_event_ctrl ctrl; + struct v4l2_event_frame_sync frame_sync; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct timespec timestamp; + __u32 id; + __u32 reserved[8]; +}; + +#define V4L2_EVENT_SUB_FL_SEND_INITIAL (1 << 0) +#define V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK (1 << 1) + +struct v4l2_event_subscription { + __u32 type; + __u32 id; + __u32 flags; + __u32 reserved[5]; +}; + +/* + * A D V A N C E D D E B U G G I N G + * + * NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS! + * FOR DEBUGGING, TESTING AND INTERNAL USE ONLY! + */ + +/* VIDIOC_DBG_G_REGISTER and VIDIOC_DBG_S_REGISTER */ + +#define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */ +#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver name */ +#define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */ +#define V4L2_CHIP_MATCH_AC97 3 /* Match against anciliary AC97 chip */ + +struct v4l2_dbg_match { + __u32 type; /* Match type */ + union { /* Match this chip, meaning determined by type */ + __u32 addr; + char name[32]; + }; +} __attribute__ ((packed)); + +struct v4l2_dbg_register { + struct v4l2_dbg_match match; + __u32 size; /* register size in bytes */ + __u64 reg; + __u64 val; +} __attribute__ ((packed)); + +/* VIDIOC_DBG_G_CHIP_IDENT */ +struct v4l2_dbg_chip_ident { + struct v4l2_dbg_match match; + __u32 ident; /* chip identifier as specified in <media/v4l2-chip-ident.h> */ + __u32 revision; /* chip revision, chip specific */ +} __attribute__ ((packed)); + +/** + * struct v4l2_create_buffers - VIDIOC_CREATE_BUFS argument + * @index: on return, index of the first created buffer + * @count: entry: number of requested buffers, + * return: number of created buffers + * @memory: buffer memory type + * @format: frame format, for which buffers are requested + * @reserved: future extensions + */ +struct v4l2_create_buffers { + __u32 index; + __u32 count; + enum v4l2_memory memory; + struct v4l2_format format; + __u32 reserved[8]; +}; + +/* + * I O C T L C O D E S F O R V I D E O D E V I C E S + * + */ +#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability) +#define VIDIOC_RESERVED _IO('V', 1) +#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc) +#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format) +#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format) +#define VIDIOC_REQBUFS _IOWR('V', 8, struct v4l2_requestbuffers) +#define VIDIOC_QUERYBUF _IOWR('V', 9, struct v4l2_buffer) +#define VIDIOC_G_FBUF _IOR('V', 10, struct v4l2_framebuffer) +#define VIDIOC_S_FBUF _IOW('V', 11, struct v4l2_framebuffer) +#define VIDIOC_OVERLAY _IOW('V', 14, int) +#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer) +#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer) +#define VIDIOC_STREAMON _IOW('V', 18, int) +#define VIDIOC_STREAMOFF _IOW('V', 19, int) +#define VIDIOC_G_PARM _IOWR('V', 21, struct v4l2_streamparm) +#define VIDIOC_S_PARM _IOWR('V', 22, struct v4l2_streamparm) +#define VIDIOC_G_STD _IOR('V', 23, v4l2_std_id) +#define VIDIOC_S_STD _IOW('V', 24, v4l2_std_id) +#define VIDIOC_ENUMSTD _IOWR('V', 25, struct v4l2_standard) +#define VIDIOC_ENUMINPUT _IOWR('V', 26, struct 
v4l2_input) +#define VIDIOC_G_CTRL _IOWR('V', 27, struct v4l2_control) +#define VIDIOC_S_CTRL _IOWR('V', 28, struct v4l2_control) +#define VIDIOC_G_TUNER _IOWR('V', 29, struct v4l2_tuner) +#define VIDIOC_S_TUNER _IOW('V', 30, struct v4l2_tuner) +#define VIDIOC_G_AUDIO _IOR('V', 33, struct v4l2_audio) +#define VIDIOC_S_AUDIO _IOW('V', 34, struct v4l2_audio) +#define VIDIOC_QUERYCTRL _IOWR('V', 36, struct v4l2_queryctrl) +#define VIDIOC_QUERYMENU _IOWR('V', 37, struct v4l2_querymenu) +#define VIDIOC_G_INPUT _IOR('V', 38, int) +#define VIDIOC_S_INPUT _IOWR('V', 39, int) +#define VIDIOC_G_OUTPUT _IOR('V', 46, int) +#define VIDIOC_S_OUTPUT _IOWR('V', 47, int) +#define VIDIOC_ENUMOUTPUT _IOWR('V', 48, struct v4l2_output) +#define VIDIOC_G_AUDOUT _IOR('V', 49, struct v4l2_audioout) +#define VIDIOC_S_AUDOUT _IOW('V', 50, struct v4l2_audioout) +#define VIDIOC_G_MODULATOR _IOWR('V', 54, struct v4l2_modulator) +#define VIDIOC_S_MODULATOR _IOW('V', 55, struct v4l2_modulator) +#define VIDIOC_G_FREQUENCY _IOWR('V', 56, struct v4l2_frequency) +#define VIDIOC_S_FREQUENCY _IOW('V', 57, struct v4l2_frequency) +#define VIDIOC_CROPCAP _IOWR('V', 58, struct v4l2_cropcap) +#define VIDIOC_G_CROP _IOWR('V', 59, struct v4l2_crop) +#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop) +#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression) +#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression) +#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id) +#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format) +#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio) +#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout) +#define VIDIOC_G_PRIORITY _IOR('V', 67, enum v4l2_priority) +#define VIDIOC_S_PRIORITY _IOW('V', 68, enum v4l2_priority) +#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap) +#define VIDIOC_LOG_STATUS _IO('V', 70) +#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct v4l2_ext_controls) +#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct v4l2_ext_controls) +#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct v4l2_ext_controls) +#if 1 +#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct v4l2_frmsizeenum) +#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct v4l2_frmivalenum) +#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx) +#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd) +#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd) +#endif + +#if 1 +/* Experimental, meant for debugging, testing and internal use. + Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined. + You must be root to use these ioctls. Never use these in applications! */ +#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register) +#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register) + +/* Experimental, meant for debugging, testing and internal use. + Never use this ioctl in applications! 
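
Leaving the debug ioctls aside, the control ioctls in the table above combine with the V4L2_CID_* definitions earlier in this header in a simple query/get/set pattern. A minimal userspace sketch, again assuming /dev/video0 and trimming error handling:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        int fd = open("/dev/video0", O_RDWR);
        struct v4l2_queryctrl qc = { .id = V4L2_CID_BRIGHTNESS };
        struct v4l2_control ctrl = { .id = V4L2_CID_BRIGHTNESS };

        if (fd < 0 || ioctl(fd, VIDIOC_QUERYCTRL, &qc) < 0)
                return 1;
        if (qc.flags & V4L2_CTRL_FLAG_DISABLED)
                return 0;
        if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) == 0) {
                printf("brightness %d (range %d..%d)\n",
                       ctrl.value, qc.minimum, qc.maximum);
                ctrl.value = qc.default_value;  /* reset to the driver default */
                ioctl(fd, VIDIOC_S_CTRL, &ctrl);
        }
        return 0;
}
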
*/ +#define VIDIOC_DBG_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_dbg_chip_ident) +#endif + +#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) +#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset) +#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset) +#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset) +#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset) +#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings) +#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings) +#define VIDIOC_DQEVENT _IOR('V', 89, struct v4l2_event) +#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct v4l2_event_subscription) +#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription) + +/* Experimental, the below two ioctls may change over the next couple of kernel + versions */ +#define VIDIOC_CREATE_BUFS _IOWR('V', 92, struct v4l2_create_buffers) +#define VIDIOC_PREPARE_BUF _IOWR('V', 93, struct v4l2_buffer) + +/* Experimental selection API */ +#define VIDIOC_G_SELECTION _IOWR('V', 94, struct v4l2_selection) +#define VIDIOC_S_SELECTION _IOWR('V', 95, struct v4l2_selection) + +/* Experimental, these two ioctls may change over the next couple of kernel + versions. */ +#define VIDIOC_DECODER_CMD _IOWR('V', 96, struct v4l2_decoder_cmd) +#define VIDIOC_TRY_DECODER_CMD _IOWR('V', 97, struct v4l2_decoder_cmd) + +/* Reminder: when adding new ioctls please add support for them to + drivers/media/video/v4l2-compat-ioctl32.c as well! */ + +#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */ + +#endif /* __LINUX_VIDEODEV2_H */ diff --git a/include/linux/videodev2_wmt.h b/include/linux/videodev2_wmt.h new file mode 100755 index 00000000..53d19f51 --- /dev/null +++ b/include/linux/videodev2_wmt.h @@ -0,0 +1,196 @@ +#ifndef __LINUX_VIDEODEV2_WMT_H +#define __LINUX_VIDEODEV2_WMT_H + +#include <linux/videodev2.h> + +#define WMT_V4L2_CID_CAMERA_SCENEMODE 0 +#define WMT_V4L2_CID_CAMERA_WHITEBALANCE 1 +#define WMT_V4L2_CID_CAMERA_ANTIBANDING 2 +#define WMT_V4L2_CID_CAMERA_FLASH 3 +#define WMT_V4L2_CID_CAMERA_EXPOSURE 4 +#define WMT_V4L2_CID_HFLIP 5 +#define WMT_V4L2_CID_VFLIP 6 + +/* CID extensions */ +#define V4L2_CID_CAMERA_SCENE_MODE (V4L2_CID_PRIVATE_BASE+0) +enum v4l2_scene_mode { + SCENE_MODE_BASE = 0, + SCENE_MODE_AUTO = 0, + SCENE_MODE_NIGHTSHOT, + SCENE_MODE_PORTRAIT, + SCENE_MODE_BACK_LIGHT, + SCENE_MODE_LANDSCAPE, + SCENE_MODE_SPORTS, + SCENE_MODE_PARTY_INDOOR, + SCENE_MODE_BEACH_SNOW, + SCENE_MODE_SUNSET, + SCENE_MODE_DUSK_DAWN, + SCENE_MODE_FALL_COLOR, + SCENE_MODE_FIREWORKS, + SCENE_MODE_TEXT, + SCENE_MODE_CANDLE_LIGHT, + SCENE_MODE_MAX, +}; + +#define V4L2_CID_CAMERA_FLASH_MODE (V4L2_CID_PRIVATE_BASE+1) +enum v4l2_flash_mode { + FLASH_MODE_BASE = 0, + FLASH_MODE_OFF = 0, + FLASH_MODE_AUTO, + FLASH_MODE_ON, + FLASH_MODE_TORCH, + FLASH_MODE_STROBE, + FLASH_MODE_MAX, +}; + +#define V4L2_CID_CAMERA_ISO (V4L2_CID_PRIVATE_BASE+2) +enum v4l2_iso_mode { + ISO_AUTO = 0, + ISO_50, + ISO_100, + ISO_200, + ISO_400, + ISO_800, + ISO_1600, + ISO_SPORTS, + ISO_NIGHT, + ISO_MOVIE, + ISO_MAX, +}; + +#define V4L2_CID_CAMERA_METERING (V4L2_CID_PRIVATE_BASE+3) +enum v4l2_metering_mode { + METERING_BASE = 0, + METERING_MATRIX, + METERING_CENTER, + METERING_SPOT, + METERING_MAX, +}; + +#define V4L2_CID_CAMERA_WDR (V4L2_CID_PRIVATE_BASE+4) +enum v4l2_wdr_mode { + WDR_OFF, + WDR_ON, + WDR_MAX, +}; + +#define V4L2_CID_CAMERA_ANTI_SHAKE (V4L2_CID_PRIVATE_BASE+5) +enum v4l2_anti_shake_mode { + ANTI_SHAKE_OFF, + 
ANTI_SHAKE_STILL_ON, + ANTI_SHAKE_MOVIE_ON, + ANTI_SHAKE_MAX, +}; + +#define V4L2_CID_CAMERA_FOCUS_MODE (V4L2_CID_PRIVATE_BASE+6) +enum v4l2_focusmode { + FOCUS_MODE_AUTO = 0, + FOCUS_MODE_MACRO, + FOCUS_MODE_FACEDETECT, + FOCUS_MODE_AUTO_DEFAULT, + FOCUS_MODE_MACRO_DEFAULT, + FOCUS_MODE_FACEDETECT_DEFAULT, + FOCUS_MODE_INFINITY, + FOCUS_MODE_FIXED, + FOCUS_MODE_CONTINUOUS, + FOCUS_MODE_CONTINUOUS_PICTURE, + FOCUS_MODE_CONTINUOUS_PICTURE_MACRO, + FOCUS_MODE_CONTINUOUS_VIDEO, + FOCUS_MODE_TOUCH, + FOCUS_MODE_MAX, + FOCUS_MODE_DEFAULT = (1 << 8), +}; + +#define V4L2_CID_CAMERA_AUTO_FOCUS_RESULT (V4L2_CID_PRIVATE_BASE+7) +enum v4l2_focusresult { + FOCUS_RESULT_FOCUSING = 0, + FOCUS_RESULT_SUCCEED, + FOCUS_RESULT_FAILED, +}; + +#define V4L2_CID_CAMERA_FOCUS_POSITION_X (V4L2_CID_PRIVATE_BASE+8) +#define V4L2_CID_CAMERA_FOCUS_POSITION_Y (V4L2_CID_PRIVATE_BASE+9) + +#define V4L2_CID_CAMERA_FLASH_MODE_AUTO (V4L2_CID_PRIVATE_BASE+10) + +#define V4L2_CID_CAMERA_ANTI_BANDING (V4L2_CID_PRIVATE_BASE+11) +enum v4l2_anti_banding{ + ANTI_BANDING_AUTO = 0, + ANTI_BANDING_50HZ = 1, + ANTI_BANDING_60HZ = 2, + ANTI_BANDING_OFF = 3, +}; + + +// V4L2_CID_BRIGHTNESS +enum v4l2_ev_mode { + EV_MINUS_4 = -4, + EV_MINUS_3 = -3, + EV_MINUS_2 = -2, + EV_MINUS_1 = -1, + EV_DEFAULT = 0, + EV_PLUS_1 = 1, + EV_PLUS_2 = 2, + EV_PLUS_3 = 3, + EV_PLUS_4 = 4, + EV_MAX, +}; + +// V4L2_CID_DO_WHITE_BALANCE +enum v4l2_wb_mode { + WHITE_BALANCE_BASE = 0, + WHITE_BALANCE_AUTO = 0, + WHITE_BALANCE_INCANDESCENCE, + WHITE_BALANCE_FLUORESCENT, + WHITE_BALANCE_DAYLIGHT, + WHITE_BALANCE_CLOUDY, + WHITE_BALANCE_TWILIGHT, + WHITE_BALANCE_SHADE, + WHITE_BALANCE_MAX, +}; + +// V4L2_CID_CONTRAST +enum v4l2_contrast_mode { + CONTRAST_MINUS_2 = 0, + CONTRAST_MINUS_1, + CONTRAST_DEFAULT, + CONTRAST_PLUS_1, + CONTRAST_PLUS_2, + CONTRAST_MAX, +}; + +// V4L2_CID_SATURATION +enum v4l2_saturation_mode { + SATURATION_MINUS_2 = 0, + SATURATION_MINUS_1, + SATURATION_DEFAULT, + SATURATION_PLUS_1, + SATURATION_PLUS_2, + SATURATION_MAX, +}; + +// V4L2_CID_SHARPNESS +enum v4l2_sharpness_mode { + SHARPNESS_MINUS_2 = 0, + SHARPNESS_MINUS_1, + SHARPNESS_DEFAULT, + SHARPNESS_PLUS_1, + SHARPNESS_PLUS_2, + SHARPNESS_MAX, +}; + +// V4L2_CID_EXPOSURE +enum v4l2_exposure_mode { + EXPOSURE_MINUS_4, + EXPOSURE_MINUS_3, + EXPOSURE_MINUS_2 = -2, + EXPOSURE_MINUS_1 = -1, + EXPOSURE_DEFAULT = 0, + EXPOSURE_PLUS_1 = 1, + EXPOSURE_PLUS_2 = 2, + EXPOSURE_PLUS_3, + EXPOSURE_PLUS_4, + EXPOSURE_MAX +}; + +#endif /* #ifndef __LINUX_VIDEODEV2_WMT_H */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h new file mode 100644 index 00000000..8efd28ae --- /dev/null +++ b/include/linux/virtio.h @@ -0,0 +1,105 @@ +#ifndef _LINUX_VIRTIO_H +#define _LINUX_VIRTIO_H +/* Everything a virtio driver needs to work with any particular virtio + * implementation. */ +#include <linux/types.h> +#include <linux/scatterlist.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/gfp.h> + +/** + * virtqueue - a queue to register buffers for sending or receiving. + * @list: the chain of virtqueues for this device + * @callback: the function to call when buffers are consumed (can be NULL). + * @name: the name of this virtqueue (mainly for debugging) + * @vdev: the virtio device this queue was created for. + * @priv: a pointer for the virtqueue implementation to use. 
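
As an illustration of the calls declared below: a driver typically wraps its buffer in a scatterlist, posts it with virtqueue_add_buf() and then kicks the queue so the host notices the new work. This is only a sketch of a driver-side helper; the virtqueue and the buffer are assumed to come from the driver's own probe/setup path, and error handling is minimal:

#include <linux/scatterlist.h>
#include <linux/virtio.h>

static int send_one(struct virtqueue *vq, void *buf, unsigned int len)
{
        struct scatterlist sg;
        int err;

        sg_init_one(&sg, buf, len);
        /* one readable (out) entry for the device, no writable (in) entries */
        err = virtqueue_add_buf(vq, &sg, 1, 0, buf, GFP_KERNEL);
        if (err < 0)
                return err;
        virtqueue_kick(vq);     /* notify the host of the new buffer */
        return 0;
}

The same buffer pointer passed as the data cookie comes back from virtqueue_get_buf() once the device has consumed it, usually inside the virtqueue callback.
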
+ */ +struct virtqueue { + struct list_head list; + void (*callback)(struct virtqueue *vq); + const char *name; + struct virtio_device *vdev; + void *priv; +}; + +int virtqueue_add_buf(struct virtqueue *vq, + struct scatterlist sg[], + unsigned int out_num, + unsigned int in_num, + void *data, + gfp_t gfp); + +void virtqueue_kick(struct virtqueue *vq); + +bool virtqueue_kick_prepare(struct virtqueue *vq); + +void virtqueue_notify(struct virtqueue *vq); + +void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); + +void virtqueue_disable_cb(struct virtqueue *vq); + +bool virtqueue_enable_cb(struct virtqueue *vq); + +bool virtqueue_enable_cb_delayed(struct virtqueue *vq); + +void *virtqueue_detach_unused_buf(struct virtqueue *vq); + +unsigned int virtqueue_get_vring_size(struct virtqueue *vq); + +/** + * virtio_device - representation of a device using virtio + * @index: unique position on the virtio bus + * @dev: underlying device. + * @id: the device type identification (used to match it with a driver). + * @config: the configuration ops for this device. + * @vqs: the list of virtqueues for this device. + * @features: the features supported by both driver and device. + * @priv: private pointer for the driver's use. + */ +struct virtio_device { + int index; + struct device dev; + struct virtio_device_id id; + struct virtio_config_ops *config; + struct list_head vqs; + /* Note that this is a Linux set_bit-style bitmap. */ + unsigned long features[1]; + void *priv; +}; + +#define dev_to_virtio(dev) container_of(dev, struct virtio_device, dev) +int register_virtio_device(struct virtio_device *dev); +void unregister_virtio_device(struct virtio_device *dev); + +/** + * virtio_driver - operations for a virtio I/O driver + * @driver: underlying device driver (populate name and owner). + * @id_table: the ids serviced by this driver. + * @feature_table: an array of feature numbers supported by this driver. + * @feature_table_size: number of entries in the feature table array. + * @probe: the function to call when a device is found. Returns 0 or -errno. + * @remove: the function to call when a device is removed. + * @config_changed: optional function to call when the device configuration + * changes; may be called in interrupt context. + */ +struct virtio_driver { + struct device_driver driver; + const struct virtio_device_id *id_table; + const unsigned int *feature_table; + unsigned int feature_table_size; + int (*probe)(struct virtio_device *dev); + void (*remove)(struct virtio_device *dev); + void (*config_changed)(struct virtio_device *dev); +#ifdef CONFIG_PM + int (*freeze)(struct virtio_device *dev); + int (*restore)(struct virtio_device *dev); +#endif +}; + +int register_virtio_driver(struct virtio_driver *drv); +void unregister_virtio_driver(struct virtio_driver *drv); +#endif /* _LINUX_VIRTIO_H */ diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h new file mode 100644 index 00000000..277c4ad4 --- /dev/null +++ b/include/linux/virtio_9p.h @@ -0,0 +1,44 @@ +#ifndef _LINUX_VIRTIO_9P_H +#define _LINUX_VIRTIO_9P_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. */ +#include <linux/types.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> + +/* The feature bitmap for virtio 9P */ + +/* The mount point is specified in a config variable */ +#define VIRTIO_9P_MOUNT_TAG 0 + +struct virtio_9p_config { + /* length of the tag name */ + __u16 tag_len; + /* non-NULL terminated tag name */ + __u8 tag[0]; +} __attribute__((packed)); + +#endif /* _LINUX_VIRTIO_9P_H */ diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h new file mode 100644 index 00000000..652dc8be --- /dev/null +++ b/include/linux/virtio_balloon.h @@ -0,0 +1,59 @@ +#ifndef _LINUX_VIRTIO_BALLOON_H +#define _LINUX_VIRTIO_BALLOON_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
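Since the tag in virtio_9p_config above is not NUL-terminated and has no fixed size, it is usually fetched in two config-space reads through the get() operation declared later in virtio_config.h: first tag_len, then that many bytes starting at the tag offset. A minimal sketch, with example_read_mount_tag() an invented name and error handling kept to the bare minimum:

static char *example_read_mount_tag(struct virtio_device *vdev)
{
	__u16 tag_len;
	char *tag;

	vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag_len),
			  &tag_len, sizeof(tag_len));
	tag = kzalloc(tag_len + 1, GFP_KERNEL);	/* +1 so callers get a C string */
	if (!tag)
		return NULL;
	vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag),
			  tag, tag_len);
	return tag;
}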
*/ +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> + +/* The feature bitmap for virtio balloon */ +#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ +#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */ + +/* Size of a PFN in the balloon interface. */ +#define VIRTIO_BALLOON_PFN_SHIFT 12 + +struct virtio_balloon_config +{ + /* Number of pages host wants Guest to give up. */ + __le32 num_pages; + /* Number of pages we've actually got in balloon. */ + __le32 actual; +}; + +#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */ +#define VIRTIO_BALLOON_S_SWAP_OUT 1 /* Amount of memory swapped out */ +#define VIRTIO_BALLOON_S_MAJFLT 2 /* Number of major faults */ +#define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */ +#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */ +#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ +#define VIRTIO_BALLOON_S_NR 6 + +struct virtio_balloon_stat { + u16 tag; + u64 val; +} __attribute__((packed)); + +#endif /* _LINUX_VIRTIO_BALLOON_H */ diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h new file mode 100644 index 00000000..e0edb40c --- /dev/null +++ b/include/linux/virtio_blk.h @@ -0,0 +1,122 @@ +#ifndef _LINUX_VIRTIO_BLK_H +#define _LINUX_VIRTIO_BLK_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. */ +#include <linux/types.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> + +/* Feature bits */ +#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? 
*/ +#define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */ +#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */ +#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ +#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ +#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ +#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ +#define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */ +#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */ + +#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */ + +struct virtio_blk_config { + /* The capacity (in 512-byte sectors). */ + __u64 capacity; + /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */ + __u32 size_max; + /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ + __u32 seg_max; + /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */ + struct virtio_blk_geometry { + __u16 cylinders; + __u8 heads; + __u8 sectors; + } geometry; + + /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ + __u32 blk_size; + + /* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */ + /* exponent for physical block per logical block. */ + __u8 physical_block_exp; + /* alignment offset in logical blocks. */ + __u8 alignment_offset; + /* minimum I/O size without performance penalty in logical blocks. */ + __u16 min_io_size; + /* optimal sustained I/O size in logical blocks. */ + __u32 opt_io_size; + +} __attribute__((packed)); + +/* + * Command types + * + * Usage is a bit tricky as some bits are used as flags and some are not. + * + * Rules: + * VIRTIO_BLK_T_OUT may be combined with VIRTIO_BLK_T_SCSI_CMD or + * VIRTIO_BLK_T_BARRIER. VIRTIO_BLK_T_FLUSH is a command of its own + * and may not be combined with any of the other flags. + */ + +/* These two define direction. */ +#define VIRTIO_BLK_T_IN 0 +#define VIRTIO_BLK_T_OUT 1 + +/* This bit says it's a scsi command, not an actual read or write. */ +#define VIRTIO_BLK_T_SCSI_CMD 2 + +/* Cache flush command */ +#define VIRTIO_BLK_T_FLUSH 4 + +/* Get device ID command */ +#define VIRTIO_BLK_T_GET_ID 8 + +/* Barrier before this op. */ +#define VIRTIO_BLK_T_BARRIER 0x80000000 + +/* This is the first element of the read scatter-gather list. */ +struct virtio_blk_outhdr { + /* VIRTIO_BLK_T* */ + __u32 type; + /* io priority. */ + __u32 ioprio; + /* Sector (ie. 512 byte offset) */ + __u64 sector; +}; + +struct virtio_scsi_inhdr { + __u32 errors; + __u32 data_len; + __u32 sense_len; + __u32 residual; +}; + +/* And this is the final byte of the write scatter-gather list. */ +#define VIRTIO_BLK_S_OK 0 +#define VIRTIO_BLK_S_IOERR 1 +#define VIRTIO_BLK_S_UNSUPP 2 +#endif /* _LINUX_VIRTIO_BLK_H */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h new file mode 100644 index 00000000..7323a339 --- /dev/null +++ b/include/linux/virtio_config.h @@ -0,0 +1,203 @@ +#ifndef _LINUX_VIRTIO_CONFIG_H +#define _LINUX_VIRTIO_CONFIG_H +/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so + * anyone can use the definitions to implement compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. */ + +/* Virtio devices use a standardized configuration space to define their + * features and pass configuration information, but each implementation can + * store and access that space differently. */ +#include <linux/types.h> + +/* Status byte for guest to report progress, and synchronize features. */ +/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */ +#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1 +/* We have found a driver for the device. */ +#define VIRTIO_CONFIG_S_DRIVER 2 +/* Driver has used its parts of the config, and is happy */ +#define VIRTIO_CONFIG_S_DRIVER_OK 4 +/* We've given up on this device. */ +#define VIRTIO_CONFIG_S_FAILED 0x80 + +/* Some virtio feature bits (currently bits 28 through 31) are reserved for the + * transport being used (eg. virtio_ring), the rest are per-device feature + * bits. */ +#define VIRTIO_TRANSPORT_F_START 28 +#define VIRTIO_TRANSPORT_F_END 32 + +/* Do we get callbacks when the ring is completely used, even if we've + * suppressed them? */ +#define VIRTIO_F_NOTIFY_ON_EMPTY 24 + +#ifdef __KERNEL__ +#include <linux/err.h> +#include <linux/bug.h> +#include <linux/virtio.h> + +/** + * virtio_config_ops - operations for configuring a virtio device + * @get: read the value of a configuration field + * vdev: the virtio_device + * offset: the offset of the configuration field + * buf: the buffer to write the field value into. + * len: the length of the buffer + * @set: write the value of a configuration field + * vdev: the virtio_device + * offset: the offset of the configuration field + * buf: the buffer to read the field value from. + * len: the length of the buffer + * @get_status: read the status byte + * vdev: the virtio_device + * Returns the status byte + * @set_status: write the status byte + * vdev: the virtio_device + * status: the new status byte + * @request_vqs: request the specified number of virtqueues + * vdev: the virtio_device + * max_vqs: the max number of virtqueues we want + * If supplied, must call before any virtqueues are instantiated. + * To modify the max number of virtqueues after request_vqs has been + * called, call free_vqs and then request_vqs with a new value. + * @free_vqs: cleanup resources allocated by request_vqs + * vdev: the virtio_device + * If supplied, must call after all virtqueues have been deleted. 
+ * @reset: reset the device + * vdev: the virtio device + * After this, status and feature negotiation must be done again + * Device must not be reset from its vq/config callbacks, or in + * parallel with being added/removed. + * @find_vqs: find virtqueues and instantiate them. + * vdev: the virtio_device + * nvqs: the number of virtqueues to find + * vqs: on success, includes new virtqueues + * callbacks: array of callbacks, for each virtqueue + * names: array of virtqueue names (mainly for debugging) + * Returns 0 on success or error status + * @del_vqs: free virtqueues found by find_vqs(). + * @get_features: get the array of feature bits for this device. + * vdev: the virtio_device + * Returns the first 32 feature bits (all we currently need). + * @finalize_features: confirm what device features we'll be using. + * vdev: the virtio_device + * This gives the final feature bits for the device: it can change + * the dev->feature bits if it wants. + * @bus_name: return the bus name associated with the device + * vdev: the virtio_device + * This returns a pointer to the bus name a la pci_name from which + * the caller can then copy. + */ +typedef void vq_callback_t(struct virtqueue *); +struct virtio_config_ops { + void (*get)(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len); + void (*set)(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len); + u8 (*get_status)(struct virtio_device *vdev); + void (*set_status)(struct virtio_device *vdev, u8 status); + void (*reset)(struct virtio_device *vdev); + int (*find_vqs)(struct virtio_device *, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]); + void (*del_vqs)(struct virtio_device *); + u32 (*get_features)(struct virtio_device *vdev); + void (*finalize_features)(struct virtio_device *vdev); + const char *(*bus_name)(struct virtio_device *vdev); +}; + +/* If driver didn't advertise the feature, it will never appear. */ +void virtio_check_driver_offered_feature(const struct virtio_device *vdev, + unsigned int fbit); + +/** + * virtio_has_feature - helper to determine if this device has this feature. + * @vdev: the device + * @fbit: the feature bit + */ +static inline bool virtio_has_feature(const struct virtio_device *vdev, + unsigned int fbit) +{ + /* Did you forget to fix assumptions on max features? */ + if (__builtin_constant_p(fbit)) + BUILD_BUG_ON(fbit >= 32); + else + BUG_ON(fbit >= 32); + + if (fbit < VIRTIO_TRANSPORT_F_START) + virtio_check_driver_offered_feature(vdev, fbit); + + return test_bit(fbit, vdev->features); +} + +/** + * virtio_config_val - look for a feature and get a virtio config entry. + * @vdev: the virtio device + * @fbit: the feature bit + * @offset: the type to search for. + * @val: a pointer to the value to fill in. + * + * The return value is -ENOENT if the feature doesn't exist. Otherwise + * the config value is copied into whatever is pointed to by v. 
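Two hedged sketches of how the pieces documented above are meant to be driven. example_handshake() walks the status bits in their intended order through the ops table (a simplification of what the virtio core does, not a drop-in replacement), and example_read_link_status() uses the virtio_config_val() macro defined just below against virtio_net's status field, which appears later in virtio_net.h; both function names are invented:

static void example_handshake(struct virtio_device *vdev)
{
	u8 status;

	/* We have noticed the device and found a driver for it. */
	vdev->config->set_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	status = vdev->config->get_status(vdev);
	vdev->config->set_status(vdev, status | VIRTIO_CONFIG_S_DRIVER);

	/* Simplified feature negotiation: take what the host offers, then
	 * commit.  A real driver core keeps only the bits it understands. */
	vdev->features[0] = vdev->config->get_features(vdev);
	vdev->config->finalize_features(vdev);

	/* Virtqueue setup via find_vqs() would go here. */

	vdev->config->set_status(vdev, vdev->config->get_status(vdev) |
				       VIRTIO_CONFIG_S_DRIVER_OK);
}

/* Returns -ENOENT unless VIRTIO_NET_F_STATUS was offered and accepted. */
static int example_read_link_status(struct virtio_device *vdev, u16 *status)
{
	return virtio_config_val(vdev, VIRTIO_NET_F_STATUS,
				 offsetof(struct virtio_net_config, status),
				 status);
}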
*/ +#define virtio_config_val(vdev, fbit, offset, v) \ + virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v)) + +#define virtio_config_val_len(vdev, fbit, offset, v, len) \ + virtio_config_buf((vdev), (fbit), (offset), (v), (len)) + +static inline int virtio_config_buf(struct virtio_device *vdev, + unsigned int fbit, + unsigned int offset, + void *buf, unsigned len) +{ + if (!virtio_has_feature(vdev, fbit)) + return -ENOENT; + + vdev->config->get(vdev, offset, buf, len); + return 0; +} + +static inline +struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, + vq_callback_t *c, const char *n) +{ + vq_callback_t *callbacks[] = { c }; + const char *names[] = { n }; + struct virtqueue *vq; + int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); + if (err < 0) + return ERR_PTR(err); + return vq; +} + +static inline +const char *virtio_bus_name(struct virtio_device *vdev) +{ + if (!vdev->config->bus_name) + return "virtio"; + return vdev->config->bus_name(vdev); +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h new file mode 100644 index 00000000..bdf4b003 --- /dev/null +++ b/include/linux/virtio_console.h @@ -0,0 +1,77 @@ +#ifndef _LINUX_VIRTIO_CONSOLE_H +#define _LINUX_VIRTIO_CONSOLE_H +#include <linux/types.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> +/* + * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so + * anyone can use the definitions to implement compatible drivers/servers: + * + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Copyright (C) Red Hat, Inc., 2009, 2010, 2011 + * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011 + */ + +/* Feature bits */ +#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ +#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ + +#define VIRTIO_CONSOLE_BAD_ID (~(u32)0) + +struct virtio_console_config { + /* colums of the screens */ + __u16 cols; + /* rows of the screens */ + __u16 rows; + /* max. 
number of ports this device can hold */ + __u32 max_nr_ports; +} __attribute__((packed)); + +/* + * A message that's passed between the Host and the Guest for a + * particular port. + */ +struct virtio_console_control { + __u32 id; /* Port number */ + __u16 event; /* The kind of control event (see below) */ + __u16 value; /* Extra information for the key */ +}; + +/* Some events for control messages */ +#define VIRTIO_CONSOLE_DEVICE_READY 0 +#define VIRTIO_CONSOLE_PORT_ADD 1 +#define VIRTIO_CONSOLE_PORT_REMOVE 2 +#define VIRTIO_CONSOLE_PORT_READY 3 +#define VIRTIO_CONSOLE_CONSOLE_PORT 4 +#define VIRTIO_CONSOLE_RESIZE 5 +#define VIRTIO_CONSOLE_PORT_OPEN 6 +#define VIRTIO_CONSOLE_PORT_NAME 7 + +#ifdef __KERNEL__ +int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); +#endif /* __KERNEL__ */ + +#endif /* _LINUX_VIRTIO_CONSOLE_H */ diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h new file mode 100644 index 00000000..7529b854 --- /dev/null +++ b/include/linux/virtio_ids.h @@ -0,0 +1,41 @@ +#ifndef _LINUX_VIRTIO_IDS_H +#define _LINUX_VIRTIO_IDS_H +/* + * Virtio IDs + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. */ + +#define VIRTIO_ID_NET 1 /* virtio net */ +#define VIRTIO_ID_BLOCK 2 /* virtio block */ +#define VIRTIO_ID_CONSOLE 3 /* virtio console */ +#define VIRTIO_ID_RNG 4 /* virtio ring */ +#define VIRTIO_ID_BALLOON 5 /* virtio balloon */ +#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */ +#define VIRTIO_ID_SCSI 8 /* virtio scsi */ +#define VIRTIO_ID_9P 9 /* 9p virtio console */ + +#endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h new file mode 100644 index 00000000..5c7b6f0d --- /dev/null +++ b/include/linux/virtio_mmio.h @@ -0,0 +1,111 @@ +/* + * Virtio platform device driver + * + * Copyright 2011, ARM Ltd. + * + * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 
2007 + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _LINUX_VIRTIO_MMIO_H +#define _LINUX_VIRTIO_MMIO_H + +/* + * Control registers + */ + +/* Magic value ("virt" string) - Read Only */ +#define VIRTIO_MMIO_MAGIC_VALUE 0x000 + +/* Virtio device version - Read Only */ +#define VIRTIO_MMIO_VERSION 0x004 + +/* Virtio device ID - Read Only */ +#define VIRTIO_MMIO_DEVICE_ID 0x008 + +/* Virtio vendor ID - Read Only */ +#define VIRTIO_MMIO_VENDOR_ID 0x00c + +/* Bitmask of the features supported by the host + * (32 bits per set) - Read Only */ +#define VIRTIO_MMIO_HOST_FEATURES 0x010 + +/* Host features set selector - Write Only */ +#define VIRTIO_MMIO_HOST_FEATURES_SEL 0x014 + +/* Bitmask of features activated by the guest + * (32 bits per set) - Write Only */ +#define VIRTIO_MMIO_GUEST_FEATURES 0x020 + +/* Activated features set selector - Write Only */ +#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024 + +/* Guest's memory page size in bytes - Write Only */ +#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 + +/* Queue selector - Write Only */ +#define VIRTIO_MMIO_QUEUE_SEL 0x030 + +/* Maximum size of the currently selected queue - Read Only */ +#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034 + +/* Queue size for the currently selected queue - Write Only */ +#define VIRTIO_MMIO_QUEUE_NUM 0x038 + +/* Used Ring alignment for the currently selected queue - Write Only */ +#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c + +/* Guest's PFN for the currently selected queue - Read Write */ +#define VIRTIO_MMIO_QUEUE_PFN 0x040 + +/* Queue notifier - Write Only */ +#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050 + +/* Interrupt status - Read Only */ +#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060 + +/* Interrupt acknowledge - Write Only */ +#define VIRTIO_MMIO_INTERRUPT_ACK 0x064 + +/* Device status register - Read Write */ +#define VIRTIO_MMIO_STATUS 0x070 + +/* The config space is defined by each driver as + * the per-driver configuration space - Read Write */ +#define VIRTIO_MMIO_CONFIG 0x100 + + + +/* + * 
Interrupt flags (re: interrupt status & acknowledge registers) + */ + +#define VIRTIO_MMIO_INT_VRING (1 << 0) +#define VIRTIO_MMIO_INT_CONFIG (1 << 1) + +#endif diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h new file mode 100644 index 00000000..970d5a2a --- /dev/null +++ b/include/linux/virtio_net.h @@ -0,0 +1,155 @@ +#ifndef _LINUX_VIRTIO_NET_H +#define _LINUX_VIRTIO_NET_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. */ +#include <linux/types.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> +#include <linux/if_ether.h> + +/* The feature bitmap for virtio net */ +#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ +#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ +#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ +#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ +#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ +#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ +#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ +#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */ +#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */ +#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */ +#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ +#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ +#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. 
*/ +#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */ +#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */ +#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ +#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ +#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */ + +#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ + +struct virtio_net_config { + /* The config defining mac address (if VIRTIO_NET_F_MAC) */ + __u8 mac[6]; + /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ + __u16 status; +} __attribute__((packed)); + +/* This is the first element of the scatter-gather list. If you don't + * specify GSO or CSUM features, you can simply ignore the header. */ +struct virtio_net_hdr { +#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset +#define VIRTIO_NET_HDR_F_DATA_VALID 2 // Csum is valid + __u8 flags; +#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame +#define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO) +#define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO) +#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP +#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set + __u8 gso_type; + __u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ + __u16 gso_size; /* Bytes to append to hdr_len per frame */ + __u16 csum_start; /* Position to start checksumming from */ + __u16 csum_offset; /* Offset after that to place checksum */ +}; + +/* This is the version of the header to use when the MRG_RXBUF + * feature has been negotiated. */ +struct virtio_net_hdr_mrg_rxbuf { + struct virtio_net_hdr hdr; + __u16 num_buffers; /* Number of merged rx buffers */ +}; + +/* + * Control virtqueue data structures + * + * The control virtqueue expects a header in the first sg entry + * and an ack/status response in the last entry. Data for the + * command goes in between. + */ +struct virtio_net_ctrl_hdr { + __u8 class; + __u8 cmd; +} __attribute__((packed)); + +typedef __u8 virtio_net_ctrl_ack; + +#define VIRTIO_NET_OK 0 +#define VIRTIO_NET_ERR 1 + +/* + * Control the RX mode, ie. promisucous, allmulti, etc... + * All commands require an "out" sg entry containing a 1 byte + * state value, zero = disable, non-zero = enable. Commands + * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. + * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. + */ +#define VIRTIO_NET_CTRL_RX 0 + #define VIRTIO_NET_CTRL_RX_PROMISC 0 + #define VIRTIO_NET_CTRL_RX_ALLMULTI 1 + #define VIRTIO_NET_CTRL_RX_ALLUNI 2 + #define VIRTIO_NET_CTRL_RX_NOMULTI 3 + #define VIRTIO_NET_CTRL_RX_NOUNI 4 + #define VIRTIO_NET_CTRL_RX_NOBCAST 5 + +/* + * Control the MAC filter table. + * + * The MAC filter table is managed by the hypervisor, the guest should + * assume the size is infinite. Filtering should be considered + * non-perfect, ie. based on hypervisor resources, the guest may + * received packets from sources not specified in the filter list. + * + * In addition to the class/cmd header, the TABLE_SET command requires + * two out scatterlists. Each contains a 4 byte count of entries followed + * by a concatenated byte stream of the ETH_ALEN MAC addresses. The + * first sg list contains unicast addresses, the second is for multicast. + * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature + * is available. 
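Putting that description into code: a TABLE_SET command is the class/cmd header, then two device-readable buffers laid out as struct virtio_net_ctrl_mac below (a count followed by entries * ETH_ALEN bytes of addresses), then one device-writable ack byte; the VIRTIO_NET_CTRL_MAC constants also appear right after the struct. The sketch assumes the caller allocated hdr, uni, mul and ack with room for the MAC arrays, and checking the ack after completion is left out; example_set_mac_table() is an invented name:

static int example_set_mac_table(struct virtqueue *cvq,
				 struct virtio_net_ctrl_hdr *hdr,
				 struct virtio_net_ctrl_mac *uni,
				 struct virtio_net_ctrl_mac *mul,
				 virtio_net_ctrl_ack *ack)
{
	struct scatterlist sg[4];

	hdr->class = VIRTIO_NET_CTRL_MAC;
	hdr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	uni->entries = 1;	/* one unicast MAC follows in uni->macs[0] */
	mul->entries = 0;	/* empty multicast list */

	sg_init_table(sg, 4);
	sg_set_buf(&sg[0], hdr, sizeof(*hdr));
	sg_set_buf(&sg[1], uni, sizeof(*uni) + 1 * ETH_ALEN);
	sg_set_buf(&sg[2], mul, sizeof(*mul));
	sg_set_buf(&sg[3], ack, sizeof(*ack));

	/* Three "out" entries the device reads, one "in" entry for the ack. */
	if (virtqueue_add_buf(cvq, sg, 3, 1, hdr, GFP_ATOMIC) < 0)
		return -ENOSPC;
	virtqueue_kick(cvq);
	return 0;
}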
+ */ +struct virtio_net_ctrl_mac { + __u32 entries; + __u8 macs[][ETH_ALEN]; +} __attribute__((packed)); + +#define VIRTIO_NET_CTRL_MAC 1 + #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0 + +/* + * Control VLAN filtering + * + * The VLAN filter table is controlled via a simple ADD/DEL interface. + * VLAN IDs not added may be filterd by the hypervisor. Del is the + * opposite of add. Both commands expect an out entry containing a 2 + * byte VLAN ID. VLAN filterting is available with the + * VIRTIO_NET_F_CTRL_VLAN feature bit. + */ +#define VIRTIO_NET_CTRL_VLAN 2 + #define VIRTIO_NET_CTRL_VLAN_ADD 0 + #define VIRTIO_NET_CTRL_VLAN_DEL 1 + +#endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h new file mode 100644 index 00000000..ea66f3f6 --- /dev/null +++ b/include/linux/virtio_pci.h @@ -0,0 +1,95 @@ +/* + * Virtio PCI driver + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _LINUX_VIRTIO_PCI_H +#define _LINUX_VIRTIO_PCI_H + +#include <linux/virtio_config.h> + +/* A 32-bit r/o bitmask of the features supported by the host */ +#define VIRTIO_PCI_HOST_FEATURES 0 + +/* A 32-bit r/w bitmask of features activated by the guest */ +#define VIRTIO_PCI_GUEST_FEATURES 4 + +/* A 32-bit r/w PFN for the currently selected queue */ +#define VIRTIO_PCI_QUEUE_PFN 8 + +/* A 16-bit r/o queue size for the currently selected queue */ +#define VIRTIO_PCI_QUEUE_NUM 12 + +/* A 16-bit r/w queue selector */ +#define VIRTIO_PCI_QUEUE_SEL 14 + +/* A 16-bit r/w queue notifier */ +#define VIRTIO_PCI_QUEUE_NOTIFY 16 + +/* An 8-bit device status register. */ +#define VIRTIO_PCI_STATUS 18 + +/* An 8-bit r/o interrupt status register. Reading the value will return the + * current contents of the ISR and will also clear it. 
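The legacy register block above is reached through the device's first I/O BAR; the sketch below shows the select-queue / read-size / publish-PFN sequence and a queue kick using plain port I/O. ioaddr, example_setup_queue() and example_notify() are assumptions for illustration, as are the x86-style outw()/outl() accessors and the 4 KiB page shift (spelled out by VIRTIO_PCI_QUEUE_ADDR_SHIFT further down):

static u16 example_setup_queue(unsigned long ioaddr, u16 index, void *ring)
{
	u16 num;

	outw(index, ioaddr + VIRTIO_PCI_QUEUE_SEL);	/* pick a queue */
	num = inw(ioaddr + VIRTIO_PCI_QUEUE_NUM);	/* 0: queue doesn't exist */
	if (!num)
		return 0;

	/* Publish the ring's page frame number; "ring" must be physically
	 * contiguous and page aligned. */
	outl(virt_to_phys(ring) >> 12, ioaddr + VIRTIO_PCI_QUEUE_PFN);
	return num;
}

static void example_notify(unsigned long ioaddr, u16 index)
{
	outw(index, ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);	/* kick queue "index" */
}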
This is effectively + * a read-and-acknowledge. */ +#define VIRTIO_PCI_ISR 19 + +/* The bit of the ISR which indicates a device configuration change. */ +#define VIRTIO_PCI_ISR_CONFIG 0x2 + +/* MSI-X registers: only enabled if MSI-X is enabled. */ +/* A 16-bit vector for configuration changes. */ +#define VIRTIO_MSI_CONFIG_VECTOR 20 +/* A 16-bit vector for selected queue notifications. */ +#define VIRTIO_MSI_QUEUE_VECTOR 22 +/* Vector value used to disable MSI for queue */ +#define VIRTIO_MSI_NO_VECTOR 0xffff + +/* The remaining space is defined by each driver as the per-driver + * configuration space */ +#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20) + +/* Virtio ABI version, this must match exactly */ +#define VIRTIO_PCI_ABI_VERSION 0 + +/* How many bits to shift physical queue address written to QUEUE_PFN. + * 12 is historical, and due to x86 page size. */ +#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 + +/* The alignment to use between consumer and producer parts of vring. + * x86 pagesize again. */ +#define VIRTIO_PCI_VRING_ALIGN 4096 +#endif diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h new file mode 100644 index 00000000..e338730c --- /dev/null +++ b/include/linux/virtio_ring.h @@ -0,0 +1,182 @@ +#ifndef _LINUX_VIRTIO_RING_H +#define _LINUX_VIRTIO_RING_H +/* An interface for efficient virtio implementation, currently for use by KVM + * and lguest, but hopefully others soon. Do NOT change this since it will + * break existing servers and clients. + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Copyright Rusty Russell IBM Corporation 2007. */ +#include <linux/types.h> + +/* This marks a buffer as continuing via the next field. */ +#define VRING_DESC_F_NEXT 1 +/* This marks a buffer as write-only (otherwise read-only). */ +#define VRING_DESC_F_WRITE 2 +/* This means the buffer contains a list of buffer descriptors. 
*/ +#define VRING_DESC_F_INDIRECT 4 + +/* The Host uses this in used->flags to advise the Guest: don't kick me when + * you add a buffer. It's unreliable, so it's simply an optimization. Guest + * will still kick if it's out of buffers. */ +#define VRING_USED_F_NO_NOTIFY 1 +/* The Guest uses this in avail->flags to advise the Host: don't interrupt me + * when you consume a buffer. It's unreliable, so it's simply an + * optimization. */ +#define VRING_AVAIL_F_NO_INTERRUPT 1 + +/* We support indirect buffer descriptors */ +#define VIRTIO_RING_F_INDIRECT_DESC 28 + +/* The Guest publishes the used index for which it expects an interrupt + * at the end of the avail ring. Host should ignore the avail->flags field. */ +/* The Host publishes the avail index for which it expects a kick + * at the end of the used ring. Guest should ignore the used->flags field. */ +#define VIRTIO_RING_F_EVENT_IDX 29 + +/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ +struct vring_desc { + /* Address (guest-physical). */ + __u64 addr; + /* Length. */ + __u32 len; + /* The flags as indicated above. */ + __u16 flags; + /* We chain unused descriptors via this, too */ + __u16 next; +}; + +struct vring_avail { + __u16 flags; + __u16 idx; + __u16 ring[]; +}; + +/* u32 is used here for ids for padding reasons. */ +struct vring_used_elem { + /* Index of start of used descriptor chain. */ + __u32 id; + /* Total length of the descriptor chain which was used (written to) */ + __u32 len; +}; + +struct vring_used { + __u16 flags; + __u16 idx; + struct vring_used_elem ring[]; +}; + +struct vring { + unsigned int num; + + struct vring_desc *desc; + + struct vring_avail *avail; + + struct vring_used *used; +}; + +/* The standard layout for the ring is a continuous chunk of memory which looks + * like this. We assume num is a power of 2. + * + * struct vring + * { + * // The actual descriptors (16 bytes each) + * struct vring_desc desc[num]; + * + * // A ring of available descriptor heads with free-running index. + * __u16 avail_flags; + * __u16 avail_idx; + * __u16 available[num]; + * __u16 used_event_idx; + * + * // Padding to the next align boundary. + * char pad[]; + * + * // A ring of used descriptor heads with free-running index. + * __u16 used_flags; + * __u16 used_idx; + * struct vring_used_elem used[num]; + * __u16 avail_event_idx; + * }; + */ +/* We publish the used event index at the end of the available ring, and vice + * versa. They are at the end for backwards compatibility. */ +#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) +#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num]) + +static inline void vring_init(struct vring *vr, unsigned int num, void *p, + unsigned long align) +{ + vr->num = num; + vr->desc = p; + vr->avail = p + num*sizeof(struct vring_desc); + vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16) + + align-1) & ~(align - 1)); +} + +static inline unsigned vring_size(unsigned int num, unsigned long align) +{ + return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) + + align - 1) & ~(align - 1)) + + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; +} + +/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ +/* Assuming a given event_idx value from the other size, if + * we have just incremented index from old to new_idx, + * should we trigger an event? 
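To make the layout comment and the question above concrete: example_alloc_ring() below carves a ring out of zeroed pages roughly the way a PCI transport would (the __get_free_pages() allocation and the 4096-byte alignment are assumptions of the sketch, not requirements of this header), and the worked numbers in the trailing comment show vring_need_event() both staying quiet and firing.

static int example_alloc_ring(struct vring *vr, unsigned int num)
{
	void *p = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					   get_order(vring_size(num, 4096)));

	if (!p)
		return -ENOMEM;
	vring_init(vr, num, p, 4096);
	/* vr->desc, vr->avail and vr->used now all point into "p". */
	return 0;
}

/* Worked numbers for vring_need_event() below: with event_idx = 3, old = 4,
 * new_idx = 6 the test is (6 - 3 - 1) < (6 - 4), i.e. 2 < 2, so no event;
 * with event_idx = 4 it becomes 1 < 2 and the notification is sent. */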
*/ +static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) +{ + /* Note: Xen has similar logic for notification hold-off + * in include/xen/interface/io/ring.h with req_event and req_prod + * corresponding to event_idx + 1 and new_idx respectively. + * Note also that req_event and req_prod in Xen start at 1, + * event indexes in virtio start at 0. */ + return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old); +} + +#ifdef __KERNEL__ +#include <linux/irqreturn.h> +struct virtio_device; +struct virtqueue; + +struct virtqueue *vring_new_virtqueue(unsigned int num, + unsigned int vring_align, + struct virtio_device *vdev, + bool weak_barriers, + void *pages, + void (*notify)(struct virtqueue *vq), + void (*callback)(struct virtqueue *vq), + const char *name); +void vring_del_virtqueue(struct virtqueue *vq); +/* Filter out transport-specific feature bits. */ +void vring_transport_features(struct virtio_device *vdev); + +irqreturn_t vring_interrupt(int irq, void *_vq); +#endif /* __KERNEL__ */ +#endif /* _LINUX_VIRTIO_RING_H */ diff --git a/include/linux/virtio_rng.h b/include/linux/virtio_rng.h new file mode 100644 index 00000000..c4d5de89 --- /dev/null +++ b/include/linux/virtio_rng.h @@ -0,0 +1,8 @@ +#ifndef _LINUX_VIRTIO_RNG_H +#define _LINUX_VIRTIO_RNG_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. */ +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> + +#endif /* _LINUX_VIRTIO_RNG_H */ diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h new file mode 100644 index 00000000..8ddeafdc --- /dev/null +++ b/include/linux/virtio_scsi.h @@ -0,0 +1,114 @@ +#ifndef _LINUX_VIRTIO_SCSI_H +#define _LINUX_VIRTIO_SCSI_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. 
*/ + +#define VIRTIO_SCSI_CDB_SIZE 32 +#define VIRTIO_SCSI_SENSE_SIZE 96 + +/* SCSI command request, followed by data-out */ +struct virtio_scsi_cmd_req { + u8 lun[8]; /* Logical Unit Number */ + u64 tag; /* Command identifier */ + u8 task_attr; /* Task attribute */ + u8 prio; + u8 crn; + u8 cdb[VIRTIO_SCSI_CDB_SIZE]; +} __packed; + +/* Response, followed by sense data and data-in */ +struct virtio_scsi_cmd_resp { + u32 sense_len; /* Sense data length */ + u32 resid; /* Residual bytes in data buffer */ + u16 status_qualifier; /* Status qualifier */ + u8 status; /* Command completion status */ + u8 response; /* Response values */ + u8 sense[VIRTIO_SCSI_SENSE_SIZE]; +} __packed; + +/* Task Management Request */ +struct virtio_scsi_ctrl_tmf_req { + u32 type; + u32 subtype; + u8 lun[8]; + u64 tag; +} __packed; + +struct virtio_scsi_ctrl_tmf_resp { + u8 response; +} __packed; + +/* Asynchronous notification query/subscription */ +struct virtio_scsi_ctrl_an_req { + u32 type; + u8 lun[8]; + u32 event_requested; +} __packed; + +struct virtio_scsi_ctrl_an_resp { + u32 event_actual; + u8 response; +} __packed; + +struct virtio_scsi_event { + u32 event; + u8 lun[8]; + u32 reason; +} __packed; + +struct virtio_scsi_config { + u32 num_queues; + u32 seg_max; + u32 max_sectors; + u32 cmd_per_lun; + u32 event_info_size; + u32 sense_size; + u32 cdb_size; + u16 max_channel; + u16 max_target; + u32 max_lun; +} __packed; + +/* Response codes */ +#define VIRTIO_SCSI_S_OK 0 +#define VIRTIO_SCSI_S_OVERRUN 1 +#define VIRTIO_SCSI_S_ABORTED 2 +#define VIRTIO_SCSI_S_BAD_TARGET 3 +#define VIRTIO_SCSI_S_RESET 4 +#define VIRTIO_SCSI_S_BUSY 5 +#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6 +#define VIRTIO_SCSI_S_TARGET_FAILURE 7 +#define VIRTIO_SCSI_S_NEXUS_FAILURE 8 +#define VIRTIO_SCSI_S_FAILURE 9 +#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10 +#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11 +#define VIRTIO_SCSI_S_INCORRECT_LUN 12 + +/* Controlq type codes. */ +#define VIRTIO_SCSI_T_TMF 0 +#define VIRTIO_SCSI_T_AN_QUERY 1 +#define VIRTIO_SCSI_T_AN_SUBSCRIBE 2 + +/* Valid TMF subtypes. */ +#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0 +#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1 +#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2 +#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3 +#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4 +#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5 +#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6 +#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7 + +/* Events. */ +#define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000 +#define VIRTIO_SCSI_T_NO_EVENT 0 +#define VIRTIO_SCSI_T_TRANSPORT_RESET 1 +#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2 + +#define VIRTIO_SCSI_S_SIMPLE 0 +#define VIRTIO_SCSI_S_ORDERED 1 +#define VIRTIO_SCSI_S_HEAD 2 +#define VIRTIO_SCSI_S_ACA 3 + + +#endif /* _LINUX_VIRTIO_SCSI_H */ diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h new file mode 100644 index 00000000..017d4a53 --- /dev/null +++ b/include/linux/vlynq.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
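Back in virtio_scsi.h above: a request queue element pairs one device-readable buffer (the virtio_scsi_cmd_req, followed by any data-out) with one device-writable buffer (the virtio_scsi_cmd_resp, followed by any data-in). A bare-bones sketch with no data payload; example_queue_scsi_cmd() is an invented name and the req fields (lun, tag, cdb) are assumed to have been filled in by the caller:

static int example_queue_scsi_cmd(struct virtqueue *vq,
				  struct virtio_scsi_cmd_req *req,
				  struct virtio_scsi_cmd_resp *resp)
{
	struct scatterlist sg[2];

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], req, sizeof(*req));		/* device reads this */
	sg_set_buf(&sg[1], resp, sizeof(*resp));	/* device writes this */

	if (virtqueue_add_buf(vq, sg, 1, 1, req, GFP_ATOMIC) < 0)
		return -ENOSPC;
	virtqueue_kick(vq);
	/* On completion resp->response should be VIRTIO_SCSI_S_OK and
	 * resp->status carries the SCSI status byte. */
	return 0;
}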
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __VLYNQ_H__ +#define __VLYNQ_H__ + +#include <linux/device.h> +#include <linux/types.h> + +struct module; + +#define VLYNQ_NUM_IRQS 32 + +struct vlynq_mapping { + u32 size; + u32 offset; +}; + +enum vlynq_divisor { + vlynq_div_auto = 0, + vlynq_ldiv1, + vlynq_ldiv2, + vlynq_ldiv3, + vlynq_ldiv4, + vlynq_ldiv5, + vlynq_ldiv6, + vlynq_ldiv7, + vlynq_ldiv8, + vlynq_rdiv1, + vlynq_rdiv2, + vlynq_rdiv3, + vlynq_rdiv4, + vlynq_rdiv5, + vlynq_rdiv6, + vlynq_rdiv7, + vlynq_rdiv8, + vlynq_div_external +}; + +struct vlynq_device_id { + u32 id; + enum vlynq_divisor divisor; + unsigned long driver_data; +}; + +struct vlynq_regs; +struct vlynq_device { + u32 id, dev_id; + int local_irq; + int remote_irq; + enum vlynq_divisor divisor; + u32 regs_start, regs_end; + u32 mem_start, mem_end; + u32 irq_start, irq_end; + int irq; + int enabled; + struct vlynq_regs *local; + struct vlynq_regs *remote; + struct device dev; +}; + +struct vlynq_driver { + char *name; + struct vlynq_device_id *id_table; + int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id); + void (*remove)(struct vlynq_device *dev); + struct device_driver driver; +}; + +struct plat_vlynq_ops { + int (*on)(struct vlynq_device *dev); + void (*off)(struct vlynq_device *dev); +}; + +static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv) +{ + return container_of(drv, struct vlynq_driver, driver); +} + +static inline struct vlynq_device *to_vlynq_device(struct device *device) +{ + return container_of(device, struct vlynq_device, dev); +} + +extern struct bus_type vlynq_bus_type; + +extern int __vlynq_register_driver(struct vlynq_driver *driver, + struct module *owner); + +static inline int vlynq_register_driver(struct vlynq_driver *driver) +{ + return __vlynq_register_driver(driver, THIS_MODULE); +} + +static inline void *vlynq_get_drvdata(struct vlynq_device *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +static inline u32 vlynq_mem_start(struct vlynq_device *dev) +{ + return dev->mem_start; +} + +static inline u32 vlynq_mem_end(struct vlynq_device *dev) +{ + return dev->mem_end; +} + +static inline u32 vlynq_mem_len(struct vlynq_device *dev) +{ + return dev->mem_end - dev->mem_start + 1; +} + +static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq) +{ + int irq = dev->irq_start + virq; + if ((irq < dev->irq_start) || (irq > dev->irq_end)) + return -EINVAL; + + return irq; +} + +static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq) +{ + if ((irq < dev->irq_start) || (irq > dev->irq_end)) + return -EINVAL; + + return irq - dev->irq_start; +} + +extern void vlynq_unregister_driver(struct vlynq_driver *driver); +extern int vlynq_enable_device(struct vlynq_device *dev); +extern void vlynq_disable_device(struct vlynq_device *dev); +extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset, + struct vlynq_mapping *mapping); +extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset, + struct vlynq_mapping *mapping); +extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq); +extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq); + +#endif /* __VLYNQ_H__ */ diff --git 
a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h new file mode 100644 index 00000000..06f8e385 --- /dev/null +++ b/include/linux/vm_event_item.h @@ -0,0 +1,65 @@ +#ifndef VM_EVENT_ITEM_H_INCLUDED +#define VM_EVENT_ITEM_H_INCLUDED + +#ifdef CONFIG_ZONE_DMA +#define DMA_ZONE(xx) xx##_DMA, +#else +#define DMA_ZONE(xx) +#endif + +#ifdef CONFIG_ZONE_DMA32 +#define DMA32_ZONE(xx) xx##_DMA32, +#else +#define DMA32_ZONE(xx) +#endif + +#ifdef CONFIG_HIGHMEM +#define HIGHMEM_ZONE(xx) , xx##_HIGH +#else +#define HIGHMEM_ZONE(xx) +#endif + +#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE + +enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, + FOR_ALL_ZONES(PGALLOC), + PGFREE, PGACTIVATE, PGDEACTIVATE, + PGFAULT, PGMAJFAULT, + FOR_ALL_ZONES(PGREFILL), + FOR_ALL_ZONES(PGSTEAL_KSWAPD), + FOR_ALL_ZONES(PGSTEAL_DIRECT), + FOR_ALL_ZONES(PGSCAN_KSWAPD), + FOR_ALL_ZONES(PGSCAN_DIRECT), +#ifdef CONFIG_NUMA + PGSCAN_ZONE_RECLAIM_FAILED, +#endif + PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL, + KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, + KSWAPD_SKIP_CONGESTION_WAIT, + PAGEOUTRUN, ALLOCSTALL, PGROTATED, +#ifdef CONFIG_COMPACTION + COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED, + COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, +#endif +#ifdef CONFIG_HUGETLB_PAGE + HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, +#endif + UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ + UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ + UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ + UNEVICTABLE_PGMLOCKED, + UNEVICTABLE_PGMUNLOCKED, + UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ + UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ + UNEVICTABLE_MLOCKFREED, +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + THP_FAULT_ALLOC, + THP_FAULT_FALLBACK, + THP_COLLAPSE_ALLOC, + THP_COLLAPSE_ALLOC_FAILED, + THP_SPLIT, +#endif + NR_VM_EVENT_ITEMS +}; + +#endif /* VM_EVENT_ITEM_H_INCLUDED */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h new file mode 100644 index 00000000..dcdfc2bd --- /dev/null +++ b/include/linux/vmalloc.h @@ -0,0 +1,160 @@ +#ifndef _LINUX_VMALLOC_H +#define _LINUX_VMALLOC_H + +#include <linux/spinlock.h> +#include <linux/init.h> +#include <asm/page.h> /* pgprot_t */ + +struct vm_area_struct; /* vma defining user mapping in mm_types.h */ + +/* bits in flags of vmalloc's vm_struct below */ +#define VM_IOREMAP 0x00000001 /* ioremap() and friends */ +#define VM_ALLOC 0x00000002 /* vmalloc() */ +#define VM_MAP 0x00000004 /* vmap()ed pages */ +#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ +#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ +#define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */ +/* bits [20..32] reserved for arch specific ioremap internals */ + +/* + * Maximum alignment for ioremap() regions. + * Can be overriden by arch-specific value. 
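As a concrete reading of the FOR_ALL_ZONES() helper in vm_event_item.h above (shown only as a comment, since the exact result depends on the zone config options chosen):

/* With CONFIG_ZONE_DMA and CONFIG_HIGHMEM enabled and CONFIG_ZONE_DMA32 off,
 *
 *	FOR_ALL_ZONES(PGALLOC)
 *
 * expands to
 *
 *	PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE
 *
 * so enum vm_event_item grows one PGALLOC_* counter per populated zone. */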
+ */ +#ifndef IOREMAP_MAX_ORDER +#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */ +#endif + +struct vm_struct { + struct vm_struct *next; + void *addr; + unsigned long size; + unsigned long flags; + struct page **pages; + unsigned int nr_pages; + phys_addr_t phys_addr; + void *caller; +}; + +/* + * Highlevel APIs for driver use + */ +extern void vm_unmap_ram(const void *mem, unsigned int count); +extern void *vm_map_ram(struct page **pages, unsigned int count, + int node, pgprot_t prot); +extern void vm_unmap_aliases(void); + +#ifdef CONFIG_MMU +extern void __init vmalloc_init(void); +#else +static inline void vmalloc_init(void) +{ +} +#endif + +extern void *vmalloc(unsigned long size); +extern void *vzalloc(unsigned long size); +extern void *vmalloc_user(unsigned long size); +extern void *vmalloc_node(unsigned long size, int node); +extern void *vzalloc_node(unsigned long size, int node); +extern void *vmalloc_exec(unsigned long size); +extern void *vmalloc_32(unsigned long size); +extern void *vmalloc_32_user(unsigned long size); +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); +extern void *__vmalloc_node_range(unsigned long size, unsigned long align, + unsigned long start, unsigned long end, gfp_t gfp_mask, + pgprot_t prot, int node, void *caller); +extern void vfree(const void *addr); + +extern void *vmap(struct page **pages, unsigned int count, + unsigned long flags, pgprot_t prot); +extern void vunmap(const void *addr); + +extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, + unsigned long pgoff); +void vmalloc_sync_all(void); + +/* + * Lowlevel-APIs (not for driver use!) + */ + +static inline size_t get_vm_area_size(const struct vm_struct *area) +{ + /* return actual size without guard page */ + return area->size - PAGE_SIZE; +} + +extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); +extern struct vm_struct *get_vm_area_caller(unsigned long size, + unsigned long flags, void *caller); +extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, + unsigned long start, unsigned long end); +extern struct vm_struct *__get_vm_area_caller(unsigned long size, + unsigned long flags, + unsigned long start, unsigned long end, + void *caller); +extern struct vm_struct *remove_vm_area(const void *addr); + +extern int map_vm_area(struct vm_struct *area, pgprot_t prot, + struct page ***pages); +#ifdef CONFIG_MMU +extern int map_kernel_range_noflush(unsigned long start, unsigned long size, + pgprot_t prot, struct page **pages); +extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); +extern void unmap_kernel_range(unsigned long addr, unsigned long size); +#else +static inline int +map_kernel_range_noflush(unsigned long start, unsigned long size, + pgprot_t prot, struct page **pages) +{ + return size >> PAGE_SHIFT; +} +static inline void +unmap_kernel_range_noflush(unsigned long addr, unsigned long size) +{ +} +static inline void +unmap_kernel_range(unsigned long addr, unsigned long size) +{ +} +#endif + +/* Allocate/destroy a 'vmalloc' VM area. */ +extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); +extern void free_vm_area(struct vm_struct *area); + +/* for /dev/kmem */ +extern long vread(char *buf, char *addr, unsigned long count); +extern long vwrite(char *buf, char *addr, unsigned long count); + +/* + * Internals. Dont't use.. 
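The high-level calls declared above pair vmalloc()/vzalloc() with vfree() and vmap() with vunmap(); the sketch below shows only the common allocate/free pattern, with an invented buffer size and caller.

/* Illustrative use of the high-level vmalloc API (hypothetical module code). */
#include <linux/errno.h>
#include <linux/vmalloc.h>

static void *example_buf;

static int example_alloc(void)
{
	example_buf = vzalloc(64 * 1024);	/* virtually contiguous, zeroed */
	if (!example_buf)
		return -ENOMEM;
	return 0;
}

static void example_release(void)
{
	vfree(example_buf);			/* vfree(NULL) is a no-op */
	example_buf = NULL;
}

Note that vmalloc memory is only virtually contiguous, so it is not suitable where physically contiguous pages are required (e.g. for most DMA).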
+ */ +extern rwlock_t vmlist_lock; +extern struct vm_struct *vmlist; +extern __init void vm_area_add_early(struct vm_struct *vm); +extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); + +#ifdef CONFIG_SMP +# ifdef CONFIG_MMU +struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, + const size_t *sizes, int nr_vms, + size_t align); + +void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); +# else +static inline struct vm_struct ** +pcpu_get_vm_areas(const unsigned long *offsets, + const size_t *sizes, int nr_vms, + size_t align) +{ + return NULL; +} + +static inline void +pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) +{ +} +# endif +#endif + +#endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h new file mode 100644 index 00000000..65efb92d --- /dev/null +++ b/include/linux/vmstat.h @@ -0,0 +1,263 @@ +#ifndef _LINUX_VMSTAT_H +#define _LINUX_VMSTAT_H + +#include <linux/types.h> +#include <linux/percpu.h> +#include <linux/mm.h> +#include <linux/mmzone.h> +#include <linux/vm_event_item.h> +#include <linux/atomic.h> + +extern int sysctl_stat_interval; + +#ifdef CONFIG_VM_EVENT_COUNTERS +/* + * Light weight per cpu counter implementation. + * + * Counters should only be incremented and no critical kernel component + * should rely on the counter values. + * + * Counters are handled completely inline. On many platforms the code + * generated will simply be the increment of a global address. + */ + +struct vm_event_state { + unsigned long event[NR_VM_EVENT_ITEMS]; +}; + +DECLARE_PER_CPU(struct vm_event_state, vm_event_states); + +static inline void __count_vm_event(enum vm_event_item item) +{ + __this_cpu_inc(vm_event_states.event[item]); +} + +static inline void count_vm_event(enum vm_event_item item) +{ + this_cpu_inc(vm_event_states.event[item]); +} + +static inline void __count_vm_events(enum vm_event_item item, long delta) +{ + __this_cpu_add(vm_event_states.event[item], delta); +} + +static inline void count_vm_events(enum vm_event_item item, long delta) +{ + this_cpu_add(vm_event_states.event[item], delta); +} + +extern void all_vm_events(unsigned long *); +#ifdef CONFIG_HOTPLUG +extern void vm_events_fold_cpu(int cpu); +#else +static inline void vm_events_fold_cpu(int cpu) +{ +} +#endif + +#else + +/* Disable counters */ +static inline void count_vm_event(enum vm_event_item item) +{ +} +static inline void count_vm_events(enum vm_event_item item, long delta) +{ +} +static inline void __count_vm_event(enum vm_event_item item) +{ +} +static inline void __count_vm_events(enum vm_event_item item, long delta) +{ +} +static inline void all_vm_events(unsigned long *ret) +{ +} +static inline void vm_events_fold_cpu(int cpu) +{ +} + +#endif /* CONFIG_VM_EVENT_COUNTERS */ + +#define __count_zone_vm_events(item, zone, delta) \ + __count_vm_events(item##_NORMAL - ZONE_NORMAL + \ + zone_idx(zone), delta) + +/* + * Zone based page accounting with per cpu differentials. 
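These event counters are deliberately cheap and approximate; a caller just bumps an item from vm_event_item.h. A hedged sketch of the usage (the call site is hypothetical):

/* Illustrative only: bumping VM event counters from some hypothetical path. */
#include <linux/vmstat.h>

static void example_count_fault(int major)
{
	/* count_vm_event() uses this_cpu_inc() and is safe in any context;
	 * __count_vm_event() is the cheaper form for callers that already
	 * run with preemption disabled. */
	count_vm_event(PGFAULT);
	if (major)
		count_vm_event(PGMAJFAULT);
}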
+ */ +extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + +static inline void zone_page_state_add(long x, struct zone *zone, + enum zone_stat_item item) +{ + atomic_long_add(x, &zone->vm_stat[item]); + atomic_long_add(x, &vm_stat[item]); +} + +static inline unsigned long global_page_state(enum zone_stat_item item) +{ + long x = atomic_long_read(&vm_stat[item]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + +static inline unsigned long zone_page_state(struct zone *zone, + enum zone_stat_item item) +{ + long x = atomic_long_read(&zone->vm_stat[item]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; +} + +/* + * More accurate version that also considers the currently pending + * deltas. For that we need to loop over all cpus to find the current + * deltas. There is no synchronization so the result cannot be + * exactly accurate either. + */ +static inline unsigned long zone_page_state_snapshot(struct zone *zone, + enum zone_stat_item item) +{ + long x = atomic_long_read(&zone->vm_stat[item]); + +#ifdef CONFIG_SMP + int cpu; + for_each_online_cpu(cpu) + x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; + + if (x < 0) + x = 0; +#endif + return x; +} + +extern unsigned long global_reclaimable_pages(void); +extern unsigned long zone_reclaimable_pages(struct zone *zone); + +#ifdef CONFIG_NUMA +/* + * Determine the per node value of a stat item. This function + * is called frequently in a NUMA machine, so try to be as + * frugal as possible. + */ +static inline unsigned long node_page_state(int node, + enum zone_stat_item item) +{ + struct zone *zones = NODE_DATA(node)->node_zones; + + return +#ifdef CONFIG_ZONE_DMA + zone_page_state(&zones[ZONE_DMA], item) + +#endif +#ifdef CONFIG_ZONE_DMA32 + zone_page_state(&zones[ZONE_DMA32], item) + +#endif +#ifdef CONFIG_HIGHMEM + zone_page_state(&zones[ZONE_HIGHMEM], item) + +#endif + zone_page_state(&zones[ZONE_NORMAL], item) + + zone_page_state(&zones[ZONE_MOVABLE], item); +} + +extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); + +#else + +#define node_page_state(node, item) global_page_state(item) +#define zone_statistics(_zl, _z, gfp) do { } while (0) + +#endif /* CONFIG_NUMA */ + +#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) +#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) + +static inline void zap_zone_vm_stats(struct zone *zone) +{ + memset(zone->vm_stat, 0, sizeof(zone->vm_stat)); +} + +extern void inc_zone_state(struct zone *, enum zone_stat_item); + +#ifdef CONFIG_SMP +void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); +void __inc_zone_page_state(struct page *, enum zone_stat_item); +void __dec_zone_page_state(struct page *, enum zone_stat_item); + +void mod_zone_page_state(struct zone *, enum zone_stat_item, int); +void inc_zone_page_state(struct page *, enum zone_stat_item); +void dec_zone_page_state(struct page *, enum zone_stat_item); + +extern void inc_zone_state(struct zone *, enum zone_stat_item); +extern void __inc_zone_state(struct zone *, enum zone_stat_item); +extern void dec_zone_state(struct zone *, enum zone_stat_item); +extern void __dec_zone_state(struct zone *, enum zone_stat_item); + +void refresh_cpu_vm_stats(int); +void refresh_zone_stat_thresholds(void); + +int calculate_pressure_threshold(struct zone *zone); +int calculate_normal_threshold(struct zone *zone); +void set_pgdat_percpu_threshold(pg_data_t *pgdat, + int (*calculate_pressure)(struct zone *)); +#else /* CONFIG_SMP */ 
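On the read side, global_page_state() and zone_page_state() return slightly stale but cheap values, while zone_page_state_snapshot() folds in the pending per-cpu deltas when that drift matters. A minimal, hypothetical reader:

/* Illustrative read of the zone/global counters declared above. */
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/vmstat.h>

static void example_report_free(struct zone *zone)
{
	pr_info("free pages: global=%lu zone=%lu (snapshot=%lu)\n",
		global_page_state(NR_FREE_PAGES),
		zone_page_state(zone, NR_FREE_PAGES),
		zone_page_state_snapshot(zone, NR_FREE_PAGES));
}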
+ +/* + * We do not maintain differentials in a single processor configuration. + * The functions directly modify the zone and global counters. + */ +static inline void __mod_zone_page_state(struct zone *zone, + enum zone_stat_item item, int delta) +{ + zone_page_state_add(delta, zone, item); +} + +static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) +{ + atomic_long_inc(&zone->vm_stat[item]); + atomic_long_inc(&vm_stat[item]); +} + +static inline void __inc_zone_page_state(struct page *page, + enum zone_stat_item item) +{ + __inc_zone_state(page_zone(page), item); +} + +static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) +{ + atomic_long_dec(&zone->vm_stat[item]); + atomic_long_dec(&vm_stat[item]); +} + +static inline void __dec_zone_page_state(struct page *page, + enum zone_stat_item item) +{ + __dec_zone_state(page_zone(page), item); +} + +/* + * We only use atomic operations to update counters. So there is no need to + * disable interrupts. + */ +#define inc_zone_page_state __inc_zone_page_state +#define dec_zone_page_state __dec_zone_page_state +#define mod_zone_page_state __mod_zone_page_state + +#define set_pgdat_percpu_threshold(pgdat, callback) { } + +static inline void refresh_cpu_vm_stats(int cpu) { } +static inline void refresh_zone_stat_thresholds(void) { } + +#endif /* CONFIG_SMP */ + +extern const char * const vmstat_text[]; + +#endif /* _LINUX_VMSTAT_H */ diff --git a/include/linux/vt.h b/include/linux/vt.h new file mode 100644 index 00000000..30a8dd9c --- /dev/null +++ b/include/linux/vt.h @@ -0,0 +1,113 @@ +#ifndef _LINUX_VT_H +#define _LINUX_VT_H + + +/* + * These constants are also useful for user-level apps (e.g., VC + * resizing). + */ +#define MIN_NR_CONSOLES 1 /* must be at least 1 */ +#define MAX_NR_CONSOLES 63 /* serial lines start at 64 */ +#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */ + /* Note: the ioctl VT_GETSTATE does not work for + consoles 16 and higher (since it returns a short) */ + +/* 0x56 is 'V', to avoid collision with termios and kd */ + +#define VT_OPENQRY 0x5600 /* find available vt */ + +struct vt_mode { + char mode; /* vt mode */ + char waitv; /* if set, hang on writes if not active */ + short relsig; /* signal to raise on release req */ + short acqsig; /* signal to raise on acquisition */ + short frsig; /* unused (set to 0) */ +}; +#define VT_GETMODE 0x5601 /* get mode of active vt */ +#define VT_SETMODE 0x5602 /* set mode of active vt */ +#define VT_AUTO 0x00 /* auto vt switching */ +#define VT_PROCESS 0x01 /* process controls switching */ +#define VT_ACKACQ 0x02 /* acknowledge switch */ + +struct vt_stat { + unsigned short v_active; /* active vt */ + unsigned short v_signal; /* signal to send */ + unsigned short v_state; /* vt bitmask */ +}; +#define VT_GETSTATE 0x5603 /* get global vt state info */ +#define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */ + +#define VT_RELDISP 0x5605 /* release display */ + +#define VT_ACTIVATE 0x5606 /* make vt active */ +#define VT_WAITACTIVE 0x5607 /* wait for vt active */ +#define VT_DISALLOCATE 0x5608 /* free memory associated to vt */ + +struct vt_sizes { + unsigned short v_rows; /* number of rows */ + unsigned short v_cols; /* number of columns */ + unsigned short v_scrollsize; /* number of lines of scrollback */ +}; +#define VT_RESIZE 0x5609 /* set kernel's idea of screensize */ + +struct vt_consize { + unsigned short v_rows; /* number of rows */ + unsigned short v_cols; /* number of columns */ + unsigned 
short v_vlin; /* number of pixel rows on screen */ + unsigned short v_clin; /* number of pixel rows per character */ + unsigned short v_vcol; /* number of pixel columns on screen */ + unsigned short v_ccol; /* number of pixel columns per character */ +}; +#define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */ +#define VT_LOCKSWITCH 0x560B /* disallow vt switching */ +#define VT_UNLOCKSWITCH 0x560C /* allow vt switching */ +#define VT_GETHIFONTMASK 0x560D /* return hi font mask */ + +struct vt_event { + unsigned int event; +#define VT_EVENT_SWITCH 0x0001 /* Console switch */ +#define VT_EVENT_BLANK 0x0002 /* Screen blank */ +#define VT_EVENT_UNBLANK 0x0004 /* Screen unblank */ +#define VT_EVENT_RESIZE 0x0008 /* Resize display */ +#define VT_MAX_EVENT 0x000F + unsigned int oldev; /* Old console */ + unsigned int newev; /* New console (if changing) */ + unsigned int pad[4]; /* Padding for expansion */ +}; + +#define VT_WAITEVENT 0x560E /* Wait for an event */ + +struct vt_setactivate { + unsigned int console; + struct vt_mode mode; +}; + +#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ + +#ifdef __KERNEL__ + +/* Virtual Terminal events. */ +#define VT_ALLOCATE 0x0001 /* Console got allocated */ +#define VT_DEALLOCATE 0x0002 /* Console will be deallocated */ +#define VT_WRITE 0x0003 /* A char got output */ +#define VT_UPDATE 0x0004 /* A bigger update occurred */ +#define VT_PREWRITE 0x0005 /* A char is about to be written to the console */ + +#ifdef CONFIG_VT_CONSOLE + +extern int vt_kmsg_redirect(int new); + +#else + +static inline int vt_kmsg_redirect(int new) +{ + return 0; +} + +#endif + +#endif /* __KERNEL__ */ + +#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1) + +#endif /* _LINUX_VT_H */ diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h new file mode 100644 index 00000000..057db7d2 --- /dev/null +++ b/include/linux/vt_buffer.h @@ -0,0 +1,63 @@ +/* + * include/linux/vt_buffer.h -- Access to VT screen buffer + * + * (c) 1998 Martin Mares <mj@ucw.cz> + * + * This is a set of macros and functions which are used in the + * console driver and related code to access the screen buffer. + * In most cases the console works with simple in-memory buffer, + * but when handling hardware text mode consoles, we store + * the foreground console directly in video memory. 
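The VT_* ioctls in vt.h above are also a user-space interface; a hedged user-space sketch of switching consoles follows (device path and error handling are simplified, and the caller needs sufficient privileges).

/* Illustrative user-space use of VT_ACTIVATE/VT_WAITACTIVE from vt.h. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vt.h>

int example_switch_console(int num)
{
	int fd = open("/dev/tty0", O_RDWR);	/* or an already-open console fd */

	if (fd < 0)
		return -1;
	if (ioctl(fd, VT_ACTIVATE, num) == 0)
		ioctl(fd, VT_WAITACTIVE, num);	/* block until the switch completes */
	close(fd);
	return 0;
}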
+ */ + +#ifndef _LINUX_VT_BUFFER_H_ +#define _LINUX_VT_BUFFER_H_ + + +#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE) +#include <asm/vga.h> +#endif + +#ifndef VT_BUF_HAVE_RW +#define scr_writew(val, addr) (*(addr) = (val)) +#define scr_readw(addr) (*(addr)) +#define scr_memcpyw(d, s, c) memcpy(d, s, c) +#define scr_memmovew(d, s, c) memmove(d, s, c) +#define VT_BUF_HAVE_MEMCPYW +#define VT_BUF_HAVE_MEMMOVEW +#endif + +#ifndef VT_BUF_HAVE_MEMSETW +static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) +{ + count /= 2; + while (count--) + scr_writew(c, s++); +} +#endif + +#ifndef VT_BUF_HAVE_MEMCPYW +static inline void scr_memcpyw(u16 *d, const u16 *s, unsigned int count) +{ + count /= 2; + while (count--) + scr_writew(scr_readw(s++), d++); +} +#endif + +#ifndef VT_BUF_HAVE_MEMMOVEW +static inline void scr_memmovew(u16 *d, const u16 *s, unsigned int count) +{ + if (d < s) + scr_memcpyw(d, s, count); + else { + count /= 2; + d += count; + s += count; + while (count--) + scr_writew(scr_readw(--s), --d); + } +} +#endif + +#endif diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h new file mode 100644 index 00000000..e33d77f1 --- /dev/null +++ b/include/linux/vt_kern.h @@ -0,0 +1,196 @@ +#ifndef _VT_KERN_H +#define _VT_KERN_H + +/* + * this really is an extension of the vc_cons structure in console.c, but + * with information needed by the vt package + */ + +#include <linux/vt.h> +#include <linux/kd.h> +#include <linux/tty.h> +#include <linux/mutex.h> +#include <linux/console_struct.h> +#include <linux/mm.h> +#include <linux/consolemap.h> +#include <linux/notifier.h> + +/* + * Presently, a lot of graphics programs do not restore the contents of + * the higher font pages. Defining this flag will avoid use of them, but + * will lose support for PIO_FONTRESET. Note that many font operations are + * not likely to work with these programs anyway; they need to be + * fixed. The linux/Documentation directory includes a code snippet + * to save and restore the text font. 
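The scr_* accessors in vt_buffer.h above exist so the same console code works whether the buffer is ordinary memory or VGA text memory. A hedged sketch of blanking one row, assuming the usual vc_data fields from console_struct.h:

/* Illustrative: clear one text row of a console using scr_memsetw(). */
#include <linux/vt_kern.h>	/* struct vc_data via console_struct.h */
#include <linux/vt_buffer.h>

static void example_clear_row(struct vc_data *vc, unsigned int row)
{
	u16 *line = (u16 *)(vc->vc_origin + row * vc->vc_size_row);

	/* count is in bytes; scr_memsetw() halves it internally */
	scr_memsetw(line, vc->vc_video_erase_char, vc->vc_size_row);
}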
+ */ +#ifdef CONFIG_VGA_CONSOLE +#define BROKEN_GRAPHICS_PROGRAMS 1 +#endif + +extern void kd_mksound(unsigned int hz, unsigned int ticks); +extern int kbd_rate(struct kbd_repeat *rep); +extern int fg_console, last_console, want_console; + +/* console.c */ + +int vc_allocate(unsigned int console); +int vc_cons_allocated(unsigned int console); +int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines); +void vc_deallocate(unsigned int console); +void reset_palette(struct vc_data *vc); +void do_blank_screen(int entering_gfx); +void do_unblank_screen(int leaving_gfx); +void unblank_screen(void); +void poke_blanked_console(void); +int con_font_op(struct vc_data *vc, struct console_font_op *op); +int con_set_cmap(unsigned char __user *cmap); +int con_get_cmap(unsigned char __user *cmap); +void scrollback(struct vc_data *vc, int lines); +void scrollfront(struct vc_data *vc, int lines); +void update_region(struct vc_data *vc, unsigned long start, int count); +void redraw_screen(struct vc_data *vc, int is_switch); +#define update_screen(x) redraw_screen(x, 0) +#define switch_screen(x) redraw_screen(x, 1) + +struct tty_struct; +int tioclinux(struct tty_struct *tty, unsigned long arg); + +#ifdef CONFIG_CONSOLE_TRANSLATIONS +/* consolemap.c */ + +struct unimapinit; +struct unipair; + +int con_set_trans_old(unsigned char __user * table); +int con_get_trans_old(unsigned char __user * table); +int con_set_trans_new(unsigned short __user * table); +int con_get_trans_new(unsigned short __user * table); +int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui); +int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list); +int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list); +int con_set_default_unimap(struct vc_data *vc); +void con_free_unimap(struct vc_data *vc); +void con_protect_unimap(struct vc_data *vc, int rdonly); +int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); + +#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ + ((vc)->vc_toggle_meta ? 
0x80 : 0)]) +#else +static inline int con_set_trans_old(unsigned char __user *table) +{ + return 0; +} +static inline int con_get_trans_old(unsigned char __user *table) +{ + return -EINVAL; +} +static inline int con_set_trans_new(unsigned short __user *table) +{ + return 0; +} +static inline int con_get_trans_new(unsigned short __user *table) +{ + return -EINVAL; +} +static inline int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui) +{ + return 0; +} +static inline +int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) +{ + return 0; +} +static inline +int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, + struct unipair __user *list) +{ + return -EINVAL; +} +static inline int con_set_default_unimap(struct vc_data *vc) +{ + return 0; +} +static inline void con_free_unimap(struct vc_data *vc) +{ +} +static inline void con_protect_unimap(struct vc_data *vc, int rdonly) +{ +} +static inline +int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc) +{ + return 0; +} + +#define vc_translate(vc, c) (c) +#endif + +/* vt.c */ +void vt_event_post(unsigned int event, unsigned int old, unsigned int new); +int vt_waitactive(int n); +void change_console(struct vc_data *new_vc); +void reset_vc(struct vc_data *vc); +extern int unbind_con_driver(const struct consw *csw, int first, int last, + int deflt); +int vty_init(const struct file_operations *console_fops); + +static inline bool vt_force_oops_output(struct vc_data *vc) +{ + if (oops_in_progress && vc->vc_panic_force_write && panic_timeout >= 0) + return true; + return false; +} + +extern char vt_dont_switch; +extern int default_utf8; +extern int global_cursor_default; + +struct vt_spawn_console { + spinlock_t lock; + struct pid *pid; + int sig; +}; +extern struct vt_spawn_console vt_spawn_con; + +extern int vt_move_to_console(unsigned int vt, int alloc); + +/* Interfaces for VC notification of character events (for accessibility etc) */ + +struct vt_notifier_param { + struct vc_data *vc; /* VC on which the update happened */ + unsigned int c; /* Printed char */ +}; + +extern int register_vt_notifier(struct notifier_block *nb); +extern int unregister_vt_notifier(struct notifier_block *nb); + +extern void hide_boot_cursor(bool hide); + +/* keyboard provided interfaces */ +extern int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm); +extern int vt_do_kdskbmode(int console, unsigned int arg); +extern int vt_do_kdskbmeta(int console, unsigned int arg); +extern int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, + int perm); +extern int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, + int perm, int console); +extern int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, + int perm); +extern int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm); +extern int vt_do_kdgkbmode(int console); +extern int vt_do_kdgkbmeta(int console); +extern void vt_reset_unicode(int console); +extern int vt_get_shift_state(void); +extern void vt_reset_keyboard(int console); +extern int vt_get_leds(int console, int flag); +extern int vt_get_kbd_mode_bit(int console, int bit); +extern void vt_set_kbd_mode_bit(int console, int bit); +extern void vt_clr_kbd_mode_bit(int console, int bit); +extern void vt_set_led_state(int console, int leds); +extern void vt_set_led_state(int console, int leds); +extern void vt_kbd_con_start(int console); +extern void vt_kbd_con_stop(int console); + + +#endif /* _VT_KERN_H */ diff --git a/include/linux/w1-gpio.h 
b/include/linux/w1-gpio.h new file mode 100644 index 00000000..3adeff82 --- /dev/null +++ b/include/linux/w1-gpio.h @@ -0,0 +1,24 @@ +/* + * w1-gpio interface to platform code + * + * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ +#ifndef _LINUX_W1_GPIO_H +#define _LINUX_W1_GPIO_H + +/** + * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio + * @pin: GPIO pin to use + * @is_open_drain: GPIO pin is configured as open drain + */ +struct w1_gpio_platform_data { + unsigned int pin; + unsigned int is_open_drain:1; + void (*enable_external_pullup)(int enable); +}; + +#endif /* _LINUX_W1_GPIO_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h new file mode 100644 index 00000000..1dee81c4 --- /dev/null +++ b/include/linux/wait.h @@ -0,0 +1,668 @@ +#ifndef _LINUX_WAIT_H +#define _LINUX_WAIT_H + +#define WNOHANG 0x00000001 +#define WUNTRACED 0x00000002 +#define WSTOPPED WUNTRACED +#define WEXITED 0x00000004 +#define WCONTINUED 0x00000008 +#define WNOWAIT 0x01000000 /* Don't reap, just poll status. */ + +#define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */ +#define __WALL 0x40000000 /* Wait on all children, regardless of type */ +#define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */ + +/* First argument to waitid: */ +#define P_ALL 0 +#define P_PID 1 +#define P_PGID 2 + +#ifdef __KERNEL__ + +#include <linux/list.h> +#include <linux/stddef.h> +#include <linux/spinlock.h> +#include <asm/current.h> + +typedef struct __wait_queue wait_queue_t; +typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); +int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); + +struct __wait_queue { + unsigned int flags; +#define WQ_FLAG_EXCLUSIVE 0x01 + void *private; + wait_queue_func_t func; + struct list_head task_list; +}; + +struct wait_bit_key { + void *flags; + int bit_nr; +}; + +struct wait_bit_queue { + struct wait_bit_key key; + wait_queue_t wait; +}; + +struct __wait_queue_head { + spinlock_t lock; + struct list_head task_list; +}; +typedef struct __wait_queue_head wait_queue_head_t; + +struct task_struct; + +/* + * Macros for declaration and initialisaton of the datatypes + */ + +#define __WAITQUEUE_INITIALIZER(name, tsk) { \ + .private = tsk, \ + .func = default_wake_function, \ + .task_list = { NULL, NULL } } + +#define DECLARE_WAITQUEUE(name, tsk) \ + wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) + +#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .task_list = { &(name).task_list, &(name).task_list } } + +#define DECLARE_WAIT_QUEUE_HEAD(name) \ + wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name) + +#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ + { .flags = word, .bit_nr = bit, } + +extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); + +#define init_waitqueue_head(q) \ + do { \ + static struct lock_class_key __key; \ + \ + __init_waitqueue_head((q), #q, &__key); \ + } while (0) + +#ifdef CONFIG_LOCKDEP +# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ + ({ init_waitqueue_head(&name); name; }) +# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \ + wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) +#else +# define 
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) +#endif + +static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) +{ + q->flags = 0; + q->private = p; + q->func = default_wake_function; +} + +static inline void init_waitqueue_func_entry(wait_queue_t *q, + wait_queue_func_t func) +{ + q->flags = 0; + q->private = NULL; + q->func = func; +} + +static inline int waitqueue_active(wait_queue_head_t *q) +{ + return !list_empty(&q->task_list); +} + +extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); +extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); +extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); + +static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) +{ + list_add(&new->task_list, &head->task_list); +} + +/* + * Used for wake-one threads: + */ +static inline void __add_wait_queue_exclusive(wait_queue_head_t *q, + wait_queue_t *wait) +{ + wait->flags |= WQ_FLAG_EXCLUSIVE; + __add_wait_queue(q, wait); +} + +static inline void __add_wait_queue_tail(wait_queue_head_t *head, + wait_queue_t *new) +{ + list_add_tail(&new->task_list, &head->task_list); +} + +static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q, + wait_queue_t *wait) +{ + wait->flags |= WQ_FLAG_EXCLUSIVE; + __add_wait_queue_tail(q, wait); +} + +static inline void __remove_wait_queue(wait_queue_head_t *head, + wait_queue_t *old) +{ + list_del(&old->task_list); +} + +void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); +void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, + void *key); +void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); +void __wake_up_bit(wait_queue_head_t *, void *, int); +int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); +int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); +void wake_up_bit(void *, int); +int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); +int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); +wait_queue_head_t *bit_waitqueue(void *, int); + +#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) +#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) +#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) +#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) +#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) + +#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) +#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) +#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) +#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) + +/* + * Wakeup macros to be used to report events to the targets. 
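The poll-key wakeup variants defined just below are typically paired with poll_wait() in a driver's poll method: the reader registers on the queue, and the writer wakes it with the event mask it is reporting. An illustrative (hypothetical) driver fragment:

/* Illustrative pairing of poll_wait() with wake_up_interruptible_poll(). */
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_read_wq);
static int example_has_data;

static unsigned int example_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &example_read_wq, wait);
	return example_has_data ? (POLLIN | POLLRDNORM) : 0;
}

static void example_data_arrived(void)
{
	example_has_data = 1;
	wake_up_interruptible_poll(&example_read_wq, POLLIN | POLLRDNORM);
}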
+ */ +#define wake_up_poll(x, m) \ + __wake_up(x, TASK_NORMAL, 1, (void *) (m)) +#define wake_up_locked_poll(x, m) \ + __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) +#define wake_up_interruptible_poll(x, m) \ + __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) +#define wake_up_interruptible_sync_poll(x, m) \ + __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) + +#define __wait_event(wq, condition) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ + if (condition) \ + break; \ + schedule(); \ + } \ + finish_wait(&wq, &__wait); \ +} while (0) + +/** + * wait_event - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the + * @condition evaluates to true. The @condition is checked each time + * the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + */ +#define wait_event(wq, condition) \ +do { \ + if (condition) \ + break; \ + __wait_event(wq, condition); \ +} while (0) + +#define __wait_event_timeout(wq, condition, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ + if (condition) \ + break; \ + ret = schedule_timeout(ret); \ + if (!ret) \ + break; \ + } \ + finish_wait(&wq, &__wait); \ +} while (0) + +/** + * wait_event_timeout - sleep until a condition gets true or a timeout elapses + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @timeout: timeout, in jiffies + * + * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the + * @condition evaluates to true. The @condition is checked each time + * the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function returns 0 if the @timeout elapsed, and the remaining + * jiffies if the condition evaluated to true before the timeout elapsed. + */ +#define wait_event_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!(condition)) \ + __wait_event_timeout(wq, condition, __ret); \ + __ret; \ +}) + +#define __wait_event_interruptible(wq, condition, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + schedule(); \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + finish_wait(&wq, &__wait); \ +} while (0) + +/** + * wait_event_interruptible - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or a signal is received. + * The @condition is checked each time the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function will return -ERESTARTSYS if it was interrupted by a + * signal and 0 if @condition evaluated to true. 
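In practice the macro defined just below is paired with wake_up_interruptible() on the producer side, with the condition updated before the wakeup as the comment above requires. A minimal sketch with invented names:

/* Illustrative consumer/producer pairing for wait_event_interruptible(). */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_ready;

static int example_consumer(void)
{
	/* Sleeps in TASK_INTERRUPTIBLE until example_ready is set;
	 * returns -ERESTARTSYS if a signal arrives first. */
	return wait_event_interruptible(example_wq, example_ready != 0);
}

static void example_producer(void)
{
	example_ready = 1;			/* update the condition first... */
	wake_up_interruptible(&example_wq);	/* ...then wake the waiters */
}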
+ */ +#define wait_event_interruptible(wq, condition) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __wait_event_interruptible(wq, condition, __ret); \ + __ret; \ +}) + +#define __wait_event_interruptible_timeout(wq, condition, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + ret = schedule_timeout(ret); \ + if (!ret) \ + break; \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + finish_wait(&wq, &__wait); \ +} while (0) + +/** + * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @timeout: timeout, in jiffies + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or a signal is received. + * The @condition is checked each time the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it + * was interrupted by a signal, and the remaining jiffies otherwise + * if the condition evaluated to true before the timeout elapsed. + */ +#define wait_event_interruptible_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!(condition)) \ + __wait_event_interruptible_timeout(wq, condition, __ret); \ + __ret; \ +}) + +#define __wait_event_interruptible_exclusive(wq, condition, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait_exclusive(&wq, &__wait, \ + TASK_INTERRUPTIBLE); \ + if (condition) { \ + finish_wait(&wq, &__wait); \ + break; \ + } \ + if (!signal_pending(current)) { \ + schedule(); \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + abort_exclusive_wait(&wq, &__wait, \ + TASK_INTERRUPTIBLE, NULL); \ + break; \ + } \ +} while (0) + +#define wait_event_interruptible_exclusive(wq, condition) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __wait_event_interruptible_exclusive(wq, condition, __ret);\ + __ret; \ +}) + + +#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ +({ \ + int __ret = 0; \ + DEFINE_WAIT(__wait); \ + if (exclusive) \ + __wait.flags |= WQ_FLAG_EXCLUSIVE; \ + do { \ + if (likely(list_empty(&__wait.task_list))) \ + __add_wait_queue_tail(&(wq), &__wait); \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (signal_pending(current)) { \ + __ret = -ERESTARTSYS; \ + break; \ + } \ + if (irq) \ + spin_unlock_irq(&(wq).lock); \ + else \ + spin_unlock(&(wq).lock); \ + schedule(); \ + if (irq) \ + spin_lock_irq(&(wq).lock); \ + else \ + spin_lock(&(wq).lock); \ + } while (!(condition)); \ + __remove_wait_queue(&(wq), &__wait); \ + __set_current_state(TASK_RUNNING); \ + __ret; \ +}) + + +/** + * wait_event_interruptible_locked - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or a signal is received. + * The @condition is checked each time the waitqueue @wq is woken up. + * + * It must be called with wq.lock being held. This spinlock is + * unlocked while sleeping but @condition testing is done while lock + * is held and when this macro exits the lock is held. 
+ * + * The lock is locked/unlocked using spin_lock()/spin_unlock() + * functions which must match the way they are locked/unlocked outside + * of this macro. + * + * wake_up_locked() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function will return -ERESTARTSYS if it was interrupted by a + * signal and 0 if @condition evaluated to true. + */ +#define wait_event_interruptible_locked(wq, condition) \ + ((condition) \ + ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0)) + +/** + * wait_event_interruptible_locked_irq - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or a signal is received. + * The @condition is checked each time the waitqueue @wq is woken up. + * + * It must be called with wq.lock being held. This spinlock is + * unlocked while sleeping but @condition testing is done while lock + * is held and when this macro exits the lock is held. + * + * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() + * functions which must match the way they are locked/unlocked outside + * of this macro. + * + * wake_up_locked() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function will return -ERESTARTSYS if it was interrupted by a + * signal and 0 if @condition evaluated to true. + */ +#define wait_event_interruptible_locked_irq(wq, condition) \ + ((condition) \ + ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1)) + +/** + * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or a signal is received. + * The @condition is checked each time the waitqueue @wq is woken up. + * + * It must be called with wq.lock being held. This spinlock is + * unlocked while sleeping but @condition testing is done while lock + * is held and when this macro exits the lock is held. + * + * The lock is locked/unlocked using spin_lock()/spin_unlock() + * functions which must match the way they are locked/unlocked outside + * of this macro. + * + * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag + * set thus when other process waits process on the list if this + * process is awaken further processes are not considered. + * + * wake_up_locked() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function will return -ERESTARTSYS if it was interrupted by a + * signal and 0 if @condition evaluated to true. + */ +#define wait_event_interruptible_exclusive_locked(wq, condition) \ + ((condition) \ + ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0)) + +/** + * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or a signal is received. + * The @condition is checked each time the waitqueue @wq is woken up. + * + * It must be called with wq.lock being held. 
This spinlock is + * unlocked while sleeping but @condition testing is done while lock + * is held and when this macro exits the lock is held. + * + * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() + * functions which must match the way they are locked/unlocked outside + * of this macro. + * + * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag + * set thus when other process waits process on the list if this + * process is awaken further processes are not considered. + * + * wake_up_locked() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function will return -ERESTARTSYS if it was interrupted by a + * signal and 0 if @condition evaluated to true. + */ +#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ + ((condition) \ + ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) + + + +#define __wait_event_killable(wq, condition, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \ + if (condition) \ + break; \ + if (!fatal_signal_pending(current)) { \ + schedule(); \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + finish_wait(&wq, &__wait); \ +} while (0) + +/** + * wait_event_killable - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_KILLABLE) until the + * @condition evaluates to true or a signal is received. + * The @condition is checked each time the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function will return -ERESTARTSYS if it was interrupted by a + * signal and 0 if @condition evaluated to true. + */ +#define wait_event_killable(wq, condition) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __wait_event_killable(wq, condition, __ret); \ + __ret; \ +}) + +/* + * These are the old interfaces to sleep waiting for an event. + * They are racy. DO NOT use them, use the wait_event* interfaces above. + * We plan to remove these interfaces. 
+ */ +extern void sleep_on(wait_queue_head_t *q); +extern long sleep_on_timeout(wait_queue_head_t *q, + signed long timeout); +extern void interruptible_sleep_on(wait_queue_head_t *q); +extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, + signed long timeout); + +/* + * Waitqueues which are removed from the waitqueue_head at wakeup time + */ +void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); +void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); +void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); +void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, + unsigned int mode, void *key); +int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); +int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); + +#define DEFINE_WAIT_FUNC(name, function) \ + wait_queue_t name = { \ + .private = current, \ + .func = function, \ + .task_list = LIST_HEAD_INIT((name).task_list), \ + } + +#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) + +#define DEFINE_WAIT_BIT(name, word, bit) \ + struct wait_bit_queue name = { \ + .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \ + .wait = { \ + .private = current, \ + .func = wake_bit_function, \ + .task_list = \ + LIST_HEAD_INIT((name).wait.task_list), \ + }, \ + } + +#define init_wait(wait) \ + do { \ + (wait)->private = current; \ + (wait)->func = autoremove_wake_function; \ + INIT_LIST_HEAD(&(wait)->task_list); \ + (wait)->flags = 0; \ + } while (0) + +/** + * wait_on_bit - wait for a bit to be cleared + * @word: the word being waited on, a kernel virtual address + * @bit: the bit of the word being waited on + * @action: the function used to sleep, which may take special actions + * @mode: the task state to sleep in + * + * There is a standard hashed waitqueue table for generic use. This + * is the part of the hashtable's accessor API that waits on a bit. + * For instance, if one were to have waiters on a bitflag, one would + * call wait_on_bit() in threads waiting for the bit to clear. + * One uses wait_on_bit() where one is waiting for the bit to clear, + * but has no intention of setting it. + */ +static inline int wait_on_bit(void *word, int bit, + int (*action)(void *), unsigned mode) +{ + if (!test_bit(bit, word)) + return 0; + return out_of_line_wait_on_bit(word, bit, action, mode); +} + +/** + * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it + * @word: the word being waited on, a kernel virtual address + * @bit: the bit of the word being waited on + * @action: the function used to sleep, which may take special actions + * @mode: the task state to sleep in + * + * There is a standard hashed waitqueue table for generic use. This + * is the part of the hashtable's accessor API that waits on a bit + * when one intends to set it, for instance, trying to lock bitflags. + * For instance, if one were to have waiters trying to set bitflag + * and waiting for it to clear before setting it, one would call + * wait_on_bit() in threads waiting to be able to set the bit. + * One uses wait_on_bit_lock() where one is waiting for the bit to + * clear with the intention of setting it, and when done, clearing it. 
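With this interface, wait_on_bit() takes an action callback that decides how to sleep; the waker clears the bit, issues the barrier, and calls wake_up_bit(). A hedged sketch around an invented flag word:

/* Illustrative wait_on_bit()/wake_up_bit() pairing (hypothetical flag bit). */
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

#define EXAMPLE_BUSY	0		/* bit number within example_flags */
static unsigned long example_flags;

static int example_bit_wait(void *word)
{
	schedule();			/* just sleep until woken */
	return 0;
}

static int example_wait_until_idle(void)
{
	return wait_on_bit(&example_flags, EXAMPLE_BUSY,
			   example_bit_wait, TASK_UNINTERRUPTIBLE);
}

static void example_mark_idle(void)
{
	clear_bit(EXAMPLE_BUSY, &example_flags);
	smp_mb__after_clear_bit();	/* order the clear before the wakeup */
	wake_up_bit(&example_flags, EXAMPLE_BUSY);
}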
+ */ +static inline int wait_on_bit_lock(void *word, int bit, + int (*action)(void *), unsigned mode) +{ + if (!test_and_set_bit(bit, word)) + return 0; + return out_of_line_wait_on_bit_lock(word, bit, action, mode); +} + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h new file mode 100644 index 00000000..f4a698a2 --- /dev/null +++ b/include/linux/wakelock.h @@ -0,0 +1,67 @@ +/* include/linux/wakelock.h + * + * Copyright (C) 2007-2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_WAKELOCK_H +#define _LINUX_WAKELOCK_H + +#include <linux/ktime.h> +#include <linux/device.h> + +/* A wake_lock prevents the system from entering suspend or other low power + * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock + * prevents a full system suspend. + */ + +enum { + WAKE_LOCK_SUSPEND, /* Prevent suspend */ + WAKE_LOCK_TYPE_COUNT +}; + +struct wake_lock { + struct wakeup_source ws; +}; + +static inline void wake_lock_init(struct wake_lock *lock, int type, + const char *name) +{ + wakeup_source_init(&lock->ws, name); +} + +static inline void wake_lock_destroy(struct wake_lock *lock) +{ + wakeup_source_trash(&lock->ws); +} + +static inline void wake_lock(struct wake_lock *lock) +{ + __pm_stay_awake(&lock->ws); +} + +static inline void wake_lock_timeout(struct wake_lock *lock, long timeout) +{ + __pm_wakeup_event(&lock->ws, jiffies_to_msecs(timeout)); +} + +static inline void wake_unlock(struct wake_lock *lock) +{ + __pm_relax(&lock->ws); +} + +static inline int wake_lock_active(struct wake_lock *lock) +{ + return lock->ws.active; +} + +#endif diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h new file mode 100644 index 00000000..3157cc1f --- /dev/null +++ b/include/linux/wanrouter.h @@ -0,0 +1,532 @@ +/***************************************************************************** +* wanrouter.h Definitions for the WAN Multiprotocol Router Module. +* This module provides API and common services for WAN Link +* Drivers and is completely hardware-independent. +* +* Author: Nenad Corbic <ncorbic@sangoma.com> +* Gideon Hack +* Additions: Arnaldo Melo +* +* Copyright: (c) 1995-2000 Sangoma Technologies Inc. +* +* This program is free software; you can redistribute it and/or +* modify it under the terms of the GNU General Public License +* as published by the Free Software Foundation; either version +* 2 of the License, or (at your option) any later version. +* ============================================================================ +* Jul 21, 2000 Nenad Corbic Added WAN_FT1_READY State +* Feb 24, 2000 Nenad Corbic Added support for socket based x25api +* Jan 28, 2000 Nenad Corbic Added support for the ASYNC protocol. +* Oct 04, 1999 Nenad Corbic Updated for 2.1.0 release +* Jun 02, 1999 Gideon Hack Added support for the S514 adapter. +* May 23, 1999 Arnaldo Melo Added local_addr to wanif_conf_t +* WAN_DISCONNECTING state added +* Jul 20, 1998 David Fong Added Inverse ARP options to 'wanif_conf_t' +* Jun 12, 1998 David Fong Added Cisco HDLC support. 
+* Dec 16, 1997 Jaspreet Singh Moved 'enable_IPX' and 'network_number' to +* 'wanif_conf_t' +* Dec 05, 1997 Jaspreet Singh Added 'pap', 'chap' to 'wanif_conf_t' +* Added 'authenticator' to 'wan_ppp_conf_t' +* Nov 06, 1997 Jaspreet Singh Changed Router Driver version to 1.1 from 1.0 +* Oct 20, 1997 Jaspreet Singh Added 'cir','bc','be' and 'mc' to 'wanif_conf_t' +* Added 'enable_IPX' and 'network_number' to +* 'wan_device_t'. Also added defines for +* UDP PACKET TYPE, Interrupt test, critical values +* for RACE conditions. +* Oct 05, 1997 Jaspreet Singh Added 'dlci_num' and 'dlci[100]' to +* 'wan_fr_conf_t' to configure a list of dlci(s) +* for a NODE +* Jul 07, 1997 Jaspreet Singh Added 'ttl' to 'wandev_conf_t' & 'wan_device_t' +* May 29, 1997 Jaspreet Singh Added 'tx_int_enabled' to 'wan_device_t' +* May 21, 1997 Jaspreet Singh Added 'udp_port' to 'wan_device_t' +* Apr 25, 1997 Farhan Thawar Added 'udp_port' to 'wandev_conf_t' +* Jan 16, 1997 Gene Kozin router_devlist made public +* Jan 02, 1997 Gene Kozin Initial version (based on wanpipe.h). +*****************************************************************************/ + +#ifndef _ROUTER_H +#define _ROUTER_H + +#define ROUTER_NAME "wanrouter" /* in case we ever change it */ +#define ROUTER_VERSION 1 /* version number */ +#define ROUTER_RELEASE 1 /* release (minor version) number */ +#define ROUTER_IOCTL 'W' /* for IOCTL calls */ +#define ROUTER_MAGIC 0x524D4157L /* signature: 'WANR' reversed */ + +/* IOCTL codes for /proc/router/<device> entries (up to 255) */ +enum router_ioctls +{ + ROUTER_SETUP = ROUTER_IOCTL<<8, /* configure device */ + ROUTER_DOWN, /* shut down device */ + ROUTER_STAT, /* get device status */ + ROUTER_IFNEW, /* add interface */ + ROUTER_IFDEL, /* delete interface */ + ROUTER_IFSTAT, /* get interface status */ + ROUTER_USER = (ROUTER_IOCTL<<8)+16, /* driver-specific calls */ + ROUTER_USER_MAX = (ROUTER_IOCTL<<8)+31 +}; + +/* identifiers for displaying proc file data for dual port adapters */ +#define PROC_DATA_PORT_0 0x8000 /* the data is for port 0 */ +#define PROC_DATA_PORT_1 0x8001 /* the data is for port 1 */ + +/* NLPID for packet encapsulation (ISO/IEC TR 9577) */ +#define NLPID_IP 0xCC /* Internet Protocol Datagram */ +#define NLPID_SNAP 0x80 /* IEEE Subnetwork Access Protocol */ +#define NLPID_CLNP 0x81 /* ISO/IEC 8473 */ +#define NLPID_ESIS 0x82 /* ISO/IEC 9542 */ +#define NLPID_ISIS 0x83 /* ISO/IEC ISIS */ +#define NLPID_Q933 0x08 /* CCITT Q.933 */ + +/* Miscellaneous */ +#define WAN_IFNAME_SZ 15 /* max length of the interface name */ +#define WAN_DRVNAME_SZ 15 /* max length of the link driver name */ +#define WAN_ADDRESS_SZ 31 /* max length of the WAN media address */ +#define USED_BY_FIELD 8 /* max length of the used by field */ + +/* Defines for UDP PACKET TYPE */ +#define UDP_PTPIPE_TYPE 0x01 +#define UDP_FPIPE_TYPE 0x02 +#define UDP_CPIPE_TYPE 0x03 +#define UDP_DRVSTATS_TYPE 0x04 +#define UDP_INVALID_TYPE 0x05 + +/* Command return code */ +#define CMD_OK 0 /* normal firmware return code */ +#define CMD_TIMEOUT 0xFF /* firmware command timed out */ + +/* UDP Packet Management */ +#define UDP_PKT_FRM_STACK 0x00 +#define UDP_PKT_FRM_NETWORK 0x01 + +/* Maximum interrupt test counter */ +#define MAX_INTR_TEST_COUNTER 100 + +/* Critical Values for RACE conditions*/ +#define CRITICAL_IN_ISR 0xA1 +#define CRITICAL_INTR_HANDLED 0xB1 + +/****** Data Types **********************************************************/ + +/*---------------------------------------------------------------------------- + * 
X.25-specific link-level configuration. + */ +typedef struct wan_x25_conf +{ + unsigned lo_pvc; /* lowest permanent circuit number */ + unsigned hi_pvc; /* highest permanent circuit number */ + unsigned lo_svc; /* lowest switched circuit number */ + unsigned hi_svc; /* highest switched circuit number */ + unsigned hdlc_window; /* HDLC window size (1..7) */ + unsigned pkt_window; /* X.25 packet window size (1..7) */ + unsigned t1; /* HDLC timer T1, sec (1..30) */ + unsigned t2; /* HDLC timer T2, sec (0..29) */ + unsigned t4; /* HDLC supervisory frame timer = T4 * T1 */ + unsigned n2; /* HDLC retransmission limit (1..30) */ + unsigned t10_t20; /* X.25 RESTART timeout, sec (1..255) */ + unsigned t11_t21; /* X.25 CALL timeout, sec (1..255) */ + unsigned t12_t22; /* X.25 RESET timeout, sec (1..255) */ + unsigned t13_t23; /* X.25 CLEAR timeout, sec (1..255) */ + unsigned t16_t26; /* X.25 INTERRUPT timeout, sec (1..255) */ + unsigned t28; /* X.25 REGISTRATION timeout, sec (1..255) */ + unsigned r10_r20; /* RESTART retransmission limit (0..250) */ + unsigned r12_r22; /* RESET retransmission limit (0..250) */ + unsigned r13_r23; /* CLEAR retransmission limit (0..250) */ + unsigned ccitt_compat; /* compatibility mode: 1988/1984/1980 */ + unsigned x25_conf_opt; /* User defined x25 config optoins */ + unsigned char LAPB_hdlc_only; /* Run in HDLC only mode */ + unsigned char logging; /* Control connection logging */ + unsigned char oob_on_modem; /* Whether to send modem status to the user app */ +} wan_x25_conf_t; + +/*---------------------------------------------------------------------------- + * Frame relay specific link-level configuration. + */ +typedef struct wan_fr_conf +{ + unsigned signalling; /* local in-channel signalling type */ + unsigned t391; /* link integrity verification timer */ + unsigned t392; /* polling verification timer */ + unsigned n391; /* full status polling cycle counter */ + unsigned n392; /* error threshold counter */ + unsigned n393; /* monitored events counter */ + unsigned dlci_num; /* number of DLCs (access node) */ + unsigned dlci[100]; /* List of all DLCIs */ +} wan_fr_conf_t; + +/*---------------------------------------------------------------------------- + * PPP-specific link-level configuration. + */ +typedef struct wan_ppp_conf +{ + unsigned restart_tmr; /* restart timer */ + unsigned auth_rsrt_tmr; /* authentication timer */ + unsigned auth_wait_tmr; /* authentication timer */ + unsigned mdm_fail_tmr; /* modem failure timer */ + unsigned dtr_drop_tmr; /* DTR drop timer */ + unsigned connect_tmout; /* connection timeout */ + unsigned conf_retry; /* max. retry */ + unsigned term_retry; /* max. retry */ + unsigned fail_retry; /* max. retry */ + unsigned auth_retry; /* max. retry */ + unsigned auth_options; /* authentication opt. */ + unsigned ip_options; /* IP options */ + char authenticator; /* AUTHENTICATOR or not */ + char ip_mode; /* Static/Host/Peer */ +} wan_ppp_conf_t; + +/*---------------------------------------------------------------------------- + * CHDLC-specific link-level configuration. 
+ */ +typedef struct wan_chdlc_conf +{ + unsigned char ignore_dcd; /* Protocol options: */ + unsigned char ignore_cts; /* Ignore these to determine */ + unsigned char ignore_keepalive; /* link status (Yes or No) */ + unsigned char hdlc_streaming; /* hdlc_streaming mode (Y/N) */ + unsigned char receive_only; /* no transmit buffering (Y/N) */ + unsigned keepalive_tx_tmr; /* transmit keepalive timer */ + unsigned keepalive_rx_tmr; /* receive keepalive timer */ + unsigned keepalive_err_margin; /* keepalive_error_tolerance */ + unsigned slarp_timer; /* SLARP request timer */ +} wan_chdlc_conf_t; + + +/*---------------------------------------------------------------------------- + * WAN device configuration. Passed to ROUTER_SETUP IOCTL. + */ +typedef struct wandev_conf +{ + unsigned magic; /* magic number (for verification) */ + unsigned config_id; /* configuration structure identifier */ + /****** hardware configuration ******/ + unsigned ioport; /* adapter I/O port base */ + unsigned long maddr; /* dual-port memory address */ + unsigned msize; /* dual-port memory size */ + int irq; /* interrupt request level */ + int dma; /* DMA request level */ + char S514_CPU_no[1]; /* S514 PCI adapter CPU number ('A' or 'B') */ + unsigned PCI_slot_no; /* S514 PCI adapter slot number */ + char auto_pci_cfg; /* S515 PCI automatic slot detection */ + char comm_port; /* Communication Port (PRI=0, SEC=1) */ + unsigned bps; /* data transfer rate */ + unsigned mtu; /* maximum transmit unit size */ + unsigned udp_port; /* UDP port for management */ + unsigned char ttl; /* Time To Live for UDP security */ + unsigned char ft1; /* FT1 Configurator Option */ + char interface; /* RS-232/V.35, etc. */ + char clocking; /* external/internal */ + char line_coding; /* NRZ/NRZI/FM0/FM1, etc. */ + char station; /* DTE/DCE, primary/secondary, etc. */ + char connection; /* permanent/switched/on-demand */ + char read_mode; /* read mode: Polling or interrupt */ + char receive_only; /* disable tx buffers */ + char tty; /* Create a fake tty device */ + unsigned tty_major; /* Major number for wanpipe tty device */ + unsigned tty_minor; /* Minor number for wanpipe tty device */ + unsigned tty_mode; /* TTY operation mode SYNC or ASYNC */ + char backup; /* Backup Mode */ + unsigned hw_opt[4]; /* other hardware options */ + unsigned reserved[4]; + /****** arbitrary data ***************/ + unsigned data_size; /* data buffer size */ + void* data; /* data buffer, e.g. firmware */ + union /****** protocol-specific ************/ + { + wan_x25_conf_t x25; /* X.25 configuration */ + wan_ppp_conf_t ppp; /* PPP configuration */ + wan_fr_conf_t fr; /* frame relay configuration */ + wan_chdlc_conf_t chdlc; /* Cisco HDLC configuration */ + } u; +} wandev_conf_t; + +/* 'config_id' definitions */ +#define WANCONFIG_X25 101 /* X.25 link */ +#define WANCONFIG_FR 102 /* frame relay link */ +#define WANCONFIG_PPP 103 /* synchronous PPP link */ +#define WANCONFIG_CHDLC 104 /* Cisco HDLC Link */ +#define WANCONFIG_BSC 105 /* BiSync Streaming */ +#define WANCONFIG_HDLC 106 /* HDLC Support */ +#define WANCONFIG_MPPP 107 /* Multi Port PPP over RAW CHDLC */ + +/* + * Configuration options defines. 
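A hedged illustration of how a configuration utility might populate wandev_conf_t before a ROUTER_SETUP call; every value below is a placeholder, not a working line setup, and the WANOPT_* constants are the ones defined just below.

/* Illustrative only: filling wandev_conf_t for a Frame Relay link. */
#include <string.h>
#include <linux/wanrouter.h>

static void example_fill_fr_conf(wandev_conf_t *conf)
{
	memset(conf, 0, sizeof(*conf));
	conf->magic	= ROUTER_MAGIC;		/* verified by the router */
	conf->config_id	= WANCONFIG_FR;		/* frame relay link */
	conf->bps	= 128000;		/* placeholder line rate */
	conf->mtu	= 1500;
	conf->interface	= WANOPT_V35;
	conf->clocking	= WANOPT_EXTERNAL;
	conf->station	= WANOPT_CPE;
	conf->u.fr.signalling = WANOPT_FR_ANSI;	/* ANSI T1.617 Annex D */
	conf->u.fr.dlci_num   = 1;
	conf->u.fr.dlci[0]    = 16;		/* placeholder DLCI */
}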
+ */ +/* general options */ +#define WANOPT_OFF 0 +#define WANOPT_ON 1 +#define WANOPT_NO 0 +#define WANOPT_YES 1 + +/* intercace options */ +#define WANOPT_RS232 0 +#define WANOPT_V35 1 + +/* data encoding options */ +#define WANOPT_NRZ 0 +#define WANOPT_NRZI 1 +#define WANOPT_FM0 2 +#define WANOPT_FM1 3 + +/* link type options */ +#define WANOPT_POINTTOPOINT 0 /* RTS always active */ +#define WANOPT_MULTIDROP 1 /* RTS is active when transmitting */ + +/* clocking options */ +#define WANOPT_EXTERNAL 0 +#define WANOPT_INTERNAL 1 + +/* station options */ +#define WANOPT_DTE 0 +#define WANOPT_DCE 1 +#define WANOPT_CPE 0 +#define WANOPT_NODE 1 +#define WANOPT_SECONDARY 0 +#define WANOPT_PRIMARY 1 + +/* connection options */ +#define WANOPT_PERMANENT 0 /* DTR always active */ +#define WANOPT_SWITCHED 1 /* use DTR to setup link (dial-up) */ +#define WANOPT_ONDEMAND 2 /* activate DTR only before sending */ + +/* frame relay in-channel signalling */ +#define WANOPT_FR_ANSI 1 /* ANSI T1.617 Annex D */ +#define WANOPT_FR_Q933 2 /* ITU Q.933A */ +#define WANOPT_FR_LMI 3 /* LMI */ + +/* PPP IP Mode Options */ +#define WANOPT_PPP_STATIC 0 +#define WANOPT_PPP_HOST 1 +#define WANOPT_PPP_PEER 2 + +/* ASY Mode Options */ +#define WANOPT_ONE 1 +#define WANOPT_TWO 2 +#define WANOPT_ONE_AND_HALF 3 + +#define WANOPT_NONE 0 +#define WANOPT_ODD 1 +#define WANOPT_EVEN 2 + +/* CHDLC Protocol Options */ +/* DF Commented out for now. + +#define WANOPT_CHDLC_NO_DCD IGNORE_DCD_FOR_LINK_STAT +#define WANOPT_CHDLC_NO_CTS IGNORE_CTS_FOR_LINK_STAT +#define WANOPT_CHDLC_NO_KEEPALIVE IGNORE_KPALV_FOR_LINK_STAT +*/ + +/* Port options */ +#define WANOPT_PRI 0 +#define WANOPT_SEC 1 +/* read mode */ +#define WANOPT_INTR 0 +#define WANOPT_POLL 1 + + +#define WANOPT_TTY_SYNC 0 +#define WANOPT_TTY_ASYNC 1 +/*---------------------------------------------------------------------------- + * WAN Link Status Info (for ROUTER_STAT IOCTL). + */ +typedef struct wandev_stat +{ + unsigned state; /* link state */ + unsigned ndev; /* number of configured interfaces */ + + /* link/interface configuration */ + unsigned connection; /* permanent/switched/on-demand */ + unsigned media_type; /* Frame relay/PPP/X.25/SDLC, etc. */ + unsigned mtu; /* max. 
transmit unit for this device */ + + /* physical level statistics */ + unsigned modem_status; /* modem status */ + unsigned rx_frames; /* received frames count */ + unsigned rx_overruns; /* receiver overrun error count */ + unsigned rx_crc_err; /* receive CRC error count */ + unsigned rx_aborts; /* received aborted frames count */ + unsigned rx_bad_length; /* unexpetedly long/short frames count */ + unsigned rx_dropped; /* frames discarded at device level */ + unsigned tx_frames; /* transmitted frames count */ + unsigned tx_underruns; /* aborted transmissions (underruns) count */ + unsigned tx_timeouts; /* transmission timeouts */ + unsigned tx_rejects; /* other transmit errors */ + + /* media level statistics */ + unsigned rx_bad_format; /* frames with invalid format */ + unsigned rx_bad_addr; /* frames with invalid media address */ + unsigned tx_retries; /* frames re-transmitted */ + unsigned reserved[16]; /* reserved for future use */ +} wandev_stat_t; + +/* 'state' defines */ +enum wan_states +{ + WAN_UNCONFIGURED, /* link/channel is not configured */ + WAN_DISCONNECTED, /* link/channel is disconnected */ + WAN_CONNECTING, /* connection is in progress */ + WAN_CONNECTED, /* link/channel is operational */ + WAN_LIMIT, /* for verification only */ + WAN_DUALPORT, /* for Dual Port cards */ + WAN_DISCONNECTING, + WAN_FT1_READY /* FT1 Configurator Ready */ +}; + +enum { + WAN_LOCAL_IP, + WAN_POINTOPOINT_IP, + WAN_NETMASK_IP, + WAN_BROADCAST_IP +}; + +/* 'modem_status' masks */ +#define WAN_MODEM_CTS 0x0001 /* CTS line active */ +#define WAN_MODEM_DCD 0x0002 /* DCD line active */ +#define WAN_MODEM_DTR 0x0010 /* DTR line active */ +#define WAN_MODEM_RTS 0x0020 /* RTS line active */ + +/*---------------------------------------------------------------------------- + * WAN interface (logical channel) configuration (for ROUTER_IFNEW IOCTL). 
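+ *
+ * Sketch of a per-channel configuration as user space might pass it to
+ * ROUTER_IFNEW, here for a single frame-relay DLCI (the interface name,
+ * DLCI number and "WANPIPE" usage tag are illustrative assumptions):
+ *
+ *	wanif_conf_t ifconf;
+ *
+ *	memset(&ifconf, 0, sizeof(ifconf));
+ *	ifconf.magic = ROUTER_MAGIC;
+ *	strcpy(ifconf.name, "wp1fr16");		/* interface name */
+ *	strcpy(ifconf.addr, "16");		/* media address: the DLCI */
+ *	strcpy(ifconf.usedby, "WANPIPE");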
+ */ +typedef struct wanif_conf +{ + unsigned magic; /* magic number */ + unsigned config_id; /* configuration identifier */ + char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */ + char addr[WAN_ADDRESS_SZ+1]; /* media address, ASCIIZ */ + char usedby[USED_BY_FIELD]; /* used by API or WANPIPE */ + unsigned idle_timeout; /* sec, before disconnecting */ + unsigned hold_timeout; /* sec, before re-connecting */ + unsigned cir; /* Committed Information Rate fwd,bwd*/ + unsigned bc; /* Committed Burst Size fwd, bwd */ + unsigned be; /* Excess Burst Size fwd, bwd */ + unsigned char enable_IPX; /* Enable or Disable IPX */ + unsigned char inarp; /* Send Inverse ARP requests Y/N */ + unsigned inarp_interval; /* sec, between InARP requests */ + unsigned long network_number; /* Network Number for IPX */ + char mc; /* Multicast on or off */ + char local_addr[WAN_ADDRESS_SZ+1];/* local media address, ASCIIZ */ + unsigned char port; /* board port */ + unsigned char protocol; /* prococol used in this channel (TCPOX25 or X25) */ + char pap; /* PAP enabled or disabled */ + char chap; /* CHAP enabled or disabled */ + unsigned char userid[511]; /* List of User Id */ + unsigned char passwd[511]; /* List of passwords */ + unsigned char sysname[31]; /* Name of the system */ + unsigned char ignore_dcd; /* Protocol options: */ + unsigned char ignore_cts; /* Ignore these to determine */ + unsigned char ignore_keepalive; /* link status (Yes or No) */ + unsigned char hdlc_streaming; /* Hdlc streaming mode (Y/N) */ + unsigned keepalive_tx_tmr; /* transmit keepalive timer */ + unsigned keepalive_rx_tmr; /* receive keepalive timer */ + unsigned keepalive_err_margin; /* keepalive_error_tolerance */ + unsigned slarp_timer; /* SLARP request timer */ + unsigned char ttl; /* Time To Live for UDP security */ + char interface; /* RS-232/V.35, etc. */ + char clocking; /* external/internal */ + unsigned bps; /* data transfer rate */ + unsigned mtu; /* maximum transmit unit size */ + unsigned char if_down; /* brind down interface when disconnected */ + unsigned char gateway; /* Is this interface a gateway */ + unsigned char true_if_encoding; /* Set the dev->type to true board protocol */ + + unsigned char asy_data_trans; /* async API options */ + unsigned char rts_hs_for_receive; /* async Protocol options */ + unsigned char xon_xoff_hs_for_receive; + unsigned char xon_xoff_hs_for_transmit; + unsigned char dcd_hs_for_transmit; + unsigned char cts_hs_for_transmit; + unsigned char async_mode; + unsigned tx_bits_per_char; + unsigned rx_bits_per_char; + unsigned stop_bits; + unsigned char parity; + unsigned break_timer; + unsigned inter_char_timer; + unsigned rx_complete_length; + unsigned xon_char; + unsigned xoff_char; + unsigned char receive_only; /* no transmit buffering (Y/N) */ +} wanif_conf_t; + +#ifdef __KERNEL__ +/****** Kernel Interface ****************************************************/ + +#include <linux/fs.h> /* support for device drivers */ +#include <linux/proc_fs.h> /* proc filesystem pragmatics */ +#include <linux/netdevice.h> /* support for network drivers */ +#include <linux/spinlock.h> /* Support for SMP Locking */ + +/*---------------------------------------------------------------------------- + * WAN device data space. 
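+ *
+ * A low-level driver typically fills one of these in, wires up its
+ * management callbacks and hands it to register_wan_device(), declared
+ * below.  Rough sketch (the mydrv_* names are hypothetical):
+ *
+ *	static struct wan_device mydrv_wandev = {
+ *		.magic  = ROUTER_MAGIC,
+ *		.name   = "mydrv",
+ *		.setup  = mydrv_setup,
+ *		.new_if = mydrv_new_if,
+ *		.del_if = mydrv_del_if,
+ *	};
+ *
+ *	err = register_wan_device(&mydrv_wandev);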
+ */ +struct wan_device { + unsigned magic; /* magic number */ + char* name; /* -> WAN device name (ASCIIZ) */ + void* private; /* -> driver private data */ + unsigned config_id; /* Configuration ID */ + /****** hardware configuration ******/ + unsigned ioport; /* adapter I/O port base #1 */ + char S514_cpu_no[1]; /* PCI CPU Number */ + unsigned char S514_slot_no; /* PCI Slot Number */ + unsigned long maddr; /* dual-port memory address */ + unsigned msize; /* dual-port memory size */ + int irq; /* interrupt request level */ + int dma; /* DMA request level */ + unsigned bps; /* data transfer rate */ + unsigned mtu; /* max physical transmit unit size */ + unsigned udp_port; /* UDP port for management */ + unsigned char ttl; /* Time To Live for UDP security */ + unsigned enable_tx_int; /* Transmit Interrupt enabled or not */ + char interface; /* RS-232/V.35, etc. */ + char clocking; /* external/internal */ + char line_coding; /* NRZ/NRZI/FM0/FM1, etc. */ + char station; /* DTE/DCE, primary/secondary, etc. */ + char connection; /* permanent/switched/on-demand */ + char signalling; /* Signalling RS232 or V35 */ + char read_mode; /* read mode: Polling or interrupt */ + char new_if_cnt; /* Number of interfaces per wanpipe */ + char del_if_cnt; /* Number of times del_if() gets called */ + unsigned char piggyback; /* Piggibacking a port */ + unsigned hw_opt[4]; /* other hardware options */ + /****** status and statistics *******/ + char state; /* device state */ + char api_status; /* device api status */ + struct net_device_stats stats; /* interface statistics */ + unsigned reserved[16]; /* reserved for future use */ + unsigned long critical; /* critical section flag */ + spinlock_t lock; /* Support for SMP Locking */ + + /****** device management methods ***/ + int (*setup) (struct wan_device *wandev, wandev_conf_t *conf); + int (*shutdown) (struct wan_device *wandev); + int (*update) (struct wan_device *wandev); + int (*ioctl) (struct wan_device *wandev, unsigned cmd, + unsigned long arg); + int (*new_if)(struct wan_device *wandev, struct net_device *dev, + wanif_conf_t *conf); + int (*del_if)(struct wan_device *wandev, struct net_device *dev); + /****** maintained by the router ****/ + struct wan_device* next; /* -> next device */ + struct net_device* dev; /* list of network interfaces */ + unsigned ndev; /* number of interfaces */ + struct proc_dir_entry *dent; /* proc filesystem entry */ +}; + +/* Public functions available for device drivers */ +extern int register_wan_device(struct wan_device *wandev); +extern int unregister_wan_device(char *name); + +/* Proc interface functions. These must not be called by the drivers! */ +extern int wanrouter_proc_init(void); +extern void wanrouter_proc_cleanup(void); +extern int wanrouter_proc_add(struct wan_device *wandev); +extern int wanrouter_proc_delete(struct wan_device *wandev); +extern long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg); + +/* Public Data */ +/* list of registered devices */ +extern struct wan_device *wanrouter_router_devlist; + +#endif /* __KERNEL__ */ +#endif /* _ROUTER_H */ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h new file mode 100644 index 00000000..ac40716b --- /dev/null +++ b/include/linux/watchdog.h @@ -0,0 +1,155 @@ +/* + * Generic watchdog defines. Derived from.. 
+ * + * Berkshire PC Watchdog Defines + * by Ken Hollis <khollis@bitgate.com> + * + */ + +#ifndef _LINUX_WATCHDOG_H +#define _LINUX_WATCHDOG_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define WATCHDOG_IOCTL_BASE 'W' + +struct watchdog_info { + __u32 options; /* Options the card/driver supports */ + __u32 firmware_version; /* Firmware version of the card */ + __u8 identity[32]; /* Identity of the board */ +}; + +#define WDIOC_GETSUPPORT _IOR(WATCHDOG_IOCTL_BASE, 0, struct watchdog_info) +#define WDIOC_GETSTATUS _IOR(WATCHDOG_IOCTL_BASE, 1, int) +#define WDIOC_GETBOOTSTATUS _IOR(WATCHDOG_IOCTL_BASE, 2, int) +#define WDIOC_GETTEMP _IOR(WATCHDOG_IOCTL_BASE, 3, int) +#define WDIOC_SETOPTIONS _IOR(WATCHDOG_IOCTL_BASE, 4, int) +#define WDIOC_KEEPALIVE _IOR(WATCHDOG_IOCTL_BASE, 5, int) +#define WDIOC_SETTIMEOUT _IOWR(WATCHDOG_IOCTL_BASE, 6, int) +#define WDIOC_GETTIMEOUT _IOR(WATCHDOG_IOCTL_BASE, 7, int) +#define WDIOC_SETPRETIMEOUT _IOWR(WATCHDOG_IOCTL_BASE, 8, int) +#define WDIOC_GETPRETIMEOUT _IOR(WATCHDOG_IOCTL_BASE, 9, int) +#define WDIOC_GETTIMELEFT _IOR(WATCHDOG_IOCTL_BASE, 10, int) + +#define WDIOF_UNKNOWN -1 /* Unknown flag error */ +#define WDIOS_UNKNOWN -1 /* Unknown status error */ + +#define WDIOF_OVERHEAT 0x0001 /* Reset due to CPU overheat */ +#define WDIOF_FANFAULT 0x0002 /* Fan failed */ +#define WDIOF_EXTERN1 0x0004 /* External relay 1 */ +#define WDIOF_EXTERN2 0x0008 /* External relay 2 */ +#define WDIOF_POWERUNDER 0x0010 /* Power bad/power fault */ +#define WDIOF_CARDRESET 0x0020 /* Card previously reset the CPU */ +#define WDIOF_POWEROVER 0x0040 /* Power over voltage */ +#define WDIOF_SETTIMEOUT 0x0080 /* Set timeout (in seconds) */ +#define WDIOF_MAGICCLOSE 0x0100 /* Supports magic close char */ +#define WDIOF_PRETIMEOUT 0x0200 /* Pretimeout (in seconds), get/set */ +#define WDIOF_KEEPALIVEPING 0x8000 /* Keep alive ping reply */ + +#define WDIOS_DISABLECARD 0x0001 /* Turn off the watchdog timer */ +#define WDIOS_ENABLECARD 0x0002 /* Turn on the watchdog timer */ +#define WDIOS_TEMPPANIC 0x0004 /* Kernel panic on temperature trip */ + +#ifdef __KERNEL__ + +#include <linux/bitops.h> + +struct watchdog_ops; +struct watchdog_device; + +/** struct watchdog_ops - The watchdog-devices operations + * + * @owner: The module owner. + * @start: The routine for starting the watchdog device. + * @stop: The routine for stopping the watchdog device. + * @ping: The routine that sends a keepalive ping to the watchdog device. + * @status: The routine that shows the status of the watchdog device. + * @set_timeout:The routine for setting the watchdog devices timeout value. + * @get_timeleft:The routine that get's the time that's left before a reset. + * @ioctl: The routines that handles extra ioctl calls. + * + * The watchdog_ops structure contains a list of low-level operations + * that control a watchdog device. It also contains the module that owns + * these operations. The start and stop function are mandatory, all other + * functions are optonal. 
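+ *
+ * A minimal driver therefore provides something like the following and
+ * registers it with watchdog_register_device(), declared further down
+ * (illustrative sketch; the mydrv_* names are hypothetical):
+ *
+ *	static const struct watchdog_ops mydrv_wdt_ops = {
+ *		.owner = THIS_MODULE,
+ *		.start = mydrv_wdt_start,
+ *		.stop  = mydrv_wdt_stop,
+ *	};
+ *
+ *	static struct watchdog_device mydrv_wdt_dev = {
+ *		.info = &mydrv_wdt_info,
+ *		.ops  = &mydrv_wdt_ops,
+ *		.min_timeout = 1,
+ *		.max_timeout = 120,
+ *	};
+ *
+ *	ret = watchdog_register_device(&mydrv_wdt_dev);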
+ */ +struct watchdog_ops { + struct module *owner; + /* mandatory operations */ + int (*start)(struct watchdog_device *); + int (*stop)(struct watchdog_device *); + /* optional operations */ + int (*ping)(struct watchdog_device *); + unsigned int (*status)(struct watchdog_device *); + int (*set_timeout)(struct watchdog_device *, unsigned int); + unsigned int (*get_timeleft)(struct watchdog_device *); + long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long); +}; + +/** struct watchdog_device - The structure that defines a watchdog device + * + * @info: Pointer to a watchdog_info structure. + * @ops: Pointer to the list of watchdog operations. + * @bootstatus: Status of the watchdog device at boot. + * @timeout: The watchdog devices timeout value. + * @min_timeout:The watchdog devices minimum timeout value. + * @max_timeout:The watchdog devices maximum timeout value. + * @driver-data:Pointer to the drivers private data. + * @status: Field that contains the devices internal status bits. + * + * The watchdog_device structure contains all information about a + * watchdog timer device. + * + * The driver-data field may not be accessed directly. It must be accessed + * via the watchdog_set_drvdata and watchdog_get_drvdata helpers. + */ +struct watchdog_device { + const struct watchdog_info *info; + const struct watchdog_ops *ops; + unsigned int bootstatus; + unsigned int timeout; + unsigned int min_timeout; + unsigned int max_timeout; + void *driver_data; + unsigned long status; +/* Bit numbers for status flags */ +#define WDOG_ACTIVE 0 /* Is the watchdog running/active */ +#define WDOG_DEV_OPEN 1 /* Opened via /dev/watchdog ? */ +#define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */ +#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */ +}; + +#ifdef CONFIG_WATCHDOG_NOWAYOUT +#define WATCHDOG_NOWAYOUT 1 +#define WATCHDOG_NOWAYOUT_INIT_STATUS (1 << WDOG_NO_WAY_OUT) +#else +#define WATCHDOG_NOWAYOUT 0 +#define WATCHDOG_NOWAYOUT_INIT_STATUS 0 +#endif + +/* Use the following function to set the nowayout feature */ +static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout) +{ + if (nowayout) + set_bit(WDOG_NO_WAY_OUT, &wdd->status); +} + +/* Use the following functions to manipulate watchdog driver specific data */ +static inline void watchdog_set_drvdata(struct watchdog_device *wdd, void *data) +{ + wdd->driver_data = data; +} + +static inline void *watchdog_get_drvdata(struct watchdog_device *wdd) +{ + return wdd->driver_data; +} + +/* drivers/watchdog/core/watchdog_core.c */ +extern int watchdog_register_device(struct watchdog_device *); +extern void watchdog_unregister_device(struct watchdog_device *); + +#endif /* __KERNEL__ */ + +#endif /* ifndef _LINUX_WATCHDOG_H */ diff --git a/include/linux/wifi_tiwlan.h b/include/linux/wifi_tiwlan.h new file mode 100644 index 00000000..f07e0679 --- /dev/null +++ b/include/linux/wifi_tiwlan.h @@ -0,0 +1,27 @@ +/* include/linux/wifi_tiwlan.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _LINUX_WIFI_TIWLAN_H_ +#define _LINUX_WIFI_TIWLAN_H_ + +#include <linux/wlan_plat.h> + +#define WMPA_NUMBER_OF_SECTIONS 3 +#define WMPA_NUMBER_OF_BUFFERS 160 +#define WMPA_SECTION_HEADER 24 +#define WMPA_SECTION_SIZE_0 (WMPA_NUMBER_OF_BUFFERS * 64) +#define WMPA_SECTION_SIZE_1 (WMPA_NUMBER_OF_BUFFERS * 256) +#define WMPA_SECTION_SIZE_2 (WMPA_NUMBER_OF_BUFFERS * 2048) + +#endif diff --git a/include/linux/wimax.h b/include/linux/wimax.h new file mode 100644 index 00000000..9f6b77af --- /dev/null +++ b/include/linux/wimax.h @@ -0,0 +1,239 @@ +/* + * Linux WiMax + * API for user space + * + * + * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation <linux-wimax@intel.com> + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * - Initial implementation + * + * + * This file declares the user/kernel protocol that is spoken over + * Generic Netlink, as well as any type declaration that is to be used + * by kernel and user space. + * + * It is intended for user space to clone it verbatim to use it as a + * primary reference for definitions. + * + * Stuff intended for kernel usage as well as full protocol and stack + * documentation is rooted in include/net/wimax.h. + */ + +#ifndef __LINUX__WIMAX_H__ +#define __LINUX__WIMAX_H__ + +#include <linux/types.h> + +enum { + /** + * Version of the interface (unsigned decimal, MMm, max 25.5) + * M - Major: change if removing or modifying an existing call. + * m - minor: change when adding a new call + */ + WIMAX_GNL_VERSION = 01, + /* Generic NetLink attributes */ + WIMAX_GNL_ATTR_INVALID = 0x00, + WIMAX_GNL_ATTR_MAX = 10, +}; + + +/* + * Generic NetLink operations + * + * Most of these map to an API call; _OP_ stands for operation, _RP_ + * for reply and _RE_ for report (aka: signal). 
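+ *
+ * For instance, WIMAX_GNL_OP_RFKILL carries the interface index and the
+ * desired radio state as attributes and ends up in the kernel-side
+ * wimax_rfkill() call (see include/net/wimax.h); schematically:
+ *
+ *	result = wimax_rfkill(wimax_dev, WIMAX_RF_QUERY);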
+ */ +enum { + WIMAX_GNL_OP_MSG_FROM_USER, /* User to kernel message */ + WIMAX_GNL_OP_MSG_TO_USER, /* Kernel to user message */ + WIMAX_GNL_OP_RFKILL, /* Run wimax_rfkill() */ + WIMAX_GNL_OP_RESET, /* Run wimax_rfkill() */ + WIMAX_GNL_RE_STATE_CHANGE, /* Report: status change */ + WIMAX_GNL_OP_STATE_GET, /* Request for current state */ +}; + + +/* Message from user / to user */ +enum { + WIMAX_GNL_MSG_IFIDX = 1, + WIMAX_GNL_MSG_PIPE_NAME, + WIMAX_GNL_MSG_DATA, +}; + + +/* + * wimax_rfkill() + * + * The state of the radio (ON/OFF) is mapped to the rfkill subsystem's + * switch state (DISABLED/ENABLED). + */ +enum wimax_rf_state { + WIMAX_RF_OFF = 0, /* Radio is off, rfkill on/enabled */ + WIMAX_RF_ON = 1, /* Radio is on, rfkill off/disabled */ + WIMAX_RF_QUERY = 2, +}; + +/* Attributes */ +enum { + WIMAX_GNL_RFKILL_IFIDX = 1, + WIMAX_GNL_RFKILL_STATE, +}; + + +/* Attributes for wimax_reset() */ +enum { + WIMAX_GNL_RESET_IFIDX = 1, +}; + +/* Attributes for wimax_state_get() */ +enum { + WIMAX_GNL_STGET_IFIDX = 1, +}; + +/* + * Attributes for the Report State Change + * + * For now we just have the old and new states; new attributes might + * be added later on. + */ +enum { + WIMAX_GNL_STCH_IFIDX = 1, + WIMAX_GNL_STCH_STATE_OLD, + WIMAX_GNL_STCH_STATE_NEW, +}; + + +/** + * enum wimax_st - The different states of a WiMAX device + * @__WIMAX_ST_NULL: The device structure has been allocated and zeroed, + * but still wimax_dev_add() hasn't been called. There is no state. + * + * @WIMAX_ST_DOWN: The device has been registered with the WiMAX and + * networking stacks, but it is not initialized (normally that is + * done with 'ifconfig DEV up' [or equivalent], which can upload + * firmware and enable communications with the device). + * In this state, the device is powered down and using as less + * power as possible. + * This state is the default after a call to wimax_dev_add(). It + * is ok to have drivers move directly to %WIMAX_ST_UNINITIALIZED + * or %WIMAX_ST_RADIO_OFF in _probe() after the call to + * wimax_dev_add(). + * It is recommended that the driver leaves this state when + * calling 'ifconfig DEV up' and enters it back on 'ifconfig DEV + * down'. + * + * @__WIMAX_ST_QUIESCING: The device is being torn down, so no API + * operations are allowed to proceed except the ones needed to + * complete the device clean up process. + * + * @WIMAX_ST_UNINITIALIZED: [optional] Communication with the device + * is setup, but the device still requires some configuration + * before being operational. + * Some WiMAX API calls might work. + * + * @WIMAX_ST_RADIO_OFF: The device is fully up; radio is off (wether + * by hardware or software switches). + * It is recommended to always leave the device in this state + * after initialization. + * + * @WIMAX_ST_READY: The device is fully up and radio is on. + * + * @WIMAX_ST_SCANNING: [optional] The device has been instructed to + * scan. In this state, the device cannot be actively connected to + * a network. + * + * @WIMAX_ST_CONNECTING: The device is connecting to a network. This + * state exists because in some devices, the connect process can + * include a number of negotiations between user space, kernel + * space and the device. User space needs to know what the device + * is doing. If the connect sequence in a device is atomic and + * fast, the device can transition directly to CONNECTED + * + * @WIMAX_ST_CONNECTED: The device is connected to a network. 
+ * + * @__WIMAX_ST_INVALID: This is an invalid state used to mark the + * maximum numeric value of states. + * + * Description: + * + * Transitions from one state to another one are atomic and can only + * be caused in kernel space with wimax_state_change(). To read the + * state, use wimax_state_get(). + * + * States starting with __ are internal and shall not be used or + * referred to by drivers or userspace. They look ugly, but that's the + * point -- if any use is made non-internal to the stack, it is easier + * to catch on review. + * + * All API operations [with well defined exceptions] will take the + * device mutex before starting and then check the state. If the state + * is %__WIMAX_ST_NULL, %WIMAX_ST_DOWN, %WIMAX_ST_UNINITIALIZED or + * %__WIMAX_ST_QUIESCING, it will drop the lock and quit with + * -%EINVAL, -%ENOMEDIUM, -%ENOTCONN or -%ESHUTDOWN. + * + * The order of the definitions is important, so we can do numerical + * comparisons (eg: < %WIMAX_ST_RADIO_OFF means the device is not ready + * to operate). + */ +/* + * The allowed state transitions are described in the table below + * (states in rows can go to states in columns where there is an X): + * + * UNINI RADIO READY SCAN CONNEC CONNEC + * NULL DOWN QUIESCING TIALIZED OFF NING TING TED + * NULL - x + * DOWN - x x x + * QUIESCING x - + * UNINITIALIZED x - x + * RADIO_OFF x - x + * READY x x - x x x + * SCANNING x x x - x x + * CONNECTING x x x x - x + * CONNECTED x x x - + * + * This table not available in kernel-doc because the formatting messes it up. + */ + enum wimax_st { + __WIMAX_ST_NULL = 0, + WIMAX_ST_DOWN, + __WIMAX_ST_QUIESCING, + WIMAX_ST_UNINITIALIZED, + WIMAX_ST_RADIO_OFF, + WIMAX_ST_READY, + WIMAX_ST_SCANNING, + WIMAX_ST_CONNECTING, + WIMAX_ST_CONNECTED, + __WIMAX_ST_INVALID /* Always keep last */ +}; + + +#endif /* #ifndef __LINUX__WIMAX_H__ */ diff --git a/include/linux/wimax/Kbuild b/include/linux/wimax/Kbuild new file mode 100644 index 00000000..3cb4f269 --- /dev/null +++ b/include/linux/wimax/Kbuild @@ -0,0 +1 @@ +header-y += i2400m.h diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h new file mode 100644 index 00000000..aaf24ba1 --- /dev/null +++ b/include/linux/wimax/debug.h @@ -0,0 +1,526 @@ +/* + * Linux WiMAX + * Collection of tools to manage debug operations. + * + * + * Copyright (C) 2005-2007 Intel Corporation + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Don't #include this file directly, read on! + * + * + * EXECUTING DEBUGGING ACTIONS OR NOT + * + * The main thing this framework provides is decission power to take a + * debug action (like printing a message) if the current debug level + * allows it. + * + * The decission power is at two levels: at compile-time (what does + * not make it is compiled out) and at run-time. 
The run-time + * selection is done per-submodule (as they are declared by the user + * of the framework). + * + * A call to d_test(L) (L being the target debug level) returns true + * if the action should be taken because the current debug levels + * allow it (both compile and run time). + * + * It follows that a call to d_test() that can be determined to be + * always false at compile time will get the code depending on it + * compiled out by optimization. + * + * + * DEBUG LEVELS + * + * It is up to the caller to define how much a debugging level is. + * + * Convention sets 0 as "no debug" (so an action marked as debug level 0 + * will always be taken). The increasing debug levels are used for + * increased verbosity. + * + * + * USAGE + * + * Group the code in modules and submodules inside each module [which + * in most cases maps to Linux modules and .c files that compose + * those]. + * + * + * For each module, there is: + * + * - a MODULENAME (single word, legal C identifier) + * + * - a debug-levels.h header file that declares the list of + * submodules and that is included by all .c files that use + * the debugging tools. The file name can be anything. + * + * - some (optional) .c code to manipulate the runtime debug levels + * through debugfs. + * + * The debug-levels.h file would look like: + * + * #ifndef __debug_levels__h__ + * #define __debug_levels__h__ + * + * #define D_MODULENAME modulename + * #define D_MASTER 10 + * + * #include <linux/wimax/debug.h> + * + * enum d_module { + * D_SUBMODULE_DECLARE(submodule_1), + * D_SUBMODULE_DECLARE(submodule_2), + * ... + * D_SUBMODULE_DECLARE(submodule_N) + * }; + * + * #endif + * + * D_MASTER is the maximum compile-time debug level; any debug actions + * above this will be out. D_MODULENAME is the module name (legal C + * identifier), which has to be unique for each module (to avoid + * namespace collisions during linkage). Note those #defines need to + * be done before #including debug.h + * + * We declare N different submodules whose debug level can be + * independently controlled during runtime. + * + * In a .c file of the module (and only in one of them), define the + * following code: + * + * struct d_level D_LEVEL[] = { + * D_SUBMODULE_DEFINE(submodule_1), + * D_SUBMODULE_DEFINE(submodule_2), + * ... + * D_SUBMODULE_DEFINE(submodule_N), + * }; + * size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); + * + * Externs for d_level_MODULENAME and d_level_size_MODULENAME are used + * and declared in this file using the D_LEVEL and D_LEVEL_SIZE macros + * #defined also in this file. + * + * To manipulate from user space the levels, create a debugfs dentry + * and then register each submodule with: + * + * result = d_level_register_debugfs("PREFIX_", submodule_X, parent); + * if (result < 0) + * goto error; + * + * Where PREFIX_ is a name of your chosing. This will create debugfs + * file with a single numeric value that can be use to tweak it. To + * remove the entires, just use debugfs_remove_recursive() on 'parent'. + * + * NOTE: remember that even if this will show attached to some + * particular instance of a device, the settings are *global*. + * + * + * On each submodule (for example, .c files), the debug infrastructure + * should be included like this: + * + * #define D_SUBMODULE submodule_x // matches one in debug-levels.h + * #include "debug-levels.h" + * + * after #including all your include files. + * + * + * Now you can use the d_*() macros below [d_test(), d_fnstart(), + * d_fnend(), d_printf(), d_dump()]. 
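+ *
+ * A short usage sketch (debug levels picked arbitrarily):
+ *
+ *	d_fnstart(3, dev, "(dev %p)\n", dev);
+ *	d_printf(1, dev, "allocating %zu bytes\n", size);
+ *	d_fnend(3, dev, "(dev %p) = %d\n", dev, result);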
+ * + * If their debug level is greater than D_MASTER, they will be + * compiled out. + * + * If their debug level is lower or equal than D_MASTER but greater + * than the current debug level of their submodule, they'll be + * ignored. + * + * Otherwise, the action will be performed. + */ +#ifndef __debug__h__ +#define __debug__h__ + +#include <linux/types.h> +#include <linux/slab.h> + +struct device; + +/* Backend stuff */ + +/* + * Debug backend: generate a message header from a 'struct device' + * + * @head: buffer where to place the header + * @head_size: length of @head + * @dev: pointer to device used to generate a header from. If NULL, + * an empty ("") header is generated. + */ +static inline +void __d_head(char *head, size_t head_size, + struct device *dev) +{ + if (dev == NULL) + head[0] = 0; + else if ((unsigned long)dev < 4096) { + printk(KERN_ERR "E: Corrupt dev %p\n", dev); + WARN_ON(1); + } else + snprintf(head, head_size, "%s %s: ", + dev_driver_string(dev), dev_name(dev)); +} + + +/* + * Debug backend: log some message if debugging is enabled + * + * @l: intended debug level + * @tag: tag to prefix the message with + * @dev: 'struct device' associated to this message + * @f: printf-like format and arguments + * + * Note this is optimized out if it doesn't pass the compile-time + * check; however, it is *always* compiled. This is useful to make + * sure the printf-like formats and variables are always checked and + * they don't get bit rot if you have all the debugging disabled. + */ +#define _d_printf(l, tag, dev, f, a...) \ +do { \ + char head[64]; \ + if (!d_test(l)) \ + break; \ + __d_head(head, sizeof(head), dev); \ + printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \ +} while (0) + + +/* + * CPP sintatic sugar to generate A_B like symbol names when one of + * the arguments is a a preprocessor #define. + */ +#define __D_PASTE__(varname, modulename) varname##_##modulename +#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename)) +#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name)) + + +/* + * Store a submodule's runtime debug level and name + */ +struct d_level { + u8 level; + const char *name; +}; + + +/* + * List of available submodules and their debug levels + * + * We call them d_level_MODULENAME and d_level_size_MODULENAME; the + * macros D_LEVEL and D_LEVEL_SIZE contain the name already for + * convenience. + * + * This array and the size are defined on some .c file that is part of + * the current module. + */ +#define D_LEVEL __D_PASTE(d_level, D_MODULENAME) +#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME) + +extern struct d_level D_LEVEL[]; +extern size_t D_LEVEL_SIZE; + + +/* + * Frontend stuff + * + * + * Stuff you need to declare prior to using the actual "debug" actions + * (defined below). + */ + +#ifndef D_MODULENAME +#error D_MODULENAME is not defined in your debug-levels.h file +/** + * D_MODULE - Name of the current module + * + * #define in your module's debug-levels.h, making sure it is + * unique. This has to be a legal C identifier. + */ +#define D_MODULENAME undefined_modulename +#endif + + +#ifndef D_MASTER +#warning D_MASTER not defined, but debug.h included! [see docs] +/** + * D_MASTER - Compile time maximum debug level + * + * #define in your debug-levels.h file to the maximum debug level the + * runtime code will be allowed to have. This allows you to provide a + * main knob. + * + * Anything above that level will be optimized out of the compile. 
+ * + * Defaults to zero (no debug code compiled in). + * + * Maximum one definition per module (at the debug-levels.h file). + */ +#define D_MASTER 0 +#endif + +#ifndef D_SUBMODULE +#error D_SUBMODULE not defined, but debug.h included! [see docs] +/** + * D_SUBMODULE - Name of the current submodule + * + * #define in your submodule .c file before #including debug-levels.h + * to the name of the current submodule as previously declared and + * defined with D_SUBMODULE_DECLARE() (in your module's + * debug-levels.h) and D_SUBMODULE_DEFINE(). + * + * This is used to provide runtime-control over the debug levels. + * + * Maximum one per .c file! Can be shared among different .c files + * (meaning they belong to the same submodule categorization). + */ +#define D_SUBMODULE undefined_module +#endif + + +/** + * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control + * + * @_name: name of the submodule, restricted to the chars that make up a + * valid C identifier ([a-zA-Z0-9_]). + * + * Declare in the module's debug-levels.h header file as: + * + * enum d_module { + * D_SUBMODULE_DECLARE(submodule_1), + * D_SUBMODULE_DECLARE(submodule_2), + * D_SUBMODULE_DECLARE(submodule_3), + * }; + * + * Some corresponding .c file needs to have a matching + * D_SUBMODULE_DEFINE(). + */ +#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name + + +/** + * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control + * + * @_name: name of the submodule, restricted to the chars that make up a + * valid C identifier ([a-zA-Z0-9_]). + * + * Use once per module (in some .c file) as: + * + * static + * struct d_level d_level_SUBMODULENAME[] = { + * D_SUBMODULE_DEFINE(submodule_1), + * D_SUBMODULE_DEFINE(submodule_2), + * D_SUBMODULE_DEFINE(submodule_3), + * }; + * size_t d_level_size_SUBDMODULENAME = ARRAY_SIZE(d_level_SUBDMODULENAME); + * + * Matching D_SUBMODULE_DECLARE()s have to be present in a + * debug-levels.h header file. + */ +#define D_SUBMODULE_DEFINE(_name) \ +[__D_SUBMODULE_##_name] = { \ + .level = 0, \ + .name = #_name \ +} + + + +/* The actual "debug" operations */ + + +/** + * d_test - Returns true if debugging should be enabled + * + * @l: intended debug level (unsigned) + * + * If the master debug switch is enabled and the current settings are + * higher or equal to the requested level, then debugging + * output/actions should be enabled. + * + * NOTE: + * + * This needs to be coded so that it can be evaluated in compile + * time; this is why the ugly BUG_ON() is placed in there, so the + * D_MASTER evaluation compiles all out if it is compile-time false. + */ +#define d_test(l) \ +({ \ + unsigned __l = l; /* type enforcer */ \ + (D_MASTER) >= __l \ + && ({ \ + BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\ + D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l; \ + }); \ +}) + + +/** + * d_fnstart - log message at function start if debugging enabled + * + * @l: intended debug level + * @_dev: 'struct device' pointer, NULL if none (for context) + * @f: printf-like format and arguments + */ +#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a) + + +/** + * d_fnend - log message at function end if debugging enabled + * + * @l: intended debug level + * @_dev: 'struct device' pointer, NULL if none (for context) + * @f: printf-like format and arguments + */ +#define d_fnend(l, _dev, f, a...) 
_d_printf(l, " FNEND", _dev, f, ## a) + + +/** + * d_printf - log message if debugging enabled + * + * @l: intended debug level + * @_dev: 'struct device' pointer, NULL if none (for context) + * @f: printf-like format and arguments + */ +#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a) + + +/** + * d_dump - log buffer hex dump if debugging enabled + * + * @l: intended debug level + * @_dev: 'struct device' pointer, NULL if none (for context) + * @f: printf-like format and arguments + */ +#define d_dump(l, dev, ptr, size) \ +do { \ + char head[64]; \ + if (!d_test(l)) \ + break; \ + __d_head(head, sizeof(head), dev); \ + print_hex_dump(KERN_ERR, head, 0, 16, 1, \ + ((void *) ptr), (size), 0); \ +} while (0) + + +/** + * Export a submodule's debug level over debugfs as PREFIXSUBMODULE + * + * @prefix: string to prefix the name with + * @submodule: name of submodule (not a string, just the name) + * @dentry: debugfs parent dentry + * + * Returns: 0 if ok, < 0 errno on error. + * + * For removing, just use debugfs_remove_recursive() on the parent. + */ +#define d_level_register_debugfs(prefix, name, parent) \ +({ \ + int rc; \ + struct dentry *fd; \ + struct dentry *verify_parent_type = parent; \ + fd = debugfs_create_u8( \ + prefix #name, 0600, verify_parent_type, \ + &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \ + rc = PTR_ERR(fd); \ + if (IS_ERR(fd) && rc != -ENODEV) \ + printk(KERN_ERR "%s: Can't create debugfs entry %s: " \ + "%d\n", __func__, prefix #name, rc); \ + else \ + rc = 0; \ + rc; \ +}) + + +static inline +void d_submodule_set(struct d_level *d_level, size_t d_level_size, + const char *submodule, u8 level, const char *tag) +{ + struct d_level *itr, *top; + int index = -1; + + for (itr = d_level, top = itr + d_level_size; itr < top; itr++) { + index++; + if (itr->name == NULL) { + printk(KERN_ERR "%s: itr->name NULL?? (%p, #%d)\n", + tag, itr, index); + continue; + } + if (!strcmp(itr->name, submodule)) { + itr->level = level; + return; + } + } + printk(KERN_ERR "%s: unknown submodule %s\n", tag, submodule); +} + + +/** + * d_parse_params - Parse a string with debug parameters from the + * command line + * + * @d_level: level structure (D_LEVEL) + * @d_level_size: number of items in the level structure + * (D_LEVEL_SIZE). + * @_params: string with the parameters; this is a space (not tab!) + * separated list of NAME:VALUE, where value is the debug level + * and NAME is the name of the submodule. + * @tag: string for error messages (example: MODULE.ARGNAME). 
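+ *
+ * Typical (illustrative) use is on a module parameter string at module
+ * init time, for example:
+ *
+ *	static char *debug_params;
+ *	module_param_named(debug, debug_params, charp, 0644);
+ *	...
+ *	d_parse_params(D_LEVEL, D_LEVEL_SIZE, debug_params, "mymodule.debug");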
+ */ +static inline +void d_parse_params(struct d_level *d_level, size_t d_level_size, + const char *_params, const char *tag) +{ + char submodule[130], *params, *params_orig, *token, *colon; + unsigned level, tokens; + + if (_params == NULL) + return; + params_orig = kstrdup(_params, GFP_KERNEL); + params = params_orig; + while (1) { + token = strsep(¶ms, " "); + if (token == NULL) + break; + if (*token == '\0') /* eat joint spaces */ + continue; + /* kernel's sscanf %s eats until whitespace, so we + * replace : by \n so it doesn't get eaten later by + * strsep */ + colon = strchr(token, ':'); + if (colon != NULL) + *colon = '\n'; + tokens = sscanf(token, "%s\n%u", submodule, &level); + if (colon != NULL) + *colon = ':'; /* set back, for error messages */ + if (tokens == 2) + d_submodule_set(d_level, d_level_size, + submodule, level, tag); + else + printk(KERN_ERR "%s: can't parse '%s' as a " + "SUBMODULE:LEVEL (%d tokens)\n", + tag, token, tokens); + } + kfree(params_orig); +} + +#endif /* #ifndef __debug__h__ */ diff --git a/include/linux/wimax/i2400m.h b/include/linux/wimax/i2400m.h new file mode 100644 index 00000000..62d35615 --- /dev/null +++ b/include/linux/wimax/i2400m.h @@ -0,0 +1,572 @@ +/* + * Intel Wireless WiMax Connection 2400m + * Host-Device protocol interface definitions + * + * + * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation <linux-wimax@intel.com> + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * - Initial implementation + * + * + * This header defines the data structures and constants used to + * communicate with the device. + * + * BOOTMODE/BOOTROM/FIRMWARE UPLOAD PROTOCOL + * + * The firmware upload protocol is quite simple and only requires a + * handful of commands. See drivers/net/wimax/i2400m/fw.c for more + * details. + * + * The BCF data structure is for the firmware file header. 
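+ *
+ * As an illustration only, a boot mode command word is composed from the
+ * I2400M_BRH_* constants defined below, roughly along the lines of:
+ *
+ *	cmd = I2400M_BRH_SIGNATURE
+ *	      | (opcode & I2400M_BRH_OPCODE_MASK)
+ *	      | I2400M_BRH_RESPONSE_REQUIRED
+ *	      | I2400M_BRH_USE_CHECKSUM;
+ *	bch.command = cpu_to_le32(cmd);	/* struct i2400m_bootrom_header */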
+ * + * + * THE DATA / CONTROL PROTOCOL + * + * This is the normal protocol spoken with the device once the + * firmware is uploaded. It transports data payloads and control + * messages back and forth. + * + * It consists of 'messages' that pack one or more payloads each. The + * format is described in detail in drivers/net/wimax/i2400m/rx.c and + * tx.c. + * + * + * THE L3L4 PROTOCOL + * + * The term L3L4 refers to Layer 3 (the device), Layer 4 (the + * driver/host software). + * + * This is the control protocol used by the host to control the i2400m + * device (scan, connect, disconnect...). This is sent to / received + * as control frames. These frames consist of a header and zero or + * more TLVs with information. We call each control frame a "message". + * + * Each message is composed of: + * + * HEADER + * [TLV0 + PAYLOAD0] + * [TLV1 + PAYLOAD1] + * [...] + * [TLVN + PAYLOADN] + * + * The HEADER is defined by 'struct i2400m_l3l4_hdr'. The payloads are + * defined by a TLV structure (Type Length Value) which is a 'header' + * (struct i2400m_tlv_hdr) and then the payload. + * + * All integers are represented as Little Endian. + * + * - REQUESTS AND EVENTS + * + * The requests can be classified as follows: + * + * COMMAND: implies a request from the host to the device requesting + * that an action be performed. The device will reply with a + * message (with the same type as the command), status and + * no (TLV) payload. Execution of a command might cause + * events (of different type) to be sent later on as + * device's state changes. + * + * GET/SET: similar to COMMAND, but will not cause other + * EVENTs. The reply, in the case of GET, will contain + * TLVs with the requested information. + * + * EVENT: asynchronous messages sent from the device, maybe as a + * consequence of previous COMMANDs but disassociated from + * them. + * + * Only one request might be pending at the same time (i.e.: don't + * parallelize nor post another GET request before the previous + * COMMAND has been acknowledged with its corresponding reply by the + * device). + * + * The different requests and their formats are described below: + * + * I2400M_MT_* Message types + * I2400M_MS_* Message status (for replies, events) + * i2400m_tlv_* TLVs + * + * Data types are named 'struct i2400m_msg_OPNAME', OPNAME matching the + * operation.
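+ *
+ * For example (illustrative only), a bare GET request with no TLV
+ * payload would carry just a header such as:
+ *
+ *	struct i2400m_l3l4_hdr hdr = {
+ *		.type    = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO),
+ *		.length  = 0,
+ *		.version = cpu_to_le16(I2400M_L3L4_VERSION),
+ *	};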
+ */ + +#ifndef __LINUX__WIMAX__I2400M_H__ +#define __LINUX__WIMAX__I2400M_H__ + +#include <linux/types.h> + + +/* + * Host Device Interface (HDI) common to all busses + */ + +/* Boot-mode (firmware upload mode) commands */ + +/* Header for the firmware file */ +struct i2400m_bcf_hdr { + __le32 module_type; + __le32 header_len; + __le32 header_version; + __le32 module_id; + __le32 module_vendor; + __le32 date; /* BCD YYYMMDD */ + __le32 size; /* in dwords */ + __le32 key_size; /* in dwords */ + __le32 modulus_size; /* in dwords */ + __le32 exponent_size; /* in dwords */ + __u8 reserved[88]; +} __attribute__ ((packed)); + +/* Boot mode opcodes */ +enum i2400m_brh_opcode { + I2400M_BRH_READ = 1, + I2400M_BRH_WRITE = 2, + I2400M_BRH_JUMP = 3, + I2400M_BRH_SIGNED_JUMP = 8, + I2400M_BRH_HASH_PAYLOAD_ONLY = 9, +}; + +/* Boot mode command masks and stuff */ +enum i2400m_brh { + I2400M_BRH_SIGNATURE = 0xcbbc0000, + I2400M_BRH_SIGNATURE_MASK = 0xffff0000, + I2400M_BRH_SIGNATURE_SHIFT = 16, + I2400M_BRH_OPCODE_MASK = 0x0000000f, + I2400M_BRH_RESPONSE_MASK = 0x000000f0, + I2400M_BRH_RESPONSE_SHIFT = 4, + I2400M_BRH_DIRECT_ACCESS = 0x00000400, + I2400M_BRH_RESPONSE_REQUIRED = 0x00000200, + I2400M_BRH_USE_CHECKSUM = 0x00000100, +}; + + +/** + * i2400m_bootrom_header - Header for a boot-mode command + * + * @cmd: the above command descriptor + * @target_addr: where on the device memory should the action be performed. + * @data_size: for read/write, amount of data to be read/written + * @block_checksum: checksum value (if applicable) + * @payload: the beginning of data attached to this header + */ +struct i2400m_bootrom_header { + __le32 command; /* Compose with enum i2400_brh */ + __le32 target_addr; + __le32 data_size; + __le32 block_checksum; + char payload[0]; +} __attribute__ ((packed)); + + +/* + * Data / control protocol + */ + +/* Packet types for the host-device interface */ +enum i2400m_pt { + I2400M_PT_DATA = 0, + I2400M_PT_CTRL, + I2400M_PT_TRACE, /* For device debug */ + I2400M_PT_RESET_WARM, /* device reset */ + I2400M_PT_RESET_COLD, /* USB[transport] reset, like reconnect */ + I2400M_PT_EDATA, /* Extended RX data */ + I2400M_PT_ILLEGAL +}; + + +/* + * Payload for a data packet + * + * This is prefixed to each and every outgoing DATA type. + */ +struct i2400m_pl_data_hdr { + __le32 reserved; +} __attribute__((packed)); + + +/* + * Payload for an extended data packet + * + * New in fw v1.4 + * + * @reorder: if this payload has to be reorder or not (and how) + * @cs: the type of data in the packet, as defined per (802.16e + * T11.13.19.1). Currently only 2 (IPv4 packet) supported. + * + * This is prefixed to each and every INCOMING DATA packet. + */ +struct i2400m_pl_edata_hdr { + __le32 reorder; /* bits defined in i2400m_ro */ + __u8 cs; + __u8 reserved[11]; +} __attribute__((packed)); + +enum i2400m_cs { + I2400M_CS_IPV4_0 = 0, + I2400M_CS_IPV4 = 2, +}; + +enum i2400m_ro { + I2400M_RO_NEEDED = 0x01, + I2400M_RO_TYPE = 0x03, + I2400M_RO_TYPE_SHIFT = 1, + I2400M_RO_CIN = 0x0f, + I2400M_RO_CIN_SHIFT = 4, + I2400M_RO_FBN = 0x07ff, + I2400M_RO_FBN_SHIFT = 8, + I2400M_RO_SN = 0x07ff, + I2400M_RO_SN_SHIFT = 21, +}; + +enum i2400m_ro_type { + I2400M_RO_TYPE_RESET = 0, + I2400M_RO_TYPE_PACKET, + I2400M_RO_TYPE_WS, + I2400M_RO_TYPE_PACKET_WS, +}; + + +/* Misc constants */ +enum { + I2400M_PL_ALIGN = 16, /* Payload data size alignment */ + I2400M_PL_SIZE_MAX = 0x3EFF, + I2400M_MAX_PLS_IN_MSG = 60, + /* protocol barkers: sync sequences; for notifications they + * are sent in groups of four. 
*/ + I2400M_H2D_PREVIEW_BARKER = 0xcafe900d, + I2400M_COLD_RESET_BARKER = 0xc01dc01d, + I2400M_WARM_RESET_BARKER = 0x50f750f7, + I2400M_NBOOT_BARKER = 0xdeadbeef, + I2400M_SBOOT_BARKER = 0x0ff1c1a1, + I2400M_SBOOT_BARKER_6050 = 0x80000001, + I2400M_ACK_BARKER = 0xfeedbabe, + I2400M_D2H_MSG_BARKER = 0xbeefbabe, +}; + + +/* + * Hardware payload descriptor + * + * Bitfields encoded in a struct to enforce typing semantics. + * + * Look in rx.c and tx.c for a full description of the format. + */ +struct i2400m_pld { + __le32 val; +} __attribute__ ((packed)); + +#define I2400M_PLD_SIZE_MASK 0x00003fff +#define I2400M_PLD_TYPE_SHIFT 16 +#define I2400M_PLD_TYPE_MASK 0x000f0000 + +/* + * Header for a TX message or RX message + * + * @barker: preamble + * @size: used for management of the FIFO queue buffer; before + * sending, this is converted to be a real preamble. This + * indicates the real size of the TX message that starts at this + * point. If the highest bit is set, then this message is to be + * skipped. + * @sequence: sequence number of this message + * @offset: offset where the message itself starts -- see the comments + * in the file header about message header and payload descriptor + * alignment. + * @num_pls: number of payloads in this message + * @padding: amount of padding bytes at the end of the message to make + * it be of block-size aligned + * + * Look in rx.c and tx.c for a full description of the format. + */ +struct i2400m_msg_hdr { + union { + __le32 barker; + __u32 size; /* same size type as barker!! */ + }; + union { + __le32 sequence; + __u32 offset; /* same size type as barker!! */ + }; + __le16 num_pls; + __le16 rsv1; + __le16 padding; + __le16 rsv2; + struct i2400m_pld pld[0]; +} __attribute__ ((packed)); + + + +/* + * L3/L4 control protocol + */ + +enum { + /* Interface version */ + I2400M_L3L4_VERSION = 0x0100, +}; + +/* Message types */ +enum i2400m_mt { + I2400M_MT_RESERVED = 0x0000, + I2400M_MT_INVALID = 0xffff, + I2400M_MT_REPORT_MASK = 0x8000, + + I2400M_MT_GET_SCAN_RESULT = 0x4202, + I2400M_MT_SET_SCAN_PARAM = 0x4402, + I2400M_MT_CMD_RF_CONTROL = 0x4602, + I2400M_MT_CMD_SCAN = 0x4603, + I2400M_MT_CMD_CONNECT = 0x4604, + I2400M_MT_CMD_DISCONNECT = 0x4605, + I2400M_MT_CMD_EXIT_IDLE = 0x4606, + I2400M_MT_GET_LM_VERSION = 0x5201, + I2400M_MT_GET_DEVICE_INFO = 0x5202, + I2400M_MT_GET_LINK_STATUS = 0x5203, + I2400M_MT_GET_STATISTICS = 0x5204, + I2400M_MT_GET_STATE = 0x5205, + I2400M_MT_GET_MEDIA_STATUS = 0x5206, + I2400M_MT_SET_INIT_CONFIG = 0x5404, + I2400M_MT_CMD_INIT = 0x5601, + I2400M_MT_CMD_TERMINATE = 0x5602, + I2400M_MT_CMD_MODE_OF_OP = 0x5603, + I2400M_MT_CMD_RESET_DEVICE = 0x5604, + I2400M_MT_CMD_MONITOR_CONTROL = 0x5605, + I2400M_MT_CMD_ENTER_POWERSAVE = 0x5606, + I2400M_MT_GET_TLS_OPERATION_RESULT = 0x6201, + I2400M_MT_SET_EAP_SUCCESS = 0x6402, + I2400M_MT_SET_EAP_FAIL = 0x6403, + I2400M_MT_SET_EAP_KEY = 0x6404, + I2400M_MT_CMD_SEND_EAP_RESPONSE = 0x6602, + I2400M_MT_REPORT_SCAN_RESULT = 0xc002, + I2400M_MT_REPORT_STATE = 0xd002, + I2400M_MT_REPORT_POWERSAVE_READY = 0xd005, + I2400M_MT_REPORT_EAP_REQUEST = 0xe002, + I2400M_MT_REPORT_EAP_RESTART = 0xe003, + I2400M_MT_REPORT_ALT_ACCEPT = 0xe004, + I2400M_MT_REPORT_KEY_REQUEST = 0xe005, +}; + + +/* + * Message Ack Status codes + * + * When a message is replied-to, this status is reported. 
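+ *
+ * So, schematically, a caller holding the reply header (a 'struct
+ * i2400m_l3l4_hdr *reply') checks it with something like:
+ *
+ *	if (le16_to_cpu(reply->status) != I2400M_MS_DONE_OK)
+ *		return -EIO;	/* illustrative error handling */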
+ */ +enum i2400m_ms { + I2400M_MS_DONE_OK = 0, + I2400M_MS_DONE_IN_PROGRESS = 1, + I2400M_MS_INVALID_OP = 2, + I2400M_MS_BAD_STATE = 3, + I2400M_MS_ILLEGAL_VALUE = 4, + I2400M_MS_MISSING_PARAMS = 5, + I2400M_MS_VERSION_ERROR = 6, + I2400M_MS_ACCESSIBILITY_ERROR = 7, + I2400M_MS_BUSY = 8, + I2400M_MS_CORRUPTED_TLV = 9, + I2400M_MS_UNINITIALIZED = 10, + I2400M_MS_UNKNOWN_ERROR = 11, + I2400M_MS_PRODUCTION_ERROR = 12, + I2400M_MS_NO_RF = 13, + I2400M_MS_NOT_READY_FOR_POWERSAVE = 14, + I2400M_MS_THERMAL_CRITICAL = 15, + I2400M_MS_MAX +}; + + +/** + * i2400m_tlv - enumeration of the different types of TLVs + * + * TLVs stand for type-length-value and are the header for a payload + * composed of almost anything. Each payload has a type assigned + * and a length. + */ +enum i2400m_tlv { + I2400M_TLV_L4_MESSAGE_VERSIONS = 129, + I2400M_TLV_SYSTEM_STATE = 141, + I2400M_TLV_MEDIA_STATUS = 161, + I2400M_TLV_RF_OPERATION = 162, + I2400M_TLV_RF_STATUS = 163, + I2400M_TLV_DEVICE_RESET_TYPE = 132, + I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601, + I2400M_TLV_CONFIG_IDLE_TIMEOUT = 611, + I2400M_TLV_CONFIG_D2H_DATA_FORMAT = 614, + I2400M_TLV_CONFIG_DL_HOST_REORDER = 615, +}; + + +struct i2400m_tlv_hdr { + __le16 type; + __le16 length; /* payload's */ + __u8 pl[0]; +} __attribute__((packed)); + + +struct i2400m_l3l4_hdr { + __le16 type; + __le16 length; /* payload's */ + __le16 version; + __le16 resv1; + __le16 status; + __le16 resv2; + struct i2400m_tlv_hdr pl[0]; +} __attribute__((packed)); + + +/** + * i2400m_system_state - different states of the device + */ +enum i2400m_system_state { + I2400M_SS_UNINITIALIZED = 1, + I2400M_SS_INIT, + I2400M_SS_READY, + I2400M_SS_SCAN, + I2400M_SS_STANDBY, + I2400M_SS_CONNECTING, + I2400M_SS_WIMAX_CONNECTED, + I2400M_SS_DATA_PATH_CONNECTED, + I2400M_SS_IDLE, + I2400M_SS_DISCONNECTING, + I2400M_SS_OUT_OF_ZONE, + I2400M_SS_SLEEPACTIVE, + I2400M_SS_PRODUCTION, + I2400M_SS_CONFIG, + I2400M_SS_RF_OFF, + I2400M_SS_RF_SHUTDOWN, + I2400M_SS_DEVICE_DISCONNECT, + I2400M_SS_MAX, +}; + + +/** + * i2400m_tlv_system_state - report on the state of the system + * + * @state: see enum i2400m_system_state + */ +struct i2400m_tlv_system_state { + struct i2400m_tlv_hdr hdr; + __le32 state; +} __attribute__((packed)); + + +struct i2400m_tlv_l4_message_versions { + struct i2400m_tlv_hdr hdr; + __le16 major; + __le16 minor; + __le16 branch; + __le16 reserved; +} __attribute__((packed)); + + +struct i2400m_tlv_detailed_device_info { + struct i2400m_tlv_hdr hdr; + __u8 reserved1[400]; + __u8 mac_address[6]; + __u8 reserved2[2]; +} __attribute__((packed)); + + +enum i2400m_rf_switch_status { + I2400M_RF_SWITCH_ON = 1, + I2400M_RF_SWITCH_OFF = 2, +}; + +struct i2400m_tlv_rf_switches_status { + struct i2400m_tlv_hdr hdr; + __u8 sw_rf_switch; /* 1 ON, 2 OFF */ + __u8 hw_rf_switch; /* 1 ON, 2 OFF */ + __u8 reserved[2]; +} __attribute__((packed)); + + +enum { + i2400m_rf_operation_on = 1, + i2400m_rf_operation_off = 2 +}; + +struct i2400m_tlv_rf_operation { + struct i2400m_tlv_hdr hdr; + __le32 status; /* 1 ON, 2 OFF */ +} __attribute__((packed)); + + +enum i2400m_tlv_reset_type { + I2400M_RESET_TYPE_COLD = 1, + I2400M_RESET_TYPE_WARM +}; + +struct i2400m_tlv_device_reset_type { + struct i2400m_tlv_hdr hdr; + __le32 reset_type; +} __attribute__((packed)); + + +struct i2400m_tlv_config_idle_parameters { + struct i2400m_tlv_hdr hdr; + __le32 idle_timeout; /* 100 to 300000 ms [5min], 100 increments + * 0 disabled */ + __le32 idle_paging_interval; /* frames */ +} __attribute__((packed)); + + +enum 
i2400m_media_status { + I2400M_MEDIA_STATUS_LINK_UP = 1, + I2400M_MEDIA_STATUS_LINK_DOWN, + I2400M_MEDIA_STATUS_LINK_RENEW, +}; + +struct i2400m_tlv_media_status { + struct i2400m_tlv_hdr hdr; + __le32 media_status; +} __attribute__((packed)); + + +/* New in v1.4 */ +struct i2400m_tlv_config_idle_timeout { + struct i2400m_tlv_hdr hdr; + __le32 timeout; /* 100 to 300000 ms [5min], 100 increments + * 0 disabled */ +} __attribute__((packed)); + +/* New in v1.4 -- for backward compat, will be removed */ +struct i2400m_tlv_config_d2h_data_format { + struct i2400m_tlv_hdr hdr; + __u8 format; /* 0 old format, 1 enhanced */ + __u8 reserved[3]; +} __attribute__((packed)); + +/* New in v1.4 */ +struct i2400m_tlv_config_dl_host_reorder { + struct i2400m_tlv_hdr hdr; + __u8 reorder; /* 0 disabled, 1 enabled */ + __u8 reserved[3]; +} __attribute__((packed)); + + +#endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */ diff --git a/include/linux/wireless.h b/include/linux/wireless.h new file mode 100644 index 00000000..4395b28b --- /dev/null +++ b/include/linux/wireless.h @@ -0,0 +1,1162 @@ +/* + * This file define a set of standard wireless extensions + * + * Version : 22 16.3.07 + * + * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> + * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. + */ + +#ifndef _LINUX_WIRELESS_H +#define _LINUX_WIRELESS_H + +/************************** DOCUMENTATION **************************/ +/* + * Initial APIs (1996 -> onward) : + * ----------------------------- + * Basically, the wireless extensions are for now a set of standard ioctl + * call + /proc/net/wireless + * + * The entry /proc/net/wireless give statistics and information on the + * driver. + * This is better than having each driver having its entry because + * its centralised and we may remove the driver module safely. + * + * Ioctl are used to configure the driver and issue commands. This is + * better than command line options of insmod because we may want to + * change dynamically (while the driver is running) some parameters. + * + * The ioctl mechanimsm are copied from standard devices ioctl. + * We have the list of command plus a structure descibing the + * data exchanged... + * Note that to add these ioctl, I was obliged to modify : + * # net/core/dev.c (two place + add include) + * # net/ipv4/af_inet.c (one place + add include) + * + * /proc/net/wireless is a copy of /proc/net/dev. + * We have a structure for data passed from the driver to /proc/net/wireless + * Too add this, I've modified : + * # net/core/dev.c (two other places) + * # include/linux/netdevice.h (one place) + * # include/linux/proc_fs.h (one place) + * + * New driver API (2002 -> onward) : + * ------------------------------- + * This file is only concerned with the user space API and common definitions. + * The new driver API is defined and documented in : + * # include/net/iw_handler.h + * + * Note as well that /proc/net/wireless implementation has now moved in : + * # net/core/wireless.c + * + * Wireless Events (2002 -> onward) : + * -------------------------------- + * Events are defined at the end of this file, and implemented in : + * # net/core/wireless.c + * + * Other comments : + * -------------- + * Do not add here things that are redundant with other mechanisms + * (drivers init, ifconfig, /proc/net/dev, ...) and with are not + * wireless specific. + * + * These wireless extensions are not magic : each driver has to provide + * support for them... 
+ * + * IMPORTANT NOTE : As everything in the kernel, this is very much a + * work in progress. Contact me if you have ideas of improvements... + */ + +/***************************** INCLUDES *****************************/ + +#include <linux/types.h> /* for __u* and __s* typedefs */ +#include <linux/socket.h> /* for "struct sockaddr" et al */ +#include <linux/if.h> /* for IFNAMSIZ and co... */ + +/***************************** VERSION *****************************/ +/* + * This constant is used to know the availability of the wireless + * extensions and to know which version of wireless extensions it is + * (there is some stuff that will be added in the future...) + * I just plan to increment with each new version. + */ +#define WIRELESS_EXT 22 + +/* + * Changes : + * + * V2 to V3 + * -------- + * Alan Cox start some incompatibles changes. I've integrated a bit more. + * - Encryption renamed to Encode to avoid US regulation problems + * - Frequency changed from float to struct to avoid problems on old 386 + * + * V3 to V4 + * -------- + * - Add sensitivity + * + * V4 to V5 + * -------- + * - Missing encoding definitions in range + * - Access points stuff + * + * V5 to V6 + * -------- + * - 802.11 support (ESSID ioctls) + * + * V6 to V7 + * -------- + * - define IW_ESSID_MAX_SIZE and IW_MAX_AP + * + * V7 to V8 + * -------- + * - Changed my e-mail address + * - More 802.11 support (nickname, rate, rts, frag) + * - List index in frequencies + * + * V8 to V9 + * -------- + * - Support for 'mode of operation' (ad-hoc, managed...) + * - Support for unicast and multicast power saving + * - Change encoding to support larger tokens (>64 bits) + * - Updated iw_params (disable, flags) and use it for NWID + * - Extracted iw_point from iwreq for clarity + * + * V9 to V10 + * --------- + * - Add PM capability to range structure + * - Add PM modifier : MAX/MIN/RELATIVE + * - Add encoding option : IW_ENCODE_NOKEY + * - Add TxPower ioctls (work like TxRate) + * + * V10 to V11 + * ---------- + * - Add WE version in range (help backward/forward compatibility) + * - Add retry ioctls (work like PM) + * + * V11 to V12 + * ---------- + * - Add SIOCSIWSTATS to get /proc/net/wireless programatically + * - Add DEV PRIVATE IOCTL to avoid collisions in SIOCDEVPRIVATE space + * - Add new statistics (frag, retry, beacon) + * - Add average quality (for user space calibration) + * + * V12 to V13 + * ---------- + * - Document creation of new driver API. + * - Extract union iwreq_data from struct iwreq (for new driver API). 
+ * - Rename SIOCSIWNAME as SIOCSIWCOMMIT + * + * V13 to V14 + * ---------- + * - Wireless Events support : define struct iw_event + * - Define additional specific event numbers + * - Add "addr" and "param" fields in union iwreq_data + * - AP scanning stuff (SIOCSIWSCAN and friends) + * + * V14 to V15 + * ---------- + * - Add IW_PRIV_TYPE_ADDR for struct sockaddr private arg + * - Make struct iw_freq signed (both m & e), add explicit padding + * - Add IWEVCUSTOM for driver specific event/scanning token + * - Add IW_MAX_GET_SPY for driver returning a lot of addresses + * - Add IW_TXPOW_RANGE for range of Tx Powers + * - Add IWEVREGISTERED & IWEVEXPIRED events for Access Points + * - Add IW_MODE_MONITOR for passive monitor + * + * V15 to V16 + * ---------- + * - Increase the number of bitrates in iw_range to 32 (for 802.11g) + * - Increase the number of frequencies in iw_range to 32 (for 802.11b+a) + * - Reshuffle struct iw_range for increases, add filler + * - Increase IW_MAX_AP to 64 for driver returning a lot of addresses + * - Remove IW_MAX_GET_SPY because conflict with enhanced spy support + * - Add SIOCSIWTHRSPY/SIOCGIWTHRSPY and "struct iw_thrspy" + * - Add IW_ENCODE_TEMP and iw_range->encoding_login_index + * + * V16 to V17 + * ---------- + * - Add flags to frequency -> auto/fixed + * - Document (struct iw_quality *)->updated, add new flags (INVALID) + * - Wireless Event capability in struct iw_range + * - Add support for relative TxPower (yick !) + * + * V17 to V18 (From Jouni Malinen <j@w1.fi>) + * ---------- + * - Add support for WPA/WPA2 + * - Add extended encoding configuration (SIOCSIWENCODEEXT and + * SIOCGIWENCODEEXT) + * - Add SIOCSIWGENIE/SIOCGIWGENIE + * - Add SIOCSIWMLME + * - Add SIOCSIWPMKSA + * - Add struct iw_range bit field for supported encoding capabilities + * - Add optional scan request parameters for SIOCSIWSCAN + * - Add SIOCSIWAUTH/SIOCGIWAUTH for setting authentication and WPA + * related parameters (extensible up to 4096 parameter values) + * - Add wireless events: IWEVGENIE, IWEVMICHAELMICFAILURE, + * IWEVASSOCREQIE, IWEVASSOCRESPIE, IWEVPMKIDCAND + * + * V18 to V19 + * ---------- + * - Remove (struct iw_point *)->pointer from events and streams + * - Remove header includes to help user space + * - Increase IW_ENCODING_TOKEN_MAX from 32 to 64 + * - Add IW_QUAL_ALL_UPDATED and IW_QUAL_ALL_INVALID macros + * - Add explicit flag to tell stats are in dBm : IW_QUAL_DBM + * - Add IW_IOCTL_IDX() and IW_EVENT_IDX() macros + * + * V19 to V20 + * ---------- + * - RtNetlink requests support (SET/GET) + * + * V20 to V21 + * ---------- + * - Remove (struct net_device *)->get_wireless_stats() + * - Change length in ESSID and NICK to strlen() instead of strlen()+1 + * - Add IW_RETRY_SHORT/IW_RETRY_LONG retry modifiers + * - Power/Retry relative values no longer * 100000 + * - Add explicit flag to tell stats are in 802.11k RCPI : IW_QUAL_RCPI + * + * V21 to V22 + * ---------- + * - Prevent leaking of kernel space in stream on 64 bits. + */ + +/**************************** CONSTANTS ****************************/ + +/* -------------------------- IOCTL LIST -------------------------- */ + +/* Wireless Identification */ +#define SIOCSIWCOMMIT 0x8B00 /* Commit pending changes to driver */ +#define SIOCGIWNAME 0x8B01 /* get name == wireless protocol */ +/* SIOCGIWNAME is used to verify the presence of Wireless Extensions. + * Common values : "IEEE 802.11-DS", "IEEE 802.11-FH", "IEEE 802.11b"... + * Don't put the name of your driver there, it's useless. 
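As the comment here notes, SIOCGIWNAME doubles as the presence test for Wireless Extensions. A minimal user-space probe might look like the sketch below; the interface name "wlan0" is only a placeholder, and depending on the libc the include order may need adjusting.

/* Sketch: probe an interface for Wireless Extensions support. */
#include <linux/wireless.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct iwreq wrq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket works for wireless ioctls */

	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_ifrn.ifrn_name, "wlan0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGIWNAME, &wrq) < 0)
		perror("no wireless extensions on this interface");
	else
		printf("protocol: %s\n", wrq.u.name);	/* e.g. "IEEE 802.11" */

	close(fd);
	return 0;
}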
*/ + +/* Basic operations */ +#define SIOCSIWNWID 0x8B02 /* set network id (pre-802.11) */ +#define SIOCGIWNWID 0x8B03 /* get network id (the cell) */ +#define SIOCSIWFREQ 0x8B04 /* set channel/frequency (Hz) */ +#define SIOCGIWFREQ 0x8B05 /* get channel/frequency (Hz) */ +#define SIOCSIWMODE 0x8B06 /* set operation mode */ +#define SIOCGIWMODE 0x8B07 /* get operation mode */ +#define SIOCSIWSENS 0x8B08 /* set sensitivity (dBm) */ +#define SIOCGIWSENS 0x8B09 /* get sensitivity (dBm) */ + +/* Informative stuff */ +#define SIOCSIWRANGE 0x8B0A /* Unused */ +#define SIOCGIWRANGE 0x8B0B /* Get range of parameters */ +#define SIOCSIWPRIV 0x8B0C /* Unused */ +#define SIOCGIWPRIV 0x8B0D /* get private ioctl interface info */ +#define SIOCSIWSTATS 0x8B0E /* Unused */ +#define SIOCGIWSTATS 0x8B0F /* Get /proc/net/wireless stats */ +/* SIOCGIWSTATS is strictly used between user space and the kernel, and + * is never passed to the driver (i.e. the driver will never see it). */ + +/* Spy support (statistics per MAC address - used for Mobile IP support) */ +#define SIOCSIWSPY 0x8B10 /* set spy addresses */ +#define SIOCGIWSPY 0x8B11 /* get spy info (quality of link) */ +#define SIOCSIWTHRSPY 0x8B12 /* set spy threshold (spy event) */ +#define SIOCGIWTHRSPY 0x8B13 /* get spy threshold */ + +/* Access Point manipulation */ +#define SIOCSIWAP 0x8B14 /* set access point MAC addresses */ +#define SIOCGIWAP 0x8B15 /* get access point MAC addresses */ +#define SIOCGIWAPLIST 0x8B17 /* Deprecated in favor of scanning */ +#define SIOCSIWSCAN 0x8B18 /* trigger scanning (list cells) */ +#define SIOCGIWSCAN 0x8B19 /* get scanning results */ + +/* 802.11 specific support */ +#define SIOCSIWESSID 0x8B1A /* set ESSID (network name) */ +#define SIOCGIWESSID 0x8B1B /* get ESSID */ +#define SIOCSIWNICKN 0x8B1C /* set node name/nickname */ +#define SIOCGIWNICKN 0x8B1D /* get node name/nickname */ +/* As the ESSID and NICKN are strings up to 32 bytes long, it doesn't fit + * within the 'iwreq' structure, so we need to use the 'data' member to + * point to a string in user space, like it is done for RANGE... */ + +/* Other parameters useful in 802.11 and some other devices */ +#define SIOCSIWRATE 0x8B20 /* set default bit rate (bps) */ +#define SIOCGIWRATE 0x8B21 /* get default bit rate (bps) */ +#define SIOCSIWRTS 0x8B22 /* set RTS/CTS threshold (bytes) */ +#define SIOCGIWRTS 0x8B23 /* get RTS/CTS threshold (bytes) */ +#define SIOCSIWFRAG 0x8B24 /* set fragmentation thr (bytes) */ +#define SIOCGIWFRAG 0x8B25 /* get fragmentation thr (bytes) */ +#define SIOCSIWTXPOW 0x8B26 /* set transmit power (dBm) */ +#define SIOCGIWTXPOW 0x8B27 /* get transmit power (dBm) */ +#define SIOCSIWRETRY 0x8B28 /* set retry limits and lifetime */ +#define SIOCGIWRETRY 0x8B29 /* get retry limits and lifetime */ + +/* Encoding stuff (scrambling, hardware security, WEP...) */ +#define SIOCSIWENCODE 0x8B2A /* set encoding token & mode */ +#define SIOCGIWENCODE 0x8B2B /* get encoding token & mode */ +/* Power saving stuff (power management, unicast and multicast) */ +#define SIOCSIWPOWER 0x8B2C /* set Power Management settings */ +#define SIOCGIWPOWER 0x8B2D /* get Power Management settings */ + +/* WPA : Generic IEEE 802.11 informatiom element (e.g., for WPA/RSN/WMM). + * This ioctl uses struct iw_point and data buffer that includes IE id and len + * fields. More than one IE may be included in the request. Setting the generic + * IE to empty buffer (len=0) removes the generic IE from the driver. 
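The ESSID/NICKN comment above explains that string results travel through the iw_point 'data' member rather than inline in struct iwreq. A hedged sketch of fetching the current ESSID (fd is an already-open socket, as in the probe example earlier; the caller supplies a buffer of at least IW_ESSID_MAX_SIZE + 1 bytes):

/* Sketch: read the current ESSID through the iw_point member.
 * Needs <linux/wireless.h>, <sys/ioctl.h>, <string.h>. */
static int get_essid(int fd, const char *ifname, char *essid, size_t len)
{
	struct iwreq wrq;
	size_t n;

	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_ifrn.ifrn_name, ifname, IFNAMSIZ - 1);

	wrq.u.essid.pointer = essid;		/* user-space buffer */
	wrq.u.essid.length  = len;
	wrq.u.essid.flags   = 0;

	if (ioctl(fd, SIOCGIWESSID, &wrq) < 0)
		return -1;

	n = wrq.u.essid.length;			/* WE-21 and later: strlen(), no NUL */
	if (n >= len)
		n = len - 1;
	essid[n] = '\0';
	return 0;
}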
Drivers + * are allowed to generate their own WPA/RSN IEs, but in these cases, drivers + * are required to report the used IE as a wireless event, e.g., when + * associating with an AP. */ +#define SIOCSIWGENIE 0x8B30 /* set generic IE */ +#define SIOCGIWGENIE 0x8B31 /* get generic IE */ + +/* WPA : IEEE 802.11 MLME requests */ +#define SIOCSIWMLME 0x8B16 /* request MLME operation; uses + * struct iw_mlme */ +/* WPA : Authentication mode parameters */ +#define SIOCSIWAUTH 0x8B32 /* set authentication mode params */ +#define SIOCGIWAUTH 0x8B33 /* get authentication mode params */ + +/* WPA : Extended version of encoding configuration */ +#define SIOCSIWENCODEEXT 0x8B34 /* set encoding token & mode */ +#define SIOCGIWENCODEEXT 0x8B35 /* get encoding token & mode */ + +/* WPA2 : PMKSA cache management */ +#define SIOCSIWPMKSA 0x8B36 /* PMKSA cache operation */ + +/* -------------------- DEV PRIVATE IOCTL LIST -------------------- */ + +/* These 32 ioctl are wireless device private, for 16 commands. + * Each driver is free to use them for whatever purpose it chooses, + * however the driver *must* export the description of those ioctls + * with SIOCGIWPRIV and *must* use arguments as defined below. + * If you don't follow those rules, DaveM is going to hate you (reason : + * it make mixed 32/64bit operation impossible). + */ +#define SIOCIWFIRSTPRIV 0x8BE0 +#define SIOCIWLASTPRIV 0x8BFF +/* Previously, we were using SIOCDEVPRIVATE, but we now have our + * separate range because of collisions with other tools such as + * 'mii-tool'. + * We now have 32 commands, so a bit more space ;-). + * Also, all 'even' commands are only usable by root and don't return the + * content of ifr/iwr to user (but you are not obliged to use the set/get + * convention, just use every other two command). More details in iwpriv.c. + * And I repeat : you are not forced to use them with iwpriv, but you + * must be compliant with it. + */ + +/* ------------------------- IOCTL STUFF ------------------------- */ + +/* The first and the last (range) */ +#define SIOCIWFIRST 0x8B00 +#define SIOCIWLAST SIOCIWLASTPRIV /* 0x8BFF */ +#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST) +#define IW_HANDLER(id, func) \ + [IW_IOCTL_IDX(id)] = func + +/* Odd : get (world access), even : set (root access) */ +#define IW_IS_SET(cmd) (!((cmd) & 0x1)) +#define IW_IS_GET(cmd) ((cmd) & 0x1) + +/* ----------------------- WIRELESS EVENTS ----------------------- */ +/* Those are *NOT* ioctls, do not issue request on them !!! */ +/* Most events use the same identifier as ioctl requests */ + +#define IWEVTXDROP 0x8C00 /* Packet dropped to excessive retry */ +#define IWEVQUAL 0x8C01 /* Quality part of statistics (scan) */ +#define IWEVCUSTOM 0x8C02 /* Driver specific ascii string */ +#define IWEVREGISTERED 0x8C03 /* Discovered a new node (AP mode) */ +#define IWEVEXPIRED 0x8C04 /* Expired a node (AP mode) */ +#define IWEVGENIE 0x8C05 /* Generic IE (WPA, RSN, WMM, ..) + * (scan results); This includes id and + * length fields. One IWEVGENIE may + * contain more than one IE. Scan + * results may contain one or more + * IWEVGENIE events. */ +#define IWEVMICHAELMICFAILURE 0x8C06 /* Michael MIC failure + * (struct iw_michaelmicfailure) + */ +#define IWEVASSOCREQIE 0x8C07 /* IEs used in (Re)Association Request. + * The data includes id and length + * fields and may contain more than one + * IE. This event is required in + * Managed mode if the driver + * generates its own WPA/RSN IE. 
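The IW_IOCTL_IDX()/IW_HANDLER() helpers defined above are meant for building a driver's handler table. A sketch of that pattern follows; iw_handler and struct iw_handler_def come from include/net/iw_handler.h (the in-kernel driver API referenced at the top of this file), and the foo_* names are hypothetical.

/* Sketch: dispatch table indexed by IW_IOCTL_IDX(). */
#include <linux/kernel.h>
#include <linux/string.h>
#include <net/iw_handler.h>

static int foo_get_name(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	strcpy(wrqu->name, "IEEE 802.11");	/* the protocol, not the driver name */
	return 0;
}

static const iw_handler foo_handlers[] = {
	IW_HANDLER(SIOCGIWNAME, foo_get_name),
	/* IW_HANDLER(SIOCSIWFREQ, foo_set_freq), ... */
};

static const struct iw_handler_def foo_handler_def = {
	.num_standard	= ARRAY_SIZE(foo_handlers),
	.standard	= foo_handlers,
};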
This + * should be sent just before + * IWEVREGISTERED event for the + * association. */ +#define IWEVASSOCRESPIE 0x8C08 /* IEs used in (Re)Association + * Response. The data includes id and + * length fields and may contain more + * than one IE. This may be sent + * between IWEVASSOCREQIE and + * IWEVREGISTERED events for the + * association. */ +#define IWEVPMKIDCAND 0x8C09 /* PMKID candidate for RSN + * pre-authentication + * (struct iw_pmkid_cand) */ + +#define IWEVFIRST 0x8C00 +#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST) + +/* ------------------------- PRIVATE INFO ------------------------- */ +/* + * The following is used with SIOCGIWPRIV. It allow a driver to define + * the interface (name, type of data) for its private ioctl. + * Privates ioctl are SIOCIWFIRSTPRIV -> SIOCIWLASTPRIV + */ + +#define IW_PRIV_TYPE_MASK 0x7000 /* Type of arguments */ +#define IW_PRIV_TYPE_NONE 0x0000 +#define IW_PRIV_TYPE_BYTE 0x1000 /* Char as number */ +#define IW_PRIV_TYPE_CHAR 0x2000 /* Char as character */ +#define IW_PRIV_TYPE_INT 0x4000 /* 32 bits int */ +#define IW_PRIV_TYPE_FLOAT 0x5000 /* struct iw_freq */ +#define IW_PRIV_TYPE_ADDR 0x6000 /* struct sockaddr */ + +#define IW_PRIV_SIZE_FIXED 0x0800 /* Variable or fixed number of args */ + +#define IW_PRIV_SIZE_MASK 0x07FF /* Max number of those args */ + +/* + * Note : if the number of args is fixed and the size < 16 octets, + * instead of passing a pointer we will put args in the iwreq struct... + */ + +/* ----------------------- OTHER CONSTANTS ----------------------- */ + +/* Maximum frequencies in the range struct */ +#define IW_MAX_FREQUENCIES 32 +/* Note : if you have something like 80 frequencies, + * don't increase this constant and don't fill the frequency list. + * The user will be able to set by channel anyway... */ + +/* Maximum bit rates in the range struct */ +#define IW_MAX_BITRATES 32 + +/* Maximum tx powers in the range struct */ +#define IW_MAX_TXPOWER 8 +/* Note : if you more than 8 TXPowers, just set the max and min or + * a few of them in the struct iw_range. */ + +/* Maximum of address that you may set with SPY */ +#define IW_MAX_SPY 8 + +/* Maximum of address that you may get in the + list of access points in range */ +#define IW_MAX_AP 64 + +/* Maximum size of the ESSID and NICKN strings */ +#define IW_ESSID_MAX_SIZE 32 + +/* Modes of operation */ +#define IW_MODE_AUTO 0 /* Let the driver decides */ +#define IW_MODE_ADHOC 1 /* Single cell network */ +#define IW_MODE_INFRA 2 /* Multi cell network, roaming, ... 
*/ +#define IW_MODE_MASTER 3 /* Synchronisation master or Access Point */ +#define IW_MODE_REPEAT 4 /* Wireless Repeater (forwarder) */ +#define IW_MODE_SECOND 5 /* Secondary master/repeater (backup) */ +#define IW_MODE_MONITOR 6 /* Passive monitor (listen only) */ +#define IW_MODE_MESH 7 /* Mesh (IEEE 802.11s) network */ + +/* Statistics flags (bitmask in updated) */ +#define IW_QUAL_QUAL_UPDATED 0x01 /* Value was updated since last read */ +#define IW_QUAL_LEVEL_UPDATED 0x02 +#define IW_QUAL_NOISE_UPDATED 0x04 +#define IW_QUAL_ALL_UPDATED 0x07 +#define IW_QUAL_DBM 0x08 /* Level + Noise are dBm */ +#define IW_QUAL_QUAL_INVALID 0x10 /* Driver doesn't provide value */ +#define IW_QUAL_LEVEL_INVALID 0x20 +#define IW_QUAL_NOISE_INVALID 0x40 +#define IW_QUAL_RCPI 0x80 /* Level + Noise are 802.11k RCPI */ +#define IW_QUAL_ALL_INVALID 0x70 + +/* Frequency flags */ +#define IW_FREQ_AUTO 0x00 /* Let the driver decides */ +#define IW_FREQ_FIXED 0x01 /* Force a specific value */ + +/* Maximum number of size of encoding token available + * they are listed in the range structure */ +#define IW_MAX_ENCODING_SIZES 8 + +/* Maximum size of the encoding token in bytes */ +#define IW_ENCODING_TOKEN_MAX 64 /* 512 bits (for now) */ + +/* Flags for encoding (along with the token) */ +#define IW_ENCODE_INDEX 0x00FF /* Token index (if needed) */ +#define IW_ENCODE_FLAGS 0xFF00 /* Flags defined below */ +#define IW_ENCODE_MODE 0xF000 /* Modes defined below */ +#define IW_ENCODE_DISABLED 0x8000 /* Encoding disabled */ +#define IW_ENCODE_ENABLED 0x0000 /* Encoding enabled */ +#define IW_ENCODE_RESTRICTED 0x4000 /* Refuse non-encoded packets */ +#define IW_ENCODE_OPEN 0x2000 /* Accept non-encoded packets */ +#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */ +#define IW_ENCODE_TEMP 0x0400 /* Temporary key */ + +/* Power management flags available (along with the value, if any) */ +#define IW_POWER_ON 0x0000 /* No details... */ +#define IW_POWER_TYPE 0xF000 /* Type of parameter */ +#define IW_POWER_PERIOD 0x1000 /* Value is a period/duration of */ +#define IW_POWER_TIMEOUT 0x2000 /* Value is a timeout (to go asleep) */ +#define IW_POWER_MODE 0x0F00 /* Power Management mode */ +#define IW_POWER_UNICAST_R 0x0100 /* Receive only unicast messages */ +#define IW_POWER_MULTICAST_R 0x0200 /* Receive only multicast messages */ +#define IW_POWER_ALL_R 0x0300 /* Receive all messages though PM */ +#define IW_POWER_FORCE_S 0x0400 /* Force PM procedure for sending unicast */ +#define IW_POWER_REPEATER 0x0800 /* Repeat broadcast messages in PM period */ +#define IW_POWER_MODIFIER 0x000F /* Modify a parameter */ +#define IW_POWER_MIN 0x0001 /* Value is a minimum */ +#define IW_POWER_MAX 0x0002 /* Value is a maximum */ +#define IW_POWER_RELATIVE 0x0004 /* Value is not in seconds/ms/us */ + +/* Transmit Power flags available */ +#define IW_TXPOW_TYPE 0x00FF /* Type of value */ +#define IW_TXPOW_DBM 0x0000 /* Value is in dBm */ +#define IW_TXPOW_MWATT 0x0001 /* Value is in mW */ +#define IW_TXPOW_RELATIVE 0x0002 /* Value is in arbitrary units */ +#define IW_TXPOW_RANGE 0x1000 /* Range of value between min/max */ + +/* Retry limits and lifetime flags available */ +#define IW_RETRY_ON 0x0000 /* No details... 
*/ +#define IW_RETRY_TYPE 0xF000 /* Type of parameter */ +#define IW_RETRY_LIMIT 0x1000 /* Maximum number of retries*/ +#define IW_RETRY_LIFETIME 0x2000 /* Maximum duration of retries in us */ +#define IW_RETRY_MODIFIER 0x00FF /* Modify a parameter */ +#define IW_RETRY_MIN 0x0001 /* Value is a minimum */ +#define IW_RETRY_MAX 0x0002 /* Value is a maximum */ +#define IW_RETRY_RELATIVE 0x0004 /* Value is not in seconds/ms/us */ +#define IW_RETRY_SHORT 0x0010 /* Value is for short packets */ +#define IW_RETRY_LONG 0x0020 /* Value is for long packets */ + +/* Scanning request flags */ +#define IW_SCAN_DEFAULT 0x0000 /* Default scan of the driver */ +#define IW_SCAN_ALL_ESSID 0x0001 /* Scan all ESSIDs */ +#define IW_SCAN_THIS_ESSID 0x0002 /* Scan only this ESSID */ +#define IW_SCAN_ALL_FREQ 0x0004 /* Scan all Frequencies */ +#define IW_SCAN_THIS_FREQ 0x0008 /* Scan only this Frequency */ +#define IW_SCAN_ALL_MODE 0x0010 /* Scan all Modes */ +#define IW_SCAN_THIS_MODE 0x0020 /* Scan only this Mode */ +#define IW_SCAN_ALL_RATE 0x0040 /* Scan all Bit-Rates */ +#define IW_SCAN_THIS_RATE 0x0080 /* Scan only this Bit-Rate */ +/* struct iw_scan_req scan_type */ +#define IW_SCAN_TYPE_ACTIVE 0 +#define IW_SCAN_TYPE_PASSIVE 1 +/* Maximum size of returned data */ +#define IW_SCAN_MAX_DATA 4096 /* In bytes */ + +/* Scan capability flags - in (struct iw_range *)->scan_capa */ +#define IW_SCAN_CAPA_NONE 0x00 +#define IW_SCAN_CAPA_ESSID 0x01 +#define IW_SCAN_CAPA_BSSID 0x02 +#define IW_SCAN_CAPA_CHANNEL 0x04 +#define IW_SCAN_CAPA_MODE 0x08 +#define IW_SCAN_CAPA_RATE 0x10 +#define IW_SCAN_CAPA_TYPE 0x20 +#define IW_SCAN_CAPA_TIME 0x40 + +/* Max number of char in custom event - use multiple of them if needed */ +#define IW_CUSTOM_MAX 256 /* In bytes */ + +/* Generic information element */ +#define IW_GENERIC_IE_MAX 1024 + +/* MLME requests (SIOCSIWMLME / struct iw_mlme) */ +#define IW_MLME_DEAUTH 0 +#define IW_MLME_DISASSOC 1 +#define IW_MLME_AUTH 2 +#define IW_MLME_ASSOC 3 + +/* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */ +#define IW_AUTH_INDEX 0x0FFF +#define IW_AUTH_FLAGS 0xF000 +/* SIOCSIWAUTH/SIOCGIWAUTH parameters (0 .. 
4095) + * (IW_AUTH_INDEX mask in struct iw_param flags; this is the index of the + * parameter that is being set/get to; value will be read/written to + * struct iw_param value field) */ +#define IW_AUTH_WPA_VERSION 0 +#define IW_AUTH_CIPHER_PAIRWISE 1 +#define IW_AUTH_CIPHER_GROUP 2 +#define IW_AUTH_KEY_MGMT 3 +#define IW_AUTH_TKIP_COUNTERMEASURES 4 +#define IW_AUTH_DROP_UNENCRYPTED 5 +#define IW_AUTH_80211_AUTH_ALG 6 +#define IW_AUTH_WPA_ENABLED 7 +#define IW_AUTH_RX_UNENCRYPTED_EAPOL 8 +#define IW_AUTH_ROAMING_CONTROL 9 +#define IW_AUTH_PRIVACY_INVOKED 10 +#define IW_AUTH_CIPHER_GROUP_MGMT 11 +#define IW_AUTH_MFP 12 + +/* IW_AUTH_WPA_VERSION values (bit field) */ +#define IW_AUTH_WPA_VERSION_DISABLED 0x00000001 +#define IW_AUTH_WPA_VERSION_WPA 0x00000002 +#define IW_AUTH_WPA_VERSION_WPA2 0x00000004 + +/* IW_AUTH_PAIRWISE_CIPHER, IW_AUTH_GROUP_CIPHER, and IW_AUTH_CIPHER_GROUP_MGMT + * values (bit field) */ +#define IW_AUTH_CIPHER_NONE 0x00000001 +#define IW_AUTH_CIPHER_WEP40 0x00000002 +#define IW_AUTH_CIPHER_TKIP 0x00000004 +#define IW_AUTH_CIPHER_CCMP 0x00000008 +#define IW_AUTH_CIPHER_WEP104 0x00000010 +#define IW_AUTH_CIPHER_AES_CMAC 0x00000020 + +/* IW_AUTH_KEY_MGMT values (bit field) */ +#define IW_AUTH_KEY_MGMT_802_1X 1 +#define IW_AUTH_KEY_MGMT_PSK 2 + +/* IW_AUTH_80211_AUTH_ALG values (bit field) */ +#define IW_AUTH_ALG_OPEN_SYSTEM 0x00000001 +#define IW_AUTH_ALG_SHARED_KEY 0x00000002 +#define IW_AUTH_ALG_LEAP 0x00000004 + +/* IW_AUTH_ROAMING_CONTROL values */ +#define IW_AUTH_ROAMING_ENABLE 0 /* driver/firmware based roaming */ +#define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming + * control */ + +/* IW_AUTH_MFP (management frame protection) values */ +#define IW_AUTH_MFP_DISABLED 0 /* MFP disabled */ +#define IW_AUTH_MFP_OPTIONAL 1 /* MFP optional */ +#define IW_AUTH_MFP_REQUIRED 2 /* MFP required */ + +/* SIOCSIWENCODEEXT definitions */ +#define IW_ENCODE_SEQ_MAX_SIZE 8 +/* struct iw_encode_ext ->alg */ +#define IW_ENCODE_ALG_NONE 0 +#define IW_ENCODE_ALG_WEP 1 +#define IW_ENCODE_ALG_TKIP 2 +#define IW_ENCODE_ALG_CCMP 3 +#define IW_ENCODE_ALG_PMK 4 +#define IW_ENCODE_ALG_AES_CMAC 5 +/* struct iw_encode_ext ->ext_flags */ +#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001 +#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002 +#define IW_ENCODE_EXT_GROUP_KEY 0x00000004 +#define IW_ENCODE_EXT_SET_TX_KEY 0x00000008 + +/* IWEVMICHAELMICFAILURE : struct iw_michaelmicfailure ->flags */ +#define IW_MICFAILURE_KEY_ID 0x00000003 /* Key ID 0..3 */ +#define IW_MICFAILURE_GROUP 0x00000004 +#define IW_MICFAILURE_PAIRWISE 0x00000008 +#define IW_MICFAILURE_STAKEY 0x00000010 +#define IW_MICFAILURE_COUNT 0x00000060 /* 1 or 2 (0 = count not supported) + */ + +/* Bit field values for enc_capa in struct iw_range */ +#define IW_ENC_CAPA_WPA 0x00000001 +#define IW_ENC_CAPA_WPA2 0x00000002 +#define IW_ENC_CAPA_CIPHER_TKIP 0x00000004 +#define IW_ENC_CAPA_CIPHER_CCMP 0x00000008 +#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010 + +/* Event capability macros - in (struct iw_range *)->event_capa + * Because we have more than 32 possible events, we use an array of + * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */ +#define IW_EVENT_CAPA_BASE(cmd) ((cmd >= SIOCIWFIRSTPRIV) ? 
\ + (cmd - SIOCIWFIRSTPRIV + 0x60) : \ + (cmd - SIOCIWFIRST)) +#define IW_EVENT_CAPA_INDEX(cmd) (IW_EVENT_CAPA_BASE(cmd) >> 5) +#define IW_EVENT_CAPA_MASK(cmd) (1 << (IW_EVENT_CAPA_BASE(cmd) & 0x1F)) +/* Event capability constants - event autogenerated by the kernel + * This list is valid for most 802.11 devices, customise as needed... */ +#define IW_EVENT_CAPA_K_0 (IW_EVENT_CAPA_MASK(0x8B04) | \ + IW_EVENT_CAPA_MASK(0x8B06) | \ + IW_EVENT_CAPA_MASK(0x8B1A)) +#define IW_EVENT_CAPA_K_1 (IW_EVENT_CAPA_MASK(0x8B2A)) +/* "Easy" macro to set events in iw_range (less efficient) */ +#define IW_EVENT_CAPA_SET(event_capa, cmd) (event_capa[IW_EVENT_CAPA_INDEX(cmd)] |= IW_EVENT_CAPA_MASK(cmd)) +#define IW_EVENT_CAPA_SET_KERNEL(event_capa) {event_capa[0] |= IW_EVENT_CAPA_K_0; event_capa[1] |= IW_EVENT_CAPA_K_1; } + + +/****************************** TYPES ******************************/ + +/* --------------------------- SUBTYPES --------------------------- */ +/* + * Generic format for most parameters that fit in an int + */ +struct iw_param +{ + __s32 value; /* The value of the parameter itself */ + __u8 fixed; /* Hardware should not use auto select */ + __u8 disabled; /* Disable the feature */ + __u16 flags; /* Various specifc flags (if any) */ +}; + +/* + * For all data larger than 16 octets, we need to use a + * pointer to memory allocated in user space. + */ +struct iw_point +{ + void __user *pointer; /* Pointer to the data (in user space) */ + __u16 length; /* number of fields or size in bytes */ + __u16 flags; /* Optional params */ +}; + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT + +#include <linux/compat.h> + +struct compat_iw_point { + compat_caddr_t pointer; + __u16 length; + __u16 flags; +}; +#endif +#endif + +/* + * A frequency + * For numbers lower than 10^9, we encode the number in 'm' and + * set 'e' to 0 + * For number greater than 10^9, we divide it by the lowest power + * of 10 to get 'm' lower than 10^9, with 'm'= f / (10^'e')... + * The power of 10 is in 'e', the result of the division is in 'm'. + */ +struct iw_freq +{ + __s32 m; /* Mantissa */ + __s16 e; /* Exponent */ + __u8 i; /* List index (when in range struct) */ + __u8 flags; /* Flags (fixed/auto) */ +}; + +/* + * Quality of the link + */ +struct iw_quality +{ + __u8 qual; /* link quality (%retries, SNR, + %missed beacons or better...) */ + __u8 level; /* signal level (dBm) */ + __u8 noise; /* noise level (dBm) */ + __u8 updated; /* Flags to know if updated */ +}; + +/* + * Packet discarded in the wireless adapter due to + * "wireless" specific problems... + * Note : the list of counter and statistics in net_device_stats + * is already pretty exhaustive, and you should use that first. + * This is only additional stats... + */ +struct iw_discarded +{ + __u32 nwid; /* Rx : Wrong nwid/essid */ + __u32 code; /* Rx : Unable to code/decode (WEP) */ + __u32 fragment; /* Rx : Can't perform MAC reassembly */ + __u32 retries; /* Tx : Max MAC retries num reached */ + __u32 misc; /* Others cases */ +}; + +/* + * Packet/Time period missed in the wireless adapter due to + * "wireless" specific problems... 
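The mantissa/exponent encoding of struct iw_freq above decodes to m * 10^e. A small helper (sketch, needs <linux/wireless.h>); by the convention documented in union iwreq_data further down, small values with e == 0 are channel numbers rather than frequencies in Hz.

/* Sketch: decode struct iw_freq into Hz. */
static double iw_freq_to_hz(const struct iw_freq *freq)
{
	double hz = (double)freq->m;
	int i;

	for (i = 0; i < freq->e; i++)
		hz *= 10.0;
	return hz;
}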
+ */ +struct iw_missed +{ + __u32 beacon; /* Missed beacons/superframe */ +}; + +/* + * Quality range (for spy threshold) + */ +struct iw_thrspy +{ + struct sockaddr addr; /* Source address (hw/mac) */ + struct iw_quality qual; /* Quality of the link */ + struct iw_quality low; /* Low threshold */ + struct iw_quality high; /* High threshold */ +}; + +/* + * Optional data for scan request + * + * Note: these optional parameters are controlling parameters for the + * scanning behavior, these do not apply to getting scan results + * (SIOCGIWSCAN). Drivers are expected to keep a local BSS table and + * provide a merged results with all BSSes even if the previous scan + * request limited scanning to a subset, e.g., by specifying an SSID. + * Especially, scan results are required to include an entry for the + * current BSS if the driver is in Managed mode and associated with an AP. + */ +struct iw_scan_req +{ + __u8 scan_type; /* IW_SCAN_TYPE_{ACTIVE,PASSIVE} */ + __u8 essid_len; + __u8 num_channels; /* num entries in channel_list; + * 0 = scan all allowed channels */ + __u8 flags; /* reserved as padding; use zero, this may + * be used in the future for adding flags + * to request different scan behavior */ + struct sockaddr bssid; /* ff:ff:ff:ff:ff:ff for broadcast BSSID or + * individual address of a specific BSS */ + + /* + * Use this ESSID if IW_SCAN_THIS_ESSID flag is used instead of using + * the current ESSID. This allows scan requests for specific ESSID + * without having to change the current ESSID and potentially breaking + * the current association. + */ + __u8 essid[IW_ESSID_MAX_SIZE]; + + /* + * Optional parameters for changing the default scanning behavior. + * These are based on the MLME-SCAN.request from IEEE Std 802.11. + * TU is 1.024 ms. If these are set to 0, driver is expected to use + * reasonable default values. min_channel_time defines the time that + * will be used to wait for the first reply on each channel. If no + * replies are received, next channel will be scanned after this. If + * replies are received, total time waited on the channel is defined by + * max_channel_time. + */ + __u32 min_channel_time; /* in TU */ + __u32 max_channel_time; /* in TU */ + + struct iw_freq channel_list[IW_MAX_FREQUENCIES]; +}; + +/* ------------------------- WPA SUPPORT ------------------------- */ + +/* + * Extended data structure for get/set encoding (this is used with + * SIOCSIWENCODEEXT/SIOCGIWENCODEEXT. struct iw_point and IW_ENCODE_* + * flags are used in the same way as with SIOCSIWENCODE/SIOCGIWENCODE and + * only the data contents changes (key data -> this structure, including + * key data). + * + * If the new key is the first group key, it will be set as the default + * TX key. Otherwise, default TX key index is only changed if + * IW_ENCODE_EXT_SET_TX_KEY flag is set. + * + * Key will be changed with SIOCSIWENCODEEXT in all cases except for + * special "change TX key index" operation which is indicated by setting + * key_len = 0 and ext_flags |= IW_ENCODE_EXT_SET_TX_KEY. + * + * tx_seq/rx_seq are only used when respective + * IW_ENCODE_EXT_{TX,RX}_SEQ_VALID flag is set in ext_flags. Normal + * TKIP/CCMP operation is to set RX seq with SIOCSIWENCODEEXT and start + * TX seq from zero whenever key is changed. SIOCGIWENCODEEXT is normally + * used only by an Authenticator (AP or an IBSS station) to get the + * current TX sequence number. Using TX_SEQ_VALID for SIOCSIWENCODEEXT and + * RX_SEQ_VALID for SIOCGIWENCODEEXT are optional, but can be useful for + * debugging/testing. 
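The rules in the comment above translate into fairly mechanical user-space code. A hedged sketch of installing a pairwise TKIP key with SIOCSIWENCODEEXT; fd, ifname, the peer address, the key index and the key material are all caller-supplied, and sequence-number handling (IW_ENCODE_EXT_RX_SEQ_VALID) is omitted.

/* Sketch: install a pairwise key via SIOCSIWENCODEEXT.
 * Needs <linux/wireless.h>, <sys/ioctl.h>, <stdlib.h>, <string.h>. */
static int set_key_ext(int fd, const char *ifname,
		       const struct sockaddr *peer, int key_idx,
		       const unsigned char *key, size_t key_len)
{
	struct iw_encode_ext *ext;
	struct iwreq wrq;
	int ret;

	ext = calloc(1, sizeof(*ext) + key_len);	/* key[] is a flexible array */
	if (!ext)
		return -1;

	ext->alg = IW_ENCODE_ALG_TKIP;
	ext->addr = *peer;				/* unicast address -> pairwise key */
	ext->key_len = key_len;
	memcpy(ext->key, key, key_len);

	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_ifrn.ifrn_name, ifname, IFNAMSIZ - 1);
	wrq.u.encoding.pointer = ext;
	wrq.u.encoding.length  = sizeof(*ext) + key_len;
	wrq.u.encoding.flags   = key_idx + 1;		/* key index is 1-based here */

	ret = ioctl(fd, SIOCSIWENCODEEXT, &wrq);
	free(ext);
	return ret;
}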
+ */ +struct iw_encode_ext +{ + __u32 ext_flags; /* IW_ENCODE_EXT_* */ + __u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ + __u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ + struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast + * (group) keys or unicast address for + * individual keys */ + __u16 alg; /* IW_ENCODE_ALG_* */ + __u16 key_len; + __u8 key[0]; +}; + +/* SIOCSIWMLME data */ +struct iw_mlme +{ + __u16 cmd; /* IW_MLME_* */ + __u16 reason_code; + struct sockaddr addr; +}; + +/* SIOCSIWPMKSA data */ +#define IW_PMKSA_ADD 1 +#define IW_PMKSA_REMOVE 2 +#define IW_PMKSA_FLUSH 3 + +#define IW_PMKID_LEN 16 + +struct iw_pmksa +{ + __u32 cmd; /* IW_PMKSA_* */ + struct sockaddr bssid; + __u8 pmkid[IW_PMKID_LEN]; +}; + +/* IWEVMICHAELMICFAILURE data */ +struct iw_michaelmicfailure +{ + __u32 flags; + struct sockaddr src_addr; + __u8 tsc[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ +}; + +/* IWEVPMKIDCAND data */ +#define IW_PMKID_CAND_PREAUTH 0x00000001 /* RNS pre-authentication enabled */ +struct iw_pmkid_cand +{ + __u32 flags; /* IW_PMKID_CAND_* */ + __u32 index; /* the smaller the index, the higher the + * priority */ + struct sockaddr bssid; +}; + +/* ------------------------ WIRELESS STATS ------------------------ */ +/* + * Wireless statistics (used for /proc/net/wireless) + */ +struct iw_statistics +{ + __u16 status; /* Status + * - device dependent for now */ + + struct iw_quality qual; /* Quality of the link + * (instant/mean/max) */ + struct iw_discarded discard; /* Packet discarded counts */ + struct iw_missed miss; /* Packet missed counts */ +}; + +/* ------------------------ IOCTL REQUEST ------------------------ */ +/* + * This structure defines the payload of an ioctl, and is used + * below. + * + * Note that this structure should fit on the memory footprint + * of iwreq (which is the same as ifreq), which mean a max size of + * 16 octets = 128 bits. Warning, pointers might be 64 bits wide... + * You should check this when increasing the structures defined + * above in this file... + */ +union iwreq_data +{ + /* Config - generic */ + char name[IFNAMSIZ]; + /* Name : used to verify the presence of wireless extensions. + * Name of the protocol/provider... */ + + struct iw_point essid; /* Extended network name */ + struct iw_param nwid; /* network id (or domain - the cell) */ + struct iw_freq freq; /* frequency or channel : + * 0-1000 = channel + * > 1000 = frequency in Hz */ + + struct iw_param sens; /* signal level threshold */ + struct iw_param bitrate; /* default bit rate */ + struct iw_param txpower; /* default transmit power */ + struct iw_param rts; /* RTS threshold threshold */ + struct iw_param frag; /* Fragmentation threshold */ + __u32 mode; /* Operation mode */ + struct iw_param retry; /* Retry limits & lifetime */ + + struct iw_point encoding; /* Encoding stuff : tokens */ + struct iw_param power; /* PM duration/timeout */ + struct iw_quality qual; /* Quality part of statistics */ + + struct sockaddr ap_addr; /* Access point address */ + struct sockaddr addr; /* Destination address (hw/mac) */ + + struct iw_param param; /* Other small parameters */ + struct iw_point data; /* Other large parameters */ +}; + +/* + * The structure to exchange data for ioctl. + * This structure is the same as 'struct ifreq', but (re)defined for + * convenience... + * Do I need to remind you about structure size (32 octets) ? + */ +struct iwreq +{ + union + { + char ifrn_name[IFNAMSIZ]; /* if name, e.g. 
"eth0" */ + } ifr_ifrn; + + /* Data part (defined just above) */ + union iwreq_data u; +}; + +/* -------------------------- IOCTL DATA -------------------------- */ +/* + * For those ioctl which want to exchange mode data that what could + * fit in the above structure... + */ + +/* + * Range of parameters + */ + +struct iw_range +{ + /* Informative stuff (to choose between different interface) */ + __u32 throughput; /* To give an idea... */ + /* In theory this value should be the maximum benchmarked + * TCP/IP throughput, because with most of these devices the + * bit rate is meaningless (overhead an co) to estimate how + * fast the connection will go and pick the fastest one. + * I suggest people to play with Netperf or any benchmark... + */ + + /* NWID (or domain id) */ + __u32 min_nwid; /* Minimal NWID we are able to set */ + __u32 max_nwid; /* Maximal NWID we are able to set */ + + /* Old Frequency (backward compat - moved lower ) */ + __u16 old_num_channels; + __u8 old_num_frequency; + + /* Scan capabilities */ + __u8 scan_capa; /* IW_SCAN_CAPA_* bit field */ + + /* Wireless event capability bitmasks */ + __u32 event_capa[6]; + + /* signal level threshold range */ + __s32 sensitivity; + + /* Quality of link & SNR stuff */ + /* Quality range (link, level, noise) + * If the quality is absolute, it will be in the range [0 ; max_qual], + * if the quality is dBm, it will be in the range [max_qual ; 0]. + * Don't forget that we use 8 bit arithmetics... */ + struct iw_quality max_qual; /* Quality of the link */ + /* This should contain the average/typical values of the quality + * indicator. This should be the threshold between a "good" and + * a "bad" link (example : monitor going from green to orange). + * Currently, user space apps like quality monitors don't have any + * way to calibrate the measurement. With this, they can split + * the range between 0 and max_qual in different quality level + * (using a geometric subdivision centered on the average). + * I expect that people doing the user space apps will feedback + * us on which value we need to put in each driver... 
*/ + struct iw_quality avg_qual; /* Quality of the link */ + + /* Rates */ + __u8 num_bitrates; /* Number of entries in the list */ + __s32 bitrate[IW_MAX_BITRATES]; /* list, in bps */ + + /* RTS threshold */ + __s32 min_rts; /* Minimal RTS threshold */ + __s32 max_rts; /* Maximal RTS threshold */ + + /* Frag threshold */ + __s32 min_frag; /* Minimal frag threshold */ + __s32 max_frag; /* Maximal frag threshold */ + + /* Power Management duration & timeout */ + __s32 min_pmp; /* Minimal PM period */ + __s32 max_pmp; /* Maximal PM period */ + __s32 min_pmt; /* Minimal PM timeout */ + __s32 max_pmt; /* Maximal PM timeout */ + __u16 pmp_flags; /* How to decode max/min PM period */ + __u16 pmt_flags; /* How to decode max/min PM timeout */ + __u16 pm_capa; /* What PM options are supported */ + + /* Encoder stuff */ + __u16 encoding_size[IW_MAX_ENCODING_SIZES]; /* Different token sizes */ + __u8 num_encoding_sizes; /* Number of entry in the list */ + __u8 max_encoding_tokens; /* Max number of tokens */ + /* For drivers that need a "login/passwd" form */ + __u8 encoding_login_index; /* token index for login token */ + + /* Transmit power */ + __u16 txpower_capa; /* What options are supported */ + __u8 num_txpower; /* Number of entries in the list */ + __s32 txpower[IW_MAX_TXPOWER]; /* list, in bps */ + + /* Wireless Extension version info */ + __u8 we_version_compiled; /* Must be WIRELESS_EXT */ + __u8 we_version_source; /* Last update of source */ + + /* Retry limits and lifetime */ + __u16 retry_capa; /* What retry options are supported */ + __u16 retry_flags; /* How to decode max/min retry limit */ + __u16 r_time_flags; /* How to decode max/min retry life */ + __s32 min_retry; /* Minimal number of retries */ + __s32 max_retry; /* Maximal number of retries */ + __s32 min_r_time; /* Minimal retry lifetime */ + __s32 max_r_time; /* Maximal retry lifetime */ + + /* Frequency */ + __u16 num_channels; /* Number of channels [0; num - 1] */ + __u8 num_frequency; /* Number of entry in the list */ + struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */ + /* Note : this frequency list doesn't need to fit channel numbers, + * because each entry contain its channel index */ + + __u32 enc_capa; /* IW_ENC_CAPA_* bit field */ +}; + +/* + * Private ioctl interface information + */ + +struct iw_priv_args +{ + __u32 cmd; /* Number of the ioctl to issue */ + __u16 set_args; /* Type and number of args */ + __u16 get_args; /* Type and number of args */ + char name[IFNAMSIZ]; /* Name of the extension */ +}; + +/* ----------------------- WIRELESS EVENTS ----------------------- */ +/* + * Wireless events are carried through the rtnetlink socket to user + * space. They are encapsulated in the IFLA_WIRELESS field of + * a RTM_NEWLINK message. + */ + +/* + * A Wireless Event. Contains basically the same data as the ioctl... 
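Scan results come back from SIOCGIWSCAN (after a scan kicked off with SIOCSIWSCAN) as a packed stream of the iw_event records defined just below, each carrying its own length. The sketch that follows only decodes the fixed-size SIOCGIWAP records that mark the start of each cell; iw_point-based records (ESSID, IWEVGENIE, ...) and older WE versions need the extra offset handling that full parsers such as wpa_supplicant's driver_wext.c implement, so treat this as an illustration only.

/* Sketch: list the BSSIDs found in a SIOCGIWSCAN result buffer.
 * Needs <linux/wireless.h>, <stdio.h>, <string.h>. */
static void dump_scan_bssids(const char *buf, size_t len)
{
	size_t off = 0;

	while (off + IW_EV_LCP_LEN <= len) {
		struct iw_event iwe;

		/* The stream may be unaligned: copy the header out first. */
		memset(&iwe, 0, sizeof(iwe));
		memcpy(&iwe, buf + off, IW_EV_LCP_LEN);
		if (iwe.len < IW_EV_LCP_LEN || off + iwe.len > len)
			break;				/* truncated or malformed */

		if (iwe.cmd == SIOCGIWAP && iwe.len >= IW_EV_ADDR_LEN) {
			const unsigned char *m;

			memcpy(&iwe, buf + off, IW_EV_ADDR_LEN);
			m = (const unsigned char *)iwe.u.ap_addr.sa_data;
			printf("cell %02x:%02x:%02x:%02x:%02x:%02x\n",
			       m[0], m[1], m[2], m[3], m[4], m[5]);
		}
		off += iwe.len;				/* records are variable length */
	}
}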
+ */ +struct iw_event +{ + __u16 len; /* Real length of this stuff */ + __u16 cmd; /* Wireless IOCTL */ + union iwreq_data u; /* IOCTL fixed payload */ +}; + +/* Size of the Event prefix (including padding and alignement junk) */ +#define IW_EV_LCP_LEN (sizeof(struct iw_event) - sizeof(union iwreq_data)) +/* Size of the various events */ +#define IW_EV_CHAR_LEN (IW_EV_LCP_LEN + IFNAMSIZ) +#define IW_EV_UINT_LEN (IW_EV_LCP_LEN + sizeof(__u32)) +#define IW_EV_FREQ_LEN (IW_EV_LCP_LEN + sizeof(struct iw_freq)) +#define IW_EV_PARAM_LEN (IW_EV_LCP_LEN + sizeof(struct iw_param)) +#define IW_EV_ADDR_LEN (IW_EV_LCP_LEN + sizeof(struct sockaddr)) +#define IW_EV_QUAL_LEN (IW_EV_LCP_LEN + sizeof(struct iw_quality)) + +/* iw_point events are special. First, the payload (extra data) come at + * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second, + * we omit the pointer, so start at an offset. */ +#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \ + (char *) NULL) +#define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \ + IW_EV_POINT_OFF) + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT +struct __compat_iw_event { + __u16 len; /* Real length of this stuff */ + __u16 cmd; /* Wireless IOCTL */ + compat_caddr_t pointer; +}; +#define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer) +#define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length) + +/* Size of the various events for compat */ +#define IW_EV_COMPAT_CHAR_LEN (IW_EV_COMPAT_LCP_LEN + IFNAMSIZ) +#define IW_EV_COMPAT_UINT_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(__u32)) +#define IW_EV_COMPAT_FREQ_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_freq)) +#define IW_EV_COMPAT_PARAM_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_param)) +#define IW_EV_COMPAT_ADDR_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct sockaddr)) +#define IW_EV_COMPAT_QUAL_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_quality)) +#define IW_EV_COMPAT_POINT_LEN \ + (IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \ + IW_EV_COMPAT_POINT_OFF) +#endif +#endif + +/* Size of the Event prefix when packed in stream */ +#define IW_EV_LCP_PK_LEN (4) +/* Size of the various events when packed in stream */ +#define IW_EV_CHAR_PK_LEN (IW_EV_LCP_PK_LEN + IFNAMSIZ) +#define IW_EV_UINT_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(__u32)) +#define IW_EV_FREQ_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_freq)) +#define IW_EV_PARAM_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_param)) +#define IW_EV_ADDR_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct sockaddr)) +#define IW_EV_QUAL_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_quality)) +#define IW_EV_POINT_PK_LEN (IW_EV_LCP_PK_LEN + 4) + +#endif /* _LINUX_WIRELESS_H */ diff --git a/include/linux/wl127x-rfkill.h b/include/linux/wl127x-rfkill.h new file mode 100644 index 00000000..9057ec63 --- /dev/null +++ b/include/linux/wl127x-rfkill.h @@ -0,0 +1,35 @@ +/* + * Bluetooth TI wl127x rfkill power control via GPIO + * + * Copyright (C) 2009 Motorola, Inc. + * Copyright (C) 2008 Texas Instruments + * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _LINUX_WL127X_RFKILL_H +#define _LINUX_WL127X_RFKILL_H + +#include <linux/rfkill.h> + +struct wl127x_rfkill_platform_data { + int nshutdown_gpio; + + struct rfkill *rfkill; /* for driver only */ +}; + +#endif diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h new file mode 100644 index 00000000..0d637319 --- /dev/null +++ b/include/linux/wl12xx.h @@ -0,0 +1,81 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho <luciano.coelho@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef _LINUX_WL12XX_H +#define _LINUX_WL12XX_H + +/* Reference clock values */ +enum { + WL12XX_REFCLOCK_19 = 0, /* 19.2 MHz */ + WL12XX_REFCLOCK_26 = 1, /* 26 MHz */ + WL12XX_REFCLOCK_38 = 2, /* 38.4 MHz */ + WL12XX_REFCLOCK_52 = 3, /* 52 MHz */ + WL12XX_REFCLOCK_38_XTAL = 4, /* 38.4 MHz, XTAL */ + WL12XX_REFCLOCK_26_XTAL = 5, /* 26 MHz, XTAL */ +}; + +/* TCXO clock values */ +enum { + WL12XX_TCXOCLOCK_19_2 = 0, /* 19.2MHz */ + WL12XX_TCXOCLOCK_26 = 1, /* 26 MHz */ + WL12XX_TCXOCLOCK_38_4 = 2, /* 38.4MHz */ + WL12XX_TCXOCLOCK_52 = 3, /* 52 MHz */ + WL12XX_TCXOCLOCK_16_368 = 4, /* 16.368 MHz */ + WL12XX_TCXOCLOCK_32_736 = 5, /* 32.736 MHz */ + WL12XX_TCXOCLOCK_16_8 = 6, /* 16.8 MHz */ + WL12XX_TCXOCLOCK_33_6 = 7, /* 33.6 MHz */ +}; + +struct wl12xx_platform_data { + void (*set_power)(bool enable); + /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */ + int irq; + bool use_eeprom; + int board_ref_clock; + int board_tcxo_clock; + unsigned long platform_quirks; + bool pwr_in_suspend; + + struct wl1271_if_operations *ops; +}; + +/* Platform does not support level trigger interrupts */ +#define WL12XX_PLATFORM_QUIRK_EDGE_IRQ BIT(0) + +#ifdef CONFIG_WL12XX_PLATFORM_DATA + +int wl12xx_set_platform_data(const struct wl12xx_platform_data *data); + +#else + +static inline +int wl12xx_set_platform_data(const struct wl12xx_platform_data *data) +{ + return -ENOSYS; +} + +#endif + +struct wl12xx_platform_data *wl12xx_get_platform_data(void); + +#endif diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h new file mode 100644 index 00000000..40ec3482 --- /dev/null +++ b/include/linux/wlan_plat.h @@ -0,0 +1,27 @@ +/* include/linux/wlan_plat.h + * + * Copyright (C) 2010 Google, Inc. 
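A board file would typically feed wl12xx_set_platform_data() above from its init code; the sketch below mirrors that pattern. The GPIO number, the callback body and all example_* names are assumptions for illustration only.

/* Sketch: board-side registration of wl12xx platform data. */
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/wl12xx.h>

#define EXAMPLE_WLAN_IRQ_GPIO	42	/* hypothetical WLAN_IRQ line */

static void example_wl12xx_set_power(bool enable)
{
	/* toggle the WLAN enable GPIO or regulator here */
}

static struct wl12xx_platform_data example_wlan_data __initdata = {
	.set_power	 = example_wl12xx_set_power,
	.board_ref_clock = WL12XX_REFCLOCK_38,		/* 38.4 MHz */
	.board_tcxo_clock = WL12XX_TCXOCLOCK_38_4,
};

static void __init example_board_wlan_init(void)
{
	example_wlan_data.irq = gpio_to_irq(EXAMPLE_WLAN_IRQ_GPIO);
	if (wl12xx_set_platform_data(&example_wlan_data))
		pr_err("error setting wl12xx platform data\n");
}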
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _LINUX_WLAN_PLAT_H_ +#define _LINUX_WLAN_PLAT_H_ + +struct wifi_platform_data { + int (*set_power)(int val); + int (*set_reset)(int val); + int (*set_carddetect)(int val); + void *(*mem_prealloc)(int section, unsigned long size); + int (*get_mac_addr)(unsigned char *buf); + void *(*get_country_code)(char *ccode); +}; + +#endif diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h new file mode 100644 index 00000000..fd98bb96 --- /dev/null +++ b/include/linux/wm97xx.h @@ -0,0 +1,337 @@ + +/* + * Register bits and API for Wolfson WM97xx series of codecs + */ + +#ifndef _LINUX_WM97XX_H +#define _LINUX_WM97XX_H + +#include <sound/core.h> +#include <sound/pcm.h> +#include <sound/ac97_codec.h> +#include <sound/initval.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/input.h> /* Input device layer */ +#include <linux/platform_device.h> + +/* + * WM97xx variants + */ +#define WM97xx_GENERIC 0x0000 +#define WM97xx_WM1613 0x1613 + +/* + * WM97xx AC97 Touchscreen registers + */ +#define AC97_WM97XX_DIGITISER1 0x76 +#define AC97_WM97XX_DIGITISER2 0x78 +#define AC97_WM97XX_DIGITISER_RD 0x7a +#define AC97_WM9713_DIG1 0x74 +#define AC97_WM9713_DIG2 AC97_WM97XX_DIGITISER1 +#define AC97_WM9713_DIG3 AC97_WM97XX_DIGITISER2 + +/* + * WM97xx register bits + */ +#define WM97XX_POLL 0x8000 /* initiate a polling measurement */ +#define WM97XX_ADCSEL_X 0x1000 /* x coord measurement */ +#define WM97XX_ADCSEL_Y 0x2000 /* y coord measurement */ +#define WM97XX_ADCSEL_PRES 0x3000 /* pressure measurement */ +#define WM97XX_AUX_ID1 0x4000 +#define WM97XX_AUX_ID2 0x5000 +#define WM97XX_AUX_ID3 0x6000 +#define WM97XX_AUX_ID4 0x7000 +#define WM97XX_ADCSEL_MASK 0x7000 /* ADC selection mask */ +#define WM97XX_COO 0x0800 /* enable coordinate mode */ +#define WM97XX_CTC 0x0400 /* enable continuous mode */ +#define WM97XX_CM_RATE_93 0x0000 /* 93.75Hz continuous rate */ +#define WM97XX_CM_RATE_187 0x0100 /* 187.5Hz continuous rate */ +#define WM97XX_CM_RATE_375 0x0200 /* 375Hz continuous rate */ +#define WM97XX_CM_RATE_750 0x0300 /* 750Hz continuous rate */ +#define WM97XX_CM_RATE_8K 0x00f0 /* 8kHz continuous rate */ +#define WM97XX_CM_RATE_12K 0x01f0 /* 12kHz continuous rate */ +#define WM97XX_CM_RATE_24K 0x02f0 /* 24kHz continuous rate */ +#define WM97XX_CM_RATE_48K 0x03f0 /* 48kHz continuous rate */ +#define WM97XX_CM_RATE_MASK 0x03f0 +#define WM97XX_RATE(i) (((i & 3) << 8) | ((i & 4) ? 
0xf0 : 0)) +#define WM97XX_DELAY(i) ((i << 4) & 0x00f0) /* sample delay times */ +#define WM97XX_DELAY_MASK 0x00f0 +#define WM97XX_SLEN 0x0008 /* slot read back enable */ +#define WM97XX_SLT(i) ((i - 5) & 0x7) /* panel slot (5-11) */ +#define WM97XX_SLT_MASK 0x0007 +#define WM97XX_PRP_DETW 0x4000 /* detect on, digitise off, wake */ +#define WM97XX_PRP_DET 0x8000 /* detect on, digitise off, no wake */ +#define WM97XX_PRP_DET_DIG 0xc000 /* setect on, digitise on */ +#define WM97XX_RPR 0x2000 /* wake up on pen down */ +#define WM97XX_PEN_DOWN 0x8000 /* pen is down */ + +/* WM9712 Bits */ +#define WM9712_45W 0x1000 /* set for 5-wire touchscreen */ +#define WM9712_PDEN 0x0800 /* measure only when pen down */ +#define WM9712_WAIT 0x0200 /* wait until adc is read before next sample */ +#define WM9712_PIL 0x0100 /* current used for pressure measurement. set 400uA else 200uA */ +#define WM9712_MASK_HI 0x0040 /* hi on mask pin (47) stops conversions */ +#define WM9712_MASK_EDGE 0x0080 /* rising/falling edge on pin delays sample */ +#define WM9712_MASK_SYNC 0x00c0 /* rising/falling edge on mask initiates sample */ +#define WM9712_RPU(i) (i&0x3f) /* internal pull up on pen detect (64k / rpu) */ +#define WM9712_PD(i) (0x1 << i) /* power management */ + +/* WM9712 Registers */ +#define AC97_WM9712_POWER 0x24 +#define AC97_WM9712_REV 0x58 + +/* WM9705 Bits */ +#define WM9705_PDEN 0x1000 /* measure only when pen is down */ +#define WM9705_PINV 0x0800 /* inverts sense of pen down output */ +#define WM9705_BSEN 0x0400 /* BUSY flag enable, pin47 is 1 when busy */ +#define WM9705_BINV 0x0200 /* invert BUSY (pin47) output */ +#define WM9705_WAIT 0x0100 /* wait until adc is read before next sample */ +#define WM9705_PIL 0x0080 /* current used for pressure measurement. set 400uA else 200uA */ +#define WM9705_PHIZ 0x0040 /* set PHONE and PCBEEP inputs to high impedance */ +#define WM9705_MASK_HI 0x0010 /* hi on mask stops conversions */ +#define WM9705_MASK_EDGE 0x0020 /* rising/falling edge on pin delays sample */ +#define WM9705_MASK_SYNC 0x0030 /* rising/falling edge on mask initiates sample */ +#define WM9705_PDD(i) (i & 0x000f) /* pen detect comparator threshold */ + + +/* WM9713 Bits */ +#define WM9713_PDPOL 0x0400 /* Pen down polarity */ +#define WM9713_POLL 0x0200 /* initiate a polling measurement */ +#define WM9713_CTC 0x0100 /* enable continuous mode */ +#define WM9713_ADCSEL_X 0x0002 /* X measurement */ +#define WM9713_ADCSEL_Y 0x0004 /* Y measurement */ +#define WM9713_ADCSEL_PRES 0x0008 /* Pressure measurement */ +#define WM9713_COO 0x0001 /* enable coordinate mode */ +#define WM9713_45W 0x1000 /* set for 5 wire panel */ +#define WM9713_PDEN 0x0800 /* measure only when pen down */ +#define WM9713_ADCSEL_MASK 0x00fe /* ADC selection mask */ +#define WM9713_WAIT 0x0200 /* coordinate wait */ + +/* AUX ADC ID's */ +#define TS_COMP1 0x0 +#define TS_COMP2 0x1 +#define TS_BMON 0x2 +#define TS_WIPER 0x3 + +/* ID numbers */ +#define WM97XX_ID1 0x574d +#define WM9712_ID2 0x4c12 +#define WM9705_ID2 0x4c05 +#define WM9713_ID2 0x4c13 + +/* Codec GPIO's */ +#define WM97XX_MAX_GPIO 16 +#define WM97XX_GPIO_1 (1 << 1) +#define WM97XX_GPIO_2 (1 << 2) +#define WM97XX_GPIO_3 (1 << 3) +#define WM97XX_GPIO_4 (1 << 4) +#define WM97XX_GPIO_5 (1 << 5) +#define WM97XX_GPIO_6 (1 << 6) +#define WM97XX_GPIO_7 (1 << 7) +#define WM97XX_GPIO_8 (1 << 8) +#define WM97XX_GPIO_9 (1 << 9) +#define WM97XX_GPIO_10 (1 << 10) +#define WM97XX_GPIO_11 (1 << 11) +#define WM97XX_GPIO_12 (1 << 12) +#define WM97XX_GPIO_13 (1 << 13) +#define 
WM97XX_GPIO_14 (1 << 14) +#define WM97XX_GPIO_15 (1 << 15) + + +#define AC97_LINK_FRAME 21 /* time in uS for AC97 link frame */ + + +/*---------------- Return codes from sample reading functions ---------------*/ + +/* More data is available; call the sample gathering function again */ +#define RC_AGAIN 0x00000001 +/* The returned sample is valid */ +#define RC_VALID 0x00000002 +/* The pen is up (the first RC_VALID without RC_PENUP means pen is down) */ +#define RC_PENUP 0x00000004 +/* The pen is down (RC_VALID implies RC_PENDOWN, but sometimes it is helpful + to tell the handler that the pen is down but we don't know yet his coords, + so the handler should not sleep or wait for pendown irq) */ +#define RC_PENDOWN 0x00000008 + +/* + * The wm97xx driver provides a private API for writing platform-specific + * drivers. + */ + +/* The structure used to return arch specific sampled data into */ +struct wm97xx_data { + int x; + int y; + int p; +}; + +/* + * Codec GPIO status + */ +enum wm97xx_gpio_status { + WM97XX_GPIO_HIGH, + WM97XX_GPIO_LOW +}; + +/* + * Codec GPIO direction + */ +enum wm97xx_gpio_dir { + WM97XX_GPIO_IN, + WM97XX_GPIO_OUT +}; + +/* + * Codec GPIO polarity + */ +enum wm97xx_gpio_pol { + WM97XX_GPIO_POL_HIGH, + WM97XX_GPIO_POL_LOW +}; + +/* + * Codec GPIO sticky + */ +enum wm97xx_gpio_sticky { + WM97XX_GPIO_STICKY, + WM97XX_GPIO_NOTSTICKY +}; + +/* + * Codec GPIO wake + */ +enum wm97xx_gpio_wake { + WM97XX_GPIO_WAKE, + WM97XX_GPIO_NOWAKE +}; + +/* + * Digitiser ioctl commands + */ +#define WM97XX_DIG_START 0x1 +#define WM97XX_DIG_STOP 0x2 +#define WM97XX_PHY_INIT 0x3 +#define WM97XX_AUX_PREPARE 0x4 +#define WM97XX_DIG_RESTORE 0x5 + +struct wm97xx; + +extern struct wm97xx_codec_drv wm9705_codec; +extern struct wm97xx_codec_drv wm9712_codec; +extern struct wm97xx_codec_drv wm9713_codec; + +/* + * Codec driver interface - allows mapping to WM9705/12/13 and newer codecs + */ +struct wm97xx_codec_drv { + u16 id; + char *name; + + /* read 1 sample */ + int (*poll_sample) (struct wm97xx *, int adcsel, int *sample); + + /* read X,Y,[P] in poll */ + int (*poll_touch) (struct wm97xx *, struct wm97xx_data *); + + int (*acc_enable) (struct wm97xx *, int enable); + void (*phy_init) (struct wm97xx *); + void (*dig_enable) (struct wm97xx *, int enable); + void (*dig_restore) (struct wm97xx *); + void (*aux_prepare) (struct wm97xx *); +}; + + +/* Machine specific and accelerated touch operations */ +struct wm97xx_mach_ops { + + /* accelerated touch readback - coords are transmited on AC97 link */ + int acc_enabled; + void (*acc_pen_up) (struct wm97xx *); + int (*acc_pen_down) (struct wm97xx *); + int (*acc_startup) (struct wm97xx *); + void (*acc_shutdown) (struct wm97xx *); + + /* interrupt mask control - required for accelerated operation */ + void (*irq_enable) (struct wm97xx *, int enable); + + /* GPIO pin used for accelerated operation */ + int irq_gpio; + + /* pre and post sample - can be used to minimise any analog noise */ + void (*pre_sample) (int); /* function to run before sampling */ + void (*post_sample) (int); /* function to run after sampling */ +}; + +struct wm97xx { + u16 dig[3], id, gpio[6], misc; /* Cached codec registers */ + u16 dig_save[3]; /* saved during aux reading */ + struct wm97xx_codec_drv *codec; /* attached codec driver*/ + struct input_dev *input_dev; /* touchscreen input device */ + struct snd_ac97 *ac97; /* ALSA codec access */ + struct device *dev; /* ALSA device */ + struct platform_device *battery_dev; + struct platform_device *touch_dev; + struct 
wm97xx_mach_ops *mach_ops; + struct mutex codec_mutex; + struct delayed_work ts_reader; /* Used to poll touchscreen */ + unsigned long ts_reader_interval; /* Current interval for timer */ + unsigned long ts_reader_min_interval; /* Minimum interval */ + unsigned int pen_irq; /* Pen IRQ number in use */ + struct workqueue_struct *ts_workq; + struct work_struct pen_event_work; + u16 acc_slot; /* AC97 slot used for acc touch data */ + u16 acc_rate; /* acc touch data rate */ + unsigned pen_is_down:1; /* Pen is down */ + unsigned aux_waiting:1; /* aux measurement waiting */ + unsigned pen_probably_down:1; /* used in polling mode */ + u16 variant; /* WM97xx chip variant */ + u16 suspend_mode; /* PRP in suspend mode */ +}; + +struct wm97xx_batt_pdata { + int batt_aux; + int temp_aux; + int charge_gpio; + int min_voltage; + int max_voltage; + int batt_div; + int batt_mult; + int temp_div; + int temp_mult; + int batt_tech; + char *batt_name; +}; + +struct wm97xx_pdata { + struct wm97xx_batt_pdata *batt_pdata; /* battery data */ +}; + +/* + * Codec GPIO access (not supported on WM9705) + * This can be used to set/get codec GPIO and Virtual GPIO status. + */ +enum wm97xx_gpio_status wm97xx_get_gpio(struct wm97xx *wm, u32 gpio); +void wm97xx_set_gpio(struct wm97xx *wm, u32 gpio, + enum wm97xx_gpio_status status); +void wm97xx_config_gpio(struct wm97xx *wm, u32 gpio, + enum wm97xx_gpio_dir dir, + enum wm97xx_gpio_pol pol, + enum wm97xx_gpio_sticky sticky, + enum wm97xx_gpio_wake wake); + +void wm97xx_set_suspend_mode(struct wm97xx *wm, u16 mode); + +/* codec AC97 IO access */ +int wm97xx_reg_read(struct wm97xx *wm, u16 reg); +void wm97xx_reg_write(struct wm97xx *wm, u16 reg, u16 val); + +/* aux adc readback */ +int wm97xx_read_aux_adc(struct wm97xx *wm, u16 adcsel); + +/* machine ops */ +int wm97xx_register_mach_ops(struct wm97xx *, struct wm97xx_mach_ops *); +void wm97xx_unregister_mach_ops(struct wm97xx *); + +#endif diff --git a/include/linux/wmt-efuse-stub.h b/include/linux/wmt-efuse-stub.h new file mode 100755 index 00000000..d0111014 --- /dev/null +++ b/include/linux/wmt-efuse-stub.h @@ -0,0 +1,32 @@ +/*++ + * + * Copyright c 2014 WonderMedia Technologies, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * WonderMedia Technologies, Inc. 
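Returning to the wm97xx helpers declared above: a machine driver might configure a codec GPIO and take a one-shot AUX ADC reading as in the sketch below. The choice of GPIO2 as the pen-down line and TS_BMON as the input of interest is board specific and assumed here purely for illustration.

/* Sketch: machine-level use of the wm97xx GPIO and AUX ADC helpers.
 * Needs <linux/wm97xx.h> and <linux/device.h>. */
static int example_wm97xx_setup(struct wm97xx *wm)
{
	int batt;

	/* Route pen-down detection to GPIO2: input, active high, sticky,
	 * and allowed to wake the system (not supported on WM9705). */
	wm97xx_config_gpio(wm, WM97XX_GPIO_2, WM97XX_GPIO_IN,
			   WM97XX_GPIO_POL_HIGH, WM97XX_GPIO_STICKY,
			   WM97XX_GPIO_WAKE);

	/* One-shot reading of the battery monitor AUX input. */
	batt = wm97xx_read_aux_adc(wm, TS_BMON);
	dev_info(wm->dev, "battery AUX ADC reading: %d\n", batt);
	return 0;
}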
+ * 4F, 533, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C +--*/ + +#ifndef WMT_EFUSE_STUB_H +/* To assert that only one occurrence is included */ +#define WMT_EFUSE_STUB_H + +typedef int (*EFUSE_READ_OTP_STUB) (int type, unsigned char *rbuf, int rlen); + +extern EFUSE_READ_OTP_STUB wmt_efuse_read_otp; + +#endif /* ifndef WMT_EFUSE_STUB */ + +/*=== END wmt-efuse-stub.h ==========================================================*/ diff --git a/include/linux/wmt-mb.h b/include/linux/wmt-mb.h new file mode 100755 index 00000000..8c1a48d7 --- /dev/null +++ b/include/linux/wmt-mb.h @@ -0,0 +1,101 @@ +/*++ + * WonderMedia Memory Block driver + * + * Copyright c 2010 WonderMedia Technologies, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * WonderMedia Technologies, Inc. + * 10F, 529, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C +--*/ +#ifndef MEMBLOCK_H +#define MEMBLOCK_H + +#include <linux/types.h> + +struct prdt_struct{ + unsigned int addr; + unsigned short size; + unsigned short reserve : 15; + unsigned short EDT : 1; +}__attribute__((packed)) ; + +struct mmu_table_info{ + unsigned int addr; + unsigned int size; + unsigned int offset; +}__attribute__((packed)) ; + +// 1 presents the task init that would never be released +#define MB_DEF_TGID 0 + +#ifdef CONFIG_WMT_MB_RESERVE_FROM_IO +#define mb_virt_to_phys(v) mb_do_virt_to_phys(v,MB_DEF_TGID,THE_MB_USER) +#define mb_phys_to_virt(p) mb_do_phys_to_virt(p,MB_DEF_TGID,THE_MB_USER) +#else +#define mb_virt_to_phys(v) virt_to_phys(v) +#define mb_phys_to_virt(p) phys_to_virt(p) +#endif +#define mb_user_to_virt(u) mb_do_user_to_virt(u,MB_DEF_TGID,THE_MB_USER) +#define mb_user_to_phys(u) mb_do_user_to_phys(u,MB_DEF_TGID,THE_MB_USER) +/* all addresses are physical */ +#define mb_alloc(s) mb_do_alloc(s,MB_DEF_TGID,THE_MB_USER) +#define mb_free(p) mb_do_free(p,MB_DEF_TGID,THE_MB_USER) +#define mb_get(p) mb_do_get(p,MB_DEF_TGID,THE_MB_USER) +#define mb_put(p) mb_do_put(p,MB_DEF_TGID,THE_MB_USER) +#define mb_counter(p) mb_do_counter(p,THE_MB_USER) + +void *mb_do_phys_to_virt(unsigned long phys, pid_t, char *); +unsigned long mb_do_virt_to_phys(void *virt, pid_t, char *); +unsigned long mb_do_user_to_virt(unsigned long, pid_t, char *); +unsigned long mb_do_user_to_phys(unsigned long, pid_t, char *); +unsigned long mb_do_alloc(unsigned long, pid_t, char *); +int mb_do_free(unsigned long, pid_t, char *); +int mb_do_get(unsigned long, pid_t, char *); +int mb_do_put(unsigned long, pid_t, char *); +int mb_do_counter(unsigned long, char *); + +int user_to_prdt( + unsigned long user, + unsigned int size, + struct prdt_struct *next, + unsigned int items); + +unsigned int wmt_mmu_table_size(unsigned int size); +void wmt_mmu_table_dump(struct mmu_table_info *info); +// To fix framebuffer build issue +#if 0 +int wmt_mmu_table_check( + unsigned int mmuPhys, + unsigned int mmuSize, + unsigned int size); +unsigned int wmt_mmu_table_from_phys( + unsigned 
int mmuPhys, + unsigned int mmuSize, + unsigned int addr, + unsigned int size); +unsigned int wmt_mmu_table_from_user( + unsigned int mmuPhys, + unsigned int mmuSize, + unsigned int user, + unsigned int size); +#endif +unsigned int wmt_mmu_table_create( + unsigned int addr, + unsigned int size, + unsigned int addrType, + struct mmu_table_info *info); +unsigned int wmt_mmu_table_destroy(struct mmu_table_info *info); + +#endif diff --git a/include/linux/wmt-se.h b/include/linux/wmt-se.h new file mode 100755 index 00000000..41382643 --- /dev/null +++ b/include/linux/wmt-se.h @@ -0,0 +1,37 @@ +/*++ + * WonderMedia cipher driver + * + * Copyright c 2012 WonderMedia Technologies, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * WonderMedia Technologies, Inc. + * 4F, 533, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C +--*/ + +#ifndef WMT_SE_H +/* To assert that only one occurrence is included */ +#define WMT_SE_H + + +typedef int (*SE_FPTR)(unsigned char *buf_in, unsigned char *buf_out, int buf_len); + +extern SE_FPTR wmt_se_aes; +extern SE_FPTR wmt_se_aes_core; +extern SE_FPTR wmt_se_sha; + + +#endif /* ifndef WMT_SE_H */ + +/*=== END wmt-se.h ==========================================================*/ diff --git a/include/linux/wmt_battery.h b/include/linux/wmt_battery.h new file mode 100755 index 00000000..eaca14c1 --- /dev/null +++ b/include/linux/wmt_battery.h @@ -0,0 +1,37 @@ +/*++ +linux/arch/arm/mach-wmt/board.c + +Copyright (c) 2008 WonderMedia Technologies, Inc. + +This program is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free Software Foundation, +either version 2 of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. +You should have received a copy of the GNU General Public License along with +this program. If not, see <http://www.gnu.org/licenses/>. + +WonderMedia Technologies, Inc. +10F, 529, Chung-Cheng Road, Hsin-Tien, Taipei 231, R.O.C. +--*/ +#ifndef _LINUX_WMT_BAT_H +#define _LINUX_WMT_BAT_H + + +struct wmt_battery_info { + int batt_aux; + int temp_aux; + int charge_gpio; + int min_voltage; + int max_voltage; + int batt_div; + int batt_mult; + int temp_div; + int temp_mult; + int batt_tech; + char *batt_name; +}; + +#endif diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h new file mode 100644 index 00000000..af155450 --- /dev/null +++ b/include/linux/workqueue.h @@ -0,0 +1,450 @@ +/* + * workqueue.h --- work queue handling for Linux. 
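 *
 * A minimal usage sketch of the interfaces declared below; my_work_fn and
 * my_work are hypothetical names, not taken from this header:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("deferred work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 * Queue it on the system workqueue with schedule_work(&my_work) and, once
 * it must not run any more (for example on module removal), wait for it
 * with cancel_work_sync(&my_work).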
+ */ + +#ifndef _LINUX_WORKQUEUE_H +#define _LINUX_WORKQUEUE_H + +#include <linux/timer.h> +#include <linux/linkage.h> +#include <linux/bitops.h> +#include <linux/lockdep.h> +#include <linux/threads.h> +#include <linux/atomic.h> + +struct workqueue_struct; + +struct work_struct; +typedef void (*work_func_t)(struct work_struct *work); + +/* + * The first word is the work queue pointer and the flags rolled into + * one + */ +#define work_data_bits(work) ((unsigned long *)(&(work)->data)) + +enum { + WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ + WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ + WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */ + WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ +#ifdef CONFIG_DEBUG_OBJECTS_WORK + WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ + WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */ +#else + WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ +#endif + + WORK_STRUCT_COLOR_BITS = 4, + + WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, + WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, + WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, + WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, +#ifdef CONFIG_DEBUG_OBJECTS_WORK + WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, +#else + WORK_STRUCT_STATIC = 0, +#endif + + /* + * The last color is no color used for works which don't + * participate in workqueue flushing. + */ + WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, + WORK_NO_COLOR = WORK_NR_COLORS, + + /* special cpu IDs */ + WORK_CPU_UNBOUND = NR_CPUS, + WORK_CPU_NONE = NR_CPUS + 1, + WORK_CPU_LAST = WORK_CPU_NONE, + + /* + * Reserve 7 bits off of cwq pointer w/ debugobjects turned + * off. This makes cwqs aligned to 256 bytes and allows 15 + * workqueue flush colors. + */ + WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + + WORK_STRUCT_COLOR_BITS, + + WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, + WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, + WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS, + + /* bit mask for work_busy() return values */ + WORK_BUSY_PENDING = 1 << 0, + WORK_BUSY_RUNNING = 1 << 1, +}; + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +#ifdef CONFIG_LOCKDEP + struct lockdep_map lockdep_map; +#endif +}; + +#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) +#define WORK_DATA_STATIC_INIT() \ + ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC) + +struct delayed_work { + struct work_struct work; + struct timer_list timer; +}; + +static inline struct delayed_work *to_delayed_work(struct work_struct *work) +{ + return container_of(work, struct delayed_work, work); +} + +struct execute_work { + struct work_struct work; +}; + +#ifdef CONFIG_LOCKDEP +/* + * NB: because we have to copy the lockdep_map, setting _key + * here is required, otherwise it could get initialised to the + * copy of the lockdep_map! 
+ */ +#define __WORK_INIT_LOCKDEP_MAP(n, k) \ + .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k), +#else +#define __WORK_INIT_LOCKDEP_MAP(n, k) +#endif + +#define __WORK_INITIALIZER(n, f) { \ + .data = WORK_DATA_STATIC_INIT(), \ + .entry = { &(n).entry, &(n).entry }, \ + .func = (f), \ + __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ + } + +#define __DELAYED_WORK_INITIALIZER(n, f) { \ + .work = __WORK_INITIALIZER((n).work, (f)), \ + .timer = TIMER_INITIALIZER(NULL, 0, 0), \ + } + +#define __DEFERRED_WORK_INITIALIZER(n, f) { \ + .work = __WORK_INITIALIZER((n).work, (f)), \ + .timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0), \ + } + +#define DECLARE_WORK(n, f) \ + struct work_struct n = __WORK_INITIALIZER(n, f) + +#define DECLARE_DELAYED_WORK(n, f) \ + struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) + +#define DECLARE_DEFERRED_WORK(n, f) \ + struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f) + +/* + * initialize a work item's function pointer + */ +#define PREPARE_WORK(_work, _func) \ + do { \ + (_work)->func = (_func); \ + } while (0) + +#define PREPARE_DELAYED_WORK(_work, _func) \ + PREPARE_WORK(&(_work)->work, (_func)) + +#ifdef CONFIG_DEBUG_OBJECTS_WORK +extern void __init_work(struct work_struct *work, int onstack); +extern void destroy_work_on_stack(struct work_struct *work); +static inline unsigned int work_static(struct work_struct *work) +{ + return *work_data_bits(work) & WORK_STRUCT_STATIC; +} +#else +static inline void __init_work(struct work_struct *work, int onstack) { } +static inline void destroy_work_on_stack(struct work_struct *work) { } +static inline unsigned int work_static(struct work_struct *work) { return 0; } +#endif + +/* + * initialize all of a work item in one go + * + * NOTE! No point in using "atomic_long_set()": using a direct + * assignment of the work data initializer allows the compiler + * to generate better code. 
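 *
 * For a work item embedded in a dynamically allocated object, the runtime
 * initializers defined below are used instead of the static DECLARE_*
 * forms. A brief sketch, where struct my_dev, dev and my_poll_fn are
 * hypothetical:
 *
 *	struct my_dev {
 *		struct delayed_work poll_work;
 *	};
 *
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *	schedule_delayed_work(&dev->poll_work, msecs_to_jiffies(100));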
+ */ +#ifdef CONFIG_LOCKDEP +#define __INIT_WORK(_work, _func, _onstack) \ + do { \ + static struct lock_class_key __key; \ + \ + __init_work((_work), _onstack); \ + (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ + lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\ + INIT_LIST_HEAD(&(_work)->entry); \ + PREPARE_WORK((_work), (_func)); \ + } while (0) +#else +#define __INIT_WORK(_work, _func, _onstack) \ + do { \ + __init_work((_work), _onstack); \ + (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ + INIT_LIST_HEAD(&(_work)->entry); \ + PREPARE_WORK((_work), (_func)); \ + } while (0) +#endif + +#define INIT_WORK(_work, _func) \ + do { \ + __INIT_WORK((_work), (_func), 0); \ + } while (0) + +#define INIT_WORK_ONSTACK(_work, _func) \ + do { \ + __INIT_WORK((_work), (_func), 1); \ + } while (0) + +#define INIT_DELAYED_WORK(_work, _func) \ + do { \ + INIT_WORK(&(_work)->work, (_func)); \ + init_timer(&(_work)->timer); \ + } while (0) + +#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ + do { \ + INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ + init_timer_on_stack(&(_work)->timer); \ + } while (0) + +#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ + do { \ + INIT_WORK(&(_work)->work, (_func)); \ + init_timer_deferrable(&(_work)->timer); \ + } while (0) + +/** + * work_pending - Find out whether a work item is currently pending + * @work: The work item in question + */ +#define work_pending(work) \ + test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) + +/** + * delayed_work_pending - Find out whether a delayable work item is currently + * pending + * @work: The work item in question + */ +#define delayed_work_pending(w) \ + work_pending(&(w)->work) + +/** + * work_clear_pending - for internal use only, mark a work item as not pending + * @work: The work item in question + */ +#define work_clear_pending(work) \ + clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) + +/* + * Workqueue flags and constants. For details, please refer to + * Documentation/workqueue.txt. + */ +enum { + WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ + WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ + WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ + WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ + WQ_HIGHPRI = 1 << 4, /* high priority */ + WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ + + WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */ + WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */ + + WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ + WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ + WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, +}; + +/* unbound wq's aren't per-cpu, scale max_active according to #cpus */ +#define WQ_UNBOUND_MAX_ACTIVE \ + max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU) + +/* + * System-wide workqueues which are always present. + * + * system_wq is the one used by schedule[_delayed]_work[_on](). + * Multi-CPU multi-threaded. There are users which expect relatively + * short queue flush time. Don't queue works which can run for too + * long. + * + * system_long_wq is similar to system_wq but may host long running + * works. Queue flushing might take relatively long. + * + * system_nrt_wq is non-reentrant and guarantees that any given work + * item is never executed in parallel by multiple CPUs. Queue + * flushing might take relatively long. + * + * system_unbound_wq is unbound workqueue. 
Workers are not bound to + * any specific CPU, not concurrency managed, and all queued works are + * executed immediately as long as max_active limit is not reached and + * resources are available. + * + * system_freezable_wq is equivalent to system_wq except that it's + * freezable. + * + * system_nrt_freezable_wq is equivalent to system_nrt_wq except that + * it's freezable. + */ +extern struct workqueue_struct *system_wq; +extern struct workqueue_struct *system_long_wq; +extern struct workqueue_struct *system_nrt_wq; +extern struct workqueue_struct *system_unbound_wq; +extern struct workqueue_struct *system_freezable_wq; +extern struct workqueue_struct *system_nrt_freezable_wq; + +extern struct workqueue_struct * +__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, + struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6); + +/** + * alloc_workqueue - allocate a workqueue + * @fmt: printf format for the name of the workqueue + * @flags: WQ_* flags + * @max_active: max in-flight work items, 0 for default + * @args: args for @fmt + * + * Allocate a workqueue with the specified parameters. For detailed + * information on WQ_* flags, please refer to Documentation/workqueue.txt. + * + * The __lock_name macro dance is to guarantee that single lock_class_key + * doesn't end up with different namesm, which isn't allowed by lockdep. + * + * RETURNS: + * Pointer to the allocated workqueue on success, %NULL on failure. + */ +#ifdef CONFIG_LOCKDEP +#define alloc_workqueue(fmt, flags, max_active, args...) \ +({ \ + static struct lock_class_key __key; \ + const char *__lock_name; \ + \ + if (__builtin_constant_p(fmt)) \ + __lock_name = (fmt); \ + else \ + __lock_name = #fmt; \ + \ + __alloc_workqueue_key((fmt), (flags), (max_active), \ + &__key, __lock_name, ##args); \ +}) +#else +#define alloc_workqueue(fmt, flags, max_active, args...) \ + __alloc_workqueue_key((fmt), (flags), (max_active), \ + NULL, NULL, ##args) +#endif + +/** + * alloc_ordered_workqueue - allocate an ordered workqueue + * @fmt: printf format for the name of the workqueue + * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) + * @args: args for @fmt + * + * Allocate an ordered workqueue. An ordered workqueue executes at + * most one work item at any given time in the queued order. They are + * implemented as unbound workqueues with @max_active of one. + * + * RETURNS: + * Pointer to the allocated workqueue on success, %NULL on failure. + */ +#define alloc_ordered_workqueue(fmt, flags, args...) 
\ + alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args) + +#define create_workqueue(name) \ + alloc_workqueue((name), WQ_MEM_RECLAIM, 1) +#define create_freezable_workqueue(name) \ + alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) +#define create_singlethread_workqueue(name) \ + alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) + +extern void destroy_workqueue(struct workqueue_struct *wq); + +extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); +extern int queue_work_on(int cpu, struct workqueue_struct *wq, + struct work_struct *work); +extern int queue_delayed_work(struct workqueue_struct *wq, + struct delayed_work *work, unsigned long delay); +extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, + struct delayed_work *work, unsigned long delay); + +extern void flush_workqueue(struct workqueue_struct *wq); +extern void drain_workqueue(struct workqueue_struct *wq); +extern void flush_scheduled_work(void); + +extern int schedule_work(struct work_struct *work); +extern int schedule_work_on(int cpu, struct work_struct *work); +extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); +extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, + unsigned long delay); +extern int schedule_on_each_cpu(work_func_t func); +extern int keventd_up(void); + +int execute_in_process_context(work_func_t fn, struct execute_work *); + +extern bool flush_work(struct work_struct *work); +extern bool flush_work_sync(struct work_struct *work); +extern bool cancel_work_sync(struct work_struct *work); + +extern bool flush_delayed_work(struct delayed_work *dwork); +extern bool flush_delayed_work_sync(struct delayed_work *work); +extern bool cancel_delayed_work_sync(struct delayed_work *dwork); + +extern void workqueue_set_max_active(struct workqueue_struct *wq, + int max_active); +extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); +extern unsigned int work_cpu(struct work_struct *work); +extern unsigned int work_busy(struct work_struct *work); + +/* + * Kill off a pending schedule_delayed_work(). Note that the work callback + * function may still be running on return from cancel_delayed_work(), unless + * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or + * cancel_work_sync() to wait on it. + */ +static inline bool cancel_delayed_work(struct delayed_work *work) +{ + bool ret; + + ret = del_timer_sync(&work->timer); + if (ret) + work_clear_pending(&work->work); + return ret; +} + +/* + * Like above, but uses del_timer() instead of del_timer_sync(). This means, + * if it returns 0 the timer function may be running and the queueing is in + * progress. 
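 *
 * Teardown sketch, where dev->dwork is a hypothetical struct delayed_work:
 * when the callback must not run past a given point, use the synchronous
 * variant declared above,
 *
 *	cancel_delayed_work_sync(&dev->dwork);
 *
 * rather than cancel_delayed_work() alone, which can return while the
 * callback is still executing.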
+ */ +static inline bool __cancel_delayed_work(struct delayed_work *work) +{ + bool ret; + + ret = del_timer(&work->timer); + if (ret) + work_clear_pending(&work->work); + return ret; +} + +#ifndef CONFIG_SMP +static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) +{ + return fn(arg); +} +#else +long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_FREEZER +extern void freeze_workqueues_begin(void); +extern bool freeze_workqueues_busy(void); +extern void thaw_workqueues(void); +#endif /* CONFIG_FREEZER */ + +#endif diff --git a/include/linux/writeback.h b/include/linux/writeback.h new file mode 100644 index 00000000..a2b84f59 --- /dev/null +++ b/include/linux/writeback.h @@ -0,0 +1,201 @@ +/* + * include/linux/writeback.h + */ +#ifndef WRITEBACK_H +#define WRITEBACK_H + +#include <linux/sched.h> +#include <linux/fs.h> + +DECLARE_PER_CPU(int, dirty_throttle_leaks); + +/* + * The 1/4 region under the global dirty thresh is for smooth dirty throttling: + * + * (thresh - thresh/DIRTY_FULL_SCOPE, thresh) + * + * Further beyond, all dirtier tasks will enter a loop waiting (possibly long + * time) for the dirty pages to drop, unless written enough pages. + * + * The global dirty threshold is normally equal to the global dirty limit, + * except when the system suddenly allocates a lot of anonymous memory and + * knocks down the global dirty threshold quickly, in which case the global + * dirty limit will follow down slowly to prevent livelocking all dirtier tasks. + */ +#define DIRTY_SCOPE 8 +#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2) + +struct backing_dev_info; + +/* + * fs/fs-writeback.c + */ +enum writeback_sync_modes { + WB_SYNC_NONE, /* Don't wait on anything */ + WB_SYNC_ALL, /* Wait on every mapping */ +}; + +/* + * why some writeback work was initiated + */ +enum wb_reason { + WB_REASON_BACKGROUND, + WB_REASON_TRY_TO_FREE_PAGES, + WB_REASON_SYNC, + WB_REASON_PERIODIC, + WB_REASON_LAPTOP_TIMER, + WB_REASON_FREE_MORE_MEM, + WB_REASON_FS_FREE_SPACE, + WB_REASON_FORKER_THREAD, + + WB_REASON_MAX, +}; +extern const char *wb_reason_name[]; + +/* + * A control structure which tells the writeback code what to do. These are + * always on the stack, and hence need no locking. They are always initialised + * in a manner such that unspecified fields are set to zero. + */ +struct writeback_control { + enum writeback_sync_modes sync_mode; + long nr_to_write; /* Write this many pages, and decrement + this for each page written */ + long pages_skipped; /* Pages which were not written */ + + /* + * For a_ops->writepages(): if start or end are non-zero then this is + * a hint that the filesystem need only write out the pages inside that + * byterange. The byte at `end' is included in the writeout request. 
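 *
 * An illustrative on-stack setup for a full-range data writeout, roughly as
 * a caller of do_writepages() might build it (not copied from any in-tree
 * caller):
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};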
+ */ + loff_t range_start; + loff_t range_end; + + unsigned for_kupdate:1; /* A kupdate writeback */ + unsigned for_background:1; /* A background writeback */ + unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */ + unsigned for_reclaim:1; /* Invoked from the page allocator */ + unsigned range_cyclic:1; /* range_start is cyclic */ +}; + +/* + * fs/fs-writeback.c + */ +struct bdi_writeback; +int inode_wait(void *); +void writeback_inodes_sb(struct super_block *, enum wb_reason reason); +void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, + enum wb_reason reason); +int writeback_inodes_sb_if_idle(struct super_block *, enum wb_reason reason); +int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr, + enum wb_reason reason); +void sync_inodes_sb(struct super_block *); +long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, + enum wb_reason reason); +long wb_do_writeback(struct bdi_writeback *wb, int force_wait); +void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); + +/* writeback.h requires fs.h; it, too, is not included from here. */ +static inline void wait_on_inode(struct inode *inode) +{ + might_sleep(); + wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); +} +static inline void inode_sync_wait(struct inode *inode) +{ + might_sleep(); + wait_on_bit(&inode->i_state, __I_SYNC, inode_wait, + TASK_UNINTERRUPTIBLE); +} + + +/* + * mm/page-writeback.c + */ +#ifdef CONFIG_BLOCK +void laptop_io_completion(struct backing_dev_info *info); +void laptop_sync_completion(void); +void laptop_mode_sync(struct work_struct *work); +void laptop_mode_timer_fn(unsigned long data); +#else +static inline void laptop_sync_completion(void) { } +#endif +void throttle_vm_writeout(gfp_t gfp_mask); +bool zone_dirty_ok(struct zone *zone); + +extern unsigned long global_dirty_limit; + +/* These are exported to sysctl. 
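 * Userspace reaches these through the vm.* sysctls, for example
 * /proc/sys/vm/dirty_ratio and /proc/sys/vm/dirty_background_ratio.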
*/ +extern int dirty_background_ratio; +extern unsigned long dirty_background_bytes; +extern int vm_dirty_ratio; +extern unsigned long vm_dirty_bytes; +extern unsigned int dirty_writeback_interval; +extern unsigned int dirty_expire_interval; +extern int vm_highmem_is_dirtyable; +extern int block_dump; +extern int laptop_mode; + +extern int dirty_background_ratio_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +extern int dirty_background_bytes_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +extern int dirty_ratio_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +extern int dirty_bytes_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +struct ctl_table; +int dirty_writeback_centisecs_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); + +void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); +unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, + unsigned long dirty); + +void __bdi_update_bandwidth(struct backing_dev_info *bdi, + unsigned long thresh, + unsigned long bg_thresh, + unsigned long dirty, + unsigned long bdi_thresh, + unsigned long bdi_dirty, + unsigned long start_time); + +void page_writeback_init(void); +void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, + unsigned long nr_pages_dirtied); + +static inline void +balance_dirty_pages_ratelimited(struct address_space *mapping) +{ + balance_dirty_pages_ratelimited_nr(mapping, 1); +} + +typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, + void *data); + +int generic_writepages(struct address_space *mapping, + struct writeback_control *wbc); +void tag_pages_for_writeback(struct address_space *mapping, + pgoff_t start, pgoff_t end); +int write_cache_pages(struct address_space *mapping, + struct writeback_control *wbc, writepage_t writepage, + void *data); +int do_writepages(struct address_space *mapping, struct writeback_control *wbc); +void set_page_dirty_balance(struct page *page, int page_mkwrite); +void writeback_set_ratelimit(void); +void tag_pages_for_writeback(struct address_space *mapping, + pgoff_t start, pgoff_t end); + +void account_page_redirty(struct page *page); + +/* pdflush.c */ +extern int nr_pdflush_threads; /* Global so it can be exported to sysctl + read-only. */ + + +#endif /* WRITEBACK_H */ diff --git a/include/linux/x25.h b/include/linux/x25.h new file mode 100644 index 00000000..810cce67 --- /dev/null +++ b/include/linux/x25.h @@ -0,0 +1,152 @@ +/* + * These are the public elements of the Linux kernel X.25 implementation. + * + * History + * mar/20/00 Daniela Squassoni Disabling/enabling of facilities + * negotiation. 
+ * apr/02/05 Shaun Pereira Selective sub address matching with + * call user data + */ + +#ifndef X25_KERNEL_H +#define X25_KERNEL_H + +#include <linux/types.h> +#include <linux/socket.h> + +#define SIOCX25GSUBSCRIP (SIOCPROTOPRIVATE + 0) +#define SIOCX25SSUBSCRIP (SIOCPROTOPRIVATE + 1) +#define SIOCX25GFACILITIES (SIOCPROTOPRIVATE + 2) +#define SIOCX25SFACILITIES (SIOCPROTOPRIVATE + 3) +#define SIOCX25GCALLUSERDATA (SIOCPROTOPRIVATE + 4) +#define SIOCX25SCALLUSERDATA (SIOCPROTOPRIVATE + 5) +#define SIOCX25GCAUSEDIAG (SIOCPROTOPRIVATE + 6) +#define SIOCX25SCUDMATCHLEN (SIOCPROTOPRIVATE + 7) +#define SIOCX25CALLACCPTAPPRV (SIOCPROTOPRIVATE + 8) +#define SIOCX25SENDCALLACCPT (SIOCPROTOPRIVATE + 9) +#define SIOCX25GDTEFACILITIES (SIOCPROTOPRIVATE + 10) +#define SIOCX25SDTEFACILITIES (SIOCPROTOPRIVATE + 11) +#define SIOCX25SCAUSEDIAG (SIOCPROTOPRIVATE + 12) + +/* + * Values for {get,set}sockopt. + */ +#define X25_QBITINCL 1 + +/* + * X.25 Packet Size values. + */ +#define X25_PS16 4 +#define X25_PS32 5 +#define X25_PS64 6 +#define X25_PS128 7 +#define X25_PS256 8 +#define X25_PS512 9 +#define X25_PS1024 10 +#define X25_PS2048 11 +#define X25_PS4096 12 + +/* + * An X.121 address, it is held as ASCII text, null terminated, up to 15 + * digits and a null terminator. + */ +struct x25_address { + char x25_addr[16]; +}; + +/* + * Linux X.25 Address structure, used for bind, and connect mostly. + */ +struct sockaddr_x25 { + __kernel_sa_family_t sx25_family; /* Must be AF_X25 */ + struct x25_address sx25_addr; /* X.121 Address */ +}; + +/* + * DTE/DCE subscription options. + * + * As this is missing lots of options, user should expect major + * changes of this structure in 2.5.x which might break compatibilty. + * The somewhat ugly dimension 200-sizeof() is needed to maintain + * backward compatibility. + */ +struct x25_subscrip_struct { + char device[200-sizeof(unsigned long)]; + unsigned long global_facil_mask; /* 0 to disable negotiation */ + unsigned int extended; +}; + +/* values for above global_facil_mask */ + +#define X25_MASK_REVERSE 0x01 +#define X25_MASK_THROUGHPUT 0x02 +#define X25_MASK_PACKET_SIZE 0x04 +#define X25_MASK_WINDOW_SIZE 0x08 + +#define X25_MASK_CALLING_AE 0x10 +#define X25_MASK_CALLED_AE 0x20 + + +/* + * Routing table control structure. + */ +struct x25_route_struct { + struct x25_address address; + unsigned int sigdigits; + char device[200]; +}; + +/* + * Facilities structure. + */ +struct x25_facilities { + unsigned int winsize_in, winsize_out; + unsigned int pacsize_in, pacsize_out; + unsigned int throughput; + unsigned int reverse; +}; + +/* +* ITU DTE facilities +* Only the called and calling address +* extension are currently implemented. +* The rest are in place to avoid the struct +* changing size if someone needs them later +*/ + +struct x25_dte_facilities { + __u16 delay_cumul; + __u16 delay_target; + __u16 delay_max; + __u8 min_throughput; + __u8 expedited; + __u8 calling_len; + __u8 called_len; + __u8 calling_ae[20]; + __u8 called_ae[20]; +}; + +/* + * Call User Data structure. + */ +struct x25_calluserdata { + unsigned int cudlength; + unsigned char cuddata[128]; +}; + +/* + * Call clearing Cause and Diagnostic structure. 
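 *
 * A hedged userspace sketch (s is a hypothetical connected AF_X25 socket
 * descriptor):
 *
 *	struct x25_causediag cd;
 *
 *	if (ioctl(s, SIOCX25GCAUSEDIAG, &cd) == 0)
 *		printf("cause %u, diagnostic %u\n", cd.cause, cd.diagnostic);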
+ */ +struct x25_causediag { + unsigned char cause; + unsigned char diagnostic; +}; + +/* + * Further optional call user data match length selection + */ +struct x25_subaddr { + unsigned int cudmatchlength; +}; + +#endif diff --git a/include/linux/xattr.h b/include/linux/xattr.h new file mode 100644 index 00000000..e5d12203 --- /dev/null +++ b/include/linux/xattr.h @@ -0,0 +1,101 @@ +/* + File: linux/xattr.h + + Extended attributes handling. + + Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org> + Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. + Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> +*/ +#ifndef _LINUX_XATTR_H +#define _LINUX_XATTR_H + +#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ +#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */ + +/* Namespaces */ +#define XATTR_OS2_PREFIX "os2." +#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1) + +#define XATTR_SECURITY_PREFIX "security." +#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1) + +#define XATTR_SYSTEM_PREFIX "system." +#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1) + +#define XATTR_TRUSTED_PREFIX "trusted." +#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1) + +#define XATTR_USER_PREFIX "user." +#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1) + +/* Security namespace */ +#define XATTR_EVM_SUFFIX "evm" +#define XATTR_NAME_EVM XATTR_SECURITY_PREFIX XATTR_EVM_SUFFIX + +#define XATTR_SELINUX_SUFFIX "selinux" +#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX + +#define XATTR_SMACK_SUFFIX "SMACK64" +#define XATTR_SMACK_IPIN "SMACK64IPIN" +#define XATTR_SMACK_IPOUT "SMACK64IPOUT" +#define XATTR_SMACK_EXEC "SMACK64EXEC" +#define XATTR_SMACK_TRANSMUTE "SMACK64TRANSMUTE" +#define XATTR_SMACK_MMAP "SMACK64MMAP" +#define XATTR_NAME_SMACK XATTR_SECURITY_PREFIX XATTR_SMACK_SUFFIX +#define XATTR_NAME_SMACKIPIN XATTR_SECURITY_PREFIX XATTR_SMACK_IPIN +#define XATTR_NAME_SMACKIPOUT XATTR_SECURITY_PREFIX XATTR_SMACK_IPOUT +#define XATTR_NAME_SMACKEXEC XATTR_SECURITY_PREFIX XATTR_SMACK_EXEC +#define XATTR_NAME_SMACKTRANSMUTE XATTR_SECURITY_PREFIX XATTR_SMACK_TRANSMUTE +#define XATTR_NAME_SMACKMMAP XATTR_SECURITY_PREFIX XATTR_SMACK_MMAP + +#define XATTR_CAPS_SUFFIX "capability" +#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX + +#define XATTR_POSIX_ACL_ACCESS "posix_acl_access" +#define XATTR_NAME_POSIX_ACL_ACCESS XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_ACCESS +#define XATTR_POSIX_ACL_DEFAULT "posix_acl_default" +#define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT + +#ifdef __KERNEL__ + +#include <linux/types.h> + +struct inode; +struct dentry; + +struct xattr_handler { + const char *prefix; + int flags; /* fs private flags passed back to the handlers */ + size_t (*list)(struct dentry *dentry, char *list, size_t list_size, + const char *name, size_t name_len, int handler_flags); + int (*get)(struct dentry *dentry, const char *name, void *buffer, + size_t size, int handler_flags); + int (*set)(struct dentry *dentry, const char *name, const void *buffer, + size_t size, int flags, int handler_flags); +}; + +struct xattr { + char *name; + void *value; + size_t value_len; +}; + +ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); +ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); +ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); +int 
__vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); +int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); +int vfs_removexattr(struct dentry *, const char *); + +ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); +ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); +int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); +int generic_removexattr(struct dentry *dentry, const char *name); +ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, + char **xattr_value, size_t size, gfp_t flags); +int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name, + const char *value, size_t size, gfp_t flags); +#endif /* __KERNEL__ */ + +#endif /* _LINUX_XATTR_H */ diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h new file mode 100644 index 00000000..22e61fdf --- /dev/null +++ b/include/linux/xfrm.h @@ -0,0 +1,504 @@ +#ifndef _LINUX_XFRM_H +#define _LINUX_XFRM_H + +#include <linux/types.h> + +/* All of the structures in this file may not change size as they are + * passed into the kernel from userspace via netlink sockets. + */ + +/* Structure to encapsulate addresses. I do not want to use + * "standard" structure. My apologies. + */ +typedef union { + __be32 a4; + __be32 a6[4]; +} xfrm_address_t; + +/* Ident of a specific xfrm_state. It is used on input to lookup + * the state by (spi,daddr,ah/esp) or to store information about + * spi, protocol and tunnel address on output. + */ +struct xfrm_id { + xfrm_address_t daddr; + __be32 spi; + __u8 proto; +}; + +struct xfrm_sec_ctx { + __u8 ctx_doi; + __u8 ctx_alg; + __u16 ctx_len; + __u32 ctx_sid; + char ctx_str[0]; +}; + +/* Security Context Domains of Interpretation */ +#define XFRM_SC_DOI_RESERVED 0 +#define XFRM_SC_DOI_LSM 1 + +/* Security Context Algorithms */ +#define XFRM_SC_ALG_RESERVED 0 +#define XFRM_SC_ALG_SELINUX 1 + +/* Selector, used as selector both on policy rules (SPD) and SAs. 
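 *
 * An illustrative initializer (the address and values are arbitrary):
 * match TCP traffic towards 192.0.2.0/24 on any port:
 *
 *	struct xfrm_selector sel = {
 *		.daddr.a4	= htonl(0xc0000200),
 *		.prefixlen_d	= 24,
 *		.proto		= IPPROTO_TCP,
 *		.family		= AF_INET,
 *	};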
*/ + +struct xfrm_selector { + xfrm_address_t daddr; + xfrm_address_t saddr; + __be16 dport; + __be16 dport_mask; + __be16 sport; + __be16 sport_mask; + __u16 family; + __u8 prefixlen_d; + __u8 prefixlen_s; + __u8 proto; + int ifindex; + __kernel_uid32_t user; +}; + +#define XFRM_INF (~(__u64)0) + +struct xfrm_lifetime_cfg { + __u64 soft_byte_limit; + __u64 hard_byte_limit; + __u64 soft_packet_limit; + __u64 hard_packet_limit; + __u64 soft_add_expires_seconds; + __u64 hard_add_expires_seconds; + __u64 soft_use_expires_seconds; + __u64 hard_use_expires_seconds; +}; + +struct xfrm_lifetime_cur { + __u64 bytes; + __u64 packets; + __u64 add_time; + __u64 use_time; +}; + +struct xfrm_replay_state { + __u32 oseq; + __u32 seq; + __u32 bitmap; +}; + +struct xfrm_replay_state_esn { + unsigned int bmp_len; + __u32 oseq; + __u32 seq; + __u32 oseq_hi; + __u32 seq_hi; + __u32 replay_window; + __u32 bmp[0]; +}; + +struct xfrm_algo { + char alg_name[64]; + unsigned int alg_key_len; /* in bits */ + char alg_key[0]; +}; + +struct xfrm_algo_auth { + char alg_name[64]; + unsigned int alg_key_len; /* in bits */ + unsigned int alg_trunc_len; /* in bits */ + char alg_key[0]; +}; + +struct xfrm_algo_aead { + char alg_name[64]; + unsigned int alg_key_len; /* in bits */ + unsigned int alg_icv_len; /* in bits */ + char alg_key[0]; +}; + +struct xfrm_stats { + __u32 replay_window; + __u32 replay; + __u32 integrity_failed; +}; + +enum { + XFRM_POLICY_TYPE_MAIN = 0, + XFRM_POLICY_TYPE_SUB = 1, + XFRM_POLICY_TYPE_MAX = 2, + XFRM_POLICY_TYPE_ANY = 255 +}; + +enum { + XFRM_POLICY_IN = 0, + XFRM_POLICY_OUT = 1, + XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, + XFRM_POLICY_MAX = 3 +}; + +enum { + XFRM_SHARE_ANY, /* No limitations */ + XFRM_SHARE_SESSION, /* For this session only */ + XFRM_SHARE_USER, /* For this user only */ + XFRM_SHARE_UNIQUE /* Use once */ +}; + +#define XFRM_MODE_TRANSPORT 0 +#define XFRM_MODE_TUNNEL 1 +#define XFRM_MODE_ROUTEOPTIMIZATION 2 +#define XFRM_MODE_IN_TRIGGER 3 +#define XFRM_MODE_BEET 4 +#define XFRM_MODE_MAX 5 + +/* Netlink configuration messages. 
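 *
 * These constants are carried as the nlmsg_type of messages exchanged over
 * a NETLINK_XFRM socket; the XFRMA_* attribute types further below are
 * attached to them as netlink attributes.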
*/ +enum { + XFRM_MSG_BASE = 0x10, + + XFRM_MSG_NEWSA = 0x10, +#define XFRM_MSG_NEWSA XFRM_MSG_NEWSA + XFRM_MSG_DELSA, +#define XFRM_MSG_DELSA XFRM_MSG_DELSA + XFRM_MSG_GETSA, +#define XFRM_MSG_GETSA XFRM_MSG_GETSA + + XFRM_MSG_NEWPOLICY, +#define XFRM_MSG_NEWPOLICY XFRM_MSG_NEWPOLICY + XFRM_MSG_DELPOLICY, +#define XFRM_MSG_DELPOLICY XFRM_MSG_DELPOLICY + XFRM_MSG_GETPOLICY, +#define XFRM_MSG_GETPOLICY XFRM_MSG_GETPOLICY + + XFRM_MSG_ALLOCSPI, +#define XFRM_MSG_ALLOCSPI XFRM_MSG_ALLOCSPI + XFRM_MSG_ACQUIRE, +#define XFRM_MSG_ACQUIRE XFRM_MSG_ACQUIRE + XFRM_MSG_EXPIRE, +#define XFRM_MSG_EXPIRE XFRM_MSG_EXPIRE + + XFRM_MSG_UPDPOLICY, +#define XFRM_MSG_UPDPOLICY XFRM_MSG_UPDPOLICY + XFRM_MSG_UPDSA, +#define XFRM_MSG_UPDSA XFRM_MSG_UPDSA + + XFRM_MSG_POLEXPIRE, +#define XFRM_MSG_POLEXPIRE XFRM_MSG_POLEXPIRE + + XFRM_MSG_FLUSHSA, +#define XFRM_MSG_FLUSHSA XFRM_MSG_FLUSHSA + XFRM_MSG_FLUSHPOLICY, +#define XFRM_MSG_FLUSHPOLICY XFRM_MSG_FLUSHPOLICY + + XFRM_MSG_NEWAE, +#define XFRM_MSG_NEWAE XFRM_MSG_NEWAE + XFRM_MSG_GETAE, +#define XFRM_MSG_GETAE XFRM_MSG_GETAE + + XFRM_MSG_REPORT, +#define XFRM_MSG_REPORT XFRM_MSG_REPORT + + XFRM_MSG_MIGRATE, +#define XFRM_MSG_MIGRATE XFRM_MSG_MIGRATE + + XFRM_MSG_NEWSADINFO, +#define XFRM_MSG_NEWSADINFO XFRM_MSG_NEWSADINFO + XFRM_MSG_GETSADINFO, +#define XFRM_MSG_GETSADINFO XFRM_MSG_GETSADINFO + + XFRM_MSG_NEWSPDINFO, +#define XFRM_MSG_NEWSPDINFO XFRM_MSG_NEWSPDINFO + XFRM_MSG_GETSPDINFO, +#define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO + + XFRM_MSG_MAPPING, +#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING + __XFRM_MSG_MAX +}; +#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1) + +#define XFRM_NR_MSGTYPES (XFRM_MSG_MAX + 1 - XFRM_MSG_BASE) + +/* + * Generic LSM security context for comunicating to user space + * NOTE: Same format as sadb_x_sec_ctx + */ +struct xfrm_user_sec_ctx { + __u16 len; + __u16 exttype; + __u8 ctx_alg; /* LSMs: e.g., selinux == 1 */ + __u8 ctx_doi; + __u16 ctx_len; +}; + +struct xfrm_user_tmpl { + struct xfrm_id id; + __u16 family; + xfrm_address_t saddr; + __u32 reqid; + __u8 mode; + __u8 share; + __u8 optional; + __u32 aalgos; + __u32 ealgos; + __u32 calgos; +}; + +struct xfrm_encap_tmpl { + __u16 encap_type; + __be16 encap_sport; + __be16 encap_dport; + xfrm_address_t encap_oa; +}; + +/* AEVENT flags */ +enum xfrm_ae_ftype_t { + XFRM_AE_UNSPEC, + XFRM_AE_RTHR=1, /* replay threshold*/ + XFRM_AE_RVAL=2, /* replay value */ + XFRM_AE_LVAL=4, /* lifetime value */ + XFRM_AE_ETHR=8, /* expiry timer threshold */ + XFRM_AE_CR=16, /* Event cause is replay update */ + XFRM_AE_CE=32, /* Event cause is timer expiry */ + XFRM_AE_CU=64, /* Event cause is policy update */ + __XFRM_AE_MAX + +#define XFRM_AE_MAX (__XFRM_AE_MAX - 1) +}; + +struct xfrm_userpolicy_type { + __u8 type; + __u16 reserved1; + __u8 reserved2; +}; + +/* Netlink message attributes. 
*/ +enum xfrm_attr_type_t { + XFRMA_UNSPEC, + XFRMA_ALG_AUTH, /* struct xfrm_algo */ + XFRMA_ALG_CRYPT, /* struct xfrm_algo */ + XFRMA_ALG_COMP, /* struct xfrm_algo */ + XFRMA_ENCAP, /* struct xfrm_algo + struct xfrm_encap_tmpl */ + XFRMA_TMPL, /* 1 or more struct xfrm_user_tmpl */ + XFRMA_SA, /* struct xfrm_usersa_info */ + XFRMA_POLICY, /*struct xfrm_userpolicy_info */ + XFRMA_SEC_CTX, /* struct xfrm_sec_ctx */ + XFRMA_LTIME_VAL, + XFRMA_REPLAY_VAL, + XFRMA_REPLAY_THRESH, + XFRMA_ETIMER_THRESH, + XFRMA_SRCADDR, /* xfrm_address_t */ + XFRMA_COADDR, /* xfrm_address_t */ + XFRMA_LASTUSED, /* unsigned long */ + XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */ + XFRMA_MIGRATE, + XFRMA_ALG_AEAD, /* struct xfrm_algo_aead */ + XFRMA_KMADDRESS, /* struct xfrm_user_kmaddress */ + XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */ + XFRMA_MARK, /* struct xfrm_mark */ + XFRMA_TFCPAD, /* __u32 */ + XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_esn */ + __XFRMA_MAX + +#define XFRMA_MAX (__XFRMA_MAX - 1) +}; + +struct xfrm_mark { + __u32 v; /* value */ + __u32 m; /* mask */ +}; + +enum xfrm_sadattr_type_t { + XFRMA_SAD_UNSPEC, + XFRMA_SAD_CNT, + XFRMA_SAD_HINFO, + __XFRMA_SAD_MAX + +#define XFRMA_SAD_MAX (__XFRMA_SAD_MAX - 1) +}; + +struct xfrmu_sadhinfo { + __u32 sadhcnt; /* current hash bkts */ + __u32 sadhmcnt; /* max allowed hash bkts */ +}; + +enum xfrm_spdattr_type_t { + XFRMA_SPD_UNSPEC, + XFRMA_SPD_INFO, + XFRMA_SPD_HINFO, + __XFRMA_SPD_MAX + +#define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1) +}; + +struct xfrmu_spdinfo { + __u32 incnt; + __u32 outcnt; + __u32 fwdcnt; + __u32 inscnt; + __u32 outscnt; + __u32 fwdscnt; +}; + +struct xfrmu_spdhinfo { + __u32 spdhcnt; + __u32 spdhmcnt; +}; + +struct xfrm_usersa_info { + struct xfrm_selector sel; + struct xfrm_id id; + xfrm_address_t saddr; + struct xfrm_lifetime_cfg lft; + struct xfrm_lifetime_cur curlft; + struct xfrm_stats stats; + __u32 seq; + __u32 reqid; + __u16 family; + __u8 mode; /* XFRM_MODE_xxx */ + __u8 replay_window; + __u8 flags; +#define XFRM_STATE_NOECN 1 +#define XFRM_STATE_DECAP_DSCP 2 +#define XFRM_STATE_NOPMTUDISC 4 +#define XFRM_STATE_WILDRECV 8 +#define XFRM_STATE_ICMP 16 +#define XFRM_STATE_AF_UNSPEC 32 +#define XFRM_STATE_ALIGN4 64 +#define XFRM_STATE_ESN 128 +}; + +struct xfrm_usersa_id { + xfrm_address_t daddr; + __be32 spi; + __u16 family; + __u8 proto; +}; + +struct xfrm_aevent_id { + struct xfrm_usersa_id sa_id; + xfrm_address_t saddr; + __u32 flags; + __u32 reqid; +}; + +struct xfrm_userspi_info { + struct xfrm_usersa_info info; + __u32 min; + __u32 max; +}; + +struct xfrm_userpolicy_info { + struct xfrm_selector sel; + struct xfrm_lifetime_cfg lft; + struct xfrm_lifetime_cur curlft; + __u32 priority; + __u32 index; + __u8 dir; + __u8 action; +#define XFRM_POLICY_ALLOW 0 +#define XFRM_POLICY_BLOCK 1 + __u8 flags; +#define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */ + /* Automatically expand selector to include matching ICMP payloads. 
*/ +#define XFRM_POLICY_ICMP 2 + __u8 share; +}; + +struct xfrm_userpolicy_id { + struct xfrm_selector sel; + __u32 index; + __u8 dir; +}; + +struct xfrm_user_acquire { + struct xfrm_id id; + xfrm_address_t saddr; + struct xfrm_selector sel; + struct xfrm_userpolicy_info policy; + __u32 aalgos; + __u32 ealgos; + __u32 calgos; + __u32 seq; +}; + +struct xfrm_user_expire { + struct xfrm_usersa_info state; + __u8 hard; +}; + +struct xfrm_user_polexpire { + struct xfrm_userpolicy_info pol; + __u8 hard; +}; + +struct xfrm_usersa_flush { + __u8 proto; +}; + +struct xfrm_user_report { + __u8 proto; + struct xfrm_selector sel; +}; + +/* Used by MIGRATE to pass addresses IKE should use to perform + * SA negotiation with the peer */ +struct xfrm_user_kmaddress { + xfrm_address_t local; + xfrm_address_t remote; + __u32 reserved; + __u16 family; +}; + +struct xfrm_user_migrate { + xfrm_address_t old_daddr; + xfrm_address_t old_saddr; + xfrm_address_t new_daddr; + xfrm_address_t new_saddr; + __u8 proto; + __u8 mode; + __u16 reserved; + __u32 reqid; + __u16 old_family; + __u16 new_family; +}; + +struct xfrm_user_mapping { + struct xfrm_usersa_id id; + __u32 reqid; + xfrm_address_t old_saddr; + xfrm_address_t new_saddr; + __be16 old_sport; + __be16 new_sport; +}; + +#ifndef __KERNEL__ +/* backwards compatibility for userspace */ +#define XFRMGRP_ACQUIRE 1 +#define XFRMGRP_EXPIRE 2 +#define XFRMGRP_SA 4 +#define XFRMGRP_POLICY 8 +#define XFRMGRP_REPORT 0x20 +#endif + +enum xfrm_nlgroups { + XFRMNLGRP_NONE, +#define XFRMNLGRP_NONE XFRMNLGRP_NONE + XFRMNLGRP_ACQUIRE, +#define XFRMNLGRP_ACQUIRE XFRMNLGRP_ACQUIRE + XFRMNLGRP_EXPIRE, +#define XFRMNLGRP_EXPIRE XFRMNLGRP_EXPIRE + XFRMNLGRP_SA, +#define XFRMNLGRP_SA XFRMNLGRP_SA + XFRMNLGRP_POLICY, +#define XFRMNLGRP_POLICY XFRMNLGRP_POLICY + XFRMNLGRP_AEVENTS, +#define XFRMNLGRP_AEVENTS XFRMNLGRP_AEVENTS + XFRMNLGRP_REPORT, +#define XFRMNLGRP_REPORT XFRMNLGRP_REPORT + XFRMNLGRP_MIGRATE, +#define XFRMNLGRP_MIGRATE XFRMNLGRP_MIGRATE + XFRMNLGRP_MAPPING, +#define XFRMNLGRP_MAPPING XFRMNLGRP_MAPPING + __XFRMNLGRP_MAX +}; +#define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1) + +#endif /* _LINUX_XFRM_H */ diff --git a/include/linux/xilinxfb.h b/include/linux/xilinxfb.h new file mode 100644 index 00000000..5a155a96 --- /dev/null +++ b/include/linux/xilinxfb.h @@ -0,0 +1,30 @@ +/* + * Platform device data for Xilinx Framebuffer device + * + * Copyright 2007 Secret Lab Technologies Ltd. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __XILINXFB_H__ +#define __XILINXFB_H__ + +#include <linux/types.h> + +/* ML300/403 reference design framebuffer driver platform data struct */ +struct xilinxfb_platform_data { + u32 rotate_screen; /* Flag to rotate display 180 degrees */ + u32 screen_height_mm; /* Physical dimensions of screen in mm */ + u32 screen_width_mm; + u32 xres, yres; /* resolution of screen in pixels */ + u32 xvirt, yvirt; /* resolution of memory buffer */ + + /* Physical address of framebuffer memory; If non-zero, driver + * will use provided memory address instead of allocating one from + * the consistent pool. 
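 *
 * An illustrative board-file initializer; the name and the numbers are
 * placeholders, not taken from any in-tree board:
 *
 *	static struct xilinxfb_platform_data my_fb_pdata = {
 *		.xres			= 640,
 *		.yres			= 480,
 *		.xvirt			= 1024,
 *		.yvirt			= 480,
 *		.screen_width_mm	= 152,
 *		.screen_height_mm	= 114,
 *	};
 *
 * Leaving fb_phys at zero keeps the default behaviour described above:
 * the driver allocates the framebuffer from the consistent pool itself.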
*/ + u32 fb_phys; +}; + +#endif /* __XILINXFB_H__ */ diff --git a/include/linux/xz.h b/include/linux/xz.h new file mode 100644 index 00000000..64cffa6d --- /dev/null +++ b/include/linux/xz.h @@ -0,0 +1,264 @@ +/* + * XZ decompressor + * + * Authors: Lasse Collin <lasse.collin@tukaani.org> + * Igor Pavlov <http://7-zip.org/> + * + * This file has been put into the public domain. + * You can do whatever you want with this file. + */ + +#ifndef XZ_H +#define XZ_H + +#ifdef __KERNEL__ +# include <linux/stddef.h> +# include <linux/types.h> +#else +# include <stddef.h> +# include <stdint.h> +#endif + +/* In Linux, this is used to make extern functions static when needed. */ +#ifndef XZ_EXTERN +# define XZ_EXTERN extern +#endif + +/** + * enum xz_mode - Operation mode + * + * @XZ_SINGLE: Single-call mode. This uses less RAM than + * than multi-call modes, because the LZMA2 + * dictionary doesn't need to be allocated as + * part of the decoder state. All required data + * structures are allocated at initialization, + * so xz_dec_run() cannot return XZ_MEM_ERROR. + * @XZ_PREALLOC: Multi-call mode with preallocated LZMA2 + * dictionary buffer. All data structures are + * allocated at initialization, so xz_dec_run() + * cannot return XZ_MEM_ERROR. + * @XZ_DYNALLOC: Multi-call mode. The LZMA2 dictionary is + * allocated once the required size has been + * parsed from the stream headers. If the + * allocation fails, xz_dec_run() will return + * XZ_MEM_ERROR. + * + * It is possible to enable support only for a subset of the above + * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC, + * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled + * with support for all operation modes, but the preboot code may + * be built with fewer features to minimize code size. + */ +enum xz_mode { + XZ_SINGLE, + XZ_PREALLOC, + XZ_DYNALLOC +}; + +/** + * enum xz_ret - Return codes + * @XZ_OK: Everything is OK so far. More input or more + * output space is required to continue. This + * return code is possible only in multi-call mode + * (XZ_PREALLOC or XZ_DYNALLOC). + * @XZ_STREAM_END: Operation finished successfully. + * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding + * is still possible in multi-call mode by simply + * calling xz_dec_run() again. + * Note that this return value is used only if + * XZ_DEC_ANY_CHECK was defined at build time, + * which is not used in the kernel. Unsupported + * check types return XZ_OPTIONS_ERROR if + * XZ_DEC_ANY_CHECK was not defined at build time. + * @XZ_MEM_ERROR: Allocating memory failed. This return code is + * possible only if the decoder was initialized + * with XZ_DYNALLOC. The amount of memory that was + * tried to be allocated was no more than the + * dict_max argument given to xz_dec_init(). + * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than + * allowed by the dict_max argument given to + * xz_dec_init(). This return value is possible + * only in multi-call mode (XZ_PREALLOC or + * XZ_DYNALLOC); the single-call mode (XZ_SINGLE) + * ignores the dict_max argument. + * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic + * bytes). + * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested + * compression options. In the decoder this means + * that the header CRC32 matches, but the header + * itself specifies something that we don't support. + * @XZ_DATA_ERROR: Compressed data is corrupt. + * @XZ_BUF_ERROR: Cannot make any progress. 
Details are slightly + * different between multi-call and single-call + * mode; more information below. + * + * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls + * to XZ code cannot consume any input and cannot produce any new output. + * This happens when there is no new input available, or the output buffer + * is full while at least one output byte is still pending. Assuming your + * code is not buggy, you can get this error only when decoding a compressed + * stream that is truncated or otherwise corrupt. + * + * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer + * is too small or the compressed input is corrupt in a way that makes the + * decoder produce more output than the caller expected. When it is + * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR + * is used instead of XZ_BUF_ERROR. + */ +enum xz_ret { + XZ_OK, + XZ_STREAM_END, + XZ_UNSUPPORTED_CHECK, + XZ_MEM_ERROR, + XZ_MEMLIMIT_ERROR, + XZ_FORMAT_ERROR, + XZ_OPTIONS_ERROR, + XZ_DATA_ERROR, + XZ_BUF_ERROR +}; + +/** + * struct xz_buf - Passing input and output buffers to XZ code + * @in: Beginning of the input buffer. This may be NULL if and only + * if in_pos is equal to in_size. + * @in_pos: Current position in the input buffer. This must not exceed + * in_size. + * @in_size: Size of the input buffer + * @out: Beginning of the output buffer. This may be NULL if and only + * if out_pos is equal to out_size. + * @out_pos: Current position in the output buffer. This must not exceed + * out_size. + * @out_size: Size of the output buffer + * + * Only the contents of the output buffer from out[out_pos] onward, and + * the variables in_pos and out_pos are modified by the XZ code. + */ +struct xz_buf { + const uint8_t *in; + size_t in_pos; + size_t in_size; + + uint8_t *out; + size_t out_pos; + size_t out_size; +}; + +/** + * struct xz_dec - Opaque type to hold the XZ decoder state + */ +struct xz_dec; + +/** + * xz_dec_init() - Allocate and initialize a XZ decoder state + * @mode: Operation mode + * @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for + * multi-call decoding. This is ignored in single-call mode + * (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes + * or 2^n + 2^(n-1) bytes (the latter sizes are less common + * in practice), so other values for dict_max don't make sense. + * In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB, + * 512 KiB, and 1 MiB are probably the only reasonable values, + * except for kernel and initramfs images where a bigger + * dictionary can be fine and useful. + * + * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at + * once. The caller must provide enough output space or the decoding will + * fail. The output space is used as the dictionary buffer, which is why + * there is no need to allocate the dictionary as part of the decoder's + * internal state. + * + * Because the output buffer is used as the workspace, streams encoded using + * a big dictionary are not a problem in single-call mode. It is enough that + * the output buffer is big enough to hold the actual uncompressed data; it + * can be smaller than the dictionary size stored in the stream headers. + * + * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes + * of memory is preallocated for the LZMA2 dictionary. This way there is no + * risk that xz_dec_run() could run out of memory, since xz_dec_run() will + * never allocate any memory. 
Instead, if the preallocated dictionary is too + * small for decoding the given input stream, xz_dec_run() will return + * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be + * decoded to avoid allocating excessive amount of memory for the dictionary. + * + * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC): + * dict_max specifies the maximum allowed dictionary size that xz_dec_run() + * may allocate once it has parsed the dictionary size from the stream + * headers. This way excessive allocations can be avoided while still + * limiting the maximum memory usage to a sane value to prevent running the + * system out of memory when decompressing streams from untrusted sources. + * + * On success, xz_dec_init() returns a pointer to struct xz_dec, which is + * ready to be used with xz_dec_run(). If memory allocation fails, + * xz_dec_init() returns NULL. + */ +XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max); + +/** + * xz_dec_run() - Run the XZ decoder + * @s: Decoder state allocated using xz_dec_init() + * @b: Input and output buffers + * + * The possible return values depend on build options and operation mode. + * See enum xz_ret for details. + * + * Note that if an error occurs in single-call mode (return value is not + * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the + * contents of the output buffer from b->out[b->out_pos] onward are + * undefined. This is true even after XZ_BUF_ERROR, because with some filter + * chains, there may be a second pass over the output buffer, and this pass + * cannot be properly done if the output buffer is truncated. Thus, you + * cannot give the single-call decoder a too small buffer and then expect to + * get that amount valid data from the beginning of the stream. You must use + * the multi-call decoder if you don't want to uncompress the whole stream. + */ +XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b); + +/** + * xz_dec_reset() - Reset an already allocated decoder state + * @s: Decoder state allocated using xz_dec_init() + * + * This function can be used to reset the multi-call decoder state without + * freeing and reallocating memory with xz_dec_end() and xz_dec_init(). + * + * In single-call mode, xz_dec_reset() is always called in the beginning of + * xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in + * multi-call mode. + */ +XZ_EXTERN void xz_dec_reset(struct xz_dec *s); + +/** + * xz_dec_end() - Free the memory allocated for the decoder state + * @s: Decoder state allocated using xz_dec_init(). If s is NULL, + * this function does nothing. + */ +XZ_EXTERN void xz_dec_end(struct xz_dec *s); + +/* + * Standalone build (userspace build or in-kernel build for boot time use) + * needs a CRC32 implementation. For normal in-kernel use, kernel's own + * CRC32 module is used instead, and users of this module don't need to + * care about the functions below. + */ +#ifndef XZ_INTERNAL_CRC32 +# ifdef __KERNEL__ +# define XZ_INTERNAL_CRC32 0 +# else +# define XZ_INTERNAL_CRC32 1 +# endif +#endif + +#if XZ_INTERNAL_CRC32 +/* + * This must be called before any other xz_* function to initialize + * the CRC32 lookup table. + */ +XZ_EXTERN void xz_crc32_init(void); + +/* + * Update CRC32 value using the polynomial from IEEE-802.3. To start a new + * calculation, the third argument must be zero. To continue the calculation, + * the previously returned value is passed as the third argument. 
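 *
 * A short usage sketch, where buf and size are hypothetical:
 *
 *	uint32_t crc;
 *
 *	xz_crc32_init();
 *	crc = xz_crc32(buf, size, 0);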
+ */ +XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc); +#endif +#endif diff --git a/include/linux/yam.h b/include/linux/yam.h new file mode 100644 index 00000000..7fe28228 --- /dev/null +++ b/include/linux/yam.h @@ -0,0 +1,82 @@ +/*****************************************************************************/ + +/* + * yam.h -- YAM radio modem driver. + * + * Copyright (C) 1998 Frederic Rible F1OAT (frible@teaser.fr) + * Adapted from baycom.c driver written by Thomas Sailer (sailer@ife.ee.ethz.ch) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Please note that the GPL allows you to use the driver, NOT the radio. + * In order to use the radio, you need a license from the communications + * authority of your country. + * + * + */ + +/*****************************************************************************/ + +#define SIOCYAMRESERVED (0) +#define SIOCYAMSCFG (1) /* Set configuration */ +#define SIOCYAMGCFG (2) /* Get configuration */ +#define SIOCYAMSMCS (3) /* Set mcs data */ + +#define YAM_IOBASE (1 << 0) +#define YAM_IRQ (1 << 1) +#define YAM_BITRATE (1 << 2) /* Bit rate of radio port ->57600 */ +#define YAM_MODE (1 << 3) /* 0=simplex 1=duplex 2=duplex+tempo */ +#define YAM_HOLDDLY (1 << 4) /* duplex tempo (sec) */ +#define YAM_TXDELAY (1 << 5) /* Tx Delay (ms) */ +#define YAM_TXTAIL (1 << 6) /* Tx Tail (ms) */ +#define YAM_PERSIST (1 << 7) /* Persist (ms) */ +#define YAM_SLOTTIME (1 << 8) /* Slottime (ms) */ +#define YAM_BAUDRATE (1 << 9) /* Baud rate of rs232 port ->115200 */ + +#define YAM_MAXBITRATE 57600 +#define YAM_MAXBAUDRATE 115200 +#define YAM_MAXMODE 2 +#define YAM_MAXHOLDDLY 99 +#define YAM_MAXTXDELAY 999 +#define YAM_MAXTXTAIL 999 +#define YAM_MAXPERSIST 255 +#define YAM_MAXSLOTTIME 999 + +#define YAM_FPGA_SIZE 5302 + +struct yamcfg { + unsigned int mask; /* Mask of commands */ + unsigned int iobase; /* IO Base of COM port */ + unsigned int irq; /* IRQ of COM port */ + unsigned int bitrate; /* Bit rate of radio port */ + unsigned int baudrate; /* Baud rate of the RS232 port */ + unsigned int txdelay; /* TxDelay */ + unsigned int txtail; /* TxTail */ + unsigned int persist; /* Persistence */ + unsigned int slottime; /* Slottime */ + unsigned int mode; /* mode 0 (simp), 1(Dupl), 2(Dupl+delay) */ + unsigned int holddly; /* PTT delay in FullDuplex 2 mode */ +}; + +struct yamdrv_ioctl_cfg { + int cmd; + struct yamcfg cfg; +}; + +struct yamdrv_ioctl_mcs { + int cmd; + int bitrate; + unsigned char bits[YAM_FPGA_SIZE]; +}; diff --git a/include/linux/z2_battery.h b/include/linux/z2_battery.h new file mode 100644 index 00000000..7b975040 --- /dev/null +++ b/include/linux/z2_battery.h @@ -0,0 +1,17 @@ +#ifndef _LINUX_Z2_BATTERY_H +#define _LINUX_Z2_BATTERY_H + +struct z2_battery_info { + int batt_I2C_bus; + int batt_I2C_addr; + int batt_I2C_reg; + int charge_gpio; + int min_voltage; + int 
max_voltage; + int batt_div; + int batt_mult; + int batt_tech; + char *batt_name; +}; + +#endif diff --git a/include/linux/zconf.h b/include/linux/zconf.h new file mode 100644 index 00000000..0beb75e3 --- /dev/null +++ b/include/linux/zconf.h @@ -0,0 +1,57 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-1998 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef _ZCONF_H +#define _ZCONF_H + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus a few kilobytes + for small objects. +*/ + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# define MAX_MEM_LEVEL 8 +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. + * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) + */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* default windowBits for decompression. MAX_WBITS is for compression only */ +#ifndef DEF_WBITS +# define DEF_WBITS MAX_WBITS +#endif + +/* default memLevel */ +#if MAX_MEM_LEVEL >= 8 +# define DEF_MEM_LEVEL 8 +#else +# define DEF_MEM_LEVEL MAX_MEM_LEVEL +#endif + + /* Type declarations */ + +typedef unsigned char Byte; /* 8 bits */ +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ +typedef void *voidp; + +#endif /* _ZCONF_H */ diff --git a/include/linux/zlib.h b/include/linux/zlib.h new file mode 100644 index 00000000..9c5a6b4d --- /dev/null +++ b/include/linux/zlib.h @@ -0,0 +1,711 @@ +/* zlib.h -- interface of the 'zlib' general purpose compression library + + Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + + The data format used by the zlib library is described by RFCs (Request for + Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt + (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). 
+*/
+
+#ifndef _ZLIB_H
+#define _ZLIB_H
+
+#include <linux/zconf.h>
+
+/* zlib deflate based on ZLIB_VERSION "1.1.3" */
+/* zlib inflate based on ZLIB_VERSION "1.2.3" */
+
+/*
+ This is a modified version of zlib for use inside the Linux kernel.
+ The main changes are to perform all memory allocation in advance.
+
+ Inflation Changes:
+ * Z_PACKET_FLUSH is added and used by ppp_deflate. Before returning
+ this checks there is no more input data available and the next data
+ is a STORED block. It also resets the mode to be read for the next
+ data, all as per PPP requirements.
+ * Addition of zlib_inflateIncomp which copies incompressible data into
+ the history window and adjusts the accounting without calling
+ zlib_inflate itself to inflate the data.
+*/
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed
+ data. This version of the library supports only one compression method
+ (deflation) but other algorithms will be added later and will have the same
+ stream interface.
+
+ Compression can be done in a single step if the buffers are large
+ enough (for example if an input file is mmap'ed), or can be done by
+ repeated calls of the compression function. In the latter case, the
+ application must provide more input and/or consume the output
+ (providing more output space) before each call.
+
+ The compressed data format used by default by the in-memory functions is
+ the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped
+ around a deflate stream, which is itself documented in RFC 1951.
+
+ The library also supports reading and writing files in gzip (.gz) format
+ with an interface similar to that of stdio.
+
+ The zlib format was designed to be compact and fast for use in memory
+ and on communications channels. The gzip format was designed for single-
+ file compression on file systems, has a larger header than zlib to maintain
+ directory information, and uses a different, slower check method than zlib.
+
+ The library does not install any signal handler. The decoder checks
+ the consistency of the compressed data, so the library should never
+ crash even in case of corrupted input.
+*/
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ const Byte *next_in; /* next input byte */
+ uInt avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total nb of input bytes read so far */
+
+ Byte *next_out; /* next output byte should be put there */
+ uInt avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total nb of bytes output so far */
+
+ char *msg; /* last error message, NULL if no error */
+ struct internal_state *state; /* not visible by applications */
+
+ void *workspace; /* memory allocated for this stream */
+
+ int data_type; /* best guess about the data type: ascii or binary */
+ uLong adler; /* adler32 value of the uncompressed data */
+ uLong reserved; /* reserved for future use */
+} z_stream;
+
+typedef z_stream *z_streamp;
+
+/*
+ The application must update next_in and avail_in when avail_in has
+ dropped to zero. It must update next_out and avail_out when avail_out
+ has dropped to zero. The application must initialize zalloc, zfree and
+ opaque before calling the init function. All other fields are set by the
+ compression library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree.
This can be useful for custom + memory management. The compression library attaches no meaning to the + opaque value. + + zalloc must return NULL if there is not enough memory for the object. + If zlib is used in a multi-threaded application, zalloc and zfree must be + thread safe. + + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this + if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, + pointers returned by zalloc for objects of exactly 65536 bytes *must* + have their offset normalized to zero. The default allocation function + provided by this library ensures this (see zutil.c). To reduce memory + requirements and avoid any allocation of 64K objects, at the expense of + compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h). + + The fields total_in and total_out can be used for statistics or + progress reports. After compression, total_in holds the total size of + the uncompressed data and may be saved for use in the decompressor + (particularly if the decompressor wants to decompress everything in + a single step). +*/ + + /* constants */ + +#define Z_NO_FLUSH 0 +#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */ +#define Z_PACKET_FLUSH 2 +#define Z_SYNC_FLUSH 3 +#define Z_FULL_FLUSH 4 +#define Z_FINISH 5 +#define Z_BLOCK 6 /* Only for inflate at present */ +/* Allowed flush values; see deflate() and inflate() below for details */ + +#define Z_OK 0 +#define Z_STREAM_END 1 +#define Z_NEED_DICT 2 +#define Z_ERRNO (-1) +#define Z_STREAM_ERROR (-2) +#define Z_DATA_ERROR (-3) +#define Z_MEM_ERROR (-4) +#define Z_BUF_ERROR (-5) +#define Z_VERSION_ERROR (-6) +/* Return codes for the compression/decompression functions. Negative + * values are errors, positive values are used for special but normal events. + */ + +#define Z_NO_COMPRESSION 0 +#define Z_BEST_SPEED 1 +#define Z_BEST_COMPRESSION 9 +#define Z_DEFAULT_COMPRESSION (-1) +/* compression levels */ + +#define Z_FILTERED 1 +#define Z_HUFFMAN_ONLY 2 +#define Z_DEFAULT_STRATEGY 0 +/* compression strategy; see deflateInit2() below for details */ + +#define Z_BINARY 0 +#define Z_ASCII 1 +#define Z_UNKNOWN 2 +/* Possible values of the data_type field */ + +#define Z_DEFLATED 8 +/* The deflate compression method (the only one supported in this version) */ + + /* basic functions */ + +extern int zlib_deflate_workspacesize (int windowBits, int memLevel); +/* + Returns the number of bytes that needs to be allocated for a per- + stream workspace with the specified parameters. A pointer to this + number of bytes should be returned in stream->workspace before + you call zlib_deflateInit() or zlib_deflateInit2(). If you call + zlib_deflateInit(), specify windowBits = MAX_WBITS and memLevel = + MAX_MEM_LEVEL here. If you call zlib_deflateInit2(), the windowBits + and memLevel parameters passed to zlib_deflateInit2() must not + exceed those passed here. +*/ + +/* +extern int deflateInit (z_streamp strm, int level); + + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. + If zalloc and zfree are set to NULL, deflateInit updates them to + use default allocation functions. + + The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: + 1 gives best speed, 9 gives best compression, 0 gives no compression at + all (the input data is simply copied a block at a time). 
+ Z_DEFAULT_COMPRESSION requests a default compromise between speed and + compression (currently equivalent to level 6). + + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if level is not a valid compression level, + Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible + with the version assumed by the caller (ZLIB_VERSION). + msg is set to null if there is no error message. deflateInit does not + perform any compression: this will be done by deflate(). +*/ + + +extern int zlib_deflate (z_streamp strm, int flush); +/* + deflate compresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce some + output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. deflate performs one or both of the + following actions: + + - Compress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in and avail_in are updated and + processing will resume at this point for the next call of deflate(). + + - Provide more output starting at next_out and update next_out and avail_out + accordingly. This action is forced if the parameter flush is non zero. + Forcing flush frequently degrades the compression ratio, so this parameter + should be set only when necessary (in interactive applications). + Some output may be provided even if flush is not set. + + Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming + more output, and updating avail_in or avail_out accordingly; avail_out + should never be zero before the call. The application can consume the + compressed output when it wants, for example when the output buffer is full + (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK + and with zero avail_out, it must be called again after making room in the + output buffer because there might be more output pending. + + If the parameter flush is set to Z_SYNC_FLUSH, all pending output is + flushed to the output buffer and the output is aligned on a byte boundary, so + that the decompressor can get all input data available so far. (In particular + avail_in is zero after the call if enough output space has been provided + before the call.) Flushing may degrade compression for some compression + algorithms and so it should be used only when necessary. + + If flush is set to Z_FULL_FLUSH, all output is flushed as with + Z_SYNC_FLUSH, and the compression state is reset so that decompression can + restart from this point if previous compressed data has been damaged or if + random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + the compression. + + If deflate returns with avail_out == 0, this function must be called again + with the same value of the flush parameter and more output space (updated + avail_out), until the flush is complete (deflate returns with non-zero + avail_out). + + If the parameter flush is set to Z_FINISH, pending input is processed, + pending output is flushed and deflate returns with Z_STREAM_END if there + was enough output space; if deflate returns with Z_OK, this function must be + called again with Z_FINISH and more output space (updated avail_out) but no + more input data, until it returns with Z_STREAM_END or an error. 
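+
+ As a rough sketch (assuming a z_stream s already set up with
+ zlib_deflateInit() and a workspace; src, dst, src_len, dst_len and
+ compressed_len are hypothetical variables; dst is assumed to be sized
+ generously, e.g. via deflateBound() below), a single-step compression
+ might look like:
+
+   s.next_in = src;   s.avail_in = src_len;
+   s.next_out = dst;  s.avail_out = dst_len;
+   if (zlib_deflate(&s, Z_FINISH) == Z_STREAM_END)
+           compressed_len = s.total_out;
+   zlib_deflateEnd(&s);
+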
After + deflate has returned Z_STREAM_END, the only possible operations on the + stream are deflateReset or deflateEnd. + + Z_FINISH can be used immediately after deflateInit if all the compression + is to be done in a single step. In this case, avail_out must be at least + 0.1% larger than avail_in plus 12 bytes. If deflate does not return + Z_STREAM_END, then it must be called again as described above. + + deflate() sets strm->adler to the adler32 checksum of all input read + so far (that is, total_in bytes). + + deflate() may update data_type if it can make a good guess about + the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered + binary. This field is only for information purposes and does not affect + the compression algorithm in any manner. + + deflate() returns Z_OK if some progress has been made (more input + processed or more output produced), Z_STREAM_END if all input has been + consumed and all output has been produced (only when flush is set to + Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example + if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible + (for example avail_in or avail_out was zero). +*/ + + +extern int zlib_deflateEnd (z_streamp strm); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any + pending output. + + deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the + stream state was inconsistent, Z_DATA_ERROR if the stream was freed + prematurely (some input or output was discarded). In the error case, + msg may be set but then points to a static string (which must not be + deallocated). +*/ + + +extern int zlib_inflate_workspacesize (void); +/* + Returns the number of bytes that needs to be allocated for a per- + stream workspace. A pointer to this number of bytes should be + returned in stream->workspace before calling zlib_inflateInit(). +*/ + +/* +extern int zlib_inflateInit (z_streamp strm); + + Initializes the internal stream state for decompression. The fields + next_in, avail_in, and workspace must be initialized before by + the caller. If next_in is not NULL and avail_in is large enough (the exact + value depends on the compression method), inflateInit determines the + compression method from the zlib header and allocates all data structures + accordingly; otherwise the allocation will be deferred to the first call of + inflate. If zalloc and zfree are set to NULL, inflateInit updates them to + use default allocation functions. + + inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller. msg is set to null if there is no error + message. inflateInit does not perform any decompression apart from reading + the zlib header if present: this will be done by inflate(). (So next_in and + avail_in may be modified, but next_out and avail_out are unchanged.) +*/ + + +extern int zlib_inflate (z_streamp strm, int flush); +/* + inflate decompresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. inflate performs one or both of the + following actions: + + - Decompress more input starting at next_in and update next_in and avail_in + accordingly. 
If not all input can be processed (because there is not + enough room in the output buffer), next_in is updated and processing + will resume at this point for the next call of inflate(). + + - Provide more output starting at next_out and update next_out and avail_out + accordingly. inflate() provides as much output as possible, until there + is no more input data or no more space in the output buffer (see below + about the flush parameter). + + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming + more output, and updating the next_* and avail_* values accordingly. + The application can consume the uncompressed output when it wants, for + example when the output buffer is full (avail_out == 0), or after each + call of inflate(). If inflate returns Z_OK and with zero avail_out, it + must be called again after making room in the output buffer because there + might be more output pending. + + The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, + Z_FINISH, or Z_BLOCK. Z_SYNC_FLUSH requests that inflate() flush as much + output as possible to the output buffer. Z_BLOCK requests that inflate() stop + if and when it gets to the next deflate block boundary. When decoding the + zlib or gzip format, this will cause inflate() to return immediately after + the header and before the first block. When doing a raw inflate, inflate() + will go ahead and process the first block, and will return when it gets to + the end of that block, or when it runs out of data. + + The Z_BLOCK option assists in appending to or combining deflate streams. + Also to assist in this, on return inflate() will set strm->data_type to the + number of unused bits in the last byte taken from strm->next_in, plus 64 + if inflate() is currently decoding the last block in the deflate stream, + plus 128 if inflate() returned immediately after decoding an end-of-block + code or decoding the complete header up to just before the first byte of the + deflate stream. The end-of-block will not be indicated until all of the + uncompressed data from that block has been written to strm->next_out. The + number of unused bits may in general be greater than seven, except when + bit 7 of data_type is set, in which case the number of unused bits will be + less than eight. + + inflate() should normally be called until it returns Z_STREAM_END or an + error. However if all decompression is to be performed in a single step + (a single call of inflate), the parameter flush should be set to + Z_FINISH. In this case all pending input is processed and all pending + output is flushed; avail_out must be large enough to hold all the + uncompressed data. (The size of the uncompressed data may have been saved + by the compressor for this purpose.) The next operation on this stream must + be inflateEnd to deallocate the decompression state. The use of Z_FINISH + is never required, but can be used to inform inflate that a faster approach + may be used for the single inflate() call. + + In this implementation, inflate() always flushes as much output as + possible to the output buffer, and always uses the faster approach on the + first call. So the only effect of the flush parameter in this implementation + is on the return value of inflate(), as noted below, or when it returns early + because Z_BLOCK is used. 
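+
+ As a rough sketch (src, dst, src_len, dst_len and uncompressed_len are
+ hypothetical variables; error handling is omitted; vmalloc() is used
+ here as one possible way to allocate the workspace), a single-step
+ decompression might look like:
+
+   z_stream s = { 0 };
+
+   s.workspace = vmalloc(zlib_inflate_workspacesize());
+   s.next_in = src;   s.avail_in = src_len;
+   zlib_inflateInit(&s);
+   s.next_out = dst;  s.avail_out = dst_len;
+   if (zlib_inflate(&s, Z_FINISH) == Z_STREAM_END)
+           uncompressed_len = s.total_out;
+   zlib_inflateEnd(&s);
+   vfree(s.workspace);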
+ + If a preset dictionary is needed after this call (see inflateSetDictionary + below), inflate sets strm->adler to the adler32 checksum of the dictionary + chosen by the compressor and returns Z_NEED_DICT; otherwise it sets + strm->adler to the adler32 checksum of all output produced so far (that is, + total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described + below. At the end of the stream, inflate() checks that its computed adler32 + checksum is equal to that saved by the compressor and returns Z_STREAM_END + only if the checksum is correct. + + inflate() will decompress and check either zlib-wrapped or gzip-wrapped + deflate data. The header type is detected automatically. Any information + contained in the gzip header is not retained, so applications that need that + information should instead use raw inflate, see inflateInit2() below, or + inflateBack() and perform their own processing of the gzip header and + trailer. + + inflate() returns Z_OK if some progress has been made (more input processed + or more output produced), Z_STREAM_END if the end of the compressed data has + been reached and all uncompressed output has been produced, Z_NEED_DICT if a + preset dictionary is needed at this point, Z_DATA_ERROR if the input data was + corrupted (input stream not conforming to the zlib format or incorrect check + value), Z_STREAM_ERROR if the stream structure was inconsistent (for example + if next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory, + Z_BUF_ERROR if no progress is possible or if there was not enough room in the + output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and + inflate() can be called again with more input and more output space to + continue decompressing. If Z_DATA_ERROR is returned, the application may then + call inflateSync() to look for a good compression block if a partial recovery + of the data is desired. +*/ + + +extern int zlib_inflateEnd (z_streamp strm); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any + pending output. + + inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state + was inconsistent. In the error case, msg may be set but then points to a + static string (which must not be deallocated). +*/ + + /* Advanced functions */ + +/* + The following functions are needed only in some special applications. +*/ + +/* +extern int deflateInit2 (z_streamp strm, + int level, + int method, + int windowBits, + int memLevel, + int strategy); + + This is another version of deflateInit with more compression options. The + fields next_in, zalloc, zfree and opaque must be initialized before by + the caller. + + The method parameter is the compression method. It must be Z_DEFLATED in + this version of the library. + + The windowBits parameter is the base two logarithm of the window size + (the size of the history buffer). It should be in the range 8..15 for this + version of the library. Larger values of this parameter result in better + compression at the expense of memory usage. The default value is 15 if + deflateInit is used instead. + + The memLevel parameter specifies how much memory should be allocated + for the internal compression state. memLevel=1 uses minimum memory but + is slow and reduces compression ratio; memLevel=9 uses maximum memory + for optimal speed. The default value is 8. See zconf.h for total memory + usage as a function of windowBits and memLevel. 
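+
+ As a rough sketch (error handling omitted; vmalloc() used here as one
+ possible way to allocate the workspace), sizing and attaching a
+ workspace for the chosen windowBits and memLevel in this kernel flavour
+ of the API might look like:
+
+   z_stream s = { 0 };
+
+   s.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
+                                                    MAX_MEM_LEVEL));
+   zlib_deflateInit2(&s, Z_DEFAULT_COMPRESSION, Z_DEFLATED, MAX_WBITS,
+                     MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);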
+ + The strategy parameter is used to tune the compression algorithm. Use the + value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a + filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no + string match). Filtered data consists mostly of small values with a + somewhat random distribution. In this case, the compression algorithm is + tuned to compress them better. The effect of Z_FILTERED is to force more + Huffman coding and less string matching; it is somewhat intermediate + between Z_DEFAULT and Z_HUFFMAN_ONLY. The strategy parameter only affects + the compression ratio but not the correctness of the compressed output even + if it is not set appropriately. + + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid + method). msg is set to null if there is no error message. deflateInit2 does + not perform any compression: this will be done by deflate(). +*/ + +#if 0 +extern int zlib_deflateSetDictionary (z_streamp strm, + const Byte *dictionary, + uInt dictLength); +#endif +/* + Initializes the compression dictionary from the given byte sequence + without producing any compressed output. This function must be called + immediately after deflateInit, deflateInit2 or deflateReset, before any + call of deflate. The compressor and decompressor must use exactly the same + dictionary (see inflateSetDictionary). + + The dictionary should consist of strings (byte sequences) that are likely + to be encountered later in the data to be compressed, with the most commonly + used strings preferably put towards the end of the dictionary. Using a + dictionary is most useful when the data to be compressed is short and can be + predicted with good accuracy; the data can then be compressed better than + with the default empty dictionary. + + Depending on the size of the compression data structures selected by + deflateInit or deflateInit2, a part of the dictionary may in effect be + discarded, for example if the dictionary is larger than the window size in + deflate or deflate2. Thus the strings most likely to be useful should be + put at the end of the dictionary, not at the front. + + Upon return of this function, strm->adler is set to the Adler32 value + of the dictionary; the decompressor may later use this value to determine + which dictionary has been used by the compressor. (The Adler32 value + applies to the whole dictionary even if only a subset of the dictionary is + actually used by the compressor.) + + deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a + parameter is invalid (such as NULL dictionary) or the stream state is + inconsistent (for example if deflate has already been called for this stream + or if the compression method is bsort). deflateSetDictionary does not + perform any compression: this will be done by deflate(). +*/ + +#if 0 +extern int zlib_deflateCopy (z_streamp dest, z_streamp source); +#endif + +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when several compression strategies will be + tried, for example when there are several ways of pre-processing the input + data with a filter. The streams that will be discarded should then be freed + by calling deflateEnd. Note that deflateCopy duplicates the internal + compression state which can be quite large, so this strategy is slow and + can consume lots of memory. 
+ + deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being NULL). msg is left unchanged in both source and + destination. +*/ + +extern int zlib_deflateReset (z_streamp strm); +/* + This function is equivalent to deflateEnd followed by deflateInit, + but does not free and reallocate all the internal compression state. + The stream will keep the same compression level and any other attributes + that may have been set by deflateInit2. + + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being NULL). +*/ + +static inline unsigned long deflateBound(unsigned long s) +{ + return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11; +} + +#if 0 +extern int zlib_deflateParams (z_streamp strm, int level, int strategy); +#endif +/* + Dynamically update the compression level and compression strategy. The + interpretation of level and strategy is as in deflateInit2. This can be + used to switch between compression and straight copy of the input data, or + to switch to a different kind of input data requiring a different + strategy. If the compression level is changed, the input available so far + is compressed with the old level (and may be flushed); the new level will + take effect only at the next call of deflate(). + + Before the call of deflateParams, the stream state must be set as for + a call of deflate(), since the currently available input may have to + be compressed and flushed. In particular, strm->avail_out must be non-zero. + + deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source + stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR + if strm->avail_out was zero. +*/ + +/* +extern int inflateInit2 (z_streamp strm, int windowBits); + + This is another version of inflateInit with an extra parameter. The + fields next_in, avail_in, zalloc, zfree and opaque must be initialized + before by the caller. + + The windowBits parameter is the base two logarithm of the maximum window + size (the size of the history buffer). It should be in the range 8..15 for + this version of the library. The default value is 15 if inflateInit is used + instead. windowBits must be greater than or equal to the windowBits value + provided to deflateInit2() while compressing, or it must be equal to 15 if + deflateInit2() was not used. If a compressed stream with a larger window + size is given as input, inflate() will return with the error code + Z_DATA_ERROR instead of trying to allocate a larger window. + + windowBits can also be -8..-15 for raw inflate. In this case, -windowBits + determines the window size. inflate() will then process raw deflate data, + not looking for a zlib or gzip header, not generating a check value, and not + looking for any check values for comparison at the end of the stream. This + is for use with other formats that use the deflate compressed data format + such as zip. Those formats provide their own check values. If a custom + format is developed using the raw deflate format for compressed data, it is + recommended that a check value such as an adler32 or a crc32 be applied to + the uncompressed data as is done in the zlib, gzip, and zip formats. For + most applications, the zlib format should be used as is. Note that comments + above on the use in deflateInit2() applies to the magnitude of windowBits. + + windowBits can also be greater than 15 for optional gzip decoding. 
Add
+ 32 to windowBits to enable zlib and gzip decoding with automatic header
+ detection, or add 16 to decode only the gzip format (the zlib format will
+ return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is
+ a crc32 instead of an adler32.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if a parameter is invalid (such as a null strm). msg
+ is set to null if there is no error message. inflateInit2 does not perform
+ any decompression apart from reading the zlib header if present: this will
+ be done by inflate(). (So next_in and avail_in may be modified, but next_out
+ and avail_out are unchanged.)
+*/
+
+extern int zlib_inflateSetDictionary (z_streamp strm,
+ const Byte *dictionary,
+ uInt dictLength);
+/*
+ Initializes the decompression dictionary from the given uncompressed byte
+ sequence. This function must be called immediately after a call of inflate,
+ if that call returned Z_NEED_DICT. The dictionary chosen by the compressor
+ can be determined from the adler32 value returned by that call of inflate.
+ The compressor and decompressor must use exactly the same dictionary (see
+ deflateSetDictionary). For raw inflate, this function can be called
+ immediately after inflateInit2() or inflateReset() and before any call of
+ inflate() to set the dictionary. The application must ensure that the
+ dictionary that was used for compression is provided.
+
+ inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
+ parameter is invalid (such as NULL dictionary) or the stream state is
+ inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
+ expected one (incorrect adler32 value). inflateSetDictionary does not
+ perform any decompression: this will be done by subsequent calls of
+ inflate().
+*/
+
+#if 0
+extern int zlib_inflateSync (z_streamp strm);
+#endif
+/*
+ Skips invalid compressed data until a full flush point (see above the
+ description of deflate with Z_FULL_FLUSH) can be found, or until all
+ available input is skipped. No output is provided.
+
+ inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
+ if no more input was provided, Z_DATA_ERROR if no flush point has been found,
+ or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
+ case, the application may save the current value of total_in which
+ indicates where valid compressed data was found. In the error case, the
+ application may repeatedly call inflateSync, providing more input each time,
+ until success or end of the input data.
+*/
+
+extern int zlib_inflateReset (z_streamp strm);
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state.
+ The stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int zlib_inflateIncomp (z_stream *strm);
+/*
+ This function adds the data at next_in (avail_in bytes) to the output
+ history without performing any output. There must be no pending output,
+ and the decompressor must be expecting to see the start of a block.
+ Calling this function is equivalent to decompressing a stored block
+ containing the data at next_in (except that the data is not output).
+*/ + +#define zlib_deflateInit(strm, level) \ + zlib_deflateInit2((strm), (level), Z_DEFLATED, MAX_WBITS, \ + DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY) +#define zlib_inflateInit(strm) \ + zlib_inflateInit2((strm), DEF_WBITS) + +extern int zlib_deflateInit2(z_streamp strm, int level, int method, + int windowBits, int memLevel, + int strategy); +extern int zlib_inflateInit2(z_streamp strm, int windowBits); + +#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL) + struct internal_state {int dummy;}; /* hack for buggy compilers */ +#endif + +/* Utility function: initialize zlib, unpack binary blob, clean up zlib, + * return len or negative error code. */ +extern int zlib_inflate_blob(void *dst, unsigned dst_sz, const void *src, unsigned src_sz); + +#endif /* _ZLIB_H */ diff --git a/include/linux/zorro.h b/include/linux/zorro.h new file mode 100644 index 00000000..dff42025 --- /dev/null +++ b/include/linux/zorro.h @@ -0,0 +1,234 @@ +/* + * linux/zorro.h -- Amiga AutoConfig (Zorro) Bus Definitions + * + * Copyright (C) 1995--2003 Geert Uytterhoeven + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#ifndef _LINUX_ZORRO_H +#define _LINUX_ZORRO_H + +#include <linux/device.h> + + + /* + * Each Zorro board has a 32-bit ID of the form + * + * mmmmmmmmmmmmmmmmppppppppeeeeeeee + * + * with + * + * mmmmmmmmmmmmmmmm 16-bit Manufacturer ID (assigned by CBM (sigh)) + * pppppppp 8-bit Product ID (assigned by manufacturer) + * eeeeeeee 8-bit Extended Product ID (currently only used + * for some GVP boards) + */ + + +#define ZORRO_MANUF(id) ((id) >> 16) +#define ZORRO_PROD(id) (((id) >> 8) & 0xff) +#define ZORRO_EPC(id) ((id) & 0xff) + +#define ZORRO_ID(manuf, prod, epc) \ + ((ZORRO_MANUF_##manuf << 16) | ((prod) << 8) | (epc)) + +typedef __u32 zorro_id; + + +/* Include the ID list */ +#include <linux/zorro_ids.h> + + + /* + * GVP identifies most of its products through the 'extended product code' + * (epc). The epc has to be ANDed with the GVP_PRODMASK before the + * identification. + */ + +#define GVP_PRODMASK (0xf8) +#define GVP_SCSICLKMASK (0x01) + +enum GVP_flags { + GVP_IO = 0x01, + GVP_ACCEL = 0x02, + GVP_SCSI = 0x04, + GVP_24BITDMA = 0x08, + GVP_25BITDMA = 0x10, + GVP_NOBANK = 0x20, + GVP_14MHZ = 0x40, +}; + + +struct Node { + struct Node *ln_Succ; /* Pointer to next (successor) */ + struct Node *ln_Pred; /* Pointer to previous (predecessor) */ + __u8 ln_Type; + __s8 ln_Pri; /* Priority, for sorting */ + __s8 *ln_Name; /* ID string, null terminated */ +} __attribute__ ((packed)); + +struct ExpansionRom { + /* -First 16 bytes of the expansion ROM */ + __u8 er_Type; /* Board type, size and flags */ + __u8 er_Product; /* Product number, assigned by manufacturer */ + __u8 er_Flags; /* Flags */ + __u8 er_Reserved03; /* Must be zero ($ff inverted) */ + __u16 er_Manufacturer; /* Unique ID, ASSIGNED BY COMMODORE-AMIGA! 
*/ + __u32 er_SerialNumber; /* Available for use by manufacturer */ + __u16 er_InitDiagVec; /* Offset to optional "DiagArea" structure */ + __u8 er_Reserved0c; + __u8 er_Reserved0d; + __u8 er_Reserved0e; + __u8 er_Reserved0f; +} __attribute__ ((packed)); + +/* er_Type board type bits */ +#define ERT_TYPEMASK 0xc0 +#define ERT_ZORROII 0xc0 +#define ERT_ZORROIII 0x80 + +/* other bits defined in er_Type */ +#define ERTB_MEMLIST 5 /* Link RAM into free memory list */ +#define ERTF_MEMLIST (1<<5) + +struct ConfigDev { + struct Node cd_Node; + __u8 cd_Flags; /* (read/write) */ + __u8 cd_Pad; /* reserved */ + struct ExpansionRom cd_Rom; /* copy of board's expansion ROM */ + void *cd_BoardAddr; /* where in memory the board was placed */ + __u32 cd_BoardSize; /* size of board in bytes */ + __u16 cd_SlotAddr; /* which slot number (PRIVATE) */ + __u16 cd_SlotSize; /* number of slots (PRIVATE) */ + void *cd_Driver; /* pointer to node of driver */ + struct ConfigDev *cd_NextCD; /* linked list of drivers to config */ + __u32 cd_Unused[4]; /* for whatever the driver wants */ +} __attribute__ ((packed)); + +#define ZORRO_NUM_AUTO 16 + +#ifdef __KERNEL__ + +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/mod_devicetable.h> + +#include <asm/zorro.h> + + + /* + * Zorro devices + */ + +struct zorro_dev { + struct ExpansionRom rom; + zorro_id id; + struct zorro_driver *driver; /* which driver has allocated this device */ + struct device dev; /* Generic device interface */ + u16 slotaddr; + u16 slotsize; + char name[64]; + struct resource resource; +}; + +#define to_zorro_dev(n) container_of(n, struct zorro_dev, dev) + + + /* + * Zorro bus + */ + +extern struct bus_type zorro_bus_type; + + + /* + * Zorro device drivers + */ + +struct zorro_driver { + struct list_head node; + char *name; + const struct zorro_device_id *id_table; /* NULL if wants all devices */ + int (*probe)(struct zorro_dev *z, const struct zorro_device_id *id); /* New device inserted */ + void (*remove)(struct zorro_dev *z); /* Device removed (NULL if not a hot-plug capable driver) */ + struct device_driver driver; +}; + +#define to_zorro_driver(drv) container_of(drv, struct zorro_driver, driver) + + +#define zorro_for_each_dev(dev) \ + for (dev = &zorro_autocon[0]; dev < zorro_autocon+zorro_num_autocon; dev++) + + +/* New-style probing */ +extern int zorro_register_driver(struct zorro_driver *); +extern void zorro_unregister_driver(struct zorro_driver *); +extern const struct zorro_device_id *zorro_match_device(const struct zorro_device_id *ids, const struct zorro_dev *z); +static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z) +{ + return z->driver; +} + + +extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */ +extern struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; + + + /* + * Zorro Functions + */ + +extern struct zorro_dev *zorro_find_device(zorro_id id, + struct zorro_dev *from); + +#define zorro_resource_start(z) ((z)->resource.start) +#define zorro_resource_end(z) ((z)->resource.end) +#define zorro_resource_len(z) (resource_size(&(z)->resource)) +#define zorro_resource_flags(z) ((z)->resource.flags) + +#define zorro_request_device(z, name) \ + request_mem_region(zorro_resource_start(z), zorro_resource_len(z), name) +#define zorro_release_device(z) \ + release_mem_region(zorro_resource_start(z), zorro_resource_len(z)) + +/* Similar to the helpers above, these manipulate per-zorro_dev + * driver-specific data. 
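+ * (As a rough illustration with made-up names: a driver's probe() might
+ * allocate its private state and store it with
+ *
+ *	struct mycard_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ *	zorro_set_drvdata(z, priv);
+ *
+ * and remove() would retrieve it again with zorro_get_drvdata(z).)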
They are really just a wrapper around + * the generic device structure functions of these calls. + */ +static inline void *zorro_get_drvdata (struct zorro_dev *z) +{ + return dev_get_drvdata(&z->dev); +} + +static inline void zorro_set_drvdata (struct zorro_dev *z, void *data) +{ + dev_set_drvdata(&z->dev, data); +} + + + /* + * Bitmask indicating portions of available Zorro II RAM that are unused + * by the system. Every bit represents a 64K chunk, for a maximum of 8MB + * (128 chunks, physical 0x00200000-0x009fffff). + * + * If you want to use (= allocate) portions of this RAM, you should clear + * the corresponding bits. + */ + +extern DECLARE_BITMAP(zorro_unused_z2ram, 128); + +#define Z2RAM_START (0x00200000) +#define Z2RAM_END (0x00a00000) +#define Z2RAM_SIZE (0x00800000) +#define Z2RAM_CHUNKSIZE (0x00010000) +#define Z2RAM_CHUNKMASK (0x0000ffff) +#define Z2RAM_CHUNKSHIFT (16) + + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_ZORRO_H */ diff --git a/include/linux/zorro_ids.h b/include/linux/zorro_ids.h new file mode 100644 index 00000000..74bc53bc --- /dev/null +++ b/include/linux/zorro_ids.h @@ -0,0 +1,552 @@ +/* + * Zorro board IDs + * + * Please keep sorted. + */ + + +#define ZORRO_MANUF_PACIFIC_PERIPHERALS 0x00D3 +#define ZORRO_PROD_PACIFIC_PERIPHERALS_SE_2000_A500 ZORRO_ID(PACIFIC_PERIPHERALS, 0x00, 0) +#define ZORRO_PROD_PACIFIC_PERIPHERALS_SCSI ZORRO_ID(PACIFIC_PERIPHERALS, 0x0A, 0) + +#define ZORRO_MANUF_MACROSYSTEMS_USA_2 0x0100 +#define ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE ZORRO_ID(MACROSYSTEMS_USA_2, 0x13, 0) + +#define ZORRO_MANUF_KUPKE_1 0x00DD +#define ZORRO_PROD_KUPKE_GOLEM_RAM_BOX_2MB ZORRO_ID(KUPKE_1, 0x00, 0) + +#define ZORRO_MANUF_MEMPHIS 0x0100 +#define ZORRO_PROD_MEMPHIS_STORMBRINGER ZORRO_ID(MEMPHIS, 0x00, 0) + +#define ZORRO_MANUF_3_STATE 0x0200 +#define ZORRO_PROD_3_STATE_MEGAMIX_2000 ZORRO_ID(3_STATE, 0x02, 0) + +#define ZORRO_MANUF_COMMODORE_BRAUNSCHWEIG 0x0201 +#define ZORRO_PROD_CBM_A2088_A2286 ZORRO_ID(COMMODORE_BRAUNSCHWEIG, 0x01, 0) +#define ZORRO_PROD_CBM_A2286 ZORRO_ID(COMMODORE_BRAUNSCHWEIG, 0x02, 0) +#define ZORRO_PROD_CBM_A4091_1 ZORRO_ID(COMMODORE_BRAUNSCHWEIG, 0x54, 0) +#define ZORRO_PROD_CBM_A2386SX_1 ZORRO_ID(COMMODORE_BRAUNSCHWEIG, 0x67, 0) + +#define ZORRO_MANUF_COMMODORE_WEST_CHESTER_1 0x0202 +#define ZORRO_PROD_CBM_A2090A ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x01, 0) +#define ZORRO_PROD_CBM_A590_A2091_1 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x02, 0) +#define ZORRO_PROD_CBM_A590_A2091_2 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x03, 0) +#define ZORRO_PROD_CBM_A2090B ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x04, 0) +#define ZORRO_PROD_CBM_A2060 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x09, 0) +#define ZORRO_PROD_CBM_A590_A2052_A2058_A2091 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x0A, 0) +#define ZORRO_PROD_CBM_A560_RAM ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x20, 0) +#define ZORRO_PROD_CBM_A2232_PROTOTYPE ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x45, 0) +#define ZORRO_PROD_CBM_A2232 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x46, 0) +#define ZORRO_PROD_CBM_A2620 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x50, 0) +#define ZORRO_PROD_CBM_A2630 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x51, 0) +#define ZORRO_PROD_CBM_A4091_2 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x54, 0) +#define ZORRO_PROD_CBM_A2065_1 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x5A, 0) +#define ZORRO_PROD_CBM_ROMULATOR ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x60, 0) +#define ZORRO_PROD_CBM_A3000_TEST_FIXTURE ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x61, 0) +#define ZORRO_PROD_CBM_A2386SX_2 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x67, 0) +#define 
ZORRO_PROD_CBM_A2065_2 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x70, 0) + +#define ZORRO_MANUF_COMMODORE_WEST_CHESTER_2 0x0203 +#define ZORRO_PROD_CBM_A2090A_CM ZORRO_ID(COMMODORE_WEST_CHESTER_2, 0x03, 0) + +#define ZORRO_MANUF_PROGRESSIVE_PERIPHERALS_AND_SYSTEMS_2 0x02F4 +#define ZORRO_PROD_PPS_EXP8000 ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS_2, 0x02, 0) + +#define ZORRO_MANUF_KOLFF_COMPUTER_SUPPLIES 0x02FF +#define ZORRO_PROD_KCS_POWER_PC_BOARD ZORRO_ID(KOLFF_COMPUTER_SUPPLIES, 0x00, 0) + +#define ZORRO_MANUF_CARDCO_1 0x03EC +#define ZORRO_PROD_CARDCO_KRONOS_2000_1 ZORRO_ID(CARDCO_1, 0x04, 0) +#define ZORRO_PROD_CARDCO_A1000_1 ZORRO_ID(CARDCO_1, 0x0C, 0) +#define ZORRO_PROD_CARDCO_ESCORT ZORRO_ID(CARDCO_1, 0x0E, 0) +#define ZORRO_PROD_CARDCO_A2410 ZORRO_ID(CARDCO_1, 0xF5, 0) + +#define ZORRO_MANUF_A_SQUARED 0x03ED +#define ZORRO_PROD_A_SQUARED_LIVE_2000 ZORRO_ID(A_SQUARED, 0x01, 0) + +#define ZORRO_MANUF_COMSPEC_COMMUNICATIONS 0x03EE +#define ZORRO_PROD_COMSPEC_COMMUNICATIONS_AX2000 ZORRO_ID(COMSPEC_COMMUNICATIONS, 0x01, 0) + +#define ZORRO_MANUF_ANAKIN_RESEARCH 0x03F1 +#define ZORRO_PROD_ANAKIN_RESEARCH_EASYL ZORRO_ID(ANAKIN_RESEARCH, 0x01, 0) + +#define ZORRO_MANUF_MICROBOTICS 0x03F2 +#define ZORRO_PROD_MICROBOTICS_STARBOARD_II ZORRO_ID(MICROBOTICS, 0x00, 0) +#define ZORRO_PROD_MICROBOTICS_STARDRIVE ZORRO_ID(MICROBOTICS, 0x02, 0) +#define ZORRO_PROD_MICROBOTICS_8_UP_A ZORRO_ID(MICROBOTICS, 0x03, 0) +#define ZORRO_PROD_MICROBOTICS_8_UP_Z ZORRO_ID(MICROBOTICS, 0x04, 0) +#define ZORRO_PROD_MICROBOTICS_DELTA_RAM ZORRO_ID(MICROBOTICS, 0x20, 0) +#define ZORRO_PROD_MICROBOTICS_8_STAR_RAM ZORRO_ID(MICROBOTICS, 0x40, 0) +#define ZORRO_PROD_MICROBOTICS_8_STAR ZORRO_ID(MICROBOTICS, 0x41, 0) +#define ZORRO_PROD_MICROBOTICS_VXL_RAM_32 ZORRO_ID(MICROBOTICS, 0x44, 0) +#define ZORRO_PROD_MICROBOTICS_VXL_68030 ZORRO_ID(MICROBOTICS, 0x45, 0) +#define ZORRO_PROD_MICROBOTICS_DELTA ZORRO_ID(MICROBOTICS, 0x60, 0) +#define ZORRO_PROD_MICROBOTICS_MBX_1200_1200Z_RAM ZORRO_ID(MICROBOTICS, 0x81, 0) +#define ZORRO_PROD_MICROBOTICS_HARDFRAME_2000_1 ZORRO_ID(MICROBOTICS, 0x96, 0) +#define ZORRO_PROD_MICROBOTICS_HARDFRAME_2000_2 ZORRO_ID(MICROBOTICS, 0x9E, 0) +#define ZORRO_PROD_MICROBOTICS_MBX_1200_1200Z ZORRO_ID(MICROBOTICS, 0xC1, 0) + +#define ZORRO_MANUF_ACCESS_ASSOCIATES_ALEGRA 0x03F4 + +#define ZORRO_MANUF_EXPANSION_TECHNOLOGIES 0x03F6 + +#define ZORRO_MANUF_ASDG 0x03FF +#define ZORRO_PROD_ASDG_MEMORY_1 ZORRO_ID(ASDG, 0x01, 0) +#define ZORRO_PROD_ASDG_MEMORY_2 ZORRO_ID(ASDG, 0x02, 0) +#define ZORRO_PROD_ASDG_EB920_LAN_ROVER ZORRO_ID(ASDG, 0xFE, 0) +#define ZORRO_PROD_ASDG_GPIB_DUALIEEE488_TWIN_X ZORRO_ID(ASDG, 0xFF, 0) + +#define ZORRO_MANUF_IMTRONICS_1 0x0404 +#define ZORRO_PROD_IMTRONICS_HURRICANE_2800_1 ZORRO_ID(IMTRONICS_1, 0x39, 0) +#define ZORRO_PROD_IMTRONICS_HURRICANE_2800_2 ZORRO_ID(IMTRONICS_1, 0x57, 0) + +#define ZORRO_MANUF_CBM_UNIVERSITY_OF_LOWELL 0x0406 +#define ZORRO_PROD_CBM_A2410 ZORRO_ID(CBM_UNIVERSITY_OF_LOWELL, 0x00, 0) + +#define ZORRO_MANUF_AMERISTAR 0x041D +#define ZORRO_PROD_AMERISTAR_A2065 ZORRO_ID(AMERISTAR, 0x01, 0) +#define ZORRO_PROD_AMERISTAR_A560 ZORRO_ID(AMERISTAR, 0x09, 0) +#define ZORRO_PROD_AMERISTAR_A4066 ZORRO_ID(AMERISTAR, 0x0A, 0) + +#define ZORRO_MANUF_SUPRA 0x0420 +#define ZORRO_PROD_SUPRA_SUPRADRIVE_4x4 ZORRO_ID(SUPRA, 0x01, 0) +#define ZORRO_PROD_SUPRA_1000_RAM ZORRO_ID(SUPRA, 0x02, 0) +#define ZORRO_PROD_SUPRA_2000_DMA ZORRO_ID(SUPRA, 0x03, 0) +#define ZORRO_PROD_SUPRA_500 ZORRO_ID(SUPRA, 0x05, 0) +#define ZORRO_PROD_SUPRA_500_SCSI ZORRO_ID(SUPRA, 0x08, 0) +#define 
ZORRO_PROD_SUPRA_500XP_2000_RAM ZORRO_ID(SUPRA, 0x09, 0) +#define ZORRO_PROD_SUPRA_500RX_2000_RAM ZORRO_ID(SUPRA, 0x0A, 0) +#define ZORRO_PROD_SUPRA_2400ZI ZORRO_ID(SUPRA, 0x0B, 0) +#define ZORRO_PROD_SUPRA_500XP_SUPRADRIVE_WORDSYNC ZORRO_ID(SUPRA, 0x0C, 0) +#define ZORRO_PROD_SUPRA_SUPRADRIVE_WORDSYNC_II ZORRO_ID(SUPRA, 0x0D, 0) +#define ZORRO_PROD_SUPRA_2400ZIPLUS ZORRO_ID(SUPRA, 0x10, 0) + +#define ZORRO_MANUF_COMPUTER_SYSTEMS_ASSOCIATES 0x0422 +#define ZORRO_PROD_CSA_MAGNUM ZORRO_ID(COMPUTER_SYSTEMS_ASSOCIATES, 0x11, 0) +#define ZORRO_PROD_CSA_12_GAUGE ZORRO_ID(COMPUTER_SYSTEMS_ASSOCIATES, 0x15, 0) + +#define ZORRO_MANUF_MARC_MICHAEL_GROTH 0x0439 + +#define ZORRO_MANUF_M_TECH 0x0502 +#define ZORRO_PROD_MTEC_AT500_1 ZORRO_ID(M_TECH, 0x03, 0) + +#define ZORRO_MANUF_GREAT_VALLEY_PRODUCTS_1 0x06E1 +#define ZORRO_PROD_GVP_IMPACT_SERIES_I ZORRO_ID(GREAT_VALLEY_PRODUCTS_1, 0x08, 0) + +#define ZORRO_MANUF_BYTEBOX 0x07DA +#define ZORRO_PROD_BYTEBOX_A500 ZORRO_ID(BYTEBOX, 0x00, 0) + +#define ZORRO_MANUF_DKB_POWER_COMPUTING 0x07DC +#define ZORRO_PROD_DKB_POWER_COMPUTING_SECUREKEY ZORRO_ID(DKB_POWER_COMPUTING, 0x09, 0) +#define ZORRO_PROD_DKB_POWER_COMPUTING_DKM_3128 ZORRO_ID(DKB_POWER_COMPUTING, 0x0E, 0) +#define ZORRO_PROD_DKB_POWER_COMPUTING_RAPID_FIRE ZORRO_ID(DKB_POWER_COMPUTING, 0x0F, 0) +#define ZORRO_PROD_DKB_POWER_COMPUTING_DKM_1202 ZORRO_ID(DKB_POWER_COMPUTING, 0x10, 0) +#define ZORRO_PROD_DKB_POWER_COMPUTING_COBRA_VIPER_II_68EC030 ZORRO_ID(DKB_POWER_COMPUTING, 0x12, 0) +#define ZORRO_PROD_DKB_POWER_COMPUTING_WILDFIRE_060_1 ZORRO_ID(DKB_POWER_COMPUTING, 0x17, 0) +#define ZORRO_PROD_DKB_POWER_COMPUTING_WILDFIRE_060_2 ZORRO_ID(DKB_POWER_COMPUTING, 0xFF, 0) + +#define ZORRO_MANUF_GREAT_VALLEY_PRODUCTS_2 0x07E1 +#define ZORRO_PROD_GVP_IMPACT_SERIES_I_4K ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x01, 0) +#define ZORRO_PROD_GVP_IMPACT_SERIES_I_16K_2 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x02, 0) +#define ZORRO_PROD_GVP_IMPACT_SERIES_I_16K_3 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x03, 0) +#define ZORRO_PROD_GVP_IMPACT_3001_IDE_1 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x08, 0) +#define ZORRO_PROD_GVP_IMPACT_3001_RAM ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x09, 0) +#define ZORRO_PROD_GVP_IMPACT_SERIES_II_RAM_1 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0A, 0) +#define ZORRO_PROD_GVP_EPC_BASE ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0) +#define ZORRO_PROD_GVP_GFORCE_040_1 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x20) +#define ZORRO_PROD_GVP_GFORCE_040_SCSI_1 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x30) +#define ZORRO_PROD_GVP_A1291 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x40) +#define ZORRO_PROD_GVP_COMBO_030_R4 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x60) +#define ZORRO_PROD_GVP_COMBO_030_R4_SCSI ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x70) +#define ZORRO_PROD_GVP_PHONEPAK ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x78) +#define ZORRO_PROD_GVP_IO_EXTENDER ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x98) +#define ZORRO_PROD_GVP_GFORCE_030 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xa0) +#define ZORRO_PROD_GVP_GFORCE_030_SCSI ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xb0) +#define ZORRO_PROD_GVP_A530 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xc0) +#define ZORRO_PROD_GVP_A530_SCSI ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xd0) +#define ZORRO_PROD_GVP_COMBO_030_R3 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xe0) +#define ZORRO_PROD_GVP_COMBO_030_R3_SCSI ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xf0) +#define ZORRO_PROD_GVP_SERIES_II ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xf8) +#define ZORRO_PROD_GVP_IMPACT_3001_IDE_2 
ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0D, 0) +/*#define ZORRO_PROD_GVP_A2000_030 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0D, 0)*/ +/*#define ZORRO_PROD_GVP_GFORCE_040_SCSI_2 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0D, 0)*/ +#define ZORRO_PROD_GVP_GFORCE_040_060 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x16, 0) +#define ZORRO_PROD_GVP_IMPACT_VISION_24 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x20, 0) +#define ZORRO_PROD_GVP_GFORCE_040_2 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0xFF, 0) + +#define ZORRO_MANUF_CALIFORNIA_ACCESS_SYNERGY 0x07E5 +#define ZORRO_PROD_CALIFORNIA_ACCESS_SYNERGY_MALIBU ZORRO_ID(CALIFORNIA_ACCESS_SYNERGY, 0x01, 0) + +#define ZORRO_MANUF_XETEC 0x07E6 +#define ZORRO_PROD_XETEC_FASTCARD ZORRO_ID(XETEC, 0x01, 0) +#define ZORRO_PROD_XETEC_FASTCARD_RAM ZORRO_ID(XETEC, 0x02, 0) +#define ZORRO_PROD_XETEC_FASTCARD_PLUS ZORRO_ID(XETEC, 0x03, 0) + +#define ZORRO_MANUF_PROGRESSIVE_PERIPHERALS_AND_SYSTEMS 0x07EA +#define ZORRO_PROD_PPS_MERCURY ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0x00, 0) +#define ZORRO_PROD_PPS_A3000_68040 ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0x01, 0) +#define ZORRO_PROD_PPS_A2000_68040 ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0x69, 0) +#define ZORRO_PROD_PPS_ZEUS ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0x96, 0) +#define ZORRO_PROD_PPS_A500_68040 ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0xBB, 0) + +#define ZORRO_MANUF_XEBEC 0x07EC + +#define ZORRO_MANUF_SPIRIT_TECHNOLOGY 0x07F2 +#define ZORRO_PROD_SPIRIT_TECHNOLOGY_INSIDER_IN1000 ZORRO_ID(SPIRIT_TECHNOLOGY, 0x01, 0) +#define ZORRO_PROD_SPIRIT_TECHNOLOGY_INSIDER_IN500 ZORRO_ID(SPIRIT_TECHNOLOGY, 0x02, 0) +#define ZORRO_PROD_SPIRIT_TECHNOLOGY_SIN500 ZORRO_ID(SPIRIT_TECHNOLOGY, 0x03, 0) +#define ZORRO_PROD_SPIRIT_TECHNOLOGY_HDA_506 ZORRO_ID(SPIRIT_TECHNOLOGY, 0x04, 0) +#define ZORRO_PROD_SPIRIT_TECHNOLOGY_AX_S ZORRO_ID(SPIRIT_TECHNOLOGY, 0x05, 0) +#define ZORRO_PROD_SPIRIT_TECHNOLOGY_OCTABYTE ZORRO_ID(SPIRIT_TECHNOLOGY, 0x06, 0) +#define ZORRO_PROD_SPIRIT_TECHNOLOGY_INMATE ZORRO_ID(SPIRIT_TECHNOLOGY, 0x08, 0) + +#define ZORRO_MANUF_SPIRIT_TECHNOLOGY_2 0x07F3 + +#define ZORRO_MANUF_BSC_ALFADATA_1 0x07FE +#define ZORRO_PROD_BSC_ALF_3_1 ZORRO_ID(BSC_ALFADATA_1, 0x03, 0) + +#define ZORRO_MANUF_BSC_ALFADATA_2 0x0801 +#define ZORRO_PROD_BSC_ALF_2_1 ZORRO_ID(BSC_ALFADATA_2, 0x01, 0) +#define ZORRO_PROD_BSC_ALF_2_2 ZORRO_ID(BSC_ALFADATA_2, 0x02, 0) +#define ZORRO_PROD_BSC_ALF_3_2 ZORRO_ID(BSC_ALFADATA_2, 0x03, 0) + +#define ZORRO_MANUF_CARDCO_2 0x0802 +#define ZORRO_PROD_CARDCO_KRONOS_2000_2 ZORRO_ID(CARDCO_2, 0x04, 0) +#define ZORRO_PROD_CARDCO_A1000_2 ZORRO_ID(CARDCO_2, 0x0C, 0) + +#define ZORRO_MANUF_JOCHHEIM 0x0804 +#define ZORRO_PROD_JOCHHEIM_RAM ZORRO_ID(JOCHHEIM, 0x01, 0) + +#define ZORRO_MANUF_CHECKPOINT_TECHNOLOGIES 0x0807 +#define ZORRO_PROD_CHECKPOINT_TECHNOLOGIES_SERIAL_SOLUTION ZORRO_ID(CHECKPOINT_TECHNOLOGIES, 0x00, 0) + +#define ZORRO_MANUF_EDOTRONIK 0x0810 +#define ZORRO_PROD_EDOTRONIK_IEEE_488 ZORRO_ID(EDOTRONIK, 0x01, 0) +#define ZORRO_PROD_EDOTRONIK_8032 ZORRO_ID(EDOTRONIK, 0x02, 0) +#define ZORRO_PROD_EDOTRONIK_MULTISERIAL ZORRO_ID(EDOTRONIK, 0x03, 0) +#define ZORRO_PROD_EDOTRONIK_VIDEODIGITIZER ZORRO_ID(EDOTRONIK, 0x04, 0) +#define ZORRO_PROD_EDOTRONIK_PARALLEL_IO ZORRO_ID(EDOTRONIK, 0x05, 0) +#define ZORRO_PROD_EDOTRONIK_PIC_PROTOYPING ZORRO_ID(EDOTRONIK, 0x06, 0) +#define ZORRO_PROD_EDOTRONIK_ADC ZORRO_ID(EDOTRONIK, 0x07, 0) +#define ZORRO_PROD_EDOTRONIK_VME ZORRO_ID(EDOTRONIK, 0x08, 0) +#define ZORRO_PROD_EDOTRONIK_DSP96000 ZORRO_ID(EDOTRONIK, 0x09, 0) + +#define ZORRO_MANUF_NES_INC 0x0813 
+#define ZORRO_PROD_NES_INC_RAM ZORRO_ID(NES_INC, 0x00, 0) + +#define ZORRO_MANUF_ICD 0x0817 +#define ZORRO_PROD_ICD_ADVANTAGE_2000_SCSI ZORRO_ID(ICD, 0x01, 0) +#define ZORRO_PROD_ICD_ADVANTAGE_IDE ZORRO_ID(ICD, 0x03, 0) +#define ZORRO_PROD_ICD_ADVANTAGE_2080_RAM ZORRO_ID(ICD, 0x04, 0) + +#define ZORRO_MANUF_KUPKE_2 0x0819 +#define ZORRO_PROD_KUPKE_OMTI ZORRO_ID(KUPKE_2, 0x01, 0) +#define ZORRO_PROD_KUPKE_SCSI_II ZORRO_ID(KUPKE_2, 0x02, 0) +#define ZORRO_PROD_KUPKE_GOLEM_BOX ZORRO_ID(KUPKE_2, 0x03, 0) +#define ZORRO_PROD_KUPKE_030_882 ZORRO_ID(KUPKE_2, 0x04, 0) +#define ZORRO_PROD_KUPKE_SCSI_AT ZORRO_ID(KUPKE_2, 0x05, 0) + +#define ZORRO_MANUF_GREAT_VALLEY_PRODUCTS_3 0x081D +#define ZORRO_PROD_GVP_A2000_RAM8 ZORRO_ID(GREAT_VALLEY_PRODUCTS_3, 0x09, 0) +#define ZORRO_PROD_GVP_IMPACT_SERIES_II_RAM_2 ZORRO_ID(GREAT_VALLEY_PRODUCTS_3, 0x0A, 0) + +#define ZORRO_MANUF_INTERWORKS_NETWORK 0x081E + +#define ZORRO_MANUF_HARDITAL_SYNTHESIS 0x0820 +#define ZORRO_PROD_HARDITAL_SYNTHESIS_TQM_68030_68882 ZORRO_ID(HARDITAL_SYNTHESIS, 0x14, 0) + +#define ZORRO_MANUF_APPLIED_ENGINEERING 0x0828 +#define ZORRO_PROD_APPLIED_ENGINEERING_DL2000 ZORRO_ID(APPLIED_ENGINEERING, 0x10, 0) +#define ZORRO_PROD_APPLIED_ENGINEERING_RAM_WORKS ZORRO_ID(APPLIED_ENGINEERING, 0xE0, 0) + +#define ZORRO_MANUF_BSC_ALFADATA_3 0x082C +#define ZORRO_PROD_BSC_OKTAGON_2008 ZORRO_ID(BSC_ALFADATA_3, 0x05, 0) +#define ZORRO_PROD_BSC_TANDEM_AT_2008_508 ZORRO_ID(BSC_ALFADATA_3, 0x06, 0) +#define ZORRO_PROD_BSC_ALFA_RAM_1200 ZORRO_ID(BSC_ALFADATA_3, 0x07, 0) +#define ZORRO_PROD_BSC_OKTAGON_2008_RAM ZORRO_ID(BSC_ALFADATA_3, 0x08, 0) +#define ZORRO_PROD_BSC_MULTIFACE_I ZORRO_ID(BSC_ALFADATA_3, 0x10, 0) +#define ZORRO_PROD_BSC_MULTIFACE_II ZORRO_ID(BSC_ALFADATA_3, 0x11, 0) +#define ZORRO_PROD_BSC_MULTIFACE_III ZORRO_ID(BSC_ALFADATA_3, 0x12, 0) +#define ZORRO_PROD_BSC_FRAMEMASTER_II ZORRO_ID(BSC_ALFADATA_3, 0x20, 0) +#define ZORRO_PROD_BSC_GRAFFITI_RAM ZORRO_ID(BSC_ALFADATA_3, 0x21, 0) +#define ZORRO_PROD_BSC_GRAFFITI_REG ZORRO_ID(BSC_ALFADATA_3, 0x22, 0) +#define ZORRO_PROD_BSC_ISDN_MASTERCARD ZORRO_ID(BSC_ALFADATA_3, 0x40, 0) +#define ZORRO_PROD_BSC_ISDN_MASTERCARD_II ZORRO_ID(BSC_ALFADATA_3, 0x41, 0) + +#define ZORRO_MANUF_PHOENIX 0x0835 +#define ZORRO_PROD_PHOENIX_ST506 ZORRO_ID(PHOENIX, 0x21, 0) +#define ZORRO_PROD_PHOENIX_SCSI ZORRO_ID(PHOENIX, 0x22, 0) +#define ZORRO_PROD_PHOENIX_RAM ZORRO_ID(PHOENIX, 0xBE, 0) + +#define ZORRO_MANUF_ADVANCED_STORAGE_SYSTEMS 0x0836 +#define ZORRO_PROD_ADVANCED_STORAGE_SYSTEMS_NEXUS ZORRO_ID(ADVANCED_STORAGE_SYSTEMS, 0x01, 0) +#define ZORRO_PROD_ADVANCED_STORAGE_SYSTEMS_NEXUS_RAM ZORRO_ID(ADVANCED_STORAGE_SYSTEMS, 0x08, 0) + +#define ZORRO_MANUF_IMPULSE 0x0838 +#define ZORRO_PROD_IMPULSE_FIRECRACKER_24 ZORRO_ID(IMPULSE, 0x00, 0) + +#define ZORRO_MANUF_IVS 0x0840 +#define ZORRO_PROD_IVS_GRANDSLAM_PIC_2 ZORRO_ID(IVS, 0x02, 0) +#define ZORRO_PROD_IVS_GRANDSLAM_PIC_1 ZORRO_ID(IVS, 0x04, 0) +#define ZORRO_PROD_IVS_OVERDRIVE ZORRO_ID(IVS, 0x10, 0) +#define ZORRO_PROD_IVS_TRUMPCARD_CLASSIC ZORRO_ID(IVS, 0x30, 0) +#define ZORRO_PROD_IVS_TRUMPCARD_PRO_GRANDSLAM ZORRO_ID(IVS, 0x34, 0) +#define ZORRO_PROD_IVS_META_4 ZORRO_ID(IVS, 0x40, 0) +#define ZORRO_PROD_IVS_WAVETOOLS ZORRO_ID(IVS, 0xBF, 0) +#define ZORRO_PROD_IVS_VECTOR_1 ZORRO_ID(IVS, 0xF3, 0) +#define ZORRO_PROD_IVS_VECTOR_2 ZORRO_ID(IVS, 0xF4, 0) + +#define ZORRO_MANUF_VECTOR_1 0x0841 +#define ZORRO_PROD_VECTOR_CONNECTION_1 ZORRO_ID(VECTOR_1, 0xE3, 0) + +#define ZORRO_MANUF_XPERT_PRODEV 0x0845 +#define ZORRO_PROD_XPERT_PRODEV_VISIONA_RAM ZORRO_ID(XPERT_PRODEV, 
0x01, 0) +#define ZORRO_PROD_XPERT_PRODEV_VISIONA_REG ZORRO_ID(XPERT_PRODEV, 0x02, 0) +#define ZORRO_PROD_XPERT_PRODEV_MERLIN_RAM ZORRO_ID(XPERT_PRODEV, 0x03, 0) +#define ZORRO_PROD_XPERT_PRODEV_MERLIN_REG_1 ZORRO_ID(XPERT_PRODEV, 0x04, 0) +#define ZORRO_PROD_XPERT_PRODEV_MERLIN_REG_2 ZORRO_ID(XPERT_PRODEV, 0xC9, 0) + +#define ZORRO_MANUF_HYDRA_SYSTEMS 0x0849 +#define ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET ZORRO_ID(HYDRA_SYSTEMS, 0x01, 0) + +#define ZORRO_MANUF_SUNRIZE_INDUSTRIES 0x084F +#define ZORRO_PROD_SUNRIZE_INDUSTRIES_AD1012 ZORRO_ID(SUNRIZE_INDUSTRIES, 0x01, 0) +#define ZORRO_PROD_SUNRIZE_INDUSTRIES_AD516 ZORRO_ID(SUNRIZE_INDUSTRIES, 0x02, 0) +#define ZORRO_PROD_SUNRIZE_INDUSTRIES_DD512 ZORRO_ID(SUNRIZE_INDUSTRIES, 0x03, 0) + +#define ZORRO_MANUF_TRICERATOPS 0x0850 +#define ZORRO_PROD_TRICERATOPS_MULTI_IO ZORRO_ID(TRICERATOPS, 0x01, 0) + +#define ZORRO_MANUF_APPLIED_MAGIC 0x0851 +#define ZORRO_PROD_APPLIED_MAGIC_DMI_RESOLVER ZORRO_ID(APPLIED_MAGIC, 0x01, 0) +#define ZORRO_PROD_APPLIED_MAGIC_DIGITAL_BROADCASTER ZORRO_ID(APPLIED_MAGIC, 0x06, 0) + +#define ZORRO_MANUF_GFX_BASE 0x085E +#define ZORRO_PROD_GFX_BASE_GDA_1_VRAM ZORRO_ID(GFX_BASE, 0x00, 0) +#define ZORRO_PROD_GFX_BASE_GDA_1 ZORRO_ID(GFX_BASE, 0x01, 0) + +#define ZORRO_MANUF_ROCTEC 0x0860 +#define ZORRO_PROD_ROCTEC_RH_800C ZORRO_ID(ROCTEC, 0x01, 0) +#define ZORRO_PROD_ROCTEC_RH_800C_RAM ZORRO_ID(ROCTEC, 0x01, 0) + +#define ZORRO_MANUF_KATO 0x0861 +#define ZORRO_PROD_KATO_MELODY ZORRO_ID(KATO, 0x80, 0) +/* ID clash!! */ +#define ZORRO_MANUF_HELFRICH_1 0x0861 +#define ZORRO_PROD_HELFRICH_RAINBOW_II ZORRO_ID(HELFRICH_1, 0x20, 0) +#define ZORRO_PROD_HELFRICH_RAINBOW_III ZORRO_ID(HELFRICH_1, 0x21, 0) + +#define ZORRO_MANUF_ATLANTIS 0x0862 + +#define ZORRO_MANUF_PROTAR 0x0864 + +#define ZORRO_MANUF_ACS 0x0865 + +#define ZORRO_MANUF_SOFTWARE_RESULTS_ENTERPRISES 0x0866 +#define ZORRO_PROD_SOFTWARE_RESULTS_ENTERPRISES_GOLDEN_GATE_2_BUS_PLUS ZORRO_ID(SOFTWARE_RESULTS_ENTERPRISES, 0x01, 0) + +#define ZORRO_MANUF_MASOBOSHI 0x086D +#define ZORRO_PROD_MASOBOSHI_MASTER_CARD_SC201 ZORRO_ID(MASOBOSHI, 0x03, 0) +#define ZORRO_PROD_MASOBOSHI_MASTER_CARD_MC702 ZORRO_ID(MASOBOSHI, 0x04, 0) +#define ZORRO_PROD_MASOBOSHI_MVD_819 ZORRO_ID(MASOBOSHI, 0x07, 0) + +#define ZORRO_MANUF_MAINHATTAN_DATA 0x086F +#define ZORRO_PROD_MAINHATTAN_DATA_IDE ZORRO_ID(MAINHATTAN_DATA, 0x01, 0) + +#define ZORRO_MANUF_VILLAGE_TRONIC 0x0877 +#define ZORRO_PROD_VILLAGE_TRONIC_DOMINO_RAM ZORRO_ID(VILLAGE_TRONIC, 0x01, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_DOMINO_REG ZORRO_ID(VILLAGE_TRONIC, 0x02, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_DOMINO_16M_PROTOTYPE ZORRO_ID(VILLAGE_TRONIC, 0x03, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM ZORRO_ID(VILLAGE_TRONIC, 0x0B, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG ZORRO_ID(VILLAGE_TRONIC, 0x0C, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_SEGMENTED_MODE ZORRO_ID(VILLAGE_TRONIC, 0x0D, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM1 ZORRO_ID(VILLAGE_TRONIC, 0x15, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM2 ZORRO_ID(VILLAGE_TRONIC, 0x16, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_REG ZORRO_ID(VILLAGE_TRONIC, 0x17, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3 ZORRO_ID(VILLAGE_TRONIC, 0x18, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_ARIADNE ZORRO_ID(VILLAGE_TRONIC, 0xC9, 0) +#define ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2 ZORRO_ID(VILLAGE_TRONIC, 0xCA, 0) + +#define ZORRO_MANUF_UTILITIES_UNLIMITED 0x087B +#define ZORRO_PROD_UTILITIES_UNLIMITED_EMPLANT_DELUXE 
ZORRO_ID(UTILITIES_UNLIMITED, 0x15, 0) +#define ZORRO_PROD_UTILITIES_UNLIMITED_EMPLANT_DELUXE2 ZORRO_ID(UTILITIES_UNLIMITED, 0x20, 0) + +#define ZORRO_MANUF_AMITRIX 0x0880 +#define ZORRO_PROD_AMITRIX_MULTI_IO ZORRO_ID(AMITRIX, 0x01, 0) +#define ZORRO_PROD_AMITRIX_CD_RAM ZORRO_ID(AMITRIX, 0x02, 0) + +#define ZORRO_MANUF_ARMAX 0x0885 +#define ZORRO_PROD_ARMAX_OMNIBUS ZORRO_ID(ARMAX, 0x00, 0) + +#define ZORRO_MANUF_ZEUS 0x088D +#define ZORRO_PROD_ZEUS_SPIDER ZORRO_ID(ZEUS, 0x04, 0) + +#define ZORRO_MANUF_NEWTEK 0x088F +#define ZORRO_PROD_NEWTEK_VIDEOTOASTER ZORRO_ID(NEWTEK, 0x00, 0) + +#define ZORRO_MANUF_M_TECH_GERMANY 0x0890 +#define ZORRO_PROD_MTEC_AT500_2 ZORRO_ID(M_TECH_GERMANY, 0x01, 0) +#define ZORRO_PROD_MTEC_68030 ZORRO_ID(M_TECH_GERMANY, 0x03, 0) +#define ZORRO_PROD_MTEC_68020I ZORRO_ID(M_TECH_GERMANY, 0x06, 0) +#define ZORRO_PROD_MTEC_A1200_T68030_RTC ZORRO_ID(M_TECH_GERMANY, 0x20, 0) +#define ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530 ZORRO_ID(M_TECH_GERMANY, 0x21, 0) +#define ZORRO_PROD_MTEC_8_MB_RAM ZORRO_ID(M_TECH_GERMANY, 0x22, 0) +#define ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE ZORRO_ID(M_TECH_GERMANY, 0x24, 0) + +#define ZORRO_MANUF_GREAT_VALLEY_PRODUCTS_4 0x0891 +#define ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM ZORRO_ID(GREAT_VALLEY_PRODUCTS_4, 0x01, 0) +#define ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG ZORRO_ID(GREAT_VALLEY_PRODUCTS_4, 0x02, 0) + +#define ZORRO_MANUF_APOLLO_1 0x0892 +#define ZORRO_PROD_APOLLO_A1200 ZORRO_ID(APOLLO_1, 0x01, 0) + +#define ZORRO_MANUF_HELFRICH_2 0x0893 +#define ZORRO_PROD_HELFRICH_PICCOLO_RAM ZORRO_ID(HELFRICH_2, 0x05, 0) +#define ZORRO_PROD_HELFRICH_PICCOLO_REG ZORRO_ID(HELFRICH_2, 0x06, 0) +#define ZORRO_PROD_HELFRICH_PEGGY_PLUS_MPEG ZORRO_ID(HELFRICH_2, 0x07, 0) +#define ZORRO_PROD_HELFRICH_VIDEOCRUNCHER ZORRO_ID(HELFRICH_2, 0x08, 0) +#define ZORRO_PROD_HELFRICH_SD64_RAM ZORRO_ID(HELFRICH_2, 0x0A, 0) +#define ZORRO_PROD_HELFRICH_SD64_REG ZORRO_ID(HELFRICH_2, 0x0B, 0) + +#define ZORRO_MANUF_MACROSYSTEMS_USA 0x089B +#define ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx ZORRO_ID(MACROSYSTEMS_USA, 0x13, 0) + +#define ZORRO_MANUF_ELBOX_COMPUTER 0x089E +#define ZORRO_PROD_ELBOX_COMPUTER_1200_4 ZORRO_ID(ELBOX_COMPUTER, 0x06, 0) + +#define ZORRO_MANUF_HARMS_PROFESSIONAL 0x0A00 +#define ZORRO_PROD_HARMS_PROFESSIONAL_030_PLUS ZORRO_ID(HARMS_PROFESSIONAL, 0x10, 0) +#define ZORRO_PROD_HARMS_PROFESSIONAL_3500 ZORRO_ID(HARMS_PROFESSIONAL, 0xD0, 0) + +#define ZORRO_MANUF_MICRONIK 0x0A50 +#define ZORRO_PROD_MICRONIK_RCA_120 ZORRO_ID(MICRONIK, 0x0A, 0) + +#define ZORRO_MANUF_MICRONIK2 0x0F0F +#define ZORRO_PROD_MICRONIK2_Z3I ZORRO_ID(MICRONIK2, 0x01, 0) + +#define ZORRO_MANUF_MEGAMICRO 0x1000 +#define ZORRO_PROD_MEGAMICRO_SCRAM_500 ZORRO_ID(MEGAMICRO, 0x03, 0) +#define ZORRO_PROD_MEGAMICRO_SCRAM_500_RAM ZORRO_ID(MEGAMICRO, 0x04, 0) + +#define ZORRO_MANUF_IMTRONICS_2 0x1028 +#define ZORRO_PROD_IMTRONICS_HURRICANE_2800_3 ZORRO_ID(IMTRONICS_2, 0x39, 0) +#define ZORRO_PROD_IMTRONICS_HURRICANE_2800_4 ZORRO_ID(IMTRONICS_2, 0x57, 0) + +/* unofficial ID */ +#define ZORRO_MANUF_INDIVIDUAL_COMPUTERS 0x1212 +#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x00, 0) +#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x17, 0) +#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x2A, 0) + +#define ZORRO_MANUF_KUPKE_3 0x1248 +#define ZORRO_PROD_KUPKE_GOLEM_HD_3000 ZORRO_ID(KUPKE_3, 0x01, 0) + +#define ZORRO_MANUF_ITH 0x1388 +#define ZORRO_PROD_ITH_ISDN_MASTER_II ZORRO_ID(ITH, 0x01, 0) + 
+#define ZORRO_MANUF_VMC 0x1389 +#define ZORRO_PROD_VMC_ISDN_BLASTER_Z2 ZORRO_ID(VMC, 0x01, 0) +#define ZORRO_PROD_VMC_HYPERCOM_4 ZORRO_ID(VMC, 0x02, 0) + +#define ZORRO_MANUF_INFORMATION 0x157C +#define ZORRO_PROD_INFORMATION_ISDN_ENGINE_I ZORRO_ID(INFORMATION, 0x64, 0) + +#define ZORRO_MANUF_VORTEX 0x2017 +#define ZORRO_PROD_VORTEX_GOLDEN_GATE_80386SX ZORRO_ID(VORTEX, 0x07, 0) +#define ZORRO_PROD_VORTEX_GOLDEN_GATE_RAM ZORRO_ID(VORTEX, 0x08, 0) +#define ZORRO_PROD_VORTEX_GOLDEN_GATE_80486 ZORRO_ID(VORTEX, 0x09, 0) + +#define ZORRO_MANUF_EXPANSION_SYSTEMS 0x2062 +#define ZORRO_PROD_EXPANSION_SYSTEMS_DATAFLYER_4000SX ZORRO_ID(EXPANSION_SYSTEMS, 0x01, 0) +#define ZORRO_PROD_EXPANSION_SYSTEMS_DATAFLYER_4000SX_RAM ZORRO_ID(EXPANSION_SYSTEMS, 0x02, 0) + +#define ZORRO_MANUF_READYSOFT 0x2100 +#define ZORRO_PROD_READYSOFT_AMAX_II_IV ZORRO_ID(READYSOFT, 0x01, 0) + +#define ZORRO_MANUF_PHASE5 0x2140 +#define ZORRO_PROD_PHASE5_BLIZZARD_RAM ZORRO_ID(PHASE5, 0x01, 0) +#define ZORRO_PROD_PHASE5_BLIZZARD ZORRO_ID(PHASE5, 0x02, 0) +#define ZORRO_PROD_PHASE5_BLIZZARD_1220_IV ZORRO_ID(PHASE5, 0x06, 0) +#define ZORRO_PROD_PHASE5_FASTLANE_Z3_RAM ZORRO_ID(PHASE5, 0x0A, 0) +#define ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060 ZORRO_ID(PHASE5, 0x0B, 0) +#define ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM ZORRO_ID(PHASE5, 0x0C, 0) +#define ZORRO_PROD_PHASE5_BLIZZARD_1230 ZORRO_ID(PHASE5, 0x0D, 0) +#define ZORRO_PROD_PHASE5_BLIZZARD_1230_IV_1260 ZORRO_ID(PHASE5, 0x11, 0) +#define ZORRO_PROD_PHASE5_BLIZZARD_2060 ZORRO_ID(PHASE5, 0x18, 0) +#define ZORRO_PROD_PHASE5_CYBERSTORM_MK_II ZORRO_ID(PHASE5, 0x19, 0) +#define ZORRO_PROD_PHASE5_CYBERVISION64 ZORRO_ID(PHASE5, 0x22, 0) +#define ZORRO_PROD_PHASE5_CYBERVISION64_3D_PROTOTYPE ZORRO_ID(PHASE5, 0x32, 0) +#define ZORRO_PROD_PHASE5_CYBERVISION64_3D ZORRO_ID(PHASE5, 0x43, 0) +#define ZORRO_PROD_PHASE5_CYBERSTORM_MK_III ZORRO_ID(PHASE5, 0x64, 0) +#define ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS ZORRO_ID(PHASE5, 0x6e, 0) + +#define ZORRO_MANUF_DPS 0x2169 +#define ZORRO_PROD_DPS_PERSONAL_ANIMATION_RECORDER ZORRO_ID(DPS, 0x01, 0) + +#define ZORRO_MANUF_APOLLO_2 0x2200 +#define ZORRO_PROD_APOLLO_A620_68020_1 ZORRO_ID(APOLLO_2, 0x00, 0) +#define ZORRO_PROD_APOLLO_A620_68020_2 ZORRO_ID(APOLLO_2, 0x01, 0) + +#define ZORRO_MANUF_APOLLO_3 0x2222 +#define ZORRO_PROD_APOLLO_AT_APOLLO ZORRO_ID(APOLLO_3, 0x22, 0) +#define ZORRO_PROD_APOLLO_1230_1240_1260_2030_4040_4060 ZORRO_ID(APOLLO_3, 0x23, 0) + +#define ZORRO_MANUF_PETSOFF_LP 0x38A5 +#define ZORRO_PROD_PETSOFF_LP_DELFINA ZORRO_ID(PETSOFF_LP, 0x00, 0) +#define ZORRO_PROD_PETSOFF_LP_DELFINA_LITE ZORRO_ID(PETSOFF_LP, 0x01, 0) + +#define ZORRO_MANUF_UWE_GERLACH 0x3FF7 +#define ZORRO_PROD_UWE_GERLACH_RAM_ROM ZORRO_ID(UWE_GERLACH, 0xd4, 0) + +#define ZORRO_MANUF_ACT 0x4231 +#define ZORRO_PROD_ACT_PRELUDE ZORRO_ID(ACT, 0x01, 0) + +#define ZORRO_MANUF_MACROSYSTEMS_GERMANY 0x4754 +#define ZORRO_PROD_MACROSYSTEMS_MAESTRO ZORRO_ID(MACROSYSTEMS_GERMANY, 0x03, 0) +#define ZORRO_PROD_MACROSYSTEMS_VLAB ZORRO_ID(MACROSYSTEMS_GERMANY, 0x04, 0) +#define ZORRO_PROD_MACROSYSTEMS_MAESTRO_PRO ZORRO_ID(MACROSYSTEMS_GERMANY, 0x05, 0) +#define ZORRO_PROD_MACROSYSTEMS_RETINA ZORRO_ID(MACROSYSTEMS_GERMANY, 0x06, 0) +#define ZORRO_PROD_MACROSYSTEMS_MULTI_EVOLUTION ZORRO_ID(MACROSYSTEMS_GERMANY, 0x08, 0) +#define ZORRO_PROD_MACROSYSTEMS_TOCCATA ZORRO_ID(MACROSYSTEMS_GERMANY, 0x0C, 0) +#define ZORRO_PROD_MACROSYSTEMS_RETINA_Z3 ZORRO_ID(MACROSYSTEMS_GERMANY, 0x10, 0) +#define ZORRO_PROD_MACROSYSTEMS_VLAB_MOTION 
ZORRO_ID(MACROSYSTEMS_GERMANY, 0x12, 0) +#define ZORRO_PROD_MACROSYSTEMS_ALTAIS ZORRO_ID(MACROSYSTEMS_GERMANY, 0x13, 0) +#define ZORRO_PROD_MACROSYSTEMS_FALCON_040 ZORRO_ID(MACROSYSTEMS_GERMANY, 0xFD, 0) + +#define ZORRO_MANUF_COMBITEC 0x6766 + +#define ZORRO_MANUF_SKI_PERIPHERALS 0x8000 +#define ZORRO_PROD_SKI_PERIPHERALS_MAST_FIREBALL ZORRO_ID(SKI_PERIPHERALS, 0x08, 0) +#define ZORRO_PROD_SKI_PERIPHERALS_SCSI_DUAL_SERIAL ZORRO_ID(SKI_PERIPHERALS, 0x80, 0) + +#define ZORRO_MANUF_REIS_WARE_2 0xA9AD +#define ZORRO_PROD_REIS_WARE_SCAN_KING ZORRO_ID(REIS_WARE_2, 0x11, 0) + +#define ZORRO_MANUF_CAMERON 0xAA01 +#define ZORRO_PROD_CAMERON_PERSONAL_A4 ZORRO_ID(CAMERON, 0x10, 0) + +#define ZORRO_MANUF_REIS_WARE 0xAA11 +#define ZORRO_PROD_REIS_WARE_HANDYSCANNER ZORRO_ID(REIS_WARE, 0x11, 0) + +#define ZORRO_MANUF_PHOENIX_2 0xB5A8 +#define ZORRO_PROD_PHOENIX_ST506_2 ZORRO_ID(PHOENIX_2, 0x21, 0) +#define ZORRO_PROD_PHOENIX_SCSI_2 ZORRO_ID(PHOENIX_2, 0x22, 0) +#define ZORRO_PROD_PHOENIX_RAM_2 ZORRO_ID(PHOENIX_2, 0xBE, 0) + +#define ZORRO_MANUF_COMBITEC_2 0xC008 +#define ZORRO_PROD_COMBITEC_HD ZORRO_ID(COMBITEC_2, 0x2A, 0) +#define ZORRO_PROD_COMBITEC_SRAM ZORRO_ID(COMBITEC_2, 0x2B, 0) + + + /* + * Test and illegal Manufacturer IDs. + */ + +#define ZORRO_MANUF_HACKER 0x07DB +#define ZORRO_PROD_GENERAL_PROTOTYPE ZORRO_ID(HACKER, 0x00, 0) +#define ZORRO_PROD_HACKER_SCSI ZORRO_ID(HACKER, 0x01, 0) +#define ZORRO_PROD_RESOURCE_MANAGEMENT_FORCE_QUICKNET_QN2000 ZORRO_ID(HACKER, 0x02, 0) +#define ZORRO_PROD_VECTOR_CONNECTION_2 ZORRO_ID(HACKER, 0xE0, 0) +#define ZORRO_PROD_VECTOR_CONNECTION_3 ZORRO_ID(HACKER, 0xE1, 0) +#define ZORRO_PROD_VECTOR_CONNECTION_4 ZORRO_ID(HACKER, 0xE2, 0) +#define ZORRO_PROD_VECTOR_CONNECTION_5 ZORRO_ID(HACKER, 0xE3, 0) diff --git a/include/linux/zutil.h b/include/linux/zutil.h new file mode 100644 index 00000000..6adfa9a6 --- /dev/null +++ b/include/linux/zutil.h @@ -0,0 +1,106 @@ +/* zutil.h -- internal interface and configuration of the compression library + * Copyright (C) 1995-1998 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. 
+ */ + +/* @(#) $Id: zutil.h,v 1.1 2000/01/01 03:32:23 davem Exp $ */ + +#ifndef _Z_UTIL_H +#define _Z_UTIL_H + +#include <linux/zlib.h> +#include <linux/string.h> +#include <linux/kernel.h> + +typedef unsigned char uch; +typedef unsigned short ush; +typedef unsigned long ulg; + + /* common constants */ + +#define STORED_BLOCK 0 +#define STATIC_TREES 1 +#define DYN_TREES 2 +/* The three kinds of block type */ + +#define MIN_MATCH 3 +#define MAX_MATCH 258 +/* The minimum and maximum match lengths */ + +#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ + + /* target dependencies */ + + /* Common defaults */ + +#ifndef OS_CODE +# define OS_CODE 0x03 /* assume Unix */ +#endif + + /* functions */ + +typedef uLong (*check_func) (uLong check, const Byte *buf, + uInt len); + + + /* checksum functions */ + +#define BASE 65521L /* largest prime smaller than 65536 */ +#define NMAX 5552 +/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ + +#define DO1(buf,i) {s1 += buf[i]; s2 += s1;} +#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); +#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); +#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); +#define DO16(buf) DO8(buf,0); DO8(buf,8); + +/* ========================================================================= */ +/* + Update a running Adler-32 checksum with the bytes buf[0..len-1] and + return the updated checksum. If buf is NULL, this function returns + the required initial value for the checksum. + An Adler-32 checksum is almost as reliable as a CRC32 but can be computed + much faster. Usage example: + + uLong adler = adler32(0L, NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + adler = adler32(adler, buffer, length); + } + if (adler != original_adler) error(); +*/ +static inline uLong zlib_adler32(uLong adler, + const Byte *buf, + uInt len) +{ + unsigned long s1 = adler & 0xffff; + unsigned long s2 = (adler >> 16) & 0xffff; + int k; + + if (buf == NULL) return 1L; + + while (len > 0) { + k = len < NMAX ? len : NMAX; + len -= k; + while (k >= 16) { + DO16(buf); + buf += 16; + k -= 16; + } + if (k != 0) do { + s1 += *buf++; + s2 += s1; + } while (--k); + s1 %= BASE; + s2 %= BASE; + } + return (s2 << 16) | s1; +} + +#endif /* _Z_UTIL_H */ |
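Editor's note: the zlib_adler32() inline defined in zutil.h above already documents its calling convention in the comment block (seed with a NULL buffer, then feed successive chunks). The sketch below simply spells that out as a small helper in kernel style. It is a minimal illustration, not code from this diff: the demo_adler32_over_chunks() name and its chunk/length arrays are hypothetical, and it assumes a kernel-internal context where <linux/zutil.h> (and the uLong/Byte/uInt types it pulls in from <linux/zlib.h>) is available.

/*
 * Hedged sketch: computing a running Adler-32 over several buffers
 * with the zlib_adler32() static inline from zutil.h above.
 * demo_adler32_over_chunks() is an illustrative helper, not kernel code.
 */
#include <linux/zutil.h>	/* zlib_adler32(), uLong, Byte, uInt */

static uLong demo_adler32_over_chunks(const Byte * const *chunks,
				      const uInt *lens, int nchunks)
{
	/* A NULL buffer returns the required initial value (1L). */
	uLong adler = zlib_adler32(0L, NULL, 0);
	int i;

	/* Fold each chunk into the running checksum, as the header
	 * comment's usage example does with read_buffer(). */
	for (i = 0; i < nchunks; i++)
		adler = zlib_adler32(adler, chunks[i], lens[i]);

	return adler;
}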
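Editor's note on the zorro_ids.h additions earlier in this diff: every ZORRO_PROD_* constant is built with the ZORRO_ID(manuf, prod, epc) macro, so the constants are plain integer expressions that can be compared or used as case labels. The sketch below shows that pattern. It is illustrative only: demo_describe_board() is hypothetical, the returned strings are just the board names from the constants, and it assumes <linux/zorro.h> provides the ZORRO_ID() macro and includes these ID definitions.

/*
 * Hedged sketch: mapping a composed Zorro board ID to a name using
 * constants from zorro_ids.h. board_id is assumed to be the
 * (manufacturer << 16 | product << 8 | epc) value that ZORRO_ID()
 * composes; how it is obtained from the board is out of scope here.
 */
#include <linux/types.h>
#include <linux/zorro.h>	/* ZORRO_ID() and the zorro_ids.h constants */

static const char *demo_describe_board(__u32 board_id)
{
	switch (board_id) {
	case ZORRO_PROD_VILLAGE_TRONIC_ARIADNE:
		return "Village Tronic Ariadne";
	case ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA:
		return "Individual Computers Buddha";
	case ZORRO_PROD_PHASE5_CYBERVISION64:
		return "phase5 CyberVision64";
	default:
		return "unknown Zorro board";
	}
}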